summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.gitignore1
-rw-r--r--Documentation/ABI/stable/sysfs-driver-dma-idxd6
-rw-r--r--Documentation/ABI/testing/configfs-tsm82
-rw-r--r--Documentation/ABI/testing/configfs-usb-gadget-uac22
-rw-r--r--Documentation/ABI/testing/sysfs-bus-cdx66
-rw-r--r--Documentation/ABI/testing/sysfs-bus-cxl30
-rw-r--r--Documentation/ABI/testing/sysfs-bus-i3c4
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio75
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-adc-mcp356453
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-resolver-ad2s121027
-rw-r--r--Documentation/ABI/testing/sysfs-bus-usb9
-rw-r--r--Documentation/ABI/testing/sysfs-bus-vdpa4
-rw-r--r--Documentation/ABI/testing/sysfs-class-usb_power_delivery7
-rw-r--r--Documentation/ABI/testing/sysfs-tty15
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt21
-rw-r--r--Documentation/admin-guide/media/mgb4.rst374
-rw-r--r--Documentation/admin-guide/media/pci-cardlist.rst1
-rw-r--r--Documentation/admin-guide/media/v4l-drivers.rst1
-rw-r--r--Documentation/admin-guide/media/visl.rst6
-rw-r--r--Documentation/arch/powerpc/index.rst1
-rw-r--r--Documentation/arch/powerpc/kvm-nested.rst634
-rw-r--r--Documentation/arch/riscv/hwprobe.rst6
-rw-r--r--Documentation/arch/riscv/uabi.rst20
-rw-r--r--Documentation/bpf/kfuncs.rst6
-rw-r--r--Documentation/devicetree/bindings/display/renesas,shmobile-lcdc.yaml130
-rw-r--r--Documentation/devicetree/bindings/display/solomon,ssd132x.yaml8
-rw-r--r--Documentation/devicetree/bindings/dma/qcom,gpi.yaml2
-rw-r--r--Documentation/devicetree/bindings/eeprom/at24.yaml5
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt135
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.yaml172
-rw-r--r--Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml2
-rw-r--r--Documentation/devicetree/bindings/i3c/i3c.yaml4
-rw-r--r--Documentation/devicetree/bindings/iio/accel/kionix,kx022a.yaml14
-rw-r--r--Documentation/devicetree/bindings/iio/adc/lltc,ltc2497.yaml20
-rw-r--r--Documentation/devicetree/bindings/iio/adc/microchip,mcp3564.yaml205
-rw-r--r--Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml6
-rw-r--r--Documentation/devicetree/bindings/iio/adc/ti,ads1015.yaml3
-rw-r--r--Documentation/devicetree/bindings/iio/adc/ti,twl6030-gpadc.yaml43
-rw-r--r--Documentation/devicetree/bindings/iio/amplifiers/adi,hmc425a.yaml12
-rw-r--r--Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml5
-rw-r--r--Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml3
-rw-r--r--Documentation/devicetree/bindings/iio/pressure/rohm,bm1390.yaml52
-rw-r--r--Documentation/devicetree/bindings/iio/resolver/adi,ad2s1210.yaml177
-rw-r--r--Documentation/devicetree/bindings/input/fsl,scu-key.yaml2
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/cypress,tt21000.yaml3
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,msm8939.yaml74
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,msm8996.yaml126
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,qcm2290.yaml60
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,rpm-common.yaml28
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml250
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml1
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml108
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,sdx75-rpmh.yaml92
-rw-r--r--Documentation/devicetree/bindings/iommu/arm,smmu.yaml2
-rw-r--r--Documentation/devicetree/bindings/leds/irled/pwm-ir-tx.yaml5
-rw-r--r--Documentation/devicetree/bindings/mailbox/fsl,mu.yaml5
-rw-r--r--Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml2
-rw-r--r--Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml1
-rw-r--r--Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.yaml6
-rw-r--r--Documentation/devicetree/bindings/media/amlogic,meson6-ir.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/cdns,csi2rx.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/i2c/hynix,hi846.yaml7
-rw-r--r--Documentation/devicetree/bindings/media/i2c/onnn,mt9m114.yaml114
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov02a10.yaml8
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov4689.yaml6
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov5640.yaml7
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov5642.yaml141
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov5693.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/i2c/sony,imx214.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/i2c/sony,imx415.yaml10
-rw-r--r--Documentation/devicetree/bindings/media/nokia,n900-ir20
-rw-r--r--Documentation/devicetree/bindings/media/nuvoton,npcm-ece.yaml43
-rw-r--r--Documentation/devicetree/bindings/media/nuvoton,npcm-vcd.yaml72
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sdm845-venus-v2.yaml8
-rw-r--r--Documentation/devicetree/bindings/media/rockchip-vpu.yaml7
-rw-r--r--Documentation/devicetree/bindings/media/samsung,exynos4212-fimc-is.yaml15
-rw-r--r--Documentation/devicetree/bindings/media/samsung,fimc.yaml27
-rw-r--r--Documentation/devicetree/bindings/media/ti,j721e-csi2rx-shim.yaml100
-rw-r--r--Documentation/devicetree/bindings/media/video-interfaces.yaml1
-rw-r--r--Documentation/devicetree/bindings/mfd/brcm,bcm63268-gpio-sysctl.yaml18
-rw-r--r--Documentation/devicetree/bindings/mfd/brcm,bcm6362-gpio-sysctl.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/brcm,bcm6368-gpio-sysctl.yaml2
-rw-r--r--Documentation/devicetree/bindings/mtd/mtd.yaml7
-rw-r--r--Documentation/devicetree/bindings/mtd/partitions/fixed-partitions.yaml19
-rw-r--r--Documentation/devicetree/bindings/mtd/partitions/nvmem-cells.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/ti,cc1352p7.yaml51
-rw-r--r--Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml1
-rw-r--r--Documentation/devicetree/bindings/nvmem/amlogic,meson-gxbb-efuse.yaml1
-rw-r--r--Documentation/devicetree/bindings/nvmem/amlogic,meson6-efuse.yaml1
-rw-r--r--Documentation/devicetree/bindings/nvmem/apple,efuses.yaml1
-rw-r--r--Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml1
-rw-r--r--Documentation/devicetree/bindings/nvmem/mediatek,efuse.yaml1
-rw-r--r--Documentation/devicetree/bindings/nvmem/microchip,sama7g5-otpc.yaml1
-rw-r--r--Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml1
-rw-r--r--Documentation/devicetree/bindings/nvmem/nvmem-deprecated-cells.yaml28
-rw-r--r--Documentation/devicetree/bindings/nvmem/nvmem.yaml9
-rw-r--r--Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml1
-rw-r--r--Documentation/devicetree/bindings/nvmem/qcom,sec-qfprom.yaml1
-rw-r--r--Documentation/devicetree/bindings/nvmem/qcom,spmi-sdam.yaml1
-rw-r--r--Documentation/devicetree/bindings/nvmem/rockchip,otp.yaml1
-rw-r--r--Documentation/devicetree/bindings/nvmem/rockchip-efuse.yaml1
-rw-r--r--Documentation/devicetree/bindings/nvmem/socionext,uniphier-efuse.yaml1
-rw-r--r--Documentation/devicetree/bindings/nvmem/sunplus,sp7021-ocotp.yaml1
-rw-r--r--Documentation/devicetree/bindings/nvmem/u-boot,env.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/marvell,pxa1928-usb-phy.yaml47
-rw-r--r--Documentation/devicetree/bindings/phy/mediatek,dsi-phy.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/mediatek,mt7628-usbphy.yaml74
-rw-r--r--Documentation/devicetree/bindings/phy/phy-stih407-usb.txt24
-rw-r--r--Documentation/devicetree/bindings/phy/pxa1928-usb-phy.txt18
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,ipq5332-usb-hsphy.yaml4
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-usb3-phy.yaml287
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml57
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,snps-eusb2-phy.yaml7
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml21
-rw-r--r--Documentation/devicetree/bindings/phy/ralink-usb-phy.txt23
-rw-r--r--Documentation/devicetree/bindings/phy/st,stih407-usb2-phy.yaml63
-rw-r--r--Documentation/devicetree/bindings/pinctrl/amlogic,meson-pinctrl-a1.yaml1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/brcm,bcm6318-pinctrl.yaml5
-rw-r--r--Documentation/devicetree/bindings/pinctrl/brcm,bcm63268-pinctrl.yaml25
-rw-r--r--Documentation/devicetree/bindings/pinctrl/brcm,bcm6328-pinctrl.yaml5
-rw-r--r--Documentation/devicetree/bindings/pinctrl/brcm,bcm6358-pinctrl.yaml5
-rw-r--r--Documentation/devicetree/bindings/pinctrl/brcm,bcm6362-pinctrl.yaml7
-rw-r--r--Documentation/devicetree/bindings/pinctrl/brcm,bcm6368-pinctrl.yaml7
-rw-r--r--Documentation/devicetree/bindings/pinctrl/nuvoton,npcm845-pinctrl.yaml217
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,mdm9607-tlmm.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,msm8226-pinctrl.yaml4
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sa8775p-tlmm.yaml1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml4
-rw-r--r--Documentation/devicetree/bindings/pinctrl/realtek,rtd1315e-pinctrl.yaml188
-rw-r--r--Documentation/devicetree/bindings/pinctrl/realtek,rtd1319d-pinctrl.yaml187
-rw-r--r--Documentation/devicetree/bindings/pinctrl/realtek,rtd1619b-pinctrl.yaml186
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml21
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,rzv2m-pinctrl.yaml1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pwm/mxs-pwm.yaml13
-rw-r--r--Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml176
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml20
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,msm8996-mss-pil.yaml2
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,sc7180-pas.yaml81
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,sm6375-pas.yaml145
-rw-r--r--Documentation/devicetree/bindings/remoteproc/ti,pru-rproc.yaml23
-rw-r--r--Documentation/devicetree/bindings/rtc/amlogic,meson6-rtc.yaml1
-rw-r--r--Documentation/devicetree/bindings/rtc/cirrus,ep9301-rtc.yaml42
-rw-r--r--Documentation/devicetree/bindings/rtc/epson,rtc7301.txt16
-rw-r--r--Documentation/devicetree/bindings/rtc/epson,rtc7301.yaml51
-rw-r--r--Documentation/devicetree/bindings/rtc/maxim,mcp795.txt11
-rw-r--r--Documentation/devicetree/bindings/rtc/microcrystal,rv3032.yaml2
-rw-r--r--Documentation/devicetree/bindings/rtc/mstar,ssd202d-rtc.yaml35
-rw-r--r--Documentation/devicetree/bindings/rtc/nxp,pcf2123.yaml47
-rw-r--r--Documentation/devicetree/bindings/rtc/nxp,pcf8523.txt18
-rw-r--r--Documentation/devicetree/bindings/rtc/nxp,pcf8523.yaml45
-rw-r--r--Documentation/devicetree/bindings/rtc/nxp,rtc-2123.txt17
-rw-r--r--Documentation/devicetree/bindings/rtc/trivial-rtc.yaml2
-rw-r--r--Documentation/devicetree/bindings/serial/esp,esp32-acm.yaml42
-rw-r--r--Documentation/devicetree/bindings/serial/esp,esp32-uart.yaml51
-rw-r--r--Documentation/devicetree/bindings/serial/fsl-imx-uart.yaml2
-rw-r--r--Documentation/devicetree/bindings/serial/fsl-mxs-auart.yaml13
-rw-r--r--Documentation/devicetree/bindings/serial/maxim,max310x.txt48
-rw-r--r--Documentation/devicetree/bindings/serial/maxim,max310x.yaml74
-rw-r--r--Documentation/devicetree/bindings/serial/nvidia,tegra20-hsuart.yaml10
-rw-r--r--Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt118
-rw-r--r--Documentation/devicetree/bindings/serial/nxp,sc16is7xx.yaml127
-rw-r--r--Documentation/devicetree/bindings/serial/qcom,msm-uart.yaml4
-rw-r--r--Documentation/devicetree/bindings/serial/qcom,msm-uartdm.yaml4
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,em-uart.yaml14
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,hscif.yaml4
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,scif.yaml1
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,scifa.yaml4
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,scifb.yaml4
-rw-r--r--Documentation/devicetree/bindings/serial/rs485.yaml4
-rw-r--r--Documentation/devicetree/bindings/serial/samsung_uart.yaml8
-rw-r--r--Documentation/devicetree/bindings/serial/serial.yaml18
-rw-r--r--Documentation/devicetree/bindings/soc/nuvoton/nuvoton,gfxi.yaml39
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml19
-rw-r--r--Documentation/devicetree/bindings/trivial-devices.yaml4
-rw-r--r--Documentation/devicetree/bindings/usb/ci-hdrc-usb2.yaml7
-rw-r--r--Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml43
-rw-r--r--Documentation/devicetree/bindings/usb/genesys,gl850g.yaml3
-rw-r--r--Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml2
-rw-r--r--Documentation/devicetree/bindings/usb/nxp,ptn36502.yaml94
-rw-r--r--Documentation/devicetree/bindings/usb/qcom,dwc3.yaml7
-rw-r--r--Documentation/devicetree/bindings/usb/realtek,rtd-dwc3.yaml80
-rw-r--r--Documentation/devicetree/bindings/usb/realtek,rtd-type-c.yaml82
-rw-r--r--Documentation/devicetree/bindings/usb/rockchip,dwc3.yaml60
-rw-r--r--Documentation/devicetree/bindings/usb/snps,dwc3.yaml56
-rw-r--r--Documentation/devicetree/bindings/usb/ti,tps6598x.yaml81
-rw-r--r--Documentation/devicetree/bindings/usb/vialab,vl817.yaml1
-rw-r--r--Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml12
-rw-r--r--Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt18
-rw-r--r--Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml5
-rw-r--r--Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml2
-rw-r--r--Documentation/driver-api/i3c/protocol.rst4
-rw-r--r--Documentation/driver-api/media/camera-sensor.rst192
-rw-r--r--Documentation/driver-api/media/drivers/ccs/ccs.rst10
-rw-r--r--Documentation/driver-api/media/v4l2-core.rst1
-rw-r--r--Documentation/driver-api/media/v4l2-dev.rst8
-rw-r--r--Documentation/driver-api/media/v4l2-videobuf.rst403
-rw-r--r--Documentation/driver-api/tty/index.rst1
-rw-r--r--Documentation/driver-api/tty/tty_ioctl.rst10
-rw-r--r--Documentation/driver-api/usb/dma.rst48
-rw-r--r--Documentation/filesystems/nfs/exporting.rst7
-rw-r--r--Documentation/filesystems/overlayfs.rst40
-rw-r--r--Documentation/filesystems/porting.rst9
-rw-r--r--Documentation/i2c/busses/i2c-i801.rst1
-rw-r--r--Documentation/i2c/fault-codes.rst4
-rw-r--r--Documentation/kbuild/kbuild.rst6
-rw-r--r--Documentation/kbuild/makefiles.rst11
-rw-r--r--Documentation/misc-devices/eeprom.rst107
-rw-r--r--Documentation/misc-devices/index.rst1
-rw-r--r--Documentation/netlink/specs/devlink.yaml4
-rw-r--r--Documentation/networking/device_drivers/index.rst1
-rw-r--r--Documentation/networking/device_drivers/qlogic/index.rst18
-rw-r--r--Documentation/networking/device_drivers/qlogic/qlge.rst118
-rw-r--r--Documentation/networking/smc-sysctl.rst6
-rw-r--r--Documentation/process/security-bugs.rst35
-rw-r--r--Documentation/process/stable-kernel-rules.rst13
-rw-r--r--Documentation/trace/user_events.rst21
-rw-r--r--Documentation/translations/zh_CN/video4linux/v4l2-framework.txt12
-rw-r--r--Documentation/usb/gadget-testing.rst2
-rw-r--r--Documentation/userspace-api/landlock.rst99
-rw-r--r--Documentation/userspace-api/media/drivers/camera-sensor.rst104
-rw-r--r--Documentation/userspace-api/media/drivers/index.rst2
-rw-r--r--Documentation/userspace-api/media/drivers/npcm-video.rst66
-rw-r--r--Documentation/userspace-api/media/gen-errors.rst4
-rw-r--r--Documentation/userspace-api/media/v4l/buffer.rst4
-rw-r--r--Documentation/userspace-api/media/v4l/control.rst4
-rw-r--r--Documentation/userspace-api/media/v4l/dev-subdev.rst49
-rw-r--r--Documentation/userspace-api/media/v4l/dv-timings.rst21
-rw-r--r--Documentation/userspace-api/media/v4l/pixfmt-reserved.rst7
-rw-r--r--Documentation/userspace-api/media/v4l/pixfmt-srggb12p.rst4
-rw-r--r--Documentation/userspace-api/media/v4l/subdev-formats.rst72
-rw-r--r--MAINTAINERS228
-rw-r--r--Makefile38
-rw-r--r--arch/alpha/kernel/proto.h2
-rw-r--r--arch/alpha/kernel/setup.c8
-rw-r--r--arch/alpha/kernel/sys_sio.c8
-rw-r--r--arch/arm/Makefile7
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8226.dtsi33
-rw-r--r--arch/arm/configs/multi_v7_defconfig1
-rw-r--r--arch/arm/configs/omap2plus_defconfig1
-rw-r--r--arch/arm/configs/pxa_defconfig1
-rw-r--r--arch/arm/configs/tegra_defconfig1
-rw-r--r--arch/arm/include/asm/setup.h5
-rw-r--r--arch/arm/include/asm/vga.h1
-rw-r--r--arch/arm/kernel/atags_parse.c20
-rw-r--r--arch/arm/kernel/efi.c6
-rw-r--r--arch/arm/kernel/setup.c7
-rw-r--r--arch/arm/vdso/Makefile25
-rw-r--r--arch/arm64/Makefile9
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550-mtp.dts1
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550-qrd.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s.dtsi21
-rw-r--r--arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts6
-rw-r--r--arch/arm64/kernel/efi.c4
-rw-r--r--arch/arm64/kernel/image-vars.h2
-rw-r--r--arch/arm64/kernel/vdso/Makefile10
-rw-r--r--arch/arm64/kernel/vdso32/Makefile10
-rw-r--r--arch/csky/kernel/setup.c12
-rw-r--r--arch/csky/kernel/vdso/Makefile10
-rw-r--r--arch/hexagon/kernel/Makefile2
-rw-r--r--arch/hexagon/kernel/screen_info.c3
-rw-r--r--arch/loongarch/Makefile4
-rw-r--r--arch/loongarch/kernel/efi.c8
-rw-r--r--arch/loongarch/kernel/image-vars.h2
-rw-r--r--arch/loongarch/kernel/setup.c3
-rw-r--r--arch/loongarch/vdso/Makefile10
-rw-r--r--arch/mips/Kbuild.platforms1
-rw-r--r--arch/mips/Kconfig22
-rw-r--r--arch/mips/Makefile.postlink3
-rw-r--r--arch/mips/ar7/Makefile11
-rw-r--r--arch/mips/ar7/Platform5
-rw-r--r--arch/mips/ar7/clock.c439
-rw-r--r--arch/mips/ar7/gpio.c332
-rw-r--r--arch/mips/ar7/irq.c165
-rw-r--r--arch/mips/ar7/memory.c51
-rw-r--r--arch/mips/ar7/platform.c722
-rw-r--r--arch/mips/ar7/prom.c256
-rw-r--r--arch/mips/ar7/setup.c93
-rw-r--r--arch/mips/ar7/time.c31
-rw-r--r--arch/mips/boot/compressed/uart-16550.c5
-rw-r--r--arch/mips/boot/dts/ingenic/jz4725b.dtsi1
-rw-r--r--arch/mips/boot/dts/ingenic/jz4770.dtsi1
-rw-r--r--arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc1.dts2
-rw-r--r--arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc2.dts2
-rw-r--r--arch/mips/boot/dts/ralink/mt7621.dtsi5
-rw-r--r--arch/mips/configs/ar7_defconfig119
-rw-r--r--arch/mips/configs/fuloong2e_defconfig1
-rw-r--r--arch/mips/configs/jazz_defconfig4
-rw-r--r--arch/mips/configs/lemote2f_defconfig3
-rw-r--r--arch/mips/configs/malta_defconfig5
-rw-r--r--arch/mips/configs/malta_kvm_defconfig5
-rw-r--r--arch/mips/configs/maltaup_xpa_defconfig5
-rw-r--r--arch/mips/configs/rm200_defconfig4
-rw-r--r--arch/mips/include/asm/mach-ar7/ar7.h191
-rw-r--r--arch/mips/include/asm/mach-ar7/irq.h16
-rw-r--r--arch/mips/include/asm/mach-ar7/prom.h12
-rw-r--r--arch/mips/include/asm/mach-ar7/spaces.h22
-rw-r--r--arch/mips/include/asm/mach-loongson32/dma.h21
-rw-r--r--arch/mips/include/asm/mach-loongson32/nand.h26
-rw-r--r--arch/mips/include/asm/mach-loongson32/platform.h3
-rw-r--r--arch/mips/jazz/setup.c9
-rw-r--r--arch/mips/kernel/relocate_kernel.S1
-rw-r--r--arch/mips/kernel/setup.c11
-rw-r--r--arch/mips/loongson32/common/platform.c2
-rw-r--r--arch/mips/loongson32/ls1b/board.c2
-rw-r--r--arch/mips/mti-malta/malta-setup.c4
-rw-r--r--arch/mips/pci/fixup-lantiq.c2
-rw-r--r--arch/mips/sibyte/swarm/setup.c27
-rw-r--r--arch/mips/sni/setup.c18
-rw-r--r--arch/nios2/kernel/setup.c5
-rw-r--r--arch/parisc/Makefile8
-rw-r--r--arch/parisc/configs/generic-64bit_defconfig1
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/powerpc/Kconfig.debug14
-rw-r--r--arch/powerpc/Makefile.postlink3
-rwxr-xr-xarch/powerpc/boot/install.sh16
-rw-r--r--arch/powerpc/configs/44x/sam440ep_defconfig1
-rw-r--r--arch/powerpc/configs/debug.config4
-rw-r--r--arch/powerpc/configs/g5_defconfig4
-rw-r--r--arch/powerpc/configs/pmac32_defconfig2
-rw-r--r--arch/powerpc/configs/ppc64e_defconfig4
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig5
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgtable.h83
-rw-r--r--arch/powerpc/include/asm/book3s/32/tlbflush.h2
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h37
-rw-r--r--arch/powerpc/include/asm/book3s/pgtable.h33
-rw-r--r--arch/powerpc/include/asm/code-patching.h1
-rw-r--r--arch/powerpc/include/asm/cpm1.h5
-rw-r--r--arch/powerpc/include/asm/cpm2.h4
-rw-r--r--arch/powerpc/include/asm/fixmap.h16
-rw-r--r--arch/powerpc/include/asm/guest-state-buffer.h995
-rw-r--r--arch/powerpc/include/asm/hvcall.h30
-rw-r--r--arch/powerpc/include/asm/imc-pmu.h16
-rw-r--r--arch/powerpc/include/asm/io.h2
-rw-r--r--arch/powerpc/include/asm/kexec.h8
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h220
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h8
-rw-r--r--arch/powerpc/include/asm/kvm_booke.h10
-rw-r--r--arch/powerpc/include/asm/kvm_host.h22
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h110
-rw-r--r--arch/powerpc/include/asm/nohash/32/mmu-8xx.h1
-rw-r--r--arch/powerpc/include/asm/nohash/32/pgtable.h201
-rw-r--r--arch/powerpc/include/asm/nohash/32/pte-40x.h21
-rw-r--r--arch/powerpc/include/asm/nohash/32/pte-44x.h20
-rw-r--r--arch/powerpc/include/asm/nohash/32/pte-85xx.h20
-rw-r--r--arch/powerpc/include/asm/nohash/32/pte-8xx.h92
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable.h120
-rw-r--r--arch/powerpc/include/asm/nohash/pgtable.h217
-rw-r--r--arch/powerpc/include/asm/nohash/pte-e500.h41
-rw-r--r--arch/powerpc/include/asm/opal.h2
-rw-r--r--arch/powerpc/include/asm/paravirt.h47
-rw-r--r--arch/powerpc/include/asm/pgtable-masks.h32
-rw-r--r--arch/powerpc/include/asm/pgtable.h35
-rw-r--r--arch/powerpc/include/asm/plpar_wrappers.h267
-rw-r--r--arch/powerpc/include/asm/ptrace.h17
-rw-r--r--arch/powerpc/include/asm/uaccess.h2
-rw-r--r--arch/powerpc/kernel/btext.c360
-rw-r--r--arch/powerpc/kernel/crash_dump.c12
-rw-r--r--arch/powerpc/kernel/eeh_driver.c4
-rw-r--r--arch/powerpc/kernel/head_40x.S19
-rw-r--r--arch/powerpc/kernel/head_44x.S40
-rw-r--r--arch/powerpc/kernel/head_85xx.S12
-rw-r--r--arch/powerpc/kernel/head_book3s_32.S63
-rw-r--r--arch/powerpc/kernel/io.c12
-rw-r--r--arch/powerpc/kernel/iommu.c61
-rw-r--r--arch/powerpc/kernel/process.c26
-rw-r--r--arch/powerpc/kernel/prom_init.c2
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace.c5
-rw-r--r--arch/powerpc/kernel/setup-common.c17
-rw-r--r--arch/powerpc/kernel/setup_64.c2
-rw-r--r--arch/powerpc/kernel/signal.c5
-rw-r--r--arch/powerpc/kernel/signal.h7
-rw-r--r--arch/powerpc/kernel/traps.c6
-rw-r--r--arch/powerpc/kexec/core.c3
-rw-r--r--arch/powerpc/kexec/core_64.c4
-rw-r--r--arch/powerpc/kexec/file_load_64.c14
-rw-r--r--arch/powerpc/kvm/Makefile4
-rw-r--r--arch/powerpc/kvm/book3s.c38
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c7
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c31
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c12
-rw-r--r--arch/powerpc/kvm/book3s_hv.c358
-rw-r--r--arch/powerpc/kvm/book3s_hv.h76
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c11
-rw-r--r--arch/powerpc/kvm/book3s_hv_nested.c44
-rw-r--r--arch/powerpc/kvm/book3s_hv_nestedv2.c994
-rw-r--r--arch/powerpc/kvm/book3s_hv_p9_entry.c4
-rw-r--r--arch/powerpc/kvm/book3s_hv_ras.c4
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c8
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c4
-rw-r--r--arch/powerpc/kvm/book3s_hv_uvmem.c2
-rw-r--r--arch/powerpc/kvm/book3s_xive.c12
-rw-r--r--arch/powerpc/kvm/book3s_xive_native.c2
-rw-r--r--arch/powerpc/kvm/emulate_loadstore.c6
-rw-r--r--arch/powerpc/kvm/guest-state-buffer.c621
-rw-r--r--arch/powerpc/kvm/powerpc.c76
-rw-r--r--arch/powerpc/kvm/test-guest-state-buffer.c328
-rw-r--r--arch/powerpc/lib/code-patching.c146
-rw-r--r--arch/powerpc/lib/qspinlock.c122
-rw-r--r--arch/powerpc/mm/book3s32/hash_low.S32
-rw-r--r--arch/powerpc/mm/book3s32/mmu.c6
-rw-r--r--arch/powerpc/mm/book3s64/pgtable.c10
-rw-r--r--arch/powerpc/mm/drmem.c2
-rw-r--r--arch/powerpc/mm/fault.c9
-rw-r--r--arch/powerpc/mm/init_32.c1
-rw-r--r--arch/powerpc/mm/ioremap.c6
-rw-r--r--arch/powerpc/mm/mem.c1
-rw-r--r--arch/powerpc/mm/nohash/40x.c19
-rw-r--r--arch/powerpc/mm/nohash/8xx.c2
-rw-r--r--arch/powerpc/mm/nohash/book3e_pgtable.c2
-rw-r--r--arch/powerpc/mm/nohash/e500.c6
-rw-r--r--arch/powerpc/mm/nohash/e500_hugetlbpage.c3
-rw-r--r--arch/powerpc/mm/pgtable.c26
-rw-r--r--arch/powerpc/mm/ptdump/8xx.c5
-rw-r--r--arch/powerpc/mm/ptdump/shared.c14
-rw-r--r--arch/powerpc/net/bpf_jit.h18
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c149
-rw-r--r--arch/powerpc/net/bpf_jit_comp32.c15
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c10
-rw-r--r--arch/powerpc/perf/core-book3s.c5
-rw-r--r--arch/powerpc/perf/hv-24x7.c2
-rw-r--r--arch/powerpc/perf/imc-pmu.c13
-rw-r--r--arch/powerpc/perf/power6-pmu.c46
-rw-r--r--arch/powerpc/platforms/4xx/soc.c2
-rw-r--r--arch/powerpc/platforms/83xx/misc.c2
-rw-r--r--arch/powerpc/platforms/8xx/cpm1.c1
-rw-r--r--arch/powerpc/platforms/book3s/vas-api.c34
-rw-r--r--arch/powerpc/platforms/powermac/Kconfig1
-rw-r--r--arch/powerpc/platforms/powermac/feature.c3
-rw-r--r--arch/powerpc/platforms/powermac/low_i2c.c4
-rw-r--r--arch/powerpc/platforms/powermac/smp.c4
-rw-r--r--arch/powerpc/platforms/powernv/opal-fadump.h2
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c3
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c8
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c12
-rw-r--r--arch/powerpc/platforms/pseries/plpks.c4
-rw-r--r--arch/powerpc/platforms/pseries/vas.c4
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c10
-rw-r--r--arch/powerpc/sysdev/mpic.c2
-rw-r--r--arch/powerpc/sysdev/xive/native.c2
-rwxr-xr-xarch/powerpc/tools/gcc-check-mprofile-kernel.sh11
-rw-r--r--arch/riscv/Kconfig19
-rw-r--r--arch/riscv/Kconfig.debug1
-rw-r--r--arch/riscv/Makefile13
-rw-r--r--arch/riscv/Makefile.postlink3
-rw-r--r--arch/riscv/boot/Makefile1
-rw-r--r--arch/riscv/configs/defconfig54
-rw-r--r--arch/riscv/include/asm/acpi.h6
-rw-r--r--arch/riscv/include/asm/asm-prototypes.h1
-rw-r--r--arch/riscv/include/asm/asm.h41
-rw-r--r--arch/riscv/include/asm/bitops.h254
-rw-r--r--arch/riscv/include/asm/cpufeature.h103
-rw-r--r--arch/riscv/include/asm/elf.h2
-rw-r--r--arch/riscv/include/asm/entry-common.h14
-rw-r--r--arch/riscv/include/asm/errata_list.h20
-rw-r--r--arch/riscv/include/asm/hwcap.h75
-rw-r--r--arch/riscv/include/asm/hwprobe.h7
-rw-r--r--arch/riscv/include/asm/insn-def.h8
-rw-r--r--arch/riscv/include/asm/irq_stack.h3
-rw-r--r--arch/riscv/include/asm/page.h4
-rw-r--r--arch/riscv/include/asm/pgtable-32.h3
-rw-r--r--arch/riscv/include/asm/pgtable-64.h14
-rw-r--r--arch/riscv/include/asm/pgtable-bits.h4
-rw-r--r--arch/riscv/include/asm/pgtable.h4
-rw-r--r--arch/riscv/include/asm/processor.h9
-rw-r--r--arch/riscv/include/asm/sbi.h3
-rw-r--r--arch/riscv/include/asm/scs.h54
-rw-r--r--arch/riscv/include/asm/switch_to.h2
-rw-r--r--arch/riscv/include/asm/thread_info.h16
-rw-r--r--arch/riscv/include/asm/tlb.h8
-rw-r--r--arch/riscv/include/asm/tlbflush.h15
-rw-r--r--arch/riscv/include/asm/vdso/processor.h2
-rw-r--r--arch/riscv/include/asm/vector.h2
-rw-r--r--arch/riscv/include/uapi/asm/elf.h5
-rw-r--r--arch/riscv/include/uapi/asm/hwprobe.h2
-rw-r--r--arch/riscv/kernel/Makefile3
-rw-r--r--arch/riscv/kernel/acpi.c87
-rw-r--r--arch/riscv/kernel/asm-offsets.c9
-rw-r--r--arch/riscv/kernel/compat_vdso/Makefile10
-rw-r--r--arch/riscv/kernel/copy-unaligned.S8
-rw-r--r--arch/riscv/kernel/cpu.c33
-rw-r--r--arch/riscv/kernel/cpufeature.c109
-rw-r--r--arch/riscv/kernel/entry.S141
-rw-r--r--arch/riscv/kernel/fpu.S129
-rw-r--r--arch/riscv/kernel/head.S49
-rw-r--r--arch/riscv/kernel/hibernate-asm.S12
-rw-r--r--arch/riscv/kernel/image-vars.h2
-rw-r--r--arch/riscv/kernel/irq.c56
-rw-r--r--arch/riscv/kernel/kexec_relocate.S52
-rw-r--r--arch/riscv/kernel/mcount-dyn.S20
-rw-r--r--arch/riscv/kernel/mcount.S18
-rw-r--r--arch/riscv/kernel/module.c608
-rw-r--r--arch/riscv/kernel/probes/rethook_trampoline.S4
-rw-r--r--arch/riscv/kernel/probes/simulate-insn.c2
-rw-r--r--arch/riscv/kernel/probes/uprobes.c6
-rw-r--r--arch/riscv/kernel/process.c18
-rw-r--r--arch/riscv/kernel/sbi.c32
-rw-r--r--arch/riscv/kernel/setup.c16
-rw-r--r--arch/riscv/kernel/signal.c85
-rw-r--r--arch/riscv/kernel/smpboot.c5
-rw-r--r--arch/riscv/kernel/suspend_entry.S9
-rw-r--r--arch/riscv/kernel/sys_riscv.c46
-rw-r--r--arch/riscv/kernel/tests/Kconfig.debug35
-rw-r--r--arch/riscv/kernel/tests/Makefile1
-rw-r--r--arch/riscv/kernel/tests/module_test/Makefile15
-rw-r--r--arch/riscv/kernel/tests/module_test/test_module_linking_main.c88
-rw-r--r--arch/riscv/kernel/tests/module_test/test_set16.S23
-rw-r--r--arch/riscv/kernel/tests/module_test/test_set32.S20
-rw-r--r--arch/riscv/kernel/tests/module_test/test_set6.S23
-rw-r--r--arch/riscv/kernel/tests/module_test/test_set8.S23
-rw-r--r--arch/riscv/kernel/tests/module_test/test_sub16.S20
-rw-r--r--arch/riscv/kernel/tests/module_test/test_sub32.S20
-rw-r--r--arch/riscv/kernel/tests/module_test/test_sub6.S20
-rw-r--r--arch/riscv/kernel/tests/module_test/test_sub64.S25
-rw-r--r--arch/riscv/kernel/tests/module_test/test_sub8.S20
-rw-r--r--arch/riscv/kernel/tests/module_test/test_uleb128.S31
-rw-r--r--arch/riscv/kernel/traps.c98
-rw-r--r--arch/riscv/kernel/traps_misaligned.c375
-rw-r--r--arch/riscv/kernel/vdso/Makefile12
-rw-r--r--arch/riscv/kernel/vdso/flush_icache.S4
-rw-r--r--arch/riscv/kernel/vdso/getcpu.S4
-rw-r--r--arch/riscv/kernel/vdso/hwprobe.c2
-rw-r--r--arch/riscv/kernel/vdso/rt_sigreturn.S4
-rw-r--r--arch/riscv/kernel/vdso/sys_hwprobe.S4
-rw-r--r--arch/riscv/kernel/vdso/vdso.lds.S30
-rw-r--r--arch/riscv/kvm/aia.c2
-rw-r--r--arch/riscv/kvm/main.c2
-rw-r--r--arch/riscv/kvm/tlb.c2
-rw-r--r--arch/riscv/kvm/vcpu_fp.c2
-rw-r--r--arch/riscv/kvm/vcpu_onereg.c2
-rw-r--r--arch/riscv/kvm/vcpu_vector.c2
-rw-r--r--arch/riscv/lib/clear_page.S32
-rw-r--r--arch/riscv/lib/memcpy.S6
-rw-r--r--arch/riscv/lib/memmove.S57
-rw-r--r--arch/riscv/lib/memset.S6
-rw-r--r--arch/riscv/lib/uaccess.S11
-rw-r--r--arch/riscv/mm/Makefile1
-rw-r--r--arch/riscv/mm/cache-ops.c17
-rw-r--r--arch/riscv/mm/cacheflush.c25
-rw-r--r--arch/riscv/mm/dma-noncoherent.c23
-rw-r--r--arch/riscv/mm/init.c14
-rw-r--r--arch/riscv/mm/pageattr.c270
-rw-r--r--arch/riscv/mm/pmem.c4
-rw-r--r--arch/riscv/mm/ptdump.c56
-rw-r--r--arch/riscv/mm/tlbflush.c181
-rw-r--r--arch/riscv/purgatory/Makefile8
-rw-r--r--arch/riscv/purgatory/entry.S16
-rw-r--r--arch/s390/Kconfig2
-rw-r--r--arch/s390/Makefile6
-rw-r--r--arch/s390/boot/ipl_parm.c8
-rw-r--r--arch/s390/boot/startup.c46
-rw-r--r--arch/s390/boot/vmem.c34
-rw-r--r--arch/s390/include/asm/asm-extable.h27
-rw-r--r--arch/s390/include/asm/ctl_reg.h146
-rw-r--r--arch/s390/include/asm/ctlreg.h251
-rw-r--r--arch/s390/include/asm/fault.h28
-rw-r--r--arch/s390/include/asm/fpu/internal.h1
-rw-r--r--arch/s390/include/asm/irq.h23
-rw-r--r--arch/s390/include/asm/kprobes.h3
-rw-r--r--arch/s390/include/asm/lowcore.h15
-rw-r--r--arch/s390/include/asm/mmu.h2
-rw-r--r--arch/s390/include/asm/mmu_context.h11
-rw-r--r--arch/s390/include/asm/page-states.h59
-rw-r--r--arch/s390/include/asm/page.h1
-rw-r--r--arch/s390/include/asm/pci.h11
-rw-r--r--arch/s390/include/asm/pci_clp.h3
-rw-r--r--arch/s390/include/asm/pci_dma.h121
-rw-r--r--arch/s390/include/asm/pgalloc.h1
-rw-r--r--arch/s390/include/asm/pgtable.h3
-rw-r--r--arch/s390/include/asm/setup.h3
-rw-r--r--arch/s390/include/asm/smp.h1
-rw-r--r--arch/s390/include/asm/stacktrace.h7
-rw-r--r--arch/s390/include/asm/tlb.h13
-rw-r--r--arch/s390/include/asm/uaccess.h1
-rw-r--r--arch/s390/include/asm/word-at-a-time.h64
-rw-r--r--arch/s390/kernel/Makefile2
-rw-r--r--arch/s390/kernel/crash_dump.c6
-rw-r--r--arch/s390/kernel/ctlreg.c121
-rw-r--r--arch/s390/kernel/diag.c3
-rw-r--r--arch/s390/kernel/early.c23
-rw-r--r--arch/s390/kernel/guarded_storage.c6
-rw-r--r--arch/s390/kernel/ipl.c2
-rw-r--r--arch/s390/kernel/irq.c4
-rw-r--r--arch/s390/kernel/kprobes.c21
-rw-r--r--arch/s390/kernel/machine_kexec.c6
-rw-r--r--arch/s390/kernel/nmi.c24
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c2
-rw-r--r--arch/s390/kernel/perf_event.c41
-rw-r--r--arch/s390/kernel/perf_pai_crypto.c138
-rw-r--r--arch/s390/kernel/perf_pai_ext.c10
-rw-r--r--arch/s390/kernel/ptrace.c47
-rw-r--r--arch/s390/kernel/setup.c23
-rw-r--r--arch/s390/kernel/smp.c132
-rw-r--r--arch/s390/kernel/stacktrace.c43
-rw-r--r--arch/s390/kernel/time.c4
-rw-r--r--arch/s390/kernel/vdso32/Makefile10
-rw-r--r--arch/s390/kernel/vdso64/Makefile10
-rw-r--r--arch/s390/kvm/gaccess.c45
-rw-r--r--arch/s390/kvm/kvm-s390.c6
-rw-r--r--arch/s390/kvm/priv.c2
-rw-r--r--arch/s390/lib/uaccess.c15
-rw-r--r--arch/s390/mm/dump_pagetables.c2
-rw-r--r--arch/s390/mm/extable.c18
-rw-r--r--arch/s390/mm/fault.c458
-rw-r--r--arch/s390/mm/gmap.c26
-rw-r--r--arch/s390/mm/init.c8
-rw-r--r--arch/s390/mm/maccess.c2
-rw-r--r--arch/s390/mm/page-states.c200
-rw-r--r--arch/s390/mm/pageattr.c2
-rw-r--r--arch/s390/mm/pgalloc.c302
-rw-r--r--arch/s390/mm/vmem.c15
-rw-r--r--arch/s390/pci/Makefile2
-rw-r--r--arch/s390/pci/pci.c37
-rw-r--r--arch/s390/pci/pci_bus.c5
-rw-r--r--arch/s390/pci/pci_debug.c12
-rw-r--r--arch/s390/pci/pci_dma.c746
-rw-r--r--arch/s390/pci/pci_event.c17
-rw-r--r--arch/s390/pci/pci_sysfs.c19
-rw-r--r--arch/sh/kernel/setup.c5
-rw-r--r--arch/sparc/Makefile5
-rw-r--r--arch/sparc/kernel/setup_32.c13
-rw-r--r--arch/sparc/kernel/setup_64.c13
-rw-r--r--arch/sparc/vdso/Makefile27
-rw-r--r--arch/x86/Kconfig29
-rw-r--r--arch/x86/Makefile7
-rw-r--r--arch/x86/Makefile.postlink3
-rw-r--r--arch/x86/coco/tdx/tdx.c21
-rw-r--r--arch/x86/entry/vdso/Makefile27
-rw-r--r--arch/x86/include/asm/apic.h5
-rw-r--r--arch/x86/include/asm/cpu.h20
-rw-r--r--arch/x86/include/asm/microcode.h21
-rw-r--r--arch/x86/include/asm/setup.h1
-rw-r--r--arch/x86/include/asm/shared/tdx.h1
-rw-r--r--arch/x86/include/asm/tdx.h2
-rw-r--r--arch/x86/kernel/Makefile1
-rw-r--r--arch/x86/kernel/apic/apic_flat_64.c2
-rw-r--r--arch/x86/kernel/apic/ipi.c8
-rw-r--r--arch/x86/kernel/apic/x2apic_cluster.c1
-rw-r--r--arch/x86/kernel/apic/x2apic_phys.c1
-rw-r--r--arch/x86/kernel/cpu/common.c12
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c157
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c670
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c688
-rw-r--r--arch/x86/kernel/cpu/microcode/internal.h35
-rw-r--r--arch/x86/kernel/head32.c120
-rw-r--r--arch/x86/kernel/head_32.S10
-rw-r--r--arch/x86/kernel/nmi.c9
-rw-r--r--arch/x86/kernel/setup.c2
-rw-r--r--arch/x86/kernel/smpboot.c12
-rw-r--r--arch/x86/um/vdso/Makefile12
-rw-r--r--arch/xtensa/kernel/setup.c12
-rw-r--r--block/blk-core.c4
-rw-r--r--crypto/Kconfig28
-rw-r--r--crypto/ahash.c1
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/accessibility/speakup/kobjects.c25
-rw-r--r--drivers/acpi/Kconfig1
-rw-r--r--drivers/acpi/riscv/rhct.c93
-rw-r--r--drivers/acpi/tables.c173
-rw-r--r--drivers/acpi/thermal.c4
-rw-r--r--drivers/android/binderfs.c2
-rw-r--r--drivers/ata/libata-core.c16
-rw-r--r--drivers/ata/pata_falcon.c11
-rw-r--r--drivers/ata/pata_gayle.c11
-rw-r--r--drivers/base/class.c6
-rw-r--r--drivers/base/core.c1
-rw-r--r--drivers/base/dd.c2
-rw-r--r--drivers/base/firmware_loader/fallback.c10
-rw-r--r--drivers/base/firmware_loader/fallback.h4
-rw-r--r--drivers/base/firmware_loader/firmware.h1
-rw-r--r--drivers/base/firmware_loader/main.c9
-rw-r--r--drivers/base/platform.c71
-rw-r--r--drivers/base/regmap/regmap.c16
-rw-r--r--drivers/base/test/Kconfig4
-rw-r--r--drivers/base/test/property-entry-test.c4
-rw-r--r--drivers/block/nbd.c11
-rw-r--r--drivers/block/virtio_blk.c4
-rw-r--r--drivers/cdx/Makefile2
-rw-r--r--drivers/cdx/cdx.c264
-rw-r--r--drivers/cdx/cdx.h22
-rw-r--r--drivers/cdx/controller/cdx_controller.c20
-rw-r--r--drivers/cdx/controller/mc_cdx_pcol.h54
-rw-r--r--drivers/cdx/controller/mcdi_functions.c31
-rw-r--r--drivers/cdx/controller/mcdi_functions.h18
-rw-r--r--drivers/char/hpet.c2
-rw-r--r--drivers/char/mem.c4
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c138
-rw-r--r--drivers/clocksource/timer-riscv.c19
-rw-r--r--drivers/comedi/Kconfig45
-rw-r--r--drivers/comedi/drivers.c3
-rw-r--r--drivers/comedi/drivers/8255.c2
-rw-r--r--drivers/comedi/drivers/8255_pci.c15
-rw-r--r--drivers/comedi/drivers/adl_pci9111.c8
-rw-r--r--drivers/comedi/drivers/adl_pci9118.c8
-rw-r--r--drivers/comedi/drivers/adv_pci1710.c8
-rw-r--r--drivers/comedi/drivers/adv_pci_dio.c14
-rw-r--r--drivers/comedi/drivers/aio_aio12_8.c10
-rw-r--r--drivers/comedi/drivers/amplc_dio200_common.c104
-rw-r--r--drivers/comedi/drivers/amplc_dio200_pci.c12
-rw-r--r--drivers/comedi/drivers/amplc_pc236_common.c2
-rw-r--r--drivers/comedi/drivers/amplc_pci224.c8
-rw-r--r--drivers/comedi/drivers/amplc_pci230.c10
-rw-r--r--drivers/comedi/drivers/cb_das16_cs.c8
-rw-r--r--drivers/comedi/drivers/cb_pcidas.c23
-rw-r--r--drivers/comedi/drivers/cb_pcidas64.c7
-rw-r--r--drivers/comedi/drivers/cb_pcidda.c2
-rw-r--r--drivers/comedi/drivers/cb_pcimdas.c12
-rw-r--r--drivers/comedi/drivers/cb_pcimdda.c2
-rw-r--r--drivers/comedi/drivers/comedi_8254.c234
-rw-r--r--drivers/comedi/drivers/comedi_8255.c123
-rw-r--r--drivers/comedi/drivers/daqboard2000.c4
-rw-r--r--drivers/comedi/drivers/das08.c11
-rw-r--r--drivers/comedi/drivers/das16.c10
-rw-r--r--drivers/comedi/drivers/das16m1.c22
-rw-r--r--drivers/comedi/drivers/das1800.c8
-rw-r--r--drivers/comedi/drivers/das6402.c8
-rw-r--r--drivers/comedi/drivers/das800.c8
-rw-r--r--drivers/comedi/drivers/dmm32at.c3
-rw-r--r--drivers/comedi/drivers/me4000.c6
-rw-r--r--drivers/comedi/drivers/ni_at_a2150.c8
-rw-r--r--drivers/comedi/drivers/ni_at_ao.c8
-rw-r--r--drivers/comedi/drivers/ni_atmio16d.c2
-rw-r--r--drivers/comedi/drivers/ni_daq_dio24.c2
-rw-r--r--drivers/comedi/drivers/ni_labpc_common.c51
-rw-r--r--drivers/comedi/drivers/ni_mio_common.c74
-rw-r--r--drivers/comedi/drivers/pcl711.c8
-rw-r--r--drivers/comedi/drivers/pcl724.c6
-rw-r--r--drivers/comedi/drivers/pcl812.c10
-rw-r--r--drivers/comedi/drivers/pcl816.c8
-rw-r--r--drivers/comedi/drivers/pcl818.c8
-rw-r--r--drivers/comedi/drivers/pcm3724.c2
-rw-r--r--drivers/comedi/drivers/rtd520.c6
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c3
-rw-r--r--drivers/cpufreq/qcom-cpufreq-nvmem.c60
-rw-r--r--drivers/cxl/acpi.c3
-rw-r--r--drivers/cxl/core/core.h2
-rw-r--r--drivers/cxl/core/hdm.c93
-rw-r--r--drivers/cxl/core/mbox.c60
-rw-r--r--drivers/cxl/core/memdev.c161
-rw-r--r--drivers/cxl/core/pci.c275
-rw-r--r--drivers/cxl/core/port.c184
-rw-r--r--drivers/cxl/core/region.c266
-rw-r--r--drivers/cxl/core/regs.c73
-rw-r--r--drivers/cxl/cxl.h38
-rw-r--r--drivers/cxl/cxlmem.h17
-rw-r--r--drivers/cxl/mem.c7
-rw-r--r--drivers/cxl/pci.c107
-rw-r--r--drivers/cxl/port.c3
-rw-r--r--drivers/dma/Kconfig2
-rw-r--r--drivers/dma/altera-msgdma.c6
-rw-r--r--drivers/dma/apple-admac.c8
-rw-r--r--drivers/dma/at_hdmac.c8
-rw-r--r--drivers/dma/at_xdmac.c6
-rw-r--r--drivers/dma/bcm-sba-raid.c6
-rw-r--r--drivers/dma/bcm2835-dma.c6
-rw-r--r--drivers/dma/bestcomm/bestcomm.c6
-rw-r--r--drivers/dma/dma-axi-dmac.c11
-rw-r--r--drivers/dma/dma-jz4780.c6
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c162
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac.h6
-rw-r--r--drivers/dma/dw/platform.c6
-rw-r--r--drivers/dma/ep93xx_dma.c2
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c1
-rw-r--r--drivers/dma/fsl-edma-common.h2
-rw-r--r--drivers/dma/fsl-edma-main.c19
-rw-r--r--drivers/dma/fsl-qdma.c5
-rw-r--r--drivers/dma/fsl_raid.c6
-rw-r--r--drivers/dma/fsldma.c6
-rw-r--r--drivers/dma/hisi_dma.c2
-rw-r--r--drivers/dma/idma64.c6
-rw-r--r--drivers/dma/idxd/Makefile6
-rw-r--r--drivers/dma/idxd/cdev.c7
-rw-r--r--drivers/dma/idxd/dma.c6
-rw-r--r--drivers/dma/idxd/idxd.h9
-rw-r--r--drivers/dma/idxd/irq.c4
-rw-r--r--drivers/dma/idxd/sysfs.c34
-rw-r--r--drivers/dma/img-mdc-dma.c6
-rw-r--r--drivers/dma/imx-dma.c6
-rw-r--r--drivers/dma/imx-sdma.c5
-rw-r--r--drivers/dma/k3dma.c30
-rw-r--r--drivers/dma/mcf-edma-main.c6
-rw-r--r--drivers/dma/mediatek/mtk-cqdma.c6
-rw-r--r--drivers/dma/mediatek/mtk-hsdma.c6
-rw-r--r--drivers/dma/mediatek/mtk-uart-apdma.c6
-rw-r--r--drivers/dma/mmp_pdma.c10
-rw-r--r--drivers/dma/mmp_tdma.c35
-rw-r--r--drivers/dma/moxart-dma.c11
-rw-r--r--drivers/dma/mpc512x_dma.c6
-rw-r--r--drivers/dma/mv_xor.c11
-rw-r--r--drivers/dma/mv_xor_v2.c6
-rw-r--r--drivers/dma/nbpfaxi.c6
-rw-r--r--drivers/dma/owl-dma.c6
-rw-r--r--drivers/dma/ppc4xx/adma.c5
-rw-r--r--drivers/dma/pxa_dma.c26
-rw-r--r--drivers/dma/qcom/bam_dma.c8
-rw-r--r--drivers/dma/qcom/hidma.c8
-rw-r--r--drivers/dma/qcom/qcom_adm.c6
-rw-r--r--drivers/dma/sa11x0-dma.c12
-rw-r--r--drivers/dma/sf-pdma/sf-pdma.c6
-rw-r--r--drivers/dma/sf-pdma/sf-pdma.h2
-rw-r--r--drivers/dma/sh/rcar-dmac.c6
-rw-r--r--drivers/dma/sh/rz-dmac.c6
-rw-r--r--drivers/dma/sh/shdmac.c6
-rw-r--r--drivers/dma/sh/usb-dmac.c8
-rw-r--r--drivers/dma/sprd-dma.c22
-rw-r--r--drivers/dma/st_fdma.c18
-rw-r--r--drivers/dma/st_fdma.h2
-rw-r--r--drivers/dma/stm32-dma.c19
-rw-r--r--drivers/dma/stm32-mdma.c13
-rw-r--r--drivers/dma/sun4i-dma.c6
-rw-r--r--drivers/dma/sun6i-dma.c6
-rw-r--r--drivers/dma/tegra186-gpc-dma.c8
-rw-r--r--drivers/dma/tegra20-apb-dma.c6
-rw-r--r--drivers/dma/tegra210-adma.c8
-rw-r--r--drivers/dma/ti/cppi41.c5
-rw-r--r--drivers/dma/ti/edma.c12
-rw-r--r--drivers/dma/ti/omap-dma.c11
-rw-r--r--drivers/dma/timb_dma.c5
-rw-r--r--drivers/dma/txx9dmac.c10
-rw-r--r--drivers/dma/uniphier-xdmac.c8
-rw-r--r--drivers/dma/xgene-dma.c6
-rw-r--r--drivers/dma/xilinx/xdma-regs.h2
-rw-r--r--drivers/dma/xilinx/xdma.c187
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c8
-rw-r--r--drivers/dma/xilinx/xilinx_dpdma.c6
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c6
-rw-r--r--drivers/extcon/Kconfig11
-rw-r--r--drivers/extcon/Makefile1
-rw-r--r--drivers/extcon/extcon-max77693.c7
-rw-r--r--drivers/extcon/extcon-max77843.c7
-rw-r--r--drivers/extcon/extcon-rtk-type-c.c1794
-rw-r--r--drivers/firewire/core.h2
-rw-r--r--drivers/firmware/efi/efi-init.c14
-rw-r--r--drivers/firmware/efi/libstub/Makefile2
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-entry.c8
-rw-r--r--drivers/firmware/xilinx/zynqmp.c2
-rw-r--r--drivers/fpga/altera-hps2fpga.c12
-rw-r--r--drivers/fpga/altera-ps-spi.c46
-rw-r--r--drivers/fpga/versal-fpga.c2
-rw-r--r--drivers/fpga/xilinx-pr-decoupler.c17
-rw-r--r--drivers/gpio/Kconfig4
-rw-r--r--drivers/gpio/gpio-aspeed.c6
-rw-r--r--drivers/gpio/gpio-em.c9
-rw-r--r--drivers/gpio/gpio-ljca.c247
-rw-r--r--drivers/gpio/gpio-mvebu.c5
-rw-r--r--drivers/gpio/gpio-pxa.c4
-rw-r--r--drivers/gpio/gpio-rcar.c4
-rw-r--r--drivers/gpio/gpio-rockchip.c4
-rw-r--r--drivers/gpio/gpio-tegra.c13
-rw-r--r--drivers/gpio/gpio-vf610.c4
-rw-r--r--drivers/gpio/gpiolib-cdev.c3
-rw-r--r--drivers/gpio/gpiolib.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c79
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v12_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c66
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.c137
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/Makefile4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_2_sh_mask.h28
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h30
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c27
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h24
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c33
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h120
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c17
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c13
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c58
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c27
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c260
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c3
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c1
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_drv.c7
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_tee.c8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c143
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0080.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h21
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/layout.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h19
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h19
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h9
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h435
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h11
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h170
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h43
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h62
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h34
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h39
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h166
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h335
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h216
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h65
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h57
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h48
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h40
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h41
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h51
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h52
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h100
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h41
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h162
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h95
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h42
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h148
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h46
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h52
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h79
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h170
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h82
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h100
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h62
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h119
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h32
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h44
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h124
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h45
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h74
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h86
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h57
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h174
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h57
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h262
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h51
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h40
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h24
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvif/disp.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/firmware.c74
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c108
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c190
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c1671
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c31
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/falcon.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c664
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c508
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c110
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c110
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/ga102.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/r535.c54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/base.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/ga100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/ga102.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/tu102.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/ga102.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c186
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/r535.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c78
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c359
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c74
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c148
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c2236
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c198
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c333
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/ga102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c123
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/vfn/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/vfn/ga100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/vfn/priv.h7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c50
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/vfn/tu102.c5
-rw-r--r--drivers/gpu/drm/radeon/atombios.h42
-rw-r--r--drivers/gpu/drm/renesas/shmobile/Kconfig3
-rw-r--r--drivers/gpu/drm/renesas/shmobile/Makefile3
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_backlight.c82
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_backlight.h19
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_crtc.c650
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_crtc.h27
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c179
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.h18
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c77
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.h9
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c326
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.h5
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.c2
-rw-r--r--drivers/greybus/Kconfig10
-rw-r--r--drivers/greybus/Makefile2
-rw-r--r--drivers/greybus/gb-beagleplay.c501
-rw-r--r--drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c7
-rw-r--r--drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.h3
-rw-r--r--drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_report_desc.h21
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c9
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h15
-rw-r--r--drivers/hv/vmbus_drv.c6
-rw-r--r--drivers/i2c/busses/Kconfig12
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-at91-core.c10
-rw-r--r--drivers/i2c/busses/i2c-axxia.c8
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c133
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c23
-rw-r--r--drivers/i2c/busses/i2c-cp2615.c4
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c19
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c80
-rw-r--r--drivers/i2c/busses/i2c-gpio.c7
-rw-r--r--drivers/i2c/busses/i2c-i801.c110
-rw-r--r--drivers/i2c/busses/i2c-ljca.c343
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c12
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c11
-rw-r--r--drivers/i2c/busses/i2c-omap.c8
-rw-r--r--drivers/i2c/busses/i2c-powermac.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c7
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c1
-rw-r--r--drivers/i2c/busses/i2c-rcar.c180
-rw-r--r--drivers/i2c/busses/i2c-riic.c2
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c7
-rw-r--r--drivers/i2c/busses/i2c-stm32f4.c32
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c89
-rw-r--r--drivers/i2c/busses/i2c-sun6i-p2wi.c5
-rw-r--r--drivers/i2c/i2c-atr.c2
-rw-r--r--drivers/i2c/i2c-core-base.c19
-rw-r--r--drivers/i2c/i2c-dev.c22
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c12
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c7
-rw-r--r--drivers/i3c/master.c24
-rw-r--r--drivers/i3c/master/dw-i3c-master.c2
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c8
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/cmd_v1.c1
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/core.c14
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dat_v1.c29
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dma.c21
-rw-r--r--drivers/i3c/master/svc-i3c-master.c60
-rw-r--r--drivers/iio/accel/Kconfig10
-rw-r--r--drivers/iio/accel/adxl345.h21
-rw-r--r--drivers/iio/accel/adxl345_core.c47
-rw-r--r--drivers/iio/accel/adxl345_i2c.c20
-rw-r--r--drivers/iio/accel/adxl345_spi.c20
-rw-r--r--drivers/iio/accel/bma180.c27
-rw-r--r--drivers/iio/accel/fxls8962af.h1
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c6
-rw-r--r--drivers/iio/accel/kionix-kx022a-i2c.c24
-rw-r--r--drivers/iio/accel/kionix-kx022a-spi.c19
-rw-r--r--drivers/iio/accel/kionix-kx022a.c351
-rw-r--r--drivers/iio/accel/kionix-kx022a.h114
-rw-r--r--drivers/iio/accel/mma8452.c28
-rw-r--r--drivers/iio/accel/msa311.c2
-rw-r--r--drivers/iio/accel/mxc4005.c8
-rw-r--r--drivers/iio/adc/Kconfig29
-rw-r--r--drivers/iio/adc/Makefile2
-rw-r--r--drivers/iio/adc/ab8500-gpadc.c6
-rw-r--r--drivers/iio/adc/ad4130.c15
-rw-r--r--drivers/iio/adc/ad7192.c269
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c6
-rw-r--r--drivers/iio/adc/at91_adc.c114
-rw-r--r--drivers/iio/adc/axp20x_adc.c6
-rw-r--r--drivers/iio/adc/bcm_iproc_adc.c6
-rw-r--r--drivers/iio/adc/dln2-adc.c5
-rw-r--r--drivers/iio/adc/ep93xx_adc.c6
-rw-r--r--drivers/iio/adc/exynos_adc.c6
-rw-r--r--drivers/iio/adc/fsl-imx25-gcq.c6
-rw-r--r--drivers/iio/adc/hx711.c6
-rw-r--r--drivers/iio/adc/imx8qxp-adc.c6
-rw-r--r--drivers/iio/adc/imx93_adc.c6
-rw-r--r--drivers/iio/adc/ltc2309.c246
-rw-r--r--drivers/iio/adc/ltc2497.c5
-rw-r--r--drivers/iio/adc/max1363.c87
-rw-r--r--drivers/iio/adc/mcp3564.c1513
-rw-r--r--drivers/iio/adc/mcp3911.c562
-rw-r--r--drivers/iio/adc/meson_saradc.c22
-rw-r--r--drivers/iio/adc/mp2629_adc.c6
-rw-r--r--drivers/iio/adc/mt6577_auxadc.c72
-rw-r--r--drivers/iio/adc/mxs-lradc-adc.c6
-rw-r--r--drivers/iio/adc/npcm_adc.c6
-rw-r--r--drivers/iio/adc/palmas_gpadc.c2
-rw-r--r--drivers/iio/adc/qcom-pm8xxx-xoadc.c6
-rw-r--r--drivers/iio/adc/rcar-gyroadc.c6
-rw-r--r--drivers/iio/adc/spear_adc.c64
-rw-r--r--drivers/iio/adc/stm32-adc-core.c12
-rw-r--r--drivers/iio/adc/stm32-adc.c8
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c6
-rw-r--r--drivers/iio/adc/stm32-dfsdm-core.c6
-rw-r--r--drivers/iio/adc/sun4i-gpadc-iio.c8
-rw-r--r--drivers/iio/adc/ti-adc081c.c12
-rw-r--r--drivers/iio/adc/ti-ads1015.c7
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c6
-rw-r--r--drivers/iio/adc/twl4030-madc.c6
-rw-r--r--drivers/iio/adc/twl6030-gpadc.c16
-rw-r--r--drivers/iio/adc/vf610_adc.c6
-rw-r--r--drivers/iio/addac/Kconfig1
-rw-r--r--drivers/iio/addac/ad74413r.c24
-rw-r--r--drivers/iio/addac/stx104.c61
-rw-r--r--drivers/iio/amplifiers/hmc425a.c17
-rw-r--r--drivers/iio/chemical/atlas-ezo-sensor.c6
-rw-r--r--drivers/iio/chemical/atlas-sensor.c32
-rw-r--r--drivers/iio/chemical/sgp30.c24
-rw-r--r--drivers/iio/chemical/vz89x.c16
-rw-r--r--drivers/iio/dac/dpot-dac.c6
-rw-r--r--drivers/iio/dac/lpc18xx_dac.c6
-rw-r--r--drivers/iio/dac/mcp4725.c42
-rw-r--r--drivers/iio/dac/stm32-dac-core.c15
-rw-r--r--drivers/iio/dac/stm32-dac.c9
-rw-r--r--drivers/iio/dac/ti-dac5571.c48
-rw-r--r--drivers/iio/dac/vf610_dac.c6
-rw-r--r--drivers/iio/frequency/adf4350.c75
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c6
-rw-r--r--drivers/iio/humidity/hid-sensor-humidity.c6
-rw-r--r--drivers/iio/humidity/si7005.c7
-rw-r--r--drivers/iio/iio_core.h3
-rw-r--r--drivers/iio/imu/adis16475.c238
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c13
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c3
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h2
-rw-r--r--drivers/iio/industrialio-core.c4
-rw-r--r--drivers/iio/industrialio-event.c55
-rw-r--r--drivers/iio/light/cm3605.c6
-rw-r--r--drivers/iio/light/hid-sensor-als.c136
-rw-r--r--drivers/iio/light/hid-sensor-prox.c6
-rw-r--r--drivers/iio/light/lm3533-als.c6
-rw-r--r--drivers/iio/light/opt4001.c2
-rw-r--r--drivers/iio/magnetometer/ak8975.c97
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c6
-rw-r--r--drivers/iio/magnetometer/yamaha-yas530.c4
-rw-r--r--drivers/iio/orientation/hid-sensor-incl-3d.c6
-rw-r--r--drivers/iio/orientation/hid-sensor-rotation.c6
-rw-r--r--drivers/iio/position/hid-sensor-custom-intel-hinge.c6
-rw-r--r--drivers/iio/potentiometer/ad5110.c21
-rw-r--r--drivers/iio/potentiometer/ds1803.c13
-rw-r--r--drivers/iio/pressure/Kconfig9
-rw-r--r--drivers/iio/pressure/Makefile1
-rw-r--r--drivers/iio/pressure/hid-sensor-press.c6
-rw-r--r--drivers/iio/pressure/ms5637.c6
-rw-r--r--drivers/iio/pressure/rohm-bm1390.c934
-rw-r--r--drivers/iio/proximity/cros_ec_mkbp_proximity.c6
-rw-r--r--drivers/iio/proximity/srf04.c6
-rw-r--r--drivers/iio/proximity/sx9310.c46
-rw-r--r--drivers/iio/resolver/Kconfig13
-rw-r--r--drivers/iio/resolver/Makefile1
-rw-r--r--drivers/iio/resolver/ad2s1210.c1519
-rw-r--r--drivers/iio/temperature/hid-sensor-temperature.c6
-rw-r--r--drivers/iio/temperature/mlx90614.c2
-rw-r--r--drivers/iio/temperature/tmp117.c94
-rw-r--r--drivers/iio/trigger/iio-trig-interrupt.c6
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c6
-rw-r--r--drivers/input/evdev.c2
-rw-r--r--drivers/input/input-leds.c2
-rw-r--r--drivers/input/joystick/walkera0701.c13
-rw-r--r--drivers/input/keyboard/adp5520-keys.c6
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c16
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c6
-rw-r--r--drivers/input/keyboard/iqs62x-keys.c6
-rw-r--r--drivers/input/keyboard/matrix_keypad.c6
-rw-r--r--drivers/input/keyboard/omap-keypad.c6
-rw-r--r--drivers/input/keyboard/omap4-keypad.c6
-rw-r--r--drivers/input/keyboard/samsung-keypad.c6
-rw-r--r--drivers/input/keyboard/sh_keysc.c6
-rw-r--r--drivers/input/keyboard/spear-keyboard.c6
-rw-r--r--drivers/input/keyboard/stmpe-keypad.c6
-rw-r--r--drivers/input/keyboard/tegra-kbc.c7
-rw-r--r--drivers/input/misc/88pm80x_onkey.c5
-rw-r--r--drivers/input/misc/axp20x-pek.c11
-rw-r--r--drivers/input/misc/da9052_onkey.c6
-rw-r--r--drivers/input/misc/da9055_onkey.c6
-rw-r--r--drivers/input/misc/ideapad_slidebar.c6
-rw-r--r--drivers/input/misc/iqs269a.c10
-rw-r--r--drivers/input/misc/kxtj9.c29
-rw-r--r--drivers/input/misc/m68kspkr.c6
-rw-r--r--drivers/input/misc/max8997_haptic.c6
-rw-r--r--drivers/input/misc/mc13783-pwrbutton.c6
-rw-r--r--drivers/input/misc/palmas-pwrbutton.c6
-rw-r--r--drivers/input/misc/pcap_keys.c6
-rw-r--r--drivers/input/misc/pcf50633-input.c6
-rw-r--r--drivers/input/misc/pcspkr.c6
-rw-r--r--drivers/input/misc/pm8941-pwrkey.c6
-rw-r--r--drivers/input/misc/soc_button_array.c6
-rw-r--r--drivers/input/misc/sparcspkr.c12
-rw-r--r--drivers/input/misc/wistron_btns.c6
-rw-r--r--drivers/input/misc/wm831x-on.c6
-rw-r--r--drivers/input/mouse/cyapa.c14
-rw-r--r--drivers/input/mouse/navpoint.c6
-rw-r--r--drivers/input/rmi4/rmi_bus.c2
-rw-r--r--drivers/input/rmi4/rmi_f34.c2
-rw-r--r--drivers/input/serio/altera_ps2.c6
-rw-r--r--drivers/input/serio/ams_delta_serio.c6
-rw-r--r--drivers/input/serio/apbps2.c6
-rw-r--r--drivers/input/serio/arc_ps2.c6
-rw-r--r--drivers/input/serio/ct82c710.c6
-rw-r--r--drivers/input/serio/i8042-sparcio.h6
-rw-r--r--drivers/input/serio/i8042.c6
-rw-r--r--drivers/input/serio/ioc3kbd.c6
-rw-r--r--drivers/input/serio/maceps2.c6
-rw-r--r--drivers/input/serio/olpc_apsp.c6
-rw-r--r--drivers/input/serio/ps2-gpio.c5
-rw-r--r--drivers/input/serio/q40kbd.c6
-rw-r--r--drivers/input/serio/rpckbd.c6
-rw-r--r--drivers/input/serio/sun4i-ps2.c6
-rw-r--r--drivers/input/serio/xilinx_ps2.c6
-rw-r--r--drivers/input/touchscreen/ad7877.c12
-rw-r--r--drivers/input/touchscreen/ad7879-i2c.c7
-rw-r--r--drivers/input/touchscreen/ad7879-spi.c7
-rw-r--r--drivers/input/touchscreen/ad7879.c10
-rw-r--r--drivers/input/touchscreen/ad7879.h3
-rw-r--r--drivers/input/touchscreen/ads7846.c18
-rw-r--r--drivers/input/touchscreen/cyttsp5.c19
-rw-r--r--drivers/input/touchscreen/da9052_tsi.c6
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c10
-rw-r--r--drivers/input/touchscreen/elants_i2c.c15
-rw-r--r--drivers/input/touchscreen/exc3000.c12
-rw-r--r--drivers/input/touchscreen/hideep.c15
-rw-r--r--drivers/input/touchscreen/hycon-hy46xx.c10
-rw-r--r--drivers/input/touchscreen/ili210x.c15
-rw-r--r--drivers/input/touchscreen/ilitek_ts_i2c.c12
-rw-r--r--drivers/input/touchscreen/iqs5xx.c10
-rw-r--r--drivers/input/touchscreen/mainstone-wm97xx.c6
-rw-r--r--drivers/input/touchscreen/mc13783_ts.c6
-rw-r--r--drivers/input/touchscreen/melfas_mip4.c13
-rw-r--r--drivers/input/touchscreen/pcap_ts.c6
-rw-r--r--drivers/input/touchscreen/raydium_i2c_ts.c16
-rw-r--r--drivers/input/touchscreen/rohm_bu21023.c12
-rw-r--r--drivers/input/touchscreen/s6sy761.c10
-rw-r--r--drivers/input/touchscreen/stmfts.c10
-rw-r--r--drivers/input/touchscreen/stmpe-ts.c6
-rw-r--r--drivers/input/touchscreen/sun4i-ts.c6
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c5
-rw-r--r--drivers/input/touchscreen/tsc2004.c7
-rw-r--r--drivers/input/touchscreen/tsc2005.c7
-rw-r--r--drivers/input/touchscreen/tsc200x-core.c18
-rw-r--r--drivers/input/touchscreen/tsc200x-core.h1
-rw-r--r--drivers/input/touchscreen/wdt87xx_i2c.c16
-rw-r--r--drivers/input/touchscreen/wm831x-ts.c6
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c6
-rw-r--r--drivers/interconnect/imx/imx.h9
-rw-r--r--drivers/interconnect/qcom/Kconfig9
-rw-r--r--drivers/interconnect/qcom/Makefile2
-rw-r--r--drivers/interconnect/qcom/icc-rpm-clocks.c6
-rw-r--r--drivers/interconnect/qcom/icc-rpm.c90
-rw-r--r--drivers/interconnect/qcom/icc-rpm.h15
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.c4
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.h2
-rw-r--r--drivers/interconnect/qcom/msm8974.c4
-rw-r--r--drivers/interconnect/qcom/msm8996.c8
-rw-r--r--drivers/interconnect/qcom/osm-l3.c3
-rw-r--r--drivers/interconnect/qcom/qcm2290.c9
-rw-r--r--drivers/interconnect/qcom/qdu1000.c3
-rw-r--r--drivers/interconnect/qcom/sa8775p.c2
-rw-r--r--drivers/interconnect/qcom/sc7180.c3
-rw-r--r--drivers/interconnect/qcom/sc7280.c3
-rw-r--r--drivers/interconnect/qcom/sc8180x.c3
-rw-r--r--drivers/interconnect/qcom/sc8280xp.c3
-rw-r--r--drivers/interconnect/qcom/sdm660.c4
-rw-r--r--drivers/interconnect/qcom/sdm670.c3
-rw-r--r--drivers/interconnect/qcom/sdm845.c3
-rw-r--r--drivers/interconnect/qcom/sdx55.c2
-rw-r--r--drivers/interconnect/qcom/sdx65.c2
-rw-r--r--drivers/interconnect/qcom/sdx75.c1107
-rw-r--r--drivers/interconnect/qcom/sdx75.h97
-rw-r--r--drivers/interconnect/qcom/sm6350.c3
-rw-r--r--drivers/interconnect/qcom/sm8150.c3
-rw-r--r--drivers/interconnect/qcom/sm8250.c3
-rw-r--r--drivers/interconnect/qcom/sm8350.c3
-rw-r--r--drivers/interconnect/qcom/sm8450.c2
-rw-r--r--drivers/interconnect/qcom/sm8550.c2
-rw-r--r--drivers/iommu/Kconfig15
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/amd/Kconfig9
-rw-r--r--drivers/iommu/amd/Makefile1
-rw-r--r--drivers/iommu/amd/amd_iommu.h35
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h52
-rw-r--r--drivers/iommu/amd/init.c117
-rw-r--r--drivers/iommu/amd/io_pgtable_v2.c8
-rw-r--r--drivers/iommu/amd/iommu.c577
-rw-r--r--drivers/iommu/amd/iommu_v2.c996
-rw-r--r--drivers/iommu/apple-dart.c138
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c71
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c251
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h17
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c2
-rw-r--r--drivers/iommu/arm/arm-smmu/qcom_iommu.c45
-rw-r--r--drivers/iommu/dma-iommu.c200
-rw-r--r--drivers/iommu/exynos-iommu.c83
-rw-r--r--drivers/iommu/fsl_pamu_domain.c41
-rw-r--r--drivers/iommu/intel/debugfs.c215
-rw-r--r--drivers/iommu/intel/iommu.c19
-rw-r--r--drivers/iommu/intel/iommu.h14
-rw-r--r--drivers/iommu/iommu.c455
-rw-r--r--drivers/iommu/iommufd/selftest.c31
-rw-r--r--drivers/iommu/iova.c95
-rw-r--r--drivers/iommu/ipmmu-vmsa.c72
-rw-r--r--drivers/iommu/msm_iommu.c35
-rw-r--r--drivers/iommu/mtk_iommu.c35
-rw-r--r--drivers/iommu/mtk_iommu_v1.c28
-rw-r--r--drivers/iommu/omap-iommu.c69
-rw-r--r--drivers/iommu/omap-iommu.h2
-rw-r--r--drivers/iommu/rockchip-iommu.c59
-rw-r--r--drivers/iommu/s390-iommu.c424
-rw-r--r--drivers/iommu/sprd-iommu.c36
-rw-r--r--drivers/iommu/sun50i-iommu.c80
-rw-r--r--drivers/iommu/tegra-gart.c371
-rw-r--r--drivers/iommu/tegra-smmu.c58
-rw-r--r--drivers/iommu/virtio-iommu.c4
-rw-r--r--drivers/leds/rgb/leds-qcom-lpg.c1
-rw-r--r--drivers/macintosh/macio-adb.c1
-rw-r--r--drivers/mailbox/bcm-pdc-mailbox.c10
-rw-r--r--drivers/mailbox/imx-mailbox.c32
-rw-r--r--drivers/mailbox/mailbox-sti.c8
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c80
-rw-r--r--drivers/mailbox/ti-msgmgr.c12
-rw-r--r--drivers/mcb/mcb-core.c7
-rw-r--r--drivers/mcb/mcb-parse.c2
-rw-r--r--drivers/media/cec/platform/Makefile2
-rw-r--r--drivers/media/cec/platform/cros-ec/cros-ec-cec.c387
-rw-r--r--drivers/media/common/siano/smsdvb-debugfs.c22
-rw-r--r--drivers/media/common/videobuf2/frame_vector.c2
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c2
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-contig.c13
-rw-r--r--drivers/media/common/videobuf2/videobuf2-vmalloc.c10
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c6
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c3
-rw-r--r--drivers/media/i2c/Kconfig11
-rw-r--r--drivers/media/i2c/Makefile1
-rw-r--r--drivers/media/i2c/adp1653.c28
-rw-r--r--drivers/media/i2c/adv7180.c65
-rw-r--r--drivers/media/i2c/ar0521.c30
-rw-r--r--drivers/media/i2c/ccs/ccs-core.c431
-rw-r--r--drivers/media/i2c/ccs/ccs-quirk.h4
-rw-r--r--drivers/media/i2c/ccs/ccs.h4
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c3694
-rw-r--r--drivers/media/i2c/ds90ub913.c2
-rw-r--r--drivers/media/i2c/ds90ub953.c2
-rw-r--r--drivers/media/i2c/ds90ub960.c2
-rw-r--r--drivers/media/i2c/hi556.c114
-rw-r--r--drivers/media/i2c/hi846.c34
-rw-r--r--drivers/media/i2c/hi847.c57
-rw-r--r--drivers/media/i2c/imx208.c61
-rw-r--r--drivers/media/i2c/imx214.c40
-rw-r--r--drivers/media/i2c/imx219.c1279
-rw-r--r--drivers/media/i2c/imx258.c40
-rw-r--r--drivers/media/i2c/imx296.c16
-rw-r--r--drivers/media/i2c/imx319.c44
-rw-r--r--drivers/media/i2c/imx334.c66
-rw-r--r--drivers/media/i2c/imx335.c9
-rw-r--r--drivers/media/i2c/imx355.c45
-rw-r--r--drivers/media/i2c/imx412.c9
-rw-r--r--drivers/media/i2c/imx415.c45
-rw-r--r--drivers/media/i2c/max9286.c2
-rw-r--r--drivers/media/i2c/msp3400-driver.c2
-rw-r--r--drivers/media/i2c/mt9m001.c6
-rw-r--r--drivers/media/i2c/mt9m111.c13
-rw-r--r--drivers/media/i2c/mt9m114.c2481
-rw-r--r--drivers/media/i2c/mt9v011.c6
-rw-r--r--drivers/media/i2c/mt9v032.c26
-rw-r--r--drivers/media/i2c/mt9v111.c14
-rw-r--r--drivers/media/i2c/og01a1b.c55
-rw-r--r--drivers/media/i2c/ov01a10.c59
-rw-r--r--drivers/media/i2c/ov02a10.c2
-rw-r--r--drivers/media/i2c/ov08d10.c52
-rw-r--r--drivers/media/i2c/ov08x40.c44
-rw-r--r--drivers/media/i2c/ov13858.c44
-rw-r--r--drivers/media/i2c/ov13b10.c98
-rw-r--r--drivers/media/i2c/ov2640.c21
-rw-r--r--drivers/media/i2c/ov2659.c16
-rw-r--r--drivers/media/i2c/ov2685.c19
-rw-r--r--drivers/media/i2c/ov2740.c168
-rw-r--r--drivers/media/i2c/ov4689.c9
-rw-r--r--drivers/media/i2c/ov5640.c24
-rw-r--r--drivers/media/i2c/ov5647.c6
-rw-r--r--drivers/media/i2c/ov5670.c36
-rw-r--r--drivers/media/i2c/ov5675.c44
-rw-r--r--drivers/media/i2c/ov5693.c9
-rw-r--r--drivers/media/i2c/ov5695.c25
-rw-r--r--drivers/media/i2c/ov7251.c8
-rw-r--r--drivers/media/i2c/ov7670.c72
-rw-r--r--drivers/media/i2c/ov772x.c4
-rw-r--r--drivers/media/i2c/ov7740.c30
-rw-r--r--drivers/media/i2c/ov8856.c47
-rw-r--r--drivers/media/i2c/ov9282.c9
-rw-r--r--drivers/media/i2c/ov9734.c50
-rw-r--r--drivers/media/i2c/rdacm20.c3
-rw-r--r--drivers/media/i2c/st-vgxy61.c9
-rw-r--r--drivers/media/i2c/tc358746.c6
-rw-r--r--drivers/media/i2c/tvp514x.c49
-rw-r--r--drivers/media/i2c/video-i2c.c12
-rw-r--r--drivers/media/mc/mc-entity.c15
-rw-r--r--drivers/media/pci/Kconfig1
-rw-r--r--drivers/media/pci/Makefile1
-rw-r--r--drivers/media/pci/bt8xx/bttv-cards.c1
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c1
-rw-r--r--drivers/media/pci/bt8xx/dvb-bt8xx.c14
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c11
-rw-r--r--drivers/media/pci/cobalt/cobalt-v4l2.c8
-rw-r--r--drivers/media/pci/cx18/cx18-driver.h2
-rw-r--r--drivers/media/pci/cx18/cx18-mailbox.c2
-rw-r--r--drivers/media/pci/intel/ivsc/Kconfig12
-rw-r--r--drivers/media/pci/intel/ivsc/mei_ace.c9
-rw-r--r--drivers/media/pci/intel/ivsc/mei_csi.c5
-rw-r--r--drivers/media/pci/mgb4/Kconfig17
-rw-r--r--drivers/media/pci/mgb4/Makefile6
-rw-r--r--drivers/media/pci/mgb4/mgb4_cmt.c244
-rw-r--r--drivers/media/pci/mgb4/mgb4_cmt.h17
-rw-r--r--drivers/media/pci/mgb4/mgb4_core.c686
-rw-r--r--drivers/media/pci/mgb4/mgb4_core.h74
-rw-r--r--drivers/media/pci/mgb4/mgb4_dma.c123
-rw-r--r--drivers/media/pci/mgb4/mgb4_dma.h18
-rw-r--r--drivers/media/pci/mgb4/mgb4_i2c.c140
-rw-r--r--drivers/media/pci/mgb4/mgb4_i2c.h35
-rw-r--r--drivers/media/pci/mgb4/mgb4_io.h33
-rw-r--r--drivers/media/pci/mgb4/mgb4_regs.c30
-rw-r--r--drivers/media/pci/mgb4/mgb4_regs.h35
-rw-r--r--drivers/media/pci/mgb4/mgb4_sysfs.h18
-rw-r--r--drivers/media/pci/mgb4/mgb4_sysfs_in.c772
-rw-r--r--drivers/media/pci/mgb4/mgb4_sysfs_out.c740
-rw-r--r--drivers/media/pci/mgb4/mgb4_sysfs_pci.c71
-rw-r--r--drivers/media/pci/mgb4/mgb4_trigger.c208
-rw-r--r--drivers/media/pci/mgb4/mgb4_trigger.h8
-rw-r--r--drivers/media/pci/mgb4/mgb4_vin.c939
-rw-r--r--drivers/media/pci/mgb4/mgb4_vin.h69
-rw-r--r--drivers/media/pci/mgb4/mgb4_vout.c602
-rw-r--r--drivers/media/pci/mgb4/mgb4_vout.h65
-rw-r--r--drivers/media/pci/zoran/zoran.h2
-rw-r--r--drivers/media/platform/Kconfig1
-rw-r--r--drivers/media/platform/Makefile1
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-mail.c2
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-mail.h2
-rw-r--r--drivers/media/platform/amphion/vpu_defs.h1
-rw-r--r--drivers/media/platform/amphion/vpu_helpers.c1
-rw-r--r--drivers/media/platform/amphion/vpu_malone.c1
-rw-r--r--drivers/media/platform/amphion/vpu_msgs.c31
-rw-r--r--drivers/media/platform/aspeed/aspeed-video.c15
-rw-r--r--drivers/media/platform/cadence/Kconfig2
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c180
-rw-r--r--drivers/media/platform/cadence/cdns-csi2tx.c6
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c5
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c2
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c1
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c56
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c21
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c18
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h3
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c8
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c5
-rw-r--r--drivers/media/platform/microchip/microchip-isc-base.c6
-rw-r--r--drivers/media/platform/nuvoton/Kconfig15
-rw-r--r--drivers/media/platform/nuvoton/Makefile2
-rw-r--r--drivers/media/platform/nuvoton/npcm-regs.h152
-rw-r--r--drivers/media/platform/nuvoton/npcm-video.c1831
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h11
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c36
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h1
-rw-r--r--drivers/media/platform/nxp/imx-mipi-csis.c2
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c4
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-4-1.c8
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-4-7.c8
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-gen2.c33
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.c39
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.h4
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c10
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c95
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.h4
-rw-r--r--drivers/media/platform/qcom/camss/camss-ispif.c52
-rw-r--r--drivers/media/platform/qcom/camss/camss-ispif.h4
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-170.c73
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-1.c18
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-7.c18
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-8.c18
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-480.c61
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c228
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.h15
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.c81
-rw-r--r--drivers/media/platform/qcom/camss/camss.c482
-rw-r--r--drivers/media/platform/qcom/camss/camss.h34
-rw-r--r--drivers/media/platform/qcom/venus/core.c10
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.h2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.c15
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c10
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c2
-rw-r--r--drivers/media/platform/renesas/rcar-isp.c2
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-csi2.c2
-rw-r--r--drivers/media/platform/renesas/rcar_drif.c3
-rw-r--r--drivers/media/platform/renesas/renesas-ceu.c6
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c24
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-common.h18
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c107
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c261
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-params.c4
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h620
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c251
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is.c33
-rw-r--r--drivers/media/platform/samsung/s3c-camif/camif-capture.c6
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c2
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c26
-rw-r--r--drivers/media/platform/ti/Kconfig12
-rw-r--r--drivers/media/platform/ti/Makefile1
-rw-r--r--drivers/media/platform/ti/am437x/am437x-vpfe.c4
-rw-r--r--drivers/media/platform/ti/j721e-csi2rx/Makefile2
-rw-r--r--drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c1159
-rw-r--r--drivers/media/platform/ti/omap3isp/ispstat.c2
-rw-r--r--drivers/media/platform/verisilicon/hantro_drv.c8
-rw-r--r--drivers/media/platform/verisilicon/hantro_postproc.c2
-rw-r--r--drivers/media/platform/verisilicon/rockchip_av1_filmgrain.c2
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu_hw.c2
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.c5
-rw-r--r--drivers/media/radio/radio-isa.c2
-rw-r--r--drivers/media/radio/radio-miropcm20.c4
-rw-r--r--drivers/media/radio/radio-si476x.c4
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/rc/Kconfig10
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/ati_remote.c2
-rw-r--r--drivers/media/rc/imon.c6
-rw-r--r--drivers/media/rc/ir-rx51.c285
-rw-r--r--drivers/media/rc/ir-sharp-decoder.c8
-rw-r--r--drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c1
-rw-r--r--drivers/media/rc/keymaps/rc-alink-dtu-m.c1
-rw-r--r--drivers/media/rc/keymaps/rc-anysee.c1
-rw-r--r--drivers/media/rc/keymaps/rc-apac-viewcomp.c1
-rw-r--r--drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c1
-rw-r--r--drivers/media/rc/keymaps/rc-asus-pc39.c1
-rw-r--r--drivers/media/rc/keymaps/rc-asus-ps3-100.c1
-rw-r--r--drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c1
-rw-r--r--drivers/media/rc/keymaps/rc-ati-x10.c1
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-a16d.c1
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-cardbus.c1
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-dvbt.c1
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-m135a.c1
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c1
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-rm-ks.c1
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia.c1
-rw-r--r--drivers/media/rc/keymaps/rc-avertv-303.c1
-rw-r--r--drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c1
-rw-r--r--drivers/media/rc/keymaps/rc-beelink-gs1.c1
-rw-r--r--drivers/media/rc/keymaps/rc-beelink-mxiii.c1
-rw-r--r--drivers/media/rc/keymaps/rc-behold-columbus.c1
-rw-r--r--drivers/media/rc/keymaps/rc-behold.c1
-rw-r--r--drivers/media/rc/keymaps/rc-budget-ci-old.c1
-rw-r--r--drivers/media/rc/keymaps/rc-cinergy-1400.c1
-rw-r--r--drivers/media/rc/keymaps/rc-cinergy.c1
-rw-r--r--drivers/media/rc/keymaps/rc-ct-90405.c1
-rw-r--r--drivers/media/rc/keymaps/rc-d680-dmb.c1
-rw-r--r--drivers/media/rc/keymaps/rc-dib0700-nec.c1
-rw-r--r--drivers/media/rc/keymaps/rc-dib0700-rc5.c1
-rw-r--r--drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c1
-rw-r--r--drivers/media/rc/keymaps/rc-digittrade.c1
-rw-r--r--drivers/media/rc/keymaps/rc-dm1105-nec.c1
-rw-r--r--drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c1
-rw-r--r--drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c1
-rw-r--r--drivers/media/rc/keymaps/rc-dreambox.c1
-rw-r--r--drivers/media/rc/keymaps/rc-dtt200u.c1
-rw-r--r--drivers/media/rc/keymaps/rc-dvbsky.c1
-rw-r--r--drivers/media/rc/keymaps/rc-dvico-mce.c1
-rw-r--r--drivers/media/rc/keymaps/rc-dvico-portable.c1
-rw-r--r--drivers/media/rc/keymaps/rc-em-terratec.c1
-rw-r--r--drivers/media/rc/keymaps/rc-encore-enltv-fm53.c1
-rw-r--r--drivers/media/rc/keymaps/rc-encore-enltv.c1
-rw-r--r--drivers/media/rc/keymaps/rc-encore-enltv2.c1
-rw-r--r--drivers/media/rc/keymaps/rc-evga-indtube.c1
-rw-r--r--drivers/media/rc/keymaps/rc-eztv.c1
-rw-r--r--drivers/media/rc/keymaps/rc-flydvb.c1
-rw-r--r--drivers/media/rc/keymaps/rc-flyvideo.c1
-rw-r--r--drivers/media/rc/keymaps/rc-fusionhdtv-mce.c1
-rw-r--r--drivers/media/rc/keymaps/rc-gadmei-rm008z.c1
-rw-r--r--drivers/media/rc/keymaps/rc-geekbox.c1
-rw-r--r--drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c1
-rw-r--r--drivers/media/rc/keymaps/rc-gotview7135.c1
-rw-r--r--drivers/media/rc/keymaps/rc-hauppauge.c1
-rw-r--r--drivers/media/rc/keymaps/rc-hisi-poplar.c1
-rw-r--r--drivers/media/rc/keymaps/rc-hisi-tv-demo.c1
-rw-r--r--drivers/media/rc/keymaps/rc-imon-mce.c1
-rw-r--r--drivers/media/rc/keymaps/rc-imon-pad.c1
-rw-r--r--drivers/media/rc/keymaps/rc-imon-rsc.c1
-rw-r--r--drivers/media/rc/keymaps/rc-iodata-bctv7e.c1
-rw-r--r--drivers/media/rc/keymaps/rc-it913x-v1.c1
-rw-r--r--drivers/media/rc/keymaps/rc-it913x-v2.c1
-rw-r--r--drivers/media/rc/keymaps/rc-kaiomy.c1
-rw-r--r--drivers/media/rc/keymaps/rc-khadas.c1
-rw-r--r--drivers/media/rc/keymaps/rc-khamsin.c1
-rw-r--r--drivers/media/rc/keymaps/rc-kworld-315u.c1
-rw-r--r--drivers/media/rc/keymaps/rc-kworld-pc150u.c1
-rw-r--r--drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c1
-rw-r--r--drivers/media/rc/keymaps/rc-leadtek-y04g0051.c1
-rw-r--r--drivers/media/rc/keymaps/rc-lme2510.c1
-rw-r--r--drivers/media/rc/keymaps/rc-manli.c1
-rw-r--r--drivers/media/rc/keymaps/rc-mecool-kii-pro.c1
-rw-r--r--drivers/media/rc/keymaps/rc-mecool-kiii-pro.c1
-rw-r--r--drivers/media/rc/keymaps/rc-medion-x10.c1
-rw-r--r--drivers/media/rc/keymaps/rc-minix-neo.c1
-rw-r--r--drivers/media/rc/keymaps/rc-msi-digivox-ii.c1
-rw-r--r--drivers/media/rc/keymaps/rc-msi-digivox-iii.c1
-rw-r--r--drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c1
-rw-r--r--drivers/media/rc/keymaps/rc-msi-tvanywhere.c1
-rw-r--r--drivers/media/rc/keymaps/rc-nebula.c1
-rw-r--r--drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c1
-rw-r--r--drivers/media/rc/keymaps/rc-norwood.c1
-rw-r--r--drivers/media/rc/keymaps/rc-npgtech.c1
-rw-r--r--drivers/media/rc/keymaps/rc-odroid.c1
-rw-r--r--drivers/media/rc/keymaps/rc-pctv-sedna.c1
-rw-r--r--drivers/media/rc/keymaps/rc-pine64.c1
-rw-r--r--drivers/media/rc/keymaps/rc-pinnacle-color.c1
-rw-r--r--drivers/media/rc/keymaps/rc-pinnacle-grey.c1
-rw-r--r--drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c1
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview-002t.c1
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview-mk12.c1
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview-new.c1
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview.c1
-rw-r--r--drivers/media/rc/keymaps/rc-powercolor-real-angel.c1
-rw-r--r--drivers/media/rc/keymaps/rc-proteus-2309.c1
-rw-r--r--drivers/media/rc/keymaps/rc-purpletv.c1
-rw-r--r--drivers/media/rc/keymaps/rc-pv951.c1
-rw-r--r--drivers/media/rc/keymaps/rc-rc6-mce.c1
-rw-r--r--drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c1
-rw-r--r--drivers/media/rc/keymaps/rc-reddo.c1
-rw-r--r--drivers/media/rc/keymaps/rc-snapstream-firefly.c1
-rw-r--r--drivers/media/rc/keymaps/rc-streamzap.c1
-rw-r--r--drivers/media/rc/keymaps/rc-su3000.c1
-rw-r--r--drivers/media/rc/keymaps/rc-tanix-tx3mini.c1
-rw-r--r--drivers/media/rc/keymaps/rc-tanix-tx5max.c1
-rw-r--r--drivers/media/rc/keymaps/rc-tbs-nec.c1
-rw-r--r--drivers/media/rc/keymaps/rc-technisat-ts35.c1
-rw-r--r--drivers/media/rc/keymaps/rc-technisat-usb2.c1
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-cinergy-c-pci.c1
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-cinergy-s2-hd.c1
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c1
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-slim-2.c1
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-slim.c1
-rw-r--r--drivers/media/rc/keymaps/rc-tevii-nec.c1
-rw-r--r--drivers/media/rc/keymaps/rc-tivo.c1
-rw-r--r--drivers/media/rc/keymaps/rc-total-media-in-hand-02.c1
-rw-r--r--drivers/media/rc/keymaps/rc-total-media-in-hand.c1
-rw-r--r--drivers/media/rc/keymaps/rc-trekstor.c1
-rw-r--r--drivers/media/rc/keymaps/rc-tt-1500.c1
-rw-r--r--drivers/media/rc/keymaps/rc-twinhan-dtv-cab-ci.c1
-rw-r--r--drivers/media/rc/keymaps/rc-twinhan1027.c1
-rw-r--r--drivers/media/rc/keymaps/rc-vega-s9x.c1
-rw-r--r--drivers/media/rc/keymaps/rc-videomate-m1f.c1
-rw-r--r--drivers/media/rc/keymaps/rc-videomate-s350.c1
-rw-r--r--drivers/media/rc/keymaps/rc-videomate-tv-pvr.c1
-rw-r--r--drivers/media/rc/keymaps/rc-videostrong-kii-pro.c1
-rw-r--r--drivers/media/rc/keymaps/rc-wetek-hub.c1
-rw-r--r--drivers/media/rc/keymaps/rc-wetek-play2.c1
-rw-r--r--drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c1
-rw-r--r--drivers/media/rc/keymaps/rc-winfast.c1
-rw-r--r--drivers/media/rc/keymaps/rc-x96max.c1
-rw-r--r--drivers/media/rc/keymaps/rc-xbox-360.c1
-rw-r--r--drivers/media/rc/keymaps/rc-xbox-dvd.c1
-rw-r--r--drivers/media/rc/keymaps/rc-zx-irdec.c1
-rw-r--r--drivers/media/rc/lirc_dev.c6
-rw-r--r--drivers/media/rc/meson-ir.c522
-rw-r--r--drivers/media/rc/pwm-ir-tx.c1
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_mux.c7
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_psi.c45
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.c2
-rw-r--r--drivers/media/test-drivers/vivid/vivid-rds-gen.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c9
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-core.c7
-rw-r--r--drivers/media/usb/cx231xx/cx231xx.h3
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c13
-rw-r--r--drivers/media/usb/dvb-usb/gp8psk.c2
-rw-r--r--drivers/media/usb/gspca/cpia1.c3
-rw-r--r--drivers/media/usb/siano/smsusb.c4
-rw-r--r--drivers/media/v4l2-core/Kconfig16
-rw-r--r--drivers/media/v4l2-core/Makefile5
-rw-r--r--drivers/media/v4l2-core/v4l2-event.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c1
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c55
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c1198
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-contig.c402
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c681
-rw-r--r--drivers/media/v4l2-core/videobuf-vmalloc.c326
-rw-r--r--drivers/memory/tegra/mc.c34
-rw-r--r--drivers/memory/tegra/tegra20.c28
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_msg.c2
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_msg.h2
-rw-r--r--drivers/misc/c2port/core.c2
-rw-r--r--drivers/misc/cxl/file.c21
-rw-r--r--drivers/misc/eeprom/Kconfig14
-rw-r--r--drivers/misc/eeprom/Makefile1
-rw-r--r--drivers/misc/eeprom/at24.c42
-rw-r--r--drivers/misc/eeprom/eeprom.c214
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c11
-rw-r--r--drivers/misc/ibmvmc.c4
-rw-r--r--drivers/misc/lkdtm/cfi.c13
-rw-r--r--drivers/misc/mei/bus.c92
-rw-r--r--drivers/misc/mei/client.c20
-rw-r--r--drivers/misc/mei/dma-ring.c6
-rw-r--r--drivers/misc/mei/hbm.c6
-rw-r--r--drivers/misc/mei/hw-me-regs.h3
-rw-r--r--drivers/misc/mei/hw-me.c77
-rw-r--r--drivers/misc/mei/hw-me.h4
-rw-r--r--drivers/misc/mei/hw.h12
-rw-r--r--drivers/misc/mei/init.c27
-rw-r--r--drivers/misc/mei/interrupt.c6
-rw-r--r--drivers/misc/mei/main.c8
-rw-r--r--drivers/misc/mei/mei_dev.h51
-rw-r--r--drivers/misc/mei/pxp/mei_pxp.c88
-rw-r--r--drivers/misc/ocxl/file.c27
-rw-r--r--drivers/misc/pci_endpoint_test.c4
-rw-r--r--drivers/misc/phantom.c24
-rw-r--r--drivers/misc/pvpanic/pvpanic-mmio.c58
-rw-r--r--drivers/misc/pvpanic/pvpanic-pci.c58
-rw-r--r--drivers/misc/pvpanic/pvpanic.c76
-rw-r--r--drivers/misc/pvpanic/pvpanic.h10
-rw-r--r--drivers/misc/ti-st/st_core.c7
-rw-r--r--drivers/misc/ti-st/st_kim.c5
-rw-r--r--drivers/misc/uacce/uacce.c17
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c20
-rw-r--r--drivers/mtd/chips/map_ram.c8
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.c6
-rw-r--r--drivers/mtd/devices/docg3.c5
-rw-r--r--drivers/mtd/devices/phram.c6
-rw-r--r--drivers/mtd/devices/powernv_flash.c6
-rw-r--r--drivers/mtd/devices/spear_smi.c6
-rw-r--r--drivers/mtd/devices/st_spi_fsm.c6
-rw-r--r--drivers/mtd/hyperbus/hbmc-am654.c6
-rw-r--r--drivers/mtd/hyperbus/rpc-if.c6
-rw-r--r--drivers/mtd/lpddr/lpddr2_nvm.c6
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c2
-rw-r--r--drivers/mtd/maps/lantiq-flash.c6
-rw-r--r--drivers/mtd/maps/physmap-core.c15
-rw-r--r--drivers/mtd/maps/plat-ram.c8
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c5
-rw-r--r--drivers/mtd/maps/sa1100-flash.c6
-rw-r--r--drivers/mtd/maps/sun_uflash.c6
-rw-r--r--drivers/mtd/mtdcore.c5
-rw-r--r--drivers/mtd/mtdpart.c6
-rw-r--r--drivers/mtd/nand/raw/arasan-nand-controller.c2
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c2
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c14
-rw-r--r--drivers/mtd/nand/raw/denali.h2
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c2
-rw-r--r--drivers/mtd/nand/raw/intel-nand-controller.c10
-rw-r--r--drivers/mtd/nand/raw/internals.h1
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c2
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c5
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c2
-rw-r--r--drivers/mtd/nand/raw/nand_base.c1
-rw-r--r--drivers/mtd/nand/raw/omap2.c8
-rw-r--r--drivers/mtd/nand/raw/renesas-nand-controller.c2
-rw-r--r--drivers/mtd/nand/raw/rockchip-nand-controller.c5
-rw-r--r--drivers/mtd/nand/raw/sh_flctl.c3
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c2
-rw-r--r--drivers/mtd/nand/raw/tegra_nand.c4
-rw-r--r--drivers/mtd/nand/raw/vf610_nfc.c10
-rw-r--r--drivers/mtd/nand/raw/xway_nand.c1
-rw-r--r--drivers/mtd/nand/spi/Makefile2
-rw-r--r--drivers/mtd/nand/spi/core.c1
-rw-r--r--drivers/mtd/nand/spi/foresee.c95
-rw-r--r--drivers/mtd/nand/spi/winbond.c45
-rw-r--r--drivers/mtd/nand/spi/xtx.c134
-rw-r--r--drivers/mtd/parsers/Kconfig5
-rw-r--r--drivers/mtd/parsers/Makefile1
-rw-r--r--drivers/mtd/parsers/ar7part.c129
-rw-r--r--drivers/mtd/spi-nor/Makefile2
-rw-r--r--drivers/mtd/spi-nor/atmel.c121
-rw-r--r--drivers/mtd/spi-nor/catalyst.c24
-rw-r--r--drivers/mtd/spi-nor/controllers/hisi-sfc.c5
-rw-r--r--drivers/mtd/spi-nor/controllers/nxp-spifi.c6
-rw-r--r--drivers/mtd/spi-nor/core.c63
-rw-r--r--drivers/mtd/spi-nor/core.h138
-rw-r--r--drivers/mtd/spi-nor/eon.c74
-rw-r--r--drivers/mtd/spi-nor/esmt.c29
-rw-r--r--drivers/mtd/spi-nor/everspin.c28
-rw-r--r--drivers/mtd/spi-nor/fujitsu.c21
-rw-r--r--drivers/mtd/spi-nor/gigadevice.c82
-rw-r--r--drivers/mtd/spi-nor/intel.c23
-rw-r--r--drivers/mtd/spi-nor/issi.c118
-rw-r--r--drivers/mtd/spi-nor/macronix.c220
-rw-r--r--drivers/mtd/spi-nor/micron-st.c423
-rw-r--r--drivers/mtd/spi-nor/spansion.c392
-rw-r--r--drivers/mtd/spi-nor/sst.c164
-rw-r--r--drivers/mtd/spi-nor/swp.c13
-rw-r--r--drivers/mtd/spi-nor/sysfs.c6
-rw-r--r--drivers/mtd/spi-nor/winbond.c288
-rw-r--r--drivers/mtd/spi-nor/xilinx.c32
-rw-r--r--drivers/mtd/spi-nor/xmc.c19
-rw-r--r--drivers/mtd/ubi/block.c4
-rw-r--r--drivers/mtd/ubi/build.c25
-rw-r--r--drivers/mtd/ubi/cdev.c3
-rw-r--r--drivers/mtd/ubi/eba.c3
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c112
-rw-r--r--drivers/mtd/ubi/fastmap.c66
-rw-r--r--drivers/mtd/ubi/ubi.h10
-rw-r--r--drivers/mtd/ubi/wl.c48
-rw-r--r--drivers/mtd/ubi/wl.h6
-rw-r--r--drivers/net/dsa/lan9303_mdio.c4
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c56
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devlink.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c114
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c15
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c81
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h34
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c42
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c14
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c4
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.c2
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c3
-rw-r--r--drivers/net/mdio/acpi_mdio.c1
-rw-r--r--drivers/net/mdio/fwnode_mdio.c1
-rw-r--r--drivers/net/mdio/mdio-aspeed.c1
-rw-r--r--drivers/net/mdio/mdio-bitbang.c1
-rw-r--r--drivers/net/mdio/of_mdio.c1
-rw-r--r--drivers/net/phy/bcm-phy-ptp.c1
-rw-r--r--drivers/net/phy/bcm87xx.c1
-rw-r--r--drivers/net/phy/phylink.c2
-rw-r--r--drivers/net/phy/sfp.c1
-rw-r--r--drivers/net/ppp/ppp_generic.c4
-rw-r--r--drivers/nvme/Makefile2
-rw-r--r--drivers/nvme/common/Kconfig7
-rw-r--r--drivers/nvme/common/Makefile7
-rw-r--r--drivers/nvme/common/auth.c23
-rw-r--r--drivers/nvme/common/keyring.c11
-rw-r--r--drivers/nvme/host/Kconfig2
-rw-r--r--drivers/nvme/host/auth.c13
-rw-r--r--drivers/nvme/host/core.c30
-rw-r--r--drivers/nvme/host/fc.c6
-rw-r--r--drivers/nvme/host/ioctl.c7
-rw-r--r--drivers/nvme/host/tcp.c9
-rw-r--r--drivers/nvme/target/Kconfig2
-rw-r--r--drivers/nvme/target/fabrics-cmd-auth.c2
-rw-r--r--drivers/nvme/target/loop.c4
-rw-r--r--drivers/nvmem/apple-efuses.c1
-rw-r--r--drivers/nvmem/core.c10
-rw-r--r--drivers/nvmem/imx-ocotp-scu.c1
-rw-r--r--drivers/nvmem/imx-ocotp.c1
-rw-r--r--drivers/nvmem/meson-efuse.c1
-rw-r--r--drivers/nvmem/meson-mx-efuse.c1
-rw-r--r--drivers/nvmem/microchip-otpc.c1
-rw-r--r--drivers/nvmem/mtk-efuse.c1
-rw-r--r--drivers/nvmem/mxs-ocotp.c10
-rw-r--r--drivers/nvmem/qcom-spmi-sdam.c1
-rw-r--r--drivers/nvmem/qfprom.c7
-rw-r--r--drivers/nvmem/rave-sp-eeprom.c1
-rw-r--r--drivers/nvmem/rockchip-efuse.c1
-rw-r--r--drivers/nvmem/sc27xx-efuse.c1
-rw-r--r--drivers/nvmem/sec-qfprom.c1
-rw-r--r--drivers/nvmem/sprd-efuse.c1
-rw-r--r--drivers/nvmem/stm32-romem.c8
-rw-r--r--drivers/nvmem/sunplus-ocotp.c1
-rw-r--r--drivers/nvmem/sunxi_sid.c1
-rw-r--r--drivers/nvmem/uniphier-efuse.c1
-rw-r--r--drivers/nvmem/zynqmp_nvmem.c1
-rw-r--r--drivers/of/property.c2
-rw-r--r--drivers/parport/procfs.c53
-rw-r--r--drivers/parport/share.c26
-rw-r--r--drivers/pci/pci-sysfs.c26
-rw-r--r--drivers/pci/pcie/Kconfig9
-rw-r--r--drivers/pci/pcie/aer.c159
-rw-r--r--drivers/pcmcia/cs.c1
-rw-r--r--drivers/pcmcia/ds.c14
-rw-r--r--drivers/pcmcia/pcmcia_resource.c2
-rw-r--r--drivers/pcmcia/tcic.c2
-rw-r--r--drivers/perf/riscv_pmu_sbi.c13
-rw-r--r--drivers/phy/Kconfig2
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c2
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns-usb3.c9
-rw-r--r--drivers/phy/marvell/phy-berlin-usb.c7
-rw-r--r--drivers/phy/motorola/phy-cpcap-usb.c7
-rw-r--r--drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c146
-rw-r--r--drivers/phy/qualcomm/phy-qcom-m31.c59
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c50
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie.c37
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-misc-v4.h17
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h29
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6.h20
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-ufs.c142
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c225
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h8
-rw-r--r--drivers/phy/ralink/phy-ralink-usb.c9
-rw-r--r--drivers/phy/realtek/phy-rtk-usb2.c2
-rw-r--r--drivers/phy/realtek/phy-rtk-usb3.c2
-rw-r--r--drivers/phy/renesas/r8a779f0-ether-serdes.c16
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c2
-rw-r--r--drivers/phy/rockchip/phy-rockchip-pcie.c11
-rw-r--r--drivers/phy/rockchip/phy-rockchip-usb.c10
-rw-r--r--drivers/phy/ti/phy-dm816x-usb.c11
-rw-r--r--drivers/phy/ti/phy-omap-control.c9
-rw-r--r--drivers/phy/ti/phy-omap-usb2.c11
-rw-r--r--drivers/phy/ti/phy-ti-pipe3.c14
-rw-r--r--drivers/pinctrl/Kconfig1
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c19
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c19
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c7
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c6
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns.c8
-rw-r--r--drivers/pinctrl/berlin/berlin-bg2.c8
-rw-r--r--drivers/pinctrl/berlin/berlin-bg2cd.c8
-rw-r--r--drivers/pinctrl/berlin/berlin-bg2q.c8
-rw-r--r--drivers/pinctrl/berlin/berlin-bg4ct.c9
-rw-r--r--drivers/pinctrl/berlin/berlin.c8
-rw-r--r--drivers/pinctrl/berlin/pinctrl-as370.c9
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-cs42l43.c9
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-lochnagar.c2
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-madera-core.c6
-rw-r--r--drivers/pinctrl/core.c139
-rw-r--r--drivers/pinctrl/freescale/pinctrl-mxs.c13
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c32
-rw-r--r--drivers/pinctrl/intel/pinctrl-broxton.c3
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c161
-rw-r--r--drivers/pinctrl/intel/pinctrl-denverton.c7
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c198
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h2
-rw-r--r--drivers/pinctrl/intel/pinctrl-lynxpoint.c38
-rw-r--r--drivers/pinctrl/intel/pinctrl-merrifield.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-moorefield.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.c18
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7981.c16
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c10
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c8
-rw-r--r--drivers/pinctrl/meson/Kconfig6
-rw-r--r--drivers/pinctrl/meson/Makefile1
-rw-r--r--drivers/pinctrl/meson/pinctrl-amlogic-t7.c1611
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-38x.c9
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-39x.c9
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-ap806.c5
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-cp110.c6
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-xp.c9
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-dove.c6
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-kirkwood.c7
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-orion.c7
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c14
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c10
-rw-r--r--drivers/pinctrl/nuvoton/Kconfig17
-rw-r--r--drivers/pinctrl/nuvoton/Makefile1
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c29
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c2486
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-wpcm450.c12
-rw-r--r--drivers/pinctrl/pinconf-generic.c16
-rw-r--r--drivers/pinctrl/pinconf.c14
-rw-r--r--drivers/pinctrl/pinctrl-amd.c6
-rw-r--r--drivers/pinctrl/pinctrl-artpec6.c6
-rw-r--r--drivers/pinctrl/pinctrl-as3722.c14
-rw-r--r--drivers/pinctrl/pinctrl-at91.c11
-rw-r--r--drivers/pinctrl/pinctrl-axp209.c7
-rw-r--r--drivers/pinctrl/pinctrl-cy8c95x0.c8
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c19
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.h8
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08_i2c.c93
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08_spi.c97
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c10
-rw-r--r--drivers/pinctrl/pinctrl-rk805.c10
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c6
-rw-r--r--drivers/pinctrl/pinctrl-single.c21
-rw-r--r--drivers/pinctrl/pinctrl-st.c13
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c16
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c7
-rw-r--r--drivers/pinctrl/pinctrl-tb10x.c6
-rw-r--r--drivers/pinctrl/pinctrl-utils.c6
-rw-r--r--drivers/pinctrl/pinctrl-xway.c11
-rw-r--r--drivers/pinctrl/pinmux.c6
-rw-r--r--drivers/pinctrl/qcom/pinctrl-apq8064.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-apq8084.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq4019.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5018.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5332.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq6018.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq8064.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq8074.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq9574.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.h2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-mdm9607.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-mdm9615.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8226.c22
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8660.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8909.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8916.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8953.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8960.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8976.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8994.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8996.c16
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8998.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8x74.c14
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcm2290.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs404.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qdf2xxx.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qdu1000.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sa8775p.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7180.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8180x.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8280xp.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm660.c16
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm670.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm845.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx55.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx65.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx75.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6115-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6115.c14
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6125.c19
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6350.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6375.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm7150.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8150.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8250.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8350-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8350.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8450.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8550-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8550.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c6
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c6
-rw-r--r--drivers/pinctrl/realtek/Kconfig23
-rw-r--r--drivers/pinctrl/realtek/Makefile6
-rw-r--r--drivers/pinctrl/realtek/pinctrl-rtd.c596
-rw-r--r--drivers/pinctrl/realtek/pinctrl-rtd.h124
-rw-r--r--drivers/pinctrl/realtek/pinctrl-rtd1315e.c1440
-rw-r--r--drivers/pinctrl/realtek/pinctrl-rtd1319d.c1610
-rw-r--r--drivers/pinctrl/realtek/pinctrl-rtd1619b.c1601
-rw-r--r--drivers/pinctrl/renesas/gpio.c8
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7778.c38
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7779.c2
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c719
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzn1.c6
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzv2m.c4
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c2
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h2
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c56
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h4
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c8
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c2
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.c3
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.h2
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c14
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c14
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c20
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c380
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c8
-rw-r--r--drivers/pinctrl/ti/pinctrl-ti-iodelay.c29
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-core.c2
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c9
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c28
-rw-r--r--drivers/platform/x86/intel/ifs/load.c8
-rw-r--r--drivers/power/supply/tps65217_charger.c2
-rw-r--r--drivers/ptp/ptp_chardev.c23
-rw-r--r--drivers/ptp/ptp_clock.c8
-rw-r--r--drivers/ptp/ptp_private.h1
-rw-r--r--drivers/pwm/Kconfig20
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/core.c55
-rw-r--r--drivers/pwm/pwm-ab8500.c1
-rw-r--r--drivers/pwm/pwm-apple.c1
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c1
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c1
-rw-r--r--drivers/pwm/pwm-atmel.c3
-rw-r--r--drivers/pwm/pwm-bcm-iproc.c38
-rw-r--r--drivers/pwm/pwm-bcm-kona.c1
-rw-r--r--drivers/pwm/pwm-bcm2835.c36
-rw-r--r--drivers/pwm/pwm-berlin.c63
-rw-r--r--drivers/pwm/pwm-brcmstb.c49
-rw-r--r--drivers/pwm/pwm-clk.c1
-rw-r--r--drivers/pwm/pwm-clps711x.c1
-rw-r--r--drivers/pwm/pwm-crc.c5
-rw-r--r--drivers/pwm/pwm-cros-ec.c67
-rw-r--r--drivers/pwm/pwm-dwc-core.c184
-rw-r--r--drivers/pwm/pwm-dwc.c197
-rw-r--r--drivers/pwm/pwm-dwc.h60
-rw-r--r--drivers/pwm/pwm-ep93xx.c1
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c1
-rw-r--r--drivers/pwm/pwm-hibvt.c1
-rw-r--r--drivers/pwm/pwm-img.c1
-rw-r--r--drivers/pwm/pwm-imx-tpm.c30
-rw-r--r--drivers/pwm/pwm-imx1.c1
-rw-r--r--drivers/pwm/pwm-imx27.c1
-rw-r--r--drivers/pwm/pwm-intel-lgm.c1
-rw-r--r--drivers/pwm/pwm-iqs620a.c1
-rw-r--r--drivers/pwm/pwm-jz4740.c46
-rw-r--r--drivers/pwm/pwm-keembay.c1
-rw-r--r--drivers/pwm/pwm-lp3943.c22
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c1
-rw-r--r--drivers/pwm/pwm-lpc32xx.c1
-rw-r--r--drivers/pwm/pwm-lpss.c1
-rw-r--r--drivers/pwm/pwm-mediatek.c1
-rw-r--r--drivers/pwm/pwm-meson.c1
-rw-r--r--drivers/pwm/pwm-microchip-core.c1
-rw-r--r--drivers/pwm/pwm-mtk-disp.c25
-rw-r--r--drivers/pwm/pwm-mxs.c1
-rw-r--r--drivers/pwm/pwm-ntxec.c1
-rw-r--r--drivers/pwm/pwm-omap-dmtimer.c3
-rw-r--r--drivers/pwm/pwm-pca9685.c1
-rw-r--r--drivers/pwm/pwm-pxa.c3
-rw-r--r--drivers/pwm/pwm-raspberrypi-poe.c1
-rw-r--r--drivers/pwm/pwm-rcar.c1
-rw-r--r--drivers/pwm/pwm-renesas-tpu.c23
-rw-r--r--drivers/pwm/pwm-rockchip.c1
-rw-r--r--drivers/pwm/pwm-rz-mtu3.c1
-rw-r--r--drivers/pwm/pwm-samsung.c152
-rw-r--r--drivers/pwm/pwm-sifive.c1
-rw-r--r--drivers/pwm/pwm-sl28cpld.c1
-rw-r--r--drivers/pwm/pwm-spear.c41
-rw-r--r--drivers/pwm/pwm-sprd.c29
-rw-r--r--drivers/pwm/pwm-sti.c30
-rw-r--r--drivers/pwm/pwm-stm32-lp.c1
-rw-r--r--drivers/pwm/pwm-stm32.c1
-rw-r--r--drivers/pwm/pwm-stmpe.c1
-rw-r--r--drivers/pwm/pwm-sun4i.c1
-rw-r--r--drivers/pwm/pwm-sunplus.c1
-rw-r--r--drivers/pwm/pwm-tegra.c1
-rw-r--r--drivers/pwm/pwm-tiecap.c1
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c1
-rw-r--r--drivers/pwm/pwm-twl-led.c2
-rw-r--r--drivers/pwm/pwm-twl.c2
-rw-r--r--drivers/pwm/pwm-visconti.c1
-rw-r--r--drivers/pwm/pwm-vt8500.c43
-rw-r--r--drivers/pwm/pwm-xilinx.c1
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c16
-rw-r--r--drivers/rapidio/rio_cm.c18
-rw-r--r--drivers/remoteproc/mtk_common.h39
-rw-r--r--drivers/remoteproc/mtk_scp.c536
-rw-r--r--drivers/remoteproc/mtk_scp_ipi.c4
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c1
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c19
-rw-r--r--drivers/remoteproc/st_remoteproc.c16
-rw-r--r--drivers/remoteproc/stm32_rproc.c4
-rw-r--r--drivers/remoteproc/xlnx_r5_remoteproc.c110
-rw-r--r--drivers/rpmsg/rpmsg_core.c2
-rw-r--r--drivers/rpmsg/rpmsg_ns.c2
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c6
-rw-r--r--drivers/rtc/Kconfig13
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/nvmem.c1
-rw-r--r--drivers/rtc/rtc-at91rm9200.c14
-rw-r--r--drivers/rtc/rtc-brcmstb-waketimer.c47
-rw-r--r--drivers/rtc/rtc-efi.c2
-rw-r--r--drivers/rtc/rtc-ep93xx.c8
-rw-r--r--drivers/rtc/rtc-imxdi.c14
-rw-r--r--drivers/rtc/rtc-mv.c14
-rw-r--r--drivers/rtc/rtc-omap.c8
-rw-r--r--drivers/rtc/rtc-pcap.c6
-rw-r--r--drivers/rtc/rtc-pcf85363.c2
-rw-r--r--drivers/rtc/rtc-pxa.c13
-rw-r--r--drivers/rtc/rtc-r7301.c35
-rw-r--r--drivers/rtc/rtc-sh.c16
-rw-r--r--drivers/rtc/rtc-ssd202d.c249
-rw-r--r--drivers/s390/char/diag_ftp.c2
-rw-r--r--drivers/s390/char/sclp.c14
-rw-r--r--drivers/s390/char/sclp_cmd.c3
-rw-r--r--drivers/s390/char/sclp_early.c2
-rw-r--r--drivers/s390/char/sclp_early_core.c7
-rw-r--r--drivers/s390/cio/chp.c2
-rw-r--r--drivers/s390/cio/chsc.c8
-rw-r--r--drivers/s390/cio/crw.c4
-rw-r--r--drivers/s390/cio/isc.c4
-rw-r--r--drivers/s390/crypto/ap_bus.c68
-rw-r--r--drivers/s390/crypto/ap_bus.h4
-rw-r--r--drivers/s390/crypto/ap_queue.c94
-rw-r--r--drivers/s390/crypto/zcrypt_api.c10
-rw-r--r--drivers/s390/crypto/zcrypt_card.c4
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c4
-rw-r--r--drivers/s390/crypto/zcrypt_error.h18
-rw-r--r--drivers/s390/crypto/zcrypt_queue.c5
-rw-r--r--drivers/s390/net/qeth_core_main.c2
-rw-r--r--drivers/soc/fsl/qe/qe_common.c4
-rw-r--r--drivers/soundwire/bus.c12
-rw-r--r--drivers/soundwire/dmi-quirks.c2
-rw-r--r--drivers/soundwire/qcom.c18
-rw-r--r--drivers/spi/Kconfig11
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/spi-ljca.c297
-rw-r--r--drivers/staging/Kconfig4
-rw-r--r--drivers/staging/Makefile2
-rw-r--r--drivers/staging/fbtft/fbtft.h5
-rw-r--r--drivers/staging/fieldbus/anybuss/arcx-anybus.c17
-rw-r--r--drivers/staging/gdm724x/gdm_tty.c1
-rw-r--r--drivers/staging/greybus/audio_manager_private.h7
-rw-r--r--drivers/staging/greybus/authentication.c19
-rw-r--r--drivers/staging/greybus/bootrom.c2
-rw-r--r--drivers/staging/greybus/camera.c13
-rw-r--r--drivers/staging/greybus/fw-management.c19
-rw-r--r--drivers/staging/greybus/pwm.c1
-rw-r--r--drivers/staging/greybus/raw.c23
-rw-r--r--drivers/staging/iio/Kconfig1
-rw-r--r--drivers/staging/iio/Makefile1
-rw-r--r--drivers/staging/iio/frequency/ad9832.c15
-rw-r--r--drivers/staging/iio/frequency/ad9834.c21
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c22
-rw-r--r--drivers/staging/iio/resolver/Kconfig18
-rw-r--r--drivers/staging/iio/resolver/Makefile6
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c716
-rw-r--r--drivers/staging/ks7010/ks7010_sdio.c4
-rw-r--r--drivers/staging/media/atomisp/Kconfig12
-rw-r--r--drivers/staging/media/atomisp/Makefile16
-rw-r--r--drivers/staging/media/atomisp/TODO3
-rw-r--r--drivers/staging/media/atomisp/i2c/Kconfig12
-rw-r--r--drivers/staging/media/atomisp/i2c/Makefile1
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc0310.c26
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/Makefile2
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/ad5823.h63
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c1763
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/ov5693.h1331
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp.h10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.c44
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.h13
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.c15
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_fops.c7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.c21
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.c48
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c19
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_local.h3
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_private.h2
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/isys_irq_global.h2
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/debug_global.h7
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma.c266
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_local.h116
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter.c2
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c446
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq.c19
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_local.h8
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp.c60
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h31
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp.c47
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_local.h26
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h2
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/dma_public.h13
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/irq_public.h10
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isp_public.h13
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_dma_public.h2
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_irq_public.h2
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_public.h38
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/pixelgen_public.h2
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/sp_public.h16
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_irq.h2
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_acc_types.h6
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_mipi.h16
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_local.h7
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_private.h7
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_public.h4
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c21
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_local.h198
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_private.h4
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_public.h42
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_input_system_local.h88
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_input_system_private.h112
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c15
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h85
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c1495
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys.h9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys_comm.h2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.c2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.c2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_init.c31
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.c2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c229
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c8
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c2
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css.c238
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_internal.h41
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mipi.c29
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_params.c41
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_sp.c62
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_sp.h4
-rw-r--r--drivers/staging/media/deprecated/atmel/atmel-isc-base.c4
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-params.c44
-rw-r--r--drivers/staging/media/ipu3/ipu3.c6
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.c24
-rw-r--r--drivers/staging/media/tegra-video/csi.c4
-rw-r--r--drivers/staging/media/tegra-video/vip.c2
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c2
-rw-r--r--drivers/staging/pi433/pi433_if.c21
-rw-r--r--drivers/staging/qlge/Kconfig11
-rw-r--r--drivers/staging/qlge/Makefile8
-rw-r--r--drivers/staging/qlge/TODO33
-rw-r--r--drivers/staging/qlge/qlge.h2293
-rw-r--r--drivers/staging/qlge/qlge_dbg.c1311
-rw-r--r--drivers/staging/qlge/qlge_devlink.c139
-rw-r--r--drivers/staging/qlge/qlge_devlink.h9
-rw-r--r--drivers/staging/qlge/qlge_ethtool.c746
-rw-r--r--drivers/staging/qlge/qlge_main.c4845
-rw-r--r--drivers/staging/qlge/qlge_mpi.c1273
-rw-r--r--drivers/staging/rtl8192e/dot11d.c3
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_def.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c173
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h3
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h104
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c17
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h62
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.c120
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.h1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c358
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h18
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c309
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.h27
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c9
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c273
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c292
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HT.h2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c115
-rw-r--r--drivers/staging/rtl8192e/rtl819x_Qos.h130
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TS.h8
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c256
-rw-r--r--drivers/staging/rtl8192e/rtllib.h308
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_ccmp.c19
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c40
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_wep.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_debug.h49
-rw-r--r--drivers/staging/rtl8192e/rtllib_module.c8
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c455
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c489
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c51
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c513
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c2
-rw-r--r--drivers/staging/rtl8192u/Kconfig12
-rw-r--r--drivers/staging/rtl8192u/Makefile27
-rw-r--r--drivers/staging/rtl8192u/TODO16
-rw-r--r--drivers/staging/rtl8192u/authors1
-rw-r--r--drivers/staging/rtl8192u/changes4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/dot11d.c174
-rw-r--r--drivers/staging/rtl8192u/ieee80211/dot11d.h57
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h2385
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c235
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h86
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c421
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c718
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c247
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_module.c287
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c2430
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c3056
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c598
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c839
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c810
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BA.h54
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c700
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h302
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c1295
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h82
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TS.h102
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c534
-rw-r--r--drivers/staging/rtl8192u/r8180_93cx6.c170
-rw-r--r--drivers/staging/rtl8192u/r8180_93cx6.h25
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.c294
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.h24
-rw-r--r--drivers/staging/rtl8192u/r8192U.h1129
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c4800
-rw-r--r--drivers/staging/rtl8192u/r8192U_debugfs.c188
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c2821
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.h176
-rw-r--r--drivers/staging/rtl8192u/r8192U_hw.h246
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.c943
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.h24
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c508
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.h190
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.c340
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.h19
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware_img.c549
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware_img.h26
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.c1646
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.h81
-rw-r--r--drivers/staging/rtl8192u/r819xU_phyreg.h143
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c6
-rw-r--r--drivers/staging/rts5208/rtsx.c24
-rw-r--r--drivers/staging/rts5208/rtsx.h66
-rw-r--r--drivers/staging/sm750fb/ddk750_mode.c94
-rw-r--r--drivers/staging/sm750fb/ddk750_mode.h2
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c2
-rw-r--r--drivers/staging/vc04_services/Makefile1
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.c32
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.h1
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c31
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c227
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h3
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.c99
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.h57
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c407
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h67
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c103
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c66
-rw-r--r--drivers/staging/vme_user/vme.c256
-rw-r--r--drivers/staging/vme_user/vme.h6
-rw-r--r--drivers/staging/vme_user/vme_fake.c77
-rw-r--r--drivers/staging/vme_user/vme_tsi148.c9
-rw-r--r--drivers/staging/vme_user/vme_tsi148.h704
-rw-r--r--drivers/staging/vme_user/vme_user.c26
-rw-r--r--drivers/staging/vt6655/baseband.c86
-rw-r--r--drivers/staging/vt6655/card.c194
-rw-r--r--drivers/staging/vt6655/card.h10
-rw-r--r--drivers/staging/vt6655/channel.c28
-rw-r--r--drivers/staging/vt6655/device.h28
-rw-r--r--drivers/staging/vt6655/device_main.c46
-rw-r--r--drivers/staging/vt6655/dpc.c2
-rw-r--r--drivers/staging/vt6655/rf.c18
-rw-r--r--drivers/staging/vt6655/rf.h2
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c2
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c12
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.h3
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c2
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.h12
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c179
-rw-r--r--drivers/tee/tee_core.c21
-rw-r--r--drivers/thunderbolt/clx.c47
-rw-r--r--drivers/thunderbolt/dma_test.c14
-rw-r--r--drivers/thunderbolt/path.c7
-rw-r--r--drivers/thunderbolt/quirks.c3
-rw-r--r--drivers/thunderbolt/retimer.c1
-rw-r--r--drivers/thunderbolt/switch.c337
-rw-r--r--drivers/thunderbolt/tb.c774
-rw-r--r--drivers/thunderbolt/tb.h60
-rw-r--r--drivers/thunderbolt/tb_regs.h19
-rw-r--r--drivers/thunderbolt/tunnel.c263
-rw-r--r--drivers/thunderbolt/tunnel.h26
-rw-r--r--drivers/thunderbolt/usb4.c135
-rw-r--r--drivers/tty/hvc/hvc_xen.c39
-rw-r--r--drivers/tty/hvc/hvcs.c2
-rw-r--r--drivers/tty/mxser.c2
-rw-r--r--drivers/tty/n_gsm.c3
-rw-r--r--drivers/tty/n_tty.c77
-rw-r--r--drivers/tty/serdev/core.c24
-rw-r--r--drivers/tty/serial/21285.c8
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c33
-rw-r--r--drivers/tty/serial/8250/8250_bcm7271.c69
-rw-r--r--drivers/tty/serial/8250/8250_core.c14
-rw-r--r--drivers/tty/serial/8250/8250_dma.c8
-rw-r--r--drivers/tty/serial/8250/8250_dw.c35
-rw-r--r--drivers/tty/serial/8250/8250_exar.c63
-rw-r--r--drivers/tty/serial/8250/8250_fsl.c6
-rw-r--r--drivers/tty/serial/8250/8250_mid.c18
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c8
-rw-r--r--drivers/tty/serial/8250/8250_of.c44
-rw-r--r--drivers/tty/serial/8250/8250_omap.c114
-rw-r--r--drivers/tty/serial/8250/8250_pci.c591
-rw-r--r--drivers/tty/serial/8250/8250_pci1xxxx.c10
-rw-r--r--drivers/tty/serial/8250/8250_port.c118
-rw-r--r--drivers/tty/serial/8250/Kconfig2
-rw-r--r--drivers/tty/serial/8250/Makefile42
-rw-r--r--drivers/tty/serial/Kconfig26
-rw-r--r--drivers/tty/serial/Makefile126
-rw-r--r--drivers/tty/serial/altera_jtaguart.c28
-rw-r--r--drivers/tty/serial/altera_uart.c20
-rw-r--r--drivers/tty/serial/amba-pl010.c20
-rw-r--r--drivers/tty/serial/amba-pl011.c74
-rw-r--r--drivers/tty/serial/apbuart.c8
-rw-r--r--drivers/tty/serial/ar933x_uart.c26
-rw-r--r--drivers/tty/serial/arc_uart.c16
-rw-r--r--drivers/tty/serial/atmel_serial.c24
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c22
-rw-r--r--drivers/tty/serial/cpm_uart.c8
-rw-r--r--drivers/tty/serial/digicolor-usart.c18
-rw-r--r--drivers/tty/serial/dz.c32
-rw-r--r--drivers/tty/serial/esp32_acm.c459
-rw-r--r--drivers/tty/serial/esp32_uart.c784
-rw-r--r--drivers/tty/serial/fsl_linflexuart.c32
-rw-r--r--drivers/tty/serial/fsl_lpuart.c88
-rw-r--r--drivers/tty/serial/icom.c26
-rw-r--r--drivers/tty/serial/imx.c128
-rw-r--r--drivers/tty/serial/ip22zilog.c36
-rw-r--r--drivers/tty/serial/jsm/jsm_neo.c4
-rw-r--r--drivers/tty/serial/jsm/jsm_tty.c16
-rw-r--r--drivers/tty/serial/liteuart.c20
-rw-r--r--drivers/tty/serial/lpc32xx_hs.c26
-rw-r--r--drivers/tty/serial/ma35d1_serial.c25
-rw-r--r--drivers/tty/serial/max310x.c2
-rw-r--r--drivers/tty/serial/mcf.c20
-rw-r--r--drivers/tty/serial/men_z135_uart.c8
-rw-r--r--drivers/tty/serial/meson_uart.c48
-rw-r--r--drivers/tty/serial/milbeaut_usio.c16
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c12
-rw-r--r--drivers/tty/serial/mps2-uart.c16
-rw-r--r--drivers/tty/serial/msm_serial.c38
-rw-r--r--drivers/tty/serial/mvebu-uart.c18
-rw-r--r--drivers/tty/serial/omap-serial.c38
-rw-r--r--drivers/tty/serial/owl-uart.c26
-rw-r--r--drivers/tty/serial/pch_uart.c10
-rw-r--r--drivers/tty/serial/pic32_uart.c20
-rw-r--r--drivers/tty/serial/pmac_zilog.c52
-rw-r--r--drivers/tty/serial/pxa.c30
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c8
-rw-r--r--drivers/tty/serial/rda-uart.c34
-rw-r--r--drivers/tty/serial/rp2.c20
-rw-r--r--drivers/tty/serial/sa1100.c20
-rw-r--r--drivers/tty/serial/samsung_tty.c171
-rw-r--r--drivers/tty/serial/sb1250-duart.c12
-rw-r--r--drivers/tty/serial/sc16is7xx.c83
-rw-r--r--drivers/tty/serial/serial-tegra.c32
-rw-r--r--drivers/tty/serial/serial_core.c131
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.c4
-rw-r--r--drivers/tty/serial/serial_port.c4
-rw-r--r--drivers/tty/serial/serial_txx9.c26
-rw-r--r--drivers/tty/serial/sh-sci.c68
-rw-r--r--drivers/tty/serial/sifive.c16
-rw-r--r--drivers/tty/serial/sprd_serial.c30
-rw-r--r--drivers/tty/serial/st-asc.c18
-rw-r--r--drivers/tty/serial/stm32-usart.c51
-rw-r--r--drivers/tty/serial/sunhv.c28
-rw-r--r--drivers/tty/serial/sunplus-uart.c26
-rw-r--r--drivers/tty/serial/sunsab.c34
-rw-r--r--drivers/tty/serial/sunsu.c46
-rw-r--r--drivers/tty/serial/sunzilog.c42
-rw-r--r--drivers/tty/serial/timbuart.c8
-rw-r--r--drivers/tty/serial/uartlite.c18
-rw-r--r--drivers/tty/serial/ucc_uart.c4
-rw-r--r--drivers/tty/serial/vt8500_serial.c8
-rw-r--r--drivers/tty/serial/xilinx_uartps.c58
-rw-r--r--drivers/tty/sysrq.c3
-rw-r--r--drivers/tty/tty.h13
-rw-r--r--drivers/tty/tty_buffer.c5
-rw-r--r--drivers/tty/tty_io.c130
-rw-r--r--drivers/tty/tty_ioctl.c234
-rw-r--r--drivers/tty/tty_jobctrl.c17
-rw-r--r--drivers/tty/tty_port.c6
-rw-r--r--drivers/tty/vcc.c16
-rw-r--r--drivers/tty/vt/consolemap.c2
-rw-r--r--drivers/tty/vt/keyboard.c7
-rw-r--r--drivers/tty/vt/vc_screen.c32
-rw-r--r--drivers/tty/vt/vt.c29
-rw-r--r--drivers/usb/Kconfig10
-rw-r--r--drivers/usb/c67x00/c67x00-hcd.h1
-rw-r--r--drivers/usb/cdns3/cdnsp-debug.h3
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.c6
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.h5
-rw-r--r--drivers/usb/cdns3/cdnsp-ring.c24
-rw-r--r--drivers/usb/chipidea/Kconfig4
-rw-r--r--drivers/usb/chipidea/Makefile1
-rw-r--r--drivers/usb/chipidea/ci_hdrc_npcm.c114
-rw-r--r--drivers/usb/chipidea/ci_hdrc_tegra.c16
-rw-r--r--drivers/usb/chipidea/ci_hdrc_usb2.c13
-rw-r--r--drivers/usb/chipidea/host.c48
-rw-r--r--drivers/usb/chipidea/otg.c5
-rw-r--r--drivers/usb/core/hcd-pci.c3
-rw-r--r--drivers/usb/core/hub.c4
-rw-r--r--drivers/usb/core/hub.h3
-rw-r--r--drivers/usb/core/port.c22
-rw-r--r--drivers/usb/dwc2/hcd.c2
-rw-r--r--drivers/usb/dwc2/params.c21
-rw-r--r--drivers/usb/dwc3/Kconfig11
-rw-r--r--drivers/usb/dwc3/Makefile1
-rw-r--r--drivers/usb/dwc3/core.c188
-rw-r--r--drivers/usb/dwc3/core.h19
-rw-r--r--drivers/usb/dwc3/dwc3-rtk.c475
-rw-r--r--drivers/usb/dwc3/dwc3-xilinx.c14
-rw-r--r--drivers/usb/gadget/function/f_ncm.c27
-rw-r--r--drivers/usb/gadget/function/f_uac2.c16
-rw-r--r--drivers/usb/gadget/function/f_uvc.c14
-rw-r--r--drivers/usb/gadget/function/u_ether.c2
-rw-r--r--drivers/usb/gadget/function/u_uac2.h8
-rw-r--r--drivers/usb/gadget/legacy/inode.c17
-rw-r--r--drivers/usb/gadget/legacy/raw_gadget.c91
-rw-r--r--drivers/usb/gadget/udc/aspeed_udc.c19
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c3
-rw-r--r--drivers/usb/gadget/udc/core.c19
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c10
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c3
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.c7
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c3
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c3
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c3
-rw-r--r--drivers/usb/host/pci-quirks.c144
-rw-r--r--drivers/usb/host/pci-quirks.h34
-rw-r--r--drivers/usb/host/xhci-debugfs.c2
-rw-r--r--drivers/usb/host/xhci-ext-caps.h27
-rw-r--r--drivers/usb/host/xhci-hub.c4
-rw-r--r--drivers/usb/host/xhci-mem.c96
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c404
-rw-r--r--drivers/usb/host/xhci-mtk.h17
-rw-r--r--drivers/usb/host/xhci-pci.c6
-rw-r--r--drivers/usb/host/xhci-plat.c23
-rw-r--r--drivers/usb/host/xhci-ring.c38
-rw-r--r--drivers/usb/host/xhci-trace.h23
-rw-r--r--drivers/usb/host/xhci.c40
-rw-r--r--drivers/usb/host/xhci.h45
-rw-r--r--drivers/usb/misc/Kconfig13
-rw-r--r--drivers/usb/misc/Makefile1
-rw-r--r--drivers/usb/misc/onboard_usb_hub.c7
-rw-r--r--drivers/usb/misc/onboard_usb_hub.h1
-rw-r--r--drivers/usb/misc/usb-ljca.c902
-rw-r--r--drivers/usb/misc/usbtest.c2
-rw-r--r--drivers/usb/mon/mon_main.c23
-rw-r--r--drivers/usb/mtu3/mtu3_plat.c18
-rw-r--r--drivers/usb/musb/da8xx.c1
-rw-r--r--drivers/usb/musb/musb_dsps.c2
-rw-r--r--drivers/usb/storage/uas-detect.h4
-rw-r--r--drivers/usb/storage/uas.c4
-rw-r--r--drivers/usb/storage/unusual_cypress.h2
-rw-r--r--drivers/usb/storage/usb.c20
-rw-r--r--drivers/usb/storage/usb.h4
-rw-r--r--drivers/usb/storage/usual-tables.c17
-rw-r--r--drivers/usb/typec/altmodes/displayport.c5
-rw-r--r--drivers/usb/typec/anx7411.c3
-rw-r--r--drivers/usb/typec/class.c108
-rw-r--r--drivers/usb/typec/class.h16
-rw-r--r--drivers/usb/typec/mux/Kconfig10
-rw-r--r--drivers/usb/typec/mux/Makefile1
-rw-r--r--drivers/usb/typec/mux/fsa4480.c71
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c25
-rw-r--r--drivers/usb/typec/mux/ptn36502.c444
-rw-r--r--drivers/usb/typec/pd.c10
-rw-r--r--drivers/usb/typec/port-mapper.c9
-rw-r--r--drivers/usb/typec/tcpm/tcpci_rt1711h.c46
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c9
-rw-r--r--drivers/usb/typec/tipd/core.c626
-rw-r--r--drivers/usb/typec/tipd/tps6598x.h36
-rw-r--r--drivers/usb/typec/tipd/trace.h92
-rw-r--r--drivers/usb/typec/ucsi/displayport.c2
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c4
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c54
-rw-r--r--drivers/usb/usbip/stub_dev.c9
-rw-r--r--drivers/usb/usbip/vhci_hcd.c44
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa.h32
-rw-r--r--drivers/vdpa/mlx5/core/mr.c209
-rw-r--r--drivers/vdpa/mlx5/core/resources.c6
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c137
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c52
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c40
-rw-r--r--drivers/vfio/cdx/main.c1
-rw-r--r--drivers/vhost/scsi.c2
-rw-r--r--drivers/vhost/vdpa.c79
-rw-r--r--drivers/video/console/Kconfig11
-rw-r--r--drivers/video/console/dummycon.c7
-rw-r--r--drivers/video/console/vgacon.c68
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/hyperv_fb.c8
-rw-r--r--drivers/video/fbdev/vga16fb.c9
-rw-r--r--drivers/virt/Kconfig6
-rw-r--r--drivers/virt/Makefile4
-rw-r--r--drivers/virt/coco/Kconfig14
-rw-r--r--drivers/virt/coco/Makefile8
-rw-r--r--drivers/virt/coco/sev-guest/Kconfig1
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c214
-rw-r--r--drivers/virt/coco/tdx-guest/Kconfig1
-rw-r--r--drivers/virt/coco/tdx-guest/tdx-guest.c229
-rw-r--r--drivers/virt/coco/tsm.c425
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/virtio/virtio_pci_modern.c36
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c6
-rw-r--r--drivers/virtio/virtio_vdpa.c2
-rw-r--r--drivers/vlynq/Kconfig21
-rw-r--r--drivers/vlynq/Makefile6
-rw-r--r--drivers/vlynq/vlynq.c799
-rw-r--r--drivers/w1/slaves/w1_ds250x.c1
-rw-r--r--drivers/watchdog/Kconfig6
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/apple_wdt.c25
-rw-r--r--drivers/watchdog/ar7_wdt.c315
-rw-r--r--drivers/watchdog/aspeed_wdt.c11
-rw-r--r--drivers/watchdog/at91sam9_wdt.c20
-rw-r--r--drivers/watchdog/ath79_wdt.c19
-rw-r--r--drivers/watchdog/gpio_wdt.c16
-rw-r--r--drivers/watchdog/imx7ulp_wdt.c8
-rw-r--r--drivers/watchdog/imx_sc_wdt.c5
-rw-r--r--drivers/watchdog/it87_wdt.c8
-rw-r--r--drivers/watchdog/ixp4xx_wdt.c28
-rw-r--r--drivers/watchdog/marvell_gti_wdt.c14
-rw-r--r--drivers/watchdog/mlx_wdt.c1
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c13
-rw-r--r--drivers/watchdog/sbsa_gwdt.c4
-rw-r--r--drivers/watchdog/st_lpc_wdt.c11
-rw-r--r--drivers/watchdog/sunplus_wdt.c17
-rw-r--r--drivers/watchdog/wdat_wdt.c2
-rw-r--r--fs/9p/v9fs.c1
-rw-r--r--fs/9p/v9fs_vfs.h1
-rw-r--r--fs/9p/xattr.c5
-rw-r--r--fs/affs/namei.c1
-rw-r--r--fs/bcachefs/Kconfig4
-rw-r--r--fs/bcachefs/Makefile1
-rw-r--r--fs/bcachefs/alloc_background.c185
-rw-r--r--fs/bcachefs/alloc_background.h11
-rw-r--r--fs/bcachefs/alloc_foreground.c38
-rw-r--r--fs/bcachefs/backpointers.c114
-rw-r--r--fs/bcachefs/backpointers.h11
-rw-r--r--fs/bcachefs/bbpos.h17
-rw-r--r--fs/bcachefs/bbpos_types.h18
-rw-r--r--fs/bcachefs/bcachefs.h23
-rw-r--r--fs/bcachefs/bcachefs_format.h122
-rw-r--r--fs/bcachefs/bkey.h22
-rw-r--r--fs/bcachefs/bkey_methods.c169
-rw-r--r--fs/bcachefs/bkey_methods.h15
-rw-r--r--fs/bcachefs/bkey_sort.c6
-rw-r--r--fs/bcachefs/btree_cache.c23
-rw-r--r--fs/bcachefs/btree_cache.h5
-rw-r--r--fs/bcachefs/btree_gc.c148
-rw-r--r--fs/bcachefs/btree_io.c221
-rw-r--r--fs/bcachefs/btree_iter.c71
-rw-r--r--fs/bcachefs/btree_iter.h8
-rw-r--r--fs/bcachefs/btree_key_cache.c8
-rw-r--r--fs/bcachefs/btree_locking.c44
-rw-r--r--fs/bcachefs/btree_locking.h18
-rw-r--r--fs/bcachefs/btree_trans_commit.c35
-rw-r--r--fs/bcachefs/btree_types.h49
-rw-r--r--fs/bcachefs/btree_update_interior.c20
-rw-r--r--fs/bcachefs/btree_update_interior.h6
-rw-r--r--fs/bcachefs/buckets.c182
-rw-r--r--fs/bcachefs/buckets.h15
-rw-r--r--fs/bcachefs/chardev.c4
-rw-r--r--fs/bcachefs/compress.c26
-rw-r--r--fs/bcachefs/compress.h36
-rw-r--r--fs/bcachefs/darray.h6
-rw-r--r--fs/bcachefs/data_update.c33
-rw-r--r--fs/bcachefs/data_update.h1
-rw-r--r--fs/bcachefs/debug.c8
-rw-r--r--fs/bcachefs/dirent.c76
-rw-r--r--fs/bcachefs/dirent.h2
-rw-r--r--fs/bcachefs/disk_groups.c146
-rw-r--r--fs/bcachefs/disk_groups.h7
-rw-r--r--fs/bcachefs/disk_groups_types.h18
-rw-r--r--fs/bcachefs/ec.c67
-rw-r--r--fs/bcachefs/ec.h4
-rw-r--r--fs/bcachefs/errcode.h4
-rw-r--r--fs/bcachefs/error.c32
-rw-r--r--fs/bcachefs/error.h90
-rw-r--r--fs/bcachefs/extents.c409
-rw-r--r--fs/bcachefs/extents.h51
-rw-r--r--fs/bcachefs/fs-common.c2
-rw-r--r--fs/bcachefs/fs-io-buffered.c19
-rw-r--r--fs/bcachefs/fs-io-direct.c1
-rw-r--r--fs/bcachefs/fs-ioctl.c4
-rw-r--r--fs/bcachefs/fs-ioctl.h28
-rw-r--r--fs/bcachefs/fs.c9
-rw-r--r--fs/bcachefs/fsck.c183
-rw-r--r--fs/bcachefs/fsck.h1
-rw-r--r--fs/bcachefs/inode.c265
-rw-r--r--fs/bcachefs/inode.h28
-rw-r--r--fs/bcachefs/io_misc.c15
-rw-r--r--fs/bcachefs/io_misc.h2
-rw-r--r--fs/bcachefs/io_read.c6
-rw-r--r--fs/bcachefs/io_write.c40
-rw-r--r--fs/bcachefs/journal.c19
-rw-r--r--fs/bcachefs/journal.h1
-rw-r--r--fs/bcachefs/journal_io.c295
-rw-r--r--fs/bcachefs/lru.c18
-rw-r--r--fs/bcachefs/lru.h2
-rw-r--r--fs/bcachefs/move.c407
-rw-r--r--fs/bcachefs/move.h63
-rw-r--r--fs/bcachefs/move_types.h8
-rw-r--r--fs/bcachefs/movinggc.c69
-rw-r--r--fs/bcachefs/opts.c15
-rw-r--r--fs/bcachefs/opts.h4
-rw-r--r--fs/bcachefs/printbuf.c4
-rw-r--r--fs/bcachefs/quota.c15
-rw-r--r--fs/bcachefs/quota.h2
-rw-r--r--fs/bcachefs/rebalance.c558
-rw-r--r--fs/bcachefs/rebalance.h9
-rw-r--r--fs/bcachefs/rebalance_types.h31
-rw-r--r--fs/bcachefs/recovery.c52
-rw-r--r--fs/bcachefs/recovery_types.h6
-rw-r--r--fs/bcachefs/reflink.c61
-rw-r--r--fs/bcachefs/reflink.h6
-rw-r--r--fs/bcachefs/replicas.c18
-rw-r--r--fs/bcachefs/sb-clean.c5
-rw-r--r--fs/bcachefs/sb-errors.c172
-rw-r--r--fs/bcachefs/sb-errors.h270
-rw-r--r--fs/bcachefs/sb-errors_types.h16
-rw-r--r--fs/bcachefs/sb-members.c159
-rw-r--r--fs/bcachefs/sb-members.h49
-rw-r--r--fs/bcachefs/six.c8
-rw-r--r--fs/bcachefs/snapshot.c202
-rw-r--r--fs/bcachefs/snapshot.h6
-rw-r--r--fs/bcachefs/subvolume.c37
-rw-r--r--fs/bcachefs/subvolume.h2
-rw-r--r--fs/bcachefs/super-io.c20
-rw-r--r--fs/bcachefs/super-io.h40
-rw-r--r--fs/bcachefs/super.c127
-rw-r--r--fs/bcachefs/super_types.h12
-rw-r--r--fs/bcachefs/sysfs.c45
-rw-r--r--fs/bcachefs/trace.c1
-rw-r--r--fs/bcachefs/trace.h90
-rw-r--r--fs/bcachefs/util.c18
-rw-r--r--fs/bcachefs/util.h21
-rw-r--r--fs/bcachefs/xattr.c60
-rw-r--r--fs/bcachefs/xattr.h2
-rw-r--r--fs/befs/linuxvfs.c1
-rw-r--r--fs/ceph/acl.c12
-rw-r--r--fs/ceph/addr.c299
-rw-r--r--fs/ceph/cache.c2
-rw-r--r--fs/ceph/caps.c763
-rw-r--r--fs/ceph/crypto.c43
-rw-r--r--fs/ceph/debugfs.c10
-rw-r--r--fs/ceph/dir.c242
-rw-r--r--fs/ceph/export.c49
-rw-r--r--fs/ceph/file.c282
-rw-r--r--fs/ceph/inode.c511
-rw-r--r--fs/ceph/ioctl.c21
-rw-r--r--fs/ceph/locks.c57
-rw-r--r--fs/ceph/mds_client.c678
-rw-r--r--fs/ceph/mds_client.h13
-rw-r--r--fs/ceph/mdsmap.c29
-rw-r--r--fs/ceph/mdsmap.h (renamed from include/linux/ceph/mdsmap.h)5
-rw-r--r--fs/ceph/metric.c5
-rw-r--r--fs/ceph/quota.c29
-rw-r--r--fs/ceph/snap.c192
-rw-r--r--fs/ceph/super.c99
-rw-r--r--fs/ceph/super.h23
-rw-r--r--fs/ceph/xattr.c108
-rw-r--r--fs/debugfs/file.c2
-rw-r--r--fs/efivarfs/inode.c4
-rw-r--r--fs/efivarfs/internal.h9
-rw-r--r--fs/efivarfs/super.c70
-rw-r--r--fs/efs/super.c1
-rw-r--r--fs/erofs/super.c1
-rw-r--r--fs/exfat/file.c1
-rw-r--r--fs/exfat/inode.c4
-rw-r--r--fs/exportfs/expfs.c57
-rw-r--r--fs/ext2/super.c1
-rw-r--r--fs/ext4/super.c1
-rw-r--r--fs/f2fs/compress.c63
-rw-r--r--fs/f2fs/data.c35
-rw-r--r--fs/f2fs/extent_cache.c53
-rw-r--r--fs/f2fs/file.c17
-rw-r--r--fs/f2fs/inode.c2
-rw-r--r--fs/f2fs/node.c20
-rw-r--r--fs/f2fs/segment.c92
-rw-r--r--fs/f2fs/segment.h4
-rw-r--r--fs/f2fs/super.c99
-rw-r--r--fs/f2fs/xattr.c20
-rw-r--r--fs/fat/nfs.c1
-rw-r--r--fs/fhandle.c6
-rw-r--r--fs/freevxfs/vxfs_super.c2
-rw-r--r--fs/fuse/inode.c7
-rw-r--r--fs/gfs2/acl.h8
-rw-r--r--fs/gfs2/aops.c72
-rw-r--r--fs/gfs2/aops.h6
-rw-r--r--fs/gfs2/bmap.c17
-rw-r--r--fs/gfs2/bmap.h38
-rw-r--r--fs/gfs2/dir.c2
-rw-r--r--fs/gfs2/dir.h38
-rw-r--r--fs/gfs2/file.c18
-rw-r--r--fs/gfs2/glock.c1
-rw-r--r--fs/gfs2/glock.h113
-rw-r--r--fs/gfs2/glops.c13
-rw-r--r--fs/gfs2/glops.h4
-rw-r--r--fs/gfs2/incore.h2
-rw-r--r--fs/gfs2/inode.c33
-rw-r--r--fs/gfs2/inode.h60
-rw-r--r--fs/gfs2/log.h46
-rw-r--r--fs/gfs2/lops.h22
-rw-r--r--fs/gfs2/meta_io.h20
-rw-r--r--fs/gfs2/ops_fstype.c28
-rw-r--r--fs/gfs2/quota.c31
-rw-r--r--fs/gfs2/quota.h38
-rw-r--r--fs/gfs2/recovery.h18
-rw-r--r--fs/gfs2/rgrp.c12
-rw-r--r--fs/gfs2/rgrp.h85
-rw-r--r--fs/gfs2/super.c29
-rw-r--r--fs/gfs2/super.h50
-rw-r--r--fs/gfs2/trans.h24
-rw-r--r--fs/gfs2/util.h8
-rw-r--r--fs/gfs2/xattr.c6
-rw-r--r--fs/gfs2/xattr.h12
-rw-r--r--fs/hugetlbfs/inode.c2
-rw-r--r--fs/jffs2/super.c1
-rw-r--r--fs/jfs/super.c1
-rw-r--r--fs/kernfs/file.c29
-rw-r--r--fs/libfs.c44
-rw-r--r--fs/mnt_idmapping.c2
-rw-r--r--fs/nfs/Kconfig2
-rw-r--r--fs/nfs/delegation.c7
-rw-r--r--fs/nfs/delegation.h1
-rw-r--r--fs/nfs/dir.c29
-rw-r--r--fs/nfs/nfs3proc.c3
-rw-r--r--fs/nfs/nfs4_fs.h2
-rw-r--r--fs/nfs/nfs4proc.c62
-rw-r--r--fs/nfs/pnfs.c8
-rw-r--r--fs/nfs/pnfs.h5
-rw-r--r--fs/nfs/proc.c3
-rw-r--r--fs/nfs/super.c8
-rw-r--r--fs/nfs/write.c2
-rw-r--r--fs/nfsd/export.c3
-rw-r--r--fs/notify/fanotify/fanotify_user.c4
-rw-r--r--fs/ntfs/namei.c1
-rw-r--r--fs/ntfs3/super.c1
-rw-r--r--fs/overlayfs/Makefile2
-rw-r--r--fs/overlayfs/copy_up.c142
-rw-r--r--fs/overlayfs/dir.c64
-rw-r--r--fs/overlayfs/export.c7
-rw-r--r--fs/overlayfs/file.c88
-rw-r--r--fs/overlayfs/inode.c165
-rw-r--r--fs/overlayfs/namei.c52
-rw-r--r--fs/overlayfs/overlayfs.h72
-rw-r--r--fs/overlayfs/params.c322
-rw-r--r--fs/overlayfs/params.h1
-rw-r--r--fs/overlayfs/readdir.c27
-rw-r--r--fs/overlayfs/super.c92
-rw-r--r--fs/overlayfs/util.c117
-rw-r--r--fs/overlayfs/xattrs.c271
-rw-r--r--fs/smb/client/cached_dir.c84
-rw-r--r--fs/smb/client/cifs_debug.c8
-rw-r--r--fs/smb/client/cifsfs.c1
-rw-r--r--fs/smb/client/cifspdu.h2
-rw-r--r--fs/smb/client/cifsproto.h7
-rw-r--r--fs/smb/client/connect.c44
-rw-r--r--fs/smb/client/export.c11
-rw-r--r--fs/smb/client/inode.c4
-rw-r--r--fs/smb/client/link.c16
-rw-r--r--fs/smb/client/ntlmssp.h4
-rw-r--r--fs/smb/client/sess.c13
-rw-r--r--fs/smb/client/smb2misc.c2
-rw-r--r--fs/smb/client/smb2ops.c8
-rw-r--r--fs/smb/client/transport.c11
-rw-r--r--fs/smb/common/smb2pdu.h24
-rw-r--r--fs/smb/server/smb_common.c11
-rw-r--r--fs/smb/server/smbacl.c29
-rw-r--r--fs/smb/server/vfs.c7
-rw-r--r--fs/squashfs/export.c1
-rw-r--r--fs/super.c1
-rw-r--r--fs/sysfs/file.c13
-rw-r--r--fs/tracefs/event_inode.c1093
-rw-r--r--fs/tracefs/inode.c2
-rw-r--r--fs/tracefs/internal.h54
-rw-r--r--fs/ubifs/dir.c4
-rw-r--r--fs/ubifs/file.c3
-rw-r--r--fs/ubifs/journal.c1
-rw-r--r--fs/ubifs/super.c5
-rw-r--r--fs/ubifs/tnc.c1
-rw-r--r--fs/ufs/super.c1
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c45
-rw-r--r--fs/xfs/libxfs/xfs_format.h34
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.c807
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.h383
-rw-r--r--fs/xfs/libxfs/xfs_sb.c2
-rw-r--r--fs/xfs/libxfs/xfs_sb.h2
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.c10
-rw-r--r--fs/xfs/libxfs/xfs_types.c4
-rw-r--r--fs/xfs/libxfs/xfs_types.h10
-rw-r--r--fs/xfs/scrub/bmap.c2
-rw-r--r--fs/xfs/scrub/fscounters.c2
-rw-r--r--fs/xfs/scrub/inode.c3
-rw-r--r--fs/xfs/scrub/rtbitmap.c28
-rw-r--r--fs/xfs/scrub/rtsummary.c72
-rw-r--r--fs/xfs/scrub/trace.c1
-rw-r--r--fs/xfs/scrub/trace.h15
-rw-r--r--fs/xfs/xfs_bmap_util.c74
-rw-r--r--fs/xfs/xfs_file.c63
-rw-r--r--fs/xfs/xfs_fsmap.c15
-rw-r--r--fs/xfs/xfs_inode.c24
-rw-r--r--fs/xfs/xfs_inode.h9
-rw-r--r--fs/xfs/xfs_inode_item.c3
-rw-r--r--fs/xfs/xfs_ioctl.c5
-rw-r--r--fs/xfs/xfs_linux.h12
-rw-r--r--fs/xfs/xfs_mount.h8
-rw-r--r--fs/xfs/xfs_ondisk.h4
-rw-r--r--fs/xfs/xfs_reflink.c4
-rw-r--r--fs/xfs/xfs_rtalloc.c638
-rw-r--r--fs/xfs/xfs_rtalloc.h94
-rw-r--r--fs/xfs/xfs_super.c3
-rw-r--r--fs/xfs/xfs_trans.c7
-rw-r--r--include/asm-generic/vmlinux.lds.h6
-rw-r--r--include/drm/amd_asic_type.h5
-rw-r--r--include/drm/i915_pxp_tee_interface.h6
-rw-r--r--include/dt-bindings/gpio/amlogic,t7-periphs-pinctrl.h179
-rw-r--r--include/dt-bindings/interconnect/qcom,sdx75.h102
-rw-r--r--include/dt-bindings/watchdog/aspeed-wdt.h92
-rw-r--r--include/linux/acpi.h47
-rw-r--r--include/linux/aer.h2
-rw-r--r--include/linux/amd-iommu.h120
-rw-r--r--include/linux/array_size.h13
-rw-r--r--include/linux/bitmap-str.h16
-rw-r--r--include/linux/bitmap.h87
-rw-r--r--include/linux/btf.h11
-rw-r--r--include/linux/buildid.h3
-rw-r--r--include/linux/cdx/cdx_bus.h41
-rw-r--r--include/linux/ceph/ceph_debug.h38
-rw-r--r--include/linux/ceph/ceph_fs.h10
-rw-r--r--include/linux/closure.h12
-rw-r--r--include/linux/comedi/comedi_8254.h51
-rw-r--r--include/linux/comedi/comedi_8255.h24
-rw-r--r--include/linux/comedi/comedidev.h2
-rw-r--r--include/linux/console.h138
-rw-r--r--include/linux/cpumask.h113
-rw-r--r--include/linux/crash_dump.h8
-rw-r--r--include/linux/device.h8
-rw-r--r--include/linux/dma-mapping.h19
-rw-r--r--include/linux/dmaengine.h2
-rw-r--r--include/linux/ethtool.h4
-rw-r--r--include/linux/exportfs.h50
-rw-r--r--include/linux/f2fs_fs.h70
-rw-r--r--include/linux/fw_table.h43
-rw-r--r--include/linux/fwnode.h2
-rw-r--r--include/linux/gpio/driver.h3
-rw-r--r--include/linux/hid-sensor-ids.h4
-rw-r--r--include/linux/i3c/device.h2
-rw-r--r--include/linux/i3c/master.h6
-rw-r--r--include/linux/idr.h6
-rw-r--r--include/linux/iio/iio.h12
-rw-r--r--include/linux/iio/sw_device.h3
-rw-r--r--include/linux/iio/sw_trigger.h3
-rw-r--r--include/linux/init.h3
-rw-r--r--include/linux/input.h2
-rw-r--r--include/linux/input/mt.h2
-rw-r--r--include/linux/iommu.h38
-rw-r--r--include/linux/ioport.h12
-rw-r--r--include/linux/kernel.h7
-rw-r--r--include/linux/kernfs.h1
-rw-r--r--include/linux/lsm_hook_defs.h4
-rw-r--r--include/linux/mei_cl_bus.h12
-rw-r--r--include/linux/mlx5/mlx5_ifc.h8
-rw-r--r--include/linux/mlx5/mlx5_ifc_vdpa.h7
-rw-r--r--include/linux/mnt_idmapping.h3
-rw-r--r--include/linux/mod_devicetable.h14
-rw-r--r--include/linux/mtd/cfi.h2
-rw-r--r--include/linux/mtd/qinfo.h2
-rw-r--r--include/linux/mtd/spinand.h1
-rw-r--r--include/linux/nfs_fs_sb.h1
-rw-r--r--include/linux/nfs_xdr.h2
-rw-r--r--include/linux/nvme-keyring.h10
-rw-r--r--include/linux/nvme.h2
-rw-r--r--include/linux/nvmem-provider.h4
-rw-r--r--include/linux/parport.h2
-rw-r--r--include/linux/pinctrl/consumer.h61
-rw-r--r--include/linux/pinctrl/machine.h2
-rw-r--r--include/linux/platform_data/cros_ec_commands.h66
-rw-r--r--include/linux/platform_data/shmob_drm.h57
-rw-r--r--include/linux/property.h3
-rw-r--r--include/linux/pwm.h28
-rw-r--r--include/linux/seq_buf.h28
-rw-r--r--include/linux/serial_core.h91
-rw-r--r--include/linux/slab.h2
-rw-r--r--include/linux/string.h1
-rw-r--r--include/linux/sunrpc/clnt.h1
-rw-r--r--include/linux/sysfs.h2
-rw-r--r--include/linux/tcp.h2
-rw-r--r--include/linux/thunderbolt.h2
-rw-r--r--include/linux/trace_events.h6
-rw-r--r--include/linux/trace_seq.h2
-rw-r--r--include/linux/tracefs.h73
-rw-r--r--include/linux/tsm.h69
-rw-r--r--include/linux/tty.h25
-rw-r--r--include/linux/usb.h16
-rw-r--r--include/linux/usb/chipidea.h1
-rw-r--r--include/linux/usb/composite.h8
-rw-r--r--include/linux/usb/gadget.h9
-rw-r--r--include/linux/usb/hcd.h17
-rw-r--r--include/linux/usb/ljca.h145
-rw-r--r--include/linux/usb/pd.h1
-rw-r--r--include/linux/usb/pd_vdo.h1
-rw-r--r--include/linux/usb/renesas_usbhs.h10
-rw-r--r--include/linux/usb/typec.h37
-rw-r--r--include/linux/usb/typec_dp.h28
-rw-r--r--include/linux/usb/typec_tbt.h1
-rw-r--r--include/linux/vdpa.h41
-rw-r--r--include/linux/virtio_pci_modern.h35
-rw-r--r--include/linux/vlynq.h149
-rw-r--r--include/media/cec.h2
-rw-r--r--include/media/ipu-bridge.h2
-rw-r--r--include/media/mipi-csi2.h1
-rw-r--r--include/media/v4l2-dev.h2
-rw-r--r--include/media/v4l2-device.h4
-rw-r--r--include/media/v4l2-event.h2
-rw-r--r--include/media/v4l2-mc.h6
-rw-r--r--include/media/v4l2-subdev.h13
-rw-r--r--include/media/videobuf-core.h233
-rw-r--r--include/media/videobuf-dma-contig.h30
-rw-r--r--include/media/videobuf-dma-sg.h102
-rw-r--r--include/media/videobuf-vmalloc.h43
-rw-r--r--include/net/flow.h2
-rw-r--r--include/net/netfilter/nf_conntrack_act_ct.h30
-rw-r--r--include/net/tcp_ao.h13
-rw-r--r--include/soc/tegra/mc.h26
-rw-r--r--include/uapi/linux/counter.h2
-rw-r--r--include/uapi/linux/gsmmux.h1
-rw-r--r--include/uapi/linux/idxd.h1
-rw-r--r--include/uapi/linux/iio/types.h4
-rw-r--r--include/uapi/linux/landlock.h55
-rw-r--r--include/uapi/linux/media-bus-format.h3
-rw-r--r--include/uapi/linux/mei.h14
-rw-r--r--include/uapi/linux/nfsd_netlink.h6
-rw-r--r--include/uapi/linux/npcm-video.h41
-rw-r--r--include/uapi/linux/psp-sev.h1
-rw-r--r--include/uapi/linux/serial_core.h21
-rw-r--r--include/uapi/linux/serial_reg.h1
-rw-r--r--include/uapi/linux/sev-guest.h4
-rw-r--r--include/uapi/linux/usb/raw_gadget.h14
-rw-r--r--include/uapi/linux/user_events.h11
-rw-r--r--include/uapi/linux/v4l2-controls.h6
-rw-r--r--include/uapi/linux/vhost.h8
-rw-r--r--include/uapi/linux/vhost_types.h7
-rw-r--r--include/uapi/linux/videodev2.h1
-rw-r--r--include/uapi/linux/virtio_config.h5
-rw-r--r--include/uapi/mtd/ubi-user.h4
-rw-r--r--io_uring/kbuf.c6
-rw-r--r--io_uring/kbuf.h13
-rw-r--r--io_uring/net.c24
-rw-r--r--io_uring/opdef.c8
-rw-r--r--io_uring/rw.c72
-rw-r--r--io_uring/rw.h2
-rw-r--r--kernel/bpf/bpf_iter.c6
-rw-r--r--kernel/bpf/cgroup_iter.c8
-rw-r--r--kernel/bpf/cpumask.c6
-rw-r--r--kernel/bpf/helpers.c39
-rw-r--r--kernel/bpf/map_iter.c6
-rw-r--r--kernel/bpf/task_iter.c24
-rw-r--r--kernel/bpf/verifier.c33
-rw-r--r--kernel/cgroup/rstat.c9
-rw-r--r--kernel/debug/debug_core.c3
-rw-r--r--kernel/debug/kdb/kdb_main.c7
-rw-r--r--kernel/dma/direct.c40
-rw-r--r--kernel/dma/direct.h1
-rw-r--r--kernel/dma/mapping.c22
-rw-r--r--kernel/dma/swiotlb.c28
-rw-r--r--kernel/livepatch/core.c2
-rw-r--r--kernel/printk/Makefile2
-rw-r--r--kernel/printk/internal.h31
-rw-r--r--kernel/printk/nbcon.c1029
-rw-r--r--kernel/printk/printk.c162
-rw-r--r--kernel/rcu/rcu.h2
-rw-r--r--kernel/rcu/tasks.h32
-rw-r--r--kernel/rcu/tree.c43
-rw-r--r--kernel/resource.c51
-rw-r--r--kernel/trace/bpf_trace.c6
-rw-r--r--kernel/trace/ring_buffer.c8
-rw-r--r--kernel/trace/trace.c90
-rw-r--r--kernel/trace/trace.h16
-rw-r--r--kernel/trace/trace_events.c362
-rw-r--r--kernel/trace/trace_events_filter.c3
-rw-r--r--kernel/trace/trace_events_hist.c11
-rw-r--r--kernel/trace/trace_events_synth.c2
-rw-r--r--kernel/trace/trace_events_user.c36
-rw-r--r--kernel/trace/trace_seq.c6
-rw-r--r--lib/Kconfig3
-rw-r--r--lib/Makefile4
-rw-r--r--lib/bitmap-str.c510
-rw-r--r--lib/bitmap.c680
-rw-r--r--lib/closure.c9
-rw-r--r--lib/cpumask.c17
-rw-r--r--lib/fonts/Kconfig2
-rw-r--r--lib/fw_table.c188
-rw-r--r--lib/seq_buf.c28
-rw-r--r--lib/test_bitmap.c24
-rw-r--r--lib/vsprintf.c25
-rw-r--r--mm/memblock.c2
-rw-r--r--net/9p/client.c8
-rw-r--r--net/9p/trans_fd.c21
-rw-r--r--net/9p/trans_xen.c15
-rw-r--r--net/bpf/test_run.c7
-rw-r--r--net/bridge/netfilter/ebtable_broute.c1
-rw-r--r--net/bridge/netfilter/ebtable_filter.c1
-rw-r--r--net/bridge/netfilter/ebtable_nat.c1
-rw-r--r--net/bridge/netfilter/ebtables.c1
-rw-r--r--net/bridge/netfilter/nf_conntrack_bridge.c1
-rw-r--r--net/core/filter.c13
-rw-r--r--net/core/page_pool.c6
-rw-r--r--net/core/xdp.c6
-rw-r--r--net/dccp/ipv4.c6
-rw-r--r--net/dccp/ipv6.c6
-rw-r--r--net/devlink/netlink_gen.c2
-rw-r--r--net/hsr/hsr_forward.c4
-rw-r--r--net/ipv4/fou_bpf.c6
-rw-r--r--net/ipv4/netfilter/iptable_nat.c1
-rw-r--r--net/ipv4/netfilter/iptable_raw.c1
-rw-r--r--net/ipv4/netfilter/nf_defrag_ipv4.c1
-rw-r--r--net/ipv4/netfilter/nf_reject_ipv4.c1
-rw-r--r--net/ipv4/syncookies.c2
-rw-r--r--net/ipv4/tcp_ao.c5
-rw-r--r--net/ipv4/tcp_input.c7
-rw-r--r--net/ipv4/tcp_output.c72
-rw-r--r--net/ipv4/tcp_sigpool.c8
-rw-r--r--net/ipv6/netfilter/ip6table_nat.c1
-rw-r--r--net/ipv6/netfilter/ip6table_raw.c1
-rw-r--r--net/ipv6/netfilter/nf_defrag_ipv6_hooks.c1
-rw-r--r--net/ipv6/netfilter/nf_reject_ipv6.c1
-rw-r--r--net/ipv6/syncookies.c7
-rw-r--r--net/iucv/iucv.c4
-rw-r--r--net/kcm/kcmsock.c1
-rw-r--r--net/llc/llc_input.c10
-rw-r--r--net/llc/llc_s_ac.c3
-rw-r--r--net/llc/llc_station.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_dh.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_fo.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_lblc.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_lblcr.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_lc.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_nq.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_ovf.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_pe_sip.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_rr.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_sed.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_sh.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_twos.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_wlc.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_wrr.c1
-rw-r--r--net/netfilter/nf_conntrack_bpf.c6
-rw-r--r--net/netfilter/nf_conntrack_broadcast.c1
-rw-r--r--net/netfilter/nf_conntrack_netlink.c1
-rw-r--r--net/netfilter/nf_conntrack_proto.c1
-rw-r--r--net/netfilter/nf_nat_bpf.c6
-rw-r--r--net/netfilter/nf_nat_core.c1
-rw-r--r--net/netfilter/nf_nat_redirect.c27
-rw-r--r--net/netfilter/nf_tables_api.c23
-rw-r--r--net/netfilter/nfnetlink_osf.c1
-rw-r--r--net/netfilter/nft_chain_nat.c1
-rw-r--r--net/netfilter/nft_fib.c1
-rw-r--r--net/netfilter/nft_fwd_netdev.c1
-rw-r--r--net/netfilter/xt_recent.c2
-rw-r--r--net/netlink/diag.c1
-rw-r--r--net/openvswitch/conntrack.c2
-rw-r--r--net/rxrpc/conn_object.c2
-rw-r--r--net/rxrpc/local_object.c2
-rw-r--r--net/sched/act_api.c2
-rw-r--r--net/sched/act_ct.c15
-rw-r--r--net/sched/act_gate.c1
-rw-r--r--net/sched/cls_api.c9
-rw-r--r--net/sched/cls_basic.c1
-rw-r--r--net/sched/cls_cgroup.c1
-rw-r--r--net/sched/cls_fw.c1
-rw-r--r--net/sched/cls_route.c1
-rw-r--r--net/sched/cls_u32.c1
-rw-r--r--net/sched/sch_cbs.c1
-rw-r--r--net/sched/sch_choke.c1
-rw-r--r--net/sched/sch_drr.c1
-rw-r--r--net/sched/sch_etf.c1
-rw-r--r--net/sched/sch_ets.c1
-rw-r--r--net/sched/sch_fifo.c1
-rw-r--r--net/sched/sch_fq.c10
-rw-r--r--net/sched/sch_gred.c1
-rw-r--r--net/sched/sch_hfsc.c1
-rw-r--r--net/sched/sch_htb.c1
-rw-r--r--net/sched/sch_ingress.c1
-rw-r--r--net/sched/sch_mqprio.c1
-rw-r--r--net/sched/sch_mqprio_lib.c1
-rw-r--r--net/sched/sch_multiq.c1
-rw-r--r--net/sched/sch_netem.c1
-rw-r--r--net/sched/sch_plug.c1
-rw-r--r--net/sched/sch_prio.c1
-rw-r--r--net/sched/sch_qfq.c1
-rw-r--r--net/sched/sch_red.c1
-rw-r--r--net/sched/sch_sfq.c1
-rw-r--r--net/sched/sch_skbprio.c1
-rw-r--r--net/sched/sch_taprio.c1
-rw-r--r--net/sched/sch_tbf.c1
-rw-r--r--net/sched/sch_teql.c1
-rw-r--r--net/smc/af_smc.c4
-rw-r--r--net/smc/smc.h5
-rw-r--r--net/smc/smc_cdc.c11
-rw-r--r--net/smc/smc_close.c5
-rw-r--r--net/socket.c8
-rw-r--r--net/sunrpc/clnt.c10
-rw-r--r--net/sunrpc/rpcb_clnt.c4
-rw-r--r--net/sunrpc/xprt.c4
-rw-r--r--net/sunrpc/xprtsock.c14
-rw-r--r--net/tipc/netlink.c4
-rw-r--r--net/vmw_vsock/virtio_transport_common.c18
-rw-r--r--net/xfrm/xfrm_interface_bpf.c6
-rw-r--r--rust/Makefile2
-rw-r--r--samples/landlock/sandboxer.c115
-rw-r--r--scripts/Makefile.btf19
-rw-r--r--scripts/Makefile.build2
-rw-r--r--scripts/Makefile.host2
-rw-r--r--scripts/Makefile.modfinal5
-rw-r--r--scripts/Makefile.package10
-rw-r--r--scripts/Makefile.userprogs6
-rw-r--r--scripts/Makefile.vdsoinst45
-rwxr-xr-xscripts/clang-tools/gen_compile_commands.py8
-rwxr-xr-xscripts/clang-tools/run-clang-tools.py32
-rwxr-xr-xscripts/dummy-tools/gcc10
-rw-r--r--scripts/mod/Makefile4
-rw-r--r--scripts/mod/devicetable-offsets.c7
-rw-r--r--scripts/mod/file2alias.c31
-rw-r--r--scripts/mod/modpost.c201
-rw-r--r--scripts/mod/modpost.h50
-rw-r--r--scripts/mod/symsearch.c199
-rwxr-xr-xscripts/package/mkspec6
-rwxr-xr-xscripts/pahole-flags.sh30
-rwxr-xr-xscripts/remove-stale-files2
-rw-r--r--security/apparmor/apparmorfs.c49
-rw-r--r--security/apparmor/audit.c75
-rw-r--r--security/apparmor/capability.c33
-rw-r--r--security/apparmor/domain.c193
-rw-r--r--security/apparmor/file.c217
-rw-r--r--security/apparmor/include/apparmor.h3
-rw-r--r--security/apparmor/include/audit.h44
-rw-r--r--security/apparmor/include/capability.h3
-rw-r--r--security/apparmor/include/file.h54
-rw-r--r--security/apparmor/include/ipc.h4
-rw-r--r--security/apparmor/include/lib.h2
-rw-r--r--security/apparmor/include/match.h6
-rw-r--r--security/apparmor/include/mount.h26
-rw-r--r--security/apparmor/include/net.h25
-rw-r--r--security/apparmor/include/perms.h7
-rw-r--r--security/apparmor/include/policy.h68
-rw-r--r--security/apparmor/include/policy_ns.h14
-rw-r--r--security/apparmor/include/resource.h3
-rw-r--r--security/apparmor/include/task.h11
-rw-r--r--security/apparmor/ipc.c53
-rw-r--r--security/apparmor/label.c46
-rw-r--r--security/apparmor/lib.c54
-rw-r--r--security/apparmor/lsm.c414
-rw-r--r--security/apparmor/match.c48
-rw-r--r--security/apparmor/mount.c186
-rw-r--r--security/apparmor/net.c65
-rw-r--r--security/apparmor/policy.c133
-rw-r--r--security/apparmor/policy_compat.c1
-rw-r--r--security/apparmor/policy_ns.c37
-rw-r--r--security/apparmor/policy_unpack.c152
-rw-r--r--security/apparmor/resource.c54
-rw-r--r--security/apparmor/task.c104
-rw-r--r--security/landlock/Kconfig1
-rw-r--r--security/landlock/Makefile2
-rw-r--r--security/landlock/fs.c232
-rw-r--r--security/landlock/limits.h6
-rw-r--r--security/landlock/net.c200
-rw-r--r--security/landlock/net.h33
-rw-r--r--security/landlock/ruleset.c405
-rw-r--r--security/landlock/ruleset.h185
-rw-r--r--security/landlock/setup.c2
-rw-r--r--security/landlock/syscalls.c158
-rw-r--r--tools/build/Makefile.build10
-rw-r--r--tools/iio/iio_event_monitor.c8
-rw-r--r--tools/iio/iio_generic_buffer.c13
-rw-r--r--tools/include/asm-generic/unaligned.h145
-rw-r--r--tools/lib/api/io.h1
-rw-r--r--tools/lib/perf/evlist.c9
-rw-r--r--tools/lib/perf/include/internal/evlist.h2
-rw-r--r--tools/lib/perf/include/internal/rc_check.h13
-rw-r--r--tools/net/ynl/generated/devlink-user.c2
-rw-r--r--tools/net/ynl/generated/nfsd-user.c120
-rw-r--r--tools/net/ynl/generated/nfsd-user.h44
-rwxr-xr-xtools/net/ynl/ynl-gen-c.py7
-rw-r--r--tools/perf/Documentation/perf-bench.txt19
-rw-r--r--tools/perf/Documentation/perf-kwork.txt38
-rw-r--r--tools/perf/Documentation/perf-lock.txt7
-rw-r--r--tools/perf/Documentation/perf-record.txt3
-rw-r--r--tools/perf/Makefile.config85
-rw-r--r--tools/perf/Makefile.perf18
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c35
-rw-r--r--tools/perf/arch/arm/util/pmu.c10
-rw-r--r--tools/perf/arch/arm64/util/arm-spe.c48
-rw-r--r--tools/perf/arch/arm64/util/pmu.c20
-rw-r--r--tools/perf/arch/powerpc/util/header.c2
-rw-r--r--tools/perf/arch/s390/util/pmu.c3
-rw-r--r--tools/perf/arch/x86/annotate/instructions.c9
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c42
-rw-r--r--tools/perf/arch/x86/util/pmu.c145
-rw-r--r--tools/perf/bench/sched-messaging.c102
-rw-r--r--tools/perf/bench/sched-pipe.c132
-rw-r--r--tools/perf/bench/uprobe.c1
-rw-r--r--tools/perf/builtin-buildid-cache.c6
-rw-r--r--tools/perf/builtin-kwork.c760
-rw-r--r--tools/perf/builtin-lock.c117
-rw-r--r--tools/perf/builtin-record.c92
-rw-r--r--tools/perf/builtin-report.c15
-rw-r--r--tools/perf/builtin-sched.c2
-rw-r--r--tools/perf/builtin-script.c2
-rw-r--r--tools/perf/builtin-stat.c2
-rw-r--r--tools/perf/builtin-version.c1
-rwxr-xr-xtools/perf/check-headers.sh1
-rw-r--r--tools/perf/perf-completion.sh10
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json418
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json266
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cmn/sys/metric.json74
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/branch.json8
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/bus.json18
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/cache.json155
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/exception.json45
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/fp_operation.json10
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/general.json10
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/instruction.json119
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/l1d_cache.json54
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/l1i_cache.json14
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/l2_cache.json50
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/l3_cache.json22
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/ll_cache.json10
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/memory.json21
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/metrics.json233
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/pipeline.json23
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/retired.json30
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/spe.json18
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/spec_operation.json110
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/stall.json30
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/sve.json30
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/tlb.json66
-rw-r--r--tools/perf/pmu-events/arch/arm64/mapfile.csv2
-rw-r--r--tools/perf/pmu-events/arch/nds32/mapfile.csv2
-rw-r--r--tools/perf/pmu-events/arch/powerpc/mapfile.csv8
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/datasource.json1787
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/metrics.json388
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/others.json10
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/pmc.json2
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/translation.json5
-rw-r--r--tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/frontend.json42
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/memory.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/pipeline.json22
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/uncore-interconnect.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/memory.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json16
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/uncore-interconnect.json66
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json14
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/uncore-interconnect.json18
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/uncore-interconnect.json18
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/cache.json888
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/floating-point.json193
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json389
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/memory.json343
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/other.json315
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json975
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cache.json5988
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cxl.json450
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-interconnect.json6199
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json3587
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-memory.json3308
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-power.json197
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/virtual-memory.json165
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/uncore-interconnect.json18
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/cache.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/frontend.json18
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/cache.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/uncore-interconnect.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json18
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/cache.json39
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/floating-point.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json55
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/uncore-cache.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/cache.json219
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/frontend.json27
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/memory.json183
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/other.json62
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json217
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/virtual-memory.json56
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv38
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/cache.json30
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/frontend.json29
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/memory.json37
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/other.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json68
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/uncore-other.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/cache.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/cache.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json39
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json7
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json14
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/cache.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/pipeline.json2
-rw-r--r--tools/perf/pmu-events/empty-pmu-events.c8
-rwxr-xr-xtools/perf/pmu-events/jevents.py124
-rw-r--r--tools/perf/pmu-events/metric.py3
-rw-r--r--tools/perf/tests/Build1
-rw-r--r--tools/perf/tests/attr/system-wide-dummy14
-rw-r--r--tools/perf/tests/attr/test-record-C04
-rw-r--r--tools/perf/tests/attr/test-record-dummy-C055
-rw-r--r--tools/perf/tests/builtin-test.c1
-rw-r--r--tools/perf/tests/code-reading.c10
-rw-r--r--tools/perf/tests/expr.c33
-rw-r--r--tools/perf/tests/hists_link.c4
-rw-r--r--tools/perf/tests/make7
-rw-r--r--tools/perf/tests/parse-events.c51
-rw-r--r--tools/perf/tests/pmu-events.c65
-rw-r--r--tools/perf/tests/pmu.c23
-rwxr-xr-xtools/perf/tests/shell/coresight/asm_pure_loop.sh4
-rwxr-xr-xtools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh4
-rwxr-xr-xtools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh4
-rwxr-xr-xtools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh4
-rwxr-xr-xtools/perf/tests/shell/coresight/unroll_loop_thread_10.sh4
-rw-r--r--tools/perf/tests/shell/lib/coresight.sh2
-rwxr-xr-xtools/perf/tests/shell/lock_contention.sh52
-rwxr-xr-xtools/perf/tests/shell/probe_vfs_getname.sh2
-rwxr-xr-xtools/perf/tests/shell/record+probe_libc_inet_pton.sh2
-rwxr-xr-xtools/perf/tests/shell/record+script_probe_vfs_getname.sh2
-rwxr-xr-xtools/perf/tests/shell/record.sh1
-rwxr-xr-xtools/perf/tests/shell/record_offcpu.sh2
-rwxr-xr-xtools/perf/tests/shell/record_sideband.sh58
-rwxr-xr-xtools/perf/tests/shell/stat+csv_output.sh1
-rwxr-xr-xtools/perf/tests/shell/stat+csv_summary.sh4
-rwxr-xr-xtools/perf/tests/shell/stat+shadow_stat.sh34
-rwxr-xr-xtools/perf/tests/shell/stat+std_output.sh1
-rwxr-xr-xtools/perf/tests/shell/stat_all_metricgroups.sh16
-rwxr-xr-xtools/perf/tests/shell/test_arm_coresight.sh4
-rwxr-xr-xtools/perf/tests/shell/test_intel_pt.sh1
-rwxr-xr-xtools/perf/tests/shell/trace+probe_vfs_getname.sh1
-rw-r--r--tools/perf/tests/tests.h1
-rw-r--r--tools/perf/tests/thread-maps-share.c9
-rw-r--r--tools/perf/tests/util.c31
-rw-r--r--tools/perf/ui/browsers/hists.c6
-rw-r--r--tools/perf/util/Build2
-rw-r--r--tools/perf/util/arm-spe.h4
-rw-r--r--tools/perf/util/bpf-filter.y4
-rw-r--r--tools/perf/util/bpf_counter.c5
-rw-r--r--tools/perf/util/bpf_kwork_top.c308
-rw-r--r--tools/perf/util/bpf_lock_contention.c51
-rw-r--r--tools/perf/util/bpf_off_cpu.c13
-rw-r--r--tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c16
-rw-r--r--tools/perf/util/bpf_skel/kwork_top.bpf.c338
-rw-r--r--tools/perf/util/bpf_skel/lock_contention.bpf.c145
-rw-r--r--tools/perf/util/bpf_skel/lock_data.h3
-rw-r--r--tools/perf/util/bpf_skel/vmlinux/.gitignore1
-rw-r--r--tools/perf/util/branch.c4
-rw-r--r--tools/perf/util/branch.h4
-rw-r--r--tools/perf/util/callchain.c76
-rw-r--r--tools/perf/util/callchain.h18
-rw-r--r--tools/perf/util/cgroup.c63
-rw-r--r--tools/perf/util/cgroup.h5
-rw-r--r--tools/perf/util/cs-etm.c106
-rw-r--r--tools/perf/util/cs-etm.h2
-rw-r--r--tools/perf/util/data.c12
-rw-r--r--tools/perf/util/dlfilter.c4
-rw-r--r--tools/perf/util/dso.c2
-rw-r--r--tools/perf/util/dso.h2
-rw-r--r--tools/perf/util/env.c6
-rw-r--r--tools/perf/util/evlist.c23
-rw-r--r--tools/perf/util/evlist.h1
-rw-r--r--tools/perf/util/evsel.c43
-rw-r--r--tools/perf/util/evsel.h5
-rw-r--r--tools/perf/util/expr.c2
-rw-r--r--tools/perf/util/expr.y2
-rw-r--r--tools/perf/util/header.c60
-rw-r--r--tools/perf/util/hisi-ptt.c4
-rw-r--r--tools/perf/util/hist.c32
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c43
-rw-r--r--tools/perf/util/intel-pt.c2
-rw-r--r--tools/perf/util/intel-pt.h3
-rw-r--r--tools/perf/util/jitdump.c1
-rw-r--r--tools/perf/util/kwork.h61
-rw-r--r--tools/perf/util/lock-contention.h10
-rw-r--r--tools/perf/util/machine.c37
-rw-r--r--tools/perf/util/machine.h1
-rw-r--r--tools/perf/util/map_symbol.c15
-rw-r--r--tools/perf/util/map_symbol.h4
-rw-r--r--tools/perf/util/mem-events.c3
-rw-r--r--tools/perf/util/metricgroup.c2
-rw-r--r--tools/perf/util/parse-events.c213
-rw-r--r--tools/perf/util/parse-events.h34
-rw-r--r--tools/perf/util/parse-events.l6
-rw-r--r--tools/perf/util/parse-events.y68
-rw-r--r--tools/perf/util/pfm.c15
-rw-r--r--tools/perf/util/pmu.c173
-rw-r--r--tools/perf/util/pmu.h34
-rw-r--r--tools/perf/util/pmu.y4
-rw-r--r--tools/perf/util/pmus.c18
-rw-r--r--tools/perf/util/print-events.c28
-rw-r--r--tools/perf/util/python-ext-sources1
-rw-r--r--tools/perf/util/python.c2
-rw-r--r--tools/perf/util/rlimit.c28
-rw-r--r--tools/perf/util/rlimit.h11
-rw-r--r--tools/perf/util/rwsem.c34
-rw-r--r--tools/perf/util/rwsem.h11
-rw-r--r--tools/perf/util/sort.c2
-rw-r--r--tools/perf/util/string.c48
-rw-r--r--tools/perf/util/string2.h1
-rw-r--r--tools/perf/util/svghelper.c5
-rw-r--r--tools/perf/util/symbol-elf.c4
-rw-r--r--tools/perf/util/symbol.c15
-rw-r--r--tools/perf/util/trace-event-info.c3
-rw-r--r--tools/power/cpupower/man/cpupower-powercap-info.12
-rw-r--r--tools/scripts/Makefile.include10
-rw-r--r--tools/testing/cxl/test/cxl.c2
-rw-r--r--tools/testing/cxl/test/mem.c83
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c6
-rw-r--r--tools/testing/selftests/bpf/map_tests/map_percpu_stats.c20
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_iter.c33
-rw-r--r--tools/testing/selftests/bpf/prog_tests/iters.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_bpffs.c11
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verifier.c2
-rw-r--r--tools/testing/selftests/bpf/progs/iters_css_task.c55
-rw-r--r--tools/testing/selftests/bpf/progs/iters_task_failure.c4
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_precision.c93
-rw-r--r--tools/testing/selftests/bpf/verifier/bpf_st_mem.c32
-rw-r--r--tools/testing/selftests/bpf/xdp_hw_metadata.c2
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc4
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc4
-rw-r--r--tools/testing/selftests/landlock/base_test.c2
-rw-r--r--tools/testing/selftests/landlock/common.h13
-rw-r--r--tools/testing/selftests/landlock/config4
-rw-r--r--tools/testing/selftests/landlock/fs_test.c69
-rw-r--r--tools/testing/selftests/landlock/net_test.c1738
-rwxr-xr-xtools/testing/selftests/net/pmtu.sh2
-rw-r--r--tools/testing/selftests/riscv/hwprobe/Makefile9
-rw-r--r--tools/testing/selftests/riscv/hwprobe/cbo.c228
-rw-r--r--tools/testing/selftests/riscv/hwprobe/hwprobe.c64
-rw-r--r--tools/testing/selftests/riscv/hwprobe/hwprobe.h15
-rw-r--r--tools/testing/selftests/user_events/abi_test.c55
-rw-r--r--tools/testing/selftests/user_events/dyn_test.c54
-rw-r--r--tools/testing/vsock/util.c87
-rw-r--r--tools/testing/vsock/util.h3
-rw-r--r--tools/testing/vsock/vsock_test.c50
-rw-r--r--tools/tracing/rtla/src/utils.c2
-rw-r--r--tools/verification/dot2/dot2k2
3776 files changed, 147553 insertions, 103428 deletions
diff --git a/.gitignore b/.gitignore
index 0bbae167bf93..98274e1160d7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -74,7 +74,6 @@ modules.order
#
# RPM spec file (make rpm-pkg)
#
-/kernel.spec
/rpmbuild/
#
diff --git a/Documentation/ABI/stable/sysfs-driver-dma-idxd b/Documentation/ABI/stable/sysfs-driver-dma-idxd
index 825e619250bf..f2ec42949a54 100644
--- a/Documentation/ABI/stable/sysfs-driver-dma-idxd
+++ b/Documentation/ABI/stable/sysfs-driver-dma-idxd
@@ -270,6 +270,12 @@ Description: Shows the operation capability bits displayed in bitmap format
correlates to the operations allowed. It's visible only
on platforms that support the capability.
+What: /sys/bus/dsa/devices/wq<m>.<n>/driver_name
+Date: Sept 8, 2023
+KernelVersion: 6.7.0
+Contact: dmaengine@vger.kernel.org
+Description:	Name of the driver to be bound to the wq.
+
What: /sys/bus/dsa/devices/engine<m>.<n>/group_id
Date: Oct 25, 2019
KernelVersion: 5.6.0
diff --git a/Documentation/ABI/testing/configfs-tsm b/Documentation/ABI/testing/configfs-tsm
new file mode 100644
index 000000000000..dd24202b5ba5
--- /dev/null
+++ b/Documentation/ABI/testing/configfs-tsm
@@ -0,0 +1,82 @@
+What: /sys/kernel/config/tsm/report/$name/inblob
+Date: September, 2023
+KernelVersion: v6.7
+Contact: linux-coco@lists.linux.dev
+Description:
+ (WO) Up to 64 bytes of user specified binary data. For replay
+ protection this should include a nonce, but the kernel does not
+ place any restrictions on the content.
+
+What: /sys/kernel/config/tsm/report/$name/outblob
+Date: September, 2023
+KernelVersion: v6.7
+Contact: linux-coco@lists.linux.dev
+Description:
+ (RO) Binary attestation report generated from @inblob and other
+		options. The format of the report is implementation specific
+ where the implementation is conveyed via the @provider
+ attribute.
+
+What: /sys/kernel/config/tsm/report/$name/auxblob
+Date: October, 2023
+KernelVersion: v6.7
+Contact: linux-coco@lists.linux.dev
+Description:
+		(RO) Optional supplemental data that a TSM may emit. Visibility
+		of this attribute depends on the TSM, and it may be empty if no
+		auxiliary data is available.
+
+ When @provider is "sev_guest" this file contains the
+ "cert_table" from SEV-ES Guest-Hypervisor Communication Block
+ Standardization v2.03 Section 4.1.8.1 MSG_REPORT_REQ.
+ https://www.amd.com/content/dam/amd/en/documents/epyc-technical-docs/specifications/56421.pdf
+
+What: /sys/kernel/config/tsm/report/$name/provider
+Date: September, 2023
+KernelVersion: v6.7
+Contact: linux-coco@lists.linux.dev
+Description:
+ (RO) A name for the format-specification of @outblob like
+ "sev_guest" [1] or "tdx_guest" [2] in the near term, or a
+ common standard format in the future.
+
+ [1]: SEV Secure Nested Paging Firmware ABI Specification
+ Revision 1.55 Table 22
+ https://www.amd.com/content/dam/amd/en/documents/epyc-technical-docs/specifications/56860.pdf
+
+ [2]: Intel® Trust Domain Extensions Data Center Attestation
+ Primitives : Quote Generation Library and Quote Verification
+ Library Revision 0.8 Appendix 4,5
+ https://download.01.org/intel-sgx/latest/dcap-latest/linux/docs/Intel_TDX_DCAP_Quoting_Library_API.pdf
+
+What: /sys/kernel/config/tsm/report/$name/generation
+Date: September, 2023
+KernelVersion: v6.7
+Contact: linux-coco@lists.linux.dev
+Description:
+ (RO) The value in this attribute increments each time @inblob or
+ any option is written. Userspace can detect conflicts by
+ checking generation before writing to any attribute and making
+ sure the number of writes matches expectations after reading
+ @outblob, or it can prevent conflicts by creating a report
+ instance per requesting context.
+
+What: /sys/kernel/config/tsm/report/$name/privlevel
+Date: September, 2023
+KernelVersion: v6.7
+Contact: linux-coco@lists.linux.dev
+Description:
+		(WO) Attribute is visible if a TSM implementation provider
+		supports the concept of attestation reports for TVMs running at
+		different privilege levels, like SEV-SNP "VMPL". Specify the
+		privilege level via this attribute. The minimum acceptable
+ value is conveyed via @privlevel_floor and the maximum
+ acceptable value is TSM_PRIVLEVEL_MAX (3).
+
+What: /sys/kernel/config/tsm/report/$name/privlevel_floor
+Date: September, 2023
+KernelVersion: v6.7
+Contact: linux-coco@lists.linux.dev
+Description:
+ (RO) Indicates the minimum permissible value that can be written
+ to @privlevel.
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uac2 b/Documentation/ABI/testing/configfs-usb-gadget-uac2
index 3371c39f651d..a2bf4fd82a5b 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-uac2
+++ b/Documentation/ABI/testing/configfs-usb-gadget-uac2
@@ -35,4 +35,6 @@ Description:
req_number the number of pre-allocated requests
for both capture and playback
function_name name of the interface
+ c_terminal_type code of the capture terminal type
+ p_terminal_type code of the playback terminal type
===================== =======================================
diff --git a/Documentation/ABI/testing/sysfs-bus-cdx b/Documentation/ABI/testing/sysfs-bus-cdx
index 7af477f49998..8c067ff99e54 100644
--- a/Documentation/ABI/testing/sysfs-bus-cdx
+++ b/Documentation/ABI/testing/sysfs-bus-cdx
@@ -28,14 +28,57 @@ Description:
of a device manufacturer.
Combination of Vendor ID and Device ID identifies a device.
+What: /sys/bus/cdx/devices/.../subsystem_vendor
+Date: July 2023
+Contact: puneet.gupta@amd.com
+Description:
+ Subsystem Vendor ID for this CDX device, in hexadecimal.
+		Subsystem Vendor ID is a 16-bit identifier specific to the
+ card manufacturer.
+
+What: /sys/bus/cdx/devices/.../subsystem_device
+Date: July 2023
+Contact: puneet.gupta@amd.com
+Description:
+		Subsystem Device ID for this CDX device, in hexadecimal.
+		Subsystem Device ID is a 16-bit identifier specific to the
+ card manufacturer.
+
+What: /sys/bus/cdx/devices/.../class
+Date: July 2023
+Contact: puneet.gupta@amd.com
+Description:
+ This file contains the class of the CDX device, in hexadecimal.
+		Class is a 24-bit identifier that specifies the functionality of the device.
+
+What: /sys/bus/cdx/devices/.../revision
+Date: July 2023
+Contact: puneet.gupta@amd.com
+Description:
+ This file contains the revision field of the CDX device, in hexadecimal.
+		Revision is an 8-bit revision identifier of the device.
+
+What: /sys/bus/cdx/devices/.../enable
+Date: October 2023
+Contact: abhijit.gangurde@amd.com
+Description:
+		The CDX bus should be disabled before updating the devices in the FPGA.
+		Writing n/0/off will attempt to disable the CDX bus, and writing
+		y/1/on will attempt to enable the CDX bus. Reading this file
+		gives the current state of the bus: 1 for enabled and 0 for disabled.
+
+ For example::
+
+ # echo 1 > /sys/bus/cdx/.../enable
+
What: /sys/bus/cdx/devices/.../reset
Date: March 2023
Contact: nipun.gupta@amd.com
Description:
- Writing y/1/on to this file resets the CDX device.
- On resetting the device, the corresponding driver is notified
- twice, once before the device is being reset, and again after
- the reset has been complete.
+ Writing y/1/on to this file resets the CDX device or all devices
+ on the bus. On resetting the device, the corresponding driver is
+ notified twice, once before the device is being reset, and again
+ after the reset has been complete.
For example::
@@ -54,3 +97,18 @@ Description:
For example::
# echo 1 > /sys/bus/cdx/devices/.../remove
+
+What: /sys/bus/cdx/devices/.../modalias
+Date: July 2023
+Contact: nipun.gupta@amd.com
+Description:
+ This attribute indicates the CDX ID of the device.
+ That is in the format:
+ cdx:vXXXXdXXXXsvXXXXsdXXXXcXXXXXX,
+ where:
+
+ - vXXXX contains the vendor ID;
+ - dXXXX contains the device ID;
+ - svXXXX contains the subsystem vendor ID;
+ - sdXXXX contains the subsystem device ID;
+ - cXXXXXX contains the device class.
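
A hedged sketch of the enable flow described above, assuming a single CDX bus
(the elided device path is as in the attribute documentation)::

    # quiesce the bus before updating the devices in the FPGA
    echo 0 > /sys/bus/cdx/devices/.../enable
    # ... update the FPGA image ...
    echo 1 > /sys/bus/cdx/devices/.../enable
    cat /sys/bus/cdx/devices/.../enable    # 1 = enabled, 0 = disabled
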
diff --git a/Documentation/ABI/testing/sysfs-bus-cxl b/Documentation/ABI/testing/sysfs-bus-cxl
index 087f762ebfd5..e76c3600607f 100644
--- a/Documentation/ABI/testing/sysfs-bus-cxl
+++ b/Documentation/ABI/testing/sysfs-bus-cxl
@@ -178,6 +178,21 @@ Description:
hardware decoder target list.
+What: /sys/bus/cxl/devices/portX/decoders_committed
+Date: October, 2023
+KernelVersion: v6.7
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RO) A memory device is considered active when any of its
+ decoders are in the "committed" state (See CXL 3.0 8.2.4.19.7
+ CXL HDM Decoder n Control Register). Hotplug and destructive
+ operations like "sanitize" are blocked while device is actively
+ decoding a Host Physical Address range. Note that this number
+ may be elevated without any regionX objects active or even
+ enumerated, as this may be due to decoders established by
+		platform firmware or a previous kernel (kexec).
+
+
What: /sys/bus/cxl/devices/decoderX.Y
Date: June, 2021
KernelVersion: v5.14
@@ -369,6 +384,21 @@ Description:
provided it is currently idle / not bound to a driver.
+What: /sys/bus/cxl/devices/decoderX.Y/qos_class
+Date: May, 2023
+KernelVersion: v6.5
+Contact: linux-cxl@vger.kernel.org
+Description:
+		(RO) For CXL host platforms that support "QoS Telemetry" this
+ root-decoder-only attribute conveys a platform specific cookie
+ that identifies a QoS performance class for the CXL Window.
+ This class-id can be compared against a similar "qos_class"
+ published for each memory-type that an endpoint supports. While
+ it is not required that endpoints map their local memory-class
+ to a matching platform class, mismatches are not recommended and
+ there are platform specific side-effects that may result.
+
+
What: /sys/bus/cxl/devices/regionZ/uuid
Date: May, 2022
KernelVersion: v6.0
diff --git a/Documentation/ABI/testing/sysfs-bus-i3c b/Documentation/ABI/testing/sysfs-bus-i3c
index 1f4a2662335b..e5248fd67a56 100644
--- a/Documentation/ABI/testing/sysfs-bus-i3c
+++ b/Documentation/ABI/testing/sysfs-bus-i3c
@@ -67,7 +67,7 @@ What: /sys/bus/i3c/devices/i3c-<bus-id>/pid
KernelVersion: 5.0
Contact: linux-i3c@vger.kernel.org
Description:
- PID stands for Provisional ID and is used to uniquely identify
+ PID stands for Provisioned ID and is used to uniquely identify
a device on a bus. This PID contains information about the
vendor, the part and an instance ID so that several devices of
the same type can be connected on the same bus.
@@ -123,7 +123,7 @@ What: /sys/bus/i3c/devices/i3c-<bus-id>/<bus-id>-<device-pid>/pid
KernelVersion: 5.0
Contact: linux-i3c@vger.kernel.org
Description:
- PID stands for Provisional ID and is used to uniquely identify
+ PID stands for Provisioned ID and is used to uniquely identify
a device on a bus. This PID contains information about the
vendor, the part and an instance ID so that several devices of
the same type can be connected on the same bus.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index a2854dc9a839..19cde14f3869 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -279,6 +279,35 @@ Description:
but should match other such assignments on device).
Units after application of scale and offset are m/s^2.
+What: /sys/bus/iio/devices/iio:deviceX/in_deltaangl_x_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_deltaangl_y_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_deltaangl_z_raw
+KernelVersion: 6.5
+Contact: linux-iio@vger.kernel.org
+Description:
+ Angular displacement between two consecutive samples on x, y or
+ z (may be arbitrarily assigned but should match other such
+ assignments on device).
+ In order to compute the total angular displacement during a
+ desired period of time, the application should sum-up the delta
+ angle samples acquired during that time.
+ Units after application of scale and offset are radians.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_deltavelocity_x_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_deltavelocity_y_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_deltavelocity_z_raw
+KernelVersion: 6.5
+Contact: linux-iio@vger.kernel.org
+Description:
+ The linear velocity change between two consecutive samples on x,
+ y or z (may be arbitrarily assigned but should match other such
+ assignments on device).
+ In order to compute the total linear velocity change during a
+ desired period of time, the application should sum-up the delta
+ velocity samples acquired during that time.
+ Units after application of scale and offset are meters per
+ second.
+
What: /sys/bus/iio/devices/iio:deviceX/in_angl_raw
What: /sys/bus/iio/devices/iio:deviceX/in_anglY_raw
KernelVersion: 4.17
@@ -461,6 +490,8 @@ What: /sys/bus/iio/devices/iio:deviceX/in_humidityrelative_scale
What: /sys/bus/iio/devices/iio:deviceX/in_velocity_sqrt(x^2+y^2+z^2)_scale
What: /sys/bus/iio/devices/iio:deviceX/in_illuminance_scale
What: /sys/bus/iio/devices/iio:deviceX/in_countY_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_deltaangl_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_deltavelocity_scale
What: /sys/bus/iio/devices/iio:deviceX/in_angl_scale
What: /sys/bus/iio/devices/iio:deviceX/in_intensity_x_scale
What: /sys/bus/iio/devices/iio:deviceX/in_intensity_y_scale
@@ -1332,6 +1363,12 @@ Description:
What: /sys/.../iio:deviceX/bufferY/in_accel_x_en
What: /sys/.../iio:deviceX/bufferY/in_accel_y_en
What: /sys/.../iio:deviceX/bufferY/in_accel_z_en
+What: /sys/.../iio:deviceX/bufferY/in_deltaangl_x_en
+What: /sys/.../iio:deviceX/bufferY/in_deltaangl_y_en
+What: /sys/.../iio:deviceX/bufferY/in_deltaangl_z_en
+What: /sys/.../iio:deviceX/bufferY/in_deltavelocity_x_en
+What: /sys/.../iio:deviceX/bufferY/in_deltavelocity_y_en
+What: /sys/.../iio:deviceX/bufferY/in_deltavelocity_z_en
What: /sys/.../iio:deviceX/bufferY/in_anglvel_x_en
What: /sys/.../iio:deviceX/bufferY/in_anglvel_y_en
What: /sys/.../iio:deviceX/bufferY/in_anglvel_z_en
@@ -1362,6 +1399,8 @@ Description:
Scan element control for triggered data capture.
What: /sys/.../iio:deviceX/bufferY/in_accel_type
+What: /sys/.../iio:deviceX/bufferY/in_deltaangl_type
+What: /sys/.../iio:deviceX/bufferY/in_deltavelocity_type
What: /sys/.../iio:deviceX/bufferY/in_anglvel_type
What: /sys/.../iio:deviceX/bufferY/in_magn_type
What: /sys/.../iio:deviceX/bufferY/in_incli_type
@@ -1416,6 +1455,12 @@ What: /sys/.../iio:deviceX/bufferY/in_voltage_q_index
What: /sys/.../iio:deviceX/bufferY/in_accel_x_index
What: /sys/.../iio:deviceX/bufferY/in_accel_y_index
What: /sys/.../iio:deviceX/bufferY/in_accel_z_index
+What: /sys/.../iio:deviceX/bufferY/in_deltaangl_x_index
+What: /sys/.../iio:deviceX/bufferY/in_deltaangl_y_index
+What: /sys/.../iio:deviceX/bufferY/in_deltaangl_z_index
+What: /sys/.../iio:deviceX/bufferY/in_deltavelocity_x_index
+What: /sys/.../iio:deviceX/bufferY/in_deltavelocity_y_index
+What: /sys/.../iio:deviceX/bufferY/in_deltavelocity_z_index
What: /sys/.../iio:deviceX/bufferY/in_anglvel_x_index
What: /sys/.../iio:deviceX/bufferY/in_anglvel_y_index
What: /sys/.../iio:deviceX/bufferY/in_anglvel_z_index
@@ -2179,3 +2224,33 @@ Contact: linux-iio@vger.kernel.org
Description:
Number of conditions that must occur, during a running
period, before an event is generated.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_colortemp_raw
+KernelVersion: 6.7
+Contact: linux-iio@vger.kernel.org
+Description:
+ Represents light color temperature, which measures light color
+ temperature in Kelvin.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_chromaticity_x_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_chromaticity_y_raw
+KernelVersion: 6.7
+Contact: linux-iio@vger.kernel.org
+Description:
+ The x and y light color coordinate on the CIE 1931 chromaticity
+ diagram.
+
+What: /sys/bus/iio/devices/iio:deviceX/events/in_altvoltageY_mag_either_label
+What: /sys/bus/iio/devices/iio:deviceX/events/in_altvoltageY_mag_rising_label
+What: /sys/bus/iio/devices/iio:deviceX/events/in_altvoltageY_thresh_falling_label
+What: /sys/bus/iio/devices/iio:deviceX/events/in_altvoltageY_thresh_rising_label
+What: /sys/bus/iio/devices/iio:deviceX/events/in_anglvelY_mag_rising_label
+What: /sys/bus/iio/devices/iio:deviceX/events/in_anglY_thresh_rising_label
+What: /sys/bus/iio/devices/iio:deviceX/events/in_phaseY_mag_rising_label
+KernelVersion: 6.7
+Contact: linux-iio@vger.kernel.org
+Description:
+ Optional symbolic label to a device channel event.
+ If a label is defined for this event add that to the event
+ specific attributes. This is useful for userspace to be able to
+ better identify an individual event.
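
A small shell sketch of applying the documented scale to one delta-angle
sample (the device index, channel and bc arithmetic are illustrative; summing
the scaled samples over a time window gives the total angular displacement)::

    dev=/sys/bus/iio/devices/iio:deviceX
    raw=$(cat $dev/in_deltaangl_x_raw)
    scale=$(cat $dev/in_deltaangl_scale)
    echo "$raw * $scale" | bc -l    # delta angle of this sample, in radians
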
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-adc-mcp3564 b/Documentation/ABI/testing/sysfs-bus-iio-adc-mcp3564
new file mode 100644
index 000000000000..b168aa44b233
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-adc-mcp3564
@@ -0,0 +1,53 @@
+What: /sys/bus/iio/devices/iio:deviceX/boost_current_gain
+KernelVersion: 6.4
+Contact: linux-iio@vger.kernel.org
+Description:
+ This attribute is used to set the gain of the biasing current
+ circuit of the Delta-Sigma modulator. The different BOOST
+ settings are applied to the entire modulator circuit, including
+ the voltage reference buffers.
+
+What: /sys/bus/iio/devices/iio:deviceX/boost_current_gain_available
+KernelVersion: 6.4
+Contact: linux-iio@vger.kernel.org
+Description:
+ Reading returns a list with the possible gain values for
+ the current biasing circuit of the Delta-Sigma modulator.
+
+What: /sys/bus/iio/devices/iio:deviceX/auto_zeroing_mux_enable
+KernelVersion: 6.4
+Contact: linux-iio@vger.kernel.org
+Description:
+ This attribute is used to enable the analog input multiplexer
+ auto-zeroing algorithm (the input multiplexer and the ADC
+ include an offset cancellation algorithm that cancels the offset
+ contribution of the ADC). When the offset cancellation algorithm
+ is enabled, ADC takes two conversions, one with the differential
+ input as VIN+/VIN-, one with VIN+/VIN- inverted. In this case the
+ conversion time is multiplied by two compared to the default
+ case where the algorithm is disabled. This technique allows the
+ cancellation of the ADC offset error and the achievement of
+ ultra-low offset without any digital calibration. The resulting
+ offset is the residue of the difference between the two
+ conversions, which is on the order of magnitude of the noise
+ floor. This offset is effectively canceled at every conversion,
+ so the residual offset error temperature drift is extremely low.
+ Write '1' to enable it, write '0' to disable it.
+
+What: /sys/bus/iio/devices/iio:deviceX/auto_zeroing_ref_enable
+KernelVersion: 6.4
+Contact: linux-iio@vger.kernel.org
+Description:
+ This attribute is used to enable the chopping algorithm for the
+ internal voltage reference buffer. This setting has no effect
+ when external voltage reference is selected.
+ Internal voltage reference buffer injects a certain quantity of
+ 1/f noise into the system that can be modulated with the
+ incoming input signals and can limit the SNR performance at
+ higher Oversampling Ratio values (over 256). To overcome this
+ limitation, the buffer includes an auto-zeroing algorithm that
+ greatly reduces (cancels out) the 1/f noise and cancels the
+ offset value of the reference buffer. As a result, the SNR of
+ the system is not affected by this 1/f noise component of the
+ reference buffer, even at maximum oversampling ratio values.
+ Write '1' to enable it, write '0' to disable it.
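
For example, a minimal sequence to inspect and enable the two auto-zeroing
features described above (iio:deviceX is a placeholder for the MCP356x
instance)::

    dev=/sys/bus/iio/devices/iio:deviceX
    cat $dev/boost_current_gain_available
    echo 1 > $dev/auto_zeroing_mux_enable   # doubles conversion time, cancels ADC offset
    echo 1 > $dev/auto_zeroing_ref_enable   # only meaningful with the internal reference
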
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-resolver-ad2s1210 b/Documentation/ABI/testing/sysfs-bus-iio-resolver-ad2s1210
new file mode 100644
index 000000000000..f92c79342b93
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-resolver-ad2s1210
@@ -0,0 +1,27 @@
+What: /sys/bus/iio/devices/iio:deviceX/events/in_altvoltage0_mag_rising_reset_max
+KernelVersion: 6.7
+Contact: linux-iio@vger.kernel.org
+Description:
+ Reading returns the current Degradation of Signal Reset Maximum
+ Threshold value in millivolts. Writing sets the value.
+
+What: /sys/bus/iio/devices/iio:deviceX/events/in_altvoltage0_mag_rising_reset_max_available
+KernelVersion: 6.7
+Contact: linux-iio@vger.kernel.org
+Description:
+ Reading returns the allowable voltage range for
+ in_altvoltage0_mag_rising_reset_max.
+
+What: /sys/bus/iio/devices/iio:deviceX/events/in_altvoltage0_mag_rising_reset_min
+KernelVersion: 6.7
+Contact: linux-iio@vger.kernel.org
+Description:
+ Reading returns the current Degradation of Signal Reset Minimum
+ Threshold value in millivolts. Writing sets the value.
+
+What: /sys/bus/iio/devices/iio:deviceX/events/in_altvoltage0_mag_rising_reset_min_available
+KernelVersion: 6.7
+Contact: linux-iio@vger.kernel.org
+Description:
+ Reading returns the allowable voltage range for
+ in_altvoltage0_mag_rising_reset_min.
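
A short sketch of adjusting the Degradation of Signal reset thresholds
described above (the millivolt values are purely illustrative; the
*_available attributes report the range the hardware actually accepts)::

    ev=/sys/bus/iio/devices/iio:deviceX/events
    cat $ev/in_altvoltage0_mag_rising_reset_min_available
    echo 100 > $ev/in_altvoltage0_mag_rising_reset_min     # millivolts
    cat $ev/in_altvoltage0_mag_rising_reset_max_available
    echo 4800 > $ev/in_altvoltage0_mag_rising_reset_max    # millivolts
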
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index a44bfe020061..2b7108e21977 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -313,6 +313,15 @@ Description:
Inter-Chip SSIC devices support asymmetric lanes up to 4 lanes per
direction. Devices before USB 3.2 are single lane (tx_lanes = 1)
+What: /sys/bus/usb/devices/.../typec
+Date: November 2023
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Symlink to the USB Type-C partner device. USB Type-C partner
+ represents the component that communicates over the
+ Configuration Channel (CC signal on USB Type-C connectors and
+ cables) with the local port.
+
What: /sys/bus/usb/devices/usbX/bAlternateSetting
Description:
The current interface alternate setting number, in decimal.
diff --git a/Documentation/ABI/testing/sysfs-bus-vdpa b/Documentation/ABI/testing/sysfs-bus-vdpa
index 28a6111202ba..4da53878bff6 100644
--- a/Documentation/ABI/testing/sysfs-bus-vdpa
+++ b/Documentation/ABI/testing/sysfs-bus-vdpa
@@ -1,4 +1,4 @@
-What: /sys/bus/vdpa/driver_autoprobe
+What: /sys/bus/vdpa/drivers_autoprobe
Date: March 2020
Contact: virtualization@lists.linux-foundation.org
Description:
@@ -17,7 +17,7 @@ Description:
Writing a device name to this file will cause the kernel binds
devices to a compatible driver.
- This can be useful when /sys/bus/vdpa/driver_autoprobe is
+ This can be useful when /sys/bus/vdpa/drivers_autoprobe is
disabled.
What: /sys/bus/vdpa/drivers/.../bind
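
As a rough illustration of using these attributes together (the device name
vdpa0 is illustrative)::

    echo 0 > /sys/bus/vdpa/drivers_autoprobe    # stop automatic driver binding
    # create/configure the vdpa device, then bind it manually via the
    # bus' probe file described above
    echo vdpa0 > /sys/bus/vdpa/drivers_probe
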
diff --git a/Documentation/ABI/testing/sysfs-class-usb_power_delivery b/Documentation/ABI/testing/sysfs-class-usb_power_delivery
index 1bf9d1d7902c..61d233c320ea 100644
--- a/Documentation/ABI/testing/sysfs-class-usb_power_delivery
+++ b/Documentation/ABI/testing/sysfs-class-usb_power_delivery
@@ -124,6 +124,13 @@ Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
Description:
The voltage the supply supports in millivolts.
+What: /sys/class/usb_power_delivery/.../source-capabilities/<position>:fixed_supply/peak_current
+Date: October 2023
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ This file shows the value of the Fixed Power Source Peak Current
+ Capability field.
+
What: /sys/class/usb_power_delivery/.../source-capabilities/<position>:fixed_supply/maximum_current
Date: May 2022
Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
diff --git a/Documentation/ABI/testing/sysfs-tty b/Documentation/ABI/testing/sysfs-tty
index 820e412d38a8..895c47f05f6f 100644
--- a/Documentation/ABI/testing/sysfs-tty
+++ b/Documentation/ABI/testing/sysfs-tty
@@ -87,19 +87,22 @@ What: /sys/class/tty/ttyS<x>/close_delay
Date: October 2012
Contact: Alan Cox <alan@linux.intel.com>
Description:
- Show the closing delay time for this port in ms.
+ Show the closing delay time for this port in centiseconds.
- These sysfs values expose the TIOCGSERIAL interface via
- sysfs rather than via ioctls.
+ These sysfs values expose the TIOCGSERIAL interface via
+ sysfs rather than via ioctls.
What: /sys/class/tty/ttyS<x>/closing_wait
Date: October 2012
Contact: Alan Cox <alan@linux.intel.com>
Description:
- Show the close wait time for this port in ms.
+ Show the close wait time for this port in centiseconds.
- These sysfs values expose the TIOCGSERIAL interface via
- sysfs rather than via ioctls.
+ Waiting forever is represented as 0. If waiting on close is
+ disabled then the value is 65535.
+
+ These sysfs values expose the TIOCGSERIAL interface via
+ sysfs rather than via ioctls.
What: /sys/class/tty/ttyS<x>/custom_divisor
Date: October 2012
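
A quick check of the two values discussed above (ttyS0 is illustrative; the
values are in centiseconds, so 3000 would mean 30 seconds, and for
closing_wait 0 means wait forever while 65535 means waiting on close is
disabled)::

    cat /sys/class/tty/ttyS0/close_delay
    cat /sys/class/tty/ttyS0/closing_wait
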
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 01c7082ee999..65731b060e3f 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2227,7 +2227,7 @@
forcing Dual Address Cycle for PCI cards supporting
greater than 32-bit addressing.
- iommu.strict= [ARM64, X86] Configure TLB invalidation behaviour
+ iommu.strict= [ARM64, X86, S390] Configure TLB invalidation behaviour
Format: { "0" | "1" }
0 - Lazy mode.
Request that DMA unmap operations use deferred
@@ -3333,6 +3333,11 @@
mga= [HW,DRM]
+ microcode.force_minrev= [X86]
+ Format: <bool>
+ Enable or disable the microcode minimal revision
+ enforcement for the runtime microcode loader.
+
min_addr=nn[KMG] [KNL,BOOT,IA-64] All physical memory below this
physical address is ignored.
@@ -3591,6 +3596,13 @@
[NFS] set the TCP port on which the NFSv4 callback
channel should listen.
+ nfs.delay_retrans=
+ [NFS] specifies the number of times the NFSv4 client
+ retries the request before returning an EAGAIN error,
+ after a reply of NFS4ERR_DELAY from the server.
+ Only applies if the softerr mount option is enabled,
+ and the specified value is >= 0.
+
nfs.enable_ino64=
[NFS] enable 64-bit inode numbers.
If zero, the NFS client will fake up a 32-bit inode
@@ -5682,9 +5694,10 @@
s390_iommu= [HW,S390]
Set s390 IOTLB flushing mode
strict
- With strict flushing every unmap operation will result in
- an IOTLB flush. Default is lazy flushing before reuse,
- which is faster.
+ With strict flushing every unmap operation will result
+ in an IOTLB flush. Default is lazy flushing before
+ reuse, which is faster. Deprecated, equivalent to
+ iommu.strict=1.
s390_iommu_aperture= [KNL,S390]
Specifies the size of the per device DMA address space
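
For instance, a boot command line exercising the parameters touched above
might look like the following (the values are illustrative only, and
nfs.delay_retrans takes effect only with the softerr mount option)::

    iommu.strict=1 microcode.force_minrev=1 nfs.delay_retrans=3
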
diff --git a/Documentation/admin-guide/media/mgb4.rst b/Documentation/admin-guide/media/mgb4.rst
new file mode 100644
index 000000000000..2977f74d7e26
--- /dev/null
+++ b/Documentation/admin-guide/media/mgb4.rst
@@ -0,0 +1,374 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================
+mgb4 sysfs interface
+====================
+
+The mgb4 driver provides a sysfs interface that is used to configure video
+stream related parameters (some of them must be set properly before the v4l2
+device can be opened) and to obtain the video device/stream status.
+
+There are two types of parameters: global / PCI card related, found under
+``/sys/class/video4linux/videoX/device``, and module specific, found under
+``/sys/class/video4linux/videoX``.
+
+
+Global (PCI card) parameters
+============================
+
+**module_type** (R):
+ Module type.
+
+ | 0 - No module present
+ | 1 - FPDL3
+ | 2 - GMSL
+
+**module_version** (R):
+ Module version number. Zero in case of a missing module.
+
+**fw_type** (R):
+ Firmware type.
+
+ | 1 - FPDL3
+ | 2 - GMSL
+
+**fw_version** (R):
+ Firmware version number.
+
+**serial_number** (R):
+ Card serial number. The format is::
+
+ PRODUCT-REVISION-SERIES-SERIAL
+
+    where each component is an 8-bit number.
+
+
+Common FPDL3/GMSL input parameters
+==================================
+
+**input_id** (R):
+ Input number ID, zero based.
+
+**oldi_lane_width** (RW):
+ Number of deserializer output lanes.
+
+ | 0 - single
+ | 1 - dual (default)
+
+**color_mapping** (RW):
+ Mapping of the incoming bits in the signal to the colour bits of the pixels.
+
+ | 0 - OLDI/JEIDA
+ | 1 - SPWG/VESA (default)
+
+**link_status** (R):
+ Video link status. If the link is locked, chips are properly connected and
+ communicating at the same speed and protocol. The link can be locked without
+ an active video stream.
+
+ A value of 0 is equivalent to the V4L2_IN_ST_NO_SYNC flag of the V4L2
+ VIDIOC_ENUMINPUT status bits.
+
+ | 0 - unlocked
+ | 1 - locked
+
+**stream_status** (R):
+ Video stream status. A stream is detected if the link is locked, the input
+ pixel clock is running and the DE signal is moving.
+
+ A value of 0 is equivalent to the V4L2_IN_ST_NO_SIGNAL flag of the V4L2
+ VIDIOC_ENUMINPUT status bits.
+
+ | 0 - not detected
+ | 1 - detected
+
+**video_width** (R):
+ Video stream width. This is the actual width as detected by the HW.
+
+ The value is identical to what VIDIOC_QUERY_DV_TIMINGS returns in the width
+ field of the v4l2_bt_timings struct.
+
+**video_height** (R):
+ Video stream height. This is the actual height as detected by the HW.
+
+ The value is identical to what VIDIOC_QUERY_DV_TIMINGS returns in the height
+ field of the v4l2_bt_timings struct.
+
+**vsync_status** (R):
+ The type of VSYNC pulses as detected by the video format detector.
+
+ The value is equivalent to the flags returned by VIDIOC_QUERY_DV_TIMINGS in
+ the polarities field of the v4l2_bt_timings struct.
+
+ | 0 - active low
+ | 1 - active high
+ | 2 - not available
+
+**hsync_status** (R):
+ The type of HSYNC pulses as detected by the video format detector.
+
+ The value is equivalent to the flags returned by VIDIOC_QUERY_DV_TIMINGS in
+ the polarities field of the v4l2_bt_timings struct.
+
+ | 0 - active low
+ | 1 - active high
+ | 2 - not available
+
+**vsync_gap_length** (RW):
+ If the incoming video signal does not contain synchronization VSYNC and
+ HSYNC pulses, these must be generated internally in the FPGA to achieve
+  the correct frame ordering. This value indicates how many "empty" pixels
+ (pixels with deasserted Data Enable signal) are necessary to generate the
+ internal VSYNC pulse.
+
+**hsync_gap_length** (RW):
+ If the incoming video signal does not contain synchronization VSYNC and
+ HSYNC pulses, these must be generated internally in the FPGA to achieve
+  the correct frame ordering. This value indicates how many "empty" pixels
+ (pixels with deasserted Data Enable signal) are necessary to generate the
+ internal HSYNC pulse. The value must be greater than 1 and smaller than
+ vsync_gap_length.
+
+**pclk_frequency** (R):
+ Input pixel clock frequency in kHz.
+
+ The value is identical to what VIDIOC_QUERY_DV_TIMINGS returns in
+ the pixelclock field of the v4l2_bt_timings struct.
+
+ *Note: The frequency_range parameter must be set properly first to get
+ a valid frequency here.*
+
+**hsync_width** (R):
+ Width of the HSYNC signal in PCLK clock ticks.
+
+ The value is identical to what VIDIOC_QUERY_DV_TIMINGS returns in
+ the hsync field of the v4l2_bt_timings struct.
+
+**vsync_width** (R):
+ Width of the VSYNC signal in PCLK clock ticks.
+
+ The value is identical to what VIDIOC_QUERY_DV_TIMINGS returns in
+ the vsync field of the v4l2_bt_timings struct.
+
+**hback_porch** (R):
+ Number of PCLK pulses between deassertion of the HSYNC signal and the first
+ valid pixel in the video line (marked by DE=1).
+
+ The value is identical to what VIDIOC_QUERY_DV_TIMINGS returns in
+ the hbackporch field of the v4l2_bt_timings struct.
+
+**hfront_porch** (R):
+ Number of PCLK pulses between the end of the last valid pixel in the video
+ line (marked by DE=1) and assertion of the HSYNC signal.
+
+ The value is identical to what VIDIOC_QUERY_DV_TIMINGS returns in
+ the hfrontporch field of the v4l2_bt_timings struct.
+
+**vback_porch** (R):
+ Number of video lines between deassertion of the VSYNC signal and the video
+ line with the first valid pixel (marked by DE=1).
+
+ The value is identical to what VIDIOC_QUERY_DV_TIMINGS returns in
+ the vbackporch field of the v4l2_bt_timings struct.
+
+**vfront_porch** (R):
+ Number of video lines between the end of the last valid pixel line (marked
+ by DE=1) and assertion of the VSYNC signal.
+
+ The value is identical to what VIDIOC_QUERY_DV_TIMINGS returns in
+ the vfrontporch field of the v4l2_bt_timings struct.
+
+**frequency_range** (RW):
+ PLL frequency range of the OLDI input clock generator. The PLL frequency is
+ derived from the Pixel Clock Frequency (PCLK) and is equal to PCLK if
+ oldi_lane_width is set to "single" and PCLK/2 if oldi_lane_width is set to
+ "dual".
+
+ | 0 - PLL < 50MHz (default)
+ | 1 - PLL >= 50MHz
+
+ *Note: This parameter can not be changed while the input v4l2 device is
+ open.*
+
+
+Common FPDL3/GMSL output parameters
+===================================
+
+**output_id** (R):
+ Output number ID, zero based.
+
+**video_source** (RW):
+ Output video source. If set to 0 or 1, the source is the corresponding card
+ input and the v4l2 output devices are disabled. If set to 2 or 3, the source
+ is the corresponding v4l2 video output device. The default is
+ the corresponding v4l2 output, i.e. 2 for OUT1 and 3 for OUT2.
+
+ | 0 - input 0
+ | 1 - input 1
+ | 2 - v4l2 output 0
+ | 3 - v4l2 output 1
+
+ *Note: This parameter can not be changed while ANY of the input/output v4l2
+ devices is open.*
+
+**display_width** (RW):
+ Display width. There is no autodetection of the connected display, so the
+ proper value must be set before the start of streaming. The default width
+ is 1280.
+
+ *Note: This parameter can not be changed while the output v4l2 device is
+ open.*
+
+**display_height** (RW):
+ Display height. There is no autodetection of the connected display, so the
+ proper value must be set before the start of streaming. The default height
+ is 640.
+
+ *Note: This parameter can not be changed while the output v4l2 device is
+ open.*
+
+**frame_rate** (RW):
+ Output video frame rate in frames per second. The default frame rate is
+ 60Hz.
+
+**hsync_polarity** (RW):
+ HSYNC signal polarity.
+
+ | 0 - active low (default)
+ | 1 - active high
+
+**vsync_polarity** (RW):
+ VSYNC signal polarity.
+
+ | 0 - active low (default)
+ | 1 - active high
+
+**de_polarity** (RW):
+ DE signal polarity.
+
+ | 0 - active low
+ | 1 - active high (default)
+
+**pclk_frequency** (RW):
+  Output pixel clock frequency. Allowed values are between 25000 and 190000 kHz,
+ and there is a non-linear stepping between two consecutive allowed
+ frequencies. The driver finds the nearest allowed frequency to the given
+ value and sets it. When reading this property, you get the exact
+ frequency set by the driver. The default frequency is 70000kHz.
+
+ *Note: This parameter can not be changed while the output v4l2 device is
+ open.*
+
+**hsync_width** (RW):
+ Width of the HSYNC signal in pixels. The default value is 16.
+
+**vsync_width** (RW):
+ Width of the VSYNC signal in video lines. The default value is 2.
+
+**hback_porch** (RW):
+ Number of PCLK pulses between deassertion of the HSYNC signal and the first
+ valid pixel in the video line (marked by DE=1). The default value is 32.
+
+**hfront_porch** (RW):
+ Number of PCLK pulses between the end of the last valid pixel in the video
+ line (marked by DE=1) and assertion of the HSYNC signal. The default value
+ is 32.
+
+**vback_porch** (RW):
+ Number of video lines between deassertion of the VSYNC signal and the video
+ line with the first valid pixel (marked by DE=1). The default value is 2.
+
+**vfront_porch** (RW):
+ Number of video lines between the end of the last valid pixel line (marked
+ by DE=1) and assertion of the VSYNC signal. The default value is 2.
+
+
+FPDL3 specific input parameters
+===============================
+
+**fpdl3_input_width** (RW):
+ Number of deserializer input lines.
+
+ | 0 - auto (default)
+ | 1 - single
+ | 2 - dual
+
+FPDL3 specific output parameters
+================================
+
+**fpdl3_output_width** (RW):
+ Number of serializer output lines.
+
+ | 0 - auto (default)
+ | 1 - single
+ | 2 - dual
+
+GMSL specific input parameters
+==============================
+
+**gmsl_mode** (RW):
+ GMSL speed mode.
+
+ | 0 - 12Gb/s (default)
+ | 1 - 6Gb/s
+ | 2 - 3Gb/s
+ | 3 - 1.5Gb/s
+
+**gmsl_stream_id** (RW):
+ The GMSL multi-stream contains up to four video streams. This parameter
+ selects which stream is captured by the video input. The value is the
+ zero-based index of the stream. The default stream id is 0.
+
+ *Note: This parameter can not be changed while the input v4l2 device is
+ open.*
+
+**gmsl_fec** (RW):
+ GMSL Forward Error Correction (FEC).
+
+ | 0 - disabled
+ | 1 - enabled (default)
+
+
+====================
+mgb4 mtd partitions
+====================
+
+The mgb4 driver creates a MTD device with two partitions:
+ - mgb4-fw.X - FPGA firmware.
+ - mgb4-data.X - Factory settings, e.g. card serial number.
+
+The *mgb4-fw* partition is writable and is used for FW updates, *mgb4-data* is
+read-only. The *X* attached to the partition name represents the card number.
+Depending on the CONFIG_MTD_PARTITIONED_MASTER kernel configuration, you may
+also have a third partition named *mgb4-flash* available in the system. This
+partition represents the card's whole, unpartitioned FLASH memory and one should
+not fiddle with it...
+
+====================
+mgb4 iio (triggers)
+====================
+
+The mgb4 driver creates an Industrial I/O (IIO) device that provides trigger and
+signal level status capability. The following scan elements are available:
+
+**activity**:
+ The trigger levels and pending status.
+
+ | bit 1 - trigger 1 pending
+ | bit 2 - trigger 2 pending
+ | bit 5 - trigger 1 level
+ | bit 6 - trigger 2 level
+
+**timestamp**:
+ The trigger event timestamp.
+
+The iio device can operate either in "raw" mode where you can fetch the signal
+levels (activity bits 5 and 6) using sysfs access or in triggered buffer mode.
+In the triggered buffer mode you can follow the signal level changes (activity
+bits 1 and 2) using the iio device in /dev. If you enable the timestamps, you
+will also get the exact trigger event time that can be matched to a video frame
+(every mgb4 video frame has a timestamp with the same clock source).
+
+*Note: although the activity sample always contains all the status bits, it makes
+no sense to get the pending bits in raw mode or the level bits in the triggered
+buffer mode - the values do not represent valid data in such a case.*
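
A hedged configuration sketch for one of the v4l2 outputs described above,
run before streaming is started (videoX and the timing values are
illustrative; the display parameters cannot be changed while the output
device is open)::

    out=/sys/class/video4linux/videoX
    echo 1920 > $out/display_width
    echo 1080 > $out/display_height
    echo 60 > $out/frame_rate
    echo 74250 > $out/pclk_frequency   # driver picks the nearest allowed frequency
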
diff --git a/Documentation/admin-guide/media/pci-cardlist.rst b/Documentation/admin-guide/media/pci-cardlist.rst
index 42528795d4da..7d8e3c8987db 100644
--- a/Documentation/admin-guide/media/pci-cardlist.rst
+++ b/Documentation/admin-guide/media/pci-cardlist.rst
@@ -77,6 +77,7 @@ ipu3-cio2 Intel ipu3-cio2 driver
ivtv Conexant cx23416/cx23415 MPEG encoder/decoder
ivtvfb Conexant cx23415 framebuffer
mantis MANTIS based cards
+mgb4 Digiteq Automotive MGB4 frame grabber
mxb Siemens-Nixdorf 'Multimedia eXtension Board'
netup-unidvb NetUP Universal DVB card
ngene Micronas nGene
diff --git a/Documentation/admin-guide/media/v4l-drivers.rst b/Documentation/admin-guide/media/v4l-drivers.rst
index 1c41f87c3917..61283d67ceef 100644
--- a/Documentation/admin-guide/media/v4l-drivers.rst
+++ b/Documentation/admin-guide/media/v4l-drivers.rst
@@ -17,6 +17,7 @@ Video4Linux (V4L) driver-specific documentation
imx7
ipu3
ivtv
+ mgb4
omap3isp
omap4_camera
philips
diff --git a/Documentation/admin-guide/media/visl.rst b/Documentation/admin-guide/media/visl.rst
index 7d2dc78341c9..4328c6c72d30 100644
--- a/Documentation/admin-guide/media/visl.rst
+++ b/Documentation/admin-guide/media/visl.rst
@@ -78,7 +78,7 @@ The trace events are defined on a per-codec basis, e.g.:
.. code-block:: bash
- $ ls /sys/kernel/debug/tracing/events/ | grep visl
+ $ ls /sys/kernel/tracing/events/ | grep visl
visl_fwht_controls
visl_h264_controls
visl_hevc_controls
@@ -90,13 +90,13 @@ For example, in order to dump HEVC SPS data:
.. code-block:: bash
- $ echo 1 > /sys/kernel/debug/tracing/events/visl_hevc_controls/v4l2_ctrl_hevc_sps/enable
+ $ echo 1 > /sys/kernel/tracing/events/visl_hevc_controls/v4l2_ctrl_hevc_sps/enable
The SPS data will be dumped to the trace buffer, i.e.:
.. code-block:: bash
- $ cat /sys/kernel/debug/tracing/trace
+ $ cat /sys/kernel/tracing/trace
video_parameter_set_id 0
seq_parameter_set_id 0
pic_width_in_luma_samples 1920
diff --git a/Documentation/arch/powerpc/index.rst b/Documentation/arch/powerpc/index.rst
index a50834798454..9749f6dc258f 100644
--- a/Documentation/arch/powerpc/index.rst
+++ b/Documentation/arch/powerpc/index.rst
@@ -26,6 +26,7 @@ powerpc
isa-versions
kaslr-booke32
mpc52xx
+ kvm-nested
papr_hcalls
pci_iov_resource_on_powernv
pmu-ebb
diff --git a/Documentation/arch/powerpc/kvm-nested.rst b/Documentation/arch/powerpc/kvm-nested.rst
new file mode 100644
index 000000000000..630602a8aa00
--- /dev/null
+++ b/Documentation/arch/powerpc/kvm-nested.rst
@@ -0,0 +1,634 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================================
+Nested KVM on POWER
+====================================
+
+Introduction
+============
+
+This document explains how a guest operating system can act as a
+hypervisor and run nested guests through the use of hypercalls, if the
+hypervisor has implemented them. The terms L0, L1, and L2 are used to
+refer to different software entities. L0 is the hypervisor mode entity
+that would normally be called the "host" or "hypervisor". L1 is a
+guest virtual machine that is directly run under L0 and is initiated
+and controlled by L0. L2 is a guest virtual machine that is initiated
+and controlled by L1 acting as a hypervisor.
+
+Existing API
+============
+
+Linux/KVM has had support for Nesting as an L0 or L1 since 2018.
+
+The L0 code was added::
+
+ commit 8e3f5fc1045dc49fd175b978c5457f5f51e7a2ce
+ Author: Paul Mackerras <paulus@ozlabs.org>
+ Date: Mon Oct 8 16:31:03 2018 +1100
+ KVM: PPC: Book3S HV: Framework and hcall stubs for nested virtualization
+
+The L1 code was added::
+
+ commit 360cae313702cdd0b90f82c261a8302fecef030a
+ Author: Paul Mackerras <paulus@ozlabs.org>
+ Date: Mon Oct 8 16:31:04 2018 +1100
+ KVM: PPC: Book3S HV: Nested guest entry via hypercall
+
+This API works primarily using a single hcall h_enter_nested(). This
+call is made by the L1 to tell the L0 to start an L2 vCPU with the given
+state. The L0 then starts this L2 and runs until an L2 exit condition
+is reached. Once the L2 exits, the state of the L2 is given back to
+the L1 by the L0. The full L2 vCPU state is always transferred from
+and to L1 when the L2 is run. The L0 doesn't keep any state on the L2
+vCPU (except in the short sequence in the L0 on L1 -> L2 entry and L2
+-> L1 exit).
+
+The only state kept by the L0 is the partition table. The L1 registers
+its partition table using the h_set_partition_table() hcall. All
+other state held by the L0 about the L2s is cached state (such as
+shadow page tables).
+
+The L1 may run any L2 or vCPU without first informing the L0. It
+simply starts the vCPU using h_enter_nested(). The creation of L2s and
+vCPUs is done implicitly whenever h_enter_nested() is called.
+
+In this document, we call this existing API the v1 API.
+
+New PAPR API
+===============
+
+The new PAPR API changes from the v1 API such that creating the L2 and
+associated vCPUs is explicit. In this document, we call this the v2
+API.
+
+h_enter_nested() is replaced with H_GUEST_VCPU_RUN(). Before this can
+be called, the L1 must explicitly create the L2 using h_guest_create()
+and create any associated vCPUs with h_guest_create_vCPU(). Getting
+and setting vCPU state can also be performed using the h_guest_{g|s}et
+hcalls.
+
+The basic execution flow for an L1 to create an L2, run it, and
+delete it is:
+
+- L1 and L0 negotiate capabilities with H_GUEST_{G,S}ET_CAPABILITIES()
+ (normally at L1 boot time).
+
+- L1 requests the L0 create an L2 with H_GUEST_CREATE() and receives a token
+
+- L1 requests the L0 create an L2 vCPU with H_GUEST_CREATE_VCPU()
+
+- L1 and L0 communicate the vCPU state using the H_GUEST_{G,S}ET() hcall
+
+- L1 requests that the L0 run the vCPU using the H_GUEST_VCPU_RUN() hcall
+
+- L1 deletes L2 with H_GUEST_DELETE()
+
+More details of the individual hcalls follow:
+
+HCALL Details
+=============
+
+This documentation is provided to give an overall understanding of the
+API. It doesn't aim to provide all the details required to implement
+an L1 or L0. The latest version of PAPR can be referred to for more details.
+
+All these HCALLs are made by the L1 to the L0.
+
+H_GUEST_GET_CAPABILITIES()
+--------------------------
+
+This is called to get the capabilities of the L0 nested
+hypervisor. This includes capabilities such as the CPU versions (e.g.
+POWER9, POWER10) that are supported as L2s::
+
+ H_GUEST_GET_CAPABILITIES(uint64 flags)
+
+ Parameters:
+ Input:
+ flags: Reserved
+ Output:
+ R3: Return code
+ R4: Hypervisor Supported Capabilities bitmap 1
+
+H_GUEST_SET_CAPABILITIES()
+--------------------------
+
+This is called to inform the L0 of the capabilities of the L1
+hypervisor. The set of flags passed here is the same as for
+H_GUEST_GET_CAPABILITIES().
+
+Typically, GET will be called first and then SET will be called with a
+subset of the flags returned from GET. This process allows the L0 and
+L1 to negotiate an agreed set of capabilities::
+
+ H_GUEST_SET_CAPABILITIES(uint64 flags,
+ uint64 capabilitiesBitmap1)
+ Parameters:
+ Input:
+ flags: Reserved
+ capabilitiesBitmap1: Only capabilities advertised through
+ H_GUEST_GET_CAPABILITIES
+ Output:
+ R3: Return code
+ R4: If R3 = H_P2: The number of invalid bitmaps
+ R5: If R3 = H_P2: The index of first invalid bitmap
+
+H_GUEST_CREATE()
+----------------
+
+This is called to create an L2. A unique ID of the L2 created
+(similar to an LPID) is returned, which can be used on subsequent HCALLs to
+identify the L2::
+
+ H_GUEST_CREATE(uint64 flags,
+ uint64 continueToken);
+ Parameters:
+ Input:
+ flags: Reserved
+ continueToken: Initial call set to -1. Subsequent calls,
+ after H_Busy or H_LongBusyOrder has been
+ returned, value that was returned in R4.
+ Output:
+ R3: Return code. Notable:
+ H_Not_Enough_Resources: Unable to create Guest VCPU due to not
+ enough Hypervisor memory. See H_GUEST_CREATE_GET_STATE(flags =
+ takeOwnershipOfVcpuState)
+ R4: If R3 = H_Busy or H_LongBusyOrder -> continueToken
+
+H_GUEST_CREATE_VCPU()
+---------------------
+
+This is called to create a vCPU associated with an L2. The L2 id
+(returned from H_GUEST_CREATE()) should be passed in. Also passed in
+is a unique (for this L2) vCPUid. This vCPUid is allocated by the
+L1::
+
+ H_GUEST_CREATE_VCPU(uint64 flags,
+ uint64 guestId,
+ uint64 vcpuId);
+ Parameters:
+ Input:
+ flags: Reserved
+ guestId: ID obtained from H_GUEST_CREATE
+ vcpuId: ID of the vCPU to be created. This must be within the
+ range of 0 to 2047
+ Output:
+ R3: Return code. Notable:
+ H_Not_Enough_Resources: Unable to create Guest VCPU due to not
+ enough Hypervisor memory. See H_GUEST_CREATE_GET_STATE(flags =
+ takeOwnershipOfVcpuState)
+
+H_GUEST_GET_STATE()
+-------------------
+
+This is called to get state associated with an L2 (Guest-wide or vCPU specific).
+This info is passed via the Guest State Buffer (GSB), a standard format
+explained later in this document; the necessary details are below:
+
+This can get either L2-wide or vCPU-specific information. Examples of
+L2-wide state are the timebase offset or process scoped page table
+info. Examples of vCPU-specific state are GPRs or VSRs. A bit in the
+flags parameter specifies if this call is L2 wide or vCPU specific and
+the IDs in the GSB must match this.
+
+The L1 provides a pointer to the GSB as a parameter to this call. Also
+provided are the L2 and vCPU IDs associated with the state to get.
+
+The L1 writes only the IDs and sizes in the GSB. The L0 writes the
+associated values for each ID in the GSB::
+
+ H_GUEST_GET_STATE(uint64 flags,
+ uint64 guestId,
+ uint64 vcpuId,
+ uint64 dataBuffer,
+ uint64 dataBufferSizeInBytes);
+ Parameters:
+ Input:
+ flags:
+ Bit 0: getGuestWideState: Request state of the Guest instead
+ of an individual VCPU.
+ Bit 1: takeOwnershipOfVcpuState: Indicate the L1 is taking
+ over ownership of the VCPU state and that the L0 can free
+ the storage holding the state. The VCPU state will need to
+ be returned to the Hypervisor via H_GUEST_SET_STATE prior
+ to H_GUEST_RUN_VCPU being called for this VCPU. The data
+ returned in the dataBuffer is in a Hypervisor internal
+ format.
+ Bits 2-63: Reserved
+ guestId: ID obtained from H_GUEST_CREATE
+ vcpuId: ID of the vCPU passed to H_GUEST_CREATE_VCPU
+ dataBuffer: An L1 real address of the GSB.
+ If takeOwnershipOfVcpuState, size must be at least the size
+ returned by ID=0x0001
+ dataBufferSizeInBytes: Size of dataBuffer
+ Output:
+ R3: Return code
+ R4: If R3 = H_Invalid_Element_Id: The array index of the bad
+ element ID.
+ If R3 = H_Invalid_Element_Size: The array index of the bad
+ element size.
+ If R3 = H_Invalid_Element_Value: The array index of the bad
+ element value.
+
+H_GUEST_SET_STATE()
+-------------------
+
+This is called to set L2-wide or vCPU-specific L2 state. This info is
+passed via the Guest State Buffer (GSB); the necessary details are
+below:
+
+This can set either L2-wide or vCPU-specific information. Examples of
+L2-wide state are the timebase offset or process scoped page table
+info. Examples of vCPU-specific state are GPRs or VSRs. A bit in the
+flags parameter specifies if this call is L2 wide or vCPU specific and
+the IDs in the GSB must match this.
+
+The L1 provides a pointer to the GSB as a parameter to this call. Also
+provided is the L2 and vCPU IDs associated with the state to set.
+
+The L1 writes all values in the GSB and the L0 only reads the GSB for
+this call::
+
+ H_GUEST_SET_STATE(uint64 flags,
+ uint64 guestId,
+ uint64 vcpuId,
+ uint64 dataBuffer,
+ uint64 dataBufferSizeInBytes);
+ Parameters:
+ Input:
+ flags:
+ Bit 0: getGuestWideState: Request state of the Guest instead
+ of an individual VCPU.
+ Bit 1: returnOwnershipOfVcpuState: Return Guest VCPU state. See
+ GET_STATE takeOwnershipOfVcpuState.
+ Bits 2-63: Reserved
+ guestId: ID obtained from H_GUEST_CREATE
+ vcpuId: ID of the vCPU passed to H_GUEST_CREATE_VCPU
+ dataBuffer: An L1 real address of the GSB.
+ If returnOwnershipOfVcpuState, size must be at least the size
+ returned by ID=0x0001
+ dataBufferSizeInBytes: Size of dataBuffer
+ Output:
+ R3: Return code
+ R4: If R3 = H_Invalid_Element_Id: The array index of the bad
+ element ID.
+ If R3 = H_Invalid_Element_Size: The array index of the bad
+ element size.
+ If R3 = H_Invalid_Element_Value: The array index of the bad
+ element value.
+
+H_GUEST_RUN_VCPU()
+------------------
+
+This is called to run an L2 vCPU. The L2 and vCPU IDs are passed in as
+parameters. The vCPU runs with the state set previously using
+H_GUEST_SET_STATE(). When the L2 exits, the L1 will resume from this
+hcall.
+
+This hcall also has associated input and output GSBs. Unlike
+H_GUEST_{S,G}ET_STATE(), these GSB pointers are not passed in as
+parameters to the hcall (this was done in the interest of
+performance). The locations of these GSBs must be preregistered using
+the H_GUEST_SET_STATE() call with IDs 0x0C00 and 0x0C01 (see the table
+below).
+
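+The value of each of these two elements is itself a 16-byte descriptor
+giving the buffer's location and size (IDs 0x0C00 and 0x0C01 in the
+table below). A minimal C sketch of that layout, assuming both fields
+are stored big endian like all other GSB data, could be::
+
+  #include <stdint.h>
+
+  /* Illustrative only: value of GSB elements 0x0C00 / 0x0C01. */
+  struct run_vcpu_gsb_desc {
+          uint64_t addr;    /* 0x0: L1 real address of the run vCPU GSB */
+          uint64_t size;    /* 0x8: size of that GSB in bytes */
+  };                        /* both fields big endian on the wire */
+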
+The input GSB may contain only VCPU specific elements to be set. This
+GSB may also contain zero elements (ie 0 in the first 4 bytes of the
+GSB) if nothing needs to be set.
+
+On exit from the hcall, the output buffer is filled with elements
+determined by the L0. The reason for the exit is contained in GPR4 (ie
+NIP is put in GPR4). The elements returned depend on the exit
+type. For example, if the exit reason is the L2 doing a hcall (GPR4 =
+0xc00), then GPR3-12 are provided in the output GSB as this is the
+state likely needed to service the hcall. If additional state is
+needed, H_GUEST_GET_STATE() may be called by the L1.
+
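+For example (illustrative only; the handler names here are
+hypothetical), the L1's handling of the exit reason returned in GPR4
+might be shaped like::
+
+  switch (exit_reason) {
+  case 0xc00:     /* L2 made an hcall; GPR3-12 arrive in the output GSB */
+          handle_l2_hcall();
+          break;
+  case 0xe00:     /* HDSI; fault details via the HDAR/HDSISR/ASDR elements */
+          handle_l2_storage_fault();
+          break;
+  case 0x980:     /* HDEC expired; typically just re-enter the L2 */
+          break;
+  default:
+          handle_other_exit(exit_reason);
+  }
+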
+To synthesize interrupts in the L2, when calling H_GUEST_RUN_VCPU()
+the L1 may set a flag (as a hcall parameter) and the L0 will
+synthesize the interrupt in the L2. Alternatively, the L1 may
+synthesize the interrupt itself using H_GUEST_SET_STATE() or the
+H_GUEST_RUN_VCPU() input GSB to set the state appropriately::
+
+ H_GUEST_RUN_VCPU(uint64 flags,
+ uint64 guestId,
+ uint64 vcpuId,
+ uint64 dataBuffer,
+ uint64 dataBufferSizeInBytes);
+ Parameters:
+ Input:
+ flags:
+ Bit 0: generateExternalInterrupt: Generate an external interrupt
+ Bit 1: generatePrivilegedDoorbell: Generate a Privileged Doorbell
+ Bit 2: sendToSystemReset: Generate a System Reset Interrupt
+ Bits 3-63: Reserved
+ guestId: ID obtained from H_GUEST_CREATE
+ vcpuId: ID of the vCPU passed to H_GUEST_CREATE_VCPU
+ Output:
+ R3: Return code
+ R4: If R3 = H_Success: The reason the L2 vCPU exited (ie. NIA)
+ 0x000: The VCPU stopped running for an unspecified reason. An
+ example of this is the Hypervisor stopping a running VCPU
+ due to an outstanding interrupt for the Host Partition.
+ 0x980: HDEC
+ 0xC00: HCALL
+ 0xE00: HDSI
+ 0xE20: HISI
+ 0xE40: HEA
+ 0xF80: HV Fac Unavail
+ If R3 = H_Invalid_Element_Id, H_Invalid_Element_Size, or
+ H_Invalid_Element_Value: R4 is offset of the invalid element
+ in the input buffer.
+
+H_GUEST_DELETE()
+----------------
+
+This is called to delete an L2. All associated vCPUs are also
+deleted. No specific vCPU delete call is provided.
+
+A flag may be provided to delete all guests. This is used to reset the
+L0 in the case of kdump/kexec::
+
+ H_GUEST_DELETE(uint64 flags,
+ uint64 guestId)
+ Parameters:
+ Input:
+ flags:
+ Bit 0: deleteAllGuests: deletes all guests
+ Bits 1-63: Reserved
+ guestId: ID obtained from H_GUEST_CREATE
+ Output:
+ R3: Return code
+
+Guest State Buffer
+==================
+
+The Guest State Buffer (GSB) is the main method of communicating state
+about the L2 between the L1 and L0 via H_GUEST_{G,S}ET_STATE() and
+H_GUEST_VCPU_RUN() calls.
+
+State may be associated with a whole L2 (eg. timebase offset) or a
+specific L2 vCPU (eg. GPR state). Only L2 vCPU state may be set by
+H_GUEST_VCPU_RUN().
+
+All data in the GSB is big endian (as is standard in PAPR).
+
+The Guest state buffer has a header which gives the number of
+elements, followed by the GSB elements themselves.
+
+GSB header:
+
++----------+----------+-------------------------------------------+
+| Offset | Size | Purpose |
+| Bytes | Bytes | |
++==========+==========+===========================================+
+| 0 | 4 | Number of elements |
++----------+----------+-------------------------------------------+
+| 4 | | Guest state buffer elements |
++----------+----------+-------------------------------------------+
+
+GSB element:
+
++----------+----------+-------------------------------------------+
+| Offset | Size | Purpose |
+| Bytes | Bytes | |
++==========+==========+===========================================+
+| 0 | 2 | ID |
++----------+----------+-------------------------------------------+
+| 2 | 2 | Size of Value |
++----------+----------+-------------------------------------------+
+| 4 | As above | Value |
++----------+----------+-------------------------------------------+
+
+The ID in the GSB element specifies what is to be set. This includes
+architected state like GPRs, VSRs and SPRs, as well as some metadata
+about the partition like the timebase offset and partition scoped page
+table information.
+
++--------+-------+----+--------+----------------------------------+
+| ID | Size | RW | Thread | Details |
+| | Bytes | | Guest | |
+| | | | Scope | |
++========+=======+====+========+==================================+
+| 0x0000 | | RW | TG | NOP element |
++--------+-------+----+--------+----------------------------------+
+| 0x0001 | 0x08 | R | G | Size of L0 vCPU state. See: |
+| | | | | H_GUEST_GET_STATE: |
+| | | | | flags = takeOwnershipOfVcpuState |
++--------+-------+----+--------+----------------------------------+
+| 0x0002 | 0x08 | R | G | Size Run vCPU out buffer |
++--------+-------+----+--------+----------------------------------+
+| 0x0003 | 0x04 | RW | G | Logical PVR |
++--------+-------+----+--------+----------------------------------+
+| 0x0004 | 0x08 | RW | G | TB Offset (L1 relative) |
++--------+-------+----+--------+----------------------------------+
+| 0x0005 | 0x18 | RW | G |Partition scoped page tbl info: |
+| | | | | |
+| | | | |- 0x00 Addr part scope table |
+| | | | |- 0x08 Num addr bits |
+| | | | |- 0x10 Size root dir |
++--------+-------+----+--------+----------------------------------+
+| 0x0006 | 0x10 | RW | G |Process Table Information: |
+| | | | | |
+| | | | |- 0x0 Addr proc scope table |
+| | | | |- 0x8 Table size. |
++--------+-------+----+--------+----------------------------------+
+| 0x0007-| | | | Reserved |
+| 0x0BFF | | | | |
++--------+-------+----+--------+----------------------------------+
+| 0x0C00 | 0x10 | RW | T |Run vCPU Input Buffer: |
+| | | | | |
+| | | | |- 0x0 Addr of buffer |
+| | | | |- 0x8 Buffer Size. |
++--------+-------+----+--------+----------------------------------+
+| 0x0C01 | 0x10 | RW | T |Run vCPU Output Buffer: |
+| | | | | |
+| | | | |- 0x0 Addr of buffer |
+| | | | |- 0x8 Buffer Size. |
++--------+-------+----+--------+----------------------------------+
+| 0x0C02 | 0x08 | RW | T | vCPU VPA Address |
++--------+-------+----+--------+----------------------------------+
+| 0x0C03-| | | | Reserved |
+| 0x0FFF | | | | |
++--------+-------+----+--------+----------------------------------+
+| 0x1000-| 0x08 | RW | T | GPR 0-31 |
+| 0x101F | | | | |
++--------+-------+----+--------+----------------------------------+
+| 0x1020 | 0x08 | RW | T | HDEC expiry TB |
++--------+-------+----+--------+----------------------------------+
+| 0x1021 | 0x08 | RW | T | NIA |
++--------+-------+----+--------+----------------------------------+
+| 0x1022 | 0x08 | RW | T | MSR |
++--------+-------+----+--------+----------------------------------+
+| 0x1023 | 0x08 | RW | T | LR |
++--------+-------+----+--------+----------------------------------+
+| 0x1024 | 0x08 | RW | T | XER |
++--------+-------+----+--------+----------------------------------+
+| 0x1025 | 0x08 | RW | T | CTR |
++--------+-------+----+--------+----------------------------------+
+| 0x1026 | 0x08 | RW | T | CFAR |
++--------+-------+----+--------+----------------------------------+
+| 0x1027 | 0x08 | RW | T | SRR0 |
++--------+-------+----+--------+----------------------------------+
+| 0x1028 | 0x08 | RW | T | SRR1 |
++--------+-------+----+--------+----------------------------------+
+| 0x1029 | 0x08 | RW | T | DAR |
++--------+-------+----+--------+----------------------------------+
+| 0x102A | 0x08 | RW | T | DEC expiry TB |
++--------+-------+----+--------+----------------------------------+
+| 0x102B | 0x08 | RW | T | VTB |
++--------+-------+----+--------+----------------------------------+
+| 0x102C | 0x08 | RW | T | LPCR |
++--------+-------+----+--------+----------------------------------+
+| 0x102D | 0x08 | RW | T | HFSCR |
++--------+-------+----+--------+----------------------------------+
+| 0x102E | 0x08 | RW | T | FSCR |
++--------+-------+----+--------+----------------------------------+
+| 0x102F | 0x08 | RW | T | FPSCR |
++--------+-------+----+--------+----------------------------------+
+| 0x1030 | 0x08 | RW | T | DAWR0 |
++--------+-------+----+--------+----------------------------------+
+| 0x1031 | 0x08 | RW | T | DAWR1 |
++--------+-------+----+--------+----------------------------------+
+| 0x1032 | 0x08 | RW | T | CIABR |
++--------+-------+----+--------+----------------------------------+
+| 0x1033 | 0x08 | RW | T | PURR |
++--------+-------+----+--------+----------------------------------+
+| 0x1034 | 0x08 | RW | T | SPURR |
++--------+-------+----+--------+----------------------------------+
+| 0x1035 | 0x08 | RW | T | IC |
++--------+-------+----+--------+----------------------------------+
+| 0x1036-| 0x08 | RW | T | SPRG 0-3 |
+| 0x1039 | | | | |
++--------+-------+----+--------+----------------------------------+
+| 0x103A | 0x08 | W | T | PPR |
++--------+-------+----+--------+----------------------------------+
+| 0x103B-| 0x08 | RW | T | MMCR 0-3 |
+| 0x103E | | | | |
++--------+-------+----+--------+----------------------------------+
+| 0x103F | 0x08 | RW | T | MMCRA |
++--------+-------+----+--------+----------------------------------+
+| 0x1040 | 0x08 | RW | T | SIER |
++--------+-------+----+--------+----------------------------------+
+| 0x1041 | 0x08 | RW | T | SIER 2 |
++--------+-------+----+--------+----------------------------------+
+| 0x1042 | 0x08 | RW | T | SIER 3 |
++--------+-------+----+--------+----------------------------------+
+| 0x1043 | 0x08 | RW | T | BESCR |
++--------+-------+----+--------+----------------------------------+
+| 0x1044 | 0x08 | RW | T | EBBHR |
++--------+-------+----+--------+----------------------------------+
+| 0x1045 | 0x08 | RW | T | EBBRR |
++--------+-------+----+--------+----------------------------------+
+| 0x1046 | 0x08 | RW | T | AMR |
++--------+-------+----+--------+----------------------------------+
+| 0x1047 | 0x08 | RW | T | IAMR |
++--------+-------+----+--------+----------------------------------+
+| 0x1048 | 0x08 | RW | T | AMOR |
++--------+-------+----+--------+----------------------------------+
+| 0x1049 | 0x08 | RW | T | UAMOR |
++--------+-------+----+--------+----------------------------------+
+| 0x104A | 0x08 | RW | T | SDAR |
++--------+-------+----+--------+----------------------------------+
+| 0x104B | 0x08 | RW | T | SIAR |
++--------+-------+----+--------+----------------------------------+
+| 0x104C | 0x08 | RW | T | DSCR |
++--------+-------+----+--------+----------------------------------+
+| 0x104D | 0x08 | RW | T | TAR |
++--------+-------+----+--------+----------------------------------+
+| 0x104E | 0x08 | RW | T | DEXCR |
++--------+-------+----+--------+----------------------------------+
+| 0x104F | 0x08 | RW | T | HDEXCR |
++--------+-------+----+--------+----------------------------------+
+| 0x1050 | 0x08 | RW | T | HASHKEYR |
++--------+-------+----+--------+----------------------------------+
+| 0x1051 | 0x08 | RW | T | HASHPKEYR |
++--------+-------+----+--------+----------------------------------+
+| 0x1052 | 0x08 | RW | T | CTRL |
++--------+-------+----+--------+----------------------------------+
+| 0x1053-| | | | Reserved |
+| 0x1FFF | | | | |
++--------+-------+----+--------+----------------------------------+
+| 0x2000 | 0x04 | RW | T | CR |
++--------+-------+----+--------+----------------------------------+
+| 0x2001 | 0x04 | RW | T | PIDR |
++--------+-------+----+--------+----------------------------------+
+| 0x2002 | 0x04 | RW | T | DSISR |
++--------+-------+----+--------+----------------------------------+
+| 0x2003 | 0x04 | RW | T | VSCR |
++--------+-------+----+--------+----------------------------------+
+| 0x2004 | 0x04 | RW | T | VRSAVE |
++--------+-------+----+--------+----------------------------------+
+| 0x2005 | 0x04 | RW | T | DAWRX0 |
++--------+-------+----+--------+----------------------------------+
+| 0x2006 | 0x04 | RW | T | DAWRX1 |
++--------+-------+----+--------+----------------------------------+
+| 0x2007-| 0x04 | RW | T | PMC 1-6 |
+| 0x200c | | | | |
++--------+-------+----+--------+----------------------------------+
+| 0x200D | 0x04 | RW | T | WORT |
++--------+-------+----+--------+----------------------------------+
+| 0x200E | 0x04 | RW | T | PSPB |
++--------+-------+----+--------+----------------------------------+
+| 0x200F-| | | | Reserved |
+| 0x2FFF | | | | |
++--------+-------+----+--------+----------------------------------+
+| 0x3000-| 0x10 | RW | T | VSR 0-63 |
+| 0x303F | | | | |
++--------+-------+----+--------+----------------------------------+
+| 0x3040-| | | | Reserved |
+| 0xEFFF | | | | |
++--------+-------+----+--------+----------------------------------+
+| 0xF000 | 0x08 | R | T | HDAR |
++--------+-------+----+--------+----------------------------------+
+| 0xF001 | 0x04 | R | T | HDSISR |
++--------+-------+----+--------+----------------------------------+
+| 0xF002 | 0x04 | R | T | HEIR |
++--------+-------+----+--------+----------------------------------+
+| 0xF003 | 0x08 | R | T | ASDR |
++--------+-------+----+--------+----------------------------------+
+
+
+Miscellaneous info
+==================
+
+State not in ptregs/hvregs
+--------------------------
+
+In the v1 API, some state is not in the ptregs/hvregs. This includes
+the vector registers and some SPRs. For the L1 to set this state for
+the L2, the L1 loads up these hardware registers before the
+h_enter_nested() call and the L0 ensures they end up as the L2 state
+(by not touching them).
+
+The v2 API removes this and explicitly sets this state via the GSB.
+
+L1 Implementation details: Caching state
+----------------------------------------
+
+In the v1 API, all state is sent from the L1 to the L0 and vice versa
+on every h_enter_nested() hcall. If the L0 is not currently running
+any L2s, the L0 has no state information about them. The only
+exception to this is the location of the partition table, registered
+via h_set_partition_table().
+
+The v2 API changes this so that the L0 retains the L2 state even when
+its vCPUs are no longer running. This means that the L1 only needs to
+communicate with the L0 about L2 state when it needs to modify the L2
+state, or when its value is out of date. This provides an opportunity
+for performance optimisation.
+
+When a vCPU exits from an H_GUEST_RUN_VCPU() call, the L1 internally
+marks all L2 state as invalid. This means that if the L1 wants to know
+the L2 state (say via a kvm_get_one_reg() call), it needs to call
+H_GUEST_GET_STATE() to get that state. Once it's read, it's marked as
+valid in L1 until the L2 is run again.
+
+Also, when an L1 modifies L2 vcpu state, it doesn't need to write it
+to the L0 until that L2 vcpu runs again. Hence when the L1 updates
+state (say via a kvm_set_one_reg() call), it writes to an internal L1
+copy and only flushes this copy to the L0 when the L2 runs again via
+the H_GUEST_VCPU_RUN() input buffer.
+
+This lazy updating of state by the L1 avoids unnecessary
+H_GUEST_{G|S}ET_STATE() calls.
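+
+As a purely illustrative sketch of this strategy (the types and helper
+below are made up for illustration, not actual KVM symbols), the L1
+side can be thought of as::
+
+  #include <stdint.h>
+
+  /* Hypothetical wrapper around H_GUEST_GET_STATE() for one element. */
+  uint64_t h_guest_get_state_one(uint16_t gsb_id);
+
+  enum l2_reg_state { L2_REG_INVALID, L2_REG_VALID, L2_REG_DIRTY };
+
+  struct l2_reg {
+          uint64_t value;
+          enum l2_reg_state state;
+  };
+
+  /* On return from H_GUEST_VCPU_RUN(): anything may have changed. */
+  void on_vcpu_exit(struct l2_reg *regs, int nregs)
+  {
+          for (int i = 0; i < nregs; i++)
+                  regs[i].state = L2_REG_INVALID;
+  }
+
+  /* kvm_get_one_reg() path: fetch from the L0 only if stale. */
+  uint64_t l1_get_reg(struct l2_reg *reg, uint16_t gsb_id)
+  {
+          if (reg->state == L2_REG_INVALID) {
+                  reg->value = h_guest_get_state_one(gsb_id);
+                  reg->state = L2_REG_VALID;
+          }
+          return reg->value;
+  }
+
+  /* kvm_set_one_reg() path: update the local copy only; it is flushed
+   * to the L0 via the H_GUEST_VCPU_RUN() input GSB (or an explicit
+   * H_GUEST_SET_STATE()) before the vCPU next runs. */
+  void l1_set_reg(struct l2_reg *reg, uint64_t val)
+  {
+          reg->value = val;
+          reg->state = L2_REG_DIRTY;
+  }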
diff --git a/Documentation/arch/riscv/hwprobe.rst b/Documentation/arch/riscv/hwprobe.rst
index a52996b22f75..7b2384de471f 100644
--- a/Documentation/arch/riscv/hwprobe.rst
+++ b/Documentation/arch/riscv/hwprobe.rst
@@ -77,6 +77,9 @@ The following keys are defined:
* :c:macro:`RISCV_HWPROBE_EXT_ZBS`: The Zbs extension is supported, as defined
in version 1.0 of the Bit-Manipulation ISA extensions.
+ * :c:macro:`RISCV_HWPROBE_EXT_ZICBOZ`: The Zicboz extension is supported, as
+ ratified in commit 3dd606f ("Create cmobase-v1.0.pdf") of riscv-CMOs.
+
* :c:macro:`RISCV_HWPROBE_KEY_CPUPERF_0`: A bitmask that contains performance
information about the selected set of processors.
@@ -96,3 +99,6 @@ The following keys are defined:
* :c:macro:`RISCV_HWPROBE_MISALIGNED_UNSUPPORTED`: Misaligned accesses are
not supported at all and will generate a misaligned address fault.
+
+* :c:macro:`RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE`: An unsigned int which
+ represents the size of the Zicboz block in bytes.
diff --git a/Documentation/arch/riscv/uabi.rst b/Documentation/arch/riscv/uabi.rst
index 8960fac42c40..54d199dce78b 100644
--- a/Documentation/arch/riscv/uabi.rst
+++ b/Documentation/arch/riscv/uabi.rst
@@ -42,6 +42,26 @@ An example string following the order is::
rv64imadc_zifoo_zigoo_zafoo_sbar_scar_zxmbaz_xqux_xrux
+"isa" and "hart isa" lines in /proc/cpuinfo
+-------------------------------------------
+
+The "isa" line in /proc/cpuinfo describes the lowest common denominator of
+RISC-V ISA extensions recognized by the kernel and implemented on all harts. The
+"hart isa" line, in contrast, describes the set of extensions recognized by the
+kernel on the particular hart being described, even if those extensions may not
+be present on all harts in the system.
+
+In both lines, the presence of an extension guarantees only that the hardware
+has the described capability. Additional kernel support or policy changes may be
+required before an extension's capability is fully usable by userspace programs.
+Similarly, for S-mode extensions, presence in one of these lines does not
+guarantee that the kernel is taking advantage of the extension, or that the
+feature will be visible in guest VMs managed by this kernel.
+
+Conversely, the absence of an extension in these lines does not necessarily mean
+the hardware does not support that feature. The running kernel may not recognize
+the extension, or may have deliberately removed it from the listing.
+
Misaligned accesses
-------------------
diff --git a/Documentation/bpf/kfuncs.rst b/Documentation/bpf/kfuncs.rst
index 0d2647fb358d..723408e399ab 100644
--- a/Documentation/bpf/kfuncs.rst
+++ b/Documentation/bpf/kfuncs.rst
@@ -37,16 +37,14 @@ prototype in a header for the wrapper kfunc.
An example is given below::
/* Disables missing prototype warnings */
- __diag_push();
- __diag_ignore_all("-Wmissing-prototypes",
- "Global kfuncs as their definitions will be in BTF");
+ __bpf_kfunc_start_defs();
__bpf_kfunc struct task_struct *bpf_find_get_task_by_vpid(pid_t nr)
{
return find_get_task_by_vpid(nr);
}
- __diag_pop();
+ __bpf_kfunc_end_defs();
A wrapper kfunc is often needed when we need to annotate parameters of the
kfunc. Otherwise one may directly make the kfunc visible to the BPF program by
diff --git a/Documentation/devicetree/bindings/display/renesas,shmobile-lcdc.yaml b/Documentation/devicetree/bindings/display/renesas,shmobile-lcdc.yaml
new file mode 100644
index 000000000000..9816c4cacc7d
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/renesas,shmobile-lcdc.yaml
@@ -0,0 +1,130 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/renesas,shmobile-lcdc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas SH-Mobile LCD Controller (LCDC)
+
+maintainers:
+ - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+properties:
+ compatible:
+ enum:
+ - renesas,r8a7740-lcdc # R-Mobile A1
+ - renesas,sh73a0-lcdc # SH-Mobile AG5
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+ maxItems: 5
+ description:
+ Only the functional clock is mandatory.
+ Some of the optional clocks are model-dependent (e.g. "video" (a.k.a.
+ "vou" or "dv_clk") is available on R-Mobile A1 only).
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: fck
+ - enum: [ media, lclk, hdmi, video ]
+ - enum: [ media, lclk, hdmi, video ]
+ - enum: [ media, lclk, hdmi, video ]
+ - enum: [ media, lclk, hdmi, video ]
+
+ power-domains:
+ maxItems: 1
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: LCD port (R-Mobile A1 and SH-Mobile AG5)
+ unevaluatedProperties: false
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: HDMI port (R-Mobile A1 LCDC1 and SH-Mobile AG5)
+ unevaluatedProperties: false
+
+ port@2:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: MIPI-DSI port (SH-Mobile AG5)
+ unevaluatedProperties: false
+
+ required:
+ - port@0
+
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - power-domains
+ - ports
+
+additionalProperties: false
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,r8a7740-lcdc
+ then:
+ properties:
+ ports:
+ properties:
+ port@2: false
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,sh73a0-lcdc
+ then:
+ properties:
+ ports:
+ required:
+ - port@1
+ - port@2
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7740-clock.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ lcd-controller@fe940000 {
+ compatible = "renesas,r8a7740-lcdc";
+ reg = <0xfe940000 0x4000>;
+ interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp1_clks R8A7740_CLK_LCDC0>,
+ <&cpg_clocks R8A7740_CLK_M3>, <&lcdlclk0_clk>,
+ <&vou_clk>;
+ clock-names = "fck", "media", "lclk", "video";
+ power-domains = <&pd_a4lc>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ lcdc0_rgb: endpoint {
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/solomon,ssd132x.yaml b/Documentation/devicetree/bindings/display/solomon,ssd132x.yaml
index 0aa41bd9ddca..37975ee61c5a 100644
--- a/Documentation/devicetree/bindings/display/solomon,ssd132x.yaml
+++ b/Documentation/devicetree/bindings/display/solomon,ssd132x.yaml
@@ -11,10 +11,10 @@ maintainers:
properties:
compatible:
- - enum:
- - solomon,ssd1322
- - solomon,ssd1325
- - solomon,ssd1327
+ enum:
+ - solomon,ssd1322
+ - solomon,ssd1325
+ - solomon,ssd1327
required:
- compatible
diff --git a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml
index f61145c91b6d..88d0de3d1b46 100644
--- a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml
+++ b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml
@@ -69,6 +69,8 @@ properties:
dma-channel-mask:
maxItems: 1
+ dma-coherent: true
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/eeprom/at24.yaml b/Documentation/devicetree/bindings/eeprom/at24.yaml
index 98139489d4b5..b6864d0ee81e 100644
--- a/Documentation/devicetree/bindings/eeprom/at24.yaml
+++ b/Documentation/devicetree/bindings/eeprom/at24.yaml
@@ -12,6 +12,7 @@ maintainers:
allOf:
- $ref: /schemas/nvmem/nvmem.yaml
+ - $ref: /schemas/nvmem/nvmem-deprecated-cells.yaml
select:
properties:
@@ -68,10 +69,14 @@ properties:
- items:
pattern: c32$
- items:
+ pattern: c32d-wl$
+ - items:
pattern: cs32$
- items:
pattern: c64$
- items:
+ pattern: c64d-wl$
+ - items:
pattern: cs64$
- items:
pattern: c128$
diff --git a/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt b/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt
deleted file mode 100644
index 86b2e433a969..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt
+++ /dev/null
@@ -1,135 +0,0 @@
-Pinctrl-based I2C Bus DeMux
-
-This binding describes an I2C bus demultiplexer that uses pin multiplexing to
-route the I2C signals, and represents the pin multiplexing configuration using
-the pinctrl device tree bindings. This may be used to select one I2C IP core at
-runtime which may have a better feature set for a given task than another I2C
-IP core on the SoC. The most simple example is to fall back to GPIO bitbanging
-if your current runtime configuration hits an errata of the internal IP core.
-
- +-------------------------------+
- | SoC |
- | | +-----+ +-----+
- | +------------+ | | dev | | dev |
- | |I2C IP Core1|--\ | +-----+ +-----+
- | +------------+ \-------+ | | |
- | |Pinctrl|--|------+--------+
- | +------------+ +-------+ |
- | |I2C IP Core2|--/ |
- | +------------+ |
- | |
- +-------------------------------+
-
-Required properties:
-- compatible: "i2c-demux-pinctrl"
-- i2c-parent: List of phandles of I2C masters available for selection. The first
- one will be used as default.
-- i2c-bus-name: The name of this bus. Also needed as pinctrl-name for the I2C
- parents.
-
-Furthermore, I2C mux properties and child nodes. See i2c-mux.yaml in this
-directory.
-
-Example:
-
-Here is a snipplet for a bus to be demuxed. It contains various i2c clients for
-HDMI, so the bus is named "i2c-hdmi":
-
- i2chdmi: i2c@8 {
-
- compatible = "i2c-demux-pinctrl";
- i2c-parent = <&gpioi2c>, <&iic2>, <&i2c2>;
- i2c-bus-name = "i2c-hdmi";
- #address-cells = <1>;
- #size-cells = <0>;
-
- ak4643: sound-codec@12 {
- compatible = "asahi-kasei,ak4643";
-
- #sound-dai-cells = <0>;
- reg = <0x12>;
- };
-
- composite-in@20 {
- compatible = "adi,adv7180";
- reg = <0x20>;
- remote = <&vin1>;
-
- port {
- adv7180: endpoint {
- bus-width = <8>;
- remote-endpoint = <&vin1ep0>;
- };
- };
- };
-
- hdmi@39 {
- compatible = "adi,adv7511w";
- reg = <0x39>;
- interrupt-parent = <&gpio1>;
- interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
-
- adi,input-depth = <8>;
- adi,input-colorspace = "rgb";
- adi,input-clock = "1x";
- adi,input-style = <1>;
- adi,input-justification = "evenly";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
- adv7511_in: endpoint {
- remote-endpoint = <&du_out_lvds0>;
- };
- };
-
- port@1 {
- reg = <1>;
- adv7511_out: endpoint {
- remote-endpoint = <&hdmi_con>;
- };
- };
- };
- };
- };
-
-And for clarification, here are the snipplets for the i2c-parents:
-
- gpioi2c: i2c@9 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "i2c-gpio";
- gpios = <&gpio5 6 GPIO_ACTIVE_HIGH /* sda */
- &gpio5 5 GPIO_ACTIVE_HIGH /* scl */
- >;
- i2c-gpio,delay-us = <5>;
- };
-
-...
-
-&i2c2 {
- pinctrl-0 = <&i2c2_pins>;
- pinctrl-names = "i2c-hdmi";
-
- clock-frequency = <100000>;
-};
-
-...
-
-&iic2 {
- pinctrl-0 = <&iic2_pins>;
- pinctrl-names = "i2c-hdmi";
-
- clock-frequency = <100000>;
-};
-
-Please note:
-
-- pinctrl properties for the parent I2C controllers need a pinctrl state
- with the same name as i2c-bus-name, not "default"!
-
-- the i2c masters must have their status "disabled". This driver will
- enable them at runtime when needed.
diff --git a/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.yaml b/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.yaml
new file mode 100644
index 000000000000..2c08f2a7cf1e
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.yaml
@@ -0,0 +1,172 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/i2c-demux-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Pinctrl-based I2C Bus Demultiplexer
+
+maintainers:
+ - Wolfram Sang <wsa+renesas@sang-engineering.com>
+
+description: |
+ This binding describes an I2C bus demultiplexer that uses pin multiplexing to
+ route the I2C signals, and represents the pin multiplexing configuration
+ using the pinctrl device tree bindings. This may be used to select one I2C
+ IP core at runtime which may have a better feature set for a given task than
+ another I2C IP core on the SoC. The simplest example is to fall back to
+ GPIO bitbanging if your current runtime configuration hits an erratum of the
+ internal IP core.
+
+ +-------------------------------+
+ | SoC |
+ | | +-----+ +-----+
+ | +------------+ | | dev | | dev |
+ | |I2C IP Core1|--\ | +-----+ +-----+
+ | +------------+ \-------+ | | |
+ | |Pinctrl|--|------+--------+
+ | +------------+ +-------+ |
+ | |I2C IP Core2|--/ |
+ | +------------+ |
+ | |
+ +-------------------------------+
+
+allOf:
+ - $ref: i2c-mux.yaml
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+
+properties:
+ compatible:
+ const: i2c-demux-pinctrl
+
+ i2c-parent:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ List of phandles of I2C masters available for selection. The first one
+ will be used as default.
+
+ i2c-bus-name:
+ $ref: /schemas/types.yaml#/definitions/string
+ description:
+ The name of this bus. Also needed as pinctrl-name for the I2C parents.
+
+required:
+ - compatible
+ - i2c-parent
+ - i2c-bus-name
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ gpioi2c2: i2c-9 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "i2c-gpio";
+ scl-gpios = <&gpio5 5 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio5 6 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ i2c-gpio,delay-us = <5>;
+
+ // The I2C controller must have its status "disabled". The I2C bus
+ // demultiplexer will enable it at runtime when needed.
+ status = "disabled";
+ };
+
+ iic2: i2c@e6520000 {
+ reg = <0xe6520000 0x425>;
+ pinctrl-0 = <&iic2_pins>;
+ // The pinctrl property for the parent I2C controller needs a pinctrl
+ // state with the same name as i2c-bus-name in the I2C bus demultiplexer
+ // node, not "default"!
+ pinctrl-names = "i2c-hdmi";
+
+ clock-frequency = <100000>;
+
+ // The I2C controller must have its status "disabled". The I2C bus
+ // demultiplexer will enable it at runtime when needed.
+ status = "disabled";
+ };
+
+ i2c2: i2c@e6530000 {
+ reg = <0 0xe6530000 0 0x40>;
+ pinctrl-0 = <&i2c2_pins>;
+ // The pinctrl property for the parent I2C controller needs a pinctrl
+ // state with the same name as i2c-bus-name in the I2C bus demultiplexer
+ // node, not "default"!
+ pinctrl-names = "i2c-hdmi";
+
+ clock-frequency = <100000>;
+
+ // The I2C controller must have its status "disabled". The I2C bus
+ // demultiplexer will enable it at runtime when needed.
+ status = "disabled";
+ };
+
+ // Example for a bus to be demuxed. It contains various I2C clients for
+ // HDMI, so the bus is named "i2c-hdmi":
+ i2chdmi: i2c-mux3 {
+ compatible = "i2c-demux-pinctrl";
+ i2c-parent = <&iic2>, <&i2c2>, <&gpioi2c2>;
+ i2c-bus-name = "i2c-hdmi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ak4643: codec@12 {
+ compatible = "asahi-kasei,ak4643";
+ #sound-dai-cells = <0>;
+ reg = <0x12>;
+ };
+
+ composite-in@20 {
+ compatible = "adi,adv7180";
+ reg = <0x20>;
+
+ port {
+ adv7180: endpoint {
+ bus-width = <8>;
+ remote-endpoint = <&vin1ep0>;
+ };
+ };
+ };
+
+ hdmi@39 {
+ compatible = "adi,adv7511w";
+ reg = <0x39>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&cec_clock>;
+ clock-names = "cec";
+
+ avdd-supply = <&fixedregulator1v8>;
+ dvdd-supply = <&fixedregulator1v8>;
+ pvdd-supply = <&fixedregulator1v8>;
+ dvdd-3v-supply = <&fixedregulator3v3>;
+ bgvdd-supply = <&fixedregulator1v8>;
+
+ adi,input-depth = <8>;
+ adi,input-colorspace = "rgb";
+ adi,input-clock = "1x";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ adv7511_in: endpoint {
+ remote-endpoint = <&lvds0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ adv7511_out: endpoint {
+ remote-endpoint = <&hdmi_con_out>;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml b/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
index 042d4dc636ee..8386cfe21532 100644
--- a/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
+++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
@@ -25,6 +25,7 @@ properties:
- items:
- enum:
+ - qcom,sc7280-cci
- qcom,sdm845-cci
- qcom,sm6350-cci
- qcom,sm8250-cci
@@ -159,6 +160,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,sc7280-cci
- qcom,sm8250-cci
- qcom,sm8450-cci
then:
diff --git a/Documentation/devicetree/bindings/i3c/i3c.yaml b/Documentation/devicetree/bindings/i3c/i3c.yaml
index d9483fbd2454..c816e295d565 100644
--- a/Documentation/devicetree/bindings/i3c/i3c.yaml
+++ b/Documentation/devicetree/bindings/i3c/i3c.yaml
@@ -125,12 +125,12 @@ patternProperties:
minimum: 0
maximum: 0x7f
- description: |
- First half of the Provisional ID (following the PID
+ First half of the Provisioned ID (following the PID
definition provided by the I3C specification).
Contains the manufacturer ID left-shifted by 1.
- description: |
- Second half of the Provisional ID (following the PID
+ Second half of the Provisioned ID (following the PID
definition provided by the I3C specification).
Contains the ORing of the part ID left-shifted by 16,
diff --git a/Documentation/devicetree/bindings/iio/accel/kionix,kx022a.yaml b/Documentation/devicetree/bindings/iio/accel/kionix,kx022a.yaml
index 986df1a6ff0a..66ea894dbe55 100644
--- a/Documentation/devicetree/bindings/iio/accel/kionix,kx022a.yaml
+++ b/Documentation/devicetree/bindings/iio/accel/kionix,kx022a.yaml
@@ -4,19 +4,23 @@
$id: http://devicetree.org/schemas/iio/accel/kionix,kx022a.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: ROHM/Kionix KX022A Accelerometer
+title: ROHM/Kionix KX022A, KX132-1211 and KX132ACR-LBZ Accelerometers
maintainers:
- Matti Vaittinen <mazziesaccount@gmail.com>
description: |
- KX022A is a 3-axis accelerometer supporting +/- 2G, 4G, 8G and 16G ranges,
- output data-rates from 0.78Hz to 1600Hz and a hardware-fifo buffering.
- KX022A can be accessed either via I2C or SPI.
+ KX022A, KX132ACR-LBZ and KX132-1211 are 3-axis accelerometers supporting
+ +/- 2G, 4G, 8G and 16G ranges, variable output data-rates and a
+ hardware-fifo buffering. These accelerometers can be accessed either
+ via I2C or SPI.
properties:
compatible:
- const: kionix,kx022a
+ enum:
+ - kionix,kx022a
+ - kionix,kx132-1211
+ - rohm,kx132acr-lbz
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/adc/lltc,ltc2497.yaml b/Documentation/devicetree/bindings/iio/adc/lltc,ltc2497.yaml
index 875f394576c2..5cc6a9684077 100644
--- a/Documentation/devicetree/bindings/iio/adc/lltc,ltc2497.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/lltc,ltc2497.yaml
@@ -4,21 +4,31 @@
$id: http://devicetree.org/schemas/iio/adc/lltc,ltc2497.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Linear Technology / Analog Devices LTC2497 ADC
+title: Linear Technology / Analog Devices LTC2497 and LTC2309 ADC
maintainers:
- Michael Hennerich <michael.hennerich@analog.com>
+ - Liam Beguin <liambeguin@gmail.com>
description: |
- 16bit ADC supporting up to 16 single ended or 8 differential inputs.
- I2C interface.
+ LTC2309:
+ low noise, low power, 8-channel, 12-bit successive approximation ADC with an
+ I2C compatible serial interface.
- https://www.analog.com/media/en/technical-documentation/data-sheets/2497fb.pdf
- https://www.analog.com/media/en/technical-documentation/data-sheets/2499fe.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/2309fd.pdf
+
+ LTC2497:
+ LTC2499:
+ 16bit ADC supporting up to 16 single ended or 8 differential inputs.
+ I2C interface.
+
+ https://www.analog.com/media/en/technical-documentation/data-sheets/2497fb.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/2499fe.pdf
properties:
compatible:
enum:
+ - lltc,ltc2309
- lltc,ltc2497
- lltc,ltc2499
diff --git a/Documentation/devicetree/bindings/iio/adc/microchip,mcp3564.yaml b/Documentation/devicetree/bindings/iio/adc/microchip,mcp3564.yaml
new file mode 100644
index 000000000000..675319276197
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/microchip,mcp3564.yaml
@@ -0,0 +1,205 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/microchip,mcp3564.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip MCP346X and MCP356X ADC Family
+
+maintainers:
+ - Marius Cristea <marius.cristea@microchip.com>
+
+description: |
+ Bindings for the Microchip family of 153.6 ksps, Low-Noise 16/24-Bit
+ Delta-Sigma ADCs with an SPI interface.
+ Datasheet for MCP3561, MCP3562, MCP3564 can be found here:
+ https://ww1.microchip.com/downloads/aemDocuments/documents/MSLD/ProductDocuments/DataSheets/MCP3561-2-4-Family-Data-Sheet-DS20006181C.pdf
+ Datasheet for MCP3561R, MCP3562R, MCP3564R can be found here:
+ https://ww1.microchip.com/downloads/aemDocuments/documents/APID/ProductDocuments/DataSheets/MCP3561_2_4R-Data-Sheet-DS200006391C.pdf
+ Datasheet for MCP3461, MCP3462, MCP3464 can be found here:
+ https://ww1.microchip.com/downloads/aemDocuments/documents/APID/ProductDocuments/DataSheets/MCP3461-2-4-Two-Four-Eight-Channel-153.6-ksps-Low-Noise-16-Bit-Delta-Sigma-ADC-Data-Sheet-20006180D.pdf
+ Datasheet for MCP3461R, MCP3462R, MCP3464R can be found here:
+ https://ww1.microchip.com/downloads/aemDocuments/documents/APID/ProductDocuments/DataSheets/MCP3461-2-4R-Family-Data-Sheet-DS20006404C.pdf
+
+properties:
+ compatible:
+ enum:
+ - microchip,mcp3461
+ - microchip,mcp3462
+ - microchip,mcp3464
+ - microchip,mcp3461r
+ - microchip,mcp3462r
+ - microchip,mcp3464r
+ - microchip,mcp3561
+ - microchip,mcp3562
+ - microchip,mcp3564
+ - microchip,mcp3561r
+ - microchip,mcp3562r
+ - microchip,mcp3564r
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 20000000
+
+ spi-cpha: true
+
+ spi-cpol: true
+
+ vdd-supply: true
+
+ avdd-supply: true
+
+ clocks:
+ description:
+ Phandle and clock identifier for external sampling clock.
+ If not specified, the internal crystal oscillator will be used.
+ maxItems: 1
+
+ interrupts:
+ description: IRQ line of the ADC
+ maxItems: 1
+
+ drive-open-drain:
+ description:
+ Whether to drive the IRQ signal as push-pull (default) or open-drain. Note
+ that the device requires this pin to become "high", otherwise it will stop
+ converting.
+ type: boolean
+
+ vref-supply:
+ description:
+ Some devices have a specific reference voltage supplied on a different
+ pin to the other supplies. Needed to be able to establish channel scaling
+ unless there is also an internal reference available (e.g. mcp3564r). In
+ case of "r" devices (e. g. mcp3564r), if it does not exists the internal
+ reference will be used.
+
+ microchip,hw-device-address:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 3
+ description:
+ The address is set on a per-device basis by fuses in the factory,
+ configured on request. If not requested, the fuses are set to 0x1.
+ The device address is part of the device markings to avoid
+ potential confusion. This address is coded on two bits, so four possible
+ addresses are available when multiple devices are present on the same
+ SPI bus with only one Chip Select line for all devices.
+ Each device communication starts by a CS falling edge, followed by the
+ clocking of the device address (BITS[7:6] - top two bits of COMMAND BYTE
+ which is first one on the wire).
+
+ "#io-channel-cells":
+ const: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+patternProperties:
+ "^channel@([0-9]|([1-7][0-9]))$":
+ $ref: adc.yaml
+ type: object
+ unevaluatedProperties: false
+ description: Represents the external channels which are connected to the ADC.
+
+ properties:
+ reg:
+ description: The channel number in single-ended and differential mode.
+ minimum: 0
+ maximum: 79
+
+ required:
+ - reg
+
+dependencies:
+ spi-cpol: [ spi-cpha ]
+ spi-cpha: [ spi-cpol ]
+
+required:
+ - compatible
+ - reg
+ - microchip,hw-device-address
+ - spi-max-frequency
+
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+ - # External vref, no internal reference
+ if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - microchip,mcp3461
+ - microchip,mcp3462
+ - microchip,mcp3464
+ - microchip,mcp3561
+ - microchip,mcp3562
+ - microchip,mcp3564
+ then:
+ required:
+ - vref-supply
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc@0 {
+ compatible = "microchip,mcp3564r";
+ reg = <0>;
+ vref-supply = <&vref_reg>;
+ spi-cpha;
+ spi-cpol;
+ spi-max-frequency = <10000000>;
+ microchip,hw-device-address = <1>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@0 {
+ /* CH0 to AGND */
+ reg = <0>;
+ label = "CH0";
+ };
+
+ channel@1 {
+ /* CH1 to AGND */
+ reg = <1>;
+ label = "CH1";
+ };
+
+ /* diff-channels */
+ channel@11 {
+ reg = <11>;
+
+ /* CN0, CN1 */
+ diff-channels = <0 1>;
+ label = "CH0_CH1";
+ };
+
+ channel@22 {
+ reg = <0x22>;
+
+ /* CN1, CN2 */
+ diff-channels = <1 2>;
+ label = "CH1_CH3";
+ };
+
+ channel@23 {
+ reg = <0x23>;
+
+ /* CN1, CN3 */
+ diff-channels = <1 3>;
+ label = "CH1_CH3";
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml b/Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml
index f7b3fde4115a..06951ec5f5da 100644
--- a/Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml
@@ -18,7 +18,13 @@ description: |
properties:
compatible:
enum:
+ - microchip,mcp3910
- microchip,mcp3911
+ - microchip,mcp3912
+ - microchip,mcp3913
+ - microchip,mcp3914
+ - microchip,mcp3918
+ - microchip,mcp3919
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/adc/ti,ads1015.yaml b/Documentation/devicetree/bindings/iio/adc/ti,ads1015.yaml
index e004659099c1..d605999ffe28 100644
--- a/Documentation/devicetree/bindings/iio/adc/ti,ads1015.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/ti,ads1015.yaml
@@ -23,6 +23,9 @@ properties:
reg:
maxItems: 1
+ interrupts:
+ maxItems: 1
+
"#address-cells":
const: 1
diff --git a/Documentation/devicetree/bindings/iio/adc/ti,twl6030-gpadc.yaml b/Documentation/devicetree/bindings/iio/adc/ti,twl6030-gpadc.yaml
new file mode 100644
index 000000000000..e779a8986e0b
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/ti,twl6030-gpadc.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/ti,twl6030-gpadc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: GPADC subsystem in the TWL6030 power module
+
+maintainers:
+ - Andreas Kemnade <andreas@kemnade.info>
+
+description:
+ The GPADC subsystem in the TWL603X consists of a 10-bit ADC
+ combined with a 15-input analog multiplexer in the TWL6030, or a
+ 19-input analog multiplexer in the TWL6032.
+
+properties:
+ compatible:
+ enum:
+ - ti,twl6030-gpadc
+ - ti,twl6032-gpadc
+
+ interrupts:
+ maxItems: 1
+
+ "#io-channel-cells":
+ const: 1
+
+required:
+ - compatible
+ - interrupts
+ - "#io-channel-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ gpadc {
+ compatible = "ti,twl6030-gpadc";
+ interrupts = <3>;
+ #io-channel-cells = <1>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/amplifiers/adi,hmc425a.yaml b/Documentation/devicetree/bindings/iio/amplifiers/adi,hmc425a.yaml
index 9fda56fa49c3..2ee6080deac7 100644
--- a/Documentation/devicetree/bindings/iio/amplifiers/adi,hmc425a.yaml
+++ b/Documentation/devicetree/bindings/iio/amplifiers/adi,hmc425a.yaml
@@ -4,20 +4,26 @@
$id: http://devicetree.org/schemas/iio/amplifiers/adi,hmc425a.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: HMC425A 6-bit Digital Step Attenuator
+title: Analog Devices HMC425A and similar Digital Step Attenuators
maintainers:
- Michael Hennerich <michael.hennerich@analog.com>
description: |
- Digital Step Attenuator IIO device with gpio interface.
+ Digital Step Attenuator IIO devices with gpio interface.
+ Offer various frequency and attenuation ranges.
HMC425A 0.5 dB LSB GaAs MMIC 6-BIT DIGITAL POSITIVE CONTROL ATTENUATOR, 2.2 - 8.0 GHz
- https://www.analog.com/media/en/technical-documentation/data-sheets/hmc425A.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/hmc425A.pdf
+
+ HMC540S 1 dB LSB Silicon MMIC 4-Bit Digital Positive Control Attenuator, 0.1 - 8 GHz
+ https://www.analog.com/media/en/technical-documentation/data-sheets/hmc540s.pdf
+
properties:
compatible:
enum:
- adi,hmc425a
+ - adi,hmc540s
vcc-supply: true
diff --git a/Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml b/Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml
index 1db6952ddca5..297b8a1a7ffb 100644
--- a/Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml
+++ b/Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml
@@ -48,6 +48,11 @@ properties:
mount-matrix: true
+ invensense,level-shifter:
+ type: boolean
+ description: |
+ From ancient platform data struct: false: VLogic, true: VDD
+
i2c-gate:
$ref: /schemas/i2c/i2c-controller.yaml
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml b/Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml
index ee8724ad33ab..28b667a9cb76 100644
--- a/Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml
+++ b/Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml
@@ -93,6 +93,9 @@ properties:
wakeup-source:
$ref: /schemas/types.yaml#/definitions/flag
+ mount-matrix:
+ description: an optional 3x3 mounting rotation matrix
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/iio/pressure/rohm,bm1390.yaml b/Documentation/devicetree/bindings/iio/pressure/rohm,bm1390.yaml
new file mode 100644
index 000000000000..7c4ca6322bf8
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/pressure/rohm,bm1390.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/pressure/rohm,bm1390.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ROHM BM1390 pressure sensor
+
+maintainers:
+ - Matti Vaittinen <mazziesaccount@gmail.com>
+
+description:
+ BM1390GLV-Z is a pressure sensor which performs internal temperature
+ compensation for the MEMS. Pressure range is from 300 hPa to 1300 hPa
+ and sample averaging and IIR filtering is built in. Temperature
+ measurement is also supported.
+
+properties:
+ compatible:
+ const: rohm,bm1390glv-z
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ vdd-supply: true
+
+required:
+ - compatible
+ - reg
+ - vdd-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pressure-sensor@5d {
+ compatible = "rohm,bm1390glv-z";
+ reg = <0x5d>;
+
+ interrupt-parent = <&gpio1>;
+ interrupts = <29 IRQ_TYPE_LEVEL_LOW>;
+
+ vdd-supply = <&vdd>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/resolver/adi,ad2s1210.yaml b/Documentation/devicetree/bindings/iio/resolver/adi,ad2s1210.yaml
new file mode 100644
index 000000000000..8980b3cd8337
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/resolver/adi,ad2s1210.yaml
@@ -0,0 +1,177 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/resolver/adi,ad2s1210.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices AD2S1210 Resolver-to-Digital Converter
+
+maintainers:
+ - Michael Hennerich <michael.hennerich@analog.com>
+
+description: |
+ The AD2S1210 is a complete 10-bit to 16-bit resolution tracking
+ resolver-to-digital converter, integrating an on-board programmable
+ sinusoidal oscillator that provides sine wave excitation for
+ resolvers.
+
+ The AD2S1210 allows the user to read the angular position or the
+ angular velocity data directly from the parallel outputs or through
+ the serial interface.
+
+ The mode of operation of the communication channel (parallel or serial) is
+ selected by the A0 and A1 input pins. In normal mode, data is latched by
+ toggling the SAMPLE line and can then be read directly. In configuration mode,
+ data is read or written using a register access scheme (address byte with
+ read/write flag and data byte).
+
+ A1 A0 Result
+ 0 0 Normal mode - position output
+ 0 1 Normal mode - velocity output
+ 1 0 Reserved
+ 1 1 Configuration mode
+
+ In normal mode, the resolution of the digital output is selected using
+ the RES0 and RES1 input pins. In configuration mode, the resolution is
+ selected by setting the RES0 and RES1 bits in the control register.
+
+ RES1 RES0 Resolution (Bits)
+ 0 0 10
+ 0 1 12
+ 1 0 14
+ 1 1 16
+
+ Note on SPI connections: The CS line on the AD2S1210 should be hard-wired to
+ logic low and the WR/FSYNC line on the AD2S1210 should be connected to the
+ SPI CSn output of the SPI controller.
+
+ Datasheet:
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad2s1210.pdf
+
+properties:
+ compatible:
+ const: adi,ad2s1210
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 25000000
+
+ spi-cpha: true
+
+ avdd-supply:
+ description:
+ A 4.75 to 5.25 V regulator that powers the Analog Supply Voltage (AVDD)
+ pin.
+
+ dvdd-supply:
+ description:
+ A 4.75 to 5.25 V regulator that powers the Digital Supply Voltage (DVDD)
+ pin.
+
+ vdrive-supply:
+ description:
+ A 2.3 to 5.25 V regulator that powers the Logic Power Supply Input
+ (VDrive) pin.
+
+ clocks:
+ maxItems: 1
+ description: External oscillator clock (CLKIN).
+
+ reset-gpios:
+ description:
+ GPIO connected to the /RESET pin. As the line needs to be low for the
+ reset to be active, it should be configured as GPIO_ACTIVE_LOW.
+ maxItems: 1
+
+ sample-gpios:
+ description:
+ GPIO connected to the /SAMPLE pin. As the line needs to be low to trigger
+ a sample, it should be configured as GPIO_ACTIVE_LOW.
+ maxItems: 1
+
+ mode-gpios:
+ description:
+ GPIO lines connected to the A0 and A1 pins. These pins select the data
+ transfer mode.
+ minItems: 2
+ maxItems: 2
+
+ resolution-gpios:
+ description:
+ GPIO lines connected to the RES0 and RES1 pins. These pins select the
+ resolution of the digital output. If omitted, it is assumed that the
+ RES0 and RES1 pins are hard-wired to match the assigned-resolution-bits
+ property.
+ minItems: 2
+ maxItems: 2
+
+ fault-gpios:
+ description:
+ GPIO lines connected to the LOT and DOS pins. These pins combined indicate
+ the type of fault present, if any. As these pins are pulled low to indicate
+ a fault condition, they should be configured as GPIO_ACTIVE_LOW.
+ minItems: 2
+ maxItems: 2
+
+ adi,fixed-mode:
+ description:
+ This is used to indicate the selected mode if A0 and A1 are hard-wired
+ instead of connected to GPIOs (i.e. mode-gpios is omitted).
+ $ref: /schemas/types.yaml#/definitions/string
+ enum: [config, velocity, position]
+
+ assigned-resolution-bits:
+ description:
+ Resolution of the digital output required by the application. This
+ determines the precision of the angle and/or the maximum speed that can
+ be measured. If resolution-gpios is omitted, it is assumed that RES0 and
+ RES1 are hard-wired to match this value.
+ enum: [10, 12, 14, 16]
+
+required:
+ - compatible
+ - reg
+ - spi-cpha
+ - avdd-supply
+ - dvdd-supply
+ - vdrive-supply
+ - clocks
+ - sample-gpios
+ - assigned-resolution-bits
+
+oneOf:
+ - required:
+ - mode-gpios
+ - required:
+ - adi,fixed-mode
+
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ resolver@0 {
+ compatible = "adi,ad2s1210";
+ reg = <0>;
+ spi-max-frequency = <20000000>;
+ spi-cpha;
+ avdd-supply = <&avdd_regulator>;
+ dvdd-supply = <&dvdd_regulator>;
+ vdrive-supply = <&vdrive_regulator>;
+ clocks = <&ext_osc>;
+ sample-gpios = <&gpio0 90 GPIO_ACTIVE_LOW>;
+ mode-gpios = <&gpio0 86 0>, <&gpio0 87 0>;
+ resolution-gpios = <&gpio0 88 0>, <&gpio0 89 0>;
+ assigned-resolution-bits = <16>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/input/fsl,scu-key.yaml b/Documentation/devicetree/bindings/input/fsl,scu-key.yaml
index e5a3c355ee1f..29921aab9d97 100644
--- a/Documentation/devicetree/bindings/input/fsl,scu-key.yaml
+++ b/Documentation/devicetree/bindings/input/fsl,scu-key.yaml
@@ -24,6 +24,8 @@ properties:
linux,keycodes:
maxItems: 1
+ wakeup-source: true
+
required:
- compatible
- linux,keycodes
diff --git a/Documentation/devicetree/bindings/input/touchscreen/cypress,tt21000.yaml b/Documentation/devicetree/bindings/input/touchscreen/cypress,tt21000.yaml
index 4080422a9eb5..037e5d3c447f 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/cypress,tt21000.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/cypress,tt21000.yaml
@@ -34,6 +34,9 @@ properties:
vdd-supply:
description: Regulator for voltage.
+ vddio-supply:
+ description: Optional regulator for I/O voltage.
+
reset-gpios:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,msm8939.yaml b/Documentation/devicetree/bindings/interconnect/qcom,msm8939.yaml
new file mode 100644
index 000000000000..fd15ab5014fb
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/qcom,msm8939.yaml
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interconnect/qcom,msm8939.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm MSM8939 Network-On-Chip interconnect
+
+maintainers:
+ - Konrad Dybcio <konradybcio@kernel.org>
+
+description: |
+ The Qualcomm MSM8939 interconnect providers support adjusting the
+ bandwidth requirements between the various NoC fabrics.
+
+allOf:
+ - $ref: qcom,rpm-common.yaml#
+
+properties:
+ compatible:
+ enum:
+ - qcom,msm8939-bimc
+ - qcom,msm8939-pcnoc
+ - qcom,msm8939-snoc
+
+ reg:
+ maxItems: 1
+
+patternProperties:
+ '^interconnect-[a-z0-9\-]+$':
+ type: object
+ $ref: qcom,rpm-common.yaml#
+ description:
+ The interconnect providers do not have a separate QoS register space,
+ but share the parent's space.
+
+ allOf:
+ - $ref: qcom,rpm-common.yaml#
+
+ properties:
+ compatible:
+ const: qcom,msm8939-snoc-mm
+
+ required:
+ - compatible
+
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmcc.h>
+
+ snoc: interconnect@580000 {
+ compatible = "qcom,msm8939-snoc";
+ reg = <0x00580000 0x14000>;
+ #interconnect-cells = <1>;
+ };
+
+ bimc: interconnect@400000 {
+ compatible = "qcom,msm8939-bimc";
+ reg = <0x00400000 0x62000>;
+ #interconnect-cells = <1>;
+
+ snoc_mm: interconnect-snoc {
+ compatible = "qcom,msm8939-snoc-mm";
+ #interconnect-cells = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,msm8996.yaml b/Documentation/devicetree/bindings/interconnect/qcom,msm8996.yaml
new file mode 100644
index 000000000000..e3f964aaad1b
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/qcom,msm8996.yaml
@@ -0,0 +1,126 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interconnect/qcom,msm8996.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm MSM8996 Network-On-Chip interconnect
+
+maintainers:
+ - Konrad Dybcio <konradybcio@kernel.org>
+
+description: |
+ The Qualcomm MSM8996 interconnect providers support adjusting the
+ bandwidth requirements between the various NoC fabrics.
+
+properties:
+ compatible:
+ enum:
+ - qcom,msm8996-a0noc
+ - qcom,msm8996-a1noc
+ - qcom,msm8996-a2noc
+ - qcom,msm8996-bimc
+ - qcom,msm8996-cnoc
+ - qcom,msm8996-mnoc
+ - qcom,msm8996-pnoc
+ - qcom,msm8996-snoc
+
+ reg:
+ maxItems: 1
+
+ clock-names:
+ minItems: 1
+ maxItems: 3
+
+ clocks:
+ minItems: 1
+ maxItems: 3
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+allOf:
+ - $ref: qcom,rpm-common.yaml#
+ - if:
+ properties:
+ compatible:
+ const: qcom,msm8996-a0noc
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: Aggregate0 System NoC AXI Clock.
+ - description: Aggregate0 Config NoC AHB Clock.
+ - description: Aggregate0 NoC MPU Clock.
+
+ clock-names:
+ items:
+ - const: aggre0_snoc_axi
+ - const: aggre0_cnoc_ahb
+ - const: aggre0_noc_mpu_cfg
+
+ required:
+ - power-domains
+
+ - if:
+ properties:
+ compatible:
+ const: qcom,msm8996-mnoc
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: CPU-NoC High-performance Bus Clock.
+
+ clock-names:
+ const: iface
+
+ - if:
+ properties:
+ compatible:
+ const: qcom,msm8996-a2noc
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: Aggregate2 NoC UFS AXI Clock
+ - description: UFS AXI Clock
+
+ clock-names:
+ items:
+ - const: aggre2_ufs_axi
+ - const: ufs_axi
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-msm8996.h>
+ #include <dt-bindings/clock/qcom,mmcc-msm8996.h>
+ #include <dt-bindings/clock/qcom,rpmcc.h>
+
+ bimc: interconnect@408000 {
+ compatible = "qcom,msm8996-bimc";
+ reg = <0x00408000 0x5a000>;
+ #interconnect-cells = <1>;
+ };
+
+ a0noc: interconnect@543000 {
+ compatible = "qcom,msm8996-a0noc";
+ reg = <0x00543000 0x6000>;
+ #interconnect-cells = <1>;
+ clocks = <&gcc GCC_AGGRE0_SNOC_AXI_CLK>,
+ <&gcc GCC_AGGRE0_CNOC_AHB_CLK>,
+ <&gcc GCC_AGGRE0_NOC_MPU_CFG_AHB_CLK>;
+ clock-names = "aggre0_snoc_axi",
+ "aggre0_cnoc_ahb",
+ "aggre0_noc_mpu_cfg";
+ power-domains = <&gcc AGGRE0_NOC_GDSC>;
+ };
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,qcm2290.yaml b/Documentation/devicetree/bindings/interconnect/qcom,qcm2290.yaml
index f65a2fe846de..b6c15314c5c5 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,qcm2290.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,qcm2290.yaml
@@ -13,6 +13,9 @@ description: |
The Qualcomm QCM2290 interconnect providers support adjusting the
bandwidth requirements between the various NoC fabrics.
+allOf:
+ - $ref: qcom,rpm-common.yaml#
+
properties:
reg:
maxItems: 1
@@ -23,19 +26,6 @@ properties:
- qcom,qcm2290-cnoc
- qcom,qcm2290-snoc
- '#interconnect-cells':
- const: 1
-
- clock-names:
- items:
- - const: bus
- - const: bus_a
-
- clocks:
- items:
- - description: Bus Clock
- - description: Bus A Clock
-
# Child node's properties
patternProperties:
'^interconnect-[a-z0-9]+$':
@@ -44,6 +34,9 @@ patternProperties:
The interconnect providers do not have a separate QoS register space,
but share parent's space.
+ allOf:
+ - $ref: qcom,rpm-common.yaml#
+
properties:
compatible:
enum:
@@ -51,35 +44,16 @@ patternProperties:
- qcom,qcm2290-mmrt-virt
- qcom,qcm2290-mmnrt-virt
- '#interconnect-cells':
- const: 1
-
- clock-names:
- items:
- - const: bus
- - const: bus_a
-
- clocks:
- items:
- - description: Bus Clock
- - description: Bus A Clock
-
required:
- compatible
- - '#interconnect-cells'
- - clock-names
- - clocks
- additionalProperties: false
+ unevaluatedProperties: false
required:
- compatible
- reg
- - '#interconnect-cells'
- - clock-names
- - clocks
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
@@ -89,32 +63,20 @@ examples:
compatible = "qcom,qcm2290-snoc";
reg = <0x01880000 0x60200>;
#interconnect-cells = <1>;
- clock-names = "bus", "bus_a";
- clocks = <&rpmcc RPM_SMD_SNOC_CLK>,
- <&rpmcc RPM_SMD_SNOC_A_CLK>;
qup_virt: interconnect-qup {
compatible = "qcom,qcm2290-qup-virt";
#interconnect-cells = <1>;
- clock-names = "bus", "bus_a";
- clocks = <&rpmcc RPM_SMD_QUP_CLK>,
- <&rpmcc RPM_SMD_QUP_A_CLK>;
};
mmnrt_virt: interconnect-mmnrt {
compatible = "qcom,qcm2290-mmnrt-virt";
#interconnect-cells = <1>;
- clock-names = "bus", "bus_a";
- clocks = <&rpmcc RPM_SMD_MMNRT_CLK>,
- <&rpmcc RPM_SMD_MMNRT_A_CLK>;
};
mmrt_virt: interconnect-mmrt {
compatible = "qcom,qcm2290-mmrt-virt";
#interconnect-cells = <1>;
- clock-names = "bus", "bus_a";
- clocks = <&rpmcc RPM_SMD_MMRT_CLK>,
- <&rpmcc RPM_SMD_MMRT_A_CLK>;
};
};
@@ -122,16 +84,10 @@ examples:
compatible = "qcom,qcm2290-cnoc";
reg = <0x01900000 0x8200>;
#interconnect-cells = <1>;
- clock-names = "bus", "bus_a";
- clocks = <&rpmcc RPM_SMD_CNOC_CLK>,
- <&rpmcc RPM_SMD_CNOC_A_CLK>;
};
bimc: interconnect@4480000 {
compatible = "qcom,qcm2290-bimc";
reg = <0x04480000 0x80000>;
#interconnect-cells = <1>;
- clock-names = "bus", "bus_a";
- clocks = <&rpmcc RPM_SMD_BIMC_CLK>,
- <&rpmcc RPM_SMD_BIMC_A_CLK>;
};
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,rpm-common.yaml b/Documentation/devicetree/bindings/interconnect/qcom,rpm-common.yaml
new file mode 100644
index 000000000000..1ea52b091609
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/qcom,rpm-common.yaml
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interconnect/qcom,rpm-common.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm RPM Network-On-Chip Interconnect
+
+maintainers:
+ - Konrad Dybcio <konradybcio@kernel.org>
+
+description:
+ RPM interconnect providers support managing system bandwidth requirements
+ through manual requests, based either on predefined values or on input from
+ the bus monitor hardware. Each provider node represents a NoC bus master,
+ driven by a dedicated clock source.
+
+properties:
+ '#interconnect-cells':
+ oneOf:
+ - const: 2
+ - const: 1
+ deprecated: true
+
+required:
+ - '#interconnect-cells'
+
+additionalProperties: true
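Consumers reference these providers with the preferred two-cell specifier, where the second cell is a path tag. A minimal sketch, assuming placeholder node-ID and tag macros from the SoC-specific interconnect headers:

    interconnects = <&snoc MASTER_QUP_0 RPM_ALWAYS_TAG
                     &bimc SLAVE_EBI_CH0 RPM_ALWAYS_TAG>;
    interconnect-names = "qup-mem";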
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml b/Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml
index 4f95d512012a..08c1c6b9d7cf 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml
@@ -7,13 +7,16 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm RPM Network-On-Chip Interconnect
maintainers:
- - Georgi Djakov <georgi.djakov@linaro.org>
+ - Georgi Djakov <djakov@kernel.org>
description: |
RPM interconnect providers support system bandwidth requirements through
RPM processor. The provider is able to communicate with the RPM through
the RPM shared memory device.
+allOf:
+ - $ref: qcom,rpm-common.yaml#
+
properties:
reg:
maxItems: 1
@@ -23,259 +26,22 @@ properties:
- qcom,msm8916-bimc
- qcom,msm8916-pcnoc
- qcom,msm8916-snoc
- - qcom,msm8939-bimc
- - qcom,msm8939-pcnoc
- - qcom,msm8939-snoc
- - qcom,msm8996-a0noc
- - qcom,msm8996-a1noc
- - qcom,msm8996-a2noc
- - qcom,msm8996-bimc
- - qcom,msm8996-cnoc
- - qcom,msm8996-mnoc
- - qcom,msm8996-pnoc
- - qcom,msm8996-snoc
- qcom,qcs404-bimc
- qcom,qcs404-pcnoc
- qcom,qcs404-snoc
- - qcom,sdm660-a2noc
- - qcom,sdm660-bimc
- - qcom,sdm660-cnoc
- - qcom,sdm660-gnoc
- - qcom,sdm660-mnoc
- - qcom,sdm660-snoc
-
- '#interconnect-cells':
- description: |
- Value: <1> is one cell in an interconnect specifier for the
- interconnect node id, <2> requires the interconnect node id and an
- extra path tag.
- enum: [ 1, 2 ]
-
- clocks:
- minItems: 2
- maxItems: 7
-
- clock-names:
- minItems: 2
- maxItems: 7
-
- power-domains:
- maxItems: 1
-
-# Child node's properties
-patternProperties:
- '^interconnect-[a-z0-9]+$':
- type: object
- additionalProperties: false
- description:
- snoc-mm is a child of snoc, sharing snoc's register address space.
-
- properties:
- compatible:
- enum:
- - qcom,msm8939-snoc-mm
-
- '#interconnect-cells':
- const: 1
-
- clock-names:
- items:
- - const: bus
- - const: bus_a
-
- clocks:
- items:
- - description: Bus Clock
- - description: Bus A Clock
-
- required:
- - compatible
- - '#interconnect-cells'
- - clock-names
- - clocks
required:
- compatible
- reg
- - '#interconnect-cells'
- - clock-names
- - clocks
-
-additionalProperties: false
-
-allOf:
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,msm8916-bimc
- - qcom,msm8916-pcnoc
- - qcom,msm8916-snoc
- - qcom,msm8939-bimc
- - qcom,msm8939-pcnoc
- - qcom,msm8939-snoc
- - qcom,msm8996-a1noc
- - qcom,msm8996-bimc
- - qcom,msm8996-cnoc
- - qcom,msm8996-pnoc
- - qcom,msm8996-snoc
- - qcom,qcs404-bimc
- - qcom,qcs404-pcnoc
- - qcom,qcs404-snoc
- - qcom,sdm660-bimc
- - qcom,sdm660-cnoc
- - qcom,sdm660-gnoc
- - qcom,sdm660-snoc
-
- then:
- properties:
- clock-names:
- items:
- - const: bus
- - const: bus_a
-
- clocks:
- items:
- - description: Bus Clock
- - description: Bus A Clock
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,msm8996-mnoc
- - qcom,sdm660-mnoc
-
- then:
- properties:
- clock-names:
- items:
- - const: bus
- - const: bus_a
- - const: iface
-
- clocks:
- items:
- - description: Bus Clock.
- - description: Bus A Clock.
- - description: CPU-NoC High-performance Bus Clock.
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,msm8996-a0noc
-
- then:
- properties:
- clock-names:
- items:
- - const: aggre0_snoc_axi
- - const: aggre0_cnoc_ahb
- - const: aggre0_noc_mpu_cfg
-
- clocks:
- items:
- - description: Aggregate0 System NoC AXI Clock.
- - description: Aggregate0 Config NoC AHB Clock.
- - description: Aggregate0 NoC MPU Clock.
-
- required:
- - power-domains
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,msm8996-a2noc
-
- then:
- properties:
- clock-names:
- items:
- - const: bus
- - const: bus_a
- - const: aggre2_ufs_axi
- - const: ufs_axi
-
- clocks:
- items:
- - description: Bus Clock
- - description: Bus A Clock
- - description: Aggregate2 NoC UFS AXI Clock
- - description: UFS AXI Clock
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,sdm660-a2noc
-
- then:
- properties:
- clock-names:
- items:
- - const: bus
- - const: bus_a
- - const: ipa
- - const: ufs_axi
- - const: aggre2_ufs_axi
- - const: aggre2_usb3_axi
- - const: cfg_noc_usb2_axi
-
- clocks:
- items:
- - description: Bus Clock.
- - description: Bus A Clock.
- - description: IPA Clock.
- - description: UFS AXI Clock.
- - description: Aggregate2 UFS AXI Clock.
- - description: Aggregate2 USB3 AXI Clock.
- - description: Config NoC USB2 AXI Clock.
-
- - if:
- not:
- properties:
- compatible:
- contains:
- enum:
- - qcom,msm8939-snoc
- then:
- patternProperties:
- '^interconnect-[a-z0-9]+$': false
+unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/qcom,rpmcc.h>
bimc: interconnect@400000 {
- compatible = "qcom,msm8916-bimc";
- reg = <0x00400000 0x62000>;
- #interconnect-cells = <1>;
- clock-names = "bus", "bus_a";
- clocks = <&rpmcc RPM_SMD_BIMC_CLK>,
- <&rpmcc RPM_SMD_BIMC_A_CLK>;
- };
-
- pcnoc: interconnect@500000 {
- compatible = "qcom,msm8916-pcnoc";
- reg = <0x00500000 0x11000>;
- #interconnect-cells = <1>;
- clock-names = "bus", "bus_a";
- clocks = <&rpmcc RPM_SMD_PCNOC_CLK>,
- <&rpmcc RPM_SMD_PCNOC_A_CLK>;
- };
-
- snoc: interconnect@580000 {
- compatible = "qcom,msm8916-snoc";
- reg = <0x00580000 0x14000>;
- #interconnect-cells = <1>;
- clock-names = "bus", "bus_a";
- clocks = <&rpmcc RPM_SMD_SNOC_CLK>,
- <&rpmcc RPM_SMD_SNOC_A_CLK>;
+ compatible = "qcom,msm8916-bimc";
+ reg = <0x00400000 0x62000>;
+ #interconnect-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
index a46497af1fd8..74ab080249ff 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
@@ -113,6 +113,7 @@ allOf:
properties:
compatible:
enum:
+ - qcom,sdx65-mc-virt
- qcom,sm8250-qup-virt
then:
required:
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml b/Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml
new file mode 100644
index 000000000000..8f6bc6399626
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml
@@ -0,0 +1,108 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interconnect/qcom,sdm660.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SDM660 Network-On-Chip interconnect
+
+maintainers:
+ - Konrad Dybcio <konradybcio@kernel.org>
+
+description: |
+ The Qualcomm SDM660 interconnect providers support adjusting the
+ bandwidth requirements between the various NoC fabrics.
+
+properties:
+ compatible:
+ enum:
+ - qcom,sdm660-a2noc
+ - qcom,sdm660-bimc
+ - qcom,sdm660-cnoc
+ - qcom,sdm660-gnoc
+ - qcom,sdm660-mnoc
+ - qcom,sdm660-snoc
+
+ reg:
+ maxItems: 1
+
+ clock-names:
+ minItems: 1
+ maxItems: 5
+
+ clocks:
+ minItems: 1
+ maxItems: 5
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+allOf:
+ - $ref: qcom,rpm-common.yaml#
+ - if:
+ properties:
+ compatible:
+ const: qcom,sdm660-mnoc
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: CPU-NoC High-performance Bus Clock.
+
+ clock-names:
+ const: iface
+
+ - if:
+ properties:
+ compatible:
+ const: qcom,sdm660-a2noc
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: IPA Clock.
+ - description: UFS AXI Clock.
+ - description: Aggregate2 UFS AXI Clock.
+ - description: Aggregate2 USB3 AXI Clock.
+ - description: Config NoC USB2 AXI Clock.
+
+ clock-names:
+ items:
+ - const: ipa
+ - const: ufs_axi
+ - const: aggre2_ufs_axi
+ - const: aggre2_usb3_axi
+ - const: cfg_noc_usb2_axi
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-sdm660.h>
+ #include <dt-bindings/clock/qcom,mmcc-sdm660.h>
+ #include <dt-bindings/clock/qcom,rpmcc.h>
+
+ bimc: interconnect@1008000 {
+ compatible = "qcom,sdm660-bimc";
+ reg = <0x01008000 0x78000>;
+ #interconnect-cells = <1>;
+ };
+
+ a2noc: interconnect@1704000 {
+ compatible = "qcom,sdm660-a2noc";
+ reg = <0x01704000 0xc100>;
+ #interconnect-cells = <1>;
+ clocks = <&rpmcc RPM_SMD_IPA_CLK>,
+ <&gcc GCC_UFS_AXI_CLK>,
+ <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
+ <&gcc GCC_AGGRE2_USB3_AXI_CLK>,
+ <&gcc GCC_CFG_NOC_USB2_AXI_CLK>;
+ clock-names = "ipa",
+ "ufs_axi",
+ "aggre2_ufs_axi",
+ "aggre2_usb3_axi",
+ "cfg_noc_usb2_axi";
+ };
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,sdx75-rpmh.yaml b/Documentation/devicetree/bindings/interconnect/qcom,sdx75-rpmh.yaml
new file mode 100644
index 000000000000..71cf7e252bfc
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/qcom,sdx75-rpmh.yaml
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interconnect/qcom,sdx75-rpmh.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm RPMh Network-On-Chip Interconnect on SDX75
+
+maintainers:
+ - Rohit Agarwal <quic_rohiagar@quicinc.com>
+
+description:
+ RPMh interconnect providers support system bandwidth requirements through
+ RPMh hardware accelerators known as Bus Clock Manager (BCM). The provider is
+ able to communicate with the BCM through the Resource State Coordinator (RSC)
+ associated with each execution environment. Provider nodes must point to at
+ least one RPMh device child node pertaining to their RSC and each provider
+ can map to multiple RPMh resources.
+
+properties:
+ compatible:
+ enum:
+ - qcom,sdx75-clk-virt
+ - qcom,sdx75-dc-noc
+ - qcom,sdx75-gem-noc
+ - qcom,sdx75-mc-virt
+ - qcom,sdx75-pcie-anoc
+ - qcom,sdx75-system-noc
+
+ '#interconnect-cells': true
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+
+allOf:
+ - $ref: qcom,rpmh-common.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sdx75-clk-virt
+ - qcom,sdx75-mc-virt
+ then:
+ properties:
+ reg: false
+ else:
+ required:
+ - reg
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sdx75-clk-virt
+ then:
+ properties:
+ clocks:
+ items:
+ - description: RPMH CC QPIC Clock
+ required:
+ - clocks
+ else:
+ properties:
+ clocks: false
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmh.h>
+
+ clk_virt: interconnect-0 {
+ compatible = "qcom,sdx75-clk-virt";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ clocks = <&rpmhcc RPMH_QPIC_CLK>;
+ };
+
+ system_noc: interconnect@1640000 {
+ compatible = "qcom,sdx75-system-noc";
+ reg = <0x1640000 0x4b400>;
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
index b1b2cf81b42f..aa9e1c0895a5 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
@@ -110,6 +110,7 @@ properties:
- qcom,sdm630-smmu-v2
- qcom,sdm845-smmu-v2
- qcom,sm6350-smmu-v2
+ - qcom,sm7150-smmu-v2
- const: qcom,adreno-smmu
- const: qcom,smmu-v2
- description: Qcom Adreno GPUs on Google Cheza platform
@@ -409,6 +410,7 @@ allOf:
contains:
enum:
- qcom,sm6350-smmu-v2
+ - qcom,sm7150-smmu-v2
- qcom,sm8150-smmu-500
- qcom,sm8250-smmu-500
then:
diff --git a/Documentation/devicetree/bindings/leds/irled/pwm-ir-tx.yaml b/Documentation/devicetree/bindings/leds/irled/pwm-ir-tx.yaml
index f2a6fa140f38..7526e3149f72 100644
--- a/Documentation/devicetree/bindings/leds/irled/pwm-ir-tx.yaml
+++ b/Documentation/devicetree/bindings/leds/irled/pwm-ir-tx.yaml
@@ -15,7 +15,10 @@ description:
properties:
compatible:
- const: pwm-ir-tx
+ oneOf:
+ - const: pwm-ir-tx
+ - const: nokia,n900-ir
+ deprecated: true
pwms:
maxItems: 1
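A minimal pwm-ir-tx sketch, mirroring the 38 kHz PWM period used by the removed nokia,n900-ir text binding further below (the PWM phandle is a placeholder):

    irled {
        compatible = "pwm-ir-tx";
        pwms = <&pwm9 0 26316 0>; /* 38 kHz carrier */
    };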
diff --git a/Documentation/devicetree/bindings/mailbox/fsl,mu.yaml b/Documentation/devicetree/bindings/mailbox/fsl,mu.yaml
index 191c1ce15009..12e7a7d536a3 100644
--- a/Documentation/devicetree/bindings/mailbox/fsl,mu.yaml
+++ b/Documentation/devicetree/bindings/mailbox/fsl,mu.yaml
@@ -72,9 +72,9 @@ properties:
type : Channel type
channel : Channel number
- This MU support 5 type of unidirectional channels, each type
+ This MU supports 6 types of unidirectional channels, each type
has 4 channels except RST channel which only has 1 channel.
- A total of 17 channels. Following types are
+ A total of 21 channels. Following types are
supported:
0 - TX channel with 32bit transmit register and IRQ transmit
acknowledgment support.
@@ -82,6 +82,7 @@ properties:
2 - TX doorbell channel. Without own register and no ACK support.
3 - RX doorbell channel.
4 - RST channel
+ 5 - TX doorbell channel with S/W ACK from the other side.
const: 2
clocks:
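A minimal consumer sketch of the mbox cell layout described above, with the first cell giving the channel type and the second the channel number; the client node and MU label are placeholders, and type 5 is the new TX doorbell with S/W ACK:

    client {
        mboxes = <&mu 0 1>, /* type 0: TX register channel 1 */
                 <&mu 1 1>, /* type 1: RX register channel 1 */
                 <&mu 5 0>; /* type 5: TX doorbell channel 0, S/W ACK */
        mbox-names = "tx", "rx", "txdb";
    };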
diff --git a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
index d2e25ff6db7f..a38413f8d132 100644
--- a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
+++ b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
@@ -125,10 +125,12 @@ allOf:
items:
- description: primary pll parent of the clock driver
- description: XO clock
+ - description: GCC GPLL0 clock source
clock-names:
items:
- const: pll
- const: xo
+ - const: gpll0
- if:
properties:
diff --git a/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml b/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
index cc6f66eccc84..a35f9483dc71 100644
--- a/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
+++ b/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
@@ -34,6 +34,7 @@ properties:
- qcom,sm8350-ipcc
- qcom,sm8450-ipcc
- qcom,sm8550-ipcc
+ - qcom,sm8650-ipcc
- const: qcom,ipcc
reg:
diff --git a/Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.yaml b/Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.yaml
index aeaddbf574b0..8b15a0532120 100644
--- a/Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.yaml
+++ b/Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.yaml
@@ -74,6 +74,10 @@ patternProperties:
type: object # DT nodes are json objects
additionalProperties: false
properties:
+
+ compatible:
+ const: xlnx,zynqmp-ipi-dest-mailbox
+
xlnx,ipi-id:
description:
Remote Xilinx IPI agent ID of which the mailbox is connected to.
@@ -95,6 +99,7 @@ patternProperties:
- const: remote_response_region
required:
+ - compatible
- reg
- reg-names
- "#mbox-cells"
@@ -124,6 +129,7 @@ examples:
ranges;
mailbox: mailbox@ff9905c0 {
+ compatible = "xlnx,zynqmp-ipi-dest-mailbox";
reg = <0x0 0xff9905c0 0x0 0x20>,
<0x0 0xff9905e0 0x0 0x20>,
<0x0 0xff990e80 0x0 0x20>,
diff --git a/Documentation/devicetree/bindings/media/amlogic,meson6-ir.yaml b/Documentation/devicetree/bindings/media/amlogic,meson6-ir.yaml
index 3f9fa92703bb..0f95fe8dd9ac 100644
--- a/Documentation/devicetree/bindings/media/amlogic,meson6-ir.yaml
+++ b/Documentation/devicetree/bindings/media/amlogic,meson6-ir.yaml
@@ -19,6 +19,7 @@ properties:
- amlogic,meson6-ir
- amlogic,meson8b-ir
- amlogic,meson-gxbb-ir
+ - amlogic,meson-s4-ir
- items:
- const: amlogic,meson-gx-ir
- const: amlogic,meson-gxbb-ir
diff --git a/Documentation/devicetree/bindings/media/cdns,csi2rx.yaml b/Documentation/devicetree/bindings/media/cdns,csi2rx.yaml
index 30a335b10762..2008a47c0580 100644
--- a/Documentation/devicetree/bindings/media/cdns,csi2rx.yaml
+++ b/Documentation/devicetree/bindings/media/cdns,csi2rx.yaml
@@ -18,6 +18,7 @@ properties:
items:
- enum:
- starfive,jh7110-csi2rx
+ - ti,j721e-csi2rx
- const: cdns,csi2rx
reg:
diff --git a/Documentation/devicetree/bindings/media/i2c/hynix,hi846.yaml b/Documentation/devicetree/bindings/media/i2c/hynix,hi846.yaml
index 1e2df8cf2937..60f19e1152b3 100644
--- a/Documentation/devicetree/bindings/media/i2c/hynix,hi846.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/hynix,hi846.yaml
@@ -14,6 +14,9 @@ description: |-
interface and CCI (I2C compatible) control bus. The output format
is raw Bayer.
+allOf:
+ - $ref: /schemas/media/video-interface-devices.yaml#
+
properties:
compatible:
const: hynix,hi846
@@ -86,7 +89,7 @@ required:
- vddd-supply
- port
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
@@ -109,6 +112,8 @@ examples:
vddio-supply = <&reg_camera_vddio>;
reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
shutdown-gpios = <&gpio5 4 GPIO_ACTIVE_LOW>;
+ orientation = <0>;
+ rotation = <0>;
port {
camera_out: endpoint {
diff --git a/Documentation/devicetree/bindings/media/i2c/onnn,mt9m114.yaml b/Documentation/devicetree/bindings/media/i2c/onnn,mt9m114.yaml
new file mode 100644
index 000000000000..f6b87892068a
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/onnn,mt9m114.yaml
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/i2c/onnn,mt9m114.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: onsemi 1/6-inch 720p CMOS Digital Image Sensor
+
+maintainers:
+ - Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+
+description: |-
+ The onsemi MT9M114 is a 1/6-inch 720p (1.26 Mp) CMOS digital image sensor
+ with an active pixel-array size of 1296H x 976V. It is programmable through
+ an I2C interface and outputs image data over an 8-bit parallel or 1-lane MIPI
+ CSI-2 connection.
+
+properties:
+ compatible:
+ const: onnn,mt9m114
+
+ reg:
+ description: I2C device address
+ enum:
+ - 0x48
+ - 0x5d
+
+ clocks:
+ description: EXTCLK clock signal
+ maxItems: 1
+
+ vdd-supply:
+ description:
+ Core digital voltage supply, 1.8V
+
+ vddio-supply:
+ description:
+ I/O digital voltage supply, 1.8V or 2.8V
+
+ vaa-supply:
+ description:
+ Analog voltage supply, 2.8V
+
+ reset-gpios:
+ description: |-
+ Reference to the GPIO connected to the RESET_BAR pin, if any (active
+ low).
+
+ port:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ additionalProperties: false
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ additionalProperties: false
+
+ properties:
+ bus-type:
+ enum: [4, 5, 6]
+
+ link-frequencies: true
+ remote-endpoint: true
+
+ # The number and mapping of lanes (for CSI-2), and the bus width and
+ # signal polarities (for parallel and BT.656) are fixed and must not
+ # be specified.
+
+ required:
+ - bus-type
+ - link-frequencies
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - vdd-supply
+ - vddio-supply
+ - vaa-supply
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/media/video-interfaces.h>
+
+ i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sensor@48 {
+ compatible = "onnn,mt9m114";
+ reg = <0x48>;
+
+ clocks = <&clk24m 0>;
+
+ reset-gpios = <&gpio5 21 GPIO_ACTIVE_LOW>;
+
+ vddio-supply = <&reg_cam_1v8>;
+ vdd-supply = <&reg_cam_1v8>;
+ vaa-supply = <&reg_2p8v>;
+
+ port {
+ endpoint {
+ bus-type = <MEDIA_BUS_TYPE_CSI2_DPHY>;
+ link-frequencies = /bits/ 64 <384000000>;
+ remote-endpoint = <&mipi_csi_in>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov02a10.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov02a10.yaml
index 763cebe03dc2..67c1c291327b 100644
--- a/Documentation/devicetree/bindings/media/i2c/ovti,ov02a10.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov02a10.yaml
@@ -68,12 +68,6 @@ properties:
marked GPIO_ACTIVE_LOW.
maxItems: 1
- rotation:
- enum:
- - 0 # Sensor Mounted Upright
- - 180 # Sensor Mounted Upside Down
- default: 0
-
port:
$ref: /schemas/graph.yaml#/$defs/port-base
additionalProperties: false
@@ -114,7 +108,7 @@ required:
- reset-gpios
- port
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov4689.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov4689.yaml
index 50579c947f3c..d96199031b66 100644
--- a/Documentation/devicetree/bindings/media/i2c/ovti,ov4689.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov4689.yaml
@@ -52,10 +52,6 @@ properties:
description:
GPIO connected to the reset pin (active low)
- orientation: true
-
- rotation: true
-
port:
$ref: /schemas/graph.yaml#/$defs/port-base
additionalProperties: false
@@ -95,7 +91,7 @@ required:
- dvdd-supply
- port
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov5640.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov5640.yaml
index a621032f9bd0..2c5e69356658 100644
--- a/Documentation/devicetree/bindings/media/i2c/ovti,ov5640.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov5640.yaml
@@ -44,11 +44,6 @@ properties:
description: >
Reference to the GPIO connected to the reset pin, if any.
- rotation:
- enum:
- - 0
- - 180
-
port:
description: Digital Output Port
$ref: /schemas/graph.yaml#/$defs/port-base
@@ -85,7 +80,7 @@ required:
- DOVDD-supply
- port
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov5642.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov5642.yaml
new file mode 100644
index 000000000000..01f8b2b3fd17
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov5642.yaml
@@ -0,0 +1,141 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/i2c/ovti,ov5642.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: OmniVision OV5642 Image Sensor
+
+maintainers:
+ - Fabio Estevam <festevam@gmail.com>
+
+allOf:
+ - $ref: /schemas/media/video-interface-devices.yaml#
+
+properties:
+ compatible:
+ const: ovti,ov5642
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ description: XCLK Input Clock
+
+ AVDD-supply:
+ description: Analog voltage supply, 2.8V.
+
+ DVDD-supply:
+ description: Digital core voltage supply, 1.5V.
+
+ DOVDD-supply:
+ description: Digital I/O voltage supply, 1.8V.
+
+ powerdown-gpios:
+ maxItems: 1
+ description: Reference to the GPIO connected to the powerdown pin, if any.
+
+ reset-gpios:
+ maxItems: 1
+ description: Reference to the GPIO connected to the reset pin, if any.
+
+ port:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ description: |
+ Video output port.
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ bus-type:
+ enum: [5, 6]
+
+ bus-width:
+ enum: [8, 10]
+ default: 10
+
+ data-shift:
+ enum: [0, 2]
+ default: 0
+
+ hsync-active:
+ enum: [0, 1]
+ default: 1
+
+ vsync-active:
+ enum: [0, 1]
+ default: 1
+
+ pclk-sample:
+ enum: [0, 1]
+ default: 1
+
+ allOf:
+ - if:
+ properties:
+ bus-type:
+ const: 6
+ then:
+ properties:
+ hsync-active: false
+ vsync-active: false
+
+ - if:
+ properties:
+ bus-width:
+ const: 10
+ then:
+ properties:
+ data-shift:
+ const: 0
+
+ required:
+ - bus-type
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/media/video-interfaces.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ camera@3c {
+ compatible = "ovti,ov5642";
+ reg = <0x3c>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ov5642>;
+ clocks = <&clk_ext_camera>;
+ DOVDD-supply = <&vgen4_reg>;
+ AVDD-supply = <&vgen3_reg>;
+ DVDD-supply = <&vgen2_reg>;
+ powerdown-gpios = <&gpio1 19 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio1 20 GPIO_ACTIVE_LOW>;
+
+ port {
+ ov5642_to_parallel: endpoint {
+ bus-type = <MEDIA_BUS_TYPE_PARALLEL>;
+ remote-endpoint = <&parallel_from_ov5642>;
+ bus-width = <8>;
+ data-shift = <2>; /* lines 9:2 are used */
+ hsync-active = <0>;
+ vsync-active = <0>;
+ pclk-sample = <1>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov5693.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov5693.yaml
index 6829a4aadd22..3368b3bd8ef2 100644
--- a/Documentation/devicetree/bindings/media/i2c/ovti,ov5693.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov5693.yaml
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Omnivision OV5693/OV5695 CMOS Sensors
maintainers:
- - Tommaso Merciai <tommaso.merciai@amarulasolutions.com>
+ - Tommaso Merciai <tomm.merciai@gmail.com>
description: |
The Omnivision OV5693/OV5695 are high performance, 1/4-inch, 5 megapixel, CMOS
diff --git a/Documentation/devicetree/bindings/media/i2c/sony,imx214.yaml b/Documentation/devicetree/bindings/media/i2c/sony,imx214.yaml
index e2470dd5920c..60903da84e1f 100644
--- a/Documentation/devicetree/bindings/media/i2c/sony,imx214.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/sony,imx214.yaml
@@ -91,7 +91,7 @@ required:
- vddd-supply
- port
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/media/i2c/sony,imx415.yaml b/Documentation/devicetree/bindings/media/i2c/sony,imx415.yaml
index 642f9b15d359..9a00dab2e8a3 100644
--- a/Documentation/devicetree/bindings/media/i2c/sony,imx415.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/sony,imx415.yaml
@@ -44,14 +44,6 @@ properties:
description: Sensor reset (XCLR) GPIO
maxItems: 1
- flash-leds: true
-
- lens-focus: true
-
- orientation: true
-
- rotation: true
-
port:
$ref: /schemas/graph.yaml#/$defs/port-base
unevaluatedProperties: false
@@ -89,7 +81,7 @@ required:
- ovdd-supply
- port
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/media/nokia,n900-ir b/Documentation/devicetree/bindings/media/nokia,n900-ir
deleted file mode 100644
index 13a18ce37dd1..000000000000
--- a/Documentation/devicetree/bindings/media/nokia,n900-ir
+++ /dev/null
@@ -1,20 +0,0 @@
-Device-Tree bindings for LIRC TX driver for Nokia N900(RX51)
-
-Required properties:
- - compatible: should be "nokia,n900-ir".
- - pwms: specifies PWM used for IR signal transmission.
-
-Example node:
-
- pwm9: dmtimer-pwm@9 {
- compatible = "ti,omap-dmtimer-pwm";
- ti,timers = <&timer9>;
- ti,clock-source = <0x00>; /* timer_sys_ck */
- #pwm-cells = <3>;
- };
-
- ir: n900-ir {
- compatible = "nokia,n900-ir";
-
- pwms = <&pwm9 0 26316 0>; /* 38000 Hz */
- };
diff --git a/Documentation/devicetree/bindings/media/nuvoton,npcm-ece.yaml b/Documentation/devicetree/bindings/media/nuvoton,npcm-ece.yaml
new file mode 100644
index 000000000000..b47468e54504
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/nuvoton,npcm-ece.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/nuvoton,npcm-ece.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Nuvoton NPCM Encoding Compression Engine
+
+maintainers:
+ - Joseph Liu <kwliu@nuvoton.com>
+ - Marvin Lin <kflin@nuvoton.com>
+
+description: |
+ Video Encoding Compression Engine (ECE) present on Nuvoton NPCM SoCs.
+
+properties:
+ compatible:
+ enum:
+ - nuvoton,npcm750-ece
+ - nuvoton,npcm845-ece
+
+ reg:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/reset/nuvoton,npcm7xx-reset.h>
+
+ ece: video-codec@f0820000 {
+ compatible = "nuvoton,npcm750-ece";
+ reg = <0xf0820000 0x2000>;
+ resets = <&rstc NPCM7XX_RESET_IPSRST2 NPCM7XX_RESET_ECE>;
+ };
diff --git a/Documentation/devicetree/bindings/media/nuvoton,npcm-vcd.yaml b/Documentation/devicetree/bindings/media/nuvoton,npcm-vcd.yaml
new file mode 100644
index 000000000000..c885f559d2e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/nuvoton,npcm-vcd.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/nuvoton,npcm-vcd.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Nuvoton NPCM Video Capture/Differentiation Engine
+
+maintainers:
+ - Joseph Liu <kwliu@nuvoton.com>
+ - Marvin Lin <kflin@nuvoton.com>
+
+description: |
+ Video Capture/Differentiation Engine (VCD) present on Nuvoton NPCM SoCs.
+
+properties:
+ compatible:
+ enum:
+ - nuvoton,npcm750-vcd
+ - nuvoton,npcm845-vcd
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ nuvoton,sysgcr:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: phandle to access GCR (Global Control Register) registers.
+
+ nuvoton,sysgfxi:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: phandle to access GFXI (Graphics Core Information) registers.
+
+ nuvoton,ece:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: phandle to access ECE (Encoding Compression Engine) registers.
+
+ memory-region:
+ maxItems: 1
+ description:
+ CMA pool to use for buffer allocation instead of the default CMA pool.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - resets
+ - nuvoton,sysgcr
+ - nuvoton,sysgfxi
+ - nuvoton,ece
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/reset/nuvoton,npcm7xx-reset.h>
+
+ vcd: vcd@f0810000 {
+ compatible = "nuvoton,npcm750-vcd";
+ reg = <0xf0810000 0x10000>;
+ interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rstc NPCM7XX_RESET_IPSRST2 NPCM7XX_RESET_VCD>;
+ nuvoton,sysgcr = <&gcr>;
+ nuvoton,sysgfxi = <&gfxi>;
+ nuvoton,ece = <&ece>;
+ };
diff --git a/Documentation/devicetree/bindings/media/qcom,sdm845-venus-v2.yaml b/Documentation/devicetree/bindings/media/qcom,sdm845-venus-v2.yaml
index d5f80976f4cf..6228fd2b3246 100644
--- a/Documentation/devicetree/bindings/media/qcom,sdm845-venus-v2.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sdm845-venus-v2.yaml
@@ -48,6 +48,14 @@ properties:
iommus:
maxItems: 2
+ interconnects:
+ maxItems: 2
+
+ interconnect-names:
+ items:
+ - const: video-mem
+ - const: cpu-cfg
+
operating-points-v2: true
opp-table:
type: object
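A minimal sketch of the new interconnect properties in a venus node; the provider labels and node IDs are placeholders taken from the usual SDM845 interconnect headers, and the number of specifier cells follows each provider's #interconnect-cells:

    interconnects = <&mmss_noc MASTER_VIDEO_P0 &mem_noc SLAVE_EBI1>,
                    <&gladiator_noc MASTER_APPSS_PROC &config_noc SLAVE_VENUS_CFG>;
    interconnect-names = "video-mem", "cpu-cfg";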
diff --git a/Documentation/devicetree/bindings/media/rockchip-vpu.yaml b/Documentation/devicetree/bindings/media/rockchip-vpu.yaml
index 772ec3283bc6..c57e1f488895 100644
--- a/Documentation/devicetree/bindings/media/rockchip-vpu.yaml
+++ b/Documentation/devicetree/bindings/media/rockchip-vpu.yaml
@@ -68,6 +68,13 @@ properties:
iommus:
maxItems: 1
+ resets:
+ items:
+ - description: AXI reset line
+ - description: AXI bus interface unit reset line
+ - description: APB reset line
+ - description: APB bus interface unit reset line
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/media/samsung,exynos4212-fimc-is.yaml b/Documentation/devicetree/bindings/media/samsung,exynos4212-fimc-is.yaml
index 3691cd4962b2..3a5ff3f47060 100644
--- a/Documentation/devicetree/bindings/media/samsung,exynos4212-fimc-is.yaml
+++ b/Documentation/devicetree/bindings/media/samsung,exynos4212-fimc-is.yaml
@@ -75,13 +75,20 @@ properties:
power-domains:
maxItems: 1
+ samsung,pmu-syscon:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Power Management Unit (PMU) system controller interface, used to
+ power/start the ISP.
+
patternProperties:
"^pmu@[0-9a-f]+$":
type: object
additionalProperties: false
+ deprecated: true
description:
Node representing the SoC's Power Management Unit (duplicated with the
- correct PMU node in the SoC).
+ correct PMU node in the SoC). Deprecated, use samsung,pmu-syscon.
properties:
reg:
@@ -131,6 +138,7 @@ required:
- clock-names
- interrupts
- ranges
+ - samsung,pmu-syscon
- '#size-cells'
additionalProperties: false
@@ -179,15 +187,12 @@ examples:
<&sysmmu_fimc_fd>, <&sysmmu_fimc_mcuctl>;
iommu-names = "isp", "drc", "fd", "mcuctl";
power-domains = <&pd_isp>;
+ samsung,pmu-syscon = <&pmu_system_controller>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
- pmu@10020000 {
- reg = <0x10020000 0x3000>;
- };
-
i2c-isp@12140000 {
compatible = "samsung,exynos4212-i2c-isp";
reg = <0x12140000 0x100>;
diff --git a/Documentation/devicetree/bindings/media/samsung,fimc.yaml b/Documentation/devicetree/bindings/media/samsung,fimc.yaml
index b3486c38a05b..7808d61f1fa3 100644
--- a/Documentation/devicetree/bindings/media/samsung,fimc.yaml
+++ b/Documentation/devicetree/bindings/media/samsung,fimc.yaml
@@ -118,7 +118,7 @@ examples:
#clock-cells = <1>;
#address-cells = <1>;
#size-cells = <1>;
- ranges = <0x0 0x0 0x18000000>;
+ ranges = <0x0 0x0 0xba1000>;
clocks = <&clock CLK_SCLK_CAM0>, <&clock CLK_SCLK_CAM1>,
<&clock CLK_PIXELASYNCM0>, <&clock CLK_PIXELASYNCM1>;
@@ -133,9 +133,9 @@ examples:
pinctrl-0 = <&cam_port_a_clk_active &cam_port_b_clk_active>;
pinctrl-names = "default";
- fimc@11800000 {
+ fimc@0 {
compatible = "samsung,exynos4212-fimc";
- reg = <0x11800000 0x1000>;
+ reg = <0x00000000 0x1000>;
interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clock CLK_FIMC0>,
<&clock CLK_SCLK_FIMC0>;
@@ -152,9 +152,9 @@ examples:
/* ... FIMC 1-3 */
- csis@11880000 {
+ csis@80000 {
compatible = "samsung,exynos4210-csis";
- reg = <0x11880000 0x4000>;
+ reg = <0x00080000 0x4000>;
interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clock CLK_CSIS0>,
<&clock CLK_SCLK_CSIS0>;
@@ -187,9 +187,9 @@ examples:
/* ... CSIS 1 */
- fimc-lite@12390000 {
+ fimc-lite@b90000 {
compatible = "samsung,exynos4212-fimc-lite";
- reg = <0x12390000 0x1000>;
+ reg = <0xb90000 0x1000>;
interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&pd_isp>;
clocks = <&isp_clock CLK_ISP_FIMC_LITE0>;
@@ -199,9 +199,9 @@ examples:
/* ... FIMC-LITE 1 */
- fimc-is@12000000 {
+ fimc-is@800000 {
compatible = "samsung,exynos4212-fimc-is";
- reg = <0x12000000 0x260000>;
+ reg = <0x00800000 0x260000>;
interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&isp_clock CLK_ISP_FIMC_LITE0>,
@@ -237,18 +237,15 @@ examples:
<&sysmmu_fimc_fd>, <&sysmmu_fimc_mcuctl>;
iommu-names = "isp", "drc", "fd", "mcuctl";
power-domains = <&pd_isp>;
+ samsung,pmu-syscon = <&pmu_system_controller>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
- pmu@10020000 {
- reg = <0x10020000 0x3000>;
- };
-
- i2c-isp@12140000 {
+ i2c-isp@940000 {
compatible = "samsung,exynos4212-i2c-isp";
- reg = <0x12140000 0x100>;
+ reg = <0x00940000 0x100>;
clocks = <&isp_clock CLK_ISP_I2C1_ISP>;
clock-names = "i2c_isp";
pinctrl-0 = <&fimc_is_i2c1>;
diff --git a/Documentation/devicetree/bindings/media/ti,j721e-csi2rx-shim.yaml b/Documentation/devicetree/bindings/media/ti,j721e-csi2rx-shim.yaml
new file mode 100644
index 000000000000..f762fdc05e4d
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/ti,j721e-csi2rx-shim.yaml
@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/ti,j721e-csi2rx-shim.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI J721E CSI2RX Shim
+
+description: |
+ The TI J721E CSI2RX Shim is a wrapper around the Cadence CSI2RX bridge that
+ enables sending captured frames to memory over PSI-L DMA. In the J721E
+ Technical Reference Manual (SPRUIL1B) it is referred to as "SHIM" under the
+ CSI_RX_IF section.
+
+maintainers:
+ - Jai Luthra <j-luthra@ti.com>
+
+properties:
+ compatible:
+ const: ti,j721e-csi2rx-shim
+
+ dmas:
+ maxItems: 1
+
+ dma-names:
+ items:
+ - const: rx0
+
+ reg:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ ranges: true
+
+ "#address-cells": true
+
+ "#size-cells": true
+
+patternProperties:
+ "^csi-bridge@":
+ type: object
+ description: CSI2 bridge node.
+ $ref: cdns,csi2rx.yaml#
+
+required:
+ - compatible
+ - reg
+ - dmas
+ - dma-names
+ - power-domains
+ - ranges
+ - "#address-cells"
+ - "#size-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/soc/ti,sci_pm_domain.h>
+
+ ti_csi2rx0: ticsi2rx@4500000 {
+ compatible = "ti,j721e-csi2rx-shim";
+ dmas = <&main_udmap 0x4940>;
+ dma-names = "rx0";
+ reg = <0x4500000 0x1000>;
+ power-domains = <&k3_pds 26 TI_SCI_PD_EXCLUSIVE>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ cdns_csi2rx: csi-bridge@4504000 {
+ compatible = "ti,j721e-csi2rx", "cdns,csi2rx";
+ reg = <0x4504000 0x1000>;
+ clocks = <&k3_clks 26 2>, <&k3_clks 26 0>, <&k3_clks 26 2>,
+ <&k3_clks 26 2>, <&k3_clks 26 3>, <&k3_clks 26 3>;
+ clock-names = "sys_clk", "p_clk", "pixel_if0_clk",
+ "pixel_if1_clk", "pixel_if2_clk", "pixel_if3_clk";
+ phys = <&dphy0>;
+ phy-names = "dphy";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ csi2_0: port@0 {
+
+ reg = <0>;
+
+ csi2rx0_in_sensor: endpoint {
+ remote-endpoint = <&csi2_cam0>;
+ bus-type = <4>; /* CSI2 DPHY. */
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/video-interfaces.yaml b/Documentation/devicetree/bindings/media/video-interfaces.yaml
index a211d49dc2ac..26e3e7d7c67b 100644
--- a/Documentation/devicetree/bindings/media/video-interfaces.yaml
+++ b/Documentation/devicetree/bindings/media/video-interfaces.yaml
@@ -160,6 +160,7 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 1
maxItems: 8
+ uniqueItems: true
items:
# Assume up to 9 physical lane indices
maximum: 8
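With uniqueItems in place, an endpoint like the following sketch remains valid, while repeating a physical lane index (for example <1 1 2 3>) is now rejected by the schema:

    endpoint {
        remote-endpoint = <&csi_in>;
        clock-lanes = <0>;
        data-lanes = <1 2 3 4>;
    };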
diff --git a/Documentation/devicetree/bindings/mfd/brcm,bcm63268-gpio-sysctl.yaml b/Documentation/devicetree/bindings/mfd/brcm,bcm63268-gpio-sysctl.yaml
index c14def1b2ad2..9c2a04829da5 100644
--- a/Documentation/devicetree/bindings/mfd/brcm,bcm63268-gpio-sysctl.yaml
+++ b/Documentation/devicetree/bindings/mfd/brcm,bcm63268-gpio-sysctl.yaml
@@ -148,47 +148,47 @@ examples:
pinctrl_nand: nand-pins {
function = "nand";
- group = "nand_grp";
+ pins = "nand_grp";
};
pinctrl_gpio35_alt: gpio35_alt-pins {
function = "gpio35_alt";
- pin = "gpio35";
+ pins = "gpio35";
};
pinctrl_dectpd: dectpd-pins {
function = "dectpd";
- group = "dectpd_grp";
+ pins = "dectpd_grp";
};
pinctrl_vdsl_phy_override_0: vdsl_phy_override_0-pins {
function = "vdsl_phy_override_0";
- group = "vdsl_phy_override_0_grp";
+ pins = "vdsl_phy_override_0_grp";
};
pinctrl_vdsl_phy_override_1: vdsl_phy_override_1-pins {
function = "vdsl_phy_override_1";
- group = "vdsl_phy_override_1_grp";
+ pins = "vdsl_phy_override_1_grp";
};
pinctrl_vdsl_phy_override_2: vdsl_phy_override_2-pins {
function = "vdsl_phy_override_2";
- group = "vdsl_phy_override_2_grp";
+ pins = "vdsl_phy_override_2_grp";
};
pinctrl_vdsl_phy_override_3: vdsl_phy_override_3-pins {
function = "vdsl_phy_override_3";
- group = "vdsl_phy_override_3_grp";
+ pins = "vdsl_phy_override_3_grp";
};
pinctrl_dsl_gpio8: dsl_gpio8-pins {
function = "dsl_gpio8";
- group = "dsl_gpio8";
+ pins = "dsl_gpio8";
};
pinctrl_dsl_gpio9: dsl_gpio9-pins {
function = "dsl_gpio9";
- group = "dsl_gpio9";
+ pins = "dsl_gpio9";
};
};
};
diff --git a/Documentation/devicetree/bindings/mfd/brcm,bcm6362-gpio-sysctl.yaml b/Documentation/devicetree/bindings/mfd/brcm,bcm6362-gpio-sysctl.yaml
index 4d594739b382..c2941638c8f2 100644
--- a/Documentation/devicetree/bindings/mfd/brcm,bcm6362-gpio-sysctl.yaml
+++ b/Documentation/devicetree/bindings/mfd/brcm,bcm6362-gpio-sysctl.yaml
@@ -230,7 +230,7 @@ examples:
pinctrl_nand: nand-pins {
function = "nand";
- group = "nand_grp";
+ pins = "nand_grp";
};
};
};
diff --git a/Documentation/devicetree/bindings/mfd/brcm,bcm6368-gpio-sysctl.yaml b/Documentation/devicetree/bindings/mfd/brcm,bcm6368-gpio-sysctl.yaml
index aae83d432880..44e77d77d314 100644
--- a/Documentation/devicetree/bindings/mfd/brcm,bcm6368-gpio-sysctl.yaml
+++ b/Documentation/devicetree/bindings/mfd/brcm,bcm6368-gpio-sysctl.yaml
@@ -240,7 +240,7 @@ examples:
pinctrl_uart1: uart1-pins {
function = "uart1";
- group = "uart1_grp";
+ pins = "uart1_grp";
};
};
};
diff --git a/Documentation/devicetree/bindings/mtd/mtd.yaml b/Documentation/devicetree/bindings/mtd/mtd.yaml
index b82ca03e969c..f322290ee516 100644
--- a/Documentation/devicetree/bindings/mtd/mtd.yaml
+++ b/Documentation/devicetree/bindings/mtd/mtd.yaml
@@ -43,7 +43,12 @@ patternProperties:
deprecated: true
"^otp(-[0-9]+)?$":
- $ref: ../nvmem/nvmem.yaml#
+ type: object
+
+ allOf:
+ - $ref: ../nvmem/nvmem.yaml#
+ - $ref: ../nvmem/nvmem-deprecated-cells.yaml#
+
unevaluatedProperties: false
description: |
diff --git a/Documentation/devicetree/bindings/mtd/partitions/fixed-partitions.yaml b/Documentation/devicetree/bindings/mtd/partitions/fixed-partitions.yaml
index 331e564f29dc..058253d6d889 100644
--- a/Documentation/devicetree/bindings/mtd/partitions/fixed-partitions.yaml
+++ b/Documentation/devicetree/bindings/mtd/partitions/fixed-partitions.yaml
@@ -29,6 +29,24 @@ properties:
"#size-cells": true
+ compression:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: |
+ Compression algorithm used to store the data in this partition, chosen
+ from a list of well-known algorithms.
+
+ The contents are compressed using this algorithm.
+
+ enum:
+ - none
+ - bzip2
+ - gzip
+ - lzop
+ - lz4
+ - lzma
+ - xz
+ - zstd
+
patternProperties:
"@[0-9a-f]+$":
$ref: partition.yaml#
@@ -64,6 +82,7 @@ examples:
uimage@100000 {
reg = <0x0100000 0x200000>;
+ compression = "lzma";
};
};
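A minimal sketch of a partition carrying compressed contents via the new property (the offset, size and label are illustrative):

    partitions {
        compatible = "fixed-partitions";
        #address-cells = <1>;
        #size-cells = <1>;

        kernel@0 {
            reg = <0x0 0x400000>;
            label = "kernel";
            compression = "lzma";
        };
    };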
diff --git a/Documentation/devicetree/bindings/mtd/partitions/nvmem-cells.yaml b/Documentation/devicetree/bindings/mtd/partitions/nvmem-cells.yaml
index 5474d63268dc..9518281007af 100644
--- a/Documentation/devicetree/bindings/mtd/partitions/nvmem-cells.yaml
+++ b/Documentation/devicetree/bindings/mtd/partitions/nvmem-cells.yaml
@@ -19,6 +19,7 @@ maintainers:
allOf:
- $ref: /schemas/mtd/partitions/partition.yaml#
- $ref: /schemas/nvmem/nvmem.yaml#
+ - $ref: /schemas/nvmem/nvmem-deprecated-cells.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/net/ti,cc1352p7.yaml b/Documentation/devicetree/bindings/net/ti,cc1352p7.yaml
new file mode 100644
index 000000000000..3dde10de4630
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ti,cc1352p7.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/ti,cc1352p7.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments Simplelink CC1352P7 wireless MCU
+
+description:
+ The CC1352P7 MCU can be connected via SPI or UART.
+
+maintainers:
+ - Ayush Singh <ayushdevel1325@gmail.com>
+
+properties:
+ compatible:
+ const: ti,cc1352p7
+
+ clocks:
+ items:
+ - description: high-frequency main system (MCU and peripherals) clock
+ - description: low-frequency system clock
+
+ clock-names:
+ items:
+ - const: sclk_hf
+ - const: sclk_lf
+
+ reset-gpios:
+ maxItems: 1
+
+ vdds-supply: true
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ serial {
+ mcu {
+ compatible = "ti,cc1352p7";
+ clocks = <&sclk_hf 0>, <&sclk_lf 25>;
+ clock-names = "sclk_hf", "sclk_lf";
+ reset-gpios = <&pio 35 GPIO_ACTIVE_LOW>;
+ vdds-supply = <&vdds>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml b/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml
index 0928ec408170..4424c3c5e75c 100644
--- a/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml
+++ b/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml
@@ -12,6 +12,7 @@ maintainers:
allOf:
- $ref: nvmem.yaml#
+ - $ref: nvmem-deprecated-cells.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/nvmem/amlogic,meson-gxbb-efuse.yaml b/Documentation/devicetree/bindings/nvmem/amlogic,meson-gxbb-efuse.yaml
index e49c2754ff55..9801fe6f91b5 100644
--- a/Documentation/devicetree/bindings/nvmem/amlogic,meson-gxbb-efuse.yaml
+++ b/Documentation/devicetree/bindings/nvmem/amlogic,meson-gxbb-efuse.yaml
@@ -11,6 +11,7 @@ maintainers:
allOf:
- $ref: nvmem.yaml#
+ - $ref: nvmem-deprecated-cells.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/nvmem/amlogic,meson6-efuse.yaml b/Documentation/devicetree/bindings/nvmem/amlogic,meson6-efuse.yaml
index 84b3dfd21e09..b5cf740f96fa 100644
--- a/Documentation/devicetree/bindings/nvmem/amlogic,meson6-efuse.yaml
+++ b/Documentation/devicetree/bindings/nvmem/amlogic,meson6-efuse.yaml
@@ -12,6 +12,7 @@ maintainers:
allOf:
- $ref: nvmem.yaml#
+ - $ref: nvmem-deprecated-cells.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/nvmem/apple,efuses.yaml b/Documentation/devicetree/bindings/nvmem/apple,efuses.yaml
index e0860b6b85f3..d3abdafdbca0 100644
--- a/Documentation/devicetree/bindings/nvmem/apple,efuses.yaml
+++ b/Documentation/devicetree/bindings/nvmem/apple,efuses.yaml
@@ -16,6 +16,7 @@ maintainers:
allOf:
- $ref: nvmem.yaml#
+ - $ref: nvmem-deprecated-cells.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml b/Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml
index 99e60d713dac..be1314454bec 100644
--- a/Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml
+++ b/Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml
@@ -16,6 +16,7 @@ description: |
allOf:
- $ref: nvmem.yaml#
+ - $ref: nvmem-deprecated-cells.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/nvmem/mediatek,efuse.yaml b/Documentation/devicetree/bindings/nvmem/mediatek,efuse.yaml
index 7ec2988b597e..cf5f9e22bb7e 100644
--- a/Documentation/devicetree/bindings/nvmem/mediatek,efuse.yaml
+++ b/Documentation/devicetree/bindings/nvmem/mediatek,efuse.yaml
@@ -16,6 +16,7 @@ maintainers:
allOf:
- $ref: nvmem.yaml#
+ - $ref: nvmem-deprecated-cells.yaml#
properties:
$nodename:
diff --git a/Documentation/devicetree/bindings/nvmem/microchip,sama7g5-otpc.yaml b/Documentation/devicetree/bindings/nvmem/microchip,sama7g5-otpc.yaml
index a296d348adb4..cc25f2927682 100644
--- a/Documentation/devicetree/bindings/nvmem/microchip,sama7g5-otpc.yaml
+++ b/Documentation/devicetree/bindings/nvmem/microchip,sama7g5-otpc.yaml
@@ -16,6 +16,7 @@ description: |
allOf:
- $ref: nvmem.yaml#
+ - $ref: nvmem-deprecated-cells.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml b/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml
index a9b822aeaa7e..f43186f98607 100644
--- a/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml
+++ b/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml
@@ -11,6 +11,7 @@ maintainers:
allOf:
- $ref: nvmem.yaml#
+ - $ref: nvmem-deprecated-cells.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/nvmem/nvmem-deprecated-cells.yaml b/Documentation/devicetree/bindings/nvmem/nvmem-deprecated-cells.yaml
new file mode 100644
index 000000000000..951af28bbfb3
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/nvmem-deprecated-cells.yaml
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvmem/nvmem-deprecated-cells.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVMEM old syntax for fixed cells
+
+maintainers:
+ - Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+
+description: |
+ Before introducing NVMEM layouts, all NVMEM (fixed) cells were defined
+ as direct device subnodes. That syntax has been replaced by "fixed-layout"
+ and is now deprecated. No new bindings should use it.
+
+patternProperties:
+ "@[0-9a-f]+(,[0-7])?$":
+ type: object
+ allOf:
+ - $ref: layouts/fixed-cell.yaml
+ - properties:
+ compatible: false
+ deprecated: true
+
+additionalProperties: true
+
+...
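For comparison, a minimal sketch of the deprecated direct-subnode syntax next to the preferred fixed-layout container (the cell name, offset and size are illustrative):

    /* deprecated: fixed cell as a direct subnode of the NVMEM device */
    mac-address@10 {
        reg = <0x10 0x6>;
    };

    /* preferred: the same cell inside a fixed-layout container */
    nvmem-layout {
        compatible = "fixed-layout";
        #address-cells = <1>;
        #size-cells = <1>;

        mac-address@10 {
            reg = <0x10 0x6>;
        };
    };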
diff --git a/Documentation/devicetree/bindings/nvmem/nvmem.yaml b/Documentation/devicetree/bindings/nvmem/nvmem.yaml
index 9f921d940142..4fd015d402ce 100644
--- a/Documentation/devicetree/bindings/nvmem/nvmem.yaml
+++ b/Documentation/devicetree/bindings/nvmem/nvmem.yaml
@@ -46,15 +46,6 @@ properties:
container may reference more advanced (dynamic) layout
parsers.
-patternProperties:
- "@[0-9a-f]+(,[0-7])?$":
- type: object
- allOf:
- - $ref: layouts/fixed-cell.yaml
- - properties:
- compatible: false
- deprecated: true
-
additionalProperties: true
examples:
diff --git a/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml b/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml
index 8740938c32eb..8c8f05d9eaf1 100644
--- a/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml
+++ b/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml
@@ -11,6 +11,7 @@ maintainers:
allOf:
- $ref: nvmem.yaml#
+ - $ref: nvmem-deprecated-cells.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/nvmem/qcom,sec-qfprom.yaml b/Documentation/devicetree/bindings/nvmem/qcom,sec-qfprom.yaml
index 9b133f783d29..2ada2099946d 100644
--- a/Documentation/devicetree/bindings/nvmem/qcom,sec-qfprom.yaml
+++ b/Documentation/devicetree/bindings/nvmem/qcom,sec-qfprom.yaml
@@ -16,6 +16,7 @@ description:
allOf:
- $ref: nvmem.yaml#
+ - $ref: nvmem-deprecated-cells.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/nvmem/qcom,spmi-sdam.yaml b/Documentation/devicetree/bindings/nvmem/qcom,spmi-sdam.yaml
index cd980def97b8..068bedf5dbc9 100644
--- a/Documentation/devicetree/bindings/nvmem/qcom,spmi-sdam.yaml
+++ b/Documentation/devicetree/bindings/nvmem/qcom,spmi-sdam.yaml
@@ -16,6 +16,7 @@ description: |
allOf:
- $ref: nvmem.yaml#
+ - $ref: nvmem-deprecated-cells.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/nvmem/rockchip,otp.yaml b/Documentation/devicetree/bindings/nvmem/rockchip,otp.yaml
index 9c6eff788928..a44d44b32809 100644
--- a/Documentation/devicetree/bindings/nvmem/rockchip,otp.yaml
+++ b/Documentation/devicetree/bindings/nvmem/rockchip,otp.yaml
@@ -49,6 +49,7 @@ required:
allOf:
- $ref: nvmem.yaml#
+ - $ref: nvmem-deprecated-cells.yaml#
- if:
properties:
diff --git a/Documentation/devicetree/bindings/nvmem/rockchip-efuse.yaml b/Documentation/devicetree/bindings/nvmem/rockchip-efuse.yaml
index c5403e149080..b80fd8d1ae5b 100644
--- a/Documentation/devicetree/bindings/nvmem/rockchip-efuse.yaml
+++ b/Documentation/devicetree/bindings/nvmem/rockchip-efuse.yaml
@@ -11,6 +11,7 @@ maintainers:
allOf:
- $ref: nvmem.yaml#
+ - $ref: nvmem-deprecated-cells.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/nvmem/socionext,uniphier-efuse.yaml b/Documentation/devicetree/bindings/nvmem/socionext,uniphier-efuse.yaml
index efccc5aacbe0..e27cbae2d63a 100644
--- a/Documentation/devicetree/bindings/nvmem/socionext,uniphier-efuse.yaml
+++ b/Documentation/devicetree/bindings/nvmem/socionext,uniphier-efuse.yaml
@@ -12,6 +12,7 @@ maintainers:
allOf:
- $ref: nvmem.yaml#
+ - $ref: nvmem-deprecated-cells.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/nvmem/sunplus,sp7021-ocotp.yaml b/Documentation/devicetree/bindings/nvmem/sunplus,sp7021-ocotp.yaml
index da3f1de7d281..af97eeb8316c 100644
--- a/Documentation/devicetree/bindings/nvmem/sunplus,sp7021-ocotp.yaml
+++ b/Documentation/devicetree/bindings/nvmem/sunplus,sp7021-ocotp.yaml
@@ -12,6 +12,7 @@ maintainers:
allOf:
- $ref: nvmem.yaml#
+ - $ref: nvmem-deprecated-cells.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/nvmem/u-boot,env.yaml b/Documentation/devicetree/bindings/nvmem/u-boot,env.yaml
index 36d97fb87865..9c36afc7084b 100644
--- a/Documentation/devicetree/bindings/nvmem/u-boot,env.yaml
+++ b/Documentation/devicetree/bindings/nvmem/u-boot,env.yaml
@@ -51,6 +51,8 @@ properties:
ethaddr:
type: object
description: Ethernet interfaces base MAC address.
+ additionalProperties: false
+
properties:
"#nvmem-cell-cells":
description: The first argument is a MAC address offset.
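A hedged sketch of how the ethaddr cell and its "#nvmem-cell-cells" argument might be consumed; the partition offset/size, label and the &eth1 consumer node are placeholders for illustration:

    partition@0 {
        compatible = "u-boot,env";
        reg = <0x0 0x40000>;

        ethaddr: ethaddr {
            #nvmem-cell-cells = <1>;
        };
    };

    /* Second interface: base MAC address plus offset 1 */
    &eth1 {
        nvmem-cells = <&ethaddr 1>;
        nvmem-cell-names = "mac-address";
    };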
diff --git a/Documentation/devicetree/bindings/phy/marvell,pxa1928-usb-phy.yaml b/Documentation/devicetree/bindings/phy/marvell,pxa1928-usb-phy.yaml
new file mode 100644
index 000000000000..be33f036cda8
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/marvell,pxa1928-usb-phy.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/marvell,pxa1928-usb-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell PXA1928 USB/HSIC PHY
+
+maintainers:
+ - Duje Mihanović <duje.mihanovic@skole.hr>
+
+properties:
+ compatible:
+ enum:
+ - marvell,pxa1928-usb-phy
+ - marvell,pxa1928-hsic-phy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ '#phy-cells':
+ const: 0
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - '#phy-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/marvell,pxa1928.h>
+
+ usbphy: phy@7000 {
+ compatible = "marvell,pxa1928-usb-phy";
+ reg = <0x7000 0xe0>;
+ clocks = <&apmu PXA1928_CLK_USB>;
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/mediatek,dsi-phy.yaml b/Documentation/devicetree/bindings/phy/mediatek,dsi-phy.yaml
index a63b20dfa4a5..6703689fcdbe 100644
--- a/Documentation/devicetree/bindings/phy/mediatek,dsi-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/mediatek,dsi-phy.yaml
@@ -30,6 +30,7 @@ properties:
- const: mediatek,mt8173-mipi-tx
- items:
- enum:
+ - mediatek,mt8188-mipi-tx
- mediatek,mt8365-mipi-tx
- const: mediatek,mt8183-mipi-tx
- const: mediatek,mt2701-mipi-tx
diff --git a/Documentation/devicetree/bindings/phy/mediatek,mt7628-usbphy.yaml b/Documentation/devicetree/bindings/phy/mediatek,mt7628-usbphy.yaml
new file mode 100644
index 000000000000..ce2c228e3216
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/mediatek,mt7628-usbphy.yaml
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/mediatek,mt7628-usbphy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek/Ralink USB PHY
+
+maintainers:
+ - Sergio Paracuellos <sergio.paracuellos@gmail.com>
+
+properties:
+ compatible:
+ enum:
+ - mediatek,mt7620-usbphy
+ - mediatek,mt7628-usbphy
+ - ralink,rt3352-usbphy
+
+ reg:
+ maxItems: 1
+
+ "#phy-cells":
+ const: 0
+
+ ralink,sysctl:
+ description:
+ phandle to a ralink syscon register region.
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+ resets:
+ items:
+ - description: USB Host reset controller
+ - description: USB Device reset controller
+
+ reset-names:
+ items:
+ - const: host
+ - const: device
+
+required:
+ - compatible
+ - "#phy-cells"
+ - ralink,sysctl
+ - resets
+ - reset-names
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: mediatek,mt7628-usbphy
+ then:
+ required:
+ - reg
+ else:
+ properties:
+ reg: false
+
+additionalProperties: false
+
+examples:
+ - |
+ phy@10120000 {
+ compatible = "mediatek,mt7628-usbphy";
+ reg = <0x10120000 0x1000>;
+ #phy-cells = <0>;
+ ralink,sysctl = <&sysc>;
+ resets = <&rstctrl 22>,
+ <&rstctrl 25>;
+ reset-names = "host", "device";
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/phy/phy-stih407-usb.txt b/Documentation/devicetree/bindings/phy/phy-stih407-usb.txt
deleted file mode 100644
index 35f03df00130..000000000000
--- a/Documentation/devicetree/bindings/phy/phy-stih407-usb.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-ST STiH407 USB PHY controller
-
-This file documents the dt bindings for the usb picoPHY driver which is the PHY for both USB2 and USB3
-host controllers (when controlling usb2/1.1 devices) available on STiH407 SoC family from STMicroelectronics.
-
-Required properties:
-- compatible : should be "st,stih407-usb2-phy"
-- st,syscfg : phandle of sysconfig bank plus integer array containing phyparam and phyctrl register offsets
-- resets : list of phandle and reset specifier pairs. There should be two entries, one
- for the whole phy and one for the port
-- reset-names : list of reset signal names. Should be "global" and "port"
-See: Documentation/devicetree/bindings/reset/st,stih407-powerdown.yaml
-See: Documentation/devicetree/bindings/reset/reset.txt
-
-Example:
-
-usb2_picophy0: usbpicophy@f8 {
- compatible = "st,stih407-usb2-phy";
- #phy-cells = <0>;
- st,syscfg = <&syscfg_core 0x100 0xf4>;
- resets = <&softreset STIH407_PICOPHY_SOFTRESET>,
- <&picophyreset STIH407_PICOPHY0_RESET>;
- reset-names = "global", "port";
-};
diff --git a/Documentation/devicetree/bindings/phy/pxa1928-usb-phy.txt b/Documentation/devicetree/bindings/phy/pxa1928-usb-phy.txt
deleted file mode 100644
index da94426aa694..000000000000
--- a/Documentation/devicetree/bindings/phy/pxa1928-usb-phy.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-* Marvell PXA1928 USB and HSIC PHYs
-
-Required properties:
-- compatible: "marvell,pxa1928-usb-phy" or "marvell,pxa1928-hsic-phy"
-- reg: base address and length of the registers
-- clocks - A single clock. From common clock binding.
-- #phys-cells: should be 0. From common phy binding.
-- resets: reference to the reset controller
-
-Example:
-
- usbphy: phy@7000 {
- compatible = "marvell,pxa1928-usb-phy";
- reg = <0x7000 0xe0>;
- clocks = <&apmu_clocks PXA1928_CLK_USB>;
- #phy-cells = <0>;
- };
-
diff --git a/Documentation/devicetree/bindings/phy/qcom,ipq5332-usb-hsphy.yaml b/Documentation/devicetree/bindings/phy/qcom,ipq5332-usb-hsphy.yaml
index 2671a048c926..e77576d06c0e 100644
--- a/Documentation/devicetree/bindings/phy/qcom,ipq5332-usb-hsphy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,ipq5332-usb-hsphy.yaml
@@ -17,7 +17,9 @@ description:
properties:
compatible:
items:
- - const: qcom,ipq5332-usb-hsphy
+ - enum:
+ - qcom,ipq5018-usb-hsphy
+ - qcom,ipq5332-usb-hsphy
"#phy-cells":
const: 0
diff --git a/Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-usb3-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-usb3-phy.yaml
deleted file mode 100644
index 827109d37041..000000000000
--- a/Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-usb3-phy.yaml
+++ /dev/null
@@ -1,287 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/phy/qcom,msm8996-qmp-usb3-phy.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: Qualcomm QMP PHY controller (USB, MSM8996)
-
-maintainers:
- - Vinod Koul <vkoul@kernel.org>
-
-description:
- QMP PHY controller supports physical layer functionality for a number of
- controllers on Qualcomm chipsets, such as, PCIe, UFS, and USB.
-
- Note that these bindings are for SoCs up to SC8180X. For newer SoCs, see
- qcom,sc8280xp-qmp-usb3-uni-phy.yaml.
-
-properties:
- compatible:
- enum:
- - qcom,ipq6018-qmp-usb3-phy
- - qcom,ipq8074-qmp-usb3-phy
- - qcom,msm8996-qmp-usb3-phy
- - qcom,msm8998-qmp-usb3-phy
- - qcom,sdm845-qmp-usb3-uni-phy
- - qcom,sdx55-qmp-usb3-uni-phy
- - qcom,sdx65-qmp-usb3-uni-phy
- - qcom,sm8150-qmp-usb3-uni-phy
- - qcom,sm8250-qmp-usb3-uni-phy
- - qcom,sm8350-qmp-usb3-uni-phy
-
- reg:
- items:
- - description: serdes
-
- "#address-cells":
- enum: [ 1, 2 ]
-
- "#size-cells":
- enum: [ 1, 2 ]
-
- ranges: true
-
- clocks:
- minItems: 3
- maxItems: 4
-
- clock-names:
- minItems: 3
- maxItems: 4
-
- power-domains:
- maxItems: 1
-
- resets:
- maxItems: 2
-
- reset-names:
- maxItems: 2
-
- vdda-phy-supply: true
-
- vdda-pll-supply: true
-
- vddp-ref-clk-supply: true
-
-patternProperties:
- "^phy@[0-9a-f]+$":
- type: object
- description: single PHY-provider child node
- properties:
- reg:
- minItems: 3
- maxItems: 6
-
- clocks:
- items:
- - description: PIPE clock
-
- clock-names:
- deprecated: true
- items:
- - const: pipe0
-
- "#clock-cells":
- const: 0
-
- clock-output-names:
- maxItems: 1
-
- "#phy-cells":
- const: 0
-
- required:
- - reg
- - clocks
- - "#clock-cells"
- - clock-output-names
- - "#phy-cells"
-
- additionalProperties: false
-
-required:
- - compatible
- - reg
- - "#address-cells"
- - "#size-cells"
- - ranges
- - clocks
- - clock-names
- - resets
- - reset-names
- - vdda-phy-supply
- - vdda-pll-supply
-
-additionalProperties: false
-
-allOf:
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,sdm845-qmp-usb3-uni-phy
- then:
- properties:
- clocks:
- maxItems: 4
- clock-names:
- items:
- - const: aux
- - const: cfg_ahb
- - const: ref
- - const: com_aux
- resets:
- maxItems: 2
- reset-names:
- items:
- - const: phy
- - const: common
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,ipq8074-qmp-usb3-phy
- - qcom,msm8996-qmp-usb3-phy
- - qcom,msm8998-qmp-usb3-phy
- - qcom,sdx55-qmp-usb3-uni-phy
- - qcom,sdx65-qmp-usb3-uni-phy
- then:
- properties:
- clocks:
- maxItems: 3
- clock-names:
- items:
- - const: aux
- - const: cfg_ahb
- - const: ref
- resets:
- maxItems: 2
- reset-names:
- items:
- - const: phy
- - const: common
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,sm8150-qmp-usb3-uni-phy
- - qcom,sm8250-qmp-usb3-uni-phy
- - qcom,sm8350-qmp-usb3-uni-phy
- then:
- properties:
- clocks:
- maxItems: 4
- clock-names:
- items:
- - const: aux
- - const: ref_clk_src
- - const: ref
- - const: com_aux
- resets:
- maxItems: 2
- reset-names:
- items:
- - const: phy
- - const: common
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,msm8998-qmp-usb3-phy
- then:
- patternProperties:
- "^phy@[0-9a-f]+$":
- properties:
- reg:
- items:
- - description: TX lane 1
- - description: RX lane 1
- - description: PCS
- - description: TX lane 2
- - description: RX lane 2
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,ipq6018-qmp-usb3-phy
- - qcom,ipq8074-qmp-usb3-phy
- - qcom,sdx55-qmp-usb3-uni-phy
- - qcom,sdx65-qmp-usb3-uni-phy
- - qcom,sm8150-qmp-usb3-uni-phy
- then:
- patternProperties:
- "^phy@[0-9a-f]+$":
- properties:
- reg:
- items:
- - description: TX
- - description: RX
- - description: PCS
- - description: PCS_MISC
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,msm8996-qmp-usb3-phy
- - qcom,sm8250-qmp-usb3-uni-phy
- - qcom,sm8350-qmp-usb3-uni-phy
- then:
- patternProperties:
- "^phy@[0-9a-f]+$":
- properties:
- reg:
- items:
- - description: TX
- - description: RX
- - description: PCS
-
-examples:
- - |
- #include <dt-bindings/clock/qcom,gcc-sdm845.h>
- usb_2_qmpphy: phy-wrapper@88eb000 {
- compatible = "qcom,sdm845-qmp-usb3-uni-phy";
- reg = <0x088eb000 0x18c>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x0 0x088eb000 0x2000>;
-
- clocks = <&gcc GCC_USB3_SEC_PHY_AUX_CLK >,
- <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
- <&gcc GCC_USB3_SEC_CLKREF_CLK>,
- <&gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>;
- clock-names = "aux", "cfg_ahb", "ref", "com_aux";
-
- resets = <&gcc GCC_USB3PHY_PHY_SEC_BCR>,
- <&gcc GCC_USB3_PHY_SEC_BCR>;
- reset-names = "phy", "common";
-
- vdda-phy-supply = <&vdda_usb2_ss_1p2>;
- vdda-pll-supply = <&vdda_usb2_ss_core>;
-
- usb_2_ssphy: phy@200 {
- reg = <0x200 0x128>,
- <0x400 0x1fc>,
- <0x800 0x218>,
- <0x600 0x70>;
-
- clocks = <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>;
-
- #clock-cells = <0>;
- clock-output-names = "usb3_uni_phy_pipe_clk_src";
-
- #phy-cells = <0>;
- };
- };
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
index d981d77e82e4..f3a3296c811c 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
@@ -19,6 +19,7 @@ properties:
- qcom,msm8996-qmp-ufs-phy
- qcom,msm8998-qmp-ufs-phy
- qcom,sa8775p-qmp-ufs-phy
+ - qcom,sc7280-qmp-ufs-phy
- qcom,sc8180x-qmp-ufs-phy
- qcom,sc8280xp-qmp-ufs-phy
- qcom,sdm845-qmp-ufs-phy
@@ -85,6 +86,7 @@ allOf:
contains:
enum:
- qcom,sa8775p-qmp-ufs-phy
+ - qcom,sc7280-qmp-ufs-phy
- qcom,sm8450-qmp-ufs-phy
then:
properties:
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml
index f99fbbcd68fb..57702f7f2a46 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml
@@ -16,20 +16,34 @@ description:
properties:
compatible:
enum:
+ - qcom,ipq6018-qmp-usb3-phy
+ - qcom,ipq8074-qmp-usb3-phy
- qcom,ipq9574-qmp-usb3-phy
+ - qcom,msm8996-qmp-usb3-phy
+ - qcom,msm8998-qmp-usb3-phy
- qcom,qcm2290-qmp-usb3-phy
- qcom,sa8775p-qmp-usb3-uni-phy
- qcom,sc8280xp-qmp-usb3-uni-phy
+ - qcom,sdm845-qmp-usb3-uni-phy
+ - qcom,sdx55-qmp-usb3-uni-phy
+ - qcom,sdx65-qmp-usb3-uni-phy
+ - qcom,sdx75-qmp-usb3-uni-phy
- qcom,sm6115-qmp-usb3-phy
+ - qcom,sm8150-qmp-usb3-uni-phy
+ - qcom,sm8250-qmp-usb3-uni-phy
+ - qcom,sm8350-qmp-usb3-uni-phy
+
reg:
maxItems: 1
clocks:
- maxItems: 4
+ minItems: 4
+ maxItems: 5
clock-names:
- maxItems: 4
+ minItems: 4
+ maxItems: 5
power-domains:
maxItems: 1
@@ -74,9 +88,18 @@ allOf:
compatible:
contains:
enum:
+ - qcom,ipq6018-qmp-usb3-phy
+ - qcom,ipq8074-qmp-usb3-phy
- qcom,ipq9574-qmp-usb3-phy
+ - qcom,msm8996-qmp-usb3-phy
+ - qcom,msm8998-qmp-usb3-phy
+ - qcom,sdx55-qmp-usb3-uni-phy
+ - qcom,sdx65-qmp-usb3-uni-phy
+ - qcom,sdx75-qmp-usb3-uni-phy
then:
properties:
+ clocks:
+ maxItems: 4
clock-names:
items:
- const: aux
@@ -109,6 +132,9 @@ allOf:
enum:
- qcom,sa8775p-qmp-usb3-uni-phy
- qcom,sc8280xp-qmp-usb3-uni-phy
+ - qcom,sm8150-qmp-usb3-uni-phy
+ - qcom,sm8250-qmp-usb3-uni-phy
+ - qcom,sm8350-qmp-usb3-uni-phy
then:
properties:
clocks:
@@ -119,6 +145,33 @@ allOf:
- const: ref
- const: com_aux
- const: pipe
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sdm845-qmp-usb3-uni-phy
+ then:
+ properties:
+ clocks:
+ maxItems: 5
+ clock-names:
+ items:
+ - const: aux
+ - const: cfg_ahb
+ - const: ref
+ - const: com_aux
+ - const: pipe
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sa8775p-qmp-usb3-uni-phy
+ - qcom,sc8280xp-qmp-usb3-uni-phy
+ then:
required:
- power-domains
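For comparison with the child-node example in the binding removed above, a sketch of how the same SDM845 PHY might be described under this flattened binding; the register size, clock-output name and regulator phandles are assumptions carried over from the old example rather than taken from a real board file:

    usb_2_qmpphy: phy@88eb000 {
        compatible = "qcom,sdm845-qmp-usb3-uni-phy";
        reg = <0x088eb000 0x2000>;

        clocks = <&gcc GCC_USB3_SEC_PHY_AUX_CLK>,
                 <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
                 <&gcc GCC_USB3_SEC_CLKREF_CLK>,
                 <&gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>,
                 <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>;
        clock-names = "aux", "cfg_ahb", "ref", "com_aux", "pipe";

        resets = <&gcc GCC_USB3PHY_PHY_SEC_BCR>,
                 <&gcc GCC_USB3_PHY_SEC_BCR>;
        reset-names = "phy", "common";

        vdda-phy-supply = <&vdda_usb2_ss_1p2>;
        vdda-pll-supply = <&vdda_usb2_ss_core>;

        #clock-cells = <0>;
        clock-output-names = "usb3_uni_phy_pipe_clk_src";
        #phy-cells = <0>;
    };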
diff --git a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-phy.yaml
index c53bab107b6d..c95828607ab6 100644
--- a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-phy.yaml
@@ -14,7 +14,12 @@ description:
properties:
compatible:
- const: qcom,sm8550-snps-eusb2-phy
+ oneOf:
+ - items:
+ - enum:
+ - qcom,sdx75-snps-eusb2-phy
+ - const: qcom,sm8550-snps-eusb2-phy
+ - const: qcom,sm8550-snps-eusb2-phy
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
index 029569d5fcf3..24c733c10e0e 100644
--- a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
@@ -32,6 +32,27 @@ properties:
vdd3-supply: true
+ qcom,tune-usb2-disc-thres:
+ $ref: /schemas/types.yaml#/definitions/uint8
+ description: High-Speed disconnect threshold
+ minimum: 0
+ maximum: 7
+ default: 0
+
+ qcom,tune-usb2-amplitude:
+ $ref: /schemas/types.yaml#/definitions/uint8
+ description: High-Speed transmit amplitude
+ minimum: 0
+ maximum: 15
+ default: 8
+
+ qcom,tune-usb2-preem:
+ $ref: /schemas/types.yaml#/definitions/uint8
+ description: High-Speed TX pre-emphasis tuning
+ minimum: 0
+ maximum: 7
+ default: 5
+
required:
- compatible
- reg
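A sketch of a repeater node using the new tuning properties; the compatible, reg value and regulator phandles are placeholders, and the /bits/ 8 casts reflect the uint8 type declared above:

    pm8550b_eusb2_repeater: phy@fd00 {
        compatible = "qcom,pm8550b-eusb2-repeater";
        reg = <0xfd00>;
        #phy-cells = <0>;

        vdd18-supply = <&vreg_l2b_3p0>;
        vdd3-supply = <&vreg_l5b_3p0>;

        /* Raise the HS disconnect threshold and TX amplitude */
        qcom,tune-usb2-disc-thres = /bits/ 8 <3>;
        qcom,tune-usb2-amplitude = /bits/ 8 <12>;
        qcom,tune-usb2-preem = /bits/ 8 <5>;
    };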
diff --git a/Documentation/devicetree/bindings/phy/ralink-usb-phy.txt b/Documentation/devicetree/bindings/phy/ralink-usb-phy.txt
deleted file mode 100644
index 9d2868a437ab..000000000000
--- a/Documentation/devicetree/bindings/phy/ralink-usb-phy.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Mediatek/Ralink USB PHY
-
-Required properties:
- - compatible: "ralink,rt3352-usbphy"
- "mediatek,mt7620-usbphy"
- "mediatek,mt7628-usbphy"
- - reg: required for "mediatek,mt7628-usbphy", unused otherwise
- - #phy-cells: should be 0
- - ralink,sysctl: a phandle to a ralink syscon register region
- - resets: the two reset controllers for host and device
- - reset-names: the names of the 2 reset controllers
-
-Example:
-
-usbphy: phy {
- compatible = "mediatek,mt7628-usbphy";
- reg = <0x10120000 0x1000>;
- #phy-cells = <0>;
-
- ralink,sysctl = <&sysc>;
- resets = <&rstctrl 22 &rstctrl 25>;
- reset-names = "host", "device";
-};
diff --git a/Documentation/devicetree/bindings/phy/st,stih407-usb2-phy.yaml b/Documentation/devicetree/bindings/phy/st,stih407-usb2-phy.yaml
new file mode 100644
index 000000000000..e45cdd98aab2
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/st,stih407-usb2-phy.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/st,stih407-usb2-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STiH407 USB PHY controller
+
+maintainers:
+ - Patrice Chotard <patrice.chotard@foss.st.com>
+
+description:
+ The USB picoPHY device is the PHY for both USB2 and USB3 host controllers
+ (when controlling USB2/1.1 devices) available on the STiH407 SoC family
+ from STMicroelectronics.
+
+properties:
+ compatible:
+ const: st,stih407-usb2-phy
+
+ st,syscfg:
+ description: Phandle to the syscfg bank
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ items:
+ - items:
+ - description: phandle to syscfg
+ - description: phyparam register offset
+ - description: phyctrl register offset
+
+ resets:
+ items:
+ - description: Phandle and reset specifier pair for the whole phy.
+ - description: Phandle and reset specifier pair for the port.
+
+ reset-names:
+ items:
+ - const: global
+ - const: port
+
+ "#phy-cells":
+ const: 0
+
+required:
+ - compatible
+ - st,syscfg
+ - resets
+ - reset-names
+ - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/reset/stih407-resets.h>
+ usb-phy {
+ compatible = "st,stih407-usb2-phy";
+ #phy-cells = <0>;
+ st,syscfg = <&syscfg_core 0x100 0xf4>;
+ resets = <&softreset STIH407_PICOPHY_SOFTRESET>,
+ <&picophyreset STIH407_PICOPHY0_RESET>;
+ reset-names = "global", "port";
+ };
+...
diff --git a/Documentation/devicetree/bindings/pinctrl/amlogic,meson-pinctrl-a1.yaml b/Documentation/devicetree/bindings/pinctrl/amlogic,meson-pinctrl-a1.yaml
index 4e7a456ea4cc..c7df4cd34197 100644
--- a/Documentation/devicetree/bindings/pinctrl/amlogic,meson-pinctrl-a1.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/amlogic,meson-pinctrl-a1.yaml
@@ -16,6 +16,7 @@ properties:
compatible:
enum:
- amlogic,c3-periphs-pinctrl
+ - amlogic,t7-periphs-pinctrl
- amlogic,meson-a1-periphs-pinctrl
- amlogic,meson-s4-periphs-pinctrl
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,bcm6318-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/brcm,bcm6318-pinctrl.yaml
index 4478a76171f7..62890a0aead1 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,bcm6318-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,bcm6318-pinctrl.yaml
@@ -24,6 +24,7 @@ patternProperties:
'-pins$':
type: object
$ref: pinmux-node.yaml#
+ additionalProperties: false
properties:
function:
@@ -37,6 +38,10 @@ patternProperties:
enum: [ gpio0, gpio1, gpio2, gpio3, gpio4, gpio5, gpio6, gpio7,
gpio8, gpio9, gpio10, gpio11, gpio12, gpio13, gpio40 ]
+ patternProperties:
+ '-pins$':
+ $ref: '#/patternProperties/-pins$'
+
allOf:
- $ref: pinctrl.yaml#
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,bcm63268-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/brcm,bcm63268-pinctrl.yaml
index 73e1caa7c011..7cc0e1650835 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,bcm63268-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,bcm63268-pinctrl.yaml
@@ -24,6 +24,7 @@ patternProperties:
'-pins$':
type: object
$ref: pinmux-node.yaml#
+ unevaluatedProperties: false
properties:
function:
@@ -36,11 +37,15 @@ patternProperties:
pins:
enum: [ gpio0, gpio1, gpio16, gpio17, gpio8, gpio9, gpio18, gpio19,
- gpio22, gpio23, gpio30, gpio31, nand_grp, gpio35
+ gpio22, gpio23, gpio30, gpio31, nand_grp, gpio35,
dectpd_grp, vdsl_phy_override_0_grp,
vdsl_phy_override_1_grp, vdsl_phy_override_2_grp,
vdsl_phy_override_3_grp, dsl_gpio8, dsl_gpio9 ]
+ patternProperties:
+ '-pins$':
+ $ref: '#/patternProperties/-pins$'
+
allOf:
- $ref: pinctrl.yaml#
@@ -122,46 +127,46 @@ examples:
pinctrl_nand: nand-pins {
function = "nand";
- group = "nand_grp";
+ pins = "nand_grp";
};
pinctrl_gpio35_alt: gpio35_alt-pins {
function = "gpio35_alt";
- pin = "gpio35";
+ pins = "gpio35";
};
pinctrl_dectpd: dectpd-pins {
function = "dectpd";
- group = "dectpd_grp";
+ pins = "dectpd_grp";
};
pinctrl_vdsl_phy_override_0: vdsl_phy_override_0-pins {
function = "vdsl_phy_override_0";
- group = "vdsl_phy_override_0_grp";
+ pins = "vdsl_phy_override_0_grp";
};
pinctrl_vdsl_phy_override_1: vdsl_phy_override_1-pins {
function = "vdsl_phy_override_1";
- group = "vdsl_phy_override_1_grp";
+ pins = "vdsl_phy_override_1_grp";
};
pinctrl_vdsl_phy_override_2: vdsl_phy_override_2-pins {
function = "vdsl_phy_override_2";
- group = "vdsl_phy_override_2_grp";
+ pins = "vdsl_phy_override_2_grp";
};
pinctrl_vdsl_phy_override_3: vdsl_phy_override_3-pins {
function = "vdsl_phy_override_3";
- group = "vdsl_phy_override_3_grp";
+ pins = "vdsl_phy_override_3_grp";
};
pinctrl_dsl_gpio8: dsl_gpio8-pins {
function = "dsl_gpio8";
- group = "dsl_gpio8";
+ pins = "dsl_gpio8";
};
pinctrl_dsl_gpio9: dsl_gpio9-pins {
function = "dsl_gpio9";
- group = "dsl_gpio9";
+ pins = "dsl_gpio9";
};
};
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,bcm6328-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/brcm,bcm6328-pinctrl.yaml
index 2750ba42aeb8..f57bb34c3130 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,bcm6328-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,bcm6328-pinctrl.yaml
@@ -24,6 +24,7 @@ patternProperties:
'-pins$':
type: object
$ref: pinmux-node.yaml#
+ unevaluatedProperties: false
properties:
function:
@@ -36,6 +37,10 @@ patternProperties:
gpio20, gpio25, gpio26, gpio27, gpio28, hsspi_cs1,
usb_port1 ]
+ patternProperties:
+ '-pins$':
+ $ref: '#/patternProperties/-pins$'
+
allOf:
- $ref: pinctrl.yaml#
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,bcm6358-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/brcm,bcm6358-pinctrl.yaml
index 2f6c540498bc..ce6fc5380c52 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,bcm6358-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,bcm6358-pinctrl.yaml
@@ -24,15 +24,16 @@ patternProperties:
'-pins$':
type: object
$ref: pinmux-node.yaml#
+ unevaluatedProperties: false
properties:
function:
enum: [ ebi_cs, uart1, serial_led, legacy_led, led, spi_cs, utopia,
pwm_syn_clk, sys_irq ]
- pins:
+ groups:
enum: [ ebi_cs_grp, uart1_grp, serial_led_grp, legacy_led_grp,
- led_grp, spi_cs_grp, utopia_grp, pwm_syn_clk, sys_irq_grp ]
+ led_grp, spi_cs_grp, utopia_grp, pwm_syn_clk_grp, sys_irq_grp ]
allOf:
- $ref: pinctrl.yaml#
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,bcm6362-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/brcm,bcm6362-pinctrl.yaml
index b3044f805753..5f7ed7d3dd49 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,bcm6362-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,bcm6362-pinctrl.yaml
@@ -24,6 +24,7 @@ patternProperties:
'-pins$':
type: object
$ref: pinmux-node.yaml#
+ unevaluatedProperties: false
properties:
function:
@@ -41,6 +42,10 @@ patternProperties:
gpio15, gpio16, gpio17, gpio18, gpio19, gpio20, gpio21,
gpio22, gpio23, gpio24, gpio25, gpio26, gpio27, nand_grp ]
+ patternProperties:
+ '-pins$':
+ $ref: '#/patternProperties/-pins$'
+
allOf:
- $ref: pinctrl.yaml#
@@ -204,6 +209,6 @@ examples:
pinctrl_nand: nand-pins {
function = "nand";
- group = "nand_grp";
+ pins = "nand_grp";
};
};
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,bcm6368-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/brcm,bcm6368-pinctrl.yaml
index 3236871827df..d549e945505b 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,bcm6368-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,bcm6368-pinctrl.yaml
@@ -24,6 +24,7 @@ patternProperties:
'-pins$':
type: object
$ref: pinmux-node.yaml#
+ unevaluatedProperties: false
properties:
function:
@@ -42,6 +43,10 @@ patternProperties:
gpio24, gpio25, gpio26, gpio27, gpio28, gpio29, gpio30,
gpio31, uart1_grp ]
+ patternProperties:
+ '-pins$':
+ $ref: '#/patternProperties/-pins$'
+
allOf:
- $ref: pinctrl.yaml#
@@ -215,6 +220,6 @@ examples:
pinctrl_uart1: uart1-pins {
function = "uart1";
- group = "uart1_grp";
+ pins = "uart1_grp";
};
};
diff --git a/Documentation/devicetree/bindings/pinctrl/nuvoton,npcm845-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/nuvoton,npcm845-pinctrl.yaml
new file mode 100644
index 000000000000..3e8472898800
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/nuvoton,npcm845-pinctrl.yaml
@@ -0,0 +1,217 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/nuvoton,npcm845-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Nuvoton NPCM845 Pin Controller and GPIO
+
+maintainers:
+ - Tomer Maimon <tmaimon77@gmail.com>
+
+description:
+ The Nuvoton BMC NPCM8XX pin controller routes multi-function pins through
+ the multiplexing block. Each pin supports GPIO functionality (GPIOx) and
+ multiple functions that directly connect the pin to different hardware
+ blocks.
+
+properties:
+ compatible:
+ const: nuvoton,npcm845-pinctrl
+
+ ranges:
+ maxItems: 1
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 1
+
+ nuvoton,sysgcr:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: a phandle to access GCR registers.
+
+patternProperties:
+ '^gpio@':
+ type: object
+ additionalProperties: false
+
+ description:
+ Eight GPIO banks that each contain 32 GPIOs.
+
+ properties:
+ gpio-controller: true
+
+ '#gpio-cells':
+ const: 2
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ gpio-ranges:
+ maxItems: 1
+
+ required:
+ - gpio-controller
+ - '#gpio-cells'
+ - reg
+ - interrupts
+ - gpio-ranges
+
+ '-mux$':
+ $ref: pinmux-node.yaml#
+
+ properties:
+ groups:
+ description:
+ One or more groups of pins to mux to a certain function
+ items:
+ enum: [ iox1, iox2, smb1d, smb2d, lkgpo1, lkgpo2, ioxh, gspi,
+ smb5b, smb5c, lkgpo0, pspi, jm1, jm2, smb4den, smb4b,
+ smb4c, smb15, smb16, smb17, smb18, smb19, smb20, smb21,
+ smb22, smb23, smb23b, smb4d, smb14, smb5, smb4, smb3,
+ spi0cs1, spi0cs2, spi0cs3, spi1cs0, spi1cs1, spi1cs2,
+ spi1cs3, spi1cs23, smb3c, smb3b, bmcuart0a, uart1, jtag2,
+ bmcuart1, uart2, sg1mdio, bmcuart0b, r1err, r1md, r1oen,
+ r2oen, rmii3, r3oen, smb3d, fanin0, fanin1, fanin2, fanin3,
+ fanin4, fanin5, fanin6, fanin7, fanin8, fanin9, fanin10,
+ fanin11, fanin12, fanin13, fanin14, fanin15, pwm0, pwm1, pwm2,
+ pwm3, r2, r2err, r2md, r3rxer, ga20kbc, smb5d, lpc, espi, rg2,
+ ddr, i3c0, i3c1, i3c2, i3c3, i3c4, i3c5, smb0, smb1, smb2,
+ smb2c, smb2b, smb1c, smb1b, smb8, smb9, smb10, smb11, sd1,
+ sd1pwr, pwm4, pwm5, pwm6, pwm7, pwm8, pwm9, pwm10, pwm11,
+ mmc8, mmc, mmcwp, mmccd, mmcrst, clkout, serirq, lpcclk,
+ scipme, smi, smb6, smb7, spi1, faninx, r1, spi3, spi3cs1,
+ spi3quad, spi3cs2, spi3cs3, nprd_smi, smb0b, smb0c, smb0den,
+ smb0d, ddc, rg2mdio, wdog1, wdog2, smb12, smb13, spix,
+ spixcs1, clkreq, hgpio0, hgpio1, hgpio2, hgpio3, hgpio4,
+ hgpio5, hgpio6, hgpio7 ]
+
+ function:
+ description:
+ The function that a group of pins is muxed to
+ enum: [ iox1, iox2, smb1d, smb2d, lkgpo1, lkgpo2, ioxh, gspi,
+ smb5b, smb5c, lkgpo0, pspi, jm1, jm2, smb4den, smb4b,
+ smb4c, smb15, smb16, smb17, smb18, smb19, smb20, smb21,
+ smb22, smb23, smb23b, smb4d, smb14, smb5, smb4, smb3,
+ spi0cs1, spi0cs2, spi0cs3, spi1cs0, spi1cs1, spi1cs2,
+ spi1cs3, spi1cs23, smb3c, smb3b, bmcuart0a, uart1, jtag2,
+ bmcuart1, uart2, sg1mdio, bmcuart0b, r1err, r1md, r1oen,
+ r2oen, rmii3, r3oen, smb3d, fanin0, fanin1, fanin2, fanin3,
+ fanin4, fanin5, fanin6, fanin7, fanin8, fanin9, fanin10,
+ fanin11, fanin12, fanin13, fanin14, fanin15, pwm0, pwm1, pwm2,
+ pwm3, r2, r2err, r2md, r3rxer, ga20kbc, smb5d, lpc, espi, rg2,
+ ddr, i3c0, i3c1, i3c2, i3c3, i3c4, i3c5, smb0, smb1, smb2,
+ smb2c, smb2b, smb1c, smb1b, smb8, smb9, smb10, smb11, sd1,
+ sd1pwr, pwm4, pwm5, pwm6, pwm7, pwm8, pwm9, pwm10, pwm11,
+ mmc8, mmc, mmcwp, mmccd, mmcrst, clkout, serirq, lpcclk,
+ scipme, smi, smb6, smb7, spi1, faninx, r1, spi3, spi3cs1,
+ spi3quad, spi3cs2, spi3cs3, nprd_smi, smb0b, smb0c, smb0den,
+ smb0d, ddc, rg2mdio, wdog1, wdog2, smb12, smb13, spix,
+ spixcs1, clkreq, hgpio0, hgpio1, hgpio2, hgpio3, hgpio4,
+ hgpio5, hgpio6, hgpio7 ]
+
+ dependencies:
+ groups: [ function ]
+ function: [ groups ]
+
+ additionalProperties: false
+
+ '^pin':
+ $ref: pincfg-node.yaml#
+
+ properties:
+ pins:
+ description:
+ A list of pins to configure in certain ways, such as enabling
+ debouncing
+ items:
+ pattern: '^GPIO([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-6])'
+
+ bias-disable: true
+
+ bias-pull-up: true
+
+ bias-pull-down: true
+
+ input-enable: true
+
+ output-low: true
+
+ output-high: true
+
+ drive-push-pull: true
+
+ drive-open-drain: true
+
+ input-debounce:
+ description:
+ Debouncing periods in microseconds, one period per interrupt
+ bank found in the controller
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 4
+
+ slew-rate:
+ description: |
+ 0: Low rate
+ 1: High rate
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
+
+ drive-strength:
+ enum: [ 0, 1, 2, 4, 8, 12 ]
+
+ additionalProperties: false
+
+allOf:
+ - $ref: pinctrl.yaml#
+
+required:
+ - compatible
+ - ranges
+ - '#address-cells'
+ - '#size-cells'
+ - nuvoton,sysgcr
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/gpio/gpio.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ pinctrl: pinctrl@f0010000 {
+ compatible = "nuvoton,npcm845-pinctrl";
+ ranges = <0x0 0x0 0xf0010000 0x8000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ nuvoton,sysgcr = <&gcr>;
+
+ gpio0: gpio@0 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x0 0xb0>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-ranges = <&pinctrl 0 0 32>;
+ };
+
+ fanin0_pin: fanin0-mux {
+ groups = "fanin0";
+ function = "fanin0";
+ };
+
+ pin34_slew: pin34-slew {
+ pins = "GPIO34/I3C4_SDA";
+ bias-disable;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,mdm9607-tlmm.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,mdm9607-tlmm.yaml
index 2aedb7e7bc8b..5ece3b9d676b 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,mdm9607-tlmm.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,mdm9607-tlmm.yaml
@@ -43,7 +43,8 @@ patternProperties:
"-state$":
oneOf:
- $ref: "#/$defs/qcom-mdm9607-tlmm-state"
- - patternProperties:
+ - additionalProperties: false
+ patternProperties:
".*":
$ref: "#/$defs/qcom-mdm9607-tlmm-state"
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8226-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,msm8226-pinctrl.yaml
index 9efb76509580..a602bf0d27fb 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8226-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8226-pinctrl.yaml
@@ -67,8 +67,8 @@ $defs:
Specify the alternative function to be configured for the specified
pins. Functions are only valid for gpio pins.
enum: [ gpio, cci_i2c0, blsp_uim1, blsp_uim2, blsp_uim3, blsp_uim5,
- blsp_i2c1, blsp_i2c2, blsp_i2c3, blsp_i2c4, blsp_i2c5, blsp_spi1,
- blsp_spi2, blsp_spi3, blsp_spi5, blsp_uart1, blsp_uart2,
+ blsp_i2c1, blsp_i2c2, blsp_i2c3, blsp_i2c4, blsp_i2c5, blsp_i2c6,
+ blsp_spi1, blsp_spi2, blsp_spi3, blsp_spi5, blsp_uart1, blsp_uart2,
blsp_uart3, blsp_uart4, blsp_uart5, cam_mclk0, cam_mclk1,
gp0_clk, gp1_clk, sdc3, wlan ]
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sa8775p-tlmm.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sa8775p-tlmm.yaml
index e119a226a4b1..2173c5255638 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sa8775p-tlmm.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sa8775p-tlmm.yaml
@@ -28,6 +28,7 @@ properties:
gpio-controller: true
"#gpio-cells": true
gpio-ranges: true
+ wakeup-parent: true
gpio-reserved-ranges:
minItems: 1
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml
index 368d44ff5468..c8735ab97e40 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml
@@ -41,6 +41,10 @@ properties:
gpio-ranges:
maxItems: 1
+ gpio-reserved-ranges:
+ minItems: 1
+ maxItems: 88
+
gpio-line-names:
maxItems: 175
diff --git a/Documentation/devicetree/bindings/pinctrl/realtek,rtd1315e-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/realtek,rtd1315e-pinctrl.yaml
new file mode 100644
index 000000000000..fc6c65fea73b
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/realtek,rtd1315e-pinctrl.yaml
@@ -0,0 +1,188 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2023 Realtek Semiconductor Corporation
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/realtek,rtd1315e-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Realtek DHC RTD1315E Pin Controller
+
+maintainers:
+ - TY Chang <tychang@realtek.com>
+
+description:
+ The Realtek DHC RTD1315E is a high-definition media processor SoC. The
+ RTD1315E pin controller is used to control pin function, pull up/down
+ resistor, drive strength, schmitt trigger and power source.
+
+properties:
+ compatible:
+ const: realtek,rtd1315e-pinctrl
+
+ reg:
+ maxItems: 1
+
+patternProperties:
+ '-pins$':
+ type: object
+ allOf:
+ - $ref: pincfg-node.yaml#
+ - $ref: pinmux-node.yaml#
+
+ properties:
+ pins:
+ items:
+ enum: [ gpio_0, gpio_1, emmc_rst_n, emmc_dd_sb, emmc_clk, emmc_cmd,
+ gpio_6, gpio_7, gpio_8, gpio_9, gpio_10, gpio_11, gpio_12,
+ gpio_13, gpio_14, gpio_15, gpio_16, gpio_17, gpio_18, gpio_19,
+ gpio_20, emmc_data_0, emmc_data_1, emmc_data_2, usb_cc2, gpio_25,
+ gpio_26, gpio_27, gpio_28, gpio_29, gpio_30, gpio_31, gpio_32,
+ gpio_33, gpio_34, gpio_35, hif_data, hif_en, hif_rdy, hif_clk,
+ gpio_dummy_40, gpio_dummy_41, gpio_dummy_42, gpio_dummy_43,
+ gpio_dummy_44, gpio_dummy_45, gpio_46, gpio_47, gpio_48, gpio_49,
+ gpio_50, usb_cc1, emmc_data_3, emmc_data_4, ir_rx, ur0_rx, ur0_tx,
+ gpio_57, gpio_58, gpio_59, gpio_60, gpio_61, gpio_62, gpio_dummy_63,
+ gpio_dummy_64, gpio_dummy_65, gpio_66, gpio_67, gpio_68, gpio_69,
+ gpio_70, gpio_71, gpio_72, gpio_dummy_73, emmc_data_5, emmc_data_6,
+ emmc_data_7, gpio_dummy_77, gpio_78, gpio_79, gpio_80, gpio_81,
+ ur2_loc, gspi_loc, hi_width, sf_en, arm_trace_dbg_en,
+ ejtag_aucpu_loc, ejtag_acpu_loc, ejtag_vcpu_loc, ejtag_scpu_loc,
+ dmic_loc, vtc_dmic_loc, vtc_tdm_loc, vtc_i2si_loc, tdm_ai_loc,
+ ai_loc, spdif_loc, hif_en_loc, scan_switch, wd_rset, boot_sel,
+ reset_n, testmode ]
+
+ function:
+ enum: [ gpio, nf, emmc, ao, gspi_loc0, gspi_loc1, uart0, uart1,
+ uart2_loc0, uart2_loc1, i2c0, i2c1, i2c4, i2c5, pcie1,
+ etn_led, etn_phy, spi, pwm0_loc0, pwm0_loc1, pwm1_loc0,
+ pwm1_loc1, pwm2_loc0, pwm2_loc1, pwm3_loc0, pwm3_loc1,
+ spdif_optical_loc0, spdif_optical_loc1, usb_cc1, usb_cc2,
+ sd, dmic_loc0, dmic_loc1, ai_loc0, ai_loc1, tdm_ai_loc0,
+ tdm_ai_loc1, hi_loc0, hi_m, vtc_i2so, vtc_i2si_loc0,
+ vtc_i2si_loc1, vtc_dmic_loc0, vtc_dmic_loc1, vtc_tdm_loc0,
+ vtc_tdm_loc1, dc_fan, pll_test_loc0, pll_test_loc1,
+ ir_rx, uart2_disable, gspi_disable, hi_width_disable,
+ hi_width_1bit, sf_disable, sf_enable, scpu_ejtag_loc0,
+ scpu_ejtag_loc1, scpu_ejtag_loc2, scpu_ejtag_loc3,
+ acpu_ejtag_loc0, acpu_ejtag_loc1, acpu_ejtag_loc2,
+ vcpu_ejtag_loc0, vcpu_ejtag_loc1, vcpu_ejtag_loc2,
+ aucpu_ejtag_loc0, aucpu_ejtag_loc1, aucpu_ejtag_loc2,
+ gpu_ejtag, iso_tristate, dbg_out0, dbg_out1, standby_dbg,
+ spdif, arm_trace_debug_disable, arm_trace_debug_enable,
+ aucpu_ejtag_disable, acpu_ejtag_disable, vcpu_ejtag_disable,
+ scpu_ejtag_disable, vtc_dmic_loc_disable, vtc_tdm_disable,
+ vtc_i2si_disable, tdm_ai_disable, ai_disable, spdif_disable,
+ hif_disable, hif_enable, test_loop, pmic_pwrup ]
+
+ drive-strength:
+ enum: [4, 8]
+
+ bias-pull-down: true
+
+ bias-pull-up: true
+
+ bias-disable: true
+
+ input-schmitt-enable: true
+
+ input-schmitt-disable: true
+
+ drive-push-pull: true
+
+ power-source:
+ description: |
+ Valid arguments are described as below:
+ 0: power supply of 1.8V
+ 1: power supply of 3.3V
+ enum: [0, 1]
+
+ realtek,drive-strength-p:
+ description: |
+ Some of the pins can be driven using the P-MOS and N-MOS transistors to
+ achieve finer adjustments. The block-diagram representation is as
+ follows:
+ VDD
+ |
+ ||--+
+ +-----o|| P-MOS-FET
+ | ||--+
+ IN --+ +----- out
+ | ||--+
+ +------|| N-MOS-FET
+ ||--+
+ |
+ GND
+ The driving strength of the P-MOS/N-MOS transistors impacts the
+ waveform's rise/fall times. Greater driving strength results in
+ shorter rise/fall times. Each P-MOS and N-MOS transistor offers
+ 8 configurable levels (0 to 7), with higher values indicating
+ greater driving strength, contributing to achieving the desired
+ speed.
+
+ The realtek,drive-strength-p is used to control the driving strength
+ of the P-MOS output.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 7
+
+ realtek,drive-strength-n:
+ description: |
+ Similar to the realtek,drive-strength-p, the realtek,drive-strength-n
+ is used to control the driving strength of the N-MOS output.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 7
+
+ realtek,duty-cycle:
+ description: |
+ An integer describing the level to adjust output duty cycle, controlling
+ the proportion of positive and negative waveforms in nanoseconds.
+ Valid arguments are described as below:
+ 0: 0ns
+ 2: + 0.25ns
+ 3: + 0.5ns
+ 4: -0.25ns
+ 5: -0.5ns
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 2, 3, 4, 5 ]
+
+ required:
+ - pins
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ pinctrl@4e000 {
+ compatible = "realtek,rtd1315e-pinctrl";
+ reg = <0x4e000 0x130>;
+
+ emmc-hs200-pins {
+ pins = "emmc_clk",
+ "emmc_cmd",
+ "emmc_data_0",
+ "emmc_data_1",
+ "emmc_data_2",
+ "emmc_data_3",
+ "emmc_data_4",
+ "emmc_data_5",
+ "emmc_data_6",
+ "emmc_data_7";
+ function = "emmc";
+ realtek,drive-strength-p = <0x2>;
+ realtek,drive-strength-n = <0x2>;
+ };
+
+ i2c-0-pins {
+ pins = "gpio_12",
+ "gpio_13";
+ function = "i2c0";
+ drive-strength = <4>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/realtek,rtd1319d-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/realtek,rtd1319d-pinctrl.yaml
new file mode 100644
index 000000000000..f07361d60acd
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/realtek,rtd1319d-pinctrl.yaml
@@ -0,0 +1,187 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2023 Realtek Semiconductor Corporation
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/realtek,rtd1319d-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Realtek DHC RTD1319D Pin Controller
+
+maintainers:
+ - TY Chang <tychang@realtek.com>
+
+description:
+ The Realtek DHC RTD1319D is a high-definition media processor SoC. The
+ RTD1319D pin controller is used to control pin function, pull up/down
+ resistor, drive strength, schmitt trigger and power source.
+
+properties:
+ compatible:
+ const: realtek,rtd1319d-pinctrl
+
+ reg:
+ maxItems: 1
+
+patternProperties:
+ '-pins$':
+ type: object
+ allOf:
+ - $ref: pincfg-node.yaml#
+ - $ref: pinmux-node.yaml#
+
+ properties:
+ pins:
+ items:
+ enum: [ gpio_0, gpio_1, gpio_2, gpio_3, gpio_4, gpio_5, gpio_6, gpio_7,
+ gpio_8, gpio_9, gpio_10, gpio_11, gpio_12, gpio_13, gpio_14,
+ gpio_15, gpio_16, gpio_17, gpio_18, gpio_19, gpio_20, gpio_21,
+ gpio_22, gpio_23, usb_cc2, gpio_25, gpio_26, gpio_27, gpio_28,
+ gpio_29, gpio_30, gpio_31, gpio_32, gpio_33, gpio_34, gpio_35,
+ hif_data, hif_en, hif_rdy, hif_clk, gpio_40, gpio_41, gpio_42,
+ gpio_43, gpio_44, gpio_45, gpio_46, gpio_47, gpio_48, gpio_49,
+ gpio_50, usb_cc1, gpio_52, gpio_53, ir_rx, ur0_rx, ur0_tx,
+ gpio_57, gpio_58, gpio_59, gpio_60, gpio_61, gpio_62, gpio_63,
+ gpio_64, emmc_rst_n, emmc_dd_sb, emmc_clk, emmc_cmd, emmc_data_0,
+ emmc_data_1, emmc_data_2, emmc_data_3, emmc_data_4, emmc_data_5,
+ emmc_data_6, emmc_data_7, dummy, gpio_78, gpio_79, gpio_80,
+ gpio_81, ur2_loc, gspi_loc, hi_width, sf_en, arm_trace_dbg_en,
+ ejtag_aucpu_loc, ejtag_acpu_loc, ejtag_vcpu_loc, ejtag_scpu_loc,
+ dmic_loc, ejtag_secpu_loc, vtc_dmic_loc, vtc_tdm_loc, vtc_i2si_loc,
+ tdm_ai_loc, ai_loc, spdif_loc, hif_en_loc, sc0_loc, sc1_loc,
+ scan_switch, wd_rset, boot_sel, reset_n, testmode ]
+
+ function:
+ enum: [ gpio, nf, emmc, tp0, tp1, sc0, sc0_data0, sc0_data1, sc0_data2,
+ sc1, sc1_data0, sc1_data1, sc1_data2, ao, gspi_loc0, gspi_loc1,
+ uart0, uart1, uart2_loc0, uart2_loc1, i2c0, i2c1, i2c3, i2c4,
+ i2c5, pcie1, sdio, etn_led, etn_phy, spi, pwm0_loc0, pwm0_loc1,
+ pwm1_loc0, pwm1_loc1, pwm2_loc0, pwm2_loc1, pwm3_loc0, pwm3_loc1,
+ qam_agc_if0, qam_agc_if1, spdif_optical_loc0, spdif_optical_loc1,
+ usb_cc1, usb_cc2, vfd, sd, dmic_loc0, dmic_loc1, ai_loc0, ai_loc1,
+ tdm_ai_loc0, tdm_ai_loc1, hi_loc0, hi_m, vtc_i2so, vtc_i2si_loc0,
+ vtc_i2si_loc1, vtc_dmic_loc0, vtc_dmic_loc1, vtc_tdm_loc0,
+ vtc_tdm_loc1, dc_fan, pll_test_loc0, pll_test_loc1, ir_rx,
+ uart2_disable, gspi_disable, hi_width_disable, hi_width_1bit,
+ sf_disable, sf_enable, scpu_ejtag_loc0, scpu_ejtag_loc1,
+ scpu_ejtag_loc2, acpu_ejtag_loc0, acpu_ejtag_loc1, acpu_ejtag_loc2,
+ vcpu_ejtag_loc0, vcpu_ejtag_loc1, vcpu_ejtag_loc2, secpu_ejtag_loc0,
+ secpu_ejtag_loc1, secpu_ejtag_loc2, aucpu_ejtag_loc0, aucpu_ejtag_loc1,
+ aucpu_ejtag_loc2, iso_tristate, dbg_out0, dbg_out1, standby_dbg,
+ spdif, arm_trace_debug_disable, arm_trace_debug_enable,
+ aucpu_ejtag_disable, acpu_ejtag_disable, vcpu_ejtag_disable,
+ scpu_ejtag_disable, secpu_ejtag_disable, vtc_dmic_loc_disable,
+ vtc_tdm_disable, vtc_i2si_disable, tdm_ai_disable, ai_disable,
+ spdif_disable, hif_disable, hif_enable, test_loop, pmic_pwrup ]
+
+ drive-strength:
+ enum: [4, 8]
+
+ bias-pull-down: true
+
+ bias-pull-up: true
+
+ bias-disable: true
+
+ input-schmitt-enable: true
+
+ input-schmitt-disable: true
+
+ drive-push-pull: true
+
+ power-source:
+ description: |
+ Valid arguments are described as below:
+ 0: power supply of 1.8V
+ 1: power supply of 3.3V
+ enum: [0, 1]
+
+ realtek,drive-strength-p:
+ description: |
+ Some of the pins can be driven using the P-MOS and N-MOS transistors to
+ achieve finer adjustments. The block-diagram representation is as
+ follows:
+ VDD
+ |
+ ||--+
+ +-----o|| P-MOS-FET
+ | ||--+
+ IN --+ +----- out
+ | ||--+
+ +------|| N-MOS-FET
+ ||--+
+ |
+ GND
+ The driving strength of the P-MOS/N-MOS transistors impacts the
+ waveform's rise/fall times. Greater driving strength results in
+ shorter rise/fall times. Each P-MOS and N-MOS transistor offers
+ 8 configurable levels (0 to 7), with higher values indicating
+ greater driving strength, contributing to achieving the desired
+ speed.
+
+ The realtek,drive-strength-p is used to control the driving strength
+ of the P-MOS output.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 7
+
+ realtek,drive-strength-n:
+ description: |
+ Similar to the realtek,drive-strength-p, the realtek,drive-strength-n
+ is used to control the driving strength of the N-MOS output.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 7
+
+ realtek,duty-cycle:
+ description: |
+ An integer describing the level to adjust output duty cycle, controlling
+ the proportion of positive and negative waveforms in nanoseconds.
+ Valid arguments are described as below:
+ 0: 0ns
+ 2: + 0.25ns
+ 3: + 0.5ns
+ 4: -0.25ns
+ 5: -0.5ns
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 2, 3, 4, 5 ]
+
+ required:
+ - pins
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ pinctrl@4e000 {
+ compatible = "realtek,rtd1319d-pinctrl";
+ reg = <0x4e000 0x130>;
+
+ emmc-hs200-pins {
+ pins = "emmc_clk",
+ "emmc_cmd",
+ "emmc_data_0",
+ "emmc_data_1",
+ "emmc_data_2",
+ "emmc_data_3",
+ "emmc_data_4",
+ "emmc_data_5",
+ "emmc_data_6",
+ "emmc_data_7";
+ function = "emmc";
+ realtek,drive-strength-p = <0x2>;
+ realtek,drive-strength-n = <0x2>;
+ };
+
+ i2c-0-pins {
+ pins = "gpio_12",
+ "gpio_13";
+ function = "i2c0";
+ drive-strength = <4>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/realtek,rtd1619b-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/realtek,rtd1619b-pinctrl.yaml
new file mode 100644
index 000000000000..671e4ec84624
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/realtek,rtd1619b-pinctrl.yaml
@@ -0,0 +1,186 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2023 Realtek Semiconductor Corporation
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/realtek,rtd1619b-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Realtek DHC RTD1619B Pin Controller
+
+maintainers:
+ - TY Chang <tychang@realtek.com>
+
+description:
+ The Realtek DHC RTD1619B is a high-definition media processor SoC. The
+ RTD1619B pin controller is used to control pin function, pull up/down
+ resistor, drive strength, schmitt trigger and power source.
+
+properties:
+ compatible:
+ const: realtek,rtd1619b-pinctrl
+
+ reg:
+ maxItems: 1
+
+patternProperties:
+ '-pins$':
+ type: object
+ allOf:
+ - $ref: pincfg-node.yaml#
+ - $ref: pinmux-node.yaml#
+
+ properties:
+ pins:
+ items:
+ enum: [ gpio_0, gpio_1, gpio_2, gpio_3, gpio_4, gpio_5, gpio_6, gpio_7,
+ gpio_8, gpio_9, gpio_10, gpio_11, gpio_12, gpio_13, gpio_14,
+ gpio_15, gpio_16, gpio_17, gpio_18, gpio_19, gpio_20, gpio_21,
+ gpio_22, gpio_23, usb_cc2, gpio_25, gpio_26, gpio_27, gpio_28,
+ gpio_29, gpio_30, gpio_31, gpio_32, gpio_33, gpio_34, gpio_35,
+ hif_data, hif_en, hif_rdy, hif_clk, gpio_40, gpio_41, gpio_42,
+ gpio_43, gpio_44, gpio_45, gpio_46, gpio_47, gpio_48, gpio_49,
+ gpio_50, usb_cc1, gpio_52, gpio_53, ir_rx, ur0_rx, ur0_tx,
+ gpio_57, gpio_58, gpio_59, gpio_60, gpio_61, gpio_62, gpio_63,
+ gpio_64, gpio_65, gpio_66, gpio_67, gpio_68, gpio_69, gpio_70,
+ gpio_71, gpio_72, gpio_73, gpio_74, gpio_75, gpio_76, emmc_cmd,
+ spi_ce_n, spi_sck, spi_so, spi_si, emmc_rst_n, emmc_dd_sb,
+ emmc_clk, emmc_data_0, emmc_data_1, emmc_data_2, emmc_data_3,
+ emmc_data_4, emmc_data_5, emmc_data_6, emmc_data_7, ur2_loc,
+ gspi_loc, sdio_loc, hi_loc, hi_width, sf_en, arm_trace_dbg_en,
+ pwm_01_open_drain_en_loc0, pwm_23_open_drain_en_loc0,
+ pwm_01_open_drain_en_loc1, pwm_23_open_drain_en_loc1,
+ ejtag_acpu_loc, ejtag_vcpu_loc, ejtag_scpu_loc, dmic_loc,
+ iso_gspi_loc, ejtag_ve3_loc, ejtag_aucpu0_loc, ejtag_aucpu1_loc ]
+
+ function:
+ enum: [ gpio, nf, nf_spi, spi, pmic, spdif, spdif_coaxial, spdif_optical_loc0,
+ spdif_optical_loc1, emmc_spi, emmc, sc1, uart0, uart1, uart2_loc0, uart2_loc1,
+ gspi_loc1, iso_gspi_loc1, i2c0, i2c1, i2c3, i2c4, i2c5, pwm0, pwm1, pwm2,
+ pwm3, etn_led, etn_phy, etn_clk, sc0, vfd, gspi_loc0, iso_gspi_loc0, pcie1,
+ pcie2, sd, sdio_loc0, sdio_loc1, hi, hi_m, dc_fan, pll_test_loc0, pll_test_loc1,
+ usb_cc1, usb_cc2, ir_rx, tdm_ai_loc0, tdm_ai_loc1, dmic_loc0, dmic_loc1,
+ ai_loc0, ai_loc1, tp0, tp1, ao, uart2_disable, gspi_disable, sdio_disable,
+ hi_loc_disable, hi_loc0, hi_width_disable, hi_width_1bit, vtc_i2si_loc0,
+ vtc_tdm_loc0, vtc_dmic_loc0, vtc_i2si_loc1, vtc_tdm_loc1, vtc_dmic_loc1,
+ vtc_i2so, ve3_ejtag_loc0, aucpu0_ejtag_loc0, aucpu1_ejtag_loc0, ve3_ejtag_loc1,
+ aucpu0_ejtag_loc1, aucpu1_ejtag_loc1, ve3_ejtag_loc2, aucpu0_ejtag_loc2,
+ aucpu1_ejtag_loc2, scpu_ejtag_loc0, acpu_ejtag_loc0, vcpu_ejtag_loc0,
+ scpu_ejtag_loc1, acpu_ejtag_loc1, vcpu_ejtag_loc1, scpu_ejtag_loc2,
+ acpu_ejtag_loc2, vcpu_ejtag_loc2, ve3_ejtag_disable, aucpu0_ejtag_disable,
+ aucpu1_ejtag_disable, acpu_ejtag_disable, vcpu_ejtag_disable,
+ scpu_ejtag_disable, iso_gspi_disable, sf_disable, sf_enable,
+ arm_trace_debug_disable, arm_trace_debug_enable, pwm_normal, pwm_open_drain,
+ standby_dbg, test_loop_dis ]
+
+ drive-strength:
+ enum: [4, 8]
+
+ bias-pull-down: true
+
+ bias-pull-up: true
+
+ bias-disable: true
+
+ input-schmitt-enable: true
+
+ input-schmitt-disable: true
+
+ drive-push-pull: true
+
+ power-source:
+ description: |
+ Valid arguments are described as below:
+ 0: power supply of 1.8V
+ 1: power supply of 3.3V
+ enum: [0, 1]
+
+ realtek,drive-strength-p:
+ description: |
+ Some of the pins can be driven using the P-MOS and N-MOS transistors to
+ achieve finer adjustments. The block-diagram representation is as
+ follows:
+ VDD
+ |
+ ||--+
+ +-----o|| P-MOS-FET
+ | ||--+
+ IN --+ +----- out
+ | ||--+
+ +------|| N-MOS-FET
+ ||--+
+ |
+ GND
+ The driving strength of the P-MOS/N-MOS transistors impacts the
+ waveform's rise/fall times. Greater driving strength results in
+ shorter rise/fall times. Each P-MOS and N-MOS transistor offers
+ 8 configurable levels (0 to 7), with higher values indicating
+ greater driving strength, contributing to achieving the desired
+ speed.
+
+ The realtek,drive-strength-p is used to control the driving strength
+ of the P-MOS output.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 7
+
+ realtek,drive-strength-n:
+ description: |
+ Similar to the realtek,drive-strength-p, the realtek,drive-strength-n
+ is used to control the driving strength of the N-MOS output.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 7
+
+ realtek,duty-cycle:
+ description: |
+ An integer describing the level to adjust output duty cycle, controlling
+ the proportion of positive and negative waveforms in nanoseconds.
+ Valid arguments are described as below:
+ 0: 0ns
+ 2: + 0.25ns
+ 3: + 0.5ns
+ 4: -0.25ns
+ 5: -0.5ns
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 2, 3, 4, 5 ]
+
+ required:
+ - pins
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ pinctrl@4e000 {
+ compatible = "realtek,rtd1619b-pinctrl";
+ reg = <0x4e000 0x130>;
+
+ emmc-hs200-pins {
+ pins = "emmc_clk",
+ "emmc_cmd",
+ "emmc_data_0",
+ "emmc_data_1",
+ "emmc_data_2",
+ "emmc_data_3",
+ "emmc_data_4",
+ "emmc_data_5",
+ "emmc_data_6",
+ "emmc_data_7";
+ function = "emmc";
+ realtek,drive-strength-p = <0x2>;
+ realtek,drive-strength-n = <0x2>;
+ };
+
+ i2c-0-pins {
+ pins = "gpio_12",
+ "gpio_13";
+ function = "i2c0";
+ drive-strength = <4>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
index 145c5442f268..b5ca40d0e251 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
@@ -25,6 +25,7 @@ properties:
- enum:
- renesas,r9a07g043-pinctrl # RZ/G2UL{Type-1,Type-2} and RZ/Five
- renesas,r9a07g044-pinctrl # RZ/G2{L,LC}
+ - renesas,r9a08g045-pinctrl # RZ/G3S
- items:
- enum:
@@ -73,10 +74,26 @@ properties:
additionalProperties:
anyOf:
- type: object
+ additionalProperties: false
allOf:
- $ref: pincfg-node.yaml#
- $ref: pinmux-node.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,r9a08g045-pinctrl
+ then:
+ properties:
+ drive-strength: false
+ output-impedance-ohms: false
+ slew-rate: false
+ else:
+ properties:
+ drive-strength-microamp: false
+
description:
Pin controller client devices use pin configuration subnodes (children
and grandchildren) for desired pin configuration.
@@ -91,6 +108,10 @@ additionalProperties:
pins: true
drive-strength:
enum: [ 2, 4, 8, 12 ]
+ drive-strength-microamp:
+ enum: [ 1900, 2200, 4000, 4400, 4500, 4700, 5200, 5300, 5700,
+ 5800, 6000, 6050, 6100, 6550, 6800, 7000, 8000, 9000,
+ 10000 ]
output-impedance-ohms:
enum: [ 33, 50, 66, 100 ]
power-source:
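A hedged sketch of a pin configuration subnode using the new drive-strength-microamp property on RZ/G3S (r9a08g045); the group name and pin identifiers are illustrative, and on the other SoCs covered by this binding drive-strength/output-impedance-ohms remain the valid properties instead:

    sdhi0-data-pins {
        pins = "SD0_DATA0", "SD0_DATA1", "SD0_DATA2", "SD0_DATA3";
        drive-strength-microamp = <5200>;
    };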
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,rzv2m-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/renesas,rzv2m-pinctrl.yaml
index cb81a17bd0b1..5fa5d31f8866 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,rzv2m-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,rzv2m-pinctrl.yaml
@@ -53,6 +53,7 @@ properties:
additionalProperties:
anyOf:
- type: object
+ additionalProperties: false
allOf:
- $ref: pincfg-node.yaml#
- $ref: pinmux-node.yaml#
diff --git a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml
index 10c335efe619..20e806dce1ec 100644
--- a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml
@@ -115,6 +115,8 @@ additionalProperties:
type: object
additionalProperties:
type: object
+ additionalProperties: false
+
properties:
rockchip,pins:
$ref: /schemas/types.yaml#/definitions/uint32-matrix
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
index 2120ef71a78d..e1eb45a9eda4 100644
--- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
@@ -48,7 +48,8 @@ properties:
description: Phandle+args to the syscon node which includes IRQ mux selection.
$ref: /schemas/types.yaml#/definitions/phandle-array
items:
- - items:
+ - minItems: 2
+ items:
- description: syscon node which includes IRQ mux selection
- description: The offset of the IRQ mux selection register
- description: The field mask of IRQ mux, needed if different of 0xf
diff --git a/Documentation/devicetree/bindings/pwm/mxs-pwm.yaml b/Documentation/devicetree/bindings/pwm/mxs-pwm.yaml
index 6ffbed204c25..8f50e23ca8c9 100644
--- a/Documentation/devicetree/bindings/pwm/mxs-pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/mxs-pwm.yaml
@@ -15,12 +15,19 @@ allOf:
properties:
compatible:
- enum:
- - fsl,imx23-pwm
+ oneOf:
+ - const: fsl,imx23-pwm
+ - items:
+ - enum:
+ - fsl,imx28-pwm
+ - const: fsl,imx23-pwm
reg:
maxItems: 1
+ clocks:
+ maxItems: 1
+
"#pwm-cells":
const: 3
@@ -31,6 +38,7 @@ properties:
required:
- compatible
- reg
+ - clocks
- fsl,pwm-number
additionalProperties: false
@@ -40,6 +48,7 @@ examples:
pwm@80064000 {
compatible = "fsl,imx23-pwm";
reg = <0x80064000 0x2000>;
+ clocks = <&clks 30>;
#pwm-cells = <3>;
fsl,pwm-number = <8>;
};
diff --git a/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml b/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml
index 895415772d1d..09102dda4942 100644
--- a/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml
@@ -21,6 +21,7 @@ properties:
- mediatek,mt8188-scp
- mediatek,mt8192-scp
- mediatek,mt8195-scp
+ - mediatek,mt8195-scp-dual
reg:
description:
@@ -31,10 +32,7 @@ properties:
reg-names:
minItems: 2
- items:
- - const: sram
- - const: cfg
- - const: l1tcm
+ maxItems: 3
clocks:
description:
@@ -58,6 +56,93 @@ properties:
memory-region:
maxItems: 1
+ cros-ec-rpmsg:
+ $ref: /schemas/mfd/google,cros-ec.yaml
+ description:
+ This subnode represents the rpmsg device. The properties
+ of this node are defined by the individual bindings for
+ the rpmsg devices.
+
+ required:
+ - mediatek,rpmsg-name
+
+ unevaluatedProperties: false
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 1
+
+ ranges:
+ description:
+ Standard ranges definition providing address translations from the
+ local SCP SRAM address spaces to bus addresses.
+
+patternProperties:
+ "^scp@[a-f0-9]+$":
+ type: object
+ description:
+ The MediaTek SCP integrated into the SoC may be a multi-core version,
+ in which case the other cores are represented as child nodes of the
+ boot core. There are some integration differences for the IP, such as
+ the use of an address translator for translating SoC bus addresses into
+ the processor's address space.
+
+ Each SCP core has its own cache memory, while the SRAM and L1TCM are
+ shared by the cores. The cache, SRAM and L1TCM power must be enabled
+ before booting the SCP cores. The sizes of the cache, SRAM and L1TCM
+ vary between SoCs.
+
+ The SCP cores do not use an MMU, but have a set of registers to control
+ the translation of 32-bit CPU addresses into system bus addresses. Cache
+ and memory access settings are provided through a Memory Protection Unit
+ (MPU), programmable only from the SCP.
+
+ properties:
+ compatible:
+ enum:
+ - mediatek,scp-core
+
+ reg:
+ description: The base address and size of SRAM.
+ maxItems: 1
+
+ reg-names:
+ const: sram
+
+ interrupts:
+ maxItems: 1
+
+ firmware-name:
+ $ref: /schemas/types.yaml#/definitions/string
+ description:
+ If present, name (or relative path) of the file within the
+ firmware search path containing the firmware image used when
+ initializing the sub-cores of a multi-core SCP.
+
+ memory-region:
+ maxItems: 1
+
+ cros-ec-rpmsg:
+ $ref: /schemas/mfd/google,cros-ec.yaml
+ description:
+ This subnode represents the rpmsg device. The properties
+ of this node are defined by the individual bindings for
+ the rpmsg devices.
+
+ required:
+ - mediatek,rpmsg-name
+
+ unevaluatedProperties: false
+
+ required:
+ - compatible
+ - reg
+ - reg-names
+
+ additionalProperties: false
+
required:
- compatible
- reg
@@ -87,23 +172,39 @@ allOf:
reg:
maxItems: 2
reg-names:
+ items:
+ - const: sram
+ - const: cfg
+ - if:
+ properties:
+ compatible:
+ enum:
+ - mediatek,mt8192-scp
+ - mediatek,mt8195-scp
+ then:
+ properties:
+ reg:
+ maxItems: 3
+ reg-names:
+ items:
+ - const: sram
+ - const: cfg
+ - const: l1tcm
+ - if:
+ properties:
+ compatible:
+ enum:
+ - mediatek,mt8195-scp-dual
+ then:
+ properties:
+ reg:
maxItems: 2
+ reg-names:
+ items:
+ - const: cfg
+ - const: l1tcm
-additionalProperties:
- type: object
- description:
- Subnodes of the SCP represent rpmsg devices. The names of the devices
- are not important. The properties of these nodes are defined by the
- individual bindings for the rpmsg devices.
- properties:
- mediatek,rpmsg-name:
- $ref: /schemas/types.yaml#/definitions/string-array
- description:
- Contains the name for the rpmsg device. Used to match
- the subnode to rpmsg device announced by SCP.
-
- required:
- - mediatek,rpmsg-name
+additionalProperties: false
examples:
- |
@@ -118,7 +219,42 @@ examples:
clocks = <&infracfg CLK_INFRA_SCPSYS>;
clock-names = "main";
- cros_ec {
+ cros-ec-rpmsg {
+ compatible = "google,cros-ec-rpmsg";
mediatek,rpmsg-name = "cros-ec-rpmsg";
};
};
+
+ - |
+ scp@10500000 {
+ compatible = "mediatek,mt8195-scp-dual";
+ reg = <0x10720000 0xe0000>,
+ <0x10700000 0x8000>;
+ reg-names = "cfg", "l1tcm";
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x10500000 0x100000>;
+
+ scp@0 {
+ compatible = "mediatek,scp-core";
+ reg = <0x0 0xa0000>;
+ reg-names = "sram";
+
+ cros-ec-rpmsg {
+ compatible = "google,cros-ec-rpmsg";
+ mediatek,rpmsg-name = "cros-ec-rpmsg";
+ };
+ };
+
+ scp@a0000 {
+ compatible = "mediatek,scp-core";
+ reg = <0xa0000 0x20000>;
+ reg-names = "sram";
+
+ cros-ec-rpmsg {
+ compatible = "google,cros-ec-rpmsg";
+ mediatek,rpmsg-name = "cros-ec-rpmsg";
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml
index a2b0079de039..661c2b425da3 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml
@@ -66,7 +66,9 @@ allOf:
- qcom,msm8953-adsp-pil
- qcom,msm8974-adsp-pil
- qcom,msm8996-adsp-pil
+ - qcom,msm8996-slpi-pil
- qcom,msm8998-adsp-pas
+ - qcom,msm8998-slpi-pas
- qcom,sdm845-adsp-pas
- qcom,sdm845-cdsp-pas
- qcom,sdm845-slpi-pas
@@ -84,24 +86,6 @@ allOf:
compatible:
contains:
enum:
- - qcom,msm8996-slpi-pil
- - qcom,msm8998-slpi-pas
- then:
- properties:
- clocks:
- items:
- - description: XO clock
- - description: AGGRE2 clock
- clock-names:
- items:
- - const: xo
- - const: aggre2
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- qcom,msm8226-adsp-pil
- qcom,msm8953-adsp-pil
- qcom,msm8974-adsp-pil
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,msm8996-mss-pil.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,msm8996-mss-pil.yaml
index 0643faae2c39..971734085d51 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,msm8996-mss-pil.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,msm8996-mss-pil.yaml
@@ -220,7 +220,6 @@ allOf:
- description: GCC MSS GPLL0 clock
- description: GCC MSS SNOC_AXI clock
- description: GCC MSS MNOC_AXI clock
- - description: RPM PNOC clock
- description: RPM QDSS clock
clock-names:
items:
@@ -231,7 +230,6 @@ allOf:
- const: gpll0_mss
- const: snoc_axi
- const: mnoc_axi
- - const: pnoc
- const: qdss
glink-edge: false
required:
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sc7180-pas.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sc7180-pas.yaml
index 689d5d535331..f10f329677d8 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,sc7180-pas.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sc7180-pas.yaml
@@ -16,6 +16,7 @@ description:
properties:
compatible:
enum:
+ - qcom,sc7180-adsp-pas
- qcom,sc7180-mpss-pas
- qcom,sc7280-mpss-pas
@@ -30,26 +31,6 @@ properties:
items:
- const: xo
- interrupts:
- minItems: 6
-
- interrupt-names:
- minItems: 6
-
- power-domains:
- minItems: 2
- items:
- - description: CX power domain
- - description: MX power domain
- - description: MSS power domain
-
- power-domain-names:
- minItems: 2
- items:
- - const: cx
- - const: mx
- - const: mss
-
memory-region:
maxItems: 1
description: Reference to the reserved-memory for the Hexagon core
@@ -75,19 +56,69 @@ allOf:
properties:
compatible:
enum:
+ - qcom,sc7180-adsp-pas
+ then:
+ properties:
+ interrupts:
+ maxItems: 5
+ interrupt-names:
+ maxItems: 5
+ else:
+ properties:
+ interrupts:
+ minItems: 6
+ interrupt-names:
+ minItems: 6
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - qcom,sc7180-adsp-pas
+ then:
+ properties:
+ power-domains:
+ items:
+ - description: LCX power domain
+ - description: LMX power domain
+ power-domain-names:
+ items:
+ - const: lcx
+ - const: lmx
+
+ - if:
+ properties:
+ compatible:
+ enum:
- qcom,sc7180-mpss-pas
then:
properties:
power-domains:
- minItems: 3
+ items:
+ - description: CX power domain
+ - description: MX power domain
+ - description: MSS power domain
power-domain-names:
- minItems: 3
- else:
+ items:
+ - const: cx
+ - const: mx
+ - const: mss
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - qcom,sc7280-mpss-pas
+ then:
properties:
power-domains:
- maxItems: 2
+ items:
+ - description: CX power domain
+ - description: MX power domain
power-domain-names:
- maxItems: 2
+ items:
+ - const: cx
+ - const: mx
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sm6375-pas.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sm6375-pas.yaml
new file mode 100644
index 000000000000..3e4a03eb4532
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sm6375-pas.yaml
@@ -0,0 +1,145 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/remoteproc/qcom,sm6375-pas.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SM6375 Peripheral Authentication Service
+
+maintainers:
+ - Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+description:
+ Qualcomm SM6375 SoC Peripheral Authentication Service loads and boots
+ firmware on the Qualcomm DSP Hexagon cores.
+
+properties:
+ compatible:
+ enum:
+ - qcom,sm6375-adsp-pas
+ - qcom,sm6375-cdsp-pas
+ - qcom,sm6375-mpss-pas
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: XO clock
+
+ clock-names:
+ items:
+ - const: xo
+
+ memory-region:
+ maxItems: 1
+ description: Reference to the reserved-memory for the Hexagon core
+
+ firmware-name:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: Firmware name for the Hexagon core
+
+ smd-edge: false
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: /schemas/remoteproc/qcom,pas-common.yaml#
+ - if:
+ properties:
+ compatible:
+ enum:
+ - qcom,sm6375-adsp-pas
+ - qcom,sm6375-cdsp-pas
+ then:
+ properties:
+ interrupts:
+ maxItems: 5
+ interrupt-names:
+ maxItems: 5
+ else:
+ properties:
+ interrupts:
+ minItems: 6
+ interrupt-names:
+ minItems: 6
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - qcom,sm6375-adsp-pas
+ then:
+ properties:
+ power-domains:
+ items:
+ - description: LCX power domain
+ - description: LMX power domain
+ power-domain-names:
+ items:
+ - const: lcx
+ - const: lmx
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - qcom,sm6375-cdsp-pas
+ - qcom,sm6375-mpss-pas
+ then:
+ properties:
+ power-domains:
+ items:
+ - description: CX power domain
+ power-domain-names:
+ items:
+ - const: cx
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmcc.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/mailbox/qcom-ipcc.h>
+ #include <dt-bindings/power/qcom-rpmpd.h>
+
+ remoteproc_adsp: remoteproc@a400000 {
+ compatible = "qcom,sm6375-adsp-pas";
+ reg = <0x0a400000 0x100>;
+
+ interrupts-extended = <&intc GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+ <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready",
+ "handover", "stop-ack";
+
+ clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>;
+ clock-names = "xo";
+
+ power-domains = <&rpmpd SM6375_VDD_LPI_CX>,
+ <&rpmpd SM6375_VDD_LPI_MX>;
+ power-domain-names = "lcx", "lmx";
+
+ memory-region = <&pil_adsp_mem>;
+
+ qcom,smem-states = <&smp2p_adsp_out 0>;
+ qcom,smem-state-names = "stop";
+
+ glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ label = "lpass";
+ qcom,remote-pid = <2>;
+
+ /* ... */
+ };
+ };
diff --git a/Documentation/devicetree/bindings/remoteproc/ti,pru-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/ti,pru-rproc.yaml
index baccd98754a9..faf16cf14085 100644
--- a/Documentation/devicetree/bindings/remoteproc/ti,pru-rproc.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/ti,pru-rproc.yaml
@@ -66,6 +66,17 @@ properties:
Should contain the name of the default firmware image
file located on the firmware search path.
+ interrupts:
+ maxItems: 1
+ description:
+ Interrupt specifier that enables virtio/rpmsg communication between the
+ MPU and the PRU/RTU cores. For the meaning of the interrupt cells, refer
+ to the interrupt-controller/ti,pruss-intc.yaml schema.
+
+ interrupt-names:
+ items:
+ - const: vring
+
if:
properties:
compatible:
@@ -171,6 +182,9 @@ examples:
<0x22400 0x100>;
reg-names = "iram", "control", "debug";
firmware-name = "am65x-pru0_0-fw";
+ interrupt-parent = <&icssg0_intc>;
+ interrupts = <16 2 2>;
+ interrupt-names = "vring";
};
rtu0_0: rtu@4000 {
@@ -180,6 +194,9 @@ examples:
<0x23400 0x100>;
reg-names = "iram", "control", "debug";
firmware-name = "am65x-rtu0_0-fw";
+ interrupt-parent = <&icssg0_intc>;
+ interrupts = <20 4 4>;
+ interrupt-names = "vring";
};
tx_pru0_0: txpru@a000 {
@@ -198,6 +215,9 @@ examples:
<0x24400 0x100>;
reg-names = "iram", "control", "debug";
firmware-name = "am65x-pru0_1-fw";
+ interrupt-parent = <&icssg0_intc>;
+ interrupts = <18 3 3>;
+ interrupt-names = "vring";
};
rtu0_1: rtu@6000 {
@@ -207,6 +227,9 @@ examples:
<0x23c00 0x100>;
reg-names = "iram", "control", "debug";
firmware-name = "am65x-rtu0_1-fw";
+ interrupt-parent = <&icssg0_intc>;
+ interrupts = <22 5 5>;
+ interrupt-names = "vring";
};
tx_pru0_1: txpru@c000 {
diff --git a/Documentation/devicetree/bindings/rtc/amlogic,meson6-rtc.yaml b/Documentation/devicetree/bindings/rtc/amlogic,meson6-rtc.yaml
index 8bf7d3a9be98..3a4551253e3e 100644
--- a/Documentation/devicetree/bindings/rtc/amlogic,meson6-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/amlogic,meson6-rtc.yaml
@@ -13,6 +13,7 @@ maintainers:
allOf:
- $ref: rtc.yaml#
- $ref: /schemas/nvmem/nvmem.yaml#
+ - $ref: /schemas/nvmem/nvmem-deprecated-cells.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/rtc/cirrus,ep9301-rtc.yaml b/Documentation/devicetree/bindings/rtc/cirrus,ep9301-rtc.yaml
new file mode 100644
index 000000000000..a95f6af2b1c0
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/cirrus,ep9301-rtc.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/cirrus,ep9301-rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus EP93xx Real Time Clock controller
+
+maintainers:
+ - Hartley Sweeten <hsweeten@visionengravers.com>
+ - Alexander Sverdlin <alexander.sverdlin@gmail.com>
+
+allOf:
+ - $ref: rtc.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: cirrus,ep9301-rtc
+ - items:
+ - enum:
+ - cirrus,ep9302-rtc
+ - cirrus,ep9307-rtc
+ - cirrus,ep9312-rtc
+ - cirrus,ep9315-rtc
+ - const: cirrus,ep9301-rtc
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ rtc@80920000 {
+ compatible = "cirrus,ep9301-rtc";
+ reg = <0x80920000 0x100>;
+ };
diff --git a/Documentation/devicetree/bindings/rtc/epson,rtc7301.txt b/Documentation/devicetree/bindings/rtc/epson,rtc7301.txt
deleted file mode 100644
index 5f9df3f1467c..000000000000
--- a/Documentation/devicetree/bindings/rtc/epson,rtc7301.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-EPSON TOYOCOM RTC-7301SF/DG
-
-Required properties:
-
-- compatible: Should be "epson,rtc7301sf" or "epson,rtc7301dg"
-- reg: Specifies base physical address and size of the registers.
-- interrupts: A single interrupt specifier.
-
-Example:
-
-rtc: rtc@44a00000 {
- compatible = "epson,rtc7301dg";
- reg = <0x44a00000 0x10000>;
- interrupt-parent = <&axi_intc_0>;
- interrupts = <3 2>;
-};
diff --git a/Documentation/devicetree/bindings/rtc/epson,rtc7301.yaml b/Documentation/devicetree/bindings/rtc/epson,rtc7301.yaml
new file mode 100644
index 000000000000..bdb5cadb31cc
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/epson,rtc7301.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/epson,rtc7301.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Epson Toyocom RTC-7301SF/DG
+
+description:
+ The only difference between the two variants is the packaging.
+ The DG variant is a DIL package, and the SF variant is a flat
+ package.
+
+maintainers:
+ - Akinobu Mita <akinobu.mita@gmail.com>
+
+properties:
+ compatible:
+ enum:
+ - epson,rtc7301dg
+ - epson,rtc7301sf
+
+ reg:
+ maxItems: 1
+
+ reg-io-width:
+ description:
+ The size (in bytes) of the IO accesses that should be performed
+ on the device.
+ enum: [1, 4]
+ default: 4
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ rtc: rtc@44a00000 {
+ compatible = "epson,rtc7301dg";
+ reg = <0x44a00000 0x10000>;
+ reg-io-width = <4>;
+ interrupt-parent = <&axi_intc_0>;
+ interrupts = <3 2>;
+ };
diff --git a/Documentation/devicetree/bindings/rtc/maxim,mcp795.txt b/Documentation/devicetree/bindings/rtc/maxim,mcp795.txt
deleted file mode 100644
index a59fdd8c236d..000000000000
--- a/Documentation/devicetree/bindings/rtc/maxim,mcp795.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-* Maxim MCP795 SPI Serial Real-Time Clock
-
-Required properties:
-- compatible: Should contain "maxim,mcp795".
-- reg: SPI address for chip
-
-Example:
- mcp795: rtc@0 {
- compatible = "maxim,mcp795";
- reg = <0>;
- };
diff --git a/Documentation/devicetree/bindings/rtc/microcrystal,rv3032.yaml b/Documentation/devicetree/bindings/rtc/microcrystal,rv3032.yaml
index 27a9de10f0af..7680089d1d92 100644
--- a/Documentation/devicetree/bindings/rtc/microcrystal,rv3032.yaml
+++ b/Documentation/devicetree/bindings/rtc/microcrystal,rv3032.yaml
@@ -38,6 +38,8 @@ properties:
- 3000
- 4400
+ wakeup-source: true
+
required:
- compatible
- reg
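
With wakeup-source now accepted, a board that routes the RV-3032 interrupt to a wake-capable line could be described roughly as below; the interrupt wiring is an assumption (only the 0x51 I2C address comes from the part):

    /* assumes #include <dt-bindings/interrupt-controller/irq.h> */
    rtc@51 {
        compatible = "microcrystal,rv3032";
        reg = <0x51>;
        interrupts-extended = <&gpio2 5 IRQ_TYPE_LEVEL_LOW>; /* assumed wiring */
        wakeup-source;
    };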
diff --git a/Documentation/devicetree/bindings/rtc/mstar,ssd202d-rtc.yaml b/Documentation/devicetree/bindings/rtc/mstar,ssd202d-rtc.yaml
new file mode 100644
index 000000000000..4c1f22ef5a2c
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/mstar,ssd202d-rtc.yaml
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/mstar,ssd202d-rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mstar SSD202D Real Time Clock
+
+maintainers:
+ - Daniel Palmer <daniel@0x0f.com>
+ - Romain Perier <romain.perier@gmail.com>
+
+allOf:
+ - $ref: rtc.yaml#
+
+properties:
+ compatible:
+ enum:
+ - mstar,ssd202d-rtc
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ rtc@6800 {
+ compatible = "mstar,ssd202d-rtc";
+ reg = <0x6800 0x200>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/rtc/nxp,pcf2123.yaml b/Documentation/devicetree/bindings/rtc/nxp,pcf2123.yaml
new file mode 100644
index 000000000000..96e377a4d1bc
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/nxp,pcf2123.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/nxp,pcf2123.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP PCF2123 SPI Real Time Clock
+
+maintainers:
+ - Javier Carrasco <javier.carrasco.cruz@gmail.com>
+
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+ - $ref: rtc.yaml#
+
+properties:
+ compatible:
+ enum:
+ - nxp,pcf2123
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtc@3 {
+ compatible = "nxp,pcf2123";
+ reg = <3>;
+ interrupts = <GIC_SPI 130 IRQ_TYPE_LEVEL_LOW>;
+ spi-cs-high;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/rtc/nxp,pcf8523.txt b/Documentation/devicetree/bindings/rtc/nxp,pcf8523.txt
deleted file mode 100644
index 0b1080c60f63..000000000000
--- a/Documentation/devicetree/bindings/rtc/nxp,pcf8523.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-* NXP PCF8523 Real Time Clock
-
-Required properties:
-- compatible: Should contain "nxp,pcf8523".
-- reg: I2C address for chip.
-
-Optional property:
-- quartz-load-femtofarads: The capacitive load of the quartz(x-tal),
- expressed in femto Farad (fF). Valid values are 7000 and 12500.
- Default value (if no value is specified) is 12500fF.
-
-Example:
-
-pcf8523: rtc@68 {
- compatible = "nxp,pcf8523";
- reg = <0x68>;
- quartz-load-femtofarads = <7000>;
-};
diff --git a/Documentation/devicetree/bindings/rtc/nxp,pcf8523.yaml b/Documentation/devicetree/bindings/rtc/nxp,pcf8523.yaml
new file mode 100644
index 000000000000..d11c8bc16bc0
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/nxp,pcf8523.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/nxp,pcf8523.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP PCF8523 Real Time Clock
+
+maintainers:
+ - Sam Ravnborg <sam@ravnborg.org>
+
+allOf:
+ - $ref: rtc.yaml#
+
+properties:
+ compatible:
+ const: nxp,pcf8523
+
+ reg:
+ maxItems: 1
+
+ quartz-load-femtofarads:
+ description:
+ The capacitive load of the crystal, expressed in femto Farad (fF).
+ enum: [ 7000, 12500 ]
+ default: 12500
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtc@68 {
+ compatible = "nxp,pcf8523";
+ reg = <0x68>;
+ quartz-load-femtofarads = <7000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/rtc/nxp,rtc-2123.txt b/Documentation/devicetree/bindings/rtc/nxp,rtc-2123.txt
deleted file mode 100644
index 7371f525a687..000000000000
--- a/Documentation/devicetree/bindings/rtc/nxp,rtc-2123.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-NXP PCF2123 SPI Real Time Clock
-
-Required properties:
-- compatible: should be: "nxp,pcf2123"
- or "microcrystal,rv2123"
-- reg: should be the SPI slave chipselect address
-
-Optional properties:
-- spi-cs-high: PCF2123 needs chipselect high
-
-Example:
-
-pcf2123: rtc@3 {
- compatible = "nxp,pcf2123"
- reg = <3>
- spi-cs-high;
-};
diff --git a/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml b/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
index 2a65f31ac5a0..c9e3c5262c21 100644
--- a/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
@@ -45,6 +45,8 @@ properties:
- isil,isl1208
# Intersil ISL1218 Low Power RTC with Battery Backed SRAM
- isil,isl1218
+ # SPI-bus interface real-time clock module
+ - maxim,mcp795
# Real Time Clock Module with I2C-Bus
- microcrystal,rv3029
# Real Time Clock
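
Since maxim,mcp795 is an SPI device, a trivial-rtc node for it sits under an SPI controller, essentially as in the example removed from maxim,mcp795.txt above; chip-select 0 is arbitrary:

    spi {
        #address-cells = <1>;
        #size-cells = <0>;

        rtc@0 {
            compatible = "maxim,mcp795";
            reg = <0>;
        };
    };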
diff --git a/Documentation/devicetree/bindings/serial/esp,esp32-acm.yaml b/Documentation/devicetree/bindings/serial/esp,esp32-acm.yaml
new file mode 100644
index 000000000000..77fbb2c72171
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/esp,esp32-acm.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/serial/esp,esp32-acm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ESP32S3 ACM gadget controller
+
+maintainers:
+ - Max Filippov <jcmvbkbc@gmail.com>
+
+description:
+ Fixed-function USB CDC-ACM gadget controller of the Espressif ESP32S3 SoC.
+
+allOf:
+ - $ref: serial.yaml#
+
+properties:
+ compatible:
+ const: esp,esp32s3-acm
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ serial@60038000 {
+ compatible = "esp,esp32s3-acm";
+ reg = <0x60038000 0x1000>;
+ interrupts = <96 3 0>;
+ };
diff --git a/Documentation/devicetree/bindings/serial/esp,esp32-uart.yaml b/Documentation/devicetree/bindings/serial/esp,esp32-uart.yaml
new file mode 100644
index 000000000000..2a80ca997a0c
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/esp,esp32-uart.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/serial/esp,esp32-uart.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ESP32xx UART controllers
+
+maintainers:
+ - Max Filippov <jcmvbkbc@gmail.com>
+
+description:
+ The ESP32 UART controller is part of the ESP32 SoC, and the ESP32S3 UART
+ controller is part of the ESP32S3 SoC. Both SoCs are produced by
+ Espressif Systems Co. Ltd.
+
+allOf:
+ - $ref: serial.yaml#
+
+properties:
+ compatible:
+ enum:
+ - esp,esp32-uart
+ - esp,esp32s3-uart
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ serial@60000000 {
+ compatible = "esp,esp32s3-uart";
+ reg = <0x60000000 0x80>;
+ interrupts = <27 1 0>;
+ clocks = <&serial_clk>;
+ };
diff --git a/Documentation/devicetree/bindings/serial/fsl-imx-uart.yaml b/Documentation/devicetree/bindings/serial/fsl-imx-uart.yaml
index 40414247d61a..83035553044a 100644
--- a/Documentation/devicetree/bindings/serial/fsl-imx-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/fsl-imx-uart.yaml
@@ -70,6 +70,8 @@ properties:
interrupts:
maxItems: 1
+ wakeup-source: true
+
fsl,dte-mode:
$ref: /schemas/types.yaml#/definitions/flag
description: |
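
A UART that should act as a system wakeup source can now simply carry the flag; a minimal sketch assuming an i.MX6Q-style node (address, interrupt cells and clock indices are illustrative):

    serial@2020000 {
        compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
        reg = <0x02020000 0x4000>;
        interrupts = <0 26 4>;              /* illustrative interrupt cells */
        clocks = <&clks 160>, <&clks 161>;  /* illustrative clock phandles */
        clock-names = "ipg", "per";
        wakeup-source;
    };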
diff --git a/Documentation/devicetree/bindings/serial/fsl-mxs-auart.yaml b/Documentation/devicetree/bindings/serial/fsl-mxs-auart.yaml
index 6a400a5e6fc7..da032effffe6 100644
--- a/Documentation/devicetree/bindings/serial/fsl-mxs-auart.yaml
+++ b/Documentation/devicetree/bindings/serial/fsl-mxs-auart.yaml
@@ -14,10 +14,13 @@ allOf:
properties:
compatible:
- enum:
- - fsl,imx23-auart
- - fsl,imx28-auart
- - alphascale,asm9260-auart
+ oneOf:
+ - const: fsl,imx23-auart
+ - const: alphascale,asm9260-auart
+ - items:
+ - enum:
+ - fsl,imx28-auart
+ - const: fsl,imx23-auart
reg:
maxItems: 1
@@ -82,7 +85,7 @@ examples:
};
auart0: serial@8006a000 {
- compatible = "fsl,imx28-auart";
+ compatible = "fsl,imx28-auart", "fsl,imx23-auart";
reg = <0x8006a000 0x2000>;
interrupts = <112>;
dmas = <&dma_apbx 8>, <&dma_apbx 9>;
diff --git a/Documentation/devicetree/bindings/serial/maxim,max310x.txt b/Documentation/devicetree/bindings/serial/maxim,max310x.txt
deleted file mode 100644
index 79e10a05a96a..000000000000
--- a/Documentation/devicetree/bindings/serial/maxim,max310x.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-* Maxim MAX310X advanced Universal Asynchronous Receiver-Transmitter (UART)
-
-Required properties:
-- compatible: Should be one of the following:
- - "maxim,max3107" for Maxim MAX3107,
- - "maxim,max3108" for Maxim MAX3108,
- - "maxim,max3109" for Maxim MAX3109,
- - "maxim,max14830" for Maxim MAX14830.
-- reg: SPI chip select number.
-- interrupts: Specifies the interrupt source of the parent interrupt
- controller. The format of the interrupt specifier depends on the
- parent interrupt controller.
-- clocks: phandle to the IC source clock.
-- clock-names: Should be "xtal" if clock is an external crystal or
- "osc" if an external clock source is used.
-
-Optional properties:
-- gpio-controller: Marks the device node as a GPIO controller.
-- #gpio-cells: Should be two. The first cell is the GPIO number and
- the second cell is used to specify the GPIO polarity:
- 0 = active high,
- 1 = active low.
-
-Example:
-
-/ {
- clocks {
- spi_uart_clk: osc_max14830 {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <3686400>;
- };
-
- };
-};
-
-&spi0 {
- max14830: max14830@0 {
- compatible = "maxim,max14830";
- reg = <0>;
- clocks = <&spi_uart_clk>;
- clock-names = "osc";
- interrupt-parent = <&gpio3>;
- interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
- gpio-controller;
- #gpio-cells = <2>;
- };
-};
diff --git a/Documentation/devicetree/bindings/serial/maxim,max310x.yaml b/Documentation/devicetree/bindings/serial/maxim,max310x.yaml
new file mode 100644
index 000000000000..889eeaca64a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/maxim,max310x.yaml
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/serial/maxim,max310x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Maxim MAX310X Advanced Universal Asynchronous Receiver-Transmitter (UART)
+
+maintainers:
+ - Hugo Villeneuve <hvilleneuve@dimonoff.com>
+
+properties:
+ compatible:
+ enum:
+ - maxim,max3107
+ - maxim,max3108
+ - maxim,max3109
+ - maxim,max14830
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ enum:
+ - xtal # External crystal
+ - osc # External clock source
+
+ gpio-controller: true
+
+ "#gpio-cells":
+ const: 2
+
+ gpio-line-names:
+ minItems: 1
+ maxItems: 16
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+ - $ref: /schemas/serial/serial.yaml#
+ - $ref: /schemas/serial/rs485.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ serial@2c {
+ compatible = "maxim,max3107";
+ reg = <0x2c>;
+ clocks = <&xtal4m>;
+ clock-names = "xtal";
+ interrupt-parent = <&gpio3>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/serial/nvidia,tegra20-hsuart.yaml b/Documentation/devicetree/bindings/serial/nvidia,tegra20-hsuart.yaml
index 04d55fecf47c..a5d67563cd53 100644
--- a/Documentation/devicetree/bindings/serial/nvidia,tegra20-hsuart.yaml
+++ b/Documentation/devicetree/bindings/serial/nvidia,tegra20-hsuart.yaml
@@ -91,11 +91,6 @@ properties:
- description: range upper bound
- description: adjustment (in permyriad, i.e. 0.01%)
-allOf:
- - $ref: serial.yaml
-
-unevaluatedProperties: false
-
required:
- compatible
- reg
@@ -106,6 +101,11 @@ required:
- dmas
- dma-names
+allOf:
+ - $ref: serial.yaml
+
+unevaluatedProperties: false
+
examples:
- |
#include <dt-bindings/clock/tegra30-car.h>
diff --git a/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt b/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt
deleted file mode 100644
index 1a7e4bff0456..000000000000
--- a/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt
+++ /dev/null
@@ -1,118 +0,0 @@
-* NXP SC16IS7xx advanced Universal Asynchronous Receiver-Transmitter (UART)
-* i2c as bus
-
-Required properties:
-- compatible: Should be one of the following:
- - "nxp,sc16is740" for NXP SC16IS740,
- - "nxp,sc16is741" for NXP SC16IS741,
- - "nxp,sc16is750" for NXP SC16IS750,
- - "nxp,sc16is752" for NXP SC16IS752,
- - "nxp,sc16is760" for NXP SC16IS760,
- - "nxp,sc16is762" for NXP SC16IS762.
-- reg: I2C address of the SC16IS7xx device.
-- interrupts: Should contain the UART interrupt
-- clocks: Reference to the IC source clock.
- OR (when there is no clock provider visible to the platform)
-- clock-frequency: The source clock frequency for the IC.
-
-Optional properties:
-- gpio-controller: Marks the device node as a GPIO controller.
-- #gpio-cells: Should be two. The first cell is the GPIO number and
- the second cell is used to specify the GPIO polarity:
- 0 = active high,
- 1 = active low.
-- irda-mode-ports: An array that lists the indices of the port that
- should operate in IrDA mode.
-- nxp,modem-control-line-ports: An array that lists the indices of the port that
- should have shared GPIO lines configured as
- modem control lines.
-
-Example:
- sc16is750: sc16is750@51 {
- compatible = "nxp,sc16is750";
- reg = <0x51>;
- clocks = <&clk20m>;
- interrupt-parent = <&gpio3>;
- interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- sc16is752: sc16is752@53 {
- compatible = "nxp,sc16is752";
- reg = <0x53>;
- clocks = <&clk20m>;
- interrupt-parent = <&gpio3>;
- interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
- nxp,modem-control-line-ports = <1>; /* Port 1 as modem control lines */
- gpio-controller; /* Port 0 as GPIOs */
- #gpio-cells = <2>;
- };
-
- sc16is752: sc16is752@54 {
- compatible = "nxp,sc16is752";
- reg = <0x54>;
- clocks = <&clk20m>;
- interrupt-parent = <&gpio3>;
- interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
- nxp,modem-control-line-ports = <0 1>; /* Ports 0 and 1 as modem control lines */
- };
-
-* spi as bus
-
-Required properties:
-- compatible: Should be one of the following:
- - "nxp,sc16is740" for NXP SC16IS740,
- - "nxp,sc16is741" for NXP SC16IS741,
- - "nxp,sc16is750" for NXP SC16IS750,
- - "nxp,sc16is752" for NXP SC16IS752,
- - "nxp,sc16is760" for NXP SC16IS760,
- - "nxp,sc16is762" for NXP SC16IS762.
-- reg: SPI chip select number.
-- interrupts: Specifies the interrupt source of the parent interrupt
- controller. The format of the interrupt specifier depends on the
- parent interrupt controller.
-- clocks: phandle to the IC source clock.
-
-Optional properties:
-- gpio-controller: Marks the device node as a GPIO controller.
-- #gpio-cells: Should be two. The first cell is the GPIO number and
- the second cell is used to specify the GPIO polarity:
- 0 = active high,
- 1 = active low.
-- irda-mode-ports: An array that lists the indices of the port that
- should operate in IrDA mode.
-- nxp,modem-control-line-ports: An array that lists the indices of the port that
- should have shared GPIO lines configured as
- modem control lines.
-
-Example:
- sc16is750: sc16is750@0 {
- compatible = "nxp,sc16is750";
- reg = <0>;
- clocks = <&clk20m>;
- interrupt-parent = <&gpio3>;
- interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- sc16is752: sc16is752@1 {
- compatible = "nxp,sc16is752";
- reg = <1>;
- clocks = <&clk20m>;
- interrupt-parent = <&gpio3>;
- interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
- nxp,modem-control-line-ports = <1>; /* Port 1 as modem control lines */
- gpio-controller; /* Port 0 as GPIOs */
- #gpio-cells = <2>;
- };
-
- sc16is752: sc16is752@2 {
- compatible = "nxp,sc16is752";
- reg = <2>;
- clocks = <&clk20m>;
- interrupt-parent = <&gpio3>;
- interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
- nxp,modem-control-line-ports = <0 1>; /* Ports 0 and 1 as modem control lines */
- };
diff --git a/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.yaml b/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.yaml
new file mode 100644
index 000000000000..5dec15b7e7c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.yaml
@@ -0,0 +1,127 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/serial/nxp,sc16is7xx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP SC16IS7xx Advanced Universal Asynchronous Receiver-Transmitter (UART)
+
+maintainers:
+ - Hugo Villeneuve <hvilleneuve@dimonoff.com>
+
+properties:
+ compatible:
+ enum:
+ - nxp,sc16is740
+ - nxp,sc16is741
+ - nxp,sc16is750
+ - nxp,sc16is752
+ - nxp,sc16is760
+ - nxp,sc16is762
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-frequency:
+ description:
+ When there is no clock provider visible to the platform, this
+ is the source crystal or external clock frequency for the IC in Hz.
+ minimum: 1
+ maximum: 80000000
+
+ gpio-controller: true
+
+ "#gpio-cells":
+ const: 2
+
+ gpio-line-names:
+ minItems: 1
+ maxItems: 8
+
+ irda-mode-ports:
+ description: |
+ An array that lists the indices of the ports that should operate in IrDA
+ mode:
+ 0: port A
+ 1: port B
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 2
+ items:
+ minimum: 0
+ maximum: 1
+
+ nxp,modem-control-line-ports:
+ description: |
+ An array that lists the indices of the ports that should have their shared
+ GPIO lines configured as modem control lines:
+ 0: port A
+ 1: port B
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 2
+ items:
+ minimum: 0
+ maximum: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+ - $ref: /schemas/serial/serial.yaml#
+ - $ref: /schemas/serial/rs485.yaml#
+
+oneOf:
+ - required:
+ - clocks
+ - required:
+ - clock-frequency
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ serial@51 {
+ compatible = "nxp,sc16is750";
+ reg = <0x51>;
+ clocks = <&clk20m>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ serial@53 {
+ compatible = "nxp,sc16is752";
+ reg = <0x53>;
+ clocks = <&clk20m>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
+ nxp,modem-control-line-ports = <1>; /* Port 1 as modem control lines */
+ gpio-controller; /* Port 0 as GPIOs */
+ #gpio-cells = <2>;
+ };
+
+ serial@54 {
+ compatible = "nxp,sc16is752";
+ reg = <0x54>;
+ clocks = <&clk20m>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
+ nxp,modem-control-line-ports = <0 1>; /* Ports 0 and 1 as modem control lines */
+ };
+ };
diff --git a/Documentation/devicetree/bindings/serial/qcom,msm-uart.yaml b/Documentation/devicetree/bindings/serial/qcom,msm-uart.yaml
index a052aaef21f4..ea6abfe2d95e 100644
--- a/Documentation/devicetree/bindings/serial/qcom,msm-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/qcom,msm-uart.yaml
@@ -40,11 +40,11 @@ required:
- interrupts
- reg
-unevaluatedProperties: false
-
allOf:
- $ref: /schemas/serial/serial.yaml#
+unevaluatedProperties: false
+
examples:
- |
serial@a9c00000 {
diff --git a/Documentation/devicetree/bindings/serial/qcom,msm-uartdm.yaml b/Documentation/devicetree/bindings/serial/qcom,msm-uartdm.yaml
index 484b9a51f6a9..ee52bf8e8917 100644
--- a/Documentation/devicetree/bindings/serial/qcom,msm-uartdm.yaml
+++ b/Documentation/devicetree/bindings/serial/qcom,msm-uartdm.yaml
@@ -78,8 +78,6 @@ required:
- interrupts
- reg
-unevaluatedProperties: false
-
allOf:
- $ref: /schemas/serial/serial.yaml#
@@ -97,6 +95,8 @@ allOf:
reg:
maxItems: 1
+unevaluatedProperties: false
+
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
diff --git a/Documentation/devicetree/bindings/serial/renesas,em-uart.yaml b/Documentation/devicetree/bindings/serial/renesas,em-uart.yaml
index 3fc2601f1338..89f1eb0f2c5a 100644
--- a/Documentation/devicetree/bindings/serial/renesas,em-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,em-uart.yaml
@@ -38,6 +38,13 @@ properties:
- const: sclk
- const: pclk
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
allOf:
- $ref: serial.yaml#
@@ -53,13 +60,6 @@ allOf:
clock-names:
minItems: 2
-required:
- - compatible
- - reg
- - interrupts
- - clocks
- - clock-names
-
unevaluatedProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/serial/renesas,hscif.yaml b/Documentation/devicetree/bindings/serial/renesas,hscif.yaml
index 1c7f1276aed6..2046e2dc0a3d 100644
--- a/Documentation/devicetree/bindings/serial/renesas,hscif.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,hscif.yaml
@@ -111,8 +111,6 @@ required:
- clock-names
- power-domains
-unevaluatedProperties: false
-
if:
properties:
compatible:
@@ -125,6 +123,8 @@ then:
required:
- resets
+unevaluatedProperties: false
+
examples:
- |
#include <dt-bindings/clock/r8a7795-cpg-mssr.h>
diff --git a/Documentation/devicetree/bindings/serial/renesas,scif.yaml b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
index 99030fc18c45..4610a5bd580c 100644
--- a/Documentation/devicetree/bindings/serial/renesas,scif.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
@@ -79,6 +79,7 @@ properties:
- enum:
- renesas,scif-r9a07g043 # RZ/G2UL and RZ/Five
- renesas,scif-r9a07g054 # RZ/V2L
+ - renesas,scif-r9a08g045 # RZ/G3S
- const: renesas,scif-r9a07g044 # RZ/G2{L,LC} fallback
reg:
diff --git a/Documentation/devicetree/bindings/serial/renesas,scifa.yaml b/Documentation/devicetree/bindings/serial/renesas,scifa.yaml
index 499507678cdf..c98657cf4666 100644
--- a/Documentation/devicetree/bindings/serial/renesas,scifa.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,scifa.yaml
@@ -77,8 +77,6 @@ required:
- clock-names
- power-domains
-unevaluatedProperties: false
-
if:
properties:
compatible:
@@ -89,6 +87,8 @@ then:
required:
- resets
+unevaluatedProperties: false
+
examples:
- |
#include <dt-bindings/clock/r8a7790-cpg-mssr.h>
diff --git a/Documentation/devicetree/bindings/serial/renesas,scifb.yaml b/Documentation/devicetree/bindings/serial/renesas,scifb.yaml
index 810d8a991fdd..fb695b3111ac 100644
--- a/Documentation/devicetree/bindings/serial/renesas,scifb.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,scifb.yaml
@@ -77,8 +77,6 @@ required:
- clock-names
- power-domains
-unevaluatedProperties: false
-
if:
properties:
compatible:
@@ -89,6 +87,8 @@ then:
required:
- resets
+unevaluatedProperties: false
+
examples:
- |
#include <dt-bindings/clock/r8a7740-clock.h>
diff --git a/Documentation/devicetree/bindings/serial/rs485.yaml b/Documentation/devicetree/bindings/serial/rs485.yaml
index 303a443d9e29..9418fd66a8e9 100644
--- a/Documentation/devicetree/bindings/serial/rs485.yaml
+++ b/Documentation/devicetree/bindings/serial/rs485.yaml
@@ -29,6 +29,10 @@ properties:
default: 0
maximum: 100
+ rs485-rts-active-high:
+ description: drive RTS high when sending (this is the default).
+ $ref: /schemas/types.yaml#/definitions/flag
+
rs485-rts-active-low:
description: drive RTS low when sending (default is high).
$ref: /schemas/types.yaml#/definitions/flag
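
rs485-rts-active-high only documents the default polarity explicitly, so either flag can now be stated on an rs485-capable UART node; a hedged fragment with a placeholder controller and illustrative values:

    serial@1000 {
        compatible = "snps,dw-apb-uart";   /* placeholder rs485-capable UART */
        reg = <0x1000 0x100>;
        interrupts = <5>;
        clock-frequency = <1843200>;
        reg-shift = <2>;
        linux,rs485-enabled-at-boot-time;
        rs485-rts-active-high;
        rs485-rts-delay = <0 20>;
    };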
diff --git a/Documentation/devicetree/bindings/serial/samsung_uart.yaml b/Documentation/devicetree/bindings/serial/samsung_uart.yaml
index 8bd88d5cbb11..ac60ab1e35e3 100644
--- a/Documentation/devicetree/bindings/serial/samsung_uart.yaml
+++ b/Documentation/devicetree/bindings/serial/samsung_uart.yaml
@@ -24,9 +24,6 @@ properties:
- enum:
- apple,s5l-uart
- axis,artpec8-uart
- - samsung,s3c2410-uart
- - samsung,s3c2412-uart
- - samsung,s3c2440-uart
- samsung,s3c6400-uart
- samsung,s5pv210-uart
- samsung,exynos4210-uart
@@ -86,8 +83,6 @@ required:
- interrupts
- reg
-unevaluatedProperties: false
-
allOf:
- $ref: serial.yaml#
@@ -96,7 +91,6 @@ allOf:
compatible:
contains:
enum:
- - samsung,s3c2410-uart
- samsung,s5pv210-uart
then:
properties:
@@ -128,6 +122,8 @@ allOf:
- const: uart
- const: clk_uart_baud0
+unevaluatedProperties: false
+
examples:
- |
#include <dt-bindings/clock/samsung,s3c64xx-clock.h>
diff --git a/Documentation/devicetree/bindings/serial/serial.yaml b/Documentation/devicetree/bindings/serial/serial.yaml
index ea277560a596..65804ca274ae 100644
--- a/Documentation/devicetree/bindings/serial/serial.yaml
+++ b/Documentation/devicetree/bindings/serial/serial.yaml
@@ -87,16 +87,8 @@ properties:
description:
TX FIFO threshold configuration (in bytes).
-if:
- required:
- - uart-has-rtscts
-then:
- properties:
- cts-gpios: false
- rts-gpios: false
-
patternProperties:
- "^bluetooth|gnss|gps|mcu$":
+ "^(bluetooth|bluetooth-gnss|gnss|gps|mcu)$":
if:
type: object
then:
@@ -136,6 +128,14 @@ patternProperties:
required:
- compatible
+if:
+ required:
+ - uart-has-rtscts
+then:
+ properties:
+ cts-gpios: false
+ rts-gpios: false
+
additionalProperties: true
examples:
diff --git a/Documentation/devicetree/bindings/soc/nuvoton/nuvoton,gfxi.yaml b/Documentation/devicetree/bindings/soc/nuvoton/nuvoton,gfxi.yaml
new file mode 100644
index 000000000000..0222a43977ab
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/nuvoton/nuvoton,gfxi.yaml
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/nuvoton/nuvoton,gfxi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Graphics Core Information block in Nuvoton SoCs
+
+maintainers:
+ - Joseph Liu <kwliu@nuvoton.com>
+ - Marvin Lin <kflin@nuvoton.com>
+
+description:
+ The Graphics Core Information (GFXI) block is a set of registers in Nuvoton
+ SoCs that monitors graphics core behavior and exposes the resulting
+ information through registers.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - nuvoton,npcm750-gfxi
+ - nuvoton,npcm845-gfxi
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ gfxi: gfxi@e000 {
+ compatible = "nuvoton,npcm750-gfxi", "syscon";
+ reg = <0xe000 0x100>;
+ };
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml
index bceb479f74c5..422921cf1f82 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml
@@ -35,6 +35,12 @@ properties:
'#size-cells':
const: 0
+ orientation-gpios:
+ description: Array of input GPIOs used to indicate the Type-C connector
+ orientation. The array should contain one GPIO entry for each PMIC Glink
+ connector, in reg order. An active GPIO level means "CC2", i.e. the
+ reversed/flipped orientation.
+
patternProperties:
'^connector@\d$':
$ref: /schemas/connector/usb-connector.yaml#
@@ -44,6 +50,19 @@ patternProperties:
required:
- compatible
+allOf:
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sm8450-pmic-glink
+ - qcom,sm8550-pmic-glink
+ then:
+ properties:
+ orientation-gpios: false
+
additionalProperties: false
examples:
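
Per the allOf block above, orientation-gpios is only valid on the SM8450/SM8550 variants; a hedged single-connector sketch, with the TLMM phandle and GPIO number assumed:

    /* assumes #include <dt-bindings/gpio/gpio.h> */
    pmic-glink {
        compatible = "qcom,sm8550-pmic-glink", "qcom,pmic-glink";
        #address-cells = <1>;
        #size-cells = <0>;
        orientation-gpios = <&tlmm 11 GPIO_ACTIVE_HIGH>; /* assumed pin */

        connector@0 {
            compatible = "usb-c-connector";
            reg = <0>;
            power-role = "dual";
            data-role = "dual";
        };
    };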
diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml
index 9a1443ec3aaa..c3190f2a168a 100644
--- a/Documentation/devicetree/bindings/trivial-devices.yaml
+++ b/Documentation/devicetree/bindings/trivial-devices.yaml
@@ -309,8 +309,6 @@ properties:
- nuvoton,w83773g
# OKI ML86V7667 video decoder
- oki,ml86v7667
- # OV5642: Color CMOS QSXGA (5-megapixel) Image Sensor with OmniBSI and Embedded TrueFocus
- - ovti,ov5642
# 48-Lane, 12-Port PCI Express Gen 2 (5.0 GT/s) Switch
- plx,pex8648
# Pulsedlight LIDAR range-finding sensor
@@ -346,6 +344,8 @@ properties:
# Silicon Labs SI3210 Programmable CMOS SLIC/CODEC with SPI interface
- silabs,si3210
# Relative Humidity and Temperature Sensors
+ - silabs,si7005
+ # Relative Humidity and Temperature Sensors
- silabs,si7020
# Skyworks SKY81452: Six-Channel White LED Driver with Touch Panel Bias Supply
- skyworks,sky81452
diff --git a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.yaml b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.yaml
index 1394557517b1..b7e664f7395b 100644
--- a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.yaml
+++ b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.yaml
@@ -15,7 +15,9 @@ properties:
oneOf:
- enum:
- chipidea,usb2
+ - fsl,imx27-usb
- lsi,zevio-usb
+ - nuvoton,npcm750-udc
- nvidia,tegra20-ehci
- nvidia,tegra20-udc
- nvidia,tegra30-ehci
@@ -66,6 +68,10 @@ properties:
- items:
- const: xlnx,zynq-usb-2.20a
- const: chipidea,usb2
+ - items:
+ - enum:
+ - nuvoton,npcm845-udc
+ - const: nuvoton,npcm750-udc
reg:
minItems: 1
@@ -388,6 +394,7 @@ allOf:
enum:
- chipidea,usb2
- lsi,zevio-usb
+ - nuvoton,npcm750-udc
- nvidia,tegra20-udc
- nvidia,tegra30-udc
- nvidia,tegra114-udc
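
For the new Nuvoton UDC compatibles, a chipidea gadget node could look roughly like this; the register address, interrupt cells, clock and PHY references are assumptions, not taken from the schema:

    usb@f0830000 {
        compatible = "nuvoton,npcm845-udc", "nuvoton,npcm750-udc";
        reg = <0xf0830000 0x1000>;          /* assumed base address */
        interrupts = <0 51 4>;              /* assumed interrupt cells */
        clocks = <&clk 26>;                 /* assumed clock index */
        phys = <&udc_phy>;                  /* assumed PHY phandle */
    };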
diff --git a/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml b/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml
index f6e7a5c1ff0b..f9410eb76a62 100644
--- a/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml
+++ b/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml
@@ -11,8 +11,12 @@ maintainers:
properties:
compatible:
- enum:
- - fcs,fsa4480
+ oneOf:
+ - const: fcs,fsa4480
+ - items:
+ - enum:
+ - ocs,ocp96011
+ - const: fcs,fsa4480
reg:
maxItems: 1
@@ -32,10 +36,43 @@ properties:
type: boolean
port:
- $ref: /schemas/graph.yaml#/properties/port
+ $ref: /schemas/graph.yaml#/$defs/port-base
description:
A port node to link the FSA4480 to a TypeC controller for the purpose of
handling altmode muxing and orientation switching.
+ unevaluatedProperties: false
+
+ properties:
+ endpoint:
+ $ref: /schemas/graph.yaml#/$defs/endpoint-base
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ description:
+ Specifies how the AUX+/- lines are connected to SBU1/2.
+ oneOf:
+ - items:
+ - const: 0
+ - const: 1
+ description: |
+ Default AUX/SBU layout (FSA4480)
+ - AUX+ connected to SBU2
+ - AUX- connected to SBU1
+ Default AUX/SBU layout (OCP96011)
+ - AUX+ connected to SBU1
+ - AUX- connected to SBU2
+ - items:
+ - const: 1
+ - const: 0
+ description: |
+ Swapped AUX/SBU layout (FSA4480)
+ - AUX+ connected to SBU1
+ - AUX- connected to SBU2
+ Swapped AUX/SBU layout (OCP96011)
+ - AUX+ connected to SBU2
+ - AUX- connected to SBU1
required:
- compatible
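
As a usage sketch, an OCP96011 wired with the AUX pair swapped relative to its default could encode that in the endpoint's data-lanes; the I2C address, supply and remote-endpoint label are assumptions:

    typec-mux@42 {
        compatible = "ocs,ocp96011", "fcs,fsa4480";
        reg = <0x42>;
        vcc-supply = <&vreg_bob>;            /* assumed supply */

        mode-switch;
        orientation-switch;

        port {
            endpoint {
                remote-endpoint = <&typec_controller_sbu>;
                data-lanes = <1 0>;          /* swapped AUX/SBU layout */
            };
        };
    };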
diff --git a/Documentation/devicetree/bindings/usb/genesys,gl850g.yaml b/Documentation/devicetree/bindings/usb/genesys,gl850g.yaml
index d0927f6768a4..ee08b9c3721f 100644
--- a/Documentation/devicetree/bindings/usb/genesys,gl850g.yaml
+++ b/Documentation/devicetree/bindings/usb/genesys,gl850g.yaml
@@ -4,7 +4,7 @@
$id: http://devicetree.org/schemas/usb/genesys,gl850g.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Genesys Logic GL850G USB 2.0 hub controller
+title: Genesys Logic USB hub controller
maintainers:
- Icenowy Zheng <uwu@icenowy.me>
@@ -18,6 +18,7 @@ properties:
- usb5e3,608
- usb5e3,610
- usb5e3,620
+ - usb5e3,626
reg: true
diff --git a/Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml b/Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml
index f196beb826d8..d3b2b666ec2a 100644
--- a/Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml
+++ b/Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml
@@ -19,6 +19,7 @@ properties:
compatible:
items:
- enum:
+ - nxp,cbdtu02043
- onnn,fsusb43l10x
- pericom,pi3usb102
- const: gpio-sbu-mux
@@ -50,7 +51,6 @@ required:
- compatible
- enable-gpios
- select-gpios
- - mode-switch
- orientation-switch
- port
diff --git a/Documentation/devicetree/bindings/usb/nxp,ptn36502.yaml b/Documentation/devicetree/bindings/usb/nxp,ptn36502.yaml
new file mode 100644
index 000000000000..eee548ac1abe
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/nxp,ptn36502.yaml
@@ -0,0 +1,94 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/nxp,ptn36502.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP PTN36502 Type-C USB 3.1 Gen 1 and DisplayPort v1.2 combo redriver
+
+maintainers:
+ - Luca Weiss <luca.weiss@fairphone.com>
+
+properties:
+ compatible:
+ enum:
+ - nxp,ptn36502
+
+ reg:
+ maxItems: 1
+
+ vdd18-supply:
+ description: Power supply for VDD18 pin
+
+ retimer-switch:
+ description: Flag the port as a possible handler of SuperSpeed signal retiming
+ type: boolean
+
+ orientation-switch:
+ description: Flag the port as a possible handler of orientation switching
+ type: boolean
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Super Speed (SS) Output endpoint to the Type-C connector
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Super Speed (SS) Input endpoint from the Super-Speed PHY
+
+ port@2:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Sideband Use (SBU) AUX lines endpoint to the Type-C connector for the purpose of
+ handling altmode muxing and orientation switching.
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ typec-mux@1a {
+ compatible = "nxp,ptn36502";
+ reg = <0x1a>;
+
+ vdd18-supply = <&usb_redrive_1v8>;
+
+ retimer-switch;
+ orientation-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ usb_con_ss: endpoint {
+ remote-endpoint = <&typec_con_ss>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ phy_con_ss: endpoint {
+ remote-endpoint = <&usb_phy_ss>;
+ };
+ };
+ port@2 {
+ reg = <2>;
+ usb_con_sbu: endpoint {
+ remote-endpoint = <&typec_dp_aux>;
+ };
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
index 67591057f234..e889158ca205 100644
--- a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
@@ -14,6 +14,7 @@ properties:
items:
- enum:
- qcom,ipq4019-dwc3
+ - qcom,ipq5018-dwc3
- qcom,ipq5332-dwc3
- qcom,ipq6018-dwc3
- qcom,ipq8064-dwc3
@@ -34,6 +35,7 @@ properties:
- qcom,sdm845-dwc3
- qcom,sdx55-dwc3
- qcom,sdx65-dwc3
+ - qcom,sdx75-dwc3
- qcom,sm4250-dwc3
- qcom,sm6115-dwc3
- qcom,sm6125-dwc3
@@ -180,6 +182,8 @@ allOf:
- qcom,sdm670-dwc3
- qcom,sdm845-dwc3
- qcom,sdx55-dwc3
+ - qcom,sdx65-dwc3
+ - qcom,sdx75-dwc3
- qcom,sm6350-dwc3
then:
properties:
@@ -238,6 +242,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,ipq5018-dwc3
- qcom,ipq5332-dwc3
- qcom,msm8994-dwc3
- qcom,qcs404-dwc3
@@ -363,6 +368,7 @@ allOf:
- qcom,sdm845-dwc3
- qcom,sdx55-dwc3
- qcom,sdx65-dwc3
+ - qcom,sdx75-dwc3
- qcom,sm4250-dwc3
- qcom,sm6125-dwc3
- qcom,sm6350-dwc3
@@ -411,6 +417,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,ipq5018-dwc3
- qcom,ipq5332-dwc3
- qcom,sdm660-dwc3
then:
diff --git a/Documentation/devicetree/bindings/usb/realtek,rtd-dwc3.yaml b/Documentation/devicetree/bindings/usb/realtek,rtd-dwc3.yaml
new file mode 100644
index 000000000000..345d0132d4a5
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/realtek,rtd-dwc3.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2023 Realtek Semiconductor Corporation
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/realtek,rtd-dwc3.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Realtek DWC3 USB SoC Controller Glue
+
+maintainers:
+ - Stanley Chang <stanley_chang@realtek.com>
+
+description:
+ The Realtek DHC SoC embeds a DWC3 USB IP Core configured for USB 2.0
+ and USB 3.0 in host or dual-role mode.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - realtek,rtd1295-dwc3
+ - realtek,rtd1315e-dwc3
+ - realtek,rtd1319-dwc3
+ - realtek,rtd1319d-dwc3
+ - realtek,rtd1395-dwc3
+ - realtek,rtd1619-dwc3
+ - realtek,rtd1619b-dwc3
+ - const: realtek,rtd-dwc3
+
+ reg:
+ items:
+ - description: Address and length of the register set for the DWC3 core wrapper.
+ - description: Address and length of the register set for PM control.
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 1
+
+ ranges: true
+
+patternProperties:
+ "^usb@[0-9a-f]+$":
+ $ref: snps,dwc3.yaml#
+ description: Required child node
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+ - ranges
+
+additionalProperties: false
+
+examples:
+ - |
+ usb@98013e00 {
+ compatible = "realtek,rtd1319d-dwc3", "realtek,rtd-dwc3";
+ reg = <0x98013e00 0x140>, <0x98013f60 0x4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ usb@98050000 {
+ compatible = "snps,dwc3";
+ reg = <0x98050000 0x9000>;
+ interrupts = <0 94 4>;
+ phys = <&usb2phy &usb3phy>;
+ phy-names = "usb2-phy", "usb3-phy";
+ dr_mode = "otg";
+ usb-role-switch;
+ role-switch-default-mode = "host";
+ snps,dis_u2_susphy_quirk;
+ snps,parkmode-disable-ss-quirk;
+ snps,parkmode-disable-hs-quirk;
+ maximum-speed = "high-speed";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/usb/realtek,rtd-type-c.yaml b/Documentation/devicetree/bindings/usb/realtek,rtd-type-c.yaml
new file mode 100644
index 000000000000..6142b0b5b534
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/realtek,rtd-type-c.yaml
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright 2023 Realtek Semiconductor Corporation
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/realtek,rtd-type-c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Realtek DHC RTD SoCs USB Type-C Connector detection
+
+maintainers:
+ - Stanley Chang <stanley_chang@realtek.com>
+
+description:
+ Realtek digital home center (DHC) RTD series SoCs include a Type-C module
+ that is able to detect the state of the Type-C connector.
+
+properties:
+ compatible:
+ enum:
+ - realtek,rtd1295-type-c
+ - realtek,rtd1312c-type-c
+ - realtek,rtd1315e-type-c
+ - realtek,rtd1319-type-c
+ - realtek,rtd1319d-type-c
+ - realtek,rtd1395-type-c
+ - realtek,rtd1619-type-c
+ - realtek,rtd1619b-type-c
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ nvmem-cell-names:
+ items:
+ - const: usb-cal
+
+ nvmem-cells:
+ maxItems: 1
+ description:
+ The phandle to the nvmem cell that contains the trimming data.
+ The Type-C parameter trimming data is specified via efuse.
+ If unspecified, the default value is used.
+
+ realtek,rd-ctrl-gpios:
+ description: The GPIO used to control the external Rd on the board.
+ maxItems: 1
+
+ connector:
+ $ref: /schemas/connector/usb-connector.yaml#
+ description: Properties for the USB-C connector.
+ type: object
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ type-c@7220 {
+ compatible = "realtek,rtd1619b-type-c";
+ reg = <0x7220 0x20>;
+ interrupts = <0 60 IRQ_TYPE_LEVEL_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_cc1_pins>, <&usb_cc2_pins>;
+ nvmem-cells = <&otp_usb_cal>;
+ nvmem-cell-names = "usb-cal";
+
+ connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ data-role = "dual";
+ power-role = "dual";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/usb/rockchip,dwc3.yaml b/Documentation/devicetree/bindings/usb/rockchip,dwc3.yaml
index c983dfe0f629..c4924113f9bd 100644
--- a/Documentation/devicetree/bindings/usb/rockchip,dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/rockchip,dwc3.yaml
@@ -20,9 +20,6 @@ description:
Type-C PHY
Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt
-allOf:
- - $ref: snps,dwc3.yaml#
-
select:
properties:
compatible:
@@ -30,6 +27,7 @@ select:
enum:
- rockchip,rk3328-dwc3
- rockchip,rk3568-dwc3
+ - rockchip,rk3588-dwc3
required:
- compatible
@@ -39,6 +37,7 @@ properties:
- enum:
- rockchip,rk3328-dwc3
- rockchip,rk3568-dwc3
+ - rockchip,rk3588-dwc3
- const: snps,dwc3
reg:
@@ -58,7 +57,9 @@ properties:
 Master/Core clock, must be >= 62.5 MHz for SS
 operation and >= 30 MHz for HS operation
- description:
- Controller grf clock
+ Controller grf clock OR UTMI clock
+ - description:
+ PIPE clock
clock-names:
minItems: 3
@@ -66,7 +67,10 @@ properties:
- const: ref_clk
- const: suspend_clk
- const: bus_clk
- - const: grf_clk
+ - enum:
+ - grf_clk
+ - utmi
+ - const: pipe
power-domains:
maxItems: 1
@@ -86,6 +90,52 @@ required:
- clocks
- clock-names
+allOf:
+ - $ref: snps,dwc3.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: rockchip,rk3328-dwc3
+ then:
+ properties:
+ clocks:
+ minItems: 3
+ maxItems: 4
+ clock-names:
+ minItems: 3
+ items:
+ - const: ref_clk
+ - const: suspend_clk
+ - const: bus_clk
+ - const: grf_clk
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: rockchip,rk3568-dwc3
+ then:
+ properties:
+ clocks:
+ maxItems: 3
+ clock-names:
+ maxItems: 3
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: rockchip,rk3588-dwc3
+ then:
+ properties:
+ clock-names:
+ minItems: 3
+ items:
+ - const: ref_clk
+ - const: suspend_clk
+ - const: bus_clk
+ - const: utmi
+ - const: pipe
+
examples:
- |
#include <dt-bindings/clock/rk3328-cru.h>
diff --git a/Documentation/devicetree/bindings/usb/snps,dwc3.yaml b/Documentation/devicetree/bindings/usb/snps,dwc3.yaml
index a696f23730d3..ee5af4b381b1 100644
--- a/Documentation/devicetree/bindings/usb/snps,dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/snps,dwc3.yaml
@@ -310,6 +310,62 @@ properties:
maximum: 62
deprecated: true
+ snps,rx-thr-num-pkt:
+ description:
+ USB RX packet threshold count. In host mode, this field specifies
+ the space that must be available in the RX FIFO before the core can
+ start the corresponding USB RX transaction (burst).
+ In device mode, this field specifies the space that must be
+ available in the RX FIFO before the core can send ERDY for a
+ flow-controlled endpoint. It is only used for SuperSpeed.
+ The valid values for this field are from 1 to 15. (DWC3 SuperSpeed
+ USB 3.0 Controller Databook)
+ $ref: /schemas/types.yaml#/definitions/uint8
+ minimum: 1
+ maximum: 15
+
+ snps,rx-max-burst:
+ description:
+ Max USB RX burst size. In host mode, this field specifies the
+ Maximum Bulk IN burst the DWC_usb3 core can perform. When the system
+ bus is slower than the USB, RX FIFO can overrun during a long burst.
+ You can program a smaller value to this field to limit the RX burst
+ size that the core can perform. It only applies to SS Bulk,
+ Isochronous, and Interrupt IN endpoints in the host mode.
+ In device mode, this field specifies the NUMP value that is sent in
+ ERDY for an OUT endpoint.
+ The valid values for this field are from 1 to 16. (DWC3 SuperSpeed
+ USB 3.0 Controller Databook)
+ $ref: /schemas/types.yaml#/definitions/uint8
+ minimum: 1
+ maximum: 16
+
+ snps,tx-thr-num-pkt:
+ description:
+ USB TX packet threshold count. This field specifies the number of
+ packets that must be in the TXFIFO before the core can start
+ transmission for the corresponding USB transaction (burst).
+ This count is valid in both host and device modes. It is only used
+ for SuperSpeed operation.
+ Valid values are from 1 to 15. (DWC3 SuperSpeed USB 3.0 Controller
+ Databook)
+ $ref: /schemas/types.yaml#/definitions/uint8
+ minimum: 1
+ maximum: 15
+
+ snps,tx-max-burst:
+ description:
+ Max USB TX burst size. When the system bus is slower than the USB,
+ TX FIFO can underrun during a long burst. Program a smaller value
+ to this field to limit the TX burst size that the core can execute.
+ In Host mode, it only applies to SS Bulk, Isochronous, and Interrupt
+ OUT endpoints. This value is not used in device mode.
+ Valid values are from 1 to 16. (DWC3 SuperSpeed USB 3.0 Controller
+ Databook)
+ $ref: /schemas/types.yaml#/definitions/uint8
+ minimum: 1
+ maximum: 16
+
snps,rx-thr-num-pkt-prd:
description:
Periodic ESS RX packet threshold count (host mode only). Set this and
diff --git a/Documentation/devicetree/bindings/usb/ti,tps6598x.yaml b/Documentation/devicetree/bindings/usb/ti,tps6598x.yaml
index 6ab674dea4c6..323d664ae06a 100644
--- a/Documentation/devicetree/bindings/usb/ti,tps6598x.yaml
+++ b/Documentation/devicetree/bindings/usb/ti,tps6598x.yaml
@@ -20,8 +20,23 @@ properties:
enum:
- ti,tps6598x
- apple,cd321x
+ - ti,tps25750
+
reg:
- maxItems: 1
+ minItems: 1
+ items:
+ - description: main PD controller address
+ - description: |
+ I2C slave address field in the PBMs input data,
+ which is used as the device address when writing the
+ patch for the TPS25750.
+ The patch address can be any value except 0x00, 0x20,
+ 0x21, 0x22, and 0x23.
+
+ reg-names:
+ items:
+ - const: main
+ - const: patch-address
wakeup-source: true
@@ -35,10 +50,42 @@ properties:
connector:
$ref: /schemas/connector/usb-connector.yaml#
+ firmware-name:
+ description: |
+ Should contain the name of the default patch binary
+ file, located on the firmware search path, which is
+ used to switch the controller into APP mode.
+ This is used when the TPS25750 doesn't have an EEPROM
+ connected to it.
+ maxItems: 1
+
required:
- compatible
- reg
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: ti,tps25750
+ then:
+ properties:
+ reg:
+ maxItems: 2
+
+ connector:
+ required:
+ - data-role
+
+ required:
+ - connector
+ - reg-names
+ else:
+ properties:
+ reg:
+ maxItems: 1
+
additionalProperties: false
examples:
@@ -71,4 +118,36 @@ examples:
};
};
};
+
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ typec@21 {
+ compatible = "ti,tps25750";
+ reg = <0x21>, <0x0f>;
+ reg-names = "main", "patch-address";
+
+ interrupt-parent = <&msmgpio>;
+ interrupts = <100 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "irq";
+ firmware-name = "tps25750.bin";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&typec_pins>;
+
+ typec_con0: connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ data-role = "dual";
+ port {
+ typec_ep0: endpoint {
+ remote-endpoint = <&otg_ep>;
+ };
+ };
+ };
+ };
+ };
...
diff --git a/Documentation/devicetree/bindings/usb/vialab,vl817.yaml b/Documentation/devicetree/bindings/usb/vialab,vl817.yaml
index 76db9071b352..c815010ba9c2 100644
--- a/Documentation/devicetree/bindings/usb/vialab,vl817.yaml
+++ b/Documentation/devicetree/bindings/usb/vialab,vl817.yaml
@@ -37,7 +37,6 @@ properties:
required:
- compatible
- reg
- - reset-gpios
- vdd-supply
- peer-hub
diff --git a/Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml b/Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml
index 443e2e7ab467..69845ec32e81 100644
--- a/Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml
@@ -15,9 +15,15 @@ allOf:
properties:
compatible:
- enum:
- - amlogic,meson-gxbb-wdt
- - amlogic,t7-wdt
+ oneOf:
+ - enum:
+ - amlogic,meson-gxbb-wdt
+ - amlogic,t7-wdt
+ - items:
+ - enum:
+ - amlogic,c3-wdt
+ - amlogic,s4-wdt
+ - const: amlogic,t7-wdt
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt b/Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt
index a8197632d6d2..3208adb3e52e 100644
--- a/Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt
@@ -47,7 +47,15 @@ Optional properties for AST2500-compatible watchdogs:
is configured as push-pull, then set the pulse
polarity to active-high. The default is active-low.
-Example:
+Optional properties for AST2500- and AST2600-compatible watchdogs:
+ - aspeed,reset-mask: A bitmask indicating which peripherals will be reset if
+ the watchdog timer expires. On AST2500 this should be a
+ single word defined using the AST2500_WDT_RESET_* macros;
+ on AST2600 this should be a two-word array with the first
+ word defined using the AST2600_WDT_RESET1_* macros and the
+ second word defined using the AST2600_WDT_RESET2_* macros.
+
+Examples:
wdt1: watchdog@1e785000 {
compatible = "aspeed,ast2400-wdt";
@@ -55,3 +63,11 @@ Example:
aspeed,reset-type = "system";
aspeed,external-signal;
};
+
+ #include <dt-bindings/watchdog/aspeed-wdt.h>
+ wdt2: watchdog@1e785040 {
+ compatible = "aspeed,ast2600-wdt";
+ reg = <0x1e785040 0x40>;
+ aspeed,reset-mask = <AST2600_WDT_RESET1_DEFAULT
+ (AST2600_WDT_RESET2_DEFAULT & ~AST2600_WDT_RESET2_LPC)>;
+ };
diff --git a/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml b/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml
index 4b7ed1355701..9c50766bf690 100644
--- a/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml
@@ -30,6 +30,11 @@ properties:
clocks:
maxItems: 1
+ fsl,ext-reset-output:
+ description:
+ When set, the watchdog can generate an external reset from the wdog_any pin.
+ type: boolean
+
required:
- compatible
- interrupts
diff --git a/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml b/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
index 5046dfa55f13..c12bc852aedc 100644
--- a/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
@@ -21,6 +21,8 @@ properties:
- qcom,apss-wdt-ipq5018
- qcom,apss-wdt-ipq5332
- qcom,apss-wdt-ipq9574
+ - qcom,apss-wdt-msm8226
+ - qcom,apss-wdt-msm8974
- qcom,apss-wdt-msm8994
- qcom,apss-wdt-qcm2290
- qcom,apss-wdt-qcs404
diff --git a/Documentation/driver-api/i3c/protocol.rst b/Documentation/driver-api/i3c/protocol.rst
index 02653defa011..23a0b93c62b1 100644
--- a/Documentation/driver-api/i3c/protocol.rst
+++ b/Documentation/driver-api/i3c/protocol.rst
@@ -71,8 +71,8 @@ During DAA, each I3C device reports 3 important things:
related capabilities
* DCR: Device Characteristic Register. This 8-bit register describes the
functionalities provided by the device
-* Provisional ID: A 48-bit unique identifier. On a given bus there should be no
- Provisional ID collision, otherwise the discovery mechanism may fail.
+* Provisioned ID: A 48-bit unique identifier. On a given bus there should be no
+ Provisioned ID collision, otherwise the discovery mechanism may fail.
I3C slave events
================
diff --git a/Documentation/driver-api/media/camera-sensor.rst b/Documentation/driver-api/media/camera-sensor.rst
index 93f4f2536c25..6456145f96ed 100644
--- a/Documentation/driver-api/media/camera-sensor.rst
+++ b/Documentation/driver-api/media/camera-sensor.rst
@@ -1,8 +1,14 @@
.. SPDX-License-Identifier: GPL-2.0
+.. _media_writing_camera_sensor_drivers:
+
Writing camera sensor drivers
=============================
+This document covers the in-kernel APIs only. For the best practices on
+userspace API implementation in camera sensor drivers, please see
+:ref:`media_using_camera_sensor_drivers`.
+
CSI-2 and parallel (BT.601 and BT.656) busses
---------------------------------------------
@@ -13,7 +19,7 @@ Handling clocks
Camera sensors have an internal clock tree including a PLL and a number of
divisors. The clock tree is generally configured by the driver based on a few
-input parameters that are specific to the hardware:: the external clock frequency
+input parameters that are specific to the hardware: the external clock frequency
and the link frequency. The two parameters generally are obtained from system
firmware. **No other frequencies should be used in any circumstances.**
@@ -32,110 +38,61 @@ can rely on this frequency being used.
Devicetree
~~~~~~~~~~
-The currently preferred way to achieve this is using ``assigned-clocks``,
-``assigned-clock-parents`` and ``assigned-clock-rates`` properties. See
-``Documentation/devicetree/bindings/clock/clock-bindings.txt`` for more
-information. The driver then gets the frequency using ``clk_get_rate()``.
+The preferred way to achieve this is using ``assigned-clocks``,
+``assigned-clock-parents`` and ``assigned-clock-rates`` properties. See the
+`clock device tree bindings
+<https://github.com/devicetree-org/dt-schema/blob/main/dtschema/schemas/clock/clock.yaml>`_
+for more information. The driver then gets the frequency using
+``clk_get_rate()``.
This approach has the drawback that there's no guarantee that the frequency
hasn't been modified directly or indirectly by another driver, or supported by
the board's clock tree to begin with. Changes to the Common Clock Framework API
are required to ensure reliability.
-Frame size
-----------
-
-There are two distinct ways to configure the frame size produced by camera
-sensors.
-
-Freely configurable camera sensor drivers
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Freely configurable camera sensor drivers expose the device's internal
-processing pipeline as one or more sub-devices with different cropping and
-scaling configurations. The output size of the device is the result of a series
-of cropping and scaling operations from the device's pixel array's size.
-
-An example of such a driver is the CCS driver (see ``drivers/media/i2c/ccs``).
-
-Register list based drivers
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Register list based drivers generally, instead of able to configure the device
-they control based on user requests, are limited to a number of preset
-configurations that combine a number of different parameters that on hardware
-level are independent. How a driver picks such configuration is based on the
-format set on a source pad at the end of the device's internal pipeline.
-
-Most sensor drivers are implemented this way, see e.g.
-``drivers/media/i2c/imx319.c`` for an example.
-
-Frame interval configuration
-----------------------------
-
-There are two different methods for obtaining possibilities for different frame
-intervals as well as configuring the frame interval. Which one to implement
-depends on the type of the device.
-
-Raw camera sensors
-~~~~~~~~~~~~~~~~~~
-
-Instead of a high level parameter such as frame interval, the frame interval is
-a result of the configuration of a number of camera sensor implementation
-specific parameters. Luckily, these parameters tend to be the same for more or
-less all modern raw camera sensors.
-
-The frame interval is calculated using the following equation::
-
- frame interval = (analogue crop width + horizontal blanking) *
- (analogue crop height + vertical blanking) / pixel rate
-
-The formula is bus independent and is applicable for raw timing parameters on
-large variety of devices beyond camera sensors. Devices that have no analogue
-crop, use the full source image size, i.e. pixel array size.
-
-Horizontal and vertical blanking are specified by ``V4L2_CID_HBLANK`` and
-``V4L2_CID_VBLANK``, respectively. The unit of the ``V4L2_CID_HBLANK`` control
-is pixels and the unit of the ``V4L2_CID_VBLANK`` is lines. The pixel rate in
-the sensor's **pixel array** is specified by ``V4L2_CID_PIXEL_RATE`` in the same
-sub-device. The unit of that control is pixels per second.
-
-Register list based drivers need to implement read-only sub-device nodes for the
-purpose. Devices that are not register list based need these to configure the
-device's internal processing pipeline.
-
-The first entity in the linear pipeline is the pixel array. The pixel array may
-be followed by other entities that are there to allow configuring binning,
-skipping, scaling or digital crop :ref:`v4l2-subdev-selections`.
-
-USB cameras etc. devices
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-USB video class hardware, as well as many cameras offering a similar higher
-level interface natively, generally use the concept of frame interval (or frame
-rate) on device level in firmware or hardware. This means lower level controls
-implemented by raw cameras may not be used on uAPI (or even kAPI) to control the
-frame interval on these devices.
-
Power management
----------------
-Always use runtime PM to manage the power states of your device. Camera sensor
-drivers are in no way special in this respect: they are responsible for
-controlling the power state of the device they otherwise control as well. In
-general, the device must be powered on at least when its registers are being
-accessed and when it is streaming.
-
-Existing camera sensor drivers may rely on the old
-struct v4l2_subdev_core_ops->s_power() callback for bridge or ISP drivers to
-manage their power state. This is however **deprecated**. If you feel you need
-to begin calling an s_power from an ISP or a bridge driver, instead please add
-runtime PM support to the sensor driver you are using. Likewise, new drivers
-should not use s_power.
-
-Please see examples in e.g. ``drivers/media/i2c/ov8856.c`` and
-``drivers/media/i2c/ccs/ccs-core.c``. The two drivers work in both ACPI
-and DT based systems.
+Camera sensors are used in conjunction with other devices to form a camera
+pipeline. They must obey the rules listed herein to ensure coherent power
+management over the pipeline.
+
+Camera sensor drivers are responsible for controlling the power state of the
+device they otherwise control as well. They shall use runtime PM to manage
+power states. Runtime PM shall be enabled at probe time and disabled at remove
+time. Drivers should enable runtime PM autosuspend.
+
+The runtime PM handlers shall handle clocks, regulators, GPIOs, and other
+system resources required to power the sensor up and down. For drivers that
+don't use any of those resources (such as drivers that support ACPI systems
+only), the runtime PM handlers may be left unimplemented.
+
+In general, the device shall be powered on at least when its registers are
+being accessed and when it is streaming. Drivers should use
+``pm_runtime_resume_and_get()`` when starting streaming and
+``pm_runtime_put()`` or ``pm_runtime_put_autosuspend()`` when stopping
+streaming. They may power the device up at probe time (for example to read
+identification registers), but should not keep it powered unconditionally after
+probe.
+
+At system suspend time, the whole camera pipeline must stop streaming, and
+restart when the system is resumed. This requires coordination between the
+camera sensor and the rest of the camera pipeline. Bridge drivers are
+responsible for this coordination, and instruct camera sensors to stop and
+restart streaming by calling the appropriate subdev operations
+(``.s_stream()``, ``.enable_streams()`` or ``.disable_streams()``). Camera
+sensor drivers shall therefore **not** keep track of the streaming state to
+stop streaming in the PM suspend handler and restart it in the resume handler.
+Drivers should in general not implement the system PM handlers.
+
+Camera sensor drivers shall **not** implement the subdev ``.s_power()``
+operation, as it is deprecated. While this operation is implemented in some
+existing drivers as they predate the deprecation, new drivers shall use runtime
+PM instead. If you feel you need to begin calling ``.s_power()`` from an ISP or
+a bridge driver, instead add runtime PM support to the sensor driver you are
+using and drop its ``.s_power()`` handler.
+
+Please also see :ref:`examples <media-camera-sensor-examples>`.
Control framework
~~~~~~~~~~~~~~~~~
@@ -155,21 +112,36 @@ access the device.
Rotation, orientation and flipping
----------------------------------
-Some systems have the camera sensor mounted upside down compared to its natural
-mounting rotation. In such cases, drivers shall expose the information to
-userspace with the :ref:`V4L2_CID_CAMERA_SENSOR_ROTATION
-<v4l2-camera-sensor-rotation>` control.
-
-Sensor drivers shall also report the sensor's mounting orientation with the
-:ref:`V4L2_CID_CAMERA_SENSOR_ORIENTATION <v4l2-camera-sensor-orientation>`.
-
Use ``v4l2_fwnode_device_parse()`` to obtain rotation and orientation
information from system firmware and ``v4l2_ctrl_new_fwnode_properties()`` to
register the appropriate controls.
-Sensor drivers that have any vertical or horizontal flips embedded in the
-register programming sequences shall initialize the V4L2_CID_HFLIP and
-V4L2_CID_VFLIP controls with the values programmed by the register sequences.
-The default values of these controls shall be 0 (disabled). Especially these
-controls shall not be inverted, independently of the sensor's mounting
-rotation.
+.. _media-camera-sensor-examples:
+
+Example drivers
+---------------
+
+Features implemented by sensor drivers vary, and depending on the set of
+supported features and other qualities, some drivers serve better as examples
+than others. The following drivers are known to be good examples:
+
+.. flat-table:: Example sensor drivers
+ :header-rows: 0
+ :widths: 1 1 1 2
+
+ * - Driver name
+ - File(s)
+ - Driver type
+ - Example topic
+ * - CCS
+ - ``drivers/media/i2c/ccs/``
+ - Freely configurable
+ - Power management (ACPI and DT), UAPI
+ * - imx219
+ - ``drivers/media/i2c/imx219.c``
+ - Register list based
+ - Power management (DT), UAPI, mode selection
+ * - imx319
+ - ``drivers/media/i2c/imx319.c``
+ - Register list based
+ - Power management (ACPI and DT)
diff --git a/Documentation/driver-api/media/drivers/ccs/ccs.rst b/Documentation/driver-api/media/drivers/ccs/ccs.rst
index 7389204afcb8..776eec72bc80 100644
--- a/Documentation/driver-api/media/drivers/ccs/ccs.rst
+++ b/Documentation/driver-api/media/drivers/ccs/ccs.rst
@@ -30,7 +30,7 @@ that purpose, selection target ``V4L2_SEL_TGT_COMPOSE`` is supported on the
sink pad (0).
Additionally, if a device has no scaler or digital crop functionality, the
-source pad (1) expses another digital crop selection rectangle that can only
+source pad (1) exposes another digital crop selection rectangle that can only
crop at the end of the lines and frames.
Scaler
@@ -78,6 +78,14 @@ For SMIA (non-++) compliant devices the static data file name is
vvvv or vv denotes MIPI and SMIA manufacturer IDs respectively, mmmm model ID
and rrrr or rr revision number.
+CCS tools
+~~~~~~~~~
+
+`CCS tools <https://github.com/MIPI-Alliance/ccs-tools/>`_ is a set of
+tools for working with CCS static data files. CCS tools includes a
+definition of the human-readable CCS static data YAML format and a
+program to convert it to binary.
+
Register definition generator
-----------------------------
diff --git a/Documentation/driver-api/media/v4l2-core.rst b/Documentation/driver-api/media/v4l2-core.rst
index 239045ecc8f4..58cba831ade5 100644
--- a/Documentation/driver-api/media/v4l2-core.rst
+++ b/Documentation/driver-api/media/v4l2-core.rst
@@ -13,7 +13,6 @@ Video4Linux devices
v4l2-subdev
v4l2-event
v4l2-controls
- v4l2-videobuf
v4l2-videobuf2
v4l2-dv-timings
v4l2-flash-led-class
diff --git a/Documentation/driver-api/media/v4l2-dev.rst b/Documentation/driver-api/media/v4l2-dev.rst
index 99e3b5fa7444..d5cb19b21a9f 100644
--- a/Documentation/driver-api/media/v4l2-dev.rst
+++ b/Documentation/driver-api/media/v4l2-dev.rst
@@ -157,14 +157,6 @@ changing the e.g. exposure of the webcam.
Of course, you can always do all the locking yourself by leaving both lock
pointers at ``NULL``.
-If you use the old :ref:`videobuf framework <vb_framework>` then you must
-pass the :c:type:`video_device`->lock to the videobuf queue initialize
-function: if videobuf has to wait for a frame to arrive, then it will
-temporarily unlock the lock and relock it afterwards. If your driver also
-waits in the code, then you should do the same to allow other
-processes to access the device node while the first process is waiting for
-something.
-
In the case of :ref:`videobuf2 <vb2_framework>` you will need to implement the
``wait_prepare()`` and ``wait_finish()`` callbacks to unlock/lock if applicable.
If you use the ``queue->lock`` pointer, then you can use the helper functions
diff --git a/Documentation/driver-api/media/v4l2-videobuf.rst b/Documentation/driver-api/media/v4l2-videobuf.rst
deleted file mode 100644
index 4b1d84eefeb8..000000000000
--- a/Documentation/driver-api/media/v4l2-videobuf.rst
+++ /dev/null
@@ -1,403 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-.. _vb_framework:
-
-Videobuf Framework
-==================
-
-Author: Jonathan Corbet <corbet@lwn.net>
-
-Current as of 2.6.33
-
-.. note::
-
- The videobuf framework was deprecated in favor of videobuf2. Shouldn't
- be used on new drivers.
-
-Introduction
-------------
-
-The videobuf layer functions as a sort of glue layer between a V4L2 driver
-and user space. It handles the allocation and management of buffers for
-the storage of video frames. There is a set of functions which can be used
-to implement many of the standard POSIX I/O system calls, including read(),
-poll(), and, happily, mmap(). Another set of functions can be used to
-implement the bulk of the V4L2 ioctl() calls related to streaming I/O,
-including buffer allocation, queueing and dequeueing, and streaming
-control. Using videobuf imposes a few design decisions on the driver
-author, but the payback comes in the form of reduced code in the driver and
-a consistent implementation of the V4L2 user-space API.
-
-Buffer types
-------------
-
-Not all video devices use the same kind of buffers. In fact, there are (at
-least) three common variations:
-
- - Buffers which are scattered in both the physical and (kernel) virtual
- address spaces. (Almost) all user-space buffers are like this, but it
- makes great sense to allocate kernel-space buffers this way as well when
- it is possible. Unfortunately, it is not always possible; working with
- this kind of buffer normally requires hardware which can do
- scatter/gather DMA operations.
-
- - Buffers which are physically scattered, but which are virtually
- contiguous; buffers allocated with vmalloc(), in other words. These
- buffers are just as hard to use for DMA operations, but they can be
- useful in situations where DMA is not available but virtually-contiguous
- buffers are convenient.
-
- - Buffers which are physically contiguous. Allocation of this kind of
- buffer can be unreliable on fragmented systems, but simpler DMA
- controllers cannot deal with anything else.
-
-Videobuf can work with all three types of buffers, but the driver author
-must pick one at the outset and design the driver around that decision.
-
-[It's worth noting that there's a fourth kind of buffer: "overlay" buffers
-which are located within the system's video memory. The overlay
-functionality is considered to be deprecated for most use, but it still
-shows up occasionally in system-on-chip drivers where the performance
-benefits merit the use of this technique. Overlay buffers can be handled
-as a form of scattered buffer, but there are very few implementations in
-the kernel and a description of this technique is currently beyond the
-scope of this document.]
-
-Data structures, callbacks, and initialization
-----------------------------------------------
-
-Depending on which type of buffers are being used, the driver should
-include one of the following files:
-
-.. code-block:: none
-
- <media/videobuf-dma-sg.h> /* Physically scattered */
- <media/videobuf-vmalloc.h> /* vmalloc() buffers */
- <media/videobuf-dma-contig.h> /* Physically contiguous */
-
-The driver's data structure describing a V4L2 device should include a
-struct videobuf_queue instance for the management of the buffer queue,
-along with a list_head for the queue of available buffers. There will also
-need to be an interrupt-safe spinlock which is used to protect (at least)
-the queue.
-
-The next step is to write four simple callbacks to help videobuf deal with
-the management of buffers:
-
-.. code-block:: none
-
- struct videobuf_queue_ops {
- int (*buf_setup)(struct videobuf_queue *q,
- unsigned int *count, unsigned int *size);
- int (*buf_prepare)(struct videobuf_queue *q,
- struct videobuf_buffer *vb,
- enum v4l2_field field);
- void (*buf_queue)(struct videobuf_queue *q,
- struct videobuf_buffer *vb);
- void (*buf_release)(struct videobuf_queue *q,
- struct videobuf_buffer *vb);
- };
-
-buf_setup() is called early in the I/O process, when streaming is being
-initiated; its purpose is to tell videobuf about the I/O stream. The count
-parameter will be a suggested number of buffers to use; the driver should
-check it for rationality and adjust it if need be. As a practical rule, a
-minimum of two buffers are needed for proper streaming, and there is
-usually a maximum (which cannot exceed 32) which makes sense for each
-device. The size parameter should be set to the expected (maximum) size
-for each frame of data.
-
-Each buffer (in the form of a struct videobuf_buffer pointer) will be
-passed to buf_prepare(), which should set the buffer's size, width, height,
-and field fields properly. If the buffer's state field is
-VIDEOBUF_NEEDS_INIT, the driver should pass it to:
-
-.. code-block:: none
-
- int videobuf_iolock(struct videobuf_queue* q, struct videobuf_buffer *vb,
- struct v4l2_framebuffer *fbuf);
-
-Among other things, this call will usually allocate memory for the buffer.
-Finally, the buf_prepare() function should set the buffer's state to
-VIDEOBUF_PREPARED.
-
-When a buffer is queued for I/O, it is passed to buf_queue(), which should
-put it onto the driver's list of available buffers and set its state to
-VIDEOBUF_QUEUED. Note that this function is called with the queue spinlock
-held; if it tries to acquire it as well things will come to a screeching
-halt. Yes, this is the voice of experience. Note also that videobuf may
-wait on the first buffer in the queue; placing other buffers in front of it
-could again gum up the works. So use list_add_tail() to enqueue buffers.
-
-Finally, buf_release() is called when a buffer is no longer intended to be
-used. The driver should ensure that there is no I/O active on the buffer,
-then pass it to the appropriate free routine(s):
-
-.. code-block:: none
-
- /* Scatter/gather drivers */
- int videobuf_dma_unmap(struct videobuf_queue *q,
- struct videobuf_dmabuf *dma);
- int videobuf_dma_free(struct videobuf_dmabuf *dma);
-
- /* vmalloc drivers */
- void videobuf_vmalloc_free (struct videobuf_buffer *buf);
-
- /* Contiguous drivers */
- void videobuf_dma_contig_free(struct videobuf_queue *q,
- struct videobuf_buffer *buf);
-
-One way to ensure that a buffer is no longer under I/O is to pass it to:
-
-.. code-block:: none
-
- int videobuf_waiton(struct videobuf_buffer *vb, int non_blocking, int intr);
-
-Here, vb is the buffer, non_blocking indicates whether non-blocking I/O
-should be used (it should be zero in the buf_release() case), and intr
-controls whether an interruptible wait is used.
-
-File operations
----------------
-
-At this point, much of the work is done; much of the rest is slipping
-videobuf calls into the implementation of the other driver callbacks. The
-first step is in the open() function, which must initialize the
-videobuf queue. The function to use depends on the type of buffer used:
-
-.. code-block:: none
-
- void videobuf_queue_sg_init(struct videobuf_queue *q,
- struct videobuf_queue_ops *ops,
- struct device *dev,
- spinlock_t *irqlock,
- enum v4l2_buf_type type,
- enum v4l2_field field,
- unsigned int msize,
- void *priv);
-
- void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
- struct videobuf_queue_ops *ops,
- struct device *dev,
- spinlock_t *irqlock,
- enum v4l2_buf_type type,
- enum v4l2_field field,
- unsigned int msize,
- void *priv);
-
- void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
- struct videobuf_queue_ops *ops,
- struct device *dev,
- spinlock_t *irqlock,
- enum v4l2_buf_type type,
- enum v4l2_field field,
- unsigned int msize,
- void *priv);
-
-In each case, the parameters are the same: q is the queue structure for the
-device, ops is the set of callbacks as described above, dev is the device
-structure for this video device, irqlock is an interrupt-safe spinlock to
-protect access to the data structures, type is the buffer type used by the
-device (cameras will use V4L2_BUF_TYPE_VIDEO_CAPTURE, for example), field
-describes which field is being captured (often V4L2_FIELD_NONE for
-progressive devices), msize is the size of any containing structure used
-around struct videobuf_buffer, and priv is a private data pointer which
-shows up in the priv_data field of struct videobuf_queue. Note that these
-are void functions which, evidently, are immune to failure.
-
-V4L2 capture drivers can be written to support either of two APIs: the
-read() system call and the rather more complicated streaming mechanism. As
-a general rule, it is necessary to support both to ensure that all
-applications have a chance of working with the device. Videobuf makes it
-easy to do that with the same code. To implement read(), the driver need
-only make a call to one of:
-
-.. code-block:: none
-
- ssize_t videobuf_read_one(struct videobuf_queue *q,
- char __user *data, size_t count,
- loff_t *ppos, int nonblocking);
-
- ssize_t videobuf_read_stream(struct videobuf_queue *q,
- char __user *data, size_t count,
- loff_t *ppos, int vbihack, int nonblocking);
-
-Either one of these functions will read frame data into data, returning the
-amount actually read; the difference is that videobuf_read_one() will only
-read a single frame, while videobuf_read_stream() will read multiple frames
-if they are needed to satisfy the count requested by the application. A
-typical driver read() implementation will start the capture engine, call
-one of the above functions, then stop the engine before returning (though a
-smarter implementation might leave the engine running for a little while in
-anticipation of another read() call happening in the near future).
-
-The poll() function can usually be implemented with a direct call to:
-
-.. code-block:: none
-
- unsigned int videobuf_poll_stream(struct file *file,
- struct videobuf_queue *q,
- poll_table *wait);
-
-Note that the actual wait queue eventually used will be the one associated
-with the first available buffer.
-
-When streaming I/O is done to kernel-space buffers, the driver must support
-the mmap() system call to enable user space to access the data. In many
-V4L2 drivers, the often-complex mmap() implementation simplifies to a
-single call to:
-
-.. code-block:: none
-
- int videobuf_mmap_mapper(struct videobuf_queue *q,
- struct vm_area_struct *vma);
-
-Everything else is handled by the videobuf code.
-
-The release() function requires two separate videobuf calls:
-
-.. code-block:: none
-
- void videobuf_stop(struct videobuf_queue *q);
- int videobuf_mmap_free(struct videobuf_queue *q);
-
-The call to videobuf_stop() terminates any I/O in progress - though it is
-still up to the driver to stop the capture engine. The call to
-videobuf_mmap_free() will ensure that all buffers have been unmapped; if
-so, they will all be passed to the buf_release() callback. If buffers
-remain mapped, videobuf_mmap_free() returns an error code instead. The
-purpose is clearly to cause the closing of the file descriptor to fail if
-buffers are still mapped, but every driver in the 2.6.32 kernel cheerfully
-ignores its return value.
-
-ioctl() operations
-------------------
-
-The V4L2 API includes a very long list of driver callbacks to respond to
-the many ioctl() commands made available to user space. A number of these
-- those associated with streaming I/O - turn almost directly into videobuf
-calls. The relevant helper functions are:
-
-.. code-block:: none
-
- int videobuf_reqbufs(struct videobuf_queue *q,
- struct v4l2_requestbuffers *req);
- int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b);
- int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b);
- int videobuf_dqbuf(struct videobuf_queue *q, struct v4l2_buffer *b,
- int nonblocking);
- int videobuf_streamon(struct videobuf_queue *q);
- int videobuf_streamoff(struct videobuf_queue *q);
-
-So, for example, a VIDIOC_REQBUFS call turns into a call to the driver's
-vidioc_reqbufs() callback which, in turn, usually only needs to locate the
-proper struct videobuf_queue pointer and pass it to videobuf_reqbufs().
-These support functions can replace a great deal of buffer management
-boilerplate in a lot of V4L2 drivers.
-
-The vidioc_streamon() and vidioc_streamoff() functions will be a bit more
-complex, of course, since they will also need to deal with starting and
-stopping the capture engine.
-
-Buffer allocation
------------------
-
-Thus far, we have talked about buffers, but have not looked at how they are
-allocated. The scatter/gather case is the most complex on this front. For
-allocation, the driver can leave buffer allocation entirely up to the
-videobuf layer; in this case, buffers will be allocated as anonymous
-user-space pages and will be very scattered indeed. If the application is
-using user-space buffers, no allocation is needed; the videobuf layer will
-take care of calling get_user_pages() and filling in the scatterlist array.
-
-If the driver needs to do its own memory allocation, it should be done in
-the vidioc_reqbufs() function, *after* calling videobuf_reqbufs(). The
-first step is a call to:
-
-.. code-block:: none
-
- struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf);
-
-The returned videobuf_dmabuf structure (defined in
-<media/videobuf-dma-sg.h>) includes a couple of relevant fields:
-
-.. code-block:: none
-
- struct scatterlist *sglist;
- int sglen;
-
-The driver must allocate an appropriately-sized scatterlist array and
-populate it with pointers to the pieces of the allocated buffer; sglen
-should be set to the length of the array.
-
-Drivers using the vmalloc() method need not (and cannot) concern themselves
-with buffer allocation at all; videobuf will handle those details. The
-same is normally true of contiguous-DMA drivers as well; videobuf will
-allocate the buffers (with dma_alloc_coherent()) when it sees fit. That
-means that these drivers may be trying to do high-order allocations at any
-time, an operation which is not always guaranteed to work. Some drivers
-play tricks by allocating DMA space at system boot time; videobuf does not
-currently play well with those drivers.
-
-As of 2.6.31, contiguous-DMA drivers can work with a user-supplied buffer,
-as long as that buffer is physically contiguous. Normal user-space
-allocations will not meet that criterion, but buffers obtained from other
-kernel drivers, or those contained within huge pages, will work with these
-drivers.
-
-Filling the buffers
--------------------
-
-The final part of a videobuf implementation has no direct callback - it's
-the portion of the code which actually puts frame data into the buffers,
-usually in response to interrupts from the device. For all types of
-drivers, this process works approximately as follows:
-
- - Obtain the next available buffer and make sure that somebody is actually
- waiting for it.
-
- - Get a pointer to the memory and put video data there.
-
- - Mark the buffer as done and wake up the process waiting for it.
-
-Step (1) above is done by looking at the driver-managed list_head structure
-- the one which is filled in the buf_queue() callback. Because starting
-the engine and enqueueing buffers are done in separate steps, it's possible
-for the engine to be running without any buffers available - in the
-vmalloc() case especially. So the driver should be prepared for the list
-to be empty. It is equally possible that nobody is yet interested in the
-buffer; the driver should not remove it from the list or fill it until a
-process is waiting on it. That test can be done by examining the buffer's
-done field (a wait_queue_head_t structure) with waitqueue_active().
-
-A buffer's state should be set to VIDEOBUF_ACTIVE before being mapped for
-DMA; that ensures that the videobuf layer will not try to do anything with
-it while the device is transferring data.
-
-For scatter/gather drivers, the needed memory pointers will be found in the
-scatterlist structure described above. Drivers using the vmalloc() method
-can get a memory pointer with:
-
-.. code-block:: none
-
- void *videobuf_to_vmalloc(struct videobuf_buffer *buf);
-
-For contiguous DMA drivers, the function to use is:
-
-.. code-block:: none
-
- dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf);
-
-The contiguous DMA API goes out of its way to hide the kernel-space address
-of the DMA buffer from drivers.
-
-The final step is to set the size field of the relevant videobuf_buffer
-structure to the actual size of the captured image, set state to
-VIDEOBUF_DONE, then call wake_up() on the done queue. At this point, the
-buffer is owned by the videobuf layer and the driver should not touch it
-again.
-
-Developers who are interested in more information can go into the relevant
-header files; there are a few low-level functions declared there which have
-not been talked about here. Note also that all of these calls are exported
-GPL-only, so they will not be available to non-GPL kernel modules.
diff --git a/Documentation/driver-api/tty/index.rst b/Documentation/driver-api/tty/index.rst
index 2d32606a4278..b490da11f257 100644
--- a/Documentation/driver-api/tty/index.rst
+++ b/Documentation/driver-api/tty/index.rst
@@ -36,6 +36,7 @@ In-detail description of the named TTY structures is in separate documents:
tty_struct
tty_ldisc
tty_buffer
+ tty_ioctl
tty_internals
Writing TTY Driver
diff --git a/Documentation/driver-api/tty/tty_ioctl.rst b/Documentation/driver-api/tty/tty_ioctl.rst
new file mode 100644
index 000000000000..3ff1ac5e07f1
--- /dev/null
+++ b/Documentation/driver-api/tty/tty_ioctl.rst
@@ -0,0 +1,10 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================
+TTY IOCTL Helpers
+=================
+
+.. kernel-doc:: drivers/tty/tty_ioctl.c
+
+.. kernel-doc:: include/linux/tty.h
+ :identifiers: tty_get_baud_rate
diff --git a/Documentation/driver-api/usb/dma.rst b/Documentation/driver-api/usb/dma.rst
index d32c27e11b90..02f6825ff830 100644
--- a/Documentation/driver-api/usb/dma.rst
+++ b/Documentation/driver-api/usb/dma.rst
@@ -93,44 +93,18 @@ DMA address space of the device. However, most buffers passed to your
driver can safely be used with such DMA mapping. (See the first section
of Documentation/core-api/dma-api-howto.rst, titled "What memory is DMA-able?")
-- When you're using scatterlists, you can map everything at once. On some
- systems, this kicks in an IOMMU and turns the scatterlists into single
- DMA transactions::
+- Once you have scatterlists that have been mapped for the USB controller,
+ you can use the ``usb_sg_*()`` calls, which turn the scatterlist
+ into URBs::
- int usb_buffer_map_sg (struct usb_device *dev, unsigned pipe,
- struct scatterlist *sg, int nents);
+ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
+ unsigned pipe, unsigned period, struct scatterlist *sg,
+ int nents, size_t length, gfp_t mem_flags);
- void usb_buffer_dmasync_sg (struct usb_device *dev, unsigned pipe,
- struct scatterlist *sg, int n_hw_ents);
+ void usb_sg_wait(struct usb_sg_request *io);
- void usb_buffer_unmap_sg (struct usb_device *dev, unsigned pipe,
- struct scatterlist *sg, int n_hw_ents);
+ void usb_sg_cancel(struct usb_sg_request *io);
- It's probably easier to use the new ``usb_sg_*()`` calls, which do the DMA
- mapping and apply other tweaks to make scatterlist i/o be fast.
-
-- Some drivers may prefer to work with the model that they're mapping large
- buffers, synchronizing their safe re-use. (If there's no re-use, then let
- usbcore do the map/unmap.) Large periodic transfers make good examples
- here, since it's cheaper to just synchronize the buffer than to unmap it
- each time an urb completes and then re-map it on during resubmission.
-
- These calls all work with initialized urbs: ``urb->dev``, ``urb->pipe``,
- ``urb->transfer_buffer``, and ``urb->transfer_buffer_length`` must all be
- valid when these calls are used (``urb->setup_packet`` must be valid too
- if urb is a control request)::
-
- struct urb *usb_buffer_map (struct urb *urb);
-
- void usb_buffer_dmasync (struct urb *urb);
-
- void usb_buffer_unmap (struct urb *urb);
-
- The calls manage ``urb->transfer_dma`` for you, and set
- ``URB_NO_TRANSFER_DMA_MAP`` so that usbcore won't map or unmap the buffer.
- They cannot be used for setup_packet buffers in control requests.
-
-Note that several of those interfaces are currently commented out, since
-they don't have current users. See the source code. Other than the dmasync
-calls (where the underlying DMA primitives have changed), most of them can
-easily be commented back in if you want to use them.
+ When the USB controller doesn't support DMA, ``usb_sg_init()`` will try to
+ submit the URBs in PIO mode, as long as the pages in the scatterlist are not
+ in highmem, which is rare on modern architectures.
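+
+  As a hedged usage sketch (``dev``, ``pipe``, ``sg`` and ``nents`` are assumed
+  to describe an already prepared scatterlist; error handling is trimmed)::
+
+      struct usb_sg_request io;
+      int ret;
+
+      /* period = 0 (not an interrupt transfer), length = 0 (whole list) */
+      ret = usb_sg_init(&io, dev, pipe, 0, sg, nents, 0, GFP_KERNEL);
+      if (ret == 0) {
+              usb_sg_wait(&io);       /* blocks until the request completes */
+              ret = io.status;        /* 0 on success, negative errno otherwise */
+      }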
diff --git a/Documentation/filesystems/nfs/exporting.rst b/Documentation/filesystems/nfs/exporting.rst
index 198d805d611c..f04ce1215a03 100644
--- a/Documentation/filesystems/nfs/exporting.rst
+++ b/Documentation/filesystems/nfs/exporting.rst
@@ -122,12 +122,9 @@ are exportable by setting the s_export_op field in the struct
super_block. This field must point to a "struct export_operations"
struct which has the following members:
- encode_fh (optional)
+ encode_fh (mandatory)
Takes a dentry and creates a filehandle fragment which may later be used
- to find or create a dentry for the same object. The default
- implementation creates a filehandle fragment that encodes a 32bit inode
- and generation number for the inode encoded, and if necessary the
- same information for the parent.
+ to find or create a dentry for the same object.
fh_to_dentry (mandatory)
Given a filehandle fragment, this should find the implied object and
diff --git a/Documentation/filesystems/overlayfs.rst b/Documentation/filesystems/overlayfs.rst
index 5b93268e400f..0407f361f32a 100644
--- a/Documentation/filesystems/overlayfs.rst
+++ b/Documentation/filesystems/overlayfs.rst
@@ -344,10 +344,11 @@ escaping the colons with a single backslash. For example:
mount -t overlay overlay -olowerdir=/a\:lower\:\:dir /merged
-Since kernel version v6.5, directory names containing colons can also
-be provided as lower layer using the fsconfig syscall from new mount api:
+Since kernel version v6.8, directory names containing colons can also
+be configured as a lower layer using the "lowerdir+" mount options and the
+fsconfig syscall from the new mount api. For example:
- fsconfig(fs_fd, FSCONFIG_SET_STRING, "lowerdir", "/a:lower::dir", 0);
+ fsconfig(fs_fd, FSCONFIG_SET_STRING, "lowerdir+", "/a:lower::dir", 0);
In the latter case, colons in lower layer directory names will be escaped
 as octal characters (\072) when displayed in /proc/self/mountinfo.
@@ -416,6 +417,16 @@ Only the data of the files in the "data-only" lower layers may be visible
when a "metacopy" file in one of the lower layers above it, has a "redirect"
to the absolute path of the "lower data" file in the "data-only" lower layer.
+Since kernel version v6.8, "data-only" lower layers can also be added using
+the "datadir+" mount options and the fsconfig syscall from new mount api.
+For example:
+
+ fsconfig(fs_fd, FSCONFIG_SET_STRING, "lowerdir+", "/l1", 0);
+ fsconfig(fs_fd, FSCONFIG_SET_STRING, "lowerdir+", "/l2", 0);
+ fsconfig(fs_fd, FSCONFIG_SET_STRING, "lowerdir+", "/l3", 0);
+ fsconfig(fs_fd, FSCONFIG_SET_STRING, "datadir+", "/do1", 0);
+ fsconfig(fs_fd, FSCONFIG_SET_STRING, "datadir+", "/do2", 0);
+
fs-verity support
----------------------
@@ -504,6 +515,29 @@ directory tree on the same or different underlying filesystem, and even
to a different machine. With the "inodes index" feature, trying to mount
the copied layers will fail the verification of the lower root file handle.
+Nesting overlayfs mounts
+------------------------
+
+It is possible to use a lower directory that is stored on an overlayfs
+mount. For regular files this does not need any special care. However, files
+that have overlayfs attributes, such as whiteouts or "overlay.*" xattrs, will be
+interpreted by the underlying overlayfs mount and stripped out. In order to
+allow the second overlayfs mount to see the attributes, they must be escaped.
+
+Overlayfs specific xattrs are escaped by using a special prefix of
+"overlay.overlay.". So, a file with a "trusted.overlay.overlay.metacopy" xattr
+in the lower dir will be exposed as a regular file with a
+"trusted.overlay.metacopy" xattr in the overlayfs mount. This can be nested by
+repeating the prefix multiple times, as each instance only removes one prefix.
+
+A lower dir with a regular whiteout will always be handled by the overlayfs
+mount, so to support storing an effective whiteout file in an overlayfs mount an
+alternative form of whiteout is supported. This form is a regular, zero-size
+file with the "overlay.whiteout" xattr set, inside a directory with the
+"overlay.whiteouts" xattr set. Such whiteouts are never created by overlayfs,
+but can be used by userspace tools (like containers) that generate lower layers.
+These alternative whiteouts can be escaped using the standard xattr escape
+mechanism in order to properly nest to any depth.
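+
+As a hedged illustration, a userspace tool preparing a lower layer could create
+such a whiteout roughly as follows; the paths, the use of the "trusted."
+namespace and the xattr values are assumptions for the example only::
+
+    /* needs <fcntl.h>, <unistd.h> and <sys/xattr.h> */
+    int fd = open("lower/dir/removed-file", O_CREAT | O_WRONLY, 0000);
+
+    close(fd);
+    /* the xattr values are not believed to be significant, only their presence */
+    setxattr("lower/dir/removed-file", "trusted.overlay.whiteout", "y", 1, 0);
+    setxattr("lower/dir", "trusted.overlay.whiteouts", "y", 1, 0);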
Non-standard behavior
---------------------
diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst
index d69f59700a23..878e72b2f8b7 100644
--- a/Documentation/filesystems/porting.rst
+++ b/Documentation/filesystems/porting.rst
@@ -1052,3 +1052,12 @@ kill_anon_super(), or kill_block_super() helpers.
Lock ordering has been changed so that s_umount ranks above open_mutex again.
All places where s_umount was taken under open_mutex have been fixed up.
+
+---
+
+**mandatory**
+
+export_operations ->encode_fh() no longer has a default implementation to
+encode FILEID_INO32_GEN* file handles.
+Filesystems that used the default implementation may use the generic helper
+generic_encode_ino32_fh() explicitly.
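+
+A hedged sketch of the resulting change ("myfs" and its other export helpers
+are hypothetical)::
+
+	static const struct export_operations myfs_export_ops = {
+		.encode_fh	= generic_encode_ino32_fh,
+		.fh_to_dentry	= myfs_fh_to_dentry,
+	};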
diff --git a/Documentation/i2c/busses/i2c-i801.rst b/Documentation/i2c/busses/i2c-i801.rst
index e76e68ccf718..10eced6c2e46 100644
--- a/Documentation/i2c/busses/i2c-i801.rst
+++ b/Documentation/i2c/busses/i2c-i801.rst
@@ -47,6 +47,7 @@ Supported adapters:
* Intel Alder Lake (PCH)
* Intel Raptor Lake (PCH)
* Intel Meteor Lake (SOC and PCH)
+ * Intel Birch Stream (SOC)
Datasheets: Publicly available at the Intel website
diff --git a/Documentation/i2c/fault-codes.rst b/Documentation/i2c/fault-codes.rst
index 80b14e718b52..b0864d1268bd 100644
--- a/Documentation/i2c/fault-codes.rst
+++ b/Documentation/i2c/fault-codes.rst
@@ -39,6 +39,10 @@ Also, codes returned by adapter probe methods follow rules which are
specific to their host bus (such as PCI, or the platform bus).
+EAFNOSUPPORT
+ Returned by I2C adapters not supporting 10 bit addresses when
+ they are requested to use such an address.
+
EAGAIN
Returned by I2C adapters when they lose arbitration in master
transmit mode: some other master was transmitting different
diff --git a/Documentation/kbuild/kbuild.rst b/Documentation/kbuild/kbuild.rst
index bd906407e307..9c8d1d046ea5 100644
--- a/Documentation/kbuild/kbuild.rst
+++ b/Documentation/kbuild/kbuild.rst
@@ -243,6 +243,12 @@ The output directory is often set using "O=..." on the commandline.
The value can be overridden in which case the default value is ignored.
+INSTALL_DTBS_PATH
+-----------------
+INSTALL_DTBS_PATH specifies where to install device tree blobs for
+relocations required by build roots. This is not defined in the
+makefile but the argument can be passed to make if needed.
+
KBUILD_ABS_SRCTREE
--------------------------------------------------
Kbuild uses a relative path to point to the tree when possible. For instance,
diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst
index 47a29a36b12b..ad118b7a1806 100644
--- a/Documentation/kbuild/makefiles.rst
+++ b/Documentation/kbuild/makefiles.rst
@@ -937,6 +937,10 @@ Example::
# net/bpfilter/Makefile
bpfilter_umh-userldflags += -static
+To specify libraries linked to a userspace program, you can use
+``<executable>-userldlibs``. The ``userldlibs`` syntax specifies libraries
+linked to all userspace programs created in the current Makefile.
+
When linking bpfilter_umh, it will be passed the extra option -static.
From command line, :ref:`USERCFLAGS and USERLDFLAGS <userkbuildflags>` will also be used.
@@ -1623,6 +1627,13 @@ INSTALL_MOD_STRIP
INSTALL_MOD_STRIP value will be used as the option(s) to the strip
command.
+INSTALL_DTBS_PATH
+ This variable specifies a prefix for relocations required by build
+ roots. It defines a place for installing the device tree blobs. Like
+ INSTALL_MOD_PATH, it isn't defined in the Makefile, but can be passed
+ by the user if desired. Otherwise it defaults to the kernel install
+ path.
+
Makefile language
=================
diff --git a/Documentation/misc-devices/eeprom.rst b/Documentation/misc-devices/eeprom.rst
deleted file mode 100644
index 008249675ccc..000000000000
--- a/Documentation/misc-devices/eeprom.rst
+++ /dev/null
@@ -1,107 +0,0 @@
-====================
-Kernel driver eeprom
-====================
-
-Supported chips:
-
- * Any EEPROM chip in the designated address range
-
- Prefix: 'eeprom'
-
- Addresses scanned: I2C 0x50 - 0x57
-
- Datasheets: Publicly available from:
-
- Atmel (www.atmel.com),
- Catalyst (www.catsemi.com),
- Fairchild (www.fairchildsemi.com),
- Microchip (www.microchip.com),
- Philips (www.semiconductor.philips.com),
- Rohm (www.rohm.com),
- ST (www.st.com),
- Xicor (www.xicor.com),
- and others.
-
- ========= ============= ============================================
- Chip Size (bits) Address
- ========= ============= ============================================
- 24C01 1K 0x50 (shadows at 0x51 - 0x57)
- 24C01A 1K 0x50 - 0x57 (Typical device on DIMMs)
- 24C02 2K 0x50 - 0x57
- 24C04 4K 0x50, 0x52, 0x54, 0x56
- (additional data at 0x51, 0x53, 0x55, 0x57)
- 24C08 8K 0x50, 0x54 (additional data at 0x51, 0x52,
- 0x53, 0x55, 0x56, 0x57)
- 24C16 16K 0x50 (additional data at 0x51 - 0x57)
- Sony 2K 0x57
-
- Atmel 34C02B 2K 0x50 - 0x57, SW write protect at 0x30-37
- Catalyst 34FC02 2K 0x50 - 0x57, SW write protect at 0x30-37
- Catalyst 34RC02 2K 0x50 - 0x57, SW write protect at 0x30-37
- Fairchild 34W02 2K 0x50 - 0x57, SW write protect at 0x30-37
- Microchip 24AA52 2K 0x50 - 0x57, SW write protect at 0x30-37
- ST M34C02 2K 0x50 - 0x57, SW write protect at 0x30-37
- ========= ============= ============================================
-
-
-Authors:
- - Frodo Looijaard <frodol@dds.nl>,
- - Philip Edelbrock <phil@netroedge.com>,
- - Jean Delvare <jdelvare@suse.de>,
- - Greg Kroah-Hartman <greg@kroah.com>,
- - IBM Corp.
-
-Description
------------
-
-This is a simple EEPROM module meant to enable reading the first 256 bytes
-of an EEPROM (on a SDRAM DIMM for example). However, it will access serial
-EEPROMs on any I2C adapter. The supported devices are generically called
-24Cxx, and are listed above; however the numbering for these
-industry-standard devices may vary by manufacturer.
-
-This module was a programming exercise to get used to the new project
-organization laid out by Frodo, but it should be at least completely
-effective for decoding the contents of EEPROMs on DIMMs.
-
-DIMMS will typically contain a 24C01A or 24C02, or the 34C02 variants.
-The other devices will not be found on a DIMM because they respond to more
-than one address.
-
-DDC Monitors may contain any device. Often a 24C01, which responds to all 8
-addresses, is found.
-
-Recent Sony Vaio laptops have an EEPROM at 0x57. We couldn't get the
-specification, so it is guess work and far from being complete.
-
-The Microchip 24AA52/24LCS52, ST M34C02, and others support an additional
-software write protect register at 0x30 - 0x37 (0x20 less than the memory
-location). The chip responds to "write quick" detection at this address but
-does not respond to byte reads. If this register is present, the lower 128
-bytes of the memory array are not write protected. Any byte data write to
-this address will write protect the memory array permanently, and the
-device will no longer respond at the 0x30-37 address. The eeprom driver
-does not support this register.
-
-Lacking functionality
----------------------
-
-* Full support for larger devices (24C04, 24C08, 24C16). These are not
- typically found on a PC. These devices will appear as separate devices at
- multiple addresses.
-
-* Support for really large devices (24C32, 24C64, 24C128, 24C256, 24C512).
- These devices require two-byte address fields and are not supported.
-
-* Enable Writing. Again, no technical reason why not, but making it easy
- to change the contents of the EEPROMs (on DIMMs anyway) also makes it easy
- to disable the DIMMs (potentially preventing the computer from booting)
- until the values are restored somehow.
-
-Use
----
-
-After inserting the module (and any other required SMBus/i2c modules), you
-should have some EEPROM directories in ``/sys/bus/i2c/devices/*`` of names such
-as "0-0050". Inside each of these is a series of files, the eeprom file
-contains the binary data from EEPROM.
diff --git a/Documentation/misc-devices/index.rst b/Documentation/misc-devices/index.rst
index ecc40fbbcfb8..7de16797987a 100644
--- a/Documentation/misc-devices/index.rst
+++ b/Documentation/misc-devices/index.rst
@@ -17,7 +17,6 @@ fit into other categories.
ad525x_dpot
apds990x
bh1770glc
- eeprom
c2port
dw-xdata-pcie
ibmvmc
diff --git a/Documentation/netlink/specs/devlink.yaml b/Documentation/netlink/specs/devlink.yaml
index c6ba4889575a..572d83a414d0 100644
--- a/Documentation/netlink/specs/devlink.yaml
+++ b/Documentation/netlink/specs/devlink.yaml
@@ -71,6 +71,10 @@ definitions:
name: roce-bit
-
name: migratable-bit
+ -
+ name: ipsec-crypto-bit
+ -
+ name: ipsec-packet-bit
-
type: enum
name: sb-threshold-type
diff --git a/Documentation/networking/device_drivers/index.rst b/Documentation/networking/device_drivers/index.rst
index 2f0285a5bc80..0dd30a84ce25 100644
--- a/Documentation/networking/device_drivers/index.rst
+++ b/Documentation/networking/device_drivers/index.rst
@@ -15,7 +15,6 @@ Contents:
ethernet/index
fddi/index
hamradio/index
- qlogic/index
wifi/index
wwan/index
diff --git a/Documentation/networking/device_drivers/qlogic/index.rst b/Documentation/networking/device_drivers/qlogic/index.rst
deleted file mode 100644
index ad05b04286e4..000000000000
--- a/Documentation/networking/device_drivers/qlogic/index.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-
-QLogic QLGE Device Drivers
-===============================================
-
-Contents:
-
-.. toctree::
- :maxdepth: 2
-
- qlge
-
-.. only:: subproject and html
-
- Indices
- =======
-
- * :ref:`genindex`
diff --git a/Documentation/networking/device_drivers/qlogic/qlge.rst b/Documentation/networking/device_drivers/qlogic/qlge.rst
deleted file mode 100644
index 0b888253d152..000000000000
--- a/Documentation/networking/device_drivers/qlogic/qlge.rst
+++ /dev/null
@@ -1,118 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-=======================================
-QLogic QLGE 10Gb Ethernet device driver
-=======================================
-
-This driver use drgn and devlink for debugging.
-
-Dump kernel data structures in drgn
------------------------------------
-
-To dump kernel data structures, the following Python script can be used
-in drgn:
-
-.. code-block:: python
-
- def align(x, a):
- """the alignment a should be a power of 2
- """
- mask = a - 1
- return (x+ mask) & ~mask
-
- def struct_size(struct_type):
- struct_str = "struct {}".format(struct_type)
- return sizeof(Object(prog, struct_str, address=0x0))
-
- def netdev_priv(netdevice):
- NETDEV_ALIGN = 32
- return netdevice.value_() + align(struct_size("net_device"), NETDEV_ALIGN)
-
- name = 'xxx'
- qlge_device = None
- netdevices = prog['init_net'].dev_base_head.address_of_()
- for netdevice in list_for_each_entry("struct net_device", netdevices, "dev_list"):
- if netdevice.name.string_().decode('ascii') == name:
- print(netdevice.name)
-
- ql_adapter = Object(prog, "struct ql_adapter", address=netdev_priv(qlge_device))
-
-The struct ql_adapter will be printed in drgn as follows,
-
- >>> ql_adapter
- (struct ql_adapter){
- .ricb = (struct ricb){
- .base_cq = (u8)0,
- .flags = (u8)120,
- .mask = (__le16)26637,
- .hash_cq_id = (u8 [1024]){ 172, 142, 255, 255 },
- .ipv6_hash_key = (__le32 [10]){},
- .ipv4_hash_key = (__le32 [4]){},
- },
- .flags = (unsigned long)0,
- .wol = (u32)0,
- .nic_stats = (struct nic_stats){
- .tx_pkts = (u64)0,
- .tx_bytes = (u64)0,
- .tx_mcast_pkts = (u64)0,
- .tx_bcast_pkts = (u64)0,
- .tx_ucast_pkts = (u64)0,
- .tx_ctl_pkts = (u64)0,
- .tx_pause_pkts = (u64)0,
- ...
- },
- .active_vlans = (unsigned long [64]){
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 52780853100545, 18446744073709551615,
- 18446619461681283072, 0, 42949673024, 2147483647,
- },
- .rx_ring = (struct rx_ring [17]){
- {
- .cqicb = (struct cqicb){
- .msix_vect = (u8)0,
- .reserved1 = (u8)0,
- .reserved2 = (u8)0,
- .flags = (u8)0,
- .len = (__le16)0,
- .rid = (__le16)0,
- ...
- },
- .cq_base = (void *)0x0,
- .cq_base_dma = (dma_addr_t)0,
- }
- ...
- }
- }
-
-coredump via devlink
---------------------
-
-
-And the coredump obtained via devlink in json format looks like,
-
-.. code:: shell
-
- $ devlink health dump show DEVICE reporter coredump -p -j
- {
- "Core Registers": {
- "segment": 1,
- "values": [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 ]
- },
- "Test Logic Regs": {
- "segment": 2,
- "values": [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 ]
- },
- "RMII Registers": {
- "segment": 3,
- "values": [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 ]
- },
- ...
- "Sem Registers": {
- "segment": 50,
- "values": [ 0,0,0,0 ]
- }
- }
-
-When the module parameter qlge_force_coredump is set to be true, the MPI
-RISC reset before coredumping. So coredumping will much longer since
-devlink tool has to wait for 5 secs for the resetting to be
-finished.
diff --git a/Documentation/networking/smc-sysctl.rst b/Documentation/networking/smc-sysctl.rst
index 6d8acdbe9be1..769149d98773 100644
--- a/Documentation/networking/smc-sysctl.rst
+++ b/Documentation/networking/smc-sysctl.rst
@@ -44,18 +44,16 @@ smcr_testlink_time - INTEGER
wmem - INTEGER
Initial size of send buffer used by SMC sockets.
- The default value inherits from net.ipv4.tcp_wmem[1].
The minimum value is 16KiB and there is no hard limit for max value, but
only allowed 512KiB for SMC-R and 1MiB for SMC-D.
- Default: 16K
+ Default: 64KiB
rmem - INTEGER
Initial size of receive buffer (RMB) used by SMC sockets.
- The default value inherits from net.ipv4.tcp_rmem[1].
The minimum value is 16KiB and there is no hard limit for max value, but
only allowed 512KiB for SMC-R and 1MiB for SMC-D.
- Default: 128K
+ Default: 64KiB
diff --git a/Documentation/process/security-bugs.rst b/Documentation/process/security-bugs.rst
index 5a6993795bd2..692a3ba56cca 100644
--- a/Documentation/process/security-bugs.rst
+++ b/Documentation/process/security-bugs.rst
@@ -66,15 +66,32 @@ lifted, in perpetuity.
Coordination with other groups
------------------------------
-The kernel security team strongly recommends that reporters of potential
-security issues NEVER contact the "linux-distros" mailing list until
-AFTER discussing it with the kernel security team. Do not Cc: both
-lists at once. You may contact the linux-distros mailing list after a
-fix has been agreed on and you fully understand the requirements that
-doing so will impose on you and the kernel community.
-
-The different lists have different goals and the linux-distros rules do
-not contribute to actually fixing any potential security problems.
+While the kernel security team solely focuses on getting bugs fixed,
+other groups focus on fixing issues in distros and coordinating
+disclosure between operating system vendors. Coordination is usually
+handled by the "linux-distros" mailing list and disclosure by the
+public "oss-security" mailing list, both of which are closely related
+and presented in the linux-distros wiki:
+<https://oss-security.openwall.org/wiki/mailing-lists/distros>
+
+Please note that the respective policies and rules are different since
+the 3 lists pursue different goals. Coordinating between the kernel
+security team and other teams is difficult since for the kernel security
+team occasional embargoes (as subject to a maximum allowed number of
+days) start from the availability of a fix, while for "linux-distros"
+they start from the initial post to the list regardless of the
+availability of a fix.
+
+As such, the kernel security team strongly recommends that as a reporter
+of a potential security issue you DO NOT contact the "linux-distros"
+mailing list UNTIL a fix is accepted by the affected code's maintainers
+and you have read the distros wiki page above and you fully understand
+the requirements that contacting "linux-distros" will impose on you and
+the kernel community. This also means that in general it doesn't make
+sense to Cc: both lists at once, except maybe for coordination if and
+while an accepted fix has not yet been merged. In other words, until a
+fix is accepted do not Cc: "linux-distros", and after it's merged do not
+Cc: the kernel security team.
CVE assignment
--------------
diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst
index 41f1e07abfdf..1704f1c686d0 100644
--- a/Documentation/process/stable-kernel-rules.rst
+++ b/Documentation/process/stable-kernel-rules.rst
@@ -101,6 +101,19 @@ comment:
git cherry-pick fd21073
git cherry-pick <this commit>
+ Note that for a patch series, you do not have to list as prerequisites the
+ patches present in the series itself. For example, if you have the following
+ patch series:
+
+ .. code-block:: none
+
+ patch1
+ patch2
+
+ where patch2 depends on patch1, you do not have to list patch1 as
+ prerequisite of patch2 if you have already marked patch1 for stable
+ inclusion.
+
* For patches that may have kernel version prerequisites specify them using
the following format in the sign-off area:
diff --git a/Documentation/trace/user_events.rst b/Documentation/trace/user_events.rst
index f9530d0ac5d3..d8f12442aaa6 100644
--- a/Documentation/trace/user_events.rst
+++ b/Documentation/trace/user_events.rst
@@ -14,6 +14,11 @@ Programs can view status of the events via
/sys/kernel/tracing/user_events_status and can both register and write
data out via /sys/kernel/tracing/user_events_data.
+Programs can also use /sys/kernel/tracing/dynamic_events to register and
+delete user based events via the u: prefix. The format of the command to
+dynamic_events is the same as the ioctl with the u: prefix applied. This
+requires CAP_PERFMON due to the event persisting, otherwise -EPERM is returned.
+
Typically programs will register a set of events that they wish to expose to
tools that can read trace_events (such as ftrace and perf). The registration
process tells the kernel which address and bit to reflect if any tool has
@@ -45,7 +50,7 @@ This command takes a packed struct user_reg as an argument::
/* Input: Enable size in bytes at address */
__u8 enable_size;
- /* Input: Flags for future use, set to 0 */
+ /* Input: Flags to use, if any */
__u16 flags;
/* Input: Address to update when enabled */
@@ -69,7 +74,7 @@ The struct user_reg requires all the above inputs to be set appropriately.
This must be 4 (32-bit) or 8 (64-bit). 64-bit values are only allowed to be
used on 64-bit kernels, however, 32-bit can be used on all kernels.
-+ flags: The flags to use, if any. For the initial version this must be 0.
++ flags: The flags to use, if any.
Callers should first attempt to use flags and retry without flags to ensure
support for lower versions of the kernel. If a flag is not supported -EINVAL
is returned.
@@ -80,6 +85,13 @@ The struct user_reg requires all the above inputs to be set appropriately.
+ name_args: The name and arguments to describe the event, see command format
for details.
+The following flags are currently supported.
+
++ USER_EVENT_REG_PERSIST: The event will not be deleted upon the last reference
+ closing. Callers may use this if an event should exist even after the
+ process closes or unregisters the event. Requires CAP_PERFMON otherwise
+ -EPERM is returned.
+
Upon successful registration the following is set.
+ write_index: The index to use for this file descriptor that represents this
@@ -141,7 +153,10 @@ event (in both user and kernel space). User programs should use a separate file
to request deletes than the one used for registration due to this.
**NOTE:** By default events will auto-delete when there are no references left
-to the event. Flags in the future may change this logic.
+to the event. If programs do not want auto-delete, they must use the
+USER_EVENT_REG_PERSIST flag when registering the event. Once that flag is used,
+the event exists until DIAG_IOCSDEL is invoked. Both registering and deleting an
+event that persists require CAP_PERFMON, otherwise -EPERM is returned.
Unregistering
-------------
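A minimal user-space sketch of registering a persistent event with the flag documented above might look as follows; the event name and the enable variable are illustrative only, while struct user_reg, DIAG_IOCSREG and USER_EVENT_REG_PERSIST come from include/uapi/linux/user_events.h.

.. code-block:: c

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/user_events.h>

    /* Hypothetical: bit 31 of this word is flipped by the kernel when a
     * tool enables the event.
     */
    static uint32_t enabled;

    static int register_persistent_event(void)
    {
            struct user_reg reg = {};
            int fd = open("/sys/kernel/tracing/user_events_data", O_RDWR);

            if (fd < 0)
                    return -1;

            reg.size = sizeof(reg);
            reg.enable_bit = 31;
            reg.enable_size = sizeof(enabled);
            reg.enable_addr = (__u64)(uintptr_t)&enabled;
            reg.flags = USER_EVENT_REG_PERSIST;   /* needs CAP_PERFMON */
            reg.name_args = (__u64)(uintptr_t)"example_event u32 count";

            if (ioctl(fd, DIAG_IOCSREG, &reg) < 0) {
                    perror("DIAG_IOCSREG");       /* -EPERM without CAP_PERFMON */
                    return -1;
            }
            return reg.write_index;               /* filled in by the kernel */
    }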
diff --git a/Documentation/translations/zh_CN/video4linux/v4l2-framework.txt b/Documentation/translations/zh_CN/video4linux/v4l2-framework.txt
index a88fcbc11eca..9cc97ec75d7a 100644
--- a/Documentation/translations/zh_CN/video4linux/v4l2-framework.txt
+++ b/Documentation/translations/zh_CN/video4linux/v4l2-framework.txt
@@ -768,18 +768,6 @@ const char *video_device_node_name(struct video_device *vdev);
此功能,而非访问 video_device::num 和 video_device::minor 域。
-视频缓冲辅助函数
----------------
-
-v4l2 核心 API 提供了一个处理视频缓冲的标准方法(称为“videobuf”)。
-这些方法使驱动可以通过统一的方式实现 read()、mmap() 和 overlay()。
-目前在设备上支持视频缓冲的方法有分散/聚集 DMA(videobuf-dma-sg)、
-线性 DMA(videobuf-dma-contig)以及大多用于 USB 设备的用 vmalloc
-分配的缓冲(videobuf-vmalloc)。
-
-请参阅 Documentation/driver-api/media/v4l2-videobuf.rst,以获得更多关于 videobuf
-层的使用信息。
-
v4l2_fh 结构体
-------------
diff --git a/Documentation/usb/gadget-testing.rst b/Documentation/usb/gadget-testing.rst
index 394cd226bfae..29072c166d23 100644
--- a/Documentation/usb/gadget-testing.rst
+++ b/Documentation/usb/gadget-testing.rst
@@ -755,6 +755,8 @@ The uac2 function provides these attributes in its function directory:
req_number the number of pre-allocated request for both capture
and playback
function_name name of the interface
+ c_terminal_type code of the capture terminal type
+ p_terminal_type code of the playback terminal type
================ ====================================================
The attributes have sane default values.
diff --git a/Documentation/userspace-api/landlock.rst b/Documentation/userspace-api/landlock.rst
index d8cd8cd9ce25..2e3822677061 100644
--- a/Documentation/userspace-api/landlock.rst
+++ b/Documentation/userspace-api/landlock.rst
@@ -8,13 +8,13 @@ Landlock: unprivileged access control
=====================================
:Author: Mickaël Salaün
-:Date: October 2022
+:Date: October 2023
The goal of Landlock is to enable to restrict ambient rights (e.g. global
-filesystem access) for a set of processes. Because Landlock is a stackable
-LSM, it makes possible to create safe security sandboxes as new security layers
-in addition to the existing system-wide access-controls. This kind of sandbox
-is expected to help mitigate the security impact of bugs or
+filesystem or network access) for a set of processes. Because Landlock
+is a stackable LSM, it makes possible to create safe security sandboxes as new
+security layers in addition to the existing system-wide access-controls. This
+kind of sandbox is expected to help mitigate the security impact of bugs or
unexpected/malicious behaviors in user space applications. Landlock empowers
any process, including unprivileged ones, to securely restrict themselves.
@@ -28,20 +28,34 @@ appropriately <kernel_support>`.
Landlock rules
==============
-A Landlock rule describes an action on an object. An object is currently a
-file hierarchy, and the related filesystem actions are defined with `access
-rights`_. A set of rules is aggregated in a ruleset, which can then restrict
+A Landlock rule describes an action on an object which the process intends to
+perform. A set of rules is aggregated in a ruleset, which can then restrict
the thread enforcing it, and its future children.
+The two existing types of rules are:
+
+Filesystem rules
+ For these rules, the object is a file hierarchy,
+ and the related filesystem actions are defined with
+ `filesystem access rights`.
+
+Network rules (since ABI v4)
+ For these rules, the object is a TCP port,
+ and the related actions are defined with `network access rights`.
+
Defining and enforcing a security policy
----------------------------------------
-We first need to define the ruleset that will contain our rules. For this
-example, the ruleset will contain rules that only allow read actions, but write
-actions will be denied. The ruleset then needs to handle both of these kind of
-actions. This is required for backward and forward compatibility (i.e. the
-kernel and user space may not know each other's supported restrictions), hence
-the need to be explicit about the denied-by-default access rights.
+We first need to define the ruleset that will contain our rules.
+
+For this example, the ruleset will contain rules that only allow filesystem
+read actions and establish a specific TCP connection. Filesystem write
+actions and other TCP actions will be denied.
+
+The ruleset then needs to handle both these kinds of actions. This is
+required for backward and forward compatibility (i.e. the kernel and user
+space may not know each other's supported restrictions), hence the need
+to be explicit about the denied-by-default access rights.
.. code-block:: c
@@ -62,6 +76,9 @@ the need to be explicit about the denied-by-default access rights.
LANDLOCK_ACCESS_FS_MAKE_SYM |
LANDLOCK_ACCESS_FS_REFER |
LANDLOCK_ACCESS_FS_TRUNCATE,
+ .handled_access_net =
+ LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP,
};
Because we may not know on which kernel version an application will be
@@ -70,9 +87,7 @@ should try to protect users as much as possible whatever the kernel they are
using. To avoid binary enforcement (i.e. either all security features or
none), we can leverage a dedicated Landlock command to get the current version
of the Landlock ABI and adapt the handled accesses. Let's check if we should
-remove the ``LANDLOCK_ACCESS_FS_REFER`` or ``LANDLOCK_ACCESS_FS_TRUNCATE``
-access rights, which are only supported starting with the second and third
-version of the ABI.
+remove access rights which are only supported in higher versions of the ABI.
.. code-block:: c
@@ -92,6 +107,12 @@ version of the ABI.
case 2:
/* Removes LANDLOCK_ACCESS_FS_TRUNCATE for ABI < 3 */
ruleset_attr.handled_access_fs &= ~LANDLOCK_ACCESS_FS_TRUNCATE;
+ __attribute__((fallthrough));
+ case 3:
+ /* Removes network support for ABI < 4 */
+ ruleset_attr.handled_access_net &=
+ ~(LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP);
}
This enables to create an inclusive ruleset that will contain our rules.
@@ -143,10 +164,23 @@ for the ruleset creation, by filtering access rights according to the Landlock
ABI version. In this example, this is not required because all of the requested
``allowed_access`` rights are already available in ABI 1.
-We now have a ruleset with one rule allowing read access to ``/usr`` while
-denying all other handled accesses for the filesystem. The next step is to
-restrict the current thread from gaining more privileges (e.g. thanks to a SUID
-binary).
+For network access-control, we can add a set of rules that allow the use of a port
+number for a specific action: HTTPS connections.
+
+.. code-block:: c
+
+ struct landlock_net_port_attr net_port = {
+ .allowed_access = LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ .port = 443,
+ };
+
+ err = landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &net_port, 0);
+
+The next step is to restrict the current thread from gaining more privileges
+(e.g. through a SUID binary). We now have a ruleset with the first rule
+allowing read access to ``/usr`` while denying all other handled accesses for
+the filesystem, and a second rule allowing HTTPS connections.
.. code-block:: c
@@ -355,7 +389,7 @@ Access rights
-------------
.. kernel-doc:: include/uapi/linux/landlock.h
- :identifiers: fs_access
+ :identifiers: fs_access net_access
Creating a new ruleset
----------------------
@@ -374,6 +408,7 @@ Extending a ruleset
.. kernel-doc:: include/uapi/linux/landlock.h
:identifiers: landlock_rule_type landlock_path_beneath_attr
+ landlock_net_port_attr
Enforcing a ruleset
-------------------
@@ -387,9 +422,9 @@ Current limitations
Filesystem topology modification
--------------------------------
-As for file renaming and linking, a sandboxed thread cannot modify its
-filesystem topology, whether via :manpage:`mount(2)` or
-:manpage:`pivot_root(2)`. However, :manpage:`chroot(2)` calls are not denied.
+Threads sandboxed with filesystem restrictions cannot modify filesystem
+topology, whether via :manpage:`mount(2)` or :manpage:`pivot_root(2)`.
+However, :manpage:`chroot(2)` calls are not denied.
Special filesystems
-------------------
@@ -451,6 +486,14 @@ always allowed when using a kernel that only supports the first or second ABI.
Starting with the Landlock ABI version 3, it is now possible to securely control
truncation thanks to the new ``LANDLOCK_ACCESS_FS_TRUNCATE`` access right.
+Network support (ABI < 4)
+-------------------------
+
+Starting with the Landlock ABI version 4, it is now possible to restrict TCP
+bind and connect actions to only a set of allowed ports thanks to the new
+``LANDLOCK_ACCESS_NET_BIND_TCP`` and ``LANDLOCK_ACCESS_NET_CONNECT_TCP``
+access rights.
+
.. _kernel_support:
Kernel support
@@ -469,6 +512,12 @@ still enable it by adding ``lsm=landlock,[...]`` to
Documentation/admin-guide/kernel-parameters.rst thanks to the bootloader
configuration.
+To be able to explicitly allow TCP operations (e.g., adding a network rule with
+``LANDLOCK_ACCESS_NET_BIND_TCP``), the kernel must support TCP
+(``CONFIG_INET=y``). Otherwise, sys_landlock_add_rule() returns an
+``EAFNOSUPPORT`` error, which can safely be ignored because this kind of TCP
+operation is already not possible.
+
Questions and answers
=====================
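A short sketch of the recommended ``EAFNOSUPPORT`` handling, assuming the same raw-syscall wrapper style used in the examples above; the helper name add_https_rule() is made up for illustration.

.. code-block:: c

    #include <errno.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/landlock.h>

    static inline int landlock_add_rule(const int ruleset_fd,
                                        const enum landlock_rule_type rule_type,
                                        const void *const rule_attr,
                                        const __u32 flags)
    {
            return syscall(__NR_landlock_add_rule, ruleset_fd, rule_type,
                           rule_attr, flags);
    }

    /* Hypothetical helper: allow HTTPS connections, but treat a kernel
     * without TCP support as "nothing to restrict" instead of an error.
     */
    static int add_https_rule(const int ruleset_fd)
    {
            const struct landlock_net_port_attr net_port = {
                    .allowed_access = LANDLOCK_ACCESS_NET_CONNECT_TCP,
                    .port = 443,
            };

            if (landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
                                  &net_port, 0)) {
                    if (errno == EAFNOSUPPORT)
                            return 0;
                    return -1;
            }
            return 0;
    }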
diff --git a/Documentation/userspace-api/media/drivers/camera-sensor.rst b/Documentation/userspace-api/media/drivers/camera-sensor.rst
new file mode 100644
index 000000000000..919a50e8b9d9
--- /dev/null
+++ b/Documentation/userspace-api/media/drivers/camera-sensor.rst
@@ -0,0 +1,104 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. _media_using_camera_sensor_drivers:
+
+Using camera sensor drivers
+===========================
+
+This section describes common practices for how the V4L2 sub-device interface is
+used to control the camera sensor drivers.
+
+You may also find :ref:`media_writing_camera_sensor_drivers` useful.
+
+Frame size
+----------
+
+There are two distinct ways to configure the frame size produced by camera
+sensors.
+
+Freely configurable camera sensor drivers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Freely configurable camera sensor drivers expose the device's internal
+processing pipeline as one or more sub-devices with different cropping and
+scaling configurations. The output size of the device is the result of a series
+of cropping and scaling operations from the device's pixel array's size.
+
+An example of such a driver is the CCS driver.
+
+Register list based drivers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Register list based drivers generally, instead of being able to configure the
+device they control based on user requests, are limited to a number of preset
+configurations that combine a number of different parameters that on hardware
+level are independent. How a driver picks such configuration is based on the
+format set on a source pad at the end of the device's internal pipeline.
+
+Most sensor drivers are implemented this way.
+
+Frame interval configuration
+----------------------------
+
+There are two different methods for obtaining possibilities for different frame
+intervals as well as configuring the frame interval. Which one to implement
+depends on the type of the device.
+
+Raw camera sensors
+~~~~~~~~~~~~~~~~~~
+
+Instead of a high level parameter such as frame interval, the frame interval is
+a result of the configuration of a number of camera sensor implementation
+specific parameters. Luckily, these parameters tend to be the same for more or
+less all modern raw camera sensors.
+
+The frame interval is calculated using the following equation::
+
+ frame interval = (analogue crop width + horizontal blanking) *
+ (analogue crop height + vertical blanking) / pixel rate
+
+The formula is bus independent and is applicable to raw timing parameters on a
+large variety of devices beyond camera sensors. Devices that have no analogue
+crop use the full source image size, i.e. the pixel array size.
+
+Horizontal and vertical blanking are specified by ``V4L2_CID_HBLANK`` and
+``V4L2_CID_VBLANK``, respectively. The unit of the ``V4L2_CID_HBLANK`` control
+is pixels and the unit of the ``V4L2_CID_VBLANK`` is lines. The pixel rate in
+the sensor's **pixel array** is specified by ``V4L2_CID_PIXEL_RATE`` in the same
+sub-device. The unit of that control is pixels per second.
+
+Register list based drivers need to implement read-only sub-device nodes for the
+purpose. Devices that are not register list based need these to configure the
+device's internal processing pipeline.
+
+The first entity in the linear pipeline is the pixel array. The pixel array may
+be followed by other entities that are there to allow configuring binning,
+skipping, scaling or digital crop, see :ref:`VIDIOC_SUBDEV_G_SELECTION
+<VIDIOC_SUBDEV_G_SELECTION>`.
+
+USB cameras etc. devices
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+USB video class hardware, as well as many cameras offering a similar higher
+level interface natively, generally use the concept of frame interval (or frame
+rate) on device level in firmware or hardware. This means lower level controls
+implemented by raw cameras may not be used on uAPI (or even kAPI) to control the
+frame interval on these devices.
+
+Rotation, orientation and flipping
+----------------------------------
+
+Some systems have the camera sensor mounted upside down compared to its natural
+mounting rotation. In such cases, drivers shall expose the information to
+userspace with the :ref:`V4L2_CID_CAMERA_SENSOR_ROTATION
+<v4l2-camera-sensor-rotation>` control.
+
+Sensor drivers shall also report the sensor's mounting orientation with the
+:ref:`V4L2_CID_CAMERA_SENSOR_ORIENTATION <v4l2-camera-sensor-orientation>`.
+
+Sensor drivers that have any vertical or horizontal flips embedded in the
+register programming sequences shall initialize the :ref:`V4L2_CID_HFLIP
+<v4l2-cid-hflip>` and :ref:`V4L2_CID_VFLIP <v4l2-cid-vflip>` controls with the
+values programmed by the register sequences. The default values of these
+controls shall be 0 (disabled). Especially these controls shall not be inverted,
+independently of the sensor's mounting rotation.
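A small worked example of the frame interval equation above, using made-up control values standing in for what would be read from ``V4L2_CID_HBLANK``, ``V4L2_CID_VBLANK`` and ``V4L2_CID_PIXEL_RATE`` on the pixel array sub-device.

.. code-block:: c

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical sensor configuration (units as noted above). */
            uint64_t crop_width = 3280, hblank = 816;   /* pixels */
            uint64_t crop_height = 2464, vblank = 46;   /* lines */
            uint64_t pixel_rate = 182400000;            /* pixels per second */

            double interval = (double)((crop_width + hblank) *
                                       (crop_height + vblank)) / pixel_rate;

            printf("frame interval: %.6f s (%.2f fps)\n",
                   interval, 1.0 / interval);
            return 0;
    }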
diff --git a/Documentation/userspace-api/media/drivers/index.rst b/Documentation/userspace-api/media/drivers/index.rst
index 6708d649afd7..1726f8ec86fa 100644
--- a/Documentation/userspace-api/media/drivers/index.rst
+++ b/Documentation/userspace-api/media/drivers/index.rst
@@ -32,11 +32,13 @@ For more details see the file COPYING in the source distribution of Linux.
:numbered:
aspeed-video
+ camera-sensor
ccs
cx2341x-uapi
dw100
imx-uapi
max2175
+ npcm-video
omap3isp-uapi
st-vgxy61
uvcvideo
diff --git a/Documentation/userspace-api/media/drivers/npcm-video.rst b/Documentation/userspace-api/media/drivers/npcm-video.rst
new file mode 100644
index 000000000000..b47771dd8b27
--- /dev/null
+++ b/Documentation/userspace-api/media/drivers/npcm-video.rst
@@ -0,0 +1,66 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: <isonum.txt>
+
+NPCM video driver
+=================
+
+This driver is used to control the Video Capture/Differentiation (VCD) engine
+and Encoding Compression Engine (ECE) present on Nuvoton NPCM SoCs. The VCD can
+capture a frame from digital video input and compare two frames in memory, and
+the ECE can compress the frame data into HEXTILE format.
+
+Driver-specific Controls
+------------------------
+
+V4L2_CID_NPCM_CAPTURE_MODE
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The VCD engine supports two modes:
+
+- COMPLETE mode:
+
+ Capture the next complete frame into memory.
+
+- DIFF mode:
+
+ Compare the incoming frame with the frame stored in memory, and update the
+ differentiated frame in memory.
+
+Applications can use the ``V4L2_CID_NPCM_CAPTURE_MODE`` control to set the VCD
+mode with different control values (enum v4l2_npcm_capture_mode):
+
+- ``V4L2_NPCM_CAPTURE_MODE_COMPLETE``: will set VCD to COMPLETE mode.
+- ``V4L2_NPCM_CAPTURE_MODE_DIFF``: will set VCD to DIFF mode.
+
+V4L2_CID_NPCM_RECT_COUNT
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+If using V4L2_PIX_FMT_HEXTILE format, VCD will capture frame data and then ECE
+will compress the data into HEXTILE rectangles and store them in V4L2 video
+buffer with the layout defined in Remote Framebuffer Protocol:
+::
+
+ (RFC 6143, https://www.rfc-editor.org/rfc/rfc6143.html#section-7.6.1)
+
+ +--------------+--------------+-------------------+
+ | No. of bytes | Type [Value] | Description |
+ +--------------+--------------+-------------------+
+ | 2 | U16 | x-position |
+ | 2 | U16 | y-position |
+ | 2 | U16 | width |
+ | 2 | U16 | height |
+ | 4 | S32 | encoding-type (5) |
+ +--------------+--------------+-------------------+
+ | HEXTILE rectangle data |
+ +-------------------------------------------------+
+
+Applications can get the video buffer through VIDIOC_DQBUF, and then query the
+``V4L2_CID_NPCM_RECT_COUNT`` control to get the number of HEXTILE
+rectangles in this buffer.
+
+References
+----------
+include/uapi/linux/npcm-video.h
+
+**Copyright** |copy| 2022 Nuvoton Technologies
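A hedged user-space sketch of the dequeue-then-query sequence described above; it assumes a streaming capture fd with MMAP buffers already queued and reads the control with the plain VIDIOC_G_CTRL ioctl, with error handling trimmed.

.. code-block:: c

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/npcm-video.h>
    #include <linux/videodev2.h>

    static int dequeue_and_count_rects(int fd)
    {
            struct v4l2_buffer buf;
            struct v4l2_control ctrl;

            memset(&buf, 0, sizeof(buf));
            buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            buf.memory = V4L2_MEMORY_MMAP;
            if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)
                    return -1;

            memset(&ctrl, 0, sizeof(ctrl));
            ctrl.id = V4L2_CID_NPCM_RECT_COUNT;
            if (ioctl(fd, VIDIOC_G_CTRL, &ctrl) < 0)
                    return -1;

            /* buf holds the HEXTILE data, ctrl.value the rectangle count */
            return ctrl.value;
    }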
diff --git a/Documentation/userspace-api/media/gen-errors.rst b/Documentation/userspace-api/media/gen-errors.rst
index e595d0bea109..4e8defd3612b 100644
--- a/Documentation/userspace-api/media/gen-errors.rst
+++ b/Documentation/userspace-api/media/gen-errors.rst
@@ -59,9 +59,7 @@ Generic Error Codes
- - ``ENOTTY``
- - The ioctl is not supported by the driver, actually meaning that
- the required functionality is not available, or the file
- descriptor is not for a media device.
+ - The ioctl is not supported by the file descriptor.
- - ``ENOSPC``
diff --git a/Documentation/userspace-api/media/v4l/buffer.rst b/Documentation/userspace-api/media/v4l/buffer.rst
index 04dec3e570ed..52bbee81c080 100644
--- a/Documentation/userspace-api/media/v4l/buffer.rst
+++ b/Documentation/userspace-api/media/v4l/buffer.rst
@@ -549,9 +549,9 @@ Buffer Flags
- 0x00000400
- The buffer has been prepared for I/O and can be queued by the
application. Drivers set or clear this flag when the
- :ref:`VIDIOC_QUERYBUF`,
+ :ref:`VIDIOC_QUERYBUF <VIDIOC_QUERYBUF>`,
:ref:`VIDIOC_PREPARE_BUF <VIDIOC_QBUF>`,
- :ref:`VIDIOC_QBUF` or
+ :ref:`VIDIOC_QBUF <VIDIOC_QBUF>` or
:ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` ioctl is called.
* .. _`V4L2-BUF-FLAG-NO-CACHE-INVALIDATE`:
diff --git a/Documentation/userspace-api/media/v4l/control.rst b/Documentation/userspace-api/media/v4l/control.rst
index 4463fce694b0..57893814a1e5 100644
--- a/Documentation/userspace-api/media/v4l/control.rst
+++ b/Documentation/userspace-api/media/v4l/control.rst
@@ -143,9 +143,13 @@ Control IDs
recognise the difference between digital and analogue gain use
controls ``V4L2_CID_DIGITAL_GAIN`` and ``V4L2_CID_ANALOGUE_GAIN``.
+.. _v4l2-cid-hflip:
+
``V4L2_CID_HFLIP`` ``(boolean)``
Mirror the picture horizontally.
+.. _v4l2-cid-vflip:
+
``V4L2_CID_VFLIP`` ``(boolean)``
Mirror the picture vertically.
diff --git a/Documentation/userspace-api/media/v4l/dev-subdev.rst b/Documentation/userspace-api/media/v4l/dev-subdev.rst
index a4f1df7093e8..43988516acdd 100644
--- a/Documentation/userspace-api/media/v4l/dev-subdev.rst
+++ b/Documentation/userspace-api/media/v4l/dev-subdev.rst
@@ -579,20 +579,19 @@ is started.
There are three steps in configuring the streams:
-1) Set up links. Connect the pads between sub-devices using the :ref:`Media
-Controller API <media_controller>`
+1. Set up links. Connect the pads between sub-devices using the
+ :ref:`Media Controller API <media_controller>`
-2) Streams. Streams are declared and their routing is configured by
-setting the routing table for the sub-device using
-:ref:`VIDIOC_SUBDEV_S_ROUTING <VIDIOC_SUBDEV_G_ROUTING>` ioctl. Note that
-setting the routing table will reset formats and selections in the
-sub-device to default values.
+2. Streams. Streams are declared and their routing is configured by setting the
+ routing table for the sub-device using :ref:`VIDIOC_SUBDEV_S_ROUTING
+ <VIDIOC_SUBDEV_G_ROUTING>` ioctl. Note that setting the routing table will
+ reset formats and selections in the sub-device to default values.
-3) Configure formats and selections. Formats and selections of each stream
-are configured separately as documented for plain sub-devices in
-:ref:`format-propagation`. The stream ID is set to the same stream ID
-associated with either sink or source pads of routes configured using the
-:ref:`VIDIOC_SUBDEV_S_ROUTING <VIDIOC_SUBDEV_G_ROUTING>` ioctl.
+3. Configure formats and selections. Formats and selections of each stream are
+ configured separately as documented for plain sub-devices in
+ :ref:`format-propagation`. The stream ID is set to the same stream ID
+ associated with either sink or source pads of routes configured using the
+ :ref:`VIDIOC_SUBDEV_S_ROUTING <VIDIOC_SUBDEV_G_ROUTING>` ioctl.
Multiplexed streams setup example
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -618,11 +617,11 @@ modeled as V4L2 devices, exposed to userspace via /dev/videoX nodes.
To configure this pipeline, the userspace must take the following steps:
-1) Set up media links between entities: connect the sensors to the bridge,
-bridge to the receiver, and the receiver to the DMA engines. This step does
-not differ from normal non-multiplexed media controller setup.
+1. Set up media links between entities: connect the sensors to the bridge,
+ bridge to the receiver, and the receiver to the DMA engines. This step does
+ not differ from normal non-multiplexed media controller setup.
-2) Configure routing
+2. Configure routing
.. flat-table:: Bridge routing table
:header-rows: 1
@@ -656,14 +655,14 @@ not differ from normal non-multiplexed media controller setup.
- V4L2_SUBDEV_ROUTE_FL_ACTIVE
- Pixel data stream from Sensor B
-3) Configure formats and selections
+3. Configure formats and selections
-After configuring routing, the next step is configuring the formats and
-selections for the streams. This is similar to performing this step without
-streams, with just one exception: the ``stream`` field needs to be assigned
-to the value of the stream ID.
+ After configuring routing, the next step is configuring the formats and
+ selections for the streams. This is similar to performing this step without
+ streams, with just one exception: the ``stream`` field needs to be assigned
+ to the value of the stream ID.
-A common way to accomplish this is to start from the sensors and propagate the
-configurations along the stream towards the receiver,
-using :ref:`VIDIOC_SUBDEV_S_FMT <VIDIOC_SUBDEV_G_FMT>` ioctls to configure each
-stream endpoint in each sub-device.
+ A common way to accomplish this is to start from the sensors and propagate
+ the configurations along the stream towards the receiver, using
+ :ref:`VIDIOC_SUBDEV_S_FMT <VIDIOC_SUBDEV_G_FMT>` ioctls to configure each
+ stream endpoint in each sub-device.
diff --git a/Documentation/userspace-api/media/v4l/dv-timings.rst b/Documentation/userspace-api/media/v4l/dv-timings.rst
index e17f056b129f..4b19bcb4bd80 100644
--- a/Documentation/userspace-api/media/v4l/dv-timings.rst
+++ b/Documentation/userspace-api/media/v4l/dv-timings.rst
@@ -33,6 +33,27 @@ current DV timings they use the
the DV timings as seen by the video receiver applications use the
:ref:`VIDIOC_QUERY_DV_TIMINGS` ioctl.
+When the hardware detects a video source change (e.g. the video
+signal appears or disappears, or the video resolution changes), then
+it will issue a `V4L2_EVENT_SOURCE_CHANGE` event. Use the
+:ref:`ioctl VIDIOC_SUBSCRIBE_EVENT <VIDIOC_SUBSCRIBE_EVENT>` and the
+:ref:`VIDIOC_DQEVENT` to check if this event was reported.
+
+If the video signal changed, then the application has to stop
+streaming, free all buffers, and call the :ref:`VIDIOC_QUERY_DV_TIMINGS`
+to obtain the new video timings, and if they are valid, it can set
+those by calling the :ref:`ioctl VIDIOC_S_DV_TIMINGS <VIDIOC_G_DV_TIMINGS>`.
+This will also update the format, so use the :ref:`ioctl VIDIOC_G_FMT <VIDIOC_G_FMT>`
+to obtain the new format. Now the application can allocate new buffers
+and start streaming again.
+
+The :ref:`VIDIOC_QUERY_DV_TIMINGS` will just report what the
+hardware detects; it will never change the configuration. If the
+currently set timings and the actually detected timings differ, then
+typically this will mean that you will not be able to capture any
+video. The correct approach is to rely on the `V4L2_EVENT_SOURCE_CHANGE`
+event so you know when something changed.
+
Applications can make use of the :ref:`input-capabilities` and
:ref:`output-capabilities` flags to determine whether the digital
video ioctls can be used with the given input or output.
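A condensed sketch of the flow described above, with buffer teardown and reallocation reduced to comments; the helper name is hypothetical and the ioctls are the standard V4L2 ones.

.. code-block:: c

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int handle_source_change(int fd)
    {
            struct v4l2_event_subscription sub;
            struct v4l2_event ev;
            struct v4l2_dv_timings timings;

            memset(&sub, 0, sizeof(sub));
            sub.type = V4L2_EVENT_SOURCE_CHANGE;
            if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
                    return -1;

            /* Blocks until an event arrives; poll() with POLLPRI also works. */
            if (ioctl(fd, VIDIOC_DQEVENT, &ev) < 0)
                    return -1;

            /* Stop streaming and free all buffers here, then requery. */
            memset(&timings, 0, sizeof(timings));
            if (ioctl(fd, VIDIOC_QUERY_DV_TIMINGS, &timings) == 0)
                    ioctl(fd, VIDIOC_S_DV_TIMINGS, &timings);

            /* Get the updated format, allocate new buffers, restart streaming. */
            return 0;
    }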
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-reserved.rst b/Documentation/userspace-api/media/v4l/pixfmt-reserved.rst
index 296ad2025e8d..886ba7b08d6b 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-reserved.rst
+++ b/Documentation/userspace-api/media/v4l/pixfmt-reserved.rst
@@ -288,6 +288,13 @@ please make a proposal on the linux-media mailing list.
- 'MT2110R'
- This format is two-planar 10-Bit raster mode and having similitude with
``V4L2_PIX_FMT_MM21`` in term of alignment and tiling. Used for AVC.
+ * .. _V4L2-PIX-FMT-HEXTILE:
+
+ - ``V4L2_PIX_FMT_HEXTILE``
+ - 'HXTL'
+ - Compressed format used by Nuvoton NPCM video driver. This format is
+ defined in Remote Framebuffer Protocol (RFC 6143, chapter 7.7.4 Hextile
+ Encoding).
.. raw:: latex
\normalsize
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-srggb12p.rst b/Documentation/userspace-api/media/v4l/pixfmt-srggb12p.rst
index b6e79e2f8ce4..7c3810ff783c 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-srggb12p.rst
+++ b/Documentation/userspace-api/media/v4l/pixfmt-srggb12p.rst
@@ -60,7 +60,7 @@ Each cell is one byte.
G\ :sub:`10low`\ (bits 3--0)
- G\ :sub:`12high`
- R\ :sub:`13high`
- - R\ :sub:`13low`\ (bits 3--2)
+ - R\ :sub:`13low`\ (bits 7--4)
G\ :sub:`12low`\ (bits 3--0)
- - start + 12:
@@ -82,6 +82,6 @@ Each cell is one byte.
G\ :sub:`30low`\ (bits 3--0)
- G\ :sub:`32high`
- R\ :sub:`33high`
- - R\ :sub:`33low`\ (bits 3--2)
+ - R\ :sub:`33low`\ (bits 7--4)
G\ :sub:`32low`\ (bits 3--0)
diff --git a/Documentation/userspace-api/media/v4l/subdev-formats.rst b/Documentation/userspace-api/media/v4l/subdev-formats.rst
index a3a35eeed708..eb3cd20b0cf2 100644
--- a/Documentation/userspace-api/media/v4l/subdev-formats.rst
+++ b/Documentation/userspace-api/media/v4l/subdev-formats.rst
@@ -949,6 +949,78 @@ The following tables list existing packed RGB formats.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-RGB666-2X9-BE:
+
+ - MEDIA_BUS_FMT_RGB666_2X9_BE
+ - 0x1025
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - r\ :sub:`5`
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ - g\ :sub:`5`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
+ - b\ :sub:`5`
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
* .. _MEDIA-BUS-FMT-BGR666-1X18:
- MEDIA_BUS_FMT_BGR666_1X18
diff --git a/MAINTAINERS b/MAINTAINERS
index c6bafe60419d..97f51d5ec1cf 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -294,6 +294,8 @@ F: drivers/pnp/pnpacpi/
F: include/acpi/
F: include/linux/acpi.h
F: include/linux/fwnode.h
+F: include/linux/fw_table.h
+F: lib/fw_table.c
F: tools/power/acpi/
ACPI APEI
@@ -309,7 +311,7 @@ ACPI COMPONENT ARCHITECTURE (ACPICA)
M: Robert Moore <robert.moore@intel.com>
M: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
L: linux-acpi@vger.kernel.org
-L: acpica-devel@lists.linuxfoundation.org
+L: acpica-devel@lists.linux.dev
S: Supported
W: https://acpica.org/
W: https://github.com/acpica/acpica/
@@ -1119,7 +1121,7 @@ ANALOG DEVICES INC AD4130 DRIVER
M: Cosmin Tanislav <cosmin.tanislav@analog.com>
L: linux-iio@vger.kernel.org
S: Supported
-W: http://ez.analog.com/community/linux-device-drivers
+W: https://ez.analog.com/linux-software-drivers
F: Documentation/ABI/testing/sysfs-bus-iio-adc-ad4130
F: Documentation/devicetree/bindings/iio/adc/adi,ad4130.yaml
F: drivers/iio/adc/ad4130.c
@@ -1152,7 +1154,7 @@ ANALOG DEVICES INC AD74115 DRIVER
M: Cosmin Tanislav <cosmin.tanislav@analog.com>
L: linux-iio@vger.kernel.org
S: Supported
-W: http://ez.analog.com/community/linux-device-drivers
+W: https://ez.analog.com/linux-software-drivers
F: Documentation/devicetree/bindings/iio/addac/adi,ad74115.yaml
F: drivers/iio/addac/ad74115.c
@@ -2075,12 +2077,16 @@ F: Documentation/trace/coresight/*
F: drivers/hwtracing/coresight/*
F: include/dt-bindings/arm/coresight-cti-dt.h
F: include/linux/coresight*
+F: include/uapi/linux/coresight*
F: samples/coresight/*
+F: tools/perf/Documentation/arm-coresight.txt
F: tools/perf/arch/arm/util/auxtrace.c
F: tools/perf/arch/arm/util/cs-etm.c
F: tools/perf/arch/arm/util/cs-etm.h
F: tools/perf/arch/arm/util/pmu.c
+F: tools/perf/tests/shell/*coresight*
F: tools/perf/tests/shell/coresight/*
+F: tools/perf/tests/shell/lib/*coresight*
F: tools/perf/util/cs-etm-decoder/*
F: tools/perf/util/cs-etm.*
@@ -2504,6 +2510,18 @@ F: drivers/rtc/rtc-nct3018y.c
F: include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
F: include/dt-bindings/clock/nuvoton,npcm845-clk.h
+ARM/NUVOTON NPCM VIDEO ENGINE DRIVER
+M: Joseph Liu <kwliu@nuvoton.com>
+M: Marvin Lin <kflin@nuvoton.com>
+L: linux-media@vger.kernel.org
+L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/media/nuvoton,npcm-ece.yaml
+F: Documentation/devicetree/bindings/media/nuvoton,npcm-vcd.yaml
+F: Documentation/userspace-api/media/drivers/npcm-video.rst
+F: drivers/media/platform/nuvoton/
+F: include/uapi/linux/npcm-video.h
+
ARM/NUVOTON WPCM450 ARCHITECTURE
M: Jonathan Neuschäfer <j.neuschaefer@gmx.net>
L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
@@ -3539,12 +3557,14 @@ R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
R: Rasmus Villemoes <linux@rasmusvillemoes.dk>
S: Maintained
F: include/linux/bitfield.h
+F: include/linux/bitmap-str.h
F: include/linux/bitmap.h
F: include/linux/bits.h
F: include/linux/cpumask.h
F: include/linux/find.h
F: include/linux/nodemask.h
F: include/vdso/bits.h
+F: lib/bitmap-str.c
F: lib/bitmap.c
F: lib/cpumask.c
F: lib/cpumask_kunit.c
@@ -3775,7 +3795,7 @@ F: net/sched/act_bpf.c
F: net/sched/cls_bpf.c
F: samples/bpf/
F: scripts/bpf_doc.py
-F: scripts/pahole-flags.sh
+F: scripts/Makefile.btf
F: scripts/pahole-version.sh
F: tools/bpf/
F: tools/lib/bpf/
@@ -5238,6 +5258,7 @@ L: linux-cxl@vger.kernel.org
S: Maintained
F: drivers/cxl/
F: include/uapi/linux/cxl_mem.h
+F: tools/testing/cxl/
COMPUTE EXPRESS LINK PMU (CPMU)
M: Jonathan Cameron <jonathan.cameron@huawei.com>
@@ -6134,6 +6155,13 @@ L: linux-gpio@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-gpio-mm.c
+DIGITEQ AUTOMOTIVE MGB4 V4L2 DRIVER
+M: Martin Tuma <martin.tuma@digiteqautomotive.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: Documentation/admin-guide/media/mgb4.rst
+F: drivers/media/pci/mgb4/
+
DIOLAN U2C-12 I2C DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-i2c@vger.kernel.org
@@ -6506,7 +6534,7 @@ F: drivers/gpu/drm/ast/
DRM DRIVER FOR BOCHS VIRTUAL GPU
M: Gerd Hoffmann <kraxel@redhat.com>
-L: virtualization@lists.linux-foundation.org
+L: virtualization@lists.linux.dev
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
F: drivers/gpu/drm/tiny/bochs.c
@@ -6753,7 +6781,7 @@ F: drivers/gpu/drm/tiny/repaper.c
DRM DRIVER FOR QEMU'S CIRRUS DEVICE
M: Dave Airlie <airlied@redhat.com>
M: Gerd Hoffmann <kraxel@redhat.com>
-L: virtualization@lists.linux-foundation.org
+L: virtualization@lists.linux.dev
S: Obsolete
W: https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -6762,7 +6790,7 @@ F: drivers/gpu/drm/tiny/cirrus.c
DRM DRIVER FOR QXL VIRTUAL GPU
M: Dave Airlie <airlied@redhat.com>
M: Gerd Hoffmann <kraxel@redhat.com>
-L: virtualization@lists.linux-foundation.org
+L: virtualization@lists.linux.dev
L: spice-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -7105,7 +7133,7 @@ F: drivers/gpu/host1x/
F: include/linux/host1x.h
F: include/uapi/drm/tegra_drm.h
-DRM DRIVERS FOR RENESAS
+DRM DRIVERS FOR RENESAS R-CAR
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
M: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
L: dri-devel@lists.freedesktop.org
@@ -7116,7 +7144,16 @@ F: Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml
F: Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.yaml
F: Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml
F: Documentation/devicetree/bindings/display/renesas,du.yaml
-F: drivers/gpu/drm/renesas/
+F: drivers/gpu/drm/renesas/rcar-du/
+
+DRM DRIVERS FOR RENESAS SHMOBILE
+M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+M: Geert Uytterhoeven <geert+renesas@glider.be>
+L: dri-devel@lists.freedesktop.org
+L: linux-renesas-soc@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/display/renesas,shmobile-lcdc.yaml
+F: drivers/gpu/drm/renesas/shmobile/
F: include/linux/platform_data/shmob_drm.h
DRM DRIVERS FOR ROCKCHIP
@@ -7852,7 +7889,7 @@ F: drivers/net/can/usb/etas_es58x/
ETHERNET BRIDGE
M: Roopa Prabhu <roopa@nvidia.com>
M: Nikolay Aleksandrov <razor@blackwall.org>
-L: bridge@lists.linux-foundation.org (moderated for non-subscribers)
+L: bridge@lists.linux.dev
L: netdev@vger.kernel.org
S: Maintained
W: http://www.linuxfoundation.org/en/Net:Bridge
@@ -8128,6 +8165,27 @@ F: include/linux/fs_types.h
F: include/uapi/linux/fs.h
F: include/uapi/linux/openat2.h
+FILESYSTEMS [EXPORTFS]
+M: Chuck Lever <chuck.lever@oracle.com>
+M: Jeff Layton <jlayton@kernel.org>
+R: Amir Goldstein <amir73il@gmail.com>
+L: linux-fsdevel@vger.kernel.org
+L: linux-nfs@vger.kernel.org
+S: Supported
+F: Documentation/filesystems/nfs/exporting.rst
+F: fs/exportfs/
+F: fs/fhandle.c
+F: include/linux/exportfs.h
+
+FILESYSTEMS [IOMAP]
+M: Christian Brauner <brauner@kernel.org>
+R: Darrick J. Wong <djwong@kernel.org>
+L: linux-xfs@vger.kernel.org
+L: linux-fsdevel@vger.kernel.org
+S: Supported
+F: fs/iomap/
+F: include/linux/iomap.h
+
FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
M: Riku Voipio <riku.voipio@iki.fi>
L: linux-hwmon@vger.kernel.org
@@ -8332,6 +8390,14 @@ F: Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
F: drivers/spi/spi-fsl-dspi.c
F: include/linux/spi/spi-fsl-dspi.h
+FREESCALE eDMA DRIVER
+M: Frank Li <Frank.Li@nxp.com>
+L: imx@lists.linux.dev
+L: dmaengine@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/dma/fsl,edma.yaml
+F: drivers/dma/fsl-edma*.*
+
FREESCALE ENETC ETHERNET DRIVERS
M: Claudiu Manoil <claudiu.manoil@nxp.com>
M: Vladimir Oltean <vladimir.oltean@nxp.com>
@@ -8808,6 +8874,7 @@ F: include/linux/phy/
GENERIC PINCTRL I2C DEMULTIPLEXER DRIVER
M: Wolfram Sang <wsa+renesas@sang-engineering.com>
S: Supported
+F: Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.yaml
F: drivers/i2c/muxes/i2c-demux-pinctrl.c
GENERIC PM DOMAINS
@@ -9074,6 +9141,13 @@ F: drivers/staging/greybus/sdio.c
F: drivers/staging/greybus/spi.c
F: drivers/staging/greybus/spilib.c
+GREYBUS BEAGLEPLAY DRIVERS
+M: Ayush Singh <ayushdevel1325@gmail.com>
+L: greybus-dev@lists.linaro.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/net/ti,cc1352p7.yaml
+F: drivers/greybus/gb-beagleplay.c
+
GREYBUS SUBSYSTEM
M: Johan Hovold <johan@kernel.org>
M: Alex Elder <elder@kernel.org>
@@ -11032,15 +11106,6 @@ L: linux-mips@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/sgi/ioc3-eth.c
-IOMAP FILESYSTEM LIBRARY
-M: Darrick J. Wong <djwong@kernel.org>
-L: linux-xfs@vger.kernel.org
-L: linux-fsdevel@vger.kernel.org
-S: Supported
-T: git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
-F: fs/iomap/
-F: include/linux/iomap.h
-
IOMMU DMA-API LAYER
M: Robin Murphy <robin.murphy@arm.com>
L: iommu@lists.linux.dev
@@ -11504,7 +11569,6 @@ S: Supported
W: http://nfs.sourceforge.net/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux.git
F: Documentation/filesystems/nfs/
-F: fs/exportfs/
F: fs/lockd/
F: fs/nfs_common/
F: fs/nfsd/
@@ -11991,12 +12055,6 @@ F: drivers/leds/
F: include/dt-bindings/leds/
F: include/linux/leds.h
-LEGACY EEPROM DRIVER
-M: Jean Delvare <jdelvare@suse.com>
-S: Maintained
-F: Documentation/misc-devices/eeprom.rst
-F: drivers/misc/eeprom/eeprom.c
-
LEGO MINDSTORMS EV3
R: David Lechner <david@lechnology.com>
S: Maintained
@@ -12923,7 +12981,7 @@ MAX31827 TEMPERATURE SWITCH DRIVER
M: Daniel Matyas <daniel.matyas@analog.com>
L: linux-hwmon@vger.kernel.org
S: Supported
-W: http://ez.analog.com/community/linux-device-drivers
+W: https://ez.analog.com/linux-software-drivers
F: Documentation/devicetree/bindings/hwmon/adi,max31827.yaml
F: Documentation/hwmon/max31827.rst
F: drivers/hwmon/max31827.c
@@ -13771,6 +13829,12 @@ F: drivers/infiniband/hw/mlx5/
F: include/linux/mlx5/
F: include/uapi/rdma/mlx5-abi.h
+MELLANOX MLX5 VDPA DRIVER
+M: Dragos Tatulea <dtatulea@nvidia.com>
+L: virtualization@lists.linux-foundation.org
+S: Supported
+F: drivers/vdpa/mlx5/
+
MELLANOX MLXCPLD I2C AND MUX DRIVER
M: Vadim Pasternak <vadimp@nvidia.com>
M: Michael Shych <michaelsh@nvidia.com>
@@ -14124,6 +14188,13 @@ S: Supported
F: Documentation/devicetree/bindings/regulator/mcp16502-regulator.txt
F: drivers/regulator/mcp16502.c
+MICROCHIP MCP3564 ADC DRIVER
+M: Marius Cristea <marius.cristea@microchip.com>
+L: linux-iio@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/iio/adc/microchip,mcp3564.yaml
+F: drivers/iio/adc/mcp3564.c
+
MICROCHIP MCP3911 ADC DRIVER
M: Marcus Folkesson <marcus.folkesson@gmail.com>
M: Kent Gustavsson <kent@minoris.se>
@@ -14652,6 +14723,14 @@ L: linux-mtd@lists.infradead.org
S: Maintained
F: drivers/mtd/devices/docg3*
+MT9M114 ONSEMI SENSOR DRIVER
+M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+T: git git://linuxtv.org/media_tree.git
+F: Documentation/devicetree/bindings/media/i2c/onnn,mt9m114.yaml
+F: drivers/media/i2c/mt9m114.c
+
MT9P031 APTINA CAMERA SENSOR
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-media@vger.kernel.org
@@ -15894,7 +15973,7 @@ L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media_tree.git
F: Documentation/devicetree/bindings/media/i2c/ovti,ov4689.yaml
-F: drivers/media/i2c/ov5647.c
+F: drivers/media/i2c/ov4689.c
OMNIVISION OV5640 SENSOR DRIVER
M: Steve Longerbeam <slongerbeam@gmail.com>
@@ -15984,8 +16063,7 @@ F: Documentation/devicetree/bindings/media/i2c/ovti,ov8858.yaml
F: drivers/media/i2c/ov8858.c
OMNIVISION OV9282 SENSOR DRIVER
-M: Paul J. Murphy <paul.j.murphy@intel.com>
-M: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
+M: Dave Stevenson <dave.stevenson@raspberrypi.com>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media_tree.git
@@ -16316,7 +16394,7 @@ M: Juergen Gross <jgross@suse.com>
R: Ajay Kaher <akaher@vmware.com>
R: Alexey Makhalov <amakhalov@vmware.com>
R: VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
-L: virtualization@lists.linux-foundation.org
+L: virtualization@lists.linux.dev
L: x86@kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
@@ -17659,15 +17737,6 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qlcnic/
-QLOGIC QLGE 10Gb ETHERNET DRIVER
-M: Manish Chopra <manishc@marvell.com>
-M: GR-Linux-NIC-Dev@marvell.com
-M: Coiby Xu <coiby.xu@gmail.com>
-L: netdev@vger.kernel.org
-S: Supported
-F: Documentation/networking/device_drivers/qlogic/qlge.rst
-F: drivers/staging/qlge/
-
QM1D1B0004 MEDIA DRIVER
M: Akihiro Tsukada <tskd08@gmail.com>
L: linux-media@vger.kernel.org
@@ -18643,6 +18712,7 @@ F: sound/soc/rockchip/rockchip_i2s_tdm.*
ROCKCHIP ISP V1 DRIVER
M: Dafna Hirschfeld <dafna@fastmail.com>
+M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-media@vger.kernel.org
L: linux-rockchip@lists.infradead.org
S: Maintained
@@ -18693,6 +18763,12 @@ S: Maintained
F: Documentation/devicetree/bindings/iio/light/bh1750.yaml
F: drivers/iio/light/bh1750.c
+ROHM BM1390 PRESSURE SENSOR DRIVER
+M: Matti Vaittinen <mazziesaccount@gmail.com>
+L: linux-iio@vger.kernel.org
+S: Supported
+F: drivers/iio/pressure/rohm-bm1390.c
+
ROHM BU270xx LIGHT SENSOR DRIVERs
M: Matti Vaittinen <mazziesaccount@gmail.com>
L: linux-iio@vger.kernel.org
@@ -20131,19 +20207,15 @@ T: git git://linuxtv.org/media_tree.git
F: drivers/media/i2c/imx319.c
SONY IMX334 SENSOR DRIVER
-M: Paul J. Murphy <paul.j.murphy@intel.com>
-M: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
L: linux-media@vger.kernel.org
-S: Maintained
+S: Orphan
T: git git://linuxtv.org/media_tree.git
F: Documentation/devicetree/bindings/media/i2c/sony,imx334.yaml
F: drivers/media/i2c/imx334.c
SONY IMX335 SENSOR DRIVER
-M: Paul J. Murphy <paul.j.murphy@intel.com>
-M: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
L: linux-media@vger.kernel.org
-S: Maintained
+S: Orphan
T: git git://linuxtv.org/media_tree.git
F: Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml
F: drivers/media/i2c/imx335.c
@@ -20156,10 +20228,8 @@ T: git git://linuxtv.org/media_tree.git
F: drivers/media/i2c/imx355.c
SONY IMX412 SENSOR DRIVER
-M: Paul J. Murphy <paul.j.murphy@intel.com>
-M: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
L: linux-media@vger.kernel.org
-S: Maintained
+S: Orphan
T: git git://linuxtv.org/media_tree.git
F: Documentation/devicetree/bindings/media/i2c/sony,imx412.yaml
F: drivers/media/i2c/imx412.c
@@ -21723,6 +21793,13 @@ F: Documentation/devicetree/bindings/media/i2c/ti,ds90*
F: drivers/media/i2c/ds90*
F: include/media/i2c/ds90*
+TI J721E CSI2RX DRIVER
+M: Jai Luthra <j-luthra@ti.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/media/ti,j721e-csi2rx-shim.yaml
+F: drivers/media/platform/ti/j721e-csi2rx/
+
TI KEYSTONE MULTICORE NAVIGATOR DRIVERS
M: Nishanth Menon <nm@ti.com>
M: Santosh Shilimkar <ssantosh@kernel.org>
@@ -22039,6 +22116,14 @@ W: https://github.com/srcres258/linux-doc
T: git git://github.com/srcres258/linux-doc.git doc-zh-tw
F: Documentation/translations/zh_TW/
+TRUSTED SECURITY MODULE (TSM) ATTESTATION REPORTS
+M: Dan Williams <dan.j.williams@intel.com>
+L: linux-coco@lists.linux.dev
+S: Maintained
+F: Documentation/ABI/testing/configfs-tsm
+F: drivers/virt/coco/tsm.c
+F: include/linux/tsm.h
+
TTY LAYER AND SERIAL DRIVERS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
M: Jiri Slaby <jirislaby@kernel.org>
@@ -22887,7 +22972,7 @@ VIRTIO AND VHOST VSOCK DRIVER
M: Stefan Hajnoczi <stefanha@redhat.com>
M: Stefano Garzarella <sgarzare@redhat.com>
L: kvm@vger.kernel.org
-L: virtualization@lists.linux-foundation.org
+L: virtualization@lists.linux.dev
L: netdev@vger.kernel.org
S: Maintained
F: drivers/vhost/vsock.c
@@ -22899,7 +22984,7 @@ F: net/vmw_vsock/virtio_transport_common.c
VIRTIO BALLOON
M: "Michael S. Tsirkin" <mst@redhat.com>
M: David Hildenbrand <david@redhat.com>
-L: virtualization@lists.linux-foundation.org
+L: virtualization@lists.linux.dev
S: Maintained
F: drivers/virtio/virtio_balloon.c
F: include/linux/balloon_compaction.h
@@ -22911,7 +22996,7 @@ M: "Michael S. Tsirkin" <mst@redhat.com>
M: Jason Wang <jasowang@redhat.com>
R: Paolo Bonzini <pbonzini@redhat.com>
R: Stefan Hajnoczi <stefanha@redhat.com>
-L: virtualization@lists.linux-foundation.org
+L: virtualization@lists.linux.dev
S: Maintained
F: drivers/block/virtio_blk.c
F: drivers/scsi/virtio_scsi.c
@@ -22920,7 +23005,7 @@ F: include/uapi/linux/virtio_scsi.h
VIRTIO CONSOLE DRIVER
M: Amit Shah <amit@kernel.org>
-L: virtualization@lists.linux-foundation.org
+L: virtualization@lists.linux.dev
S: Maintained
F: drivers/char/virtio_console.c
F: include/linux/virtio_console.h
@@ -22930,7 +23015,7 @@ VIRTIO CORE AND NET DRIVERS
M: "Michael S. Tsirkin" <mst@redhat.com>
M: Jason Wang <jasowang@redhat.com>
R: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
-L: virtualization@lists.linux-foundation.org
+L: virtualization@lists.linux.dev
S: Maintained
F: Documentation/ABI/testing/sysfs-bus-vdpa
F: Documentation/ABI/testing/sysfs-class-vduse
@@ -22949,7 +23034,7 @@ F: tools/virtio/
VIRTIO CRYPTO DRIVER
M: Gonglei <arei.gonglei@huawei.com>
-L: virtualization@lists.linux-foundation.org
+L: virtualization@lists.linux.dev
L: linux-crypto@vger.kernel.org
S: Maintained
F: drivers/crypto/virtio/
@@ -22960,7 +23045,7 @@ M: Cornelia Huck <cohuck@redhat.com>
M: Halil Pasic <pasic@linux.ibm.com>
M: Eric Farman <farman@linux.ibm.com>
L: linux-s390@vger.kernel.org
-L: virtualization@lists.linux-foundation.org
+L: virtualization@lists.linux.dev
L: kvm@vger.kernel.org
S: Supported
F: arch/s390/include/uapi/asm/virtio-ccw.h
@@ -22970,7 +23055,7 @@ VIRTIO FILE SYSTEM
M: Vivek Goyal <vgoyal@redhat.com>
M: Stefan Hajnoczi <stefanha@redhat.com>
M: Miklos Szeredi <miklos@szeredi.hu>
-L: virtualization@lists.linux-foundation.org
+L: virtualization@lists.linux.dev
L: linux-fsdevel@vger.kernel.org
S: Supported
W: https://virtio-fs.gitlab.io/
@@ -22982,7 +23067,7 @@ VIRTIO GPIO DRIVER
M: Enrico Weigelt, metux IT consult <info@metux.net>
M: Viresh Kumar <vireshk@kernel.org>
L: linux-gpio@vger.kernel.org
-L: virtualization@lists.linux-foundation.org
+L: virtualization@lists.linux.dev
S: Maintained
F: drivers/gpio/gpio-virtio.c
F: include/uapi/linux/virtio_gpio.h
@@ -22993,7 +23078,7 @@ M: Gerd Hoffmann <kraxel@redhat.com>
R: Gurchetan Singh <gurchetansingh@chromium.org>
R: Chia-I Wu <olvaffe@gmail.com>
L: dri-devel@lists.freedesktop.org
-L: virtualization@lists.linux-foundation.org
+L: virtualization@lists.linux.dev
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
F: drivers/gpu/drm/ci/xfails/virtio*
@@ -23004,7 +23089,7 @@ VIRTIO HOST (VHOST)
M: "Michael S. Tsirkin" <mst@redhat.com>
M: Jason Wang <jasowang@redhat.com>
L: kvm@vger.kernel.org
-L: virtualization@lists.linux-foundation.org
+L: virtualization@lists.linux.dev
L: netdev@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git
@@ -23020,7 +23105,7 @@ M: Jason Wang <jasowang@redhat.com>
M: Mike Christie <michael.christie@oracle.com>
R: Paolo Bonzini <pbonzini@redhat.com>
R: Stefan Hajnoczi <stefanha@redhat.com>
-L: virtualization@lists.linux-foundation.org
+L: virtualization@lists.linux.dev
S: Maintained
F: drivers/vhost/scsi.c
@@ -23028,7 +23113,7 @@ VIRTIO I2C DRIVER
M: Conghui Chen <conghui.chen@intel.com>
M: Viresh Kumar <viresh.kumar@linaro.org>
L: linux-i2c@vger.kernel.org
-L: virtualization@lists.linux-foundation.org
+L: virtualization@lists.linux.dev
S: Maintained
F: drivers/i2c/busses/i2c-virtio.c
F: include/uapi/linux/virtio_i2c.h
@@ -23041,14 +23126,14 @@ F: include/uapi/linux/virtio_input.h
VIRTIO IOMMU DRIVER
M: Jean-Philippe Brucker <jean-philippe@linaro.org>
-L: virtualization@lists.linux-foundation.org
+L: virtualization@lists.linux.dev
S: Maintained
F: drivers/iommu/virtio-iommu.c
F: include/uapi/linux/virtio_iommu.h
VIRTIO MEM DRIVER
M: David Hildenbrand <david@redhat.com>
-L: virtualization@lists.linux-foundation.org
+L: virtualization@lists.linux.dev
S: Maintained
W: https://virtio-mem.gitlab.io/
F: drivers/virtio/virtio_mem.c
@@ -23056,7 +23141,7 @@ F: include/uapi/linux/virtio_mem.h
VIRTIO PMEM DRIVER
M: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
-L: virtualization@lists.linux-foundation.org
+L: virtualization@lists.linux.dev
S: Maintained
F: drivers/nvdimm/nd_virtio.c
F: drivers/nvdimm/virtio_pmem.c
@@ -23064,7 +23149,7 @@ F: drivers/nvdimm/virtio_pmem.c
VIRTIO SOUND DRIVER
M: Anton Yakovlev <anton.yakovlev@opensynergy.com>
M: "Michael S. Tsirkin" <mst@redhat.com>
-L: virtualization@lists.linux-foundation.org
+L: virtualization@lists.linux.dev
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Maintained
F: include/uapi/linux/virtio_snd.h
@@ -23113,16 +23198,9 @@ W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
F: drivers/media/test-drivers/vivid/*
-VLYNQ BUS
-M: Florian Fainelli <f.fainelli@gmail.com>
-L: openwrt-devel@lists.openwrt.org (subscribers-only)
-S: Maintained
-F: drivers/vlynq/vlynq.c
-F: include/linux/vlynq.h
-
VM SOCKETS (AF_VSOCK)
M: Stefano Garzarella <sgarzare@redhat.com>
-L: virtualization@lists.linux-foundation.org
+L: virtualization@lists.linux.dev
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/vsockmon.c
@@ -23166,7 +23244,7 @@ VMWARE HYPERVISOR INTERFACE
M: Ajay Kaher <akaher@vmware.com>
M: Alexey Makhalov <amakhalov@vmware.com>
R: VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
-L: virtualization@lists.linux-foundation.org
+L: virtualization@lists.linux.dev
L: x86@kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/vmware
diff --git a/Makefile b/Makefile
index a1e931ff7eb0..a82587fbdad3 100644
--- a/Makefile
+++ b/Makefile
@@ -277,10 +277,6 @@ no-dot-config-targets := $(clean-targets) \
$(version_h) headers headers_% archheaders archscripts \
%asm-generic kernelversion %src-pkg dt_binding_check \
outputmakefile rustavailable rustfmt rustfmtcheck
-# Installation targets should not require compiler. Unfortunately, vdso_install
-# is an exception where build artifacts may be updated. This must be fixed.
-no-compiler-targets := $(no-dot-config-targets) install dtbs_install \
- headers_install modules_install modules_sign kernelrelease image_name
no-sync-config-targets := $(no-dot-config-targets) %install modules_sign kernelrelease \
image_name
single-targets := %.a %.i %.ko %.lds %.ll %.lst %.mod %.o %.rsi %.s %.symtypes %/
@@ -288,7 +284,6 @@ single-targets := %.a %.i %.ko %.lds %.ll %.lst %.mod %.o %.rsi %.s %.symtypes %
config-build :=
mixed-build :=
need-config := 1
-need-compiler := 1
may-sync-config := 1
single-build :=
@@ -298,18 +293,14 @@ ifneq ($(filter $(no-dot-config-targets), $(MAKECMDGOALS)),)
endif
endif
-ifneq ($(filter $(no-compiler-targets), $(MAKECMDGOALS)),)
- ifeq ($(filter-out $(no-compiler-targets), $(MAKECMDGOALS)),)
- need-compiler :=
- endif
-endif
-
ifneq ($(filter $(no-sync-config-targets), $(MAKECMDGOALS)),)
ifeq ($(filter-out $(no-sync-config-targets), $(MAKECMDGOALS)),)
may-sync-config :=
endif
endif
+need-compiler := $(may-sync-config)
+
ifneq ($(KBUILD_EXTMOD),)
may-sync-config :=
endif
@@ -522,8 +513,6 @@ LZ4 = lz4c
XZ = xz
ZSTD = zstd
-PAHOLE_FLAGS = $(shell PAHOLE=$(PAHOLE) $(srctree)/scripts/pahole-flags.sh)
-
CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
-Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF)
NOSTDINC_FLAGS :=
@@ -614,7 +603,6 @@ export KBUILD_RUSTFLAGS RUSTFLAGS_KERNEL RUSTFLAGS_MODULE
export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_RUSTFLAGS_MODULE KBUILD_LDFLAGS_MODULE
export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL KBUILD_RUSTFLAGS_KERNEL
-export PAHOLE_FLAGS
# Files to ignore in find ... statements
@@ -684,7 +672,7 @@ ifdef config-build
# *config targets only - make sure prerequisites are updated, and descend
# in scripts/kconfig to make the *config target
-# Read arch specific Makefile to set KBUILD_DEFCONFIG as needed.
+# Read arch-specific Makefile to set KBUILD_DEFCONFIG as needed.
# KBUILD_DEFCONFIG may point out an alternative default configuration
# used for 'make defconfig'
include $(srctree)/arch/$(SRCARCH)/Makefile
@@ -698,7 +686,7 @@ config: outputmakefile scripts_basic FORCE
else #!config-build
# ===========================================================================
-# Build targets only - this includes vmlinux, arch specific targets, clean
+# Build targets only - this includes vmlinux, arch-specific targets, clean
# targets and others. In general all targets except *config targets.
# If building an external module we do not care about the all: rule
@@ -1011,6 +999,7 @@ KBUILD_CPPFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
# include additional Makefiles when needed
include-y := scripts/Makefile.extrawarn
include-$(CONFIG_DEBUG_INFO) += scripts/Makefile.debug
+include-$(CONFIG_DEBUG_INFO_BTF)+= scripts/Makefile.btf
include-$(CONFIG_KASAN) += scripts/Makefile.kasan
include-$(CONFIG_KCSAN) += scripts/Makefile.kcsan
include-$(CONFIG_KMSAN) += scripts/Makefile.kmsan
@@ -1318,6 +1307,14 @@ quiet_cmd_install = INSTALL $(INSTALL_PATH)
cmd_install = unset sub_make_done; $(srctree)/scripts/install.sh
# ---------------------------------------------------------------------------
+# vDSO install
+
+PHONY += vdso_install
+vdso_install: export INSTALL_FILES = $(vdso-install-y)
+vdso_install:
+ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.vdsoinst
+
+# ---------------------------------------------------------------------------
# Tools
ifdef CONFIG_OBJTOOL
@@ -1486,7 +1483,7 @@ MRPROPER_FILES += include/config include/generated \
certs/signing_key.pem \
certs/x509.genkey \
vmlinux-gdb.py \
- kernel.spec rpmbuild \
+ rpmbuild \
rust/libmacros.so
# clean - Delete most, but leave enough to build external modules
@@ -1560,6 +1557,7 @@ help:
@echo '* vmlinux - Build the bare kernel'
@echo '* modules - Build all modules'
@echo ' modules_install - Install all modules to INSTALL_MOD_PATH (default: /)'
+ @echo ' vdso_install - Install unstripped vdso to INSTALL_MOD_PATH (default: /)'
@echo ' dir/ - Build all files in dir and below'
@echo ' dir/file.[ois] - Build specified target only'
@echo ' dir/file.ll - Build the LLVM assembly file'
@@ -1637,9 +1635,9 @@ help:
@echo 'Documentation targets:'
@$(MAKE) -f $(srctree)/Documentation/Makefile dochelp
@echo ''
- @echo 'Architecture specific targets ($(SRCARCH)):'
+ @echo 'Architecture-specific targets ($(SRCARCH)):'
@$(or $(archhelp),\
- echo ' No architecture specific help defined for $(SRCARCH)')
+ echo ' No architecture-specific help defined for $(SRCARCH)')
@echo ''
@$(if $(boards), \
$(foreach b, $(boards), \
@@ -1681,7 +1679,7 @@ help-boards: $(help-board-dirs)
boards-per-dir = $(sort $(notdir $(wildcard $(srctree)/arch/$(SRCARCH)/configs/$*/*_defconfig)))
$(help-board-dirs): help-%:
- @echo 'Architecture specific targets ($(SRCARCH) $*):'
+ @echo 'Architecture-specific targets ($(SRCARCH) $*):'
@$(if $(boards-per-dir), \
$(foreach b, $(boards-per-dir), \
printf " %-24s - Build for %s\\n" $*/$(b) $(subst _defconfig,,$(b));) \
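The Makefile hunks above replace the per-architecture vdso_install targets with one generic rule driven by scripts/Makefile.vdsoinst: each architecture now lists its unstripped vDSO images in vdso-install-y, and an optional ':<name>' suffix selects the installed file name (the arm64 compat vDSO hunk further below uses this to install vdso.so.dbg as vdso32.so). A minimal sketch of what an architecture Makefile is expected to provide, using a made-up arch "foo":

	# arch/foo/Makefile -- sketch only, "foo" is a hypothetical architecture
	# Entries are installed under $(MODLIB)/vdso/ with the .dbg suffix dropped,
	# mirroring what the removed per-arch install recipes did.
	vdso-install-y                     += arch/foo/kernel/vdso/vdso.so.dbg
	# Optional rename: the text after ':' becomes the installed file name.
	vdso-install-$(CONFIG_COMPAT_VDSO) += arch/foo/kernel/vdso32/vdso.so.dbg:vdso32.so

The top-level vdso_install target then exports the collected list as INSTALL_FILES and hands it to scripts/Makefile.vdsoinst, so architectures no longer carry their own install rules.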
diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h
index 5816a31c1b38..2c89c1c55712 100644
--- a/arch/alpha/kernel/proto.h
+++ b/arch/alpha/kernel/proto.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/interrupt.h>
+#include <linux/screen_info.h>
#include <linux/io.h>
/* Prototypes of functions used across modules here in this directory. */
@@ -113,6 +114,7 @@ extern int boot_cpuid;
#ifdef CONFIG_VERBOSE_MCHECK
extern unsigned long alpha_verbose_mcheck;
#endif
+extern struct screen_info vgacon_screen_info;
/* srmcons.c */
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM)
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index c80258ec332f..0738f9396f95 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -131,13 +131,14 @@ static void determine_cpu_caches (unsigned int);
static char __initdata command_line[COMMAND_LINE_SIZE];
+#ifdef CONFIG_VGA_CONSOLE
/*
* The format of "screen_info" is strange, and due to early
* i386-setup code. This is just enough to make the console
* code think we're on a VGA color display.
*/
-struct screen_info screen_info = {
+struct screen_info vgacon_screen_info = {
.orig_x = 0,
.orig_y = 25,
.orig_video_cols = 80,
@@ -145,8 +146,7 @@ struct screen_info screen_info = {
.orig_video_isVGA = 1,
.orig_video_points = 16
};
-
-EXPORT_SYMBOL(screen_info);
+#endif
/*
* The direct map I/O window, if any. This should be the same
@@ -652,7 +652,7 @@ setup_arch(char **cmdline_p)
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
- conswitchp = &vga_con;
+ vgacon_register_screen(&vgacon_screen_info);
#endif
#endif
diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c
index 7c420d8dac53..086488ed83a7 100644
--- a/arch/alpha/kernel/sys_sio.c
+++ b/arch/alpha/kernel/sys_sio.c
@@ -57,11 +57,13 @@ sio_init_irq(void)
static inline void __init
alphabook1_init_arch(void)
{
+#ifdef CONFIG_VGA_CONSOLE
/* The AlphaBook1 has LCD video fixed at 800x600,
37 rows and 100 cols. */
- screen_info.orig_y = 37;
- screen_info.orig_video_cols = 100;
- screen_info.orig_video_lines = 37;
+ vgacon_screen_info.orig_y = 37;
+ vgacon_screen_info.orig_video_cols = 100;
+ vgacon_screen_info.orig_video_lines = 37;
+#endif
lca_init_arch();
}
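The alpha changes above show the VGA console pattern this series rolls out across architectures: instead of filling the global, exported screen_info and pointing conswitchp at vga_con, each architecture keeps a private struct screen_info (vgacon_screen_info) under CONFIG_VGA_CONSOLE and hands it to vgacon_register_screen() from setup_arch(). A condensed sketch of the pattern, not a literal copy of any one architecture:

	#include <linux/console.h>      /* vgacon_register_screen() */
	#include <linux/screen_info.h>  /* struct screen_info */

	#ifdef CONFIG_VGA_CONSOLE
	/* Per-architecture copy; no longer the global, exported screen_info. */
	struct screen_info vgacon_screen_info = {
		.orig_video_lines  = 25,
		.orig_video_cols   = 80,
		.orig_video_isVGA  = 1,
		.orig_video_points = 16,
	};
	#endif

	void __init setup_arch(char **cmdline_p)
	{
		/* ... memory, IRQ and platform setup elided ... */
	#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
		/* Replaces the old "conswitchp = &vga_con" assignment. */
		vgacon_register_screen(&vgacon_screen_info);
	#endif
	}

With the structure now local to vgacon users, architectures that never had a VGA console (csky and hexagon in the later hunks) can drop their dummy screen_info definitions entirely.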
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 547e5856eaa0..5ba42f69f8ce 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -304,11 +304,7 @@ $(INSTALL_TARGETS): KBUILD_IMAGE = $(boot)/$(patsubst %install,%Image,$@)
$(INSTALL_TARGETS):
$(call cmd,install)
-PHONY += vdso_install
-vdso_install:
-ifeq ($(CONFIG_VDSO),y)
- $(Q)$(MAKE) $(build)=arch/arm/vdso $@
-endif
+vdso-install-$(CONFIG_VDSO) += arch/arm/vdso/vdso.so.dbg
# My testing targets (bypasses dependencies)
bp:; $(Q)$(MAKE) $(build)=$(boot) $(boot)/bootpImage
@@ -331,7 +327,6 @@ define archhelp
echo ' Install using (your) ~/bin/$(INSTALLKERNEL) or'
echo ' (distribution) /sbin/$(INSTALLKERNEL) or'
echo ' install to $$(INSTALL_PATH) and run lilo'
- echo ' vdso_install - Install unstripped vdso.so to $$(INSTALL_MOD_PATH)/vdso'
echo
echo ' multi_v7_lpae_defconfig - multi_v7_defconfig with CONFIG_ARM_LPAE enabled'
endef
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi
index 98cc5ea637e1..97a377b5a0ec 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi
@@ -242,6 +242,17 @@
status = "disabled";
};
+ blsp1_uart2: serial@f991e000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0xf991e000 0x1000>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core",
+ "iface";
+ status = "disabled";
+ };
+
blsp1_uart3: serial@f991f000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
reg = <0xf991f000 0x1000>;
@@ -325,6 +336,21 @@
#size-cells = <0>;
};
+ blsp1_i2c6: i2c@f9928000 {
+ compatible = "qcom,i2c-qup-v2.1.1";
+ reg = <0xf9928000 0x1000>;
+ interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_QUP6_I2C_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core",
+ "iface";
+ pinctrl-0 = <&blsp1_i2c6_pins>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
cci: cci@fda0c000 {
compatible = "qcom,msm8226-cci";
#address-cells = <1>;
@@ -472,6 +498,13 @@
bias-disable;
};
+ blsp1_i2c6_pins: blsp1-i2c6-state {
+ pins = "gpio22", "gpio23";
+ function = "blsp_i2c6";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
cci_default: cci-default-state {
pins = "gpio29", "gpio30";
function = "cci_i2c0";
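The new blsp1_uart2 and blsp1_i2c6 nodes above are added with status = "disabled", per the usual SoC .dtsi convention: the SoC file only describes the hardware, and a board .dts that actually routes the pins references the node by label and flips it to "okay". A hypothetical board-level snippet (client device and address made up for illustration):

	&blsp1_i2c6 {
		status = "okay";

		/* example client; any I2C device wired to BLSP1 QUP6 */
		eeprom@50 {
			compatible = "atmel,24c32";
			reg = <0x50>;
		};
	};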
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index bb9f0e5b0b63..10fd74bf85f9 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -1076,7 +1076,6 @@ CONFIG_QCOM_IPCC=y
CONFIG_OMAP_IOMMU=y
CONFIG_OMAP_IOMMU_DEBUG=y
CONFIG_ROCKCHIP_IOMMU=y
-CONFIG_TEGRA_IOMMU_GART=y
CONFIG_TEGRA_IOMMU_SMMU=y
CONFIG_EXYNOS_IOMMU=y
CONFIG_QCOM_IOMMU=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index b2f0862f4bd9..7b1b41b4b160 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -477,7 +477,6 @@ CONFIG_LIRC=y
CONFIG_RC_DEVICES=y
CONFIG_IR_GPIO_TX=m
CONFIG_IR_PWM_TX=m
-CONFIG_IR_RX51=m
CONFIG_IR_SPI=m
CONFIG_MEDIA_SUPPORT=m
CONFIG_V4L_PLATFORM_DRIVERS=y
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index 23c131b0854b..9e81b1849e4c 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -100,7 +100,6 @@ CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_CONNECTOR=y
CONFIG_MTD=y
-CONFIG_MTD_AR7_PARTS=m
CONFIG_MTD_CMDLINE_PARTS=m
CONFIG_MTD_OF_PARTS=m
CONFIG_MTD_AFS_PARTS=m
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 613f07b8ce15..8635b7216bfc 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -292,7 +292,6 @@ CONFIG_CHROME_PLATFORMS=y
CONFIG_CROS_EC=y
CONFIG_CROS_EC_I2C=m
CONFIG_CROS_EC_SPI=m
-CONFIG_TEGRA_IOMMU_GART=y
CONFIG_TEGRA_IOMMU_SMMU=y
CONFIG_ARCH_TEGRA_2x_SOC=y
CONFIG_ARCH_TEGRA_3x_SOC=y
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 546af8b1e3f6..cc106f946c69 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -11,6 +11,7 @@
#ifndef __ASMARM_SETUP_H
#define __ASMARM_SETUP_H
+#include <linux/screen_info.h>
#include <uapi/asm/setup.h>
@@ -35,4 +36,8 @@ void early_mm_init(const struct machine_desc *);
void adjust_lowmem_bounds(void);
void setup_dma_zone(const struct machine_desc *desc);
+#ifdef CONFIG_VGA_CONSOLE
+extern struct screen_info vgacon_screen_info;
+#endif
+
#endif
diff --git a/arch/arm/include/asm/vga.h b/arch/arm/include/asm/vga.h
index 7c0bee57855a..6c430ec371df 100644
--- a/arch/arm/include/asm/vga.h
+++ b/arch/arm/include/asm/vga.h
@@ -5,6 +5,7 @@
#include <linux/io.h>
extern unsigned long vga_base;
+extern struct screen_info vgacon_screen_info;
#define VGA_MAP_MEM(x,s) (vga_base + (x))
diff --git a/arch/arm/kernel/atags_parse.c b/arch/arm/kernel/atags_parse.c
index 33f6eb5213a5..4ec591bde3df 100644
--- a/arch/arm/kernel/atags_parse.c
+++ b/arch/arm/kernel/atags_parse.c
@@ -69,18 +69,18 @@ static int __init parse_tag_mem32(const struct tag *tag)
__tagtable(ATAG_MEM, parse_tag_mem32);
-#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
+#if defined(CONFIG_ARCH_FOOTBRIDGE) && defined(CONFIG_VGA_CONSOLE)
static int __init parse_tag_videotext(const struct tag *tag)
{
- screen_info.orig_x = tag->u.videotext.x;
- screen_info.orig_y = tag->u.videotext.y;
- screen_info.orig_video_page = tag->u.videotext.video_page;
- screen_info.orig_video_mode = tag->u.videotext.video_mode;
- screen_info.orig_video_cols = tag->u.videotext.video_cols;
- screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
- screen_info.orig_video_lines = tag->u.videotext.video_lines;
- screen_info.orig_video_isVGA = tag->u.videotext.video_isvga;
- screen_info.orig_video_points = tag->u.videotext.video_points;
+ vgacon_screen_info.orig_x = tag->u.videotext.x;
+ vgacon_screen_info.orig_y = tag->u.videotext.y;
+ vgacon_screen_info.orig_video_page = tag->u.videotext.video_page;
+ vgacon_screen_info.orig_video_mode = tag->u.videotext.video_mode;
+ vgacon_screen_info.orig_video_cols = tag->u.videotext.video_cols;
+ vgacon_screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
+ vgacon_screen_info.orig_video_lines = tag->u.videotext.video_lines;
+ vgacon_screen_info.orig_video_isVGA = tag->u.videotext.video_isvga;
+ vgacon_screen_info.orig_video_points = tag->u.videotext.video_points;
return 0;
}
diff --git a/arch/arm/kernel/efi.c b/arch/arm/kernel/efi.c
index e94655ef16bb..6f9ec7d28a71 100644
--- a/arch/arm/kernel/efi.c
+++ b/arch/arm/kernel/efi.c
@@ -123,12 +123,6 @@ void __init arm_efi_init(void)
{
efi_init();
- if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI) {
- /* dummycon on ARM needs non-zero values for columns/lines */
- screen_info.orig_video_cols = 80;
- screen_info.orig_video_lines = 25;
- }
-
/* ARM does not permit early mappings to persist across paging_init() */
efi_memmap_unmap();
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index b01cac05bd4c..ff2299ce1ad7 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -928,9 +928,8 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
request_resource(&ioport_resource, &lp2);
}
-#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
- defined(CONFIG_EFI)
-struct screen_info screen_info = {
+#if defined(CONFIG_VGA_CONSOLE)
+struct screen_info vgacon_screen_info = {
.orig_video_lines = 30,
.orig_video_cols = 80,
.orig_video_mode = 0,
@@ -1194,7 +1193,7 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
- conswitchp = &vga_con;
+ vgacon_register_screen(&vgacon_screen_info);
#endif
#endif
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
index 515ca33b854c..d761bd2e2f40 100644
--- a/arch/arm/vdso/Makefile
+++ b/arch/arm/vdso/Makefile
@@ -63,28 +63,3 @@ quiet_cmd_vdsold_and_vdso_check = LD $@
quiet_cmd_vdsomunge = MUNGE $@
cmd_vdsomunge = $(objtree)/$(obj)/vdsomunge $< $@
-
-#
-# Install the unstripped copy of vdso.so.dbg. If our toolchain
-# supports build-id, install .build-id links as well.
-#
-# Cribbed from arch/x86/vdso/Makefile.
-#
-quiet_cmd_vdso_install = INSTALL $<
-define cmd_vdso_install
- cp $< "$(MODLIB)/vdso/vdso.so"; \
- if readelf -n $< | grep -q 'Build ID'; then \
- buildid=`readelf -n $< |grep 'Build ID' |sed -e 's/^.*Build ID: \(.*\)$$/\1/'`; \
- first=`echo $$buildid | cut -b-2`; \
- last=`echo $$buildid | cut -b3-`; \
- mkdir -p "$(MODLIB)/vdso/.build-id/$$first"; \
- ln -sf "../../vdso.so" "$(MODLIB)/vdso/.build-id/$$first/$$last.debug"; \
- fi
-endef
-
-$(MODLIB)/vdso: FORCE
- @mkdir -p $(MODLIB)/vdso
-
-PHONY += vdso_install
-vdso_install: $(obj)/vdso.so.dbg $(MODLIB)/vdso
- $(call cmd,vdso_install)
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 2d49aea0ff67..4bd85cc0d32b 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -169,12 +169,6 @@ install: KBUILD_IMAGE := $(boot)/Image
install zinstall:
$(call cmd,install)
-PHONY += vdso_install
-vdso_install:
- $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
- $(if $(CONFIG_COMPAT_VDSO), \
- $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@)
-
archprepare:
$(Q)$(MAKE) $(build)=arch/arm64/tools kapi
ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
@@ -205,6 +199,9 @@ ifdef CONFIG_COMPAT_VDSO
endif
endif
+vdso-install-y += arch/arm64/kernel/vdso/vdso.so.dbg
+vdso-install-$(CONFIG_COMPAT_VDSO) += arch/arm64/kernel/vdso32/vdso.so.dbg:vdso32.so
+
include $(srctree)/scripts/Makefile.defconf
PHONY += virtconfig
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
index 4a74f3b6d775..bf7de35ffcbc 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
@@ -859,7 +859,7 @@
pinctrl-names = "default";
pinctrl-0 = <&scp_pins>;
- cros_ec {
+ cros-ec-rpmsg {
compatible = "google,cros-ec-rpmsg";
mediatek,rpmsg-name = "cros-ec-rpmsg";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
index 1447eed0ea36..f2281250ac35 100644
--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
@@ -1312,7 +1312,7 @@
pinctrl-names = "default";
pinctrl-0 = <&scp_pins>;
- cros-ec {
+ cros-ec-rpmsg {
compatible = "google,cros-ec-rpmsg";
mediatek,rpmsg-name = "cros-ec-rpmsg";
};
diff --git a/arch/arm64/boot/dts/qcom/sm8550-mtp.dts b/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
index 5b3488736fbe..9a70875028b7 100644
--- a/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
@@ -59,6 +59,7 @@
compatible = "qcom,sm8550-pmic-glink", "qcom,pmic-glink";
#address-cells = <1>;
#size-cells = <0>;
+ orientation-gpios = <&tlmm 11 GPIO_ACTIVE_HIGH>;
connector@0 {
compatible = "usb-c-connector";
diff --git a/arch/arm64/boot/dts/qcom/sm8550-qrd.dts b/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
index 320662024e89..eef811def39b 100644
--- a/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
@@ -77,6 +77,7 @@
compatible = "qcom,sm8550-pmic-glink", "qcom,pmic-glink";
#address-cells = <1>;
#size-cells = <0>;
+ orientation-gpios = <&tlmm 11 GPIO_ACTIVE_HIGH>;
connector@0 {
compatible = "usb-c-connector";
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s.dtsi b/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
index 2993e1255042..7064c0e9179f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
@@ -443,6 +443,27 @@
status = "disabled";
};
+ usb_host2_xhci: usb@fcd00000 {
+ compatible = "rockchip,rk3588-dwc3", "snps,dwc3";
+ reg = <0x0 0xfcd00000 0x0 0x400000>;
+ interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru REF_CLK_USB3OTG2>, <&cru SUSPEND_CLK_USB3OTG2>,
+ <&cru ACLK_USB3OTG2>, <&cru CLK_UTMI_OTG2>,
+ <&cru CLK_PIPEPHY2_PIPE_U3_G>;
+ clock-names = "ref_clk", "suspend_clk", "bus_clk", "utmi", "pipe";
+ dr_mode = "host";
+ phys = <&combphy2_psu PHY_TYPE_USB3>;
+ phy-names = "usb3-phy";
+ phy_type = "utmi_wide";
+ resets = <&cru SRST_A_USB3OTG2>;
+ snps,dis_enblslpm_quirk;
+ snps,dis-u2-freeclk-exists-quirk;
+ snps,dis-del-phy-power-chg-quirk;
+ snps,dis-tx-ipgap-linecheck-quirk;
+ snps,dis_rxdet_inp3_quirk;
+ status = "disabled";
+ };
+
pmu1grf: syscon@fd58a000 {
compatible = "rockchip,rk3588-pmugrf", "syscon", "simple-mfd";
reg = <0x0 0xfd58a000 0x0 0x10000>;
diff --git a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
index 00891a0f8fc3..9a6bd0a3c94f 100644
--- a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
+++ b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
@@ -896,6 +896,12 @@
pinctrl-names = "default";
pinctrl-0 = <&wifi_debug_uart_pins_default>;
status = "okay";
+
+ mcu {
+ compatible = "ti,cc1352p7";
+ reset-gpios = <&main_gpio0 72 GPIO_ACTIVE_LOW>;
+ vdds-supply = <&vdd_3v3>;
+ };
};
&dss {
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 3f8c9c143552..0228001347be 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -71,10 +71,6 @@ static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
return pgprot_val(PAGE_KERNEL_EXEC);
}
-/* we will fill this structure from the stub, so don't put it in .bss */
-struct screen_info screen_info __section(".data");
-EXPORT_SYMBOL(screen_info);
-
int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
pteval_t prot_val = create_mapping_protection(md);
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 35f3c7959513..5e4dc72ab1bd 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -27,7 +27,9 @@ PROVIDE(__efistub__text = _text);
PROVIDE(__efistub__end = _end);
PROVIDE(__efistub___inittext_end = __inittext_end);
PROVIDE(__efistub__edata = _edata);
+#if defined(CONFIG_EFI_EARLYCON) || defined(CONFIG_SYSFB)
PROVIDE(__efistub_screen_info = screen_info);
+#endif
PROVIDE(__efistub__ctype = _ctype);
PROVIDE(__pi___memcpy = __pi_memcpy);
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index fe7a53c6781f..8818287f1095 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -78,13 +78,3 @@ include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
# Actual build commands
quiet_cmd_vdsold_and_vdso_check = LD $@
cmd_vdsold_and_vdso_check = $(cmd_ld); $(cmd_vdso_check)
-
-# Install commands for the unstripped file
-quiet_cmd_vdso_install = INSTALL $@
- cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
-
-vdso.so: $(obj)/vdso.so.dbg
- @mkdir -p $(MODLIB)/vdso
- $(call cmd,vdso_install)
-
-vdso_install: vdso.so
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 2f73e5bca213..1f911a76c5af 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -172,13 +172,3 @@ gen-vdsosym := $(srctree)/$(src)/../vdso/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
# The AArch64 nm should be able to read an AArch32 binary
cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
-
-# Install commands for the unstripped file
-quiet_cmd_vdso_install = INSTALL32 $@
- cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/vdso32.so
-
-vdso.so: $(obj)/vdso.so.dbg
- @mkdir -p $(MODLIB)/vdso
- $(call cmd,vdso_install)
-
-vdso_install: vdso.so
diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c
index 106fbf0b6f3b..51012e90780d 100644
--- a/arch/csky/kernel/setup.c
+++ b/arch/csky/kernel/setup.c
@@ -8,22 +8,10 @@
#include <linux/of_fdt.h>
#include <linux/start_kernel.h>
#include <linux/dma-map-ops.h>
-#include <linux/screen_info.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
-#ifdef CONFIG_DUMMY_CONSOLE
-struct screen_info screen_info = {
- .orig_video_lines = 30,
- .orig_video_cols = 80,
- .orig_video_mode = 0,
- .orig_video_ega_bx = 0,
- .orig_video_isVGA = 1,
- .orig_video_points = 8
-};
-#endif
-
static void __init csky_memblock_init(void)
{
unsigned long lowmem_size = PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET);
diff --git a/arch/csky/kernel/vdso/Makefile b/arch/csky/kernel/vdso/Makefile
index 299e4e41ebc5..ddf784a62c11 100644
--- a/arch/csky/kernel/vdso/Makefile
+++ b/arch/csky/kernel/vdso/Makefile
@@ -58,13 +58,3 @@ quiet_cmd_vdsold = VDSOLD $@
# that contains the same symbols at the same offsets.
quiet_cmd_so2s = SO2S $@
cmd_so2s = $(NM) -D $< | $(srctree)/$(src)/so2s.sh > $@
-
-# install commands for the unstripped file
-quiet_cmd_vdso_install = INSTALL $@
- cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
-
-vdso.so: $(obj)/vdso.so.dbg
- @mkdir -p $(MODLIB)/vdso
- $(call cmd,vdso_install)
-
-vdso_install: vdso.so
diff --git a/arch/hexagon/kernel/Makefile b/arch/hexagon/kernel/Makefile
index e73cb321630e..3fdf937eb572 100644
--- a/arch/hexagon/kernel/Makefile
+++ b/arch/hexagon/kernel/Makefile
@@ -17,5 +17,3 @@ obj-y += vm_vectors.o
obj-$(CONFIG_HAS_DMA) += dma.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
-
-obj-$(CONFIG_VGA_CONSOLE) += screen_info.o
diff --git a/arch/hexagon/kernel/screen_info.c b/arch/hexagon/kernel/screen_info.c
deleted file mode 100644
index 1e1ceb18bafe..000000000000
--- a/arch/hexagon/kernel/screen_info.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include <linux/screen_info.h>
-
-struct screen_info screen_info;
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index fb0fada43197..b86f2ff31659 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -136,9 +136,7 @@ vdso_prepare: prepare0
$(Q)$(MAKE) $(build)=arch/loongarch/vdso include/generated/vdso-offsets.h
endif
-PHONY += vdso_install
-vdso_install:
- $(Q)$(MAKE) $(build)=arch/loongarch/vdso $@
+vdso-install-y += arch/loongarch/vdso/vdso.so.dbg
all: $(notdir $(KBUILD_IMAGE))
diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c
index 9fc10cea21e1..acb5d3385675 100644
--- a/arch/loongarch/kernel/efi.c
+++ b/arch/loongarch/kernel/efi.c
@@ -68,6 +68,11 @@ void __init efi_runtime_init(void)
unsigned long __initdata screen_info_table = EFI_INVALID_TABLE_ADDR;
+#if defined(CONFIG_SYSFB) || defined(CONFIG_EFI_EARLYCON)
+struct screen_info screen_info __section(".data");
+EXPORT_SYMBOL_GPL(screen_info);
+#endif
+
static void __init init_screen_info(void)
{
struct screen_info *si;
@@ -115,7 +120,8 @@ void __init efi_init(void)
set_bit(EFI_CONFIG_TABLES, &efi.flags);
- init_screen_info();
+ if (IS_ENABLED(CONFIG_EFI_EARLYCON) || IS_ENABLED(CONFIG_SYSFB))
+ init_screen_info();
if (boot_memmap == EFI_INVALID_TABLE_ADDR)
return;
diff --git a/arch/loongarch/kernel/image-vars.h b/arch/loongarch/kernel/image-vars.h
index e561989d02de..5087416b9678 100644
--- a/arch/loongarch/kernel/image-vars.h
+++ b/arch/loongarch/kernel/image-vars.h
@@ -12,7 +12,9 @@ __efistub_kernel_entry = kernel_entry;
__efistub_kernel_asize = kernel_asize;
__efistub_kernel_fsize = kernel_fsize;
__efistub_kernel_offset = kernel_offset;
+#if defined(CONFIG_EFI_EARLYCON) || defined(CONFIG_SYSFB)
__efistub_screen_info = screen_info;
+#endif
#endif
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index b35186f7b254..d183a745fb85 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -16,7 +16,6 @@
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/export.h>
-#include <linux/screen_info.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
@@ -57,8 +56,6 @@
#define SMBIOS_CORE_PACKAGE_OFFSET 0x23
#define LOONGSON_EFI_ENABLE (1 << 3)
-struct screen_info screen_info __section(".data");
-
unsigned long fw_arg0, fw_arg1, fw_arg2;
DEFINE_PER_CPU(unsigned long, kernelsp);
struct cpuinfo_loongarch cpu_data[NR_CPUS] __read_mostly;
diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
index 5c97d1463328..c74c9921304f 100644
--- a/arch/loongarch/vdso/Makefile
+++ b/arch/loongarch/vdso/Makefile
@@ -83,13 +83,3 @@ $(obj)/vdso.so: $(obj)/vdso.so.dbg FORCE
obj-y += vdso.o
$(obj)/vdso.o : $(obj)/vdso.so
-
-# install commands for the unstripped file
-quiet_cmd_vdso_install = INSTALL $@
- cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
-
-vdso.so: $(obj)/vdso.so.dbg
- @mkdir -p $(MODLIB)/vdso
- $(call cmd,vdso_install)
-
-vdso_install: vdso.so
diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms
index caad195ba5c1..a2311c4bce6a 100644
--- a/arch/mips/Kbuild.platforms
+++ b/arch/mips/Kbuild.platforms
@@ -2,7 +2,6 @@
# All platforms listed in alphabetic order
platform-$(CONFIG_MIPS_ALCHEMY) += alchemy/
-platform-$(CONFIG_AR7) += ar7/
platform-$(CONFIG_ATH25) += ath25/
platform-$(CONFIG_ATH79) += ath79/
platform-$(CONFIG_BCM47XX) += bcm47xx/
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index bc8421859006..76db82542519 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -202,28 +202,6 @@ config MIPS_ALCHEMY
select SYS_SUPPORTS_ZBOOT
select COMMON_CLK
-config AR7
- bool "Texas Instruments AR7"
- select BOOT_ELF32
- select COMMON_CLK
- select DMA_NONCOHERENT
- select CEVT_R4K
- select CSRC_R4K
- select IRQ_MIPS_CPU
- select NO_EXCEPT_FILL
- select SWAP_IO_SPACE
- select SYS_HAS_CPU_MIPS32_R1
- select SYS_HAS_EARLY_PRINTK
- select SYS_SUPPORTS_32BIT_KERNEL
- select SYS_SUPPORTS_LITTLE_ENDIAN
- select SYS_SUPPORTS_MIPS16
- select SYS_SUPPORTS_ZBOOT_UART16550
- select GPIOLIB
- select VLYNQ
- help
- Support for the Texas Instruments AR7 System-on-a-Chip
- family: TNETD7100, 7200 and 7300.
-
config ATH25
bool "Atheros AR231x/AR531x SoC support"
select CEVT_R4K
diff --git a/arch/mips/Makefile.postlink b/arch/mips/Makefile.postlink
index 34e3bd71f3b0..6cfdc149d3bc 100644
--- a/arch/mips/Makefile.postlink
+++ b/arch/mips/Makefile.postlink
@@ -31,9 +31,6 @@ ifeq ($(CONFIG_RELOCATABLE),y)
$(call if_changed,relocs)
endif
-%.ko: FORCE
- @true
-
clean:
@true
diff --git a/arch/mips/ar7/Makefile b/arch/mips/ar7/Makefile
deleted file mode 100644
index cd51c6c6e686..000000000000
--- a/arch/mips/ar7/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-obj-y := \
- prom.o \
- setup.o \
- memory.o \
- irq.o \
- time.o \
- platform.o \
- gpio.o \
- clock.o
diff --git a/arch/mips/ar7/Platform b/arch/mips/ar7/Platform
deleted file mode 100644
index a9257cc01c3c..000000000000
--- a/arch/mips/ar7/Platform
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Texas Instruments AR7
-#
-cflags-$(CONFIG_AR7) += -I$(srctree)/arch/mips/include/asm/mach-ar7
-load-$(CONFIG_AR7) += 0xffffffff94100000
diff --git a/arch/mips/ar7/clock.c b/arch/mips/ar7/clock.c
deleted file mode 100644
index c717acbc5506..000000000000
--- a/arch/mips/ar7/clock.c
+++ /dev/null
@@ -1,439 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2007 Eugene Konev <ejka@openwrt.org>
- * Copyright (C) 2009 Florian Fainelli <florian@openwrt.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/export.h>
-#include <linux/delay.h>
-#include <linux/gcd.h>
-#include <linux/io.h>
-#include <linux/err.h>
-#include <linux/clkdev.h>
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-
-#include <asm/addrspace.h>
-#include <asm/mach-ar7/ar7.h>
-
-#define BOOT_PLL_SOURCE_MASK 0x3
-#define CPU_PLL_SOURCE_SHIFT 16
-#define BUS_PLL_SOURCE_SHIFT 14
-#define USB_PLL_SOURCE_SHIFT 18
-#define DSP_PLL_SOURCE_SHIFT 22
-#define BOOT_PLL_SOURCE_AFE 0
-#define BOOT_PLL_SOURCE_BUS 0
-#define BOOT_PLL_SOURCE_REF 1
-#define BOOT_PLL_SOURCE_XTAL 2
-#define BOOT_PLL_SOURCE_CPU 3
-#define BOOT_PLL_BYPASS 0x00000020
-#define BOOT_PLL_ASYNC_MODE 0x02000000
-#define BOOT_PLL_2TO1_MODE 0x00008000
-
-#define TNETD7200_CLOCK_ID_CPU 0
-#define TNETD7200_CLOCK_ID_DSP 1
-#define TNETD7200_CLOCK_ID_USB 2
-
-#define TNETD7200_DEF_CPU_CLK 211000000
-#define TNETD7200_DEF_DSP_CLK 125000000
-#define TNETD7200_DEF_USB_CLK 48000000
-
-struct tnetd7300_clock {
- u32 ctrl;
-#define PREDIV_MASK 0x001f0000
-#define PREDIV_SHIFT 16
-#define POSTDIV_MASK 0x0000001f
- u32 unused1[3];
- u32 pll;
-#define MUL_MASK 0x0000f000
-#define MUL_SHIFT 12
-#define PLL_MODE_MASK 0x00000001
-#define PLL_NDIV 0x00000800
-#define PLL_DIV 0x00000002
-#define PLL_STATUS 0x00000001
- u32 unused2[3];
-};
-
-struct tnetd7300_clocks {
- struct tnetd7300_clock bus;
- struct tnetd7300_clock cpu;
- struct tnetd7300_clock usb;
- struct tnetd7300_clock dsp;
-};
-
-struct tnetd7200_clock {
- u32 ctrl;
- u32 unused1[3];
-#define DIVISOR_ENABLE_MASK 0x00008000
- u32 mul;
- u32 prediv;
- u32 postdiv;
- u32 postdiv2;
- u32 unused2[6];
- u32 cmd;
- u32 status;
- u32 cmden;
- u32 padding[15];
-};
-
-struct tnetd7200_clocks {
- struct tnetd7200_clock cpu;
- struct tnetd7200_clock dsp;
- struct tnetd7200_clock usb;
-};
-
-struct clk_rate {
- u32 rate;
-};
-static struct clk_rate bus_clk = {
- .rate = 125000000,
-};
-
-static struct clk_rate cpu_clk = {
- .rate = 150000000,
-};
-
-static void approximate(int base, int target, int *prediv,
- int *postdiv, int *mul)
-{
- int i, j, k, freq, res = target;
- for (i = 1; i <= 16; i++)
- for (j = 1; j <= 32; j++)
- for (k = 1; k <= 32; k++) {
- freq = abs(base / j * i / k - target);
- if (freq < res) {
- res = freq;
- *mul = i;
- *prediv = j;
- *postdiv = k;
- }
- }
-}
-
-static void calculate(int base, int target, int *prediv, int *postdiv,
- int *mul)
-{
- int tmp_gcd, tmp_base, tmp_freq;
-
- for (*prediv = 1; *prediv <= 32; (*prediv)++) {
- tmp_base = base / *prediv;
- tmp_gcd = gcd(target, tmp_base);
- *mul = target / tmp_gcd;
- *postdiv = tmp_base / tmp_gcd;
- if ((*mul < 1) || (*mul >= 16))
- continue;
- if ((*postdiv > 0) & (*postdiv <= 32))
- break;
- }
-
- if (base / *prediv * *mul / *postdiv != target) {
- approximate(base, target, prediv, postdiv, mul);
- tmp_freq = base / *prediv * *mul / *postdiv;
- printk(KERN_WARNING
- "Adjusted requested frequency %d to %d\n",
- target, tmp_freq);
- }
-
- printk(KERN_DEBUG "Clocks: prediv: %d, postdiv: %d, mul: %d\n",
- *prediv, *postdiv, *mul);
-}
-
-static int tnetd7300_dsp_clock(void)
-{
- u32 didr1, didr2;
- u8 rev = ar7_chip_rev();
- didr1 = readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x18));
- didr2 = readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x1c));
- if (didr2 & (1 << 23))
- return 0;
- if ((rev >= 0x23) && (rev != 0x57))
- return 250000000;
- if ((((didr2 & 0x1fff) << 10) | ((didr1 & 0xffc00000) >> 22))
- > 4208000)
- return 250000000;
- return 0;
-}
-
-static int tnetd7300_get_clock(u32 shift, struct tnetd7300_clock *clock,
- u32 *bootcr, u32 bus_clock)
-{
- int product;
- int base_clock = AR7_REF_CLOCK;
- u32 ctrl = readl(&clock->ctrl);
- u32 pll = readl(&clock->pll);
- int prediv = ((ctrl & PREDIV_MASK) >> PREDIV_SHIFT) + 1;
- int postdiv = (ctrl & POSTDIV_MASK) + 1;
- int divisor = prediv * postdiv;
- int mul = ((pll & MUL_MASK) >> MUL_SHIFT) + 1;
-
- switch ((*bootcr & (BOOT_PLL_SOURCE_MASK << shift)) >> shift) {
- case BOOT_PLL_SOURCE_BUS:
- base_clock = bus_clock;
- break;
- case BOOT_PLL_SOURCE_REF:
- base_clock = AR7_REF_CLOCK;
- break;
- case BOOT_PLL_SOURCE_XTAL:
- base_clock = AR7_XTAL_CLOCK;
- break;
- case BOOT_PLL_SOURCE_CPU:
- base_clock = cpu_clk.rate;
- break;
- }
-
- if (*bootcr & BOOT_PLL_BYPASS)
- return base_clock / divisor;
-
- if ((pll & PLL_MODE_MASK) == 0)
- return (base_clock >> (mul / 16 + 1)) / divisor;
-
- if ((pll & (PLL_NDIV | PLL_DIV)) == (PLL_NDIV | PLL_DIV)) {
- product = (mul & 1) ?
- (base_clock * mul) >> 1 :
- (base_clock * (mul - 1)) >> 2;
- return product / divisor;
- }
-
- if (mul == 16)
- return base_clock / divisor;
-
- return base_clock * mul / divisor;
-}
-
-static void tnetd7300_set_clock(u32 shift, struct tnetd7300_clock *clock,
- u32 *bootcr, u32 frequency)
-{
- int prediv, postdiv, mul;
- int base_clock = bus_clk.rate;
-
- switch ((*bootcr & (BOOT_PLL_SOURCE_MASK << shift)) >> shift) {
- case BOOT_PLL_SOURCE_BUS:
- base_clock = bus_clk.rate;
- break;
- case BOOT_PLL_SOURCE_REF:
- base_clock = AR7_REF_CLOCK;
- break;
- case BOOT_PLL_SOURCE_XTAL:
- base_clock = AR7_XTAL_CLOCK;
- break;
- case BOOT_PLL_SOURCE_CPU:
- base_clock = cpu_clk.rate;
- break;
- }
-
- calculate(base_clock, frequency, &prediv, &postdiv, &mul);
-
- writel(((prediv - 1) << PREDIV_SHIFT) | (postdiv - 1), &clock->ctrl);
- mdelay(1);
- writel(4, &clock->pll);
- while (readl(&clock->pll) & PLL_STATUS)
- ;
- writel(((mul - 1) << MUL_SHIFT) | (0xff << 3) | 0x0e, &clock->pll);
- mdelay(75);
-}
-
-static void __init tnetd7300_init_clocks(void)
-{
- u32 *bootcr = (u32 *)ioremap(AR7_REGS_DCL, 4);
- struct tnetd7300_clocks *clocks =
- ioremap(UR8_REGS_CLOCKS,
- sizeof(struct tnetd7300_clocks));
- u32 dsp_clk;
- struct clk *clk;
-
- bus_clk.rate = tnetd7300_get_clock(BUS_PLL_SOURCE_SHIFT,
- &clocks->bus, bootcr, AR7_AFE_CLOCK);
-
- if (*bootcr & BOOT_PLL_ASYNC_MODE)
- cpu_clk.rate = tnetd7300_get_clock(CPU_PLL_SOURCE_SHIFT,
- &clocks->cpu, bootcr, AR7_AFE_CLOCK);
- else
- cpu_clk.rate = bus_clk.rate;
-
- dsp_clk = tnetd7300_dsp_clock();
- if (dsp_clk == 250000000)
- tnetd7300_set_clock(DSP_PLL_SOURCE_SHIFT, &clocks->dsp,
- bootcr, dsp_clk);
-
- iounmap(clocks);
- iounmap(bootcr);
-
- clk = clk_register_fixed_rate(NULL, "cpu", NULL, 0, cpu_clk.rate);
- clkdev_create(clk, "cpu", NULL);
- clk = clk_register_fixed_rate(NULL, "dsp", NULL, 0, dsp_clk);
- clkdev_create(clk, "dsp", NULL);
-}
-
-static void tnetd7200_set_clock(int base, struct tnetd7200_clock *clock,
- int prediv, int postdiv, int postdiv2, int mul, u32 frequency)
-{
- printk(KERN_INFO
- "Clocks: base = %d, frequency = %u, prediv = %d, "
- "postdiv = %d, postdiv2 = %d, mul = %d\n",
- base, frequency, prediv, postdiv, postdiv2, mul);
-
- writel(0, &clock->ctrl);
- writel(DIVISOR_ENABLE_MASK | ((prediv - 1) & 0x1F), &clock->prediv);
- writel((mul - 1) & 0xF, &clock->mul);
-
- while (readl(&clock->status) & 0x1)
- ; /* nop */
-
- writel(DIVISOR_ENABLE_MASK | ((postdiv - 1) & 0x1F), &clock->postdiv);
-
- writel(readl(&clock->cmden) | 1, &clock->cmden);
- writel(readl(&clock->cmd) | 1, &clock->cmd);
-
- while (readl(&clock->status) & 0x1)
- ; /* nop */
-
- writel(DIVISOR_ENABLE_MASK | ((postdiv2 - 1) & 0x1F), &clock->postdiv2);
-
- writel(readl(&clock->cmden) | 1, &clock->cmden);
- writel(readl(&clock->cmd) | 1, &clock->cmd);
-
- while (readl(&clock->status) & 0x1)
- ; /* nop */
-
- writel(readl(&clock->ctrl) | 1, &clock->ctrl);
-}
-
-static int tnetd7200_get_clock_base(int clock_id, u32 *bootcr)
-{
- if (*bootcr & BOOT_PLL_ASYNC_MODE)
- /* Async */
- switch (clock_id) {
- case TNETD7200_CLOCK_ID_DSP:
- return AR7_REF_CLOCK;
- default:
- return AR7_AFE_CLOCK;
- }
- else
- /* Sync */
- if (*bootcr & BOOT_PLL_2TO1_MODE)
- /* 2:1 */
- switch (clock_id) {
- case TNETD7200_CLOCK_ID_DSP:
- return AR7_REF_CLOCK;
- default:
- return AR7_AFE_CLOCK;
- }
- else
- /* 1:1 */
- return AR7_REF_CLOCK;
-}
-
-
-static void __init tnetd7200_init_clocks(void)
-{
- u32 *bootcr = (u32 *)ioremap(AR7_REGS_DCL, 4);
- struct tnetd7200_clocks *clocks =
- ioremap(AR7_REGS_CLOCKS,
- sizeof(struct tnetd7200_clocks));
- int cpu_base, cpu_mul, cpu_prediv, cpu_postdiv;
- int dsp_base, dsp_mul, dsp_prediv, dsp_postdiv;
- int usb_base, usb_mul, usb_prediv, usb_postdiv;
- struct clk *clk;
-
- cpu_base = tnetd7200_get_clock_base(TNETD7200_CLOCK_ID_CPU, bootcr);
- dsp_base = tnetd7200_get_clock_base(TNETD7200_CLOCK_ID_DSP, bootcr);
-
- if (*bootcr & BOOT_PLL_ASYNC_MODE) {
- printk(KERN_INFO "Clocks: Async mode\n");
-
- printk(KERN_INFO "Clocks: Setting DSP clock\n");
- calculate(dsp_base, TNETD7200_DEF_DSP_CLK,
- &dsp_prediv, &dsp_postdiv, &dsp_mul);
- bus_clk.rate =
- ((dsp_base / dsp_prediv) * dsp_mul) / dsp_postdiv;
- tnetd7200_set_clock(dsp_base, &clocks->dsp,
- dsp_prediv, dsp_postdiv * 2, dsp_postdiv, dsp_mul * 2,
- bus_clk.rate);
-
- printk(KERN_INFO "Clocks: Setting CPU clock\n");
- calculate(cpu_base, TNETD7200_DEF_CPU_CLK, &cpu_prediv,
- &cpu_postdiv, &cpu_mul);
- cpu_clk.rate =
- ((cpu_base / cpu_prediv) * cpu_mul) / cpu_postdiv;
- tnetd7200_set_clock(cpu_base, &clocks->cpu,
- cpu_prediv, cpu_postdiv, -1, cpu_mul,
- cpu_clk.rate);
-
- } else
- if (*bootcr & BOOT_PLL_2TO1_MODE) {
- printk(KERN_INFO "Clocks: Sync 2:1 mode\n");
-
- printk(KERN_INFO "Clocks: Setting CPU clock\n");
- calculate(cpu_base, TNETD7200_DEF_CPU_CLK, &cpu_prediv,
- &cpu_postdiv, &cpu_mul);
- cpu_clk.rate = ((cpu_base / cpu_prediv) * cpu_mul)
- / cpu_postdiv;
- tnetd7200_set_clock(cpu_base, &clocks->cpu,
- cpu_prediv, cpu_postdiv, -1, cpu_mul,
- cpu_clk.rate);
-
- printk(KERN_INFO "Clocks: Setting DSP clock\n");
- calculate(dsp_base, TNETD7200_DEF_DSP_CLK, &dsp_prediv,
- &dsp_postdiv, &dsp_mul);
- bus_clk.rate = cpu_clk.rate / 2;
- tnetd7200_set_clock(dsp_base, &clocks->dsp,
- dsp_prediv, dsp_postdiv * 2, dsp_postdiv,
- dsp_mul * 2, bus_clk.rate);
- } else {
- printk(KERN_INFO "Clocks: Sync 1:1 mode\n");
-
- printk(KERN_INFO "Clocks: Setting DSP clock\n");
- calculate(dsp_base, TNETD7200_DEF_DSP_CLK, &dsp_prediv,
- &dsp_postdiv, &dsp_mul);
- bus_clk.rate = ((dsp_base / dsp_prediv) * dsp_mul)
- / dsp_postdiv;
- tnetd7200_set_clock(dsp_base, &clocks->dsp,
- dsp_prediv, dsp_postdiv * 2, dsp_postdiv,
- dsp_mul * 2, bus_clk.rate);
-
- cpu_clk.rate = bus_clk.rate;
- }
-
- printk(KERN_INFO "Clocks: Setting USB clock\n");
- usb_base = bus_clk.rate;
- calculate(usb_base, TNETD7200_DEF_USB_CLK, &usb_prediv,
- &usb_postdiv, &usb_mul);
- tnetd7200_set_clock(usb_base, &clocks->usb,
- usb_prediv, usb_postdiv, -1, usb_mul,
- TNETD7200_DEF_USB_CLK);
-
- iounmap(clocks);
- iounmap(bootcr);
-
- clk = clk_register_fixed_rate(NULL, "cpu", NULL, 0, cpu_clk.rate);
- clkdev_create(clk, "cpu", NULL);
- clkdev_create(clk, "dsp", NULL);
-}
-
-void __init ar7_init_clocks(void)
-{
- struct clk *clk;
-
- switch (ar7_chip_id()) {
- case AR7_CHIP_7100:
- case AR7_CHIP_7200:
- tnetd7200_init_clocks();
- break;
- case AR7_CHIP_7300:
- tnetd7300_init_clocks();
- break;
- default:
- break;
- }
- clk = clk_register_fixed_rate(NULL, "bus", NULL, 0, bus_clk.rate);
- clkdev_create(clk, "bus", NULL);
- /* adjust vbus clock rate */
- clk = clk_register_fixed_factor(NULL, "vbus", "bus", 0, 1, 2);
- clkdev_create(clk, "vbus", NULL);
- clkdev_create(clk, "cpmac", "cpmac.1");
- clkdev_create(clk, "cpmac", "cpmac.0");
-}
diff --git a/arch/mips/ar7/gpio.c b/arch/mips/ar7/gpio.c
deleted file mode 100644
index 4ed833b9cc2f..000000000000
--- a/arch/mips/ar7/gpio.c
+++ /dev/null
@@ -1,332 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2007 Eugene Konev <ejka@openwrt.org>
- * Copyright (C) 2009-2010 Florian Fainelli <florian@openwrt.org>
- */
-
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/gpio/driver.h>
-
-#include <asm/mach-ar7/ar7.h>
-
-#define AR7_GPIO_MAX 32
-#define TITAN_GPIO_MAX 51
-
-struct ar7_gpio_chip {
- void __iomem *regs;
- struct gpio_chip chip;
-};
-
-static int ar7_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
-{
- struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
- void __iomem *gpio_in = gpch->regs + AR7_GPIO_INPUT;
-
- return !!(readl(gpio_in) & (1 << gpio));
-}
-
-static int titan_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
-{
- struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
- void __iomem *gpio_in0 = gpch->regs + TITAN_GPIO_INPUT_0;
- void __iomem *gpio_in1 = gpch->regs + TITAN_GPIO_INPUT_1;
-
- return readl(gpio >> 5 ? gpio_in1 : gpio_in0) & (1 << (gpio & 0x1f));
-}
-
-static void ar7_gpio_set_value(struct gpio_chip *chip,
- unsigned gpio, int value)
-{
- struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
- void __iomem *gpio_out = gpch->regs + AR7_GPIO_OUTPUT;
- unsigned tmp;
-
- tmp = readl(gpio_out) & ~(1 << gpio);
- if (value)
- tmp |= 1 << gpio;
- writel(tmp, gpio_out);
-}
-
-static void titan_gpio_set_value(struct gpio_chip *chip,
- unsigned gpio, int value)
-{
- struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
- void __iomem *gpio_out0 = gpch->regs + TITAN_GPIO_OUTPUT_0;
- void __iomem *gpio_out1 = gpch->regs + TITAN_GPIO_OUTPUT_1;
- unsigned tmp;
-
- tmp = readl(gpio >> 5 ? gpio_out1 : gpio_out0) & ~(1 << (gpio & 0x1f));
- if (value)
- tmp |= 1 << (gpio & 0x1f);
- writel(tmp, gpio >> 5 ? gpio_out1 : gpio_out0);
-}
-
-static int ar7_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
-{
- struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
- void __iomem *gpio_dir = gpch->regs + AR7_GPIO_DIR;
-
- writel(readl(gpio_dir) | (1 << gpio), gpio_dir);
-
- return 0;
-}
-
-static int titan_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
-{
- struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
- void __iomem *gpio_dir0 = gpch->regs + TITAN_GPIO_DIR_0;
- void __iomem *gpio_dir1 = gpch->regs + TITAN_GPIO_DIR_1;
-
- if (gpio >= TITAN_GPIO_MAX)
- return -EINVAL;
-
- writel(readl(gpio >> 5 ? gpio_dir1 : gpio_dir0) | (1 << (gpio & 0x1f)),
- gpio >> 5 ? gpio_dir1 : gpio_dir0);
- return 0;
-}
-
-static int ar7_gpio_direction_output(struct gpio_chip *chip,
- unsigned gpio, int value)
-{
- struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
- void __iomem *gpio_dir = gpch->regs + AR7_GPIO_DIR;
-
- ar7_gpio_set_value(chip, gpio, value);
- writel(readl(gpio_dir) & ~(1 << gpio), gpio_dir);
-
- return 0;
-}
-
-static int titan_gpio_direction_output(struct gpio_chip *chip,
- unsigned gpio, int value)
-{
- struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
- void __iomem *gpio_dir0 = gpch->regs + TITAN_GPIO_DIR_0;
- void __iomem *gpio_dir1 = gpch->regs + TITAN_GPIO_DIR_1;
-
- if (gpio >= TITAN_GPIO_MAX)
- return -EINVAL;
-
- titan_gpio_set_value(chip, gpio, value);
- writel(readl(gpio >> 5 ? gpio_dir1 : gpio_dir0) & ~(1 <<
- (gpio & 0x1f)), gpio >> 5 ? gpio_dir1 : gpio_dir0);
-
- return 0;
-}
-
-static struct ar7_gpio_chip ar7_gpio_chip = {
- .chip = {
- .label = "ar7-gpio",
- .direction_input = ar7_gpio_direction_input,
- .direction_output = ar7_gpio_direction_output,
- .set = ar7_gpio_set_value,
- .get = ar7_gpio_get_value,
- .base = 0,
- .ngpio = AR7_GPIO_MAX,
- }
-};
-
-static struct ar7_gpio_chip titan_gpio_chip = {
- .chip = {
- .label = "titan-gpio",
- .direction_input = titan_gpio_direction_input,
- .direction_output = titan_gpio_direction_output,
- .set = titan_gpio_set_value,
- .get = titan_gpio_get_value,
- .base = 0,
- .ngpio = TITAN_GPIO_MAX,
- }
-};
-
-static inline int ar7_gpio_enable_ar7(unsigned gpio)
-{
- void __iomem *gpio_en = ar7_gpio_chip.regs + AR7_GPIO_ENABLE;
-
- writel(readl(gpio_en) | (1 << gpio), gpio_en);
-
- return 0;
-}
-
-static inline int ar7_gpio_enable_titan(unsigned gpio)
-{
- void __iomem *gpio_en0 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_0;
- void __iomem *gpio_en1 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_1;
-
- writel(readl(gpio >> 5 ? gpio_en1 : gpio_en0) | (1 << (gpio & 0x1f)),
- gpio >> 5 ? gpio_en1 : gpio_en0);
-
- return 0;
-}
-
-int ar7_gpio_enable(unsigned gpio)
-{
- return ar7_is_titan() ? ar7_gpio_enable_titan(gpio) :
- ar7_gpio_enable_ar7(gpio);
-}
-EXPORT_SYMBOL(ar7_gpio_enable);
-
-static inline int ar7_gpio_disable_ar7(unsigned gpio)
-{
- void __iomem *gpio_en = ar7_gpio_chip.regs + AR7_GPIO_ENABLE;
-
- writel(readl(gpio_en) & ~(1 << gpio), gpio_en);
-
- return 0;
-}
-
-static inline int ar7_gpio_disable_titan(unsigned gpio)
-{
- void __iomem *gpio_en0 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_0;
- void __iomem *gpio_en1 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_1;
-
- writel(readl(gpio >> 5 ? gpio_en1 : gpio_en0) & ~(1 << (gpio & 0x1f)),
- gpio >> 5 ? gpio_en1 : gpio_en0);
-
- return 0;
-}
-
-int ar7_gpio_disable(unsigned gpio)
-{
- return ar7_is_titan() ? ar7_gpio_disable_titan(gpio) :
- ar7_gpio_disable_ar7(gpio);
-}
-EXPORT_SYMBOL(ar7_gpio_disable);
-
-struct titan_gpio_cfg {
- u32 reg;
- u32 shift;
- u32 func;
-};
-
-static const struct titan_gpio_cfg titan_gpio_table[] = {
- /* reg, start bit, mux value */
- {4, 24, 1},
- {4, 26, 1},
- {4, 28, 1},
- {4, 30, 1},
- {5, 6, 1},
- {5, 8, 1},
- {5, 10, 1},
- {5, 12, 1},
- {7, 14, 3},
- {7, 16, 3},
- {7, 18, 3},
- {7, 20, 3},
- {7, 22, 3},
- {7, 26, 3},
- {7, 28, 3},
- {7, 30, 3},
- {8, 0, 3},
- {8, 2, 3},
- {8, 4, 3},
- {8, 10, 3},
- {8, 14, 3},
- {8, 16, 3},
- {8, 18, 3},
- {8, 20, 3},
- {9, 8, 3},
- {9, 10, 3},
- {9, 12, 3},
- {9, 14, 3},
- {9, 18, 3},
- {9, 20, 3},
- {9, 24, 3},
- {9, 26, 3},
- {9, 28, 3},
- {9, 30, 3},
- {10, 0, 3},
- {10, 2, 3},
- {10, 8, 3},
- {10, 10, 3},
- {10, 12, 3},
- {10, 14, 3},
- {13, 12, 3},
- {13, 14, 3},
- {13, 16, 3},
- {13, 18, 3},
- {13, 24, 3},
- {13, 26, 3},
- {13, 28, 3},
- {13, 30, 3},
- {14, 2, 3},
- {14, 6, 3},
- {14, 8, 3},
- {14, 12, 3}
-};
-
-static int titan_gpio_pinsel(unsigned gpio)
-{
- struct titan_gpio_cfg gpio_cfg;
- u32 mux_status, pin_sel_reg, tmp;
- void __iomem *pin_sel = (void __iomem *)KSEG1ADDR(AR7_REGS_PINSEL);
-
- if (gpio >= ARRAY_SIZE(titan_gpio_table))
- return -EINVAL;
-
- gpio_cfg = titan_gpio_table[gpio];
- pin_sel_reg = gpio_cfg.reg - 1;
-
- mux_status = (readl(pin_sel + pin_sel_reg) >> gpio_cfg.shift) & 0x3;
-
- /* Check the mux status */
- if (!((mux_status == 0) || (mux_status == gpio_cfg.func)))
- return 0;
-
- /* Set the pin sel value */
- tmp = readl(pin_sel + pin_sel_reg);
- tmp |= ((gpio_cfg.func & 0x3) << gpio_cfg.shift);
- writel(tmp, pin_sel + pin_sel_reg);
-
- return 0;
-}
-
-/* Perform minimal Titan GPIO configuration */
-static void titan_gpio_init(void)
-{
- unsigned i;
-
- for (i = 44; i < 48; i++) {
- titan_gpio_pinsel(i);
- ar7_gpio_enable_titan(i);
- titan_gpio_direction_input(&titan_gpio_chip.chip, i);
- }
-}
-
-int __init ar7_gpio_init(void)
-{
- int ret;
- struct ar7_gpio_chip *gpch;
- unsigned size;
-
- if (!ar7_is_titan()) {
- gpch = &ar7_gpio_chip;
- size = 0x10;
- } else {
- gpch = &titan_gpio_chip;
- size = 0x1f;
- }
-
- gpch->regs = ioremap(AR7_REGS_GPIO, size);
- if (!gpch->regs) {
- printk(KERN_ERR "%s: failed to ioremap regs\n",
- gpch->chip.label);
- return -ENOMEM;
- }
-
- ret = gpiochip_add_data(&gpch->chip, gpch);
- if (ret) {
- printk(KERN_ERR "%s: failed to add gpiochip\n",
- gpch->chip.label);
- iounmap(gpch->regs);
- return ret;
- }
- printk(KERN_INFO "%s: registered %d GPIOs\n",
- gpch->chip.label, gpch->chip.ngpio);
-
- if (ar7_is_titan())
- titan_gpio_init();
-
- return ret;
-}
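
In the Titan helpers above, GPIO lines are split across two 32-bit enable banks: bit 5 of the GPIO number picks TITAN_GPIO_ENBL_0 versus TITAN_GPIO_ENBL_1, and the low five bits pick the bit inside that register. A worked example derived from ar7_gpio_enable_titan() (illustrative sketch, not part of the patch):

	/* Titan GPIO 44:
	 *   44 >> 5   == 1   -> second bank, TITAN_GPIO_ENBL_1
	 *   44 & 0x1f == 12  -> read-modify-write bit 12 of that bank
	 */
	writel(readl(gpio_en1) | (1 << 12), gpio_en1);
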
diff --git a/arch/mips/ar7/irq.c b/arch/mips/ar7/irq.c
deleted file mode 100644
index f0a7942d393e..000000000000
--- a/arch/mips/ar7/irq.c
+++ /dev/null
@@ -1,165 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2006,2007 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2006,2007 Eugene Konev <ejka@openwrt.org>
- */
-
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-
-#include <asm/irq_cpu.h>
-#include <asm/mipsregs.h>
-#include <asm/mach-ar7/ar7.h>
-
-#define EXCEPT_OFFSET 0x80
-#define PACE_OFFSET 0xA0
-#define CHNLS_OFFSET 0x200
-
-#define REG_OFFSET(irq, reg) ((irq) / 32 * 0x4 + reg * 0x10)
-#define SEC_REG_OFFSET(reg) (EXCEPT_OFFSET + reg * 0x8)
-#define SEC_SR_OFFSET (SEC_REG_OFFSET(0)) /* 0x80 */
-#define CR_OFFSET(irq) (REG_OFFSET(irq, 1)) /* 0x10 */
-#define SEC_CR_OFFSET (SEC_REG_OFFSET(1)) /* 0x88 */
-#define ESR_OFFSET(irq) (REG_OFFSET(irq, 2)) /* 0x20 */
-#define SEC_ESR_OFFSET (SEC_REG_OFFSET(2)) /* 0x90 */
-#define ECR_OFFSET(irq) (REG_OFFSET(irq, 3)) /* 0x30 */
-#define SEC_ECR_OFFSET (SEC_REG_OFFSET(3)) /* 0x98 */
-#define PIR_OFFSET (0x40)
-#define MSR_OFFSET (0x44)
-#define PM_OFFSET(irq) (REG_OFFSET(irq, 5)) /* 0x50 */
-#define TM_OFFSET(irq) (REG_OFFSET(irq, 6)) /* 0x60 */
-
-#define REG(addr) ((u32 *)(KSEG1ADDR(AR7_REGS_IRQ) + addr))
-
-#define CHNL_OFFSET(chnl) (CHNLS_OFFSET + (chnl * 4))
-
-static int ar7_irq_base;
-
-static void ar7_unmask_irq(struct irq_data *d)
-{
- writel(1 << ((d->irq - ar7_irq_base) % 32),
- REG(ESR_OFFSET(d->irq - ar7_irq_base)));
-}
-
-static void ar7_mask_irq(struct irq_data *d)
-{
- writel(1 << ((d->irq - ar7_irq_base) % 32),
- REG(ECR_OFFSET(d->irq - ar7_irq_base)));
-}
-
-static void ar7_ack_irq(struct irq_data *d)
-{
- writel(1 << ((d->irq - ar7_irq_base) % 32),
- REG(CR_OFFSET(d->irq - ar7_irq_base)));
-}
-
-static void ar7_unmask_sec_irq(struct irq_data *d)
-{
- writel(1 << (d->irq - ar7_irq_base - 40), REG(SEC_ESR_OFFSET));
-}
-
-static void ar7_mask_sec_irq(struct irq_data *d)
-{
- writel(1 << (d->irq - ar7_irq_base - 40), REG(SEC_ECR_OFFSET));
-}
-
-static void ar7_ack_sec_irq(struct irq_data *d)
-{
- writel(1 << (d->irq - ar7_irq_base - 40), REG(SEC_CR_OFFSET));
-}
-
-static struct irq_chip ar7_irq_type = {
- .name = "AR7",
- .irq_unmask = ar7_unmask_irq,
- .irq_mask = ar7_mask_irq,
- .irq_ack = ar7_ack_irq
-};
-
-static struct irq_chip ar7_sec_irq_type = {
- .name = "AR7",
- .irq_unmask = ar7_unmask_sec_irq,
- .irq_mask = ar7_mask_sec_irq,
- .irq_ack = ar7_ack_sec_irq,
-};
-
-static void __init ar7_irq_init(int base)
-{
- int i;
- /*
- * Disable interrupts and clear pending
- */
- writel(0xffffffff, REG(ECR_OFFSET(0)));
- writel(0xff, REG(ECR_OFFSET(32)));
- writel(0xffffffff, REG(SEC_ECR_OFFSET));
- writel(0xffffffff, REG(CR_OFFSET(0)));
- writel(0xff, REG(CR_OFFSET(32)));
- writel(0xffffffff, REG(SEC_CR_OFFSET));
-
- ar7_irq_base = base;
-
- for (i = 0; i < 40; i++) {
- writel(i, REG(CHNL_OFFSET(i)));
- /* Primary IRQ's */
- irq_set_chip_and_handler(base + i, &ar7_irq_type,
- handle_level_irq);
- /* Secondary IRQ's */
- if (i < 32)
- irq_set_chip_and_handler(base + i + 40,
- &ar7_sec_irq_type,
- handle_level_irq);
- }
-
- if (request_irq(2, no_action, IRQF_NO_THREAD, "AR7 cascade interrupt",
- NULL))
- pr_err("Failed to request irq 2 (AR7 cascade interrupt)\n");
- if (request_irq(ar7_irq_base, no_action, IRQF_NO_THREAD,
- "AR7 cascade interrupt", NULL)) {
- pr_err("Failed to request irq %d (AR7 cascade interrupt)\n",
- ar7_irq_base);
- }
- set_c0_status(IE_IRQ0);
-}
-
-void __init arch_init_irq(void)
-{
- mips_cpu_irq_init();
- ar7_irq_init(8);
-}
-
-static void ar7_cascade(void)
-{
- u32 status;
- int i, irq;
-
- /* Primary IRQ's */
- irq = readl(REG(PIR_OFFSET)) & 0x3f;
- if (irq) {
- do_IRQ(ar7_irq_base + irq);
- return;
- }
-
- /* Secondary IRQ's are cascaded through primary '0' */
- writel(1, REG(CR_OFFSET(irq)));
- status = readl(REG(SEC_SR_OFFSET));
- for (i = 0; i < 32; i++) {
- if (status & 1) {
- do_IRQ(ar7_irq_base + i + 40);
- return;
- }
- status >>= 1;
- }
-
- spurious_interrupt();
-}
-
-asmlinkage void plat_irq_dispatch(void)
-{
- unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
- if (pending & STATUSF_IP7) /* cpu timer */
- do_IRQ(7);
- else if (pending & STATUSF_IP2) /* int0 hardware line */
- ar7_cascade();
- else
- spurious_interrupt();
-}
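
The removed interrupt controller groups its 40 primary lines into 32-bit banks, and REG_OFFSET() selects both the bank word and the register class (status, ack/clear, enable-set, enable-clear, ...). A worked example that follows directly from the macros above (illustrative; "irq" is the line number relative to ar7_irq_base):

	/* ESR is register class 2 (written by ar7_unmask_irq() to enable a line):
	 *   REG_OFFSET(5, 2)  == 0 * 0x4 + 2 * 0x10 == 0x20  -> bank 0, bit 5
	 *   REG_OFFSET(35, 2) == 1 * 0x4 + 2 * 0x10 == 0x24  -> bank 1, bit 3
	 */
	writel(1 << (irq % 32), REG(ESR_OFFSET(irq)));
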
diff --git a/arch/mips/ar7/memory.c b/arch/mips/ar7/memory.c
deleted file mode 100644
index ce8024c1a54e..000000000000
--- a/arch/mips/ar7/memory.c
+++ /dev/null
@@ -1,51 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2007 Eugene Konev <ejka@openwrt.org>
- */
-#include <linux/memblock.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/pfn.h>
-#include <linux/proc_fs.h>
-#include <linux/string.h>
-#include <linux/swap.h>
-
-#include <asm/bootinfo.h>
-#include <asm/page.h>
-#include <asm/sections.h>
-
-#include <asm/mach-ar7/ar7.h>
-
-static int __init memsize(void)
-{
- u32 size = (64 << 20);
- u32 *addr = (u32 *)KSEG1ADDR(AR7_SDRAM_BASE + size - 4);
- u32 *kernel_end = (u32 *)KSEG1ADDR(CPHYSADDR((u32)&_end));
- u32 *tmpaddr = addr;
-
- while (tmpaddr > kernel_end) {
- *tmpaddr = (u32)tmpaddr;
- size >>= 1;
- tmpaddr -= size >> 2;
- }
-
- do {
- tmpaddr += size >> 2;
- if (*tmpaddr != (u32)tmpaddr)
- break;
- size <<= 1;
- } while (size < (64 << 20));
-
- writel((u32)tmpaddr, &addr);
-
- return size;
-}
-
-void __init prom_meminit(void)
-{
- unsigned long pages;
-
- pages = memsize() >> PAGE_SHIFT;
- memblock_add(PHYS_OFFSET, pages << PAGE_SHIFT);
-}
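
memsize() above probes RAM without firmware help: it leaves a marker (the address itself) at candidate locations through the uncached KSEG1 window, halving the size until it reaches the end of the kernel image, then grows it again as long as the markers read back intact. A minimal sketch of the marker test (illustrative; the real routine separates the write and read-back phases so aliased or absent memory is caught):

	static int __init marker_survives(unsigned long offset)
	{
		volatile u32 *p = (u32 *)KSEG1ADDR(AR7_SDRAM_BASE + offset - 4);

		*p = (u32)p;		/* leave the marker ...          */
		return *p == (u32)p;	/* ... and check it reads back   */
	}
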
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
deleted file mode 100644
index 215149a85d83..000000000000
--- a/arch/mips/ar7/platform.c
+++ /dev/null
@@ -1,722 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2006,2007 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2006,2007 Eugene Konev <ejka@openwrt.org>
- */
-
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/physmap.h>
-#include <linux/serial.h>
-#include <linux/serial_8250.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/vlynq.h>
-#include <linux/leds.h>
-#include <linux/string.h>
-#include <linux/etherdevice.h>
-#include <linux/phy.h>
-#include <linux/phy_fixed.h>
-#include <linux/gpio.h>
-#include <linux/clk.h>
-
-#include <asm/addrspace.h>
-#include <asm/mach-ar7/ar7.h>
-#include <asm/mach-ar7/prom.h>
-
-/*****************************************************************************
- * VLYNQ Bus
- ****************************************************************************/
-struct plat_vlynq_data {
- struct plat_vlynq_ops ops;
- int gpio_bit;
- int reset_bit;
-};
-
-static int vlynq_on(struct vlynq_device *dev)
-{
- int ret;
- struct plat_vlynq_data *pdata = dev->dev.platform_data;
-
- ret = gpio_request(pdata->gpio_bit, "vlynq");
- if (ret)
- goto out;
-
- ar7_device_reset(pdata->reset_bit);
-
- ret = ar7_gpio_disable(pdata->gpio_bit);
- if (ret)
- goto out_enabled;
-
- ret = ar7_gpio_enable(pdata->gpio_bit);
- if (ret)
- goto out_enabled;
-
- ret = gpio_direction_output(pdata->gpio_bit, 0);
- if (ret)
- goto out_gpio_enabled;
-
- msleep(50);
-
- gpio_set_value(pdata->gpio_bit, 1);
-
- msleep(50);
-
- return 0;
-
-out_gpio_enabled:
- ar7_gpio_disable(pdata->gpio_bit);
-out_enabled:
- ar7_device_disable(pdata->reset_bit);
- gpio_free(pdata->gpio_bit);
-out:
- return ret;
-}
-
-static void vlynq_off(struct vlynq_device *dev)
-{
- struct plat_vlynq_data *pdata = dev->dev.platform_data;
-
- ar7_gpio_disable(pdata->gpio_bit);
- gpio_free(pdata->gpio_bit);
- ar7_device_disable(pdata->reset_bit);
-}
-
-static struct resource vlynq_low_res[] = {
- {
- .name = "regs",
- .flags = IORESOURCE_MEM,
- .start = AR7_REGS_VLYNQ0,
- .end = AR7_REGS_VLYNQ0 + 0xff,
- },
- {
- .name = "irq",
- .flags = IORESOURCE_IRQ,
- .start = 29,
- .end = 29,
- },
- {
- .name = "mem",
- .flags = IORESOURCE_MEM,
- .start = 0x04000000,
- .end = 0x04ffffff,
- },
- {
- .name = "devirq",
- .flags = IORESOURCE_IRQ,
- .start = 80,
- .end = 111,
- },
-};
-
-static struct resource vlynq_high_res[] = {
- {
- .name = "regs",
- .flags = IORESOURCE_MEM,
- .start = AR7_REGS_VLYNQ1,
- .end = AR7_REGS_VLYNQ1 + 0xff,
- },
- {
- .name = "irq",
- .flags = IORESOURCE_IRQ,
- .start = 33,
- .end = 33,
- },
- {
- .name = "mem",
- .flags = IORESOURCE_MEM,
- .start = 0x0c000000,
- .end = 0x0cffffff,
- },
- {
- .name = "devirq",
- .flags = IORESOURCE_IRQ,
- .start = 112,
- .end = 143,
- },
-};
-
-static struct plat_vlynq_data vlynq_low_data = {
- .ops = {
- .on = vlynq_on,
- .off = vlynq_off,
- },
- .reset_bit = 20,
- .gpio_bit = 18,
-};
-
-static struct plat_vlynq_data vlynq_high_data = {
- .ops = {
- .on = vlynq_on,
- .off = vlynq_off,
- },
- .reset_bit = 16,
- .gpio_bit = 19,
-};
-
-static struct platform_device vlynq_low = {
- .id = 0,
- .name = "vlynq",
- .dev = {
- .platform_data = &vlynq_low_data,
- },
- .resource = vlynq_low_res,
- .num_resources = ARRAY_SIZE(vlynq_low_res),
-};
-
-static struct platform_device vlynq_high = {
- .id = 1,
- .name = "vlynq",
- .dev = {
- .platform_data = &vlynq_high_data,
- },
- .resource = vlynq_high_res,
- .num_resources = ARRAY_SIZE(vlynq_high_res),
-};
-
-/*****************************************************************************
- * Flash
- ****************************************************************************/
-static struct resource physmap_flash_resource = {
- .name = "mem",
- .flags = IORESOURCE_MEM,
- .start = 0x10000000,
- .end = 0x107fffff,
-};
-
-static const char *ar7_probe_types[] = { "ar7part", NULL };
-
-static struct physmap_flash_data physmap_flash_data = {
- .width = 2,
- .part_probe_types = ar7_probe_types,
-};
-
-static struct platform_device physmap_flash = {
- .name = "physmap-flash",
- .dev = {
- .platform_data = &physmap_flash_data,
- },
- .resource = &physmap_flash_resource,
- .num_resources = 1,
-};
-
-/*****************************************************************************
- * Ethernet
- ****************************************************************************/
-static struct resource cpmac_low_res[] = {
- {
- .name = "regs",
- .flags = IORESOURCE_MEM,
- .start = AR7_REGS_MAC0,
- .end = AR7_REGS_MAC0 + 0x7ff,
- },
- {
- .name = "irq",
- .flags = IORESOURCE_IRQ,
- .start = 27,
- .end = 27,
- },
-};
-
-static struct resource cpmac_high_res[] = {
- {
- .name = "regs",
- .flags = IORESOURCE_MEM,
- .start = AR7_REGS_MAC1,
- .end = AR7_REGS_MAC1 + 0x7ff,
- },
- {
- .name = "irq",
- .flags = IORESOURCE_IRQ,
- .start = 41,
- .end = 41,
- },
-};
-
-static struct fixed_phy_status fixed_phy_status __initdata = {
- .link = 1,
- .speed = 100,
- .duplex = 1,
-};
-
-static struct plat_cpmac_data cpmac_low_data = {
- .reset_bit = 17,
- .power_bit = 20,
- .phy_mask = 0x80000000,
-};
-
-static struct plat_cpmac_data cpmac_high_data = {
- .reset_bit = 21,
- .power_bit = 22,
- .phy_mask = 0x7fffffff,
-};
-
-static u64 cpmac_dma_mask = DMA_BIT_MASK(32);
-
-static struct platform_device cpmac_low = {
- .id = 0,
- .name = "cpmac",
- .dev = {
- .dma_mask = &cpmac_dma_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &cpmac_low_data,
- },
- .resource = cpmac_low_res,
- .num_resources = ARRAY_SIZE(cpmac_low_res),
-};
-
-static struct platform_device cpmac_high = {
- .id = 1,
- .name = "cpmac",
- .dev = {
- .dma_mask = &cpmac_dma_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &cpmac_high_data,
- },
- .resource = cpmac_high_res,
- .num_resources = ARRAY_SIZE(cpmac_high_res),
-};
-
-static void __init cpmac_get_mac(int instance, unsigned char *dev_addr)
-{
- char name[5], *mac;
-
- sprintf(name, "mac%c", 'a' + instance);
- mac = prom_getenv(name);
- if (!mac && instance) {
- sprintf(name, "mac%c", 'a');
- mac = prom_getenv(name);
- }
-
- if (mac) {
- if (!mac_pton(mac, dev_addr)) {
- pr_warn("cannot parse mac address, using random address\n");
- eth_random_addr(dev_addr);
- }
- } else
- eth_random_addr(dev_addr);
-}
-
-/*****************************************************************************
- * USB
- ****************************************************************************/
-static struct resource usb_res[] = {
- {
- .name = "regs",
- .flags = IORESOURCE_MEM,
- .start = AR7_REGS_USB,
- .end = AR7_REGS_USB + 0xff,
- },
- {
- .name = "irq",
- .flags = IORESOURCE_IRQ,
- .start = 32,
- .end = 32,
- },
- {
- .name = "mem",
- .flags = IORESOURCE_MEM,
- .start = 0x03400000,
- .end = 0x03401fff,
- },
-};
-
-static struct platform_device ar7_udc = {
- .name = "ar7_udc",
- .resource = usb_res,
- .num_resources = ARRAY_SIZE(usb_res),
-};
-
-/*****************************************************************************
- * LEDs
- ****************************************************************************/
-static const struct gpio_led default_leds[] = {
- {
- .name = "status",
- .gpio = 8,
- .active_low = 1,
- },
-};
-
-static const struct gpio_led titan_leds[] = {
- { .name = "status", .gpio = 8, .active_low = 1, },
- { .name = "wifi", .gpio = 13, .active_low = 1, },
-};
-
-static const struct gpio_led dsl502t_leds[] = {
- {
- .name = "status",
- .gpio = 9,
- .active_low = 1,
- },
- {
- .name = "ethernet",
- .gpio = 7,
- .active_low = 1,
- },
- {
- .name = "usb",
- .gpio = 12,
- .active_low = 1,
- },
-};
-
-static const struct gpio_led dg834g_leds[] = {
- {
- .name = "ppp",
- .gpio = 6,
- .active_low = 1,
- },
- {
- .name = "status",
- .gpio = 7,
- .active_low = 1,
- },
- {
- .name = "adsl",
- .gpio = 8,
- .active_low = 1,
- },
- {
- .name = "wifi",
- .gpio = 12,
- .active_low = 1,
- },
- {
- .name = "power",
- .gpio = 14,
- .active_low = 1,
- .default_trigger = "default-on",
- },
-};
-
-static const struct gpio_led fb_sl_leds[] = {
- {
- .name = "1",
- .gpio = 7,
- },
- {
- .name = "2",
- .gpio = 13,
- .active_low = 1,
- },
- {
- .name = "3",
- .gpio = 10,
- .active_low = 1,
- },
- {
- .name = "4",
- .gpio = 12,
- .active_low = 1,
- },
- {
- .name = "5",
- .gpio = 9,
- .active_low = 1,
- },
-};
-
-static const struct gpio_led fb_fon_leds[] = {
- {
- .name = "1",
- .gpio = 8,
- },
- {
- .name = "2",
- .gpio = 3,
- .active_low = 1,
- },
- {
- .name = "3",
- .gpio = 5,
- },
- {
- .name = "4",
- .gpio = 4,
- .active_low = 1,
- },
- {
- .name = "5",
- .gpio = 11,
- .active_low = 1,
- },
-};
-
-static const struct gpio_led gt701_leds[] = {
- {
- .name = "inet:green",
- .gpio = 13,
- .active_low = 1,
- },
- {
- .name = "usb",
- .gpio = 12,
- .active_low = 1,
- },
- {
- .name = "inet:red",
- .gpio = 9,
- .active_low = 1,
- },
- {
- .name = "power:red",
- .gpio = 7,
- .active_low = 1,
- },
- {
- .name = "power:green",
- .gpio = 8,
- .active_low = 1,
- .default_trigger = "default-on",
- },
- {
- .name = "ethernet",
- .gpio = 10,
- .active_low = 1,
- },
-};
-
-static struct gpio_led_platform_data ar7_led_data;
-
-static struct platform_device ar7_gpio_leds = {
- .name = "leds-gpio",
- .dev = {
- .platform_data = &ar7_led_data,
- }
-};
-
-static void __init detect_leds(void)
-{
- char *prid, *usb_prod;
-
- /* Default LEDs */
- ar7_led_data.num_leds = ARRAY_SIZE(default_leds);
- ar7_led_data.leds = default_leds;
-
- /* FIXME: the whole thing is unreliable */
- prid = prom_getenv("ProductID");
- usb_prod = prom_getenv("usb_prod");
-
- /* If we can't get the product id from PROM, use the default LEDs */
- if (!prid)
- return;
-
- if (strstr(prid, "Fritz_Box_FON")) {
- ar7_led_data.num_leds = ARRAY_SIZE(fb_fon_leds);
- ar7_led_data.leds = fb_fon_leds;
- } else if (strstr(prid, "Fritz_Box_")) {
- ar7_led_data.num_leds = ARRAY_SIZE(fb_sl_leds);
- ar7_led_data.leds = fb_sl_leds;
- } else if ((!strcmp(prid, "AR7RD") || !strcmp(prid, "AR7DB"))
- && usb_prod != NULL && strstr(usb_prod, "DSL-502T")) {
- ar7_led_data.num_leds = ARRAY_SIZE(dsl502t_leds);
- ar7_led_data.leds = dsl502t_leds;
- } else if (strstr(prid, "DG834")) {
- ar7_led_data.num_leds = ARRAY_SIZE(dg834g_leds);
- ar7_led_data.leds = dg834g_leds;
- } else if (strstr(prid, "CYWM") || strstr(prid, "CYWL")) {
- ar7_led_data.num_leds = ARRAY_SIZE(titan_leds);
- ar7_led_data.leds = titan_leds;
- } else if (strstr(prid, "GT701")) {
- ar7_led_data.num_leds = ARRAY_SIZE(gt701_leds);
- ar7_led_data.leds = gt701_leds;
- }
-}
-
-/*****************************************************************************
- * Watchdog
- ****************************************************************************/
-static struct resource ar7_wdt_res = {
- .name = "regs",
- .flags = IORESOURCE_MEM,
- .start = -1, /* Filled at runtime */
- .end = -1, /* Filled at runtime */
-};
-
-static struct platform_device ar7_wdt = {
- .name = "ar7_wdt",
- .resource = &ar7_wdt_res,
- .num_resources = 1,
-};
-
-/*****************************************************************************
- * Init
- ****************************************************************************/
-static int __init ar7_register_uarts(void)
-{
-#ifdef CONFIG_SERIAL_8250
- static struct uart_port uart_port __initdata;
- struct clk *bus_clk;
- int res;
-
- memset(&uart_port, 0, sizeof(struct uart_port));
-
- bus_clk = clk_get(NULL, "bus");
- if (IS_ERR(bus_clk))
- panic("unable to get bus clk");
-
- uart_port.type = PORT_AR7;
- uart_port.uartclk = clk_get_rate(bus_clk) / 2;
- uart_port.iotype = UPIO_MEM32;
- uart_port.flags = UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF;
- uart_port.regshift = 2;
-
- uart_port.line = 0;
- uart_port.irq = AR7_IRQ_UART0;
- uart_port.mapbase = AR7_REGS_UART0;
- uart_port.membase = ioremap(uart_port.mapbase, 256);
-
- res = early_serial_setup(&uart_port);
- if (res)
- return res;
-
- /* Only TNETD73xx have a second serial port */
- if (ar7_has_second_uart()) {
- uart_port.line = 1;
- uart_port.irq = AR7_IRQ_UART1;
- uart_port.mapbase = UR8_REGS_UART1;
- uart_port.membase = ioremap(uart_port.mapbase, 256);
-
- res = early_serial_setup(&uart_port);
- if (res)
- return res;
- }
-#endif
-
- return 0;
-}
-
-static void __init titan_fixup_devices(void)
-{
- /* Set vlynq0 data */
- vlynq_low_data.reset_bit = 15;
- vlynq_low_data.gpio_bit = 14;
-
- /* Set vlynq1 data */
- vlynq_high_data.reset_bit = 16;
- vlynq_high_data.gpio_bit = 7;
-
- /* Set vlynq0 resources */
- vlynq_low_res[0].start = TITAN_REGS_VLYNQ0;
- vlynq_low_res[0].end = TITAN_REGS_VLYNQ0 + 0xff;
- vlynq_low_res[1].start = 33;
- vlynq_low_res[1].end = 33;
- vlynq_low_res[2].start = 0x0c000000;
- vlynq_low_res[2].end = 0x0fffffff;
- vlynq_low_res[3].start = 80;
- vlynq_low_res[3].end = 111;
-
- /* Set vlynq1 resources */
- vlynq_high_res[0].start = TITAN_REGS_VLYNQ1;
- vlynq_high_res[0].end = TITAN_REGS_VLYNQ1 + 0xff;
- vlynq_high_res[1].start = 34;
- vlynq_high_res[1].end = 34;
- vlynq_high_res[2].start = 0x40000000;
- vlynq_high_res[2].end = 0x43ffffff;
- vlynq_high_res[3].start = 112;
- vlynq_high_res[3].end = 143;
-
- /* Set cpmac0 data */
- cpmac_low_data.phy_mask = 0x40000000;
-
- /* Set cpmac1 data */
- cpmac_high_data.phy_mask = 0x80000000;
-
- /* Set cpmac0 resources */
- cpmac_low_res[0].start = TITAN_REGS_MAC0;
- cpmac_low_res[0].end = TITAN_REGS_MAC0 + 0x7ff;
-
- /* Set cpmac1 resources */
- cpmac_high_res[0].start = TITAN_REGS_MAC1;
- cpmac_high_res[0].end = TITAN_REGS_MAC1 + 0x7ff;
-}
-
-static int __init ar7_register_devices(void)
-{
- void __iomem *bootcr;
- u32 val;
- int res;
-
- res = ar7_gpio_init();
- if (res)
- pr_warn("unable to register gpios: %d\n", res);
-
- res = ar7_register_uarts();
- if (res)
- pr_err("unable to setup uart(s): %d\n", res);
-
- res = platform_device_register(&physmap_flash);
- if (res)
- pr_warn("unable to register physmap-flash: %d\n", res);
-
- if (ar7_is_titan())
- titan_fixup_devices();
-
- ar7_device_disable(vlynq_low_data.reset_bit);
- res = platform_device_register(&vlynq_low);
- if (res)
- pr_warn("unable to register vlynq-low: %d\n", res);
-
- if (ar7_has_high_vlynq()) {
- ar7_device_disable(vlynq_high_data.reset_bit);
- res = platform_device_register(&vlynq_high);
- if (res)
- pr_warn("unable to register vlynq-high: %d\n", res);
- }
-
- if (ar7_has_high_cpmac()) {
- res = fixed_phy_add(PHY_POLL, cpmac_high.id,
- &fixed_phy_status);
- if (!res) {
- cpmac_get_mac(1, cpmac_high_data.dev_addr);
-
- res = platform_device_register(&cpmac_high);
- if (res)
- pr_warn("unable to register cpmac-high: %d\n",
- res);
- } else
- pr_warn("unable to add cpmac-high phy: %d\n", res);
- } else
- cpmac_low_data.phy_mask = 0xffffffff;
-
- res = fixed_phy_add(PHY_POLL, cpmac_low.id, &fixed_phy_status);
- if (!res) {
- cpmac_get_mac(0, cpmac_low_data.dev_addr);
- res = platform_device_register(&cpmac_low);
- if (res)
- pr_warn("unable to register cpmac-low: %d\n", res);
- } else
- pr_warn("unable to add cpmac-low phy: %d\n", res);
-
- detect_leds();
- res = platform_device_register(&ar7_gpio_leds);
- if (res)
- pr_warn("unable to register leds: %d\n", res);
-
- res = platform_device_register(&ar7_udc);
- if (res)
- pr_warn("unable to register usb slave: %d\n", res);
-
- /* Register watchdog only if enabled in hardware */
- bootcr = ioremap(AR7_REGS_DCL, 4);
- val = readl(bootcr);
- iounmap(bootcr);
- if (val & AR7_WDT_HW_ENA) {
- if (ar7_has_high_vlynq())
- ar7_wdt_res.start = UR8_REGS_WDT;
- else
- ar7_wdt_res.start = AR7_REGS_WDT;
-
- ar7_wdt_res.end = ar7_wdt_res.start + 0x20;
- res = platform_device_register(&ar7_wdt);
- if (res)
- pr_warn("unable to register watchdog: %d\n", res);
- }
-
- return 0;
-}
-device_initcall(ar7_register_devices);
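
For the two CPMAC instances registered above, the MAC address comes from the bootloader environment rather than firmware tables. A usage sketch mirroring cpmac_get_mac() (the "maca" value shown is made up):

	unsigned char addr[ETH_ALEN];
	char *mac = prom_getenv("maca");	/* e.g. "00:11:22:33:44:55" */

	if (!mac || !mac_pton(mac, addr))
		eth_random_addr(addr);		/* fall back to a random MAC */
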
diff --git a/arch/mips/ar7/prom.c b/arch/mips/ar7/prom.c
deleted file mode 100644
index 5810d3993fc6..000000000000
--- a/arch/mips/ar7/prom.c
+++ /dev/null
@@ -1,256 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
- *
- * Putting things on the screen/serial line using YAMONs facilities.
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/serial_reg.h>
-#include <linux/spinlock.h>
-#include <linux/export.h>
-#include <linux/string.h>
-#include <linux/io.h>
-#include <asm/bootinfo.h>
-#include <asm/setup.h>
-
-#include <asm/mach-ar7/ar7.h>
-#include <asm/mach-ar7/prom.h>
-
-#define MAX_ENTRY 80
-
-struct env_var {
- char *name;
- char *value;
-};
-
-static struct env_var adam2_env[MAX_ENTRY];
-
-char *prom_getenv(const char *name)
-{
- int i;
-
- for (i = 0; (i < MAX_ENTRY) && adam2_env[i].name; i++)
- if (!strcmp(name, adam2_env[i].name))
- return adam2_env[i].value;
-
- return NULL;
-}
-EXPORT_SYMBOL(prom_getenv);
-
-static void __init ar7_init_cmdline(int argc, char *argv[])
-{
- int i;
-
- for (i = 1; i < argc; i++) {
- strlcat(arcs_cmdline, argv[i], COMMAND_LINE_SIZE);
- if (i < (argc - 1))
- strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
- }
-}
-
-struct psbl_rec {
- u32 psbl_size;
- u32 env_base;
- u32 env_size;
- u32 ffs_base;
- u32 ffs_size;
-};
-
-static const char psp_env_version[] __initconst = "TIENV0.8";
-
-struct psp_env_chunk {
- u8 num;
- u8 ctrl;
- u16 csum;
- u8 len;
- char data[11];
-} __packed;
-
-struct psp_var_map_entry {
- u8 num;
- char *value;
-};
-
-static const struct psp_var_map_entry psp_var_map[] = {
- { 1, "cpufrequency" },
- { 2, "memsize" },
- { 3, "flashsize" },
- { 4, "modetty0" },
- { 5, "modetty1" },
- { 8, "maca" },
- { 9, "macb" },
- { 28, "sysfrequency" },
- { 38, "mipsfrequency" },
-};
-
-/*
-
-Well-known variable (num is looked up in table above for matching variable name)
-Example: cpufrequency=211968000
-+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
-| 01 |CTRL|CHECKSUM | 01 | _2 | _1 | _1 | _9 | _6 | _8 | _0 | _0 | _0 | \0 | FF
-+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
-
-Name=Value pair in a single chunk
-Example: NAME=VALUE
-+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
-| 00 |CTRL|CHECKSUM | 01 | _N | _A | _M | _E | _0 | _V | _A | _L | _U | _E | \0
-+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
-
-Name=Value pair in 2 chunks (len is the number of chunks)
-Example: bootloaderVersion=1.3.7.15
-+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
-| 00 |CTRL|CHECKSUM | 02 | _b | _o | _o | _t | _l | _o | _a | _d | _e | _r | _V
-+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
-| _e | _r | _s | _i | _o | _n | \0 | _1 | _. | _3 | _. | _7 | _. | _1 | _5 | \0
-+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
-
-Data is padded with 0xFF
-
-*/
-
-#define PSP_ENV_SIZE 4096
-
-static char psp_env_data[PSP_ENV_SIZE] = { 0, };
-
-static char * __init lookup_psp_var_map(u8 num)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(psp_var_map); i++)
- if (psp_var_map[i].num == num)
- return psp_var_map[i].value;
-
- return NULL;
-}
-
-static void __init add_adam2_var(char *name, char *value)
-{
- int i;
-
- for (i = 0; i < MAX_ENTRY; i++) {
- if (!adam2_env[i].name) {
- adam2_env[i].name = name;
- adam2_env[i].value = value;
- return;
- } else if (!strcmp(adam2_env[i].name, name)) {
- adam2_env[i].value = value;
- return;
- }
- }
-}
-
-static int __init parse_psp_env(void *psp_env_base)
-{
- int i, n;
- char *name, *value;
- struct psp_env_chunk *chunks = (struct psp_env_chunk *)psp_env_data;
-
- memcpy_fromio(chunks, psp_env_base, PSP_ENV_SIZE);
-
- i = 1;
- n = PSP_ENV_SIZE / sizeof(struct psp_env_chunk);
- while (i < n) {
- if ((chunks[i].num == 0xff) || ((i + chunks[i].len) > n))
- break;
- value = chunks[i].data;
- if (chunks[i].num) {
- name = lookup_psp_var_map(chunks[i].num);
- } else {
- name = value;
- value += strlen(name) + 1;
- }
- if (name)
- add_adam2_var(name, value);
- i += chunks[i].len;
- }
- return 0;
-}
-
-static void __init ar7_init_env(struct env_var *env)
-{
- int i;
- struct psbl_rec *psbl = (struct psbl_rec *)(KSEG1ADDR(0x14000300));
- void *psp_env = (void *)KSEG1ADDR(psbl->env_base);
-
- if (strcmp(psp_env, psp_env_version) == 0) {
- parse_psp_env(psp_env);
- } else {
- for (i = 0; i < MAX_ENTRY; i++, env++)
- if (env->name)
- add_adam2_var(env->name, env->value);
- }
-}
-
-static void __init console_config(void)
-{
-#ifdef CONFIG_SERIAL_8250_CONSOLE
- char console_string[40];
- int baud = 0;
- char parity = '\0', bits = '\0', flow = '\0';
- char *s, *p;
-
- if (strstr(arcs_cmdline, "console="))
- return;
-
- s = prom_getenv("modetty0");
- if (s) {
- baud = simple_strtoul(s, &p, 10);
- s = p;
- if (*s == ',')
- s++;
- if (*s)
- parity = *s++;
- if (*s == ',')
- s++;
- if (*s)
- bits = *s++;
- if (*s == ',')
- s++;
- if (*s == 'h')
- flow = 'r';
- }
-
- if (baud == 0)
- baud = 38400;
- if (parity != 'n' && parity != 'o' && parity != 'e')
- parity = 'n';
- if (bits != '7' && bits != '8')
- bits = '8';
-
- if (flow == 'r')
- sprintf(console_string, " console=ttyS0,%d%c%c%c", baud,
- parity, bits, flow);
- else
- sprintf(console_string, " console=ttyS0,%d%c%c", baud, parity,
- bits);
- strlcat(arcs_cmdline, console_string, COMMAND_LINE_SIZE);
-#endif
-}
-
-void __init prom_init(void)
-{
- ar7_init_cmdline(fw_arg0, (char **)fw_arg1);
- ar7_init_env((struct env_var *)fw_arg2);
- console_config();
-}
-
-#define PORT(offset) (KSEG1ADDR(AR7_REGS_UART0 + (offset * 4)))
-static inline unsigned int serial_in(int offset)
-{
- return readl((void *)PORT(offset));
-}
-
-static inline void serial_out(int offset, int value)
-{
- writel(value, (void *)PORT(offset));
-}
-
-void prom_putchar(char c)
-{
- while ((serial_in(UART_LSR) & UART_LSR_TEMT) == 0)
- ;
- serial_out(UART_TX, c);
-}
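
console_config() above turns the bootloader's modetty0 variable into a kernel console= argument. A worked example of that mapping (the modetty0 value is illustrative; the parsing rules are the ones in the code above):

	/* modetty0 = "38400,n,8,hw"   (baud, parity, bits, flow)       */
	/*   baud 38400, parity 'n', bits '8', flow 'h' -> option 'r'   */
	/* appended to the command line: " console=ttyS0,38400n8r"      */
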
diff --git a/arch/mips/ar7/setup.c b/arch/mips/ar7/setup.c
deleted file mode 100644
index 352d5dbc777c..000000000000
--- a/arch/mips/ar7/setup.c
+++ /dev/null
@@ -1,93 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/pm.h>
-#include <linux/time.h>
-
-#include <asm/reboot.h>
-#include <asm/mach-ar7/ar7.h>
-#include <asm/mach-ar7/prom.h>
-
-static void ar7_machine_restart(char *command)
-{
- u32 *softres_reg = ioremap(AR7_REGS_RESET + AR7_RESET_SOFTWARE, 1);
-
- writel(1, softres_reg);
-}
-
-static void ar7_machine_halt(void)
-{
- while (1)
- ;
-}
-
-static void ar7_machine_power_off(void)
-{
- u32 *power_reg = (u32 *)ioremap(AR7_REGS_POWER, 1);
- u32 power_state = readl(power_reg) | (3 << 30);
-
- writel(power_state, power_reg);
- ar7_machine_halt();
-}
-
-const char *get_system_type(void)
-{
- u16 chip_id = ar7_chip_id();
- u16 titan_variant_id = titan_chip_id();
-
- switch (chip_id) {
- case AR7_CHIP_7100:
- return "TI AR7 (TNETD7100)";
- case AR7_CHIP_7200:
- return "TI AR7 (TNETD7200)";
- case AR7_CHIP_7300:
- return "TI AR7 (TNETD7300)";
- case AR7_CHIP_TITAN:
- switch (titan_variant_id) {
- case TITAN_CHIP_1050:
- return "TI AR7 (TNETV1050)";
- case TITAN_CHIP_1055:
- return "TI AR7 (TNETV1055)";
- case TITAN_CHIP_1056:
- return "TI AR7 (TNETV1056)";
- case TITAN_CHIP_1060:
- return "TI AR7 (TNETV1060)";
- }
- fallthrough;
- default:
- return "TI AR7 (unknown)";
- }
-}
-
-static int __init ar7_init_console(void)
-{
- return 0;
-}
-console_initcall(ar7_init_console);
-
-/*
- * Initializes basic routines and structures pointers, memory size (as
- * given by the bios and saves the command line.
- */
-void __init plat_mem_setup(void)
-{
- unsigned long io_base;
-
- _machine_restart = ar7_machine_restart;
- _machine_halt = ar7_machine_halt;
- pm_power_off = ar7_machine_power_off;
-
- io_base = (unsigned long)ioremap(AR7_REGS_BASE, 0x10000);
- if (!io_base)
- panic("Can't remap IO base!");
- set_io_port_base(io_base);
-
- prom_meminit();
-
- printk(KERN_INFO "%s, ID: 0x%04x, Revision: 0x%02x\n",
- get_system_type(), ar7_chip_id(), ar7_chip_rev());
-}
diff --git a/arch/mips/ar7/time.c b/arch/mips/ar7/time.c
deleted file mode 100644
index 72aa77d7087b..000000000000
--- a/arch/mips/ar7/time.c
+++ /dev/null
@@ -1,31 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
- *
- * Setting up the clock on the MIPS boards.
- */
-
-#include <linux/init.h>
-#include <linux/time.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-
-#include <asm/time.h>
-#include <asm/mach-ar7/ar7.h>
-
-void __init plat_time_init(void)
-{
- struct clk *cpu_clk;
-
- /* Initialize ar7 clocks so the CPU clock frequency is correct */
- ar7_init_clocks();
-
- cpu_clk = clk_get(NULL, "cpu");
- if (IS_ERR(cpu_clk)) {
- printk(KERN_ERR "unable to get cpu clock\n");
- return;
- }
-
- mips_hpt_frequency = clk_get_rate(cpu_clk) / 2;
-}
diff --git a/arch/mips/boot/compressed/uart-16550.c b/arch/mips/boot/compressed/uart-16550.c
index 96d28f211121..09dcd2c561d9 100644
--- a/arch/mips/boot/compressed/uart-16550.c
+++ b/arch/mips/boot/compressed/uart-16550.c
@@ -13,11 +13,6 @@
#define PORT(offset) (CKSEG1ADDR(UART_BASE) + (offset))
#endif
-#ifdef CONFIG_AR7
-#include <ar7.h>
-#define PORT(offset) (CKSEG1ADDR(AR7_REGS_UART0) + (4 * offset))
-#endif
-
#ifdef CONFIG_MACH_INGENIC
#define INGENIC_UART_BASE_ADDR (0x10030000 + 0x1000 * CONFIG_ZBOOT_INGENIC_UART)
#define PORT(offset) (CKSEG1ADDR(INGENIC_UART_BASE_ADDR) + (4 * offset))
diff --git a/arch/mips/boot/dts/ingenic/jz4725b.dtsi b/arch/mips/boot/dts/ingenic/jz4725b.dtsi
index acbbe8c4664c..c5c5a094c37d 100644
--- a/arch/mips/boot/dts/ingenic/jz4725b.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4725b.dtsi
@@ -366,7 +366,6 @@
rom: memory@1fc00000 {
compatible = "mtd-rom";
- probe-type = "map_rom";
reg = <0x1fc00000 0x2000>;
bank-width = <4>;
diff --git a/arch/mips/boot/dts/ingenic/jz4770.dtsi b/arch/mips/boot/dts/ingenic/jz4770.dtsi
index 9c0099919db7..504e895e916e 100644
--- a/arch/mips/boot/dts/ingenic/jz4770.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4770.dtsi
@@ -461,7 +461,6 @@
rom: memory@1fc00000 {
compatible = "mtd-rom";
- probe-type = "map_rom";
reg = <0x1fc00000 0x2000>;
bank-width = <4>;
diff --git a/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc1.dts b/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc1.dts
index 129b6710b699..f9c262cc2e96 100644
--- a/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc1.dts
+++ b/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc1.dts
@@ -8,7 +8,7 @@
/ {
compatible = "gnubee,gb-pc1", "mediatek,mt7621-soc";
- model = "GB-PC1";
+ model = "GnuBee GB-PC1";
memory@0 {
device_type = "memory";
diff --git a/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc2.dts b/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc2.dts
index f810cd10f4f4..b281e13f22ed 100644
--- a/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc2.dts
+++ b/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc2.dts
@@ -8,7 +8,7 @@
/ {
compatible = "gnubee,gb-pc2", "mediatek,mt7621-soc";
- model = "GB-PC2";
+ model = "GnuBee GB-PC2";
memory@0 {
device_type = "memory";
diff --git a/arch/mips/boot/dts/ralink/mt7621.dtsi b/arch/mips/boot/dts/ralink/mt7621.dtsi
index 7caed0d14f11..35a10258f235 100644
--- a/arch/mips/boot/dts/ralink/mt7621.dtsi
+++ b/arch/mips/boot/dts/ralink/mt7621.dtsi
@@ -300,14 +300,13 @@
compatible = "mediatek,mt7621-eth";
reg = <0x1e100000 0x10000>;
- clocks = <&sysc MT7621_CLK_FE>,
- <&sysc MT7621_CLK_ETH>;
+ clocks = <&sysc MT7621_CLK_FE>, <&sysc MT7621_CLK_ETH>;
clock-names = "fe", "ethif";
#address-cells = <1>;
#size-cells = <0>;
- resets = <&sysc MT7621_RST_FE &sysc MT7621_RST_ETH>;
+ resets = <&sysc MT7621_RST_FE>, <&sysc MT7621_RST_ETH>;
reset-names = "fe", "eth";
interrupt-parent = <&gic>;
diff --git a/arch/mips/configs/ar7_defconfig b/arch/mips/configs/ar7_defconfig
deleted file mode 100644
index 329c60aa570a..000000000000
--- a/arch/mips/configs/ar7_defconfig
+++ /dev/null
@@ -1,119 +0,0 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_KERNEL_LZMA=y
-CONFIG_SYSVIPC=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-# CONFIG_ELF_CORE is not set
-# CONFIG_KALLSYMS is not set
-# CONFIG_VM_EVENT_COUNTERS is not set
-# CONFIG_COMPAT_BRK is not set
-CONFIG_AR7=y
-CONFIG_HZ_100=y
-CONFIG_KEXEC=y
-# CONFIG_SECCOMP is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_MROUTE=y
-CONFIG_SYN_COOKIES=y
-# CONFIG_INET_DIAG is not set
-CONFIG_TCP_CONG_ADVANCED=y
-# CONFIG_TCP_CONG_BIC is not set
-# CONFIG_TCP_CONG_CUBIC is not set
-CONFIG_TCP_CONG_WESTWOOD=y
-# CONFIG_TCP_CONG_HTCP is not set
-# CONFIG_IPV6 is not set
-CONFIG_NETFILTER=y
-# CONFIG_BRIDGE_NETFILTER is not set
-CONFIG_NF_CONNTRACK=m
-CONFIG_NF_CONNTRACK_MARK=y
-CONFIG_NF_CONNTRACK_FTP=m
-CONFIG_NF_CONNTRACK_IRC=m
-CONFIG_NF_CONNTRACK_TFTP=m
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
-CONFIG_NETFILTER_XT_MATCH_LIMIT=m
-CONFIG_NETFILTER_XT_MATCH_MAC=m
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
-CONFIG_NETFILTER_XT_MATCH_STATE=m
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_RAW=m
-CONFIG_ATM=m
-CONFIG_ATM_BR2684=m
-CONFIG_ATM_BR2684_IPFILTER=y
-CONFIG_BRIDGE=y
-CONFIG_VLAN_8021Q=y
-CONFIG_NET_SCHED=y
-CONFIG_NET_CLS_ACT=y
-CONFIG_NET_ACT_POLICE=y
-CONFIG_HAMRADIO=y
-CONFIG_CFG80211=m
-CONFIG_MAC80211=m
-CONFIG_MTD=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_CFI_STAA=y
-CONFIG_MTD_COMPLEX_MAPPINGS=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_NETDEVICES=y
-CONFIG_CPMAC=y
-CONFIG_FIXED_PHY=y
-CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPPOATM=m
-CONFIG_PPPOE=m
-CONFIG_PPP_ASYNC=m
-# CONFIG_INPUT is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=2
-CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_HW_RANDOM=y
-CONFIG_GPIO_SYSFS=y
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_AR7_WDT=y
-# CONFIG_USB_SUPPORT is not set
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_GPIO=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
-# CONFIG_DNOTIFY is not set
-CONFIG_PROC_KCORE=y
-# CONFIG_PROC_PAGE_MONITOR is not set
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_SUMMARY=y
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_SQUASHFS=y
-# CONFIG_CRYPTO_HW is not set
-CONFIG_STRIP_ASM_SYMS=y
-CONFIG_DEBUG_FS=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="rootfstype=squashfs,jffs2"
diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
index 1843468f84a3..00329bb5de5a 100644
--- a/arch/mips/configs/fuloong2e_defconfig
+++ b/arch/mips/configs/fuloong2e_defconfig
@@ -177,7 +177,6 @@ CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
-CONFIG_REISERFS_FS=m
CONFIG_AUTOFS_FS=y
CONFIG_FUSE_FS=y
CONFIG_ISO9660_FS=m
diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig
index fdf374574105..65adb538030d 100644
--- a/arch/mips/configs/jazz_defconfig
+++ b/arch/mips/configs/jazz_defconfig
@@ -70,10 +70,6 @@ CONFIG_FRAMEBUFFER_CONSOLE=y
# CONFIG_HWMON is not set
CONFIG_EXT2_FS=m
CONFIG_EXT3_FS=y
-CONFIG_REISERFS_FS=m
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
CONFIG_XFS_FS=m
CONFIG_XFS_QUOTA=y
CONFIG_AUTOFS_FS=m
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index 83d9a8ff4270..38f17b658421 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -229,9 +229,6 @@ CONFIG_EXT2_FS=m
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
-CONFIG_REISERFS_FS=m
-CONFIG_REISERFS_PROC_INFO=y
-CONFIG_REISERFS_FS_XATTR=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
CONFIG_XFS_FS=m
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index ae1a7793e810..6f8046024557 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -317,11 +317,6 @@ CONFIG_UIO=m
CONFIG_UIO_CIF=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-CONFIG_REISERFS_FS=m
-CONFIG_REISERFS_PROC_INFO=y
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
index c07e30f63d8b..16a91eeff67f 100644
--- a/arch/mips/configs/malta_kvm_defconfig
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -323,11 +323,6 @@ CONFIG_UIO=m
CONFIG_UIO_CIF=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-CONFIG_REISERFS_FS=m
-CONFIG_REISERFS_PROC_INFO=y
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig
index 0a5701020d3f..264aba29ea4f 100644
--- a/arch/mips/configs/maltaup_xpa_defconfig
+++ b/arch/mips/configs/maltaup_xpa_defconfig
@@ -323,11 +323,6 @@ CONFIG_UIO=m
CONFIG_UIO_CIF=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-CONFIG_REISERFS_FS=m
-CONFIG_REISERFS_PROC_INFO=y
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index 5c5e2186210c..08e1c1f2f4de 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -310,10 +310,6 @@ CONFIG_USB_LD=m
CONFIG_USB_TEST=m
CONFIG_EXT2_FS=m
CONFIG_EXT3_FS=y
-CONFIG_REISERFS_FS=m
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
CONFIG_XFS_FS=m
CONFIG_XFS_QUOTA=y
CONFIG_AUTOFS_FS=m
diff --git a/arch/mips/include/asm/mach-ar7/ar7.h b/arch/mips/include/asm/mach-ar7/ar7.h
deleted file mode 100644
index 1e8621a6afa3..000000000000
--- a/arch/mips/include/asm/mach-ar7/ar7.h
+++ /dev/null
@@ -1,191 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2006,2007 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2006,2007 Eugene Konev <ejka@openwrt.org>
- */
-
-#ifndef __AR7_H__
-#define __AR7_H__
-
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/errno.h>
-
-#include <asm/addrspace.h>
-
-#define AR7_SDRAM_BASE 0x14000000
-
-#define AR7_REGS_BASE 0x08610000
-
-#define AR7_REGS_MAC0 (AR7_REGS_BASE + 0x0000)
-#define AR7_REGS_GPIO (AR7_REGS_BASE + 0x0900)
-/* 0x08610A00 - 0x08610BFF (512 bytes, 128 bytes / clock) */
-#define AR7_REGS_POWER (AR7_REGS_BASE + 0x0a00)
-#define AR7_REGS_CLOCKS (AR7_REGS_POWER + 0x80)
-#define UR8_REGS_CLOCKS (AR7_REGS_POWER + 0x20)
-#define AR7_REGS_UART0 (AR7_REGS_BASE + 0x0e00)
-#define AR7_REGS_USB (AR7_REGS_BASE + 0x1200)
-#define AR7_REGS_RESET (AR7_REGS_BASE + 0x1600)
-#define AR7_REGS_PINSEL (AR7_REGS_BASE + 0x160C)
-#define AR7_REGS_VLYNQ0 (AR7_REGS_BASE + 0x1800)
-#define AR7_REGS_DCL (AR7_REGS_BASE + 0x1a00)
-#define AR7_REGS_VLYNQ1 (AR7_REGS_BASE + 0x1c00)
-#define AR7_REGS_MDIO (AR7_REGS_BASE + 0x1e00)
-#define AR7_REGS_IRQ (AR7_REGS_BASE + 0x2400)
-#define AR7_REGS_MAC1 (AR7_REGS_BASE + 0x2800)
-
-#define AR7_REGS_WDT (AR7_REGS_BASE + 0x1f00)
-#define UR8_REGS_WDT (AR7_REGS_BASE + 0x0b00)
-#define UR8_REGS_UART1 (AR7_REGS_BASE + 0x0f00)
-
-/* Titan registers */
-#define TITAN_REGS_ESWITCH_BASE (0x08640000)
-#define TITAN_REGS_MAC0 (TITAN_REGS_ESWITCH_BASE)
-#define TITAN_REGS_MAC1 (TITAN_REGS_ESWITCH_BASE + 0x0800)
-#define TITAN_REGS_MDIO (TITAN_REGS_ESWITCH_BASE + 0x02000)
-#define TITAN_REGS_VLYNQ0 (AR7_REGS_BASE + 0x1c00)
-#define TITAN_REGS_VLYNQ1 (AR7_REGS_BASE + 0x1300)
-
-#define AR7_RESET_PERIPHERAL 0x0
-#define AR7_RESET_SOFTWARE 0x4
-#define AR7_RESET_STATUS 0x8
-
-#define AR7_RESET_BIT_CPMAC_LO 17
-#define AR7_RESET_BIT_CPMAC_HI 21
-#define AR7_RESET_BIT_MDIO 22
-#define AR7_RESET_BIT_EPHY 26
-
-#define TITAN_RESET_BIT_EPHY1 28
-
-/* GPIO control registers */
-#define AR7_GPIO_INPUT 0x0
-#define AR7_GPIO_OUTPUT 0x4
-#define AR7_GPIO_DIR 0x8
-#define AR7_GPIO_ENABLE 0xc
-#define TITAN_GPIO_INPUT_0 0x0
-#define TITAN_GPIO_INPUT_1 0x4
-#define TITAN_GPIO_OUTPUT_0 0x8
-#define TITAN_GPIO_OUTPUT_1 0xc
-#define TITAN_GPIO_DIR_0 0x10
-#define TITAN_GPIO_DIR_1 0x14
-#define TITAN_GPIO_ENBL_0 0x18
-#define TITAN_GPIO_ENBL_1 0x1c
-
-#define AR7_CHIP_7100 0x18
-#define AR7_CHIP_7200 0x2b
-#define AR7_CHIP_7300 0x05
-#define AR7_CHIP_TITAN 0x07
-#define TITAN_CHIP_1050 0x0f
-#define TITAN_CHIP_1055 0x0e
-#define TITAN_CHIP_1056 0x0d
-#define TITAN_CHIP_1060 0x07
-
-/* Interrupts */
-#define AR7_IRQ_UART0 15
-#define AR7_IRQ_UART1 16
-
-/* Clocks */
-#define AR7_AFE_CLOCK 35328000
-#define AR7_REF_CLOCK 25000000
-#define AR7_XTAL_CLOCK 24000000
-
-/* DCL */
-#define AR7_WDT_HW_ENA 0x10
-
-struct plat_cpmac_data {
- int reset_bit;
- int power_bit;
- u32 phy_mask;
- char dev_addr[6];
-};
-
-struct plat_dsl_data {
- int reset_bit_dsl;
- int reset_bit_sar;
-};
-
-static inline int ar7_is_titan(void)
-{
- return (readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x24)) & 0xffff) ==
- AR7_CHIP_TITAN;
-}
-
-static inline u16 ar7_chip_id(void)
-{
- return ar7_is_titan() ? AR7_CHIP_TITAN : (readl((void *)
- KSEG1ADDR(AR7_REGS_GPIO + 0x14)) & 0xffff);
-}
-
-static inline u16 titan_chip_id(void)
-{
- unsigned int val = readl((void *)KSEG1ADDR(AR7_REGS_GPIO +
- TITAN_GPIO_INPUT_1));
- return ((val >> 12) & 0x0f);
-}
-
-static inline u8 ar7_chip_rev(void)
-{
- return (readl((void *)KSEG1ADDR(AR7_REGS_GPIO + (ar7_is_titan() ? 0x24 :
- 0x14))) >> 16) & 0xff;
-}
-
-static inline int ar7_has_high_cpmac(void)
-{
- u16 chip_id = ar7_chip_id();
- switch (chip_id) {
- case AR7_CHIP_7100:
- case AR7_CHIP_7200:
- return 0;
- case AR7_CHIP_7300:
- return 1;
- default:
- return -ENXIO;
- }
-}
-#define ar7_has_high_vlynq ar7_has_high_cpmac
-#define ar7_has_second_uart ar7_has_high_cpmac
-
-static inline void ar7_device_enable(u32 bit)
-{
- void *reset_reg =
- (void *)KSEG1ADDR(AR7_REGS_RESET + AR7_RESET_PERIPHERAL);
- writel(readl(reset_reg) | (1 << bit), reset_reg);
- msleep(20);
-}
-
-static inline void ar7_device_disable(u32 bit)
-{
- void *reset_reg =
- (void *)KSEG1ADDR(AR7_REGS_RESET + AR7_RESET_PERIPHERAL);
- writel(readl(reset_reg) & ~(1 << bit), reset_reg);
- msleep(20);
-}
-
-static inline void ar7_device_reset(u32 bit)
-{
- ar7_device_disable(bit);
- ar7_device_enable(bit);
-}
-
-static inline void ar7_device_on(u32 bit)
-{
- void *power_reg = (void *)KSEG1ADDR(AR7_REGS_POWER);
- writel(readl(power_reg) | (1 << bit), power_reg);
- msleep(20);
-}
-
-static inline void ar7_device_off(u32 bit)
-{
- void *power_reg = (void *)KSEG1ADDR(AR7_REGS_POWER);
- writel(readl(power_reg) & ~(1 << bit), power_reg);
- msleep(20);
-}
-
-int __init ar7_gpio_init(void);
-void __init ar7_init_clocks(void);
-
-/* Board specific GPIO functions */
-int ar7_gpio_enable(unsigned gpio);
-int ar7_gpio_disable(unsigned gpio);
-
-#endif /* __AR7_H__ */
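
The device-control helpers near the end of this header toggle a peripheral's bit in the reset register with a 20 ms settle on each side, which is what the VLYNQ bring-up in platform.c relies on. A worked trace of ar7_device_reset(20) (illustrative):

	/* ar7_device_reset(20):
	 *   clear bit 20 of AR7_RESET_PERIPHERAL, msleep(20)   (disable)
	 *   set   bit 20 of AR7_RESET_PERIPHERAL, msleep(20)   (enable)
	 */
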
diff --git a/arch/mips/include/asm/mach-ar7/irq.h b/arch/mips/include/asm/mach-ar7/irq.h
deleted file mode 100644
index 46bb730ea970..000000000000
--- a/arch/mips/include/asm/mach-ar7/irq.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Shamelessly copied from asm-mips/mach-emma2rh/
- * Copyright (C) 2003 by Ralf Baechle
- */
-#ifndef __ASM_AR7_IRQ_H
-#define __ASM_AR7_IRQ_H
-
-#define NR_IRQS 256
-
-#include <asm/mach-generic/irq.h>
-
-#endif /* __ASM_AR7_IRQ_H */
diff --git a/arch/mips/include/asm/mach-ar7/prom.h b/arch/mips/include/asm/mach-ar7/prom.h
deleted file mode 100644
index 9e1d20b06f57..000000000000
--- a/arch/mips/include/asm/mach-ar7/prom.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2006, 2007 Florian Fainelli <florian@openwrt.org>
- */
-
-#ifndef __PROM_H__
-#define __PROM_H__
-
-extern char *prom_getenv(const char *name);
-extern void prom_meminit(void);
-
-#endif /* __PROM_H__ */
diff --git a/arch/mips/include/asm/mach-ar7/spaces.h b/arch/mips/include/asm/mach-ar7/spaces.h
deleted file mode 100644
index a004d94dfbdd..000000000000
--- a/arch/mips/include/asm/mach-ar7/spaces.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
- * Copyright (C) 2000, 2002 Maciej W. Rozycki
- * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
- */
-#ifndef _ASM_AR7_SPACES_H
-#define _ASM_AR7_SPACES_H
-
-/*
- * This handles the memory map.
- * We handle pages at KSEG0 for kernels with 32 bit address space.
- */
-#define PAGE_OFFSET _AC(0x94000000, UL)
-#define PHYS_OFFSET _AC(0x14000000, UL)
-
-#include <asm/mach-generic/spaces.h>
-
-#endif /* __ASM_AR7_SPACES_H */
diff --git a/arch/mips/include/asm/mach-loongson32/dma.h b/arch/mips/include/asm/mach-loongson32/dma.h
deleted file mode 100644
index e917b3ccb2c2..000000000000
--- a/arch/mips/include/asm/mach-loongson32/dma.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2015 Zhang, Keguang <keguang.zhang@gmail.com>
- *
- * Loongson 1 NAND platform support.
- */
-
-#ifndef __ASM_MACH_LOONGSON32_DMA_H
-#define __ASM_MACH_LOONGSON32_DMA_H
-
-#define LS1X_DMA_CHANNEL0 0
-#define LS1X_DMA_CHANNEL1 1
-#define LS1X_DMA_CHANNEL2 2
-
-struct plat_ls1x_dma {
- int nr_channels;
-};
-
-extern struct plat_ls1x_dma ls1b_dma_pdata;
-
-#endif /* __ASM_MACH_LOONGSON32_DMA_H */
diff --git a/arch/mips/include/asm/mach-loongson32/nand.h b/arch/mips/include/asm/mach-loongson32/nand.h
deleted file mode 100644
index aaf5ed19d78d..000000000000
--- a/arch/mips/include/asm/mach-loongson32/nand.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2015 Zhang, Keguang <keguang.zhang@gmail.com>
- *
- * Loongson 1 NAND platform support.
- */
-
-#ifndef __ASM_MACH_LOONGSON32_NAND_H
-#define __ASM_MACH_LOONGSON32_NAND_H
-
-#include <linux/dmaengine.h>
-#include <linux/mtd/partitions.h>
-
-struct plat_ls1x_nand {
- struct mtd_partition *parts;
- unsigned int nr_parts;
-
- int hold_cycle;
- int wait_cycle;
-};
-
-extern struct plat_ls1x_nand ls1b_nand_pdata;
-
-bool ls1x_dma_filter_fn(struct dma_chan *chan, void *param);
-
-#endif /* __ASM_MACH_LOONGSON32_NAND_H */
diff --git a/arch/mips/include/asm/mach-loongson32/platform.h b/arch/mips/include/asm/mach-loongson32/platform.h
index 2cdcfb5f6012..f74292b13bc3 100644
--- a/arch/mips/include/asm/mach-loongson32/platform.h
+++ b/arch/mips/include/asm/mach-loongson32/platform.h
@@ -8,9 +8,6 @@
#include <linux/platform_device.h>
-#include <dma.h>
-#include <nand.h>
-
extern struct platform_device ls1x_uart_pdev;
extern struct platform_device ls1x_eth0_pdev;
extern struct platform_device ls1x_eth1_pdev;
diff --git a/arch/mips/jazz/setup.c b/arch/mips/jazz/setup.c
index 04aab419a0fc..e318ea11c858 100644
--- a/arch/mips/jazz/setup.c
+++ b/arch/mips/jazz/setup.c
@@ -13,7 +13,6 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/console.h>
-#include <linux/screen_info.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/dma-mapping.h>
@@ -76,14 +75,6 @@ void __init plat_mem_setup(void)
_machine_restart = jazz_machine_restart;
-#ifdef CONFIG_VT
- screen_info = (struct screen_info) {
- .orig_video_cols = 160,
- .orig_video_lines = 64,
- .orig_video_points = 16,
- };
-#endif
-
add_preferred_console("ttyS", 0, "9600");
}
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index f5b2ef979b43..8f0a7263a9d6 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -66,7 +66,6 @@ copy_word:
LONG_ADDIU s6, s6, -1
beq s6, zero, process_entry
b copy_word
- b process_entry
done:
#ifdef CONFIG_SMP
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 08321c945ac4..2d2ca024bd47 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -15,7 +15,6 @@
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/export.h>
-#include <linux/screen_info.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
@@ -54,10 +53,6 @@ struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_data);
-#ifdef CONFIG_VT
-struct screen_info screen_info;
-#endif
-
/*
* Setup information
*
@@ -793,12 +788,6 @@ void __init setup_arch(char **cmdline_p)
if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
check_bugs64_early();
-#if defined(CONFIG_VT)
-#if defined(CONFIG_VGA_CONSOLE)
- conswitchp = &vga_con;
-#endif
-#endif
-
arch_mem_init(cmdline_p);
dmi_setup();
diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c
index 8075590a9f83..623eb4bc7b41 100644
--- a/arch/mips/loongson32/common/platform.c
+++ b/arch/mips/loongson32/common/platform.c
@@ -15,8 +15,6 @@
#include <platform.h>
#include <loongson1.h>
-#include <dma.h>
-#include <nand.h>
/* 8250/16550 compatible UART */
#define LS1X_UART(_id) \
diff --git a/arch/mips/loongson32/ls1b/board.c b/arch/mips/loongson32/ls1b/board.c
index fed8d432ef20..fe115bdcb22c 100644
--- a/arch/mips/loongson32/ls1b/board.c
+++ b/arch/mips/loongson32/ls1b/board.c
@@ -8,8 +8,6 @@
#include <linux/sizes.h>
#include <loongson1.h>
-#include <dma.h>
-#include <nand.h>
#include <platform.h>
static const struct gpio_led ls1x_gpio_leds[] __initconst = {
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index 21cb3ac1237b..3a2836e9d856 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -161,7 +161,7 @@ static void __init pci_clock_check(void)
#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
static void __init screen_info_setup(void)
{
- screen_info = (struct screen_info) {
+ static struct screen_info si = {
.orig_x = 0,
.orig_y = 25,
.ext_mem_k = 0,
@@ -175,6 +175,8 @@ static void __init screen_info_setup(void)
.orig_video_isVGA = VIDEO_TYPE_VGAC,
.orig_video_points = 16
};
+
+ vgacon_register_screen(&si);
}
#endif
diff --git a/arch/mips/pci/fixup-lantiq.c b/arch/mips/pci/fixup-lantiq.c
index 105569c1b712..13009666204f 100644
--- a/arch/mips/pci/fixup-lantiq.c
+++ b/arch/mips/pci/fixup-lantiq.c
@@ -4,8 +4,8 @@
* Copyright (C) 2012 John Crispin <john@phrozen.org>
*/
-#include <linux/of_irq.h>
#include <linux/of_pci.h>
+#include <linux/pci.h>
int (*ltq_pci_plat_arch_init)(struct pci_dev *dev) = NULL;
int (*ltq_pci_plat_dev_init)(struct pci_dev *dev) = NULL;
diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c
index 76683993cdd3..38c90b5e8754 100644
--- a/arch/mips/sibyte/swarm/setup.c
+++ b/arch/mips/sibyte/swarm/setup.c
@@ -13,6 +13,7 @@
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/initrd.h>
@@ -112,6 +113,19 @@ int update_persistent_clock64(struct timespec64 now)
}
}
+#ifdef CONFIG_VGA_CONSOLE
+static struct screen_info vgacon_screen_info = {
+ .orig_video_page = 52,
+ .orig_video_mode = 3,
+ .orig_video_cols = 80,
+ .flags = 12,
+ .orig_video_ega_bx = 3,
+ .orig_video_lines = 25,
+ .orig_video_isVGA = 0x22,
+ .orig_video_points = 16,
+};
+#endif
+
void __init plat_mem_setup(void)
{
#ifdef CONFIG_SIBYTE_BCM1x80
@@ -129,17 +143,8 @@ void __init plat_mem_setup(void)
if (m41t81_probe())
swarm_rtc_type = RTC_M41T81;
-#ifdef CONFIG_VT
- screen_info = (struct screen_info) {
- .orig_video_page = 52,
- .orig_video_mode = 3,
- .orig_video_cols = 80,
- .flags = 12,
- .orig_video_ega_bx = 3,
- .orig_video_lines = 25,
- .orig_video_isVGA = 0x22,
- .orig_video_points = 16,
- };
+#ifdef CONFIG_VGA_CONSOLE
+ vgacon_register_screen(&vgacon_screen_info);
/* XXXKW for CFE, get lines/cols from environment */
#endif
}
diff --git a/arch/mips/sni/setup.c b/arch/mips/sni/setup.c
index efad85c8c823..42fdb939c88d 100644
--- a/arch/mips/sni/setup.c
+++ b/arch/mips/sni/setup.c
@@ -38,19 +38,21 @@ extern void sni_machine_power_off(void);
static void __init sni_display_setup(void)
{
-#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) && defined(CONFIG_FW_ARC)
- struct screen_info *si = &screen_info;
+#if defined(CONFIG_VGA_CONSOLE) && defined(CONFIG_FW_ARC)
+ static struct screen_info si;
DISPLAY_STATUS *di;
di = ArcGetDisplayStatus(1);
if (di) {
- si->orig_x = di->CursorXPosition;
- si->orig_y = di->CursorYPosition;
- si->orig_video_cols = di->CursorMaxXPosition;
- si->orig_video_lines = di->CursorMaxYPosition;
- si->orig_video_isVGA = VIDEO_TYPE_VGAC;
- si->orig_video_points = 16;
+ si.orig_x = di->CursorXPosition;
+ si.orig_y = di->CursorYPosition;
+ si.orig_video_cols = di->CursorMaxXPosition;
+ si.orig_video_lines = di->CursorMaxYPosition;
+ si.orig_video_isVGA = VIDEO_TYPE_VGAC;
+ si.orig_video_points = 16;
+
+ vgacon_register_screen(&si);
}
#endif
}
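
The Malta, SWARM and SNI hunks above all make the same conversion: instead of filling in the old global screen_info, each platform keeps its own struct screen_info and hands it to the VGA console via vgacon_register_screen(). A minimal sketch of the new pattern (platform values are illustrative):

	#include <linux/console.h>
	#include <linux/init.h>
	#include <linux/screen_info.h>

	#ifdef CONFIG_VGA_CONSOLE
	static struct screen_info example_screen_info = {
		.orig_video_cols   = 80,
		.orig_video_lines  = 25,
		.orig_video_isVGA  = VIDEO_TYPE_VGAC,
		.orig_video_points = 16,
	};
	#endif

	void __init example_plat_setup(void)
	{
	#ifdef CONFIG_VGA_CONSOLE
		vgacon_register_screen(&example_screen_info);
	#endif
	}
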
diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
index 8582ed965844..da122a5fa43b 100644
--- a/arch/nios2/kernel/setup.c
+++ b/arch/nios2/kernel/setup.c
@@ -19,7 +19,6 @@
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
-#include <linux/screen_info.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
@@ -36,10 +35,6 @@ static struct pt_regs fake_regs = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0};
-#ifdef CONFIG_VT
-struct screen_info screen_info;
-#endif
-
/* Copy a short hook instruction sequence to the exception address */
static inline void copy_exception_handler(unsigned int addr)
{
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 968ebe17494c..920db57b6b4c 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -177,12 +177,8 @@ vdso_prepare: prepare0
$(Q)$(MAKE) $(build)=arch/parisc/kernel/vdso32 include/generated/vdso32-offsets.h
endif
-PHONY += vdso_install
-
-vdso_install:
- $(Q)$(MAKE) $(build)=arch/parisc/kernel/vdso $@
- $(if $(CONFIG_COMPAT_VDSO), \
- $(Q)$(MAKE) $(build)=arch/parisc/kernel/vdso32 $@)
+vdso-install-y += arch/parisc/kernel/vdso32/vdso32.so
+vdso-install-$(CONFIG_64BIT) += arch/parisc/kernel/vdso64/vdso64.so
install: KBUILD_IMAGE := vmlinux
zinstall: KBUILD_IMAGE := vmlinuz
diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig
index f6ded7147b4d..19a804860ed5 100644
--- a/arch/parisc/configs/generic-64bit_defconfig
+++ b/arch/parisc/configs/generic-64bit_defconfig
@@ -248,7 +248,6 @@ CONFIG_UIO_AEC=m
CONFIG_UIO_SERCOS3=m
CONFIG_UIO_PCI_GENERIC=m
CONFIG_STAGING=y
-CONFIG_QLGE=m
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_SECURITY=y
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index d5d5388973ac..6f105ee4f3cf 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -237,6 +237,7 @@ config PPC
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_FAST_GUP
select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_FUNCTION_DESCRIPTORS if PPC64_ELF_ABI_V1
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_TRACER
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 2a54fadbeaf5..ea4033abc07d 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -82,6 +82,18 @@ config MSI_BITMAP_SELFTEST
bool "Run self-tests of the MSI bitmap code"
depends on DEBUG_KERNEL
+config GUEST_STATE_BUFFER_TEST
+ def_tristate n
+ prompt "Enable Guest State Buffer unit tests"
+ depends on KUNIT
+ depends on KVM_BOOK3S_HV_POSSIBLE
+ default KUNIT_ALL_TESTS
+ help
+ The Guest State Buffer is a data format specified in the PAPR.
+ It is used by hcalls to communicate the state of L2 guests between
+ the L1 and L0 hypervisors. Enable unit tests for the library
+ used to create and use guest state buffers.
+
config PPC_IRQ_SOFT_MASK_DEBUG
bool "Include extra checks for powerpc irq soft masking"
depends on PPC64
@@ -147,6 +159,8 @@ config BDI_SWITCH
config BOOTX_TEXT
bool "Support for early boot text console (BootX or OpenFirmware only)"
depends on PPC_BOOK3S
+ select FONT_SUN8x16
+ select FONT_SUPPORT
help
Say Y here to see progress messages from the boot firmware in text
mode. Requires either BootX or Open Firmware.
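Referring back to the GUEST_STATE_BUFFER_TEST option added above: a minimal KUnit case for the guest state buffer library could look roughly like the sketch below. This is illustrative only and not the test file added by this series; the test name and the choice of KVMPPC_GSID_LR are arbitrary, while the kvmppc_gsb_*/kvmppc_gse_* calls are the ones declared in the new asm/guest-state-buffer.h later in this patch.

#include <kunit/test.h>
#include <asm/guest-state-buffer.h>

static void gsb_u64_roundtrip_test(struct kunit *test)
{
	struct kvmppc_gs_parser gsp = { 0 };
	struct kvmppc_gs_elem *gse;
	struct kvmppc_gs_buff *gsb;

	/* Room for the buffer header plus one 64-bit element */
	gsb = kvmppc_gsb_new(sizeof(struct kvmppc_gs_header) +
			     kvmppc_gse_total_size(sizeof(u64)),
			     0 /* guest_id */, 0 /* vcpu_id */, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);

	/* Serialize one register-sized value, then parse it back out */
	KUNIT_EXPECT_EQ(test, kvmppc_gse_put_u64(gsb, KVMPPC_GSID_LR, 0xcafeULL), 0);
	KUNIT_EXPECT_EQ(test, kvmppc_gse_parse(&gsp, gsb), 0);

	gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_LR);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gse);
	KUNIT_EXPECT_EQ(test, kvmppc_gse_get_u64(gse), 0xcafeULL);

	kvmppc_gsb_free(gsb);
}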
diff --git a/arch/powerpc/Makefile.postlink b/arch/powerpc/Makefile.postlink
index 1f860b3c9bec..ae5a4256b03d 100644
--- a/arch/powerpc/Makefile.postlink
+++ b/arch/powerpc/Makefile.postlink
@@ -35,9 +35,6 @@ ifdef CONFIG_RELOCATABLE
$(call if_changed,relocs_check)
endif
-%.ko: FORCE
- @true
-
clean:
rm -f .tmp_symbols.txt
diff --git a/arch/powerpc/boot/install.sh b/arch/powerpc/boot/install.sh
index 461902c8a46d..101fcb397a0f 100755
--- a/arch/powerpc/boot/install.sh
+++ b/arch/powerpc/boot/install.sh
@@ -21,13 +21,17 @@ set -e
# this should work for both the pSeries zImage and the iSeries vmlinux.sm
image_name=`basename $2`
-if [ -f $4/$image_name ]; then
- mv $4/$image_name $4/$image_name.old
+
+echo "Warning: '${INSTALLKERNEL}' command not available... Copying" \
+ "directly to $4/$image_name-$1" >&2
+
+if [ -f $4/$image_name-$1 ]; then
+ mv $4/$image_name-$1 $4/$image_name-$1.old
fi
-if [ -f $4/System.map ]; then
- mv $4/System.map $4/System.old
+if [ -f $4/System.map-$1 ]; then
+ mv $4/System.map-$1 $4/System-$1.old
fi
-cat $2 > $4/$image_name
-cp $3 $4/System.map
+cat $2 > $4/$image_name-$1
+cp $3 $4/System.map-$1
diff --git a/arch/powerpc/configs/44x/sam440ep_defconfig b/arch/powerpc/configs/44x/sam440ep_defconfig
index 51499ee6366b..2479ab62d12f 100644
--- a/arch/powerpc/configs/44x/sam440ep_defconfig
+++ b/arch/powerpc/configs/44x/sam440ep_defconfig
@@ -78,7 +78,6 @@ CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS=y
CONFIG_AUTOFS_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
diff --git a/arch/powerpc/configs/debug.config b/arch/powerpc/configs/debug.config
index a14ae1f20d60..bcc1fcf25e10 100644
--- a/arch/powerpc/configs/debug.config
+++ b/arch/powerpc/configs/debug.config
@@ -1 +1,5 @@
+CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG=y
+CONFIG_PPC_IRQ_SOFT_MASK_DEBUG=y
+CONFIG_PPC_KUAP_DEBUG=y
+CONFIG_PPC_RFI_SRR_DEBUG=y
CONFIG_SCOM_DEBUGFS=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 71d9d112c0b6..9215bed53291 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -202,10 +202,6 @@ CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
-CONFIG_REISERFS_FS=y
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
CONFIG_XFS_FS=m
CONFIG_XFS_POSIX_ACL=y
CONFIG_FS_DAX=y
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index a205da9ee5f2..57ded82c2840 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -138,7 +138,6 @@ CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
CONFIG_ADB=y
-CONFIG_ADB_CUDA=y
CONFIG_ADB_PMU=y
CONFIG_ADB_PMU_LED=y
CONFIG_ADB_PMU_LED_DISK=y
@@ -181,6 +180,7 @@ CONFIG_SERIAL_PMACZILOG_TTYS=y
CONFIG_SERIAL_PMACZILOG_CONSOLE=y
CONFIG_NVRAM=y
CONFIG_I2C_CHARDEV=m
+CONFIG_POWER_RESET=y
CONFIG_APM_POWER=y
CONFIG_BATTERY_PMU=y
CONFIG_HWMON=m
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index 624c371ffcc3..4c05f4e4d505 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -175,10 +175,6 @@ CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
-CONFIG_REISERFS_FS=y
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
CONFIG_JFS_FS=y
CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index eaf3273372a9..f279703425d4 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -954,11 +954,6 @@ CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_JBD2_DEBUG=y
-CONFIG_REISERFS_FS=m
-CONFIG_REISERFS_PROC_INFO=y
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 9b13eb14e21b..52971ee30717 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -20,7 +20,7 @@
#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
-#define _PAGE_USER 0x004 /* usermode access allowed */
+#define _PAGE_READ 0x004 /* software: read access allowed */
#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
@@ -28,7 +28,7 @@
#define _PAGE_DIRTY 0x080 /* C: page changed */
#define _PAGE_ACCESSED 0x100 /* R: page referenced */
#define _PAGE_EXEC 0x200 /* software: exec allowed */
-#define _PAGE_RW 0x400 /* software: user write access allowed */
+#define _PAGE_WRITE 0x400 /* software: user write access allowed */
#define _PAGE_SPECIAL 0x800 /* software: Special page */
#ifdef CONFIG_PTE_64BIT
@@ -42,26 +42,13 @@
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD (~PAGE_MASK)
-/* We borrow the _PAGE_USER bit to store the exclusive marker in swap PTEs. */
-#define _PAGE_SWP_EXCLUSIVE _PAGE_USER
+/* We borrow the _PAGE_READ bit to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE _PAGE_READ
/* And here we include common definitions */
-#define _PAGE_KERNEL_RO 0
-#define _PAGE_KERNEL_ROX (_PAGE_EXEC)
-#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
-#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
-
#define _PAGE_HPTEFLAGS _PAGE_HASHPTE
-#ifndef __ASSEMBLY__
-
-static inline bool pte_user(pte_t pte)
-{
- return pte_val(pte) & _PAGE_USER;
-}
-#endif /* __ASSEMBLY__ */
-
/*
* Location of the PFN in the PTE. Most 32-bit platforms use the same
* as _PAGE_SHIFT here (ie, naturally aligned).
@@ -97,20 +84,7 @@ static inline bool pte_user(pte_t pte)
#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT)
-/*
- * Permission masks used to generate the __P and __S table.
- *
- * Note:__pgprot is defined in arch/powerpc/include/asm/page.h
- *
- * Write permissions imply read permissions for now.
- */
-#define PAGE_NONE __pgprot(_PAGE_BASE)
-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#include <asm/pgtable-masks.h>
/* Permission masks used for kernel mappings */
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
@@ -170,7 +144,14 @@ void unmap_kernel_page(unsigned long va);
* value (for now) on others, from where we can start layout kernel
* virtual space that goes below PKMAP and FIXMAP
*/
-#include <asm/fixmap.h>
+
+#define FIXADDR_SIZE 0
+#ifdef CONFIG_KASAN
+#include <asm/kasan.h>
+#define FIXADDR_TOP (KASAN_SHADOW_START - PAGE_SIZE)
+#else
+#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
+#endif
/*
* ioremap_bot starts at that address. Early ioremaps move down from there,
@@ -224,9 +205,6 @@ void unmap_kernel_page(unsigned long va);
/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS 0
-#define pte_ERROR(e) \
- pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
- (unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
@@ -343,7 +321,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
+ pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
}
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
@@ -402,8 +380,16 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
}
/* Generic accessors to PTE bits */
-static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_RW);}
-static inline int pte_read(pte_t pte) { return 1; }
+static inline bool pte_read(pte_t pte)
+{
+ return !!(pte_val(pte) & _PAGE_READ);
+}
+
+static inline bool pte_write(pte_t pte)
+{
+ return !!(pte_val(pte) & _PAGE_WRITE);
+}
+
static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); }
@@ -438,10 +424,10 @@ static inline bool pte_ci(pte_t pte)
static inline bool pte_access_permitted(pte_t pte, bool write)
{
/*
- * A read-only access is controlled by _PAGE_USER bit.
- * We have _PAGE_READ set for WRITE and EXECUTE
+ * A read-only access is controlled by _PAGE_READ bit.
+ * We have _PAGE_READ set for WRITE
*/
- if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
+ if (!pte_present(pte) || !pte_read(pte))
return false;
if (write && !pte_write(pte))
@@ -465,7 +451,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
- return __pte(pte_val(pte) & ~_PAGE_RW);
+ return __pte(pte_val(pte) & ~_PAGE_WRITE);
}
static inline pte_t pte_exprotect(pte_t pte)
@@ -495,6 +481,9 @@ static inline pte_t pte_mkpte(pte_t pte)
static inline pte_t pte_mkwrite_novma(pte_t pte)
{
+ /*
+ * write implies read, hence set both
+ */
return __pte(pte_val(pte) | _PAGE_RW);
}
@@ -518,16 +507,6 @@ static inline pte_t pte_mkhuge(pte_t pte)
return pte;
}
-static inline pte_t pte_mkprivileged(pte_t pte)
-{
- return __pte(pte_val(pte) & ~_PAGE_USER);
-}
-
-static inline pte_t pte_mkuser(pte_t pte)
-{
- return __pte(pte_val(pte) | _PAGE_USER);
-}
-
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
diff --git a/arch/powerpc/include/asm/book3s/32/tlbflush.h b/arch/powerpc/include/asm/book3s/32/tlbflush.h
index 4be572908124..e43534da5207 100644
--- a/arch/powerpc/include/asm/book3s/32/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/32/tlbflush.h
@@ -80,7 +80,7 @@ static inline void local_flush_tlb_page(struct vm_area_struct *vma,
static inline void local_flush_tlb_page_psize(struct mm_struct *mm,
unsigned long vmaddr, int psize)
{
- BUILD_BUG();
+ flush_range(mm, vmaddr, vmaddr);
}
static inline void local_flush_tlb_mm(struct mm_struct *mm)
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 5c497c862d75..cb77eddca54b 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -17,6 +17,10 @@
#define _PAGE_EXEC 0x00001 /* execute permission */
#define _PAGE_WRITE 0x00002 /* write access allowed */
#define _PAGE_READ 0x00004 /* read access allowed */
+#define _PAGE_NA _PAGE_PRIVILEGED
+#define _PAGE_NAX _PAGE_EXEC
+#define _PAGE_RO _PAGE_READ
+#define _PAGE_ROX (_PAGE_READ | _PAGE_EXEC)
#define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)
#define _PAGE_RWX (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define _PAGE_PRIVILEGED 0x00008 /* kernel access only */
@@ -136,23 +140,7 @@
#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE (_PAGE_BASE_NC)
-/* Permission masks used to generate the __P and __S table,
- *
- * Note:__pgprot is defined in arch/powerpc/include/asm/page.h
- *
- * Write permissions imply read permissions for now (we could make write-only
- * pages on BookE but we don't bother for now). Execute permission control is
- * possible on platforms that define _PAGE_EXEC
- */
-#define PAGE_NONE __pgprot(_PAGE_BASE | _PAGE_PRIVILEGED)
-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_READ)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_READ)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
-/* Radix only, Hash uses PAGE_READONLY_X + execute-only pkey instead */
-#define PAGE_EXECONLY __pgprot(_PAGE_BASE | _PAGE_EXEC)
+#include <asm/pgtable-masks.h>
/* Permission masks used for kernel mappings */
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
@@ -316,6 +304,7 @@ extern unsigned long pci_io_base;
#define IOREMAP_START (ioremap_bot)
#define IOREMAP_END (KERN_IO_END - FIXADDR_SIZE)
#define FIXADDR_SIZE SZ_32M
+#define FIXADDR_TOP (IOREMAP_END + FIXADDR_SIZE)
#ifndef __ASSEMBLY__
@@ -629,16 +618,6 @@ static inline pte_t pte_mkdevmap(pte_t pte)
return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SPECIAL | _PAGE_DEVMAP));
}
-static inline pte_t pte_mkprivileged(pte_t pte)
-{
- return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PRIVILEGED));
-}
-
-static inline pte_t pte_mkuser(pte_t pte)
-{
- return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_PRIVILEGED));
-}
-
/*
* This is potentially called with a pmd as the argument, in which case it's not
* safe to check _PAGE_DEVMAP unless we also confirm that _PAGE_PTE is set.
@@ -647,7 +626,7 @@ static inline pte_t pte_mkuser(pte_t pte)
*/
static inline int pte_devmap(pte_t pte)
{
- u64 mask = cpu_to_be64(_PAGE_DEVMAP | _PAGE_PTE);
+ __be64 mask = cpu_to_be64(_PAGE_DEVMAP | _PAGE_PTE);
return (pte_raw(pte) & mask) == mask;
}
@@ -1014,8 +993,6 @@ static inline pmd_t *pud_pgtable(pud_t pud)
return (pmd_t *)__va(pud_val(pud) & ~PUD_MASKED_BITS);
}
-#define pte_ERROR(e) \
- pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h
index 3b7bd36a2321..f42d68c6b314 100644
--- a/arch/powerpc/include/asm/book3s/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/pgtable.h
@@ -8,37 +8,4 @@
#include <asm/book3s/32/pgtable.h>
#endif
-#ifndef __ASSEMBLY__
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
- pte_t *ptep, pte_t entry, int dirty);
-
-struct file;
-extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t vma_prot);
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-
-void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
-
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to ensure coherency between the i-cache and d-cache
- * for the page which has just been mapped in.
- * On machines which use an MMU hash table, we use this to put a
- * corresponding HPTE into the hash table ahead of time, instead of
- * waiting for the inevitable extra hash-table miss exception.
- */
-static inline void update_mmu_cache_range(struct vm_fault *vmf,
- struct vm_area_struct *vma, unsigned long address,
- pte_t *ptep, unsigned int nr)
-{
- if (IS_ENABLED(CONFIG_PPC32) && !mmu_has_feature(MMU_FTR_HPTE_TABLE))
- return;
- if (radix_enabled())
- return;
- __update_mmu_cache(vma, address, ptep);
-}
-
-#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 3f881548fb61..0e29ccf903d0 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -74,6 +74,7 @@ int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
int patch_branch(u32 *addr, unsigned long target, int flags);
int patch_instruction(u32 *addr, ppc_inst_t instr);
int raw_patch_instruction(u32 *addr, ppc_inst_t instr);
+int patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr);
static inline unsigned long patch_site_addr(s32 *site)
{
diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h
index 3bdd74739cb8..e3c6969853ef 100644
--- a/arch/powerpc/include/asm/cpm1.h
+++ b/arch/powerpc/include/asm/cpm1.h
@@ -49,11 +49,6 @@
*/
extern cpm8xx_t __iomem *cpmp; /* Pointer to comm processor */
-#define cpm_dpalloc cpm_muram_alloc
-#define cpm_dpfree cpm_muram_free
-#define cpm_dpram_addr cpm_muram_addr
-#define cpm_dpram_phys cpm_muram_dma
-
extern void cpm_setbrg(uint brg, uint rate);
extern void __init cpm_load_patch(cpm8xx_t *cp);
diff --git a/arch/powerpc/include/asm/cpm2.h b/arch/powerpc/include/asm/cpm2.h
index 249d43cc6427..a22acc36eb9b 100644
--- a/arch/powerpc/include/asm/cpm2.h
+++ b/arch/powerpc/include/asm/cpm2.h
@@ -87,10 +87,6 @@
*/
extern cpm_cpm2_t __iomem *cpmp; /* Pointer to comm processor */
-#define cpm_dpalloc cpm_muram_alloc
-#define cpm_dpfree cpm_muram_free
-#define cpm_dpram_addr cpm_muram_addr
-
extern void cpm2_reset(void);
/* Baud rate generators.
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index a832aeafe560..f9068dd8dfce 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -23,18 +23,6 @@
#include <asm/kmap_size.h>
#endif
-#ifdef CONFIG_PPC64
-#define FIXADDR_TOP (IOREMAP_END + FIXADDR_SIZE)
-#else
-#define FIXADDR_SIZE 0
-#ifdef CONFIG_KASAN
-#include <asm/kasan.h>
-#define FIXADDR_TOP (KASAN_SHADOW_START - PAGE_SIZE)
-#else
-#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
-#endif
-#endif
-
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
@@ -119,5 +107,9 @@ static inline void __set_fixmap(enum fixed_addresses idx,
#define __early_set_fixmap __set_fixmap
+#ifdef CONFIG_PPC_8xx
+#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif
diff --git a/arch/powerpc/include/asm/guest-state-buffer.h b/arch/powerpc/include/asm/guest-state-buffer.h
new file mode 100644
index 000000000000..808149f31576
--- /dev/null
+++ b/arch/powerpc/include/asm/guest-state-buffer.h
@@ -0,0 +1,995 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interface based on include/net/netlink.h
+ */
+#ifndef _ASM_POWERPC_GUEST_STATE_BUFFER_H
+#define _ASM_POWERPC_GUEST_STATE_BUFFER_H
+
+#include "asm/hvcall.h"
+#include <linux/gfp.h>
+#include <linux/bitmap.h>
+#include <asm/plpar_wrappers.h>
+
+/**************************************************************************
+ * Guest State Buffer Constants
+ **************************************************************************/
+/* Element without a value and any length */
+#define KVMPPC_GSID_BLANK 0x0000
+/* Size required for the L0's internal VCPU representation */
+#define KVMPPC_GSID_HOST_STATE_SIZE 0x0001
+ /* Minimum size for the H_GUEST_RUN_VCPU output buffer */
+#define KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE 0x0002
+ /* "Logical" PVR value as defined in the PAPR */
+#define KVMPPC_GSID_LOGICAL_PVR 0x0003
+ /* L0 relative timebase offset */
+#define KVMPPC_GSID_TB_OFFSET 0x0004
+ /* Partition Scoped Page Table Info */
+#define KVMPPC_GSID_PARTITION_TABLE 0x0005
+ /* Process Table Info */
+#define KVMPPC_GSID_PROCESS_TABLE 0x0006
+
+/* H_GUEST_RUN_VCPU input buffer Info */
+#define KVMPPC_GSID_RUN_INPUT 0x0C00
+/* H_GUEST_RUN_VCPU output buffer Info */
+#define KVMPPC_GSID_RUN_OUTPUT 0x0C01
+#define KVMPPC_GSID_VPA 0x0C02
+
+#define KVMPPC_GSID_GPR(x) (0x1000 + (x))
+#define KVMPPC_GSID_HDEC_EXPIRY_TB 0x1020
+#define KVMPPC_GSID_NIA 0x1021
+#define KVMPPC_GSID_MSR 0x1022
+#define KVMPPC_GSID_LR 0x1023
+#define KVMPPC_GSID_XER 0x1024
+#define KVMPPC_GSID_CTR 0x1025
+#define KVMPPC_GSID_CFAR 0x1026
+#define KVMPPC_GSID_SRR0 0x1027
+#define KVMPPC_GSID_SRR1 0x1028
+#define KVMPPC_GSID_DAR 0x1029
+#define KVMPPC_GSID_DEC_EXPIRY_TB 0x102A
+#define KVMPPC_GSID_VTB 0x102B
+#define KVMPPC_GSID_LPCR 0x102C
+#define KVMPPC_GSID_HFSCR 0x102D
+#define KVMPPC_GSID_FSCR 0x102E
+#define KVMPPC_GSID_FPSCR 0x102F
+#define KVMPPC_GSID_DAWR0 0x1030
+#define KVMPPC_GSID_DAWR1 0x1031
+#define KVMPPC_GSID_CIABR 0x1032
+#define KVMPPC_GSID_PURR 0x1033
+#define KVMPPC_GSID_SPURR 0x1034
+#define KVMPPC_GSID_IC 0x1035
+#define KVMPPC_GSID_SPRG0 0x1036
+#define KVMPPC_GSID_SPRG1 0x1037
+#define KVMPPC_GSID_SPRG2 0x1038
+#define KVMPPC_GSID_SPRG3 0x1039
+#define KVMPPC_GSID_PPR 0x103A
+#define KVMPPC_GSID_MMCR(x) (0x103B + (x))
+#define KVMPPC_GSID_MMCRA 0x103F
+#define KVMPPC_GSID_SIER(x) (0x1040 + (x))
+#define KVMPPC_GSID_BESCR 0x1043
+#define KVMPPC_GSID_EBBHR 0x1044
+#define KVMPPC_GSID_EBBRR 0x1045
+#define KVMPPC_GSID_AMR 0x1046
+#define KVMPPC_GSID_IAMR 0x1047
+#define KVMPPC_GSID_AMOR 0x1048
+#define KVMPPC_GSID_UAMOR 0x1049
+#define KVMPPC_GSID_SDAR 0x104A
+#define KVMPPC_GSID_SIAR 0x104B
+#define KVMPPC_GSID_DSCR 0x104C
+#define KVMPPC_GSID_TAR 0x104D
+#define KVMPPC_GSID_DEXCR 0x104E
+#define KVMPPC_GSID_HDEXCR 0x104F
+#define KVMPPC_GSID_HASHKEYR 0x1050
+#define KVMPPC_GSID_HASHPKEYR 0x1051
+#define KVMPPC_GSID_CTRL 0x1052
+
+#define KVMPPC_GSID_CR 0x2000
+#define KVMPPC_GSID_PIDR 0x2001
+#define KVMPPC_GSID_DSISR 0x2002
+#define KVMPPC_GSID_VSCR 0x2003
+#define KVMPPC_GSID_VRSAVE 0x2004
+#define KVMPPC_GSID_DAWRX0 0x2005
+#define KVMPPC_GSID_DAWRX1 0x2006
+#define KVMPPC_GSID_PMC(x) (0x2007 + (x))
+#define KVMPPC_GSID_WORT 0x200D
+#define KVMPPC_GSID_PSPB 0x200E
+
+#define KVMPPC_GSID_VSRS(x) (0x3000 + (x))
+
+#define KVMPPC_GSID_HDAR 0xF000
+#define KVMPPC_GSID_HDSISR 0xF001
+#define KVMPPC_GSID_HEIR 0xF002
+#define KVMPPC_GSID_ASDR 0xF003
+
+#define KVMPPC_GSE_GUESTWIDE_START KVMPPC_GSID_BLANK
+#define KVMPPC_GSE_GUESTWIDE_END KVMPPC_GSID_PROCESS_TABLE
+#define KVMPPC_GSE_GUESTWIDE_COUNT \
+ (KVMPPC_GSE_GUESTWIDE_END - KVMPPC_GSE_GUESTWIDE_START + 1)
+
+#define KVMPPC_GSE_META_START KVMPPC_GSID_RUN_INPUT
+#define KVMPPC_GSE_META_END KVMPPC_GSID_VPA
+#define KVMPPC_GSE_META_COUNT (KVMPPC_GSE_META_END - KVMPPC_GSE_META_START + 1)
+
+#define KVMPPC_GSE_DW_REGS_START KVMPPC_GSID_GPR(0)
+#define KVMPPC_GSE_DW_REGS_END KVMPPC_GSID_CTRL
+#define KVMPPC_GSE_DW_REGS_COUNT \
+ (KVMPPC_GSE_DW_REGS_END - KVMPPC_GSE_DW_REGS_START + 1)
+
+#define KVMPPC_GSE_W_REGS_START KVMPPC_GSID_CR
+#define KVMPPC_GSE_W_REGS_END KVMPPC_GSID_PSPB
+#define KVMPPC_GSE_W_REGS_COUNT \
+ (KVMPPC_GSE_W_REGS_END - KVMPPC_GSE_W_REGS_START + 1)
+
+#define KVMPPC_GSE_VSRS_START KVMPPC_GSID_VSRS(0)
+#define KVMPPC_GSE_VSRS_END KVMPPC_GSID_VSRS(63)
+#define KVMPPC_GSE_VSRS_COUNT (KVMPPC_GSE_VSRS_END - KVMPPC_GSE_VSRS_START + 1)
+
+#define KVMPPC_GSE_INTR_REGS_START KVMPPC_GSID_HDAR
+#define KVMPPC_GSE_INTR_REGS_END KVMPPC_GSID_ASDR
+#define KVMPPC_GSE_INTR_REGS_COUNT \
+ (KVMPPC_GSE_INTR_REGS_END - KVMPPC_GSE_INTR_REGS_START + 1)
+
+#define KVMPPC_GSE_IDEN_COUNT \
+ (KVMPPC_GSE_GUESTWIDE_COUNT + KVMPPC_GSE_META_COUNT + \
+ KVMPPC_GSE_DW_REGS_COUNT + KVMPPC_GSE_W_REGS_COUNT + \
+ KVMPPC_GSE_VSRS_COUNT + KVMPPC_GSE_INTR_REGS_COUNT)
+
+/**
+ * Ranges of guest state buffer elements
+ */
+enum {
+ KVMPPC_GS_CLASS_GUESTWIDE = 0x01,
+ KVMPPC_GS_CLASS_META = 0x02,
+ KVMPPC_GS_CLASS_DWORD_REG = 0x04,
+ KVMPPC_GS_CLASS_WORD_REG = 0x08,
+ KVMPPC_GS_CLASS_VECTOR = 0x10,
+ KVMPPC_GS_CLASS_INTR = 0x20,
+};
+
+/**
+ * Types of guest state buffer elements
+ */
+enum {
+ KVMPPC_GSE_BE32,
+ KVMPPC_GSE_BE64,
+ KVMPPC_GSE_VEC128,
+ KVMPPC_GSE_PARTITION_TABLE,
+ KVMPPC_GSE_PROCESS_TABLE,
+ KVMPPC_GSE_BUFFER,
+ __KVMPPC_GSE_TYPE_MAX,
+};
+
+/**
+ * Flags for guest state elements
+ */
+enum {
+ KVMPPC_GS_FLAGS_WIDE = 0x01,
+};
+
+/**
+ * struct kvmppc_gs_part_table - deserialized partition table information
+ * element
+ * @address: start of the partition table
+ * @ea_bits: number of bits in the effective address
+ * @gpd_size: root page directory size
+ */
+struct kvmppc_gs_part_table {
+ u64 address;
+ u64 ea_bits;
+ u64 gpd_size;
+};
+
+/**
+ * struct kvmppc_gs_proc_table - deserialized process table information element
+ * @address: start of the process table
+ * @gpd_size: process table size
+ */
+struct kvmppc_gs_proc_table {
+ u64 address;
+ u64 gpd_size;
+};
+
+/**
+ * struct kvmppc_gs_buff_info - deserialized meta guest state buffer information
+ * @address: start of the guest state buffer
+ * @size: size of the guest state buffer
+ */
+struct kvmppc_gs_buff_info {
+ u64 address;
+ u64 size;
+};
+
+/**
+ * struct kvmppc_gs_header - serialized guest state buffer header
+ * @nelems: count of guest state elements in the buffer
+ * @data: start of the stream of elements in the buffer
+ */
+struct kvmppc_gs_header {
+ __be32 nelems;
+ char data[];
+} __packed;
+
+/**
+ * struct kvmppc_gs_elem - serialized guest state buffer element
+ * @iden: Guest State ID
+ * @len: length of data
+ * @data: the guest state buffer element's value
+ */
+struct kvmppc_gs_elem {
+ __be16 iden;
+ __be16 len;
+ char data[];
+} __packed;
+
+/**
+ * struct kvmppc_gs_buff - a guest state buffer with metadata.
+ * @capacity: total length of the buffer
+ * @len: current length of the elements and header
+ * @guest_id: guest id associated with the buffer
+ * @vcpu_id: vcpu_id associated with the buffer
+ * @hdr: the serialised guest state buffer
+ */
+struct kvmppc_gs_buff {
+ size_t capacity;
+ size_t len;
+ unsigned long guest_id;
+ unsigned long vcpu_id;
+ struct kvmppc_gs_header *hdr;
+};
+
+/**
+ * struct kvmppc_gs_bitmap - a bitmap for element ids
+ * @bitmap: a bitmap large enough for all Guest State IDs
+ */
+struct kvmppc_gs_bitmap {
+ /* private: */
+ DECLARE_BITMAP(bitmap, KVMPPC_GSE_IDEN_COUNT);
+};
+
+/**
+ * struct kvmppc_gs_parser - a map of element ids to locations in a buffer
+ * @iterator: bitmap used for iterating
+ * @gses: contains the pointers to elements
+ *
+ * A guest state parser is used for deserialising a guest state buffer.
+ * Given a buffer, it then allows looking up guest state elements using
+ * a guest state id.
+ */
+struct kvmppc_gs_parser {
+ /* private: */
+ struct kvmppc_gs_bitmap iterator;
+ struct kvmppc_gs_elem *gses[KVMPPC_GSE_IDEN_COUNT];
+};
+
+enum {
+ GSM_GUEST_WIDE = 0x1,
+ GSM_SEND = 0x2,
+ GSM_RECEIVE = 0x4,
+ GSM_GSB_OWNER = 0x8,
+};
+
+struct kvmppc_gs_msg;
+
+/**
+ * struct kvmppc_gs_msg_ops - guest state message behavior
+ * @get_size: maximum size required for the message data
+ * @fill_info: serializes to the guest state buffer format
+ * @refresh_info: deserializes from the guest state buffer format
+ */
+struct kvmppc_gs_msg_ops {
+ size_t (*get_size)(struct kvmppc_gs_msg *gsm);
+ int (*fill_info)(struct kvmppc_gs_buff *gsb, struct kvmppc_gs_msg *gsm);
+ int (*refresh_info)(struct kvmppc_gs_msg *gsm,
+ struct kvmppc_gs_buff *gsb);
+};
+
+/**
+ * struct kvmppc_gs_msg - a guest state message
+ * @bitmap: the guest state ids that should be included
+ * @ops: modify message behavior for reading and writing to buffers
+ * @flags: guest wide or thread wide
+ * @data: location where buffer data will be written to or read from.
+ *
+ * A guest state message allows flexibility in sending and receiving data
+ * in a guest state buffer format.
+ */
+struct kvmppc_gs_msg {
+ struct kvmppc_gs_bitmap bitmap;
+ struct kvmppc_gs_msg_ops *ops;
+ unsigned long flags;
+ void *data;
+};
+
+/**************************************************************************
+ * Guest State IDs
+ **************************************************************************/
+
+u16 kvmppc_gsid_size(u16 iden);
+unsigned long kvmppc_gsid_flags(u16 iden);
+u64 kvmppc_gsid_mask(u16 iden);
+
+/**************************************************************************
+ * Guest State Buffers
+ **************************************************************************/
+struct kvmppc_gs_buff *kvmppc_gsb_new(size_t size, unsigned long guest_id,
+ unsigned long vcpu_id, gfp_t flags);
+void kvmppc_gsb_free(struct kvmppc_gs_buff *gsb);
+void *kvmppc_gsb_put(struct kvmppc_gs_buff *gsb, size_t size);
+int kvmppc_gsb_send(struct kvmppc_gs_buff *gsb, unsigned long flags);
+int kvmppc_gsb_recv(struct kvmppc_gs_buff *gsb, unsigned long flags);
+
+/**
+ * kvmppc_gsb_header() - the header of a guest state buffer
+ * @gsb: guest state buffer
+ *
+ * Returns a pointer to the buffer header.
+ */
+static inline struct kvmppc_gs_header *
+kvmppc_gsb_header(struct kvmppc_gs_buff *gsb)
+{
+ return gsb->hdr;
+}
+
+/**
+ * kvmppc_gsb_data() - the elements of a guest state buffer
+ * @gsb: guest state buffer
+ *
+ * Returns a pointer to the first element of the buffer data.
+ */
+static inline struct kvmppc_gs_elem *kvmppc_gsb_data(struct kvmppc_gs_buff *gsb)
+{
+ return (struct kvmppc_gs_elem *)kvmppc_gsb_header(gsb)->data;
+}
+
+/**
+ * kvmppc_gsb_len() - the current length of a guest state buffer
+ * @gsb: guest state buffer
+ *
+ * Returns the length including the header of a buffer.
+ */
+static inline size_t kvmppc_gsb_len(struct kvmppc_gs_buff *gsb)
+{
+ return gsb->len;
+}
+
+/**
+ * kvmppc_gsb_capacity() - the capacity of a guest state buffer
+ * @gsb: guest state buffer
+ *
+ * Returns the capacity of a buffer.
+ */
+static inline size_t kvmppc_gsb_capacity(struct kvmppc_gs_buff *gsb)
+{
+ return gsb->capacity;
+}
+
+/**
+ * kvmppc_gsb_paddress() - the physical address of buffer
+ * @gsb: guest state buffer
+ *
+ * Returns the physical address of the buffer.
+ */
+static inline u64 kvmppc_gsb_paddress(struct kvmppc_gs_buff *gsb)
+{
+ return __pa(kvmppc_gsb_header(gsb));
+}
+
+/**
+ * kvmppc_gsb_nelems() - the number of elements in a buffer
+ * @gsb: guest state buffer
+ *
+ * Returns the number of elements in a buffer
+ */
+static inline u32 kvmppc_gsb_nelems(struct kvmppc_gs_buff *gsb)
+{
+ return be32_to_cpu(kvmppc_gsb_header(gsb)->nelems);
+}
+
+/**
+ * kvmppc_gsb_reset() - empty a guest state buffer
+ * @gsb: guest state buffer
+ *
+ * Reset the number of elements and length of buffer to empty.
+ */
+static inline void kvmppc_gsb_reset(struct kvmppc_gs_buff *gsb)
+{
+ kvmppc_gsb_header(gsb)->nelems = cpu_to_be32(0);
+ gsb->len = sizeof(struct kvmppc_gs_header);
+}
+
+/**
+ * kvmppc_gsb_data_len() - the length of a buffer excluding the header
+ * @gsb: guest state buffer
+ *
+ * Returns the length of a buffer excluding the header
+ */
+static inline size_t kvmppc_gsb_data_len(struct kvmppc_gs_buff *gsb)
+{
+ return gsb->len - sizeof(struct kvmppc_gs_header);
+}
+
+/**
+ * kvmppc_gsb_data_cap() - the capacity of a buffer excluding the header
+ * @gsb: guest state buffer
+ *
+ * Returns the capacity of a buffer excluding the header
+ */
+static inline size_t kvmppc_gsb_data_cap(struct kvmppc_gs_buff *gsb)
+{
+ return gsb->capacity - sizeof(struct kvmppc_gs_header);
+}
+
+/**
+ * kvmppc_gsb_for_each_elem - iterate over the elements in a buffer
+ * @i: loop counter
+ * @pos: set to current element
+ * @gsb: guest state buffer
+ * @rem: initialized to buffer capacity, holds bytes currently remaining in
+ * stream
+ */
+#define kvmppc_gsb_for_each_elem(i, pos, gsb, rem) \
+ kvmppc_gse_for_each_elem(i, kvmppc_gsb_nelems(gsb), pos, \
+ kvmppc_gsb_data(gsb), \
+ kvmppc_gsb_data_cap(gsb), rem)
+
+/**************************************************************************
+ * Guest State Elements
+ **************************************************************************/
+
+/**
+ * kvmppc_gse_iden() - guest state ID of element
+ * @gse: guest state element
+ *
+ * Return the guest state ID in host endianness.
+ */
+static inline u16 kvmppc_gse_iden(const struct kvmppc_gs_elem *gse)
+{
+ return be16_to_cpu(gse->iden);
+}
+
+/**
+ * kvmppc_gse_len() - length of guest state element data
+ * @gse: guest state element
+ *
+ * Returns the length of guest state element data
+ */
+static inline u16 kvmppc_gse_len(const struct kvmppc_gs_elem *gse)
+{
+ return be16_to_cpu(gse->len);
+}
+
+/**
+ * kvmppc_gse_total_len() - total length of guest state element
+ * @gse: guest state element
+ *
+ * Returns the length of the data plus the ID and size header.
+ */
+static inline u16 kvmppc_gse_total_len(const struct kvmppc_gs_elem *gse)
+{
+ return be16_to_cpu(gse->len) + sizeof(*gse);
+}
+
+/**
+ * kvmppc_gse_total_size() - space needed for a given data length
+ * @size: data length
+ *
+ * Returns size plus the space needed for the ID and size header.
+ */
+static inline u16 kvmppc_gse_total_size(u16 size)
+{
+ return sizeof(struct kvmppc_gs_elem) + size;
+}
+
+/**
+ * kvmppc_gse_data() - pointer to data of a guest state element
+ * @gse: guest state element
+ *
+ * Returns a pointer to the beginning of guest state element data.
+ */
+static inline void *kvmppc_gse_data(const struct kvmppc_gs_elem *gse)
+{
+ return (void *)gse->data;
+}
+
+/**
+ * kvmppc_gse_ok() - checks space exists for guest state element
+ * @gse: guest state element
+ * @remaining: bytes of space remaining
+ *
+ * Returns true if the guest state element can fit in remaining space.
+ */
+static inline bool kvmppc_gse_ok(const struct kvmppc_gs_elem *gse,
+ int remaining)
+{
+ return remaining >= kvmppc_gse_total_len(gse);
+}
+
+/**
+ * kvmppc_gse_next() - iterate to the next guest state element in a stream
+ * @gse: stream of guest state elements
+ * @remaining: length of the guest element stream
+ *
+ * Returns the next guest state element in a stream of elements. The length of
+ * the stream is updated in remaining.
+ */
+static inline struct kvmppc_gs_elem *
+kvmppc_gse_next(const struct kvmppc_gs_elem *gse, int *remaining)
+{
+ int len = sizeof(*gse) + kvmppc_gse_len(gse);
+
+ *remaining -= len;
+ return (struct kvmppc_gs_elem *)(gse->data + kvmppc_gse_len(gse));
+}
+
+/**
+ * kvmppc_gse_for_each_elem - iterate over a stream of guest state elements
+ * @i: loop counter
+ * @max: number of elements
+ * @pos: set to current element
+ * @head: head of elements
+ * @len: length of the stream
+ * @rem: initialized to len, holds bytes currently remaining in the stream
+ */
+#define kvmppc_gse_for_each_elem(i, max, pos, head, len, rem) \
+ for (i = 0, pos = head, rem = len; kvmppc_gse_ok(pos, rem) && i < max; \
+ pos = kvmppc_gse_next(pos, &(rem)), i++)
+
+int __kvmppc_gse_put(struct kvmppc_gs_buff *gsb, u16 iden, u16 size,
+ const void *data);
+int kvmppc_gse_parse(struct kvmppc_gs_parser *gsp, struct kvmppc_gs_buff *gsb);
+
+/**
+ * kvmppc_gse_put_be32() - add a be32 guest state element to a buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: big endian value
+ */
+static inline int kvmppc_gse_put_be32(struct kvmppc_gs_buff *gsb, u16 iden,
+ __be32 val)
+{
+ __be32 tmp;
+
+ tmp = val;
+ return __kvmppc_gse_put(gsb, iden, sizeof(__be32), &tmp);
+}
+
+/**
+ * kvmppc_gse_put_u32() - add a host endian 32bit int guest state element to a
+ * buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: host endian value
+ */
+static inline int kvmppc_gse_put_u32(struct kvmppc_gs_buff *gsb, u16 iden,
+ u32 val)
+{
+ __be32 tmp;
+
+ val &= kvmppc_gsid_mask(iden);
+ tmp = cpu_to_be32(val);
+ return kvmppc_gse_put_be32(gsb, iden, tmp);
+}
+
+/**
+ * kvmppc_gse_put_be64() - add a be64 guest state element to a buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: big endian value
+ */
+static inline int kvmppc_gse_put_be64(struct kvmppc_gs_buff *gsb, u16 iden,
+ __be64 val)
+{
+ __be64 tmp;
+
+ tmp = val;
+ return __kvmppc_gse_put(gsb, iden, sizeof(__be64), &tmp);
+}
+
+/**
+ * kvmppc_gse_put_u64() - add a host endian 64bit guest state element to a
+ * buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: host endian value
+ */
+static inline int kvmppc_gse_put_u64(struct kvmppc_gs_buff *gsb, u16 iden,
+ u64 val)
+{
+ __be64 tmp;
+
+ val &= kvmppc_gsid_mask(iden);
+ tmp = cpu_to_be64(val);
+ return kvmppc_gse_put_be64(gsb, iden, tmp);
+}
+
+/**
+ * __kvmppc_gse_put_reg() - add a register type guest state element to a buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: host endian value
+ *
+ * Adds a register type guest state element. Uses the guest state ID for
+ * determining the length of the guest element. If the guest state ID has
+ * bits that cannot be set, they will be cleared.
+ */
+static inline int __kvmppc_gse_put_reg(struct kvmppc_gs_buff *gsb, u16 iden,
+ u64 val)
+{
+ val &= kvmppc_gsid_mask(iden);
+ if (kvmppc_gsid_size(iden) == sizeof(u64))
+ return kvmppc_gse_put_u64(gsb, iden, val);
+
+ if (kvmppc_gsid_size(iden) == sizeof(u32)) {
+ u32 tmp;
+
+ tmp = (u32)val;
+ if (tmp != val)
+ return -EINVAL;
+
+ return kvmppc_gse_put_u32(gsb, iden, tmp);
+ }
+ return -EINVAL;
+}
+
+/**
+ * kvmppc_gse_put_vector128() - add a vector guest state element to a buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: 16 byte vector value
+ */
+static inline int kvmppc_gse_put_vector128(struct kvmppc_gs_buff *gsb, u16 iden,
+ vector128 *val)
+{
+ __be64 tmp[2] = { 0 };
+ union {
+ __vector128 v;
+ u64 dw[2];
+ } u;
+
+ u.v = *val;
+ tmp[0] = cpu_to_be64(u.dw[TS_FPROFFSET]);
+#ifdef CONFIG_VSX
+ tmp[1] = cpu_to_be64(u.dw[TS_VSRLOWOFFSET]);
+#endif
+ return __kvmppc_gse_put(gsb, iden, sizeof(tmp), &tmp);
+}
+
+/**
+ * kvmppc_gse_put_part_table() - add a partition table guest state element to a
+ * buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: partition table value
+ */
+static inline int kvmppc_gse_put_part_table(struct kvmppc_gs_buff *gsb,
+ u16 iden,
+ struct kvmppc_gs_part_table val)
+{
+ __be64 tmp[3];
+
+ tmp[0] = cpu_to_be64(val.address);
+ tmp[1] = cpu_to_be64(val.ea_bits);
+ tmp[2] = cpu_to_be64(val.gpd_size);
+ return __kvmppc_gse_put(gsb, KVMPPC_GSID_PARTITION_TABLE, sizeof(tmp),
+ &tmp);
+}
+
+/**
+ * kvmppc_gse_put_proc_table() - add a process table guest state element to a
+ * buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: process table value
+ */
+static inline int kvmppc_gse_put_proc_table(struct kvmppc_gs_buff *gsb,
+ u16 iden,
+ struct kvmppc_gs_proc_table val)
+{
+ __be64 tmp[2];
+
+ tmp[0] = cpu_to_be64(val.address);
+ tmp[1] = cpu_to_be64(val.gpd_size);
+ return __kvmppc_gse_put(gsb, KVMPPC_GSID_PROCESS_TABLE, sizeof(tmp),
+ &tmp);
+}
+
+/**
+ * kvmppc_gse_put_buff_info() - adds a GSB description guest state element to a
+ * buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: guest state buffer description value
+ */
+static inline int kvmppc_gse_put_buff_info(struct kvmppc_gs_buff *gsb, u16 iden,
+ struct kvmppc_gs_buff_info val)
+{
+ __be64 tmp[2];
+
+ tmp[0] = cpu_to_be64(val.address);
+ tmp[1] = cpu_to_be64(val.size);
+ return __kvmppc_gse_put(gsb, iden, sizeof(tmp), &tmp);
+}
+
+int __kvmppc_gse_put(struct kvmppc_gs_buff *gsb, u16 iden, u16 size,
+ const void *data);
+
+/**
+ * kvmppc_gse_get_be32() - return the data of a be32 element
+ * @gse: guest state element
+ */
+static inline __be32 kvmppc_gse_get_be32(const struct kvmppc_gs_elem *gse)
+{
+ if (WARN_ON(kvmppc_gse_len(gse) != sizeof(__be32)))
+ return 0;
+ return *(__be32 *)kvmppc_gse_data(gse);
+}
+
+/**
+ * kvmppc_gse_get_u32() - return the data of a be32 element in host endianness
+ * @gse: guest state element
+ */
+static inline u32 kvmppc_gse_get_u32(const struct kvmppc_gs_elem *gse)
+{
+ return be32_to_cpu(kvmppc_gse_get_be32(gse));
+}
+
+/**
+ * kvmppc_gse_get_be64() - return the data of a be64 element
+ * @gse: guest state element
+ */
+static inline __be64 kvmppc_gse_get_be64(const struct kvmppc_gs_elem *gse)
+{
+ if (WARN_ON(kvmppc_gse_len(gse) != sizeof(__be64)))
+ return 0;
+ return *(__be64 *)kvmppc_gse_data(gse);
+}
+
+/**
+ * kvmppc_gse_get_u64() - return the data of a be64 element in host endianness
+ * @gse: guest state element
+ */
+static inline u64 kvmppc_gse_get_u64(const struct kvmppc_gs_elem *gse)
+{
+ return be64_to_cpu(kvmppc_gse_get_be64(gse));
+}
+
+/**
+ * kvmppc_gse_get_vector128() - return the data of a vector element
+ * @gse: guest state element
+ * @v: vector128 to copy the element's value into
+ */
+static inline void kvmppc_gse_get_vector128(const struct kvmppc_gs_elem *gse,
+ vector128 *v)
+{
+ union {
+ __vector128 v;
+ u64 dw[2];
+ } u = { 0 };
+ __be64 *src;
+
+ if (WARN_ON(kvmppc_gse_len(gse) != sizeof(__vector128))) {
+ *v = u.v;
+ return;
+ }
+
+ src = (__be64 *)kvmppc_gse_data(gse);
+ u.dw[TS_FPROFFSET] = be64_to_cpu(src[0]);
+#ifdef CONFIG_VSX
+ u.dw[TS_VSRLOWOFFSET] = be64_to_cpu(src[1]);
+#endif
+ *v = u.v;
+}
+
+/**************************************************************************
+ * Guest State Bitmap
+ **************************************************************************/
+
+bool kvmppc_gsbm_test(struct kvmppc_gs_bitmap *gsbm, u16 iden);
+void kvmppc_gsbm_set(struct kvmppc_gs_bitmap *gsbm, u16 iden);
+void kvmppc_gsbm_clear(struct kvmppc_gs_bitmap *gsbm, u16 iden);
+u16 kvmppc_gsbm_next(struct kvmppc_gs_bitmap *gsbm, u16 prev);
+
+/**
+ * kvmppc_gsbm_zero - zero the entire bitmap
+ * @gsbm: guest state buffer bitmap
+ */
+static inline void kvmppc_gsbm_zero(struct kvmppc_gs_bitmap *gsbm)
+{
+ bitmap_zero(gsbm->bitmap, KVMPPC_GSE_IDEN_COUNT);
+}
+
+/**
+ * kvmppc_gsbm_fill - fill the entire bitmap
+ * @gsbm: guest state buffer bitmap
+ */
+static inline void kvmppc_gsbm_fill(struct kvmppc_gs_bitmap *gsbm)
+{
+ bitmap_fill(gsbm->bitmap, KVMPPC_GSE_IDEN_COUNT);
+ clear_bit(0, gsbm->bitmap);
+}
+
+/**
+ * kvmppc_gsbm_for_each - iterate the present guest state IDs
+ * @gsbm: guest state buffer bitmap
+ * @iden: current guest state ID
+ */
+#define kvmppc_gsbm_for_each(gsbm, iden) \
+ for (iden = kvmppc_gsbm_next(gsbm, 0); iden != 0; \
+ iden = kvmppc_gsbm_next(gsbm, iden))
+
+/**************************************************************************
+ * Guest State Parser
+ **************************************************************************/
+
+void kvmppc_gsp_insert(struct kvmppc_gs_parser *gsp, u16 iden,
+ struct kvmppc_gs_elem *gse);
+struct kvmppc_gs_elem *kvmppc_gsp_lookup(struct kvmppc_gs_parser *gsp,
+ u16 iden);
+
+/**
+ * kvmppc_gsp_for_each - iterate the <guest state IDs, guest state element>
+ * pairs
+ * @gsp: guest state parser
+ * @iden: current guest state ID
+ * @gse: guest state element
+ */
+#define kvmppc_gsp_for_each(gsp, iden, gse) \
+ for (iden = kvmppc_gsbm_next(&(gsp)->iterator, 0), \
+ gse = kvmppc_gsp_lookup((gsp), iden); \
+ iden != 0; iden = kvmppc_gsbm_next(&(gsp)->iterator, iden), \
+ gse = kvmppc_gsp_lookup((gsp), iden))
+
+/**************************************************************************
+ * Guest State Message
+ **************************************************************************/
+
+/**
+ * kvmppc_gsm_for_each - iterate the guest state IDs included in a guest state
+ * message
+ * @gsm: guest state message
+ * @iden: current guest state ID
+ */
+#define kvmppc_gsm_for_each(gsm, iden) \
+ for (iden = kvmppc_gsbm_next(&gsm->bitmap, 0); iden != 0; \
+ iden = kvmppc_gsbm_next(&gsm->bitmap, iden))
+
+int kvmppc_gsm_init(struct kvmppc_gs_msg *mgs, struct kvmppc_gs_msg_ops *ops,
+ void *data, unsigned long flags);
+
+struct kvmppc_gs_msg *kvmppc_gsm_new(struct kvmppc_gs_msg_ops *ops, void *data,
+ unsigned long flags, gfp_t gfp_flags);
+void kvmppc_gsm_free(struct kvmppc_gs_msg *gsm);
+size_t kvmppc_gsm_size(struct kvmppc_gs_msg *gsm);
+int kvmppc_gsm_fill_info(struct kvmppc_gs_msg *gsm, struct kvmppc_gs_buff *gsb);
+int kvmppc_gsm_refresh_info(struct kvmppc_gs_msg *gsm,
+ struct kvmppc_gs_buff *gsb);
+
+/**
+ * kvmppc_gsm_include - indicate a guest state ID should be included when
+ * serializing
+ * @gsm: guest state message
+ * @iden: guest state ID
+ */
+static inline void kvmppc_gsm_include(struct kvmppc_gs_msg *gsm, u16 iden)
+{
+ kvmppc_gsbm_set(&gsm->bitmap, iden);
+}
+
+/**
+ * kvmppc_gsm_includes - check if a guest state ID will be included when
+ * serializing
+ * @gsm: guest state message
+ * @iden: guest state ID
+ */
+static inline bool kvmppc_gsm_includes(struct kvmppc_gs_msg *gsm, u16 iden)
+{
+ return kvmppc_gsbm_test(&gsm->bitmap, iden);
+}
+
+/**
+ * kvmppc_gsm_include_all - indicate all guest state IDs should be included
+ * when serializing
+ * @gsm: guest state message
+ */
+static inline void kvmppc_gsm_include_all(struct kvmppc_gs_msg *gsm)
+{
+ kvmppc_gsbm_fill(&gsm->bitmap);
+}
+
+/**
+ * kvmppc_gsm_reset - clear the guest state IDs that should be included when
+ * serializing
+ * @gsm: guest state message
+ */
+static inline void kvmppc_gsm_reset(struct kvmppc_gs_msg *gsm)
+{
+ kvmppc_gsbm_zero(&gsm->bitmap);
+}
+
+/**
+ * kvmppc_gsb_receive_data - flexibly update values from a guest state buffer
+ * @gsb: guest state buffer
+ * @gsm: guest state message
+ *
+ * Requests updated values for the guest state values included in the guest
+ * state message. The guest state message will then deserialize the guest state
+ * buffer.
+ */
+static inline int kvmppc_gsb_receive_data(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm)
+{
+ int rc;
+
+ kvmppc_gsb_reset(gsb);
+ rc = kvmppc_gsm_fill_info(gsm, gsb);
+ if (rc < 0)
+ return rc;
+
+ rc = kvmppc_gsb_recv(gsb, gsm->flags);
+ if (rc < 0)
+ return rc;
+
+ rc = kvmppc_gsm_refresh_info(gsm, gsb);
+ if (rc < 0)
+ return rc;
+ return 0;
+}
+
+/**
+ * kvmppc_gsb_receive_datum - receive a single guest state ID
+ * @gsb: guest state buffer
+ * @gsm: guest state message
+ * @iden: guest state identity
+ */
+static inline int kvmppc_gsb_receive_datum(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm, u16 iden)
+{
+ int rc;
+
+ kvmppc_gsm_include(gsm, iden);
+ rc = kvmppc_gsb_receive_data(gsb, gsm);
+ if (rc < 0)
+ return rc;
+ kvmppc_gsm_reset(gsm);
+ return 0;
+}
+
+/**
+ * kvmppc_gsb_send_data - flexibly send values from a guest state buffer
+ * @gsb: guest state buffer
+ * @gsm: guest state message
+ *
+ * Sends the guest state values included in the guest state message.
+ */
+static inline int kvmppc_gsb_send_data(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm)
+{
+ int rc;
+
+ kvmppc_gsb_reset(gsb);
+ rc = kvmppc_gsm_fill_info(gsm, gsb);
+ if (rc < 0)
+ return rc;
+ rc = kvmppc_gsb_send(gsb, gsm->flags);
+
+ return rc;
+}
+
+/**
+ * kvmppc_gsb_send_datum - send a single guest state ID
+ * @gsb: guest state buffer
+ * @gsm: guest state message
+ * @iden: guest state identity
+ */
+static inline int kvmppc_gsb_send_datum(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm, u16 iden)
+{
+ int rc;
+
+ kvmppc_gsm_include(gsm, iden);
+ rc = kvmppc_gsb_send_data(gsb, gsm);
+ if (rc < 0)
+ return rc;
+ kvmppc_gsm_reset(gsm);
+ return 0;
+}
+
+#endif /* _ASM_POWERPC_GUEST_STATE_BUFFER_H */
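Taken together, the declarations above form a small serialization framework: a struct kvmppc_gs_msg records which guest state IDs to move and points at host-side data, its kvmppc_gs_msg_ops translate between that data and the wire format, and kvmppc_gsb_send_data()/kvmppc_gsb_receive_data() drive the actual buffer exchange. The sketch below shows how a caller might wire a message up for a single register; struct demo_state and the demo_* names are invented for illustration, only the kvmppc_* identifiers come from this header.

/* Illustrative only: a message that moves one register (LR) via a GSB. */
struct demo_state {
	u64 lr;
};

static size_t demo_get_size(struct kvmppc_gs_msg *gsm)
{
	/* Worst-case space needed when every included ID is serialized */
	return kvmppc_gse_total_size(kvmppc_gsid_size(KVMPPC_GSID_LR));
}

static int demo_fill_info(struct kvmppc_gs_buff *gsb, struct kvmppc_gs_msg *gsm)
{
	struct demo_state *s = gsm->data;

	/* Serialize only the IDs marked in the message bitmap */
	if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_LR))
		return kvmppc_gse_put_u64(gsb, KVMPPC_GSID_LR, s->lr);
	return 0;
}

static int demo_refresh_info(struct kvmppc_gs_msg *gsm, struct kvmppc_gs_buff *gsb)
{
	struct kvmppc_gs_parser gsp = { 0 };
	struct demo_state *s = gsm->data;
	struct kvmppc_gs_elem *gse;
	int rc;

	/* Deserialize the buffer and pull out the element we care about */
	rc = kvmppc_gse_parse(&gsp, gsb);
	if (rc < 0)
		return rc;
	gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_LR);
	if (gse)
		s->lr = kvmppc_gse_get_u64(gse);
	return 0;
}

static struct kvmppc_gs_msg_ops demo_gs_msg_ops = {
	.get_size = demo_get_size,
	.fill_info = demo_fill_info,
	.refresh_info = demo_refresh_info,
};

A caller would then allocate the message with kvmppc_gsm_new(&demo_gs_msg_ops, &state, GSM_SEND, GFP_KERNEL), size a buffer from kvmppc_gsm_size() plus the header, obtain it with kvmppc_gsb_new(), and push or pull KVMPPC_GSID_LR with kvmppc_gsb_send_datum()/kvmppc_gsb_receive_datum().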
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index c099780385dd..ddb99e982917 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -100,6 +100,18 @@
#define H_COP_HW -74
#define H_STATE -75
#define H_IN_USE -77
+
+#define H_INVALID_ELEMENT_ID -79
+#define H_INVALID_ELEMENT_SIZE -80
+#define H_INVALID_ELEMENT_VALUE -81
+#define H_INPUT_BUFFER_NOT_DEFINED -82
+#define H_INPUT_BUFFER_TOO_SMALL -83
+#define H_OUTPUT_BUFFER_NOT_DEFINED -84
+#define H_OUTPUT_BUFFER_TOO_SMALL -85
+#define H_PARTITION_PAGE_TABLE_NOT_DEFINED -86
+#define H_GUEST_VCPU_STATE_NOT_HV_OWNED -87
+
+
#define H_UNSUPPORTED_FLAG_START -256
#define H_UNSUPPORTED_FLAG_END -511
#define H_MULTI_THREADS_ACTIVE -9005
@@ -381,6 +393,15 @@
#define H_ENTER_NESTED 0xF804
#define H_TLB_INVALIDATE 0xF808
#define H_COPY_TOFROM_GUEST 0xF80C
+#define H_GUEST_GET_CAPABILITIES 0x460
+#define H_GUEST_SET_CAPABILITIES 0x464
+#define H_GUEST_CREATE 0x470
+#define H_GUEST_CREATE_VCPU 0x474
+#define H_GUEST_GET_STATE 0x478
+#define H_GUEST_SET_STATE 0x47C
+#define H_GUEST_RUN_VCPU 0x480
+#define H_GUEST_COPY_MEMORY 0x484
+#define H_GUEST_DELETE 0x488
/* Flags for H_SVM_PAGE_IN */
#define H_PAGE_IN_SHARED 0x1
@@ -467,6 +488,15 @@
#define H_RPTI_PAGE_1G 0x08
#define H_RPTI_PAGE_ALL (-1UL)
+/* Flags for H_GUEST_{S,G}ET_STATE */
+#define H_GUEST_FLAGS_WIDE (1UL<<(63-0))
+
+/* Flag values used for H_GUEST_{S,G}ET_CAPABILITIES */
+#define H_GUEST_CAP_COPY_MEM (1UL<<(63-0))
+#define H_GUEST_CAP_POWER9 (1UL<<(63-1))
+#define H_GUEST_CAP_POWER10 (1UL<<(63-2))
+#define H_GUEST_CAP_BITMAP2 (1UL<<(63-63))
+
#ifndef __ASSEMBLY__
#include <linux/types.h>
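The new flag constants above follow the PAPR's MSB-0 convention, where bit n of the 64-bit doubleword is (1UL << (63 - n)). Worked out from the definitions in this hunk, that gives for example:

	H_GUEST_FLAGS_WIDE  = 1UL << 63 = 0x8000000000000000  (bit 0)
	H_GUEST_CAP_POWER10 = 1UL << 61 = 0x2000000000000000  (bit 2)
	H_GUEST_CAP_BITMAP2 = 1UL << 0  = 0x0000000000000001  (bit 63)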
diff --git a/arch/powerpc/include/asm/imc-pmu.h b/arch/powerpc/include/asm/imc-pmu.h
index 699a88584ae1..a656635df386 100644
--- a/arch/powerpc/include/asm/imc-pmu.h
+++ b/arch/powerpc/include/asm/imc-pmu.h
@@ -74,14 +74,14 @@ struct imc_events {
* The following is the data structure to hold trace imc data.
*/
struct trace_imc_data {
- u64 tb1;
- u64 ip;
- u64 val;
- u64 cpmc1;
- u64 cpmc2;
- u64 cpmc3;
- u64 cpmc4;
- u64 tb2;
+ __be64 tb1;
+ __be64 ip;
+ __be64 val;
+ __be64 cpmc1;
+ __be64 cpmc2;
+ __be64 cpmc3;
+ __be64 cpmc4;
+ __be64 tb2;
};
/* Event attribute array index */
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 0732b743e099..5220274a6277 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -950,7 +950,7 @@ extern void __iomem *__ioremap_caller(phys_addr_t, unsigned long size,
* almost all conceivable cases a device driver should not be using
* this function
*/
-static inline unsigned long virt_to_phys(volatile void * address)
+static inline unsigned long virt_to_phys(const volatile void * address)
{
WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && !virt_addr_valid(address));
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index a1ddba01e7d1..e1b43aa12175 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -99,10 +99,14 @@ void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_co
void kexec_copy_flush(struct kimage *image);
-#if defined(CONFIG_CRASH_DUMP) && defined(CONFIG_PPC_RTAS)
+#if defined(CONFIG_CRASH_DUMP)
+bool is_kdump_kernel(void);
+#define is_kdump_kernel is_kdump_kernel
+#if defined(CONFIG_PPC_RTAS)
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
#define crash_free_reserved_phys_range crash_free_reserved_phys_range
-#endif
+#endif /* CONFIG_PPC_RTAS */
+#endif /* CONFIG_CRASH_DUMP */
#ifdef CONFIG_KEXEC_FILE
extern const struct kexec_file_ops kexec_elf64_ops;
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index bbf5e2c5fe09..4f527d09c92b 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>
+#include <asm/guest-state-buffer.h>
struct kvmppc_bat {
u64 raw;
@@ -191,14 +192,14 @@ extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *gpte, bool data, bool iswrite);
extern void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
- unsigned int pshift, unsigned int lpid);
+ unsigned int pshift, u64 lpid);
extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
unsigned int shift,
const struct kvm_memory_slot *memslot,
- unsigned int lpid);
+ u64 lpid);
extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested,
bool writing, unsigned long gpa,
- unsigned int lpid);
+ u64 lpid);
extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
unsigned long gpa,
struct kvm_memory_slot *memslot,
@@ -207,7 +208,7 @@ extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
extern int kvmppc_init_vm_radix(struct kvm *kvm);
extern void kvmppc_free_radix(struct kvm *kvm);
extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
- unsigned int lpid);
+ u64 lpid);
extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
@@ -295,12 +296,13 @@ static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
#endif
+extern unsigned long nested_capabilities;
long kvmhv_nested_init(void);
void kvmhv_nested_exit(void);
void kvmhv_vm_nested_init(struct kvm *kvm);
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
-void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
+void kvmhv_set_ptbl_entry(u64 lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
@@ -316,6 +318,69 @@ long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+
+extern struct static_key_false __kvmhv_is_nestedv2;
+
+static inline bool kvmhv_is_nestedv2(void)
+{
+ return static_branch_unlikely(&__kvmhv_is_nestedv2);
+}
+
+static inline bool kvmhv_is_nestedv1(void)
+{
+ return !static_branch_likely(&__kvmhv_is_nestedv2);
+}
+
+#else
+
+static inline bool kvmhv_is_nestedv2(void)
+{
+ return false;
+}
+
+static inline bool kvmhv_is_nestedv1(void)
+{
+ return false;
+}
+
+#endif
+
+int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs);
+int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs);
+int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden);
+int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden);
+
+static inline int kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu,
+ struct pt_regs *regs)
+{
+ if (kvmhv_is_nestedv2())
+ return __kvmhv_nestedv2_reload_ptregs(vcpu, regs);
+ return 0;
+}
+static inline int kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
+ struct pt_regs *regs)
+{
+ if (kvmhv_is_nestedv2())
+ return __kvmhv_nestedv2_mark_dirty_ptregs(vcpu, regs);
+ return 0;
+}
+
+static inline int kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
+{
+ if (kvmhv_is_nestedv2())
+ return __kvmhv_nestedv2_mark_dirty(vcpu, iden);
+ return 0;
+}
+
+static inline int kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
+{
+ if (kvmhv_is_nestedv2())
+ return __kvmhv_nestedv2_cached_reload(vcpu, iden);
+ return 0;
+}
+
extern int kvm_irq_bypass;
static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
@@ -335,60 +400,72 @@ static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
vcpu->arch.regs.gpr[num] = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(num));
}
static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_GPR(num)) < 0);
return vcpu->arch.regs.gpr[num];
}
static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
vcpu->arch.regs.ccr = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR);
}
static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CR) < 0);
return vcpu->arch.regs.ccr;
}
static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.regs.xer = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER);
}
static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_XER) < 0);
return vcpu->arch.regs.xer;
}
static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.regs.ctr = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR);
}
static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CTR) < 0);
return vcpu->arch.regs.ctr;
}
static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.regs.link = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR);
}
static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_LR) < 0);
return vcpu->arch.regs.link;
}
static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.regs.nip = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA);
}
static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_NIA) < 0);
return vcpu->arch.regs.nip;
}
@@ -403,10 +480,141 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
return vcpu->arch.fault_dar;
}
+static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
+{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0);
+ return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
+}
+
+static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
+{
+ vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i));
+}
+
+static inline u64 kvmppc_get_fpscr(struct kvm_vcpu *vcpu)
+{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_FPSCR) < 0);
+ return vcpu->arch.fp.fpscr;
+}
+
+static inline void kvmppc_set_fpscr(struct kvm_vcpu *vcpu, u64 val)
+{
+ vcpu->arch.fp.fpscr = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_FPSCR);
+}
+
+
+static inline u64 kvmppc_get_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j)
+{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0);
+ return vcpu->arch.fp.fpr[i][j];
+}
+
+static inline void kvmppc_set_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j,
+ u64 val)
+{
+ vcpu->arch.fp.fpr[i][j] = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i));
+}
+
+#ifdef CONFIG_ALTIVEC
+static inline void kvmppc_get_vsx_vr(struct kvm_vcpu *vcpu, int i, vector128 *v)
+{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(32 + i)) < 0);
+ *v = vcpu->arch.vr.vr[i];
+}
+
+static inline void kvmppc_set_vsx_vr(struct kvm_vcpu *vcpu, int i,
+ vector128 *val)
+{
+ vcpu->arch.vr.vr[i] = *val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(32 + i));
+}
+
+static inline u32 kvmppc_get_vscr(struct kvm_vcpu *vcpu)
+{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSCR) < 0);
+ return vcpu->arch.vr.vscr.u[3];
+}
+
+static inline void kvmppc_set_vscr(struct kvm_vcpu *vcpu, u32 val)
+{
+ vcpu->arch.vr.vscr.u[3] = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSCR);
+}
+#endif
+
+#define KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size, iden) \
+static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
+{ \
+ \
+ vcpu->arch.reg = val; \
+ kvmhv_nestedv2_mark_dirty(vcpu, iden); \
+}
+
+#define KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size, iden) \
+static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
+{ \
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0); \
+ return vcpu->arch.reg; \
+}
+
+#define KVMPPC_BOOK3S_VCPU_ACCESSOR(reg, size, iden) \
+ KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size, iden) \
+ KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size, iden) \
+
+KVMPPC_BOOK3S_VCPU_ACCESSOR(pid, 32, KVMPPC_GSID_PIDR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(tar, 64, KVMPPC_GSID_TAR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbhr, 64, KVMPPC_GSID_EBBHR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbrr, 64, KVMPPC_GSID_EBBRR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(bescr, 64, KVMPPC_GSID_BESCR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(ic, 64, KVMPPC_GSID_IC)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(vrsave, 64, KVMPPC_GSID_VRSAVE)
+
+
+#define KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size, iden) \
+static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
+{ \
+ vcpu->arch.vcore->reg = val; \
+ kvmhv_nestedv2_mark_dirty(vcpu, iden); \
+}
+
+#define KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size, iden) \
+static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
+{ \
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0); \
+ return vcpu->arch.vcore->reg; \
+}
+
+#define KVMPPC_BOOK3S_VCORE_ACCESSOR(reg, size, iden) \
+ KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size, iden) \
+ KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size, iden) \
+
+
+KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64, KVMPPC_GSID_VTB)
+KVMPPC_BOOK3S_VCORE_ACCESSOR(tb_offset, 64, KVMPPC_GSID_TB_OFFSET)
+KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(arch_compat, 32, KVMPPC_GSID_LOGICAL_PVR)
+KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(lpcr, 64, KVMPPC_GSID_LPCR)
+
+static inline u64 kvmppc_get_dec_expires(struct kvm_vcpu *vcpu)
+{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_TB_OFFSET) < 0);
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB) < 0);
+ return vcpu->arch.dec_expires;
+}
+
+static inline void kvmppc_set_dec_expires(struct kvm_vcpu *vcpu, u64 val)
+{
+ vcpu->arch.dec_expires = val;
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_TB_OFFSET) < 0);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB);
+}
+
/* Expiry time of vcpu DEC relative to host TB */
static inline u64 kvmppc_dec_expires_host_tb(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.dec_expires - vcpu->arch.vcore->tb_offset;
+ return kvmppc_get_dec_expires(vcpu) - kvmppc_get_tb_offset(vcpu);
}
static inline bool is_kvmppc_resume_guest(int r)
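
The accessors above all follow the same nestedv2 pattern: a setter updates the shadow register and marks the corresponding guest-state ID dirty, while a getter first asks for a cached reload of that ID before reading the shadow copy. A standalone sketch of that flow, assuming a much-simplified vcpu with invented GSID names and a stubbed reload (this is not the kernel implementation):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the guest-state IDs and the vcpu shadow state. */
enum gs_id { GSID_GPR0, GSID_CR, GSID_MAX };

struct vcpu {
	uint64_t regs[GSID_MAX];   /* shadow copy held by the L1 hypervisor */
	bool     dirty[GSID_MAX];  /* must be pushed to the L0 before run   */
	bool     valid[GSID_MAX];  /* cached copy is current w.r.t. the L0  */
};

/* Pretend reload: the real code would fetch the value from the L0. */
static int cached_reload(struct vcpu *v, enum gs_id id)
{
	if (!v->valid[id])
		v->valid[id] = true;   /* ...after fetching from the L0 */
	return 0;
}

static void set_reg(struct vcpu *v, enum gs_id id, uint64_t val)
{
	v->regs[id] = val;
	v->dirty[id] = true;   /* flushed in one batch before the next entry */
}

static uint64_t get_reg(struct vcpu *v, enum gs_id id)
{
	cached_reload(v, id);  /* make sure the shadow copy is up to date */
	return v->regs[id];
}

int main(void)
{
	struct vcpu v = { { 0 } };

	set_reg(&v, GSID_GPR0, 0x1234);
	printf("gpr0 = 0x%llx\n", (unsigned long long)get_reg(&v, GSID_GPR0));
	return 0;
}

With this shape, dirty IDs can be flushed to the L0 in a single guest-state buffer before the vCPU is run again, and stale values are only fetched on demand.
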
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index d49065af08e9..2477021bff54 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -624,7 +624,7 @@ static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
unsigned long gpa, unsigned int level,
- unsigned long mmu_seq, unsigned int lpid,
+ unsigned long mmu_seq, u64 lpid,
unsigned long *rmapp, struct rmap_nested **n_rmap);
extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
struct rmap_nested **n_rmap);
@@ -677,6 +677,12 @@ static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
extern pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
unsigned long ea, unsigned *hshift);
+int kvmhv_nestedv2_vcpu_create(struct kvm_vcpu *vcpu, struct kvmhv_nestedv2_io *io);
+void kvmhv_nestedv2_vcpu_free(struct kvm_vcpu *vcpu, struct kvmhv_nestedv2_io *io);
+int kvmhv_nestedv2_flush_vcpu(struct kvm_vcpu *vcpu, u64 time_limit);
+int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1);
+int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu);
+
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index 0c3401b2e19e..7c3291aa8922 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -89,6 +89,16 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
return vcpu->arch.regs.nip;
}
+static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
+{
+ vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
+}
+
+static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
+{
+ return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
+}
+
#ifdef CONFIG_BOOKE
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 14ee0dece853..8799b37be295 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -25,6 +25,7 @@
#include <asm/cacheflush.h>
#include <asm/hvcall.h>
#include <asm/mce.h>
+#include <asm/guest-state-buffer.h>
#define __KVM_HAVE_ARCH_VCPU_DEBUGFS
@@ -276,7 +277,7 @@ struct kvm_resize_hpt;
#define KVMPPC_SECURE_INIT_ABORT 0x4 /* H_SVM_INIT_ABORT issued */
struct kvm_arch {
- unsigned int lpid;
+ u64 lpid;
unsigned int smt_mode; /* # vcpus per virtual core */
unsigned int emul_smt_mode; /* emulated SMT mode, on P9 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -509,6 +510,23 @@ union xive_tma_w01 {
__be64 w01;
};
+ /* Nestedv2 H_GUEST_RUN_VCPU configuration */
+struct kvmhv_nestedv2_config {
+ struct kvmppc_gs_buff_info vcpu_run_output_cfg;
+ struct kvmppc_gs_buff_info vcpu_run_input_cfg;
+ u64 vcpu_run_output_size;
+};
+
+ /* Nestedv2 L1<->L0 communication state */
+struct kvmhv_nestedv2_io {
+ struct kvmhv_nestedv2_config cfg;
+ struct kvmppc_gs_buff *vcpu_run_output;
+ struct kvmppc_gs_buff *vcpu_run_input;
+ struct kvmppc_gs_msg *vcpu_message;
+ struct kvmppc_gs_msg *vcore_message;
+ struct kvmppc_gs_bitmap valids;
+};
+
struct kvm_vcpu_arch {
ulong host_stack;
u32 host_pid;
@@ -829,6 +847,8 @@ struct kvm_vcpu_arch {
u64 nested_hfscr; /* HFSCR that the L1 requested for the nested guest */
u32 nested_vcpu_id;
gpa_t nested_io_gpr;
+ /* For nested APIv2 guests */
+ struct kvmhv_nestedv2_io nestedv2_io;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index b4da8514af43..3281215097cc 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -615,6 +615,42 @@ static inline bool kvmhv_on_pseries(void)
{
return false;
}
+
+#endif
+
+#ifndef CONFIG_PPC_BOOK3S
+
+static inline bool kvmhv_is_nestedv2(void)
+{
+ return false;
+}
+
+static inline bool kvmhv_is_nestedv1(void)
+{
+ return false;
+}
+
+static inline int kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu,
+ struct pt_regs *regs)
+{
+ return 0;
+}
+static inline int kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
+ struct pt_regs *regs)
+{
+ return 0;
+}
+
+static inline int kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
+{
+ return 0;
+}
+
+static inline int kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
+{
+ return 0;
+}
+
#endif
#ifdef CONFIG_KVM_XICS
@@ -927,79 +963,85 @@ static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
#endif
}
-#define SPRNG_WRAPPER_GET(reg, bookehv_spr) \
+#define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_GET(reg, bookehv_spr) \
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
{ \
return mfspr(bookehv_spr); \
} \
-#define SPRNG_WRAPPER_SET(reg, bookehv_spr) \
+#define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_SET(reg, bookehv_spr) \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val) \
{ \
mtspr(bookehv_spr, val); \
} \
-#define SHARED_WRAPPER_GET(reg, size) \
+#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(reg, size, iden) \
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
{ \
+ if (iden) \
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0); \
if (kvmppc_shared_big_endian(vcpu)) \
- return be##size##_to_cpu(vcpu->arch.shared->reg); \
+ return be##size##_to_cpu((__be##size __force)vcpu->arch.shared->reg); \
else \
- return le##size##_to_cpu(vcpu->arch.shared->reg); \
+ return le##size##_to_cpu((__le##size __force)vcpu->arch.shared->reg); \
} \
-#define SHARED_WRAPPER_SET(reg, size) \
+#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size, iden) \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
{ \
if (kvmppc_shared_big_endian(vcpu)) \
- vcpu->arch.shared->reg = cpu_to_be##size(val); \
+ vcpu->arch.shared->reg = (u##size __force)cpu_to_be##size(val); \
else \
- vcpu->arch.shared->reg = cpu_to_le##size(val); \
+ vcpu->arch.shared->reg = (u##size __force)cpu_to_le##size(val); \
+ \
+ if (iden) \
+ kvmhv_nestedv2_mark_dirty(vcpu, iden); \
} \
-#define SHARED_WRAPPER(reg, size) \
- SHARED_WRAPPER_GET(reg, size) \
- SHARED_WRAPPER_SET(reg, size) \
+#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR(reg, size, iden) \
+ KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(reg, size, iden) \
+ KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size, iden) \
-#define SPRNG_WRAPPER(reg, bookehv_spr) \
- SPRNG_WRAPPER_GET(reg, bookehv_spr) \
- SPRNG_WRAPPER_SET(reg, bookehv_spr) \
+#define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR(reg, bookehv_spr) \
+ KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_GET(reg, bookehv_spr) \
+ KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_SET(reg, bookehv_spr) \
#ifdef CONFIG_KVM_BOOKE_HV
-#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr) \
- SPRNG_WRAPPER(reg, bookehv_spr) \
+#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr, iden) \
+ KVMPPC_BOOKE_HV_SPRNG_ACCESSOR(reg, bookehv_spr) \
#else
-#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr) \
- SHARED_WRAPPER(reg, size) \
+#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr, iden) \
+ KVMPPC_VCPU_SHARED_REGS_ACCESSOR(reg, size, iden) \
#endif
-SHARED_WRAPPER(critical, 64)
-SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
-SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
-SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
-SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
-SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
-SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
-SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
-SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
-SHARED_WRAPPER_GET(msr, 64)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(critical, 64, 0)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg0, 64, SPRN_GSPRG0, KVMPPC_GSID_SPRG0)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg1, 64, SPRN_GSPRG1, KVMPPC_GSID_SPRG1)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg2, 64, SPRN_GSPRG2, KVMPPC_GSID_SPRG2)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg3, 64, SPRN_GSPRG3, KVMPPC_GSID_SPRG3)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr0, 64, SPRN_GSRR0, KVMPPC_GSID_SRR0)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr1, 64, SPRN_GSRR1, KVMPPC_GSID_SRR1)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(dar, 64, SPRN_GDEAR, KVMPPC_GSID_DAR)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(esr, 64, SPRN_GESR, 0)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(msr, 64, KVMPPC_GSID_MSR)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
if (kvmppc_shared_big_endian(vcpu))
vcpu->arch.shared->msr = cpu_to_be64(val);
else
vcpu->arch.shared->msr = cpu_to_le64(val);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_MSR);
}
-SHARED_WRAPPER(dsisr, 32)
-SHARED_WRAPPER(int_pending, 32)
-SHARED_WRAPPER(sprg4, 64)
-SHARED_WRAPPER(sprg5, 64)
-SHARED_WRAPPER(sprg6, 64)
-SHARED_WRAPPER(sprg7, 64)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(dsisr, 32, KVMPPC_GSID_DSISR)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(int_pending, 32, 0)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg4, 64, 0)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg5, 64, 0)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg6, 64, 0)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg7, 64, 0)
static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
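
The renamed shared-register accessors keep the vcpu->arch.shared fields in the guest's byte order and convert on every access, optionally tying the field to a guest-state ID. A minimal userspace model of the byte-order handling only (the struct layout, field and helper names are invented for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t bswap64(uint64_t x)
{
	uint64_t out = 0;
	int i;

	for (i = 0; i < 8; i++)
		out = (out << 8) | ((x >> (8 * i)) & 0xff);
	return out;
}

struct shared_page {
	bool     big_endian;  /* guest's chosen byte order   */
	uint64_t sprg0;       /* stored in guest byte order  */
};

static bool host_is_big_endian(void)
{
	const uint16_t probe = 1;

	return *(const uint8_t *)&probe == 0;
}

static uint64_t to_guest_order(const struct shared_page *s, uint64_t v)
{
	return (s->big_endian == host_is_big_endian()) ? v : bswap64(v);
}

static void set_sprg0(struct shared_page *s, uint64_t val)
{
	s->sprg0 = to_guest_order(s, val);   /* convert once on store */
}

static uint64_t get_sprg0(const struct shared_page *s)
{
	return to_guest_order(s, s->sprg0);  /* and back on load */
}

int main(void)
{
	struct shared_page s = { .big_endian = true };

	set_sprg0(&s, 0x0123456789abcdefULL);
	printf("sprg0 = 0x%llx\n", (unsigned long long)get_sprg0(&s));
	return 0;
}
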
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
index 0e93a4728c9e..141d82e249a8 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
@@ -188,7 +188,6 @@ typedef struct {
} mm_context_t;
#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
-#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))
/* Page size definitions, common between 32 and 64-bit
*
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index f99c53a5f184..9164a9e41b02 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -9,10 +9,6 @@
#include <linux/threads.h>
#include <asm/mmu.h> /* For sub-arch specific PPC_PIN_SIZE */
-#ifdef CONFIG_44x
-extern int icache_44x_need_flush;
-#endif
-
#endif /* __ASSEMBLY__ */
#define PTE_INDEX_SIZE PTE_SHIFT
@@ -55,26 +51,22 @@ extern int icache_44x_need_flush;
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define pte_ERROR(e) \
- pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
- (unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-#ifndef __ASSEMBLY__
-
-int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
-void unmap_kernel_page(unsigned long va);
-
-#endif /* !__ASSEMBLY__ */
-
-
/*
* This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
* value (for now) on others, from where we can start layout kernel
* virtual space that goes below PKMAP and FIXMAP
*/
-#include <asm/fixmap.h>
+
+#define FIXADDR_SIZE 0
+#ifdef CONFIG_KASAN
+#include <asm/kasan.h>
+#define FIXADDR_TOP (KASAN_SHADOW_START - PAGE_SIZE)
+#else
+#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
+#endif
/*
* ioremap_bot starts at that address. Early ioremaps move down from there,
@@ -151,7 +143,7 @@ void unmap_kernel_page(unsigned long va);
* The mask covered by the RPN must be a ULL on 32-bit platforms with
* 64-bit PTEs.
*/
-#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
@@ -159,48 +151,8 @@ void unmap_kernel_page(unsigned long va);
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
-/*
- * _PAGE_CHG_MASK masks of bits that are to be preserved across
- * pgprot changes.
- */
-#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
-
#ifndef __ASSEMBLY__
-#define pte_clear(mm, addr, ptep) \
- do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0)
-
-#ifndef pte_mkwrite_novma
-static inline pte_t pte_mkwrite_novma(pte_t pte)
-{
- return __pte(pte_val(pte) | _PAGE_RW);
-}
-#endif
-
-static inline pte_t pte_mkdirty(pte_t pte)
-{
- return __pte(pte_val(pte) | _PAGE_DIRTY);
-}
-
-static inline pte_t pte_mkyoung(pte_t pte)
-{
- return __pte(pte_val(pte) | _PAGE_ACCESSED);
-}
-
-#ifndef pte_wrprotect
-static inline pte_t pte_wrprotect(pte_t pte)
-{
- return __pte(pte_val(pte) & ~_PAGE_RW);
-}
-#endif
-
-#ifndef pte_mkexec
-static inline pte_t pte_mkexec(pte_t pte)
-{
- return __pte(pte_val(pte) | _PAGE_EXEC);
-}
-#endif
-
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK)
@@ -210,141 +162,6 @@ static inline void pmd_clear(pmd_t *pmdp)
}
/*
- * PTE updates. This function is called whenever an existing
- * valid PTE is updated. This does -not- include set_pte_at()
- * which nowadays only sets a new PTE.
- *
- * Depending on the type of MMU, we may need to use atomic updates
- * and the PTE may be either 32 or 64 bit wide. In the later case,
- * when using atomic updates, only the low part of the PTE is
- * accessed atomically.
- *
- * In addition, on 44x, we also maintain a global flag indicating
- * that an executable user mapping was modified, which is needed
- * to properly flush the virtually tagged instruction cache of
- * those implementations.
- *
- * On the 8xx, the page tables are a bit special. For 16k pages, we have
- * 4 identical entries. For 512k pages, we have 128 entries as if it was
- * 4k pages, but they are flagged as 512k pages for the hardware.
- * For other page sizes, we have a single entry in the table.
- */
-#ifdef CONFIG_PPC_8xx
-static pmd_t *pmd_off(struct mm_struct *mm, unsigned long addr);
-static int hugepd_ok(hugepd_t hpd);
-
-static int number_of_cells_per_pte(pmd_t *pmd, pte_basic_t val, int huge)
-{
- if (!huge)
- return PAGE_SIZE / SZ_4K;
- else if (hugepd_ok(*((hugepd_t *)pmd)))
- return 1;
- else if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && !(val & _PAGE_HUGE))
- return SZ_16K / SZ_4K;
- else
- return SZ_512K / SZ_4K;
-}
-
-static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
- unsigned long clr, unsigned long set, int huge)
-{
- pte_basic_t *entry = (pte_basic_t *)p;
- pte_basic_t old = pte_val(*p);
- pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
- int num, i;
- pmd_t *pmd = pmd_off(mm, addr);
-
- num = number_of_cells_per_pte(pmd, new, huge);
-
- for (i = 0; i < num; i += PAGE_SIZE / SZ_4K, new += PAGE_SIZE) {
- *entry++ = new;
- if (IS_ENABLED(CONFIG_PPC_16K_PAGES) && num != 1) {
- *entry++ = new;
- *entry++ = new;
- *entry++ = new;
- }
- }
-
- return old;
-}
-
-#ifdef CONFIG_PPC_16K_PAGES
-#define ptep_get ptep_get
-static inline pte_t ptep_get(pte_t *ptep)
-{
- pte_basic_t val = READ_ONCE(ptep->pte);
- pte_t pte = {val, val, val, val};
-
- return pte;
-}
-#endif /* CONFIG_PPC_16K_PAGES */
-
-#else
-static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
- unsigned long clr, unsigned long set, int huge)
-{
- pte_basic_t old = pte_val(*p);
- pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
-
- *p = __pte(new);
-
-#ifdef CONFIG_44x
- if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
- icache_44x_need_flush = 1;
-#endif
- return old;
-}
-#endif
-
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- unsigned long old;
- old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
- return (old & _PAGE_ACCESSED) != 0;
-}
-#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
- __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)
-
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
-{
- return __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
-}
-
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-#ifndef ptep_set_wrprotect
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
-{
- pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
-}
-#endif
-
-#ifndef __ptep_set_access_flags
-static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
- pte_t *ptep, pte_t entry,
- unsigned long address,
- int psize)
-{
- unsigned long set = pte_val(entry) &
- (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
- int huge = psize > mmu_virtual_psize ? 1 : 0;
-
- pte_update(vma->vm_mm, address, ptep, 0, set, huge);
-
- flush_tlb_page(vma, address);
-}
-#endif
-
-static inline int pte_young(pte_t pte)
-{
- return pte_val(pte) & _PAGE_ACCESSED;
-}
-
-/*
* Note that on Book E processors, the pmd contains the kernel virtual
* (lowmem) address of the pte page. The physical address is less useful
* because everything runs with translation enabled (even the TLB miss
diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h
index 6fe46e754556..d759cfd74754 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-40x.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h
@@ -42,10 +42,10 @@
#define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
-#define _PAGE_USER 0x010 /* matches one of the zone permission bits */
+#define _PAGE_READ 0x010 /* software: read permission */
#define _PAGE_SPECIAL 0x020 /* software: Special page */
#define _PAGE_DIRTY 0x080 /* software: dirty page */
-#define _PAGE_RW 0x100 /* hardware: WR, anded with dirty in exception */
+#define _PAGE_WRITE 0x100 /* hardware: WR, anded with dirty in exception */
#define _PAGE_EXEC 0x200 /* hardware: EX permission */
#define _PAGE_ACCESSED 0x400 /* software: R: page referenced */
@@ -55,11 +55,6 @@
/* cache related flags non existing on 40x */
#define _PAGE_COHERENT 0
-#define _PAGE_KERNEL_RO 0
-#define _PAGE_KERNEL_ROX _PAGE_EXEC
-#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
-#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
-
#define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */
#define _PMD_PRESENT_MASK _PMD_PRESENT
#define _PMD_BAD 0x802
@@ -69,20 +64,10 @@
#define _PTE_NONE_MASK 0
-/* Until my rework is finished, 40x still needs atomic PTE updates */
-#define PTE_ATOMIC_UPDATES 1
-
#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE (_PAGE_BASE_NC)
-/* Permission masks used to generate the __P and __S table */
-#define PAGE_NONE __pgprot(_PAGE_BASE)
-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#include <asm/pgtable-masks.h>
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_NOHASH_32_PTE_40x_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-44x.h b/arch/powerpc/include/asm/nohash/32/pte-44x.h
index b7ed13cee137..851813725237 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-44x.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-44x.h
@@ -63,12 +63,12 @@
*/
#define _PAGE_PRESENT 0x00000001 /* S: PTE valid */
-#define _PAGE_RW 0x00000002 /* S: Write permission */
+#define _PAGE_WRITE 0x00000002 /* S: Write permission */
#define _PAGE_EXEC 0x00000004 /* H: Execute permission */
-#define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */
+#define _PAGE_READ 0x00000008 /* S: Read permission */
#define _PAGE_DIRTY 0x00000010 /* S: Page dirty */
#define _PAGE_SPECIAL 0x00000020 /* S: Special page */
-#define _PAGE_USER 0x00000040 /* S: User page */
+#define _PAGE_ACCESSED 0x00000040 /* S: Page referenced */
#define _PAGE_ENDIAN 0x00000080 /* H: E bit */
#define _PAGE_GUARDED 0x00000100 /* H: G bit */
#define _PAGE_COHERENT 0x00000200 /* H: M bit */
@@ -78,11 +78,6 @@
/* No page size encoding in the linux PTE */
#define _PAGE_PSIZE 0
-#define _PAGE_KERNEL_RO 0
-#define _PAGE_KERNEL_ROX _PAGE_EXEC
-#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
-#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
-
/* TODO: Add large page lowmem mapping support */
#define _PMD_PRESENT 0
#define _PMD_PRESENT_MASK (PAGE_MASK)
@@ -105,14 +100,7 @@
#define _PAGE_BASE (_PAGE_BASE_NC)
#endif
-/* Permission masks used to generate the __P and __S table */
-#define PAGE_NONE __pgprot(_PAGE_BASE)
-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#include <asm/pgtable-masks.h>
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_NOHASH_32_PTE_44x_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-85xx.h b/arch/powerpc/include/asm/nohash/32/pte-85xx.h
index 16451df5ddb0..653a342d3b25 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-85xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-85xx.h
@@ -17,9 +17,9 @@
*/
/* Definitions for FSL Book-E Cores */
-#define _PAGE_PRESENT 0x00001 /* S: PTE contains a translation */
-#define _PAGE_USER 0x00002 /* S: User page (maps to UR) */
-#define _PAGE_RW 0x00004 /* S: Write permission (SW) */
+#define _PAGE_READ 0x00001 /* H: Read permission (SR) */
+#define _PAGE_PRESENT 0x00002 /* S: PTE contains a translation */
+#define _PAGE_WRITE 0x00004 /* S: Write permission (SW) */
#define _PAGE_DIRTY 0x00008 /* S: Page dirty */
#define _PAGE_EXEC 0x00010 /* H: SX permission */
#define _PAGE_ACCESSED 0x00020 /* S: Page referenced */
@@ -31,11 +31,6 @@
#define _PAGE_WRITETHRU 0x00400 /* H: W bit */
#define _PAGE_SPECIAL 0x00800 /* S: Special page */
-#define _PAGE_KERNEL_RO 0
-#define _PAGE_KERNEL_ROX _PAGE_EXEC
-#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
-#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
-
/* No page size encoding in the linux PTE */
#define _PAGE_PSIZE 0
@@ -61,14 +56,7 @@
#define _PAGE_BASE (_PAGE_BASE_NC)
#endif
-/* Permission masks used to generate the __P and __S table */
-#define PAGE_NONE __pgprot(_PAGE_BASE)
-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#include <asm/pgtable-masks.h>
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_NOHASH_32_PTE_FSL_85xx_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index e6fe1d5731f2..137dc3c84e45 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -48,6 +48,11 @@
#define _PAGE_HUGE 0x0800 /* Copied to L1 PS bit 29 */
+#define _PAGE_NAX (_PAGE_NA | _PAGE_EXEC)
+#define _PAGE_ROX (_PAGE_RO | _PAGE_EXEC)
+#define _PAGE_RW 0
+#define _PAGE_RWX _PAGE_EXEC
+
/* cache related flags non existing on 8xx */
#define _PAGE_COHERENT 0
#define _PAGE_WRITETHRU 0
@@ -77,14 +82,7 @@
#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
#define _PAGE_BASE (_PAGE_BASE_NC)
-/* Permission masks used to generate the __P and __S table */
-#define PAGE_NONE __pgprot(_PAGE_BASE | _PAGE_NA)
-#define PAGE_SHARED __pgprot(_PAGE_BASE)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_RO)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_RO | _PAGE_EXEC)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_RO)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_RO | _PAGE_EXEC)
+#include <asm/pgtable-masks.h>
#ifndef __ASSEMBLY__
static inline pte_t pte_wrprotect(pte_t pte)
@@ -115,27 +113,6 @@ static inline pte_t pte_mkwrite_novma(pte_t pte)
#define pte_mkwrite_novma pte_mkwrite_novma
-static inline bool pte_user(pte_t pte)
-{
- return !(pte_val(pte) & _PAGE_SH);
-}
-
-#define pte_user pte_user
-
-static inline pte_t pte_mkprivileged(pte_t pte)
-{
- return __pte(pte_val(pte) | _PAGE_SH);
-}
-
-#define pte_mkprivileged pte_mkprivileged
-
-static inline pte_t pte_mkuser(pte_t pte)
-{
- return __pte(pte_val(pte) & ~_PAGE_SH);
-}
-
-#define pte_mkuser pte_mkuser
-
static inline pte_t pte_mkhuge(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_SPS | _PAGE_HUGE);
@@ -187,6 +164,63 @@ static inline unsigned long pte_leaf_size(pte_t pte)
#define pte_leaf_size pte_leaf_size
+/*
+ * On the 8xx, the page tables are a bit special. For 16k pages, we have
+ * 4 identical entries. For 512k pages, we have 128 entries as if it was
+ * 4k pages, but they are flagged as 512k pages for the hardware.
+ * For other page sizes, we have a single entry in the table.
+ */
+static pmd_t *pmd_off(struct mm_struct *mm, unsigned long addr);
+static int hugepd_ok(hugepd_t hpd);
+
+static inline int number_of_cells_per_pte(pmd_t *pmd, pte_basic_t val, int huge)
+{
+ if (!huge)
+ return PAGE_SIZE / SZ_4K;
+ else if (hugepd_ok(*((hugepd_t *)pmd)))
+ return 1;
+ else if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && !(val & _PAGE_HUGE))
+ return SZ_16K / SZ_4K;
+ else
+ return SZ_512K / SZ_4K;
+}
+
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+ unsigned long clr, unsigned long set, int huge)
+{
+ pte_basic_t *entry = (pte_basic_t *)p;
+ pte_basic_t old = pte_val(*p);
+ pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
+ int num, i;
+ pmd_t *pmd = pmd_off(mm, addr);
+
+ num = number_of_cells_per_pte(pmd, new, huge);
+
+ for (i = 0; i < num; i += PAGE_SIZE / SZ_4K, new += PAGE_SIZE) {
+ *entry++ = new;
+ if (IS_ENABLED(CONFIG_PPC_16K_PAGES) && num != 1) {
+ *entry++ = new;
+ *entry++ = new;
+ *entry++ = new;
+ }
+ }
+
+ return old;
+}
+
+#define pte_update pte_update
+
+#ifdef CONFIG_PPC_16K_PAGES
+#define ptep_get ptep_get
+static inline pte_t ptep_get(pte_t *ptep)
+{
+ pte_basic_t val = READ_ONCE(ptep->pte);
+ pte_t pte = {val, val, val, val};
+
+ return pte;
+}
+#endif /* CONFIG_PPC_16K_PAGES */
+
#endif
#endif /* __KERNEL__ */
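
As the comment moved into pte-8xx.h explains, one Linux PTE may be backed by several 4k-sized hardware cells. A trivial model of the cell count per page size (illustrative constants only, no MMU access):

#include <stdio.h>

#define SZ_4K	0x1000UL
#define SZ_16K	0x4000UL
#define SZ_512K	0x80000UL

/* How many 4k-sized table cells back one logical page of the given size. */
static unsigned long cells_for(unsigned long page_size)
{
	return page_size / SZ_4K;
}

int main(void)
{
	printf("4k   page -> %lu cell(s)\n", cells_for(SZ_4K));
	printf("16k  page -> %lu identical cells\n", cells_for(SZ_16K));
	printf("512k page -> %lu 4k-style cells\n", cells_for(SZ_512K));
	return 0;
}
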
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index eb6891e34cbd..2202c78730e8 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -57,6 +57,7 @@
#define IOREMAP_START (ioremap_bot)
#define IOREMAP_END (KERN_IO_START + KERN_IO_SIZE - FIXADDR_SIZE)
#define FIXADDR_SIZE SZ_32M
+#define FIXADDR_TOP (IOREMAP_END + FIXADDR_SIZE)
/*
* Defines the address of the vmemap area, in its own region on
@@ -74,37 +75,11 @@
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
-/*
- * _PAGE_CHG_MASK masks of bits that are to be preserved across
- * pgprot changes.
- */
-#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
-
#define H_PAGE_4K_PFN 0
#ifndef __ASSEMBLY__
/* pte_clear moved to later in this file */
-static inline pte_t pte_mkwrite_novma(pte_t pte)
-{
- return __pte(pte_val(pte) | _PAGE_RW);
-}
-
-static inline pte_t pte_mkdirty(pte_t pte)
-{
- return __pte(pte_val(pte) | _PAGE_DIRTY);
-}
-
-static inline pte_t pte_mkyoung(pte_t pte)
-{
- return __pte(pte_val(pte) | _PAGE_ACCESSED);
-}
-
-static inline pte_t pte_wrprotect(pte_t pte)
-{
- return __pte(pte_val(pte) & ~_PAGE_RW);
-}
-
#define PMD_BAD_BITS (PTE_TABLE_SIZE-1)
#define PUD_BAD_BITS (PMD_TABLE_SIZE-1)
@@ -170,107 +145,20 @@ static inline void p4d_set(p4d_t *p4dp, unsigned long val)
*p4dp = __p4d(val);
}
-/* Atomic PTE updates */
-static inline unsigned long pte_update(struct mm_struct *mm,
- unsigned long addr,
- pte_t *ptep, unsigned long clr,
- unsigned long set,
- int huge)
-{
- unsigned long old = pte_val(*ptep);
- *ptep = __pte((old & ~clr) | set);
-
- /* huge pages use the old page table lock */
- if (!huge)
- assert_pte_locked(mm, addr);
-
- return old;
-}
-
-static inline int pte_young(pte_t pte)
-{
- return pte_val(pte) & _PAGE_ACCESSED;
-}
-
-static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- unsigned long old;
-
- if (!pte_young(*ptep))
- return 0;
- old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
- return (old & _PAGE_ACCESSED) != 0;
-}
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
-({ \
- int __r; \
- __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
- __r; \
-})
-
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
-{
-
- if ((pte_val(*ptep) & _PAGE_RW) == 0)
- return;
-
- pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
-}
-
#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- if ((pte_val(*ptep) & _PAGE_RW) == 0)
- return;
-
- pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
+ pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
}
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep) \
({ \
- int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
- __ptep); \
+ int __young = ptep_test_and_clear_young(__vma, __address, __ptep);\
__young; \
})
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
- return __pte(old);
-}
-
-static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
- pte_t * ptep)
-{
- pte_update(mm, addr, ptep, ~0UL, 0, 0);
-}
-
-
-/* Set the dirty and/or accessed bits atomically in a linux PTE */
-static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
- pte_t *ptep, pte_t entry,
- unsigned long address,
- int psize)
-{
- unsigned long bits = pte_val(entry) &
- (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
-
- unsigned long old = pte_val(*ptep);
- *ptep = __pte(old | bits);
-
- flush_tlb_page(vma, address);
-}
-
-#define pte_ERROR(e) \
- pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
@@ -310,8 +198,6 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
/* We borrow MSB 56 (LSB 7) to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE 0x80
-int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
-void unmap_kernel_page(unsigned long va);
extern int __meminit vmemmap_create_mapping(unsigned long start,
unsigned long page_size,
unsigned long phys);
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index c721478c5934..427db14292c9 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -2,12 +2,23 @@
#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
#define _ASM_POWERPC_NOHASH_PGTABLE_H
+#ifndef __ASSEMBLY__
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+ unsigned long clr, unsigned long set, int huge);
+#endif
+
#if defined(CONFIG_PPC64)
#include <asm/nohash/64/pgtable.h>
#else
#include <asm/nohash/32/pgtable.h>
#endif
+/*
+ * _PAGE_CHG_MASK masks of bits that are to be preserved across
+ * pgprot changes.
+ */
+#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
+
/* Permission masks used for kernel mappings */
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
@@ -18,16 +29,136 @@
#ifndef __ASSEMBLY__
+extern int icache_44x_need_flush;
+
+/*
+ * PTE updates. This function is called whenever an existing
+ * valid PTE is updated. This does -not- include set_pte_at()
+ * which nowadays only sets a new PTE.
+ *
+ * Depending on the type of MMU, we may need to use atomic updates
+ * and the PTE may be either 32 or 64 bit wide. In the latter case,
+ * when using atomic updates, only the low part of the PTE is
+ * accessed atomically.
+ *
+ * In addition, on 44x, we also maintain a global flag indicating
+ * that an executable user mapping was modified, which is needed
+ * to properly flush the virtually tagged instruction cache of
+ * those implementations.
+ */
+#ifndef pte_update
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+ unsigned long clr, unsigned long set, int huge)
+{
+ pte_basic_t old = pte_val(*p);
+ pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
+
+ if (new == old)
+ return old;
+
+ *p = __pte(new);
+
+ if (IS_ENABLED(CONFIG_44x) && !is_kernel_addr(addr) && (old & _PAGE_EXEC))
+ icache_44x_need_flush = 1;
+
+ /* huge pages use the old page table lock */
+ if (!huge)
+ assert_pte_locked(mm, addr);
+
+ return old;
+}
+#endif
+
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ unsigned long old;
+
+ old = pte_update(vma->vm_mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
+
+ return (old & _PAGE_ACCESSED) != 0;
+}
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+
+#ifndef ptep_set_wrprotect
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
+}
+#endif
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0));
+}
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ pte_update(mm, addr, ptep, ~0UL, 0, 0);
+}
+
+/* Set the dirty and/or accessed bits atomically in a linux PTE */
+#ifndef __ptep_set_access_flags
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
+ pte_t *ptep, pte_t entry,
+ unsigned long address,
+ int psize)
+{
+ unsigned long set = pte_val(entry) &
+ (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
+ int huge = psize > mmu_virtual_psize ? 1 : 0;
+
+ pte_update(vma->vm_mm, address, ptep, 0, set, huge);
+
+ flush_tlb_page(vma, address);
+}
+#endif
+
/* Generic accessors to PTE bits */
+#ifndef pte_mkwrite_novma
+static inline pte_t pte_mkwrite_novma(pte_t pte)
+{
+ /*
+ * write implies read, hence set both
+ */
+ return __pte(pte_val(pte) | _PAGE_RW);
+}
+#endif
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+#ifndef pte_wrprotect
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_WRITE);
+}
+#endif
+
+#ifndef pte_mkexec
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_EXEC);
+}
+#endif
+
#ifndef pte_write
static inline int pte_write(pte_t pte)
{
- return pte_val(pte) & _PAGE_RW;
+ return pte_val(pte) & _PAGE_WRITE;
}
#endif
-#ifndef pte_read
-static inline int pte_read(pte_t pte) { return 1; }
-#endif
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
@@ -35,23 +166,6 @@ static inline bool pte_hashpte(pte_t pte) { return false; }
static inline bool pte_ci(pte_t pte) { return pte_val(pte) & _PAGE_NO_CACHE; }
static inline bool pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
-#ifdef CONFIG_NUMA_BALANCING
-/*
- * These work without NUMA balancing but the kernel does not care. See the
- * comment in include/linux/pgtable.h . On powerpc, this will only
- * work for user pages and always return true for kernel pages.
- */
-static inline int pte_protnone(pte_t pte)
-{
- return pte_present(pte) && !pte_user(pte);
-}
-
-static inline int pmd_protnone(pmd_t pmd)
-{
- return pte_protnone(pmd_pte(pmd));
-}
-#endif /* CONFIG_NUMA_BALANCING */
-
static inline int pte_present(pte_t pte)
{
return pte_val(pte) & _PAGE_PRESENT;
@@ -62,15 +176,20 @@ static inline bool pte_hw_valid(pte_t pte)
return pte_val(pte) & _PAGE_PRESENT;
}
+static inline int pte_young(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_ACCESSED;
+}
+
/*
- * Don't just check for any non zero bits in __PAGE_USER, since for book3e
+ * Don't just check for any non zero bits in __PAGE_READ, since for book3e
* and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
- * _PAGE_USER. Need to explicitly match _PAGE_BAP_UR bit in that case too.
+ * _PAGE_READ. Need to explicitly match _PAGE_BAP_UR bit in that case too.
*/
-#ifndef pte_user
-static inline bool pte_user(pte_t pte)
+#ifndef pte_read
+static inline bool pte_read(pte_t pte)
{
- return (pte_val(pte) & _PAGE_USER) == _PAGE_USER;
+ return (pte_val(pte) & _PAGE_READ) == _PAGE_READ;
}
#endif
@@ -82,10 +201,10 @@ static inline bool pte_user(pte_t pte)
static inline bool pte_access_permitted(pte_t pte, bool write)
{
/*
- * A read-only access is controlled by _PAGE_USER bit.
- * We have _PAGE_READ set for WRITE and EXECUTE
+ * A read-only access is controlled by _PAGE_READ bit.
+ * We have _PAGE_READ set for WRITE
*/
- if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
+ if (!pte_present(pte) || !pte_read(pte))
return false;
if (write && !pte_write(pte))
@@ -132,20 +251,6 @@ static inline pte_t pte_mkhuge(pte_t pte)
}
#endif
-#ifndef pte_mkprivileged
-static inline pte_t pte_mkprivileged(pte_t pte)
-{
- return __pte(pte_val(pte) & ~_PAGE_USER);
-}
-#endif
-
-#ifndef pte_mkuser
-static inline pte_t pte_mkuser(pte_t pte)
-{
- return __pte(pte_val(pte) | _PAGE_USER);
-}
-#endif
-
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
@@ -207,11 +312,6 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
mb();
}
-
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
- pte_t *ptep, pte_t entry, int dirty);
-
/*
* Macro to mark a page protection value as "uncacheable".
*/
@@ -240,11 +340,6 @@ extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addre
#define pgprot_writecombine pgprot_noncached_wc
-struct file;
-extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t vma_prot);
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-
#ifdef CONFIG_HUGETLB_PAGE
static inline int hugepd_ok(hugepd_t hpd)
{
@@ -269,20 +364,8 @@ static inline int pud_huge(pud_t pud)
#define is_hugepd(hpd) (hugepd_ok(hpd))
#endif
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to ensure coherency between the i-cache and d-cache
- * for the page which has just been mapped in.
- */
-#if defined(CONFIG_PPC_E500) && defined(CONFIG_HUGETLB_PAGE)
-void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep, unsigned int nr);
-#else
-static inline void update_mmu_cache_range(struct vm_fault *vmf,
- struct vm_area_struct *vma, unsigned long address,
- pte_t *ptep, unsigned int nr) {}
-#endif
+int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
+void unmap_kernel_page(unsigned long va);
#endif /* __ASSEMBLY__ */
#endif
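
The consolidated pte_update() above is a clear-then-set read-modify-write that returns the previous PTE value, which is what lets helpers such as ptep_test_and_clear_young() report the old flag state. A minimal standalone model of that contract (plain integers and made-up flag values, no locking or atomicity):

#include <stdint.h>
#include <stdio.h>

#define F_ACCESSED	0x1u
#define F_DIRTY		0x2u
#define F_WRITE		0x4u

/* Clear 'clr', set 'set', and hand back the previous value. */
static uint32_t pte_like_update(uint32_t *pte, uint32_t clr, uint32_t set)
{
	uint32_t old = *pte;

	*pte = (old & ~clr) | set;
	return old;
}

static int test_and_clear_accessed(uint32_t *pte)
{
	return (pte_like_update(pte, F_ACCESSED, 0) & F_ACCESSED) != 0;
}

int main(void)
{
	uint32_t pte = F_ACCESSED | F_WRITE;

	printf("young before: %d\n", test_and_clear_accessed(&pte));
	printf("young after:  %d\n", test_and_clear_accessed(&pte));
	return 0;
}
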
diff --git a/arch/powerpc/include/asm/nohash/pte-e500.h b/arch/powerpc/include/asm/nohash/pte-e500.h
index d8924cbd61e4..f516f0b5b7a8 100644
--- a/arch/powerpc/include/asm/nohash/pte-e500.h
+++ b/arch/powerpc/include/asm/nohash/pte-e500.h
@@ -48,13 +48,20 @@
/* "Higher level" linux bit combinations */
#define _PAGE_EXEC (_PAGE_BAP_SX | _PAGE_BAP_UX) /* .. and was cache cleaned */
-#define _PAGE_RW (_PAGE_BAP_SW | _PAGE_BAP_UW) /* User write permission */
+#define _PAGE_READ (_PAGE_BAP_SR | _PAGE_BAP_UR) /* User read permission */
+#define _PAGE_WRITE (_PAGE_BAP_SW | _PAGE_BAP_UW) /* User write permission */
+
#define _PAGE_KERNEL_RW (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO (_PAGE_BAP_SR)
#define _PAGE_KERNEL_RWX (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY | _PAGE_BAP_SX)
#define _PAGE_KERNEL_ROX (_PAGE_BAP_SR | _PAGE_BAP_SX)
-#define _PAGE_USER (_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */
-#define _PAGE_PRIVILEGED (_PAGE_BAP_SR)
+
+#define _PAGE_NA 0
+#define _PAGE_NAX _PAGE_BAP_UX
+#define _PAGE_RO _PAGE_READ
+#define _PAGE_ROX (_PAGE_READ | _PAGE_BAP_UX)
+#define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)
+#define _PAGE_RWX (_PAGE_READ | _PAGE_WRITE | _PAGE_BAP_UX)
#define _PAGE_SPECIAL _PAGE_SW0
@@ -89,36 +96,12 @@
#define _PAGE_BASE (_PAGE_BASE_NC)
#endif
-/* Permission masks used to generate the __P and __S table */
-#define PAGE_NONE __pgprot(_PAGE_BASE)
-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_BAP_UX)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX)
+#include <asm/pgtable-masks.h>
#ifndef __ASSEMBLY__
-static inline pte_t pte_mkprivileged(pte_t pte)
-{
- return __pte((pte_val(pte) & ~_PAGE_USER) | _PAGE_PRIVILEGED);
-}
-
-#define pte_mkprivileged pte_mkprivileged
-
-static inline pte_t pte_mkuser(pte_t pte)
-{
- return __pte((pte_val(pte) & ~_PAGE_PRIVILEGED) | _PAGE_USER);
-}
-
-#define pte_mkuser pte_mkuser
-
static inline pte_t pte_mkexec(pte_t pte)
{
- if (pte_val(pte) & _PAGE_BAP_UR)
- return __pte((pte_val(pte) & ~_PAGE_BAP_SX) | _PAGE_BAP_UX);
- else
- return __pte((pte_val(pte) & ~_PAGE_BAP_UX) | _PAGE_BAP_SX);
+ return __pte((pte_val(pte) & ~_PAGE_BAP_SX) | _PAGE_BAP_UX);
}
#define pte_mkexec pte_mkexec
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index a9b31cc258fc..b66b0c615f4f 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -227,7 +227,7 @@ int64_t opal_pci_set_power_state(uint64_t async_token, uint64_t id,
uint64_t data);
int64_t opal_pci_poll2(uint64_t id, uint64_t data);
-int64_t opal_int_get_xirr(uint32_t *out_xirr, bool just_poll);
+int64_t opal_int_get_xirr(__be32 *out_xirr, bool just_poll);
int64_t opal_int_set_cppr(uint8_t cppr);
int64_t opal_int_eoi(uint32_t xirr);
int64_t opal_int_set_mfrr(uint32_t cpu, uint8_t mfrr);
diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
index e08513d73119..ac4279208d63 100644
--- a/arch/powerpc/include/asm/paravirt.h
+++ b/arch/powerpc/include/asm/paravirt.h
@@ -71,6 +71,11 @@ static inline void yield_to_any(void)
{
plpar_hcall_norets_notrace(H_CONFER, -1, 0);
}
+
+static inline bool is_vcpu_idle(int vcpu)
+{
+ return lppaca_of(vcpu).idle;
+}
#else
static inline bool is_shared_processor(void)
{
@@ -100,6 +105,10 @@ static inline void prod_cpu(int cpu)
___bad_prod_cpu(); /* This would be a bug */
}
+static inline bool is_vcpu_idle(int vcpu)
+{
+ return false;
+}
#endif
#define vcpu_is_preempted vcpu_is_preempted
@@ -121,9 +130,23 @@ static inline bool vcpu_is_preempted(int cpu)
if (!is_shared_processor())
return false;
+ /*
+ * If the hypervisor has dispatched the target CPU on a physical
+ * processor, then the target CPU is definitely not preempted.
+ */
+ if (!(yield_count_of(cpu) & 1))
+ return false;
+
+ /*
+ * If the target CPU has yielded to the hypervisor but the OS has not
+ * requested idle, then the target CPU is definitely preempted.
+ */
+ if (!is_vcpu_idle(cpu))
+ return true;
+
#ifdef CONFIG_PPC_SPLPAR
if (!is_kvm_guest()) {
- int first_cpu;
+ int first_cpu, i;
/*
* The result of vcpu_is_preempted() is used in a
@@ -149,11 +172,29 @@ static inline bool vcpu_is_preempted(int cpu)
*/
if (cpu_first_thread_sibling(cpu) == first_cpu)
return false;
+
+ /*
+ * If any of the threads of the target CPU's core are not
+ * preempted or ceded, then consider the target CPU to be
+ * non-preempted.
+ */
+ first_cpu = cpu_first_thread_sibling(cpu);
+ for (i = first_cpu; i < first_cpu + threads_per_core; i++) {
+ if (i == cpu)
+ continue;
+ if (!(yield_count_of(i) & 1))
+ return false;
+ if (!is_vcpu_idle(i))
+ return true;
+ }
}
#endif
- if (yield_count_of(cpu) & 1)
- return true;
+ /*
+ * None of the threads in the target CPU's core are running, but none of
+ * them were preempted either; they ceded while idle. Hence assume the
+ * target CPU is not preempted.
+ */
return false;
}
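
The reworked vcpu_is_preempted() rests on two per-vCPU checks: an even yield count means the vCPU is currently dispatched, and an odd yield count only counts as preemption when the OS did not request idle. A compact restatement of that decision, ignoring the sibling-thread scan (yield_count and idle below are stand-ins for yield_count_of() and is_vcpu_idle()):

#include <stdbool.h>
#include <stdio.h>

struct vcpu_state {
	unsigned int yield_count;  /* odd while the vCPU is not dispatched */
	bool         idle;         /* OS voluntarily ceded the vCPU        */
};

static bool vcpu_preempted(const struct vcpu_state *v)
{
	if (!(v->yield_count & 1))
		return false;      /* dispatched on a physical CPU        */
	if (!v->idle)
		return true;       /* yielded, but not at the OS's request */
	return false;              /* ceded while idle: not "preempted"   */
}

int main(void)
{
	struct vcpu_state running = { .yield_count = 4, .idle = false };
	struct vcpu_state stolen  = { .yield_count = 5, .idle = false };
	struct vcpu_state ceded   = { .yield_count = 7, .idle = true  };

	printf("running=%d stolen=%d ceded=%d\n",
	       vcpu_preempted(&running), vcpu_preempted(&stolen),
	       vcpu_preempted(&ceded));
	return 0;
}
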
diff --git a/arch/powerpc/include/asm/pgtable-masks.h b/arch/powerpc/include/asm/pgtable-masks.h
new file mode 100644
index 000000000000..6e8e2db26a5a
--- /dev/null
+++ b/arch/powerpc/include/asm/pgtable-masks.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_PGTABLE_MASKS_H
+#define _ASM_POWERPC_PGTABLE_MASKS_H
+
+#ifndef _PAGE_NA
+#define _PAGE_NA 0
+#define _PAGE_NAX _PAGE_EXEC
+#define _PAGE_RO _PAGE_READ
+#define _PAGE_ROX (_PAGE_READ | _PAGE_EXEC)
+#define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)
+#define _PAGE_RWX (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
+#endif
+
+/* Permission flags for kernel mappings */
+#ifndef _PAGE_KERNEL_RO
+#define _PAGE_KERNEL_RO _PAGE_RO
+#define _PAGE_KERNEL_ROX _PAGE_ROX
+#define _PAGE_KERNEL_RW (_PAGE_RW | _PAGE_DIRTY)
+#define _PAGE_KERNEL_RWX (_PAGE_RWX | _PAGE_DIRTY)
+#endif
+
+/* Permission masks used to generate the __P and __S table */
+#define PAGE_NONE __pgprot(_PAGE_BASE | _PAGE_NA)
+#define PAGE_EXECONLY_X __pgprot(_PAGE_BASE | _PAGE_NAX)
+#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_RWX)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_RO)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_ROX)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_RO)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_ROX)
+
+#endif /* _ASM_POWERPC_PGTABLE_MASKS_H */
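
Every protection constant in the new header is derived from the three primitive permission bits (read, write, exec). A userspace illustration of the same derivation, using invented bit values rather than the real PTE layout:

#include <stdio.h>

#define P_READ	0x1u
#define P_WRITE	0x2u
#define P_EXEC	0x4u

#define P_NA	0u
#define P_RO	P_READ
#define P_ROX	(P_READ | P_EXEC)
#define P_RW	(P_READ | P_WRITE)
#define P_RWX	(P_READ | P_WRITE | P_EXEC)

static void show(const char *name, unsigned int prot)
{
	printf("%-14s r=%d w=%d x=%d\n", name,
	       !!(prot & P_READ), !!(prot & P_WRITE), !!(prot & P_EXEC));
}

int main(void)
{
	show("PAGE_NONE", P_NA);
	show("PAGE_READONLY", P_RO);
	show("PAGE_COPY_X", P_ROX);
	show("PAGE_SHARED", P_RW);
	show("PAGE_SHARED_X", P_RWX);
	return 0;
}
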
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index d0ee46de248e..2bfb7dd3b49e 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -71,6 +71,12 @@ static inline pgprot_t pte_pgprot(pte_t pte)
return __pgprot(pte_flags);
}
+static inline pgprot_t pgprot_nx(pgprot_t prot)
+{
+ return pte_pgprot(pte_exprotect(__pte(pgprot_val(prot))));
+}
+#define pgprot_nx pgprot_nx
+
#ifndef pmd_page_vaddr
static inline const void *pmd_page_vaddr(pmd_t pmd)
{
@@ -110,6 +116,35 @@ void mark_initmem_nx(void);
static inline void mark_initmem_nx(void) { }
#endif
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep, pte_t entry, int dirty);
+
+struct file;
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot);
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
+void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+ struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep, unsigned int nr)
+{
+ if ((mmu_has_feature(MMU_FTR_HPTE_TABLE) && !radix_enabled()) ||
+ (IS_ENABLED(CONFIG_PPC_E500) && IS_ENABLED(CONFIG_HUGETLB_PAGE)))
+ __update_mmu_cache(vma, address, ptep);
+}
+
/*
* When used, PTE_FRAG_NR is defined in subarch pgtable.h
* so we are sure it is included when arriving here.
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index fe3d0ea0058a..b3ee44a40c2f 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -6,6 +6,7 @@
#include <linux/string.h>
#include <linux/irqflags.h>
+#include <linux/delay.h>
#include <asm/hvcall.h>
#include <asm/paca.h>
@@ -343,6 +344,212 @@ static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
return rc;
}
+static inline long plpar_guest_create(unsigned long flags, unsigned long *guest_id)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ unsigned long token;
+ long rc;
+
+ token = -1UL;
+ do {
+ rc = plpar_hcall(H_GUEST_CREATE, retbuf, flags, token);
+ if (rc == H_SUCCESS)
+ *guest_id = retbuf[0];
+
+ if (rc == H_BUSY) {
+ token = retbuf[0];
+ cond_resched();
+ }
+
+ if (H_IS_LONG_BUSY(rc)) {
+ token = retbuf[0];
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ }
+
+ } while (rc == H_BUSY);
+
+ return rc;
+}
+
+static inline long plpar_guest_create_vcpu(unsigned long flags,
+ unsigned long guest_id,
+ unsigned long vcpu_id)
+{
+ long rc;
+
+ do {
+ rc = plpar_hcall_norets(H_GUEST_CREATE_VCPU, 0, guest_id, vcpu_id);
+
+ if (rc == H_BUSY)
+ cond_resched();
+
+ if (H_IS_LONG_BUSY(rc)) {
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ }
+
+ } while (rc == H_BUSY);
+
+ return rc;
+}
+
+static inline long plpar_guest_set_state(unsigned long flags,
+ unsigned long guest_id,
+ unsigned long vcpu_id,
+ unsigned long data_buffer,
+ unsigned long data_size,
+ unsigned long *failed_index)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ while (true) {
+ rc = plpar_hcall(H_GUEST_SET_STATE, retbuf, flags, guest_id,
+ vcpu_id, data_buffer, data_size);
+
+ if (rc == H_BUSY) {
+ cpu_relax();
+ continue;
+ }
+
+ if (H_IS_LONG_BUSY(rc)) {
+ mdelay(get_longbusy_msecs(rc));
+ continue;
+ }
+
+ if (rc == H_INVALID_ELEMENT_ID)
+ *failed_index = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_SIZE)
+ *failed_index = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_VALUE)
+ *failed_index = retbuf[0];
+
+ break;
+ }
+
+ return rc;
+}
+
+static inline long plpar_guest_get_state(unsigned long flags,
+ unsigned long guest_id,
+ unsigned long vcpu_id,
+ unsigned long data_buffer,
+ unsigned long data_size,
+ unsigned long *failed_index)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ while (true) {
+ rc = plpar_hcall(H_GUEST_GET_STATE, retbuf, flags, guest_id,
+ vcpu_id, data_buffer, data_size);
+
+ if (rc == H_BUSY) {
+ cpu_relax();
+ continue;
+ }
+
+ if (H_IS_LONG_BUSY(rc)) {
+ mdelay(get_longbusy_msecs(rc));
+ continue;
+ }
+
+ if (rc == H_INVALID_ELEMENT_ID)
+ *failed_index = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_SIZE)
+ *failed_index = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_VALUE)
+ *failed_index = retbuf[0];
+
+ break;
+ }
+
+ return rc;
+}
+
+static inline long plpar_guest_run_vcpu(unsigned long flags, unsigned long guest_id,
+ unsigned long vcpu_id, int *trap,
+ unsigned long *failed_index)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ rc = plpar_hcall(H_GUEST_RUN_VCPU, retbuf, flags, guest_id, vcpu_id);
+ if (rc == H_SUCCESS)
+ *trap = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_ID)
+ *failed_index = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_SIZE)
+ *failed_index = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_VALUE)
+ *failed_index = retbuf[0];
+
+ return rc;
+}
+
+static inline long plpar_guest_delete(unsigned long flags, u64 guest_id)
+{
+ long rc;
+
+ do {
+ rc = plpar_hcall_norets(H_GUEST_DELETE, flags, guest_id);
+ if (rc == H_BUSY)
+ cond_resched();
+
+ if (H_IS_LONG_BUSY(rc)) {
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ }
+
+ } while (rc == H_BUSY);
+
+ return rc;
+}
+
+static inline long plpar_guest_set_capabilities(unsigned long flags,
+ unsigned long capabilities)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ do {
+ rc = plpar_hcall(H_GUEST_SET_CAPABILITIES, retbuf, flags, capabilities);
+ if (rc == H_BUSY)
+ cond_resched();
+
+ if (H_IS_LONG_BUSY(rc)) {
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ }
+ } while (rc == H_BUSY);
+
+ return rc;
+}
+
+static inline long plpar_guest_get_capabilities(unsigned long flags,
+ unsigned long *capabilities)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ do {
+ rc = plpar_hcall(H_GUEST_GET_CAPABILITIES, retbuf, flags);
+ if (rc == H_BUSY)
+ cond_resched();
+
+ if (H_IS_LONG_BUSY(rc)) {
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ }
+ } while (rc == H_BUSY);
+
+ if (rc == H_SUCCESS)
+ *capabilities = retbuf[0];
+
+ return rc;
+}
+
/*
* Wrapper to H_RPT_INVALIDATE hcall that handles return values appropriately
*
@@ -355,7 +562,7 @@ static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
* error recovery of killing the process/guest will be eventually
* needed.
*/
-static inline long pseries_rpt_invalidate(u32 pid, u64 target, u64 type,
+static inline long pseries_rpt_invalidate(u64 pid, u64 target, u64 type,
u64 page_sizes, u64 start, u64 end)
{
long rc;
@@ -401,12 +608,68 @@ static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
return 0;
}
-static inline long pseries_rpt_invalidate(u32 pid, u64 target, u64 type,
+static inline long pseries_rpt_invalidate(u64 pid, u64 target, u64 type,
u64 page_sizes, u64 start, u64 end)
{
return 0;
}
+static inline long plpar_guest_create_vcpu(unsigned long flags,
+ unsigned long guest_id,
+ unsigned long vcpu_id)
+{
+ return 0;
+}
+
+static inline long plpar_guest_get_state(unsigned long flags,
+ unsigned long guest_id,
+ unsigned long vcpu_id,
+ unsigned long data_buffer,
+ unsigned long data_size,
+ unsigned long *failed_index)
+{
+ return 0;
+}
+
+static inline long plpar_guest_set_state(unsigned long flags,
+ unsigned long guest_id,
+ unsigned long vcpu_id,
+ unsigned long data_buffer,
+ unsigned long data_size,
+ unsigned long *failed_index)
+{
+ return 0;
+}
+
+static inline long plpar_guest_run_vcpu(unsigned long flags, unsigned long guest_id,
+ unsigned long vcpu_id, int *trap,
+ unsigned long *failed_index)
+{
+ return 0;
+}
+
+static inline long plpar_guest_create(unsigned long flags, unsigned long *guest_id)
+{
+ return 0;
+}
+
+static inline long plpar_guest_delete(unsigned long flags, u64 guest_id)
+{
+ return 0;
+}
+
+static inline long plpar_guest_get_capabilities(unsigned long flags,
+ unsigned long *capabilities)
+{
+ return 0;
+}
+
+static inline long plpar_guest_set_capabilities(unsigned long flags,
+ unsigned long capabilities)
+{
+ return 0;
+}
+
#endif /* CONFIG_PPC_PSERIES */
#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
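Note: all of the new plpar_guest_*() wrappers above share one convention: retry on H_BUSY, sleep for get_longbusy_msecs() on the long-busy codes, and report H_INVALID_ELEMENT_* through *failed_index. A hedged sketch of how a caller might chain them over a nested-guest lifetime (the demo function and its error handling are assumptions, not taken from this series):

/* Hypothetical usage sketch of the new H_GUEST_* wrappers. */
static long demo_guest_lifecycle(void)
{
	unsigned long guest_id, caps;
	long rc;

	rc = plpar_guest_get_capabilities(0, &caps);
	if (rc != H_SUCCESS)
		return rc;

	rc = plpar_guest_create(0, &guest_id);
	if (rc != H_SUCCESS)
		return rc;

	rc = plpar_guest_create_vcpu(0, guest_id, 0);
	if (rc != H_SUCCESS) {
		plpar_guest_delete(0, guest_id);
		return rc;
	}

	/* ... set state, run the vcpu, handle the returned trap ... */

	return plpar_guest_delete(0, guest_id);
}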
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 9db8b16567e2..ea8f91fbc62f 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -397,6 +397,23 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
return 0;
}
+/**
+ * regs_get_kernel_argument() - get Nth function argument in kernel
+ * @regs: pt_regs of that context
+ * @n: function argument number (start from 0)
+ *
+ * We support up to 8 arguments and assume they are sent in through the GPRs.
+ * This will fail for fp/vector arguments, but those aren't usually found in
+ * kernel code. This is expected to be called from kprobes or ftrace with regs.
+ */
+static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs, unsigned int n)
+{
+#define NR_REG_ARGUMENTS 8
+ if (n < NR_REG_ARGUMENTS)
+ return regs_get_register(regs, offsetof(struct pt_regs, gpr[3 + n]));
+ return 0;
+}
+
#endif /* __ASSEMBLY__ */
#ifndef __powerpc64__
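Note: regs_get_kernel_argument() above mirrors the helper other architectures already provide for kprobes and ftrace, reading arguments 0-7 out of GPR3-GPR10. A minimal, hypothetical pre-handler using it (the handler name and probe target are illustrative only, not from this patch):

/* Hypothetical kprobe pre-handler: dump the first two GPR arguments. */
static int demo_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long arg0 = regs_get_kernel_argument(regs, 0);
	unsigned long arg1 = regs_get_kernel_argument(regs, 1);

	pr_debug("%s: arg0=%lx arg1=%lx\n", p->symbol_name, arg0, arg1);
	return 0;	/* let the probed instruction execute */
}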
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index fb725ec77926..f1f9890f50d3 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -374,7 +374,7 @@ copy_mc_to_user(void __user *to, const void *from, unsigned long n)
if (check_copy_size(from, n, true)) {
if (access_ok(to, n)) {
allow_write_to_user(to, n);
- n = copy_mc_generic((void *)to, from, n);
+ n = copy_mc_generic((void __force *)to, from, n);
prevent_write_to_user(to, n);
}
}
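Note: copy_mc_to_user() now strips the __user address space with __force before handing the pointer to copy_mc_generic(), which keeps sparse quiet once user access has been explicitly opened with allow_write_to_user(). A generic, hypothetical illustration of the same annotation pattern (not part of the patch):

/* Hypothetical sparse-clean cast, valid only after user access is allowed. */
static inline void *demo_strip_user(void __user *ptr)
{
	return (void __force *)ptr;
}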
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 19e46fd623b0..7f63f1cdc6c3 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -8,6 +8,7 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/export.h>
+#include <linux/font.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <linux/of.h>
@@ -41,10 +42,6 @@ static unsigned char *logicalDisplayBase __force_data;
unsigned long disp_BAT[2] __initdata = {0, 0};
-#define cmapsz (16*256)
-
-static unsigned char vga_font[cmapsz];
-
static int boot_text_mapped __force_data;
extern void rmci_on(void);
@@ -407,7 +404,7 @@ static unsigned int expand_bits_16[4] = {
};
-static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
+static void draw_byte_32(const unsigned char *font, unsigned int *base, int rb)
{
int l, bits;
int fg = 0xFFFFFFFFUL;
@@ -428,7 +425,7 @@ static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
}
}
-static inline void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
+static inline void draw_byte_16(const unsigned char *font, unsigned int *base, int rb)
{
int l, bits;
int fg = 0xFFFFFFFFUL;
@@ -446,7 +443,7 @@ static inline void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
}
}
-static inline void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
+static inline void draw_byte_8(const unsigned char *font, unsigned int *base, int rb)
{
int l, bits;
int fg = 0x0F0F0F0FUL;
@@ -465,7 +462,8 @@ static inline void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
static noinline void draw_byte(unsigned char c, long locX, long locY)
{
unsigned char *base = calc_base(locX << 3, locY << 4);
- unsigned char *font = &vga_font[((unsigned int)c) * 16];
+ unsigned int font_index = c * 16;
+ const unsigned char *font = font_sun_8x16.data + font_index;
int rb = dispDeviceRowBytes;
rmci_maybe_on();
@@ -583,349 +581,3 @@ void __init udbg_init_btext(void)
*/
udbg_putc = btext_drawchar;
}
-
-static unsigned char vga_font[cmapsz] = {
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x81, 0xa5, 0x81, 0x81, 0xbd,
-0x99, 0x81, 0x81, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xff,
-0xdb, 0xff, 0xff, 0xc3, 0xe7, 0xff, 0xff, 0x7e, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x6c, 0xfe, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x7c, 0xfe,
-0x7c, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18,
-0x3c, 0x3c, 0xe7, 0xe7, 0xe7, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0xff, 0xff, 0x7e, 0x18, 0x18, 0x3c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
-0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xe7, 0xc3, 0xc3, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x42, 0x42, 0x66, 0x3c, 0x00,
-0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x99, 0xbd,
-0xbd, 0x99, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x1e, 0x0e,
-0x1a, 0x32, 0x78, 0xcc, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x7e, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x33, 0x3f, 0x30, 0x30, 0x30,
-0x30, 0x70, 0xf0, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x63,
-0x7f, 0x63, 0x63, 0x63, 0x63, 0x67, 0xe7, 0xe6, 0xc0, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x18, 0x18, 0xdb, 0x3c, 0xe7, 0x3c, 0xdb, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfe, 0xf8,
-0xf0, 0xe0, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x0e,
-0x1e, 0x3e, 0xfe, 0x3e, 0x1e, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
-0x66, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xdb,
-0xdb, 0xdb, 0x7b, 0x1b, 0x1b, 0x1b, 0x1b, 0x1b, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x7c, 0xc6, 0x60, 0x38, 0x6c, 0xc6, 0xc6, 0x6c, 0x38, 0x0c, 0xc6,
-0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0xfe, 0xfe, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
-0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x7e, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x18, 0x0c, 0xfe, 0x0c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x60, 0xfe, 0x60, 0x30, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xc0,
-0xc0, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x24, 0x66, 0xff, 0x66, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x38, 0x7c, 0x7c, 0xfe, 0xfe, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0x7c, 0x7c,
-0x38, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x24, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c,
-0x6c, 0xfe, 0x6c, 0x6c, 0x6c, 0xfe, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
-0x18, 0x18, 0x7c, 0xc6, 0xc2, 0xc0, 0x7c, 0x06, 0x06, 0x86, 0xc6, 0x7c,
-0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0xc6, 0x0c, 0x18,
-0x30, 0x60, 0xc6, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c,
-0x6c, 0x38, 0x76, 0xdc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x30, 0x30, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x30,
-0x30, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x18,
-0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x3c, 0xff, 0x3c, 0x66, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
-0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x02, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xce, 0xde, 0xf6, 0xe6, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x78, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
-0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x7c, 0xc6, 0x06, 0x06, 0x3c, 0x06, 0x06, 0x06, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x3c, 0x6c, 0xcc, 0xfe,
-0x0c, 0x0c, 0x0c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
-0xc0, 0xc0, 0xfc, 0x06, 0x06, 0x06, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x38, 0x60, 0xc0, 0xc0, 0xfc, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x06, 0x06, 0x0c, 0x18,
-0x30, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
-0xc6, 0xc6, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x06, 0x06, 0x0c, 0x78,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00,
-0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x06,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00,
-0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60,
-0x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x7c, 0xc6, 0xc6, 0x0c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xde, 0xde,
-0xde, 0xdc, 0xc0, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38,
-0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x66, 0x66, 0xfc,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0xc2, 0xc0, 0xc0, 0xc0,
-0xc0, 0xc2, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x6c,
-0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x6c, 0xf8, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x60, 0x62, 0x66, 0xfe,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68,
-0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
-0xc2, 0xc0, 0xc0, 0xde, 0xc6, 0xc6, 0x66, 0x3a, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x0c,
-0x0c, 0x0c, 0x0c, 0x0c, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xe6, 0x66, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0x66, 0xe6,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x60, 0x60, 0x60, 0x60, 0x60,
-0x60, 0x62, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe7,
-0xff, 0xff, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 0xc6,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
-0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66,
-0x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xd6, 0xde, 0x7c,
-0x0c, 0x0e, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x6c,
-0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
-0xc6, 0x60, 0x38, 0x0c, 0x06, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xff, 0xdb, 0x99, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
-0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
-0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x66,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x18,
-0x3c, 0x66, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
-0xc3, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xff, 0xc3, 0x86, 0x0c, 0x18, 0x30, 0x60, 0xc1, 0xc3, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x30, 0x30, 0x30, 0x30, 0x30,
-0x30, 0x30, 0x30, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
-0xc0, 0xe0, 0x70, 0x38, 0x1c, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x3c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x3c,
-0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00,
-0x30, 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x0c, 0x7c,
-0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x60,
-0x60, 0x78, 0x6c, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc0, 0xc0, 0xc0, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x0c, 0x0c, 0x3c, 0x6c, 0xcc,
-0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xf0,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xcc, 0xcc,
-0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0xcc, 0x78, 0x00, 0x00, 0x00, 0xe0, 0x60,
-0x60, 0x6c, 0x76, 0x66, 0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x18, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x0e, 0x06, 0x06,
-0x06, 0x06, 0x06, 0x06, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0xe0, 0x60,
-0x60, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0xff, 0xdb,
-0xdb, 0xdb, 0xdb, 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66,
-0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x76, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0x0c, 0x1e, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x76, 0x66, 0x60, 0x60, 0x60, 0xf0,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x60,
-0x38, 0x0c, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x30,
-0x30, 0xfc, 0x30, 0x30, 0x30, 0x30, 0x36, 0x1c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3,
-0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0xc3,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6,
-0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xfe, 0xcc, 0x18, 0x30, 0x60, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x0e, 0x18, 0x18, 0x18, 0x70, 0x18, 0x18, 0x18, 0x18, 0x0e,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x18,
-0x18, 0x18, 0x0e, 0x18, 0x18, 0x18, 0x18, 0x70, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6,
-0xc6, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
-0xc2, 0xc0, 0xc0, 0xc0, 0xc2, 0x66, 0x3c, 0x0c, 0x06, 0x7c, 0x00, 0x00,
-0x00, 0x00, 0xcc, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x00, 0x7c, 0xc6, 0xfe,
-0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c,
-0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xcc, 0x00, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x78, 0x0c, 0x7c,
-0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38,
-0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x0c, 0x06,
-0x3c, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xfe,
-0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
-0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x60, 0x30, 0x18, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x38, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x66,
-0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x60, 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0xc6,
-0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, 0x00,
-0x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
-0x18, 0x30, 0x60, 0x00, 0xfe, 0x66, 0x60, 0x7c, 0x60, 0x60, 0x66, 0xfe,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x3b, 0x1b,
-0x7e, 0xd8, 0xdc, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x6c,
-0xcc, 0xcc, 0xfe, 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0x7c, 0xc6, 0xc6,
-0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18,
-0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x30, 0x78, 0xcc, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0xcc, 0xcc, 0xcc,
-0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
-0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0x78, 0x00,
-0x00, 0xc6, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
-0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
-0xc3, 0xc0, 0xc0, 0xc0, 0xc3, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xe6, 0xfc,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0xff, 0x18,
-0xff, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66,
-0x7c, 0x62, 0x66, 0x6f, 0x66, 0x66, 0x66, 0xf3, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x0e, 0x1b, 0x18, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18,
-0xd8, 0x70, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0x78, 0x0c, 0x7c,
-0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30,
-0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x18, 0x30, 0x60, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0xcc, 0xcc, 0xcc,
-0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc,
-0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
-0x76, 0xdc, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x6c, 0x6c, 0x3e, 0x00, 0x7e, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
-0x38, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x30, 0x30, 0x00, 0x30, 0x30, 0x60, 0xc0, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
-0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xfe, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 0x60, 0xce, 0x9b, 0x06,
-0x0c, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30,
-0x66, 0xce, 0x96, 0x3e, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18,
-0x00, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x6c, 0xd8, 0x6c, 0x36, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x6c, 0x36,
-0x6c, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x44, 0x11, 0x44,
-0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44,
-0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa,
-0x55, 0xaa, 0x55, 0xaa, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77,
-0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0xf8,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
-0x36, 0xf6, 0x06, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x06, 0xf6,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x36, 0xf6, 0x06, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xfe, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x37, 0x30, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf7, 0x00, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xff, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36,
-0x36, 0xf7, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x3f,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0xff, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xf0, 0xf0,
-0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
-0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
-0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x76, 0xdc, 0xd8, 0xd8, 0xd8, 0xdc, 0x76, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x78, 0xcc, 0xcc, 0xcc, 0xd8, 0xcc, 0xc6, 0xc6, 0xc6, 0xcc,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0xc6, 0xc0, 0xc0, 0xc0,
-0xc0, 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0xfe, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0xfe, 0xc6, 0x60, 0x30, 0x18, 0x30, 0x60, 0xc6, 0xfe,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xd8, 0xd8,
-0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x66, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xc0, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x18, 0x3c, 0x66, 0x66,
-0x66, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
-0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0x6c, 0x38, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xc6, 0x6c, 0x6c, 0x6c, 0x6c, 0xee,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x30, 0x18, 0x0c, 0x3e, 0x66,
-0x66, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x7e, 0xdb, 0xdb, 0xdb, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x03, 0x06, 0x7e, 0xdb, 0xdb, 0xf3, 0x7e, 0x60, 0xc0,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x30, 0x60, 0x60, 0x7c, 0x60,
-0x60, 0x60, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c,
-0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18,
-0x18, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
-0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x7e,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x1b, 0x1b, 0x1b, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x18, 0x18, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00,
-0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
-0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0c, 0x0c,
-0x0c, 0x0c, 0x0c, 0xec, 0x6c, 0x6c, 0x3c, 0x1c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xd8, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xd8, 0x30, 0x60, 0xc8, 0xf8, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00,
-};
-
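Note: with the private 4 KB vga_font[] table removed, btext.c indexes the shared font_sun_8x16 glyph data from <linux/font.h>, exactly as draw_byte() does above. As a rough, hypothetical illustration of that API (not part of the patch):

#include <linux/font.h>

/* Hypothetical sketch: return the 16-byte 8x16 glyph for character c. */
static const unsigned char *demo_glyph(unsigned char c)
{
	const struct font_desc *font = &font_sun_8x16;

	return (const unsigned char *)font->data + (unsigned int)c * font->height;
}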
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 9a3b85bfc83f..2086fa6cdc25 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -19,6 +19,7 @@
#include <linux/uio.h>
#include <asm/rtas.h>
#include <asm/inst.h>
+#include <asm/fadump.h>
#ifdef DEBUG
#include <asm/udbg.h>
@@ -92,6 +93,17 @@ ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
return csize;
}
+/*
+ * Return true only when kexec based kernel dump capturing method is used.
+ * This ensures all restrictions applied for the kdump case are not automatically
+ * applied for the fadump case.
+ */
+bool is_kdump_kernel(void)
+{
+ return !is_fadump_active() && elfcorehdr_addr != ELFCORE_ADDR_MAX;
+}
+EXPORT_SYMBOL_GPL(is_kdump_kernel);
+
#ifdef CONFIG_PPC_RTAS
/*
* The crashkernel region will almost always overlap the RTAS region, so
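Note: the powerpc is_kdump_kernel() override above returns false while fadump is active, even though elfcorehdr_addr is set, so kdump-only restrictions are not applied to the fadump capture kernel. A hedged example of the kind of caller that distinction matters to (the driver-side check is an assumption, not part of this patch):

#include <linux/crash_dump.h>

/* Hypothetical driver snippet: shrink resources only in a real kdump kernel. */
static bool demo_use_minimal_queues(void)
{
	return is_kdump_kernel();
}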
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 438568a472d0..48773d2d9be3 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -39,7 +39,7 @@ static int eeh_result_priority(enum pci_ers_result result)
case PCI_ERS_RESULT_NEED_RESET:
return 6;
default:
- WARN_ONCE(1, "Unknown pci_ers_result value: %d\n", (int)result);
+ WARN_ONCE(1, "Unknown pci_ers_result value: %d\n", result);
return 0;
}
};
@@ -60,7 +60,7 @@ static const char *pci_ers_result_name(enum pci_ers_result result)
case PCI_ERS_RESULT_NO_AER_DRIVER:
return "no AER driver";
default:
- WARN_ONCE(1, "Unknown result type: %d\n", (int)result);
+ WARN_ONCE(1, "Unknown result type: %d\n", result);
return "unknown";
}
};
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index b32e7b2ebdcf..9fc90410b385 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -312,13 +312,13 @@ _ASM_NOKPROBE_SYMBOL(\name\()_virt)
rlwimi r11, r10, 22, 20, 29 /* Compute PTE address */
lwz r11, 0(r11) /* Get Linux PTE */
- li r9, _PAGE_PRESENT | _PAGE_ACCESSED
+ li r9, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_READ
andc. r9, r9, r11 /* Check permission */
bne 5f
- rlwinm r9, r11, 1, _PAGE_RW /* dirty => rw */
- and r9, r9, r11 /* hwwrite = dirty & rw */
- rlwimi r11, r9, 0, _PAGE_RW /* replace rw by hwwrite */
+ rlwinm r9, r11, 1, _PAGE_WRITE /* dirty => w */
+ and r9, r9, r11 /* hwwrite = dirty & w */
+ rlwimi r11, r9, 0, _PAGE_WRITE /* replace w by hwwrite */
/* Create TLB tag. This is the faulting address plus a static
* set of bits. These are size, valid, E, U0.
@@ -400,9 +400,9 @@ _ASM_NOKPROBE_SYMBOL(\name\()_virt)
andc. r9, r9, r11 /* Check permission */
bne 5f
- rlwinm r9, r11, 1, _PAGE_RW /* dirty => rw */
- and r9, r9, r11 /* hwwrite = dirty & rw */
- rlwimi r11, r9, 0, _PAGE_RW /* replace rw by hwwrite */
+ rlwinm r9, r11, 1, _PAGE_WRITE /* dirty => w */
+ and r9, r9, r11 /* hwwrite = dirty & w */
+ rlwimi r11, r9, 0, _PAGE_WRITE /* replace w by hwwrite */
/* Create TLB tag. This is the faulting address plus a static
* set of bits. These are size, valid, E, U0.
@@ -561,10 +561,11 @@ finish_tlb_load:
/*
* Clear out the software-only bits in the PTE to generate the
 * TLB_DATA value. These are the bottom 2 bits of the RPN, the
- * top 3 bits of the zone field, and M.
+ * 4 bits of the zone field, and M.
*/
- li r9, 0x0ce2
+ li r9, 0x0cf2
andc r11, r11, r9
+ rlwimi r11, r10, 8, 24, 27 /* Copy 4 upper address bits into zone */
/* load the next available TLB index. */
lwz r9, tlb_4xx_index@l(0)
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index a3197c9f721c..25642e802ed3 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -314,8 +314,8 @@ interrupt_base:
* kernel page tables.
*/
lis r11, PAGE_OFFSET@h
- cmplw r10, r11
- blt+ 3f
+ cmplw cr7, r10, r11
+ blt+ cr7, 3f
lis r11, swapper_pg_dir@h
ori r11, r11, swapper_pg_dir@l
@@ -342,7 +342,7 @@ interrupt_base:
mtspr SPRN_MMUCR,r12
/* Mask of required permission bits. Note that while we
- * do copy ESR:ST to _PAGE_RW position as trying to write
+ * do copy ESR:ST to _PAGE_WRITE position as trying to write
* to an RO page is pretty common, we don't do it with
* _PAGE_DIRTY. We could do it, but it's a fairly rare
* event so I'd rather take the overhead when it happens
@@ -355,7 +355,7 @@ interrupt_base:
* place or can we save a couple of instructions here ?
*/
mfspr r12,SPRN_ESR
- li r13,_PAGE_PRESENT|_PAGE_ACCESSED
+ li r13,_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_READ
rlwimi r13,r12,10,30,30
/* Load the PTE */
@@ -428,8 +428,8 @@ interrupt_base:
* kernel page tables.
*/
lis r11, PAGE_OFFSET@h
- cmplw r10, r11
- blt+ 3f
+ cmplw cr7, r10, r11
+ blt+ cr7, 3f
lis r11, swapper_pg_dir@h
ori r11, r11, swapper_pg_dir@l
@@ -515,6 +515,7 @@ interrupt_base:
* r11 - PTE high word value
* r12 - PTE low word value
* r13 - TLB index
+ * cr7 - Result of comparison with PAGE_OFFSET
* MMUCR - loaded with proper value when we get here
* Upon exit, we reload everything and RFI.
*/
@@ -533,11 +534,10 @@ finish_tlb_load_44x:
tlbwe r10,r13,PPC44x_TLB_PAGEID /* Write PAGEID */
/* And WS 2 */
- li r10,0xf85 /* Mask to apply from PTE */
- rlwimi r10,r12,29,30,30 /* DIRTY -> SW position */
+ li r10,0xf84 /* Mask to apply from PTE */
+ rlwimi r10,r12,29,30,31 /* DIRTY,READ -> SW,SR position */
and r11,r12,r10 /* Mask PTE bits to keep */
- andi. r10,r12,_PAGE_USER /* User page ? */
- beq 1f /* nope, leave U bits empty */
+ bge cr7,1f /* User page ? no, leave U bits empty */
rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */
rlwinm r11,r11,0,~PPC44x_TLB_SX /* Clear SX if User page */
1: tlbwe r11,r13,PPC44x_TLB_ATTRIB /* Write ATTRIB */
@@ -568,8 +568,8 @@ finish_tlb_load_44x:
* kernel page tables.
*/
lis r11,PAGE_OFFSET@h
- cmplw cr0,r10,r11
- blt+ 3f
+ cmplw cr7,r10,r11
+ blt+ cr7,3f
lis r11,swapper_pg_dir@h
ori r11,r11, swapper_pg_dir@l
li r12,0 /* MMUCR = 0 */
@@ -586,7 +586,7 @@ finish_tlb_load_44x:
4: mtspr SPRN_MMUCR,r12 /* Set MMUCR */
/* Mask of required permission bits. Note that while we
- * do copy ESR:ST to _PAGE_RW position as trying to write
+ * do copy ESR:ST to _PAGE_WRITE position as trying to write
* to an RO page is pretty common, we don't do it with
* _PAGE_DIRTY. We could do it, but it's a fairly rare
* event so I'd rather take the overhead when it happens
@@ -599,7 +599,7 @@ finish_tlb_load_44x:
* place or can we save a couple of instructions here ?
*/
mfspr r12,SPRN_ESR
- li r13,_PAGE_PRESENT|_PAGE_ACCESSED
+ li r13,_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_READ
rlwimi r13,r12,10,30,30
/* Load the PTE */
@@ -669,8 +669,8 @@ finish_tlb_load_44x:
* kernel page tables.
*/
lis r11,PAGE_OFFSET@h
- cmplw cr0,r10,r11
- blt+ 3f
+ cmplw cr7,r10,r11
+ blt+ cr7,3f
lis r11,swapper_pg_dir@h
ori r11,r11, swapper_pg_dir@l
li r12,0 /* MMUCR = 0 */
@@ -744,6 +744,7 @@ finish_tlb_load_44x:
* r11 - PTE high word value
* r12 - PTE low word value
* r13 - free to use
+ * cr7 - Result of comparison with PAGE_OFFSET
* MMUCR - loaded with proper value when we get here
* Upon exit, we reload everything and RFI.
*/
@@ -753,11 +754,10 @@ finish_tlb_load_47x:
tlbwe r11,r13,1
/* And make up word 2 */
- li r10,0xf85 /* Mask to apply from PTE */
- rlwimi r10,r12,29,30,30 /* DIRTY -> SW position */
+ li r10,0xf84 /* Mask to apply from PTE */
+ rlwimi r10,r12,29,30,31 /* DIRTY,READ -> SW,SR position */
and r11,r12,r10 /* Mask PTE bits to keep */
- andi. r10,r12,_PAGE_USER /* User page ? */
- beq 1f /* nope, leave U bits empty */
+ bge cr7,1f /* User page ? no, leave U bits empty */
rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */
rlwinm r11,r11,0,~PPC47x_TLB2_SX /* Clear SX if User page */
1: tlbwe r11,r13,2
diff --git a/arch/powerpc/kernel/head_85xx.S b/arch/powerpc/kernel/head_85xx.S
index 0f1641a31250..39724ff5ae1f 100644
--- a/arch/powerpc/kernel/head_85xx.S
+++ b/arch/powerpc/kernel/head_85xx.S
@@ -471,7 +471,7 @@ END_BTB_FLUSH_SECTION
4:
/* Mask of required permission bits. Note that while we
- * do copy ESR:ST to _PAGE_RW position as trying to write
+ * do copy ESR:ST to _PAGE_WRITE position as trying to write
* to an RO page is pretty common, we don't do it with
* _PAGE_DIRTY. We could do it, but it's a fairly rare
* event so I'd rather take the overhead when it happens
@@ -485,10 +485,10 @@ END_BTB_FLUSH_SECTION
*/
mfspr r12,SPRN_ESR
#ifdef CONFIG_PTE_64BIT
- li r13,_PAGE_PRESENT
+ li r13,_PAGE_PRESENT|_PAGE_BAP_SR
oris r13,r13,_PAGE_ACCESSED@h
#else
- li r13,_PAGE_PRESENT|_PAGE_ACCESSED
+ li r13,_PAGE_PRESENT|_PAGE_READ|_PAGE_ACCESSED
#endif
rlwimi r13,r12,11,29,29
@@ -783,15 +783,15 @@ BEGIN_MMU_FTR_SECTION
mtspr SPRN_MAS7, r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
#else
- li r10, (_PAGE_EXEC | _PAGE_PRESENT)
+ li r10, (_PAGE_EXEC | _PAGE_READ)
mr r13, r11
rlwimi r10, r11, 31, 29, 29 /* extract _PAGE_DIRTY into SW */
and r12, r11, r10
- andi. r10, r11, _PAGE_USER /* Test for _PAGE_USER */
+ mcrf cr0, cr5 /* Test for user page */
slwi r10, r12, 1
or r10, r10, r12
rlwinm r10, r10, 0, ~_PAGE_EXEC /* Clear SX on user pages */
- iseleq r12, r12, r10
+ isellt r12, r10, r12
rlwimi r13, r12, 0, 20, 31 /* Get RPN from PTE, merge w/ perms */
mtspr SPRN_MAS3, r13
#endif
diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
index 6764b98ca360..c1d89764dd22 100644
--- a/arch/powerpc/kernel/head_book3s_32.S
+++ b/arch/powerpc/kernel/head_book3s_32.S
@@ -412,10 +412,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
. = INTERRUPT_INST_TLB_MISS_603
InstructionTLBMiss:
/*
- * r0: scratch
+ * r0: userspace flag (later scratch)
* r1: linux style pte ( later becomes ppc hardware pte )
* r2: ptr to linux-style pte
- * r3: scratch
+ * r3: fault address
*/
/* Get PTE (linux-style) and check access */
mfspr r3,SPRN_IMISS
@@ -424,12 +424,13 @@ InstructionTLBMiss:
cmplw 0,r1,r3
#endif
mfspr r2, SPRN_SDR1
- li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
+ li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
rlwinm r2, r2, 28, 0xfffff000
#ifdef CONFIG_MODULES
+ li r0, 3
bgt- 112f
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
- li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+ li r0, 0
addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
#endif
112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
@@ -437,13 +438,15 @@ InstructionTLBMiss:
rlwinm. r2,r2,0,0,19 /* extract address of pte page */
beq- InstructionAddressInvalid /* return if no mapping */
rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
- lwz r0,0(r2) /* get linux-style pte */
- andc. r1,r1,r0 /* check access & ~permission */
+ lwz r2,0(r2) /* get linux-style pte */
+ andc. r1,r1,r2 /* check access & ~permission */
bne- InstructionAddressInvalid /* return if access not permitted */
/* Convert linux-style PTE to low word of PPC-style PTE */
- rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
+#ifdef CONFIG_MODULES
+ rlwimi r2, r0, 0, 31, 31 /* userspace ? -> PP lsb */
+#endif
ori r1, r1, 0xe06 /* clear out reserved bits */
- andc r1, r0, r1 /* PP = user? 1 : 0 */
+ andc r1, r2, r1 /* PP = user? 1 : 0 */
BEGIN_FTR_SECTION
rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
@@ -478,38 +481,38 @@ InstructionAddressInvalid:
. = INTERRUPT_DATA_LOAD_TLB_MISS_603
DataLoadTLBMiss:
/*
- * r0: scratch
+ * r0: userspace flag (later scratch)
* r1: linux style pte ( later becomes ppc hardware pte )
* r2: ptr to linux-style pte
- * r3: scratch
+ * r3: fault address
*/
/* Get PTE (linux-style) and check access */
mfspr r3,SPRN_DMISS
lis r1, TASK_SIZE@h /* check if kernel address */
cmplw 0,r1,r3
mfspr r2, SPRN_SDR1
- li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
+ li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_READ
rlwinm r2, r2, 28, 0xfffff000
+ li r0, 3
bgt- 112f
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
- li r1, _PAGE_PRESENT | _PAGE_ACCESSED
+ li r0, 0
addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
lwz r2,0(r2) /* get pmd entry */
rlwinm. r2,r2,0,0,19 /* extract address of pte page */
beq- DataAddressInvalid /* return if no mapping */
rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
- lwz r0,0(r2) /* get linux-style pte */
- andc. r1,r1,r0 /* check access & ~permission */
+ lwz r2,0(r2) /* get linux-style pte */
+ andc. r1,r1,r2 /* check access & ~permission */
bne- DataAddressInvalid /* return if access not permitted */
/* Convert linux-style PTE to low word of PPC-style PTE */
- rlwinm r1,r0,32-9,30,30 /* _PAGE_RW -> PP msb */
- rlwimi r0,r0,32-1,30,30 /* _PAGE_USER -> PP msb */
- rlwimi r1,r0,32-3,24,24 /* _PAGE_RW -> _PAGE_DIRTY */
- rlwimi r0,r0,32-1,31,31 /* _PAGE_USER -> PP lsb */
+ rlwinm r1,r2,32-9,30,30 /* _PAGE_WRITE -> PP msb */
+ rlwimi r2,r0,0,30,31 /* userspace ? -> PP */
+ rlwimi r1,r2,32-3,24,24 /* _PAGE_WRITE -> _PAGE_DIRTY */
xori r1,r1,_PAGE_DIRTY /* clear dirty when not rw */
ori r1,r1,0xe04 /* clear out reserved bits */
- andc r1,r0,r1 /* PP = user? rw? 1: 3: 0 */
+ andc r1,r2,r1 /* PP = user? rw? 1: 3: 0 */
BEGIN_FTR_SECTION
rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
@@ -558,34 +561,35 @@ DataAddressInvalid:
. = INTERRUPT_DATA_STORE_TLB_MISS_603
DataStoreTLBMiss:
/*
- * r0: scratch
+ * r0: userspace flag (later scratch)
* r1: linux style pte ( later becomes ppc hardware pte )
* r2: ptr to linux-style pte
- * r3: scratch
+ * r3: fault address
*/
/* Get PTE (linux-style) and check access */
mfspr r3,SPRN_DMISS
lis r1, TASK_SIZE@h /* check if kernel address */
cmplw 0,r1,r3
mfspr r2, SPRN_SDR1
- li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
+ li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
rlwinm r2, r2, 28, 0xfffff000
+ li r0, 3
bgt- 112f
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
- li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
+ li r0, 0
addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
lwz r2,0(r2) /* get pmd entry */
rlwinm. r2,r2,0,0,19 /* extract address of pte page */
beq- DataAddressInvalid /* return if no mapping */
rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
- lwz r0,0(r2) /* get linux-style pte */
- andc. r1,r1,r0 /* check access & ~permission */
+ lwz r2,0(r2) /* get linux-style pte */
+ andc. r1,r1,r2 /* check access & ~permission */
bne- DataAddressInvalid /* return if access not permitted */
/* Convert linux-style PTE to low word of PPC-style PTE */
- rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
+ rlwimi r2,r0,0,31,31 /* userspace ? -> PP lsb */
li r1,0xe06 /* clear out reserved bits & PP msb */
- andc r1,r0,r1 /* PP = user? 1: 0 */
+ andc r1,r2,r1 /* PP = user? 1: 0 */
BEGIN_FTR_SECTION
rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
@@ -689,7 +693,8 @@ hash_page_dsi:
mfdar r4
mfsrr0 r5
mfsrr1 r9
- rlwinm r3, r3, 32 - 15, _PAGE_RW /* DSISR_STORE -> _PAGE_RW */
+ rlwinm r3, r3, 32 - 15, _PAGE_WRITE /* DSISR_STORE -> _PAGE_WRITE */
+ ori r3, r3, _PAGE_PRESENT | _PAGE_READ
bl hash_page
mfspr r10, SPRN_SPRG_THREAD
restore_regs_thread r10
@@ -699,7 +704,7 @@ hash_page_isi:
mr r11, r10
mfspr r10, SPRN_SPRG_THREAD
save_regs_thread r10
- li r3, 0
+ li r3, _PAGE_PRESENT | _PAGE_EXEC
lwz r4, SRR0(r10)
lwz r9, SRR1(r10)
bl hash_page
diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c
index 2f29b7d432de..6af535905984 100644
--- a/arch/powerpc/kernel/io.c
+++ b/arch/powerpc/kernel/io.c
@@ -33,7 +33,7 @@ void _insb(const volatile u8 __iomem *port, void *buf, long count)
return;
asm volatile("sync");
do {
- tmp = *port;
+ tmp = *(const volatile u8 __force *)port;
eieio();
*tbuf++ = tmp;
} while (--count != 0);
@@ -49,7 +49,7 @@ void _outsb(volatile u8 __iomem *port, const void *buf, long count)
return;
asm volatile("sync");
do {
- *port = *tbuf++;
+ *(volatile u8 __force *)port = *tbuf++;
} while (--count != 0);
asm volatile("sync");
}
@@ -64,7 +64,7 @@ void _insw_ns(const volatile u16 __iomem *port, void *buf, long count)
return;
asm volatile("sync");
do {
- tmp = *port;
+ tmp = *(const volatile u16 __force *)port;
eieio();
*tbuf++ = tmp;
} while (--count != 0);
@@ -80,7 +80,7 @@ void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count)
return;
asm volatile("sync");
do {
- *port = *tbuf++;
+ *(volatile u16 __force *)port = *tbuf++;
} while (--count != 0);
asm volatile("sync");
}
@@ -95,7 +95,7 @@ void _insl_ns(const volatile u32 __iomem *port, void *buf, long count)
return;
asm volatile("sync");
do {
- tmp = *port;
+ tmp = *(const volatile u32 __force *)port;
eieio();
*tbuf++ = tmp;
} while (--count != 0);
@@ -111,7 +111,7 @@ void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count)
return;
asm volatile("sync");
do {
- *port = *tbuf++;
+ *(volatile u32 __force *)port = *tbuf++;
} while (--count != 0);
asm volatile("sync");
}
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 14251bc5219e..ebe259bdd462 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -1074,10 +1074,10 @@ int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
}
EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
-extern long iommu_tce_xchg_no_kill(struct mm_struct *mm,
- struct iommu_table *tbl,
- unsigned long entry, unsigned long *hpa,
- enum dma_data_direction *direction)
+long iommu_tce_xchg_no_kill(struct mm_struct *mm,
+ struct iommu_table *tbl,
+ unsigned long entry, unsigned long *hpa,
+ enum dma_data_direction *direction)
{
long ret;
unsigned long size = 0;
@@ -1280,13 +1280,19 @@ struct iommu_table_group_ops spapr_tce_table_group_ops = {
/*
* A simple iommu_ops to allow less cruft in generic VFIO code.
*/
-static int spapr_tce_blocking_iommu_attach_dev(struct iommu_domain *dom,
- struct device *dev)
+static int
+spapr_tce_platform_iommu_attach_dev(struct iommu_domain *platform_domain,
+ struct device *dev)
{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct iommu_group *grp = iommu_group_get(dev);
struct iommu_table_group *table_group;
int ret = -EINVAL;
+ /* At the first attach, ownership is already set */
+ if (!domain)
+ return 0;
+
if (!grp)
return -ENODEV;
@@ -1297,17 +1303,22 @@ static int spapr_tce_blocking_iommu_attach_dev(struct iommu_domain *dom,
return ret;
}
-static void spapr_tce_blocking_iommu_set_platform_dma(struct device *dev)
-{
- struct iommu_group *grp = iommu_group_get(dev);
- struct iommu_table_group *table_group;
+static const struct iommu_domain_ops spapr_tce_platform_domain_ops = {
+ .attach_dev = spapr_tce_platform_iommu_attach_dev,
+};
- table_group = iommu_group_get_iommudata(grp);
- table_group->ops->release_ownership(table_group);
-}
+static struct iommu_domain spapr_tce_platform_domain = {
+ .type = IOMMU_DOMAIN_PLATFORM,
+ .ops = &spapr_tce_platform_domain_ops,
+};
-static const struct iommu_domain_ops spapr_tce_blocking_domain_ops = {
- .attach_dev = spapr_tce_blocking_iommu_attach_dev,
+static struct iommu_domain spapr_tce_blocked_domain = {
+ .type = IOMMU_DOMAIN_BLOCKED,
+ /*
+ * FIXME: SPAPR mixes blocked and platform behaviors; the blocked domain
+ * also sets the dma_api ops
+ */
+ .ops = &spapr_tce_platform_domain_ops,
};
static bool spapr_tce_iommu_capable(struct device *dev, enum iommu_cap cap)
@@ -1322,22 +1333,6 @@ static bool spapr_tce_iommu_capable(struct device *dev, enum iommu_cap cap)
return false;
}
-static struct iommu_domain *spapr_tce_iommu_domain_alloc(unsigned int type)
-{
- struct iommu_domain *dom;
-
- if (type != IOMMU_DOMAIN_BLOCKED)
- return NULL;
-
- dom = kzalloc(sizeof(*dom), GFP_KERNEL);
- if (!dom)
- return NULL;
-
- dom->ops = &spapr_tce_blocking_domain_ops;
-
- return dom;
-}
-
static struct iommu_device *spapr_tce_iommu_probe_device(struct device *dev)
{
struct pci_dev *pdev;
@@ -1371,12 +1366,12 @@ static struct iommu_group *spapr_tce_iommu_device_group(struct device *dev)
}
static const struct iommu_ops spapr_tce_iommu_ops = {
+ .default_domain = &spapr_tce_platform_domain,
+ .blocked_domain = &spapr_tce_blocked_domain,
.capable = spapr_tce_iommu_capable,
- .domain_alloc = spapr_tce_iommu_domain_alloc,
.probe_device = spapr_tce_iommu_probe_device,
.release_device = spapr_tce_iommu_release_device,
.device_group = spapr_tce_iommu_device_group,
- .set_platform_dma_ops = spapr_tce_blocking_iommu_set_platform_dma,
};
static struct attribute *spapr_tce_iommu_attrs[] = {
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index b68898ac07e1..392404688cec 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -2258,6 +2258,22 @@ unsigned long __get_wchan(struct task_struct *p)
return ret;
}
+static bool empty_user_regs(struct pt_regs *regs, struct task_struct *tsk)
+{
+ unsigned long stack_page;
+
+ // A non-empty pt_regs should never have a zero MSR or TRAP value.
+ if (regs->msr || regs->trap)
+ return false;
+
+ // Check that it sits at the very base of the stack
+ stack_page = (unsigned long)task_stack_page(tsk);
+ if ((unsigned long)(regs + 1) != stack_page + THREAD_SIZE)
+ return false;
+
+ return true;
+}
+
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
void __no_sanitize_address show_stack(struct task_struct *tsk,
@@ -2322,9 +2338,13 @@ void __no_sanitize_address show_stack(struct task_struct *tsk,
lr = regs->link;
printk("%s--- interrupt: %lx at %pS\n",
loglvl, regs->trap, (void *)regs->nip);
- __show_regs(regs);
- printk("%s--- interrupt: %lx\n",
- loglvl, regs->trap);
+
+ // Detect the case of an empty pt_regs at the very base
+ // of the stack and suppress showing it in full.
+ if (!empty_user_regs(regs, tsk)) {
+ __show_regs(regs);
+ printk("%s--- interrupt: %lx\n", loglvl, regs->trap);
+ }
firstframe = 1;
}
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index d464ba412084..e67effdba85c 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -947,7 +947,7 @@ struct option_vector7 {
} __packed;
struct ibm_arch_vec {
- struct { u32 mask, val; } pvrs[14];
+ struct { __be32 mask, val; } pvrs[14];
u8 num_vectors;
diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c
index 5d7a72b41ae7..727ed4a14545 100644
--- a/arch/powerpc/kernel/ptrace/ptrace.c
+++ b/arch/powerpc/kernel/ptrace/ptrace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
@@ -9,10 +10,6 @@
*
* Modified by Cort Dougan (cort@hq.fsmlabs.com)
* and Paul Mackerras (paulus@samba.org).
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file README.legal in the main directory of
- * this archive for more details.
*/
#include <linux/regset.h>
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 20f72cd1d813..9b142b9d5187 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -22,7 +22,6 @@
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
-#include <linux/screen_info.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
@@ -98,21 +97,6 @@ int boot_cpu_hwid = -1;
int dcache_bsize;
int icache_bsize;
-/*
- * This still seems to be needed... -- paulus
- */
-struct screen_info screen_info = {
- .orig_x = 0,
- .orig_y = 25,
- .orig_video_cols = 80,
- .orig_video_lines = 25,
- .orig_video_isVGA = 1,
- .orig_video_points = 16
-};
-#if defined(CONFIG_FB_VGA16_MODULE)
-EXPORT_SYMBOL(screen_info);
-#endif
-
/* Variables required to store legacy IO irq routing */
int of_i8042_kbd_irq;
EXPORT_SYMBOL_GPL(of_i8042_kbd_irq);
@@ -601,7 +585,6 @@ struct seq_buf ppc_hw_desc __initdata = {
.buffer = ppc_hw_desc_buf,
.size = sizeof(ppc_hw_desc_buf),
.len = 0,
- .readpos = 0,
};
static __init void probe_machine(void)
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 246201d0d879..2f19d5e94485 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -364,7 +364,7 @@ void __init early_setup(unsigned long dt_ptr)
*/
initialise_paca(&boot_paca, 0);
fixup_boot_paca(&boot_paca);
- WARN_ON(local_paca != 0);
+ WARN_ON(local_paca);
setup_paca(&boot_paca); /* install the paca into registers */
/* -------- printk is now safe to use ------- */
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 68a91e553e14..aa17e62f3754 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Common signal handling code for both 32 and 64 bits
*
* Copyright (c) 2007 Benjamin Herrenschmidt, IBM Corporation
* Extracted from signal_32.c and signal_64.c
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file README.legal in the main directory of
- * this archive for more details.
*/
#include <linux/resume_user_mode.h>
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index a429c57ed433..58ecea1cdc27 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -1,10 +1,7 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ *
* Copyright (c) 2007 Benjamin Herrenschmidt, IBM Corporation
* Extracted from signal_32.c and signal_64.c
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file README.legal in the main directory of
- * this archive for more details.
*/
#ifndef _POWERPC_ARCH_SIGNAL_H
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 64ff37721fd0..5ea2014aff90 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -157,7 +157,7 @@ static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;
-extern void panic_flush_kmsg_start(void)
+void panic_flush_kmsg_start(void)
{
/*
* These are mostly taken from kernel/panic.c, but tries to do
@@ -170,7 +170,7 @@ extern void panic_flush_kmsg_start(void)
bust_spinlocks(1);
}
-extern void panic_flush_kmsg_end(void)
+void panic_flush_kmsg_end(void)
{
kmsg_dump(KMSG_DUMP_PANIC);
bust_spinlocks(0);
@@ -1164,6 +1164,7 @@ void emulate_single_step(struct pt_regs *regs)
__single_step_exception(regs);
}
+#ifdef CONFIG_PPC_FPU_REGS
static inline int __parse_fpscr(unsigned long fpscr)
{
int ret = FPE_FLTUNK;
@@ -1190,6 +1191,7 @@ static inline int __parse_fpscr(unsigned long fpscr)
return ret;
}
+#endif
static void parse_fpe(struct pt_regs *regs)
{
diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c
index 9346c960b296..85846cadb9b5 100644
--- a/arch/powerpc/kexec/core.c
+++ b/arch/powerpc/kexec/core.c
@@ -74,6 +74,9 @@ void arch_crash_save_vmcoreinfo(void)
VMCOREINFO_STRUCT_SIZE(mmu_psize_def);
VMCOREINFO_OFFSET(mmu_psize_def, shift);
#endif
+ VMCOREINFO_SYMBOL(cur_cpu_spec);
+ VMCOREINFO_OFFSET(cpu_spec, mmu_features);
+ vmcoreinfo_append_str("NUMBER(RADIX_MMU)=%d\n", early_radix_enabled());
vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
}
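
The three lines added above let crash/dump tools work out, from the vmcore alone, whether the crashed kernel was running with the radix or hash MMU. A hedged sketch of how a consumer might read the new NUMBER() line (the parsing code is illustrative, not taken from any real dump tool):

/* Sketch only: parsing the NUMBER(RADIX_MMU) line exported above. */
#include <stdio.h>

int main(void)
{
	const char *line = "NUMBER(RADIX_MMU)=1";
	int radix = -1;

	if (sscanf(line, "NUMBER(RADIX_MMU)=%d", &radix) == 1)
		printf("crashed kernel used the %s MMU\n", radix ? "radix" : "hash");
	return 0;
}
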
diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c
index a79e28c91e2b..0bee7ca9a77c 100644
--- a/arch/powerpc/kexec/core_64.c
+++ b/arch/powerpc/kexec/core_64.c
@@ -379,8 +379,8 @@ void default_machine_kexec(struct kimage *image)
#ifdef CONFIG_PPC_64S_HASH_MMU
/* Values we need to export to the second kernel via the device tree. */
-static unsigned long htab_base;
-static unsigned long htab_size;
+static __be64 htab_base;
+static __be64 htab_size;
static struct property htab_base_prop = {
.name = "linux,htab-base",
diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
index a3de5369d22c..961a6dd67365 100644
--- a/arch/powerpc/kexec/file_load_64.c
+++ b/arch/powerpc/kexec/file_load_64.c
@@ -32,7 +32,7 @@
#include <asm/plpks.h>
struct umem_info {
- u64 *buf; /* data buffer for usable-memory property */
+ __be64 *buf; /* data buffer for usable-memory property */
u32 size; /* size allocated for the data buffer */
u32 max_entries; /* maximum no. of entries */
u32 idx; /* index of current entry */
@@ -443,10 +443,10 @@ static int locate_mem_hole_bottom_up_ppc64(struct kexec_buf *kbuf,
*
* Returns buffer on success, NULL on error.
*/
-static u64 *check_realloc_usable_mem(struct umem_info *um_info, int cnt)
+static __be64 *check_realloc_usable_mem(struct umem_info *um_info, int cnt)
{
u32 new_size;
- u64 *tbuf;
+ __be64 *tbuf;
if ((um_info->idx + cnt) <= um_info->max_entries)
return um_info->buf;
@@ -1138,11 +1138,15 @@ static int update_pci_dma_nodes(void *fdt, const char *dmapropname)
continue;
ret = copy_property(fdt, pci_offset, dn, "ibm,dma-window");
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(dn);
break;
+ }
ret = copy_property(fdt, pci_offset, dn, dmapropname);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(dn);
break;
+ }
}
return ret;
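
The two hunks above fix a device-node reference leak: the node iterator in update_pci_dma_nodes() takes a reference on the current node and only drops it when the loop advances, so breaking out early must drop it by hand. A runnable userspace analogue of that pattern (the refcounting here is illustrative, not the kernel's of_node API):

/* Sketch only: drop the per-iteration reference before an early break. */
#include <stdio.h>

struct node { int refs; };

static struct node *node_get(struct node *n) { n->refs++; return n; }
static void node_put(struct node *n) { n->refs--; }

int main(void)
{
	struct node nodes[3] = { {0}, {0}, {0} };
	int i, err;

	for (i = 0; i < 3; i++) {
		struct node *n = node_get(&nodes[i]);	/* iterator holds a reference */

		err = (i == 1) ? -1 : 0;		/* pretend copy_property() failed */
		if (err < 0) {
			node_put(n);			/* the fix: release it before breaking out */
			break;
		}
		node_put(n);				/* normally released as the loop advances */
	}

	for (i = 0; i < 3; i++)
		printf("node %d refcount after loop: %d\n", i, nodes[i].refs);
	return 0;
}
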
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 5319d889b184..4bd9d1230869 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -87,8 +87,12 @@ kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
book3s_hv_ras.o \
book3s_hv_builtin.o \
book3s_hv_p9_perf.o \
+ book3s_hv_nestedv2.o \
+ guest-state-buffer.o \
$(kvm-book3s_64-builtin-tm-objs-y) \
$(kvm-book3s_64-builtin-xics-objs-y)
+
+obj-$(CONFIG_GUEST_STATE_BUFFER_TEST) += test-guest-state-buffer.o
endif
kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 686d8d9eda3e..6cd20ab9e94e 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -565,7 +565,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
regs->msr = kvmppc_get_msr(vcpu);
regs->srr0 = kvmppc_get_srr0(vcpu);
regs->srr1 = kvmppc_get_srr1(vcpu);
- regs->pid = vcpu->arch.pid;
+ regs->pid = kvmppc_get_pid(vcpu);
regs->sprg0 = kvmppc_get_sprg0(vcpu);
regs->sprg1 = kvmppc_get_sprg1(vcpu);
regs->sprg2 = kvmppc_get_sprg2(vcpu);
@@ -636,17 +636,17 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
break;
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
i = id - KVM_REG_PPC_FPR0;
- *val = get_reg_val(id, VCPU_FPR(vcpu, i));
+ *val = get_reg_val(id, kvmppc_get_fpr(vcpu, i));
break;
case KVM_REG_PPC_FPSCR:
- *val = get_reg_val(id, vcpu->arch.fp.fpscr);
+ *val = get_reg_val(id, kvmppc_get_fpscr(vcpu));
break;
#ifdef CONFIG_VSX
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
i = id - KVM_REG_PPC_VSR0;
- val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
- val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
+ val->vsxval[0] = kvmppc_get_vsx_fpr(vcpu, i, 0);
+ val->vsxval[1] = kvmppc_get_vsx_fpr(vcpu, i, 1);
} else {
r = -ENXIO;
}
@@ -683,19 +683,19 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
*val = get_reg_val(id, vcpu->arch.fscr);
break;
case KVM_REG_PPC_TAR:
- *val = get_reg_val(id, vcpu->arch.tar);
+ *val = get_reg_val(id, kvmppc_get_tar(vcpu));
break;
case KVM_REG_PPC_EBBHR:
- *val = get_reg_val(id, vcpu->arch.ebbhr);
+ *val = get_reg_val(id, kvmppc_get_ebbhr(vcpu));
break;
case KVM_REG_PPC_EBBRR:
- *val = get_reg_val(id, vcpu->arch.ebbrr);
+ *val = get_reg_val(id, kvmppc_get_ebbrr(vcpu));
break;
case KVM_REG_PPC_BESCR:
- *val = get_reg_val(id, vcpu->arch.bescr);
+ *val = get_reg_val(id, kvmppc_get_bescr(vcpu));
break;
case KVM_REG_PPC_IC:
- *val = get_reg_val(id, vcpu->arch.ic);
+ *val = get_reg_val(id, kvmppc_get_ic(vcpu));
break;
default:
r = -EINVAL;
@@ -724,7 +724,7 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
break;
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
i = id - KVM_REG_PPC_FPR0;
- VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
+ kvmppc_set_fpr(vcpu, i, set_reg_val(id, *val));
break;
case KVM_REG_PPC_FPSCR:
vcpu->arch.fp.fpscr = set_reg_val(id, *val);
@@ -733,8 +733,8 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
i = id - KVM_REG_PPC_VSR0;
- vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
- vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
+ kvmppc_set_vsx_fpr(vcpu, i, 0, val->vsxval[0]);
+ kvmppc_set_vsx_fpr(vcpu, i, 1, val->vsxval[1]);
} else {
r = -ENXIO;
}
@@ -765,22 +765,22 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
break;
#endif /* CONFIG_KVM_XIVE */
case KVM_REG_PPC_FSCR:
- vcpu->arch.fscr = set_reg_val(id, *val);
+ kvmppc_set_fpscr(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_TAR:
- vcpu->arch.tar = set_reg_val(id, *val);
+ kvmppc_set_tar(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_EBBHR:
- vcpu->arch.ebbhr = set_reg_val(id, *val);
+ kvmppc_set_ebbhr(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_EBBRR:
- vcpu->arch.ebbrr = set_reg_val(id, *val);
+ kvmppc_set_ebbrr(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_BESCR:
- vcpu->arch.bescr = set_reg_val(id, *val);
+ kvmppc_set_bescr(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_IC:
- vcpu->arch.ic = set_reg_val(id, *val);
+ kvmppc_set_ic(vcpu, set_reg_val(id, *val));
break;
default:
r = -EINVAL;
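
These conversions replace direct vcpu->arch.* loads and stores with kvmppc_get_*()/kvmppc_set_*() helpers so that, on a nestedv2 host, reads can first refresh a cached value from the guest state buffer and writes can be marked dirty for a later flush (the HV-specific variants appear in book3s_hv.h further down). A hedged userspace analogue of that getter/setter contract; the names and types are illustrative:

/* Sketch only: setters record what changed, getters may refresh a cache. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vcpu_state {
	uint64_t tar;
	bool tar_dirty;
};

static void set_tar(struct vcpu_state *v, uint64_t val)
{
	v->tar = val;
	v->tar_dirty = true;	/* analogous to marking the register id dirty */
}

static uint64_t get_tar(struct vcpu_state *v)
{
	/* a real getter may first reload the value from a cached buffer */
	return v->tar;
}

int main(void)
{
	struct vcpu_state v = { 0 };

	set_tar(&v, 0x1234);
	printf("tar=%#llx dirty=%d\n", (unsigned long long)get_tar(&v), v.tar_dirty);
	return 0;
}
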
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index efd0ebf70a5e..2b1f0cdd8c18 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -28,6 +28,7 @@
#include <asm/pte-walk.h>
#include "book3s.h"
+#include "book3s_hv.h"
#include "trace_hv.h"
//#define DEBUG_RESIZE_HPT 1
@@ -120,7 +121,7 @@ void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info)
kvm->arch.hpt = *info;
kvm->arch.sdr1 = __pa(info->virt) | (info->order - 18);
- pr_debug("KVM guest htab at %lx (order %ld), LPID %x\n",
+ pr_debug("KVM guest htab at %lx (order %ld), LPID %llx\n",
info->virt, (long)info->order, kvm->arch.lpid);
}
@@ -347,7 +348,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
unsigned long v, orig_v, gr;
__be64 *hptep;
long int index;
- int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
+ int virtmode = __kvmppc_get_msr_hv(vcpu) & (data ? MSR_DR : MSR_IR);
if (kvm_is_radix(vcpu->kvm))
return kvmppc_mmu_radix_xlate(vcpu, eaddr, gpte, data, iswrite);
@@ -385,7 +386,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
/* Get PP bits and key for permission check */
pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
- key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
+ key = (__kvmppc_get_msr_hv(vcpu) & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
key &= slb_v;
/* Calculate permissions */
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 572707858d65..175a8eb2681f 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -15,6 +15,7 @@
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
+#include "book3s_hv.h"
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgalloc.h>
@@ -96,7 +97,7 @@ static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
void *to, void *from, unsigned long n)
{
int lpid = vcpu->kvm->arch.lpid;
- int pid = vcpu->arch.pid;
+ int pid = kvmppc_get_pid(vcpu);
/* This would cause a data segment intr so don't allow the access */
if (eaddr & (0x3FFUL << 52))
@@ -270,7 +271,7 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
/* Work out effective PID */
switch (eaddr >> 62) {
case 0:
- pid = vcpu->arch.pid;
+ pid = kvmppc_get_pid(vcpu);
break;
case 3:
pid = 0;
@@ -294,9 +295,9 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
} else {
if (!(pte & _PAGE_PRIVILEGED)) {
/* Check AMR/IAMR to see if strict mode is in force */
- if (vcpu->arch.amr & (1ul << 62))
+ if (kvmppc_get_amr_hv(vcpu) & (1ul << 62))
gpte->may_read = 0;
- if (vcpu->arch.amr & (1ul << 63))
+ if (kvmppc_get_amr_hv(vcpu) & (1ul << 63))
gpte->may_write = 0;
if (vcpu->arch.iamr & (1ul << 62))
gpte->may_execute = 0;
@@ -307,7 +308,7 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
}
void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
- unsigned int pshift, unsigned int lpid)
+ unsigned int pshift, u64 lpid)
{
unsigned long psize = PAGE_SIZE;
int psi;
@@ -344,7 +345,7 @@ void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
}
-static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
+static void kvmppc_radix_flush_pwc(struct kvm *kvm, u64 lpid)
{
long rc;
@@ -417,7 +418,7 @@ static void kvmppc_pmd_free(pmd_t *pmdp)
void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
unsigned int shift,
const struct kvm_memory_slot *memslot,
- unsigned int lpid)
+ u64 lpid)
{
unsigned long old;
@@ -468,7 +469,7 @@ void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
* (or 4kB) mappings (of sub-pages of the same 2MB page).
*/
static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
- unsigned int lpid)
+ u64 lpid)
{
if (full) {
memset(pte, 0, sizeof(long) << RADIX_PTE_INDEX_SIZE);
@@ -489,7 +490,7 @@ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
}
static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
- unsigned int lpid)
+ u64 lpid)
{
unsigned long im;
pmd_t *p = pmd;
@@ -518,7 +519,7 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
}
static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
- unsigned int lpid)
+ u64 lpid)
{
unsigned long iu;
pud_t *p = pud;
@@ -539,7 +540,7 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
pud_free(kvm->mm, pud);
}
-void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
+void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, u64 lpid)
{
unsigned long ig;
@@ -566,7 +567,7 @@ void kvmppc_free_radix(struct kvm *kvm)
}
static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
- unsigned long gpa, unsigned int lpid)
+ unsigned long gpa, u64 lpid)
{
pte_t *pte = pte_offset_kernel(pmd, 0);
@@ -582,7 +583,7 @@ static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
}
static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
- unsigned long gpa, unsigned int lpid)
+ unsigned long gpa, u64 lpid)
{
pmd_t *pmd = pmd_offset(pud, 0);
@@ -608,7 +609,7 @@ static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
unsigned long gpa, unsigned int level,
- unsigned long mmu_seq, unsigned int lpid,
+ unsigned long mmu_seq, u64 lpid,
unsigned long *rmapp, struct rmap_nested **n_rmap)
{
pgd_t *pgd;
@@ -785,7 +786,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
}
bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing,
- unsigned long gpa, unsigned int lpid)
+ unsigned long gpa, u64 lpid)
{
unsigned long pgflags;
unsigned int shift;
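
The lpid parameters are widened from unsigned int to u64 throughout this file because the nestedv2 API hands the L1 a 64-bit guest identifier, which a 32-bit parameter would silently truncate. A small runnable illustration (the identifier value is made up):

/* Sketch only: passing a 64-bit guest id through a 32-bit parameter. */
#include <stdint.h>
#include <stdio.h>

static void takes_u32(unsigned int lpid)
{
	printf("u32 parameter sees: %#x\n", lpid);
}

static void takes_u64(uint64_t lpid)
{
	printf("u64 parameter sees: %#llx\n", (unsigned long long)lpid);
}

int main(void)
{
	uint64_t guest_id = 0x100000002ULL;	/* wider than 32 bits */

	takes_u32(guest_id);	/* truncated to 0x2 */
	takes_u64(guest_id);	/* preserved as 0x100000002 */
	return 0;
}
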
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 93b695b289e9..14c6d7e318da 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -77,8 +77,8 @@ static void kvm_spapr_tce_liobn_put(struct kref *kref)
call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}
-extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
- struct iommu_group *grp)
+void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
+ struct iommu_group *grp)
{
int i;
struct kvmppc_spapr_tce_table *stt;
@@ -105,8 +105,8 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
rcu_read_unlock();
}
-extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
- struct iommu_group *grp)
+long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+ struct iommu_group *grp)
{
struct kvmppc_spapr_tce_table *stt = NULL;
bool found = false;
@@ -786,12 +786,12 @@ long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
idx = (ioba >> stt->page_shift) - stt->offset;
page = stt->pages[idx / TCES_PER_PAGE];
if (!page) {
- vcpu->arch.regs.gpr[4] = 0;
+ kvmppc_set_gpr(vcpu, 4, 0);
return H_SUCCESS;
}
tbl = (u64 *)page_address(page);
- vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];
+ kvmppc_set_gpr(vcpu, 4, tbl[idx % TCES_PER_PAGE]);
return H_SUCCESS;
}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 130bafdb1430..1ed6ec140701 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -393,7 +393,7 @@ static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
- unsigned long host_pcr_bit = 0, guest_pcr_bit = 0;
+ unsigned long host_pcr_bit = 0, guest_pcr_bit = 0, cap = 0;
struct kvmppc_vcore *vc = vcpu->arch.vcore;
/* We can (emulate) our own architecture version and anything older */
@@ -424,9 +424,11 @@ static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
break;
case PVR_ARCH_300:
guest_pcr_bit = PCR_ARCH_300;
+ cap = H_GUEST_CAP_POWER9;
break;
case PVR_ARCH_31:
guest_pcr_bit = PCR_ARCH_31;
+ cap = H_GUEST_CAP_POWER10;
break;
default:
return -EINVAL;
@@ -437,8 +439,14 @@ static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
if (guest_pcr_bit > host_pcr_bit)
return -EINVAL;
+ if (kvmhv_on_pseries() && kvmhv_is_nestedv2()) {
+ if (!(cap & nested_capabilities))
+ return -EINVAL;
+ }
+
spin_lock(&vc->lock);
vc->arch_compat = arch_compat;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LOGICAL_PVR);
/*
* Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit
* Also set all reserved PCR bits
@@ -794,7 +802,7 @@ static void kvmppc_update_vpa_dispatch(struct kvm_vcpu *vcpu,
vpa->enqueue_dispatch_tb = cpu_to_be64(be64_to_cpu(vpa->enqueue_dispatch_tb) + stolen);
- __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now + vc->tb_offset, stolen);
+ __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now + kvmppc_get_tb_offset(vcpu), stolen);
vcpu->arch.vpa.dirty = true;
}
@@ -845,9 +853,9 @@ static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu)
static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
+ if (kvmppc_get_arch_compat(vcpu) >= PVR_ARCH_207)
return true;
- if ((!vcpu->arch.vcore->arch_compat) &&
+ if ((!kvmppc_get_arch_compat(vcpu)) &&
cpu_has_feature(CPU_FTR_ARCH_207S))
return true;
return false;
@@ -868,7 +876,7 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
/* Guests can't breakpoint the hypervisor */
if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
return H_P3;
- vcpu->arch.ciabr = value1;
+ kvmppc_set_ciabr_hv(vcpu, value1);
return H_SUCCESS;
case H_SET_MODE_RESOURCE_SET_DAWR0:
if (!kvmppc_power8_compatible(vcpu))
@@ -879,8 +887,8 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
return H_UNSUPPORTED_FLAG_START;
if (value2 & DABRX_HYP)
return H_P4;
- vcpu->arch.dawr0 = value1;
- vcpu->arch.dawrx0 = value2;
+ kvmppc_set_dawr0_hv(vcpu, value1);
+ kvmppc_set_dawrx0_hv(vcpu, value2);
return H_SUCCESS;
case H_SET_MODE_RESOURCE_SET_DAWR1:
if (!kvmppc_power8_compatible(vcpu))
@@ -895,8 +903,8 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
return H_UNSUPPORTED_FLAG_START;
if (value2 & DABRX_HYP)
return H_P4;
- vcpu->arch.dawr1 = value1;
- vcpu->arch.dawrx1 = value2;
+ kvmppc_set_dawr1_hv(vcpu, value1);
+ kvmppc_set_dawrx1_hv(vcpu, value2);
return H_SUCCESS;
case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
/*
@@ -1267,10 +1275,14 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
return RESUME_HOST;
break;
#endif
- case H_RANDOM:
- if (!arch_get_random_seed_longs(&vcpu->arch.regs.gpr[4], 1))
+ case H_RANDOM: {
+ unsigned long rand;
+
+ if (!arch_get_random_seed_longs(&rand, 1))
ret = H_HARDWARE;
+ kvmppc_set_gpr(vcpu, 4, rand);
break;
+ }
case H_RPT_INVALIDATE:
ret = kvmppc_h_rpt_invalidate(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
@@ -1370,7 +1382,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
*/
static void kvmppc_cede(struct kvm_vcpu *vcpu)
{
- vcpu->arch.shregs.msr |= MSR_EE;
+ __kvmppc_set_msr_hv(vcpu, __kvmppc_get_msr_hv(vcpu) | MSR_EE);
vcpu->arch.ceded = 1;
smp_mb();
if (vcpu->arch.prodded) {
@@ -1544,7 +1556,7 @@ static int kvmppc_pmu_unavailable(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.hfscr_permitted & HFSCR_PM))
return EMULATE_FAIL;
- vcpu->arch.hfscr |= HFSCR_PM;
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_PM);
return RESUME_GUEST;
}
@@ -1554,7 +1566,7 @@ static int kvmppc_ebb_unavailable(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.hfscr_permitted & HFSCR_EBB))
return EMULATE_FAIL;
- vcpu->arch.hfscr |= HFSCR_EBB;
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_EBB);
return RESUME_GUEST;
}
@@ -1564,7 +1576,7 @@ static int kvmppc_tm_unavailable(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.hfscr_permitted & HFSCR_TM))
return EMULATE_FAIL;
- vcpu->arch.hfscr |= HFSCR_TM;
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_TM);
return RESUME_GUEST;
}
@@ -1585,7 +1597,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
* That can happen due to a bug, or due to a machine check
* occurring at just the wrong time.
*/
- if (vcpu->arch.shregs.msr & MSR_HV) {
+ if (__kvmppc_get_msr_hv(vcpu) & MSR_HV) {
printk(KERN_EMERG "KVM trap in HV mode!\n");
printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
vcpu->arch.trap, kvmppc_get_pc(vcpu),
@@ -1636,7 +1648,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
* so that it knows that the machine check occurred.
*/
if (!vcpu->kvm->arch.fwnmi_enabled) {
- ulong flags = (vcpu->arch.shregs.msr & 0x083c0000) |
+ ulong flags = (__kvmppc_get_msr_hv(vcpu) & 0x083c0000) |
(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
kvmppc_core_queue_machine_check(vcpu, flags);
r = RESUME_GUEST;
@@ -1666,7 +1678,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
* as a result of a hypervisor emulation interrupt
* (e40) getting turned into a 700 by BML RTAS.
*/
- flags = (vcpu->arch.shregs.msr & 0x1f0000ull) |
+ flags = (__kvmppc_get_msr_hv(vcpu) & 0x1f0000ull) |
(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
kvmppc_core_queue_program(vcpu, flags);
r = RESUME_GUEST;
@@ -1676,7 +1688,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
{
int i;
- if (unlikely(vcpu->arch.shregs.msr & MSR_PR)) {
+ if (unlikely(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
/*
* Guest userspace executed sc 1. This can only be
* reached by the P9 path because the old path
@@ -1754,7 +1766,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
break;
}
- if (!(vcpu->arch.shregs.msr & MSR_DR))
+ if (!(__kvmppc_get_msr_hv(vcpu) & MSR_DR))
vsid = vcpu->kvm->arch.vrma_slb_v;
else
vsid = vcpu->arch.fault_gpa;
@@ -1778,7 +1790,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
long err;
vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
- vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
+ vcpu->arch.fault_dsisr = __kvmppc_get_msr_hv(vcpu) &
DSISR_SRR1_MATCH_64S;
if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
/*
@@ -1787,7 +1799,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
* hash fault handling below is v3 only (it uses ASDR
* via fault_gpa).
*/
- if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
+ if (__kvmppc_get_msr_hv(vcpu) & HSRR1_HISI_WRITE)
vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
r = RESUME_PAGE_FAULT;
break;
@@ -1801,7 +1813,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
break;
}
- if (!(vcpu->arch.shregs.msr & MSR_IR))
+ if (!(__kvmppc_get_msr_hv(vcpu) & MSR_IR))
vsid = vcpu->kvm->arch.vrma_slb_v;
else
vsid = vcpu->arch.fault_gpa;
@@ -1863,7 +1875,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
* Otherwise, we just generate a program interrupt to the guest.
*/
case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: {
- u64 cause = vcpu->arch.hfscr >> 56;
+ u64 cause = kvmppc_get_hfscr_hv(vcpu) >> 56;
r = EMULATE_FAIL;
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
@@ -1891,7 +1903,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
kvmppc_dump_regs(vcpu);
printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
vcpu->arch.trap, kvmppc_get_pc(vcpu),
- vcpu->arch.shregs.msr);
+ __kvmppc_get_msr_hv(vcpu));
run->hw.hardware_exit_reason = vcpu->arch.trap;
r = RESUME_HOST;
break;
@@ -1915,11 +1927,11 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
* That can happen due to a bug, or due to a machine check
* occurring at just the wrong time.
*/
- if (vcpu->arch.shregs.msr & MSR_HV) {
+ if (__kvmppc_get_msr_hv(vcpu) & MSR_HV) {
pr_emerg("KVM trap in HV mode while nested!\n");
pr_emerg("trap=0x%x | pc=0x%lx | msr=0x%llx\n",
vcpu->arch.trap, kvmppc_get_pc(vcpu),
- vcpu->arch.shregs.msr);
+ __kvmppc_get_msr_hv(vcpu));
kvmppc_dump_regs(vcpu);
return RESUME_HOST;
}
@@ -1976,7 +1988,7 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) &
DSISR_SRR1_MATCH_64S;
- if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
+ if (__kvmppc_get_msr_hv(vcpu) & HSRR1_HISI_WRITE)
vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvmhv_nested_page_fault(vcpu);
@@ -2183,6 +2195,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
}
vc->lpcr = new_lpcr;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LPCR);
spin_unlock(&vc->lock);
}
@@ -2207,64 +2220,64 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
*val = get_reg_val(id, vcpu->arch.dabrx);
break;
case KVM_REG_PPC_DSCR:
- *val = get_reg_val(id, vcpu->arch.dscr);
+ *val = get_reg_val(id, kvmppc_get_dscr_hv(vcpu));
break;
case KVM_REG_PPC_PURR:
- *val = get_reg_val(id, vcpu->arch.purr);
+ *val = get_reg_val(id, kvmppc_get_purr_hv(vcpu));
break;
case KVM_REG_PPC_SPURR:
- *val = get_reg_val(id, vcpu->arch.spurr);
+ *val = get_reg_val(id, kvmppc_get_spurr_hv(vcpu));
break;
case KVM_REG_PPC_AMR:
- *val = get_reg_val(id, vcpu->arch.amr);
+ *val = get_reg_val(id, kvmppc_get_amr_hv(vcpu));
break;
case KVM_REG_PPC_UAMOR:
- *val = get_reg_val(id, vcpu->arch.uamor);
+ *val = get_reg_val(id, kvmppc_get_uamor_hv(vcpu));
break;
case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
i = id - KVM_REG_PPC_MMCR0;
- *val = get_reg_val(id, vcpu->arch.mmcr[i]);
+ *val = get_reg_val(id, kvmppc_get_mmcr_hv(vcpu, i));
break;
case KVM_REG_PPC_MMCR2:
- *val = get_reg_val(id, vcpu->arch.mmcr[2]);
+ *val = get_reg_val(id, kvmppc_get_mmcr_hv(vcpu, 2));
break;
case KVM_REG_PPC_MMCRA:
- *val = get_reg_val(id, vcpu->arch.mmcra);
+ *val = get_reg_val(id, kvmppc_get_mmcra_hv(vcpu));
break;
case KVM_REG_PPC_MMCRS:
*val = get_reg_val(id, vcpu->arch.mmcrs);
break;
case KVM_REG_PPC_MMCR3:
- *val = get_reg_val(id, vcpu->arch.mmcr[3]);
+ *val = get_reg_val(id, kvmppc_get_mmcr_hv(vcpu, 3));
break;
case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
i = id - KVM_REG_PPC_PMC1;
- *val = get_reg_val(id, vcpu->arch.pmc[i]);
+ *val = get_reg_val(id, kvmppc_get_pmc_hv(vcpu, i));
break;
case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
i = id - KVM_REG_PPC_SPMC1;
*val = get_reg_val(id, vcpu->arch.spmc[i]);
break;
case KVM_REG_PPC_SIAR:
- *val = get_reg_val(id, vcpu->arch.siar);
+ *val = get_reg_val(id, kvmppc_get_siar_hv(vcpu));
break;
case KVM_REG_PPC_SDAR:
- *val = get_reg_val(id, vcpu->arch.sdar);
+ *val = get_reg_val(id, kvmppc_get_siar_hv(vcpu));
break;
case KVM_REG_PPC_SIER:
- *val = get_reg_val(id, vcpu->arch.sier[0]);
+ *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 0));
break;
case KVM_REG_PPC_SIER2:
- *val = get_reg_val(id, vcpu->arch.sier[1]);
+ *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 1));
break;
case KVM_REG_PPC_SIER3:
- *val = get_reg_val(id, vcpu->arch.sier[2]);
+ *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 2));
break;
case KVM_REG_PPC_IAMR:
- *val = get_reg_val(id, vcpu->arch.iamr);
+ *val = get_reg_val(id, kvmppc_get_iamr_hv(vcpu));
break;
case KVM_REG_PPC_PSPB:
- *val = get_reg_val(id, vcpu->arch.pspb);
+ *val = get_reg_val(id, kvmppc_get_pspb_hv(vcpu));
break;
case KVM_REG_PPC_DPDES:
/*
@@ -2279,22 +2292,22 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
*val = get_reg_val(id, vcpu->arch.vcore->dpdes);
break;
case KVM_REG_PPC_VTB:
- *val = get_reg_val(id, vcpu->arch.vcore->vtb);
+ *val = get_reg_val(id, kvmppc_get_vtb(vcpu));
break;
case KVM_REG_PPC_DAWR:
- *val = get_reg_val(id, vcpu->arch.dawr0);
+ *val = get_reg_val(id, kvmppc_get_dawr0_hv(vcpu));
break;
case KVM_REG_PPC_DAWRX:
- *val = get_reg_val(id, vcpu->arch.dawrx0);
+ *val = get_reg_val(id, kvmppc_get_dawrx0_hv(vcpu));
break;
case KVM_REG_PPC_DAWR1:
- *val = get_reg_val(id, vcpu->arch.dawr1);
+ *val = get_reg_val(id, kvmppc_get_dawr1_hv(vcpu));
break;
case KVM_REG_PPC_DAWRX1:
- *val = get_reg_val(id, vcpu->arch.dawrx1);
+ *val = get_reg_val(id, kvmppc_get_dawrx1_hv(vcpu));
break;
case KVM_REG_PPC_CIABR:
- *val = get_reg_val(id, vcpu->arch.ciabr);
+ *val = get_reg_val(id, kvmppc_get_ciabr_hv(vcpu));
break;
case KVM_REG_PPC_CSIGR:
*val = get_reg_val(id, vcpu->arch.csigr);
@@ -2306,13 +2319,13 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
*val = get_reg_val(id, vcpu->arch.tcscr);
break;
case KVM_REG_PPC_PID:
- *val = get_reg_val(id, vcpu->arch.pid);
+ *val = get_reg_val(id, kvmppc_get_pid(vcpu));
break;
case KVM_REG_PPC_ACOP:
*val = get_reg_val(id, vcpu->arch.acop);
break;
case KVM_REG_PPC_WORT:
- *val = get_reg_val(id, vcpu->arch.wort);
+ *val = get_reg_val(id, kvmppc_get_wort_hv(vcpu));
break;
case KVM_REG_PPC_TIDR:
*val = get_reg_val(id, vcpu->arch.tid);
@@ -2338,14 +2351,14 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
spin_unlock(&vcpu->arch.vpa_update_lock);
break;
case KVM_REG_PPC_TB_OFFSET:
- *val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
+ *val = get_reg_val(id, kvmppc_get_tb_offset(vcpu));
break;
case KVM_REG_PPC_LPCR:
case KVM_REG_PPC_LPCR_64:
- *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
+ *val = get_reg_val(id, kvmppc_get_lpcr(vcpu));
break;
case KVM_REG_PPC_PPR:
- *val = get_reg_val(id, vcpu->arch.ppr);
+ *val = get_reg_val(id, kvmppc_get_ppr_hv(vcpu));
break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
case KVM_REG_PPC_TFHAR:
@@ -2414,10 +2427,10 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
break;
#endif
case KVM_REG_PPC_ARCH_COMPAT:
- *val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
+ *val = get_reg_val(id, kvmppc_get_arch_compat(vcpu));
break;
case KVM_REG_PPC_DEC_EXPIRY:
- *val = get_reg_val(id, vcpu->arch.dec_expires);
+ *val = get_reg_val(id, kvmppc_get_dec_expires(vcpu));
break;
case KVM_REG_PPC_ONLINE:
*val = get_reg_val(id, vcpu->arch.online);
@@ -2425,6 +2438,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_PTCR:
*val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr);
break;
+ case KVM_REG_PPC_FSCR:
+ *val = get_reg_val(id, kvmppc_get_fscr_hv(vcpu));
+ break;
default:
r = -EINVAL;
break;
@@ -2453,29 +2469,29 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
break;
case KVM_REG_PPC_DSCR:
- vcpu->arch.dscr = set_reg_val(id, *val);
+ kvmppc_set_dscr_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_PURR:
- vcpu->arch.purr = set_reg_val(id, *val);
+ kvmppc_set_purr_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_SPURR:
- vcpu->arch.spurr = set_reg_val(id, *val);
+ kvmppc_set_spurr_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_AMR:
- vcpu->arch.amr = set_reg_val(id, *val);
+ kvmppc_set_amr_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_UAMOR:
- vcpu->arch.uamor = set_reg_val(id, *val);
+ kvmppc_set_uamor_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
i = id - KVM_REG_PPC_MMCR0;
- vcpu->arch.mmcr[i] = set_reg_val(id, *val);
+ kvmppc_set_mmcr_hv(vcpu, i, set_reg_val(id, *val));
break;
case KVM_REG_PPC_MMCR2:
- vcpu->arch.mmcr[2] = set_reg_val(id, *val);
+ kvmppc_set_mmcr_hv(vcpu, 2, set_reg_val(id, *val));
break;
case KVM_REG_PPC_MMCRA:
- vcpu->arch.mmcra = set_reg_val(id, *val);
+ kvmppc_set_mmcra_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_MMCRS:
vcpu->arch.mmcrs = set_reg_val(id, *val);
@@ -2485,32 +2501,32 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
break;
case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
i = id - KVM_REG_PPC_PMC1;
- vcpu->arch.pmc[i] = set_reg_val(id, *val);
+ kvmppc_set_pmc_hv(vcpu, i, set_reg_val(id, *val));
break;
case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
i = id - KVM_REG_PPC_SPMC1;
vcpu->arch.spmc[i] = set_reg_val(id, *val);
break;
case KVM_REG_PPC_SIAR:
- vcpu->arch.siar = set_reg_val(id, *val);
+ kvmppc_set_siar_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_SDAR:
- vcpu->arch.sdar = set_reg_val(id, *val);
+ kvmppc_set_sdar_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_SIER:
- vcpu->arch.sier[0] = set_reg_val(id, *val);
+ kvmppc_set_sier_hv(vcpu, 0, set_reg_val(id, *val));
break;
case KVM_REG_PPC_SIER2:
- vcpu->arch.sier[1] = set_reg_val(id, *val);
+ kvmppc_set_sier_hv(vcpu, 1, set_reg_val(id, *val));
break;
case KVM_REG_PPC_SIER3:
- vcpu->arch.sier[2] = set_reg_val(id, *val);
+ kvmppc_set_sier_hv(vcpu, 2, set_reg_val(id, *val));
break;
case KVM_REG_PPC_IAMR:
- vcpu->arch.iamr = set_reg_val(id, *val);
+ kvmppc_set_iamr_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_PSPB:
- vcpu->arch.pspb = set_reg_val(id, *val);
+ kvmppc_set_pspb_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_DPDES:
if (cpu_has_feature(CPU_FTR_ARCH_300))
@@ -2519,25 +2535,25 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
break;
case KVM_REG_PPC_VTB:
- vcpu->arch.vcore->vtb = set_reg_val(id, *val);
+ kvmppc_set_vtb(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_DAWR:
- vcpu->arch.dawr0 = set_reg_val(id, *val);
+ kvmppc_set_dawr0_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_DAWRX:
- vcpu->arch.dawrx0 = set_reg_val(id, *val) & ~DAWRX_HYP;
+ kvmppc_set_dawrx0_hv(vcpu, set_reg_val(id, *val) & ~DAWRX_HYP);
break;
case KVM_REG_PPC_DAWR1:
- vcpu->arch.dawr1 = set_reg_val(id, *val);
+ kvmppc_set_dawr1_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_DAWRX1:
- vcpu->arch.dawrx1 = set_reg_val(id, *val) & ~DAWRX_HYP;
+ kvmppc_set_dawrx1_hv(vcpu, set_reg_val(id, *val) & ~DAWRX_HYP);
break;
case KVM_REG_PPC_CIABR:
- vcpu->arch.ciabr = set_reg_val(id, *val);
+ kvmppc_set_ciabr_hv(vcpu, set_reg_val(id, *val));
/* Don't allow setting breakpoints in hypervisor code */
- if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
- vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */
+ if ((kvmppc_get_ciabr_hv(vcpu) & CIABR_PRIV) == CIABR_PRIV_HYPER)
+ kvmppc_set_ciabr_hv(vcpu, kvmppc_get_ciabr_hv(vcpu) & ~CIABR_PRIV);
break;
case KVM_REG_PPC_CSIGR:
vcpu->arch.csigr = set_reg_val(id, *val);
@@ -2549,13 +2565,13 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
vcpu->arch.tcscr = set_reg_val(id, *val);
break;
case KVM_REG_PPC_PID:
- vcpu->arch.pid = set_reg_val(id, *val);
+ kvmppc_set_pid(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_ACOP:
vcpu->arch.acop = set_reg_val(id, *val);
break;
case KVM_REG_PPC_WORT:
- vcpu->arch.wort = set_reg_val(id, *val);
+ kvmppc_set_wort_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_TIDR:
vcpu->arch.tid = set_reg_val(id, *val);
@@ -2602,10 +2618,11 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
* decrementer, which is better than a large one that
* causes a hang.
*/
- if (!vcpu->arch.dec_expires && tb_offset)
- vcpu->arch.dec_expires = get_tb() + tb_offset;
+ kvmppc_set_tb_offset(vcpu, tb_offset);
+ if (!kvmppc_get_dec_expires(vcpu) && tb_offset)
+ kvmppc_set_dec_expires(vcpu, get_tb() + tb_offset);
- vcpu->arch.vcore->tb_offset = tb_offset;
+ kvmppc_set_tb_offset(vcpu, tb_offset);
break;
}
case KVM_REG_PPC_LPCR:
@@ -2615,7 +2632,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
break;
case KVM_REG_PPC_PPR:
- vcpu->arch.ppr = set_reg_val(id, *val);
+ kvmppc_set_ppr_hv(vcpu, set_reg_val(id, *val));
break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
case KVM_REG_PPC_TFHAR:
@@ -2686,7 +2703,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_DEC_EXPIRY:
- vcpu->arch.dec_expires = set_reg_val(id, *val);
+ kvmppc_set_dec_expires(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_ONLINE:
i = set_reg_val(id, *val);
@@ -2699,6 +2716,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_PTCR:
vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val);
break;
+ case KVM_REG_PPC_FSCR:
+ kvmppc_set_fscr_hv(vcpu, set_reg_val(id, *val));
+ break;
default:
r = -EINVAL;
break;
@@ -2916,19 +2936,26 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
vcpu->arch.shared_big_endian = false;
#endif
#endif
- vcpu->arch.mmcr[0] = MMCR0_FC;
+
+ if (kvmhv_is_nestedv2()) {
+ err = kvmhv_nestedv2_vcpu_create(vcpu, &vcpu->arch.nestedv2_io);
+ if (err < 0)
+ return err;
+ }
+
+ kvmppc_set_mmcr_hv(vcpu, 0, MMCR0_FC);
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
- vcpu->arch.mmcr[0] |= MMCR0_PMCCEXT;
- vcpu->arch.mmcra = MMCRA_BHRB_DISABLE;
+ kvmppc_set_mmcr_hv(vcpu, 0, kvmppc_get_mmcr_hv(vcpu, 0) | MMCR0_PMCCEXT);
+ kvmppc_set_mmcra_hv(vcpu, MMCRA_BHRB_DISABLE);
}
- vcpu->arch.ctrl = CTRL_RUNLATCH;
+ kvmppc_set_ctrl_hv(vcpu, CTRL_RUNLATCH);
/* default to host PVR, since we can't spoof it */
kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
spin_lock_init(&vcpu->arch.vpa_update_lock);
spin_lock_init(&vcpu->arch.tbacct_lock);
vcpu->arch.busy_preempt = TB_NIL;
- vcpu->arch.shregs.msr = MSR_ME;
+ __kvmppc_set_msr_hv(vcpu, MSR_ME);
vcpu->arch.intr_msr = MSR_SF | MSR_ME;
/*
@@ -2938,29 +2965,30 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
* don't set the HFSCR_MSGP bit, and that causes those instructions
* to trap and then we emulate them.
*/
- vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
- HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP;
+ kvmppc_set_hfscr_hv(vcpu, HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
+ HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP);
/* On POWER10 and later, allow prefixed instructions */
if (cpu_has_feature(CPU_FTR_ARCH_31))
- vcpu->arch.hfscr |= HFSCR_PREFIX;
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_PREFIX);
if (cpu_has_feature(CPU_FTR_HVMODE)) {
- vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) & mfspr(SPRN_HFSCR));
+
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
- vcpu->arch.hfscr |= HFSCR_TM;
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_TM);
#endif
}
if (cpu_has_feature(CPU_FTR_TM_COMP))
vcpu->arch.hfscr |= HFSCR_TM;
- vcpu->arch.hfscr_permitted = vcpu->arch.hfscr;
+ vcpu->arch.hfscr_permitted = kvmppc_get_hfscr_hv(vcpu);
/*
* PM, EBB, TM are demand-faulted so start with it clear.
*/
- vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM);
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) & ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM));
kvmppc_mmu_book3s_hv_init(vcpu);
@@ -3071,6 +3099,8 @@ static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
spin_unlock(&vcpu->arch.vpa_update_lock);
+ if (kvmhv_is_nestedv2())
+ kvmhv_nestedv2_vcpu_free(vcpu, &vcpu->arch.nestedv2_io);
}
static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
@@ -4035,10 +4065,58 @@ static void vcpu_vpa_increment_dispatch(struct kvm_vcpu *vcpu)
}
}
+static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit,
+ unsigned long lpcr, u64 *tb)
+{
+ struct kvmhv_nestedv2_io *io;
+ unsigned long msr, i;
+ int trap;
+ long rc;
+
+ io = &vcpu->arch.nestedv2_io;
+
+ msr = mfmsr();
+ kvmppc_msr_hard_disable_set_facilities(vcpu, msr);
+ if (lazy_irq_pending())
+ return 0;
+
+ rc = kvmhv_nestedv2_flush_vcpu(vcpu, time_limit);
+ if (rc < 0)
+ return -EINVAL;
+
+ accumulate_time(vcpu, &vcpu->arch.in_guest);
+ rc = plpar_guest_run_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id,
+ &trap, &i);
+
+ if (rc != H_SUCCESS) {
+ pr_err("KVM Guest Run VCPU hcall failed\n");
+ if (rc == H_INVALID_ELEMENT_ID)
+ pr_err("KVM: Guest Run VCPU invalid element id at %ld\n", i);
+ else if (rc == H_INVALID_ELEMENT_SIZE)
+ pr_err("KVM: Guest Run VCPU invalid element size at %ld\n", i);
+ else if (rc == H_INVALID_ELEMENT_VALUE)
+ pr_err("KVM: Guest Run VCPU invalid element value at %ld\n", i);
+ return -EINVAL;
+ }
+ accumulate_time(vcpu, &vcpu->arch.guest_exit);
+
+ *tb = mftb();
+ kvmppc_gsm_reset(io->vcpu_message);
+ kvmppc_gsm_reset(io->vcore_message);
+ kvmppc_gsbm_zero(&io->valids);
+
+ rc = kvmhv_nestedv2_parse_output(vcpu);
+ if (rc < 0)
+ return -EINVAL;
+
+ timer_rearm_host_dec(*tb);
+
+ return trap;
+}
+
/* call our hypervisor to load up HV regs and go */
static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb)
{
- struct kvmppc_vcore *vc = vcpu->arch.vcore;
unsigned long host_psscr;
unsigned long msr;
struct hv_guest_state hvregs;
@@ -4118,7 +4196,7 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
dec = (s32) dec;
*tb = mftb();
- vcpu->arch.dec_expires = dec + (*tb + vc->tb_offset);
+ vcpu->arch.dec_expires = dec + (*tb + kvmppc_get_tb_offset(vcpu));
timer_rearm_host_dec(*tb);
@@ -4153,7 +4231,10 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu_vpa_increment_dispatch(vcpu);
if (kvmhv_on_pseries()) {
- trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb);
+ if (kvmhv_is_nestedv1())
+ trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb);
+ else
+ trap = kvmhv_vcpu_entry_nestedv2(vcpu, time_limit, lpcr, tb);
/* H_CEDE has to be handled now, not later */
if (trap == BOOK3S_INTERRUPT_SYSCALL && !nested &&
@@ -4176,7 +4257,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
__this_cpu_write(cpu_in_guest, NULL);
if (trap == BOOK3S_INTERRUPT_SYSCALL &&
- !(vcpu->arch.shregs.msr & MSR_PR)) {
+ !(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
unsigned long req = kvmppc_get_gpr(vcpu, 3);
/*
@@ -4655,7 +4736,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
if (!nested) {
kvmppc_core_prepare_to_enter(vcpu);
- if (vcpu->arch.shregs.msr & MSR_EE) {
+ if (__kvmppc_get_msr_hv(vcpu) & MSR_EE) {
if (xive_interrupt_pending(vcpu))
kvmppc_inject_interrupt_hv(vcpu,
BOOK3S_INTERRUPT_EXTERNAL, 0);
@@ -4677,7 +4758,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
tb = mftb();
- kvmppc_update_vpa_dispatch_p9(vcpu, vc, tb + vc->tb_offset);
+ kvmppc_update_vpa_dispatch_p9(vcpu, vc, tb + kvmppc_get_tb_offset(vcpu));
trace_kvm_guest_enter(vcpu);
@@ -4844,7 +4925,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
msr |= MSR_VSX;
if ((cpu_has_feature(CPU_FTR_TM) ||
cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
- (vcpu->arch.hfscr & HFSCR_TM))
+ (kvmppc_get_hfscr_hv(vcpu) & HFSCR_TM))
msr |= MSR_TM;
msr = msr_check_and_set(msr);
@@ -4868,7 +4949,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
accumulate_time(vcpu, &vcpu->arch.hcall);
- if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_PR)) {
+ if (WARN_ON_ONCE(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
/*
* These should have been caught reflected
* into the guest by now. Final sanity check:
@@ -5133,6 +5214,14 @@ void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
if (++cores_done >= kvm->arch.online_vcores)
break;
}
+
+ if (kvmhv_is_nestedv2()) {
+ struct kvm_vcpu *vcpu;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LPCR);
+ }
+ }
}
void kvmppc_setup_partition_table(struct kvm *kvm)
@@ -5399,15 +5488,43 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
/* Allocate the guest's logical partition ID */
- lpid = kvmppc_alloc_lpid();
- if ((long)lpid < 0)
- return -ENOMEM;
- kvm->arch.lpid = lpid;
+ if (!kvmhv_is_nestedv2()) {
+ lpid = kvmppc_alloc_lpid();
+ if ((long)lpid < 0)
+ return -ENOMEM;
+ kvm->arch.lpid = lpid;
+ }
kvmppc_alloc_host_rm_ops();
kvmhv_vm_nested_init(kvm);
+ if (kvmhv_is_nestedv2()) {
+ long rc;
+ unsigned long guest_id;
+
+ rc = plpar_guest_create(0, &guest_id);
+
+ if (rc != H_SUCCESS)
+ pr_err("KVM: Create Guest hcall failed, rc=%ld\n", rc);
+
+ switch (rc) {
+ case H_PARAMETER:
+ case H_FUNCTION:
+ case H_STATE:
+ return -EINVAL;
+ case H_NOT_ENOUGH_RESOURCES:
+ case H_ABORTED:
+ return -ENOMEM;
+ case H_AUTHORITY:
+ return -EPERM;
+ case H_NOT_AVAILABLE:
+ return -EBUSY;
+ }
+ kvm->arch.lpid = guest_id;
+ }
+
+
/*
* Since we don't flush the TLB when tearing down a VM,
* and this lpid might have previously been used,
@@ -5477,7 +5594,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
lpcr |= LPCR_HAIL;
ret = kvmppc_init_vm_radix(kvm);
if (ret) {
- kvmppc_free_lpid(kvm->arch.lpid);
+ if (kvmhv_is_nestedv2())
+ plpar_guest_delete(0, kvm->arch.lpid);
+ else
+ kvmppc_free_lpid(kvm->arch.lpid);
return ret;
}
kvmppc_setup_partition_table(kvm);
@@ -5567,10 +5687,14 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
kvm->arch.process_table = 0;
if (kvm->arch.secure_guest)
uv_svm_terminate(kvm->arch.lpid);
- kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
+ if (!kvmhv_is_nestedv2())
+ kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
}
- kvmppc_free_lpid(kvm->arch.lpid);
+ if (kvmhv_is_nestedv2())
+ plpar_guest_delete(0, kvm->arch.lpid);
+ else
+ kvmppc_free_lpid(kvm->arch.lpid);
kvmppc_free_pimap(kvm);
}
@@ -5982,6 +6106,8 @@ static int kvmhv_enable_nested(struct kvm *kvm)
return -ENODEV;
if (!radix_enabled())
return -ENODEV;
+ if (kvmhv_is_nestedv2())
+ return -ENODEV;
/* kvm == NULL means the caller is testing if the capability exists */
if (kvm)
diff --git a/arch/powerpc/kvm/book3s_hv.h b/arch/powerpc/kvm/book3s_hv.h
index 2f2e59d7d433..47b2c815641e 100644
--- a/arch/powerpc/kvm/book3s_hv.h
+++ b/arch/powerpc/kvm/book3s_hv.h
@@ -3,6 +3,8 @@
/*
* Privileged (non-hypervisor) host registers to save.
*/
+#include "asm/guest-state-buffer.h"
+
struct p9_host_os_sprs {
unsigned long iamr;
unsigned long amr;
@@ -50,3 +52,77 @@ void accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next);
#define start_timing(vcpu, next) do {} while (0)
#define end_timing(vcpu) do {} while (0)
#endif
+
+static inline void __kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 val)
+{
+ vcpu->arch.shregs.msr = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_MSR);
+}
+
+static inline u64 __kvmppc_get_msr_hv(struct kvm_vcpu *vcpu)
+{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_MSR) < 0);
+ return vcpu->arch.shregs.msr;
+}
+
+#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size, iden) \
+static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, u##size val) \
+{ \
+ vcpu->arch.reg = val; \
+ kvmhv_nestedv2_mark_dirty(vcpu, iden); \
+}
+
+#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size, iden) \
+static inline u##size kvmppc_get_##reg ##_hv(struct kvm_vcpu *vcpu) \
+{ \
+ kvmhv_nestedv2_cached_reload(vcpu, iden); \
+ return vcpu->arch.reg; \
+}
+
+#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(reg, size, iden) \
+ KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size, iden) \
+ KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size, iden) \
+
+#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size, iden) \
+static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, int i, u##size val) \
+{ \
+ vcpu->arch.reg[i] = val; \
+ kvmhv_nestedv2_mark_dirty(vcpu, iden(i)); \
+}
+
+#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size, iden) \
+static inline u##size kvmppc_get_##reg ##_hv(struct kvm_vcpu *vcpu, int i) \
+{ \
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden(i)) < 0); \
+ return vcpu->arch.reg[i]; \
+}
+
+#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(reg, size, iden) \
+ KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size, iden) \
+ KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size, iden) \
+
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(mmcra, 64, KVMPPC_GSID_MMCRA)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(hfscr, 64, KVMPPC_GSID_HFSCR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(fscr, 64, KVMPPC_GSID_FSCR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dscr, 64, KVMPPC_GSID_DSCR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(purr, 64, KVMPPC_GSID_PURR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(spurr, 64, KVMPPC_GSID_SPURR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(amr, 64, KVMPPC_GSID_AMR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(uamor, 64, KVMPPC_GSID_UAMOR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(siar, 64, KVMPPC_GSID_SIAR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(sdar, 64, KVMPPC_GSID_SDAR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(iamr, 64, KVMPPC_GSID_IAMR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr0, 64, KVMPPC_GSID_DAWR0)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr1, 64, KVMPPC_GSID_DAWR1)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx0, 64, KVMPPC_GSID_DAWRX0)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx1, 64, KVMPPC_GSID_DAWRX1)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ciabr, 64, KVMPPC_GSID_CIABR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(wort, 64, KVMPPC_GSID_WORT)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ppr, 64, KVMPPC_GSID_PPR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ctrl, 64, KVMPPC_GSID_CTRL);
+
+KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(mmcr, 64, KVMPPC_GSID_MMCR)
+KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(sier, 64, KVMPPC_GSID_SIER)
+KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(pmc, 32, KVMPPC_GSID_PMC)
+
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(pspb, 32, KVMPPC_GSID_PSPB)
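
For reference, this is roughly what one instance of the accessor macros above generates, expanded by hand for KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr0, 64, KVMPPC_GSID_DAWR0); treat it as a sketch of the preprocessor output rather than verbatim compiler input:

/* Hand-expanded sketch of KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr0, 64, KVMPPC_GSID_DAWR0) */
static inline void kvmppc_set_dawr0_hv(struct kvm_vcpu *vcpu, u64 val)
{
	vcpu->arch.dawr0 = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_DAWR0);
}

static inline u64 kvmppc_get_dawr0_hv(struct kvm_vcpu *vcpu)
{
	kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_DAWR0);
	return vcpu->arch.dawr0;
}
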
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 0f5b021fa559..fa0e3a22cac0 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -32,6 +32,7 @@
#include "book3s_xics.h"
#include "book3s_xive.h"
+#include "book3s_hv.h"
/*
* Hash page table alignment on newer cpus(CPU_FTR_ARCH_206)
@@ -182,9 +183,13 @@ EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
long kvmppc_rm_h_random(struct kvm_vcpu *vcpu)
{
+ unsigned long rand;
+
if (ppc_md.get_random_seed &&
- ppc_md.get_random_seed(&vcpu->arch.regs.gpr[4]))
+ ppc_md.get_random_seed(&rand)) {
+ kvmppc_set_gpr(vcpu, 4, rand);
return H_SUCCESS;
+ }
return H_HARDWARE;
}
@@ -510,7 +515,7 @@ void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
*/
if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
msr &= ~MSR_TS_MASK;
- vcpu->arch.shregs.msr = msr;
+ __kvmppc_set_msr_hv(vcpu, msr);
kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv);
@@ -548,7 +553,7 @@ static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
kvmppc_set_srr0(vcpu, pc);
kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
kvmppc_set_pc(vcpu, new_pc);
- vcpu->arch.shregs.msr = new_msr;
+ __kvmppc_set_msr_hv(vcpu, new_msr);
}
void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 377d0b4a05ee..3b658b8696bc 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -428,10 +428,12 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
return vcpu->arch.trap;
}
+unsigned long nested_capabilities;
+
long kvmhv_nested_init(void)
{
long int ptb_order;
- unsigned long ptcr;
+ unsigned long ptcr, host_capabilities;
long rc;
if (!kvmhv_on_pseries())
@@ -439,6 +441,29 @@ long kvmhv_nested_init(void)
if (!radix_enabled())
return -ENODEV;
+ rc = plpar_guest_get_capabilities(0, &host_capabilities);
+ if (rc == H_SUCCESS) {
+ unsigned long capabilities = 0;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ capabilities |= H_GUEST_CAP_POWER10;
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ capabilities |= H_GUEST_CAP_POWER9;
+
+ nested_capabilities = capabilities & host_capabilities;
+ rc = plpar_guest_set_capabilities(0, nested_capabilities);
+ if (rc != H_SUCCESS) {
+ pr_err("kvm-hv: Could not configure parent hypervisor capabilities (rc=%ld)",
+ rc);
+ return -ENODEV;
+ }
+
+ static_branch_enable(&__kvmhv_is_nestedv2);
+ return 0;
+ }
+
+ pr_info("kvm-hv: nestedv2 get capabilities hcall failed, falling back to nestedv1 (rc=%ld)\n",
+ rc);
/* Partition table entry is 1<<4 bytes in size, hence the 4. */
ptb_order = KVM_MAX_NESTED_GUESTS_SHIFT + 4;
/* Minimum partition table size is 1<<12 bytes */
@@ -478,7 +503,7 @@ void kvmhv_nested_exit(void)
}
}
-static void kvmhv_flush_lpid(unsigned int lpid)
+static void kvmhv_flush_lpid(u64 lpid)
{
long rc;
@@ -500,17 +525,22 @@ static void kvmhv_flush_lpid(unsigned int lpid)
pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
}
-void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1)
+void kvmhv_set_ptbl_entry(u64 lpid, u64 dw0, u64 dw1)
{
if (!kvmhv_on_pseries()) {
mmu_partition_table_set_entry(lpid, dw0, dw1, true);
return;
}
- pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
- pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
- /* L0 will do the necessary barriers */
- kvmhv_flush_lpid(lpid);
+ if (kvmhv_is_nestedv1()) {
+ pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
+ pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
+ /* L0 will do the necessary barriers */
+ kvmhv_flush_lpid(lpid);
+ }
+
+ if (kvmhv_is_nestedv2())
+ kvmhv_nestedv2_set_ptbl_entry(lpid, dw0, dw1);
}
static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
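
kvmhv_nested_init() above advertises to the parent hypervisor only the intersection of the capabilities this kernel can provide (derived from CPU feature bits) and those the host reports; kvmppc_set_arch_compat() earlier in this series then refuses any compat mode whose capability bit did not survive that intersection. A runnable sketch of the negotiation (the bit values are illustrative, not the real H_GUEST_CAP_* constants):

/* Sketch only: capability negotiation as a bitmask intersection. */
#include <stdio.h>

#define CAP_POWER9	(1UL << 0)
#define CAP_POWER10	(1UL << 1)

int main(void)
{
	unsigned long host_caps = CAP_POWER9;			/* what the L0 reports */
	unsigned long our_caps = CAP_POWER9 | CAP_POWER10;	/* what this kernel supports */
	unsigned long nested_caps = our_caps & host_caps;	/* what may actually be used */

	printf("negotiated capabilities: %#lx\n", nested_caps);
	return 0;
}
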
diff --git a/arch/powerpc/kvm/book3s_hv_nestedv2.c b/arch/powerpc/kvm/book3s_hv_nestedv2.c
new file mode 100644
index 000000000000..fd3c4f2d9480
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_nestedv2.c
@@ -0,0 +1,994 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Jordan Niethe, IBM Corp. <jniethe5@gmail.com>
+ *
+ * Authors:
+ * Jordan Niethe <jniethe5@gmail.com>
+ *
+ * Description: KVM functions specific to running on Book 3S
+ * processors as a NESTEDv2 guest.
+ *
+ */
+
+#include "linux/blk-mq.h"
+#include "linux/console.h"
+#include "linux/gfp_types.h"
+#include "linux/signal.h"
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/pgtable.h>
+
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/hvcall.h>
+#include <asm/pgalloc.h>
+#include <asm/reg.h>
+#include <asm/plpar_wrappers.h>
+#include <asm/guest-state-buffer.h>
+#include "trace_hv.h"
+
+struct static_key_false __kvmhv_is_nestedv2 __read_mostly;
+EXPORT_SYMBOL_GPL(__kvmhv_is_nestedv2);
+
+
+static size_t
+gs_msg_ops_kvmhv_nestedv2_config_get_size(struct kvmppc_gs_msg *gsm)
+{
+ u16 ids[] = {
+ KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE,
+ KVMPPC_GSID_RUN_INPUT,
+ KVMPPC_GSID_RUN_OUTPUT,
+
+ };
+ size_t size = 0;
+
+ for (int i = 0; i < ARRAY_SIZE(ids); i++)
+ size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i]));
+ return size;
+}
+
+static int
+gs_msg_ops_kvmhv_nestedv2_config_fill_info(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm)
+{
+ struct kvmhv_nestedv2_config *cfg;
+ int rc;
+
+ cfg = gsm->data;
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE)) {
+ rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE,
+ cfg->vcpu_run_output_size);
+ if (rc < 0)
+ return rc;
+ }
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_INPUT)) {
+ rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_INPUT,
+ cfg->vcpu_run_input_cfg);
+ if (rc < 0)
+ return rc;
+ }
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT)) {
+ kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_OUTPUT,
+ cfg->vcpu_run_output_cfg);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int
+gs_msg_ops_kvmhv_nestedv2_config_refresh_info(struct kvmppc_gs_msg *gsm,
+ struct kvmppc_gs_buff *gsb)
+{
+ struct kvmhv_nestedv2_config *cfg;
+ struct kvmppc_gs_parser gsp = { 0 };
+ struct kvmppc_gs_elem *gse;
+ int rc;
+
+ cfg = gsm->data;
+
+ rc = kvmppc_gse_parse(&gsp, gsb);
+ if (rc < 0)
+ return rc;
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
+ if (gse)
+ cfg->vcpu_run_output_size = kvmppc_gse_get_u64(gse);
+ return 0;
+}
+
+static struct kvmppc_gs_msg_ops config_msg_ops = {
+ .get_size = gs_msg_ops_kvmhv_nestedv2_config_get_size,
+ .fill_info = gs_msg_ops_kvmhv_nestedv2_config_fill_info,
+ .refresh_info = gs_msg_ops_kvmhv_nestedv2_config_refresh_info,
+};
+
+static size_t gs_msg_ops_vcpu_get_size(struct kvmppc_gs_msg *gsm)
+{
+ struct kvmppc_gs_bitmap gsbm = { 0 };
+ size_t size = 0;
+ u16 iden;
+
+ kvmppc_gsbm_fill(&gsbm);
+ kvmppc_gsbm_for_each(&gsbm, iden)
+ {
+ switch (iden) {
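+ /* Guest-wide and meta elements are configured separately and are
+ * not counted towards the per-vcpu run message size. */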
+ case KVMPPC_GSID_HOST_STATE_SIZE:
+ case KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE:
+ case KVMPPC_GSID_PARTITION_TABLE:
+ case KVMPPC_GSID_PROCESS_TABLE:
+ case KVMPPC_GSID_RUN_INPUT:
+ case KVMPPC_GSID_RUN_OUTPUT:
+ break;
+ default:
+ size += kvmppc_gse_total_size(kvmppc_gsid_size(iden));
+ }
+ }
+ return size;
+}
+
+static int gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm)
+{
+ struct kvm_vcpu *vcpu;
+ vector128 v;
+ int rc, i;
+ u16 iden;
+
+ vcpu = gsm->data;
+
+ kvmppc_gsm_for_each(gsm, iden)
+ {
+ rc = 0;
+
+ if ((gsm->flags & KVMPPC_GS_FLAGS_WIDE) !=
+ (kvmppc_gsid_flags(iden) & KVMPPC_GS_FLAGS_WIDE))
+ continue;
+
+ switch (iden) {
+ case KVMPPC_GSID_DSCR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dscr);
+ break;
+ case KVMPPC_GSID_MMCRA:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcra);
+ break;
+ case KVMPPC_GSID_HFSCR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hfscr);
+ break;
+ case KVMPPC_GSID_PURR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.purr);
+ break;
+ case KVMPPC_GSID_SPURR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.spurr);
+ break;
+ case KVMPPC_GSID_AMR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.amr);
+ break;
+ case KVMPPC_GSID_UAMOR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.uamor);
+ break;
+ case KVMPPC_GSID_SIAR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.siar);
+ break;
+ case KVMPPC_GSID_SDAR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sdar);
+ break;
+ case KVMPPC_GSID_IAMR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.iamr);
+ break;
+ case KVMPPC_GSID_DAWR0:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr0);
+ break;
+ case KVMPPC_GSID_DAWR1:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr1);
+ break;
+ case KVMPPC_GSID_DAWRX0:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx0);
+ break;
+ case KVMPPC_GSID_DAWRX1:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx1);
+ break;
+ case KVMPPC_GSID_CIABR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ciabr);
+ break;
+ case KVMPPC_GSID_WORT:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.wort);
+ break;
+ case KVMPPC_GSID_PPR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ppr);
+ break;
+ case KVMPPC_GSID_PSPB:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pspb);
+ break;
+ case KVMPPC_GSID_TAR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.tar);
+ break;
+ case KVMPPC_GSID_FSCR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fscr);
+ break;
+ case KVMPPC_GSID_EBBHR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbhr);
+ break;
+ case KVMPPC_GSID_EBBRR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbrr);
+ break;
+ case KVMPPC_GSID_BESCR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.bescr);
+ break;
+ case KVMPPC_GSID_IC:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ic);
+ break;
+ case KVMPPC_GSID_CTRL:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ctrl);
+ break;
+ case KVMPPC_GSID_PIDR:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pid);
+ break;
+ case KVMPPC_GSID_AMOR: {
+ u64 amor = ~0;
+
+ rc = kvmppc_gse_put_u64(gsb, iden, amor);
+ break;
+ }
+ case KVMPPC_GSID_VRSAVE:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.vrsave);
+ break;
+ case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3):
+ i = iden - KVMPPC_GSID_MMCR(0);
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcr[i]);
+ break;
+ case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2):
+ i = iden - KVMPPC_GSID_SIER(0);
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sier[i]);
+ break;
+ case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5):
+ i = iden - KVMPPC_GSID_PMC(0);
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pmc[i]);
+ break;
+ case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31):
+ i = iden - KVMPPC_GSID_GPR(0);
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.regs.gpr[i]);
+ break;
+ case KVMPPC_GSID_CR:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.regs.ccr);
+ break;
+ case KVMPPC_GSID_XER:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.xer);
+ break;
+ case KVMPPC_GSID_CTR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.ctr);
+ break;
+ case KVMPPC_GSID_LR:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.regs.link);
+ break;
+ case KVMPPC_GSID_NIA:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.nip);
+ break;
+ case KVMPPC_GSID_SRR0:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.srr0);
+ break;
+ case KVMPPC_GSID_SRR1:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.srr1);
+ break;
+ case KVMPPC_GSID_SPRG0:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.sprg0);
+ break;
+ case KVMPPC_GSID_SPRG1:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.sprg1);
+ break;
+ case KVMPPC_GSID_SPRG2:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.sprg2);
+ break;
+ case KVMPPC_GSID_SPRG3:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.sprg3);
+ break;
+ case KVMPPC_GSID_DAR:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.dar);
+ break;
+ case KVMPPC_GSID_DSISR:
+ rc = kvmppc_gse_put_u32(gsb, iden,
+ vcpu->arch.shregs.dsisr);
+ break;
+ case KVMPPC_GSID_MSR:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.msr);
+ break;
+ case KVMPPC_GSID_VTB:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.vcore->vtb);
+ break;
+ case KVMPPC_GSID_LPCR:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.vcore->lpcr);
+ break;
+ case KVMPPC_GSID_TB_OFFSET:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.vcore->tb_offset);
+ break;
+ case KVMPPC_GSID_FPSCR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fp.fpscr);
+ break;
+ case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31):
+ i = iden - KVMPPC_GSID_VSRS(0);
+ memcpy(&v, &vcpu->arch.fp.fpr[i],
+ sizeof(vcpu->arch.fp.fpr[i]));
+ rc = kvmppc_gse_put_vector128(gsb, iden, &v);
+ break;
+#ifdef CONFIG_VSX
+ case KVMPPC_GSID_VSCR:
+ rc = kvmppc_gse_put_u32(gsb, iden,
+ vcpu->arch.vr.vscr.u[3]);
+ break;
+ case KVMPPC_GSID_VSRS(32)... KVMPPC_GSID_VSRS(63):
+ i = iden - KVMPPC_GSID_VSRS(32);
+ rc = kvmppc_gse_put_vector128(gsb, iden,
+ &vcpu->arch.vr.vr[i]);
+ break;
+#endif
+ case KVMPPC_GSID_DEC_EXPIRY_TB: {
+ u64 dw;
+
+ dw = vcpu->arch.dec_expires -
+ vcpu->arch.vcore->tb_offset;
+ rc = kvmppc_gse_put_u64(gsb, iden, dw);
+ break;
+ }
+ case KVMPPC_GSID_LOGICAL_PVR:
+ rc = kvmppc_gse_put_u32(gsb, iden,
+ vcpu->arch.vcore->arch_compat);
+ break;
+ }
+
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int gs_msg_ops_vcpu_refresh_info(struct kvmppc_gs_msg *gsm,
+ struct kvmppc_gs_buff *gsb)
+{
+ struct kvmppc_gs_parser gsp = { 0 };
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_bitmap *valids;
+ struct kvm_vcpu *vcpu;
+ struct kvmppc_gs_elem *gse;
+ vector128 v;
+ int rc, i;
+ u16 iden;
+
+ vcpu = gsm->data;
+
+ rc = kvmppc_gse_parse(&gsp, gsb);
+ if (rc < 0)
+ return rc;
+
+ io = &vcpu->arch.nestedv2_io;
+ valids = &io->valids;
+
+ kvmppc_gsp_for_each(&gsp, iden, gse)
+ {
+ switch (iden) {
+ case KVMPPC_GSID_DSCR:
+ vcpu->arch.dscr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_MMCRA:
+ vcpu->arch.mmcra = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_HFSCR:
+ vcpu->arch.hfscr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_PURR:
+ vcpu->arch.purr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SPURR:
+ vcpu->arch.spurr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_AMR:
+ vcpu->arch.amr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_UAMOR:
+ vcpu->arch.uamor = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SIAR:
+ vcpu->arch.siar = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SDAR:
+ vcpu->arch.sdar = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_IAMR:
+ vcpu->arch.iamr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_DAWR0:
+ vcpu->arch.dawr0 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_DAWR1:
+ vcpu->arch.dawr1 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_DAWRX0:
+ vcpu->arch.dawrx0 = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_DAWRX1:
+ vcpu->arch.dawrx1 = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_CIABR:
+ vcpu->arch.ciabr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_WORT:
+ vcpu->arch.wort = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_PPR:
+ vcpu->arch.ppr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_PSPB:
+ vcpu->arch.pspb = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_TAR:
+ vcpu->arch.tar = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_FSCR:
+ vcpu->arch.fscr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_EBBHR:
+ vcpu->arch.ebbhr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_EBBRR:
+ vcpu->arch.ebbrr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_BESCR:
+ vcpu->arch.bescr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_IC:
+ vcpu->arch.ic = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_CTRL:
+ vcpu->arch.ctrl = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_PIDR:
+ vcpu->arch.pid = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_AMOR:
+ break;
+ case KVMPPC_GSID_VRSAVE:
+ vcpu->arch.vrsave = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3):
+ i = iden - KVMPPC_GSID_MMCR(0);
+ vcpu->arch.mmcr[i] = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2):
+ i = iden - KVMPPC_GSID_SIER(0);
+ vcpu->arch.sier[i] = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5):
+ i = iden - KVMPPC_GSID_PMC(0);
+ vcpu->arch.pmc[i] = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31):
+ i = iden - KVMPPC_GSID_GPR(0);
+ vcpu->arch.regs.gpr[i] = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_CR:
+ vcpu->arch.regs.ccr = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_XER:
+ vcpu->arch.regs.xer = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_CTR:
+ vcpu->arch.regs.ctr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_LR:
+ vcpu->arch.regs.link = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_NIA:
+ vcpu->arch.regs.nip = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SRR0:
+ vcpu->arch.shregs.srr0 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SRR1:
+ vcpu->arch.shregs.srr1 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SPRG0:
+ vcpu->arch.shregs.sprg0 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SPRG1:
+ vcpu->arch.shregs.sprg1 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SPRG2:
+ vcpu->arch.shregs.sprg2 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SPRG3:
+ vcpu->arch.shregs.sprg3 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_DAR:
+ vcpu->arch.shregs.dar = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_DSISR:
+ vcpu->arch.shregs.dsisr = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_MSR:
+ vcpu->arch.shregs.msr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_VTB:
+ vcpu->arch.vcore->vtb = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_LPCR:
+ vcpu->arch.vcore->lpcr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_TB_OFFSET:
+ vcpu->arch.vcore->tb_offset = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_FPSCR:
+ vcpu->arch.fp.fpscr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31):
+ kvmppc_gse_get_vector128(gse, &v);
+ i = iden - KVMPPC_GSID_VSRS(0);
+ memcpy(&vcpu->arch.fp.fpr[i], &v,
+ sizeof(vcpu->arch.fp.fpr[i]));
+ break;
+#ifdef CONFIG_VSX
+ case KVMPPC_GSID_VSCR:
+ vcpu->arch.vr.vscr.u[3] = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_VSRS(32)... KVMPPC_GSID_VSRS(63):
+ i = iden - KVMPPC_GSID_VSRS(32);
+ kvmppc_gse_get_vector128(gse, &vcpu->arch.vr.vr[i]);
+ break;
+#endif
+ case KVMPPC_GSID_HDAR:
+ vcpu->arch.fault_dar = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_HDSISR:
+ vcpu->arch.fault_dsisr = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_ASDR:
+ vcpu->arch.fault_gpa = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_HEIR:
+ vcpu->arch.emul_inst = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_DEC_EXPIRY_TB: {
+ u64 dw;
+
+ dw = kvmppc_gse_get_u64(gse);
+ vcpu->arch.dec_expires =
+ dw + vcpu->arch.vcore->tb_offset;
+ break;
+ }
+ case KVMPPC_GSID_LOGICAL_PVR:
+ vcpu->arch.vcore->arch_compat = kvmppc_gse_get_u32(gse);
+ break;
+ default:
+ continue;
+ }
+ kvmppc_gsbm_set(valids, iden);
+ }
+
+ return 0;
+}
+
+static struct kvmppc_gs_msg_ops vcpu_message_ops = {
+ .get_size = gs_msg_ops_vcpu_get_size,
+ .fill_info = gs_msg_ops_vcpu_fill_info,
+ .refresh_info = gs_msg_ops_vcpu_refresh_info,
+};
+
+static int kvmhv_nestedv2_host_create(struct kvm_vcpu *vcpu,
+ struct kvmhv_nestedv2_io *io)
+{
+ struct kvmhv_nestedv2_config *cfg;
+ struct kvmppc_gs_buff *gsb, *vcpu_run_output, *vcpu_run_input;
+ unsigned long guest_id, vcpu_id;
+ struct kvmppc_gs_msg *gsm, *vcpu_message, *vcore_message;
+ int rc;
+
+ cfg = &io->cfg;
+ guest_id = vcpu->kvm->arch.lpid;
+ vcpu_id = vcpu->vcpu_id;
+
+ gsm = kvmppc_gsm_new(&config_msg_ops, cfg, KVMPPC_GS_FLAGS_WIDE,
+ GFP_KERNEL);
+ if (!gsm) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), guest_id, vcpu_id,
+ GFP_KERNEL);
+ if (!gsb) {
+ rc = -ENOMEM;
+ goto free_gsm;
+ }
+
+ rc = kvmppc_gsb_receive_datum(gsb, gsm,
+ KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't get vcpu run output buffer minimum size\n");
+ goto free_gsb;
+ }
+
+ vcpu_run_output = kvmppc_gsb_new(cfg->vcpu_run_output_size, guest_id,
+ vcpu_id, GFP_KERNEL);
+ if (!vcpu_run_output) {
+ rc = -ENOMEM;
+ goto free_gsb;
+ }
+
+ cfg->vcpu_run_output_cfg.address = kvmppc_gsb_paddress(vcpu_run_output);
+ cfg->vcpu_run_output_cfg.size = kvmppc_gsb_capacity(vcpu_run_output);
+ io->vcpu_run_output = vcpu_run_output;
+
+ gsm->flags = 0;
+ rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_OUTPUT);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't set vcpu run output buffer\n");
+ goto free_gs_out;
+ }
+
+ vcpu_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu, 0, GFP_KERNEL);
+ if (!vcpu_message) {
+ rc = -ENOMEM;
+ goto free_gs_out;
+ }
+ kvmppc_gsm_include_all(vcpu_message);
+
+ io->vcpu_message = vcpu_message;
+
+ vcpu_run_input = kvmppc_gsb_new(kvmppc_gsm_size(vcpu_message), guest_id,
+ vcpu_id, GFP_KERNEL);
+ if (!vcpu_run_input) {
+ rc = -ENOMEM;
+ goto free_vcpu_message;
+ }
+
+ io->vcpu_run_input = vcpu_run_input;
+ cfg->vcpu_run_input_cfg.address = kvmppc_gsb_paddress(vcpu_run_input);
+ cfg->vcpu_run_input_cfg.size = kvmppc_gsb_capacity(vcpu_run_input);
+ rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_INPUT);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't set vcpu run input buffer\n");
+ goto free_vcpu_run_input;
+ }
+
+ vcore_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu,
+ KVMPPC_GS_FLAGS_WIDE, GFP_KERNEL);
+ if (!vcore_message) {
+ rc = -ENOMEM;
+ goto free_vcpu_run_input;
+ }
+
+ kvmppc_gsm_include_all(vcore_message);
+ kvmppc_gsbm_clear(&vcore_message->bitmap, KVMPPC_GSID_LOGICAL_PVR);
+ io->vcore_message = vcore_message;
+
+ kvmppc_gsbm_fill(&io->valids);
+ kvmppc_gsm_free(gsm);
+ kvmppc_gsb_free(gsb);
+ return 0;
+
+free_vcpu_run_input:
+ kvmppc_gsb_free(vcpu_run_input);
+free_vcpu_message:
+ kvmppc_gsm_free(vcpu_message);
+free_gs_out:
+ kvmppc_gsb_free(vcpu_run_output);
+free_gsb:
+ kvmppc_gsb_free(gsb);
+free_gsm:
+ kvmppc_gsm_free(gsm);
+err:
+ return rc;
+}
+
+/**
+ * __kvmhv_nestedv2_mark_dirty() - mark a Guest State ID to be sent to the host
+ * @vcpu: vcpu
+ * @iden: guest state ID
+ *
+ * Mark a guest state ID as having been changed by the L1 host and thus
+ * the new value must be sent to the L0 hypervisor. See kvmhv_nestedv2_flush_vcpu()
+ */
+int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
+{
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_bitmap *valids;
+ struct kvmppc_gs_msg *gsm;
+
+ if (!iden)
+ return 0;
+
+ io = &vcpu->arch.nestedv2_io;
+ valids = &io->valids;
+ gsm = io->vcpu_message;
+ kvmppc_gsm_include(gsm, iden);
+ gsm = io->vcore_message;
+ kvmppc_gsm_include(gsm, iden);
+ kvmppc_gsbm_set(valids, iden);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty);
+
+/**
+ * __kvmhv_nestedv2_cached_reload() - reload a Guest State ID from the host
+ * @vcpu: vcpu
+ * @iden: guest state ID
+ *
+ * Reload the value for the guest state ID from the L0 host into the L1 host.
+ * This is cached so that going out to the L0 host only happens if necessary.
+ */
+int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
+{
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_bitmap *valids;
+ struct kvmppc_gs_buff *gsb;
+ struct kvmppc_gs_msg gsm;
+ int rc;
+
+ if (!iden)
+ return 0;
+
+ io = &vcpu->arch.nestedv2_io;
+ valids = &io->valids;
+ if (kvmppc_gsbm_test(valids, iden))
+ return 0;
+
+ gsb = io->vcpu_run_input;
+ kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, kvmppc_gsid_flags(iden));
+ rc = kvmppc_gsb_receive_datum(gsb, &gsm, iden);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't get GSID: 0x%x\n", iden);
+ return rc;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_cached_reload);
+
+/**
+ * kvmhv_nestedv2_flush_vcpu() - send modified Guest State IDs to the host
+ * @vcpu: vcpu
+ * @time_limit: hdec expiry tb
+ *
+ * Send the values marked by __kvmhv_nestedv2_mark_dirty() to the L0 host.
+ * Thread wide values are copied to the H_GUEST_RUN_VCPU input buffer. Guest
+ * wide values need to be sent with H_GUEST_SET_STATE first.
+ *
+ * The hdec expiry tb is always sent to the L0 host.
+ */
+int kvmhv_nestedv2_flush_vcpu(struct kvm_vcpu *vcpu, u64 time_limit)
+{
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_buff *gsb;
+ struct kvmppc_gs_msg *gsm;
+ int rc;
+
+ io = &vcpu->arch.nestedv2_io;
+ gsb = io->vcpu_run_input;
+ gsm = io->vcore_message;
+ rc = kvmppc_gsb_send_data(gsb, gsm);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't set guest wide elements\n");
+ return rc;
+ }
+
+ gsm = io->vcpu_message;
+ kvmppc_gsb_reset(gsb);
+ rc = kvmppc_gsm_fill_info(gsm, gsb);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't fill vcpu run input buffer\n");
+ return rc;
+ }
+
+ rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_HDEC_EXPIRY_TB, time_limit);
+ if (rc < 0)
+ return rc;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvmhv_nestedv2_flush_vcpu);
+
+/**
+ * kvmhv_nestedv2_set_ptbl_entry() - send partition and process table state to
+ * L0 host
+ * @lpid: guest id
+ * @dw0: partition table double word
+ * @dw1: process table double word
+ */
+int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1)
+{
+ struct kvmppc_gs_part_table patbl;
+ struct kvmppc_gs_proc_table prtbl;
+ struct kvmppc_gs_buff *gsb;
+ size_t size;
+ int rc;
+
+ size = kvmppc_gse_total_size(
+ kvmppc_gsid_size(KVMPPC_GSID_PARTITION_TABLE)) +
+ kvmppc_gse_total_size(
+ kvmppc_gsid_size(KVMPPC_GSID_PROCESS_TABLE)) +
+ sizeof(struct kvmppc_gs_header);
+ gsb = kvmppc_gsb_new(size, lpid, 0, GFP_KERNEL);
+ if (!gsb)
+ return -ENOMEM;
+
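+ /*
+ * Informational note on the PATE layout assumed here: the radix tree
+ * size (RTS) field of dw0 is split in two and ea_bits = RTS + 31, the
+ * guest page directory size is 1 << (RPDS + 3), and the process table
+ * size in dw1 is 1 << (PRTS + 12).
+ */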
+ patbl.address = dw0 & RPDB_MASK;
+ patbl.ea_bits = ((((dw0 & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
+ ((dw0 & RTS2_MASK) >> RTS2_SHIFT)) +
+ 31);
+ patbl.gpd_size = 1ul << ((dw0 & RPDS_MASK) + 3);
+ rc = kvmppc_gse_put_part_table(gsb, KVMPPC_GSID_PARTITION_TABLE, patbl);
+ if (rc < 0)
+ goto free_gsb;
+
+ prtbl.address = dw1 & PRTB_MASK;
+ prtbl.gpd_size = 1ul << ((dw1 & PRTS_MASK) + 12);
+ rc = kvmppc_gse_put_proc_table(gsb, KVMPPC_GSID_PROCESS_TABLE, prtbl);
+ if (rc < 0)
+ goto free_gsb;
+
+ rc = kvmppc_gsb_send(gsb, KVMPPC_GS_FLAGS_WIDE);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't set the PATE\n");
+ goto free_gsb;
+ }
+
+ kvmppc_gsb_free(gsb);
+ return 0;
+
+free_gsb:
+ kvmppc_gsb_free(gsb);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_ptbl_entry);
+
+/**
+ * kvmhv_nestedv2_parse_output() - receive values from H_GUEST_RUN_VCPU output
+ * @vcpu: vcpu
+ *
+ * Parse the output buffer from H_GUEST_RUN_VCPU to update vcpu.
+ */
+int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu)
+{
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_buff *gsb;
+ struct kvmppc_gs_msg gsm;
+
+ io = &vcpu->arch.nestedv2_io;
+ gsb = io->vcpu_run_output;
+
+ vcpu->arch.fault_dar = 0;
+ vcpu->arch.fault_dsisr = 0;
+ vcpu->arch.fault_gpa = 0;
+ vcpu->arch.emul_inst = KVM_INST_FETCH_FAILED;
+
+ kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0);
+ return kvmppc_gsm_refresh_info(&gsm, gsb);
+}
+EXPORT_SYMBOL_GPL(kvmhv_nestedv2_parse_output);
+
+static void kvmhv_nestedv2_host_free(struct kvm_vcpu *vcpu,
+ struct kvmhv_nestedv2_io *io)
+{
+ kvmppc_gsm_free(io->vcpu_message);
+ kvmppc_gsm_free(io->vcore_message);
+ kvmppc_gsb_free(io->vcpu_run_input);
+ kvmppc_gsb_free(io->vcpu_run_output);
+}
+
+int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs)
+{
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_bitmap *valids;
+ struct kvmppc_gs_buff *gsb;
+ struct kvmppc_gs_msg gsm;
+ int rc = 0;
+
+
+ io = &vcpu->arch.nestedv2_io;
+ valids = &io->valids;
+
+ gsb = io->vcpu_run_input;
+ kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0);
+
+ for (int i = 0; i < 32; i++) {
+ if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_GPR(i)))
+ kvmppc_gsm_include(&gsm, KVMPPC_GSID_GPR(i));
+ }
+
+ if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CR))
+ kvmppc_gsm_include(&gsm, KVMPPC_GSID_CR);
+
+ if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_XER))
+ kvmppc_gsm_include(&gsm, KVMPPC_GSID_XER);
+
+ if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CTR))
+ kvmppc_gsm_include(&gsm, KVMPPC_GSID_CTR);
+
+ if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_LR))
+ kvmppc_gsm_include(&gsm, KVMPPC_GSID_LR);
+
+ if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_NIA))
+ kvmppc_gsm_include(&gsm, KVMPPC_GSID_NIA);
+
+ rc = kvmppc_gsb_receive_data(gsb, &gsm);
+ if (rc < 0)
+ pr_err("KVM-NESTEDv2: couldn't reload ptregs\n");
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_reload_ptregs);
+
+int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
+ struct pt_regs *regs)
+{
+ for (int i = 0; i < 32; i++)
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(i));
+
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty_ptregs);
+
+/**
+ * kvmhv_nestedv2_vcpu_create() - create nested vcpu for the NESTEDv2 API
+ * @vcpu: vcpu
+ * @io: NESTEDv2 nested io state
+ *
+ * Create the vcpu in the L0 host with H_GUEST_CREATE_VCPU and allocate the
+ * NESTEDv2 run input/output buffers and messages.
+ */
+int kvmhv_nestedv2_vcpu_create(struct kvm_vcpu *vcpu,
+ struct kvmhv_nestedv2_io *io)
+{
+ long rc;
+
+ rc = plpar_guest_create_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id);
+
+ if (rc != H_SUCCESS) {
+ pr_err("KVM: Create Guest vcpu hcall failed, rc=%ld\n", rc);
+ switch (rc) {
+ case H_NOT_ENOUGH_RESOURCES:
+ case H_ABORTED:
+ return -ENOMEM;
+ case H_AUTHORITY:
+ return -EPERM;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ rc = kvmhv_nestedv2_host_create(vcpu, io);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_create);
+
+/**
+ * kvmhv_nestedv2_vcpu_free() - free the NESTEDv2 state
+ * @vcpu: vcpu
+ * @io: NESTEDv2 nested io state
+ */
+void kvmhv_nestedv2_vcpu_free(struct kvm_vcpu *vcpu,
+ struct kvmhv_nestedv2_io *io)
+{
+ kvmhv_nestedv2_host_free(vcpu, io);
+}
+EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_free);
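Taken together, these exported helpers form the per-vcpu lifecycle for the NESTEDv2 API. A minimal sketch of how a caller might drive them (hypothetical, not part of this patch; error handling, locking and the actual H_GUEST_RUN_VCPU entry path are omitted):

/* Hypothetical caller sketch, not part of this patch. */
static int nestedv2_vcpu_example(struct kvm_vcpu *vcpu, u64 time_limit)
{
	struct kvmhv_nestedv2_io *io = &vcpu->arch.nestedv2_io;
	int rc;

	/* Once, at vcpu creation: H_GUEST_CREATE_VCPU plus buffer setup. */
	rc = kvmhv_nestedv2_vcpu_create(vcpu, io);
	if (rc < 0)
		return rc;

	/* Before each entry: mark what L1 changed, then fill the run input. */
	__kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(3));
	rc = kvmhv_nestedv2_flush_vcpu(vcpu, time_limit);
	if (rc < 0)
		goto out;

	/* ... the entry path issues H_GUEST_RUN_VCPU here ... */

	/* After exit: pull interrupt/fault state out of the output buffer. */
	rc = kvmhv_nestedv2_parse_output(vcpu);
out:
	/* Once, at vcpu teardown. */
	kvmhv_nestedv2_vcpu_free(vcpu, io);
	return rc;
}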
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index 34f1db212824..34bc0a8a1288 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -305,7 +305,7 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6
u32 pid;
lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
- pid = vcpu->arch.pid;
+ pid = kvmppc_get_pid(vcpu);
/*
* Prior memory accesses to host PID Q3 must be completed before we
@@ -330,7 +330,7 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64
int i;
lpid = kvm->arch.lpid;
- pid = vcpu->arch.pid;
+ pid = kvmppc_get_pid(vcpu);
/*
* See switch_mmu_to_guest_radix. ptesync should not be required here
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index 82be6d87514b..9012acadbca8 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -174,14 +174,14 @@ long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu)
ppc_md.hmi_exception_early(NULL);
out:
- if (vc->tb_offset) {
+ if (kvmppc_get_tb_offset(vcpu)) {
u64 new_tb = mftb() + vc->tb_offset;
mtspr(SPRN_TBU40, new_tb);
if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
new_tb += 0x1000000;
mtspr(SPRN_TBU40, new_tb);
}
- vc->tb_offset_applied = vc->tb_offset;
+ vc->tb_offset_applied = kvmppc_get_tb_offset(vcpu);
}
return ret;
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 9182324dbef9..17cb75a127b0 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -776,8 +776,8 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
r &= ~HPTE_GR_RESERVED;
}
- vcpu->arch.regs.gpr[4 + i * 2] = v;
- vcpu->arch.regs.gpr[5 + i * 2] = r;
+ kvmppc_set_gpr(vcpu, 4 + i * 2, v);
+ kvmppc_set_gpr(vcpu, 5 + i * 2, r);
}
return H_SUCCESS;
}
@@ -824,7 +824,7 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
}
}
}
- vcpu->arch.regs.gpr[4] = gr;
+ kvmppc_set_gpr(vcpu, 4, gr);
ret = H_SUCCESS;
out:
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
@@ -872,7 +872,7 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
kvmppc_set_dirty_from_hpte(kvm, v, gr);
}
}
- vcpu->arch.regs.gpr[4] = gr;
+ kvmppc_set_gpr(vcpu, 4, gr);
ret = H_SUCCESS;
out:
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index e165bfa842bf..e42984878503 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -481,7 +481,7 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
unsigned long xics_rm_h_xirr_x(struct kvm_vcpu *vcpu)
{
- vcpu->arch.regs.gpr[5] = get_tb();
+ kvmppc_set_gpr(vcpu, 5, get_tb());
return xics_rm_h_xirr(vcpu);
}
@@ -518,7 +518,7 @@ unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu)
} while (!icp_rm_try_update(icp, old_state, new_state));
/* Return the result in GPR4 */
- vcpu->arch.regs.gpr[4] = xirr;
+ kvmppc_set_gpr(vcpu, 4, xirr);
return check_too_hard(xics, icp);
}
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index e2d6f9327f77..92f33115144b 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -858,7 +858,7 @@ unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
}
kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
- pr_info("LPID %d went secure\n", kvm->arch.lpid);
+ pr_info("LPID %lld went secure\n", kvm->arch.lpid);
out:
srcu_read_unlock(&kvm->srcu, srcu_idx);
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index f4115819e738..29a382249770 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -328,7 +328,7 @@ static unsigned long xive_vm_h_xirr(struct kvm_vcpu *vcpu)
*/
/* Return interrupt and old CPPR in GPR4 */
- vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24);
+ kvmppc_set_gpr(vcpu, 4, hirq | (old_cppr << 24));
return H_SUCCESS;
}
@@ -364,7 +364,7 @@ static unsigned long xive_vm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server
hirq = xive_vm_scan_interrupts(xc, pending, scan_poll);
/* Return interrupt and old CPPR in GPR4 */
- vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24);
+ kvmppc_set_gpr(vcpu, 4, hirq | (xc->cppr << 24));
return H_SUCCESS;
}
@@ -884,10 +884,10 @@ int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
}
if (single_escalation)
- name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
+ name = kasprintf(GFP_KERNEL, "kvm-%lld-%d",
vcpu->kvm->arch.lpid, xc->server_num);
else
- name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
+ name = kasprintf(GFP_KERNEL, "kvm-%lld-%d-%d",
vcpu->kvm->arch.lpid, xc->server_num, prio);
if (!name) {
pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
@@ -2779,8 +2779,6 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
- struct kvmppc_vcore *vc = vcpu->arch.vcore;
-
/* The VM should have configured XICS mode before doing XICS hcalls. */
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
@@ -2799,7 +2797,7 @@ int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
return xive_vm_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
case H_XIRR_X:
xive_vm_h_xirr(vcpu);
- kvmppc_set_gpr(vcpu, 5, get_tb() + vc->tb_offset);
+ kvmppc_set_gpr(vcpu, 5, get_tb() + kvmppc_get_tb_offset(vcpu));
return H_SUCCESS;
}
diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
index 712ab91ced39..6e2ebbd8aaac 100644
--- a/arch/powerpc/kvm/book3s_xive_native.c
+++ b/arch/powerpc/kvm/book3s_xive_native.c
@@ -567,7 +567,7 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
u8 priority;
struct kvm_ppc_xive_eq kvm_eq;
int rc;
- __be32 *qaddr = 0;
+ __be32 *qaddr = NULL;
struct page *page;
struct xive_q *q;
gfn_t gfn;
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index 059c08ae0340..077fd88a0b68 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -92,7 +92,8 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
vcpu->arch.mmio_host_swabbed = 0;
emulated = EMULATE_FAIL;
- vcpu->arch.regs.msr = vcpu->arch.shared->msr;
+ vcpu->arch.regs.msr = kvmppc_get_msr(vcpu);
+ kvmhv_nestedv2_reload_ptregs(vcpu, &vcpu->arch.regs);
if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
int type = op.type & INSTR_TYPE_MASK;
int size = GETSIZE(op.type);
@@ -250,7 +251,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
vcpu->arch.mmio_sp64_extend = 1;
emulated = kvmppc_handle_store(vcpu,
- VCPU_FPR(vcpu, op.reg), size, 1);
+ kvmppc_get_fpr(vcpu, op.reg), size, 1);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
@@ -357,6 +358,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
}
trace_kvm_ppc_instr(ppc_inst_val(inst), kvmppc_get_pc(vcpu), emulated);
+ kvmhv_nestedv2_mark_dirty_ptregs(vcpu, &vcpu->arch.regs);
/* Advance past emulated instruction. */
if (emulated != EMULATE_FAIL)
diff --git a/arch/powerpc/kvm/guest-state-buffer.c b/arch/powerpc/kvm/guest-state-buffer.c
new file mode 100644
index 000000000000..b80dbc58621f
--- /dev/null
+++ b/arch/powerpc/kvm/guest-state-buffer.c
@@ -0,0 +1,621 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "asm/hvcall.h"
+#include <linux/log2.h>
+#include <asm/pgalloc.h>
+#include <asm/guest-state-buffer.h>
+
+static const u16 kvmppc_gse_iden_len[__KVMPPC_GSE_TYPE_MAX] = {
+ [KVMPPC_GSE_BE32] = sizeof(__be32),
+ [KVMPPC_GSE_BE64] = sizeof(__be64),
+ [KVMPPC_GSE_VEC128] = sizeof(vector128),
+ [KVMPPC_GSE_PARTITION_TABLE] = sizeof(struct kvmppc_gs_part_table),
+ [KVMPPC_GSE_PROCESS_TABLE] = sizeof(struct kvmppc_gs_proc_table),
+ [KVMPPC_GSE_BUFFER] = sizeof(struct kvmppc_gs_buff_info),
+};
+
+/**
+ * kvmppc_gsb_new() - create a new guest state buffer
+ * @size: total size of the guest state buffer (includes header)
+ * @guest_id: guest_id
+ * @vcpu_id: vcpu_id
+ * @flags: GFP flags
+ *
+ * Returns a guest state buffer.
+ */
+struct kvmppc_gs_buff *kvmppc_gsb_new(size_t size, unsigned long guest_id,
+ unsigned long vcpu_id, gfp_t flags)
+{
+ struct kvmppc_gs_buff *gsb;
+
+ gsb = kzalloc(sizeof(*gsb), flags);
+ if (!gsb)
+ return NULL;
+
+ size = roundup_pow_of_two(size);
+ gsb->hdr = kzalloc(size, flags);
+ if (!gsb->hdr)
+ goto free;
+
+ gsb->capacity = size;
+ gsb->len = sizeof(struct kvmppc_gs_header);
+ gsb->vcpu_id = vcpu_id;
+ gsb->guest_id = guest_id;
+
+ gsb->hdr->nelems = cpu_to_be32(0);
+
+ return gsb;
+
+free:
+ kfree(gsb);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsb_new);
+
+/**
+ * kvmppc_gsb_free() - free a guest state buffer
+ * @gsb: guest state buffer
+ */
+void kvmppc_gsb_free(struct kvmppc_gs_buff *gsb)
+{
+ kfree(gsb->hdr);
+ kfree(gsb);
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsb_free);
+
+/**
+ * kvmppc_gsb_put() - allocate space in a guest state buffer
+ * @gsb: buffer to allocate in
+ * @size: amount of space to allocate
+ *
+ * Returns a pointer to the requested amount of space within the buffer and
+ * increments the count of elements in the buffer.
+ *
+ * Does not check if there is enough space in the buffer.
+ */
+void *kvmppc_gsb_put(struct kvmppc_gs_buff *gsb, size_t size)
+{
+ u32 nelems = kvmppc_gsb_nelems(gsb);
+ void *p;
+
+ p = (void *)kvmppc_gsb_header(gsb) + kvmppc_gsb_len(gsb);
+ gsb->len += size;
+
+ kvmppc_gsb_header(gsb)->nelems = cpu_to_be32(nelems + 1);
+ return p;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsb_put);
+
+static int kvmppc_gsid_class(u16 iden)
+{
+ if ((iden >= KVMPPC_GSE_GUESTWIDE_START) &&
+ (iden <= KVMPPC_GSE_GUESTWIDE_END))
+ return KVMPPC_GS_CLASS_GUESTWIDE;
+
+ if ((iden >= KVMPPC_GSE_META_START) && (iden <= KVMPPC_GSE_META_END))
+ return KVMPPC_GS_CLASS_META;
+
+ if ((iden >= KVMPPC_GSE_DW_REGS_START) &&
+ (iden <= KVMPPC_GSE_DW_REGS_END))
+ return KVMPPC_GS_CLASS_DWORD_REG;
+
+ if ((iden >= KVMPPC_GSE_W_REGS_START) &&
+ (iden <= KVMPPC_GSE_W_REGS_END))
+ return KVMPPC_GS_CLASS_WORD_REG;
+
+ if ((iden >= KVMPPC_GSE_VSRS_START) && (iden <= KVMPPC_GSE_VSRS_END))
+ return KVMPPC_GS_CLASS_VECTOR;
+
+ if ((iden >= KVMPPC_GSE_INTR_REGS_START) &&
+ (iden <= KVMPPC_GSE_INTR_REGS_END))
+ return KVMPPC_GS_CLASS_INTR;
+
+ return -1;
+}
+
+static int kvmppc_gsid_type(u16 iden)
+{
+ int type = -1;
+
+ switch (kvmppc_gsid_class(iden)) {
+ case KVMPPC_GS_CLASS_GUESTWIDE:
+ switch (iden) {
+ case KVMPPC_GSID_HOST_STATE_SIZE:
+ case KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE:
+ case KVMPPC_GSID_TB_OFFSET:
+ type = KVMPPC_GSE_BE64;
+ break;
+ case KVMPPC_GSID_PARTITION_TABLE:
+ type = KVMPPC_GSE_PARTITION_TABLE;
+ break;
+ case KVMPPC_GSID_PROCESS_TABLE:
+ type = KVMPPC_GSE_PROCESS_TABLE;
+ break;
+ case KVMPPC_GSID_LOGICAL_PVR:
+ type = KVMPPC_GSE_BE32;
+ break;
+ }
+ break;
+ case KVMPPC_GS_CLASS_META:
+ switch (iden) {
+ case KVMPPC_GSID_RUN_INPUT:
+ case KVMPPC_GSID_RUN_OUTPUT:
+ type = KVMPPC_GSE_BUFFER;
+ break;
+ case KVMPPC_GSID_VPA:
+ type = KVMPPC_GSE_BE64;
+ break;
+ }
+ break;
+ case KVMPPC_GS_CLASS_DWORD_REG:
+ type = KVMPPC_GSE_BE64;
+ break;
+ case KVMPPC_GS_CLASS_WORD_REG:
+ type = KVMPPC_GSE_BE32;
+ break;
+ case KVMPPC_GS_CLASS_VECTOR:
+ type = KVMPPC_GSE_VEC128;
+ break;
+ case KVMPPC_GS_CLASS_INTR:
+ switch (iden) {
+ case KVMPPC_GSID_HDAR:
+ case KVMPPC_GSID_ASDR:
+ case KVMPPC_GSID_HEIR:
+ type = KVMPPC_GSE_BE64;
+ break;
+ case KVMPPC_GSID_HDSISR:
+ type = KVMPPC_GSE_BE32;
+ break;
+ }
+ break;
+ }
+
+ return type;
+}
+
+/**
+ * kvmppc_gsid_flags() - the flags for a guest state ID
+ * @iden: guest state ID
+ *
+ * Returns any flags for the guest state ID.
+ */
+unsigned long kvmppc_gsid_flags(u16 iden)
+{
+ unsigned long flags = 0;
+
+ switch (kvmppc_gsid_class(iden)) {
+ case KVMPPC_GS_CLASS_GUESTWIDE:
+ flags = KVMPPC_GS_FLAGS_WIDE;
+ break;
+ case KVMPPC_GS_CLASS_META:
+ case KVMPPC_GS_CLASS_DWORD_REG:
+ case KVMPPC_GS_CLASS_WORD_REG:
+ case KVMPPC_GS_CLASS_VECTOR:
+ case KVMPPC_GS_CLASS_INTR:
+ break;
+ }
+
+ return flags;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsid_flags);
+
+/**
+ * kvmppc_gsid_size() - the size of a guest state ID
+ * @iden: guest state ID
+ *
+ * Returns the size of guest state ID.
+ */
+u16 kvmppc_gsid_size(u16 iden)
+{
+ int type;
+
+ type = kvmppc_gsid_type(iden);
+ if (type == -1)
+ return 0;
+
+ if (type >= __KVMPPC_GSE_TYPE_MAX)
+ return 0;
+
+ return kvmppc_gse_iden_len[type];
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsid_size);
+
+/**
+ * kvmppc_gsid_mask() - the settable bits of a guest state ID
+ * @iden: guest state ID
+ *
+ * Returns a mask of settable bits for a guest state ID.
+ */
+u64 kvmppc_gsid_mask(u16 iden)
+{
+ u64 mask = ~0ull;
+
+ switch (iden) {
+ case KVMPPC_GSID_LPCR:
+ mask = LPCR_DPFD | LPCR_ILE | LPCR_AIL | LPCR_LD | LPCR_MER |
+ LPCR_GTSE;
+ break;
+ case KVMPPC_GSID_MSR:
+ mask = ~(MSR_HV | MSR_S | MSR_ME);
+ break;
+ }
+
+ return mask;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsid_mask);
+
+/**
+ * __kvmppc_gse_put() - add a guest state element to a buffer
+ * @gsb: buffer to add the element to
+ * @iden: guest state ID
+ * @size: length of data
+ * @data: pointer to data
+ */
+int __kvmppc_gse_put(struct kvmppc_gs_buff *gsb, u16 iden, u16 size,
+ const void *data)
+{
+ struct kvmppc_gs_elem *gse;
+ u16 total_size;
+
+ total_size = sizeof(*gse) + size;
+ if (total_size + kvmppc_gsb_len(gsb) > kvmppc_gsb_capacity(gsb))
+ return -ENOMEM;
+
+ if (kvmppc_gsid_size(iden) != size)
+ return -EINVAL;
+
+ gse = kvmppc_gsb_put(gsb, total_size);
+ gse->iden = cpu_to_be16(iden);
+ gse->len = cpu_to_be16(size);
+ memcpy(gse->data, data, size);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__kvmppc_gse_put);
+
+/**
+ * kvmppc_gse_parse() - create a parse map from a guest state buffer
+ * @gsp: guest state parser
+ * @gsb: guest state buffer
+ */
+int kvmppc_gse_parse(struct kvmppc_gs_parser *gsp, struct kvmppc_gs_buff *gsb)
+{
+ struct kvmppc_gs_elem *curr;
+ int rem, i;
+
+ kvmppc_gsb_for_each_elem(i, curr, gsb, rem) {
+ if (kvmppc_gse_len(curr) !=
+ kvmppc_gsid_size(kvmppc_gse_iden(curr)))
+ return -EINVAL;
+ kvmppc_gsp_insert(gsp, kvmppc_gse_iden(curr), curr);
+ }
+
+ if (kvmppc_gsb_nelems(gsb) != i)
+ return -EINVAL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gse_parse);
+
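+/*
+ * Informational note: guest state IDs are sparse, so they are flattened
+ * into a dense bit index by stacking each ID class's range after the
+ * previous one; kvmppc_gse_unflatten_iden() below reverses the mapping.
+ */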
+static inline int kvmppc_gse_flatten_iden(u16 iden)
+{
+ int bit = 0;
+ int class;
+
+ class = kvmppc_gsid_class(iden);
+
+ if (class == KVMPPC_GS_CLASS_GUESTWIDE) {
+ bit += iden - KVMPPC_GSE_GUESTWIDE_START;
+ return bit;
+ }
+
+ bit += KVMPPC_GSE_GUESTWIDE_COUNT;
+
+ if (class == KVMPPC_GS_CLASS_META) {
+ bit += iden - KVMPPC_GSE_META_START;
+ return bit;
+ }
+
+ bit += KVMPPC_GSE_META_COUNT;
+
+ if (class == KVMPPC_GS_CLASS_DWORD_REG) {
+ bit += iden - KVMPPC_GSE_DW_REGS_START;
+ return bit;
+ }
+
+ bit += KVMPPC_GSE_DW_REGS_COUNT;
+
+ if (class == KVMPPC_GS_CLASS_WORD_REG) {
+ bit += iden - KVMPPC_GSE_W_REGS_START;
+ return bit;
+ }
+
+ bit += KVMPPC_GSE_W_REGS_COUNT;
+
+ if (class == KVMPPC_GS_CLASS_VECTOR) {
+ bit += iden - KVMPPC_GSE_VSRS_START;
+ return bit;
+ }
+
+ bit += KVMPPC_GSE_VSRS_COUNT;
+
+ if (class == KVMPPC_GS_CLASS_INTR) {
+ bit += iden - KVMPPC_GSE_INTR_REGS_START;
+ return bit;
+ }
+
+ return 0;
+}
+
+static inline u16 kvmppc_gse_unflatten_iden(int bit)
+{
+ u16 iden;
+
+ if (bit < KVMPPC_GSE_GUESTWIDE_COUNT) {
+ iden = KVMPPC_GSE_GUESTWIDE_START + bit;
+ return iden;
+ }
+ bit -= KVMPPC_GSE_GUESTWIDE_COUNT;
+
+ if (bit < KVMPPC_GSE_META_COUNT) {
+ iden = KVMPPC_GSE_META_START + bit;
+ return iden;
+ }
+ bit -= KVMPPC_GSE_META_COUNT;
+
+ if (bit < KVMPPC_GSE_DW_REGS_COUNT) {
+ iden = KVMPPC_GSE_DW_REGS_START + bit;
+ return iden;
+ }
+ bit -= KVMPPC_GSE_DW_REGS_COUNT;
+
+ if (bit < KVMPPC_GSE_W_REGS_COUNT) {
+ iden = KVMPPC_GSE_W_REGS_START + bit;
+ return iden;
+ }
+ bit -= KVMPPC_GSE_W_REGS_COUNT;
+
+ if (bit < KVMPPC_GSE_VSRS_COUNT) {
+ iden = KVMPPC_GSE_VSRS_START + bit;
+ return iden;
+ }
+ bit -= KVMPPC_GSE_VSRS_COUNT;
+
+ if (bit < KVMPPC_GSE_IDEN_COUNT) {
+ iden = KVMPPC_GSE_INTR_REGS_START + bit;
+ return iden;
+ }
+
+ return 0;
+}
+
+/**
+ * kvmppc_gsp_insert() - add a mapping from a guest state ID to an element
+ * @gsp: guest state parser
+ * @iden: guest state id (key)
+ * @gse: guest state element (value)
+ */
+void kvmppc_gsp_insert(struct kvmppc_gs_parser *gsp, u16 iden,
+ struct kvmppc_gs_elem *gse)
+{
+ int i;
+
+ i = kvmppc_gse_flatten_iden(iden);
+ kvmppc_gsbm_set(&gsp->iterator, iden);
+ gsp->gses[i] = gse;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsp_insert);
+
+/**
+ * kvmppc_gsp_lookup() - lookup an element from a guest state ID
+ * @gsp: guest state parser
+ * @iden: guest state ID (key)
+ *
+ * Returns the guest state element if present.
+ */
+struct kvmppc_gs_elem *kvmppc_gsp_lookup(struct kvmppc_gs_parser *gsp, u16 iden)
+{
+ int i;
+
+ i = kvmppc_gse_flatten_iden(iden);
+ return gsp->gses[i];
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsp_lookup);
+
+/**
+ * kvmppc_gsbm_set() - set the guest state ID
+ * @gsbm: guest state bitmap
+ * @iden: guest state ID
+ */
+void kvmppc_gsbm_set(struct kvmppc_gs_bitmap *gsbm, u16 iden)
+{
+ set_bit(kvmppc_gse_flatten_iden(iden), gsbm->bitmap);
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsbm_set);
+
+/**
+ * kvmppc_gsbm_clear() - clear the guest state ID
+ * @gsbm: guest state bitmap
+ * @iden: guest state ID
+ */
+void kvmppc_gsbm_clear(struct kvmppc_gs_bitmap *gsbm, u16 iden)
+{
+ clear_bit(kvmppc_gse_flatten_iden(iden), gsbm->bitmap);
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsbm_clear);
+
+/**
+ * kvmppc_gsbm_test() - test the guest state ID
+ * @gsbm: guest state bitmap
+ * @iden: guest state ID
+ */
+bool kvmppc_gsbm_test(struct kvmppc_gs_bitmap *gsbm, u16 iden)
+{
+ return test_bit(kvmppc_gse_flatten_iden(iden), gsbm->bitmap);
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsbm_test);
+
+/**
+ * kvmppc_gsbm_next() - return the next set guest state ID
+ * @gsbm: guest state bitmap
+ * @prev: last guest state ID
+ */
+u16 kvmppc_gsbm_next(struct kvmppc_gs_bitmap *gsbm, u16 prev)
+{
+ int bit, pbit;
+
+ pbit = prev ? kvmppc_gse_flatten_iden(prev) + 1 : 0;
+ bit = find_next_bit(gsbm->bitmap, KVMPPC_GSE_IDEN_COUNT, pbit);
+
+ if (bit < KVMPPC_GSE_IDEN_COUNT)
+ return kvmppc_gse_unflatten_iden(bit);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsbm_next);
+
+/**
+ * kvmppc_gsm_init() - initialize a guest state message
+ * @gsm: guest state message
+ * @ops: callbacks
+ * @data: private data
+ * @flags: guest wide or thread wide
+ */
+int kvmppc_gsm_init(struct kvmppc_gs_msg *gsm, struct kvmppc_gs_msg_ops *ops,
+ void *data, unsigned long flags)
+{
+ memset(gsm, 0, sizeof(*gsm));
+ gsm->ops = ops;
+ gsm->data = data;
+ gsm->flags = flags;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsm_init);
+
+/**
+ * kvmppc_gsm_new() - creates a new guest state message
+ * @ops: callbacks
+ * @data: private data
+ * @flags: guest wide or thread wide
+ * @gfp_flags: GFP allocation flags
+ *
+ * Returns an initialized guest state message.
+ */
+struct kvmppc_gs_msg *kvmppc_gsm_new(struct kvmppc_gs_msg_ops *ops, void *data,
+ unsigned long flags, gfp_t gfp_flags)
+{
+ struct kvmppc_gs_msg *gsm;
+
+ gsm = kzalloc(sizeof(*gsm), gfp_flags);
+ if (!gsm)
+ return NULL;
+
+ kvmppc_gsm_init(gsm, ops, data, flags);
+
+ return gsm;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsm_new);
+
+/**
+ * kvmppc_gsm_size() - get the size required for the guest state message
+ * @gsm: self
+ *
+ * Returns the size required for the message.
+ */
+size_t kvmppc_gsm_size(struct kvmppc_gs_msg *gsm)
+{
+ if (gsm->ops->get_size)
+ return gsm->ops->get_size(gsm);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsm_size);
+
+/**
+ * kvmppc_gsm_free() - free guest state message
+ * @gsm: guest state message
+ */
+void kvmppc_gsm_free(struct kvmppc_gs_msg *gsm)
+{
+ kfree(gsm);
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsm_free);
+
+/**
+ * kvmppc_gsm_fill_info() - serialises message to guest state buffer format
+ * @gsm: self
+ * @gsb: buffer to serialise into
+ */
+int kvmppc_gsm_fill_info(struct kvmppc_gs_msg *gsm, struct kvmppc_gs_buff *gsb)
+{
+ if (!gsm->ops->fill_info)
+ return -EINVAL;
+
+ return gsm->ops->fill_info(gsb, gsm);
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsm_fill_info);
+
+/**
+ * kvmppc_gsm_refresh_info() - deserialises from guest state buffer
+ * @gsm: self
+ * @gsb: buffer to serialise from
+ */
+int kvmppc_gsm_refresh_info(struct kvmppc_gs_msg *gsm,
+ struct kvmppc_gs_buff *gsb)
+{
+ if (!gsm->ops->refresh_info)
+ return -EINVAL;
+
+ return gsm->ops->refresh_info(gsm, gsb);
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsm_refresh_info);
+
+/**
+ * kvmppc_gsb_send() - send all elements in the buffer to the hypervisor.
+ * @gsb: guest state buffer
+ * @flags: guest wide or thread wide
+ *
+ * Performs the H_GUEST_SET_STATE hcall for the guest state buffer.
+ */
+int kvmppc_gsb_send(struct kvmppc_gs_buff *gsb, unsigned long flags)
+{
+ unsigned long hflags = 0;
+ unsigned long i;
+ int rc;
+
+ if (kvmppc_gsb_nelems(gsb) == 0)
+ return 0;
+
+ if (flags & KVMPPC_GS_FLAGS_WIDE)
+ hflags |= H_GUEST_FLAGS_WIDE;
+
+ rc = plpar_guest_set_state(hflags, gsb->guest_id, gsb->vcpu_id,
+ __pa(gsb->hdr), gsb->capacity, &i);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsb_send);
+
+/**
+ * kvmppc_gsb_recv() - request that all elements in the buffer have their
+ * values updated.
+ * @gsb: guest state buffer
+ * @flags: guest wide or thread wide
+ *
+ * Performs the H_GUEST_GET_STATE hcall for the guest state buffer.
+ * After returning from the hcall the guest state elements that were
+ * present in the buffer will have updated values from the hypervisor.
+ */
+int kvmppc_gsb_recv(struct kvmppc_gs_buff *gsb, unsigned long flags)
+{
+ unsigned long hflags = 0;
+ unsigned long i;
+ int rc;
+
+ if (flags & KVMPPC_GS_FLAGS_WIDE)
+ hflags |= H_GUEST_FLAGS_WIDE;
+
+ rc = plpar_guest_get_state(hflags, gsb->guest_id, gsb->vcpu_id,
+ __pa(gsb->hdr), gsb->capacity, &i);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsb_recv);
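For orientation, a minimal sketch of how these primitives compose (a hypothetical caller, not part of this patch; hcall status handling is simplified):

/* Hypothetical example: fetch GPR(0) of a nested vcpu from the hypervisor. */
static int example_get_gpr0(unsigned long guest_id, unsigned long vcpu_id, u64 *val)
{
	struct kvmppc_gs_parser gsp = { 0 };
	struct kvmppc_gs_elem *gse;
	struct kvmppc_gs_buff *gsb;
	int rc;

	gsb = kvmppc_gsb_new(PAGE_SIZE, guest_id, vcpu_id, GFP_KERNEL);
	if (!gsb)
		return -ENOMEM;

	/* Stage the element of interest; H_GUEST_GET_STATE fills its value. */
	rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(0), 0);
	if (rc < 0)
		goto out;

	rc = kvmppc_gsb_recv(gsb, 0);
	if (rc)		/* hcall status; error mapping omitted */
		goto out;

	rc = kvmppc_gse_parse(&gsp, gsb);
	if (rc < 0)
		goto out;

	gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_GPR(0));
	if (gse)
		*val = kvmppc_gse_get_u64(gse);
out:
	kvmppc_gsb_free(gsb);
	return rc;
}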
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 7197c8256668..f6af752698d0 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -934,11 +934,11 @@ static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
return;
if (index >= 32) {
- val.vval = VCPU_VSX_VR(vcpu, index - 32);
+ kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
val.vsxval[offset] = gpr;
- VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
} else {
- VCPU_VSX_FPR(vcpu, index, offset) = gpr;
+ kvmppc_set_vsx_fpr(vcpu, index, offset, gpr);
}
}
@@ -949,13 +949,13 @@ static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
if (index >= 32) {
- val.vval = VCPU_VSX_VR(vcpu, index - 32);
+ kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
val.vsxval[0] = gpr;
val.vsxval[1] = gpr;
- VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
} else {
- VCPU_VSX_FPR(vcpu, index, 0) = gpr;
- VCPU_VSX_FPR(vcpu, index, 1) = gpr;
+ kvmppc_set_vsx_fpr(vcpu, index, 0, gpr);
+ kvmppc_set_vsx_fpr(vcpu, index, 1, gpr);
}
}
@@ -970,12 +970,12 @@ static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
val.vsx32val[1] = gpr;
val.vsx32val[2] = gpr;
val.vsx32val[3] = gpr;
- VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
} else {
val.vsx32val[0] = gpr;
val.vsx32val[1] = gpr;
- VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
- VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
+ kvmppc_set_vsx_fpr(vcpu, index, 0, val.vsxval[0]);
+ kvmppc_set_vsx_fpr(vcpu, index, 1, val.vsxval[0]);
}
}
@@ -991,15 +991,15 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
return;
if (index >= 32) {
- val.vval = VCPU_VSX_VR(vcpu, index - 32);
+ kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
val.vsx32val[offset] = gpr32;
- VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
} else {
dword_offset = offset / 2;
word_offset = offset % 2;
- val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
+ val.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, index, dword_offset);
val.vsx32val[word_offset] = gpr32;
- VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
+ kvmppc_set_vsx_fpr(vcpu, index, dword_offset, val.vsxval[0]);
}
}
#endif /* CONFIG_VSX */
@@ -1058,9 +1058,9 @@ static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
if (offset == -1)
return;
- val.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &val.vval);
val.vsxval[offset] = gpr;
- VCPU_VSX_VR(vcpu, index) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}
static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
@@ -1074,9 +1074,9 @@ static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
if (offset == -1)
return;
- val.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &val.vval);
val.vsx32val[offset] = gpr32;
- VCPU_VSX_VR(vcpu, index) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}
static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
@@ -1090,9 +1090,9 @@ static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
if (offset == -1)
return;
- val.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &val.vval);
val.vsx16val[offset] = gpr16;
- VCPU_VSX_VR(vcpu, index) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}
static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
@@ -1106,9 +1106,9 @@ static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
if (offset == -1)
return;
- val.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &val.vval);
val.vsx8val[offset] = gpr8;
- VCPU_VSX_VR(vcpu, index) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}
#endif /* CONFIG_ALTIVEC */
@@ -1194,14 +1194,14 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
if (vcpu->kvm->arch.kvm_ops->giveup_ext)
vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
- VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
+ kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr);
break;
#ifdef CONFIG_PPC_BOOK3S
case KVM_MMIO_REG_QPR:
vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
break;
case KVM_MMIO_REG_FQPR:
- VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
+ kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr);
vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
break;
#endif
@@ -1419,9 +1419,9 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
}
if (rs < 32) {
- *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
+ *val = kvmppc_get_vsx_fpr(vcpu, rs, vsx_offset);
} else {
- reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
+ kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval);
*val = reg.vsxval[vsx_offset];
}
break;
@@ -1438,10 +1438,10 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
if (rs < 32) {
dword_offset = vsx_offset / 2;
word_offset = vsx_offset % 2;
- reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
+ reg.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, rs, dword_offset);
*val = reg.vsx32val[word_offset];
} else {
- reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
+ kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval);
*val = reg.vsx32val[vsx_offset];
}
break;
@@ -1556,7 +1556,7 @@ static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
if (vmx_offset == -1)
return -1;
- reg.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
*val = reg.vsxval[vmx_offset];
return result;
@@ -1574,7 +1574,7 @@ static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
if (vmx_offset == -1)
return -1;
- reg.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
*val = reg.vsx32val[vmx_offset];
return result;
@@ -1592,7 +1592,7 @@ static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
if (vmx_offset == -1)
return -1;
- reg.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
*val = reg.vsx16val[vmx_offset];
return result;
@@ -1610,7 +1610,7 @@ static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
if (vmx_offset == -1)
return -1;
- reg.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
*val = reg.vsx8val[vmx_offset];
return result;
@@ -1719,17 +1719,17 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
r = -ENXIO;
break;
}
- val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
+ kvmppc_get_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval);
break;
case KVM_REG_PPC_VSCR:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
- val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
+ val = get_reg_val(reg->id, kvmppc_get_vscr(vcpu));
break;
case KVM_REG_PPC_VRSAVE:
- val = get_reg_val(reg->id, vcpu->arch.vrsave);
+ val = get_reg_val(reg->id, kvmppc_get_vrsave(vcpu));
break;
#endif /* CONFIG_ALTIVEC */
default:
@@ -1770,21 +1770,21 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
r = -ENXIO;
break;
}
- vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+ kvmppc_set_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval);
break;
case KVM_REG_PPC_VSCR:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
- vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
+ kvmppc_set_vscr(vcpu, set_reg_val(reg->id, val));
break;
case KVM_REG_PPC_VRSAVE:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
- vcpu->arch.vrsave = set_reg_val(reg->id, val);
+ kvmppc_set_vrsave(vcpu, set_reg_val(reg->id, val));
break;
#endif /* CONFIG_ALTIVEC */
default:
diff --git a/arch/powerpc/kvm/test-guest-state-buffer.c b/arch/powerpc/kvm/test-guest-state-buffer.c
new file mode 100644
index 000000000000..4720b8dc8837
--- /dev/null
+++ b/arch/powerpc/kvm/test-guest-state-buffer.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/init.h>
+#include <linux/log2.h>
+#include <kunit/test.h>
+
+#include <asm/guest-state-buffer.h>
+
+static void test_creating_buffer(struct kunit *test)
+{
+ struct kvmppc_gs_buff *gsb;
+ size_t size = 0x100;
+
+ gsb = kvmppc_gsb_new(size, 0, 0, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb->hdr);
+
+ KUNIT_EXPECT_EQ(test, gsb->capacity, roundup_pow_of_two(size));
+ KUNIT_EXPECT_EQ(test, gsb->len, sizeof(__be32));
+
+ kvmppc_gsb_free(gsb);
+}
+
+static void test_adding_element(struct kunit *test)
+{
+ const struct kvmppc_gs_elem *head, *curr;
+ union {
+ __vector128 v;
+ u64 dw[2];
+ } u;
+ int rem;
+ struct kvmppc_gs_buff *gsb;
+ size_t size = 0x1000;
+ int i, rc;
+ u64 data;
+
+ gsb = kvmppc_gsb_new(size, 0, 0, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);
+
+ /* Single elements, direct use of __kvmppc_gse_put() */
+ data = 0xdeadbeef;
+ rc = __kvmppc_gse_put(gsb, KVMPPC_GSID_GPR(0), 8, &data);
+ KUNIT_EXPECT_GE(test, rc, 0);
+
+ head = kvmppc_gsb_data(gsb);
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(head), KVMPPC_GSID_GPR(0));
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_len(head), 8);
+ data = 0;
+ memcpy(&data, kvmppc_gse_data(head), 8);
+ KUNIT_EXPECT_EQ(test, data, 0xdeadbeef);
+
+ /* Multiple elements, simple wrapper */
+ rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(1), 0xcafef00d);
+ KUNIT_EXPECT_GE(test, rc, 0);
+
+ u.dw[0] = 0x1;
+ u.dw[1] = 0x2;
+ rc = kvmppc_gse_put_vector128(gsb, KVMPPC_GSID_VSRS(0), &u.v);
+ KUNIT_EXPECT_GE(test, rc, 0);
+ u.dw[0] = 0x0;
+ u.dw[1] = 0x0;
+
+ kvmppc_gsb_for_each_elem(i, curr, gsb, rem) {
+ switch (i) {
+ case 0:
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(curr),
+ KVMPPC_GSID_GPR(0));
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_len(curr), 8);
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_get_be64(curr),
+ 0xdeadbeef);
+ break;
+ case 1:
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(curr),
+ KVMPPC_GSID_GPR(1));
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_len(curr), 8);
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_get_u64(curr),
+ 0xcafef00d);
+ break;
+ case 2:
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(curr),
+ KVMPPC_GSID_VSRS(0));
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_len(curr), 16);
+ kvmppc_gse_get_vector128(curr, &u.v);
+ KUNIT_EXPECT_EQ(test, u.dw[0], 0x1);
+ KUNIT_EXPECT_EQ(test, u.dw[1], 0x2);
+ break;
+ }
+ }
+ KUNIT_EXPECT_EQ(test, i, 3);
+
+ kvmppc_gsb_reset(gsb);
+ KUNIT_EXPECT_EQ(test, kvmppc_gsb_nelems(gsb), 0);
+ KUNIT_EXPECT_EQ(test, kvmppc_gsb_len(gsb),
+ sizeof(struct kvmppc_gs_header));
+
+ kvmppc_gsb_free(gsb);
+}
+
+static void test_gs_parsing(struct kunit *test)
+{
+ struct kvmppc_gs_elem *gse;
+ struct kvmppc_gs_parser gsp = { 0 };
+ struct kvmppc_gs_buff *gsb;
+ size_t size = 0x1000;
+ u64 tmp1, tmp2;
+
+ gsb = kvmppc_gsb_new(size, 0, 0, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);
+
+ tmp1 = 0xdeadbeefull;
+ kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(0), tmp1);
+
+ KUNIT_EXPECT_GE(test, kvmppc_gse_parse(&gsp, gsb), 0);
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_GPR(0));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gse);
+
+ tmp2 = kvmppc_gse_get_u64(gse);
+ KUNIT_EXPECT_EQ(test, tmp2, 0xdeadbeefull);
+
+ kvmppc_gsb_free(gsb);
+}
+
+static void test_gs_bitmap(struct kunit *test)
+{
+ struct kvmppc_gs_bitmap gsbm = { 0 };
+ struct kvmppc_gs_bitmap gsbm1 = { 0 };
+ struct kvmppc_gs_bitmap gsbm2 = { 0 };
+ u16 iden;
+ int i, j;
+
+ i = 0;
+ for (u16 iden = KVMPPC_GSID_HOST_STATE_SIZE;
+ iden <= KVMPPC_GSID_PROCESS_TABLE; iden++) {
+ kvmppc_gsbm_set(&gsbm, iden);
+ kvmppc_gsbm_set(&gsbm1, iden);
+ KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
+ kvmppc_gsbm_clear(&gsbm, iden);
+ KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
+ i++;
+ }
+
+ for (u16 iden = KVMPPC_GSID_RUN_INPUT; iden <= KVMPPC_GSID_VPA;
+ iden++) {
+ kvmppc_gsbm_set(&gsbm, iden);
+ kvmppc_gsbm_set(&gsbm1, iden);
+ KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
+ kvmppc_gsbm_clear(&gsbm, iden);
+ KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
+ i++;
+ }
+
+ for (u16 iden = KVMPPC_GSID_GPR(0); iden <= KVMPPC_GSID_CTRL; iden++) {
+ kvmppc_gsbm_set(&gsbm, iden);
+ kvmppc_gsbm_set(&gsbm1, iden);
+ KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
+ kvmppc_gsbm_clear(&gsbm, iden);
+ KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
+ i++;
+ }
+
+ for (u16 iden = KVMPPC_GSID_CR; iden <= KVMPPC_GSID_PSPB; iden++) {
+ kvmppc_gsbm_set(&gsbm, iden);
+ kvmppc_gsbm_set(&gsbm1, iden);
+ KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
+ kvmppc_gsbm_clear(&gsbm, iden);
+ KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
+ i++;
+ }
+
+ for (u16 iden = KVMPPC_GSID_VSRS(0); iden <= KVMPPC_GSID_VSRS(63);
+ iden++) {
+ kvmppc_gsbm_set(&gsbm, iden);
+ kvmppc_gsbm_set(&gsbm1, iden);
+ KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
+ kvmppc_gsbm_clear(&gsbm, iden);
+ KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
+ i++;
+ }
+
+ for (u16 iden = KVMPPC_GSID_HDAR; iden <= KVMPPC_GSID_ASDR; iden++) {
+ kvmppc_gsbm_set(&gsbm, iden);
+ kvmppc_gsbm_set(&gsbm1, iden);
+ KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
+ kvmppc_gsbm_clear(&gsbm, iden);
+ KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
+ i++;
+ }
+
+ j = 0;
+ kvmppc_gsbm_for_each(&gsbm1, iden)
+ {
+ kvmppc_gsbm_set(&gsbm2, iden);
+ j++;
+ }
+ KUNIT_EXPECT_EQ(test, i, j);
+ KUNIT_EXPECT_MEMEQ(test, &gsbm1, &gsbm2, sizeof(gsbm1));
+}
+
+struct kvmppc_gs_msg_test1_data {
+ u64 a;
+ u32 b;
+ struct kvmppc_gs_part_table c;
+ struct kvmppc_gs_proc_table d;
+ struct kvmppc_gs_buff_info e;
+};
+
+static size_t test1_get_size(struct kvmppc_gs_msg *gsm)
+{
+ size_t size = 0;
+ u16 ids[] = {
+ KVMPPC_GSID_PARTITION_TABLE,
+ KVMPPC_GSID_PROCESS_TABLE,
+ KVMPPC_GSID_RUN_INPUT,
+ KVMPPC_GSID_GPR(0),
+ KVMPPC_GSID_CR,
+ };
+
+ for (int i = 0; i < ARRAY_SIZE(ids); i++)
+ size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i]));
+ return size;
+}
+
+static int test1_fill_info(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm)
+{
+ struct kvmppc_gs_msg_test1_data *data = gsm->data;
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_GPR(0)))
+ kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(0), data->a);
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_CR))
+ kvmppc_gse_put_u32(gsb, KVMPPC_GSID_CR, data->b);
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_PARTITION_TABLE))
+ kvmppc_gse_put_part_table(gsb, KVMPPC_GSID_PARTITION_TABLE,
+ data->c);
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_PROCESS_TABLE))
+ kvmppc_gse_put_proc_table(gsb, KVMPPC_GSID_PROCESS_TABLE,
+ data->d);
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_INPUT))
+ kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_INPUT, data->e);
+
+ return 0;
+}
+
+static int test1_refresh_info(struct kvmppc_gs_msg *gsm,
+ struct kvmppc_gs_buff *gsb)
+{
+ struct kvmppc_gs_parser gsp = { 0 };
+ struct kvmppc_gs_msg_test1_data *data = gsm->data;
+ struct kvmppc_gs_elem *gse;
+ int rc;
+
+ rc = kvmppc_gse_parse(&gsp, gsb);
+ if (rc < 0)
+ return rc;
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_GPR(0));
+ if (gse)
+ data->a = kvmppc_gse_get_u64(gse);
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_CR);
+ if (gse)
+ data->b = kvmppc_gse_get_u32(gse);
+
+ return 0;
+}
+
+static struct kvmppc_gs_msg_ops gs_msg_test1_ops = {
+ .get_size = test1_get_size,
+ .fill_info = test1_fill_info,
+ .refresh_info = test1_refresh_info,
+};
+
+static void test_gs_msg(struct kunit *test)
+{
+ struct kvmppc_gs_msg_test1_data test1_data = {
+ .a = 0xdeadbeef,
+ .b = 0x1,
+ };
+ struct kvmppc_gs_msg *gsm;
+ struct kvmppc_gs_buff *gsb;
+
+ gsm = kvmppc_gsm_new(&gs_msg_test1_ops, &test1_data, GSM_SEND,
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsm);
+
+ gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), 0, 0, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);
+
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_PARTITION_TABLE);
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_PROCESS_TABLE);
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_RUN_INPUT);
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_GPR(0));
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_CR);
+
+ kvmppc_gsm_fill_info(gsm, gsb);
+
+ memset(&test1_data, 0, sizeof(test1_data));
+
+ kvmppc_gsm_refresh_info(gsm, gsb);
+ KUNIT_EXPECT_EQ(test, test1_data.a, 0xdeadbeef);
+ KUNIT_EXPECT_EQ(test, test1_data.b, 0x1);
+
+ kvmppc_gsm_free(gsm);
+}
+
+static struct kunit_case guest_state_buffer_testcases[] = {
+ KUNIT_CASE(test_creating_buffer),
+ KUNIT_CASE(test_adding_element),
+ KUNIT_CASE(test_gs_bitmap),
+ KUNIT_CASE(test_gs_parsing),
+ KUNIT_CASE(test_gs_msg),
+ {}
+};
+
+static struct kunit_suite guest_state_buffer_test_suite = {
+ .name = "guest_state_buffer_test",
+ .test_cases = guest_state_buffer_testcases,
+};
+
+kunit_test_suites(&guest_state_buffer_test_suite);
+
+MODULE_LICENSE("GPL");
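For orientation, the round trip exercised by test_gs_parsing() above is the same producer/consumer flow a caller of these guest-state-buffer helpers would follow. Below is a hedged sketch, not part of the patch, using only the helpers and signatures visible in the tests (the two zero arguments to kvmppc_gsb_new() simply mirror the tests above):

/*
 * Illustrative sketch only: allocate a buffer, put an identified element,
 * parse the element stream back, and look the value up by identifier.
 */
static int kvmppc_gsb_roundtrip_sketch(void)
{
	struct kvmppc_gs_parser gsp = { 0 };
	struct kvmppc_gs_buff *gsb;
	struct kvmppc_gs_elem *gse;
	u64 val = 0;
	int rc;

	gsb = kvmppc_gsb_new(0x1000, 0, 0, GFP_KERNEL);
	if (IS_ERR_OR_NULL(gsb))
		return gsb ? PTR_ERR(gsb) : -ENOMEM;

	/* Producer side: append an identified element */
	rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(0), 0xdeadbeefull);
	if (rc < 0)
		goto out;

	/* Consumer side: index the element stream, then look up by id */
	rc = kvmppc_gse_parse(&gsp, gsb);
	if (rc < 0)
		goto out;

	gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_GPR(0));
	if (gse)
		val = kvmppc_gse_get_u64(gse);	/* val is now 0xdeadbeef */
	rc = (val == 0xdeadbeefull) ? 0 : -EINVAL;
out:
	kvmppc_gsb_free(gsb);
	return rc;
}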
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index b00112d7ad46..c6ab46156cda 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -38,6 +38,7 @@ static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr
return 0;
failed:
+ mb(); /* sync */
return -EPERM;
}
@@ -204,9 +205,6 @@ void __init poking_init(void)
{
int ret;
- if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
- return;
-
if (mm_patch_enabled())
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"powerpc/text_poke_mm:online",
@@ -309,10 +307,6 @@ static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr)
err = __patch_instruction(addr, instr, patch_addr);
- /* hwsync performed by __patch_instruction (sync) if successful */
- if (err)
- mb(); /* sync */
-
/* context synchronisation performed by __patch_instruction (isync or exception) */
stop_using_temp_mm(patching_mm, orig_mm);
@@ -378,6 +372,144 @@ int patch_instruction(u32 *addr, ppc_inst_t instr)
}
NOKPROBE_SYMBOL(patch_instruction);
+static int __patch_instructions(u32 *patch_addr, u32 *code, size_t len, bool repeat_instr)
+{
+ unsigned long start = (unsigned long)patch_addr;
+
+ /* Repeat instruction */
+ if (repeat_instr) {
+ ppc_inst_t instr = ppc_inst_read(code);
+
+ if (ppc_inst_prefixed(instr)) {
+ u64 val = ppc_inst_as_ulong(instr);
+
+ memset64((u64 *)patch_addr, val, len / 8);
+ } else {
+ u32 val = ppc_inst_val(instr);
+
+ memset32(patch_addr, val, len / 4);
+ }
+ } else {
+ memcpy(patch_addr, code, len);
+ }
+
+ smp_wmb(); /* smp write barrier */
+ flush_icache_range(start, start + len);
+ return 0;
+}
+
+/*
+ * A page is mapped and instructions that fit the page are patched.
+ * Assumes 'len' to be (PAGE_SIZE - offset_in_page(addr)) or below.
+ */
+static int __do_patch_instructions_mm(u32 *addr, u32 *code, size_t len, bool repeat_instr)
+{
+ struct mm_struct *patching_mm, *orig_mm;
+ unsigned long pfn = get_patch_pfn(addr);
+ unsigned long text_poke_addr;
+ spinlock_t *ptl;
+ u32 *patch_addr;
+ pte_t *pte;
+ int err;
+
+ patching_mm = __this_cpu_read(cpu_patching_context.mm);
+ text_poke_addr = __this_cpu_read(cpu_patching_context.addr);
+ patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
+
+ pte = get_locked_pte(patching_mm, text_poke_addr, &ptl);
+ if (!pte)
+ return -ENOMEM;
+
+ __set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
+
+ /* order PTE update before use, also serves as the hwsync */
+ asm volatile("ptesync" ::: "memory");
+
+ /* order context switch after arbitrary prior code */
+ isync();
+
+ orig_mm = start_using_temp_mm(patching_mm);
+
+ err = __patch_instructions(patch_addr, code, len, repeat_instr);
+
+ /* context synchronisation performed by __patch_instructions */
+ stop_using_temp_mm(patching_mm, orig_mm);
+
+ pte_clear(patching_mm, text_poke_addr, pte);
+ /*
+ * ptesync to order PTE update before TLB invalidation done
+ * by radix__local_flush_tlb_page_psize (in _tlbiel_va)
+ */
+ local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);
+
+ pte_unmap_unlock(pte, ptl);
+
+ return err;
+}
+
+/*
+ * A page is mapped and instructions that fit the page are patched.
+ * Assumes 'len' to be (PAGE_SIZE - offset_in_page(addr)) or below.
+ */
+static int __do_patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr)
+{
+ unsigned long pfn = get_patch_pfn(addr);
+ unsigned long text_poke_addr;
+ u32 *patch_addr;
+ pte_t *pte;
+ int err;
+
+ text_poke_addr = (unsigned long)__this_cpu_read(cpu_patching_context.addr) & PAGE_MASK;
+ patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
+
+ pte = __this_cpu_read(cpu_patching_context.pte);
+ __set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
+ /* See ptesync comment in radix__set_pte_at() */
+ if (radix_enabled())
+ asm volatile("ptesync" ::: "memory");
+
+ err = __patch_instructions(patch_addr, code, len, repeat_instr);
+
+ pte_clear(&init_mm, text_poke_addr, pte);
+ flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);
+
+ return err;
+}
+
+/*
+ * Patch 'addr' with 'len' bytes of instructions from 'code'.
+ *
+ * If repeat_instr is true, the same instruction is filled for
+ * 'len' bytes.
+ */
+int patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr)
+{
+ while (len > 0) {
+ unsigned long flags;
+ size_t plen;
+ int err;
+
+ plen = min_t(size_t, PAGE_SIZE - offset_in_page(addr), len);
+
+ local_irq_save(flags);
+ if (mm_patch_enabled())
+ err = __do_patch_instructions_mm(addr, code, plen, repeat_instr);
+ else
+ err = __do_patch_instructions(addr, code, plen, repeat_instr);
+ local_irq_restore(flags);
+ if (err)
+ return err;
+
+ len -= plen;
+ addr = (u32 *)((unsigned long)addr + plen);
+ if (!repeat_instr)
+ code = (u32 *)((unsigned long)code + plen);
+ }
+
+ return 0;
+}
+NOKPROBE_SYMBOL(patch_instructions);
+
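The intended usage pattern for the new patch_instructions() helper can be seen in bpf_arch_text_copy() and bpf_arch_text_invalidate() later in this series: take text_mutex, then either copy a block of code or replicate one instruction across a range. A sketch of that pattern, illustration only, mirroring those callers:

/*
 * Illustrative sketch (not part of the patch), mirroring the BPF callers
 * added further down in this series.
 */
static int copy_text_sketch(u32 *dst, u32 *src, size_t len)
{
	int err;

	mutex_lock(&text_mutex);
	/* repeat_instr = false: copy 'len' bytes of instructions from src */
	err = patch_instructions(dst, src, len, false);
	mutex_unlock(&text_mutex);

	return err;
}

static int fill_traps_sketch(u32 *dst, size_t len)
{
	u32 insn = BREAKPOINT_INSTRUCTION;
	int err;

	mutex_lock(&text_mutex);
	/* repeat_instr = true: replicate the single trap across 'len' bytes */
	err = patch_instructions(dst, &insn, len, true);
	mutex_unlock(&text_mutex);

	return err;
}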
int patch_branch(u32 *addr, unsigned long target, int flags)
{
ppc_inst_t instr;
diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index 6dd2f46bd3ef..5de4dd549f6e 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -16,7 +16,8 @@ struct qnode {
struct qnode *next;
struct qspinlock *lock;
int cpu;
- int yield_cpu;
+ u8 sleepy; /* 1 if the previous vCPU was preempted or
+ * if the previous node was sleepy */
u8 locked; /* 1 if lock acquired */
};
@@ -43,7 +44,7 @@ static bool pv_sleepy_lock_sticky __read_mostly = false;
static u64 pv_sleepy_lock_interval_ns __read_mostly = 0;
static int pv_sleepy_lock_factor __read_mostly = 256;
static bool pv_yield_prev __read_mostly = true;
-static bool pv_yield_propagate_owner __read_mostly = true;
+static bool pv_yield_sleepy_owner __read_mostly = true;
static bool pv_prod_head __read_mostly = false;
static DEFINE_PER_CPU_ALIGNED(struct qnodes, qnodes);
@@ -247,22 +248,18 @@ static __always_inline void seen_sleepy_lock(void)
this_cpu_write(sleepy_lock_seen_clock, sched_clock());
}
-static __always_inline void seen_sleepy_node(struct qspinlock *lock, u32 val)
+static __always_inline void seen_sleepy_node(void)
{
if (pv_sleepy_lock) {
if (pv_sleepy_lock_interval_ns)
this_cpu_write(sleepy_lock_seen_clock, sched_clock());
- if (val & _Q_LOCKED_VAL) {
- if (!(val & _Q_SLEEPY_VAL))
- try_set_sleepy(lock, val);
- }
+ /* Don't set sleepy because we likely have a stale val */
}
}
-static struct qnode *get_tail_qnode(struct qspinlock *lock, u32 val)
+static struct qnode *get_tail_qnode(struct qspinlock *lock, int prev_cpu)
{
- int cpu = decode_tail_cpu(val);
- struct qnodes *qnodesp = per_cpu_ptr(&qnodes, cpu);
+ struct qnodes *qnodesp = per_cpu_ptr(&qnodes, prev_cpu);
int idx;
/*
@@ -353,77 +350,66 @@ static __always_inline bool yield_head_to_locked_owner(struct qspinlock *lock, u
return __yield_to_locked_owner(lock, val, paravirt, mustq);
}
-static __always_inline void propagate_yield_cpu(struct qnode *node, u32 val, int *set_yield_cpu, bool paravirt)
+static __always_inline void propagate_sleepy(struct qnode *node, u32 val, bool paravirt)
{
struct qnode *next;
int owner;
if (!paravirt)
return;
- if (!pv_yield_propagate_owner)
- return;
-
- owner = get_owner_cpu(val);
- if (*set_yield_cpu == owner)
+ if (!pv_yield_sleepy_owner)
return;
next = READ_ONCE(node->next);
if (!next)
return;
- if (vcpu_is_preempted(owner)) {
- next->yield_cpu = owner;
- *set_yield_cpu = owner;
- } else if (*set_yield_cpu != -1) {
- next->yield_cpu = owner;
- *set_yield_cpu = owner;
- }
+ if (next->sleepy)
+ return;
+
+ owner = get_owner_cpu(val);
+ if (vcpu_is_preempted(owner))
+ next->sleepy = 1;
}
/* Called inside spin_begin() */
-static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *node, u32 val, bool paravirt)
+static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *node, int prev_cpu, bool paravirt)
{
- int prev_cpu = decode_tail_cpu(val);
u32 yield_count;
- int yield_cpu;
bool preempted = false;
if (!paravirt)
goto relax;
- if (!pv_yield_propagate_owner)
- goto yield_prev;
-
- yield_cpu = READ_ONCE(node->yield_cpu);
- if (yield_cpu == -1) {
- /* Propagate back the -1 CPU */
- if (node->next && node->next->yield_cpu != -1)
- node->next->yield_cpu = yield_cpu;
+ if (!pv_yield_sleepy_owner)
goto yield_prev;
- }
-
- yield_count = yield_count_of(yield_cpu);
- if ((yield_count & 1) == 0)
- goto yield_prev; /* owner vcpu is running */
-
- if (get_owner_cpu(READ_ONCE(lock->val)) != yield_cpu)
- goto yield_prev; /* re-sample lock owner */
- spin_end();
-
- preempted = true;
- seen_sleepy_node(lock, val);
+ /*
+ * If the previous waiter was preempted it might not be able to
+ * propagate sleepy to us, so check the lock in that case too.
+ */
+ if (node->sleepy || vcpu_is_preempted(prev_cpu)) {
+ u32 val = READ_ONCE(lock->val);
- smp_rmb();
+ if (val & _Q_LOCKED_VAL) {
+ if (node->next && !node->next->sleepy) {
+ /*
+ * Propagate sleepy to next waiter. Only if
+ * owner is preempted, which allows the queue
+ * to become "non-sleepy" if vCPU preemption
+ * ceases to occur, even if the lock remains
+ * highly contended.
+ */
+ if (vcpu_is_preempted(get_owner_cpu(val)))
+ node->next->sleepy = 1;
+ }
- if (yield_cpu == node->yield_cpu) {
- if (node->next && node->next->yield_cpu != yield_cpu)
- node->next->yield_cpu = yield_cpu;
- yield_to_preempted(yield_cpu, yield_count);
- spin_begin();
- return preempted;
+ preempted = yield_to_locked_owner(lock, val, paravirt);
+ if (preempted)
+ return preempted;
+ }
+ node->sleepy = false;
}
- spin_begin();
yield_prev:
if (!pv_yield_prev)
@@ -436,7 +422,7 @@ yield_prev:
spin_end();
preempted = true;
- seen_sleepy_node(lock, val);
+ seen_sleepy_node();
smp_rmb(); /* See __yield_to_locked_owner comment */
@@ -546,7 +532,6 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
bool sleepy = false;
bool mustq = false;
int idx;
- int set_yield_cpu = -1;
int iters = 0;
BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
@@ -570,7 +555,7 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
node->next = NULL;
node->lock = lock;
node->cpu = smp_processor_id();
- node->yield_cpu = -1;
+ node->sleepy = 0;
node->locked = 0;
tail = encode_tail_cpu(node->cpu);
@@ -587,7 +572,8 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
* head of the waitqueue.
*/
if (old & _Q_TAIL_CPU_MASK) {
- struct qnode *prev = get_tail_qnode(lock, old);
+ int prev_cpu = decode_tail_cpu(old);
+ struct qnode *prev = get_tail_qnode(lock, prev_cpu);
/* Link @node into the waitqueue. */
WRITE_ONCE(prev->next, node);
@@ -597,16 +583,12 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
while (!READ_ONCE(node->locked)) {
spec_barrier();
- if (yield_to_prev(lock, node, old, paravirt))
+ if (yield_to_prev(lock, node, prev_cpu, paravirt))
seen_preempted = true;
}
spec_barrier();
spin_end();
- /* Clear out stale propagated yield_cpu */
- if (paravirt && pv_yield_propagate_owner && node->yield_cpu != -1)
- node->yield_cpu = -1;
-
smp_rmb(); /* acquire barrier for the mcs lock */
/*
@@ -648,7 +630,7 @@ again:
}
}
- propagate_yield_cpu(node, val, &set_yield_cpu, paravirt);
+ propagate_sleepy(node, val, paravirt);
preempted = yield_head_to_locked_owner(lock, val, paravirt);
if (!maybe_stealers)
continue;
@@ -952,21 +934,21 @@ static int pv_yield_prev_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_prev, pv_yield_prev_get, pv_yield_prev_set, "%llu\n");
-static int pv_yield_propagate_owner_set(void *data, u64 val)
+static int pv_yield_sleepy_owner_set(void *data, u64 val)
{
- pv_yield_propagate_owner = !!val;
+ pv_yield_sleepy_owner = !!val;
return 0;
}
-static int pv_yield_propagate_owner_get(void *data, u64 *val)
+static int pv_yield_sleepy_owner_get(void *data, u64 *val)
{
- *val = pv_yield_propagate_owner;
+ *val = pv_yield_sleepy_owner;
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_propagate_owner, pv_yield_propagate_owner_get, pv_yield_propagate_owner_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_sleepy_owner, pv_yield_sleepy_owner_get, pv_yield_sleepy_owner_set, "%llu\n");
static int pv_prod_head_set(void *data, u64 val)
{
@@ -998,7 +980,7 @@ static __init int spinlock_debugfs_init(void)
debugfs_create_file("qspl_pv_sleepy_lock_interval_ns", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock_interval_ns);
debugfs_create_file("qspl_pv_sleepy_lock_factor", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock_factor);
debugfs_create_file("qspl_pv_yield_prev", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_prev);
- debugfs_create_file("qspl_pv_yield_propagate_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_propagate_owner);
+ debugfs_create_file("qspl_pv_yield_sleepy_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_sleepy_owner);
debugfs_create_file("qspl_pv_prod_head", 0600, arch_debugfs_dir, NULL, &fops_pv_prod_head);
}
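In short, the rework above replaces the propagated yield_cpu with a single per-node flag. A reading aid restating the new behaviour (no new code, just the hunks above in one place):

/*
 * Reading aid for the rework above:
 * - Each queued waiter carries a 'sleepy' bit instead of a propagated
 *   yield_cpu.
 * - propagate_sleepy() marks the next waiter sleepy only when the lock
 *   owner's vCPU is seen preempted, so the queue turns non-sleepy again
 *   once preemption stops, even under contention.
 * - In yield_to_prev(), a waiter that is sleepy (or whose predecessor's
 *   vCPU is preempted) re-samples lock->val and yields to the current
 *   owner directly, rather than trusting a possibly stale yield_cpu.
 * - The debugfs knob follows the rename:
 *   qspl_pv_yield_propagate_owner becomes qspl_pv_yield_sleepy_owner.
 */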
diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
index 8b804e1a9fa4..4ed0efd03db5 100644
--- a/arch/powerpc/mm/book3s32/hash_low.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -36,8 +36,9 @@
/*
* Load a PTE into the hash table, if possible.
- * The address is in r4, and r3 contains an access flag:
- * _PAGE_RW (0x400) if a write.
+ * The address is in r4, and r3 contains required access flags:
+ * - For ISI: _PAGE_PRESENT | _PAGE_EXEC
+ * - For DSI: _PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE if a write.
* r9 contains the SRR1 value, from which we use the MSR_PR bit.
* SPRG_THREAD contains the physical address of the current task's thread.
*
@@ -67,12 +68,16 @@ _GLOBAL(hash_page)
lis r0, TASK_SIZE@h /* check if kernel address */
cmplw 0,r4,r0
mfspr r8,SPRN_SPRG_THREAD /* current task's THREAD (phys) */
- ori r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
lwz r5,PGDIR(r8) /* virt page-table root */
blt+ 112f /* assume user more likely */
lis r5,swapper_pg_dir@ha /* if kernel address, use */
+ andi. r0,r9,MSR_PR /* Check usermode */
addi r5,r5,swapper_pg_dir@l /* kernel page table */
- rlwimi r3,r9,32-12,29,29 /* MSR_PR -> _PAGE_USER */
+#ifdef CONFIG_SMP
+ bne- .Lhash_page_out /* return if usermode */
+#else
+ bnelr-
+#endif
112: tophys(r5, r5)
#ifndef CONFIG_PTE_64BIT
rlwimi r5,r4,12,20,29 /* insert top 10 bits of address */
@@ -113,15 +118,15 @@ _GLOBAL(hash_page)
lwarx r6,0,r8 /* get linux-style pte, flag word */
#ifdef CONFIG_PPC_KUAP
mfsrin r5,r4
- rlwinm r0,r9,28,_PAGE_RW /* MSR[PR] => _PAGE_RW */
- rlwinm r5,r5,12,_PAGE_RW /* Ks => _PAGE_RW */
+ rlwinm r0,r9,28,_PAGE_WRITE /* MSR[PR] => _PAGE_WRITE */
+ rlwinm r5,r5,12,_PAGE_WRITE /* Ks => _PAGE_WRITE */
andc r5,r5,r0 /* Ks & ~MSR[PR] */
- andc r5,r6,r5 /* Clear _PAGE_RW when Ks = 1 && MSR[PR] = 0 */
+ andc r5,r6,r5 /* Clear _PAGE_WRITE when Ks = 1 && MSR[PR] = 0 */
andc. r5,r3,r5 /* check access & ~permission */
#else
andc. r5,r3,r6 /* check access & ~permission */
#endif
- rlwinm r0,r3,32-3,24,24 /* _PAGE_RW access -> _PAGE_DIRTY */
+ rlwinm r0,r3,32-3,24,24 /* _PAGE_WRITE access -> _PAGE_DIRTY */
ori r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
#ifdef CONFIG_SMP
bne- .Lhash_page_out /* return if access not permitted */
@@ -307,12 +312,15 @@ Hash_msk = (((1 << Hash_bits) - 1) * 64)
__REF
_GLOBAL(create_hpte)
/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
- rlwinm r8,r5,32-9,30,30 /* _PAGE_RW -> PP msb */
+ lis r0, TASK_SIZE@h
+ rlwinm r5,r5,0,~3 /* Clear PP bits */
+ cmplw r4,r0
+ rlwinm r8,r5,32-9,30,30 /* _PAGE_WRITE -> PP msb */
rlwinm r0,r5,32-6,30,30 /* _PAGE_DIRTY -> PP msb */
and r8,r8,r0 /* writable if _RW & _DIRTY */
- rlwimi r5,r5,32-1,30,30 /* _PAGE_USER -> PP msb */
- rlwimi r5,r5,32-2,31,31 /* _PAGE_USER -> PP lsb */
- ori r8,r8,0xe04 /* clear out reserved bits */
+ bge- 1f /* Kernelspace ? Skip */
+ ori r5,r5,3 /* Userspace ? PP = 3 */
+1: ori r8,r8,0xe04 /* clear out reserved bits */
andc r8,r5,r8 /* PP = user? (rw&dirty? 1: 3): 0 */
BEGIN_FTR_SECTION
rlwinm r8,r8,0,~_PAGE_COHERENT /* clear M (coherence not required) */
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 850783cfa9c7..5445587bfe84 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -127,7 +127,7 @@ static void setibat(int index, unsigned long virt, phys_addr_t phys,
wimgxpp = (flags & _PAGE_COHERENT) | (_PAGE_EXEC ? BPP_RX : BPP_XX);
bat[0].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
bat[0].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
- if (flags & _PAGE_USER)
+ if (!is_kernel_addr(virt))
bat[0].batu |= 1; /* Vp = 1 */
}
@@ -277,10 +277,10 @@ void __init setbat(int index, unsigned long virt, phys_addr_t phys,
/* Do DBAT first */
wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
| _PAGE_COHERENT | _PAGE_GUARDED);
- wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
+ wimgxpp |= (flags & _PAGE_WRITE) ? BPP_RW : BPP_RX;
bat[1].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
- if (flags & _PAGE_USER)
+ if (!is_kernel_addr(virt))
bat[1].batu |= 1; /* Vp = 1 */
if (flags & _PAGE_GUARDED) {
/* G bit must be zero in IBATs */
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 8f8a62d3ff4d..be229290a6a7 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -635,12 +635,10 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
unsigned long prot;
/* Radix supports execute-only, but protection_map maps X -> RX */
- if (radix_enabled() && ((vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)) {
- prot = pgprot_val(PAGE_EXECONLY);
- } else {
- prot = pgprot_val(protection_map[vm_flags &
- (VM_ACCESS_FLAGS | VM_SHARED)]);
- }
+ if (!radix_enabled() && ((vm_flags & VM_ACCESS_FLAGS) == VM_EXEC))
+ vm_flags |= VM_READ;
+
+ prot = pgprot_val(protection_map[vm_flags & (VM_ACCESS_FLAGS | VM_SHARED)]);
if (vm_flags & VM_SAO)
prot |= _PAGE_SAO;
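A worked example of the new behaviour, read together with the protection_map update in mm/pgtable.c later in this diff (illustration only):

/*
 * A PROT_EXEC-only mapping has (vm_flags & VM_ACCESS_FLAGS) == VM_EXEC.
 * - radix: protection_map[VM_EXEC] is PAGE_EXECONLY_X (see the
 *   mm/pgtable.c hunk below), giving true execute-only.
 * - hash: VM_READ is ORed in above, so the lookup becomes
 *   protection_map[VM_EXEC | VM_READ] = PAGE_READONLY_X, keeping the
 *   traditional read-implies-exec behaviour.
 */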
diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c
index 2369d1bf2411..fde7790277f7 100644
--- a/arch/powerpc/mm/drmem.c
+++ b/arch/powerpc/mm/drmem.c
@@ -67,7 +67,7 @@ static int drmem_update_dt_v1(struct device_node *memory,
struct property *new_prop;
struct of_drconf_cell_v1 *dr_cell;
struct drmem_lmb *lmb;
- u32 *p;
+ __be32 *p;
new_prop = clone_property(prop, prop->length);
if (!new_prop)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index b1723094d464..9e49ede2bc1c 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -266,14 +266,15 @@ static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma
}
/*
- * VM_READ, VM_WRITE and VM_EXEC all imply read permissions, as
- * defined in protection_map[]. Read faults can only be caused by
- * a PROT_NONE mapping, or with a PROT_EXEC-only mapping on Radix.
+ * VM_READ, VM_WRITE and VM_EXEC may imply read permissions, as
+ * defined in protection_map[]. In that case Read faults can only be
+ * caused by a PROT_NONE mapping. However a non exec access on a
+ * VM_EXEC only mapping is invalid anyway, so report it as such.
*/
if (unlikely(!vma_is_accessible(vma)))
return true;
- if (unlikely(radix_enabled() && ((vma->vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)))
+ if ((vma->vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)
return true;
/*
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index d8adc452f431..4e71dfe7d026 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -39,6 +39,7 @@
#include <asm/hugetlb.h>
#include <asm/kup.h>
#include <asm/kasan.h>
+#include <asm/fixmap.h>
#include <mm/mmu_decl.h>
diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c
index 705e8e8ffde4..7b0afcabd89f 100644
--- a/arch/powerpc/mm/ioremap.c
+++ b/arch/powerpc/mm/ioremap.c
@@ -50,10 +50,6 @@ void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long flags)
if (pte_write(pte))
pte = pte_mkdirty(pte);
- /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
- pte = pte_exprotect(pte);
- pte = pte_mkprivileged(pte);
-
if (iowa_is_active())
return iowa_ioremap(addr, size, pte_pgprot(pte), caller);
return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
@@ -66,7 +62,7 @@ int early_ioremap_range(unsigned long ea, phys_addr_t pa,
unsigned long i;
for (i = 0; i < size; i += PAGE_SIZE) {
- int err = map_kernel_page(ea + i, pa + i, prot);
+ int err = map_kernel_page(ea + i, pa + i, pgprot_nx(prot));
if (WARN_ON_ONCE(err)) /* Should clean up */
return err;
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 07e8f4f1e07f..1717554b04b1 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -26,6 +26,7 @@
#include <asm/ftrace.h>
#include <asm/code-patching.h>
#include <asm/setup.h>
+#include <asm/fixmap.h>
#include <mm/mmu_decl.h>
diff --git a/arch/powerpc/mm/nohash/40x.c b/arch/powerpc/mm/nohash/40x.c
index 3684d6e570fb..e835e80c09db 100644
--- a/arch/powerpc/mm/nohash/40x.c
+++ b/arch/powerpc/mm/nohash/40x.c
@@ -48,20 +48,25 @@
*/
void __init MMU_init_hw(void)
{
+ int i;
+ unsigned long zpr;
+
/*
* The Zone Protection Register (ZPR) defines how protection will
- * be applied to every page which is a member of a given zone. At
- * present, we utilize only two of the 4xx's zones.
+ * be applied to every page which is a member of a given zone.
* The zone index bits (of ZSEL) in the PTE are used for software
- * indicators, except the LSB. For user access, zone 1 is used,
- * for kernel access, zone 0 is used. We set all but zone 1
- * to zero, allowing only kernel access as indicated in the PTE.
- * For zone 1, we set a 01 binary (a value of 10 will not work)
+ * indicators. We use the 4 upper bits of virtual address to select
+ * the zone. We set all zones above TASK_SIZE to zero, allowing
+ * only kernel access as indicated in the PTE. For zones below
+ * TASK_SIZE, we set a 01 binary (a value of 10 will not work)
* to allow user access as indicated in the PTE. This also allows
* kernel access as indicated in the PTE.
*/
- mtspr(SPRN_ZPR, 0x10000000);
+ for (i = 0, zpr = 0; i < TASK_SIZE >> 28; i++)
+ zpr |= 1 << (30 - i * 2);
+
+ mtspr(SPRN_ZPR, zpr);
flush_instruction_cache();
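A worked example of the value the new loop produces; the 0x80000000 TASK_SIZE below is an assumption, though it is the usual 40x configuration:

/*
 * Assuming TASK_SIZE = 0x80000000: TASK_SIZE >> 28 = 8, so the loop runs
 * for zones 0..7 and sets zpr |= 1 << 30, 1 << 28, ..., 1 << 16, giving
 * zpr = 0x55550000.  Zones covering user addresses get the '01' encoding
 * (honour PTE permissions for user and kernel); zones covering kernel
 * addresses stay '00' (kernel access only, as indicated in the PTE).
 */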
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index a642a7929892..6be6421086ed 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -10,6 +10,8 @@
#include <linux/memblock.h>
#include <linux/hugetlb.h>
+#include <asm/fixmap.h>
+
#include <mm/mmu_decl.h>
#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)
diff --git a/arch/powerpc/mm/nohash/book3e_pgtable.c b/arch/powerpc/mm/nohash/book3e_pgtable.c
index b80fc4a91a53..1c5e4ecbebeb 100644
--- a/arch/powerpc/mm/nohash/book3e_pgtable.c
+++ b/arch/powerpc/mm/nohash/book3e_pgtable.c
@@ -71,7 +71,7 @@ static void __init *early_alloc_pgtable(unsigned long size)
* map_kernel_page adds an entry to the ioremap page table
* and adds an entry to the HPT, possibly bolting it
*/
-int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
+int __ref map_kernel_page(unsigned long ea, phys_addr_t pa, pgprot_t prot)
{
pgd_t *pgdp;
p4d_t *p4dp;
diff --git a/arch/powerpc/mm/nohash/e500.c b/arch/powerpc/mm/nohash/e500.c
index 40a4e69ae1a9..921c3521ec11 100644
--- a/arch/powerpc/mm/nohash/e500.c
+++ b/arch/powerpc/mm/nohash/e500.c
@@ -117,15 +117,15 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0;
TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SR;
- TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_SW : 0;
+ TLBCAM[index].MAS3 |= (flags & _PAGE_WRITE) ? MAS3_SW : 0;
if (mmu_has_feature(MMU_FTR_BIG_PHYS))
TLBCAM[index].MAS7 = (u64)phys >> 32;
/* Below is unlikely -- only for large user pages or similar */
- if (pte_user(__pte(flags))) {
+ if (!is_kernel_addr(virt)) {
TLBCAM[index].MAS3 |= MAS3_UR;
TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_UX : 0;
- TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_UW : 0;
+ TLBCAM[index].MAS3 |= (flags & _PAGE_WRITE) ? MAS3_UW : 0;
} else {
TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_SX : 0;
}
diff --git a/arch/powerpc/mm/nohash/e500_hugetlbpage.c b/arch/powerpc/mm/nohash/e500_hugetlbpage.c
index 6b30e40d4590..a134d28a0e4d 100644
--- a/arch/powerpc/mm/nohash/e500_hugetlbpage.c
+++ b/arch/powerpc/mm/nohash/e500_hugetlbpage.c
@@ -178,8 +178,7 @@ book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
*
* This must always be called with the pte lock held.
*/
-void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep, unsigned int nr)
+void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
if (is_vm_hugetlb_page(vma))
book3e_hugetlb_preload(vma, address, *ptep);
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 4d69bfb9bc11..a04ae4449a02 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -46,13 +46,13 @@ static inline int is_exec_fault(void)
* and we avoid _PAGE_SPECIAL and cache inhibited pte. We also only do that
* on userspace PTEs
*/
-static inline int pte_looks_normal(pte_t pte)
+static inline int pte_looks_normal(pte_t pte, unsigned long addr)
{
if (pte_present(pte) && !pte_special(pte)) {
if (pte_ci(pte))
return 0;
- if (pte_user(pte))
+ if (!is_kernel_addr(addr))
return 1;
}
return 0;
@@ -79,11 +79,11 @@ static struct folio *maybe_pte_to_folio(pte_t pte)
* support falls into the same category.
*/
-static pte_t set_pte_filter_hash(pte_t pte)
+static pte_t set_pte_filter_hash(pte_t pte, unsigned long addr)
{
pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
- if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
- cpu_has_feature(CPU_FTR_NOEXECUTE))) {
+ if (pte_looks_normal(pte, addr) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
+ cpu_has_feature(CPU_FTR_NOEXECUTE))) {
struct folio *folio = maybe_pte_to_folio(pte);
if (!folio)
return pte;
@@ -97,7 +97,7 @@ static pte_t set_pte_filter_hash(pte_t pte)
#else /* CONFIG_PPC_BOOK3S */
-static pte_t set_pte_filter_hash(pte_t pte) { return pte; }
+static pte_t set_pte_filter_hash(pte_t pte, unsigned long addr) { return pte; }
#endif /* CONFIG_PPC_BOOK3S */
@@ -107,7 +107,7 @@ static pte_t set_pte_filter_hash(pte_t pte) { return pte; }
*
* This is also called once for the folio. So only work with folio->flags here.
*/
-static inline pte_t set_pte_filter(pte_t pte)
+static inline pte_t set_pte_filter(pte_t pte, unsigned long addr)
{
struct folio *folio;
@@ -115,10 +115,10 @@ static inline pte_t set_pte_filter(pte_t pte)
return pte;
if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
- return set_pte_filter_hash(pte);
+ return set_pte_filter_hash(pte, addr);
/* No exec permission in the first place, move on */
- if (!pte_exec(pte) || !pte_looks_normal(pte))
+ if (!pte_exec(pte) || !pte_looks_normal(pte, addr))
return pte;
/* If you set _PAGE_EXEC on weird pages you're on your own */
@@ -198,7 +198,7 @@ void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
* is called. Filter the pte value and use the filtered value
* to setup all the ptes in the range.
*/
- pte = set_pte_filter(pte);
+ pte = set_pte_filter(pte, addr);
/*
* We don't need to call arch_enter/leave_lazy_mmu_mode()
@@ -314,7 +314,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
*/
VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
- pte = set_pte_filter(pte);
+ pte = set_pte_filter(pte, addr);
val = pte_val(pte);
@@ -505,7 +505,7 @@ const pgprot_t protection_map[16] = {
[VM_READ] = PAGE_READONLY,
[VM_WRITE] = PAGE_COPY,
[VM_WRITE | VM_READ] = PAGE_COPY,
- [VM_EXEC] = PAGE_READONLY_X,
+ [VM_EXEC] = PAGE_EXECONLY_X,
[VM_EXEC | VM_READ] = PAGE_READONLY_X,
[VM_EXEC | VM_WRITE] = PAGE_COPY_X,
[VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X,
@@ -513,7 +513,7 @@ const pgprot_t protection_map[16] = {
[VM_SHARED | VM_READ] = PAGE_READONLY,
[VM_SHARED | VM_WRITE] = PAGE_SHARED,
[VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
- [VM_SHARED | VM_EXEC] = PAGE_READONLY_X,
+ [VM_SHARED | VM_EXEC] = PAGE_EXECONLY_X,
[VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X,
[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_X,
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X
diff --git a/arch/powerpc/mm/ptdump/8xx.c b/arch/powerpc/mm/ptdump/8xx.c
index fac932eb8f9a..b5c79b11ea3c 100644
--- a/arch/powerpc/mm/ptdump/8xx.c
+++ b/arch/powerpc/mm/ptdump/8xx.c
@@ -21,11 +21,6 @@ static const struct flag_info flag_array[] = {
.set = "huge",
.clear = " ",
}, {
- .mask = _PAGE_SH,
- .val = 0,
- .set = "user",
- .clear = " ",
- }, {
.mask = _PAGE_RO | _PAGE_NA,
.val = 0,
.set = "rw",
diff --git a/arch/powerpc/mm/ptdump/shared.c b/arch/powerpc/mm/ptdump/shared.c
index f884760ca5cf..39c30c62b7ea 100644
--- a/arch/powerpc/mm/ptdump/shared.c
+++ b/arch/powerpc/mm/ptdump/shared.c
@@ -11,15 +11,15 @@
static const struct flag_info flag_array[] = {
{
- .mask = _PAGE_USER,
- .val = _PAGE_USER,
- .set = "user",
- .clear = " ",
+ .mask = _PAGE_READ,
+ .val = 0,
+ .set = " ",
+ .clear = "r",
}, {
- .mask = _PAGE_RW,
+ .mask = _PAGE_WRITE,
.val = 0,
- .set = "r ",
- .clear = "rw",
+ .set = " ",
+ .clear = "w",
}, {
.mask = _PAGE_EXEC,
.val = _PAGE_EXEC,
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 72b7bb34fade..cdea5dccaefe 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -36,9 +36,6 @@
EMIT(PPC_RAW_BRANCH(offset)); \
} while (0)
-/* bl (unconditional 'branch' with link) */
-#define PPC_BL(dest) EMIT(PPC_RAW_BL((dest) - (unsigned long)(image + ctx->idx)))
-
/* "cond" here covers BO:BI fields. */
#define PPC_BCC_SHORT(cond, dest) \
do { \
@@ -147,12 +144,6 @@ struct codegen_context {
#define BPF_FIXUP_LEN 2 /* Two instructions => 8 bytes */
#endif
-static inline void bpf_flush_icache(void *start, void *end)
-{
- smp_wmb(); /* smp write barrier */
- flush_icache_range((unsigned long)start, (unsigned long)end);
-}
-
static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
{
return ctx->seen & (1 << (31 - i));
@@ -169,16 +160,17 @@ static inline void bpf_clear_seen_register(struct codegen_context *ctx, int i)
}
void bpf_jit_init_reg_mapping(struct codegen_context *ctx);
-int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func);
-int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
+int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func);
+int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
u32 *addrs, int pass, bool extra_pass);
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx);
void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx);
void bpf_jit_realloc_regs(struct codegen_context *ctx);
int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr);
-int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct codegen_context *ctx,
- int insn_idx, int jmp_off, int dst_reg);
+int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass,
+ struct codegen_context *ctx, int insn_idx,
+ int jmp_off, int dst_reg);
#endif
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 37043dfc1add..0f9a21783329 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -13,9 +13,13 @@
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
-#include <asm/kprobes.h>
+#include <linux/kernel.h>
+#include <linux/memory.h>
#include <linux/bpf.h>
+#include <asm/kprobes.h>
+#include <asm/code-patching.h>
+
#include "bpf_jit.h"
static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
@@ -39,10 +43,13 @@ int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg,
return 0;
}
-struct powerpc64_jit_data {
- struct bpf_binary_header *header;
+struct powerpc_jit_data {
+ /* address of rw header */
+ struct bpf_binary_header *hdr;
+ /* address of ro final header */
+ struct bpf_binary_header *fhdr;
u32 *addrs;
- u8 *image;
+ u8 *fimage;
u32 proglen;
struct codegen_context ctx;
};
@@ -59,15 +66,18 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
u8 *image = NULL;
u32 *code_base;
u32 *addrs;
- struct powerpc64_jit_data *jit_data;
+ struct powerpc_jit_data *jit_data;
struct codegen_context cgctx;
int pass;
int flen;
- struct bpf_binary_header *bpf_hdr;
+ struct bpf_binary_header *fhdr = NULL;
+ struct bpf_binary_header *hdr = NULL;
struct bpf_prog *org_fp = fp;
struct bpf_prog *tmp_fp;
bool bpf_blinded = false;
bool extra_pass = false;
+ u8 *fimage = NULL;
+ u32 *fcode_base;
u32 extable_len;
u32 fixup_len;
@@ -97,9 +107,16 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
addrs = jit_data->addrs;
if (addrs) {
cgctx = jit_data->ctx;
- image = jit_data->image;
- bpf_hdr = jit_data->header;
+ /*
+ * The program is first JIT compiled to a writable location (image/code_base).
+ * It is then moved to the readonly final location (fimage/fcode_base)
+ * using instruction patching.
+ */
+ fimage = jit_data->fimage;
+ fhdr = jit_data->fhdr;
proglen = jit_data->proglen;
+ hdr = jit_data->hdr;
+ image = (void *)hdr + ((void *)fimage - (void *)fhdr);
extra_pass = true;
/* During extra pass, ensure index is reset before repopulating extable entries */
cgctx.exentry_idx = 0;
@@ -119,7 +136,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
/* Scouting faux-generate pass 0 */
- if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0, false)) {
+ if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
/* We hit something illegal or unsupported. */
fp = org_fp;
goto out_addrs;
@@ -134,7 +151,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
*/
if (cgctx.seen & SEEN_TAILCALL || !is_offset_in_branch_range((long)cgctx.idx * 4)) {
cgctx.idx = 0;
- if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0, false)) {
+ if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
fp = org_fp;
goto out_addrs;
}
@@ -146,9 +163,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
* update ctgtx.idx as it pretends to output instructions, then we can
* calculate total size from idx.
*/
- bpf_jit_build_prologue(0, &cgctx);
+ bpf_jit_build_prologue(NULL, &cgctx);
addrs[fp->len] = cgctx.idx * 4;
- bpf_jit_build_epilogue(0, &cgctx);
+ bpf_jit_build_epilogue(NULL, &cgctx);
fixup_len = fp->aux->num_exentries * BPF_FIXUP_LEN * 4;
extable_len = fp->aux->num_exentries * sizeof(struct exception_table_entry);
@@ -156,17 +173,19 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
proglen = cgctx.idx * 4;
alloclen = proglen + FUNCTION_DESCR_SIZE + fixup_len + extable_len;
- bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4, bpf_jit_fill_ill_insns);
- if (!bpf_hdr) {
+ fhdr = bpf_jit_binary_pack_alloc(alloclen, &fimage, 4, &hdr, &image,
+ bpf_jit_fill_ill_insns);
+ if (!fhdr) {
fp = org_fp;
goto out_addrs;
}
if (extable_len)
- fp->aux->extable = (void *)image + FUNCTION_DESCR_SIZE + proglen + fixup_len;
+ fp->aux->extable = (void *)fimage + FUNCTION_DESCR_SIZE + proglen + fixup_len;
skip_init_ctx:
code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
+ fcode_base = (u32 *)(fimage + FUNCTION_DESCR_SIZE);
/* Code generation passes 1-2 */
for (pass = 1; pass < 3; pass++) {
@@ -174,8 +193,10 @@ skip_init_ctx:
cgctx.idx = 0;
cgctx.alt_exit_addr = 0;
bpf_jit_build_prologue(code_base, &cgctx);
- if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, pass, extra_pass)) {
- bpf_jit_binary_free(bpf_hdr);
+ if (bpf_jit_build_body(fp, code_base, fcode_base, &cgctx, addrs, pass,
+ extra_pass)) {
+ bpf_arch_text_copy(&fhdr->size, &hdr->size, sizeof(hdr->size));
+ bpf_jit_binary_pack_free(fhdr, hdr);
fp = org_fp;
goto out_addrs;
}
@@ -195,17 +216,19 @@ skip_init_ctx:
#ifdef CONFIG_PPC64_ELF_ABI_V1
/* Function descriptor nastiness: Address + TOC */
- ((u64 *)image)[0] = (u64)code_base;
+ ((u64 *)image)[0] = (u64)fcode_base;
((u64 *)image)[1] = local_paca->kernel_toc;
#endif
- fp->bpf_func = (void *)image;
+ fp->bpf_func = (void *)fimage;
fp->jited = 1;
fp->jited_len = proglen + FUNCTION_DESCR_SIZE;
- bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + bpf_hdr->size);
if (!fp->is_func || extra_pass) {
- bpf_jit_binary_lock_ro(bpf_hdr);
+ if (bpf_jit_binary_pack_finalize(fp, fhdr, hdr)) {
+ fp = org_fp;
+ goto out_addrs;
+ }
bpf_prog_fill_jited_linfo(fp, addrs);
out_addrs:
kfree(addrs);
@@ -215,8 +238,9 @@ out_addrs:
jit_data->addrs = addrs;
jit_data->ctx = cgctx;
jit_data->proglen = proglen;
- jit_data->image = image;
- jit_data->header = bpf_hdr;
+ jit_data->fimage = fimage;
+ jit_data->fhdr = fhdr;
+ jit_data->hdr = hdr;
}
out:
@@ -230,12 +254,13 @@ out:
* The caller should check for (BPF_MODE(code) == BPF_PROBE_MEM) before calling
* this function, as this only applies to BPF_PROBE_MEM, for now.
*/
-int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct codegen_context *ctx,
- int insn_idx, int jmp_off, int dst_reg)
+int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass,
+ struct codegen_context *ctx, int insn_idx, int jmp_off,
+ int dst_reg)
{
off_t offset;
unsigned long pc;
- struct exception_table_entry *ex;
+ struct exception_table_entry *ex, *ex_entry;
u32 *fixup;
/* Populate extable entries only in the last pass */
@@ -246,9 +271,16 @@ int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct code
WARN_ON_ONCE(ctx->exentry_idx >= fp->aux->num_exentries))
return -EINVAL;
+ /*
+ * Program is first written to image before copying to the
+ * final location (fimage). Accordingly, update in the image first.
+ * As all offsets used are relative, copying it as-is to the
+ * final location should be alright.
+ */
pc = (unsigned long)&image[insn_idx];
+ ex = (void *)fp->aux->extable - (void *)fimage + (void *)image;
- fixup = (void *)fp->aux->extable -
+ fixup = (void *)ex -
(fp->aux->num_exentries * BPF_FIXUP_LEN * 4) +
(ctx->exentry_idx * BPF_FIXUP_LEN * 4);
@@ -259,18 +291,71 @@ int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct code
fixup[BPF_FIXUP_LEN - 1] =
PPC_RAW_BRANCH((long)(pc + jmp_off) - (long)&fixup[BPF_FIXUP_LEN - 1]);
- ex = &fp->aux->extable[ctx->exentry_idx];
+ ex_entry = &ex[ctx->exentry_idx];
- offset = pc - (long)&ex->insn;
+ offset = pc - (long)&ex_entry->insn;
if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
return -ERANGE;
- ex->insn = offset;
+ ex_entry->insn = offset;
- offset = (long)fixup - (long)&ex->fixup;
+ offset = (long)fixup - (long)&ex_entry->fixup;
if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
return -ERANGE;
- ex->fixup = offset;
+ ex_entry->fixup = offset;
ctx->exentry_idx++;
return 0;
}
+
+void *bpf_arch_text_copy(void *dst, void *src, size_t len)
+{
+ int err;
+
+ if (WARN_ON_ONCE(core_kernel_text((unsigned long)dst)))
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&text_mutex);
+ err = patch_instructions(dst, src, len, false);
+ mutex_unlock(&text_mutex);
+
+ return err ? ERR_PTR(err) : dst;
+}
+
+int bpf_arch_text_invalidate(void *dst, size_t len)
+{
+ u32 insn = BREAKPOINT_INSTRUCTION;
+ int ret;
+
+ if (WARN_ON_ONCE(core_kernel_text((unsigned long)dst)))
+ return -EINVAL;
+
+ mutex_lock(&text_mutex);
+ ret = patch_instructions(dst, &insn, len, true);
+ mutex_unlock(&text_mutex);
+
+ return ret;
+}
+
+void bpf_jit_free(struct bpf_prog *fp)
+{
+ if (fp->jited) {
+ struct powerpc_jit_data *jit_data = fp->aux->jit_data;
+ struct bpf_binary_header *hdr;
+
+ /*
+ * If we fail the final pass of JIT (from jit_subprogs),
+ * the program may not be finalized yet. Call finalize here
+ * before freeing it.
+ */
+ if (jit_data) {
+ bpf_jit_binary_pack_finalize(fp, jit_data->fhdr, jit_data->hdr);
+ kvfree(jit_data->addrs);
+ kfree(jit_data);
+ }
+ hdr = bpf_jit_binary_pack_hdr(fp);
+ bpf_jit_binary_pack_free(hdr, NULL);
+ WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
+ }
+
+ bpf_prog_unlock_free(fp);
+}
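To tie the pieces of this file together, here is the two-image flow the patch introduces, as a reading aid under the names used above (no new behaviour):

/*
 * fhdr/fimage : read-only final location from bpf_jit_binary_pack_alloc()
 * hdr/image   : writable scratch copy returned by the same call
 *
 * 1. The prologue, body and epilogue are emitted into image (code_base),
 *    while relative offsets and extable entries are computed against
 *    fimage (fcode_base), where the program will actually run.
 * 2. bpf_jit_binary_pack_finalize() copies image to fimage via
 *    bpf_arch_text_copy(), i.e. patch_instructions(), and frees the
 *    writable copy.
 * 3. fp->bpf_func points into fimage; bpf_jit_free() finalizes first if a
 *    failed extra pass left the program unfinalized, then frees fhdr.
 */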
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index 7f91ea064c08..2f39c50ca729 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -200,12 +200,13 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
EMIT(PPC_RAW_BLR());
}
-int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
+/* Relative offset needs to be calculated based on final image location */
+int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
{
- s32 rel = (s32)func - (s32)(image + ctx->idx);
+ s32 rel = (s32)func - (s32)(fimage + ctx->idx);
if (image && rel < 0x2000000 && rel >= -0x2000000) {
- PPC_BL(func);
+ EMIT(PPC_RAW_BL(rel));
} else {
/* Load function address into r0 */
EMIT(PPC_RAW_LIS(_R0, IMM_H(func)));
@@ -278,7 +279,7 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
}
/* Assemble the body code between the prologue & epilogue */
-int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
+int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
u32 *addrs, int pass, bool extra_pass)
{
const struct bpf_insn *insn = fp->insnsi;
@@ -940,7 +941,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
* !fp->aux->verifier_zext. Emit NOP otherwise.
*
* Note that "li reg_h,0" is emitted for BPF_B/H/W case,
- * if necessary. So, jump there insted of emitting an
+ * if necessary. So, jump there instead of emitting an
* additional "li reg_h,0" instruction.
*/
if (size == BPF_DW && !fp->aux->verifier_zext)
@@ -997,7 +998,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
jmp_off += 4;
}
- ret = bpf_add_extable_entry(fp, image, pass, ctx, insn_idx,
+ ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx, insn_idx,
jmp_off, dst_reg);
if (ret)
return ret;
@@ -1053,7 +1054,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5), _R1, 12));
}
- ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+ ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
if (ret)
return ret;
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 0f8048f6dad6..79f23974a320 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -240,7 +240,7 @@ static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u
return 0;
}
-int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
+int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
{
unsigned int i, ctx_idx = ctx->idx;
@@ -361,7 +361,7 @@ asm (
);
/* Assemble the body code between the prologue & epilogue */
-int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
+int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
u32 *addrs, int pass, bool extra_pass)
{
enum stf_barrier_type stf_barrier = stf_barrier_type_get();
@@ -940,8 +940,8 @@ emit_clear:
addrs[++i] = ctx->idx * 4;
if (BPF_MODE(code) == BPF_PROBE_MEM) {
- ret = bpf_add_extable_entry(fp, image, pass, ctx, ctx->idx - 1,
- 4, dst_reg);
+ ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
+ ctx->idx - 1, 4, dst_reg);
if (ret)
return ret;
}
@@ -995,7 +995,7 @@ emit_clear:
if (func_addr_fixed)
ret = bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
else
- ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+ ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
if (ret)
return ret;
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 8c1f7def596e..10b946e9c6e7 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1371,8 +1371,7 @@ static void power_pmu_disable(struct pmu *pmu)
/*
* Disable instruction sampling if it was enabled
*/
- if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE)
- val &= ~MMCRA_SAMPLE_ENABLE;
+ val &= ~MMCRA_SAMPLE_ENABLE;
/* Disable BHRB via mmcra (BHRBRD) for p10 */
if (ppmu->flags & PPMU_ARCH_31)
@@ -1383,7 +1382,7 @@ static void power_pmu_disable(struct pmu *pmu)
* instruction sampling or BHRB.
*/
if (val != mmcra) {
- mtspr(SPRN_MMCRA, mmcra);
+ mtspr(SPRN_MMCRA, val);
mb();
isync();
}
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 3449be7c0d51..057ec2e3451d 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -1338,7 +1338,7 @@ static int get_count_from_result(struct perf_event *event,
for (i = count = 0, element_data = res->elements + data_offset;
i < num_elements;
i++, element_data += data_size + data_offset)
- count += be64_to_cpu(*((u64 *) element_data));
+ count += be64_to_cpu(*((__be64 *)element_data));
*countp = count;
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index 9d229ef7f86e..5d12ca386c1f 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -51,7 +51,7 @@ static int trace_imc_mem_size;
* core and trace-imc
*/
static struct imc_pmu_ref imc_global_refc = {
- .lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock),
+ .lock = __SPIN_LOCK_UNLOCKED(imc_global_refc.lock),
.id = 0,
.refc = 0,
};
@@ -544,7 +544,7 @@ static int nest_imc_event_init(struct perf_event *event)
break;
}
pcni++;
- } while (pcni->vbase != 0);
+ } while (pcni->vbase);
if (!flag)
return -ENODEV;
@@ -1025,16 +1025,16 @@ static bool is_thread_imc_pmu(struct perf_event *event)
return false;
}
-static u64 * get_event_base_addr(struct perf_event *event)
+static __be64 *get_event_base_addr(struct perf_event *event)
{
u64 addr;
if (is_thread_imc_pmu(event)) {
addr = (u64)per_cpu(thread_imc_mem, smp_processor_id());
- return (u64 *)(addr + (event->attr.config & IMC_EVENT_OFFSET_MASK));
+ return (__be64 *)(addr + (event->attr.config & IMC_EVENT_OFFSET_MASK));
}
- return (u64 *)event->hw.event_base;
+ return (__be64 *)event->hw.event_base;
}
static void thread_imc_pmu_start_txn(struct pmu *pmu,
@@ -1058,7 +1058,8 @@ static int thread_imc_pmu_commit_txn(struct pmu *pmu)
static u64 imc_read_counter(struct perf_event *event)
{
- u64 *addr, data;
+ __be64 *addr;
+ u64 data;
/*
* In-Memory Collection (IMC) counters are free flowing counters.
diff --git a/arch/powerpc/perf/power6-pmu.c b/arch/powerpc/perf/power6-pmu.c
index 5729b6e059de..9f720b522e17 100644
--- a/arch/powerpc/perf/power6-pmu.c
+++ b/arch/powerpc/perf/power6-pmu.c
@@ -335,26 +335,38 @@ static const unsigned int event_alternatives[][MAX_ALT] = {
{ 0x3000fe, 0x400056 }, /* PM_DATA_FROM_L3MISS */
};
-/*
- * This could be made more efficient with a binary search on
- * a presorted list, if necessary
- */
static int find_alternatives_list(u64 event)
{
- int i, j;
- unsigned int alt;
-
- for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
- if (event < event_alternatives[i][0])
- return -1;
- for (j = 0; j < MAX_ALT; ++j) {
- alt = event_alternatives[i][j];
- if (!alt || event < alt)
- break;
- if (event == alt)
- return i;
- }
+ const unsigned int presorted_event_table[] = {
+ 0x0130e8, 0x080080, 0x080088, 0x10000a, 0x10000b, 0x10000d, 0x10000e,
+ 0x100010, 0x10001a, 0x100026, 0x100054, 0x100056, 0x1000f0, 0x1000f8,
+ 0x1000fc, 0x200008, 0x20000e, 0x200010, 0x200012, 0x200054, 0x2000f0,
+ 0x2000f2, 0x2000f4, 0x2000f5, 0x2000f6, 0x2000f8, 0x2000fc, 0x2000fe,
+ 0x2d0030, 0x30000a, 0x30000c, 0x300010, 0x300012, 0x30001a, 0x300056,
+ 0x3000f0, 0x3000f2, 0x3000f6, 0x3000f8, 0x3000fc, 0x3000fe, 0x400006,
+ 0x400007, 0x40000a, 0x40000e, 0x400010, 0x400018, 0x400056, 0x4000f0,
+ 0x4000f8, 0x600005
+ };
+ const unsigned int event_index_table[] = {
+ 0, 1, 2, 3, 4, 1, 5, 6, 7, 8, 9, 10, 11, 12, 13, 12, 14,
+ 7, 15, 2, 9, 16, 3, 4, 0, 17, 10, 18, 19, 20, 1, 17, 15, 19,
+ 18, 2, 16, 21, 8, 0, 22, 13, 14, 11, 21, 5, 20, 22, 1, 6, 3
+ };
+ int hi = ARRAY_SIZE(presorted_event_table) - 1;
+ int lo = 0;
+
+ while (lo <= hi) {
+ int mid = lo + (hi - lo) / 2;
+ unsigned int alt = presorted_event_table[mid];
+
+ if (alt < event)
+ lo = mid + 1;
+ else if (alt > event)
+ hi = mid - 1;
+ else
+ return event_index_table[mid];
}
+
return -1;
}
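A quick check of the new lookup (illustration only; the row number assumes event_alternatives[] keeps the layout visible in the context above):

/*
 * find_alternatives_list(0x3000fe) binary searches presorted_event_table,
 * lands on slot 40, and event_index_table[40] is 22, which should be the
 * { 0x3000fe, 0x400056 } PM_DATA_FROM_L3MISS row of event_alternatives[]
 * shown in the context above.
 */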
diff --git a/arch/powerpc/platforms/4xx/soc.c b/arch/powerpc/platforms/4xx/soc.c
index b2d940437a66..5412e6b21e10 100644
--- a/arch/powerpc/platforms/4xx/soc.c
+++ b/arch/powerpc/platforms/4xx/soc.c
@@ -112,7 +112,7 @@ static int __init ppc4xx_l2c_probe(void)
}
/* Install error handler */
- if (request_irq(irq, l2c_error_handler, 0, "L2C", 0) < 0) {
+ if (request_irq(irq, l2c_error_handler, 0, "L2C", NULL) < 0) {
printk(KERN_ERR "Cannot install L2C error handler"
", cache is not enabled\n");
of_node_put(np);
diff --git a/arch/powerpc/platforms/83xx/misc.c b/arch/powerpc/platforms/83xx/misc.c
index 2fb2a85d131f..1135c1ab923c 100644
--- a/arch/powerpc/platforms/83xx/misc.c
+++ b/arch/powerpc/platforms/83xx/misc.c
@@ -14,6 +14,8 @@
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/ipic.h>
+#include <asm/fixmap.h>
+
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
diff --git a/arch/powerpc/platforms/8xx/cpm1.c b/arch/powerpc/platforms/8xx/cpm1.c
index ebb5f6a27dbf..b24d4102fbf6 100644
--- a/arch/powerpc/platforms/8xx/cpm1.c
+++ b/arch/powerpc/platforms/8xx/cpm1.c
@@ -40,6 +40,7 @@
#include <asm/io.h>
#include <asm/rheap.h>
#include <asm/cpm.h>
+#include <asm/fixmap.h>
#include <sysdev/fsl_soc.h>
diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c
index 77ea9335fd04..f381b177ea06 100644
--- a/arch/powerpc/platforms/book3s/vas-api.c
+++ b/arch/powerpc/platforms/book3s/vas-api.c
@@ -4,6 +4,8 @@
* Copyright (C) 2019 Haren Myneni, IBM Corp
*/
+#define pr_fmt(fmt) "vas-api: " fmt
+
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/cdev.h>
@@ -78,7 +80,7 @@ int get_vas_user_win_ref(struct vas_user_win_ref *task_ref)
task_ref->mm = get_task_mm(current);
if (!task_ref->mm) {
put_pid(task_ref->pid);
- pr_err("VAS: pid(%d): mm_struct is not found\n",
+ pr_err("pid(%d): mm_struct is not found\n",
current->pid);
return -EPERM;
}
@@ -235,8 +237,7 @@ void vas_update_csb(struct coprocessor_request_block *crb,
rc = kill_pid_info(SIGSEGV, &info, pid);
rcu_read_unlock();
- pr_devel("%s(): pid %d kill_proc_info() rc %d\n", __func__,
- pid_vnr(pid), rc);
+ pr_devel("pid %d kill_proc_info() rc %d\n", pid_vnr(pid), rc);
}
void vas_dump_crb(struct coprocessor_request_block *crb)
@@ -294,7 +295,7 @@ static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
rc = copy_from_user(&uattr, uptr, sizeof(uattr));
if (rc) {
- pr_err("%s(): copy_from_user() returns %d\n", __func__, rc);
+ pr_err("copy_from_user() returns %d\n", rc);
return -EFAULT;
}
@@ -311,7 +312,7 @@ static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
txwin = cp_inst->coproc->vops->open_win(uattr.vas_id, uattr.flags,
cp_inst->coproc->cop_type);
if (IS_ERR(txwin)) {
- pr_err("%s() VAS window open failed, %ld\n", __func__,
+ pr_err_ratelimited("VAS window open failed rc=%ld\n",
PTR_ERR(txwin));
return PTR_ERR(txwin);
}
@@ -405,8 +406,7 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf)
* window is not opened. Shouldn't expect this error.
*/
if (!cp_inst || !cp_inst->txwin) {
- pr_err("%s(): Unexpected fault on paste address with TX window closed\n",
- __func__);
+ pr_err("Unexpected fault on paste address with TX window closed\n");
return VM_FAULT_SIGBUS;
}
@@ -421,8 +421,7 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf)
* issue NX request.
*/
if (txwin->task_ref.vma != vmf->vma) {
- pr_err("%s(): No previous mapping with paste address\n",
- __func__);
+ pr_err("No previous mapping with paste address\n");
return VM_FAULT_SIGBUS;
}
@@ -481,19 +480,19 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
txwin = cp_inst->txwin;
if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
- pr_debug("%s(): size 0x%zx, PAGE_SIZE 0x%zx\n", __func__,
+ pr_debug("size 0x%zx, PAGE_SIZE 0x%zx\n",
(vma->vm_end - vma->vm_start), PAGE_SIZE);
return -EINVAL;
}
/* Ensure instance has an open send window */
if (!txwin) {
- pr_err("%s(): No send window open?\n", __func__);
+ pr_err("No send window open?\n");
return -EINVAL;
}
if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->paste_addr) {
- pr_err("%s(): VAS API is not registered\n", __func__);
+ pr_err("VAS API is not registered\n");
return -EACCES;
}
@@ -510,14 +509,14 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
*/
mutex_lock(&txwin->task_ref.mmap_mutex);
if (txwin->status != VAS_WIN_ACTIVE) {
- pr_err("%s(): Window is not active\n", __func__);
+ pr_err("Window is not active\n");
rc = -EACCES;
goto out;
}
paste_addr = cp_inst->coproc->vops->paste_addr(txwin);
if (!paste_addr) {
- pr_err("%s(): Window paste address failed\n", __func__);
+ pr_err("Window paste address failed\n");
rc = -EINVAL;
goto out;
}
@@ -533,8 +532,8 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
rc = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
vma->vm_end - vma->vm_start, prot);
- pr_devel("%s(): paste addr %llx at %lx, rc %d\n", __func__,
- paste_addr, vma->vm_start, rc);
+ pr_devel("paste addr %llx at %lx, rc %d\n", paste_addr,
+ vma->vm_start, rc);
txwin->task_ref.vma = vma;
vma->vm_ops = &vas_vm_ops;
@@ -609,8 +608,7 @@ int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
goto err;
}
- pr_devel("%s: Added dev [%d,%d]\n", __func__, MAJOR(devno),
- MINOR(devno));
+ pr_devel("Added dev [%d,%d]\n", MAJOR(devno), MINOR(devno));
return 0;
diff --git a/arch/powerpc/platforms/powermac/Kconfig b/arch/powerpc/platforms/powermac/Kconfig
index 130707ec9f99..8bdae0caf21e 100644
--- a/arch/powerpc/platforms/powermac/Kconfig
+++ b/arch/powerpc/platforms/powermac/Kconfig
@@ -2,6 +2,7 @@
config PPC_PMAC
bool "Apple PowerMac based machines"
depends on PPC_BOOK3S && CPU_BIG_ENDIAN
+ select ADB_CUDA if POWER_RESET && PPC32
select MPIC
select FORCE_PCI
select PPC_INDIRECT_PCI if PPC32
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index ae62d432db8b..81c9fbae88b1 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -2614,7 +2614,8 @@ static void __init probe_one_macio(const char *name, const char *compat, int typ
struct device_node* node;
int i;
volatile u32 __iomem *base;
- const u32 *addrp, *revp;
+ const __be32 *addrp;
+ const u32 *revp;
phys_addr_t addr;
u64 size;
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index 40f3aa432fba..c097d591670e 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -925,8 +925,10 @@ static void __init smu_i2c_probe(void)
sz = sizeof(struct pmac_i2c_bus) + sizeof(struct smu_i2c_cmd);
bus = kzalloc(sz, GFP_KERNEL);
- if (bus == NULL)
+ if (bus == NULL) {
+ of_node_put(busnode);
return;
+ }
bus->controller = controller;
bus->busnode = of_node_get(busnode);
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 8be71920e63c..c83d1e14077e 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -598,8 +598,10 @@ static void __init smp_core99_setup_i2c_hwsync(int ncpus)
name = "Pulsar";
break;
}
- if (pmac_tb_freeze != NULL)
+ if (pmac_tb_freeze != NULL) {
+ of_node_put(cc);
break;
+ }
}
if (pmac_tb_freeze != NULL) {
/* Open i2c bus for synchronous access */
diff --git a/arch/powerpc/platforms/powernv/opal-fadump.h b/arch/powerpc/platforms/powernv/opal-fadump.h
index 3f715efb0aa6..5eeb794b5eb1 100644
--- a/arch/powerpc/platforms/powernv/opal-fadump.h
+++ b/arch/powerpc/platforms/powernv/opal-fadump.h
@@ -135,7 +135,7 @@ static inline void opal_fadump_read_regs(char *bufp, unsigned int regs_cnt,
for (i = 0; i < regs_cnt; i++, bufp += reg_entry_size) {
reg_entry = (struct hdat_fadump_reg_entry *)bufp;
val = (cpu_endian ? be64_to_cpu(reg_entry->reg_val) :
- (u64)(reg_entry->reg_val));
+ (u64 __force)(reg_entry->reg_val));
opal_fadump_set_regval_regnum(regs,
be32_to_cpu(reg_entry->reg_type),
be32_to_cpu(reg_entry->reg_num),
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index aa4042dcd6d4..a43bfb01720a 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -55,7 +55,8 @@ static bool find_aa_index(struct device_node *dr_node,
struct property *ala_prop,
const u32 *lmb_assoc, u32 *aa_index)
{
- u32 *assoc_arrays, new_prop_size;
+ __be32 *assoc_arrays;
+ u32 new_prop_size;
struct property *new_prop;
int aa_arrays, aa_array_entries, aa_array_sz;
int i, index;
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 16d93b580f61..496e16c588aa 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -914,7 +914,8 @@ static int remove_ddw(struct device_node *np, bool remove_prop, const char *win_
return 0;
}
-static bool find_existing_ddw(struct device_node *pdn, u64 *dma_addr, int *window_shift)
+static bool find_existing_ddw(struct device_node *pdn, u64 *dma_addr, int *window_shift,
+ bool *direct_mapping)
{
struct dma_win *window;
const struct dynamic_dma_window_prop *dma64;
@@ -927,6 +928,7 @@ static bool find_existing_ddw(struct device_node *pdn, u64 *dma_addr, int *windo
dma64 = window->prop;
*dma_addr = be64_to_cpu(dma64->dma_base);
*window_shift = be32_to_cpu(dma64->window_shift);
+ *direct_mapping = window->direct;
found = true;
break;
}
@@ -1270,10 +1272,8 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
mutex_lock(&dma_win_init_mutex);
- if (find_existing_ddw(pdn, &dev->dev.archdata.dma_offset, &len)) {
- direct_mapping = (len >= max_ram_len);
+ if (find_existing_ddw(pdn, &dev->dev.archdata.dma_offset, &len, &direct_mapping))
goto out_unlock;
- }
/*
* If we already went through this for a previous function of
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index f2cb62148f36..4561667832ed 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -192,9 +192,9 @@ static void free_dtl_buffers(unsigned long *time_limit)
continue;
kmem_cache_free(dtl_cache, pp->dispatch_log);
pp->dtl_ridx = 0;
- pp->dispatch_log = 0;
- pp->dispatch_log_end = 0;
- pp->dtl_curr = 0;
+ pp->dispatch_log = NULL;
+ pp->dispatch_log_end = NULL;
+ pp->dtl_curr = NULL;
if (time_limit && time_after(jiffies, *time_limit)) {
cond_resched();
@@ -223,7 +223,7 @@ static void destroy_cpu_associativity(void)
{
kfree(vcpu_associativity);
kfree(pcpu_associativity);
- vcpu_associativity = pcpu_associativity = 0;
+ vcpu_associativity = pcpu_associativity = NULL;
}
static __be32 *__get_cpu_associativity(int cpu, __be32 *cpu_assoc, int flag)
@@ -526,8 +526,10 @@ static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p,
if (cmd) {
rc = init_cpu_associativity();
- if (rc)
+ if (rc) {
+ destroy_cpu_associativity();
goto out;
+ }
for_each_possible_cpu(cpu) {
disp = per_cpu_ptr(&vcpu_disp_data, cpu);
diff --git a/arch/powerpc/platforms/pseries/plpks.c b/arch/powerpc/platforms/pseries/plpks.c
index 2d40304eb6c1..febe18f251d0 100644
--- a/arch/powerpc/platforms/pseries/plpks.c
+++ b/arch/powerpc/platforms/pseries/plpks.c
@@ -150,7 +150,7 @@ static int plpks_gen_password(void)
ospasswordlength = maxpwsize;
ospassword = kzalloc(maxpwsize, GFP_KERNEL);
if (!ospassword) {
- kfree(password);
+ kfree_sensitive(password);
return -ENOMEM;
}
memcpy(ospassword, password, ospasswordlength);
@@ -163,7 +163,7 @@ static int plpks_gen_password(void)
}
}
out:
- kfree(password);
+ kfree_sensitive(password);
return pseries_status_to_err(rc);
}
diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c
index e25ac52acf50..b1f25bac280b 100644
--- a/arch/powerpc/platforms/pseries/vas.c
+++ b/arch/powerpc/platforms/pseries/vas.c
@@ -341,7 +341,7 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
if (atomic_inc_return(&cop_feat_caps->nr_used_credits) >
atomic_read(&cop_feat_caps->nr_total_credits)) {
- pr_err("Credits are not available to allocate window\n");
+ pr_err_ratelimited("Credits are not available to allocate window\n");
rc = -EINVAL;
goto out;
}
@@ -424,7 +424,7 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
put_vas_user_win_ref(&txwin->vas_win.task_ref);
rc = -EBUSY;
- pr_err("No credit is available to allocate window\n");
+ pr_err_ratelimited("No credit is available to allocate window\n");
out_free:
/*
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 57978a44d55b..558ec68d768e 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -11,9 +11,11 @@
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/seq_file.h>
@@ -392,7 +394,6 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
static const struct of_device_id fsl_of_msi_ids[];
static int fsl_of_msi_probe(struct platform_device *dev)
{
- const struct of_device_id *match;
struct fsl_msi *msi;
struct resource res, msiir;
int err, i, j, irq_index, count;
@@ -402,10 +403,7 @@ static int fsl_of_msi_probe(struct platform_device *dev)
u32 offset;
struct pci_controller *phb;
- match = of_match_device(fsl_of_msi_ids, &dev->dev);
- if (!match)
- return -EINVAL;
- features = match->data;
+ features = device_get_match_data(&dev->dev);
printk(KERN_DEBUG "Setting up Freescale MSI support\n");
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index ba287abcb008..dabbdd356664 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -355,7 +355,7 @@ static void __init mpic_test_broken_ipi(struct mpic *mpic)
mpic_write(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0), MPIC_VECPRI_MASK);
r = mpic_read(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0));
- if (r == le32_to_cpu(MPIC_VECPRI_MASK)) {
+ if (r == swab32(MPIC_VECPRI_MASK)) {
printk(KERN_INFO "mpic: Detected reversed IPI registers\n");
mpic->flags |= MPIC_BROKEN_IPI;
}
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 9f0af4d795d8..f1c0fa6ece21 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -802,7 +802,7 @@ int xive_native_get_queue_info(u32 vp_id, u32 prio,
if (out_qpage)
*out_qpage = be64_to_cpu(qpage);
if (out_qsize)
- *out_qsize = be32_to_cpu(qsize);
+ *out_qsize = be64_to_cpu(qsize);
if (out_qeoi_page)
*out_qeoi_page = be64_to_cpu(qeoi_page);
if (out_escalate_irq)
diff --git a/arch/powerpc/tools/gcc-check-mprofile-kernel.sh b/arch/powerpc/tools/gcc-check-mprofile-kernel.sh
index a31a56016c09..73e331e7660e 100755
--- a/arch/powerpc/tools/gcc-check-mprofile-kernel.sh
+++ b/arch/powerpc/tools/gcc-check-mprofile-kernel.sh
@@ -7,21 +7,20 @@ set -o pipefail
# To debug, uncomment the following line
# set -x
-# -mprofile-kernel is only supported on 64-bit, so this should not be invoked
-# for 32-bit. We pass in -m64 explicitly, and -mbig-endian and -mlittle-endian
-# are passed in from Kconfig, which takes care of toolchains defaulting to
-# other targets.
+# -mprofile-kernel is only supported on 64-bit with ELFv2, so this should not
+# be invoked for other targets. Therefore we can pass in -m64 and -mabi
+# explicitly, to take care of toolchains defaulting to other targets.
# Test whether the compile option -mprofile-kernel exists and generates
# profiling code (ie. a call to _mcount()).
echo "int func() { return 0; }" | \
- $* -m64 -S -x c -O2 -p -mprofile-kernel - -o - \
+ $* -m64 -mabi=elfv2 -S -x c -O2 -p -mprofile-kernel - -o - \
2> /dev/null | grep -q "_mcount"
# Test whether the notrace attribute correctly suppresses calls to _mcount().
echo -e "#include <linux/compiler.h>\nnotrace int func() { return 0; }" | \
- $* -m64 -S -x c -O2 -p -mprofile-kernel - -o - \
+ $* -m64 -mabi=elfv2 -S -x c -O2 -p -mprofile-kernel - -o - \
2> /dev/null | grep -q "_mcount" && \
exit 1
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index eaa15a20e6ae..95a2a06acc6a 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -39,6 +39,7 @@ config RISCV
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAS_VDSO_DATA
+ select ARCH_KEEP_MEMBLOCK if ACPI
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
select ARCH_STACKWALK
@@ -48,6 +49,7 @@ config RISCV
select ARCH_SUPPORTS_HUGETLBFS if MMU
select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU
select ARCH_SUPPORTS_PER_VMA_LOCK if MMU
+ select ARCH_SUPPORTS_SHADOW_CALL_STACK if HAVE_SHADOW_CALL_STACK
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USES_CFI_TRAPS if CFI_CLANG
@@ -174,6 +176,11 @@ config GCC_SUPPORTS_DYNAMIC_FTRACE
def_bool CC_IS_GCC
depends on $(cc-option,-fpatchable-function-entry=8)
+config HAVE_SHADOW_CALL_STACK
+ def_bool $(cc-option,-fsanitize=shadow-call-stack)
+ # https://github.com/riscv-non-isa/riscv-elf-psabi-doc/commit/a484e843e6eeb51f0cb7b8819e50da6d2444d769
+ depends on $(ld-option,--no-relax-gp)
+
config ARCH_MMAP_RND_BITS_MIN
default 18 if 64BIT
default 8
@@ -635,6 +642,15 @@ config THREAD_SIZE_ORDER
Specify the Pages of thread stack size (from 4KB to 64KB), which also
affects irq stack size, which is equal to thread stack size.
+config RISCV_MISALIGNED
+ bool "Support misaligned load/store traps for kernel and userspace"
+ select SYSCTL_ARCH_UNALIGN_ALLOW
+ default y
+ help
+ Say Y here if you want the kernel to embed support for misaligned
+ load/store for both kernel and userspace. When disabled, misaligned
+ accesses will generate SIGBUS in userspace and panic in the kernel.
+
endmenu # "Platform type"
menu "Kernel features"
@@ -902,6 +918,9 @@ config PORTABLE
select MMU
select OF
+config ARCH_PROC_KCORE_TEXT
+ def_bool y
+
menu "Power management options"
source "kernel/power/Kconfig"
diff --git a/arch/riscv/Kconfig.debug b/arch/riscv/Kconfig.debug
index e69de29bb2d1..eafe17ebf710 100644
--- a/arch/riscv/Kconfig.debug
+++ b/arch/riscv/Kconfig.debug
@@ -0,0 +1 @@
+source "arch/riscv/kernel/tests/Kconfig.debug"
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index b43a6bb7e4dc..a74be78678eb 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -54,6 +54,10 @@ endif
endif
endif
+ifeq ($(CONFIG_SHADOW_CALL_STACK),y)
+ KBUILD_LDFLAGS += --no-relax-gp
+endif
+
# ISA string setting
riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima
riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima
@@ -130,12 +134,6 @@ endif
libs-y += arch/riscv/lib/
libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
-PHONY += vdso_install
-vdso_install:
- $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
- $(if $(CONFIG_COMPAT),$(Q)$(MAKE) \
- $(build)=arch/riscv/kernel/compat_vdso compat_$@)
-
ifeq ($(KBUILD_EXTMOD),)
ifeq ($(CONFIG_MMU),y)
prepare: vdso_prepare
@@ -147,6 +145,9 @@ vdso_prepare: prepare0
endif
endif
+vdso-install-y += arch/riscv/kernel/vdso/vdso.so.dbg
+vdso-install-$(CONFIG_COMPAT) += arch/riscv/kernel/compat_vdso/compat_vdso.so.dbg:../compat_vdso/compat_vdso.so
+
ifneq ($(CONFIG_XIP_KERNEL),y)
ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_ARCH_CANAAN),yy)
KBUILD_IMAGE := $(boot)/loader.bin
diff --git a/arch/riscv/Makefile.postlink b/arch/riscv/Makefile.postlink
index a46fc578b30b..829b9abc91f6 100644
--- a/arch/riscv/Makefile.postlink
+++ b/arch/riscv/Makefile.postlink
@@ -36,9 +36,6 @@ ifdef CONFIG_RELOCATABLE
$(call if_changed,relocs_strip)
endif
-%.ko: FORCE
- @true
-
clean:
@true
diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile
index 22b13947bd13..8e7fc0edf21d 100644
--- a/arch/riscv/boot/Makefile
+++ b/arch/riscv/boot/Makefile
@@ -17,6 +17,7 @@
KCOV_INSTRUMENT := n
OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
+OBJCOPYFLAGS_loader.bin :=-O binary
OBJCOPYFLAGS_xipImage :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
targets := Image Image.* loader loader.o loader.lds loader.bin
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 1edf3cd886c5..905881282a7c 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -37,6 +37,13 @@ CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
CONFIG_PM=y
CONFIG_CPU_IDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_CPUFREQ_DT=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=m
CONFIG_ACPI=y
@@ -95,6 +102,7 @@ CONFIG_NETLINK_DIAG=y
CONFIG_CGROUP_NET_PRIO=y
CONFIG_NET_9P=y
CONFIG_NET_9P_VIRTIO=y
+CONFIG_CAN=m
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCI_HOST_GENERIC=y
@@ -102,6 +110,11 @@ CONFIG_PCIE_XILINX=y
CONFIG_PCIE_FU740=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_SPI_NOR=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_NVME=m
@@ -124,8 +137,11 @@ CONFIG_VIRTIO_NET=y
CONFIG_MACB=y
CONFIG_E1000E=y
CONFIG_R8169=y
+CONFIG_RAVB=y
CONFIG_STMMAC_ETH=m
+CONFIG_MICREL_PHY=y
CONFIG_MICROSEMI_PHY=y
+CONFIG_CAN_RCAR_CANFD=m
CONFIG_INPUT_MOUSEDEV=y
CONFIG_KEYBOARD_SUN4I_LRADC=m
CONFIG_SERIAL_8250=y
@@ -136,16 +152,24 @@ CONFIG_SERIAL_SH_SCI=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
+CONFIG_I2C_CHARDEV=m
CONFIG_I2C_MV64XXX=m
+CONFIG_I2C_RIIC=y
CONFIG_SPI=y
+CONFIG_SPI_RSPI=m
CONFIG_SPI_SIFIVE=y
CONFIG_SPI_SUN6I=y
# CONFIG_PTP_1588_CLOCK is not set
CONFIG_GPIO_SIFIVE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_RZG2L_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_SUNXI_WATCHDOG=y
+CONFIG_RENESAS_RZG2LWDT=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
CONFIG_DRM=m
CONFIG_DRM_RADEON=m
CONFIG_DRM_NOUVEAU=m
@@ -153,39 +177,69 @@ CONFIG_DRM_SUN4I=m
CONFIG_DRM_VIRTIO_GPU=m
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_RZ=m
+CONFIG_SND_SOC_WM8978=m
+CONFIG_SND_SIMPLE_CARD=m
CONFIG_USB=y
+CONFIG_USB_OTG=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_PLATFORM=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_RENESAS_USBHS=m
CONFIG_USB_STORAGE=y
CONFIG_USB_UAS=y
CONFIG_USB_MUSB_HDRC=m
CONFIG_USB_MUSB_SUNXI=m
CONFIG_NOP_USB_XCEIV=m
+CONFIG_USB_GADGET=y
+CONFIG_USB_RENESAS_USBHS_UDC=m
+CONFIG_USB_CONFIGFS=m
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_ACM=y
+CONFIG_USB_CONFIGFS_OBEX=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_ECM=y
+CONFIG_USB_CONFIGFS_ECM_SUBSET=y
+CONFIG_USB_CONFIGFS_RNDIS=y
+CONFIG_USB_CONFIGFS_EEM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_CADENCE=y
CONFIG_MMC_SPI=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_STARFIVE=y
+CONFIG_MMC_SDHI=y
CONFIG_MMC_SUNXI=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_SUN6I=y
CONFIG_DMADEVICES=y
CONFIG_DMA_SUN6I=m
+CONFIG_RZ_DMAC=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_INPUT=y
CONFIG_VIRTIO_MMIO=y
+CONFIG_RENESAS_OSTM=y
CONFIG_SUN8I_DE2_CCU=m
CONFIG_SUN50I_IOMMU=y
CONFIG_RPMSG_CHAR=y
CONFIG_RPMSG_CTRL=y
CONFIG_RPMSG_VIRTIO=y
CONFIG_ARCH_R9A07G043=y
+CONFIG_IIO=y
+CONFIG_RZG2L_ADC=m
+CONFIG_RESET_RZG2L_USBPHY_CTRL=y
CONFIG_PHY_SUN4I_USB=m
+CONFIG_PHY_RCAR_GEN3_USB2=y
CONFIG_LIBNVDIMM=y
CONFIG_NVMEM_SUNXI_SID=y
CONFIG_EXT4_FS=y
diff --git a/arch/riscv/include/asm/acpi.h b/arch/riscv/include/asm/acpi.h
index d5604d2073bc..7dad0cf9d701 100644
--- a/arch/riscv/include/asm/acpi.h
+++ b/arch/riscv/include/asm/acpi.h
@@ -66,6 +66,8 @@ int acpi_get_riscv_isa(struct acpi_table_header *table,
unsigned int cpu, const char **isa);
static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; }
+void acpi_get_cbo_block_size(struct acpi_table_header *table, u32 *cbom_size,
+ u32 *cboz_size, u32 *cbop_size);
#else
static inline void acpi_init_rintc_map(void) { }
static inline struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu)
@@ -79,6 +81,10 @@ static inline int acpi_get_riscv_isa(struct acpi_table_header *table,
return -EINVAL;
}
+static inline void acpi_get_cbo_block_size(struct acpi_table_header *table,
+ u32 *cbom_size, u32 *cboz_size,
+ u32 *cbop_size) { }
+
#endif /* CONFIG_ACPI */
#endif /*_ASM_ACPI_H*/
diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
index 61ba8ed43d8f..36b955c762ba 100644
--- a/arch/riscv/include/asm/asm-prototypes.h
+++ b/arch/riscv/include/asm/asm-prototypes.h
@@ -25,7 +25,6 @@ DECLARE_DO_ERROR_INFO(do_trap_ecall_s);
DECLARE_DO_ERROR_INFO(do_trap_ecall_m);
DECLARE_DO_ERROR_INFO(do_trap_break);
-asmlinkage unsigned long get_overflow_stack(void);
asmlinkage void handle_bad_stack(struct pt_regs *regs);
asmlinkage void do_page_fault(struct pt_regs *regs);
asmlinkage void do_irq(struct pt_regs *regs);
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index 114bbadaef41..b0487b39e674 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -82,6 +82,47 @@
.endr
.endm
+#ifdef CONFIG_SMP
+#ifdef CONFIG_32BIT
+#define PER_CPU_OFFSET_SHIFT 2
+#else
+#define PER_CPU_OFFSET_SHIFT 3
+#endif
+
+.macro asm_per_cpu dst sym tmp
+ REG_L \tmp, TASK_TI_CPU_NUM(tp)
+ slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT
+ la \dst, __per_cpu_offset
+ add \dst, \dst, \tmp
+ REG_L \tmp, 0(\dst)
+ la \dst, \sym
+ add \dst, \dst, \tmp
+.endm
+#else /* CONFIG_SMP */
+.macro asm_per_cpu dst sym tmp
+ la \dst, \sym
+.endm
+#endif /* CONFIG_SMP */
+
+.macro load_per_cpu dst ptr tmp
+ asm_per_cpu \dst \ptr \tmp
+ REG_L \dst, 0(\dst)
+.endm
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+/* gp is used as the shadow call stack pointer instead */
+.macro load_global_pointer
+.endm
+#else
+/* load __global_pointer to gp */
+.macro load_global_pointer
+.option push
+.option norelax
+ la gp, __global_pointer$
+.option pop
+.endm
+#endif /* CONFIG_SHADOW_CALL_STACK */
+
/* save all GPs except x1 ~ x5 */
.macro save_from_x6_to_x31
REG_S x6, PT_T1(sp)
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
index 65f6eee4ab8d..224b4dc02b50 100644
--- a/arch/riscv/include/asm/bitops.h
+++ b/arch/riscv/include/asm/bitops.h
@@ -15,13 +15,261 @@
#include <asm/barrier.h>
#include <asm/bitsperlong.h>
+#if !defined(CONFIG_RISCV_ISA_ZBB) || defined(NO_ALTERNATIVE)
#include <asm-generic/bitops/__ffs.h>
-#include <asm-generic/bitops/ffz.h>
-#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/fls.h>
+
+#else
+#include <asm/alternative-macros.h>
+#include <asm/hwcap.h>
+
+#if (BITS_PER_LONG == 64)
+#define CTZW "ctzw "
+#define CLZW "clzw "
+#elif (BITS_PER_LONG == 32)
+#define CTZW "ctz "
+#define CLZW "clz "
+#else
+#error "Unexpected BITS_PER_LONG"
+#endif
+
+static __always_inline unsigned long variable__ffs(unsigned long word)
+{
+ int num;
+
+ asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+ RISCV_ISA_EXT_ZBB, 1)
+ : : : : legacy);
+
+ asm volatile (".option push\n"
+ ".option arch,+zbb\n"
+ "ctz %0, %1\n"
+ ".option pop\n"
+ : "=r" (word) : "r" (word) :);
+
+ return word;
+
+legacy:
+ num = 0;
+#if BITS_PER_LONG == 64
+ if ((word & 0xffffffff) == 0) {
+ num += 32;
+ word >>= 32;
+ }
+#endif
+ if ((word & 0xffff) == 0) {
+ num += 16;
+ word >>= 16;
+ }
+ if ((word & 0xff) == 0) {
+ num += 8;
+ word >>= 8;
+ }
+ if ((word & 0xf) == 0) {
+ num += 4;
+ word >>= 4;
+ }
+ if ((word & 0x3) == 0) {
+ num += 2;
+ word >>= 2;
+ }
+ if ((word & 0x1) == 0)
+ num += 1;
+ return num;
+}
+
+/**
+ * __ffs - find first set bit in a long word
+ * @word: The word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+#define __ffs(word) \
+ (__builtin_constant_p(word) ? \
+ (unsigned long)__builtin_ctzl(word) : \
+ variable__ffs(word))
+
+static __always_inline unsigned long variable__fls(unsigned long word)
+{
+ int num;
+
+ asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+ RISCV_ISA_EXT_ZBB, 1)
+ : : : : legacy);
+
+ asm volatile (".option push\n"
+ ".option arch,+zbb\n"
+ "clz %0, %1\n"
+ ".option pop\n"
+ : "=r" (word) : "r" (word) :);
+
+ return BITS_PER_LONG - 1 - word;
+
+legacy:
+ num = BITS_PER_LONG - 1;
+#if BITS_PER_LONG == 64
+ if (!(word & (~0ul << 32))) {
+ num -= 32;
+ word <<= 32;
+ }
+#endif
+ if (!(word & (~0ul << (BITS_PER_LONG - 16)))) {
+ num -= 16;
+ word <<= 16;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG - 8)))) {
+ num -= 8;
+ word <<= 8;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG - 4)))) {
+ num -= 4;
+ word <<= 4;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG - 2)))) {
+ num -= 2;
+ word <<= 2;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG - 1))))
+ num -= 1;
+ return num;
+}
+
+/**
+ * __fls - find last set bit in a long word
+ * @word: the word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+#define __fls(word) \
+ (__builtin_constant_p(word) ? \
+ (unsigned long)(BITS_PER_LONG - 1 - __builtin_clzl(word)) : \
+ variable__fls(word))
+
+static __always_inline int variable_ffs(int x)
+{
+ int r;
+
+ if (!x)
+ return 0;
+
+ asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+ RISCV_ISA_EXT_ZBB, 1)
+ : : : : legacy);
+
+ asm volatile (".option push\n"
+ ".option arch,+zbb\n"
+ CTZW "%0, %1\n"
+ ".option pop\n"
+ : "=r" (r) : "r" (x) :);
+
+ return r + 1;
+
+legacy:
+ r = 1;
+ if (!(x & 0xffff)) {
+ x >>= 16;
+ r += 16;
+ }
+ if (!(x & 0xff)) {
+ x >>= 8;
+ r += 8;
+ }
+ if (!(x & 0xf)) {
+ x >>= 4;
+ r += 4;
+ }
+ if (!(x & 3)) {
+ x >>= 2;
+ r += 2;
+ }
+ if (!(x & 1)) {
+ x >>= 1;
+ r += 1;
+ }
+ return r;
+}
+
+/**
+ * ffs - find first set bit in a word
+ * @x: the word to search
+ *
+ * This is defined the same way as the libc and compiler builtin ffs routines.
+ *
+ * ffs(value) returns 0 if value is 0 or the position of the first set bit if
+ * value is nonzero. The first (least significant) bit is at position 1.
+ */
+#define ffs(x) (__builtin_constant_p(x) ? __builtin_ffs(x) : variable_ffs(x))
+
+static __always_inline int variable_fls(unsigned int x)
+{
+ int r;
+
+ if (!x)
+ return 0;
+
+ asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+ RISCV_ISA_EXT_ZBB, 1)
+ : : : : legacy);
+
+ asm volatile (".option push\n"
+ ".option arch,+zbb\n"
+ CLZW "%0, %1\n"
+ ".option pop\n"
+ : "=r" (r) : "r" (x) :);
+
+ return 32 - r;
+
+legacy:
+ r = 32;
+ if (!(x & 0xffff0000u)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xff000000u)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xf0000000u)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xc0000000u)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000u)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
+}
+
+/**
+ * fls - find last set bit in a word
+ * @x: the word to search
+ *
+ * This is defined in a similar way as ffs, but returns the position of the most
+ * significant set bit.
+ *
+ * fls(value) returns 0 if value is 0 or the position of the last set bit if
+ * value is nonzero. The last (most significant) bit is at position 32.
+ */
+#define fls(x) \
+({ \
+ typeof(x) x_ = (x); \
+ __builtin_constant_p(x_) ? \
+ (int)((x_ != 0) ? (32 - __builtin_clz(x_)) : 0) \
+ : \
+ variable_fls(x_); \
+})
+
+#endif /* !defined(CONFIG_RISCV_ISA_ZBB) || defined(NO_ALTERNATIVE) */
+
+#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/hweight.h>
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index d0345bd659c9..a418c3112cd6 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -7,7 +7,10 @@
#define _ASM_CPUFEATURE_H
#include <linux/bitmap.h>
+#include <linux/jump_label.h>
#include <asm/hwcap.h>
+#include <asm/alternative-macros.h>
+#include <asm/errno.h>
/*
* These are probed via a device_initcall(), via either the SBI or directly
@@ -30,6 +33,104 @@ DECLARE_PER_CPU(long, misaligned_access_speed);
/* Per-cpu ISA extensions. */
extern struct riscv_isainfo hart_isa[NR_CPUS];
-void check_unaligned_access(int cpu);
+void riscv_user_isa_enable(void);
+
+#ifdef CONFIG_RISCV_MISALIGNED
+bool unaligned_ctl_available(void);
+bool check_unaligned_access_emulated(int cpu);
+void unaligned_emulation_finish(void);
+#else
+static inline bool unaligned_ctl_available(void)
+{
+ return false;
+}
+
+static inline bool check_unaligned_access_emulated(int cpu)
+{
+ return false;
+}
+
+static inline void unaligned_emulation_finish(void) {}
+#endif
+
+unsigned long riscv_get_elf_hwcap(void);
+
+struct riscv_isa_ext_data {
+ const unsigned int id;
+ const char *name;
+ const char *property;
+};
+
+extern const struct riscv_isa_ext_data riscv_isa_ext[];
+extern const size_t riscv_isa_ext_count;
+extern bool riscv_isa_fallback;
+
+unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
+
+bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit);
+#define riscv_isa_extension_available(isa_bitmap, ext) \
+ __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
+
+static __always_inline bool
+riscv_has_extension_likely(const unsigned long ext)
+{
+ compiletime_assert(ext < RISCV_ISA_EXT_MAX,
+ "ext must be < RISCV_ISA_EXT_MAX");
+
+ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+ asm_volatile_goto(
+ ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1)
+ :
+ : [ext] "i" (ext)
+ :
+ : l_no);
+ } else {
+ if (!__riscv_isa_extension_available(NULL, ext))
+ goto l_no;
+ }
+
+ return true;
+l_no:
+ return false;
+}
+
+static __always_inline bool
+riscv_has_extension_unlikely(const unsigned long ext)
+{
+ compiletime_assert(ext < RISCV_ISA_EXT_MAX,
+ "ext must be < RISCV_ISA_EXT_MAX");
+
+ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+ asm_volatile_goto(
+ ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1)
+ :
+ : [ext] "i" (ext)
+ :
+ : l_yes);
+ } else {
+ if (__riscv_isa_extension_available(NULL, ext))
+ goto l_yes;
+ }
+
+ return false;
+l_yes:
+ return true;
+}
+
+static __always_inline bool riscv_cpu_has_extension_likely(int cpu, const unsigned long ext)
+{
+ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && riscv_has_extension_likely(ext))
+ return true;
+
+ return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
+}
+
+static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsigned long ext)
+{
+ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && riscv_has_extension_unlikely(ext))
+ return true;
+
+ return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
+}
#endif
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
index b3b2dfbdf945..06c236bfab53 100644
--- a/arch/riscv/include/asm/elf.h
+++ b/arch/riscv/include/asm/elf.h
@@ -14,7 +14,7 @@
#include <asm/auxvec.h>
#include <asm/byteorder.h>
#include <asm/cacheinfo.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
/*
* These are used to set parameters in the core dumps.
diff --git a/arch/riscv/include/asm/entry-common.h b/arch/riscv/include/asm/entry-common.h
index 6e4dee49d84b..7ab5e34318c8 100644
--- a/arch/riscv/include/asm/entry-common.h
+++ b/arch/riscv/include/asm/entry-common.h
@@ -8,4 +8,18 @@
void handle_page_fault(struct pt_regs *regs);
void handle_break(struct pt_regs *regs);
+#ifdef CONFIG_RISCV_MISALIGNED
+int handle_misaligned_load(struct pt_regs *regs);
+int handle_misaligned_store(struct pt_regs *regs);
+#else
+static inline int handle_misaligned_load(struct pt_regs *regs)
+{
+ return -1;
+}
+static inline int handle_misaligned_store(struct pt_regs *regs)
+{
+ return -1;
+}
+#endif
+
#endif /* _ASM_RISCV_ENTRY_COMMON_H */
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
index b55b434f0059..83ed25e43553 100644
--- a/arch/riscv/include/asm/errata_list.h
+++ b/arch/riscv/include/asm/errata_list.h
@@ -95,31 +95,31 @@ asm volatile(ALTERNATIVE( \
#endif
/*
- * dcache.ipa rs1 (invalidate, physical address)
+ * th.dcache.ipa rs1 (invalidate, physical address)
* | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
* 0000001 01010 rs1 000 00000 0001011
- * dache.iva rs1 (invalida, virtual address)
+ * th.dcache.iva rs1 (invalidate, virtual address)
* 0000001 00110 rs1 000 00000 0001011
*
- * dcache.cpa rs1 (clean, physical address)
+ * th.dcache.cpa rs1 (clean, physical address)
* | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
* 0000001 01001 rs1 000 00000 0001011
- * dcache.cva rs1 (clean, virtual address)
+ * th.dcache.cva rs1 (clean, virtual address)
* 0000001 00101 rs1 000 00000 0001011
*
- * dcache.cipa rs1 (clean then invalidate, physical address)
+ * th.dcache.cipa rs1 (clean then invalidate, physical address)
* | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
* 0000001 01011 rs1 000 00000 0001011
- * dcache.civa rs1 (... virtual address)
+ * th.dcache.civa rs1 (... virtual address)
* 0000001 00111 rs1 000 00000 0001011
*
- * sync.s (make sure all cache operations finished)
+ * th.sync.s (make sure all cache operations finished)
* | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
* 0000000 11001 00000 000 00000 0001011
*/
-#define THEAD_inval_A0 ".long 0x0265000b"
-#define THEAD_clean_A0 ".long 0x0255000b"
-#define THEAD_flush_A0 ".long 0x0275000b"
+#define THEAD_INVAL_A0 ".long 0x0265000b"
+#define THEAD_CLEAN_A0 ".long 0x0255000b"
+#define THEAD_FLUSH_A0 ".long 0x0275000b"
#define THEAD_SYNC_S ".long 0x0190000b"
#define ALT_CMO_OP(_op, _start, _size, _cachesize) \
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index 6fc51c1b34cf..06d30526ef3b 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -8,9 +8,6 @@
#ifndef _ASM_RISCV_HWCAP_H
#define _ASM_RISCV_HWCAP_H
-#include <asm/alternative-macros.h>
-#include <asm/errno.h>
-#include <linux/bits.h>
#include <uapi/asm/hwcap.h>
#define RISCV_ISA_EXT_a ('a' - 'a')
@@ -69,76 +66,4 @@
#define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SSAIA
#endif
-#ifndef __ASSEMBLY__
-
-#include <linux/jump_label.h>
-
-unsigned long riscv_get_elf_hwcap(void);
-
-struct riscv_isa_ext_data {
- const unsigned int id;
- const char *name;
- const char *property;
-};
-
-extern const struct riscv_isa_ext_data riscv_isa_ext[];
-extern const size_t riscv_isa_ext_count;
-extern bool riscv_isa_fallback;
-
-unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
-
-#define riscv_isa_extension_mask(ext) BIT_MASK(RISCV_ISA_EXT_##ext)
-
-bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit);
-#define riscv_isa_extension_available(isa_bitmap, ext) \
- __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
-
-static __always_inline bool
-riscv_has_extension_likely(const unsigned long ext)
-{
- compiletime_assert(ext < RISCV_ISA_EXT_MAX,
- "ext must be < RISCV_ISA_EXT_MAX");
-
- if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
- asm_volatile_goto(
- ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1)
- :
- : [ext] "i" (ext)
- :
- : l_no);
- } else {
- if (!__riscv_isa_extension_available(NULL, ext))
- goto l_no;
- }
-
- return true;
-l_no:
- return false;
-}
-
-static __always_inline bool
-riscv_has_extension_unlikely(const unsigned long ext)
-{
- compiletime_assert(ext < RISCV_ISA_EXT_MAX,
- "ext must be < RISCV_ISA_EXT_MAX");
-
- if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
- asm_volatile_goto(
- ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1)
- :
- : [ext] "i" (ext)
- :
- : l_yes);
- } else {
- if (__riscv_isa_extension_available(NULL, ext))
- goto l_yes;
- }
-
- return false;
-l_yes:
- return true;
-}
-
-#endif
-
#endif /* _ASM_RISCV_HWCAP_H */
diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h
index 78936f4ff513..5c48f48e79a6 100644
--- a/arch/riscv/include/asm/hwprobe.h
+++ b/arch/riscv/include/asm/hwprobe.h
@@ -8,6 +8,11 @@
#include <uapi/asm/hwprobe.h>
-#define RISCV_HWPROBE_MAX_KEY 5
+#define RISCV_HWPROBE_MAX_KEY 6
+
+static inline bool riscv_hwprobe_key_is_valid(__s64 key)
+{
+ return key >= 0 && key <= RISCV_HWPROBE_MAX_KEY;
+}
#endif
diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h
index 6960beb75f32..e27179b26086 100644
--- a/arch/riscv/include/asm/insn-def.h
+++ b/arch/riscv/include/asm/insn-def.h
@@ -180,19 +180,19 @@
INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(51), \
__RD(0), RS1(gaddr), RS2(vmid))
-#define CBO_inval(base) \
+#define CBO_INVAL(base) \
INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \
RS1(base), SIMM12(0))
-#define CBO_clean(base) \
+#define CBO_CLEAN(base) \
INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \
RS1(base), SIMM12(1))
-#define CBO_flush(base) \
+#define CBO_FLUSH(base) \
INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \
RS1(base), SIMM12(2))
-#define CBO_zero(base) \
+#define CBO_ZERO(base) \
INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \
RS1(base), SIMM12(4))
diff --git a/arch/riscv/include/asm/irq_stack.h b/arch/riscv/include/asm/irq_stack.h
index e4042d297580..6441ded3b0cf 100644
--- a/arch/riscv/include/asm/irq_stack.h
+++ b/arch/riscv/include/asm/irq_stack.h
@@ -12,6 +12,9 @@
DECLARE_PER_CPU(ulong *, irq_stack_ptr);
+asmlinkage void call_on_irq_stack(struct pt_regs *regs,
+ void (*func)(struct pt_regs *));
+
#ifdef CONFIG_VMAP_STACK
/*
* To ensure that VMAP'd stack overflow detection works correctly, all VMAP'd
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 5488ecc337b6..57e887bfa34c 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -33,8 +33,8 @@
#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
#endif
/*
- * By default, CONFIG_PAGE_OFFSET value corresponds to SV48 address space so
- * define the PAGE_OFFSET value for SV39.
+ * By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so
+ * define the PAGE_OFFSET value for SV48 and SV39.
*/
#define PAGE_OFFSET_L4 _AC(0xffffaf8000000000, UL)
#define PAGE_OFFSET_L3 _AC(0xffffffd800000000, UL)
diff --git a/arch/riscv/include/asm/pgtable-32.h b/arch/riscv/include/asm/pgtable-32.h
index 59ba1fbaf784..00f3369570a8 100644
--- a/arch/riscv/include/asm/pgtable-32.h
+++ b/arch/riscv/include/asm/pgtable-32.h
@@ -33,4 +33,7 @@
_PAGE_WRITE | _PAGE_EXEC | \
_PAGE_USER | _PAGE_GLOBAL))
+static const __maybe_unused int pgtable_l4_enabled;
+static const __maybe_unused int pgtable_l5_enabled;
+
#endif /* _ASM_RISCV_PGTABLE_32_H */
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index 7a5097202e15..9a2c780a11e9 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -126,14 +126,18 @@ enum napot_cont_order {
/*
* [63:59] T-Head Memory Type definitions:
- *
- * 00000 - NC Weakly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable
+ * bit[63] SO - Strong Order
+ * bit[62] C - Cacheable
+ * bit[61] B - Bufferable
+ * bit[60] SH - Shareable
+ * bit[59] Sec - Trustable
+ * 00110 - NC Weakly-ordered, Non-cacheable, Bufferable, Shareable, Non-trustable
* 01110 - PMA Weakly-ordered, Cacheable, Bufferable, Shareable, Non-trustable
- * 10000 - IO Strongly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable
+ * 10010 - IO Strongly-ordered, Non-cacheable, Non-bufferable, Shareable, Non-trustable
*/
#define _PAGE_PMA_THEAD ((1UL << 62) | (1UL << 61) | (1UL << 60))
-#define _PAGE_NOCACHE_THEAD 0UL
-#define _PAGE_IO_THEAD (1UL << 63)
+#define _PAGE_NOCACHE_THEAD ((1UL << 61) | (1UL << 60))
+#define _PAGE_IO_THEAD ((1UL << 63) | (1UL << 60))
#define _PAGE_MTMASK_THEAD (_PAGE_PMA_THEAD | _PAGE_IO_THEAD | (1UL << 59))
static inline u64 riscv_page_mtmask(void)
diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h
index f896708e8331..179bd4afece4 100644
--- a/arch/riscv/include/asm/pgtable-bits.h
+++ b/arch/riscv/include/asm/pgtable-bits.h
@@ -16,9 +16,9 @@
#define _PAGE_GLOBAL (1 << 5) /* Global */
#define _PAGE_ACCESSED (1 << 6) /* Set by hardware on any access */
#define _PAGE_DIRTY (1 << 7) /* Set by hardware on any write */
-#define _PAGE_SOFT (1 << 8) /* Reserved for software */
+#define _PAGE_SOFT (3 << 8) /* Reserved for software */
-#define _PAGE_SPECIAL _PAGE_SOFT
+#define _PAGE_SPECIAL (1 << 8) /* RSW: 0x1 */
#define _PAGE_TABLE _PAGE_PRESENT
/*
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index b2ba3f79cfe9..294044429e8e 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -291,6 +291,7 @@ static inline pte_t pud_pte(pud_t pud)
}
#ifdef CONFIG_RISCV_ISA_SVNAPOT
+#include <asm/cpufeature.h>
static __always_inline bool has_svnapot(void)
{
@@ -811,7 +812,7 @@ extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
* bit 5: _PAGE_PROT_NONE (zero)
* bit 6: exclusive marker
* bits 7 to 11: swap type
- * bits 11 to XLEN-1: swap offset
+ * bits 12 to XLEN-1: swap offset
*/
#define __SWP_TYPE_SHIFT 7
#define __SWP_TYPE_BITS 5
@@ -914,7 +915,6 @@ extern uintptr_t _dtb_early_pa;
#define dtb_early_pa _dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */
extern u64 satp_mode;
-extern bool pgtable_l4_enabled;
void paging_init(void);
void misc_mem_init(void);
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 441da1839c94..f19f861cda54 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -8,6 +8,7 @@
#include <linux/const.h>
#include <linux/cache.h>
+#include <linux/prctl.h>
#include <vdso/processor.h>
@@ -82,6 +83,7 @@ struct thread_struct {
unsigned long bad_cause;
unsigned long vstate_ctrl;
struct __riscv_v_ext_state vstate;
+ unsigned long align_ctl;
};
/* Whitelist the fstate from the task_struct for hardened usercopy */
@@ -94,6 +96,7 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
#define INIT_THREAD { \
.sp = sizeof(init_stack) + (long)&init_stack, \
+ .align_ctl = PR_UNALIGN_NOPRINT, \
}
#define task_pt_regs(tsk) \
@@ -136,6 +139,12 @@ extern long riscv_v_vstate_ctrl_set_current(unsigned long arg);
extern long riscv_v_vstate_ctrl_get_current(void);
#endif /* CONFIG_RISCV_ISA_V */
+extern int get_unalign_ctl(struct task_struct *tsk, unsigned long addr);
+extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
+
+#define GET_UNALIGN_CTL(tsk, addr) get_unalign_ctl((tsk), (addr))
+#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val))
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_PROCESSOR_H */
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 12dfda6bb924..0892f4421bc4 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -280,9 +280,6 @@ void sbi_set_timer(uint64_t stime_value);
void sbi_shutdown(void);
void sbi_send_ipi(unsigned int cpu);
int sbi_remote_fence_i(const struct cpumask *cpu_mask);
-int sbi_remote_sfence_vma(const struct cpumask *cpu_mask,
- unsigned long start,
- unsigned long size);
int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
unsigned long start,
diff --git a/arch/riscv/include/asm/scs.h b/arch/riscv/include/asm/scs.h
new file mode 100644
index 000000000000..0e45db78b24b
--- /dev/null
+++ b/arch/riscv/include/asm/scs.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SCS_H
+#define _ASM_SCS_H
+
+#ifdef __ASSEMBLY__
+#include <asm/asm-offsets.h>
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+
+/* Load init_shadow_call_stack to gp. */
+.macro scs_load_init_stack
+ la gp, init_shadow_call_stack
+ XIP_FIXUP_OFFSET gp
+.endm
+
+/* Load the per-CPU IRQ shadow call stack to gp. */
+.macro scs_load_irq_stack tmp
+ load_per_cpu gp, irq_shadow_call_stack_ptr, \tmp
+.endm
+
+/* Load task_scs_sp(current) to gp. */
+.macro scs_load_current
+ REG_L gp, TASK_TI_SCS_SP(tp)
+.endm
+
+/* Load task_scs_sp(current) to gp, but only if tp has changed. */
+.macro scs_load_current_if_task_changed prev
+ beq \prev, tp, _skip_scs
+ scs_load_current
+_skip_scs:
+.endm
+
+/* Save gp to task_scs_sp(current). */
+.macro scs_save_current
+ REG_S gp, TASK_TI_SCS_SP(tp)
+.endm
+
+#else /* CONFIG_SHADOW_CALL_STACK */
+
+.macro scs_load_init_stack
+.endm
+.macro scs_load_irq_stack tmp
+.endm
+.macro scs_load_current
+.endm
+.macro scs_load_current_if_task_changed prev
+.endm
+.macro scs_save_current
+.endm
+
+#endif /* CONFIG_SHADOW_CALL_STACK */
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_SCS_H */
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index a727be723c56..f90d8e42f3c7 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -9,7 +9,7 @@
#include <linux/jump_label.h>
#include <linux/sched/task_stack.h>
#include <asm/vector.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/csr.h>
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 1833beb00489..574779900bfb 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -34,9 +34,6 @@
#ifndef __ASSEMBLY__
-extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)];
-extern unsigned long spin_shadow_stack;
-
#include <asm/processor.h>
#include <asm/csr.h>
@@ -60,8 +57,20 @@ struct thread_info {
long user_sp; /* User stack pointer */
int cpu;
unsigned long syscall_work; /* SYSCALL_WORK_ flags */
+#ifdef CONFIG_SHADOW_CALL_STACK
+ void *scs_base;
+ void *scs_sp;
+#endif
};
+#ifdef CONFIG_SHADOW_CALL_STACK
+#define INIT_SCS \
+ .scs_base = init_shadow_call_stack, \
+ .scs_sp = init_shadow_call_stack,
+#else
+#define INIT_SCS
+#endif
+
/*
* macros/functions for gaining access to the thread information structure
*
@@ -71,6 +80,7 @@ struct thread_info {
{ \
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
+ INIT_SCS \
}
void arch_release_task_struct(struct task_struct *tsk);
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
index 120bcf2ed8a8..1eb5682b2af6 100644
--- a/arch/riscv/include/asm/tlb.h
+++ b/arch/riscv/include/asm/tlb.h
@@ -15,7 +15,13 @@ static void tlb_flush(struct mmu_gather *tlb);
static inline void tlb_flush(struct mmu_gather *tlb)
{
- flush_tlb_mm(tlb->mm);
+#ifdef CONFIG_MMU
+ if (tlb->fullmm || tlb->need_flush_all)
+ flush_tlb_mm(tlb->mm);
+ else
+ flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end,
+ tlb_get_unmap_size(tlb));
+#endif
}
#endif /* _ASM_RISCV_TLB_H */
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index a09196f8de68..8f3418c5f172 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -11,6 +11,9 @@
#include <asm/smp.h>
#include <asm/errata_list.h>
+#define FLUSH_TLB_MAX_SIZE ((unsigned long)-1)
+#define FLUSH_TLB_NO_ASID ((unsigned long)-1)
+
#ifdef CONFIG_MMU
extern unsigned long asid_mask;
@@ -32,9 +35,12 @@ static inline void local_flush_tlb_page(unsigned long addr)
#if defined(CONFIG_SMP) && defined(CONFIG_MMU)
void flush_tlb_all(void);
void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end, unsigned int page_size);
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
@@ -51,14 +57,15 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
local_flush_tlb_all();
}
-#define flush_tlb_mm(mm) flush_tlb_all()
-#endif /* !CONFIG_SMP || !CONFIG_MMU */
-
/* Flush a range of kernel pages */
static inline void flush_tlb_kernel_range(unsigned long start,
unsigned long end)
{
- flush_tlb_all();
+ local_flush_tlb_all();
}
+#define flush_tlb_mm(mm) flush_tlb_all()
+#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
+#endif /* !CONFIG_SMP || !CONFIG_MMU */
+
#endif /* _ASM_RISCV_TLBFLUSH_H */
diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h
index 14f5d27783b8..96b65a5396df 100644
--- a/arch/riscv/include/asm/vdso/processor.h
+++ b/arch/riscv/include/asm/vdso/processor.h
@@ -14,7 +14,7 @@ static inline void cpu_relax(void)
__asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
#endif
-#ifdef __riscv_zihintpause
+#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
/*
* Reduce instruction retirement.
* This assumes the PC changes.
diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
index c5ee07b3df07..87aaef656257 100644
--- a/arch/riscv/include/asm/vector.h
+++ b/arch/riscv/include/asm/vector.h
@@ -15,7 +15,7 @@
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <asm/ptrace.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
#include <asm/csr.h>
#include <asm/asm.h>
diff --git a/arch/riscv/include/uapi/asm/elf.h b/arch/riscv/include/uapi/asm/elf.h
index d696d6610231..11a71b8533d5 100644
--- a/arch/riscv/include/uapi/asm/elf.h
+++ b/arch/riscv/include/uapi/asm/elf.h
@@ -49,6 +49,7 @@ typedef union __riscv_fp_state elf_fpregset_t;
#define R_RISCV_TLS_DTPREL64 9
#define R_RISCV_TLS_TPREL32 10
#define R_RISCV_TLS_TPREL64 11
+#define R_RISCV_IRELATIVE 58
/* Relocation types not used by the dynamic linker */
#define R_RISCV_BRANCH 16
@@ -81,7 +82,6 @@ typedef union __riscv_fp_state elf_fpregset_t;
#define R_RISCV_ALIGN 43
#define R_RISCV_RVC_BRANCH 44
#define R_RISCV_RVC_JUMP 45
-#define R_RISCV_LUI 46
#define R_RISCV_GPREL_I 47
#define R_RISCV_GPREL_S 48
#define R_RISCV_TPREL_I 49
@@ -93,6 +93,9 @@ typedef union __riscv_fp_state elf_fpregset_t;
#define R_RISCV_SET16 55
#define R_RISCV_SET32 56
#define R_RISCV_32_PCREL 57
+#define R_RISCV_PLT32 59
+#define R_RISCV_SET_ULEB128 60
+#define R_RISCV_SUB_ULEB128 61
#endif /* _UAPI_ASM_RISCV_ELF_H */
diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h
index d43e306ce2f9..b659ffcfcdb4 100644
--- a/arch/riscv/include/uapi/asm/hwprobe.h
+++ b/arch/riscv/include/uapi/asm/hwprobe.h
@@ -29,6 +29,7 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_EXT_ZBA (1 << 3)
#define RISCV_HWPROBE_EXT_ZBB (1 << 4)
#define RISCV_HWPROBE_EXT_ZBS (1 << 5)
+#define RISCV_HWPROBE_EXT_ZICBOZ (1 << 6)
#define RISCV_HWPROBE_KEY_CPUPERF_0 5
#define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
#define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0)
@@ -36,6 +37,7 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_MISALIGNED_FAST (3 << 0)
#define RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
#define RISCV_HWPROBE_MISALIGNED_MASK (7 << 0)
+#define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
/* Increase RISCV_HWPROBE_MAX_KEY when adding items. */
#endif
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 95cf25d48405..fee22a3d1b53 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -57,9 +57,10 @@ obj-y += stacktrace.o
obj-y += cacheinfo.o
obj-y += patch.o
obj-y += probes/
+obj-y += tests/
obj-$(CONFIG_MMU) += vdso.o vdso/
-obj-$(CONFIG_RISCV_M_MODE) += traps_misaligned.o
+obj-$(CONFIG_RISCV_MISALIGNED) += traps_misaligned.o
obj-$(CONFIG_FPU) += fpu.o
obj-$(CONFIG_RISCV_ISA_V) += vector.o
obj-$(CONFIG_SMP) += smpboot.o
diff --git a/arch/riscv/kernel/acpi.c b/arch/riscv/kernel/acpi.c
index 56cb2c986c48..e619edc8b0cc 100644
--- a/arch/riscv/kernel/acpi.c
+++ b/arch/riscv/kernel/acpi.c
@@ -14,9 +14,10 @@
*/
#include <linux/acpi.h>
+#include <linux/efi.h>
#include <linux/io.h>
+#include <linux/memblock.h>
#include <linux/pci.h>
-#include <linux/efi.h>
int acpi_noirq = 1; /* skip ACPI IRQ initialization */
int acpi_disabled = 1;
@@ -217,7 +218,89 @@ void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
- return (void __iomem *)memremap(phys, size, MEMREMAP_WB);
+ efi_memory_desc_t *md, *region = NULL;
+ pgprot_t prot;
+
+ if (WARN_ON_ONCE(!efi_enabled(EFI_MEMMAP)))
+ return NULL;
+
+ for_each_efi_memory_desc(md) {
+ u64 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
+
+ if (phys < md->phys_addr || phys >= end)
+ continue;
+
+ if (phys + size > end) {
+ pr_warn(FW_BUG "requested region covers multiple EFI memory regions\n");
+ return NULL;
+ }
+ region = md;
+ break;
+ }
+
+ /*
+ * It is fine for AML to remap regions that are not represented in the
+ * EFI memory map at all, as it only describes normal memory, and MMIO
+ * regions that require a virtual mapping to make them accessible to
+ * the EFI runtime services.
+ */
+ prot = PAGE_KERNEL_IO;
+ if (region) {
+ switch (region->type) {
+ case EFI_LOADER_CODE:
+ case EFI_LOADER_DATA:
+ case EFI_BOOT_SERVICES_CODE:
+ case EFI_BOOT_SERVICES_DATA:
+ case EFI_CONVENTIONAL_MEMORY:
+ case EFI_PERSISTENT_MEMORY:
+ if (memblock_is_map_memory(phys) ||
+ !memblock_is_region_memory(phys, size)) {
+ pr_warn(FW_BUG "requested region covers kernel memory\n");
+ return NULL;
+ }
+
+ /*
+ * Mapping kernel memory is permitted if the region in
+ * question is covered by a single memblock with the
+ * NOMAP attribute set: this enables the use of ACPI
+ * table overrides passed via initramfs.
+ * This particular use case only requires read access.
+ */
+ fallthrough;
+
+ case EFI_RUNTIME_SERVICES_CODE:
+ /*
+ * This would be unusual, but not problematic per se,
+ * as long as we take care not to create a writable
+ * mapping for executable code.
+ */
+ prot = PAGE_KERNEL_RO;
+ break;
+
+ case EFI_ACPI_RECLAIM_MEMORY:
+ /*
+ * ACPI reclaim memory is used to pass firmware tables
+ * and other data that is intended for consumption by
+ * the OS only, which may decide it wants to reclaim
+ * that memory and use it for something else. We never
+ * do that, but we usually add it to the linear map
+ * anyway, in which case we should use the existing
+ * mapping.
+ */
+ if (memblock_is_map_memory(phys))
+ return (void __iomem *)__va(phys);
+ fallthrough;
+
+ default:
+ if (region->attribute & EFI_MEMORY_WB)
+ prot = PAGE_KERNEL;
+ else if ((region->attribute & EFI_MEMORY_WC) ||
+ (region->attribute & EFI_MEMORY_WT))
+ prot = pgprot_writecombine(PAGE_KERNEL);
+ }
+ }
+
+ return ioremap_prot(phys, size, pgprot_val(prot));
}
#ifdef CONFIG_PCI
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index d6a75aac1d27..a03129f40c46 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -14,6 +14,7 @@
#include <asm/thread_info.h>
#include <asm/ptrace.h>
#include <asm/cpu_ops_sbi.h>
+#include <asm/stacktrace.h>
#include <asm/suspend.h>
void asm_offsets(void);
@@ -38,7 +39,11 @@ void asm_offsets(void)
OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
+#ifdef CONFIG_SHADOW_CALL_STACK
+ OFFSET(TASK_TI_SCS_SP, task_struct, thread_info.scs_sp);
+#endif
+ OFFSET(TASK_TI_CPU_NUM, task_struct, thread_info.cpu);
OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
OFFSET(TASK_THREAD_F1, task_struct, thread.fstate.f[1]);
OFFSET(TASK_THREAD_F2, task_struct, thread.fstate.f[2]);
@@ -479,4 +484,8 @@ void asm_offsets(void)
OFFSET(KERNEL_MAP_VIRT_ADDR, kernel_mapping, virt_addr);
OFFSET(SBI_HART_BOOT_TASK_PTR_OFFSET, sbi_hart_boot_data, task_ptr);
OFFSET(SBI_HART_BOOT_STACK_PTR_OFFSET, sbi_hart_boot_data, stack_ptr);
+
+ DEFINE(STACKFRAME_SIZE_ON_STACK, ALIGN(sizeof(struct stackframe), STACK_ALIGN));
+ OFFSET(STACKFRAME_FP, stackframe, fp);
+ OFFSET(STACKFRAME_RA, stackframe, ra);
}
diff --git a/arch/riscv/kernel/compat_vdso/Makefile b/arch/riscv/kernel/compat_vdso/Makefile
index b86e5e2c3aea..62fa393b2eb2 100644
--- a/arch/riscv/kernel/compat_vdso/Makefile
+++ b/arch/riscv/kernel/compat_vdso/Makefile
@@ -76,13 +76,3 @@ quiet_cmd_compat_vdsold = VDSOLD $@
# actual build commands
quiet_cmd_compat_vdsoas = VDSOAS $@
cmd_compat_vdsoas = $(COMPAT_CC) $(a_flags) $(COMPAT_CC_FLAGS) -c -o $@ $<
-
-# install commands for the unstripped file
-quiet_cmd_compat_vdso_install = INSTALL $@
- cmd_compat_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/compat_vdso/$@
-
-compat_vdso.so: $(obj)/compat_vdso.so.dbg
- @mkdir -p $(MODLIB)/compat_vdso
- $(call cmd,compat_vdso_install)
-
-compat_vdso_install: compat_vdso.so
diff --git a/arch/riscv/kernel/copy-unaligned.S b/arch/riscv/kernel/copy-unaligned.S
index cfdecfbaad62..2b3d9398c113 100644
--- a/arch/riscv/kernel/copy-unaligned.S
+++ b/arch/riscv/kernel/copy-unaligned.S
@@ -9,7 +9,7 @@
/* void __riscv_copy_words_unaligned(void *, const void *, size_t) */
/* Performs a memcpy without aligning buffers, using word loads and stores. */
/* Note: The size is truncated to a multiple of 8 * SZREG */
-ENTRY(__riscv_copy_words_unaligned)
+SYM_FUNC_START(__riscv_copy_words_unaligned)
andi a4, a2, ~((8*SZREG)-1)
beqz a4, 2f
add a3, a1, a4
@@ -36,12 +36,12 @@ ENTRY(__riscv_copy_words_unaligned)
2:
ret
-END(__riscv_copy_words_unaligned)
+SYM_FUNC_END(__riscv_copy_words_unaligned)
/* void __riscv_copy_bytes_unaligned(void *, const void *, size_t) */
/* Performs a memcpy without aligning buffers, using only byte accesses. */
/* Note: The size is truncated to a multiple of 8 */
-ENTRY(__riscv_copy_bytes_unaligned)
+SYM_FUNC_START(__riscv_copy_bytes_unaligned)
andi a4, a2, ~(8-1)
beqz a4, 2f
add a3, a1, a4
@@ -68,4 +68,4 @@ ENTRY(__riscv_copy_bytes_unaligned)
2:
ret
-END(__riscv_copy_bytes_unaligned)
+SYM_FUNC_END(__riscv_copy_bytes_unaligned)
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index c17dacb1141c..d11d6320fb0d 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -125,13 +125,14 @@ old_interface:
*/
int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
{
- int rc;
-
for (; node; node = node->parent) {
if (of_device_is_compatible(node, "riscv")) {
- rc = riscv_of_processor_hartid(node, hartid);
- if (!rc)
- return 0;
+ *hartid = (unsigned long)of_get_cpu_hwid(node, 0);
+ if (*hartid == ~0UL) {
+ pr_warn("Found CPU without hart ID\n");
+ return -ENODEV;
+ }
+ return 0;
}
}
@@ -202,9 +203,8 @@ arch_initcall(riscv_cpuinfo_init);
#ifdef CONFIG_PROC_FS
-static void print_isa(struct seq_file *f)
+static void print_isa(struct seq_file *f, const unsigned long *isa_bitmap)
{
- seq_puts(f, "isa\t\t: ");
if (IS_ENABLED(CONFIG_32BIT))
seq_write(f, "rv32", 4);
@@ -212,7 +212,7 @@ static void print_isa(struct seq_file *f)
seq_write(f, "rv64", 4);
for (int i = 0; i < riscv_isa_ext_count; i++) {
- if (!__riscv_isa_extension_available(NULL, riscv_isa_ext[i].id))
+ if (!__riscv_isa_extension_available(isa_bitmap, riscv_isa_ext[i].id))
continue;
/* Only multi-letter extensions are split by underscores */
@@ -276,7 +276,15 @@ static int c_show(struct seq_file *m, void *v)
seq_printf(m, "processor\t: %lu\n", cpu_id);
seq_printf(m, "hart\t\t: %lu\n", cpuid_to_hartid_map(cpu_id));
- print_isa(m);
+
+ /*
+ * For historical raisins, the isa: line is limited to the lowest common
+ * denominator of extensions supported across all harts. A true list of
+ * extensions supported on this hart is printed later in the hart isa:
+ * line.
+ */
+ seq_puts(m, "isa\t\t: ");
+ print_isa(m, NULL);
print_mmu(m);
if (acpi_disabled) {
@@ -292,6 +300,13 @@ static int c_show(struct seq_file *m, void *v)
seq_printf(m, "mvendorid\t: 0x%lx\n", ci->mvendorid);
seq_printf(m, "marchid\t\t: 0x%lx\n", ci->marchid);
seq_printf(m, "mimpid\t\t: 0x%lx\n", ci->mimpid);
+
+ /*
+ * Print the ISA extensions specific to this hart, which may show
+ * additional extensions not present across all harts.
+ */
+ seq_puts(m, "hart isa\t: ");
+ print_isa(m, hart_isa[cpu_id].isa);
seq_puts(m, "\n");
return 0;
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index e3803822ab5a..b3785ffc1570 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -8,6 +8,7 @@
#include <linux/acpi.h>
#include <linux/bitmap.h>
+#include <linux/cpuhotplug.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/memory.h>
@@ -29,6 +30,7 @@
#define MISALIGNED_ACCESS_JIFFIES_LG2 1
#define MISALIGNED_BUFFER_SIZE 0x4000
+#define MISALIGNED_BUFFER_ORDER get_order(MISALIGNED_BUFFER_SIZE)
#define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80)
unsigned long elf_hwcap __read_mostly;
@@ -93,10 +95,10 @@ static bool riscv_isa_extension_check(int id)
return true;
case RISCV_ISA_EXT_ZICBOZ:
if (!riscv_cboz_block_size) {
- pr_err("Zicboz detected in ISA string, but no cboz-block-size found\n");
+ pr_err("Zicboz detected in ISA string, disabling as no cboz-block-size found\n");
return false;
} else if (!is_power_of_2(riscv_cboz_block_size)) {
- pr_err("cboz-block-size present, but is not a power-of-2\n");
+ pr_err("Zicboz disabled as cboz-block-size present, but is not a power-of-2\n");
return false;
}
return true;
@@ -206,10 +208,11 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc
switch (*ext) {
case 's':
/*
- * Workaround for invalid single-letter 's' & 'u'(QEMU).
+ * Workaround for invalid single-letter 's' & 'u' (QEMU).
* No need to set the bit in riscv_isa as 's' & 'u' are
- * not valid ISA extensions. It works until multi-letter
- * extension starting with "Su" appears.
+ * not valid ISA extensions. It works unless the first
+ * multi-letter extension in the ISA string begins with
+ * "Su" and is not prefixed with an underscore.
*/
if (ext[-1] != '_' && ext[1] == 'u') {
++isa;
@@ -558,23 +561,21 @@ unsigned long riscv_get_elf_hwcap(void)
return hwcap;
}
-void check_unaligned_access(int cpu)
+static int check_unaligned_access(void *param)
{
+ int cpu = smp_processor_id();
u64 start_cycles, end_cycles;
u64 word_cycles;
u64 byte_cycles;
int ratio;
unsigned long start_jiffies, now;
- struct page *page;
+ struct page *page = param;
void *dst;
void *src;
long speed = RISCV_HWPROBE_MISALIGNED_SLOW;
- page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE));
- if (!page) {
- pr_warn("Can't alloc pages to measure memcpy performance");
- return;
- }
+ if (check_unaligned_access_emulated(cpu))
+ return 0;
/* Make an unaligned destination buffer. */
dst = (void *)((unsigned long)page_address(page) | 0x1);
@@ -628,7 +629,7 @@ void check_unaligned_access(int cpu)
pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned access speed\n",
cpu);
- goto out;
+ return 0;
}
if (word_cycles < byte_cycles)
@@ -642,18 +643,90 @@ void check_unaligned_access(int cpu)
(speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");
per_cpu(misaligned_access_speed, cpu) = speed;
+ return 0;
+}
-out:
- __free_pages(page, get_order(MISALIGNED_BUFFER_SIZE));
+static void check_unaligned_access_nonboot_cpu(void *param)
+{
+ unsigned int cpu = smp_processor_id();
+ struct page **pages = param;
+
+ if (smp_processor_id() != 0)
+ check_unaligned_access(pages[cpu]);
}
-static int check_unaligned_access_boot_cpu(void)
+static int riscv_online_cpu(unsigned int cpu)
{
- check_unaligned_access(0);
+ static struct page *buf;
+
+ /* We are already set since the last check */
+ if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
+ return 0;
+
+ buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
+ if (!buf) {
+ pr_warn("Allocation failure, not measuring misaligned performance\n");
+ return -ENOMEM;
+ }
+
+ check_unaligned_access(buf);
+ __free_pages(buf, MISALIGNED_BUFFER_ORDER);
return 0;
}
-arch_initcall(check_unaligned_access_boot_cpu);
+/* Measure unaligned access on all CPUs present at boot in parallel. */
+static int check_unaligned_access_all_cpus(void)
+{
+ unsigned int cpu;
+ unsigned int cpu_count = num_possible_cpus();
+ struct page **bufs = kzalloc(cpu_count * sizeof(struct page *),
+ GFP_KERNEL);
+
+ if (!bufs) {
+ pr_warn("Allocation failure, not measuring misaligned performance\n");
+ return 0;
+ }
+
+ /*
+ * Allocate separate buffers for each CPU so there's no fighting over
+ * cache lines.
+ */
+ for_each_cpu(cpu, cpu_online_mask) {
+ bufs[cpu] = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
+ if (!bufs[cpu]) {
+ pr_warn("Allocation failure, not measuring misaligned performance\n");
+ goto out;
+ }
+ }
+
+ /* Check everybody except 0, who stays behind to tend jiffies. */
+ on_each_cpu(check_unaligned_access_nonboot_cpu, bufs, 1);
+
+ /* Check core 0. */
+ smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);
+
+ /* Setup hotplug callback for any new CPUs that come online. */
+ cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
+ riscv_online_cpu, NULL);
+
+out:
+ unaligned_emulation_finish();
+ for_each_cpu(cpu, cpu_online_mask) {
+ if (bufs[cpu])
+ __free_pages(bufs[cpu], MISALIGNED_BUFFER_ORDER);
+ }
+
+ kfree(bufs);
+ return 0;
+}
+
+arch_initcall(check_unaligned_access_all_cpus);
+
+void riscv_user_isa_enable(void)
+{
+ if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_ZICBOZ))
+ csr_set(CSR_SENVCFG, ENVCFG_CBZE);
+}
#ifdef CONFIG_RISCV_ALTERNATIVE
/*
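
To make the intent of the probe above explicit: each CPU copies the same buffer for a fixed number of jiffies, once with word loads/stores and once byte by byte, and a hart is marked fast when the word-wise copy completes in fewer cycles than the byte-wise one. A reduced sketch of the final classification step (names follow the hunk; the timing loops, the emulated-access short-circuit and the per-CPU bookkeeping are omitted):

#include <linux/types.h>
#include <asm/hwprobe.h>	/* RISCV_HWPROBE_MISALIGNED_{FAST,SLOW} */

static long classify_misaligned_speed(u64 word_cycles, u64 byte_cycles)
{
	/* If copying with misaligned word accesses beats the byte-at-a-time
	 * copy, misaligned accesses are evidently cheap on this hart. */
	if (word_cycles < byte_cycles)
		return RISCV_HWPROBE_MISALIGNED_FAST;

	return RISCV_HWPROBE_MISALIGNED_SLOW;
}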
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 143a2bb3e697..54ca4564a926 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -9,10 +9,15 @@
#include <asm/asm.h>
#include <asm/csr.h>
+#include <asm/scs.h>
#include <asm/unistd.h>
+#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>
+#include <linux/sizes.h>
+
+ .section .irqentry.text, "ax"
SYM_CODE_START(handle_exception)
/*
@@ -21,9 +26,9 @@ SYM_CODE_START(handle_exception)
* register will contain 0, and we should continue on the current TP.
*/
csrrw tp, CSR_SCRATCH, tp
- bnez tp, _save_context
+ bnez tp, .Lsave_context
-_restore_kernel_tpsp:
+.Lrestore_kernel_tpsp:
csrr tp, CSR_SCRATCH
REG_S sp, TASK_TI_KERNEL_SP(tp)
@@ -35,7 +40,7 @@ _restore_kernel_tpsp:
REG_L sp, TASK_TI_KERNEL_SP(tp)
#endif
-_save_context:
+.Lsave_context:
REG_S sp, TASK_TI_USER_SP(tp)
REG_L sp, TASK_TI_KERNEL_SP(tp)
addi sp, sp, -(PT_SIZE_ON_STACK)
@@ -73,10 +78,11 @@ _save_context:
csrw CSR_SCRATCH, x0
/* Load the global pointer */
-.option push
-.option norelax
- la gp, __global_pointer$
-.option pop
+ load_global_pointer
+
+ /* Load the kernel shadow call stack pointer if coming from userspace */
+ scs_load_current_if_task_changed s5
+
move a0, sp /* pt_regs */
la ra, ret_from_exception
@@ -123,6 +129,9 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
addi s0, sp, PT_SIZE_ON_STACK
REG_S s0, TASK_TI_KERNEL_SP(tp)
+ /* Save the kernel shadow call stack pointer */
+ scs_save_current
+
/*
* Save TP into the scratch register , so we can find the kernel data
* structures again.
@@ -170,67 +179,15 @@ SYM_CODE_END(ret_from_exception)
#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
- /*
- * Takes the psuedo-spinlock for the shadow stack, in case multiple
- * harts are concurrently overflowing their kernel stacks. We could
- * store any value here, but since we're overflowing the kernel stack
- * already we only have SP to use as a scratch register. So we just
- * swap in the address of the spinlock, as that's definately non-zero.
- *
- * Pairs with a store_release in handle_bad_stack().
- */
-1: la sp, spin_shadow_stack
- REG_AMOSWAP_AQ sp, sp, (sp)
- bnez sp, 1b
-
- la sp, shadow_stack
- addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE
+ /* we reach here from kernel context, sscratch must be 0 */
+ csrrw x31, CSR_SCRATCH, x31
+ asm_per_cpu sp, overflow_stack, x31
+ li x31, OVERFLOW_STACK_SIZE
+ add sp, sp, x31
+ /* zero out x31 again and restore x31 */
+ xor x31, x31, x31
+ csrrw x31, CSR_SCRATCH, x31
- //save caller register to shadow stack
- addi sp, sp, -(PT_SIZE_ON_STACK)
- REG_S x1, PT_RA(sp)
- REG_S x5, PT_T0(sp)
- REG_S x6, PT_T1(sp)
- REG_S x7, PT_T2(sp)
- REG_S x10, PT_A0(sp)
- REG_S x11, PT_A1(sp)
- REG_S x12, PT_A2(sp)
- REG_S x13, PT_A3(sp)
- REG_S x14, PT_A4(sp)
- REG_S x15, PT_A5(sp)
- REG_S x16, PT_A6(sp)
- REG_S x17, PT_A7(sp)
- REG_S x28, PT_T3(sp)
- REG_S x29, PT_T4(sp)
- REG_S x30, PT_T5(sp)
- REG_S x31, PT_T6(sp)
-
- la ra, restore_caller_reg
- tail get_overflow_stack
-
-restore_caller_reg:
- //save per-cpu overflow stack
- REG_S a0, -8(sp)
- //restore caller register from shadow_stack
- REG_L x1, PT_RA(sp)
- REG_L x5, PT_T0(sp)
- REG_L x6, PT_T1(sp)
- REG_L x7, PT_T2(sp)
- REG_L x10, PT_A0(sp)
- REG_L x11, PT_A1(sp)
- REG_L x12, PT_A2(sp)
- REG_L x13, PT_A3(sp)
- REG_L x14, PT_A4(sp)
- REG_L x15, PT_A5(sp)
- REG_L x16, PT_A6(sp)
- REG_L x17, PT_A7(sp)
- REG_L x28, PT_T3(sp)
- REG_L x29, PT_T4(sp)
- REG_L x30, PT_T5(sp)
- REG_L x31, PT_T6(sp)
-
- //load per-cpu overflow stack
- REG_L sp, -8(sp)
addi sp, sp, -(PT_SIZE_ON_STACK)
//save context to overflow stack
@@ -268,6 +225,43 @@ SYM_CODE_START(ret_from_fork)
tail syscall_exit_to_user_mode
SYM_CODE_END(ret_from_fork)
+#ifdef CONFIG_IRQ_STACKS
+/*
+ * void call_on_irq_stack(struct pt_regs *regs,
+ * void (*func)(struct pt_regs *));
+ *
+ * Calls func(regs) using the per-CPU IRQ stack.
+ */
+SYM_FUNC_START(call_on_irq_stack)
+ /* Create a frame record to save ra and s0 (fp) */
+ addi sp, sp, -STACKFRAME_SIZE_ON_STACK
+ REG_S ra, STACKFRAME_RA(sp)
+ REG_S s0, STACKFRAME_FP(sp)
+ addi s0, sp, STACKFRAME_SIZE_ON_STACK
+
+ /* Switch to the per-CPU shadow call stack */
+ scs_save_current
+ scs_load_irq_stack t0
+
+ /* Switch to the per-CPU IRQ stack and call the handler */
+ load_per_cpu t0, irq_stack_ptr, t1
+ li t1, IRQ_STACK_SIZE
+ add sp, t0, t1
+ jalr a1
+
+ /* Switch back to the thread shadow call stack */
+ scs_load_current
+
+ /* Switch back to the thread stack and restore ra and s0 */
+ addi sp, s0, -STACKFRAME_SIZE_ON_STACK
+ REG_L ra, STACKFRAME_RA(sp)
+ REG_L s0, STACKFRAME_FP(sp)
+ addi sp, sp, STACKFRAME_SIZE_ON_STACK
+
+ ret
+SYM_FUNC_END(call_on_irq_stack)
+#endif /* CONFIG_IRQ_STACKS */
+
/*
* Integer register context switch
* The callee-saved registers must be saved and restored.
@@ -297,6 +291,8 @@ SYM_FUNC_START(__switch_to)
REG_S s9, TASK_THREAD_S9_RA(a3)
REG_S s10, TASK_THREAD_S10_RA(a3)
REG_S s11, TASK_THREAD_S11_RA(a3)
+ /* Save the kernel shadow call stack pointer */
+ scs_save_current
/* Restore context from next->thread */
REG_L ra, TASK_THREAD_RA_RA(a4)
REG_L sp, TASK_THREAD_SP_RA(a4)
@@ -314,6 +310,8 @@ SYM_FUNC_START(__switch_to)
REG_L s11, TASK_THREAD_S11_RA(a4)
/* The offset of thread_info in task_struct is zero. */
move tp, a1
+ /* Switch to the next shadow call stack */
+ scs_load_current
ret
SYM_FUNC_END(__switch_to)
@@ -324,7 +322,7 @@ SYM_FUNC_END(__switch_to)
.section ".rodata"
.align LGREG
/* Exception vector table */
-SYM_CODE_START(excp_vect_table)
+SYM_DATA_START_LOCAL(excp_vect_table)
RISCV_PTR do_trap_insn_misaligned
ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
RISCV_PTR do_trap_insn_illegal
@@ -342,12 +340,11 @@ SYM_CODE_START(excp_vect_table)
RISCV_PTR do_page_fault /* load page fault */
RISCV_PTR do_trap_unknown
RISCV_PTR do_page_fault /* store page fault */
-excp_vect_table_end:
-SYM_CODE_END(excp_vect_table)
+SYM_DATA_END_LABEL(excp_vect_table, SYM_L_LOCAL, excp_vect_table_end)
#ifndef CONFIG_MMU
-SYM_CODE_START(__user_rt_sigreturn)
+SYM_DATA_START(__user_rt_sigreturn)
li a7, __NR_rt_sigreturn
ecall
-SYM_CODE_END(__user_rt_sigreturn)
+SYM_DATA_END(__user_rt_sigreturn)
#endif
diff --git a/arch/riscv/kernel/fpu.S b/arch/riscv/kernel/fpu.S
index dd2205473de7..2c543f130f93 100644
--- a/arch/riscv/kernel/fpu.S
+++ b/arch/riscv/kernel/fpu.S
@@ -19,7 +19,7 @@
#include <asm/csr.h>
#include <asm/asm-offsets.h>
-ENTRY(__fstate_save)
+SYM_FUNC_START(__fstate_save)
li a2, TASK_THREAD_F0
add a0, a0, a2
li t1, SR_FS
@@ -60,9 +60,9 @@ ENTRY(__fstate_save)
sw t0, TASK_THREAD_FCSR_F0(a0)
csrc CSR_STATUS, t1
ret
-ENDPROC(__fstate_save)
+SYM_FUNC_END(__fstate_save)
-ENTRY(__fstate_restore)
+SYM_FUNC_START(__fstate_restore)
li a2, TASK_THREAD_F0
add a0, a0, a2
li t1, SR_FS
@@ -103,4 +103,125 @@ ENTRY(__fstate_restore)
fscsr t0
csrc CSR_STATUS, t1
ret
-ENDPROC(__fstate_restore)
+SYM_FUNC_END(__fstate_restore)
+
+#define get_f32(which) fmv.x.s a0, which; j 2f
+#define put_f32(which) fmv.s.x which, a1; j 2f
+#if __riscv_xlen == 64
+# define get_f64(which) fmv.x.d a0, which; j 2f
+# define put_f64(which) fmv.d.x which, a1; j 2f
+#else
+# define get_f64(which) fsd which, 0(a1); j 2f
+# define put_f64(which) fld which, 0(a1); j 2f
+#endif
+
+.macro fp_access_prologue
+ /*
+ * Compute jump offset to store the correct FP register since we don't
+ * have indirect FP register access
+ */
+ sll t0, a0, 3
+ la t2, 1f
+ add t0, t0, t2
+ li t1, SR_FS
+ csrs CSR_STATUS, t1
+ jr t0
+1:
+.endm
+
+.macro fp_access_epilogue
+2:
+ csrc CSR_STATUS, t1
+ ret
+.endm
+
+#define fp_access_body(__access_func) \
+ __access_func(f0); \
+ __access_func(f1); \
+ __access_func(f2); \
+ __access_func(f3); \
+ __access_func(f4); \
+ __access_func(f5); \
+ __access_func(f6); \
+ __access_func(f7); \
+ __access_func(f8); \
+ __access_func(f9); \
+ __access_func(f10); \
+ __access_func(f11); \
+ __access_func(f12); \
+ __access_func(f13); \
+ __access_func(f14); \
+ __access_func(f15); \
+ __access_func(f16); \
+ __access_func(f17); \
+ __access_func(f18); \
+ __access_func(f19); \
+ __access_func(f20); \
+ __access_func(f21); \
+ __access_func(f22); \
+ __access_func(f23); \
+ __access_func(f24); \
+ __access_func(f25); \
+ __access_func(f26); \
+ __access_func(f27); \
+ __access_func(f28); \
+ __access_func(f29); \
+ __access_func(f30); \
+ __access_func(f31)
+
+
+#ifdef CONFIG_RISCV_MISALIGNED
+
+/*
+ * Disable the compressed instruction set to keep a constant offset between FP
+ * load/store/move instructions
+ */
+.option norvc
+/*
+ * put_f32_reg - Set a FP register from a register containing the value
+ * a0 = FP register index to be set
+ * a1 = value to be loaded in the FP register
+ */
+SYM_FUNC_START(put_f32_reg)
+ fp_access_prologue
+ fp_access_body(put_f32)
+ fp_access_epilogue
+SYM_FUNC_END(put_f32_reg)
+
+/*
+ * get_f32_reg - Get a FP register value and return it
+ * a0 = FP register index to be retrieved
+ */
+SYM_FUNC_START(get_f32_reg)
+ fp_access_prologue
+ fp_access_body(get_f32)
+ fp_access_epilogue
+SYM_FUNC_END(get_f32_reg)
+
+/*
+ * put_f64_reg - Set a 64-bit FP register from a value or a pointer.
+ * a0 = FP register index to be set
+ * a1 = value to be loaded into the FP register, or, when xlen == 32, a
+ * pointer to that value.
+ */
+SYM_FUNC_START(put_f64_reg)
+ fp_access_prologue
+ fp_access_body(put_f64)
+ fp_access_epilogue
+SYM_FUNC_END(put_f64_reg)
+
+/*
+ * get_f64_reg - Get a 64-bit FP register value and return it, or store it to
+ * a pointer.
+ * a0 = FP register index to be retrieved
+ * a1 = if xlen == 32, pointer which should be loaded with the FP register
+ * value; unused if xlen == 64, in which case the FP register value is
+ * returned through a0
+ */
+SYM_FUNC_START(get_f64_reg)
+ fp_access_prologue
+ fp_access_body(get_f64)
+ fp_access_epilogue
+SYM_FUNC_END(get_f64_reg)
+
+#endif /* CONFIG_RISCV_MISALIGNED */
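
The fp_access_prologue trick above relies on every stub emitted by fp_access_body() having a fixed size: with the compressed ISA disabled (.option norvc), each stub is one 4-byte FP move plus one 4-byte jump to the epilogue, so eight bytes. The register index can therefore be turned into a jump target by a shift and an add, as the following illustrative helper spells out (the real dispatch is done directly in assembly):

/* Sketch only: compute the address of the per-register stub, given the
 * address of the "1:" label and an FP register index in [0, 31]. */
static inline unsigned long fp_stub_address(unsigned long stub_table_base,
					    unsigned int fp_reg_index)
{
	return stub_table_base + ((unsigned long)fp_reg_index << 3);	/* idx * 8 */
}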
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 3710ea5d160f..b77397432403 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -14,11 +14,12 @@
#include <asm/cpu_ops_sbi.h>
#include <asm/hwcap.h>
#include <asm/image.h>
+#include <asm/scs.h>
#include <asm/xip_fixup.h>
#include "efi-header.S"
__HEAD
-ENTRY(_start)
+SYM_CODE_START(_start)
/*
* Image header expected by Linux boot-loaders. The image header data
* structure is described in asm/image.h.
@@ -110,10 +111,7 @@ relocate_enable_mmu:
csrw CSR_TVEC, a0
/* Reload the global pointer */
-.option push
-.option norelax
- la gp, __global_pointer$
-.option pop
+ load_global_pointer
/*
* Switch to kernel page tables. A full fence is necessary in order to
@@ -134,10 +132,7 @@ secondary_start_sbi:
csrw CSR_IP, zero
/* Load the global pointer */
- .option push
- .option norelax
- la gp, __global_pointer$
- .option pop
+ load_global_pointer
/*
* Disable FPU & VECTOR to detect illegal usage of
@@ -159,6 +154,7 @@ secondary_start_sbi:
XIP_FIXUP_OFFSET a3
add a3, a3, a1
REG_L sp, (a3)
+ scs_load_current
.Lsecondary_start_common:
@@ -168,12 +164,12 @@ secondary_start_sbi:
XIP_FIXUP_OFFSET a0
call relocate_enable_mmu
#endif
- call setup_trap_vector
+ call .Lsetup_trap_vector
tail smp_callin
#endif /* CONFIG_SMP */
.align 2
-setup_trap_vector:
+.Lsetup_trap_vector:
/* Set trap vector to exception handler */
la a0, handle_exception
csrw CSR_TVEC, a0
@@ -191,9 +187,9 @@ setup_trap_vector:
wfi
j .Lsecondary_park
-END(_start)
+SYM_CODE_END(_start)
-ENTRY(_start_kernel)
+SYM_CODE_START(_start_kernel)
/* Mask all interrupts */
csrw CSR_IE, zero
csrw CSR_IP, zero
@@ -210,7 +206,7 @@ ENTRY(_start_kernel)
* not implement PMPs, so we set up a quick trap handler to just skip
* touching the PMPs on any trap.
*/
- la a0, pmp_done
+ la a0, .Lpmp_done
csrw CSR_TVEC, a0
li a0, -1
@@ -218,7 +214,7 @@ ENTRY(_start_kernel)
li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
csrw CSR_PMPCFG0, a0
.align 2
-pmp_done:
+.Lpmp_done:
/*
* The hartid in a0 is expected later on, and we have no firmware
@@ -228,10 +224,7 @@ pmp_done:
#endif /* CONFIG_RISCV_M_MODE */
/* Load the global pointer */
-.option push
-.option norelax
- la gp, __global_pointer$
-.option pop
+ load_global_pointer
/*
* Disable FPU & VECTOR to detect illegal usage of
@@ -282,12 +275,12 @@ pmp_done:
/* Clear BSS for flat non-ELF images */
la a3, __bss_start
la a4, __bss_stop
- ble a4, a3, clear_bss_done
-clear_bss:
+ ble a4, a3, .Lclear_bss_done
+.Lclear_bss:
REG_S zero, (a3)
add a3, a3, RISCV_SZPTR
- blt a3, a4, clear_bss
-clear_bss_done:
+ blt a3, a4, .Lclear_bss
+.Lclear_bss_done:
#endif
la a2, boot_cpu_hartid
XIP_FIXUP_OFFSET a2
@@ -298,6 +291,7 @@ clear_bss_done:
la sp, init_thread_union + THREAD_SIZE
XIP_FIXUP_OFFSET sp
addi sp, sp, -PT_SIZE_ON_STACK
+ scs_load_init_stack
#ifdef CONFIG_BUILTIN_DTB
la a0, __dtb_start
XIP_FIXUP_OFFSET a0
@@ -311,11 +305,12 @@ clear_bss_done:
call relocate_enable_mmu
#endif /* CONFIG_MMU */
- call setup_trap_vector
+ call .Lsetup_trap_vector
/* Restore C environment */
la tp, init_task
la sp, init_thread_union + THREAD_SIZE
addi sp, sp, -PT_SIZE_ON_STACK
+ scs_load_current
#ifdef CONFIG_KASAN
call kasan_early_init
@@ -353,10 +348,10 @@ clear_bss_done:
tail .Lsecondary_start_common
#endif /* CONFIG_RISCV_BOOT_SPINWAIT */
-END(_start_kernel)
+SYM_CODE_END(_start_kernel)
#ifdef CONFIG_RISCV_M_MODE
-ENTRY(reset_regs)
+SYM_CODE_START_LOCAL(reset_regs)
li sp, 0
li gp, 0
li tp, 0
@@ -454,5 +449,5 @@ ENTRY(reset_regs)
.Lreset_regs_done_vector:
#endif /* CONFIG_RISCV_ISA_V */
ret
-END(reset_regs)
+SYM_CODE_END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */
diff --git a/arch/riscv/kernel/hibernate-asm.S b/arch/riscv/kernel/hibernate-asm.S
index d698dd7df637..d040dcf4add4 100644
--- a/arch/riscv/kernel/hibernate-asm.S
+++ b/arch/riscv/kernel/hibernate-asm.S
@@ -21,7 +21,7 @@
*
* Always returns 0
*/
-ENTRY(__hibernate_cpu_resume)
+SYM_FUNC_START(__hibernate_cpu_resume)
/* switch to hibernated image's page table. */
csrw CSR_SATP, s0
sfence.vma
@@ -34,7 +34,7 @@ ENTRY(__hibernate_cpu_resume)
mv a0, zero
ret
-END(__hibernate_cpu_resume)
+SYM_FUNC_END(__hibernate_cpu_resume)
/*
* Prepare to restore the image.
@@ -42,7 +42,7 @@ END(__hibernate_cpu_resume)
* a1: satp of temporary page tables.
* a2: cpu_resume.
*/
-ENTRY(hibernate_restore_image)
+SYM_FUNC_START(hibernate_restore_image)
mv s0, a0
mv s1, a1
mv s2, a2
@@ -50,7 +50,7 @@ ENTRY(hibernate_restore_image)
REG_L a1, relocated_restore_code
jr a1
-END(hibernate_restore_image)
+SYM_FUNC_END(hibernate_restore_image)
/*
* The below code will be executed from a 'safe' page.
@@ -58,7 +58,7 @@ END(hibernate_restore_image)
* back to the original memory location. Finally, it jumps to __hibernate_cpu_resume()
* to restore the CPU context.
*/
-ENTRY(hibernate_core_restore_code)
+SYM_FUNC_START(hibernate_core_restore_code)
/* switch to temp page table. */
csrw satp, s1
sfence.vma
@@ -73,4 +73,4 @@ ENTRY(hibernate_core_restore_code)
bnez s4, .Lcopy
jr s2
-END(hibernate_core_restore_code)
+SYM_FUNC_END(hibernate_core_restore_code)
diff --git a/arch/riscv/kernel/image-vars.h b/arch/riscv/kernel/image-vars.h
index ea1a10355ce9..3df30dd1c458 100644
--- a/arch/riscv/kernel/image-vars.h
+++ b/arch/riscv/kernel/image-vars.h
@@ -28,7 +28,9 @@ __efistub__start_kernel = _start_kernel;
__efistub__end = _end;
__efistub__edata = _edata;
__efistub___init_text_end = __init_text_end;
+#if defined(CONFIG_EFI_EARLYCON) || defined(CONFIG_SYSFB)
__efistub_screen_info = screen_info;
+#endif
#endif
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
index 9cc0a7669271..9ceda02507ca 100644
--- a/arch/riscv/kernel/irq.c
+++ b/arch/riscv/kernel/irq.c
@@ -9,6 +9,7 @@
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
+#include <linux/scs.h>
#include <linux/seq_file.h>
#include <asm/sbi.h>
#include <asm/smp.h>
@@ -34,6 +35,24 @@ EXPORT_SYMBOL_GPL(riscv_get_intc_hwnode);
#ifdef CONFIG_IRQ_STACKS
#include <asm/irq_stack.h>
+DECLARE_PER_CPU(ulong *, irq_shadow_call_stack_ptr);
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+DEFINE_PER_CPU(ulong *, irq_shadow_call_stack_ptr);
+#endif
+
+static void init_irq_scs(void)
+{
+ int cpu;
+
+ if (!scs_is_enabled())
+ return;
+
+ for_each_possible_cpu(cpu)
+ per_cpu(irq_shadow_call_stack_ptr, cpu) =
+ scs_alloc(cpu_to_node(cpu));
+}
+
DEFINE_PER_CPU(ulong *, irq_stack_ptr);
#ifdef CONFIG_VMAP_STACK
@@ -61,40 +80,22 @@ static void init_irq_stacks(void)
#endif /* CONFIG_VMAP_STACK */
#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
+static void ___do_softirq(struct pt_regs *regs)
+{
+ __do_softirq();
+}
+
void do_softirq_own_stack(void)
{
-#ifdef CONFIG_IRQ_STACKS
- if (on_thread_stack()) {
- ulong *sp = per_cpu(irq_stack_ptr, smp_processor_id())
- + IRQ_STACK_SIZE/sizeof(ulong);
- __asm__ __volatile(
- "addi sp, sp, -"RISCV_SZPTR "\n"
- REG_S" ra, (sp) \n"
- "addi sp, sp, -"RISCV_SZPTR "\n"
- REG_S" s0, (sp) \n"
- "addi s0, sp, 2*"RISCV_SZPTR "\n"
- "move sp, %[sp] \n"
- "call __do_softirq \n"
- "addi sp, s0, -2*"RISCV_SZPTR"\n"
- REG_L" s0, (sp) \n"
- "addi sp, sp, "RISCV_SZPTR "\n"
- REG_L" ra, (sp) \n"
- "addi sp, sp, "RISCV_SZPTR "\n"
- :
- : [sp] "r" (sp)
- : "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
- "t0", "t1", "t2", "t3", "t4", "t5", "t6",
-#ifndef CONFIG_FRAME_POINTER
- "s0",
-#endif
- "memory");
- } else
-#endif
+ if (on_thread_stack())
+ call_on_irq_stack(NULL, ___do_softirq);
+ else
__do_softirq();
}
#endif /* CONFIG_SOFTIRQ_ON_OWN_STACK */
#else
+static void init_irq_scs(void) {}
static void init_irq_stacks(void) {}
#endif /* CONFIG_IRQ_STACKS */
@@ -106,6 +107,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
void __init init_IRQ(void)
{
+ init_irq_scs();
init_irq_stacks();
irqchip_init();
if (!handle_arch_irq)
diff --git a/arch/riscv/kernel/kexec_relocate.S b/arch/riscv/kernel/kexec_relocate.S
index 059c5e216ae7..de0a4b35d01e 100644
--- a/arch/riscv/kernel/kexec_relocate.S
+++ b/arch/riscv/kernel/kexec_relocate.S
@@ -17,27 +17,17 @@ SYM_CODE_START(riscv_kexec_relocate)
* s1: (const) Phys address to jump to after relocation
* s2: (const) Phys address of the FDT image
* s3: (const) The hartid of the current hart
- * s4: Pointer to the destination address for the relocation
- * s5: (const) Number of words per page
- * s6: (const) 1, used for subtraction
- * s7: (const) kernel_map.va_pa_offset, used when switching MMU off
- * s8: (const) Physical address of the main loop
- * s9: (debug) indirection page counter
- * s10: (debug) entry counter
- * s11: (debug) copied words counter
+ * s4: (const) kernel_map.va_pa_offset, used when switching MMU off
+ * s5: Pointer to the destination address for the relocation
+ * s6: (const) Physical address of the main loop
*/
mv s0, a0
mv s1, a1
mv s2, a2
mv s3, a3
- mv s4, zero
- li s5, (PAGE_SIZE / RISCV_SZPTR)
- li s6, 1
- mv s7, a4
- mv s8, zero
- mv s9, zero
- mv s10, zero
- mv s11, zero
+ mv s4, a4
+ mv s5, zero
+ mv s6, zero
/* Disable / cleanup interrupts */
csrw CSR_SIE, zero
@@ -52,21 +42,27 @@ SYM_CODE_START(riscv_kexec_relocate)
* the start of the loop below so that we jump there in
* any case.
*/
- la s8, 1f
- sub s8, s8, s7
- csrw CSR_STVEC, s8
+ la s6, 1f
+ sub s6, s6, s4
+ csrw CSR_STVEC, s6
+
+ /*
+ * With the C extension, the code above is 42 bytes and the next
+ * .align directive would pad it with zeros up to 44 bytes. Put a
+ * nop here manually to avoid that zero padding.
+ */
+ nop
/* Process entries in a loop */
.align 2
1:
- addi s10, s10, 1
REG_L t0, 0(s0) /* t0 = *image->entry */
addi s0, s0, RISCV_SZPTR /* image->entry++ */
/* IND_DESTINATION entry ? -> save destination address */
andi t1, t0, 0x1
beqz t1, 2f
- andi s4, t0, ~0x1
+ andi s5, t0, ~0x1
j 1b
2:
@@ -74,9 +70,8 @@ SYM_CODE_START(riscv_kexec_relocate)
andi t1, t0, 0x2
beqz t1, 2f
andi s0, t0, ~0x2
- addi s9, s9, 1
csrw CSR_SATP, zero
- jalr zero, s8, 0
+ jr s6
2:
/* IND_DONE entry ? -> jump to done label */
@@ -92,14 +87,13 @@ SYM_CODE_START(riscv_kexec_relocate)
andi t1, t0, 0x8
beqz t1, 1b /* Unknown entry type, ignore it */
andi t0, t0, ~0x8
- mv t3, s5 /* i = num words per page */
+ li t3, (PAGE_SIZE / RISCV_SZPTR) /* i = num words per page */
3: /* copy loop */
REG_L t1, (t0) /* t1 = *src_ptr */
- REG_S t1, (s4) /* *dst_ptr = *src_ptr */
+ REG_S t1, (s5) /* *dst_ptr = *src_ptr */
addi t0, t0, RISCV_SZPTR /* stc_ptr++ */
- addi s4, s4, RISCV_SZPTR /* dst_ptr++ */
- sub t3, t3, s6 /* i-- */
- addi s11, s11, 1 /* c++ */
+ addi s5, s5, RISCV_SZPTR /* dst_ptr++ */
+ addi t3, t3, -0x1 /* i-- */
beqz t3, 1b /* copy done ? */
j 3b
@@ -146,7 +140,7 @@ SYM_CODE_START(riscv_kexec_relocate)
*/
fence.i
- jalr zero, a2, 0
+ jr a2
SYM_CODE_END(riscv_kexec_relocate)
riscv_kexec_relocate_end:
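
For context on the assembly above: riscv_kexec_relocate walks the kimage entry list, where each entry is a page-frame address with type flags in its low bits. A plain-C sketch of that walk follows; the flag values are the generic kexec ones (IND_DESTINATION 0x1, IND_INDIRECTION 0x2, IND_DONE 0x4, IND_SOURCE 0x8) and the page-at-a-time memcpy stands in for the word-by-word loop, so treat it as an approximation of the control flow rather than the relocation code itself.

#include <stddef.h>
#include <string.h>

#define KEXEC_PAGE_SIZE 4096UL	/* assumed page size for the sketch */

static void kexec_entry_walk(unsigned long *entry)
{
	char *dst = NULL;

	for (;;) {
		unsigned long e = *entry++;

		if (e & 0x1) {			/* IND_DESTINATION: set copy target */
			dst = (char *)(e & ~0x1UL);
		} else if (e & 0x2) {		/* IND_INDIRECTION: next entry page */
			entry = (unsigned long *)(e & ~0x2UL);
		} else if (e & 0x4) {		/* IND_DONE: list exhausted */
			break;
		} else if (e & 0x8) {		/* IND_SOURCE: copy one page */
			memcpy(dst, (void *)(e & ~0x8UL), KEXEC_PAGE_SIZE);
			dst += KEXEC_PAGE_SIZE;
		}
		/* anything else is ignored, as in the assembly */
	}
}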
diff --git a/arch/riscv/kernel/mcount-dyn.S b/arch/riscv/kernel/mcount-dyn.S
index 669b8697aa38..58dd96a2a153 100644
--- a/arch/riscv/kernel/mcount-dyn.S
+++ b/arch/riscv/kernel/mcount-dyn.S
@@ -82,7 +82,7 @@
.endm
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
-ENTRY(ftrace_caller)
+SYM_FUNC_START(ftrace_caller)
SAVE_ABI
addi a0, t0, -FENTRY_RA_OFFSET
@@ -91,8 +91,7 @@ ENTRY(ftrace_caller)
mv a1, ra
mv a3, sp
-ftrace_call:
- .global ftrace_call
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
call ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -102,16 +101,15 @@ ftrace_call:
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
mv a2, s0
#endif
-ftrace_graph_call:
- .global ftrace_graph_call
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
call ftrace_stub
#endif
RESTORE_ABI
jr t0
-ENDPROC(ftrace_caller)
+SYM_FUNC_END(ftrace_caller)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
-ENTRY(ftrace_regs_caller)
+SYM_FUNC_START(ftrace_regs_caller)
SAVE_ALL
addi a0, t0, -FENTRY_RA_OFFSET
@@ -120,8 +118,7 @@ ENTRY(ftrace_regs_caller)
mv a1, ra
mv a3, sp
-ftrace_regs_call:
- .global ftrace_regs_call
+SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
call ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -131,12 +128,11 @@ ftrace_regs_call:
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
mv a2, s0
#endif
-ftrace_graph_regs_call:
- .global ftrace_graph_regs_call
+SYM_INNER_LABEL(ftrace_graph_regs_call, SYM_L_GLOBAL)
call ftrace_stub
#endif
RESTORE_ALL
jr t0
-ENDPROC(ftrace_regs_caller)
+SYM_FUNC_END(ftrace_regs_caller)
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S
index 8818a8fa9ff3..b4dd9ed6849e 100644
--- a/arch/riscv/kernel/mcount.S
+++ b/arch/riscv/kernel/mcount.S
@@ -61,7 +61,7 @@ SYM_TYPED_FUNC_START(ftrace_stub_graph)
ret
SYM_FUNC_END(ftrace_stub_graph)
-ENTRY(return_to_handler)
+SYM_FUNC_START(return_to_handler)
/*
* On implementing the frame point test, the ideal way is to compare the
* s0 (frame pointer, if enabled) on entry and the sp (stack pointer) on return.
@@ -76,25 +76,25 @@ ENTRY(return_to_handler)
mv a2, a0
RESTORE_RET_ABI_STATE
jalr a2
-ENDPROC(return_to_handler)
+SYM_FUNC_END(return_to_handler)
#endif
#ifndef CONFIG_DYNAMIC_FTRACE
-ENTRY(MCOUNT_NAME)
+SYM_FUNC_START(MCOUNT_NAME)
la t4, ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
la t0, ftrace_graph_return
REG_L t1, 0(t0)
- bne t1, t4, do_ftrace_graph_caller
+ bne t1, t4, .Ldo_ftrace_graph_caller
la t3, ftrace_graph_entry
REG_L t2, 0(t3)
la t6, ftrace_graph_entry_stub
- bne t2, t6, do_ftrace_graph_caller
+ bne t2, t6, .Ldo_ftrace_graph_caller
#endif
la t3, ftrace_trace_function
REG_L t5, 0(t3)
- bne t5, t4, do_trace
+ bne t5, t4, .Ldo_trace
ret
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -102,7 +102,7 @@ ENTRY(MCOUNT_NAME)
* A pseudo representation for the function graph tracer:
* prepare_to_return(&ra_to_caller_of_caller, ra_to_caller)
*/
-do_ftrace_graph_caller:
+.Ldo_ftrace_graph_caller:
addi a0, s0, -SZREG
mv a1, ra
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
@@ -118,7 +118,7 @@ do_ftrace_graph_caller:
* A pseudo representation for the function tracer:
* (*ftrace_trace_function)(ra_to_caller, ra_to_caller_of_caller)
*/
-do_trace:
+.Ldo_trace:
REG_L a1, -SZREG(s0)
mv a0, ra
@@ -126,6 +126,6 @@ do_trace:
jalr t5
RESTORE_ABI_STATE
ret
-ENDPROC(MCOUNT_NAME)
+SYM_FUNC_END(MCOUNT_NAME)
#endif
EXPORT_SYMBOL(MCOUNT_NAME)
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 7c651d55fcbd..56a8c78e9e21 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -7,6 +7,9 @@
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/hashtable.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
@@ -14,6 +17,38 @@
#include <asm/alternative.h>
#include <asm/sections.h>
+struct used_bucket {
+ struct list_head head;
+ struct hlist_head *bucket;
+};
+
+struct relocation_head {
+ struct hlist_node node;
+ struct list_head *rel_entry;
+ void *location;
+};
+
+struct relocation_entry {
+ struct list_head head;
+ Elf_Addr value;
+ unsigned int type;
+};
+
+struct relocation_handlers {
+ int (*reloc_handler)(struct module *me, void *location, Elf_Addr v);
+ int (*accumulate_handler)(struct module *me, void *location,
+ long buffer);
+};
+
+unsigned int initialize_relocation_hashtable(unsigned int num_relocations);
+void process_accumulated_relocations(struct module *me);
+int add_relocation_to_accumulate(struct module *me, int type, void *location,
+ unsigned int hashtable_bits, Elf_Addr v);
+
+struct hlist_head *relocation_hashtable;
+
+struct list_head used_buckets_list;
+
/*
* The auipc+jalr instruction pair can reach any PC-relative offset
* in the range [-2^31 - 2^11, 2^31 - 2^11)
@@ -27,68 +62,90 @@ static bool riscv_insn_valid_32bit_offset(ptrdiff_t val)
#endif
}
-static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v)
+static int riscv_insn_rmw(void *location, u32 keep, u32 set)
+{
+ u16 *parcel = location;
+ u32 insn = (u32)le16_to_cpu(parcel[0]) | (u32)le16_to_cpu(parcel[1]) << 16;
+
+ insn &= keep;
+ insn |= set;
+
+ parcel[0] = cpu_to_le16(insn);
+ parcel[1] = cpu_to_le16(insn >> 16);
+ return 0;
+}
+
+static int riscv_insn_rvc_rmw(void *location, u16 keep, u16 set)
+{
+ u16 *parcel = location;
+ u16 insn = le16_to_cpu(*parcel);
+
+ insn &= keep;
+ insn |= set;
+
+ *parcel = cpu_to_le16(insn);
+ return 0;
+}
+
+static int apply_r_riscv_32_rela(struct module *me, void *location, Elf_Addr v)
{
if (v != (u32)v) {
pr_err("%s: value %016llx out of range for 32-bit field\n",
me->name, (long long)v);
return -EINVAL;
}
- *location = v;
+ *(u32 *)location = v;
return 0;
}
-static int apply_r_riscv_64_rela(struct module *me, u32 *location, Elf_Addr v)
+static int apply_r_riscv_64_rela(struct module *me, void *location, Elf_Addr v)
{
*(u64 *)location = v;
return 0;
}
-static int apply_r_riscv_branch_rela(struct module *me, u32 *location,
+static int apply_r_riscv_branch_rela(struct module *me, void *location,
Elf_Addr v)
{
- ptrdiff_t offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - location;
u32 imm12 = (offset & 0x1000) << (31 - 12);
u32 imm11 = (offset & 0x800) >> (11 - 7);
u32 imm10_5 = (offset & 0x7e0) << (30 - 10);
u32 imm4_1 = (offset & 0x1e) << (11 - 4);
- *location = (*location & 0x1fff07f) | imm12 | imm11 | imm10_5 | imm4_1;
- return 0;
+ return riscv_insn_rmw(location, 0x1fff07f, imm12 | imm11 | imm10_5 | imm4_1);
}
-static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
+static int apply_r_riscv_jal_rela(struct module *me, void *location,
Elf_Addr v)
{
- ptrdiff_t offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - location;
u32 imm20 = (offset & 0x100000) << (31 - 20);
u32 imm19_12 = (offset & 0xff000);
u32 imm11 = (offset & 0x800) << (20 - 11);
u32 imm10_1 = (offset & 0x7fe) << (30 - 10);
- *location = (*location & 0xfff) | imm20 | imm19_12 | imm11 | imm10_1;
- return 0;
+ return riscv_insn_rmw(location, 0xfff, imm20 | imm19_12 | imm11 | imm10_1);
}
-static int apply_r_riscv_rvc_branch_rela(struct module *me, u32 *location,
+static int apply_r_riscv_rvc_branch_rela(struct module *me, void *location,
Elf_Addr v)
{
- ptrdiff_t offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - location;
u16 imm8 = (offset & 0x100) << (12 - 8);
u16 imm7_6 = (offset & 0xc0) >> (6 - 5);
u16 imm5 = (offset & 0x20) >> (5 - 2);
u16 imm4_3 = (offset & 0x18) << (12 - 5);
u16 imm2_1 = (offset & 0x6) << (12 - 10);
- *(u16 *)location = (*(u16 *)location & 0xe383) |
- imm8 | imm7_6 | imm5 | imm4_3 | imm2_1;
- return 0;
+ return riscv_insn_rvc_rmw(location, 0xe383,
+ imm8 | imm7_6 | imm5 | imm4_3 | imm2_1);
}
-static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
+static int apply_r_riscv_rvc_jump_rela(struct module *me, void *location,
Elf_Addr v)
{
- ptrdiff_t offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - location;
u16 imm11 = (offset & 0x800) << (12 - 11);
u16 imm10 = (offset & 0x400) >> (10 - 8);
u16 imm9_8 = (offset & 0x300) << (12 - 11);
@@ -98,16 +155,14 @@ static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
u16 imm4 = (offset & 0x10) << (12 - 5);
u16 imm3_1 = (offset & 0xe) << (12 - 10);
- *(u16 *)location = (*(u16 *)location & 0xe003) |
- imm11 | imm10 | imm9_8 | imm7 | imm6 | imm5 | imm4 | imm3_1;
- return 0;
+ return riscv_insn_rvc_rmw(location, 0xe003,
+ imm11 | imm10 | imm9_8 | imm7 | imm6 | imm5 | imm4 | imm3_1);
}
-static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
+static int apply_r_riscv_pcrel_hi20_rela(struct module *me, void *location,
Elf_Addr v)
{
- ptrdiff_t offset = (void *)v - (void *)location;
- s32 hi20;
+ ptrdiff_t offset = (void *)v - location;
if (!riscv_insn_valid_32bit_offset(offset)) {
pr_err(
@@ -116,23 +171,20 @@ static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
return -EINVAL;
}
- hi20 = (offset + 0x800) & 0xfffff000;
- *location = (*location & 0xfff) | hi20;
- return 0;
+ return riscv_insn_rmw(location, 0xfff, (offset + 0x800) & 0xfffff000);
}
-static int apply_r_riscv_pcrel_lo12_i_rela(struct module *me, u32 *location,
+static int apply_r_riscv_pcrel_lo12_i_rela(struct module *me, void *location,
Elf_Addr v)
{
/*
* v is the lo12 value to fill. It is calculated before calling this
* handler.
*/
- *location = (*location & 0xfffff) | ((v & 0xfff) << 20);
- return 0;
+ return riscv_insn_rmw(location, 0xfffff, (v & 0xfff) << 20);
}
-static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, u32 *location,
+static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, void *location,
Elf_Addr v)
{
/*
@@ -142,15 +194,12 @@ static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, u32 *location,
u32 imm11_5 = (v & 0xfe0) << (31 - 11);
u32 imm4_0 = (v & 0x1f) << (11 - 4);
- *location = (*location & 0x1fff07f) | imm11_5 | imm4_0;
- return 0;
+ return riscv_insn_rmw(location, 0x1fff07f, imm11_5 | imm4_0);
}
-static int apply_r_riscv_hi20_rela(struct module *me, u32 *location,
+static int apply_r_riscv_hi20_rela(struct module *me, void *location,
Elf_Addr v)
{
- s32 hi20;
-
if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) {
pr_err(
"%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
@@ -158,22 +207,20 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location,
return -EINVAL;
}
- hi20 = ((s32)v + 0x800) & 0xfffff000;
- *location = (*location & 0xfff) | hi20;
- return 0;
+ return riscv_insn_rmw(location, 0xfff, ((s32)v + 0x800) & 0xfffff000);
}
-static int apply_r_riscv_lo12_i_rela(struct module *me, u32 *location,
+static int apply_r_riscv_lo12_i_rela(struct module *me, void *location,
Elf_Addr v)
{
/* Skip medlow checking because of filtering by HI20 already */
s32 hi20 = ((s32)v + 0x800) & 0xfffff000;
s32 lo12 = ((s32)v - hi20);
- *location = (*location & 0xfffff) | ((lo12 & 0xfff) << 20);
- return 0;
+
+ return riscv_insn_rmw(location, 0xfffff, (lo12 & 0xfff) << 20);
}
-static int apply_r_riscv_lo12_s_rela(struct module *me, u32 *location,
+static int apply_r_riscv_lo12_s_rela(struct module *me, void *location,
Elf_Addr v)
{
/* Skip medlow checking because of filtering by HI20 already */
@@ -181,20 +228,18 @@ static int apply_r_riscv_lo12_s_rela(struct module *me, u32 *location,
s32 lo12 = ((s32)v - hi20);
u32 imm11_5 = (lo12 & 0xfe0) << (31 - 11);
u32 imm4_0 = (lo12 & 0x1f) << (11 - 4);
- *location = (*location & 0x1fff07f) | imm11_5 | imm4_0;
- return 0;
+
+ return riscv_insn_rmw(location, 0x1fff07f, imm11_5 | imm4_0);
}
-static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
+static int apply_r_riscv_got_hi20_rela(struct module *me, void *location,
Elf_Addr v)
{
- ptrdiff_t offset = (void *)v - (void *)location;
- s32 hi20;
+ ptrdiff_t offset = (void *)v - location;
/* Always emit the got entry */
if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
- offset = module_emit_got_entry(me, v);
- offset = (void *)offset - (void *)location;
+ offset = (void *)module_emit_got_entry(me, v) - location;
} else {
pr_err(
"%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n",
@@ -202,22 +247,19 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
return -EINVAL;
}
- hi20 = (offset + 0x800) & 0xfffff000;
- *location = (*location & 0xfff) | hi20;
- return 0;
+ return riscv_insn_rmw(location, 0xfff, (offset + 0x800) & 0xfffff000);
}
-static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
+static int apply_r_riscv_call_plt_rela(struct module *me, void *location,
Elf_Addr v)
{
- ptrdiff_t offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - location;
u32 hi20, lo12;
if (!riscv_insn_valid_32bit_offset(offset)) {
/* Only emit the plt entry if offset over 32-bit range */
if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
- offset = module_emit_plt_entry(me, v);
- offset = (void *)offset - (void *)location;
+ offset = (void *)module_emit_plt_entry(me, v) - location;
} else {
pr_err(
"%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
@@ -228,15 +270,14 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
hi20 = (offset + 0x800) & 0xfffff000;
lo12 = (offset - hi20) & 0xfff;
- *location = (*location & 0xfff) | hi20;
- *(location + 1) = (*(location + 1) & 0xfffff) | (lo12 << 20);
- return 0;
+ riscv_insn_rmw(location, 0xfff, hi20);
+ return riscv_insn_rmw(location + 4, 0xfffff, lo12 << 20);
}
-static int apply_r_riscv_call_rela(struct module *me, u32 *location,
+static int apply_r_riscv_call_rela(struct module *me, void *location,
Elf_Addr v)
{
- ptrdiff_t offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - location;
u32 hi20, lo12;
if (!riscv_insn_valid_32bit_offset(offset)) {
@@ -248,18 +289,17 @@ static int apply_r_riscv_call_rela(struct module *me, u32 *location,
hi20 = (offset + 0x800) & 0xfffff000;
lo12 = (offset - hi20) & 0xfff;
- *location = (*location & 0xfff) | hi20;
- *(location + 1) = (*(location + 1) & 0xfffff) | (lo12 << 20);
- return 0;
+ riscv_insn_rmw(location, 0xfff, hi20);
+ return riscv_insn_rmw(location + 4, 0xfffff, lo12 << 20);
}
-static int apply_r_riscv_relax_rela(struct module *me, u32 *location,
+static int apply_r_riscv_relax_rela(struct module *me, void *location,
Elf_Addr v)
{
return 0;
}
-static int apply_r_riscv_align_rela(struct module *me, u32 *location,
+static int apply_r_riscv_align_rela(struct module *me, void *location,
Elf_Addr v)
{
pr_err(
@@ -268,91 +308,446 @@ static int apply_r_riscv_align_rela(struct module *me, u32 *location,
return -EINVAL;
}
-static int apply_r_riscv_add16_rela(struct module *me, u32 *location,
+static int apply_r_riscv_add8_rela(struct module *me, void *location, Elf_Addr v)
+{
+ *(u8 *)location += (u8)v;
+ return 0;
+}
+
+static int apply_r_riscv_add16_rela(struct module *me, void *location,
Elf_Addr v)
{
*(u16 *)location += (u16)v;
return 0;
}
-static int apply_r_riscv_add32_rela(struct module *me, u32 *location,
+static int apply_r_riscv_add32_rela(struct module *me, void *location,
Elf_Addr v)
{
*(u32 *)location += (u32)v;
return 0;
}
-static int apply_r_riscv_add64_rela(struct module *me, u32 *location,
+static int apply_r_riscv_add64_rela(struct module *me, void *location,
Elf_Addr v)
{
*(u64 *)location += (u64)v;
return 0;
}
-static int apply_r_riscv_sub16_rela(struct module *me, u32 *location,
+static int apply_r_riscv_sub8_rela(struct module *me, void *location, Elf_Addr v)
+{
+ *(u8 *)location -= (u8)v;
+ return 0;
+}
+
+static int apply_r_riscv_sub16_rela(struct module *me, void *location,
Elf_Addr v)
{
*(u16 *)location -= (u16)v;
return 0;
}
-static int apply_r_riscv_sub32_rela(struct module *me, u32 *location,
+static int apply_r_riscv_sub32_rela(struct module *me, void *location,
Elf_Addr v)
{
*(u32 *)location -= (u32)v;
return 0;
}
-static int apply_r_riscv_sub64_rela(struct module *me, u32 *location,
+static int apply_r_riscv_sub64_rela(struct module *me, void *location,
Elf_Addr v)
{
*(u64 *)location -= (u64)v;
return 0;
}
-static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
- Elf_Addr v) = {
- [R_RISCV_32] = apply_r_riscv_32_rela,
- [R_RISCV_64] = apply_r_riscv_64_rela,
- [R_RISCV_BRANCH] = apply_r_riscv_branch_rela,
- [R_RISCV_JAL] = apply_r_riscv_jal_rela,
- [R_RISCV_RVC_BRANCH] = apply_r_riscv_rvc_branch_rela,
- [R_RISCV_RVC_JUMP] = apply_r_riscv_rvc_jump_rela,
- [R_RISCV_PCREL_HI20] = apply_r_riscv_pcrel_hi20_rela,
- [R_RISCV_PCREL_LO12_I] = apply_r_riscv_pcrel_lo12_i_rela,
- [R_RISCV_PCREL_LO12_S] = apply_r_riscv_pcrel_lo12_s_rela,
- [R_RISCV_HI20] = apply_r_riscv_hi20_rela,
- [R_RISCV_LO12_I] = apply_r_riscv_lo12_i_rela,
- [R_RISCV_LO12_S] = apply_r_riscv_lo12_s_rela,
- [R_RISCV_GOT_HI20] = apply_r_riscv_got_hi20_rela,
- [R_RISCV_CALL_PLT] = apply_r_riscv_call_plt_rela,
- [R_RISCV_CALL] = apply_r_riscv_call_rela,
- [R_RISCV_RELAX] = apply_r_riscv_relax_rela,
- [R_RISCV_ALIGN] = apply_r_riscv_align_rela,
- [R_RISCV_ADD16] = apply_r_riscv_add16_rela,
- [R_RISCV_ADD32] = apply_r_riscv_add32_rela,
- [R_RISCV_ADD64] = apply_r_riscv_add64_rela,
- [R_RISCV_SUB16] = apply_r_riscv_sub16_rela,
- [R_RISCV_SUB32] = apply_r_riscv_sub32_rela,
- [R_RISCV_SUB64] = apply_r_riscv_sub64_rela,
+static int dynamic_linking_not_supported(struct module *me, void *location,
+ Elf_Addr v)
+{
+ pr_err("%s: Dynamic linking not supported in kernel modules PC = %p\n",
+ me->name, location);
+ return -EINVAL;
+}
+
+static int tls_not_supported(struct module *me, void *location, Elf_Addr v)
+{
+ pr_err("%s: Thread local storage not supported in kernel modules PC = %p\n",
+ me->name, location);
+ return -EINVAL;
+}
+
+static int apply_r_riscv_sub6_rela(struct module *me, void *location, Elf_Addr v)
+{
+ u8 *byte = location;
+ u8 value = v;
+
+ *byte = (*byte - (value & 0x3f)) & 0x3f;
+ return 0;
+}
+
+static int apply_r_riscv_set6_rela(struct module *me, void *location, Elf_Addr v)
+{
+ u8 *byte = location;
+ u8 value = v;
+
+ *byte = (*byte & 0xc0) | (value & 0x3f);
+ return 0;
+}
+
+static int apply_r_riscv_set8_rela(struct module *me, void *location, Elf_Addr v)
+{
+ *(u8 *)location = (u8)v;
+ return 0;
+}
+
+static int apply_r_riscv_set16_rela(struct module *me, void *location,
+ Elf_Addr v)
+{
+ *(u16 *)location = (u16)v;
+ return 0;
+}
+
+static int apply_r_riscv_set32_rela(struct module *me, void *location,
+ Elf_Addr v)
+{
+ *(u32 *)location = (u32)v;
+ return 0;
+}
+
+static int apply_r_riscv_32_pcrel_rela(struct module *me, void *location,
+ Elf_Addr v)
+{
+ *(u32 *)location = v - (uintptr_t)location;
+ return 0;
+}
+
+static int apply_r_riscv_plt32_rela(struct module *me, void *location,
+ Elf_Addr v)
+{
+ ptrdiff_t offset = (void *)v - location;
+
+ if (!riscv_insn_valid_32bit_offset(offset)) {
+ /* Only emit the plt entry if offset over 32-bit range */
+ if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
+ offset = (void *)module_emit_plt_entry(me, v) - location;
+ } else {
+ pr_err("%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
+ me->name, (long long)v, location);
+ return -EINVAL;
+ }
+ }
+
+ *(u32 *)location = (u32)offset;
+ return 0;
+}
+
+static int apply_r_riscv_set_uleb128(struct module *me, void *location, Elf_Addr v)
+{
+ *(long *)location = v;
+ return 0;
+}
+
+static int apply_r_riscv_sub_uleb128(struct module *me, void *location, Elf_Addr v)
+{
+ *(long *)location -= v;
+ return 0;
+}
+
+static int apply_6_bit_accumulation(struct module *me, void *location, long buffer)
+{
+ u8 *byte = location;
+ u8 value = buffer;
+
+ if (buffer > 0x3f) {
+ pr_err("%s: value %ld out of range for 6-bit relocation.\n",
+ me->name, buffer);
+ return -EINVAL;
+ }
+
+ *byte = (*byte & 0xc0) | (value & 0x3f);
+ return 0;
+}
+
+static int apply_8_bit_accumulation(struct module *me, void *location, long buffer)
+{
+ if (buffer > U8_MAX) {
+ pr_err("%s: value %ld out of range for 8-bit relocation.\n",
+ me->name, buffer);
+ return -EINVAL;
+ }
+ *(u8 *)location = (u8)buffer;
+ return 0;
+}
+
+static int apply_16_bit_accumulation(struct module *me, void *location, long buffer)
+{
+ if (buffer > U16_MAX) {
+ pr_err("%s: value %ld out of range for 16-bit relocation.\n",
+ me->name, buffer);
+ return -EINVAL;
+ }
+ *(u16 *)location = (u16)buffer;
+ return 0;
+}
+
+static int apply_32_bit_accumulation(struct module *me, void *location, long buffer)
+{
+ if (buffer > U32_MAX) {
+ pr_err("%s: value %ld out of range for 32-bit relocation.\n",
+ me->name, buffer);
+ return -EINVAL;
+ }
+ *(u32 *)location = (u32)buffer;
+ return 0;
+}
+
+static int apply_64_bit_accumulation(struct module *me, void *location, long buffer)
+{
+ *(u64 *)location = (u64)buffer;
+ return 0;
+}
+
+static int apply_uleb128_accumulation(struct module *me, void *location, long buffer)
+{
+ /*
+ * ULEB128 is a variable length encoding. Encode the buffer into
+ * the ULEB128 data format.
+ */
+ u8 *p = location;
+
+ while (buffer != 0) {
+ u8 value = buffer & 0x7f;
+
+ buffer >>= 7;
+ value |= (!!buffer) << 7;
+
+ *p++ = value;
+ }
+ return 0;
+}
+
+/*
+ * Relocations defined in the riscv-elf-psabi-doc.
+ * This handles static linking only.
+ */
+static const struct relocation_handlers reloc_handlers[] = {
+ [R_RISCV_32] = { .reloc_handler = apply_r_riscv_32_rela },
+ [R_RISCV_64] = { .reloc_handler = apply_r_riscv_64_rela },
+ [R_RISCV_RELATIVE] = { .reloc_handler = dynamic_linking_not_supported },
+ [R_RISCV_COPY] = { .reloc_handler = dynamic_linking_not_supported },
+ [R_RISCV_JUMP_SLOT] = { .reloc_handler = dynamic_linking_not_supported },
+ [R_RISCV_TLS_DTPMOD32] = { .reloc_handler = dynamic_linking_not_supported },
+ [R_RISCV_TLS_DTPMOD64] = { .reloc_handler = dynamic_linking_not_supported },
+ [R_RISCV_TLS_DTPREL32] = { .reloc_handler = dynamic_linking_not_supported },
+ [R_RISCV_TLS_DTPREL64] = { .reloc_handler = dynamic_linking_not_supported },
+ [R_RISCV_TLS_TPREL32] = { .reloc_handler = dynamic_linking_not_supported },
+ [R_RISCV_TLS_TPREL64] = { .reloc_handler = dynamic_linking_not_supported },
+ /* 12-15 undefined */
+ [R_RISCV_BRANCH] = { .reloc_handler = apply_r_riscv_branch_rela },
+ [R_RISCV_JAL] = { .reloc_handler = apply_r_riscv_jal_rela },
+ [R_RISCV_CALL] = { .reloc_handler = apply_r_riscv_call_rela },
+ [R_RISCV_CALL_PLT] = { .reloc_handler = apply_r_riscv_call_plt_rela },
+ [R_RISCV_GOT_HI20] = { .reloc_handler = apply_r_riscv_got_hi20_rela },
+ [R_RISCV_TLS_GOT_HI20] = { .reloc_handler = tls_not_supported },
+ [R_RISCV_TLS_GD_HI20] = { .reloc_handler = tls_not_supported },
+ [R_RISCV_PCREL_HI20] = { .reloc_handler = apply_r_riscv_pcrel_hi20_rela },
+ [R_RISCV_PCREL_LO12_I] = { .reloc_handler = apply_r_riscv_pcrel_lo12_i_rela },
+ [R_RISCV_PCREL_LO12_S] = { .reloc_handler = apply_r_riscv_pcrel_lo12_s_rela },
+ [R_RISCV_HI20] = { .reloc_handler = apply_r_riscv_hi20_rela },
+ [R_RISCV_LO12_I] = { .reloc_handler = apply_r_riscv_lo12_i_rela },
+ [R_RISCV_LO12_S] = { .reloc_handler = apply_r_riscv_lo12_s_rela },
+ [R_RISCV_TPREL_HI20] = { .reloc_handler = tls_not_supported },
+ [R_RISCV_TPREL_LO12_I] = { .reloc_handler = tls_not_supported },
+ [R_RISCV_TPREL_LO12_S] = { .reloc_handler = tls_not_supported },
+ [R_RISCV_TPREL_ADD] = { .reloc_handler = tls_not_supported },
+ [R_RISCV_ADD8] = { .reloc_handler = apply_r_riscv_add8_rela,
+ .accumulate_handler = apply_8_bit_accumulation },
+ [R_RISCV_ADD16] = { .reloc_handler = apply_r_riscv_add16_rela,
+ .accumulate_handler = apply_16_bit_accumulation },
+ [R_RISCV_ADD32] = { .reloc_handler = apply_r_riscv_add32_rela,
+ .accumulate_handler = apply_32_bit_accumulation },
+ [R_RISCV_ADD64] = { .reloc_handler = apply_r_riscv_add64_rela,
+ .accumulate_handler = apply_64_bit_accumulation },
+ [R_RISCV_SUB8] = { .reloc_handler = apply_r_riscv_sub8_rela,
+ .accumulate_handler = apply_8_bit_accumulation },
+ [R_RISCV_SUB16] = { .reloc_handler = apply_r_riscv_sub16_rela,
+ .accumulate_handler = apply_16_bit_accumulation },
+ [R_RISCV_SUB32] = { .reloc_handler = apply_r_riscv_sub32_rela,
+ .accumulate_handler = apply_32_bit_accumulation },
+ [R_RISCV_SUB64] = { .reloc_handler = apply_r_riscv_sub64_rela,
+ .accumulate_handler = apply_64_bit_accumulation },
+ /* 41-42 reserved for future standard use */
+ [R_RISCV_ALIGN] = { .reloc_handler = apply_r_riscv_align_rela },
+ [R_RISCV_RVC_BRANCH] = { .reloc_handler = apply_r_riscv_rvc_branch_rela },
+ [R_RISCV_RVC_JUMP] = { .reloc_handler = apply_r_riscv_rvc_jump_rela },
+ /* 46-50 reserved for future standard use */
+ [R_RISCV_RELAX] = { .reloc_handler = apply_r_riscv_relax_rela },
+ [R_RISCV_SUB6] = { .reloc_handler = apply_r_riscv_sub6_rela,
+ .accumulate_handler = apply_6_bit_accumulation },
+ [R_RISCV_SET6] = { .reloc_handler = apply_r_riscv_set6_rela,
+ .accumulate_handler = apply_6_bit_accumulation },
+ [R_RISCV_SET8] = { .reloc_handler = apply_r_riscv_set8_rela,
+ .accumulate_handler = apply_8_bit_accumulation },
+ [R_RISCV_SET16] = { .reloc_handler = apply_r_riscv_set16_rela,
+ .accumulate_handler = apply_16_bit_accumulation },
+ [R_RISCV_SET32] = { .reloc_handler = apply_r_riscv_set32_rela,
+ .accumulate_handler = apply_32_bit_accumulation },
+ [R_RISCV_32_PCREL] = { .reloc_handler = apply_r_riscv_32_pcrel_rela },
+ [R_RISCV_IRELATIVE] = { .reloc_handler = dynamic_linking_not_supported },
+ [R_RISCV_PLT32] = { .reloc_handler = apply_r_riscv_plt32_rela },
+ [R_RISCV_SET_ULEB128] = { .reloc_handler = apply_r_riscv_set_uleb128,
+ .accumulate_handler = apply_uleb128_accumulation },
+ [R_RISCV_SUB_ULEB128] = { .reloc_handler = apply_r_riscv_sub_uleb128,
+ .accumulate_handler = apply_uleb128_accumulation },
+ /* 62-191 reserved for future standard use */
+ /* 192-255 nonstandard ABI extensions */
};
+void process_accumulated_relocations(struct module *me)
+{
+ /*
+ * Only ADD/SUB/SET/ULEB128 should end up here.
+ *
+ * Each bucket may have more than one relocation location. All
+ * relocations for a location are stored in a list in a bucket.
+ *
+ * Relocations are applied to a temp variable before being stored to the
+ * provided location to check for overflow. This also allows ULEB128 to
+ * properly decide how many entries are needed before storing to
+ * location. The final value is stored into location using the handler
+ * for the last relocation to an address.
+ *
+ * Three layers of indexing:
+ * - Each of the buckets in use
+ * - Groups of relocations in each bucket by location address
+ * - Each relocation entry for a location address
+ */
+ struct used_bucket *bucket_iter;
+ struct relocation_head *rel_head_iter;
+ struct relocation_entry *rel_entry_iter;
+ int curr_type;
+ void *location;
+ long buffer;
+
+ list_for_each_entry(bucket_iter, &used_buckets_list, head) {
+ hlist_for_each_entry(rel_head_iter, bucket_iter->bucket, node) {
+ buffer = 0;
+ location = rel_head_iter->location;
+ list_for_each_entry(rel_entry_iter,
+ rel_head_iter->rel_entry, head) {
+ curr_type = rel_entry_iter->type;
+ reloc_handlers[curr_type].reloc_handler(
+ me, &buffer, rel_entry_iter->value);
+ kfree(rel_entry_iter);
+ }
+ reloc_handlers[curr_type].accumulate_handler(
+ me, location, buffer);
+ kfree(rel_head_iter);
+ }
+ kfree(bucket_iter);
+ }
+
+ kfree(relocation_hashtable);
+}
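The apply_*_accumulation handlers named in the table are defined earlier in this patch and are not part of this hunk. As a rough sketch of the pattern only (an illustration, not the patch's exact code), a 16-bit accumulator would bounds-check the accumulated value and then write it through to the final location:

	static int apply_16_bit_accumulation(struct module *me, void *location, long buffer)
	{
		/* Illustrative sketch: reject values that do not fit in 16 bits */
		if (buffer > U16_MAX || buffer < S16_MIN) {
			pr_err("%s: accumulated value %ld out of range for 16-bit field\n",
			       me->name, buffer);
			return -EINVAL;
		}
		*(u16 *)location = (u16)buffer;
		return 0;
	}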
+
+int add_relocation_to_accumulate(struct module *me, int type, void *location,
+ unsigned int hashtable_bits, Elf_Addr v)
+{
+ struct relocation_entry *entry;
+ struct relocation_head *rel_head;
+ struct hlist_head *current_head;
+ struct used_bucket *bucket;
+ unsigned long hash;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&entry->head);
+ entry->type = type;
+ entry->value = v;
+
+ hash = hash_min((uintptr_t)location, hashtable_bits);
+
+ current_head = &relocation_hashtable[hash];
+
+ /* Find matching location (if any) */
+ bool found = false;
+ struct relocation_head *rel_head_iter;
+
+ hlist_for_each_entry(rel_head_iter, current_head, node) {
+ if (rel_head_iter->location == location) {
+ found = true;
+ rel_head = rel_head_iter;
+ break;
+ }
+ }
+
+ if (!found) {
+ rel_head = kmalloc(sizeof(*rel_head), GFP_KERNEL);
+ if (!rel_head) {
+ kfree(entry);
+ return -ENOMEM;
+ }
+ rel_head->rel_entry =
+ kmalloc(sizeof(struct list_head), GFP_KERNEL);
+ if (!rel_head->rel_entry) {
+ kfree(entry);
+ kfree(rel_head);
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(rel_head->rel_entry);
+ rel_head->location = location;
+ INIT_HLIST_NODE(&rel_head->node);
+ if (!current_head->first) {
+ bucket =
+ kmalloc(sizeof(struct used_bucket), GFP_KERNEL);
+ if (!bucket) {
+ kfree(entry);
+ kfree(rel_head->rel_entry);
+ kfree(rel_head);
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&bucket->head);
+ bucket->bucket = current_head;
+ list_add(&bucket->head, &used_buckets_list);
+ }
+ hlist_add_head(&rel_head->node, current_head);
+ }
+
+ /* Add relocation to head of discovered rel_head */
+ list_add_tail(&entry->head, rel_head->rel_entry);
+
+ return 0;
+}
+
+unsigned int initialize_relocation_hashtable(unsigned int num_relocations)
+{
+ /* Can safely assume that bits does not exceed BITS_PER_LONG */
+ unsigned long hashtable_size = roundup_pow_of_two(num_relocations);
+ unsigned int hashtable_bits = ilog2(hashtable_size);
+
+ /*
+ * Double size of hashtable if num_relocations * 1.25 is greater than
+ * hashtable_size.
+ */
+ int should_double_size = ((num_relocations + (num_relocations >> 2)) > (hashtable_size));
+
+ hashtable_bits += should_double_size;
+
+ hashtable_size <<= should_double_size;
+
+ relocation_hashtable = kmalloc_array(hashtable_size,
+ sizeof(*relocation_hashtable),
+ GFP_KERNEL);
+ __hash_init(relocation_hashtable, hashtable_size);
+
+ INIT_LIST_HEAD(&used_buckets_list);
+
+ return hashtable_bits;
+}
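As a worked example (numbers chosen for illustration, not taken from the patch): 48 relocations round up to a 64-bucket table and 48 + 48/4 = 60 does not exceed 64, so the size stands with hashtable_bits = 6; 56 relocations also round up to 64, but 56 + 14 = 70 exceeds 64, so the table is doubled to 128 buckets and hashtable_bits becomes 7, keeping the load factor at or below roughly 0.8.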
+
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *me)
{
Elf_Rela *rel = (void *) sechdrs[relsec].sh_addr;
- int (*handler)(struct module *me, u32 *location, Elf_Addr v);
+ int (*handler)(struct module *me, void *location, Elf_Addr v);
Elf_Sym *sym;
- u32 *location;
+ void *location;
unsigned int i, type;
Elf_Addr v;
int res;
+ unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel);
+ unsigned int hashtable_bits = initialize_relocation_hashtable(num_relocations);
pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
- for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ for (i = 0; i < num_relocations; i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
@@ -370,8 +765,8 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
type = ELF_RISCV_R_TYPE(rel[i].r_info);
- if (type < ARRAY_SIZE(reloc_handlers_rela))
- handler = reloc_handlers_rela[type];
+ if (type < ARRAY_SIZE(reloc_handlers))
+ handler = reloc_handlers[type].reloc_handler;
else
handler = NULL;
@@ -427,11 +822,16 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
}
}
- res = handler(me, location, v);
+ if (reloc_handlers[type].accumulate_handler)
+ res = add_relocation_to_accumulate(me, type, location, hashtable_bits, v);
+ else
+ res = handler(me, location, v);
if (res)
return res;
}
+ process_accumulated_relocations(me);
+
return 0;
}
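The result is effectively a two-pass link: the loop above applies self-contained relocation types immediately and queues ADD/SUB/SET/ULEB128 ones, which usually arrive in pairs against the same location (for instance R_RISCV_ADD32 plus R_RISCV_SUB32 encoding a label difference, as the test_sub*.S files later in this patch demonstrate), and process_accumulated_relocations() then writes each location once all of its contributions are known.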
diff --git a/arch/riscv/kernel/probes/rethook_trampoline.S b/arch/riscv/kernel/probes/rethook_trampoline.S
index 21bac92a170a..f2cd83d9b0f0 100644
--- a/arch/riscv/kernel/probes/rethook_trampoline.S
+++ b/arch/riscv/kernel/probes/rethook_trampoline.S
@@ -75,7 +75,7 @@
REG_L x31, PT_T6(sp)
.endm
-ENTRY(arch_rethook_trampoline)
+SYM_CODE_START(arch_rethook_trampoline)
addi sp, sp, -(PT_SIZE_ON_STACK)
save_all_base_regs
@@ -90,4 +90,4 @@ ENTRY(arch_rethook_trampoline)
addi sp, sp, PT_SIZE_ON_STACK
ret
-ENDPROC(arch_rethook_trampoline)
+SYM_CODE_END(arch_rethook_trampoline)
diff --git a/arch/riscv/kernel/probes/simulate-insn.c b/arch/riscv/kernel/probes/simulate-insn.c
index d3099d67816d..6c166029079c 100644
--- a/arch/riscv/kernel/probes/simulate-insn.c
+++ b/arch/riscv/kernel/probes/simulate-insn.c
@@ -24,7 +24,7 @@ static inline bool rv_insn_reg_set_val(struct pt_regs *regs, u32 index,
unsigned long val)
{
if (index == 0)
- return false;
+ return true;
else if (index <= 31)
*((unsigned long *)regs + index) = val;
else
diff --git a/arch/riscv/kernel/probes/uprobes.c b/arch/riscv/kernel/probes/uprobes.c
index 194f166b2cc4..4b3dc8beaf77 100644
--- a/arch/riscv/kernel/probes/uprobes.c
+++ b/arch/riscv/kernel/probes/uprobes.c
@@ -3,6 +3,7 @@
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
+#include <asm/insn.h>
#include "decode-insn.h"
@@ -17,6 +18,11 @@ bool is_swbp_insn(uprobe_opcode_t *insn)
#endif
}
+bool is_trap_insn(uprobe_opcode_t *insn)
+{
+ return riscv_insn_is_ebreak(*insn) || riscv_insn_is_c_ebreak(*insn);
+}
+
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
return instruction_pointer(regs);
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index e32d737e039f..4f21d970a129 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -25,6 +25,7 @@
#include <asm/thread_info.h>
#include <asm/cpuidle.h>
#include <asm/vector.h>
+#include <asm/cpufeature.h>
register unsigned long gp_in_global __asm__("gp");
@@ -41,6 +42,23 @@ void arch_cpu_idle(void)
cpu_do_idle();
}
+int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
+{
+ if (!unaligned_ctl_available())
+ return -EINVAL;
+
+ tsk->thread.align_ctl = val;
+ return 0;
+}
+
+int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
+{
+ if (!unaligned_ctl_available())
+ return -EINVAL;
+
+ return put_user(tsk->thread.align_ctl, (unsigned long __user *)adr);
+}
+
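These two hooks back the generic PR_SET_UNALIGN/PR_GET_UNALIGN prctls. A minimal userspace sketch; it only succeeds once unaligned_ctl_available() is true, i.e. every CPU emulates misaligned accesses, and PR_UNALIGN_SIGBUS comes from <linux/prctl.h>, which <sys/prctl.h> pulls in:

	#include <stdio.h>
	#include <sys/prctl.h>

	int main(void)
	{
		unsigned long ctl = 0;

		/* Request SIGBUS instead of in-kernel emulation of misaligned accesses */
		if (prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS, 0, 0, 0))
			perror("PR_SET_UNALIGN");

		/* Read the current setting back; the kernel writes it via put_user() */
		if (prctl(PR_GET_UNALIGN, &ctl, 0, 0, 0) == 0)
			printf("align_ctl = %lu\n", ctl);

		return 0;
	}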
void __show_regs(struct pt_regs *regs)
{
show_regs_print_info(KERN_DEFAULT);
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index c672c8ba9a2a..5a62ed1da453 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -11,6 +11,7 @@
#include <linux/reboot.h>
#include <asm/sbi.h>
#include <asm/smp.h>
+#include <asm/tlbflush.h>
/* default SBI version is 0.1 */
unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT;
@@ -377,31 +378,14 @@ int sbi_remote_fence_i(const struct cpumask *cpu_mask)
EXPORT_SYMBOL(sbi_remote_fence_i);
/**
- * sbi_remote_sfence_vma() - Execute SFENCE.VMA instructions on given remote
- * harts for the specified virtual address range.
- * @cpu_mask: A cpu mask containing all the target harts.
- * @start: Start of the virtual address
- * @size: Total size of the virtual address range.
- *
- * Return: 0 on success, appropriate linux error code otherwise.
- */
-int sbi_remote_sfence_vma(const struct cpumask *cpu_mask,
- unsigned long start,
- unsigned long size)
-{
- return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
- cpu_mask, start, size, 0, 0);
-}
-EXPORT_SYMBOL(sbi_remote_sfence_vma);
-
-/**
* sbi_remote_sfence_vma_asid() - Execute SFENCE.VMA instructions on given
- * remote harts for a virtual address range belonging to a specific ASID.
+ * remote harts for a virtual address range, optionally limited to a specific ASID.
*
* @cpu_mask: A cpu mask containing all the target harts.
* @start: Start of the virtual address
* @size: Total size of the virtual address range.
- * @asid: The value of address space identifier (ASID).
+ * @asid: The value of address space identifier (ASID), or FLUSH_TLB_NO_ASID
+ * for flushing all address spaces.
*
* Return: 0 on success, appropriate linux error code otherwise.
*/
@@ -410,8 +394,12 @@ int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
unsigned long size,
unsigned long asid)
{
- return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
- cpu_mask, start, size, asid, 0);
+ if (asid == FLUSH_TLB_NO_ASID)
+ return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
+ cpu_mask, start, size, 0, 0);
+ else
+ return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
+ cpu_mask, start, size, asid, 0);
}
EXPORT_SYMBOL(sbi_remote_sfence_vma_asid);
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index aac853ae4eb7..535a837de55d 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -15,7 +15,6 @@
#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/console.h>
-#include <linux/screen_info.h>
#include <linux/of_fdt.h>
#include <linux/sched/task.h>
#include <linux/smp.h>
@@ -26,6 +25,7 @@
#include <asm/acpi.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/early_ioremap.h>
#include <asm/pgtable.h>
@@ -40,17 +40,6 @@
#include "head.h"
-#if defined(CONFIG_DUMMY_CONSOLE) || defined(CONFIG_EFI)
-struct screen_info screen_info __section(".data") = {
- .orig_video_lines = 30,
- .orig_video_cols = 80,
- .orig_video_mode = 0,
- .orig_video_ega_bx = 0,
- .orig_video_isVGA = 1,
- .orig_video_points = 8
-};
-#endif
-
/*
* The lucky hart to first increment this variable will boot the other cores.
* This is used before the kernel initializes the BSS so it can't be in the
@@ -301,10 +290,13 @@ void __init setup_arch(char **cmdline_p)
riscv_fill_hwcap();
init_rt_signal_env();
apply_boot_alternatives();
+
if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) &&
riscv_isa_extension_available(NULL, ZICBOM))
riscv_noncoherent_supported();
riscv_set_dma_cache_alignment();
+
+ riscv_user_isa_enable();
}
static int __init topology_init(void)
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 21a4d0e111bc..88b6220b2608 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -384,30 +384,6 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
sigset_t *oldset = sigmask_to_save();
int ret;
- /* Are we from a system call? */
- if (regs->cause == EXC_SYSCALL) {
- /* Avoid additional syscall restarting via ret_from_exception */
- regs->cause = -1UL;
- /* If so, check system call restarting.. */
- switch (regs->a0) {
- case -ERESTART_RESTARTBLOCK:
- case -ERESTARTNOHAND:
- regs->a0 = -EINTR;
- break;
-
- case -ERESTARTSYS:
- if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
- regs->a0 = -EINTR;
- break;
- }
- fallthrough;
- case -ERESTARTNOINTR:
- regs->a0 = regs->orig_a0;
- regs->epc -= 0x4;
- break;
- }
- }
-
rseq_signal_deliver(ksig, regs);
/* Set up the stack frame */
@@ -421,36 +397,67 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
void arch_do_signal_or_restart(struct pt_regs *regs)
{
+ unsigned long continue_addr = 0, restart_addr = 0;
+ int retval = 0;
struct ksignal ksig;
+ bool syscall = (regs->cause == EXC_SYSCALL);
- if (get_signal(&ksig)) {
- /* Actually deliver the signal */
- handle_signal(&ksig, regs);
- return;
- }
+ /* If we were from a system call, check for system call restarting */
+ if (syscall) {
+ continue_addr = regs->epc;
+ restart_addr = continue_addr - 4;
+ retval = regs->a0;
- /* Did we come from a system call? */
- if (regs->cause == EXC_SYSCALL) {
/* Avoid additional syscall restarting via ret_from_exception */
regs->cause = -1UL;
- /* Restart the system call - no handlers present */
- switch (regs->a0) {
+ /*
+ * Prepare for system call restart. We do this here so that a
+ * debugger will see the already changed PC.
+ */
+ switch (retval) {
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
- regs->a0 = regs->orig_a0;
- regs->epc -= 0x4;
- break;
case -ERESTART_RESTARTBLOCK:
- regs->a0 = regs->orig_a0;
- regs->a7 = __NR_restart_syscall;
- regs->epc -= 0x4;
+ regs->a0 = regs->orig_a0;
+ regs->epc = restart_addr;
break;
}
}
/*
+ * Get the signal to deliver. When running under ptrace, at this point
+ * the debugger may change all of our registers.
+ */
+ if (get_signal(&ksig)) {
+ /*
+ * Depending on the signal settings, we may need to revert the
+ * decision to restart the system call, but skip this if a
+ * debugger has chosen to restart at a different PC.
+ */
+ if (regs->epc == restart_addr &&
+ (retval == -ERESTARTNOHAND ||
+ retval == -ERESTART_RESTARTBLOCK ||
+ (retval == -ERESTARTSYS &&
+ !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
+ regs->a0 = -EINTR;
+ regs->epc = continue_addr;
+ }
+
+ /* Actually deliver the signal */
+ handle_signal(&ksig, regs);
+ return;
+ }
+
+ /*
+ * Handle restarting a different system call. As above, if a debugger
+ * has chosen to restart at a different PC, ignore the restart.
+ */
+ if (syscall && regs->epc == restart_addr && retval == -ERESTART_RESTARTBLOCK)
+ regs->a7 = __NR_restart_syscall;
+
+ /*
* If there is no signal to deliver, we just put the saved
* sigmask back.
*/
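For context, the 4 subtracted from epc to form restart_addr is the length of the uncompressed ecall instruction that entered the kernel, so "restarting" a system call is simply re-executing that ecall with a0 reset to orig_a0.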
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 1b8da4e40a4d..d162bf339beb 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -25,6 +25,8 @@
#include <linux/of.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
+
+#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/cpufeature.h>
#include <asm/irq.h>
@@ -246,13 +248,14 @@ asmlinkage __visible void smp_callin(void)
numa_add_cpu(curr_cpuid);
set_cpu_online(curr_cpuid, 1);
- check_unaligned_access(curr_cpuid);
if (has_vector()) {
if (riscv_v_setup_vsize())
elf_hwcap &= ~COMPAT_HWCAP_ISA_V;
}
+ riscv_user_isa_enable();
+
/*
* Remote TLB flushes are ignored while the CPU is offline, so emit
* a local TLB flush right now just in case.
diff --git a/arch/riscv/kernel/suspend_entry.S b/arch/riscv/kernel/suspend_entry.S
index f7960c7c5f9e..2d54f309c140 100644
--- a/arch/riscv/kernel/suspend_entry.S
+++ b/arch/riscv/kernel/suspend_entry.S
@@ -16,7 +16,7 @@
.altmacro
.option norelax
-ENTRY(__cpu_suspend_enter)
+SYM_FUNC_START(__cpu_suspend_enter)
/* Save registers (except A0 and T0-T6) */
REG_S ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
REG_S sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
@@ -57,14 +57,11 @@ ENTRY(__cpu_suspend_enter)
/* Return to C code */
ret
-END(__cpu_suspend_enter)
+SYM_FUNC_END(__cpu_suspend_enter)
SYM_TYPED_FUNC_START(__cpu_resume_enter)
/* Load the global pointer */
- .option push
- .option norelax
- la gp, __global_pointer$
- .option pop
+ load_global_pointer
#ifdef CONFIG_MMU
/* Save A0 and A1 */
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index b651ec698a91..c712037dbe10 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -145,26 +145,38 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
for_each_cpu(cpu, cpus) {
struct riscv_isainfo *isainfo = &hart_isa[cpu];
- if (riscv_isa_extension_available(isainfo->isa, ZBA))
- pair->value |= RISCV_HWPROBE_EXT_ZBA;
- else
- missing |= RISCV_HWPROBE_EXT_ZBA;
-
- if (riscv_isa_extension_available(isainfo->isa, ZBB))
- pair->value |= RISCV_HWPROBE_EXT_ZBB;
- else
- missing |= RISCV_HWPROBE_EXT_ZBB;
-
- if (riscv_isa_extension_available(isainfo->isa, ZBS))
- pair->value |= RISCV_HWPROBE_EXT_ZBS;
- else
- missing |= RISCV_HWPROBE_EXT_ZBS;
+#define EXT_KEY(ext) \
+ do { \
+ if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \
+ pair->value |= RISCV_HWPROBE_EXT_##ext; \
+ else \
+ missing |= RISCV_HWPROBE_EXT_##ext; \
+ } while (false)
+
+ /*
+ * Only use EXT_KEY() for extensions which can be exposed to userspace,
+ * regardless of the kernel's configuration, as no other checks, besides
+ * presence in the hart_isa bitmap, are made.
+ */
+ EXT_KEY(ZBA);
+ EXT_KEY(ZBB);
+ EXT_KEY(ZBS);
+ EXT_KEY(ZICBOZ);
+#undef EXT_KEY
}
/* Now turn off reporting features if any CPU is missing it. */
pair->value &= ~missing;
}
+static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext)
+{
+ struct riscv_hwprobe pair;
+
+ hwprobe_isa_ext0(&pair, cpus);
+ return (pair.value & ext);
+}
+
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
int cpu;
@@ -215,6 +227,12 @@ static void hwprobe_one_pair(struct riscv_hwprobe *pair,
pair->value = hwprobe_misaligned(cpus);
break;
+ case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
+ pair->value = 0;
+ if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
+ pair->value = riscv_cboz_block_size;
+ break;
+
/*
* For forward compatibility, unknown keys don't fail the whole
* call, but get their element key set to -1 and value set to 0
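The new key can be queried from userspace through the riscv_hwprobe() syscall (or the vDSO wrapper touched later in this series). A rough sketch, assuming kernel headers recent enough to provide __NR_riscv_hwprobe and <asm/hwprobe.h>:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <asm/hwprobe.h>
	#include <asm/unistd.h>

	int main(void)
	{
		struct riscv_hwprobe pair = {
			.key = RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE,
		};

		/* One pair, no cpumask: ask about the intersection of all online harts */
		if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
			return 1;

		if (pair.key < 0)
			printf("kernel does not know this hwprobe key\n");
		else
			printf("Zicboz block size: %llu (0 means Zicboz absent)\n",
			       (unsigned long long)pair.value);
		return 0;
	}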
diff --git a/arch/riscv/kernel/tests/Kconfig.debug b/arch/riscv/kernel/tests/Kconfig.debug
new file mode 100644
index 000000000000..5dba64e8e977
--- /dev/null
+++ b/arch/riscv/kernel/tests/Kconfig.debug
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "arch/riscv/kernel Testing and Coverage"
+
+config AS_HAS_ULEB128
+ def_bool $(as-instr,.reloc label$(comma) R_RISCV_SET_ULEB128$(comma) 127\n.reloc label$(comma) R_RISCV_SUB_ULEB128$(comma) 127\nlabel:\n.word 0)
+
+menuconfig RUNTIME_KERNEL_TESTING_MENU
+ bool "arch/riscv/kernel runtime Testing"
+ default y
+ help
+ Enable riscv kernel runtime testing.
+
+if RUNTIME_KERNEL_TESTING_MENU
+
+config RISCV_MODULE_LINKING_KUNIT
+ bool "KUnit test riscv module linking at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to test riscv module linking at boot. This will
+ enable a module called "test_module_linking".
+
+ KUnit tests run during boot and output the results to the debug log
+ in TAP format (http://testanything.org/). Only useful for kernel devs
+ running the KUnit test harness, and not intended for inclusion into a
+ production build.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+endif # RUNTIME_KERNEL_TESTING_MENU
+
+endmenu # "arch/riscv/kernel runtime Testing"
diff --git a/arch/riscv/kernel/tests/Makefile b/arch/riscv/kernel/tests/Makefile
new file mode 100644
index 000000000000..7d6c76cffe20
--- /dev/null
+++ b/arch/riscv/kernel/tests/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_RISCV_MODULE_LINKING_KUNIT) += module_test/
diff --git a/arch/riscv/kernel/tests/module_test/Makefile b/arch/riscv/kernel/tests/module_test/Makefile
new file mode 100644
index 000000000000..d7a6fd8943de
--- /dev/null
+++ b/arch/riscv/kernel/tests/module_test/Makefile
@@ -0,0 +1,15 @@
+obj-m += test_module_linking.o
+
+test_sub := test_sub6.o test_sub8.o test_sub16.o test_sub32.o test_sub64.o
+
+test_set := test_set6.o test_set8.o test_set16.o test_set32.o
+
+test_module_linking-objs += $(test_sub)
+
+test_module_linking-objs += $(test_set)
+
+ifeq ($(CONFIG_AS_HAS_ULEB128),y)
+test_module_linking-objs += test_uleb128.o
+endif
+
+test_module_linking-objs += test_module_linking_main.o
diff --git a/arch/riscv/kernel/tests/module_test/test_module_linking_main.c b/arch/riscv/kernel/tests/module_test/test_module_linking_main.c
new file mode 100644
index 000000000000..8df5fa5b834e
--- /dev/null
+++ b/arch/riscv/kernel/tests/module_test/test_module_linking_main.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <kunit/test.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Test module linking");
+
+extern int test_set32(void);
+extern int test_set16(void);
+extern int test_set8(void);
+extern int test_set6(void);
+extern long test_sub64(void);
+extern int test_sub32(void);
+extern int test_sub16(void);
+extern int test_sub8(void);
+extern int test_sub6(void);
+
+#ifdef CONFIG_AS_HAS_ULEB128
+extern int test_uleb_basic(void);
+extern int test_uleb_large(void);
+#endif
+
+#define CHECK_EQ(lhs, rhs) KUNIT_ASSERT_EQ(test, lhs, rhs)
+
+void run_test_set(struct kunit *test);
+void run_test_sub(struct kunit *test);
+void run_test_uleb(struct kunit *test);
+
+void run_test_set(struct kunit *test)
+{
+ int val32 = test_set32();
+ int val16 = test_set16();
+ int val8 = test_set8();
+ int val6 = test_set6();
+
+ CHECK_EQ(val32, 0);
+ CHECK_EQ(val16, 0);
+ CHECK_EQ(val8, 0);
+ CHECK_EQ(val6, 0);
+}
+
+void run_test_sub(struct kunit *test)
+{
+ int val64 = test_sub64();
+ int val32 = test_sub32();
+ int val16 = test_sub16();
+ int val8 = test_sub8();
+ int val6 = test_sub6();
+
+ CHECK_EQ(val64, 0);
+ CHECK_EQ(val32, 0);
+ CHECK_EQ(val16, 0);
+ CHECK_EQ(val8, 0);
+ CHECK_EQ(val6, 0);
+}
+
+#ifdef CONFIG_AS_HAS_ULEB128
+void run_test_uleb(struct kunit *test)
+{
+ int val_uleb = test_uleb_basic();
+ int val_uleb2 = test_uleb_large();
+
+ CHECK_EQ(val_uleb, 0);
+ CHECK_EQ(val_uleb2, 0);
+}
+#endif
+
+static struct kunit_case __refdata riscv_module_linking_test_cases[] = {
+ KUNIT_CASE(run_test_set),
+ KUNIT_CASE(run_test_sub),
+#ifdef CONFIG_AS_HAS_ULEB128
+ KUNIT_CASE(run_test_uleb),
+#endif
+ {}
+};
+
+static struct kunit_suite riscv_module_linking_test_suite = {
+ .name = "riscv_module_linking",
+ .test_cases = riscv_module_linking_test_cases,
+};
+
+kunit_test_suites(&riscv_module_linking_test_suite);
diff --git a/arch/riscv/kernel/tests/module_test/test_set16.S b/arch/riscv/kernel/tests/module_test/test_set16.S
new file mode 100644
index 000000000000..2be0e441a12e
--- /dev/null
+++ b/arch/riscv/kernel/tests/module_test/test_set16.S
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_set16
+test_set16:
+ lw a0, set16
+ la t0, set16
+#ifdef CONFIG_32BIT
+ slli t0, t0, 16
+ srli t0, t0, 16
+#else
+ slli t0, t0, 48
+ srli t0, t0, 48
+#endif
+ sub a0, a0, t0
+ ret
+.data
+set16:
+ .reloc set16, R_RISCV_SET16, set16
+ .word 0
diff --git a/arch/riscv/kernel/tests/module_test/test_set32.S b/arch/riscv/kernel/tests/module_test/test_set32.S
new file mode 100644
index 000000000000..de0444537e67
--- /dev/null
+++ b/arch/riscv/kernel/tests/module_test/test_set32.S
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_set32
+test_set32:
+ lw a0, set32
+ la t0, set32
+#ifndef CONFIG_32BIT
+ slli t0, t0, 32
+ srli t0, t0, 32
+#endif
+ sub a0, a0, t0
+ ret
+.data
+set32:
+ .reloc set32, R_RISCV_SET32, set32
+ .word 0
diff --git a/arch/riscv/kernel/tests/module_test/test_set6.S b/arch/riscv/kernel/tests/module_test/test_set6.S
new file mode 100644
index 000000000000..c39ce4c219eb
--- /dev/null
+++ b/arch/riscv/kernel/tests/module_test/test_set6.S
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_set6
+test_set6:
+ lw a0, set6
+ la t0, set6
+#ifdef CONFIG_32BIT
+ slli t0, t0, 26
+ srli t0, t0, 26
+#else
+ slli t0, t0, 58
+ srli t0, t0, 58
+#endif
+ sub a0, a0, t0
+ ret
+.data
+set6:
+ .reloc set6, R_RISCV_SET6, set6
+ .word 0
diff --git a/arch/riscv/kernel/tests/module_test/test_set8.S b/arch/riscv/kernel/tests/module_test/test_set8.S
new file mode 100644
index 000000000000..a656173f6f99
--- /dev/null
+++ b/arch/riscv/kernel/tests/module_test/test_set8.S
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_set8
+test_set8:
+ lw a0, set8
+ la t0, set8
+#ifdef CONFIG_32BIT
+ slli t0, t0, 24
+ srli t0, t0, 24
+#else
+ slli t0, t0, 56
+ srli t0, t0, 56
+#endif
+ sub a0, a0, t0
+ ret
+.data
+set8:
+ .reloc set8, R_RISCV_SET8, set8
+ .word 0
diff --git a/arch/riscv/kernel/tests/module_test/test_sub16.S b/arch/riscv/kernel/tests/module_test/test_sub16.S
new file mode 100644
index 000000000000..80f731d599ba
--- /dev/null
+++ b/arch/riscv/kernel/tests/module_test/test_sub16.S
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_sub16
+test_sub16:
+ lh a0, sub16
+ addi a0, a0, -32
+ ret
+first:
+ .space 32
+second:
+
+.data
+sub16:
+ .reloc sub16, R_RISCV_ADD16, second
+ .reloc sub16, R_RISCV_SUB16, first
+ .half 0
diff --git a/arch/riscv/kernel/tests/module_test/test_sub32.S b/arch/riscv/kernel/tests/module_test/test_sub32.S
new file mode 100644
index 000000000000..a341686e12df
--- /dev/null
+++ b/arch/riscv/kernel/tests/module_test/test_sub32.S
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_sub32
+test_sub32:
+ lw a0, sub32
+ addi a0, a0, -32
+ ret
+first:
+ .space 32
+second:
+
+.data
+sub32:
+ .reloc sub32, R_RISCV_ADD32, second
+ .reloc sub32, R_RISCV_SUB32, first
+ .word 0
diff --git a/arch/riscv/kernel/tests/module_test/test_sub6.S b/arch/riscv/kernel/tests/module_test/test_sub6.S
new file mode 100644
index 000000000000..e8b61c1ec527
--- /dev/null
+++ b/arch/riscv/kernel/tests/module_test/test_sub6.S
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_sub6
+test_sub6:
+ lb a0, sub6
+ addi a0, a0, -32
+ ret
+first:
+ .space 32
+second:
+
+.data
+sub6:
+ .reloc sub6, R_RISCV_SET6, second
+ .reloc sub6, R_RISCV_SUB6, first
+ .byte 0
diff --git a/arch/riscv/kernel/tests/module_test/test_sub64.S b/arch/riscv/kernel/tests/module_test/test_sub64.S
new file mode 100644
index 000000000000..a59e8afa88fd
--- /dev/null
+++ b/arch/riscv/kernel/tests/module_test/test_sub64.S
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_sub64
+test_sub64:
+#ifdef CONFIG_32BIT
+ lw a0, sub64
+#else
+ ld a0, sub64
+#endif
+ addi a0, a0, -32
+ ret
+first:
+ .space 32
+second:
+
+.data
+sub64:
+ .reloc sub64, R_RISCV_ADD64, second
+ .reloc sub64, R_RISCV_SUB64, first
+ .word 0
+ .word 0
diff --git a/arch/riscv/kernel/tests/module_test/test_sub8.S b/arch/riscv/kernel/tests/module_test/test_sub8.S
new file mode 100644
index 000000000000..ac5d0ec98de3
--- /dev/null
+++ b/arch/riscv/kernel/tests/module_test/test_sub8.S
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_sub8
+test_sub8:
+ lb a0, sub8
+ addi a0, a0, -32
+ ret
+first:
+ .space 32
+second:
+
+.data
+sub8:
+ .reloc sub8, R_RISCV_ADD8, second
+ .reloc sub8, R_RISCV_SUB8, first
+ .byte 0
diff --git a/arch/riscv/kernel/tests/module_test/test_uleb128.S b/arch/riscv/kernel/tests/module_test/test_uleb128.S
new file mode 100644
index 000000000000..90f22049d553
--- /dev/null
+++ b/arch/riscv/kernel/tests/module_test/test_uleb128.S
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_uleb_basic
+test_uleb_basic:
+ ld a0, second
+ addi a0, a0, -127
+ ret
+
+.global test_uleb_large
+test_uleb_large:
+ ld a0, fourth
+ addi a0, a0, -0x07e8
+ ret
+
+.data
+first:
+ .space 127
+second:
+ .reloc second, R_RISCV_SET_ULEB128, second
+ .reloc second, R_RISCV_SUB_ULEB128, first
+ .dword 0
+third:
+ .space 1000
+fourth:
+ .reloc fourth, R_RISCV_SET_ULEB128, fourth
+ .reloc fourth, R_RISCV_SUB_ULEB128, third
+ .dword 0
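For reference, ULEB128 encodes a value in 7-bit groups, least significant group first, with the top bit of each byte flagging a continuation, so the encoded length depends on the value itself; that is why SET/SUB_ULEB128 relocations are accumulated and only written once the final difference is known. A small illustrative encoder (not part of the patch):

	static size_t uleb128_encode(unsigned long value, unsigned char *out)
	{
		size_t len = 0;

		do {
			unsigned char byte = value & 0x7f;

			value >>= 7;
			if (value)
				byte |= 0x80;	/* more groups follow */
			out[len++] = byte;
		} while (value);

		return len;
	}

127 encodes as the single byte 0x7f, while 128 already needs two bytes (0x80 0x01), which is the kind of size change the basic/large test pair above exercises.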
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index fae8f610d867..a1b9be3c4332 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -36,7 +36,21 @@ int show_unhandled_signals = 1;
static DEFINE_SPINLOCK(die_lock);
-static void dump_kernel_instr(const char *loglvl, struct pt_regs *regs)
+static int copy_code(struct pt_regs *regs, u16 *val, const u16 *insns)
+{
+ const void __user *uaddr = (__force const void __user *)insns;
+
+ if (!user_mode(regs))
+ return get_kernel_nofault(*val, insns);
+
+ /* The user space code from other tasks cannot be accessed. */
+ if (regs != task_pt_regs(current))
+ return -EPERM;
+
+ return copy_from_user_nofault(val, uaddr, sizeof(*val));
+}
+
+static void dump_instr(const char *loglvl, struct pt_regs *regs)
{
char str[sizeof("0000 ") * 12 + 2 + 1], *p = str;
const u16 *insns = (u16 *)instruction_pointer(regs);
@@ -45,7 +59,7 @@ static void dump_kernel_instr(const char *loglvl, struct pt_regs *regs)
int i;
for (i = -10; i < 2; i++) {
- bad = get_kernel_nofault(val, &insns[i]);
+ bad = copy_code(regs, &val, &insns[i]);
if (!bad) {
p += sprintf(p, i == 0 ? "(%04hx) " : "%04hx ", val);
} else {
@@ -74,7 +88,7 @@ void die(struct pt_regs *regs, const char *str)
print_modules();
if (regs) {
show_regs(regs);
- dump_kernel_instr(KERN_EMERG, regs);
+ dump_instr(KERN_EMERG, regs);
}
cause = regs ? regs->cause : -1;
@@ -107,6 +121,7 @@ void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
pr_cont("\n");
__show_regs(regs);
+ dump_instr(KERN_EMERG, regs);
}
force_sig_fault(signo, code, (void __user *)addr);
@@ -181,14 +196,6 @@ asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *re
DO_ERROR_INFO(do_trap_load_fault,
SIGSEGV, SEGV_ACCERR, "load access fault");
-#ifndef CONFIG_RISCV_M_MODE
-DO_ERROR_INFO(do_trap_load_misaligned,
- SIGBUS, BUS_ADRALN, "Oops - load address misaligned");
-DO_ERROR_INFO(do_trap_store_misaligned,
- SIGBUS, BUS_ADRALN, "Oops - store (or AMO) address misaligned");
-#else
-int handle_misaligned_load(struct pt_regs *regs);
-int handle_misaligned_store(struct pt_regs *regs);
asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
{
@@ -231,7 +238,6 @@ asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs
irqentry_nmi_exit(regs, state);
}
}
-#endif
DO_ERROR_INFO(do_trap_store_fault,
SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
DO_ERROR_INFO(do_trap_ecall_s,
@@ -360,34 +366,10 @@ static void noinstr handle_riscv_irq(struct pt_regs *regs)
asmlinkage void noinstr do_irq(struct pt_regs *regs)
{
irqentry_state_t state = irqentry_enter(regs);
-#ifdef CONFIG_IRQ_STACKS
- if (on_thread_stack()) {
- ulong *sp = per_cpu(irq_stack_ptr, smp_processor_id())
- + IRQ_STACK_SIZE/sizeof(ulong);
- __asm__ __volatile(
- "addi sp, sp, -"RISCV_SZPTR "\n"
- REG_S" ra, (sp) \n"
- "addi sp, sp, -"RISCV_SZPTR "\n"
- REG_S" s0, (sp) \n"
- "addi s0, sp, 2*"RISCV_SZPTR "\n"
- "move sp, %[sp] \n"
- "move a0, %[regs] \n"
- "call handle_riscv_irq \n"
- "addi sp, s0, -2*"RISCV_SZPTR"\n"
- REG_L" s0, (sp) \n"
- "addi sp, sp, "RISCV_SZPTR "\n"
- REG_L" ra, (sp) \n"
- "addi sp, sp, "RISCV_SZPTR "\n"
- :
- : [sp] "r" (sp), [regs] "r" (regs)
- : "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
- "t0", "t1", "t2", "t3", "t4", "t5", "t6",
-#ifndef CONFIG_FRAME_POINTER
- "s0",
-#endif
- "memory");
- } else
-#endif
+
+ if (IS_ENABLED(CONFIG_IRQ_STACKS) && on_thread_stack())
+ call_on_irq_stack(regs, handle_riscv_irq);
+ else
handle_riscv_irq(regs);
irqentry_exit(regs, state);
@@ -410,48 +392,14 @@ int is_valid_bugaddr(unsigned long pc)
#endif /* CONFIG_GENERIC_BUG */
#ifdef CONFIG_VMAP_STACK
-/*
- * Extra stack space that allows us to provide panic messages when the kernel
- * has overflowed its stack.
- */
-static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
+DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
overflow_stack)__aligned(16);
-/*
- * A temporary stack for use by handle_kernel_stack_overflow. This is used so
- * we can call into C code to get the per-hart overflow stack. Usage of this
- * stack must be protected by spin_shadow_stack.
- */
-long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16);
-
-/*
- * A pseudo spinlock to protect the shadow stack from being used by multiple
- * harts concurrently. This isn't a real spinlock because the lock side must
- * be taken without a valid stack and only a single register, it's only taken
- * while in the process of panicing anyway so the performance and error
- * checking a proper spinlock gives us doesn't matter.
- */
-unsigned long spin_shadow_stack;
-
-asmlinkage unsigned long get_overflow_stack(void)
-{
- return (unsigned long)this_cpu_ptr(overflow_stack) +
- OVERFLOW_STACK_SIZE;
-}
asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
unsigned long tsk_stk = (unsigned long)current->stack;
unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
- /*
- * We're done with the shadow stack by this point, as we're on the
- * overflow stack. Tell any other concurrent overflowing harts that
- * they can proceed with panicing by releasing the pseudo-spinlock.
- *
- * This pairs with an amoswap.aq in handle_kernel_stack_overflow.
- */
- smp_store_release(&spin_shadow_stack, 0);
-
console_verbose();
pr_emerg("Insufficient stack space to handle exception!\n");
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 378f5b151443..5eba37147caa 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -6,12 +6,16 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/perf_event.h>
#include <linux/irq.h>
#include <linux/stringify.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/csr.h>
+#include <asm/entry-common.h>
+#include <asm/hwprobe.h>
+#include <asm/cpufeature.h>
#define INSN_MATCH_LB 0x3
#define INSN_MASK_LB 0x707f
@@ -151,53 +155,134 @@
#define PRECISION_S 0
#define PRECISION_D 1
-#define DECLARE_UNPRIVILEGED_LOAD_FUNCTION(type, insn) \
-static inline type load_##type(const type *addr) \
-{ \
- type val; \
- asm (#insn " %0, %1" \
- : "=&r" (val) : "m" (*addr)); \
- return val; \
+#ifdef CONFIG_FPU
+
+#define FP_GET_RD(insn) (insn >> 7 & 0x1F)
+
+extern void put_f32_reg(unsigned long fp_reg, unsigned long value);
+
+static int set_f32_rd(unsigned long insn, struct pt_regs *regs,
+ unsigned long val)
+{
+ unsigned long fp_reg = FP_GET_RD(insn);
+
+ put_f32_reg(fp_reg, val);
+ regs->status |= SR_FS_DIRTY;
+
+ return 0;
}
-#define DECLARE_UNPRIVILEGED_STORE_FUNCTION(type, insn) \
-static inline void store_##type(type *addr, type val) \
-{ \
- asm volatile (#insn " %0, %1\n" \
- : : "r" (val), "m" (*addr)); \
+extern void put_f64_reg(unsigned long fp_reg, unsigned long value);
+
+static int set_f64_rd(unsigned long insn, struct pt_regs *regs, u64 val)
+{
+ unsigned long fp_reg = FP_GET_RD(insn);
+ unsigned long value;
+
+#if __riscv_xlen == 32
+ value = (unsigned long) &val;
+#else
+ value = val;
+#endif
+ put_f64_reg(fp_reg, value);
+ regs->status |= SR_FS_DIRTY;
+
+ return 0;
}
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u8, lbu)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u16, lhu)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s8, lb)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s16, lh)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s32, lw)
-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u8, sb)
-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u16, sh)
-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u32, sw)
-#if defined(CONFIG_64BIT)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lwu)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u64, ld)
-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u64, sd)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, ld)
+#if __riscv_xlen == 32
+extern void get_f64_reg(unsigned long fp_reg, u64 *value);
+
+static u64 get_f64_rs(unsigned long insn, u8 fp_reg_offset,
+ struct pt_regs *regs)
+{
+ unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
+ u64 val;
+
+ get_f64_reg(fp_reg, &val);
+ regs->status |= SR_FS_DIRTY;
+
+ return val;
+}
#else
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lw)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, lw)
-static inline u64 load_u64(const u64 *addr)
+extern unsigned long get_f64_reg(unsigned long fp_reg);
+
+static unsigned long get_f64_rs(unsigned long insn, u8 fp_reg_offset,
+ struct pt_regs *regs)
{
- return load_u32((u32 *)addr)
- + ((u64)load_u32((u32 *)addr + 1) << 32);
+ unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
+ unsigned long val;
+
+ val = get_f64_reg(fp_reg);
+ regs->status |= SR_FS_DIRTY;
+
+ return val;
}
-static inline void store_u64(u64 *addr, u64 val)
+#endif
+
+extern unsigned long get_f32_reg(unsigned long fp_reg);
+
+static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
+ struct pt_regs *regs)
{
- store_u32((u32 *)addr, val);
- store_u32((u32 *)addr + 1, val >> 32);
+ unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
+ unsigned long val;
+
+ val = get_f32_reg(fp_reg);
+ regs->status |= SR_FS_DIRTY;
+
+ return val;
}
+
+#else /* CONFIG_FPU */
+static void set_f32_rd(unsigned long insn, struct pt_regs *regs,
+ unsigned long val) {}
+
+static void set_f64_rd(unsigned long insn, struct pt_regs *regs, u64 val) {}
+
+static unsigned long get_f64_rs(unsigned long insn, u8 fp_reg_offset,
+ struct pt_regs *regs)
+{
+ return 0;
+}
+
+static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
+ struct pt_regs *regs)
+{
+ return 0;
+}
+
#endif
-static inline ulong get_insn(ulong mepc)
+#define GET_F64_RS2(insn, regs) (get_f64_rs(insn, 20, regs))
+#define GET_F64_RS2C(insn, regs) (get_f64_rs(insn, 2, regs))
+#define GET_F64_RS2S(insn, regs) (get_f64_rs(RVC_RS2S(insn), 0, regs))
+
+#define GET_F32_RS2(insn, regs) (get_f32_rs(insn, 20, regs))
+#define GET_F32_RS2C(insn, regs) (get_f32_rs(insn, 2, regs))
+#define GET_F32_RS2S(insn, regs) (get_f32_rs(RVC_RS2S(insn), 0, regs))
+
+#ifdef CONFIG_RISCV_M_MODE
+static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
+{
+ u8 val;
+
+ asm volatile("lbu %0, %1" : "=&r" (val) : "m" (*addr));
+ *r_val = val;
+
+ return 0;
+}
+
+static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
+{
+ asm volatile ("sb %0, %1\n" : : "r" (val), "m" (*addr));
+
+ return 0;
+}
+
+static inline int get_insn(struct pt_regs *regs, ulong mepc, ulong *r_insn)
{
register ulong __mepc asm ("a2") = mepc;
ulong val, rvc_mask = 3, tmp;
@@ -226,23 +311,119 @@ static inline ulong get_insn(ulong mepc)
: [addr] "r" (__mepc), [rvc_mask] "r" (rvc_mask),
[xlen_minus_16] "i" (XLEN_MINUS_16));
- return val;
+ *r_insn = val;
+
+ return 0;
+}
+#else
+static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
+{
+ if (user_mode(regs)) {
+ return __get_user(*r_val, addr);
+ } else {
+ *r_val = *addr;
+ return 0;
+ }
+}
+
+static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
+{
+ if (user_mode(regs)) {
+ return __put_user(val, addr);
+ } else {
+ *addr = val;
+ return 0;
+ }
}
+#define __read_insn(regs, insn, insn_addr) \
+({ \
+ int __ret; \
+ \
+ if (user_mode(regs)) { \
+ __ret = __get_user(insn, insn_addr); \
+ } else { \
+ insn = *insn_addr; \
+ __ret = 0; \
+ } \
+ \
+ __ret; \
+})
+
+static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn)
+{
+ ulong insn = 0;
+
+ if (epc & 0x2) {
+ ulong tmp = 0;
+ u16 __user *insn_addr = (u16 __user *)epc;
+
+ if (__read_insn(regs, insn, insn_addr))
+ return -EFAULT;
+ /* __get_user() uses regular "lw" which sign extends the loaded
+ * value; make sure to clear the higher-order bits in case we "or" it
+ * below with the upper 16-bit half.
+ */
+ insn &= GENMASK(15, 0);
+ if ((insn & __INSN_LENGTH_MASK) != __INSN_LENGTH_32) {
+ *r_insn = insn;
+ return 0;
+ }
+ insn_addr++;
+ if (__read_insn(regs, tmp, insn_addr))
+ return -EFAULT;
+ *r_insn = (tmp << 16) | insn;
+
+ return 0;
+ } else {
+ u32 __user *insn_addr = (u32 __user *)epc;
+
+ if (__read_insn(regs, insn, insn_addr))
+ return -EFAULT;
+ if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) {
+ *r_insn = insn;
+ return 0;
+ }
+ insn &= GENMASK(15, 0);
+ *r_insn = insn;
+
+ return 0;
+ }
+}
+#endif
+
union reg_data {
u8 data_bytes[8];
ulong data_ulong;
u64 data_u64;
};
+static bool unaligned_ctl __read_mostly;
+
+/* sysctl hooks */
+int unaligned_enabled __read_mostly = 1; /* Enabled by default */
+
int handle_misaligned_load(struct pt_regs *regs)
{
union reg_data val;
unsigned long epc = regs->epc;
- unsigned long insn = get_insn(epc);
- unsigned long addr = csr_read(mtval);
+ unsigned long insn;
+ unsigned long addr = regs->badaddr;
int i, fp = 0, shift = 0, len = 0;
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
+
+ *this_cpu_ptr(&misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_EMULATED;
+
+ if (!unaligned_enabled)
+ return -1;
+
+ if (user_mode(regs) && (current->thread.align_ctl & PR_UNALIGN_SIGBUS))
+ return -1;
+
+ if (get_insn(regs, epc, &insn))
+ return -1;
+
regs->epc = 0;
if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
@@ -305,13 +486,21 @@ int handle_misaligned_load(struct pt_regs *regs)
return -1;
}
+ if (!IS_ENABLED(CONFIG_FPU) && fp)
+ return -EOPNOTSUPP;
+
val.data_u64 = 0;
- for (i = 0; i < len; i++)
- val.data_bytes[i] = load_u8((void *)(addr + i));
+ for (i = 0; i < len; i++) {
+ if (load_u8(regs, (void *)(addr + i), &val.data_bytes[i]))
+ return -1;
+ }
- if (fp)
- return -1;
- SET_RD(insn, regs, val.data_ulong << shift >> shift);
+ if (!fp)
+ SET_RD(insn, regs, val.data_ulong << shift >> shift);
+ else if (len == 8)
+ set_f64_rd(insn, regs, val.data_u64);
+ else
+ set_f32_rd(insn, regs, val.data_ulong);
regs->epc = epc + INSN_LEN(insn);
@@ -322,9 +511,20 @@ int handle_misaligned_store(struct pt_regs *regs)
{
union reg_data val;
unsigned long epc = regs->epc;
- unsigned long insn = get_insn(epc);
- unsigned long addr = csr_read(mtval);
- int i, len = 0;
+ unsigned long insn;
+ unsigned long addr = regs->badaddr;
+ int i, len = 0, fp = 0;
+
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
+
+ if (!unaligned_enabled)
+ return -1;
+
+ if (user_mode(regs) && (current->thread.align_ctl & PR_UNALIGN_SIGBUS))
+ return -1;
+
+ if (get_insn(regs, epc, &insn))
+ return -1;
regs->epc = 0;
@@ -336,6 +536,14 @@ int handle_misaligned_store(struct pt_regs *regs)
} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
len = 8;
#endif
+ } else if ((insn & INSN_MASK_FSD) == INSN_MATCH_FSD) {
+ fp = 1;
+ len = 8;
+ val.data_u64 = GET_F64_RS2(insn, regs);
+ } else if ((insn & INSN_MASK_FSW) == INSN_MATCH_FSW) {
+ fp = 1;
+ len = 4;
+ val.data_ulong = GET_F32_RS2(insn, regs);
} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
len = 2;
#if defined(CONFIG_64BIT)
@@ -354,15 +562,88 @@ int handle_misaligned_store(struct pt_regs *regs)
((insn >> SH_RD) & 0x1f)) {
len = 4;
val.data_ulong = GET_RS2C(insn, regs);
+ } else if ((insn & INSN_MASK_C_FSD) == INSN_MATCH_C_FSD) {
+ fp = 1;
+ len = 8;
+ val.data_u64 = GET_F64_RS2S(insn, regs);
+ } else if ((insn & INSN_MASK_C_FSDSP) == INSN_MATCH_C_FSDSP) {
+ fp = 1;
+ len = 8;
+ val.data_u64 = GET_F64_RS2C(insn, regs);
+#if !defined(CONFIG_64BIT)
+ } else if ((insn & INSN_MASK_C_FSW) == INSN_MATCH_C_FSW) {
+ fp = 1;
+ len = 4;
+ val.data_ulong = GET_F32_RS2S(insn, regs);
+ } else if ((insn & INSN_MASK_C_FSWSP) == INSN_MATCH_C_FSWSP) {
+ fp = 1;
+ len = 4;
+ val.data_ulong = GET_F32_RS2C(insn, regs);
+#endif
} else {
regs->epc = epc;
return -1;
}
- for (i = 0; i < len; i++)
- store_u8((void *)(addr + i), val.data_bytes[i]);
+ if (!IS_ENABLED(CONFIG_FPU) && fp)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < len; i++) {
+ if (store_u8(regs, (void *)(addr + i), val.data_bytes[i]))
+ return -1;
+ }
regs->epc = epc + INSN_LEN(insn);
return 0;
}
+
+bool check_unaligned_access_emulated(int cpu)
+{
+ long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
+ unsigned long tmp_var, tmp_val;
+ bool misaligned_emu_detected;
+
+ *mas_ptr = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
+
+ __asm__ __volatile__ (
+ " "REG_L" %[tmp], 1(%[ptr])\n"
+ : [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");
+
+ misaligned_emu_detected = (*mas_ptr == RISCV_HWPROBE_MISALIGNED_EMULATED);
+ /*
+ * If unaligned_ctl is already set, it means we already detected that all
+ * CPUs use emulated misaligned accesses at boot time. If that changed
+ * when hotplugging a new CPU, this is something we don't handle.
+ */
+ if (unlikely(unaligned_ctl && !misaligned_emu_detected)) {
+ pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n");
+ while (true)
+ cpu_relax();
+ }
+
+ return misaligned_emu_detected;
+}
+
+void unaligned_emulation_finish(void)
+{
+ int cpu;
+
+ /*
+ * We can only support PR_UNALIGN controls if all CPUs have misaligned
+ * accesses emulated since tasks requesting such control can run on any
+ * CPU.
+ */
+ for_each_present_cpu(cpu) {
+ if (per_cpu(misaligned_access_speed, cpu) !=
+ RISCV_HWPROBE_MISALIGNED_EMULATED) {
+ return;
+ }
+ }
+ unaligned_ctl = true;
+}
+
+bool unaligned_ctl_available(void)
+{
+ return unaligned_ctl;
+}
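Putting the pieces together: check_unaligned_access_emulated() issues a deliberately misaligned REG_L; if the hardware cannot complete it, the trap lands in handle_misaligned_load(), which now tags the CPU as RISCV_HWPROBE_MISALIGNED_EMULATED before emulating the access, so reading misaligned_access_speed back tells the caller whether emulation happened. Only when every present CPU reports emulation does unaligned_emulation_finish() set unaligned_ctl, which in turn gates the PR_SET_UNALIGN/PR_GET_UNALIGN support added in process.c above.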
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index 6b1dba11bf6d..9b517fe1b8a8 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -36,7 +36,7 @@ CPPFLAGS_vdso.lds += -DHAS_VGETTIMEOFDAY
endif
# Disable -pg to prevent insert call site
-CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS)
# Disable profiling and instrumentation for VDSO code
GCOV_PROFILE := n
@@ -73,13 +73,3 @@ quiet_cmd_vdsold = VDSOLD $@
cmd_vdsold = $(LD) $(ld_flags) -T $(filter-out FORCE,$^) -o $@.tmp && \
$(OBJCOPY) $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
rm $@.tmp
-
-# install commands for the unstripped file
-quiet_cmd_vdso_install = INSTALL $@
- cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
-
-vdso.so: $(obj)/vdso.so.dbg
- @mkdir -p $(MODLIB)/vdso
- $(call cmd,vdso_install)
-
-vdso_install: vdso.so
diff --git a/arch/riscv/kernel/vdso/flush_icache.S b/arch/riscv/kernel/vdso/flush_icache.S
index 82f97d67c23e..8f884227e8bc 100644
--- a/arch/riscv/kernel/vdso/flush_icache.S
+++ b/arch/riscv/kernel/vdso/flush_icache.S
@@ -8,7 +8,7 @@
.text
/* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */
-ENTRY(__vdso_flush_icache)
+SYM_FUNC_START(__vdso_flush_icache)
.cfi_startproc
#ifdef CONFIG_SMP
li a7, __NR_riscv_flush_icache
@@ -19,4 +19,4 @@ ENTRY(__vdso_flush_icache)
#endif
ret
.cfi_endproc
-ENDPROC(__vdso_flush_icache)
+SYM_FUNC_END(__vdso_flush_icache)
diff --git a/arch/riscv/kernel/vdso/getcpu.S b/arch/riscv/kernel/vdso/getcpu.S
index bb0c05e2ffba..9c1bd531907f 100644
--- a/arch/riscv/kernel/vdso/getcpu.S
+++ b/arch/riscv/kernel/vdso/getcpu.S
@@ -8,11 +8,11 @@
.text
/* int __vdso_getcpu(unsigned *cpu, unsigned *node, void *unused); */
-ENTRY(__vdso_getcpu)
+SYM_FUNC_START(__vdso_getcpu)
.cfi_startproc
/* For now, just do the syscall. */
li a7, __NR_getcpu
ecall
ret
.cfi_endproc
-ENDPROC(__vdso_getcpu)
+SYM_FUNC_END(__vdso_getcpu)
diff --git a/arch/riscv/kernel/vdso/hwprobe.c b/arch/riscv/kernel/vdso/hwprobe.c
index d40bec6ac078..cadf725ef798 100644
--- a/arch/riscv/kernel/vdso/hwprobe.c
+++ b/arch/riscv/kernel/vdso/hwprobe.c
@@ -37,7 +37,7 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
/* This is something we can handle, fill out the pairs. */
while (p < end) {
- if (p->key <= RISCV_HWPROBE_MAX_KEY) {
+ if (riscv_hwprobe_key_is_valid(p->key)) {
p->value = avd->all_cpu_hwprobe_values[p->key];
} else {
diff --git a/arch/riscv/kernel/vdso/rt_sigreturn.S b/arch/riscv/kernel/vdso/rt_sigreturn.S
index 10438c7c626a..3dc022aa8931 100644
--- a/arch/riscv/kernel/vdso/rt_sigreturn.S
+++ b/arch/riscv/kernel/vdso/rt_sigreturn.S
@@ -7,10 +7,10 @@
#include <asm/unistd.h>
.text
-ENTRY(__vdso_rt_sigreturn)
+SYM_FUNC_START(__vdso_rt_sigreturn)
.cfi_startproc
.cfi_signal_frame
li a7, __NR_rt_sigreturn
ecall
.cfi_endproc
-ENDPROC(__vdso_rt_sigreturn)
+SYM_FUNC_END(__vdso_rt_sigreturn)
diff --git a/arch/riscv/kernel/vdso/sys_hwprobe.S b/arch/riscv/kernel/vdso/sys_hwprobe.S
index 4e704146c77a..77e57f830521 100644
--- a/arch/riscv/kernel/vdso/sys_hwprobe.S
+++ b/arch/riscv/kernel/vdso/sys_hwprobe.S
@@ -5,11 +5,11 @@
#include <asm/unistd.h>
.text
-ENTRY(riscv_hwprobe)
+SYM_FUNC_START(riscv_hwprobe)
.cfi_startproc
li a7, __NR_riscv_hwprobe
ecall
ret
.cfi_endproc
-ENDPROC(riscv_hwprobe)
+SYM_FUNC_END(riscv_hwprobe)
diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S
index 82ce64900f3d..cbe2a179331d 100644
--- a/arch/riscv/kernel/vdso/vdso.lds.S
+++ b/arch/riscv/kernel/vdso/vdso.lds.S
@@ -23,35 +23,31 @@ SECTIONS
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
- .note : { *(.note.*) } :text :note
.dynamic : { *(.dynamic) } :text :dynamic
+ .rodata : {
+ *(.rodata .rodata.* .gnu.linkonce.r.*)
+ *(.got.plt) *(.got)
+ *(.data .data.* .gnu.linkonce.d.*)
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ }
+
+ .note : { *(.note.*) } :text :note
+
.eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
.eh_frame : { KEEP (*(.eh_frame)) } :text
- .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
-
/*
- * This linker script is used both with -r and with -shared.
- * For the layouts to match, we need to skip more than enough
- * space for the dynamic symbol table, etc. If this amount is
- * insufficient, ld -shared will error; simply increase it here.
+ * Text is well-separated from actual data: there's plenty of
+ * stuff that isn't used at runtime in between.
*/
- . = 0x800;
+ . = ALIGN(16);
.text : { *(.text .text.*) } :text
. = ALIGN(4);
.alternative : {
- __alt_start = .;
*(.alternative)
- __alt_end = .;
- }
-
- .data : {
- *(.got.plt) *(.got)
- *(.data .data.* .gnu.linkonce.d.*)
- *(.dynbss)
- *(.bss .bss.* .gnu.linkonce.b.*)
}
}
diff --git a/arch/riscv/kvm/aia.c b/arch/riscv/kvm/aia.c
index 74bb27440527..a944294f6f23 100644
--- a/arch/riscv/kvm/aia.c
+++ b/arch/riscv/kvm/aia.c
@@ -14,7 +14,7 @@
#include <linux/kvm_host.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
#include <asm/kvm_aia_imsic.h>
struct aia_hgei_control {
diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c
index 48ae0d4b3932..225a435d9c9a 100644
--- a/arch/riscv/kvm/main.c
+++ b/arch/riscv/kvm/main.c
@@ -11,7 +11,7 @@
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
#include <asm/sbi.h>
long kvm_arch_dev_ioctl(struct file *filp,
diff --git a/arch/riscv/kvm/tlb.c b/arch/riscv/kvm/tlb.c
index 44bc324aeeb0..23c0e82b5103 100644
--- a/arch/riscv/kvm/tlb.c
+++ b/arch/riscv/kvm/tlb.c
@@ -12,7 +12,7 @@
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/csr.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
#include <asm/insn-def.h>
#define has_svinval() riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)
diff --git a/arch/riscv/kvm/vcpu_fp.c b/arch/riscv/kvm/vcpu_fp.c
index 08ba48a395aa..030904d82b58 100644
--- a/arch/riscv/kvm/vcpu_fp.c
+++ b/arch/riscv/kvm/vcpu_fp.c
@@ -11,7 +11,7 @@
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
#ifdef CONFIG_FPU
void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
index c6ebce6126b5..f8c9fa0c03c5 100644
--- a/arch/riscv/kvm/vcpu_onereg.c
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -13,7 +13,7 @@
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/vector.h>
diff --git a/arch/riscv/kvm/vcpu_vector.c b/arch/riscv/kvm/vcpu_vector.c
index b430cbb69521..b339a2682f25 100644
--- a/arch/riscv/kvm/vcpu_vector.c
+++ b/arch/riscv/kvm/vcpu_vector.c
@@ -11,7 +11,7 @@
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/vector.h>
diff --git a/arch/riscv/lib/clear_page.S b/arch/riscv/lib/clear_page.S
index d7a256eb53f4..b22de1231144 100644
--- a/arch/riscv/lib/clear_page.S
+++ b/arch/riscv/lib/clear_page.S
@@ -29,41 +29,41 @@ SYM_FUNC_START(clear_page)
lw a1, riscv_cboz_block_size
add a2, a0, a2
.Lzero_loop:
- CBO_zero(a0)
+ CBO_ZERO(a0)
add a0, a0, a1
CBOZ_ALT(11, "bltu a0, a2, .Lzero_loop; ret", "nop; nop")
- CBO_zero(a0)
+ CBO_ZERO(a0)
add a0, a0, a1
CBOZ_ALT(10, "bltu a0, a2, .Lzero_loop; ret", "nop; nop")
- CBO_zero(a0)
+ CBO_ZERO(a0)
add a0, a0, a1
- CBO_zero(a0)
+ CBO_ZERO(a0)
add a0, a0, a1
CBOZ_ALT(9, "bltu a0, a2, .Lzero_loop; ret", "nop; nop")
- CBO_zero(a0)
+ CBO_ZERO(a0)
add a0, a0, a1
- CBO_zero(a0)
+ CBO_ZERO(a0)
add a0, a0, a1
- CBO_zero(a0)
+ CBO_ZERO(a0)
add a0, a0, a1
- CBO_zero(a0)
+ CBO_ZERO(a0)
add a0, a0, a1
CBOZ_ALT(8, "bltu a0, a2, .Lzero_loop; ret", "nop; nop")
- CBO_zero(a0)
+ CBO_ZERO(a0)
add a0, a0, a1
- CBO_zero(a0)
+ CBO_ZERO(a0)
add a0, a0, a1
- CBO_zero(a0)
+ CBO_ZERO(a0)
add a0, a0, a1
- CBO_zero(a0)
+ CBO_ZERO(a0)
add a0, a0, a1
- CBO_zero(a0)
+ CBO_ZERO(a0)
add a0, a0, a1
- CBO_zero(a0)
+ CBO_ZERO(a0)
add a0, a0, a1
- CBO_zero(a0)
+ CBO_ZERO(a0)
add a0, a0, a1
- CBO_zero(a0)
+ CBO_ZERO(a0)
add a0, a0, a1
bltu a0, a2, .Lzero_loop
ret
diff --git a/arch/riscv/lib/memcpy.S b/arch/riscv/lib/memcpy.S
index 1a40d01a9543..44e009ec5fef 100644
--- a/arch/riscv/lib/memcpy.S
+++ b/arch/riscv/lib/memcpy.S
@@ -7,8 +7,7 @@
#include <asm/asm.h>
/* void *memcpy(void *, const void *, size_t) */
-ENTRY(__memcpy)
-WEAK(memcpy)
+SYM_FUNC_START(__memcpy)
move t6, a0 /* Preserve return value */
/* Defer to byte-oriented copy for small sizes */
@@ -105,6 +104,7 @@ WEAK(memcpy)
bltu a1, a3, 5b
6:
ret
-END(__memcpy)
+SYM_FUNC_END(__memcpy)
+SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy)
SYM_FUNC_ALIAS(__pi_memcpy, __memcpy)
SYM_FUNC_ALIAS(__pi___memcpy, __memcpy)
diff --git a/arch/riscv/lib/memmove.S b/arch/riscv/lib/memmove.S
index 838ff2022fe3..cb3e2e7ef0ba 100644
--- a/arch/riscv/lib/memmove.S
+++ b/arch/riscv/lib/memmove.S
@@ -7,7 +7,6 @@
#include <asm/asm.h>
SYM_FUNC_START(__memmove)
-SYM_FUNC_START_WEAK(memmove)
/*
* Returns
* a0 - dest
@@ -26,8 +25,8 @@ SYM_FUNC_START_WEAK(memmove)
*/
/* Return if nothing to do */
- beq a0, a1, return_from_memmove
- beqz a2, return_from_memmove
+ beq a0, a1, .Lreturn_from_memmove
+ beqz a2, .Lreturn_from_memmove
/*
* Register Uses
@@ -60,7 +59,7 @@ SYM_FUNC_START_WEAK(memmove)
* small enough not to bother.
*/
andi t0, a2, -(2 * SZREG)
- beqz t0, byte_copy
+ beqz t0, .Lbyte_copy
/*
* Now solve for t5 and t6.
@@ -87,14 +86,14 @@ SYM_FUNC_START_WEAK(memmove)
*/
xor t0, a0, a1
andi t1, t0, (SZREG - 1)
- beqz t1, coaligned_copy
+ beqz t1, .Lcoaligned_copy
/* Fall through to misaligned fixup copy */
-misaligned_fixup_copy:
- bltu a1, a0, misaligned_fixup_copy_reverse
+.Lmisaligned_fixup_copy:
+ bltu a1, a0, .Lmisaligned_fixup_copy_reverse
-misaligned_fixup_copy_forward:
- jal t0, byte_copy_until_aligned_forward
+.Lmisaligned_fixup_copy_forward:
+ jal t0, .Lbyte_copy_until_aligned_forward
andi a5, a1, (SZREG - 1) /* Find the alignment offset of src (a1) */
slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */
@@ -153,10 +152,10 @@ misaligned_fixup_copy_forward:
mv t3, t6 /* Fix the dest pointer in case the loop was broken */
add a1, t3, a5 /* Restore the src pointer */
- j byte_copy_forward /* Copy any remaining bytes */
+ j .Lbyte_copy_forward /* Copy any remaining bytes */
-misaligned_fixup_copy_reverse:
- jal t0, byte_copy_until_aligned_reverse
+.Lmisaligned_fixup_copy_reverse:
+ jal t0, .Lbyte_copy_until_aligned_reverse
andi a5, a4, (SZREG - 1) /* Find the alignment offset of src (a4) */
slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */
@@ -215,18 +214,18 @@ misaligned_fixup_copy_reverse:
mv t4, t5 /* Fix the dest pointer in case the loop was broken */
add a4, t4, a5 /* Restore the src pointer */
- j byte_copy_reverse /* Copy any remaining bytes */
+ j .Lbyte_copy_reverse /* Copy any remaining bytes */
/*
* Simple copy loops for SZREG co-aligned memory locations.
* These also make calls to do byte copies for any unaligned
* data at their terminations.
*/
-coaligned_copy:
- bltu a1, a0, coaligned_copy_reverse
+.Lcoaligned_copy:
+ bltu a1, a0, .Lcoaligned_copy_reverse
-coaligned_copy_forward:
- jal t0, byte_copy_until_aligned_forward
+.Lcoaligned_copy_forward:
+ jal t0, .Lbyte_copy_until_aligned_forward
1:
REG_L t1, ( 0 * SZREG)(a1)
@@ -235,10 +234,10 @@ coaligned_copy_forward:
REG_S t1, (-1 * SZREG)(t3)
bne t3, t6, 1b
- j byte_copy_forward /* Copy any remaining bytes */
+ j .Lbyte_copy_forward /* Copy any remaining bytes */
-coaligned_copy_reverse:
- jal t0, byte_copy_until_aligned_reverse
+.Lcoaligned_copy_reverse:
+ jal t0, .Lbyte_copy_until_aligned_reverse
1:
REG_L t1, (-1 * SZREG)(a4)
@@ -247,7 +246,7 @@ coaligned_copy_reverse:
REG_S t1, ( 0 * SZREG)(t4)
bne t4, t5, 1b
- j byte_copy_reverse /* Copy any remaining bytes */
+ j .Lbyte_copy_reverse /* Copy any remaining bytes */
/*
* These are basically sub-functions within the function. They
@@ -258,7 +257,7 @@ coaligned_copy_reverse:
* up from where they were left and we avoid code duplication
* without any overhead except the call in and return jumps.
*/
-byte_copy_until_aligned_forward:
+.Lbyte_copy_until_aligned_forward:
beq t3, t5, 2f
1:
lb t1, 0(a1)
@@ -269,7 +268,7 @@ byte_copy_until_aligned_forward:
2:
jalr zero, 0x0(t0) /* Return to multibyte copy loop */
-byte_copy_until_aligned_reverse:
+.Lbyte_copy_until_aligned_reverse:
beq t4, t6, 2f
1:
lb t1, -1(a4)
@@ -285,10 +284,10 @@ byte_copy_until_aligned_reverse:
* These will byte copy until they reach the end of data to copy.
* At that point, they will call to return from memmove.
*/
-byte_copy:
- bltu a1, a0, byte_copy_reverse
+.Lbyte_copy:
+ bltu a1, a0, .Lbyte_copy_reverse
-byte_copy_forward:
+.Lbyte_copy_forward:
beq t3, t4, 2f
1:
lb t1, 0(a1)
@@ -299,7 +298,7 @@ byte_copy_forward:
2:
ret
-byte_copy_reverse:
+.Lbyte_copy_reverse:
beq t4, t3, 2f
1:
lb t1, -1(a4)
@@ -309,10 +308,10 @@ byte_copy_reverse:
bne t4, t3, 1b
2:
-return_from_memmove:
+.Lreturn_from_memmove:
ret
-SYM_FUNC_END(memmove)
SYM_FUNC_END(__memmove)
+SYM_FUNC_ALIAS_WEAK(memmove, __memmove)
SYM_FUNC_ALIAS(__pi_memmove, __memmove)
SYM_FUNC_ALIAS(__pi___memmove, __memmove)
diff --git a/arch/riscv/lib/memset.S b/arch/riscv/lib/memset.S
index 34c5360c6705..35f358e70bdb 100644
--- a/arch/riscv/lib/memset.S
+++ b/arch/riscv/lib/memset.S
@@ -8,8 +8,7 @@
#include <asm/asm.h>
/* void *memset(void *, int, size_t) */
-ENTRY(__memset)
-WEAK(memset)
+SYM_FUNC_START(__memset)
move t0, a0 /* Preserve return value */
/* Defer to byte-oriented fill for small sizes */
@@ -110,4 +109,5 @@ WEAK(memset)
bltu t0, a3, 5b
6:
ret
-END(__memset)
+SYM_FUNC_END(__memset)
+SYM_FUNC_ALIAS_WEAK(memset, __memset)
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
index 09b47ebacf2e..3ab438f30d13 100644
--- a/arch/riscv/lib/uaccess.S
+++ b/arch/riscv/lib/uaccess.S
@@ -10,8 +10,7 @@
_asm_extable 100b, \lbl
.endm
-ENTRY(__asm_copy_to_user)
-ENTRY(__asm_copy_from_user)
+SYM_FUNC_START(__asm_copy_to_user)
/* Enable access to user memory */
li t6, SR_SUM
@@ -181,13 +180,13 @@ ENTRY(__asm_copy_from_user)
csrc CSR_STATUS, t6
sub a0, t5, a0
ret
-ENDPROC(__asm_copy_to_user)
-ENDPROC(__asm_copy_from_user)
+SYM_FUNC_END(__asm_copy_to_user)
EXPORT_SYMBOL(__asm_copy_to_user)
+SYM_FUNC_ALIAS(__asm_copy_from_user, __asm_copy_to_user)
EXPORT_SYMBOL(__asm_copy_from_user)
-ENTRY(__clear_user)
+SYM_FUNC_START(__clear_user)
/* Enable access to user memory */
li t6, SR_SUM
@@ -233,5 +232,5 @@ ENTRY(__clear_user)
csrc CSR_STATUS, t6
sub a0, a3, a0
ret
-ENDPROC(__clear_user)
+SYM_FUNC_END(__clear_user)
EXPORT_SYMBOL(__clear_user)
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index 9c454f90fd3d..3a4dfc8babcf 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -36,3 +36,4 @@ endif
obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
obj-$(CONFIG_RISCV_DMA_NONCOHERENT) += dma-noncoherent.o
+obj-$(CONFIG_RISCV_NONSTANDARD_CACHE_OPS) += cache-ops.o
diff --git a/arch/riscv/mm/cache-ops.c b/arch/riscv/mm/cache-ops.c
new file mode 100644
index 000000000000..a993ad11d0ec
--- /dev/null
+++ b/arch/riscv/mm/cache-ops.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ */
+
+#include <asm/dma-noncoherent.h>
+
+struct riscv_nonstd_cache_ops noncoherent_cache_ops __ro_after_init;
+
+void
+riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops)
+{
+ if (!ops)
+ return;
+ noncoherent_cache_ops = *ops;
+}
+EXPORT_SYMBOL_GPL(riscv_noncoherent_register_cache_ops);
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index f1387272a551..55a34f2020a8 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -3,7 +3,9 @@
* Copyright (C) 2017 SiFive
*/
+#include <linux/acpi.h>
#include <linux/of.h>
+#include <asm/acpi.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_SMP
@@ -124,13 +126,24 @@ void __init riscv_init_cbo_blocksizes(void)
unsigned long cbom_hartid, cboz_hartid;
u32 cbom_block_size = 0, cboz_block_size = 0;
struct device_node *node;
+ struct acpi_table_header *rhct;
+ acpi_status status;
+
+ if (acpi_disabled) {
+ for_each_of_cpu_node(node) {
+ /* set block-size for cbom and/or cboz extension if available */
+ cbo_get_block_size(node, "riscv,cbom-block-size",
+ &cbom_block_size, &cbom_hartid);
+ cbo_get_block_size(node, "riscv,cboz-block-size",
+ &cboz_block_size, &cboz_hartid);
+ }
+ } else {
+ status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
+ if (ACPI_FAILURE(status))
+ return;
- for_each_of_cpu_node(node) {
- /* set block-size for cbom and/or cboz extension if available */
- cbo_get_block_size(node, "riscv,cbom-block-size",
- &cbom_block_size, &cbom_hartid);
- cbo_get_block_size(node, "riscv,cboz-block-size",
- &cboz_block_size, &cboz_hartid);
+ acpi_get_cbo_block_size(rhct, &cbom_block_size, &cboz_block_size, NULL);
+ acpi_put_table((struct acpi_table_header *)rhct);
}
if (cbom_block_size)
diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
index b76e7e192eb1..4e4e469b8dd6 100644
--- a/arch/riscv/mm/dma-noncoherent.c
+++ b/arch/riscv/mm/dma-noncoherent.c
@@ -15,12 +15,6 @@ static bool noncoherent_supported __ro_after_init;
int dma_cache_alignment __ro_after_init = ARCH_DMA_MINALIGN;
EXPORT_SYMBOL_GPL(dma_cache_alignment);
-struct riscv_nonstd_cache_ops noncoherent_cache_ops __ro_after_init = {
- .wback = NULL,
- .inv = NULL,
- .wback_inv = NULL,
-};
-
static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size)
{
void *vaddr = phys_to_virt(paddr);
@@ -31,7 +25,7 @@ static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size)
return;
}
#endif
- ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
+ ALT_CMO_OP(CLEAN, vaddr, size, riscv_cbom_block_size);
}
static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size)
@@ -45,7 +39,7 @@ static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size)
}
#endif
- ALT_CMO_OP(inval, vaddr, size, riscv_cbom_block_size);
+ ALT_CMO_OP(INVAL, vaddr, size, riscv_cbom_block_size);
}
static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
@@ -59,7 +53,7 @@ static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
}
#endif
- ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
+ ALT_CMO_OP(FLUSH, vaddr, size, riscv_cbom_block_size);
}
static inline bool arch_sync_dma_clean_before_fromdevice(void)
@@ -131,7 +125,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
}
#endif
- ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size);
+ ALT_CMO_OP(FLUSH, flush_addr, size, riscv_cbom_block_size);
}
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
@@ -162,12 +156,3 @@ void __init riscv_set_dma_cache_alignment(void)
if (!noncoherent_supported)
dma_cache_alignment = 1;
}
-
-void riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops)
-{
- if (!ops)
- return;
-
- noncoherent_cache_ops = *ops;
-}
-EXPORT_SYMBOL_GPL(riscv_noncoherent_register_cache_ops);
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index d9a4e8702864..2e011cbddf3a 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -49,10 +49,12 @@ u64 satp_mode __ro_after_init = SATP_MODE_32;
#endif
EXPORT_SYMBOL(satp_mode);
+#ifdef CONFIG_64BIT
bool pgtable_l4_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
bool pgtable_l5_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
EXPORT_SYMBOL(pgtable_l4_enabled);
EXPORT_SYMBOL(pgtable_l5_enabled);
+#endif
phys_addr_t phys_ram_base __ro_after_init;
EXPORT_SYMBOL(phys_ram_base);
@@ -664,16 +666,16 @@ void __init create_pgd_mapping(pgd_t *pgdp,
static uintptr_t __init best_map_size(phys_addr_t pa, uintptr_t va,
phys_addr_t size)
{
- if (!(pa & (PGDIR_SIZE - 1)) && !(va & (PGDIR_SIZE - 1)) && size >= PGDIR_SIZE)
- return PGDIR_SIZE;
-
- if (!(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE)
+ if (pgtable_l5_enabled &&
+ !(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE)
return P4D_SIZE;
- if (!(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE)
+ if (pgtable_l4_enabled &&
+ !(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE)
return PUD_SIZE;
- if (!(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)
+ if (IS_ENABLED(CONFIG_64BIT) &&
+ !(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)
return PMD_SIZE;
return PAGE_SIZE;
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index 161d0b34c2cb..fc5fc4f785c4 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -5,6 +5,7 @@
#include <linux/pagewalk.h>
#include <linux/pgtable.h>
+#include <linux/vmalloc.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
#include <asm/set_memory.h>
@@ -25,19 +26,6 @@ static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
return new_val;
}
-static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
- unsigned long next, struct mm_walk *walk)
-{
- pgd_t val = READ_ONCE(*pgd);
-
- if (pgd_leaf(val)) {
- val = __pgd(set_pageattr_masks(pgd_val(val), walk));
- set_pgd(pgd, val);
- }
-
- return 0;
-}
-
static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
@@ -96,7 +84,6 @@ static int pageattr_pte_hole(unsigned long addr, unsigned long next,
}
static const struct mm_walk_ops pageattr_ops = {
- .pgd_entry = pageattr_pgd_entry,
.p4d_entry = pageattr_p4d_entry,
.pud_entry = pageattr_pud_entry,
.pmd_entry = pageattr_pmd_entry,
@@ -105,12 +92,181 @@ static const struct mm_walk_ops pageattr_ops = {
.walk_lock = PGWALK_RDLOCK,
};
+#ifdef CONFIG_64BIT
+static int __split_linear_mapping_pmd(pud_t *pudp,
+ unsigned long vaddr, unsigned long end)
+{
+ pmd_t *pmdp;
+ unsigned long next;
+
+ pmdp = pmd_offset(pudp, vaddr);
+
+ do {
+ next = pmd_addr_end(vaddr, end);
+
+ if (next - vaddr >= PMD_SIZE &&
+ vaddr <= (vaddr & PMD_MASK) && end >= next)
+ continue;
+
+ if (pmd_leaf(*pmdp)) {
+ struct page *pte_page;
+ unsigned long pfn = _pmd_pfn(*pmdp);
+ pgprot_t prot = __pgprot(pmd_val(*pmdp) & ~_PAGE_PFN_MASK);
+ pte_t *ptep_new;
+ int i;
+
+ pte_page = alloc_page(GFP_KERNEL);
+ if (!pte_page)
+ return -ENOMEM;
+
+ ptep_new = (pte_t *)page_address(pte_page);
+ for (i = 0; i < PTRS_PER_PTE; ++i, ++ptep_new)
+ set_pte(ptep_new, pfn_pte(pfn + i, prot));
+
+ smp_wmb();
+
+ set_pmd(pmdp, pfn_pmd(page_to_pfn(pte_page), PAGE_TABLE));
+ }
+ } while (pmdp++, vaddr = next, vaddr != end);
+
+ return 0;
+}
+
+static int __split_linear_mapping_pud(p4d_t *p4dp,
+ unsigned long vaddr, unsigned long end)
+{
+ pud_t *pudp;
+ unsigned long next;
+ int ret;
+
+ pudp = pud_offset(p4dp, vaddr);
+
+ do {
+ next = pud_addr_end(vaddr, end);
+
+ if (next - vaddr >= PUD_SIZE &&
+ vaddr <= (vaddr & PUD_MASK) && end >= next)
+ continue;
+
+ if (pud_leaf(*pudp)) {
+ struct page *pmd_page;
+ unsigned long pfn = _pud_pfn(*pudp);
+ pgprot_t prot = __pgprot(pud_val(*pudp) & ~_PAGE_PFN_MASK);
+ pmd_t *pmdp_new;
+ int i;
+
+ pmd_page = alloc_page(GFP_KERNEL);
+ if (!pmd_page)
+ return -ENOMEM;
+
+ pmdp_new = (pmd_t *)page_address(pmd_page);
+ for (i = 0; i < PTRS_PER_PMD; ++i, ++pmdp_new)
+ set_pmd(pmdp_new,
+ pfn_pmd(pfn + ((i * PMD_SIZE) >> PAGE_SHIFT), prot));
+
+ smp_wmb();
+
+ set_pud(pudp, pfn_pud(page_to_pfn(pmd_page), PAGE_TABLE));
+ }
+
+ ret = __split_linear_mapping_pmd(pudp, vaddr, next);
+ if (ret)
+ return ret;
+ } while (pudp++, vaddr = next, vaddr != end);
+
+ return 0;
+}
+
+static int __split_linear_mapping_p4d(pgd_t *pgdp,
+ unsigned long vaddr, unsigned long end)
+{
+ p4d_t *p4dp;
+ unsigned long next;
+ int ret;
+
+ p4dp = p4d_offset(pgdp, vaddr);
+
+ do {
+ next = p4d_addr_end(vaddr, end);
+
+ /*
+ * If [vaddr; end] contains [vaddr & P4D_MASK; next], we don't
+ * need to split, we'll change the protections on the whole P4D.
+ */
+ if (next - vaddr >= P4D_SIZE &&
+ vaddr <= (vaddr & P4D_MASK) && end >= next)
+ continue;
+
+ if (p4d_leaf(*p4dp)) {
+ struct page *pud_page;
+ unsigned long pfn = _p4d_pfn(*p4dp);
+ pgprot_t prot = __pgprot(p4d_val(*p4dp) & ~_PAGE_PFN_MASK);
+ pud_t *pudp_new;
+ int i;
+
+ pud_page = alloc_page(GFP_KERNEL);
+ if (!pud_page)
+ return -ENOMEM;
+
+ /*
+ * Fill the pud level with leaf puds that have the same
+ * protections as the leaf p4d.
+ */
+ pudp_new = (pud_t *)page_address(pud_page);
+ for (i = 0; i < PTRS_PER_PUD; ++i, ++pudp_new)
+ set_pud(pudp_new,
+ pfn_pud(pfn + ((i * PUD_SIZE) >> PAGE_SHIFT), prot));
+
+ /*
+ * Make sure the pud filling is not reordered with the
+ * p4d store which could result in seeing a partially
+ * filled pud level.
+ */
+ smp_wmb();
+
+ set_p4d(p4dp, pfn_p4d(page_to_pfn(pud_page), PAGE_TABLE));
+ }
+
+ ret = __split_linear_mapping_pud(p4dp, vaddr, next);
+ if (ret)
+ return ret;
+ } while (p4dp++, vaddr = next, vaddr != end);
+
+ return 0;
+}
+
+static int __split_linear_mapping_pgd(pgd_t *pgdp,
+ unsigned long vaddr,
+ unsigned long end)
+{
+ unsigned long next;
+ int ret;
+
+ do {
+ next = pgd_addr_end(vaddr, end);
+ /* We never use PGD mappings for the linear mapping */
+ ret = __split_linear_mapping_p4d(pgdp, vaddr, next);
+ if (ret)
+ return ret;
+ } while (pgdp++, vaddr = next, vaddr != end);
+
+ return 0;
+}
+
+static int split_linear_mapping(unsigned long start, unsigned long end)
+{
+ return __split_linear_mapping_pgd(pgd_offset_k(start), start, end);
+}
+#endif /* CONFIG_64BIT */
+
static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
pgprot_t clear_mask)
{
int ret;
unsigned long start = addr;
unsigned long end = start + PAGE_SIZE * numpages;
+ unsigned long __maybe_unused lm_start;
+ unsigned long __maybe_unused lm_end;
struct pageattr_masks masks = {
.set_mask = set_mask,
.clear_mask = clear_mask
@@ -120,11 +276,67 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
return 0;
mmap_write_lock(&init_mm);
+
+#ifdef CONFIG_64BIT
+ /*
+ * We are about to change the permissions of a kernel mapping, we must
+ * apply the same changes to its linear mapping alias, which may imply
+ * splitting a huge mapping.
+ */
+
+ if (is_vmalloc_or_module_addr((void *)start)) {
+ struct vm_struct *area = NULL;
+ int i, page_start;
+
+ area = find_vm_area((void *)start);
+ page_start = (start - (unsigned long)area->addr) >> PAGE_SHIFT;
+
+ for (i = page_start; i < page_start + numpages; ++i) {
+ lm_start = (unsigned long)page_address(area->pages[i]);
+ lm_end = lm_start + PAGE_SIZE;
+
+ ret = split_linear_mapping(lm_start, lm_end);
+ if (ret)
+ goto unlock;
+
+ ret = walk_page_range_novma(&init_mm, lm_start, lm_end,
+ &pageattr_ops, NULL, &masks);
+ if (ret)
+ goto unlock;
+ }
+ } else if (is_kernel_mapping(start) || is_linear_mapping(start)) {
+ lm_start = (unsigned long)lm_alias(start);
+ lm_end = (unsigned long)lm_alias(end);
+
+ ret = split_linear_mapping(lm_start, lm_end);
+ if (ret)
+ goto unlock;
+
+ ret = walk_page_range_novma(&init_mm, lm_start, lm_end,
+ &pageattr_ops, NULL, &masks);
+ if (ret)
+ goto unlock;
+ }
+
ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
&masks);
+
+unlock:
+ mmap_write_unlock(&init_mm);
+
+ /*
+ * We can't use flush_tlb_kernel_range() here as we may have split a
+ * hugepage that is larger than that, so let's flush everything.
+ */
+ flush_tlb_all();
+#else
+ ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
+ &masks);
+
mmap_write_unlock(&init_mm);
flush_tlb_kernel_range(start, end);
+#endif
return ret;
}
@@ -159,36 +371,14 @@ int set_memory_nx(unsigned long addr, int numpages)
int set_direct_map_invalid_noflush(struct page *page)
{
- int ret;
- unsigned long start = (unsigned long)page_address(page);
- unsigned long end = start + PAGE_SIZE;
- struct pageattr_masks masks = {
- .set_mask = __pgprot(0),
- .clear_mask = __pgprot(_PAGE_PRESENT)
- };
-
- mmap_read_lock(&init_mm);
- ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
- mmap_read_unlock(&init_mm);
-
- return ret;
+ return __set_memory((unsigned long)page_address(page), 1,
+ __pgprot(0), __pgprot(_PAGE_PRESENT));
}
int set_direct_map_default_noflush(struct page *page)
{
- int ret;
- unsigned long start = (unsigned long)page_address(page);
- unsigned long end = start + PAGE_SIZE;
- struct pageattr_masks masks = {
- .set_mask = PAGE_KERNEL,
- .clear_mask = __pgprot(0)
- };
-
- mmap_read_lock(&init_mm);
- ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
- mmap_read_unlock(&init_mm);
-
- return ret;
+ return __set_memory((unsigned long)page_address(page), 1,
+ PAGE_KERNEL, __pgprot(0));
}
#ifdef CONFIG_DEBUG_PAGEALLOC
diff --git a/arch/riscv/mm/pmem.c b/arch/riscv/mm/pmem.c
index c5fc5ec96f6d..370a422ede11 100644
--- a/arch/riscv/mm/pmem.c
+++ b/arch/riscv/mm/pmem.c
@@ -17,7 +17,7 @@ void arch_wb_cache_pmem(void *addr, size_t size)
return;
}
#endif
- ALT_CMO_OP(clean, addr, size, riscv_cbom_block_size);
+ ALT_CMO_OP(CLEAN, addr, size, riscv_cbom_block_size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
@@ -29,6 +29,6 @@ void arch_invalidate_pmem(void *addr, size_t size)
return;
}
#endif
- ALT_CMO_OP(inval, addr, size, riscv_cbom_block_size);
+ ALT_CMO_OP(INVAL, addr, size, riscv_cbom_block_size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c
index 20a9f991a6d7..657c27bc07a7 100644
--- a/arch/riscv/mm/ptdump.c
+++ b/arch/riscv/mm/ptdump.c
@@ -129,55 +129,55 @@ static struct ptd_mm_info efi_ptd_info = {
/* Page Table Entry */
struct prot_bits {
u64 mask;
- u64 val;
const char *set;
const char *clear;
};
static const struct prot_bits pte_bits[] = {
{
+#ifdef CONFIG_64BIT
+ .mask = _PAGE_NAPOT,
+ .set = "N",
+ .clear = ".",
+ }, {
+ .mask = _PAGE_MTMASK_SVPBMT,
+ .set = "MT(%s)",
+ .clear = " .. ",
+ }, {
+#endif
.mask = _PAGE_SOFT,
- .val = _PAGE_SOFT,
- .set = "RSW",
- .clear = " ",
+ .set = "RSW(%d)",
+ .clear = " .. ",
}, {
.mask = _PAGE_DIRTY,
- .val = _PAGE_DIRTY,
.set = "D",
.clear = ".",
}, {
.mask = _PAGE_ACCESSED,
- .val = _PAGE_ACCESSED,
.set = "A",
.clear = ".",
}, {
.mask = _PAGE_GLOBAL,
- .val = _PAGE_GLOBAL,
.set = "G",
.clear = ".",
}, {
.mask = _PAGE_USER,
- .val = _PAGE_USER,
.set = "U",
.clear = ".",
}, {
.mask = _PAGE_EXEC,
- .val = _PAGE_EXEC,
.set = "X",
.clear = ".",
}, {
.mask = _PAGE_WRITE,
- .val = _PAGE_WRITE,
.set = "W",
.clear = ".",
}, {
.mask = _PAGE_READ,
- .val = _PAGE_READ,
.set = "R",
.clear = ".",
}, {
.mask = _PAGE_PRESENT,
- .val = _PAGE_PRESENT,
.set = "V",
.clear = ".",
}
@@ -208,15 +208,30 @@ static void dump_prot(struct pg_state *st)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(pte_bits); i++) {
- const char *s;
+ char s[7];
+ unsigned long val;
- if ((st->current_prot & pte_bits[i].mask) == pte_bits[i].val)
- s = pte_bits[i].set;
- else
- s = pte_bits[i].clear;
+ val = st->current_prot & pte_bits[i].mask;
+ if (val) {
+ if (pte_bits[i].mask == _PAGE_SOFT)
+ sprintf(s, pte_bits[i].set, val >> 8);
+#ifdef CONFIG_64BIT
+ else if (pte_bits[i].mask == _PAGE_MTMASK_SVPBMT) {
+ if (val == _PAGE_NOCACHE_SVPBMT)
+ sprintf(s, pte_bits[i].set, "NC");
+ else if (val == _PAGE_IO_SVPBMT)
+ sprintf(s, pte_bits[i].set, "IO");
+ else
+ sprintf(s, pte_bits[i].set, "??");
+ }
+#endif
+ else
+ sprintf(s, "%s", pte_bits[i].set);
+ } else {
+ sprintf(s, "%s", pte_bits[i].clear);
+ }
- if (s)
- pt_dump_seq_printf(st->seq, " %s", s);
+ pt_dump_seq_printf(st->seq, " %s", s);
}
}
@@ -384,6 +399,9 @@ static int __init ptdump_init(void)
kernel_ptd_info.base_addr = KERN_VIRT_START;
+ pg_level[1].name = pgtable_l5_enabled ? "P4D" : "PGD";
+ pg_level[2].name = pgtable_l4_enabled ? "PUD" : "PGD";
+
for (i = 0; i < ARRAY_SIZE(pg_level); i++)
for (j = 0; j < ARRAY_SIZE(pte_bits); j++)
pg_level[i].mask |= pte_bits[j].mask;
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 77be59aadc73..e6659d7368b3 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -3,33 +3,56 @@
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
+#include <linux/hugetlb.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>
static inline void local_flush_tlb_all_asid(unsigned long asid)
{
- __asm__ __volatile__ ("sfence.vma x0, %0"
- :
- : "r" (asid)
- : "memory");
+ if (asid != FLUSH_TLB_NO_ASID)
+ __asm__ __volatile__ ("sfence.vma x0, %0"
+ :
+ : "r" (asid)
+ : "memory");
+ else
+ local_flush_tlb_all();
}
static inline void local_flush_tlb_page_asid(unsigned long addr,
unsigned long asid)
{
- __asm__ __volatile__ ("sfence.vma %0, %1"
- :
- : "r" (addr), "r" (asid)
- : "memory");
+ if (asid != FLUSH_TLB_NO_ASID)
+ __asm__ __volatile__ ("sfence.vma %0, %1"
+ :
+ : "r" (addr), "r" (asid)
+ : "memory");
+ else
+ local_flush_tlb_page(addr);
}
-static inline void local_flush_tlb_range(unsigned long start,
- unsigned long size, unsigned long stride)
+/*
+ * Flush entire TLB if number of entries to be flushed is greater
+ * than the threshold below.
+ */
+static unsigned long tlb_flush_all_threshold __read_mostly = 64;
+
+static void local_flush_tlb_range_threshold_asid(unsigned long start,
+ unsigned long size,
+ unsigned long stride,
+ unsigned long asid)
{
- if (size <= stride)
- local_flush_tlb_page(start);
- else
- local_flush_tlb_all();
+ unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride);
+ int i;
+
+ if (nr_ptes_in_range > tlb_flush_all_threshold) {
+ local_flush_tlb_all_asid(asid);
+ return;
+ }
+
+ for (i = 0; i < nr_ptes_in_range; ++i) {
+ local_flush_tlb_page_asid(start, asid);
+ start += stride;
+ }
}
static inline void local_flush_tlb_range_asid(unsigned long start,
@@ -37,8 +60,10 @@ static inline void local_flush_tlb_range_asid(unsigned long start,
{
if (size <= stride)
local_flush_tlb_page_asid(start, asid);
- else
+ else if (size == FLUSH_TLB_MAX_SIZE)
local_flush_tlb_all_asid(asid);
+ else
+ local_flush_tlb_range_threshold_asid(start, size, stride, asid);
}
static void __ipi_flush_tlb_all(void *info)
@@ -51,7 +76,7 @@ void flush_tlb_all(void)
if (riscv_use_ipi_for_rfence())
on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
else
- sbi_remote_sfence_vma(NULL, 0, -1);
+ sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
}
struct flush_tlb_range_data {
@@ -68,68 +93,62 @@ static void __ipi_flush_tlb_range_asid(void *info)
local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}
-static void __ipi_flush_tlb_range(void *info)
-{
- struct flush_tlb_range_data *d = info;
-
- local_flush_tlb_range(d->start, d->size, d->stride);
-}
-
static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
unsigned long size, unsigned long stride)
{
struct flush_tlb_range_data ftd;
- struct cpumask *cmask = mm_cpumask(mm);
- unsigned int cpuid;
+ const struct cpumask *cmask;
+ unsigned long asid = FLUSH_TLB_NO_ASID;
bool broadcast;
- if (cpumask_empty(cmask))
- return;
+ if (mm) {
+ unsigned int cpuid;
- cpuid = get_cpu();
- /* check if the tlbflush needs to be sent to other CPUs */
- broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
- if (static_branch_unlikely(&use_asid_allocator)) {
- unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;
-
- if (broadcast) {
- if (riscv_use_ipi_for_rfence()) {
- ftd.asid = asid;
- ftd.start = start;
- ftd.size = size;
- ftd.stride = stride;
- on_each_cpu_mask(cmask,
- __ipi_flush_tlb_range_asid,
- &ftd, 1);
- } else
- sbi_remote_sfence_vma_asid(cmask,
- start, size, asid);
- } else {
- local_flush_tlb_range_asid(start, size, stride, asid);
- }
+ cmask = mm_cpumask(mm);
+ if (cpumask_empty(cmask))
+ return;
+
+ cpuid = get_cpu();
+ /* check if the tlbflush needs to be sent to other CPUs */
+ broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
+
+ if (static_branch_unlikely(&use_asid_allocator))
+ asid = atomic_long_read(&mm->context.id) & asid_mask;
} else {
- if (broadcast) {
- if (riscv_use_ipi_for_rfence()) {
- ftd.asid = 0;
- ftd.start = start;
- ftd.size = size;
- ftd.stride = stride;
- on_each_cpu_mask(cmask,
- __ipi_flush_tlb_range,
- &ftd, 1);
- } else
- sbi_remote_sfence_vma(cmask, start, size);
- } else {
- local_flush_tlb_range(start, size, stride);
- }
+ cmask = cpu_online_mask;
+ broadcast = true;
}
- put_cpu();
+ if (broadcast) {
+ if (riscv_use_ipi_for_rfence()) {
+ ftd.asid = asid;
+ ftd.start = start;
+ ftd.size = size;
+ ftd.stride = stride;
+ on_each_cpu_mask(cmask,
+ __ipi_flush_tlb_range_asid,
+ &ftd, 1);
+ } else
+ sbi_remote_sfence_vma_asid(cmask,
+ start, size, asid);
+ } else {
+ local_flush_tlb_range_asid(start, size, stride, asid);
+ }
+
+ if (mm)
+ put_cpu();
}
void flush_tlb_mm(struct mm_struct *mm)
{
- __flush_tlb_range(mm, 0, -1, PAGE_SIZE);
+ __flush_tlb_range(mm, 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+}
+
+void flush_tlb_mm_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end,
+ unsigned int page_size)
+{
+ __flush_tlb_range(mm, start, end - start, page_size);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
@@ -140,8 +159,40 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
- __flush_tlb_range(vma->vm_mm, start, end - start, PAGE_SIZE);
+ unsigned long stride_size;
+
+ if (!is_vm_hugetlb_page(vma)) {
+ stride_size = PAGE_SIZE;
+ } else {
+ stride_size = huge_page_size(hstate_vma(vma));
+
+ /*
+ * As stated in the privileged specification, every PTE in a
+ * NAPOT region must be invalidated, so reset the stride in that
+ * case.
+ */
+ if (has_svnapot()) {
+ if (stride_size >= PGDIR_SIZE)
+ stride_size = PGDIR_SIZE;
+ else if (stride_size >= P4D_SIZE)
+ stride_size = P4D_SIZE;
+ else if (stride_size >= PUD_SIZE)
+ stride_size = PUD_SIZE;
+ else if (stride_size >= PMD_SIZE)
+ stride_size = PMD_SIZE;
+ else
+ stride_size = PAGE_SIZE;
+ }
+ }
+
+ __flush_tlb_range(vma->vm_mm, start, end - start, stride_size);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ __flush_tlb_range(NULL, start, end - start, PAGE_SIZE);
}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
diff --git a/arch/riscv/purgatory/Makefile b/arch/riscv/purgatory/Makefile
index 9e6476719abb..280b0eb352b8 100644
--- a/arch/riscv/purgatory/Makefile
+++ b/arch/riscv/purgatory/Makefile
@@ -81,6 +81,14 @@ ifdef CONFIG_CFI_CLANG
PURGATORY_CFLAGS_REMOVE += $(CC_FLAGS_CFI)
endif
+ifdef CONFIG_RELOCATABLE
+PURGATORY_CFLAGS_REMOVE += -fPIE
+endif
+
+ifdef CONFIG_SHADOW_CALL_STACK
+PURGATORY_CFLAGS_REMOVE += $(CC_FLAGS_SCS)
+endif
+
CFLAGS_REMOVE_purgatory.o += $(PURGATORY_CFLAGS_REMOVE)
CFLAGS_purgatory.o += $(PURGATORY_CFLAGS)
diff --git a/arch/riscv/purgatory/entry.S b/arch/riscv/purgatory/entry.S
index 0194f4554130..5bcf3af903da 100644
--- a/arch/riscv/purgatory/entry.S
+++ b/arch/riscv/purgatory/entry.S
@@ -7,15 +7,11 @@
* Author: Li Zhengyu (lizhengyu3@huawei.com)
*
*/
-
-.macro size, sym:req
- .size \sym, . - \sym
-.endm
+#include <linux/linkage.h>
.text
-.globl purgatory_start
-purgatory_start:
+SYM_CODE_START(purgatory_start)
lla sp, .Lstack
mv s0, a0 /* The hartid of the current hart */
@@ -28,8 +24,7 @@ purgatory_start:
mv a1, s1
ld a2, riscv_kernel_entry
jr a2
-
-size purgatory_start
+SYM_CODE_END(purgatory_start)
.align 4
.rept 256
@@ -39,9 +34,6 @@ size purgatory_start
.data
-.globl riscv_kernel_entry
-riscv_kernel_entry:
- .quad 0
-size riscv_kernel_entry
+SYM_DATA(riscv_kernel_entry, .quad 0)
.end
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index ae29e4392664..3bec98d20283 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -131,6 +131,7 @@ config S390
select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
select BUILDTIME_TABLE_SORT
select CLONE_BACKWARDS2
+ select DCACHE_WORD_ACCESS if !KMSAN
select DMA_OPS if PCI
select DYNAMIC_FTRACE if FUNCTION_TRACER
select FUNCTION_ALIGNMENT_8B if CC_IS_GCC
@@ -235,6 +236,7 @@ config S390
select THREAD_INFO_IN_TASK
select TRACE_IRQFLAGS_SUPPORT
select TTY
+ select USER_STACKTRACE_SUPPORT
select VIRT_CPU_ACCOUNTING
select ZONE_DMA
# Note: keep the above list sorted alphabetically
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index a53a36ee0731..73873e451686 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -138,9 +138,6 @@ bzImage: vmlinux
zfcpdump:
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
-vdso_install:
- $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@
-
archheaders:
$(Q)$(MAKE) $(build)=$(syscalls) uapi
@@ -160,6 +157,9 @@ vdso_prepare: prepare0
$(if $(CONFIG_COMPAT),$(Q)$(MAKE) \
$(build)=arch/s390/kernel/vdso32 include/generated/vdso32-offsets.h)
+vdso-install-y += arch/s390/kernel/vdso64/vdso64.so.dbg
+vdso-install-$(CONFIG_COMPAT) += arch/s390/kernel/vdso32/vdso32.so.dbg
+
ifdef CONFIG_EXPOLINE_EXTERN
modules_prepare: expoline_prepare
expoline_prepare: scripts
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
index 7b7521762633..2ab4872fbee1 100644
--- a/arch/s390/boot/ipl_parm.c
+++ b/arch/s390/boot/ipl_parm.c
@@ -3,6 +3,7 @@
#include <linux/init.h>
#include <linux/ctype.h>
#include <linux/pgtable.h>
+#include <asm/page-states.h>
#include <asm/ebcdic.h>
#include <asm/sclp.h>
#include <asm/sections.h>
@@ -24,6 +25,7 @@ unsigned int __bootdata_preserved(zlib_dfltcc_support) = ZLIB_DFLTCC_FULL;
struct ipl_parameter_block __bootdata_preserved(ipl_block);
int __bootdata_preserved(ipl_block_valid);
int __bootdata_preserved(__kaslr_enabled);
+int __bootdata_preserved(cmma_flag) = 1;
unsigned long vmalloc_size = VMALLOC_DEFAULT_SIZE;
unsigned long memory_limit;
@@ -295,6 +297,12 @@ void parse_boot_command_line(void)
if (!strcmp(param, "nokaslr"))
__kaslr_enabled = 0;
+ if (!strcmp(param, "cmma")) {
+ rc = kstrtobool(val, &enabled);
+ if (!rc && !enabled)
+ cmma_flag = 0;
+ }
+
#if IS_ENABLED(CONFIG_KVM)
if (!strcmp(param, "prot_virt")) {
rc = kstrtobool(val, &enabled);
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index d3e48bd9c394..8104e0e3d188 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/elf.h>
+#include <asm/page-states.h>
#include <asm/boot_data.h>
#include <asm/sections.h>
#include <asm/maccess.h>
@@ -49,7 +50,7 @@ static void detect_facilities(void)
{
if (test_facility(8)) {
machine.has_edat1 = 1;
- __ctl_set_bit(0, 23);
+ local_ctl_set_bit(0, CR0_EDAT_BIT);
}
if (test_facility(78))
machine.has_edat2 = 1;
@@ -57,6 +58,48 @@ static void detect_facilities(void)
machine.has_nx = 1;
}
+static int cmma_test_essa(void)
+{
+ unsigned long reg1, reg2, tmp = 0;
+ int rc = 1;
+ psw_t old;
+
+ /* Test ESSA_GET_STATE */
+ asm volatile(
+ " mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
+ " epsw %[reg1],%[reg2]\n"
+ " st %[reg1],0(%[psw_pgm])\n"
+ " st %[reg2],4(%[psw_pgm])\n"
+ " larl %[reg1],1f\n"
+ " stg %[reg1],8(%[psw_pgm])\n"
+ " .insn rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
+ " la %[rc],0\n"
+ "1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
+ : [reg1] "=&d" (reg1),
+ [reg2] "=&a" (reg2),
+ [rc] "+&d" (rc),
+ [tmp] "=&d" (tmp),
+ "+Q" (S390_lowcore.program_new_psw),
+ "=Q" (old)
+ : [psw_old] "a" (&old),
+ [psw_pgm] "a" (&S390_lowcore.program_new_psw),
+ [cmd] "i" (ESSA_GET_STATE)
+ : "cc", "memory");
+ return rc;
+}
+
+static void cmma_init(void)
+{
+ if (!cmma_flag)
+ return;
+ if (cmma_test_essa()) {
+ cmma_flag = 0;
+ return;
+ }
+ if (test_facility(147))
+ cmma_flag = 2;
+}
+
static void setup_lpp(void)
{
S390_lowcore.current_pid = 0;
@@ -306,6 +349,7 @@ void startup_kernel(void)
setup_boot_command_line();
parse_boot_command_line();
detect_facilities();
+ cmma_init();
sanitize_prot_virt_host();
max_physmem_end = detect_max_physmem_end();
setup_ident_map_size(max_physmem_end);
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index 442a74f113cb..e3a4500a5a75 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -2,16 +2,18 @@
#include <linux/sched/task.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
+#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/facility.h>
#include <asm/sections.h>
+#include <asm/ctlreg.h>
#include <asm/physmem_info.h>
#include <asm/maccess.h>
#include <asm/abs_lowcore.h>
#include "decompressor.h"
#include "boot.h"
-unsigned long __bootdata_preserved(s390_invalid_asce);
+struct ctlreg __bootdata_preserved(s390_invalid_asce);
#ifdef CONFIG_PROC_FS
atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
@@ -69,6 +71,10 @@ static void kasan_populate_shadow(void)
crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
+ __arch_set_page_dat(kasan_early_shadow_p4d, 1UL << CRST_ALLOC_ORDER);
+ __arch_set_page_dat(kasan_early_shadow_pud, 1UL << CRST_ALLOC_ORDER);
+ __arch_set_page_dat(kasan_early_shadow_pmd, 1UL << CRST_ALLOC_ORDER);
+ __arch_set_page_dat(kasan_early_shadow_pte, 1);
/*
* Current memory layout:
@@ -166,8 +172,6 @@ static bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
- pte_t entry;
-
if (mode == POPULATE_KASAN_ZERO_SHADOW) {
set_pte(pte, pte_z);
return true;
@@ -224,6 +228,7 @@ static void *boot_crst_alloc(unsigned long val)
table = (unsigned long *)physmem_alloc_top_down(RR_VMEM, size, size);
crst_table_init(table, val);
+ __arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
return table;
}
@@ -239,6 +244,7 @@ static pte_t *boot_pte_alloc(void)
if (!pte_leftover) {
pte_leftover = (void *)physmem_alloc_top_down(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
pte = pte_leftover + _PAGE_TABLE_SIZE;
+ __arch_set_page_dat(pte, 1);
} else {
pte = pte_leftover;
pte_leftover = NULL;
@@ -419,6 +425,14 @@ void setup_vmem(unsigned long asce_limit)
unsigned long asce_bits;
int i;
+ /*
+ * Mark whole memory as no-dat. This must be done before any
+ * page tables are allocated, or kernel image builtin pages
+ * are marked as dat tables.
+ */
+ for_each_physmem_online_range(i, &start, &end)
+ __arch_set_page_nodat((void *)start, (end - start) >> PAGE_SHIFT);
+
if (asce_limit == _REGION1_SIZE) {
asce_type = _REGION2_ENTRY_EMPTY;
asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
@@ -426,10 +440,12 @@ void setup_vmem(unsigned long asce_limit)
asce_type = _REGION3_ENTRY_EMPTY;
asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
}
- s390_invalid_asce = invalid_pg_dir | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
+ s390_invalid_asce.val = invalid_pg_dir | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
crst_table_init((unsigned long *)swapper_pg_dir, asce_type);
crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);
+ __arch_set_page_dat((void *)swapper_pg_dir, 1UL << CRST_ALLOC_ORDER);
+ __arch_set_page_dat((void *)invalid_pg_dir, 1UL << CRST_ALLOC_ORDER);
/*
* To allow prefixing the lowcore must be mapped with 4KB pages.
@@ -447,12 +463,12 @@ void setup_vmem(unsigned long asce_limit)
kasan_populate_shadow();
- S390_lowcore.kernel_asce = swapper_pg_dir | asce_bits;
+ S390_lowcore.kernel_asce.val = swapper_pg_dir | asce_bits;
S390_lowcore.user_asce = s390_invalid_asce;
- __ctl_load(S390_lowcore.kernel_asce, 1, 1);
- __ctl_load(S390_lowcore.user_asce, 7, 7);
- __ctl_load(S390_lowcore.kernel_asce, 13, 13);
+ local_ctl_load(1, &S390_lowcore.kernel_asce);
+ local_ctl_load(7, &S390_lowcore.user_asce);
+ local_ctl_load(13, &S390_lowcore.kernel_asce);
- init_mm.context.asce = S390_lowcore.kernel_asce;
+ init_mm.context.asce = S390_lowcore.kernel_asce.val;
}
diff --git a/arch/s390/include/asm/asm-extable.h b/arch/s390/include/asm/asm-extable.h
index e6532477f126..4a6b0a8b6412 100644
--- a/arch/s390/include/asm/asm-extable.h
+++ b/arch/s390/include/asm/asm-extable.h
@@ -13,6 +13,7 @@
#define EX_TYPE_UA_LOAD_MEM 4
#define EX_TYPE_UA_LOAD_REG 5
#define EX_TYPE_UA_LOAD_REGPAIR 6
+#define EX_TYPE_ZEROPAD 7
#define EX_DATA_REG_ERR_SHIFT 0
#define EX_DATA_REG_ERR GENMASK(3, 0)
@@ -23,16 +24,7 @@
#define EX_DATA_LEN_SHIFT 8
#define EX_DATA_LEN GENMASK(11, 8)
-#define __EX_TABLE(_section, _fault, _target, _type) \
- stringify_in_c(.section _section,"a";) \
- stringify_in_c(.balign 4;) \
- stringify_in_c(.long (_fault) - .;) \
- stringify_in_c(.long (_target) - .;) \
- stringify_in_c(.short (_type);) \
- stringify_in_c(.short 0;) \
- stringify_in_c(.previous)
-
-#define __EX_TABLE_UA(_section, _fault, _target, _type, _regerr, _regaddr, _len)\
+#define __EX_TABLE(_section, _fault, _target, _type, _regerr, _regaddr, _len) \
stringify_in_c(.section _section,"a";) \
stringify_in_c(.balign 4;) \
stringify_in_c(.long (_fault) - .;) \
@@ -72,21 +64,24 @@
stringify_in_c(.previous)
#define EX_TABLE(_fault, _target) \
- __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FIXUP)
+ __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FIXUP, __stringify(%%r0), __stringify(%%r0), 0)
#define EX_TABLE_AMODE31(_fault, _target) \
- __EX_TABLE(.amode31.ex_table, _fault, _target, EX_TYPE_FIXUP)
+ __EX_TABLE(.amode31.ex_table, _fault, _target, EX_TYPE_FIXUP, __stringify(%%r0), __stringify(%%r0), 0)
#define EX_TABLE_UA_STORE(_fault, _target, _regerr) \
- __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_STORE, _regerr, _regerr, 0)
+ __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_STORE, _regerr, _regerr, 0)
#define EX_TABLE_UA_LOAD_MEM(_fault, _target, _regerr, _regmem, _len) \
- __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_MEM, _regerr, _regmem, _len)
+ __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_MEM, _regerr, _regmem, _len)
#define EX_TABLE_UA_LOAD_REG(_fault, _target, _regerr, _regzero) \
- __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_REG, _regerr, _regzero, 0)
+ __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_REG, _regerr, _regzero, 0)
#define EX_TABLE_UA_LOAD_REGPAIR(_fault, _target, _regerr, _regzero) \
- __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_REGPAIR, _regerr, _regzero, 0)
+ __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_REGPAIR, _regerr, _regzero, 0)
+
+#define EX_TABLE_ZEROPAD(_fault, _target, _regdata, _regaddr) \
+ __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_ZEROPAD, _regdata, _regaddr, 0)
#endif /* __ASM_EXTABLE_H */
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
deleted file mode 100644
index adf7d8cdac7e..000000000000
--- a/arch/s390/include/asm/ctl_reg.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright IBM Corp. 1999, 2009
- *
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-
-#ifndef __ASM_CTL_REG_H
-#define __ASM_CTL_REG_H
-
-#include <linux/bits.h>
-
-#define CR0_CLOCK_COMPARATOR_SIGN BIT(63 - 10)
-#define CR0_LOW_ADDRESS_PROTECTION BIT(63 - 35)
-#define CR0_FETCH_PROTECTION_OVERRIDE BIT(63 - 38)
-#define CR0_STORAGE_PROTECTION_OVERRIDE BIT(63 - 39)
-#define CR0_EMERGENCY_SIGNAL_SUBMASK BIT(63 - 49)
-#define CR0_EXTERNAL_CALL_SUBMASK BIT(63 - 50)
-#define CR0_CLOCK_COMPARATOR_SUBMASK BIT(63 - 52)
-#define CR0_CPU_TIMER_SUBMASK BIT(63 - 53)
-#define CR0_SERVICE_SIGNAL_SUBMASK BIT(63 - 54)
-#define CR0_UNUSED_56 BIT(63 - 56)
-#define CR0_INTERRUPT_KEY_SUBMASK BIT(63 - 57)
-#define CR0_MEASUREMENT_ALERT_SUBMASK BIT(63 - 58)
-
-#define CR14_UNUSED_32 BIT(63 - 32)
-#define CR14_UNUSED_33 BIT(63 - 33)
-#define CR14_CHANNEL_REPORT_SUBMASK BIT(63 - 35)
-#define CR14_RECOVERY_SUBMASK BIT(63 - 36)
-#define CR14_DEGRADATION_SUBMASK BIT(63 - 37)
-#define CR14_EXTERNAL_DAMAGE_SUBMASK BIT(63 - 38)
-#define CR14_WARNING_SUBMASK BIT(63 - 39)
-
-#ifndef __ASSEMBLY__
-
-#include <linux/bug.h>
-
-#define __ctl_load(array, low, high) do { \
- typedef struct { char _[sizeof(array)]; } addrtype; \
- \
- BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
- asm volatile( \
- " lctlg %1,%2,%0\n" \
- : \
- : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high) \
- : "memory"); \
-} while (0)
-
-#define __ctl_store(array, low, high) do { \
- typedef struct { char _[sizeof(array)]; } addrtype; \
- \
- BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
- asm volatile( \
- " stctg %1,%2,%0\n" \
- : "=Q" (*(addrtype *)(&array)) \
- : "i" (low), "i" (high)); \
-} while (0)
-
-static __always_inline void __ctl_set_bit(unsigned int cr, unsigned int bit)
-{
- unsigned long reg;
-
- __ctl_store(reg, cr, cr);
- reg |= 1UL << bit;
- __ctl_load(reg, cr, cr);
-}
-
-static __always_inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
-{
- unsigned long reg;
-
- __ctl_store(reg, cr, cr);
- reg &= ~(1UL << bit);
- __ctl_load(reg, cr, cr);
-}
-
-void smp_ctl_set_clear_bit(int cr, int bit, bool set);
-
-static inline void ctl_set_bit(int cr, int bit)
-{
- smp_ctl_set_clear_bit(cr, bit, true);
-}
-
-static inline void ctl_clear_bit(int cr, int bit)
-{
- smp_ctl_set_clear_bit(cr, bit, false);
-}
-
-union ctlreg0 {
- unsigned long val;
- struct {
- unsigned long : 8;
- unsigned long tcx : 1; /* Transactional-Execution control */
- unsigned long pifo : 1; /* Transactional-Execution Program-
- Interruption-Filtering Override */
- unsigned long : 3;
- unsigned long ccc : 1; /* Cryptography counter control */
- unsigned long pec : 1; /* PAI extension control */
- unsigned long : 17;
- unsigned long : 3;
- unsigned long lap : 1; /* Low-address-protection control */
- unsigned long : 4;
- unsigned long edat : 1; /* Enhanced-DAT-enablement control */
- unsigned long : 2;
- unsigned long iep : 1; /* Instruction-Execution-Protection */
- unsigned long : 1;
- unsigned long afp : 1; /* AFP-register control */
- unsigned long vx : 1; /* Vector enablement control */
- unsigned long : 7;
- unsigned long sssm : 1; /* Service signal subclass mask */
- unsigned long : 9;
- };
-};
-
-union ctlreg2 {
- unsigned long val;
- struct {
- unsigned long : 33;
- unsigned long ducto : 25;
- unsigned long : 1;
- unsigned long gse : 1;
- unsigned long : 1;
- unsigned long tds : 1;
- unsigned long tdc : 2;
- };
-};
-
-union ctlreg5 {
- unsigned long val;
- struct {
- unsigned long : 33;
- unsigned long pasteo: 25;
- unsigned long : 6;
- };
-};
-
-union ctlreg15 {
- unsigned long val;
- struct {
- unsigned long lsea : 61;
- unsigned long : 3;
- };
-};
-
-#endif /* __ASSEMBLY__ */
-#endif /* __ASM_CTL_REG_H */
diff --git a/arch/s390/include/asm/ctlreg.h b/arch/s390/include/asm/ctlreg.h
new file mode 100644
index 000000000000..6d4b85f2b541
--- /dev/null
+++ b/arch/s390/include/asm/ctlreg.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __ASM_S390_CTLREG_H
+#define __ASM_S390_CTLREG_H
+
+#include <linux/bits.h>
+
+#define CR0_TRANSACTIONAL_EXECUTION_BIT (63 - 8)
+#define CR0_CLOCK_COMPARATOR_SIGN_BIT (63 - 10)
+#define CR0_CRYPTOGRAPHY_COUNTER_BIT (63 - 13)
+#define CR0_PAI_EXTENSION_BIT (63 - 14)
+#define CR0_CPUMF_EXTRACTION_AUTH_BIT (63 - 15)
+#define CR0_WARNING_TRACK_BIT (63 - 30)
+#define CR0_LOW_ADDRESS_PROTECTION_BIT (63 - 35)
+#define CR0_FETCH_PROTECTION_OVERRIDE_BIT (63 - 38)
+#define CR0_STORAGE_PROTECTION_OVERRIDE_BIT (63 - 39)
+#define CR0_EDAT_BIT (63 - 40)
+#define CR0_INSTRUCTION_EXEC_PROTECTION_BIT (63 - 43)
+#define CR0_VECTOR_BIT (63 - 46)
+#define CR0_MALFUNCTION_ALERT_SUBMASK_BIT (63 - 48)
+#define CR0_EMERGENCY_SIGNAL_SUBMASK_BIT (63 - 49)
+#define CR0_EXTERNAL_CALL_SUBMASK_BIT (63 - 50)
+#define CR0_CLOCK_COMPARATOR_SUBMASK_BIT (63 - 52)
+#define CR0_CPU_TIMER_SUBMASK_BIT (63 - 53)
+#define CR0_SERVICE_SIGNAL_SUBMASK_BIT (63 - 54)
+#define CR0_UNUSED_56_BIT (63 - 56)
+#define CR0_INTERRUPT_KEY_SUBMASK_BIT (63 - 57)
+#define CR0_MEASUREMENT_ALERT_SUBMASK_BIT (63 - 58)
+#define CR0_ETR_SUBMASK_BIT (63 - 59)
+#define CR0_IUCV_BIT (63 - 62)
+
+#define CR0_TRANSACTIONAL_EXECUTION BIT(CR0_TRANSACTIONAL_EXECUTION_BIT)
+#define CR0_CLOCK_COMPARATOR_SIGN BIT(CR0_CLOCK_COMPARATOR_SIGN_BIT)
+#define CR0_CRYPTOGRAPHY_COUNTER BIT(CR0_CRYPTOGRAPHY_COUNTER_BIT)
+#define CR0_PAI_EXTENSION BIT(CR0_PAI_EXTENSION_BIT)
+#define CR0_CPUMF_EXTRACTION_AUTH BIT(CR0_CPUMF_EXTRACTION_AUTH_BIT)
+#define CR0_WARNING_TRACK BIT(CR0_WARNING_TRACK_BIT)
+#define CR0_LOW_ADDRESS_PROTECTION BIT(CR0_LOW_ADDRESS_PROTECTION_BIT)
+#define CR0_FETCH_PROTECTION_OVERRIDE BIT(CR0_FETCH_PROTECTION_OVERRIDE_BIT)
+#define CR0_STORAGE_PROTECTION_OVERRIDE BIT(CR0_STORAGE_PROTECTION_OVERRIDE_BIT)
+#define CR0_EDAT BIT(CR0_EDAT_BIT)
+#define CR0_INSTRUCTION_EXEC_PROTECTION BIT(CR0_INSTRUCTION_EXEC_PROTECTION_BIT)
+#define CR0_VECTOR BIT(CR0_VECTOR_BIT)
+#define CR0_MALFUNCTION_ALERT_SUBMASK BIT(CR0_MALFUNCTION_ALERT_SUBMASK_BIT)
+#define CR0_EMERGENCY_SIGNAL_SUBMASK BIT(CR0_EMERGENCY_SIGNAL_SUBMASK_BIT)
+#define CR0_EXTERNAL_CALL_SUBMASK BIT(CR0_EXTERNAL_CALL_SUBMASK_BIT)
+#define CR0_CLOCK_COMPARATOR_SUBMASK BIT(CR0_CLOCK_COMPARATOR_SUBMASK_BIT)
+#define CR0_CPU_TIMER_SUBMASK BIT(CR0_CPU_TIMER_SUBMASK_BIT)
+#define CR0_SERVICE_SIGNAL_SUBMASK BIT(CR0_SERVICE_SIGNAL_SUBMASK_BIT)
+#define CR0_UNUSED_56 BIT(CR0_UNUSED_56_BIT)
+#define CR0_INTERRUPT_KEY_SUBMASK BIT(CR0_INTERRUPT_KEY_SUBMASK_BIT)
+#define CR0_MEASUREMENT_ALERT_SUBMASK BIT(CR0_MEASUREMENT_ALERT_SUBMASK_BIT)
+#define CR0_ETR_SUBMASK BIT(CR0_ETR_SUBMASK_BIT)
+#define CR0_IUCV BIT(CR0_IUCV_BIT)
+
+#define CR2_MIO_ADDRESSING_BIT (63 - 58)
+#define CR2_GUARDED_STORAGE_BIT (63 - 59)
+
+#define CR2_MIO_ADDRESSING BIT(CR2_MIO_ADDRESSING_BIT)
+#define CR2_GUARDED_STORAGE BIT(CR2_GUARDED_STORAGE_BIT)
+
+#define CR14_UNUSED_32_BIT (63 - 32)
+#define CR14_UNUSED_33_BIT (63 - 33)
+#define CR14_CHANNEL_REPORT_SUBMASK_BIT (63 - 35)
+#define CR14_RECOVERY_SUBMASK_BIT (63 - 36)
+#define CR14_DEGRADATION_SUBMASK_BIT (63 - 37)
+#define CR14_EXTERNAL_DAMAGE_SUBMASK_BIT (63 - 38)
+#define CR14_WARNING_SUBMASK_BIT (63 - 39)
+
+#define CR14_UNUSED_32 BIT(CR14_UNUSED_32_BIT)
+#define CR14_UNUSED_33 BIT(CR14_UNUSED_33_BIT)
+#define CR14_CHANNEL_REPORT_SUBMASK BIT(CR14_CHANNEL_REPORT_SUBMASK_BIT)
+#define CR14_RECOVERY_SUBMASK BIT(CR14_RECOVERY_SUBMASK_BIT)
+#define CR14_DEGRADATION_SUBMASK BIT(CR14_DEGRADATION_SUBMASK_BIT)
+#define CR14_EXTERNAL_DAMAGE_SUBMASK BIT(CR14_EXTERNAL_DAMAGE_SUBMASK_BIT)
+#define CR14_WARNING_SUBMASK BIT(CR14_WARNING_SUBMASK_BIT)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/bug.h>
+
+struct ctlreg {
+ unsigned long val;
+};
+
+#define __local_ctl_load(low, high, array) do { \
+ struct addrtype { \
+ char _[sizeof(array)]; \
+ }; \
+ int _high = high; \
+ int _low = low; \
+ int _esize; \
+ \
+ _esize = (_high - _low + 1) * sizeof(struct ctlreg); \
+ BUILD_BUG_ON(sizeof(struct addrtype) != _esize); \
+ typecheck(struct ctlreg, array[0]); \
+ asm volatile( \
+ " lctlg %[_low],%[_high],%[_arr]\n" \
+ : \
+ : [_arr] "Q" (*(struct addrtype *)(&array)), \
+ [_low] "i" (low), [_high] "i" (high) \
+ : "memory"); \
+} while (0)
+
+#define __local_ctl_store(low, high, array) do { \
+ struct addrtype { \
+ char _[sizeof(array)]; \
+ }; \
+ int _high = high; \
+ int _low = low; \
+ int _esize; \
+ \
+ _esize = (_high - _low + 1) * sizeof(struct ctlreg); \
+ BUILD_BUG_ON(sizeof(struct addrtype) != _esize); \
+ typecheck(struct ctlreg, array[0]); \
+ asm volatile( \
+ " stctg %[_low],%[_high],%[_arr]\n" \
+ : [_arr] "=Q" (*(struct addrtype *)(&array)) \
+ : [_low] "i" (low), [_high] "i" (high)); \
+} while (0)
+
+static __always_inline void local_ctl_load(unsigned int cr, struct ctlreg *reg)
+{
+ asm volatile(
+ " lctlg %[cr],%[cr],%[reg]\n"
+ :
+ : [reg] "Q" (*reg), [cr] "i" (cr)
+ : "memory");
+}
+
+static __always_inline void local_ctl_store(unsigned int cr, struct ctlreg *reg)
+{
+ asm volatile(
+ " stctg %[cr],%[cr],%[reg]\n"
+ : [reg] "=Q" (*reg)
+ : [cr] "i" (cr));
+}
+
+static __always_inline void local_ctl_set_bit(unsigned int cr, unsigned int bit)
+{
+ struct ctlreg reg;
+
+ local_ctl_store(cr, &reg);
+ reg.val |= 1UL << bit;
+ local_ctl_load(cr, &reg);
+}
+
+static __always_inline void local_ctl_clear_bit(unsigned int cr, unsigned int bit)
+{
+ struct ctlreg reg;
+
+ local_ctl_store(cr, &reg);
+ reg.val &= ~(1UL << bit);
+ local_ctl_load(cr, &reg);
+}
+
+struct lowcore;
+
+void system_ctlreg_lock(void);
+void system_ctlreg_unlock(void);
+void system_ctlreg_init_save_area(struct lowcore *lc);
+void system_ctlreg_modify(unsigned int cr, unsigned long data, int request);
+
+enum {
+ CTLREG_SET_BIT,
+ CTLREG_CLEAR_BIT,
+ CTLREG_LOAD,
+};
+
+static inline void system_ctl_set_bit(unsigned int cr, unsigned int bit)
+{
+ system_ctlreg_modify(cr, bit, CTLREG_SET_BIT);
+}
+
+static inline void system_ctl_clear_bit(unsigned int cr, unsigned int bit)
+{
+ system_ctlreg_modify(cr, bit, CTLREG_CLEAR_BIT);
+}
+
+static inline void system_ctl_load(unsigned int cr, struct ctlreg *reg)
+{
+ system_ctlreg_modify(cr, reg->val, CTLREG_LOAD);
+}
+
+union ctlreg0 {
+ unsigned long val;
+ struct ctlreg reg;
+ struct {
+ unsigned long : 8;
+ unsigned long tcx : 1; /* Transactional-Execution control */
+ unsigned long pifo : 1; /* Transactional-Execution Program-
+ Interruption-Filtering Override */
+ unsigned long : 3;
+ unsigned long ccc : 1; /* Cryptography counter control */
+ unsigned long pec : 1; /* PAI extension control */
+ unsigned long : 17;
+ unsigned long : 3;
+ unsigned long lap : 1; /* Low-address-protection control */
+ unsigned long : 4;
+ unsigned long edat : 1; /* Enhanced-DAT-enablement control */
+ unsigned long : 2;
+ unsigned long iep : 1; /* Instruction-Execution-Protection */
+ unsigned long : 1;
+ unsigned long afp : 1; /* AFP-register control */
+ unsigned long vx : 1; /* Vector enablement control */
+ unsigned long : 7;
+ unsigned long sssm : 1; /* Service signal subclass mask */
+ unsigned long : 9;
+ };
+};
+
+union ctlreg2 {
+ unsigned long val;
+ struct ctlreg reg;
+ struct {
+ unsigned long : 33;
+ unsigned long ducto : 25;
+ unsigned long : 1;
+ unsigned long gse : 1;
+ unsigned long : 1;
+ unsigned long tds : 1;
+ unsigned long tdc : 2;
+ };
+};
+
+union ctlreg5 {
+ unsigned long val;
+ struct ctlreg reg;
+ struct {
+ unsigned long : 33;
+ unsigned long pasteo: 25;
+ unsigned long : 6;
+ };
+};
+
+union ctlreg15 {
+ unsigned long val;
+ struct ctlreg reg;
+ struct {
+ unsigned long lsea : 61;
+ unsigned long : 3;
+ };
+};
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_S390_CTLREG_H */
diff --git a/arch/s390/include/asm/fault.h b/arch/s390/include/asm/fault.h
new file mode 100644
index 000000000000..d326f56603d6
--- /dev/null
+++ b/arch/s390/include/asm/fault.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 1999, 2023
+ */
+#ifndef _ASM_S390_FAULT_H
+#define _ASM_S390_FAULT_H
+
+union teid {
+ unsigned long val;
+ struct {
+ unsigned long addr : 52; /* Translation-exception Address */
+ unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */
+ unsigned long : 2;
+ unsigned long b56 : 1;
+ unsigned long : 3;
+ unsigned long b60 : 1;
+ unsigned long b61 : 1;
+ unsigned long as : 2; /* ASCE Identifier */
+ };
+};
+
+enum {
+ TEID_FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
+ TEID_FSI_STORE = 1, /* Exception was due to store operation */
+ TEID_FSI_FETCH = 2 /* Exception was due to fetch operation */
+};
+
+#endif /* _ASM_S390_FAULT_H */
diff --git a/arch/s390/include/asm/fpu/internal.h b/arch/s390/include/asm/fpu/internal.h
index bbdadb1c9efc..8634581b9011 100644
--- a/arch/s390/include/asm/fpu/internal.h
+++ b/arch/s390/include/asm/fpu/internal.h
@@ -10,7 +10,6 @@
#define _ASM_S390_FPU_INTERNAL_H
#include <linux/string.h>
-#include <asm/ctl_reg.h>
#include <asm/fpu/types.h>
static inline void save_vx_regs(__vector128 *vxrs)
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 89902f754740..54b42817f70a 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -31,6 +31,7 @@
#include <linux/percpu.h>
#include <linux/cache.h>
#include <linux/types.h>
+#include <asm/ctlreg.h>
enum interruption_class {
IRQEXT_CLK,
@@ -101,17 +102,17 @@ enum irq_subclass {
};
#define CR0_IRQ_SUBCLASS_MASK \
- ((1UL << (63 - 30)) /* Warning Track */ | \
- (1UL << (63 - 48)) /* Malfunction Alert */ | \
- (1UL << (63 - 49)) /* Emergency Signal */ | \
- (1UL << (63 - 50)) /* External Call */ | \
- (1UL << (63 - 52)) /* Clock Comparator */ | \
- (1UL << (63 - 53)) /* CPU Timer */ | \
- (1UL << (63 - 54)) /* Service Signal */ | \
- (1UL << (63 - 57)) /* Interrupt Key */ | \
- (1UL << (63 - 58)) /* Measurement Alert */ | \
- (1UL << (63 - 59)) /* Timing Alert */ | \
- (1UL << (63 - 62))) /* IUCV */
+ (CR0_WARNING_TRACK | \
+ CR0_MALFUNCTION_ALERT_SUBMASK | \
+ CR0_EMERGENCY_SIGNAL_SUBMASK | \
+ CR0_EXTERNAL_CALL_SUBMASK | \
+ CR0_CLOCK_COMPARATOR_SUBMASK | \
+ CR0_CPU_TIMER_SUBMASK | \
+ CR0_SERVICE_SIGNAL_SUBMASK | \
+ CR0_INTERRUPT_KEY_SUBMASK | \
+ CR0_MEASUREMENT_ALERT_SUBMASK | \
+ CR0_ETR_SUBMASK | \
+ CR0_IUCV)
void irq_subclass_register(enum irq_subclass subclass);
void irq_subclass_unregister(enum irq_subclass subclass);
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 83f732ca3af4..21b9e5290c04 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -15,6 +15,7 @@
* <grundym@us.ibm.com>
*/
#include <linux/types.h>
+#include <asm/ctlreg.h>
#include <asm-generic/kprobes.h>
#define BREAKPOINT_INSTRUCTION 0x0002
@@ -65,7 +66,7 @@ struct prev_kprobe {
struct kprobe_ctlblk {
unsigned long kprobe_status;
unsigned long kprobe_saved_imask;
- unsigned long kprobe_saved_ctl[3];
+ struct ctlreg kprobe_saved_ctl[3];
struct prev_kprobe prev_kprobe;
};
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 69ccc464a430..5dc1b6345006 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <asm/ptrace.h>
+#include <asm/ctlreg.h>
#include <asm/cpu.h>
#include <asm/types.h>
@@ -139,8 +140,8 @@ struct lowcore {
__u32 restart_flags; /* 0x0384 */
/* Address space pointer. */
- __u64 kernel_asce; /* 0x0388 */
- __u64 user_asce; /* 0x0390 */
+ struct ctlreg kernel_asce; /* 0x0388 */
+ struct ctlreg user_asce; /* 0x0390 */
/*
* The lpp and current_pid fields form a
@@ -199,7 +200,7 @@ struct lowcore {
__u32 clock_comp_save_area[2]; /* 0x1330 */
__u64 last_break_save_area; /* 0x1338 */
__u32 access_regs_save_area[16]; /* 0x1340 */
- __u64 cregs_save_area[16]; /* 0x1380 */
+ struct ctlreg cregs_save_area[16]; /* 0x1380 */
__u8 pad_0x1400[0x1500-0x1400]; /* 0x1400 */
/* Cryptography-counter designation */
__u64 ccd; /* 0x1500 */
@@ -221,12 +222,4 @@ static inline void set_prefix(__u32 address)
asm volatile("spx %0" : : "Q" (address) : "memory");
}
-static inline __u32 store_prefix(void)
-{
- __u32 address;
-
- asm volatile("stpx %0" : "=Q" (address));
- return address;
-}
-
#endif /* _ASM_S390_LOWCORE_H */
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 829d68e2c685..bb1b4bef1878 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -11,7 +11,6 @@ typedef struct {
cpumask_t cpu_attach_mask;
atomic_t flush_count;
unsigned int flush_mm;
- struct list_head pgtable_list;
struct list_head gmap_list;
unsigned long gmap_asce;
unsigned long asce;
@@ -39,7 +38,6 @@ typedef struct {
#define INIT_MM_CONTEXT(name) \
.context.lock = __SPIN_LOCK_UNLOCKED(name.context.lock), \
- .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
.context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
#endif
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 2a38af5a00c2..929af18b0908 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -12,7 +12,7 @@
#include <linux/uaccess.h>
#include <linux/mm_types.h>
#include <asm/tlbflush.h>
-#include <asm/ctl_reg.h>
+#include <asm/ctlreg.h>
#include <asm-generic/mm_hooks.h>
#define init_new_context init_new_context
@@ -22,7 +22,6 @@ static inline int init_new_context(struct task_struct *tsk,
unsigned long asce_type, init_entry;
spin_lock_init(&mm->context.lock);
- INIT_LIST_HEAD(&mm->context.pgtable_list);
INIT_LIST_HEAD(&mm->context.gmap_list);
cpumask_clear(&mm->context.cpu_attach_mask);
atomic_set(&mm->context.flush_count, 0);
@@ -78,10 +77,10 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *
if (next == &init_mm)
S390_lowcore.user_asce = s390_invalid_asce;
else
- S390_lowcore.user_asce = next->context.asce;
+ S390_lowcore.user_asce.val = next->context.asce;
cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
/* Clear previous user-ASCE from CR7 */
- __ctl_load(s390_invalid_asce, 7, 7);
+ local_ctl_load(7, &s390_invalid_asce);
if (prev != next)
cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}
@@ -111,7 +110,7 @@ static inline void finish_arch_post_lock_switch(void)
__tlb_flush_mm_lazy(mm);
preempt_enable();
}
- __ctl_load(S390_lowcore.user_asce, 7, 7);
+ local_ctl_load(7, &S390_lowcore.user_asce);
}
#define activate_mm activate_mm
@@ -120,7 +119,7 @@ static inline void activate_mm(struct mm_struct *prev,
{
switch_mm(prev, next, current);
cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
- __ctl_load(S390_lowcore.user_asce, 7, 7);
+ local_ctl_load(7, &S390_lowcore.user_asce);
}
#include <asm-generic/mmu_context.h>
diff --git a/arch/s390/include/asm/page-states.h b/arch/s390/include/asm/page-states.h
index c33c4deb545f..08fcbd628120 100644
--- a/arch/s390/include/asm/page-states.h
+++ b/arch/s390/include/asm/page-states.h
@@ -7,6 +7,9 @@
#ifndef PAGE_STATES_H
#define PAGE_STATES_H
+#include <asm/sections.h>
+#include <asm/page.h>
+
#define ESSA_GET_STATE 0
#define ESSA_SET_STABLE 1
#define ESSA_SET_UNUSED 2
@@ -18,4 +21,60 @@
#define ESSA_MAX ESSA_SET_STABLE_NODAT
+extern int __bootdata_preserved(cmma_flag);
+
+static __always_inline unsigned long essa(unsigned long paddr, unsigned char cmd)
+{
+ unsigned long rc;
+
+ asm volatile(
+ " .insn rrf,0xb9ab0000,%[rc],%[paddr],%[cmd],0"
+ : [rc] "=d" (rc)
+ : [paddr] "d" (paddr),
+ [cmd] "i" (cmd));
+ return rc;
+}
+
+static __always_inline void __set_page_state(void *addr, unsigned long num_pages, unsigned char cmd)
+{
+ unsigned long paddr = __pa(addr) & PAGE_MASK;
+
+ while (num_pages--) {
+ essa(paddr, cmd);
+ paddr += PAGE_SIZE;
+ }
+}
+
+static inline void __set_page_unused(void *addr, unsigned long num_pages)
+{
+ __set_page_state(addr, num_pages, ESSA_SET_UNUSED);
+}
+
+static inline void __set_page_stable_dat(void *addr, unsigned long num_pages)
+{
+ __set_page_state(addr, num_pages, ESSA_SET_STABLE);
+}
+
+static inline void __set_page_stable_nodat(void *addr, unsigned long num_pages)
+{
+ __set_page_state(addr, num_pages, ESSA_SET_STABLE_NODAT);
+}
+
+static inline void __arch_set_page_nodat(void *addr, unsigned long num_pages)
+{
+ if (!cmma_flag)
+ return;
+ if (cmma_flag < 2)
+ __set_page_stable_dat(addr, num_pages);
+ else
+ __set_page_stable_nodat(addr, num_pages);
+}
+
+static inline void __arch_set_page_dat(void *addr, unsigned long num_pages)
+{
+ if (!cmma_flag)
+ return;
+ __set_page_stable_dat(addr, num_pages);
+}
+
#endif
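For illustration, a minimal sketch of how a page-table allocation path might consume the new __arch_set_page_dat() helper introduced above; the call site, allocation order and error handling are assumptions for this example, not part of the diff:

static inline unsigned long *example_crst_alloc(void)
{
	unsigned long *table;

	/* CRST tables span four pages (CRST_ALLOC_ORDER == 2). */
	table = (unsigned long *)__get_free_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!table)
		return NULL;
	/* Mark the pages stable/DAT-addressable; a no-op when CMMA is off. */
	__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
	return table;
}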
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index cfec0743314e..73b9c3bf377f 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -164,7 +164,6 @@ static inline int page_reset_referenced(unsigned long addr)
struct page;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);
-void arch_set_page_dat(struct page *page, int order);
static inline int devmem_is_allowed(unsigned long pfn)
{
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index b248694e0024..e91cd6bbc330 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -159,13 +159,6 @@ struct zpci_dev {
unsigned long *dma_table;
int tlb_refresh;
- spinlock_t iommu_bitmap_lock;
- unsigned long *iommu_bitmap;
- unsigned long *lazy_bitmap;
- unsigned long iommu_size;
- unsigned long iommu_pages;
- unsigned int next_bit;
-
struct iommu_device iommu_dev; /* IOMMU core handle */
char res_name[16];
@@ -180,10 +173,6 @@ struct zpci_dev {
struct zpci_fmb *fmb;
u16 fmb_update; /* update interval */
u16 fmb_length;
- /* software counters */
- atomic64_t allocated_pages;
- atomic64_t mapped_pages;
- atomic64_t unmapped_pages;
u8 version;
enum pci_bus_speed max_bus_speed;
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
index d6189ed14f84..f0c677ddd270 100644
--- a/arch/s390/include/asm/pci_clp.h
+++ b/arch/s390/include/asm/pci_clp.h
@@ -50,6 +50,9 @@ struct clp_fh_list_entry {
#define CLP_UTIL_STR_LEN 64
#define CLP_PFIP_NR_SEGMENTS 4
+/* PCI function type numbers */
+#define PCI_FUNC_TYPE_ISM 0x5 /* ISM device */
+
extern bool zpci_unique_uid;
struct clp_rsp_slpc_pci {
diff --git a/arch/s390/include/asm/pci_dma.h b/arch/s390/include/asm/pci_dma.h
index 7119c04c51c5..42d7cc4262ca 100644
--- a/arch/s390/include/asm/pci_dma.h
+++ b/arch/s390/include/asm/pci_dma.h
@@ -82,117 +82,16 @@ enum zpci_ioat_dtype {
#define ZPCI_TABLE_VALID_MASK 0x20
#define ZPCI_TABLE_PROT_MASK 0x200
-static inline unsigned int calc_rtx(dma_addr_t ptr)
-{
- return ((unsigned long) ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
-}
-
-static inline unsigned int calc_sx(dma_addr_t ptr)
-{
- return ((unsigned long) ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
-}
-
-static inline unsigned int calc_px(dma_addr_t ptr)
-{
- return ((unsigned long) ptr >> PAGE_SHIFT) & ZPCI_PT_MASK;
-}
-
-static inline void set_pt_pfaa(unsigned long *entry, phys_addr_t pfaa)
-{
- *entry &= ZPCI_PTE_FLAG_MASK;
- *entry |= (pfaa & ZPCI_PTE_ADDR_MASK);
-}
-
-static inline void set_rt_sto(unsigned long *entry, phys_addr_t sto)
-{
- *entry &= ZPCI_RTE_FLAG_MASK;
- *entry |= (sto & ZPCI_RTE_ADDR_MASK);
- *entry |= ZPCI_TABLE_TYPE_RTX;
-}
-
-static inline void set_st_pto(unsigned long *entry, phys_addr_t pto)
-{
- *entry &= ZPCI_STE_FLAG_MASK;
- *entry |= (pto & ZPCI_STE_ADDR_MASK);
- *entry |= ZPCI_TABLE_TYPE_SX;
-}
-
-static inline void validate_rt_entry(unsigned long *entry)
-{
- *entry &= ~ZPCI_TABLE_VALID_MASK;
- *entry &= ~ZPCI_TABLE_OFFSET_MASK;
- *entry |= ZPCI_TABLE_VALID;
- *entry |= ZPCI_TABLE_LEN_RTX;
-}
-
-static inline void validate_st_entry(unsigned long *entry)
-{
- *entry &= ~ZPCI_TABLE_VALID_MASK;
- *entry |= ZPCI_TABLE_VALID;
-}
-
-static inline void invalidate_pt_entry(unsigned long *entry)
-{
- WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_INVALID);
- *entry &= ~ZPCI_PTE_VALID_MASK;
- *entry |= ZPCI_PTE_INVALID;
-}
-
-static inline void validate_pt_entry(unsigned long *entry)
-{
- WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID);
- *entry &= ~ZPCI_PTE_VALID_MASK;
- *entry |= ZPCI_PTE_VALID;
-}
-
-static inline void entry_set_protected(unsigned long *entry)
-{
- *entry &= ~ZPCI_TABLE_PROT_MASK;
- *entry |= ZPCI_TABLE_PROTECTED;
-}
-
-static inline void entry_clr_protected(unsigned long *entry)
-{
- *entry &= ~ZPCI_TABLE_PROT_MASK;
- *entry |= ZPCI_TABLE_UNPROTECTED;
-}
-
-static inline int reg_entry_isvalid(unsigned long entry)
-{
- return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
-}
-
-static inline int pt_entry_isvalid(unsigned long entry)
-{
- return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
-}
-
-static inline unsigned long *get_rt_sto(unsigned long entry)
-{
- if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
- return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
- else
- return NULL;
-
-}
-
-static inline unsigned long *get_st_pto(unsigned long entry)
-{
- if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
- return phys_to_virt(entry & ZPCI_STE_ADDR_MASK);
- else
- return NULL;
-}
-
-/* Prototypes */
-void dma_free_seg_table(unsigned long);
-unsigned long *dma_alloc_cpu_table(gfp_t gfp);
-void dma_cleanup_tables(unsigned long *);
-unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr,
- gfp_t gfp);
-void dma_update_cpu_trans(unsigned long *entry, phys_addr_t page_addr, int flags);
-
-extern const struct dma_map_ops s390_pci_dma_ops;
+struct zpci_iommu_ctrs {
+ atomic64_t mapped_pages;
+ atomic64_t unmapped_pages;
+ atomic64_t global_rpcits;
+ atomic64_t sync_map_rpcits;
+ atomic64_t sync_rpcits;
+};
+
+struct zpci_dev;
+struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev);
#endif
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 376b4b23bdaa..502d655fe6ae 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -25,7 +25,6 @@ void crst_table_free(struct mm_struct *, unsigned long *);
unsigned long *page_table_alloc(struct mm_struct *);
struct page *page_table_alloc_pgste(struct mm_struct *mm);
void page_table_free(struct mm_struct *, unsigned long *);
-void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
void page_table_free_pgste(struct page *page);
extern int page_table_allocate_pgste;
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index fb3ee7758b76..601e87fa8a9a 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -18,6 +18,7 @@
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/sections.h>
+#include <asm/ctlreg.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>
@@ -25,7 +26,7 @@
extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];
extern void paging_init(void);
-extern unsigned long s390_invalid_asce;
+extern struct ctlreg s390_invalid_asce;
enum {
PG_DIRECT_MAP_4K = 0,
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 25cadc2b9cff..df316436d2e1 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -125,9 +125,6 @@ static inline void vmcp_cma_reserve(void) { }
void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault);
-void cmma_init(void);
-void cmma_init_nodat(void);
-
extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 73ed2781073b..6e5b1b4b19a9 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -59,7 +59,6 @@ static inline void smp_cpus_done(unsigned int max_cpus)
{
}
-extern int smp_reinit_ipl_cpu(void);
extern int smp_rescan_cpus(void);
extern void __noreturn cpu_die(void);
extern void __cpu_die(unsigned int cpu);
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index 78f7b729b65f..31ec4f545e03 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -6,6 +6,13 @@
#include <linux/ptrace.h>
#include <asm/switch_to.h>
+struct stack_frame_user {
+ unsigned long back_chain;
+ unsigned long empty1[5];
+ unsigned long gprs[10];
+ unsigned long empty2[4];
+};
+
enum stack_type {
STACK_TYPE_UNKNOWN,
STACK_TYPE_TASK,
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 383b1f91442c..d1455a601adc 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -69,12 +69,9 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
tlb->mm->context.flush_mm = 1;
tlb->freed_tables = 1;
tlb->cleared_pmds = 1;
- /*
- * page_table_free_rcu takes care of the allocation bit masks
- * of the 2K table fragments in the 4K page table page,
- * then calls tlb_remove_table.
- */
- page_table_free_rcu(tlb, (unsigned long *) pte, address);
+ if (mm_alloc_pgste(tlb->mm))
+ gmap_unlink(tlb->mm, (unsigned long *)pte, address);
+ tlb_remove_ptdesc(tlb, pte);
}
/*
@@ -112,7 +109,7 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
__tlb_adjust_range(tlb, address, PAGE_SIZE);
tlb->mm->context.flush_mm = 1;
tlb->freed_tables = 1;
- tlb_remove_table(tlb, p4d);
+ tlb_remove_ptdesc(tlb, p4d);
}
/*
@@ -130,7 +127,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
tlb->mm->context.flush_mm = 1;
tlb->freed_tables = 1;
tlb->cleared_p4ds = 1;
- tlb_remove_table(tlb, pud);
+ tlb_remove_ptdesc(tlb, pud);
}
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 8a8c64a678c4..81ae8a98e7ec 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -15,7 +15,6 @@
*/
#include <asm/asm-extable.h>
#include <asm/processor.h>
-#include <asm/ctl_reg.h>
#include <asm/extable.h>
#include <asm/facility.h>
#include <asm-generic/access_ok.h>
diff --git a/arch/s390/include/asm/word-at-a-time.h b/arch/s390/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..2579f1694b82
--- /dev/null
+++ b/arch/s390/include/asm/word-at-a-time.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+#include <linux/kernel.h>
+#include <asm/asm-extable.h>
+#include <asm/bitsperlong.h>
+
+struct word_at_a_time {
+ const unsigned long bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x7f) }
+
+static inline unsigned long prep_zero_mask(unsigned long val, unsigned long data, const struct word_at_a_time *c)
+{
+ return data;
+}
+
+static inline unsigned long create_zero_mask(unsigned long data)
+{
+ return __fls(data);
+}
+
+static inline unsigned long find_zero(unsigned long data)
+{
+ return (data ^ (BITS_PER_LONG - 1)) >> 3;
+}
+
+static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+{
+ unsigned long mask = (val & c->bits) + c->bits;
+
+ *data = ~(mask | val | c->bits);
+ return *data;
+}
+
+static inline unsigned long zero_bytemask(unsigned long data)
+{
+ return ~1UL << data;
+}
+
+/*
+ * Load an unaligned word from kernel space.
+ *
+ * In the (very unlikely) case of the word being a page-crosser
+ * and the next page not being mapped, take the exception and
+ * return zeroes in the non-existing part.
+ */
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+ unsigned long data;
+
+ asm volatile(
+ "0: lg %[data],0(%[addr])\n"
+ "1: nopr %%r7\n"
+ EX_TABLE_ZEROPAD(0b, 1b, %[data], %[addr])
+ EX_TABLE_ZEROPAD(1b, 1b, %[data], %[addr])
+ : [data] "=d" (data)
+ : [addr] "a" (addr), "m" (*(unsigned long *)addr));
+ return data;
+}
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
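To show how the header above is meant to be consumed, here is a strlen-style loop using the canonical word-at-a-time pattern; this is a sketch assuming a kernel-space string (load_unaligned_zeropad() is only safe for kernel addresses, as the comment above notes), and the function name is made up for the example:

static inline long example_word_at_a_time_strlen(const char *s)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	unsigned long data, mask;
	long len = 0;

	for (;;) {
		/* May read past the terminator; a page-crossing fault
		 * is turned into zero bytes by the zeropad fixup. */
		data = load_unaligned_zeropad(s + len);
		if (has_zero(data, &mask, &constants)) {
			mask = prep_zero_mask(data, mask, &constants);
			mask = create_zero_mask(mask);
			return len + find_zero(mask);
		}
		len += sizeof(unsigned long);
	}
}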
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 0df2b88cc0da..353def93973b 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -37,7 +37,7 @@ CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls
obj-y := head64.o traps.o time.o process.o earlypgm.o early.o setup.o idle.o vtime.o
obj-y += processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o cpufeature.o
-obj-y += sysinfo.o lgr.o os_info.o
+obj-y += sysinfo.o lgr.o os_info.o ctlreg.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
obj-y += entry.o reipl.o kdebugfs.o alternative.o
obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 7af69948b290..514feadd4c58 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -498,7 +498,7 @@ static int get_mem_chunk_cnt(void)
/*
* Initialize ELF loads (new kernel)
*/
-static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
+static void loads_init(Elf64_Phdr *phdr)
{
phys_addr_t start, end;
u64 idx;
@@ -507,7 +507,7 @@ static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
phdr->p_filesz = end - start;
phdr->p_type = PT_LOAD;
phdr->p_offset = start;
- phdr->p_vaddr = start;
+ phdr->p_vaddr = (unsigned long)__va(start);
phdr->p_paddr = start;
phdr->p_memsz = end - start;
phdr->p_flags = PF_R | PF_W | PF_X;
@@ -612,7 +612,7 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
/* Init loads */
hdr_off = PTR_DIFF(ptr, hdr);
- loads_init(phdr_loads, hdr_off);
+ loads_init(phdr_loads);
*addr = (unsigned long long) hdr;
*size = (unsigned long long) hdr_off;
BUG_ON(elfcorehdr_size > alloc_size);
diff --git a/arch/s390/kernel/ctlreg.c b/arch/s390/kernel/ctlreg.c
new file mode 100644
index 000000000000..8cc26cf2c64a
--- /dev/null
+++ b/arch/s390/kernel/ctlreg.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 1999, 2023
+ */
+
+#include <linux/irqflags.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/cache.h>
+#include <asm/abs_lowcore.h>
+#include <asm/ctlreg.h>
+
+/*
+ * ctl_lock guards access to global control register contents which
+ * are kept in the control register save area within absolute lowcore
+ * at physical address zero.
+ */
+static DEFINE_SPINLOCK(system_ctl_lock);
+
+void system_ctlreg_lock(void)
+ __acquires(&system_ctl_lock)
+{
+ spin_lock(&system_ctl_lock);
+}
+
+void system_ctlreg_unlock(void)
+ __releases(&system_ctl_lock)
+{
+ spin_unlock(&system_ctl_lock);
+}
+
+static bool system_ctlreg_area_init __ro_after_init;
+
+void __init system_ctlreg_init_save_area(struct lowcore *lc)
+{
+ struct lowcore *abs_lc;
+
+ abs_lc = get_abs_lowcore();
+ __local_ctl_store(0, 15, lc->cregs_save_area);
+ __local_ctl_store(0, 15, abs_lc->cregs_save_area);
+ put_abs_lowcore(abs_lc);
+ system_ctlreg_area_init = true;
+}
+
+struct ctlreg_parms {
+ unsigned long andval;
+ unsigned long orval;
+ unsigned long val;
+ int request;
+ int cr;
+};
+
+static void ctlreg_callback(void *info)
+{
+ struct ctlreg_parms *pp = info;
+ struct ctlreg regs[16];
+
+ __local_ctl_store(0, 15, regs);
+ if (pp->request == CTLREG_LOAD) {
+ regs[pp->cr].val = pp->val;
+ } else {
+ regs[pp->cr].val &= pp->andval;
+ regs[pp->cr].val |= pp->orval;
+ }
+ __local_ctl_load(0, 15, regs);
+}
+
+static void system_ctlreg_update(void *info)
+{
+ unsigned long flags;
+
+ if (system_state == SYSTEM_BOOTING) {
+ /*
+ * For very early calls do not call on_each_cpu()
+ * since not everything might be setup.
+ */
+ local_irq_save(flags);
+ ctlreg_callback(info);
+ local_irq_restore(flags);
+ } else {
+ on_each_cpu(ctlreg_callback, info, 1);
+ }
+}
+
+void system_ctlreg_modify(unsigned int cr, unsigned long data, int request)
+{
+ struct ctlreg_parms pp = { .cr = cr, .request = request, };
+ struct lowcore *abs_lc;
+
+ switch (request) {
+ case CTLREG_SET_BIT:
+ pp.orval = 1UL << data;
+ pp.andval = -1UL;
+ break;
+ case CTLREG_CLEAR_BIT:
+ pp.orval = 0;
+ pp.andval = ~(1UL << data);
+ break;
+ case CTLREG_LOAD:
+ pp.val = data;
+ break;
+ }
+ if (system_ctlreg_area_init) {
+ system_ctlreg_lock();
+ abs_lc = get_abs_lowcore();
+ if (request == CTLREG_LOAD) {
+ abs_lc->cregs_save_area[cr].val = pp.val;
+ } else {
+ abs_lc->cregs_save_area[cr].val &= pp.andval;
+ abs_lc->cregs_save_area[cr].val |= pp.orval;
+ }
+ put_abs_lowcore(abs_lc);
+ system_ctlreg_update(&pp);
+ system_ctlreg_unlock();
+ } else {
+ system_ctlreg_update(&pp);
+ }
+}
+EXPORT_SYMBOL(system_ctlreg_modify);
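The call sites converted throughout the rest of this series use system_ctl_set_bit(), system_ctl_clear_bit() and system_ctl_load(); their definitions live in asm/ctlreg.h, which is not part of this hunk, so the following is only a presumed sketch of how those wrappers map onto system_ctlreg_modify():

/* Presumed thin wrappers; the real declarations are in asm/ctlreg.h. */
static inline void system_ctl_set_bit(unsigned int cr, unsigned int bit)
{
	system_ctlreg_modify(cr, bit, CTLREG_SET_BIT);
}

static inline void system_ctl_clear_bit(unsigned int cr, unsigned int bit)
{
	system_ctlreg_modify(cr, bit, CTLREG_CLEAR_BIT);
}

static inline void system_ctl_load(unsigned int cr, struct ctlreg *reg)
{
	system_ctlreg_modify(cr, reg->val, CTLREG_LOAD);
}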
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index f9f06cd8fcee..92fdc35f028c 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -245,6 +245,7 @@ EXPORT_SYMBOL(diag8c);
int diag224(void *ptr)
{
+ unsigned long addr = __pa(ptr);
int rc = -EOPNOTSUPP;
diag_stat_inc(DIAG_STAT_X224);
@@ -253,7 +254,7 @@ int diag224(void *ptr)
"0: lhi %0,0x0\n"
"1:\n"
EX_TABLE(0b,1b)
- : "+d" (rc) :"d" (0), "d" (ptr) : "memory");
+ : "+d" (rc) :"d" (0), "d" (addr) : "memory");
return rc;
}
EXPORT_SYMBOL(diag224);
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 442ce0489e1a..eb43e5922a25 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -46,6 +46,7 @@ decompressor_handled_param(vmalloc);
decompressor_handled_param(dfltcc);
decompressor_handled_param(facilities);
decompressor_handled_param(nokaslr);
+decompressor_handled_param(cmma);
#if IS_ENABLED(CONFIG_KVM)
decompressor_handled_param(prot_virt);
#endif
@@ -216,7 +217,7 @@ static __init void detect_machine_facilities(void)
{
if (test_facility(8)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
- __ctl_set_bit(0, 23);
+ system_ctl_set_bit(0, CR0_EDAT_BIT);
}
if (test_facility(78))
S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2;
@@ -224,13 +225,13 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
if (test_facility(50) && test_facility(73)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
- __ctl_set_bit(0, 55);
+ system_ctl_set_bit(0, CR0_TRANSACTIONAL_EXECUTION_BIT);
}
if (test_facility(51))
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
if (test_facility(129)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
- __ctl_set_bit(0, 17);
+ system_ctl_set_bit(0, CR0_VECTOR_BIT);
}
if (test_facility(130))
S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
@@ -240,7 +241,7 @@ static __init void detect_machine_facilities(void)
/* Enabled signed clock comparator comparisons */
S390_lowcore.machine_flags |= MACHINE_FLAG_SCC;
clock_comparator_max = -1ULL >> 1;
- __ctl_set_bit(0, 53);
+ system_ctl_set_bit(0, CR0_CLOCK_COMPARATOR_SIGN_BIT);
}
if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_PCI_MIO;
@@ -258,15 +259,9 @@ static inline void save_vector_registers(void)
#endif
}
-static inline void setup_control_registers(void)
+static inline void setup_low_address_protection(void)
{
- unsigned long reg;
-
- __ctl_store(reg, 0, 0);
- reg |= CR0_LOW_ADDRESS_PROTECTION;
- reg |= CR0_EMERGENCY_SIGNAL_SUBMASK;
- reg |= CR0_EXTERNAL_CALL_SUBMASK;
- __ctl_load(reg, 0, 0);
+ system_ctl_set_bit(0, CR0_LOW_ADDRESS_PROTECTION_BIT);
}
static inline void setup_access_registers(void)
@@ -279,7 +274,7 @@ static inline void setup_access_registers(void)
static int __init disable_vector_extension(char *str)
{
S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
- __ctl_clear_bit(0, 17);
+ system_ctl_clear_bit(0, CR0_VECTOR_BIT);
return 0;
}
early_param("novx", disable_vector_extension);
@@ -314,7 +309,7 @@ void __init startup_init(void)
save_vector_registers();
setup_topology();
sclp_early_detect();
- setup_control_registers();
+ setup_low_address_protection();
setup_access_registers();
lockdep_on();
}
diff --git a/arch/s390/kernel/guarded_storage.c b/arch/s390/kernel/guarded_storage.c
index d14dd1c2e524..0b68168d9566 100644
--- a/arch/s390/kernel/guarded_storage.c
+++ b/arch/s390/kernel/guarded_storage.c
@@ -28,7 +28,7 @@ static int gs_enable(void)
return -ENOMEM;
gs_cb->gsd = 25;
preempt_disable();
- __ctl_set_bit(2, 4);
+ local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
load_gs_cb(gs_cb);
current->thread.gs_cb = gs_cb;
preempt_enable();
@@ -42,7 +42,7 @@ static int gs_disable(void)
preempt_disable();
kfree(current->thread.gs_cb);
current->thread.gs_cb = NULL;
- __ctl_clear_bit(2, 4);
+ local_ctl_clear_bit(2, CR2_GUARDED_STORAGE_BIT);
preempt_enable();
}
return 0;
@@ -84,7 +84,7 @@ void gs_load_bc_cb(struct pt_regs *regs)
if (gs_cb) {
kfree(current->thread.gs_cb);
current->thread.gs_bc_cb = NULL;
- __ctl_set_bit(2, 4);
+ local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
load_gs_cb(gs_cb);
current->thread.gs_cb = gs_cb;
}
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 05e51666db03..cc364fce6aa9 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2381,7 +2381,7 @@ void s390_reset_system(void)
set_prefix(0);
/* Disable lowcore protection */
- __ctl_clear_bit(0, 28);
+ local_ctl_clear_bit(0, CR0_LOW_ADDRESS_PROTECTION_BIT);
diag_amode31_ops.diag308_reset();
}
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index b020ff17d206..6f71b0ce1068 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -385,7 +385,7 @@ void irq_subclass_register(enum irq_subclass subclass)
{
spin_lock(&irq_subclass_lock);
if (!irq_subclass_refcount[subclass])
- ctl_set_bit(0, subclass);
+ system_ctl_set_bit(0, subclass);
irq_subclass_refcount[subclass]++;
spin_unlock(&irq_subclass_lock);
}
@@ -396,7 +396,7 @@ void irq_subclass_unregister(enum irq_subclass subclass)
spin_lock(&irq_subclass_lock);
irq_subclass_refcount[subclass]--;
if (!irq_subclass_refcount[subclass])
- ctl_clear_bit(0, subclass);
+ system_ctl_clear_bit(0, subclass);
spin_unlock(&irq_subclass_lock);
}
EXPORT_SYMBOL(irq_subclass_unregister);
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index d4b863ed0aa7..f0cf20d4b3c5 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -224,20 +224,27 @@ static void enable_singlestep(struct kprobe_ctlblk *kcb,
struct pt_regs *regs,
unsigned long ip)
{
- struct per_regs per_kprobe;
+ union {
+ struct ctlreg regs[3];
+ struct {
+ struct ctlreg control;
+ struct ctlreg start;
+ struct ctlreg end;
+ };
+ } per_kprobe;
/* Set up the PER control registers %cr9-%cr11 */
- per_kprobe.control = PER_EVENT_IFETCH;
- per_kprobe.start = ip;
- per_kprobe.end = ip;
+ per_kprobe.control.val = PER_EVENT_IFETCH;
+ per_kprobe.start.val = ip;
+ per_kprobe.end.val = ip;
/* Save control regs and psw mask */
- __ctl_store(kcb->kprobe_saved_ctl, 9, 11);
+ __local_ctl_store(9, 11, kcb->kprobe_saved_ctl);
kcb->kprobe_saved_imask = regs->psw.mask &
(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);
/* Set PER control regs, turns on single step for the given address */
- __ctl_load(per_kprobe, 9, 11);
+ __local_ctl_load(9, 11, per_kprobe.regs);
regs->psw.mask |= PSW_MASK_PER;
regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
regs->psw.addr = ip;
@@ -249,7 +256,7 @@ static void disable_singlestep(struct kprobe_ctlblk *kcb,
unsigned long ip)
{
/* Restore control regs and psw mask, set new psw address */
- __ctl_load(kcb->kprobe_saved_ctl, 9, 11);
+ __local_ctl_load(9, 11, kcb->kprobe_saved_ctl);
regs->psw.mask &= ~PSW_MASK_PER;
regs->psw.mask |= kcb->kprobe_saved_imask;
regs->psw.addr = ip;
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index ce65fc01671f..bb0d4d68fcbe 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -94,12 +94,12 @@ static noinline void __machine_kdump(void *image)
if (MACHINE_HAS_VX)
save_vx_regs((__vector128 *) mcesa->vector_save_area);
if (MACHINE_HAS_GS) {
- __ctl_store(cr2_old.val, 2, 2);
+ local_ctl_store(2, &cr2_old.reg);
cr2_new = cr2_old;
cr2_new.gse = 1;
- __ctl_load(cr2_new.val, 2, 2);
+ local_ctl_load(2, &cr2_new.reg);
save_gs_cb((struct gs_cb *) mcesa->guarded_storage_save_area);
- __ctl_load(cr2_old.val, 2, 2);
+ local_ctl_load(2, &cr2_old.reg);
}
/*
* To create a good backchain for this CPU in the dump store_status
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 38ec0487521c..0daf0f1cdfc9 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -22,13 +22,13 @@
#include <linux/kvm_host.h>
#include <linux/export.h>
#include <asm/lowcore.h>
+#include <asm/ctlreg.h>
#include <asm/smp.h>
#include <asm/stp.h>
#include <asm/cputime.h>
#include <asm/nmi.h>
#include <asm/crw.h>
#include <asm/switch_to.h>
-#include <asm/ctl_reg.h>
#include <asm/asm-offsets.h>
#include <asm/pai.h>
#include <asm/vx-insn.h>
@@ -131,10 +131,10 @@ static notrace void s390_handle_damage(void)
* Disable low address protection and make machine check new PSW a
* disabled wait PSW. Any additional machine check cannot be handled.
*/
- __ctl_store(cr0.val, 0, 0);
+ local_ctl_store(0, &cr0.reg);
cr0_new = cr0;
cr0_new.lap = 0;
- __ctl_load(cr0_new.val, 0, 0);
+ local_ctl_load(0, &cr0_new.reg);
psw_save = S390_lowcore.mcck_new_psw;
psw_bits(S390_lowcore.mcck_new_psw).io = 0;
psw_bits(S390_lowcore.mcck_new_psw).ext = 0;
@@ -146,7 +146,7 @@ static notrace void s390_handle_damage(void)
* values. This makes possible system dump analysis easier.
*/
S390_lowcore.mcck_new_psw = psw_save;
- __ctl_load(cr0.val, 0, 0);
+ local_ctl_load(0, &cr0.reg);
disabled_wait();
while (1);
}
@@ -185,7 +185,7 @@ void s390_handle_mcck(void)
static int mchchk_wng_posted = 0;
/* Use single cpu clear, as we cannot handle smp here. */
- __ctl_clear_bit(14, 24); /* Disable WARNING MCH */
+ local_ctl_clear_bit(14, CR14_WARNING_SUBMASK_BIT);
if (xchg(&mchchk_wng_posted, 1) == 0)
kill_cad_pid(SIGPWR, 1);
}
@@ -269,9 +269,9 @@ static int notrace s390_validate_registers(union mci mci)
*/
if (!mci.vr && !test_cpu_flag(CIF_MCCK_GUEST))
kill_task = 1;
- cr0.val = S390_lowcore.cregs_save_area[0];
+ cr0.reg = S390_lowcore.cregs_save_area[0];
cr0.afp = cr0.vx = 1;
- __ctl_load(cr0.val, 0, 0);
+ local_ctl_load(0, &cr0.reg);
asm volatile(
" la 1,%0\n"
" VLM 0,15,0,1\n"
@@ -279,7 +279,7 @@ static int notrace s390_validate_registers(union mci mci)
:
: "Q" (*(struct vx_array *)mcesa->vector_save_area)
: "1");
- __ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
+ local_ctl_load(0, &S390_lowcore.cregs_save_area[0]);
}
/* Validate access registers */
asm volatile(
@@ -290,7 +290,7 @@ static int notrace s390_validate_registers(union mci mci)
if (!mci.ar)
kill_task = 1;
/* Validate guarded storage registers */
- cr2.val = S390_lowcore.cregs_save_area[2];
+ cr2.reg = S390_lowcore.cregs_save_area[2];
if (cr2.gse) {
if (!mci.gs) {
/*
@@ -505,9 +505,9 @@ NOKPROBE_SYMBOL(s390_do_machine_check);
static int __init machine_check_init(void)
{
- ctl_set_bit(14, 25); /* enable external damage MCH */
- ctl_set_bit(14, 27); /* enable system recovery MCH */
- ctl_set_bit(14, 24); /* enable warning MCH */
+ system_ctl_set_bit(14, CR14_EXTERNAL_DAMAGE_SUBMASK_BIT);
+ system_ctl_set_bit(14, CR14_RECOVERY_SUBMASK_BIT);
+ system_ctl_set_bit(14, CR14_WARNING_SUBMASK_BIT);
return 0;
}
early_initcall(machine_check_init);
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 850c11ea631a..41ed6e0f0a2a 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -1193,7 +1193,7 @@ static int __init cpumf_pmu_init(void)
* Clear bit 15 of cr0 to unauthorize problem-state to
* extract measurement counters
*/
- ctl_clear_bit(0, 48);
+ system_ctl_clear_bit(0, CR0_CPUMF_EXTRACTION_AUTH_BIT);
/* register handler for measurement-alert interruptions */
rc = register_external_irq(EXT_IRQ_MEASURE_ALERT,
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index c27321cb0969..dfa77da2fd2e 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -15,7 +15,10 @@
#include <linux/export.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/compat.h>
#include <linux/sysfs.h>
+#include <asm/stacktrace.h>
#include <asm/irq.h>
#include <asm/cpu_mf.h>
#include <asm/lowcore.h>
@@ -212,6 +215,44 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
}
}
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+ struct stack_frame_user __user *sf;
+ unsigned long ip, sp;
+ bool first = true;
+
+ if (is_compat_task())
+ return;
+ perf_callchain_store(entry, instruction_pointer(regs));
+ sf = (void __user *)user_stack_pointer(regs);
+ pagefault_disable();
+ while (entry->nr < entry->max_stack) {
+ if (__get_user(sp, &sf->back_chain))
+ break;
+ if (__get_user(ip, &sf->gprs[8]))
+ break;
+ if (ip & 0x1) {
+ /*
+ * If the instruction address is invalid, and this
+ * is the first stack frame, assume r14 has not
+ * been written to the stack yet. Otherwise exit.
+ */
+ if (first && !(regs->gprs[14] & 0x1))
+ ip = regs->gprs[14];
+ else
+ break;
+ }
+ perf_callchain_store(entry, ip);
+ /* Sanity check: ABI requires SP to be aligned to 8 bytes. */
+ if (!sp || sp & 0x7)
+ break;
+ sf = (void __user *)sp;
+ first = false;
+ }
+ pagefault_enable();
+}
+
/* Perf definitions for PMU event attributes in sysfs */
ssize_t cpumf_events_sysfs_show(struct device *dev,
struct device_attribute *attr, char *page)
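A note on the gprs[8] lookup in the new perf_callchain_user() above: in struct stack_frame_user the gprs[] array covers %r6 through %r15, so index 8 is %r14, the caller's return address in the ABI register save area. A small consistency check, assuming the standard s390 ELF ABI frame layout:

/* back_chain (8) + empty1[5] (40) + 8 * 8 == 112, the slot where the
 * callee's prologue stmg stores %r14 relative to the caller's %r15. */
static_assert(offsetof(struct stack_frame_user, gprs[8]) == 112);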
diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c
index fe7d1774ded1..77fd24e6cbb6 100644
--- a/arch/s390/kernel/perf_pai_crypto.c
+++ b/arch/s390/kernel/perf_pai_crypto.c
@@ -16,8 +16,7 @@
#include <linux/export.h>
#include <linux/io.h>
#include <linux/perf_event.h>
-
-#include <asm/ctl_reg.h>
+#include <asm/ctlreg.h>
#include <asm/pai.h>
#include <asm/debug.h>
@@ -41,7 +40,43 @@ struct paicrypt_map {
struct perf_event *event; /* Perf event for sampling */
};
-static DEFINE_PER_CPU(struct paicrypt_map, paicrypt_map);
+struct paicrypt_mapptr {
+ struct paicrypt_map *mapptr;
+};
+
+static struct paicrypt_root { /* Anchor to per CPU data */
+ refcount_t refcnt; /* Overall active events */
+ struct paicrypt_mapptr __percpu *mapptr;
+} paicrypt_root;
+
+/* Free per CPU data when the last event is removed. */
+static void paicrypt_root_free(void)
+{
+ if (refcount_dec_and_test(&paicrypt_root.refcnt)) {
+ free_percpu(paicrypt_root.mapptr);
+ paicrypt_root.mapptr = NULL;
+ }
+ debug_sprintf_event(cfm_dbg, 5, "%s root.refcount %d\n", __func__,
+ refcount_read(&paicrypt_root.refcnt));
+}
+
+/*
+ * On initialization of the first event, also allocate per CPU data dynamically.
+ * Start with an array of pointers, the array size is the maximum number of
+ * CPUs possible, which might be larger than the number of CPUs currently
+ * online.
+ */
+static int paicrypt_root_alloc(void)
+{
+ if (!refcount_inc_not_zero(&paicrypt_root.refcnt)) {
+ /* The memory is already zeroed. */
+ paicrypt_root.mapptr = alloc_percpu(struct paicrypt_mapptr);
+ if (!paicrypt_root.mapptr)
+ return -ENOMEM;
+ refcount_set(&paicrypt_root.refcnt, 1);
+ }
+ return 0;
+}
/* Release the PMU if event is the last perf event */
static DEFINE_MUTEX(pai_reserve_mutex);
@@ -51,7 +86,9 @@ static DEFINE_MUTEX(pai_reserve_mutex);
*/
static void paicrypt_event_destroy(struct perf_event *event)
{
- struct paicrypt_map *cpump = per_cpu_ptr(&paicrypt_map, event->cpu);
+ struct paicrypt_mapptr *mp = per_cpu_ptr(paicrypt_root.mapptr,
+ event->cpu);
+ struct paicrypt_map *cpump = mp->mapptr;
cpump->event = NULL;
static_branch_dec(&pai_key);
@@ -66,11 +103,11 @@ static void paicrypt_event_destroy(struct perf_event *event)
__func__, (unsigned long)cpump->page,
cpump->save);
free_page((unsigned long)cpump->page);
- cpump->page = NULL;
kvfree(cpump->save);
- cpump->save = NULL;
- cpump->mode = PAI_MODE_NONE;
+ kfree(cpump);
+ mp->mapptr = NULL;
}
+ paicrypt_root_free();
mutex_unlock(&pai_reserve_mutex);
}
@@ -86,7 +123,8 @@ static u64 paicrypt_getctr(struct paicrypt_map *cpump, int nr, bool kernel)
*/
static u64 paicrypt_getdata(struct perf_event *event, bool kernel)
{
- struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
+ struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
+ struct paicrypt_map *cpump = mp->mapptr;
u64 sum = 0;
int i;
@@ -132,11 +170,31 @@ static u64 paicrypt_getall(struct perf_event *event)
*
* Allocate the memory for the event.
*/
-static int paicrypt_busy(struct perf_event_attr *a, struct paicrypt_map *cpump)
+static struct paicrypt_map *paicrypt_busy(struct perf_event *event)
{
- int rc = 0;
+ struct perf_event_attr *a = &event->attr;
+ struct paicrypt_map *cpump = NULL;
+ struct paicrypt_mapptr *mp;
+ int rc;
mutex_lock(&pai_reserve_mutex);
+
+ /* Allocate root node */
+ rc = paicrypt_root_alloc();
+ if (rc)
+ goto unlock;
+
+ /* Allocate node for this event */
+ mp = per_cpu_ptr(paicrypt_root.mapptr, event->cpu);
+ cpump = mp->mapptr;
+ if (!cpump) { /* Paicrypt_map allocated? */
+ cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
+ if (!cpump) {
+ rc = -ENOMEM;
+ goto free_root;
+ }
+ }
+
if (a->sample_period) { /* Sampling requested */
if (cpump->mode != PAI_MODE_NONE)
rc = -EBUSY; /* ... sampling/counting active */
@@ -144,8 +202,15 @@ static int paicrypt_busy(struct perf_event_attr *a, struct paicrypt_map *cpump)
if (cpump->mode == PAI_MODE_SAMPLING)
rc = -EBUSY; /* ... and sampling active */
}
+ /*
+ * This error case triggers when there is a conflict:
+ * Either sampling requested and counting already active, or vice
+ * versa. Therefore the struct paicrypt_map for this CPU is
+ * needed or the error could not have occurred. Only adjust root
+ * node refcount.
+ */
if (rc)
- goto unlock;
+ goto free_root;
/* Allocate memory for counter page and counter extraction.
* Only the first counting event has to allocate a page.
@@ -158,30 +223,36 @@ static int paicrypt_busy(struct perf_event_attr *a, struct paicrypt_map *cpump)
rc = -ENOMEM;
cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
if (!cpump->page)
- goto unlock;
+ goto free_paicrypt_map;
cpump->save = kvmalloc_array(paicrypt_cnt + 1,
sizeof(struct pai_userdata), GFP_KERNEL);
if (!cpump->save) {
free_page((unsigned long)cpump->page);
cpump->page = NULL;
- goto unlock;
+ goto free_paicrypt_map;
}
+
+ /* Set mode and reference count */
rc = 0;
refcount_set(&cpump->refcnt, 1);
-
-unlock:
- /* If rc is non-zero, do not set mode and reference count */
- if (!rc) {
- cpump->mode = a->sample_period ? PAI_MODE_SAMPLING
- : PAI_MODE_COUNTING;
- }
+ cpump->mode = a->sample_period ? PAI_MODE_SAMPLING : PAI_MODE_COUNTING;
+ mp->mapptr = cpump;
debug_sprintf_event(cfm_dbg, 5, "%s sample_period %#llx users %d"
" mode %d refcnt %u page %#lx save %p rc %d\n",
__func__, a->sample_period, cpump->active_events,
cpump->mode, refcount_read(&cpump->refcnt),
(unsigned long)cpump->page, cpump->save, rc);
+ goto unlock;
+
+free_paicrypt_map:
+ kfree(cpump);
+ mp->mapptr = NULL;
+free_root:
+ paicrypt_root_free();
+
+unlock:
mutex_unlock(&pai_reserve_mutex);
- return rc;
+ return rc ? ERR_PTR(rc) : cpump;
}
/* Might be called on different CPU than the one the event is intended for. */
@@ -189,7 +260,6 @@ static int paicrypt_event_init(struct perf_event *event)
{
struct perf_event_attr *a = &event->attr;
struct paicrypt_map *cpump;
- int rc;
/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
@@ -199,16 +269,15 @@ static int paicrypt_event_init(struct perf_event *event)
a->config > PAI_CRYPTO_BASE + paicrypt_cnt)
return -EINVAL;
/* Allow only CPU wide operation, no process context for now. */
- if (event->hw.target || event->cpu == -1)
+ if ((event->attach_state & PERF_ATTACH_TASK) || event->cpu == -1)
return -ENOENT;
/* Allow only CRYPTO_ALL for sampling. */
if (a->sample_period && a->config != PAI_CRYPTO_BASE)
return -EINVAL;
- cpump = per_cpu_ptr(&paicrypt_map, event->cpu);
- rc = paicrypt_busy(a, cpump);
- if (rc)
- return rc;
+ cpump = paicrypt_busy(event);
+ if (IS_ERR(cpump))
+ return PTR_ERR(cpump);
/* Event initialization sets last_tag to 0. When later on the events
* are deleted and re-added, do not reset the event count value to zero.
@@ -216,7 +285,6 @@ static int paicrypt_event_init(struct perf_event *event)
* are active at the same time.
*/
event->hw.last_tag = 0;
- cpump->event = event;
event->destroy = paicrypt_event_destroy;
if (a->sample_period) {
@@ -253,20 +321,20 @@ static void paicrypt_start(struct perf_event *event, int flags)
if (!event->hw.last_tag) {
event->hw.last_tag = 1;
sum = paicrypt_getall(event); /* Get current value */
- local64_set(&event->count, 0);
local64_set(&event->hw.prev_count, sum);
}
}
static int paicrypt_add(struct perf_event *event, int flags)
{
- struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
+ struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
+ struct paicrypt_map *cpump = mp->mapptr;
unsigned long ccd;
if (++cpump->active_events == 1) {
ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET;
WRITE_ONCE(S390_lowcore.ccd, ccd);
- __ctl_set_bit(0, 50);
+ local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
}
cpump->event = event;
if (flags & PERF_EF_START && !event->attr.sample_period) {
@@ -287,7 +355,8 @@ static void paicrypt_stop(struct perf_event *event, int flags)
static void paicrypt_del(struct perf_event *event, int flags)
{
- struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
+ struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
+ struct paicrypt_map *cpump = mp->mapptr;
if (event->attr.sample_period)
perf_sched_cb_dec(event->pmu);
@@ -295,7 +364,7 @@ static void paicrypt_del(struct perf_event *event, int flags)
/* Only counting needs to read counter */
paicrypt_stop(event, PERF_EF_UPDATE);
if (--cpump->active_events == 0) {
- __ctl_clear_bit(0, 50);
+ local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
WRITE_ONCE(S390_lowcore.ccd, 0);
}
}
@@ -329,7 +398,8 @@ static size_t paicrypt_copy(struct pai_userdata *userdata,
static int paicrypt_push_sample(void)
{
- struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
+ struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
+ struct paicrypt_map *cpump = mp->mapptr;
struct perf_event *event = cpump->event;
struct perf_sample_data data;
struct perf_raw_record raw;
diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c
index c57c1a203256..8ba0f1a3a39d 100644
--- a/arch/s390/kernel/perf_pai_ext.c
+++ b/arch/s390/kernel/perf_pai_ext.c
@@ -17,8 +17,7 @@
#include <linux/export.h>
#include <linux/io.h>
#include <linux/perf_event.h>
-
-#include <asm/ctl_reg.h>
+#include <asm/ctlreg.h>
#include <asm/pai.h>
#include <asm/debug.h>
@@ -249,7 +248,7 @@ static int paiext_event_init(struct perf_event *event)
if (rc)
return rc;
/* Allow only CPU wide operation, no process context for now. */
- if (event->hw.target || event->cpu == -1)
+ if ((event->attach_state & PERF_ATTACH_TASK) || event->cpu == -1)
return -ENOENT;
/* Allow only event NNPA_ALL for sampling. */
if (a->sample_period && a->config != PAI_NNPA_BASE)
@@ -327,7 +326,6 @@ static void paiext_start(struct perf_event *event, int flags)
event->hw.last_tag = 1;
sum = paiext_getall(event); /* Get current value */
local64_set(&event->hw.prev_count, sum);
- local64_set(&event->count, 0);
}
static int paiext_add(struct perf_event *event, int flags)
@@ -340,7 +338,7 @@ static int paiext_add(struct perf_event *event, int flags)
S390_lowcore.aicd = virt_to_phys(cpump->paiext_cb);
pcb->acc = virt_to_phys(cpump->area) | 0x1;
/* Enable CPU instruction lookup for PAIE1 control block */
- __ctl_set_bit(0, 49);
+ local_ctl_set_bit(0, CR0_PAI_EXTENSION_BIT);
debug_sprintf_event(paiext_dbg, 4, "%s 1508 %llx acc %llx\n",
__func__, S390_lowcore.aicd, pcb->acc);
}
@@ -376,7 +374,7 @@ static void paiext_del(struct perf_event *event, int flags)
}
if (--cpump->active_events == 0) {
/* Disable CPU instruction lookup for PAIE1 control block */
- __ctl_clear_bit(0, 49);
+ local_ctl_clear_bit(0, CR0_PAI_EXTENSION_BIT);
pcb->acc = 0;
S390_lowcore.aicd = 0;
debug_sprintf_event(paiext_dbg, 4, "%s 1508 %llx acc %llx\n",
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index ea244a73efad..046403471c5d 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -41,13 +41,20 @@ void update_cr_regs(struct task_struct *task)
{
struct pt_regs *regs = task_pt_regs(task);
struct thread_struct *thread = &task->thread;
- struct per_regs old, new;
union ctlreg0 cr0_old, cr0_new;
union ctlreg2 cr2_old, cr2_new;
int cr0_changed, cr2_changed;
-
- __ctl_store(cr0_old.val, 0, 0);
- __ctl_store(cr2_old.val, 2, 2);
+ union {
+ struct ctlreg regs[3];
+ struct {
+ struct ctlreg control;
+ struct ctlreg start;
+ struct ctlreg end;
+ };
+ } old, new;
+
+ local_ctl_store(0, &cr0_old.reg);
+ local_ctl_store(2, &cr2_old.reg);
cr0_new = cr0_old;
cr2_new = cr2_old;
/* Take care of the enable/disable of transactional execution. */
@@ -75,38 +82,38 @@ void update_cr_regs(struct task_struct *task)
cr0_changed = cr0_new.val != cr0_old.val;
cr2_changed = cr2_new.val != cr2_old.val;
if (cr0_changed)
- __ctl_load(cr0_new.val, 0, 0);
+ local_ctl_load(0, &cr0_new.reg);
if (cr2_changed)
- __ctl_load(cr2_new.val, 2, 2);
+ local_ctl_load(2, &cr2_new.reg);
/* Copy user specified PER registers */
- new.control = thread->per_user.control;
- new.start = thread->per_user.start;
- new.end = thread->per_user.end;
+ new.control.val = thread->per_user.control;
+ new.start.val = thread->per_user.start;
+ new.end.val = thread->per_user.end;
/* merge TIF_SINGLE_STEP into user specified PER registers. */
if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
- new.control |= PER_EVENT_BRANCH;
+ new.control.val |= PER_EVENT_BRANCH;
else
- new.control |= PER_EVENT_IFETCH;
- new.control |= PER_CONTROL_SUSPENSION;
- new.control |= PER_EVENT_TRANSACTION_END;
+ new.control.val |= PER_EVENT_IFETCH;
+ new.control.val |= PER_CONTROL_SUSPENSION;
+ new.control.val |= PER_EVENT_TRANSACTION_END;
if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
- new.control |= PER_EVENT_IFETCH;
- new.start = 0;
- new.end = -1UL;
+ new.control.val |= PER_EVENT_IFETCH;
+ new.start.val = 0;
+ new.end.val = -1UL;
}
/* Take care of the PER enablement bit in the PSW. */
- if (!(new.control & PER_EVENT_MASK)) {
+ if (!(new.control.val & PER_EVENT_MASK)) {
regs->psw.mask &= ~PSW_MASK_PER;
return;
}
regs->psw.mask |= PSW_MASK_PER;
- __ctl_store(old, 9, 11);
+ __local_ctl_store(9, 11, old.regs);
if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
- __ctl_load(new, 9, 11);
+ __local_ctl_load(9, 11, new.regs);
}
void user_enable_single_step(struct task_struct *task)
@@ -1107,7 +1114,7 @@ static int s390_gs_cb_set(struct task_struct *target,
target->thread.gs_cb = data;
*target->thread.gs_cb = gs_cb;
if (target == current) {
- __ctl_set_bit(2, 4);
+ local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
restore_gs_cb(target->thread.gs_cb);
}
preempt_enable();
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index e555b576d3c8..5701356f4f33 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -305,7 +305,7 @@ static void __init setup_zfcpdump(void)
return;
if (oldmem_data.start)
return;
- strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
+ strlcat(boot_command_line, " cio_ignore=all,!ipldev,!condev", COMMAND_LINE_SIZE);
console_loglevel = 2;
}
#else
@@ -381,12 +381,6 @@ void stack_free(unsigned long stack)
#endif
}
-void __init __noreturn arch_call_rest_init(void)
-{
- smp_reinit_ipl_cpu();
- rest_init();
-}
-
static unsigned long __init stack_alloc_early(void)
{
unsigned long stack;
@@ -455,7 +449,6 @@ static void __init setup_lowcore(void)
lc->restart_fn = (unsigned long) do_restart;
lc->restart_data = 0;
lc->restart_source = -1U;
- __ctl_store(lc->cregs_save_area, 0, 15);
lc->spinlock_lockval = arch_spin_lockval(0);
lc->spinlock_index = 0;
arch_spin_lock_setup(0);
@@ -465,6 +458,7 @@ static void __init setup_lowcore(void)
lc->kernel_asce = S390_lowcore.kernel_asce;
lc->user_asce = S390_lowcore.user_asce;
+ system_ctlreg_init_save_area(lc);
abs_lc = get_abs_lowcore();
abs_lc->restart_stack = lc->restart_stack;
abs_lc->restart_fn = lc->restart_fn;
@@ -472,7 +466,6 @@ static void __init setup_lowcore(void)
abs_lc->restart_source = lc->restart_source;
abs_lc->restart_psw = lc->restart_psw;
abs_lc->restart_flags = RESTART_FLAG_CTLREGS;
- memcpy(abs_lc->cregs_save_area, lc->cregs_save_area, sizeof(abs_lc->cregs_save_area));
abs_lc->program_new_psw = lc->program_new_psw;
abs_lc->mcesad = lc->mcesad;
put_abs_lowcore(abs_lc);
@@ -797,15 +790,15 @@ static void __init setup_cr(void)
__ctl_duct[4] = (unsigned long)__ctl_duald;
/* Update control registers CR2, CR5 and CR15 */
- __ctl_store(cr2.val, 2, 2);
- __ctl_store(cr5.val, 5, 5);
- __ctl_store(cr15.val, 15, 15);
+ local_ctl_store(2, &cr2.reg);
+ local_ctl_store(5, &cr5.reg);
+ local_ctl_store(15, &cr15.reg);
cr2.ducto = (unsigned long)__ctl_duct >> 6;
cr5.pasteo = (unsigned long)__ctl_duct >> 6;
cr15.lsea = (unsigned long)__ctl_linkage_stack >> 3;
- __ctl_load(cr2.val, 2, 2);
- __ctl_load(cr5.val, 5, 5);
- __ctl_load(cr15.val, 15, 15);
+ system_ctl_load(2, &cr2.reg);
+ system_ctl_load(5, &cr5.reg);
+ system_ctl_load(15, &cr15.reg);
}
/*
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 214a1b67f80a..f7fcfff09acf 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -37,6 +37,7 @@
#include <linux/crash_dump.h>
#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
+#include <asm/ctlreg.h>
#include <asm/pfault.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
@@ -567,54 +568,6 @@ void arch_irq_work_raise(void)
}
#endif
-/*
- * parameter area for the set/clear control bit callbacks
- */
-struct ec_creg_mask_parms {
- unsigned long orval;
- unsigned long andval;
- int cr;
-};
-
-/*
- * callback for setting/clearing control bits
- */
-static void smp_ctl_bit_callback(void *info)
-{
- struct ec_creg_mask_parms *pp = info;
- unsigned long cregs[16];
-
- __ctl_store(cregs, 0, 15);
- cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
- __ctl_load(cregs, 0, 15);
-}
-
-static DEFINE_SPINLOCK(ctl_lock);
-
-void smp_ctl_set_clear_bit(int cr, int bit, bool set)
-{
- struct ec_creg_mask_parms parms = { .cr = cr, };
- struct lowcore *abs_lc;
- u64 ctlreg;
-
- if (set) {
- parms.orval = 1UL << bit;
- parms.andval = -1UL;
- } else {
- parms.orval = 0;
- parms.andval = ~(1UL << bit);
- }
- spin_lock(&ctl_lock);
- abs_lc = get_abs_lowcore();
- ctlreg = abs_lc->cregs_save_area[cr];
- ctlreg = (ctlreg & parms.andval) | parms.orval;
- abs_lc->cregs_save_area[cr] = ctlreg;
- put_abs_lowcore(abs_lc);
- on_each_cpu(smp_ctl_bit_callback, &parms, 1);
- spin_unlock(&ctl_lock);
-}
-EXPORT_SYMBOL(smp_ctl_set_clear_bit);
-
#ifdef CONFIG_CRASH_DUMP
int smp_store_status(int cpu)
@@ -935,14 +888,14 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
* Make sure global control register contents do not change
* until new CPU has initialized control registers.
*/
- spin_lock(&ctl_lock);
+ system_ctlreg_lock();
pcpu_prepare_secondary(pcpu, cpu);
pcpu_attach_task(pcpu, tidle);
pcpu_start_fn(pcpu, smp_start_secondary, NULL);
/* Wait until cpu puts itself in the online & active maps */
while (!cpu_online(cpu))
cpu_relax();
- spin_unlock(&ctl_lock);
+ system_ctlreg_unlock();
return 0;
}
@@ -957,7 +910,7 @@ early_param("possible_cpus", _setup_possible_cpus);
int __cpu_disable(void)
{
- unsigned long cregs[16];
+ struct ctlreg cregs[16];
int cpu;
/* Handle possible pending IPIs */
@@ -969,11 +922,11 @@ int __cpu_disable(void)
/* Disable pseudo page faults on this cpu. */
pfault_fini();
/* Disable interrupt sources via control register. */
- __ctl_store(cregs, 0, 15);
- cregs[0] &= ~0x0000ee70UL; /* disable all external interrupts */
- cregs[6] &= ~0xff000000UL; /* disable all I/O interrupts */
- cregs[14] &= ~0x1f000000UL; /* disable most machine checks */
- __ctl_load(cregs, 0, 15);
+ __local_ctl_store(0, 15, cregs);
+ cregs[0].val &= ~0x0000ee70UL; /* disable all external interrupts */
+ cregs[6].val &= ~0xff000000UL; /* disable all I/O interrupts */
+ cregs[14].val &= ~0x1f000000UL; /* disable most machine checks */
+ __local_ctl_load(0, 15, cregs);
clear_cpu_flag(CIF_NOHZ_DELAY);
return 0;
}
@@ -1013,12 +966,12 @@ void __init smp_fill_possible_mask(void)
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- /* request the 0x1201 emergency signal external interrupt */
if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1201");
- /* request the 0x1202 external call external interrupt */
+ system_ctl_set_bit(0, 14);
if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1202");
+ system_ctl_set_bit(0, 13);
}
void __init smp_prepare_boot_cpu(void)
@@ -1076,11 +1029,9 @@ static ssize_t cpu_configure_store(struct device *dev,
cpus_read_lock();
mutex_lock(&smp_cpu_state_mutex);
rc = -EBUSY;
- /* disallow configuration changes of online cpus and cpu 0 */
+ /* disallow configuration changes of online cpus */
cpu = dev->id;
cpu = smp_get_base_cpu(cpu);
- if (cpu == 0)
- goto out;
for (i = 0; i <= smp_cpu_mtid; i++)
if (cpu_online(cpu + i))
goto out;
@@ -1180,7 +1131,7 @@ static int smp_add_present_cpu(int cpu)
return -ENOMEM;
per_cpu(cpu_device, cpu) = c;
s = &c->dev;
- c->hotpluggable = 1;
+ c->hotpluggable = !!cpu;
rc = register_cpu(c, cpu);
if (rc)
goto out;
@@ -1258,60 +1209,3 @@ out:
return rc;
}
subsys_initcall(s390_smp_init);
-
-static __always_inline void set_new_lowcore(struct lowcore *lc)
-{
- union register_pair dst, src;
- u32 pfx;
-
- src.even = (unsigned long) &S390_lowcore;
- src.odd = sizeof(S390_lowcore);
- dst.even = (unsigned long) lc;
- dst.odd = sizeof(*lc);
- pfx = __pa(lc);
-
- asm volatile(
- " mvcl %[dst],%[src]\n"
- " spx %[pfx]\n"
- : [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
- : [pfx] "Q" (pfx)
- : "memory", "cc");
-}
-
-int __init smp_reinit_ipl_cpu(void)
-{
- unsigned long async_stack, nodat_stack, mcck_stack;
- struct lowcore *lc, *lc_ipl;
- unsigned long flags, cr0;
- u64 mcesad;
-
- lc_ipl = lowcore_ptr[0];
- lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
- nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
- async_stack = stack_alloc();
- mcck_stack = stack_alloc();
- if (!lc || !nodat_stack || !async_stack || !mcck_stack || nmi_alloc_mcesa(&mcesad))
- panic("Couldn't allocate memory");
-
- local_irq_save(flags);
- local_mcck_disable();
- set_new_lowcore(lc);
- S390_lowcore.nodat_stack = nodat_stack + STACK_INIT_OFFSET;
- S390_lowcore.async_stack = async_stack + STACK_INIT_OFFSET;
- S390_lowcore.mcck_stack = mcck_stack + STACK_INIT_OFFSET;
- __ctl_store(cr0, 0, 0);
- __ctl_clear_bit(0, 28); /* disable lowcore protection */
- S390_lowcore.mcesad = mcesad;
- __ctl_load(cr0, 0, 0);
- if (abs_lowcore_map(0, lc, false))
- panic("Couldn't remap absolute lowcore");
- lowcore_ptr[0] = lc;
- local_mcck_enable();
- local_irq_restore(flags);
-
- memblock_free_late(__pa(lc_ipl->mcck_stack - STACK_INIT_OFFSET), THREAD_SIZE);
- memblock_free_late(__pa(lc_ipl->async_stack - STACK_INIT_OFFSET), THREAD_SIZE);
- memblock_free_late(__pa(lc_ipl->nodat_stack - STACK_INIT_OFFSET), THREAD_SIZE);
- memblock_free_late(__pa(lc_ipl), sizeof(*lc_ipl));
- return 0;
-}
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 0787010139f7..94f440e38303 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -6,9 +6,12 @@
*/
#include <linux/stacktrace.h>
+#include <linux/uaccess.h>
+#include <linux/compat.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/kprobes.h>
+#include <asm/ptrace.h>
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs)
@@ -58,3 +61,43 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
return -EINVAL;
return 0;
}
+
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+ const struct pt_regs *regs)
+{
+ struct stack_frame_user __user *sf;
+ unsigned long ip, sp;
+ bool first = true;
+
+ if (is_compat_task())
+ return;
+ if (!consume_entry(cookie, instruction_pointer(regs)))
+ return;
+ sf = (void __user *)user_stack_pointer(regs);
+ pagefault_disable();
+ while (1) {
+ if (__get_user(sp, &sf->back_chain))
+ break;
+ if (__get_user(ip, &sf->gprs[8]))
+ break;
+ if (ip & 0x1) {
+ /*
+ * If the instruction address is invalid, and this
+ * is the first stack frame, assume r14 has not
+ * been written to the stack yet. Otherwise exit.
+ */
+ if (first && !(regs->gprs[14] & 0x1))
+ ip = regs->gprs[14];
+ else
+ break;
+ }
+ if (!consume_entry(cookie, ip))
+ break;
+ /* Sanity check: ABI requires SP to be aligned to 8 bytes. */
+ if (!sp || sp & 0x7)
+ break;
+ sf = (void __user *)sp;
+ first = false;
+ }
+ pagefault_enable();
+}
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index d34d3548c046..14abad953c02 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -173,10 +173,10 @@ void init_cpu_timer(void)
clockevents_register_device(cd);
/* Enable clock comparator timer interrupt. */
- __ctl_set_bit(0,11);
+ local_ctl_set_bit(0, CR0_CLOCK_COMPARATOR_SUBMASK_BIT);
/* Always allow the timing alert external interrupt. */
- __ctl_set_bit(0, 4);
+ local_ctl_set_bit(0, CR0_ETR_SUBMASK_BIT);
}
static void clock_comparator_interrupt(struct ext_code ext_code,
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index 23e868b79a6c..caec7db6f966 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -61,16 +61,6 @@ quiet_cmd_vdso32as = VDSO32A $@
quiet_cmd_vdso32cc = VDSO32C $@
cmd_vdso32cc = $(CC) $(c_flags) -c -o $@ $<
-# install commands for the unstripped file
-quiet_cmd_vdso_install = INSTALL $@
- cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
-
-vdso32.so: $(obj)/vdso32.so.dbg
- @mkdir -p $(MODLIB)/vdso
- $(call cmd,vdso_install)
-
-vdso_install: vdso32.so
-
# Generate VDSO offsets using helper script
gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index fc1c6ff8178f..e3c9085f8fa7 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -70,16 +70,6 @@ quiet_cmd_vdso64as = VDSO64A $@
quiet_cmd_vdso64cc = VDSO64C $@
cmd_vdso64cc = $(CC) $(c_flags) -c -o $@ $<
-# install commands for the unstripped file
-quiet_cmd_vdso_install = INSTALL $@
- cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
-
-vdso64.so: $(obj)/vdso64.so.dbg
- @mkdir -p $(MODLIB)/vdso
- $(call cmd,vdso_install)
-
-vdso_install: vdso64.so
-
# Generate VDSO offsets using helper script
gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index ff8349d17b33..5bfcc50c1a68 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -11,7 +11,7 @@
#include <linux/err.h>
#include <linux/pgtable.h>
#include <linux/bitfield.h>
-
+#include <asm/fault.h>
#include <asm/gmap.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -466,23 +466,6 @@ static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, u8 ar,
return 0;
}
-struct trans_exc_code_bits {
- unsigned long addr : 52; /* Translation-exception Address */
- unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */
- unsigned long : 2;
- unsigned long b56 : 1;
- unsigned long : 3;
- unsigned long b60 : 1;
- unsigned long b61 : 1;
- unsigned long as : 2; /* ASCE Identifier */
-};
-
-enum {
- FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
- FSI_STORE = 1, /* Exception was due to store operation */
- FSI_FETCH = 2 /* Exception was due to fetch operation */
-};
-
enum prot_type {
PROT_TYPE_LA = 0,
PROT_TYPE_KEYC = 1,
@@ -497,11 +480,11 @@ static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva,
enum gacc_mode mode, enum prot_type prot, bool terminate)
{
struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
- struct trans_exc_code_bits *tec;
+ union teid *teid;
memset(pgm, 0, sizeof(*pgm));
pgm->code = code;
- tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
+ teid = (union teid *)&pgm->trans_exc_code;
switch (code) {
case PGM_PROTECTION:
@@ -511,25 +494,25 @@ static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva,
WARN_ON_ONCE(1);
break;
case PROT_TYPE_IEP:
- tec->b61 = 1;
+ teid->b61 = 1;
fallthrough;
case PROT_TYPE_LA:
- tec->b56 = 1;
+ teid->b56 = 1;
break;
case PROT_TYPE_KEYC:
- tec->b60 = 1;
+ teid->b60 = 1;
break;
case PROT_TYPE_ALC:
- tec->b60 = 1;
+ teid->b60 = 1;
fallthrough;
case PROT_TYPE_DAT:
- tec->b61 = 1;
+ teid->b61 = 1;
break;
}
if (terminate) {
- tec->b56 = 0;
- tec->b60 = 0;
- tec->b61 = 0;
+ teid->b56 = 0;
+ teid->b60 = 0;
+ teid->b61 = 0;
}
fallthrough;
case PGM_ASCE_TYPE:
@@ -543,9 +526,9 @@ static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva,
* exc_access_id has to be set to 0 for some instructions. Both
* cases have to be handled by the caller.
*/
- tec->addr = gva >> PAGE_SHIFT;
- tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
- tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
+ teid->addr = gva >> PAGE_SHIFT;
+ teid->fsi = mode == GACC_STORE ? TEID_FSI_STORE : TEID_FSI_FETCH;
+ teid->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
fallthrough;
case PGM_ALEN_TRANSLATION:
case PGM_ALE_SEQUENCE:
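
The local trans_exc_code_bits struct and FSI_* constants removed here are replaced by a shared definition pulled in via <asm/fault.h>. Judging from the fields used above and the layout of the removed struct, that shared type presumably looks along these lines (a sketch, not the authoritative header):

union teid {
	unsigned long val;
	struct {
		unsigned long addr : 52; /* translation-exception address */
		unsigned long fsi  : 2;	 /* fetch/store indication */
		unsigned long	   : 2;
		unsigned long b56  : 1;
		unsigned long	   : 3;
		unsigned long b60  : 1;
		unsigned long b61  : 1;
		unsigned long as   : 2;	 /* ASCE identifier */
	};
};

enum {
	TEID_FSI_UNKNOWN = 0,	/* unknown whether fetch or store */
	TEID_FSI_STORE	 = 1,	/* exception was due to a store */
	TEID_FSI_FETCH	 = 2	/* exception was due to a fetch */
};
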
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 11676b81e6bf..7aa0e668488f 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4927,7 +4927,7 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
}
if (MACHINE_HAS_GS) {
preempt_disable();
- __ctl_set_bit(2, 4);
+ local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
if (current->thread.gs_cb) {
vcpu->arch.host_gscb = current->thread.gs_cb;
save_gs_cb(vcpu->arch.host_gscb);
@@ -5004,13 +5004,13 @@ static void store_regs_fmt2(struct kvm_vcpu *vcpu)
kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
if (MACHINE_HAS_GS) {
preempt_disable();
- __ctl_set_bit(2, 4);
+ local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
if (vcpu->arch.gs_enabled)
save_gs_cb(current->thread.gs_cb);
current->thread.gs_cb = vcpu->arch.host_gscb;
restore_gs_cb(vcpu->arch.host_gscb);
if (!vcpu->arch.host_gscb)
- __ctl_clear_bit(2, 4);
+ local_ctl_clear_bit(2, CR2_GUARDED_STORAGE_BIT);
vcpu->arch.host_gscb = NULL;
preempt_enable();
}
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index dc4cfa8795c0..621a17fd1a1b 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -57,7 +57,7 @@ static int handle_gs(struct kvm_vcpu *vcpu)
if (test_kvm_facility(vcpu->kvm, 133)) {
VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
preempt_disable();
- __ctl_set_bit(2, 4);
+ local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
restore_gs_cb(current->thread.gs_cb);
preempt_enable();
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index e4a13d7cab6e..61d8dcd95bbc 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -12,21 +12,22 @@
#include <linux/export.h>
#include <linux/mm.h>
#include <asm/asm-extable.h>
+#include <asm/ctlreg.h>
#ifdef CONFIG_DEBUG_ENTRY
void debug_user_asce(int exit)
{
- unsigned long cr1, cr7;
+ struct ctlreg cr1, cr7;
- __ctl_store(cr1, 1, 1);
- __ctl_store(cr7, 7, 7);
- if (cr1 == S390_lowcore.kernel_asce && cr7 == S390_lowcore.user_asce)
+ local_ctl_store(1, &cr1);
+ local_ctl_store(7, &cr7);
+ if (cr1.val == S390_lowcore.kernel_asce.val && cr7.val == S390_lowcore.user_asce.val)
return;
panic("incorrect ASCE on kernel %s\n"
"cr1: %016lx cr7: %016lx\n"
- "kernel: %016llx user: %016llx\n",
- exit ? "exit" : "entry", cr1, cr7,
- S390_lowcore.kernel_asce, S390_lowcore.user_asce);
+ "kernel: %016lx user: %016lx\n",
+ exit ? "exit" : "entry", cr1.val, cr7.val,
+ S390_lowcore.kernel_asce.val, S390_lowcore.user_asce.val);
}
#endif /*CONFIG_DEBUG_ENTRY */
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index b51666967aa1..d37a8f607b71 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -287,7 +287,7 @@ static int pt_dump_init(void)
* kernel ASCE. We need this to keep the page table walker functions
* from accessing non-existent entries.
*/
- max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
+ max_addr = (S390_lowcore.kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2;
max_addr = 1UL << (max_addr * 11 + 31);
address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size;
address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31;
diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
index fe87291df95d..0a0738a473af 100644
--- a/arch/s390/mm/extable.c
+++ b/arch/s390/mm/extable.c
@@ -61,6 +61,22 @@ static bool ex_handler_ua_load_reg(const struct exception_table_entry *ex,
return true;
}
+static bool ex_handler_zeropad(const struct exception_table_entry *ex, struct pt_regs *regs)
+{
+ unsigned int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data);
+ unsigned int reg_data = FIELD_GET(EX_DATA_REG_ERR, ex->data);
+ unsigned long data, addr, offset;
+
+ addr = regs->gprs[reg_addr];
+ offset = addr & (sizeof(unsigned long) - 1);
+ addr &= ~(sizeof(unsigned long) - 1);
+ data = *(unsigned long *)addr;
+ data <<= BITS_PER_BYTE * offset;
+ regs->gprs[reg_data] = data;
+ regs->psw.addr = extable_fixup(ex);
+ return true;
+}
+
bool fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *ex;
@@ -81,6 +97,8 @@ bool fixup_exception(struct pt_regs *regs)
return ex_handler_ua_load_reg(ex, false, regs);
case EX_TYPE_UA_LOAD_REGPAIR:
return ex_handler_ua_load_reg(ex, true, regs);
+ case EX_TYPE_ZEROPAD:
+ return ex_handler_zeropad(ex, regs);
}
panic("invalid exception table entry");
}
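
The new EX_TYPE_ZEROPAD fixup re-reads the faulting word from the aligned-down address and left-shifts it by the byte offset, so the bytes that were readable stay in their original positions (s390 is big-endian) while the unreadable tail reads as zero, which is the behaviour callers in the load_unaligned_zeropad() style expect. A small stand-alone illustration of the shift, with made-up byte values:

#include <stdio.h>

int main(void)
{
	/* The aligned word as a big-endian load would see it: bytes
	 * 'A'..'H' at offsets 0..7 of the aligned-down address. */
	unsigned long data = 0x4142434445464748UL;
	unsigned long offset = 5;	/* faulting address was aligned + 5 */

	data <<= 8 * offset;		/* keep bytes 5..7, zero-pad the rest */
	printf("%016lx\n", data);	/* prints 4647480000000000 */
	return 0;
}
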
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index b678295931c3..249aefcf7c4e 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -3,17 +3,19 @@
* S390 version
* Copyright IBM Corp. 1999
* Author(s): Hartmut Penner (hp@de.ibm.com)
- * Ulrich Weigand (uweigand@de.ibm.com)
+ * Ulrich Weigand (uweigand@de.ibm.com)
*
* Derived from "arch/i386/mm/fault.c"
* Copyright (C) 1995 Linus Torvalds
*/
#include <linux/kernel_stat.h>
+#include <linux/mmu_context.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
+#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
@@ -34,38 +36,27 @@
#include <linux/kfence.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
+#include <asm/fault.h>
#include <asm/diag.h>
#include <asm/gmap.h>
#include <asm/irq.h>
-#include <asm/mmu_context.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"
-#define __FAIL_ADDR_MASK -4096L
-
-/*
- * Allocate private vm_fault_reason from top. Please make sure it won't
- * collide with vm_fault_reason.
- */
-#define VM_FAULT_BADCONTEXT ((__force vm_fault_t)0x80000000)
-#define VM_FAULT_BADMAP ((__force vm_fault_t)0x40000000)
-#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x20000000)
-#define VM_FAULT_SIGNAL ((__force vm_fault_t)0x10000000)
-#define VM_FAULT_PFAULT ((__force vm_fault_t)0x8000000)
-
enum fault_type {
KERNEL_FAULT,
USER_FAULT,
GMAP_FAULT,
};
-static unsigned long store_indication __read_mostly;
+static DEFINE_STATIC_KEY_FALSE(have_store_indication);
static int __init fault_init(void)
{
if (test_facility(75))
- store_indication = 0xc00;
+ static_branch_enable(&have_store_indication);
return 0;
}
early_initcall(fault_init);
@@ -75,11 +66,9 @@ early_initcall(fault_init);
*/
static enum fault_type get_fault_type(struct pt_regs *regs)
{
- unsigned long trans_exc_code;
+ union teid teid = { .val = regs->int_parm_long };
- trans_exc_code = regs->int_parm_long & 3;
- if (likely(trans_exc_code == 0)) {
- /* primary space exception */
+ if (likely(teid.as == PSW_BITS_AS_PRIMARY)) {
if (user_mode(regs))
return USER_FAULT;
if (!IS_ENABLED(CONFIG_PGSTE))
@@ -88,83 +77,77 @@ static enum fault_type get_fault_type(struct pt_regs *regs)
return GMAP_FAULT;
return KERNEL_FAULT;
}
- if (trans_exc_code == 2)
+ if (teid.as == PSW_BITS_AS_SECONDARY)
return USER_FAULT;
- if (trans_exc_code == 1) {
- /* access register mode, not used in the kernel */
+ /* Access register mode, not used in the kernel */
+ if (teid.as == PSW_BITS_AS_ACCREG)
return USER_FAULT;
- }
- /* home space exception -> access via kernel ASCE */
+ /* Home space -> access via kernel ASCE */
return KERNEL_FAULT;
}
static unsigned long get_fault_address(struct pt_regs *regs)
{
- unsigned long trans_exc_code = regs->int_parm_long;
+ union teid teid = { .val = regs->int_parm_long };
- return trans_exc_code & __FAIL_ADDR_MASK;
+ return teid.addr * PAGE_SIZE;
}
-static bool fault_is_write(struct pt_regs *regs)
+static __always_inline bool fault_is_write(struct pt_regs *regs)
{
- unsigned long trans_exc_code = regs->int_parm_long;
+ union teid teid = { .val = regs->int_parm_long };
- return (trans_exc_code & store_indication) == 0x400;
-}
-
-static int bad_address(void *p)
-{
- unsigned long dummy;
-
- return get_kernel_nofault(dummy, (unsigned long *)p);
+ if (static_branch_likely(&have_store_indication))
+ return teid.fsi == TEID_FSI_STORE;
+ return false;
}
static void dump_pagetable(unsigned long asce, unsigned long address)
{
- unsigned long *table = __va(asce & _ASCE_ORIGIN);
+ unsigned long entry, *table = __va(asce & _ASCE_ORIGIN);
pr_alert("AS:%016lx ", asce);
switch (asce & _ASCE_TYPE_MASK) {
case _ASCE_TYPE_REGION1:
table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
- if (bad_address(table))
+ if (get_kernel_nofault(entry, table))
goto bad;
- pr_cont("R1:%016lx ", *table);
- if (*table & _REGION_ENTRY_INVALID)
+ pr_cont("R1:%016lx ", entry);
+ if (entry & _REGION_ENTRY_INVALID)
goto out;
- table = __va(*table & _REGION_ENTRY_ORIGIN);
+ table = __va(entry & _REGION_ENTRY_ORIGIN);
fallthrough;
case _ASCE_TYPE_REGION2:
table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
- if (bad_address(table))
+ if (get_kernel_nofault(entry, table))
goto bad;
- pr_cont("R2:%016lx ", *table);
- if (*table & _REGION_ENTRY_INVALID)
+ pr_cont("R2:%016lx ", entry);
+ if (entry & _REGION_ENTRY_INVALID)
goto out;
- table = __va(*table & _REGION_ENTRY_ORIGIN);
+ table = __va(entry & _REGION_ENTRY_ORIGIN);
fallthrough;
case _ASCE_TYPE_REGION3:
table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
- if (bad_address(table))
+ if (get_kernel_nofault(entry, table))
goto bad;
- pr_cont("R3:%016lx ", *table);
- if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
+ pr_cont("R3:%016lx ", entry);
+ if (entry & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
goto out;
- table = __va(*table & _REGION_ENTRY_ORIGIN);
+ table = __va(entry & _REGION_ENTRY_ORIGIN);
fallthrough;
case _ASCE_TYPE_SEGMENT:
table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
- if (bad_address(table))
+ if (get_kernel_nofault(entry, table))
goto bad;
- pr_cont("S:%016lx ", *table);
- if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
+ pr_cont("S:%016lx ", entry);
+ if (entry & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
goto out;
- table = __va(*table & _SEGMENT_ENTRY_ORIGIN);
+ table = __va(entry & _SEGMENT_ENTRY_ORIGIN);
}
table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
- if (bad_address(table))
+ if (get_kernel_nofault(entry, table))
goto bad;
- pr_cont("P:%016lx ", *table);
+ pr_cont("P:%016lx ", entry);
out:
pr_cont("\n");
return;
@@ -174,173 +157,113 @@ bad:
static void dump_fault_info(struct pt_regs *regs)
{
+ union teid teid = { .val = regs->int_parm_long };
unsigned long asce;
pr_alert("Failing address: %016lx TEID: %016lx\n",
- regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
+ get_fault_address(regs), teid.val);
pr_alert("Fault in ");
- switch (regs->int_parm_long & 3) {
- case 3:
+ switch (teid.as) {
+ case PSW_BITS_AS_HOME:
pr_cont("home space ");
break;
- case 2:
+ case PSW_BITS_AS_SECONDARY:
pr_cont("secondary space ");
break;
- case 1:
+ case PSW_BITS_AS_ACCREG:
pr_cont("access register ");
break;
- case 0:
+ case PSW_BITS_AS_PRIMARY:
pr_cont("primary space ");
break;
}
pr_cont("mode while using ");
switch (get_fault_type(regs)) {
case USER_FAULT:
- asce = S390_lowcore.user_asce;
+ asce = S390_lowcore.user_asce.val;
pr_cont("user ");
break;
case GMAP_FAULT:
- asce = ((struct gmap *) S390_lowcore.gmap)->asce;
+ asce = ((struct gmap *)S390_lowcore.gmap)->asce;
pr_cont("gmap ");
break;
case KERNEL_FAULT:
- asce = S390_lowcore.kernel_asce;
+ asce = S390_lowcore.kernel_asce.val;
pr_cont("kernel ");
break;
default:
unreachable();
}
pr_cont("ASCE.\n");
- dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
+ dump_pagetable(asce, get_fault_address(regs));
}
int show_unhandled_signals = 1;
void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+
if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
return;
if (!unhandled_signal(current, signr))
return;
- if (!printk_ratelimit())
+ if (!__ratelimit(&rs))
return;
- printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
- regs->int_code & 0xffff, regs->int_code >> 17);
+ pr_alert("User process fault: interruption code %04x ilc:%d ",
+ regs->int_code & 0xffff, regs->int_code >> 17);
print_vma_addr(KERN_CONT "in ", regs->psw.addr);
- printk(KERN_CONT "\n");
+ pr_cont("\n");
if (is_mm_fault)
dump_fault_info(regs);
show_regs(regs);
}
-/*
- * Send SIGSEGV to task. This is an external routine
- * to keep the stack usage of do_page_fault small.
- */
-static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
+static void do_sigsegv(struct pt_regs *regs, int si_code)
{
report_user_fault(regs, SIGSEGV, 1);
- force_sig_fault(SIGSEGV, si_code,
- (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
+ force_sig_fault(SIGSEGV, si_code, (void __user *)get_fault_address(regs));
}
-static noinline void do_no_context(struct pt_regs *regs, vm_fault_t fault)
+static void handle_fault_error_nolock(struct pt_regs *regs, int si_code)
{
enum fault_type fault_type;
unsigned long address;
bool is_write;
+ if (user_mode(regs)) {
+ if (WARN_ON_ONCE(!si_code))
+ si_code = SEGV_MAPERR;
+ return do_sigsegv(regs, si_code);
+ }
if (fixup_exception(regs))
return;
fault_type = get_fault_type(regs);
- if ((fault_type == KERNEL_FAULT) && (fault == VM_FAULT_BADCONTEXT)) {
+ if (fault_type == KERNEL_FAULT) {
address = get_fault_address(regs);
is_write = fault_is_write(regs);
if (kfence_handle_page_fault(address, is_write, regs))
return;
}
- /*
- * Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
- */
if (fault_type == KERNEL_FAULT)
- printk(KERN_ALERT "Unable to handle kernel pointer dereference"
- " in virtual kernel address space\n");
+ pr_alert("Unable to handle kernel pointer dereference in virtual kernel address space\n");
else
- printk(KERN_ALERT "Unable to handle kernel paging request"
- " in virtual user address space\n");
+ pr_alert("Unable to handle kernel paging request in virtual user address space\n");
dump_fault_info(regs);
die(regs, "Oops");
}
-static noinline void do_low_address(struct pt_regs *regs)
+static void handle_fault_error(struct pt_regs *regs, int si_code)
{
- /* Low-address protection hit in kernel mode means
- NULL pointer write access in kernel mode. */
- if (regs->psw.mask & PSW_MASK_PSTATE) {
- /* Low-address protection hit in user mode 'cannot happen'. */
- die (regs, "Low-address protection");
- }
+ struct mm_struct *mm = current->mm;
- do_no_context(regs, VM_FAULT_BADACCESS);
-}
-
-static noinline void do_sigbus(struct pt_regs *regs)
-{
- /*
- * Send a sigbus, regardless of whether we were in kernel
- * or user mode.
- */
- force_sig_fault(SIGBUS, BUS_ADRERR,
- (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
+ mmap_read_unlock(mm);
+ handle_fault_error_nolock(regs, si_code);
}
-static noinline void do_fault_error(struct pt_regs *regs, vm_fault_t fault)
+static void do_sigbus(struct pt_regs *regs)
{
- int si_code;
-
- switch (fault) {
- case VM_FAULT_BADACCESS:
- case VM_FAULT_BADMAP:
- /* Bad memory access. Check if it is kernel or user space. */
- if (user_mode(regs)) {
- /* User mode accesses just cause a SIGSEGV */
- si_code = (fault == VM_FAULT_BADMAP) ?
- SEGV_MAPERR : SEGV_ACCERR;
- do_sigsegv(regs, si_code);
- break;
- }
- fallthrough;
- case VM_FAULT_BADCONTEXT:
- case VM_FAULT_PFAULT:
- do_no_context(regs, fault);
- break;
- case VM_FAULT_SIGNAL:
- if (!user_mode(regs))
- do_no_context(regs, fault);
- break;
- default: /* fault & VM_FAULT_ERROR */
- if (fault & VM_FAULT_OOM) {
- if (!user_mode(regs))
- do_no_context(regs, fault);
- else
- pagefault_out_of_memory();
- } else if (fault & VM_FAULT_SIGSEGV) {
- /* Kernel mode? Handle exceptions or die */
- if (!user_mode(regs))
- do_no_context(regs, fault);
- else
- do_sigsegv(regs, SEGV_MAPERR);
- } else if (fault & VM_FAULT_SIGBUS) {
- /* Kernel mode? Handle exceptions or die */
- if (!user_mode(regs))
- do_no_context(regs, fault);
- else
- do_sigbus(regs);
- } else
- BUG();
- break;
- }
+ force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)get_fault_address(regs));
}
/*
@@ -349,20 +272,20 @@ static noinline void do_fault_error(struct pt_regs *regs, vm_fault_t fault)
* routines.
*
* interruption code (int_code):
- * 04 Protection -> Write-Protection (suppression)
- * 10 Segment translation -> Not present (nullification)
- * 11 Page translation -> Not present (nullification)
- * 3b Region third trans. -> Not present (nullification)
+ * 04 Protection -> Write-Protection (suppression)
+ * 10 Segment translation -> Not present (nullification)
+ * 11 Page translation -> Not present (nullification)
+ * 3b Region third trans. -> Not present (nullification)
*/
-static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
+static void do_exception(struct pt_regs *regs, int access)
{
- struct gmap *gmap;
+ struct vm_area_struct *vma;
struct task_struct *tsk;
+ unsigned long address;
struct mm_struct *mm;
- struct vm_area_struct *vma;
enum fault_type type;
- unsigned long address;
unsigned int flags;
+ struct gmap *gmap;
vm_fault_t fault;
bool is_write;
@@ -372,31 +295,21 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
* been nullified. Don't signal single step via SIGTRAP.
*/
clear_thread_flag(TIF_PER_TRAP);
-
if (kprobe_page_fault(regs, 14))
- return 0;
-
+ return;
mm = tsk->mm;
address = get_fault_address(regs);
is_write = fault_is_write(regs);
-
- /*
- * Verify that the fault happened in user space, that
- * we are not in an interrupt and that there is a
- * user context.
- */
- fault = VM_FAULT_BADCONTEXT;
type = get_fault_type(regs);
switch (type) {
case KERNEL_FAULT:
- goto out;
+ return handle_fault_error_nolock(regs, 0);
case USER_FAULT:
case GMAP_FAULT:
if (faulthandler_disabled() || !mm)
- goto out;
+ return handle_fault_error_nolock(regs, 0);
break;
}
-
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
flags = FAULT_FLAG_DEFAULT;
if (user_mode(regs))
@@ -419,125 +332,117 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
vma_end_read(vma);
if (!(fault & VM_FAULT_RETRY)) {
count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
- if (likely(!(fault & VM_FAULT_ERROR)))
- fault = 0;
- goto out;
+ if (unlikely(fault & VM_FAULT_ERROR))
+ goto error;
+ return;
}
count_vm_vma_lock_event(VMA_LOCK_RETRY);
/* Quick path to respond to signals */
if (fault_signal_pending(fault, regs)) {
- fault = VM_FAULT_SIGNAL;
- goto out;
+ if (!user_mode(regs))
+ handle_fault_error_nolock(regs, 0);
+ return;
}
lock_mmap:
mmap_read_lock(mm);
-
gmap = NULL;
if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
- gmap = (struct gmap *) S390_lowcore.gmap;
+ gmap = (struct gmap *)S390_lowcore.gmap;
current->thread.gmap_addr = address;
current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
current->thread.gmap_int_code = regs->int_code & 0xffff;
address = __gmap_translate(gmap, address);
- if (address == -EFAULT) {
- fault = VM_FAULT_BADMAP;
- goto out_up;
- }
+ if (address == -EFAULT)
+ return handle_fault_error(regs, SEGV_MAPERR);
if (gmap->pfault_enabled)
flags |= FAULT_FLAG_RETRY_NOWAIT;
}
-
retry:
- fault = VM_FAULT_BADMAP;
vma = find_vma(mm, address);
if (!vma)
- goto out_up;
-
+ return handle_fault_error(regs, SEGV_MAPERR);
if (unlikely(vma->vm_start > address)) {
if (!(vma->vm_flags & VM_GROWSDOWN))
- goto out_up;
+ return handle_fault_error(regs, SEGV_MAPERR);
vma = expand_stack(mm, address);
if (!vma)
- goto out;
+ return handle_fault_error_nolock(regs, SEGV_MAPERR);
}
-
- /*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
- */
- fault = VM_FAULT_BADACCESS;
if (unlikely(!(vma->vm_flags & access)))
- goto out_up;
-
- /*
- * If for any reason at all we couldn't handle the fault,
- * make sure we exit gracefully rather than endlessly redo
- * the fault.
- */
+ return handle_fault_error(regs, SEGV_ACCERR);
fault = handle_mm_fault(vma, address, flags, regs);
if (fault_signal_pending(fault, regs)) {
- fault = VM_FAULT_SIGNAL;
if (flags & FAULT_FLAG_RETRY_NOWAIT)
- goto out_up;
- goto out;
+ mmap_read_unlock(mm);
+ if (!user_mode(regs))
+ handle_fault_error_nolock(regs, 0);
+ return;
}
-
/* The fault is fully completed (including releasing mmap lock) */
if (fault & VM_FAULT_COMPLETED) {
if (gmap) {
mmap_read_lock(mm);
- goto out_gmap;
+ goto gmap;
}
- fault = 0;
- goto out;
+ return;
+ }
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ mmap_read_unlock(mm);
+ goto error;
}
-
- if (unlikely(fault & VM_FAULT_ERROR))
- goto out_up;
-
if (fault & VM_FAULT_RETRY) {
- if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
- (flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ if (IS_ENABLED(CONFIG_PGSTE) && gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
/*
- * FAULT_FLAG_RETRY_NOWAIT has been set, mmap_lock has
- * not been released
+ * FAULT_FLAG_RETRY_NOWAIT has been set,
+ * mmap_lock has not been released
*/
current->thread.gmap_pfault = 1;
- fault = VM_FAULT_PFAULT;
- goto out_up;
+ return handle_fault_error(regs, 0);
}
flags &= ~FAULT_FLAG_RETRY_NOWAIT;
flags |= FAULT_FLAG_TRIED;
mmap_read_lock(mm);
goto retry;
}
-out_gmap:
+gmap:
if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
address = __gmap_link(gmap, current->thread.gmap_addr,
address);
- if (address == -EFAULT) {
- fault = VM_FAULT_BADMAP;
- goto out_up;
- }
+ if (address == -EFAULT)
+ return handle_fault_error(regs, SEGV_MAPERR);
if (address == -ENOMEM) {
fault = VM_FAULT_OOM;
- goto out_up;
+ mmap_read_unlock(mm);
+ goto error;
}
}
- fault = 0;
-out_up:
mmap_read_unlock(mm);
-out:
- return fault;
+ return;
+error:
+ if (fault & VM_FAULT_OOM) {
+ if (!user_mode(regs))
+ handle_fault_error_nolock(regs, 0);
+ else
+ pagefault_out_of_memory();
+ } else if (fault & VM_FAULT_SIGSEGV) {
+ if (!user_mode(regs))
+ handle_fault_error_nolock(regs, 0);
+ else
+ do_sigsegv(regs, SEGV_MAPERR);
+ } else if (fault & VM_FAULT_SIGBUS) {
+ if (!user_mode(regs))
+ handle_fault_error_nolock(regs, 0);
+ else
+ do_sigbus(regs);
+ } else {
+ BUG();
+ }
}
void do_protection_exception(struct pt_regs *regs)
{
- unsigned long trans_exc_code;
- int access;
- vm_fault_t fault;
+ union teid teid = { .val = regs->int_parm_long };
- trans_exc_code = regs->int_parm_long;
/*
* Protection exceptions are suppressing, decrement psw address.
* The exception to this rule are aborted transactions, for these
@@ -550,33 +455,28 @@ void do_protection_exception(struct pt_regs *regs)
* as a special case because the translation exception code
* field is not guaranteed to contain valid data in this case.
*/
- if (unlikely(!(trans_exc_code & 4))) {
- do_low_address(regs);
- return;
+ if (unlikely(!teid.b61)) {
+ if (user_mode(regs)) {
+ /* Low-address protection in user mode: cannot happen */
+ die(regs, "Low-address protection");
+ }
+ /*
+ * Low-address protection in kernel mode means
+ * NULL pointer write access in kernel mode.
+ */
+ return handle_fault_error_nolock(regs, 0);
}
- if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
- regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
- (regs->psw.addr & PAGE_MASK);
- access = VM_EXEC;
- fault = VM_FAULT_BADACCESS;
- } else {
- access = VM_WRITE;
- fault = do_exception(regs, access);
+ if (unlikely(MACHINE_HAS_NX && teid.b56)) {
+ regs->int_parm_long = (teid.addr * PAGE_SIZE) | (regs->psw.addr & PAGE_MASK);
+ return handle_fault_error_nolock(regs, SEGV_ACCERR);
}
- if (unlikely(fault))
- do_fault_error(regs, fault);
+ do_exception(regs, VM_WRITE);
}
NOKPROBE_SYMBOL(do_protection_exception);
void do_dat_exception(struct pt_regs *regs)
{
- int access;
- vm_fault_t fault;
-
- access = VM_ACCESS_FLAGS;
- fault = do_exception(regs, access);
- if (unlikely(fault))
- do_fault_error(regs, fault);
+ do_exception(regs, VM_ACCESS_FLAGS);
}
NOKPROBE_SYMBOL(do_dat_exception);
@@ -584,7 +484,8 @@ NOKPROBE_SYMBOL(do_dat_exception);
void do_secure_storage_access(struct pt_regs *regs)
{
- unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
+ union teid teid = { .val = regs->int_parm_long };
+ unsigned long addr = get_fault_address(regs);
struct vm_area_struct *vma;
struct mm_struct *mm;
struct page *page;
@@ -592,14 +493,12 @@ void do_secure_storage_access(struct pt_regs *regs)
int rc;
/*
- * bit 61 tells us if the address is valid, if it's not we
- * have a major problem and should stop the kernel or send a
- * SIGSEGV to the process. Unfortunately bit 61 is not
- * reliable without the misc UV feature so we need to check
- * for that as well.
+ * Bit 61 indicates if the address is valid; if it is not, the
+ * kernel should be stopped or SIGSEGV should be sent to the
+ * process. Bit 61 is not reliable without the misc UV feature,
+ * therefore this needs to be checked too.
*/
- if (uv_has_feature(BIT_UV_FEAT_MISC) &&
- !test_bit_inv(61, &regs->int_parm_long)) {
+ if (uv_has_feature(BIT_UV_FEAT_MISC) && !teid.b61) {
/*
* When this happens, userspace did something that it
* was not supposed to do, e.g. branching into secure
@@ -609,14 +508,12 @@ void do_secure_storage_access(struct pt_regs *regs)
send_sig(SIGSEGV, current, 0);
return;
}
-
/*
- * The kernel should never run into this case and we
- * have no way out of this situation.
+ * The kernel should never run into this case and
+ * there is no way out of this situation.
*/
panic("Unexpected PGM 0x3d with TEID bit 61=0");
}
-
switch (get_fault_type(regs)) {
case GMAP_FAULT:
mm = current->mm;
@@ -624,20 +521,15 @@ void do_secure_storage_access(struct pt_regs *regs)
mmap_read_lock(mm);
addr = __gmap_translate(gmap, addr);
mmap_read_unlock(mm);
- if (IS_ERR_VALUE(addr)) {
- do_fault_error(regs, VM_FAULT_BADMAP);
- break;
- }
+ if (IS_ERR_VALUE(addr))
+ return handle_fault_error_nolock(regs, SEGV_MAPERR);
fallthrough;
case USER_FAULT:
mm = current->mm;
mmap_read_lock(mm);
vma = find_vma(mm, addr);
- if (!vma) {
- mmap_read_unlock(mm);
- do_fault_error(regs, VM_FAULT_BADMAP);
- break;
- }
+ if (!vma)
+ return handle_fault_error(regs, SEGV_MAPERR);
page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
if (IS_ERR_OR_NULL(page)) {
mmap_read_unlock(mm);
@@ -658,23 +550,18 @@ void do_secure_storage_access(struct pt_regs *regs)
BUG();
break;
default:
- do_fault_error(regs, VM_FAULT_BADMAP);
- WARN_ON_ONCE(1);
+ unreachable();
}
}
NOKPROBE_SYMBOL(do_secure_storage_access);
void do_non_secure_storage_access(struct pt_regs *regs)
{
- unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
+ unsigned long gaddr = get_fault_address(regs);
- if (get_fault_type(regs) != GMAP_FAULT) {
- do_fault_error(regs, VM_FAULT_BADMAP);
- WARN_ON_ONCE(1);
- return;
- }
-
+ if (WARN_ON_ONCE(get_fault_type(regs) != GMAP_FAULT))
+ return handle_fault_error_nolock(regs, SEGV_MAPERR);
if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
send_sig(SIGSEGV, current, 0);
}
@@ -682,8 +569,8 @@ NOKPROBE_SYMBOL(do_non_secure_storage_access);
void do_secure_storage_violation(struct pt_regs *regs)
{
- unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
+ unsigned long gaddr = get_fault_address(regs);
/*
* If the VM has been rebooted, its address space might still contain
@@ -699,9 +586,8 @@ void do_secure_storage_violation(struct pt_regs *regs)
* This exception is only triggered when a guest 2 is running
* and can therefore never occur in kernel context.
*/
- printk_ratelimited(KERN_WARNING
- "Secure storage violation in task: %s, pid %d\n",
- current->comm, current->pid);
+ pr_warn_ratelimited("Secure storage violation in task: %s, pid %d\n",
+ current->comm, current->pid);
send_sig(SIGSEGV, current, 0);
}
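
Throughout the rewritten fault handling the faulting address is now recovered as teid.addr * PAGE_SIZE. Because the addr bitfield occupies the upper 52 bits of the TEID, this yields the same value the old masking with __FAIL_ADDR_MASK (-4096L) produced. A quick stand-alone check, using a hypothetical TEID value:

#include <assert.h>

int main(void)
{
	unsigned long teid_val = 0x000003ffe1234807UL;	/* hypothetical TEID */
	unsigned long addr_field = teid_val >> 12;	/* union teid .addr */
	unsigned long old_mask = teid_val & -4096L;	/* old __FAIL_ADDR_MASK */

	assert(addr_field * 4096 == old_mask);		/* both 0x3ffe1234000 */
	return 0;
}
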
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 906a7bfc2a78..6f96b5a71c63 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -18,13 +18,25 @@
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/pgtable.h>
-
+#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
+#include <asm/page.h>
#include <asm/tlb.h>
#define GMAP_SHADOW_FAKE_TABLE 1ULL
+static struct page *gmap_alloc_crst(void)
+{
+ struct page *page;
+
+ page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
+ if (!page)
+ return NULL;
+ __arch_set_page_dat(page_to_virt(page), 1UL << CRST_ALLOC_ORDER);
+ return page;
+}
+
/**
* gmap_alloc - allocate and initialize a guest address space
* @limit: maximum address of the gmap address space
@@ -67,7 +79,7 @@ static struct gmap *gmap_alloc(unsigned long limit)
spin_lock_init(&gmap->guest_table_lock);
spin_lock_init(&gmap->shadow_lock);
refcount_set(&gmap->ref_count, 1);
- page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
+ page = gmap_alloc_crst();
if (!page)
goto out_free;
page->index = 0;
@@ -308,7 +320,7 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
unsigned long *new;
/* since we dont free the gmap table until gmap_free we can unlock */
- page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
+ page = gmap_alloc_crst();
if (!page)
return -ENOMEM;
new = page_to_virt(page);
@@ -1759,7 +1771,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
BUG_ON(!gmap_is_shadow(sg));
/* Allocate a shadow region second table */
- page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
+ page = gmap_alloc_crst();
if (!page)
return -ENOMEM;
page->index = r2t & _REGION_ENTRY_ORIGIN;
@@ -1843,7 +1855,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
BUG_ON(!gmap_is_shadow(sg));
/* Allocate a shadow region second table */
- page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
+ page = gmap_alloc_crst();
if (!page)
return -ENOMEM;
page->index = r3t & _REGION_ENTRY_ORIGIN;
@@ -1927,7 +1939,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
/* Allocate a shadow segment table */
- page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
+ page = gmap_alloc_crst();
if (!page)
return -ENOMEM;
page->index = sgt & _REGION_ENTRY_ORIGIN;
@@ -2855,7 +2867,7 @@ int s390_replace_asce(struct gmap *gmap)
if ((gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
return -EINVAL;
- page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
+ page = gmap_alloc_crst();
if (!page)
return -ENOMEM;
page->index = 0;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 8b94d2212d33..43e612bc2bcd 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -35,6 +35,7 @@
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
+#include <asm/ctlreg.h>
#include <asm/kfence.h>
#include <asm/ptdump.h>
#include <asm/dma.h>
@@ -42,7 +43,6 @@
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
-#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
@@ -54,7 +54,7 @@
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");
-unsigned long __bootdata_preserved(s390_invalid_asce);
+struct ctlreg __bootdata_preserved(s390_invalid_asce);
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
@@ -164,14 +164,10 @@ void __init mem_init(void)
pv_init();
kfence_split_mapping();
- /* Setup guest page hinting */
- cmma_init();
/* this will put all low memory onto the freelists */
memblock_free_all();
setup_zero_pages(); /* Setup zeroed pages. */
-
- cmma_init_nodat();
}
void free_initmem(void)
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index c805b3e2592b..632c3a55feed 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -15,10 +15,10 @@
#include <linux/uio.h>
#include <linux/io.h>
#include <asm/asm-extable.h>
-#include <asm/ctl_reg.h>
#include <asm/abs_lowcore.h>
#include <asm/stacktrace.h>
#include <asm/maccess.h>
+#include <asm/ctlreg.h>
unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index 1e2ea706aa22..01f9b39e65f5 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -7,199 +7,18 @@
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/types.h>
#include <linux/mm.h>
-#include <linux/memblock.h>
-#include <linux/gfp.h>
-#include <linux/init.h>
-#include <asm/asm-extable.h>
-#include <asm/facility.h>
#include <asm/page-states.h>
+#include <asm/sections.h>
+#include <asm/page.h>
-static int cmma_flag = 1;
-
-static int __init cmma(char *str)
-{
- bool enabled;
-
- if (!kstrtobool(str, &enabled))
- cmma_flag = enabled;
- return 1;
-}
-__setup("cmma=", cmma);
-
-static inline int cmma_test_essa(void)
-{
- unsigned long tmp = 0;
- int rc = -EOPNOTSUPP;
-
- /* test ESSA_GET_STATE */
- asm volatile(
- " .insn rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
- "0: la %[rc],0\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : [rc] "+&d" (rc), [tmp] "+&d" (tmp)
- : [cmd] "i" (ESSA_GET_STATE));
- return rc;
-}
-
-void __init cmma_init(void)
-{
- if (!cmma_flag)
- return;
- if (cmma_test_essa()) {
- cmma_flag = 0;
- return;
- }
- if (test_facility(147))
- cmma_flag = 2;
-}
-
-static inline void set_page_unused(struct page *page, int order)
-{
- int i, rc;
-
- for (i = 0; i < (1 << order); i++)
- asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
- : "=&d" (rc)
- : "a" (page_to_phys(page + i)),
- "i" (ESSA_SET_UNUSED));
-}
-
-static inline void set_page_stable_dat(struct page *page, int order)
-{
- int i, rc;
-
- for (i = 0; i < (1 << order); i++)
- asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
- : "=&d" (rc)
- : "a" (page_to_phys(page + i)),
- "i" (ESSA_SET_STABLE));
-}
-
-static inline void set_page_stable_nodat(struct page *page, int order)
-{
- int i, rc;
-
- for (i = 0; i < (1 << order); i++)
- asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
- : "=&d" (rc)
- : "a" (page_to_phys(page + i)),
- "i" (ESSA_SET_STABLE_NODAT));
-}
-
-static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
-{
- unsigned long next;
- struct page *page;
- pmd_t *pmd;
-
- pmd = pmd_offset(pud, addr);
- do {
- next = pmd_addr_end(addr, end);
- if (pmd_none(*pmd) || pmd_large(*pmd))
- continue;
- page = phys_to_page(pmd_val(*pmd));
- set_bit(PG_arch_1, &page->flags);
- } while (pmd++, addr = next, addr != end);
-}
-
-static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
-{
- unsigned long next;
- struct page *page;
- pud_t *pud;
- int i;
-
- pud = pud_offset(p4d, addr);
- do {
- next = pud_addr_end(addr, end);
- if (pud_none(*pud) || pud_large(*pud))
- continue;
- if (!pud_folded(*pud)) {
- page = phys_to_page(pud_val(*pud));
- for (i = 0; i < 3; i++)
- set_bit(PG_arch_1, &page[i].flags);
- }
- mark_kernel_pmd(pud, addr, next);
- } while (pud++, addr = next, addr != end);
-}
-
-static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
-{
- unsigned long next;
- struct page *page;
- p4d_t *p4d;
- int i;
-
- p4d = p4d_offset(pgd, addr);
- do {
- next = p4d_addr_end(addr, end);
- if (p4d_none(*p4d))
- continue;
- if (!p4d_folded(*p4d)) {
- page = phys_to_page(p4d_val(*p4d));
- for (i = 0; i < 3; i++)
- set_bit(PG_arch_1, &page[i].flags);
- }
- mark_kernel_pud(p4d, addr, next);
- } while (p4d++, addr = next, addr != end);
-}
-
-static void mark_kernel_pgd(void)
-{
- unsigned long addr, next;
- struct page *page;
- pgd_t *pgd;
- int i;
-
- addr = 0;
- pgd = pgd_offset_k(addr);
- do {
- next = pgd_addr_end(addr, MODULES_END);
- if (pgd_none(*pgd))
- continue;
- if (!pgd_folded(*pgd)) {
- page = phys_to_page(pgd_val(*pgd));
- for (i = 0; i < 3; i++)
- set_bit(PG_arch_1, &page[i].flags);
- }
- mark_kernel_p4d(pgd, addr, next);
- } while (pgd++, addr = next, addr != MODULES_END);
-}
-
-void __init cmma_init_nodat(void)
-{
- struct page *page;
- unsigned long start, end, ix;
- int i;
-
- if (cmma_flag < 2)
- return;
- /* Mark pages used in kernel page tables */
- mark_kernel_pgd();
-
- /* Set all kernel pages not used for page tables to stable/no-dat */
- for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
- page = pfn_to_page(start);
- for (ix = start; ix < end; ix++, page++) {
- if (__test_and_clear_bit(PG_arch_1, &page->flags))
- continue; /* skip page table pages */
- if (!list_empty(&page->lru))
- continue; /* skip free pages */
- set_page_stable_nodat(page, 0);
- }
- }
-}
+int __bootdata_preserved(cmma_flag);
void arch_free_page(struct page *page, int order)
{
if (!cmma_flag)
return;
- set_page_unused(page, order);
+ __set_page_unused(page_to_virt(page), 1UL << order);
}
void arch_alloc_page(struct page *page, int order)
@@ -207,14 +26,7 @@ void arch_alloc_page(struct page *page, int order)
if (!cmma_flag)
return;
if (cmma_flag < 2)
- set_page_stable_dat(page, order);
+ __set_page_stable_dat(page_to_virt(page), 1UL << order);
else
- set_page_stable_nodat(page, order);
-}
-
-void arch_set_page_dat(struct page *page, int order)
-{
- if (!cmma_flag)
- return;
- set_page_stable_dat(page, order);
+ __set_page_stable_nodat(page_to_virt(page), 1UL << order);
}
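
arch_free_page() and arch_alloc_page() now delegate to inline __set_page_*() helpers that take a virtual address and a page count instead of a struct page and an allocation order. Mirroring the removed loops above, such a helper presumably issues one ESSA per page, roughly like this (a sketch only; the real definitions live in asm/page-states.h):

static inline void __set_page_unused(void *addr, unsigned long num_pages)
{
	unsigned long paddr = __pa(addr);
	unsigned long i, rc;

	for (i = 0; i < num_pages; i++, paddr += PAGE_SIZE)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (paddr), "i" (ESSA_SET_UNUSED));
}
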
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index b87e96c64b61..631e3a4ee2de 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -75,7 +75,7 @@ static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
break;
}
table = (unsigned long *)((unsigned long)old & mask);
- crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce);
+ crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce.val);
} else if (MACHINE_HAS_IDTE) {
cspg(old, *old, new);
} else {
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 75e1039f2ec5..008e487c94a6 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
+#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
@@ -43,11 +44,13 @@ __initcall(page_table_register_sysctl);
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
+ unsigned long *table;
if (!ptdesc)
return NULL;
- arch_set_page_dat(ptdesc_page(ptdesc), CRST_ALLOC_ORDER);
- return (unsigned long *) ptdesc_to_virt(ptdesc);
+ table = ptdesc_to_virt(ptdesc);
+ __arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
+ return table;
}
void crst_table_free(struct mm_struct *mm, unsigned long *table)
@@ -61,8 +64,8 @@ static void __crst_table_upgrade(void *arg)
/* change all active ASCEs to avoid the creation of new TLBs */
if (current->active_mm == mm) {
- S390_lowcore.user_asce = mm->context.asce;
- __ctl_load(S390_lowcore.user_asce, 7, 7);
+ S390_lowcore.user_asce.val = mm->context.asce;
+ local_ctl_load(7, &S390_lowcore.user_asce);
}
__tlb_flush_local();
}
@@ -130,11 +133,6 @@ err_p4d:
return -ENOMEM;
}
-static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
-{
- return atomic_fetch_xor(bits, v) ^ bits;
-}
-
#ifdef CONFIG_PGSTE
struct page *page_table_alloc_pgste(struct mm_struct *mm)
@@ -145,6 +143,7 @@ struct page *page_table_alloc_pgste(struct mm_struct *mm)
ptdesc = pagetable_alloc(GFP_KERNEL, 0);
if (ptdesc) {
table = (u64 *)ptdesc_to_virt(ptdesc);
+ __arch_set_page_dat(table, 1);
memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
}
@@ -158,125 +157,11 @@ void page_table_free_pgste(struct page *page)
#endif /* CONFIG_PGSTE */
-/*
- * A 2KB-pgtable is either upper or lower half of a normal page.
- * The second half of the page may be unused or used as another
- * 2KB-pgtable.
- *
- * Whenever possible the parent page for a new 2KB-pgtable is picked
- * from the list of partially allocated pages mm_context_t::pgtable_list.
- * In case the list is empty a new parent page is allocated and added to
- * the list.
- *
- * When a parent page gets fully allocated it contains 2KB-pgtables in both
- * upper and lower halves and is removed from mm_context_t::pgtable_list.
- *
- * When 2KB-pgtable is freed from to fully allocated parent page that
- * page turns partially allocated and added to mm_context_t::pgtable_list.
- *
- * If 2KB-pgtable is freed from the partially allocated parent page that
- * page turns unused and gets removed from mm_context_t::pgtable_list.
- * Furthermore, the unused parent page is released.
- *
- * As follows from the above, no unallocated or fully allocated parent
- * pages are contained in mm_context_t::pgtable_list.
- *
- * The upper byte (bits 24-31) of the parent page _refcount is used
- * for tracking contained 2KB-pgtables and has the following format:
- *
- * PP AA
- * 01234567 upper byte (bits 24-31) of struct page::_refcount
- * || ||
- * || |+--- upper 2KB-pgtable is allocated
- * || +---- lower 2KB-pgtable is allocated
- * |+------- upper 2KB-pgtable is pending for removal
- * +-------- lower 2KB-pgtable is pending for removal
- *
- * (See commit 620b4e903179 ("s390: use _refcount for pgtables") on why
- * using _refcount is possible).
- *
- * When 2KB-pgtable is allocated the corresponding AA bit is set to 1.
- * The parent page is either:
- * - added to mm_context_t::pgtable_list in case the second half of the
- * parent page is still unallocated;
- * - removed from mm_context_t::pgtable_list in case both hales of the
- * parent page are allocated;
- * These operations are protected with mm_context_t::lock.
- *
- * When 2KB-pgtable is deallocated the corresponding AA bit is set to 0
- * and the corresponding PP bit is set to 1 in a single atomic operation.
- * Thus, PP and AA bits corresponding to the same 2KB-pgtable are mutually
- * exclusive and may never be both set to 1!
- * The parent page is either:
- * - added to mm_context_t::pgtable_list in case the second half of the
- * parent page is still allocated;
- * - removed from mm_context_t::pgtable_list in case the second half of
- * the parent page is unallocated;
- * These operations are protected with mm_context_t::lock.
- *
- * It is important to understand that mm_context_t::lock only protects
- * mm_context_t::pgtable_list and AA bits, but not the parent page itself
- * and PP bits.
- *
- * Releasing the parent page happens whenever the PP bit turns from 1 to 0,
- * while both AA bits and the second PP bit are already unset. Then the
- * parent page does not contain any 2KB-pgtable fragment anymore, and it has
- * also been removed from mm_context_t::pgtable_list. It is safe to release
- * the page therefore.
- *
- * PGSTE memory spaces use full 4KB-pgtables and do not need most of the
- * logic described above. Both AA bits are set to 1 to denote a 4KB-pgtable
- * while the PP bits are never used, nor such a page is added to or removed
- * from mm_context_t::pgtable_list.
- *
- * pte_free_defer() overrides those rules: it takes the page off pgtable_list,
- * and prevents both 2K fragments from being reused. pte_free_defer() has to
- * guarantee that its pgtable cannot be reused before the RCU grace period
- * has elapsed (which page_table_free_rcu() does not actually guarantee).
- * But for simplicity, because page->rcu_head overlays page->lru, and because
- * the RCU callback might not be called before the mm_context_t has been freed,
- * pte_free_defer() in this implementation prevents both fragments from being
- * reused, and delays making the call to RCU until both fragments are freed.
- */
unsigned long *page_table_alloc(struct mm_struct *mm)
{
- unsigned long *table;
struct ptdesc *ptdesc;
- unsigned int mask, bit;
-
- /* Try to get a fragment of a 4K page as a 2K page table */
- if (!mm_alloc_pgste(mm)) {
- table = NULL;
- spin_lock_bh(&mm->context.lock);
- if (!list_empty(&mm->context.pgtable_list)) {
- ptdesc = list_first_entry(&mm->context.pgtable_list,
- struct ptdesc, pt_list);
- mask = atomic_read(&ptdesc->_refcount) >> 24;
- /*
- * The pending removal bits must also be checked.
- * Failure to do so might lead to an impossible
- * value of (i.e 0x13 or 0x23) written to _refcount.
- * Such values violate the assumption that pending and
- * allocation bits are mutually exclusive, and the rest
- * of the code unrails as result. That could lead to
- * a whole bunch of races and corruptions.
- */
- mask = (mask | (mask >> 4)) & 0x03U;
- if (mask != 0x03U) {
- table = (unsigned long *) ptdesc_to_virt(ptdesc);
- bit = mask & 1; /* =1 -> second 2K */
- if (bit)
- table += PTRS_PER_PTE;
- atomic_xor_bits(&ptdesc->_refcount,
- 0x01U << (bit + 24));
- list_del_init(&ptdesc->pt_list);
- }
- }
- spin_unlock_bh(&mm->context.lock);
- if (table)
- return table;
- }
- /* Allocate a fresh page */
+ unsigned long *table;
+
ptdesc = pagetable_alloc(GFP_KERNEL, 0);
if (!ptdesc)
return NULL;
@@ -284,177 +169,57 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
pagetable_free(ptdesc);
return NULL;
}
- arch_set_page_dat(ptdesc_page(ptdesc), 0);
- /* Initialize page table */
- table = (unsigned long *) ptdesc_to_virt(ptdesc);
- if (mm_alloc_pgste(mm)) {
- /* Return 4K page table with PGSTEs */
- INIT_LIST_HEAD(&ptdesc->pt_list);
- atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24);
- memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
- memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
- } else {
- /* Return the first 2K fragment of the page */
- atomic_xor_bits(&ptdesc->_refcount, 0x01U << 24);
- memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
- spin_lock_bh(&mm->context.lock);
- list_add(&ptdesc->pt_list, &mm->context.pgtable_list);
- spin_unlock_bh(&mm->context.lock);
- }
+ table = ptdesc_to_virt(ptdesc);
+ __arch_set_page_dat(table, 1);
+ /* pt_list is used by gmap only */
+ INIT_LIST_HEAD(&ptdesc->pt_list);
+ memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
+ memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
return table;
}
-static void page_table_release_check(struct page *page, void *table,
- unsigned int half, unsigned int mask)
+static void pagetable_pte_dtor_free(struct ptdesc *ptdesc)
{
- char msg[128];
-
- if (!IS_ENABLED(CONFIG_DEBUG_VM))
- return;
- if (!mask && list_empty(&page->lru))
- return;
- snprintf(msg, sizeof(msg),
- "Invalid pgtable %p release half 0x%02x mask 0x%02x",
- table, half, mask);
- dump_page(page, msg);
-}
-
-static void pte_free_now(struct rcu_head *head)
-{
- struct ptdesc *ptdesc;
-
- ptdesc = container_of(head, struct ptdesc, pt_rcu_head);
pagetable_pte_dtor(ptdesc);
pagetable_free(ptdesc);
}
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
- unsigned int mask, bit, half;
struct ptdesc *ptdesc = virt_to_ptdesc(table);
- if (!mm_alloc_pgste(mm)) {
- /* Free 2K page table fragment of a 4K page */
- bit = ((unsigned long) table & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
- spin_lock_bh(&mm->context.lock);
- /*
- * Mark the page for delayed release. The actual release
- * will happen outside of the critical section from this
- * function or from __tlb_remove_table()
- */
- mask = atomic_xor_bits(&ptdesc->_refcount, 0x11U << (bit + 24));
- mask >>= 24;
- if ((mask & 0x03U) && !folio_test_active(ptdesc_folio(ptdesc))) {
- /*
- * Other half is allocated, and neither half has had
- * its free deferred: add page to head of list, to make
- * this freed half available for immediate reuse.
- */
- list_add(&ptdesc->pt_list, &mm->context.pgtable_list);
- } else {
- /* If page is on list, now remove it. */
- list_del_init(&ptdesc->pt_list);
- }
- spin_unlock_bh(&mm->context.lock);
- mask = atomic_xor_bits(&ptdesc->_refcount, 0x10U << (bit + 24));
- mask >>= 24;
- if (mask != 0x00U)
- return;
- half = 0x01U << bit;
- } else {
- half = 0x03U;
- mask = atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24);
- mask >>= 24;
- }
-
- page_table_release_check(ptdesc_page(ptdesc), table, half, mask);
- if (folio_test_clear_active(ptdesc_folio(ptdesc)))
- call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
- else
- pte_free_now(&ptdesc->pt_rcu_head);
+ pagetable_pte_dtor_free(ptdesc);
}
-void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
- unsigned long vmaddr)
+void __tlb_remove_table(void *table)
{
- struct mm_struct *mm;
- unsigned int bit, mask;
struct ptdesc *ptdesc = virt_to_ptdesc(table);
+ struct page *page = ptdesc_page(ptdesc);
- mm = tlb->mm;
- if (mm_alloc_pgste(mm)) {
- gmap_unlink(mm, table, vmaddr);
- table = (unsigned long *) ((unsigned long)table | 0x03U);
- tlb_remove_ptdesc(tlb, table);
+ if (compound_order(page) == CRST_ALLOC_ORDER) {
+ /* pmd, pud, or p4d */
+ pagetable_free(ptdesc);
return;
}
- bit = ((unsigned long) table & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
- spin_lock_bh(&mm->context.lock);
- /*
- * Mark the page for delayed release. The actual release will happen
- * outside of the critical section from __tlb_remove_table() or from
- * page_table_free()
- */
- mask = atomic_xor_bits(&ptdesc->_refcount, 0x11U << (bit + 24));
- mask >>= 24;
- if ((mask & 0x03U) && !folio_test_active(ptdesc_folio(ptdesc))) {
- /*
- * Other half is allocated, and neither half has had
- * its free deferred: add page to end of list, to make
- * this freed half available for reuse once its pending
- * bit has been cleared by __tlb_remove_table().
- */
- list_add_tail(&ptdesc->pt_list, &mm->context.pgtable_list);
- } else {
- /* If page is on list, now remove it. */
- list_del_init(&ptdesc->pt_list);
- }
- spin_unlock_bh(&mm->context.lock);
- table = (unsigned long *) ((unsigned long) table | (0x01U << bit));
- tlb_remove_table(tlb, table);
+ pagetable_pte_dtor_free(ptdesc);
}
-void __tlb_remove_table(void *_table)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static void pte_free_now(struct rcu_head *head)
{
- unsigned int mask = (unsigned long) _table & 0x03U, half = mask;
- void *table = (void *)((unsigned long) _table ^ mask);
- struct ptdesc *ptdesc = virt_to_ptdesc(table);
-
- switch (half) {
- case 0x00U: /* pmd, pud, or p4d */
- pagetable_free(ptdesc);
- return;
- case 0x01U: /* lower 2K of a 4K page table */
- case 0x02U: /* higher 2K of a 4K page table */
- mask = atomic_xor_bits(&ptdesc->_refcount, mask << (4 + 24));
- mask >>= 24;
- if (mask != 0x00U)
- return;
- break;
- case 0x03U: /* 4K page table with pgstes */
- mask = atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24);
- mask >>= 24;
- break;
- }
+ struct ptdesc *ptdesc = container_of(head, struct ptdesc, pt_rcu_head);
- page_table_release_check(ptdesc_page(ptdesc), table, half, mask);
- if (folio_test_clear_active(ptdesc_folio(ptdesc)))
- call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
- else
- pte_free_now(&ptdesc->pt_rcu_head);
+ pagetable_pte_dtor_free(ptdesc);
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
- struct page *page;
+ struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);
- page = virt_to_page(pgtable);
- SetPageActive(page);
- page_table_free(mm, (unsigned long *)pgtable);
+ call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
/*
- * page_table_free() does not do the pgste gmap_unlink() which
- * page_table_free_rcu() does: warn us if pgste ever reaches here.
+ * THPs are not allowed for KVM guests. Warn if pgste ever reaches here.
+ * Turn to the generic pte_free_defer() version once gmap is removed.
*/
WARN_ON_ONCE(mm_has_pgste(mm));
}
@@ -487,11 +252,10 @@ static unsigned long *base_crst_alloc(unsigned long val)
unsigned long *table;
struct ptdesc *ptdesc;
- ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, CRST_ALLOC_ORDER);
+ ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
if (!ptdesc)
return NULL;
table = ptdesc_address(ptdesc);
-
crst_table_init(table, val);
return table;
}
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 6957d2ed97bf..186a020857cf 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -12,8 +12,10 @@
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
+#include <asm/page-states.h>
#include <asm/cacheflush.h>
#include <asm/nospec-branch.h>
+#include <asm/ctlreg.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
@@ -45,8 +47,10 @@ void *vmem_crst_alloc(unsigned long val)
unsigned long *table;
table = vmem_alloc_pages(CRST_ALLOC_ORDER);
- if (table)
- crst_table_init(table, val);
+ if (!table)
+ return NULL;
+ crst_table_init(table, val);
+ __arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
return table;
}
@@ -62,6 +66,7 @@ pte_t __ref *vmem_pte_alloc(void)
if (!pte)
return NULL;
memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
+ __arch_set_page_dat(pte, 1);
return pte;
}
@@ -498,6 +503,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
return ret;
}
+#ifdef CONFIG_MEMORY_HOTPLUG
+
void vmemmap_free(unsigned long start, unsigned long end,
struct vmem_altmap *altmap)
{
@@ -506,6 +513,8 @@ void vmemmap_free(unsigned long start, unsigned long end,
mutex_unlock(&vmem_mutex);
}
+#endif
+
void vmem_remove_mapping(unsigned long start, unsigned long size)
{
mutex_lock(&vmem_mutex);
@@ -659,7 +668,7 @@ void __init vmem_map_init(void)
__set_memory_4k(__va(0), RELOC_HIDE(__va(0), ident_map_size));
}
if (MACHINE_HAS_NX)
- ctl_set_bit(0, 20);
+ system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT);
pr_info("Write protected kernel read-only data: %luk\n",
(unsigned long)(__end_rodata - _stext) >> 10);
}
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile
index 5ae31ca9dd44..0547a10406e7 100644
--- a/arch/s390/pci/Makefile
+++ b/arch/s390/pci/Makefile
@@ -3,7 +3,7 @@
# Makefile for the s390 PCI subsystem.
#
-obj-$(CONFIG_PCI) += pci.o pci_irq.o pci_dma.o pci_clp.o pci_sysfs.o \
+obj-$(CONFIG_PCI) += pci.o pci_irq.o pci_clp.o pci_sysfs.o \
pci_event.o pci_debug.o pci_insn.o pci_mmio.o \
pci_bus.o pci_kvm_hook.o
obj-$(CONFIG_PCI_IOV) += pci_iov.o
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index d34d5813d006..676ac74026a8 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -124,7 +124,11 @@ int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
WARN_ON_ONCE(iota & 0x3fff);
fib.pba = base;
- fib.pal = limit;
+ /* Work around an off-by-one in the ISM virt device */
+ if (zdev->pft == PCI_FUNC_TYPE_ISM && limit > base)
+ fib.pal = limit + (1 << 12);
+ else
+ fib.pal = limit;
fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
fib.gd = zdev->gisa;
cc = zpci_mod_fc(req, &fib, status);
@@ -153,6 +157,7 @@ int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
+ struct zpci_iommu_ctrs *ctrs;
struct zpci_fib fib = {0};
u8 cc, status;
@@ -165,9 +170,15 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev)
WARN_ON((u64) zdev->fmb & 0xf);
/* reset software counters */
- atomic64_set(&zdev->allocated_pages, 0);
- atomic64_set(&zdev->mapped_pages, 0);
- atomic64_set(&zdev->unmapped_pages, 0);
+ ctrs = zpci_get_iommu_ctrs(zdev);
+ if (ctrs) {
+ atomic64_set(&ctrs->mapped_pages, 0);
+ atomic64_set(&ctrs->unmapped_pages, 0);
+ atomic64_set(&ctrs->global_rpcits, 0);
+ atomic64_set(&ctrs->sync_map_rpcits, 0);
+ atomic64_set(&ctrs->sync_rpcits, 0);
+ }
+
fib.fmb_addr = virt_to_phys(zdev->fmb);
fib.gd = zdev->gisa;
@@ -582,7 +593,6 @@ int pcibios_device_add(struct pci_dev *pdev)
pdev->no_vf_scan = 1;
pdev->dev.groups = zpci_attr_groups;
- pdev->dev.dma_ops = &s390_pci_dma_ops;
zpci_map_resources(pdev);
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
@@ -756,8 +766,6 @@ int zpci_hot_reset_device(struct zpci_dev *zdev)
if (zdev->dma_table)
rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
virt_to_phys(zdev->dma_table), &status);
- else
- rc = zpci_dma_init_device(zdev);
if (rc) {
zpci_disable_device(zdev);
return rc;
@@ -865,11 +873,6 @@ int zpci_deconfigure_device(struct zpci_dev *zdev)
if (zdev->zbus->bus)
zpci_bus_remove_device(zdev, false);
- if (zdev->dma_table) {
- rc = zpci_dma_exit_device(zdev);
- if (rc)
- return rc;
- }
if (zdev_enabled(zdev)) {
rc = zpci_disable_device(zdev);
if (rc)
@@ -918,8 +921,6 @@ void zpci_release_device(struct kref *kref)
if (zdev->zbus->bus)
zpci_bus_remove_device(zdev, false);
- if (zdev->dma_table)
- zpci_dma_exit_device(zdev);
if (zdev_enabled(zdev))
zpci_disable_device(zdev);
@@ -1094,7 +1095,7 @@ static int __init pci_base_init(void)
if (MACHINE_HAS_PCI_MIO) {
static_branch_enable(&have_mio);
- ctl_set_bit(2, 5);
+ system_ctl_set_bit(2, CR2_MIO_ADDRESSING_BIT);
}
rc = zpci_debug_init();
@@ -1109,10 +1110,6 @@ static int __init pci_base_init(void)
if (rc)
goto out_irq;
- rc = zpci_dma_init();
- if (rc)
- goto out_dma;
-
rc = clp_scan_pci_devices();
if (rc)
goto out_find;
@@ -1122,8 +1119,6 @@ static int __init pci_base_init(void)
return 0;
out_find:
- zpci_dma_exit();
-out_dma:
zpci_irq_exit();
out_irq:
zpci_mem_exit();
diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
index 32245b970a0c..daa5d7450c7d 100644
--- a/arch/s390/pci/pci_bus.c
+++ b/arch/s390/pci/pci_bus.c
@@ -47,11 +47,6 @@ static int zpci_bus_prepare_device(struct zpci_dev *zdev)
rc = zpci_enable_device(zdev);
if (rc)
return rc;
- rc = zpci_dma_init_device(zdev);
- if (rc) {
- zpci_disable_device(zdev);
- return rc;
- }
}
if (!zdev->has_resources) {
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index ca6bd98eec13..6dde2263c79d 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -53,9 +53,11 @@ static char *pci_fmt3_names[] = {
};
static char *pci_sw_names[] = {
- "Allocated pages",
"Mapped pages",
"Unmapped pages",
+ "Global RPCITs",
+ "Sync Map RPCITs",
+ "Sync RPCITs",
};
static void pci_fmb_show(struct seq_file *m, char *name[], int length,
@@ -69,10 +71,14 @@ static void pci_fmb_show(struct seq_file *m, char *name[], int length,
static void pci_sw_counter_show(struct seq_file *m)
{
- struct zpci_dev *zdev = m->private;
- atomic64_t *counter = &zdev->allocated_pages;
+ struct zpci_iommu_ctrs *ctrs = zpci_get_iommu_ctrs(m->private);
+ atomic64_t *counter;
int i;
+ if (!ctrs)
+ return;
+
+ counter = &ctrs->mapped_pages;
for (i = 0; i < ARRAY_SIZE(pci_sw_names); i++, counter++)
seq_printf(m, "%26s:\t%llu\n", pci_sw_names[i],
atomic64_read(counter));
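The loop above advances a single atomic64_t pointer across ARRAY_SIZE(pci_sw_names) entries, so it implicitly assumes that the structure returned by zpci_get_iommu_ctrs() lists its counters in the same order as pci_sw_names[]. A sketch of that assumed layout (the real definition lives in the s390 headers and may carry additional members):

	struct zpci_iommu_ctrs {
		atomic64_t	mapped_pages;
		atomic64_t	unmapped_pages;
		atomic64_t	global_rpcits;
		atomic64_t	sync_map_rpcits;
		atomic64_t	sync_rpcits;
	};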
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
deleted file mode 100644
index 99209085c75b..000000000000
--- a/arch/s390/pci/pci_dma.c
+++ /dev/null
@@ -1,746 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright IBM Corp. 2012
- *
- * Author(s):
- * Jan Glauber <jang@linux.vnet.ibm.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/iommu-helper.h>
-#include <linux/dma-map-ops.h>
-#include <linux/vmalloc.h>
-#include <linux/pci.h>
-#include <asm/pci_dma.h>
-
-static struct kmem_cache *dma_region_table_cache;
-static struct kmem_cache *dma_page_table_cache;
-static int s390_iommu_strict;
-static u64 s390_iommu_aperture;
-static u32 s390_iommu_aperture_factor = 1;
-
-static int zpci_refresh_global(struct zpci_dev *zdev)
-{
- return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
- zdev->iommu_pages * PAGE_SIZE);
-}
-
-unsigned long *dma_alloc_cpu_table(gfp_t gfp)
-{
- unsigned long *table, *entry;
-
- table = kmem_cache_alloc(dma_region_table_cache, gfp);
- if (!table)
- return NULL;
-
- for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
- *entry = ZPCI_TABLE_INVALID;
- return table;
-}
-
-static void dma_free_cpu_table(void *table)
-{
- kmem_cache_free(dma_region_table_cache, table);
-}
-
-static unsigned long *dma_alloc_page_table(gfp_t gfp)
-{
- unsigned long *table, *entry;
-
- table = kmem_cache_alloc(dma_page_table_cache, gfp);
- if (!table)
- return NULL;
-
- for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
- *entry = ZPCI_PTE_INVALID;
- return table;
-}
-
-static void dma_free_page_table(void *table)
-{
- kmem_cache_free(dma_page_table_cache, table);
-}
-
-static unsigned long *dma_get_seg_table_origin(unsigned long *rtep, gfp_t gfp)
-{
- unsigned long old_rte, rte;
- unsigned long *sto;
-
- rte = READ_ONCE(*rtep);
- if (reg_entry_isvalid(rte)) {
- sto = get_rt_sto(rte);
- } else {
- sto = dma_alloc_cpu_table(gfp);
- if (!sto)
- return NULL;
-
- set_rt_sto(&rte, virt_to_phys(sto));
- validate_rt_entry(&rte);
- entry_clr_protected(&rte);
-
- old_rte = cmpxchg(rtep, ZPCI_TABLE_INVALID, rte);
- if (old_rte != ZPCI_TABLE_INVALID) {
- /* Someone else was faster, use theirs */
- dma_free_cpu_table(sto);
- sto = get_rt_sto(old_rte);
- }
- }
- return sto;
-}
-
-static unsigned long *dma_get_page_table_origin(unsigned long *step, gfp_t gfp)
-{
- unsigned long old_ste, ste;
- unsigned long *pto;
-
- ste = READ_ONCE(*step);
- if (reg_entry_isvalid(ste)) {
- pto = get_st_pto(ste);
- } else {
- pto = dma_alloc_page_table(gfp);
- if (!pto)
- return NULL;
- set_st_pto(&ste, virt_to_phys(pto));
- validate_st_entry(&ste);
- entry_clr_protected(&ste);
-
- old_ste = cmpxchg(step, ZPCI_TABLE_INVALID, ste);
- if (old_ste != ZPCI_TABLE_INVALID) {
- /* Someone else was faster, use theirs */
- dma_free_page_table(pto);
- pto = get_st_pto(old_ste);
- }
- }
- return pto;
-}
-
-unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr,
- gfp_t gfp)
-{
- unsigned long *sto, *pto;
- unsigned int rtx, sx, px;
-
- rtx = calc_rtx(dma_addr);
- sto = dma_get_seg_table_origin(&rto[rtx], gfp);
- if (!sto)
- return NULL;
-
- sx = calc_sx(dma_addr);
- pto = dma_get_page_table_origin(&sto[sx], gfp);
- if (!pto)
- return NULL;
-
- px = calc_px(dma_addr);
- return &pto[px];
-}
-
-void dma_update_cpu_trans(unsigned long *ptep, phys_addr_t page_addr, int flags)
-{
- unsigned long pte;
-
- pte = READ_ONCE(*ptep);
- if (flags & ZPCI_PTE_INVALID) {
- invalidate_pt_entry(&pte);
- } else {
- set_pt_pfaa(&pte, page_addr);
- validate_pt_entry(&pte);
- }
-
- if (flags & ZPCI_TABLE_PROTECTED)
- entry_set_protected(&pte);
- else
- entry_clr_protected(&pte);
-
- xchg(ptep, pte);
-}
-
-static int __dma_update_trans(struct zpci_dev *zdev, phys_addr_t pa,
- dma_addr_t dma_addr, size_t size, int flags)
-{
- unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
- phys_addr_t page_addr = (pa & PAGE_MASK);
- unsigned long *entry;
- int i, rc = 0;
-
- if (!nr_pages)
- return -EINVAL;
-
- if (!zdev->dma_table)
- return -EINVAL;
-
- for (i = 0; i < nr_pages; i++) {
- entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr,
- GFP_ATOMIC);
- if (!entry) {
- rc = -ENOMEM;
- goto undo_cpu_trans;
- }
- dma_update_cpu_trans(entry, page_addr, flags);
- page_addr += PAGE_SIZE;
- dma_addr += PAGE_SIZE;
- }
-
-undo_cpu_trans:
- if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
- flags = ZPCI_PTE_INVALID;
- while (i-- > 0) {
- page_addr -= PAGE_SIZE;
- dma_addr -= PAGE_SIZE;
- entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr,
- GFP_ATOMIC);
- if (!entry)
- break;
- dma_update_cpu_trans(entry, page_addr, flags);
- }
- }
- return rc;
-}
-
-static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
- size_t size, int flags)
-{
- unsigned long irqflags;
- int ret;
-
- /*
- * With zdev->tlb_refresh == 0, rpcit is not required to establish new
- * translations when previously invalid translation-table entries are
- * validated. With lazy unmap, rpcit is skipped for previously valid
- * entries, but a global rpcit is then required before any address can
- * be re-used, i.e. after each iommu bitmap wrap-around.
- */
- if ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID) {
- if (!zdev->tlb_refresh)
- return 0;
- } else {
- if (!s390_iommu_strict)
- return 0;
- }
-
- ret = zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
- PAGE_ALIGN(size));
- if (ret == -ENOMEM && !s390_iommu_strict) {
- /* enable the hypervisor to free some resources */
- if (zpci_refresh_global(zdev))
- goto out;
-
- spin_lock_irqsave(&zdev->iommu_bitmap_lock, irqflags);
- bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
- zdev->lazy_bitmap, zdev->iommu_pages);
- bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
- spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, irqflags);
- ret = 0;
- }
-out:
- return ret;
-}
-
-static int dma_update_trans(struct zpci_dev *zdev, phys_addr_t pa,
- dma_addr_t dma_addr, size_t size, int flags)
-{
- int rc;
-
- rc = __dma_update_trans(zdev, pa, dma_addr, size, flags);
- if (rc)
- return rc;
-
- rc = __dma_purge_tlb(zdev, dma_addr, size, flags);
- if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
- __dma_update_trans(zdev, pa, dma_addr, size, ZPCI_PTE_INVALID);
-
- return rc;
-}
-
-void dma_free_seg_table(unsigned long entry)
-{
- unsigned long *sto = get_rt_sto(entry);
- int sx;
-
- for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
- if (reg_entry_isvalid(sto[sx]))
- dma_free_page_table(get_st_pto(sto[sx]));
-
- dma_free_cpu_table(sto);
-}
-
-void dma_cleanup_tables(unsigned long *table)
-{
- int rtx;
-
- if (!table)
- return;
-
- for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
- if (reg_entry_isvalid(table[rtx]))
- dma_free_seg_table(table[rtx]);
-
- dma_free_cpu_table(table);
-}
-
-static unsigned long __dma_alloc_iommu(struct device *dev,
- unsigned long start, int size)
-{
- struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
-
- return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
- start, size, zdev->start_dma >> PAGE_SHIFT,
- dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT),
- 0);
-}
-
-static dma_addr_t dma_alloc_address(struct device *dev, int size)
-{
- struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
- unsigned long offset, flags;
-
- spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
- offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
- if (offset == -1) {
- if (!s390_iommu_strict) {
- /* global flush before DMA addresses are reused */
- if (zpci_refresh_global(zdev))
- goto out_error;
-
- bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
- zdev->lazy_bitmap, zdev->iommu_pages);
- bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
- }
- /* wrap-around */
- offset = __dma_alloc_iommu(dev, 0, size);
- if (offset == -1)
- goto out_error;
- }
- zdev->next_bit = offset + size;
- spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
-
- return zdev->start_dma + offset * PAGE_SIZE;
-
-out_error:
- spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
- return DMA_MAPPING_ERROR;
-}
-
-static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
-{
- struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
- unsigned long flags, offset;
-
- offset = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
-
- spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
- if (!zdev->iommu_bitmap)
- goto out;
-
- if (s390_iommu_strict)
- bitmap_clear(zdev->iommu_bitmap, offset, size);
- else
- bitmap_set(zdev->lazy_bitmap, offset, size);
-
-out:
- spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
-}
-
-static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
-{
- struct {
- unsigned long rc;
- unsigned long addr;
- } __packed data = {rc, addr};
-
- zpci_err_hex(&data, sizeof(data));
-}
-
-static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs)
-{
- struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
- unsigned long pa = page_to_phys(page) + offset;
- int flags = ZPCI_PTE_VALID;
- unsigned long nr_pages;
- dma_addr_t dma_addr;
- int ret;
-
- /* This rounds up number of pages based on size and offset */
- nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
- dma_addr = dma_alloc_address(dev, nr_pages);
- if (dma_addr == DMA_MAPPING_ERROR) {
- ret = -ENOSPC;
- goto out_err;
- }
-
- /* Use rounded up size */
- size = nr_pages * PAGE_SIZE;
-
- if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
- flags |= ZPCI_TABLE_PROTECTED;
-
- ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
- if (ret)
- goto out_free;
-
- atomic64_add(nr_pages, &zdev->mapped_pages);
- return dma_addr + (offset & ~PAGE_MASK);
-
-out_free:
- dma_free_address(dev, dma_addr, nr_pages);
-out_err:
- zpci_err("map error:\n");
- zpci_err_dma(ret, pa);
- return DMA_MAPPING_ERROR;
-}
-
-static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction,
- unsigned long attrs)
-{
- struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
- int npages, ret;
-
- npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
- dma_addr = dma_addr & PAGE_MASK;
- ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
- ZPCI_PTE_INVALID);
- if (ret) {
- zpci_err("unmap error:\n");
- zpci_err_dma(ret, dma_addr);
- return;
- }
-
- atomic64_add(npages, &zdev->unmapped_pages);
- dma_free_address(dev, dma_addr, npages);
-}
-
-static void *s390_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- unsigned long attrs)
-{
- struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
- struct page *page;
- phys_addr_t pa;
- dma_addr_t map;
-
- size = PAGE_ALIGN(size);
- page = alloc_pages(flag | __GFP_ZERO, get_order(size));
- if (!page)
- return NULL;
-
- pa = page_to_phys(page);
- map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
- if (dma_mapping_error(dev, map)) {
- __free_pages(page, get_order(size));
- return NULL;
- }
-
- atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
- if (dma_handle)
- *dma_handle = map;
- return phys_to_virt(pa);
-}
-
-static void s390_dma_free(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs)
-{
- struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
-
- size = PAGE_ALIGN(size);
- atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
- s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0);
- free_pages((unsigned long)vaddr, get_order(size));
-}
-
-/* Map a segment into a contiguous dma address area */
-static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
- size_t size, dma_addr_t *handle,
- enum dma_data_direction dir)
-{
- unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
- struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
- dma_addr_t dma_addr_base, dma_addr;
- int flags = ZPCI_PTE_VALID;
- struct scatterlist *s;
- phys_addr_t pa = 0;
- int ret;
-
- dma_addr_base = dma_alloc_address(dev, nr_pages);
- if (dma_addr_base == DMA_MAPPING_ERROR)
- return -ENOMEM;
-
- dma_addr = dma_addr_base;
- if (dir == DMA_NONE || dir == DMA_TO_DEVICE)
- flags |= ZPCI_TABLE_PROTECTED;
-
- for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
- pa = page_to_phys(sg_page(s));
- ret = __dma_update_trans(zdev, pa, dma_addr,
- s->offset + s->length, flags);
- if (ret)
- goto unmap;
-
- dma_addr += s->offset + s->length;
- }
- ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
- if (ret)
- goto unmap;
-
- *handle = dma_addr_base;
- atomic64_add(nr_pages, &zdev->mapped_pages);
-
- return ret;
-
-unmap:
- dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
- ZPCI_PTE_INVALID);
- dma_free_address(dev, dma_addr_base, nr_pages);
- zpci_err("map error:\n");
- zpci_err_dma(ret, pa);
- return ret;
-}
-
-static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nr_elements, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct scatterlist *s = sg, *start = sg, *dma = sg;
- unsigned int max = dma_get_max_seg_size(dev);
- unsigned int size = s->offset + s->length;
- unsigned int offset = s->offset;
- int count = 0, i, ret;
-
- for (i = 1; i < nr_elements; i++) {
- s = sg_next(s);
-
- s->dma_length = 0;
-
- if (s->offset || (size & ~PAGE_MASK) ||
- size + s->length > max) {
- ret = __s390_dma_map_sg(dev, start, size,
- &dma->dma_address, dir);
- if (ret)
- goto unmap;
-
- dma->dma_address += offset;
- dma->dma_length = size - offset;
-
- size = offset = s->offset;
- start = s;
- dma = sg_next(dma);
- count++;
- }
- size += s->length;
- }
- ret = __s390_dma_map_sg(dev, start, size, &dma->dma_address, dir);
- if (ret)
- goto unmap;
-
- dma->dma_address += offset;
- dma->dma_length = size - offset;
-
- return count + 1;
-unmap:
- for_each_sg(sg, s, count, i)
- s390_dma_unmap_pages(dev, sg_dma_address(s), sg_dma_len(s),
- dir, attrs);
-
- return ret;
-}
-
-static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nr_elements, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct scatterlist *s;
- int i;
-
- for_each_sg(sg, s, nr_elements, i) {
- if (s->dma_length)
- s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
- dir, attrs);
- s->dma_address = 0;
- s->dma_length = 0;
- }
-}
-
-static unsigned long *bitmap_vzalloc(size_t bits, gfp_t flags)
-{
- size_t n = BITS_TO_LONGS(bits);
- size_t bytes;
-
- if (unlikely(check_mul_overflow(n, sizeof(unsigned long), &bytes)))
- return NULL;
-
- return vzalloc(bytes);
-}
-
-int zpci_dma_init_device(struct zpci_dev *zdev)
-{
- u8 status;
- int rc;
-
- /*
- * At this point, if the device is part of an IOMMU domain, this would
- * be a strong hint towards a bug in the IOMMU API (common) code and/or
- * simultaneous access via IOMMU and DMA API. So let's issue a warning.
- */
- WARN_ON(zdev->s390_domain);
-
- spin_lock_init(&zdev->iommu_bitmap_lock);
-
- zdev->dma_table = dma_alloc_cpu_table(GFP_KERNEL);
- if (!zdev->dma_table) {
- rc = -ENOMEM;
- goto out;
- }
-
- /*
- * Restrict the iommu bitmap size to the minimum of the following:
- * - s390_iommu_aperture which defaults to high_memory
- * - 3-level pagetable address limit minus start_dma offset
- * - DMA address range allowed by the hardware (clp query pci fn)
- *
- * Also set zdev->end_dma to the actual end address of the usable
- * range, instead of the theoretical maximum as reported by hardware.
- *
- * This limits the number of concurrently usable DMA mappings since
- * for each DMA mapped memory address we need a DMA address including
- * extra DMA addresses for multiple mappings of the same memory address.
- */
- zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
- zdev->iommu_size = min3(s390_iommu_aperture,
- ZPCI_TABLE_SIZE_RT - zdev->start_dma,
- zdev->end_dma - zdev->start_dma + 1);
- zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
- zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
- zdev->iommu_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
- if (!zdev->iommu_bitmap) {
- rc = -ENOMEM;
- goto free_dma_table;
- }
- if (!s390_iommu_strict) {
- zdev->lazy_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
- if (!zdev->lazy_bitmap) {
- rc = -ENOMEM;
- goto free_bitmap;
- }
-
- }
- if (zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
- virt_to_phys(zdev->dma_table), &status)) {
- rc = -EIO;
- goto free_bitmap;
- }
-
- return 0;
-free_bitmap:
- vfree(zdev->iommu_bitmap);
- zdev->iommu_bitmap = NULL;
- vfree(zdev->lazy_bitmap);
- zdev->lazy_bitmap = NULL;
-free_dma_table:
- dma_free_cpu_table(zdev->dma_table);
- zdev->dma_table = NULL;
-out:
- return rc;
-}
-
-int zpci_dma_exit_device(struct zpci_dev *zdev)
-{
- int cc = 0;
-
- /*
- * At this point, if the device is part of an IOMMU domain, this would
- * be a strong hint towards a bug in the IOMMU API (common) code and/or
- * simultaneous access via IOMMU and DMA API. So let's issue a warning.
- */
- WARN_ON(zdev->s390_domain);
- if (zdev_enabled(zdev))
- cc = zpci_unregister_ioat(zdev, 0);
- /*
- * cc == 3 indicates the function is gone already. This can happen
- * if the function was deconfigured/disabled suddenly and we have not
- * received a new handle yet.
- */
- if (cc && cc != 3)
- return -EIO;
-
- dma_cleanup_tables(zdev->dma_table);
- zdev->dma_table = NULL;
- vfree(zdev->iommu_bitmap);
- zdev->iommu_bitmap = NULL;
- vfree(zdev->lazy_bitmap);
- zdev->lazy_bitmap = NULL;
- zdev->next_bit = 0;
- return 0;
-}
-
-static int __init dma_alloc_cpu_table_caches(void)
-{
- dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
- ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
- 0, NULL);
- if (!dma_region_table_cache)
- return -ENOMEM;
-
- dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
- ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
- 0, NULL);
- if (!dma_page_table_cache) {
- kmem_cache_destroy(dma_region_table_cache);
- return -ENOMEM;
- }
- return 0;
-}
-
-int __init zpci_dma_init(void)
-{
- s390_iommu_aperture = (u64)virt_to_phys(high_memory);
- if (!s390_iommu_aperture_factor)
- s390_iommu_aperture = ULONG_MAX;
- else
- s390_iommu_aperture *= s390_iommu_aperture_factor;
-
- return dma_alloc_cpu_table_caches();
-}
-
-void zpci_dma_exit(void)
-{
- kmem_cache_destroy(dma_page_table_cache);
- kmem_cache_destroy(dma_region_table_cache);
-}
-
-const struct dma_map_ops s390_pci_dma_ops = {
- .alloc = s390_dma_alloc,
- .free = s390_dma_free,
- .map_sg = s390_dma_map_sg,
- .unmap_sg = s390_dma_unmap_sg,
- .map_page = s390_dma_map_pages,
- .unmap_page = s390_dma_unmap_pages,
- .mmap = dma_common_mmap,
- .get_sgtable = dma_common_get_sgtable,
- .alloc_pages = dma_common_alloc_pages,
- .free_pages = dma_common_free_pages,
- /* dma_supported is unconditionally true without a callback */
-};
-EXPORT_SYMBOL_GPL(s390_pci_dma_ops);
-
-static int __init s390_iommu_setup(char *str)
-{
- if (!strcmp(str, "strict"))
- s390_iommu_strict = 1;
- return 1;
-}
-
-__setup("s390_iommu=", s390_iommu_setup);
-
-static int __init s390_iommu_aperture_setup(char *str)
-{
- if (kstrtou32(str, 10, &s390_iommu_aperture_factor))
- s390_iommu_aperture_factor = 1;
- return 1;
-}
-
-__setup("s390_iommu_aperture=", s390_iommu_aperture_setup);
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index b9324ca2eb94..4d9773ef9e0a 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -59,9 +59,16 @@ static inline bool ers_result_indicates_abort(pci_ers_result_t ers_res)
}
}
-static bool is_passed_through(struct zpci_dev *zdev)
+static bool is_passed_through(struct pci_dev *pdev)
{
- return zdev->s390_domain;
+ struct zpci_dev *zdev = to_zpci(pdev);
+ bool ret;
+
+ mutex_lock(&zdev->kzdev_lock);
+ ret = !!zdev->kzdev;
+ mutex_unlock(&zdev->kzdev_lock);
+
+ return ret;
}
static bool is_driver_supported(struct pci_driver *driver)
@@ -176,7 +183,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
}
pdev->error_state = pci_channel_io_frozen;
- if (is_passed_through(to_zpci(pdev))) {
+ if (is_passed_through(pdev)) {
pr_info("%s: Cannot be recovered in the host because it is a pass-through device\n",
pci_name(pdev));
goto out_unlock;
@@ -239,7 +246,7 @@ static void zpci_event_io_failure(struct pci_dev *pdev, pci_channel_state_t es)
* we will inject the error event and let the guest recover the device
* itself.
*/
- if (is_passed_through(to_zpci(pdev)))
+ if (is_passed_through(pdev))
goto out;
driver = to_pci_driver(pdev->dev.driver);
if (driver && driver->err_handler && driver->err_handler->error_detected)
@@ -306,8 +313,6 @@ static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh)
/* Even though the device is already gone we still
* need to free zPCI resources as part of the disable.
*/
- if (zdev->dma_table)
- zpci_dma_exit_device(zdev);
if (zdev_enabled(zdev))
zpci_disable_device(zdev);
zdev->state = ZPCI_FN_STATE_STANDBY;
diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c
index cae280e5c047..8a7abac51816 100644
--- a/arch/s390/pci/pci_sysfs.c
+++ b/arch/s390/pci/pci_sysfs.c
@@ -56,6 +56,7 @@ static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
struct pci_dev *pdev = to_pci_dev(dev);
struct zpci_dev *zdev = to_zpci(pdev);
int ret = 0;
+ u8 status;
/* Can't use device_remove_self() here as that would lead us to lock
* the pci_rescan_remove_lock while holding the device' kernfs lock.
@@ -82,12 +83,6 @@ static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
pci_lock_rescan_remove();
if (pci_dev_is_added(pdev)) {
pci_stop_and_remove_bus_device(pdev);
- if (zdev->dma_table) {
- ret = zpci_dma_exit_device(zdev);
- if (ret)
- goto out;
- }
-
if (zdev_enabled(zdev)) {
ret = zpci_disable_device(zdev);
/*
@@ -105,14 +100,16 @@ static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
ret = zpci_enable_device(zdev);
if (ret)
goto out;
- ret = zpci_dma_init_device(zdev);
- if (ret) {
- zpci_disable_device(zdev);
- goto out;
+
+ if (zdev->dma_table) {
+ ret = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
+ virt_to_phys(zdev->dma_table), &status);
+ if (ret)
+ zpci_disable_device(zdev);
}
- pci_rescan_bus(zdev->zbus->bus);
}
out:
+ pci_rescan_bus(zdev->zbus->bus);
pci_unlock_rescan_remove();
if (kn)
sysfs_unbreak_active_protection(kn);
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index b3da2757faaf..3d80515298d2 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -7,7 +7,6 @@
* Copyright (C) 1999 Niibe Yutaka
* Copyright (C) 2002 - 2010 Paul Mundt
*/
-#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/initrd.h>
@@ -69,10 +68,6 @@ EXPORT_SYMBOL(cpu_data);
struct sh_machine_vector sh_mv = { .mv_name = "generic", };
EXPORT_SYMBOL(sh_mv);
-#ifdef CONFIG_VT
-struct screen_info screen_info;
-#endif
-
extern int root_mountflags;
#define RAMDISK_IMAGE_START_MASK 0x07FF
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
index 7417345c6639..5f6035936131 100644
--- a/arch/sparc/Makefile
+++ b/arch/sparc/Makefile
@@ -76,9 +76,8 @@ install:
archheaders:
$(Q)$(MAKE) $(build)=arch/sparc/kernel/syscalls all
-PHONY += vdso_install
-vdso_install:
- $(Q)$(MAKE) $(build)=arch/sparc/vdso $@
+vdso-install-$(CONFIG_SPARC64) += arch/sparc/vdso/vdso64.so.dbg
+vdso-install-$(CONFIG_COMPAT) += arch/sparc/vdso/vdso32.so.dbg
# This is the image used for packaging
KBUILD_IMAGE := $(boot)/zImage
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index 34ef7febf0d5..e3b72a7b46d3 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -17,7 +17,6 @@
#include <linux/initrd.h>
#include <asm/smp.h>
#include <linux/user.h>
-#include <linux/screen_info.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
@@ -51,18 +50,6 @@
#include "kernel.h"
-struct screen_info screen_info = {
- 0, 0, /* orig-x, orig-y */
- 0, /* unused */
- 0, /* orig-video-page */
- 0, /* orig-video-mode */
- 128, /* orig-video-cols */
- 0,0,0, /* ega_ax, ega_bx, ega_cx */
- 54, /* orig-video-lines */
- 0, /* orig-video-isVGA */
- 16 /* orig-video-points */
-};
-
/* Typing sync at the prom prompt calls the function pointed to by
* romvec->pv_synchook which I set to the following function.
* This should sync all filesystems and return, for now it just
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 6546ca9d4d3f..6a4797dec34b 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -15,7 +15,6 @@
#include <linux/ptrace.h>
#include <asm/smp.h>
#include <linux/user.h>
-#include <linux/screen_info.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
@@ -68,18 +67,6 @@
DEFINE_SPINLOCK(ns87303_lock);
EXPORT_SYMBOL(ns87303_lock);
-struct screen_info screen_info = {
- 0, 0, /* orig-x, orig-y */
- 0, /* unused */
- 0, /* orig-video-page */
- 0, /* orig-video-mode */
- 128, /* orig-video-cols */
- 0, 0, 0, /* unused, ega_bx, unused */
- 54, /* orig-video-lines */
- 0, /* orig-video-isVGA */
- 16 /* orig-video-points */
-};
-
static void
prom_console_write(struct console *con, const char *s, unsigned int n)
{
diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile
index 77d7b9032158..d08c3a0443f3 100644
--- a/arch/sparc/vdso/Makefile
+++ b/arch/sparc/vdso/Makefile
@@ -116,30 +116,3 @@ quiet_cmd_vdso = VDSO $@
VDSO_LDFLAGS = -shared --hash-style=both --build-id=sha1 -Bsymbolic
GCOV_PROFILE := n
-
-#
-# Install the unstripped copies of vdso*.so. If our toolchain supports
-# build-id, install .build-id links as well.
-#
-quiet_cmd_vdso_install = INSTALL $(@:install_%=%)
-define cmd_vdso_install
- cp $< "$(MODLIB)/vdso/$(@:install_%=%)"; \
- if readelf -n $< |grep -q 'Build ID'; then \
- buildid=`readelf -n $< |grep 'Build ID' |sed -e 's/^.*Build ID: \(.*\)$$/\1/'`; \
- first=`echo $$buildid | cut -b-2`; \
- last=`echo $$buildid | cut -b3-`; \
- mkdir -p "$(MODLIB)/vdso/.build-id/$$first"; \
- ln -sf "../../$(@:install_%=%)" "$(MODLIB)/vdso/.build-id/$$first/$$last.debug"; \
- fi
-endef
-
-vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%)
-
-$(MODLIB)/vdso: FORCE
- @mkdir -p $(MODLIB)/vdso
-
-$(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso FORCE
- $(call cmd,vdso_install)
-
-PHONY += vdso_install $(vdso_img_insttargets)
-vdso_install: $(vdso_img_insttargets) FORCE
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6a917f62eff2..3762f41bb092 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1313,16 +1313,41 @@ config MICROCODE
def_bool y
depends on CPU_SUP_AMD || CPU_SUP_INTEL
+config MICROCODE_INITRD32
+ def_bool y
+ depends on MICROCODE && X86_32 && BLK_DEV_INITRD
+
config MICROCODE_LATE_LOADING
bool "Late microcode loading (DANGEROUS)"
default n
- depends on MICROCODE
+ depends on MICROCODE && SMP
help
Loading microcode late, when the system is up and executing instructions
is a tricky business and should be avoided if possible. Just the sequence
of synchronizing all cores and SMT threads is one fragile dance which does
not guarantee that cores will not softlock after the loading. Therefore,
- use this at your own risk. Late loading taints the kernel too.
+ use this at your own risk. Late loading taints the kernel unless the
+ microcode header indicates that it is safe for late loading via the
+ minimal revision check. This minimal revision check can be enforced on
+ the kernel command line with "microcode.minrev=Y".
+
+config MICROCODE_LATE_FORCE_MINREV
+ bool "Enforce late microcode loading minimal revision check"
+ default n
+ depends on MICROCODE_LATE_LOADING
+ help
+ To prevent users from loading microcode late that modifies already
+ in-use features, newer microcode patches have a minimum revision field
+ in the microcode header, which tells the kernel the minimum revision
+ that must be active in the CPU before that new microcode can safely
+ be loaded late into the running system. If this option is disabled,
+ the check is not enforced, but the kernel is tainted when the minimal
+ revision check fails.
+
+ This minimal revision check can also be controlled via the
+ "microcode.minrev" parameter on the kernel command line.
+
+ If unsure, say Y.
config X86_MSR
tristate "/dev/cpu/*/msr - Model-specific register support"
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 4de6ddaf4b84..1a068de12a56 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -294,9 +294,10 @@ PHONY += install
install:
$(call cmd,install)
-PHONY += vdso_install
-vdso_install:
- $(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@
+vdso-install-$(CONFIG_X86_64) += arch/x86/entry/vdso/vdso64.so.dbg
+vdso-install-$(CONFIG_X86_X32_ABI) += arch/x86/entry/vdso/vdsox32.so.dbg
+vdso-install-$(CONFIG_X86_32) += arch/x86/entry/vdso/vdso32.so.dbg
+vdso-install-$(CONFIG_IA32_EMULATION) += arch/x86/entry/vdso/vdso32.so.dbg
archprepare: checkbin
checkbin:
diff --git a/arch/x86/Makefile.postlink b/arch/x86/Makefile.postlink
index 936093d29160..fef2e977cc7d 100644
--- a/arch/x86/Makefile.postlink
+++ b/arch/x86/Makefile.postlink
@@ -34,9 +34,6 @@ ifeq ($(CONFIG_X86_NEED_RELOCS),y)
$(call cmd,strip_relocs)
endif
-%.ko: FORCE
- @true
-
clean:
@rm -f $(OUT_RELOCS)/vmlinux.relocs
diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
index d11206ceff3b..1b5d17a9f70d 100644
--- a/arch/x86/coco/tdx/tdx.c
+++ b/arch/x86/coco/tdx/tdx.c
@@ -106,6 +106,27 @@ int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport)
}
EXPORT_SYMBOL_GPL(tdx_mcall_get_report0);
+/**
+ * tdx_hcall_get_quote() - Wrapper to request TD Quote using GetQuote
+ * hypercall.
+ * @buf: Address of the directly mapped shared kernel buffer which
+ * contains TDREPORT. The same buffer will be used by the VMM to
+ * store the generated TD Quote output.
+ * @size: size of the tdquote buffer (4KB-aligned).
+ *
+ * Refer to the section titled "TDG.VP.VMCALL<GetQuote>" in the TDX GHCI
+ * v1.0 specification for more information on GetQuote hypercall.
+ * It is used in the TDX guest driver module to get the TD Quote.
+ *
+ * Return 0 on success or error code on failure.
+ */
+u64 tdx_hcall_get_quote(u8 *buf, size_t size)
+{
+ /* Since buf is a shared memory, set the shared (decrypted) bits */
+ return _tdx_hypercall(TDVMCALL_GET_QUOTE, cc_mkdec(virt_to_phys(buf)), size, 0, 0);
+}
+EXPORT_SYMBOL_GPL(tdx_hcall_get_quote);
+
static void __noreturn tdx_panic(const char *msg)
{
struct tdx_module_args args = {
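A hypothetical caller of the new helper might look roughly as follows; the buffer sizing, the completion handling and the use of set_memory_decrypted() are assumptions for illustration, not taken from this patch:

	/* Hypothetical usage sketch: the GetQuote buffer must be shared with
	 * the VMM, so it is converted to decrypted memory before the call. */
	static int example_request_quote(void *tdreport, size_t report_len)
	{
		size_t buf_len = PAGE_ALIGN(report_len + SZ_4K);	/* assumed sizing */
		unsigned int order = get_order(buf_len);
		u8 *buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
		u64 err;

		if (!buf)
			return -ENOMEM;
		if (set_memory_decrypted((unsigned long)buf, buf_len >> PAGE_SHIFT)) {
			free_pages((unsigned long)buf, order);
			return -EIO;
		}
		memcpy(buf, tdreport, report_len);
		err = tdx_hcall_get_quote(buf, buf_len);
		/* ...wait for the VMM to fill in the quote, copy it out, then
		 * re-encrypt and free the buffer... */
		return err ? -EIO : 0;
	}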
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 83c0afb7c741..b1b8dd1608f7 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -191,31 +191,4 @@ GCOV_PROFILE := n
quiet_cmd_vdso_and_check = VDSO $@
cmd_vdso_and_check = $(cmd_vdso); $(cmd_vdso_check)
-#
-# Install the unstripped copies of vdso*.so. If our toolchain supports
-# build-id, install .build-id links as well.
-#
-quiet_cmd_vdso_install = INSTALL $(@:install_%=%)
-define cmd_vdso_install
- cp $< "$(MODLIB)/vdso/$(@:install_%=%)"; \
- if readelf -n $< |grep -q 'Build ID'; then \
- buildid=`readelf -n $< |grep 'Build ID' |sed -e 's/^.*Build ID: \(.*\)$$/\1/'`; \
- first=`echo $$buildid | cut -b-2`; \
- last=`echo $$buildid | cut -b3-`; \
- mkdir -p "$(MODLIB)/vdso/.build-id/$$first"; \
- ln -sf "../../$(@:install_%=%)" "$(MODLIB)/vdso/.build-id/$$first/$$last.debug"; \
- fi
-endef
-
-vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%)
-
-$(MODLIB)/vdso: FORCE
- @mkdir -p $(MODLIB)/vdso
-
-$(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso
- $(call cmd,vdso_install)
-
-PHONY += vdso_install $(vdso_img_insttargets)
-vdso_install: $(vdso_img_insttargets)
-
clean-files := vdso32.so vdso32.so.dbg vdso64* vdso-image-*.c vdsox32.so*
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index b0d192f613b7..d21f48f1c242 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -276,7 +276,8 @@ struct apic {
u32 disable_esr : 1,
dest_mode_logical : 1,
- x2apic_set_max_apicid : 1;
+ x2apic_set_max_apicid : 1,
+ nmi_to_offline_cpu : 1;
u32 (*calc_dest_apicid)(unsigned int cpu);
@@ -531,6 +532,8 @@ extern u32 apic_flat_calc_apicid(unsigned int cpu);
extern void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap);
extern u32 default_cpu_present_to_apicid(int mps_cpu);
+void apic_send_nmi_to_offline_cpu(unsigned int cpu);
+
#else /* CONFIG_X86_LOCAL_APIC */
static inline u32 read_apic_id(void) { return 0; }
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 25050d953eee..fecc4fe1d68a 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -71,26 +71,12 @@ static inline void init_ia32_feat_ctl(struct cpuinfo_x86 *c) {}
extern __noendbr void cet_disable(void);
-struct ucode_cpu_info;
+struct cpu_signature;
-int intel_cpu_collect_info(struct ucode_cpu_info *uci);
-
-static inline bool intel_cpu_signatures_match(unsigned int s1, unsigned int p1,
- unsigned int s2, unsigned int p2)
-{
- if (s1 != s2)
- return false;
-
- /* Processor flags are either both 0 ... */
- if (!p1 && !p2)
- return true;
-
- /* ... or they intersect. */
- return p1 & p2;
-}
+void intel_collect_cpu_info(struct cpu_signature *sig);
extern u64 x86_read_arch_cap_msr(void);
-int intel_find_matching_signature(void *mc, unsigned int csig, int cpf);
+bool intel_find_matching_signature(void *mc, struct cpu_signature *sig);
int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type);
extern struct cpumask cpus_stop_mask;
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index bbbe9d744977..695e569159c1 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -23,6 +23,8 @@ static inline void load_ucode_ap(void) { }
static inline void microcode_bsp_resume(void) { }
#endif
+extern unsigned long initrd_start_early;
+
#ifdef CONFIG_CPU_SUP_INTEL
/* Intel specific microcode defines. Public for IFS */
struct microcode_header_intel {
@@ -36,7 +38,8 @@ struct microcode_header_intel {
unsigned int datasize;
unsigned int totalsize;
unsigned int metasize;
- unsigned int reserved[2];
+ unsigned int min_req_ver;
+ unsigned int reserved;
};
struct microcode_intel {
@@ -68,11 +71,19 @@ static inline u32 intel_get_microcode_revision(void)
return rev;
}
+#endif /* !CONFIG_CPU_SUP_INTEL */
-void show_ucode_info_early(void);
+bool microcode_nmi_handler(void);
+void microcode_offline_nmi_handler(void);
-#else /* CONFIG_CPU_SUP_INTEL */
-static inline void show_ucode_info_early(void) { }
-#endif /* !CONFIG_CPU_SUP_INTEL */
+#ifdef CONFIG_MICROCODE_LATE_LOADING
+DECLARE_STATIC_KEY_FALSE(microcode_nmi_handler_enable);
+static __always_inline bool microcode_nmi_handler_enabled(void)
+{
+ return static_branch_unlikely(&microcode_nmi_handler_enable);
+}
+#else
+static __always_inline bool microcode_nmi_handler_enabled(void) { return false; }
+#endif
#endif /* _ASM_X86_MICROCODE_H */
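The static key keeps the NMI-path check essentially free while no late load is in flight. A sketch of the assumed call pattern in an NMI entry path (the actual integration point is not part of this hunk):

	/* Sketch (assumption): gate the microcode NMI handler on the static
	 * key so the common NMI path pays nothing outside a late load. */
	static bool maybe_handle_microcode_nmi(void)
	{
		if (!microcode_nmi_handler_enabled())
			return false;
		return microcode_nmi_handler();	/* assumed: true if this NMI was consumed */
	}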
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index f3495623ac99..bf483fcb4e57 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -126,6 +126,7 @@ void clear_bss(void);
#ifdef __i386__
asmlinkage void __init __noreturn i386_start_kernel(void);
+void __init mk_early_pgtbl_32(void);
#else
asmlinkage void __init __noreturn x86_64_start_kernel(char *real_mode);
diff --git a/arch/x86/include/asm/shared/tdx.h b/arch/x86/include/asm/shared/tdx.h
index f74695dea217..ccce7ebd8677 100644
--- a/arch/x86/include/asm/shared/tdx.h
+++ b/arch/x86/include/asm/shared/tdx.h
@@ -23,6 +23,7 @@
/* TDX hypercall Leaf IDs */
#define TDVMCALL_MAP_GPA 0x10001
+#define TDVMCALL_GET_QUOTE 0x10002
#define TDVMCALL_REPORT_FATAL_ERROR 0x10003
#define TDVMCALL_STATUS_RETRY 1
diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h
index adcbe3f1de30..f3d5305a60fc 100644
--- a/arch/x86/include/asm/tdx.h
+++ b/arch/x86/include/asm/tdx.h
@@ -56,6 +56,8 @@ bool tdx_early_handle_ve(struct pt_regs *regs);
int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport);
+u64 tdx_hcall_get_quote(u8 *buf, size_t size);
+
#else
static inline void tdx_early_init(void) { };
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 3269a0e23d3a..0000325ab98f 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -16,6 +16,7 @@ CFLAGS_REMOVE_kvmclock.o = -pg
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_early_printk.o = -pg
CFLAGS_REMOVE_head64.o = -pg
+CFLAGS_REMOVE_head32.o = -pg
CFLAGS_REMOVE_sev.o = -pg
CFLAGS_REMOVE_rethook.o = -pg
endif
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 37daa3fd6819..7139867d69cd 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -103,6 +103,7 @@ static struct apic apic_flat __ro_after_init = {
.send_IPI_allbutself = default_send_IPI_allbutself,
.send_IPI_all = default_send_IPI_all,
.send_IPI_self = default_send_IPI_self,
+ .nmi_to_offline_cpu = true,
.read = native_apic_mem_read,
.write = native_apic_mem_write,
@@ -173,6 +174,7 @@ static struct apic apic_physflat __ro_after_init = {
.send_IPI_allbutself = default_send_IPI_allbutself,
.send_IPI_all = default_send_IPI_all,
.send_IPI_self = default_send_IPI_self,
+ .nmi_to_offline_cpu = true,
.read = native_apic_mem_read,
.write = native_apic_mem_write,
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index 0078730a512e..5da693d633b7 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -97,6 +97,14 @@ sendmask:
__apic_send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
}
+void apic_send_nmi_to_offline_cpu(unsigned int cpu)
+{
+ if (WARN_ON_ONCE(!apic->nmi_to_offline_cpu))
+ return;
+ if (WARN_ON_ONCE(!cpumask_test_cpu(cpu, &cpus_booted_once_mask)))
+ return;
+ apic->send_IPI(cpu, NMI_VECTOR);
+}
#endif /* CONFIG_SMP */
static inline int __prepare_ICR2(unsigned int mask)
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index affbff65e497..a8306089c91b 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -251,6 +251,7 @@ static struct apic apic_x2apic_cluster __ro_after_init = {
.send_IPI_allbutself = x2apic_send_IPI_allbutself,
.send_IPI_all = x2apic_send_IPI_all,
.send_IPI_self = x2apic_send_IPI_self,
+ .nmi_to_offline_cpu = true,
.read = native_apic_msr_read,
.write = native_apic_msr_write,
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 7c9fe28f742f..558a4a8824f4 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -166,6 +166,7 @@ static struct apic apic_x2apic_phys __ro_after_init = {
.send_IPI_allbutself = x2apic_send_IPI_allbutself,
.send_IPI_all = x2apic_send_IPI_all,
.send_IPI_self = x2apic_send_IPI_self,
+ .nmi_to_offline_cpu = true,
.read = native_apic_msr_read,
.write = native_apic_msr_write,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 5d9591146244..b14fc8c1c953 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -2164,8 +2164,6 @@ static inline void setup_getcpu(int cpu)
}
#ifdef CONFIG_X86_64
-static inline void ucode_cpu_init(int cpu) { }
-
static inline void tss_setup_ist(struct tss_struct *tss)
{
/* Set up the per-CPU TSS IST stacks */
@@ -2176,16 +2174,8 @@ static inline void tss_setup_ist(struct tss_struct *tss)
/* Only mapped when SEV-ES is active */
tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC);
}
-
#else /* CONFIG_X86_64 */
-
-static inline void ucode_cpu_init(int cpu)
-{
- show_ucode_info_early();
-}
-
static inline void tss_setup_ist(struct tss_struct *tss) { }
-
#endif /* !CONFIG_X86_64 */
static inline void tss_setup_io_bitmap(struct tss_struct *tss)
@@ -2241,8 +2231,6 @@ void cpu_init(void)
struct task_struct *cur = current;
int cpu = raw_smp_processor_id();
- ucode_cpu_init(cpu);
-
#ifdef CONFIG_NUMA
if (this_cpu_read(numa_node) == 0 &&
early_cpu_to_node(cpu) != NUMA_NO_NODE)
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index bbd1dc38ea03..9373ec01c5ae 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -37,6 +37,16 @@
#include "internal.h"
+struct ucode_patch {
+ struct list_head plist;
+ void *data;
+ unsigned int size;
+ u32 patch_id;
+ u16 equiv_cpu;
+};
+
+static LIST_HEAD(microcode_cache);
+
#define UCODE_MAGIC 0x00414d44
#define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000
#define UCODE_UCODE_TYPE 0x00000001
@@ -121,24 +131,20 @@ static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig)
/*
* Check whether there is a valid microcode container file at the beginning
- * of @buf of size @buf_size. Set @early to use this function in the early path.
+ * of @buf of size @buf_size.
*/
-static bool verify_container(const u8 *buf, size_t buf_size, bool early)
+static bool verify_container(const u8 *buf, size_t buf_size)
{
u32 cont_magic;
if (buf_size <= CONTAINER_HDR_SZ) {
- if (!early)
- pr_debug("Truncated microcode container header.\n");
-
+ pr_debug("Truncated microcode container header.\n");
return false;
}
cont_magic = *(const u32 *)buf;
if (cont_magic != UCODE_MAGIC) {
- if (!early)
- pr_debug("Invalid magic value (0x%08x).\n", cont_magic);
-
+ pr_debug("Invalid magic value (0x%08x).\n", cont_magic);
return false;
}
@@ -147,23 +153,20 @@ static bool verify_container(const u8 *buf, size_t buf_size, bool early)
/*
* Check whether there is a valid, non-truncated CPU equivalence table at the
- * beginning of @buf of size @buf_size. Set @early to use this function in the
- * early path.
+ * beginning of @buf of size @buf_size.
*/
-static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early)
+static bool verify_equivalence_table(const u8 *buf, size_t buf_size)
{
const u32 *hdr = (const u32 *)buf;
u32 cont_type, equiv_tbl_len;
- if (!verify_container(buf, buf_size, early))
+ if (!verify_container(buf, buf_size))
return false;
cont_type = hdr[1];
if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) {
- if (!early)
- pr_debug("Wrong microcode container equivalence table type: %u.\n",
- cont_type);
-
+ pr_debug("Wrong microcode container equivalence table type: %u.\n",
+ cont_type);
return false;
}
@@ -172,9 +175,7 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early)
equiv_tbl_len = hdr[2];
if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) ||
buf_size < equiv_tbl_len) {
- if (!early)
- pr_debug("Truncated equivalence table.\n");
-
+ pr_debug("Truncated equivalence table.\n");
return false;
}
@@ -183,22 +184,19 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early)
/*
* Check whether there is a valid, non-truncated microcode patch section at the
- * beginning of @buf of size @buf_size. Set @early to use this function in the
- * early path.
+ * beginning of @buf of size @buf_size.
*
* On success, @sh_psize returns the patch size according to the section header,
* to the caller.
*/
static bool
-__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early)
+__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
{
u32 p_type, p_size;
const u32 *hdr;
if (buf_size < SECTION_HDR_SIZE) {
- if (!early)
- pr_debug("Truncated patch section.\n");
-
+ pr_debug("Truncated patch section.\n");
return false;
}
@@ -207,17 +205,13 @@ __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early
p_size = hdr[1];
if (p_type != UCODE_UCODE_TYPE) {
- if (!early)
- pr_debug("Invalid type field (0x%x) in container file section header.\n",
- p_type);
-
+ pr_debug("Invalid type field (0x%x) in container file section header.\n",
+ p_type);
return false;
}
if (p_size < sizeof(struct microcode_header_amd)) {
- if (!early)
- pr_debug("Patch of size %u too short.\n", p_size);
-
+ pr_debug("Patch of size %u too short.\n", p_size);
return false;
}
@@ -269,7 +263,7 @@ static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size
* 0: success
*/
static int
-verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool early)
+verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size)
{
struct microcode_header_amd *mc_hdr;
unsigned int ret;
@@ -277,7 +271,7 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool ea
u16 proc_id;
u8 patch_fam;
- if (!__verify_patch_section(buf, buf_size, &sh_psize, early))
+ if (!__verify_patch_section(buf, buf_size, &sh_psize))
return -1;
/*
@@ -292,16 +286,13 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool ea
* size sh_psize, as the section claims.
*/
if (buf_size < sh_psize) {
- if (!early)
- pr_debug("Patch of size %u truncated.\n", sh_psize);
-
+ pr_debug("Patch of size %u truncated.\n", sh_psize);
return -1;
}
ret = __verify_patch_size(family, sh_psize, buf_size);
if (!ret) {
- if (!early)
- pr_debug("Per-family patch size mismatch.\n");
+ pr_debug("Per-family patch size mismatch.\n");
return -1;
}
@@ -309,8 +300,7 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool ea
mc_hdr = (struct microcode_header_amd *)(buf + SECTION_HDR_SIZE);
if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
- if (!early)
- pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id);
+ pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id);
return -1;
}
@@ -337,7 +327,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
u16 eq_id;
u8 *buf;
- if (!verify_equivalence_table(ucode, size, true))
+ if (!verify_equivalence_table(ucode, size))
return 0;
buf = ucode;
@@ -364,7 +354,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
u32 patch_size;
int ret;
- ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size, true);
+ ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size);
if (ret < 0) {
/*
* Patch verification failed, skip to the next container, if
@@ -456,14 +446,8 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size)
{
struct cont_desc desc = { 0 };
struct microcode_amd *mc;
- u32 rev, dummy, *new_rev;
bool ret = false;
-
-#ifdef CONFIG_X86_32
- new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
-#else
- new_rev = &ucode_new_rev;
-#endif
+ u32 rev, dummy;
desc.cpuid_1_eax = cpuid_1_eax;
@@ -484,8 +468,8 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size)
return ret;
if (!__apply_microcode_amd(mc)) {
- *new_rev = mc->hdr.patch_id;
- ret = true;
+ ucode_new_rev = mc->hdr.patch_id;
+ ret = true;
}
return ret;
@@ -501,7 +485,7 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
if (family >= 0x15)
snprintf(fw_name, sizeof(fw_name),
- "amd-ucode/microcode_amd_fam%.2xh.bin", family);
+ "amd-ucode/microcode_amd_fam%02hhxh.bin", family);
if (firmware_request_builtin(&fw, fw_name)) {
cp->size = fw.size;
@@ -512,36 +496,23 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
return false;
}
-static void find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret)
+static void __init find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret)
{
- struct ucode_cpu_info *uci;
struct cpio_data cp;
- const char *path;
- bool use_pa;
-
- if (IS_ENABLED(CONFIG_X86_32)) {
- uci = (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info);
- path = (const char *)__pa_nodebug(ucode_path);
- use_pa = true;
- } else {
- uci = ucode_cpu_info;
- path = ucode_path;
- use_pa = false;
- }
if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
- cp = find_microcode_in_initrd(path, use_pa);
-
- /* Needed in load_microcode_amd() */
- uci->cpu_sig.sig = cpuid_1_eax;
+ cp = find_microcode_in_initrd(ucode_path);
*ret = cp;
}
-static void apply_ucode_from_containers(unsigned int cpuid_1_eax)
+void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
{
struct cpio_data cp = { };
+ /* Needed in load_microcode_amd() */
+ ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;
+
find_blobs_in_containers(cpuid_1_eax, &cp);
if (!(cp.data && cp.size))
return;
@@ -549,20 +520,20 @@ static void apply_ucode_from_containers(unsigned int cpuid_1_eax)
early_apply_microcode(cpuid_1_eax, cp.data, cp.size);
}
-void load_ucode_amd_early(unsigned int cpuid_1_eax)
-{
- return apply_ucode_from_containers(cpuid_1_eax);
-}
-
static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
-int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
+static int __init save_microcode_in_initrd(void)
{
+ unsigned int cpuid_1_eax = native_cpuid_eax(1);
+ struct cpuinfo_x86 *c = &boot_cpu_data;
struct cont_desc desc = { 0 };
enum ucode_state ret;
struct cpio_data cp;
- cp = find_microcode_in_initrd(ucode_path, false);
+ if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
+ return 0;
+
+ find_blobs_in_containers(cpuid_1_eax, &cp);
if (!(cp.data && cp.size))
return -EINVAL;
@@ -578,6 +549,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
return 0;
}
+early_initcall(save_microcode_in_initrd);
/*
* a small, trivial cache of per-family ucode patches
@@ -631,7 +603,6 @@ static struct ucode_patch *find_patch(unsigned int cpu)
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
u16 equiv_id;
-
equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig);
if (!equiv_id)
return NULL;
@@ -733,12 +704,20 @@ out:
return ret;
}
+void load_ucode_amd_ap(unsigned int cpuid_1_eax)
+{
+ unsigned int cpu = smp_processor_id();
+
+ ucode_cpu_info[cpu].cpu_sig.sig = cpuid_1_eax;
+ apply_microcode_amd(cpu);
+}
+
static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size)
{
u32 equiv_tbl_len;
const u32 *hdr;
- if (!verify_equivalence_table(buf, buf_size, false))
+ if (!verify_equivalence_table(buf, buf_size))
return 0;
hdr = (const u32 *)buf;
@@ -784,7 +763,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
u16 proc_id;
int ret;
- ret = verify_patch(family, fw, leftover, patch_size, false);
+ ret = verify_patch(family, fw, leftover, patch_size);
if (ret)
return ret;
@@ -909,6 +888,9 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device)
enum ucode_state ret = UCODE_NFOUND;
const struct firmware *fw;
+ if (force_minrev)
+ return UCODE_NFOUND;
+
if (c->x86 >= 0x15)
snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
@@ -918,7 +900,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device)
}
ret = UCODE_ERROR;
- if (!verify_container(fw->data, fw->size, false))
+ if (!verify_container(fw->data, fw->size))
goto fw_release;
ret = load_microcode_amd(c->x86, fw->data, fw->size);
@@ -938,10 +920,11 @@ static void microcode_fini_cpu_amd(int cpu)
}
static struct microcode_ops microcode_amd_ops = {
- .request_microcode_fw = request_microcode_amd,
- .collect_cpu_info = collect_cpu_info_amd,
- .apply_microcode = apply_microcode_amd,
- .microcode_fini_cpu = microcode_fini_cpu_amd,
+ .request_microcode_fw = request_microcode_amd,
+ .collect_cpu_info = collect_cpu_info_amd,
+ .apply_microcode = apply_microcode_amd,
+ .microcode_fini_cpu = microcode_fini_cpu_amd,
+ .nmi_safe = true,
};
struct microcode_ops * __init init_amd_microcode(void)
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 6cc7a2c181da..666d25bbc5ad 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -23,6 +23,7 @@
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/firmware.h>
+#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mutex.h>
@@ -31,6 +32,7 @@
#include <linux/fs.h>
#include <linux/mm.h>
+#include <asm/apic.h>
#include <asm/cpu_device_id.h>
#include <asm/perf_event.h>
#include <asm/processor.h>
@@ -42,11 +44,10 @@
#define DRIVER_VERSION "2.2"
static struct microcode_ops *microcode_ops;
-static bool dis_ucode_ldr = true;
+bool dis_ucode_ldr = true;
-bool initrd_gone;
-
-LIST_HEAD(microcode_cache);
+bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV);
+module_param(force_minrev, bool, S_IRUSR | S_IWUSR);
/*
* Synchronization.
@@ -90,10 +91,7 @@ static bool amd_check_current_patch_level(void)
native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);
- if (IS_ENABLED(CONFIG_X86_32))
- levels = (u32 *)__pa_nodebug(&final_levels);
- else
- levels = final_levels;
+ levels = final_levels;
for (i = 0; levels[i]; i++) {
if (lvl == levels[i])
@@ -105,17 +103,8 @@ static bool amd_check_current_patch_level(void)
static bool __init check_loader_disabled_bsp(void)
{
static const char *__dis_opt_str = "dis_ucode_ldr";
-
-#ifdef CONFIG_X86_32
- const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
- const char *option = (const char *)__pa_nodebug(__dis_opt_str);
- bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);
-
-#else /* CONFIG_X86_64 */
const char *cmdline = boot_command_line;
const char *option = __dis_opt_str;
- bool *res = &dis_ucode_ldr;
-#endif
/*
* CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
@@ -123,17 +112,17 @@ static bool __init check_loader_disabled_bsp(void)
* that's good enough as they don't land on the BSP path anyway.
*/
if (native_cpuid_ecx(1) & BIT(31))
- return *res;
+ return true;
if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
if (amd_check_current_patch_level())
- return *res;
+ return true;
}
if (cmdline_find_option_bool(cmdline, option) <= 0)
- *res = false;
+ dis_ucode_ldr = false;
- return *res;
+ return dis_ucode_ldr;
}
void __init load_ucode_bsp(void)
@@ -168,23 +157,14 @@ void __init load_ucode_bsp(void)
if (intel)
load_ucode_intel_bsp();
else
- load_ucode_amd_early(cpuid_1_eax);
-}
-
-static bool check_loader_disabled_ap(void)
-{
-#ifdef CONFIG_X86_32
- return *((bool *)__pa_nodebug(&dis_ucode_ldr));
-#else
- return dis_ucode_ldr;
-#endif
+ load_ucode_amd_bsp(cpuid_1_eax);
}
void load_ucode_ap(void)
{
unsigned int cpuid_1_eax;
- if (check_loader_disabled_ap())
+ if (dis_ucode_ldr)
return;
cpuid_1_eax = native_cpuid_eax(1);
@@ -196,97 +176,44 @@ void load_ucode_ap(void)
break;
case X86_VENDOR_AMD:
if (x86_family(cpuid_1_eax) >= 0x10)
- load_ucode_amd_early(cpuid_1_eax);
- break;
- default:
- break;
- }
-}
-
-static int __init save_microcode_in_initrd(void)
-{
- struct cpuinfo_x86 *c = &boot_cpu_data;
- int ret = -EINVAL;
-
- switch (c->x86_vendor) {
- case X86_VENDOR_INTEL:
- if (c->x86 >= 6)
- ret = save_microcode_in_initrd_intel();
- break;
- case X86_VENDOR_AMD:
- if (c->x86 >= 0x10)
- ret = save_microcode_in_initrd_amd(cpuid_eax(1));
+ load_ucode_amd_ap(cpuid_1_eax);
break;
default:
break;
}
-
- initrd_gone = true;
-
- return ret;
}
-struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
+struct cpio_data __init find_microcode_in_initrd(const char *path)
{
#ifdef CONFIG_BLK_DEV_INITRD
unsigned long start = 0;
size_t size;
#ifdef CONFIG_X86_32
- struct boot_params *params;
-
- if (use_pa)
- params = (struct boot_params *)__pa_nodebug(&boot_params);
- else
- params = &boot_params;
-
- size = params->hdr.ramdisk_size;
-
- /*
- * Set start only if we have an initrd image. We cannot use initrd_start
- * because it is not set that early yet.
- */
+ size = boot_params.hdr.ramdisk_size;
+ /* Early load on BSP has a temporary mapping. */
if (size)
- start = params->hdr.ramdisk_image;
+ start = initrd_start_early;
-# else /* CONFIG_X86_64 */
+#else /* CONFIG_X86_64 */
size = (unsigned long)boot_params.ext_ramdisk_size << 32;
size |= boot_params.hdr.ramdisk_size;
if (size) {
start = (unsigned long)boot_params.ext_ramdisk_image << 32;
start |= boot_params.hdr.ramdisk_image;
-
start += PAGE_OFFSET;
}
-# endif
+#endif
/*
* Fixup the start address: after reserve_initrd() runs, initrd_start
* has the virtual address of the beginning of the initrd. It also
* possibly relocates the ramdisk. In either case, initrd_start contains
* the updated address so use that instead.
- *
- * initrd_gone is for the hotplug case where we've thrown out initrd
- * already.
*/
- if (!use_pa) {
- if (initrd_gone)
- return (struct cpio_data){ NULL, 0, "" };
- if (initrd_start)
- start = initrd_start;
- } else {
- /*
- * The picture with physical addresses is a bit different: we
- * need to get the *physical* address to which the ramdisk was
- * relocated, i.e., relocated_ramdisk (not initrd_start) and
- * since we're running from physical addresses, we need to access
- * relocated_ramdisk through its *physical* address too.
- */
- u64 *rr = (u64 *)__pa_nodebug(&relocated_ramdisk);
- if (*rr)
- start = *rr;
- }
+ if (initrd_start)
+ start = initrd_start;
return find_cpio_data(path, (void *)start, size, NULL);
#else /* !CONFIG_BLK_DEV_INITRD */
@@ -330,117 +257,298 @@ static struct platform_device *microcode_pdev;
* requirement can be relaxed in the future. Right now, this is conservative
* and good.
*/
-#define SPINUNIT 100 /* 100 nsec */
+enum sibling_ctrl {
+ /* Spinwait with timeout */
+ SCTRL_WAIT,
+ /* Invoke the apply_microcode() callback */
+ SCTRL_APPLY,
+ /* Proceed without invoking the apply_microcode() callback */
+ SCTRL_DONE,
+};
+
+struct microcode_ctrl {
+ enum sibling_ctrl ctrl;
+ enum ucode_state result;
+ unsigned int ctrl_cpu;
+ bool nmi_enabled;
+};
-static int check_online_cpus(void)
+DEFINE_STATIC_KEY_FALSE(microcode_nmi_handler_enable);
+static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl);
+static atomic_t late_cpus_in, offline_in_nmi;
+static unsigned int loops_per_usec;
+static cpumask_t cpu_offline_mask;
+
+static noinstr bool wait_for_cpus(atomic_t *cnt)
{
- unsigned int cpu;
+ unsigned int timeout, loops;
- /*
- * Make sure all CPUs are online. It's fine for SMT to be disabled if
- * all the primary threads are still online.
- */
- for_each_present_cpu(cpu) {
- if (topology_is_primary_thread(cpu) && !cpu_online(cpu)) {
- pr_err("Not all CPUs online, aborting microcode update.\n");
- return -EINVAL;
+ WARN_ON_ONCE(raw_atomic_dec_return(cnt) < 0);
+
+ for (timeout = 0; timeout < USEC_PER_SEC; timeout++) {
+ if (!raw_atomic_read(cnt))
+ return true;
+
+ for (loops = 0; loops < loops_per_usec; loops++)
+ cpu_relax();
+
+ /* If invoked directly, tickle the NMI watchdog */
+ if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) {
+ instrumentation_begin();
+ touch_nmi_watchdog();
+ instrumentation_end();
}
}
-
- return 0;
+ /* Prevent the late comers from making progress and let them time out */
+ raw_atomic_inc(cnt);
+ return false;
}
-static atomic_t late_cpus_in;
-static atomic_t late_cpus_out;
-
-static int __wait_for_cpus(atomic_t *t, long long timeout)
+static noinstr bool wait_for_ctrl(void)
{
- int all_cpus = num_online_cpus();
-
- atomic_inc(t);
+ unsigned int timeout, loops;
- while (atomic_read(t) < all_cpus) {
- if (timeout < SPINUNIT) {
- pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
- all_cpus - atomic_read(t));
- return 1;
- }
+ for (timeout = 0; timeout < USEC_PER_SEC; timeout++) {
+ if (raw_cpu_read(ucode_ctrl.ctrl) != SCTRL_WAIT)
+ return true;
- ndelay(SPINUNIT);
- timeout -= SPINUNIT;
+ for (loops = 0; loops < loops_per_usec; loops++)
+ cpu_relax();
- touch_nmi_watchdog();
+ /* If invoked directly, tickle the NMI watchdog */
+ if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) {
+ instrumentation_begin();
+ touch_nmi_watchdog();
+ instrumentation_end();
+ }
}
- return 0;
+ return false;
}
/*
- * Returns:
- * < 0 - on error
- * 0 - success (no update done or microcode was updated)
+ * Protected against instrumentation up to the point where the primary
+ * thread completed the update. See microcode_nmi_handler() for details.
*/
-static int __reload_late(void *info)
+static noinstr bool load_secondary_wait(unsigned int ctrl_cpu)
{
- int cpu = smp_processor_id();
- enum ucode_state err;
- int ret = 0;
+ /* Initial rendezvous to ensure that all CPUs have arrived */
+ if (!wait_for_cpus(&late_cpus_in)) {
+ raw_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT);
+ return false;
+ }
/*
- * Wait for all CPUs to arrive. A load will not be attempted unless all
- * CPUs show up.
- * */
- if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
- return -1;
+ * Wait for primary threads to complete. If one of them hangs due
+ * to the update, there is no way out. This is non-recoverable
+ * because the CPU might hold locks or resources and confuse the
+ * scheduler, watchdogs etc. There is no way to safely evacuate the
+ * machine.
+ */
+ if (wait_for_ctrl())
+ return true;
+ instrumentation_begin();
+ panic("Microcode load: Primary CPU %d timed out\n", ctrl_cpu);
+ instrumentation_end();
+}
+
+/*
+ * Protected against instrumentation up to the point where the primary
+ * thread completed the update. See microcode_nmi_handler() for details.
+ */
+static noinstr void load_secondary(unsigned int cpu)
+{
+ unsigned int ctrl_cpu = raw_cpu_read(ucode_ctrl.ctrl_cpu);
+ enum ucode_state ret;
+
+ if (!load_secondary_wait(ctrl_cpu)) {
+ instrumentation_begin();
+ pr_err_once("load: %d CPUs timed out\n",
+ atomic_read(&late_cpus_in) - 1);
+ instrumentation_end();
+ return;
+ }
+
+ /* Primary thread completed. Allow to invoke instrumentable code */
+ instrumentation_begin();
/*
- * On an SMT system, it suffices to load the microcode on one sibling of
- * the core because the microcode engine is shared between the threads.
- * Synchronization still needs to take place so that no concurrent
- * loading attempts happen on multiple threads of an SMT core. See
- * below.
+ * If the primary succeeded then invoke the apply() callback,
+ * otherwise copy the state from the primary thread.
*/
- if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu)
- err = microcode_ops->apply_microcode(cpu);
+ if (this_cpu_read(ucode_ctrl.ctrl) == SCTRL_APPLY)
+ ret = microcode_ops->apply_microcode(cpu);
else
- goto wait_for_siblings;
+ ret = per_cpu(ucode_ctrl.result, ctrl_cpu);
- if (err >= UCODE_NFOUND) {
- if (err == UCODE_ERROR) {
- pr_warn("Error reloading microcode on CPU %d\n", cpu);
- ret = -1;
- }
+ this_cpu_write(ucode_ctrl.result, ret);
+ this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE);
+ instrumentation_end();
+}
+
+static void __load_primary(unsigned int cpu)
+{
+ struct cpumask *secondaries = topology_sibling_cpumask(cpu);
+ enum sibling_ctrl ctrl;
+ enum ucode_state ret;
+ unsigned int sibling;
+
+ /* Initial rendezvous to ensure that all CPUs have arrived */
+ if (!wait_for_cpus(&late_cpus_in)) {
+ this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT);
+ pr_err_once("load: %d CPUs timed out\n", atomic_read(&late_cpus_in) - 1);
+ return;
}
-wait_for_siblings:
- if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC))
- panic("Timeout during microcode update!\n");
+ ret = microcode_ops->apply_microcode(cpu);
+ this_cpu_write(ucode_ctrl.result, ret);
+ this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE);
/*
- * At least one thread has completed update on each core.
- * For others, simply call the update to make sure the
- * per-cpu cpuinfo can be updated with right microcode
- * revision.
+ * If the update was successful, let the siblings run the apply()
+ * callback. If not, tell them it's done. This also covers the
+ * case where the CPU has uniform loading at package or system
+ * scope implemented but does not advertise it.
*/
- if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu)
- err = microcode_ops->apply_microcode(cpu);
+ if (ret == UCODE_UPDATED || ret == UCODE_OK)
+ ctrl = SCTRL_APPLY;
+ else
+ ctrl = SCTRL_DONE;
+
+ for_each_cpu(sibling, secondaries) {
+ if (sibling != cpu)
+ per_cpu(ucode_ctrl.ctrl, sibling) = ctrl;
+ }
+}
+
+static bool kick_offline_cpus(unsigned int nr_offl)
+{
+ unsigned int cpu, timeout;
+
+ for_each_cpu(cpu, &cpu_offline_mask) {
+ /* Enable the rendezvous handler and send NMI */
+ per_cpu(ucode_ctrl.nmi_enabled, cpu) = true;
+ apic_send_nmi_to_offline_cpu(cpu);
+ }
+
+ /* Wait for them to arrive */
+ for (timeout = 0; timeout < (USEC_PER_SEC / 2); timeout++) {
+ if (atomic_read(&offline_in_nmi) == nr_offl)
+ return true;
+ udelay(1);
+ }
+ /* Let the others time out */
+ return false;
+}
+
+static void release_offline_cpus(void)
+{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, &cpu_offline_mask)
+ per_cpu(ucode_ctrl.ctrl, cpu) = SCTRL_DONE;
+}
+
+static void load_primary(unsigned int cpu)
+{
+ unsigned int nr_offl = cpumask_weight(&cpu_offline_mask);
+ bool proceed = true;
+
+ /* Kick soft-offlined SMT siblings if required */
+ if (!cpu && nr_offl)
+ proceed = kick_offline_cpus(nr_offl);
+
+ /* If the soft-offlined CPUs did not respond, abort */
+ if (proceed)
+ __load_primary(cpu);
- return ret;
+ /* Unconditionally release soft-offlined SMT siblings if required */
+ if (!cpu && nr_offl)
+ release_offline_cpus();
}
/*
- * Reload microcode late on all CPUs. Wait for a sec until they
- * all gather together.
+ * Minimal stub rendezvous handler for soft-offlined CPUs which participate
+ * in the NMI rendezvous to protect against a concurrent NMI on affected
+ * CPUs.
*/
-static int microcode_reload_late(void)
+void noinstr microcode_offline_nmi_handler(void)
{
- int old = boot_cpu_data.microcode, ret;
+ if (!raw_cpu_read(ucode_ctrl.nmi_enabled))
+ return;
+ raw_cpu_write(ucode_ctrl.nmi_enabled, false);
+ raw_cpu_write(ucode_ctrl.result, UCODE_OFFLINE);
+ raw_atomic_inc(&offline_in_nmi);
+ wait_for_ctrl();
+}
+
+static noinstr bool microcode_update_handler(void)
+{
+ unsigned int cpu = raw_smp_processor_id();
+
+ if (raw_cpu_read(ucode_ctrl.ctrl_cpu) == cpu) {
+ instrumentation_begin();
+ load_primary(cpu);
+ instrumentation_end();
+ } else {
+ load_secondary(cpu);
+ }
+
+ instrumentation_begin();
+ touch_nmi_watchdog();
+ instrumentation_end();
+
+ return true;
+}
+
+/*
+ * Protection against instrumentation is required for CPUs which are not
+ * safe against an NMI which is delivered to the secondary SMT sibling
+ * while the primary thread updates the microcode. Instrumentation can end
+ * up in #INT3, #DB and #PF. The IRET from those exceptions reenables NMI
+ * which is the opposite of what the NMI rendezvous is trying to achieve.
+ *
+ * The primary thread is safe versus instrumentation as the actual
+ * microcode update handles this correctly. It's only the sibling code
+ * path which must be NMI safe until the primary thread completed the
+ * update.
+ */
+bool noinstr microcode_nmi_handler(void)
+{
+ if (!raw_cpu_read(ucode_ctrl.nmi_enabled))
+ return false;
+
+ raw_cpu_write(ucode_ctrl.nmi_enabled, false);
+ return microcode_update_handler();
+}
+
+static int load_cpus_stopped(void *unused)
+{
+ if (microcode_ops->use_nmi) {
+ /* Enable the NMI handler and raise NMI */
+ this_cpu_write(ucode_ctrl.nmi_enabled, true);
+ apic->send_IPI(smp_processor_id(), NMI_VECTOR);
+ } else {
+ /* Just invoke the handler directly */
+ microcode_update_handler();
+ }
+ return 0;
+}
+
+static int load_late_stop_cpus(bool is_safe)
+{
+ unsigned int cpu, updated = 0, failed = 0, timedout = 0, siblings = 0;
+ unsigned int nr_offl, offline = 0;
+ int old_rev = boot_cpu_data.microcode;
struct cpuinfo_x86 prev_info;
- pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n");
- pr_err("You should switch to early loading, if possible.\n");
+ if (!is_safe) {
+ pr_err("Late microcode loading without minimal revision check.\n");
+ pr_err("You should switch to early loading, if possible.\n");
+ }
- atomic_set(&late_cpus_in, 0);
- atomic_set(&late_cpus_out, 0);
+ atomic_set(&late_cpus_in, num_online_cpus());
+ atomic_set(&offline_in_nmi, 0);
+ loops_per_usec = loops_per_jiffy / (TICK_NSEC / 1000);
/*
* Take a snapshot before the microcode update in order to compare and
@@ -448,52 +556,162 @@ static int microcode_reload_late(void)
*/
store_cpu_caps(&prev_info);
- ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
- if (!ret) {
- pr_info("Reload succeeded, microcode revision: 0x%x -> 0x%x\n",
- old, boot_cpu_data.microcode);
- microcode_check(&prev_info);
- } else {
- pr_info("Reload failed, current microcode revision: 0x%x\n",
- boot_cpu_data.microcode);
+ if (microcode_ops->use_nmi)
+ static_branch_enable_cpuslocked(&microcode_nmi_handler_enable);
+
+ stop_machine_cpuslocked(load_cpus_stopped, NULL, cpu_online_mask);
+
+ if (microcode_ops->use_nmi)
+ static_branch_disable_cpuslocked(&microcode_nmi_handler_enable);
+
+ /* Analyze the results */
+ for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) {
+ switch (per_cpu(ucode_ctrl.result, cpu)) {
+ case UCODE_UPDATED: updated++; break;
+ case UCODE_TIMEOUT: timedout++; break;
+ case UCODE_OK: siblings++; break;
+ case UCODE_OFFLINE: offline++; break;
+ default: failed++; break;
+ }
+ }
+
+ if (microcode_ops->finalize_late_load)
+ microcode_ops->finalize_late_load(!updated);
+
+ if (!updated) {
+ /* Nothing changed. */
+ if (!failed && !timedout)
+ return 0;
+
+ nr_offl = cpumask_weight(&cpu_offline_mask);
+ if (offline < nr_offl) {
+ pr_warn("%u offline siblings did not respond.\n",
+ nr_offl - atomic_read(&offline_in_nmi));
+ return -EIO;
+ }
+ pr_err("update failed: %u CPUs failed %u CPUs timed out\n",
+ failed, timedout);
+ return -EIO;
+ }
+
+ if (!is_safe || failed || timedout)
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+
+ pr_info("load: updated on %u primary CPUs with %u siblings\n", updated, siblings);
+ if (failed || timedout) {
+ pr_err("load incomplete. %u CPUs timed out or failed\n",
+ num_online_cpus() - (updated + siblings));
+ }
+ pr_info("revision: 0x%x -> 0x%x\n", old_rev, boot_cpu_data.microcode);
+ microcode_check(&prev_info);
+
+ return updated + siblings == num_online_cpus() ? 0 : -EIO;
+}
+
+/*
+ * This function does two things:
+ *
+ * 1) Ensure that all required CPUs which are present and have been booted
+ * once are online.
+ *
+ * To pass this check, all primary threads must be online.
+ *
+ * If the microcode load is not safe against NMI then all SMT threads
+ * must be online as well because they still react to NMIs when they are
+ * soft-offlined and parked in one of the play_dead() variants. So if a
+ * NMI hits while the primary thread updates the microcode the resulting
+ * behaviour is undefined. The default play_dead() implementation on
+ * modern CPUs uses MWAIT, which is also not guaranteed to be safe
+ * against a microcode update which affects MWAIT.
+ *
+ * As soft-offlined CPUs still react on NMIs, the SMT sibling
+ * restriction can be lifted when the vendor driver signals to use NMI
+ * for rendezvous and the APIC provides a mechanism to send an NMI to a
+ * soft-offlined CPU. The soft-offlined CPUs are then able to
+ * participate in the rendezvous in a trivial stub handler.
+ *
+ * 2) Initialize the per CPU control structure and create a cpumask
+ * which contains "offline"; secondary threads, so they can be handled
+ * correctly by a control CPU.
+ */
+static bool setup_cpus(void)
+{
+ struct microcode_ctrl ctrl = { .ctrl = SCTRL_WAIT, .result = -1, };
+ bool allow_smt_offline;
+ unsigned int cpu;
+
+ allow_smt_offline = microcode_ops->nmi_safe ||
+ (microcode_ops->use_nmi && apic->nmi_to_offline_cpu);
+
+ cpumask_clear(&cpu_offline_mask);
+
+ for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) {
+ /*
+ * Offline CPUs sit in one of the play_dead() functions
+ * with interrupts disabled, but they still react on NMIs
+ * and execute arbitrary code. Also MWAIT being updated
+ * while the offline CPU sits there is not necessarily safe
+ * on all CPU variants.
+ *
+ * Mark them in the offline_cpus mask which will be handled
+ * by CPU0 later in the update process.
+ *
+ * Ensure that the primary thread is online so that it is
+ * guaranteed that all cores are updated.
+ */
+ if (!cpu_online(cpu)) {
+ if (topology_is_primary_thread(cpu) || !allow_smt_offline) {
+ pr_err("CPU %u not online, loading aborted\n", cpu);
+ return false;
+ }
+ cpumask_set_cpu(cpu, &cpu_offline_mask);
+ per_cpu(ucode_ctrl, cpu) = ctrl;
+ continue;
+ }
+
+ /*
+ * Initialize the per CPU state. This is core scope for now,
+ * but prepared to take package or system scope into account.
+ */
+ ctrl.ctrl_cpu = cpumask_first(topology_sibling_cpumask(cpu));
+ per_cpu(ucode_ctrl, cpu) = ctrl;
}
+ return true;
+}
- return ret;
+static int load_late_locked(void)
+{
+ if (!setup_cpus())
+ return -EBUSY;
+
+ switch (microcode_ops->request_microcode_fw(0, &microcode_pdev->dev)) {
+ case UCODE_NEW:
+ return load_late_stop_cpus(false);
+ case UCODE_NEW_SAFE:
+ return load_late_stop_cpus(true);
+ case UCODE_NFOUND:
+ return -ENOENT;
+ default:
+ return -EBADFD;
+ }
}
static ssize_t reload_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
- enum ucode_state tmp_ret = UCODE_OK;
- int bsp = boot_cpu_data.cpu_index;
unsigned long val;
- ssize_t ret = 0;
+ ssize_t ret;
ret = kstrtoul(buf, 0, &val);
if (ret || val != 1)
return -EINVAL;
cpus_read_lock();
-
- ret = check_online_cpus();
- if (ret)
- goto put;
-
- tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev);
- if (tmp_ret != UCODE_NEW)
- goto put;
-
- ret = microcode_reload_late();
-put:
+ ret = load_late_locked();
cpus_read_unlock();
- if (ret == 0)
- ret = size;
-
- add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
-
- return ret;
+ return ret ? : size;
}
static DEVICE_ATTR_WO(reload);
@@ -535,17 +753,6 @@ static void microcode_fini_cpu(int cpu)
microcode_ops->microcode_fini_cpu(cpu);
}
-static enum ucode_state microcode_init_cpu(int cpu)
-{
- struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-
- memset(uci, 0, sizeof(*uci));
-
- microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig);
-
- return microcode_ops->apply_microcode(cpu);
-}
-
/**
* microcode_bsp_resume - Update boot CPU microcode during resume.
*/
@@ -564,19 +771,18 @@ static struct syscore_ops mc_syscore_ops = {
.resume = microcode_bsp_resume,
};
-static int mc_cpu_starting(unsigned int cpu)
-{
- enum ucode_state err = microcode_ops->apply_microcode(cpu);
-
- pr_debug("%s: CPU%d, err: %d\n", __func__, cpu, err);
-
- return err == UCODE_ERROR;
-}
-
static int mc_cpu_online(unsigned int cpu)
{
+ struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
struct device *dev = get_cpu_device(cpu);
+ memset(uci, 0, sizeof(*uci));
+
+ microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig);
+ cpu_data(cpu).microcode = uci->cpu_sig.rev;
+ if (!cpu)
+ boot_cpu_data.microcode = uci->cpu_sig.rev;
+
if (sysfs_create_group(&dev->kobj, &mc_attr_group))
pr_err("Failed to create group for CPU%d\n", cpu);
return 0;
@@ -584,33 +790,13 @@ static int mc_cpu_online(unsigned int cpu)
static int mc_cpu_down_prep(unsigned int cpu)
{
- struct device *dev;
-
- dev = get_cpu_device(cpu);
+ struct device *dev = get_cpu_device(cpu);
microcode_fini_cpu(cpu);
-
- /* Suspend is in progress, only remove the interface */
sysfs_remove_group(&dev->kobj, &mc_attr_group);
- pr_debug("%s: CPU%d\n", __func__, cpu);
-
return 0;
}
-static void setup_online_cpu(struct work_struct *work)
-{
- int cpu = smp_processor_id();
- enum ucode_state err;
-
- err = microcode_init_cpu(cpu);
- if (err == UCODE_ERROR) {
- pr_err("Error applying microcode on CPU%d\n", cpu);
- return;
- }
-
- mc_cpu_online(cpu);
-}
-
static struct attribute *cpu_root_microcode_attrs[] = {
#ifdef CONFIG_MICROCODE_LATE_LOADING
&dev_attr_reload.attr,
@@ -656,14 +842,9 @@ static int __init microcode_init(void)
}
}
- /* Do per-CPU setup */
- schedule_on_each_cpu(setup_online_cpu);
-
register_syscore_ops(&mc_syscore_ops);
- cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting",
- mc_cpu_starting, NULL);
- cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
- mc_cpu_online, mc_cpu_down_prep);
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
+ mc_cpu_online, mc_cpu_down_prep);
pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);
@@ -674,5 +855,4 @@ static int __init microcode_init(void)
return error;
}
-fs_initcall(save_microcode_in_initrd);
late_initcall(microcode_init);
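The core.c rework above replaces the old __reload_late() double-rendezvous with a per-CPU control word: the primary thread of each core applies the update and then flips its SMT siblings from SCTRL_WAIT to SCTRL_APPLY (apply too) or SCTRL_DONE (adopt the primary's result). The fragment below is a minimal user-space sketch of that hand-off, not kernel code; apply_model() and the R_* values are stand-ins for microcode_ops->apply_microcode() and the UCODE_* states, and the timeout handling, NMI rendezvous and stop_machine_cpuslocked() context are deliberately left out. Late loading itself is still driven by writing 1 to the reload attribute created by DEVICE_ATTR_WO(reload) above.

    #include <stdio.h>

    enum sibling_ctrl { SCTRL_WAIT, SCTRL_APPLY, SCTRL_DONE };
    enum result { R_OK, R_UPDATED, R_ERROR };

    struct slot { enum sibling_ctrl ctrl; enum result res; };
    static struct slot slots[2] = { { SCTRL_WAIT, R_OK }, { SCTRL_WAIT, R_OK } };

    /* stand-in for microcode_ops->apply_microcode() */
    static enum result apply_model(int cpu) { (void)cpu; return R_UPDATED; }

    /* primary thread: apply, publish the result, then release the sibling */
    static void primary(int cpu, int sibling)
    {
            slots[cpu].res  = apply_model(cpu);
            slots[cpu].ctrl = SCTRL_DONE;
            /* only a successful primary lets the sibling run apply_model() itself */
            slots[sibling].ctrl = (slots[cpu].res == R_UPDATED) ? SCTRL_APPLY : SCTRL_DONE;
    }

    /* SMT sibling: wait to be released, then apply or adopt the primary's result */
    static void secondary(int cpu, int ctrl_cpu)
    {
            while (slots[cpu].ctrl == SCTRL_WAIT)
                    ;       /* the real code bounds this spin with a timeout */
            slots[cpu].res = (slots[cpu].ctrl == SCTRL_APPLY) ? apply_model(cpu)
                                                              : slots[ctrl_cpu].res;
            slots[cpu].ctrl = SCTRL_DONE;
    }

    int main(void)
    {
            primary(0, 1);          /* in the kernel both run under stop_machine() */
            secondary(1, 0);
            printf("cpu1: ctrl=%d res=%d\n", slots[1].ctrl, slots[1].res);
            return 0;
    }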
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 94dd6af9c963..6024feb98d29 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -14,7 +14,6 @@
#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -32,11 +31,14 @@
static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
+#define UCODE_BSP_LOADED ((struct microcode_intel *)0x1UL)
+
/* Current microcode patch used in early patching on the APs. */
-static struct microcode_intel *intel_ucode_patch;
+static struct microcode_intel *ucode_patch_va __read_mostly;
+static struct microcode_intel *ucode_patch_late __read_mostly;
/* last level cache size per core */
-static int llc_size_per_core;
+static unsigned int llc_size_per_core __ro_after_init;
/* microcode format is extended from prescott processors */
struct extended_signature {
@@ -66,60 +68,52 @@ static inline unsigned int exttable_size(struct extended_sigtable *et)
return et->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE;
}
-int intel_cpu_collect_info(struct ucode_cpu_info *uci)
+void intel_collect_cpu_info(struct cpu_signature *sig)
{
- unsigned int val[2];
- unsigned int family, model;
- struct cpu_signature csig = { 0 };
- unsigned int eax, ebx, ecx, edx;
-
- memset(uci, 0, sizeof(*uci));
-
- eax = 0x00000001;
- ecx = 0;
- native_cpuid(&eax, &ebx, &ecx, &edx);
- csig.sig = eax;
+ sig->sig = cpuid_eax(1);
+ sig->pf = 0;
+ sig->rev = intel_get_microcode_revision();
- family = x86_family(eax);
- model = x86_model(eax);
+ if (x86_model(sig->sig) >= 5 || x86_family(sig->sig) > 6) {
+ unsigned int val[2];
- if (model >= 5 || family > 6) {
/* get processor flags from MSR 0x17 */
native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
- csig.pf = 1 << ((val[1] >> 18) & 7);
+ sig->pf = 1 << ((val[1] >> 18) & 7);
}
+}
+EXPORT_SYMBOL_GPL(intel_collect_cpu_info);
- csig.rev = intel_get_microcode_revision();
-
- uci->cpu_sig = csig;
+static inline bool cpu_signatures_match(struct cpu_signature *s1, unsigned int sig2,
+ unsigned int pf2)
+{
+ if (s1->sig != sig2)
+ return false;
- return 0;
+ /* Processor flags are either both 0 or they intersect. */
+ return ((!s1->pf && !pf2) || (s1->pf & pf2));
}
-EXPORT_SYMBOL_GPL(intel_cpu_collect_info);
-/*
- * Returns 1 if update has been found, 0 otherwise.
- */
-int intel_find_matching_signature(void *mc, unsigned int csig, int cpf)
+bool intel_find_matching_signature(void *mc, struct cpu_signature *sig)
{
struct microcode_header_intel *mc_hdr = mc;
- struct extended_sigtable *ext_hdr;
struct extended_signature *ext_sig;
+ struct extended_sigtable *ext_hdr;
int i;
- if (intel_cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
- return 1;
+ if (cpu_signatures_match(sig, mc_hdr->sig, mc_hdr->pf))
+ return true;
/* Look for ext. headers: */
if (get_totalsize(mc_hdr) <= intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE)
- return 0;
+ return false;
ext_hdr = mc + intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE;
ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;
for (i = 0; i < ext_hdr->count; i++) {
- if (intel_cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
- return 1;
+ if (cpu_signatures_match(sig, ext_sig->sig, ext_sig->pf))
+ return true;
ext_sig++;
}
- return 0;
+ return false;
@@ -240,264 +234,91 @@ int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type)
}
EXPORT_SYMBOL_GPL(intel_microcode_sanity_check);
-/*
- * Returns 1 if update has been found, 0 otherwise.
- */
-static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
+static void update_ucode_pointer(struct microcode_intel *mc)
{
- struct microcode_header_intel *mc_hdr = mc;
-
- if (mc_hdr->rev <= new_rev)
- return 0;
-
- return intel_find_matching_signature(mc, csig, cpf);
-}
-
-static struct ucode_patch *memdup_patch(void *data, unsigned int size)
-{
- struct ucode_patch *p;
-
- p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
- if (!p)
- return NULL;
-
- p->data = kmemdup(data, size, GFP_KERNEL);
- if (!p->data) {
- kfree(p);
- return NULL;
- }
-
- return p;
-}
-
-static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigned int size)
-{
- struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
- struct ucode_patch *iter, *tmp, *p = NULL;
- bool prev_found = false;
- unsigned int sig, pf;
-
- mc_hdr = (struct microcode_header_intel *)data;
-
- list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
- mc_saved_hdr = (struct microcode_header_intel *)iter->data;
- sig = mc_saved_hdr->sig;
- pf = mc_saved_hdr->pf;
-
- if (intel_find_matching_signature(data, sig, pf)) {
- prev_found = true;
-
- if (mc_hdr->rev <= mc_saved_hdr->rev)
- continue;
-
- p = memdup_patch(data, size);
- if (!p)
- pr_err("Error allocating buffer %p\n", data);
- else {
- list_replace(&iter->plist, &p->plist);
- kfree(iter->data);
- kfree(iter);
- }
- }
- }
+ kvfree(ucode_patch_va);
/*
- * There weren't any previous patches found in the list cache; save the
- * newly found.
+ * Save the virtual address for early loading and for eventual free
+ * on late loading.
*/
- if (!prev_found) {
- p = memdup_patch(data, size);
- if (!p)
- pr_err("Error allocating buffer for %p\n", data);
- else
- list_add_tail(&p->plist, &microcode_cache);
- }
-
- if (!p)
- return;
+ ucode_patch_va = mc;
+}
- if (!intel_find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf))
- return;
+static void save_microcode_patch(struct microcode_intel *patch)
+{
+ unsigned int size = get_totalsize(&patch->hdr);
+ struct microcode_intel *mc;
- /*
- * Save for early loading. On 32-bit, that needs to be a physical
- * address as the APs are running from physical addresses, before
- * paging has been enabled.
- */
- if (IS_ENABLED(CONFIG_X86_32))
- intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
+ mc = kvmemdup(patch, size, GFP_KERNEL);
+ if (mc)
+ update_ucode_pointer(mc);
else
- intel_ucode_patch = p->data;
+ pr_err("Unable to allocate microcode memory size: %u\n", size);
}
-/*
- * Get microcode matching with BSP's model. Only CPUs with the same model as
- * BSP can stay in the platform.
- */
-static struct microcode_intel *
-scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
+/* Scan blob for microcode matching the boot CPUs family, model, stepping */
+static __init struct microcode_intel *scan_microcode(void *data, size_t size,
+ struct ucode_cpu_info *uci,
+ bool save)
{
struct microcode_header_intel *mc_header;
struct microcode_intel *patch = NULL;
+ u32 cur_rev = uci->cpu_sig.rev;
unsigned int mc_size;
- while (size) {
- if (size < sizeof(struct microcode_header_intel))
- break;
-
+ for (; size >= sizeof(struct microcode_header_intel); size -= mc_size, data += mc_size) {
mc_header = (struct microcode_header_intel *)data;
mc_size = get_totalsize(mc_header);
- if (!mc_size ||
- mc_size > size ||
+ if (!mc_size || mc_size > size ||
intel_microcode_sanity_check(data, false, MC_HEADER_TYPE_MICROCODE) < 0)
break;
- size -= mc_size;
-
- if (!intel_find_matching_signature(data, uci->cpu_sig.sig,
- uci->cpu_sig.pf)) {
- data += mc_size;
+ if (!intel_find_matching_signature(data, &uci->cpu_sig))
continue;
- }
+ /*
+ * For saving the early microcode, find the matching revision which
+ * was loaded on the BSP.
+ *
+ * On the BSP during early boot, find a newer revision than
+ * actually loaded in the CPU.
+ */
if (save) {
- save_microcode_patch(uci, data, mc_size);
- goto next;
- }
-
-
- if (!patch) {
- if (!has_newer_microcode(data,
- uci->cpu_sig.sig,
- uci->cpu_sig.pf,
- uci->cpu_sig.rev))
- goto next;
-
- } else {
- struct microcode_header_intel *phdr = &patch->hdr;
-
- if (!has_newer_microcode(data,
- phdr->sig,
- phdr->pf,
- phdr->rev))
- goto next;
+ if (cur_rev != mc_header->rev)
+ continue;
+ } else if (cur_rev >= mc_header->rev) {
+ continue;
}
- /* We have a newer patch, save it. */
patch = data;
-
-next:
- data += mc_size;
- }
-
- if (size)
- return NULL;
-
- return patch;
-}
-
-static bool load_builtin_intel_microcode(struct cpio_data *cp)
-{
- unsigned int eax = 1, ebx, ecx = 0, edx;
- struct firmware fw;
- char name[30];
-
- if (IS_ENABLED(CONFIG_X86_32))
- return false;
-
- native_cpuid(&eax, &ebx, &ecx, &edx);
-
- sprintf(name, "intel-ucode/%02x-%02x-%02x",
- x86_family(eax), x86_model(eax), x86_stepping(eax));
-
- if (firmware_request_builtin(&fw, name)) {
- cp->size = fw.size;
- cp->data = (void *)fw.data;
- return true;
- }
-
- return false;
-}
-
-static void print_ucode_info(int old_rev, int new_rev, unsigned int date)
-{
- pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n",
- old_rev,
- new_rev,
- date & 0xffff,
- date >> 24,
- (date >> 16) & 0xff);
-}
-
-#ifdef CONFIG_X86_32
-
-static int delay_ucode_info;
-static int current_mc_date;
-static int early_old_rev;
-
-/*
- * Print early updated ucode info after printk works. This is delayed info dump.
- */
-void show_ucode_info_early(void)
-{
- struct ucode_cpu_info uci;
-
- if (delay_ucode_info) {
- intel_cpu_collect_info(&uci);
- print_ucode_info(early_old_rev, uci.cpu_sig.rev, current_mc_date);
- delay_ucode_info = 0;
+ cur_rev = mc_header->rev;
}
-}
-
-/*
- * At this point, we can not call printk() yet. Delay printing microcode info in
- * show_ucode_info_early() until printk() works.
- */
-static void print_ucode(int old_rev, int new_rev, int date)
-{
- int *delay_ucode_info_p;
- int *current_mc_date_p;
- int *early_old_rev_p;
-
- delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
- current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
- early_old_rev_p = (int *)__pa_nodebug(&early_old_rev);
-
- *delay_ucode_info_p = 1;
- *current_mc_date_p = date;
- *early_old_rev_p = old_rev;
-}
-#else
-static inline void print_ucode(int old_rev, int new_rev, int date)
-{
- print_ucode_info(old_rev, new_rev, date);
+ return size ? NULL : patch;
}
-#endif
-static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
+static enum ucode_state __apply_microcode(struct ucode_cpu_info *uci,
+ struct microcode_intel *mc,
+ u32 *cur_rev)
{
- struct microcode_intel *mc;
- u32 rev, old_rev;
+ u32 rev;
- mc = uci->mc;
if (!mc)
- return 0;
+ return UCODE_NFOUND;
/*
* Save us the MSR write below - which is a particular expensive
* operation - when the other hyperthread has updated the microcode
* already.
*/
- rev = intel_get_microcode_revision();
- if (rev >= mc->hdr.rev) {
- uci->cpu_sig.rev = rev;
+ *cur_rev = intel_get_microcode_revision();
+ if (*cur_rev >= mc->hdr.rev) {
+ uci->cpu_sig.rev = *cur_rev;
return UCODE_OK;
}
- old_rev = rev;
-
/*
* Writeback and invalidate caches before updating microcode to avoid
* internal issues depending on what the microcode is updating.
@@ -509,247 +330,182 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
rev = intel_get_microcode_revision();
if (rev != mc->hdr.rev)
- return -1;
+ return UCODE_ERROR;
uci->cpu_sig.rev = rev;
+ return UCODE_UPDATED;
+}
- if (early)
- print_ucode(old_rev, uci->cpu_sig.rev, mc->hdr.date);
- else
- print_ucode_info(old_rev, uci->cpu_sig.rev, mc->hdr.date);
+static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci)
+{
+ struct microcode_intel *mc = uci->mc;
+ enum ucode_state ret;
+ u32 cur_rev, date;
- return 0;
+ ret = __apply_microcode(uci, mc, &cur_rev);
+ if (ret == UCODE_UPDATED) {
+ date = mc->hdr.date;
+ pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n",
+ cur_rev, mc->hdr.rev, date & 0xffff, date >> 24, (date >> 16) & 0xff);
+ }
+ return ret;
}
-int __init save_microcode_in_initrd_intel(void)
+static __init bool load_builtin_intel_microcode(struct cpio_data *cp)
{
- struct ucode_cpu_info uci;
- struct cpio_data cp;
-
- /*
- * initrd is going away, clear patch ptr. We will scan the microcode one
- * last time before jettisoning and save a patch, if found. Then we will
- * update that pointer too, with a stable patch address to use when
- * resuming the cores.
- */
- intel_ucode_patch = NULL;
+ unsigned int eax = 1, ebx, ecx = 0, edx;
+ struct firmware fw;
+ char name[30];
- if (!load_builtin_intel_microcode(&cp))
- cp = find_microcode_in_initrd(ucode_path, false);
+ if (IS_ENABLED(CONFIG_X86_32))
+ return false;
- if (!(cp.data && cp.size))
- return 0;
+ native_cpuid(&eax, &ebx, &ecx, &edx);
- intel_cpu_collect_info(&uci);
+ sprintf(name, "intel-ucode/%02x-%02x-%02x",
+ x86_family(eax), x86_model(eax), x86_stepping(eax));
- scan_microcode(cp.data, cp.size, &uci, true);
- return 0;
+ if (firmware_request_builtin(&fw, name)) {
+ cp->size = fw.size;
+ cp->data = (void *)fw.data;
+ return true;
+ }
+ return false;
}
-/*
- * @res_patch, output: a pointer to the patch we found.
- */
-static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
+static __init struct microcode_intel *get_microcode_blob(struct ucode_cpu_info *uci, bool save)
{
- static const char *path;
struct cpio_data cp;
- bool use_pa;
-
- if (IS_ENABLED(CONFIG_X86_32)) {
- path = (const char *)__pa_nodebug(ucode_path);
- use_pa = true;
- } else {
- path = ucode_path;
- use_pa = false;
- }
- /* try built-in microcode first */
if (!load_builtin_intel_microcode(&cp))
- cp = find_microcode_in_initrd(path, use_pa);
+ cp = find_microcode_in_initrd(ucode_path);
if (!(cp.data && cp.size))
return NULL;
- intel_cpu_collect_info(uci);
+ intel_collect_cpu_info(&uci->cpu_sig);
- return scan_microcode(cp.data, cp.size, uci, false);
+ return scan_microcode(cp.data, cp.size, uci, save);
}
-void __init load_ucode_intel_bsp(void)
+/*
+ * Invoked from an early init call to save the microcode blob which was
+ * selected during early boot when mm was not usable. The microcode must be
+ * saved because initrd is going away. It's an early init call so the APs
+ * just can use the pointer and do not have to scan initrd/builtin firmware
+ * again.
+ */
+static int __init save_builtin_microcode(void)
{
- struct microcode_intel *patch;
struct ucode_cpu_info uci;
- patch = __load_ucode_intel(&uci);
- if (!patch)
- return;
+ if (xchg(&ucode_patch_va, NULL) != UCODE_BSP_LOADED)
+ return 0;
- uci.mc = patch;
+ if (dis_ucode_ldr || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return 0;
- apply_microcode_early(&uci, true);
+ uci.mc = get_microcode_blob(&uci, true);
+ if (uci.mc)
+ save_microcode_patch(uci.mc);
+ return 0;
}
+early_initcall(save_builtin_microcode);
-void load_ucode_intel_ap(void)
+/* Load microcode on BSP from initrd or builtin blobs */
+void __init load_ucode_intel_bsp(void)
{
- struct microcode_intel *patch, **iup;
struct ucode_cpu_info uci;
- if (IS_ENABLED(CONFIG_X86_32))
- iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch);
- else
- iup = &intel_ucode_patch;
-
- if (!*iup) {
- patch = __load_ucode_intel(&uci);
- if (!patch)
- return;
-
- *iup = patch;
- }
-
- uci.mc = *iup;
-
- apply_microcode_early(&uci, true);
+ uci.mc = get_microcode_blob(&uci, false);
+ if (uci.mc && apply_microcode_early(&uci) == UCODE_UPDATED)
+ ucode_patch_va = UCODE_BSP_LOADED;
}
-static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)
+void load_ucode_intel_ap(void)
{
- struct microcode_header_intel *phdr;
- struct ucode_patch *iter, *tmp;
-
- list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
-
- phdr = (struct microcode_header_intel *)iter->data;
-
- if (phdr->rev <= uci->cpu_sig.rev)
- continue;
-
- if (!intel_find_matching_signature(phdr,
- uci->cpu_sig.sig,
- uci->cpu_sig.pf))
- continue;
+ struct ucode_cpu_info uci;
- return iter->data;
- }
- return NULL;
+ uci.mc = ucode_patch_va;
+ if (uci.mc)
+ apply_microcode_early(&uci);
}
+/* Reload microcode on resume */
void reload_ucode_intel(void)
{
- struct microcode_intel *p;
- struct ucode_cpu_info uci;
-
- intel_cpu_collect_info(&uci);
-
- p = find_patch(&uci);
- if (!p)
- return;
-
- uci.mc = p;
+ struct ucode_cpu_info uci = { .mc = ucode_patch_va, };
- apply_microcode_early(&uci, false);
+ if (uci.mc)
+ apply_microcode_early(&uci);
}
static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
{
- struct cpuinfo_x86 *c = &cpu_data(cpu_num);
- unsigned int val[2];
-
- memset(csig, 0, sizeof(*csig));
-
- csig->sig = cpuid_eax(0x00000001);
-
- if ((c->x86_model >= 5) || (c->x86 > 6)) {
- /* get processor flags from MSR 0x17 */
- rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
- csig->pf = 1 << ((val[1] >> 18) & 7);
- }
-
- csig->rev = c->microcode;
-
+ intel_collect_cpu_info(csig);
return 0;
}
-static enum ucode_state apply_microcode_intel(int cpu)
+static enum ucode_state apply_microcode_late(int cpu)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
- struct cpuinfo_x86 *c = &cpu_data(cpu);
- bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
- struct microcode_intel *mc;
+ struct microcode_intel *mc = ucode_patch_late;
enum ucode_state ret;
- static int prev_rev;
- u32 rev;
+ u32 cur_rev;
- /* We should bind the task to the CPU */
- if (WARN_ON(raw_smp_processor_id() != cpu))
+ if (WARN_ON_ONCE(smp_processor_id() != cpu))
return UCODE_ERROR;
- /* Look for a newer patch in our cache: */
- mc = find_patch(uci);
- if (!mc) {
- mc = uci->mc;
- if (!mc)
- return UCODE_NFOUND;
- }
+ ret = __apply_microcode(uci, mc, &cur_rev);
+ if (ret != UCODE_UPDATED && ret != UCODE_OK)
+ return ret;
- /*
- * Save us the MSR write below - which is a particular expensive
- * operation - when the other hyperthread has updated the microcode
- * already.
- */
- rev = intel_get_microcode_revision();
- if (rev >= mc->hdr.rev) {
- ret = UCODE_OK;
- goto out;
+ if (!cpu && uci->cpu_sig.rev != cur_rev) {
+ pr_info("Updated to revision 0x%x, date = %04x-%02x-%02x\n",
+ uci->cpu_sig.rev, mc->hdr.date & 0xffff, mc->hdr.date >> 24,
+ (mc->hdr.date >> 16) & 0xff);
}
- /*
- * Writeback and invalidate caches before updating microcode to avoid
- * internal issues depending on what the microcode is updating.
- */
- native_wbinvd();
+ cpu_data(cpu).microcode = uci->cpu_sig.rev;
+ if (!cpu)
+ boot_cpu_data.microcode = uci->cpu_sig.rev;
- /* write microcode via MSR 0x79 */
- wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
+ return ret;
+}
- rev = intel_get_microcode_revision();
+static bool ucode_validate_minrev(struct microcode_header_intel *mc_header)
+{
+ int cur_rev = boot_cpu_data.microcode;
- if (rev != mc->hdr.rev) {
- pr_err("CPU%d update to revision 0x%x failed\n",
- cpu, mc->hdr.rev);
- return UCODE_ERROR;
+ /*
+ * When late-loading, ensure the header declares a minimum revision
+ * required to perform a late-load. The previously reserved field
+ * is 0 in older microcode blobs.
+ */
+ if (!mc_header->min_req_ver) {
+ pr_info("Unsafe microcode update: Microcode header does not specify a required min version\n");
+ return false;
}
- if (bsp && rev != prev_rev) {
- pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
- rev,
- mc->hdr.date & 0xffff,
- mc->hdr.date >> 24,
- (mc->hdr.date >> 16) & 0xff);
- prev_rev = rev;
+ /*
+ * Check whether the current revision is greater than or equal to
+ * the minimum revision specified in the header.
+ */
+ if (cur_rev < mc_header->min_req_ver) {
+ pr_info("Unsafe microcode update: Current revision 0x%x too old\n", cur_rev);
+ pr_info("Current should be at 0x%x or higher. Use early loading instead\n", mc_header->min_req_ver);
+ return false;
}
-
- ret = UCODE_UPDATED;
-
-out:
- uci->cpu_sig.rev = rev;
- c->microcode = rev;
-
- /* Update boot_cpu_data's revision too, if we're on the BSP: */
- if (bsp)
- boot_cpu_data.microcode = rev;
-
- return ret;
+ return true;
}
-static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
+static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
- unsigned int curr_mc_size = 0, new_mc_size = 0;
- enum ucode_state ret = UCODE_OK;
- int new_rev = uci->cpu_sig.rev;
+ bool is_safe, new_is_safe = false;
+ int cur_rev = uci->cpu_sig.rev;
+ unsigned int curr_mc_size = 0;
u8 *new_mc = NULL, *mc = NULL;
- unsigned int csig, cpf;
while (iov_iter_count(iter)) {
struct microcode_header_intel mc_header;
@@ -758,68 +514,66 @@ static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
pr_err("error! Truncated or inaccessible header in microcode data file\n");
- break;
+ goto fail;
}
mc_size = get_totalsize(&mc_header);
if (mc_size < sizeof(mc_header)) {
pr_err("error! Bad data in microcode data file (totalsize too small)\n");
- break;
+ goto fail;
}
data_size = mc_size - sizeof(mc_header);
if (data_size > iov_iter_count(iter)) {
pr_err("error! Bad data in microcode data file (truncated file?)\n");
- break;
+ goto fail;
}
/* For performance reasons, reuse mc area when possible */
if (!mc || mc_size > curr_mc_size) {
- vfree(mc);
- mc = vmalloc(mc_size);
+ kvfree(mc);
+ mc = kvmalloc(mc_size, GFP_KERNEL);
if (!mc)
- break;
+ goto fail;
curr_mc_size = mc_size;
}
memcpy(mc, &mc_header, sizeof(mc_header));
data = mc + sizeof(mc_header);
if (!copy_from_iter_full(data, data_size, iter) ||
- intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0) {
- break;
- }
+ intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0)
+ goto fail;
- csig = uci->cpu_sig.sig;
- cpf = uci->cpu_sig.pf;
- if (has_newer_microcode(mc, csig, cpf, new_rev)) {
- vfree(new_mc);
- new_rev = mc_header.rev;
- new_mc = mc;
- new_mc_size = mc_size;
- mc = NULL; /* trigger new vmalloc */
- ret = UCODE_NEW;
- }
- }
+ if (cur_rev >= mc_header.rev)
+ continue;
- vfree(mc);
+ if (!intel_find_matching_signature(mc, &uci->cpu_sig))
+ continue;
- if (iov_iter_count(iter)) {
- vfree(new_mc);
- return UCODE_ERROR;
+ is_safe = ucode_validate_minrev(&mc_header);
+ if (force_minrev && !is_safe)
+ continue;
+
+ kvfree(new_mc);
+ cur_rev = mc_header.rev;
+ new_mc = mc;
+ new_is_safe = is_safe;
+ mc = NULL;
}
+ if (iov_iter_count(iter))
+ goto fail;
+
+ kvfree(mc);
if (!new_mc)
return UCODE_NFOUND;
- vfree(uci->mc);
- uci->mc = (struct microcode_intel *)new_mc;
-
- /* Save for CPU hotplug */
- save_microcode_patch(uci, new_mc, new_mc_size);
+ ucode_patch_late = (struct microcode_intel *)new_mc;
+ return new_is_safe ? UCODE_NEW_SAFE : UCODE_NEW;
- pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
- cpu, new_rev, uci->cpu_sig.rev);
-
- return ret;
+fail:
+ kvfree(mc);
+ kvfree(new_mc);
+ return UCODE_ERROR;
}
static bool is_blacklisted(unsigned int cpu)
@@ -868,26 +622,36 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
kvec.iov_base = (void *)firmware->data;
kvec.iov_len = firmware->size;
iov_iter_kvec(&iter, ITER_SOURCE, &kvec, 1, firmware->size);
- ret = generic_load_microcode(cpu, &iter);
+ ret = parse_microcode_blobs(cpu, &iter);
release_firmware(firmware);
return ret;
}
+static void finalize_late_load(int result)
+{
+ if (!result)
+ update_ucode_pointer(ucode_patch_late);
+ else
+ kvfree(ucode_patch_late);
+ ucode_patch_late = NULL;
+}
+
static struct microcode_ops microcode_intel_ops = {
- .request_microcode_fw = request_microcode_fw,
- .collect_cpu_info = collect_cpu_info,
- .apply_microcode = apply_microcode_intel,
+ .request_microcode_fw = request_microcode_fw,
+ .collect_cpu_info = collect_cpu_info,
+ .apply_microcode = apply_microcode_late,
+ .finalize_late_load = finalize_late_load,
+ .use_nmi = IS_ENABLED(CONFIG_X86_64),
};
-static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
+static __init void calc_llc_size_per_core(struct cpuinfo_x86 *c)
{
u64 llc_size = c->x86_cache_size * 1024ULL;
do_div(llc_size, c->x86_max_cores);
-
- return (int)llc_size;
+ llc_size_per_core = (unsigned int)llc_size;
}
struct microcode_ops * __init init_intel_microcode(void)
@@ -900,7 +664,7 @@ struct microcode_ops * __init init_intel_microcode(void)
return NULL;
}
- llc_size_per_core = calc_llc_size_per_core(c);
+ calc_llc_size_per_core(c);
return &microcode_intel_ops;
}
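Two behavioural details of the Intel changes above are worth calling out: ucode_validate_minrev() only treats a late load as safe when the blob's min_req_ver field is non-zero and the currently running revision is already at or above it, and the header date printed by the pr_info() calls is decoded as year in bits 15:0, month in bits 31:24 and day in bits 23:16. The stand-alone sketch below illustrates both; minrev_ok() is a local stand-in rather than a kernel function, and the date value 0x08152023 is an arbitrary example.

    #include <stdbool.h>
    #include <stdio.h>

    /* mirrors the decision in ucode_validate_minrev(): a zero min_req_ver means
     * the blob predates the minimum-revision scheme and is treated as unsafe */
    static bool minrev_ok(unsigned int cur_rev, unsigned int min_req_ver)
    {
            if (!min_req_ver)
                    return false;
            return cur_rev >= min_req_ver;
    }

    int main(void)
    {
            unsigned int date = 0x08152023;     /* month 0x08, day 0x15, year 0x2023 */

            /* same decode as the pr_info() format above: prints "2023-08-15" */
            printf("date = %04x-%02x-%02x\n",
                   date & 0xffff, date >> 24, (date >> 16) & 0xff);

            printf("minrev_ok(0x24, 0x25) = %d\n", minrev_ok(0x24, 0x25));  /* 0 */
            printf("minrev_ok(0x25, 0x25) = %d\n", minrev_ok(0x25, 0x25));  /* 1 */
            return 0;
    }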
diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h
index bf883aa71233..f8047b12329a 100644
--- a/arch/x86/kernel/cpu/microcode/internal.h
+++ b/arch/x86/kernel/cpu/microcode/internal.h
@@ -8,43 +8,37 @@
#include <asm/cpu.h>
#include <asm/microcode.h>
-struct ucode_patch {
- struct list_head plist;
- void *data; /* Intel uses only this one */
- unsigned int size;
- u32 patch_id;
- u16 equiv_cpu;
-};
-
-extern struct list_head microcode_cache;
-
struct device;
enum ucode_state {
UCODE_OK = 0,
UCODE_NEW,
+ UCODE_NEW_SAFE,
UCODE_UPDATED,
UCODE_NFOUND,
UCODE_ERROR,
+ UCODE_TIMEOUT,
+ UCODE_OFFLINE,
};
struct microcode_ops {
enum ucode_state (*request_microcode_fw)(int cpu, struct device *dev);
-
void (*microcode_fini_cpu)(int cpu);
/*
- * The generic 'microcode_core' part guarantees that
- * the callbacks below run on a target cpu when they
- * are being called.
+ * The generic 'microcode_core' part guarantees that the callbacks
+ * below run on a target CPU when they are being called.
* See also the "Synchronization" section in microcode_core.c.
*/
- enum ucode_state (*apply_microcode)(int cpu);
- int (*collect_cpu_info)(int cpu, struct cpu_signature *csig);
+ enum ucode_state (*apply_microcode)(int cpu);
+ int (*collect_cpu_info)(int cpu, struct cpu_signature *csig);
+ void (*finalize_late_load)(int result);
+ unsigned int nmi_safe : 1,
+ use_nmi : 1;
};
extern struct ucode_cpu_info ucode_cpu_info[];
-struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa);
+struct cpio_data find_microcode_in_initrd(const char *path);
#define MAX_UCODE_COUNT 128
@@ -94,12 +88,12 @@ static inline unsigned int x86_cpuid_family(void)
return x86_family(eax);
}
-extern bool initrd_gone;
+extern bool dis_ucode_ldr;
+extern bool force_minrev;
#ifdef CONFIG_CPU_SUP_AMD
void load_ucode_amd_bsp(unsigned int family);
void load_ucode_amd_ap(unsigned int family);
-void load_ucode_amd_early(unsigned int cpuid_1_eax);
int save_microcode_in_initrd_amd(unsigned int family);
void reload_ucode_amd(unsigned int cpu);
struct microcode_ops *init_amd_microcode(void);
@@ -107,7 +101,6 @@ void exit_amd_microcode(void);
#else /* CONFIG_CPU_SUP_AMD */
static inline void load_ucode_amd_bsp(unsigned int family) { }
static inline void load_ucode_amd_ap(unsigned int family) { }
-static inline void load_ucode_amd_early(unsigned int family) { }
static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
static inline void reload_ucode_amd(unsigned int cpu) { }
static inline struct microcode_ops *init_amd_microcode(void) { return NULL; }
@@ -117,13 +110,11 @@ static inline void exit_amd_microcode(void) { }
#ifdef CONFIG_CPU_SUP_INTEL
void load_ucode_intel_bsp(void);
void load_ucode_intel_ap(void);
-int save_microcode_in_initrd_intel(void);
void reload_ucode_intel(void);
struct microcode_ops *init_intel_microcode(void);
#else /* CONFIG_CPU_SUP_INTEL */
static inline void load_ucode_intel_bsp(void) { }
static inline void load_ucode_intel_ap(void) { }
-static inline int save_microcode_in_initrd_intel(void) { return -EINVAL; }
static inline void reload_ucode_intel(void) { }
static inline struct microcode_ops *init_intel_microcode(void) { return NULL; }
#endif /* !CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 246a609f889b..de001b2146ab 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -19,6 +19,7 @@
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/bios_ebda.h>
+#include <asm/microcode.h>
#include <asm/tlbflush.h>
#include <asm/bootparam_utils.h>
@@ -29,11 +30,33 @@ static void __init i386_default_early_setup(void)
x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
}
+#ifdef CONFIG_MICROCODE_INITRD32
+unsigned long __initdata initrd_start_early;
+static pte_t __initdata *initrd_pl2p_start, *initrd_pl2p_end;
+
+static void zap_early_initrd_mapping(void)
+{
+ pte_t *pl2p = initrd_pl2p_start;
+
+ for (; pl2p < initrd_pl2p_end; pl2p++) {
+ *pl2p = (pte_t){ .pte = 0 };
+
+ if (!IS_ENABLED(CONFIG_X86_PAE))
+ *(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = (pte_t) {.pte = 0};
+ }
+}
+#else
+static inline void zap_early_initrd_mapping(void) { }
+#endif
+
asmlinkage __visible void __init __noreturn i386_start_kernel(void)
{
/* Make sure IDT is set up before any exception happens */
idt_setup_early_handler();
+ load_ucode_bsp();
+ zap_early_initrd_mapping();
+
cr4_init_shadow();
sanitize_boot_params(&boot_params);
@@ -69,52 +92,83 @@ asmlinkage __visible void __init __noreturn i386_start_kernel(void)
* to the first kernel PMD. Note the upper half of each PMD or PTE are
* always zero at this stage.
*/
-void __init mk_early_pgtbl_32(void);
-void __init mk_early_pgtbl_32(void)
-{
-#ifdef __pa
-#undef __pa
-#endif
-#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
- pte_t pte, *ptep;
- int i;
- unsigned long *ptr;
- /* Enough space to fit pagetables for the low memory linear map */
- const unsigned long limit = __pa(_end) +
- (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT);
#ifdef CONFIG_X86_PAE
- pmd_t pl2, *pl2p = (pmd_t *)__pa(initial_pg_pmd);
-#define SET_PL2(pl2, val) { (pl2).pmd = (val); }
+typedef pmd_t pl2_t;
+#define pl2_base initial_pg_pmd
+#define SET_PL2(val) { .pmd = (val), }
#else
- pgd_t pl2, *pl2p = (pgd_t *)__pa(initial_page_table);
-#define SET_PL2(pl2, val) { (pl2).pgd = (val); }
+typedef pgd_t pl2_t;
+#define pl2_base initial_page_table
+#define SET_PL2(val) { .pgd = (val), }
#endif
- ptep = (pte_t *)__pa(__brk_base);
- pte.pte = PTE_IDENT_ATTR;
-
+static __init __no_stack_protector pte_t init_map(pte_t pte, pte_t **ptep, pl2_t **pl2p,
+ const unsigned long limit)
+{
while ((pte.pte & PTE_PFN_MASK) < limit) {
+ pl2_t pl2 = SET_PL2((unsigned long)*ptep | PDE_IDENT_ATTR);
+ int i;
+
+ **pl2p = pl2;
+ if (!IS_ENABLED(CONFIG_X86_PAE)) {
+ /* Kernel PDE entry */
+ *(*pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2;
+ }
- SET_PL2(pl2, (unsigned long)ptep | PDE_IDENT_ATTR);
- *pl2p = pl2;
-#ifndef CONFIG_X86_PAE
- /* Kernel PDE entry */
- *(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2;
-#endif
for (i = 0; i < PTRS_PER_PTE; i++) {
- *ptep = pte;
+ **ptep = pte;
pte.pte += PAGE_SIZE;
- ptep++;
+ (*ptep)++;
}
-
- pl2p++;
+ (*pl2p)++;
}
+ return pte;
+}
+
+void __init __no_stack_protector mk_early_pgtbl_32(void)
+{
+ /* Enough space to fit pagetables for the low memory linear map */
+ unsigned long limit = __pa_nodebug(_end) + (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT);
+ pte_t pte, *ptep = (pte_t *)__pa_nodebug(__brk_base);
+ struct boot_params __maybe_unused *params;
+ pl2_t *pl2p = (pl2_t *)__pa_nodebug(pl2_base);
+ unsigned long *ptr;
+
+ pte.pte = PTE_IDENT_ATTR;
+ pte = init_map(pte, &ptep, &pl2p, limit);
- ptr = (unsigned long *)__pa(&max_pfn_mapped);
+ ptr = (unsigned long *)__pa_nodebug(&max_pfn_mapped);
/* Can't use pte_pfn() since it's a call with CONFIG_PARAVIRT */
*ptr = (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
- ptr = (unsigned long *)__pa(&_brk_end);
+ ptr = (unsigned long *)__pa_nodebug(&_brk_end);
*ptr = (unsigned long)ptep + PAGE_OFFSET;
-}
+#ifdef CONFIG_MICROCODE_INITRD32
+ /* Running on a hypervisor? */
+ if (native_cpuid_ecx(1) & BIT(31))
+ return;
+
+ params = (struct boot_params *)__pa_nodebug(&boot_params);
+ if (!params->hdr.ramdisk_size || !params->hdr.ramdisk_image)
+ return;
+
+ /* Save the virtual start address */
+ ptr = (unsigned long *)__pa_nodebug(&initrd_start_early);
+ *ptr = (pte.pte & PTE_PFN_MASK) + PAGE_OFFSET;
+ *ptr += ((unsigned long)params->hdr.ramdisk_image) & ~PAGE_MASK;
+
+ /* Save PLP2 for cleanup */
+ ptr = (unsigned long *)__pa_nodebug(&initrd_pl2p_start);
+ *ptr = (unsigned long)pl2p + PAGE_OFFSET;
+
+ limit = (unsigned long)params->hdr.ramdisk_image;
+ pte.pte = PTE_IDENT_ATTR | PFN_ALIGN(limit);
+ limit = (unsigned long)params->hdr.ramdisk_image + params->hdr.ramdisk_size;
+
+ init_map(pte, &ptep, &pl2p, limit);
+
+ ptr = (unsigned long *)__pa_nodebug(&initrd_pl2p_end);
+ *ptr = (unsigned long)pl2p + PAGE_OFFSET;
+#endif
+}
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index b6554212b7c7..487ac57e2c81 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -118,11 +118,6 @@ SYM_CODE_START(startup_32)
movl %eax, pa(olpc_ofw_pgd)
#endif
-#ifdef CONFIG_MICROCODE
- /* Early load ucode on BSP. */
- call load_ucode_bsp
-#endif
-
/* Create early pagetables. */
call mk_early_pgtbl_32
@@ -157,11 +152,6 @@ SYM_FUNC_START(startup_32_smp)
movl %eax,%ss
leal -__PAGE_OFFSET(%ecx),%esp
-#ifdef CONFIG_MICROCODE
- /* Early load ucode on AP. */
- call load_ucode_ap
-#endif
-
.Ldefault_entry:
movl $(CR0_STATE & ~X86_CR0_PG),%eax
movl %eax,%cr0
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 4766b6bed443..17e955ab69fe 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -33,6 +33,7 @@
#include <asm/reboot.h>
#include <asm/cache.h>
#include <asm/nospec-branch.h>
+#include <asm/microcode.h>
#include <asm/sev.h>
#define CREATE_TRACE_POINTS
@@ -343,6 +344,9 @@ static noinstr void default_do_nmi(struct pt_regs *regs)
instrumentation_begin();
+ if (microcode_nmi_handler_enabled() && microcode_nmi_handler())
+ goto out;
+
handled = nmi_handle(NMI_LOCAL, regs);
__this_cpu_add(nmi_stats.normal, handled);
if (handled) {
@@ -498,8 +502,11 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
if (IS_ENABLED(CONFIG_NMI_CHECK_CPU))
raw_atomic_long_inc(&nsp->idt_calls);
- if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id()))
+ if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id())) {
+ if (microcode_nmi_handler_enabled())
+ microcode_offline_nmi_handler();
return;
+ }
if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
this_cpu_write(nmi_state, NMI_LATCHED);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 163c35db3d04..1526747bedf2 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1167,7 +1167,7 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
- conswitchp = &vga_con;
+ vgacon_register_screen(&screen_info);
#endif
#endif
x86_init.oem.banner();
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index c4aca66f0902..2cc2aa120b4b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -272,12 +272,9 @@ static void notrace start_secondary(void *unused)
cpu_init_exception_handling();
/*
- * 32-bit systems load the microcode from the ASM startup code for
- * historical reasons.
- *
- * On 64-bit systems load it before reaching the AP alive
- * synchronization point below so it is not part of the full per
- * CPU serialized bringup part when "parallel" bringup is enabled.
+ * Load the microcode before reaching the AP alive synchronization
+ * point below so it is not part of the full per CPU serialized
+ * bringup part when "parallel" bringup is enabled.
*
* That's even safe when hyperthreading is enabled in the CPU as
* the core code starts the primary threads first and leaves the
@@ -290,8 +287,7 @@ static void notrace start_secondary(void *unused)
* CPUID, MSRs etc. must be strictly serialized to maintain
* software state correctness.
*/
- if (IS_ENABLED(CONFIG_X86_64))
- load_ucode_ap();
+ load_ucode_ap();
/*
* Synchronization point with the hotplug core. Sets this CPUs
diff --git a/arch/x86/um/vdso/Makefile b/arch/x86/um/vdso/Makefile
index 6825e146a62f..b86d634730b2 100644
--- a/arch/x86/um/vdso/Makefile
+++ b/arch/x86/um/vdso/Makefile
@@ -67,15 +67,3 @@ quiet_cmd_vdso = VDSO $@
VDSO_LDFLAGS = -fPIC -shared -Wl,--hash-style=sysv -z noexecstack
GCOV_PROFILE := n
-
-#
-# Install the unstripped copy of vdso*.so listed in $(vdso-install-y).
-#
-quiet_cmd_vdso_install = INSTALL $@
- cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
-$(vdso-install-y): %.so: $(obj)/%.so.dbg FORCE
- @mkdir -p $(MODLIB)/vdso
- $(call cmd,vdso_install)
-
-PHONY += vdso_install $(vdso-install-y)
-vdso_install: $(vdso-install-y)
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 52d6e4870a04..bdec4a773af0 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -19,7 +19,6 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
-#include <linux/screen_info.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/reboot.h>
@@ -49,17 +48,6 @@
#include <asm/timex.h>
#include <asm/traps.h>
-#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
-struct screen_info screen_info = {
- .orig_x = 0,
- .orig_y = 24,
- .orig_video_cols = 80,
- .orig_video_lines = 24,
- .orig_video_isVGA = 1,
- .orig_video_points = 16,
-};
-#endif
-
#ifdef CONFIG_BLK_DEV_INITRD
extern unsigned long initrd_start;
extern unsigned long initrd_end;
diff --git a/block/blk-core.c b/block/blk-core.c
index 9d51e9894ece..fdf25b8d6e78 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -501,8 +501,8 @@ static inline void bio_check_ro(struct bio *bio)
if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
return;
- pr_warn("Trying to write to read-only block-device %pg\n",
- bio->bi_bdev);
+ pr_warn_ratelimited("Trying to write to read-only block-device %pg\n",
+ bio->bi_bdev);
/* Older lvm-tools actually trigger this */
}
}
diff --git a/crypto/Kconfig b/crypto/Kconfig
index bbf51d55724e..70661f58ee41 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1297,10 +1297,12 @@ config CRYPTO_JITTERENTROPY
See https://www.chronox.de/jent.html
+if CRYPTO_JITTERENTROPY
+if CRYPTO_FIPS && EXPERT
+
choice
prompt "CPU Jitter RNG Memory Size"
default CRYPTO_JITTERENTROPY_MEMSIZE_2
- depends on CRYPTO_JITTERENTROPY
help
The Jitter RNG measures the execution time of memory accesses.
Multiple consecutive memory accesses are performed. If the memory
@@ -1344,7 +1346,6 @@ config CRYPTO_JITTERENTROPY_OSR
int "CPU Jitter RNG Oversampling Rate"
range 1 15
default 1
- depends on CRYPTO_JITTERENTROPY
help
The Jitter RNG allows the specification of an oversampling rate (OSR).
The Jitter RNG operation requires a fixed amount of timing
@@ -1359,7 +1360,6 @@ config CRYPTO_JITTERENTROPY_OSR
config CRYPTO_JITTERENTROPY_TESTINTERFACE
bool "CPU Jitter RNG Test Interface"
- depends on CRYPTO_JITTERENTROPY
help
The test interface allows a privileged process to capture
the raw unconditioned high resolution time stamp noise that
@@ -1377,6 +1377,28 @@ config CRYPTO_JITTERENTROPY_TESTINTERFACE
If unsure, select N.
+endif # if CRYPTO_FIPS && EXPERT
+
+if !(CRYPTO_FIPS && EXPERT)
+
+config CRYPTO_JITTERENTROPY_MEMORY_BLOCKS
+ int
+ default 64
+
+config CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE
+ int
+ default 32
+
+config CRYPTO_JITTERENTROPY_OSR
+ int
+ default 1
+
+config CRYPTO_JITTERENTROPY_TESTINTERFACE
+ bool
+
+endif # if !(CRYPTO_FIPS && EXPERT)
+endif # if CRYPTO_JITTERENTROPY
+
config CRYPTO_KDF800108_CTR
tristate
select CRYPTO_HMAC
diff --git a/crypto/ahash.c b/crypto/ahash.c
index deee55f939dc..80c3e5354711 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -651,6 +651,7 @@ struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
err = PTR_ERR(shash);
goto out_free_nhash;
}
+ nhash->using_shash = true;
*nctx = shash;
return nhash;
}
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 9826907eb06e..7bdad836fc62 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -135,8 +135,6 @@ source "drivers/uio/Kconfig"
source "drivers/vfio/Kconfig"
-source "drivers/vlynq/Kconfig"
-
source "drivers/virt/Kconfig"
source "drivers/virtio/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 722d15be0eb7..d828329c268d 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -151,7 +151,6 @@ obj-$(CONFIG_BCMA) += bcma/
obj-$(CONFIG_VHOST_RING) += vhost/
obj-$(CONFIG_VHOST_IOTLB) += vhost/
obj-$(CONFIG_VHOST) += vhost/
-obj-$(CONFIG_VLYNQ) += vlynq/
obj-$(CONFIG_GREYBUS) += greybus/
obj-$(CONFIG_COMEDI) += comedi/
obj-$(CONFIG_STAGING) += staging/
diff --git a/drivers/accessibility/speakup/kobjects.c b/drivers/accessibility/speakup/kobjects.c
index a7522d409802..0dfdb6608e02 100644
--- a/drivers/accessibility/speakup/kobjects.c
+++ b/drivers/accessibility/speakup/kobjects.c
@@ -413,27 +413,24 @@ static ssize_t synth_direct_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
- u_char tmp[256];
- int len;
- int bytes;
- const char *ptr = buf;
+ char *unescaped;
unsigned long flags;
if (!synth)
return -EPERM;
- len = strlen(buf);
+ unescaped = kstrdup(buf, GFP_KERNEL);
+ if (!unescaped)
+ return -ENOMEM;
+
+ string_unescape_any_inplace(unescaped);
+
spin_lock_irqsave(&speakup_info.spinlock, flags);
- while (len > 0) {
- bytes = min_t(size_t, len, 250);
- strncpy(tmp, ptr, bytes);
- tmp[bytes] = '\0';
- string_unescape_any_inplace(tmp);
- synth_printf("%s", tmp);
- ptr += bytes;
- len -= bytes;
- }
+ synth_write(unescaped, strlen(unescaped));
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+
+ kfree(unescaped);
+
return count;
}
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 554e487cbfab..f819e760ff19 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -12,6 +12,7 @@ menuconfig ACPI
select PNP
select NLS
select CRC32
+ select FIRMWARE_TABLE
default y if X86
help
Advanced Configuration and Power Interface (ACPI) support for
diff --git a/drivers/acpi/riscv/rhct.c b/drivers/acpi/riscv/rhct.c
index b280b3e9c7d9..caa2c16e1697 100644
--- a/drivers/acpi/riscv/rhct.c
+++ b/drivers/acpi/riscv/rhct.c
@@ -8,8 +8,9 @@
#define pr_fmt(fmt) "ACPI: RHCT: " fmt
#include <linux/acpi.h>
+#include <linux/bits.h>
-static struct acpi_table_header *acpi_get_rhct(void)
+static struct acpi_table_rhct *acpi_get_rhct(void)
{
static struct acpi_table_header *rhct;
acpi_status status;
@@ -26,7 +27,7 @@ static struct acpi_table_header *acpi_get_rhct(void)
}
}
- return rhct;
+ return (struct acpi_table_rhct *)rhct;
}
/*
@@ -48,7 +49,7 @@ int acpi_get_riscv_isa(struct acpi_table_header *table, unsigned int cpu, const
BUG_ON(acpi_disabled);
if (!table) {
- rhct = (struct acpi_table_rhct *)acpi_get_rhct();
+ rhct = acpi_get_rhct();
if (!rhct)
return -ENOENT;
} else {
@@ -81,3 +82,89 @@ int acpi_get_riscv_isa(struct acpi_table_header *table, unsigned int cpu, const
return -1;
}
+
+static void acpi_parse_hart_info_cmo_node(struct acpi_table_rhct *rhct,
+ struct acpi_rhct_hart_info *hart_info,
+ u32 *cbom_size, u32 *cboz_size, u32 *cbop_size)
+{
+ u32 size_hartinfo = sizeof(struct acpi_rhct_hart_info);
+ u32 size_hdr = sizeof(struct acpi_rhct_node_header);
+ struct acpi_rhct_node_header *ref_node;
+ struct acpi_rhct_cmo_node *cmo_node;
+ u32 *hart_info_node_offset;
+
+ hart_info_node_offset = ACPI_ADD_PTR(u32, hart_info, size_hartinfo);
+ for (int i = 0; i < hart_info->num_offsets; i++) {
+ ref_node = ACPI_ADD_PTR(struct acpi_rhct_node_header,
+ rhct, hart_info_node_offset[i]);
+ if (ref_node->type == ACPI_RHCT_NODE_TYPE_CMO) {
+ cmo_node = ACPI_ADD_PTR(struct acpi_rhct_cmo_node,
+ ref_node, size_hdr);
+ if (cbom_size && cmo_node->cbom_size <= 30) {
+ if (!*cbom_size)
+ *cbom_size = BIT(cmo_node->cbom_size);
+ else if (*cbom_size != BIT(cmo_node->cbom_size))
+ pr_warn("CBOM size is not the same across harts\n");
+ }
+
+ if (cboz_size && cmo_node->cboz_size <= 30) {
+ if (!*cboz_size)
+ *cboz_size = BIT(cmo_node->cboz_size);
+ else if (*cboz_size != BIT(cmo_node->cboz_size))
+ pr_warn("CBOZ size is not the same across harts\n");
+ }
+
+ if (cbop_size && cmo_node->cbop_size <= 30) {
+ if (!*cbop_size)
+ *cbop_size = BIT(cmo_node->cbop_size);
+ else if (*cbop_size != BIT(cmo_node->cbop_size))
+ pr_warn("CBOP size is not the same across harts\n");
+ }
+ }
+ }
+}
+
+/*
+ * During early boot, the caller should call acpi_get_table() and pass its pointer to
+ * these functions (and free it up later). At run time, since this table can be used
+ * multiple times, pass NULL so that the table remains in memory.
+ */
+void acpi_get_cbo_block_size(struct acpi_table_header *table, u32 *cbom_size,
+ u32 *cboz_size, u32 *cbop_size)
+{
+ u32 size_hdr = sizeof(struct acpi_rhct_node_header);
+ struct acpi_rhct_node_header *node, *end;
+ struct acpi_rhct_hart_info *hart_info;
+ struct acpi_table_rhct *rhct;
+
+ if (acpi_disabled)
+ return;
+
+ if (table) {
+ rhct = (struct acpi_table_rhct *)table;
+ } else {
+ rhct = acpi_get_rhct();
+ if (!rhct)
+ return;
+ }
+
+ if (cbom_size)
+ *cbom_size = 0;
+
+ if (cboz_size)
+ *cboz_size = 0;
+
+ if (cbop_size)
+ *cbop_size = 0;
+
+ end = ACPI_ADD_PTR(struct acpi_rhct_node_header, rhct, rhct->header.length);
+ for (node = ACPI_ADD_PTR(struct acpi_rhct_node_header, rhct, rhct->node_offset);
+ node < end;
+ node = ACPI_ADD_PTR(struct acpi_rhct_node_header, node, node->length)) {
+ if (node->type == ACPI_RHCT_NODE_TYPE_HART_INFO) {
+ hart_info = ACPI_ADD_PTR(struct acpi_rhct_hart_info, node, size_hdr);
+ acpi_parse_hart_info_cmo_node(rhct, hart_info, cbom_size,
+ cboz_size, cbop_size);
+ }
+ }
+}
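[editor's note: a minimal sketch, not part of this patch, of how an early-boot caller might use the acpi_get_cbo_block_size() helper added above. The function name riscv_acpi_init_cbo_sizes() and the error handling are illustrative assumptions; only acpi_get_table()/acpi_put_table() and ACPI_SIG_RHCT are existing ACPI interfaces.]

	/* Illustrative only: an __init caller passes the RHCT explicitly and
	 * releases it afterwards; at run time it would pass NULL instead so
	 * the table stays resident for repeated lookups. */
	static void __init riscv_acpi_init_cbo_sizes(void)
	{
		struct acpi_table_header *rhct;
		u32 cbom = 0, cboz = 0, cbop = 0;

		if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_RHCT, 0, &rhct)))
			return;

		acpi_get_cbo_block_size(rhct, &cbom, &cboz, &cbop);
		acpi_put_table(rhct);
	}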
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 8ab0a82b4da4..c1516337f668 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -37,18 +37,6 @@ static struct acpi_table_desc initial_tables[ACPI_MAX_TABLES] __initdata;
static int acpi_apic_instance __initdata_or_acpilib;
-enum acpi_subtable_type {
- ACPI_SUBTABLE_COMMON,
- ACPI_SUBTABLE_HMAT,
- ACPI_SUBTABLE_PRMT,
- ACPI_SUBTABLE_CEDT,
-};
-
-struct acpi_subtable_entry {
- union acpi_subtable_headers *hdr;
- enum acpi_subtable_type type;
-};
-
/*
* Disable table checksum verification for the early stage due to the size
* limitation of the current x86 early mapping implementation.
@@ -237,167 +225,6 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
}
}
-static unsigned long __init_or_acpilib
-acpi_get_entry_type(struct acpi_subtable_entry *entry)
-{
- switch (entry->type) {
- case ACPI_SUBTABLE_COMMON:
- return entry->hdr->common.type;
- case ACPI_SUBTABLE_HMAT:
- return entry->hdr->hmat.type;
- case ACPI_SUBTABLE_PRMT:
- return 0;
- case ACPI_SUBTABLE_CEDT:
- return entry->hdr->cedt.type;
- }
- return 0;
-}
-
-static unsigned long __init_or_acpilib
-acpi_get_entry_length(struct acpi_subtable_entry *entry)
-{
- switch (entry->type) {
- case ACPI_SUBTABLE_COMMON:
- return entry->hdr->common.length;
- case ACPI_SUBTABLE_HMAT:
- return entry->hdr->hmat.length;
- case ACPI_SUBTABLE_PRMT:
- return entry->hdr->prmt.length;
- case ACPI_SUBTABLE_CEDT:
- return entry->hdr->cedt.length;
- }
- return 0;
-}
-
-static unsigned long __init_or_acpilib
-acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
-{
- switch (entry->type) {
- case ACPI_SUBTABLE_COMMON:
- return sizeof(entry->hdr->common);
- case ACPI_SUBTABLE_HMAT:
- return sizeof(entry->hdr->hmat);
- case ACPI_SUBTABLE_PRMT:
- return sizeof(entry->hdr->prmt);
- case ACPI_SUBTABLE_CEDT:
- return sizeof(entry->hdr->cedt);
- }
- return 0;
-}
-
-static enum acpi_subtable_type __init_or_acpilib
-acpi_get_subtable_type(char *id)
-{
- if (strncmp(id, ACPI_SIG_HMAT, 4) == 0)
- return ACPI_SUBTABLE_HMAT;
- if (strncmp(id, ACPI_SIG_PRMT, 4) == 0)
- return ACPI_SUBTABLE_PRMT;
- if (strncmp(id, ACPI_SIG_CEDT, 4) == 0)
- return ACPI_SUBTABLE_CEDT;
- return ACPI_SUBTABLE_COMMON;
-}
-
-static __init_or_acpilib bool has_handler(struct acpi_subtable_proc *proc)
-{
- return proc->handler || proc->handler_arg;
-}
-
-static __init_or_acpilib int call_handler(struct acpi_subtable_proc *proc,
- union acpi_subtable_headers *hdr,
- unsigned long end)
-{
- if (proc->handler)
- return proc->handler(hdr, end);
- if (proc->handler_arg)
- return proc->handler_arg(hdr, proc->arg, end);
- return -EINVAL;
-}
-
-/**
- * acpi_parse_entries_array - for each proc_num find a suitable subtable
- *
- * @id: table id (for debugging purposes)
- * @table_size: size of the root table
- * @table_header: where does the table start?
- * @proc: array of acpi_subtable_proc struct containing entry id
- * and associated handler with it
- * @proc_num: how big proc is?
- * @max_entries: how many entries can we process?
- *
- * For each proc_num find a subtable with proc->id and run proc->handler
- * on it. Assumption is that there's only single handler for particular
- * entry id.
- *
- * The table_size is not the size of the complete ACPI table (the length
- * field in the header struct), but only the size of the root table; i.e.,
- * the offset from the very first byte of the complete ACPI table, to the
- * first byte of the very first subtable.
- *
- * On success returns sum of all matching entries for all proc handlers.
- * Otherwise, -ENODEV or -EINVAL is returned.
- */
-static int __init_or_acpilib acpi_parse_entries_array(
- char *id, unsigned long table_size,
- struct acpi_table_header *table_header, struct acpi_subtable_proc *proc,
- int proc_num, unsigned int max_entries)
-{
- struct acpi_subtable_entry entry;
- unsigned long table_end, subtable_len, entry_len;
- int count = 0;
- int errs = 0;
- int i;
-
- table_end = (unsigned long)table_header + table_header->length;
-
- /* Parse all entries looking for a match. */
-
- entry.type = acpi_get_subtable_type(id);
- entry.hdr = (union acpi_subtable_headers *)
- ((unsigned long)table_header + table_size);
- subtable_len = acpi_get_subtable_header_length(&entry);
-
- while (((unsigned long)entry.hdr) + subtable_len < table_end) {
- if (max_entries && count >= max_entries)
- break;
-
- for (i = 0; i < proc_num; i++) {
- if (acpi_get_entry_type(&entry) != proc[i].id)
- continue;
- if (!has_handler(&proc[i]) ||
- (!errs &&
- call_handler(&proc[i], entry.hdr, table_end))) {
- errs++;
- continue;
- }
-
- proc[i].count++;
- break;
- }
- if (i != proc_num)
- count++;
-
- /*
- * If entry->length is 0, break from this loop to avoid
- * infinite loop.
- */
- entry_len = acpi_get_entry_length(&entry);
- if (entry_len == 0) {
- pr_err("[%4.4s:0x%02x] Invalid zero length\n", id, proc->id);
- return -EINVAL;
- }
-
- entry.hdr = (union acpi_subtable_headers *)
- ((unsigned long)entry.hdr + entry_len);
- }
-
- if (max_entries && count > max_entries) {
- pr_warn("[%4.4s:0x%02x] found the maximum %i entries\n",
- id, proc->id, count);
- }
-
- return errs ? -EINVAL : count;
-}
-
int __init_or_acpilib acpi_table_parse_entries_array(
char *id, unsigned long table_size, struct acpi_subtable_proc *proc,
int proc_num, unsigned int max_entries)
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index d98ff69303b3..f74d81abdbfc 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -702,9 +702,9 @@ unregister_tzd:
static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
{
+ thermal_zone_device_disable(tz->thermal_zone);
acpi_thermal_zone_sysfs_remove(tz);
thermal_zone_device_unregister(tz->thermal_zone);
- kfree(tz->trip_table);
tz->thermal_zone = NULL;
}
@@ -967,7 +967,7 @@ static void acpi_thermal_remove(struct acpi_device *device)
flush_workqueue(acpi_thermal_pm_queue);
acpi_thermal_unregister_thermal_zone(tz);
-
+ kfree(tz->trip_table);
acpi_thermal_free_thermal_zone(tz);
}
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 420dc9cbf774..1224ab7aa070 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -93,7 +93,7 @@ bool is_binderfs_device(const struct inode *inode)
/**
* binderfs_binder_device_create - allocate inode from super block of a
* binderfs mount
- * @ref_inode: inode from wich the super block will be taken
+ * @ref_inode: inode from which the super block will be taken
* @userp: buffer to copy information about new device for userspace to
* @req: struct binderfs_device as copied from userspace
*
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 6fb4e8dc8c3c..09ed67772fae 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6180,24 +6180,10 @@ EXPORT_SYMBOL_GPL(ata_pci_remove_one);
void ata_pci_shutdown_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
- struct ata_port *ap;
- unsigned long flags;
int i;
- /* Tell EH to disable all devices */
- for (i = 0; i < host->n_ports; i++) {
- ap = host->ports[i];
- spin_lock_irqsave(ap->lock, flags);
- ap->pflags |= ATA_PFLAG_UNLOADING;
- ata_port_schedule_eh(ap);
- spin_unlock_irqrestore(ap->lock, flags);
- }
-
for (i = 0; i < host->n_ports; i++) {
- ap = host->ports[i];
-
- /* Wait for EH to complete before freezing the port */
- ata_port_wait_eh(ap);
+ struct ata_port *ap = host->ports[i];
ap->pflags |= ATA_PFLAG_FROZEN;
diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c
index 0c2ae430f5aa..18ceefd176df 100644
--- a/drivers/ata/pata_falcon.c
+++ b/drivers/ata/pata_falcon.c
@@ -121,7 +121,7 @@ static struct ata_port_operations pata_falcon_ops = {
.set_mode = pata_falcon_set_mode,
};
-static int __init pata_falcon_init_one(struct platform_device *pdev)
+static int pata_falcon_init_one(struct platform_device *pdev)
{
struct resource *base_mem_res, *ctl_mem_res;
struct resource *base_res, *ctl_res, *irq_res;
@@ -216,23 +216,22 @@ static int __init pata_falcon_init_one(struct platform_device *pdev)
IRQF_SHARED, &pata_falcon_sht);
}
-static int __exit pata_falcon_remove_one(struct platform_device *pdev)
+static void pata_falcon_remove_one(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
ata_host_detach(host);
-
- return 0;
}
static struct platform_driver pata_falcon_driver = {
- .remove = __exit_p(pata_falcon_remove_one),
+ .probe = pata_falcon_init_one,
+ .remove_new = pata_falcon_remove_one,
.driver = {
.name = "atari-falcon-ide",
},
};
-module_platform_driver_probe(pata_falcon_driver, pata_falcon_init_one);
+module_platform_driver(pata_falcon_driver);
MODULE_AUTHOR("Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("low-level driver for Atari Falcon PATA");
diff --git a/drivers/ata/pata_gayle.c b/drivers/ata/pata_gayle.c
index 3bdbe2b65a2b..94df60ac2307 100644
--- a/drivers/ata/pata_gayle.c
+++ b/drivers/ata/pata_gayle.c
@@ -124,7 +124,7 @@ static struct ata_port_operations pata_gayle_a4000_ops = {
.set_mode = pata_gayle_set_mode,
};
-static int __init pata_gayle_init_one(struct platform_device *pdev)
+static int pata_gayle_init_one(struct platform_device *pdev)
{
struct resource *res;
struct gayle_ide_platform_data *pdata;
@@ -193,23 +193,22 @@ static int __init pata_gayle_init_one(struct platform_device *pdev)
return 0;
}
-static int __exit pata_gayle_remove_one(struct platform_device *pdev)
+static void pata_gayle_remove_one(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
ata_host_detach(host);
-
- return 0;
}
static struct platform_driver pata_gayle_driver = {
- .remove = __exit_p(pata_gayle_remove_one),
+ .probe = pata_gayle_init_one,
+ .remove_new = pata_gayle_remove_one,
.driver = {
.name = "amiga-gayle-ide",
},
};
-module_platform_driver_probe(pata_gayle_driver, pata_gayle_init_one);
+module_platform_driver(pata_gayle_driver);
MODULE_AUTHOR("Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("low-level driver for Amiga Gayle PATA");
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 05d9df90f621..7e78aee0fd6c 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -193,10 +193,8 @@ int class_register(const struct class *cls)
lockdep_register_key(key);
__mutex_init(&cp->mutex, "subsys mutex", key);
error = kobject_set_name(&cp->subsys.kobj, "%s", cls->name);
- if (error) {
- kfree(cp);
- return error;
- }
+ if (error)
+ goto err_out;
cp->subsys.kobj.kset = class_kset;
cp->subsys.kobj.ktype = &class_ktype;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 4d8b315c48a1..67ba592afc77 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -49,6 +49,7 @@ static bool fw_devlink_best_effort;
* __fwnode_link_add - Create a link between two fwnode_handles.
* @con: Consumer end of the link.
* @sup: Supplier end of the link.
+ * @flags: Link flags.
*
* Create a fwnode link between fwnode handles @con and @sup. The fwnode link
* represents the detail that the firmware lists @sup fwnode as supplying a
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index a528cec24264..0c3725c3eefa 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -1274,8 +1274,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
if (dev->bus && dev->bus->dma_cleanup)
dev->bus->dma_cleanup(dev);
- device_links_driver_cleanup(dev);
device_unbind_cleanup(dev);
+ device_links_driver_cleanup(dev);
klist_remove(&dev->p->knode_driver);
device_pm_check_callbacks(dev);
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index bf68e3947814..3ef0b312ae71 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -46,7 +46,7 @@ static inline int fw_sysfs_wait_timeout(struct fw_priv *fw_priv, long timeout)
static LIST_HEAD(pending_fw_head);
-void kill_pending_fw_fallback_reqs(bool only_kill_custom)
+void kill_pending_fw_fallback_reqs(bool kill_all)
{
struct fw_priv *fw_priv;
struct fw_priv *next;
@@ -54,9 +54,13 @@ void kill_pending_fw_fallback_reqs(bool only_kill_custom)
mutex_lock(&fw_lock);
list_for_each_entry_safe(fw_priv, next, &pending_fw_head,
pending_list) {
- if (!fw_priv->need_uevent || !only_kill_custom)
+ if (kill_all || !fw_priv->need_uevent)
__fw_load_abort(fw_priv);
}
+
+ if (kill_all)
+ fw_load_abort_all = true;
+
mutex_unlock(&fw_lock);
}
@@ -86,7 +90,7 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, long timeout)
}
mutex_lock(&fw_lock);
- if (fw_state_is_aborted(fw_priv)) {
+ if (fw_load_abort_all || fw_state_is_aborted(fw_priv)) {
mutex_unlock(&fw_lock);
retval = -EINTR;
goto out;
diff --git a/drivers/base/firmware_loader/fallback.h b/drivers/base/firmware_loader/fallback.h
index 144148595660..ccf912bef6ca 100644
--- a/drivers/base/firmware_loader/fallback.h
+++ b/drivers/base/firmware_loader/fallback.h
@@ -13,7 +13,7 @@ int firmware_fallback_sysfs(struct firmware *fw, const char *name,
struct device *device,
u32 opt_flags,
int ret);
-void kill_pending_fw_fallback_reqs(bool only_kill_custom);
+void kill_pending_fw_fallback_reqs(bool kill_all);
void fw_fallback_set_cache_timeout(void);
void fw_fallback_set_default_timeout(void);
@@ -28,7 +28,7 @@ static inline int firmware_fallback_sysfs(struct firmware *fw, const char *name,
return ret;
}
-static inline void kill_pending_fw_fallback_reqs(bool only_kill_custom) { }
+static inline void kill_pending_fw_fallback_reqs(bool kill_all) { }
static inline void fw_fallback_set_cache_timeout(void) { }
static inline void fw_fallback_set_default_timeout(void) { }
#endif /* CONFIG_FW_LOADER_USER_HELPER */
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
index bf549d6500d7..e891742ba264 100644
--- a/drivers/base/firmware_loader/firmware.h
+++ b/drivers/base/firmware_loader/firmware.h
@@ -86,6 +86,7 @@ struct fw_priv {
extern struct mutex fw_lock;
extern struct firmware_cache fw_cache;
+extern bool fw_load_abort_all;
static inline bool __fw_state_check(struct fw_priv *fw_priv,
enum fw_status status)
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index b58c42f1b1ce..ea28102d421e 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -93,6 +93,7 @@ static inline struct fw_priv *to_fw_priv(struct kref *ref)
DEFINE_MUTEX(fw_lock);
struct firmware_cache fw_cache;
+bool fw_load_abort_all;
void fw_state_init(struct fw_priv *fw_priv)
{
@@ -1524,10 +1525,10 @@ static int fw_pm_notify(struct notifier_block *notify_block,
case PM_SUSPEND_PREPARE:
case PM_RESTORE_PREPARE:
/*
- * kill pending fallback requests with a custom fallback
- * to avoid stalling suspend.
+		 * Here, killing pending fallback requests only kills
+		 * non-uevent firmware requests, to avoid stalling suspend.
*/
- kill_pending_fw_fallback_reqs(true);
+ kill_pending_fw_fallback_reqs(false);
device_cache_fw_images();
break;
@@ -1612,7 +1613,7 @@ static int fw_shutdown_notify(struct notifier_block *unused1,
* Kill all pending fallback requests to avoid both stalling shutdown,
* and avoid a deadlock with the usermode_lock.
*/
- kill_pending_fw_fallback_reqs(false);
+ kill_pending_fw_fallback_reqs(true);
return NOTIFY_DONE;
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 76bfcba25003..10c577963418 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -178,18 +178,19 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
ret = dev->archdata.irqs[num];
goto out;
#else
+ struct fwnode_handle *fwnode = dev_fwnode(&dev->dev);
struct resource *r;
- if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
- ret = of_irq_get(dev->dev.of_node, num);
+ if (is_of_node(fwnode)) {
+ ret = of_irq_get(to_of_node(fwnode), num);
if (ret > 0 || ret == -EPROBE_DEFER)
goto out;
}
r = platform_get_resource(dev, IORESOURCE_IRQ, num);
- if (has_acpi_companion(&dev->dev)) {
+ if (is_acpi_device_node(fwnode)) {
if (r && r->flags & IORESOURCE_DISABLED) {
- ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
+ ret = acpi_irq_get(ACPI_HANDLE_FWNODE(fwnode), num, r);
if (ret)
goto out;
}
@@ -222,8 +223,8 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
* the device will only expose one IRQ, and this fallback
* allows a common code path across either kind of resource.
*/
- if (num == 0 && has_acpi_companion(&dev->dev)) {
- ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
+ if (num == 0 && is_acpi_device_node(fwnode)) {
+ ret = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), num);
/* Our callers expect -ENXIO for missing IRQs. */
if (ret >= 0 || ret == -EPROBE_DEFER)
goto out;
@@ -291,7 +292,7 @@ EXPORT_SYMBOL_GPL(platform_irq_count);
struct irq_affinity_devres {
unsigned int count;
- unsigned int irq[];
+ unsigned int irq[] __counted_by(count);
};
static void platform_disable_acpi_irq(struct platform_device *pdev, int index)
@@ -312,7 +313,7 @@ static void devm_platform_get_irqs_affinity_release(struct device *dev,
for (i = 0; i < ptr->count; i++) {
irq_dispose_mapping(ptr->irq[i]);
- if (has_acpi_companion(dev))
+ if (is_acpi_device_node(dev_fwnode(dev)))
platform_disable_acpi_irq(to_platform_device(dev), i);
}
}
@@ -655,23 +656,21 @@ EXPORT_SYMBOL_GPL(platform_device_add_data);
*/
int platform_device_add(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
u32 i;
int ret;
- if (!pdev)
- return -EINVAL;
-
- if (!pdev->dev.parent)
- pdev->dev.parent = &platform_bus;
+ if (!dev->parent)
+ dev->parent = &platform_bus;
- pdev->dev.bus = &platform_bus_type;
+ dev->bus = &platform_bus_type;
switch (pdev->id) {
default:
- dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
+ dev_set_name(dev, "%s.%d", pdev->name, pdev->id);
break;
case PLATFORM_DEVID_NONE:
- dev_set_name(&pdev->dev, "%s", pdev->name);
+ dev_set_name(dev, "%s", pdev->name);
break;
case PLATFORM_DEVID_AUTO:
/*
@@ -681,10 +680,10 @@ int platform_device_add(struct platform_device *pdev)
*/
ret = ida_alloc(&platform_devid_ida, GFP_KERNEL);
if (ret < 0)
- goto err_out;
+ return ret;
pdev->id = ret;
pdev->id_auto = true;
- dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
+ dev_set_name(dev, "%s.%d.auto", pdev->name, pdev->id);
break;
}
@@ -692,7 +691,7 @@ int platform_device_add(struct platform_device *pdev)
struct resource *p, *r = &pdev->resource[i];
if (r->name == NULL)
- r->name = dev_name(&pdev->dev);
+ r->name = dev_name(dev);
p = r->parent;
if (!p) {
@@ -705,18 +704,20 @@ int platform_device_add(struct platform_device *pdev)
if (p) {
ret = insert_resource(p, r);
if (ret) {
- dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
+ dev_err(dev, "failed to claim resource %d: %pR\n", i, r);
goto failed;
}
}
}
- pr_debug("Registering platform device '%s'. Parent at %s\n",
- dev_name(&pdev->dev), dev_name(pdev->dev.parent));
+ pr_debug("Registering platform device '%s'. Parent at %s\n", dev_name(dev),
+ dev_name(dev->parent));
- ret = device_add(&pdev->dev);
- if (ret == 0)
- return ret;
+ ret = device_add(dev);
+ if (ret)
+ goto failed;
+
+ return 0;
failed:
if (pdev->id_auto) {
@@ -730,7 +731,6 @@ int platform_device_add(struct platform_device *pdev)
release_resource(r);
}
- err_out:
return ret;
}
EXPORT_SYMBOL_GPL(platform_device_add);
@@ -1447,21 +1447,22 @@ static void platform_shutdown(struct device *_dev)
static int platform_dma_configure(struct device *dev)
{
struct platform_driver *drv = to_platform_driver(dev->driver);
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
enum dev_dma_attr attr;
int ret = 0;
- if (dev->of_node) {
- ret = of_dma_configure(dev, dev->of_node, true);
- } else if (has_acpi_companion(dev)) {
- attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
+ if (is_of_node(fwnode)) {
+ ret = of_dma_configure(dev, to_of_node(fwnode), true);
+ } else if (is_acpi_device_node(fwnode)) {
+ attr = acpi_get_dma_attr(to_acpi_device_node(fwnode));
ret = acpi_dma_configure(dev, attr);
}
+ if (ret || drv->driver_managed_dma)
+ return ret;
- if (!ret && !drv->driver_managed_dma) {
- ret = iommu_device_use_default_domain(dev);
- if (ret)
- arch_teardown_dma_ops(dev);
- }
+ ret = iommu_device_use_default_domain(dev);
+ if (ret)
+ arch_teardown_dma_ops(dev);
return ret;
}
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 234a84ecde8b..ea6157747199 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1620,17 +1620,19 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
}
if (!map->cache_bypass && map->format.parse_val) {
- unsigned int ival;
+ unsigned int ival, offset;
int val_bytes = map->format.val_bytes;
- for (i = 0; i < val_len / val_bytes; i++) {
- ival = map->format.parse_val(val + (i * val_bytes));
- ret = regcache_write(map,
- reg + regmap_get_offset(map, i),
- ival);
+
+ /* Cache the last written value for noinc writes */
+ i = noinc ? val_len - val_bytes : 0;
+ for (; i < val_len; i += val_bytes) {
+ ival = map->format.parse_val(val + i);
+ offset = noinc ? 0 : regmap_get_offset(map, i / val_bytes);
+ ret = regcache_write(map, reg + offset, ival);
if (ret) {
dev_err(map->dev,
"Error in caching of register: %x ret: %d\n",
- reg + regmap_get_offset(map, i), ret);
+ reg + offset, ret);
return ret;
}
}
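[editor's note: worked example of the caching loop above, with illustrative numbers that are not part of the patch. For a noinc write with val_bytes = 2 and val_len = 8 (four 16-bit values), i starts at val_len - val_bytes = 6, so the loop runs exactly once, parses only the last value in the buffer, and caches it at offset 0, i.e. at reg itself. For a normal incrementing write, i still starts at 0 and each value is cached at reg + regmap_get_offset(map, i / val_bytes), matching the previous behaviour.]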
diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig
index 9d42051f8f8e..5c7fac80611c 100644
--- a/drivers/base/test/Kconfig
+++ b/drivers/base/test/Kconfig
@@ -14,6 +14,6 @@ config DM_KUNIT_TEST
depends on KUNIT
config DRIVER_PE_KUNIT_TEST
- bool "KUnit Tests for property entry API" if !KUNIT_ALL_TESTS
- depends on KUNIT=y
+ tristate "KUnit Tests for property entry API" if !KUNIT_ALL_TESTS
+ depends on KUNIT
default KUNIT_ALL_TESTS
diff --git a/drivers/base/test/property-entry-test.c b/drivers/base/test/property-entry-test.c
index dd2b606d76a3..a8657eb06f94 100644
--- a/drivers/base/test/property-entry-test.c
+++ b/drivers/base/test/property-entry-test.c
@@ -506,3 +506,7 @@ static struct kunit_suite property_entry_test_suite = {
};
kunit_test_suite(property_entry_test_suite);
+
+MODULE_DESCRIPTION("Test module for the property entry API");
+MODULE_AUTHOR("Dmitry Torokhov <dtor@chromium.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 800f131222fc..855fdf5c3b4e 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -250,7 +250,6 @@ static void nbd_dev_remove(struct nbd_device *nbd)
struct gendisk *disk = nbd->disk;
del_gendisk(disk);
- put_disk(disk);
blk_mq_free_tag_set(&nbd->tag_set);
/*
@@ -261,7 +260,7 @@ static void nbd_dev_remove(struct nbd_device *nbd)
idr_remove(&nbd_index_idr, nbd->index);
mutex_unlock(&nbd_index_mutex);
destroy_workqueue(nbd->recv_workq);
- kfree(nbd);
+ put_disk(disk);
}
static void nbd_dev_remove_work(struct work_struct *work)
@@ -1608,6 +1607,13 @@ static void nbd_release(struct gendisk *disk)
nbd_put(nbd);
}
+static void nbd_free_disk(struct gendisk *disk)
+{
+ struct nbd_device *nbd = disk->private_data;
+
+ kfree(nbd);
+}
+
static const struct block_device_operations nbd_fops =
{
.owner = THIS_MODULE,
@@ -1615,6 +1621,7 @@ static const struct block_device_operations nbd_fops =
.release = nbd_release,
.ioctl = nbd_ioctl,
.compat_ioctl = nbd_ioctl,
+ .free_disk = nbd_free_disk,
};
#if IS_ENABLED(CONFIG_DEBUG_FS)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 4689ac2e0c0e..d53d6aa8ee69 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -1311,6 +1311,7 @@ static int virtblk_probe(struct virtio_device *vdev)
u16 min_io_size;
u8 physical_block_exp, alignment_offset;
unsigned int queue_depth;
+ size_t max_dma_size;
if (!vdev->config->get) {
dev_err(&vdev->dev, "%s failure: config access disabled\n",
@@ -1409,7 +1410,8 @@ static int virtblk_probe(struct virtio_device *vdev)
/* No real sector limit. */
blk_queue_max_hw_sectors(q, UINT_MAX);
- max_size = virtio_max_dma_size(vdev);
+ max_dma_size = virtio_max_dma_size(vdev);
+ max_size = max_dma_size > U32_MAX ? U32_MAX : max_dma_size;
/* Host can optionally specify maximum segment size and number of
* segments. */
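[editor's note: the clamp above matters because virtio_max_dma_size() returns a size_t. On 64-bit, a value such as 1ULL << 32 (no real DMA limit) would silently truncate to 0 when assigned to the u32 max_size and later passed to blk_queue_max_segment_size(); clamping to U32_MAX first keeps the value in range. The 1ULL << 32 figure is an illustrative example, not taken from the patch.]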
diff --git a/drivers/cdx/Makefile b/drivers/cdx/Makefile
index 0324e4914f6e..5d1ea482419f 100644
--- a/drivers/cdx/Makefile
+++ b/drivers/cdx/Makefile
@@ -5,4 +5,6 @@
# Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
#
+ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CDX_BUS
+
obj-$(CONFIG_CDX_BUS) += cdx.o controller/
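[editor's note: the Makefile hunk above places every EXPORT_SYMBOL*() in cdx.o that does not name a namespace explicitly into the CDX_BUS namespace. A hypothetical consumer sketch follows; the module name and init body are assumptions and not part of this patch.]

	#include <linux/module.h>
	#include <linux/cdx/cdx_bus.h>

	/* Needed to link against symbols exported from cdx.o without an
	 * explicit namespace, e.g. cdx_dev_reset(). */
	MODULE_IMPORT_NS(CDX_BUS);

	static int __init example_cdx_consumer_init(void)
	{
		return 0;
	}
	module_init(example_cdx_consumer_init);
	MODULE_LICENSE("GPL");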
diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c
index 9efb7584f952..4461c6c9313f 100644
--- a/drivers/cdx/cdx.c
+++ b/drivers/cdx/cdx.c
@@ -60,7 +60,7 @@
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/mm.h>
-#include <linux/xarray.h>
+#include <linux/idr.h>
#include <linux/cdx/cdx_bus.h>
#include <linux/iommu.h>
#include <linux/dma-map-ops.h>
@@ -70,8 +70,12 @@
#define CDX_DEFAULT_DMA_MASK (~0ULL)
#define MAX_CDX_CONTROLLERS 16
-/* CDX controllers registered with the CDX bus */
-static DEFINE_XARRAY_ALLOC(cdx_controllers);
+/* IDA for CDX controllers registered with the CDX bus */
+static DEFINE_IDA(cdx_controller_ida);
+/* Lock to protect controller ops */
+static DEFINE_MUTEX(cdx_controller_lock);
+
+static char *compat_node_name = "xlnx,versal-net-cdx";
/**
* cdx_dev_reset - Reset a CDX device
@@ -107,6 +111,20 @@ int cdx_dev_reset(struct device *dev)
EXPORT_SYMBOL_GPL(cdx_dev_reset);
/**
+ * reset_cdx_device - Reset a CDX device
+ * @dev: CDX device
+ * @data: This is always passed as NULL, and is not used in this API,
+ * but is required here as the device_for_each_child() API expects
+ * the passed function to have this as an argument.
+ *
+ * Return: -errno on failure, 0 on success.
+ */
+static int reset_cdx_device(struct device *dev, void *data)
+{
+ return cdx_dev_reset(dev);
+}
+
+/**
* cdx_unregister_device - Unregister a CDX device
* @dev: CDX device
* @data: This is always passed as NULL, and is not used in this API,
@@ -120,9 +138,17 @@ static int cdx_unregister_device(struct device *dev,
void *data)
{
struct cdx_device *cdx_dev = to_cdx_device(dev);
+ struct cdx_controller *cdx = cdx_dev->cdx;
+
+ if (cdx_dev->is_bus) {
+ device_for_each_child(dev, NULL, cdx_unregister_device);
+ if (cdx_dev->enabled && cdx->ops->bus_disable)
+ cdx->ops->bus_disable(cdx, cdx_dev->bus_num);
+ } else {
+ kfree(cdx_dev->driver_override);
+ cdx_dev->driver_override = NULL;
+ }
- kfree(cdx_dev->driver_override);
- cdx_dev->driver_override = NULL;
/*
* Do not free cdx_dev here as it would be freed in
* cdx_device_release() called from within put_device().
@@ -153,7 +179,10 @@ cdx_match_one_device(const struct cdx_device_id *id,
{
/* Use vendor ID and device ID for matching */
if ((id->vendor == CDX_ANY_ID || id->vendor == dev->vendor) &&
- (id->device == CDX_ANY_ID || id->device == dev->device))
+ (id->device == CDX_ANY_ID || id->device == dev->device) &&
+ (id->subvendor == CDX_ANY_ID || id->subvendor == dev->subsystem_vendor) &&
+ (id->subdevice == CDX_ANY_ID || id->subdevice == dev->subsystem_device) &&
+ !((id->class ^ dev->class) & id->class_mask))
return id;
return NULL;
}
@@ -229,6 +258,9 @@ static int cdx_bus_match(struct device *dev, struct device_driver *drv)
const struct cdx_device_id *found_id = NULL;
const struct cdx_device_id *ids;
+ if (cdx_dev->is_bus)
+ return false;
+
ids = cdx_drv->match_id_table;
/* When driver_override is set, only bind to the matching driver */
@@ -293,10 +325,11 @@ static int cdx_dma_configure(struct device *dev)
{
struct cdx_driver *cdx_drv = to_cdx_driver(dev->driver);
struct cdx_device *cdx_dev = to_cdx_device(dev);
+ struct cdx_controller *cdx = cdx_dev->cdx;
u32 input_id = cdx_dev->req_id;
int ret;
- ret = of_dma_configure_id(dev, dev->parent->of_node, 0, &input_id);
+ ret = of_dma_configure_id(dev, cdx->dev->of_node, 0, &input_id);
if (ret && ret != -EPROBE_DEFER) {
dev_err(dev, "of_dma_configure_id() failed\n");
return ret;
@@ -331,6 +364,10 @@ static DEVICE_ATTR_RO(field)
cdx_config_attr(vendor, "0x%04x\n");
cdx_config_attr(device, "0x%04x\n");
+cdx_config_attr(subsystem_vendor, "0x%04x\n");
+cdx_config_attr(subsystem_device, "0x%04x\n");
+cdx_config_attr(revision, "0x%02x\n");
+cdx_config_attr(class, "0x%06x\n");
static ssize_t remove_store(struct device *dev,
struct device_attribute *attr,
@@ -359,6 +396,7 @@ static DEVICE_ATTR_WO(remove);
static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
bool val;
int ret;
@@ -368,14 +406,27 @@ static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
if (!val)
return -EINVAL;
- ret = cdx_dev_reset(dev);
- if (ret)
- return ret;
+ if (cdx_dev->is_bus)
+ /* Reset all the devices attached to cdx bus */
+ ret = device_for_each_child(dev, NULL, reset_cdx_device);
+ else
+ ret = cdx_dev_reset(dev);
- return count;
+ return ret < 0 ? ret : count;
}
static DEVICE_ATTR_WO(reset);
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+
+ return sprintf(buf, "cdx:v%04Xd%04Xsv%04Xsd%04Xc%06X\n", cdx_dev->vendor,
+ cdx_dev->device, cdx_dev->subsystem_vendor, cdx_dev->subsystem_device,
+ cdx_dev->class);
+}
+static DEVICE_ATTR_RO(modalias);
+
static ssize_t driver_override_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -402,21 +453,107 @@ static ssize_t driver_override_show(struct device *dev,
}
static DEVICE_ATTR_RW(driver_override);
+static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+ struct cdx_controller *cdx = cdx_dev->cdx;
+ bool enable;
+ int ret;
+
+ if (kstrtobool(buf, &enable) < 0)
+ return -EINVAL;
+
+ if (enable == cdx_dev->enabled)
+ return count;
+
+ if (enable && cdx->ops->bus_enable)
+ ret = cdx->ops->bus_enable(cdx, cdx_dev->bus_num);
+ else if (!enable && cdx->ops->bus_disable)
+ ret = cdx->ops->bus_disable(cdx, cdx_dev->bus_num);
+ else
+ ret = -EOPNOTSUPP;
+
+ if (!ret)
+ cdx_dev->enabled = enable;
+
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t enable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+
+ return sysfs_emit(buf, "%u\n", cdx_dev->enabled);
+}
+static DEVICE_ATTR_RW(enable);
+
+static umode_t cdx_dev_attrs_are_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cdx_device *cdx_dev;
+
+ cdx_dev = to_cdx_device(dev);
+ if (!cdx_dev->is_bus)
+ return a->mode;
+
+ return 0;
+}
+
+static umode_t cdx_bus_attrs_are_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cdx_device *cdx_dev;
+
+ cdx_dev = to_cdx_device(dev);
+ if (cdx_dev->is_bus)
+ return a->mode;
+
+ return 0;
+}
+
static struct attribute *cdx_dev_attrs[] = {
&dev_attr_remove.attr,
&dev_attr_reset.attr,
&dev_attr_vendor.attr,
&dev_attr_device.attr,
+ &dev_attr_subsystem_vendor.attr,
+ &dev_attr_subsystem_device.attr,
+ &dev_attr_class.attr,
+ &dev_attr_revision.attr,
+ &dev_attr_modalias.attr,
&dev_attr_driver_override.attr,
NULL,
};
-ATTRIBUTE_GROUPS(cdx_dev);
+
+static const struct attribute_group cdx_dev_group = {
+ .attrs = cdx_dev_attrs,
+ .is_visible = cdx_dev_attrs_are_visible,
+};
+
+static struct attribute *cdx_bus_dev_attrs[] = {
+ &dev_attr_enable.attr,
+ &dev_attr_reset.attr,
+ NULL,
+};
+
+static const struct attribute_group cdx_bus_dev_group = {
+ .attrs = cdx_bus_dev_attrs,
+ .is_visible = cdx_bus_attrs_are_visible,
+};
+
+static const struct attribute_group *cdx_dev_groups[] = {
+ &cdx_dev_group,
+ &cdx_bus_dev_group,
+ NULL,
+};
static ssize_t rescan_store(const struct bus_type *bus,
const char *buf, size_t count)
{
struct cdx_controller *cdx;
- unsigned long index;
+ struct platform_device *pd;
+ struct device_node *np;
bool val;
if (kstrtobool(buf, &val) < 0)
@@ -425,18 +562,29 @@ static ssize_t rescan_store(const struct bus_type *bus,
if (!val)
return -EINVAL;
+ mutex_lock(&cdx_controller_lock);
+
/* Unregister all the devices on the bus */
cdx_unregister_devices(&cdx_bus_type);
/* Rescan all the devices */
- xa_for_each(&cdx_controllers, index, cdx) {
- int ret;
+ for_each_compatible_node(np, NULL, compat_node_name) {
+ if (!np)
+ return -EINVAL;
- ret = cdx->ops->scan(cdx);
- if (ret)
- dev_err(cdx->dev, "cdx bus scanning failed\n");
+ pd = of_find_device_by_node(np);
+ if (!pd)
+ return -EINVAL;
+
+ cdx = platform_get_drvdata(pd);
+ if (cdx && cdx->controller_registered && cdx->ops->scan)
+ cdx->ops->scan(cdx);
+
+ put_device(&pd->dev);
}
+ mutex_unlock(&cdx_controller_lock);
+
return count;
}
static BUS_ATTR_WO(rescan);
@@ -495,7 +643,6 @@ static void cdx_device_release(struct device *dev)
int cdx_device_add(struct cdx_dev_params *dev_params)
{
struct cdx_controller *cdx = dev_params->cdx;
- struct device *parent = cdx->dev;
struct cdx_device *cdx_dev;
int ret;
@@ -512,6 +659,10 @@ int cdx_device_add(struct cdx_dev_params *dev_params)
cdx_dev->req_id = dev_params->req_id;
cdx_dev->vendor = dev_params->vendor;
cdx_dev->device = dev_params->device;
+ cdx_dev->subsystem_vendor = dev_params->subsys_vendor;
+ cdx_dev->subsystem_device = dev_params->subsys_device;
+ cdx_dev->class = dev_params->class;
+ cdx_dev->revision = dev_params->revision;
cdx_dev->bus_num = dev_params->bus_num;
cdx_dev->dev_num = dev_params->dev_num;
cdx_dev->cdx = dev_params->cdx;
@@ -519,7 +670,7 @@ int cdx_device_add(struct cdx_dev_params *dev_params)
/* Initialize generic device */
device_initialize(&cdx_dev->dev);
- cdx_dev->dev.parent = parent;
+ cdx_dev->dev.parent = dev_params->parent;
cdx_dev->dev.bus = &cdx_bus_type;
cdx_dev->dev.dma_mask = &cdx_dev->dma_mask;
cdx_dev->dev.release = cdx_device_release;
@@ -546,37 +697,94 @@ fail:
return ret;
}
-EXPORT_SYMBOL_GPL(cdx_device_add);
+EXPORT_SYMBOL_NS_GPL(cdx_device_add, CDX_BUS_CONTROLLER);
-int cdx_register_controller(struct cdx_controller *cdx)
+struct device *cdx_bus_add(struct cdx_controller *cdx, u8 bus_num)
{
+ struct cdx_device *cdx_dev;
int ret;
- ret = xa_alloc(&cdx_controllers, &cdx->id, cdx,
- XA_LIMIT(0, MAX_CDX_CONTROLLERS - 1), GFP_KERNEL);
+ cdx_dev = kzalloc(sizeof(*cdx_dev), GFP_KERNEL);
+ if (!cdx_dev)
+ return NULL;
+
+ device_initialize(&cdx_dev->dev);
+ cdx_dev->cdx = cdx;
+
+ cdx_dev->dev.parent = cdx->dev;
+ cdx_dev->dev.bus = &cdx_bus_type;
+ cdx_dev->dev.release = cdx_device_release;
+ cdx_dev->is_bus = true;
+ cdx_dev->bus_num = bus_num;
+
+ dev_set_name(&cdx_dev->dev, "cdx-%02x",
+ ((cdx->id << CDX_CONTROLLER_ID_SHIFT) | (bus_num & CDX_BUS_NUM_MASK)));
+
+ ret = device_add(&cdx_dev->dev);
if (ret) {
+ dev_err(&cdx_dev->dev, "cdx bus device add failed: %d\n", ret);
+ goto device_add_fail;
+ }
+
+ if (cdx->ops->bus_enable) {
+ ret = cdx->ops->bus_enable(cdx, bus_num);
+ if (ret && ret != -EALREADY) {
+ dev_err(cdx->dev, "cdx bus enable failed: %d\n", ret);
+ goto bus_enable_fail;
+ }
+ }
+
+ cdx_dev->enabled = true;
+ return &cdx_dev->dev;
+
+bus_enable_fail:
+ device_del(&cdx_dev->dev);
+device_add_fail:
+ put_device(&cdx_dev->dev);
+
+ return NULL;
+}
+EXPORT_SYMBOL_NS_GPL(cdx_bus_add, CDX_BUS_CONTROLLER);
+
+int cdx_register_controller(struct cdx_controller *cdx)
+{
+ int ret;
+
+ ret = ida_alloc_range(&cdx_controller_ida, 0, MAX_CDX_CONTROLLERS - 1, GFP_KERNEL);
+ if (ret < 0) {
dev_err(cdx->dev,
"No free index available. Maximum controllers already registered\n");
cdx->id = (u8)MAX_CDX_CONTROLLERS;
return ret;
}
+ mutex_lock(&cdx_controller_lock);
+ cdx->id = ret;
+
/* Scan all the devices */
- cdx->ops->scan(cdx);
+ if (cdx->ops->scan)
+ cdx->ops->scan(cdx);
+ cdx->controller_registered = true;
+ mutex_unlock(&cdx_controller_lock);
return 0;
}
-EXPORT_SYMBOL_GPL(cdx_register_controller);
+EXPORT_SYMBOL_NS_GPL(cdx_register_controller, CDX_BUS_CONTROLLER);
void cdx_unregister_controller(struct cdx_controller *cdx)
{
if (cdx->id >= MAX_CDX_CONTROLLERS)
return;
+ mutex_lock(&cdx_controller_lock);
+
+ cdx->controller_registered = false;
device_for_each_child(cdx->dev, NULL, cdx_unregister_device);
- xa_erase(&cdx_controllers, cdx->id);
+ ida_free(&cdx_controller_ida, cdx->id);
+
+ mutex_unlock(&cdx_controller_lock);
}
-EXPORT_SYMBOL_GPL(cdx_unregister_controller);
+EXPORT_SYMBOL_NS_GPL(cdx_unregister_controller, CDX_BUS_CONTROLLER);
static int __init cdx_bus_init(void)
{
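[editor's note: for reference, the modalias format added above ("cdx:v%04Xd%04Xsv%04Xsd%04Xc%06X") expands to strings like the following; all IDs here are made-up values used only to illustrate the matching. A device with vendor 0x10EE, device 0x8084, subsystem 0x10EE:0x0001 and class 0x0C0000 reports "cdx:v10EEd8084sv10EEsd0001c0C0000". A driver ID entry with class 0x0C0000 and class_mask 0xFF0000 matches any device whose base class byte is 0x0C, because !((id->class ^ dev->class) & id->class_mask) ignores the bits cleared in the mask.]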
diff --git a/drivers/cdx/cdx.h b/drivers/cdx/cdx.h
index c436ac7ac86f..300ad8be7a34 100644
--- a/drivers/cdx/cdx.h
+++ b/drivers/cdx/cdx.h
@@ -13,24 +13,33 @@
/**
* struct cdx_dev_params - CDX device parameters
* @cdx: CDX controller associated with the device
- * @parent: Associated CDX controller
+ * @parent: Associated CDX Bus device
* @vendor: Vendor ID for CDX device
* @device: Device ID for CDX device
+ * @subsys_vendor: Sub vendor ID for CDX device
+ * @subsys_device: Sub device ID for CDX device
* @bus_num: Bus number for this CDX device
* @dev_num: Device number for this device
* @res: array of MMIO region entries
* @res_count: number of valid MMIO regions
* @req_id: Requestor ID associated with CDX device
+ * @class: Class of the CDX Device
+ * @revision: Revision of the CDX device
*/
struct cdx_dev_params {
struct cdx_controller *cdx;
+ struct device *parent;
u16 vendor;
u16 device;
+ u16 subsys_vendor;
+ u16 subsys_device;
u8 bus_num;
u8 dev_num;
struct resource res[MAX_CDX_DEV_RESOURCES];
u8 res_count;
u32 req_id;
+ u32 class;
+ u8 revision;
};
/**
@@ -59,4 +68,15 @@ void cdx_unregister_controller(struct cdx_controller *cdx);
*/
int cdx_device_add(struct cdx_dev_params *dev_params);
+/**
+ * cdx_bus_add - Add a CDX bus. This function adds a bus on the CDX bus
+ * subsystem. It creates a CDX device for the corresponding bus and
+ * also registers an associated Linux generic device.
+ * @cdx: Associated CDX controller
+ * @bus_num: Bus number
+ *
+ * Return: associated Linux generic device pointer on success or NULL on failure.
+ */
+struct device *cdx_bus_add(struct cdx_controller *cdx, u8 bus_num);
+
#endif /* _CDX_H_ */
diff --git a/drivers/cdx/controller/cdx_controller.c b/drivers/cdx/controller/cdx_controller.c
index 7828dac8edb1..85fe4b1c4e5e 100644
--- a/drivers/cdx/controller/cdx_controller.c
+++ b/drivers/cdx/controller/cdx_controller.c
@@ -33,6 +33,16 @@ static const struct cdx_mcdi_ops mcdi_ops = {
.mcdi_request = cdx_mcdi_request,
};
+static int cdx_bus_enable(struct cdx_controller *cdx, u8 bus_num)
+{
+ return cdx_mcdi_bus_enable(cdx->priv, bus_num);
+}
+
+static int cdx_bus_disable(struct cdx_controller *cdx, u8 bus_num)
+{
+ return cdx_mcdi_bus_disable(cdx->priv, bus_num);
+}
+
void cdx_rpmsg_post_probe(struct cdx_controller *cdx)
{
/* Register CDX controller with CDX bus driver */
@@ -83,8 +93,14 @@ static int cdx_scan_devices(struct cdx_controller *cdx)
num_cdx_bus = (u8)ret;
for (bus_num = 0; bus_num < num_cdx_bus; bus_num++) {
+ struct device *bus_dev;
u8 num_cdx_dev;
+ /* Add the bus on cdx subsystem */
+ bus_dev = cdx_bus_add(cdx, bus_num);
+ if (!bus_dev)
+ continue;
+
/* MCDI FW Read: Fetch the number of devices present */
ret = cdx_mcdi_get_num_devs(cdx_mcdi, bus_num);
if (ret < 0) {
@@ -107,6 +123,7 @@ static int cdx_scan_devices(struct cdx_controller *cdx)
continue;
}
dev_params.cdx = cdx;
+ dev_params.parent = bus_dev;
/* Add the device to the cdx bus */
ret = cdx_device_add(&dev_params);
@@ -125,6 +142,8 @@ static int cdx_scan_devices(struct cdx_controller *cdx)
}
static struct cdx_ops cdx_ops = {
+ .bus_enable = cdx_bus_enable,
+ .bus_disable = cdx_bus_disable,
.scan = cdx_scan_devices,
.dev_configure = cdx_configure_device,
};
@@ -233,3 +252,4 @@ module_exit(cdx_controller_exit);
MODULE_AUTHOR("AMD Inc.");
MODULE_DESCRIPTION("CDX controller for AMD devices");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CDX_BUS_CONTROLLER);
diff --git a/drivers/cdx/controller/mc_cdx_pcol.h b/drivers/cdx/controller/mc_cdx_pcol.h
index 4ccb7b52951b..2de019406b57 100644
--- a/drivers/cdx/controller/mc_cdx_pcol.h
+++ b/drivers/cdx/controller/mc_cdx_pcol.h
@@ -457,6 +457,60 @@
/***********************************/
/*
+ * MC_CMD_CDX_BUS_DOWN
+ * Asserting reset on the CDX bus causes all devices on the bus to be quiesced.
+ * DMA bus mastering is disabled and any pending DMA requests are flushed. Once
+ * the response is returned, the devices are guaranteed to no longer issue DMA
+ * requests or raise MSI interrupts. Further device MMIO accesses may have
+ * undefined results. While the bus reset is asserted, any of the enumeration
+ * or device configuration MCDIs will fail with EAGAIN. It is only legal to
+ * reload the relevant PL region containing CDX devices if the corresponding CDX
+ * bus is in reset. Depending on the implementation, the firmware may or may
+ * not enforce this restriction and it is up to the caller to make sure this
+ * requirement is satisfied.
+ */
+#define MC_CMD_CDX_BUS_DOWN 0x4
+#define MC_CMD_CDX_BUS_DOWN_MSGSET 0x4
+
+/* MC_CMD_CDX_BUS_DOWN_IN msgrequest */
+#define MC_CMD_CDX_BUS_DOWN_IN_LEN 4
+/* Bus number to put in reset, in range 0 to BUS_COUNT-1 */
+#define MC_CMD_CDX_BUS_DOWN_IN_BUS_OFST 0
+#define MC_CMD_CDX_BUS_DOWN_IN_BUS_LEN 4
+
+/*
+ * MC_CMD_CDX_BUS_DOWN_OUT msgresponse: The bus is quiesced, no further
+ * upstream traffic for devices on this bus.
+ */
+#define MC_CMD_CDX_BUS_DOWN_OUT_LEN 0
+
+/***********************************/
+/*
+ * MC_CMD_CDX_BUS_UP
+ * After bus reset is de-asserted, devices are in a state which is functionally
+ * equivalent to each device having been reset with MC_CMD_CDX_DEVICE_RESET. In
+ * other words, device logic is reset in a hardware-specific way, MMIO accesses
+ * are forwarded to the device, DMA bus mastering is disabled and needs to be
+ * re-enabled with MC_CMD_CDX_DEVICE_DMA_ENABLE once the driver is ready to
+ * start servicing DMA. If the underlying number of devices or device resources
+ * changed (e.g. if PL was reloaded) while the bus was in reset, the bus driver
+ * is expected to re-enumerate the bus. Returns EALREADY if the bus was already
+ * up before the call.
+ */
+#define MC_CMD_CDX_BUS_UP 0x5
+#define MC_CMD_CDX_BUS_UP_MSGSET 0x5
+
+/* MC_CMD_CDX_BUS_UP_IN msgrequest */
+#define MC_CMD_CDX_BUS_UP_IN_LEN 4
+/* Bus number to take out of reset, in range 0 to BUS_COUNT-1 */
+#define MC_CMD_CDX_BUS_UP_IN_BUS_OFST 0
+#define MC_CMD_CDX_BUS_UP_IN_BUS_LEN 4
+
+/* MC_CMD_CDX_BUS_UP_OUT msgresponse: The bus can now be enumerated. */
+#define MC_CMD_CDX_BUS_UP_OUT_LEN 0
+
+/***********************************/
+/*
* MC_CMD_CDX_DEVICE_RESET
* After this call completes, device DMA and interrupts are quiesced, devices
* logic is reset in a hardware-specific way and DMA bus mastering is disabled.
diff --git a/drivers/cdx/controller/mcdi_functions.c b/drivers/cdx/controller/mcdi_functions.c
index fc82435d5dea..b1f530946389 100644
--- a/drivers/cdx/controller/mcdi_functions.c
+++ b/drivers/cdx/controller/mcdi_functions.c
@@ -120,10 +120,41 @@ int cdx_mcdi_get_dev_config(struct cdx_mcdi *cdx,
dev_params->vendor = MCDI_WORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_VENDOR_ID);
dev_params->device = MCDI_WORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_DEVICE_ID);
+ dev_params->subsys_vendor = MCDI_WORD(outbuf,
+ CDX_BUS_GET_DEVICE_CONFIG_OUT_SUBSYS_VENDOR_ID);
+ dev_params->subsys_device = MCDI_WORD(outbuf,
+ CDX_BUS_GET_DEVICE_CONFIG_OUT_SUBSYS_DEVICE_ID);
+ dev_params->class = MCDI_DWORD(outbuf,
+ CDX_BUS_GET_DEVICE_CONFIG_OUT_DEVICE_CLASS) & 0xFFFFFF;
+ dev_params->revision = MCDI_BYTE(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_DEVICE_REVISION);
return 0;
}
+int cdx_mcdi_bus_enable(struct cdx_mcdi *cdx, u8 bus_num)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_CDX_BUS_UP_IN_LEN);
+ int ret;
+
+ MCDI_SET_DWORD(inbuf, CDX_BUS_UP_IN_BUS, bus_num);
+ ret = cdx_mcdi_rpc(cdx, MC_CMD_CDX_BUS_UP, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+
+ return ret;
+}
+
+int cdx_mcdi_bus_disable(struct cdx_mcdi *cdx, u8 bus_num)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_CDX_BUS_DOWN_IN_LEN);
+ int ret;
+
+ MCDI_SET_DWORD(inbuf, CDX_BUS_DOWN_IN_BUS, bus_num);
+ ret = cdx_mcdi_rpc(cdx, MC_CMD_CDX_BUS_DOWN, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+
+ return ret;
+}
+
int cdx_mcdi_reset_device(struct cdx_mcdi *cdx, u8 bus_num, u8 dev_num)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_CDX_DEVICE_RESET_IN_LEN);
diff --git a/drivers/cdx/controller/mcdi_functions.h b/drivers/cdx/controller/mcdi_functions.h
index a448d6581eb4..258a5462fbe3 100644
--- a/drivers/cdx/controller/mcdi_functions.h
+++ b/drivers/cdx/controller/mcdi_functions.h
@@ -48,6 +48,24 @@ int cdx_mcdi_get_dev_config(struct cdx_mcdi *cdx,
struct cdx_dev_params *dev_params);
/**
+ * cdx_mcdi_bus_enable - Enable CDX bus represented by bus_num
+ * @cdx: pointer to MCDI interface.
+ * @bus_num: Bus number.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+int cdx_mcdi_bus_enable(struct cdx_mcdi *cdx, u8 bus_num);
+
+/**
+ * cdx_mcdi_bus_disable - Disable CDX bus represented by bus_num
+ * @cdx: pointer to MCDI interface.
+ * @bus_num: Bus number.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+int cdx_mcdi_bus_disable(struct cdx_mcdi *cdx, u8 bus_num);
+
+/**
* cdx_mcdi_reset_device - Reset cdx device represented by bus_num:dev_num
* @cdx: pointer to MCDI interface.
* @bus_num: Bus number.
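[editor's note: combining the MC_CMD_CDX_BUS_DOWN/UP descriptions earlier in this patch with the helpers above, a controller-side caller would be expected to bracket a PL reload roughly as sketched below. This is illustrative only; the reload step itself is a placeholder, not an API added here.]

	/* Quiesce the bus before reprogramming the PL region backing it. */
	ret = cdx_mcdi_bus_disable(cdx_mcdi, bus_num);
	if (ret)
		return ret;

	/* ... reload the programmable logic (out of scope for this patch) ... */

	/* Bring the bus back up; -EALREADY means it was never taken down. */
	ret = cdx_mcdi_bus_enable(cdx_mcdi, bus_num);
	if (ret && ret != -EALREADY)
		return ret;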
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index e1deb7a69b8a..9c90b1d2c036 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -92,7 +92,7 @@ struct hpets {
unsigned long hp_delta;
unsigned int hp_ntimer;
unsigned int hp_which;
- struct hpet_dev hp_dev[];
+ struct hpet_dev hp_dev[] __counted_by(hp_ntimer);
};
static struct hpets *hpets;
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 8d27aa6b5b50..3c6670cf905f 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -628,6 +628,7 @@ static int open_port(struct inode *inode, struct file *filp)
#define full_lseek null_lseek
#define write_zero write_null
#define write_iter_zero write_iter_null
+#define splice_write_zero splice_write_null
#define open_mem open_port
static const struct file_operations __maybe_unused mem_fops = {
@@ -665,6 +666,8 @@ static const struct file_operations zero_fops = {
.read_iter = read_iter_zero,
.read = read_zero,
.write_iter = write_iter_zero,
+ .splice_read = copy_splice_read,
+ .splice_write = splice_write_zero,
.mmap = mmap_zero,
.get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
@@ -676,6 +679,7 @@ static const struct file_operations full_fops = {
.llseek = full_lseek,
.read_iter = read_iter_zero,
.write = write_full,
+ .splice_read = copy_splice_read,
};
static const struct memdev {
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index f60bb6151402..019cf6079cec 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -84,18 +84,13 @@
#include <linux/sysctl.h>
#include <linux/fs.h>
#include <linux/cdev.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/uaccess.h>
-#ifdef CONFIG_OF
-/* For open firmware. */
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#endif
-
#include "xilinx_hwicap.h"
#include "buffer_icap.h"
#include "fifo_icap.h"
@@ -601,14 +596,14 @@ static const struct file_operations hwicap_fops = {
.llseek = noop_llseek,
};
-static int hwicap_setup(struct device *dev, int id,
- const struct resource *regs_res,
+static int hwicap_setup(struct platform_device *pdev, int id,
const struct hwicap_driver_config *config,
const struct config_registers *config_regs)
{
dev_t devt;
struct hwicap_drvdata *drvdata = NULL;
- int retval = 0;
+ struct device *dev = &pdev->dev;
+ int retval;
dev_info(dev, "Xilinx icap port driver\n");
@@ -636,72 +631,39 @@ static int hwicap_setup(struct device *dev, int id,
devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR + id);
- drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL);
+ drvdata = devm_kzalloc(dev, sizeof(struct hwicap_drvdata), GFP_KERNEL);
if (!drvdata) {
retval = -ENOMEM;
- goto failed0;
+ goto failed;
}
dev_set_drvdata(dev, (void *)drvdata);
- if (!regs_res) {
- dev_err(dev, "Couldn't get registers resource\n");
- retval = -EFAULT;
- goto failed1;
- }
-
- drvdata->mem_start = regs_res->start;
- drvdata->mem_end = regs_res->end;
- drvdata->mem_size = resource_size(regs_res);
-
- if (!request_mem_region(drvdata->mem_start,
- drvdata->mem_size, DRIVER_NAME)) {
- dev_err(dev, "Couldn't lock memory region at %Lx\n",
- (unsigned long long) regs_res->start);
- retval = -EBUSY;
- goto failed1;
+ drvdata->base_address = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(drvdata->base_address)) {
+ retval = PTR_ERR(drvdata->base_address);
+ goto failed;
}
drvdata->devt = devt;
drvdata->dev = dev;
- drvdata->base_address = ioremap(drvdata->mem_start, drvdata->mem_size);
- if (!drvdata->base_address) {
- dev_err(dev, "ioremap() failed\n");
- retval = -ENOMEM;
- goto failed2;
- }
-
drvdata->config = config;
drvdata->config_regs = config_regs;
mutex_init(&drvdata->sem);
drvdata->is_open = 0;
- dev_info(dev, "ioremap %llx to %p with size %llx\n",
- (unsigned long long) drvdata->mem_start,
- drvdata->base_address,
- (unsigned long long) drvdata->mem_size);
-
cdev_init(&drvdata->cdev, &hwicap_fops);
drvdata->cdev.owner = THIS_MODULE;
retval = cdev_add(&drvdata->cdev, devt, 1);
if (retval) {
dev_err(dev, "cdev_add() failed\n");
- goto failed3;
+ goto failed;
}
device_create(&icap_class, dev, devt, NULL, "%s%d", DRIVER_NAME, id);
return 0; /* success */
- failed3:
- iounmap(drvdata->base_address);
-
- failed2:
- release_mem_region(regs_res->start, drvdata->mem_size);
-
- failed1:
- kfree(drvdata);
-
- failed0:
+ failed:
mutex_lock(&icap_sem);
probed_devices[id] = 0;
mutex_unlock(&icap_sem);
@@ -723,75 +685,22 @@ static struct hwicap_driver_config fifo_icap_config = {
.reset = fifo_icap_reset,
};
-#ifdef CONFIG_OF
-static int hwicap_of_probe(struct platform_device *op,
- const struct hwicap_driver_config *config)
-{
- struct resource res;
- const unsigned int *id;
- const char *family;
- int rc;
- const struct config_registers *regs;
-
-
- rc = of_address_to_resource(op->dev.of_node, 0, &res);
- if (rc) {
- dev_err(&op->dev, "invalid address\n");
- return rc;
- }
-
- id = of_get_property(op->dev.of_node, "port-number", NULL);
-
- /* It's most likely that we're using V4, if the family is not
- * specified
- */
- regs = &v4_config_registers;
- family = of_get_property(op->dev.of_node, "xlnx,family", NULL);
-
- if (family) {
- if (!strcmp(family, "virtex2p"))
- regs = &v2_config_registers;
- else if (!strcmp(family, "virtex4"))
- regs = &v4_config_registers;
- else if (!strcmp(family, "virtex5"))
- regs = &v5_config_registers;
- else if (!strcmp(family, "virtex6"))
- regs = &v6_config_registers;
- }
- return hwicap_setup(&op->dev, id ? *id : -1, &res, config,
- regs);
-}
-#else
-static inline int hwicap_of_probe(struct platform_device *op,
- const struct hwicap_driver_config *config)
-{
- return -EINVAL;
-}
-#endif /* CONFIG_OF */
-
-static const struct of_device_id hwicap_of_match[];
static int hwicap_drv_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
- struct resource *res;
const struct config_registers *regs;
+ const struct hwicap_driver_config *config;
const char *family;
+ int id = -1;
- match = of_match_device(hwicap_of_match, &pdev->dev);
- if (match)
- return hwicap_of_probe(pdev, match->data);
+ config = device_get_match_data(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
+ of_property_read_u32(pdev->dev.of_node, "port-number", &id);
/* It's most likely that we're using V4, if the family is not
* specified
*/
regs = &v4_config_registers;
- family = pdev->dev.platform_data;
-
- if (family) {
+ if (!of_property_read_string(pdev->dev.of_node, "xlnx,family", &family)) {
if (!strcmp(family, "virtex2p"))
regs = &v2_config_registers;
else if (!strcmp(family, "virtex4"))
@@ -801,9 +710,7 @@ static int hwicap_drv_probe(struct platform_device *pdev)
else if (!strcmp(family, "virtex6"))
regs = &v6_config_registers;
}
-
- return hwicap_setup(&pdev->dev, pdev->id, res,
- &buffer_icap_config, regs);
+ return hwicap_setup(pdev, id, config, regs);
}
static void hwicap_drv_remove(struct platform_device *pdev)
@@ -815,16 +722,12 @@ static void hwicap_drv_remove(struct platform_device *pdev)
device_destroy(&icap_class, drvdata->devt);
cdev_del(&drvdata->cdev);
- iounmap(drvdata->base_address);
- release_mem_region(drvdata->mem_start, drvdata->mem_size);
- kfree(drvdata);
mutex_lock(&icap_sem);
probed_devices[MINOR(dev->devt)-XHWICAP_MINOR] = 0;
mutex_unlock(&icap_sem);
}
-#ifdef CONFIG_OF
/* Match table for device tree binding */
static const struct of_device_id hwicap_of_match[] = {
{ .compatible = "xlnx,opb-hwicap-1.00.b", .data = &buffer_icap_config},
@@ -832,9 +735,6 @@ static const struct of_device_id hwicap_of_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, hwicap_of_match);
-#else
-#define hwicap_of_match NULL
-#endif
static struct platform_driver hwicap_platform_driver = {
.probe = hwicap_drv_probe,
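Note on the conversion above: devm_platform_ioremap_resource() reports failure with an ERR_PTR(), never NULL, so the usual calling pattern is (generic sketch, not code from this driver):
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* error code propagated from the core */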
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index 50198657230e..57857c0dfba9 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -22,21 +22,32 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
+#include <linux/limits.h>
#include <clocksource/timer-riscv.h>
#include <asm/smp.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
#include <asm/sbi.h>
#include <asm/timex.h>
static DEFINE_STATIC_KEY_FALSE(riscv_sstc_available);
static bool riscv_timer_cannot_wake_cpu;
+static void riscv_clock_event_stop(void)
+{
+ if (static_branch_likely(&riscv_sstc_available)) {
+ csr_write(CSR_STIMECMP, ULONG_MAX);
+ if (IS_ENABLED(CONFIG_32BIT))
+ csr_write(CSR_STIMECMPH, ULONG_MAX);
+ } else {
+ sbi_set_timer(U64_MAX);
+ }
+}
+
static int riscv_clock_next_event(unsigned long delta,
struct clock_event_device *ce)
{
u64 next_tval = get_cycles64() + delta;
- csr_set(CSR_IE, IE_TIE);
if (static_branch_likely(&riscv_sstc_available)) {
#if defined(CONFIG_32BIT)
csr_write(CSR_STIMECMP, next_tval & 0xFFFFFFFF);
@@ -94,6 +105,8 @@ static int riscv_timer_starting_cpu(unsigned int cpu)
ce->irq = riscv_clock_event_irq;
if (riscv_timer_cannot_wake_cpu)
ce->features |= CLOCK_EVT_FEAT_C3STOP;
+ if (static_branch_likely(&riscv_sstc_available))
+ ce->rating = 450;
clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);
enable_percpu_irq(riscv_clock_event_irq,
@@ -119,7 +132,7 @@ static irqreturn_t riscv_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);
- csr_clear(CSR_IE, IE_TIE);
+ riscv_clock_event_stop();
evdev->event_handler(evdev);
return IRQ_HANDLED;
diff --git a/drivers/comedi/Kconfig b/drivers/comedi/Kconfig
index 9af280735cba..93c68a40a17b 100644
--- a/drivers/comedi/Kconfig
+++ b/drivers/comedi/Kconfig
@@ -67,6 +67,7 @@ config COMEDI_TEST
config COMEDI_PARPORT
tristate "Parallel port support"
+ depends on HAS_IOPORT
help
Enable support for the standard parallel port.
A cheap and easy way to get a few more digital I/O lines. Steal
@@ -79,6 +80,7 @@ config COMEDI_PARPORT
config COMEDI_SSV_DNP
tristate "SSV Embedded Systems DIL/Net-PC support"
depends on X86_32 || COMPILE_TEST
+ depends on HAS_IOPORT
help
Enable support for SSV Embedded Systems DIL/Net-PC
@@ -89,6 +91,8 @@ endif # COMEDI_MISC_DRIVERS
menuconfig COMEDI_ISA_DRIVERS
bool "Comedi ISA and PC/104 drivers"
+ depends on ISA || ISA_BUS || PC104
+ depends on HAS_IOPORT
help
Enable comedi ISA and PC/104 drivers to be built
@@ -589,6 +593,7 @@ config COMEDI_8255_PCI
config COMEDI_ADDI_WATCHDOG
tristate
+ depends on HAS_IOPORT
help
Provides support for the watchdog subdevice found on many ADDI-DATA
boards. This module will be automatically selected when needed. The
@@ -596,6 +601,7 @@ config COMEDI_ADDI_WATCHDOG
config COMEDI_ADDI_APCI_1032
tristate "ADDI-DATA APCI_1032 support"
+ depends on HAS_IOPORT
help
Enable support for ADDI-DATA APCI_1032 cards
@@ -604,6 +610,7 @@ config COMEDI_ADDI_APCI_1032
config COMEDI_ADDI_APCI_1500
tristate "ADDI-DATA APCI_1500 support"
+ depends on HAS_IOPORT
help
Enable support for ADDI-DATA APCI_1500 cards
@@ -612,6 +619,7 @@ config COMEDI_ADDI_APCI_1500
config COMEDI_ADDI_APCI_1516
tristate "ADDI-DATA APCI-1016/1516/2016 support"
+ depends on HAS_IOPORT
select COMEDI_ADDI_WATCHDOG
help
Enable support for ADDI-DATA APCI-1016, APCI-1516 and APCI-2016 boards.
@@ -623,6 +631,7 @@ config COMEDI_ADDI_APCI_1516
config COMEDI_ADDI_APCI_1564
tristate "ADDI-DATA APCI_1564 support"
+ depends on HAS_IOPORT
select COMEDI_ADDI_WATCHDOG
help
Enable support for ADDI-DATA APCI_1564 cards
@@ -632,6 +641,7 @@ config COMEDI_ADDI_APCI_1564
config COMEDI_ADDI_APCI_16XX
tristate "ADDI-DATA APCI_16xx support"
+ depends on HAS_IOPORT
help
Enable support for ADDI-DATA APCI_16xx cards
@@ -640,6 +650,7 @@ config COMEDI_ADDI_APCI_16XX
config COMEDI_ADDI_APCI_2032
tristate "ADDI-DATA APCI_2032 support"
+ depends on HAS_IOPORT
select COMEDI_ADDI_WATCHDOG
help
Enable support for ADDI-DATA APCI_2032 cards
@@ -649,6 +660,7 @@ config COMEDI_ADDI_APCI_2032
config COMEDI_ADDI_APCI_2200
tristate "ADDI-DATA APCI_2200 support"
+ depends on HAS_IOPORT
select COMEDI_ADDI_WATCHDOG
help
Enable support for ADDI-DATA APCI_2200 cards
@@ -658,6 +670,7 @@ config COMEDI_ADDI_APCI_2200
config COMEDI_ADDI_APCI_3120
tristate "ADDI-DATA APCI_3120/3001 support"
+ depends on HAS_IOPORT
depends on HAS_DMA
help
Enable support for ADDI-DATA APCI_3120/3001 cards
@@ -667,6 +680,7 @@ config COMEDI_ADDI_APCI_3120
config COMEDI_ADDI_APCI_3501
tristate "ADDI-DATA APCI_3501 support"
+ depends on HAS_IOPORT
help
Enable support for ADDI-DATA APCI_3501 cards
@@ -675,6 +689,7 @@ config COMEDI_ADDI_APCI_3501
config COMEDI_ADDI_APCI_3XXX
tristate "ADDI-DATA APCI_3xxx support"
+ depends on HAS_IOPORT
help
Enable support for ADDI-DATA APCI_3xxx cards
@@ -683,6 +698,7 @@ config COMEDI_ADDI_APCI_3XXX
config COMEDI_ADL_PCI6208
tristate "ADLink PCI-6208A support"
+ depends on HAS_IOPORT
help
Enable support for ADLink PCI-6208A cards
@@ -691,6 +707,7 @@ config COMEDI_ADL_PCI6208
config COMEDI_ADL_PCI7X3X
tristate "ADLink PCI-723X/743X isolated digital i/o board support"
+ depends on HAS_IOPORT
help
Enable support for ADlink PCI-723X/743X isolated digital i/o boards.
Supported boards include the 32-channel PCI-7230 (16 in/16 out),
@@ -702,6 +719,7 @@ config COMEDI_ADL_PCI7X3X
config COMEDI_ADL_PCI8164
tristate "ADLink PCI-8164 4 Axes Motion Control board support"
+ depends on HAS_IOPORT
help
Enable support for ADlink PCI-8164 4 Axes Motion Control board
@@ -710,6 +728,7 @@ config COMEDI_ADL_PCI8164
config COMEDI_ADL_PCI9111
tristate "ADLink PCI-9111HR support"
+ depends on HAS_IOPORT
select COMEDI_8254
help
Enable support for ADlink PCI9111 cards
@@ -719,6 +738,7 @@ config COMEDI_ADL_PCI9111
config COMEDI_ADL_PCI9118
tristate "ADLink PCI-9118DG, PCI-9118HG, PCI-9118HR support"
+ depends on HAS_IOPORT
depends on HAS_DMA
select COMEDI_8254
help
@@ -729,6 +749,7 @@ config COMEDI_ADL_PCI9118
config COMEDI_ADV_PCI1710
tristate "Advantech PCI-171x and PCI-1731 support"
+ depends on HAS_IOPORT
select COMEDI_8254
help
Enable support for Advantech PCI-1710, PCI-1710HG, PCI-1711,
@@ -739,6 +760,7 @@ config COMEDI_ADV_PCI1710
config COMEDI_ADV_PCI1720
tristate "Advantech PCI-1720 support"
+ depends on HAS_IOPORT
help
Enable support for Advantech PCI-1720 Analog Output board.
@@ -747,6 +769,7 @@ config COMEDI_ADV_PCI1720
config COMEDI_ADV_PCI1723
tristate "Advantech PCI-1723 support"
+ depends on HAS_IOPORT
help
Enable support for Advantech PCI-1723 cards
@@ -755,6 +778,7 @@ config COMEDI_ADV_PCI1723
config COMEDI_ADV_PCI1724
tristate "Advantech PCI-1724U support"
+ depends on HAS_IOPORT
help
Enable support for Advantech PCI-1724U cards. These are 32-channel
analog output cards with voltage and current loop output ranges and
@@ -765,6 +789,7 @@ config COMEDI_ADV_PCI1724
config COMEDI_ADV_PCI1760
tristate "Advantech PCI-1760 support"
+ depends on HAS_IOPORT
help
Enable support for Advantech PCI-1760 board.
@@ -773,6 +798,7 @@ config COMEDI_ADV_PCI1760
config COMEDI_ADV_PCI_DIO
tristate "Advantech PCI DIO card support"
+ depends on HAS_IOPORT
select COMEDI_8254
select COMEDI_8255
help
@@ -796,6 +822,7 @@ config COMEDI_AMPLC_DIO200_PCI
config COMEDI_AMPLC_PC236_PCI
tristate "Amplicon PCI236 DIO board support"
+ depends on HAS_IOPORT
select COMEDI_AMPLC_PC236
help
Enable support for Amplicon PCI236 DIO board.
@@ -805,6 +832,7 @@ config COMEDI_AMPLC_PC236_PCI
config COMEDI_AMPLC_PC263_PCI
tristate "Amplicon PCI263 relay board support"
+ depends on HAS_IOPORT
help
Enable support for Amplicon PCI263 relay board. This is a PCI board
with 16 reed relay output channels.
@@ -814,6 +842,7 @@ config COMEDI_AMPLC_PC263_PCI
config COMEDI_AMPLC_PCI224
tristate "Amplicon PCI224 and PCI234 support"
+ depends on HAS_IOPORT
select COMEDI_8254
help
Enable support for Amplicon PCI224 and PCI234 AO boards
@@ -823,6 +852,7 @@ config COMEDI_AMPLC_PCI224
config COMEDI_AMPLC_PCI230
tristate "Amplicon PCI230 and PCI260 support"
+ depends on HAS_IOPORT
select COMEDI_8254
select COMEDI_8255
help
@@ -834,6 +864,7 @@ config COMEDI_AMPLC_PCI230
config COMEDI_CONTEC_PCI_DIO
tristate "Contec PIO1616L digital I/O board support"
+ depends on HAS_IOPORT
help
Enable support for the Contec PIO1616L digital I/O board
@@ -842,6 +873,7 @@ config COMEDI_CONTEC_PCI_DIO
config COMEDI_DAS08_PCI
tristate "DAS-08 PCI support"
+ depends on HAS_IOPORT
select COMEDI_DAS08
help
Enable support for PCI DAS-08 cards.
@@ -861,6 +893,7 @@ config COMEDI_DT3000
config COMEDI_DYNA_PCI10XX
tristate "Dynalog PCI DAQ series support"
+ depends on HAS_IOPORT
help
Enable support for Dynalog PCI DAQ series
PCI-1050
@@ -911,6 +944,7 @@ config COMEDI_JR3_PCI
config COMEDI_KE_COUNTER
tristate "Kolter-Electronic PCI Counter 1 card support"
+ depends on HAS_IOPORT
help
Enable support for Kolter-Electronic PCI Counter 1 cards
@@ -929,6 +963,7 @@ config COMEDI_CB_PCIDAS64
config COMEDI_CB_PCIDAS
tristate "MeasurementComputing PCI-DAS support"
+ depends on HAS_IOPORT
select COMEDI_8254
select COMEDI_8255
help
@@ -942,6 +977,7 @@ config COMEDI_CB_PCIDAS
config COMEDI_CB_PCIDDA
tristate "MeasurementComputing PCI-DDA series support"
+ depends on HAS_IOPORT
select COMEDI_8255
help
Enable support for ComputerBoards/MeasurementComputing PCI-DDA
@@ -953,6 +989,7 @@ config COMEDI_CB_PCIDDA
config COMEDI_CB_PCIMDAS
tristate "MeasurementComputing PCIM-DAS1602/16, PCIe-DAS1602/16 support"
+ depends on HAS_IOPORT
select COMEDI_8254
select COMEDI_8255
help
@@ -964,6 +1001,7 @@ config COMEDI_CB_PCIMDAS
config COMEDI_CB_PCIMDDA
tristate "MeasurementComputing PCIM-DDA06-16 support"
+ depends on HAS_IOPORT
select COMEDI_8255
help
Enable support for ComputerBoards/MeasurementComputing PCIM-DDA06-16
@@ -973,6 +1011,7 @@ config COMEDI_CB_PCIMDDA
config COMEDI_ME4000
tristate "Meilhaus ME-4000 support"
+ depends on HAS_IOPORT
select COMEDI_8254
help
Enable support for Meilhaus PCI data acquisition cards
@@ -1042,7 +1081,6 @@ config COMEDI_NI_PCIDIO
tristate "NI PCI-DIO32HS, PCI-6533, PCI-6534 support"
depends on HAS_DMA
select COMEDI_MITE
- select COMEDI_8255
help
Enable support for National Instruments PCI-DIO-32HS, PXI-6533,
PCI-6533 and PCI-6534
@@ -1103,7 +1141,7 @@ endif # COMEDI_PCI_DRIVERS
menuconfig COMEDI_PCMCIA_DRIVERS
tristate "Comedi PCMCIA drivers"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Enable support for comedi PCMCIA drivers.
@@ -1254,6 +1292,7 @@ config COMEDI_8255
config COMEDI_8255_SA
tristate "Standalone 8255 support"
+ depends on HAS_IOPORT
select COMEDI_8255
help
Enable support for 8255 digital I/O as a standalone driver.
@@ -1290,10 +1329,12 @@ config COMEDI_AMPLC_DIO200
config COMEDI_AMPLC_PC236
tristate
+ depends on HAS_IOPORT
select COMEDI_8255
config COMEDI_DAS08
tristate
+ depends on HAS_IOPORT
select COMEDI_8254
select COMEDI_8255
diff --git a/drivers/comedi/drivers.c b/drivers/comedi/drivers.c
index d4e2ed709bfc..376130bfba8a 100644
--- a/drivers/comedi/drivers.c
+++ b/drivers/comedi/drivers.c
@@ -177,7 +177,8 @@ static void comedi_device_detach_cleanup(struct comedi_device *dev)
dev->n_subdevices = 0;
}
kfree(dev->private);
- kfree(dev->pacer);
+ if (!IS_ERR(dev->pacer))
+ kfree(dev->pacer);
dev->private = NULL;
dev->pacer = NULL;
dev->driver = NULL;
diff --git a/drivers/comedi/drivers/8255.c b/drivers/comedi/drivers/8255.c
index ced8ea09d4fa..f45f7bd1c61a 100644
--- a/drivers/comedi/drivers/8255.c
+++ b/drivers/comedi/drivers/8255.c
@@ -80,7 +80,7 @@ static int dev_8255_attach(struct comedi_device *dev,
if (ret) {
s->type = COMEDI_SUBD_UNUSED;
} else {
- ret = subdev_8255_init(dev, s, NULL, iobase);
+ ret = subdev_8255_io_init(dev, s, iobase);
if (ret) {
/*
* Release the I/O port region here, as the
diff --git a/drivers/comedi/drivers/8255_pci.c b/drivers/comedi/drivers/8255_pci.c
index 0fec048e3a53..8498cabe4d91 100644
--- a/drivers/comedi/drivers/8255_pci.c
+++ b/drivers/comedi/drivers/8255_pci.c
@@ -57,6 +57,7 @@
#include <linux/comedi/comedi_8255.h>
enum pci_8255_boardid {
+#ifdef CONFIG_HAS_IOPORT
BOARD_ADLINK_PCI7224,
BOARD_ADLINK_PCI7248,
BOARD_ADLINK_PCI7296,
@@ -65,6 +66,7 @@ enum pci_8255_boardid {
BOARD_CB_PCIDIO48H_OLD,
BOARD_CB_PCIDIO48H_NEW,
BOARD_CB_PCIDIO96H,
+#endif /* CONFIG_HAS_IOPORT */
BOARD_NI_PCIDIO96,
BOARD_NI_PCIDIO96B,
BOARD_NI_PXI6508,
@@ -82,6 +84,7 @@ struct pci_8255_boardinfo {
};
static const struct pci_8255_boardinfo pci_8255_boards[] = {
+#ifdef CONFIG_HAS_IOPORT
[BOARD_ADLINK_PCI7224] = {
.name = "adl_pci-7224",
.dio_badr = 2,
@@ -122,6 +125,7 @@ static const struct pci_8255_boardinfo pci_8255_boards[] = {
.dio_badr = 2,
.n_8255 = 4,
},
+#endif /* CONFIG_HAS_IOPORT */
[BOARD_NI_PCIDIO96] = {
.name = "ni_pci-dio-96",
.dio_badr = 1,
@@ -219,8 +223,11 @@ static int pci_8255_auto_attach(struct comedi_device *dev,
dev->mmio = pci_ioremap_bar(pcidev, board->dio_badr);
if (!dev->mmio)
return -ENOMEM;
- } else {
+ } else if (IS_ENABLED(CONFIG_HAS_IOPORT)) {
dev->iobase = pci_resource_start(pcidev, board->dio_badr);
+ } else {
+ dev_err(dev->class_dev, "error! need I/O port support\n");
+ return -ENXIO;
}
/*
@@ -235,9 +242,9 @@ static int pci_8255_auto_attach(struct comedi_device *dev,
for (i = 0; i < board->n_8255; i++) {
s = &dev->subdevices[i];
if (dev->mmio)
- ret = subdev_8255_mm_init(dev, s, NULL, i * I8255_SIZE);
+ ret = subdev_8255_mm_init(dev, s, i * I8255_SIZE);
else
- ret = subdev_8255_init(dev, s, NULL, i * I8255_SIZE);
+ ret = subdev_8255_io_init(dev, s, i * I8255_SIZE);
if (ret)
return ret;
}
@@ -259,6 +266,7 @@ static int pci_8255_pci_probe(struct pci_dev *dev,
}
static const struct pci_device_id pci_8255_pci_table[] = {
+#ifdef CONFIG_HAS_IOPORT
{ PCI_VDEVICE(ADLINK, 0x7224), BOARD_ADLINK_PCI7224 },
{ PCI_VDEVICE(ADLINK, 0x7248), BOARD_ADLINK_PCI7248 },
{ PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 },
@@ -269,6 +277,7 @@ static const struct pci_device_id pci_8255_pci_table[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, PCI_VENDOR_ID_CB, 0x000b),
.driver_data = BOARD_CB_PCIDIO48H_NEW },
{ PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H },
+#endif /* CONFIG_HAS_IOPORT */
{ PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 },
{ PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B },
{ PCI_VDEVICE(NI, 0x13c0), BOARD_NI_PXI6508 },
diff --git a/drivers/comedi/drivers/adl_pci9111.c b/drivers/comedi/drivers/adl_pci9111.c
index c50f94272a74..086d93f40cb9 100644
--- a/drivers/comedi/drivers/adl_pci9111.c
+++ b/drivers/comedi/drivers/adl_pci9111.c
@@ -647,10 +647,10 @@ static int pci9111_auto_attach(struct comedi_device *dev,
dev->irq = pcidev->irq;
}
- dev->pacer = comedi_8254_init(dev->iobase + PCI9111_8254_BASE_REG,
- I8254_OSC_BASE_2MHZ, I8254_IO16, 0);
- if (!dev->pacer)
- return -ENOMEM;
+ dev->pacer = comedi_8254_io_alloc(dev->iobase + PCI9111_8254_BASE_REG,
+ I8254_OSC_BASE_2MHZ, I8254_IO16, 0);
+ if (IS_ERR(dev->pacer))
+ return PTR_ERR(dev->pacer);
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
diff --git a/drivers/comedi/drivers/adl_pci9118.c b/drivers/comedi/drivers/adl_pci9118.c
index 9a816c718303..a76e2666d583 100644
--- a/drivers/comedi/drivers/adl_pci9118.c
+++ b/drivers/comedi/drivers/adl_pci9118.c
@@ -1524,10 +1524,10 @@ static int pci9118_common_attach(struct comedi_device *dev,
devpriv->iobase_a = pci_resource_start(pcidev, 0);
dev->iobase = pci_resource_start(pcidev, 2);
- dev->pacer = comedi_8254_init(dev->iobase + PCI9118_TIMER_BASE,
- I8254_OSC_BASE_4MHZ, I8254_IO32, 0);
- if (!dev->pacer)
- return -ENOMEM;
+ dev->pacer = comedi_8254_io_alloc(dev->iobase + PCI9118_TIMER_BASE,
+ I8254_OSC_BASE_4MHZ, I8254_IO32, 0);
+ if (IS_ERR(dev->pacer))
+ return PTR_ERR(dev->pacer);
pci9118_reset(dev);
diff --git a/drivers/comedi/drivers/adv_pci1710.c b/drivers/comedi/drivers/adv_pci1710.c
index 4f2639968260..c49b0f1f5228 100644
--- a/drivers/comedi/drivers/adv_pci1710.c
+++ b/drivers/comedi/drivers/adv_pci1710.c
@@ -767,10 +767,10 @@ static int pci1710_auto_attach(struct comedi_device *dev,
return ret;
dev->iobase = pci_resource_start(pcidev, 2);
- dev->pacer = comedi_8254_init(dev->iobase + PCI171X_TIMER_BASE,
- I8254_OSC_BASE_10MHZ, I8254_IO16, 0);
- if (!dev->pacer)
- return -ENOMEM;
+ dev->pacer = comedi_8254_io_alloc(dev->iobase + PCI171X_TIMER_BASE,
+ I8254_OSC_BASE_10MHZ, I8254_IO16, 0);
+ if (IS_ERR(dev->pacer))
+ return PTR_ERR(dev->pacer);
n_subdevices = 1; /* all boards have analog inputs */
if (board->has_ao)
diff --git a/drivers/comedi/drivers/adv_pci_dio.c b/drivers/comedi/drivers/adv_pci_dio.c
index efa3e46b554b..ca8054504760 100644
--- a/drivers/comedi/drivers/adv_pci_dio.c
+++ b/drivers/comedi/drivers/adv_pci_dio.c
@@ -642,8 +642,8 @@ static int pci_dio_auto_attach(struct comedi_device *dev,
for (j = 0; j < d->chans; j++) {
s = &dev->subdevices[subdev++];
- ret = subdev_8255_init(dev, s, NULL,
- d->addr + j * I8255_SIZE);
+ ret = subdev_8255_io_init(dev, s,
+ d->addr + j * I8255_SIZE);
if (ret)
return ret;
}
@@ -664,11 +664,11 @@ static int pci_dio_auto_attach(struct comedi_device *dev,
if (board->timer_regbase) {
s = &dev->subdevices[subdev++];
- dev->pacer = comedi_8254_init(dev->iobase +
- board->timer_regbase,
- 0, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
+ dev->pacer =
+ comedi_8254_io_alloc(dev->iobase + board->timer_regbase,
+ 0, I8254_IO8, 0);
+ if (IS_ERR(dev->pacer))
+ return PTR_ERR(dev->pacer);
comedi_8254_subdevice_init(s, dev->pacer);
}
diff --git a/drivers/comedi/drivers/aio_aio12_8.c b/drivers/comedi/drivers/aio_aio12_8.c
index 30b8a32204d8..227a86a3a760 100644
--- a/drivers/comedi/drivers/aio_aio12_8.c
+++ b/drivers/comedi/drivers/aio_aio12_8.c
@@ -206,10 +206,10 @@ static int aio_aio12_8_attach(struct comedi_device *dev,
if (ret)
return ret;
- dev->pacer = comedi_8254_init(dev->iobase + AIO12_8_8254_BASE_REG,
- 0, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
+ dev->pacer = comedi_8254_io_alloc(dev->iobase + AIO12_8_8254_BASE_REG,
+ 0, I8254_IO8, 0);
+ if (IS_ERR(dev->pacer))
+ return PTR_ERR(dev->pacer);
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
@@ -247,7 +247,7 @@ static int aio_aio12_8_attach(struct comedi_device *dev,
/* Digital I/O subdevice (8255) */
s = &dev->subdevices[2];
- ret = subdev_8255_init(dev, s, NULL, AIO12_8_8255_BASE_REG);
+ ret = subdev_8255_io_init(dev, s, AIO12_8_8255_BASE_REG);
if (ret)
return ret;
diff --git a/drivers/comedi/drivers/amplc_dio200_common.c b/drivers/comedi/drivers/amplc_dio200_common.c
index ff651f2eb86c..b1a9b4c4a185 100644
--- a/drivers/comedi/drivers/amplc_dio200_common.c
+++ b/drivers/comedi/drivers/amplc_dio200_common.c
@@ -86,6 +86,70 @@ struct dio200_subdev_intr {
unsigned int active:1;
};
+#ifdef CONFIG_HAS_IOPORT
+
+static unsigned char dio200___read8(struct comedi_device *dev,
+ unsigned int offset)
+{
+ if (dev->mmio)
+ return readb(dev->mmio + offset);
+ return inb(dev->iobase + offset);
+}
+
+static void dio200___write8(struct comedi_device *dev,
+ unsigned int offset, unsigned char val)
+{
+ if (dev->mmio)
+ writeb(val, dev->mmio + offset);
+ else
+ outb(val, dev->iobase + offset);
+}
+
+static unsigned int dio200___read32(struct comedi_device *dev,
+ unsigned int offset)
+{
+ if (dev->mmio)
+ return readl(dev->mmio + offset);
+ return inl(dev->iobase + offset);
+}
+
+static void dio200___write32(struct comedi_device *dev,
+ unsigned int offset, unsigned int val)
+{
+ if (dev->mmio)
+ writel(val, dev->mmio + offset);
+ else
+ outl(val, dev->iobase + offset);
+}
+
+#else /* CONFIG_HAS_IOPORT */
+
+static unsigned char dio200___read8(struct comedi_device *dev,
+ unsigned int offset)
+{
+ return readb(dev->mmio + offset);
+}
+
+static void dio200___write8(struct comedi_device *dev,
+ unsigned int offset, unsigned char val)
+{
+ writeb(val, dev->mmio + offset);
+}
+
+static unsigned int dio200___read32(struct comedi_device *dev,
+ unsigned int offset)
+{
+ return readl(dev->mmio + offset);
+}
+
+static void dio200___write32(struct comedi_device *dev,
+ unsigned int offset, unsigned int val)
+{
+ writel(val, dev->mmio + offset);
+}
+
+#endif /* CONFIG_HAS_IOPORT */
+
static unsigned char dio200_read8(struct comedi_device *dev,
unsigned int offset)
{
@@ -94,9 +158,7 @@ static unsigned char dio200_read8(struct comedi_device *dev,
if (board->is_pcie)
offset <<= 3;
- if (dev->mmio)
- return readb(dev->mmio + offset);
- return inb(dev->iobase + offset);
+ return dio200___read8(dev, offset);
}
static void dio200_write8(struct comedi_device *dev,
@@ -107,10 +169,7 @@ static void dio200_write8(struct comedi_device *dev,
if (board->is_pcie)
offset <<= 3;
- if (dev->mmio)
- writeb(val, dev->mmio + offset);
- else
- outb(val, dev->iobase + offset);
+ dio200___write8(dev, offset, val);
}
static unsigned int dio200_read32(struct comedi_device *dev,
@@ -121,9 +180,7 @@ static unsigned int dio200_read32(struct comedi_device *dev,
if (board->is_pcie)
offset <<= 3;
- if (dev->mmio)
- return readl(dev->mmio + offset);
- return inl(dev->iobase + offset);
+ return dio200___read32(dev, offset);
}
static void dio200_write32(struct comedi_device *dev,
@@ -134,10 +191,7 @@ static void dio200_write32(struct comedi_device *dev,
if (board->is_pcie)
offset <<= 3;
- if (dev->mmio)
- writel(val, dev->mmio + offset);
- else
- outl(val, dev->iobase + offset);
+ dio200___write32(dev, offset, val);
}
static unsigned int dio200_subdev_8254_offset(struct comedi_device *dev,
@@ -149,9 +203,9 @@ static unsigned int dio200_subdev_8254_offset(struct comedi_device *dev,
/* get the offset that was passed to comedi_8254_*_init() */
if (dev->mmio)
- offset = i8254->mmio - dev->mmio;
+ offset = (void __iomem *)i8254->context - dev->mmio;
else
- offset = i8254->iobase - dev->iobase;
+ offset = i8254->context - dev->iobase;
/* remove the shift that was added for PCIe boards */
if (board->is_pcie)
@@ -556,14 +610,14 @@ static int dio200_subdev_8254_init(struct comedi_device *dev,
}
if (dev->mmio) {
- i8254 = comedi_8254_mm_init(dev->mmio + offset,
- 0, I8254_IO8, regshift);
+ i8254 = comedi_8254_mm_alloc(dev->mmio + offset,
+ 0, I8254_IO8, regshift);
} else {
- i8254 = comedi_8254_init(dev->iobase + offset,
- 0, I8254_IO8, regshift);
+ i8254 = comedi_8254_io_alloc(dev->iobase + offset,
+ 0, I8254_IO8, regshift);
}
- if (!i8254)
- return -ENOMEM;
+ if (IS_ERR(i8254))
+ return PTR_ERR(i8254);
comedi_8254_subdevice_init(s, i8254);
@@ -779,6 +833,12 @@ int amplc_dio200_common_attach(struct comedi_device *dev, unsigned int irq,
unsigned int n;
int ret;
+ if (!IS_ENABLED(CONFIG_HAS_IOPORT) && !dev->mmio) {
+ dev_err(dev->class_dev,
+ "error! need I/O port support\n");
+ return -ENXIO;
+ }
+
ret = comedi_alloc_subdevices(dev, board->n_subdevs);
if (ret)
return ret;
diff --git a/drivers/comedi/drivers/amplc_dio200_pci.c b/drivers/comedi/drivers/amplc_dio200_pci.c
index 527994d82a1f..cb5b328a28e3 100644
--- a/drivers/comedi/drivers/amplc_dio200_pci.c
+++ b/drivers/comedi/drivers/amplc_dio200_pci.c
@@ -223,14 +223,17 @@
*/
enum dio200_pci_model {
+#ifdef CONFIG_HAS_IOPORT
pci215_model,
pci272_model,
+#endif /* CONFIG_HAS_IOPORT */
pcie215_model,
pcie236_model,
pcie296_model
};
static const struct dio200_board dio200_pci_boards[] = {
+#ifdef CONFIG_HAS_IOPORT
[pci215_model] = {
.name = "pci215",
.mainbar = 2,
@@ -252,6 +255,7 @@ static const struct dio200_board dio200_pci_boards[] = {
.sdinfo = { 0x00, 0x08, 0x10, 0x3f },
.has_int_sce = true,
},
+#endif /* CONFIG_HAS_IOPORT */
[pcie215_model] = {
.name = "pcie215",
.mainbar = 1,
@@ -364,8 +368,12 @@ static int dio200_pci_auto_attach(struct comedi_device *dev,
"error! cannot remap registers\n");
return -ENOMEM;
}
- } else {
+ } else if (IS_ENABLED(CONFIG_HAS_IOPORT)) {
dev->iobase = pci_resource_start(pci_dev, bar);
+ } else {
+ dev_err(dev->class_dev,
+ "error! need I/O port support\n");
+ return -ENXIO;
}
if (board->is_pcie) {
@@ -385,8 +393,10 @@ static struct comedi_driver dio200_pci_comedi_driver = {
};
static const struct pci_device_id dio200_pci_table[] = {
+#ifdef CONFIG_HAS_IOPORT
{ PCI_VDEVICE(AMPLICON, 0x000b), pci215_model },
{ PCI_VDEVICE(AMPLICON, 0x000a), pci272_model },
+#endif /* CONFIG_HAS_IOPORT */
{ PCI_VDEVICE(AMPLICON, 0x0011), pcie236_model },
{ PCI_VDEVICE(AMPLICON, 0x0012), pcie215_model },
{ PCI_VDEVICE(AMPLICON, 0x0014), pcie296_model },
diff --git a/drivers/comedi/drivers/amplc_pc236_common.c b/drivers/comedi/drivers/amplc_pc236_common.c
index 9f4f89b1ef23..326ca72c24ec 100644
--- a/drivers/comedi/drivers/amplc_pc236_common.c
+++ b/drivers/comedi/drivers/amplc_pc236_common.c
@@ -147,7 +147,7 @@ int amplc_pc236_common_attach(struct comedi_device *dev, unsigned long iobase,
s = &dev->subdevices[0];
/* digital i/o subdevice (8255) */
- ret = subdev_8255_init(dev, s, NULL, 0x00);
+ ret = subdev_8255_io_init(dev, s, 0x00);
if (ret)
return ret;
diff --git a/drivers/comedi/drivers/amplc_pci224.c b/drivers/comedi/drivers/amplc_pci224.c
index 5a04e55daeea..1373637c2ca2 100644
--- a/drivers/comedi/drivers/amplc_pci224.c
+++ b/drivers/comedi/drivers/amplc_pci224.c
@@ -1051,10 +1051,10 @@ pci224_auto_attach(struct comedi_device *dev, unsigned long context_model)
outw(devpriv->daccon | PCI224_DACCON_FIFORESET,
dev->iobase + PCI224_DACCON);
- dev->pacer = comedi_8254_init(devpriv->iobase1 + PCI224_Z2_BASE,
- I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
+ dev->pacer = comedi_8254_io_alloc(devpriv->iobase1 + PCI224_Z2_BASE,
+ I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
+ if (IS_ERR(dev->pacer))
+ return PTR_ERR(dev->pacer);
ret = comedi_alloc_subdevices(dev, 1);
if (ret)
diff --git a/drivers/comedi/drivers/amplc_pci230.c b/drivers/comedi/drivers/amplc_pci230.c
index 92ba8b8c0172..c74209c2e83a 100644
--- a/drivers/comedi/drivers/amplc_pci230.c
+++ b/drivers/comedi/drivers/amplc_pci230.c
@@ -2475,10 +2475,10 @@ static int pci230_auto_attach(struct comedi_device *dev,
dev->irq = pci_dev->irq;
}
- dev->pacer = comedi_8254_init(dev->iobase + PCI230_Z2_CT_BASE,
- 0, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
+ dev->pacer = comedi_8254_io_alloc(dev->iobase + PCI230_Z2_CT_BASE,
+ 0, I8254_IO8, 0);
+ if (IS_ERR(dev->pacer))
+ return PTR_ERR(dev->pacer);
rc = comedi_alloc_subdevices(dev, 3);
if (rc)
@@ -2529,7 +2529,7 @@ static int pci230_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[2];
/* digital i/o subdevice */
if (board->have_dio) {
- rc = subdev_8255_init(dev, s, NULL, PCI230_PPI_X_BASE);
+ rc = subdev_8255_io_init(dev, s, PCI230_PPI_X_BASE);
if (rc)
return rc;
} else {
diff --git a/drivers/comedi/drivers/cb_das16_cs.c b/drivers/comedi/drivers/cb_das16_cs.c
index 8e0d2fa5f95d..306208a0695b 100644
--- a/drivers/comedi/drivers/cb_das16_cs.c
+++ b/drivers/comedi/drivers/cb_das16_cs.c
@@ -363,10 +363,10 @@ static int das16cs_auto_attach(struct comedi_device *dev,
if (!devpriv)
return -ENOMEM;
- dev->pacer = comedi_8254_init(dev->iobase + DAS16CS_TIMER_BASE,
- I8254_OSC_BASE_10MHZ, I8254_IO16, 0);
- if (!dev->pacer)
- return -ENOMEM;
+ dev->pacer = comedi_8254_io_alloc(dev->iobase + DAS16CS_TIMER_BASE,
+ I8254_OSC_BASE_10MHZ, I8254_IO16, 0);
+ if (IS_ERR(dev->pacer))
+ return PTR_ERR(dev->pacer);
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
diff --git a/drivers/comedi/drivers/cb_pcidas.c b/drivers/comedi/drivers/cb_pcidas.c
index 0c7576b967fc..8bb9b0623869 100644
--- a/drivers/comedi/drivers/cb_pcidas.c
+++ b/drivers/comedi/drivers/cb_pcidas.c
@@ -1288,16 +1288,16 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev,
}
dev->irq = pcidev->irq;
- dev->pacer = comedi_8254_init(dev->iobase + PCIDAS_AI_8254_BASE,
- I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
+ dev->pacer = comedi_8254_io_alloc(dev->iobase + PCIDAS_AI_8254_BASE,
+ I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
+ if (IS_ERR(dev->pacer))
+ return PTR_ERR(dev->pacer);
- devpriv->ao_pacer = comedi_8254_init(dev->iobase + PCIDAS_AO_8254_BASE,
- I8254_OSC_BASE_10MHZ,
- I8254_IO8, 0);
- if (!devpriv->ao_pacer)
- return -ENOMEM;
+ devpriv->ao_pacer =
+ comedi_8254_io_alloc(dev->iobase + PCIDAS_AO_8254_BASE,
+ I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
+ if (IS_ERR(devpriv->ao_pacer))
+ return PTR_ERR(devpriv->ao_pacer);
ret = comedi_alloc_subdevices(dev, 7);
if (ret)
@@ -1352,7 +1352,7 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev,
/* 8255 */
s = &dev->subdevices[2];
- ret = subdev_8255_init(dev, s, NULL, PCIDAS_8255_BASE);
+ ret = subdev_8255_io_init(dev, s, PCIDAS_8255_BASE);
if (ret)
return ret;
@@ -1453,7 +1453,8 @@ static void cb_pcidas_detach(struct comedi_device *dev)
if (devpriv->amcc)
outl(INTCSR_INBOX_INTR_STATUS,
devpriv->amcc + AMCC_OP_REG_INTCSR);
- kfree(devpriv->ao_pacer);
+ if (!IS_ERR(devpriv->ao_pacer))
+ kfree(devpriv->ao_pacer);
}
comedi_pci_detach(dev);
}
diff --git a/drivers/comedi/drivers/cb_pcidas64.c b/drivers/comedi/drivers/cb_pcidas64.c
index ca6038a25f26..ff19fc3859e4 100644
--- a/drivers/comedi/drivers/cb_pcidas64.c
+++ b/drivers/comedi/drivers/cb_pcidas64.c
@@ -3877,11 +3877,10 @@ static int setup_subdevices(struct comedi_device *dev)
s = &dev->subdevices[4];
if (board->has_8255) {
if (board->layout == LAYOUT_4020) {
- ret = subdev_8255_init(dev, s, dio_callback_4020,
- I8255_4020_REG);
+ ret = subdev_8255_cb_init(dev, s, dio_callback_4020,
+ I8255_4020_REG);
} else {
- ret = subdev_8255_mm_init(dev, s, NULL,
- DIO_8255_OFFSET);
+ ret = subdev_8255_mm_init(dev, s, DIO_8255_OFFSET);
}
if (ret)
return ret;
diff --git a/drivers/comedi/drivers/cb_pcidda.c b/drivers/comedi/drivers/cb_pcidda.c
index c52204a6bda4..c353d0f87da9 100644
--- a/drivers/comedi/drivers/cb_pcidda.c
+++ b/drivers/comedi/drivers/cb_pcidda.c
@@ -365,7 +365,7 @@ static int cb_pcidda_auto_attach(struct comedi_device *dev,
/* two 8255 digital io subdevices */
for (i = 0; i < 2; i++) {
s = &dev->subdevices[1 + i];
- ret = subdev_8255_init(dev, s, NULL, i * I8255_SIZE);
+ ret = subdev_8255_io_init(dev, s, i * I8255_SIZE);
if (ret)
return ret;
}
diff --git a/drivers/comedi/drivers/cb_pcimdas.c b/drivers/comedi/drivers/cb_pcimdas.c
index 8bdb00774f11..641c30df392e 100644
--- a/drivers/comedi/drivers/cb_pcimdas.c
+++ b/drivers/comedi/drivers/cb_pcimdas.c
@@ -364,11 +364,11 @@ static int cb_pcimdas_auto_attach(struct comedi_device *dev,
devpriv->BADR3 = pci_resource_start(pcidev, 3);
dev->iobase = pci_resource_start(pcidev, 4);
- dev->pacer = comedi_8254_init(devpriv->BADR3 + PCIMDAS_8254_BASE,
- cb_pcimdas_pacer_clk(dev),
- I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
+ dev->pacer = comedi_8254_io_alloc(devpriv->BADR3 + PCIMDAS_8254_BASE,
+ cb_pcimdas_pacer_clk(dev),
+ I8254_IO8, 0);
+ if (IS_ERR(dev->pacer))
+ return PTR_ERR(dev->pacer);
ret = comedi_alloc_subdevices(dev, 6);
if (ret)
@@ -405,7 +405,7 @@ static int cb_pcimdas_auto_attach(struct comedi_device *dev,
/* Digital I/O subdevice */
s = &dev->subdevices[2];
- ret = subdev_8255_init(dev, s, NULL, PCIMDAS_8255_BASE);
+ ret = subdev_8255_io_init(dev, s, PCIMDAS_8255_BASE);
if (ret)
return ret;
diff --git a/drivers/comedi/drivers/cb_pcimdda.c b/drivers/comedi/drivers/cb_pcimdda.c
index bf8093a10315..541b5742bb1b 100644
--- a/drivers/comedi/drivers/cb_pcimdda.c
+++ b/drivers/comedi/drivers/cb_pcimdda.c
@@ -154,7 +154,7 @@ static int cb_pcimdda_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[1];
/* digital i/o subdevice */
- return subdev_8255_init(dev, s, NULL, PCIMDDA_8255_BASE_REG);
+ return subdev_8255_io_init(dev, s, PCIMDDA_8255_BASE_REG);
}
static struct comedi_driver cb_pcimdda_driver = {
diff --git a/drivers/comedi/drivers/comedi_8254.c b/drivers/comedi/drivers/comedi_8254.c
index b4185c1b2695..6beca2a6d66e 100644
--- a/drivers/comedi/drivers/comedi_8254.c
+++ b/drivers/comedi/drivers/comedi_8254.c
@@ -24,14 +24,17 @@
*
* This module provides the following basic functions:
*
- * comedi_8254_init() / comedi_8254_mm_init()
+ * comedi_8254_io_alloc() / comedi_8254_mm_alloc()
* Initializes this module to access the 8254 registers. The _mm version
- * sets up the module for MMIO register access the other for PIO access.
- * The pointer returned from these functions is normally stored in the
- * comedi_device dev->pacer and will be freed by the comedi core during
- * the driver (*detach). If a driver has multiple 8254 devices, they need
- * to be stored in the drivers private data and freed when the driver is
- * detached.
+ * sets up the module for MMIO register access; the _io version sets it
+ * up for PIO access. These functions return a pointer to a struct
+ * comedi_8254 on success, or an ERR_PTR value on failure. The pointer
+ * returned from these functions is normally stored in the comedi_device
+ * dev->pacer and will be freed by the comedi core during the driver
+ * (*detach). If a driver has multiple 8254 devices, they need to be
+ * stored in the driver's private data and freed when the driver is
+ * detached. If the ERR_PTR value is stored, code should check the
+ * pointer value with !IS_ERR(pointer) before freeing.
*
* NOTE: The counters are reset by setting them to I8254_MODE0 as part of
* this initialization.
@@ -119,63 +122,105 @@
#include <linux/comedi/comedidev.h>
#include <linux/comedi/comedi_8254.h>
-static unsigned int __i8254_read(struct comedi_8254 *i8254, unsigned int reg)
+#ifdef CONFIG_HAS_IOPORT
+
+static unsigned int i8254_io8_cb(struct comedi_8254 *i8254, int dir,
+ unsigned int reg, unsigned int val)
{
- unsigned int reg_offset = (reg * i8254->iosize) << i8254->regshift;
- unsigned int val;
+ unsigned long iobase = i8254->context;
+ unsigned int reg_offset = (reg * I8254_IO8) << i8254->regshift;
- switch (i8254->iosize) {
- default:
- case I8254_IO8:
- if (i8254->mmio)
- val = readb(i8254->mmio + reg_offset);
- else
- val = inb(i8254->iobase + reg_offset);
- break;
- case I8254_IO16:
- if (i8254->mmio)
- val = readw(i8254->mmio + reg_offset);
- else
- val = inw(i8254->iobase + reg_offset);
- break;
- case I8254_IO32:
- if (i8254->mmio)
- val = readl(i8254->mmio + reg_offset);
- else
- val = inl(i8254->iobase + reg_offset);
- break;
+ if (dir) {
+ outb(val, iobase + reg_offset);
+ return 0;
+ } else {
+ return inb(iobase + reg_offset);
}
- return val & 0xff;
}
-static void __i8254_write(struct comedi_8254 *i8254,
- unsigned int val, unsigned int reg)
+static unsigned int i8254_io16_cb(struct comedi_8254 *i8254, int dir,
+ unsigned int reg, unsigned int val)
{
- unsigned int reg_offset = (reg * i8254->iosize) << i8254->regshift;
+ unsigned long iobase = i8254->context;
+ unsigned int reg_offset = (reg * I8254_IO16) << i8254->regshift;
- switch (i8254->iosize) {
- default:
- case I8254_IO8:
- if (i8254->mmio)
- writeb(val, i8254->mmio + reg_offset);
- else
- outb(val, i8254->iobase + reg_offset);
- break;
- case I8254_IO16:
- if (i8254->mmio)
- writew(val, i8254->mmio + reg_offset);
- else
- outw(val, i8254->iobase + reg_offset);
- break;
- case I8254_IO32:
- if (i8254->mmio)
- writel(val, i8254->mmio + reg_offset);
- else
- outl(val, i8254->iobase + reg_offset);
- break;
+ if (dir) {
+ outw(val, iobase + reg_offset);
+ return 0;
+ } else {
+ return inw(iobase + reg_offset);
}
}
+static unsigned int i8254_io32_cb(struct comedi_8254 *i8254, int dir,
+ unsigned int reg, unsigned int val)
+{
+ unsigned long iobase = i8254->context;
+ unsigned int reg_offset = (reg * I8254_IO32) << i8254->regshift;
+
+ if (dir) {
+ outl(val, iobase + reg_offset);
+ return 0;
+ } else {
+ return inl(iobase + reg_offset);
+ }
+}
+
+#endif /* CONFIG_HAS_IOPORT */
+
+static unsigned int i8254_mmio8_cb(struct comedi_8254 *i8254, int dir,
+ unsigned int reg, unsigned int val)
+{
+ void __iomem *mmiobase = (void __iomem *)i8254->context;
+ unsigned int reg_offset = (reg * I8254_IO8) << i8254->regshift;
+
+ if (dir) {
+ writeb(val, mmiobase + reg_offset);
+ return 0;
+ } else {
+ return readb(mmiobase + reg_offset);
+ }
+}
+
+static unsigned int i8254_mmio16_cb(struct comedi_8254 *i8254, int dir,
+ unsigned int reg, unsigned int val)
+{
+ void __iomem *mmiobase = (void __iomem *)i8254->context;
+ unsigned int reg_offset = (reg * I8254_IO16) << i8254->regshift;
+
+ if (dir) {
+ writew(val, mmiobase + reg_offset);
+ return 0;
+ } else {
+ return readw(mmiobase + reg_offset);
+ }
+}
+
+static unsigned int i8254_mmio32_cb(struct comedi_8254 *i8254, int dir,
+ unsigned int reg, unsigned int val)
+{
+ void __iomem *mmiobase = (void __iomem *)i8254->context;
+ unsigned int reg_offset = (reg * I8254_IO32) << i8254->regshift;
+
+ if (dir) {
+ writel(val, mmiobase + reg_offset);
+ return 0;
+ } else {
+ return readl(mmiobase + reg_offset);
+ }
+}
+
+static unsigned int __i8254_read(struct comedi_8254 *i8254, unsigned int reg)
+{
+ return 0xff & i8254->iocb(i8254, 0, reg, 0);
+}
+
+static void __i8254_write(struct comedi_8254 *i8254,
+ unsigned int val, unsigned int reg)
+{
+ i8254->iocb(i8254, 1, reg, val);
+}
+
/**
* comedi_8254_status - return the status of a counter
* @i8254: comedi_8254 struct for the timer
@@ -571,8 +616,8 @@ void comedi_8254_subdevice_init(struct comedi_subdevice *s,
}
EXPORT_SYMBOL_GPL(comedi_8254_subdevice_init);
-static struct comedi_8254 *__i8254_init(unsigned long iobase,
- void __iomem *mmio,
+static struct comedi_8254 *__i8254_init(comedi_8254_iocb_fn *iocb,
+ unsigned long context,
unsigned int osc_base,
unsigned int iosize,
unsigned int regshift)
@@ -583,14 +628,17 @@ static struct comedi_8254 *__i8254_init(unsigned long iobase,
/* sanity check that the iosize is valid */
if (!(iosize == I8254_IO8 || iosize == I8254_IO16 ||
iosize == I8254_IO32))
- return NULL;
+ return ERR_PTR(-EINVAL);
+
+ if (!iocb)
+ return ERR_PTR(-EINVAL);
i8254 = kzalloc(sizeof(*i8254), GFP_KERNEL);
if (!i8254)
- return NULL;
+ return ERR_PTR(-ENOMEM);
- i8254->iobase = iobase;
- i8254->mmio = mmio;
+ i8254->iocb = iocb;
+ i8254->context = context;
i8254->iosize = iosize;
i8254->regshift = regshift;
@@ -604,39 +652,77 @@ static struct comedi_8254 *__i8254_init(unsigned long iobase,
return i8254;
}
+#ifdef CONFIG_HAS_IOPORT
+
/**
- * comedi_8254_init - allocate and initialize the 8254 device for pio access
+ * comedi_8254_io_alloc - allocate and initialize the 8254 device for pio access
* @iobase: port I/O base address
* @osc_base: base time of the counter in ns
* OPTIONAL - only used by comedi_8254_cascade_ns_to_timer()
* @iosize: I/O register size
* @regshift: register gap shift
+ *
+ * Return: A pointer to a struct comedi_8254 or an ERR_PTR value.
*/
-struct comedi_8254 *comedi_8254_init(unsigned long iobase,
- unsigned int osc_base,
- unsigned int iosize,
- unsigned int regshift)
+struct comedi_8254 *comedi_8254_io_alloc(unsigned long iobase,
+ unsigned int osc_base,
+ unsigned int iosize,
+ unsigned int regshift)
{
- return __i8254_init(iobase, NULL, osc_base, iosize, regshift);
+ comedi_8254_iocb_fn *iocb;
+
+ switch (iosize) {
+ case I8254_IO8:
+ iocb = i8254_io8_cb;
+ break;
+ case I8254_IO16:
+ iocb = i8254_io16_cb;
+ break;
+ case I8254_IO32:
+ iocb = i8254_io32_cb;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+ return __i8254_init(iocb, iobase, osc_base, iosize, regshift);
}
-EXPORT_SYMBOL_GPL(comedi_8254_init);
+EXPORT_SYMBOL_GPL(comedi_8254_io_alloc);
+
+#endif /* CONFIG_HAS_IOPORT */
/**
- * comedi_8254_mm_init - allocate and initialize the 8254 device for mmio access
+ * comedi_8254_mm_alloc - allocate and initialize the 8254 device for mmio access
* @mmio: memory mapped I/O base address
* @osc_base: base time of the counter in ns
* OPTIONAL - only used by comedi_8254_cascade_ns_to_timer()
* @iosize: I/O register size
* @regshift: register gap shift
+ *
+ * Return: A pointer to a struct comedi_8254 or an ERR_PTR value.
*/
-struct comedi_8254 *comedi_8254_mm_init(void __iomem *mmio,
- unsigned int osc_base,
- unsigned int iosize,
- unsigned int regshift)
+struct comedi_8254 *comedi_8254_mm_alloc(void __iomem *mmio,
+ unsigned int osc_base,
+ unsigned int iosize,
+ unsigned int regshift)
{
- return __i8254_init(0, mmio, osc_base, iosize, regshift);
+ comedi_8254_iocb_fn *iocb;
+
+ switch (iosize) {
+ case I8254_IO8:
+ iocb = i8254_mmio8_cb;
+ break;
+ case I8254_IO16:
+ iocb = i8254_mmio16_cb;
+ break;
+ case I8254_IO32:
+ iocb = i8254_mmio32_cb;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+ return __i8254_init(iocb, (unsigned long)mmio, osc_base, iosize, regshift);
}
-EXPORT_SYMBOL_GPL(comedi_8254_mm_init);
+EXPORT_SYMBOL_GPL(comedi_8254_mm_alloc);
static int __init comedi_8254_module_init(void)
{
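The calling pattern this API change imposes on drivers, condensed into a sketch (TIMER_BASE and devpriv->counter stand in for driver-specific names):
	/* attach: allocate the pacer; an ERR_PTR value is returned on failure */
	dev->pacer = comedi_8254_io_alloc(dev->iobase + TIMER_BASE,
					  I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
	if (IS_ERR(dev->pacer))
		return PTR_ERR(dev->pacer);

	/* detach: free only pointers that are not ERR_PTR values */
	if (!IS_ERR(devpriv->counter))
		kfree(devpriv->counter);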
diff --git a/drivers/comedi/drivers/comedi_8255.c b/drivers/comedi/drivers/comedi_8255.c
index 5562b9cd0a17..e4974b508328 100644
--- a/drivers/comedi/drivers/comedi_8255.c
+++ b/drivers/comedi/drivers/comedi_8255.c
@@ -33,11 +33,13 @@
#include <linux/comedi/comedi_8255.h>
struct subdev_8255_private {
- unsigned long regbase;
+ unsigned long context;
int (*io)(struct comedi_device *dev, int dir, int port, int data,
- unsigned long regbase);
+ unsigned long context);
};
+#ifdef CONFIG_HAS_IOPORT
+
static int subdev_8255_io(struct comedi_device *dev,
int dir, int port, int data, unsigned long regbase)
{
@@ -48,6 +50,8 @@ static int subdev_8255_io(struct comedi_device *dev,
return inb(dev->iobase + regbase + port);
}
+#endif /* CONFIG_HAS_IOPORT */
+
static int subdev_8255_mmio(struct comedi_device *dev,
int dir, int port, int data, unsigned long regbase)
{
@@ -64,7 +68,7 @@ static int subdev_8255_insn(struct comedi_device *dev,
unsigned int *data)
{
struct subdev_8255_private *spriv = s->private;
- unsigned long regbase = spriv->regbase;
+ unsigned long context = spriv->context;
unsigned int mask;
unsigned int v;
@@ -72,18 +76,18 @@ static int subdev_8255_insn(struct comedi_device *dev,
if (mask) {
if (mask & 0xff)
spriv->io(dev, 1, I8255_DATA_A_REG,
- s->state & 0xff, regbase);
+ s->state & 0xff, context);
if (mask & 0xff00)
spriv->io(dev, 1, I8255_DATA_B_REG,
- (s->state >> 8) & 0xff, regbase);
+ (s->state >> 8) & 0xff, context);
if (mask & 0xff0000)
spriv->io(dev, 1, I8255_DATA_C_REG,
- (s->state >> 16) & 0xff, regbase);
+ (s->state >> 16) & 0xff, context);
}
- v = spriv->io(dev, 0, I8255_DATA_A_REG, 0, regbase);
- v |= (spriv->io(dev, 0, I8255_DATA_B_REG, 0, regbase) << 8);
- v |= (spriv->io(dev, 0, I8255_DATA_C_REG, 0, regbase) << 16);
+ v = spriv->io(dev, 0, I8255_DATA_A_REG, 0, context);
+ v |= (spriv->io(dev, 0, I8255_DATA_B_REG, 0, context) << 8);
+ v |= (spriv->io(dev, 0, I8255_DATA_C_REG, 0, context) << 16);
data[1] = v;
@@ -94,7 +98,7 @@ static void subdev_8255_do_config(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct subdev_8255_private *spriv = s->private;
- unsigned long regbase = spriv->regbase;
+ unsigned long context = spriv->context;
int config;
config = I8255_CTRL_CW;
@@ -108,7 +112,7 @@ static void subdev_8255_do_config(struct comedi_device *dev,
if (!(s->io_bits & 0xf00000))
config |= I8255_CTRL_C_HI_IO;
- spriv->io(dev, 1, I8255_CTRL_REG, config, regbase);
+ spriv->io(dev, 1, I8255_CTRL_REG, config, context);
}
static int subdev_8255_insn_config(struct comedi_device *dev,
@@ -142,23 +146,19 @@ static int __subdev_8255_init(struct comedi_device *dev,
struct comedi_subdevice *s,
int (*io)(struct comedi_device *dev,
int dir, int port, int data,
- unsigned long regbase),
- unsigned long regbase,
- bool is_mmio)
+ unsigned long context),
+ unsigned long context)
{
struct subdev_8255_private *spriv;
+ if (!io)
+ return -EINVAL;
+
spriv = comedi_alloc_spriv(s, sizeof(*spriv));
if (!spriv)
return -ENOMEM;
- if (io)
- spriv->io = io;
- else if (is_mmio)
- spriv->io = subdev_8255_mmio;
- else
- spriv->io = subdev_8255_io;
- spriv->regbase = regbase;
+ spriv->context = context;
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
@@ -173,89 +173,88 @@ static int __subdev_8255_init(struct comedi_device *dev,
return 0;
}
+#ifdef CONFIG_HAS_IOPORT
+
/**
- * subdev_8255_init - initialize DIO subdevice for driving I/O mapped 8255
+ * subdev_8255_io_init - initialize DIO subdevice for driving I/O mapped 8255
* @dev: comedi device owning subdevice
* @s: comedi subdevice to initialize
- * @io: (optional) register I/O call-back function
- * @regbase: offset of 8255 registers from dev->iobase, or call-back context
+ * @regbase: offset of 8255 registers from dev->iobase
*
* Initializes a comedi subdevice as a DIO subdevice driving an 8255 chip.
*
- * If the optional I/O call-back function is provided, its prototype is of
- * the following form:
- *
- * int my_8255_callback(struct comedi_device *dev, int dir, int port,
- * int data, unsigned long regbase);
- *
- * where 'dev', and 'regbase' match the values passed to this function,
- * 'port' is the 8255 port number 0 to 3 (including the control port), 'dir'
- * is the direction (0 for read, 1 for write) and 'data' is the value to be
- * written. It should return 0 if writing or the value read if reading.
- *
- * If the optional I/O call-back function is not provided, an internal
- * call-back function is used which uses consecutive I/O port addresses
- * starting at dev->iobase + regbase.
- *
* Return: -ENOMEM if failed to allocate memory, zero on success.
*/
-int subdev_8255_init(struct comedi_device *dev, struct comedi_subdevice *s,
- int (*io)(struct comedi_device *dev, int dir, int port,
- int data, unsigned long regbase),
+int subdev_8255_io_init(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned long regbase)
{
- return __subdev_8255_init(dev, s, io, regbase, false);
+ return __subdev_8255_init(dev, s, subdev_8255_io, regbase);
}
-EXPORT_SYMBOL_GPL(subdev_8255_init);
+EXPORT_SYMBOL_GPL(subdev_8255_io_init);
+
+#endif /* CONFIG_HAS_IOPORT */
/**
* subdev_8255_mm_init - initialize DIO subdevice for driving mmio-mapped 8255
* @dev: comedi device owning subdevice
* @s: comedi subdevice to initialize
- * @io: (optional) register I/O call-back function
- * @regbase: offset of 8255 registers from dev->mmio, or call-back context
+ * @regbase: offset of 8255 registers from dev->mmio
*
* Initializes a comedi subdevice as a DIO subdevice driving an 8255 chip.
*
- * If the optional I/O call-back function is provided, its prototype is of
- * the following form:
+ * Return: -ENOMEM if failed to allocate memory, zero on success.
+ */
+int subdev_8255_mm_init(struct comedi_device *dev, struct comedi_subdevice *s,
+ unsigned long regbase)
+{
+ return __subdev_8255_init(dev, s, subdev_8255_mmio, regbase);
+}
+EXPORT_SYMBOL_GPL(subdev_8255_mm_init);
+
+/**
+ * subdev_8255_cb_init - initialize DIO subdevice for driving callback-mapped 8255
+ * @dev: comedi device owning subdevice
+ * @s: comedi subdevice to initialize
+ * @io: register I/O call-back function
+ * @context: call-back context
+ *
+ * Initializes a comedi subdevice as a DIO subdevice driving an 8255 chip.
+ *
+ * The prototype of the I/O call-back function is of the following form:
*
* int my_8255_callback(struct comedi_device *dev, int dir, int port,
- * int data, unsigned long regbase);
+ * int data, unsigned long context);
*
- * where 'dev', and 'regbase' match the values passed to this function,
+ * where 'dev' and 'context' match the values passed to this function,
* 'port' is the 8255 port number 0 to 3 (including the control port), 'dir'
* is the direction (0 for read, 1 for write) and 'data' is the value to be
* written. It should return 0 if writing or the value read if reading.
*
- * If the optional I/O call-back function is not provided, an internal
- * call-back function is used which uses consecutive MMIO virtual addresses
- * starting at dev->mmio + regbase.
*
* Return: -ENOMEM if failed to allocate memory, zero on success.
*/
-int subdev_8255_mm_init(struct comedi_device *dev, struct comedi_subdevice *s,
+int subdev_8255_cb_init(struct comedi_device *dev, struct comedi_subdevice *s,
int (*io)(struct comedi_device *dev, int dir, int port,
- int data, unsigned long regbase),
- unsigned long regbase)
+ int data, unsigned long context),
+ unsigned long context)
{
- return __subdev_8255_init(dev, s, io, regbase, true);
+ return __subdev_8255_init(dev, s, io, context);
}
-EXPORT_SYMBOL_GPL(subdev_8255_mm_init);
+EXPORT_SYMBOL_GPL(subdev_8255_cb_init);
/**
* subdev_8255_regbase - get offset of 8255 registers or call-back context
* @s: comedi subdevice
*
- * Returns the 'regbase' parameter that was previously passed to
- * subdev_8255_init() or subdev_8255_mm_init() to set up the subdevice.
- * Only valid if the subdevice was set up successfully.
+ * Returns the 'regbase' or 'context' parameter that was previously passed to
+ * subdev_8255_io_init(), subdev_8255_mm_init(), or subdev_8255_cb_init() to
+ * set up the subdevice. Only valid if the subdevice was set up successfully.
*/
unsigned long subdev_8255_regbase(struct comedi_subdevice *s)
{
struct subdev_8255_private *spriv = s->private;
- return spriv->regbase;
+ return spriv->context;
}
EXPORT_SYMBOL_GPL(subdev_8255_regbase);
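To illustrate the documented call-back prototype (hypothetical names; the converted drivers below, e.g. daqboard2000, supply their own callbacks):
static int my_8255_callback(struct comedi_device *dev, int dir, int port,
			    int data, unsigned long context)
{
	unsigned long iobase = context;

	if (dir) {			/* dir == 1: write 'data' to the port */
		outb(data, iobase + port);
		return 0;
	}
	return inb(iobase + port);	/* dir == 0: return the value read */
}

/* from the driver's (*auto_attach): */
ret = subdev_8255_cb_init(dev, s, my_8255_callback, iobase);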
diff --git a/drivers/comedi/drivers/daqboard2000.c b/drivers/comedi/drivers/daqboard2000.c
index c0a4e1b06fb3..897bf46b95ee 100644
--- a/drivers/comedi/drivers/daqboard2000.c
+++ b/drivers/comedi/drivers/daqboard2000.c
@@ -738,8 +738,8 @@ static int db2k_auto_attach(struct comedi_device *dev, unsigned long context)
return result;
s = &dev->subdevices[2];
- return subdev_8255_init(dev, s, db2k_8255_cb,
- DB2K_REG_DIO_P2_EXP_IO_8_BIT);
+ return subdev_8255_cb_init(dev, s, db2k_8255_cb,
+ DB2K_REG_DIO_P2_EXP_IO_8_BIT);
}
static void db2k_detach(struct comedi_device *dev)
diff --git a/drivers/comedi/drivers/das08.c b/drivers/comedi/drivers/das08.c
index f8ab3af2e391..5d5b9174f88a 100644
--- a/drivers/comedi/drivers/das08.c
+++ b/drivers/comedi/drivers/das08.c
@@ -429,7 +429,7 @@ int das08_common_attach(struct comedi_device *dev, unsigned long iobase)
s = &dev->subdevices[4];
/* 8255 */
if (board->i8255_offset != 0) {
- ret = subdev_8255_init(dev, s, NULL, board->i8255_offset);
+ ret = subdev_8255_io_init(dev, s, board->i8255_offset);
if (ret)
return ret;
} else {
@@ -439,10 +439,11 @@ int das08_common_attach(struct comedi_device *dev, unsigned long iobase)
/* Counter subdevice (8254) */
s = &dev->subdevices[5];
if (board->i8254_offset) {
- dev->pacer = comedi_8254_init(dev->iobase + board->i8254_offset,
- 0, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
+ dev->pacer =
+ comedi_8254_io_alloc(dev->iobase + board->i8254_offset,
+ 0, I8254_IO8, 0);
+ if (IS_ERR(dev->pacer))
+ return PTR_ERR(dev->pacer);
comedi_8254_subdevice_init(s, dev->pacer);
} else {
diff --git a/drivers/comedi/drivers/das16.c b/drivers/comedi/drivers/das16.c
index 728dc02156c8..4ed56a02150e 100644
--- a/drivers/comedi/drivers/das16.c
+++ b/drivers/comedi/drivers/das16.c
@@ -1067,10 +1067,10 @@ static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
osc_base = I8254_OSC_BASE_1MHZ / it->options[3];
}
- dev->pacer = comedi_8254_init(dev->iobase + DAS16_TIMER_BASE_REG,
- osc_base, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
+ dev->pacer = comedi_8254_io_alloc(dev->iobase + DAS16_TIMER_BASE_REG,
+ osc_base, I8254_IO8, 0);
+ if (IS_ERR(dev->pacer))
+ return PTR_ERR(dev->pacer);
das16_alloc_dma(dev, it->options[2]);
@@ -1145,7 +1145,7 @@ static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* 8255 Digital I/O subdevice */
if (board->has_8255) {
s = &dev->subdevices[4];
- ret = subdev_8255_init(dev, s, NULL, board->i8255_offset);
+ ret = subdev_8255_io_init(dev, s, board->i8255_offset);
if (ret)
return ret;
}
diff --git a/drivers/comedi/drivers/das16m1.c b/drivers/comedi/drivers/das16m1.c
index 275effb77746..b8ea737ad3d1 100644
--- a/drivers/comedi/drivers/das16m1.c
+++ b/drivers/comedi/drivers/das16m1.c
@@ -529,15 +529,16 @@ static int das16m1_attach(struct comedi_device *dev,
dev->irq = it->options[1];
}
- dev->pacer = comedi_8254_init(dev->iobase + DAS16M1_8254_IOBASE2,
- I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
+ dev->pacer = comedi_8254_io_alloc(dev->iobase + DAS16M1_8254_IOBASE2,
+ I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
+ if (IS_ERR(dev->pacer))
+ return PTR_ERR(dev->pacer);
- devpriv->counter = comedi_8254_init(dev->iobase + DAS16M1_8254_IOBASE1,
- 0, I8254_IO8, 0);
- if (!devpriv->counter)
- return -ENOMEM;
+ devpriv->counter =
+ comedi_8254_io_alloc(dev->iobase + DAS16M1_8254_IOBASE1,
+ 0, I8254_IO8, 0);
+ if (IS_ERR(devpriv->counter))
+ return PTR_ERR(devpriv->counter);
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
@@ -582,7 +583,7 @@ static int das16m1_attach(struct comedi_device *dev,
/* Digital I/O subdevice (8255) */
s = &dev->subdevices[3];
- ret = subdev_8255_init(dev, s, NULL, DAS16M1_8255_IOBASE);
+ ret = subdev_8255_io_init(dev, s, DAS16M1_8255_IOBASE);
if (ret)
return ret;
@@ -603,7 +604,8 @@ static void das16m1_detach(struct comedi_device *dev)
if (devpriv) {
if (devpriv->extra_iobase)
release_region(devpriv->extra_iobase, DAS16M1_SIZE2);
- kfree(devpriv->counter);
+ if (!IS_ERR(devpriv->counter))
+ kfree(devpriv->counter);
}
comedi_legacy_detach(dev);
}
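A brief aside, not part of the patch: the conversions above replace the NULL-returning comedi_8254_init() with comedi_8254_io_alloc(), which reports failure through ERR_PTR(). Error paths therefore switch from a hard-coded -ENOMEM to PTR_ERR(), and detach paths guard kfree() with !IS_ERR() because the stored pointer may hold an encoded error rather than NULL. A condensed sketch of the idiom, with MYDRV_TIMER_BASE as a made-up register offset:

	/* attach: propagate the encoded error instead of assuming -ENOMEM */
	devpriv->counter = comedi_8254_io_alloc(dev->iobase + MYDRV_TIMER_BASE,
						0, I8254_IO8, 0);
	if (IS_ERR(devpriv->counter))
		return PTR_ERR(devpriv->counter);

	/* detach: kfree(NULL) is safe, kfree(ERR_PTR(...)) is not */
	if (!IS_ERR(devpriv->counter))
		kfree(devpriv->counter);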
diff --git a/drivers/comedi/drivers/das1800.c b/drivers/comedi/drivers/das1800.c
index f09608c0f4ff..7117c67aee7e 100644
--- a/drivers/comedi/drivers/das1800.c
+++ b/drivers/comedi/drivers/das1800.c
@@ -1233,10 +1233,10 @@ static int das1800_attach(struct comedi_device *dev,
if (!devpriv->fifo_buf)
return -ENOMEM;
- dev->pacer = comedi_8254_init(dev->iobase + DAS1800_COUNTER,
- I8254_OSC_BASE_5MHZ, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
+ dev->pacer = comedi_8254_io_alloc(dev->iobase + DAS1800_COUNTER,
+ I8254_OSC_BASE_5MHZ, I8254_IO8, 0);
+ if (IS_ERR(dev->pacer))
+ return PTR_ERR(dev->pacer);
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
diff --git a/drivers/comedi/drivers/das6402.c b/drivers/comedi/drivers/das6402.c
index 1af394591e74..68f95330de45 100644
--- a/drivers/comedi/drivers/das6402.c
+++ b/drivers/comedi/drivers/das6402.c
@@ -590,10 +590,10 @@ static int das6402_attach(struct comedi_device *dev,
}
}
- dev->pacer = comedi_8254_init(dev->iobase + DAS6402_TIMER_BASE,
- I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
+ dev->pacer = comedi_8254_io_alloc(dev->iobase + DAS6402_TIMER_BASE,
+ I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
+ if (IS_ERR(dev->pacer))
+ return PTR_ERR(dev->pacer);
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
diff --git a/drivers/comedi/drivers/das800.c b/drivers/comedi/drivers/das800.c
index 4ca33f46eaa7..300775523031 100644
--- a/drivers/comedi/drivers/das800.c
+++ b/drivers/comedi/drivers/das800.c
@@ -672,10 +672,10 @@ static int das800_attach(struct comedi_device *dev, struct comedi_devconfig *it)
dev->irq = irq;
}
- dev->pacer = comedi_8254_init(dev->iobase + DAS800_8254,
- I8254_OSC_BASE_1MHZ, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
+ dev->pacer = comedi_8254_io_alloc(dev->iobase + DAS800_8254,
+ I8254_OSC_BASE_1MHZ, I8254_IO8, 0);
+ if (IS_ERR(dev->pacer))
+ return PTR_ERR(dev->pacer);
ret = comedi_alloc_subdevices(dev, 3);
if (ret)
diff --git a/drivers/comedi/drivers/dmm32at.c b/drivers/comedi/drivers/dmm32at.c
index fe023c722aa3..644e3b643c79 100644
--- a/drivers/comedi/drivers/dmm32at.c
+++ b/drivers/comedi/drivers/dmm32at.c
@@ -599,7 +599,8 @@ static int dmm32at_attach(struct comedi_device *dev,
/* Digital I/O subdevice */
s = &dev->subdevices[2];
- return subdev_8255_init(dev, s, dmm32at_8255_io, DMM32AT_8255_IOBASE);
+ return subdev_8255_cb_init(dev, s, dmm32at_8255_io,
+ DMM32AT_8255_IOBASE);
}
static struct comedi_driver dmm32at_driver = {
diff --git a/drivers/comedi/drivers/me4000.c b/drivers/comedi/drivers/me4000.c
index 9aea02b86ed9..7dd3a0071863 100644
--- a/drivers/comedi/drivers/me4000.c
+++ b/drivers/comedi/drivers/me4000.c
@@ -1209,9 +1209,9 @@ static int me4000_auto_attach(struct comedi_device *dev,
if (!timer_base)
return -ENODEV;
- dev->pacer = comedi_8254_init(timer_base, 0, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
+ dev->pacer = comedi_8254_io_alloc(timer_base, 0, I8254_IO8, 0);
+ if (IS_ERR(dev->pacer))
+ return PTR_ERR(dev->pacer);
comedi_8254_subdevice_init(s, dev->pacer);
} else {
diff --git a/drivers/comedi/drivers/ni_at_a2150.c b/drivers/comedi/drivers/ni_at_a2150.c
index df8d219e6723..e4e5a0ebd195 100644
--- a/drivers/comedi/drivers/ni_at_a2150.c
+++ b/drivers/comedi/drivers/ni_at_a2150.c
@@ -707,10 +707,10 @@ static int a2150_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* an IRQ and DMA are required to support async commands */
a2150_alloc_irq_and_dma(dev, it);
- dev->pacer = comedi_8254_init(dev->iobase + I8253_BASE_REG,
- 0, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
+ dev->pacer = comedi_8254_io_alloc(dev->iobase + I8253_BASE_REG,
+ 0, I8254_IO8, 0);
+ if (IS_ERR(dev->pacer))
+ return PTR_ERR(dev->pacer);
ret = comedi_alloc_subdevices(dev, 1);
if (ret)
diff --git a/drivers/comedi/drivers/ni_at_ao.c b/drivers/comedi/drivers/ni_at_ao.c
index 9f3147b72aa8..9cf6b4ff6b65 100644
--- a/drivers/comedi/drivers/ni_at_ao.c
+++ b/drivers/comedi/drivers/ni_at_ao.c
@@ -303,10 +303,10 @@ static int atao_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (!devpriv)
return -ENOMEM;
- dev->pacer = comedi_8254_init(dev->iobase + ATAO_82C53_BASE,
- 0, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
+ dev->pacer = comedi_8254_io_alloc(dev->iobase + ATAO_82C53_BASE,
+ 0, I8254_IO8, 0);
+ if (IS_ERR(dev->pacer))
+ return PTR_ERR(dev->pacer);
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
diff --git a/drivers/comedi/drivers/ni_atmio16d.c b/drivers/comedi/drivers/ni_atmio16d.c
index 9fa902529a8e..e5e7cc423c87 100644
--- a/drivers/comedi/drivers/ni_atmio16d.c
+++ b/drivers/comedi/drivers/ni_atmio16d.c
@@ -677,7 +677,7 @@ static int atmio16d_attach(struct comedi_device *dev,
/* 8255 subdevice */
s = &dev->subdevices[3];
if (board->has_8255) {
- ret = subdev_8255_init(dev, s, NULL, 0x00);
+ ret = subdev_8255_io_init(dev, s, 0x00);
if (ret)
return ret;
} else {
diff --git a/drivers/comedi/drivers/ni_daq_dio24.c b/drivers/comedi/drivers/ni_daq_dio24.c
index 487733111023..9419caf02edc 100644
--- a/drivers/comedi/drivers/ni_daq_dio24.c
+++ b/drivers/comedi/drivers/ni_daq_dio24.c
@@ -45,7 +45,7 @@ static int dio24_auto_attach(struct comedi_device *dev,
/* 8255 dio */
s = &dev->subdevices[0];
- return subdev_8255_init(dev, s, NULL, 0x00);
+ return subdev_8255_io_init(dev, s, 0x00);
}
static struct comedi_driver driver_dio24 = {
diff --git a/drivers/comedi/drivers/ni_labpc_common.c b/drivers/comedi/drivers/ni_labpc_common.c
index 763249653228..7e0ce0ce0adf 100644
--- a/drivers/comedi/drivers/ni_labpc_common.c
+++ b/drivers/comedi/drivers/ni_labpc_common.c
@@ -78,6 +78,9 @@ static const struct comedi_lrange range_labpc_ao = {
* functions that do inb/outb and readb/writeb so we can use
* function pointers to decide which to use
*/
+
+#ifdef CONFIG_HAS_IOPORT
+
static unsigned int labpc_inb(struct comedi_device *dev, unsigned long reg)
{
return inb(dev->iobase + reg);
@@ -89,6 +92,8 @@ static void labpc_outb(struct comedi_device *dev,
outb(byte, dev->iobase + reg);
}
+#endif /* CONFIG_HAS_IOPORT */
+
static unsigned int labpc_readb(struct comedi_device *dev, unsigned long reg)
{
return readb(dev->mmio + reg);
@@ -1200,8 +1205,12 @@ int labpc_common_attach(struct comedi_device *dev,
devpriv->read_byte = labpc_readb;
devpriv->write_byte = labpc_writeb;
} else {
+#ifdef CONFIG_HAS_IOPORT
devpriv->read_byte = labpc_inb;
devpriv->write_byte = labpc_outb;
+#else
+ return -ENXIO;
+#endif
}
/* initialize board's command registers */
@@ -1222,24 +1231,24 @@ int labpc_common_attach(struct comedi_device *dev,
}
if (dev->mmio) {
- dev->pacer = comedi_8254_mm_init(dev->mmio + COUNTER_B_BASE_REG,
- I8254_OSC_BASE_2MHZ,
- I8254_IO8, 0);
- devpriv->counter = comedi_8254_mm_init(dev->mmio +
- COUNTER_A_BASE_REG,
- I8254_OSC_BASE_2MHZ,
- I8254_IO8, 0);
+ dev->pacer =
+ comedi_8254_mm_alloc(dev->mmio + COUNTER_B_BASE_REG,
+ I8254_OSC_BASE_2MHZ, I8254_IO8, 0);
+ devpriv->counter =
+ comedi_8254_mm_alloc(dev->mmio + COUNTER_A_BASE_REG,
+ I8254_OSC_BASE_2MHZ, I8254_IO8, 0);
} else {
- dev->pacer = comedi_8254_init(dev->iobase + COUNTER_B_BASE_REG,
- I8254_OSC_BASE_2MHZ,
- I8254_IO8, 0);
- devpriv->counter = comedi_8254_init(dev->iobase +
- COUNTER_A_BASE_REG,
- I8254_OSC_BASE_2MHZ,
- I8254_IO8, 0);
+ dev->pacer =
+ comedi_8254_io_alloc(dev->iobase + COUNTER_B_BASE_REG,
+ I8254_OSC_BASE_2MHZ, I8254_IO8, 0);
+ devpriv->counter =
+ comedi_8254_io_alloc(dev->iobase + COUNTER_A_BASE_REG,
+ I8254_OSC_BASE_2MHZ, I8254_IO8, 0);
}
- if (!dev->pacer || !devpriv->counter)
- return -ENOMEM;
+ if (IS_ERR(dev->pacer))
+ return PTR_ERR(dev->pacer);
+ if (IS_ERR(devpriv->counter))
+ return PTR_ERR(devpriv->counter);
ret = comedi_alloc_subdevices(dev, 5);
if (ret)
@@ -1287,9 +1296,9 @@ int labpc_common_attach(struct comedi_device *dev,
/* 8255 dio */
s = &dev->subdevices[2];
if (dev->mmio)
- ret = subdev_8255_mm_init(dev, s, NULL, DIO_BASE_REG);
+ ret = subdev_8255_mm_init(dev, s, DIO_BASE_REG);
else
- ret = subdev_8255_init(dev, s, NULL, DIO_BASE_REG);
+ ret = subdev_8255_io_init(dev, s, DIO_BASE_REG);
if (ret)
return ret;
@@ -1341,8 +1350,10 @@ void labpc_common_detach(struct comedi_device *dev)
{
struct labpc_private *devpriv = dev->private;
- if (devpriv)
- kfree(devpriv->counter);
+ if (devpriv) {
+ if (!IS_ERR(devpriv->counter))
+ kfree(devpriv->counter);
+ }
}
EXPORT_SYMBOL_GPL(labpc_common_detach);
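As an illustration outside the patch, the CONFIG_HAS_IOPORT treatment above follows a common pattern: port-I/O helpers are compiled out on architectures without port I/O, and attach fails with -ENXIO when only the port-mapped variant would be usable. A condensed sketch with hypothetical mydrv_* helpers:

#ifdef CONFIG_HAS_IOPORT
static unsigned int mydrv_inb(struct comedi_device *dev, unsigned long reg)
{
	return inb(dev->iobase + reg);
}
#endif /* CONFIG_HAS_IOPORT */

static unsigned int mydrv_readb(struct comedi_device *dev, unsigned long reg)
{
	return readb(dev->mmio + reg);
}

	/* ...later, in the attach routine: prefer MMIO, fall back to port I/O
	 * only when it is actually built in.
	 */
	if (dev->mmio) {
		devpriv->read_byte = mydrv_readb;
	} else {
#ifdef CONFIG_HAS_IOPORT
		devpriv->read_byte = mydrv_inb;
#else
		return -ENXIO;
#endif
	}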
diff --git a/drivers/comedi/drivers/ni_mio_common.c b/drivers/comedi/drivers/ni_mio_common.c
index d39998565808..980f309d6de7 100644
--- a/drivers/comedi/drivers/ni_mio_common.c
+++ b/drivers/comedi/drivers/ni_mio_common.c
@@ -46,6 +46,12 @@
#include <linux/comedi/comedi_8255.h>
#include "mite.h"
+#ifdef PCIDMA
+#define IS_PCIMIO 1
+#else
+#define IS_PCIMIO 0
+#endif
+
/* A timeout count */
#define NI_TIMEOUT 1000
@@ -219,54 +225,72 @@ enum timebase_nanoseconds {
static const int num_adc_stages_611x = 3;
+#ifdef PCIDMA
+
static void ni_writel(struct comedi_device *dev, unsigned int data, int reg)
{
- if (dev->mmio)
- writel(data, dev->mmio + reg);
- else
- outl(data, dev->iobase + reg);
+ writel(data, dev->mmio + reg);
}
static void ni_writew(struct comedi_device *dev, unsigned int data, int reg)
{
- if (dev->mmio)
- writew(data, dev->mmio + reg);
- else
- outw(data, dev->iobase + reg);
+ writew(data, dev->mmio + reg);
}
static void ni_writeb(struct comedi_device *dev, unsigned int data, int reg)
{
- if (dev->mmio)
- writeb(data, dev->mmio + reg);
- else
- outb(data, dev->iobase + reg);
+ writeb(data, dev->mmio + reg);
}
static unsigned int ni_readl(struct comedi_device *dev, int reg)
{
- if (dev->mmio)
- return readl(dev->mmio + reg);
+ return readl(dev->mmio + reg);
+}
+
+static unsigned int ni_readw(struct comedi_device *dev, int reg)
+{
+ return readw(dev->mmio + reg);
+}
+
+static unsigned int ni_readb(struct comedi_device *dev, int reg)
+{
+ return readb(dev->mmio + reg);
+}
+#else /* PCIDMA */
+
+static void ni_writel(struct comedi_device *dev, unsigned int data, int reg)
+{
+ outl(data, dev->iobase + reg);
+}
+
+static void ni_writew(struct comedi_device *dev, unsigned int data, int reg)
+{
+ outw(data, dev->iobase + reg);
+}
+
+static void ni_writeb(struct comedi_device *dev, unsigned int data, int reg)
+{
+ outb(data, dev->iobase + reg);
+}
+
+static unsigned int ni_readl(struct comedi_device *dev, int reg)
+{
return inl(dev->iobase + reg);
}
static unsigned int ni_readw(struct comedi_device *dev, int reg)
{
- if (dev->mmio)
- return readw(dev->mmio + reg);
-
return inw(dev->iobase + reg);
}
static unsigned int ni_readb(struct comedi_device *dev, int reg)
{
- if (dev->mmio)
- return readb(dev->mmio + reg);
-
return inb(dev->iobase + reg);
}
+#endif /* PCIDMA */
+
/*
* We automatically take advantage of STC registers that can be
* read/written directly in the I/O space of the board.
@@ -5977,6 +6001,12 @@ static int ni_E_init(struct comedi_device *dev,
int i;
const char *dev_family = devpriv->is_m_series ? "ni_mseries"
: "ni_eseries";
+ if (!IS_PCIMIO != !dev->mmio) {
+ dev_err(dev->class_dev,
+ "%s: bug! %s device not supported.\n",
+ KBUILD_MODNAME, board->name);
+ return -ENXIO;
+ }
/* prepare the device for globally-named routes. */
if (ni_assign_device_routes(dev_family, board->name,
@@ -6137,8 +6167,8 @@ static int ni_E_init(struct comedi_device *dev,
/* 8255 device */
s = &dev->subdevices[NI_8255_DIO_SUBDEV];
if (board->has_8255) {
- ret = subdev_8255_init(dev, s, ni_8255_callback,
- NI_E_8255_BASE);
+ ret = subdev_8255_cb_init(dev, s, ni_8255_callback,
+ NI_E_8255_BASE);
if (ret)
return ret;
} else {
diff --git a/drivers/comedi/drivers/pcl711.c b/drivers/comedi/drivers/pcl711.c
index 05172c553c8a..0cf3917defe7 100644
--- a/drivers/comedi/drivers/pcl711.c
+++ b/drivers/comedi/drivers/pcl711.c
@@ -429,10 +429,10 @@ static int pcl711_attach(struct comedi_device *dev, struct comedi_devconfig *it)
dev->irq = it->options[1];
}
- dev->pacer = comedi_8254_init(dev->iobase + PCL711_TIMER_BASE,
- I8254_OSC_BASE_2MHZ, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
+ dev->pacer = comedi_8254_io_alloc(dev->iobase + PCL711_TIMER_BASE,
+ I8254_OSC_BASE_2MHZ, I8254_IO8, 0);
+ if (IS_ERR(dev->pacer))
+ return PTR_ERR(dev->pacer);
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
diff --git a/drivers/comedi/drivers/pcl724.c b/drivers/comedi/drivers/pcl724.c
index 948a0576c9ef..00474710b81f 100644
--- a/drivers/comedi/drivers/pcl724.c
+++ b/drivers/comedi/drivers/pcl724.c
@@ -124,10 +124,10 @@ static int pcl724_attach(struct comedi_device *dev,
s = &dev->subdevices[i];
if (board->is_pet48) {
iobase = dev->iobase + (i * 0x1000);
- ret = subdev_8255_init(dev, s, pcl724_8255mapped_io,
- iobase);
+ ret = subdev_8255_cb_init(dev, s, pcl724_8255mapped_io,
+ iobase);
} else {
- ret = subdev_8255_init(dev, s, NULL, i * I8255_SIZE);
+ ret = subdev_8255_io_init(dev, s, i * I8255_SIZE);
}
if (ret)
return ret;
diff --git a/drivers/comedi/drivers/pcl812.c b/drivers/comedi/drivers/pcl812.c
index 70dbc129fcf5..0df639c6a595 100644
--- a/drivers/comedi/drivers/pcl812.c
+++ b/drivers/comedi/drivers/pcl812.c
@@ -1143,11 +1143,11 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
return ret;
if (board->irq_bits) {
- dev->pacer = comedi_8254_init(dev->iobase + PCL812_TIMER_BASE,
- I8254_OSC_BASE_2MHZ,
- I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
+ dev->pacer =
+ comedi_8254_io_alloc(dev->iobase + PCL812_TIMER_BASE,
+ I8254_OSC_BASE_2MHZ, I8254_IO8, 0);
+ if (IS_ERR(dev->pacer))
+ return PTR_ERR(dev->pacer);
if ((1 << it->options[1]) & board->irq_bits) {
ret = request_irq(it->options[1], pcl812_interrupt, 0,
diff --git a/drivers/comedi/drivers/pcl816.c b/drivers/comedi/drivers/pcl816.c
index a5e5320be648..28d1a88c50f6 100644
--- a/drivers/comedi/drivers/pcl816.c
+++ b/drivers/comedi/drivers/pcl816.c
@@ -615,10 +615,10 @@ static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* an IRQ and DMA are required to support async commands */
pcl816_alloc_irq_and_dma(dev, it);
- dev->pacer = comedi_8254_init(dev->iobase + PCL816_TIMER_BASE,
- I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
+ dev->pacer = comedi_8254_io_alloc(dev->iobase + PCL816_TIMER_BASE,
+ I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
+ if (IS_ERR(dev->pacer))
+ return PTR_ERR(dev->pacer);
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
diff --git a/drivers/comedi/drivers/pcl818.c b/drivers/comedi/drivers/pcl818.c
index 29e503de8267..4127adcfb229 100644
--- a/drivers/comedi/drivers/pcl818.c
+++ b/drivers/comedi/drivers/pcl818.c
@@ -1015,10 +1015,10 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
else
osc_base = I8254_OSC_BASE_1MHZ;
- dev->pacer = comedi_8254_init(dev->iobase + PCL818_TIMER_BASE,
- osc_base, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
+ dev->pacer = comedi_8254_io_alloc(dev->iobase + PCL818_TIMER_BASE,
+ osc_base, I8254_IO8, 0);
+ if (IS_ERR(dev->pacer))
+ return PTR_ERR(dev->pacer);
/* max sampling speed */
devpriv->ns_min = board->ns_min;
diff --git a/drivers/comedi/drivers/pcm3724.c b/drivers/comedi/drivers/pcm3724.c
index ca8bef54dacc..fb41de3baef8 100644
--- a/drivers/comedi/drivers/pcm3724.c
+++ b/drivers/comedi/drivers/pcm3724.c
@@ -204,7 +204,7 @@ static int pcm3724_attach(struct comedi_device *dev,
for (i = 0; i < dev->n_subdevices; i++) {
s = &dev->subdevices[i];
- ret = subdev_8255_init(dev, s, NULL, i * I8255_SIZE);
+ ret = subdev_8255_io_init(dev, s, i * I8255_SIZE);
if (ret)
return ret;
s->insn_config = subdev_3724_insn_config;
diff --git a/drivers/comedi/drivers/rtd520.c b/drivers/comedi/drivers/rtd520.c
index 7e0ec1a2a2ca..44bb0decd7a4 100644
--- a/drivers/comedi/drivers/rtd520.c
+++ b/drivers/comedi/drivers/rtd520.c
@@ -1289,9 +1289,9 @@ static int rtd_auto_attach(struct comedi_device *dev,
/* 8254 Timer/Counter subdevice */
s = &dev->subdevices[3];
- dev->pacer = comedi_8254_mm_init(dev->mmio + LAS0_8254_TIMER_BASE,
- RTD_CLOCK_BASE, I8254_IO8, 2);
- if (!dev->pacer)
+ dev->pacer = comedi_8254_mm_alloc(dev->mmio + LAS0_8254_TIMER_BASE,
+ RTD_CLOCK_BASE, I8254_IO8, 2);
+ if (IS_ERR(dev->pacer))
return -ENOMEM;
comedi_8254_subdevice_init(s, dev->pacer);
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 11b3e34b7696..bd1e1357cef8 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -180,8 +180,11 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "ti,am62a7", },
{ .compatible = "ti,am62p5", },
+ { .compatible = "qcom,ipq5332", },
{ .compatible = "qcom,ipq6018", },
{ .compatible = "qcom,ipq8064", },
+ { .compatible = "qcom,ipq8074", },
+ { .compatible = "qcom,ipq9574", },
{ .compatible = "qcom,apq8064", },
{ .compatible = "qcom,msm8974", },
{ .compatible = "qcom,msm8960", },
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 15367ac08b2b..6355a39418c5 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -38,6 +38,11 @@ enum ipq806x_versions {
#define IPQ6000_VERSION BIT(2)
+enum ipq8074_versions {
+ IPQ8074_HAWKEYE_VERSION = 0,
+ IPQ8074_ACORN_VERSION,
+};
+
struct qcom_cpufreq_drv;
struct qcom_cpufreq_match_data {
@@ -178,6 +183,16 @@ static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev,
switch (msm_id) {
case QCOM_ID_MSM8996:
case QCOM_ID_APQ8096:
+ case QCOM_ID_IPQ5332:
+ case QCOM_ID_IPQ5322:
+ case QCOM_ID_IPQ5312:
+ case QCOM_ID_IPQ5302:
+ case QCOM_ID_IPQ5300:
+ case QCOM_ID_IPQ9514:
+ case QCOM_ID_IPQ9550:
+ case QCOM_ID_IPQ9554:
+ case QCOM_ID_IPQ9570:
+ case QCOM_ID_IPQ9574:
drv->versions = 1 << (unsigned int)(*speedbin);
break;
case QCOM_ID_MSM8996SG:
@@ -338,6 +353,44 @@ static int qcom_cpufreq_ipq6018_name_version(struct device *cpu_dev,
return 0;
}
+static int qcom_cpufreq_ipq8074_name_version(struct device *cpu_dev,
+ struct nvmem_cell *speedbin_nvmem,
+ char **pvs_name,
+ struct qcom_cpufreq_drv *drv)
+{
+ u32 msm_id;
+ int ret;
+ *pvs_name = NULL;
+
+ ret = qcom_smem_get_soc_id(&msm_id);
+ if (ret)
+ return ret;
+
+ switch (msm_id) {
+ case QCOM_ID_IPQ8070A:
+ case QCOM_ID_IPQ8071A:
+ case QCOM_ID_IPQ8172:
+ case QCOM_ID_IPQ8173:
+ case QCOM_ID_IPQ8174:
+ drv->versions = BIT(IPQ8074_ACORN_VERSION);
+ break;
+ case QCOM_ID_IPQ8072A:
+ case QCOM_ID_IPQ8074A:
+ case QCOM_ID_IPQ8076A:
+ case QCOM_ID_IPQ8078A:
+ drv->versions = BIT(IPQ8074_HAWKEYE_VERSION);
+ break;
+ default:
+ dev_err(cpu_dev,
+ "SoC ID %u is not part of IPQ8074 family, limiting to 1.4GHz!\n",
+ msm_id);
+ drv->versions = BIT(IPQ8074_ACORN_VERSION);
+ break;
+ }
+
+ return 0;
+}
+
static const char *generic_genpd_names[] = { "perf", NULL };
static const struct qcom_cpufreq_match_data match_data_kryo = {
@@ -367,6 +420,10 @@ static const struct qcom_cpufreq_match_data match_data_ipq8064 = {
.get_version = qcom_cpufreq_ipq8064_name_version,
};
+static const struct qcom_cpufreq_match_data match_data_ipq8074 = {
+ .get_version = qcom_cpufreq_ipq8074_name_version,
+};
+
static int qcom_cpufreq_probe(struct platform_device *pdev)
{
struct qcom_cpufreq_drv *drv;
@@ -494,9 +551,12 @@ static const struct of_device_id qcom_cpufreq_match_list[] __initconst = {
{ .compatible = "qcom,msm8909", .data = &match_data_msm8909 },
{ .compatible = "qcom,msm8996", .data = &match_data_kryo },
{ .compatible = "qcom,qcs404", .data = &match_data_qcs404 },
+ { .compatible = "qcom,ipq5332", .data = &match_data_kryo },
{ .compatible = "qcom,ipq6018", .data = &match_data_ipq6018 },
{ .compatible = "qcom,ipq8064", .data = &match_data_ipq8064 },
+ { .compatible = "qcom,ipq8074", .data = &match_data_ipq8074 },
{ .compatible = "qcom,apq8064", .data = &match_data_krait },
+ { .compatible = "qcom,ipq9574", .data = &match_data_kryo },
{ .compatible = "qcom,msm8974", .data = &match_data_krait },
{ .compatible = "qcom,msm8960", .data = &match_data_krait },
{},
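Not part of the change set, but a sketch of how a further SoC family would be wired into qcom-cpufreq-nvmem under the same pattern shown above: a get_version callback that translates the SMEM SoC ID into a BIT() mask of supported speed bins, a match_data instance pointing at it, and matching of_device_id/blocklist entries. All "myfam" identifiers are invented for illustration.

static int qcom_cpufreq_myfam_name_version(struct device *cpu_dev,
					   struct nvmem_cell *speedbin_nvmem,
					   char **pvs_name,
					   struct qcom_cpufreq_drv *drv)
{
	u32 msm_id;
	int ret;

	*pvs_name = NULL;

	ret = qcom_smem_get_soc_id(&msm_id);
	if (ret)
		return ret;

	/* one hypothetical bin; real code would switch on msm_id */
	drv->versions = BIT(0);
	return 0;
}

static const struct qcom_cpufreq_match_data match_data_myfam = {
	.get_version = qcom_cpufreq_myfam_name_version,
};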
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index 40d055560e52..2034eb4ce83f 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -289,6 +289,9 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
}
}
}
+
+ cxlrd->qos_class = cfmws->qtg_id;
+
rc = cxl_decoder_add(cxld, target_map);
err_xormap:
if (rc)
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 45e7e044cf4a..86d7ba23235e 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -73,8 +73,10 @@ struct cxl_rcrb_info;
resource_size_t __rcrb_to_component(struct device *dev,
struct cxl_rcrb_info *ri,
enum cxl_rcrb which);
+u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb);
extern struct rw_semaphore cxl_dpa_rwsem;
+extern struct rw_semaphore cxl_region_rwsem;
int cxl_memdev_init(void);
void cxl_memdev_exit(void);
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 4449b34a80cc..1cc9be85ba4c 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -81,26 +81,6 @@ static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
cxlhdm->interleave_mask |= GENMASK(14, 12);
}
-static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
- struct cxl_component_regs *regs)
-{
- struct cxl_register_map map = {
- .dev = &port->dev,
- .resource = port->component_reg_phys,
- .base = crb,
- .max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
- };
-
- cxl_probe_component_regs(&port->dev, crb, &map.component_map);
- if (!map.component_map.hdm_decoder.valid) {
- dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
- /* unique error code to indicate no HDM decoder capability */
- return -ENODEV;
- }
-
- return cxl_map_component_regs(&map, regs, BIT(CXL_CM_CAP_CAP_ID_HDM));
-}
-
static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
{
struct cxl_hdm *cxlhdm;
@@ -153,9 +133,9 @@ static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
struct cxl_endpoint_dvsec_info *info)
{
+ struct cxl_register_map *reg_map = &port->reg_map;
struct device *dev = &port->dev;
struct cxl_hdm *cxlhdm;
- void __iomem *crb;
int rc;
cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
@@ -164,19 +144,29 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
cxlhdm->port = port;
dev_set_drvdata(dev, cxlhdm);
- crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
- if (!crb && info && info->mem_enabled) {
+ /* Memory devices can configure device HDM using DVSEC range regs. */
+ if (reg_map->resource == CXL_RESOURCE_NONE) {
+ if (!info || !info->mem_enabled) {
+ dev_err(dev, "No component registers mapped\n");
+ return ERR_PTR(-ENXIO);
+ }
+
cxlhdm->decoder_count = info->ranges;
return cxlhdm;
- } else if (!crb) {
- dev_err(dev, "No component registers mapped\n");
- return ERR_PTR(-ENXIO);
}
- rc = map_hdm_decoder_regs(port, crb, &cxlhdm->regs);
- iounmap(crb);
- if (rc)
+ if (!reg_map->component_map.hdm_decoder.valid) {
+ dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
+ /* unique error code to indicate no HDM decoder capability */
+ return ERR_PTR(-ENODEV);
+ }
+
+ rc = cxl_map_component_regs(reg_map, &cxlhdm->regs,
+ BIT(CXL_CM_CAP_CAP_ID_HDM));
+ if (rc) {
+ dev_err(dev, "Failed to map HDM capability.\n");
return ERR_PTR(rc);
+ }
parse_hdm_decoder_caps(cxlhdm);
if (cxlhdm->decoder_count == 0) {
@@ -575,17 +565,11 @@ static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
CXL_HDM_DECODER0_CTRL_HOSTONLY);
}
-static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
+static void cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
{
struct cxl_dport **t = &cxlsd->target[0];
int ways = cxlsd->cxld.interleave_ways;
- if (dev_WARN_ONCE(&cxlsd->cxld.dev,
- ways > 8 || ways > cxlsd->nr_targets,
- "ways: %d overflows targets: %d\n", ways,
- cxlsd->nr_targets))
- return -ENXIO;
-
*tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
if (ways > 1)
*tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
@@ -601,8 +585,6 @@ static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
*tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
if (ways > 7)
*tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
-
- return 0;
}
/*
@@ -643,13 +625,33 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
if (cxld->flags & CXL_DECODER_F_ENABLE)
return 0;
- if (port->commit_end + 1 != id) {
+ if (cxl_num_decoders_committed(port) != id) {
dev_dbg(&port->dev,
"%s: out of order commit, expected decoder%d.%d\n",
- dev_name(&cxld->dev), port->id, port->commit_end + 1);
+ dev_name(&cxld->dev), port->id,
+ cxl_num_decoders_committed(port));
return -EBUSY;
}
+ /*
+ * For endpoint decoders hosted on CXL memory devices that
+ * support the sanitize operation, make sure sanitize is not in-flight.
+ */
+ if (is_endpoint_decoder(&cxld->dev)) {
+ struct cxl_endpoint_decoder *cxled =
+ to_cxl_endpoint_decoder(&cxld->dev);
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_memdev_state *mds =
+ to_cxl_memdev_state(cxlmd->cxlds);
+
+ if (mds && mds->security.sanitize_active) {
+ dev_dbg(&cxlmd->dev,
+ "attempted to commit %s during sanitize\n",
+ dev_name(&cxld->dev));
+ return -EBUSY;
+ }
+ }
+
down_read(&cxl_dpa_rwsem);
/* common decoder settings */
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
@@ -670,13 +672,7 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
u64 targets;
- rc = cxlsd_set_targets(cxlsd, &targets);
- if (rc) {
- dev_dbg(&port->dev, "%s: target configuration error\n",
- dev_name(&cxld->dev));
- goto err;
- }
-
+ cxlsd_set_targets(cxlsd, &targets);
writel(upper_32_bits(targets), tl_hi);
writel(lower_32_bits(targets), tl_lo);
} else {
@@ -694,7 +690,6 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
port->commit_end++;
rc = cxld_await_commit(hdm, cxld->id);
-err:
if (rc) {
dev_dbg(&port->dev, "%s: error %d committing decoder\n",
dev_name(&cxld->dev), rc);
@@ -844,7 +839,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
cxld->target_type = CXL_DECODER_HOSTONLYMEM;
else
cxld->target_type = CXL_DECODER_DEVMEM;
- if (cxld->id != port->commit_end + 1) {
+ if (cxld->id != cxl_num_decoders_committed(port)) {
dev_warn(&port->dev,
"decoder%d.%d: Committed out of order\n",
port->id, cxld->id);
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 4df4f614f490..36270dcfb42e 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -1125,20 +1125,7 @@ int cxl_dev_state_identify(struct cxl_memdev_state *mds)
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
-/**
- * cxl_mem_sanitize() - Send a sanitization command to the device.
- * @mds: The device data for the operation
- * @cmd: The specific sanitization command opcode
- *
- * Return: 0 if the command was executed successfully, regardless of
- * whether or not the actual security operation is done in the background,
- * such as for the Sanitize case.
- * Error return values can be the result of the mailbox command, -EINVAL
- * when security requirements are not met or invalid contexts.
- *
- * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
- */
-int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
+static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
{
int rc;
u32 sec_out = 0;
@@ -1183,7 +1170,45 @@ int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_mem_sanitize, CXL);
+
+
+/**
+ * cxl_mem_sanitize() - Send a sanitization command to the device.
+ * @cxlmd: The device for the operation
+ * @cmd: The specific sanitization command opcode
+ *
+ * Return: 0 if the command was executed successfully, regardless of
+ * whether or not the actual security operation is done in the background,
+ * such as for the Sanitize case.
+ * Error return values can be the result of the mailbox command, -EINVAL
+ * when security requirements are not met or invalid contexts, or -EBUSY
+ * if the sanitize operation is already in flight.
+ *
+ * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
+ */
+int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
+{
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_port *endpoint;
+ int rc;
+
+ /* synchronize with cxl_mem_probe() and decoder write operations */
+ device_lock(&cxlmd->dev);
+ endpoint = cxlmd->endpoint;
+ down_read(&cxl_region_rwsem);
+ /*
+ * Require an endpoint to be safe otherwise the driver can not
+ * be sure that the device is unmapped.
+ */
+ if (endpoint && cxl_num_decoders_committed(endpoint) == 0)
+ rc = __cxl_mem_sanitize(mds, cmd);
+ else
+ rc = -EBUSY;
+ up_read(&cxl_region_rwsem);
+ device_unlock(&cxlmd->dev);
+
+ return rc;
+}
static int add_dpa_res(struct device *dev, struct resource *parent,
struct resource *res, resource_size_t start,
@@ -1224,8 +1249,7 @@ int cxl_mem_create_range_info(struct cxl_memdev_state *mds)
return 0;
}
- cxlds->dpa_res =
- (struct resource)DEFINE_RES_MEM(0, mds->total_bytes);
+ cxlds->dpa_res = DEFINE_RES_MEM(0, mds->total_bytes);
if (mds->partition_align_bytes == 0) {
rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
@@ -1377,6 +1401,8 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
mutex_init(&mds->mbox_mutex);
mutex_init(&mds->event.log_lock);
mds->cxlds.dev = dev;
+ mds->cxlds.reg_map.host = dev;
+ mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
return mds;
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index 14b547c07f54..fc5c2b414793 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -125,13 +125,16 @@ static ssize_t security_state_show(struct device *dev,
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- u64 reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
- u32 pct = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg);
- u16 cmd = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
unsigned long state = mds->security.state;
+ int rc = 0;
- if (cmd == CXL_MBOX_OP_SANITIZE && pct != 100)
- return sysfs_emit(buf, "sanitize\n");
+ /* sync with latest submission state */
+ mutex_lock(&mds->mbox_mutex);
+ if (mds->security.sanitize_active)
+ rc = sysfs_emit(buf, "sanitize\n");
+ mutex_unlock(&mds->mbox_mutex);
+ if (rc)
+ return rc;
if (!(state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
return sysfs_emit(buf, "disabled\n");
@@ -152,24 +155,17 @@ static ssize_t security_sanitize_store(struct device *dev,
const char *buf, size_t len)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
- struct cxl_port *port = cxlmd->endpoint;
bool sanitize;
ssize_t rc;
if (kstrtobool(buf, &sanitize) || !sanitize)
return -EINVAL;
- if (!port || !is_cxl_endpoint(port))
- return -EINVAL;
-
- /* ensure no regions are mapped to this memdev */
- if (port->commit_end != -1)
- return -EBUSY;
-
- rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SANITIZE);
+ rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SANITIZE);
+ if (rc)
+ return rc;
- return rc ? rc : len;
+ return len;
}
static struct device_attribute dev_attr_security_sanitize =
__ATTR(sanitize, 0200, NULL, security_sanitize_store);
@@ -179,24 +175,17 @@ static ssize_t security_erase_store(struct device *dev,
const char *buf, size_t len)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
- struct cxl_port *port = cxlmd->endpoint;
ssize_t rc;
bool erase;
if (kstrtobool(buf, &erase) || !erase)
return -EINVAL;
- if (!port || !is_cxl_endpoint(port))
- return -EINVAL;
-
- /* ensure no regions are mapped to this memdev */
- if (port->commit_end != -1)
- return -EBUSY;
-
- rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SECURE_ERASE);
+ rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SECURE_ERASE);
+ if (rc)
+ return rc;
- return rc ? rc : len;
+ return len;
}
static struct device_attribute dev_attr_security_erase =
__ATTR(erase, 0200, NULL, security_erase_store);
@@ -242,7 +231,7 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
if (rc)
return rc;
- if (port->commit_end == -1) {
+ if (cxl_num_decoders_committed(port) == 0) {
/* No regions mapped to this memdev */
rc = cxl_get_poison_by_memdev(cxlmd);
} else {
@@ -293,7 +282,7 @@ static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa)
.dpa = dpa,
};
port = cxlmd->endpoint;
- if (port && is_cxl_endpoint(port) && port->commit_end != -1)
+ if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port))
device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
return ctx.cxlr;
@@ -556,21 +545,11 @@ void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
}
EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);
-static void cxl_memdev_security_shutdown(struct device *dev)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
-
- if (mds->security.poll)
- cancel_delayed_work_sync(&mds->security.poll_dwork);
-}
-
static void cxl_memdev_shutdown(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
down_write(&cxl_memdev_rwsem);
- cxl_memdev_security_shutdown(dev);
cxlmd->cxlds = NULL;
up_write(&cxl_memdev_rwsem);
}
@@ -580,8 +559,8 @@ static void cxl_memdev_unregister(void *_cxlmd)
struct cxl_memdev *cxlmd = _cxlmd;
struct device *dev = &cxlmd->dev;
- cxl_memdev_shutdown(dev);
cdev_device_del(&cxlmd->cdev, dev);
+ cxl_memdev_shutdown(dev);
put_device(dev);
}
@@ -961,17 +940,16 @@ static const struct fw_upload_ops cxl_memdev_fw_ops = {
.cleanup = cxl_fw_cleanup,
};
-static void devm_cxl_remove_fw_upload(void *fwl)
+static void cxl_remove_fw_upload(void *fwl)
{
firmware_upload_unregister(fwl);
}
-int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds)
+int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds)
{
struct cxl_dev_state *cxlds = &mds->cxlds;
struct device *dev = &cxlds->cxlmd->dev;
struct fw_upload *fwl;
- int rc;
if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, mds->enabled_cmds))
return 0;
@@ -979,19 +957,10 @@ int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds)
fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev),
&cxl_memdev_fw_ops, mds);
if (IS_ERR(fwl))
- return dev_err_probe(dev, PTR_ERR(fwl),
- "Failed to register firmware loader\n");
-
- rc = devm_add_action_or_reset(cxlds->dev, devm_cxl_remove_fw_upload,
- fwl);
- if (rc)
- dev_err(dev,
- "Failed to add firmware loader remove action: %d\n",
- rc);
-
- return rc;
+ return PTR_ERR(fwl);
+ return devm_add_action_or_reset(host, cxl_remove_fw_upload, fwl);
}
-EXPORT_SYMBOL_NS_GPL(cxl_memdev_setup_fw_upload, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fw_upload, CXL);
static const struct file_operations cxl_memdev_fops = {
.owner = THIS_MODULE,
@@ -1002,36 +971,8 @@ static const struct file_operations cxl_memdev_fops = {
.llseek = noop_llseek,
};
-static void put_sanitize(void *data)
-{
- struct cxl_memdev_state *mds = data;
-
- sysfs_put(mds->security.sanitize_node);
-}
-
-static int cxl_memdev_security_init(struct cxl_memdev *cxlmd)
-{
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- struct device *dev = &cxlmd->dev;
- struct kernfs_node *sec;
-
- sec = sysfs_get_dirent(dev->kobj.sd, "security");
- if (!sec) {
- dev_err(dev, "sysfs_get_dirent 'security' failed\n");
- return -ENODEV;
- }
- mds->security.sanitize_node = sysfs_get_dirent(sec, "state");
- sysfs_put(sec);
- if (!mds->security.sanitize_node) {
- dev_err(dev, "sysfs_get_dirent 'state' failed\n");
- return -ENODEV;
- }
-
- return devm_add_action_or_reset(cxlds->dev, put_sanitize, mds);
- }
-
-struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
+struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
+ struct cxl_dev_state *cxlds)
{
struct cxl_memdev *cxlmd;
struct device *dev;
@@ -1059,11 +1000,7 @@ struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
if (rc)
goto err;
- rc = cxl_memdev_security_init(cxlmd);
- if (rc)
- goto err;
-
- rc = devm_add_action_or_reset(cxlds->dev, cxl_memdev_unregister, cxlmd);
+ rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
if (rc)
return ERR_PTR(rc);
return cxlmd;
@@ -1079,6 +1016,50 @@ err:
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, CXL);
+static void sanitize_teardown_notifier(void *data)
+{
+ struct cxl_memdev_state *mds = data;
+ struct kernfs_node *state;
+
+ /*
+ * Prevent new irq triggered invocations of the workqueue and
+ * flush inflight invocations.
+ */
+ mutex_lock(&mds->mbox_mutex);
+ state = mds->security.sanitize_node;
+ mds->security.sanitize_node = NULL;
+ mutex_unlock(&mds->mbox_mutex);
+
+ cancel_delayed_work_sync(&mds->security.poll_dwork);
+ sysfs_put(state);
+}
+
+int devm_cxl_sanitize_setup_notifier(struct device *host,
+ struct cxl_memdev *cxlmd)
+{
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+ struct kernfs_node *sec;
+
+ if (!test_bit(CXL_SEC_ENABLED_SANITIZE, mds->security.enabled_cmds))
+ return 0;
+
+ /*
+ * Note, the expectation is that @cxlmd would have failed to be
+ * created if these sysfs_get_dirent calls fail.
+ */
+ sec = sysfs_get_dirent(cxlmd->dev.kobj.sd, "security");
+ if (!sec)
+ return -ENOENT;
+ mds->security.sanitize_node = sysfs_get_dirent(sec, "state");
+ sysfs_put(sec);
+ if (!mds->security.sanitize_node)
+ return -ENOENT;
+
+ return devm_add_action_or_reset(host, sanitize_teardown_notifier, mds);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_sanitize_setup_notifier, CXL);
+
__init int cxl_memdev_init(void)
{
dev_t devt;
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index c7a7887ebdcf..eff20e83d0a6 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -5,6 +5,7 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
+#include <linux/aer.h>
#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>
@@ -595,6 +596,16 @@ static int cxl_cdat_read_table(struct device *dev,
return 0;
}
+static unsigned char cdat_checksum(void *buf, size_t size)
+{
+ unsigned char sum, *data = buf;
+ size_t i;
+
+ for (sum = 0, i = 0; i < size; i++)
+ sum += data[i];
+ return sum;
+}
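A short illustrative note, separate from the patch: CDAT, like ACPI tables, carries a checksum byte chosen so that every byte of the table, checksum included, sums to zero modulo 256, which is why a nonzero cdat_checksum() result causes the table to be discarded below. A tiny worked example of that property:

	/* 0x12 + 0x34 + 0xba == 0x100, which truncates to 0 in an unsigned char */
	unsigned char ok[3] = { 0x12, 0x34, 0xba };

	WARN_ON(cdat_checksum(ok, sizeof(ok)) != 0);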
+
/**
* read_cdat_data - Read the CDAT data on this port
* @port: Port to read data from
@@ -603,18 +614,30 @@ static int cxl_cdat_read_table(struct device *dev,
*/
void read_cdat_data(struct cxl_port *port)
{
- struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
- struct device *host = cxlmd->dev.parent;
+ struct device *uport = port->uport_dev;
struct device *dev = &port->dev;
struct pci_doe_mb *cdat_doe;
+ struct pci_dev *pdev = NULL;
+ struct cxl_memdev *cxlmd;
size_t cdat_length;
void *cdat_table;
int rc;
- if (!dev_is_pci(host))
+ if (is_cxl_memdev(uport)) {
+ struct device *host;
+
+ cxlmd = to_cxl_memdev(uport);
+ host = cxlmd->dev.parent;
+ if (dev_is_pci(host))
+ pdev = to_pci_dev(host);
+ } else if (dev_is_pci(uport)) {
+ pdev = to_pci_dev(uport);
+ }
+
+ if (!pdev)
return;
- cdat_doe = pci_find_doe_mailbox(to_pci_dev(host),
- PCI_DVSEC_VENDOR_ID_CXL,
+
+ cdat_doe = pci_find_doe_mailbox(pdev, PCI_DVSEC_VENDOR_ID_CXL,
CXL_DOE_PROTOCOL_TABLE_ACCESS);
if (!cdat_doe) {
dev_dbg(dev, "No CDAT mailbox\n");
@@ -634,44 +657,54 @@ void read_cdat_data(struct cxl_port *port)
return;
rc = cxl_cdat_read_table(dev, cdat_doe, cdat_table, &cdat_length);
- if (rc) {
- /* Don't leave table data allocated on error */
- devm_kfree(dev, cdat_table);
- dev_err(dev, "CDAT data read error\n");
- return;
- }
+ if (rc)
+ goto err;
+
+ cdat_table = cdat_table + sizeof(__le32);
+ if (cdat_checksum(cdat_table, cdat_length))
+ goto err;
- port->cdat.table = cdat_table + sizeof(__le32);
+ port->cdat.table = cdat_table;
port->cdat.length = cdat_length;
+ return;
+
+err:
+ /* Don't leave table data allocated on error */
+ devm_kfree(dev, cdat_table);
+ dev_err(dev, "Failed to read/validate CDAT.\n");
}
EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
-void cxl_cor_error_detected(struct pci_dev *pdev)
+static void __cxl_handle_cor_ras(struct cxl_dev_state *cxlds,
+ void __iomem *ras_base)
{
- struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
void __iomem *addr;
u32 status;
- if (!cxlds->regs.ras)
+ if (!ras_base)
return;
- addr = cxlds->regs.ras + CXL_RAS_CORRECTABLE_STATUS_OFFSET;
+ addr = ras_base + CXL_RAS_CORRECTABLE_STATUS_OFFSET;
status = readl(addr);
if (status & CXL_RAS_CORRECTABLE_STATUS_MASK) {
writel(status & CXL_RAS_CORRECTABLE_STATUS_MASK, addr);
trace_cxl_aer_correctable_error(cxlds->cxlmd, status);
}
}
-EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);
+
+static void cxl_handle_endpoint_cor_ras(struct cxl_dev_state *cxlds)
+{
+ return __cxl_handle_cor_ras(cxlds, cxlds->regs.ras);
+}
/* CXL spec rev3.0 8.2.4.16.1 */
-static void header_log_copy(struct cxl_dev_state *cxlds, u32 *log)
+static void header_log_copy(void __iomem *ras_base, u32 *log)
{
void __iomem *addr;
u32 *log_addr;
int i, log_u32_size = CXL_HEADERLOG_SIZE / sizeof(u32);
- addr = cxlds->regs.ras + CXL_RAS_HEADER_LOG_OFFSET;
+ addr = ras_base + CXL_RAS_HEADER_LOG_OFFSET;
log_addr = log;
for (i = 0; i < log_u32_size; i++) {
@@ -685,17 +718,18 @@ static void header_log_copy(struct cxl_dev_state *cxlds, u32 *log)
* Log the state of the RAS status registers and prepare them to log the
* next error status. Return 1 if reset needed.
*/
-static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
+static bool __cxl_handle_ras(struct cxl_dev_state *cxlds,
+ void __iomem *ras_base)
{
u32 hl[CXL_HEADERLOG_SIZE_U32];
void __iomem *addr;
u32 status;
u32 fe;
- if (!cxlds->regs.ras)
+ if (!ras_base)
return false;
- addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
+ addr = ras_base + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
status = readl(addr);
if (!(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK))
return false;
@@ -703,7 +737,7 @@ static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
/* If multiple errors, log header points to first error from ctrl reg */
if (hweight32(status) > 1) {
void __iomem *rcc_addr =
- cxlds->regs.ras + CXL_RAS_CAP_CONTROL_OFFSET;
+ ras_base + CXL_RAS_CAP_CONTROL_OFFSET;
fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
readl(rcc_addr)));
@@ -711,13 +745,201 @@ static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
fe = status;
}
- header_log_copy(cxlds, hl);
+ header_log_copy(ras_base, hl);
trace_cxl_aer_uncorrectable_error(cxlds->cxlmd, status, fe, hl);
writel(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK, addr);
return true;
}
+static bool cxl_handle_endpoint_ras(struct cxl_dev_state *cxlds)
+{
+ return __cxl_handle_ras(cxlds, cxlds->regs.ras);
+}
+
+#ifdef CONFIG_PCIEAER_CXL
+
+static void cxl_dport_map_rch_aer(struct cxl_dport *dport)
+{
+ struct cxl_rcrb_info *ri = &dport->rcrb;
+ void __iomem *dport_aer = NULL;
+ resource_size_t aer_phys;
+ struct device *host;
+
+ if (dport->rch && ri->aer_cap) {
+ host = dport->reg_map.host;
+ aer_phys = ri->aer_cap + ri->base;
+ dport_aer = devm_cxl_iomap_block(host, aer_phys,
+ sizeof(struct aer_capability_regs));
+ }
+
+ dport->regs.dport_aer = dport_aer;
+}
+
+static void cxl_dport_map_regs(struct cxl_dport *dport)
+{
+ struct cxl_register_map *map = &dport->reg_map;
+ struct device *dev = dport->dport_dev;
+
+ if (!map->component_map.ras.valid)
+ dev_dbg(dev, "RAS registers not found\n");
+ else if (cxl_map_component_regs(map, &dport->regs.component,
+ BIT(CXL_CM_CAP_CAP_ID_RAS)))
+ dev_dbg(dev, "Failed to map RAS capability.\n");
+
+ if (dport->rch)
+ cxl_dport_map_rch_aer(dport);
+}
+
+static void cxl_disable_rch_root_ints(struct cxl_dport *dport)
+{
+ void __iomem *aer_base = dport->regs.dport_aer;
+ struct pci_host_bridge *bridge;
+ u32 aer_cmd_mask, aer_cmd;
+
+ if (!aer_base)
+ return;
+
+ bridge = to_pci_host_bridge(dport->dport_dev);
+
+ /*
+ * Disable RCH root port command interrupts.
+ * CXL 3.0 12.2.1.1 - RCH Downstream Port-detected Errors
+ *
+ * This sequence may not be necessary. CXL spec states disabling
+ * the root cmd register's interrupts is required. But, PCI spec
+ * shows these are disabled by default on reset.
+ */
+ if (bridge->native_aer) {
+ aer_cmd_mask = (PCI_ERR_ROOT_CMD_COR_EN |
+ PCI_ERR_ROOT_CMD_NONFATAL_EN |
+ PCI_ERR_ROOT_CMD_FATAL_EN);
+ aer_cmd = readl(aer_base + PCI_ERR_ROOT_COMMAND);
+ aer_cmd &= ~aer_cmd_mask;
+ writel(aer_cmd, aer_base + PCI_ERR_ROOT_COMMAND);
+ }
+}
+
+void cxl_setup_parent_dport(struct device *host, struct cxl_dport *dport)
+{
+ struct device *dport_dev = dport->dport_dev;
+ struct pci_host_bridge *host_bridge;
+
+ host_bridge = to_pci_host_bridge(dport_dev);
+ if (host_bridge->native_aer)
+ dport->rcrb.aer_cap = cxl_rcrb_to_aer(dport_dev, dport->rcrb.base);
+
+ dport->reg_map.host = host;
+ cxl_dport_map_regs(dport);
+
+ if (dport->rch)
+ cxl_disable_rch_root_ints(dport);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_setup_parent_dport, CXL);
+
+static void cxl_handle_rdport_cor_ras(struct cxl_dev_state *cxlds,
+ struct cxl_dport *dport)
+{
+ return __cxl_handle_cor_ras(cxlds, dport->regs.ras);
+}
+
+static bool cxl_handle_rdport_ras(struct cxl_dev_state *cxlds,
+ struct cxl_dport *dport)
+{
+ return __cxl_handle_ras(cxlds, dport->regs.ras);
+}
+
+/*
+ * Copy the AER capability registers using 32 bit read accesses.
+ * This is necessary because RCRB AER capability is MMIO mapped. Clear the
+ * status after copying.
+ *
+ * @aer_base: base address of AER capability block in RCRB
+ * @aer_regs: destination for copying AER capability
+ */
+static bool cxl_rch_get_aer_info(void __iomem *aer_base,
+ struct aer_capability_regs *aer_regs)
+{
+ int read_cnt = sizeof(struct aer_capability_regs) / sizeof(u32);
+ u32 *aer_regs_buf = (u32 *)aer_regs;
+ int n;
+
+ if (!aer_base)
+ return false;
+
+ /* Use readl() to guarantee 32-bit accesses */
+ for (n = 0; n < read_cnt; n++)
+ aer_regs_buf[n] = readl(aer_base + n * sizeof(u32));
+
+ writel(aer_regs->uncor_status, aer_base + PCI_ERR_UNCOR_STATUS);
+ writel(aer_regs->cor_status, aer_base + PCI_ERR_COR_STATUS);
+
+ return true;
+}
+
+/* Get AER severity. Return false if there is no error. */
+static bool cxl_rch_get_aer_severity(struct aer_capability_regs *aer_regs,
+ int *severity)
+{
+ if (aer_regs->uncor_status & ~aer_regs->uncor_mask) {
+ if (aer_regs->uncor_status & PCI_ERR_ROOT_FATAL_RCV)
+ *severity = AER_FATAL;
+ else
+ *severity = AER_NONFATAL;
+ return true;
+ }
+
+ if (aer_regs->cor_status & ~aer_regs->cor_mask) {
+ *severity = AER_CORRECTABLE;
+ return true;
+ }
+
+ return false;
+}
+
+static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds)
+{
+ struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+ struct aer_capability_regs aer_regs;
+ struct cxl_dport *dport;
+ struct cxl_port *port;
+ int severity;
+
+ port = cxl_pci_find_port(pdev, &dport);
+ if (!port)
+ return;
+
+ put_device(&port->dev);
+
+ if (!cxl_rch_get_aer_info(dport->regs.dport_aer, &aer_regs))
+ return;
+
+ if (!cxl_rch_get_aer_severity(&aer_regs, &severity))
+ return;
+
+ pci_print_aer(pdev, severity, &aer_regs);
+
+ if (severity == AER_CORRECTABLE)
+ cxl_handle_rdport_cor_ras(cxlds, dport);
+ else
+ cxl_handle_rdport_ras(cxlds, dport);
+}
+
+#else
+static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds) { }
+#endif
+
+void cxl_cor_error_detected(struct pci_dev *pdev)
+{
+ struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+
+ if (cxlds->rcd)
+ cxl_handle_rdport_errors(cxlds);
+
+ cxl_handle_endpoint_cor_ras(cxlds);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);
+
pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
@@ -726,13 +948,16 @@ pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
struct device *dev = &cxlmd->dev;
bool ue;
+ if (cxlds->rcd)
+ cxl_handle_rdport_errors(cxlds);
+
/*
* A frozen channel indicates an impending reset which is fatal to
* CXL.mem operation, and will likely crash the system. On the off
* chance the situation is recoverable dump the status of the RAS
* capability registers and bounce the active state of the memdev.
*/
- ue = cxl_report_and_clear(cxlds);
+ ue = cxl_handle_endpoint_ras(cxlds);
switch (state) {
case pci_channel_io_normal:
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 7ca01a834e18..38441634e4c6 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -28,9 +28,22 @@
* instantiated by the core.
*/
+/*
+ * All changes to the interleave configuration occur with this lock held
+ * for write.
+ */
+DECLARE_RWSEM(cxl_region_rwsem);
+
static DEFINE_IDA(cxl_port_ida);
static DEFINE_XARRAY(cxl_root_buses);
+int cxl_num_decoders_committed(struct cxl_port *port)
+{
+ lockdep_assert_held(&cxl_region_rwsem);
+
+ return port->commit_end + 1;
+}
+
static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -278,6 +291,15 @@ static ssize_t interleave_ways_show(struct device *dev,
static DEVICE_ATTR_RO(interleave_ways);
+static ssize_t qos_class_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+
+ return sysfs_emit(buf, "%d\n", cxlrd->qos_class);
+}
+static DEVICE_ATTR_RO(qos_class);
+
static struct attribute *cxl_decoder_base_attrs[] = {
&dev_attr_start.attr,
&dev_attr_size.attr,
@@ -297,6 +319,7 @@ static struct attribute *cxl_decoder_root_attrs[] = {
&dev_attr_cap_type2.attr,
&dev_attr_cap_type3.attr,
&dev_attr_target_list.attr,
+ &dev_attr_qos_class.attr,
SET_CXL_REGION_ATTR(create_pmem_region)
SET_CXL_REGION_ATTR(create_ram_region)
SET_CXL_REGION_ATTR(delete_region)
@@ -521,8 +544,33 @@ static void cxl_port_release(struct device *dev)
kfree(port);
}
+static ssize_t decoders_committed_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_port *port = to_cxl_port(dev);
+ int rc;
+
+ down_read(&cxl_region_rwsem);
+ rc = sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port));
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+
+static DEVICE_ATTR_RO(decoders_committed);
+
+static struct attribute *cxl_port_attrs[] = {
+ &dev_attr_decoders_committed.attr,
+ NULL,
+};
+
+static struct attribute_group cxl_port_attribute_group = {
+ .attrs = cxl_port_attrs,
+};
+
static const struct attribute_group *cxl_port_attribute_groups[] = {
&cxl_base_attribute_group,
+ &cxl_port_attribute_group,
NULL,
};
@@ -619,7 +667,6 @@ static int devm_cxl_link_parent_dport(struct device *host,
static struct lock_class_key cxl_port_key;
static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
- resource_size_t component_reg_phys,
struct cxl_dport *parent_dport)
{
struct cxl_port *port;
@@ -670,7 +717,6 @@ static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
} else
dev->parent = uport_dev;
- port->component_reg_phys = component_reg_phys;
ida_init(&port->decoder_ida);
port->hdm_end = -1;
port->commit_end = -1;
@@ -691,19 +737,21 @@ err:
return ERR_PTR(rc);
}
-static int cxl_setup_comp_regs(struct device *dev, struct cxl_register_map *map,
+static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map,
resource_size_t component_reg_phys)
{
- if (component_reg_phys == CXL_RESOURCE_NONE)
- return 0;
-
*map = (struct cxl_register_map) {
- .dev = dev,
- .reg_type = CXL_REGLOC_RBI_COMPONENT,
+ .host = host,
+ .reg_type = CXL_REGLOC_RBI_EMPTY,
.resource = component_reg_phys,
- .max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
};
+ if (component_reg_phys == CXL_RESOURCE_NONE)
+ return 0;
+
+ map->reg_type = CXL_REGLOC_RBI_COMPONENT;
+ map->max_size = CXL_COMPONENT_REG_BLOCK_SIZE;
+
return cxl_setup_regs(map);
}
@@ -712,17 +760,27 @@ static int cxl_port_setup_regs(struct cxl_port *port,
{
if (dev_is_platform(port->uport_dev))
return 0;
- return cxl_setup_comp_regs(&port->dev, &port->comp_map,
+ return cxl_setup_comp_regs(&port->dev, &port->reg_map,
component_reg_phys);
}
-static int cxl_dport_setup_regs(struct cxl_dport *dport,
+static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport,
resource_size_t component_reg_phys)
{
+ int rc;
+
if (dev_is_platform(dport->dport_dev))
return 0;
- return cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map,
- component_reg_phys);
+
+ /*
+ * use @dport->dport_dev for the context for error messages during
+ * register probing, and fixup @host after the fact, since @host may be
+ * NULL.
+ */
+ rc = cxl_setup_comp_regs(dport->dport_dev, &dport->reg_map,
+ component_reg_phys);
+ dport->reg_map.host = host;
+ return rc;
}
static struct cxl_port *__devm_cxl_add_port(struct device *host,
@@ -734,21 +792,36 @@ static struct cxl_port *__devm_cxl_add_port(struct device *host,
struct device *dev;
int rc;
- port = cxl_port_alloc(uport_dev, component_reg_phys, parent_dport);
+ port = cxl_port_alloc(uport_dev, parent_dport);
if (IS_ERR(port))
return port;
dev = &port->dev;
- if (is_cxl_memdev(uport_dev))
+ if (is_cxl_memdev(uport_dev)) {
+ struct cxl_memdev *cxlmd = to_cxl_memdev(uport_dev);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
rc = dev_set_name(dev, "endpoint%d", port->id);
- else if (parent_dport)
+ if (rc)
+ goto err;
+
+ /*
+ * The endpoint driver already enumerated the component and RAS
+ * registers. Reuse that enumeration while prepping them to be
+ * mapped by the cxl_port driver.
+ */
+ port->reg_map = cxlds->reg_map;
+ port->reg_map.host = &port->dev;
+ } else if (parent_dport) {
rc = dev_set_name(dev, "port%d", port->id);
- else
- rc = dev_set_name(dev, "root%d", port->id);
- if (rc)
- goto err;
+ if (rc)
+ goto err;
- rc = cxl_port_setup_regs(port, component_reg_phys);
+ rc = cxl_port_setup_regs(port, component_reg_phys);
+ if (rc)
+ goto err;
+ } else
+ rc = dev_set_name(dev, "root%d", port->id);
if (rc)
goto err;
@@ -983,7 +1056,16 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
if (!dport)
return ERR_PTR(-ENOMEM);
- if (rcrb != CXL_RESOURCE_NONE) {
+ dport->dport_dev = dport_dev;
+ dport->port_id = port_id;
+ dport->port = port;
+
+ if (rcrb == CXL_RESOURCE_NONE) {
+ rc = cxl_dport_setup_regs(&port->dev, dport,
+ component_reg_phys);
+ if (rc)
+ return ERR_PTR(rc);
+ } else {
dport->rcrb.base = rcrb;
component_reg_phys = __rcrb_to_component(dport_dev, &dport->rcrb,
CXL_RCRB_DOWNSTREAM);
@@ -992,6 +1074,14 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
return ERR_PTR(-ENXIO);
}
+ /*
+ * RCH @dport registers are not ready to map until the dport is
+ * associated with its memdev
+ */
+ rc = cxl_dport_setup_regs(NULL, dport, component_reg_phys);
+ if (rc)
+ return ERR_PTR(rc);
+
dport->rch = true;
}
@@ -999,14 +1089,6 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
dev_dbg(dport_dev, "Component Registers found for dport: %pa\n",
&component_reg_phys);
- dport->dport_dev = dport_dev;
- dport->port_id = port_id;
- dport->port = port;
-
- rc = cxl_dport_setup_regs(dport, component_reg_phys);
- if (rc)
- return ERR_PTR(rc);
-
cond_cxl_root_lock(port);
rc = add_dport(port, dport);
cond_cxl_root_unlock(port);
@@ -1217,35 +1299,39 @@ static struct device *grandparent(struct device *dev)
return NULL;
}
+static struct device *endpoint_host(struct cxl_port *endpoint)
+{
+ struct cxl_port *port = to_cxl_port(endpoint->dev.parent);
+
+ if (is_cxl_root(port))
+ return port->uport_dev;
+ return &port->dev;
+}
+
static void delete_endpoint(void *data)
{
struct cxl_memdev *cxlmd = data;
struct cxl_port *endpoint = cxlmd->endpoint;
- struct cxl_port *parent_port;
- struct device *parent;
+ struct device *host = endpoint_host(endpoint);
- parent_port = cxl_mem_find_port(cxlmd, NULL);
- if (!parent_port)
- goto out;
- parent = &parent_port->dev;
-
- device_lock(parent);
- if (parent->driver && !endpoint->dead) {
- devm_release_action(parent, cxl_unlink_parent_dport, endpoint);
- devm_release_action(parent, cxl_unlink_uport, endpoint);
- devm_release_action(parent, unregister_port, endpoint);
+ device_lock(host);
+ if (host->driver && !endpoint->dead) {
+ devm_release_action(host, cxl_unlink_parent_dport, endpoint);
+ devm_release_action(host, cxl_unlink_uport, endpoint);
+ devm_release_action(host, unregister_port, endpoint);
}
cxlmd->endpoint = NULL;
- device_unlock(parent);
- put_device(parent);
-out:
+ device_unlock(host);
put_device(&endpoint->dev);
+ put_device(host);
}
int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
{
+ struct device *host = endpoint_host(endpoint);
struct device *dev = &cxlmd->dev;
+ get_device(host);
get_device(&endpoint->dev);
cxlmd->endpoint = endpoint;
cxlmd->depth = endpoint->depth;
@@ -1468,7 +1554,11 @@ retry:
struct cxl_dport *dport;
struct cxl_port *port;
- if (!dport_dev)
+ /*
+ * The terminal "grandparent" is NULL for PCI devices and
+ * @platform_bus for platform devices
+ */
+ if (!dport_dev || dport_dev == &platform_bus)
return 0;
uport_dev = dport_dev->parent;
@@ -1691,6 +1781,7 @@ struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
}
atomic_set(&cxlrd->region_id, rc);
+ cxlrd->qos_class = CXL_QOS_CLASS_INVALID;
return cxlrd;
}
EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);
@@ -2062,3 +2153,4 @@ static void cxl_core_exit(void)
subsys_initcall(cxl_core_init);
module_exit(cxl_core_exit);
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(CXL);
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 6d63b8798c29..56e575c79bb4 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -28,12 +28,6 @@
* 3. Decoder targets
*/
-/*
- * All changes to the interleave configuration occur with this lock held
- * for write.
- */
-static DECLARE_RWSEM(cxl_region_rwsem);
-
static struct cxl_region *to_cxl_region(struct device *dev);
static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
@@ -129,7 +123,7 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
{
if (!cpu_cache_has_invalidate_memregion()) {
if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
- dev_warn_once(
+ dev_info_once(
&cxlr->dev,
"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
return 0;
@@ -294,7 +288,7 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
*/
rc = cxl_region_invalidate_memregion(cxlr);
if (rc)
- return rc;
+ goto out;
if (commit) {
rc = cxl_region_decode_commit(cxlr);
@@ -1133,7 +1127,14 @@ static int cxl_port_setup_targets(struct cxl_port *port,
}
if (is_cxl_root(parent_port)) {
- parent_ig = cxlrd->cxlsd.cxld.interleave_granularity;
+ /*
+ * The root decoder IG is always set to the value in the CFMWS,
+ * which may differ from this region's IG. The region's IG can be
+ * used here because interleave_granularity_store() does not
+ * allow interleaved host-bridges with
+ * root IG != region IG.
+ */
+ parent_ig = p->interleave_granularity;
parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
/*
* For purposes of address bit routing, use power-of-2 math for
@@ -1195,6 +1196,14 @@ static int cxl_port_setup_targets(struct cxl_port *port,
return rc;
}
+ if (iw > 8 || iw > cxlsd->nr_targets) {
+ dev_dbg(&cxlr->dev,
+ "%s:%s:%s: ways: %d overflows targets: %d\n",
+ dev_name(port->uport_dev), dev_name(&port->dev),
+ dev_name(&cxld->dev), iw, cxlsd->nr_targets);
+ return -ENXIO;
+ }
+
if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
if (cxld->interleave_ways != iw ||
cxld->interleave_granularity != ig ||
@@ -1480,6 +1489,14 @@ static int cxl_region_attach_auto(struct cxl_region *cxlr,
return 0;
}
+static int cmp_interleave_pos(const void *a, const void *b)
+{
+ struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a;
+ struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b;
+
+ return cxled_a->pos - cxled_b->pos;
+}
+
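
cmp_interleave_pos() sorts an array of pointers, so the comparator receives pointers to
array elements and has to dereference twice, which is what the *(typeof(cxled_a) *)a
casts express. A standalone sketch of the same idiom using qsort; the struct name is
illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    struct decoder { int pos; };

    /* a and b point at elements of an array of struct decoder *, hence the double deref */
    static int cmp_pos(const void *a, const void *b)
    {
            const struct decoder *da = *(const struct decoder * const *)a;
            const struct decoder *db = *(const struct decoder * const *)b;

            return da->pos - db->pos;
    }

    int main(void)
    {
            struct decoder d0 = { 2 }, d1 = { 0 }, d2 = { 1 };
            struct decoder *targets[] = { &d0, &d1, &d2 };

            qsort(targets, 3, sizeof(targets[0]), cmp_pos);
            for (int i = 0; i < 3; i++)
                    printf("%d ", targets[i]->pos);  /* prints: 0 1 2 */
            return 0;
    }
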
static struct cxl_port *next_port(struct cxl_port *port)
{
if (!port->parent_dport)
@@ -1487,119 +1504,127 @@ static struct cxl_port *next_port(struct cxl_port *port)
return port->parent_dport->port;
}
-static int decoder_match_range(struct device *dev, void *data)
+static int match_switch_decoder_by_range(struct device *dev, void *data)
{
- struct cxl_endpoint_decoder *cxled = data;
struct cxl_switch_decoder *cxlsd;
+ struct range *r1, *r2 = data;
if (!is_switch_decoder(dev))
return 0;
cxlsd = to_cxl_switch_decoder(dev);
- return range_contains(&cxlsd->cxld.hpa_range, &cxled->cxld.hpa_range);
-}
-
-static void find_positions(const struct cxl_switch_decoder *cxlsd,
- const struct cxl_port *iter_a,
- const struct cxl_port *iter_b, int *a_pos,
- int *b_pos)
-{
- int i;
+ r1 = &cxlsd->cxld.hpa_range;
- for (i = 0, *a_pos = -1, *b_pos = -1; i < cxlsd->nr_targets; i++) {
- if (cxlsd->target[i] == iter_a->parent_dport)
- *a_pos = i;
- else if (cxlsd->target[i] == iter_b->parent_dport)
- *b_pos = i;
- if (*a_pos >= 0 && *b_pos >= 0)
- break;
- }
+ if (is_root_decoder(dev))
+ return range_contains(r1, r2);
+ return (r1->start == r2->start && r1->end == r2->end);
}
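
match_switch_decoder_by_range() treats the two decoder kinds differently: a root decoder
matches if its HPA range contains the requested range, while a switch decoder must match
the range exactly. A standalone sketch of the two comparisons; the struct and helper
names are illustrative:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct range { uint64_t start, end; };

    /* true if r2 lies entirely within r1, same idea as the kernel's range_contains() */
    static bool range_contains(const struct range *r1, const struct range *r2)
    {
            return r1->start <= r2->start && r1->end >= r2->end;
    }

    /* root decoder: containment is enough; switch decoder: ranges must be identical */
    static bool decoder_matches(const struct range *decoder,
                                const struct range *want, bool is_root)
    {
            if (is_root)
                    return range_contains(decoder, want);
            return decoder->start == want->start && decoder->end == want->end;
    }

    int main(void)
    {
            struct range root = { 0x0, 0xffffffff }, ep = { 0x1000, 0x1fff };

            printf("root match: %d, switch match: %d\n",
                   decoder_matches(&root, &ep, true),
                   decoder_matches(&root, &ep, false));  /* 1, 0 */
            return 0;
    }
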
-static int cmp_decode_pos(const void *a, const void *b)
+static int find_pos_and_ways(struct cxl_port *port, struct range *range,
+ int *pos, int *ways)
{
- struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a;
- struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b;
- struct cxl_memdev *cxlmd_a = cxled_to_memdev(cxled_a);
- struct cxl_memdev *cxlmd_b = cxled_to_memdev(cxled_b);
- struct cxl_port *port_a = cxled_to_port(cxled_a);
- struct cxl_port *port_b = cxled_to_port(cxled_b);
- struct cxl_port *iter_a, *iter_b, *port = NULL;
struct cxl_switch_decoder *cxlsd;
+ struct cxl_port *parent;
struct device *dev;
- int a_pos, b_pos;
- unsigned int seq;
-
- /* Exit early if any prior sorting failed */
- if (cxled_a->pos < 0 || cxled_b->pos < 0)
- return 0;
+ int rc = -ENXIO;
- /*
- * Walk up the hierarchy to find a shared port, find the decoder that
- * maps the range, compare the relative position of those dport
- * mappings.
- */
- for (iter_a = port_a; iter_a; iter_a = next_port(iter_a)) {
- struct cxl_port *next_a, *next_b;
+ parent = next_port(port);
+ if (!parent)
+ return rc;
- next_a = next_port(iter_a);
- if (!next_a)
- break;
+ dev = device_find_child(&parent->dev, range,
+ match_switch_decoder_by_range);
+ if (!dev) {
+ dev_err(port->uport_dev,
+ "failed to find decoder mapping %#llx-%#llx\n",
+ range->start, range->end);
+ return rc;
+ }
+ cxlsd = to_cxl_switch_decoder(dev);
+ *ways = cxlsd->cxld.interleave_ways;
- for (iter_b = port_b; iter_b; iter_b = next_port(iter_b)) {
- next_b = next_port(iter_b);
- if (next_a != next_b)
- continue;
- port = next_a;
+ for (int i = 0; i < *ways; i++) {
+ if (cxlsd->target[i] == port->parent_dport) {
+ *pos = i;
+ rc = 0;
break;
}
-
- if (port)
- break;
}
+ put_device(dev);
- if (!port) {
- dev_err(cxlmd_a->dev.parent,
- "failed to find shared port with %s\n",
- dev_name(cxlmd_b->dev.parent));
- goto err;
- }
+ return rc;
+}
- dev = device_find_child(&port->dev, cxled_a, decoder_match_range);
- if (!dev) {
- struct range *range = &cxled_a->cxld.hpa_range;
+/**
+ * cxl_calc_interleave_pos() - calculate an endpoint position in a region
+ * @cxled: endpoint decoder member of given region
+ *
+ * The endpoint position is calculated by traversing the topology from
+ * the endpoint to the root decoder and iteratively applying this
+ * calculation:
+ *
+ * position = position * parent_ways + parent_pos;
+ *
+ * ...where @position is inferred from switch and root decoder target lists.
+ *
+ * Return: position >= 0 on success
+ * -ENXIO on failure
+ */
+static int cxl_calc_interleave_pos(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_port *iter, *port = cxled_to_port(cxled);
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct range *range = &cxled->cxld.hpa_range;
+ int parent_ways = 0, parent_pos = 0, pos = 0;
+ int rc;
- dev_err(port->uport_dev,
- "failed to find decoder that maps %#llx-%#llx\n",
- range->start, range->end);
- goto err;
- }
+ /*
+ * Example: the expected interleave order of the 4-way region shown
+ * below is: mem0, mem2, mem1, mem3
+ *
+ *                    root_port
+ *                   /         \
+ *        host_bridge_0       host_bridge_1
+ *          |        |          |        |
+ *         mem0     mem1       mem2     mem3
+ *
+ * In the example the calculator will iterate twice. The first iteration
+ * uses the mem position in the host-bridge and the ways of the host-
+ * bridge to generate the first, or local, position. The second
+ * iteration uses the host-bridge position in the root_port and the ways
+ * of the root_port to refine the position.
+ *
+ * A trace of the calculation per endpoint looks like this:
+ *    mem0: pos = 0 * 2 + 0    mem2: pos = 0 * 2 + 0
+ *          pos = 0 * 2 + 0          pos = 0 * 2 + 1
+ *          pos: 0                   pos: 1
+ *
+ *    mem1: pos = 0 * 2 + 1    mem3: pos = 0 * 2 + 1
+ *          pos = 1 * 2 + 0          pos = 1 * 2 + 1
+ *          pos: 2                   pos: 3
+ *
+ * Note that while this example is simple, the method applies to more
+ * complex topologies, including those with switches.
+ */
- cxlsd = to_cxl_switch_decoder(dev);
- do {
- seq = read_seqbegin(&cxlsd->target_lock);
- find_positions(cxlsd, iter_a, iter_b, &a_pos, &b_pos);
- } while (read_seqretry(&cxlsd->target_lock, seq));
+ /* Iterate from endpoint to root_port refining the position */
+ for (iter = port; iter; iter = next_port(iter)) {
+ if (is_cxl_root(iter))
+ break;
- put_device(dev);
+ rc = find_pos_and_ways(iter, range, &parent_pos, &parent_ways);
+ if (rc)
+ return rc;
- if (a_pos < 0 || b_pos < 0) {
- dev_err(port->uport_dev,
- "failed to find shared decoder for %s and %s\n",
- dev_name(cxlmd_a->dev.parent),
- dev_name(cxlmd_b->dev.parent));
- goto err;
+ pos = pos * parent_ways + parent_pos;
}
- dev_dbg(port->uport_dev, "%s comes %s %s\n",
- dev_name(cxlmd_a->dev.parent),
- a_pos - b_pos < 0 ? "before" : "after",
- dev_name(cxlmd_b->dev.parent));
+ dev_dbg(&cxlmd->dev,
+ "decoder:%s parent:%s port:%s range:%#llx-%#llx pos:%d\n",
+ dev_name(&cxled->cxld.dev), dev_name(cxlmd->dev.parent),
+ dev_name(&port->dev), range->start, range->end, pos);
- return a_pos - b_pos;
-err:
- cxled_a->pos = -1;
- return 0;
+ return pos;
}
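
The comment above walks the iteration by hand; the standalone sketch below reproduces
the mem0/mem2/mem1/mem3 ordering from that example topology. Each endpoint supplies a
(position, ways) pair at every hop from the leaf port toward the root, and the hop
values below are taken directly from the example in the comment:

    #include <stdio.h>

    struct hop { int pos, ways; };  /* dport position and interleave ways at one level */

    /* pos = pos * parent_ways + parent_pos, applied leaf -> root */
    static int calc_interleave_pos(const struct hop *hops, int nr_hops)
    {
            int pos = 0;

            for (int i = 0; i < nr_hops; i++)
                    pos = pos * hops[i].ways + hops[i].pos;
            return pos;
    }

    int main(void)
    {
            /* { host-bridge level, root-port level } per the example topology */
            struct hop mem0[] = { { 0, 2 }, { 0, 2 } };
            struct hop mem1[] = { { 1, 2 }, { 0, 2 } };
            struct hop mem2[] = { { 0, 2 }, { 1, 2 } };
            struct hop mem3[] = { { 1, 2 }, { 1, 2 } };

            printf("mem0=%d mem1=%d mem2=%d mem3=%d\n",
                   calc_interleave_pos(mem0, 2), calc_interleave_pos(mem1, 2),
                   calc_interleave_pos(mem2, 2), calc_interleave_pos(mem3, 2));
            /* expected: mem0=0 mem1=2 mem2=1 mem3=3 */
            return 0;
    }
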
static int cxl_region_sort_targets(struct cxl_region *cxlr)
@@ -1607,22 +1632,21 @@ static int cxl_region_sort_targets(struct cxl_region *cxlr)
struct cxl_region_params *p = &cxlr->params;
int i, rc = 0;
- sort(p->targets, p->nr_targets, sizeof(p->targets[0]), cmp_decode_pos,
- NULL);
-
for (i = 0; i < p->nr_targets; i++) {
struct cxl_endpoint_decoder *cxled = p->targets[i];
+ cxled->pos = cxl_calc_interleave_pos(cxled);
/*
- * Record that sorting failed, but still continue to restore
- * cxled->pos with its ->targets[] position so that follow-on
- * code paths can reliably do p->targets[cxled->pos] to
- * self-reference their entry.
+ * Record that sorting failed, but still continue to calc
+ * cxled->pos so that follow-on code paths can reliably
+ * do p->targets[cxled->pos] to self-reference their entry.
*/
if (cxled->pos < 0)
rc = -ENXIO;
- cxled->pos = i;
}
+ /* Keep the cxlr target list in interleave position order */
+ sort(p->targets, p->nr_targets, sizeof(p->targets[0]),
+ cmp_interleave_pos, NULL);
dev_dbg(&cxlr->dev, "region sort %s\n", rc ? "failed" : "successful");
return rc;
@@ -1658,6 +1682,12 @@ static int cxl_region_attach(struct cxl_region *cxlr,
return -ENXIO;
}
+ if (p->nr_targets >= p->interleave_ways) {
+ dev_dbg(&cxlr->dev, "region already has %d endpoints\n",
+ p->nr_targets);
+ return -EINVAL;
+ }
+
ep_port = cxled_to_port(cxled);
root_port = cxlrd_to_port(cxlrd);
dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
@@ -1750,7 +1780,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
if (p->nr_targets == p->interleave_ways) {
rc = cxl_region_setup_targets(cxlr);
if (rc)
- goto err_decrement;
+ return rc;
p->state = CXL_CONFIG_ACTIVE;
}
@@ -1761,13 +1791,27 @@ static int cxl_region_attach(struct cxl_region *cxlr,
.end = p->res->end,
};
- return 0;
+ if (p->nr_targets != p->interleave_ways)
+ return 0;
-err_decrement:
- p->nr_targets--;
- cxled->pos = -1;
- p->targets[pos] = NULL;
- return rc;
+ /*
+ * Test the auto-discovery position calculator function
+ * against this successfully created user-defined region.
+ * A fail message here means that this interleave config
+ * will fail when presented as CXL_REGION_F_AUTO.
+ */
+ for (int i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+ int test_pos;
+
+ test_pos = cxl_calc_interleave_pos(cxled);
+ dev_dbg(&cxled->cxld.dev,
+ "Test cxl_calc_interleave_pos(): %s test_pos:%d cxled->pos:%d\n",
+ (test_pos == cxled->pos) ? "success" : "fail",
+ test_pos, cxled->pos);
+ }
+
+ return 0;
}
static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
@@ -2696,7 +2740,7 @@ err:
return rc;
}
-static int match_decoder_by_range(struct device *dev, void *data)
+static int match_root_decoder_by_range(struct device *dev, void *data)
{
struct range *r1, *r2 = data;
struct cxl_root_decoder *cxlrd;
@@ -2827,7 +2871,7 @@ int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
int rc;
cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range,
- match_decoder_by_range);
+ match_root_decoder_by_range);
if (!cxlrd_dev) {
dev_err(cxlmd->dev.parent,
"%s:%s no CXL window for range %#llx:%#llx\n",
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
index 6281127b3e9d..372786f80955 100644
--- a/drivers/cxl/core/regs.c
+++ b/drivers/cxl/core/regs.c
@@ -204,7 +204,7 @@ int cxl_map_component_regs(const struct cxl_register_map *map,
struct cxl_component_regs *regs,
unsigned long map_mask)
{
- struct device *dev = map->dev;
+ struct device *host = map->host;
struct mapinfo {
const struct cxl_reg_map *rmap;
void __iomem **addr;
@@ -216,16 +216,16 @@ int cxl_map_component_regs(const struct cxl_register_map *map,
for (i = 0; i < ARRAY_SIZE(mapinfo); i++) {
struct mapinfo *mi = &mapinfo[i];
- resource_size_t phys_addr;
+ resource_size_t addr;
resource_size_t length;
if (!mi->rmap->valid)
continue;
if (!test_bit(mi->rmap->id, &map_mask))
continue;
- phys_addr = map->resource + mi->rmap->offset;
+ addr = map->resource + mi->rmap->offset;
length = mi->rmap->size;
- *(mi->addr) = devm_cxl_iomap_block(dev, phys_addr, length);
+ *(mi->addr) = devm_cxl_iomap_block(host, addr, length);
if (!*(mi->addr))
return -ENOMEM;
}
@@ -237,7 +237,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_component_regs, CXL);
int cxl_map_device_regs(const struct cxl_register_map *map,
struct cxl_device_regs *regs)
{
- struct device *dev = map->dev;
+ struct device *host = map->host;
resource_size_t phys_addr = map->resource;
struct mapinfo {
const struct cxl_reg_map *rmap;
@@ -259,7 +259,7 @@ int cxl_map_device_regs(const struct cxl_register_map *map,
addr = phys_addr + mi->rmap->offset;
length = mi->rmap->size;
- *(mi->addr) = devm_cxl_iomap_block(dev, addr, length);
+ *(mi->addr) = devm_cxl_iomap_block(host, addr, length);
if (!*(mi->addr))
return -ENOMEM;
}
@@ -309,7 +309,7 @@ int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
int regloc, i;
*map = (struct cxl_register_map) {
- .dev = &pdev->dev,
+ .host = &pdev->dev,
.resource = CXL_RESOURCE_NONE,
};
@@ -386,10 +386,9 @@ int cxl_count_regblock(struct pci_dev *pdev, enum cxl_regloc_type type)
}
EXPORT_SYMBOL_NS_GPL(cxl_count_regblock, CXL);
-int cxl_map_pmu_regs(struct pci_dev *pdev, struct cxl_pmu_regs *regs,
- struct cxl_register_map *map)
+int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs)
{
- struct device *dev = &pdev->dev;
+ struct device *dev = map->host;
resource_size_t phys_addr;
phys_addr = map->resource;
@@ -403,15 +402,15 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_pmu_regs, CXL);
static int cxl_map_regblock(struct cxl_register_map *map)
{
- struct device *dev = map->dev;
+ struct device *host = map->host;
map->base = ioremap(map->resource, map->max_size);
if (!map->base) {
- dev_err(dev, "failed to map registers\n");
+ dev_err(host, "failed to map registers\n");
return -ENOMEM;
}
- dev_dbg(dev, "Mapped CXL Memory Device resource %pa\n", &map->resource);
+ dev_dbg(host, "Mapped CXL Memory Device resource %pa\n", &map->resource);
return 0;
}
@@ -425,28 +424,28 @@ static int cxl_probe_regs(struct cxl_register_map *map)
{
struct cxl_component_reg_map *comp_map;
struct cxl_device_reg_map *dev_map;
- struct device *dev = map->dev;
+ struct device *host = map->host;
void __iomem *base = map->base;
switch (map->reg_type) {
case CXL_REGLOC_RBI_COMPONENT:
comp_map = &map->component_map;
- cxl_probe_component_regs(dev, base, comp_map);
- dev_dbg(dev, "Set up component registers\n");
+ cxl_probe_component_regs(host, base, comp_map);
+ dev_dbg(host, "Set up component registers\n");
break;
case CXL_REGLOC_RBI_MEMDEV:
dev_map = &map->device_map;
- cxl_probe_device_regs(dev, base, dev_map);
+ cxl_probe_device_regs(host, base, dev_map);
if (!dev_map->status.valid || !dev_map->mbox.valid ||
!dev_map->memdev.valid) {
- dev_err(dev, "registers not found: %s%s%s\n",
+ dev_err(host, "registers not found: %s%s%s\n",
!dev_map->status.valid ? "status " : "",
!dev_map->mbox.valid ? "mbox " : "",
!dev_map->memdev.valid ? "memdev " : "");
return -ENXIO;
}
- dev_dbg(dev, "Probing device registers...\n");
+ dev_dbg(host, "Probing device registers...\n");
break;
default:
break;
@@ -470,6 +469,42 @@ int cxl_setup_regs(struct cxl_register_map *map)
}
EXPORT_SYMBOL_NS_GPL(cxl_setup_regs, CXL);
+u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb)
+{
+ void __iomem *addr;
+ u16 offset = 0;
+ u32 cap_hdr;
+
+ if (WARN_ON_ONCE(rcrb == CXL_RESOURCE_NONE))
+ return 0;
+
+ if (!request_mem_region(rcrb, SZ_4K, dev_name(dev)))
+ return 0;
+
+ addr = ioremap(rcrb, SZ_4K);
+ if (!addr)
+ goto out;
+
+ cap_hdr = readl(addr + offset);
+ while (PCI_EXT_CAP_ID(cap_hdr) != PCI_EXT_CAP_ID_ERR) {
+ offset = PCI_EXT_CAP_NEXT(cap_hdr);
+
+ /* Offset 0 terminates capability list. */
+ if (!offset)
+ break;
+ cap_hdr = readl(addr + offset);
+ }
+
+ if (offset)
+ dev_dbg(dev, "found AER extended capability (0x%x)\n", offset);
+
+ iounmap(addr);
+out:
+ release_mem_region(rcrb, SZ_4K);
+
+ return offset;
+}
+
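
cxl_rcrb_to_aer() walks the PCIe extended capability list that starts at offset 0 of the
RCRB, looking for the AER capability. Each 32-bit extended capability header carries the
capability ID in bits 15:0 and the next-capability offset in bits 31:20, and an offset
of 0 ends the list; PCI_EXT_CAP_ID_ERR is 0x01. A standalone sketch of the same walk
over an in-memory register image; the image contents below are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* same field layout as the PCIe extended capability header macros */
    #define EXT_CAP_ID(hdr)   ((hdr) & 0x0000ffff)
    #define EXT_CAP_NEXT(hdr) (((hdr) >> 20) & 0xffc)
    #define EXT_CAP_ID_ERR    0x01  /* Advanced Error Reporting */

    /* walk the list in a 4K register image; return the AER offset or 0 (not found) */
    static uint16_t find_aer(const uint32_t *regs)
    {
            uint16_t offset = 0;
            uint32_t hdr = regs[0];

            while (EXT_CAP_ID(hdr) != EXT_CAP_ID_ERR) {
                    offset = EXT_CAP_NEXT(hdr);
                    if (!offset)            /* offset 0 terminates the list */
                            return 0;
                    hdr = regs[offset / 4];
            }
            return offset;
    }

    int main(void)
    {
            uint32_t regs[1024] = { 0 };

            /* fabricate a two-entry list: a non-AER entry at 0, AER at 0x100 */
            regs[0] = (0x100u << 20) | 0x0002;
            regs[0x100 / 4] = 0x0001;       /* AER, end of list */

            printf("AER at %#x\n", find_aer(regs));  /* AER at 0x100 */
            return 0;
    }
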
resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri,
enum cxl_rcrb which)
{
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 76d92561af29..687043ece101 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -221,6 +221,14 @@ struct cxl_regs {
struct_group_tagged(cxl_pmu_regs, pmu_regs,
void __iomem *pmu;
);
+
+ /*
+ * RCH downstream port specific RAS register
+ * @aer: CXL 3.0 8.2.1.1 RCH Downstream Port RCRB
+ */
+ struct_group_tagged(cxl_rch_regs, rch_regs,
+ void __iomem *dport_aer;
+ );
};
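
struct_group_tagged() wraps the listed members in an anonymous union so they can be
reached either directly (regs->dport_aer) or through the tagged sub-struct
(struct cxl_rch_regs). A rough standalone model of what the macro expands to, simplified
from the kernel's stddef helpers (the real macro also handles attributes); the _model
names are illustrative:

    #include <stdio.h>

    /* simplified expansion of struct_group_tagged(TAG, NAME, MEMBERS...) */
    struct cxl_regs_model {
            union {
                    struct {
                            void *dport_aer;
                    };
                    struct cxl_rch_regs_model {
                            void *dport_aer;
                    } rch_regs;
            };
    };

    int main(void)
    {
            struct cxl_regs_model regs = { .rch_regs.dport_aer = (void *)0x1 };

            /* both names alias the same storage */
            printf("%p %p\n", regs.dport_aer, regs.rch_regs.dport_aer);
            return 0;
    }
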
struct cxl_reg_map {
@@ -247,7 +255,7 @@ struct cxl_pmu_reg_map {
/**
* struct cxl_register_map - DVSEC harvested register block mapping parameters
- * @dev: device for devm operations and logging
+ * @host: device for devm operations and logging
* @base: virtual base of the register-block-BAR + @block_offset
* @resource: physical resource base of the register block
* @max_size: maximum mapping size to perform register search
@@ -257,7 +265,7 @@ struct cxl_pmu_reg_map {
* @pmu_map: cxl_reg_maps for CXL Performance Monitoring Units
*/
struct cxl_register_map {
- struct device *dev;
+ struct device *host;
void __iomem *base;
resource_size_t resource;
resource_size_t max_size;
@@ -278,8 +286,7 @@ int cxl_map_component_regs(const struct cxl_register_map *map,
unsigned long map_mask);
int cxl_map_device_regs(const struct cxl_register_map *map,
struct cxl_device_regs *regs);
-int cxl_map_pmu_regs(struct pci_dev *pdev, struct cxl_pmu_regs *regs,
- struct cxl_register_map *map);
+int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs);
enum cxl_regloc_type;
int cxl_count_regblock(struct pci_dev *pdev, enum cxl_regloc_type type);
@@ -321,6 +328,7 @@ enum cxl_decoder_type {
*/
#define CXL_DECODER_MAX_INTERLEAVE 16
+#define CXL_QOS_CLASS_INVALID -1
/**
* struct cxl_decoder - Common CXL HDM Decoder Attributes
@@ -432,6 +440,7 @@ typedef struct cxl_dport *(*cxl_calc_hb_fn)(struct cxl_root_decoder *cxlrd,
* @calc_hb: which host bridge covers the n'th position by granularity
* @platform_data: platform specific configuration data
* @range_lock: sync region autodiscovery by address range
+ * @qos_class: QoS performance class cookie
* @cxlsd: base cxl switch decoder
*/
struct cxl_root_decoder {
@@ -440,6 +449,7 @@ struct cxl_root_decoder {
cxl_calc_hb_fn calc_hb;
void *platform_data;
struct mutex range_lock;
+ int qos_class;
struct cxl_switch_decoder cxlsd;
};
@@ -572,11 +582,10 @@ struct cxl_dax_region {
* @regions: cxl_region_ref instances, regions mapped by this port
* @parent_dport: dport that points to this port in the parent
* @decoder_ida: allocator for decoder ids
- * @comp_map: component register capability mappings
+ * @reg_map: component and ras register mapping parameters
* @nr_dports: number of entries in @dports
* @hdm_end: track last allocated HDM decoder instance for allocation ordering
* @commit_end: cursor to track highest committed decoder for commit ordering
- * @component_reg_phys: component register capability base address (optional)
* @dead: last ep has been removed, force port re-creation
* @depth: How deep this port is relative to the root. depth 0 is the root.
* @cdat: Cached CDAT data
@@ -592,11 +601,10 @@ struct cxl_port {
struct xarray regions;
struct cxl_dport *parent_dport;
struct ida decoder_ida;
- struct cxl_register_map comp_map;
+ struct cxl_register_map reg_map;
int nr_dports;
int hdm_end;
int commit_end;
- resource_size_t component_reg_phys;
bool dead;
unsigned int depth;
struct cxl_cdat {
@@ -620,19 +628,21 @@ struct cxl_rcrb_info {
/**
* struct cxl_dport - CXL downstream port
* @dport_dev: PCI bridge or firmware device representing the downstream link
- * @comp_map: component register capability mappings
+ * @reg_map: component and ras register mapping parameters
* @port_id: unique hardware identifier for dport in decoder target list
* @rcrb: Data about the Root Complex Register Block layout
* @rch: Indicate whether this dport was enumerated in RCH or VH mode
* @port: reference to cxl_port that contains this downstream port
+ * @regs: Dport parsed register blocks
*/
struct cxl_dport {
struct device *dport_dev;
- struct cxl_register_map comp_map;
+ struct cxl_register_map reg_map;
int port_id;
struct cxl_rcrb_info rcrb;
bool rch;
struct cxl_port *port;
+ struct cxl_regs regs;
};
/**
@@ -679,6 +689,7 @@ static inline bool is_cxl_root(struct cxl_port *port)
return port->uport_dev == port->dev.parent;
}
+int cxl_num_decoders_committed(struct cxl_port *port);
bool is_cxl_port(const struct device *dev);
struct cxl_port *to_cxl_port(const struct device *dev);
struct pci_bus;
@@ -706,6 +717,13 @@ struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
struct device *dport_dev, int port_id,
resource_size_t rcrb);
+#ifdef CONFIG_PCIEAER_CXL
+void cxl_setup_parent_dport(struct device *host, struct cxl_dport *dport);
+#else
+static inline void cxl_setup_parent_dport(struct device *host,
+ struct cxl_dport *dport) { }
+#endif
+
struct cxl_decoder *to_cxl_decoder(struct device *dev);
struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev);
struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev);
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 706f8a6d1ef4..a2fcbca253f3 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -84,9 +84,12 @@ static inline bool is_cxl_endpoint(struct cxl_port *port)
return is_cxl_memdev(port->uport_dev);
}
-struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds);
+struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
+ struct cxl_dev_state *cxlds);
+int devm_cxl_sanitize_setup_notifier(struct device *host,
+ struct cxl_memdev *cxlmd);
struct cxl_memdev_state;
-int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds);
+int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds);
int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
resource_size_t base, resource_size_t len,
resource_size_t skipped);
@@ -360,16 +363,16 @@ struct cxl_fw_state {
*
* @state: state of last security operation
* @enabled_cmds: All security commands enabled in the CEL
- * @poll: polling for sanitization is enabled, device has no mbox irq support
* @poll_tmo_secs: polling timeout
+ * @sanitize_active: sanitize completion pending
* @poll_dwork: polling work item
* @sanitize_node: sanitation sysfs file to notify
*/
struct cxl_security_state {
unsigned long state;
DECLARE_BITMAP(enabled_cmds, CXL_SEC_ENABLED_MAX);
- bool poll;
int poll_tmo_secs;
+ bool sanitize_active;
struct delayed_work poll_dwork;
struct kernfs_node *sanitize_node;
};
@@ -397,6 +400,7 @@ enum cxl_devtype {
*
* @dev: The device associated with this CXL state
* @cxlmd: The device representing the CXL.mem capabilities of @dev
+ * @reg_map: component and ras register mapping parameters
* @regs: Parsed register blocks
* @cxl_dvsec: Offset to the PCIe device DVSEC
* @rcd: operating in RCD mode (CXL 3.0 9.11.8 CXL Devices Attached to an RCH)
@@ -404,13 +408,13 @@ enum cxl_devtype {
* @dpa_res: Overall DPA resource tree for the device
* @pmem_res: Active Persistent memory capacity configuration
* @ram_res: Active Volatile memory capacity configuration
- * @component_reg_phys: register base of component registers
* @serial: PCIe Device Serial Number
* @type: Generic Memory Class device or Vendor Specific Memory device
*/
struct cxl_dev_state {
struct device *dev;
struct cxl_memdev *cxlmd;
+ struct cxl_register_map reg_map;
struct cxl_regs regs;
int cxl_dvsec;
bool rcd;
@@ -418,7 +422,6 @@ struct cxl_dev_state {
struct resource dpa_res;
struct resource pmem_res;
struct resource ram_res;
- resource_size_t component_reg_phys;
u64 serial;
enum cxl_devtype type;
};
@@ -883,7 +886,7 @@ static inline void cxl_mem_active_dec(void)
}
#endif
-int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd);
+int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd);
struct cxl_hdm {
struct cxl_component_regs regs;
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 317c7548e4e9..e087febf9af0 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -49,7 +49,6 @@ static int devm_cxl_add_endpoint(struct device *host, struct cxl_memdev *cxlmd,
struct cxl_dport *parent_dport)
{
struct cxl_port *parent_port = parent_dport->port;
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_port *endpoint, *iter, *down;
int rc;
@@ -65,8 +64,8 @@ static int devm_cxl_add_endpoint(struct device *host, struct cxl_memdev *cxlmd,
ep->next = down;
}
- endpoint = devm_cxl_add_port(host, &cxlmd->dev,
- cxlds->component_reg_phys,
+ /* Note: endpoint port component registers are derived from @cxlds */
+ endpoint = devm_cxl_add_port(host, &cxlmd->dev, CXL_RESOURCE_NONE,
parent_dport);
if (IS_ERR(endpoint))
return PTR_ERR(endpoint);
@@ -158,6 +157,8 @@ static int cxl_mem_probe(struct device *dev)
else
endpoint_parent = &parent_port->dev;
+ cxl_setup_parent_dport(dev, dport);
+
device_lock(endpoint_parent);
if (!endpoint_parent->driver) {
dev_err(dev, "CXL port topology %s not enabled\n",
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 44a21ab7add5..0155fb66b580 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -85,25 +85,28 @@ static int cxl_pci_mbox_wait_for_doorbell(struct cxl_dev_state *cxlds)
status & CXLMDEV_DEV_FATAL ? " fatal" : "", \
status & CXLMDEV_FW_HALT ? " firmware-halt" : "")
+/*
+ * Threaded IRQ dev_ids must be globally unique. cxl_dev_id provides a
+ * unique wrapper object for each IRQ within the same cxlds.
+ */
struct cxl_dev_id {
struct cxl_dev_state *cxlds;
};
static int cxl_request_irq(struct cxl_dev_state *cxlds, int irq,
- irq_handler_t handler, irq_handler_t thread_fn)
+ irq_handler_t thread_fn)
{
struct device *dev = cxlds->dev;
struct cxl_dev_id *dev_id;
- /* dev_id must be globally unique and must contain the cxlds */
dev_id = devm_kzalloc(dev, sizeof(*dev_id), GFP_KERNEL);
if (!dev_id)
return -ENOMEM;
dev_id->cxlds = cxlds;
- return devm_request_threaded_irq(dev, irq, handler, thread_fn,
- IRQF_SHARED | IRQF_ONESHOT,
- NULL, dev_id);
+ return devm_request_threaded_irq(dev, irq, NULL, thread_fn,
+ IRQF_SHARED | IRQF_ONESHOT, NULL,
+ dev_id);
}
static bool cxl_mbox_background_complete(struct cxl_dev_state *cxlds)
@@ -128,10 +131,10 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
if (opcode == CXL_MBOX_OP_SANITIZE) {
+ mutex_lock(&mds->mbox_mutex);
if (mds->security.sanitize_node)
- sysfs_notify_dirent(mds->security.sanitize_node);
-
- dev_dbg(cxlds->dev, "Sanitization operation ended\n");
+ mod_delayed_work(system_wq, &mds->security.poll_dwork, 0);
+ mutex_unlock(&mds->mbox_mutex);
} else {
/* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
rcuwait_wake_up(&mds->mbox_wait);
@@ -152,18 +155,16 @@ static void cxl_mbox_sanitize_work(struct work_struct *work)
mutex_lock(&mds->mbox_mutex);
if (cxl_mbox_background_complete(cxlds)) {
mds->security.poll_tmo_secs = 0;
- put_device(cxlds->dev);
-
if (mds->security.sanitize_node)
sysfs_notify_dirent(mds->security.sanitize_node);
+ mds->security.sanitize_active = false;
dev_dbg(cxlds->dev, "Sanitization operation ended\n");
} else {
int timeout = mds->security.poll_tmo_secs + 10;
mds->security.poll_tmo_secs = min(15 * 60, timeout);
- queue_delayed_work(system_wq, &mds->security.poll_dwork,
- timeout * HZ);
+ schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ);
}
mutex_unlock(&mds->mbox_mutex);
}
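
With this rework, sanitize completion is always tracked by the delayed work: the first
poll fires after one second, and each re-arm adds ten seconds to the interval, capped at
fifteen minutes. A standalone model of that schedule; the numbers come straight from the
hunk above, and the helper is a stand-in for the kernel's min():

    #include <stdio.h>

    static int min_int(int a, int b) { return a < b ? a : b; }

    int main(void)
    {
            int poll_tmo_secs = 1;  /* "give first timeout a second" */
            int elapsed = 0;

            for (int i = 0; i < 8; i++) {
                    elapsed += poll_tmo_secs;
                    /* same update as cxl_mbox_sanitize_work() when not complete */
                    poll_tmo_secs = min_int(15 * 60, poll_tmo_secs + 10);
                    printf("poll %d at t=%ds, next in %ds\n",
                           i, elapsed, poll_tmo_secs);
            }
            return 0;
    }
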
@@ -295,18 +296,15 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds,
* and allow userspace to poll(2) for completion.
*/
if (mbox_cmd->opcode == CXL_MBOX_OP_SANITIZE) {
- if (mds->security.poll) {
- /* hold the device throughout */
- get_device(cxlds->dev);
-
- /* give first timeout a second */
- timeout = 1;
- mds->security.poll_tmo_secs = timeout;
- queue_delayed_work(system_wq,
- &mds->security.poll_dwork,
- timeout * HZ);
- }
-
+ if (mds->security.sanitize_active)
+ return -EBUSY;
+
+ /* give first timeout a second */
+ timeout = 1;
+ mds->security.poll_tmo_secs = timeout;
+ mds->security.sanitize_active = true;
+ schedule_delayed_work(&mds->security.poll_dwork,
+ timeout * HZ);
dev_dbg(dev, "Sanitization operation started\n");
goto success;
}
@@ -389,7 +387,9 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
struct device *dev = cxlds->dev;
unsigned long timeout;
+ int irq, msgnum;
u64 md_status;
+ u32 ctrl;
timeout = jiffies + mbox_ready_timeout * HZ;
do {
@@ -437,33 +437,26 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
dev_dbg(dev, "Mailbox payload sized %zu", mds->payload_size);
rcuwait_init(&mds->mbox_wait);
+ INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
- if (cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) {
- u32 ctrl;
- int irq, msgnum;
- struct pci_dev *pdev = to_pci_dev(cxlds->dev);
-
- msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
- irq = pci_irq_vector(pdev, msgnum);
- if (irq < 0)
- goto mbox_poll;
-
- if (cxl_request_irq(cxlds, irq, cxl_pci_mbox_irq, NULL))
- goto mbox_poll;
+ /* background command interrupts are optional */
+ if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ))
+ return 0;
- /* enable background command mbox irq support */
- ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
- ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
- writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
+ msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
+ irq = pci_irq_vector(to_pci_dev(cxlds->dev), msgnum);
+ if (irq < 0)
+ return 0;
+ if (cxl_request_irq(cxlds, irq, cxl_pci_mbox_irq))
return 0;
- }
-mbox_poll:
- mds->security.poll = true;
- INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
+ dev_dbg(cxlds->dev, "Mailbox interrupts enabled\n");
+ /* enable background command mbox irq support */
+ ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
+ ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
+ writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
- dev_dbg(cxlds->dev, "Mailbox interrupts are unsupported");
return 0;
}
@@ -484,7 +477,7 @@ static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev,
resource_size_t component_reg_phys;
*map = (struct cxl_register_map) {
- .dev = &pdev->dev,
+ .host = &pdev->dev,
.resource = CXL_RESOURCE_NONE,
};
@@ -653,7 +646,7 @@ static int cxl_event_req_irq(struct cxl_dev_state *cxlds, u8 setting)
if (irq < 0)
return irq;
- return cxl_request_irq(cxlds, irq, NULL, cxl_event_thread);
+ return cxl_request_irq(cxlds, irq, cxl_event_thread);
}
static int cxl_event_get_int_policy(struct cxl_memdev_state *mds,
@@ -834,16 +827,14 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* If the component registers can't be found, the cxl_pci driver may
* still be useful for management functions so don't return an error.
*/
- cxlds->component_reg_phys = CXL_RESOURCE_NONE;
- rc = cxl_pci_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
+ rc = cxl_pci_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT,
+ &cxlds->reg_map);
if (rc)
dev_warn(&pdev->dev, "No component registers (%d)\n", rc);
- else if (!map.component_map.ras.valid)
+ else if (!cxlds->reg_map.component_map.ras.valid)
dev_dbg(&pdev->dev, "RAS registers not found\n");
- cxlds->component_reg_phys = map.resource;
-
- rc = cxl_map_component_regs(&map, &cxlds->regs.component,
+ rc = cxl_map_component_regs(&cxlds->reg_map, &cxlds->regs.component,
BIT(CXL_CM_CAP_CAP_ID_RAS));
if (rc)
dev_dbg(&pdev->dev, "Failed to map RAS capability.\n");
@@ -882,11 +873,15 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
- cxlmd = devm_cxl_add_memdev(cxlds);
+ cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
- rc = cxl_memdev_setup_fw_upload(mds);
+ rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
+ if (rc)
+ return rc;
+
+ rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd);
if (rc)
return rc;
@@ -900,7 +895,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
break;
}
- rc = cxl_map_pmu_regs(pdev, &pmu_regs, &map);
+ rc = cxl_map_pmu_regs(&map, &pmu_regs);
if (rc) {
dev_dbg(&pdev->dev, "Could not map PMU regs\n");
break;
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index 6240e05b9542..47bc8e0b8590 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -62,6 +62,9 @@ static int cxl_switch_port_probe(struct cxl_port *port)
struct cxl_hdm *cxlhdm;
int rc;
+ /* Cache the CDAT data early so that is_visible() works */
+ read_cdat_data(port);
+
rc = devm_cxl_port_enumerate_dports(port);
if (rc < 0)
return rc;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 4ccae1a3b884..70ba506dabab 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -362,7 +362,7 @@ config INTEL_IOATDMA
config K3_DMA
tristate "Hisilicon K3 DMA support"
- depends on ARCH_HI3xxx || ARCH_HISI || COMPILE_TEST
+ depends on ARCH_HISI || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 4153c2edb049..a8e3615235b8 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -923,7 +923,7 @@ fail:
*
* Return: Always '0'
*/
-static int msgdma_remove(struct platform_device *pdev)
+static void msgdma_remove(struct platform_device *pdev)
{
struct msgdma_device *mdev = platform_get_drvdata(pdev);
@@ -933,8 +933,6 @@ static int msgdma_remove(struct platform_device *pdev)
msgdma_dev_remove(mdev);
dev_notice(&pdev->dev, "Altera mSGDMA driver removed\n");
-
- return 0;
}
#ifdef CONFIG_OF
@@ -952,7 +950,7 @@ static struct platform_driver msgdma_driver = {
.of_match_table = of_match_ptr(msgdma_match),
},
.probe = msgdma_probe,
- .remove = msgdma_remove,
+ .remove_new = msgdma_remove,
};
module_platform_driver(msgdma_driver);
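
Many of the hunks that follow are the same mechanical conversion: the driver's remove
callback always returned 0, so it moves from .remove (int return) to .remove_new (void
return) and the trailing "return 0;" is dropped. A stubbed standalone model of the
signature migration; these structs are not the kernel API, only an illustration of the
callback shapes involved:

    #include <stdio.h>

    struct platform_device { const char *name; };

    struct platform_driver_model {
            int (*remove)(struct platform_device *);        /* legacy, int return */
            void (*remove_new)(struct platform_device *);   /* converted drivers */
    };

    static void msgdma_remove_model(struct platform_device *pdev)
    {
            printf("%s removed\n", pdev->name);
            /* nothing to return: an error code here could not be acted on anyway */
    }

    static struct platform_driver_model drv = {
            .remove_new = msgdma_remove_model,
    };

    int main(void)
    {
            struct platform_device pdev = { .name = "altera-msgdma" };

            /* the core prefers .remove_new when it is set */
            if (drv.remove_new)
                    drv.remove_new(&pdev);
            else if (drv.remove)
                    drv.remove(&pdev);
            return 0;
    }
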
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
index 3af795635c5c..5b63996640d9 100644
--- a/drivers/dma/apple-admac.c
+++ b/drivers/dma/apple-admac.c
@@ -128,7 +128,7 @@ struct admac_data {
int irq;
int irq_index;
int nchannels;
- struct admac_chan channels[];
+ struct admac_chan channels[] __counted_by(nchannels);
};
struct admac_tx {
@@ -925,7 +925,7 @@ free_reset:
return err;
}
-static int admac_remove(struct platform_device *pdev)
+static void admac_remove(struct platform_device *pdev)
{
struct admac_data *ad = platform_get_drvdata(pdev);
@@ -933,8 +933,6 @@ static int admac_remove(struct platform_device *pdev)
dma_async_device_unregister(&ad->dma);
free_irq(ad->irq, ad);
reset_control_rearm(ad->rstc);
-
- return 0;
}
static const struct of_device_id admac_of_match[] = {
@@ -949,7 +947,7 @@ static struct platform_driver apple_admac_driver = {
.of_match_table = admac_of_match,
},
.probe = admac_probe,
- .remove = admac_remove,
+ .remove_new = admac_remove,
};
module_platform_driver(apple_admac_driver);
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index b2876f67471f..fb89ecbf0cc5 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -239,7 +239,7 @@ struct at_desc {
bool memset_buffer;
dma_addr_t memset_paddr;
int *memset_vaddr;
- struct atdma_sg sg[];
+ struct atdma_sg sg[] __counted_by(sglen);
};
/*-- Channels --------------------------------------------------------*/
@@ -2100,7 +2100,7 @@ err_irq:
return err;
}
-static int at_dma_remove(struct platform_device *pdev)
+static void at_dma_remove(struct platform_device *pdev)
{
struct at_dma *atdma = platform_get_drvdata(pdev);
struct dma_chan *chan, *_chan;
@@ -2122,8 +2122,6 @@ static int at_dma_remove(struct platform_device *pdev)
}
clk_disable_unprepare(atdma->clk);
-
- return 0;
}
static void at_dma_shutdown(struct platform_device *pdev)
@@ -2242,7 +2240,7 @@ static const struct dev_pm_ops __maybe_unused at_dma_dev_pm_ops = {
};
static struct platform_driver at_dma_driver = {
- .remove = at_dma_remove,
+ .remove_new = at_dma_remove,
.shutdown = at_dma_shutdown,
.id_table = atdma_devtypes,
.driver = {
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index c3b37168b21f..299396121e6d 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -2431,7 +2431,7 @@ err_free_irq:
return ret;
}
-static int at_xdmac_remove(struct platform_device *pdev)
+static void at_xdmac_remove(struct platform_device *pdev)
{
struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
int i;
@@ -2452,8 +2452,6 @@ static int at_xdmac_remove(struct platform_device *pdev)
tasklet_kill(&atchan->tasklet);
at_xdmac_free_chan_resources(&atchan->chan);
}
-
- return 0;
}
static const struct dev_pm_ops __maybe_unused atmel_xdmac_dev_pm_ops = {
@@ -2478,7 +2476,7 @@ MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);
static struct platform_driver at_xdmac_driver = {
.probe = at_xdmac_probe,
- .remove = at_xdmac_remove,
+ .remove_new = at_xdmac_remove,
.driver = {
.name = "at_xdmac",
.of_match_table = of_match_ptr(atmel_xdmac_dt_ids),
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index 94ea35330eb5..fbaacb4c19b2 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -1734,7 +1734,7 @@ fail_free_mchan:
return ret;
}
-static int sba_remove(struct platform_device *pdev)
+static void sba_remove(struct platform_device *pdev)
{
struct sba_device *sba = platform_get_drvdata(pdev);
@@ -1745,8 +1745,6 @@ static int sba_remove(struct platform_device *pdev)
sba_freeup_channel_resources(sba);
mbox_free_channel(sba->mchan);
-
- return 0;
}
static const struct of_device_id sba_of_match[] = {
@@ -1758,7 +1756,7 @@ MODULE_DEVICE_TABLE(of, sba_of_match);
static struct platform_driver sba_driver = {
.probe = sba_probe,
- .remove = sba_remove,
+ .remove_new = sba_remove,
.driver = {
.name = "bcm-sba-raid",
.of_match_table = sba_of_match,
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 0807fb9eb262..9d74fe97452e 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -1019,19 +1019,17 @@ err_no_dma:
return rc;
}
-static int bcm2835_dma_remove(struct platform_device *pdev)
+static void bcm2835_dma_remove(struct platform_device *pdev)
{
struct bcm2835_dmadev *od = platform_get_drvdata(pdev);
dma_async_device_unregister(&od->ddev);
bcm2835_dma_free(od);
-
- return 0;
}
static struct platform_driver bcm2835_dma_driver = {
.probe = bcm2835_dma_probe,
- .remove = bcm2835_dma_remove,
+ .remove_new = bcm2835_dma_remove,
.driver = {
.name = "bcm2835-dma",
.of_match_table = of_match_ptr(bcm2835_dma_of_match),
diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c
index 80096f94032d..0bbaa7620bdd 100644
--- a/drivers/dma/bestcomm/bestcomm.c
+++ b/drivers/dma/bestcomm/bestcomm.c
@@ -455,7 +455,7 @@ error_ofput:
}
-static int mpc52xx_bcom_remove(struct platform_device *op)
+static void mpc52xx_bcom_remove(struct platform_device *op)
{
/* Clean up the engine */
bcom_engine_cleanup();
@@ -473,8 +473,6 @@ static int mpc52xx_bcom_remove(struct platform_device *op)
/* Release memory */
kfree(bcom_eng);
bcom_eng = NULL;
-
- return 0;
}
static const struct of_device_id mpc52xx_bcom_of_match[] = {
@@ -488,7 +486,7 @@ MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match);
static struct platform_driver mpc52xx_bcom_of_platform_driver = {
.probe = mpc52xx_bcom_probe,
- .remove = mpc52xx_bcom_remove,
+ .remove_new = mpc52xx_bcom_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = mpc52xx_bcom_of_match,
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index fc7cdad37161..2457a420c13d 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -117,7 +117,7 @@ struct axi_dmac_desc {
unsigned int num_submitted;
unsigned int num_completed;
unsigned int num_sgs;
- struct axi_dmac_sg sg[];
+ struct axi_dmac_sg sg[] __counted_by(num_sgs);
};
struct axi_dmac_chan {
@@ -484,12 +484,11 @@ static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
if (!desc)
return NULL;
+ desc->num_sgs = num_sgs;
for (i = 0; i < num_sgs; i++)
desc->sg[i].id = AXI_DMAC_SG_UNUSED;
- desc->num_sgs = num_sgs;
-
return desc;
}
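
__counted_by(num_sgs) tells the compiler that desc->sg[] holds num_sgs elements, so
run-time bounds checks compare accesses against that field; the hunk above therefore
assigns num_sgs before the loop that writes sg[i]. A standalone model of the same
"set the counter before touching the flexible array" rule, in plain C without the
attribute:

    #include <stdlib.h>

    struct desc {
            unsigned int num_sgs;   /* would be __counted_by(num_sgs) in the kernel */
            int sg[];               /* flexible array member */
    };

    static struct desc *alloc_desc(unsigned int num_sgs)
    {
            struct desc *d = calloc(1, sizeof(*d) + num_sgs * sizeof(d->sg[0]));

            if (!d)
                    return NULL;
            d->num_sgs = num_sgs;           /* counter first ... */
            for (unsigned int i = 0; i < num_sgs; i++)
                    d->sg[i] = -1;          /* ... then the array elements */
            return d;
    }

    int main(void)
    {
            struct desc *d = alloc_desc(4);

            free(d);
            return 0;
    }

With bounds checking enabled, writing sg[i] while the counter is still 0 would trip the
checker, which is the motivation for the reorder in the hunk above.
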
@@ -1029,7 +1028,7 @@ err_clk_disable:
return ret;
}
-static int axi_dmac_remove(struct platform_device *pdev)
+static void axi_dmac_remove(struct platform_device *pdev)
{
struct axi_dmac *dmac = platform_get_drvdata(pdev);
@@ -1038,8 +1037,6 @@ static int axi_dmac_remove(struct platform_device *pdev)
tasklet_kill(&dmac->chan.vchan.task);
dma_async_device_unregister(&dmac->dma_dev);
clk_disable_unprepare(dmac->clk);
-
- return 0;
}
static const struct of_device_id axi_dmac_of_match_table[] = {
@@ -1054,7 +1051,7 @@ static struct platform_driver axi_dmac_driver = {
.of_match_table = axi_dmac_of_match_table,
},
.probe = axi_dmac_probe,
- .remove = axi_dmac_remove,
+ .remove_new = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index adbd47bd6adf..c9cfa341db51 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -1008,7 +1008,7 @@ err_disable_clk:
return ret;
}
-static int jz4780_dma_remove(struct platform_device *pdev)
+static void jz4780_dma_remove(struct platform_device *pdev)
{
struct jz4780_dma_dev *jzdma = platform_get_drvdata(pdev);
int i;
@@ -1020,8 +1020,6 @@ static int jz4780_dma_remove(struct platform_device *pdev)
for (i = 0; i < jzdma->soc_data->nb_channels; i++)
tasklet_kill(&jzdma->chan[i].vchan.task);
-
- return 0;
}
static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
@@ -1124,7 +1122,7 @@ MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);
static struct platform_driver jz4780_dma_driver = {
.probe = jz4780_dma_probe,
- .remove = jz4780_dma_remove,
+ .remove_new = jz4780_dma_remove,
.driver = {
.name = "jz4780-dma",
.of_match_table = jz4780_dma_dt_match,
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index dd02f84e404d..a86a81ff0caa 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -63,6 +63,17 @@ static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
}
static inline void
+axi_dma_iowrite64(struct axi_dma_chip *chip, u32 reg, u64 val)
+{
+ iowrite64(val, chip->regs + reg);
+}
+
+static inline u64 axi_dma_ioread64(struct axi_dma_chip *chip, u32 reg)
+{
+ return ioread64(chip->regs + reg);
+}
+
+static inline void
axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
{
iowrite32(val, chan->chan_regs + reg);
@@ -182,38 +193,73 @@ static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
static inline void axi_chan_disable(struct axi_dma_chan *chan)
{
- u32 val;
-
- val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
- val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
- if (chan->chip->dw->hdata->reg_map_8_channels)
- val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
- else
- val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
- axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
+ u64 val;
+
+ if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
+ val = axi_dma_ioread64(chan->chip, DMAC_CHEN);
+ if (chan->id >= DMAC_CHAN_16) {
+ val &= ~((u64)(BIT(chan->id) >> DMAC_CHAN_16)
+ << (DMAC_CHAN_EN_SHIFT + DMAC_CHAN_BLOCK_SHIFT));
+ val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16)
+ << (DMAC_CHAN_EN2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);
+ } else {
+ val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
+ val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
+ }
+ axi_dma_iowrite64(chan->chip, DMAC_CHEN, val);
+ } else {
+ val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
+ val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
+ if (chan->chip->dw->hdata->reg_map_8_channels)
+ val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
+ else
+ val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
+ axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
+ }
}
static inline void axi_chan_enable(struct axi_dma_chan *chan)
{
- u32 val;
-
- val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
- if (chan->chip->dw->hdata->reg_map_8_channels)
- val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
- BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
- else
- val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
+ u64 val;
+
+ if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
+ val = axi_dma_ioread64(chan->chip, DMAC_CHEN);
+ if (chan->id >= DMAC_CHAN_16) {
+ val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16)
+ << (DMAC_CHAN_EN_SHIFT + DMAC_CHAN_BLOCK_SHIFT) |
+ (u64)(BIT(chan->id) >> DMAC_CHAN_16)
+ << (DMAC_CHAN_EN2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);
+ } else {
+ val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
- axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
+ }
+ axi_dma_iowrite64(chan->chip, DMAC_CHEN, val);
+ } else {
+ val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
+ if (chan->chip->dw->hdata->reg_map_8_channels) {
+ val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
+ BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
+ } else {
+ val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
+ BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
+ }
+ axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
+ }
}
static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
{
- u32 val;
+ u64 val;
- val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
+ if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16)
+ val = axi_dma_ioread64(chan->chip, DMAC_CHEN);
+ else
+ val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
- return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
+ if (chan->id >= DMAC_CHAN_16)
+ return !!(val & ((u64)(BIT(chan->id) >> DMAC_CHAN_16) << DMAC_CHAN_BLOCK_SHIFT));
+ else
+ return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
}
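
For controllers with more than 16 channels the enable and suspend controls become 64-bit
registers: channels 0-15 keep their old bit positions in the low word, while channel
N >= 16 uses bit (N - 16) shifted up by the 32-bit block offset plus the field's base
shift. A standalone worked example of the mask construction; DMAC_CHAN_EN_SHIFT is
assumed to be 0 here because its value is not visible in this hunk, and
DMAC_CHAN_EN2_WE_SHIFT comes from the dw-axi-dmac.h hunk further below:

    #include <stdio.h>
    #include <stdint.h>

    #define BIT(n)                  (1UL << (n))
    #define DMAC_CHAN_16            16
    #define DMAC_CHAN_BLOCK_SHIFT   32
    #define DMAC_CHAN_EN_SHIFT      0       /* assumed; not shown in this hunk */
    #define DMAC_CHAN_EN2_WE_SHIFT  16      /* from the dw-axi-dmac.h hunk below */

    int main(void)
    {
            unsigned int id = 20;   /* example channel above the 16-channel boundary */
            uint64_t en, we;

            /* same expressions as axi_chan_enable() for id >= DMAC_CHAN_16 */
            en = (uint64_t)(BIT(id) >> DMAC_CHAN_16)
                    << (DMAC_CHAN_EN_SHIFT + DMAC_CHAN_BLOCK_SHIFT);
            we = (uint64_t)(BIT(id) >> DMAC_CHAN_16)
                    << (DMAC_CHAN_EN2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);

            printf("channel %u: enable bit %d, write-enable bit %d\n",
                   id, __builtin_ctzll(en), __builtin_ctzll(we));
            /* enable bit 36, write-enable bit 52 for channel 20 */
            return 0;
    }
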
static void axi_dma_hw_init(struct axi_dma_chip *chip)
@@ -1175,20 +1221,34 @@ static int dma_chan_pause(struct dma_chan *dchan)
struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
unsigned long flags;
unsigned int timeout = 20; /* timeout iterations */
- u32 val;
+ u64 val;
spin_lock_irqsave(&chan->vc.lock, flags);
- if (chan->chip->dw->hdata->reg_map_8_channels) {
- val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
- val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
- BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
- axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
+ if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
+ val = axi_dma_ioread64(chan->chip, DMAC_CHSUSPREG);
+ if (chan->id >= DMAC_CHAN_16) {
+ val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16)
+ << (DMAC_CHAN_SUSP2_SHIFT + DMAC_CHAN_BLOCK_SHIFT) |
+ (u64)(BIT(chan->id) >> DMAC_CHAN_16)
+ << (DMAC_CHAN_SUSP2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);
+ } else {
+ val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
+ BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
+ }
+ axi_dma_iowrite64(chan->chip, DMAC_CHSUSPREG, val);
} else {
- val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
- val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
+ if (chan->chip->dw->hdata->reg_map_8_channels) {
+ val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
+ val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
+ BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
+ axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
+ } else {
+ val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
+ val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
- axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
+ axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, (u32)val);
+ }
}
do {
@@ -1210,18 +1270,32 @@ static int dma_chan_pause(struct dma_chan *dchan)
/* Called in chan locked context */
static inline void axi_chan_resume(struct axi_dma_chan *chan)
{
- u32 val;
-
- if (chan->chip->dw->hdata->reg_map_8_channels) {
- val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
- val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
- val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
- axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
+ u64 val;
+
+ if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
+ val = axi_dma_ioread64(chan->chip, DMAC_CHSUSPREG);
+ if (chan->id >= DMAC_CHAN_16) {
+ val &= ~((u64)(BIT(chan->id) >> DMAC_CHAN_16)
+ << (DMAC_CHAN_SUSP2_SHIFT + DMAC_CHAN_BLOCK_SHIFT));
+ val |= ((u64)(BIT(chan->id) >> DMAC_CHAN_16)
+ << (DMAC_CHAN_SUSP2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT));
+ } else {
+ val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
+ val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
+ }
+ axi_dma_iowrite64(chan->chip, DMAC_CHSUSPREG, val);
} else {
- val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
- val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
- val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
- axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
+ if (chan->chip->dw->hdata->reg_map_8_channels) {
+ val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
+ val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
+ val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
+ axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
+ } else {
+ val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
+ val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
+ val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
+ axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, (u32)val);
+ }
}
chan->is_paused = false;
@@ -1535,7 +1609,7 @@ err_pm_disable:
return ret;
}
-static int dw_remove(struct platform_device *pdev)
+static void dw_remove(struct platform_device *pdev)
{
struct axi_dma_chip *chip = platform_get_drvdata(pdev);
struct dw_axi_dma *dw = chip->dw;
@@ -1564,8 +1638,6 @@ static int dw_remove(struct platform_device *pdev)
list_del(&chan->vc.chan.device_node);
tasklet_kill(&chan->vc.task);
}
-
- return 0;
}
static const struct dev_pm_ops dw_axi_dma_pm_ops = {
@@ -1588,7 +1660,7 @@ MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
static struct platform_driver dw_driver = {
.probe = dw_probe,
- .remove = dw_remove,
+ .remove_new = dw_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = dw_dma_of_id_table,
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
index eb267cb24f67..454904d99654 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
@@ -18,7 +18,7 @@
#include "../virt-dma.h"
-#define DMAC_MAX_CHANNELS 16
+#define DMAC_MAX_CHANNELS 32
#define DMAC_MAX_MASTERS 2
#define DMAC_MAX_BLK_SIZE 0x200000
@@ -222,6 +222,10 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan)
/* DMAC_CHEN2 */
#define DMAC_CHAN_EN2_WE_SHIFT 16
+/* DMAC CHAN BLOCKS */
+#define DMAC_CHAN_BLOCK_SHIFT 32
+#define DMAC_CHAN_16 16
+
/* DMAC_CHSUSP */
#define DMAC_CHAN_SUSP2_SHIFT 0
#define DMAC_CHAN_SUSP2_WE_SHIFT 16
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 47f2292dba98..7d9d4c951724 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -93,7 +93,7 @@ err_dw_dma_probe:
return err;
}
-static int dw_remove(struct platform_device *pdev)
+static void dw_remove(struct platform_device *pdev)
{
struct dw_dma_chip_pdata *data = platform_get_drvdata(pdev);
struct dw_dma_chip *chip = data->chip;
@@ -109,8 +109,6 @@ static int dw_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(chip->clk);
-
- return 0;
}
static void dw_shutdown(struct platform_device *pdev)
@@ -193,7 +191,7 @@ static const struct dev_pm_ops dw_dev_pm_ops = {
static struct platform_driver dw_driver = {
.probe = dw_probe,
- .remove = dw_remove,
+ .remove_new = dw_remove,
.shutdown = dw_shutdown,
.driver = {
.name = DRV_NAME,
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 5c4a448a1254..d6c60635e90d 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -213,7 +213,7 @@ struct ep93xx_dma_engine {
#define INTERRUPT_NEXT_BUFFER 2
size_t num_channels;
- struct ep93xx_dma_chan channels[];
+ struct ep93xx_dma_chan channels[] __counted_by(num_channels);
};
static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
index a42a37634881..7958ac33e36c 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
@@ -814,7 +814,6 @@ static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
static struct fsl_mc_driver dpaa2_qdma_driver = {
.driver = {
.name = "dpaa2-qdma",
- .owner = THIS_MODULE,
},
.probe = dpaa2_qdma_probe,
.remove = dpaa2_qdma_remove,
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 40d50cc3d75a..bb5221158a77 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -225,7 +225,7 @@ struct fsl_edma_engine {
bool big_endian;
struct edma_regs regs;
u64 chan_masked;
- struct fsl_edma_chan chans[];
+ struct fsl_edma_chan chans[] __counted_by(n_chans);
};
#define edma_read_tcdreg(chan, __name) \
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 8c4ed7012e23..4635e16d7705 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -13,13 +13,11 @@
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
+#include <linux/property.h>
#include "fsl-edma-common.h"
@@ -232,10 +230,8 @@ static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engi
/* request channel irq */
fsl_chan->txirq = platform_get_irq(pdev, i);
- if (fsl_chan->txirq < 0) {
- dev_err(&pdev->dev, "Can't get chan %d's irq.\n", i);
+ if (fsl_chan->txirq < 0)
return -EINVAL;
- }
ret = devm_request_irq(&pdev->dev, fsl_chan->txirq,
fsl_edma3_tx_handler, IRQF_SHARED,
@@ -418,8 +414,6 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng
static int fsl_edma_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id =
- of_match_device(fsl_edma_dt_ids, &pdev->dev);
struct device_node *np = pdev->dev.of_node;
struct fsl_edma_engine *fsl_edma;
const struct fsl_edma_drvdata *drvdata = NULL;
@@ -428,8 +422,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
int chans;
int ret, i;
- if (of_id)
- drvdata = of_id->data;
+ drvdata = device_get_match_data(&pdev->dev);
if (!drvdata) {
dev_err(&pdev->dev, "unable to find driver data\n");
return -EINVAL;
@@ -617,7 +610,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
return 0;
}
-static int fsl_edma_remove(struct platform_device *pdev)
+static void fsl_edma_remove(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);
@@ -627,8 +620,6 @@ static int fsl_edma_remove(struct platform_device *pdev)
of_dma_controller_free(np);
dma_async_device_unregister(&fsl_edma->dma_dev);
fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
-
- return 0;
}
static int fsl_edma_suspend_late(struct device *dev)
@@ -692,7 +683,7 @@ static struct platform_driver fsl_edma_driver = {
.pm = &fsl_edma_pm_ops,
},
.probe = fsl_edma_probe,
- .remove = fsl_edma_remove,
+ .remove_new = fsl_edma_remove,
};
static int __init fsl_edma_init(void)
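fsl-edma-main.c above, like k3dma, mmp_tdma, mv_xor, st_fdma and others further down, drops the of_match_device()/of_id->data boilerplate in favour of device_get_match_data() from <linux/property.h>, which resolves the match data through the generic fwnode layer and removes the need for <linux/of_device.h>. A sketch of the resulting probe fragment, with foo_* as illustrative placeholders:

    #include <linux/property.h>

    static int foo_probe(struct platform_device *pdev)
    {
            const struct foo_drvdata *drvdata;

            drvdata = device_get_match_data(&pdev->dev);
            if (!drvdata)
                    return -EINVAL;

            /* use drvdata to configure the controller */
            return 0;
    }

Where the match data is an integer constant rather than a pointer (as in the mv_xor and hidma hunks below), the returned pointer is converted back through (uintptr_t) instead of being cast straight to the enum type.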
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index a8cc8a4bc610..47cb28468049 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -1266,7 +1266,7 @@ static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
}
}
-static int fsl_qdma_remove(struct platform_device *pdev)
+static void fsl_qdma_remove(struct platform_device *pdev)
{
int i;
struct fsl_qdma_queue *status;
@@ -1283,7 +1283,6 @@ static int fsl_qdma_remove(struct platform_device *pdev)
dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
status->n_cq, status->cq, status->bus_addr);
}
- return 0;
}
static const struct of_device_id fsl_qdma_dt_ids[] = {
@@ -1298,7 +1297,7 @@ static struct platform_driver fsl_qdma_driver = {
.of_match_table = fsl_qdma_dt_ids,
},
.probe = fsl_qdma_probe,
- .remove = fsl_qdma_remove,
+ .remove_new = fsl_qdma_remove,
};
module_platform_driver(fsl_qdma_driver);
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index 0b9ca93ce3dc..014ff523d5ec 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -857,7 +857,7 @@ static void fsl_re_remove_chan(struct fsl_re_chan *chan)
chan->oub_phys_addr);
}
-static int fsl_re_remove(struct platform_device *ofdev)
+static void fsl_re_remove(struct platform_device *ofdev)
{
struct fsl_re_drv_private *re_priv;
struct device *dev;
@@ -872,8 +872,6 @@ static int fsl_re_remove(struct platform_device *ofdev)
/* Unregister the driver */
dma_async_device_unregister(&re_priv->dma_dev);
-
- return 0;
}
static const struct of_device_id fsl_re_ids[] = {
@@ -888,7 +886,7 @@ static struct platform_driver fsl_re_driver = {
.of_match_table = fsl_re_ids,
},
.probe = fsl_re_probe,
- .remove = fsl_re_remove,
+ .remove_new = fsl_re_remove,
};
module_platform_driver(fsl_re_driver);
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index ddcf736d283d..18a6c4bf6275 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1306,7 +1306,7 @@ out_return:
return err;
}
-static int fsldma_of_remove(struct platform_device *op)
+static void fsldma_of_remove(struct platform_device *op)
{
struct fsldma_device *fdev;
unsigned int i;
@@ -1324,8 +1324,6 @@ static int fsldma_of_remove(struct platform_device *op)
iounmap(fdev->regs);
kfree(fdev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -1406,7 +1404,7 @@ static struct platform_driver fsldma_of_driver = {
#endif
},
.probe = fsldma_of_probe,
- .remove = fsldma_of_remove,
+ .remove_new = fsldma_of_remove,
};
/*----------------------------------------------------------------------------*/
diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c
index c1350a36fddd..4c47bff81064 100644
--- a/drivers/dma/hisi_dma.c
+++ b/drivers/dma/hisi_dma.c
@@ -163,7 +163,7 @@ struct hisi_dma_dev {
u32 chan_depth;
enum hisi_dma_reg_layout reg_layout;
void __iomem *queue_base; /* queue region start of register */
- struct hisi_dma_chan chan[];
+ struct hisi_dma_chan chan[] __counted_by(chan_num);
};
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 0ac634a51c5e..78a938969d7d 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -660,13 +660,11 @@ static int idma64_platform_probe(struct platform_device *pdev)
return 0;
}
-static int idma64_platform_remove(struct platform_device *pdev)
+static void idma64_platform_remove(struct platform_device *pdev)
{
struct idma64_chip *chip = platform_get_drvdata(pdev);
idma64_remove(chip);
-
- return 0;
}
static int __maybe_unused idma64_pm_suspend(struct device *dev)
@@ -691,7 +689,7 @@ static const struct dev_pm_ops idma64_dev_pm_ops = {
static struct platform_driver idma64_platform_driver = {
.probe = idma64_platform_probe,
- .remove = idma64_platform_remove,
+ .remove_new = idma64_platform_remove,
.driver = {
.name = LPSS_IDMA64_DRIVER_NAME,
.pm = &idma64_dev_pm_ops,
diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
index dc096839ac63..c5e679070e46 100644
--- a/drivers/dma/idxd/Makefile
+++ b/drivers/dma/idxd/Makefile
@@ -1,12 +1,12 @@
ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=IDXD
+obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o
+idxd_bus-y := bus.o
+
obj-$(CONFIG_INTEL_IDXD) += idxd.o
idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o debugfs.o
idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o
-obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o
-idxd_bus-y := bus.o
-
obj-$(CONFIG_INTEL_IDXD_COMPAT) += idxd_compat.o
idxd_compat-y := compat.o
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index d32deb9b4e3d..0423655f5a88 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -509,6 +509,7 @@ void idxd_wq_del_cdev(struct idxd_wq *wq)
static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
{
+ struct device *dev = &idxd_dev->conf_dev;
struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
struct idxd_device *idxd = wq->idxd;
int rc;
@@ -536,6 +537,12 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
mutex_lock(&wq->wq_lock);
+ if (!idxd_wq_driver_name_match(wq, dev)) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME;
+ rc = -ENODEV;
+ goto wq_err;
+ }
+
wq->wq = create_workqueue(dev_name(wq_confdev(wq)));
if (!wq->wq) {
rc = -ENOMEM;
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index 07623fb0f52f..47a01893cfdb 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -306,6 +306,12 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
return -ENXIO;
mutex_lock(&wq->wq_lock);
+ if (!idxd_wq_driver_name_match(wq, dev)) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME;
+ rc = -ENODEV;
+ goto err;
+ }
+
wq->type = IDXD_WQT_KERNEL;
rc = drv_enable_wq(wq);
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index e269ca1f4862..1e89c80a07fc 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -159,6 +159,8 @@ struct idxd_cdev {
int minor;
};
+#define DRIVER_NAME_SIZE 128
+
#define IDXD_ALLOCATED_BATCH_SIZE 128U
#define WQ_NAME_SIZE 1024
#define WQ_TYPE_SIZE 10
@@ -227,6 +229,8 @@ struct idxd_wq {
/* Lock to protect upasid_xa access. */
struct mutex uc_lock;
struct xarray upasid_xa;
+
+ char driver_name[DRIVER_NAME_SIZE + 1];
};
struct idxd_engine {
@@ -646,6 +650,11 @@ static inline void idxd_wqcfg_set_max_batch_shift(int idxd_type, union wqcfg *wq
wqcfg->max_batch_shift = max_batch_shift;
}
+static inline int idxd_wq_driver_name_match(struct idxd_wq *wq, struct device *dev)
+{
+ return (strncmp(wq->driver_name, dev->driver->name, strlen(dev->driver->name)) == 0);
+}
+
int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv,
struct module *module, const char *mod_name);
#define idxd_driver_register(driver) \
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index b501320a9c7a..2183d7f9cdbd 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -434,8 +434,8 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
val |= IDXD_INTC_ERR;
for (i = 0; i < 4; i++)
- dev_warn(dev, "err[%d]: %#16.16llx\n",
- i, idxd->sw_err.bits[i]);
+ dev_warn_ratelimited(dev, "err[%d]: %#16.16llx\n",
+ i, idxd->sw_err.bits[i]);
err = true;
}
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 7caba90d85b3..523ae0dff7d4 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1259,6 +1259,39 @@ err:
static struct device_attribute dev_attr_wq_op_config =
__ATTR(op_config, 0644, wq_op_config_show, wq_op_config_store);
+static ssize_t wq_driver_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = confdev_to_wq(dev);
+
+ return sysfs_emit(buf, "%s\n", wq->driver_name);
+}
+
+static ssize_t wq_driver_name_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_wq *wq = confdev_to_wq(dev);
+ char *input, *pos;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ if (strlen(buf) > DRIVER_NAME_SIZE || strlen(buf) == 0)
+ return -EINVAL;
+
+ input = kstrndup(buf, count, GFP_KERNEL);
+ if (!input)
+ return -ENOMEM;
+
+ pos = strim(input);
+ memset(wq->driver_name, 0, DRIVER_NAME_SIZE + 1);
+ sprintf(wq->driver_name, "%s", pos);
+ kfree(input);
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_driver_name =
+ __ATTR(driver_name, 0644, wq_driver_name_show, wq_driver_name_store);
+
static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_clients.attr,
&dev_attr_wq_state.attr,
@@ -1278,6 +1311,7 @@ static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_occupancy.attr,
&dev_attr_wq_enqcmds_retries.attr,
&dev_attr_wq_op_config.attr,
+ &dev_attr_wq_driver_name.attr,
NULL,
};
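The idxd changes add a per-workqueue driver_name attribute and make the dmaengine and user sub-driver probes fail with IDXD_SCMD_WQ_NO_DRV_NAME unless the configured name matches the driver being bound, so a workqueue can no longer be claimed by a sub-driver its administrator did not intend. A hedged userspace illustration of the intended flow follows; the sysfs path and the "user" driver name are assumptions about the dsa bus layout, not quoted from this diff:

    /* Illustration only: select the user sub-driver for wq0.0 before binding it.
     * The path and the "user" name below are assumed, not taken from the patch. */
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/sys/bus/dsa/devices/wq0.0/driver_name", O_WRONLY);

            if (fd < 0)
                    return 1;
            if (write(fd, "user", 4) != 4) {
                    close(fd);
                    return 1;
            }
            close(fd);
            return 0;
    }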
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index 9be0d3226e19..0532dd2640dc 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -1017,7 +1017,7 @@ suspend:
return ret;
}
-static int mdc_dma_remove(struct platform_device *pdev)
+static void mdc_dma_remove(struct platform_device *pdev)
{
struct mdc_dma *mdma = platform_get_drvdata(pdev);
struct mdc_chan *mchan, *next;
@@ -1037,8 +1037,6 @@ static int mdc_dma_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
img_mdc_runtime_suspend(&pdev->dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1078,7 +1076,7 @@ static struct platform_driver mdc_dma_driver = {
.of_match_table = of_match_ptr(mdc_dma_of_match),
},
.probe = mdc_dma_probe,
- .remove = mdc_dma_remove,
+ .remove_new = mdc_dma_remove,
};
module_platform_driver(mdc_dma_driver);
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 114f254b9f50..ebf7c115d553 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -1216,7 +1216,7 @@ static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *
}
}
-static int imxdma_remove(struct platform_device *pdev)
+static void imxdma_remove(struct platform_device *pdev)
{
struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
@@ -1229,8 +1229,6 @@ static int imxdma_remove(struct platform_device *pdev)
clk_disable_unprepare(imxdma->dma_ipg);
clk_disable_unprepare(imxdma->dma_ahb);
-
- return 0;
}
static struct platform_driver imxdma_driver = {
@@ -1238,7 +1236,7 @@ static struct platform_driver imxdma_driver = {
.name = "imx-dma",
.of_match_table = imx_dma_of_dev_id,
},
- .remove = imxdma_remove,
+ .remove_new = imxdma_remove,
};
static int __init imxdma_module_init(void)
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 51012bd39900..f81ecf5863e8 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -2358,7 +2358,7 @@ err_clk:
return ret;
}
-static int sdma_remove(struct platform_device *pdev)
+static void sdma_remove(struct platform_device *pdev)
{
struct sdma_engine *sdma = platform_get_drvdata(pdev);
int i;
@@ -2377,7 +2377,6 @@ static int sdma_remove(struct platform_device *pdev)
}
platform_set_drvdata(pdev, NULL);
- return 0;
}
static struct platform_driver sdma_driver = {
@@ -2385,7 +2384,7 @@ static struct platform_driver sdma_driver = {
.name = "imx-sdma",
.of_match_table = sdma_dt_ids,
},
- .remove = sdma_remove,
+ .remove_new = sdma_remove,
.probe = sdma_probe,
};
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index ecdaada95120..5de8c21d41e7 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -15,7 +15,6 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_dma.h>
@@ -839,7 +838,6 @@ static int k3_dma_probe(struct platform_device *op)
{
const struct k3dma_soc_data *soc_data;
struct k3_dma_dev *d;
- const struct of_device_id *of_id;
int i, ret, irq = 0;
d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
@@ -854,19 +852,16 @@ static int k3_dma_probe(struct platform_device *op)
if (IS_ERR(d->base))
return PTR_ERR(d->base);
- of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
- if (of_id) {
- of_property_read_u32((&op->dev)->of_node,
- "dma-channels", &d->dma_channels);
- of_property_read_u32((&op->dev)->of_node,
- "dma-requests", &d->dma_requests);
- ret = of_property_read_u32((&op->dev)->of_node,
- "dma-channel-mask", &d->dma_channel_mask);
- if (ret) {
- dev_warn(&op->dev,
- "dma-channel-mask doesn't exist, considering all as available.\n");
- d->dma_channel_mask = (u32)~0UL;
- }
+ of_property_read_u32((&op->dev)->of_node,
+ "dma-channels", &d->dma_channels);
+ of_property_read_u32((&op->dev)->of_node,
+ "dma-requests", &d->dma_requests);
+ ret = of_property_read_u32((&op->dev)->of_node,
+ "dma-channel-mask", &d->dma_channel_mask);
+ if (ret) {
+ dev_warn(&op->dev,
+ "dma-channel-mask doesn't exist, considering all as available.\n");
+ d->dma_channel_mask = (u32)~0UL;
}
if (!(soc_data->flags & K3_FLAG_NOCLK)) {
@@ -974,7 +969,7 @@ dma_async_register_fail:
return ret;
}
-static int k3_dma_remove(struct platform_device *op)
+static void k3_dma_remove(struct platform_device *op)
{
struct k3_dma_chan *c, *cn;
struct k3_dma_dev *d = platform_get_drvdata(op);
@@ -990,7 +985,6 @@ static int k3_dma_remove(struct platform_device *op)
}
tasklet_kill(&d->task);
clk_disable_unprepare(d->clk);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1034,7 +1028,7 @@ static struct platform_driver k3_pdma_driver = {
.of_match_table = k3_pdma_dt_ids,
},
.probe = k3_dma_probe,
- .remove = k3_dma_remove,
+ .remove_new = k3_dma_remove,
};
module_platform_driver(k3_pdma_driver);
diff --git a/drivers/dma/mcf-edma-main.c b/drivers/dma/mcf-edma-main.c
index b359421ee9ea..ab21455d9c3a 100644
--- a/drivers/dma/mcf-edma-main.c
+++ b/drivers/dma/mcf-edma-main.c
@@ -255,15 +255,13 @@ static int mcf_edma_probe(struct platform_device *pdev)
return 0;
}
-static int mcf_edma_remove(struct platform_device *pdev)
+static void mcf_edma_remove(struct platform_device *pdev)
{
struct fsl_edma_engine *mcf_edma = platform_get_drvdata(pdev);
mcf_edma_irq_free(pdev, mcf_edma);
fsl_edma_cleanup_vchan(&mcf_edma->dma_dev);
dma_async_device_unregister(&mcf_edma->dma_dev);
-
- return 0;
}
static struct platform_driver mcf_edma_driver = {
@@ -271,7 +269,7 @@ static struct platform_driver mcf_edma_driver = {
.name = "mcf-edma",
},
.probe = mcf_edma_probe,
- .remove = mcf_edma_remove,
+ .remove_new = mcf_edma_remove,
};
bool mcf_edma_filter_fn(struct dma_chan *chan, void *param)
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 324b7387b1b9..529100c5b9f5 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -885,7 +885,7 @@ err_unregister:
return err;
}
-static int mtk_cqdma_remove(struct platform_device *pdev)
+static void mtk_cqdma_remove(struct platform_device *pdev)
{
struct mtk_cqdma_device *cqdma = platform_get_drvdata(pdev);
struct mtk_cqdma_vchan *vc;
@@ -918,13 +918,11 @@ static int mtk_cqdma_remove(struct platform_device *pdev)
dma_async_device_unregister(&cqdma->ddev);
of_dma_controller_free(pdev->dev.of_node);
-
- return 0;
}
static struct platform_driver mtk_cqdma_driver = {
.probe = mtk_cqdma_probe,
- .remove = mtk_cqdma_remove,
+ .remove_new = mtk_cqdma_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = mtk_cqdma_match,
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index 64120767d983..36ff11e909ea 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -1009,7 +1009,7 @@ err_unregister:
return err;
}
-static int mtk_hsdma_remove(struct platform_device *pdev)
+static void mtk_hsdma_remove(struct platform_device *pdev)
{
struct mtk_hsdma_device *hsdma = platform_get_drvdata(pdev);
struct mtk_hsdma_vchan *vc;
@@ -1034,13 +1034,11 @@ static int mtk_hsdma_remove(struct platform_device *pdev)
dma_async_device_unregister(&hsdma->ddev);
of_dma_controller_free(pdev->dev.of_node);
-
- return 0;
}
static struct platform_driver mtk_hsdma_driver = {
.probe = mtk_hsdma_probe,
- .remove = mtk_hsdma_remove,
+ .remove_new = mtk_hsdma_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = mtk_hsdma_match,
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index 06d12ac39144..1bdc1500be40 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -572,7 +572,7 @@ err_no_dma:
return rc;
}
-static int mtk_uart_apdma_remove(struct platform_device *pdev)
+static void mtk_uart_apdma_remove(struct platform_device *pdev)
{
struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev);
@@ -583,8 +583,6 @@ static int mtk_uart_apdma_remove(struct platform_device *pdev)
dma_async_device_unregister(&mtkd->ddev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -639,7 +637,7 @@ static const struct dev_pm_ops mtk_uart_apdma_pm_ops = {
static struct platform_driver mtk_uart_apdma_driver = {
.probe = mtk_uart_apdma_probe,
- .remove = mtk_uart_apdma_remove,
+ .remove_new = mtk_uart_apdma_remove,
.driver = {
.name = KBUILD_MODNAME,
.pm = &mtk_uart_apdma_pm_ops,
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index ebdfdcbb4f7a..136fcaeff8dd 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -15,7 +15,6 @@
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
@@ -932,7 +931,7 @@ static void dma_do_tasklet(struct tasklet_struct *t)
}
}
-static int mmp_pdma_remove(struct platform_device *op)
+static void mmp_pdma_remove(struct platform_device *op)
{
struct mmp_pdma_device *pdev = platform_get_drvdata(op);
struct mmp_pdma_phy *phy;
@@ -958,7 +957,6 @@ static int mmp_pdma_remove(struct platform_device *op)
}
dma_async_device_unregister(&pdev->device);
- return 0;
}
static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
@@ -1020,7 +1018,6 @@ static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
static int mmp_pdma_probe(struct platform_device *op)
{
struct mmp_pdma_device *pdev;
- const struct of_device_id *of_id;
struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
int i, ret, irq = 0;
int dma_channels = 0, irq_num = 0;
@@ -1040,8 +1037,7 @@ static int mmp_pdma_probe(struct platform_device *op)
if (IS_ERR(pdev->base))
return PTR_ERR(pdev->base);
- of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
- if (of_id) {
+ if (pdev->dev->of_node) {
/* Parse new and deprecated dma-channels properties */
if (of_property_read_u32(pdev->dev->of_node, "dma-channels",
&dma_channels))
@@ -1141,7 +1137,7 @@ static struct platform_driver mmp_pdma_driver = {
},
.id_table = mmp_pdma_id_table,
.probe = mmp_pdma_probe,
- .remove = mmp_pdma_remove,
+ .remove_new = mmp_pdma_remove,
};
module_platform_driver(mmp_pdma_driver);
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index d49fa6bc6775..b76fe99e1151 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -14,9 +14,9 @@
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/device.h>
#include <linux/genalloc.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include "dmaengine.h"
@@ -552,12 +552,10 @@ static void mmp_tdma_issue_pending(struct dma_chan *chan)
mmp_tdma_enable_chan(tdmac);
}
-static int mmp_tdma_remove(struct platform_device *pdev)
+static void mmp_tdma_remove(struct platform_device *pdev)
{
if (pdev->dev.of_node)
of_dma_controller_free(pdev->dev.of_node);
-
- return 0;
}
static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
@@ -637,18 +635,13 @@ MODULE_DEVICE_TABLE(of, mmp_tdma_dt_ids);
static int mmp_tdma_probe(struct platform_device *pdev)
{
enum mmp_tdma_type type;
- const struct of_device_id *of_id;
struct mmp_tdma_device *tdev;
int i, ret;
int irq = 0, irq_num = 0;
int chan_num = TDMA_CHANNEL_NUM;
struct gen_pool *pool = NULL;
- of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
- if (of_id)
- type = (enum mmp_tdma_type) of_id->data;
- else
- type = platform_get_device_id(pdev)->driver_data;
+ type = (enum mmp_tdma_type)device_get_match_data(&pdev->dev);
/* always have couple channels */
tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
@@ -726,34 +719,24 @@ static int mmp_tdma_probe(struct platform_device *pdev)
return ret;
}
- if (pdev->dev.of_node) {
- ret = of_dma_controller_register(pdev->dev.of_node,
- mmp_tdma_xlate, tdev);
- if (ret) {
- dev_err(tdev->device.dev,
- "failed to register controller\n");
- return ret;
- }
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ mmp_tdma_xlate, tdev);
+ if (ret) {
+ dev_err(tdev->device.dev, "failed to register controller\n");
+ return ret;
}
dev_info(tdev->device.dev, "initialized\n");
return 0;
}
-static const struct platform_device_id mmp_tdma_id_table[] = {
- { "mmp-adma", MMP_AUD_TDMA },
- { "pxa910-squ", PXA910_SQU },
- { },
-};
-
static struct platform_driver mmp_tdma_driver = {
.driver = {
.name = "mmp-tdma",
.of_match_table = mmp_tdma_dt_ids,
},
- .id_table = mmp_tdma_id_table,
.probe = mmp_tdma_probe,
- .remove = mmp_tdma_remove,
+ .remove_new = mmp_tdma_remove,
};
module_platform_driver(mmp_tdma_driver);
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index 7565ad98ba66..c48d68cbff92 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -124,7 +124,7 @@ struct moxart_desc {
unsigned int dma_cycles;
struct virt_dma_desc vd;
uint8_t es;
- struct moxart_sg sg[];
+ struct moxart_sg sg[] __counted_by(sglen);
};
struct moxart_chan {
@@ -309,6 +309,7 @@ static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
d = kzalloc(struct_size(d, sg, sg_len), GFP_ATOMIC);
if (!d)
return NULL;
+ d->sglen = sg_len;
d->dma_dir = dir;
d->dev_addr = dev_addr;
@@ -319,8 +320,6 @@ static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
d->sg[i].len = sg_dma_len(sgent);
}
- d->sglen = sg_len;
-
ch->error = 0;
return vchan_tx_prep(&ch->vc, &d->vd, tx_flags);
@@ -630,7 +629,7 @@ static int moxart_probe(struct platform_device *pdev)
return 0;
}
-static int moxart_remove(struct platform_device *pdev)
+static void moxart_remove(struct platform_device *pdev)
{
struct moxart_dmadev *m = platform_get_drvdata(pdev);
@@ -640,8 +639,6 @@ static int moxart_remove(struct platform_device *pdev)
if (pdev->dev.of_node)
of_dma_controller_free(pdev->dev.of_node);
-
- return 0;
}
static const struct of_device_id moxart_dma_match[] = {
@@ -652,7 +649,7 @@ MODULE_DEVICE_TABLE(of, moxart_dma_match);
static struct platform_driver moxart_driver = {
.probe = moxart_probe,
- .remove = moxart_remove,
+ .remove_new = moxart_remove,
.driver = {
.name = "moxart-dma-engine",
.of_match_table = moxart_dma_match,
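A recurring detail of the __counted_by() conversions (moxart above, sa11x0 and the stm32 drivers below): the counter field must already hold its final value before the flexible array is written, so assignments such as d->sglen = sg_len are hoisted from after the fill loop to immediately after the allocation. Sketch with placeholder names, where lens[] stands in for whatever the caller supplies:

    d = kzalloc(struct_size(d, sg, sg_len), GFP_ATOMIC);
    if (!d)
            return NULL;
    d->sglen = sg_len;                      /* set the bound before any d->sg[i] store */

    for (i = 0; i < sg_len; i++)
            d->sg[i].len = lens[i];         /* stores are now within the declared bounds */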
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 1104017320b8..68c247a46321 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -1084,7 +1084,7 @@ err:
return retval;
}
-static int mpc_dma_remove(struct platform_device *op)
+static void mpc_dma_remove(struct platform_device *op)
{
struct device *dev = &op->dev;
struct mpc_dma *mdma = dev_get_drvdata(dev);
@@ -1099,8 +1099,6 @@ static int mpc_dma_remove(struct platform_device *op)
free_irq(mdma->irq, mdma);
irq_dispose_mapping(mdma->irq);
tasklet_kill(&mdma->tasklet);
-
- return 0;
}
static const struct of_device_id mpc_dma_match[] = {
@@ -1112,7 +1110,7 @@ MODULE_DEVICE_TABLE(of, mpc_dma_match);
static struct platform_driver mpc_dma_driver = {
.probe = mpc_dma_probe,
- .remove = mpc_dma_remove,
+ .remove_new = mpc_dma_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = mpc_dma_match,
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 23b232b57518..bcd3b623ac6c 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -10,8 +10,8 @@
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
@@ -1328,13 +1328,8 @@ static int mv_xor_probe(struct platform_device *pdev)
* setting up. In non-dt case it can only be the legacy one.
*/
xordev->xor_type = XOR_ORION;
- if (pdev->dev.of_node) {
- const struct of_device_id *of_id =
- of_match_device(mv_xor_dt_ids,
- &pdev->dev);
-
- xordev->xor_type = (uintptr_t)of_id->data;
- }
+ if (pdev->dev.of_node)
+ xordev->xor_type = (uintptr_t)device_get_match_data(&pdev->dev);
/*
* (Re-)program MBUS remapping windows if we are asked to.
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 0e1e9ca1c005..1ebfbe88e733 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -855,7 +855,7 @@ free_msi_irqs:
return ret;
}
-static int mv_xor_v2_remove(struct platform_device *pdev)
+static void mv_xor_v2_remove(struct platform_device *pdev)
{
struct mv_xor_v2_device *xor_dev = platform_get_drvdata(pdev);
@@ -870,8 +870,6 @@ static int mv_xor_v2_remove(struct platform_device *pdev)
platform_msi_domain_free_irqs(&pdev->dev);
tasklet_kill(&xor_dev->irq_tasklet);
-
- return 0;
}
#ifdef CONFIG_OF
@@ -886,7 +884,7 @@ static struct platform_driver mv_xor_v2_driver = {
.probe = mv_xor_v2_probe,
.suspend = mv_xor_v2_suspend,
.resume = mv_xor_v2_resume,
- .remove = mv_xor_v2_remove,
+ .remove_new = mv_xor_v2_remove,
.driver = {
.name = "mv_xor_v2",
.of_match_table = of_match_ptr(mv_xor_v2_dt_ids),
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 0b2f96fd8bf0..c08916339aa7 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1454,7 +1454,7 @@ e_clk_off:
return ret;
}
-static int nbpf_remove(struct platform_device *pdev)
+static void nbpf_remove(struct platform_device *pdev)
{
struct nbpf_device *nbpf = platform_get_drvdata(pdev);
int i;
@@ -1472,8 +1472,6 @@ static int nbpf_remove(struct platform_device *pdev)
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&nbpf->dma_dev);
clk_disable_unprepare(nbpf->clk);
-
- return 0;
}
static const struct platform_device_id nbpf_ids[] = {
@@ -1517,7 +1515,7 @@ static struct platform_driver nbpf_driver = {
},
.id_table = nbpf_ids,
.probe = nbpf_probe,
- .remove = nbpf_remove,
+ .remove_new = nbpf_remove,
};
module_platform_driver(nbpf_driver);
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 384476757c5e..4e76c4ec2d39 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -1231,7 +1231,7 @@ err_pool_free:
return ret;
}
-static int owl_dma_remove(struct platform_device *pdev)
+static void owl_dma_remove(struct platform_device *pdev)
{
struct owl_dma *od = platform_get_drvdata(pdev);
@@ -1248,13 +1248,11 @@ static int owl_dma_remove(struct platform_device *pdev)
clk_disable_unprepare(od->clk);
dma_pool_destroy(od->lli_pool);
-
- return 0;
}
static struct platform_driver owl_dma_driver = {
.probe = owl_dma_probe,
- .remove = owl_dma_remove,
+ .remove_new = owl_dma_remove,
.driver = {
.name = "dma-owl",
.of_match_table = of_match_ptr(owl_dma_match),
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index f9b82dff3387..bbb60a970dab 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4230,7 +4230,7 @@ out:
/**
* ppc440spe_adma_remove - remove the asynch device
*/
-static int ppc440spe_adma_remove(struct platform_device *ofdev)
+static void ppc440spe_adma_remove(struct platform_device *ofdev)
{
struct ppc440spe_adma_device *adev = platform_get_drvdata(ofdev);
struct device_node *np = ofdev->dev.of_node;
@@ -4278,7 +4278,6 @@ static int ppc440spe_adma_remove(struct platform_device *ofdev)
of_address_to_resource(np, 0, &res);
release_mem_region(res.start, resource_size(&res));
kfree(adev);
- return 0;
}
/*
@@ -4550,7 +4549,7 @@ MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match);
static struct platform_driver ppc440spe_adma_driver = {
.probe = ppc440spe_adma_probe,
- .remove = ppc440spe_adma_remove,
+ .remove_new = ppc440spe_adma_remove,
.driver = {
.name = "PPC440SP(E)-ADMA",
.of_match_table = ppc440spe_adma_of_match,
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 1b046d9a3a26..31f8da810c05 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -15,9 +15,8 @@
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
-#include <linux/of_device.h>
-#include <linux/of_dma.h>
#include <linux/of.h>
+#include <linux/of_dma.h>
#include <linux/wait.h>
#include <linux/dma/pxa-dma.h>
@@ -91,7 +90,8 @@ struct pxad_desc_sw {
bool cyclic;
struct dma_pool *desc_pool; /* Channel's used allocator */
- struct pxad_desc_hw *hw_desc[]; /* DMA coherent descriptors */
+ struct pxad_desc_hw *hw_desc[] __counted_by(nb_desc);
+ /* DMA coherent descriptors */
};
struct pxad_phy {
@@ -722,7 +722,6 @@ static void pxad_free_desc(struct virt_dma_desc *vd)
dma_addr_t dma;
struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
- BUG_ON(sw_desc->nb_desc == 0);
for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
if (i > 0)
dma = sw_desc->hw_desc[i - 1]->ddadr;
@@ -740,6 +739,7 @@ pxad_alloc_desc(struct pxad_chan *chan, unsigned int nb_hw_desc)
{
struct pxad_desc_sw *sw_desc;
dma_addr_t dma;
+ void *desc;
int i;
sw_desc = kzalloc(struct_size(sw_desc, hw_desc, nb_hw_desc),
@@ -749,20 +749,21 @@ pxad_alloc_desc(struct pxad_chan *chan, unsigned int nb_hw_desc)
sw_desc->desc_pool = chan->desc_pool;
for (i = 0; i < nb_hw_desc; i++) {
- sw_desc->hw_desc[i] = dma_pool_alloc(sw_desc->desc_pool,
- GFP_NOWAIT, &dma);
- if (!sw_desc->hw_desc[i]) {
+ desc = dma_pool_alloc(sw_desc->desc_pool, GFP_NOWAIT, &dma);
+ if (!desc) {
dev_err(&chan->vc.chan.dev->device,
"%s(): Couldn't allocate the %dth hw_desc from dma_pool %p\n",
__func__, i, sw_desc->desc_pool);
goto err;
}
+ sw_desc->nb_desc++;
+ sw_desc->hw_desc[i] = desc;
+
if (i == 0)
sw_desc->first = dma;
else
sw_desc->hw_desc[i - 1]->ddadr = dma;
- sw_desc->nb_desc++;
}
return sw_desc;
@@ -1221,13 +1222,12 @@ static void pxad_free_channels(struct dma_device *dmadev)
}
}
-static int pxad_remove(struct platform_device *op)
+static void pxad_remove(struct platform_device *op)
{
struct pxad_device *pdev = platform_get_drvdata(op);
pxad_cleanup_debugfs(pdev);
pxad_free_channels(&pdev->slave);
- return 0;
}
static int pxad_init_phys(struct platform_device *op,
@@ -1343,7 +1343,6 @@ static int pxad_init_dmadev(struct platform_device *op,
static int pxad_probe(struct platform_device *op)
{
struct pxad_device *pdev;
- const struct of_device_id *of_id;
const struct dma_slave_map *slave_map = NULL;
struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
int ret, dma_channels = 0, nb_requestors = 0, slave_map_cnt = 0;
@@ -1361,8 +1360,7 @@ static int pxad_probe(struct platform_device *op)
if (IS_ERR(pdev->base))
return PTR_ERR(pdev->base);
- of_id = of_match_device(pxad_dt_ids, &op->dev);
- if (of_id) {
+ if (op->dev.of_node) {
/* Parse new and deprecated dma-channels properties */
if (of_property_read_u32(op->dev.of_node, "dma-channels",
&dma_channels))
@@ -1444,7 +1442,7 @@ static struct platform_driver pxad_driver = {
},
.id_table = pxad_id_table,
.probe = pxad_probe,
- .remove = pxad_remove,
+ .remove_new = pxad_remove,
};
static bool pxad_filter_fn(struct dma_chan *chan, void *param)
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 4c3eb972039d..5e7d332731e0 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -74,7 +74,7 @@ struct bam_async_desc {
struct list_head desc_node;
enum dma_transfer_direction dir;
size_t length;
- struct bam_desc_hw desc[];
+ struct bam_desc_hw desc[] __counted_by(num_desc);
};
enum bam_reg {
@@ -1386,7 +1386,7 @@ err_disable_clk:
return ret;
}
-static int bam_dma_remove(struct platform_device *pdev)
+static void bam_dma_remove(struct platform_device *pdev)
{
struct bam_device *bdev = platform_get_drvdata(pdev);
u32 i;
@@ -1416,8 +1416,6 @@ static int bam_dma_remove(struct platform_device *pdev)
tasklet_kill(&bdev->task);
clk_disable_unprepare(bdev->bamclk);
-
- return 0;
}
static int __maybe_unused bam_dma_runtime_suspend(struct device *dev)
@@ -1475,7 +1473,7 @@ static const struct dev_pm_ops bam_dma_pm_ops = {
static struct platform_driver bam_dma_driver = {
.probe = bam_dma_probe,
- .remove = bam_dma_remove,
+ .remove_new = bam_dma_remove,
.driver = {
.name = "bam-dma-engine",
.pm = &bam_dma_pm_ops,
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 834ae519c15d..d63b93dc7047 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -745,7 +745,7 @@ static bool hidma_test_capability(struct device *dev, enum hidma_cap test_cap)
{
enum hidma_cap cap;
- cap = (enum hidma_cap) device_get_match_data(dev);
+ cap = (uintptr_t) device_get_match_data(dev);
return cap ? ((cap & test_cap) > 0) : 0;
}
@@ -915,7 +915,7 @@ static void hidma_shutdown(struct platform_device *pdev)
}
-static int hidma_remove(struct platform_device *pdev)
+static void hidma_remove(struct platform_device *pdev)
{
struct hidma_dev *dmadev = platform_get_drvdata(pdev);
@@ -935,8 +935,6 @@ static int hidma_remove(struct platform_device *pdev)
dev_info(&pdev->dev, "HI-DMA engine removed\n");
pm_runtime_put_sync_suspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
#if IS_ENABLED(CONFIG_ACPI)
@@ -960,7 +958,7 @@ MODULE_DEVICE_TABLE(of, hidma_match);
static struct platform_driver hidma_driver = {
.probe = hidma_probe,
- .remove = hidma_remove,
+ .remove_new = hidma_remove,
.shutdown = hidma_shutdown,
.driver = {
.name = "hidma",
diff --git a/drivers/dma/qcom/qcom_adm.c b/drivers/dma/qcom/qcom_adm.c
index d56caf1681ff..53f4273b657c 100644
--- a/drivers/dma/qcom/qcom_adm.c
+++ b/drivers/dma/qcom/qcom_adm.c
@@ -904,7 +904,7 @@ err_disable_core_clk:
return ret;
}
-static int adm_dma_remove(struct platform_device *pdev)
+static void adm_dma_remove(struct platform_device *pdev)
{
struct adm_device *adev = platform_get_drvdata(pdev);
struct adm_chan *achan;
@@ -927,8 +927,6 @@ static int adm_dma_remove(struct platform_device *pdev)
clk_disable_unprepare(adev->core_clk);
clk_disable_unprepare(adev->iface_clk);
-
- return 0;
}
static const struct of_device_id adm_of_match[] = {
@@ -939,7 +937,7 @@ MODULE_DEVICE_TABLE(of, adm_of_match);
static struct platform_driver adm_dma_driver = {
.probe = adm_dma_probe,
- .remove = adm_dma_remove,
+ .remove_new = adm_dma_remove,
.driver = {
.name = "adm-dma-engine",
.of_match_table = adm_of_match,
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index a29c13cae716..01e656c69e6c 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -78,7 +78,7 @@ struct sa11x0_dma_desc {
bool cyclic;
unsigned sglen;
- struct sa11x0_dma_sg sg[];
+ struct sa11x0_dma_sg sg[] __counted_by(sglen);
};
struct sa11x0_dma_phy;
@@ -558,6 +558,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
return NULL;
}
+ txd->sglen = j;
j = 0;
for_each_sg(sg, sgent, sglen, i) {
@@ -593,7 +594,6 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
txd->ddar = c->ddar;
txd->size = size;
- txd->sglen = j;
dev_dbg(chan->device->dev, "vchan %p: txd %p: size %zu nr %u\n",
&c->vc, &txd->vd, txd->size, txd->sglen);
@@ -628,6 +628,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
return NULL;
}
+ txd->sglen = sglen;
for (i = k = 0; i < size / period; i++) {
size_t tlen, len = period;
@@ -653,7 +654,6 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
txd->ddar = c->ddar;
txd->size = size;
- txd->sglen = sglen;
txd->cyclic = 1;
txd->period = sgperiod;
@@ -984,7 +984,7 @@ static int sa11x0_dma_probe(struct platform_device *pdev)
return ret;
}
-static int sa11x0_dma_remove(struct platform_device *pdev)
+static void sa11x0_dma_remove(struct platform_device *pdev)
{
struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
unsigned pch;
@@ -997,8 +997,6 @@ static int sa11x0_dma_remove(struct platform_device *pdev)
tasklet_kill(&d->task);
iounmap(d->base);
kfree(d);
-
- return 0;
}
static __maybe_unused int sa11x0_dma_suspend(struct device *dev)
@@ -1081,7 +1079,7 @@ static struct platform_driver sa11x0_dma_driver = {
.pm = &sa11x0_dma_pm_ops,
},
.probe = sa11x0_dma_probe,
- .remove = sa11x0_dma_remove,
+ .remove_new = sa11x0_dma_remove,
};
static int __init sa11x0_dma_init(void)
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index d1c6956af452..3125a2f162b4 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -566,7 +566,7 @@ static int sf_pdma_probe(struct platform_device *pdev)
return 0;
}
-static int sf_pdma_remove(struct platform_device *pdev)
+static void sf_pdma_remove(struct platform_device *pdev)
{
struct sf_pdma *pdma = platform_get_drvdata(pdev);
struct sf_pdma_chan *ch;
@@ -584,8 +584,6 @@ static int sf_pdma_remove(struct platform_device *pdev)
}
dma_async_device_unregister(&pdma->dma_dev);
-
- return 0;
}
static const struct of_device_id sf_pdma_dt_ids[] = {
@@ -597,7 +595,7 @@ MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);
static struct platform_driver sf_pdma_driver = {
.probe = sf_pdma_probe,
- .remove = sf_pdma_remove,
+ .remove_new = sf_pdma_remove,
.driver = {
.name = "sf-pdma",
.of_match_table = sf_pdma_dt_ids,
diff --git a/drivers/dma/sf-pdma/sf-pdma.h b/drivers/dma/sf-pdma/sf-pdma.h
index 5c398a83b491..d05772b5d8d3 100644
--- a/drivers/dma/sf-pdma/sf-pdma.h
+++ b/drivers/dma/sf-pdma/sf-pdma.h
@@ -113,7 +113,7 @@ struct sf_pdma {
void __iomem *membase;
void __iomem *mappedbase;
u32 n_chans;
- struct sf_pdma_chan chans[];
+ struct sf_pdma_chan chans[] __counted_by(n_chans);
};
#endif /* _SF_PDMA_H */
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 641d689d17ff..40482cb73d79 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1990,7 +1990,7 @@ err_pm_disable:
return ret;
}
-static int rcar_dmac_remove(struct platform_device *pdev)
+static void rcar_dmac_remove(struct platform_device *pdev)
{
struct rcar_dmac *dmac = platform_get_drvdata(pdev);
@@ -1998,8 +1998,6 @@ static int rcar_dmac_remove(struct platform_device *pdev)
dma_async_device_unregister(&dmac->engine);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static void rcar_dmac_shutdown(struct platform_device *pdev)
@@ -2041,7 +2039,7 @@ static struct platform_driver rcar_dmac_driver = {
.of_match_table = rcar_dmac_of_ids,
},
.probe = rcar_dmac_probe,
- .remove = rcar_dmac_remove,
+ .remove_new = rcar_dmac_remove,
.shutdown = rcar_dmac_shutdown,
};
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index f777addda8ba..fea5bda34bc2 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -969,7 +969,7 @@ err_pm_disable:
return ret;
}
-static int rz_dmac_remove(struct platform_device *pdev)
+static void rz_dmac_remove(struct platform_device *pdev)
{
struct rz_dmac *dmac = platform_get_drvdata(pdev);
unsigned int i;
@@ -987,8 +987,6 @@ static int rz_dmac_remove(struct platform_device *pdev)
reset_control_assert(dmac->rstc);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct of_device_id of_rz_dmac_match[] = {
@@ -1003,7 +1001,7 @@ static struct platform_driver rz_dmac_driver = {
.of_match_table = of_rz_dmac_match,
},
.probe = rz_dmac_probe,
- .remove = rz_dmac_remove,
+ .remove_new = rz_dmac_remove,
};
module_platform_driver(rz_dmac_driver);
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 00067b29e232..7cc9eb2217e8 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -882,7 +882,7 @@ eshdma:
return err;
}
-static int sh_dmae_remove(struct platform_device *pdev)
+static void sh_dmae_remove(struct platform_device *pdev)
{
struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
@@ -899,8 +899,6 @@ static int sh_dmae_remove(struct platform_device *pdev)
shdma_cleanup(&shdev->shdma_dev);
synchronize_rcu();
-
- return 0;
}
static struct platform_driver sh_dmae_driver = {
@@ -908,7 +906,7 @@ static struct platform_driver sh_dmae_driver = {
.pm = &sh_dmae_pm,
.name = SH_DMAE_DRV_NAME,
},
- .remove = sh_dmae_remove,
+ .remove_new = sh_dmae_remove,
};
static int __init sh_dmae_init(void)
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index b14cf350b669..a9b4302f6050 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -57,7 +57,7 @@ struct usb_dmac_desc {
u32 residue;
struct list_head node;
dma_cookie_t done_cookie;
- struct usb_dmac_sg sg[];
+ struct usb_dmac_sg sg[] __counted_by(sg_allocated_len);
};
#define to_usb_dmac_desc(vd) container_of(vd, struct usb_dmac_desc, vd)
@@ -866,7 +866,7 @@ static void usb_dmac_chan_remove(struct usb_dmac *dmac,
devm_free_irq(dmac->dev, uchan->irq, uchan);
}
-static int usb_dmac_remove(struct platform_device *pdev)
+static void usb_dmac_remove(struct platform_device *pdev)
{
struct usb_dmac *dmac = platform_get_drvdata(pdev);
int i;
@@ -877,8 +877,6 @@ static int usb_dmac_remove(struct platform_device *pdev)
dma_async_device_unregister(&dmac->engine);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static void usb_dmac_shutdown(struct platform_device *pdev)
@@ -901,7 +899,7 @@ static struct platform_driver usb_dmac_driver = {
.of_match_table = usb_dmac_of_ids,
},
.probe = usb_dmac_probe,
- .remove = usb_dmac_remove,
+ .remove_new = usb_dmac_remove,
.shutdown = usb_dmac_shutdown,
};
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 168aa0bd73a0..3f54ff37c5e0 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -212,7 +212,7 @@ struct sprd_dma_dev {
struct clk *ashb_clk;
int irq;
u32 total_chns;
- struct sprd_dma_chn channels[];
+ struct sprd_dma_chn channels[] __counted_by(total_chns);
};
static void sprd_dma_free_desc(struct virt_dma_desc *vd);
@@ -572,8 +572,7 @@ static void sprd_dma_stop(struct sprd_dma_chn *schan)
schan->cur_desc = NULL;
}
-static bool sprd_dma_check_trans_done(struct sprd_dma_desc *sdesc,
- enum sprd_dma_int_type int_type,
+static bool sprd_dma_check_trans_done(enum sprd_dma_int_type int_type,
enum sprd_dma_req_mode req_mode)
{
if (int_type == SPRD_DMA_NO_INT)
@@ -619,8 +618,7 @@ static irqreturn_t dma_irq_handle(int irq, void *dev_id)
vchan_cyclic_callback(&sdesc->vd);
} else {
/* Check if the dma request descriptor is done. */
- trans_done = sprd_dma_check_trans_done(sdesc, int_type,
- req_type);
+ trans_done = sprd_dma_check_trans_done(int_type, req_type);
if (trans_done == true) {
vchan_cookie_complete(&sdesc->vd);
schan->cur_desc = NULL;
@@ -1117,6 +1115,15 @@ static int sprd_dma_probe(struct platform_device *pdev)
u32 chn_count;
int ret, i;
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "unable to set coherent mask to 32\n");
+ return ret;
+ }
+ }
+
/* Parse new and deprecated dma-channels properties */
ret = device_property_read_u32(&pdev->dev, "dma-channels", &chn_count);
if (ret)
@@ -1232,7 +1239,7 @@ err_rpm:
return ret;
}
-static int sprd_dma_remove(struct platform_device *pdev)
+static void sprd_dma_remove(struct platform_device *pdev)
{
struct sprd_dma_dev *sdev = platform_get_drvdata(pdev);
struct sprd_dma_chn *c, *cn;
@@ -1255,7 +1262,6 @@ static int sprd_dma_remove(struct platform_device *pdev)
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- return 0;
}
static const struct of_device_id sprd_dma_match[] = {
@@ -1292,7 +1298,7 @@ static const struct dev_pm_ops sprd_dma_pm_ops = {
static struct platform_driver sprd_dma_driver = {
.probe = sprd_dma_probe,
- .remove = sprd_dma_remove,
+ .remove_new = sprd_dma_remove,
.driver = {
.name = "sprd-dma",
.of_match_table = sprd_dma_match,
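The sprd-dma probe now requests a 36-bit DMA mask and, if the platform cannot honour it, falls back to 32-bit before giving up — the usual "ask wide, fall back to safe" idiom for hardware that can address beyond 32 bits on at least some SoCs. A compact sketch of the same idiom with a placeholder device pointer:

    ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
    if (ret)
            ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
    if (ret) {
            dev_err(dev, "no usable DMA addressing mask\n");
            return ret;
    }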
diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c
index d95c421877fb..8880b5e336f8 100644
--- a/drivers/dma/st_fdma.c
+++ b/drivers/dma/st_fdma.c
@@ -10,9 +10,10 @@
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/interrupt.h>
#include <linux/remoteproc.h>
#include <linux/slab.h>
@@ -739,18 +740,11 @@ static void st_fdma_free(struct st_fdma_dev *fdev)
static int st_fdma_probe(struct platform_device *pdev)
{
struct st_fdma_dev *fdev;
- const struct of_device_id *match;
struct device_node *np = pdev->dev.of_node;
const struct st_fdma_driverdata *drvdata;
int ret, i;
- match = of_match_device((st_fdma_match), &pdev->dev);
- if (!match || !match->data) {
- dev_err(&pdev->dev, "No device match found\n");
- return -ENODEV;
- }
-
- drvdata = match->data;
+ drvdata = device_get_match_data(&pdev->dev);
fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL);
if (!fdev)
@@ -849,15 +843,13 @@ err:
return ret;
}
-static int st_fdma_remove(struct platform_device *pdev)
+static void st_fdma_remove(struct platform_device *pdev)
{
struct st_fdma_dev *fdev = platform_get_drvdata(pdev);
devm_free_irq(&pdev->dev, fdev->irq, fdev);
st_slim_rproc_put(fdev->slim_rproc);
of_dma_controller_free(pdev->dev.of_node);
-
- return 0;
}
static struct platform_driver st_fdma_platform_driver = {
@@ -866,7 +858,7 @@ static struct platform_driver st_fdma_platform_driver = {
.of_match_table = st_fdma_match,
},
.probe = st_fdma_probe,
- .remove = st_fdma_remove,
+ .remove_new = st_fdma_remove,
};
module_platform_driver(st_fdma_platform_driver);
diff --git a/drivers/dma/st_fdma.h b/drivers/dma/st_fdma.h
index fa15b97a3bab..f296412e96b6 100644
--- a/drivers/dma/st_fdma.h
+++ b/drivers/dma/st_fdma.h
@@ -97,7 +97,7 @@ struct st_fdma_desc {
struct st_fdma_chan *fchan;
bool iscyclic;
unsigned int n_nodes;
- struct st_fdma_sw_node node[];
+ struct st_fdma_sw_node node[] __counted_by(n_nodes);
};
enum st_fdma_type {
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 0b30151fb45c..72d83cd9ed6b 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -21,7 +21,6 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -191,7 +190,7 @@ struct stm32_dma_desc {
struct virt_dma_desc vdesc;
bool cyclic;
u32 num_sgs;
- struct stm32_dma_sg_req sg_req[];
+ struct stm32_dma_sg_req sg_req[] __counted_by(num_sgs);
};
/**
@@ -1105,6 +1104,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
desc = kzalloc(struct_size(desc, sg_req, sg_len), GFP_NOWAIT);
if (!desc)
return NULL;
+ desc->num_sgs = sg_len;
/* Set peripheral flow controller */
if (chan->dma_sconfig.device_fc)
@@ -1143,8 +1143,6 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
desc->sg_req[i].chan_reg.dma_sm1ar += sg_dma_len(sg);
desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
}
-
- desc->num_sgs = sg_len;
desc->cyclic = false;
return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
@@ -1218,6 +1216,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
desc = kzalloc(struct_size(desc, sg_req, num_periods), GFP_NOWAIT);
if (!desc)
return NULL;
+ desc->num_sgs = num_periods;
for (i = 0; i < num_periods; i++) {
desc->sg_req[i].len = period_len;
@@ -1234,8 +1233,6 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
if (!chan->trig_mdma)
buf_addr += period_len;
}
-
- desc->num_sgs = num_periods;
desc->cyclic = true;
return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
@@ -1256,6 +1253,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
if (!desc)
return NULL;
+ desc->num_sgs = num_sgs;
threshold = chan->threshold;
@@ -1285,8 +1283,6 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
desc->sg_req[i].len = xfer_count;
}
-
- desc->num_sgs = num_sgs;
desc->cyclic = false;
return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
@@ -1567,17 +1563,10 @@ static int stm32_dma_probe(struct platform_device *pdev)
struct stm32_dma_chan *chan;
struct stm32_dma_device *dmadev;
struct dma_device *dd;
- const struct of_device_id *match;
struct resource *res;
struct reset_control *rst;
int i, ret;
- match = of_match_device(stm32_dma_of_match, &pdev->dev);
- if (!match) {
- dev_err(&pdev->dev, "Error: No device match found\n");
- return -ENODEV;
- }
-
dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
if (!dmadev)
return -ENOMEM;
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index bae08b3f55c7..6505081ced44 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -224,7 +224,7 @@ struct stm32_mdma_desc {
u32 ccr;
bool cyclic;
u32 count;
- struct stm32_mdma_desc_node node[];
+ struct stm32_mdma_desc_node node[] __counted_by(count);
};
struct stm32_mdma_dma_config {
@@ -256,7 +256,7 @@ struct stm32_mdma_device {
u32 nr_ahb_addr_masks;
u32 chan_reserved;
struct stm32_mdma_chan chan[STM32_MDMA_MAX_CHANNELS];
- u32 ahb_addr_masks[];
+ u32 ahb_addr_masks[] __counted_by(nr_ahb_addr_masks);
};
static struct stm32_mdma_device *stm32_mdma_get_dev(
@@ -321,6 +321,7 @@ static struct stm32_mdma_desc *stm32_mdma_alloc_desc(
desc = kzalloc(struct_size(desc, node, count), GFP_NOWAIT);
if (!desc)
return NULL;
+ desc->count = count;
for (i = 0; i < count; i++) {
desc->node[i].hwdesc =
@@ -330,8 +331,6 @@ static struct stm32_mdma_desc *stm32_mdma_alloc_desc(
goto err;
}
- desc->count = count;
-
return desc;
err:
@@ -489,7 +488,7 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
src_maxburst = chan->dma_config.src_maxburst;
dst_maxburst = chan->dma_config.dst_maxburst;
- ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
+ ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
@@ -965,7 +964,7 @@ stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
if (!desc)
return NULL;
- ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
+ ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
@@ -1627,13 +1626,13 @@ static int stm32_mdma_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!dmadev)
return -ENOMEM;
+ dmadev->nr_ahb_addr_masks = count;
dmadev->nr_channels = nr_channels;
dmadev->nr_requests = nr_requests;
device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
dmadev->ahb_addr_masks,
count);
- dmadev->nr_ahb_addr_masks = count;
dmadev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dmadev->base))
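Two stm32-mdma hunks above mask STM32_MDMA_CCR_EN out of the CCR value read back while a new transfer is being prepared. The apparent intent is that a CCR snapshot taken while the channel was still running would otherwise keep the enable bit set, and writing that cached value back later could start the channel before the rest of the descriptor is programmed. A generic read-modify sketch of the pattern; the register and bit names here are placeholders, not the driver's:

    u32 ctrl;

    ctrl = readl(base + CH_CTRL(chan_id)) & ~CH_CTRL_EN;   /* strip any stale enable bit */
    /* derive the new configuration into ctrl */
    writel(ctrl, base + CH_CTRL(chan_id));                  /* channel is not started here */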
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index e86c8829513a..2e7f9b07fdd2 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -1271,7 +1271,7 @@ err_clk_disable:
return ret;
}
-static int sun4i_dma_remove(struct platform_device *pdev)
+static void sun4i_dma_remove(struct platform_device *pdev)
{
struct sun4i_dma_dev *priv = platform_get_drvdata(pdev);
@@ -1282,8 +1282,6 @@ static int sun4i_dma_remove(struct platform_device *pdev)
dma_async_device_unregister(&priv->slave);
clk_disable_unprepare(priv->clk);
-
- return 0;
}
static const struct of_device_id sun4i_dma_match[] = {
@@ -1294,7 +1292,7 @@ MODULE_DEVICE_TABLE(of, sun4i_dma_match);
static struct platform_driver sun4i_dma_driver = {
.probe = sun4i_dma_probe,
- .remove = sun4i_dma_remove,
+ .remove_new = sun4i_dma_remove,
.driver = {
.name = "sun4i-dma",
.of_match_table = sun4i_dma_match,
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 2469efddf540..583bf49031cf 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -1470,7 +1470,7 @@ err_chan_free:
return ret;
}
-static int sun6i_dma_remove(struct platform_device *pdev)
+static void sun6i_dma_remove(struct platform_device *pdev)
{
struct sun6i_dma_dev *sdc = platform_get_drvdata(pdev);
@@ -1484,13 +1484,11 @@ static int sun6i_dma_remove(struct platform_device *pdev)
reset_control_assert(sdc->rstc);
sun6i_dma_free(sdc);
-
- return 0;
}
static struct platform_driver sun6i_dma_driver = {
.probe = sun6i_dma_probe,
- .remove = sun6i_dma_remove,
+ .remove_new = sun6i_dma_remove,
.driver = {
.name = "sun6i-dma",
.of_match_table = sun6i_dma_match,
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
index 33b101001100..fa4d4142a68a 100644
--- a/drivers/dma/tegra186-gpc-dma.c
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -221,7 +221,7 @@ struct tegra_dma_desc {
unsigned int sg_count;
struct virt_dma_desc vd;
struct tegra_dma_channel *tdc;
- struct tegra_dma_sg_req sg_req[];
+ struct tegra_dma_sg_req sg_req[] __counted_by(sg_count);
};
/*
@@ -1473,14 +1473,12 @@ static int tegra_dma_probe(struct platform_device *pdev)
return 0;
}
-static int tegra_dma_remove(struct platform_device *pdev)
+static void tegra_dma_remove(struct platform_device *pdev)
{
struct tegra_dma *tdma = platform_get_drvdata(pdev);
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&tdma->dma_dev);
-
- return 0;
}
static int __maybe_unused tegra_dma_pm_suspend(struct device *dev)
@@ -1533,7 +1531,7 @@ static struct platform_driver tegra_dma_driver = {
.of_match_table = tegra_dma_of_match,
},
.probe = tegra_dma_probe,
- .remove = tegra_dma_remove,
+ .remove_new = tegra_dma_remove,
};
module_platform_driver(tegra_dma_driver);
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 063022f9df76..ac69778827f2 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1581,7 +1581,7 @@ err_clk_unprepare:
return ret;
}
-static int tegra_dma_remove(struct platform_device *pdev)
+static void tegra_dma_remove(struct platform_device *pdev)
{
struct tegra_dma *tdma = platform_get_drvdata(pdev);
@@ -1589,8 +1589,6 @@ static int tegra_dma_remove(struct platform_device *pdev)
dma_async_device_unregister(&tdma->dma_dev);
pm_runtime_disable(&pdev->dev);
clk_unprepare(tdma->dma_clk);
-
- return 0;
}
static int __maybe_unused tegra_dma_runtime_suspend(struct device *dev)
@@ -1677,7 +1675,7 @@ static struct platform_driver tegra_dmac_driver = {
.of_match_table = tegra_dma_of_match,
},
.probe = tegra_dma_probe,
- .remove = tegra_dma_remove,
+ .remove_new = tegra_dma_remove,
};
module_platform_driver(tegra_dmac_driver);
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index e557bada1510..7a0586633bf3 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -162,7 +162,7 @@ struct tegra_adma {
const struct tegra_adma_chip_data *cdata;
/* Last member of the structure */
- struct tegra_adma_chan channels[];
+ struct tegra_adma_chan channels[] __counted_by(nr_channels);
};
static inline void tdma_write(struct tegra_adma *tdma, u32 reg, u32 val)
@@ -949,7 +949,7 @@ irq_dispose:
return ret;
}
-static int tegra_adma_remove(struct platform_device *pdev)
+static void tegra_adma_remove(struct platform_device *pdev)
{
struct tegra_adma *tdma = platform_get_drvdata(pdev);
int i;
@@ -961,8 +961,6 @@ static int tegra_adma_remove(struct platform_device *pdev)
irq_dispose_mapping(tdma->channels[i].irq);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct dev_pm_ops tegra_adma_dev_pm_ops = {
@@ -979,7 +977,7 @@ static struct platform_driver tegra_admac_driver = {
.of_match_table = tegra_adma_of_match,
},
.probe = tegra_adma_probe,
- .remove = tegra_adma_remove,
+ .remove_new = tegra_adma_remove,
};
module_platform_driver(tegra_admac_driver);
diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c
index c3555cfb0681..7e0b06b5dff0 100644
--- a/drivers/dma/ti/cppi41.c
+++ b/drivers/dma/ti/cppi41.c
@@ -1156,7 +1156,7 @@ err_get_sync:
return ret;
}
-static int cppi41_dma_remove(struct platform_device *pdev)
+static void cppi41_dma_remove(struct platform_device *pdev)
{
struct cppi41_dd *cdd = platform_get_drvdata(pdev);
int error;
@@ -1173,7 +1173,6 @@ static int cppi41_dma_remove(struct platform_device *pdev)
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- return 0;
}
static int __maybe_unused cppi41_suspend(struct device *dev)
@@ -1244,7 +1243,7 @@ static const struct dev_pm_ops cppi41_pm_ops = {
static struct platform_driver cpp41_dma_driver = {
.probe = cppi41_dma_probe,
- .remove = cppi41_dma_remove,
+ .remove_new = cppi41_dma_remove,
.driver = {
.name = "cppi41-dma-engine",
.pm = &cppi41_pm_ops,
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index aa8e2e8ac260..f1f920861fa9 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -202,7 +202,7 @@ struct edma_desc {
u32 residue;
u32 residue_stat;
- struct edma_pset pset[];
+ struct edma_pset pset[] __counted_by(pset_nr);
};
struct edma_cc;
@@ -2401,7 +2401,7 @@ static int edma_probe(struct platform_device *pdev)
if (irq < 0 && node)
irq = irq_of_parse_and_map(node, 0);
- if (irq >= 0) {
+ if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
dev_name(dev));
ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
@@ -2417,7 +2417,7 @@ static int edma_probe(struct platform_device *pdev)
if (irq < 0 && node)
irq = irq_of_parse_and_map(node, 2);
- if (irq >= 0) {
+ if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
dev_name(dev));
ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
@@ -2550,7 +2550,7 @@ static void edma_cleanupp_vchan(struct dma_device *dmadev)
}
}
-static int edma_remove(struct platform_device *pdev)
+static void edma_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct edma_cc *ecc = dev_get_drvdata(dev);
@@ -2568,8 +2568,6 @@ static int edma_remove(struct platform_device *pdev)
edma_free_slot(ecc, ecc->dummy_slot);
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -2628,7 +2626,7 @@ static const struct dev_pm_ops edma_pm_ops = {
static struct platform_driver edma_driver = {
.probe = edma_probe,
- .remove = edma_remove,
+ .remove_new = edma_remove,
.driver = {
.name = "edma",
.pm = &edma_pm_ops,
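The two "if (irq >= 0)" checks in edma_probe() are tightened to "if (irq > 0)" because IRQ number 0 is never a valid interrupt to request: irq_of_parse_and_map() returns 0 when no mapping exists and the earlier platform IRQ lookup returns a negative errno on failure, so only a strictly positive value identifies a usable IRQ. A sketch of the resulting check, written as a hypothetical helper rather than code from the driver:

#include <linux/device.h>
#include <linux/interrupt.h>

static int request_optional_irq(struct device *dev, int irq,
				irq_handler_t handler, const char *name,
				void *data)
{
	if (irq <= 0)		/* 0 or negative: no usable IRQ */
		return 0;	/* the interrupt is optional, carry on */

	return devm_request_irq(dev, irq, handler, 0, name, data);
}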
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index cf96cf915c0c..b9e0e22383b7 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -124,7 +124,7 @@ struct omap_desc {
uint32_t csdp; /* CSDP value */
unsigned sglen;
- struct omap_sg sg[];
+ struct omap_sg sg[] __counted_by(sglen);
};
enum {
@@ -1005,6 +1005,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
d = kzalloc(struct_size(d, sg, sglen), GFP_ATOMIC);
if (!d)
return NULL;
+ d->sglen = sglen;
d->dir = dir;
d->dev_addr = dev_addr;
@@ -1120,8 +1121,6 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
}
}
- d->sglen = sglen;
-
/* Release the dma_pool entries if one allocation failed */
if (ll_failed) {
for (i = 0; i < d->sglen; i++) {
@@ -1844,7 +1843,7 @@ static int omap_dma_probe(struct platform_device *pdev)
return rc;
}
-static int omap_dma_remove(struct platform_device *pdev)
+static void omap_dma_remove(struct platform_device *pdev)
{
struct omap_dmadev *od = platform_get_drvdata(pdev);
int irq;
@@ -1869,8 +1868,6 @@ static int omap_dma_remove(struct platform_device *pdev)
dma_pool_destroy(od->desc_pool);
omap_dma_free(od);
-
- return 0;
}
static const struct omap_dma_config omap2420_data = {
@@ -1918,7 +1915,7 @@ MODULE_DEVICE_TABLE(of, omap_dma_match);
static struct platform_driver omap_dma_driver = {
.probe = omap_dma_probe,
- .remove = omap_dma_remove,
+ .remove_new = omap_dma_remove,
.driver = {
.name = "omap-dma-engine",
.of_match_table = omap_dma_match,
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 3f524be69efb..7410025605e0 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -740,7 +740,7 @@ err_release_region:
}
-static int td_remove(struct platform_device *pdev)
+static void td_remove(struct platform_device *pdev)
{
struct timb_dma *td = platform_get_drvdata(pdev);
struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -754,7 +754,6 @@ static int td_remove(struct platform_device *pdev)
release_mem_region(iomem->start, resource_size(iomem));
dev_dbg(&pdev->dev, "Removed...\n");
- return 0;
}
static struct platform_driver td_driver = {
@@ -762,7 +761,7 @@ static struct platform_driver td_driver = {
.name = DRIVER_NAME,
},
.probe = td_probe,
- .remove = td_remove,
+ .remove_new = td_remove,
};
module_platform_driver(td_driver);
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 5b6b375a257e..44ba377b4b5a 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -1151,7 +1151,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev)
return 0;
}
-static int txx9dmac_chan_remove(struct platform_device *pdev)
+static void txx9dmac_chan_remove(struct platform_device *pdev)
{
struct txx9dmac_chan *dc = platform_get_drvdata(pdev);
@@ -1162,7 +1162,6 @@ static int txx9dmac_chan_remove(struct platform_device *pdev)
tasklet_kill(&dc->tasklet);
}
dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL;
- return 0;
}
static int __init txx9dmac_probe(struct platform_device *pdev)
@@ -1215,7 +1214,7 @@ static int __init txx9dmac_probe(struct platform_device *pdev)
return 0;
}
-static int txx9dmac_remove(struct platform_device *pdev)
+static void txx9dmac_remove(struct platform_device *pdev)
{
struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
@@ -1224,7 +1223,6 @@ static int txx9dmac_remove(struct platform_device *pdev)
devm_free_irq(&pdev->dev, ddev->irq, ddev);
tasklet_kill(&ddev->tasklet);
}
- return 0;
}
static void txx9dmac_shutdown(struct platform_device *pdev)
@@ -1262,14 +1260,14 @@ static const struct dev_pm_ops txx9dmac_dev_pm_ops = {
};
static struct platform_driver txx9dmac_chan_driver = {
- .remove = txx9dmac_chan_remove,
+ .remove_new = txx9dmac_chan_remove,
.driver = {
.name = "txx9dmac-chan",
},
};
static struct platform_driver txx9dmac_driver = {
- .remove = txx9dmac_remove,
+ .remove_new = txx9dmac_remove,
.shutdown = txx9dmac_shutdown,
.driver = {
.name = "txx9dmac",
diff --git a/drivers/dma/uniphier-xdmac.c b/drivers/dma/uniphier-xdmac.c
index 290836b7e1be..3a8ee2b173b5 100644
--- a/drivers/dma/uniphier-xdmac.c
+++ b/drivers/dma/uniphier-xdmac.c
@@ -80,7 +80,7 @@ struct uniphier_xdmac_desc {
unsigned int nr_node;
unsigned int cur_node;
enum dma_transfer_direction dir;
- struct uniphier_xdmac_desc_node nodes[];
+ struct uniphier_xdmac_desc_node nodes[] __counted_by(nr_node);
};
struct uniphier_xdmac_chan {
@@ -97,7 +97,7 @@ struct uniphier_xdmac_device {
struct dma_device ddev;
void __iomem *reg_base;
int nr_chans;
- struct uniphier_xdmac_chan channels[];
+ struct uniphier_xdmac_chan channels[] __counted_by(nr_chans);
};
static struct uniphier_xdmac_chan *
@@ -295,6 +295,7 @@ uniphier_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
xd = kzalloc(struct_size(xd, nodes, nr), GFP_NOWAIT);
if (!xd)
return NULL;
+ xd->nr_node = nr;
for (i = 0; i < nr; i++) {
burst_size = min_t(size_t, len, XDMAC_MAX_WORD_SIZE);
@@ -309,7 +310,6 @@ uniphier_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
}
xd->dir = DMA_MEM_TO_MEM;
- xd->nr_node = nr;
xd->cur_node = 0;
return vchan_tx_prep(vc, &xd->vd, flags);
@@ -351,6 +351,7 @@ uniphier_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
xd = kzalloc(struct_size(xd, nodes, sg_len), GFP_NOWAIT);
if (!xd)
return NULL;
+ xd->nr_node = sg_len;
for_each_sg(sgl, sg, sg_len, i) {
xd->nodes[i].src = (direction == DMA_DEV_TO_MEM)
@@ -385,7 +386,6 @@ uniphier_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
xd->dir = direction;
- xd->nr_node = sg_len;
xd->cur_node = 0;
return vchan_tx_prep(vc, &xd->vd, flags);
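The __counted_by() annotations added above (tegra186-gpc-dma, tegra210-adma, edma, omap-dma, uniphier-xdmac) tell the compiler which field holds the element count of a flexible array, so CONFIG_UBSAN_BOUNDS and CONFIG_FORTIFY_SOURCE can bounds-check accesses at run time. That is also why the count assignments (d->sglen, xd->nr_node) move to immediately after the allocation: the counter has to be valid before the array is first indexed. A generic sketch of the pattern, with hypothetical names:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct foo_item {
	u32 val;
};

struct foo_desc {
	unsigned int nr_items;
	struct foo_item items[] __counted_by(nr_items);
};

static struct foo_desc *foo_alloc_desc(unsigned int nr, gfp_t gfp)
{
	struct foo_desc *d = kzalloc(struct_size(d, items, nr), gfp);

	if (!d)
		return NULL;

	/* Set the counter before items[] is touched so bounds checks
	 * compare against the real allocation size. */
	d->nr_items = nr;

	return d;
}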
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index bb4ff8c86733..fd4397adeb79 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -1776,7 +1776,7 @@ err_clk_enable:
return ret;
}
-static int xgene_dma_remove(struct platform_device *pdev)
+static void xgene_dma_remove(struct platform_device *pdev)
{
struct xgene_dma *pdma = platform_get_drvdata(pdev);
struct xgene_dma_chan *chan;
@@ -1797,8 +1797,6 @@ static int xgene_dma_remove(struct platform_device *pdev)
if (!IS_ERR(pdma->clk))
clk_disable_unprepare(pdma->clk);
-
- return 0;
}
#ifdef CONFIG_ACPI
@@ -1817,7 +1815,7 @@ MODULE_DEVICE_TABLE(of, xgene_dma_of_match_ptr);
static struct platform_driver xgene_dma_driver = {
.probe = xgene_dma_probe,
- .remove = xgene_dma_remove,
+ .remove_new = xgene_dma_remove,
.driver = {
.name = "X-Gene-DMA",
.of_match_table = xgene_dma_of_match_ptr,
diff --git a/drivers/dma/xilinx/xdma-regs.h b/drivers/dma/xilinx/xdma-regs.h
index dd98b4526b90..e641a5083e14 100644
--- a/drivers/dma/xilinx/xdma-regs.h
+++ b/drivers/dma/xilinx/xdma-regs.h
@@ -44,6 +44,8 @@
FIELD_PREP(XDMA_DESC_FLAGS_BITS, (flag)))
#define XDMA_DESC_CONTROL_LAST \
XDMA_DESC_CONTROL(1, XDMA_DESC_STOPPED | XDMA_DESC_COMPLETED)
+#define XDMA_DESC_CONTROL_CYCLIC \
+ XDMA_DESC_CONTROL(1, XDMA_DESC_COMPLETED)
/*
* Descriptor for a single contiguous memory block transfer.
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index e0bfd129d563..84a88029226f 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -83,6 +83,9 @@ struct xdma_chan {
* @dblk_num: Number of hardware descriptor blocks
* @desc_num: Number of hardware descriptors
* @completed_desc_num: Completed hardware descriptors
+ * @cyclic: Cyclic transfer vs. scatter-gather
+ * @periods: Number of periods in the cyclic transfer
+ * @period_size: Size of a period in bytes in cyclic transfers
*/
struct xdma_desc {
struct virt_dma_desc vdesc;
@@ -93,6 +96,9 @@ struct xdma_desc {
u32 dblk_num;
u32 desc_num;
u32 completed_desc_num;
+ bool cyclic;
+ u32 periods;
+ u32 period_size;
};
#define XDMA_DEV_STATUS_REG_DMA BIT(0)
@@ -137,10 +143,10 @@ static inline void *xdma_blk_last_desc(struct xdma_desc_block *block)
}
/**
- * xdma_link_desc_blocks - Link descriptor blocks for DMA transfer
+ * xdma_link_sg_desc_blocks - Link SG descriptor blocks for DMA transfer
* @sw_desc: Tx descriptor pointer
*/
-static void xdma_link_desc_blocks(struct xdma_desc *sw_desc)
+static void xdma_link_sg_desc_blocks(struct xdma_desc *sw_desc)
{
struct xdma_desc_block *block;
u32 last_blk_desc, desc_control;
@@ -174,6 +180,25 @@ static void xdma_link_desc_blocks(struct xdma_desc *sw_desc)
desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
}
+/**
+ * xdma_link_cyclic_desc_blocks - Link cyclic descriptor blocks for DMA transfer
+ * @sw_desc: Tx descriptor pointer
+ */
+static void xdma_link_cyclic_desc_blocks(struct xdma_desc *sw_desc)
+{
+ struct xdma_desc_block *block;
+ struct xdma_hw_desc *desc;
+ int i;
+
+ block = sw_desc->desc_blocks;
+ for (i = 0; i < sw_desc->desc_num - 1; i++) {
+ desc = block->virt_addr + i * XDMA_DESC_SIZE;
+ desc->next_desc = cpu_to_le64(block->dma_addr + ((i + 1) * XDMA_DESC_SIZE));
+ }
+ desc = block->virt_addr + i * XDMA_DESC_SIZE;
+ desc->next_desc = cpu_to_le64(block->dma_addr);
+}
+
static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan)
{
return container_of(chan, struct xdma_chan, vchan.chan);
@@ -231,14 +256,16 @@ static void xdma_free_desc(struct virt_dma_desc *vdesc)
* xdma_alloc_desc - Allocate descriptor
* @chan: DMA channel pointer
* @desc_num: Number of hardware descriptors
+ * @cyclic: Whether this is a cyclic transfer
*/
static struct xdma_desc *
-xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num)
+xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic)
{
struct xdma_desc *sw_desc;
struct xdma_hw_desc *desc;
dma_addr_t dma_addr;
u32 dblk_num;
+ u32 control;
void *addr;
int i, j;
@@ -248,12 +275,18 @@ xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num)
sw_desc->chan = chan;
sw_desc->desc_num = desc_num;
+ sw_desc->cyclic = cyclic;
dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
GFP_NOWAIT);
if (!sw_desc->desc_blocks)
goto failed;
+ if (cyclic)
+ control = XDMA_DESC_CONTROL_CYCLIC;
+ else
+ control = XDMA_DESC_CONTROL(1, 0);
+
sw_desc->dblk_num = dblk_num;
for (i = 0; i < sw_desc->dblk_num; i++) {
addr = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &dma_addr);
@@ -263,10 +296,13 @@ xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num)
sw_desc->desc_blocks[i].virt_addr = addr;
sw_desc->desc_blocks[i].dma_addr = dma_addr;
for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++)
- desc[j].control = cpu_to_le32(XDMA_DESC_CONTROL(1, 0));
+ desc[j].control = cpu_to_le32(control);
}
- xdma_link_desc_blocks(sw_desc);
+ if (cyclic)
+ xdma_link_cyclic_desc_blocks(sw_desc);
+ else
+ xdma_link_sg_desc_blocks(sw_desc);
return sw_desc;
@@ -466,7 +502,7 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
for_each_sg(sgl, sg, sg_len, i)
desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);
- sw_desc = xdma_alloc_desc(xdma_chan, desc_num);
+ sw_desc = xdma_alloc_desc(xdma_chan, desc_num, false);
if (!sw_desc)
return NULL;
sw_desc->dir = dir;
@@ -522,6 +558,81 @@ failed:
}
/**
+ * xdma_prep_dma_cyclic - prepare for cyclic DMA transactions
+ * @chan: DMA channel pointer
+ * @address: Device DMA address to access
+ * @size: Total length to transfer
+ * @period_size: Period size to use for each transfer
+ * @dir: Transfer direction
+ * @flags: Transfer ack flags
+ */
+static struct dma_async_tx_descriptor *
+xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
+ size_t size, size_t period_size,
+ enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_device *xdev = xdma_chan->xdev_hdl;
+ unsigned int periods = size / period_size;
+ struct dma_async_tx_descriptor *tx_desc;
+ struct xdma_desc_block *dblk;
+ struct xdma_hw_desc *desc;
+ struct xdma_desc *sw_desc;
+ unsigned int i;
+
+ /*
+ * Simplify the whole logic by preventing an abnormally high number of
+ * periods and periods size.
+ */
+ if (period_size > XDMA_DESC_BLEN_MAX) {
+ xdma_err(xdev, "period size limited to %lu bytes\n", XDMA_DESC_BLEN_MAX);
+ return NULL;
+ }
+
+ if (periods > XDMA_DESC_ADJACENT) {
+ xdma_err(xdev, "number of periods limited to %u\n", XDMA_DESC_ADJACENT);
+ return NULL;
+ }
+
+ sw_desc = xdma_alloc_desc(xdma_chan, periods, true);
+ if (!sw_desc)
+ return NULL;
+
+ sw_desc->periods = periods;
+ sw_desc->period_size = period_size;
+ sw_desc->dir = dir;
+
+ dblk = sw_desc->desc_blocks;
+ desc = dblk->virt_addr;
+
+ /* fill hardware descriptor */
+ for (i = 0; i < periods; i++) {
+ desc->bytes = cpu_to_le32(period_size);
+ if (dir == DMA_MEM_TO_DEV) {
+ desc->src_addr = cpu_to_le64(address + i * period_size);
+ desc->dst_addr = cpu_to_le64(xdma_chan->cfg.dst_addr);
+ } else {
+ desc->src_addr = cpu_to_le64(xdma_chan->cfg.src_addr);
+ desc->dst_addr = cpu_to_le64(address + i * period_size);
+ }
+
+ desc++;
+ }
+
+ tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
+ if (!tx_desc)
+ goto failed;
+
+ return tx_desc;
+
+failed:
+ xdma_free_desc(&sw_desc->vdesc);
+
+ return NULL;
+}
+
+/**
* xdma_device_config - Configure the DMA channel
* @chan: DMA channel
* @cfg: channel configuration
@@ -577,6 +688,41 @@ static int xdma_alloc_chan_resources(struct dma_chan *chan)
return 0;
}
+static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_desc *desc = NULL;
+ struct virt_dma_desc *vd;
+ enum dma_status ret;
+ unsigned long flags;
+ unsigned int period_idx;
+ u32 residue = 0;
+
+ ret = dma_cookie_status(chan, cookie, state);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
+
+ vd = vchan_find_desc(&xdma_chan->vchan, cookie);
+ if (vd)
+ desc = to_xdma_desc(vd);
+ if (!desc || !desc->cyclic) {
+ spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
+ return ret;
+ }
+
+ period_idx = desc->completed_desc_num % desc->periods;
+ residue = (desc->periods - period_idx) * desc->period_size;
+
+ spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
+
+ dma_set_residue(state, residue);
+
+ return ret;
+}
+
/**
* xdma_channel_isr - XDMA channel interrupt handler
* @irq: IRQ number
@@ -590,6 +736,7 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
struct virt_dma_desc *vd;
struct xdma_desc *desc;
int ret;
+ u32 st;
spin_lock(&xchan->vchan.lock);
@@ -608,6 +755,19 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
goto out;
desc->completed_desc_num += complete_desc_num;
+
+ if (desc->cyclic) {
+ ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS,
+ &st);
+ if (ret)
+ goto out;
+
+ regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_STATUS, st);
+
+ vchan_cyclic_callback(vd);
+ goto out;
+ }
+
/*
* if all data blocks are transferred, remove and complete the request
*/
@@ -621,7 +781,7 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
goto out;
- /* transfer the rest of data */
+ /* transfer the rest of data (SG only) */
xdma_xfer_start(xchan);
out:
@@ -841,7 +1001,7 @@ EXPORT_SYMBOL(xdma_get_user_irq);
* xdma_remove - Driver remove function
* @pdev: Pointer to the platform_device structure
*/
-static int xdma_remove(struct platform_device *pdev)
+static void xdma_remove(struct platform_device *pdev)
{
struct xdma_device *xdev = platform_get_drvdata(pdev);
@@ -850,8 +1010,6 @@ static int xdma_remove(struct platform_device *pdev)
if (xdev->status & XDMA_DEV_STATUS_REG_DMA)
dma_async_device_unregister(&xdev->dma_dev);
-
- return 0;
}
/**
@@ -885,7 +1043,7 @@ static int xdma_probe(struct platform_device *pdev)
goto failed;
}
xdev->irq_start = res->start;
- xdev->irq_num = res->end - res->start + 1;
+ xdev->irq_num = resource_size(res);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -921,17 +1079,20 @@ static int xdma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask);
xdev->dma_dev.dev = &pdev->dev;
+ xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources;
xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources;
- xdev->dma_dev.device_tx_status = dma_cookie_status;
+ xdev->dma_dev.device_tx_status = xdma_tx_status;
xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
xdev->dma_dev.device_config = xdma_device_config;
xdev->dma_dev.device_issue_pending = xdma_issue_pending;
xdev->dma_dev.filter.map = pdata->device_map;
xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
xdev->dma_dev.filter.fn = xdma_filter_fn;
+ xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic;
ret = dma_async_device_register(&xdev->dma_dev);
if (ret) {
@@ -966,7 +1127,7 @@ static struct platform_driver xdma_driver = {
},
.id_table = xdma_id_table,
.probe = xdma_probe,
- .remove = xdma_remove,
+ .remove_new = xdma_remove,
};
module_platform_driver(xdma_driver);
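With the changes above, the Xilinx XDMA driver gains cyclic transfer support: it advertises DMA_CYCLIC, wires up device_prep_dma_cyclic, reports residue at segment granularity through the new xdma_tx_status(), and signals each completed period from the interrupt handler via vchan_cyclic_callback(). A period is limited to XDMA_DESC_BLEN_MAX bytes and a transfer to XDMA_DESC_ADJACENT periods. A sketch of how a dmaengine consumer might use it, with the callback name and sizes purely illustrative:

#include <linux/dmaengine.h>

static void rx_period_done(void *ctx);	/* hypothetical per-period callback */

static int start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf_dma,
			   size_t buf_len, size_t period_len, void *ctx)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EINVAL;

	desc->callback = rx_period_done;
	desc->callback_param = ctx;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}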
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 0a3b2e22f23d..e40696f6f864 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -3242,10 +3242,8 @@ disable_clks:
/**
* xilinx_dma_remove - Driver remove function
* @pdev: Pointer to the platform_device structure
- *
- * Return: Always '0'
*/
-static int xilinx_dma_remove(struct platform_device *pdev)
+static void xilinx_dma_remove(struct platform_device *pdev)
{
struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
int i;
@@ -3259,8 +3257,6 @@ static int xilinx_dma_remove(struct platform_device *pdev)
xilinx_dma_chan_remove(xdev->chan[i]);
xdma_disable_allclks(xdev);
-
- return 0;
}
static struct platform_driver xilinx_vdma_driver = {
@@ -3269,7 +3265,7 @@ static struct platform_driver xilinx_vdma_driver = {
.of_match_table = xilinx_dma_of_ids,
},
.probe = xilinx_dma_probe,
- .remove = xilinx_dma_remove,
+ .remove_new = xilinx_dma_remove,
};
module_platform_driver(xilinx_vdma_driver);
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index 84dc5240a807..69587d85a7cd 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -1736,7 +1736,7 @@ error:
return ret;
}
-static int xilinx_dpdma_remove(struct platform_device *pdev)
+static void xilinx_dpdma_remove(struct platform_device *pdev)
{
struct xilinx_dpdma_device *xdev = platform_get_drvdata(pdev);
unsigned int i;
@@ -1751,8 +1751,6 @@ static int xilinx_dpdma_remove(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
xilinx_dpdma_chan_remove(xdev->chan[i]);
-
- return 0;
}
static const struct of_device_id xilinx_dpdma_of_match[] = {
@@ -1763,7 +1761,7 @@ MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match);
static struct platform_driver xilinx_dpdma_driver = {
.probe = xilinx_dpdma_probe,
- .remove = xilinx_dpdma_remove,
+ .remove_new = xilinx_dpdma_remove,
.driver = {
.name = "xilinx-zynqmp-dpdma",
.of_match_table = xilinx_dpdma_of_match,
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index bd8c3cc2eaab..f31631bef961 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -1147,7 +1147,7 @@ err_disable_pm:
*
* Return: Always '0'
*/
-static int zynqmp_dma_remove(struct platform_device *pdev)
+static void zynqmp_dma_remove(struct platform_device *pdev)
{
struct zynqmp_dma_device *zdev = platform_get_drvdata(pdev);
@@ -1158,8 +1158,6 @@ static int zynqmp_dma_remove(struct platform_device *pdev)
pm_runtime_disable(zdev->dev);
if (!pm_runtime_enabled(zdev->dev))
zynqmp_dma_runtime_suspend(zdev->dev);
-
- return 0;
}
static const struct of_device_id zynqmp_dma_of_match[] = {
@@ -1175,7 +1173,7 @@ static struct platform_driver zynqmp_dma_driver = {
.pm = &zynqmp_dma_dev_pm_ops,
},
.probe = zynqmp_dma_probe,
- .remove = zynqmp_dma_remove,
+ .remove_new = zynqmp_dma_remove,
};
module_platform_driver(zynqmp_dma_driver);
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 8de9023c2a38..5f869eacd19a 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -191,4 +191,15 @@ config EXTCON_USBC_TUSB320
Say Y here to enable support for USB Type C cable detection extcon
support using a TUSB320.
+config EXTCON_RTK_TYPE_C
+ tristate "Realtek RTD SoC extcon Type-C Driver"
+ depends on ARCH_REALTEK || COMPILE_TEST
+ depends on TYPEC
+ select USB_COMMON
+ help
+ Say Y here to enable extcon support for USB Type C cable detection
+ when using the Realtek RTD SoC USB Type-C port.
+ The DHC (Digital Home Hub) RTD series SoC contains a type c module.
+ This driver will detect the status of the type-c port.
+
endif
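The new EXTCON_RTK_TYPE_C entry selects the Realtek Type-C cable-detection driver added later in this patch; it reports attach/detach and data role through the extcon framework as EXTCON_USB and EXTCON_USB_HOST states. A sketch of how a consumer (for example a USB controller glue driver) could query that state, using the generic extcon API and assuming an "extcon" phandle in its DT node:

#include <linux/device.h>
#include <linux/extcon.h>

static int foo_check_type_c_role(struct device *dev)
{
	struct extcon_dev *edev;

	edev = extcon_get_edev_by_phandle(dev, 0);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	if (extcon_get_state(edev, EXTCON_USB_HOST) > 0)
		dev_info(dev, "Type-C: host role (device attached)\n");
	else if (extcon_get_state(edev, EXTCON_USB) > 0)
		dev_info(dev, "Type-C: device role (host attached)\n");
	else
		dev_info(dev, "Type-C: nothing attached\n");

	return 0;
}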
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 1b390d934ca9..f779adb5e4c7 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -25,3 +25,4 @@ obj-$(CONFIG_EXTCON_SM5502) += extcon-sm5502.o
obj-$(CONFIG_EXTCON_USB_GPIO) += extcon-usb-gpio.o
obj-$(CONFIG_EXTCON_USBC_CROS_EC) += extcon-usbc-cros-ec.o
obj-$(CONFIG_EXTCON_USBC_TUSB320) += extcon-usbc-tusb320.o
+obj-$(CONFIG_EXTCON_RTK_TYPE_C) += extcon-rtk-type-c.o
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 1f1d9ab0c5c7..2c567e0b7b7f 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -1258,9 +1258,16 @@ static int max77693_muic_probe(struct platform_device *pdev)
return ret;
}
+static const struct of_device_id of_max77693_muic_dt_match[] = {
+ { .compatible = "maxim,max77693-muic", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, of_max77693_muic_dt_match);
+
static struct platform_driver max77693_muic_driver = {
.driver = {
.name = DEV_NAME,
+ .of_match_table = of_max77693_muic_dt_match,
},
.probe = max77693_muic_probe,
};
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index 1bc0426ce3f1..acb11a54f875 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -946,9 +946,16 @@ static const struct platform_device_id max77843_muic_id[] = {
};
MODULE_DEVICE_TABLE(platform, max77843_muic_id);
+static const struct of_device_id of_max77843_muic_dt_match[] = {
+ { .compatible = "maxim,max77843-muic", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, of_max77843_muic_dt_match);
+
static struct platform_driver max77843_muic_driver = {
.driver = {
.name = "max77843-muic",
+ .of_match_table = of_max77843_muic_dt_match,
},
.probe = max77843_muic_probe,
.remove = max77843_muic_remove,
diff --git a/drivers/extcon/extcon-rtk-type-c.c b/drivers/extcon/extcon-rtk-type-c.c
new file mode 100644
index 000000000000..a592bab77538
--- /dev/null
+++ b/drivers/extcon/extcon-rtk-type-c.c
@@ -0,0 +1,1794 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * * extcon-rtk-type-c.c - Realtek Extcon Type C driver
+ *
+ * Copyright (C) 2023 Realtek Semiconductor Corporation
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/syscalls.h>
+#include <linux/suspend.h>
+#include <linux/debugfs.h>
+#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
+#include <linux/sys_soc.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/gpio/consumer.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/typec.h>
+
+struct cc_param {
+ u32 rp_4p7k_code;
+ u32 rp_36k_code;
+ u32 rp_12k_code;
+ u32 rd_code;
+ u32 ra_code;
+ u32 vref_2p6v;
+ u32 vref_1p23v;
+ u32 vref_0p8v;
+ u32 vref_0p66v;
+ u32 vref_0p4v;
+ u32 vref_0p2v;
+ u32 vref_1_1p6v;
+ u32 vref_0_1p6v;
+};
+
+struct type_c_cfg {
+ int parameter_ver; /* Parameter version */
+ int cc_dfp_mode;
+ struct cc_param cc1_param;
+ struct cc_param cc2_param;
+
+ u32 debounce_val;
+ bool use_defalut_parameter;
+};
+
+struct type_c_data {
+ void __iomem *reg_base;
+ struct device *dev;
+ struct extcon_dev *edev;
+
+ u32 irq;
+
+ /* rd control GPIO only for rtd1295 */
+ struct gpio_desc *rd_ctrl_gpio_desc;
+
+ /* Parameters */
+ struct type_c_cfg *type_c_cfg;
+ u32 dfp_mode_rp_en;
+ u32 ufp_mode_rd_en;
+ u32 cc1_code;
+ u32 cc2_code;
+ u32 cc1_vref;
+ u32 cc2_vref;
+ u32 debounce; /* 1b,1us 7f,4.7us */
+
+ /* type_c state */
+ int connect_change;
+#define CONNECT_CHANGE 1
+#define CONNECT_NO_CHANGE 0
+ int cc_mode; /* cc is host or device */
+#define IN_HOST_MODE 0x10
+#define IN_DEVICE_MODE 0x20
+ int is_attach;
+#define IN_ATTACH 1
+#define TO_ATTACH 1
+#define IN_DETACH 0
+#define TO_DETACH 0
+ int at_cc1;
+#define AT_CC1 1
+#define AT_CC2 0
+
+ u32 int_status;
+ u32 cc_status;
+ /* protect the data member */
+ spinlock_t lock;
+ struct delayed_work delayed_work;
+
+ bool rd_en_at_first;
+
+ struct dentry *debug_dir;
+
+ struct typec_port *port;
+};
+
+/* Type C register offset */
+#define USB_TYPEC_CTRL_CC1_0 0x0
+#define USB_TYPEC_CTRL_CC1_1 0x4
+#define USB_TYPEC_CTRL_CC2_0 0x8
+#define USB_TYPEC_CTRL_CC2_1 0xC
+#define USB_TYPEC_STS 0x10
+#define USB_TYPEC_CTRL 0x14
+#define USB_DBUS_PWR_CTRL 0x18
+
+#define ENABLE_CC1 0x1
+#define ENABLE_CC2 0x2
+#define DISABLE_CC 0x0
+
+/* Bit mapping USB_TYPEC_CTRL_CC1_0 and USB_TYPEC_CTRL_CC2_0 */
+#define PLR_EN BIT(29)
+#define CC_SWITCH_MASK (BIT(29) | BIT(28) | BIT(27))
+#define CC_CODE_MASK (0xfffff << 7)
+#define rp4pk_code(val) ((0x1f & (val)) << 22)
+#define code_rp4pk(val) (((val) >> 22) & 0x1f)
+#define rp36k_code(val) ((0x1f & (val)) << 17)
+#define code_rp36k(val) (((val) >> 17) & 0x1f)
+#define rp12k_code(val) ((0x1f & (val)) << 12)
+#define code_rp12k(val) (((val) >> 12) & 0x1f)
+#define rd_code(val) ((0x1f & (val)) << 7)
+#define code_rd(val) (((val) >> 7) & 0x1f)
+#define dfp_mode(val) ((0x3 & (val)) << 5)
+#define EN_RP4P7K BIT(4)
+#define EN_RP36K BIT(3)
+#define EN_RP12K BIT(2)
+#define EN_RD BIT(1)
+#define EN_CC_DET BIT(0)
+
+#define CC_MODE_UFP 0x0
+#define CC_MODE_DFP_USB 0x1
+#define CC_MODE_DFP_1_5 0x2
+#define CC_MODE_DFP_3_0 0x3
+
+/*
+ * PARAMETER_V0:
+ * Realtek Kylin rtd1295
+ * Realtek Hercules rtd1395
+ * Realtek Thor rtd1619
+ * Realtek Hank rtd1319
+ * Realtek Groot rtd1312c
+ * PARAMETER_V1:
+ * Realtek Stark rtd1619b
+ * Realtek Parker rtd1319d
+ * Realtek Danvers rtd1315e
+ */
+enum parameter_version {
+ PARAMETER_V0 = 0,
+ PARAMETER_V1 = 1,
+};
+
+/* Bit mapping USB_TYPEC_CTRL_CC1_1 and USB_TYPEC_CTRL_CC2_1 */
+#define V0_vref_2p6v(val) ((0xf & (val)) << 26) /* Bit 29 for groot */
+#define V0_vref_1p23v(val) ((0xf & (val)) << 22)
+#define V0_vref_0p8v(val) ((0xf & (val)) << 18)
+#define V0_vref_0p66v(val) ((0xf & (val)) << 14)
+#define V0_vref_0p4v(val) ((0x7 & (val)) << 11)
+#define V0_vref_0p2v(val) ((0x7 & (val)) << 8)
+#define V0_vref_1_1p6v(val) ((0xf & (val)) << 4)
+#define V0_vref_0_1p6v(val) ((0xf & (val)) << 0)
+
+#define V0_decode_2p6v(val) (((val) >> 26) & 0xf) /* Bit 29 for groot */
+#define V0_decode_1p23v(val) (((val) >> 22) & 0xf)
+#define V0_decode_0p8v(val) (((val) >> 18) & 0xf)
+#define V0_decode_0p66v(val) (((val) >> 14) & 0xf)
+#define V0_decode_0p4v(val) (((val) >> 11) & 0x7)
+#define V0_decode_0p2v(val) (((val) >> 8) & 0x7)
+#define V0_decode_1_1p6v(val) (((val) >> 4) & 0xf)
+#define V0_decode_0_1p6v(val) (((val) >> 0) & 0xf)
+
+/* new Bit mapping USB_TYPEC_CTRL_CC1_1 and USB_TYPEC_CTRL_CC2_1 */
+#define V1_vref_2p6v(val) ((0xf & (val)) << 28)
+#define V1_vref_1p23v(val) ((0xf & (val)) << 24)
+#define V1_vref_0p8v(val) ((0xf & (val)) << 20)
+#define V1_vref_0p66v(val) ((0xf & (val)) << 16)
+#define V1_vref_0p4v(val) ((0xf & (val)) << 12)
+#define V1_vref_0p2v(val) ((0xf & (val)) << 8)
+#define V1_vref_1_1p6v(val) ((0xf & (val)) << 4)
+#define V1_vref_0_1p6v(val) ((0xf & (val)) << 0)
+
+#define V1_decode_2p6v(val) (((val) >> 28) & 0xf)
+#define V1_decode_1p23v(val) (((val) >> 24) & 0xf)
+#define V1_decode_0p8v(val) (((val) >> 20) & 0xf)
+#define V1_decode_0p66v(val) (((val) >> 16) & 0xf)
+#define V1_decode_0p4v(val) (((val) >> 12) & 0xf)
+#define V1_decode_0p2v(val) (((val) >> 8) & 0xf)
+#define V1_decode_1_1p6v(val) (((val) >> 4) & 0xf)
+#define V1_decode_0_1p6v(val) (((val) >> 0) & 0xf)
+
+/* Bit mapping USB_TYPEC_STS */
+#define DET_STS 0x7
+#define CC1_DET_STS (DET_STS)
+#define CC2_DET_STS (DET_STS << 3)
+#define DET_STS_RA 0x1
+#define DET_STS_RD 0x3
+#define DET_STS_RP 0x1
+#define CC1_DET_STS_RA (DET_STS_RA)
+#define CC1_DET_STS_RD (DET_STS_RD)
+#define CC1_DET_STS_RP (DET_STS_RP)
+#define CC2_DET_STS_RA (DET_STS_RA << 3)
+#define CC2_DET_STS_RD (DET_STS_RD << 3)
+#define CC2_DET_STS_RP (DET_STS_RP << 3)
+
+/* Bit mapping USB_TYPEC_CTRL */
+#define CC2_INT_EN BIT(11)
+#define CC1_INT_EN BIT(10)
+#define CC2_INT_STS BIT(9)
+#define CC1_INT_STS BIT(8)
+#define DEBOUNCE_TIME_MASK 0xff
+#define DEBOUNCE_EN BIT(0)
+#define ENABLE_TYPE_C_DETECT (CC1_INT_EN | CC2_INT_EN)
+#define ALL_CC_INT_STS (CC1_INT_STS | CC2_INT_STS)
+
+/* Parameter */
+#define DETECT_TIME 50 /* ms */
+
+static const unsigned int usb_type_c_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_NONE,
+};
+
+enum usb_data_roles {
+ DR_NONE,
+ DR_HOST,
+ DR_DEVICE,
+};
+
+static const struct soc_device_attribute rtk_soc_kylin[] = {
+ { .family = "Realtek Kylin", },
+ { /* empty */ }
+};
+
+static int rtd129x_switch_type_c_plug_config(struct type_c_data *type_c,
+ int dr_mode, int cc)
+{
+ void __iomem *reg = type_c->reg_base + USB_TYPEC_CTRL_CC1_0;
+ int val_cc;
+
+#define TYPE_C_EN_SWITCH BIT(29)
+#define TYPE_C_TXRX_SEL (BIT(28) | BIT(27))
+#define TYPE_C_SWITCH_MASK (TYPE_C_EN_SWITCH | TYPE_C_TXRX_SEL)
+#define TYPE_C_ENABLE_CC1 TYPE_C_EN_SWITCH
+#define TYPE_C_ENABLE_CC2 (TYPE_C_EN_SWITCH | TYPE_C_TXRX_SEL)
+#define TYPE_C_DISABLE_CC ~TYPE_C_SWITCH_MASK
+
+ val_cc = readl(reg);
+ val_cc &= ~TYPE_C_SWITCH_MASK;
+
+ if (cc == DISABLE_CC) {
+ val_cc &= TYPE_C_DISABLE_CC;
+ } else if (cc == ENABLE_CC1) {
+ val_cc |= TYPE_C_ENABLE_CC1;
+ } else if (cc == ENABLE_CC2) {
+ val_cc |= TYPE_C_ENABLE_CC2;
+ } else {
+ dev_err(type_c->dev, "%s: Error cc setting cc=0x%x\n", __func__, cc);
+ return -EINVAL;
+ }
+ writel(val_cc, reg);
+
+ /* waiting cc stable for enable/disable */
+ mdelay(1);
+
+ dev_dbg(type_c->dev, "%s: cc=0x%x val_cc=0x%x usb_typec_ctrl_cc1_0=0x%x\n",
+ __func__, cc, val_cc, readl(reg));
+
+ return 0;
+}
+
+static inline void switch_type_c_plug_config(struct type_c_data *type_c,
+ int dr_mode, int cc)
+{
+ int ret = 0;
+
+ if (soc_device_match(rtk_soc_kylin))
+ ret = rtd129x_switch_type_c_plug_config(type_c, dr_mode, cc);
+
+ if (ret < 0)
+ dev_err(type_c->dev, "%s: Error set type c plug config\n",
+ __func__);
+}
+
+static void switch_type_c_dr_mode(struct type_c_data *type_c, int dr_mode, int cc)
+{
+ bool is_host = false;
+ bool is_device = false;
+ bool polarity = false;
+ bool vbus = false;
+ bool ss = true;
+
+ switch_type_c_plug_config(type_c, dr_mode, cc);
+ if (cc == ENABLE_CC2)
+ polarity = true;
+
+ switch (dr_mode) {
+ case USB_DR_MODE_HOST:
+ is_host = true;
+ break;
+ case USB_DR_MODE_PERIPHERAL:
+ is_device = true;
+ vbus = true;
+ break;
+ default:
+ dev_dbg(type_c->dev, "%s dr_mode=%d ==> no host or device\n",
+ __func__, dr_mode);
+ break;
+ }
+
+ dev_dbg(type_c->dev, "%s is_host=%d is_device=%d vbus=%d polarity=%d\n",
+ __func__, is_host, is_device, vbus, polarity);
+
+ /* for EXTCON_USB device mode */
+ extcon_set_state(type_c->edev, EXTCON_USB, is_device);
+ extcon_set_property(type_c->edev, EXTCON_USB,
+ EXTCON_PROP_USB_VBUS,
+ (union extcon_property_value)(int)vbus);
+ extcon_set_property(type_c->edev, EXTCON_USB,
+ EXTCON_PROP_USB_TYPEC_POLARITY,
+ (union extcon_property_value)(int)polarity);
+ extcon_set_property(type_c->edev, EXTCON_USB,
+ EXTCON_PROP_USB_SS,
+ (union extcon_property_value)(int)ss);
+
+ /* for EXTCON_USB_HOST host mode */
+ extcon_set_state(type_c->edev, EXTCON_USB_HOST, is_host);
+ extcon_set_property(type_c->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_VBUS,
+ (union extcon_property_value)(int)vbus);
+ extcon_set_property(type_c->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_TYPEC_POLARITY,
+ (union extcon_property_value)(int)polarity);
+ extcon_set_property(type_c->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_SS,
+ (union extcon_property_value)(int)ss);
+
+ /* sync EXTCON_USB and EXTCON_USB_HOST */
+ extcon_sync(type_c->edev, EXTCON_USB);
+ extcon_sync(type_c->edev, EXTCON_USB_HOST);
+
+ if (type_c->port) {
+ switch (dr_mode) {
+ case USB_DR_MODE_HOST:
+ typec_set_data_role(type_c->port, TYPEC_HOST);
+ typec_set_pwr_role(type_c->port, TYPEC_SOURCE);
+ break;
+ case USB_DR_MODE_PERIPHERAL:
+ typec_set_data_role(type_c->port, TYPEC_DEVICE);
+ typec_set_pwr_role(type_c->port, TYPEC_SINK);
+ break;
+ default:
+ dev_dbg(type_c->dev, "%s unknown dr_mode=%d\n",
+ __func__, dr_mode);
+ break;
+ }
+ }
+}
+
+/* connector attached/detached */
+static int connector_attached(struct type_c_data *type_c, u32 cc, int dr_mode)
+{
+ void __iomem *reg = type_c->reg_base + USB_TYPEC_CTRL;
+
+ cancel_delayed_work(&type_c->delayed_work);
+
+ switch_type_c_dr_mode(type_c, dr_mode, cc);
+
+ writel(ENABLE_TYPE_C_DETECT | readl(reg), reg);
+
+ return 0;
+}
+
+static int connector_detached(struct type_c_data *type_c, u32 cc, int dr_mode)
+{
+ void __iomem *reg = type_c->reg_base + USB_TYPEC_CTRL;
+
+ writel(~ENABLE_TYPE_C_DETECT & readl(reg), reg);
+
+ switch_type_c_dr_mode(type_c, 0, cc);
+
+ schedule_delayed_work(&type_c->delayed_work, msecs_to_jiffies(DETECT_TIME));
+
+ return 0;
+}
+
+/* detect host device switch */
+static int __detect_host_device(struct type_c_data *type_c, u32 rp_or_rd_en)
+{
+ struct device *dev = type_c->dev;
+ void __iomem *reg_base = type_c->reg_base;
+ u32 cc1_config, cc2_config, default_ctrl;
+ u32 cc1_switch = 0;
+
+ default_ctrl = readl(reg_base + USB_TYPEC_CTRL) & DEBOUNCE_TIME_MASK;
+ writel(default_ctrl, reg_base + USB_TYPEC_CTRL);
+
+ cc1_config = readl(reg_base + USB_TYPEC_CTRL_CC1_0);
+ cc2_config = readl(reg_base + USB_TYPEC_CTRL_CC2_0);
+
+ cc1_config &= ~EN_CC_DET;
+ cc2_config &= ~EN_CC_DET;
+ writel(cc1_config, reg_base + USB_TYPEC_CTRL_CC1_0);
+ writel(cc2_config, reg_base + USB_TYPEC_CTRL_CC2_0);
+
+ if (soc_device_match(rtk_soc_kylin))
+ cc1_switch = cc1_config & CC_SWITCH_MASK;
+
+ cc1_config &= CC_CODE_MASK;
+ cc1_config |= rp_or_rd_en | cc1_switch;
+ cc2_config &= CC_CODE_MASK;
+ cc2_config |= rp_or_rd_en;
+ writel(cc2_config, reg_base + USB_TYPEC_CTRL_CC2_0);
+ writel(cc1_config, reg_base + USB_TYPEC_CTRL_CC1_0);
+
+ /* For kylin to disable external rd control gpio */
+ if (soc_device_match(rtk_soc_kylin)) {
+ struct gpio_desc *gpio = type_c->rd_ctrl_gpio_desc;
+
+ if (gpio && gpiod_direction_output(gpio, 1))
+ dev_err(dev, "%s ERROR set rd_ctrl_gpio_desc fail\n", __func__);
+ }
+
+ cc1_config |= EN_CC_DET;
+ cc2_config |= EN_CC_DET;
+ writel(cc1_config, reg_base + USB_TYPEC_CTRL_CC1_0);
+ writel(cc2_config, reg_base + USB_TYPEC_CTRL_CC2_0);
+
+ return 0;
+}
+
+static int detect_device(struct type_c_data *type_c)
+{
+ return __detect_host_device(type_c, type_c->dfp_mode_rp_en);
+}
+
+static int detect_host(struct type_c_data *type_c)
+{
+ return __detect_host_device(type_c, type_c->ufp_mode_rd_en);
+}
+
+static int host_device_switch_detection(struct type_c_data *type_c)
+{
+ if (type_c->cc_mode == IN_HOST_MODE) {
+ type_c->cc_mode = IN_DEVICE_MODE;
+ detect_host(type_c);
+ } else {
+ type_c->cc_mode = IN_HOST_MODE;
+ detect_device(type_c);
+ }
+
+ return 0;
+}
+
+static int detect_type_c_state(struct type_c_data *type_c)
+{
+ struct device *dev = type_c->dev;
+ void __iomem *reg_base = type_c->reg_base;
+ u32 int_status, cc_status, cc_status_check;
+ unsigned long flags;
+
+ spin_lock_irqsave(&type_c->lock, flags);
+
+ int_status = readl(reg_base + USB_TYPEC_CTRL);
+ cc_status = readl(reg_base + USB_TYPEC_STS);
+
+ type_c->connect_change = CONNECT_NO_CHANGE;
+
+ switch (type_c->cc_mode | type_c->is_attach) {
+ case IN_HOST_MODE | IN_ATTACH:
+ if (((cc_status & CC1_DET_STS) == CC1_DET_STS) && type_c->at_cc1 == AT_CC1) {
+ dev_dbg(dev, "IN host mode and cc1 device detach (cc_status=0x%x)",
+ cc_status);
+ type_c->is_attach = TO_DETACH;
+ type_c->connect_change = CONNECT_CHANGE;
+ } else if (((cc_status & CC2_DET_STS) == CC2_DET_STS) &&
+ type_c->at_cc1 == AT_CC2) {
+ dev_dbg(dev, "IN host mode and cc2 device detach (cc_status=0x%x)",
+ cc_status);
+ type_c->is_attach = TO_DETACH;
+ type_c->connect_change = CONNECT_CHANGE;
+ }
+ break;
+ case IN_HOST_MODE | IN_DETACH:
+ cc_status_check = readl(reg_base + USB_TYPEC_STS);
+ if (cc_status_check != (CC1_DET_STS | CC2_DET_STS)) {
+ if (in_interrupt()) {
+ /* Add delay time to avoid capacitive effect of cable. */
+ mdelay(300);
+ } else {
+ spin_unlock_irqrestore(&type_c->lock, flags);
+ /* Add delay time to avoid capacitive effect of cable. */
+ msleep(300);
+ spin_lock_irqsave(&type_c->lock, flags);
+ }
+ cc_status_check = readl(reg_base + USB_TYPEC_STS);
+ }
+ if (cc_status != cc_status_check) {
+ dev_warn(dev, "IN_HOST_MODE: cc_status (0x%x) != cc_status_check (0x%x)\n",
+ cc_status, cc_status_check);
+ cc_status = readl(reg_base + USB_TYPEC_STS);
+ }
+
+ if ((cc_status & CC1_DET_STS) == CC1_DET_STS_RD) {
+ dev_dbg(dev, "IN host mode and cc1 device attach (cc_status=0x%x)",
+ cc_status);
+ type_c->is_attach = TO_ATTACH;
+ type_c->at_cc1 = AT_CC1;
+ type_c->connect_change = CONNECT_CHANGE;
+ } else if ((cc_status & CC2_DET_STS) == CC2_DET_STS_RD) {
+ dev_dbg(dev, "In host mode and cc2 device attach (cc_status=0x%x)",
+ cc_status);
+ type_c->is_attach = TO_ATTACH;
+ type_c->at_cc1 = AT_CC2;
+ type_c->connect_change = CONNECT_CHANGE;
+ }
+ break;
+ case IN_DEVICE_MODE | IN_ATTACH:
+ if ((cc_status & CC1_DET_STS) < CC1_DET_STS_RP ||
+ (cc_status & CC2_DET_STS) < CC2_DET_STS_RP) {
+ /* Add a sw debounce to filter cc signal sent from apple pd adapter */
+ mdelay(5);
+ cc_status_check = readl(reg_base + USB_TYPEC_STS);
+
+ if (cc_status != cc_status_check) {
+ dev_dbg(dev, "IN_DEVICE_MODE: cc_status (0x%x) != cc_status_check (0x%x) maybe use a pd adapter\n",
+ cc_status, cc_status_check);
+ cc_status = cc_status_check;
+ }
+ }
+
+ if ((cc_status & CC1_DET_STS) < CC1_DET_STS_RP && type_c->at_cc1 == AT_CC1) {
+ dev_dbg(dev, "IN device mode and cc1 host disconnect (cc_status=0x%x)",
+ cc_status);
+ type_c->is_attach = TO_DETACH;
+ type_c->connect_change = CONNECT_CHANGE;
+ } else if ((cc_status & CC2_DET_STS) < CC2_DET_STS_RP &&
+ type_c->at_cc1 == AT_CC2) {
+ dev_dbg(dev, "IN device mode and cc2 host disconnect (cc_status=0x%x)",
+ cc_status);
+ type_c->is_attach = TO_DETACH;
+ type_c->connect_change = CONNECT_CHANGE;
+ }
+ break;
+ case IN_DEVICE_MODE | IN_DETACH:
+ cc_status_check = readl(reg_base + USB_TYPEC_STS);
+ if (cc_status_check != 0x0) {
+ if (in_interrupt()) {
+ /* Add delay time to avoid capacitive effect of cable. */
+ mdelay(300);
+ } else {
+ spin_unlock_irqrestore(&type_c->lock, flags);
+ /* Add delay time to avoid capacitive effect of cable. */
+ msleep(300);
+ spin_lock_irqsave(&type_c->lock, flags);
+ }
+ cc_status_check = readl(reg_base + USB_TYPEC_STS);
+ }
+
+ if (cc_status != cc_status_check) {
+ dev_warn(dev, "IN_DEVICE_MODE: cc_status (0x%x) != cc_status_check (0x%x)\n",
+ cc_status, cc_status_check);
+ cc_status = readl(reg_base + USB_TYPEC_STS);
+ }
+
+ if ((cc_status & CC1_DET_STS) >= CC1_DET_STS_RP) {
+ dev_dbg(dev, "IN device mode and cc1 host connect (cc_status=0x%x)",
+ cc_status);
+ type_c->at_cc1 = AT_CC1;
+ type_c->is_attach = TO_ATTACH;
+ type_c->connect_change = CONNECT_CHANGE;
+ } else if ((cc_status & CC2_DET_STS) >= CC2_DET_STS_RP) {
+ dev_dbg(dev, "IN device mode and cc2 host connect (cc_status=0x%x)",
+ cc_status);
+ type_c->at_cc1 = AT_CC2;
+ type_c->is_attach = TO_ATTACH;
+ type_c->connect_change = CONNECT_CHANGE;
+ }
+ break;
+ default:
+ dev_err(dev, "error host or device mode (cc_mode=%d, is_attach=%d) ",
+ type_c->cc_mode, type_c->is_attach);
+ }
+
+ type_c->int_status = int_status;
+ type_c->cc_status = cc_status;
+
+ spin_unlock_irqrestore(&type_c->lock, flags);
+ return 0;
+}
+
+static void host_device_switch(struct work_struct *work)
+{
+ struct type_c_data *type_c = container_of(work, struct type_c_data,
+ delayed_work.work);
+ struct device *dev = type_c->dev;
+ unsigned long flags;
+ int connect_change = 0;
+ int cc_mode = 0;
+ int is_attach = 0;
+ int at_cc1 = 0;
+
+ spin_lock_irqsave(&type_c->lock, flags);
+ if (type_c->connect_change)
+ connect_change = type_c->connect_change;
+ spin_unlock_irqrestore(&type_c->lock, flags);
+
+ if (!connect_change)
+ detect_type_c_state(type_c);
+
+ spin_lock_irqsave(&type_c->lock, flags);
+ if (type_c->connect_change) {
+ connect_change = type_c->connect_change;
+ cc_mode = type_c->cc_mode;
+ is_attach = type_c->is_attach;
+ at_cc1 = type_c->at_cc1;
+ type_c->connect_change = CONNECT_NO_CHANGE;
+ } else {
+ host_device_switch_detection(type_c);
+
+ schedule_delayed_work(&type_c->delayed_work, msecs_to_jiffies(DETECT_TIME));
+ }
+ spin_unlock_irqrestore(&type_c->lock, flags);
+
+ if (!connect_change)
+ return;
+
+ dev_dbg(dev, "%s: usb cable connection change\n", __func__);
+ if (cc_mode == IN_HOST_MODE) {
+ if (is_attach && at_cc1)
+ connector_attached(type_c, ENABLE_CC1, USB_DR_MODE_HOST);
+ else if (is_attach && !at_cc1)
+ connector_attached(type_c, ENABLE_CC2, USB_DR_MODE_HOST);
+ else
+ connector_detached(type_c, DISABLE_CC, USB_DR_MODE_HOST);
+ } else if (cc_mode == IN_DEVICE_MODE) {
+ if (is_attach && at_cc1)
+ connector_attached(type_c, ENABLE_CC1, USB_DR_MODE_PERIPHERAL);
+ else if (is_attach && !at_cc1)
+ connector_attached(type_c, ENABLE_CC2, USB_DR_MODE_PERIPHERAL);
+ else
+ connector_detached(type_c, DISABLE_CC, USB_DR_MODE_PERIPHERAL);
+ } else {
+ dev_err(dev, "Error: IN unknown mode %d to %s at %s (cc_status=0x%x)\n",
+ cc_mode, is_attach ? "attach" : "detach",
+ at_cc1 ? "cc1" : "cc2", type_c->cc_status);
+ }
+ dev_info(dev, "Connection change OK: IN %s mode to %s at %s (cc_status=0x%x)\n",
+ cc_mode == IN_HOST_MODE ? "host" : "device",
+ is_attach ? "attach" : "detach",
+ at_cc1 ? "cc1" : "cc2", type_c->cc_status);
+}
+
+static irqreturn_t type_c_detect_irq(int irq, void *__data)
+{
+ struct type_c_data *type_c = (struct type_c_data *)__data;
+ struct device *dev = type_c->dev;
+ void __iomem *reg = type_c->reg_base + USB_TYPEC_CTRL;
+ unsigned long flags;
+
+ detect_type_c_state(type_c);
+
+ spin_lock_irqsave(&type_c->lock, flags);
+
+ if (type_c->connect_change) {
+ dev_dbg(dev, "%s: IN %s mode to %s (at %s interrupt) int_status=0x%x, cc_status=0x%x",
+ __func__,
+ type_c->cc_mode == IN_HOST_MODE ? "host" : "device",
+ type_c->is_attach ? "attach" : "detach",
+ type_c->at_cc1 ? "cc1" : "cc2",
+ type_c->int_status, type_c->cc_status);
+
+ /* clear interrupt status */
+ writel(~ALL_CC_INT_STS & readl(reg), reg);
+
+ cancel_delayed_work(&type_c->delayed_work);
+ schedule_delayed_work(&type_c->delayed_work, msecs_to_jiffies(0));
+ } else {
+ static int local_count;
+
+ /* if no connect_change, we keep the status to avoid status lose */
+ if (local_count++ > 10) {
+ /* clear interrupt status */
+ writel(~ALL_CC_INT_STS & readl(reg), reg);
+ local_count = 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&type_c->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int type_c_port_dr_set(struct typec_port *port,
+ enum typec_data_role role)
+{
+ struct type_c_data *type_c = typec_get_drvdata(port);
+ u32 enable_cc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&type_c->lock, flags);
+ enable_cc = type_c->at_cc1 ? ENABLE_CC1 : ENABLE_CC2;
+ spin_unlock_irqrestore(&type_c->lock, flags);
+
+ if (role == TYPEC_HOST)
+ switch_type_c_dr_mode(type_c, USB_DR_MODE_HOST, enable_cc);
+ else if (role == TYPEC_DEVICE)
+ switch_type_c_dr_mode(type_c, USB_DR_MODE_PERIPHERAL, enable_cc);
+ else
+ switch_type_c_dr_mode(type_c, 0, DISABLE_CC);
+
+ return 0;
+}
+
+static const struct typec_operations type_c_port_ops = {
+ .dr_set = type_c_port_dr_set,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static int type_c_parameter_show(struct seq_file *s, void *unused)
+{
+ struct type_c_data *type_c = s->private;
+ struct type_c_cfg *type_c_cfg = type_c->type_c_cfg;
+ struct cc_param *cc_param;
+ unsigned long flags;
+
+ spin_lock_irqsave(&type_c->lock, flags);
+
+ seq_printf(s, "cc_dfp_mode %s\n",
+ ({ char *tmp;
+ switch (type_c_cfg->cc_dfp_mode) {
+ case CC_MODE_DFP_USB:
+ tmp = "CC_MODE_DFP_USB"; break;
+ case CC_MODE_DFP_1_5:
+ tmp = "CC_MODE_DFP_1_5"; break;
+ case CC_MODE_DFP_3_0:
+ tmp = "CC_MODE_DFP_3_0"; break;
+ default:
+ tmp = "?"; break;
+ } tmp; }));
+
+ seq_printf(s, "dfp_mode_rp_en 0x%x\n", type_c->dfp_mode_rp_en);
+ seq_printf(s, "ufp_mode_rd_en 0x%x\n", type_c->ufp_mode_rd_en);
+ seq_printf(s, "cc1_code 0x%x\n", type_c->cc1_code);
+ seq_printf(s, "cc2_code 0x%x\n", type_c->cc2_code);
+ seq_printf(s, "cc1_vref 0x%x\n", type_c->cc1_vref);
+ seq_printf(s, "cc2_vref 0x%x\n", type_c->cc2_vref);
+ seq_printf(s, "debounce 0x%x\n", type_c->debounce);
+ seq_puts(s, "\n");
+
+ cc_param = &type_c_cfg->cc1_param;
+ seq_puts(s, "cc1_param:\n");
+ seq_printf(s, " rp_4p7k_code 0x%x\n", cc_param->rp_4p7k_code);
+ seq_printf(s, " rp_36k_code 0x%x\n", cc_param->rp_36k_code);
+ seq_printf(s, " rp_12k_code 0x%x\n", cc_param->rp_12k_code);
+ seq_printf(s, " rd_code 0x%x\n", cc_param->rd_code);
+ seq_printf(s, " vref_2p6v 0x%x\n", cc_param->vref_2p6v);
+ seq_printf(s, " vref_1p23v 0x%x\n", cc_param->vref_1p23v);
+ seq_printf(s, " vref_0p8v 0x%x\n", cc_param->vref_0p8v);
+ seq_printf(s, " vref_0p66v 0x%x\n", cc_param->vref_0p66v);
+ seq_printf(s, " vref_0p4v 0x%x\n", cc_param->vref_0p4v);
+ seq_printf(s, " vref_0p2v 0x%x\n", cc_param->vref_0p2v);
+ seq_printf(s, " vref_1_1p6v 0x%x\n", cc_param->vref_1_1p6v);
+ seq_printf(s, " vref_0_1p6v 0x%x\n", cc_param->vref_0_1p6v);
+
+ cc_param = &type_c_cfg->cc2_param;
+ seq_puts(s, "cc2_param:\n");
+ seq_printf(s, " rp_4p7k_code 0x%x\n", cc_param->rp_4p7k_code);
+ seq_printf(s, " rp_36k_code 0x%x\n", cc_param->rp_36k_code);
+ seq_printf(s, " rp_12k_code 0x%x\n", cc_param->rp_12k_code);
+ seq_printf(s, " rd_code 0x%x\n", cc_param->rd_code);
+ seq_printf(s, " vref_2p6v 0x%x\n", cc_param->vref_2p6v);
+ seq_printf(s, " vref_1p23v 0x%x\n", cc_param->vref_1p23v);
+ seq_printf(s, " vref_0p8v 0x%x\n", cc_param->vref_0p8v);
+ seq_printf(s, " vref_0p66v 0x%x\n", cc_param->vref_0p66v);
+ seq_printf(s, " vref_0p4v 0x%x\n", cc_param->vref_0p4v);
+ seq_printf(s, " vref_0p2v 0x%x\n", cc_param->vref_0p2v);
+ seq_printf(s, " vref_1_1p6v 0x%x\n", cc_param->vref_1_1p6v);
+ seq_printf(s, " vref_0_1p6v 0x%x\n", cc_param->vref_0_1p6v);
+
+ spin_unlock_irqrestore(&type_c->lock, flags);
+
+ return 0;
+}
+
+static int type_c_parameter_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, type_c_parameter_show, inode->i_private);
+}
+
+static const struct file_operations type_c_parameter_fops = {
+ .open = type_c_parameter_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int type_c_status_show(struct seq_file *s, void *unused)
+{
+ struct type_c_data *type_c = s->private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&type_c->lock, flags);
+
+ seq_printf(s, "In %s mode %s at %s (cc_status=0x%x)\n",
+ type_c->cc_mode == IN_HOST_MODE ? "host" : "device",
+ type_c->is_attach ? "attach" : "detach",
+ type_c->at_cc1 ? "cc1" : "cc2", type_c->cc_status);
+
+ seq_printf(s, "Read Register (type_c_ctrl_cc1_0=0x%x)\n",
+ readl(type_c->reg_base + 0x0));
+ seq_printf(s, "Read Register (type_c_ctrl_cc1_1=0x%x)\n",
+ readl(type_c->reg_base + 0x4));
+ seq_printf(s, "Read Register (type_c_ctrl_cc2_0=0x%x)\n",
+ readl(type_c->reg_base + 0x8));
+ seq_printf(s, "Read Register (type_c_ctrl_cc2_1=0x%x)\n",
+ readl(type_c->reg_base + 0xc));
+ seq_printf(s, "Read Register (type_c_status=0x%x)\n",
+ readl(type_c->reg_base + 0x10));
+ seq_printf(s, "Read Register (type_c_ctrl=0x%x)\n",
+ readl(type_c->reg_base + 0x14));
+
+ spin_unlock_irqrestore(&type_c->lock, flags);
+
+ return 0;
+}
+
+static int type_c_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, type_c_status_show, inode->i_private);
+}
+
+static const struct file_operations type_c_status_fops = {
+ .open = type_c_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static inline void create_debug_files(struct type_c_data *type_c)
+{
+ type_c->debug_dir = debugfs_create_dir("type_c", usb_debug_root);
+
+ debugfs_create_file("parameter", 0444, type_c->debug_dir, type_c,
+ &type_c_parameter_fops);
+
+ debugfs_create_file("status", 0444, type_c->debug_dir, type_c,
+ &type_c_status_fops);
+}
+
+static inline void remove_debug_files(struct type_c_data *type_c)
+{
+ debugfs_remove_recursive(type_c->debug_dir);
+}
+#else
+static inline void create_debug_files(struct type_c_data *type_c) { }
+static inline void remove_debug_files(struct type_c_data *type_c) { }
+#endif /* CONFIG_DEBUG_FS */
+
+/* Init and probe */
+
+static inline s8 get_value(s8 value)
+{
+ return (((s8)value & 0x8) ? (-(s8)(0x7 & value)) : ((s8)(value)));
+}
+
+static int __updated_type_c_parameter_by_efuse(struct type_c_data *type_c)
+{
+ struct type_c_cfg *type_c_cfg = type_c->type_c_cfg;
+ struct cc_param *cc_param;
+ struct nvmem_cell *cell;
+ s8 cc1_4p7k = 0;
+ s8 cc1_12k = 0;
+ s8 cc1_0p2v = 0;
+ s8 cc1_0p8v = 0;
+ s8 cc1_2p6v = 0;
+ s8 cc1_0p66v = 0;
+ s8 cc1_1p23v = 0;
+ s8 cc2_4p7k = 0;
+ s8 cc2_12k = 0;
+ s8 cc2_0p2v = 0;
+ s8 cc2_0p8v = 0;
+ s8 cc2_2p6v = 0;
+ s8 cc2_0p66v = 0;
+ s8 cc2_1p23v = 0;
+
+ cell = nvmem_cell_get(type_c->dev, "usb-cal");
+ if (IS_ERR(cell)) {
+ dev_warn(type_c->dev, "%s failed to get usb-cal: %ld\n",
+ __func__, PTR_ERR(cell));
+ } else {
+ unsigned char *buf;
+ size_t buf_size;
+ int value_size = 4;
+ int value_mask = (BIT(value_size) - 1);
+
+ buf = nvmem_cell_read(cell, &buf_size);
+ if (!IS_ERR(buf)) {
+ cc1_0p2v = get_value((buf[0] >> value_size * 0) & value_mask);
+ cc1_0p8v = get_value((buf[0] >> value_size * 1) & value_mask);
+ cc1_2p6v = get_value((buf[1] >> value_size * 0) & value_mask);
+ cc1_0p66v = get_value((buf[1] >> value_size * 1) & value_mask);
+ cc1_1p23v = get_value((buf[2] >> value_size * 0) & value_mask);
+
+ cc2_0p2v = get_value((buf[3] >> value_size * 0) & value_mask);
+ cc2_0p8v = get_value((buf[3] >> value_size * 1) & value_mask);
+ cc2_2p6v = get_value((buf[4] >> value_size * 0) & value_mask);
+ cc2_0p66v = get_value((buf[4] >> value_size * 1) & value_mask);
+ cc2_1p23v = get_value((buf[5] >> value_size * 0) & value_mask);
+
+ cc1_4p7k = get_value((buf[6] >> value_size * 0) & value_mask);
+ cc1_12k = get_value((buf[6] >> value_size * 1) & value_mask);
+ cc2_4p7k = get_value((buf[7] >> value_size * 0) & value_mask);
+ cc2_12k = get_value((buf[7] >> value_size * 1) & value_mask);
+
+ kfree(buf);
+ }
+ nvmem_cell_put(cell);
+ }
+
+ dev_dbg(type_c->dev, "check efuse cc1_4p7k=%d cc1_12k=%d cc2_4p7k=%d cc2_12k=%d\n",
+ cc1_4p7k, cc1_12k, cc2_4p7k, cc2_12k);
+ dev_dbg(type_c->dev, "check efuse cc1_0p2v=%d cc1_0p8v=%d cc1_2p6v=%d cc1_0p66v=%d cc1_1p23v=%d\n",
+ cc1_0p2v, cc1_0p8v, cc1_2p6v, cc1_0p66v, cc1_1p23v);
+ dev_dbg(type_c->dev, "check efuse cc2_0p2v=%d cc2_0p8v=%d cc2_2p6v=%d cc2_0p66v=%d cc2_1p23v=%d\n",
+ cc2_0p2v, cc2_0p8v, cc2_2p6v, cc2_0p66v, cc2_1p23v);
+
+ cc_param = &type_c_cfg->cc1_param;
+ cc_param->rp_4p7k_code = cc_param->rp_4p7k_code + cc1_4p7k;
+ cc_param->rp_12k_code = cc_param->rp_12k_code + cc1_12k;
+
+ cc_param->vref_1p23v = cc_param->vref_1p23v + cc1_1p23v;
+ cc_param->vref_0p66v = cc_param->vref_0p66v + cc1_0p66v;
+ cc_param->vref_2p6v = cc_param->vref_2p6v + cc1_2p6v;
+ cc_param->vref_0p8v = cc_param->vref_0p8v + cc1_0p8v;
+ cc_param->vref_0p2v = cc_param->vref_0p2v + cc1_0p2v;
+
+ cc_param = &type_c_cfg->cc2_param;
+ cc_param->rp_4p7k_code = cc_param->rp_4p7k_code + cc2_4p7k;
+ cc_param->rp_12k_code = cc_param->rp_12k_code + cc2_12k;
+
+ cc_param->vref_1p23v = cc_param->vref_1p23v + cc2_1p23v;
+ cc_param->vref_0p66v = cc_param->vref_0p66v + cc2_0p66v;
+ cc_param->vref_2p6v = cc_param->vref_2p6v + cc2_2p6v;
+ cc_param->vref_0p8v = cc_param->vref_0p8v + cc2_0p8v;
+ cc_param->vref_0p2v = cc_param->vref_0p2v + cc2_0p2v;
+
+ return 0;
+}
+
+static int __updated_type_c_parameter_by_efuse_v2(struct type_c_data *type_c)
+{
+ struct type_c_cfg *type_c_cfg = type_c->type_c_cfg;
+ struct cc_param *cc_param;
+ struct nvmem_cell *cell;
+ s8 cc1_4p7k = 0;
+ s8 cc1_12k = 0;
+ s8 cc1_0p2v = 0;
+ s8 cc1_0p8v = 0;
+ s8 cc1_2p6v = 0;
+ s8 cc1_0p66v = 0;
+ s8 cc1_1p23v = 0;
+ s8 cc2_4p7k = 0;
+ s8 cc2_12k = 0;
+ s8 cc2_0p2v = 0;
+ s8 cc2_0p8v = 0;
+ s8 cc2_2p6v = 0;
+ s8 cc2_0p66v = 0;
+ s8 cc2_1p23v = 0;
+
+ cell = nvmem_cell_get(type_c->dev, "usb-type-c-cal");
+ if (IS_ERR(cell)) {
+ dev_warn(type_c->dev, "%s failed to get usb-type-c-cal: %ld\n",
+ __func__, PTR_ERR(cell));
+ } else {
+ unsigned char *buf;
+ size_t buf_size;
+ int value_size = 0;
+ int value_mask = (BIT(value_size) - 1);
+
+ buf = nvmem_cell_read(cell, &buf_size);
+ if (!IS_ERR(buf)) {
+ value_size = 5;
+ value_mask = (BIT(value_size) - 1);
+ cc1_4p7k = buf[0] & value_mask;
+ cc1_12k = buf[1] & value_mask;
+ cc2_4p7k = buf[2] & value_mask;
+ cc2_12k = buf[3] & value_mask;
+
+ value_size = 4;
+ value_mask = (BIT(value_size) - 1);
+ cc1_0p2v = (buf[4] >> value_size * 0) & value_mask;
+ cc1_0p66v = (buf[4] >> value_size * 1) & value_mask;
+ cc1_0p8v = (buf[5] >> value_size * 0) & value_mask;
+ cc1_1p23v = (buf[5] >> value_size * 1) & value_mask;
+ cc1_2p6v = (buf[6] >> value_size * 0) & value_mask;
+
+ cc2_0p2v = (buf[6] >> value_size * 1) & value_mask;
+ cc2_0p66v = (buf[7] >> value_size * 0) & value_mask;
+ cc2_0p8v = (buf[7] >> value_size * 1) & value_mask;
+ cc2_1p23v = (buf[8] >> value_size * 0) & value_mask;
+ cc2_2p6v = (buf[8] >> value_size * 1) & value_mask;
+
+ kfree(buf);
+ }
+ nvmem_cell_put(cell);
+ }
+
+ dev_dbg(type_c->dev, "check efuse v2 cc1_4p7k=%d cc1_12k=%d cc2_4p7k=%d cc2_12k=%d\n",
+ cc1_4p7k, cc1_12k, cc2_4p7k, cc2_12k);
+ dev_dbg(type_c->dev, "check efuse v2 cc1_0p2v=%d cc1_0p8v=%d cc1_2p6v=%d cc1_0p66v=%d cc1_1p23v=%d\n",
+ cc1_0p2v, cc1_0p8v, cc1_2p6v, cc1_0p66v, cc1_1p23v);
+ dev_dbg(type_c->dev, "check efuse v2 cc2_0p2v=%d cc2_0p8v=%d cc2_2p6v=%d cc2_0p66v=%d cc2_1p23v=%d\n",
+ cc2_0p2v, cc2_0p8v, cc2_2p6v, cc2_0p66v, cc2_1p23v);
+
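+ /* unlike v1, non-zero v2 efuse values replace the per-SoC defaults rather than offsetting them */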
+ cc_param = &type_c_cfg->cc1_param;
+ if (cc1_4p7k)
+ cc_param->rp_4p7k_code = cc1_4p7k;
+ if (cc1_12k)
+ cc_param->rp_12k_code = cc1_12k;
+
+ if (cc1_1p23v)
+ cc_param->vref_1p23v = cc1_1p23v;
+ if (cc1_0p66v)
+ cc_param->vref_0p66v = cc1_0p66v;
+ if (cc1_2p6v)
+ cc_param->vref_2p6v = cc1_2p6v;
+ if (cc1_0p8v)
+ cc_param->vref_0p8v = cc1_0p8v;
+ if (cc1_0p2v)
+ cc_param->vref_0p2v = cc1_0p2v;
+
+ cc_param = &type_c_cfg->cc2_param;
+ if (cc2_4p7k)
+ cc_param->rp_4p7k_code = cc2_4p7k;
+ if (cc2_12k)
+ cc_param->rp_12k_code = cc2_12k;
+
+ if (cc2_1p23v)
+ cc_param->vref_1p23v = cc2_1p23v;
+ if (cc2_0p66v)
+ cc_param->vref_0p66v = cc2_0p66v;
+ if (cc2_2p6v)
+ cc_param->vref_2p6v = cc2_2p6v;
+ if (cc2_0p8v)
+ cc_param->vref_0p8v = cc2_0p8v;
+ if (cc2_0p2v)
+ cc_param->vref_0p2v = cc2_0p2v;
+
+ return 0;
+}
+
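+ /*
+ * With use_defalut_parameter set, keep the CC code, vref and debounce
+ * values currently programmed in the registers instead of applying the
+ * per-SoC table or efuse trims.
+ */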
+static void get_default_type_c_parameter(struct type_c_data *type_c)
+{
+ void __iomem *reg;
+ int val;
+
+ type_c->dfp_mode_rp_en = dfp_mode(CC_MODE_DFP_3_0) | EN_RP4P7K;
+ type_c->ufp_mode_rd_en = EN_RD;
+
+ reg = type_c->reg_base + USB_TYPEC_CTRL_CC1_0;
+ val = readl(reg);
+ type_c->cc1_code = CC_CODE_MASK & val;
+
+ reg = type_c->reg_base + USB_TYPEC_CTRL_CC2_0;
+ val = readl(reg);
+ type_c->cc2_code = CC_CODE_MASK & val;
+
+ reg = type_c->reg_base + USB_TYPEC_CTRL_CC1_1;
+ val = readl(reg);
+ type_c->cc1_vref = val;
+
+ reg = type_c->reg_base + USB_TYPEC_CTRL_CC2_1;
+ val = readl(reg);
+ type_c->cc2_vref = val;
+
+ reg = type_c->reg_base + USB_TYPEC_CTRL;
+ val = readl(reg);
+ type_c->debounce = DEBOUNCE_TIME_MASK & val;
+}
+
+static int setup_type_c_parameter(struct type_c_data *type_c)
+{
+ struct type_c_cfg *type_c_cfg = type_c->type_c_cfg;
+ struct cc_param *cc_param;
+ struct soc_device_attribute rtk_soc_efuse_v1[] = {
+ { .family = "Realtek Phoenix",},
+ { .family = "Realtek Kylin",},
+ { .family = "Realtek Hercules",},
+ { .family = "Realtek Thor",},
+ { .family = "Realtek Hank",},
+ { .family = "Realtek Groot",},
+ { .family = "Realtek Stark",},
+ { .family = "Realtek Parker",},
+ { /* empty */ }
+ };
+
+ if (type_c_cfg->use_defalut_parameter) {
+ get_default_type_c_parameter(type_c);
+ return 0;
+ }
+
+ if (soc_device_match(rtk_soc_efuse_v1))
+ __updated_type_c_parameter_by_efuse(type_c);
+ else
+ __updated_type_c_parameter_by_efuse_v2(type_c);
+
+ /*
+ * UFP rd vref_ufp : 1p23v, 0p66v, 0p2v
+ * DFP_USB rp36k vref_dfp_usb: 0_1p6v, 0p2v, unused
+ * DFP_1.5 rp12k vref_dfp_1_5: 1_1p6v, 0p4v, 0p2v
+ * DFP_3.0 rp4p7k vref_dfp_3_0: 2p6v, 0p8v, 0p2v
+ */
+
+ switch (type_c_cfg->cc_dfp_mode) {
+ case CC_MODE_DFP_USB:
+ type_c->dfp_mode_rp_en = dfp_mode(CC_MODE_DFP_USB) | EN_RP36K;
+ break;
+ case CC_MODE_DFP_1_5:
+ type_c->dfp_mode_rp_en = dfp_mode(CC_MODE_DFP_1_5) | EN_RP12K;
+ break;
+ case CC_MODE_DFP_3_0:
+ type_c->dfp_mode_rp_en = dfp_mode(CC_MODE_DFP_3_0) | EN_RP4P7K;
+ break;
+ default:
+ dev_err(type_c->dev, "%s: unknown cc_dfp_mode %d\n",
+ __func__, type_c_cfg->cc_dfp_mode);
+ }
+
+ type_c->ufp_mode_rd_en = EN_RD;
+
+ cc_param = &type_c_cfg->cc1_param;
+ type_c->cc1_code = rp4pk_code(cc_param->rp_4p7k_code) |
+ rp36k_code(cc_param->rp_36k_code) |
+ rp12k_code(cc_param->rp_12k_code) |
+ rd_code(cc_param->rd_code);
+
+ if (type_c_cfg->parameter_ver == PARAMETER_V0)
+ type_c->cc1_vref = V0_vref_2p6v(cc_param->vref_2p6v) |
+ V0_vref_1p23v(cc_param->vref_1p23v) |
+ V0_vref_0p8v(cc_param->vref_0p8v) |
+ V0_vref_0p66v(cc_param->vref_0p66v) |
+ V0_vref_0p4v(cc_param->vref_0p4v) |
+ V0_vref_0p2v(cc_param->vref_0p2v) |
+ V0_vref_1_1p6v(cc_param->vref_1_1p6v) |
+ V0_vref_0_1p6v(cc_param->vref_0_1p6v);
+ else if (type_c_cfg->parameter_ver == PARAMETER_V1)
+ type_c->cc1_vref = V1_vref_2p6v(cc_param->vref_2p6v) |
+ V1_vref_1p23v(cc_param->vref_1p23v) |
+ V1_vref_0p8v(cc_param->vref_0p8v) |
+ V1_vref_0p66v(cc_param->vref_0p66v) |
+ V1_vref_0p4v(cc_param->vref_0p4v) |
+ V1_vref_0p2v(cc_param->vref_0p2v) |
+ V1_vref_1_1p6v(cc_param->vref_1_1p6v) |
+ V1_vref_0_1p6v(cc_param->vref_0_1p6v);
+ else
+ dev_err(type_c->dev, "%s: unknown parameter_ver %d\n",
+ __func__, type_c_cfg->parameter_ver);
+
+ cc_param = &type_c_cfg->cc2_param;
+ type_c->cc2_code = rp4pk_code(cc_param->rp_4p7k_code)
+ | rp36k_code(cc_param->rp_36k_code)
+ | rp12k_code(cc_param->rp_12k_code)
+ | rd_code(cc_param->rd_code);
+
+ if (type_c_cfg->parameter_ver == PARAMETER_V0)
+ type_c->cc2_vref = V0_vref_2p6v(cc_param->vref_2p6v) |
+ V0_vref_1p23v(cc_param->vref_1p23v) |
+ V0_vref_0p8v(cc_param->vref_0p8v) |
+ V0_vref_0p66v(cc_param->vref_0p66v) |
+ V0_vref_0p4v(cc_param->vref_0p4v) |
+ V0_vref_0p2v(cc_param->vref_0p2v) |
+ V0_vref_1_1p6v(cc_param->vref_1_1p6v) |
+ V0_vref_0_1p6v(cc_param->vref_0_1p6v);
+ else if (type_c_cfg->parameter_ver == PARAMETER_V1)
+ type_c->cc2_vref = V1_vref_2p6v(cc_param->vref_2p6v) |
+ V1_vref_1p23v(cc_param->vref_1p23v) |
+ V1_vref_0p8v(cc_param->vref_0p8v) |
+ V1_vref_0p66v(cc_param->vref_0p66v) |
+ V1_vref_0p4v(cc_param->vref_0p4v) |
+ V1_vref_0p2v(cc_param->vref_0p2v) |
+ V1_vref_1_1p6v(cc_param->vref_1_1p6v) |
+ V1_vref_0_1p6v(cc_param->vref_0_1p6v);
+ else
+ dev_err(type_c->dev, "%s: unknown parameter_ver %d\n",
+ __func__, type_c_cfg->parameter_ver);
+
+ type_c->debounce = (type_c_cfg->debounce_val << 1) | DEBOUNCE_EN;
+
+ return 0;
+}
+
+static int extcon_rtk_type_c_init(struct type_c_data *type_c)
+{
+ struct device *dev = type_c->dev;
+ unsigned long flags;
+ void __iomem *reg;
+ int val;
+
+ spin_lock_irqsave(&type_c->lock, flags);
+
+ /* set parameter */
+ reg = type_c->reg_base + USB_TYPEC_CTRL_CC1_0;
+ val = readl(reg);
+ val = (~CC_CODE_MASK & val) | (type_c->cc1_code & CC_CODE_MASK);
+ writel(val, reg);
+
+ reg = type_c->reg_base + USB_TYPEC_CTRL_CC2_0;
+ val = readl(reg);
+ val = (~CC_CODE_MASK & val) | (type_c->cc2_code & CC_CODE_MASK);
+ writel(val, reg);
+
+ reg = type_c->reg_base + USB_TYPEC_CTRL_CC1_1;
+ writel(type_c->cc1_vref, reg);
+
+ reg = type_c->reg_base + USB_TYPEC_CTRL_CC2_1;
+ writel(type_c->cc2_vref, reg);
+
+ reg = type_c->reg_base + USB_TYPEC_CTRL;
+ val = readl(reg);
+ val = (~DEBOUNCE_TIME_MASK & val) | (type_c->debounce & DEBOUNCE_TIME_MASK);
+ writel(val, reg);
+
+ dev_info(dev, "First check USB_DR_MODE_PERIPHERAL");
+ type_c->cc_mode = IN_DEVICE_MODE;
+ type_c->is_attach = IN_DETACH;
+ type_c->connect_change = CONNECT_NO_CHANGE;
+
+ detect_host(type_c);
+
+ spin_unlock_irqrestore(&type_c->lock, flags);
+
+ schedule_delayed_work(&type_c->delayed_work, msecs_to_jiffies(0));
+
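+ /* the typec port is registered only on the first init; re-init from resume reuses it */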
+ if (!type_c->port) {
+ struct typec_capability typec_cap = { };
+ struct fwnode_handle *fwnode;
+ const char *buf;
+ int ret;
+
+ typec_cap.revision = USB_TYPEC_REV_1_0;
+ typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
+ typec_cap.driver_data = type_c;
+ typec_cap.ops = &type_c_port_ops;
+
+ fwnode = device_get_named_child_node(dev, "connector");
+ if (!fwnode)
+ return -EINVAL;
+
+ ret = fwnode_property_read_string(fwnode, "power-role", &buf);
+ if (ret) {
+ dev_err(dev, "power-role not found: %d\n", ret);
+ return ret;
+ }
+
+ ret = typec_find_port_power_role(buf);
+ if (ret < 0)
+ return ret;
+ typec_cap.type = ret;
+
+ ret = fwnode_property_read_string(fwnode, "data-role", &buf);
+ if (ret) {
+ dev_err(dev, "data-role not found: %d\n", ret);
+ return ret;
+ }
+
+ ret = typec_find_port_data_role(buf);
+ if (ret < 0)
+ return ret;
+ typec_cap.data = ret;
+
+ type_c->port = typec_register_port(type_c->dev, &typec_cap);
+ if (IS_ERR(type_c->port))
+ return PTR_ERR(type_c->port);
+ }
+
+ return 0;
+}
+
+static int extcon_rtk_type_c_edev_register(struct type_c_data *type_c)
+{
+ struct device *dev = type_c->dev;
+ int ret = 0;
+
+ type_c->edev = devm_extcon_dev_allocate(dev, usb_type_c_cable);
+ if (IS_ERR(type_c->edev)) {
+ dev_err(dev, "failed to allocate extcon device\n");
+ return -ENOMEM;
+ }
+
+ ret = devm_extcon_dev_register(dev, type_c->edev);
+ if (ret < 0) {
+ dev_err(dev, "failed to register extcon device\n");
+ return ret;
+ }
+
+ extcon_set_property_capability(type_c->edev, EXTCON_USB,
+ EXTCON_PROP_USB_VBUS);
+ extcon_set_property_capability(type_c->edev, EXTCON_USB,
+ EXTCON_PROP_USB_TYPEC_POLARITY);
+ extcon_set_property_capability(type_c->edev, EXTCON_USB,
+ EXTCON_PROP_USB_SS);
+
+ extcon_set_property_capability(type_c->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_VBUS);
+ extcon_set_property_capability(type_c->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_TYPEC_POLARITY);
+ extcon_set_property_capability(type_c->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_SS);
+
+ return ret;
+}
+
+static int extcon_rtk_type_c_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct type_c_data *type_c;
+ const struct type_c_cfg *type_c_cfg;
+ int ret = 0;
+
+ type_c = devm_kzalloc(dev, sizeof(*type_c), GFP_KERNEL);
+ if (!type_c)
+ return -ENOMEM;
+
+ type_c->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(type_c->reg_base))
+ return PTR_ERR(type_c->reg_base);
+
+ type_c->dev = dev;
+
+ type_c->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (type_c->irq <= 0) {
+ dev_err(&pdev->dev, "Type C driver with no IRQ. Check %s setup!\n",
+ dev_name(&pdev->dev));
+ ret = -ENODEV;
+ goto err;
+ }
+
+ ret = devm_request_irq(dev, type_c->irq, type_c_detect_irq,
+ IRQF_SHARED, "type_c_detect", type_c);
+ if (ret)
+ goto err;
+
+ spin_lock_init(&type_c->lock);
+
+ type_c->rd_ctrl_gpio_desc = NULL;
+ if (soc_device_match(rtk_soc_kylin)) {
+ struct gpio_desc *gpio;
+
+ gpio = fwnode_gpiod_get_index(of_fwnode_handle(dev->of_node),
+ "realtek,rd-ctrl-gpios",
+ 0, GPIOD_OUT_HIGH, "rd-ctrl-gpio");
+ if (IS_ERR(gpio)) {
+ dev_err(dev, "Error rd_ctrl-gpios no found (err=%d)\n",
+ (int)PTR_ERR(gpio));
+ } else {
+ type_c->rd_ctrl_gpio_desc = gpio;
+ dev_dbg(dev, "%s get rd-ctrl-gpios (id=%d) OK\n",
+ __func__, desc_to_gpio(gpio));
+ }
+ }
+
+ type_c_cfg = of_device_get_match_data(dev);
+ if (!type_c_cfg) {
+ dev_err(dev, "type_c config are not assigned!\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ type_c->type_c_cfg = devm_kzalloc(dev, sizeof(*type_c_cfg), GFP_KERNEL);
+ if (!type_c->type_c_cfg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ memcpy(type_c->type_c_cfg, type_c_cfg, sizeof(*type_c_cfg));
+
+ if (setup_type_c_parameter(type_c)) {
+ dev_err(dev, "ERROR: %s to setup type c parameter!!", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ INIT_DELAYED_WORK(&type_c->delayed_work, host_device_switch);
+
+ ret = extcon_rtk_type_c_init(type_c);
+ if (ret) {
+ dev_err(dev, "%s failed to init type_c\n", __func__);
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, type_c);
+
+ ret = extcon_rtk_type_c_edev_register(type_c);
+ if (ret)
+ goto err;
+
+ create_debug_files(type_c);
+
+ return 0;
+
+err:
+ dev_err(&pdev->dev, "%s: Probe fail, %d\n", __func__, ret);
+
+ return ret;
+}
+
+static void extcon_rtk_type_c_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct type_c_data *type_c = dev_get_drvdata(dev);
+ u32 default_ctrl;
+ unsigned long flags;
+
+ remove_debug_files(type_c);
+
+ if (type_c->port) {
+ typec_unregister_port(type_c->port);
+ type_c->port = NULL;
+ }
+
+ cancel_delayed_work_sync(&type_c->delayed_work);
+ flush_delayed_work(&type_c->delayed_work);
+ WARN_ON_ONCE(delayed_work_pending(&type_c->delayed_work));
+
+ spin_lock_irqsave(&type_c->lock, flags);
+ /* disable interrupt */
+ default_ctrl = readl(type_c->reg_base + USB_TYPEC_CTRL) &
+ DEBOUNCE_TIME_MASK;
+ writel(default_ctrl, type_c->reg_base + USB_TYPEC_CTRL);
+
+ /* disable cc detect, rp, rd */
+ writel(PLR_EN, type_c->reg_base + USB_TYPEC_CTRL_CC1_0);
+ writel(0, type_c->reg_base + USB_TYPEC_CTRL_CC2_0);
+
+ spin_unlock_irqrestore(&type_c->lock, flags);
+
+ if (type_c->rd_ctrl_gpio_desc)
+ gpiod_put(type_c->rd_ctrl_gpio_desc);
+ type_c->rd_ctrl_gpio_desc = NULL;
+
+ free_irq(type_c->irq, type_c);
+}
+
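+ /* per-SoC default CC trim codes and vref settings; efuse data adjusts (v1) or overrides (v2) them at setup time */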
+static const struct type_c_cfg rtd1295_type_c_cfg = {
+ .parameter_ver = PARAMETER_V0,
+ .cc_dfp_mode = CC_MODE_DFP_3_0,
+ .cc1_param = { .rp_4p7k_code = 0xb,
+ .rp_36k_code = 0x17,
+ .rp_12k_code = 0x10,
+ .rd_code = 0,
+ .ra_code = 0,
+ .vref_2p6v = 0x0,
+ .vref_1p23v = 0x0,
+ .vref_0p8v = 0x3,
+ .vref_0p66v = 0x0,
+ .vref_0p4v = 0x0,
+ .vref_0p2v = 0x4,
+ .vref_1_1p6v = 0,
+ .vref_0_1p6v = 0 },
+ .cc2_param = { .rp_4p7k_code = 0xc,
+ .rp_36k_code = 0x17,
+ .rp_12k_code = 0x12,
+ .rd_code = 0,
+ .ra_code = 0,
+ .vref_2p6v = 0x2,
+ .vref_1p23v = 0x0,
+ .vref_0p8v = 0x3,
+ .vref_0p66v = 0x0,
+ .vref_0p4v = 0x0,
+ .vref_0p2v = 0x5,
+ .vref_1_1p6v = 0,
+ .vref_0_1p6v = 0 },
+ .debounce_val = 0x7f, /* 1b,1us 7f,4.7us */
+ .use_defalut_parameter = false,
+};
+
+static const struct type_c_cfg rtd1395_type_c_cfg = {
+ .parameter_ver = PARAMETER_V0,
+ .cc_dfp_mode = CC_MODE_DFP_3_0,
+ .cc1_param = { .rp_4p7k_code = 0xc,
+ .rp_36k_code = 0xb,
+ .rp_12k_code = 0xe,
+ .rd_code = 0x10,
+ .ra_code = 0x0,
+ .vref_2p6v = 0x0,
+ .vref_1p23v = 0x1,
+ .vref_0p8v = 0x0,
+ .vref_0p66v = 0x0,
+ .vref_0p4v = 0x3,
+ .vref_0p2v = 0x0,
+ .vref_1_1p6v = 0x7,
+ .vref_0_1p6v = 0x7 },
+ .cc2_param = { .rp_4p7k_code = 0xb,
+ .rp_36k_code = 0x9,
+ .rp_12k_code = 0xe,
+ .rd_code = 0xf,
+ .ra_code = 0x0,
+ .vref_2p6v = 0x1,
+ .vref_1p23v = 0x3,
+ .vref_0p8v = 0x3,
+ .vref_0p66v = 0x2,
+ .vref_0p4v = 0x3,
+ .vref_0p2v = 0x2,
+ .vref_1_1p6v = 0x7,
+ .vref_0_1p6v = 0x7 },
+ .debounce_val = 0x7f, /* 1b,1us 7f,4.7us */
+ .use_defalut_parameter = false,
+};
+
+static const struct type_c_cfg rtd1619_type_c_cfg = {
+ .parameter_ver = PARAMETER_V0,
+ .cc_dfp_mode = CC_MODE_DFP_3_0,
+ .cc1_param = { .rp_4p7k_code = 0xc,
+ .rp_36k_code = 0xf,
+ .rp_12k_code = 0xe,
+ .rd_code = 0x11,
+ .ra_code = 0x0,
+ .vref_2p6v = 0x5,
+ .vref_1p23v = 0x7,
+ .vref_0p8v = 0xa,
+ .vref_0p66v = 0xa,
+ .vref_0p4v = 0x3,
+ .vref_0p2v = 0x2,
+ .vref_1_1p6v = 0x7,
+ .vref_0_1p6v = 0x7 },
+ .cc2_param = { .rp_4p7k_code = 0xc,
+ .rp_36k_code = 0xf,
+ .rp_12k_code = 0xe,
+ .rd_code = 0xf,
+ .ra_code = 0x0,
+ .vref_2p6v = 0x5,
+ .vref_1p23v = 0x8,
+ .vref_0p8v = 0xa,
+ .vref_0p66v = 0xa,
+ .vref_0p4v = 0x3,
+ .vref_0p2v = 0x2,
+ .vref_1_1p6v = 0x7,
+ .vref_0_1p6v = 0x7 },
+ .debounce_val = 0x7f, /* 1b,1us 7f,4.7us */
+ .use_defalut_parameter = false,
+};
+
+static const struct type_c_cfg rtd1319_type_c_cfg = {
+ .parameter_ver = PARAMETER_V0,
+ .cc_dfp_mode = CC_MODE_DFP_1_5,
+ .cc1_param = { .rp_4p7k_code = 0x9,
+ .rp_36k_code = 0xe,
+ .rp_12k_code = 0x9,
+ .rd_code = 0x9,
+ .ra_code = 0x7,
+ .vref_2p6v = 0x3,
+ .vref_1p23v = 0x7,
+ .vref_0p8v = 0x7,
+ .vref_0p66v = 0x6,
+ .vref_0p4v = 0x2,
+ .vref_0p2v = 0x3,
+ .vref_1_1p6v = 0x4,
+ .vref_0_1p6v = 0x7 },
+ .cc2_param = { .rp_4p7k_code = 0x8,
+ .rp_36k_code = 0xe,
+ .rp_12k_code = 0x9,
+ .rd_code = 0x9,
+ .ra_code = 0x7,
+ .vref_2p6v = 0x3,
+ .vref_1p23v = 0x7,
+ .vref_0p8v = 0x7,
+ .vref_0p66v = 0x6,
+ .vref_0p4v = 0x3,
+ .vref_0p2v = 0x3,
+ .vref_1_1p6v = 0x6,
+ .vref_0_1p6v = 0x7 },
+ .debounce_val = 0x7f, /* 1b,1us 7f,4.7us */
+ .use_defalut_parameter = false,
+};
+
+static const struct type_c_cfg rtd1312c_type_c_cfg = {
+ .parameter_ver = PARAMETER_V0,
+ .cc_dfp_mode = CC_MODE_DFP_1_5,
+ .cc1_param = { .rp_4p7k_code = 0xe,
+ .rp_36k_code = 0xc,
+ .rp_12k_code = 0xc,
+ .rd_code = 0xa,
+ .ra_code = 0x3,
+ .vref_2p6v = 0xa,
+ .vref_1p23v = 0x7,
+ .vref_0p8v = 0x7,
+ .vref_0p66v = 0x7,
+ .vref_0p4v = 0x4,
+ .vref_0p2v = 0x4,
+ .vref_1_1p6v = 0x7,
+ .vref_0_1p6v = 0x7 },
+ .cc2_param = { .rp_4p7k_code = 0xe,
+ .rp_36k_code = 0xc,
+ .rp_12k_code = 0xc,
+ .rd_code = 0xa,
+ .ra_code = 0x3,
+ .vref_2p6v = 0xa,
+ .vref_1p23v = 0x7,
+ .vref_0p8v = 0x7,
+ .vref_0p66v = 0x7,
+ .vref_0p4v = 0x4,
+ .vref_0p2v = 0x4,
+ .vref_1_1p6v = 0x7,
+ .vref_0_1p6v = 0x7 },
+ .debounce_val = 0x7f, /* 1b,1us 7f,4.7us */
+ .use_defalut_parameter = false,
+};
+
+static const struct type_c_cfg rtd1619b_type_c_cfg = {
+ .parameter_ver = PARAMETER_V1,
+ .cc_dfp_mode = CC_MODE_DFP_1_5,
+ .cc1_param = { .rp_4p7k_code = 0xf,
+ .rp_36k_code = 0xf,
+ .rp_12k_code = 0xf,
+ .rd_code = 0xf,
+ .ra_code = 0x7,
+ .vref_2p6v = 0x9,
+ .vref_1p23v = 0x7,
+ .vref_0p8v = 0x9,
+ .vref_0p66v = 0x8,
+ .vref_0p4v = 0x7,
+ .vref_0p2v = 0x9,
+ .vref_1_1p6v = 0x7,
+ .vref_0_1p6v = 0x7 },
+ .cc2_param = { .rp_4p7k_code = 0xf,
+ .rp_36k_code = 0xf,
+ .rp_12k_code = 0xf,
+ .rd_code = 0xf,
+ .ra_code = 0x7,
+ .vref_1p23v = 0x7,
+ .vref_0p8v = 0x9,
+ .vref_0p66v = 0x8,
+ .vref_0p4v = 0x7,
+ .vref_0p2v = 0x8,
+ .vref_1_1p6v = 0x7,
+ .vref_0_1p6v = 0x7 },
+ .debounce_val = 0x7f, /* 1b,1us 7f,4.7us */
+ .use_defalut_parameter = false,
+};
+
+static const struct type_c_cfg rtd1319d_type_c_cfg = {
+ .parameter_ver = PARAMETER_V1,
+ .cc_dfp_mode = CC_MODE_DFP_1_5,
+ .cc1_param = { .rp_4p7k_code = 0xe,
+ .rp_36k_code = 0x3,
+ .rp_12k_code = 0xe,
+ .rd_code = 0xf,
+ .ra_code = 0x6,
+ .vref_2p6v = 0x7,
+ .vref_1p23v = 0x7,
+ .vref_0p8v = 0x8,
+ .vref_0p66v = 0x7,
+ .vref_0p4v = 0x7,
+ .vref_0p2v = 0x7,
+ .vref_1_1p6v = 0x7,
+ .vref_0_1p6v = 0x7 },
+ .cc2_param = { .rp_4p7k_code = 0xe,
+ .rp_36k_code = 0x3,
+ .rp_12k_code = 0xe,
+ .rd_code = 0xf,
+ .ra_code = 0x6,
+ .vref_2p6v = 0x7,
+ .vref_1p23v = 0x7,
+ .vref_0p8v = 0x8,
+ .vref_0p66v = 0x7,
+ .vref_0p4v = 0x7,
+ .vref_0p2v = 0x8,
+ .vref_1_1p6v = 0x7,
+ .vref_0_1p6v = 0x7 },
+ .debounce_val = 0x7f, /* 1b,1us 7f,4.7us */
+ .use_defalut_parameter = false,
+};
+
+static const struct type_c_cfg rtd1315e_type_c_cfg = {
+ .parameter_ver = PARAMETER_V1,
+ .cc_dfp_mode = CC_MODE_DFP_1_5,
+ .cc1_param = { .rp_4p7k_code = 0xe,
+ .rp_36k_code = 0x3,
+ .rp_12k_code = 0xe,
+ .rd_code = 0xf,
+ .ra_code = 0x6,
+ .vref_2p6v = 0x7,
+ .vref_1p23v = 0x7,
+ .vref_0p8v = 0x8,
+ .vref_0p66v = 0x7,
+ .vref_0p4v = 0x7,
+ .vref_0p2v = 0x7,
+ .vref_1_1p6v = 0x7,
+ .vref_0_1p6v = 0x7 },
+ .cc2_param = { .rp_4p7k_code = 0xe,
+ .rp_36k_code = 0x3,
+ .rp_12k_code = 0xe,
+ .rd_code = 0xf,
+ .ra_code = 0x6,
+ .vref_2p6v = 0x7,
+ .vref_1p23v = 0x7,
+ .vref_0p8v = 0x8,
+ .vref_0p66v = 0x7,
+ .vref_0p4v = 0x7,
+ .vref_0p2v = 0x8,
+ .vref_1_1p6v = 0x7,
+ .vref_0_1p6v = 0x7 },
+ .debounce_val = 0x7f, /* 1b,1us 7f,4.7us */
+ .use_defalut_parameter = false,
+};
+
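+ /*
+ * Illustrative devicetree usage only; the unit address, reg range,
+ * interrupt specifier and nvmem phandle below are placeholders, not
+ * values from a real board, and the usb-c-connector compatible follows
+ * the common connector binding convention. The "connector" child node,
+ * "power-role", "data-role" and the "usb-type-c-cal" nvmem cell name
+ * are what this driver actually parses.
+ *
+ *	type-c@7220 {
+ *		compatible = "realtek,rtd1619b-type-c";
+ *		reg = <0x7220 0x20>;
+ *		interrupts = <0 60 4>;
+ *		nvmem-cells = <&otp_usb_cal>;
+ *		nvmem-cell-names = "usb-type-c-cal";
+ *		connector {
+ *			compatible = "usb-c-connector";
+ *			power-role = "dual";
+ *			data-role = "dual";
+ *		};
+ *	};
+ */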
+static const struct of_device_id extcon_rtk_type_c_match[] = {
+ { .compatible = "realtek,rtd1295-type-c", .data = &rtd1295_type_c_cfg },
+ { .compatible = "realtek,rtd1312c-type-c", .data = &rtd1312c_type_c_cfg },
+ { .compatible = "realtek,rtd1315e-type-c", .data = &rtd1315e_type_c_cfg },
+ { .compatible = "realtek,rtd1319-type-c", .data = &rtd1319_type_c_cfg },
+ { .compatible = "realtek,rtd1319d-type-c", .data = &rtd1319d_type_c_cfg },
+ { .compatible = "realtek,rtd1395-type-c", .data = &rtd1395_type_c_cfg },
+ { .compatible = "realtek,rtd1619-type-c", .data = &rtd1619_type_c_cfg },
+ { .compatible = "realtek,rtd1619b-type-c", .data = &rtd1619b_type_c_cfg },
+ {},
+};
+MODULE_DEVICE_TABLE(of, extcon_rtk_type_c_match);
+
+#ifdef CONFIG_PM_SLEEP
+static int extcon_rtk_type_c_prepare(struct device *dev)
+{
+ struct type_c_data *type_c = dev_get_drvdata(dev);
+ u32 default_ctrl;
+ unsigned long flags;
+
+ cancel_delayed_work_sync(&type_c->delayed_work);
+ flush_delayed_work(&type_c->delayed_work);
+ WARN_ON_ONCE(delayed_work_pending(&type_c->delayed_work));
+
+ spin_lock_irqsave(&type_c->lock, flags);
+ /* disable interrupt */
+ default_ctrl = readl(type_c->reg_base + USB_TYPEC_CTRL) &
+ DEBOUNCE_TIME_MASK;
+ writel(default_ctrl, type_c->reg_base + USB_TYPEC_CTRL);
+
+ /* disable cc detect, rp, rd */
+ writel(PLR_EN, type_c->reg_base + USB_TYPEC_CTRL_CC1_0);
+ writel(0, type_c->reg_base + USB_TYPEC_CTRL_CC2_0);
+
+ spin_unlock_irqrestore(&type_c->lock, flags);
+
+ return 0;
+}
+
+static void extcon_rtk_type_c_complete(struct device *dev)
+{
+ /* nothing */
+}
+
+static int extcon_rtk_type_c_suspend(struct device *dev)
+{
+ /* nothing */
+
+ return 0;
+}
+
+static int extcon_rtk_type_c_resume(struct device *dev)
+{
+ struct type_c_data *type_c = dev_get_drvdata(dev);
+ int ret;
+
+ ret = extcon_rtk_type_c_init(type_c);
+ if (ret) {
+ dev_err(dev, "%s failed to init type_c\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops extcon_rtk_type_c_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(extcon_rtk_type_c_suspend, extcon_rtk_type_c_resume)
+ .prepare = extcon_rtk_type_c_prepare,
+ .complete = extcon_rtk_type_c_complete,
+};
+
+#define DEV_PM_OPS (&extcon_rtk_type_c_pm_ops)
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static struct platform_driver extcon_rtk_type_c_driver = {
+ .probe = extcon_rtk_type_c_probe,
+ .remove_new = extcon_rtk_type_c_remove,
+ .driver = {
+ .name = "extcon-rtk-type_c",
+ .of_match_table = extcon_rtk_type_c_match,
+ .pm = DEV_PM_OPS,
+ },
+};
+
+module_platform_driver(extcon_rtk_type_c_driver);
+
+MODULE_DESCRIPTION("Realtek Extcon Type C driver");
+MODULE_AUTHOR("Stanley Chang <stanley_chang@realtek.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 2a05f411328f..95c10f3d2282 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -191,7 +191,7 @@ struct fw_node {
/* Upper layer specific data. */
void *data;
- struct fw_node *ports[];
+ struct fw_node *ports[] __counted_by(port_count);
};
static inline struct fw_node *fw_node_get(struct fw_node *node)
diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
index ef0820f1a924..d4987d013080 100644
--- a/drivers/firmware/efi/efi-init.c
+++ b/drivers/firmware/efi/efi-init.c
@@ -55,6 +55,15 @@ static phys_addr_t __init efi_to_phys(unsigned long addr)
extern __weak const efi_config_table_type_t efi_arch_tables[];
+/*
+ * x86 defines its own screen_info and uses it even without EFI,
+ * everything else can get it from here.
+ */
+#if !defined(CONFIG_X86) && (defined(CONFIG_SYSFB) || defined(CONFIG_EFI_EARLYCON))
+struct screen_info screen_info __section(".data");
+EXPORT_SYMBOL_GPL(screen_info);
+#endif
+
static void __init init_screen_info(void)
{
struct screen_info *si;
@@ -240,5 +249,8 @@ void __init efi_init(void)
memblock_reserve(data.phys_map & PAGE_MASK,
PAGE_ALIGN(data.size + (data.phys_map & ~PAGE_MASK)));
- init_screen_info();
+ if (IS_ENABLED(CONFIG_X86) ||
+ IS_ENABLED(CONFIG_SYSFB) ||
+ IS_ENABLED(CONFIG_EFI_EARLYCON))
+ init_screen_info();
}
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index ef4c12f0877b..06964a3c130f 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -28,7 +28,7 @@ cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \
-DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \
-DEFI_HAVE_STRCMP -fno-builtin -fpic \
$(call cc-option,-mno-single-pic-base)
-cflags-$(CONFIG_RISCV) += -fpic
+cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE
cflags-$(CONFIG_LOONGARCH) += -fpie
cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt
diff --git a/drivers/firmware/efi/libstub/efi-stub-entry.c b/drivers/firmware/efi/libstub/efi-stub-entry.c
index 2f1902e5d407..a6c049835190 100644
--- a/drivers/firmware/efi/libstub/efi-stub-entry.c
+++ b/drivers/firmware/efi/libstub/efi-stub-entry.c
@@ -13,7 +13,13 @@ struct screen_info *alloc_screen_info(void)
{
if (IS_ENABLED(CONFIG_ARM))
return __alloc_screen_info();
- return (void *)&screen_info + screen_info_offset;
+
+ if (IS_ENABLED(CONFIG_X86) ||
+ IS_ENABLED(CONFIG_EFI_EARLYCON) ||
+ IS_ENABLED(CONFIG_SYSFB))
+ return (void *)&screen_info + screen_info_offset;
+
+ return NULL;
}
/*
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 4cc1ac7f76ed..b0d22d4455d9 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -226,7 +226,6 @@ static int do_feature_check_call(const u32 api_id)
return ret;
}
-EXPORT_SYMBOL_GPL(zynqmp_pm_feature);
/**
* zynqmp_pm_feature() - Check whether given feature is supported or not and
@@ -246,6 +245,7 @@ int zynqmp_pm_feature(const u32 api_id)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_feature);
/**
* zynqmp_pm_is_function_supported() - Check whether given IOCTL/QUERY function
diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c
index aa758426c22b..578663503297 100644
--- a/drivers/fpga/altera-hps2fpga.c
+++ b/drivers/fpga/altera-hps2fpga.c
@@ -24,7 +24,8 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
@@ -127,18 +128,11 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct altera_hps2fpga_data *priv;
- const struct of_device_id *of_id;
struct fpga_bridge *br;
u32 enable;
int ret;
- of_id = of_match_device(altera_fpga_of_match, dev);
- if (!of_id) {
- dev_err(dev, "failed to match device\n");
- return -ENODEV;
- }
-
- priv = (struct altera_hps2fpga_data *)of_id->data;
+ priv = (struct altera_hps2fpga_data *)device_get_match_data(dev);
priv->bridge_reset = of_reset_control_get_exclusive_by_index(dev->of_node,
0);
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
index 5e1e009dba89..740980e7cef8 100644
--- a/drivers/fpga/altera-ps-spi.c
+++ b/drivers/fpga/altera-ps-spi.c
@@ -18,8 +18,7 @@
#include <linux/fpga/fpga-mgr.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/spi/spi.h>
#include <linux/sizes.h>
@@ -72,12 +71,6 @@ static struct altera_ps_data a10_data = {
.t_st2ck_us = 10, /* min(t_ST2CK) */
};
-/* Array index is enum altera_ps_devtype */
-static const struct altera_ps_data *altera_ps_data_map[] = {
- &c5_data,
- &a10_data,
-};
-
static const struct of_device_id of_ef_match[] = {
{ .compatible = "altr,fpga-passive-serial", .data = &c5_data },
{ .compatible = "altr,fpga-arria10-passive-serial", .data = &a10_data },
@@ -237,43 +230,16 @@ static const struct fpga_manager_ops altera_ps_ops = {
.write_complete = altera_ps_write_complete,
};
-static const struct altera_ps_data *id_to_data(const struct spi_device_id *id)
-{
- kernel_ulong_t devtype = id->driver_data;
- const struct altera_ps_data *data;
-
- /* someone added a altera_ps_devtype without adding to the map array */
- if (devtype >= ARRAY_SIZE(altera_ps_data_map))
- return NULL;
-
- data = altera_ps_data_map[devtype];
- if (!data || data->devtype != devtype)
- return NULL;
-
- return data;
-}
-
static int altera_ps_probe(struct spi_device *spi)
{
struct altera_ps_conf *conf;
- const struct of_device_id *of_id;
struct fpga_manager *mgr;
conf = devm_kzalloc(&spi->dev, sizeof(*conf), GFP_KERNEL);
if (!conf)
return -ENOMEM;
- if (spi->dev.of_node) {
- of_id = of_match_device(of_ef_match, &spi->dev);
- if (!of_id)
- return -ENODEV;
- conf->data = of_id->data;
- } else {
- conf->data = id_to_data(spi_get_device_id(spi));
- if (!conf->data)
- return -ENODEV;
- }
-
+ conf->data = spi_get_device_match_data(spi);
conf->spi = spi;
conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_LOW);
if (IS_ERR(conf->config)) {
@@ -308,9 +274,9 @@ static int altera_ps_probe(struct spi_device *spi)
}
static const struct spi_device_id altera_ps_spi_ids[] = {
- { "cyclone-ps-spi", CYCLONE5 },
- { "fpga-passive-serial", CYCLONE5 },
- { "fpga-arria10-passive-serial", ARRIA10 },
+ { "cyclone-ps-spi", (uintptr_t)&c5_data },
+ { "fpga-passive-serial", (uintptr_t)&c5_data },
+ { "fpga-arria10-passive-serial", (uintptr_t)&a10_data },
{}
};
MODULE_DEVICE_TABLE(spi, altera_ps_spi_ids);
@@ -319,7 +285,7 @@ static struct spi_driver altera_ps_driver = {
.driver = {
.name = "altera-ps-spi",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(of_ef_match),
+ .of_match_table = of_ef_match,
},
.id_table = altera_ps_spi_ids,
.probe = altera_ps_probe,
diff --git a/drivers/fpga/versal-fpga.c b/drivers/fpga/versal-fpga.c
index e1601b3a345b..3710e8f01be2 100644
--- a/drivers/fpga/versal-fpga.c
+++ b/drivers/fpga/versal-fpga.c
@@ -48,7 +48,7 @@ static int versal_fpga_probe(struct platform_device *pdev)
struct fpga_manager *mgr;
int ret;
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
if (ret < 0) {
dev_err(dev, "no usable DMA configuration\n");
return ret;
diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c
index 208d9560f56d..68835896f180 100644
--- a/drivers/fpga/xilinx-pr-decoupler.c
+++ b/drivers/fpga/xilinx-pr-decoupler.c
@@ -10,8 +10,10 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/of_device.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/fpga/fpga-bridge.h>
#define CTRL_CMD_DECOUPLE BIT(0)
@@ -81,7 +83,6 @@ static const struct fpga_bridge_ops xlnx_pr_decoupler_br_ops = {
.enable_show = xlnx_pr_decoupler_enable_show,
};
-#ifdef CONFIG_OF
static const struct xlnx_config_data decoupler_config = {
.name = "Xilinx PR Decoupler",
};
@@ -100,11 +101,9 @@ static const struct of_device_id xlnx_pr_decoupler_of_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, xlnx_pr_decoupler_of_match);
-#endif
static int xlnx_pr_decoupler_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
struct xlnx_pr_decoupler_data *priv;
struct fpga_bridge *br;
int err;
@@ -113,13 +112,7 @@ static int xlnx_pr_decoupler_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- if (np) {
- const struct of_device_id *match;
-
- match = of_match_node(xlnx_pr_decoupler_of_match, np);
- if (match && match->data)
- priv->ipconfig = match->data;
- }
+ priv->ipconfig = device_get_match_data(&pdev->dev);
priv->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->io_base))
@@ -174,7 +167,7 @@ static struct platform_driver xlnx_pr_decoupler_driver = {
.remove = xlnx_pr_decoupler_remove,
.driver = {
.name = "xlnx_pr_decoupler",
- .of_match_table = of_match_ptr(xlnx_pr_decoupler_of_match),
+ .of_match_table = xlnx_pr_decoupler_of_match,
},
};
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 913948876c93..b3a133ed31ee 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -1312,9 +1312,9 @@ config GPIO_KEMPLD
config GPIO_LJCA
tristate "INTEL La Jolla Cove Adapter GPIO support"
- depends on MFD_LJCA
+ depends on USB_LJCA
select GPIOLIB_IRQCHIP
- default MFD_LJCA
+ default USB_LJCA
help
Select this option to enable GPIO driver for the INTEL
La Jolla Cove Adapter (LJCA) board.
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 58f107194fda..04c03402db6d 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -750,12 +750,12 @@ static int aspeed_gpio_request(struct gpio_chip *chip, unsigned int offset)
if (!have_gpio(gpiochip_get_data(chip), offset))
return -ENODEV;
- return pinctrl_gpio_request(chip->base + offset);
+ return pinctrl_gpio_request(chip, offset);
}
static void aspeed_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
- pinctrl_gpio_free(chip->base + offset);
+ pinctrl_gpio_free(chip, offset);
}
static int usecs_to_cycles(struct aspeed_gpio *gpio, unsigned long usecs,
@@ -973,7 +973,7 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
else if (param == PIN_CONFIG_BIAS_DISABLE ||
param == PIN_CONFIG_BIAS_PULL_DOWN ||
param == PIN_CONFIG_DRIVE_STRENGTH)
- return pinctrl_gpio_set_config(chip->base + offset, config);
+ return pinctrl_gpio_set_config(chip, offset, config);
else if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN ||
param == PIN_CONFIG_DRIVE_OPEN_SOURCE)
/* Return -ENOTSUPP to trigger emulation, as per datasheet */
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index 858e6ebbb584..6c862c572322 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -227,14 +227,9 @@ static int em_gio_to_irq(struct gpio_chip *chip, unsigned offset)
return irq_create_mapping(gpio_to_priv(chip)->irq_domain, offset);
}
-static int em_gio_request(struct gpio_chip *chip, unsigned offset)
-{
- return pinctrl_gpio_request(chip->base + offset);
-}
-
static void em_gio_free(struct gpio_chip *chip, unsigned offset)
{
- pinctrl_gpio_free(chip->base + offset);
+ pinctrl_gpio_free(chip, offset);
/* Set the GPIO as an input to ensure that the next GPIO request won't
* drive the GPIO pin as an output.
@@ -311,7 +306,7 @@ static int em_gio_probe(struct platform_device *pdev)
gpio_chip->direction_output = em_gio_direction_output;
gpio_chip->set = em_gio_set;
gpio_chip->to_irq = em_gio_to_irq;
- gpio_chip->request = em_gio_request;
+ gpio_chip->request = pinctrl_gpio_request;
gpio_chip->free = em_gio_free;
gpio_chip->label = name;
gpio_chip->parent = dev;
diff --git a/drivers/gpio/gpio-ljca.c b/drivers/gpio/gpio-ljca.c
index aca69329455f..dfec9fbfc7a9 100644
--- a/drivers/gpio/gpio-ljca.c
+++ b/drivers/gpio/gpio-ljca.c
@@ -6,6 +6,7 @@
*/
#include <linux/acpi.h>
+#include <linux/auxiliary_bus.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/dev_printk.h>
@@ -13,19 +14,18 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kref.h>
-#include <linux/mfd/ljca.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/usb/ljca.h>
/* GPIO commands */
-#define LJCA_GPIO_CONFIG 1
-#define LJCA_GPIO_READ 2
-#define LJCA_GPIO_WRITE 3
-#define LJCA_GPIO_INT_EVENT 4
-#define LJCA_GPIO_INT_MASK 5
-#define LJCA_GPIO_INT_UNMASK 6
+#define LJCA_GPIO_CONFIG 1
+#define LJCA_GPIO_READ 2
+#define LJCA_GPIO_WRITE 3
+#define LJCA_GPIO_INT_EVENT 4
+#define LJCA_GPIO_INT_MASK 5
+#define LJCA_GPIO_INT_UNMASK 6
#define LJCA_GPIO_CONF_DISABLE BIT(0)
#define LJCA_GPIO_CONF_INPUT BIT(1)
@@ -36,45 +36,49 @@
#define LJCA_GPIO_CONF_INTERRUPT BIT(6)
#define LJCA_GPIO_INT_TYPE BIT(7)
-#define LJCA_GPIO_CONF_EDGE FIELD_PREP(LJCA_GPIO_INT_TYPE, 1)
-#define LJCA_GPIO_CONF_LEVEL FIELD_PREP(LJCA_GPIO_INT_TYPE, 0)
+#define LJCA_GPIO_CONF_EDGE FIELD_PREP(LJCA_GPIO_INT_TYPE, 1)
+#define LJCA_GPIO_CONF_LEVEL FIELD_PREP(LJCA_GPIO_INT_TYPE, 0)
/* Intentional overlap with PULLUP / PULLDOWN */
-#define LJCA_GPIO_CONF_SET BIT(3)
-#define LJCA_GPIO_CONF_CLR BIT(4)
+#define LJCA_GPIO_CONF_SET BIT(3)
+#define LJCA_GPIO_CONF_CLR BIT(4)
-struct gpio_op {
+#define LJCA_GPIO_BUF_SIZE 60u
+
+struct ljca_gpio_op {
u8 index;
u8 value;
} __packed;
-struct gpio_packet {
+struct ljca_gpio_packet {
u8 num;
- struct gpio_op item[];
+ struct ljca_gpio_op item[] __counted_by(num);
} __packed;
-#define LJCA_GPIO_BUF_SIZE 60
struct ljca_gpio_dev {
- struct platform_device *pdev;
+ struct ljca_client *ljca;
struct gpio_chip gc;
struct ljca_gpio_info *gpio_info;
DECLARE_BITMAP(unmasked_irqs, LJCA_MAX_GPIO_NUM);
DECLARE_BITMAP(enabled_irqs, LJCA_MAX_GPIO_NUM);
DECLARE_BITMAP(reenable_irqs, LJCA_MAX_GPIO_NUM);
+ DECLARE_BITMAP(output_enabled, LJCA_MAX_GPIO_NUM);
u8 *connect_mode;
- /* mutex to protect irq bus */
+ /* protect irq bus */
struct mutex irq_lock;
struct work_struct work;
- /* lock to protect package transfer to Hardware */
+ /* protect package transfer to hardware */
struct mutex trans_lock;
u8 obuf[LJCA_GPIO_BUF_SIZE];
u8 ibuf[LJCA_GPIO_BUF_SIZE];
};
-static int gpio_config(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id, u8 config)
+static int ljca_gpio_config(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id,
+ u8 config)
{
- struct gpio_packet *packet = (struct gpio_packet *)ljca_gpio->obuf;
+ struct ljca_gpio_packet *packet =
+ (struct ljca_gpio_packet *)ljca_gpio->obuf;
int ret;
mutex_lock(&ljca_gpio->trans_lock);
@@ -82,43 +86,43 @@ static int gpio_config(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id, u8 config)
packet->item[0].value = config | ljca_gpio->connect_mode[gpio_id];
packet->num = 1;
- ret = ljca_transfer(ljca_gpio->gpio_info->ljca, LJCA_GPIO_CONFIG, packet,
- struct_size(packet, item, packet->num), NULL, NULL);
+ ret = ljca_transfer(ljca_gpio->ljca, LJCA_GPIO_CONFIG, (u8 *)packet,
+ struct_size(packet, item, packet->num), NULL, 0);
mutex_unlock(&ljca_gpio->trans_lock);
- return ret;
+
+ return ret < 0 ? ret : 0;
}
static int ljca_gpio_read(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id)
{
- struct gpio_packet *packet = (struct gpio_packet *)ljca_gpio->obuf;
- struct gpio_packet *ack_packet = (struct gpio_packet *)ljca_gpio->ibuf;
- unsigned int ibuf_len = LJCA_GPIO_BUF_SIZE;
+ struct ljca_gpio_packet *ack_packet =
+ (struct ljca_gpio_packet *)ljca_gpio->ibuf;
+ struct ljca_gpio_packet *packet =
+ (struct ljca_gpio_packet *)ljca_gpio->obuf;
int ret;
mutex_lock(&ljca_gpio->trans_lock);
packet->num = 1;
packet->item[0].index = gpio_id;
- ret = ljca_transfer(ljca_gpio->gpio_info->ljca, LJCA_GPIO_READ, packet,
- struct_size(packet, item, packet->num), ljca_gpio->ibuf, &ibuf_len);
- if (ret)
- goto out_unlock;
-
- if (!ibuf_len || ack_packet->num != packet->num) {
- dev_err(&ljca_gpio->pdev->dev, "failed gpio_id:%u %u", gpio_id, ack_packet->num);
- ret = -EIO;
+ ret = ljca_transfer(ljca_gpio->ljca, LJCA_GPIO_READ, (u8 *)packet,
+ struct_size(packet, item, packet->num),
+ ljca_gpio->ibuf, LJCA_GPIO_BUF_SIZE);
+
+ if (ret <= 0 || ack_packet->num != packet->num) {
+ dev_err(&ljca_gpio->ljca->auxdev.dev,
+ "read package error, gpio_id: %u num: %u ret: %d\n",
+ gpio_id, ack_packet->num, ret);
+ ret = ret < 0 ? ret : -EIO;
}
-
-out_unlock:
mutex_unlock(&ljca_gpio->trans_lock);
- if (ret)
- return ret;
- return ack_packet->item[0].value > 0;
+
+ return ret < 0 ? ret : ack_packet->item[0].value > 0;
}
-static int ljca_gpio_write(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id,
- int value)
+static int ljca_gpio_write(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id, int value)
{
- struct gpio_packet *packet = (struct gpio_packet *)ljca_gpio->obuf;
+ struct ljca_gpio_packet *packet =
+ (struct ljca_gpio_packet *)ljca_gpio->obuf;
int ret;
mutex_lock(&ljca_gpio->trans_lock);
@@ -126,10 +130,11 @@ static int ljca_gpio_write(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id,
packet->item[0].index = gpio_id;
packet->item[0].value = value & 1;
- ret = ljca_transfer(ljca_gpio->gpio_info->ljca, LJCA_GPIO_WRITE, packet,
- struct_size(packet, item, packet->num), NULL, NULL);
+ ret = ljca_transfer(ljca_gpio->ljca, LJCA_GPIO_WRITE, (u8 *)packet,
+ struct_size(packet, item, packet->num), NULL, 0);
mutex_unlock(&ljca_gpio->trans_lock);
- return ret;
+
+ return ret < 0 ? ret : 0;
}
static int ljca_gpio_get_value(struct gpio_chip *chip, unsigned int offset)
@@ -147,16 +152,24 @@ static void ljca_gpio_set_value(struct gpio_chip *chip, unsigned int offset,
ret = ljca_gpio_write(ljca_gpio, offset, val);
if (ret)
- dev_err(chip->parent, "offset:%u val:%d set value failed %d\n", offset, val, ret);
+ dev_err(chip->parent,
+ "set value failed offset: %u val: %d ret: %d\n",
+ offset, val, ret);
}
-static int ljca_gpio_direction_input(struct gpio_chip *chip,
- unsigned int offset)
+static int ljca_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip);
u8 config = LJCA_GPIO_CONF_INPUT | LJCA_GPIO_CONF_CLR;
+ int ret;
+
+ ret = ljca_gpio_config(ljca_gpio, offset, config);
+ if (ret)
+ return ret;
- return gpio_config(ljca_gpio, offset, config);
+ clear_bit(offset, ljca_gpio->output_enabled);
+
+ return 0;
}
static int ljca_gpio_direction_output(struct gpio_chip *chip,
@@ -166,14 +179,26 @@ static int ljca_gpio_direction_output(struct gpio_chip *chip,
u8 config = LJCA_GPIO_CONF_OUTPUT | LJCA_GPIO_CONF_CLR;
int ret;
- ret = gpio_config(ljca_gpio, offset, config);
+ ret = ljca_gpio_config(ljca_gpio, offset, config);
if (ret)
return ret;
ljca_gpio_set_value(chip, offset, val);
+ set_bit(offset, ljca_gpio->output_enabled);
+
return 0;
}
+static int ljca_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip);
+
+ if (test_bit(offset, ljca_gpio->output_enabled))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
+}
+
static int ljca_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
unsigned long config)
{
@@ -197,7 +222,8 @@ static int ljca_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
return 0;
}
-static int ljca_gpio_init_valid_mask(struct gpio_chip *chip, unsigned long *valid_mask,
+static int ljca_gpio_init_valid_mask(struct gpio_chip *chip,
+ unsigned long *valid_mask,
unsigned int ngpios)
{
struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip);
@@ -208,15 +234,18 @@ static int ljca_gpio_init_valid_mask(struct gpio_chip *chip, unsigned long *vali
return 0;
}
-static void ljca_gpio_irq_init_valid_mask(struct gpio_chip *chip, unsigned long *valid_mask,
+static void ljca_gpio_irq_init_valid_mask(struct gpio_chip *chip,
+ unsigned long *valid_mask,
unsigned int ngpios)
{
ljca_gpio_init_valid_mask(chip, valid_mask, ngpios);
}
-static int ljca_enable_irq(struct ljca_gpio_dev *ljca_gpio, int gpio_id, bool enable)
+static int ljca_enable_irq(struct ljca_gpio_dev *ljca_gpio, int gpio_id,
+ bool enable)
{
- struct gpio_packet *packet = (struct gpio_packet *)ljca_gpio->obuf;
+ struct ljca_gpio_packet *packet =
+ (struct ljca_gpio_packet *)ljca_gpio->obuf;
int ret;
mutex_lock(&ljca_gpio->trans_lock);
@@ -224,18 +253,20 @@ static int ljca_enable_irq(struct ljca_gpio_dev *ljca_gpio, int gpio_id, bool en
packet->item[0].index = gpio_id;
packet->item[0].value = 0;
- ret = ljca_transfer(ljca_gpio->gpio_info->ljca,
- enable ? LJCA_GPIO_INT_UNMASK : LJCA_GPIO_INT_MASK, packet,
- struct_size(packet, item, packet->num), NULL, NULL);
+ ret = ljca_transfer(ljca_gpio->ljca,
+ enable ? LJCA_GPIO_INT_UNMASK : LJCA_GPIO_INT_MASK,
+ (u8 *)packet, struct_size(packet, item, packet->num),
+ NULL, 0);
mutex_unlock(&ljca_gpio->trans_lock);
- return ret;
+
+ return ret < 0 ? ret : 0;
}
static void ljca_gpio_async(struct work_struct *work)
{
- struct ljca_gpio_dev *ljca_gpio = container_of(work, struct ljca_gpio_dev, work);
- int gpio_id;
- int unmasked;
+ struct ljca_gpio_dev *ljca_gpio =
+ container_of(work, struct ljca_gpio_dev, work);
+ int gpio_id, unmasked;
for_each_set_bit(gpio_id, ljca_gpio->reenable_irqs, ljca_gpio->gc.ngpio) {
clear_bit(gpio_id, ljca_gpio->reenable_irqs);
@@ -245,20 +276,22 @@ static void ljca_gpio_async(struct work_struct *work)
}
}
-static void ljca_gpio_event_cb(void *context, u8 cmd, const void *evt_data, int len)
+static void ljca_gpio_event_cb(void *context, u8 cmd, const void *evt_data,
+ int len)
{
- const struct gpio_packet *packet = evt_data;
+ const struct ljca_gpio_packet *packet = evt_data;
struct ljca_gpio_dev *ljca_gpio = context;
- int i;
- int irq;
+ int i, irq;
if (cmd != LJCA_GPIO_INT_EVENT)
return;
for (i = 0; i < packet->num; i++) {
- irq = irq_find_mapping(ljca_gpio->gc.irq.domain, packet->item[i].index);
+ irq = irq_find_mapping(ljca_gpio->gc.irq.domain,
+ packet->item[i].index);
if (!irq) {
- dev_err(ljca_gpio->gc.parent, "gpio_id %u does not mapped to IRQ yet\n",
+ dev_err(ljca_gpio->gc.parent,
+ "gpio_id %u does not mapped to IRQ yet\n",
packet->item[i].index);
return;
}
@@ -299,18 +332,22 @@ static int ljca_irq_set_type(struct irq_data *irqd, unsigned int type)
ljca_gpio->connect_mode[gpio_id] = LJCA_GPIO_CONF_INTERRUPT;
switch (type) {
case IRQ_TYPE_LEVEL_HIGH:
- ljca_gpio->connect_mode[gpio_id] |= (LJCA_GPIO_CONF_LEVEL | LJCA_GPIO_CONF_PULLUP);
+ ljca_gpio->connect_mode[gpio_id] |=
+ (LJCA_GPIO_CONF_LEVEL | LJCA_GPIO_CONF_PULLUP);
break;
case IRQ_TYPE_LEVEL_LOW:
- ljca_gpio->connect_mode[gpio_id] |= (LJCA_GPIO_CONF_LEVEL | LJCA_GPIO_CONF_PULLDOWN);
+ ljca_gpio->connect_mode[gpio_id] |=
+ (LJCA_GPIO_CONF_LEVEL | LJCA_GPIO_CONF_PULLDOWN);
break;
case IRQ_TYPE_EDGE_BOTH:
break;
case IRQ_TYPE_EDGE_RISING:
- ljca_gpio->connect_mode[gpio_id] |= (LJCA_GPIO_CONF_EDGE | LJCA_GPIO_CONF_PULLUP);
+ ljca_gpio->connect_mode[gpio_id] |=
+ (LJCA_GPIO_CONF_EDGE | LJCA_GPIO_CONF_PULLUP);
break;
case IRQ_TYPE_EDGE_FALLING:
- ljca_gpio->connect_mode[gpio_id] |= (LJCA_GPIO_CONF_EDGE | LJCA_GPIO_CONF_PULLDOWN);
+ ljca_gpio->connect_mode[gpio_id] |=
+ (LJCA_GPIO_CONF_EDGE | LJCA_GPIO_CONF_PULLDOWN);
break;
default:
return -EINVAL;
@@ -332,15 +369,14 @@ static void ljca_irq_bus_unlock(struct irq_data *irqd)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(gc);
int gpio_id = irqd_to_hwirq(irqd);
- int enabled;
- int unmasked;
+ int enabled, unmasked;
enabled = test_bit(gpio_id, ljca_gpio->enabled_irqs);
unmasked = test_bit(gpio_id, ljca_gpio->unmasked_irqs);
if (enabled != unmasked) {
if (unmasked) {
- gpio_config(ljca_gpio, gpio_id, 0);
+ ljca_gpio_config(ljca_gpio, gpio_id, 0);
ljca_enable_irq(ljca_gpio, gpio_id, true);
set_bit(gpio_id, ljca_gpio->enabled_irqs);
} else {
@@ -363,43 +399,48 @@ static const struct irq_chip ljca_gpio_irqchip = {
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
-static int ljca_gpio_probe(struct platform_device *pdev)
+static int ljca_gpio_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *aux_dev_id)
{
+ struct ljca_client *ljca = auxiliary_dev_to_ljca_client(auxdev);
struct ljca_gpio_dev *ljca_gpio;
struct gpio_irq_chip *girq;
int ret;
- ljca_gpio = devm_kzalloc(&pdev->dev, sizeof(*ljca_gpio), GFP_KERNEL);
+ ljca_gpio = devm_kzalloc(&auxdev->dev, sizeof(*ljca_gpio), GFP_KERNEL);
if (!ljca_gpio)
return -ENOMEM;
- ljca_gpio->gpio_info = dev_get_platdata(&pdev->dev);
- ljca_gpio->connect_mode = devm_kcalloc(&pdev->dev, ljca_gpio->gpio_info->num,
- sizeof(*ljca_gpio->connect_mode), GFP_KERNEL);
+ ljca_gpio->ljca = ljca;
+ ljca_gpio->gpio_info = dev_get_platdata(&auxdev->dev);
+ ljca_gpio->connect_mode = devm_kcalloc(&auxdev->dev,
+ ljca_gpio->gpio_info->num,
+ sizeof(*ljca_gpio->connect_mode),
+ GFP_KERNEL);
if (!ljca_gpio->connect_mode)
return -ENOMEM;
mutex_init(&ljca_gpio->irq_lock);
mutex_init(&ljca_gpio->trans_lock);
- ljca_gpio->pdev = pdev;
ljca_gpio->gc.direction_input = ljca_gpio_direction_input;
ljca_gpio->gc.direction_output = ljca_gpio_direction_output;
+ ljca_gpio->gc.get_direction = ljca_gpio_get_direction;
ljca_gpio->gc.get = ljca_gpio_get_value;
ljca_gpio->gc.set = ljca_gpio_set_value;
ljca_gpio->gc.set_config = ljca_gpio_set_config;
ljca_gpio->gc.init_valid_mask = ljca_gpio_init_valid_mask;
ljca_gpio->gc.can_sleep = true;
- ljca_gpio->gc.parent = &pdev->dev;
+ ljca_gpio->gc.parent = &auxdev->dev;
ljca_gpio->gc.base = -1;
ljca_gpio->gc.ngpio = ljca_gpio->gpio_info->num;
- ljca_gpio->gc.label = ACPI_COMPANION(&pdev->dev) ?
- acpi_dev_name(ACPI_COMPANION(&pdev->dev)) :
- dev_name(&pdev->dev);
+ ljca_gpio->gc.label = ACPI_COMPANION(&auxdev->dev) ?
+ acpi_dev_name(ACPI_COMPANION(&auxdev->dev)) :
+ dev_name(&auxdev->dev);
ljca_gpio->gc.owner = THIS_MODULE;
- platform_set_drvdata(pdev, ljca_gpio);
- ljca_register_event_cb(ljca_gpio->gpio_info->ljca, ljca_gpio_event_cb, ljca_gpio);
+ auxiliary_set_drvdata(auxdev, ljca_gpio);
+ ljca_register_event_cb(ljca, ljca_gpio_event_cb, ljca_gpio);
girq = &ljca_gpio->gc.irq;
gpio_irq_chip_set_chip(girq, &ljca_gpio_irqchip);
@@ -413,7 +454,7 @@ static int ljca_gpio_probe(struct platform_device *pdev)
INIT_WORK(&ljca_gpio->work, ljca_gpio_async);
ret = gpiochip_add_data(&ljca_gpio->gc, ljca_gpio);
if (ret) {
- ljca_unregister_event_cb(ljca_gpio->gpio_info->ljca);
+ ljca_unregister_event_cb(ljca);
mutex_destroy(&ljca_gpio->irq_lock);
mutex_destroy(&ljca_gpio->trans_lock);
}
@@ -421,33 +462,33 @@ static int ljca_gpio_probe(struct platform_device *pdev)
return ret;
}
-static void ljca_gpio_remove(struct platform_device *pdev)
+static void ljca_gpio_remove(struct auxiliary_device *auxdev)
{
- struct ljca_gpio_dev *ljca_gpio = platform_get_drvdata(pdev);
+ struct ljca_gpio_dev *ljca_gpio = auxiliary_get_drvdata(auxdev);
gpiochip_remove(&ljca_gpio->gc);
- ljca_unregister_event_cb(ljca_gpio->gpio_info->ljca);
+ ljca_unregister_event_cb(ljca_gpio->ljca);
+ cancel_work_sync(&ljca_gpio->work);
mutex_destroy(&ljca_gpio->irq_lock);
mutex_destroy(&ljca_gpio->trans_lock);
}
-#define LJCA_GPIO_DRV_NAME "ljca-gpio"
-static const struct platform_device_id ljca_gpio_id[] = {
- { LJCA_GPIO_DRV_NAME, 0 },
- { /* sentinel */ }
+static const struct auxiliary_device_id ljca_gpio_id_table[] = {
+ { "usb_ljca.ljca-gpio", 0 },
+ { /* sentinel */ },
};
-MODULE_DEVICE_TABLE(platform, ljca_gpio_id);
+MODULE_DEVICE_TABLE(auxiliary, ljca_gpio_id_table);
-static struct platform_driver ljca_gpio_driver = {
- .driver.name = LJCA_GPIO_DRV_NAME,
+static struct auxiliary_driver ljca_gpio_driver = {
.probe = ljca_gpio_probe,
- .remove_new = ljca_gpio_remove,
+ .remove = ljca_gpio_remove,
+ .id_table = ljca_gpio_id_table,
};
-module_platform_driver(ljca_gpio_driver);
+module_auxiliary_driver(ljca_gpio_driver);
-MODULE_AUTHOR("Ye Xiang <xiang.ye@intel.com>");
-MODULE_AUTHOR("Wang Zhifeng <zhifeng.wang@intel.com>");
-MODULE_AUTHOR("Zhang Lixu <lixu.zhang@intel.com>");
+MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
+MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
+MODULE_AUTHOR("Lixu Zhang <lixu.zhang@intel.com>");
MODULE_DESCRIPTION("Intel La Jolla Cove Adapter USB-GPIO driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(LJCA);
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 8f80ca8ec1ed..a13f3c18ccd4 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -346,7 +346,7 @@ static int mvebu_gpio_direction_input(struct gpio_chip *chip, unsigned int pin)
* Check with the pinctrl driver whether this pin is usable as
* an input GPIO
*/
- ret = pinctrl_gpio_direction_input(chip->base + pin);
+ ret = pinctrl_gpio_direction_input(chip, pin);
if (ret)
return ret;
@@ -366,7 +366,7 @@ static int mvebu_gpio_direction_output(struct gpio_chip *chip, unsigned int pin,
* Check with the pinctrl driver whether this pin is usable as
* an output GPIO
*/
- ret = pinctrl_gpio_direction_output(chip->base + pin);
+ ret = pinctrl_gpio_direction_output(chip, pin);
if (ret)
return ret;
@@ -757,7 +757,6 @@ static const struct pwm_ops mvebu_pwm_ops = {
.free = mvebu_pwm_free,
.get_state = mvebu_pwm_get_state,
.apply = mvebu_pwm_apply,
- .owner = THIS_MODULE,
};
static void __maybe_unused mvebu_pwm_suspend(struct mvebu_gpio_chip *mvchip)
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index cae9661862fe..91cea97255fa 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -260,7 +260,7 @@ static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
int ret;
if (pxa_gpio_has_pinctrl()) {
- ret = pinctrl_gpio_direction_input(chip->base + offset);
+ ret = pinctrl_gpio_direction_input(chip, offset);
if (ret)
return ret;
}
@@ -289,7 +289,7 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
if (pxa_gpio_has_pinctrl()) {
- ret = pinctrl_gpio_direction_output(chip->base + offset);
+ ret = pinctrl_gpio_direction_output(chip, offset);
if (ret)
return ret;
}
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index d8b1baae6357..6159fda38d5d 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -275,7 +275,7 @@ static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
return error;
}
- error = pinctrl_gpio_request(chip->base + offset);
+ error = pinctrl_gpio_request(chip, offset);
if (error)
pm_runtime_put(p->dev);
@@ -286,7 +286,7 @@ static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset)
{
struct gpio_rcar_priv *p = gpiochip_get_data(chip);
- pinctrl_gpio_free(chip->base + offset);
+ pinctrl_gpio_free(chip, offset);
/*
* Set the GPIO as an input to ensure that the next GPIO request won't
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index 23040a8cea34..0bd339813110 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -159,9 +159,9 @@ static int rockchip_gpio_set_direction(struct gpio_chip *chip,
if (input)
- pinctrl_gpio_direction_input(bank->pin_base + offset);
+ pinctrl_gpio_direction_input(chip, offset);
else
- pinctrl_gpio_direction_output(bank->pin_base + offset);
+ pinctrl_gpio_direction_output(chip, offset);
raw_spin_lock_irqsave(&bank->slock, flags);
rockchip_gpio_writel_bit(bank, offset, data, bank->gpio_regs->port_ddr);
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index ea715582bcf3..ea5f9cc14bc4 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -137,16 +137,11 @@ static void tegra_gpio_disable(struct tegra_gpio_info *tgi, unsigned int gpio)
tegra_gpio_mask_write(tgi, GPIO_MSK_CNF(tgi, gpio), gpio, 0);
}
-static int tegra_gpio_request(struct gpio_chip *chip, unsigned int offset)
-{
- return pinctrl_gpio_request(chip->base + offset);
-}
-
static void tegra_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
- pinctrl_gpio_free(chip->base + offset);
+ pinctrl_gpio_free(chip, offset);
tegra_gpio_disable(tgi, offset);
}
@@ -179,7 +174,7 @@ static int tegra_gpio_direction_input(struct gpio_chip *chip,
tegra_gpio_mask_write(tgi, GPIO_MSK_OE(tgi, offset), offset, 0);
tegra_gpio_enable(tgi, offset);
- ret = pinctrl_gpio_direction_input(chip->base + offset);
+ ret = pinctrl_gpio_direction_input(chip, offset);
if (ret < 0)
dev_err(tgi->dev,
"Failed to set pinctrl input direction of GPIO %d: %d",
@@ -199,7 +194,7 @@ static int tegra_gpio_direction_output(struct gpio_chip *chip,
tegra_gpio_mask_write(tgi, GPIO_MSK_OE(tgi, offset), offset, 1);
tegra_gpio_enable(tgi, offset);
- ret = pinctrl_gpio_direction_output(chip->base + offset);
+ ret = pinctrl_gpio_direction_output(chip, offset);
if (ret < 0)
dev_err(tgi->dev,
"Failed to set pinctrl output direction of GPIO %d: %d",
@@ -717,7 +712,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
}
tgi->gc.label = "tegra-gpio";
- tgi->gc.request = tegra_gpio_request;
+ tgi->gc.request = pinctrl_gpio_request;
tgi->gc.free = tegra_gpio_free;
tgi->gc.direction_input = tegra_gpio_direction_input;
tgi->gc.get = tegra_gpio_get;
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 444501c56a3b..07e5e6323e86 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -130,7 +130,7 @@ static int vf610_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
}
- return pinctrl_gpio_direction_input(chip->base + gpio);
+ return pinctrl_gpio_direction_input(chip, gpio);
}
static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
@@ -148,7 +148,7 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
}
- return pinctrl_gpio_direction_output(chip->base + gpio);
+ return pinctrl_gpio_direction_output(chip, gpio);
}
static void vf610_gpio_irq_handler(struct irq_desc *desc)
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 31fc71a612c2..02ffda6c1e51 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -2287,8 +2287,7 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
* FIXME: find a non-racy way to retrieve this information. Maybe a
* lock common to both frameworks?
*/
- ok_for_pinctrl =
- pinctrl_gpio_can_use_line(gc->base + info->offset);
+ ok_for_pinctrl = pinctrl_gpio_can_use_line(gc, info->offset);
spin_lock_irqsave(&gpio_lock, flags);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index cbafcd95243e..95d2a7b2ea3e 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1092,28 +1092,6 @@ void gpiochip_remove(struct gpio_chip *gc)
}
EXPORT_SYMBOL_GPL(gpiochip_remove);
-/*
- * FIXME: This will be removed soon.
- *
- * This function is depracated, don't use.
- */
-struct gpio_chip *gpiochip_find(void *data,
- int (*match)(struct gpio_chip *gc,
- void *data))
-{
- struct gpio_device *gdev;
- struct gpio_chip *gc = NULL;
-
- gdev = gpio_device_find(data, match);
- if (gdev) {
- gc = gdev->chip;
- gpio_device_put(gdev);
- }
-
- return gc;
-}
-EXPORT_SYMBOL_GPL(gpiochip_find);
-
/**
* gpio_device_find() - find a specific GPIO device
* @data: data to pass to match function
@@ -2036,7 +2014,7 @@ int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset)
return 0;
#endif
- return pinctrl_gpio_request(gc->gpiodev->base + offset);
+ return pinctrl_gpio_request(gc, offset);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_request);
@@ -2052,7 +2030,7 @@ void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset)
return;
#endif
- pinctrl_gpio_free(gc->gpiodev->base + offset);
+ pinctrl_gpio_free(gc, offset);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_free);
@@ -2065,7 +2043,7 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_free);
int gpiochip_generic_config(struct gpio_chip *gc, unsigned int offset,
unsigned long config)
{
- return pinctrl_gpio_set_config(gc->gpiodev->base + offset, config);
+ return pinctrl_gpio_set_config(gc, offset, config);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_config);
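The GPIO hunks above all follow the same conversion: the pinctrl_gpio_* helpers now take a gpio_chip pointer plus a chip-relative offset instead of a number in the global GPIO numberspace, so callers no longer compute chip->base + offset. Below is a minimal sketch of a driver callback using the reworked helpers; foo_gpio_hw_set_input() is a hypothetical hardware accessor invented for the example, not part of any real driver.

#include <linux/gpio/driver.h>
#include <linux/pinctrl/consumer.h>

/* Hypothetical hardware accessor, shown only to complete the sketch. */
static int foo_gpio_hw_set_input(struct gpio_chip *chip, unsigned int offset);

static int foo_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
	int ret;

	/* Hand the pin over to pinctrl; no chip->base arithmetic needed. */
	ret = pinctrl_gpio_direction_input(chip, offset);
	if (ret)
		return ret;

	return foo_gpio_hw_set_input(chip, offset);
}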
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 91820838b63b..8b4ca2576a3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -363,9 +363,6 @@ struct amdgpu_ip_block_version {
const struct amd_ip_funcs *funcs;
};
-#define HW_REV(_Major, _Minor, _Rev) \
- ((((uint32_t) (_Major)) << 16) | ((uint32_t) (_Minor) << 8) | ((uint32_t) (_Rev)))
-
struct amdgpu_ip_block {
struct amdgpu_ip_block_status status;
const struct amdgpu_ip_block_version *version;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 1eccad4ce243..41fbc4fd0fac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -425,6 +425,32 @@ validate_fail:
return ret;
}
+static int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
+ uint32_t domain,
+ struct dma_fence *fence)
+{
+ int ret = amdgpu_bo_reserve(bo, false);
+
+ if (ret)
+ return ret;
+
+ ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
+ if (ret)
+ goto unreserve_out;
+
+ ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
+ if (ret)
+ goto unreserve_out;
+
+ dma_resv_add_fence(bo->tbo.base.resv, fence,
+ DMA_RESV_USAGE_BOOKKEEP);
+
+unreserve_out:
+ amdgpu_bo_unreserve(bo);
+
+ return ret;
+}
+
static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
{
return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
@@ -1784,6 +1810,15 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
}
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
+ } else {
+ mutex_lock(&avm->process_info->lock);
+ if (avm->process_info->eviction_fence &&
+ !dma_fence_is_signaled(&avm->process_info->eviction_fence->base))
+ ret = amdgpu_amdkfd_bo_validate_and_fence(bo, domain,
+ &avm->process_info->eviction_fence->base);
+ mutex_unlock(&avm->process_info->lock);
+ if (ret)
+ goto err_validate_bo;
}
if (offset)
@@ -1793,6 +1828,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
allocate_init_user_pages_failed:
err_pin_bo:
+err_validate_bo:
remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
drm_vma_node_revoke(&gobj->vma_node, drm_priv);
err_node_allow:
@@ -1866,10 +1902,6 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
if (unlikely(ret))
return ret;
- /* The eviction fence should be removed by the last unmap.
- * TODO: Log an error condition if the bo still has the eviction fence
- * attached
- */
amdgpu_amdkfd_remove_eviction_fence(mem->bo,
process_info->eviction_fence);
pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
@@ -1998,19 +2030,6 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
if (unlikely(ret))
goto out_unreserve;
- if (mem->mapped_to_gpu_memory == 0 &&
- !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
- /* Validate BO only once. The eviction fence gets added to BO
- * the first time it is mapped. Validate will wait for all
- * background evictions to complete.
- */
- ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
- if (ret) {
- pr_debug("Validate failed\n");
- goto out_unreserve;
- }
- }
-
list_for_each_entry(entry, &mem->attachments, list) {
if (entry->bo_va->base.vm != avm || entry->is_mapped)
continue;
@@ -2037,10 +2056,6 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
mem->mapped_to_gpu_memory);
}
- if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
- dma_resv_add_fence(bo->tbo.base.resv,
- &avm->process_info->eviction_fence->base,
- DMA_RESV_USAGE_BOOKKEEP);
ret = unreserve_bo_and_vms(&ctx, false, false);
goto out;
@@ -2074,7 +2089,6 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
{
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
- struct amdkfd_process_info *process_info = avm->process_info;
unsigned long bo_size = mem->bo->tbo.base.size;
struct kfd_mem_attachment *entry;
struct bo_vm_reservation_context ctx;
@@ -2115,15 +2129,6 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
mem->mapped_to_gpu_memory);
}
- /* If BO is unmapped from all VMs, unfence it. It can be evicted if
- * required.
- */
- if (mem->mapped_to_gpu_memory == 0 &&
- !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
- !mem->bo->tbo.pin_count)
- amdgpu_amdkfd_remove_eviction_fence(mem->bo,
- process_info->eviction_fence);
-
unreserve_out:
unreserve_bo_and_vms(&ctx, false, false);
out:
@@ -2351,8 +2356,20 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
amdgpu_sync_create(&(*mem)->sync);
(*mem)->is_imported = true;
+ mutex_lock(&avm->process_info->lock);
+ if (avm->process_info->eviction_fence &&
+ !dma_fence_is_signaled(&avm->process_info->eviction_fence->base))
+ ret = amdgpu_amdkfd_bo_validate_and_fence(bo, (*mem)->domain,
+ &avm->process_info->eviction_fence->base);
+ mutex_unlock(&avm->process_info->lock);
+ if (ret)
+ goto err_remove_mem;
+
return 0;
+err_remove_mem:
+ remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
+ drm_vma_node_revoke(&obj->vma_node, drm_priv);
err_free_mem:
kfree(*mem);
err_put_obj:
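The new amdgpu_amdkfd_bo_validate_and_fence() helper above moves validation and eviction-fence attachment from map time to allocation/import time. Its underlying reserve, validate, attach-fence shape is sketched generically below; validate_cb is a stand-in for the driver-specific validation step and is not a real kernel API.

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

static int attach_bookkeep_fence(struct dma_resv *resv, struct dma_fence *fence,
				 int (*validate_cb)(void *data), void *data)
{
	int ret;

	ret = dma_resv_lock(resv, NULL);
	if (ret)
		return ret;

	ret = validate_cb(data);
	if (ret)
		goto unlock;

	/* Make room for one more fence slot before adding the fence. */
	ret = dma_resv_reserve_fences(resv, 1);
	if (ret)
		goto unlock;

	dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_BOOKKEEP);

unlock:
	dma_resv_unlock(resv);
	return ret;
}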
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 5bbb23e102ba..618e469e3622 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -29,6 +29,7 @@
#include "amdgpu.h"
#include "atom.h"
+#include <linux/device.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/acpi.h>
@@ -287,6 +288,10 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
return false;
+ /* ATRM is for on-platform devices only */
+ if (dev_is_removable(&adev->pdev->dev))
+ return false;
+
while ((pdev = pci_get_base_class(PCI_BASE_CLASS_DISPLAY, pdev))) {
if ((pdev->class != PCI_CLASS_DISPLAY_VGA << 8) &&
(pdev->class != PCI_CLASS_DISPLAY_OTHER << 8))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index e210fe5c22a0..86f88512eb7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1117,6 +1117,11 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
return r;
}
+ /* FIXME: In theory this loop shouldn't be needed any more when
+ * amdgpu_vm_handle_moved handles all moved BOs that are reserved
+ * with p->ticket. But removing it caused test regressions, so I'm
+ * leaving it here for now.
+ */
amdgpu_bo_list_for_each_entry(e, p->bo_list) {
bo_va = e->bo_va;
if (bo_va == NULL)
@@ -1131,7 +1136,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
return r;
}
- r = amdgpu_vm_handle_moved(adev, vm);
+ r = amdgpu_vm_handle_moved(adev, vm, &p->exec.ticket);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index d5f78179b2b6..fd8cd8e2d3f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -41,6 +41,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
+#include <linux/device.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
@@ -1073,6 +1074,8 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev)
amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
amdgpu_psp_wait_for_bootloader(adev);
ret = amdgpu_atomfirmware_asic_init(adev, true);
+ /* TODO: check the return val and stop device initialization if boot fails */
+ amdgpu_psp_query_boot_status(adev);
return ret;
} else {
return amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -2223,7 +2226,6 @@ out:
*/
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev_to_drm(adev);
struct pci_dev *parent;
int i, r;
bool total;
@@ -2294,7 +2296,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
(amdgpu_is_atpx_hybrid() ||
amdgpu_has_atpx_dgpu_power_cntl()) &&
((adev->flags & AMD_IS_APU) == 0) &&
- !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
+ !dev_is_removable(&adev->pdev->dev))
adev->flags |= AMD_IS_PX;
if (!(adev->flags & AMD_IS_APU)) {
@@ -3962,13 +3964,23 @@ int amdgpu_device_init(struct amdgpu_device *adev,
}
}
} else {
- tmp = amdgpu_reset_method;
- /* It should do a default reset when loading or reloading the driver,
- * regardless of the module parameter reset_method.
- */
- amdgpu_reset_method = AMD_RESET_METHOD_NONE;
- r = amdgpu_asic_reset(adev);
- amdgpu_reset_method = tmp;
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 7):
+ case IP_VERSION(13, 0, 10):
+ r = psp_gpu_reset(adev);
+ break;
+ default:
+ tmp = amdgpu_reset_method;
+ /* It should do a default reset when loading or reloading the driver,
+ * regardless of the module parameter reset_method.
+ */
+ amdgpu_reset_method = AMD_RESET_METHOD_NONE;
+ r = amdgpu_asic_reset(adev);
+ amdgpu_reset_method = tmp;
+ break;
+ }
+
if (r) {
dev_err(adev->dev, "asic reset on init failed\n");
goto failed;
@@ -4132,7 +4144,7 @@ fence_driver_init:
px = amdgpu_device_supports_px(ddev);
- if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
+ if (px || (!dev_is_removable(&adev->pdev->dev) &&
apple_gmux_detect(NULL, NULL)))
vga_switcheroo_register_client(adev->pdev,
&amdgpu_switcheroo_ops, px);
@@ -4282,7 +4294,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
px = amdgpu_device_supports_px(adev_to_drm(adev));
- if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
+ if (px || (!dev_is_removable(&adev->pdev->dev) &&
apple_gmux_detect(NULL, NULL)))
vga_switcheroo_unregister_client(adev->pdev);
@@ -5566,10 +5578,6 @@ skip_hw_reset:
drm_sched_start(&ring->sched, true);
}
- if (adev->enable_mes &&
- amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(11, 0, 3))
- amdgpu_mes_self_test(tmp_adev);
-
if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index b6a53e8429b2..0431eafa86b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -99,6 +99,7 @@
MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
#define mmRCC_CONFIG_MEMSIZE 0xde3
+#define mmMP0_SMN_C2PMSG_33 0x16061
#define mmMM_INDEX 0x0
#define mmMM_INDEX_HI 0x6
#define mmMM_DATA 0x1
@@ -239,8 +240,26 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev,
static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
uint8_t *binary)
{
- uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
- int ret = 0;
+ uint64_t vram_size;
+ u32 msg;
+ int i, ret = 0;
+
+ /* It can take up to a second for IFWI init to complete on some dGPUs,
+ * but generally it should be in the 60-100ms range. Normally this starts
+ * as soon as the device gets power so by the time the OS loads this has long
+ * completed. However, when a card is hotplugged via e.g., USB4, we need to
+ * wait for this to complete. Once the C2PMSG is updated, we can
+ * continue.
+ */
+ if (dev_is_removable(&adev->pdev->dev)) {
+ for (i = 0; i < 1000; i++) {
+ msg = RREG32(mmMP0_SMN_C2PMSG_33);
+ if (msg & 0x80000000)
+ break;
+ msleep(1);
+ }
+ }
+ vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
if (vram_size) {
uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
@@ -2449,6 +2468,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0))
adev->gmc.xgmi.supported = true;
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
+ adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0);
+
/* set NBIO version */
switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
case IP_VERSION(6, 1, 0):
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index b5e28fa3f414..e7e87a3b2601 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -409,7 +409,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
if (!r)
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (!r)
- r = amdgpu_vm_handle_moved(adev, vm);
+ r = amdgpu_vm_handle_moved(adev, vm, ticket);
if (r && r != -EBUSY)
DRM_ERROR("Failed to invalidate VM page tables (%d))\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 6cc6e3991410..3095a3a864af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2041,6 +2041,14 @@ static const struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
+static const struct amdgpu_asic_type_quirk asic_type_quirks[] = {
+ /* differentiate between P10 and P11 asics with the same DID */
+ {0x67FF, 0xE3, CHIP_POLARIS10},
+ {0x67FF, 0xE7, CHIP_POLARIS10},
+ {0x67FF, 0xF3, CHIP_POLARIS10},
+ {0x67FF, 0xF7, CHIP_POLARIS10},
+};
+
static const struct drm_driver amdgpu_kms_driver;
static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev)
@@ -2083,6 +2091,22 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
}
}
+static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(asic_type_quirks); i++) {
+ if (pdev->device == asic_type_quirks[i].device &&
+ pdev->revision == asic_type_quirks[i].revision) {
+ flags &= ~AMD_ASIC_MASK;
+ flags |= asic_type_quirks[i].type;
+ break;
+ }
+ }
+
+ return flags;
+}
+
static int amdgpu_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -2110,15 +2134,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
"See modparam exp_hw_support\n");
return -ENODEV;
}
- /* differentiate between P10 and P11 asics with the same DID */
- if (pdev->device == 0x67FF &&
- (pdev->revision == 0xE3 ||
- pdev->revision == 0xE7 ||
- pdev->revision == 0xF3 ||
- pdev->revision == 0xF7)) {
- flags &= ~AMD_ASIC_MASK;
- flags |= CHIP_POLARIS10;
- }
+
+ flags = amdgpu_fix_asic_type(pdev, flags);
/* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping,
* however, SME requires an indirect IOMMU mapping because the encryption
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index c92e0aba69e1..a2a29dcb2422 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -385,9 +385,11 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring = &kiq->ring;
u32 domain = AMDGPU_GEM_DOMAIN_GTT;
+#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
/* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
domain |= AMDGPU_GEM_DOMAIN_VRAM;
+#endif
/* create MQD for KIQ */
if (!adev->enable_mes_kiq && !ring->mqd_obj) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 59f10b353b3a..9ddbf1494326 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -557,8 +557,20 @@ static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
mqd_prop.hqd_active = false;
+ if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
+ p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
+ mutex_lock(&adev->srbm_mutex);
+ amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
+ }
+
mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);
+ if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
+ p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
+ amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ }
+
amdgpu_bo_unreserve(q->mqd_obj);
}
@@ -994,9 +1006,13 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
switch (queue_type) {
case AMDGPU_RING_TYPE_GFX:
ring->funcs = adev->gfx.gfx_ring[0].funcs;
+ ring->me = adev->gfx.gfx_ring[0].me;
+ ring->pipe = adev->gfx.gfx_ring[0].pipe;
break;
case AMDGPU_RING_TYPE_COMPUTE:
ring->funcs = adev->gfx.compute_ring[0].funcs;
+ ring->me = adev->gfx.compute_ring[0].me;
+ ring->pipe = adev->gfx.compute_ring[0].pipe;
break;
case AMDGPU_RING_TYPE_SDMA:
ring->funcs = adev->sdma.instance[0].ring.funcs;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 648bd5e12830..32b701cc0376 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -2120,6 +2120,21 @@ int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
return ret;
}
+int amdgpu_psp_query_boot_status(struct amdgpu_device *adev)
+{
+ struct psp_context *psp = &adev->psp;
+ int ret = 0;
+
+ if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
+ return 0;
+
+ if (psp->funcs &&
+ psp->funcs->query_boot_status)
+ ret = psp->funcs->query_boot_status(psp);
+
+ return ret;
+}
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 7111dd32e66f..5d36ad3f48c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -134,6 +134,7 @@ struct psp_funcs {
int (*update_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
int (*vbflash_stat)(struct psp_context *psp);
int (*fatal_error_recovery_quirk)(struct psp_context *psp);
+ int (*query_boot_status)(struct psp_context *psp);
};
struct ta_funcs {
@@ -537,4 +538,6 @@ int is_psp_fw_valid(struct psp_bin_desc bin);
int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev);
+int amdgpu_psp_query_boot_status(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 303fbb6a48b6..b7fe5951b166 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1222,6 +1222,8 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
+ struct amdgpu_hive_info *hive;
+ int hive_ras_recovery = 0;
if (!block_obj || !block_obj->hw_ops) {
dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
@@ -1229,15 +1231,22 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
return -EOPNOTSUPP;
}
- /* skip ras error reset in gpu reset */
- if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery)) &&
- mca_funcs && mca_funcs->mca_set_debug_mode)
- return -EOPNOTSUPP;
-
if (!amdgpu_ras_is_supported(adev, block) ||
!amdgpu_ras_get_mca_debug_mode(adev))
return -EOPNOTSUPP;
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ hive_ras_recovery = atomic_read(&hive->ras_recovery);
+ amdgpu_put_xgmi_hive(hive);
+ }
+
+ /* skip ras error reset in gpu reset */
+ if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery) ||
+ hive_ras_recovery) &&
+ mca_funcs && mca_funcs->mca_set_debug_mode)
+ return -EOPNOTSUPP;
+
if (block_obj->hw_ops->reset_ras_error_count)
block_obj->hw_ops->reset_ras_error_count(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index f74347cc087a..d65e21914d8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -166,8 +166,12 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
}
}
- if (reset)
+ if (reset) {
+ /* use mode-2 reset for poison consumption */
+ if (!entry)
+ con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
amdgpu_ras_reset_gpu(adev);
+ }
}
kfree(err_data->err_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 3cd5977c0709..904252456d25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1373,6 +1373,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
* @vm: requested vm
+ * @ticket: optional reservation ticket used to reserve the VM
*
* Make sure all BOs which are moved are updated in the PTs.
*
@@ -1382,11 +1383,12 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
* PTs have to be reserved!
*/
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
+ struct amdgpu_vm *vm,
+ struct ww_acquire_ctx *ticket)
{
struct amdgpu_bo_va *bo_va;
struct dma_resv *resv;
- bool clear;
+ bool clear, unlock;
int r;
spin_lock(&vm->status_lock);
@@ -1409,17 +1411,24 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
spin_unlock(&vm->status_lock);
/* Try to reserve the BO to avoid clearing its ptes */
- if (!adev->debug_vm && dma_resv_trylock(resv))
+ if (!adev->debug_vm && dma_resv_trylock(resv)) {
clear = false;
+ unlock = true;
+ /* The caller is already holding the reservation lock */
+ } else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
+ clear = false;
+ unlock = false;
/* Somebody else is using the BO right now */
- else
+ } else {
clear = true;
+ unlock = false;
+ }
r = amdgpu_vm_bo_update(adev, bo_va, clear);
if (r)
return r;
- if (!clear)
+ if (unlock)
dma_resv_unlock(resv);
spin_lock(&vm->status_lock);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 9c7b5d33b56e..2cd86d2bf73f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -443,7 +443,8 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
+ struct amdgpu_vm *vm,
+ struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
struct amdgpu_vm *vm, struct amdgpu_bo *bo);
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
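With the change above, amdgpu_vm_handle_moved() accepts the caller's ww_acquire_ctx so it can recognize BOs whose reservation the caller already holds (for example via the CS ticket) and update them without clearing their PTEs. The per-BO decision reduces to the following sketch; bo_resv_usable() is a made-up name used only for illustration.

#include <linux/dma-resv.h>
#include <linux/ww_mutex.h>

/*
 * Decide whether the BO's reservation can be used for a full update and
 * whether this function is responsible for releasing it afterwards.
 */
static bool bo_resv_usable(struct dma_resv *resv, struct ww_acquire_ctx *ticket,
			   bool *unlock)
{
	if (dma_resv_trylock(resv)) {
		*unlock = true;		/* we took the lock, so we drop it */
		return true;
	}

	if (ticket && dma_resv_locking_ctx(resv) == ticket) {
		*unlock = false;	/* the caller already holds the lock */
		return true;
	}

	*unlock = false;		/* somebody else owns the lock */
	return false;
}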
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 18f58efc9dc7..08916538a615 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -77,7 +77,16 @@ static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
return true;
}
+static inline u64 amdgpu_vram_mgr_blocks_size(struct list_head *head)
+{
+ struct drm_buddy_block *block;
+ u64 size = 0;
+ list_for_each_entry(block, head, link)
+ size += amdgpu_vram_mgr_block_size(block);
+
+ return size;
+}
/**
* DOC: mem_info_vram_total
@@ -516,6 +525,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
mutex_unlock(&mgr->lock);
vres->base.start = 0;
+ size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks),
+ vres->base.size);
list_for_each_entry(block, &vres->blocks, link) {
unsigned long start;
@@ -523,8 +534,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
amdgpu_vram_mgr_block_size(block);
start >>= PAGE_SHIFT;
- if (start > PFN_UP(vres->base.size))
- start -= PFN_UP(vres->base.size);
+ if (start > PFN_UP(size))
+ start -= PFN_UP(size);
else
start = 0;
vres->base.start = max(vres->base.start, start);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index d9ccacd06fba..c8a3bf01743f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3498,6 +3498,8 @@ static void gfx_v10_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev,
unsigned int vmid);
+static int gfx_v10_0_set_powergating_state(void *handle,
+ enum amd_powergating_state state);
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
@@ -6465,11 +6467,18 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
nv_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.me.mqd_backup[mqd_idx])
- memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
} else {
+ mutex_lock(&adev->srbm_mutex);
+ nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ if (ring->doorbell_index == adev->doorbell_index.gfx_ring0 << 1)
+ gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
+
+ nv_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
/* restore mqd with the backup copy */
if (adev->gfx.me.mqd_backup[mqd_idx])
- memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
+ memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
/* reset the ring */
ring->wptr = 0;
*ring->wptr_cpu_addr = 0;
@@ -6743,7 +6752,7 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.kiq[0].mqd_backup)
- memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
+ memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
/* reset ring buffer */
ring->wptr = 0;
@@ -6766,7 +6775,7 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.kiq[0].mqd_backup)
- memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
+ memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
}
return 0;
@@ -6787,11 +6796,11 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
} else {
/* restore MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+ memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
/* reset ring buffer */
ring->wptr = 0;
atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
@@ -7172,6 +7181,13 @@ static int gfx_v10_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ /* WA added for Vangogh asic to fix the SMU suspend failure:
+ * power gating needs to be set again during gfxoff control,
+ * otherwise disallowing gfxoff will fail.
+ */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 1))
+ gfx_v10_0_set_powergating_state(handle, AMD_PG_STATE_UNGATE);
+
if (!adev->no_hw_access) {
if (amdgpu_async_gfx_ring) {
if (amdgpu_gfx_disable_kgq(adev, 0))
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index fd22943685f7..0c6133cc5e57 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -155,6 +155,7 @@ static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue
{
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
+ PACKET3_SET_RESOURCES_UNMAP_LATENTY(0xa) | /* unmap_latency: 0xa (~ 1s) */
PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
@@ -3714,11 +3715,11 @@ static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring)
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.me.mqd_backup[mqd_idx])
- memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
} else {
/* restore mqd with the backup copy */
if (adev->gfx.me.mqd_backup[mqd_idx])
- memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
+ memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
/* reset the ring */
ring->wptr = 0;
*ring->wptr_cpu_addr = 0;
@@ -4007,7 +4008,7 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.kiq[0].mqd_backup)
- memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
+ memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
/* reset ring buffer */
ring->wptr = 0;
@@ -4030,7 +4031,7 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.kiq[0].mqd_backup)
- memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
+ memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
}
return 0;
@@ -4051,11 +4052,11 @@ static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring)
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
} else {
/* restore MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+ memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
/* reset ring buffer */
ring->wptr = 0;
atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index e523627cfe25..df218d5ca775 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -28,6 +28,7 @@
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
+#include <linux/device.h>
#include <linux/pci.h>
#define smnPCIE_CONFIG_CNTL 0x11180044
@@ -361,7 +362,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
- if (pci_is_thunderbolt_attached(adev->pdev))
+ if (dev_is_removable(&adev->pdev->dev))
data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
else
data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
@@ -480,7 +481,7 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
- if (pci_is_thunderbolt_attached(adev->pdev))
+ if (dev_is_removable(&adev->pdev->dev))
data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
else
data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index 4142e2fcd866..3cf4684d0d3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -759,6 +759,83 @@ static int psp_v13_0_fatal_error_recovery_quirk(struct psp_context *psp)
return 0;
}
+
+static void psp_v13_0_boot_error_reporting(struct amdgpu_device *adev,
+ uint32_t inst,
+ uint32_t boot_error)
+{
+ uint32_t socket_id;
+ uint32_t aid_id;
+ uint32_t hbm_id;
+ uint32_t reg_data;
+
+ socket_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, SOCKET_ID);
+ aid_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, AID_ID);
+ hbm_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, HBM_ID);
+
+ reg_data = RREG32_SOC15(MP0, inst, regMP0_SMN_C2PMSG_109);
+ dev_info(adev->dev, "socket: %d, aid: %d, firmware boot failed, fw status is 0x%x\n",
+ socket_id, aid_id, reg_data);
+
+ if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_MEM_TRAINING))
+ dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, memory training failed\n",
+ socket_id, aid_id, hbm_id);
+
+ if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_FW_LOAD))
+ dev_info(adev->dev, "socket: %d, aid: %d, firmware load failed at boot time\n",
+ socket_id, aid_id);
+
+ if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_WAFL_LINK_TRAINING))
+ dev_info(adev->dev, "socket: %d, aid: %d, wafl link training failed\n",
+ socket_id, aid_id);
+
+ if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_XGMI_LINK_TRAINING))
+ dev_info(adev->dev, "socket: %d, aid: %d, xgmi link training failed\n",
+ socket_id, aid_id);
+
+ if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_USR_CP_LINK_TRAINING))
+ dev_info(adev->dev, "socket: %d, aid: %d, usr cp link training failed\n",
+ socket_id, aid_id);
+
+ if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_USR_DP_LINK_TRAINING))
+ dev_info(adev->dev, "socket: %d, aid: %d, usr dp link training failed\n",
+ socket_id, aid_id);
+
+ if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_HBM_MEM_TEST))
+ dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm memory test failed\n",
+ socket_id, aid_id, hbm_id);
+
+ if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_HBM_BIST_TEST))
+ dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm bist test failed\n",
+ socket_id, aid_id, hbm_id);
+}
+
+static int psp_v13_0_query_boot_status(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int inst_mask = adev->aid_mask;
+ uint32_t reg_data;
+ uint32_t i;
+ int ret = 0;
+
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6))
+ return 0;
+
+ if (RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_59) < 0x00a10007)
+ return 0;
+
+ for_each_inst(i, inst_mask) {
+ reg_data = RREG32_SOC15(MP0, i, regMP0_SMN_C2PMSG_126);
+ if (!REG_GET_FIELD(reg_data, MP0_SMN_C2PMSG_126, BOOT_STATUS)) {
+ psp_v13_0_boot_error_reporting(adev, i, reg_data);
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ return ret;
+}
+
static const struct psp_funcs psp_v13_0_funcs = {
.init_microcode = psp_v13_0_init_microcode,
.wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state,
@@ -781,6 +858,7 @@ static const struct psp_funcs psp_v13_0_funcs = {
.update_spirom = psp_v13_0_update_spirom,
.vbflash_stat = psp_v13_0_vbflash_status,
.fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk,
+ .query_boot_status = psp_v13_0_query_boot_status,
};
void psp_v13_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index 743d2f68b090..770b4b4e3138 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -91,8 +91,7 @@ static void umc_v12_0_reset_error_count(struct amdgpu_device *adev)
static bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status)
{
return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1));
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 0e792a8496d6..cd8e459201f1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1404,6 +1404,66 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
return i;
}
+static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
+ struct kfd_gpu_cache_info *pcache_info)
+{
+ struct amdgpu_device *adev = kdev->adev;
+ int i = 0;
+
+ /* TCP L1 Cache per CU */
+ if (adev->gfx.config.gc_tcp_size_per_cu) {
+ pcache_info[i].cache_size = adev->gfx.config.gc_tcp_size_per_cu;
+ pcache_info[i].cache_level = 1;
+ pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
+ CRAT_CACHE_FLAGS_DATA_CACHE |
+ CRAT_CACHE_FLAGS_SIMD_CACHE);
+ pcache_info[i].num_cu_shared = 1;
+ i++;
+ }
+ /* Scalar L1 Instruction Cache per SQC */
+ if (adev->gfx.config.gc_l1_instruction_cache_size_per_sqc) {
+ pcache_info[i].cache_size =
+ adev->gfx.config.gc_l1_instruction_cache_size_per_sqc;
+ pcache_info[i].cache_level = 1;
+ pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
+ CRAT_CACHE_FLAGS_INST_CACHE |
+ CRAT_CACHE_FLAGS_SIMD_CACHE);
+ pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_cu_per_sqc;
+ i++;
+ }
+ /* Scalar L1 Data Cache per SQC */
+ if (adev->gfx.config.gc_l1_data_cache_size_per_sqc) {
+ pcache_info[i].cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc;
+ pcache_info[i].cache_level = 1;
+ pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
+ CRAT_CACHE_FLAGS_DATA_CACHE |
+ CRAT_CACHE_FLAGS_SIMD_CACHE);
+ pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_cu_per_sqc;
+ i++;
+ }
+ /* L2 Data Cache per GPU (Total Tex Cache) */
+ if (adev->gfx.config.gc_tcc_size) {
+ pcache_info[i].cache_size = adev->gfx.config.gc_tcc_size;
+ pcache_info[i].cache_level = 2;
+ pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
+ CRAT_CACHE_FLAGS_DATA_CACHE |
+ CRAT_CACHE_FLAGS_SIMD_CACHE);
+ pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
+ i++;
+ }
+ /* L3 Data Cache per GPU */
+ if (adev->gmc.mall_size) {
+ pcache_info[i].cache_size = adev->gmc.mall_size / 1024;
+ pcache_info[i].cache_level = 3;
+ pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
+ CRAT_CACHE_FLAGS_DATA_CACHE |
+ CRAT_CACHE_FLAGS_SIMD_CACHE);
+ pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
+ i++;
+ }
+ return i;
+}
+
int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info)
{
int num_of_cache_types = 0;
@@ -1461,10 +1521,14 @@ int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pc
num_of_cache_types = ARRAY_SIZE(vega20_cache_info);
break;
case IP_VERSION(9, 4, 2):
- case IP_VERSION(9, 4, 3):
*pcache_info = aldebaran_cache_info;
num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info);
break;
+ case IP_VERSION(9, 4, 3):
+ num_of_cache_types =
+ kfd_fill_gpu_cache_info_from_gfx_config_v2(kdev->kfd,
+ *pcache_info);
+ break;
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 2):
*pcache_info = raven_cache_info;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index c1e10f42db28..057284bf50bb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1602,10 +1602,13 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
unsigned int cu_sibling_map_mask;
int first_active_cu;
int i, j, k, xcc, start, end;
+ int num_xcc = NUM_XCC(knode->xcc_mask);
struct kfd_cache_properties *pcache = NULL;
+ enum amdgpu_memory_partition mode;
+ struct amdgpu_device *adev = knode->adev;
start = ffs(knode->xcc_mask) - 1;
- end = start + NUM_XCC(knode->xcc_mask);
+ end = start + num_xcc;
cu_sibling_map_mask = cu_info->bitmap[start][0][0];
cu_sibling_map_mask &=
((1 << pcache_info[cache_type].num_cu_shared) - 1);
@@ -1624,7 +1627,18 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
pcache->processor_id_low = cu_processor_id
+ (first_active_cu - 1);
pcache->cache_level = pcache_info[cache_type].cache_level;
- pcache->cache_size = pcache_info[cache_type].cache_size;
+
+ if (KFD_GC_VERSION(knode) == IP_VERSION(9, 4, 3))
+ mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ else
+ mode = UNKNOWN_MEMORY_PARTITION_MODE;
+
+ if (pcache->cache_level == 2)
+ pcache->cache_size = pcache_info[cache_type].cache_size * num_xcc;
+ else if (mode)
+ pcache->cache_size = pcache_info[cache_type].cache_size / mode;
+ else
+ pcache->cache_size = pcache_info[cache_type].cache_size;
if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE)
pcache->cache_type |= HSA_CACHE_TYPE_DATA;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 74c21d98b4de..4360a696f10a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -4348,7 +4348,6 @@ static bool full_update_required(struct dc *dc,
srf_updates[i].in_transfer_func ||
srf_updates[i].func_shaper ||
srf_updates[i].lut3d_func ||
- srf_updates[i].blend_tf ||
srf_updates[i].surface->force_full_update ||
(srf_updates[i].flip_addr &&
srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 6ed40b6c6178..4bdf105d1d71 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -533,7 +533,7 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
for (i = 0; i < MAX_PIPES; i++) {
struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
- if (res_ctx->pipe_ctx[i].stream != stream)
+ if (res_ctx->pipe_ctx[i].stream != stream || !tg)
continue;
return tg->funcs->get_frame_count(tg);
@@ -592,7 +592,7 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
for (i = 0; i < MAX_PIPES; i++) {
struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
- if (res_ctx->pipe_ctx[i].stream != stream)
+ if (res_ctx->pipe_ctx[i].stream != stream || !tg)
continue;
tg->funcs->get_scanoutpos(tg,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index 50dc83404644..11f7746f3a65 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -613,16 +613,19 @@ static void dpp3_program_blnd_pwl(
REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg);
REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red);
} else {
+ REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0);
REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 4);
for (i = 0 ; i < num; i++)
REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg);
REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red);
+ REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0);
REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 2);
for (i = 0 ; i < num; i++)
REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].green_reg);
REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_green);
+ REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0);
REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 1);
for (i = 0 ; i < num; i++)
REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].blue_reg);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
index 2861d974fcf6..75547ce86c09 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -316,7 +316,7 @@ bool hubp3_program_surface_flip_and_addr(
return true;
}
-static void hubp3_program_tiling(
+void hubp3_program_tiling(
struct dcn20_hubp *hubp2,
const union dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
index 8a32772d4e91..b010531a7fe8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
@@ -278,6 +278,11 @@ void hubp3_setup(
struct _vcs_dpi_display_rq_regs_st *rq_regs,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+void hubp3_program_tiling(
+ struct dcn20_hubp *hubp2,
+ const union dc_tiling_info *info,
+ const enum surface_pixel_format pixel_format);
+
void hubp3_dcc_control(struct hubp *hubp, bool enable,
enum hubp_ind_block_size blk_size);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
index 1d052f08aff5..994b21ed272f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
@@ -237,16 +237,19 @@ void mpc32_program_post1dlut_pwl(
REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].red_reg);
REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, last_base_value_red);
} else {
+ REG_SET(MPCC_MCM_1DLUT_LUT_INDEX[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_INDEX, 0);
REG_UPDATE(MPCC_MCM_1DLUT_LUT_CONTROL[mpcc_id], MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK, 4);
for (i = 0 ; i < num; i++)
REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].red_reg);
REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, last_base_value_red);
+ REG_SET(MPCC_MCM_1DLUT_LUT_INDEX[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_INDEX, 0);
REG_UPDATE(MPCC_MCM_1DLUT_LUT_CONTROL[mpcc_id], MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK, 2);
for (i = 0 ; i < num; i++)
REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].green_reg);
REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, last_base_value_green);
+ REG_SET(MPCC_MCM_1DLUT_LUT_INDEX[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_INDEX, 0);
REG_UPDATE(MPCC_MCM_1DLUT_LUT_CONTROL[mpcc_id], MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK, 1);
for (i = 0 ; i < num; i++)
REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].blue_reg);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.c
index 1ed58660779e..771fcd0d3b99 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.c
@@ -53,11 +53,146 @@ static void hubp35_init(struct hubp *hubp)
/*do nothing for now for dcn3.5 or later*/
}
+
+void hubp35_program_pixel_format(
+ struct hubp *hubp,
+ enum surface_pixel_format format)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ uint32_t green_bar = 1;
+ uint32_t red_bar = 3;
+ uint32_t blue_bar = 2;
+
+ /* swap for ABGR format */
+ if (format == SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) {
+ red_bar = 2;
+ blue_bar = 3;
+ }
+
+ REG_UPDATE_3(HUBPRET_CONTROL,
+ CROSSBAR_SRC_Y_G, green_bar,
+ CROSSBAR_SRC_CB_B, blue_bar,
+ CROSSBAR_SRC_CR_R, red_bar);
+
+ /* Mapping is the same as ipp programming (cnvc) */
+
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 1);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 3);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 8);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 10);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /* we use crossbar already */
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:/*we use crossbar already*/
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 24);
+ break;
+
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 65);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 64);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 67);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 66);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 12);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 112);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 113);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 114);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 118);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 119);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 116,
+ ALPHA_PLANE_EN, 0);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 116,
+ ALPHA_PLANE_EN, 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ /* don't see the need to program the xbar in DCN 1.0 */
+}
+
+void hubp35_program_surface_config(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ struct plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror,
+ unsigned int compat_level)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ hubp3_dcc_control_sienna_cichlid(hubp, dcc);
+ hubp3_program_tiling(hubp2, tiling_info, format);
+ hubp2_program_size(hubp, format, plane_size, dcc);
+ hubp2_program_rotation(hubp, rotation, horizontal_mirror);
+ hubp35_program_pixel_format(hubp, format);
+}
+
struct hubp_funcs dcn35_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
.hubp_program_surface_flip_and_addr = hubp3_program_surface_flip_and_addr,
- .hubp_program_surface_config = hubp3_program_surface_config,
+ .hubp_program_surface_config = hubp35_program_surface_config,
.hubp_is_flip_pending = hubp2_is_flip_pending,
.hubp_setup = hubp3_setup,
.hubp_setup_interdependent = hubp2_setup_interdependent,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.h
index 3d830f93141e..586b43aa5834 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.h
@@ -58,4 +58,18 @@ bool hubp35_construct(
void hubp35_set_fgcg(struct hubp *hubp, bool enable);
+void hubp35_program_pixel_format(
+ struct hubp *hubp,
+ enum surface_pixel_format format);
+
+void hubp35_program_surface_config(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ struct plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror,
+ unsigned int compat_level);
+
#endif /* __DC_HUBP_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
index 70ae5eba624e..acff3449b8d7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
@@ -60,8 +60,12 @@ endif
endif
ifneq ($(CONFIG_FRAME_WARN),0)
+ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
+frame_warn_flag := -Wframe-larger-than=3072
+else
frame_warn_flag := -Wframe-larger-than=2048
endif
+endif
CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_ccflags)
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_2_sh_mask.h
index 6e29a185de51..765d9ca2316f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_2_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_2_sh_mask.h
@@ -242,6 +242,34 @@
//MP0_SMN_C2PMSG_103
#define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_109
+#define MP0_SMN_C2PMSG_109__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_109__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_126
+#define MP0_SMN_C2PMSG_126__GPU_ERR_MEM_TRAINING__SHIFT 0x0
+#define MP0_SMN_C2PMSG_126__GPU_ERR_FW_LOAD__SHIFT 0x1
+#define MP0_SMN_C2PMSG_126__GPU_ERR_WAFL_LINK_TRAINING__SHIFT 0x2
+#define MP0_SMN_C2PMSG_126__GPU_ERR_XGMI_LINK_TRAINING__SHIFT 0x3
+#define MP0_SMN_C2PMSG_126__GPU_ERR_USR_CP_LINK_TRAINING__SHIFT 0x4
+#define MP0_SMN_C2PMSG_126__GPU_ERR_USR_DP_LINK_TRAINING__SHIFT 0x5
+#define MP0_SMN_C2PMSG_126__GPU_ERR_HBM_MEM_TEST__SHIFT 0x6
+#define MP0_SMN_C2PMSG_126__GPU_ERR_HBM_BIST_TEST__SHIFT 0x7
+#define MP0_SMN_C2PMSG_126__SOCKET_ID__SHIFT 0x8
+#define MP0_SMN_C2PMSG_126__AID_ID__SHIFT 0xb
+#define MP0_SMN_C2PMSG_126__HBM_ID__SHIFT 0xd
+#define MP0_SMN_C2PMSG_126__BOOT_STATUS__SHIFT 0x1f
+#define MP0_SMN_C2PMSG_126__GPU_ERR_MEM_TRAINING_MASK 0x00000001L
+#define MP0_SMN_C2PMSG_126__GPU_ERR_FW_LOAD_MASK 0x00000002L
+#define MP0_SMN_C2PMSG_126__GPU_ERR_WAFL_LINK_TRAINING_MASK 0x00000004L
+#define MP0_SMN_C2PMSG_126__GPU_ERR_XGMI_LINK_TRAINING_MASK 0x00000008L
+#define MP0_SMN_C2PMSG_126__GPU_ERR_USR_CP_LINK_TRAINING_MASK 0x00000010L
+#define MP0_SMN_C2PMSG_126__GPU_ERR_USR_DP_LINK_TRAINING_MASK 0x00000020L
+#define MP0_SMN_C2PMSG_126__GPU_ERR_HBM_MEM_TEST_MASK 0x00000040L
+#define MP0_SMN_C2PMSG_126__GPU_ERR_HBM_BIST_TEST_MASK 0x00000080L
+#define MP0_SMN_C2PMSG_126__SOCKET_ID_MASK 0x00000700L
+#define MP0_SMN_C2PMSG_126__AID_ID_MASK 0x00001800L
+#define MP0_SMN_C2PMSG_126__HBM_ID_MASK 0x00002000L
+#define MP0_SMN_C2PMSG_126__BOOT_STATUS_MASK 0x80000000L
//MP0_SMN_IH_CREDIT
#define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
#define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 3201808c2dd8..cd3c40a86029 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -1080,33 +1080,35 @@ struct gpu_metrics_v3_0 {
uint16_t average_ipu_activity[8];
/* time filtered per-core C0 residency % [0-100]*/
uint16_t average_core_c0_activity[16];
- /* time filtered DRAM read bandwidth [GB/sec] */
+ /* time filtered DRAM read bandwidth [MB/sec] */
uint16_t average_dram_reads;
- /* time filtered DRAM write bandwidth [GB/sec] */
+ /* time filtered DRAM write bandwidth [MB/sec] */
uint16_t average_dram_writes;
/* Driver attached timestamp (in ns) */
uint64_t system_clock_counter;
/* Power/Energy */
- /* average dGPU + APU power on A + A platform */
+ /* time filtered power used for PPT/STAPM [APU+dGPU] [mW] */
uint32_t average_socket_power;
- /* average IPU power [W] */
+ /* time filtered IPU power [mW] */
uint16_t average_ipu_power;
- /* average APU power [W] */
+ /* time filtered APU power [mW] */
uint32_t average_apu_power;
- /* average dGPU power [W] */
+ /* time filtered GFX power [mW] */
+ uint32_t average_gfx_power;
+ /* time filtered dGPU power [mW] */
uint32_t average_dgpu_power;
- /* sum of core power across all cores in the socket [W] */
- uint32_t average_core_power;
- /* calculated core power [W] */
- uint16_t core_power[16];
- /* maximum IRM defined STAPM power limit [W] */
+ /* time filtered sum of core power across all cores in the socket [mW] */
+ uint32_t average_all_core_power;
+ /* calculated core power [mW] */
+ uint16_t average_core_power[16];
+ /* maximum IRM defined STAPM power limit [mW] */
uint16_t stapm_power_limit;
- /* time filtered STAPM power limit [W] */
+ /* time filtered STAPM power limit [mW] */
uint16_t current_stapm_power_limit;
- /* Average clocks */
+ /* time filtered clocks [MHz] */
uint16_t average_gfxclk_frequency;
uint16_t average_socclk_frequency;
uint16_t average_vpeclk_frequency;
@@ -1115,7 +1117,7 @@ struct gpu_metrics_v3_0 {
uint16_t average_vclk_frequency;
/* Current clocks */
- /* target core frequency */
+ /* target core frequency [MHz] */
uint16_t current_coreclk[16];
/* CCLK frequency limit enforced on classic cores [MHz] */
uint16_t current_core_maxfreq;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 517b9fb4624c..4ba9195c83c5 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -3288,10 +3288,6 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
uint32_t tmp;
- /* under multi-vf mode, the hwmon attributes are all not supported */
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
-
/* under pp one vf mode manage of hwmon attributes is not supported */
if (amdgpu_sriov_is_pp_one_vf(adev))
effective_mode &= ~S_IWUSR;
@@ -4162,6 +4158,7 @@ err_out:
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
+ enum amdgpu_sriov_vf_mode mode;
uint32_t mask = 0;
int ret;
@@ -4173,17 +4170,21 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
if (adev->pm.dpm_enabled == 0)
return 0;
- adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
- DRIVER_NAME, adev,
- hwmon_groups);
- if (IS_ERR(adev->pm.int_hwmon_dev)) {
- ret = PTR_ERR(adev->pm.int_hwmon_dev);
- dev_err(adev->dev,
- "Unable to register hwmon device: %d\n", ret);
- return ret;
+ mode = amdgpu_virt_get_sriov_vf_mode(adev);
+
+ /* under multi-vf mode, the hwmon attributes are all not supported */
+ if (mode != SRIOV_VF_MODE_MULTI_VF) {
+ adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
+ DRIVER_NAME, adev,
+ hwmon_groups);
+ if (IS_ERR(adev->pm.int_hwmon_dev)) {
+ ret = PTR_ERR(adev->pm.int_hwmon_dev);
+ dev_err(adev->dev, "Unable to register hwmon device: %d\n", ret);
+ return ret;
+ }
}
- switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
+ switch (mode) {
case SRIOV_VF_MODE_ONE_VF:
mask = ATTR_FLAG_ONEVF;
break;
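
The hunk above moves the multi-VF check out of the hwmon visibility callback and into sysfs init, so under multi-VF SR-IOV the hwmon device is never registered in the first place and the same queried mode also selects the attribute mask. A minimal userspace sketch of that pattern follows; the types, stubs and mask value are hypothetical, not the driver's.

/*
 * Hypothetical standalone sketch: query the SR-IOV VF mode once, then use
 * it both for the "register hwmon at all?" decision and the attribute mask,
 * mirroring the restructured amdgpu_pm_sysfs_init() above.
 */
#include <stdio.h>

enum sriov_vf_mode { VF_MODE_BARE_METAL, VF_MODE_ONE_VF, VF_MODE_MULTI_VF };

static enum sriov_vf_mode get_vf_mode(void) { return VF_MODE_MULTI_VF; } /* stub */
static int register_hwmon(void) { puts("hwmon registered"); return 0; }  /* stub */

int main(void)
{
	enum sriov_vf_mode mode = get_vf_mode();
	unsigned int attr_mask = 0;

	/* Under multi-VF SR-IOV none of the hwmon attributes are usable,
	 * so skip registration instead of hiding every attribute later. */
	if (mode != VF_MODE_MULTI_VF && register_hwmon())
		return 1;

	if (mode == VF_MODE_ONE_VF)
		attr_mask = 0x1;	/* stand-in for ATTR_FLAG_ONEVF */

	printf("attr mask: %#x\n", attr_mask);
	return 0;
}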
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 9e4f8a4104a3..914c15387157 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -1022,6 +1022,9 @@ static int pp_get_power_limit(void *handle, uint32_t *limit,
*limit /= 100;
}
break;
+ case PP_PWR_LIMIT_MIN:
+ *limit = 0;
+ break;
default:
ret = -EOPNOTSUPP;
break;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
index 9fcad69a9f34..2cf2a7b12623 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
@@ -367,7 +367,7 @@ typedef struct _ATOM_Tonga_VCE_State_Record {
typedef struct _ATOM_Tonga_VCE_State_Table {
UCHAR ucRevId;
UCHAR ucNumEntries;
- ATOM_Tonga_VCE_State_Record entries[1];
+ ATOM_Tonga_VCE_State_Record entries[];
} ATOM_Tonga_VCE_State_Table;
typedef struct _ATOM_Tonga_PowerTune_Table {
@@ -481,7 +481,7 @@ typedef struct _ATOM_Tonga_Hard_Limit_Record {
typedef struct _ATOM_Tonga_Hard_Limit_Table {
UCHAR ucRevId;
UCHAR ucNumEntries;
- ATOM_Tonga_Hard_Limit_Record entries[1];
+ ATOM_Tonga_Hard_Limit_Record entries[];
} ATOM_Tonga_Hard_Limit_Table;
typedef struct _ATOM_Tonga_GPIO_Table {
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h
index 8b0590b834cc..de2926df5ed7 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h
@@ -129,7 +129,7 @@ typedef struct _ATOM_Vega10_State {
typedef struct _ATOM_Vega10_State_Array {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Vega10_State states[1]; /* Dynamically allocate entries. */
+ ATOM_Vega10_State states[]; /* Dynamically allocate entries. */
} ATOM_Vega10_State_Array;
typedef struct _ATOM_Vega10_CLK_Dependency_Record {
@@ -169,37 +169,37 @@ typedef struct _ATOM_Vega10_GFXCLK_Dependency_Table {
typedef struct _ATOM_Vega10_MCLK_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Vega10_MCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Vega10_MCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Vega10_MCLK_Dependency_Table;
typedef struct _ATOM_Vega10_SOCCLK_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Vega10_SOCCLK_Dependency_Table;
typedef struct _ATOM_Vega10_DCEFCLK_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Vega10_DCEFCLK_Dependency_Table;
typedef struct _ATOM_Vega10_PIXCLK_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Vega10_PIXCLK_Dependency_Table;
typedef struct _ATOM_Vega10_DISPCLK_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries.*/
- ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Vega10_DISPCLK_Dependency_Table;
typedef struct _ATOM_Vega10_PHYCLK_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Vega10_PHYCLK_Dependency_Table;
typedef struct _ATOM_Vega10_MM_Dependency_Record {
@@ -213,7 +213,7 @@ typedef struct _ATOM_Vega10_MM_Dependency_Record {
typedef struct _ATOM_Vega10_MM_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries */
- ATOM_Vega10_MM_Dependency_Record entries[1]; /* Dynamically allocate entries */
+ ATOM_Vega10_MM_Dependency_Record entries[]; /* Dynamically allocate entries */
} ATOM_Vega10_MM_Dependency_Table;
typedef struct _ATOM_Vega10_PCIE_Record {
@@ -225,7 +225,7 @@ typedef struct _ATOM_Vega10_PCIE_Record {
typedef struct _ATOM_Vega10_PCIE_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries */
- ATOM_Vega10_PCIE_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Vega10_PCIE_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Vega10_PCIE_Table;
typedef struct _ATOM_Vega10_Voltage_Lookup_Record {
@@ -235,7 +235,7 @@ typedef struct _ATOM_Vega10_Voltage_Lookup_Record {
typedef struct _ATOM_Vega10_Voltage_Lookup_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries */
- ATOM_Vega10_Voltage_Lookup_Record entries[1]; /* Dynamically allocate entries */
+ ATOM_Vega10_Voltage_Lookup_Record entries[]; /* Dynamically allocate entries */
} ATOM_Vega10_Voltage_Lookup_Table;
typedef struct _ATOM_Vega10_Fan_Table {
@@ -327,7 +327,7 @@ typedef struct _ATOM_Vega10_VCE_State_Record {
typedef struct _ATOM_Vega10_VCE_State_Table {
UCHAR ucRevId;
UCHAR ucNumEntries;
- ATOM_Vega10_VCE_State_Record entries[1];
+ ATOM_Vega10_VCE_State_Record entries[];
} ATOM_Vega10_VCE_State_Table;
typedef struct _ATOM_Vega10_PowerTune_Table {
@@ -427,7 +427,7 @@ typedef struct _ATOM_Vega10_Hard_Limit_Record {
typedef struct _ATOM_Vega10_Hard_Limit_Table {
UCHAR ucRevId;
UCHAR ucNumEntries;
- ATOM_Vega10_Hard_Limit_Record entries[1];
+ ATOM_Vega10_Hard_Limit_Record entries[];
} ATOM_Vega10_Hard_Limit_Table;
typedef struct _Vega10_PPTable_Generic_SubTable_Header {
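
The pptable changes above replace the old one-element trailing arrays with C99 flexible array members, so sizeof() of the header no longer counts a phantom first entry and array-bounds checkers see the real length. A small illustrative program, with made-up struct names, shows the allocation idiom:

/*
 * Illustrative sketch only (names are invented): a trailing flexible array
 * member replaces the "entries[1]" idiom, and the allocation spells out
 * header size plus n entries explicitly.
 */
#include <stdio.h>
#include <stdlib.h>

struct record { unsigned short clk; unsigned short vol; };

struct table {
	unsigned char rev_id;
	unsigned char num_entries;
	struct record entries[];	/* flexible array member */
};

int main(void)
{
	unsigned char n = 4;
	struct table *t = malloc(sizeof(*t) + n * sizeof(t->entries[0]));

	if (!t)
		return 1;
	t->num_entries = n;
	for (unsigned char i = 0; i < n; i++)
		t->entries[i] = (struct record){ .clk = 100 * (i + 1), .vol = 800 };
	printf("header is %zu bytes, holds %u entries\n", sizeof(*t), t->num_entries);
	free(t);
	return 0;
}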
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 9f86c1fecbb1..23b00eddc1af 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -733,7 +733,7 @@ static int smu_early_init(void *handle)
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
smu->is_apu = false;
- smu->smu_baco.state = SMU_BACO_STATE_EXIT;
+ smu->smu_baco.state = SMU_BACO_STATE_NONE;
smu->smu_baco.platform_support = false;
smu->user_dpm_profile.fan_mode = -1;
@@ -1742,10 +1742,31 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
return 0;
}
+static int smu_reset_mp1_state(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if ((!adev->in_runpm) && (!adev->in_suspend) &&
+ (!amdgpu_in_reset(adev)))
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 7):
+ case IP_VERSION(13, 0, 10):
+ ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
static int smu_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
@@ -1763,7 +1784,15 @@ static int smu_hw_fini(void *handle)
adev->pm.dpm_enabled = false;
- return smu_smc_hw_cleanup(smu);
+ ret = smu_smc_hw_cleanup(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_reset_mp1_state(smu);
+ if (ret)
+ return ret;
+
+ return 0;
}
static void smu_late_fini(void *handle)
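
The new smu_reset_mp1_state() above only asks the PMFW to prepare MP1 for unload on a plain driver unload, never while runtime-suspended, system-suspended or mid-reset, and only for the listed MP1 IP versions. A hedged standalone sketch of that guard follows; the encoded version numbers and stubs are hypothetical.

/*
 * Hypothetical sketch of the unload-only guard: skip the MP1 message for
 * anything other than a real unload, and dispatch on the MP1 IP version.
 */
#include <stdbool.h>
#include <stdio.h>

struct dev_state { bool in_runpm, in_suspend, in_reset; };

static int send_mp1_unload(void) { puts("MP1 told to prepare for unload"); return 0; }

static int reset_mp1_state(const struct dev_state *d, unsigned int mp1_ver)
{
	if (d->in_runpm || d->in_suspend || d->in_reset)
		return 0;			/* not a real unload, nothing to do */

	switch (mp1_ver) {
	case 0x0d0000:				/* made-up encodings for 13.0.0 */
	case 0x0d0007:				/* 13.0.7 */
	case 0x0d000a:				/* 13.0.10 */
		return send_mp1_unload();
	default:
		return 0;
	}
}

int main(void)
{
	struct dev_state d = { .in_runpm = false, .in_suspend = false, .in_reset = false };
	return reset_mp1_state(&d, 0x0d0000);
}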
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 839553a86aa2..8def291b18bc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -419,6 +419,7 @@ enum smu_reset_mode {
enum smu_baco_state {
SMU_BACO_STATE_ENTER = 0,
SMU_BACO_STATE_EXIT,
+ SMU_BACO_STATE_NONE,
};
struct smu_baco_context {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
index b483c8e096e7..22f88842a7fd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
@@ -150,97 +150,39 @@ typedef struct {
} DpmClocks_t;
typedef struct {
- uint16_t CoreFrequency[16]; //Target core frequency [MHz]
- uint16_t CorePower[16]; //CAC calculated core power [W] [Q8.8]
- uint16_t CoreTemperature[16]; //TSEN measured core temperature [C] [Q8.8]
- uint16_t GfxTemperature; //TSEN measured GFX temperature [C] [Q8.8]
- uint16_t SocTemperature; //TSEN measured SOC temperature [C] [Q8.8]
- uint16_t StapmOpnLimit; //Maximum IRM defined STAPM power limit [W] [Q8.8]
- uint16_t StapmCurrentLimit; //Time filtered STAPM power limit [W] [Q8.8]
- uint16_t InfrastructureCpuMaxFreq; //CCLK frequency limit enforced on classic cores [MHz]
- uint16_t InfrastructureGfxMaxFreq; //GFXCLK frequency limit enforced on GFX [MHz]
- uint16_t SkinTemp; //Maximum skin temperature reported by APU and HS2 chassis sensors [C] [Q8.8]
- uint16_t AverageGfxclkFrequency; //Time filtered target GFXCLK frequency [MHz]
- uint16_t AverageFclkFrequency; //Time filtered target FCLK frequency [MHz]
- uint16_t AverageGfxActivity; //Time filtered GFX busy % [0-100] [Q8.8]
- uint16_t AverageSocclkFrequency; //Time filtered target SOCCLK frequency [MHz]
- uint16_t AverageVclkFrequency; //Time filtered target VCLK frequency [MHz]
- uint16_t AverageVcnActivity; //Time filtered VCN busy % [0-100] [Q8.8]
- uint16_t AverageVpeclkFrequency; //Time filtered target VPECLK frequency [MHz]
- uint16_t AverageIpuclkFrequency; //Time filtered target IPUCLK frequency [MHz]
- uint16_t AverageIpuBusy[8]; //Time filtered IPU per-column busy % [0-100] [Q8.8]
- uint16_t AverageDRAMReads; //Time filtered DRAM read bandwidth [GB/sec] [Q8.8]
- uint16_t AverageDRAMWrites; //Time filtered DRAM write bandwidth [GB/sec] [Q8.8]
- uint16_t AverageCoreC0Residency[16]; //Time filtered per-core C0 residency % [0-100] [Q8.8]
- uint16_t IpuPower; //Time filtered IPU power [W] [Q8.8]
- uint32_t ApuPower; //Time filtered APU power [W] [Q24.8]
- uint32_t dGpuPower; //Time filtered dGPU power [W] [Q24.8]
- uint32_t AverageSocketPower; //Time filtered power used for PPT/STAPM [APU+dGPU] [W] [Q24.8]
- uint32_t AverageCorePower; //Time filtered sum of core power across all cores in the socket [W] [Q24.8]
- uint32_t FilterAlphaValue; //Metrics table alpha filter time constant [us]
- uint32_t MetricsCounter; //Counter that is incremented on every metrics table update [PM_TIMER cycles]
+ uint16_t CoreFrequency[16]; //Target core frequency [MHz]
+ uint16_t CorePower[16]; //CAC calculated core power [mW]
+ uint16_t CoreTemperature[16]; //TSEN measured core temperature [centi-C]
+ uint16_t GfxTemperature; //TSEN measured GFX temperature [centi-C]
+ uint16_t SocTemperature; //TSEN measured SOC temperature [centi-C]
+ uint16_t StapmOpnLimit; //Maximum IRM defined STAPM power limit [mW]
+ uint16_t StapmCurrentLimit; //Time filtered STAPM power limit [mW]
+ uint16_t InfrastructureCpuMaxFreq; //CCLK frequency limit enforced on classic cores [MHz]
+ uint16_t InfrastructureGfxMaxFreq; //GFXCLK frequency limit enforced on GFX [MHz]
+ uint16_t SkinTemp; //Maximum skin temperature reported by APU and HS2 chassis sensors [centi-C]
+ uint16_t GfxclkFrequency; //Time filtered target GFXCLK frequency [MHz]
+ uint16_t FclkFrequency; //Time filtered target FCLK frequency [MHz]
+ uint16_t GfxActivity; //Time filtered GFX busy % [0-100]
+ uint16_t SocclkFrequency; //Time filtered target SOCCLK frequency [MHz]
+ uint16_t VclkFrequency; //Time filtered target VCLK frequency [MHz]
+ uint16_t VcnActivity; //Time filtered VCN busy % [0-100]
+ uint16_t VpeclkFrequency; //Time filtered target VPECLK frequency [MHz]
+ uint16_t IpuclkFrequency; //Time filtered target IPUCLK frequency [MHz]
+ uint16_t IpuBusy[8]; //Time filtered IPU per-column busy % [0-100]
+ uint16_t DRAMReads; //Time filtered DRAM read bandwidth [MB/sec]
+ uint16_t DRAMWrites; //Time filtered DRAM write bandwidth [MB/sec]
+ uint16_t CoreC0Residency[16]; //Time filtered per-core C0 residency % [0-100]
+ uint16_t IpuPower; //Time filtered IPU power [mW]
+ uint32_t ApuPower; //Time filtered APU power [mW]
+ uint32_t GfxPower; //Time filtered GFX power [mW]
+ uint32_t dGpuPower; //Time filtered dGPU power [mW]
+ uint32_t SocketPower; //Time filtered power used for PPT/STAPM [APU+dGPU] [mW]
+ uint32_t AllCorePower; //Time filtered sum of core power across all cores in the socket [mW]
+ uint32_t FilterAlphaValue; //Metrics table alpha filter time constant [us]
+ uint32_t MetricsCounter; //Counter that is incremented on every metrics table update [PM_TIMER cycles]
+ uint32_t spare[16];
} SmuMetrics_t;
-typedef struct {
- uint16_t GfxclkFrequency; //[MHz]
- uint16_t SocclkFrequency; //[MHz]
- uint16_t VclkFrequency; //[MHz]
- uint16_t DclkFrequency; //[MHz]
- uint16_t MemclkFrequency; //[MHz]
- uint16_t spare;
- uint16_t UvdActivity; //[centi]
- uint16_t GfxActivity; //[centi]
-
- uint16_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_SOC
- uint16_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_SOC
- uint16_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_SOC
-
- uint16_t CoreFrequency[8]; //[MHz]
- uint16_t CorePower[8]; //[mW]
- uint16_t CoreTemperature[8]; //[centi-Celsius]
- uint16_t L3Frequency[2]; //[MHz]
- uint16_t L3Temperature[2]; //[centi-Celsius]
-
- uint16_t spare2[24];
-
- uint16_t GfxTemperature; //[centi-Celsius]
- uint16_t SocTemperature; //[centi-Celsius]
- uint16_t ThrottlerStatus;
-
- uint16_t CurrentSocketPower; //[mW]
- uint16_t StapmOpnLimit; //[W]
- uint16_t StapmCurrentLimit; //[W]
- uint32_t ApuPower; //[mW]
- uint32_t dGpuPower; //[mW]
-
- uint16_t VddTdcValue; //[mA]
- uint16_t SocTdcValue; //[mA]
- uint16_t VddEdcValue; //[mA]
- uint16_t SocEdcValue; //[mA]
-
- uint16_t InfrastructureCpuMaxFreq; //[MHz]
- uint16_t InfrastructureGfxMaxFreq; //[MHz]
-
- uint16_t SkinTemp;
- uint16_t DeviceState;
- uint16_t CurTemp; //[centi-Celsius]
- uint16_t FilterAlphaValue; //[m]
-
- uint16_t AverageGfxclkFrequency;
- uint16_t AverageFclkFrequency;
- uint16_t AverageGfxActivity;
- uint16_t AverageSocclkFrequency;
- uint16_t AverageVclkFrequency;
- uint16_t AverageVcnActivity;
- uint16_t AverageDRAMReads; //Filtered DF Bandwidth::DRAM Reads
- uint16_t AverageDRAMWrites; //Filtered DF Bandwidth::DRAM Writes
- uint16_t AverageSocketPower; //Filtered value of CurrentSocketPower
- uint16_t AverageCorePower[2]; //Filtered of [sum of CorePower[8] per ccx])
- uint16_t AverageCoreC0Residency[16]; //Filtered of [average C0 residency % per core]
- uint16_t spare1;
- uint32_t MetricsCounter; //Counts the # of metrics table parameter reads per update to the metrics table, i.e. if the metrics table update happens every 1 second, this value could be up to 1000 if the smu collected metrics data every cycle, or as low as 0 if the smu was asleep the whole time. Reset to 0 after writing.
-} SmuMetrics_legacy_t;
-
//ISP tile definitions
typedef enum {
TILE_XTILE = 0, //ONO0
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index cc02f979e9e9..95cb919718ae 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -299,5 +299,7 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
uint8_t pcie_gen_cap,
uint8_t pcie_width_cap);
+int smu_v13_0_disable_pmfw_state(struct smu_context *smu);
+
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 3efc6aed28f1..762b31455a0b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -234,24 +234,15 @@ static int vangogh_tables_init(struct smu_context *smu)
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, max(sizeof(SmuMetrics_t), sizeof(SmuMetrics_legacy_t)),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- if (smu->smc_fw_if_version < 0x3) {
- SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL);
- } else {
- SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
- }
+ smu_table->metrics_table = kzalloc(max(sizeof(SmuMetrics_t), sizeof(SmuMetrics_legacy_t)), GFP_KERNEL);
if (!smu_table->metrics_table)
goto err0_out;
smu_table->metrics_time = 0;
- if (smu->smc_fw_version >= 0x043F3E00)
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_3);
- else
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
+ smu_table->gpu_metrics_table_size = max(sizeof(struct gpu_metrics_v2_3), sizeof(struct gpu_metrics_v2_2));
smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table)
goto err1_out;
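
Rather than sizing the metrics buffers by firmware version at table-init time, the vangogh change above allocates a single buffer large enough for either layout, so the legacy-versus-new decision can be deferred to the point where the data is actually read. A small illustrative program (struct layouts are invented) shows the sizing trick:

/*
 * Sketch only: one buffer sized for whichever of two firmware metrics
 * layouts ends up being used.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct metrics_new    { uint16_t core_freq[16]; uint32_t socket_power; };
struct metrics_legacy { uint16_t core_freq[8];  uint16_t socket_power; };

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	size_t sz = MAX(sizeof(struct metrics_new), sizeof(struct metrics_legacy));
	void *buf = calloc(1, sz);

	if (!buf)
		return 1;
	printf("metrics buffer sized for either layout: %zu bytes\n", sz);
	free(buf);
	return 0;
}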
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index a49e5adf7cc3..cf1b84060bc3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -2477,3 +2477,16 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
return 0;
}
+
+int smu_v13_0_disable_pmfw_state(struct smu_context *smu)
+{
+ int ret;
+ struct amdgpu_device *adev = smu->adev;
+
+ WREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff), 0);
+
+ ret = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+
+ return ret == 0 ? 0 : -EINVAL;
+}
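
smu_v13_0_disable_pmfw_state() above clears the MP1 firmware-flags register through the PCIe aperture and then reads it back; a non-zero readback means the PMFW state could not be disabled and -EINVAL is returned. The following mock is a hedged sketch of that write-then-verify pattern only; a plain variable stands in for the real WREG32_PCIE/RREG32_PCIE register accessors.

/*
 * Hypothetical mock of the write-then-verify pattern: clear the
 * firmware-flags "register" and confirm by reading it back.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_fw_flags = 0x00000006;	/* pretend PMFW state bits */

static void reg_write(uint32_t val) { fake_fw_flags = val; }
static uint32_t reg_read(void)      { return fake_fw_flags; }

static int disable_pmfw_state(void)
{
	reg_write(0);
	return reg_read() == 0 ? 0 : -1;	/* -EINVAL in the driver */
}

int main(void)
{
	printf("disable_pmfw_state: %s\n", disable_pmfw_state() ? "failed" : "ok");
	return 0;
}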
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 34bd99b0e137..82c4e1f1c6f0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -354,12 +354,12 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC)
smu->dc_controlled_by_gpio = true;
- if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO ||
- powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
+ if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO) {
smu_baco->platform_support = true;
- if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
- smu_baco->maco_support = true;
+ if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
+ smu_baco->maco_support = true;
+ }
if (!overdrive_lowerlimits->FeatureCtrlMask ||
!overdrive_upperlimits->FeatureCtrlMask)
@@ -2530,38 +2530,10 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
}
}
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE &&
- (((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xC8)) ||
- ((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xCC)))) {
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
- WORKLOAD_PPLIB_COMPUTE_BIT,
- (void *)(&activity_monitor_external),
- false);
- if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
- return ret;
- }
-
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
- WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor_external),
- true);
- if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
- return ret;
- }
-
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- PP_SMC_POWER_PROFILE_CUSTOM);
- } else {
- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_cmn_to_asic_specific_index(smu,
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
smu->power_profile_mode);
- }
if (workload_type < 0)
return -EINVAL;
@@ -2602,14 +2574,20 @@ static int smu_v13_0_0_baco_enter(struct smu_context *smu)
static int smu_v13_0_0_baco_exit(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
+ int ret;
if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
/* Wait for PMFW handling for the Dstate change */
usleep_range(10000, 11000);
- return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+ ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
} else {
- return smu_v13_0_baco_exit(smu);
+ ret = smu_v13_0_baco_exit(smu);
}
+
+ if (!ret)
+ adev->gfx.is_poweron = false;
+
+ return ret;
}
static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu)
@@ -2794,7 +2772,13 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,
switch (mp1_state) {
case PP_MP1_STATE_UNLOAD:
- ret = smu_cmn_set_mp1_state(smu, mp1_state);
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_PrepareMp1ForUnload,
+ 0x55, NULL);
+
+ if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
+ ret = smu_v13_0_disable_pmfw_state(smu);
+
break;
default:
/* Ignore others */
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index f42b48b31927..20f66e696f87 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -270,7 +270,7 @@ static int smu_v13_0_6_init_microcode(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
uint32_t p2s_table_id = P2S_TABLE_ID_A;
int ret = 0, i, p2stable_count;
- char ucode_prefix[30];
+ char ucode_prefix[15];
char fw_name[30];
/* No need to load P2S tables in IOV mode */
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index ac0e1cc812bd..81eafed76045 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -346,12 +346,13 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC)
smu->dc_controlled_by_gpio = true;
- if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO ||
- powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
+ if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO) {
smu_baco->platform_support = true;
- if (smu_baco->platform_support && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
- smu_baco->maco_support = true;
+ if ((powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
+ && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
+ smu_baco->maco_support = true;
+ }
if (!overdrive_lowerlimits->FeatureCtrlMask ||
!overdrive_upperlimits->FeatureCtrlMask)
@@ -2498,7 +2499,13 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
switch (mp1_state) {
case PP_MP1_STATE_UNLOAD:
- ret = smu_cmn_set_mp1_state(smu, mp1_state);
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_PrepareMp1ForUnload,
+ 0x55, NULL);
+
+ if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
+ ret = smu_v13_0_disable_pmfw_state(smu);
+
break;
default:
/* Ignore others */
@@ -2524,14 +2531,20 @@ static int smu_v13_0_7_baco_enter(struct smu_context *smu)
static int smu_v13_0_7_baco_exit(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
+ int ret;
if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
/* Wait for PMFW handling for the Dstate change */
usleep_range(10000, 11000);
- return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+ ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
} else {
- return smu_v13_0_baco_exit(smu);
+ ret = smu_v13_0_baco_exit(smu);
}
+
+ if (!ret)
+ adev->gfx.is_poweron = false;
+
+ return ret;
}
static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index 4ac22f44d160..d8f8ad0e7137 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -57,7 +57,7 @@ int smu_v14_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
char fw_name[30];
- char ucode_prefix[30];
+ char ucode_prefix[15];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
const struct common_firmware_header *header;
@@ -229,6 +229,8 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
break;
case IP_VERSION(14, 0, 0):
+ if ((smu->smc_fw_version < 0x5d3a00))
+ dev_warn(smu->adev->dev, "The PMFW version(%x) is behind in this BIOS!\n", smu->smc_fw_version);
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
break;
default:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index c36fc10b63c8..03b38c3a9968 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -156,15 +156,10 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- if (smu->smc_fw_version > 0x5d3500) {
- SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
- } else {
- SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL);
- }
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+ smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
if (!smu_table->metrics_table)
goto err0_out;
smu_table->metrics_time = 0;
@@ -177,10 +172,7 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
if (!smu_table->watermarks_table)
goto err2_out;
- if (smu->smc_fw_version > 0x5d3500)
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v3_0);
- else
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_1);
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v3_0);
smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table)
goto err3_out;
@@ -242,13 +234,13 @@ static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu,
switch (member) {
case METRICS_AVERAGE_GFXCLK:
- *value = metrics->AverageGfxclkFrequency;
+ *value = metrics->GfxclkFrequency;
break;
case METRICS_AVERAGE_SOCCLK:
- *value = metrics->AverageSocclkFrequency;
+ *value = metrics->SocclkFrequency;
break;
case METRICS_AVERAGE_VCLK:
- *value = metrics->AverageVclkFrequency;
+ *value = metrics->VclkFrequency;
break;
case METRICS_AVERAGE_DCLK:
*value = 0;
@@ -257,25 +249,25 @@ static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu,
*value = 0;
break;
case METRICS_AVERAGE_FCLK:
- *value = metrics->AverageFclkFrequency;
+ *value = metrics->FclkFrequency;
break;
case METRICS_AVERAGE_GFXACTIVITY:
- *value = metrics->AverageGfxActivity >> 8;
+ *value = metrics->GfxActivity / 100;
break;
case METRICS_AVERAGE_VCNACTIVITY:
- *value = metrics->AverageVcnActivity >> 8;
+ *value = metrics->VcnActivity / 100;
break;
case METRICS_AVERAGE_SOCKETPOWER:
case METRICS_CURR_SOCKETPOWER:
- *value = (metrics->AverageSocketPower & 0xff00) +
- ((metrics->AverageSocketPower & 0xff) * 100 >> 8);
+ *value = (metrics->SocketPower / 1000 << 8) +
+ (metrics->SocketPower % 1000 / 10);
break;
case METRICS_TEMPERATURE_EDGE:
- *value = (metrics->GfxTemperature >> 8) *
+ *value = metrics->GfxTemperature / 100 *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_HOTSPOT:
- *value = (metrics->SocTemperature >> 8) *
+ *value = metrics->SocTemperature / 100 *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_THROTTLER_STATUS:
@@ -317,107 +309,6 @@ static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu,
return ret;
}
-static int smu_v14_0_0_legacy_get_smu_metrics_data(struct smu_context *smu,
- MetricsMember_t member,
- uint32_t *value)
-{
- struct smu_table_context *smu_table = &smu->smu_table;
-
- SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table;
- int ret = 0;
-
- ret = smu_cmn_get_metrics_table(smu, NULL, false);
- if (ret)
- return ret;
-
- switch (member) {
- case METRICS_AVERAGE_GFXCLK:
- *value = metrics->GfxclkFrequency;
- break;
- case METRICS_AVERAGE_SOCCLK:
- *value = metrics->SocclkFrequency;
- break;
- case METRICS_AVERAGE_VCLK:
- *value = metrics->VclkFrequency;
- break;
- case METRICS_AVERAGE_DCLK:
- *value = metrics->DclkFrequency;
- break;
- case METRICS_AVERAGE_UCLK:
- *value = metrics->MemclkFrequency;
- break;
- case METRICS_AVERAGE_GFXACTIVITY:
- *value = metrics->GfxActivity / 100;
- break;
- case METRICS_AVERAGE_FCLK:
- *value = metrics->AverageFclkFrequency;
- break;
- case METRICS_AVERAGE_VCNACTIVITY:
- *value = metrics->UvdActivity;
- break;
- case METRICS_AVERAGE_SOCKETPOWER:
- *value = (metrics->AverageSocketPower << 8) / 1000;
- break;
- case METRICS_CURR_SOCKETPOWER:
- *value = (metrics->CurrentSocketPower << 8) / 1000;
- break;
- case METRICS_TEMPERATURE_EDGE:
- *value = metrics->GfxTemperature / 100 *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- break;
- case METRICS_TEMPERATURE_HOTSPOT:
- *value = metrics->SocTemperature / 100 *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- break;
- case METRICS_THROTTLER_STATUS:
- *value = metrics->ThrottlerStatus;
- break;
- case METRICS_VOLTAGE_VDDGFX:
- *value = metrics->Voltage[0];
- break;
- case METRICS_VOLTAGE_VDDSOC:
- *value = metrics->Voltage[1];
- break;
- case METRICS_SS_APU_SHARE:
- /* return the percentage of APU power with respect to APU's power limit.
- * percentage is reported, this isn't boost value. Smartshift power
- * boost/shift is only when the percentage is more than 100.
- */
- if (metrics->StapmOpnLimit > 0)
- *value = (metrics->ApuPower * 100) / metrics->StapmOpnLimit;
- else
- *value = 0;
- break;
- case METRICS_SS_DGPU_SHARE:
- /* return the percentage of dGPU power with respect to dGPU's power limit.
- * percentage is reported, this isn't boost value. Smartshift power
- * boost/shift is only when the percentage is more than 100.
- */
- if ((metrics->dGpuPower > 0) &&
- (metrics->StapmCurrentLimit > metrics->StapmOpnLimit))
- *value = (metrics->dGpuPower * 100) /
- (metrics->StapmCurrentLimit - metrics->StapmOpnLimit);
- else
- *value = 0;
- break;
- default:
- *value = UINT_MAX;
- break;
- }
-
- return ret;
-}
-
-static int smu_v14_0_0_common_get_smu_metrics_data(struct smu_context *smu,
- MetricsMember_t member,
- uint32_t *value)
-{
- if (smu->smc_fw_version > 0x5d3500)
- return smu_v14_0_0_get_smu_metrics_data(smu, member, value);
- else
- return smu_v14_0_0_legacy_get_smu_metrics_data(smu, member, value);
-}
-
static int smu_v14_0_0_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data, uint32_t *size)
@@ -429,69 +320,69 @@ static int smu_v14_0_0_read_sensor(struct smu_context *smu,
switch (sensor) {
case AMDGPU_PP_SENSOR_GPU_LOAD:
- ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
METRICS_AVERAGE_GFXACTIVITY,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
- ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
- ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
METRICS_CURR_SOCKETPOWER,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_EDGE_TEMP:
- ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
METRICS_TEMPERATURE_EDGE,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
- ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
METRICS_TEMPERATURE_HOTSPOT,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
METRICS_AVERAGE_UCLK,
(uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_SCLK:
- ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
METRICS_AVERAGE_GFXCLK,
(uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_VDDGFX:
- ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
METRICS_VOLTAGE_VDDGFX,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_VDDNB:
- ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
METRICS_VOLTAGE_VDDSOC,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_SS_APU_SHARE:
- ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
METRICS_SS_APU_SHARE,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_SS_DGPU_SHARE:
- ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
METRICS_SS_DGPU_SHARE,
(uint32_t *)data);
*size = 4;
@@ -588,7 +479,7 @@ static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
if (ret)
return ret;
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 3, 0);
gpu_metrics->temperature_gfx = metrics.GfxTemperature;
gpu_metrics->temperature_soc = metrics.SocTemperature;
@@ -597,32 +488,33 @@ static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
sizeof(uint16_t) * 16);
gpu_metrics->temperature_skin = metrics.SkinTemp;
- gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
- gpu_metrics->average_vcn_activity = metrics.AverageVcnActivity;
+ gpu_metrics->average_gfx_activity = metrics.GfxActivity;
+ gpu_metrics->average_vcn_activity = metrics.VcnActivity;
memcpy(&gpu_metrics->average_ipu_activity[0],
- &metrics.AverageIpuBusy[0],
+ &metrics.IpuBusy[0],
sizeof(uint16_t) * 8);
memcpy(&gpu_metrics->average_core_c0_activity[0],
- &metrics.AverageCoreC0Residency[0],
+ &metrics.CoreC0Residency[0],
sizeof(uint16_t) * 16);
- gpu_metrics->average_dram_reads = metrics.AverageDRAMReads;
- gpu_metrics->average_dram_writes = metrics.AverageDRAMWrites;
+ gpu_metrics->average_dram_reads = metrics.DRAMReads;
+ gpu_metrics->average_dram_writes = metrics.DRAMWrites;
- gpu_metrics->average_socket_power = metrics.AverageSocketPower;
+ gpu_metrics->average_socket_power = metrics.SocketPower;
gpu_metrics->average_ipu_power = metrics.IpuPower;
gpu_metrics->average_apu_power = metrics.ApuPower;
+ gpu_metrics->average_gfx_power = metrics.GfxPower;
gpu_metrics->average_dgpu_power = metrics.dGpuPower;
- gpu_metrics->average_core_power = metrics.AverageCorePower;
- memcpy(&gpu_metrics->core_power[0],
+ gpu_metrics->average_all_core_power = metrics.AllCorePower;
+ memcpy(&gpu_metrics->average_core_power[0],
&metrics.CorePower[0],
sizeof(uint16_t) * 16);
- gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
- gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
- gpu_metrics->average_vpeclk_frequency = metrics.AverageVpeclkFrequency;
- gpu_metrics->average_fclk_frequency = metrics.AverageFclkFrequency;
- gpu_metrics->average_vclk_frequency = metrics.AverageVclkFrequency;
- gpu_metrics->average_ipuclk_frequency = metrics.AverageIpuclkFrequency;
+ gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
+ gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
+ gpu_metrics->average_vpeclk_frequency = metrics.VpeclkFrequency;
+ gpu_metrics->average_fclk_frequency = metrics.FclkFrequency;
+ gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
+ gpu_metrics->average_ipuclk_frequency = metrics.IpuclkFrequency;
memcpy(&gpu_metrics->current_coreclk[0],
&metrics.CoreFrequency[0],
@@ -638,68 +530,6 @@ static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v3_0);
}
-static ssize_t smu_v14_0_0_get_legacy_gpu_metrics(struct smu_context *smu,
- void **table)
-{
- struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v2_1 *gpu_metrics =
- (struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table;
- SmuMetrics_legacy_t metrics;
- int ret = 0;
-
- ret = smu_cmn_get_metrics_table(smu, &metrics, true);
- if (ret)
- return ret;
-
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1);
-
- gpu_metrics->temperature_gfx = metrics.GfxTemperature;
- gpu_metrics->temperature_soc = metrics.SocTemperature;
- memcpy(&gpu_metrics->temperature_core[0],
- &metrics.CoreTemperature[0],
- sizeof(uint16_t) * 8);
- gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0];
- gpu_metrics->temperature_l3[1] = metrics.L3Temperature[1];
-
- gpu_metrics->average_gfx_activity = metrics.GfxActivity;
- gpu_metrics->average_mm_activity = metrics.UvdActivity;
-
- gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
- gpu_metrics->average_gfx_power = metrics.Power[0];
- gpu_metrics->average_soc_power = metrics.Power[1];
- memcpy(&gpu_metrics->average_core_power[0],
- &metrics.CorePower[0],
- sizeof(uint16_t) * 8);
-
- gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
- gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
- gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
- gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
- gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
- gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;
-
- memcpy(&gpu_metrics->current_coreclk[0],
- &metrics.CoreFrequency[0],
- sizeof(uint16_t) * 8);
-
- gpu_metrics->throttle_status = metrics.ThrottlerStatus;
- gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
-
- *table = (void *)gpu_metrics;
-
- return sizeof(struct gpu_metrics_v2_1);
-}
-
-static ssize_t smu_v14_0_0_common_get_gpu_metrics(struct smu_context *smu,
- void **table)
-{
-
- if (smu->smc_fw_version > 0x5d3500)
- return smu_v14_0_0_get_gpu_metrics(smu, table);
- else
- return smu_v14_0_0_get_legacy_gpu_metrics(smu, table);
-}
-
static int smu_v14_0_0_mode2_reset(struct smu_context *smu)
{
int ret;
@@ -928,7 +758,7 @@ static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu,
return -EINVAL;
}
- return smu_v14_0_0_common_get_smu_metrics_data(smu, member_type, value);
+ return smu_v14_0_0_get_smu_metrics_data(smu, member_type, value);
}
static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu,
@@ -1230,7 +1060,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.read_sensor = smu_v14_0_0_read_sensor,
.is_dpm_running = smu_v14_0_0_is_dpm_running,
.set_watermarks_table = smu_v14_0_0_set_watermarks_table,
- .get_gpu_metrics = smu_v14_0_0_common_get_gpu_metrics,
+ .get_gpu_metrics = smu_v14_0_0_get_gpu_metrics,
.get_enabled_mask = smu_cmn_get_enabled_mask,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_driver_table_location = smu_v14_0_set_driver_table_location,
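
With the v14.0.0 interface change above, SocketPower arrives from the PMFW in milliwatts, and the sensor read path repacks it as whole watts in the upper bits plus centi-watts in the low byte, e.g. 15340 mW becomes (15 << 8) + 34. The arithmetic below is a standalone illustration of just that conversion; the surrounding sensor plumbing is assumed.

/*
 * Illustrative arithmetic only: pack a milliwatt reading into the
 * watts-plus-centiwatts encoding used by the read path above.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_socket_power(uint32_t milliwatts)
{
	return (milliwatts / 1000 << 8) + (milliwatts % 1000 / 10);
}

int main(void)
{
	uint32_t mw = 15340;
	uint32_t packed = pack_socket_power(mw);

	printf("%u mW -> packed 0x%04x (%u W + %u cW)\n",
	       mw, packed, packed >> 8, packed & 0xff);
	return 0;
}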
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 6e57c94379a9..001a5cf09657 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -1004,6 +1004,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
case METRICS_VERSION(2, 4):
structure_size = sizeof(struct gpu_metrics_v2_4);
break;
+ case METRICS_VERSION(3, 0):
+ structure_size = sizeof(struct gpu_metrics_v3_0);
+ break;
default:
return;
}
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 84148a79414b..c45c07840f64 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -1580,7 +1580,6 @@ static const struct pwm_ops ti_sn_pwm_ops = {
.free = ti_sn_pwm_free,
.apply = ti_sn_pwm_apply,
.get_state = ti_sn_pwm_get_state,
- .owner = THIS_MODULE,
};
static int ti_sn_pwm_probe(struct auxiliary_device *adev,
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index 58b0b46a21e6..d511d17c5bdf 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -73,9 +73,10 @@ static int hyperv_setup_vram(struct hyperv_drm_device *hv,
struct drm_device *dev = &hv->dev;
int ret;
- drm_aperture_remove_conflicting_framebuffers(screen_info.lfb_base,
- screen_info.lfb_size,
- &hyperv_driver);
+ if (IS_ENABLED(CONFIG_SYSFB))
+ drm_aperture_remove_conflicting_framebuffers(screen_info.lfb_base,
+ screen_info.lfb_size,
+ &hyperv_driver);
hv->fb_size = (unsigned long)hv->mmio_megabytes * 1024 * 1024;
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
index 9327701da1b5..b00d6c280159 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
@@ -21,6 +21,8 @@
#include "intel_pxp_tee.h"
#include "intel_pxp_types.h"
+#define PXP_TRANSPORT_TIMEOUT_MS 5000 /* 5 sec */
+
static bool
is_fw_err_platform_config(struct intel_pxp *pxp, u32 type)
{
@@ -73,13 +75,15 @@ static int intel_pxp_tee_io_message(struct intel_pxp *pxp,
goto unlock;
}
- ret = pxp_component->ops->send(pxp_component->tee_dev, msg_in, msg_in_size);
+ ret = pxp_component->ops->send(pxp_component->tee_dev, msg_in, msg_in_size,
+ PXP_TRANSPORT_TIMEOUT_MS);
if (ret) {
drm_err(&i915->drm, "Failed to send PXP TEE message\n");
goto unlock;
}
- ret = pxp_component->ops->recv(pxp_component->tee_dev, msg_out, msg_out_max_size);
+ ret = pxp_component->ops->recv(pxp_component->tee_dev, msg_out, msg_out_max_size,
+ PXP_TRANSPORT_TIMEOUT_MS);
if (ret < 0) {
drm_err(&i915->drm, "Failed to receive PXP TEE message\n");
goto unlock;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.c b/drivers/gpu/drm/nouveau/dispnv50/core.c
index abefc2343443..f045515696cb 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.c
@@ -42,6 +42,7 @@ nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
int version;
int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
} cores[] = {
+ { AD102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ GA102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ TU102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ GV100_DISP_CORE_CHANNEL_DMA, 0, corec37d_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index a0ac8c258d9f..7840b6428afb 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1592,6 +1592,148 @@ nv50_sor_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *st
nv_encoder->crtc = NULL;
}
+// common/inc/displayport/displayport.h
+#define DP_CONFIG_WATERMARK_ADJUST 2
+#define DP_CONFIG_WATERMARK_LIMIT 20
+#define DP_CONFIG_INCREASED_WATERMARK_ADJUST 8
+#define DP_CONFIG_INCREASED_WATERMARK_LIMIT 22
+
+static bool
+nv50_sor_dp_watermark_sst(struct nouveau_encoder *outp,
+ struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ bool enhancedFraming = outp->dp.dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP;
+ u64 minRate = outp->dp.link_bw * 1000;
+ unsigned tuSize = 64;
+ unsigned waterMark;
+ unsigned hBlankSym;
+ unsigned vBlankSym;
+ unsigned watermarkAdjust = DP_CONFIG_WATERMARK_ADJUST;
+ unsigned watermarkMinimum = DP_CONFIG_WATERMARK_LIMIT;
+ // depth is multiplied by 16 in case of DSC enable
+ s32 hblank_symbols;
+ // number of link clocks per line.
+ int vblank_symbols = 0;
+ bool bEnableDsc = false;
+ unsigned surfaceWidth = asyh->mode.h.blanks - asyh->mode.h.blanke;
+ unsigned rasterWidth = asyh->mode.h.active;
+ unsigned depth = asyh->or.bpc * 3;
+ unsigned DSC_FACTOR = bEnableDsc ? 16 : 1;
+ u64 pixelClockHz = asyh->mode.clock * 1000;
+ u64 PrecisionFactor = 100000, ratioF, watermarkF;
+ u32 numLanesPerLink = outp->dp.link_nr;
+ u32 numSymbolsPerLine;
+ u32 BlankingBits;
+ u32 surfaceWidthPerLink;
+ u32 PixelSteeringBits;
+ u64 NumBlankingLinkClocks;
+ u32 MinHBlank;
+
+ if (outp->outp.info.dp.increased_wm) {
+ watermarkAdjust = DP_CONFIG_INCREASED_WATERMARK_ADJUST;
+ watermarkMinimum = DP_CONFIG_INCREASED_WATERMARK_LIMIT;
+ }
+
+ if ((pixelClockHz * depth) >= (8 * minRate * outp->dp.link_nr * DSC_FACTOR))
+ {
+ return false;
+ }
+
+ //
+ // For DSC, if (pclk * bpp) < (1/64 * orclk * 8 * lanes) then some TU may end up with
+ // 0 active symbols. This may cause HW hang. Bug 200379426
+ //
+ if ((bEnableDsc) &&
+ ((pixelClockHz * depth) < div_u64(8 * minRate * outp->dp.link_nr * DSC_FACTOR, 64)))
+ {
+ return false;
+ }
+
+ //
+ // Perform the SST calculation.
+ // For auto mode the watermark calculation does not need to track accumulated error the
+ // formulas for manual mode will not work. So below calculation was extracted from the DTB.
+ //
+ ratioF = div_u64((u64)pixelClockHz * depth * PrecisionFactor, DSC_FACTOR);
+
+ ratioF = div_u64(ratioF, 8 * (u64) minRate * outp->dp.link_nr);
+
+ if (PrecisionFactor < ratioF) // Assert if we will end up with a negative number in below
+ return false;
+
+ watermarkF = div_u64(ratioF * tuSize * (PrecisionFactor - ratioF), PrecisionFactor);
+ waterMark = (unsigned)(watermarkAdjust + (div_u64(2 * div_u64(depth * PrecisionFactor, 8 * numLanesPerLink * DSC_FACTOR) + watermarkF, PrecisionFactor)));
+
+ //
+ // Bounds check the watermark
+ //
+ numSymbolsPerLine = div_u64(surfaceWidth * depth, 8 * outp->dp.link_nr * DSC_FACTOR);
+
+ if (WARN_ON(waterMark > 39 || waterMark > numSymbolsPerLine))
+ return false;
+
+ //
+ // Clamp the low side
+ //
+ if (waterMark < watermarkMinimum)
+ waterMark = watermarkMinimum;
+
+ //Bits to send BS/BE/Extra symbols due to pixel padding
+ //Also accounts for enhanced framing.
+ BlankingBits = 3*8*numLanesPerLink + (enhancedFraming ? 3*8*numLanesPerLink : 0);
+
+ //VBID/MVID/MAUD sent 4 times all the time
+ BlankingBits += 3*8*4;
+
+ surfaceWidthPerLink = surfaceWidth;
+
+ //Extra bits sent due to pixel steering
+ u32 remain;
+ div_u64_rem(surfaceWidthPerLink, numLanesPerLink, &remain);
+ PixelSteeringBits = remain ? div_u64((numLanesPerLink - remain) * depth, DSC_FACTOR) : 0;
+
+ BlankingBits += PixelSteeringBits;
+ NumBlankingLinkClocks = div_u64((u64)BlankingBits * PrecisionFactor, (8 * numLanesPerLink));
+ MinHBlank = (u32)(div_u64(div_u64(NumBlankingLinkClocks * pixelClockHz, minRate), PrecisionFactor));
+ MinHBlank += 12;
+
+ if (WARN_ON(MinHBlank > rasterWidth - surfaceWidth))
+ return false;
+
+ // Bug 702290 - Active Width should be greater than 60
+ if (WARN_ON(surfaceWidth <= 60))
+ return false;
+
+
+ hblank_symbols = (s32)(div_u64((u64)(rasterWidth - surfaceWidth - MinHBlank) * minRate, pixelClockHz));
+
+ //reduce HBlank Symbols to account for secondary data packet
+ hblank_symbols -= 1; //Stuffer latency to send BS
+ hblank_symbols -= 3; //SPKT latency to send data to stuffer
+
+ hblank_symbols -= numLanesPerLink == 1 ? 9 : numLanesPerLink == 2 ? 6 : 3;
+
+ hBlankSym = (hblank_symbols < 0) ? 0 : hblank_symbols;
+
+ // Refer to dev_disp.ref for more information.
+ // # symbols/vblank = ((SetRasterBlankEnd.X + SetRasterSize.Width - SetRasterBlankStart.X - 40) * link_clk / pclk) - Y - 1;
+ // where Y = (# lanes == 4) 12 : (# lanes == 2) ? 21 : 39
+ if (surfaceWidth < 40)
+ {
+ vblank_symbols = 0;
+ }
+ else
+ {
+ vblank_symbols = (s32)((div_u64((u64)(surfaceWidth - 40) * minRate, pixelClockHz))) - 1;
+
+ vblank_symbols -= numLanesPerLink == 1 ? 39 : numLanesPerLink == 2 ? 21 : 12;
+ }
+
+ vBlankSym = (vblank_symbols < 0) ? 0 : vblank_symbols;
+
+ return nvif_outp_dp_sst(&outp->outp, head->base.index, waterMark, hBlankSym, vBlankSym);
+}
+
static void
nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
@@ -1679,6 +1821,7 @@ nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *sta
break;
case DCB_OUTPUT_DP:
nouveau_dp_train(nv_encoder, false, mode->clock, asyh->or.bpc);
+ nv50_sor_dp_watermark_sst(nv_encoder, head, asyh);
depth = nv50_dp_bpc_to_depth(asyh->or.bpc);
if (nv_encoder->outp.or.link & 1)
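
The new nv50_sor_dp_watermark_sst() above carries the DP SST watermark math entirely in scaled integer arithmetic (PrecisionFactor = 100000) because the kernel cannot use floating point. The following is a simplified standalone sketch of the core ratio/watermark step with assumed example numbers and no DSC; it is not the driver's full calculation and the sample clock/rate figures are illustrative only.

/*
 * Sketch of the scaled-integer watermark step, assumed example numbers,
 * DSC disabled: the pixel-to-link bandwidth ratio is carried scaled by
 * 100000 and only divided back out at the end.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t P = 100000;		/* PrecisionFactor */
	uint64_t pclk = 148500000;		/* pixel clock [Hz], e.g. 1080p60 */
	uint64_t depth = 24;			/* bits per pixel */
	uint64_t rate = 270000000;		/* per-lane link rate (assumed units) */
	uint64_t lanes = 4, tu = 64;
	uint64_t adjust = 2, minimum = 20;	/* DP_CONFIG_WATERMARK_* style values */

	uint64_t ratio = pclk * depth * P / (8 * rate * lanes);	/* scaled TU fill ratio */
	uint64_t wmF = ratio * tu * (P - ratio) / P;
	uint64_t wm = adjust + (2 * (depth * P / (8 * lanes)) + wmF) / P;

	if (wm < minimum)
		wm = minimum;			/* clamp the low side */
	printf("ratio = %llu / %llu, watermark = %llu\n",
	       (unsigned long long)ratio, (unsigned long long)P,
	       (unsigned long long)wm);
	return 0;
}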
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index 8b5a240d57e4..fa161b74d967 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -35,6 +35,7 @@ struct nv_device_info_v0 {
#define NV_DEVICE_INFO_V0_VOLTA 0x0b
#define NV_DEVICE_INFO_V0_TURING 0x0c
#define NV_DEVICE_INFO_V0_AMPERE 0x0d
+#define NV_DEVICE_INFO_V0_ADA 0x0e
__u8 family;
__u8 pad06[2];
__u64 ram_size;
@@ -90,6 +91,8 @@ struct nv_device_time_v0 {
#define NV_DEVICE_HOST_RUNLIST_ENGINES_SEC2 0x00004000
#define NV_DEVICE_HOST_RUNLIST_ENGINES_NVDEC 0x00008000
#define NV_DEVICE_HOST_RUNLIST_ENGINES_NVENC 0x00010000
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_NVJPG 0x00020000
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_OFA 0x00040000
/* Returns the number of available channels on runlist(data). */
#define NV_DEVICE_HOST_RUNLIST_CHANNELS NV_DEVICE_HOST(0x00000101)
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index ad1e5de84e80..e668ab1664f0 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -104,6 +104,7 @@
#define GV100_DISP /* if0010.h */ 0x0000c370
#define TU102_DISP /* if0010.h */ 0x0000c570
#define GA102_DISP /* if0010.h */ 0x0000c670
+#define AD102_DISP /* if0010.h */ 0x0000c770
#define GV100_DISP_CAPS 0x0000c373
@@ -154,6 +155,7 @@
#define GV100_DISP_CORE_CHANNEL_DMA /* if0014.h */ 0x0000c37d
#define TU102_DISP_CORE_CHANNEL_DMA /* if0014.h */ 0x0000c57d
#define GA102_DISP_CORE_CHANNEL_DMA /* if0014.h */ 0x0000c67d
+#define AD102_DISP_CORE_CHANNEL_DMA /* if0014.h */ 0x0000c77d
#define NV50_DISP_OVERLAY_CHANNEL_DMA /* if0014.h */ 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA /* if0014.h */ 0x0000827e
@@ -192,8 +194,15 @@
#define AMPERE_B /* cl9097.h */ 0x0000c797
+#define ADA_A /* cl9097.h */ 0x0000c997
+
#define NV74_BSP 0x000074b0
+#define NVC4B0_VIDEO_DECODER 0x0000c4b0
+#define NVC6B0_VIDEO_DECODER 0x0000c6b0
+#define NVC7B0_VIDEO_DECODER 0x0000c7b0
+#define NVC9B0_VIDEO_DECODER 0x0000c9b0
+
#define GT212_MSVLD 0x000085b1
#define IGT21A_MSVLD 0x000086b1
#define G98_MSVLD 0x000088b1
@@ -222,6 +231,10 @@
#define AMPERE_DMA_COPY_A 0x0000c6b5
#define AMPERE_DMA_COPY_B 0x0000c7b5
+#define NVC4B7_VIDEO_ENCODER 0x0000c4b7
+#define NVC7B7_VIDEO_ENCODER 0x0000c7b7
+#define NVC9B7_VIDEO_ENCODER 0x0000c9b7
+
#define FERMI_DECOMPRESS 0x000090b8
#define NV50_COMPUTE 0x000050c0
@@ -237,6 +250,14 @@
#define VOLTA_COMPUTE_A 0x0000c3c0
#define TURING_COMPUTE_A 0x0000c5c0
#define AMPERE_COMPUTE_B 0x0000c7c0
+#define ADA_COMPUTE_A 0x0000c9c0
#define NV74_CIPHER 0x000074c1
+
+#define NVC4D1_VIDEO_NVJPG 0x0000c4d1
+#define NVC9D1_VIDEO_NVJPG 0x0000c9d1
+
+#define NVC6FA_VIDEO_OFA 0x0000c6fa
+#define NVC7FA_VIDEO_OFA 0x0000c7fa
+#define NVC9FA_VIDEO_OFA 0x0000c9fa
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index f65b5009acf7..f057d348221e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -46,6 +46,7 @@ struct nvkm_device {
GV100 = 0x140,
TU100 = 0x160,
GA100 = 0x170,
+ AD100 = 0x190,
} card_type;
u32 chipset;
u8 chiprev;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
index b857cf142c4a..3d3f1063aaa7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
@@ -48,6 +48,8 @@ int nvkm_falcon_pio_rd(struct nvkm_falcon *, u8 port, enum nvkm_falcon_mem type,
const u8 *img, u32 img_base, int len);
int nvkm_falcon_dma_wr(struct nvkm_falcon *, const u8 *img, u64 dma_addr, u32 dma_base,
enum nvkm_falcon_mem mem_type, u32 mem_base, int len, bool sec);
+bool nvkm_falcon_riscv_active(struct nvkm_falcon *);
+void nvkm_falcon_intr_retrigger(struct nvkm_falcon *);
int gm200_flcn_reset_wait_mem_scrubbing(struct nvkm_falcon *);
int gm200_flcn_disable(struct nvkm_falcon *);
@@ -61,10 +63,15 @@ void gm200_flcn_tracepc(struct nvkm_falcon *);
int gp102_flcn_reset_eng(struct nvkm_falcon *);
extern const struct nvkm_falcon_func_pio gp102_flcn_emem_pio;
+bool tu102_flcn_riscv_active(struct nvkm_falcon *);
+
+void ga100_flcn_intr_retrigger(struct nvkm_falcon *);
+
int ga102_flcn_select(struct nvkm_falcon *);
int ga102_flcn_reset_prep(struct nvkm_falcon *);
int ga102_flcn_reset_wait_mem_scrubbing(struct nvkm_falcon *);
extern const struct nvkm_falcon_func_dma ga102_flcn_dma;
+bool ga102_flcn_riscv_active(struct nvkm_falcon *);
void nvkm_falcon_v1_load_imem(struct nvkm_falcon *,
void *, u32, u32, u16, u8, bool);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
index d4e507e252b1..20839be72644 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
@@ -10,6 +10,7 @@ struct nvkm_firmware {
enum nvkm_firmware_type {
NVKM_FIRMWARE_IMG_RAM,
NVKM_FIRMWARE_IMG_DMA,
+ NVKM_FIRMWARE_IMG_SGT,
} type;
} *func;
const char *name;
@@ -21,7 +22,10 @@ struct nvkm_firmware {
struct nvkm_firmware_mem {
struct nvkm_memory memory;
- struct scatterlist sgl;
+ union {
+ struct scatterlist sgl; /* DMA */
+ struct sg_table sgt; /* SGT */
+ };
} mem;
};
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
index 58108dea5aeb..30c17db483cb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT */
-NVKM_LAYOUT_ONCE(NVKM_SUBDEV_TOP , struct nvkm_top , top)
NVKM_LAYOUT_ONCE(NVKM_SUBDEV_GSP , struct nvkm_gsp , gsp)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_TOP , struct nvkm_top , top)
NVKM_LAYOUT_ONCE(NVKM_SUBDEV_VFN , struct nvkm_vfn , vfn)
NVKM_LAYOUT_ONCE(NVKM_SUBDEV_PCI , struct nvkm_pci , pci)
NVKM_LAYOUT_ONCE(NVKM_SUBDEV_VBIOS , struct nvkm_bios , bios)
@@ -42,9 +42,9 @@ NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSENC , struct nvkm_engine , msenc)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSPDEC , struct nvkm_engine , mspdec)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSPPP , struct nvkm_engine , msppp)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSVLD , struct nvkm_engine , msvld)
-NVKM_LAYOUT_INST(NVKM_ENGINE_NVDEC , struct nvkm_nvdec , nvdec, 5)
+NVKM_LAYOUT_INST(NVKM_ENGINE_NVDEC , struct nvkm_nvdec , nvdec, 8)
NVKM_LAYOUT_INST(NVKM_ENGINE_NVENC , struct nvkm_nvenc , nvenc, 3)
-NVKM_LAYOUT_ONCE(NVKM_ENGINE_NVJPG , struct nvkm_engine , nvjpg)
+NVKM_LAYOUT_INST(NVKM_ENGINE_NVJPG , struct nvkm_engine , nvjpg, 8)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_OFA , struct nvkm_engine , ofa)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_PM , struct nvkm_pm , pm)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC , struct nvkm_engine , sec)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index ad9aef2df48f..3e8db8280e2a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -5,11 +5,29 @@
#include <core/engine.h>
#include <core/object.h>
#include <core/event.h>
+#include <subdev/gsp.h>
struct nvkm_disp {
const struct nvkm_disp_func *func;
struct nvkm_engine engine;
+ struct {
+ struct nvkm_gsp_client client;
+ struct nvkm_gsp_device device;
+
+ struct nvkm_gsp_object objcom;
+ struct nvkm_gsp_object object;
+
+#define NVKM_DPYID_PLUG BIT(0)
+#define NVKM_DPYID_UNPLUG BIT(1)
+#define NVKM_DPYID_IRQ BIT(2)
+ struct nvkm_event event;
+ struct nvkm_gsp_event hpd;
+ struct nvkm_gsp_event irq;
+
+ u32 assigned_sors;
+ } rm;
+
struct list_head heads;
struct list_head iors;
struct list_head outps;
@@ -69,4 +87,5 @@ int gp102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
int gv100_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
int tu102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
int ga102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int ad102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index b7bb8a29a729..341f133dc38c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -62,6 +62,7 @@ struct nvkm_falcon_func {
int (*enable)(struct nvkm_falcon *);
int (*select)(struct nvkm_falcon *);
u32 addr2;
+ u32 riscv_irqmask;
bool reset_pmc;
int (*reset_eng)(struct nvkm_falcon *);
int (*reset_prep)(struct nvkm_falcon *);
@@ -87,6 +88,9 @@ struct nvkm_falcon_func {
u32 stride;
} cmdq, msgq;
+ bool (*riscv_active)(struct nvkm_falcon *);
+ void (*intr_retrigger)(struct nvkm_falcon *);
+
struct {
u32 *data;
u32 size;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 221abd6c4310..be508f65b280 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -4,6 +4,7 @@
#include <core/engine.h>
#include <core/object.h>
#include <core/event.h>
+#include <subdev/gsp.h>
struct nvkm_fault_data;
#define NVKM_FIFO_ENGN_NR 16
@@ -35,6 +36,15 @@ struct nvkm_chan {
atomic_t blocked;
atomic_t errored;
+ struct {
+ struct nvkm_gsp_object object;
+ struct {
+ dma_addr_t addr;
+ void *ptr;
+ } mthdbuf;
+ struct nvkm_vctx *grctx;
+ } rm;
+
struct list_head cctxs;
struct list_head head;
};
@@ -43,6 +53,8 @@ struct nvkm_chan *nvkm_chan_get_chid(struct nvkm_engine *, int id, unsigned long
struct nvkm_chan *nvkm_chan_get_inst(struct nvkm_engine *, u64 inst, unsigned long *irqflags);
void nvkm_chan_put(struct nvkm_chan **, unsigned long irqflags);
+struct nvkm_chan *nvkm_uchan_chan(struct nvkm_object *);
+
struct nvkm_fifo {
const struct nvkm_fifo_func *func;
struct nvkm_engine engine;
@@ -66,8 +78,15 @@ struct nvkm_fifo {
struct {
struct nvkm_memory *mem;
struct nvkm_vma *bar1;
+
+ struct mutex mutex;
+ struct list_head list;
} userd;
+ struct {
+ u32 mthdbuf_size;
+ } rm;
+
spinlock_t lock;
struct mutex mutex;
};
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index a2333cfe6955..8145796ffc61 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -55,4 +55,5 @@ int gp10b_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct n
int gv100_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
int tu102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
int ga102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int ad102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
index 9baf197ac833..8d2e170883e1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
@@ -12,5 +12,8 @@ struct nvkm_nvdec {
};
int gm107_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
+int tu102_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
+int ga100_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
int ga102_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
+int ad102_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
index 1a259c5c9a71..018c58fc32ba 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
@@ -12,4 +12,7 @@ struct nvkm_nvenc {
};
int gm107_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
+int tu102_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
+int ga102_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
+int ad102_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h
new file mode 100644
index 000000000000..80b7933a789e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_NVJPG_H__
+#define __NVKM_NVJPG_H__
+#include <core/engine.h>
+
+int ga100_nvjpg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int ad102_nvjpg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h
new file mode 100644
index 000000000000..e72e2115333b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_OFA_H__
+#define __NVKM_OFA_H__
+#include <core/engine.h>
+
+int ga100_ofa_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int ga102_ofa_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int ad102_ofa_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
index 4f07836ab984..874a5080ba06 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
@@ -11,6 +11,10 @@ struct nvkm_bar {
spinlock_t lock;
bool bar2;
+ void __iomem *flushBAR2PhysMode;
+ struct nvkm_memory *flushFBZero;
+ void __iomem *flushBAR2;
+
/* whether the BAR supports to be ioremapped WC or should be uncached */
bool iomap_uncached;
};
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
index b61cfb077533..b4b7841e3b13 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
@@ -29,6 +29,7 @@ int nvbios_memcmp(struct nvkm_bios *, u32 addr, const char *, u32 len);
u8 nvbios_rd08(struct nvkm_bios *, u32 addr);
u16 nvbios_rd16(struct nvkm_bios *, u32 addr);
u32 nvbios_rd32(struct nvkm_bios *, u32 addr);
+void *nvbios_pointer(struct nvkm_bios *, u32 addr);
int nvkm_bios_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_bios **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 1755b0df3cc1..5b798a1a313d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -158,9 +158,9 @@ struct nvkm_ram {
struct nvkm_ram_data target;
};
-int
-nvkm_ram_get(struct nvkm_device *, u8 heap, u8 type, u8 page, u64 size,
- bool contig, bool back, struct nvkm_memory **);
+int nvkm_ram_wrap(struct nvkm_device *, u64 addr, u64 size, struct nvkm_memory **);
+int nvkm_ram_get(struct nvkm_device *, u8 heap, u8 type, u8 page, u64 size,
+ bool contig, bool back, struct nvkm_memory **);
struct nvkm_ram_func {
u64 upper;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index 72619d7df73e..2fa0445d8928 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -3,14 +3,449 @@
#define nvkm_gsp(p) container_of((p), struct nvkm_gsp, subdev)
#include <core/subdev.h>
#include <core/falcon.h>
+#include <core/firmware.h>
+
+#define GSP_PAGE_SHIFT 12
+#define GSP_PAGE_SIZE BIT(GSP_PAGE_SHIFT)
+
+struct nvkm_gsp_mem {
+ u32 size;
+ void *data;
+ dma_addr_t addr;
+};
+
+struct nvkm_gsp_radix3 {
+ struct nvkm_gsp_mem mem[3];
+};
+
+int nvkm_gsp_sg(struct nvkm_device *, u64 size, struct sg_table *);
+void nvkm_gsp_sg_free(struct nvkm_device *, struct sg_table *);
+
+typedef int (*nvkm_gsp_msg_ntfy_func)(void *priv, u32 fn, void *repv, u32 repc);
+
+struct nvkm_gsp_event;
+typedef void (*nvkm_gsp_event_func)(struct nvkm_gsp_event *, void *repv, u32 repc);
struct nvkm_gsp {
const struct nvkm_gsp_func *func;
struct nvkm_subdev subdev;
struct nvkm_falcon falcon;
+
+ struct {
+ struct {
+ const struct firmware *load;
+ const struct firmware *unload;
+ } booter;
+ const struct firmware *bl;
+ const struct firmware *rm;
+ } fws;
+
+ struct nvkm_firmware fw;
+ struct nvkm_gsp_mem sig;
+ struct nvkm_gsp_radix3 radix3;
+
+ struct {
+ struct {
+ struct {
+ u64 addr;
+ u64 size;
+ } vga_workspace;
+ u64 addr;
+ u64 size;
+ } bios;
+ struct {
+ struct {
+ u64 addr;
+ u64 size;
+ } frts, boot, elf, heap;
+ u64 addr;
+ u64 size;
+ } wpr2;
+ struct {
+ u64 addr;
+ u64 size;
+ } heap;
+ u64 addr;
+ u64 size;
+
+ struct {
+ u64 addr;
+ u64 size;
+ } region[16];
+ int region_nr;
+ u32 rsvd_size;
+ } fb;
+
+ struct {
+ struct nvkm_falcon_fw load;
+ struct nvkm_falcon_fw unload;
+ } booter;
+
+ struct {
+ struct nvkm_gsp_mem fw;
+ u32 code_offset;
+ u32 data_offset;
+ u32 manifest_offset;
+ u32 app_version;
+ } boot;
+
+ struct nvkm_gsp_mem libos;
+ struct nvkm_gsp_mem loginit;
+ struct nvkm_gsp_mem logintr;
+ struct nvkm_gsp_mem logrm;
+ struct nvkm_gsp_mem rmargs;
+
+ struct nvkm_gsp_mem wpr_meta;
+
+ struct {
+ struct sg_table sgt;
+ struct nvkm_gsp_radix3 radix3;
+ struct nvkm_gsp_mem meta;
+ } sr;
+
+ struct {
+ struct nvkm_gsp_mem mem;
+
+ struct {
+ int nr;
+ u32 size;
+ u64 *ptr;
+ } ptes;
+
+ struct {
+ u32 size;
+ void *ptr;
+ } cmdq, msgq;
+ } shm;
+
+ struct nvkm_gsp_cmdq {
+ struct mutex mutex;
+ u32 cnt;
+ u32 seq;
+ u32 *wptr;
+ u32 *rptr;
+ } cmdq;
+
+ struct nvkm_gsp_msgq {
+ struct mutex mutex;
+ u32 cnt;
+ u32 *wptr;
+ u32 *rptr;
+ struct nvkm_gsp_msgq_ntfy {
+ u32 fn;
+ nvkm_gsp_msg_ntfy_func func;
+ void *priv;
+ } ntfy[16];
+ int ntfy_nr;
+ struct work_struct work;
+ } msgq;
+
+ bool running;
+
+ /* Internal GSP-RM control handles. */
+ struct {
+ struct nvkm_gsp_client {
+ struct nvkm_gsp_object {
+ struct nvkm_gsp_client *client;
+ struct nvkm_gsp_object *parent;
+ u32 handle;
+ } object;
+
+ struct nvkm_gsp *gsp;
+
+ struct list_head events;
+ } client;
+
+ struct nvkm_gsp_device {
+ struct nvkm_gsp_object object;
+ struct nvkm_gsp_object subdevice;
+ } device;
+ } internal;
+
+ struct {
+ enum nvkm_subdev_type type;
+ int inst;
+ u32 stall;
+ u32 nonstall;
+ } intr[32];
+ int intr_nr;
+
+ struct {
+ u64 rm_bar1_pdb;
+ u64 rm_bar2_pdb;
+ } bar;
+
+ struct {
+ u8 gpcs;
+ u8 tpcs;
+ } gr;
+
+ const struct nvkm_gsp_rm {
+ void *(*rpc_get)(struct nvkm_gsp *, u32 fn, u32 argc);
+ void *(*rpc_push)(struct nvkm_gsp *, void *argv, bool wait, u32 repc);
+ void (*rpc_done)(struct nvkm_gsp *gsp, void *repv);
+
+ void *(*rm_ctrl_get)(struct nvkm_gsp_object *, u32 cmd, u32 argc);
+ void *(*rm_ctrl_push)(struct nvkm_gsp_object *, void *argv, u32 repc);
+ void (*rm_ctrl_done)(struct nvkm_gsp_object *, void *repv);
+
+ void *(*rm_alloc_get)(struct nvkm_gsp_object *, u32 oclass, u32 argc);
+ void *(*rm_alloc_push)(struct nvkm_gsp_object *, void *argv, u32 repc);
+ void (*rm_alloc_done)(struct nvkm_gsp_object *, void *repv);
+
+ int (*rm_free)(struct nvkm_gsp_object *);
+
+ int (*client_ctor)(struct nvkm_gsp *, struct nvkm_gsp_client *);
+ void (*client_dtor)(struct nvkm_gsp_client *);
+
+ int (*device_ctor)(struct nvkm_gsp_client *, struct nvkm_gsp_device *);
+ void (*device_dtor)(struct nvkm_gsp_device *);
+
+ int (*event_ctor)(struct nvkm_gsp_device *, u32 handle, u32 id,
+ nvkm_gsp_event_func, struct nvkm_gsp_event *);
+ void (*event_dtor)(struct nvkm_gsp_event *);
+ } *rm;
+
+ struct {
+		struct mutex mutex;
+ struct idr idr;
+ } client_id;
};
+static inline bool
+nvkm_gsp_rm(struct nvkm_gsp *gsp)
+{
+ return gsp && (gsp->fws.rm || gsp->fw.img);
+}
+
+static inline void *
+nvkm_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc)
+{
+ return gsp->rm->rpc_get(gsp, fn, argc);
+}
+
+static inline void *
+nvkm_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
+{
+ return gsp->rm->rpc_push(gsp, argv, wait, repc);
+}
+
+static inline void *
+nvkm_gsp_rpc_rd(struct nvkm_gsp *gsp, u32 fn, u32 argc)
+{
+ void *argv = nvkm_gsp_rpc_get(gsp, fn, argc);
+
+ if (IS_ERR_OR_NULL(argv))
+ return argv;
+
+ return nvkm_gsp_rpc_push(gsp, argv, true, argc);
+}
+
+static inline int
+nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv, bool wait)
+{
+ void *repv = nvkm_gsp_rpc_push(gsp, argv, wait, 0);
+
+ if (IS_ERR(repv))
+ return PTR_ERR(repv);
+
+ return 0;
+}
+
+static inline void
+nvkm_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
+{
+ gsp->rm->rpc_done(gsp, repv);
+}
+
+static inline void *
+nvkm_gsp_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc)
+{
+ return object->client->gsp->rm->rm_ctrl_get(object, cmd, argc);
+}
+
+static inline void *
+nvkm_gsp_rm_ctrl_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
+{
+ return object->client->gsp->rm->rm_ctrl_push(object, argv, repc);
+}
+
+static inline void *
+nvkm_gsp_rm_ctrl_rd(struct nvkm_gsp_object *object, u32 cmd, u32 repc)
+{
+ void *argv = nvkm_gsp_rm_ctrl_get(object, cmd, repc);
+
+ if (IS_ERR(argv))
+ return argv;
+
+ return nvkm_gsp_rm_ctrl_push(object, argv, repc);
+}
+
+static inline int
+nvkm_gsp_rm_ctrl_wr(struct nvkm_gsp_object *object, void *argv)
+{
+ void *repv = nvkm_gsp_rm_ctrl_push(object, argv, 0);
+
+ if (IS_ERR(repv))
+ return PTR_ERR(repv);
+
+ return 0;
+}
+
+static inline void
+nvkm_gsp_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv)
+{
+ object->client->gsp->rm->rm_ctrl_done(object, repv);
+}
+
+static inline void *
+nvkm_gsp_rm_alloc_get(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u32 argc,
+ struct nvkm_gsp_object *object)
+{
+ struct nvkm_gsp_client *client = parent->client;
+ struct nvkm_gsp *gsp = client->gsp;
+ void *argv;
+
+ object->client = parent->client;
+ object->parent = parent;
+ object->handle = handle;
+
+ argv = gsp->rm->rm_alloc_get(object, oclass, argc);
+ if (IS_ERR_OR_NULL(argv)) {
+ object->client = NULL;
+ return argv;
+ }
+
+ return argv;
+}
+
+static inline void *
+nvkm_gsp_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
+{
+ void *repv = object->client->gsp->rm->rm_alloc_push(object, argv, repc);
+
+ if (IS_ERR(repv))
+ object->client = NULL;
+
+ return repv;
+}
+
+static inline int
+nvkm_gsp_rm_alloc_wr(struct nvkm_gsp_object *object, void *argv)
+{
+ void *repv = nvkm_gsp_rm_alloc_push(object, argv, 0);
+
+ if (IS_ERR(repv))
+ return PTR_ERR(repv);
+
+ return 0;
+}
+
+static inline void
+nvkm_gsp_rm_alloc_done(struct nvkm_gsp_object *object, void *repv)
+{
+ object->client->gsp->rm->rm_alloc_done(object, repv);
+}
+
+static inline int
+nvkm_gsp_rm_alloc(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u32 argc,
+ struct nvkm_gsp_object *object)
+{
+ void *argv = nvkm_gsp_rm_alloc_get(parent, handle, oclass, argc, object);
+
+ if (IS_ERR_OR_NULL(argv))
+ return argv ? PTR_ERR(argv) : -EIO;
+
+ return nvkm_gsp_rm_alloc_wr(object, argv);
+}
+
+static inline int
+nvkm_gsp_rm_free(struct nvkm_gsp_object *object)
+{
+ if (object->client)
+ return object->client->gsp->rm->rm_free(object);
+
+ return 0;
+}
+
+static inline int
+nvkm_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
+{
+ if (WARN_ON(!gsp->rm))
+ return -ENOSYS;
+
+ return gsp->rm->client_ctor(gsp, client);
+}
+
+static inline void
+nvkm_gsp_client_dtor(struct nvkm_gsp_client *client)
+{
+ if (client->gsp)
+ client->gsp->rm->client_dtor(client);
+}
+
+static inline int
+nvkm_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
+{
+ return client->gsp->rm->device_ctor(client, device);
+}
+
+static inline void
+nvkm_gsp_device_dtor(struct nvkm_gsp_device *device)
+{
+ if (device->object.client)
+ device->object.client->gsp->rm->device_dtor(device);
+}
+
+static inline int
+nvkm_gsp_client_device_ctor(struct nvkm_gsp *gsp,
+ struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
+{
+ int ret = nvkm_gsp_client_ctor(gsp, client);
+
+ if (ret == 0) {
+ ret = nvkm_gsp_device_ctor(client, device);
+ if (ret)
+ nvkm_gsp_client_dtor(client);
+ }
+
+ return ret;
+}
+
+struct nvkm_gsp_event {
+ struct nvkm_gsp_device *device;
+ u32 id;
+ nvkm_gsp_event_func func;
+
+ struct nvkm_gsp_object object;
+
+ struct list_head head;
+};
+
+static inline int
+nvkm_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
+ nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
+{
+ return device->object.client->gsp->rm->event_ctor(device, handle, id, func, event);
+}
+
+static inline void
+nvkm_gsp_event_dtor(struct nvkm_gsp_event *event)
+{
+ struct nvkm_gsp_device *device = event->device;
+
+ if (device)
+ device->object.client->gsp->rm->event_dtor(event);
+}
+
+int nvkm_gsp_intr_stall(struct nvkm_gsp *, enum nvkm_subdev_type, int);
+int nvkm_gsp_intr_nonstall(struct nvkm_gsp *, enum nvkm_subdev_type, int);
+
int gv100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
+int tu102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
+int tu116_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
+int ga100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
int ga102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
+int ad102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index 92a36ddfc29f..e10cbd9203ec 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -8,6 +8,8 @@ struct nvkm_instmem {
const struct nvkm_instmem_func *func;
struct nvkm_subdev subdev;
+ bool suspend;
+
spinlock_t lock;
struct list_head list;
struct list_head boot;
@@ -22,6 +24,11 @@ struct nvkm_instmem {
struct nvkm_ramht *ramht;
struct nvkm_memory *ramro;
struct nvkm_memory *ramfc;
+
+ struct {
+ struct sg_table fbsr;
+ bool fbsr_valid;
+ } rm;
};
u32 nvkm_instmem_rd32(struct nvkm_instmem *, u32 addr);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 2fd2f2433fc7..935b1cacd528 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -2,6 +2,7 @@
#ifndef __NVKM_MMU_H__
#define __NVKM_MMU_H__
#include <core/subdev.h>
+#include <subdev/gsp.h>
struct nvkm_vma {
struct list_head head;
@@ -63,6 +64,16 @@ struct nvkm_vmm {
void *nullp;
bool replay;
+
+ struct {
+ u64 bar2_pdb;
+
+ struct nvkm_gsp_client client;
+ struct nvkm_gsp_device device;
+ struct nvkm_gsp_object object;
+
+ struct nvkm_vma *rsvd;
+ } rm;
};
int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h
new file mode 100644
index 000000000000..7157c7757698
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h
@@ -0,0 +1,170 @@
+#ifndef __src_common_sdk_nvidia_inc_alloc_alloc_channel_h__
+#define __src_common_sdk_nvidia_inc_alloc_alloc_channel_h__
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct NV_MEMORY_DESC_PARAMS {
+ NV_DECLARE_ALIGNED(NvU64 base, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+ NvU32 addressSpace;
+ NvU32 cacheAttrib;
+} NV_MEMORY_DESC_PARAMS;
+
+#define NVOS04_FLAGS_CHANNEL_TYPE 1:0
+#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL 0x00000000
+#define NVOS04_FLAGS_CHANNEL_TYPE_VIRTUAL 0x00000001 // OBSOLETE
+#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL_FOR_VIRTUAL 0x00000002 // OBSOLETE
+
+#define NVOS04_FLAGS_VPR 2:2
+#define NVOS04_FLAGS_VPR_FALSE 0x00000000
+#define NVOS04_FLAGS_VPR_TRUE 0x00000001
+
+#define NVOS04_FLAGS_CC_SECURE 2:2
+#define NVOS04_FLAGS_CC_SECURE_FALSE 0x00000000
+#define NVOS04_FLAGS_CC_SECURE_TRUE 0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING 3:3
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_TRUE 0x00000001
+
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE 4:4
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_DEFAULT 0x00000000
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_ONE 0x00000001
+
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL 5:5
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_FALSE 0x00000000
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_TRUE 0x00000001
+
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING 6:6
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_FALSE 0x00000000
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_TRUE 0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE 7:7
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_TRUE 0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE 10:8
+
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED 11:11
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_TRUE 0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE 20:12
+
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED 21:21
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_TRUE 0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV 22:22
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_TRUE 0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER 23:23
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_TRUE 0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO 24:24
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_TRUE 0x00000001
+
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL 25:25
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_FALSE 0x00000000
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_TRUE 0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT 26:26
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_TRUE 0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT 27:27
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_TRUE 0x00000001
+
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD 29:28
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_DEFAULT 0x00000000
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_ONE 0x00000001
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_TWO 0x00000002
+
+#define NVOS04_FLAGS_MAP_CHANNEL 30:30
+#define NVOS04_FLAGS_MAP_CHANNEL_FALSE 0x00000000
+#define NVOS04_FLAGS_MAP_CHANNEL_TRUE 0x00000001
+
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC 31:31
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_FALSE 0x00000000
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_TRUE 0x00000001
+
+#define CC_CHAN_ALLOC_IV_SIZE_DWORD 3U
+#define CC_CHAN_ALLOC_NONCE_SIZE_DWORD 8U
+
+typedef struct NV_CHANNEL_ALLOC_PARAMS {
+
+ NvHandle hObjectError; // error context DMA
+ NvHandle hObjectBuffer; // no longer used
+ NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8); // offset to beginning of GP FIFO
+ NvU32 gpFifoEntries; // number of GP FIFO entries
+
+ NvU32 flags;
+
+
+ NvHandle hContextShare; // context share handle
+ NvHandle hVASpace; // VASpace for the channel
+
+ // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0
+ NvHandle hUserdMemory[NV_MAX_SUBDEVICES];
+
+ // offset to beginning of UserD within hUserdMemory[x]
+ NV_DECLARE_ALIGNED(NvU64 userdOffset[NV_MAX_SUBDEVICES], 8);
+
+ // engine type(NV2080_ENGINE_TYPE_*) with which this channel is associated
+ NvU32 engineType;
+ // Channel identifier that is unique for the duration of a RM session
+ NvU32 cid;
+ // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods
+ NvU32 subDeviceId;
+ NvHandle hObjectEccError; // ECC error context DMA
+
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS instanceMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS userdMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS ramfcMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS mthdbufMem, 8);
+
+ NvHandle hPhysChannelGroup; // reserved
+ NvU32 internalFlags; // reserved
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS errorNotifierMem, 8); // reserved
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS eccErrorNotifierMem, 8); // reserved
+ NvU32 ProcessID; // reserved
+ NvU32 SubProcessID; // reserved
+ // IV used for CPU-side encryption / GPU-side decryption.
+ NvU32 encryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
+ // IV used for CPU-side decryption / GPU-side encryption.
+ NvU32 decryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
+ // Nonce used CPU-side signing / GPU-side signature verification.
+ NvU32 hmacNonce[CC_CHAN_ALLOC_NONCE_SIZE_DWORD]; // reserved
+} NV_CHANNEL_ALLOC_PARAMS;
+
+typedef NV_CHANNEL_ALLOC_PARAMS NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h
new file mode 100644
index 000000000000..7a3fc023072d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h
@@ -0,0 +1,38 @@
+#ifndef __src_common_sdk_nvidia_inc_class_cl0000_h__
+#define __src_common_sdk_nvidia_inc_class_cl0000_h__
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV01_ROOT (0x0U) /* finn: Evaluated from "NV0000_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+typedef struct NV0000_ALLOC_PARAMETERS {
+ NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */
+ NvU32 processID;
+ char processName[NV_PROC_NAME_MAX_LENGTH];
+} NV0000_ALLOC_PARAMETERS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h
new file mode 100644
index 000000000000..e4de36d63666
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h
@@ -0,0 +1,38 @@
+#ifndef __src_common_sdk_nvidia_inc_class_cl0005_h__
+#define __src_common_sdk_nvidia_inc_class_cl0005_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct NV0005_ALLOC_PARAMETERS {
+ NvHandle hParentClient;
+ NvHandle hSrcResource;
+
+ NvV32 hClass;
+ NvV32 notifyIndex;
+ NV_DECLARE_ALIGNED(NvP64 data, 8);
+} NV0005_ALLOC_PARAMETERS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h
new file mode 100644
index 000000000000..8868118e47d6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h
@@ -0,0 +1,43 @@
+#ifndef __src_common_sdk_nvidia_inc_class_cl0080_h__
+#define __src_common_sdk_nvidia_inc_class_cl0080_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV01_DEVICE_0 (0x80U) /* finn: Evaluated from "NV0080_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+typedef struct NV0080_ALLOC_PARAMETERS {
+ NvU32 deviceId;
+ NvHandle hClientShare;
+ NvHandle hTargetClient;
+ NvHandle hTargetDevice;
+ NvV32 flags;
+ NV_DECLARE_ALIGNED(NvU64 vaSpaceSize, 8);
+ NV_DECLARE_ALIGNED(NvU64 vaStartInternal, 8);
+ NV_DECLARE_ALIGNED(NvU64 vaLimitInternal, 8);
+ NvV32 vaMode;
+} NV0080_ALLOC_PARAMETERS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h
new file mode 100644
index 000000000000..9040ea5608a0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h
@@ -0,0 +1,35 @@
+#ifndef __src_common_sdk_nvidia_inc_class_cl2080_h__
+#define __src_common_sdk_nvidia_inc_class_cl2080_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2002-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV20_SUBDEVICE_0 (0x2080U) /* finn: Evaluated from "NV2080_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+typedef struct NV2080_ALLOC_PARAMETERS {
+ NvU32 subDeviceId;
+} NV2080_ALLOC_PARAMETERS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h
new file mode 100644
index 000000000000..ba659d6477d3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h
@@ -0,0 +1,62 @@
+#ifndef __src_common_sdk_nvidia_inc_class_cl2080_notification_h__
+#define __src_common_sdk_nvidia_inc_class_cl2080_notification_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV2080_NOTIFIERS_HOTPLUG (1)
+
+#define NV2080_NOTIFIERS_DP_IRQ (7)
+
+#define NV2080_ENGINE_TYPE_GRAPHICS (0x00000001)
+#define NV2080_ENGINE_TYPE_GR0 NV2080_ENGINE_TYPE_GRAPHICS
+
+#define NV2080_ENGINE_TYPE_COPY0 (0x00000009)
+
+#define NV2080_ENGINE_TYPE_BSP (0x00000013)
+#define NV2080_ENGINE_TYPE_NVDEC0 NV2080_ENGINE_TYPE_BSP
+
+#define NV2080_ENGINE_TYPE_MSENC (0x0000001b)
+#define NV2080_ENGINE_TYPE_NVENC0 NV2080_ENGINE_TYPE_MSENC /* Mutually exclusive alias */
+
+#define NV2080_ENGINE_TYPE_SW (0x00000022)
+
+#define NV2080_ENGINE_TYPE_SEC2 (0x00000026)
+
+#define NV2080_ENGINE_TYPE_NVJPG (0x0000002b)
+#define NV2080_ENGINE_TYPE_NVJPEG0 NV2080_ENGINE_TYPE_NVJPG
+
+#define NV2080_ENGINE_TYPE_OFA (0x00000033)
+
+typedef struct {
+ NvU32 plugDisplayMask;
+ NvU32 unplugDisplayMask;
+} Nv2080HotplugNotification;
+
+typedef struct Nv2080DpIrqNotificationRec {
+ NvU32 displayId;
+} Nv2080DpIrqNotification;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h
new file mode 100644
index 000000000000..9eb780a1ac72
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h
@@ -0,0 +1,33 @@
+#ifndef __src_common_sdk_nvidia_inc_class_cl84a0_h__
+#define __src_common_sdk_nvidia_inc_class_cl84a0_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV01_MEMORY_LIST_SYSTEM (0x00000081)
+
+#define NV01_MEMORY_LIST_FBMEM (0x00000082)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h
new file mode 100644
index 000000000000..f1d21776e395
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h
@@ -0,0 +1,31 @@
+#ifndef __src_common_sdk_nvidia_inc_class_cl90f1_h__
+#define __src_common_sdk_nvidia_inc_class_cl90f1_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define FERMI_VASPACE_A (0x000090f1)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h
new file mode 100644
index 000000000000..b8f32576cfaa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h
@@ -0,0 +1,34 @@
+#ifndef __src_common_sdk_nvidia_inc_class_clc0b5sw_h__
+#define __src_common_sdk_nvidia_inc_class_clc0b5sw_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct NVC0B5_ALLOCATION_PARAMETERS {
+ NvU32 version;
+ NvU32 engineType;
+} NVC0B5_ALLOCATION_PARAMETERS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h
new file mode 100644
index 000000000000..58b3ba7badf1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h
@@ -0,0 +1,39 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073common_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073common_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct NV0073_CTRL_CMD_DSC_CAP_PARAMS {
+ NvBool bDscSupported;
+ NvU32 encoderColorFormatMask;
+ NvU32 lineBufferSizeKB;
+ NvU32 rateBufferSizeKB;
+ NvU32 bitsPerPixelPrecision;
+ NvU32 maxNumHztSlices;
+ NvU32 lineBufferBitDepth;
+} NV0073_CTRL_CMD_DSC_CAP_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
new file mode 100644
index 000000000000..596f2ea8344e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
@@ -0,0 +1,166 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dfp_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dfp_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 flags;
+ NvU32 flags2;
+} NV0073_CTRL_DFP_GET_INFO_PARAMS;
+
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK (0x00000005U)
+#define NV0073_CTRL_DFP_FLAGS_LANE 5:3
+#define NV0073_CTRL_DFP_FLAGS_LANE_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_OCT (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT 6:6
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER 7:7
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE 8:8
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE 9:9
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE 10:10
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE 11:11
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE 12:12
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED 14:14
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT 15:15
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT 16:16
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW 19:17
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LINK 21:20
+#define NV0073_CTRL_DFP_FLAGS_LINK_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID 22:22
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID 24:23
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED 25:25
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT 29:26
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE 30:30
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE (0x00000001U)
+
+#define NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS (0x731144U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER 96U
+
+typedef struct NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 numELDSize;
+ NvU8 bufferELD[NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER];
+ NvU32 maxFreqSupported;
+ NvU32 ctrl;
+ NvU32 deviceEntry;
+} NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS;
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD 0:0
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV 1:1
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_TRUE (0x00000001U)
+
+#define NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE (0x731150U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvBool enable;
+} NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS;
+
+typedef NvU32 NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG;
+
+typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_INFO {
+ NvU32 displayMask;
+ NvU32 sorType;
+} NV0073_CTRL_DFP_ASSIGN_SOR_INFO;
+
+#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR (0x731152U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS 4U
+
+typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU8 sorExcludeMask;
+ NvU32 slaveDisplayId;
+ NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG forceSublinkConfig;
+ NvBool bIs2Head1Or;
+ NvU32 sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
+ NV0073_CTRL_DFP_ASSIGN_SOR_INFO sorAssignListWithTag[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
+ NvU8 reservedSorMask;
+ NvU32 flags;
+} NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS;
+
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO 0:0
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_OPTIMAL (0x00000001U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_DEFAULT (0x00000000U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE 1:1
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_NO (0x00000000U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_YES (0x00000001U)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
new file mode 100644
index 000000000000..bae4b1997736
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
@@ -0,0 +1,335 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dp_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dp_h__
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV0073_CTRL_CMD_DP_AUXCH_CTRL (0x731341U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_AUXCH_CTRL_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE 16U
+
+typedef struct NV0073_CTRL_DP_AUXCH_CTRL_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvBool bAddrOnly;
+ NvU32 cmd;
+ NvU32 addr;
+ NvU8 data[NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE];
+ NvU32 size;
+ NvU32 replyType;
+ NvU32 retryTimeMs;
+} NV0073_CTRL_DP_AUXCH_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE 3:3
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_I2C (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_AUX (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT 2:2
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE 1:0
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_READ (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE_STATUS (0x00000002U)
+
+#define NV0073_CTRL_CMD_DP_CTRL (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_DP_CTRL_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 cmd;
+ NvU32 data;
+ NvU32 err;
+ NvU32 retryTimeMs;
+ NvU32 eightLaneDpcdBaseAddr;
+} NV0073_CTRL_DP_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT 0:0
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW 1:1
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD 2:2
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_UNUSED 3:3
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE 4:4
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_SINGLE_STREAM (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_MULTI_STREAM (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING 5:5
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING 6:6
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING 7:7
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING 8:8
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_DEFAULT (0x00000000U)
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_FORCE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING 9:9
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED 10:10
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING 12:11
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_DONOT_TOGGLE_TRANSMISSION (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_TOGGLE_TRANSMISSION_ON (0x00000002U)
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER 13:13
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG 14:14
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC 15:15
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC_TRUE (0x00000001U)
+
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST 29:29
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE 30:30
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG 31:31
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_TRUE (0x00000001U)
+
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT 4:0
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0 (0x00000000U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_1 (0x00000001U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_2 (0x00000002U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_4 (0x00000004U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_8 (0x00000008U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW 15:8
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_1_62GBPS (0x00000006U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_16GBPS (0x00000008U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_43GBPS (0x00000009U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_70GBPS (0x0000000AU)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_3_24GBPS (0x0000000CU)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_4_32GBPS (0x00000010U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_5_40GBPS (0x00000014U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_8_10GBPS (0x0000001EU)
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING 18:18
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_NO (0x00000000U)
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_YES (0x00000001U)
+#define NV0073_CTRL_DP_DATA_TARGET 22:19
+#define NV0073_CTRL_DP_DATA_TARGET_SINK (0x00000000U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_0 (0x00000001U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_1 (0x00000002U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_2 (0x00000003U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_3 (0x00000004U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_4 (0x00000005U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_5 (0x00000006U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_6 (0x00000007U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_7 (0x00000008U)
+
+#define NV0073_CTRL_MAX_LANES 8U
+
+typedef struct NV0073_CTRL_DP_LANE_DATA_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 numLanes;
+ NvU32 data[NV0073_CTRL_MAX_LANES];
+} NV0073_CTRL_DP_LANE_DATA_PARAMS;
+
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS 1:0
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_NONE (0x00000000U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL1 (0x00000001U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL2 (0x00000002U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL3 (0x00000003U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT 3:2
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL0 (0x00000000U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL1 (0x00000001U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL2 (0x00000002U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL3 (0x00000003U)
+
+#define NV0073_CTRL_CMD_DP_SET_LANE_DATA (0x731346U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_LANE_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 mute;
+} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID (0x73135bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 preferredDisplayId;
+
+ NvBool force;
+ NvBool useBFM;
+
+ NvU32 displayIdAssigned;
+ NvU32 allDisplayMask;
+} NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID (0x73135cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+} NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CONFIG_STREAM (0x731362U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 head;
+ NvU32 sorIndex;
+ NvU32 dpLink;
+
+ NvBool bEnableOverride;
+ NvBool bMST;
+ NvU32 singleHeadMultistreamMode;
+ NvU32 hBlankSym;
+ NvU32 vBlankSym;
+ NvU32 colorFormat;
+ NvBool bEnableTwoHeadOneOr;
+
+ struct {
+ NvU32 slotStart;
+ NvU32 slotEnd;
+ NvU32 PBN;
+ NvU32 Timeslice;
+ NvBool sendACT; // deprecated -Use NV0073_CTRL_CMD_DP_SEND_ACT
+ NvU32 singleHeadMSTPipeline;
+ NvBool bEnableAudioOverRightPanel;
+ } MST;
+
+ struct {
+ NvBool bEnhancedFraming;
+ NvU32 tuSize;
+ NvU32 waterMark;
+ NvU32 actualPclkHz; // deprecated -Use MvidWarParams
+ NvU32 linkClkFreqHz; // deprecated -Use MvidWarParams
+ NvBool bEnableAudioOverRightPanel;
+ struct {
+ NvU32 activeCnt;
+ NvU32 activeFrac;
+ NvU32 activePolarity;
+ NvBool mvidWarEnabled;
+ struct {
+ NvU32 actualPclkHz;
+ NvU32 linkClkFreqHz;
+ } MvidWarParams;
+ } Legacy;
+ } SST;
+} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT (0x731365U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS {
+ NvU32 subDeviceInstance;
+} NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U)
+
+typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 sorIndex;
+ NvU32 maxLinkRate;
+ NvU32 dpVersionsSupported;
+ NvU32 UHBRSupported;
+ NvBool bIsMultistreamSupported;
+ NvBool bIsSCEnabled;
+ NvBool bHasIncreasedWatermarkLimits;
+ NvBool bIsPC2Disabled;
+ NvBool isSingleHeadMSTSupported;
+ NvBool bFECSupported;
+ NvBool bIsTrainPhyRepeater;
+ NvBool bOverrideLinkBw;
+ NV0073_CTRL_CMD_DSC_CAP_PARAMS DSC;
+} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2 0:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4 1:1
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_YES (0x00000001U)
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE 2:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10 (0x00000004U)
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U)
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000005U)
+
+#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES 8U
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS {
+ // In
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+
+ // Out
+ NvU8 linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+ NvU8 linkBwCount;
+} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS;
+
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE 3:0
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_BEGIN (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHALLENGE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHECK (0x00000002U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_BEGIN (0x00000003U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHALLENGE (0x00000004U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHECK (0x00000005U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_RESET_MONITOR (0x00000006U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_INIT_PUBLIC_INFO (0x00000007U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_GET_PUBLIC_INFO (0x00000008U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_STATUS_CHECK (0x00000009U)
+
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_OK (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING (0x80000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_READ_ERROR (0x80000002U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_WRITE_ERROR (0x80000003U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_DEVICE_ERROR (0x80000004U)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h
new file mode 100644
index 000000000000..954958dcf834
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h
@@ -0,0 +1,216 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073specific_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073specific_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2 (0x730245U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES 2048U
+
+typedef struct NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 bufferSize;
+ NvU32 flags;
+ NvU8 edidBuffer[NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES];
+} NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_MAX_CONNECTORS 4U
+
+typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 flags;
+ NvU32 DDCPartners;
+ NvU32 count;
+ struct {
+ NvU32 index;
+ NvU32 type;
+ NvU32 location;
+ } data[NV0073_CTRL_MAX_CONNECTORS];
+ NvU32 platform;
+} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE (0x730273U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS {
+ NvU8 subDeviceInstance;
+ NvU32 displayId;
+ NvU8 enable;
+} NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM (0x730275U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS {
+ NvU8 subDeviceInstance;
+ NvU32 displayId;
+ NvU8 mute;
+} NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK (0x730287U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 headMask;
+} NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET (0x730288U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SET_OD_MAX_PACKET_SIZE 36U
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 transmitControl;
+ NvU32 packetSize;
+ NvU32 targetHead;
+ NvBool bUsePsrHeadforSdp;
+ NvU8 aPacket[NV0073_CTRL_SET_OD_MAX_PACKET_SIZE];
+} NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE 0:0
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_YES (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME 1:1
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME 2:2
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK 3:3
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE 4:4
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT 5:5
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_SW_CONTROLLED (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_HW_CONTROLLED (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY 6:6
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_FALSE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_TRUE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING 7:7
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_FALSE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_TRUE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE 9:8
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME0 (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME1 (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE 31:31
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_YES (0x0000001U)
+
+#define NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO (0x73028bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 index;
+ NvU32 type;
+ NvU32 protocol;
+ NvU32 ditherType;
+ NvU32 ditherAlgo;
+ NvU32 location;
+ NvU32 rootPortId;
+ NvU32 dcbIndex;
+ NV_DECLARE_ALIGNED(NvU64 vbiosAddress, 8);
+ NvBool bIsLitByVbios;
+ NvBool bIsDispDynamic;
+} NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_NONE (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_DAC (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_SOR (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR (0x00000003U)
+
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_DSI (0x00000005U)
+
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT (0x00000000U)
+
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS (0x00000005U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A (0x00000008U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B (0x00000009U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DSI (0x00000010U)
+
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI (0x00000011U)
+
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC (0x00000000U)
+
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_UNKNOWN (0xFFFFFFFFU)
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 brightness;
+ NvBool bUncalibrated;
+} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS (0x730293U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 caps;
+} NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED 0:0
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED 1:1
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED 2:2
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED 5:3
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED 6:6
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED 9:7
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
new file mode 100644
index 000000000000..d69cef3c01fd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
@@ -0,0 +1,65 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073system_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073system_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS (0x730102U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 flags;
+ NvU32 numHeads;
+} NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730120U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayMask;
+ NvU32 displayMaskDDC;
+} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730122U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 flags;
+ NvU32 displayMask;
+ NvU32 retryTimeMs;
+} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE (0x730126U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 head;
+ NvU32 flags;
+ NvU32 displayId;
+} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS;
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h
new file mode 100644
index 000000000000..6acb3f73242d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h
@@ -0,0 +1,57 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080fifo_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080fifo_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID 4:0
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS (0x00000000)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VLD (0x00000001)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VIDEO (0x00000002)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_MPEG (0x00000003)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_CAPTURE (0x00000004)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_DISPLAY (0x00000005)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_ENCRYPTION (0x00000006)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_POSTPROCESS (0x00000007)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ZCULL (0x00000008)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PM (0x00000009)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COMPUTE_PREEMPT (0x0000000a)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PREEMPT (0x0000000b)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SPILL (0x0000000c)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL (0x0000000d)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BETACB (0x0000000e)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV (0x0000000f)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PATCH (0x00000010)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BUNDLE_CB (0x00000011)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL_GLOBAL (0x00000012)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ATTRIBUTE_CB (0x00000013)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV_CB_GLOBAL (0x00000014)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_POOL (0x00000015)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_CTRL_BLK (0x00000016)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_FECS_EVENT (0x00000017)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP (0x00000018)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT (0x00000019)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h
new file mode 100644
index 000000000000..3db099e62364
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h
@@ -0,0 +1,48 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gpu_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gpu_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
+ NvU32 totalVFs;
+ NvU32 firstVfOffset;
+ NvU32 vfFeatureMask;
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar0Size, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar1Size, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar2Size, 8);
+ NvBool b64bitBar0;
+ NvBool b64bitBar1;
+ NvBool b64bitBar2;
+ NvBool bSriovEnabled;
+ NvBool bSriovHeavyEnabled;
+ NvBool bEmulateVFBar0TlbInvalidationRegister;
+ NvBool bClientRmAllocatedCtxBuffer;
+} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h
new file mode 100644
index 000000000000..ed01df925573
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h
@@ -0,0 +1,31 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gr_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gr_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV0080_CTRL_GR_CAPS_TBL_SIZE 23
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h
new file mode 100644
index 000000000000..b5b7631de99b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h
@@ -0,0 +1,40 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080bios_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080bios_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
+ NvU32 BoardID;
+ char chipSKU[4];
+ char chipSKUMod[2];
+ char project[5];
+ char projectSKU[5];
+ char CDP[6];
+ char projectSKUMod[2];
+ NvU32 businessCycle;
+} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h
new file mode 100644
index 000000000000..fe912d2bd183
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h
@@ -0,0 +1,35 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080ce_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080ce_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS {
+ NvU32 size;
+} NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS;
+
+#define NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE (0x20802a08) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS_MESSAGE_ID" */
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
new file mode 100644
index 000000000000..87bc4ff92ce1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
@@ -0,0 +1,41 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080event_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080event_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION (0x20800301) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS {
+ NvU32 event;
+ NvU32 action;
+ NvBool bNotifyState;
+ NvU32 info32;
+ NvU16 info16;
+} NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS;
+
+#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h
new file mode 100644
index 000000000000..68c81f9f803c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h
@@ -0,0 +1,51 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fb_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fb_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES 17U
+
+typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES];
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
+ NV_DECLARE_ALIGNED(NvU64 base, 8);
+ NV_DECLARE_ALIGNED(NvU64 limit, 8);
+ NV_DECLARE_ALIGNED(NvU64 reserved, 8);
+ NvU32 performance;
+ NvBool supportCompressed;
+ NvBool supportISO;
+ NvBool bProtected;
+ NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList;
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO;
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16U
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS {
+ NvU32 numFBRegions;
+ NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8);
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h
new file mode 100644
index 000000000000..bc0f63699b06
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h
@@ -0,0 +1,52 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fifo_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fifo_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE (0x20801112) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES 32
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES 16
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA 2
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN 16
+
+typedef struct NV2080_CTRL_FIFO_DEVICE_ENTRY {
+ NvU32 engineData[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES];
+ NvU32 pbdmaIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
+ NvU32 pbdmaFaultIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
+ NvU32 numPbdmas;
+ char engineName[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN];
+} NV2080_CTRL_FIFO_DEVICE_ENTRY;
+
+typedef struct NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS {
+ NvU32 baseIndex;
+ NvU32 numEntries;
+ NvBool bMore;
+ // C form: NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
+ NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
+} NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h
new file mode 100644
index 000000000000..29d7a1052142
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h
@@ -0,0 +1,100 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gpu_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gpu_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV2080_GPU_MAX_NAME_STRING_LENGTH (0x0000040U)
+
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0 (0x00000000U)
+
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3 (0x00000003U)
+
+typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY {
+ NV_DECLARE_ALIGNED(NvU64 gpuPhysAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 gpuVirtAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+ NvU32 physAttr;
+ NvU16 bufferId;
+ NvU8 bInitialize;
+ NvU8 bNonmapped;
+} NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY;
+
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN 0U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM 1U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH 2U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_BUFFER_BUNDLE_CB 3U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PAGEPOOL 4U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB 5U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_RTV_CB_GLOBAL 6U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_POOL 7U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK 8U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT 9U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP 10U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP 11U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GLOBAL_PRIV_ACCESS_MAP 12U
+
+#define NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES 16U
+
+#define NV2080_CTRL_CMD_GPU_PROMOTE_CTX (0x2080012bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS {
+ NvU32 engineType;
+ NvHandle hClient;
+ NvU32 ChID;
+ NvHandle hChanClient;
+ NvHandle hObject;
+ NvHandle hVirtMemory;
+ NV_DECLARE_ALIGNED(NvU64 virtAddress, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+ NvU32 entryCount;
+ // C form: NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES];
+ NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES], 8);
+} NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS;
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS {
+ NvU32 gpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS {
+ NvU32 gpcId;
+ NvU32 tpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS {
+ NvU32 gpcId;
+ NvU32 zcullMask;
+} NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS;
+
+#define NV2080_GPU_MAX_GID_LENGTH (0x000000100ULL)
+
+typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
+ NvU32 index;
+ NvU32 flags;
+ NvU32 length;
+ NvU8 data[NV2080_GPU_MAX_GID_LENGTH];
+} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h
new file mode 100644
index 000000000000..59f8895bc5d7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h
@@ -0,0 +1,41 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gr_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gr_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef enum NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS {
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_MAIN = 0,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_SPILL = 1,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_PAGEPOOL = 2,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_BETACB = 3,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_RTV = 4,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL = 5,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL = 6,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL_CPU = 7,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END = 8,
+} NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h
new file mode 100644
index 000000000000..e11b2dbe5288
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h
@@ -0,0 +1,162 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080internal_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080internal_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS {
+ NvU32 feHwSysCap;
+ NvU32 windowPresentMask;
+ NvBool bFbRemapperEnabled;
+ NvU32 numHeads;
+ NvBool bPrimaryVga;
+ NvU32 i2cPort;
+ NvU32 internalDispActiveMask;
+} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS;
+
+#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES 8
+
+#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x19
+
+typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO {
+ NvU32 size;
+ NvU32 alignment;
+} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO {
+ NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT];
+} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS {
+ NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES];
+} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO {
+ NvU32 engDesc;
+ NvU32 ctxAttr;
+ NvU32 ctxBufferSize;
+ NvU32 addrSpaceList;
+ NvU32 registerBase;
+} NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO;
+#define NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS 0x40
+
+#define NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO (0x20800a42) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS {
+ NvU32 numConstructedFalcons;
+ NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS];
+} NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM (0x20800a49) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS {
+ NV_DECLARE_ALIGNED(NvU64 instMemPhysAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 instMemSize, 8);
+ NvU32 instMemAddrSpace;
+ NvU32 instMemCpuCacheAttr;
+} NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS {
+ NvU32 addressSpace;
+ NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 limit, 8);
+ NvU32 cacheSnoop;
+ NvU32 hclass;
+ NvU32 channelInstance;
+ NvBool valid;
+} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE (0x20800a5c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE 128
+
+typedef enum NV2080_INTR_CATEGORY {
+ NV2080_INTR_CATEGORY_DEFAULT = 0,
+ NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE = 1,
+ NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE_NOTIFICATION = 2,
+ NV2080_INTR_CATEGORY_RUNLIST = 3,
+ NV2080_INTR_CATEGORY_RUNLIST_NOTIFICATION = 4,
+ NV2080_INTR_CATEGORY_UVM_OWNED = 5,
+ NV2080_INTR_CATEGORY_UVM_SHARED = 6,
+ NV2080_INTR_CATEGORY_ENUM_COUNT = 7,
+} NV2080_INTR_CATEGORY;
+
+typedef struct NV2080_INTR_CATEGORY_SUBTREE_MAP {
+ NvU8 subtreeStart;
+ NvU8 subtreeEnd;
+} NV2080_INTR_CATEGORY_SUBTREE_MAP;
+
+typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY {
+ NvU16 engineIdx;
+ NvU32 pmcIntrMask;
+ NvU32 vectorStall;
+ NvU32 vectorNonStall;
+} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY;
+
+typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS {
+ NvU32 tableLen;
+ NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY table[NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE];
+ NV2080_INTR_CATEGORY_SUBTREE_MAP subtreeMap[NV2080_INTR_CATEGORY_ENUM_COUNT];
+} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_FBSR_INIT (0x20800ac2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS {
+ NvU32 fbsrType;
+ NvU32 numRegions;
+ NvHandle hClient;
+ NvHandle hSysMem;
+ NV_DECLARE_ALIGNED(NvU64 gspFbAllocsSysOffset, 8);
+ NvBool bEnteringGcoffState;
+} NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO (0x20800ac3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS {
+ NvU32 fbsrType;
+ NvHandle hClient;
+ NvHandle hVidMem;
+ NV_DECLARE_ALIGNED(NvU64 vidOffset, 8);
+ NV_DECLARE_ALIGNED(NvU64 sysOffset, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+} NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD (0x20800ac6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_ACPI_DSM_READ_SIZE (0x1000) /* finn: Evaluated from "(4 * 1024)" */
+
+typedef struct NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS {
+ NvU32 status;
+ NvU16 backLightDataSize;
+ NvU8 backLightData[NV2080_CTRL_ACPI_DSM_READ_SIZE];
+} NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h
new file mode 100644
index 000000000000..977e59818533
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h
@@ -0,0 +1,95 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl90f1_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl90f1_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define GMMU_FMT_MAX_LEVELS 6U
+
+#define NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES (0x90f10106U) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_MESSAGE_ID" */
+
+typedef struct NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS {
+ /*!
+ * [in] GPU sub-device handle - this API only supports unicast.
+ * Pass 0 to use subDeviceId instead.
+ */
+ NvHandle hSubDevice;
+
+ /*!
+ * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero.
+ */
+ NvU32 subDeviceId;
+
+ /*!
+ * [in] Page size (VA coverage) of the level to reserve.
+ * This need not be a leaf (page table) page size - it can be
+ * the coverage of an arbitrary level (including root page directory).
+ */
+ NV_DECLARE_ALIGNED(NvU64 pageSize, 8);
+
+ /*!
+ * [in] First GPU virtual address of the range to reserve.
+ * This must be aligned to pageSize.
+ */
+ NV_DECLARE_ALIGNED(NvU64 virtAddrLo, 8);
+
+ /*!
+ * [in] Last GPU virtual address of the range to reserve.
+ * This (+1) must be aligned to pageSize.
+ */
+ NV_DECLARE_ALIGNED(NvU64 virtAddrHi, 8);
+
+ /*!
+ * [in] Number of PDE levels to copy.
+ */
+ NvU32 numLevelsToCopy;
+
+ /*!
+ * [in] Per-level information.
+ */
+ struct {
+ /*!
+ * Physical address of this page level instance.
+ */
+ NV_DECLARE_ALIGNED(NvU64 physAddress, 8);
+
+ /*!
+ * Size in bytes allocated for this level instance.
+ */
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+
+ /*!
+ * Aperture in which this page level instance resides.
+ */
+ NvU32 aperture;
+
+ /*!
+ * Page shift corresponding to the level
+ */
+ NvU8 pageShift;
+ } levels[GMMU_FMT_MAX_LEVELS];
+} NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS;
+
+#endif
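A minimal caller-side sketch of the control call defined in the header above, assuming that header is included and that a hypothetical rm_ctrl() helper issues the call; only the parameter layout and the unicast rule (hSubDevice == 0 selects subDeviceId) come from the header, and every field value below is illustrative.

static int example_copy_reserved_pdes(NvU32 subdev_id, NvU64 pd_phys, NvU64 pd_bytes)
{
	NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS p = { 0 };

	p.hSubDevice      = 0;              /* unicast: use subDeviceId instead */
	p.subDeviceId     = subdev_id;
	p.pageSize        = 0x20000000ULL;  /* VA coverage of the copied level (example: 512MiB) */
	p.virtAddrLo      = 0;              /* must be pageSize-aligned */
	p.virtAddrHi      = p.pageSize - 1; /* (virtAddrHi + 1) must be pageSize-aligned */
	p.numLevelsToCopy = 1;
	p.levels[0].physAddress = pd_phys;  /* physical address of the level instance */
	p.levels[0].size        = pd_bytes; /* bytes allocated for that instance */
	p.levels[0].aperture    = 1;        /* aperture encoding is not part of this excerpt */
	p.levels[0].pageShift   = 29;       /* log2(pageSize) */

	/* rm_ctrl() is a stand-in for whatever mechanism issues the control call */
	return rm_ctrl(NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES, &p, sizeof(p));
}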
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h
new file mode 100644
index 000000000000..684045796232
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h
@@ -0,0 +1,42 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrla06f_ctrla06fgpfifo_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrla06f_ctrla06fgpfifo_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2007-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NVA06F_CTRL_CMD_GPFIFO_SCHEDULE (0xa06f0103) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_MESSAGE_ID" */
+
+typedef struct NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS {
+ NvBool bEnable;
+ NvBool bSkipSubmit;
+} NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS;
+
+#define NVA06F_CTRL_CMD_BIND (0xa06f0104) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_BIND_PARAMS_MESSAGE_ID" */
+
+typedef struct NVA06F_CTRL_BIND_PARAMS {
+ NvU32 engineType;
+} NVA06F_CTRL_BIND_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h
new file mode 100644
index 000000000000..5c5a004a8031
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h
@@ -0,0 +1,33 @@
+#ifndef __src_common_sdk_nvidia_inc_nvlimits_h__
+#define __src_common_sdk_nvidia_inc_nvlimits_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV_MAX_SUBDEVICES 8
+
+#define NV_PROC_NAME_MAX_LENGTH 100U
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h
new file mode 100644
index 000000000000..51b5591c603e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h
@@ -0,0 +1,148 @@
+#ifndef __src_common_sdk_nvidia_inc_nvos_h__
+#define __src_common_sdk_nvidia_inc_nvos_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NVOS02_FLAGS_PHYSICALITY 7:4
+#define NVOS02_FLAGS_PHYSICALITY_CONTIGUOUS (0x00000000)
+#define NVOS02_FLAGS_PHYSICALITY_NONCONTIGUOUS (0x00000001)
+#define NVOS02_FLAGS_LOCATION 11:8
+#define NVOS02_FLAGS_LOCATION_PCI (0x00000000)
+#define NVOS02_FLAGS_LOCATION_AGP (0x00000001)
+#define NVOS02_FLAGS_LOCATION_VIDMEM (0x00000002)
+#define NVOS02_FLAGS_COHERENCY 15:12
+#define NVOS02_FLAGS_COHERENCY_UNCACHED (0x00000000)
+#define NVOS02_FLAGS_COHERENCY_CACHED (0x00000001)
+#define NVOS02_FLAGS_COHERENCY_WRITE_COMBINE (0x00000002)
+#define NVOS02_FLAGS_COHERENCY_WRITE_THROUGH (0x00000003)
+#define NVOS02_FLAGS_COHERENCY_WRITE_PROTECT (0x00000004)
+#define NVOS02_FLAGS_COHERENCY_WRITE_BACK (0x00000005)
+#define NVOS02_FLAGS_ALLOC 17:16
+#define NVOS02_FLAGS_ALLOC_NONE (0x00000001)
+#define NVOS02_FLAGS_GPU_CACHEABLE 18:18
+#define NVOS02_FLAGS_GPU_CACHEABLE_NO (0x00000000)
+#define NVOS02_FLAGS_GPU_CACHEABLE_YES (0x00000001)
+
+#define NVOS02_FLAGS_KERNEL_MAPPING 19:19
+#define NVOS02_FLAGS_KERNEL_MAPPING_NO_MAP (0x00000000)
+#define NVOS02_FLAGS_KERNEL_MAPPING_MAP (0x00000001)
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY 20:20
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_NO (0x00000000)
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_YES (0x00000001)
+
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY 21:21
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_NO (0x00000000)
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_YES (0x00000001)
+
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY 22:22
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_NO (0x00000000)
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_YES (0x00000001)
+
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE 23:23
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_DEFAULT (0x00000000)
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_REQUIRED (0x00000001)
+
+#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT 24:24
+#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT_APERTURE (0x00000001)
+
+#define NVOS02_FLAGS_MEMORY_PROTECTION 26:25
+#define NVOS02_FLAGS_MEMORY_PROTECTION_DEFAULT (0x00000000)
+#define NVOS02_FLAGS_MEMORY_PROTECTION_PROTECTED (0x00000001)
+#define NVOS02_FLAGS_MEMORY_PROTECTION_UNPROTECTED (0x00000002)
+
+#define NVOS02_FLAGS_MAPPING 31:30
+#define NVOS02_FLAGS_MAPPING_DEFAULT (0x00000000)
+#define NVOS02_FLAGS_MAPPING_NO_MAP (0x00000001)
+#define NVOS02_FLAGS_MAPPING_NEVER_MAP (0x00000002)
+
+#define NV01_EVENT_CLIENT_RM (0x04000000)
+
+typedef struct
+{
+ NvV32 channelInstance; // One of the n channel instances of a given channel type.
+ // Note that core channel has only one instance
+ // while all others have two (one per head).
+ NvHandle hObjectBuffer; // ctx dma handle for DMA push buffer
+ NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications
+ NvU32 offset; // Initial offset for put/get, usually zero.
+ NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs
+
+ NvU32 flags;
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB 1:1
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES 0x00000000
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO 0x00000001
+
+} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+ NvV32 channelInstance; // One of the n channel instances of a given channel type.
+ // All PIO channels have two instances (one per head).
+ NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors.
+ NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of control region for PIO channel
+} NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances;
+ NvU32 engineInstance; // Select NVDEC0 or NVDEC1 or NVDEC2
+} NV_BSP_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of MSENC?
+ NvU32 engineInstance; // Select MSENC/NVENC0 or NVENC1 or NVENC2
+} NV_MSENC_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of NVJPG?
+ NvU32 engineInstance;
+} NV_NVJPG_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of OFA?
+} NV_OFA_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+ NvU32 index;
+ NvV32 flags;
+ NvU64 vaSize NV_ALIGN_BYTES(8);
+ NvU64 vaStartInternal NV_ALIGN_BYTES(8);
+ NvU64 vaLimitInternal NV_ALIGN_BYTES(8);
+ NvU32 bigPageSize;
+ NvU64 vaBase NV_ALIGN_BYTES(8);
+} NV_VASPACE_ALLOCATION_PARAMETERS;
+
+#define NV_VASPACE_ALLOCATION_INDEX_GPU_NEW 0x00 //<! Create new VASpace, by default
+
+#endif
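The NVOS02_FLAGS_* names above encode bit fields as high:low pairs in the usual RM DRF notation (for example NVOS02_FLAGS_LOCATION covers bits 11:8). A short sketch of how such a field is packed, using local stand-in macros because the SDK's real DRF helpers live in headers not excerpted here:

/* stand-ins for the SDK's DRF helpers; the x:y trick selects high/low bit */
#define EX_DRF_LO(f)      ((0 ? f) % 32)
#define EX_DRF_HI(f)      ((1 ? f) % 32)
#define EX_DRF_MASK(f)    (0xFFFFFFFFU >> (31 - EX_DRF_HI(f) + EX_DRF_LO(f)))
#define EX_DRF_NUM(f, n)  (((NvU32)(n) & EX_DRF_MASK(f)) << EX_DRF_LO(f))

/* contiguous, write-combined vidmem allocation flags (illustrative only) */
NvU32 flags = EX_DRF_NUM(NVOS02_FLAGS_PHYSICALITY, NVOS02_FLAGS_PHYSICALITY_CONTIGUOUS) |
              EX_DRF_NUM(NVOS02_FLAGS_LOCATION,    NVOS02_FLAGS_LOCATION_VIDMEM) |
              EX_DRF_NUM(NVOS02_FLAGS_COHERENCY,   NVOS02_FLAGS_COHERENCY_WRITE_COMBINE);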
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h
new file mode 100644
index 000000000000..5a2f273d95c8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h
@@ -0,0 +1,46 @@
+#ifndef __src_common_shared_msgq_inc_msgq_msgq_priv_h__
+#define __src_common_shared_msgq_inc_msgq_msgq_priv_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct
+{
+ NvU32 version; // queue version
+ NvU32 size; // bytes, page aligned
+ NvU32 msgSize; // entry size, bytes, must be power-of-2, 16 is minimum
+ NvU32 msgCount; // number of entries in queue
+ NvU32 writePtr; // message id of next slot
+ NvU32 flags; // if set it means "i want to swap RX"
+ NvU32 rxHdrOff; // Offset of msgqRxHeader from start of backing store.
+ NvU32 entryOff; // Offset of entries from start of backing store.
+} msgqTxHeader;
+
+typedef struct
+{
+ NvU32 readPtr; // message id of last message read
+} msgqRxHeader;
+
+#endif
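A sketch of how a producer might use the msgqTxHeader layout above: locating entry N in the backing store and publishing one message. The helper names and the 'base' pointer are assumptions, and real code would also need the appropriate memory barriers between filling an entry and advancing writePtr.

static void *example_msgq_entry(void *base, const msgqTxHeader *tx, NvU32 id)
{
	/* entries start entryOff bytes into the backing store, msgSize bytes each */
	return (char *)base + tx->entryOff + (NvU64)id * tx->msgSize;
}

static void example_msgq_publish(msgqTxHeader *tx)
{
	/* writePtr is a message id, so it wraps at msgCount rather than at 'size' */
	tx->writePtr = (tx->writePtr + 1) % tx->msgCount;
}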
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h
new file mode 100644
index 000000000000..83cf1b2c15a3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h
@@ -0,0 +1,52 @@
+#ifndef __src_common_uproc_os_common_include_libos_init_args_h__
+#define __src_common_uproc_os_common_include_libos_init_args_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef NvU64 LibosAddress;
+
+typedef enum {
+ LIBOS_MEMORY_REGION_NONE,
+ LIBOS_MEMORY_REGION_CONTIGUOUS,
+ LIBOS_MEMORY_REGION_RADIX3
+} LibosMemoryRegionKind;
+
+typedef enum {
+ LIBOS_MEMORY_REGION_LOC_NONE,
+ LIBOS_MEMORY_REGION_LOC_SYSMEM,
+ LIBOS_MEMORY_REGION_LOC_FB
+} LibosMemoryRegionLoc;
+
+typedef struct
+{
+ LibosAddress id8; // Id tag.
+ LibosAddress pa; // Physical address.
+ LibosAddress size; // Size of memory area.
+ NvU8 kind; // See LibosMemoryRegionKind above.
+ NvU8 loc; // See LibosMemoryRegionLoc above.
+} LibosMemoryRegionInitArgument;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h
new file mode 100644
index 000000000000..73213bdfcbda
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h
@@ -0,0 +1,79 @@
+#ifndef __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_sr_meta_h__
+#define __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_sr_meta_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define GSP_FW_SR_META_MAGIC 0x8a3bb9e6c6c39d93ULL
+#define GSP_FW_SR_META_REVISION 2
+
+typedef struct
+{
+ //
+ // Magic
+ // Use for verification by Booter
+ //
+ NvU64 magic; // = GSP_FW_SR_META_MAGIC;
+
+ //
+ // Revision number
+ // Bumped up when we change this interface so it is not backward compatible.
+ // Bumped up when we revoke GSP-RM ucode
+ //
+ NvU64 revision; // = GSP_FW_SR_META_MAGIC_REVISION;
+
+ //
+ // ---- Members regarding data in SYSMEM ----------------------------
+ // Consumed by Booter for DMA
+ //
+ NvU64 sysmemAddrOfSuspendResumeData;
+ NvU64 sizeOfSuspendResumeData;
+
+ // ---- Members for crypto ops across S/R ---------------------------
+
+ //
+ // HMAC over the entire GspFwSRMeta structure (including padding)
+ // with the hmac field itself zeroed.
+ //
+ NvU8 hmac[32];
+
+ // Hash over GspFwWprMeta structure
+ NvU8 wprMetaHash[32];
+
+ // Hash over GspFwHeapFreeList structure. All zeros signifies no free list.
+ NvU8 heapFreeListHash[32];
+
+ // Hash over data in WPR2 (skipping over free heap chunks; see Booter for details)
+ NvU8 dataHash[32];
+
+ //
+ // Pad structure to exactly 256 bytes (1 DMA chunk).
+ // Padding initialized to zero.
+ //
+ NvU32 padding[24];
+
+} GspFwSRMeta;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h
new file mode 100644
index 000000000000..a2e141e4b459
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h
@@ -0,0 +1,170 @@
+#ifndef __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_wpr_meta_h__
+#define __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_wpr_meta_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct
+{
+ // Magic
+ // BL to use for verification (i.e. Booter locked it in WPR2)
+ NvU64 magic; // = 0xdc3aae21371a60b3;
+
+ // Revision number of Booter-BL-Sequencer handoff interface
+ // Bumped up when we change this interface so it is not backward compatible.
+ // Bumped up when we revoke GSP-RM ucode
+ NvU64 revision; // = 1;
+
+ // ---- Members regarding data in SYSMEM ----------------------------
+ // Consumed by Booter for DMA
+
+ NvU64 sysmemAddrOfRadix3Elf;
+ NvU64 sizeOfRadix3Elf;
+
+ NvU64 sysmemAddrOfBootloader;
+ NvU64 sizeOfBootloader;
+
+ // Offsets inside bootloader image needed by Booter
+ NvU64 bootloaderCodeOffset;
+ NvU64 bootloaderDataOffset;
+ NvU64 bootloaderManifestOffset;
+
+ union
+ {
+ // Used only at initial boot
+ struct
+ {
+ NvU64 sysmemAddrOfSignature;
+ NvU64 sizeOfSignature;
+ };
+
+ //
+ // Used at suspend/resume to read GspFwHeapFreeList
+ // Offset relative to GspFwWprMeta FBMEM PA (gspFwWprStart)
+ //
+ struct
+ {
+ NvU32 gspFwHeapFreeListWprOffset;
+ NvU32 unused0;
+ NvU64 unused1;
+ };
+ };
+
+ // ---- Members describing FB layout --------------------------------
+ NvU64 gspFwRsvdStart;
+
+ NvU64 nonWprHeapOffset;
+ NvU64 nonWprHeapSize;
+
+ NvU64 gspFwWprStart;
+
+ // GSP-RM to use to setup heap.
+ NvU64 gspFwHeapOffset;
+ NvU64 gspFwHeapSize;
+
+ // BL to use to find ELF for jump
+ NvU64 gspFwOffset;
+ // Size is sizeOfRadix3Elf above.
+
+ NvU64 bootBinOffset;
+ // Size is sizeOfBootloader above.
+
+ NvU64 frtsOffset;
+ NvU64 frtsSize;
+
+ NvU64 gspFwWprEnd;
+
+ // GSP-RM to use for fbRegionInfo?
+ NvU64 fbSize;
+
+ // ---- Other members -----------------------------------------------
+
+ // GSP-RM to use for fbRegionInfo?
+ NvU64 vgaWorkspaceOffset;
+ NvU64 vgaWorkspaceSize;
+
+ // Boot count. Used to determine whether to load the firmware image.
+ NvU64 bootCount;
+
+ // TODO: the partitionRpc* fields below do not really belong in this
+ // structure. The values are patched in by the partition bootstrapper
+ // when GSP-RM is booted in a partition, and this structure was a
+ // convenient place for the bootstrapper to access them. These should
+ // be moved to a different comm. mechanism between the bootstrapper
+ // and the GSP-RM tasks.
+
+ union
+ {
+ struct
+ {
+ // Shared partition RPC memory (physical address)
+ NvU64 partitionRpcAddr;
+
+ // Offsets relative to partitionRpcAddr
+ NvU16 partitionRpcRequestOffset;
+ NvU16 partitionRpcReplyOffset;
+
+ // Code section and dataSection offset and size.
+ NvU32 elfCodeOffset;
+ NvU32 elfDataOffset;
+ NvU32 elfCodeSize;
+ NvU32 elfDataSize;
+
+ // Used during GSP-RM resume to check for revocation
+ NvU32 lsUcodeVersion;
+ };
+
+ struct
+ {
+ // Pad for the partitionRpc* fields, plus 4 bytes
+ NvU32 partitionRpcPadding[4];
+
+ // CrashCat (contiguous) buffer size/location - occupies same bytes as the
+ // elf(Code|Data)(Offset|Size) fields above.
+ // TODO: move to GSP_FMC_INIT_PARAMS
+ NvU64 sysmemAddrOfCrashReportQueue;
+ NvU32 sizeOfCrashReportQueue;
+
+ // Pad for the lsUcodeVersion field
+ NvU32 lsUcodeVersionPadding[1];
+ };
+ };
+
+ // Number of VF partitions allocating sub-heaps from the WPR heap
+ // Used during boot to ensure the heap is adequately sized
+ NvU8 gspFwHeapVfPartitionCount;
+
+ // Pad structure to exactly 256 bytes. Can replace padding with additional
+ // fields without incrementing revision. Padding initialized to 0.
+ NvU8 padding[7];
+
+ // BL to use for verification (i.e. Booter says OK to boot)
+ NvU64 verified; // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified
+} GspFwWprMeta;
+
+#define GSP_FW_WPR_META_REVISION 1
+#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL
+
+#endif
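A trivial consumer-side check implied by the magic/revision comments in GspFwWprMeta, assuming the structure has already been read from memory; nothing beyond the two #defines above is relied on.

static NvBool example_wpr_meta_looks_valid(const GspFwWprMeta *meta)
{
	/* reject anything not stamped with the expected magic and revision */
	return meta->magic == GSP_FW_WPR_META_MAGIC &&
	       meta->revision == GSP_FW_WPR_META_REVISION;
}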
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h
new file mode 100644
index 000000000000..4eff473e8990
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h
@@ -0,0 +1,82 @@
+#ifndef __src_nvidia_arch_nvalloc_common_inc_rmRiscvUcode_h__
+#define __src_nvidia_arch_nvalloc_common_inc_rmRiscvUcode_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct {
+ //
+ // Version 1
+ // Version 2
+ // Version 3 = for Partition boot
+ // Version 4 = for eb riscv boot
+ // Version 5 = Support signing entire RISC-V image as "code" in code section for hopper and later.
+ //
+ NvU32 version; // structure version
+ NvU32 bootloaderOffset;
+ NvU32 bootloaderSize;
+ NvU32 bootloaderParamOffset;
+ NvU32 bootloaderParamSize;
+ NvU32 riscvElfOffset;
+ NvU32 riscvElfSize;
+ NvU32 appVersion; // Changelist number associated with the image
+ //
+ // Manifest contains information about Monitor and it is
+ // input to BR
+ //
+ NvU32 manifestOffset;
+ NvU32 manifestSize;
+ //
+ // Monitor Data offset within RISCV image and size
+ //
+ NvU32 monitorDataOffset;
+ NvU32 monitorDataSize;
+ //
+ // Monitor Code offset within RISCV image and size
+ //
+ NvU32 monitorCodeOffset;
+ NvU32 monitorCodeSize;
+ NvU32 bIsMonitorEnabled;
+ //
+ // Swbrom Code offset within RISCV image and size
+ //
+ NvU32 swbromCodeOffset;
+ NvU32 swbromCodeSize;
+ //
+ // Swbrom Data offset within RISCV image and size
+ //
+ NvU32 swbromDataOffset;
+ NvU32 swbromDataSize;
+ //
+ // Total size of FB carveout (image and reserved space).
+ //
+ NvU32 fbReservedSize;
+ //
+ // Indicates whether the entire RISC-V image is signed as "code" in code section.
+ //
+ NvU32 bSignedAsCode;
+} RM_RISCV_UCODE_DESC;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h
new file mode 100644
index 000000000000..341ab0dbeaf2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h
@@ -0,0 +1,100 @@
+#ifndef __src_nvidia_arch_nvalloc_common_inc_rmgspseq_h__
+#define __src_nvidia_arch_nvalloc_common_inc_rmgspseq_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef enum GSP_SEQ_BUF_OPCODE
+{
+ GSP_SEQ_BUF_OPCODE_REG_WRITE = 0,
+ GSP_SEQ_BUF_OPCODE_REG_MODIFY,
+ GSP_SEQ_BUF_OPCODE_REG_POLL,
+ GSP_SEQ_BUF_OPCODE_DELAY_US,
+ GSP_SEQ_BUF_OPCODE_REG_STORE,
+ GSP_SEQ_BUF_OPCODE_CORE_RESET,
+ GSP_SEQ_BUF_OPCODE_CORE_START,
+ GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,
+ GSP_SEQ_BUF_OPCODE_CORE_RESUME,
+} GSP_SEQ_BUF_OPCODE;
+
+#define GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(opcode) \
+ ((opcode == GSP_SEQ_BUF_OPCODE_REG_WRITE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_WRITE) / sizeof(NvU32)) : \
+ (opcode == GSP_SEQ_BUF_OPCODE_REG_MODIFY) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_MODIFY) / sizeof(NvU32)) : \
+ (opcode == GSP_SEQ_BUF_OPCODE_REG_POLL) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_POLL) / sizeof(NvU32)) : \
+ (opcode == GSP_SEQ_BUF_OPCODE_DELAY_US) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_DELAY_US) / sizeof(NvU32)) : \
+ (opcode == GSP_SEQ_BUF_OPCODE_REG_STORE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_STORE) / sizeof(NvU32)) : \
+ /* GSP_SEQ_BUF_OPCODE_CORE_RESET */ \
+ /* GSP_SEQ_BUF_OPCODE_CORE_START */ \
+ /* GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT */ \
+ /* GSP_SEQ_BUF_OPCODE_CORE_RESUME */ \
+ 0)
+
+typedef struct
+{
+ NvU32 addr;
+ NvU32 val;
+} GSP_SEQ_BUF_PAYLOAD_REG_WRITE;
+
+typedef struct
+{
+ NvU32 addr;
+ NvU32 mask;
+ NvU32 val;
+} GSP_SEQ_BUF_PAYLOAD_REG_MODIFY;
+
+typedef struct
+{
+ NvU32 addr;
+ NvU32 mask;
+ NvU32 val;
+ NvU32 timeout;
+ NvU32 error;
+} GSP_SEQ_BUF_PAYLOAD_REG_POLL;
+
+typedef struct
+{
+ NvU32 val;
+} GSP_SEQ_BUF_PAYLOAD_DELAY_US;
+
+typedef struct
+{
+ NvU32 addr;
+ NvU32 index;
+} GSP_SEQ_BUF_PAYLOAD_REG_STORE;
+
+typedef struct GSP_SEQUENCER_BUFFER_CMD
+{
+ GSP_SEQ_BUF_OPCODE opCode;
+ union
+ {
+ GSP_SEQ_BUF_PAYLOAD_REG_WRITE regWrite;
+ GSP_SEQ_BUF_PAYLOAD_REG_MODIFY regModify;
+ GSP_SEQ_BUF_PAYLOAD_REG_POLL regPoll;
+ GSP_SEQ_BUF_PAYLOAD_DELAY_US delayUs;
+ GSP_SEQ_BUF_PAYLOAD_REG_STORE regStore;
+ } payload;
+} GSP_SEQUENCER_BUFFER_CMD;
+
+#endif
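One plausible way to use GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS when walking a sequencer buffer. The packing assumed here (a flat array of NvU32 words in which each command is one opcode word followed by its payload words) is an assumption of this sketch, not something the header states.

static void example_walk_seq_buffer(const NvU32 *buf, NvU32 len_dwords)
{
	NvU32 ptr = 0;

	while (ptr < len_dwords) {
		GSP_SEQ_BUF_OPCODE op = (GSP_SEQ_BUF_OPCODE)buf[ptr++];

		/* &buf[ptr] now points at the payload for 'op'; dispatch on it here */
		ptr += GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(op);
	}
}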
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h
new file mode 100644
index 000000000000..3144e9beac61
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h
@@ -0,0 +1,33 @@
+#ifndef __src_nvidia_generated_g_allclasses_h__
+#define __src_nvidia_generated_g_allclasses_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV01_EVENT_KERNEL_CALLBACK_EX (0x0000007e)
+
+#define NV04_DISPLAY_COMMON (0x00000073)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h
new file mode 100644
index 000000000000..6b8921138c7d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h
@@ -0,0 +1,38 @@
+#ifndef __src_nvidia_generated_g_chipset_nvoc_h__
+#define __src_nvidia_generated_g_chipset_nvoc_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct
+{
+ NvU16 deviceID; // deviceID
+ NvU16 vendorID; // vendorID
+ NvU16 subdeviceID; // subsystem deviceID
+ NvU16 subvendorID; // subsystem vendorID
+ NvU8 revisionID; // revision ID
+} BUSINFO;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h
new file mode 100644
index 000000000000..a5128f00225b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h
@@ -0,0 +1,31 @@
+#ifndef __src_nvidia_generated_g_fbsr_nvoc_h__
+#define __src_nvidia_generated_g_fbsr_nvoc_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2009-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define FBSR_TYPE_DMA 4 // Copy using DMA. Fastest.
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h
new file mode 100644
index 000000000000..5641a21cacca
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h
@@ -0,0 +1,35 @@
+#ifndef __src_nvidia_generated_g_gpu_nvoc_h__
+#define __src_nvidia_generated_g_gpu_nvoc_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef enum
+{
+ COMPUTE_BRANDING_TYPE_NONE,
+ COMPUTE_BRANDING_TYPE_TESLA,
+} COMPUTE_BRANDING_TYPE;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h
new file mode 100644
index 000000000000..b5ad55f854dc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h
@@ -0,0 +1,62 @@
+#ifndef __src_nvidia_generated_g_kernel_channel_nvoc_h__
+#define __src_nvidia_generated_g_kernel_channel_nvoc_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef enum {
+ /*!
+ * Initial state as passed in NV_CHANNEL_ALLOC_PARAMS by
+ * kernel CPU-RM clients.
+ */
+ ERROR_NOTIFIER_TYPE_UNKNOWN = 0,
+ /*! @brief Error notifier is explicitly not set.
+ *
+ * The corresponding hErrorContext or hEccErrorContext must be
+ * NV01_NULL_OBJECT.
+ */
+ ERROR_NOTIFIER_TYPE_NONE,
+ /*! @brief Error notifier is a ContextDma */
+ ERROR_NOTIFIER_TYPE_CTXDMA,
+ /*! @brief Error notifier is a NvNotification array in sysmem/vidmem */
+ ERROR_NOTIFIER_TYPE_MEMORY
+} ErrorNotifierType;
+
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE 1:0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER 0x0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN 0x1
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL 0x2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE 3:2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE 5:4
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
+
+#endif
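
The _PRIVILEGE and _ERROR_NOTIFIER_TYPE values above follow NVIDIA's "high:low" bit-range convention: 1:0 means bits 1..0 of the allocation flags word, 3:2 the next two bits, and so on. A minimal hand-rolled illustration of packing a value into such a range (the driver has its own field macros; the helper below is only a sketch):

#include <stdint.h>

/* Sketch only: set bits hi..lo of word to val, mirroring what a DRF-style
 * field macro does for ranges such as 1:0 (privilege) or 3:2 (notifier type). */
static inline uint32_t field_set(uint32_t word, unsigned hi, unsigned lo, uint32_t val)
{
	uint32_t mask = ((1u << (hi - lo + 1)) - 1) << lo;

	return (word & ~mask) | ((val << lo) & mask);
}

/* e.g. mark a channel allocation as kernel-privileged (_PRIVILEGE_KERNEL == 0x2):
 *	flags = field_set(flags, 1, 0, 0x2);
 */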
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h
new file mode 100644
index 000000000000..946954ac5b3d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h
@@ -0,0 +1,119 @@
+#ifndef __src_nvidia_generated_g_kernel_fifo_nvoc_h__
+#define __src_nvidia_generated_g_kernel_fifo_nvoc_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef enum
+{
+ /* *************************************************************************
+ * Bug 3820969
+ * THINK BEFORE CHANGING ENUM ORDER HERE.
+ * VGPU-guest uses this same ordering. Because this enum is not versioned,
+ * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
+ * ************************************************************************/
+
+ // *ENG_XYZ, e.g.: ENG_GR, ENG_CE etc.,
+ ENGINE_INFO_TYPE_ENG_DESC = 0,
+
+ // HW engine ID
+ ENGINE_INFO_TYPE_FIFO_TAG,
+
+ // RM_ENGINE_TYPE_*
+ ENGINE_INFO_TYPE_RM_ENGINE_TYPE,
+
+ //
+ // runlist id (meaning varies by GPU)
+ // Valid only for Esched-driven engines
+ //
+ ENGINE_INFO_TYPE_RUNLIST,
+
+ // NV_PFIFO_INTR_MMU_FAULT_ENG_ID_*
+ ENGINE_INFO_TYPE_MMU_FAULT_ID,
+
+ // ROBUST_CHANNEL_*
+ ENGINE_INFO_TYPE_RC_MASK,
+
+ // Reset Bit Position. On Ampere, only valid if not _INVALID
+ ENGINE_INFO_TYPE_RESET,
+
+ // Interrupt Bit Position
+ ENGINE_INFO_TYPE_INTR,
+
+ // log2(MC_ENGINE_*)
+ ENGINE_INFO_TYPE_MC,
+
+ // The DEV_TYPE_ENUM for this engine
+ ENGINE_INFO_TYPE_DEV_TYPE_ENUM,
+
+ // The particular instance of this engine type
+ ENGINE_INFO_TYPE_INSTANCE_ID,
+
+ //
+ // The base address for this engine's NV_RUNLIST. Valid only on Ampere+
+ // Valid only for Esched-driven engines
+ //
+ ENGINE_INFO_TYPE_RUNLIST_PRI_BASE,
+
+ //
+ // If this entry is a host-driven engine.
+ // Update _isEngineInfoTypeValidForOnlyHostDriven when adding any new entry.
+ //
+ ENGINE_INFO_TYPE_IS_HOST_DRIVEN_ENGINE,
+
+ //
+ // The index into the per-engine NV_RUNLIST registers. Valid only on Ampere+
+ // Valid only for Esched-driven engines
+ //
+ ENGINE_INFO_TYPE_RUNLIST_ENGINE_ID,
+
+ //
+ // The base address for this engine's NV_CHRAM registers. Valid only on
+ // Ampere+
+ //
+ // Valid only for Esched-driven engines
+ //
+ ENGINE_INFO_TYPE_CHRAM_PRI_BASE,
+
+ // This entry added to copy data at RMCTRL_EXPORT() call for Kernel RM
+ ENGINE_INFO_TYPE_KERNEL_RM_MAX,
+ // Used for iterating the engine info table by the index passed.
+ ENGINE_INFO_TYPE_INVALID = ENGINE_INFO_TYPE_KERNEL_RM_MAX,
+
+ // Size of FIFO_ENGINE_LIST.engineData
+ ENGINE_INFO_TYPE_ENGINE_DATA_ARRAY_SIZE = ENGINE_INFO_TYPE_INVALID,
+
+ // Input-only parameter for kfifoEngineInfoXlate.
+ ENGINE_INFO_TYPE_PBDMA_ID
+
+ /* *************************************************************************
+ * Bug 3820969
+ * THINK BEFORE CHANGING ENUM ORDER HERE.
+ * VGPU-guest uses this same ordering. Because this enum is not versioned,
+ * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
+ * ************************************************************************/
+} ENGINE_INFO_TYPE;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h
new file mode 100644
index 000000000000..daabaee41c87
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h
@@ -0,0 +1,32 @@
+#ifndef __src_nvidia_generated_g_mem_desc_nvoc_h__
+#define __src_nvidia_generated_g_mem_desc_nvoc_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define ADDR_SYSMEM 1 // System memory (PCI)
+#define ADDR_FBMEM 2 // Frame buffer memory space
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h
new file mode 100644
index 000000000000..754c6af42f30
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h
@@ -0,0 +1,44 @@
+#ifndef __src_nvidia_generated_g_os_nvoc_h__
+#define __src_nvidia_generated_g_os_nvoc_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct PACKED_REGISTRY_ENTRY
+{
+ NvU32 nameOffset;
+ NvU8 type;
+ NvU32 data;
+ NvU32 length;
+} PACKED_REGISTRY_ENTRY;
+
+typedef struct PACKED_REGISTRY_TABLE
+{
+ NvU32 size;
+ NvU32 numEntries;
+ PACKED_REGISTRY_ENTRY entries[0];
+} PACKED_REGISTRY_TABLE;
+
+#endif
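
PACKED_REGISTRY_TABLE carries the registry keys handed to GSP-RM as one flat blob: a header, an array of entries, and (by the usual reading of nameOffset) the NUL-terminated key names stored after the entries. A standalone userspace sketch of packing a single numeric key under that assumption; the local ENTRY/TABLE typedefs and the type value 1 are placeholders, not definitions or constants taken from the headers:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef uint8_t  NvU8;
typedef uint32_t NvU32;

typedef struct { NvU32 nameOffset; NvU8 type; NvU32 data; NvU32 length; } ENTRY;
typedef struct { NvU32 size; NvU32 numEntries; ENTRY entries[]; } TABLE;

/* Pack one entry; nameOffset is assumed to be a byte offset from the start
 * of the table to the key name stored after the entries array. */
static TABLE *pack_one_key(const char *name, NvU32 value, NvU32 *out_size)
{
	NvU32 name_off = sizeof(TABLE) + sizeof(ENTRY);
	NvU32 size = name_off + strlen(name) + 1;
	TABLE *tbl = calloc(1, size);

	if (!tbl)
		return NULL;

	tbl->size = size;
	tbl->numEntries = 1;
	tbl->entries[0].nameOffset = name_off;
	tbl->entries[0].type = 1;		/* hypothetical "DWORD" entry type */
	tbl->entries[0].data = value;
	tbl->entries[0].length = sizeof(NvU32);
	strcpy((char *)tbl + name_off, name);

	*out_size = size;
	return tbl;
}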
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h
new file mode 100644
index 000000000000..8d925e24faea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h
@@ -0,0 +1,124 @@
+#ifndef __src_nvidia_generated_g_rpc_structures_h__
+#define __src_nvidia_generated_g_rpc_structures_h__
+#include <nvrm/535.113.01/nvidia/generated/g_sdk-structures.h>
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct rpc_alloc_memory_v13_01
+{
+ NvHandle hClient;
+ NvHandle hDevice;
+ NvHandle hMemory;
+ NvU32 hClass;
+ NvU32 flags;
+ NvU32 pteAdjust;
+ NvU32 format;
+ NvU64 length NV_ALIGN_BYTES(8);
+ NvU32 pageCount;
+ struct pte_desc pteDesc;
+} rpc_alloc_memory_v13_01;
+
+typedef struct rpc_free_v03_00
+{
+ NVOS00_PARAMETERS_v03_00 params;
+} rpc_free_v03_00;
+
+typedef struct rpc_unloading_guest_driver_v1F_07
+{
+ NvBool bInPMTransition;
+ NvBool bGc6Entering;
+ NvU32 newLevel;
+} rpc_unloading_guest_driver_v1F_07;
+
+typedef struct rpc_update_bar_pde_v15_00
+{
+ UpdateBarPde_v15_00 info;
+} rpc_update_bar_pde_v15_00;
+
+typedef struct rpc_gsp_rm_alloc_v03_00
+{
+ NvHandle hClient;
+ NvHandle hParent;
+ NvHandle hObject;
+ NvU32 hClass;
+ NvU32 status;
+ NvU32 paramsSize;
+ NvU32 flags;
+ NvU8 reserved[4];
+ NvU8 params[];
+} rpc_gsp_rm_alloc_v03_00;
+
+typedef struct rpc_gsp_rm_control_v03_00
+{
+ NvHandle hClient;
+ NvHandle hObject;
+ NvU32 cmd;
+ NvU32 status;
+ NvU32 paramsSize;
+ NvU32 flags;
+ NvU8 params[];
+} rpc_gsp_rm_control_v03_00;
+
+typedef struct rpc_run_cpu_sequencer_v17_00
+{
+ NvU32 bufferSizeDWord;
+ NvU32 cmdIndex;
+ NvU32 regSaveArea[8];
+ NvU32 commandBuffer[];
+} rpc_run_cpu_sequencer_v17_00;
+
+typedef struct rpc_post_event_v17_00
+{
+ NvHandle hClient;
+ NvHandle hEvent;
+ NvU32 notifyIndex;
+ NvU32 data;
+ NvU16 info16;
+ NvU32 status;
+ NvU32 eventDataSize;
+ NvBool bNotifyList;
+ NvU8 eventData[];
+} rpc_post_event_v17_00;
+
+typedef struct rpc_rc_triggered_v17_02
+{
+ NvU32 nv2080EngineType;
+ NvU32 chid;
+ NvU32 exceptType;
+ NvU32 scope;
+ NvU16 partitionAttributionId;
+} rpc_rc_triggered_v17_02;
+
+typedef struct rpc_os_error_log_v17_00
+{
+ NvU32 exceptType;
+ NvU32 runlistId;
+ NvU32 chid;
+ char errString[0x100];
+} rpc_os_error_log_v17_00;
+
+#endif
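
rpc_gsp_rm_alloc_v03_00 and rpc_gsp_rm_control_v03_00 both end in a flexible params[] array, so the wire size of a message is the fixed header plus paramsSize bytes of appended parameters. A hedged sketch of staging a control request into a caller-provided buffer, using the structure and typedefs above; the field usage is inferred from the names, not taken from the patch:

#include <string.h>

/* Sketch only: fill a GSP_RM_CONTROL body and return the number of bytes
 * to transmit (fixed header plus the appended parameter blob). */
static size_t stage_gsp_rm_control(void *buf, NvHandle client, NvHandle object,
				   NvU32 cmd, const void *params, NvU32 params_size)
{
	rpc_gsp_rm_control_v03_00 *rpc = buf;

	rpc->hClient    = client;
	rpc->hObject    = object;
	rpc->cmd        = cmd;
	rpc->status     = 0;
	rpc->paramsSize = params_size;
	rpc->flags      = 0;
	memcpy(rpc->params, params, params_size);

	return sizeof(*rpc) + params_size;
}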
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h
new file mode 100644
index 000000000000..e9fed4140468
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h
@@ -0,0 +1,45 @@
+#ifndef __src_nvidia_generated_g_sdk_structures_h__
+#define __src_nvidia_generated_g_sdk_structures_h__
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct NVOS00_PARAMETERS_v03_00
+{
+ NvHandle hRoot;
+ NvHandle hObjectParent;
+ NvHandle hObjectOld;
+ NvV32 status;
+} NVOS00_PARAMETERS_v03_00;
+
+typedef struct UpdateBarPde_v15_00
+{
+ NV_RPC_UPDATE_PDE_BAR_TYPE barType;
+ NvU64 entryValue NV_ALIGN_BYTES(8);
+ NvU64 entryLevelShift NV_ALIGN_BYTES(8);
+} UpdateBarPde_v15_00;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h
new file mode 100644
index 000000000000..af50b11ec3b4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h
@@ -0,0 +1,74 @@
+#ifndef __src_nvidia_inc_kernel_gpu_gpu_acpi_data_h__
+#define __src_nvidia_inc_kernel_gpu_gpu_acpi_data_h__
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct DOD_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 acpiIdListLen;
+ NvU32 acpiIdList[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} DOD_METHOD_DATA;
+
+typedef struct JT_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 jtCaps;
+ NvU16 jtRevId;
+ NvBool bSBIOSCaps;
+} JT_METHOD_DATA;
+
+typedef struct MUX_METHOD_DATA_ELEMENT
+{
+ NvU32 acpiId;
+ NvU32 mode;
+ NV_STATUS status;
+} MUX_METHOD_DATA_ELEMENT;
+
+typedef struct MUX_METHOD_DATA
+{
+ NvU32 tableLen;
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxModeTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxPartTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} MUX_METHOD_DATA;
+
+typedef struct CAPS_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 optimusCaps;
+} CAPS_METHOD_DATA;
+
+typedef struct ACPI_METHOD_DATA
+{
+ NvBool bValid;
+ DOD_METHOD_DATA dodMethodData;
+ JT_METHOD_DATA jtMethodData;
+ MUX_METHOD_DATA muxMethodData;
+ CAPS_METHOD_DATA capsMethodData;
+} ACPI_METHOD_DATA;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h
new file mode 100644
index 000000000000..e3160c60036d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h
@@ -0,0 +1,86 @@
+#ifndef __src_nvidia_inc_kernel_gpu_gpu_engine_type_h__
+#define __src_nvidia_inc_kernel_gpu_gpu_engine_type_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef enum
+{
+ RM_ENGINE_TYPE_NULL = (0x00000000),
+ RM_ENGINE_TYPE_GR0 = (0x00000001),
+ RM_ENGINE_TYPE_GR1 = (0x00000002),
+ RM_ENGINE_TYPE_GR2 = (0x00000003),
+ RM_ENGINE_TYPE_GR3 = (0x00000004),
+ RM_ENGINE_TYPE_GR4 = (0x00000005),
+ RM_ENGINE_TYPE_GR5 = (0x00000006),
+ RM_ENGINE_TYPE_GR6 = (0x00000007),
+ RM_ENGINE_TYPE_GR7 = (0x00000008),
+ RM_ENGINE_TYPE_COPY0 = (0x00000009),
+ RM_ENGINE_TYPE_COPY1 = (0x0000000a),
+ RM_ENGINE_TYPE_COPY2 = (0x0000000b),
+ RM_ENGINE_TYPE_COPY3 = (0x0000000c),
+ RM_ENGINE_TYPE_COPY4 = (0x0000000d),
+ RM_ENGINE_TYPE_COPY5 = (0x0000000e),
+ RM_ENGINE_TYPE_COPY6 = (0x0000000f),
+ RM_ENGINE_TYPE_COPY7 = (0x00000010),
+ RM_ENGINE_TYPE_COPY8 = (0x00000011),
+ RM_ENGINE_TYPE_COPY9 = (0x00000012),
+ RM_ENGINE_TYPE_NVDEC0 = (0x0000001d),
+ RM_ENGINE_TYPE_NVDEC1 = (0x0000001e),
+ RM_ENGINE_TYPE_NVDEC2 = (0x0000001f),
+ RM_ENGINE_TYPE_NVDEC3 = (0x00000020),
+ RM_ENGINE_TYPE_NVDEC4 = (0x00000021),
+ RM_ENGINE_TYPE_NVDEC5 = (0x00000022),
+ RM_ENGINE_TYPE_NVDEC6 = (0x00000023),
+ RM_ENGINE_TYPE_NVDEC7 = (0x00000024),
+ RM_ENGINE_TYPE_NVENC0 = (0x00000025),
+ RM_ENGINE_TYPE_NVENC1 = (0x00000026),
+ RM_ENGINE_TYPE_NVENC2 = (0x00000027),
+ RM_ENGINE_TYPE_VP = (0x00000028),
+ RM_ENGINE_TYPE_ME = (0x00000029),
+ RM_ENGINE_TYPE_PPP = (0x0000002a),
+ RM_ENGINE_TYPE_MPEG = (0x0000002b),
+ RM_ENGINE_TYPE_SW = (0x0000002c),
+ RM_ENGINE_TYPE_TSEC = (0x0000002d),
+ RM_ENGINE_TYPE_VIC = (0x0000002e),
+ RM_ENGINE_TYPE_MP = (0x0000002f),
+ RM_ENGINE_TYPE_SEC2 = (0x00000030),
+ RM_ENGINE_TYPE_HOST = (0x00000031),
+ RM_ENGINE_TYPE_DPU = (0x00000032),
+ RM_ENGINE_TYPE_PMU = (0x00000033),
+ RM_ENGINE_TYPE_FBFLCN = (0x00000034),
+ RM_ENGINE_TYPE_NVJPEG0 = (0x00000035),
+ RM_ENGINE_TYPE_NVJPEG1 = (0x00000036),
+ RM_ENGINE_TYPE_NVJPEG2 = (0x00000037),
+ RM_ENGINE_TYPE_NVJPEG3 = (0x00000038),
+ RM_ENGINE_TYPE_NVJPEG4 = (0x00000039),
+ RM_ENGINE_TYPE_NVJPEG5 = (0x0000003a),
+ RM_ENGINE_TYPE_NVJPEG6 = (0x0000003b),
+ RM_ENGINE_TYPE_NVJPEG7 = (0x0000003c),
+ RM_ENGINE_TYPE_OFA = (0x0000003d),
+ RM_ENGINE_TYPE_LAST = (0x0000003e),
+} RM_ENGINE_TYPE;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h
new file mode 100644
index 000000000000..3abec59f0cc4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h
@@ -0,0 +1,33 @@
+#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_fw_heap_h__
+#define __src_nvidia_inc_kernel_gpu_gsp_gsp_fw_heap_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB (96 << 10) // All architectures
+
+#define GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE ((48 << 10) * 2048) // Support 2048 channels
+
+#endif
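
Worked numbers for the two sizing parameters above: the per-GB term adds 96 KiB of GSP heap for every GiB of framebuffer, and the client-allocation term reserves 48 KiB for each of the 2048 supported channels, i.e. 96 MiB in total. A compile-time check of that arithmetic:

#define GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB    (96 << 10)
#define GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE ((48 << 10) * 2048)

_Static_assert(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB == 96 * 1024,
	       "96 KiB of heap per GiB of framebuffer");
_Static_assert(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE == 96 * 1024 * 1024,
	       "48 KiB * 2048 channels = 96 MiB");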
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h
new file mode 100644
index 000000000000..4033a6f85a76
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h
@@ -0,0 +1,57 @@
+#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_init_args_h__
+#define __src_nvidia_inc_kernel_gpu_gsp_gsp_init_args_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct {
+ RmPhysAddr sharedMemPhysAddr;
+ NvU32 pageTableEntryCount;
+ NvLength cmdQueueOffset;
+ NvLength statQueueOffset;
+ NvLength locklessCmdQueueOffset;
+ NvLength locklessStatQueueOffset;
+} MESSAGE_QUEUE_INIT_ARGUMENTS;
+
+typedef struct {
+ NvU32 oldLevel;
+ NvU32 flags;
+ NvBool bInPMTransition;
+} GSP_SR_INIT_ARGUMENTS;
+
+typedef struct
+{
+ MESSAGE_QUEUE_INIT_ARGUMENTS messageQueueInitArguments;
+ GSP_SR_INIT_ARGUMENTS srInitArguments;
+ NvU32 gpuInstance;
+
+ struct
+ {
+ NvU64 pa;
+ NvU64 size;
+ } profilerArgs;
+} GSP_ARGUMENTS_CACHED;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h
new file mode 100644
index 000000000000..eeab25a5e290
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h
@@ -0,0 +1,174 @@
+#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_static_config_h__
+#define __src_nvidia_inc_kernel_gpu_gsp_gsp_static_config_h__
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h>
+#include <nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h>
+#include <nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h>
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct GSP_VF_INFO
+{
+ NvU32 totalVFs;
+ NvU32 firstVFOffset;
+ NvU64 FirstVFBar0Address;
+ NvU64 FirstVFBar1Address;
+ NvU64 FirstVFBar2Address;
+ NvBool b64bitBar0;
+ NvBool b64bitBar1;
+ NvBool b64bitBar2;
+} GSP_VF_INFO;
+
+typedef struct GspSMInfo_t
+{
+ NvU32 version;
+ NvU32 regBankCount;
+ NvU32 regBankRegCount;
+ NvU32 maxWarpsPerSM;
+ NvU32 maxThreadsPerWarp;
+ NvU32 geomGsObufEntries;
+ NvU32 geomXbufEntries;
+ NvU32 maxSPPerSM;
+ NvU32 rtCoreCount;
+} GspSMInfo;
+
+typedef struct GspStaticConfigInfo_t
+{
+ NvU8 grCapsBits[NV0080_CTRL_GR_CAPS_TBL_SIZE];
+ NV2080_CTRL_GPU_GET_GID_INFO_PARAMS gidInfo;
+ NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS gpcInfo;
+ NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS tpcInfo[MAX_GPC_COUNT];
+ NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS zcullInfo[MAX_GPC_COUNT];
+ NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS SKUInfo;
+ NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS fbRegionInfoParams;
+ COMPUTE_BRANDING_TYPE computeBranding;
+
+ NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS sriovCaps;
+ NvU32 sriovMaxGfid;
+
+ NvU32 engineCaps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX];
+
+ GspSMInfo SM_info;
+
+ NvBool poisonFuseEnabled;
+
+ NvU64 fb_length;
+ NvU32 fbio_mask;
+ NvU32 fb_bus_width;
+ NvU32 fb_ram_type;
+ NvU32 fbp_mask;
+ NvU32 l2_cache_size;
+
+ NvU32 gfxpBufferSize[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
+ NvU32 gfxpBufferAlignment[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
+
+ NvU8 gpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvU16 gpuNameString_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvBool bGpuInternalSku;
+ NvBool bIsQuadroGeneric;
+ NvBool bIsQuadroAd;
+ NvBool bIsNvidiaNvs;
+ NvBool bIsVgx;
+ NvBool bGeforceSmb;
+ NvBool bIsTitan;
+ NvBool bIsTesla;
+ NvBool bIsMobile;
+ NvBool bIsGc6Rtd3Allowed;
+ NvBool bIsGcOffRtd3Allowed;
+ NvBool bIsGcoffLegacyAllowed;
+
+ NvU64 bar1PdeBase;
+ NvU64 bar2PdeBase;
+
+ NvBool bVbiosValid;
+ NvU32 vbiosSubVendor;
+ NvU32 vbiosSubDevice;
+
+ NvBool bPageRetirementSupported;
+
+ NvBool bSplitVasBetweenServerClientRm;
+
+ NvBool bClRootportNeedsNosnoopWAR;
+
+ VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS displaylessMaxHeads;
+ VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS displaylessMaxResolution;
+ NvU64 displaylessMaxPixels;
+
+ // Client handle for internal RMAPI control.
+ NvHandle hInternalClient;
+
+ // Device handle for internal RMAPI control.
+ NvHandle hInternalDevice;
+
+ // Subdevice handle for internal RMAPI control.
+ NvHandle hInternalSubdevice;
+
+ NvBool bSelfHostedMode;
+ NvBool bAtsSupported;
+
+ NvBool bIsGpuUefi;
+} GspStaticConfigInfo;
+
+typedef struct GspSystemInfo
+{
+ NvU64 gpuPhysAddr;
+ NvU64 gpuPhysFbAddr;
+ NvU64 gpuPhysInstAddr;
+ NvU64 nvDomainBusDeviceFunc;
+ NvU64 simAccessBufPhysAddr;
+ NvU64 pcieAtomicsOpMask;
+ NvU64 consoleMemSize;
+ NvU64 maxUserVa;
+ NvU32 pciConfigMirrorBase;
+ NvU32 pciConfigMirrorSize;
+ NvU8 oorArch;
+ NvU64 clPdbProperties;
+ NvU32 Chipset;
+ NvBool bGpuBehindBridge;
+ NvBool bMnocAvailable;
+ NvBool bUpstreamL0sUnsupported;
+ NvBool bUpstreamL1Unsupported;
+ NvBool bUpstreamL1PorSupported;
+ NvBool bUpstreamL1PorMobileOnly;
+ NvU8 upstreamAddressValid;
+ BUSINFO FHBBusInfo;
+ BUSINFO chipsetIDInfo;
+ ACPI_METHOD_DATA acpiMethodData;
+ NvU32 hypervisorType;
+ NvBool bIsPassthru;
+ NvU64 sysTimerOffsetNs;
+ GSP_VF_INFO gspVFInfo;
+} GspSystemInfo;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h
new file mode 100644
index 000000000000..bd5e01f9814b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h
@@ -0,0 +1,57 @@
+#ifndef __src_nvidia_inc_kernel_gpu_intr_engine_idx_h__
+#define __src_nvidia_inc_kernel_gpu_intr_engine_idx_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define MC_ENGINE_IDX_DISP 2
+
+#define MC_ENGINE_IDX_CE0 15
+
+#define MC_ENGINE_IDX_CE9 24
+
+#define MC_ENGINE_IDX_MSENC 38
+
+#define MC_ENGINE_IDX_MSENC2 40
+
+#define MC_ENGINE_IDX_GSP 49
+#define MC_ENGINE_IDX_NVJPG 50
+#define MC_ENGINE_IDX_NVJPEG MC_ENGINE_IDX_NVJPG
+#define MC_ENGINE_IDX_NVJPEG0 MC_ENGINE_IDX_NVJPEG
+
+#define MC_ENGINE_IDX_NVJPEG7 57
+
+#define MC_ENGINE_IDX_BSP 64
+#define MC_ENGINE_IDX_NVDEC MC_ENGINE_IDX_BSP
+#define MC_ENGINE_IDX_NVDEC0 MC_ENGINE_IDX_NVDEC
+
+#define MC_ENGINE_IDX_NVDEC7 71
+
+#define MC_ENGINE_IDX_OFA0 80
+
+#define MC_ENGINE_IDX_GR 82
+#define MC_ENGINE_IDX_GR0 MC_ENGINE_IDX_GR
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h
new file mode 100644
index 000000000000..366447a368bf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h
@@ -0,0 +1,33 @@
+#ifndef __src_nvidia_inc_kernel_gpu_nvbitmask_h__
+#define __src_nvidia_inc_kernel_gpu_nvbitmask_h__
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NVGPU_ENGINE_CAPS_MASK_BITS 32
+#define NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1)
+
+#endif
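
With RM_ENGINE_TYPE_LAST = 0x3e (62) from gpu_engine_type.h above, the capability mask array works out to two 32-bit words: (62 - 1) / 32 + 1 = 2. A compile-time check of the same arithmetic:

#define RM_ENGINE_TYPE_LAST              0x3e	/* from gpu_engine_type.h above */
#define NVGPU_ENGINE_CAPS_MASK_BITS      32
#define NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1)

_Static_assert(NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX == 2,
	       "62 engine types need two 32-bit capability words");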
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h
new file mode 100644
index 000000000000..4a850dad4776
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h
@@ -0,0 +1,31 @@
+#ifndef __src_nvidia_inc_kernel_os_nv_memory_type_h__
+#define __src_nvidia_inc_kernel_os_nv_memory_type_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV_MEMORY_WRITECOMBINED 2
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h
new file mode 100644
index 000000000000..73c57f235f6a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h
@@ -0,0 +1,262 @@
+#ifndef __src_nvidia_kernel_inc_vgpu_rpc_global_enums_h__
+#define __src_nvidia_kernel_inc_vgpu_rpc_global_enums_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#ifndef X
+# define X(UNIT, RPC) NV_VGPU_MSG_FUNCTION_##RPC,
+# define DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+enum {
+#endif
+ X(RM, NOP) // 0
+ X(RM, SET_GUEST_SYSTEM_INFO) // 1
+ X(RM, ALLOC_ROOT) // 2
+ X(RM, ALLOC_DEVICE) // 3 deprecated
+ X(RM, ALLOC_MEMORY) // 4
+ X(RM, ALLOC_CTX_DMA) // 5
+ X(RM, ALLOC_CHANNEL_DMA) // 6
+ X(RM, MAP_MEMORY) // 7
+ X(RM, BIND_CTX_DMA) // 8 deprecated
+ X(RM, ALLOC_OBJECT) // 9
+ X(RM, FREE) //10
+ X(RM, LOG) //11
+ X(RM, ALLOC_VIDMEM) //12
+ X(RM, UNMAP_MEMORY) //13
+ X(RM, MAP_MEMORY_DMA) //14
+ X(RM, UNMAP_MEMORY_DMA) //15
+ X(RM, GET_EDID) //16
+ X(RM, ALLOC_DISP_CHANNEL) //17
+ X(RM, ALLOC_DISP_OBJECT) //18
+ X(RM, ALLOC_SUBDEVICE) //19
+ X(RM, ALLOC_DYNAMIC_MEMORY) //20
+ X(RM, DUP_OBJECT) //21
+ X(RM, IDLE_CHANNELS) //22
+ X(RM, ALLOC_EVENT) //23
+ X(RM, SEND_EVENT) //24
+ X(RM, REMAPPER_CONTROL) //25 deprecated
+ X(RM, DMA_CONTROL) //26
+ X(RM, DMA_FILL_PTE_MEM) //27
+ X(RM, MANAGE_HW_RESOURCE) //28
+ X(RM, BIND_ARBITRARY_CTX_DMA) //29 deprecated
+ X(RM, CREATE_FB_SEGMENT) //30
+ X(RM, DESTROY_FB_SEGMENT) //31
+ X(RM, ALLOC_SHARE_DEVICE) //32
+ X(RM, DEFERRED_API_CONTROL) //33
+ X(RM, REMOVE_DEFERRED_API) //34
+ X(RM, SIM_ESCAPE_READ) //35
+ X(RM, SIM_ESCAPE_WRITE) //36
+ X(RM, SIM_MANAGE_DISPLAY_CONTEXT_DMA) //37
+ X(RM, FREE_VIDMEM_VIRT) //38
+ X(RM, PERF_GET_PSTATE_INFO) //39 deprecated for vGPU, used by GSP
+ X(RM, PERF_GET_PERFMON_SAMPLE) //40
+ X(RM, PERF_GET_VIRTUAL_PSTATE_INFO) //41 deprecated
+ X(RM, PERF_GET_LEVEL_INFO) //42
+ X(RM, MAP_SEMA_MEMORY) //43
+ X(RM, UNMAP_SEMA_MEMORY) //44
+ X(RM, SET_SURFACE_PROPERTIES) //45
+ X(RM, CLEANUP_SURFACE) //46
+ X(RM, UNLOADING_GUEST_DRIVER) //47
+ X(RM, TDR_SET_TIMEOUT_STATE) //48
+ X(RM, SWITCH_TO_VGA) //49
+ X(RM, GPU_EXEC_REG_OPS) //50
+ X(RM, GET_STATIC_INFO) //51
+ X(RM, ALLOC_VIRTMEM) //52
+ X(RM, UPDATE_PDE_2) //53
+ X(RM, SET_PAGE_DIRECTORY) //54
+ X(RM, GET_STATIC_PSTATE_INFO) //55
+ X(RM, TRANSLATE_GUEST_GPU_PTES) //56
+ X(RM, RESERVED_57) //57
+ X(RM, RESET_CURRENT_GR_CONTEXT) //58
+ X(RM, SET_SEMA_MEM_VALIDATION_STATE) //59
+ X(RM, GET_ENGINE_UTILIZATION) //60
+ X(RM, UPDATE_GPU_PDES) //61
+ X(RM, GET_ENCODER_CAPACITY) //62
+ X(RM, VGPU_PF_REG_READ32) //63
+ X(RM, SET_GUEST_SYSTEM_INFO_EXT) //64
+ X(GSP, GET_GSP_STATIC_INFO) //65
+ X(RM, RMFS_INIT) //66
+ X(RM, RMFS_CLOSE_QUEUE) //67
+ X(RM, RMFS_CLEANUP) //68
+ X(RM, RMFS_TEST) //69
+ X(RM, UPDATE_BAR_PDE) //70
+ X(RM, CONTINUATION_RECORD) //71
+ X(RM, GSP_SET_SYSTEM_INFO) //72
+ X(RM, SET_REGISTRY) //73
+ X(GSP, GSP_INIT_POST_OBJGPU) //74 deprecated
+ X(RM, SUBDEV_EVENT_SET_NOTIFICATION) //75 deprecated
+ X(GSP, GSP_RM_CONTROL) //76
+ X(RM, GET_STATIC_INFO2) //77
+ X(RM, DUMP_PROTOBUF_COMPONENT) //78
+ X(RM, UNSET_PAGE_DIRECTORY) //79
+ X(RM, GET_CONSOLIDATED_STATIC_INFO) //80
+ X(RM, GMMU_REGISTER_FAULT_BUFFER) //81 deprecated
+ X(RM, GMMU_UNREGISTER_FAULT_BUFFER) //82 deprecated
+ X(RM, GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER) //83 deprecated
+ X(RM, GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER) //84 deprecated
+ X(RM, CTRL_SET_VGPU_FB_USAGE) //85
+ X(RM, CTRL_NVFBC_SW_SESSION_UPDATE_INFO) //86
+ X(RM, CTRL_NVENC_SW_SESSION_UPDATE_INFO) //87
+ X(RM, CTRL_RESET_CHANNEL) //88
+ X(RM, CTRL_RESET_ISOLATED_CHANNEL) //89
+ X(RM, CTRL_GPU_HANDLE_VF_PRI_FAULT) //90
+ X(RM, CTRL_CLK_GET_EXTENDED_INFO) //91
+ X(RM, CTRL_PERF_BOOST) //92
+ X(RM, CTRL_PERF_VPSTATES_GET_CONTROL) //93
+ X(RM, CTRL_GET_ZBC_CLEAR_TABLE) //94
+ X(RM, CTRL_SET_ZBC_COLOR_CLEAR) //95
+ X(RM, CTRL_SET_ZBC_DEPTH_CLEAR) //96
+ X(RM, CTRL_GPFIFO_SCHEDULE) //97
+ X(RM, CTRL_SET_TIMESLICE) //98
+ X(RM, CTRL_PREEMPT) //99
+ X(RM, CTRL_FIFO_DISABLE_CHANNELS) //100
+ X(RM, CTRL_SET_TSG_INTERLEAVE_LEVEL) //101
+ X(RM, CTRL_SET_CHANNEL_INTERLEAVE_LEVEL) //102
+ X(GSP, GSP_RM_ALLOC) //103
+ X(RM, CTRL_GET_P2P_CAPS_V2) //104
+ X(RM, CTRL_CIPHER_AES_ENCRYPT) //105
+ X(RM, CTRL_CIPHER_SESSION_KEY) //106
+ X(RM, CTRL_CIPHER_SESSION_KEY_STATUS) //107
+ X(RM, CTRL_DBG_CLEAR_ALL_SM_ERROR_STATES) //108
+ X(RM, CTRL_DBG_READ_ALL_SM_ERROR_STATES) //109
+ X(RM, CTRL_DBG_SET_EXCEPTION_MASK) //110
+ X(RM, CTRL_GPU_PROMOTE_CTX) //111
+ X(RM, CTRL_GR_CTXSW_PREEMPTION_BIND) //112
+ X(RM, CTRL_GR_SET_CTXSW_PREEMPTION_MODE) //113
+ X(RM, CTRL_GR_CTXSW_ZCULL_BIND) //114
+ X(RM, CTRL_GPU_INITIALIZE_CTX) //115
+ X(RM, CTRL_VASPACE_COPY_SERVER_RESERVED_PDES) //116
+ X(RM, CTRL_FIFO_CLEAR_FAULTED_BIT) //117
+ X(RM, CTRL_GET_LATEST_ECC_ADDRESSES) //118
+ X(RM, CTRL_MC_SERVICE_INTERRUPTS) //119
+ X(RM, CTRL_DMA_SET_DEFAULT_VASPACE) //120
+ X(RM, CTRL_GET_CE_PCE_MASK) //121
+ X(RM, CTRL_GET_ZBC_CLEAR_TABLE_ENTRY) //122
+ X(RM, CTRL_GET_NVLINK_PEER_ID_MASK) //123
+ X(RM, CTRL_GET_NVLINK_STATUS) //124
+ X(RM, CTRL_GET_P2P_CAPS) //125
+ X(RM, CTRL_GET_P2P_CAPS_MATRIX) //126
+ X(RM, RESERVED_0) //127
+ X(RM, CTRL_RESERVE_PM_AREA_SMPC) //128
+ X(RM, CTRL_RESERVE_HWPM_LEGACY) //129
+ X(RM, CTRL_B0CC_EXEC_REG_OPS) //130
+ X(RM, CTRL_BIND_PM_RESOURCES) //131
+ X(RM, CTRL_DBG_SUSPEND_CONTEXT) //132
+ X(RM, CTRL_DBG_RESUME_CONTEXT) //133
+ X(RM, CTRL_DBG_EXEC_REG_OPS) //134
+ X(RM, CTRL_DBG_SET_MODE_MMU_DEBUG) //135
+ X(RM, CTRL_DBG_READ_SINGLE_SM_ERROR_STATE) //136
+ X(RM, CTRL_DBG_CLEAR_SINGLE_SM_ERROR_STATE) //137
+ X(RM, CTRL_DBG_SET_MODE_ERRBAR_DEBUG) //138
+ X(RM, CTRL_DBG_SET_NEXT_STOP_TRIGGER_TYPE) //139
+ X(RM, CTRL_ALLOC_PMA_STREAM) //140
+ X(RM, CTRL_PMA_STREAM_UPDATE_GET_PUT) //141
+ X(RM, CTRL_FB_GET_INFO_V2) //142
+ X(RM, CTRL_FIFO_SET_CHANNEL_PROPERTIES) //143
+ X(RM, CTRL_GR_GET_CTX_BUFFER_INFO) //144
+ X(RM, CTRL_KGR_GET_CTX_BUFFER_PTES) //145
+ X(RM, CTRL_GPU_EVICT_CTX) //146
+ X(RM, CTRL_FB_GET_FS_INFO) //147
+ X(RM, CTRL_GRMGR_GET_GR_FS_INFO) //148
+ X(RM, CTRL_STOP_CHANNEL) //149
+ X(RM, CTRL_GR_PC_SAMPLING_MODE) //150
+ X(RM, CTRL_PERF_RATED_TDP_GET_STATUS) //151
+ X(RM, CTRL_PERF_RATED_TDP_SET_CONTROL) //152
+ X(RM, CTRL_FREE_PMA_STREAM) //153
+ X(RM, CTRL_TIMER_SET_GR_TICK_FREQ) //154
+ X(RM, CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB) //155
+ X(RM, GET_CONSOLIDATED_GR_STATIC_INFO) //156
+ X(RM, CTRL_DBG_SET_SINGLE_SM_SINGLE_STEP) //157
+ X(RM, CTRL_GR_GET_TPC_PARTITION_MODE) //158
+ X(RM, CTRL_GR_SET_TPC_PARTITION_MODE) //159
+ X(UVM, UVM_PAGING_CHANNEL_ALLOCATE) //160
+ X(UVM, UVM_PAGING_CHANNEL_DESTROY) //161
+ X(UVM, UVM_PAGING_CHANNEL_MAP) //162
+ X(UVM, UVM_PAGING_CHANNEL_UNMAP) //163
+ X(UVM, UVM_PAGING_CHANNEL_PUSH_STREAM) //164
+ X(UVM, UVM_PAGING_CHANNEL_SET_HANDLES) //165
+ X(UVM, UVM_METHOD_STREAM_GUEST_PAGES_OPERATION) //166
+ X(RM, CTRL_INTERNAL_QUIESCE_PMA_CHANNEL) //167
+ X(RM, DCE_RM_INIT) //168
+ X(RM, REGISTER_VIRTUAL_EVENT_BUFFER) //169
+ X(RM, CTRL_EVENT_BUFFER_UPDATE_GET) //170
+ X(RM, GET_PLCABLE_ADDRESS_KIND) //171
+ X(RM, CTRL_PERF_LIMITS_SET_STATUS_V2) //172
+ X(RM, CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM) //173
+ X(RM, CTRL_GET_MMU_DEBUG_MODE) //174
+ X(RM, CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS) //175
+ X(RM, CTRL_FLCN_GET_CTX_BUFFER_SIZE) //176
+ X(RM, CTRL_FLCN_GET_CTX_BUFFER_INFO) //177
+ X(RM, DISABLE_CHANNELS) //178
+ X(RM, CTRL_FABRIC_MEMORY_DESCRIBE) //179
+ X(RM, CTRL_FABRIC_MEM_STATS) //180
+ X(RM, SAVE_HIBERNATION_DATA) //181
+ X(RM, RESTORE_HIBERNATION_DATA) //182
+ X(RM, CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED) //183
+ X(RM, CTRL_EXEC_PARTITIONS_CREATE) //184
+ X(RM, CTRL_EXEC_PARTITIONS_DELETE) //185
+ X(RM, CTRL_GPFIFO_GET_WORK_SUBMIT_TOKEN) //186
+ X(RM, CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX) //187
+ X(RM, PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION) //188
+ X(RM, CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK) //189
+ X(RM, SET_SYSMEM_DIRTY_PAGE_TRACKING_BUFFER) //190
+ X(RM, CTRL_SUBDEVICE_GET_P2P_CAPS) // 191
+ X(RM, CTRL_BUS_SET_P2P_MAPPING) // 192
+ X(RM, CTRL_BUS_UNSET_P2P_MAPPING) // 193
+ X(RM, CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK) // 194
+ X(RM, CTRL_GPU_MIGRATABLE_OPS) // 195
+ X(RM, CTRL_GET_TOTAL_HS_CREDITS) // 196
+ X(RM, CTRL_GET_HS_CREDITS) // 197
+ X(RM, CTRL_SET_HS_CREDITS) // 198
+ X(RM, CTRL_PM_AREA_PC_SAMPLER) // 199
+ X(RM, INVALIDATE_TLB) // 200
+ X(RM, NUM_FUNCTIONS) //END
+#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+};
+# undef X
+# undef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+#endif
+
+#ifndef E
+# define E(RPC) NV_VGPU_MSG_EVENT_##RPC,
+# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+enum {
+#endif
+ E(FIRST_EVENT = 0x1000) // 0x1000
+ E(GSP_INIT_DONE) // 0x1001
+ E(GSP_RUN_CPU_SEQUENCER) // 0x1002
+ E(POST_EVENT) // 0x1003
+ E(RC_TRIGGERED) // 0x1004
+ E(MMU_FAULT_QUEUED) // 0x1005
+ E(OS_ERROR_LOG) // 0x1006
+ E(RG_LINE_INTR) // 0x1007
+ E(GPUACCT_PERFMON_UTIL_SAMPLES) // 0x1008
+ E(SIM_READ) // 0x1009
+ E(SIM_WRITE) // 0x100a
+ E(SEMAPHORE_SCHEDULE_CALLBACK) // 0x100b
+ E(UCODE_LIBOS_PRINT) // 0x100c
+ E(VGPU_GSP_PLUGIN_TRIGGERED) // 0x100d
+ E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK) // 0x100e
+ E(PERF_BRIDGELESS_INFO_UPDATE) // 0x100f
+ E(VGPU_CONFIG) // 0x1010
+ E(DISPLAY_MODESET) // 0x1011
+ E(EXTDEV_INTR_SERVICE) // 0x1012
+ E(NVLINK_INBAND_RECEIVED_DATA_256) // 0x1013
+ E(NVLINK_INBAND_RECEIVED_DATA_512) // 0x1014
+ E(NVLINK_INBAND_RECEIVED_DATA_1024) // 0x1015
+ E(NVLINK_INBAND_RECEIVED_DATA_2048) // 0x1016
+ E(NVLINK_INBAND_RECEIVED_DATA_4096) // 0x1017
+ E(TIMED_SEMAPHORE_RELEASE) // 0x1018
+ E(NVLINK_IS_GPU_DEGRADED) // 0x1019
+ E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK) // 0x101a
+ E(GSP_SEND_USER_SHARED_DATA) // 0x101b
+ E(NVLINK_FAULT_UP) // 0x101c
+ E(GSP_LOCKDOWN_NOTICE) // 0x101d
+ E(MIG_CI_CONFIG_UPDATE) // 0x101e
+ E(NUM_EVENTS) // END
+#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+};
+# undef E
+# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+#endif
+
+#endif
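
The X(UNIT, RPC) and E(RPC) lists above are X-macros: the same table expands here into the RPC function and event enums, and can be expanded again elsewhere into other artifacts such as a table of names for log output. A self-contained illustration of the pattern, not code from the driver:

/* One list, expanded twice: once into enumerators, once into strings. */
#define RPC_LIST(ENTRY)              \
	ENTRY(NOP)                   \
	ENTRY(SET_GUEST_SYSTEM_INFO) \
	ENTRY(ALLOC_ROOT)

#define AS_ENUM(name)   RPC_FN_##name,
#define AS_STRING(name) #name,

enum rpc_fn { RPC_LIST(AS_ENUM) RPC_FN_COUNT };

static const char *const rpc_fn_names[] = { RPC_LIST(AS_STRING) };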
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h
new file mode 100644
index 000000000000..f14b23852456
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h
@@ -0,0 +1,51 @@
+#ifndef __src_nvidia_kernel_inc_vgpu_rpc_headers_h__
+#define __src_nvidia_kernel_inc_vgpu_rpc_headers_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define MAX_GPC_COUNT 32
+
+typedef enum
+{
+ NV_RPC_UPDATE_PDE_BAR_1,
+ NV_RPC_UPDATE_PDE_BAR_2,
+ NV_RPC_UPDATE_PDE_BAR_INVALID,
+} NV_RPC_UPDATE_PDE_BAR_TYPE;
+
+typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS
+{
+ NvU32 headIndex;
+ NvU32 maxHResolution;
+ NvU32 maxVResolution;
+} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS;
+
+typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS
+{
+ NvU32 numHeads;
+ NvU32 maxNumHeads;
+} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h
new file mode 100644
index 000000000000..7801af232dff
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h
@@ -0,0 +1,40 @@
+#ifndef __src_nvidia_kernel_inc_vgpu_sdk_structures_h__
+#define __src_nvidia_kernel_inc_vgpu_sdk_structures_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+struct pte_desc
+{
+ NvU32 idr:2;
+ NvU32 reserved1:14;
+ NvU32 length:16;
+ union {
+ NvU64 pte; // PTE when IDR==0; PDE when IDR > 0
+ NvU64 pde; // PTE when IDR==0; PDE when IDR > 0
+ } pte_pde[] NV_ALIGN_BYTES(8); // PTE when IDR==0; PDE when IDR > 0
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h b/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h
new file mode 100644
index 000000000000..e6833df1ccc7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVRM_NVTYPES_H__
+#define __NVRM_NVTYPES_H__
+
+#define NV_ALIGN_BYTES(a) __attribute__ ((__aligned__(a)))
+#define NV_DECLARE_ALIGNED(f,a) f __attribute__ ((__aligned__(a)))
+
+typedef u32 NvV32;
+
+typedef u8 NvU8;
+typedef u16 NvU16;
+typedef u32 NvU32;
+typedef u64 NvU64;
+
+typedef void* NvP64;
+
+typedef NvU8 NvBool;
+typedef NvU32 NvHandle;
+typedef NvU64 NvLength;
+
+typedef NvU64 RmPhysAddr;
+
+typedef NvU32 NV_STATUS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 9e878cdc8e38..479effcf607e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -27,6 +27,8 @@
#include "dispnv04/hw.h"
#include "nouveau_encoder.h"
+#include <subdev/gsp.h>
+
#include <linux/io-mapping.h>
#include <linux/firmware.h>
@@ -2087,7 +2089,8 @@ nouveau_bios_init(struct drm_device *dev)
int ret;
/* only relevant for PCI devices */
- if (!dev_is_pci(dev->dev))
+ if (!dev_is_pci(dev->dev) ||
+ nvkm_gsp_rm(nvxx_device(&drm->client.device)->gsp))
return 0;
if (!NVInitVBIOS(dev))
diff --git a/drivers/gpu/drm/nouveau/nvif/disp.c b/drivers/gpu/drm/nouveau/nvif/disp.c
index 097246e10cdb..14da22fa3b5b 100644
--- a/drivers/gpu/drm/nouveau/nvif/disp.c
+++ b/drivers/gpu/drm/nouveau/nvif/disp.c
@@ -36,6 +36,7 @@ int
nvif_disp_ctor(struct nvif_device *device, const char *name, s32 oclass, struct nvif_disp *disp)
{
static const struct nvif_mclass disps[] = {
+ { AD102_DISP, 0 },
{ GA102_DISP, 0 },
{ TU102_DISP, 0 },
{ GV100_DISP, 0 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
index 374212da9e95..adc60b25f8e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
@@ -112,6 +112,22 @@ nvkm_firmware_put(const struct firmware *fw)
#define nvkm_firmware_mem(p) container_of((p), struct nvkm_firmware, mem.memory)
+static struct scatterlist *
+nvkm_firmware_mem_sgl(struct nvkm_memory *memory)
+{
+	struct nvkm_firmware *fw = nvkm_firmware_mem(memory);
+
+	switch (fw->func->type) {
+	case NVKM_FIRMWARE_IMG_DMA: return &fw->mem.sgl;
+	case NVKM_FIRMWARE_IMG_SGT: return fw->mem.sgt.sgl;
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	return NULL;
+}
+
static int
nvkm_firmware_mem_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
struct nvkm_vma *vma, void *argv, u32 argc)
@@ -120,10 +136,10 @@ nvkm_firmware_mem_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *v
struct nvkm_vmm_map map = {
.memory = &fw->mem.memory,
.offset = offset,
- .sgl = &fw->mem.sgl,
+ .sgl = nvkm_firmware_mem_sgl(memory),
};
- if (WARN_ON(fw->func->type != NVKM_FIRMWARE_IMG_DMA))
+ if (!map.sgl)
return -ENOSYS;
return nvkm_vmm_map(vmm, vma, argv, argc, &map);
@@ -132,12 +148,15 @@ nvkm_firmware_mem_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *v
static u64
nvkm_firmware_mem_size(struct nvkm_memory *memory)
{
- return sg_dma_len(&nvkm_firmware_mem(memory)->mem.sgl);
+ struct scatterlist *sgl = nvkm_firmware_mem_sgl(memory);
+
+ return sgl ? sg_dma_len(sgl) : 0;
}
static u64
nvkm_firmware_mem_addr(struct nvkm_memory *memory)
{
+ BUG_ON(nvkm_firmware_mem(memory)->func->type != NVKM_FIRMWARE_IMG_DMA);
return nvkm_firmware_mem(memory)->phys;
}
@@ -188,6 +207,12 @@ nvkm_firmware_dtor(struct nvkm_firmware *fw)
nvkm_memory_unref(&memory);
dma_free_coherent(fw->device->dev, sg_dma_len(&fw->mem.sgl), fw->img, fw->phys);
break;
+ case NVKM_FIRMWARE_IMG_SGT:
+ nvkm_memory_unref(&memory);
+ dma_unmap_sgtable(fw->device->dev, &fw->mem.sgt, DMA_TO_DEVICE, 0);
+ sg_free_table(&fw->mem.sgt);
+ vfree(fw->img);
+ break;
default:
WARN_ON(1);
break;
@@ -225,6 +250,49 @@ nvkm_firmware_ctor(const struct nvkm_firmware_func *func, const char *name,
sg_dma_len(&fw->mem.sgl) = len;
}
break;
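+ /* SGT-backed images: the firmware is copied into vmalloc()ed pages,
+ * which are then described by a DMA-mapped sg_table for device access.
+ */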
+ case NVKM_FIRMWARE_IMG_SGT:
+ len = ALIGN(fw->len, PAGE_SIZE);
+
+ fw->img = vmalloc(len);
+ if (fw->img) {
+ int pages = len >> PAGE_SHIFT;
+ int ret = 0;
+
+ memcpy(fw->img, src, fw->len);
+
+ ret = sg_alloc_table(&fw->mem.sgt, pages, GFP_KERNEL);
+ if (ret == 0) {
+ struct scatterlist *sgl;
+ u8 *data = fw->img;
+ int i;
+
+ for_each_sgtable_sg(&fw->mem.sgt, sgl, i) {
+ struct page *page = vmalloc_to_page(data);
+
+ if (!page) {
+ ret = -EFAULT;
+ break;
+ }
+
+ sg_set_page(sgl, page, PAGE_SIZE, 0);
+ data += PAGE_SIZE;
+ }
+
+ if (ret == 0) {
+ ret = dma_map_sgtable(fw->device->dev, &fw->mem.sgt,
+ DMA_TO_DEVICE, 0);
+ }
+
+ if (ret)
+ sg_free_table(&fw->mem.sgt);
+ }
+
+ if (ret) {
+ vfree(fw->img);
+ fw->img = NULL;
+ }
+ }
+ break;
default:
WARN_ON(1);
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
index c6dfed18f35b..bfaaff645a34 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
@@ -17,6 +17,8 @@ include $(src)/nvkm/engine/msppp/Kbuild
include $(src)/nvkm/engine/msvld/Kbuild
include $(src)/nvkm/engine/nvenc/Kbuild
include $(src)/nvkm/engine/nvdec/Kbuild
+include $(src)/nvkm/engine/nvjpg/Kbuild
+include $(src)/nvkm/engine/ofa/Kbuild
include $(src)/nvkm/engine/pm/Kbuild
include $(src)/nvkm/engine/sec/Kbuild
include $(src)/nvkm/engine/sec2/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index 8bf1635ffabc..165d61fc5d6c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -10,3 +10,5 @@ nvkm-y += nvkm/engine/ce/gv100.o
nvkm-y += nvkm/engine/ce/tu102.o
nvkm-y += nvkm/engine/ce/ga100.o
nvkm-y += nvkm/engine/ce/ga102.o
+
+nvkm-y += nvkm/engine/ce/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
index 315a69f7fdd1..9427a592bd16 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
@@ -21,6 +21,7 @@
*/
#include "priv.h"
+#include <subdev/gsp.h>
#include <subdev/vfn.h>
#include <nvif/class.h>
@@ -88,5 +89,8 @@ int
ga100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_ce_new(&ga100_ce, device, type, inst, pengine);
+
return nvkm_engine_new_(&ga100_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
index 461b73c7e2e0..ce56ede7c2e9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
@@ -21,6 +21,8 @@
*/
#include "priv.h"
+#include <subdev/gsp.h>
+
#include <nvif/class.h>
static const struct nvkm_engine_func
@@ -41,5 +43,8 @@ int
ga102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_ce_new(&ga102_ce, device, type, inst, pengine);
+
return nvkm_engine_new_(&ga102_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
index 0be72c463b21..806a76a72249 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
@@ -3,6 +3,9 @@
#define __NVKM_CE_PRIV_H__
#include <engine/ce.h>
+int r535_ce_new(const struct nvkm_engine_func *, struct nvkm_device *,
+ enum nvkm_subdev_type, int, struct nvkm_engine **);
+
void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_chan *);
void gk104_ce_intr(struct nvkm_engine *);
void gp100_ce_intr(struct nvkm_engine *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c
new file mode 100644
index 000000000000..bd0d435dbbd3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/object.h>
+#include <subdev/gsp.h>
+#include <engine/fifo.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h>
+
+struct r535_ce_obj {
+ struct nvkm_object object;
+ struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_ce_obj_dtor(struct nvkm_object *object)
+{
+ struct r535_ce_obj *obj = container_of(object, typeof(*obj), object);
+
+ nvkm_gsp_rm_free(&obj->rm);
+ return obj;
+}
+
+static const struct nvkm_object_func
+r535_ce_obj = {
+ .dtor = r535_ce_obj_dtor,
+};
+
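+/* Allocate the copy-engine class on the channel's RM object, passing
+ * which CE instance to use in the allocation parameters.
+ */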
+static int
+r535_ce_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+ struct r535_ce_obj *obj;
+ NVC0B5_ALLOCATION_PARAMETERS *args;
+
+ if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_object_ctor(&r535_ce_obj, oclass, &obj->object);
+ *pobject = &obj->object;
+
+ args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
+ sizeof(*args), &obj->rm);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->version = 1;
+ args->engineType = NV2080_ENGINE_TYPE_COPY0 + oclass->engine->subdev.inst;
+
+ return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
+}
+
+static void *
+r535_ce_dtor(struct nvkm_engine *engine)
+{
+ kfree(engine->func);
+ return engine;
+}
+
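+/* Build a copy of the HW engine description whose classes are allocated
+ * through GSP-RM rather than created directly by the host driver.
+ */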
+int
+r535_ce_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
+{
+ struct nvkm_engine_func *rm;
+ int nclass, ret;
+
+ for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+ if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+ return -ENOMEM;
+
+ rm->dtor = r535_ce_dtor;
+ for (int i = 0; i < nclass; i++) {
+ rm->sclass[i].minver = hw->sclass[i].minver;
+ rm->sclass[i].maxver = hw->sclass[i].maxver;
+ rm->sclass[i].oclass = hw->sclass[i].oclass;
+ rm->sclass[i].ctor = r535_ce_obj_ctor;
+ }
+
+ ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
+ if (ret)
+ kfree(rm);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
index 9563c0175142..7c8647dcb349 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
@@ -21,6 +21,8 @@
*/
#include "priv.h"
+#include <subdev/gsp.h>
+
#include <nvif/class.h>
static const struct nvkm_engine_func
@@ -37,5 +39,8 @@ int
tu102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_ce_new(&tu102_ce, device, type, inst, pengine);
+
return nvkm_engine_new_(&tu102_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 1c81e5b34d29..31ed3da32fe7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2408,7 +2408,7 @@ nv162_chipset = {
.fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
- .gsp = { 0x00000001, gv100_gsp_new },
+ .gsp = { 0x00000001, tu102_gsp_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gp102_ltc_new },
@@ -2426,8 +2426,8 @@ nv162_chipset = {
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, tu102_fifo_new },
.gr = { 0x00000001, tu102_gr_new },
- .nvdec = { 0x00000001, gm107_nvdec_new },
- .nvenc = { 0x00000001, gm107_nvenc_new },
+ .nvdec = { 0x00000001, tu102_nvdec_new },
+ .nvenc = { 0x00000001, tu102_nvenc_new },
.sec2 = { 0x00000001, tu102_sec2_new },
};
@@ -2443,7 +2443,7 @@ nv164_chipset = {
.fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
- .gsp = { 0x00000001, gv100_gsp_new },
+ .gsp = { 0x00000001, tu102_gsp_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gp102_ltc_new },
@@ -2461,8 +2461,8 @@ nv164_chipset = {
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, tu102_fifo_new },
.gr = { 0x00000001, tu102_gr_new },
- .nvdec = { 0x00000003, gm107_nvdec_new },
- .nvenc = { 0x00000001, gm107_nvenc_new },
+ .nvdec = { 0x00000003, tu102_nvdec_new },
+ .nvenc = { 0x00000001, tu102_nvenc_new },
.sec2 = { 0x00000001, tu102_sec2_new },
};
@@ -2478,7 +2478,7 @@ nv166_chipset = {
.fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
- .gsp = { 0x00000001, gv100_gsp_new },
+ .gsp = { 0x00000001, tu102_gsp_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gp102_ltc_new },
@@ -2496,8 +2496,8 @@ nv166_chipset = {
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, tu102_fifo_new },
.gr = { 0x00000001, tu102_gr_new },
- .nvdec = { 0x00000007, gm107_nvdec_new },
- .nvenc = { 0x00000001, gm107_nvenc_new },
+ .nvdec = { 0x00000007, tu102_nvdec_new },
+ .nvenc = { 0x00000001, tu102_nvenc_new },
.sec2 = { 0x00000001, tu102_sec2_new },
};
@@ -2513,7 +2513,7 @@ nv167_chipset = {
.fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
- .gsp = { 0x00000001, gv100_gsp_new },
+ .gsp = { 0x00000001, tu116_gsp_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gp102_ltc_new },
@@ -2531,8 +2531,8 @@ nv167_chipset = {
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, tu102_fifo_new },
.gr = { 0x00000001, tu102_gr_new },
- .nvdec = { 0x00000001, gm107_nvdec_new },
- .nvenc = { 0x00000001, gm107_nvenc_new },
+ .nvdec = { 0x00000001, tu102_nvdec_new },
+ .nvenc = { 0x00000001, tu102_nvenc_new },
.sec2 = { 0x00000001, tu102_sec2_new },
};
@@ -2548,7 +2548,7 @@ nv168_chipset = {
.fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
- .gsp = { 0x00000001, gv100_gsp_new },
+ .gsp = { 0x00000001, tu116_gsp_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gp102_ltc_new },
@@ -2566,8 +2566,8 @@ nv168_chipset = {
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, tu102_fifo_new },
.gr = { 0x00000001, tu102_gr_new },
- .nvdec = { 0x00000001, gm107_nvdec_new },
- .nvenc = { 0x00000001, gm107_nvenc_new },
+ .nvdec = { 0x00000001, tu102_nvdec_new },
+ .nvenc = { 0x00000001, tu102_nvenc_new },
.sec2 = { 0x00000001, tu102_sec2_new },
};
@@ -2580,6 +2580,7 @@ nv170_chipset = {
.fault = { 0x00000001, tu102_fault_new },
.fb = { 0x00000001, ga100_fb_new },
.gpio = { 0x00000001, gk104_gpio_new },
+ .gsp = { 0x00000001, ga100_gsp_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.mc = { 0x00000001, ga100_mc_new },
@@ -2591,6 +2592,9 @@ nv170_chipset = {
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x000003ff, ga100_ce_new },
.fifo = { 0x00000001, ga100_fifo_new },
+ .nvdec = { 0x0000001f, ga100_nvdec_new },
+ .nvjpg = { 0x00000001, ga100_nvjpg_new },
+ .ofa = { 0x00000001, ga100_ofa_new },
};
static const struct nvkm_device_chip
@@ -2619,7 +2623,9 @@ nv172_chipset = {
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
- .nvdec = { 0x00000001, ga102_nvdec_new },
+ .nvdec = { 0x00000003, ga102_nvdec_new },
+ .nvenc = { 0x00000001, ga102_nvenc_new },
+ .ofa = { 0x00000001, ga102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2649,7 +2655,9 @@ nv173_chipset = {
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
- .nvdec = { 0x00000001, ga102_nvdec_new },
+ .nvdec = { 0x00000003, ga102_nvdec_new },
+ .nvenc = { 0x00000001, ga102_nvenc_new },
+ .ofa = { 0x00000001, ga102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2679,7 +2687,9 @@ nv174_chipset = {
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
- .nvdec = { 0x00000001, ga102_nvdec_new },
+ .nvdec = { 0x00000003, ga102_nvdec_new },
+ .nvenc = { 0x00000001, ga102_nvenc_new },
+ .ofa = { 0x00000001, ga102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2709,7 +2719,9 @@ nv176_chipset = {
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
- .nvdec = { 0x00000001, ga102_nvdec_new },
+ .nvdec = { 0x00000003, ga102_nvdec_new },
+ .nvenc = { 0x00000001, ga102_nvenc_new },
+ .ofa = { 0x00000001, ga102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2739,7 +2751,139 @@ nv177_chipset = {
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
- .nvdec = { 0x00000001, ga102_nvdec_new },
+ .nvdec = { 0x00000003, ga102_nvdec_new },
+ .nvenc = { 0x00000001, ga102_nvenc_new },
+ .ofa = { 0x00000001, ga102_ofa_new },
+ .sec2 = { 0x00000001, ga102_sec2_new },
+};
+
+static const struct nvkm_device_chip
+nv192_chipset = {
+ .name = "AD102",
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .devinit = { 0x00000001, ga100_devinit_new },
+ .fault = { 0x00000001, tu102_fault_new },
+ .fb = { 0x00000001, ga102_fb_new },
+ .gsp = { 0x00000001, ad102_gsp_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .ce = { 0x0000001f, ga102_ce_new },
+ .disp = { 0x00000001, ad102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+ .gr = { 0x00000001, ad102_gr_new },
+ .nvdec = { 0x0000000f, ad102_nvdec_new },
+ .nvenc = { 0x00000007, ad102_nvenc_new },
+ .nvjpg = { 0x0000000f, ad102_nvjpg_new },
+ .ofa = { 0x00000001, ad102_ofa_new },
+ .sec2 = { 0x00000001, ga102_sec2_new },
+};
+
+static const struct nvkm_device_chip
+nv193_chipset = {
+ .name = "AD103",
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .devinit = { 0x00000001, ga100_devinit_new },
+ .fault = { 0x00000001, tu102_fault_new },
+ .fb = { 0x00000001, ga102_fb_new },
+ .gsp = { 0x00000001, ad102_gsp_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .ce = { 0x0000001f, ga102_ce_new },
+ .disp = { 0x00000001, ad102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+ .gr = { 0x00000001, ad102_gr_new },
+ .nvdec = { 0x0000000f, ad102_nvdec_new },
+ .nvenc = { 0x00000007, ad102_nvenc_new },
+ .nvjpg = { 0x0000000f, ad102_nvjpg_new },
+ .ofa = { 0x00000001, ad102_ofa_new },
+ .sec2 = { 0x00000001, ga102_sec2_new },
+};
+
+static const struct nvkm_device_chip
+nv194_chipset = {
+ .name = "AD104",
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .devinit = { 0x00000001, ga100_devinit_new },
+ .fault = { 0x00000001, tu102_fault_new },
+ .fb = { 0x00000001, ga102_fb_new },
+ .gsp = { 0x00000001, ad102_gsp_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .ce = { 0x0000001f, ga102_ce_new },
+ .disp = { 0x00000001, ad102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+ .gr = { 0x00000001, ad102_gr_new },
+ .nvdec = { 0x0000000f, ad102_nvdec_new },
+ .nvenc = { 0x00000007, ad102_nvenc_new },
+ .nvjpg = { 0x0000000f, ad102_nvjpg_new },
+ .ofa = { 0x00000001, ad102_ofa_new },
+ .sec2 = { 0x00000001, ga102_sec2_new },
+};
+
+static const struct nvkm_device_chip
+nv196_chipset = {
+ .name = "AD106",
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .devinit = { 0x00000001, ga100_devinit_new },
+ .fault = { 0x00000001, tu102_fault_new },
+ .fb = { 0x00000001, ga102_fb_new },
+ .gsp = { 0x00000001, ad102_gsp_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .ce = { 0x0000001f, ga102_ce_new },
+ .disp = { 0x00000001, ad102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+ .gr = { 0x00000001, ad102_gr_new },
+ .nvdec = { 0x0000000f, ad102_nvdec_new },
+ .nvenc = { 0x00000007, ad102_nvenc_new },
+ .nvjpg = { 0x0000000f, ad102_nvjpg_new },
+ .ofa = { 0x00000001, ad102_ofa_new },
+ .sec2 = { 0x00000001, ga102_sec2_new },
+};
+
+static const struct nvkm_device_chip
+nv197_chipset = {
+ .name = "AD107",
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .devinit = { 0x00000001, ga100_devinit_new },
+ .fault = { 0x00000001, tu102_fault_new },
+ .fb = { 0x00000001, ga102_fb_new },
+ .gsp = { 0x00000001, ad102_gsp_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .ce = { 0x0000001f, ga102_ce_new },
+ .disp = { 0x00000001, ad102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+ .gr = { 0x00000001, ad102_gr_new },
+ .nvdec = { 0x0000000f, ad102_nvdec_new },
+ .nvenc = { 0x00000007, ad102_nvenc_new },
+ .nvjpg = { 0x0000000f, ad102_nvjpg_new },
+ .ofa = { 0x00000001, ad102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -3061,6 +3205,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x140: device->card_type = GV100; break;
case 0x160: device->card_type = TU100; break;
case 0x170: device->card_type = GA100; break;
+ case 0x190: device->card_type = AD100; break;
default:
break;
}
@@ -3163,6 +3308,11 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x174: device->chip = &nv174_chipset; break;
case 0x176: device->chip = &nv176_chipset; break;
case 0x177: device->chip = &nv177_chipset; break;
+ case 0x192: device->chip = &nv192_chipset; break;
+ case 0x193: device->chip = &nv193_chipset; break;
+ case 0x194: device->chip = &nv194_chipset; break;
+ case 0x196: device->chip = &nv196_chipset; break;
+ case 0x197: device->chip = &nv197_chipset; break;
default:
if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
switch (device->chipset) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index 24faaac15891..bf3176bec18a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -43,6 +43,8 @@
#include <engine/msvld.h>
#include <engine/nvenc.h>
#include <engine/nvdec.h>
+#include <engine/nvjpg.h>
+#include <engine/ofa.h>
#include <engine/pm.h>
#include <engine/sec.h>
#include <engine/sec2.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 9b39ec341615..7fd4800a876a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -147,6 +147,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
case GV100: args->v0.family = NV_DEVICE_INFO_V0_VOLTA; break;
case TU100: args->v0.family = NV_DEVICE_INFO_V0_TURING; break;
case GA100: args->v0.family = NV_DEVICE_INFO_V0_AMPERE; break;
+ case AD100: args->v0.family = NV_DEVICE_INFO_V0_ADA; break;
default:
args->v0.family = 0;
break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index e1aecd3fe96c..e346e924fee8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -27,6 +27,9 @@ nvkm-y += nvkm/engine/disp/gp102.o
nvkm-y += nvkm/engine/disp/gv100.o
nvkm-y += nvkm/engine/disp/tu102.o
nvkm-y += nvkm/engine/disp/ga102.o
+nvkm-y += nvkm/engine/disp/ad102.o
+
+nvkm-y += nvkm/engine/disp/r535.o
nvkm-y += nvkm/engine/disp/udisp.o
nvkm-y += nvkm/engine/disp/uconn.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c
new file mode 100644
index 000000000000..7f300a79aa29
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "chan.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_disp_func
+ad102_disp = {
+ .uevent = &gv100_disp_chan_uevent,
+ .ramht_size = 0x2000,
+ .root = { 0, 0,AD102_DISP },
+ .user = {
+ {{-1,-1,GV100_DISP_CAPS }, gv100_disp_caps_new },
+ {{ 0, 0,GA102_DISP_CURSOR }, nvkm_disp_chan_new, &gv100_disp_curs },
+ {{ 0, 0,GA102_DISP_WINDOW_IMM_CHANNEL_DMA}, nvkm_disp_wndw_new, &gv100_disp_wimm },
+ {{ 0, 0,AD102_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, &gv100_disp_core },
+ {{ 0, 0,GA102_DISP_WINDOW_CHANNEL_DMA }, nvkm_disp_wndw_new, &gv100_disp_wndw },
+ {}
+ },
+};
+
+int
+ad102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
+{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_disp_new(&ad102_disp, device, type, inst, pdisp);
+
+ return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index 39f7e7ce9f4a..457ec5db794d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -105,7 +105,7 @@ nvkm_disp_fini(struct nvkm_engine *engine, bool suspend)
struct nvkm_outp *outp;
if (disp->func->fini)
- disp->func->fini(disp);
+ disp->func->fini(disp, suspend);
list_for_each_entry(outp, &disp->outps, head) {
if (outp->func->fini)
@@ -137,7 +137,8 @@ nvkm_disp_init(struct nvkm_engine *engine)
* each output resource to 'fully enabled'.
*/
list_for_each_entry(ior, &disp->iors, head) {
- ior->func->power(ior, true, true, true, true, true);
+ if (ior->func->power)
+ ior->func->power(ior, true, true, true, true, true);
}
return 0;
@@ -208,6 +209,9 @@ nvkm_disp_dtor(struct nvkm_engine *engine)
nvkm_head_del(&head);
}
+ if (disp->func->dtor)
+ disp->func->dtor(disp);
+
return data;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.h
index 398336ffb685..020290510157 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.h
@@ -22,6 +22,10 @@ struct nvkm_disp_chan {
u64 push;
u32 suspend_put;
+
+ struct {
+ struct nvkm_gsp_object object;
+ } rm;
};
int nvkm_disp_core_new(const struct nvkm_oclass *, void *, u32, struct nvkm_object **);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
index efe66ba3c61f..ab0a85c92430 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
@@ -24,6 +24,7 @@
#include "head.h"
#include "ior.h"
+#include <subdev/gsp.h>
#include <subdev/timer.h>
#include <nvif/class.h>
@@ -147,5 +148,8 @@ int
ga102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_disp **pdisp)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_disp_new(&ga102_disp, device, type, inst, pdisp);
+
return nvkm_disp_new_(&ga102_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index b48ead31da30..83a1323600ae 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -1154,7 +1154,7 @@ gf119_disp_intr(struct nvkm_disp *disp)
}
void
-gf119_disp_fini(struct nvkm_disp *disp)
+gf119_disp_fini(struct nvkm_disp *disp, bool suspend)
{
struct nvkm_device *device = disp->engine.subdev.device;
/* disable all interrupts */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
index e1634f7bca56..cfa3698d3a2f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
@@ -96,7 +96,7 @@ gv100_sor_dp = {
.watermark = gv100_sor_dp_watermark,
};
-static void
+void
gv100_sor_hdmi_infoframe_vsi(struct nvkm_ior *ior, int head, void *data, u32 size)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
@@ -120,7 +120,7 @@ gv100_sor_hdmi_infoframe_vsi(struct nvkm_ior *ior, int head, void *data, u32 siz
nvkm_mask(device, 0x6f0100 + hoff, 0x00000001, 0x00000001);
}
-static void
+void
gv100_sor_hdmi_infoframe_avi(struct nvkm_ior *ior, int head, void *data, u32 size)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
@@ -1115,7 +1115,7 @@ gv100_disp_intr(struct nvkm_disp *disp)
}
void
-gv100_disp_fini(struct nvkm_disp *disp)
+gv100_disp_fini(struct nvkm_disp *disp, bool suspend)
{
struct nvkm_device *device = disp->engine.subdev.device;
nvkm_wr32(device, 0x611db0, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index 9beb9d1e8633..3ba04bead2f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -187,6 +187,8 @@ int gp100_sor_new(struct nvkm_disp *, int);
int gv100_sor_cnt(struct nvkm_disp *, unsigned long *);
void gv100_sor_state(struct nvkm_ior *, struct nvkm_ior_state *);
extern const struct nvkm_ior_func_hdmi gv100_sor_hdmi;
+void gv100_sor_hdmi_infoframe_avi(struct nvkm_ior *, int, void *, u32);
+void gv100_sor_hdmi_infoframe_vsi(struct nvkm_ior *, int, void *, u32);
void gv100_sor_dp_audio(struct nvkm_ior *, int, bool);
void gv100_sor_dp_audio_sym(struct nvkm_ior *, int, u16, u32);
void gv100_sor_dp_watermark(struct nvkm_ior *, int, u8);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 2d05e2f7e46b..03a5f88a4b99 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -1504,7 +1504,7 @@ nv50_disp_intr(struct nvkm_disp *disp)
}
void
-nv50_disp_fini(struct nvkm_disp *disp)
+nv50_disp_fini(struct nvkm_disp *disp, bool suspend)
{
struct nvkm_device *device = disp->engine.subdev.device;
/* disable all interrupts */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index bfb2a4db8d64..28adc5a30f2f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -386,7 +386,8 @@ nvkm_outp_new_(const struct nvkm_outp_func *func, struct nvkm_disp *disp,
outp->disp = disp;
outp->index = index;
outp->info = *dcbE;
- outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);
+ if (!disp->rm.client.gsp)
+ outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);
OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x "
"edid %x bus %d head %x",
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
index ec5292a8f3c8..a3fd7cb7c488 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
@@ -8,6 +8,9 @@ struct nvkm_head;
struct nvkm_outp;
struct dcb_output;
+int r535_disp_new(const struct nvkm_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_disp **);
+
int nvkm_disp_ctor(const struct nvkm_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_disp *);
int nvkm_disp_new_(const struct nvkm_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
@@ -15,9 +18,10 @@ int nvkm_disp_new_(const struct nvkm_disp_func *, struct nvkm_device *, enum nvk
void nvkm_disp_vblank(struct nvkm_disp *, int head);
struct nvkm_disp_func {
+ void (*dtor)(struct nvkm_disp *);
int (*oneinit)(struct nvkm_disp *);
int (*init)(struct nvkm_disp *);
- void (*fini)(struct nvkm_disp *);
+ void (*fini)(struct nvkm_disp *, bool suspend);
void (*intr)(struct nvkm_disp *);
void (*intr_error)(struct nvkm_disp *, int chid);
@@ -32,7 +36,7 @@ struct nvkm_disp_func {
u16 ramht_size;
- const struct nvkm_sclass root;
+ struct nvkm_sclass root;
struct nvkm_disp_user {
struct nvkm_sclass base;
@@ -44,7 +48,7 @@ struct nvkm_disp_func {
int nv50_disp_oneinit(struct nvkm_disp *);
int nv50_disp_init(struct nvkm_disp *);
-void nv50_disp_fini(struct nvkm_disp *);
+void nv50_disp_fini(struct nvkm_disp *, bool suspend);
void nv50_disp_intr(struct nvkm_disp *);
extern const struct nvkm_enum nv50_disp_intr_error_type[];
void nv50_disp_super(struct work_struct *);
@@ -56,12 +60,12 @@ void nv50_disp_super_2_2(struct nvkm_disp *, struct nvkm_head *);
void nv50_disp_super_3_0(struct nvkm_disp *, struct nvkm_head *);
int gf119_disp_init(struct nvkm_disp *);
-void gf119_disp_fini(struct nvkm_disp *);
+void gf119_disp_fini(struct nvkm_disp *, bool suspend);
void gf119_disp_intr(struct nvkm_disp *);
void gf119_disp_super(struct work_struct *);
void gf119_disp_intr_error(struct nvkm_disp *, int);
-void gv100_disp_fini(struct nvkm_disp *);
+void gv100_disp_fini(struct nvkm_disp *, bool suspend);
void gv100_disp_intr(struct nvkm_disp *);
void gv100_disp_super(struct work_struct *);
int gv100_disp_wndw_cnt(struct nvkm_disp *, unsigned long *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
new file mode 100644
index 000000000000..298035070b3a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
@@ -0,0 +1,1671 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "chan.h"
+#include "conn.h"
+#include "dp.h"
+#include "head.h"
+#include "ior.h"
+#include "outp.h"
+
+#include <core/ramht.h>
+#include <subdev/bios.h>
+#include <subdev/bios/conn.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu.h>
+#include <subdev/vfn.h>
+
+#include <nvhw/drf.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h>
+#include <nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h>
+
+#include <linux/acpi.h>
+
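+/* Return the BAR0 offset and size of a display channel's user register
+ * area, selected by the low byte of its class.
+ */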
+static u64
+r535_chan_user(struct nvkm_disp_chan *chan, u64 *psize)
+{
+ switch (chan->object.oclass & 0xff) {
+ case 0x7d: *psize = 0x10000; return 0x680000;
+ case 0x7e: *psize = 0x01000; return 0x690000 + (chan->head * *psize);
+ case 0x7b: *psize = 0x01000; return 0x6b0000 + (chan->head * *psize);
+ case 0x7a: *psize = 0x01000; return 0x6d8000 + (chan->head * *psize);
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ return 0ULL;
+}
+
+static void
+r535_chan_intr(struct nvkm_disp_chan *chan, bool en)
+{
+}
+
+static void
+r535_chan_fini(struct nvkm_disp_chan *chan)
+{
+ nvkm_gsp_rm_free(&chan->rm.object);
+}
+
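+/* Describe the channel's pushbuffer (target aperture, snooping, address
+ * and size) to GSP-RM before the display channel object is allocated.
+ */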
+static int
+r535_chan_push(struct nvkm_disp_chan *chan)
+{
+ struct nvkm_gsp *gsp = chan->disp->engine.subdev.device->gsp;
+ NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ if (chan->memory) {
+ switch (nvkm_memory_target(chan->memory)) {
+ case NVKM_MEM_TARGET_NCOH:
+ ctrl->addressSpace = ADDR_SYSMEM;
+ ctrl->cacheSnoop = 0;
+ break;
+ case NVKM_MEM_TARGET_HOST:
+ ctrl->addressSpace = ADDR_SYSMEM;
+ ctrl->cacheSnoop = 1;
+ break;
+ case NVKM_MEM_TARGET_VRAM:
+ ctrl->addressSpace = ADDR_FBMEM;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ ctrl->physicalAddr = nvkm_memory_addr(chan->memory);
+ ctrl->limit = nvkm_memory_size(chan->memory) - 1;
+ }
+
+ ctrl->hclass = chan->object.oclass;
+ ctrl->channelInstance = chan->head;
+ ctrl->valid = ((chan->object.oclass & 0xff) != 0x7a) ? 1 : 0;
+
+ return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+}
+
+static int
+r535_curs_init(struct nvkm_disp_chan *chan)
+{
+ NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *args;
+ int ret;
+
+ ret = r535_chan_push(chan);
+ if (ret)
+ return ret;
+
+ args = nvkm_gsp_rm_alloc_get(&chan->disp->rm.object,
+ (chan->object.oclass << 16) | chan->head,
+ chan->object.oclass, sizeof(*args), &chan->rm.object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->channelInstance = chan->head;
+
+ return nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
+}
+
+static const struct nvkm_disp_chan_func
+r535_curs_func = {
+ .init = r535_curs_init,
+ .fini = r535_chan_fini,
+ .intr = r535_chan_intr,
+ .user = r535_chan_user,
+};
+
+static const struct nvkm_disp_chan_user
+r535_curs = {
+ .func = &r535_curs_func,
+ .user = 73,
+};
+
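+/* Bind a DMA object into the display RAMHT; the context value encodes
+ * the channel id and the RM client handle.
+ */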
+static int
+r535_dmac_bind(struct nvkm_disp_chan *chan, struct nvkm_object *object, u32 handle)
+{
+ return nvkm_ramht_insert(chan->disp->ramht, object, chan->chid.user, -9, handle,
+ chan->chid.user << 25 |
+ (chan->disp->rm.client.object.handle & 0x3fff));
+}
+
+static void
+r535_dmac_fini(struct nvkm_disp_chan *chan)
+{
+ struct nvkm_device *device = chan->disp->engine.subdev.device;
+ const u32 uoff = (chan->chid.user - 1) * 0x1000;
+
+ chan->suspend_put = nvkm_rd32(device, 0x690000 + uoff);
+ r535_chan_fini(chan);
+}
+
+static int
+r535_dmac_init(struct nvkm_disp_chan *chan)
+{
+ NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args;
+ int ret;
+
+ ret = r535_chan_push(chan);
+ if (ret)
+ return ret;
+
+ args = nvkm_gsp_rm_alloc_get(&chan->disp->rm.object,
+ (chan->object.oclass << 16) | chan->head,
+ chan->object.oclass, sizeof(*args), &chan->rm.object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->channelInstance = chan->head;
+ args->offset = chan->suspend_put;
+
+ return nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
+}
+
+static int
+r535_dmac_push(struct nvkm_disp_chan *chan, u64 memory)
+{
+ chan->memory = nvkm_umem_search(chan->object.client, memory);
+ if (IS_ERR(chan->memory))
+ return PTR_ERR(chan->memory);
+
+ return 0;
+}
+
+static const struct nvkm_disp_chan_func
+r535_dmac_func = {
+ .push = r535_dmac_push,
+ .init = r535_dmac_init,
+ .fini = r535_dmac_fini,
+ .intr = r535_chan_intr,
+ .user = r535_chan_user,
+ .bind = r535_dmac_bind,
+};
+
+static const struct nvkm_disp_chan_func
+r535_wimm_func = {
+ .push = r535_dmac_push,
+ .init = r535_dmac_init,
+ .fini = r535_dmac_fini,
+ .intr = r535_chan_intr,
+ .user = r535_chan_user,
+};
+
+static const struct nvkm_disp_chan_user
+r535_wimm = {
+ .func = &r535_wimm_func,
+ .user = 33,
+};
+
+static const struct nvkm_disp_chan_user
+r535_wndw = {
+ .func = &r535_dmac_func,
+ .user = 1,
+};
+
+static void
+r535_core_fini(struct nvkm_disp_chan *chan)
+{
+ struct nvkm_device *device = chan->disp->engine.subdev.device;
+
+ chan->suspend_put = nvkm_rd32(device, 0x680000);
+ r535_chan_fini(chan);
+}
+
+static const struct nvkm_disp_chan_func
+r535_core_func = {
+ .push = r535_dmac_push,
+ .init = r535_dmac_init,
+ .fini = r535_core_fini,
+ .intr = r535_chan_intr,
+ .user = r535_chan_user,
+ .bind = r535_dmac_bind,
+};
+
+static const struct nvkm_disp_chan_user
+r535_core = {
+ .func = &r535_core_func,
+ .user = 0,
+};
+
+static int
+r535_sor_bl_set(struct nvkm_ior *sor, int lvl)
+{
+ struct nvkm_disp *disp = sor->disp;
+ NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->displayId = BIT(sor->asy.outp->index);
+ ctrl->brightness = lvl;
+
+ return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static int
+r535_sor_bl_get(struct nvkm_ior *sor)
+{
+ struct nvkm_disp *disp = sor->disp;
+ NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
+ int lvl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->displayId = BIT(sor->asy.outp->index);
+
+ ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ lvl = ctrl->brightness;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return lvl;
+}
+
+static const struct nvkm_ior_func_bl
+r535_sor_bl = {
+ .get = r535_sor_bl_get,
+ .set = r535_sor_bl_set,
+};
+
+static void
+r535_sor_hda_eld(struct nvkm_ior *sor, int head, u8 *data, u8 size)
+{
+ struct nvkm_disp *disp = sor->disp;
+ NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *ctrl;
+
+ if (WARN_ON(size > sizeof(ctrl->bufferELD)))
+ return;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return;
+
+ ctrl->displayId = BIT(sor->asy.outp->index);
+ ctrl->numELDSize = size;
+ memcpy(ctrl->bufferELD, data, size);
+ ctrl->maxFreqSupported = 0; //XXX
+ ctrl->ctrl = NVDEF(NV0073, CTRL_DFP_ELD_AUDIO_CAPS_CTRL, PD, TRUE);
+ ctrl->ctrl |= NVDEF(NV0073, CTRL_DFP_ELD_AUDIO_CAPS_CTRL, ELDV, TRUE);
+ ctrl->deviceEntry = head;
+
+ WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static void
+r535_sor_hda_hpd(struct nvkm_ior *sor, int head, bool present)
+{
+ struct nvkm_disp *disp = sor->disp;
+ NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *ctrl;
+
+ if (present)
+ return;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return;
+
+ ctrl->displayId = BIT(sor->asy.outp->index);
+ ctrl->deviceEntry = head;
+
+ WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static const struct nvkm_ior_func_hda
+r535_sor_hda = {
+ .hpd = r535_sor_hda_hpd,
+ .eld = r535_sor_hda_eld,
+};
+
+static void
+r535_sor_dp_audio_mute(struct nvkm_ior *sor, bool mute)
+{
+ struct nvkm_disp *disp = sor->disp;
+ NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return;
+
+ ctrl->displayId = BIT(sor->asy.outp->index);
+ ctrl->mute = mute;
+ WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static void
+r535_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
+{
+ struct nvkm_disp *disp = sor->disp;
+ NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS *ctrl;
+
+ if (!enable)
+ r535_sor_dp_audio_mute(sor, true);
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return;
+
+ ctrl->displayId = BIT(sor->asy.outp->index);
+ ctrl->enable = enable;
+ WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+
+ if (enable)
+ r535_sor_dp_audio_mute(sor, false);
+}
+
+static void
+r535_sor_dp_vcpi(struct nvkm_ior *sor, int head, u8 slot, u8 slot_nr, u16 pbn, u16 aligned_pbn)
+{
+ struct nvkm_disp *disp = sor->disp;
+ struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_CONFIG_STREAM, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return;
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->head = head;
+ ctrl->sorIndex = sor->id;
+ ctrl->dpLink = sor->asy.link == 2;
+ ctrl->bEnableOverride = 1;
+ ctrl->bMST = 1;
+ ctrl->hBlankSym = 0;
+ ctrl->vBlankSym = 0;
+ ctrl->colorFormat = 0;
+ ctrl->bEnableTwoHeadOneOr = 0;
+ ctrl->singleHeadMultistreamMode = 0;
+ ctrl->MST.slotStart = slot;
+ ctrl->MST.slotEnd = slot + slot_nr - 1;
+ ctrl->MST.PBN = pbn;
+ ctrl->MST.Timeslice = aligned_pbn;
+ ctrl->MST.sendACT = 0;
+ ctrl->MST.singleHeadMSTPipeline = 0;
+ ctrl->MST.bEnableAudioOverRightPanel = 0;
+ WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static int
+r535_sor_dp_sst(struct nvkm_ior *sor, int head, bool ef,
+ u32 watermark, u32 hblanksym, u32 vblanksym)
+{
+ struct nvkm_disp *disp = sor->disp;
+ struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_CONFIG_STREAM, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->head = head;
+ ctrl->sorIndex = sor->id;
+ ctrl->dpLink = sor->asy.link == 2;
+ ctrl->bEnableOverride = 1;
+ ctrl->bMST = 0;
+ ctrl->hBlankSym = hblanksym;
+ ctrl->vBlankSym = vblanksym;
+ ctrl->colorFormat = 0;
+ ctrl->bEnableTwoHeadOneOr = 0;
+ ctrl->SST.bEnhancedFraming = ef;
+ ctrl->SST.tuSize = 64;
+ ctrl->SST.waterMark = watermark;
+ ctrl->SST.bEnableAudioOverRightPanel = 0;
+ return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static const struct nvkm_ior_func_dp
+r535_sor_dp = {
+ .sst = r535_sor_dp_sst,
+ .vcpi = r535_sor_dp_vcpi,
+ .audio = r535_sor_dp_audio,
+};
+
+static void
+r535_sor_hdmi_scdc(struct nvkm_ior *sor, u32 khz, bool support, bool scrambling,
+ bool scrambling_low_rates)
+{
+ struct nvkm_outp *outp = sor->asy.outp;
+ struct nvkm_disp *disp = outp->disp;
+ NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return;
+
+ ctrl->displayId = BIT(outp->index);
+ ctrl->caps = 0;
+ if (support)
+ ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, SCDC_SUPPORTED, TRUE);
+ if (scrambling)
+ ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, GT_340MHZ_CLOCK_SUPPORTED, TRUE);
+ if (scrambling_low_rates)
+ ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, LTE_340MHZ_SCRAMBLING_SUPPORTED, TRUE);
+
+ WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static void
+r535_sor_hdmi_ctrl_audio_mute(struct nvkm_outp *outp, bool mute)
+{
+ struct nvkm_disp *disp = outp->disp;
+ NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return;
+
+ ctrl->displayId = BIT(outp->index);
+ ctrl->mute = mute;
+ WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static void
+r535_sor_hdmi_ctrl_audio(struct nvkm_outp *outp, bool enable)
+{
+ struct nvkm_disp *disp = outp->disp;
+ NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return;
+
+ ctrl->displayId = BIT(outp->index);
+ ctrl->transmitControl =
+ NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, ENABLE, YES) |
+ NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, OTHER_FRAME, DISABLE) |
+ NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, SINGLE_FRAME, DISABLE) |
+ NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, ON_HBLANK, DISABLE) |
+ NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, VIDEO_FMT, SW_CONTROLLED) |
+ NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, RESERVED_LEGACY_MODE, NO);
+ ctrl->packetSize = 10;
+ ctrl->aPacket[0] = 0x03;
+ ctrl->aPacket[1] = 0x00;
+ ctrl->aPacket[2] = 0x00;
+ ctrl->aPacket[3] = enable ? 0x10 : 0x01;
+ ctrl->aPacket[4] = 0x00;
+ ctrl->aPacket[5] = 0x00;
+ ctrl->aPacket[6] = 0x00;
+ ctrl->aPacket[7] = 0x00;
+ ctrl->aPacket[8] = 0x00;
+ ctrl->aPacket[9] = 0x00;
+ WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static void
+r535_sor_hdmi_audio(struct nvkm_ior *sor, int head, bool enable)
+{
+ struct nvkm_device *device = sor->disp->engine.subdev.device;
+ const u32 hdmi = head * 0x400;
+
+ r535_sor_hdmi_ctrl_audio(sor->asy.outp, enable);
+ r535_sor_hdmi_ctrl_audio_mute(sor->asy.outp, !enable);
+
+ /* General Control (GCP). */
+ nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000000);
+ nvkm_wr32(device, 0x6f00cc + hdmi, !enable ? 0x00000001 : 0x00000010);
+ nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000001);
+}
+
+static void
+r535_sor_hdmi_ctrl(struct nvkm_ior *sor, int head, bool enable, u8 max_ac_packet, u8 rekey)
+{
+ struct nvkm_disp *disp = sor->disp;
+ NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS *ctrl;
+
+ if (!enable)
+ return;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return;
+
+ ctrl->displayId = BIT(sor->asy.outp->index);
+ ctrl->enable = enable;
+
+ WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static const struct nvkm_ior_func_hdmi
+r535_sor_hdmi = {
+ .ctrl = r535_sor_hdmi_ctrl,
+ .scdc = r535_sor_hdmi_scdc,
+ /*TODO: SF_USER -> KMS. */
+ .infoframe_avi = gv100_sor_hdmi_infoframe_avi,
+ .infoframe_vsi = gv100_sor_hdmi_infoframe_vsi,
+ .audio = r535_sor_hdmi_audio,
+};
+
+static const struct nvkm_ior_func
+r535_sor = {
+ .hdmi = &r535_sor_hdmi,
+ .dp = &r535_sor_dp,
+ .hda = &r535_sor_hda,
+ .bl = &r535_sor_bl,
+};
+
+static int
+r535_sor_new(struct nvkm_disp *disp, int id)
+{
+ return nvkm_ior_new_(&r535_sor, disp, SOR, id, true/*XXX: hda cap*/);
+}
+
+static int
+r535_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
+{
+ *pmask = 0xf;
+ return 4;
+}
+
+static void
+r535_head_vblank_put(struct nvkm_head *head)
+{
+ struct nvkm_device *device = head->disp->engine.subdev.device;
+
+ nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000002, 0x00000000);
+}
+
+static void
+r535_head_vblank_get(struct nvkm_head *head)
+{
+ struct nvkm_device *device = head->disp->engine.subdev.device;
+
+ nvkm_wr32(device, 0x611800 + (head->id * 4), 0x00000002);
+ nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000002, 0x00000002);
+}
+
+static void
+r535_head_state(struct nvkm_head *head, struct nvkm_head_state *state)
+{
+}
+
+static const struct nvkm_head_func
+r535_head = {
+ .state = r535_head_state,
+ .vblank_get = r535_head_vblank_get,
+ .vblank_put = r535_head_vblank_put,
+};
+
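+/* Look up the connector GSP-RM reports for a display id, creating an
+ * nvkm_conn for it if one doesn't already exist.
+ */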
+static struct nvkm_conn *
+r535_conn_new(struct nvkm_disp *disp, u32 id)
+{
+ NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS *ctrl;
+ struct nvbios_connE dcbE = {};
+ struct nvkm_conn *conn;
+ int ret, index;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return (void *)ctrl;
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayId = BIT(id);
+
+ ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return (void *)ctrl;
+
+ list_for_each_entry(conn, &disp->conns, head) {
+ if (conn->index == ctrl->data[0].index) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return conn;
+ }
+ }
+
+ dcbE.type = ctrl->data[0].type;
+ index = ctrl->data[0].index;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+ ret = nvkm_conn_new(disp, index, &dcbE, &conn);
+ if (ret)
+ return ERR_PTR(ret);
+
+ list_add_tail(&conn->head, &disp->conns);
+ return conn;
+}
+
+static void
+r535_outp_release(struct nvkm_outp *outp)
+{
+ outp->disp->rm.assigned_sors &= ~BIT(outp->ior->id);
+ outp->ior->asy.outp = NULL;
+ outp->ior = NULL;
+}
+
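+/* Ask GSP-RM to assign an SOR to this output, excluding SORs already
+ * handed out, and record the assignment.
+ */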
+static int
+r535_outp_acquire(struct nvkm_outp *outp, bool hda)
+{
+ struct nvkm_disp *disp = outp->disp;
+ struct nvkm_ior *ior;
+ NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *ctrl;
+ int or;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DFP_ASSIGN_SOR, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayId = BIT(outp->index);
+ ctrl->sorExcludeMask = disp->rm.assigned_sors;
+ if (hda)
+ ctrl->flags |= NVDEF(NV0073_CTRL, DFP_ASSIGN_SOR_FLAGS, AUDIO, OPTIMAL);
+
+ ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ for (or = 0; or < ARRAY_SIZE(ctrl->sorAssignListWithTag); or++) {
+ if (ctrl->sorAssignListWithTag[or].displayMask & BIT(outp->index)) {
+ disp->rm.assigned_sors |= BIT(or);
+ break;
+ }
+ }
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+ if (WARN_ON(or == ARRAY_SIZE(ctrl->sorAssignListWithTag)))
+ return -EINVAL;
+
+ ior = nvkm_ior_find(disp, SOR, or);
+ if (WARN_ON(!ior))
+ return -EINVAL;
+
+ nvkm_outp_acquire_ior(outp, NVKM_OUTP_USER, ior);
+ return 0;
+}
+
+static int
+r535_disp_head_displayid(struct nvkm_disp *disp, int head, u32 *displayid)
+{
+ NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->head = head;
+
+ ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ *displayid = ctrl->displayId;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
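+/* Work out which SOR, if any, RM already has driving this output, so the
+ * display state programmed at boot can be inherited.
+ */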
+static struct nvkm_ior *
+r535_outp_inherit(struct nvkm_outp *outp)
+{
+ struct nvkm_disp *disp = outp->disp;
+ struct nvkm_head *head;
+ u32 displayid;
+ int ret;
+
+ list_for_each_entry(head, &disp->heads, head) {
+ ret = r535_disp_head_displayid(disp, head->id, &displayid);
+ if (WARN_ON(ret))
+ return NULL;
+
+ if (displayid == BIT(outp->index)) {
+ NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *ctrl;
+ u32 id, proto;
+ struct nvkm_ior *ior;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return NULL;
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayId = displayid;
+
+ ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return NULL;
+
+ id = ctrl->index;
+ proto = ctrl->protocol;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+ ior = nvkm_ior_find(disp, SOR, id);
+ if (WARN_ON(!ior))
+ return NULL;
+
+ switch (proto) {
+ case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A:
+ ior->arm.proto = TMDS;
+ ior->arm.link = 1;
+ break;
+ case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B:
+ ior->arm.proto = TMDS;
+ ior->arm.link = 2;
+ break;
+ case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS:
+ ior->arm.proto = TMDS;
+ ior->arm.link = 3;
+ break;
+ case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A:
+ ior->arm.proto = DP;
+ ior->arm.link = 1;
+ break;
+ case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B:
+ ior->arm.proto = DP;
+ ior->arm.link = 2;
+ break;
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+
+ ior->arm.proto_evo = proto;
+ ior->arm.head = BIT(head->id);
+ disp->rm.assigned_sors |= BIT(ior->id);
+ return ior;
+ }
+ }
+
+ return NULL;
+}
+
+static int
+r535_outp_dfp_get_info(struct nvkm_outp *outp)
+{
+ NV0073_CTRL_DFP_GET_INFO_PARAMS *ctrl;
+ struct nvkm_disp *disp = outp->disp;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DFP_GET_INFO, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->displayId = BIT(outp->index);
+
+ ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ nvkm_debug(&disp->engine.subdev, "DFP %08x: flags:%08x flags2:%08x\n",
+ ctrl->displayId, ctrl->flags, ctrl->flags2);
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r535_outp_detect(struct nvkm_outp *outp)
+{
+ NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *ctrl;
+ struct nvkm_disp *disp = outp->disp;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayMask = BIT(outp->index);
+
+ ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ if (ctrl->displayMask & BIT(outp->index)) {
+ ret = r535_outp_dfp_get_info(outp);
+ if (ret == 0)
+ ret = 1;
+ } else {
+ ret = 0;
+ }
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+}
+
+static int
+r535_dp_mst_id_put(struct nvkm_outp *outp, u32 id)
+{
+ NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS *ctrl;
+ struct nvkm_disp *disp = outp->disp;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayId = id;
+ return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static int
+r535_dp_mst_id_get(struct nvkm_outp *outp, u32 *pid)
+{
+ NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS *ctrl;
+ struct nvkm_disp *disp = outp->disp;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayId = BIT(outp->index);
+ ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ *pid = ctrl->displayIdAssigned;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r535_dp_drive(struct nvkm_outp *outp, u8 lanes, u8 pe[4], u8 vs[4])
+{
+ NV0073_CTRL_DP_LANE_DATA_PARAMS *ctrl;
+ struct nvkm_disp *disp = outp->disp;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_SET_LANE_DATA, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->displayId = BIT(outp->index);
+ ctrl->numLanes = lanes;
+ for (int i = 0; i < lanes; i++)
+ ctrl->data[i] = NVVAL(NV0073_CTRL, DP_LANE_DATA, PREEMPHASIS, pe[i]) |
+ NVVAL(NV0073_CTRL, DP_LANE_DATA, DRIVECURRENT, vs[i]);
+
+ return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static int
+r535_dp_train_target(struct nvkm_outp *outp, u8 target, bool mst, u8 link_nr, u8 link_bw)
+{
+ struct nvkm_disp *disp = outp->disp;
+ NV0073_CTRL_DP_CTRL_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_CTRL, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayId = BIT(outp->index);
+ ctrl->cmd = NVDEF(NV0073_CTRL, DP_CMD, SET_LANE_COUNT, TRUE) |
+ NVDEF(NV0073_CTRL, DP_CMD, SET_LINK_BW, TRUE) |
+ NVDEF(NV0073_CTRL, DP_CMD, TRAIN_PHY_REPEATER, YES);
+ ctrl->data = NVVAL(NV0073_CTRL, DP_DATA, SET_LANE_COUNT, link_nr) |
+ NVVAL(NV0073_CTRL, DP_DATA, SET_LINK_BW, link_bw) |
+ NVVAL(NV0073_CTRL, DP_DATA, TARGET, target);
+
+ if (mst)
+ ctrl->cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_FORMAT_MODE, MULTI_STREAM);
+
+ if (outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
+ ctrl->cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_ENHANCED_FRAMING, TRUE);
+
+ if (target == 0 &&
+ (outp->dp.dpcd[DPCD_RC02] & 0x20) &&
+ !(outp->dp.dpcd[DPCD_RC03] & DPCD_RC03_TPS4_SUPPORTED))
+ ctrl->cmd |= NVDEF(NV0073_CTRL, DP_CMD, POST_LT_ADJ_REQ_GRANTED, YES);
+
+ ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ret = ctrl->err ? -EIO : 0;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+}
+
+static int
+r535_dp_train(struct nvkm_outp *outp, bool retrain)
+{
+ for (int target = outp->dp.lttprs; target >= 0; target--) {
+ int ret = r535_dp_train_target(outp, target, outp->dp.lt.mst,
+ outp->dp.lt.nr,
+ outp->dp.lt.bw);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+r535_dp_rates(struct nvkm_outp *outp)
+{
+ NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *ctrl;
+ struct nvkm_disp *disp = outp->disp;
+
+ if (outp->conn->info.type != DCB_CONNECTOR_eDP ||
+ !outp->dp.rates || outp->dp.rate[0].dpcd < 0)
+ return 0;
+
+ if (WARN_ON(outp->dp.rates > ARRAY_SIZE(ctrl->linkRateTbl)))
+ return -EINVAL;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->displayId = BIT(outp->index);
+ for (int i = 0; i < outp->dp.rates; i++)
+ ctrl->linkRateTbl[outp->dp.rate[i].dpcd] = outp->dp.rate[i].rate * 10 / 200;
+
+ return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static int
+r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize)
+{
+ struct nvkm_disp *disp = outp->disp;
+ NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *ctrl;
+ u8 size = *psize;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_AUXCH_CTRL, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayId = BIT(outp->index);
+ ctrl->bAddrOnly = !size;
+ ctrl->cmd = type;
+ if (ctrl->bAddrOnly) {
+ ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, REQ_TYPE, WRITE);
+ ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, I2C_MOT, FALSE);
+ }
+ ctrl->addr = addr;
+ ctrl->size = !ctrl->bAddrOnly ? (size - 1) : 0;
+ memcpy(ctrl->data, data, size);
+
+ ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ memcpy(data, ctrl->data, size);
+ *psize = ctrl->size;
+ ret = ctrl->replyType;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+}
+
+static int
+r535_dp_aux_pwr(struct nvkm_outp *outp, bool pu)
+{
+ return 0;
+}
+
+static void
+r535_dp_release(struct nvkm_outp *outp)
+{
+ if (!outp->dp.lt.bw) {
+ if (!WARN_ON(!outp->dp.rates))
+ outp->dp.lt.bw = outp->dp.rate[0].rate / 27000;
+ else
+ outp->dp.lt.bw = 0x06;
+ }
+
+ outp->dp.lt.nr = 0;
+
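+ /* Retrain with zero lanes to shut the link down before releasing the output. */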
+ r535_dp_train_target(outp, 0, outp->dp.lt.mst, outp->dp.lt.nr, outp->dp.lt.bw);
+ r535_outp_release(outp);
+}
+
+static int
+r535_dp_acquire(struct nvkm_outp *outp, bool hda)
+{
+ int ret;
+
+ ret = r535_outp_acquire(outp, hda);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct nvkm_outp_func
+r535_dp = {
+ .detect = r535_outp_detect,
+ .inherit = r535_outp_inherit,
+ .acquire = r535_dp_acquire,
+ .release = r535_dp_release,
+ .dp.aux_pwr = r535_dp_aux_pwr,
+ .dp.aux_xfer = r535_dp_aux_xfer,
+ .dp.mst_id_get = r535_dp_mst_id_get,
+ .dp.mst_id_put = r535_dp_mst_id_put,
+ .dp.rates = r535_dp_rates,
+ .dp.train = r535_dp_train,
+ .dp.drive = r535_dp_drive,
+};
+
+static int
+r535_tmds_edid_get(struct nvkm_outp *outp, u8 *data, u16 *psize)
+{
+ NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *ctrl;
+ struct nvkm_disp *disp = outp->disp;
+ int ret = -E2BIG;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayId = BIT(outp->index);
+
+ ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ if (ctrl->bufferSize <= *psize) {
+ memcpy(data, ctrl->edidBuffer, ctrl->bufferSize);
+ *psize = ctrl->bufferSize;
+ ret = 0;
+ }
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+}
+
+static const struct nvkm_outp_func
+r535_tmds = {
+ .detect = r535_outp_detect,
+ .inherit = r535_outp_inherit,
+ .acquire = r535_outp_acquire,
+ .release = r535_outp_release,
+ .edid_get = r535_tmds_edid_get,
+};
+
+static int
+r535_outp_new(struct nvkm_disp *disp, u32 id)
+{
+ NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *ctrl;
+ enum nvkm_ior_proto proto;
+ struct dcb_output dcbE = {};
+ struct nvkm_conn *conn;
+ struct nvkm_outp *outp;
+ u8 locn, link = 0;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayId = BIT(id);
+
+ ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ switch (ctrl->type) {
+ case NV0073_CTRL_SPECIFIC_OR_TYPE_NONE:
+ return 0;
+ case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR:
+ switch (ctrl->protocol) {
+ case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A:
+ proto = TMDS;
+ link = 1;
+ break;
+ case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B:
+ proto = TMDS;
+ link = 2;
+ break;
+ case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS:
+ proto = TMDS;
+ link = 3;
+ break;
+ case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A:
+ proto = DP;
+ link = 1;
+ break;
+ case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B:
+ proto = DP;
+ link = 2;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ locn = ctrl->location;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+ conn = r535_conn_new(disp, id);
+ if (IS_ERR(conn))
+ return PTR_ERR(conn);
+
+ switch (proto) {
+ case TMDS: dcbE.type = DCB_OUTPUT_TMDS; break;
+ case DP: dcbE.type = DCB_OUTPUT_DP; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
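+ /* Synthesize a DCB output entry from the RM-provided OR info. */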
+ dcbE.location = locn;
+ dcbE.connector = conn->index;
+ dcbE.heads = disp->head.mask;
+ dcbE.i2c_index = 0xff;
+ dcbE.link = dcbE.sorconf.link = link;
+
+ if (proto == TMDS) {
+ ret = nvkm_outp_new_(&r535_tmds, disp, id, &dcbE, &outp);
+ if (ret)
+ return ret;
+ } else {
+ NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl;
+ bool mst, wm;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->sorIndex = ~0;
+
+ ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) {
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62:
+ dcbE.dpconf.link_bw = 0x06;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70:
+ dcbE.dpconf.link_bw = 0x0a;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40:
+ dcbE.dpconf.link_bw = 0x14;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10:
+ dcbE.dpconf.link_bw = 0x1e;
+ break;
+ default:
+ dcbE.dpconf.link_bw = 0x00;
+ break;
+ }
+
+ mst = ctrl->bIsMultistreamSupported;
+ wm = ctrl->bHasIncreasedWatermarkLimits;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+ if (WARN_ON(!dcbE.dpconf.link_bw))
+ return -EINVAL;
+
+ dcbE.dpconf.link_nr = 4;
+
+ ret = nvkm_outp_new_(&r535_dp, disp, id, &dcbE, &outp);
+ if (ret)
+ return ret;
+
+ outp->dp.mst = mst;
+ outp->dp.increased_wm = wm;
+ }
+
+ outp->conn = conn;
+ list_add_tail(&outp->head, &disp->outps);
+ return 0;
+}
+
+static void
+r535_disp_irq(struct nvkm_gsp_event *event, void *repv, u32 repc)
+{
+ struct nvkm_disp *disp = container_of(event, typeof(*disp), rm.irq);
+ Nv2080DpIrqNotification *irq = repv;
+
+ if (WARN_ON(repc < sizeof(*irq)))
+ return;
+
+ nvkm_debug(&disp->engine.subdev, "event: dp irq displayId %08x\n", irq->displayId);
+
+ if (irq->displayId)
+ nvkm_event_ntfy(&disp->rm.event, fls(irq->displayId) - 1, NVKM_DPYID_IRQ);
+}
+
+static void
+r535_disp_hpd(struct nvkm_gsp_event *event, void *repv, u32 repc)
+{
+ struct nvkm_disp *disp = container_of(event, typeof(*disp), rm.hpd);
+ Nv2080HotplugNotification *hpd = repv;
+
+ if (WARN_ON(repc < sizeof(*hpd)))
+ return;
+
+ nvkm_debug(&disp->engine.subdev, "event: hpd plug %08x unplug %08x\n",
+ hpd->plugDisplayMask, hpd->unplugDisplayMask);
+
+ for (int i = 0; i < 31; i++) {
+ u32 mask = 0;
+
+ if (hpd->plugDisplayMask & BIT(i))
+ mask |= NVKM_DPYID_PLUG;
+ if (hpd->unplugDisplayMask & BIT(i))
+ mask |= NVKM_DPYID_UNPLUG;
+
+ if (mask)
+ nvkm_event_ntfy(&disp->rm.event, i, mask);
+ }
+}
+
+static const struct nvkm_event_func
+r535_disp_event = {
+};
+
+static void
+r535_disp_intr_head_timing(struct nvkm_disp *disp, int head)
+{
+ struct nvkm_subdev *subdev = &disp->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, 0x611c00 + (head * 0x04));
+
+ if (stat & 0x00000002) {
+ nvkm_disp_vblank(disp, head);
+
+ nvkm_wr32(device, 0x611800 + (head * 0x04), 0x00000002);
+ }
+}
+
+static irqreturn_t
+r535_disp_intr(struct nvkm_inth *inth)
+{
+ struct nvkm_disp *disp = container_of(inth, typeof(*disp), engine.subdev.inth);
+ struct nvkm_subdev *subdev = &disp->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ unsigned long mask = nvkm_rd32(device, 0x611ec0) & 0x000000ff;
+ int head;
+
+ for_each_set_bit(head, &mask, 8)
+ r535_disp_intr_head_timing(disp, head);
+
+ return IRQ_HANDLED;
+}
+
+static void
+r535_disp_fini(struct nvkm_disp *disp, bool suspend)
+{
+ if (!disp->engine.subdev.use.enabled)
+ return;
+
+ nvkm_gsp_rm_free(&disp->rm.object);
+
+ if (!suspend) {
+ nvkm_gsp_event_dtor(&disp->rm.irq);
+ nvkm_gsp_event_dtor(&disp->rm.hpd);
+ nvkm_event_fini(&disp->rm.event);
+
+ nvkm_gsp_rm_free(&disp->rm.objcom);
+ nvkm_gsp_device_dtor(&disp->rm.device);
+ nvkm_gsp_client_dtor(&disp->rm.client);
+ }
+}
+
+static int
+r535_disp_init(struct nvkm_disp *disp)
+{
+ int ret;
+
+ ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, disp->func->root.oclass << 16,
+ disp->func->root.oclass, 0, &disp->rm.object);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+r535_disp_oneinit(struct nvkm_disp *disp)
+{
+ struct nvkm_device *device = disp->engine.subdev.device;
+ struct nvkm_gsp *gsp = device->gsp;
+ NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *ctrl;
+ int ret, i;
+
+ /* RAMIN. */
+ ret = nvkm_gpuobj_new(device, 0x10000, 0x10000, false, NULL, &disp->inst);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(nvkm_memory_target(disp->inst->memory) != NVKM_MEM_TARGET_VRAM))
+ return -EINVAL;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->instMemPhysAddr = nvkm_memory_addr(disp->inst->memory);
+ ctrl->instMemSize = nvkm_memory_size(disp->inst->memory);
+ ctrl->instMemAddrSpace = ADDR_FBMEM;
+ ctrl->instMemCpuCacheAttr = NV_MEMORY_WRITECOMBINED;
+
+ ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+ if (ret)
+ return ret;
+
+ /* OBJs. */
+ ret = nvkm_gsp_client_device_ctor(gsp, &disp->rm.client, &disp->rm.device);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, 0x00730000, NV04_DISPLAY_COMMON, 0,
+ &disp->rm.objcom);
+ if (ret)
+ return ret;
+
+ {
+ NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ disp->wndw.mask = ctrl->windowPresentMask;
+ disp->wndw.nr = fls(disp->wndw.mask);
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ }
+
+ /* ACPI backlight: query the NBCI/NVHG _DSM for backlight data and hand it to GSP-RM. */
+ {
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+ NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS *ctrl;
+ struct nvkm_gsp_object *subdevice = &disp->rm.client.gsp->internal.device.subdevice;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(subdevice,
+ NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->status = 0x56; /* NV_ERR_NOT_SUPPORTED */
+
+ {
+ const guid_t NBCI_DSM_GUID =
+ GUID_INIT(0xD4A50B75, 0x65C7, 0x46F7,
+ 0xBF, 0xB7, 0x41, 0x51, 0x4C, 0xEA, 0x02, 0x44);
+ u64 NBCI_DSM_REV = 0x00000102;
+ const guid_t NVHG_DSM_GUID =
+ GUID_INIT(0x9D95A0A0, 0x0060, 0x4D48,
+ 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4);
+ u64 NVHG_DSM_REV = 0x00000102;
+ acpi_handle handle = ACPI_HANDLE(device->dev);
+
+ if (handle && acpi_has_method(handle, "_DSM")) {
+ bool nbci = acpi_check_dsm(handle, &NBCI_DSM_GUID, NBCI_DSM_REV,
+ 1ULL << 0x00000014);
+ bool nvhg = acpi_check_dsm(handle, &NVHG_DSM_GUID, NVHG_DSM_REV,
+ 1ULL << 0x00000014);
+
+ printk(KERN_ERR "bl: nbci:%d nvhg:%d\n", nbci, nvhg);
+
+ if (nbci || nvhg) {
+ union acpi_object argv4 = {
+ .buffer.type = ACPI_TYPE_BUFFER,
+ .buffer.length = sizeof(ctrl->backLightData),
+ .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL),
+ }, *obj;
+
+ obj = acpi_evaluate_dsm(handle, nbci ? &NBCI_DSM_GUID : &NVHG_DSM_GUID,
+ 0x00000102, 0x14, &argv4);
+ if (!obj) {
+ acpi_handle_info(handle, "failed to evaluate _DSM\n");
+ } else {
+ printk(KERN_ERR "bl: obj type %d\n", obj->type);
+ printk(KERN_ERR "bl: obj len %d\n", obj->package.count);
+
+ for (int i = 0; i < obj->package.count; i++) {
+ union acpi_object *elt = &obj->package.elements[i];
+ u32 size;
+
+ if (elt->integer.value & ~0xffffffffULL)
+ size = 8;
+ else
+ size = 4;
+
+ printk(KERN_ERR "elt %03d: type %d size %d\n", i, elt->type, size);
+ memcpy(&ctrl->backLightData[ctrl->backLightDataSize], &elt->integer.value, size);
+ ctrl->backLightDataSize += size;
+ }
+
+ printk(KERN_ERR "bl: data size %d\n", ctrl->backLightDataSize);
+ ctrl->status = 0;
+ ACPI_FREE(obj);
+ }
+
+ kfree(argv4.buffer.pointer);
+ }
+ }
+ }
+
+ ret = nvkm_gsp_rm_ctrl_wr(subdevice, ctrl);
+ if (ret)
+ return ret;
+#endif
+ }
+
+ /* Enable manual DisplayPort control. */
+ {
+ NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ret = nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+ if (ret)
+ return ret;
+ }
+
+ /* Number of heads. */
+ {
+ NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ disp->head.nr = ctrl->numHeads;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ }
+
+ /* Head mask, and head object creation. */
+ {
+ NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ disp->head.mask = ctrl->headMask;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+ for_each_set_bit(i, &disp->head.mask, disp->head.nr) {
+ ret = nvkm_head_new_(&r535_head, disp, i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ disp->sor.nr = disp->func->sor.cnt(disp, &disp->sor.mask);
+ nvkm_debug(&disp->engine.subdev, " SOR(s): %d (%02lx)\n", disp->sor.nr, disp->sor.mask);
+ for_each_set_bit(i, &disp->sor.mask, disp->sor.nr) {
+ ret = disp->func->sor.new(disp, i);
+ if (ret)
+ return ret;
+ }
+
+ /* Supported display outputs. */
+ {
+ NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl;
+ unsigned long mask;
+ int i;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ mask = ctrl->displayMask;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+ for_each_set_bit(i, &mask, 32) {
+ ret = r535_outp_new(disp, i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ ret = nvkm_event_init(&r535_disp_event, &gsp->subdev, 3, 32, &disp->rm.event);
+ if (WARN_ON(ret))
+ return ret;
+
+ ret = nvkm_gsp_device_event_ctor(&disp->rm.device, 0x007e0000, NV2080_NOTIFIERS_HOTPLUG,
+ r535_disp_hpd, &disp->rm.hpd);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_device_event_ctor(&disp->rm.device, 0x007e0001, NV2080_NOTIFIERS_DP_IRQ,
+ r535_disp_irq, &disp->rm.irq);
+ if (ret)
+ return ret;
+
+ /* RAMHT. */
+ ret = nvkm_ramht_new(device, disp->func->ramht_size ? disp->func->ramht_size :
+ 0x1000, 0, disp->inst, &disp->ramht);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_intr_stall(gsp, disp->engine.subdev.type, disp->engine.subdev.inst);
+ if (ret < 0)
+ return ret;
+
+ ret = nvkm_inth_add(&device->vfn->intr, ret, NVKM_INTR_PRIO_NORMAL, &disp->engine.subdev,
+ r535_disp_intr, &disp->engine.subdev.inth);
+ if (ret)
+ return ret;
+
+ nvkm_inth_allow(&disp->engine.subdev.inth);
+ return 0;
+}
+
+static void
+r535_disp_dtor(struct nvkm_disp *disp)
+{
+ kfree(disp->func);
+}
+
+int
+r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp)
+{
+ struct nvkm_disp_func *rm;
+ int ret;
+
+ if (!(rm = kzalloc(sizeof(*rm) + 6 * sizeof(rm->user[0]), GFP_KERNEL)))
+ return -ENOMEM;
+
+ rm->dtor = r535_disp_dtor;
+ rm->oneinit = r535_disp_oneinit;
+ rm->init = r535_disp_init;
+ rm->fini = r535_disp_fini;
+ rm->uevent = hw->uevent;
+ rm->sor.cnt = r535_sor_cnt;
+ rm->sor.new = r535_sor_new;
+ rm->ramht_size = hw->ramht_size;
+
+ rm->root = hw->root;
+
+ for (int i = 0; hw->user[i].ctor; i++) {
+ switch (hw->user[i].base.oclass & 0xff) {
+ case 0x73: rm->user[i] = hw->user[i]; break;
+ case 0x7d: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_core; break;
+ case 0x7e: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wndw; break;
+ case 0x7b: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wimm; break;
+ case 0x7a: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_curs; break;
+ default:
+ WARN_ON(1);
+ continue;
+ }
+ }
+
+ ret = nvkm_disp_new_(rm, device, type, inst, pdisp);
+ if (ret)
+ kfree(rm);
+
+ mutex_init(&(*pdisp)->super.mutex); //XXX
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
index 19f5d3a6035e..dcb9f8ba374c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
@@ -25,6 +25,7 @@
#include "ior.h"
#include <core/gpuobj.h>
+#include <subdev/gsp.h>
#include <subdev/timer.h>
#include <nvif/class.h>
@@ -233,5 +234,8 @@ int
tu102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_disp **pdisp)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_disp_new(&tu102_disp, device, type, inst, pdisp);
+
return nvkm_disp_new_(&tu102_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
index 104f6ee9ae6d..2dab6612c4fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
@@ -31,6 +31,23 @@
#include <nvif/if0011.h>
static int
+nvkm_uconn_uevent_gsp(struct nvkm_object *object, u64 token, u32 bits)
+{
+ union nvif_conn_event_args args;
+
+ args.v0.version = 0;
+ args.v0.types = 0;
+ if (bits & NVKM_DPYID_PLUG)
+ args.v0.types |= NVIF_CONN_EVENT_V0_PLUG;
+ if (bits & NVKM_DPYID_UNPLUG)
+ args.v0.types |= NVIF_CONN_EVENT_V0_UNPLUG;
+ if (bits & NVKM_DPYID_IRQ)
+ args.v0.types |= NVIF_CONN_EVENT_V0_IRQ;
+
+ return object->client->event(token, &args, sizeof(args.v0));
+}
+
+static int
nvkm_uconn_uevent_aux(struct nvkm_object *object, u64 token, u32 bits)
{
union nvif_conn_event_args args;
@@ -78,13 +95,14 @@ static int
nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_uevent *uevent)
{
struct nvkm_conn *conn = nvkm_uconn(object);
- struct nvkm_device *device = conn->disp->engine.subdev.device;
+ struct nvkm_disp *disp = conn->disp;
+ struct nvkm_device *device = disp->engine.subdev.device;
struct nvkm_outp *outp;
union nvif_conn_event_args *args = argv;
u64 bits = 0;
if (!uevent) {
- if (conn->info.hpd == DCB_GPIO_UNUSED)
+ if (!disp->rm.client.gsp && conn->info.hpd == DCB_GPIO_UNUSED)
return -ENOSYS;
return 0;
}
@@ -100,6 +118,15 @@ nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_
if (&outp->head == &conn->disp->outps)
return -EINVAL;
+ if (disp->rm.client.gsp) {
+ if (args->v0.types & NVIF_CONN_EVENT_V0_PLUG ) bits |= NVKM_DPYID_PLUG;
+ if (args->v0.types & NVIF_CONN_EVENT_V0_UNPLUG) bits |= NVKM_DPYID_UNPLUG;
+ if (args->v0.types & NVIF_CONN_EVENT_V0_IRQ ) bits |= NVKM_DPYID_IRQ;
+
+ return nvkm_uevent_add(uevent, &disp->rm.event, outp->index, bits,
+ nvkm_uconn_uevent_gsp);
+ }
+
if (outp->dp.aux && !outp->info.location) {
if (args->v0.types & NVIF_CONN_EVENT_V0_PLUG ) bits |= NVKM_I2C_PLUG;
if (args->v0.types & NVIF_CONN_EVENT_V0_UNPLUG) bits |= NVKM_I2C_UNPLUG;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
index d619b40a42c3..fd5ee9f0af36 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
@@ -318,14 +318,14 @@ nvkm_falcon_init(struct nvkm_engine *engine)
}
static void *
-nvkm_falcon_dtor(struct nvkm_engine *engine)
+nvkm_falcon_dtor_engine(struct nvkm_engine *engine)
{
return nvkm_falcon(engine);
}
static const struct nvkm_engine_func
nvkm_falcon = {
- .dtor = nvkm_falcon_dtor,
+ .dtor = nvkm_falcon_dtor_engine,
.oneinit = nvkm_falcon_oneinit,
.init = nvkm_falcon_init,
.fini = nvkm_falcon_fini,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index 5a074b9970ab..aff92848abfe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -26,5 +26,7 @@ nvkm-y += nvkm/engine/fifo/tu102.o
nvkm-y += nvkm/engine/fifo/ga100.o
nvkm-y += nvkm/engine/fifo/ga102.o
+nvkm-y += nvkm/engine/fifo/r535.o
+
nvkm-y += nvkm/engine/fifo/ucgrp.o
nvkm-y += nvkm/engine/fifo/uchan.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index 5db37247dc29..22443fe4a39f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -210,6 +210,8 @@ nvkm_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
CASE(SEC2 );
CASE(NVDEC );
CASE(NVENC );
+ CASE(NVJPG );
+ CASE(OFA );
default:
WARN_ON(1);
break;
@@ -347,8 +349,14 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
nvkm_chid_unref(&fifo->cgid);
nvkm_chid_unref(&fifo->chid);
+ mutex_destroy(&fifo->userd.mutex);
+
nvkm_event_fini(&fifo->nonstall.event);
mutex_destroy(&fifo->mutex);
+
+ if (fifo->func->dtor)
+ fifo->func->dtor(fifo);
+
return fifo;
}
@@ -383,5 +391,8 @@ nvkm_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
spin_lock_init(&fifo->lock);
mutex_init(&fifo->mutex);
+ INIT_LIST_HEAD(&fifo->userd.list);
+ mutex_init(&fifo->userd.mutex);
+
return nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.c
index ea53fb3d5d06..814db9daa194 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.c
@@ -156,6 +156,9 @@ nvkm_cgrp_vctx_get(struct nvkm_cgrp *cgrp, struct nvkm_engn *engn, struct nvkm_c
atomic_inc(&vctx->vmm->engref[engn->engine->subdev.type]);
/* Allocate the HW structures. */
+ if (engn->func->ctor2) {
+ ret = engn->func->ctor2(engn, vctx, chan);
+ } else
if (engn->func->bind) {
ret = nvkm_object_bind(vctx->ectx->object, NULL, 0, &vctx->inst);
if (ret == 0 && engn->func->ctor)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
index b7c9d6115bce..87a62d4ff4bd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -275,13 +275,17 @@ nvkm_chan_del(struct nvkm_chan **pchan)
nvkm_gpuobj_del(&chan->cache);
nvkm_gpuobj_del(&chan->ramfc);
- nvkm_memory_unref(&chan->userd.mem);
-
if (chan->cgrp) {
- nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
+ if (!chan->func->id_put)
+ nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
+ else
+ chan->func->id_put(chan);
+
nvkm_cgrp_unref(&chan->cgrp);
}
+ nvkm_memory_unref(&chan->userd.mem);
+
if (chan->vmm) {
nvkm_vmm_part(chan->vmm, chan->inst->memory);
nvkm_vmm_unref(&chan->vmm);
@@ -438,7 +442,32 @@ nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int ru
}
/* Allocate channel ID. */
- chan->id = nvkm_chid_get(runl->chid, chan);
+ if (!chan->func->id_get) {
+ chan->id = nvkm_chid_get(runl->chid, chan);
+ if (chan->id >= 0) {
+ if (func->userd->bar < 0) {
+ if (ouserd + chan->func->userd->size >=
+ nvkm_memory_size(userd)) {
+ RUNL_DEBUG(runl, "ouserd %llx", ouserd);
+ return -EINVAL;
+ }
+
+ ret = nvkm_memory_kmap(userd, &chan->userd.mem);
+ if (ret) {
+ RUNL_DEBUG(runl, "userd %d", ret);
+ return ret;
+ }
+
+ chan->userd.base = ouserd;
+ } else {
+ chan->userd.mem = nvkm_memory_ref(fifo->userd.mem);
+ chan->userd.base = chan->id * chan->func->userd->size;
+ }
+ }
+ } else {
+ chan->id = chan->func->id_get(chan, userd, ouserd);
+ }
+
if (chan->id < 0) {
RUNL_ERROR(runl, "!chids");
return -ENOSPC;
@@ -448,24 +477,6 @@ nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int ru
cgrp->id = chan->id;
/* Initialise USERD. */
- if (func->userd->bar < 0) {
- if (ouserd + chan->func->userd->size >= nvkm_memory_size(userd)) {
- RUNL_DEBUG(runl, "ouserd %llx", ouserd);
- return -EINVAL;
- }
-
- ret = nvkm_memory_kmap(userd, &chan->userd.mem);
- if (ret) {
- RUNL_DEBUG(runl, "userd %d", ret);
- return ret;
- }
-
- chan->userd.base = ouserd;
- } else {
- chan->userd.mem = nvkm_memory_ref(fifo->userd.mem);
- chan->userd.base = chan->id * chan->func->userd->size;
- }
-
if (chan->func->userd->clear)
chan->func->userd->clear(chan);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
index 85b94f699128..013682a709d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -17,6 +17,9 @@ struct nvkm_cctx {
};
struct nvkm_chan_func {
+ int (*id_get)(struct nvkm_chan *, struct nvkm_memory *userd, u64 ouserd);
+ void (*id_put)(struct nvkm_chan *);
+
const struct nvkm_chan_func_inst {
u32 size;
bool zero;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
index c56d2a839efb..c8ce7ff18713 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
@@ -27,6 +27,7 @@
#include "runq.h"
#include <core/gpuobj.h>
+#include <subdev/gsp.h>
#include <subdev/top.h>
#include <subdev/vfn.h>
@@ -607,5 +608,8 @@ int
ga100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_fifo_new(&ga100_fifo, device, type, inst, pfifo);
+
return nvkm_fifo_new_(&ga100_fifo, device, type, inst, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
index 2cdf5da339b6..755235f55b3a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
@@ -21,6 +21,8 @@
*/
#include "priv.h"
+#include <subdev/gsp.h>
+
#include <nvif/class.h>
static const struct nvkm_fifo_func
@@ -34,12 +36,15 @@ ga102_fifo = {
.engn = &ga100_engn,
.engn_ce = &ga100_engn_ce,
.cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A }, &ga100_cgrp, .force = true },
- .chan = {{ 0, 0, AMPERE_CHANNEL_GPFIFO_B }, &ga100_chan },
+ .chan = {{ 0, 0, AMPERE_CHANNEL_GPFIFO_A }, &ga100_chan },
};
int
ga102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_fifo_new(&ga102_fifo, device, type, inst, pfifo);
+
return nvkm_fifo_new_(&ga102_fifo, device, type, inst, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index 4d448be19224..a0f3277605a5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -13,6 +13,8 @@ struct nvkm_runq;
struct nvkm_vctx;
struct nvkm_fifo_func {
+ void (*dtor)(struct nvkm_fifo *);
+
int (*chid_nr)(struct nvkm_fifo *);
int (*chid_ctor)(struct nvkm_fifo *, int nr);
int (*runq_nr)(struct nvkm_fifo *);
@@ -58,6 +60,8 @@ struct nvkm_fifo_func {
} chan;
};
+int r535_fifo_new(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_fifo **);
int nvkm_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_fifo **);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
new file mode 100644
index 000000000000..3adbb05ff587
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
@@ -0,0 +1,664 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "cgrp.h"
+#include "chan.h"
+#include "chid.h"
+#include "runl.h"
+
+#include <core/gpuobj.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu.h>
+#include <subdev/vfn.h>
+#include <engine/gr.h>
+
+#include <nvhw/drf.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h>
+#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h>
+#include <nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h>
+
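+/* The doorbell handle packs the runlist ID into the upper 16 bits and the channel ID into the lower 16. */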
+static u32
+r535_chan_doorbell_handle(struct nvkm_chan *chan)
+{
+ return (chan->cgrp->runl->id << 16) | chan->id;
+}
+
+static void
+r535_chan_stop(struct nvkm_chan *chan)
+{
+}
+
+static void
+r535_chan_start(struct nvkm_chan *chan)
+{
+}
+
+static void
+r535_chan_ramfc_clear(struct nvkm_chan *chan)
+{
+ struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
+
+ nvkm_gsp_rm_free(&chan->rm.object);
+
+ dma_free_coherent(fifo->engine.subdev.device->dev, fifo->rm.mthdbuf_size,
+ chan->rm.mthdbuf.ptr, chan->rm.mthdbuf.addr);
+
+ nvkm_cgrp_vctx_put(chan->cgrp, &chan->rm.grctx);
+}
+
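+/*
+ * GSP-RM manages USERD in pages shared by multiple channels: userd_p selects
+ * the USERD page for a channel ID and userd_i the slot within that page.
+ */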
+#define CHID_PER_USERD 8
+
+static int
+r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
+{
+ struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
+ struct nvkm_engn *engn;
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
+ const int userd_p = chan->id / CHID_PER_USERD;
+ const int userd_i = chan->id % CHID_PER_USERD;
+ u32 eT = ~0;
+ int ret;
+
+ if (unlikely(device->gr && !device->gr->engine.subdev.oneinit)) {
+ ret = nvkm_subdev_oneinit(&device->gr->engine.subdev);
+ if (ret)
+ return ret;
+ }
+
+ nvkm_runl_foreach_engn(engn, chan->cgrp->runl) {
+ eT = engn->id;
+ break;
+ }
+
+ if (WARN_ON(eT == ~0))
+ return -EINVAL;
+
+ chan->rm.mthdbuf.ptr = dma_alloc_coherent(fifo->engine.subdev.device->dev,
+ fifo->rm.mthdbuf_size,
+ &chan->rm.mthdbuf.addr, GFP_KERNEL);
+ if (!chan->rm.mthdbuf.ptr)
+ return -ENOMEM;
+
+ args = nvkm_gsp_rm_alloc_get(&chan->vmm->rm.device.object, 0xf1f00000 | chan->id,
+ fifo->func->chan.user.oclass, sizeof(*args),
+ &chan->rm.object);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->gpFifoOffset = offset;
+ args->gpFifoEntries = length / 8;
+
+ args->flags = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL);
+ args->flags |= NVDEF(NVOS04, FLAGS, VPR, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE);
+ args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, chan->runq);
+ if (!priv)
+ args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE);
+ else
+ args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE);
+ args->flags |= NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE);
+
+ args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, userd_i);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE);
+ args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, userd_p);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE);
+
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT);
+ args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
+
+ args->hVASpace = chan->vmm->rm.object.handle;
+ args->engineType = eT;
+
+ args->instanceMem.base = chan->inst->addr;
+ args->instanceMem.size = chan->inst->size;
+ args->instanceMem.addressSpace = 2;
+ args->instanceMem.cacheAttrib = 1;
+
+ args->userdMem.base = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
+ args->userdMem.size = fifo->func->chan.func->userd->size;
+ args->userdMem.addressSpace = 2;
+ args->userdMem.cacheAttrib = 1;
+
+ args->ramfcMem.base = chan->inst->addr + 0;
+ args->ramfcMem.size = 0x200;
+ args->ramfcMem.addressSpace = 2;
+ args->ramfcMem.cacheAttrib = 1;
+
+ args->mthdbufMem.base = chan->rm.mthdbuf.addr;
+ args->mthdbufMem.size = fifo->rm.mthdbuf_size;
+ args->mthdbufMem.addressSpace = 1;
+ args->mthdbufMem.cacheAttrib = 0;
+
+ if (!priv)
+ args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, USER);
+ else
+ args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN);
+ args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE);
+ args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
+
+ ret = nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
+ if (ret)
+ return ret;
+
+ if (1) {
+ NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS *ctrl;
+
+ if (1) {
+ NVA06F_CTRL_BIND_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&chan->rm.object,
+ NVA06F_CTRL_CMD_BIND, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return PTR_ERR(ctrl);
+
+ ctrl->engineType = eT;
+
+ ret = nvkm_gsp_rm_ctrl_wr(&chan->rm.object, ctrl);
+ if (ret)
+ return ret;
+ }
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&chan->rm.object,
+ NVA06F_CTRL_CMD_GPFIFO_SCHEDULE, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return PTR_ERR(ctrl);
+
+ ctrl->bEnable = 1;
+ ret = nvkm_gsp_rm_ctrl_wr(&chan->rm.object, ctrl);
+ }
+
+ return ret;
+}
+
+static const struct nvkm_chan_func_ramfc
+r535_chan_ramfc = {
+ .write = r535_chan_ramfc_write,
+ .clear = r535_chan_ramfc_clear,
+ .devm = 0xfff,
+ .priv = true,
+};
+
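+/*
+ * Tracks a userspace-provided USERD allocation.  A single channel ID is
+ * reserved from the runlist for each USERD page, and up to CHID_PER_USERD
+ * channels are multiplexed within it (see r535_chan_id_get_locked()).
+ */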
+struct r535_chan_userd {
+ struct nvkm_memory *mem;
+ struct nvkm_memory *map;
+ int chid;
+ u32 used;
+
+ struct list_head head;
+} *userd;
+
+static void
+r535_chan_id_put(struct nvkm_chan *chan)
+{
+ struct nvkm_runl *runl = chan->cgrp->runl;
+ struct nvkm_fifo *fifo = runl->fifo;
+ struct r535_chan_userd *userd;
+
+ mutex_lock(&fifo->userd.mutex);
+ list_for_each_entry(userd, &fifo->userd.list, head) {
+ if (userd->map == chan->userd.mem) {
+ u32 chid = chan->userd.base / chan->func->userd->size;
+
+ userd->used &= ~BIT(chid);
+ if (!userd->used) {
+ nvkm_memory_unref(&userd->map);
+ nvkm_memory_unref(&userd->mem);
+ nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock);
+ list_del(&userd->head);
+ }
+
+ break;
+ }
+ }
+ mutex_unlock(&fifo->userd.mutex);
+}
+
+static int
+r535_chan_id_get_locked(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd)
+{
+ const u32 userd_size = CHID_PER_USERD * chan->func->userd->size;
+ struct nvkm_runl *runl = chan->cgrp->runl;
+ struct nvkm_fifo *fifo = runl->fifo;
+ struct r535_chan_userd *userd;
+ u32 chid;
+ int ret;
+
+ if (ouserd + chan->func->userd->size >= userd_size ||
+ (ouserd & (chan->func->userd->size - 1))) {
+ RUNL_DEBUG(runl, "ouserd %llx", ouserd);
+ return -EINVAL;
+ }
+
+ chid = div_u64(ouserd, chan->func->userd->size);
+
+ list_for_each_entry(userd, &fifo->userd.list, head) {
+ if (userd->mem == muserd) {
+ if (userd->used & BIT(chid))
+ return -EBUSY;
+ break;
+ }
+ }
+
+ if (&userd->head == &fifo->userd.list) {
+ if (nvkm_memory_size(muserd) < userd_size) {
+ RUNL_DEBUG(runl, "userd too small");
+ return -EINVAL;
+ }
+
+ userd = kzalloc(sizeof(*userd), GFP_KERNEL);
+ if (!userd)
+ return -ENOMEM;
+
+ userd->chid = nvkm_chid_get(runl->chid, chan);
+ if (userd->chid < 0) {
+ ret = userd->chid;
+ kfree(userd);
+ return ret;
+ }
+
+ userd->mem = nvkm_memory_ref(muserd);
+
+ ret = nvkm_memory_kmap(userd->mem, &userd->map);
+ if (ret) {
+ nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock);
+ kfree(userd);
+ return ret;
+ }
+
+ list_add(&userd->head, &fifo->userd.list);
+ }
+
+ userd->used |= BIT(chid);
+
+ chan->userd.mem = nvkm_memory_ref(userd->map);
+ chan->userd.base = ouserd;
+
+ return (userd->chid * CHID_PER_USERD) + chid;
+}
+
+static int
+r535_chan_id_get(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd)
+{
+ struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
+ int ret;
+
+ mutex_lock(&fifo->userd.mutex);
+ ret = r535_chan_id_get_locked(chan, muserd, ouserd);
+ mutex_unlock(&fifo->userd.mutex);
+ return ret;
+}
+
+static const struct nvkm_chan_func
+r535_chan = {
+ .id_get = r535_chan_id_get,
+ .id_put = r535_chan_id_put,
+ .inst = &gf100_chan_inst,
+ .userd = &gv100_chan_userd,
+ .ramfc = &r535_chan_ramfc,
+ .start = r535_chan_start,
+ .stop = r535_chan_stop,
+ .doorbell_handle = r535_chan_doorbell_handle,
+};
+
+static const struct nvkm_cgrp_func
+r535_cgrp = {
+};
+
+static int
+r535_engn_nonstall(struct nvkm_engn *engn)
+{
+ struct nvkm_subdev *subdev = &engn->engine->subdev;
+ int ret;
+
+ ret = nvkm_gsp_intr_nonstall(subdev->device->gsp, subdev->type, subdev->inst);
+ WARN_ON(ret < 0);
+ return ret;
+}
+
+static const struct nvkm_engn_func
+r535_ce = {
+ .nonstall = r535_engn_nonstall,
+};
+
+static int
+r535_gr_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
+{
+ /* RM requires GR context buffers to remain mapped until after the
+ * channel has been destroyed (as opposed to after the last gr obj
+ * has been deleted).
+ *
+ * Take an extra ref here, which will be released once the channel
+ * object has been deleted.
+ */
+ refcount_inc(&vctx->refs);
+ chan->rm.grctx = vctx;
+ return 0;
+}
+
+static const struct nvkm_engn_func
+r535_gr = {
+ .nonstall = r535_engn_nonstall,
+ .ctor2 = r535_gr_ctor,
+};
+
+static int
+r535_flcn_bind(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
+{
+ struct nvkm_gsp_client *client = &chan->vmm->rm.client;
+ NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&chan->vmm->rm.device.subdevice,
+ NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->hClient = client->object.handle;
+ ctrl->hObject = chan->rm.object.handle;
+ ctrl->hChanClient = client->object.handle;
+ ctrl->virtAddress = vctx->vma->addr;
+ ctrl->size = vctx->inst->size;
+ ctrl->engineType = engn->id;
+ ctrl->ChID = chan->id;
+
+ return nvkm_gsp_rm_ctrl_wr(&chan->vmm->rm.device.subdevice, ctrl);
+}
+
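+/* Allocate a context buffer of the RM-reported size, map it into the channel's VMM, and promote it to RM. */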
+static int
+r535_flcn_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
+{
+ int ret;
+
+ if (WARN_ON(!engn->rm.size))
+ return -EINVAL;
+
+ ret = nvkm_gpuobj_new(engn->engine->subdev.device, engn->rm.size, 0, true, NULL,
+ &vctx->inst);
+ if (ret)
+ return ret;
+
+ ret = nvkm_vmm_get(vctx->vmm, 12, vctx->inst->size, &vctx->vma);
+ if (ret)
+ return ret;
+
+ ret = nvkm_memory_map(vctx->inst, 0, vctx->vmm, vctx->vma, NULL, 0);
+ if (ret)
+ return ret;
+
+ return r535_flcn_bind(engn, vctx, chan);
+}
+
+static const struct nvkm_engn_func
+r535_flcn = {
+ .nonstall = r535_engn_nonstall,
+ .ctor2 = r535_flcn_ctor,
+};
+
+static void
+r535_runl_allow(struct nvkm_runl *runl, u32 engm)
+{
+}
+
+static void
+r535_runl_block(struct nvkm_runl *runl, u32 engm)
+{
+}
+
+static const struct nvkm_runl_func
+r535_runl = {
+ .block = r535_runl_block,
+ .allow = r535_runl_allow,
+};
+
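+/* Map an nvkm engine type/instance to the NV2080 engine type used by GSP-RM. */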
+static int
+r535_fifo_2080_type(enum nvkm_subdev_type type, int inst)
+{
+ switch (type) {
+ case NVKM_ENGINE_GR: return NV2080_ENGINE_TYPE_GR0;
+ case NVKM_ENGINE_CE: return NV2080_ENGINE_TYPE_COPY0 + inst;
+ case NVKM_ENGINE_SEC2: return NV2080_ENGINE_TYPE_SEC2;
+ case NVKM_ENGINE_NVDEC: return NV2080_ENGINE_TYPE_NVDEC0 + inst;
+ case NVKM_ENGINE_NVENC: return NV2080_ENGINE_TYPE_NVENC0 + inst;
+ case NVKM_ENGINE_NVJPG: return NV2080_ENGINE_TYPE_NVJPEG0 + inst;
+ case NVKM_ENGINE_OFA: return NV2080_ENGINE_TYPE_OFA;
+ case NVKM_ENGINE_SW: return NV2080_ENGINE_TYPE_SW;
+ default:
+ break;
+ }
+
+ WARN_ON(1);
+ return -EINVAL;
+}
+
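+/* Translate an RM_ENGINE_TYPE back to an nvkm engine type, returning the instance index. */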
+static int
+r535_fifo_engn_type(RM_ENGINE_TYPE rm, enum nvkm_subdev_type *ptype)
+{
+ switch (rm) {
+ case RM_ENGINE_TYPE_GR0:
+ *ptype = NVKM_ENGINE_GR;
+ return 0;
+ case RM_ENGINE_TYPE_COPY0...RM_ENGINE_TYPE_COPY9:
+ *ptype = NVKM_ENGINE_CE;
+ return rm - RM_ENGINE_TYPE_COPY0;
+ case RM_ENGINE_TYPE_NVDEC0...RM_ENGINE_TYPE_NVDEC7:
+ *ptype = NVKM_ENGINE_NVDEC;
+ return rm - RM_ENGINE_TYPE_NVDEC0;
+ case RM_ENGINE_TYPE_NVENC0...RM_ENGINE_TYPE_NVENC2:
+ *ptype = NVKM_ENGINE_NVENC;
+ return rm - RM_ENGINE_TYPE_NVENC0;
+ case RM_ENGINE_TYPE_SW:
+ *ptype = NVKM_ENGINE_SW;
+ return 0;
+ case RM_ENGINE_TYPE_SEC2:
+ *ptype = NVKM_ENGINE_SEC2;
+ return 0;
+ case RM_ENGINE_TYPE_NVJPEG0...RM_ENGINE_TYPE_NVJPEG7:
+ *ptype = NVKM_ENGINE_NVJPG;
+ return rm - RM_ENGINE_TYPE_NVJPEG0;
+ case RM_ENGINE_TYPE_OFA:
+ *ptype = NVKM_ENGINE_OFA;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
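+/* Query RM for the context buffer size of each constructed falcon and record it per engine. */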
+static int
+r535_fifo_ectx_size(struct nvkm_fifo *fifo)
+{
+ NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS *ctrl;
+ struct nvkm_gsp *gsp = fifo->engine.subdev.device->gsp;
+ struct nvkm_runl *runl;
+ struct nvkm_engn *engn;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO,
+ sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return PTR_ERR(ctrl);
+
+ for (int i = 0; i < ctrl->numConstructedFalcons; i++) {
+ nvkm_runl_foreach(runl, fifo) {
+ nvkm_runl_foreach_engn(engn, runl) {
+ if (engn->rm.desc == ctrl->constructedFalconsTable[i].engDesc) {
+ engn->rm.size =
+ ctrl->constructedFalconsTable[i].ctxBufferSize;
+ break;
+ }
+ }
+ }
+ }
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+static int
+r535_fifo_runl_ctor(struct nvkm_fifo *fifo)
+{
+ struct nvkm_subdev *subdev = &fifo->engine.subdev;
+ struct nvkm_gsp *gsp = subdev->device->gsp;
+ struct nvkm_runl *runl;
+ struct nvkm_engn *engn;
+ u32 cgids = 2048;
+ u32 chids = 2048 / CHID_PER_USERD;
+ int ret;
+ NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *ctrl;
+
+ if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, cgids, 0, cgids, &fifo->cgid)) ||
+ (ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, 0, chids, &fifo->chid)))
+ return ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return PTR_ERR(ctrl);
+
+ for (int i = 0; i < ctrl->numEntries; i++) {
+ const u32 addr = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST_PRI_BASE];
+ const u32 id = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST];
+
+ runl = nvkm_runl_get(fifo, id, addr);
+ if (!runl) {
+ runl = nvkm_runl_new(fifo, id, addr, 0);
+ if (WARN_ON(IS_ERR(runl)))
+ continue;
+ }
+ }
+
+ for (int i = 0; i < ctrl->numEntries; i++) {
+ const u32 addr = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST_PRI_BASE];
+ const u32 rmid = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RM_ENGINE_TYPE];
+ const u32 id = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST];
+ enum nvkm_subdev_type type;
+ int inst, nv2080;
+
+ runl = nvkm_runl_get(fifo, id, addr);
+ if (!runl)
+ continue;
+
+ inst = r535_fifo_engn_type(rmid, &type);
+ if (inst < 0) {
+ nvkm_warn(subdev, "RM_ENGINE_TYPE 0x%x\n", rmid);
+ nvkm_runl_del(runl);
+ continue;
+ }
+
+ nv2080 = r535_fifo_2080_type(type, inst);
+ if (nv2080 < 0) {
+ nvkm_runl_del(runl);
+ continue;
+ }
+
+ switch (type) {
+ case NVKM_ENGINE_CE:
+ engn = nvkm_runl_add(runl, nv2080, &r535_ce, type, inst);
+ break;
+ case NVKM_ENGINE_GR:
+ engn = nvkm_runl_add(runl, nv2080, &r535_gr, type, inst);
+ break;
+ case NVKM_ENGINE_NVDEC:
+ case NVKM_ENGINE_NVENC:
+ case NVKM_ENGINE_NVJPG:
+ case NVKM_ENGINE_OFA:
+ engn = nvkm_runl_add(runl, nv2080, &r535_flcn, type, inst);
+ break;
+ case NVKM_ENGINE_SW:
+ continue;
+ default:
+ engn = NULL;
+ break;
+ }
+
+ if (!engn) {
+ nvkm_runl_del(runl);
+ continue;
+ }
+
+ engn->rm.desc = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_ENG_DESC];
+ }
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+
+ {
+ NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ fifo->rm.mthdbuf_size = ctrl->size;
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ }
+
+ return r535_fifo_ectx_size(fifo);
+}
+
+static void
+r535_fifo_dtor(struct nvkm_fifo *fifo)
+{
+ kfree(fifo->func);
+}
+
+int
+r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo)
+{
+ struct nvkm_fifo_func *rm;
+
+ if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+ return -ENOMEM;
+
+ rm->dtor = r535_fifo_dtor;
+ rm->runl_ctor = r535_fifo_runl_ctor;
+ rm->runl = &r535_runl;
+ rm->cgrp = hw->cgrp;
+ rm->cgrp.func = &r535_cgrp;
+ rm->chan = hw->chan;
+ rm->chan.func = &r535_chan;
+ rm->nonstall = &ga100_fifo_nonstall;
+ rm->nonstall_ctor = ga100_fifo_nonstall_ctor;
+
+ return nvkm_fifo_new_(rm, device, type, inst, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h
index 5421321f8e85..19e6772ead11 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h
@@ -18,6 +18,7 @@ struct nvkm_engn {
bool (*mmu_fault_triggered)(struct nvkm_engn *);
int (*ctor)(struct nvkm_engn *, struct nvkm_vctx *);
void (*bind)(struct nvkm_engn *, struct nvkm_cctx *, struct nvkm_chan *);
+ int (*ctor2)(struct nvkm_engn *, struct nvkm_vctx *, struct nvkm_chan *);
int (*ramht_add)(struct nvkm_engn *, struct nvkm_object *, struct nvkm_chan *);
void (*ramht_del)(struct nvkm_chan *, int hash);
} *func;
@@ -28,6 +29,11 @@ struct nvkm_engn {
int fault;
+ struct {
+ u32 desc;
+ u32 size;
+ } rm;
+
struct list_head head;
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
index ea9e151dbb48..1d39a6840a40 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
@@ -25,6 +25,7 @@
#include "runl.h"
#include <core/memory.h>
+#include <subdev/gsp.h>
#include <subdev/mc.h>
#include <subdev/vfn.h>
@@ -282,5 +283,8 @@ int
tu102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_fifo_new(&tu102_fifo, device, type, inst, pfifo);
+
return nvkm_fifo_new_(&tu102_fifo, device, type, inst, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
index 04140e0110be..9e56bcc166ed 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
@@ -317,6 +317,15 @@ nvkm_uchan = {
.uevent = nvkm_uchan_uevent,
};
+struct nvkm_chan *
+nvkm_uchan_chan(struct nvkm_object *object)
+{
+ if (WARN_ON(object->func != &nvkm_uchan))
+ return NULL;
+
+ return nvkm_uchan(object)->chan;
+}
+
int
nvkm_uchan_new(struct nvkm_fifo *fifo, struct nvkm_cgrp *cgrp, const struct nvkm_oclass *oclass,
void *argv, u32 argc, struct nvkm_object **pobject)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index b5418f05ccd8..1555f8c40b4f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -41,6 +41,9 @@ nvkm-y += nvkm/engine/gr/gp10b.o
nvkm-y += nvkm/engine/gr/gv100.o
nvkm-y += nvkm/engine/gr/tu102.o
nvkm-y += nvkm/engine/gr/ga102.o
+nvkm-y += nvkm/engine/gr/ad102.o
+
+nvkm-y += nvkm/engine/gr/r535.o
nvkm-y += nvkm/engine/gr/ctxnv40.o
nvkm-y += nvkm/engine/gr/ctxnv50.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c
new file mode 100644
index 000000000000..7bfa6240d283
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct gf100_gr_func
+ad102_gr = {
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+ { -1, -1, ADA_A },
+ { -1, -1, ADA_COMPUTE_A },
+ {}
+ }
+};
+
+int
+ad102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
+{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_gr_new(&ad102_gr, device, type, inst, pgr);
+
+ return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
index 0096ad401b15..f5e68f09df76 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
@@ -160,7 +160,11 @@ static int
nvkm_gr_init(struct nvkm_engine *engine)
{
struct nvkm_gr *gr = nvkm_gr(engine);
- return gr->func->init(gr);
+
+ if (gr->func->init)
+ return gr->func->init(gr);
+
+ return 0;
}
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c
index 00cd70abad67..d285c597aff9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c
@@ -23,6 +23,7 @@
#include "ctxgf100.h"
#include <core/firmware.h>
+#include <subdev/gsp.h>
#include <subdev/acr.h>
#include <subdev/timer.h>
#include <subdev/vfn.h>
@@ -350,5 +351,8 @@ ga102_gr_fwif[] = {
int
ga102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_gr_new(&ga102_gr, device, type, inst, pgr);
+
return gf100_gr_new_(ga102_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 54f686ba39ac..b0e0c9305034 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -445,4 +445,6 @@ void gp108_gr_acr_bld_patch(struct nvkm_acr *, u32, s64);
int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gr **);
+int r535_gr_new(const struct gf100_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c
new file mode 100644
index 000000000000..f4bed3eb1ec2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c
@@ -0,0 +1,508 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+#include <core/memory.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu/vmm.h>
+#include <engine/fifo/priv.h>
+
+#include <nvif/if900d.h>
+
+#include <nvhw/drf.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
+#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h>
+
+#define r535_gr(p) container_of((p), struct r535_gr, base)
+
+#define R515_GR_MAX_CTXBUFS 9
+
+struct r535_gr {
+ struct nvkm_gr base;
+
+ struct {
+ u16 bufferId;
+ u32 size;
+ u8 page;
+ u8 align;
+ bool global;
+ bool init;
+ bool ro;
+ } ctxbuf[R515_GR_MAX_CTXBUFS];
+ int ctxbuf_nr;
+
+ struct nvkm_memory *ctxbuf_mem[R515_GR_MAX_CTXBUFS];
+};
+
+struct r535_gr_chan {
+ struct nvkm_object object;
+ struct r535_gr *gr;
+
+ struct nvkm_vmm *vmm;
+ struct nvkm_chan *chan;
+
+ struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS];
+ struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
+};
+
+struct r535_gr_obj {
+ struct nvkm_object object;
+ struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_gr_obj_dtor(struct nvkm_object *object)
+{
+ struct r535_gr_obj *obj = container_of(object, typeof(*obj), object);
+
+ nvkm_gsp_rm_free(&obj->rm);
+ return obj;
+}
+
+static const struct nvkm_object_func
+r535_gr_obj = {
+ .dtor = r535_gr_obj_dtor,
+};
+
+static int
+r535_gr_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ struct r535_gr_chan *chan = container_of(oclass->parent, typeof(*chan), object);
+ struct r535_gr_obj *obj;
+
+ if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_object_ctor(&r535_gr_obj, oclass, &obj->object);
+ *pobject = &obj->object;
+
+ return nvkm_gsp_rm_alloc(&chan->chan->rm.object, oclass->handle, oclass->base.oclass, 0,
+ &obj->rm);
+}
+
+static void *
+r535_gr_chan_dtor(struct nvkm_object *object)
+{
+ struct r535_gr_chan *grc = container_of(object, typeof(*grc), object);
+ struct r535_gr *gr = grc->gr;
+
+ for (int i = 0; i < gr->ctxbuf_nr; i++) {
+ nvkm_vmm_put(grc->vmm, &grc->vma[i]);
+ nvkm_memory_unref(&grc->mem[i]);
+ }
+
+ nvkm_vmm_unref(&grc->vmm);
+ return grc;
+}
+
+static const struct nvkm_object_func
+r535_gr_chan = {
+ .dtor = r535_gr_chan_dtor,
+};
+
+static int
+r535_gr_promote_ctx(struct r535_gr *gr, bool golden, struct nvkm_vmm *vmm,
+ struct nvkm_memory **pmem, struct nvkm_vma **pvma,
+ struct nvkm_gsp_object *chan)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.subdevice,
+ NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return PTR_ERR(ctrl);
+
+ ctrl->engineType = 1;
+ ctrl->hChanClient = vmm->rm.client.object.handle;
+ ctrl->hObject = chan->handle;
+
+ for (int i = 0; i < gr->ctxbuf_nr; i++) {
+ NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *entry =
+ &ctrl->promoteEntry[ctrl->entryCount];
+ const bool alloc = golden || !gr->ctxbuf[i].global;
+ int ret;
+
+ entry->bufferId = gr->ctxbuf[i].bufferId;
+ entry->bInitialize = gr->ctxbuf[i].init && alloc;
+
+ if (alloc) {
+ ret = nvkm_memory_new(device, gr->ctxbuf[i].init ?
+ NVKM_MEM_TARGET_INST : NVKM_MEM_TARGET_INST_SR_LOST,
+ gr->ctxbuf[i].size, 1 << gr->ctxbuf[i].page,
+ gr->ctxbuf[i].init, &pmem[i]);
+ if (WARN_ON(ret))
+ return ret;
+
+ if (gr->ctxbuf[i].bufferId ==
+ NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP)
+ entry->bNonmapped = 1;
+ } else {
+ if (gr->ctxbuf[i].bufferId ==
+ NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP)
+ continue;
+
+ pmem[i] = nvkm_memory_ref(gr->ctxbuf_mem[i]);
+ }
+
+ if (!entry->bNonmapped) {
+ struct gf100_vmm_map_v0 args = {
+ .priv = 1,
+ .ro = gr->ctxbuf[i].ro,
+ };
+
+ mutex_lock(&vmm->mutex.vmm);
+ ret = nvkm_vmm_get_locked(vmm, false, true, false, 0, gr->ctxbuf[i].align,
+ nvkm_memory_size(pmem[i]), &pvma[i]);
+ mutex_unlock(&vmm->mutex.vmm);
+ if (ret)
+ return ret;
+
+ ret = nvkm_memory_map(pmem[i], 0, vmm, pvma[i], &args, sizeof(args));
+ if (ret)
+ return ret;
+
+ entry->gpuVirtAddr = pvma[i]->addr;
+ }
+
+ if (entry->bInitialize) {
+ entry->gpuPhysAddr = nvkm_memory_addr(pmem[i]);
+ entry->size = gr->ctxbuf[i].size;
+ entry->physAttr = 4;
+ }
+
+ nvkm_debug(subdev,
+ "promote %02d: pa %016llx/%08x sz %016llx va %016llx init:%d nm:%d\n",
+ entry->bufferId, entry->gpuPhysAddr, entry->physAttr, entry->size,
+ entry->gpuVirtAddr, entry->bInitialize, entry->bNonmapped);
+
+ ctrl->entryCount++;
+ }
+
+ return nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.subdevice, ctrl);
+}
+
+static int
+r535_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *chan, const struct nvkm_oclass *oclass,
+ struct nvkm_object **pobject)
+{
+ struct r535_gr *gr = r535_gr(base);
+ struct r535_gr_chan *grc;
+ int ret;
+
+ if (!(grc = kzalloc(sizeof(*grc), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_object_ctor(&r535_gr_chan, oclass, &grc->object);
+ grc->gr = gr;
+ grc->vmm = nvkm_vmm_ref(chan->vmm);
+ grc->chan = chan;
+ *pobject = &grc->object;
+
+ ret = r535_gr_promote_ctx(gr, false, grc->vmm, grc->mem, grc->vma, &chan->rm.object);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static u64
+r535_gr_units(struct nvkm_gr *gr)
+{
+ struct nvkm_gsp *gsp = gr->engine.subdev.device->gsp;
+
+ return (gsp->gr.tpcs << 8) | gsp->gr.gpcs;
+}
+
+static int
+r535_gr_oneinit(struct nvkm_gr *base)
+{
+ NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info;
+ struct r535_gr *gr = container_of(base, typeof(*gr), base);
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_mmu *mmu = device->mmu;
+ struct {
+ struct nvkm_memory *inst;
+ struct nvkm_vmm *vmm;
+ struct nvkm_gsp_object chan;
+ struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
+ } golden = {};
+ int ret;
+
+ /* Allocate a channel to use for golden context init. */
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x12000, 0, true, &golden.inst);
+ if (ret)
+ goto done;
+
+ ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grGoldenVmm", &golden.vmm);
+ if (ret)
+ goto done;
+
+ ret = mmu->func->promote_vmm(golden.vmm);
+ if (ret)
+ goto done;
+
+ {
+ NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(&golden.vmm->rm.device.object, 0xf1f00000,
+ device->fifo->func->chan.user.oclass,
+ sizeof(*args), &golden.chan);
+ if (IS_ERR(args)) {
+ ret = PTR_ERR(args);
+ goto done;
+ }
+
+ args->gpFifoOffset = 0;
+ args->gpFifoEntries = 0x1000 / 8;
+ args->flags =
+ NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL) |
+ NVDEF(NVOS04, FLAGS, VPR, FALSE) |
+ NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE) |
+ NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, 0) |
+ NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE) |
+ NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE) |
+ NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE) |
+ NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, 0) |
+ NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE) |
+ NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, 0) |
+ NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE) |
+ NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE) |
+ NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE) |
+ NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE) |
+ NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE) |
+ NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE) |
+ NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE) |
+ NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT) |
+ NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE) |
+ NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
+ args->hVASpace = golden.vmm->rm.object.handle;
+ args->engineType = 1;
+ args->instanceMem.base = nvkm_memory_addr(golden.inst);
+ args->instanceMem.size = 0x1000;
+ args->instanceMem.addressSpace = 2;
+ args->instanceMem.cacheAttrib = 1;
+ args->ramfcMem.base = nvkm_memory_addr(golden.inst);
+ args->ramfcMem.size = 0x200;
+ args->ramfcMem.addressSpace = 2;
+ args->ramfcMem.cacheAttrib = 1;
+ args->userdMem.base = nvkm_memory_addr(golden.inst) + 0x1000;
+ args->userdMem.size = 0x200;
+ args->userdMem.addressSpace = 2;
+ args->userdMem.cacheAttrib = 1;
+ args->mthdbufMem.base = nvkm_memory_addr(golden.inst) + 0x2000;
+ args->mthdbufMem.size = 0x5000;
+ args->mthdbufMem.addressSpace = 2;
+ args->mthdbufMem.cacheAttrib = 1;
+ args->internalFlags =
+ NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN) |
+ NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE) |
+ NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
+
+ ret = nvkm_gsp_rm_alloc_wr(&golden.chan, args);
+ if (ret)
+ goto done;
+ }
+
+ /* Fetch context buffer info from RM and allocate each of them here to use
+ * during golden context init (or later as a global context buffer).
+ *
+ * Also build the information that'll be used to create channel contexts.
+ */
+ info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO,
+ sizeof(*info));
+ if (WARN_ON(IS_ERR(info))) {
+ ret = PTR_ERR(info);
+ goto done;
+ }
+
+ for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++) {
+ static const struct {
+ u32 id0; /* NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID */
+ u32 id1; /* NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID */
+ bool global;
+ bool init;
+ bool ro;
+ } map[] = {
+#define _A(n,N,G,I,R) { .id0 = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_##n, \
+ .id1 = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_##N, \
+ .global = (G), .init = (I), .ro = (R) }
+#define _B(N,G,I,R) _A(GRAPHICS_##N, N, (G), (I), (R))
+ /* global init ro */
+ _A( GRAPHICS, MAIN, false, true, false),
+ _B( PATCH, false, true, false),
+ _A( GRAPHICS_BUNDLE_CB, BUFFER_BUNDLE_CB, true, false, false),
+ _B( PAGEPOOL, true, false, false),
+ _B( ATTRIBUTE_CB, true, false, false),
+ _B( RTV_CB_GLOBAL, true, false, false),
+ _B( FECS_EVENT, true, true, false),
+ _B( PRIV_ACCESS_MAP, true, true, true),
+#undef _B
+#undef _A
+ };
+ u32 size = info->engineContextBuffersInfo[0].engine[i].size;
+ u8 align, page;
+ int id;
+
+ for (id = 0; id < ARRAY_SIZE(map); id++) {
+ if (map[id].id0 == i)
+ break;
+ }
+
+ nvkm_debug(subdev, "%02x: size:0x%08x %s\n", i,
+ size, (id < ARRAY_SIZE(map)) ? "*" : "");
+ if (id >= ARRAY_SIZE(map))
+ continue;
+
+ if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN)
+ size = ALIGN(size, 0x1000) + 64 * 0x1000; /* per-subctx headers */
+
+ if (size >= 1 << 21) page = 21;
+ else if (size >= 1 << 16) page = 16;
+ else page = 12;
+
+ if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB)
+ align = order_base_2(size);
+ else
+ align = page;
+
+ if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
+ continue;
+
+ gr->ctxbuf[gr->ctxbuf_nr].bufferId = map[id].id1;
+ gr->ctxbuf[gr->ctxbuf_nr].size = size;
+ gr->ctxbuf[gr->ctxbuf_nr].page = page;
+ gr->ctxbuf[gr->ctxbuf_nr].align = align;
+ gr->ctxbuf[gr->ctxbuf_nr].global = map[id].global;
+ gr->ctxbuf[gr->ctxbuf_nr].init = map[id].init;
+ gr->ctxbuf[gr->ctxbuf_nr].ro = map[id].ro;
+ gr->ctxbuf_nr++;
+
+ if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) {
+ if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
+ continue;
+
+ gr->ctxbuf[gr->ctxbuf_nr] = gr->ctxbuf[gr->ctxbuf_nr - 1];
+ gr->ctxbuf[gr->ctxbuf_nr].bufferId =
+ NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP;
+ gr->ctxbuf_nr++;
+ }
+ }
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info);
+
+ /* Promote golden context to RM. */
+ ret = r535_gr_promote_ctx(gr, true, golden.vmm, gr->ctxbuf_mem, golden.vma, &golden.chan);
+ if (ret)
+ goto done;
+
+ /* Allocate 3D class on channel to trigger golden context init in RM. */
+ {
+ int i;
+
+ for (i = 0; gr->base.func->sclass[i].ctor; i++) {
+ if ((gr->base.func->sclass[i].oclass & 0xff) == 0x97) {
+ struct nvkm_gsp_object threed;
+
+ ret = nvkm_gsp_rm_alloc(&golden.chan, 0x97000000,
+ gr->base.func->sclass[i].oclass, 0,
+ &threed);
+ if (ret)
+ goto done;
+
+ nvkm_gsp_rm_free(&threed);
+ break;
+ }
+ }
+
+ if (WARN_ON(!gr->base.func->sclass[i].ctor)) {
+ ret = -EINVAL;
+ goto done;
+ }
+ }
+
+done:
+ nvkm_gsp_rm_free(&golden.chan);
+ for (int i = gr->ctxbuf_nr - 1; i >= 0; i--)
+ nvkm_vmm_put(golden.vmm, &golden.vma[i]);
+ nvkm_vmm_unref(&golden.vmm);
+ nvkm_memory_unref(&golden.inst);
+ return ret;
+
+}
+
+static void *
+r535_gr_dtor(struct nvkm_gr *base)
+{
+ struct r535_gr *gr = r535_gr(base);
+
+ while (gr->ctxbuf_nr)
+ nvkm_memory_unref(&gr->ctxbuf_mem[--gr->ctxbuf_nr]);
+
+ kfree(gr->base.func);
+ return gr;
+}
+
+int
+r535_gr_new(const struct gf100_gr_func *hw,
+ struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
+{
+ struct nvkm_gr_func *rm;
+ struct r535_gr *gr;
+ int nclass;
+
+ for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+ if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+ return -ENOMEM;
+
+ rm->dtor = r535_gr_dtor;
+ rm->oneinit = r535_gr_oneinit;
+ rm->units = r535_gr_units;
+ rm->chan_new = r535_gr_chan_new;
+
+ for (int i = 0; i < nclass; i++) {
+ rm->sclass[i].minver = hw->sclass[i].minver;
+ rm->sclass[i].maxver = hw->sclass[i].maxver;
+ rm->sclass[i].oclass = hw->sclass[i].oclass;
+ rm->sclass[i].ctor = r535_gr_obj_ctor;
+ }
+
+ if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL))) {
+ kfree(rm);
+ return -ENOMEM;
+ }
+
+ *pgr = &gr->base;
+
+ return nvkm_gr_ctor(rm, device, type, inst, true, &gr->base);
+}
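
Most of r535_gr_promote_ctx() and r535_gr_oneinit() above is built from one recurring idiom: ask the GSP helpers for a parameter buffer (nvkm_gsp_rm_ctrl_get()/nvkm_gsp_rm_alloc_get(), or _rd for reads), fill it in, then commit it with the matching _wr() (reads are released with _done()). A minimal sketch of that handshake, promoting a single already-allocated and mapped context buffer; the helper name and the single-entry simplification are illustrative, not part of the patch:

static int
r535_gr_promote_one_sketch(struct nvkm_vmm *vmm, struct nvkm_gsp_object *chan,
			   u16 bufferId, u64 gpuVirtAddr)
{
	NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;

	/* Phase 1: get a parameter buffer for the control command. */
	ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.subdevice,
				    NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl);

	/* Phase 2: fill it in -- here, one pre-mapped buffer for this channel. */
	ctrl->engineType = 1;
	ctrl->hChanClient = vmm->rm.client.object.handle;
	ctrl->hObject = chan->handle;
	ctrl->promoteEntry[0].bufferId = bufferId;
	ctrl->promoteEntry[0].gpuVirtAddr = gpuVirtAddr;
	ctrl->entryCount = 1;

	/* Phase 3: send it; the caller does not touch ctrl again afterwards. */
	return nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.subdevice, ctrl);
}
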
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
index a7775aa18541..b7a458e9040a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
@@ -22,6 +22,8 @@
#include "gf100.h"
#include "ctxgf100.h"
+#include <subdev/gsp.h>
+
#include <nvif/class.h>
void
@@ -216,5 +218,8 @@ tu102_gr_fwif[] = {
int
tu102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_gr_new(&tu102_gr, device, type, inst, pgr);
+
return gf100_gr_new_(tu102_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
index f05e79670d22..2b0e923cb755 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
@@ -1,4 +1,9 @@
# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/nvdec/base.o
nvkm-y += nvkm/engine/nvdec/gm107.o
+nvkm-y += nvkm/engine/nvdec/tu102.o
+nvkm-y += nvkm/engine/nvdec/ga100.o
nvkm-y += nvkm/engine/nvdec/ga102.o
+nvkm-y += nvkm/engine/nvdec/ad102.o
+
+nvkm-y += nvkm/engine/nvdec/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c
new file mode 100644
index 000000000000..d72b3aae9a2b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ad102_nvdec = {
+ .sclass = {
+ { -1, -1, NVC9B0_VIDEO_DECODER },
+ {}
+ }
+};
+
+int
+ad102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_nvdec **pnvdec)
+{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_nvdec_new(&ad102_nvdec, device, type, inst, pnvdec);
+
+ return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
index 1f6e3b32ba16..7d1c6791ae82 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
@@ -33,6 +33,7 @@ nvkm_nvdec_dtor(struct nvkm_engine *engine)
static const struct nvkm_engine_func
nvkm_nvdec = {
.dtor = nvkm_nvdec_dtor,
+ .sclass = { {} },
};
int
@@ -58,4 +59,4 @@ nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *device,
return nvkm_falcon_ctor(nvdec->func->flcn, &nvdec->engine.subdev,
nvdec->engine.subdev.name, addr, &nvdec->falcon);
-};
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c
new file mode 100644
index 000000000000..932934227b9c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ga100_nvdec = {
+ .sclass = {
+ { -1, -1, NVC6B0_VIDEO_DECODER },
+ {}
+ }
+};
+
+int
+ga100_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_nvdec **pnvdec)
+{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_nvdec_new(&ga100_nvdec, device, type, inst, pnvdec);
+
+ return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c
index 37d8c3c0f3ab..022a9c824304 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c
@@ -21,8 +21,17 @@
*/
#include "priv.h"
-#include <subdev/mc.h>
-#include <subdev/timer.h>
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ga102_nvdec_gsp = {
+ .sclass = {
+ { -1, -1, NVC7B0_VIDEO_DECODER },
+ {}
+ }
+};
static const struct nvkm_falcon_func
ga102_nvdec_flcn = {
@@ -57,5 +66,8 @@ int
ga102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_nvdec **pnvdec)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_nvdec_new(&ga102_nvdec_gsp, device, type, inst, pnvdec);
+
return nvkm_nvdec_new_(ga102_nvdec_fwif, device, type, inst, 0x848000, pnvdec);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c
index 564f7e8960a2..51c9d0e68ee4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c
@@ -44,7 +44,7 @@ gm107_nvdec_nofw(struct nvkm_nvdec *nvdec, int ver,
return 0;
}
-static const struct nvkm_nvdec_fwif
+const struct nvkm_nvdec_fwif
gm107_nvdec_fwif[] = {
{ -1, gm107_nvdec_nofw, &gm107_nvdec },
{}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
index 61e1f7aaa509..f506ae83bfd7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
@@ -5,6 +5,8 @@
struct nvkm_nvdec_func {
const struct nvkm_falcon_func *flcn;
+
+ struct nvkm_sclass sclass[];
};
struct nvkm_nvdec_fwif {
@@ -14,6 +16,11 @@ struct nvkm_nvdec_fwif {
const struct nvkm_nvdec_func *func;
};
+extern const struct nvkm_nvdec_fwif gm107_nvdec_fwif[];
+
int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *,
enum nvkm_subdev_type, int, u32 addr, struct nvkm_nvdec **);
+
+int r535_nvdec_new(const struct nvkm_engine_func *, struct nvkm_device *,
+ enum nvkm_subdev_type, int, struct nvkm_nvdec **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c
new file mode 100644
index 000000000000..75a24f3e6617
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/object.h>
+#include <subdev/gsp.h>
+#include <engine/fifo.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+
+struct r535_nvdec_obj {
+ struct nvkm_object object;
+ struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_nvdec_obj_dtor(struct nvkm_object *object)
+{
+ struct r535_nvdec_obj *obj = container_of(object, typeof(*obj), object);
+
+ nvkm_gsp_rm_free(&obj->rm);
+ return obj;
+}
+
+static const struct nvkm_object_func
+r535_nvdec_obj = {
+ .dtor = r535_nvdec_obj_dtor,
+};
+
+static int
+r535_nvdec_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+ struct r535_nvdec_obj *obj;
+ NV_BSP_ALLOCATION_PARAMETERS *args;
+
+ if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_object_ctor(&r535_nvdec_obj, oclass, &obj->object);
+ *pobject = &obj->object;
+
+ args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
+ sizeof(*args), &obj->rm);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->size = sizeof(*args);
+ args->engineInstance = oclass->engine->subdev.inst;
+
+ return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
+}
+
+static void *
+r535_nvdec_dtor(struct nvkm_engine *engine)
+{
+ struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
+
+ kfree(nvdec->engine.func);
+ return nvdec;
+}
+
+int
+r535_nvdec_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_nvdec **pnvdec)
+{
+ struct nvkm_engine_func *rm;
+ int nclass;
+
+ for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+ if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+ return -ENOMEM;
+
+ rm->dtor = r535_nvdec_dtor;
+ for (int i = 0; i < nclass; i++) {
+ rm->sclass[i].minver = hw->sclass[i].minver;
+ rm->sclass[i].maxver = hw->sclass[i].maxver;
+ rm->sclass[i].oclass = hw->sclass[i].oclass;
+ rm->sclass[i].ctor = r535_nvdec_obj_ctor;
+ }
+
+ if (!(*pnvdec = kzalloc(sizeof(**pnvdec), GFP_KERNEL))) {
+ kfree(rm);
+ return -ENOMEM;
+ }
+
+ return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvdec)->engine);
+}
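
r535_nvdec_new() above, and the nvenc/nvjpg/ofa constructors later in this patch, repeat the same three steps: count the hardware sclass entries, allocate an nvkm_engine_func with a copy of that table, and point every class ctor at the engine's RM-backed object constructor so class allocation goes through GSP-RM. A sketch of those steps as a shared helper, purely to show the pattern in one place -- the series does not add such a helper, and the name below is made up:

/* Illustrative only: each of the nvdec/nvenc/nvjpg/ofa r535.c files
 * open-codes the steps shown here.
 */
static struct nvkm_engine_func *
r535_engine_func_dup(const struct nvkm_engine_func *hw,
		     void *(*dtor)(struct nvkm_engine *),
		     int (*ctor)(const struct nvkm_oclass *, void *, u32,
				 struct nvkm_object **))
{
	struct nvkm_engine_func *rm;
	int nclass;

	/* Count hardware classes; sclass[] ends with a zero oclass entry. */
	for (nclass = 0; hw->sclass[nclass].oclass; nclass++);

	rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL);
	if (!rm)
		return NULL;

	rm->dtor = dtor;	/* each engine's dtor kfree()s this func table */
	for (int i = 0; i < nclass; i++) {
		rm->sclass[i].minver = hw->sclass[i].minver;
		rm->sclass[i].maxver = hw->sclass[i].maxver;
		rm->sclass[i].oclass = hw->sclass[i].oclass;
		rm->sclass[i].ctor = ctor;	/* route allocation through GSP-RM */
	}

	return rm;
}
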
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c
new file mode 100644
index 000000000000..808c8e010b9e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+tu102_nvdec = {
+ .sclass = {
+ { -1, -1, NVC4B0_VIDEO_DECODER },
+ {}
+ }
+};
+
+int
+tu102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_nvdec **pnvdec)
+{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_nvdec_new(&tu102_nvdec, device, type, inst, pnvdec);
+
+ return nvkm_nvdec_new_(gm107_nvdec_fwif, device, type, inst, 0, pnvdec);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
index 75bf4436bf3f..2c1495b730f3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
@@ -1,3 +1,8 @@
# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/nvenc/base.o
nvkm-y += nvkm/engine/nvenc/gm107.o
+nvkm-y += nvkm/engine/nvenc/tu102.o
+nvkm-y += nvkm/engine/nvenc/ga102.o
+nvkm-y += nvkm/engine/nvenc/ad102.o
+
+nvkm-y += nvkm/engine/nvenc/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c
new file mode 100644
index 000000000000..1b4619ff9e8e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ad102_nvenc = {
+ .sclass = {
+ { -1, -1, NVC9B7_VIDEO_ENCODER },
+ {}
+ }
+};
+
+int
+ad102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_nvenc **pnvenc)
+{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_nvenc_new(&ad102_nvenc, device, type, inst, pnvenc);
+
+ return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
index cf5dcfda7b25..d45dbb42a0db 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
@@ -34,6 +34,7 @@ nvkm_nvenc_dtor(struct nvkm_engine *engine)
static const struct nvkm_engine_func
nvkm_nvenc = {
.dtor = nvkm_nvenc_dtor,
+ .sclass = { {} },
};
int
@@ -59,4 +60,4 @@ nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *fwif, struct nvkm_device *device,
return nvkm_falcon_ctor(nvenc->func->flcn, &nvenc->engine.subdev,
nvenc->engine.subdev.name, 0, &nvenc->falcon);
-};
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c
new file mode 100644
index 000000000000..6463ab8e5871
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ga102_nvenc = {
+ .sclass = {
+ { -1, -1, NVC7B7_VIDEO_ENCODER },
+ {}
+ }
+};
+
+int
+ga102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_nvenc **pnvenc)
+{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_nvenc_new(&ga102_nvenc, device, type, inst, pnvenc);
+
+ return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c
index ad27d8b97569..922abb647ad3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c
@@ -38,7 +38,7 @@ gm107_nvenc_nofw(struct nvkm_nvenc *nvenc, int ver,
return 0;
}
-static const struct nvkm_nvenc_fwif
+const struct nvkm_nvenc_fwif
gm107_nvenc_fwif[] = {
{ -1, gm107_nvenc_nofw, &gm107_nvenc },
{}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
index 4130a2bfbb4f..7917affc6505 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
@@ -14,6 +14,11 @@ struct nvkm_nvenc_fwif {
const struct nvkm_nvenc_func *func;
};
+extern const struct nvkm_nvenc_fwif gm107_nvenc_fwif[];
+
int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *, enum nvkm_subdev_type,
int, struct nvkm_nvenc **pnvenc);
+
+int r535_nvenc_new(const struct nvkm_engine_func *, struct nvkm_device *,
+ enum nvkm_subdev_type, int, struct nvkm_nvenc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c
new file mode 100644
index 000000000000..c8a2a9196ce5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/object.h>
+#include <subdev/gsp.h>
+#include <engine/fifo.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+
+struct r535_nvenc_obj {
+ struct nvkm_object object;
+ struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_nvenc_obj_dtor(struct nvkm_object *object)
+{
+ struct r535_nvenc_obj *obj = container_of(object, typeof(*obj), object);
+
+ nvkm_gsp_rm_free(&obj->rm);
+ return obj;
+}
+
+static const struct nvkm_object_func
+r535_nvenc_obj = {
+ .dtor = r535_nvenc_obj_dtor,
+};
+
+static int
+r535_nvenc_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+ struct r535_nvenc_obj *obj;
+ NV_MSENC_ALLOCATION_PARAMETERS *args;
+
+ if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_object_ctor(&r535_nvenc_obj, oclass, &obj->object);
+ *pobject = &obj->object;
+
+ args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
+ sizeof(*args), &obj->rm);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->size = sizeof(*args);
+ args->engineInstance = oclass->engine->subdev.inst;
+
+ return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
+}
+
+static void *
+r535_nvenc_dtor(struct nvkm_engine *engine)
+{
+ struct nvkm_nvenc *nvenc = nvkm_nvenc(engine);
+
+ kfree(nvenc->engine.func);
+ return nvenc;
+}
+
+int
+r535_nvenc_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_nvenc **pnvenc)
+{
+ struct nvkm_engine_func *rm;
+ int nclass;
+
+ for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+ if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+ return -ENOMEM;
+
+ rm->dtor = r535_nvenc_dtor;
+ for (int i = 0; i < nclass; i++) {
+ rm->sclass[i].minver = hw->sclass[i].minver;
+ rm->sclass[i].maxver = hw->sclass[i].maxver;
+ rm->sclass[i].oclass = hw->sclass[i].oclass;
+ rm->sclass[i].ctor = r535_nvenc_obj_ctor;
+ }
+
+ if (!(*pnvenc = kzalloc(sizeof(**pnvenc), GFP_KERNEL))) {
+ kfree(rm);
+ return -ENOMEM;
+ }
+
+ return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvenc)->engine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c
new file mode 100644
index 000000000000..933864423bb3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+tu102_nvenc = {
+ .sclass = {
+ { -1, -1, NVC4B7_VIDEO_ENCODER },
+ {}
+ }
+};
+
+int
+tu102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_nvenc **pnvenc)
+{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_nvenc_new(&tu102_nvenc, device, type, inst, pnvenc);
+
+ return nvkm_nvenc_new_(gm107_nvenc_fwif, device, type, inst, pnvenc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild
new file mode 100644
index 000000000000..1408f664add6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/engine/nvjpg/ga100.o
+nvkm-y += nvkm/engine/nvjpg/ad102.o
+
+nvkm-y += nvkm/engine/nvjpg/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c
new file mode 100644
index 000000000000..62705dc6494c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ad102_nvjpg = {
+ .sclass = {
+ { -1, -1, NVC9D1_VIDEO_NVJPG },
+ {}
+ }
+};
+
+int
+ad102_nvjpg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pengine)
+{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_nvjpg_new(&ad102_nvjpg, device, type, inst, pengine);
+
+ return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c
new file mode 100644
index 000000000000..f550eb07da5a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ga100_nvjpg = {
+ .sclass = {
+ { -1, -1, NVC4D1_VIDEO_NVJPG },
+ {}
+ }
+};
+
+int
+ga100_nvjpg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pengine)
+{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_nvjpg_new(&ga100_nvjpg, device, type, inst, pengine);
+
+ return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h
new file mode 100644
index 000000000000..1e80cf70033a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_NVJPG_PRIV_H__
+#define __NVKM_NVJPG_PRIV_H__
+#include <engine/nvjpg.h>
+
+int r535_nvjpg_new(const struct nvkm_engine_func *, struct nvkm_device *,
+ enum nvkm_subdev_type, int, struct nvkm_engine **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c
new file mode 100644
index 000000000000..1babddc4eb80
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/object.h>
+#include <subdev/gsp.h>
+#include <engine/fifo.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+
+struct r535_nvjpg_obj {
+ struct nvkm_object object;
+ struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_nvjpg_obj_dtor(struct nvkm_object *object)
+{
+ struct r535_nvjpg_obj *obj = container_of(object, typeof(*obj), object);
+
+ nvkm_gsp_rm_free(&obj->rm);
+ return obj;
+}
+
+static const struct nvkm_object_func
+r535_nvjpg_obj = {
+ .dtor = r535_nvjpg_obj_dtor,
+};
+
+static int
+r535_nvjpg_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+ struct r535_nvjpg_obj *obj;
+ NV_NVJPG_ALLOCATION_PARAMETERS *args;
+
+ if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_object_ctor(&r535_nvjpg_obj, oclass, &obj->object);
+ *pobject = &obj->object;
+
+ args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
+ sizeof(*args), &obj->rm);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->size = sizeof(*args);
+ args->engineInstance = oclass->engine->subdev.inst;
+
+ return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
+}
+
+static void *
+r535_nvjpg_dtor(struct nvkm_engine *engine)
+{
+ kfree(engine->func);
+ return engine;
+}
+
+int
+r535_nvjpg_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
+{
+ struct nvkm_engine_func *rm;
+ int nclass, ret;
+
+ for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+ if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+ return -ENOMEM;
+
+ rm->dtor = r535_nvjpg_dtor;
+ for (int i = 0; i < nclass; i++) {
+ rm->sclass[i].minver = hw->sclass[i].minver;
+ rm->sclass[i].maxver = hw->sclass[i].maxver;
+ rm->sclass[i].oclass = hw->sclass[i].oclass;
+ rm->sclass[i].ctor = r535_nvjpg_obj_ctor;
+ }
+
+ ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
+ if (ret)
+ kfree(rm);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild
new file mode 100644
index 000000000000..99f1713d7e51
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/engine/ofa/ga100.o
+nvkm-y += nvkm/engine/ofa/ga102.o
+nvkm-y += nvkm/engine/ofa/ad102.o
+
+nvkm-y += nvkm/engine/ofa/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c
new file mode 100644
index 000000000000..7ac87ef26aec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ad102_ofa = {
+ .sclass = {
+ { -1, -1, NVC9FA_VIDEO_OFA },
+ {}
+ }
+};
+
+int
+ad102_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pengine)
+{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_ofa_new(&ad102_ofa, device, type, inst, pengine);
+
+ return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c
new file mode 100644
index 000000000000..ef474f61a1b5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ga100_ofa = {
+ .sclass = {
+ { -1, -1, NVC6FA_VIDEO_OFA },
+ {}
+ }
+};
+
+int
+ga100_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pengine)
+{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_ofa_new(&ga100_ofa, device, type, inst, pengine);
+
+ return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c
new file mode 100644
index 000000000000..bea255529993
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ga102_ofa = {
+ .sclass = {
+ { -1, -1, NVC7FA_VIDEO_OFA },
+ {}
+ }
+};
+
+int
+ga102_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pengine)
+{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_ofa_new(&ga102_ofa, device, type, inst, pengine);
+
+ return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h
new file mode 100644
index 000000000000..caf29e6bddb4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_OFA_PRIV_H__
+#define __NVKM_OFA_PRIV_H__
+#include <engine/ofa.h>
+
+int r535_ofa_new(const struct nvkm_engine_func *, struct nvkm_device *,
+ enum nvkm_subdev_type, int, struct nvkm_engine **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c
new file mode 100644
index 000000000000..438dc692eefe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/object.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu.h>
+#include <engine/fifo.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+
+struct r535_ofa_obj {
+ struct nvkm_object object;
+ struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_ofa_obj_dtor(struct nvkm_object *object)
+{
+ struct r535_ofa_obj *obj = container_of(object, typeof(*obj), object);
+
+ nvkm_gsp_rm_free(&obj->rm);
+ return obj;
+}
+
+static const struct nvkm_object_func
+r535_ofa_obj = {
+ .dtor = r535_ofa_obj_dtor,
+};
+
+static int
+r535_ofa_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+ struct r535_ofa_obj *obj;
+ NV_OFA_ALLOCATION_PARAMETERS *args;
+
+ if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_object_ctor(&r535_ofa_obj, oclass, &obj->object);
+ *pobject = &obj->object;
+
+ args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
+ sizeof(*args), &obj->rm);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->size = sizeof(*args);
+
+ return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
+}
+
+static void *
+r535_ofa_dtor(struct nvkm_engine *engine)
+{
+ kfree(engine->func);
+ return engine;
+}
+
+int
+r535_ofa_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
+{
+ struct nvkm_engine_func *rm;
+ int nclass, ret;
+
+ for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+ if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+ return -ENOMEM;
+
+ rm->dtor = r535_ofa_dtor;
+ for (int i = 0; i < nclass; i++) {
+ rm->sclass[i].minver = hw->sclass[i].minver;
+ rm->sclass[i].maxver = hw->sclass[i].maxver;
+ rm->sclass[i].oclass = hw->sclass[i].oclass;
+ rm->sclass[i].ctor = r535_ofa_obj_ctor;
+ }
+
+ ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
+ if (ret)
+ kfree(rm);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
index 19feadb1f67b..b43b7e5e2733 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
@@ -4,3 +4,5 @@ nvkm-y += nvkm/engine/sec2/gp102.o
nvkm-y += nvkm/engine/sec2/gp108.o
nvkm-y += nvkm/engine/sec2/tu102.o
nvkm-y += nvkm/engine/sec2/ga102.o
+
+nvkm-y += nvkm/engine/sec2/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/ga102.c
index 945abb8156d7..54be7596b046 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/ga102.c
@@ -21,6 +21,7 @@
*/
#include "priv.h"
#include <subdev/acr.h>
+#include <subdev/gsp.h>
#include <subdev/vfn.h>
#include <nvfw/flcn.h>
@@ -193,5 +194,10 @@ ga102_sec2_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
/* TOP info wasn't updated on Turing to reflect the PRI
* address change for some reason. We override it here.
*/
- return nvkm_sec2_new_(ga102_sec2_fwif, device, type, inst, 0x840000, psec2);
+ const u32 addr = 0x840000;
+
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_sec2_new(&ga102_sec2, device, type, inst, addr, psec2);
+
+ return nvkm_sec2_new_(ga102_sec2_fwif, device, type, inst, addr, psec2);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
index 172d2705c199..e158a40a4f09 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
@@ -4,6 +4,9 @@
#include <engine/sec2.h>
struct nvkm_acr_lsfw;
+int r535_sec2_new(const struct nvkm_sec2_func *,
+ struct nvkm_device *, enum nvkm_subdev_type, int, u32 addr, struct nvkm_sec2 **);
+
struct nvkm_sec2_func {
const struct nvkm_falcon_func *flcn;
u8 unit_unload;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/r535.c
new file mode 100644
index 000000000000..83a6bad5967e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/r535.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static void *
+r535_sec2_dtor(struct nvkm_engine *engine)
+{
+ struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
+
+ nvkm_falcon_dtor(&sec2->falcon);
+ return sec2;
+}
+
+static const struct nvkm_engine_func
+r535_sec2 = {
+ .dtor = r535_sec2_dtor,
+};
+
+int
+r535_sec2_new(const struct nvkm_sec2_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, u32 addr, struct nvkm_sec2 **psec2)
+{
+ struct nvkm_sec2 *sec2;
+ int ret;
+
+ if (!(sec2 = *psec2 = kzalloc(sizeof(*sec2), GFP_KERNEL)))
+ return -ENOMEM;
+
+ ret = nvkm_engine_ctor(&r535_sec2, device, type, inst, true, &sec2->engine);
+ if (ret)
+ return ret;
+
+ return nvkm_falcon_ctor(func->flcn, &sec2->engine.subdev, sec2->engine.subdev.name,
+ addr, &sec2->falcon);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
index 0afc4b2fa529..20452046d7d1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
@@ -21,6 +21,7 @@
*/
#include "priv.h"
#include <subdev/acr.h>
+#include <subdev/gsp.h>
#include <nvfw/sec2.h>
@@ -82,5 +83,10 @@ tu102_sec2_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
/* TOP info wasn't updated on Turing to reflect the PRI
* address change for some reason. We override it here.
*/
- return nvkm_sec2_new_(tu102_sec2_fwif, device, type, inst, 0x840000, psec2);
+ const u32 addr = 0x840000;
+
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_sec2_new(&tu102_sec2, device, type, inst, addr, psec2);
+
+ return nvkm_sec2_new_(tu102_sec2_fwif, device, type, inst, addr, psec2);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
index 9ffe7b921ccb..d6b015564401 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
@@ -8,5 +8,6 @@ nvkm-y += nvkm/falcon/v1.o
nvkm-y += nvkm/falcon/gm200.o
nvkm-y += nvkm/falcon/gp102.o
+nvkm-y += nvkm/falcon/tu102.o
nvkm-y += nvkm/falcon/ga100.o
nvkm-y += nvkm/falcon/ga102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
index 235149f73a69..3b790865aece 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
@@ -25,6 +25,22 @@
#include <subdev/timer.h>
#include <subdev/top.h>
+void
+nvkm_falcon_intr_retrigger(struct nvkm_falcon *falcon)
+{
+ if (falcon->func->intr_retrigger)
+ falcon->func->intr_retrigger(falcon);
+}
+
+bool
+nvkm_falcon_riscv_active(struct nvkm_falcon *falcon)
+{
+ if (!falcon->func->riscv_active)
+ return false;
+
+ return falcon->func->riscv_active(falcon);
+}
+
static const struct nvkm_falcon_func_dma *
nvkm_falcon_dma(struct nvkm_falcon *falcon, enum nvkm_falcon_mem *mem_type, u32 *mem_base)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/ga100.c b/drivers/gpu/drm/nouveau/nvkm/falcon/ga100.c
index 49fd32943916..5db94fb10afc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/ga100.c
@@ -21,6 +21,12 @@
*/
#include "priv.h"
+void
+ga100_flcn_intr_retrigger(struct nvkm_falcon *falcon)
+{
+ nvkm_falcon_wr32(falcon, 0x3e8, 0x00000001);
+}
+
int
ga100_flcn_fw_signature(struct nvkm_falcon_fw *fw, u32 *src_base_src)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/ga102.c b/drivers/gpu/drm/nouveau/nvkm/falcon/ga102.c
index 0ff450fe3590..834afa45f2fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/ga102.c
@@ -24,6 +24,12 @@
#include <subdev/mc.h>
#include <subdev/timer.h>
+bool
+ga102_flcn_riscv_active(struct nvkm_falcon *falcon)
+{
+ return (nvkm_falcon_rd32(falcon, falcon->addr2 + 0x388) & 0x00000080) != 0;
+}
+
static bool
ga102_flcn_dma_done(struct nvkm_falcon *falcon)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/tu102.c b/drivers/gpu/drm/nouveau/nvkm/falcon/tu102.c
new file mode 100644
index 000000000000..399918219485
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/tu102.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+bool
+tu102_flcn_riscv_active(struct nvkm_falcon *falcon)
+{
+ return (nvkm_falcon_rd32(falcon, falcon->addr2 + 0x240) & 0x00000001) != 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/ga102.c
index 45dcf493e972..c7d38609bb7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/ga102.c
@@ -20,6 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
+#include <subdev/gsp.h>
#include <nvfw/acr.h>
@@ -322,5 +323,8 @@ int
ga102_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_acr **pacr)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return -ENODEV;
+
return nvkm_acr_new_(ga102_acr_fwif, device, type, inst, pacr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
index c22d551c0078..565e9a070b23 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
@@ -201,5 +201,8 @@ int
tu102_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_acr **pacr)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return -ENODEV;
+
return nvkm_acr_new_(tu102_acr_fwif, device, type, inst, pacr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
index 8faee3317a74..9754c6872543 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
@@ -7,3 +7,5 @@ nvkm-y += nvkm/subdev/bar/gk20a.o
nvkm-y += nvkm/subdev/bar/gm107.o
nvkm-y += nvkm/subdev/bar/gm20b.o
nvkm-y += nvkm/subdev/bar/tu102.o
+
+nvkm-y += nvkm/subdev/bar/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
index d017a1b5e5dd..91bc53be97ff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
@@ -93,8 +93,16 @@ static int
nvkm_bar_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_bar *bar = nvkm_bar(subdev);
+
+ if (!subdev->use.enabled)
+ return 0;
+
if (bar->func->bar1.fini)
bar->func->bar1.fini(bar);
+
+ if (!suspend) /* Handled by instmem. */
+ nvkm_bar_bar2_fini(subdev->device);
+
return 0;
}
@@ -120,7 +128,7 @@ static void *
nvkm_bar_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_bar *bar = nvkm_bar(subdev);
- nvkm_bar_bar2_fini(subdev->device);
+
return bar->func->dtor(bar);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
index daebfc991c76..d0168e0b78fb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
@@ -4,6 +4,9 @@
#define nvkm_bar(p) container_of((p), struct nvkm_bar, subdev)
#include <subdev/bar.h>
+int r535_bar_new_(const struct nvkm_bar_func *,
+ struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
+
void nvkm_bar_ctor(const struct nvkm_bar_func *, struct nvkm_device *,
enum nvkm_subdev_type, int, struct nvkm_bar *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
new file mode 100644
index 000000000000..4135690326f4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+#include <core/mm.h>
+#include <subdev/fb.h>
+#include <subdev/gsp.h>
+#include <subdev/instmem.h>
+#include <subdev/mmu/vmm.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h>
+
+static void
+r535_bar_flush(struct nvkm_bar *bar)
+{
+ ioread32_native(bar->flushBAR2);
+}
+
+static void
+r535_bar_bar2_wait(struct nvkm_bar *base)
+{
+}
+
+static int
+r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr)
+{
+ rpc_update_bar_pde_v15_00 *rpc;
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UPDATE_BAR_PDE, sizeof(*rpc));
+ if (WARN_ON(IS_ERR_OR_NULL(rpc)))
+ return -EIO;
+
+ rpc->info.barType = NV_RPC_UPDATE_PDE_BAR_2;
+ rpc->info.entryValue = addr ? ((addr >> 4) | 2) : 0; /* PD3 entry format! */
+ rpc->info.entryLevelShift = 47; //XXX: probably fetch this from mmu!
+
+ return nvkm_gsp_rpc_wr(gsp, rpc, true);
+}
+
+static void
+r535_bar_bar2_fini(struct nvkm_bar *bar)
+{
+ struct nvkm_gsp *gsp = bar->subdev.device->gsp;
+
+ bar->flushBAR2 = bar->flushBAR2PhysMode;
+ nvkm_done(bar->flushFBZero);
+
+ WARN_ON(r535_bar_bar2_update_pde(gsp, 0));
+}
+
+static void
+r535_bar_bar2_init(struct nvkm_bar *bar)
+{
+ struct nvkm_device *device = bar->subdev.device;
+ struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm;
+ struct nvkm_gsp *gsp = device->gsp;
+
+ WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->pd->pde[0]->pt[0]->addr));
+ vmm->rm.bar2_pdb = gsp->bar.rm_bar2_pdb;
+
+ if (!bar->flushFBZero) {
+ struct nvkm_memory *fbZero;
+ int ret;
+
+ ret = nvkm_ram_wrap(device, 0, 0x1000, &fbZero);
+ if (ret == 0) {
+ ret = nvkm_memory_kmap(fbZero, &bar->flushFBZero);
+ nvkm_memory_unref(&fbZero);
+ }
+ WARN_ON(ret);
+ }
+
+ bar->bar2 = true;
+ bar->flushBAR2 = nvkm_kmap(bar->flushFBZero);
+ WARN_ON(!bar->flushBAR2);
+}
+
+static void
+r535_bar_bar1_wait(struct nvkm_bar *base)
+{
+}
+
+static void
+r535_bar_bar1_fini(struct nvkm_bar *base)
+{
+}
+
+static void
+r535_bar_bar1_init(struct nvkm_bar *bar)
+{
+ struct nvkm_device *device = bar->subdev.device;
+ struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_vmm *vmm = gf100_bar(bar)->bar[1].vmm;
+ struct nvkm_memory *pd3;
+ int ret;
+
+ ret = nvkm_ram_wrap(device, gsp->bar.rm_bar1_pdb, 0x1000, &pd3);
+ if (WARN_ON(ret))
+ return;
+
+ nvkm_memory_unref(&vmm->pd->pt[0]->memory);
+
+ ret = nvkm_memory_kmap(pd3, &vmm->pd->pt[0]->memory);
+ nvkm_memory_unref(&pd3);
+ if (WARN_ON(ret))
+ return;
+
+ vmm->pd->pt[0]->addr = nvkm_memory_addr(vmm->pd->pt[0]->memory);
+}
+
+static void *
+r535_bar_dtor(struct nvkm_bar *bar)
+{
+ void *data = gf100_bar_dtor(bar);
+
+ nvkm_memory_unref(&bar->flushFBZero);
+
+ if (bar->flushBAR2PhysMode)
+ iounmap(bar->flushBAR2PhysMode);
+
+ kfree(bar->func);
+ return data;
+}
+
+int
+r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_bar **pbar)
+{
+ struct nvkm_bar_func *rm;
+ struct nvkm_bar *bar;
+ int ret;
+
+ if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+ return -ENOMEM;
+
+ rm->dtor = r535_bar_dtor;
+ rm->oneinit = hw->oneinit;
+ rm->bar1.init = r535_bar_bar1_init;
+ rm->bar1.fini = r535_bar_bar1_fini;
+ rm->bar1.wait = r535_bar_bar1_wait;
+ rm->bar1.vmm = hw->bar1.vmm;
+ rm->bar2.init = r535_bar_bar2_init;
+ rm->bar2.fini = r535_bar_bar2_fini;
+ rm->bar2.wait = r535_bar_bar2_wait;
+ rm->bar2.vmm = hw->bar2.vmm;
+ rm->flush = r535_bar_flush;
+
+ ret = gf100_bar_new_(rm, device, type, inst, &bar);
+ *pbar = bar;
+ if (ret) {
+ if (!bar)
+ kfree(rm);
+ return ret;
+ }
+
+ bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE);
+ if (!bar->flushBAR2PhysMode)
+ return -ENOMEM;
+
+ bar->flushBAR2 = bar->flushBAR2PhysMode;
+
+ gf100_bar(*pbar)->bar2_halve = true;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c
index c25ab407b85d..b4196edad5b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c
@@ -22,6 +22,7 @@
#include "gf100.h"
#include <core/memory.h>
+#include <subdev/gsp.h>
#include <subdev/timer.h>
static void
@@ -95,5 +96,8 @@ int
tu102_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_bar **pbar)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_bar_new_(&tu102_bar, device, type, inst, pbar);
+
return gf100_bar_new_(&tu102_bar, device, type, inst, pbar);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
index 6c318e41bde0..91f486ee4c42 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
@@ -46,6 +46,14 @@ nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size)
return true;
}
+void *
+nvbios_pointer(struct nvkm_bios *bios, u32 addr)
+{
+ if (likely(nvbios_addr(bios, &addr, 0)))
+ return &bios->data[addr];
+ return NULL;
+}
+
u8
nvbios_rd08(struct nvkm_bios *bios, u32 addr)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
index 80b5aaceeaad..8e1e0b057a0b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
@@ -24,6 +24,8 @@
*/
#include "priv.h"
+#include <subdev/gsp.h>
+
static void
gf100_bus_intr(struct nvkm_bus *bus)
{
@@ -72,5 +74,8 @@ int
gf100_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_bus **pbus)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return -ENODEV;
+
return nvkm_bus_new_(&gf100_bus, device, type, inst, pbus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
index d1abb64841da..5f97bffca979 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
@@ -16,3 +16,5 @@ nvkm-y += nvkm/subdev/devinit/gm200.o
nvkm-y += nvkm/subdev/devinit/gv100.o
nvkm-y += nvkm/subdev/devinit/tu102.o
nvkm-y += nvkm/subdev/devinit/ga100.o
+
+nvkm-y += nvkm/subdev/devinit/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c
index 6b280b05c4ca..5f0b12a1fc38 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c
@@ -24,6 +24,7 @@
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/clk/pll.h>
+#include <subdev/gsp.h>
static int
ga100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
@@ -62,8 +63,19 @@ ga100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
return ret;
}
+static void
+ga100_devinit_disable(struct nvkm_devinit *init)
+{
+ struct nvkm_device *device = init->subdev.device;
+ u32 r820c04 = nvkm_rd32(device, 0x820c04);
+
+ if (r820c04 & 0x00000001)
+ nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
+}
+
static const struct nvkm_devinit_func
ga100_devinit = {
+ .disable = ga100_devinit_disable,
.init = nv50_devinit_init,
.post = tu102_devinit_post,
.pll_set = ga100_devinit_pll_set,
@@ -73,5 +85,8 @@ int
ga100_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_devinit_new(&ga100_devinit, device, type, inst, pinit);
+
return nv50_devinit_new_(&ga100_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
index a648482d06e9..06bbfdcc788c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
@@ -4,6 +4,9 @@
#define nvkm_devinit(p) container_of((p), struct nvkm_devinit, subdev)
#include <subdev/devinit.h>
+int r535_devinit_new(const struct nvkm_devinit_func *,
+ struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+
struct nvkm_devinit_func {
void *(*dtor)(struct nvkm_devinit *);
void (*preinit)(struct nvkm_devinit *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
new file mode 100644
index 000000000000..666eb93b1742
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+
+static void *
+r535_devinit_dtor(struct nvkm_devinit *devinit)
+{
+ kfree(devinit->func);
+ return devinit;
+}
+
+int
+r535_devinit_new(const struct nvkm_devinit_func *hw,
+ struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_devinit **pdevinit)
+{
+ struct nvkm_devinit_func *rm;
+ int ret;
+
+ if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+ return -ENOMEM;
+
+ rm->dtor = r535_devinit_dtor;
+ rm->post = hw->post;
+ rm->disable = hw->disable;
+
+ ret = nv50_devinit_new_(rm, device, type, inst, pdevinit);
+ if (ret)
+ kfree(rm);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
index 40997ad1d101..f406b1525a4a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
@@ -24,6 +24,7 @@
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/clk/pll.h>
+#include <subdev/gsp.h>
static int
tu102_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
@@ -100,5 +101,8 @@ int
tu102_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_devinit_new(&tu102_devinit, device, type, inst, pinit);
+
return nv50_devinit_new_(&tu102_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
index 967efaddae28..5390417a58b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
@@ -22,6 +22,7 @@
#include "priv.h"
#include <core/memory.h>
+#include <subdev/gsp.h>
#include <subdev/mc.h>
#include <subdev/mmu.h>
#include <subdev/vfn.h>
@@ -175,7 +176,12 @@ int
tu102_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fault **pfault)
{
- int ret = nvkm_fault_new_(&tu102_fault, device, type, inst, pfault);
+ int ret;
+
+ if (nvkm_gsp_rm(device->gsp))
+ return -ENODEV;
+
+ ret = nvkm_fault_new_(&tu102_fault, device, type, inst, pfault);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index 394c305e759a..d1611ad3bf81 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -36,6 +36,8 @@ nvkm-y += nvkm/subdev/fb/tu102.o
nvkm-y += nvkm/subdev/fb/ga100.o
nvkm-y += nvkm/subdev/fb/ga102.o
+nvkm-y += nvkm/subdev/fb/r535.o
+
nvkm-y += nvkm/subdev/fb/ram.o
nvkm-y += nvkm/subdev/fb/ramnv04.o
nvkm-y += nvkm/subdev/fb/ramnv10.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
index 12037fd4fdf2..e9e7c1d5c4c4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
@@ -22,6 +22,8 @@
#include "gf100.h"
#include "ram.h"
+#include <subdev/gsp.h>
+
static const struct nvkm_fb_func
ga100_fb = {
.dtor = gf100_fb_dtor,
@@ -38,5 +40,8 @@ ga100_fb = {
int
ga100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_fb_new(&ga100_fb, device, type, inst, pfb);
+
return gf100_fb_new_(&ga100_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
index 76f6877b54c6..25f82b372bca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
@@ -22,6 +22,7 @@
#include "gf100.h"
#include "ram.h"
+#include <subdev/gsp.h>
#include <engine/nvdec.h>
static u64
@@ -59,6 +60,9 @@ ga102_fb = {
int
ga102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_fb_new(&ga102_fb, device, type, inst, pfb);
+
return gf100_fb_new_(&ga102_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 77d6a8c10829..35c55dfba23d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -6,6 +6,9 @@
#include <subdev/therm.h>
struct nvkm_bios;
+int r535_fb_new(const struct nvkm_fb_func *,
+ struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+
struct nvkm_fb_func {
void *(*dtor)(struct nvkm_fb *);
u32 (*tags)(struct nvkm_fb *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/r535.c
new file mode 100644
index 000000000000..d32515010167
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/r535.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "ram.h"
+
+#include <subdev/gsp.h>
+
+static const struct nvkm_ram_func
+r535_fb_ram = {
+};
+
+static int
+r535_fb_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+ struct nvkm_gsp *gsp = fb->subdev.device->gsp;
+ struct nvkm_ram *ram;
+ int ret;
+
+ if (!(ram = *pram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+ return -ENOMEM;
+
+ ram->func = &r535_fb_ram;
+ ram->fb = fb;
+	ram->type = NVKM_RAM_TYPE_UNKNOWN; /* TODO: pull this from GSP. */
+ ram->size = gsp->fb.size;
+ ram->stolen = false;
+ mutex_init(&ram->mutex);
+
+ for (int i = 0; i < gsp->fb.region_nr; i++) {
+ ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL,
+ gsp->fb.region[i].addr >> NVKM_RAM_MM_SHIFT,
+ gsp->fb.region[i].size >> NVKM_RAM_MM_SHIFT,
+ 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void *
+r535_fb_dtor(struct nvkm_fb *fb)
+{
+ kfree(fb->func);
+ return fb;
+}
+
+int
+r535_fb_new(const struct nvkm_fb_func *hw,
+ struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+{
+ struct nvkm_fb_func *rm;
+ int ret;
+
+ if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+ return -ENOMEM;
+
+ rm->dtor = r535_fb_dtor;
+ rm->sysmem.flush_page_init = hw->sysmem.flush_page_init;
+ rm->vidmem.size = hw->vidmem.size;
+ rm->ram_new = r535_fb_ram_new;
+
+ ret = nvkm_fb_new_(rm, device, type, inst, pfb);
+ if (ret)
+ kfree(rm);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
index 5c34416cb637..c826980bf70e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
@@ -88,12 +88,20 @@ nvkm_vram_dtor(struct nvkm_memory *memory)
struct nvkm_vram *vram = nvkm_vram(memory);
struct nvkm_mm_node *next = vram->mn;
struct nvkm_mm_node *node;
- mutex_lock(&vram->ram->mutex);
- while ((node = next)) {
- next = node->next;
- nvkm_mm_free(&vram->ram->vram, &node);
+
+ if (next) {
+		if (likely(next->nl_entry.next)) {
+ mutex_lock(&vram->ram->mutex);
+ while ((node = next)) {
+ next = node->next;
+ nvkm_mm_free(&vram->ram->vram, &node);
+ }
+ mutex_unlock(&vram->ram->mutex);
+ } else {
+ kfree(vram->mn);
+ }
}
- mutex_unlock(&vram->ram->mutex);
+
return vram;
}
@@ -109,6 +117,34 @@ nvkm_vram = {
};
int
+nvkm_ram_wrap(struct nvkm_device *device, u64 addr, u64 size,
+ struct nvkm_memory **pmemory)
+{
+ struct nvkm_ram *ram;
+ struct nvkm_vram *vram;
+
+ if (!device->fb || !(ram = device->fb->ram))
+ return -ENODEV;
+ ram = device->fb->ram;
+
+ if (!(vram = kzalloc(sizeof(*vram), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_memory_ctor(&nvkm_vram, &vram->memory);
+ vram->ram = ram;
+ vram->page = NVKM_RAM_MM_SHIFT;
+ *pmemory = &vram->memory;
+
+ vram->mn = kzalloc(sizeof(*vram->mn), GFP_KERNEL);
+ if (!vram->mn)
+ return -ENOMEM;
+
+ vram->mn->offset = addr >> NVKM_RAM_MM_SHIFT;
+ vram->mn->length = size >> NVKM_RAM_MM_SHIFT;
+ return 0;
+}
+
+int
nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size,
bool contig, bool back, struct nvkm_memory **pmemory)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c
index bcc23d4c8115..f7d2a749ce3f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c
@@ -22,6 +22,8 @@
#include "gf100.h"
#include "ram.h"
+#include <subdev/gsp.h>
+
bool
tu102_fb_vpr_scrub_required(struct nvkm_fb *fb)
{
@@ -46,6 +48,9 @@ tu102_fb = {
int
tu102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_fb_new(&tu102_fb, device, type, inst, pfb);
+
return gf100_fb_new_(&tu102_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
index 7dc99492f536..d621edbdff9d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
@@ -23,6 +23,8 @@
*/
#include "priv.h"
+#include <subdev/gsp.h>
+
static u32
gm107_fuse_read(struct nvkm_fuse *fuse, u32 addr)
{
@@ -39,5 +41,8 @@ int
gm107_fuse_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fuse **pfuse)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return -ENODEV;
+
return nvkm_fuse_new_(&gm107_fuse, device, type, inst, pfuse);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c
index 4a96f926b66d..4dbffae21ddc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c
@@ -21,6 +21,8 @@
*/
#include "priv.h"
+#include <subdev/gsp.h>
+
static void
ga102_gpio_reset(struct nvkm_gpio *gpio, u8 match)
{
@@ -115,5 +117,8 @@ int
ga102_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_gpio **pgpio)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return -ENODEV;
+
return nvkm_gpio_new_(&ga102_gpio, device, type, inst, pgpio);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
index c0e4cdb45520..5f7063d5579b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
@@ -23,6 +23,8 @@
*/
#include "priv.h"
+#include <subdev/gsp.h>
+
static void
gk104_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
{
@@ -71,5 +73,8 @@ int
gk104_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_gpio **pgpio)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return -ENODEV;
+
return nvkm_gpio_new_(&gk104_gpio, device, type, inst, pgpio);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
index 7f61a1ed158b..16bf2f1bb780 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
@@ -1,4 +1,12 @@
# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/gsp/base.o
+nvkm-y += nvkm/subdev/gsp/fwsec.o
+
nvkm-y += nvkm/subdev/gsp/gv100.o
+nvkm-y += nvkm/subdev/gsp/tu102.o
+nvkm-y += nvkm/subdev/gsp/tu116.o
+nvkm-y += nvkm/subdev/gsp/ga100.o
nvkm-y += nvkm/subdev/gsp/ga102.o
+nvkm-y += nvkm/subdev/gsp/ad102.o
+
+nvkm-y += nvkm/subdev/gsp/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
new file mode 100644
index 000000000000..c849c6299c52
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static const struct nvkm_gsp_func
+ad102_gsp_r535_113_01 = {
+ .flcn = &ga102_gsp_flcn,
+ .fwsec = &ga102_gsp_fwsec,
+
+ .sig_section = ".fwsignature_ad10x",
+
+ .wpr_heap.os_carveout_size = 20 << 20,
+ .wpr_heap.base_size = 8 << 20,
+ .wpr_heap.min_size = 84 << 20,
+
+ .booter.ctor = ga102_gsp_booter_ctor,
+
+ .dtor = r535_gsp_dtor,
+ .oneinit = tu102_gsp_oneinit,
+ .init = r535_gsp_init,
+ .fini = r535_gsp_fini,
+ .reset = ga102_gsp_reset,
+
+ .rm = &r535_gsp_rm,
+};
+
+static struct nvkm_gsp_fwif
+ad102_gsps[] = {
+ { 0, r535_gsp_load, &ad102_gsp_r535_113_01, "535.113.01", true },
+ {}
+};
+
+int
+ad102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gsp **pgsp)
+{
+ return nvkm_gsp_new_(ad102_gsps, device, type, inst, pgsp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
index 591ac95c2669..04bceaa28a19 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
@@ -20,15 +20,78 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
-#include <core/falcon.h>
-#include <core/firmware.h>
-#include <subdev/acr.h>
-#include <subdev/top.h>
+
+int
+nvkm_gsp_intr_nonstall(struct nvkm_gsp *gsp, enum nvkm_subdev_type type, int inst)
+{
+ for (int i = 0; i < gsp->intr_nr; i++) {
+ if (gsp->intr[i].type == type && gsp->intr[i].inst == inst) {
+ if (gsp->intr[i].nonstall != ~0)
+ return gsp->intr[i].nonstall;
+
+ return -EINVAL;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int
+nvkm_gsp_intr_stall(struct nvkm_gsp *gsp, enum nvkm_subdev_type type, int inst)
+{
+ for (int i = 0; i < gsp->intr_nr; i++) {
+ if (gsp->intr[i].type == type && gsp->intr[i].inst == inst) {
+ if (gsp->intr[i].stall != ~0)
+ return gsp->intr[i].stall;
+
+ return -EINVAL;
+ }
+ }
+
+ return -ENOENT;
+}
+
+static int
+nvkm_gsp_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+ struct nvkm_gsp *gsp = nvkm_gsp(subdev);
+
+ if (!gsp->func->fini)
+ return 0;
+
+ return gsp->func->fini(gsp, suspend);
+}
+
+static int
+nvkm_gsp_init(struct nvkm_subdev *subdev)
+{
+ struct nvkm_gsp *gsp = nvkm_gsp(subdev);
+
+ if (!gsp->func->init)
+ return 0;
+
+ return gsp->func->init(gsp);
+}
+
+static int
+nvkm_gsp_oneinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_gsp *gsp = nvkm_gsp(subdev);
+
+ if (!gsp->func->oneinit)
+ return 0;
+
+ return gsp->func->oneinit(gsp);
+}
static void *
nvkm_gsp_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_gsp *gsp = nvkm_gsp(subdev);
+
+ if (gsp->func && gsp->func->dtor)
+ gsp->func->dtor(gsp);
+
nvkm_falcon_dtor(&gsp->falcon);
return gsp;
}
@@ -36,6 +99,9 @@ nvkm_gsp_dtor(struct nvkm_subdev *subdev)
static const struct nvkm_subdev_func
nvkm_gsp = {
.dtor = nvkm_gsp_dtor,
+ .oneinit = nvkm_gsp_oneinit,
+ .init = nvkm_gsp_init,
+ .fini = nvkm_gsp_fini,
};
int
@@ -54,6 +120,8 @@ nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
return PTR_ERR(fwif);
gsp->func = fwif->func;
+ gsp->rm = gsp->func->rm;
- return nvkm_falcon_ctor(gsp->func->flcn, &gsp->subdev, gsp->subdev.name, 0, &gsp->falcon);
+ return nvkm_falcon_ctor(gsp->func->flcn, &gsp->subdev, gsp->subdev.name, 0x110000,
+ &gsp->falcon);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
new file mode 100644
index 000000000000..330d72b1a4af
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
@@ -0,0 +1,359 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/pmu.h>
+
+#include <nvfw/fw.h>
+
+union nvfw_falcon_appif_hdr {
+ struct nvfw_falcon_appif_hdr_v1 {
+ u8 ver;
+ u8 hdr;
+ u8 len;
+ u8 cnt;
+ } v1;
+};
+
+union nvfw_falcon_appif {
+ struct nvfw_falcon_appif_v1 {
+#define NVFW_FALCON_APPIF_ID_DMEMMAPPER 0x00000004
+ u32 id;
+ u32 dmem_base;
+ } v1;
+};
+
+union nvfw_falcon_appif_dmemmapper {
+ struct {
+ u32 signature;
+ u16 version;
+ u16 size;
+ u32 cmd_in_buffer_offset;
+ u32 cmd_in_buffer_size;
+ u32 cmd_out_buffer_offset;
+ u32 cmd_out_buffer_size;
+ u32 nvf_img_data_buffer_offset;
+ u32 nvf_img_data_buffer_size;
+ u32 printf_buffer_hdr;
+ u32 ucode_build_time_stamp;
+ u32 ucode_signature;
+#define NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS 0x00000015
+#define NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB 0x00000019
+ u32 init_cmd;
+ u32 ucode_feature;
+ u32 ucode_cmd_mask0;
+ u32 ucode_cmd_mask1;
+ u32 multi_tgt_tbl;
+ } v3;
+};
+
+struct nvfw_fwsec_frts_cmd {
+ struct {
+ u32 ver;
+ u32 hdr;
+ u64 addr;
+ u32 size;
+ u32 flags;
+ } read_vbios;
+ struct {
+ u32 ver;
+ u32 hdr;
+ u32 addr;
+ u32 size;
+#define NVFW_FRTS_CMD_REGION_TYPE_FB 0x00000002
+ u32 type;
+ } frts_region;
+};
+
+static int
+nvkm_gsp_fwsec_patch(struct nvkm_gsp *gsp, struct nvkm_falcon_fw *fw, u32 if_offset, u32 init_cmd)
+{
+ union nvfw_falcon_appif_hdr *hdr = (void *)(fw->fw.img + fw->dmem_base_img + if_offset);
+ const u8 *dmem = fw->fw.img + fw->dmem_base_img;
+ int i;
+
+ if (WARN_ON(hdr->v1.ver != 1))
+ return -EINVAL;
+
+ for (i = 0; i < hdr->v1.cnt; i++) {
+ union nvfw_falcon_appif *app = (void *)((u8 *)hdr + hdr->v1.hdr + i * hdr->v1.len);
+ union nvfw_falcon_appif_dmemmapper *dmemmap;
+ struct nvfw_fwsec_frts_cmd *frtscmd;
+
+ if (app->v1.id != NVFW_FALCON_APPIF_ID_DMEMMAPPER)
+ continue;
+
+ dmemmap = (void *)(dmem + app->v1.dmem_base);
+ dmemmap->v3.init_cmd = init_cmd;
+
+ frtscmd = (void *)(dmem + dmemmap->v3.cmd_in_buffer_offset);
+
+ frtscmd->read_vbios.ver = 1;
+ frtscmd->read_vbios.hdr = sizeof(frtscmd->read_vbios);
+ frtscmd->read_vbios.addr = 0;
+ frtscmd->read_vbios.size = 0;
+ frtscmd->read_vbios.flags = 2;
+
+ if (init_cmd == NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS) {
+ frtscmd->frts_region.ver = 1;
+ frtscmd->frts_region.hdr = sizeof(frtscmd->frts_region);
+ frtscmd->frts_region.addr = gsp->fb.wpr2.frts.addr >> 12;
+ frtscmd->frts_region.size = gsp->fb.wpr2.frts.size >> 12;
+ frtscmd->frts_region.type = NVFW_FRTS_CMD_REGION_TYPE_FB;
+ }
+
+ break;
+ }
+
+ if (WARN_ON(i == hdr->v1.cnt))
+ return -EINVAL;
+
+ return 0;
+}
+
+union nvfw_falcon_ucode_desc {
+ struct nvkm_falcon_ucode_desc_v2 {
+ u32 Hdr;
+ u32 StoredSize;
+ u32 UncompressedSize;
+ u32 VirtualEntry;
+ u32 InterfaceOffset;
+ u32 IMEMPhysBase;
+ u32 IMEMLoadSize;
+ u32 IMEMVirtBase;
+ u32 IMEMSecBase;
+ u32 IMEMSecSize;
+ u32 DMEMOffset;
+ u32 DMEMPhysBase;
+ u32 DMEMLoadSize;
+ u32 altIMEMLoadSize;
+ u32 altDMEMLoadSize;
+ } v2;
+
+ struct nvkm_falcon_ucode_desc_v3 {
+ u32 Hdr;
+ u32 StoredSize;
+ u32 PKCDataOffset;
+ u32 InterfaceOffset;
+ u32 IMEMPhysBase;
+ u32 IMEMLoadSize;
+ u32 IMEMVirtBase;
+ u32 DMEMPhysBase;
+ u32 DMEMLoadSize;
+ u16 EngineIdMask;
+ u8 UcodeId;
+ u8 SignatureCount;
+ u16 SignatureVersions;
+ u16 Reserved;
+ } v3;
+};
+
+static int
+nvkm_gsp_fwsec_v2(struct nvkm_gsp *gsp, const char *name,
+ const struct nvkm_falcon_ucode_desc_v2 *desc, u32 size, u32 init_cmd,
+ struct nvkm_falcon_fw *fw)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ const struct firmware *bl;
+ const struct nvfw_bin_hdr *hdr;
+ const struct nvfw_bl_desc *bld;
+ int ret;
+
+ /* Build ucode. */
+ ret = nvkm_falcon_fw_ctor(gsp->func->fwsec, name, subdev->device, true,
+ (u8 *)desc + size, desc->IMEMLoadSize + desc->DMEMLoadSize,
+ &gsp->falcon, fw);
+ if (WARN_ON(ret))
+ return ret;
+
+ fw->nmem_base_img = 0;
+ fw->nmem_base = desc->IMEMPhysBase;
+ fw->nmem_size = desc->IMEMLoadSize - desc->IMEMSecSize;
+
+ fw->imem_base_img = 0;
+ fw->imem_base = desc->IMEMSecBase;
+ fw->imem_size = desc->IMEMSecSize;
+
+ fw->dmem_base_img = desc->DMEMOffset;
+ fw->dmem_base = desc->DMEMPhysBase;
+ fw->dmem_size = desc->DMEMLoadSize;
+
+ /* Bootloader. */
+ ret = nvkm_firmware_get(subdev, "acr/bl", 0, &bl);
+ if (ret)
+ return ret;
+
+ hdr = nvfw_bin_hdr(subdev, bl->data);
+ bld = nvfw_bl_desc(subdev, bl->data + hdr->header_offset);
+
+ fw->boot_addr = bld->start_tag << 8;
+ fw->boot_size = bld->code_size;
+ fw->boot = kmemdup(bl->data + hdr->data_offset + bld->code_off, fw->boot_size, GFP_KERNEL);
+ if (!fw->boot)
+ ret = -ENOMEM;
+
+ nvkm_firmware_put(bl);
+
+ /* Patch in interface data. */
+ return nvkm_gsp_fwsec_patch(gsp, fw, desc->InterfaceOffset, init_cmd);
+}
+
+static int
+nvkm_gsp_fwsec_v3(struct nvkm_gsp *gsp, const char *name,
+ const struct nvkm_falcon_ucode_desc_v3 *desc, u32 size, u32 init_cmd,
+ struct nvkm_falcon_fw *fw)
+{
+ struct nvkm_device *device = gsp->subdev.device;
+ struct nvkm_bios *bios = device->bios;
+ int ret;
+
+ /* Build ucode. */
+ ret = nvkm_falcon_fw_ctor(gsp->func->fwsec, name, device, true,
+ (u8 *)desc + size, desc->IMEMLoadSize + desc->DMEMLoadSize,
+ &gsp->falcon, fw);
+ if (WARN_ON(ret))
+ return ret;
+
+ fw->imem_base_img = 0;
+ fw->imem_base = desc->IMEMPhysBase;
+ fw->imem_size = desc->IMEMLoadSize;
+ fw->dmem_base_img = desc->IMEMLoadSize;
+ fw->dmem_base = desc->DMEMPhysBase;
+ fw->dmem_size = ALIGN(desc->DMEMLoadSize, 256);
+ fw->dmem_sign = desc->PKCDataOffset;
+ fw->boot_addr = 0;
+ fw->fuse_ver = desc->SignatureVersions;
+ fw->ucode_id = desc->UcodeId;
+ fw->engine_id = desc->EngineIdMask;
+
+ /* Patch in signature. */
+ ret = nvkm_falcon_fw_sign(fw, fw->dmem_base_img + desc->PKCDataOffset, 96 * 4,
+ nvbios_pointer(bios, 0), desc->SignatureCount,
+ (u8 *)desc + 0x2c - (u8 *)nvbios_pointer(bios, 0), 0, 0);
+ if (WARN_ON(ret))
+ return ret;
+
+ /* Patch in interface data. */
+ return nvkm_gsp_fwsec_patch(gsp, fw, desc->InterfaceOffset, init_cmd);
+}
+
+static int
+nvkm_gsp_fwsec(struct nvkm_gsp *gsp, const char *name, u32 init_cmd)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_bios *bios = device->bios;
+ const union nvfw_falcon_ucode_desc *desc;
+ struct nvbios_pmuE flcn_ucode;
+ u8 idx, ver, hdr;
+ u32 data;
+ u16 size, vers;
+ struct nvkm_falcon_fw fw = {};
+ u32 mbox0 = 0;
+ int ret;
+
+ /* Lookup in VBIOS. */
+ for (idx = 0; (data = nvbios_pmuEp(bios, idx, &ver, &hdr, &flcn_ucode)); idx++) {
+ if (flcn_ucode.type == 0x85)
+ break;
+ }
+
+ if (WARN_ON(!data))
+ return -EINVAL;
+
+	/* Determine version. */
+ desc = nvbios_pointer(bios, flcn_ucode.data);
+ if (WARN_ON(!(desc->v2.Hdr & 0x00000001)))
+ return -EINVAL;
+
+ size = (desc->v2.Hdr & 0xffff0000) >> 16;
+ vers = (desc->v2.Hdr & 0x0000ff00) >> 8;
+
+ switch (vers) {
+ case 2: ret = nvkm_gsp_fwsec_v2(gsp, name, &desc->v2, size, init_cmd, &fw); break;
+ case 3: ret = nvkm_gsp_fwsec_v3(gsp, name, &desc->v3, size, init_cmd, &fw); break;
+ default:
+ nvkm_error(subdev, "%s(v%d): version unknown\n", name, vers);
+ return -EINVAL;
+ }
+
+ if (ret) {
+ nvkm_error(subdev, "%s(v%d): %d\n", name, vers, ret);
+ return ret;
+ }
+
+ /* Boot. */
+ ret = nvkm_falcon_fw_boot(&fw, subdev, true, &mbox0, NULL, 0, 0);
+ nvkm_falcon_fw_dtor(&fw);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int
+nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ int ret;
+ u32 err;
+
+ ret = nvkm_gsp_fwsec(gsp, "fwsec-sb", NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB);
+ if (ret)
+ return ret;
+
+ /* Verify. */
+ err = nvkm_rd32(device, 0x001400 + (0xf * 4)) & 0x0000ffff;
+ if (err) {
+ nvkm_error(subdev, "fwsec-sb: 0x%04x\n", err);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int
+nvkm_gsp_fwsec_frts(struct nvkm_gsp *gsp)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ int ret;
+ u32 err, wpr2_lo, wpr2_hi;
+
+ ret = nvkm_gsp_fwsec(gsp, "fwsec-frts", NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS);
+ if (ret)
+ return ret;
+
+ /* Verify. */
+ err = nvkm_rd32(device, 0x001400 + (0xe * 4)) >> 16;
+ if (err) {
+ nvkm_error(subdev, "fwsec-frts: 0x%04x\n", err);
+ return -EIO;
+ }
+
+ wpr2_lo = nvkm_rd32(device, 0x1fa824);
+ wpr2_hi = nvkm_rd32(device, 0x1fa828);
+ nvkm_debug(subdev, "fwsec-frts: WPR2 @ %08x - %08x\n", wpr2_lo, wpr2_hi);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
new file mode 100644
index 000000000000..223f68b532ef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2022 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static const struct nvkm_falcon_func
+ga100_gsp_flcn = {
+ .disable = gm200_flcn_disable,
+ .enable = gm200_flcn_enable,
+ .addr2 = 0x1000,
+ .riscv_irqmask = 0x2b4,
+ .reset_eng = gp102_flcn_reset_eng,
+ .reset_wait_mem_scrubbing = gm200_flcn_reset_wait_mem_scrubbing,
+ .bind_inst = gm200_flcn_bind_inst,
+ .bind_stat = gm200_flcn_bind_stat,
+ .bind_intr = true,
+ .imem_pio = &gm200_flcn_imem_pio,
+ .dmem_pio = &gm200_flcn_dmem_pio,
+ .riscv_active = tu102_flcn_riscv_active,
+ .intr_retrigger = ga100_flcn_intr_retrigger,
+};
+
+static const struct nvkm_gsp_func
+ga100_gsp_r535_113_01 = {
+ .flcn = &ga100_gsp_flcn,
+ .fwsec = &tu102_gsp_fwsec,
+
+ .sig_section = ".fwsignature_ga100",
+
+ .wpr_heap.base_size = 8 << 20,
+ .wpr_heap.min_size = 64 << 20,
+
+ .booter.ctor = tu102_gsp_booter_ctor,
+
+ .dtor = r535_gsp_dtor,
+ .oneinit = tu102_gsp_oneinit,
+ .init = r535_gsp_init,
+ .fini = r535_gsp_fini,
+ .reset = tu102_gsp_reset,
+
+ .rm = &r535_gsp_rm,
+};
+
+static struct nvkm_gsp_fwif
+ga100_gsps[] = {
+ { 0, r535_gsp_load, &ga100_gsp_r535_113_01, "535.113.01" },
+ { -1, gv100_gsp_nofw, &gv100_gsp },
+ {}
+};
+
+int
+ga100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gsp **pgsp)
+{
+ return nvkm_gsp_new_(ga100_gsps, device, type, inst, pgsp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
index a3996ceca995..4c4b4168a266 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
@@ -21,33 +21,165 @@
*/
#include "priv.h"
-static const struct nvkm_falcon_func
+#include <nvfw/flcn.h>
+#include <nvfw/fw.h>
+#include <nvfw/hs.h>
+
+int
+ga102_gsp_reset(struct nvkm_gsp *gsp)
+{
+ int ret;
+
+ ret = gsp->falcon.func->reset_eng(&gsp->falcon);
+ if (ret)
+ return ret;
+
+ nvkm_falcon_mask(&gsp->falcon, 0x1668, 0x00000111, 0x00000111);
+ return 0;
+}
+
+int
+ga102_gsp_booter_ctor(struct nvkm_gsp *gsp, const char *name, const struct firmware *blob,
+ struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ const struct nvkm_falcon_fw_func *func = &ga102_flcn_fw;
+ const struct nvfw_bin_hdr *hdr;
+ const struct nvfw_hs_header_v2 *hshdr;
+ const struct nvfw_hs_load_header_v2 *lhdr;
+ u32 loc, sig, cnt, *meta;
+ int ret;
+
+ hdr = nvfw_bin_hdr(subdev, blob->data);
+ hshdr = nvfw_hs_header_v2(subdev, blob->data + hdr->header_offset);
+ meta = (u32 *)(blob->data + hshdr->meta_data_offset);
+ loc = *(u32 *)(blob->data + hshdr->patch_loc);
+ sig = *(u32 *)(blob->data + hshdr->patch_sig);
+ cnt = *(u32 *)(blob->data + hshdr->num_sig);
+
+ ret = nvkm_falcon_fw_ctor(func, name, subdev->device, true,
+ blob->data + hdr->data_offset, hdr->data_size, falcon, fw);
+ if (ret)
+ goto done;
+
+ ret = nvkm_falcon_fw_sign(fw, loc, hshdr->sig_prod_size / cnt, blob->data,
+ cnt, hshdr->sig_prod_offset + sig, 0, 0);
+ if (ret)
+ goto done;
+
+ lhdr = nvfw_hs_load_header_v2(subdev, blob->data + hshdr->header_offset);
+
+ fw->imem_base_img = lhdr->app[0].offset;
+ fw->imem_base = 0;
+ fw->imem_size = lhdr->app[0].size;
+
+ fw->dmem_base_img = lhdr->os_data_offset;
+ fw->dmem_base = 0;
+ fw->dmem_size = lhdr->os_data_size;
+ fw->dmem_sign = loc - lhdr->os_data_offset;
+
+ fw->boot_addr = lhdr->app[0].offset;
+
+ fw->fuse_ver = meta[0];
+ fw->engine_id = meta[1];
+ fw->ucode_id = meta[2];
+
+done:
+ if (ret)
+ nvkm_falcon_fw_dtor(fw);
+
+ return ret;
+}
+
+static int
+ga102_gsp_fwsec_signature(struct nvkm_falcon_fw *fw, u32 *src_base_src)
+{
+ struct nvkm_falcon *falcon = fw->falcon;
+ struct nvkm_device *device = falcon->owner->device;
+ u32 sig_fuse_version = fw->fuse_ver;
+ u32 reg_fuse_version;
+ int idx = 0;
+
+ FLCN_DBG(falcon, "brom: %08x %08x", fw->engine_id, fw->ucode_id);
+ FLCN_DBG(falcon, "sig_fuse_version: %08x", sig_fuse_version);
+
+ if (fw->engine_id & 0x00000400) {
+ reg_fuse_version = nvkm_rd32(device, 0x8241c0 + (fw->ucode_id - 1) * 4);
+ } else {
+ WARN_ON(1);
+ return -ENOSYS;
+ }
+
+ FLCN_DBG(falcon, "reg_fuse_version: %08x", reg_fuse_version);
+ reg_fuse_version = BIT(fls(reg_fuse_version));
+ FLCN_DBG(falcon, "reg_fuse_version: %08x", reg_fuse_version);
+ if (!(reg_fuse_version & fw->fuse_ver))
+ return -EINVAL;
+
+ while (!(reg_fuse_version & sig_fuse_version & 1)) {
+ idx += (sig_fuse_version & 1);
+ reg_fuse_version >>= 1;
+ sig_fuse_version >>= 1;
+ }
+
+ return idx;
+}
+
+const struct nvkm_falcon_fw_func
+ga102_gsp_fwsec = {
+ .signature = ga102_gsp_fwsec_signature,
+ .reset = gm200_flcn_fw_reset,
+ .load = ga102_flcn_fw_load,
+ .boot = ga102_flcn_fw_boot,
+};
+
+const struct nvkm_falcon_func
ga102_gsp_flcn = {
.disable = gm200_flcn_disable,
.enable = gm200_flcn_enable,
.select = ga102_flcn_select,
.addr2 = 0x1000,
+ .riscv_irqmask = 0x528,
.reset_eng = gp102_flcn_reset_eng,
.reset_prep = ga102_flcn_reset_prep,
.reset_wait_mem_scrubbing = ga102_flcn_reset_wait_mem_scrubbing,
.imem_dma = &ga102_flcn_dma,
.dmem_dma = &ga102_flcn_dma,
+ .riscv_active = ga102_flcn_riscv_active,
+ .intr_retrigger = ga100_flcn_intr_retrigger,
};
static const struct nvkm_gsp_func
-ga102_gsp = {
+ga102_gsp_r535_113_01 = {
.flcn = &ga102_gsp_flcn,
+ .fwsec = &ga102_gsp_fwsec,
+
+ .sig_section = ".fwsignature_ga10x",
+
+ .wpr_heap.os_carveout_size = 20 << 20,
+ .wpr_heap.base_size = 8 << 20,
+ .wpr_heap.min_size = 84 << 20,
+
+ .booter.ctor = ga102_gsp_booter_ctor,
+
+ .dtor = r535_gsp_dtor,
+ .oneinit = tu102_gsp_oneinit,
+ .init = r535_gsp_init,
+ .fini = r535_gsp_fini,
+ .reset = ga102_gsp_reset,
+
+ .rm = &r535_gsp_rm,
};
-static int
-ga102_gsp_nofw(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
-{
- return 0;
-}
+static const struct nvkm_gsp_func
+ga102_gsp = {
+ .flcn = &ga102_gsp_flcn,
+};
static struct nvkm_gsp_fwif
ga102_gsps[] = {
- { -1, ga102_gsp_nofw, &ga102_gsp },
+ { 0, r535_gsp_load, &ga102_gsp_r535_113_01, "535.113.01" },
+ { -1, gv100_gsp_nofw, &ga102_gsp },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
index da6a809cd317..62d9289bcaa5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
@@ -34,12 +34,12 @@ gv100_gsp_flcn = {
.dmem_pio = &gm200_flcn_dmem_pio,
};
-static const struct nvkm_gsp_func
+const struct nvkm_gsp_func
gv100_gsp = {
.flcn = &gv100_gsp_flcn,
};
-static int
+int
gv100_gsp_nofw(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
{
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
index 89749a40203c..9f4a62375a27 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
@@ -4,16 +4,67 @@
#include <subdev/gsp.h>
enum nvkm_acr_lsf_id;
-struct nvkm_gsp_func {
- const struct nvkm_falcon_func *flcn;
-};
+int nvkm_gsp_fwsec_frts(struct nvkm_gsp *);
+int nvkm_gsp_fwsec_sb(struct nvkm_gsp *);
struct nvkm_gsp_fwif {
int version;
int (*load)(struct nvkm_gsp *, int ver, const struct nvkm_gsp_fwif *);
const struct nvkm_gsp_func *func;
+ const char *ver;
+ bool enable;
};
+int gv100_gsp_nofw(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
+int r535_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
+
+struct nvkm_gsp_func {
+ const struct nvkm_falcon_func *flcn;
+ const struct nvkm_falcon_fw_func *fwsec;
+
+ char *sig_section;
+
+ struct {
+ u32 os_carveout_size;
+ u32 base_size;
+ u64 min_size;
+ } wpr_heap;
+
+ struct {
+ int (*ctor)(struct nvkm_gsp *, const char *name, const struct firmware *,
+ struct nvkm_falcon *, struct nvkm_falcon_fw *);
+ } booter;
+
+ void (*dtor)(struct nvkm_gsp *);
+ int (*oneinit)(struct nvkm_gsp *);
+ int (*init)(struct nvkm_gsp *);
+ int (*fini)(struct nvkm_gsp *, bool suspend);
+ int (*reset)(struct nvkm_gsp *);
+
+ const struct nvkm_gsp_rm *rm;
+};
+
+extern const struct nvkm_falcon_func tu102_gsp_flcn;
+extern const struct nvkm_falcon_fw_func tu102_gsp_fwsec;
+int tu102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware *,
+ struct nvkm_falcon *, struct nvkm_falcon_fw *);
+int tu102_gsp_oneinit(struct nvkm_gsp *);
+int tu102_gsp_reset(struct nvkm_gsp *);
+
+extern const struct nvkm_falcon_func ga102_gsp_flcn;
+extern const struct nvkm_falcon_fw_func ga102_gsp_fwsec;
+int ga102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware *,
+ struct nvkm_falcon *, struct nvkm_falcon_fw *);
+int ga102_gsp_reset(struct nvkm_gsp *);
+
+void r535_gsp_dtor(struct nvkm_gsp *);
+int r535_gsp_oneinit(struct nvkm_gsp *);
+int r535_gsp_init(struct nvkm_gsp *);
+int r535_gsp_fini(struct nvkm_gsp *, bool suspend);
+extern const struct nvkm_gsp_rm r535_gsp_rm;
+
int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gsp **);
+
+extern const struct nvkm_gsp_func gv100_gsp;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
new file mode 100644
index 000000000000..e31f9641114b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -0,0 +1,2236 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/pci.h>
+#include <subdev/timer.h>
+#include <subdev/vfn.h>
+#include <engine/fifo/chan.h>
+#include <engine/sec2.h>
+
+#include <nvfw/fw.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+#include <nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h>
+#include <nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h>
+#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h>
+#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h>
+#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h>
+#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h>
+#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h>
+#include <nvrm/535.113.01/nvidia/generated/g_os_nvoc.h>
+#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h>
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
+
+#include <linux/acpi.h>
+
+#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
+#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)
+
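+/* Header prepended to every element in the shared command/message queues.
+ * The checksum field doubles as scratch space: r535_gsp_cmdq_get() stashes
+ * the payload size there until r535_gsp_cmdq_push() replaces it with the
+ * real checksum.
+ */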
+struct r535_gsp_msg {
+ u8 auth_tag_buffer[16];
+ u8 aad_buffer[16];
+ u32 checksum;
+ u32 sequence;
+ u32 elem_count;
+ u32 pad;
+ u8 data[];
+};
+
+#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data)
+
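+/* Wait for at least (GSP_MSG_HDR_SIZE + repc) bytes to appear in the
+ * GSP->CPU message queue. If prepc is non-NULL, return a pointer directly
+ * into the shared buffer and report how much is readable; otherwise copy
+ * the message into a kvmalloc'd buffer (handling ring wrap-around) and
+ * advance the read pointer.
+ */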
+static void *
+r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime)
+{
+ struct r535_gsp_msg *mqe;
+ u32 size, rptr = *gsp->msgq.rptr;
+ int used;
+ u8 *msg;
+ u32 len;
+
+ size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + repc, GSP_PAGE_SIZE);
+ if (WARN_ON(!size || size >= gsp->msgq.cnt))
+ return ERR_PTR(-EINVAL);
+
+ do {
+ u32 wptr = *gsp->msgq.wptr;
+
+ used = wptr + gsp->msgq.cnt - rptr;
+ if (used >= gsp->msgq.cnt)
+ used -= gsp->msgq.cnt;
+ if (used >= size)
+ break;
+
+ usleep_range(1, 2);
+ } while (--(*ptime));
+
+ if (WARN_ON(!*ptime))
+ return ERR_PTR(-ETIMEDOUT);
+
+ mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + rptr * 0x1000);
+
+ if (prepc) {
+ *prepc = (used * GSP_PAGE_SIZE) - sizeof(*mqe);
+ return mqe->data;
+ }
+
+ msg = kvmalloc(repc, GFP_KERNEL);
+ if (!msg)
+ return ERR_PTR(-ENOMEM);
+
+ len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe);
+ len = min_t(u32, repc, len);
+ memcpy(msg, mqe->data, len);
+
+ rptr += DIV_ROUND_UP(len, GSP_PAGE_SIZE);
+ if (rptr == gsp->msgq.cnt)
+ rptr = 0;
+
+ repc -= len;
+
+ if (repc) {
+ mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000);
+ memcpy(msg + len, mqe, repc);
+
+ rptr += DIV_ROUND_UP(repc, GSP_PAGE_SIZE);
+ }
+
+ mb();
+ (*gsp->msgq.rptr) = rptr;
+ return msg;
+}
+
+static void *
+r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 repc, int *ptime)
+{
+ return r535_gsp_msgq_wait(gsp, repc, NULL, ptime);
+}
+
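+/* Push a command built by r535_gsp_cmdq_get() onto the CPU->GSP command
+ * queue: fill in the element header, fold a 64-bit XOR of the whole element
+ * into the 32-bit checksum, copy it into the ring (waiting for free space
+ * and handling wrap-around), then notify GSP by writing falcon register
+ * 0xc00 (presumably a doorbell). The kvmalloc'd command is always consumed.
+ */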
+static int
+r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv)
+{
+ struct r535_gsp_msg *cmd = container_of(argv, typeof(*cmd), data);
+ struct r535_gsp_msg *cqe;
+ u32 argc = cmd->checksum;
+ u64 *ptr = (void *)cmd;
+ u64 *end;
+ u64 csum = 0;
+ int free, time = 1000000;
+ u32 wptr, size;
+ u32 off = 0;
+
+ argc = ALIGN(GSP_MSG_HDR_SIZE + argc, GSP_PAGE_SIZE);
+
+ end = (u64 *)((char *)ptr + argc);
+ cmd->pad = 0;
+ cmd->checksum = 0;
+ cmd->sequence = gsp->cmdq.seq++;
+ cmd->elem_count = DIV_ROUND_UP(argc, 0x1000);
+
+ while (ptr < end)
+ csum ^= *ptr++;
+
+ cmd->checksum = upper_32_bits(csum) ^ lower_32_bits(csum);
+
+ wptr = *gsp->cmdq.wptr;
+ do {
+ do {
+ free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1;
+ if (free >= gsp->cmdq.cnt)
+ free -= gsp->cmdq.cnt;
+ if (free >= 1)
+ break;
+
+ usleep_range(1, 2);
+ } while (--time);
+
+ if (WARN_ON(!time)) {
+ kvfree(cmd);
+ return -ETIMEDOUT;
+ }
+
+ cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000);
+ size = min_t(u32, argc, (gsp->cmdq.cnt - wptr) * GSP_PAGE_SIZE);
+ memcpy(cqe, (u8 *)cmd + off, size);
+
+ wptr += DIV_ROUND_UP(size, 0x1000);
+ if (wptr == gsp->cmdq.cnt)
+ wptr = 0;
+
+ off += size;
+ argc -= size;
+ } while (argc);
+
+ nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr);
+ wmb();
+ (*gsp->cmdq.wptr) = wptr;
+ mb();
+
+ nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000);
+
+ kvfree(cmd);
+ return 0;
+}
+
+static void *
+r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 argc)
+{
+ struct r535_gsp_msg *cmd;
+ u32 size = GSP_MSG_HDR_SIZE + argc;
+
+ size = ALIGN(size, GSP_MSG_MIN_SIZE);
+ cmd = kvzalloc(size, GFP_KERNEL);
+ if (!cmd)
+ return ERR_PTR(-ENOMEM);
+
+ cmd->checksum = argc;
+ return cmd->data;
+}
+
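+/* RPC header prepended to every message exchanged with GSP-RM. The
+ * header_version (0x03000000) and signature (spells "VRPC" in memory)
+ * are filled in by r535_gsp_rpc_get().
+ */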
+struct nvfw_gsp_rpc {
+ u32 header_version;
+ u32 signature;
+ u32 length;
+ u32 function;
+ u32 rpc_result;
+ u32 rpc_result_private;
+ u32 sequence;
+ union {
+ u32 spare;
+ u32 cpuRmGfid;
+ };
+ u8 data[];
+};
+
+static void
+r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg)
+{
+ kvfree(msg);
+}
+
+static void
+r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl)
+{
+ if (gsp->subdev.debug >= lvl) {
+ nvkm_printk__(&gsp->subdev, lvl, info,
+ "msg fn:%d len:0x%x/0x%zx res:0x%x resp:0x%x\n",
+ msg->function, msg->length, msg->length - sizeof(*msg),
+ msg->rpc_result, msg->rpc_result_private);
+ print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1,
+ msg->data, msg->length - sizeof(*msg), true);
+ }
+}
+
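+/* Receive from the message queue: read the header to learn the full length,
+ * fetch the whole message, hand unsolicited functions to any registered
+ * ntfy handler, and repeat until the reply for 'fn' arrives (checking that
+ * it carries at least 'repc' bytes of payload) or the queue drains.
+ */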
+static struct nvfw_gsp_rpc *
+r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 repc)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvfw_gsp_rpc *msg;
+ int time = 4000000, i;
+ u32 size;
+
+retry:
+ msg = r535_gsp_msgq_wait(gsp, sizeof(*msg), &size, &time);
+ if (IS_ERR_OR_NULL(msg))
+ return msg;
+
+ msg = r535_gsp_msgq_recv(gsp, msg->length, &time);
+ if (IS_ERR_OR_NULL(msg))
+ return msg;
+
+ if (msg->rpc_result) {
+ r535_gsp_msg_dump(gsp, msg, NV_DBG_ERROR);
+ r535_gsp_msg_done(gsp, msg);
+ return ERR_PTR(-EINVAL);
+ }
+
+ r535_gsp_msg_dump(gsp, msg, NV_DBG_TRACE);
+
+ if (fn && msg->function == fn) {
+ if (repc) {
+ if (msg->length < sizeof(*msg) + repc) {
+ nvkm_error(subdev, "msg len %d < %zd\n",
+ msg->length, sizeof(*msg) + repc);
+ r535_gsp_msg_dump(gsp, msg, NV_DBG_ERROR);
+ r535_gsp_msg_done(gsp, msg);
+ return ERR_PTR(-EIO);
+ }
+
+ return msg;
+ }
+
+ r535_gsp_msg_done(gsp, msg);
+ return NULL;
+ }
+
+ for (i = 0; i < gsp->msgq.ntfy_nr; i++) {
+ struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i];
+
+ if (ntfy->fn == msg->function) {
+ ntfy->func(ntfy->priv, ntfy->fn, msg->data, msg->length - sizeof(*msg));
+ break;
+ }
+ }
+
+ if (i == gsp->msgq.ntfy_nr)
+ r535_gsp_msg_dump(gsp, msg, NV_DBG_WARN);
+
+ r535_gsp_msg_done(gsp, msg);
+ if (fn)
+ goto retry;
+
+ if (*gsp->msgq.rptr != *gsp->msgq.wptr)
+ goto retry;
+
+ return NULL;
+}
+
+static int
+r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv)
+{
+ int ret = 0;
+
+ mutex_lock(&gsp->msgq.mutex);
+ if (WARN_ON(gsp->msgq.ntfy_nr >= ARRAY_SIZE(gsp->msgq.ntfy))) {
+ ret = -ENOSPC;
+ } else {
+ gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn;
+ gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func;
+ gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv;
+ gsp->msgq.ntfy_nr++;
+ }
+ mutex_unlock(&gsp->msgq.mutex);
+ return ret;
+}
+
+static int
+r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn)
+{
+ void *repv;
+
+ mutex_lock(&gsp->cmdq.mutex);
+ repv = r535_gsp_msg_recv(gsp, fn, 0);
+ mutex_unlock(&gsp->cmdq.mutex);
+ if (IS_ERR(repv))
+ return PTR_ERR(repv);
+
+ return 0;
+}
+
+static void *
+r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
+{
+ struct nvfw_gsp_rpc *rpc = container_of(argv, typeof(*rpc), data);
+ struct nvfw_gsp_rpc *msg;
+ u32 fn = rpc->function;
+ void *repv = NULL;
+ int ret;
+
+ if (gsp->subdev.debug >= NV_DBG_TRACE) {
+ nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function,
+ rpc->length, rpc->length - sizeof(*rpc));
+ print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1,
+ rpc->data, rpc->length - sizeof(*rpc), true);
+ }
+
+ ret = r535_gsp_cmdq_push(gsp, rpc);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (wait) {
+ msg = r535_gsp_msg_recv(gsp, fn, repc);
+ if (!IS_ERR_OR_NULL(msg))
+ repv = msg->data;
+ else
+ repv = msg;
+ }
+
+ return repv;
+}
+
+static void
+r535_gsp_event_dtor(struct nvkm_gsp_event *event)
+{
+ struct nvkm_gsp_device *device = event->device;
+ struct nvkm_gsp_client *client = device->object.client;
+ struct nvkm_gsp *gsp = client->gsp;
+
+ mutex_lock(&gsp->client_id.mutex);
+ if (event->func) {
+ list_del(&event->head);
+ event->func = NULL;
+ }
+ mutex_unlock(&gsp->client_id.mutex);
+
+ nvkm_gsp_rm_free(&event->object);
+ event->device = NULL;
+}
+
+static int
+r535_gsp_device_event_get(struct nvkm_gsp_event *event)
+{
+ struct nvkm_gsp_device *device = event->device;
+ NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice,
+ NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->event = event->id;
+ ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
+ return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl);
+}
+
+static int
+r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
+ nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
+{
+ struct nvkm_gsp_client *client = device->object.client;
+ struct nvkm_gsp *gsp = client->gsp;
+ NV0005_ALLOC_PARAMETERS *args;
+ int ret;
+
+ args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle,
+ NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args),
+ &event->object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->hParentClient = client->object.handle;
+ args->hSrcResource = 0;
+ args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX;
+ args->notifyIndex = NV01_EVENT_CLIENT_RM | id;
+ args->data = NULL;
+
+ ret = nvkm_gsp_rm_alloc_wr(&event->object, args);
+ if (ret)
+ return ret;
+
+ event->device = device;
+ event->id = id;
+
+ ret = r535_gsp_device_event_get(event);
+ if (ret) {
+ nvkm_gsp_event_dtor(event);
+ return ret;
+ }
+
+ mutex_lock(&gsp->client_id.mutex);
+ event->func = func;
+ list_add(&event->head, &client->events);
+ mutex_unlock(&gsp->client_id.mutex);
+ return 0;
+}
+
+static void
+r535_gsp_device_dtor(struct nvkm_gsp_device *device)
+{
+ nvkm_gsp_rm_free(&device->subdevice);
+ nvkm_gsp_rm_free(&device->object);
+}
+
+static int
+r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device)
+{
+ NV2080_ALLOC_PARAMETERS *args;
+
+ return nvkm_gsp_rm_alloc(&device->object, 0x5d1d0000, NV20_SUBDEVICE_0, sizeof(*args),
+ &device->subdevice);
+}
+
+static int
+r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
+{
+ NV0080_ALLOC_PARAMETERS *args;
+ int ret;
+
+ args = nvkm_gsp_rm_alloc_get(&client->object, 0xde1d0000, NV01_DEVICE_0, sizeof(*args),
+ &device->object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->hClientShare = client->object.handle;
+
+ ret = nvkm_gsp_rm_alloc_wr(&device->object, args);
+ if (ret)
+ return ret;
+
+ ret = r535_gsp_subdevice_ctor(device);
+ if (ret)
+ nvkm_gsp_rm_free(&device->object);
+
+ return ret;
+}
+
+static void
+r535_gsp_client_dtor(struct nvkm_gsp_client *client)
+{
+ struct nvkm_gsp *gsp = client->gsp;
+
+ nvkm_gsp_rm_free(&client->object);
+
+ mutex_lock(&gsp->client_id.mutex);
+ idr_remove(&gsp->client_id.idr, client->object.handle & 0xffff);
+ mutex_unlock(&gsp->client_id.mutex);
+
+ client->gsp = NULL;
+}
+
+static int
+r535_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
+{
+ NV0000_ALLOC_PARAMETERS *args;
+ int ret;
+
+ mutex_lock(&gsp->client_id.mutex);
+ ret = idr_alloc(&gsp->client_id.idr, client, 0, 0xffff + 1, GFP_KERNEL);
+ mutex_unlock(&gsp->client_id.mutex);
+ if (ret < 0)
+ return ret;
+
+ client->gsp = gsp;
+ client->object.client = client;
+ INIT_LIST_HEAD(&client->events);
+
+ args = nvkm_gsp_rm_alloc_get(&client->object, 0xc1d00000 | ret, NV01_ROOT, sizeof(*args),
+ &client->object);
+ if (IS_ERR(args)) {
+ r535_gsp_client_dtor(client);
+ return PTR_ERR(args);
+ }
+
+ args->hClient = client->object.handle;
+ args->processID = ~0;
+
+ ret = nvkm_gsp_rm_alloc_wr(&client->object, args);
+ if (ret) {
+ r535_gsp_client_dtor(client);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object)
+{
+ struct nvkm_gsp_client *client = object->client;
+ struct nvkm_gsp *gsp = client->gsp;
+ rpc_free_v03_00 *rpc;
+
+ nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x free\n",
+ client->object.handle, object->handle);
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_FREE, sizeof(*rpc));
+ if (WARN_ON(IS_ERR_OR_NULL(rpc)))
+ return -EIO;
+
+ rpc->params.hRoot = client->object.handle;
+ rpc->params.hObjectParent = 0;
+ rpc->params.hObjectOld = object->handle;
+ return nvkm_gsp_rpc_wr(gsp, rpc, true);
+}
+
+static void
+r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *repv)
+{
+ rpc_gsp_rm_alloc_v03_00 *rpc = container_of(repv, typeof(*rpc), params);
+
+ nvkm_gsp_rpc_done(object->client->gsp, rpc);
+}
+
+static void *
+r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
+{
+ rpc_gsp_rm_alloc_v03_00 *rpc = container_of(argv, typeof(*rpc), params);
+ struct nvkm_gsp *gsp = object->client->gsp;
+ void *ret;
+
+ rpc = nvkm_gsp_rpc_push(gsp, rpc, true, sizeof(*rpc) + repc);
+ if (IS_ERR_OR_NULL(rpc))
+ return rpc;
+
+ if (rpc->status) {
+ nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
+ ret = ERR_PTR(-EINVAL);
+ } else {
+ ret = repc ? rpc->params : NULL;
+ }
+
+ if (IS_ERR_OR_NULL(ret))
+ nvkm_gsp_rpc_done(gsp, rpc);
+
+ return ret;
+}
+
+static void *
+r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass, u32 argc)
+{
+ struct nvkm_gsp_client *client = object->client;
+ struct nvkm_gsp *gsp = client->gsp;
+ rpc_gsp_rm_alloc_v03_00 *rpc;
+
+ nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x cls:0x%08x argc:%d\n",
+ client->object.handle, object->parent->handle, object->handle, oclass, argc);
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC, sizeof(*rpc) + argc);
+ if (IS_ERR(rpc))
+ return rpc;
+
+ rpc->hClient = client->object.handle;
+ rpc->hParent = object->parent->handle;
+ rpc->hObject = object->handle;
+ rpc->hClass = oclass;
+ rpc->status = 0;
+ rpc->paramsSize = argc;
+ return rpc->params;
+}
+
+static void
+r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv)
+{
+ rpc_gsp_rm_control_v03_00 *rpc = container_of(repv, typeof(*rpc), params);
+
+ nvkm_gsp_rpc_done(object->client->gsp, rpc);
+}
+
+static void *
+r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
+{
+ rpc_gsp_rm_control_v03_00 *rpc = container_of(argv, typeof(*rpc), params);
+ struct nvkm_gsp *gsp = object->client->gsp;
+ void *ret;
+
+ rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc);
+ if (IS_ERR_OR_NULL(rpc))
+ return rpc;
+
+ if (rpc->status) {
+ nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
+ object->client->object.handle, object->handle, rpc->cmd, rpc->status);
+ ret = ERR_PTR(-EINVAL);
+ } else {
+ ret = repc ? rpc->params : NULL;
+ }
+
+ if (IS_ERR_OR_NULL(ret))
+ nvkm_gsp_rpc_done(gsp, rpc);
+
+ return ret;
+}
+
+static void *
+r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc)
+{
+ struct nvkm_gsp_client *client = object->client;
+ struct nvkm_gsp *gsp = client->gsp;
+ rpc_gsp_rm_control_v03_00 *rpc;
+
+ nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x argc:%d\n",
+ client->object.handle, object->handle, cmd, argc);
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL, sizeof(*rpc) + argc);
+ if (IS_ERR(rpc))
+ return rpc;
+
+ rpc->hClient = client->object.handle;
+ rpc->hObject = object->handle;
+ rpc->cmd = cmd;
+ rpc->status = 0;
+ rpc->paramsSize = argc;
+ return rpc->params;
+}
+
+static void
+r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
+{
+ struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data);
+
+ r535_gsp_msg_done(gsp, rpc);
+}
+
+static void *
+r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc)
+{
+ struct nvfw_gsp_rpc *rpc;
+
+ rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + argc, sizeof(u64)));
+ if (IS_ERR(rpc))
+ return ERR_CAST(rpc);
+
+ rpc->header_version = 0x03000000;
+ rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V';
+ rpc->function = fn;
+ rpc->rpc_result = 0xffffffff;
+ rpc->rpc_result_private = 0xffffffff;
+ rpc->length = sizeof(*rpc) + argc;
+ return rpc->data;
+}
+
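+/* Send an RPC, splitting it if it exceeds a single queue element (16 pages
+ * including headers): the first chunk goes out with a truncated length and
+ * the remainder is streamed as CONTINUATION_RECORD RPCs before optionally
+ * waiting for the reply.
+ */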
+static void *
+r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
+{
+ struct nvfw_gsp_rpc *rpc = container_of(argv, typeof(*rpc), data);
+ struct r535_gsp_msg *cmd = container_of((void *)rpc, typeof(*cmd), data);
+ const u32 max_msg_size = (16 * 0x1000) - sizeof(struct r535_gsp_msg);
+ const u32 max_rpc_size = max_msg_size - sizeof(*rpc);
+ u32 rpc_size = rpc->length - sizeof(*rpc);
+ void *repv;
+
+ mutex_lock(&gsp->cmdq.mutex);
+ if (rpc_size > max_rpc_size) {
+ const u32 fn = rpc->function;
+
+ /* Adjust length, and send initial RPC. */
+ rpc->length = sizeof(*rpc) + max_rpc_size;
+ cmd->checksum = rpc->length;
+
+ repv = r535_gsp_rpc_send(gsp, argv, false, 0);
+ if (IS_ERR(repv))
+ goto done;
+
+ argv += max_rpc_size;
+ rpc_size -= max_rpc_size;
+
+ /* Remaining chunks sent as CONTINUATION_RECORD RPCs. */
+ while (rpc_size) {
+ u32 size = min(rpc_size, max_rpc_size);
+ void *next;
+
+ next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size);
+ if (IS_ERR(next)) {
+ repv = next;
+ goto done;
+ }
+
+ memcpy(next, argv, size);
+
+ repv = r535_gsp_rpc_send(gsp, next, false, 0);
+ if (IS_ERR(repv))
+ goto done;
+
+ argv += size;
+ rpc_size -= size;
+ }
+
+ /* Wait for reply. */
+ if (wait) {
+ rpc = r535_gsp_msg_recv(gsp, fn, repc);
+ if (!IS_ERR_OR_NULL(rpc))
+ repv = rpc->data;
+ else
+ repv = rpc;
+ } else {
+ repv = NULL;
+ }
+ } else {
+ repv = r535_gsp_rpc_send(gsp, argv, wait, repc);
+ }
+
+done:
+ mutex_unlock(&gsp->cmdq.mutex);
+ return repv;
+}
+
+const struct nvkm_gsp_rm
+r535_gsp_rm = {
+ .rpc_get = r535_gsp_rpc_get,
+ .rpc_push = r535_gsp_rpc_push,
+ .rpc_done = r535_gsp_rpc_done,
+
+ .rm_ctrl_get = r535_gsp_rpc_rm_ctrl_get,
+ .rm_ctrl_push = r535_gsp_rpc_rm_ctrl_push,
+ .rm_ctrl_done = r535_gsp_rpc_rm_ctrl_done,
+
+ .rm_alloc_get = r535_gsp_rpc_rm_alloc_get,
+ .rm_alloc_push = r535_gsp_rpc_rm_alloc_push,
+ .rm_alloc_done = r535_gsp_rpc_rm_alloc_done,
+
+ .rm_free = r535_gsp_rpc_rm_free,
+
+ .client_ctor = r535_gsp_client_ctor,
+ .client_dtor = r535_gsp_client_dtor,
+
+ .device_ctor = r535_gsp_device_ctor,
+ .device_dtor = r535_gsp_device_dtor,
+
+ .event_ctor = r535_gsp_device_event_ctor,
+ .event_dtor = r535_gsp_event_dtor,
+};
+
+static void
+r535_gsp_msgq_work(struct work_struct *work)
+{
+ struct nvkm_gsp *gsp = container_of(work, typeof(*gsp), msgq.work);
+
+ mutex_lock(&gsp->cmdq.mutex);
+ if (*gsp->msgq.rptr != *gsp->msgq.wptr)
+ r535_gsp_msg_recv(gsp, 0, 0);
+ mutex_unlock(&gsp->cmdq.mutex);
+}
+
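+/* Top-half interrupt handler for the GSP falcon. Bit 0x00000040 carries the
+ * message-queue notification; it is acked and deferred to the msgq work
+ * item. Any other asserted bits are reported and cleared.
+ */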
+static irqreturn_t
+r535_gsp_intr(struct nvkm_inth *inth)
+{
+ struct nvkm_gsp *gsp = container_of(inth, typeof(*gsp), subdev.inth);
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ u32 intr = nvkm_falcon_rd32(&gsp->falcon, 0x0008);
+ u32 inte = nvkm_falcon_rd32(&gsp->falcon, gsp->falcon.func->addr2 +
+ gsp->falcon.func->riscv_irqmask);
+ u32 stat = intr & inte;
+
+ if (!stat) {
+ nvkm_debug(subdev, "inte %08x %08x\n", intr, inte);
+ return IRQ_NONE;
+ }
+
+ if (stat & 0x00000040) {
+ nvkm_falcon_wr32(&gsp->falcon, 0x004, 0x00000040);
+ schedule_work(&gsp->msgq.work);
+ stat &= ~0x00000040;
+ }
+
+ if (stat) {
+ nvkm_error(subdev, "intr %08x\n", stat);
+ nvkm_falcon_wr32(&gsp->falcon, 0x014, stat);
+ nvkm_falcon_wr32(&gsp->falcon, 0x004, stat);
+ }
+
+ nvkm_falcon_intr_retrigger(&gsp->falcon);
+ return IRQ_HANDLED;
+}
+
+static int
+r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
+{
+ NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *ctrl;
+ int ret = 0;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, ctrl, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return PTR_ERR(ctrl);
+
+ for (unsigned i = 0; i < ctrl->tableLen; i++) {
+ enum nvkm_subdev_type type;
+ int inst;
+
+ nvkm_debug(&gsp->subdev,
+ "%2d: engineIdx %3d pmcIntrMask %08x stall %08x nonStall %08x\n", i,
+ ctrl->table[i].engineIdx, ctrl->table[i].pmcIntrMask,
+ ctrl->table[i].vectorStall, ctrl->table[i].vectorNonStall);
+
+ switch (ctrl->table[i].engineIdx) {
+ case MC_ENGINE_IDX_GSP:
+ type = NVKM_SUBDEV_GSP;
+ inst = 0;
+ break;
+ case MC_ENGINE_IDX_DISP:
+ type = NVKM_ENGINE_DISP;
+ inst = 0;
+ break;
+ case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9:
+ type = NVKM_ENGINE_CE;
+ inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_CE0;
+ break;
+ case MC_ENGINE_IDX_GR0:
+ type = NVKM_ENGINE_GR;
+ inst = 0;
+ break;
+ case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7:
+ type = NVKM_ENGINE_NVDEC;
+ inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVDEC0;
+ break;
+ case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2:
+ type = NVKM_ENGINE_NVENC;
+ inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_MSENC;
+ break;
+ case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7:
+ type = NVKM_ENGINE_NVJPG;
+ inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVJPEG0;
+ break;
+ case MC_ENGINE_IDX_OFA0:
+ type = NVKM_ENGINE_OFA;
+ inst = 0;
+ break;
+ default:
+ continue;
+ }
+
+ if (WARN_ON(gsp->intr_nr == ARRAY_SIZE(gsp->intr))) {
+ ret = -ENOSPC;
+ break;
+ }
+
+ gsp->intr[gsp->intr_nr].type = type;
+ gsp->intr[gsp->intr_nr].inst = inst;
+ gsp->intr[gsp->intr_nr].stall = ctrl->table[i].vectorStall;
+ gsp->intr[gsp->intr_nr].nonstall = ctrl->table[i].vectorNonStall;
+ gsp->intr_nr++;
+ }
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return ret;
+}
+
+static int
+r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
+{
+ GspStaticConfigInfo *rpc;
+ int last_usable = -1;
+
+ rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc));
+ if (IS_ERR(rpc))
+ return PTR_ERR(rpc);
+
+ gsp->internal.client.object.client = &gsp->internal.client;
+ gsp->internal.client.object.parent = NULL;
+ gsp->internal.client.object.handle = rpc->hInternalClient;
+ gsp->internal.client.gsp = gsp;
+
+ gsp->internal.device.object.client = &gsp->internal.client;
+ gsp->internal.device.object.parent = &gsp->internal.client.object;
+ gsp->internal.device.object.handle = rpc->hInternalDevice;
+
+ gsp->internal.device.subdevice.client = &gsp->internal.client;
+ gsp->internal.device.subdevice.parent = &gsp->internal.device.object;
+ gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice;
+
+ gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase;
+ gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase;
+
+ for (int i = 0; i < rpc->fbRegionInfoParams.numFBRegions; i++) {
+ NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg =
+ &rpc->fbRegionInfoParams.fbRegion[i];
+
+ nvkm_debug(&gsp->subdev, "fb region %d: "
+ "%016llx-%016llx rsvd:%016llx perf:%08x comp:%d iso:%d prot:%d\n", i,
+ reg->base, reg->limit, reg->reserved, reg->performance,
+ reg->supportCompressed, reg->supportISO, reg->bProtected);
+
+ if (!reg->reserved && !reg->bProtected) {
+ if (reg->supportCompressed && reg->supportISO &&
+ !WARN_ON_ONCE(gsp->fb.region_nr >= ARRAY_SIZE(gsp->fb.region))) {
+ const u64 size = (reg->limit + 1) - reg->base;
+
+ gsp->fb.region[gsp->fb.region_nr].addr = reg->base;
+ gsp->fb.region[gsp->fb.region_nr].size = size;
+ gsp->fb.region_nr++;
+ }
+
+ last_usable = i;
+ }
+ }
+
+ if (last_usable >= 0) {
+ u32 rsvd_base = rpc->fbRegionInfoParams.fbRegion[last_usable].limit + 1;
+
+ gsp->fb.rsvd_size = gsp->fb.heap.addr - rsvd_base;
+ }
+
+ for (int gpc = 0; gpc < ARRAY_SIZE(rpc->tpcInfo); gpc++) {
+ if (rpc->gpcInfo.gpcMask & BIT(gpc)) {
+ gsp->gr.tpcs += hweight32(rpc->tpcInfo[gpc].tpcMask);
+ gsp->gr.gpcs++;
+ }
+ }
+
+ nvkm_gsp_rpc_done(gsp, rpc);
+ return 0;
+}
+
+static int
+r535_gsp_postinit(struct nvkm_gsp *gsp)
+{
+ struct nvkm_device *device = gsp->subdev.device;
+ int ret;
+
+ ret = r535_gsp_rpc_get_gsp_static_info(gsp);
+ if (WARN_ON(ret))
+ return ret;
+
+ INIT_WORK(&gsp->msgq.work, r535_gsp_msgq_work);
+
+ ret = r535_gsp_intr_get_table(gsp);
+ if (WARN_ON(ret))
+ return ret;
+
+ ret = nvkm_gsp_intr_stall(gsp, gsp->subdev.type, gsp->subdev.inst);
+ if (WARN_ON(ret < 0))
+ return ret;
+
+ ret = nvkm_inth_add(&device->vfn->intr, ret, NVKM_INTR_PRIO_NORMAL, &gsp->subdev,
+ r535_gsp_intr, &gsp->subdev.inth);
+ if (WARN_ON(ret))
+ return ret;
+
+ nvkm_inth_allow(&gsp->subdev.inth);
+ nvkm_wr32(device, 0x110004, 0x00000040);
+ return ret;
+}
+
+static int
+r535_gsp_rpc_unloading_guest_driver(struct nvkm_gsp *gsp, bool suspend)
+{
+ rpc_unloading_guest_driver_v1F_07 *rpc;
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UNLOADING_GUEST_DRIVER, sizeof(*rpc));
+ if (IS_ERR(rpc))
+ return PTR_ERR(rpc);
+
+ if (suspend) {
+ rpc->bInPMTransition = 1;
+ rpc->bGc6Entering = 0;
+ rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3;
+ } else {
+ rpc->bInPMTransition = 0;
+ rpc->bGc6Entering = 0;
+ rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0;
+ }
+
+ return nvkm_gsp_rpc_wr(gsp, rpc, true);
+}
+
+/* dword only */
+struct nv_gsp_registry_entries {
+ const char *name;
+ u32 value;
+};
+
+static const struct nv_gsp_registry_entries r535_registry_entries[] = {
+ { "RMSecBusResetEnable", 1 },
+ { "RMForcePcieConfigSave", 1 },
+};
+#define NV_GSP_REG_NUM_ENTRIES ARRAY_SIZE(r535_registry_entries)
+
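+/* Build the PACKED_REGISTRY_TABLE RPC: fixed-size entries first, followed by
+ * the NUL-terminated key strings, with each entry's nameOffset pointing at
+ * its string (offsets are relative to the start of the table).
+ */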
+static int
+r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
+{
+ PACKED_REGISTRY_TABLE *rpc;
+ char *strings;
+ int str_offset;
+ int i;
+ size_t rpc_size = sizeof(*rpc) + sizeof(rpc->entries[0]) * NV_GSP_REG_NUM_ENTRIES;
+
+ /* add strings + null terminator */
+ for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++)
+ rpc_size += strlen(r535_registry_entries[i].name) + 1;
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_SET_REGISTRY, rpc_size);
+ if (IS_ERR(rpc))
+ return PTR_ERR(rpc);
+
+ rpc->size = sizeof(*rpc);
+ rpc->numEntries = NV_GSP_REG_NUM_ENTRIES;
+
+ str_offset = offsetof(typeof(*rpc), entries[NV_GSP_REG_NUM_ENTRIES]);
+ strings = (char *)&rpc->entries[NV_GSP_REG_NUM_ENTRIES];
+ for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) {
+ int name_len = strlen(r535_registry_entries[i].name) + 1;
+
+ rpc->entries[i].nameOffset = str_offset;
+ rpc->entries[i].type = 1;
+ rpc->entries[i].data = r535_registry_entries[i].value;
+ rpc->entries[i].length = 4;
+ memcpy(strings, r535_registry_entries[i].name, name_len);
+ strings += name_len;
+ str_offset += name_len;
+ }
+
+ return nvkm_gsp_rpc_wr(gsp, rpc, false);
+}
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+static void
+r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps)
+{
+ const guid_t NVOP_DSM_GUID =
+ GUID_INIT(0xA486D8F8, 0x0BDA, 0x471B,
+ 0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0);
+ u64 NVOP_DSM_REV = 0x00000100;
+ union acpi_object argv4 = {
+ .buffer.type = ACPI_TYPE_BUFFER,
+ .buffer.length = 4,
+ .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL),
+ }, *obj;
+
+ caps->status = 0xffff;
+
+ if (!acpi_check_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, BIT_ULL(0x1a)))
+ return;
+
+ obj = acpi_evaluate_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, 0x1a, &argv4);
+ if (!obj)
+ return;
+
+ printk(KERN_ERR "nvop: obj type %d\n", obj->type);
+ printk(KERN_ERR "nvop: obj len %d\n", obj->buffer.length);
+
+ if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
+ WARN_ON(obj->buffer.length != 4))
+ return;
+
+ caps->status = 0;
+ caps->optimusCaps = *(u32 *)obj->buffer.pointer;
+ printk(KERN_ERR "nvop: caps %08x\n", caps->optimusCaps);
+
+ ACPI_FREE(obj);
+
+ kfree(argv4.buffer.pointer);
+}
+
+static void
+r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt)
+{
+ const guid_t JT_DSM_GUID =
+ GUID_INIT(0xCBECA351L, 0x067B, 0x4924,
+ 0x9C, 0xBD, 0xB4, 0x6B, 0x00, 0xB8, 0x6F, 0x34);
+ u64 JT_DSM_REV = 0x00000103;
+ u32 caps;
+ union acpi_object argv4 = {
+ .buffer.type = ACPI_TYPE_BUFFER,
+ .buffer.length = sizeof(caps),
+ .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL),
+ }, *obj;
+
+ jt->status = 0xffff;
+
+ obj = acpi_evaluate_dsm(handle, &JT_DSM_GUID, JT_DSM_REV, 0x1, &argv4);
+ if (!obj)
+ return;
+
+ printk(KERN_ERR "jt: obj type %d\n", obj->type);
+ printk(KERN_ERR "jt: obj len %d\n", obj->buffer.length);
+
+ if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
+ WARN_ON(obj->buffer.length != 4))
+ return;
+
+ jt->status = 0;
+ jt->jtCaps = *(u32 *)obj->buffer.pointer;
+ jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20;
+ jt->bSBIOSCaps = 0;
+ printk(KERN_ERR "jt: caps %08x rev:%04x\n", jt->jtCaps, jt->jtRevId);
+
+ ACPI_FREE(obj);
+
+ kfree(argv4.buffer.pointer);
+}
+
+static void
+r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode,
+ MUX_METHOD_DATA_ELEMENT *part)
+{
+ acpi_handle iter = NULL, handle_mux = NULL;
+ acpi_status status;
+ unsigned long long value;
+
+ mode->status = 0xffff;
+ part->status = 0xffff;
+
+ do {
+ status = acpi_get_next_object(ACPI_TYPE_DEVICE, handle, iter, &iter);
+ if (ACPI_FAILURE(status) || !iter)
+ return;
+
+ status = acpi_evaluate_integer(iter, "_ADR", NULL, &value);
+ if (ACPI_FAILURE(status) || value != id)
+ continue;
+
+ handle_mux = iter;
+ } while (!handle_mux);
+
+ if (!handle_mux)
+ return;
+
+ status = acpi_evaluate_integer(handle_mux, "MXDM", NULL, &value);
+ if (ACPI_SUCCESS(status)) {
+ mode->acpiId = id;
+ mode->mode = value;
+ mode->status = 0;
+ }
+
+ status = acpi_evaluate_integer(handle_mux, "MXDS", NULL, &value);
+ if (ACPI_SUCCESS(status)) {
+ part->acpiId = id;
+ part->mode = value;
+ part->status = 0;
+ }
+}
+
+static void
+r535_gsp_acpi_mux(acpi_handle handle, DOD_METHOD_DATA *dod, MUX_METHOD_DATA *mux)
+{
+ mux->tableLen = dod->acpiIdListLen / sizeof(dod->acpiIdList[0]);
+
+ for (int i = 0; i < mux->tableLen; i++) {
+ r535_gsp_acpi_mux_id(handle, dod->acpiIdList[i], &mux->acpiIdMuxModeTable[i],
+ &mux->acpiIdMuxPartTable[i]);
+ }
+}
+
+static void
+r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod)
+{
+ acpi_status status;
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *_DOD;
+
+ dod->status = 0xffff;
+
+ status = acpi_evaluate_object(handle, "_DOD", NULL, &output);
+ if (ACPI_FAILURE(status))
+ return;
+
+ _DOD = output.pointer;
+
+ if (WARN_ON(_DOD->type != ACPI_TYPE_PACKAGE) ||
+ WARN_ON(_DOD->package.count > ARRAY_SIZE(dod->acpiIdList)))
+ return;
+
+ for (int i = 0; i < _DOD->package.count; i++) {
+ if (WARN_ON(_DOD->package.elements[i].type != ACPI_TYPE_INTEGER))
+ return;
+
+ dod->acpiIdList[i] = _DOD->package.elements[i].integer.value;
+ dod->acpiIdListLen += sizeof(dod->acpiIdList[0]);
+ }
+
+ printk(KERN_ERR "_DOD: ok! len:%d\n", dod->acpiIdListLen);
+ dod->status = 0;
+}
+#endif
+
+static void
+r535_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi)
+{
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+ acpi_handle handle = ACPI_HANDLE(gsp->subdev.device->dev);
+
+ if (!handle)
+ return;
+
+ acpi->bValid = 1;
+
+ r535_gsp_acpi_dod(handle, &acpi->dodMethodData);
+ if (acpi->dodMethodData.status == 0)
+ r535_gsp_acpi_mux(handle, &acpi->dodMethodData, &acpi->muxMethodData);
+
+ r535_gsp_acpi_jt(handle, &acpi->jtMethodData);
+ r535_gsp_acpi_caps(handle, &acpi->capsMethodData);
+#endif
+}
+
+static int
+r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp)
+{
+ struct nvkm_device *device = gsp->subdev.device;
+ struct nvkm_device_pci *pdev = container_of(device, typeof(*pdev), device);
+ GspSystemInfo *info;
+
+ if (WARN_ON(device->type == NVKM_DEVICE_TEGRA))
+ return -ENOSYS;
+
+ info = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, sizeof(*info));
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ info->gpuPhysAddr = device->func->resource_addr(device, 0);
+ info->gpuPhysFbAddr = device->func->resource_addr(device, 1);
+ info->gpuPhysInstAddr = device->func->resource_addr(device, 3);
+ info->nvDomainBusDeviceFunc = pci_dev_id(pdev->pdev);
+ info->maxUserVa = TASK_SIZE;
+ info->pciConfigMirrorBase = 0x088000;
+ info->pciConfigMirrorSize = 0x001000;
+ r535_gsp_acpi_info(gsp, &info->acpiMethodData);
+
+ return nvkm_gsp_rpc_wr(gsp, info, false);
+}
+
+static int
+r535_gsp_msg_os_error_log(void *priv, u32 fn, void *repv, u32 repc)
+{
+ struct nvkm_gsp *gsp = priv;
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ rpc_os_error_log_v17_00 *msg = repv;
+
+ if (WARN_ON(repc < sizeof(*msg)))
+ return -EINVAL;
+
+ nvkm_error(subdev, "Xid:%d %s\n", msg->exceptType, msg->errString);
+ return 0;
+}
+
+static int
+r535_gsp_msg_rc_triggered(void *priv, u32 fn, void *repv, u32 repc)
+{
+ rpc_rc_triggered_v17_02 *msg = repv;
+ struct nvkm_gsp *gsp = priv;
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_chan *chan;
+ unsigned long flags;
+
+ if (WARN_ON(repc < sizeof(*msg)))
+ return -EINVAL;
+
+ nvkm_error(subdev, "rc engn:%08x chid:%d type:%d scope:%d part:%d\n",
+ msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope,
+ msg->partitionAttributionId);
+
+ chan = nvkm_chan_get_chid(&subdev->device->fifo->engine, msg->chid / 8, &flags);
+ if (!chan) {
+ nvkm_error(subdev, "rc chid:%d not found!\n", msg->chid);
+ return 0;
+ }
+
+ nvkm_chan_error(chan, false);
+ nvkm_chan_put(&chan, flags);
+ return 0;
+}
+
+static int
+r535_gsp_msg_mmu_fault_queued(void *priv, u32 fn, void *repv, u32 repc)
+{
+ struct nvkm_gsp *gsp = priv;
+ struct nvkm_subdev *subdev = &gsp->subdev;
+
+ WARN_ON(repc != 0);
+
+ nvkm_error(subdev, "mmu fault queued\n");
+ return 0;
+}
+
+static int
+r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc)
+{
+ struct nvkm_gsp *gsp = priv;
+ struct nvkm_gsp_client *client;
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ rpc_post_event_v17_00 *msg = repv;
+
+ if (WARN_ON(repc < sizeof(*msg)))
+ return -EINVAL;
+ if (WARN_ON(repc != sizeof(*msg) + msg->eventDataSize))
+ return -EINVAL;
+
+ nvkm_debug(subdev, "event: %08x %08x %d %08x %08x %d %d\n",
+ msg->hClient, msg->hEvent, msg->notifyIndex, msg->data,
+ msg->status, msg->eventDataSize, msg->bNotifyList);
+
+ mutex_lock(&gsp->client_id.mutex);
+ client = idr_find(&gsp->client_id.idr, msg->hClient & 0xffff);
+ if (client) {
+ struct nvkm_gsp_event *event;
+ bool handled = false;
+
+ list_for_each_entry(event, &client->events, head) {
+ if (event->object.handle == msg->hEvent) {
+ event->func(event, msg->eventData, msg->eventDataSize);
+ handled = true;
+ }
+ }
+
+ if (!handled) {
+ nvkm_error(subdev, "event: cid 0x%08x event 0x%08x not found!\n",
+ msg->hClient, msg->hEvent);
+ }
+ } else {
+ nvkm_error(subdev, "event: cid 0x%08x not found!\n", msg->hClient);
+ }
+ mutex_unlock(&gsp->client_id.mutex);
+ return 0;
+}
+
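+/* GSP-RM asks the host CPU to run work it cannot do itself (mostly around
+ * falcon reset/resume): the payload is a stream of GSP_SEQUENCER_BUFFER_CMDs
+ * (register write/modify/poll, delay, register store, and core
+ * reset/start/wait-halt/resume) executed here in order.
+ */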
+static int
+r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc)
+{
+ struct nvkm_gsp *gsp = priv;
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ rpc_run_cpu_sequencer_v17_00 *seq = repv;
+ int ptr = 0, ret;
+
+ nvkm_debug(subdev, "seq: %08x %08x\n", seq->bufferSizeDWord, seq->cmdIndex);
+
+ while (ptr < seq->cmdIndex) {
+ GSP_SEQUENCER_BUFFER_CMD *cmd = (void *)&seq->commandBuffer[ptr];
+
+ ptr += 1;
+ ptr += GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(cmd->opCode);
+
+ switch (cmd->opCode) {
+ case GSP_SEQ_BUF_OPCODE_REG_WRITE: {
+ u32 addr = cmd->payload.regWrite.addr;
+ u32 data = cmd->payload.regWrite.val;
+
+ nvkm_trace(subdev, "seq wr32 %06x %08x\n", addr, data);
+ nvkm_wr32(device, addr, data);
+ }
+ break;
+ case GSP_SEQ_BUF_OPCODE_REG_MODIFY: {
+ u32 addr = cmd->payload.regModify.addr;
+ u32 mask = cmd->payload.regModify.mask;
+ u32 data = cmd->payload.regModify.val;
+
+ nvkm_trace(subdev, "seq mask %06x %08x %08x\n", addr, mask, data);
+ nvkm_mask(device, addr, mask, data);
+ }
+ break;
+ case GSP_SEQ_BUF_OPCODE_REG_POLL: {
+ u32 addr = cmd->payload.regPoll.addr;
+ u32 mask = cmd->payload.regPoll.mask;
+ u32 data = cmd->payload.regPoll.val;
+ u32 usec = cmd->payload.regPoll.timeout ?: 4000000;
+ //u32 error = cmd->payload.regPoll.error;
+
+ nvkm_trace(subdev, "seq poll %06x %08x %08x %d\n", addr, mask, data, usec);
+ nvkm_rd32(device, addr);
+ nvkm_usec(device, usec,
+ if ((nvkm_rd32(device, addr) & mask) == data)
+ break;
+ );
+ }
+ break;
+ case GSP_SEQ_BUF_OPCODE_DELAY_US: {
+ u32 usec = cmd->payload.delayUs.val;
+
+ nvkm_trace(subdev, "seq usec %d\n", usec);
+ udelay(usec);
+ }
+ break;
+ case GSP_SEQ_BUF_OPCODE_REG_STORE: {
+ u32 addr = cmd->payload.regStore.addr;
+ u32 slot = cmd->payload.regStore.index;
+
+ seq->regSaveArea[slot] = nvkm_rd32(device, addr);
+ nvkm_trace(subdev, "seq save %08x -> %d: %08x\n", addr, slot,
+ seq->regSaveArea[slot]);
+ }
+ break;
+ case GSP_SEQ_BUF_OPCODE_CORE_RESET:
+ nvkm_trace(subdev, "seq core reset\n");
+ nvkm_falcon_reset(&gsp->falcon);
+ nvkm_falcon_mask(&gsp->falcon, 0x624, 0x00000080, 0x00000080);
+ nvkm_falcon_wr32(&gsp->falcon, 0x10c, 0x00000000);
+ break;
+ case GSP_SEQ_BUF_OPCODE_CORE_START:
+ nvkm_trace(subdev, "seq core start\n");
+ if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000040)
+ nvkm_falcon_wr32(&gsp->falcon, 0x130, 0x00000002);
+ else
+ nvkm_falcon_wr32(&gsp->falcon, 0x100, 0x00000002);
+ break;
+ case GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT:
+ nvkm_trace(subdev, "seq core wait halt\n");
+ nvkm_msec(device, 2000,
+ if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000010)
+ break;
+ );
+ break;
+ case GSP_SEQ_BUF_OPCODE_CORE_RESUME: {
+ struct nvkm_sec2 *sec2 = device->sec2;
+ u32 mbox0;
+
+ nvkm_trace(subdev, "seq core resume\n");
+
+ ret = gsp->func->reset(gsp);
+ if (WARN_ON(ret))
+ return ret;
+
+ nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr));
+ nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr));
+
+ nvkm_falcon_start(&sec2->falcon);
+
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x1180f8) & 0x04000000)
+ break;
+ ) < 0)
+ return -ETIMEDOUT;
+
+ mbox0 = nvkm_falcon_rd32(&sec2->falcon, 0x040);
+ if (WARN_ON(mbox0)) {
+ nvkm_error(&gsp->subdev, "seq core resume sec2: 0x%x\n", mbox0);
+ return -EIO;
+ }
+
+ nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version);
+
+ if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon)))
+ return -EIO;
+ }
+ break;
+ default:
+ nvkm_error(subdev, "unknown sequencer opcode %08x\n", cmd->opCode);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void
+nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem)
+{
+ if (mem->data) {
+ dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr);
+ mem->data = NULL;
+ }
+}
+
+static int
+nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, u32 size, struct nvkm_gsp_mem *mem)
+{
+ mem->size = size;
+ mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL);
+ if (WARN_ON(!mem->data))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int
+r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 wpr2_hi;
+ int ret;
+
+ wpr2_hi = nvkm_rd32(device, 0x1fa828);
+ if (!wpr2_hi) {
+ nvkm_debug(subdev, "WPR2 not set - skipping booter unload\n");
+ return 0;
+ }
+
+ ret = nvkm_falcon_fw_boot(&gsp->booter.unload, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
+ if (WARN_ON(ret))
+ return ret;
+
+ wpr2_hi = nvkm_rd32(device, 0x1fa828);
+ if (WARN_ON(wpr2_hi))
+ return -EIO;
+
+ return 0;
+}
+
+static int
+r535_gsp_booter_load(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
+{
+ int ret;
+
+ ret = nvkm_falcon_fw_boot(&gsp->booter.load, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
+ if (ret)
+ return ret;
+
+ nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version);
+
+ if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon)))
+ return -EIO;
+
+ return 0;
+}
+
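+/* Fill the GspFwWprMeta structure handed to the GSP bootloader: sysmem
+ * addresses of the radix3-mapped ELF, boot binary and signatures, plus the
+ * VRAM layout of WPR2 (heap, ELF, boot binary, FRTS) and the VGA workspace.
+ */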
+static int
+r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
+{
+ GspFwWprMeta *meta;
+ int ret;
+
+ ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->wpr_meta);
+ if (ret)
+ return ret;
+
+ meta = gsp->wpr_meta.data;
+
+ meta->magic = GSP_FW_WPR_META_MAGIC;
+ meta->revision = GSP_FW_WPR_META_REVISION;
+
+ meta->sysmemAddrOfRadix3Elf = gsp->radix3.mem[0].addr;
+ meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size;
+
+ meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
+ meta->sizeOfBootloader = gsp->boot.fw.size;
+ meta->bootloaderCodeOffset = gsp->boot.code_offset;
+ meta->bootloaderDataOffset = gsp->boot.data_offset;
+ meta->bootloaderManifestOffset = gsp->boot.manifest_offset;
+
+ meta->sysmemAddrOfSignature = gsp->sig.addr;
+ meta->sizeOfSignature = gsp->sig.size;
+
+ meta->gspFwRsvdStart = gsp->fb.heap.addr;
+ meta->nonWprHeapOffset = gsp->fb.heap.addr;
+ meta->nonWprHeapSize = gsp->fb.heap.size;
+ meta->gspFwWprStart = gsp->fb.wpr2.addr;
+ meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr;
+ meta->gspFwHeapSize = gsp->fb.wpr2.heap.size;
+ meta->gspFwOffset = gsp->fb.wpr2.elf.addr;
+ meta->bootBinOffset = gsp->fb.wpr2.boot.addr;
+ meta->frtsOffset = gsp->fb.wpr2.frts.addr;
+ meta->frtsSize = gsp->fb.wpr2.frts.size;
+ meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000);
+ meta->fbSize = gsp->fb.size;
+ meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr;
+ meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size;
+ meta->bootCount = 0;
+ meta->partitionRpcAddr = 0;
+ meta->partitionRpcRequestOffset = 0;
+ meta->partitionRpcReplyOffset = 0;
+ meta->verified = 0;
+ return 0;
+}
+
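+/* Allocate one DMA buffer holding the PTE array followed by the 256KiB
+ * command queue and 256KiB message queue, then initialise the queue headers.
+ * Entries are GSP_PAGE_SIZE each, with the first page of each queue reserved
+ * for the tx/rx headers.
+ */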
+static int
+r535_gsp_shared_init(struct nvkm_gsp *gsp)
+{
+ struct {
+ msgqTxHeader tx;
+ msgqRxHeader rx;
+ } *cmdq, *msgq;
+ int ret, i;
+
+ gsp->shm.cmdq.size = 0x40000;
+ gsp->shm.msgq.size = 0x40000;
+
+ gsp->shm.ptes.nr = (gsp->shm.cmdq.size + gsp->shm.msgq.size) >> GSP_PAGE_SHIFT;
+ gsp->shm.ptes.nr += DIV_ROUND_UP(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE);
+ gsp->shm.ptes.size = ALIGN(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE);
+
+ ret = nvkm_gsp_mem_ctor(gsp, gsp->shm.ptes.size +
+ gsp->shm.cmdq.size +
+ gsp->shm.msgq.size,
+ &gsp->shm.mem);
+ if (ret)
+ return ret;
+
+ gsp->shm.ptes.ptr = gsp->shm.mem.data;
+ gsp->shm.cmdq.ptr = (u8 *)gsp->shm.ptes.ptr + gsp->shm.ptes.size;
+ gsp->shm.msgq.ptr = (u8 *)gsp->shm.cmdq.ptr + gsp->shm.cmdq.size;
+
+ for (i = 0; i < gsp->shm.ptes.nr; i++)
+ gsp->shm.ptes.ptr[i] = gsp->shm.mem.addr + (i << GSP_PAGE_SHIFT);
+
+ cmdq = gsp->shm.cmdq.ptr;
+ cmdq->tx.version = 0;
+ cmdq->tx.size = gsp->shm.cmdq.size;
+ cmdq->tx.entryOff = GSP_PAGE_SIZE;
+ cmdq->tx.msgSize = GSP_PAGE_SIZE;
+ cmdq->tx.msgCount = (cmdq->tx.size - cmdq->tx.entryOff) / cmdq->tx.msgSize;
+ cmdq->tx.writePtr = 0;
+ cmdq->tx.flags = 1;
+ cmdq->tx.rxHdrOff = offsetof(typeof(*cmdq), rx.readPtr);
+
+ msgq = gsp->shm.msgq.ptr;
+
+ gsp->cmdq.cnt = cmdq->tx.msgCount;
+ gsp->cmdq.wptr = &cmdq->tx.writePtr;
+ gsp->cmdq.rptr = &msgq->rx.readPtr;
+ gsp->msgq.cnt = cmdq->tx.msgCount;
+ gsp->msgq.wptr = &msgq->tx.writePtr;
+ gsp->msgq.rptr = &cmdq->rx.readPtr;
+ return 0;
+}
+
+static int
+r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
+{
+ GSP_ARGUMENTS_CACHED *args;
+ int ret;
+
+ if (!resume) {
+ ret = r535_gsp_shared_init(gsp);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs);
+ if (ret)
+ return ret;
+ }
+
+ args = gsp->rmargs.data;
+ args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr;
+ args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr;
+ args->messageQueueInitArguments.cmdQueueOffset =
+ (u8 *)gsp->shm.cmdq.ptr - (u8 *)gsp->shm.mem.data;
+ args->messageQueueInitArguments.statQueueOffset =
+ (u8 *)gsp->shm.msgq.ptr - (u8 *)gsp->shm.mem.data;
+
+ if (!resume) {
+ args->srInitArguments.oldLevel = 0;
+ args->srInitArguments.flags = 0;
+ args->srInitArguments.bInPMTransition = 0;
+ } else {
+ args->srInitArguments.oldLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3;
+ args->srInitArguments.flags = 0;
+ args->srInitArguments.bInPMTransition = 1;
+ }
+
+ return 0;
+}
+
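+/* Pack up to the first eight characters of 'name' into a u64 identifier
+ * (first character in the most significant byte), as used by the LibOS
+ * memory-region init arguments.
+ */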
+static inline u64
+r535_gsp_libos_id8(const char *name)
+{
+ u64 id = 0;
+
+ for (int i = 0; i < sizeof(id) && *name; i++, name++)
+ id = (id << 8) | *name;
+
+ return id;
+}
+
+static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
+{
+ unsigned int num_pages = DIV_ROUND_UP_ULL(size, GSP_PAGE_SIZE);
+ unsigned int i;
+
+ for (i = 0; i < num_pages; i++)
+ ptes[i] = (u64)addr + (i << GSP_PAGE_SHIFT);
+}
+
+static int
+r535_gsp_libos_init(struct nvkm_gsp *gsp)
+{
+ LibosMemoryRegionInitArgument *args;
+ int ret;
+
+ ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->libos);
+ if (ret)
+ return ret;
+
+ args = gsp->libos.data;
+
+ ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->loginit);
+ if (ret)
+ return ret;
+
+ args[0].id8 = r535_gsp_libos_id8("LOGINIT");
+ args[0].pa = gsp->loginit.addr;
+ args[0].size = gsp->loginit.size;
+ args[0].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
+ args[0].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM;
+ create_pte_array(gsp->loginit.data + sizeof(u64), gsp->loginit.addr, gsp->loginit.size);
+
+ ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logintr);
+ if (ret)
+ return ret;
+
+ args[1].id8 = r535_gsp_libos_id8("LOGINTR");
+ args[1].pa = gsp->logintr.addr;
+ args[1].size = gsp->logintr.size;
+ args[1].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
+ args[1].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM;
+ create_pte_array(gsp->logintr.data + sizeof(u64), gsp->logintr.addr, gsp->logintr.size);
+
+ ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logrm);
+ if (ret)
+ return ret;
+
+ args[2].id8 = r535_gsp_libos_id8("LOGRM");
+ args[2].pa = gsp->logrm.addr;
+ args[2].size = gsp->logrm.size;
+ args[2].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
+ args[2].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM;
+ create_pte_array(gsp->logrm.data + sizeof(u64), gsp->logrm.addr, gsp->logrm.size);
+
+ ret = r535_gsp_rmargs_init(gsp, false);
+ if (ret)
+ return ret;
+
+ args[3].id8 = r535_gsp_libos_id8("RMARGS");
+ args[3].pa = gsp->rmargs.addr;
+ args[3].size = gsp->rmargs.size;
+ args[3].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
+ args[3].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM;
+ return 0;
+}
+
+void
+nvkm_gsp_sg_free(struct nvkm_device *device, struct sg_table *sgt)
+{
+ struct scatterlist *sgl;
+ int i;
+
+ dma_unmap_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0);
+
+ for_each_sgtable_sg(sgt, sgl, i) {
+ struct page *page = sg_page(sgl);
+
+ __free_page(page);
+ }
+
+ sg_free_table(sgt);
+}
+
+int
+nvkm_gsp_sg(struct nvkm_device *device, u64 size, struct sg_table *sgt)
+{
+ const u64 pages = DIV_ROUND_UP(size, PAGE_SIZE);
+ struct scatterlist *sgl;
+ int ret, i;
+
+ ret = sg_alloc_table(sgt, pages, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ for_each_sgtable_sg(sgt, sgl, i) {
+ struct page *page = alloc_page(GFP_KERNEL);
+
+ if (!page) {
+ nvkm_gsp_sg_free(device, sgt);
+ return -ENOMEM;
+ }
+
+ sg_set_page(sgl, page, PAGE_SIZE, 0);
+ }
+
+ ret = dma_map_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0);
+ if (ret)
+ nvkm_gsp_sg_free(device, sgt);
+
+ return ret;
+}
+
+static void
+nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
+{
+ for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--)
+ nvkm_gsp_mem_dtor(gsp, &rx3->mem[i]);
+}
+
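+/* Build the three-level "radix3" page table GSP-RM uses to find the firmware
+ * in sysmem: level 2 maps the scatterlist pages of the image itself, and
+ * each higher level maps the pages of the level below. mem[0] is the root
+ * passed to GSP.
+ */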
+static int
+nvkm_gsp_radix3_sg(struct nvkm_device *device, struct sg_table *sgt, u64 size,
+ struct nvkm_gsp_radix3 *rx3)
+{
+ u64 addr;
+
+ for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) {
+ u64 *ptes;
+ int idx;
+
+ rx3->mem[i].size = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
+ rx3->mem[i].data = dma_alloc_coherent(device->dev, rx3->mem[i].size,
+ &rx3->mem[i].addr, GFP_KERNEL);
+ if (WARN_ON(!rx3->mem[i].data))
+ return -ENOMEM;
+
+ ptes = rx3->mem[i].data;
+ if (i == 2) {
+ struct scatterlist *sgl;
+
+ for_each_sgtable_dma_sg(sgt, sgl, idx) {
+ for (int j = 0; j < sg_dma_len(sgl) / GSP_PAGE_SIZE; j++)
+ *ptes++ = sg_dma_address(sgl) + (GSP_PAGE_SIZE * j);
+ }
+ } else {
+ for (int j = 0; j < size / GSP_PAGE_SIZE; j++)
+ *ptes++ = addr + GSP_PAGE_SIZE * j;
+ }
+
+ size = rx3->mem[i].size;
+ addr = rx3->mem[i].addr;
+ }
+
+ return 0;
+}
+
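+/* Shut GSP-RM down. For suspend, allocate a sysmem buffer (described by a
+ * GspFwSRMeta header and radix3 table) for GSP to save the WPR region into,
+ * then issue UNLOADING_GUEST_DRIVER, wait for the falcon to set bit 31 of
+ * register 0x040, reset it, and run FWSEC-SB plus booter-unload to tear
+ * down WPR2.
+ */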
+int
+r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
+{
+ u32 mbox0 = 0xff, mbox1 = 0xff;
+ int ret;
+
+ if (!gsp->running)
+ return 0;
+
+ if (suspend) {
+ GspFwWprMeta *meta = gsp->wpr_meta.data;
+ u64 len = meta->gspFwWprEnd - meta->gspFwWprStart;
+ GspFwSRMeta *sr;
+
+ ret = nvkm_gsp_sg(gsp->subdev.device, len, &gsp->sr.sgt);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_radix3_sg(gsp->subdev.device, &gsp->sr.sgt, len, &gsp->sr.radix3);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_mem_ctor(gsp, sizeof(*sr), &gsp->sr.meta);
+ if (ret)
+ return ret;
+
+ sr = gsp->sr.meta.data;
+ sr->magic = GSP_FW_SR_META_MAGIC;
+ sr->revision = GSP_FW_SR_META_REVISION;
+ sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.mem[0].addr;
+ sr->sizeOfSuspendResumeData = len;
+
+ mbox0 = lower_32_bits(gsp->sr.meta.addr);
+ mbox1 = upper_32_bits(gsp->sr.meta.addr);
+ }
+
+ ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend);
+ if (WARN_ON(ret))
+ return ret;
+
+ nvkm_msec(gsp->subdev.device, 2000,
+ if (nvkm_falcon_rd32(&gsp->falcon, 0x040) & 0x80000000)
+ break;
+ );
+
+ nvkm_falcon_reset(&gsp->falcon);
+
+ ret = nvkm_gsp_fwsec_sb(gsp);
+ WARN_ON(ret);
+
+ ret = r535_gsp_booter_unload(gsp, mbox0, mbox1);
+ WARN_ON(ret);
+
+ gsp->running = false;
+ return 0;
+}
+
+int
+r535_gsp_init(struct nvkm_gsp *gsp)
+{
+ u32 mbox0, mbox1;
+ int ret;
+
+ if (!gsp->sr.meta.data) {
+ mbox0 = lower_32_bits(gsp->wpr_meta.addr);
+ mbox1 = upper_32_bits(gsp->wpr_meta.addr);
+ } else {
+ r535_gsp_rmargs_init(gsp, true);
+
+ mbox0 = lower_32_bits(gsp->sr.meta.addr);
+ mbox1 = upper_32_bits(gsp->sr.meta.addr);
+ }
+
+ /* Execute booter to handle (eventually...) booting GSP-RM. */
+ ret = r535_gsp_booter_load(gsp, mbox0, mbox1);
+ if (WARN_ON(ret))
+ goto done;
+
+ ret = r535_gsp_rpc_poll(gsp, NV_VGPU_MSG_EVENT_GSP_INIT_DONE);
+ if (ret)
+ goto done;
+
+ gsp->running = true;
+
+done:
+ if (gsp->sr.meta.data) {
+ nvkm_gsp_mem_dtor(gsp, &gsp->sr.meta);
+ nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
+ nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt);
+ return ret;
+ }
+
+ if (ret == 0)
+ ret = r535_gsp_postinit(gsp);
+
+ return ret;
+}
+
+static int
+r535_gsp_rm_boot_ctor(struct nvkm_gsp *gsp)
+{
+ const struct firmware *fw = gsp->fws.bl;
+ const struct nvfw_bin_hdr *hdr;
+ RM_RISCV_UCODE_DESC *desc;
+ int ret;
+
+ hdr = nvfw_bin_hdr(&gsp->subdev, fw->data);
+ desc = (void *)fw->data + hdr->header_offset;
+
+ ret = nvkm_gsp_mem_ctor(gsp, hdr->data_size, &gsp->boot.fw);
+ if (ret)
+ return ret;
+
+ memcpy(gsp->boot.fw.data, fw->data + hdr->data_offset, hdr->data_size);
+
+ gsp->boot.code_offset = desc->monitorCodeOffset;
+ gsp->boot.data_offset = desc->monitorDataOffset;
+ gsp->boot.manifest_offset = desc->manifestOffset;
+ gsp->boot.app_version = desc->appVersion;
+ return 0;
+}
+
+static const struct nvkm_firmware_func
+r535_gsp_fw = {
+ .type = NVKM_FIRMWARE_IMG_SGT,
+};
+
+static int
+r535_gsp_elf_section(struct nvkm_gsp *gsp, const char *name, const u8 **pdata, u64 *psize)
+{
+ const u8 *img = gsp->fws.rm->data;
+ const struct elf64_hdr *ehdr = (const struct elf64_hdr *)img;
+ const struct elf64_shdr *shdr = (const struct elf64_shdr *)&img[ehdr->e_shoff];
+ const char *names = &img[shdr[ehdr->e_shstrndx].sh_offset];
+
+ for (int i = 0; i < ehdr->e_shnum; i++, shdr++) {
+ if (!strcmp(&names[shdr->sh_name], name)) {
+ *pdata = &img[shdr->sh_offset];
+ *psize = shdr->sh_size;
+ return 0;
+ }
+ }
+
+ nvkm_error(&gsp->subdev, "section '%s' not found\n", name);
+ return -ENOENT;
+}
+
+static void
+r535_gsp_dtor_fws(struct nvkm_gsp *gsp)
+{
+ nvkm_firmware_put(gsp->fws.bl);
+ gsp->fws.bl = NULL;
+ nvkm_firmware_put(gsp->fws.booter.unload);
+ gsp->fws.booter.unload = NULL;
+ nvkm_firmware_put(gsp->fws.booter.load);
+ gsp->fws.booter.load = NULL;
+ nvkm_firmware_put(gsp->fws.rm);
+ gsp->fws.rm = NULL;
+}
+
+void
+r535_gsp_dtor(struct nvkm_gsp *gsp)
+{
+ idr_destroy(&gsp->client_id.idr);
+ mutex_destroy(&gsp->client_id.mutex);
+
+ nvkm_gsp_radix3_dtor(gsp, &gsp->radix3);
+ nvkm_gsp_mem_dtor(gsp, &gsp->sig);
+ nvkm_firmware_dtor(&gsp->fw);
+
+ nvkm_falcon_fw_dtor(&gsp->booter.unload);
+ nvkm_falcon_fw_dtor(&gsp->booter.load);
+
+ mutex_destroy(&gsp->msgq.mutex);
+ mutex_destroy(&gsp->cmdq.mutex);
+
+ r535_gsp_dtor_fws(gsp);
+}
+
+int
+r535_gsp_oneinit(struct nvkm_gsp *gsp)
+{
+ struct nvkm_device *device = gsp->subdev.device;
+ const u8 *data;
+ u64 size;
+ int ret;
+
+ mutex_init(&gsp->cmdq.mutex);
+ mutex_init(&gsp->msgq.mutex);
+
+ ret = gsp->func->booter.ctor(gsp, "booter-load", gsp->fws.booter.load,
+ &device->sec2->falcon, &gsp->booter.load);
+ if (ret)
+ return ret;
+
+ ret = gsp->func->booter.ctor(gsp, "booter-unload", gsp->fws.booter.unload,
+ &device->sec2->falcon, &gsp->booter.unload);
+ if (ret)
+ return ret;
+
+ /* Load GSP firmware from ELF image into DMA-accessible memory. */
+ ret = r535_gsp_elf_section(gsp, ".fwimage", &data, &size);
+ if (ret)
+ return ret;
+
+ ret = nvkm_firmware_ctor(&r535_gsp_fw, "gsp-rm", device, data, size, &gsp->fw);
+ if (ret)
+ return ret;
+
+ /* Load relevant signature from ELF image. */
+ ret = r535_gsp_elf_section(gsp, gsp->func->sig_section, &data, &size);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_mem_ctor(gsp, ALIGN(size, 256), &gsp->sig);
+ if (ret)
+ return ret;
+
+ memcpy(gsp->sig.data, data, size);
+
+ /* Build radix3 page table for ELF image. */
+ ret = nvkm_gsp_radix3_sg(device, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3);
+ if (ret)
+ return ret;
+
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER,
+ r535_gsp_msg_run_cpu_sequencer, gsp);
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_POST_EVENT, r535_gsp_msg_post_event, gsp);
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED,
+ r535_gsp_msg_rc_triggered, gsp);
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED,
+ r535_gsp_msg_mmu_fault_queued, gsp);
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp);
+
+ ret = r535_gsp_rm_boot_ctor(gsp);
+ if (ret)
+ return ret;
+
+ /* Release FW images - we've copied them to DMA buffers now. */
+ r535_gsp_dtor_fws(gsp);
+
+ /* Calculate FB layout. */
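+ /* WPR2 is laid out top-down from just below the VBIOS image: FRTS, boot
+  * firmware, the GSP-RM ELF, then the WPR heap, with a small non-WPR heap
+  * placed directly beneath the WPR2 region. */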
+ gsp->fb.wpr2.frts.size = 0x100000;
+ gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size;
+
+ gsp->fb.wpr2.boot.size = gsp->boot.fw.size;
+ gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000);
+
+ gsp->fb.wpr2.elf.size = gsp->fw.len;
+ gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000);
+
+ {
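+ /* Heap requirement scales with VRAM size: fixed carveout/base sizes plus
+  * per-GB and client-allocation parameters, clamped to a per-chip minimum. */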
+ u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30);
+
+ gsp->fb.wpr2.heap.size =
+ gsp->func->wpr_heap.os_carveout_size +
+ gsp->func->wpr_heap.base_size +
+ ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) +
+ ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20);
+
+ gsp->fb.wpr2.heap.size = max(gsp->fb.wpr2.heap.size, gsp->func->wpr_heap.min_size);
+ }
+
+ gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000);
+ gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000);
+
+ gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000);
+ gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr;
+
+ gsp->fb.heap.size = 0x100000;
+ gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size;
+
+ ret = nvkm_gsp_fwsec_frts(gsp);
+ if (WARN_ON(ret))
+ return ret;
+
+ ret = r535_gsp_libos_init(gsp);
+ if (WARN_ON(ret))
+ return ret;
+
+ ret = r535_gsp_wpr_meta_init(gsp);
+ if (WARN_ON(ret))
+ return ret;
+
+ ret = r535_gsp_rpc_set_system_info(gsp);
+ if (WARN_ON(ret))
+ return ret;
+
+ ret = r535_gsp_rpc_set_registry(gsp);
+ if (WARN_ON(ret))
+ return ret;
+
+ /* Reset GSP into RISC-V mode. */
+ ret = gsp->func->reset(gsp);
+ if (WARN_ON(ret))
+ return ret;
+
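+ /* Pass the address of the libos argument buffer to GSP-RM via the falcon
+  * mailbox registers. */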
+ nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr));
+ nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr));
+
+ mutex_init(&gsp->client_id.mutex);
+ idr_init(&gsp->client_id.idr);
+ return 0;
+}
+
+static int
+r535_gsp_load_fw(struct nvkm_gsp *gsp, const char *name, const char *ver,
+ const struct firmware **pfw)
+{
+ char fwname[64];
+
+ snprintf(fwname, sizeof(fwname), "gsp/%s-%s", name, ver);
+ return nvkm_firmware_get(&gsp->subdev, fwname, 0, pfw);
+}
+
+int
+r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ int ret;
+
+ if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", fwif->enable))
+ return -EINVAL;
+
+ if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) ||
+ (ret = r535_gsp_load_fw(gsp, "booter_load", fwif->ver, &gsp->fws.booter.load)) ||
+ (ret = r535_gsp_load_fw(gsp, "booter_unload", fwif->ver, &gsp->fws.booter.unload)) ||
+ (ret = r535_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl))) {
+ r535_gsp_dtor_fws(gsp);
+ return ret;
+ }
+
+ return 0;
+}
+
+#define NVKM_GSP_FIRMWARE(chip) \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-535.113.01.bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-535.113.01.bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-535.113.01.bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-535.113.01.bin")
+
+NVKM_GSP_FIRMWARE(tu102);
+NVKM_GSP_FIRMWARE(tu104);
+NVKM_GSP_FIRMWARE(tu106);
+
+NVKM_GSP_FIRMWARE(tu116);
+NVKM_GSP_FIRMWARE(tu117);
+
+NVKM_GSP_FIRMWARE(ga100);
+
+NVKM_GSP_FIRMWARE(ga102);
+NVKM_GSP_FIRMWARE(ga103);
+NVKM_GSP_FIRMWARE(ga104);
+NVKM_GSP_FIRMWARE(ga106);
+NVKM_GSP_FIRMWARE(ga107);
+
+NVKM_GSP_FIRMWARE(ad102);
+NVKM_GSP_FIRMWARE(ad103);
+NVKM_GSP_FIRMWARE(ad104);
+NVKM_GSP_FIRMWARE(ad106);
+NVKM_GSP_FIRMWARE(ad107);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
new file mode 100644
index 000000000000..59c5f2b9172a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2022 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/fb.h>
+
+#include <nvfw/flcn.h>
+#include <nvfw/fw.h>
+#include <nvfw/hs.h>
+
+int
+tu102_gsp_booter_ctor(struct nvkm_gsp *gsp, const char *name, const struct firmware *blob,
+ struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ const struct nvkm_falcon_fw_func *func = &gm200_flcn_fw;
+ const struct nvfw_bin_hdr *hdr;
+ const struct nvfw_hs_header_v2 *hshdr;
+ const struct nvfw_hs_load_header_v2 *lhdr;
+ u32 loc, sig, cnt;
+ int ret;
+
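+ /* Parse the heavy-secure image: pull the signature patch location/offset
+  * and the signature count out of the HS headers. */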
+ hdr = nvfw_bin_hdr(subdev, blob->data);
+ hshdr = nvfw_hs_header_v2(subdev, blob->data + hdr->header_offset);
+ loc = *(u32 *)(blob->data + hshdr->patch_loc);
+ sig = *(u32 *)(blob->data + hshdr->patch_sig);
+ cnt = *(u32 *)(blob->data + hshdr->num_sig);
+
+ ret = nvkm_falcon_fw_ctor(func, name, subdev->device, true,
+ blob->data + hdr->data_offset, hdr->data_size, falcon, fw);
+ if (ret)
+ goto done;
+
+ ret = nvkm_falcon_fw_sign(fw, loc, hshdr->sig_prod_size / cnt, blob->data,
+ cnt, hshdr->sig_prod_offset + sig, 0, 0);
+ if (ret)
+ goto done;
+
+ lhdr = nvfw_hs_load_header_v2(subdev, blob->data + hshdr->header_offset);
+
+ fw->nmem_base_img = 0;
+ fw->nmem_base = lhdr->os_code_offset;
+ fw->nmem_size = lhdr->os_code_size;
+ fw->imem_base_img = fw->nmem_size;
+ fw->imem_base = lhdr->app[0].offset;
+ fw->imem_size = lhdr->app[0].size;
+ fw->dmem_base_img = lhdr->os_data_offset;
+ fw->dmem_base = 0;
+ fw->dmem_size = lhdr->os_data_size;
+ fw->dmem_sign = loc - fw->dmem_base_img;
+ fw->boot_addr = lhdr->os_code_offset;
+
+done:
+ if (ret)
+ nvkm_falcon_fw_dtor(fw);
+
+ return ret;
+}
+
+static int
+tu102_gsp_fwsec_load_bld(struct nvkm_falcon_fw *fw)
+{
+ struct flcn_bl_dmem_desc_v2 desc = {
+ .ctx_dma = FALCON_DMAIDX_PHYS_SYS_NCOH,
+ .code_dma_base = fw->fw.phys,
+ .non_sec_code_off = fw->nmem_base,
+ .non_sec_code_size = fw->nmem_size,
+ .sec_code_off = fw->imem_base,
+ .sec_code_size = fw->imem_size,
+ .code_entry_point = 0,
+ .data_dma_base = fw->fw.phys + fw->dmem_base_img,
+ .data_size = fw->dmem_size,
+ .argc = 0,
+ .argv = 0,
+ };
+
+ flcn_bl_dmem_desc_v2_dump(fw->falcon->user, &desc);
+
+ nvkm_falcon_mask(fw->falcon, 0x600 + desc.ctx_dma * 4, 0x00000007, 0x00000005);
+
+ return nvkm_falcon_pio_wr(fw->falcon, (u8 *)&desc, 0, 0, DMEM, 0, sizeof(desc), 0, 0);
+}
+
+const struct nvkm_falcon_fw_func
+tu102_gsp_fwsec = {
+ .reset = gm200_flcn_fw_reset,
+ .load = gm200_flcn_fw_load,
+ .load_bld = tu102_gsp_fwsec_load_bld,
+ .boot = gm200_flcn_fw_boot,
+};
+
+int
+tu102_gsp_reset(struct nvkm_gsp *gsp)
+{
+ return gsp->falcon.func->reset_eng(&gsp->falcon);
+}
+
+static u64
+tu102_gsp_vga_workspace_addr(struct nvkm_gsp *gsp, u64 fb_size)
+{
+ struct nvkm_device *device = gsp->subdev.device;
+ const u64 base = fb_size - 0x100000;
+ u64 addr = 0;
+
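+ /* Prefer the VGA workspace address advertised at 0x625f04 when display is
+  * present and the value looks valid; otherwise fall back to the top of VRAM. */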
+ if (device->disp)
+ addr = nvkm_rd32(gsp->subdev.device, 0x625f04);
+ if (!(addr & 0x00000008))
+ return base;
+
+ addr = (addr & 0xffffff00) << 8;
+ if (addr < base)
+ return fb_size - 0x20000;
+
+ return addr;
+}
+
+int
+tu102_gsp_oneinit(struct nvkm_gsp *gsp)
+{
+ gsp->fb.size = nvkm_fb_vidmem_size(gsp->subdev.device);
+
+ gsp->fb.bios.vga_workspace.addr = tu102_gsp_vga_workspace_addr(gsp, gsp->fb.size);
+ gsp->fb.bios.vga_workspace.size = gsp->fb.size - gsp->fb.bios.vga_workspace.addr;
+ gsp->fb.bios.addr = gsp->fb.bios.vga_workspace.addr;
+ gsp->fb.bios.size = gsp->fb.bios.vga_workspace.size;
+
+ return r535_gsp_oneinit(gsp);
+}
+
+const struct nvkm_falcon_func
+tu102_gsp_flcn = {
+ .disable = gm200_flcn_disable,
+ .enable = gm200_flcn_enable,
+ .addr2 = 0x1000,
+ .riscv_irqmask = 0x2b4,
+ .reset_eng = gp102_flcn_reset_eng,
+ .reset_wait_mem_scrubbing = gm200_flcn_reset_wait_mem_scrubbing,
+ .bind_inst = gm200_flcn_bind_inst,
+ .bind_stat = gm200_flcn_bind_stat,
+ .bind_intr = true,
+ .imem_pio = &gm200_flcn_imem_pio,
+ .dmem_pio = &gm200_flcn_dmem_pio,
+ .riscv_active = tu102_flcn_riscv_active,
+};
+
+static const struct nvkm_gsp_func
+tu102_gsp_r535_113_01 = {
+ .flcn = &tu102_gsp_flcn,
+ .fwsec = &tu102_gsp_fwsec,
+
+ .sig_section = ".fwsignature_tu10x",
+
+ .wpr_heap.base_size = 8 << 20,
+ .wpr_heap.min_size = 64 << 20,
+
+ .booter.ctor = tu102_gsp_booter_ctor,
+
+ .dtor = r535_gsp_dtor,
+ .oneinit = tu102_gsp_oneinit,
+ .init = r535_gsp_init,
+ .fini = r535_gsp_fini,
+ .reset = tu102_gsp_reset,
+
+ .rm = &r535_gsp_rm,
+};
+
+static struct nvkm_gsp_fwif
+tu102_gsps[] = {
+ { 0, r535_gsp_load, &tu102_gsp_r535_113_01, "535.113.01" },
+ { -1, gv100_gsp_nofw, &gv100_gsp },
+ {}
+};
+
+int
+tu102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gsp **pgsp)
+{
+ return nvkm_gsp_new_(tu102_gsps, device, type, inst, pgsp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
new file mode 100644
index 000000000000..04fbd9ed28b1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2022 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static const struct nvkm_gsp_func
+tu116_gsp_r535_113_01 = {
+ .flcn = &tu102_gsp_flcn,
+ .fwsec = &tu102_gsp_fwsec,
+
+ .sig_section = ".fwsignature_tu11x",
+
+ .wpr_heap.base_size = 8 << 20,
+ .wpr_heap.min_size = 64 << 20,
+
+ .booter.ctor = tu102_gsp_booter_ctor,
+
+ .dtor = r535_gsp_dtor,
+ .oneinit = tu102_gsp_oneinit,
+ .init = r535_gsp_init,
+ .fini = r535_gsp_fini,
+ .reset = tu102_gsp_reset,
+
+ .rm = &r535_gsp_rm,
+};
+
+static struct nvkm_gsp_fwif
+tu116_gsps[] = {
+ { 0, r535_gsp_load, &tu116_gsp_r535_113_01, "535.113.01" },
+ { -1, gv100_gsp_nofw, &gv100_gsp },
+ {}
+};
+
+int
+tu116_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gsp **pgsp)
+{
+ return nvkm_gsp_new_(tu116_gsps, device, type, inst, pgsp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
index 46917eb600f9..049477511312 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
@@ -24,6 +24,8 @@
#include "priv.h"
#include "pad.h"
+#include <subdev/gsp.h>
+
static void
gm200_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
{
@@ -44,5 +46,8 @@ int
gm200_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_i2c **pi2c)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return -ENODEV;
+
return nvkm_i2c_new_(&gm200_i2c, device, type, inst, pi2c);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
index 06cbe19ce376..553d540f2736 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
@@ -4,3 +4,5 @@ nvkm-y += nvkm/subdev/instmem/nv04.o
nvkm-y += nvkm/subdev/instmem/nv40.o
nvkm-y += nvkm/subdev/instmem/nv50.o
nvkm-y += nvkm/subdev/instmem/gk20a.o
+
+nvkm-y += nvkm/subdev/instmem/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index 24886eabe8dc..a2cd3330efc6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -28,7 +28,7 @@
/******************************************************************************
* instmem object base implementation
*****************************************************************************/
-static void
+void
nvkm_instobj_load(struct nvkm_instobj *iobj)
{
struct nvkm_memory *memory = &iobj->memory;
@@ -48,7 +48,7 @@ nvkm_instobj_load(struct nvkm_instobj *iobj)
iobj->suspend = NULL;
}
-static int
+int
nvkm_instobj_save(struct nvkm_instobj *iobj)
{
struct nvkm_memory *memory = &iobj->memory;
@@ -179,24 +179,14 @@ static int
nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_instmem *imem = nvkm_instmem(subdev);
- struct nvkm_instobj *iobj;
+ int ret;
if (suspend) {
- list_for_each_entry(iobj, &imem->list, head) {
- if (iobj->preserve) {
- int ret = nvkm_instobj_save(iobj);
- if (ret)
- return ret;
- }
- }
-
- nvkm_bar_bar2_fini(subdev->device);
+ ret = imem->func->suspend(imem);
+ if (ret)
+ return ret;
- list_for_each_entry(iobj, &imem->boot, head) {
- int ret = nvkm_instobj_save(iobj);
- if (ret)
- return ret;
- }
+ imem->suspend = true;
}
if (imem->func->fini)
@@ -209,20 +199,16 @@ static int
nvkm_instmem_init(struct nvkm_subdev *subdev)
{
struct nvkm_instmem *imem = nvkm_instmem(subdev);
- struct nvkm_instobj *iobj;
- list_for_each_entry(iobj, &imem->boot, head) {
- if (iobj->suspend)
- nvkm_instobj_load(iobj);
- }
+ if (imem->suspend) {
+ if (imem->func->resume)
+ imem->func->resume(imem);
- nvkm_bar_bar2_init(subdev->device);
-
- list_for_each_entry(iobj, &imem->list, head) {
- if (iobj->suspend)
- nvkm_instobj_load(iobj);
+ imem->suspend = false;
+ return 0;
}
+ nvkm_bar_bar2_init(subdev->device);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index a4ac94a2ab57..1b811d6972a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -564,6 +564,8 @@ gk20a_instmem_dtor(struct nvkm_instmem *base)
static const struct nvkm_instmem_func
gk20a_instmem = {
.dtor = gk20a_instmem_dtor,
+ .suspend = nv04_instmem_suspend,
+ .resume = nv04_instmem_resume,
.memory_new = gk20a_instobj_new,
.zero = false,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
index 25603b01d6f8..e5320ef849bf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
@@ -25,6 +25,7 @@
#include "priv.h"
#include <core/ramht.h>
+#include <subdev/bar.h>
struct nv04_instmem {
struct nvkm_instmem base;
@@ -154,6 +155,48 @@ nv04_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
nvkm_wr32(imem->subdev.device, 0x700000 + addr, data);
}
+void
+nv04_instmem_resume(struct nvkm_instmem *imem)
+{
+ struct nvkm_instobj *iobj;
+
+ list_for_each_entry(iobj, &imem->boot, head) {
+ if (iobj->suspend)
+ nvkm_instobj_load(iobj);
+ }
+
+ nvkm_bar_bar2_init(imem->subdev.device);
+
+ list_for_each_entry(iobj, &imem->list, head) {
+ if (iobj->suspend)
+ nvkm_instobj_load(iobj);
+ }
+}
+
+int
+nv04_instmem_suspend(struct nvkm_instmem *imem)
+{
+ struct nvkm_instobj *iobj;
+
+ list_for_each_entry(iobj, &imem->list, head) {
+ if (iobj->preserve) {
+ int ret = nvkm_instobj_save(iobj);
+ if (ret)
+ return ret;
+ }
+ }
+
+ nvkm_bar_bar2_fini(imem->subdev.device);
+
+ list_for_each_entry(iobj, &imem->boot, head) {
+ int ret = nvkm_instobj_save(iobj);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int
nv04_instmem_oneinit(struct nvkm_instmem *base)
{
@@ -210,6 +253,8 @@ static const struct nvkm_instmem_func
nv04_instmem = {
.dtor = nv04_instmem_dtor,
.oneinit = nv04_instmem_oneinit,
+ .suspend = nv04_instmem_suspend,
+ .resume = nv04_instmem_resume,
.rd32 = nv04_instmem_rd32,
.wr32 = nv04_instmem_wr32,
.memory_new = nv04_instobj_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index 4b2d7465d22f..a7f3fc342d87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -27,6 +27,7 @@
#include <core/memory.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
+#include <subdev/gsp.h>
#include <subdev/mmu.h>
struct nv50_instmem {
@@ -394,24 +395,44 @@ nv50_instmem_fini(struct nvkm_instmem *base)
nv50_instmem(base)->addr = ~0ULL;
}
+static void *
+nv50_instmem_dtor(struct nvkm_instmem *base)
+{
+ return nv50_instmem(base);
+}
+
static const struct nvkm_instmem_func
nv50_instmem = {
+ .dtor = nv50_instmem_dtor,
.fini = nv50_instmem_fini,
+ .suspend = nv04_instmem_suspend,
+ .resume = nv04_instmem_resume,
.memory_new = nv50_instobj_new,
.memory_wrap = nv50_instobj_wrap,
.zero = false,
};
int
-nv50_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_instmem **pimem)
+nv50_instmem_new_(const struct nvkm_instmem_func *func,
+ struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_instmem **pimem)
{
struct nv50_instmem *imem;
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM;
- nvkm_instmem_ctor(&nv50_instmem, device, type, inst, &imem->base);
+ nvkm_instmem_ctor(func, device, type, inst, &imem->base);
INIT_LIST_HEAD(&imem->lru);
*pimem = &imem->base;
return 0;
}
+
+int
+nv50_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_instmem **pimem)
+{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_instmem_new(&nv50_instmem, device, type, inst, pimem);
+
+ return nv50_instmem_new_(&nv50_instmem, device, type, inst, pimem);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
index 390ca00ab567..4c14c96fb60a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
@@ -7,6 +7,8 @@
struct nvkm_instmem_func {
void *(*dtor)(struct nvkm_instmem *);
int (*oneinit)(struct nvkm_instmem *);
+ int (*suspend)(struct nvkm_instmem *);
+ void (*resume)(struct nvkm_instmem *);
void (*fini)(struct nvkm_instmem *);
u32 (*rd32)(struct nvkm_instmem *, u32 addr);
void (*wr32)(struct nvkm_instmem *, u32 addr, u32 data);
@@ -16,10 +18,19 @@ struct nvkm_instmem_func {
bool zero;
};
+int nv50_instmem_new_(const struct nvkm_instmem_func *, struct nvkm_device *,
+ enum nvkm_subdev_type, int, struct nvkm_instmem **);
+
void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *,
enum nvkm_subdev_type, int, struct nvkm_instmem *);
void nvkm_instmem_boot(struct nvkm_instmem *);
+int nv04_instmem_suspend(struct nvkm_instmem *);
+void nv04_instmem_resume(struct nvkm_instmem *);
+
+int r535_instmem_new(const struct nvkm_instmem_func *,
+ struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
+
#include <core/memory.h>
struct nvkm_instobj {
@@ -32,4 +43,6 @@ struct nvkm_instobj {
void nvkm_instobj_ctor(const struct nvkm_memory_func *func,
struct nvkm_instmem *, struct nvkm_instobj *);
void nvkm_instobj_dtor(struct nvkm_instmem *, struct nvkm_instobj *);
+int nvkm_instobj_save(struct nvkm_instobj *);
+void nvkm_instobj_load(struct nvkm_instobj *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c
new file mode 100644
index 000000000000..5f3c9c02a4c0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c
@@ -0,0 +1,333 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvhw/drf.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+#include <nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h>
+#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
+
+struct fbsr_item {
+ const char *type;
+ u64 addr;
+ u64 size;
+
+ struct list_head head;
+};
+
+struct fbsr {
+ struct list_head items;
+
+ u64 size;
+ int regions;
+
+ struct nvkm_gsp_client client;
+ struct nvkm_gsp_device device;
+
+ u64 hmemory;
+ u64 sys_offset;
+};
+
+static int
+fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target aper,
+ u64 phys, u64 size, struct sg_table *sgt, struct nvkm_gsp_object *object)
+{
+ struct nvkm_gsp_client *client = device->object.client;
+ struct nvkm_gsp *gsp = client->gsp;
+ const u32 pages = size / GSP_PAGE_SIZE;
+ rpc_alloc_memory_v13_01 *rpc;
+ int ret;
+
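+ /* Describe the region to RM as a memory-list object: a PTE array covering
+  * either host pages (from the sg table) or a contiguous VRAM range. */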
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY,
+ sizeof(*rpc) + pages * sizeof(rpc->pteDesc.pte_pde[0]));
+ if (IS_ERR(rpc))
+ return PTR_ERR(rpc);
+
+ rpc->hClient = client->object.handle;
+ rpc->hDevice = device->object.handle;
+ rpc->hMemory = handle;
+ if (aper == NVKM_MEM_TARGET_HOST) {
+ rpc->hClass = NV01_MEMORY_LIST_SYSTEM;
+ rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, NONCONTIGUOUS) |
+ NVDEF(NVOS02, FLAGS, LOCATION, PCI) |
+ NVDEF(NVOS02, FLAGS, MAPPING, NO_MAP);
+ } else {
+ rpc->hClass = NV01_MEMORY_LIST_FBMEM;
+ rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, CONTIGUOUS) |
+ NVDEF(NVOS02, FLAGS, LOCATION, VIDMEM) |
+ NVDEF(NVOS02, FLAGS, MAPPING, NO_MAP);
+ rpc->format = 6; /* NV_MMU_PTE_KIND_GENERIC_MEMORY */
+ }
+ rpc->pteAdjust = 0;
+ rpc->length = size;
+ rpc->pageCount = pages;
+ rpc->pteDesc.idr = 0;
+ rpc->pteDesc.reserved1 = 0;
+ rpc->pteDesc.length = pages;
+
+ if (sgt) {
+ struct scatterlist *sgl;
+ int pte = 0, idx;
+
+ for_each_sgtable_dma_sg(sgt, sgl, idx) {
+ for (int i = 0; i < sg_dma_len(sgl) / GSP_PAGE_SIZE; i++)
+ rpc->pteDesc.pte_pde[pte++].pte = (sg_dma_address(sgl) >> 12) + i;
+
+ }
+ } else {
+ for (int i = 0; i < pages; i++)
+ rpc->pteDesc.pte_pde[i].pte = (phys >> 12) + i;
+ }
+
+ ret = nvkm_gsp_rpc_wr(gsp, rpc, true);
+ if (ret)
+ return ret;
+
+ object->client = device->object.client;
+ object->parent = &device->object;
+ object->handle = handle;
+ return 0;
+}
+
+static int
+fbsr_send(struct fbsr *fbsr, struct fbsr_item *item)
+{
+ NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS *ctrl;
+ struct nvkm_gsp *gsp = fbsr->client.gsp;
+ struct nvkm_gsp_object memlist;
+ int ret;
+
+ ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM,
+ item->addr, item->size, NULL, &memlist);
+ if (ret)
+ return ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl)) {
+ ret = PTR_ERR(ctrl);
+ goto done;
+ }
+
+ ctrl->fbsrType = FBSR_TYPE_DMA;
+ ctrl->hClient = fbsr->client.object.handle;
+ ctrl->hVidMem = fbsr->hmemory++;
+ ctrl->vidOffset = 0;
+ ctrl->sysOffset = fbsr->sys_offset;
+ ctrl->size = item->size;
+
+ ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+done:
+ nvkm_gsp_rm_free(&memlist);
+ if (ret)
+ return ret;
+
+ fbsr->sys_offset += item->size;
+ return 0;
+}
+
+static int
+fbsr_init(struct fbsr *fbsr, struct sg_table *sgt, u64 items_size)
+{
+ NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS *ctrl;
+ struct nvkm_gsp *gsp = fbsr->client.gsp;
+ struct nvkm_gsp_object memlist;
+ int ret;
+
+ ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST,
+ 0, fbsr->size, sgt, &memlist);
+ if (ret)
+ return ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_FBSR_INIT, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->fbsrType = FBSR_TYPE_DMA;
+ ctrl->numRegions = fbsr->regions;
+ ctrl->hClient = fbsr->client.object.handle;
+ ctrl->hSysMem = fbsr->hmemory++;
+ ctrl->gspFbAllocsSysOffset = items_size;
+
+ ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+ if (ret)
+ return ret;
+
+ nvkm_gsp_rm_free(&memlist);
+ return 0;
+}
+
+static bool
+fbsr_vram(struct fbsr *fbsr, const char *type, u64 addr, u64 size)
+{
+ struct fbsr_item *item;
+
+ if (!(item = kzalloc(sizeof(*item), GFP_KERNEL)))
+ return false;
+
+ item->type = type;
+ item->addr = addr;
+ item->size = size;
+ list_add_tail(&item->head, &fbsr->items);
+ return true;
+}
+
+static bool
+fbsr_inst(struct fbsr *fbsr, const char *type, struct nvkm_memory *memory)
+{
+ return fbsr_vram(fbsr, type, nvkm_memory_addr(memory), nvkm_memory_size(memory));
+}
+
+static void
+r535_instmem_resume(struct nvkm_instmem *imem)
+{
+ /* RM has restored VRAM contents already, so we just need to free the sysmem buffer. */
+ if (imem->rm.fbsr_valid) {
+ nvkm_gsp_sg_free(imem->subdev.device, &imem->rm.fbsr);
+ imem->rm.fbsr_valid = false;
+ }
+}
+
+static int
+r535_instmem_suspend(struct nvkm_instmem *imem)
+{
+ struct nvkm_subdev *subdev = &imem->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_instobj *iobj;
+ struct fbsr fbsr = {};
+ struct fbsr_item *item, *temp;
+ u64 items_size;
+ int ret;
+
+ INIT_LIST_HEAD(&fbsr.items);
+ fbsr.hmemory = 0xcaf00003;
+
+ /* Create a list of all regions we need RM to save during suspend. */
+ list_for_each_entry(iobj, &imem->list, head) {
+ if (iobj->preserve) {
+ if (!fbsr_inst(&fbsr, "inst", &iobj->memory))
+ return -ENOMEM;
+ }
+ }
+
+ list_for_each_entry(iobj, &imem->boot, head) {
+ if (!fbsr_inst(&fbsr, "boot", &iobj->memory))
+ return -ENOMEM;
+ }
+
+ if (!fbsr_vram(&fbsr, "gsp-non-wpr", gsp->fb.heap.addr, gsp->fb.heap.size))
+ return -ENOMEM;
+
+ /* Determine memory requirements. */
+ list_for_each_entry(item, &fbsr.items, head) {
+ nvkm_debug(subdev, "fbsr: %016llx %016llx %s\n",
+ item->addr, item->size, item->type);
+ fbsr.size += item->size;
+ fbsr.regions++;
+ }
+
+ items_size = fbsr.size;
+ nvkm_debug(subdev, "fbsr: %d regions (0x%llx bytes)\n", fbsr.regions, items_size);
+
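+ /* RM also saves its reserved VRAM and the VGA workspace into the same
+  * sysmem buffer, so account for them when sizing it. */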
+ fbsr.size += gsp->fb.rsvd_size;
+ fbsr.size += gsp->fb.bios.vga_workspace.size;
+ nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", fbsr.size);
+
+ ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &imem->rm.fbsr);
+ if (ret)
+ goto done;
+
+ /* Tell RM about the sysmem which will hold VRAM contents across suspend. */
+ ret = nvkm_gsp_client_device_ctor(gsp, &fbsr.client, &fbsr.device);
+ if (ret)
+ goto done_sgt;
+
+ ret = fbsr_init(&fbsr, &imem->rm.fbsr, items_size);
+ if (WARN_ON(ret))
+ goto done_sgt;
+
+ /* Send VRAM regions that need saving. */
+ list_for_each_entry(item, &fbsr.items, head) {
+ ret = fbsr_send(&fbsr, item);
+ if (WARN_ON(ret))
+ goto done_sgt;
+ }
+
+ imem->rm.fbsr_valid = true;
+
+ /* Clean up everything except the sysmem backup, which will be removed after resume. */
+done_sgt:
+ if (ret) /* ... unless we failed already. */
+ nvkm_gsp_sg_free(device, &imem->rm.fbsr);
+done:
+ list_for_each_entry_safe(item, temp, &fbsr.items, head) {
+ list_del(&item->head);
+ kfree(item);
+ }
+
+ nvkm_gsp_device_dtor(&fbsr.device);
+ nvkm_gsp_client_dtor(&fbsr.client);
+ return ret;
+}
+
+static void *
+r535_instmem_dtor(struct nvkm_instmem *imem)
+{
+ kfree(imem->func);
+ return imem;
+}
+
+int
+r535_instmem_new(const struct nvkm_instmem_func *hw,
+ struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_instmem **pinstmem)
+{
+ struct nvkm_instmem_func *rm;
+ int ret;
+
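+ /* Clone the hardware instmem functions, overriding suspend/resume so that
+  * GSP-RM saves and restores VRAM contents instead of the driver. */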
+ if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+ return -ENOMEM;
+
+ rm->dtor = r535_instmem_dtor;
+ rm->fini = hw->fini;
+ rm->suspend = r535_instmem_suspend;
+ rm->resume = r535_instmem_resume;
+ rm->memory_new = hw->memory_new;
+ rm->memory_wrap = hw->memory_wrap;
+ rm->zero = false;
+
+ ret = nv50_instmem_new_(rm, device, type, inst, pinstmem);
+ if (ret)
+ kfree(rm);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/ga102.c
index 159d9f8c95f3..951f01e3032a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/ga102.c
@@ -21,6 +21,8 @@
*/
#include "priv.h"
+#include <subdev/gsp.h>
+
static void
ga102_ltc_zbc_clear_color(struct nvkm_ltc *ltc, int i, const u32 color[4])
{
@@ -53,5 +55,8 @@ int
ga102_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_ltc **pltc)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return -ENODEV;
+
return nvkm_ltc_new_(&ga102_ltc, device, type, inst, pltc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c
index 265a05fd5f6b..053302ecb0a5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c
@@ -21,6 +21,8 @@
*/
#include "priv.h"
+#include <subdev/gsp.h>
+
void
gp102_ltc_zbc_clear_stencil(struct nvkm_ltc *ltc, int i, const u32 stencil)
{
@@ -49,5 +51,8 @@ int
gp102_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_ltc **pltc)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return -ENODEV;
+
return nvkm_ltc_new_(&gp102_ltc, device, type, inst, pltc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c
index 5d28d30d09d5..65e9f04972dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c
@@ -21,6 +21,8 @@
*/
#include "priv.h"
+#include <subdev/gsp.h>
+
static void
ga100_mc_device_disable(struct nvkm_mc *mc, u32 mask)
{
@@ -72,5 +74,8 @@ ga100_mc = {
int
ga100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return -ENODEV;
+
return nvkm_mc_new_(&ga100_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
index eb2ab03f4360..05d2fa95e05e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
@@ -23,6 +23,8 @@
*/
#include "priv.h"
+#include <subdev/gsp.h>
+
const struct nvkm_intr_data
gp100_mc_intrs[] = {
{ NVKM_ENGINE_DISP , 0, 0, 0x04000000, true },
@@ -98,5 +100,8 @@ gp100_mc = {
int
gp100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return -ENODEV;
+
return nvkm_mc_new_(&gp100_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
index a602b0cb5b31..7ba35ea59c06 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -16,6 +16,8 @@ nvkm-y += nvkm/subdev/mmu/gp10b.o
nvkm-y += nvkm/subdev/mmu/gv100.o
nvkm-y += nvkm/subdev/mmu/tu102.o
+nvkm-y += nvkm/subdev/mmu/r535.o
+
nvkm-y += nvkm/subdev/mmu/mem.o
nvkm-y += nvkm/subdev/mmu/memnv04.o
nvkm-y += nvkm/subdev/mmu/memnv50.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index ad3b44a9e0e7..b67ace7ae93c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -403,6 +403,10 @@ nvkm_mmu_dtor(struct nvkm_subdev *subdev)
nvkm_mmu_ptc_fini(mmu);
mutex_destroy(&mmu->mutex);
+
+ if (mmu->func->dtor)
+ mmu->func->dtor(mmu);
+
return mmu;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
index 5265bf4d8366..e9ca6537778c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -4,12 +4,16 @@
#define nvkm_mmu(p) container_of((p), struct nvkm_mmu, subdev)
#include <subdev/mmu.h>
+int r535_mmu_new(const struct nvkm_mmu_func *hw, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_mmu **);
+
void nvkm_mmu_ctor(const struct nvkm_mmu_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_mmu *);
int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_mmu **);
struct nvkm_mmu_func {
+ void (*dtor)(struct nvkm_mmu *);
void (*init)(struct nvkm_mmu *);
u8 dma_bits;
@@ -37,6 +41,8 @@ struct nvkm_mmu_func {
const u8 *(*kind)(struct nvkm_mmu *, int *count, u8 *invalid);
bool kind_sys;
+
+ int (*promote_vmm)(struct nvkm_vmm *);
};
extern const struct nvkm_mmu_func nv04_mmu;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c
new file mode 100644
index 000000000000..d3e95453f25d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+
+static int
+r535_mmu_promote_vmm(struct nvkm_vmm *vmm)
+{
+ NV_VASPACE_ALLOCATION_PARAMETERS *args;
+ int ret;
+
+ ret = nvkm_gsp_client_device_ctor(vmm->mmu->subdev.device->gsp,
+ &vmm->rm.client, &vmm->rm.device);
+ if (ret)
+ return ret;
+
+ args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, 0x90f10000, FERMI_VASPACE_A,
+ sizeof(*args), &vmm->rm.object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->index = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW;
+
+ ret = nvkm_gsp_rm_alloc_wr(&vmm->rm.object, args);
+ if (ret)
+ return ret;
+
+ {
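+ /* Reserve a 512MiB block of VA for RM's internal mappings and hand RM the
+  * page-directory levels that cover the reserved range. */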
+ NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *ctrl;
+
+ mutex_lock(&vmm->mutex.vmm);
+ ret = nvkm_vmm_get_locked(vmm, true, false, false, 0x1d, 32, 0x20000000,
+ &vmm->rm.rsvd);
+ mutex_unlock(&vmm->mutex.vmm);
+ if (ret)
+ return ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.object,
+ NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->pageSize = 0x20000000;
+ ctrl->virtAddrLo = vmm->rm.rsvd->addr;
+ ctrl->virtAddrHi = vmm->rm.rsvd->addr + vmm->rm.rsvd->size - 1;
+ ctrl->numLevelsToCopy = vmm->pd->pde[0]->pde[0] ? 3 : 2;
+ ctrl->levels[0].physAddress = vmm->pd->pt[0]->addr;
+ ctrl->levels[0].size = 0x20;
+ ctrl->levels[0].aperture = 1;
+ ctrl->levels[0].pageShift = 0x2f;
+ ctrl->levels[1].physAddress = vmm->pd->pde[0]->pt[0]->addr;
+ ctrl->levels[1].size = 0x1000;
+ ctrl->levels[1].aperture = 1;
+ ctrl->levels[1].pageShift = 0x26;
+ if (vmm->pd->pde[0]->pde[0]) {
+ ctrl->levels[2].physAddress = vmm->pd->pde[0]->pde[0]->pt[0]->addr;
+ ctrl->levels[2].size = 0x1000;
+ ctrl->levels[2].aperture = 1;
+ ctrl->levels[2].pageShift = 0x1d;
+ }
+
+ ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.object, ctrl);
+ }
+
+ return ret;
+}
+
+static void
+r535_mmu_dtor(struct nvkm_mmu *mmu)
+{
+ kfree(mmu->func);
+}
+
+int
+r535_mmu_new(const struct nvkm_mmu_func *hw,
+ struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
+{
+ struct nvkm_mmu_func *rm;
+ int ret;
+
+ if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+ return -ENOMEM;
+
+ rm->dtor = r535_mmu_dtor;
+ rm->dma_bits = hw->dma_bits;
+ rm->mmu = hw->mmu;
+ rm->mem = hw->mem;
+ rm->vmm = hw->vmm;
+ rm->kind = hw->kind;
+ rm->kind_sys = hw->kind_sys;
+ rm->promote_vmm = r535_mmu_promote_vmm;
+
+ ret = nvkm_mmu_new_(rm, device, type, inst, pmmu);
+ if (ret)
+ kfree(rm);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
index 8d060ce47f86..df662ce4a4b0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
@@ -24,6 +24,7 @@
#include "vmm.h"
#include <core/option.h>
+#include <subdev/gsp.h>
#include <nvif/class.h>
@@ -54,5 +55,8 @@ int
tu102_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_mmu **pmmu)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_mmu_new(&tu102_mmu, device, type, inst, pmmu);
+
return nvkm_mmu_new_(&tu102_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
index 8e459d88ff8f..cf490ff2b9f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
@@ -572,6 +572,12 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
}
uvmm->vmm->managed.raw = raw;
+ if (mmu->func->promote_vmm) {
+ ret = mmu->func->promote_vmm(uvmm->vmm);
+ if (ret)
+ return ret;
+ }
+
page = uvmm->vmm->func->page;
args->v0.page_nr = 0;
while (page && (page++)->shift)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index eb5fcadcb39a..9c97800fe037 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -1030,6 +1030,13 @@ nvkm_vmm_dtor(struct nvkm_vmm *vmm)
struct nvkm_vma *vma;
struct rb_node *node;
+ if (vmm->rm.client.gsp) {
+ nvkm_gsp_rm_free(&vmm->rm.object);
+ nvkm_gsp_device_dtor(&vmm->rm.device);
+ nvkm_gsp_client_dtor(&vmm->rm.client);
+ nvkm_vmm_put(vmm, &vmm->rm.rsvd);
+ }
+
if (0)
nvkm_vmm_dump(vmm);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
index 0095d58d4d9a..e34bc6076401 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
@@ -35,9 +35,11 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
mutex_lock(&vmm->mmu->mutex);
- nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8);
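+ /* When GSP-RM provides a BAR2 page-directory, invalidates must target it
+  * rather than the driver-allocated PDB. */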
+ if (!vmm->rm.bar2_pdb)
+ nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8);
+ else
+ nvkm_wr32(device, 0xb830a0, vmm->rm.bar2_pdb >> 8);
nvkm_wr32(device, 0xb830a4, 0x00000000);
- nvkm_wr32(device, 0x100e68, 0x00000000);
nvkm_wr32(device, 0xb830b0, 0x80000000 | type);
nvkm_msec(device, 2000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
index cd3148360996..da5b2b2190d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
@@ -23,6 +23,8 @@
*/
#include "priv.h"
+#include <subdev/gsp.h>
+
static const struct nvkm_falcon_func
gp102_pmu_flcn = {
.disable = gm200_flcn_disable,
@@ -54,5 +56,8 @@ int
gp102_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_pmu **ppmu)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return -ENODEV;
+
return nvkm_pmu_new_(gp102_pmu_fwif, device, type, inst, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c
index b4eaf6db36d7..b4530073bfdc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c
@@ -23,6 +23,8 @@
*/
#include "priv.h"
+#include <subdev/gsp.h>
+
static const struct nvkm_subdev_func
gm200_privring = {
.intr = gk104_privring_intr,
@@ -32,5 +34,8 @@ int
gm200_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_subdev **pprivring)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return -ENODEV;
+
return nvkm_subdev_new_(&gm200_privring, device, type, inst, pprivring);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c
index 44f021392b95..5392833d3614 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c
@@ -23,6 +23,8 @@
*/
#include "priv.h"
+#include <subdev/gsp.h>
+
static int
gp100_temp_get(struct nvkm_therm *therm)
{
@@ -52,5 +54,8 @@ int
gp100_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_therm **ptherm)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return -ENODEV;
+
return nvkm_therm_new_(&gp100_therm, device, type, inst, ptherm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c
index 84790cf52b90..129eabb8b9e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c
@@ -21,6 +21,8 @@
*/
#include "priv.h"
+#include <subdev/gsp.h>
+
static int
ga100_top_parse(struct nvkm_top *top)
{
@@ -76,7 +78,7 @@ ga100_top_parse(struct nvkm_top *top)
case 0x00000012: I_(NVKM_SUBDEV_IOCTRL, inst); break;
case 0x00000013: I_(NVKM_ENGINE_CE , inst); break;
case 0x00000014: O_(NVKM_SUBDEV_GSP , 0); break;
- case 0x00000015: O_(NVKM_ENGINE_NVJPG , 0); break;
+ case 0x00000015: I_(NVKM_ENGINE_NVJPG , inst); break;
case 0x00000016: O_(NVKM_ENGINE_OFA , 0); break;
case 0x00000017: O_(NVKM_SUBDEV_FLA , 0); break;
break;
@@ -104,5 +106,8 @@ int
ga100_top_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_top **ptop)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return -ENODEV;
+
return nvkm_top_new_(&ga100_top, device, type, inst, ptop);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
index 2bbba8244cbf..da55dac8c286 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
@@ -23,6 +23,8 @@
*/
#include "priv.h"
+#include <subdev/gsp.h>
+
static int
gk104_top_parse(struct nvkm_top *top)
{
@@ -89,7 +91,7 @@ gk104_top_parse(struct nvkm_top *top)
case 0x00000012: I_(NVKM_SUBDEV_IOCTRL, inst); break;
case 0x00000013: I_(NVKM_ENGINE_CE , inst); break;
case 0x00000014: O_(NVKM_SUBDEV_GSP , 0); break;
- case 0x00000015: O_(NVKM_ENGINE_NVJPG , 0); break;
+ case 0x00000015: I_(NVKM_ENGINE_NVJPG , inst); break;
default:
break;
}
@@ -115,5 +117,8 @@ int
gk104_top_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_top **ptop)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return -ENODEV;
+
return nvkm_top_new_(&gk104_top, device, type, inst, ptop);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/Kbuild
index 23cd21b40a25..23a85460615f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/Kbuild
@@ -4,3 +4,5 @@ nvkm-y += nvkm/subdev/vfn/uvfn.o
nvkm-y += nvkm/subdev/vfn/gv100.o
nvkm-y += nvkm/subdev/vfn/tu102.o
nvkm-y += nvkm/subdev/vfn/ga100.o
+
+nvkm-y += nvkm/subdev/vfn/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/ga100.c
index fd5c6931322d..bb0bb6fda54b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/ga100.c
@@ -21,6 +21,8 @@
*/
#include "priv.h"
+#include <subdev/gsp.h>
+
#include <nvif/class.h>
static const struct nvkm_intr_data
@@ -43,5 +45,8 @@ int
ga100_vfn_new(struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_vfn **pvfn)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_vfn_new(&ga100_vfn, device, type, inst, 0xb80000, pvfn);
+
return nvkm_vfn_new_(&ga100_vfn, device, type, inst, 0xb80000, pvfn);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/priv.h
index 96d53c02041b..3a09781ad032 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/priv.h
@@ -5,16 +5,21 @@
#include <subdev/vfn.h>
struct nvkm_vfn_func {
+ void (*dtor)(struct nvkm_vfn *);
+
const struct nvkm_intr_func *intr;
const struct nvkm_intr_data *intrs;
struct {
u32 addr;
u32 size;
- const struct nvkm_sclass base;
+ struct nvkm_sclass base;
} user;
};
+int r535_vfn_new(const struct nvkm_vfn_func *hw, struct nvkm_device *, enum nvkm_subdev_type, int,
+ u32 addr, struct nvkm_vfn **);
+
int nvkm_vfn_new_(const struct nvkm_vfn_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
u32 addr, struct nvkm_vfn **);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
new file mode 100644
index 000000000000..dce337306cab
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static void
+r535_vfn_dtor(struct nvkm_vfn *vfn)
+{
+ kfree(vfn->func);
+}
+
+int
+r535_vfn_new(const struct nvkm_vfn_func *hw,
+ struct nvkm_device *device, enum nvkm_subdev_type type, int inst, u32 addr,
+ struct nvkm_vfn **pvfn)
+{
+ struct nvkm_vfn_func *rm;
+ int ret;
+
+ if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+ return -ENOMEM;
+
+ rm->dtor = r535_vfn_dtor;
+ rm->intr = hw->intr;
+ rm->user = hw->user;
+
+ ret = nvkm_vfn_new_(rm, device, type, inst, addr, pvfn);
+ if (ret)
+ kfree(rm);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/tu102.c
index 3d063fb5e136..a3bf13c5c79b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/tu102.c
@@ -21,6 +21,8 @@
*/
#include "priv.h"
+#include <subdev/gsp.h>
+
#include <nvif/class.h>
static void
@@ -104,5 +106,8 @@ int
tu102_vfn_new(struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_vfn **pvfn)
{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_vfn_new(&tu102_vfn, device, type, inst, 0xb80000, pvfn);
+
return nvkm_vfn_new_(&tu102_vfn, device, type, inst, 0xb80000, pvfn);
}
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 8a6621f1e82c..2db40789235c 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -3893,7 +3893,7 @@ typedef struct _ATOM_GPIO_PIN_ASSIGNMENT
typedef struct _ATOM_GPIO_PIN_LUT
{
ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1];
+ ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[];
}ATOM_GPIO_PIN_LUT;
/****************************************************************************/
@@ -4061,7 +4061,7 @@ typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset
UCHAR ucNumberOfSrc;
USHORT usSrcObjectID[1];
UCHAR ucNumberOfDst;
- USHORT usDstObjectID[1];
+ USHORT usDstObjectID[];
}ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT;
@@ -4233,7 +4233,7 @@ typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD
ATOM_COMMON_RECORD_HEADER sheader;
UCHAR ucNumberOfDevice;
UCHAR ucReserved;
- ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1]; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation
+ ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[]; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation
}ATOM_CONNECTOR_DEVICE_TAG_RECORD;
@@ -4293,7 +4293,7 @@ typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD
ATOM_COMMON_RECORD_HEADER sheader;
UCHAR ucFlags; // Future expnadibility
UCHAR ucNumberOfPins; // Number of GPIO pins used to control the object
- ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1]; // the real gpio pin pair determined by number of pins ucNumberOfPins
+ ATOM_GPIO_PIN_CONTROL_PAIR asGpio[]; // the real gpio pin pair determined by number of pins ucNumberOfPins
}ATOM_OBJECT_GPIO_CNTL_RECORD;
//Definitions for GPIO pin state
@@ -4444,7 +4444,7 @@ typedef struct _ATOM_BRACKET_LAYOUT_RECORD
UCHAR ucWidth;
UCHAR ucConnNum;
UCHAR ucReserved;
- ATOM_CONNECTOR_LAYOUT_INFO asConnInfo[1];
+ ATOM_CONNECTOR_LAYOUT_INFO asConnInfo[];
}ATOM_BRACKET_LAYOUT_RECORD;
/****************************************************************************/
@@ -4600,7 +4600,7 @@ typedef struct _ATOM_I2C_VOLTAGE_OBJECT_V3
UCHAR ucVoltageControlAddress;
UCHAR ucVoltageControlOffset;
ULONG ulReserved;
- VOLTAGE_LUT_ENTRY asVolI2cLut[1]; // end with 0xff
+ VOLTAGE_LUT_ENTRY asVolI2cLut[]; // end with 0xff
}ATOM_I2C_VOLTAGE_OBJECT_V3;
// ATOM_I2C_VOLTAGE_OBJECT_V3.ucVoltageControlFlag
@@ -4625,7 +4625,7 @@ typedef struct _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
UCHAR ucLeakageEntryNum; // indicate the entry number of LeakageId/Voltage Lut table
UCHAR ucReserved[2];
ULONG ulMaxVoltageLevel;
- LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[1];
+ LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[];
}ATOM_LEAKAGE_VOLTAGE_OBJECT_V3;
@@ -4753,7 +4753,7 @@ typedef struct _ATOM_POWER_SOURCE_INFO
{
ATOM_COMMON_TABLE_HEADER asHeader;
UCHAR asPwrbehave[16];
- ATOM_POWER_SOURCE_OBJECT asPwrObj[1];
+ ATOM_POWER_SOURCE_OBJECT asPwrObj[];
}ATOM_POWER_SOURCE_INFO;
@@ -5440,7 +5440,7 @@ typedef struct _ATOM_FUSION_SYSTEM_INFO_V2
typedef struct _ATOM_I2C_DATA_RECORD
{
UCHAR ucNunberOfBytes; //Indicates how many bytes SW needs to write to the external ASIC for one block, besides to "Start" and "Stop"
- UCHAR ucI2CData[1]; //I2C data in bytes, should be less than 16 bytes usually
+ UCHAR ucI2CData[]; //I2C data in bytes, should be less than 16 bytes usually
}ATOM_I2C_DATA_RECORD;
@@ -5451,14 +5451,14 @@ typedef struct _ATOM_I2C_DEVICE_SETUP_INFO
UCHAR ucSSChipID; //SS chip being used
UCHAR ucSSChipSlaveAddr; //Slave Address to set up this SS chip
UCHAR ucNumOfI2CDataRecords; //number of data block
- ATOM_I2C_DATA_RECORD asI2CData[1];
+ ATOM_I2C_DATA_RECORD asI2CData[];
}ATOM_I2C_DEVICE_SETUP_INFO;
//==========================================================================================
typedef struct _ATOM_ASIC_MVDD_INFO
{
ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1];
+ ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[];
}ATOM_ASIC_MVDD_INFO;
//==========================================================================================
@@ -5520,7 +5520,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO
typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V2
{
ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_ASIC_SS_ASSIGNMENT_V2 asSpreadSpectrum[1]; //this is point only.
+ ATOM_ASIC_SS_ASSIGNMENT_V2 asSpreadSpectrum[]; //this is point only.
}ATOM_ASIC_INTERNAL_SS_INFO_V2;
typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3
@@ -5542,7 +5542,7 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3
typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
{
ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_ASIC_SS_ASSIGNMENT_V3 asSpreadSpectrum[1]; //this is pointer only.
+ ATOM_ASIC_SS_ASSIGNMENT_V3 asSpreadSpectrum[]; //this is pointer only.
}ATOM_ASIC_INTERNAL_SS_INFO_V3;
@@ -6282,7 +6282,7 @@ typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS
typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK{
ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID;
- ULONG aulMemData[1];
+ ULONG aulMemData[];
}ATOM_MEMORY_SETTING_DATA_BLOCK;
@@ -7092,7 +7092,7 @@ typedef struct _ATOM_DISP_OUT_INFO_V3
UCHAR ucCoreRefClkSource; // value of CORE_REF_CLK_SOURCE
UCHAR ucDispCaps;
UCHAR ucReserved[2];
- ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[1]; // for alligment only
+ ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[]; // for alligment only
}ATOM_DISP_OUT_INFO_V3;
//ucDispCaps
@@ -7324,12 +7324,12 @@ typedef struct _CLOCK_CONDITION_SETTING_ENTRY{
USHORT usMaxClockFreq;
UCHAR ucEncodeMode;
UCHAR ucPhySel;
- ULONG ulAnalogSetting[1];
+ ULONG ulAnalogSetting[];
}CLOCK_CONDITION_SETTING_ENTRY;
typedef struct _CLOCK_CONDITION_SETTING_INFO{
USHORT usEntrySize;
- CLOCK_CONDITION_SETTING_ENTRY asClkCondSettingEntry[1];
+ CLOCK_CONDITION_SETTING_ENTRY asClkCondSettingEntry[];
}CLOCK_CONDITION_SETTING_INFO;
typedef struct _PHY_CONDITION_REG_VAL{
@@ -7346,27 +7346,27 @@ typedef struct _PHY_CONDITION_REG_VAL_V2{
typedef struct _PHY_CONDITION_REG_INFO{
USHORT usRegIndex;
USHORT usSize;
- PHY_CONDITION_REG_VAL asRegVal[1];
+ PHY_CONDITION_REG_VAL asRegVal[];
}PHY_CONDITION_REG_INFO;
typedef struct _PHY_CONDITION_REG_INFO_V2{
USHORT usRegIndex;
USHORT usSize;
- PHY_CONDITION_REG_VAL_V2 asRegVal[1];
+ PHY_CONDITION_REG_VAL_V2 asRegVal[];
}PHY_CONDITION_REG_INFO_V2;
typedef struct _PHY_ANALOG_SETTING_INFO{
UCHAR ucEncodeMode;
UCHAR ucPhySel;
USHORT usSize;
- PHY_CONDITION_REG_INFO asAnalogSetting[1];
+ PHY_CONDITION_REG_INFO asAnalogSetting[];
}PHY_ANALOG_SETTING_INFO;
typedef struct _PHY_ANALOG_SETTING_INFO_V2{
UCHAR ucEncodeMode;
UCHAR ucPhySel;
USHORT usSize;
- PHY_CONDITION_REG_INFO_V2 asAnalogSetting[1];
+ PHY_CONDITION_REG_INFO_V2 asAnalogSetting[];
}PHY_ANALOG_SETTING_INFO_V2;
typedef struct _GFX_HAVESTING_PARAMETERS {
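Editorial note on the hunks above: the "[1]" -> "[]" changes replace one-element trailing arrays with C99 flexible array members, the form the kernel now prefers, so sizeof() no longer counts a padding element and bounds-checking tooling can treat the array as genuinely variable-length. A minimal, hypothetical sketch of how such a structure is usually sized and allocated (illustrative only, not taken from this patch):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical record mirroring the "[1]" -> "[]" conversions above. */
struct example_record {
	u8 count;
	u16 ids[];	/* flexible array member; contributes 0 to sizeof() */
};

static struct example_record *example_record_alloc(unsigned int n)
{
	struct example_record *rec;

	/* struct_size() computes sizeof(*rec) + n * sizeof(rec->ids[0])
	 * with overflow checking. */
	rec = kzalloc(struct_size(rec, ids, n), GFP_KERNEL);
	if (rec)
		rec->count = n;
	return rec;
}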
diff --git a/drivers/gpu/drm/renesas/shmobile/Kconfig b/drivers/gpu/drm/renesas/shmobile/Kconfig
index ad14112999ad..027220b8fe1c 100644
--- a/drivers/gpu/drm/renesas/shmobile/Kconfig
+++ b/drivers/gpu/drm/renesas/shmobile/Kconfig
@@ -1,11 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
config DRM_SHMOBILE
tristate "DRM Support for SH Mobile"
- depends on DRM
+ depends on DRM && PM
depends on ARCH_RENESAS || ARCH_SHMOBILE || COMPILE_TEST
select BACKLIGHT_CLASS_DEVICE
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
+ select VIDEOMODE_HELPERS
help
Choose this option if you have an SH Mobile chipset.
If M is selected the module will be called shmob-drm.
diff --git a/drivers/gpu/drm/renesas/shmobile/Makefile b/drivers/gpu/drm/renesas/shmobile/Makefile
index 861edafed856..2679555d61a7 100644
--- a/drivers/gpu/drm/renesas/shmobile/Makefile
+++ b/drivers/gpu/drm/renesas/shmobile/Makefile
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-shmob-drm-y := shmob_drm_backlight.o \
- shmob_drm_crtc.o \
+shmob-drm-y := shmob_drm_crtc.o \
shmob_drm_drv.o \
shmob_drm_kms.o \
shmob_drm_plane.o
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_backlight.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_backlight.c
deleted file mode 100644
index 794573badfe8..000000000000
--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_backlight.c
+++ /dev/null
@@ -1,82 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * shmob_drm_backlight.c -- SH Mobile DRM Backlight
- *
- * Copyright (C) 2012 Renesas Electronics Corporation
- *
- * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- */
-
-#include <linux/backlight.h>
-
-#include "shmob_drm_backlight.h"
-#include "shmob_drm_crtc.h"
-#include "shmob_drm_drv.h"
-
-static int shmob_drm_backlight_update(struct backlight_device *bdev)
-{
- struct shmob_drm_connector *scon = bl_get_data(bdev);
- struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
- const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
- int brightness = backlight_get_brightness(bdev);
-
- return bdata->set_brightness(brightness);
-}
-
-static int shmob_drm_backlight_get_brightness(struct backlight_device *bdev)
-{
- struct shmob_drm_connector *scon = bl_get_data(bdev);
- struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
- const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
-
- return bdata->get_brightness();
-}
-
-static const struct backlight_ops shmob_drm_backlight_ops = {
- .options = BL_CORE_SUSPENDRESUME,
- .update_status = shmob_drm_backlight_update,
- .get_brightness = shmob_drm_backlight_get_brightness,
-};
-
-void shmob_drm_backlight_dpms(struct shmob_drm_connector *scon, int mode)
-{
- if (scon->backlight == NULL)
- return;
-
- scon->backlight->props.power = mode == DRM_MODE_DPMS_ON
- ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
- backlight_update_status(scon->backlight);
-}
-
-int shmob_drm_backlight_init(struct shmob_drm_connector *scon)
-{
- struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
- const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
- struct drm_connector *connector = &scon->connector;
- struct drm_device *dev = connector->dev;
- struct backlight_device *backlight;
-
- if (!bdata->max_brightness)
- return 0;
-
- backlight = backlight_device_register(bdata->name, dev->dev, scon,
- &shmob_drm_backlight_ops, NULL);
- if (IS_ERR(backlight)) {
- dev_err(dev->dev, "unable to register backlight device: %ld\n",
- PTR_ERR(backlight));
- return PTR_ERR(backlight);
- }
-
- backlight->props.max_brightness = bdata->max_brightness;
- backlight->props.brightness = bdata->max_brightness;
- backlight->props.power = FB_BLANK_POWERDOWN;
- backlight_update_status(backlight);
-
- scon->backlight = backlight;
- return 0;
-}
-
-void shmob_drm_backlight_exit(struct shmob_drm_connector *scon)
-{
- backlight_device_unregister(scon->backlight);
-}
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_backlight.h b/drivers/gpu/drm/renesas/shmobile/shmob_drm_backlight.h
deleted file mode 100644
index d9abb7a60be5..000000000000
--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_backlight.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * shmob_drm_backlight.h -- SH Mobile DRM Backlight
- *
- * Copyright (C) 2012 Renesas Electronics Corporation
- *
- * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- */
-
-#ifndef __SHMOB_DRM_BACKLIGHT_H__
-#define __SHMOB_DRM_BACKLIGHT_H__
-
-struct shmob_drm_connector;
-
-void shmob_drm_backlight_dpms(struct shmob_drm_connector *scon, int mode);
-int shmob_drm_backlight_init(struct shmob_drm_connector *scon);
-void shmob_drm_backlight_exit(struct shmob_drm_connector *scon);
-
-#endif /* __SHMOB_DRM_BACKLIGHT_H__ */
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_crtc.c
index 11dd2bc803e7..2e2f37b9d0a4 100644
--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_crtc.c
@@ -7,9 +7,18 @@
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
-#include <linux/backlight.h>
#include <linux/clk.h>
-
+#include <linux/media-bus-format.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_dma_helper.h>
@@ -18,85 +27,123 @@
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
-#include "shmob_drm_backlight.h"
+#include <video/videomode.h>
+
#include "shmob_drm_crtc.h"
#include "shmob_drm_drv.h"
#include "shmob_drm_kms.h"
#include "shmob_drm_plane.h"
#include "shmob_drm_regs.h"
-/*
- * TODO: panel support
- */
-
/* -----------------------------------------------------------------------------
- * Clock management
+ * Page Flip
*/
-static int shmob_drm_clk_on(struct shmob_drm_device *sdev)
+void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
{
- int ret;
+ struct drm_pending_vblank_event *event;
+ struct drm_device *dev = scrtc->base.dev;
+ unsigned long flags;
- if (sdev->clock) {
- ret = clk_prepare_enable(sdev->clock);
- if (ret < 0)
- return ret;
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = scrtc->event;
+ scrtc->event = NULL;
+ if (event) {
+ drm_crtc_send_vblank_event(&scrtc->base, event);
+ wake_up(&scrtc->flip_wait);
+ drm_crtc_vblank_put(&scrtc->base);
}
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
- return 0;
+static bool shmob_drm_crtc_page_flip_pending(struct shmob_drm_crtc *scrtc)
+{
+ struct drm_device *dev = scrtc->base.dev;
+ unsigned long flags;
+ bool pending;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ pending = scrtc->event != NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ return pending;
}
-static void shmob_drm_clk_off(struct shmob_drm_device *sdev)
+static void shmob_drm_crtc_wait_page_flip(struct shmob_drm_crtc *scrtc)
{
- if (sdev->clock)
- clk_disable_unprepare(sdev->clock);
+ struct drm_crtc *crtc = &scrtc->base;
+ struct shmob_drm_device *sdev = to_shmob_device(crtc->dev);
+
+ if (wait_event_timeout(scrtc->flip_wait,
+ !shmob_drm_crtc_page_flip_pending(scrtc),
+ msecs_to_jiffies(50)))
+ return;
+
+ dev_warn(sdev->dev, "page flip timeout\n");
+
+ shmob_drm_crtc_finish_page_flip(scrtc);
}
/* -----------------------------------------------------------------------------
* CRTC
*/
+static const struct {
+ u32 fmt;
+ u32 ldmt1r;
+} shmob_drm_bus_fmts[] = {
+ { MEDIA_BUS_FMT_RGB888_3X8, LDMT1R_MIFTYP_RGB8 },
+ { MEDIA_BUS_FMT_RGB666_2X9_BE, LDMT1R_MIFTYP_RGB9 },
+ { MEDIA_BUS_FMT_RGB888_2X12_BE, LDMT1R_MIFTYP_RGB12A },
+ { MEDIA_BUS_FMT_RGB444_1X12, LDMT1R_MIFTYP_RGB12B },
+ { MEDIA_BUS_FMT_RGB565_1X16, LDMT1R_MIFTYP_RGB16 },
+ { MEDIA_BUS_FMT_RGB666_1X18, LDMT1R_MIFTYP_RGB18 },
+ { MEDIA_BUS_FMT_RGB888_1X24, LDMT1R_MIFTYP_RGB24 },
+ { MEDIA_BUS_FMT_UYVY8_1X16, LDMT1R_MIFTYP_YCBCR },
+};
+
static void shmob_drm_crtc_setup_geometry(struct shmob_drm_crtc *scrtc)
{
- struct drm_crtc *crtc = &scrtc->crtc;
- struct shmob_drm_device *sdev = crtc->dev->dev_private;
- const struct shmob_drm_interface_data *idata = &sdev->pdata->iface;
+ struct drm_crtc *crtc = &scrtc->base;
+ struct shmob_drm_device *sdev = to_shmob_device(crtc->dev);
+ const struct drm_display_info *info = &sdev->connector->display_info;
const struct drm_display_mode *mode = &crtc->mode;
+ unsigned int i;
u32 value;
- value = sdev->ldmt1r
- | ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : LDMT1R_VPOL)
- | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : LDMT1R_HPOL)
- | ((idata->flags & SHMOB_DRM_IFACE_FL_DWPOL) ? LDMT1R_DWPOL : 0)
- | ((idata->flags & SHMOB_DRM_IFACE_FL_DIPOL) ? LDMT1R_DIPOL : 0)
- | ((idata->flags & SHMOB_DRM_IFACE_FL_DAPOL) ? LDMT1R_DAPOL : 0)
- | ((idata->flags & SHMOB_DRM_IFACE_FL_HSCNT) ? LDMT1R_HSCNT : 0)
- | ((idata->flags & SHMOB_DRM_IFACE_FL_DWCNT) ? LDMT1R_DWCNT : 0);
- lcdc_write(sdev, LDMT1R, value);
-
- if (idata->interface >= SHMOB_DRM_IFACE_SYS8A &&
- idata->interface <= SHMOB_DRM_IFACE_SYS24) {
- /* Setup SYS bus. */
- value = (idata->sys.cs_setup << LDMT2R_CSUP_SHIFT)
- | (idata->sys.vsync_active_high ? LDMT2R_RSV : 0)
- | (idata->sys.vsync_dir_input ? LDMT2R_VSEL : 0)
- | (idata->sys.write_setup << LDMT2R_WCSC_SHIFT)
- | (idata->sys.write_cycle << LDMT2R_WCEC_SHIFT)
- | (idata->sys.write_strobe << LDMT2R_WCLW_SHIFT);
- lcdc_write(sdev, LDMT2R, value);
-
- value = (idata->sys.read_latch << LDMT3R_RDLC_SHIFT)
- | (idata->sys.read_setup << LDMT3R_RCSC_SHIFT)
- | (idata->sys.read_cycle << LDMT3R_RCEC_SHIFT)
- | (idata->sys.read_strobe << LDMT3R_RCLW_SHIFT);
- lcdc_write(sdev, LDMT3R, value);
+ if (!info->num_bus_formats || !info->bus_formats) {
+ dev_warn(sdev->dev, "No bus format reported, using RGB888\n");
+ value = LDMT1R_MIFTYP_RGB24;
+ } else {
+ for (i = 0; i < ARRAY_SIZE(shmob_drm_bus_fmts); i++) {
+ if (shmob_drm_bus_fmts[i].fmt == info->bus_formats[0])
+ break;
+ }
+ if (i < ARRAY_SIZE(shmob_drm_bus_fmts)) {
+ value = shmob_drm_bus_fmts[i].ldmt1r;
+ } else {
+ dev_warn(sdev->dev,
+ "unsupported bus format 0x%x, using RGB888\n",
+ info->bus_formats[0]);
+ value = LDMT1R_MIFTYP_RGB24;
+ }
}
+ if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
+ value |= LDMT1R_DWPOL;
+ if (info->bus_flags & DRM_BUS_FLAG_DE_LOW)
+ value |= LDMT1R_DIPOL;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ value |= LDMT1R_VPOL;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ value |= LDMT1R_HPOL;
+ lcdc_write(sdev, LDMT1R, value);
+
value = ((mode->hdisplay / 8) << 16) /* HDCN */
| (mode->htotal / 8); /* HTCN */
lcdc_write(sdev, LDHCNR, value);
@@ -121,7 +168,7 @@ static void shmob_drm_crtc_setup_geometry(struct shmob_drm_crtc *scrtc)
static void shmob_drm_crtc_start_stop(struct shmob_drm_crtc *scrtc, bool start)
{
- struct shmob_drm_device *sdev = scrtc->crtc.dev->dev_private;
+ struct shmob_drm_device *sdev = to_shmob_device(scrtc->base.dev);
u32 value;
value = lcdc_read(sdev, LDCNT2R);
@@ -145,34 +192,23 @@ static void shmob_drm_crtc_start_stop(struct shmob_drm_crtc *scrtc, bool start)
}
}
-/*
- * shmob_drm_crtc_start - Configure and start the LCDC
- * @scrtc: the SH Mobile CRTC
- *
- * Configure and start the LCDC device. External devices (clocks, MERAM, panels,
- * ...) are not touched by this function.
- */
-static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
+static inline struct shmob_drm_crtc *to_shmob_crtc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct shmob_drm_crtc, base);
+}
+
+static void shmob_drm_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
- struct drm_crtc *crtc = &scrtc->crtc;
- struct shmob_drm_device *sdev = crtc->dev->dev_private;
- const struct shmob_drm_interface_data *idata = &sdev->pdata->iface;
- const struct shmob_drm_format_info *format;
- struct drm_device *dev = sdev->ddev;
- struct drm_plane *plane;
+ struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
+ struct shmob_drm_device *sdev = to_shmob_device(crtc->dev);
+ unsigned int clk_div = sdev->config.clk_div;
+ struct device *dev = sdev->dev;
u32 value;
int ret;
- if (scrtc->started)
- return;
-
- format = shmob_drm_format_info(crtc->primary->fb->format->format);
- if (WARN_ON(format == NULL))
- return;
-
- /* Enable clocks before accessing the hardware. */
- ret = shmob_drm_clk_on(sdev);
- if (ret < 0)
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
return;
/* Reset and enable the LCDC. */
@@ -188,79 +224,50 @@ static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
lcdc_write(sdev, LDPMR, 0);
value = sdev->lddckr;
- if (idata->clk_div) {
+ if (clk_div) {
/* FIXME: sh7724 can only use 42, 48, 54 and 60 for the divider
* denominator.
*/
lcdc_write(sdev, LDDCKPAT1R, 0);
- lcdc_write(sdev, LDDCKPAT2R, (1 << (idata->clk_div / 2)) - 1);
+ lcdc_write(sdev, LDDCKPAT2R, (1 << (clk_div / 2)) - 1);
- if (idata->clk_div == 1)
+ if (clk_div == 1)
value |= LDDCKR_MOSEL;
else
- value |= idata->clk_div;
+ value |= clk_div;
}
lcdc_write(sdev, LDDCKR, value);
lcdc_write(sdev, LDDCKSTPR, 0);
lcdc_wait_bit(sdev, LDDCKSTPR, ~0, 0);
- /* TODO: Setup SYS panel */
-
/* Setup geometry, format, frame buffer memory and operation mode. */
shmob_drm_crtc_setup_geometry(scrtc);
- /* TODO: Handle YUV colorspaces. Hardcode REC709 for now. */
- lcdc_write(sdev, LDDFR, format->lddfr | LDDFR_CF1);
- lcdc_write(sdev, LDMLSR, scrtc->line_size);
- lcdc_write(sdev, LDSA1R, scrtc->dma[0]);
- if (format->yuv)
- lcdc_write(sdev, LDSA2R, scrtc->dma[1]);
lcdc_write(sdev, LDSM1R, 0);
- /* Word and long word swap. */
- switch (format->fourcc) {
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_NV42:
- value = LDDDSR_LS | LDDDSR_WS;
- break;
- case DRM_FORMAT_RGB888:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV24:
- value = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS;
- break;
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_XRGB8888:
- default:
- value = LDDDSR_LS;
- break;
- }
- lcdc_write(sdev, LDDDSR, value);
-
- /* Setup planes. */
- drm_for_each_legacy_plane(plane, dev) {
- if (plane->crtc == crtc)
- shmob_drm_plane_setup(plane);
- }
-
/* Enable the display output. */
lcdc_write(sdev, LDCNT1R, LDCNT1R_DE);
shmob_drm_crtc_start_stop(scrtc, true);
- scrtc->started = true;
+ /* Turn vertical blank interrupt reporting back on. */
+ drm_crtc_vblank_on(crtc);
}
-static void shmob_drm_crtc_stop(struct shmob_drm_crtc *scrtc)
+static void shmob_drm_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
- struct drm_crtc *crtc = &scrtc->crtc;
- struct shmob_drm_device *sdev = crtc->dev->dev_private;
+ struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
+ struct shmob_drm_device *sdev = to_shmob_device(crtc->dev);
- if (!scrtc->started)
- return;
+ /*
+ * Disable vertical blank interrupt reporting. We first need to wait
+ * for page flip completion before stopping the CRTC as userspace
+ * expects page flips to eventually complete.
+ */
+ shmob_drm_crtc_wait_page_flip(scrtc);
+ drm_crtc_vblank_off(crtc);
/* Stop the LCDC. */
shmob_drm_crtc_start_stop(scrtc, false);
@@ -268,145 +275,31 @@ static void shmob_drm_crtc_stop(struct shmob_drm_crtc *scrtc)
/* Disable the display output. */
lcdc_write(sdev, LDCNT1R, 0);
- /* Stop clocks. */
- shmob_drm_clk_off(sdev);
-
- scrtc->started = false;
-}
-
-void shmob_drm_crtc_suspend(struct shmob_drm_crtc *scrtc)
-{
- shmob_drm_crtc_stop(scrtc);
-}
-
-void shmob_drm_crtc_resume(struct shmob_drm_crtc *scrtc)
-{
- if (scrtc->dpms != DRM_MODE_DPMS_ON)
- return;
-
- shmob_drm_crtc_start(scrtc);
+ pm_runtime_put(sdev->dev);
}
-static void shmob_drm_crtc_compute_base(struct shmob_drm_crtc *scrtc,
- int x, int y)
+static void shmob_drm_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
- struct drm_crtc *crtc = &scrtc->crtc;
- struct drm_framebuffer *fb = crtc->primary->fb;
- struct drm_gem_dma_object *gem;
- unsigned int bpp;
-
- bpp = scrtc->format->yuv ? 8 : scrtc->format->bpp;
- gem = drm_fb_dma_get_gem_obj(fb, 0);
- scrtc->dma[0] = gem->dma_addr + fb->offsets[0]
- + y * fb->pitches[0] + x * bpp / 8;
-
- if (scrtc->format->yuv) {
- bpp = scrtc->format->bpp - 8;
- gem = drm_fb_dma_get_gem_obj(fb, 1);
- scrtc->dma[1] = gem->dma_addr + fb->offsets[1]
- + y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
- + x * (bpp == 16 ? 2 : 1);
- }
-}
-
-static void shmob_drm_crtc_update_base(struct shmob_drm_crtc *scrtc)
-{
- struct drm_crtc *crtc = &scrtc->crtc;
- struct shmob_drm_device *sdev = crtc->dev->dev_private;
-
- shmob_drm_crtc_compute_base(scrtc, crtc->x, crtc->y);
-
- lcdc_write_mirror(sdev, LDSA1R, scrtc->dma[0]);
- if (scrtc->format->yuv)
- lcdc_write_mirror(sdev, LDSA2R, scrtc->dma[1]);
-
- lcdc_write(sdev, LDRCNTR, lcdc_read(sdev, LDRCNTR) ^ LDRCNTR_MRS);
-}
-
-#define to_shmob_crtc(c) container_of(c, struct shmob_drm_crtc, crtc)
-
-static void shmob_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
-
- if (scrtc->dpms == mode)
- return;
-
- if (mode == DRM_MODE_DPMS_ON)
- shmob_drm_crtc_start(scrtc);
- else
- shmob_drm_crtc_stop(scrtc);
-
- scrtc->dpms = mode;
-}
-
-static void shmob_drm_crtc_mode_prepare(struct drm_crtc *crtc)
-{
- shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
-}
+ struct drm_pending_vblank_event *event;
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
-static int shmob_drm_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
- struct shmob_drm_device *sdev = crtc->dev->dev_private;
- const struct shmob_drm_format_info *format;
-
- format = shmob_drm_format_info(crtc->primary->fb->format->format);
- if (format == NULL) {
- dev_dbg(sdev->dev, "mode_set: unsupported format %p4cc\n",
- &crtc->primary->fb->format->format);
- return -EINVAL;
+ if (crtc->state->event) {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = crtc->state->event;
+ crtc->state->event = NULL;
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
}
-
- scrtc->format = format;
- scrtc->line_size = crtc->primary->fb->pitches[0];
-
- shmob_drm_crtc_compute_base(scrtc, x, y);
-
- return 0;
-}
-
-static void shmob_drm_crtc_mode_commit(struct drm_crtc *crtc)
-{
- shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
-}
-
-static int shmob_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- shmob_drm_crtc_update_base(to_shmob_crtc(crtc));
-
- return 0;
}
static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
- .dpms = shmob_drm_crtc_dpms,
- .prepare = shmob_drm_crtc_mode_prepare,
- .commit = shmob_drm_crtc_mode_commit,
- .mode_set = shmob_drm_crtc_mode_set,
- .mode_set_base = shmob_drm_crtc_mode_set_base,
+ .atomic_flush = shmob_drm_crtc_atomic_flush,
+ .atomic_enable = shmob_drm_crtc_atomic_enable,
+ .atomic_disable = shmob_drm_crtc_atomic_disable,
};
-void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
-{
- struct drm_pending_vblank_event *event;
- struct drm_device *dev = scrtc->crtc.dev;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- event = scrtc->event;
- scrtc->event = NULL;
- if (event) {
- drm_crtc_send_vblank_event(&scrtc->crtc, event);
- drm_crtc_vblank_put(&scrtc->crtc);
- }
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
@@ -414,7 +307,7 @@ static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
- struct drm_device *dev = scrtc->crtc.dev;
+ struct drm_device *dev = scrtc->base.dev;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
@@ -424,12 +317,11 @@ static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
}
spin_unlock_irqrestore(&dev->event_lock, flags);
- crtc->primary->fb = fb;
- shmob_drm_crtc_update_base(scrtc);
+ drm_atomic_set_fb_for_plane(crtc->primary->state, fb);
if (event) {
event->pipe = 0;
- drm_crtc_vblank_get(&scrtc->crtc);
+ drm_crtc_vblank_get(&scrtc->base);
spin_lock_irqsave(&dev->event_lock, flags);
scrtc->event = event;
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -457,7 +349,7 @@ static void shmob_drm_crtc_enable_vblank(struct shmob_drm_device *sdev,
static int shmob_drm_enable_vblank(struct drm_crtc *crtc)
{
- struct shmob_drm_device *sdev = crtc->dev->dev_private;
+ struct shmob_drm_device *sdev = to_shmob_device(crtc->dev);
shmob_drm_crtc_enable_vblank(sdev, true);
@@ -466,88 +358,65 @@ static int shmob_drm_enable_vblank(struct drm_crtc *crtc)
static void shmob_drm_disable_vblank(struct drm_crtc *crtc)
{
- struct shmob_drm_device *sdev = crtc->dev->dev_private;
+ struct shmob_drm_device *sdev = to_shmob_device(crtc->dev);
shmob_drm_crtc_enable_vblank(sdev, false);
}
static const struct drm_crtc_funcs crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
.destroy = drm_crtc_cleanup,
- .set_config = drm_crtc_helper_set_config,
+ .set_config = drm_atomic_helper_set_config,
.page_flip = shmob_drm_crtc_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = shmob_drm_enable_vblank,
.disable_vblank = shmob_drm_disable_vblank,
};
-static const uint32_t modeset_formats[] = {
- DRM_FORMAT_RGB565,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_XRGB8888,
-};
-
-static const struct drm_plane_funcs primary_plane_funcs = {
- DRM_PLANE_NON_ATOMIC_FUNCS,
-};
-
int shmob_drm_crtc_create(struct shmob_drm_device *sdev)
{
- struct drm_crtc *crtc = &sdev->crtc.crtc;
- struct drm_plane *primary;
+ struct drm_crtc *crtc = &sdev->crtc.base;
+ struct drm_plane *primary, *plane;
+ unsigned int i;
int ret;
- sdev->crtc.dpms = DRM_MODE_DPMS_OFF;
+ init_waitqueue_head(&sdev->crtc.flip_wait);
- primary = __drm_universal_plane_alloc(sdev->ddev, sizeof(*primary), 0,
- 0, &primary_plane_funcs,
- modeset_formats,
- ARRAY_SIZE(modeset_formats),
- NULL, DRM_PLANE_TYPE_PRIMARY,
- NULL);
+ primary = shmob_drm_plane_create(sdev, DRM_PLANE_TYPE_PRIMARY, 0);
if (IS_ERR(primary))
return PTR_ERR(primary);
- ret = drm_crtc_init_with_planes(sdev->ddev, crtc, primary, NULL,
+ for (i = 1; i < 5; ++i) {
+ plane = shmob_drm_plane_create(sdev, DRM_PLANE_TYPE_OVERLAY, i);
+ if (IS_ERR(plane))
+ return PTR_ERR(plane);
+ }
+
+ ret = drm_crtc_init_with_planes(&sdev->ddev, crtc, primary, NULL,
&crtc_funcs, NULL);
- if (ret < 0) {
- drm_plane_cleanup(primary);
- kfree(primary);
+ if (ret < 0)
return ret;
- }
drm_crtc_helper_add(crtc, &crtc_helper_funcs);
+ /* Start with vertical blank interrupt reporting disabled. */
+ drm_crtc_vblank_off(crtc);
+
return 0;
}
/* -----------------------------------------------------------------------------
- * Encoder
+ * Legacy Encoder
*/
-#define to_shmob_encoder(e) \
- container_of(e, struct shmob_drm_encoder, encoder)
-
-static void shmob_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- struct shmob_drm_encoder *senc = to_shmob_encoder(encoder);
- struct shmob_drm_device *sdev = encoder->dev->dev_private;
- struct shmob_drm_connector *scon = &sdev->connector;
-
- if (senc->dpms == mode)
- return;
-
- shmob_drm_backlight_dpms(scon, mode);
-
- senc->dpms = mode;
-}
-
static bool shmob_drm_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct shmob_drm_device *sdev = dev->dev_private;
- struct drm_connector *connector = &sdev->connector.connector;
+ struct shmob_drm_device *sdev = to_shmob_device(dev);
+ struct drm_connector *connector = sdev->connector;
const struct drm_display_mode *panel_mode;
if (list_empty(&connector->modes)) {
@@ -563,60 +432,61 @@ static bool shmob_drm_encoder_mode_fixup(struct drm_encoder *encoder,
return true;
}
-static void shmob_drm_encoder_mode_prepare(struct drm_encoder *encoder)
-{
- /* No-op, everything is handled in the CRTC code. */
-}
-
-static void shmob_drm_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- /* No-op, everything is handled in the CRTC code. */
-}
-
-static void shmob_drm_encoder_mode_commit(struct drm_encoder *encoder)
-{
- /* No-op, everything is handled in the CRTC code. */
-}
-
static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
- .dpms = shmob_drm_encoder_dpms,
.mode_fixup = shmob_drm_encoder_mode_fixup,
- .prepare = shmob_drm_encoder_mode_prepare,
- .commit = shmob_drm_encoder_mode_commit,
- .mode_set = shmob_drm_encoder_mode_set,
};
+/* -----------------------------------------------------------------------------
+ * Encoder
+ */
+
int shmob_drm_encoder_create(struct shmob_drm_device *sdev)
{
- struct drm_encoder *encoder = &sdev->encoder.encoder;
+ struct drm_encoder *encoder = &sdev->encoder;
+ struct drm_bridge *bridge;
int ret;
- sdev->encoder.dpms = DRM_MODE_DPMS_OFF;
-
encoder->possible_crtcs = 1;
- ret = drm_simple_encoder_init(sdev->ddev, encoder,
- DRM_MODE_ENCODER_LVDS);
+ ret = drm_simple_encoder_init(&sdev->ddev, encoder,
+ DRM_MODE_ENCODER_DPI);
if (ret < 0)
return ret;
- drm_encoder_helper_add(encoder, &encoder_helper_funcs);
+ if (sdev->pdata) {
+ drm_encoder_helper_add(encoder, &encoder_helper_funcs);
+ return 0;
+ }
+
+ /* Create a panel bridge */
+ bridge = devm_drm_of_get_bridge(sdev->dev, sdev->dev->of_node, 0, 0);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+
+ /* Attach the bridge to the encoder */
+ ret = drm_bridge_attach(encoder, bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ dev_err(sdev->dev, "failed to attach bridge: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
return 0;
}
/* -----------------------------------------------------------------------------
- * Connector
+ * Legacy Connector
*/
-#define to_shmob_connector(c) \
- container_of(c, struct shmob_drm_connector, connector)
+static inline struct shmob_drm_connector *to_shmob_connector(struct drm_connector *connector)
+{
+ return container_of(connector, struct shmob_drm_connector, base);
+}
static int shmob_drm_connector_get_modes(struct drm_connector *connector)
{
- struct shmob_drm_device *sdev = connector->dev->dev_private;
+ struct shmob_drm_connector *scon = to_shmob_connector(connector);
struct drm_display_mode *mode;
mode = drm_mode_create(connector->dev);
@@ -624,22 +494,10 @@ static int shmob_drm_connector_get_modes(struct drm_connector *connector)
return 0;
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
- mode->clock = sdev->pdata->panel.mode.clock;
- mode->hdisplay = sdev->pdata->panel.mode.hdisplay;
- mode->hsync_start = sdev->pdata->panel.mode.hsync_start;
- mode->hsync_end = sdev->pdata->panel.mode.hsync_end;
- mode->htotal = sdev->pdata->panel.mode.htotal;
- mode->vdisplay = sdev->pdata->panel.mode.vdisplay;
- mode->vsync_start = sdev->pdata->panel.mode.vsync_start;
- mode->vsync_end = sdev->pdata->panel.mode.vsync_end;
- mode->vtotal = sdev->pdata->panel.mode.vtotal;
- mode->flags = sdev->pdata->panel.mode.flags;
-
- drm_mode_set_name(mode);
- drm_mode_probed_add(connector, mode);
- connector->display_info.width_mm = sdev->pdata->panel.width_mm;
- connector->display_info.height_mm = sdev->pdata->panel.height_mm;
+ drm_display_mode_from_videomode(scon->mode, mode);
+
+ drm_mode_probed_add(connector, mode);
return 1;
}
@@ -659,54 +517,106 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = {
static void shmob_drm_connector_destroy(struct drm_connector *connector)
{
- struct shmob_drm_connector *scon = to_shmob_connector(connector);
-
- shmob_drm_backlight_exit(scon);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
+
+ kfree(connector);
}
static const struct drm_connector_funcs connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = shmob_drm_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-int shmob_drm_connector_create(struct shmob_drm_device *sdev,
- struct drm_encoder *encoder)
+static struct drm_connector *
+shmob_drm_connector_init(struct shmob_drm_device *sdev,
+ struct drm_encoder *encoder)
{
- struct drm_connector *connector = &sdev->connector.connector;
+ u32 bus_fmt = sdev->pdata->iface.bus_fmt;
+ struct shmob_drm_connector *scon;
+ struct drm_connector *connector;
+ struct drm_display_info *info;
+ unsigned int i;
int ret;
- sdev->connector.encoder = encoder;
+ for (i = 0; i < ARRAY_SIZE(shmob_drm_bus_fmts); i++) {
+ if (shmob_drm_bus_fmts[i].fmt == bus_fmt)
+ break;
+ }
+ if (i == ARRAY_SIZE(shmob_drm_bus_fmts)) {
+ dev_err(sdev->dev, "unsupported bus format 0x%x\n", bus_fmt);
+ return ERR_PTR(-EINVAL);
+ }
- connector->display_info.width_mm = sdev->pdata->panel.width_mm;
- connector->display_info.height_mm = sdev->pdata->panel.height_mm;
+ scon = kzalloc(sizeof(*scon), GFP_KERNEL);
+ if (!scon)
+ return ERR_PTR(-ENOMEM);
- ret = drm_connector_init(sdev->ddev, connector, &connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
- if (ret < 0)
- return ret;
+ connector = &scon->base;
+ scon->encoder = encoder;
+ scon->mode = &sdev->pdata->panel.mode;
+
+ info = &connector->display_info;
+ info->width_mm = sdev->pdata->panel.width_mm;
+ info->height_mm = sdev->pdata->panel.height_mm;
+
+ if (scon->mode->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
+ info->bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
+ if (scon->mode->flags & DISPLAY_FLAGS_DE_LOW)
+ info->bus_flags |= DRM_BUS_FLAG_DE_LOW;
+
+ ret = drm_display_info_set_bus_formats(info, &bus_fmt, 1);
+ if (ret < 0) {
+ kfree(scon);
+ return ERR_PTR(ret);
+ }
+
+ ret = drm_connector_init(&sdev->ddev, connector, &connector_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (ret < 0) {
+ kfree(scon);
+ return ERR_PTR(ret);
+ }
drm_connector_helper_add(connector, &connector_helper_funcs);
- ret = shmob_drm_backlight_init(&sdev->connector);
- if (ret < 0)
- goto err_cleanup;
+ return connector;
+}
+
+/* -----------------------------------------------------------------------------
+ * Connector
+ */
+
+int shmob_drm_connector_create(struct shmob_drm_device *sdev,
+ struct drm_encoder *encoder)
+{
+ struct drm_connector *connector;
+ int ret;
+
+ if (sdev->pdata)
+ connector = shmob_drm_connector_init(sdev, encoder);
+ else
+ connector = drm_bridge_connector_init(&sdev->ddev, encoder);
+ if (IS_ERR(connector)) {
+ dev_err(sdev->dev, "failed to create connector: %pe\n",

+ connector);
+ return PTR_ERR(connector);
+ }
ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0)
- goto err_backlight;
+ goto error;
+
+ connector->dpms = DRM_MODE_DPMS_OFF;
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- drm_object_property_set_value(&connector->base,
- sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
+ sdev->connector = connector;
return 0;
-err_backlight:
- shmob_drm_backlight_exit(&sdev->connector);
-err_cleanup:
+error:
drm_connector_cleanup(connector);
return ret;
}
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_crtc.h b/drivers/gpu/drm/renesas/shmobile/shmob_drm_crtc.h
index 21718843f46d..16e1712dd04e 100644
--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_crtc.h
+++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_crtc.h
@@ -14,39 +14,30 @@
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
-struct backlight_device;
+#include <linux/wait.h>
+
+#include <video/videomode.h>
+
struct drm_pending_vblank_event;
struct shmob_drm_device;
struct shmob_drm_format_info;
struct shmob_drm_crtc {
- struct drm_crtc crtc;
+ struct drm_crtc base;
struct drm_pending_vblank_event *event;
- int dpms;
-
- const struct shmob_drm_format_info *format;
- unsigned long dma[2];
- unsigned int line_size;
- bool started;
-};
-
-struct shmob_drm_encoder {
- struct drm_encoder encoder;
- int dpms;
+ wait_queue_head_t flip_wait;
};
+/* Legacy connector */
struct shmob_drm_connector {
- struct drm_connector connector;
+ struct drm_connector base;
struct drm_encoder *encoder;
-
- struct backlight_device *backlight;
+ const struct videomode *mode;
};
int shmob_drm_crtc_create(struct shmob_drm_device *sdev);
void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc);
-void shmob_drm_crtc_suspend(struct shmob_drm_crtc *scrtc);
-void shmob_drm_crtc_resume(struct shmob_drm_crtc *scrtc);
int shmob_drm_encoder_create(struct shmob_drm_device *sdev);
int shmob_drm_connector_create(struct shmob_drm_device *sdev,
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
index e5db4e0095ba..e83c3e52251d 100644
--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
@@ -11,13 +11,17 @@
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -31,57 +35,23 @@
* Hardware initialization
*/
-static int shmob_drm_init_interface(struct shmob_drm_device *sdev)
-{
- static const u32 ldmt1r[] = {
- [SHMOB_DRM_IFACE_RGB8] = LDMT1R_MIFTYP_RGB8,
- [SHMOB_DRM_IFACE_RGB9] = LDMT1R_MIFTYP_RGB9,
- [SHMOB_DRM_IFACE_RGB12A] = LDMT1R_MIFTYP_RGB12A,
- [SHMOB_DRM_IFACE_RGB12B] = LDMT1R_MIFTYP_RGB12B,
- [SHMOB_DRM_IFACE_RGB16] = LDMT1R_MIFTYP_RGB16,
- [SHMOB_DRM_IFACE_RGB18] = LDMT1R_MIFTYP_RGB18,
- [SHMOB_DRM_IFACE_RGB24] = LDMT1R_MIFTYP_RGB24,
- [SHMOB_DRM_IFACE_YUV422] = LDMT1R_MIFTYP_YCBCR,
- [SHMOB_DRM_IFACE_SYS8A] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8A,
- [SHMOB_DRM_IFACE_SYS8B] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8B,
- [SHMOB_DRM_IFACE_SYS8C] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8C,
- [SHMOB_DRM_IFACE_SYS8D] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8D,
- [SHMOB_DRM_IFACE_SYS9] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS9,
- [SHMOB_DRM_IFACE_SYS12] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS12,
- [SHMOB_DRM_IFACE_SYS16A] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16A,
- [SHMOB_DRM_IFACE_SYS16B] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16B,
- [SHMOB_DRM_IFACE_SYS16C] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16C,
- [SHMOB_DRM_IFACE_SYS18] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS18,
- [SHMOB_DRM_IFACE_SYS24] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS24,
- };
-
- if (sdev->pdata->iface.interface >= ARRAY_SIZE(ldmt1r)) {
- dev_err(sdev->dev, "invalid interface type %u\n",
- sdev->pdata->iface.interface);
- return -EINVAL;
- }
-
- sdev->ldmt1r = ldmt1r[sdev->pdata->iface.interface];
- return 0;
-}
-
static int shmob_drm_setup_clocks(struct shmob_drm_device *sdev,
- enum shmob_drm_clk_source clksrc)
+ enum shmob_drm_clk_source clksrc)
{
struct clk *clk;
char *clkname;
switch (clksrc) {
case SHMOB_DRM_CLK_BUS:
- clkname = "bus_clk";
+ clkname = "fck";
sdev->lddckr = LDDCKR_ICKSEL_BUS;
break;
case SHMOB_DRM_CLK_PERIPHERAL:
- clkname = "peripheral_clk";
+ clkname = "media";
sdev->lddckr = LDDCKR_ICKSEL_MIPI;
break;
case SHMOB_DRM_CLK_EXTERNAL:
- clkname = NULL;
+ clkname = "lclk";
sdev->lddckr = LDDCKR_ICKSEL_HDMI;
break;
default:
@@ -105,7 +75,7 @@ static int shmob_drm_setup_clocks(struct shmob_drm_device *sdev,
static irqreturn_t shmob_drm_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
- struct shmob_drm_device *sdev = dev->dev_private;
+ struct shmob_drm_device *sdev = to_shmob_device(dev);
unsigned long flags;
u32 status;
@@ -119,7 +89,7 @@ static irqreturn_t shmob_drm_irq(int irq, void *arg)
spin_unlock_irqrestore(&sdev->irq_lock, flags);
if (status & LDINTR_VES) {
- drm_handle_vblank(dev, 0);
+ drm_crtc_handle_vblank(&sdev->crtc.base);
shmob_drm_crtc_finish_page_flip(&sdev->crtc);
}
@@ -129,7 +99,7 @@ static irqreturn_t shmob_drm_irq(int irq, void *arg)
DEFINE_DRM_GEM_DMA_FOPS(shmob_drm_fops);
static const struct drm_driver shmob_drm_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
DRM_GEM_DMA_DRIVER_OPS,
.fops = &shmob_drm_fops,
.name = "shmob-drm",
@@ -147,26 +117,45 @@ static int shmob_drm_pm_suspend(struct device *dev)
{
struct shmob_drm_device *sdev = dev_get_drvdata(dev);
- drm_kms_helper_poll_disable(sdev->ddev);
- shmob_drm_crtc_suspend(&sdev->crtc);
+ return drm_mode_config_helper_suspend(&sdev->ddev);
+}
+
+static int shmob_drm_pm_resume(struct device *dev)
+{
+ struct shmob_drm_device *sdev = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_resume(&sdev->ddev);
+}
+
+static int shmob_drm_pm_runtime_suspend(struct device *dev)
+{
+ struct shmob_drm_device *sdev = dev_get_drvdata(dev);
+
+ if (sdev->clock)
+ clk_disable_unprepare(sdev->clock);
return 0;
}
-static int shmob_drm_pm_resume(struct device *dev)
+static int shmob_drm_pm_runtime_resume(struct device *dev)
{
struct shmob_drm_device *sdev = dev_get_drvdata(dev);
+ int ret;
- drm_modeset_lock_all(sdev->ddev);
- shmob_drm_crtc_resume(&sdev->crtc);
- drm_modeset_unlock_all(sdev->ddev);
+ if (sdev->clock) {
+ ret = clk_prepare_enable(sdev->clock);
+ if (ret < 0)
+ return ret;
+ }
- drm_kms_helper_poll_enable(sdev->ddev);
return 0;
}
-static DEFINE_SIMPLE_DEV_PM_OPS(shmob_drm_pm_ops,
- shmob_drm_pm_suspend, shmob_drm_pm_resume);
+static const struct dev_pm_ops shmob_drm_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(shmob_drm_pm_suspend, shmob_drm_pm_resume)
+ RUNTIME_PM_OPS(shmob_drm_pm_runtime_suspend,
+ shmob_drm_pm_runtime_resume, NULL)
+};
/* -----------------------------------------------------------------------------
* Platform driver
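For context on the hunk above: the explicit shmob_drm_clk_on()/shmob_drm_clk_off() helpers removed earlier in this patch reappear here as runtime PM callbacks, and the CRTC atomic hooks take and drop a runtime PM reference instead of toggling the clock directly. A simplified sketch of that pairing (illustrative only, names and error handling trimmed):

#include <linux/pm_runtime.h>

/* Roughly what the CRTC .atomic_enable path does: the runtime resume
 * callback above prepares and enables sdev->clock before any LCDC
 * register is touched. */
static int example_display_on(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret)
		return ret;

	/* ... program and start the LCDC ... */
	return 0;
}

/* Roughly what the CRTC .atomic_disable path does: dropping the
 * reference lets the runtime suspend callback gate the clock again. */
static void example_display_off(struct device *dev)
{
	/* ... stop the LCDC ... */
	pm_runtime_put(dev);
}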
@@ -175,37 +164,45 @@ static DEFINE_SIMPLE_DEV_PM_OPS(shmob_drm_pm_ops,
static void shmob_drm_remove(struct platform_device *pdev)
{
struct shmob_drm_device *sdev = platform_get_drvdata(pdev);
- struct drm_device *ddev = sdev->ddev;
+ struct drm_device *ddev = &sdev->ddev;
drm_dev_unregister(ddev);
+ drm_atomic_helper_shutdown(ddev);
drm_kms_helper_poll_fini(ddev);
- free_irq(sdev->irq, ddev);
- drm_dev_put(ddev);
}
static int shmob_drm_probe(struct platform_device *pdev)
{
struct shmob_drm_platform_data *pdata = pdev->dev.platform_data;
+ const struct shmob_drm_config *config;
struct shmob_drm_device *sdev;
struct drm_device *ddev;
- unsigned int i;
int ret;
- if (pdata == NULL) {
+ config = of_device_get_match_data(&pdev->dev);
+ if (!config && !pdata) {
dev_err(&pdev->dev, "no platform data\n");
return -EINVAL;
}
/*
- * Allocate and initialize the driver private data, I/O resources and
- * clocks.
+ * Allocate and initialize the DRM device, driver private data, I/O
+ * resources and clocks.
*/
- sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev), GFP_KERNEL);
- if (sdev == NULL)
- return -ENOMEM;
+ sdev = devm_drm_dev_alloc(&pdev->dev, &shmob_drm_driver,
+ struct shmob_drm_device, ddev);
+ if (IS_ERR(sdev))
+ return PTR_ERR(sdev);
+ ddev = &sdev->ddev;
sdev->dev = &pdev->dev;
- sdev->pdata = pdata;
+ if (config) {
+ sdev->config = *config;
+ } else {
+ sdev->pdata = pdata;
+ sdev->config.clk_source = pdata->clk_source;
+ sdev->config.clk_div = pdata->iface.clk_div;
+ }
spin_lock_init(&sdev->irq_lock);
platform_set_drvdata(pdev, sdev);
@@ -214,49 +211,32 @@ static int shmob_drm_probe(struct platform_device *pdev)
if (IS_ERR(sdev->mmio))
return PTR_ERR(sdev->mmio);
- ret = shmob_drm_setup_clocks(sdev, pdata->clk_source);
+ ret = shmob_drm_setup_clocks(sdev, sdev->config.clk_source);
if (ret < 0)
return ret;
- ret = shmob_drm_init_interface(sdev);
- if (ret < 0)
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
return ret;
- /* Allocate and initialize the DRM device. */
- ddev = drm_dev_alloc(&shmob_drm_driver, &pdev->dev);
- if (IS_ERR(ddev))
- return PTR_ERR(ddev);
-
- sdev->ddev = ddev;
- ddev->dev_private = sdev;
-
- ret = shmob_drm_modeset_init(sdev);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to initialize mode setting\n");
- goto err_free_drm_dev;
- }
-
- for (i = 0; i < 4; ++i) {
- ret = shmob_drm_plane_create(sdev, i);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to create plane %u\n", i);
- goto err_modeset_cleanup;
- }
- }
-
ret = drm_vblank_init(ddev, 1);
if (ret < 0) {
dev_err(&pdev->dev, "failed to initialize vblank\n");
- goto err_modeset_cleanup;
+ return ret;
}
+ ret = shmob_drm_modeset_init(sdev);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to initialize mode setting\n");
+
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto err_modeset_cleanup;
sdev->irq = ret;
- ret = request_irq(sdev->irq, shmob_drm_irq, 0, ddev->driver->name,
- ddev);
+ ret = devm_request_irq(&pdev->dev, sdev->irq, shmob_drm_irq, 0,
+ ddev->driver->name, ddev);
if (ret < 0) {
dev_err(&pdev->dev, "failed to install IRQ handler\n");
goto err_modeset_cleanup;
@@ -268,28 +248,35 @@ static int shmob_drm_probe(struct platform_device *pdev)
*/
ret = drm_dev_register(ddev, 0);
if (ret < 0)
- goto err_irq_uninstall;
+ goto err_modeset_cleanup;
drm_fbdev_generic_setup(ddev, 16);
return 0;
-err_irq_uninstall:
- free_irq(sdev->irq, ddev);
err_modeset_cleanup:
drm_kms_helper_poll_fini(ddev);
-err_free_drm_dev:
- drm_dev_put(ddev);
-
return ret;
}
+static const struct shmob_drm_config shmob_arm_config = {
+ .clk_source = SHMOB_DRM_CLK_BUS,
+ .clk_div = 5,
+};
+
+static const struct of_device_id shmob_drm_of_table[] __maybe_unused = {
+ { .compatible = "renesas,r8a7740-lcdc", .data = &shmob_arm_config, },
+ { .compatible = "renesas,sh73a0-lcdc", .data = &shmob_arm_config, },
+ { /* sentinel */ }
+};
+
static struct platform_driver shmob_drm_platform_driver = {
.probe = shmob_drm_probe,
.remove_new = shmob_drm_remove,
.driver = {
.name = "shmob-drm",
- .pm = pm_sleep_ptr(&shmob_drm_pm_ops),
+ .of_match_table = of_match_ptr(shmob_drm_of_table),
+ .pm = &shmob_drm_pm_ops,
},
};
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.h b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.h
index 4964ddd5ab74..088ac5381e91 100644
--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.h
+++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.h
@@ -20,23 +20,33 @@ struct clk;
struct device;
struct drm_device;
+struct shmob_drm_config {
+ enum shmob_drm_clk_source clk_source;
+ unsigned int clk_div;
+};
+
struct shmob_drm_device {
struct device *dev;
const struct shmob_drm_platform_data *pdata;
+ struct shmob_drm_config config;
void __iomem *mmio;
struct clk *clock;
u32 lddckr;
- u32 ldmt1r;
unsigned int irq;
spinlock_t irq_lock; /* Protects hardware LDINTR register */
- struct drm_device *ddev;
+ struct drm_device ddev;
struct shmob_drm_crtc crtc;
- struct shmob_drm_encoder encoder;
- struct shmob_drm_connector connector;
+ struct drm_encoder encoder;
+ struct drm_connector *connector;
};
+static inline struct shmob_drm_device *to_shmob_device(struct drm_device *dev)
+{
+ return container_of(dev, struct shmob_drm_device, ddev);
+}
+
#endif /* __SHMOB_DRM_DRV_H__ */
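The drv.c and drv.h hunks above switch from a separately allocated drm_device plus dev_private pointer to a drm_device embedded in shmob_drm_device, allocated with devm_drm_dev_alloc() and recovered with container_of(). A condensed, hypothetical sketch of the pattern (only devm_drm_dev_alloc() and drm_dev_register() are real API; the example_* names are made up for illustration):

#include <linux/container_of.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

#include <drm/drm_drv.h>

struct example_device {
	struct drm_device ddev;		/* embedded, not a pointer */
	void __iomem *mmio;
};

static inline struct example_device *to_example_device(struct drm_device *dev)
{
	return container_of(dev, struct example_device, ddev);
}

static const struct drm_driver example_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.name = "example",
};

static int example_probe(struct platform_device *pdev)
{
	struct example_device *edev;

	/* Allocates the driver structure with the DRM device embedded at
	 * 'ddev' and ties its lifetime to the parent via devres. */
	edev = devm_drm_dev_alloc(&pdev->dev, &example_driver,
				  struct example_device, ddev);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	/* ... map resources, create CRTC/encoder/connector ... */

	return drm_dev_register(&edev->ddev, 0);
}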
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c
index 99381cc0abf3..4202ab00fb0c 100644
--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c
+++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c
@@ -7,6 +7,7 @@
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fourcc.h>
@@ -17,6 +18,7 @@
#include "shmob_drm_crtc.h"
#include "shmob_drm_drv.h"
#include "shmob_drm_kms.h"
+#include "shmob_drm_plane.h"
#include "shmob_drm_regs.h"
/* -----------------------------------------------------------------------------
@@ -27,53 +29,73 @@ static const struct shmob_drm_format_info shmob_drm_format_infos[] = {
{
.fourcc = DRM_FORMAT_RGB565,
.bpp = 16,
- .yuv = false,
.lddfr = LDDFR_PKF_RGB16,
+ .ldddsr = LDDDSR_LS | LDDDSR_WS,
+ .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW |
+ LDBBSIFR_RY | LDBBSIFR_RPKF_RGB16,
}, {
.fourcc = DRM_FORMAT_RGB888,
.bpp = 24,
- .yuv = false,
.lddfr = LDDFR_PKF_RGB24,
+ .ldddsr = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS,
+ .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW |
+ LDBBSIFR_SWPB | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB24,
}, {
.fourcc = DRM_FORMAT_ARGB8888,
.bpp = 32,
- .yuv = false,
.lddfr = LDDFR_PKF_ARGB32,
+ .ldddsr = LDDDSR_LS,
+ .ldbbsifr = LDBBSIFR_AL_PK | LDBBSIFR_SWPL | LDBBSIFR_RY |
+ LDBBSIFR_RPKF_ARGB32,
}, {
.fourcc = DRM_FORMAT_XRGB8888,
.bpp = 32,
- .yuv = false,
.lddfr = LDDFR_PKF_ARGB32,
+ .ldddsr = LDDDSR_LS,
+ .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_RY |
+ LDBBSIFR_RPKF_ARGB32,
}, {
.fourcc = DRM_FORMAT_NV12,
.bpp = 12,
- .yuv = true,
.lddfr = LDDFR_CC | LDDFR_YF_420,
+ .ldddsr = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS,
+ .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW |
+ LDBBSIFR_SWPB | LDBBSIFR_CHRR_420,
}, {
.fourcc = DRM_FORMAT_NV21,
.bpp = 12,
- .yuv = true,
.lddfr = LDDFR_CC | LDDFR_YF_420,
+ .ldddsr = LDDDSR_LS | LDDDSR_WS,
+ .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW |
+ LDBBSIFR_CHRR_420,
}, {
.fourcc = DRM_FORMAT_NV16,
.bpp = 16,
- .yuv = true,
.lddfr = LDDFR_CC | LDDFR_YF_422,
+ .ldddsr = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS,
+ .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW |
+ LDBBSIFR_SWPB | LDBBSIFR_CHRR_422,
}, {
.fourcc = DRM_FORMAT_NV61,
.bpp = 16,
- .yuv = true,
.lddfr = LDDFR_CC | LDDFR_YF_422,
+ .ldddsr = LDDDSR_LS | LDDDSR_WS,
+ .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW |
+ LDBBSIFR_CHRR_422,
}, {
.fourcc = DRM_FORMAT_NV24,
.bpp = 24,
- .yuv = true,
.lddfr = LDDFR_CC | LDDFR_YF_444,
+ .ldddsr = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS,
+ .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW |
+ LDBBSIFR_SWPB | LDBBSIFR_CHRR_444,
}, {
.fourcc = DRM_FORMAT_NV42,
.bpp = 24,
- .yuv = true,
.lddfr = LDDFR_CC | LDDFR_YF_444,
+ .ldddsr = LDDDSR_LS | LDDDSR_WS,
+ .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW |
+ LDBBSIFR_CHRR_444,
},
};
@@ -112,7 +134,7 @@ shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-EINVAL);
}
- if (format->yuv) {
+ if (shmob_drm_format_is_yuv(format)) {
unsigned int chroma_cpp = format->bpp == 24 ? 2 : 1;
if (mode_cmd->pitches[1] != mode_cmd->pitches[0] * chroma_cpp) {
@@ -127,29 +149,40 @@ shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv,
static const struct drm_mode_config_funcs shmob_drm_mode_config_funcs = {
.fb_create = shmob_drm_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
};
int shmob_drm_modeset_init(struct shmob_drm_device *sdev)
{
+ struct drm_device *dev = &sdev->ddev;
int ret;
- ret = drmm_mode_config_init(sdev->ddev);
+ ret = drmm_mode_config_init(dev);
if (ret)
return ret;
- shmob_drm_crtc_create(sdev);
- shmob_drm_encoder_create(sdev);
- shmob_drm_connector_create(sdev, &sdev->encoder.encoder);
+ ret = shmob_drm_crtc_create(sdev);
+ if (ret < 0)
+ return ret;
+
+ ret = shmob_drm_encoder_create(sdev);
+ if (ret < 0)
+ return ret;
+
+ ret = shmob_drm_connector_create(sdev, &sdev->encoder);
+ if (ret < 0)
+ return ret;
- drm_kms_helper_poll_init(sdev->ddev);
+ drm_mode_config_reset(dev);
- sdev->ddev->mode_config.min_width = 0;
- sdev->ddev->mode_config.min_height = 0;
- sdev->ddev->mode_config.max_width = 4095;
- sdev->ddev->mode_config.max_height = 4095;
- sdev->ddev->mode_config.funcs = &shmob_drm_mode_config_funcs;
+ drm_kms_helper_poll_init(dev);
- drm_helper_disable_unused_functions(sdev->ddev);
+ sdev->ddev.mode_config.min_width = 0;
+ sdev->ddev.mode_config.min_height = 0;
+ sdev->ddev.mode_config.max_width = 4095;
+ sdev->ddev.mode_config.max_height = 4095;
+ sdev->ddev.mode_config.funcs = &shmob_drm_mode_config_funcs;
return 0;
}
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.h b/drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.h
index 0347b1fd2338..590162c3db20 100644
--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.h
+++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.h
@@ -17,11 +17,14 @@ struct shmob_drm_device;
struct shmob_drm_format_info {
u32 fourcc;
- unsigned int bpp;
- bool yuv;
- u32 lddfr;
+ u32 lddfr; /* LCD Data Format Register */
+ u16 ldbbsifr; /* CHn Source Image Format Register low bits */
+ u8 ldddsr; /* LCDC Input Image Data Swap Register low bits */
+ u8 bpp;
};
+#define shmob_drm_format_is_yuv(format) ((format)->lddfr & LDDFR_CC)
+
const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc);
int shmob_drm_modeset_init(struct shmob_drm_device *sdev);
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c
index 850986cee848..8f9a728affde 100644
--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c
+++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c
@@ -7,11 +7,14 @@
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_plane_helper.h>
#include "shmob_drm_drv.h"
#include "shmob_drm_kms.h"
@@ -19,102 +22,84 @@
#include "shmob_drm_regs.h"
struct shmob_drm_plane {
- struct drm_plane plane;
+ struct drm_plane base;
unsigned int index;
- unsigned int alpha;
+};
+
+struct shmob_drm_plane_state {
+ struct drm_plane_state base;
const struct shmob_drm_format_info *format;
- unsigned long dma[2];
-
- unsigned int src_x;
- unsigned int src_y;
- unsigned int crtc_x;
- unsigned int crtc_y;
- unsigned int crtc_w;
- unsigned int crtc_h;
+ u32 dma[2];
};
-#define to_shmob_plane(p) container_of(p, struct shmob_drm_plane, plane)
+static inline struct shmob_drm_plane *to_shmob_plane(struct drm_plane *plane)
+{
+ return container_of(plane, struct shmob_drm_plane, base);
+}
+
+static inline struct shmob_drm_plane_state *to_shmob_plane_state(struct drm_plane_state *state)
+{
+ return container_of(state, struct shmob_drm_plane_state, base);
+}
-static void shmob_drm_plane_compute_base(struct shmob_drm_plane *splane,
- struct drm_framebuffer *fb,
- int x, int y)
+static void shmob_drm_plane_compute_base(struct shmob_drm_plane_state *sstate)
{
+ struct drm_framebuffer *fb = sstate->base.fb;
+ unsigned int x = sstate->base.src_x >> 16;
+ unsigned int y = sstate->base.src_y >> 16;
struct drm_gem_dma_object *gem;
unsigned int bpp;
- bpp = splane->format->yuv ? 8 : splane->format->bpp;
+ bpp = shmob_drm_format_is_yuv(sstate->format) ? 8 : sstate->format->bpp;
gem = drm_fb_dma_get_gem_obj(fb, 0);
- splane->dma[0] = gem->dma_addr + fb->offsets[0]
+ sstate->dma[0] = gem->dma_addr + fb->offsets[0]
+ y * fb->pitches[0] + x * bpp / 8;
- if (splane->format->yuv) {
- bpp = splane->format->bpp - 8;
+ if (shmob_drm_format_is_yuv(sstate->format)) {
+ bpp = sstate->format->bpp - 8;
gem = drm_fb_dma_get_gem_obj(fb, 1);
- splane->dma[1] = gem->dma_addr + fb->offsets[1]
+ sstate->dma[1] = gem->dma_addr + fb->offsets[1]
+ y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
+ x * (bpp == 16 ? 2 : 1);
}
}
-static void __shmob_drm_plane_setup(struct shmob_drm_plane *splane,
- struct drm_framebuffer *fb)
+static void shmob_drm_primary_plane_setup(struct shmob_drm_plane *splane,
+ struct drm_plane_state *state)
{
- struct shmob_drm_device *sdev = splane->plane.dev->dev_private;
+ struct shmob_drm_plane_state *sstate = to_shmob_plane_state(state);
+ struct shmob_drm_device *sdev = to_shmob_device(splane->base.dev);
+ struct drm_framebuffer *fb = state->fb;
+
+ /* TODO: Handle YUV colorspaces. Hardcode REC709 for now. */
+ lcdc_write(sdev, LDDFR, sstate->format->lddfr | LDDFR_CF1);
+ lcdc_write(sdev, LDMLSR, fb->pitches[0]);
+
+ /* Word and long word swap. */
+ lcdc_write(sdev, LDDDSR, sstate->format->ldddsr);
+
+ lcdc_write_mirror(sdev, LDSA1R, sstate->dma[0]);
+ if (shmob_drm_format_is_yuv(sstate->format))
+ lcdc_write_mirror(sdev, LDSA2R, sstate->dma[1]);
+
+ lcdc_write(sdev, LDRCNTR, lcdc_read(sdev, LDRCNTR) ^ LDRCNTR_MRS);
+}
+
+static void shmob_drm_overlay_plane_setup(struct shmob_drm_plane *splane,
+ struct drm_plane_state *state)
+{
+ struct shmob_drm_plane_state *sstate = to_shmob_plane_state(state);
+ struct shmob_drm_device *sdev = to_shmob_device(splane->base.dev);
+ struct drm_framebuffer *fb = state->fb;
u32 format;
/* TODO: Support ROP3 mode */
- format = LDBBSIFR_EN | (splane->alpha << LDBBSIFR_LAY_SHIFT);
-
- switch (splane->format->fourcc) {
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_NV42:
- format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW;
- break;
- case DRM_FORMAT_RGB888:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV24:
- format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW | LDBBSIFR_SWPB;
- break;
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_XRGB8888:
- default:
- format |= LDBBSIFR_SWPL;
- break;
- }
-
- switch (splane->format->fourcc) {
- case DRM_FORMAT_RGB565:
- format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB16;
- break;
- case DRM_FORMAT_RGB888:
- format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB24;
- break;
- case DRM_FORMAT_ARGB8888:
- format |= LDBBSIFR_AL_PK | LDBBSIFR_RY | LDDFR_PKF_ARGB32;
- break;
- case DRM_FORMAT_XRGB8888:
- format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDDFR_PKF_ARGB32;
- break;
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_420;
- break;
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_422;
- break;
- case DRM_FORMAT_NV24:
- case DRM_FORMAT_NV42:
- format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_444;
- break;
- }
+ format = LDBBSIFR_EN | ((state->alpha >> 8) << LDBBSIFR_LAY_SHIFT) |
+ sstate->format->ldbbsifr;
#define plane_reg_dump(sdev, splane, reg) \
- dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x 0x%08x\n", __func__, \
+ dev_dbg(sdev->ddev.dev, "%s(%u): %s 0x%08x 0x%08x\n", __func__, \
splane->index, #reg, \
lcdc_read(sdev, reg(splane->index)), \
lcdc_read(sdev, reg(splane->index) + LCDC_SIDE_B_OFFSET))
@@ -127,29 +112,27 @@ static void __shmob_drm_plane_setup(struct shmob_drm_plane *splane,
plane_reg_dump(sdev, splane, LDBnBSACR);
lcdc_write(sdev, LDBCR, LDBCR_UPC(splane->index));
- dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
+ dev_dbg(sdev->ddev.dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
"LDBCR", lcdc_read(sdev, LDBCR));
lcdc_write(sdev, LDBnBSIFR(splane->index), format);
lcdc_write(sdev, LDBnBSSZR(splane->index),
- (splane->crtc_h << LDBBSSZR_BVSS_SHIFT) |
- (splane->crtc_w << LDBBSSZR_BHSS_SHIFT));
+ (state->crtc_h << LDBBSSZR_BVSS_SHIFT) |
+ (state->crtc_w << LDBBSSZR_BHSS_SHIFT));
lcdc_write(sdev, LDBnBLOCR(splane->index),
- (splane->crtc_y << LDBBLOCR_CVLC_SHIFT) |
- (splane->crtc_x << LDBBLOCR_CHLC_SHIFT));
+ (state->crtc_y << LDBBLOCR_CVLC_SHIFT) |
+ (state->crtc_x << LDBBLOCR_CHLC_SHIFT));
lcdc_write(sdev, LDBnBSMWR(splane->index),
fb->pitches[0] << LDBBSMWR_BSMW_SHIFT);
- shmob_drm_plane_compute_base(splane, fb, splane->src_x, splane->src_y);
-
- lcdc_write(sdev, LDBnBSAYR(splane->index), splane->dma[0]);
- if (splane->format->yuv)
- lcdc_write(sdev, LDBnBSACR(splane->index), splane->dma[1]);
+ lcdc_write(sdev, LDBnBSAYR(splane->index), sstate->dma[0]);
+ if (shmob_drm_format_is_yuv(sstate->format))
+ lcdc_write(sdev, LDBnBSACR(splane->index), sstate->dma[1]);
lcdc_write(sdev, LDBCR,
LDBCR_UPF(splane->index) | LDBCR_UPD(splane->index));
- dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
+ dev_dbg(sdev->ddev.dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
"LDBCR", lcdc_read(sdev, LDBCR));
plane_reg_dump(sdev, splane, LDBnBSIFR);
@@ -160,75 +143,143 @@ static void __shmob_drm_plane_setup(struct shmob_drm_plane *splane,
plane_reg_dump(sdev, splane, LDBnBSACR);
}
-void shmob_drm_plane_setup(struct drm_plane *plane)
+static int shmob_drm_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct shmob_drm_plane *splane = to_shmob_plane(plane);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct shmob_drm_plane_state *sstate = to_shmob_plane_state(new_plane_state);
+ struct drm_crtc_state *crtc_state;
+ bool is_primary = plane->type == DRM_PLANE_TYPE_PRIMARY;
+ int ret;
- if (plane->fb == NULL)
- return;
+ if (!new_plane_state->crtc) {
+ /*
+ * The visible field is not reset by the DRM core but only
+ * updated by drm_atomic_helper_check_plane_state(), so set it
+ * manually here.
+ */
+ new_plane_state->visible = false;
+ sstate->format = NULL;
+ return 0;
+ }
- __shmob_drm_plane_setup(splane, plane->fb);
-}
+ crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
-static int
-shmob_drm_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct shmob_drm_plane *splane = to_shmob_plane(plane);
- struct shmob_drm_device *sdev = plane->dev->dev_private;
- const struct shmob_drm_format_info *format;
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ !is_primary, true);
+ if (ret < 0)
+ return ret;
- format = shmob_drm_format_info(fb->format->format);
- if (format == NULL) {
- dev_dbg(sdev->dev, "update_plane: unsupported format %08x\n",
- fb->format->format);
- return -EINVAL;
+ if (!new_plane_state->visible) {
+ sstate->format = NULL;
+ return 0;
}
- if (src_w >> 16 != crtc_w || src_h >> 16 != crtc_h) {
- dev_dbg(sdev->dev, "%s: scaling not supported\n", __func__);
+ sstate->format = shmob_drm_format_info(new_plane_state->fb->format->format);
+ if (!sstate->format) {
+ dev_dbg(plane->dev->dev,
+ "plane_atomic_check: unsupported format %p4cc\n",
+ &new_plane_state->fb->format->format);
return -EINVAL;
}
- splane->format = format;
+ shmob_drm_plane_compute_base(sstate);
- splane->src_x = src_x >> 16;
- splane->src_y = src_y >> 16;
- splane->crtc_x = crtc_x;
- splane->crtc_y = crtc_y;
- splane->crtc_w = crtc_w;
- splane->crtc_h = crtc_h;
-
- __shmob_drm_plane_setup(splane, fb);
return 0;
}
-static int shmob_drm_plane_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx)
+static void shmob_drm_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
struct shmob_drm_plane *splane = to_shmob_plane(plane);
- struct shmob_drm_device *sdev = plane->dev->dev_private;
- splane->format = NULL;
+ if (!new_plane_state->visible)
+ return;
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ shmob_drm_primary_plane_setup(splane, new_plane_state);
+ else
+ shmob_drm_overlay_plane_setup(splane, new_plane_state);
+}
+
+static void shmob_drm_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
+ struct shmob_drm_device *sdev = to_shmob_device(plane->dev);
+ struct shmob_drm_plane *splane = to_shmob_plane(plane);
+
+ if (!old_state->crtc)
+ return;
+
+ if (plane->type != DRM_PLANE_TYPE_OVERLAY)
+ return;
+
+ lcdc_write(sdev, LDBCR, LDBCR_UPC(splane->index));
lcdc_write(sdev, LDBnBSIFR(splane->index), 0);
- return 0;
+ lcdc_write(sdev, LDBCR,
+ LDBCR_UPF(splane->index) | LDBCR_UPD(splane->index));
}
-static void shmob_drm_plane_destroy(struct drm_plane *plane)
+static struct drm_plane_state *
+shmob_drm_plane_atomic_duplicate_state(struct drm_plane *plane)
{
- drm_plane_force_disable(plane);
- drm_plane_cleanup(plane);
+ struct shmob_drm_plane_state *state;
+ struct shmob_drm_plane_state *copy;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ state = to_shmob_plane_state(plane->state);
+ copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ if (copy == NULL)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
+
+ return &copy->base;
}
+static void shmob_drm_plane_atomic_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ __drm_atomic_helper_plane_destroy_state(state);
+ kfree(to_shmob_plane_state(state));
+}
+
+static void shmob_drm_plane_reset(struct drm_plane *plane)
+{
+ struct shmob_drm_plane_state *state;
+
+ if (plane->state) {
+ shmob_drm_plane_atomic_destroy_state(plane, plane->state);
+ plane->state = NULL;
+ }
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state == NULL)
+ return;
+
+ __drm_atomic_helper_plane_reset(plane, &state->base);
+}
+
+static const struct drm_plane_helper_funcs shmob_drm_plane_helper_funcs = {
+ .atomic_check = shmob_drm_plane_atomic_check,
+ .atomic_update = shmob_drm_plane_atomic_update,
+ .atomic_disable = shmob_drm_plane_atomic_disable,
+};
+
static const struct drm_plane_funcs shmob_drm_plane_funcs = {
- .update_plane = shmob_drm_plane_update,
- .disable_plane = shmob_drm_plane_disable,
- .destroy = shmob_drm_plane_destroy,
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .reset = shmob_drm_plane_reset,
+ .atomic_duplicate_state = shmob_drm_plane_atomic_duplicate_state,
+ .atomic_destroy_state = shmob_drm_plane_atomic_destroy_state,
};
static const uint32_t formats[] = {
@@ -244,22 +295,23 @@ static const uint32_t formats[] = {
DRM_FORMAT_NV42,
};
-int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index)
+struct drm_plane *shmob_drm_plane_create(struct shmob_drm_device *sdev,
+ enum drm_plane_type type,
+ unsigned int index)
{
struct shmob_drm_plane *splane;
- int ret;
- splane = devm_kzalloc(sdev->dev, sizeof(*splane), GFP_KERNEL);
- if (splane == NULL)
- return -ENOMEM;
+ splane = drmm_universal_plane_alloc(&sdev->ddev,
+ struct shmob_drm_plane, base, 1,
+ &shmob_drm_plane_funcs, formats,
+ ARRAY_SIZE(formats), NULL, type,
+ NULL);
+ if (IS_ERR(splane))
+ return ERR_CAST(splane);
splane->index = index;
- splane->alpha = 255;
- ret = drm_universal_plane_init(sdev->ddev, &splane->plane, 1,
- &shmob_drm_plane_funcs,
- formats, ARRAY_SIZE(formats), NULL,
- DRM_PLANE_TYPE_OVERLAY, NULL);
+ drm_plane_helper_add(&splane->base, &shmob_drm_plane_helper_funcs);
- return ret;
+ return &splane->base;
}
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.h b/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.h
index e72b21a4288f..dcfddd605899 100644
--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.h
+++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.h
@@ -13,7 +13,8 @@
struct drm_plane;
struct shmob_drm_device;
-int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index);
-void shmob_drm_plane_setup(struct drm_plane *plane);
+struct drm_plane *shmob_drm_plane_create(struct shmob_drm_device *sdev,
+ enum drm_plane_type type,
+ unsigned int index);
#endif /* __SHMOB_DRM_PLANE_H__ */
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index 32f0857aec9f..e0174f82e353 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -910,7 +910,7 @@ static int ssd132x_primary_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(plane_state);
struct drm_crtc *crtc = plane_state->crtc;
- struct drm_crtc_state *crtc_state;
+ struct drm_crtc_state *crtc_state = NULL;
const struct drm_format_info *fi;
unsigned int pitch;
int ret;
diff --git a/drivers/greybus/Kconfig b/drivers/greybus/Kconfig
index 78ba3c3083d5..033d31dbf3b8 100644
--- a/drivers/greybus/Kconfig
+++ b/drivers/greybus/Kconfig
@@ -17,6 +17,16 @@ menuconfig GREYBUS
if GREYBUS
+config GREYBUS_BEAGLEPLAY
+ tristate "Greybus BeaglePlay driver"
+ depends on SERIAL_DEV_BUS
+ help
+ Select this option if you have a BeaglePlay board where the CC1352
+ co-processor acts as the Greybus SVC.
+
+ To compile this code as a module, choose M here: the module
+ will be called gb-beagleplay.ko
+
config GREYBUS_ES2
tristate "Greybus ES3 USB host controller"
depends on USB
diff --git a/drivers/greybus/Makefile b/drivers/greybus/Makefile
index 9bccdd229aa2..d986e94f8897 100644
--- a/drivers/greybus/Makefile
+++ b/drivers/greybus/Makefile
@@ -18,6 +18,8 @@ obj-$(CONFIG_GREYBUS) += greybus.o
# needed for trace events
ccflags-y += -I$(src)
+obj-$(CONFIG_GREYBUS_BEAGLEPLAY) += gb-beagleplay.o
+
# Greybus Host controller drivers
gb-es2-y := es2.o
diff --git a/drivers/greybus/gb-beagleplay.c b/drivers/greybus/gb-beagleplay.c
new file mode 100644
index 000000000000..43318c1993ba
--- /dev/null
+++ b/drivers/greybus/gb-beagleplay.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * BeaglePlay Linux Driver for Greybus
+ *
+ * Copyright (c) 2023 Ayush Singh <ayushdevel1325@gmail.com>
+ * Copyright (c) 2023 BeagleBoard.org Foundation
+ */
+
+#include <linux/gfp.h>
+#include <linux/greybus.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/printk.h>
+#include <linux/serdev.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/greybus/hd.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/crc-ccitt.h>
+#include <linux/circ_buf.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#define RX_HDLC_PAYLOAD 256
+#define CRC_LEN 2
+#define MAX_RX_HDLC (1 + RX_HDLC_PAYLOAD + CRC_LEN)
+#define TX_CIRC_BUF_SIZE 1024
+
+#define ADDRESS_GREYBUS 0x01
+#define ADDRESS_DBG 0x02
+#define ADDRESS_CONTROL 0x03
+
+#define HDLC_FRAME 0x7E
+#define HDLC_ESC 0x7D
+#define HDLC_XOR 0x20
+
+#define CONTROL_SVC_START 0x01
+#define CONTROL_SVC_STOP 0x02
+
+/* The maximum number of CPorts supported by Greybus Host Device */
+#define GB_MAX_CPORTS 32
+
+/**
+ * struct gb_beagleplay - BeaglePlay Greybus driver
+ *
+ * @sd: underlying serdev device
+ *
+ * @gb_hd: greybus host device
+ *
+ * @tx_work: hdlc transmit work
+ * @tx_producer_lock: hdlc transmit data producer lock. Acquired when appending data to the buffer.
+ * @tx_consumer_lock: hdlc transmit data consumer lock. Acquired when sending data over the UART.
+ * @tx_circ_buf: hdlc transmit circular buffer.
+ * @tx_crc: hdlc transmit crc-ccitt fcs
+ *
+ * @rx_buffer_len: length of receive buffer filled.
+ * @rx_buffer: hdlc frame receive buffer
+ * @rx_in_esc: hdlc rx flag to indicate ESC frame
+ */
+struct gb_beagleplay {
+ struct serdev_device *sd;
+
+ struct gb_host_device *gb_hd;
+
+ struct work_struct tx_work;
+ spinlock_t tx_producer_lock;
+ spinlock_t tx_consumer_lock;
+ struct circ_buf tx_circ_buf;
+ u16 tx_crc;
+
+ u16 rx_buffer_len;
+ bool rx_in_esc;
+ u8 rx_buffer[MAX_RX_HDLC];
+};
+
+/**
+ * struct hdlc_payload - Structure to represent part of HDLC frame payload data.
+ *
+ * @len: buffer length in bytes
+ * @buf: payload buffer
+ */
+struct hdlc_payload {
+ u16 len;
+ void *buf;
+};
+
+static void hdlc_rx_greybus_frame(struct gb_beagleplay *bg, u8 *buf, u16 len)
+{
+ u16 cport_id;
+ struct gb_operation_msg_hdr *hdr = (struct gb_operation_msg_hdr *)buf;
+
+ memcpy(&cport_id, hdr->pad, sizeof(cport_id));
+
+ dev_dbg(&bg->sd->dev, "Greybus Operation %u type %X cport %u status %u received",
+ hdr->operation_id, hdr->type, cport_id, hdr->result);
+
+ greybus_data_rcvd(bg->gb_hd, cport_id, buf, len);
+}
+
+static void hdlc_rx_dbg_frame(const struct gb_beagleplay *bg, const char *buf, u16 len)
+{
+ dev_dbg(&bg->sd->dev, "CC1352 Log: %.*s", (int)len, buf);
+}
+
+/**
+ * hdlc_write() - Consume HDLC Buffer.
+ * @bg: beagleplay greybus driver
+ *
+ * Assumes that consumer lock has been acquired.
+ */
+static void hdlc_write(struct gb_beagleplay *bg)
+{
+ int written;
+ /* Start consuming HDLC data */
+ int head = smp_load_acquire(&bg->tx_circ_buf.head);
+ int tail = bg->tx_circ_buf.tail;
+ int count = CIRC_CNT_TO_END(head, tail, TX_CIRC_BUF_SIZE);
+ const unsigned char *buf = &bg->tx_circ_buf.buf[tail];
+
+ if (count > 0) {
+ written = serdev_device_write_buf(bg->sd, buf, count);
+
+ /* Finish consuming HDLC data */
+ smp_store_release(&bg->tx_circ_buf.tail, (tail + written) & (TX_CIRC_BUF_SIZE - 1));
+ }
+}
+
+/**
+ * hdlc_append() - Queue HDLC data for sending.
+ * @bg: beagleplay greybus driver
+ * @value: hdlc byte to transmit
+ *
+ * Assumes that producer lock has been acquired.
+ */
+static void hdlc_append(struct gb_beagleplay *bg, u8 value)
+{
+ int tail, head = bg->tx_circ_buf.head;
+
+ while (true) {
+ tail = READ_ONCE(bg->tx_circ_buf.tail);
+
+ if (CIRC_SPACE(head, tail, TX_CIRC_BUF_SIZE) >= 1) {
+ bg->tx_circ_buf.buf[head] = value;
+
+ /* Finish producing HDLC byte */
+ smp_store_release(&bg->tx_circ_buf.head,
+ (head + 1) & (TX_CIRC_BUF_SIZE - 1));
+ return;
+ }
+ dev_warn(&bg->sd->dev, "Tx circ buf full");
+ usleep_range(3000, 5000);
+ }
+}
+
+static void hdlc_append_escaped(struct gb_beagleplay *bg, u8 value)
+{
+ if (value == HDLC_FRAME || value == HDLC_ESC) {
+ hdlc_append(bg, HDLC_ESC);
+ value ^= HDLC_XOR;
+ }
+ hdlc_append(bg, value);
+}
+
+static void hdlc_append_tx_frame(struct gb_beagleplay *bg)
+{
+ bg->tx_crc = 0xFFFF;
+ hdlc_append(bg, HDLC_FRAME);
+}
+
+static void hdlc_append_tx_u8(struct gb_beagleplay *bg, u8 value)
+{
+ bg->tx_crc = crc_ccitt(bg->tx_crc, &value, 1);
+ hdlc_append_escaped(bg, value);
+}
+
+static void hdlc_append_tx_buf(struct gb_beagleplay *bg, const u8 *buf, u16 len)
+{
+ size_t i;
+
+ for (i = 0; i < len; i++)
+ hdlc_append_tx_u8(bg, buf[i]);
+}
+
+static void hdlc_append_tx_crc(struct gb_beagleplay *bg)
+{
+ bg->tx_crc ^= 0xffff;
+ hdlc_append_escaped(bg, bg->tx_crc & 0xff);
+ hdlc_append_escaped(bg, (bg->tx_crc >> 8) & 0xff);
+}
+
+static void hdlc_transmit(struct work_struct *work)
+{
+ struct gb_beagleplay *bg = container_of(work, struct gb_beagleplay, tx_work);
+
+ spin_lock_bh(&bg->tx_consumer_lock);
+ hdlc_write(bg);
+ spin_unlock_bh(&bg->tx_consumer_lock);
+}
+
+static void hdlc_tx_frames(struct gb_beagleplay *bg, u8 address, u8 control,
+ const struct hdlc_payload payloads[], size_t count)
+{
+ size_t i;
+
+ spin_lock(&bg->tx_producer_lock);
+
+ hdlc_append_tx_frame(bg);
+ hdlc_append_tx_u8(bg, address);
+ hdlc_append_tx_u8(bg, control);
+
+ for (i = 0; i < count; ++i)
+ hdlc_append_tx_buf(bg, payloads[i].buf, payloads[i].len);
+
+ hdlc_append_tx_crc(bg);
+ hdlc_append_tx_frame(bg);
+
+ spin_unlock(&bg->tx_producer_lock);
+
+ schedule_work(&bg->tx_work);
+}
+
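Taken together, the helpers above emit one frame per call to hdlc_tx_frames(), laid out roughly as follows (escaping applied to any 0x7E/0x7D byte between the two flags):

    0x7E | address | control | payload bytes ... | FCS low | FCS high | 0x7E

where the FCS is the complemented CRC-CCITT accumulated in tx_crc and every non-flag byte passes through hdlc_append_escaped().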
+static void hdlc_tx_s_frame_ack(struct gb_beagleplay *bg)
+{
+ hdlc_tx_frames(bg, bg->rx_buffer[0], (bg->rx_buffer[1] >> 1) & 0x7, NULL, 0);
+}
+
+static void hdlc_rx_frame(struct gb_beagleplay *bg)
+{
+ u16 crc, len;
+ u8 ctrl, *buf;
+ u8 address = bg->rx_buffer[0];
+
+ crc = crc_ccitt(0xffff, bg->rx_buffer, bg->rx_buffer_len);
+ if (crc != 0xf0b8) {
+ dev_warn_ratelimited(&bg->sd->dev, "CRC failed from %02x: 0x%04x", address, crc);
+ return;
+ }
+
+ ctrl = bg->rx_buffer[1];
+ buf = &bg->rx_buffer[2];
+ len = bg->rx_buffer_len - 4;
+
+ /* I-Frame, send S-Frame ACK */
+ if ((ctrl & 1) == 0)
+ hdlc_tx_s_frame_ack(bg);
+
+ switch (address) {
+ case ADDRESS_DBG:
+ hdlc_rx_dbg_frame(bg, buf, len);
+ break;
+ case ADDRESS_GREYBUS:
+ hdlc_rx_greybus_frame(bg, buf, len);
+ break;
+ default:
+ dev_warn_ratelimited(&bg->sd->dev, "unknown frame %u", address);
+ }
+}
+
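For context, a minimal stand-alone user-space sketch (an illustration only; the bitwise loop with polynomial 0x8408 and the example payload are assumptions mirroring the table-driven lib/crc-ccitt.c) of why the receive path can compare the running CRC against the fixed residue 0xf0b8: appending the complemented CRC low byte first, as hdlc_append_tx_crc() does, makes the CRC over frame-plus-FCS collapse to that constant.

#include <stdint.h>
#include <stdio.h>

/* Bitwise equivalent of the kernel's reflected CRC-CCITT update. */
static uint16_t crc_ccitt_update(uint16_t crc, uint8_t c)
{
	int i;

	crc ^= c;
	for (i = 0; i < 8; i++)
		crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
	return crc;
}

int main(void)
{
	uint8_t frame[16] = { 0x01, 0x03, 'h', 'i' };	/* address, control, payload */
	size_t len = 4, i;
	uint16_t crc = 0xffff;

	for (i = 0; i < len; i++)
		crc = crc_ccitt_update(crc, frame[i]);
	crc ^= 0xffff;			/* complement, as in hdlc_append_tx_crc() */
	frame[len++] = crc & 0xff;	/* FCS goes out low byte first */
	frame[len++] = crc >> 8;

	crc = 0xffff;			/* receive side: CRC over frame + FCS */
	for (i = 0; i < len; i++)
		crc = crc_ccitt_update(crc, frame[i]);
	printf("residue: 0x%04x\n", crc);	/* prints 0xf0b8 for any payload */
	return 0;
}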
+static int hdlc_rx(struct gb_beagleplay *bg, const u8 *data, size_t count)
+{
+ size_t i;
+ u8 c;
+
+ for (i = 0; i < count; ++i) {
+ c = data[i];
+
+ switch (c) {
+ case HDLC_FRAME:
+ if (bg->rx_buffer_len)
+ hdlc_rx_frame(bg);
+
+ bg->rx_buffer_len = 0;
+ break;
+ case HDLC_ESC:
+ bg->rx_in_esc = true;
+ break;
+ default:
+ if (bg->rx_in_esc) {
+ c ^= 0x20;
+ bg->rx_in_esc = false;
+ }
+
+ if (bg->rx_buffer_len < MAX_RX_HDLC) {
+ bg->rx_buffer[bg->rx_buffer_len] = c;
+ bg->rx_buffer_len++;
+ } else {
+ dev_err_ratelimited(&bg->sd->dev, "RX Buffer Overflow");
+ bg->rx_buffer_len = 0;
+ }
+ }
+ }
+
+ return count;
+}
+
+static int hdlc_init(struct gb_beagleplay *bg)
+{
+ INIT_WORK(&bg->tx_work, hdlc_transmit);
+ spin_lock_init(&bg->tx_producer_lock);
+ spin_lock_init(&bg->tx_consumer_lock);
+ bg->tx_circ_buf.head = 0;
+ bg->tx_circ_buf.tail = 0;
+
+ bg->tx_circ_buf.buf = devm_kmalloc(&bg->sd->dev, TX_CIRC_BUF_SIZE, GFP_KERNEL);
+ if (!bg->tx_circ_buf.buf)
+ return -ENOMEM;
+
+ bg->rx_buffer_len = 0;
+ bg->rx_in_esc = false;
+
+ return 0;
+}
+
+static void hdlc_deinit(struct gb_beagleplay *bg)
+{
+ flush_work(&bg->tx_work);
+}
+
+static int gb_tty_receive(struct serdev_device *sd, const unsigned char *data, size_t count)
+{
+ struct gb_beagleplay *bg = serdev_device_get_drvdata(sd);
+
+ return hdlc_rx(bg, data, count);
+}
+
+static void gb_tty_wakeup(struct serdev_device *serdev)
+{
+ struct gb_beagleplay *bg = serdev_device_get_drvdata(serdev);
+
+ schedule_work(&bg->tx_work);
+}
+
+static struct serdev_device_ops gb_beagleplay_ops = {
+ .receive_buf = gb_tty_receive,
+ .write_wakeup = gb_tty_wakeup,
+};
+
+static int gb_message_send(struct gb_host_device *hd, u16 cport, struct gb_message *msg, gfp_t mask)
+{
+ struct gb_beagleplay *bg = dev_get_drvdata(&hd->dev);
+ struct hdlc_payload payloads[2];
+
+ dev_dbg(&hd->dev, "Sending greybus message with Operation %u, Type: %X on Cport %u",
+ msg->header->operation_id, msg->header->type, cport);
+
+ if (msg->header->size > RX_HDLC_PAYLOAD)
+ return dev_err_probe(&hd->dev, -E2BIG, "Greybus message too big");
+
+ memcpy(msg->header->pad, &cport, sizeof(cport));
+
+ payloads[0].buf = msg->header;
+ payloads[0].len = sizeof(*msg->header);
+ payloads[1].buf = msg->payload;
+ payloads[1].len = msg->payload_size;
+
+ hdlc_tx_frames(bg, ADDRESS_GREYBUS, 0x03, payloads, 2);
+ greybus_message_sent(bg->gb_hd, msg, 0);
+
+ return 0;
+}
+
+static void gb_message_cancel(struct gb_message *message)
+{
+}
+
+static struct gb_hd_driver gb_hdlc_driver = { .message_send = gb_message_send,
+ .message_cancel = gb_message_cancel };
+
+static void gb_beagleplay_start_svc(struct gb_beagleplay *bg)
+{
+ const u8 command = CONTROL_SVC_START;
+ const struct hdlc_payload payload = { .len = 1, .buf = (void *)&command };
+
+ hdlc_tx_frames(bg, ADDRESS_CONTROL, 0x03, &payload, 1);
+}
+
+static void gb_beagleplay_stop_svc(struct gb_beagleplay *bg)
+{
+ const u8 command = CONTROL_SVC_STOP;
+ const struct hdlc_payload payload = { .len = 1, .buf = (void *)&command };
+
+ hdlc_tx_frames(bg, ADDRESS_CONTROL, 0x03, &payload, 1);
+}
+
+static void gb_greybus_deinit(struct gb_beagleplay *bg)
+{
+ gb_hd_del(bg->gb_hd);
+ gb_hd_put(bg->gb_hd);
+}
+
+static int gb_greybus_init(struct gb_beagleplay *bg)
+{
+ int ret;
+
+ bg->gb_hd = gb_hd_create(&gb_hdlc_driver, &bg->sd->dev, TX_CIRC_BUF_SIZE, GB_MAX_CPORTS);
+ if (IS_ERR(bg->gb_hd)) {
+ dev_err(&bg->sd->dev, "Failed to create greybus host device");
+ return PTR_ERR(bg->gb_hd);
+ }
+
+ ret = gb_hd_add(bg->gb_hd);
+ if (ret) {
+ dev_err(&bg->sd->dev, "Failed to add greybus host device");
+ goto free_gb_hd;
+ }
+ dev_set_drvdata(&bg->gb_hd->dev, bg);
+
+ return 0;
+
+free_gb_hd:
+ gb_greybus_deinit(bg);
+ return ret;
+}
+
+static void gb_serdev_deinit(struct gb_beagleplay *bg)
+{
+ serdev_device_close(bg->sd);
+}
+
+static int gb_serdev_init(struct gb_beagleplay *bg)
+{
+ int ret;
+
+ serdev_device_set_drvdata(bg->sd, bg);
+ serdev_device_set_client_ops(bg->sd, &gb_beagleplay_ops);
+ ret = serdev_device_open(bg->sd);
+ if (ret)
+ return dev_err_probe(&bg->sd->dev, ret, "Unable to open serial device");
+
+ serdev_device_set_baudrate(bg->sd, 115200);
+ serdev_device_set_flow_control(bg->sd, false);
+
+ return 0;
+}
+
+static int gb_beagleplay_probe(struct serdev_device *serdev)
+{
+ int ret = 0;
+ struct gb_beagleplay *bg;
+
+ bg = devm_kmalloc(&serdev->dev, sizeof(*bg), GFP_KERNEL);
+ if (!bg)
+ return -ENOMEM;
+
+ bg->sd = serdev;
+ ret = gb_serdev_init(bg);
+ if (ret)
+ return ret;
+
+ ret = hdlc_init(bg);
+ if (ret)
+ goto free_serdev;
+
+ ret = gb_greybus_init(bg);
+ if (ret)
+ goto free_hdlc;
+
+ gb_beagleplay_start_svc(bg);
+
+ return 0;
+
+free_hdlc:
+ hdlc_deinit(bg);
+free_serdev:
+ gb_serdev_deinit(bg);
+ return ret;
+}
+
+static void gb_beagleplay_remove(struct serdev_device *serdev)
+{
+ struct gb_beagleplay *bg = serdev_device_get_drvdata(serdev);
+
+ gb_greybus_deinit(bg);
+ gb_beagleplay_stop_svc(bg);
+ hdlc_deinit(bg);
+ gb_serdev_deinit(bg);
+}
+
+static const struct of_device_id gb_beagleplay_of_match[] = {
+ {
+ .compatible = "ti,cc1352p7",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gb_beagleplay_of_match);
+
+static struct serdev_device_driver gb_beagleplay_driver = {
+ .probe = gb_beagleplay_probe,
+ .remove = gb_beagleplay_remove,
+ .driver = {
+ .name = "gb_beagleplay",
+ .of_match_table = gb_beagleplay_of_match,
+ },
+};
+
+module_serdev_device_driver(gb_beagleplay_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ayush Singh <ayushdevel1325@gmail.com>");
+MODULE_DESCRIPTION("A Greybus driver for BeaglePlay");
diff --git a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
index 8716a05950c8..ef1f9be8b893 100644
--- a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
+++ b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
@@ -257,6 +257,13 @@ static u8 get_input_report(u8 current_index, int sensor_idx, int report_id,
else
als_input.illuminance_value =
(int)sensor_virt_addr[0] / AMD_SFH_FW_MULTIPLIER;
+
+ if (sensor_idx == ACS_IDX) {
+ als_input.light_color_temp = sensor_virt_addr[1];
+ als_input.chromaticity_x_value = sensor_virt_addr[2];
+ als_input.chromaticity_y_value = sensor_virt_addr[3];
+ }
+
report_size = sizeof(als_input);
memcpy(input_report, &als_input, sizeof(als_input));
break;
diff --git a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.h b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.h
index ebd55675eb62..882434b1501f 100644
--- a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.h
+++ b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.h
@@ -99,6 +99,9 @@ struct als_input_report {
struct common_input_property common_property;
/* values specific to this sensor */
int illuminance_value;
+ int light_color_temp;
+ int chromaticity_x_value;
+ int chromaticity_y_value;
} __packed;
struct hpd_feature_report {
diff --git a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_report_desc.h b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_report_desc.h
index 697f2791ea9c..67ec2d6a417d 100644
--- a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_report_desc.h
+++ b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_report_desc.h
@@ -641,6 +641,27 @@ static const u8 als_report_descriptor[] = {
0x75, 32, /* HID report size(32) */
0x95, 1, /* HID report count (1) */
0X81, 0x02, /* HID Input (Data_Arr_Abs) */
+0x0A, 0xD2, 0x04, /* HID usage sensor data light temperature */
+0x17, 0x00, 0x00, 0x01, 0x80, /* HID logical Min_32 */
+0x27, 0xFF, 0xFF, 0xFF, 0x7F, /* HID logical Max_32 */
+0x55, 0x0, /* HID unit exponent(0x0) */
+0x75, 32, /* HID report size(32) */
+0x95, 1, /* HID report count (1) */
+0X81, 0x02, /* HID Input (Data_Arr_Abs) */
+0x0A, 0xD4, 0x04, /* HID usage sensor data light chromaticity_x */
+0x17, 0x00, 0x00, 0x01, 0x80, /* HID logical Min_32 */
+0x27, 0xFF, 0xFF, 0xFF, 0x7F, /* HID logical Max_32 */
+0x55, 0x0, /* HID unit exponent(0x0) */
+0x75, 32, /* HID report size(32) */
+0x95, 1, /* HID report count(1) */
+0X81, 0x02, /* HID Input (Data_Var_Abs) */
+0x0A, 0xD5, 0x04, /* HID usage sensor data light chromaticity_y */
+0x17, 0x00, 0x00, 0x01, 0x80, /* HID logical Min_32 */
+0x27, 0xFF, 0xFF, 0xFF, 0x7F, /* HID logical Max_32 */
+0x55, 0x0, /* HID unit exponent(0x0) */
+0x75, 32, /* HID report size(32) */
+0x95, 1, /* HID report count (1) */
+0X81, 0x02, /* HID Input (Data_Var_Abs) */
0xC0 /* HID end collection */
};
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
index 06bdcf072d10..8a037de08e92 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
@@ -188,6 +188,7 @@ static u8 get_input_rep(u8 current_index, int sensor_idx, int report_id,
struct sfh_mag_data mag_data;
struct sfh_als_data als_data;
struct hpd_status hpdstatus;
+ struct sfh_base_info binfo;
void __iomem *sensoraddr;
u8 report_size = 0;
@@ -235,6 +236,14 @@ static u8 get_input_rep(u8 current_index, int sensor_idx, int report_id,
memcpy_fromio(&als_data, sensoraddr, sizeof(struct sfh_als_data));
get_common_inputs(&als_input.common_property, report_id);
als_input.illuminance_value = float_to_int(als_data.lux);
+
+ memcpy_fromio(&binfo, mp2->vsbase, sizeof(struct sfh_base_info));
+ if (binfo.sbase.s_prop[ALS_IDX].sf.feat & 0x2) {
+ als_input.light_color_temp = als_data.light_color_temp;
+ als_input.chromaticity_x_value = float_to_int(als_data.chromaticity_x);
+ als_input.chromaticity_y_value = float_to_int(als_data.chromaticity_y);
+ }
+
report_size = sizeof(als_input);
memcpy(input_report, &als_input, sizeof(als_input));
break;
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
index 9d31d5b510eb..656c3e95ef8c 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
@@ -88,6 +88,16 @@ struct sfh_sensor_list {
};
};
+struct sfh_sensor_prop {
+ union {
+ u32 sprop;
+ struct {
+ u32 elist : 16;
+ u32 feat : 16;
+ } sf;
+ };
+};
+
struct sfh_base_info {
union {
u32 sfh_base[24];
@@ -95,6 +105,8 @@ struct sfh_base_info {
struct sfh_platform_info plat_info;
struct sfh_firmware_info fw_info;
struct sfh_sensor_list s_list;
+ u32 rsvd;
+ struct sfh_sensor_prop s_prop[16];
} sbase;
};
};
@@ -134,6 +146,9 @@ struct sfh_mag_data {
struct sfh_als_data {
struct sfh_common_data commondata;
u32 lux;
+ u32 light_color_temp;
+ u32 chromaticity_x;
+ u32 chromaticity_y;
};
struct hpd_status {
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index edbb38f6956b..b33d5abd9beb 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -2100,8 +2100,10 @@ static void __maybe_unused vmbus_reserve_fb(void)
if (efi_enabled(EFI_BOOT)) {
/* Gen2 VM: get FB base from EFI framebuffer */
- start = screen_info.lfb_base;
- size = max_t(__u32, screen_info.lfb_size, 0x800000);
+ if (IS_ENABLED(CONFIG_SYSFB)) {
+ start = screen_info.lfb_base;
+ size = max_t(__u32, screen_info.lfb_size, 0x800000);
+ }
} else {
/* Gen1 VM: get FB base from PCI */
pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 6644eebedaf3..28eb48dd5b32 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -158,6 +158,7 @@ config I2C_I801
Alder Lake (PCH)
Raptor Lake (PCH)
Meteor Lake (SOC and PCH)
+ Birch Stream (SOC)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
@@ -1264,6 +1265,17 @@ config I2C_DLN2
This driver can also be built as a module. If so, the module
will be called i2c-dln2.
+config I2C_LJCA
+ tristate "I2C functionality of Intel La Jolla Cove Adapter"
+ depends on USB_LJCA
+ default USB_LJCA
+ help
+ If you say yes to this option, I2C functionality support of Intel
+ La Jolla Cove Adapter (LJCA) will be included.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-ljca.
+
config I2C_CP2615
tristate "Silicon Labs CP2615 USB sound card and I2C adapter"
depends on USB
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index af56fe2c75c0..3757b9391e60 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -133,6 +133,7 @@ obj-$(CONFIG_I2C_GXP) += i2c-gxp.o
# External I2C/SMBus adapter drivers
obj-$(CONFIG_I2C_DIOLAN_U2C) += i2c-diolan-u2c.o
obj-$(CONFIG_I2C_DLN2) += i2c-dln2.o
+obj-$(CONFIG_I2C_LJCA) += i2c-ljca.o
obj-$(CONFIG_I2C_CP2615) += i2c-cp2615.o
obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o
obj-$(CONFIG_I2C_PCI1XXXX) += i2c-mchp-pci1xxxx.o
diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c
index db45554327ae..dc52b3530725 100644
--- a/drivers/i2c/busses/i2c-at91-core.c
+++ b/drivers/i2c/busses/i2c-at91-core.c
@@ -221,11 +221,10 @@ static int at91_twi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
- dev->clk = devm_clk_get(dev->dev, NULL);
+ dev->clk = devm_clk_get_enabled(dev->dev, NULL);
if (IS_ERR(dev->clk))
- return dev_err_probe(dev->dev, PTR_ERR(dev->clk), "no clock defined\n");
-
- clk_prepare_enable(dev->clk);
+ return dev_err_probe(dev->dev, PTR_ERR(dev->clk),
+ "failed to enable clock\n");
snprintf(dev->adapter.name, sizeof(dev->adapter.name), "AT91");
i2c_set_adapdata(&dev->adapter, dev);
@@ -254,8 +253,6 @@ static int at91_twi_probe(struct platform_device *pdev)
rc = i2c_add_numbered_adapter(&dev->adapter);
if (rc) {
- clk_disable_unprepare(dev->clk);
-
pm_runtime_disable(dev->dev);
pm_runtime_set_suspended(dev->dev);
@@ -272,7 +269,6 @@ static void at91_twi_remove(struct platform_device *pdev)
struct at91_twi_dev *dev = platform_get_drvdata(pdev);
i2c_del_adapter(&dev->adapter);
- clk_disable_unprepare(dev->clk);
pm_runtime_disable(dev->dev);
pm_runtime_set_suspended(dev->dev);
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index d7f1e98777ac..a66f7f67b3b8 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -131,6 +131,8 @@
* @i2c_clk: clock reference for i2c input clock
* @bus_clk_rate: current i2c bus clock rate
 * @last: a flag indicating whether this is the last message in the transfer
+ * @slave: associated &i2c_client
+ * @irq: platform device IRQ number
*/
struct axxia_i2c_dev {
void __iomem *base;
@@ -165,7 +167,7 @@ static void i2c_int_enable(struct axxia_i2c_dev *idev, u32 mask)
writel(int_en | mask, idev->base + MST_INT_ENABLE);
}
-/**
+/*
* ns_to_clk - Convert time (ns) to clock cycles for the given clock frequency.
*/
static u32 ns_to_clk(u64 ns, u32 clk_mhz)
@@ -263,7 +265,7 @@ static int i2c_m_recv_len(const struct i2c_msg *msg)
return (msg->flags & I2C_M_RECV_LEN) != 0;
}
-/**
+/*
* axxia_i2c_empty_rx_fifo - Fetch data from RX FIFO and update SMBus block
* transfer length if this is the first byte of such a transfer.
*/
@@ -295,7 +297,7 @@ static int axxia_i2c_empty_rx_fifo(struct axxia_i2c_dev *idev)
return 0;
}
-/**
+/*
* axxia_i2c_fill_tx_fifo - Fill TX FIFO from current message buffer.
* @return: Number of bytes left to transfer.
*/
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 51aab662050b..e905734c26a0 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -316,26 +316,44 @@ static void bcm_iproc_i2c_slave_init(
iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
}
-static void bcm_iproc_i2c_check_slave_status(
- struct bcm_iproc_i2c_dev *iproc_i2c)
+static bool bcm_iproc_i2c_check_slave_status
+ (struct bcm_iproc_i2c_dev *iproc_i2c, u32 status)
{
u32 val;
+ bool recover = false;
- val = iproc_i2c_rd_reg(iproc_i2c, S_CMD_OFFSET);
- /* status is valid only when START_BUSY is cleared after it was set */
- if (val & BIT(S_CMD_START_BUSY_SHIFT))
- return;
+ /* check slave transmit status only if slave is transmitting */
+ if (!iproc_i2c->slave_rx_only) {
+ val = iproc_i2c_rd_reg(iproc_i2c, S_CMD_OFFSET);
+ /* status is valid only when START_BUSY is cleared */
+ if (!(val & BIT(S_CMD_START_BUSY_SHIFT))) {
+ val = (val >> S_CMD_STATUS_SHIFT) & S_CMD_STATUS_MASK;
+ if (val == S_CMD_STATUS_TIMEOUT ||
+ val == S_CMD_STATUS_MASTER_ABORT) {
+ dev_warn(iproc_i2c->device,
+ (val == S_CMD_STATUS_TIMEOUT) ?
+ "slave random stretch time timeout\n" :
+ "Master aborted read transaction\n");
+ recover = true;
+ }
+ }
+ }
+
+ /* RX_EVENT is not valid when START_BUSY is set */
+ if ((status & BIT(IS_S_RX_EVENT_SHIFT)) &&
+ (status & BIT(IS_S_START_BUSY_SHIFT))) {
+ dev_warn(iproc_i2c->device, "Slave aborted read transaction\n");
+ recover = true;
+ }
- val = (val >> S_CMD_STATUS_SHIFT) & S_CMD_STATUS_MASK;
- if (val == S_CMD_STATUS_TIMEOUT || val == S_CMD_STATUS_MASTER_ABORT) {
- dev_err(iproc_i2c->device, (val == S_CMD_STATUS_TIMEOUT) ?
- "slave random stretch time timeout\n" :
- "Master aborted read transaction\n");
+ if (recover) {
/* re-initialize i2c for recovery */
bcm_iproc_i2c_enable_disable(iproc_i2c, false);
bcm_iproc_i2c_slave_init(iproc_i2c, true);
bcm_iproc_i2c_enable_disable(iproc_i2c, true);
}
+
+ return recover;
}
static void bcm_iproc_i2c_slave_read(struct bcm_iproc_i2c_dev *iproc_i2c)
@@ -420,48 +438,6 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
u32 val;
u8 value;
- /*
- * Slave events in case of master-write, master-write-read and,
- * master-read
- *
- * Master-write : only IS_S_RX_EVENT_SHIFT event
- * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
- * events
- * Master-read : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
- * events or only IS_S_RD_EVENT_SHIFT
- *
- * iproc has a slave rx fifo size of 64 bytes. Rx fifo full interrupt
- * (IS_S_RX_FIFO_FULL_SHIFT) will be generated when RX fifo becomes
- * full. This can happen if Master issues write requests of more than
- * 64 bytes.
- */
- if (status & BIT(IS_S_RX_EVENT_SHIFT) ||
- status & BIT(IS_S_RD_EVENT_SHIFT) ||
- status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
- /* disable slave interrupts */
- val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
- val &= ~iproc_i2c->slave_int_mask;
- iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
-
- if (status & BIT(IS_S_RD_EVENT_SHIFT))
- /* Master-write-read request */
- iproc_i2c->slave_rx_only = false;
- else
- /* Master-write request only */
- iproc_i2c->slave_rx_only = true;
-
- /* schedule tasklet to read data later */
- tasklet_schedule(&iproc_i2c->slave_rx_tasklet);
-
- /*
- * clear only IS_S_RX_EVENT_SHIFT and
- * IS_S_RX_FIFO_FULL_SHIFT interrupt.
- */
- val = BIT(IS_S_RX_EVENT_SHIFT);
- if (status & BIT(IS_S_RX_FIFO_FULL_SHIFT))
- val |= BIT(IS_S_RX_FIFO_FULL_SHIFT);
- iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, val);
- }
if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
iproc_i2c->tx_underrun++;
@@ -493,8 +469,9 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
* less than PKT_LENGTH bytes were output on the SMBUS
*/
iproc_i2c->slave_int_mask &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
- iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET,
- iproc_i2c->slave_int_mask);
+ val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+ val &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
/* End of SMBUS for Master Read */
val = BIT(S_TX_WR_STATUS_SHIFT);
@@ -515,9 +492,49 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
BIT(IS_S_START_BUSY_SHIFT));
}
- /* check slave transmit status only if slave is transmitting */
- if (!iproc_i2c->slave_rx_only)
- bcm_iproc_i2c_check_slave_status(iproc_i2c);
+ /* if the controller has been reset, immediately return from the ISR */
+ if (bcm_iproc_i2c_check_slave_status(iproc_i2c, status))
+ return true;
+
+ /*
+ * Slave events in case of master-write, master-write-read and,
+ * master-read
+ *
+ * Master-write : only IS_S_RX_EVENT_SHIFT event
+ * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
+ * events
+ * Master-read : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
+ * events or only IS_S_RD_EVENT_SHIFT
+ *
+ * iproc has a slave rx fifo size of 64 bytes. Rx fifo full interrupt
+ * (IS_S_RX_FIFO_FULL_SHIFT) will be generated when RX fifo becomes
+ * full. This can happen if Master issues write requests of more than
+ * 64 bytes.
+ */
+ if (status & BIT(IS_S_RX_EVENT_SHIFT) ||
+ status & BIT(IS_S_RD_EVENT_SHIFT) ||
+ status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
+ /* disable slave interrupts */
+ val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+ val &= ~iproc_i2c->slave_int_mask;
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+
+ if (status & BIT(IS_S_RD_EVENT_SHIFT))
+ /* Master-write-read request */
+ iproc_i2c->slave_rx_only = false;
+ else
+ /* Master-write request only */
+ iproc_i2c->slave_rx_only = true;
+
+ /* schedule tasklet to read data later */
+ tasklet_schedule(&iproc_i2c->slave_rx_tasklet);
+
+ /* clear IS_S_RX_FIFO_FULL_SHIFT interrupt */
+ if (status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
+ val = BIT(IS_S_RX_FIFO_FULL_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, val);
+ }
+ }
return true;
}
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index acee76732544..38f276c99193 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -160,6 +160,7 @@ struct brcmstb_i2c_dev {
struct completion done;
u32 clk_freq_hz;
int data_regsz;
+ bool atomic;
};
/* register accessors for both be and le cpu arch */
@@ -240,7 +241,7 @@ static int brcmstb_i2c_wait_for_completion(struct brcmstb_i2c_dev *dev)
int ret = 0;
unsigned long timeout = msecs_to_jiffies(I2C_TIMEOUT);
- if (dev->irq >= 0) {
+ if (dev->irq >= 0 && !dev->atomic) {
if (!wait_for_completion_timeout(&dev->done, timeout))
ret = -ETIMEDOUT;
} else {
@@ -287,7 +288,7 @@ static int brcmstb_send_i2c_cmd(struct brcmstb_i2c_dev *dev,
return rc;
/* only if we are in interrupt mode */
- if (dev->irq >= 0)
+ if (dev->irq >= 0 && !dev->atomic)
reinit_completion(&dev->done);
/* enable BSC CTL interrupt line */
@@ -520,6 +521,23 @@ out:
}
+static int brcmstb_i2c_xfer_atomic(struct i2c_adapter *adapter,
+ struct i2c_msg msgs[], int num)
+{
+ struct brcmstb_i2c_dev *dev = i2c_get_adapdata(adapter);
+ int ret;
+
+ if (dev->irq >= 0)
+ disable_irq(dev->irq);
+ dev->atomic = true;
+ ret = brcmstb_i2c_xfer(adapter, msgs, num);
+ dev->atomic = false;
+ if (dev->irq >= 0)
+ enable_irq(dev->irq);
+
+ return ret;
+}
+
static u32 brcmstb_i2c_functionality(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR
@@ -528,6 +546,7 @@ static u32 brcmstb_i2c_functionality(struct i2c_adapter *adap)
static const struct i2c_algorithm brcmstb_i2c_algo = {
.master_xfer = brcmstb_i2c_xfer,
+ .master_xfer_atomic = brcmstb_i2c_xfer_atomic,
.functionality = brcmstb_i2c_functionality,
};
diff --git a/drivers/i2c/busses/i2c-cp2615.c b/drivers/i2c/busses/i2c-cp2615.c
index 3ded28632e4c..cf3747d87034 100644
--- a/drivers/i2c/busses/i2c-cp2615.c
+++ b/drivers/i2c/busses/i2c-cp2615.c
@@ -85,7 +85,7 @@ static int cp2615_init_iop_msg(struct cp2615_iop_msg *ret, enum cp2615_iop_msg_t
if (!ret)
return -EINVAL;
- ret->preamble = 0x2A2A;
+ ret->preamble = htons(0x2A2AU);
ret->length = htons(data_len + 6);
ret->msg = htons(msg);
if (data && data_len)
@@ -298,7 +298,7 @@ cp2615_i2c_probe(struct usb_interface *usbif, const struct usb_device_id *id)
if (!adap)
return -ENOMEM;
- strncpy(adap->name, usbdev->serial, sizeof(adap->name) - 1);
+ strscpy(adap->name, usbdev->serial, sizeof(adap->name));
adap->owner = THIS_MODULE;
adap->dev.parent = &usbif->dev;
adap->dev.of_node = usbif->dev.of_node;
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index ca1035e010c7..85dbd0eb5392 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -519,10 +519,16 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
/*
* Because we don't know the buffer length in the
- * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop
- * the transaction here.
+ * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop the
+ * transaction here. Also disable the TX_EMPTY IRQ
+ * while waiting for the data length byte to avoid the
+ * bogus interrupts flood.
*/
- if (buf_len > 0 || flags & I2C_M_RECV_LEN) {
+ if (flags & I2C_M_RECV_LEN) {
+ dev->status |= STATUS_WRITE_IN_PROGRESS;
+ intr_mask &= ~DW_IC_INTR_TX_EMPTY;
+ break;
+ } else if (buf_len > 0) {
/* more bytes to be written */
dev->status |= STATUS_WRITE_IN_PROGRESS;
break;
@@ -558,6 +564,13 @@ i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
msgs[dev->msg_read_idx].len = len;
msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;
+ /*
+ * Received buffer length, re-enable TX_EMPTY interrupt
+ * to resume the SMBUS transaction.
+ */
+ regmap_update_bits(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_TX_EMPTY,
+ DW_IC_INTR_TX_EMPTY);
+
return len;
}
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index 2b0b9cdffa86..385ef9d9e4d4 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -194,6 +194,11 @@ struct exynos5_i2c {
*/
int trans_done;
+ /*
+ * Called from atomic context; don't use interrupts.
+ */
+ unsigned int atomic;
+
/* Controller operating frequency */
unsigned int op_clock;
@@ -265,7 +270,7 @@ static void exynos5_i2c_clr_pend_irq(struct exynos5_i2c *i2c)
* exynos5_i2c_set_timing: updates the registers with appropriate
* timing values calculated
*
- * Timing values for operation are calculated against either 100kHz
+ * Timing values for operation are calculated against 100kHz, 400kHz
* or 1MHz controller operating frequency.
*
* Returns 0 on success, -EINVAL if the cycle length cannot
@@ -328,6 +333,23 @@ static int exynos5_i2c_set_timing(struct exynos5_i2c *i2c, bool hs_timings)
*
* Constraints: 4 <= temp, 0 <= CLK_DIV < 256, 2 <= clk_cycle <= 510
*
+ * To split the SCL clock into low and high periods appropriately, one
+ * proportion factor per I2C mode is used, calculated with this
+ * formula:
+ * ```
+ * ((t_low_min + (scl_clock - t_low_min - t_high_min) / 2) / scl_clock)
+ * ```
+ * where:
+ * t_low_min is the minimum low period of the SCL clock in us;
+ * t_high_min is the minimum high period of the SCL clock in us;
+ * scl_clock is the SCL clock period converted to us.
+ *
+ * Below are the proportion factors for these I2C modes:
+ * t_low_min, t_high_min, scl_clock, proportion
+ * Standard Mode: 4.7us, 4.0us, 10us, 0.535
+ * Fast Mode: 1.3us, 0.6us, 2.5us, 0.64
+ * Fast-Plus Mode: 0.5us, 0.26us, 1us, 0.62
+ *
*/
t_ftl_cycle = (readl(i2c->regs + HSI2C_CONF) >> 16) & 0x7;
temp = clkin / op_clk - 8 - t_ftl_cycle;
@@ -341,8 +363,19 @@ static int exynos5_i2c_set_timing(struct exynos5_i2c *i2c, bool hs_timings)
return -EINVAL;
}
- t_scl_l = clk_cycle / 2;
- t_scl_h = clk_cycle / 2;
+ /*
+ * Scale clk_cycle to get t_scl_l using the proportion factors for the individual I2C modes.
+ */
+ if (op_clk <= I2C_MAX_STANDARD_MODE_FREQ)
+ t_scl_l = clk_cycle * 535 / 1000;
+ else if (op_clk <= I2C_MAX_FAST_MODE_FREQ)
+ t_scl_l = clk_cycle * 64 / 100;
+ else
+ t_scl_l = clk_cycle * 62 / 100;
+
+ if (t_scl_l > 0xFF)
+ t_scl_l = 0xFF;
+ t_scl_h = clk_cycle - t_scl_l;
t_start_su = t_scl_l;
t_start_hd = t_scl_l;
t_stop_su = t_scl_l;
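As a worked example of the split above (numbers purely illustrative): in Fast Mode the factor is (1.3 + (2.5 - 1.3 - 0.6) / 2) / 2.5 = 1.6 / 2.5 = 0.64, so with clk_cycle = 100 the code now picks t_scl_l = 100 * 64 / 100 = 64 and t_scl_h = 100 - 64 = 36, i.e. roughly a 1.6 us low / 0.9 us high split of the 2.5 us SCL period, instead of the previous 50/50 split.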
@@ -711,6 +744,22 @@ static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop)
spin_unlock_irqrestore(&i2c->lock, flags);
}
+static bool exynos5_i2c_poll_irqs_timeout(struct exynos5_i2c *i2c,
+ unsigned long timeout)
+{
+ unsigned long time_left = jiffies + timeout;
+
+ while (time_before(jiffies, time_left) &&
+ !((i2c->trans_done && (i2c->msg->len == i2c->msg_ptr)) ||
+ (i2c->state < 0))) {
+ while (readl(i2c->regs + HSI2C_INT_ENABLE) &
+ readl(i2c->regs + HSI2C_INT_STATUS))
+ exynos5_i2c_irq(i2c->irq, i2c);
+ usleep_range(100, 200);
+ }
+ return time_before(jiffies, time_left);
+}
+
static int exynos5_i2c_xfer_msg(struct exynos5_i2c *i2c,
struct i2c_msg *msgs, int stop)
{
@@ -725,8 +774,13 @@ static int exynos5_i2c_xfer_msg(struct exynos5_i2c *i2c,
exynos5_i2c_message_start(i2c, stop);
- timeout = wait_for_completion_timeout(&i2c->msg_complete,
- EXYNOS5_I2C_TIMEOUT);
+ if (!i2c->atomic)
+ timeout = wait_for_completion_timeout(&i2c->msg_complete,
+ EXYNOS5_I2C_TIMEOUT);
+ else
+ timeout = exynos5_i2c_poll_irqs_timeout(i2c,
+ EXYNOS5_I2C_TIMEOUT);
+
if (timeout == 0)
ret = -ETIMEDOUT;
else
@@ -777,6 +831,21 @@ err_pclk:
return ret ?: num;
}
+static int exynos5_i2c_xfer_atomic(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct exynos5_i2c *i2c = adap->algo_data;
+ int ret;
+
+ disable_irq(i2c->irq);
+ i2c->atomic = true;
+ ret = exynos5_i2c_xfer(adap, msgs, num);
+ i2c->atomic = false;
+ enable_irq(i2c->irq);
+
+ return ret;
+}
+
static u32 exynos5_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
@@ -784,6 +853,7 @@ static u32 exynos5_i2c_func(struct i2c_adapter *adap)
static const struct i2c_algorithm exynos5_i2c_algorithm = {
.master_xfer = exynos5_i2c_xfer,
+ .master_xfer_atomic = exynos5_i2c_xfer_atomic,
.functionality = exynos5_i2c_func,
};
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index e5a5b9e8bf2c..fb35a75fe0e3 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -263,15 +263,10 @@ static void i2c_gpio_fault_injector_init(struct platform_device *pdev)
* 'fault-injector' dir there. Until then, we have a global dir with
* all adapters as subdirs.
*/
- if (!i2c_gpio_debug_dir) {
+ if (!i2c_gpio_debug_dir)
i2c_gpio_debug_dir = debugfs_create_dir("i2c-fault-injector", NULL);
- if (!i2c_gpio_debug_dir)
- return;
- }
priv->debug_dir = debugfs_create_dir(pdev->name, i2c_gpio_debug_dir);
- if (!priv->debug_dir)
- return;
init_completion(&priv->scl_irq_completion);
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 1d855258a45d..070999139c6d 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -79,6 +79,7 @@
* Meteor Lake-P (SOC) 0x7e22 32 hard yes yes yes
* Meteor Lake SoC-S (SOC) 0xae22 32 hard yes yes yes
* Meteor Lake PCH-S (PCH) 0x7f23 32 hard yes yes yes
+ * Birch Stream (SOC) 0x5796 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -231,6 +232,7 @@
#define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS 0x4da3
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS 0x51a3
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS 0x54a3
+#define PCI_DEVICE_ID_INTEL_BIRCH_STREAM_SMBUS 0x5796
#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
#define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_S_SMBUS 0x7a23
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS 0x7aa3
@@ -285,7 +287,6 @@ struct i801_priv {
u8 *data;
#if IS_ENABLED(CONFIG_I2C_MUX_GPIO) && defined CONFIG_DMI
- const struct i801_mux_config *mux_drvdata;
struct platform_device *mux_pdev;
struct gpiod_lookup_table *lookup;
#endif
@@ -293,10 +294,9 @@ struct i801_priv {
/*
* If set to true the host controller registers are reserved for
- * ACPI AML use. Protected by acpi_lock.
+ * ACPI AML use.
*/
bool acpi_reserved;
- struct mutex acpi_lock;
};
#define FEATURE_SMBUS_PEC BIT(0)
@@ -679,15 +679,11 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
return result ? priv->status : -ETIMEDOUT;
}
- for (i = 1; i <= len; i++) {
- if (i == len && read_write == I2C_SMBUS_READ)
- smbcmd |= SMBHSTCNT_LAST_BYTE;
- outb_p(smbcmd, SMBHSTCNT(priv));
-
- if (i == 1)
- outb_p(inb(SMBHSTCNT(priv)) | SMBHSTCNT_START,
- SMBHSTCNT(priv));
+ if (len == 1 && read_write == I2C_SMBUS_READ)
+ smbcmd |= SMBHSTCNT_LAST_BYTE;
+ outb_p(smbcmd | SMBHSTCNT_START, SMBHSTCNT(priv));
+ for (i = 1; i <= len; i++) {
status = i801_wait_byte_done(priv);
if (status)
return status;
@@ -710,9 +706,12 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
data->block[0] = len;
}
- /* Retrieve/store value in SMBBLKDAT */
- if (read_write == I2C_SMBUS_READ)
+ if (read_write == I2C_SMBUS_READ) {
data->block[i] = inb_p(SMBBLKDAT(priv));
+ if (i == len - 1)
+ outb_p(smbcmd | SMBHSTCNT_LAST_BYTE, SMBHSTCNT(priv));
+ }
+
if (read_write == I2C_SMBUS_WRITE && i+1 <= len)
outb_p(data->block[i+1], SMBBLKDAT(priv));
@@ -875,11 +874,8 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
int hwpec, ret;
struct i801_priv *priv = i2c_get_adapdata(adap);
- mutex_lock(&priv->acpi_lock);
- if (priv->acpi_reserved) {
- mutex_unlock(&priv->acpi_lock);
+ if (priv->acpi_reserved)
return -EBUSY;
- }
pm_runtime_get_sync(&priv->pci_dev->dev);
@@ -920,7 +916,6 @@ out:
pm_runtime_mark_last_busy(&priv->pci_dev->dev);
pm_runtime_put_autosuspend(&priv->pci_dev->dev);
- mutex_unlock(&priv->acpi_lock);
return ret;
}
@@ -1044,6 +1039,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE_DATA(INTEL, METEOR_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, METEOR_LAKE_SOC_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ 0, }
};
@@ -1288,7 +1284,7 @@ static void i801_probe_optional_slaves(struct i801_priv *priv)
/* Instantiate SPD EEPROMs unless the SMBus is multiplexed */
#if IS_ENABLED(CONFIG_I2C_MUX_GPIO)
- if (!priv->mux_drvdata)
+ if (!priv->mux_pdev)
#endif
i2c_register_spd(&priv->adapter);
}
@@ -1390,11 +1386,14 @@ static void i801_add_mux(struct i801_priv *priv)
const struct i801_mux_config *mux_config;
struct i2c_mux_gpio_platform_data gpio_data;
struct gpiod_lookup_table *lookup;
+ const struct dmi_system_id *id;
int i;
- if (!priv->mux_drvdata)
+ id = dmi_first_match(mux_dmi_table);
+ if (!id)
return;
- mux_config = priv->mux_drvdata;
+
+ mux_config = id->driver_data;
/* Prepare the platform data */
memset(&gpio_data, 0, sizeof(struct i2c_mux_gpio_platform_data));
@@ -1438,35 +1437,9 @@ static void i801_del_mux(struct i801_priv *priv)
platform_device_unregister(priv->mux_pdev);
gpiod_remove_lookup_table(priv->lookup);
}
-
-static unsigned int i801_get_adapter_class(struct i801_priv *priv)
-{
- const struct dmi_system_id *id;
- const struct i801_mux_config *mux_config;
- unsigned int class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
- int i;
-
- id = dmi_first_match(mux_dmi_table);
- if (id) {
- /* Remove branch classes from trunk */
- mux_config = id->driver_data;
- for (i = 0; i < mux_config->n_values; i++)
- class &= ~mux_config->classes[i];
-
- /* Remember for later */
- priv->mux_drvdata = mux_config;
- }
-
- return class;
-}
#else
static inline void i801_add_mux(struct i801_priv *priv) { }
static inline void i801_del_mux(struct i801_priv *priv) { }
-
-static inline unsigned int i801_get_adapter_class(struct i801_priv *priv)
-{
- return I2C_CLASS_HWMON | I2C_CLASS_SPD;
-}
#endif
static struct platform_device *
@@ -1572,7 +1545,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
* further access from the driver itself. This device is now owned
* by the system firmware.
*/
- mutex_lock(&priv->acpi_lock);
+ i2c_lock_bus(&priv->adapter, I2C_LOCK_SEGMENT);
if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) {
priv->acpi_reserved = true;
@@ -1592,7 +1565,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
else
status = acpi_os_write_port(address, (u32)*value, bits);
- mutex_unlock(&priv->acpi_lock);
+ i2c_unlock_bus(&priv->adapter, I2C_LOCK_SEGMENT);
return status;
}
@@ -1630,6 +1603,12 @@ static void i801_setup_hstcfg(struct i801_priv *priv)
pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hstcfg);
}
+static void i801_restore_regs(struct i801_priv *priv)
+{
+ outb_p(priv->original_hstcnt, SMBHSTCNT(priv));
+ pci_write_config_byte(priv->pci_dev, SMBHSTCFG, priv->original_hstcfg);
+}
+
static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int err, i;
@@ -1641,12 +1620,11 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
i2c_set_adapdata(&priv->adapter, priv);
priv->adapter.owner = THIS_MODULE;
- priv->adapter.class = i801_get_adapter_class(priv);
+ priv->adapter.class = I2C_CLASS_HWMON;
priv->adapter.algo = &smbus_algorithm;
priv->adapter.dev.parent = &dev->dev;
- ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev));
+ acpi_use_parent_companion(&priv->adapter.dev);
priv->adapter.retries = 3;
- mutex_init(&priv->acpi_lock);
priv->pci_dev = dev;
priv->features = id->driver_data;
@@ -1756,6 +1734,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (err) {
platform_device_unregister(priv->tco_pdev);
i801_acpi_remove(priv);
+ i801_restore_regs(priv);
return err;
}
@@ -1780,12 +1759,10 @@ static void i801_remove(struct pci_dev *dev)
{
struct i801_priv *priv = pci_get_drvdata(dev);
- outb_p(priv->original_hstcnt, SMBHSTCNT(priv));
i801_disable_host_notify(priv);
i801_del_mux(priv);
i2c_del_adapter(&priv->adapter);
i801_acpi_remove(priv);
- pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
platform_device_unregister(priv->tco_pdev);
@@ -1793,6 +1770,8 @@ static void i801_remove(struct pci_dev *dev)
if (!priv->acpi_reserved)
pm_runtime_get_noresume(&dev->dev);
+ i801_restore_regs(priv);
+
/*
* do not call pci_disable_device(dev) since it can cause hard hangs on
* some systems during power-off (eg. Fujitsu-Siemens Lifebook E8010)
@@ -1803,18 +1782,18 @@ static void i801_shutdown(struct pci_dev *dev)
{
struct i801_priv *priv = pci_get_drvdata(dev);
- /* Restore config registers to avoid hard hang on some systems */
- outb_p(priv->original_hstcnt, SMBHSTCNT(priv));
i801_disable_host_notify(priv);
- pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
+ /* Restore config registers to avoid hard hang on some systems */
+ i801_restore_regs(priv);
}
static int i801_suspend(struct device *dev)
{
struct i801_priv *priv = dev_get_drvdata(dev);
- outb_p(priv->original_hstcnt, SMBHSTCNT(priv));
- pci_write_config_byte(priv->pci_dev, SMBHSTCFG, priv->original_hstcfg);
+ i2c_mark_adapter_suspended(&priv->adapter);
+ i801_restore_regs(priv);
+
return 0;
}
@@ -1824,6 +1803,7 @@ static int i801_resume(struct device *dev)
i801_setup_hstcfg(priv);
i801_enable_host_notify(&priv->adapter);
+ i2c_mark_adapter_resumed(&priv->adapter);
return 0;
}
@@ -1842,16 +1822,11 @@ static struct pci_driver i801_driver = {
},
};
-static int __init i2c_i801_init(void)
+static int __init i2c_i801_init(struct pci_driver *drv)
{
if (dmi_name_in_vendors("FUJITSU"))
input_apanel_init();
- return pci_register_driver(&i801_driver);
-}
-
-static void __exit i2c_i801_exit(void)
-{
- pci_unregister_driver(&i801_driver);
+ return pci_register_driver(drv);
}
MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>");
@@ -1859,5 +1834,4 @@ MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("I801 SMBus driver");
MODULE_LICENSE("GPL");
-module_init(i2c_i801_init);
-module_exit(i2c_i801_exit);
+module_driver(i801_driver, i2c_i801_init, pci_unregister_driver);
diff --git a/drivers/i2c/busses/i2c-ljca.c b/drivers/i2c/busses/i2c-ljca.c
new file mode 100644
index 000000000000..b4927622177c
--- /dev/null
+++ b/drivers/i2c/busses/i2c-ljca.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel La Jolla Cove Adapter USB-I2C driver
+ *
+ * Copyright (c) 2023, Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/usb/ljca.h>
+
+/* I2C init flags */
+#define LJCA_I2C_INIT_FLAG_MODE BIT(0)
+#define LJCA_I2C_INIT_FLAG_MODE_POLLING FIELD_PREP(LJCA_I2C_INIT_FLAG_MODE, 0)
+#define LJCA_I2C_INIT_FLAG_MODE_INTERRUPT FIELD_PREP(LJCA_I2C_INIT_FLAG_MODE, 1)
+
+#define LJCA_I2C_INIT_FLAG_ADDR_16BIT BIT(0)
+
+#define LJCA_I2C_INIT_FLAG_FREQ GENMASK(2, 1)
+#define LJCA_I2C_INIT_FLAG_FREQ_100K FIELD_PREP(LJCA_I2C_INIT_FLAG_FREQ, 0)
+#define LJCA_I2C_INIT_FLAG_FREQ_400K FIELD_PREP(LJCA_I2C_INIT_FLAG_FREQ, 1)
+#define LJCA_I2C_INIT_FLAG_FREQ_1M FIELD_PREP(LJCA_I2C_INIT_FLAG_FREQ, 2)
+
+#define LJCA_I2C_BUF_SIZE 60u
+#define LJCA_I2C_MAX_XFER_SIZE (LJCA_I2C_BUF_SIZE - sizeof(struct ljca_i2c_rw_packet))
+
+/* I2C commands */
+enum ljca_i2c_cmd {
+ LJCA_I2C_INIT = 1,
+ LJCA_I2C_XFER,
+ LJCA_I2C_START,
+ LJCA_I2C_STOP,
+ LJCA_I2C_READ,
+ LJCA_I2C_WRITE,
+};
+
+enum ljca_xfer_type {
+ LJCA_I2C_WRITE_XFER_TYPE,
+ LJCA_I2C_READ_XFER_TYPE,
+};
+
+/* I2C raw commands: Init/Start/Read/Write/Stop */
+struct ljca_i2c_rw_packet {
+ u8 id;
+ __le16 len;
+ u8 data[] __counted_by(len);
+} __packed;
+
+struct ljca_i2c_dev {
+ struct ljca_client *ljca;
+ struct ljca_i2c_info *i2c_info;
+ struct i2c_adapter adap;
+
+ u8 obuf[LJCA_I2C_BUF_SIZE];
+ u8 ibuf[LJCA_I2C_BUF_SIZE];
+};
+
+static int ljca_i2c_init(struct ljca_i2c_dev *ljca_i2c, u8 id)
+{
+ struct ljca_i2c_rw_packet *w_packet =
+ (struct ljca_i2c_rw_packet *)ljca_i2c->obuf;
+ int ret;
+
+ w_packet->id = id;
+ w_packet->len = cpu_to_le16(sizeof(*w_packet->data));
+ w_packet->data[0] = LJCA_I2C_INIT_FLAG_FREQ_400K;
+
+ ret = ljca_transfer(ljca_i2c->ljca, LJCA_I2C_INIT, (u8 *)w_packet,
+ struct_size(w_packet, data, 1), NULL, 0);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int ljca_i2c_start(struct ljca_i2c_dev *ljca_i2c, u8 slave_addr,
+ enum ljca_xfer_type type)
+{
+ struct ljca_i2c_rw_packet *w_packet =
+ (struct ljca_i2c_rw_packet *)ljca_i2c->obuf;
+ struct ljca_i2c_rw_packet *r_packet =
+ (struct ljca_i2c_rw_packet *)ljca_i2c->ibuf;
+ s16 rp_len;
+ int ret;
+
+ w_packet->id = ljca_i2c->i2c_info->id;
+ w_packet->len = cpu_to_le16(sizeof(*w_packet->data));
+ w_packet->data[0] = (slave_addr << 1) | type;
+
+ ret = ljca_transfer(ljca_i2c->ljca, LJCA_I2C_START, (u8 *)w_packet,
+ struct_size(w_packet, data, 1), (u8 *)r_packet,
+ LJCA_I2C_BUF_SIZE);
+ if (ret < 0 || ret < sizeof(*r_packet))
+ return ret < 0 ? ret : -EIO;
+
+ rp_len = le16_to_cpu(r_packet->len);
+ if (rp_len < 0 || r_packet->id != w_packet->id) {
+ dev_dbg(&ljca_i2c->adap.dev,
+ "i2c start failed len: %d id: %d %d\n",
+ rp_len, r_packet->id, w_packet->id);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void ljca_i2c_stop(struct ljca_i2c_dev *ljca_i2c, u8 slave_addr)
+{
+ struct ljca_i2c_rw_packet *w_packet =
+ (struct ljca_i2c_rw_packet *)ljca_i2c->obuf;
+ struct ljca_i2c_rw_packet *r_packet =
+ (struct ljca_i2c_rw_packet *)ljca_i2c->ibuf;
+ s16 rp_len;
+ int ret;
+
+ w_packet->id = ljca_i2c->i2c_info->id;
+ w_packet->len = cpu_to_le16(sizeof(*w_packet->data));
+ w_packet->data[0] = 0;
+
+ ret = ljca_transfer(ljca_i2c->ljca, LJCA_I2C_STOP, (u8 *)w_packet,
+ struct_size(w_packet, data, 1), (u8 *)r_packet,
+ LJCA_I2C_BUF_SIZE);
+ if (ret < 0 || ret < sizeof(*r_packet)) {
+ dev_dbg(&ljca_i2c->adap.dev,
+ "i2c stop failed ret: %d id: %d\n",
+ ret, w_packet->id);
+ return;
+ }
+
+ rp_len = le16_to_cpu(r_packet->len);
+ if (rp_len < 0 || r_packet->id != w_packet->id)
+ dev_dbg(&ljca_i2c->adap.dev,
+ "i2c stop failed len: %d id: %d %d\n",
+ rp_len, r_packet->id, w_packet->id);
+}
+
+static int ljca_i2c_pure_read(struct ljca_i2c_dev *ljca_i2c, u8 *data, u8 len)
+{
+ struct ljca_i2c_rw_packet *w_packet =
+ (struct ljca_i2c_rw_packet *)ljca_i2c->obuf;
+ struct ljca_i2c_rw_packet *r_packet =
+ (struct ljca_i2c_rw_packet *)ljca_i2c->ibuf;
+ s16 rp_len;
+ int ret;
+
+ w_packet->id = ljca_i2c->i2c_info->id;
+ w_packet->len = cpu_to_le16(len);
+ w_packet->data[0] = 0;
+
+ ret = ljca_transfer(ljca_i2c->ljca, LJCA_I2C_READ, (u8 *)w_packet,
+ struct_size(w_packet, data, 1), (u8 *)r_packet,
+ LJCA_I2C_BUF_SIZE);
+ if (ret < 0 || ret < sizeof(*r_packet))
+ return ret < 0 ? ret : -EIO;
+
+ rp_len = le16_to_cpu(r_packet->len);
+ if (rp_len != len || r_packet->id != w_packet->id) {
+ dev_dbg(&ljca_i2c->adap.dev,
+ "i2c raw read failed len: %d id: %d %d\n",
+ rp_len, r_packet->id, w_packet->id);
+ return -EIO;
+ }
+
+ memcpy(data, r_packet->data, len);
+
+ return 0;
+}
+
+static int ljca_i2c_read(struct ljca_i2c_dev *ljca_i2c, u8 slave_addr, u8 *data,
+ u8 len)
+{
+ int ret;
+
+ ret = ljca_i2c_start(ljca_i2c, slave_addr, LJCA_I2C_READ_XFER_TYPE);
+ if (!ret)
+ ret = ljca_i2c_pure_read(ljca_i2c, data, len);
+
+ ljca_i2c_stop(ljca_i2c, slave_addr);
+
+ return ret;
+}
+
+static int ljca_i2c_pure_write(struct ljca_i2c_dev *ljca_i2c, u8 *data, u8 len)
+{
+ struct ljca_i2c_rw_packet *w_packet =
+ (struct ljca_i2c_rw_packet *)ljca_i2c->obuf;
+ struct ljca_i2c_rw_packet *r_packet =
+ (struct ljca_i2c_rw_packet *)ljca_i2c->ibuf;
+ s16 rplen;
+ int ret;
+
+ w_packet->id = ljca_i2c->i2c_info->id;
+ w_packet->len = cpu_to_le16(len);
+ memcpy(w_packet->data, data, len);
+
+ ret = ljca_transfer(ljca_i2c->ljca, LJCA_I2C_WRITE, (u8 *)w_packet,
+ struct_size(w_packet, data, len), (u8 *)r_packet,
+ LJCA_I2C_BUF_SIZE);
+ if (ret < 0 || ret < sizeof(*r_packet))
+ return ret < 0 ? ret : -EIO;
+
+ rplen = le16_to_cpu(r_packet->len);
+ if (rplen != len || r_packet->id != w_packet->id) {
+ dev_dbg(&ljca_i2c->adap.dev,
+ "i2c write failed len: %d id: %d/%d\n",
+ rplen, r_packet->id, w_packet->id);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ljca_i2c_write(struct ljca_i2c_dev *ljca_i2c, u8 slave_addr,
+ u8 *data, u8 len)
+{
+ int ret;
+
+ ret = ljca_i2c_start(ljca_i2c, slave_addr, LJCA_I2C_WRITE_XFER_TYPE);
+ if (!ret)
+ ret = ljca_i2c_pure_write(ljca_i2c, data, len);
+
+ ljca_i2c_stop(ljca_i2c, slave_addr);
+
+ return ret;
+}
+
+static int ljca_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msg,
+ int num)
+{
+ struct ljca_i2c_dev *ljca_i2c;
+ struct i2c_msg *cur_msg;
+ int i, ret;
+
+ ljca_i2c = i2c_get_adapdata(adapter);
+ if (!ljca_i2c)
+ return -EINVAL;
+
+ for (i = 0; i < num; i++) {
+ cur_msg = &msg[i];
+ if (cur_msg->flags & I2C_M_RD)
+ ret = ljca_i2c_read(ljca_i2c, cur_msg->addr,
+ cur_msg->buf, cur_msg->len);
+ else
+ ret = ljca_i2c_write(ljca_i2c, cur_msg->addr,
+ cur_msg->buf, cur_msg->len);
+
+ if (ret)
+ return ret;
+ }
+
+ return num;
+}
+
+static u32 ljca_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+}
+
+static const struct i2c_adapter_quirks ljca_i2c_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN,
+ .max_read_len = LJCA_I2C_MAX_XFER_SIZE,
+ .max_write_len = LJCA_I2C_MAX_XFER_SIZE,
+};
+
+static const struct i2c_algorithm ljca_i2c_algo = {
+ .master_xfer = ljca_i2c_xfer,
+ .functionality = ljca_i2c_func,
+};
+
+static int ljca_i2c_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *aux_dev_id)
+{
+ struct ljca_client *ljca = auxiliary_dev_to_ljca_client(auxdev);
+ struct ljca_i2c_dev *ljca_i2c;
+ int ret;
+
+ ljca_i2c = devm_kzalloc(&auxdev->dev, sizeof(*ljca_i2c), GFP_KERNEL);
+ if (!ljca_i2c)
+ return -ENOMEM;
+
+ ljca_i2c->ljca = ljca;
+ ljca_i2c->i2c_info = dev_get_platdata(&auxdev->dev);
+
+ ljca_i2c->adap.owner = THIS_MODULE;
+ ljca_i2c->adap.class = I2C_CLASS_HWMON;
+ ljca_i2c->adap.algo = &ljca_i2c_algo;
+ ljca_i2c->adap.quirks = &ljca_i2c_quirks;
+ ljca_i2c->adap.dev.parent = &auxdev->dev;
+
+ snprintf(ljca_i2c->adap.name, sizeof(ljca_i2c->adap.name), "%s-%s-%d",
+ dev_name(&auxdev->dev), dev_name(auxdev->dev.parent),
+ ljca_i2c->i2c_info->id);
+
+ device_set_node(&ljca_i2c->adap.dev, dev_fwnode(&auxdev->dev));
+
+ i2c_set_adapdata(&ljca_i2c->adap, ljca_i2c);
+ auxiliary_set_drvdata(auxdev, ljca_i2c);
+
+ ret = ljca_i2c_init(ljca_i2c, ljca_i2c->i2c_info->id);
+ if (ret)
+ return dev_err_probe(&auxdev->dev, -EIO,
+ "i2c init failed id: %d\n",
+ ljca_i2c->i2c_info->id);
+
+ ret = devm_i2c_add_adapter(&auxdev->dev, &ljca_i2c->adap);
+ if (ret)
+ return ret;
+
+ if (has_acpi_companion(&ljca_i2c->adap.dev))
+ acpi_dev_clear_dependencies(ACPI_COMPANION(&ljca_i2c->adap.dev));
+
+ return 0;
+}
+
+static void ljca_i2c_remove(struct auxiliary_device *auxdev)
+{
+ struct ljca_i2c_dev *ljca_i2c = auxiliary_get_drvdata(auxdev);
+
+ i2c_del_adapter(&ljca_i2c->adap);
+}
+
+static const struct auxiliary_device_id ljca_i2c_id_table[] = {
+ { "usb_ljca.ljca-i2c", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(auxiliary, ljca_i2c_id_table);
+
+static struct auxiliary_driver ljca_i2c_driver = {
+ .probe = ljca_i2c_probe,
+ .remove = ljca_i2c_remove,
+ .id_table = ljca_i2c_id_table,
+};
+module_auxiliary_driver(ljca_i2c_driver);
+
+MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
+MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
+MODULE_AUTHOR("Lixu Zhang <lixu.zhang@intel.com>");
+MODULE_DESCRIPTION("Intel La Jolla Cove Adapter USB-I2C driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(LJCA);
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 1a9b5a068ef1..a8b5719c3372 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -1442,15 +1442,19 @@ static int mtk_i2c_probe(struct platform_device *pdev)
if (IS_ERR(i2c->clocks[I2C_MT65XX_CLK_ARB].clk))
return PTR_ERR(i2c->clocks[I2C_MT65XX_CLK_ARB].clk);
+ i2c->clocks[I2C_MT65XX_CLK_PMIC].clk = devm_clk_get_optional(&pdev->dev, "pmic");
+ if (IS_ERR(i2c->clocks[I2C_MT65XX_CLK_PMIC].clk)) {
+ dev_err(&pdev->dev, "cannot get pmic clock\n");
+ return PTR_ERR(i2c->clocks[I2C_MT65XX_CLK_PMIC].clk);
+ }
+
if (i2c->have_pmic) {
- i2c->clocks[I2C_MT65XX_CLK_PMIC].clk = devm_clk_get(&pdev->dev, "pmic");
- if (IS_ERR(i2c->clocks[I2C_MT65XX_CLK_PMIC].clk)) {
+ if (!i2c->clocks[I2C_MT65XX_CLK_PMIC].clk) {
dev_err(&pdev->dev, "cannot get pmic clock\n");
- return PTR_ERR(i2c->clocks[I2C_MT65XX_CLK_PMIC].clk);
+ return -ENODEV;
}
speed_clk = I2C_MT65XX_CLK_PMIC;
} else {
- i2c->clocks[I2C_MT65XX_CLK_PMIC].clk = NULL;
speed_clk = I2C_MT65XX_CLK_MAIN;
}
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index fd8403b07fa6..dc160cbc3155 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -19,11 +19,10 @@
#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/reset.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
@@ -859,7 +858,7 @@ static int
mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
struct device *dev)
{
- const struct of_device_id *device;
+ const struct mv64xxx_i2c_regs *data;
struct device_node *np = dev->of_node;
u32 bus_freq, tclk;
int rc = 0;
@@ -897,11 +896,11 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
*/
drv_data->adapter.timeout = HZ;
- device = of_match_device(mv64xxx_i2c_of_match_table, dev);
- if (!device)
+ data = device_get_match_data(dev);
+ if (!data)
return -ENODEV;
- memcpy(&drv_data->reg_offsets, device->data, sizeof(drv_data->reg_offsets));
+ memcpy(&drv_data->reg_offsets, data, sizeof(drv_data->reg_offsets));
/*
* For controllers embedded in new SoCs activate the
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 58fd6fa3edf1..42165ef57946 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -25,11 +25,11 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/platform_data/i2c-omap.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/property.h>
/* I2C controller revisions */
#define OMAP_I2C_OMAP1_REV_2 0x20
@@ -1358,7 +1358,6 @@ omap_i2c_probe(struct platform_device *pdev)
const struct omap_i2c_bus_platform_data *pdata =
dev_get_platdata(&pdev->dev);
struct device_node *node = pdev->dev.of_node;
- const struct of_device_id *match;
int irq;
int r;
u32 rev;
@@ -1376,11 +1375,10 @@ omap_i2c_probe(struct platform_device *pdev)
if (IS_ERR(omap->base))
return PTR_ERR(omap->base);
- match = of_match_device(of_match_ptr(omap_i2c_of_match), &pdev->dev);
- if (match) {
+ if (pdev->dev.of_node) {
u32 freq = I2C_MAX_STANDARD_MODE_FREQ;
- pdata = match->data;
+ pdata = device_get_match_data(&pdev->dev);
omap->flags = pdata->flags;
of_property_read_u32(node, "clock-frequency", &freq);
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 4996a628fdae..8e57ebe595be 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -231,7 +231,7 @@ static void i2c_powermac_create_one(struct i2c_adapter *adap,
struct i2c_board_info info = {};
struct i2c_client *newdev;
- strncpy(info.type, type, sizeof(info.type));
+ strscpy(info.type, type, sizeof(info.type));
info.addr = addr;
newdev = i2c_new_client_device(adap, &info);
if (IS_ERR(newdev))
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 29be05af826b..1d7648242749 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -32,6 +32,7 @@
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/i2c-pxa.h>
+#include <linux/property.h>
#include <linux/slab.h>
/* I2C register field definitions */
@@ -1252,10 +1253,8 @@ static int i2c_pxa_probe_dt(struct platform_device *pdev, struct pxa_i2c *i2c,
enum pxa_i2c_types *i2c_types)
{
struct device_node *np = pdev->dev.of_node;
- const struct of_device_id *of_id =
- of_match_device(i2c_pxa_dt_ids, &pdev->dev);
- if (!of_id)
+ if (!pdev->dev.of_node)
return 1;
/* For device tree we always use the dynamic or alias-assigned ID */
@@ -1264,7 +1263,7 @@ static int i2c_pxa_probe_dt(struct platform_device *pdev, struct pxa_i2c *i2c,
i2c->use_pio = of_property_read_bool(np, "mrvl,i2c-polling");
i2c->fast_mode = of_property_read_bool(np, "mrvl,i2c-fast-mode");
- *i2c_types = (enum pxa_i2c_types)(of_id->data);
+ *i2c_types = (enum pxa_i2c_types)device_get_match_data(&pdev->dev);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 229353e96e09..6d829ed2f868 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -722,6 +722,7 @@ static const struct i2c_algorithm geni_i2c_algo = {
#ifdef CONFIG_ACPI
static const struct acpi_device_id geni_i2c_acpi_match[] = {
{ "QCOM0220"},
+ { "QCOM0411" },
{ },
};
MODULE_DEVICE_TABLE(acpi, geni_i2c_acpi_match);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index a32a93f9a60d..829ac053bbb7 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -41,6 +41,10 @@
#define ICSAR 0x1C /* slave address */
#define ICMAR 0x20 /* master address */
#define ICRXTX 0x24 /* data port */
+#define ICCCR2 0x28 /* Clock control 2 */
+#define ICMPR 0x2C /* SCL mask control */
+#define ICHPR 0x30 /* SCL HIGH control */
+#define ICLPR 0x34 /* SCL LOW control */
#define ICFBSCR 0x38 /* first bit setup cycle (Gen3) */
#define ICDMAER 0x3c /* DMA enable (Gen3) */
@@ -84,11 +88,25 @@
#define RMDMAE BIT(1) /* DMA Master Received Enable */
#define TMDMAE BIT(0) /* DMA Master Transmitted Enable */
+/* ICCCR2 */
+#define CDFD BIT(2) /* CDF Disable */
+#define HLSE BIT(1) /* HIGH/LOW Separate Control Enable */
+#define SME BIT(0) /* SCL Mask Enable */
+
/* ICFBSCR */
#define TCYC17 0x0f /* 17*Tcyc delay 1st bit between SDA and SCL */
#define RCAR_MIN_DMA_LEN 8
+/* SCL low/high ratio 5:4 to meet all I2C timing specs (incl safety margin) */
+#define RCAR_SCLD_RATIO 5
+#define RCAR_SCHD_RATIO 4
+/*
+ * SMD should be smaller than SCLD/SCHD and is always around 20 in the docs.
+ * Thus, we simply use 20 which works for low and high speeds.
+ */
+#define RCAR_DEFAULT_SMD 20
+
#define RCAR_BUS_PHASE_START (MDBS | MIE | ESG)
#define RCAR_BUS_PHASE_DATA (MDBS | MIE)
#define RCAR_BUS_PHASE_STOP (MDBS | MIE | FSB)
@@ -128,6 +146,8 @@ struct rcar_i2c_priv {
int pos;
u32 icccr;
+ u16 schd;
+ u16 scld;
u8 recovery_icmcr; /* protected by adapter lock */
enum rcar_i2c_type devtype;
struct i2c_client *slave;
@@ -216,11 +236,16 @@ static void rcar_i2c_init(struct rcar_i2c_priv *priv)
rcar_i2c_write(priv, ICMCR, MDBS);
rcar_i2c_write(priv, ICMSR, 0);
/* start clock */
- rcar_i2c_write(priv, ICCCR, priv->icccr);
-
- if (priv->devtype == I2C_RCAR_GEN3)
+ if (priv->devtype < I2C_RCAR_GEN3) {
+ rcar_i2c_write(priv, ICCCR, priv->icccr);
+ } else {
+ rcar_i2c_write(priv, ICCCR2, CDFD | HLSE | SME);
+ rcar_i2c_write(priv, ICCCR, priv->icccr);
+ rcar_i2c_write(priv, ICMPR, RCAR_DEFAULT_SMD);
+ rcar_i2c_write(priv, ICHPR, priv->schd);
+ rcar_i2c_write(priv, ICLPR, priv->scld);
rcar_i2c_write(priv, ICFBSCR, TCYC17);
-
+ }
}
static int rcar_i2c_bus_barrier(struct rcar_i2c_priv *priv)
@@ -241,7 +266,7 @@ static int rcar_i2c_bus_barrier(struct rcar_i2c_priv *priv)
static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv)
{
- u32 scgd, cdf, round, ick, sum, scl, cdf_width;
+ u32 cdf, round, ick, sum, scl, cdf_width;
unsigned long rate;
struct device *dev = rcar_i2c_priv_to_dev(priv);
struct i2c_timings t = {
@@ -254,27 +279,17 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv)
/* Fall back to previously used values if not supplied */
i2c_parse_fw_timings(dev, &t, false);
- switch (priv->devtype) {
- case I2C_RCAR_GEN1:
- cdf_width = 2;
- break;
- case I2C_RCAR_GEN2:
- case I2C_RCAR_GEN3:
- cdf_width = 3;
- break;
- default:
- dev_err(dev, "device type error\n");
- return -EIO;
- }
-
/*
* calculate SCL clock
* see
- * ICCCR
+ * ICCCR (and ICCCR2 for Gen3+)
*
* ick = clkp / (1 + CDF)
* SCL = ick / (20 + SCGD * 8 + F[(ticf + tr + intd) * ick])
*
+ * for Gen3+:
+ * SCL = clkp / (8 + SMD * 2 + SCLD + SCHD + F[(ticf + tr + intd) * clkp])
+ *
* ick : I2C internal clock < 20 MHz
* ticf : I2C SCL falling time
* tr : I2C SCL rising time
@@ -284,52 +299,82 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv)
*/
rate = clk_get_rate(priv->clk);
cdf = rate / 20000000;
- if (cdf >= 1U << cdf_width) {
- dev_err(dev, "Input clock %lu too high\n", rate);
- return -EIO;
- }
- ick = rate / (cdf + 1);
+ cdf_width = (priv->devtype == I2C_RCAR_GEN1) ? 2 : 3;
+ if (cdf >= 1U << cdf_width)
+ goto err_no_val;
+
+ /* On Gen3+, we use cdf only for the filters, not as a SCL divider */
+ ick = rate / (priv->devtype < I2C_RCAR_GEN3 ? (cdf + 1) : 1);
/*
- * it is impossible to calculate large scale
- * number on u32. separate it
+ * It is impossible to calculate a large scale number on u32. Separate it.
*
* F[(ticf + tr + intd) * ick] with sum = (ticf + tr + intd)
* = F[sum * ick / 1000000000]
* = F[(ick / 1000000) * sum / 1000]
*/
sum = t.scl_fall_ns + t.scl_rise_ns + t.scl_int_delay_ns;
- round = (ick + 500000) / 1000000 * sum;
- round = (round + 500) / 1000;
+ round = DIV_ROUND_CLOSEST(ick, 1000000);
+ round = DIV_ROUND_CLOSEST(round * sum, 1000);
- /*
- * SCL = ick / (20 + SCGD * 8 + F[(ticf + tr + intd) * ick])
- *
- * Calculation result (= SCL) should be less than
- * bus_speed for hardware safety
- *
- * We could use something along the lines of
- * div = ick / (bus_speed + 1) + 1;
- * scgd = (div - 20 - round + 7) / 8;
- * scl = ick / (20 + (scgd * 8) + round);
- * (not fully verified) but that would get pretty involved
- */
- for (scgd = 0; scgd < 0x40; scgd++) {
- scl = ick / (20 + (scgd * 8) + round);
- if (scl <= t.bus_freq_hz)
- goto scgd_find;
- }
- dev_err(dev, "it is impossible to calculate best SCL\n");
- return -EIO;
+ if (priv->devtype < I2C_RCAR_GEN3) {
+ u32 scgd;
+ /*
+ * SCL = ick / (20 + 8 * SCGD + F[(ticf + tr + intd) * ick])
+ * 20 + 8 * SCGD + F[...] = ick / SCL
+ * SCGD = ((ick / SCL) - 20 - F[...]) / 8
+ * Result (= SCL) should be less than bus_speed for hardware safety
+ */
+ scgd = DIV_ROUND_UP(ick, t.bus_freq_hz ?: 1);
+ scgd = DIV_ROUND_UP(scgd - 20 - round, 8);
+ scl = ick / (20 + 8 * scgd + round);
+
+ if (scgd > 0x3f)
+ goto err_no_val;
-scgd_find:
- dev_dbg(dev, "clk %d/%d(%lu), round %u, CDF:0x%x, SCGD: 0x%x\n",
- scl, t.bus_freq_hz, rate, round, cdf, scgd);
+ dev_dbg(dev, "clk %u/%u(%lu), round %u, CDF: %u, SCGD: %u\n",
+ scl, t.bus_freq_hz, rate, round, cdf, scgd);
- /* keep icccr value */
- priv->icccr = scgd << cdf_width | cdf;
+ priv->icccr = scgd << cdf_width | cdf;
+ } else {
+ u32 x, sum_ratio = RCAR_SCHD_RATIO + RCAR_SCLD_RATIO;
+ /*
+ * SCLD/SCHD ratio and SMD default value are explained above
+ * where they are defined. With these definitions, we can compute
+ * x as a base value for the SCLD/SCHD ratio:
+ *
+ * SCL = clkp / (8 + 2 * SMD + SCLD + SCHD + F[(ticf + tr + intd) * clkp])
+ * SCL = clkp / (8 + 2 * RCAR_DEFAULT_SMD + RCAR_SCLD_RATIO * x
+ * + RCAR_SCHD_RATIO * x + F[...])
+ *
+ * with: sum_ratio = RCAR_SCLD_RATIO + RCAR_SCHD_RATIO
+ * and: smd = RCAR_DEFAULT_SMD
+ *
+ * SCL = clkp / (8 + 2 * smd + sum_ratio * x + F[...])
+ * 8 + 2 * smd + sum_ratio * x + F[...] = clkp / SCL
+ * x = ((clkp / SCL) - 8 - 2 * smd - F[...]) / sum_ratio
+ */
+ x = DIV_ROUND_UP(rate, t.bus_freq_hz ?: 1);
+ x = DIV_ROUND_UP(x - 8 - 2 * RCAR_DEFAULT_SMD - round, sum_ratio);
+ scl = rate / (8 + 2 * RCAR_DEFAULT_SMD + sum_ratio * x + round);
+
+ /* Bail out if values don't fit into 16 bit or SMD became too large */
+ if (x * RCAR_SCLD_RATIO > 0xffff || RCAR_DEFAULT_SMD > x * RCAR_SCHD_RATIO)
+ goto err_no_val;
+
+ priv->icccr = cdf;
+ priv->schd = RCAR_SCHD_RATIO * x;
+ priv->scld = RCAR_SCLD_RATIO * x;
+
+ dev_dbg(dev, "clk %u/%u(%lu), round %u, CDF: %u SCHD %u SCLD %u\n",
+ scl, t.bus_freq_hz, rate, round, cdf, priv->schd, priv->scld);
+ }
return 0;
+
+err_no_val:
+ dev_err(dev, "it is impossible to calculate best SCL\n");
+ return -EINVAL;
}
/*
@@ -843,12 +888,10 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
/* Gen3 needs a reset before allowing RXDMA once */
if (priv->devtype == I2C_RCAR_GEN3) {
- priv->flags |= ID_P_NO_RXDMA;
- if (!IS_ERR(priv->rstc)) {
- ret = rcar_i2c_do_reset(priv);
- if (ret == 0)
- priv->flags &= ~ID_P_NO_RXDMA;
- }
+ priv->flags &= ~ID_P_NO_RXDMA;
+ ret = rcar_i2c_do_reset(priv);
+ if (ret)
+ goto out;
}
rcar_i2c_init(priv);
@@ -1099,15 +1142,6 @@ static int rcar_i2c_probe(struct platform_device *pdev)
irqhandler = rcar_i2c_gen2_irq;
}
- if (priv->devtype == I2C_RCAR_GEN3) {
- priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (!IS_ERR(priv->rstc)) {
- ret = reset_control_status(priv->rstc);
- if (ret < 0)
- priv->rstc = ERR_PTR(-ENOTSUPP);
- }
- }
-
/* Stay always active when multi-master to keep arbitration working */
if (of_property_read_bool(dev->of_node, "multi-master"))
priv->flags |= ID_P_PM_BLOCKED;
@@ -1117,6 +1151,18 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (of_property_read_bool(dev->of_node, "smbus"))
priv->flags |= ID_P_HOST_NOTIFY;
+ if (priv->devtype == I2C_RCAR_GEN3) {
+ priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(priv->rstc)) {
+ ret = PTR_ERR(priv->rstc);
+ goto out_pm_put;
+ }
+
+ ret = reset_control_status(priv->rstc);
+ if (ret < 0)
+ goto out_pm_put;
+ }
+
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto out_pm_put;
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index f0ee8871d5ae..e43ff483c56e 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -313,7 +313,7 @@ static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
* frequency with only 62 clock ticks max (31 high, 31 low).
* Aim for a duty of 60% LOW, 40% HIGH.
*/
- total_ticks = DIV_ROUND_UP(rate, t->bus_freq_hz);
+ total_ticks = DIV_ROUND_UP(rate, t->bus_freq_hz ?: 1);
for (cks = 0; cks < 7; cks++) {
/*
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 127eb3805fac..c56886af724e 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -133,7 +133,7 @@ static const struct platform_device_id s3c24xx_driver_ids[] = {
};
MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
-static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat);
+static void i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat);
#ifdef CONFIG_OF
static const struct of_device_id s3c24xx_i2c_match[] = {
@@ -377,11 +377,10 @@ static inline int is_msgend(struct s3c24xx_i2c *i2c)
/*
* process an interrupt and work out what to do
*/
-static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
+static void i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
{
unsigned long tmp;
unsigned char byte;
- int ret = 0;
switch (i2c->state) {
@@ -544,7 +543,7 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
tmp &= ~S3C2410_IICCON_IRQPEND;
writel(tmp, i2c->regs + S3C2410_IICCON);
out:
- return ret;
+ return;
}
/*
diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c
index ecc54792a66f..859ac0cf7f6c 100644
--- a/drivers/i2c/busses/i2c-stm32f4.c
+++ b/drivers/i2c/busses/i2c-stm32f4.c
@@ -783,23 +783,17 @@ static int stm32f4_i2c_probe(struct platform_device *pdev)
return -EINVAL;
}
- i2c_dev->clk = devm_clk_get(&pdev->dev, NULL);
+ i2c_dev->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(i2c_dev->clk)) {
- dev_err(&pdev->dev, "Error: Missing controller clock\n");
+ dev_err(&pdev->dev, "Failed to enable clock\n");
return PTR_ERR(i2c_dev->clk);
}
- ret = clk_prepare_enable(i2c_dev->clk);
- if (ret) {
- dev_err(i2c_dev->dev, "Failed to prepare_enable clock\n");
- return ret;
- }
rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(rst)) {
- ret = dev_err_probe(&pdev->dev, PTR_ERR(rst),
- "Error: Missing reset ctrl\n");
- goto clk_free;
- }
+ if (IS_ERR(rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rst),
+ "Error: Missing reset ctrl\n");
+
reset_control_assert(rst);
udelay(2);
reset_control_deassert(rst);
@@ -816,7 +810,7 @@ static int stm32f4_i2c_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "Failed to request irq event %i\n",
irq_event);
- goto clk_free;
+ return ret;
}
ret = devm_request_irq(&pdev->dev, irq_error, stm32f4_i2c_isr_error, 0,
@@ -824,12 +818,12 @@ static int stm32f4_i2c_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "Failed to request irq error %i\n",
irq_error);
- goto clk_free;
+ return ret;
}
ret = stm32f4_i2c_hw_config(i2c_dev);
if (ret)
- goto clk_free;
+ return ret;
adap = &i2c_dev->adap;
i2c_set_adapdata(adap, i2c_dev);
@@ -845,7 +839,7 @@ static int stm32f4_i2c_probe(struct platform_device *pdev)
ret = i2c_add_adapter(adap);
if (ret)
- goto clk_free;
+ return ret;
platform_set_drvdata(pdev, i2c_dev);
@@ -854,10 +848,6 @@ static int stm32f4_i2c_probe(struct platform_device *pdev)
dev_info(i2c_dev->dev, "STM32F4 I2C driver registered\n");
return 0;
-
-clk_free:
- clk_disable_unprepare(i2c_dev->clk);
- return ret;
}
static void stm32f4_i2c_remove(struct platform_device *pdev)
@@ -865,8 +855,6 @@ static void stm32f4_i2c_remove(struct platform_device *pdev)
struct stm32f4_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c_dev->adap);
-
- clk_unprepare(i2c_dev->clk);
}
static const struct of_device_id stm32f4_i2c_match[] = {
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 0d3c9a041b56..983509936727 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -325,6 +325,7 @@ struct stm32f7_i2c_alert {
* @dnf_dt: value of digital filter requested via dt
* @dnf: value of digital filter to apply
* @alert: SMBus alert specific data
+ * @atomic: boolean indicating that the current transfer is atomic
*/
struct stm32f7_i2c_dev {
struct i2c_adapter adap;
@@ -357,6 +358,7 @@ struct stm32f7_i2c_dev {
u32 dnf_dt;
u32 dnf;
struct stm32f7_i2c_alert *alert;
+ bool atomic;
};
/*
@@ -915,7 +917,8 @@ static void stm32f7_i2c_xfer_msg(struct stm32f7_i2c_dev *i2c_dev,
/* Configure DMA or enable RX/TX interrupt */
i2c_dev->use_dma = false;
- if (i2c_dev->dma && f7_msg->count >= STM32F7_I2C_DMA_LEN_MIN) {
+ if (i2c_dev->dma && f7_msg->count >= STM32F7_I2C_DMA_LEN_MIN
+ && !i2c_dev->atomic) {
ret = stm32_i2c_prep_dma_xfer(i2c_dev->dev, i2c_dev->dma,
msg->flags & I2C_M_RD,
f7_msg->count, f7_msg->buf,
@@ -939,6 +942,9 @@ static void stm32f7_i2c_xfer_msg(struct stm32f7_i2c_dev *i2c_dev,
cr1 |= STM32F7_I2C_CR1_TXDMAEN;
}
+ if (i2c_dev->atomic)
+ cr1 &= ~STM32F7_I2C_ALL_IRQ_MASK; /* Disable all interrupts */
+
/* Configure Start/Repeated Start */
cr2 |= STM32F7_I2C_CR2_START;
@@ -1673,7 +1679,22 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
return IRQ_HANDLED;
}
-static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
+static int stm32f7_i2c_wait_polling(struct stm32f7_i2c_dev *i2c_dev)
+{
+ ktime_t timeout = ktime_add_ms(ktime_get(), i2c_dev->adap.timeout);
+
+ while (ktime_compare(ktime_get(), timeout) < 0) {
+ udelay(5);
+ stm32f7_i2c_isr_event(0, i2c_dev);
+
+ if (completion_done(&i2c_dev->complete))
+ return 1;
+ }
+
+ return 0;
+}
+
+static int stm32f7_i2c_xfer_core(struct i2c_adapter *i2c_adap,
struct i2c_msg msgs[], int num)
{
struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap);
@@ -1697,8 +1718,12 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
stm32f7_i2c_xfer_msg(i2c_dev, msgs);
- time_left = wait_for_completion_timeout(&i2c_dev->complete,
- i2c_dev->adap.timeout);
+ if (!i2c_dev->atomic)
+ time_left = wait_for_completion_timeout(&i2c_dev->complete,
+ i2c_dev->adap.timeout);
+ else
+ time_left = stm32f7_i2c_wait_polling(i2c_dev);
+
ret = f7_msg->result;
if (ret) {
if (i2c_dev->use_dma)
@@ -1730,6 +1755,24 @@ pm_free:
return (ret < 0) ? ret : num;
}
+static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg msgs[], int num)
+{
+ struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap);
+
+ i2c_dev->atomic = false;
+ return stm32f7_i2c_xfer_core(i2c_adap, msgs, num);
+}
+
+static int stm32f7_i2c_xfer_atomic(struct i2c_adapter *i2c_adap,
+ struct i2c_msg msgs[], int num)
+{
+ struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap);
+
+ i2c_dev->atomic = true;
+ return stm32f7_i2c_xfer_core(i2c_adap, msgs, num);
+}
+
static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
unsigned short flags, char read_write,
u8 command, int size,
@@ -2098,6 +2141,7 @@ static u32 stm32f7_i2c_func(struct i2c_adapter *adap)
static const struct i2c_algorithm stm32f7_i2c_algo = {
.master_xfer = stm32f7_i2c_xfer,
+ .master_xfer_atomic = stm32f7_i2c_xfer_atomic,
.smbus_xfer = stm32f7_i2c_smbus_xfer,
.functionality = stm32f7_i2c_func,
.reg_slave = stm32f7_i2c_reg_slave,
@@ -2134,23 +2178,16 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
i2c_dev->wakeup_src = of_property_read_bool(pdev->dev.of_node,
"wakeup-source");
- i2c_dev->clk = devm_clk_get(&pdev->dev, NULL);
+ i2c_dev->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(i2c_dev->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(i2c_dev->clk),
- "Failed to get controller clock\n");
-
- ret = clk_prepare_enable(i2c_dev->clk);
- if (ret) {
- dev_err(&pdev->dev, "Failed to prepare_enable clock\n");
- return ret;
- }
+ "Failed to enable controller clock\n");
rst = devm_reset_control_get(&pdev->dev, NULL);
- if (IS_ERR(rst)) {
- ret = dev_err_probe(&pdev->dev, PTR_ERR(rst),
- "Error: Missing reset ctrl\n");
- goto clk_free;
- }
+ if (IS_ERR(rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rst),
+ "Error: Missing reset ctrl\n");
+
reset_control_assert(rst);
udelay(2);
reset_control_deassert(rst);
@@ -2165,7 +2202,7 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "Failed to request irq event %i\n",
irq_event);
- goto clk_free;
+ return ret;
}
ret = devm_request_irq(&pdev->dev, irq_error, stm32f7_i2c_isr_error, 0,
@@ -2173,29 +2210,28 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "Failed to request irq error %i\n",
irq_error);
- goto clk_free;
+ return ret;
}
setup = of_device_get_match_data(&pdev->dev);
if (!setup) {
dev_err(&pdev->dev, "Can't get device data\n");
- ret = -ENODEV;
- goto clk_free;
+ return -ENODEV;
}
i2c_dev->setup = *setup;
ret = stm32f7_i2c_setup_timing(i2c_dev, &i2c_dev->setup);
if (ret)
- goto clk_free;
+ return ret;
/* Setup Fast mode plus if necessary */
if (i2c_dev->bus_rate > I2C_MAX_FAST_MODE_FREQ) {
ret = stm32f7_i2c_setup_fm_plus_bits(pdev, i2c_dev);
if (ret)
- goto clk_free;
+ return ret;
ret = stm32f7_i2c_write_fm_plus_bits(i2c_dev, true);
if (ret)
- goto clk_free;
+ return ret;
}
adap = &i2c_dev->adap;
@@ -2306,9 +2342,6 @@ clr_wakeup_capable:
fmp_clear:
stm32f7_i2c_write_fm_plus_bits(i2c_dev, false);
-clk_free:
- clk_disable_unprepare(i2c_dev->clk);
-
return ret;
}
@@ -2342,8 +2375,6 @@ static void stm32f7_i2c_remove(struct platform_device *pdev)
}
stm32f7_i2c_write_fm_plus_bits(i2c_dev, false);
-
- clk_disable_unprepare(i2c_dev->clk);
}
static int __maybe_unused stm32f7_i2c_runtime_suspend(struct device *dev)
diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c
index fa6020dced59..85e035e7a1d7 100644
--- a/drivers/i2c/busses/i2c-sun6i-p2wi.c
+++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c
@@ -201,6 +201,11 @@ static int p2wi_probe(struct platform_device *pdev)
return -EINVAL;
}
+ if (clk_freq == 0) {
+ dev_err(dev, "clock-frequency is set to 0 in DT\n");
+ return -EINVAL;
+ }
+
if (of_get_child_count(np) > 1) {
dev_err(dev, "P2WI only supports one slave device\n");
return -EINVAL;
diff --git a/drivers/i2c/i2c-atr.c b/drivers/i2c/i2c-atr.c
index 8ca1daadec93..f21475ae5921 100644
--- a/drivers/i2c/i2c-atr.c
+++ b/drivers/i2c/i2c-atr.c
@@ -94,7 +94,7 @@ struct i2c_atr {
struct notifier_block i2c_nb;
- struct i2c_adapter *adapter[];
+ struct i2c_adapter *adapter[] __counted_by(max_adapters);
};
static struct i2c_atr_alias_pair *
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 60746652fd52..eac90a3cf61a 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -931,8 +931,9 @@ int i2c_dev_irq_from_resources(const struct resource *resources,
struct i2c_client *
i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
{
- struct i2c_client *client;
- int status;
+ struct i2c_client *client;
+ bool need_put = false;
+ int status;
client = kzalloc(sizeof *client, GFP_KERNEL);
if (!client)
@@ -970,7 +971,6 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
client->dev.fwnode = info->fwnode;
device_enable_async_suspend(&client->dev);
- i2c_dev_set_name(adap, client, info);
if (info->swnode) {
status = device_add_software_node(&client->dev, info->swnode);
@@ -982,6 +982,7 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
}
}
+ i2c_dev_set_name(adap, client, info);
status = device_register(&client->dev);
if (status)
goto out_remove_swnode;
@@ -993,6 +994,7 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
out_remove_swnode:
device_remove_software_node(&client->dev);
+ need_put = true;
out_err_put_of_node:
of_node_put(info->of_node);
out_err:
@@ -1000,7 +1002,10 @@ out_err:
"Failed to register i2c client %s at 0x%02x (%d)\n",
client->name, client->addr, status);
out_err_silent:
- kfree(client);
+ if (need_put)
+ put_device(&client->dev);
+ else
+ kfree(client);
return ERR_PTR(status);
}
EXPORT_SYMBOL_GPL(i2c_new_client_device);
@@ -1189,9 +1194,11 @@ static void i2c_adapter_dev_release(struct device *dev)
unsigned int i2c_adapter_depth(struct i2c_adapter *adapter)
{
unsigned int depth = 0;
+ struct device *parent;
- while ((adapter = i2c_parent_is_i2c_adapter(adapter)))
- depth++;
+ for (parent = adapter->dev.parent; parent; parent = parent->parent)
+ if (parent->type == &i2c_adapter_type)
+ depth++;
WARN_ONCE(depth >= MAX_LOCKDEP_SUBCLASSES,
"adapter depth exceeds lockdep subclass limit\n");
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index a01b59e3599b..8b7e599f1674 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -450,8 +450,8 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
return -EINVAL;
- rdwr_pa = memdup_user(rdwr_arg.msgs,
- rdwr_arg.nmsgs * sizeof(struct i2c_msg));
+ rdwr_pa = memdup_array_user(rdwr_arg.msgs,
+ rdwr_arg.nmsgs, sizeof(struct i2c_msg));
if (IS_ERR(rdwr_pa))
return PTR_ERR(rdwr_pa);
@@ -636,7 +636,10 @@ static const struct file_operations i2cdev_fops = {
/* ------------------------------------------------------------------------- */
-static struct class *i2c_dev_class;
+static const struct class i2c_dev_class = {
+ .name = "i2c-dev",
+ .dev_groups = i2c_groups,
+};
static void i2cdev_dev_release(struct device *dev)
{
@@ -665,7 +668,7 @@ static int i2cdev_attach_adapter(struct device *dev)
device_initialize(&i2c_dev->dev);
i2c_dev->dev.devt = MKDEV(I2C_MAJOR, adap->nr);
- i2c_dev->dev.class = i2c_dev_class;
+ i2c_dev->dev.class = &i2c_dev_class;
i2c_dev->dev.parent = &adap->dev;
i2c_dev->dev.release = i2cdev_dev_release;
@@ -751,12 +754,9 @@ static int __init i2c_dev_init(void)
if (res)
goto out;
- i2c_dev_class = class_create("i2c-dev");
- if (IS_ERR(i2c_dev_class)) {
- res = PTR_ERR(i2c_dev_class);
+ res = class_register(&i2c_dev_class);
+ if (res)
goto out_unreg_chrdev;
- }
- i2c_dev_class->dev_groups = i2c_groups;
/* Keep track of adapters which will be added or removed later */
res = bus_register_notifier(&i2c_bus_type, &i2cdev_notifier);
@@ -769,7 +769,7 @@ static int __init i2c_dev_init(void)
return 0;
out_unreg_class:
- class_destroy(i2c_dev_class);
+ class_unregister(&i2c_dev_class);
out_unreg_chrdev:
unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS);
out:
@@ -781,7 +781,7 @@ static void __exit i2c_dev_exit(void)
{
bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier);
i2c_for_each_dev(NULL, i2c_dev_detach_adapter);
- class_destroy(i2c_dev_class);
+ class_unregister(&i2c_dev_class);
unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS);
}
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index 9f2e4aa28159..7e2686b606c0 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -32,7 +32,7 @@ struct i2c_demux_pinctrl_priv {
const char *bus_name;
struct i2c_adapter cur_adap;
struct i2c_algorithm algo;
- struct i2c_demux_pinctrl_chan chan[];
+ struct i2c_demux_pinctrl_chan chan[] __counted_by(num_chan);
};
static int i2c_demux_master_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
@@ -167,9 +167,9 @@ static ssize_t available_masters_show(struct device *dev,
int count = 0, i;
for (i = 0; i < priv->num_chan && count < PAGE_SIZE; i++)
- count += scnprintf(buf + count, PAGE_SIZE - count, "%d:%pOF%c",
- i, priv->chan[i].parent_np,
- i == priv->num_chan - 1 ? '\n' : ' ');
+ count += sysfs_emit_at(buf, count, "%d:%pOF%c",
+ i, priv->chan[i].parent_np,
+ i == priv->num_chan - 1 ? '\n' : ' ');
return count;
}
@@ -226,6 +226,8 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
if (!priv || !props)
return -ENOMEM;
+ priv->num_chan = num_chan;
+
err = of_property_read_string(np, "i2c-bus-name", &priv->bus_name);
if (err)
return err;
@@ -253,9 +255,7 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
of_changeset_update_property(&priv->chan[i].chgset, adap_np, &props[i]);
}
- priv->num_chan = num_chan;
priv->dev = &pdev->dev;
-
platform_set_drvdata(pdev, priv);
pm_runtime_no_callbacks(&pdev->dev);
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index 78987ead6be0..6b979a0a6ab8 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -22,7 +22,7 @@ struct gpiomux {
struct gpio_desc **gpios;
};
-static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val)
+static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned int val)
{
DECLARE_BITMAP(values, BITS_PER_TYPE(val));
@@ -58,7 +58,7 @@ static int i2c_mux_gpio_probe_fw(struct gpiomux *mux,
struct device_node *adapter_np;
struct i2c_adapter *adapter = NULL;
struct fwnode_handle *child;
- unsigned *values;
+ unsigned int *values;
int rc, i = 0;
if (is_of_node(fwnode)) {
@@ -101,7 +101,6 @@ static int i2c_mux_gpio_probe_fw(struct gpiomux *mux,
device_for_each_child_node(dev, child) {
if (is_of_node(child)) {
fwnode_property_read_u32(child, "reg", values + i);
-
} else if (is_acpi_node(child)) {
rc = acpi_get_local_address(ACPI_HANDLE_FWNODE(child), values + i);
if (rc) {
@@ -126,7 +125,7 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
struct gpiomux *mux;
struct i2c_adapter *parent;
struct i2c_adapter *root;
- unsigned initial_state;
+ unsigned int initial_state;
int i, ngpios, ret;
mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 959ec5269376..95caa162706f 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -1556,9 +1556,11 @@ i3c_master_register_new_i3c_devs(struct i3c_master_controller *master)
desc->dev->dev.of_node = desc->boardinfo->of_node;
ret = device_register(&desc->dev->dev);
- if (ret)
+ if (ret) {
dev_err(&master->dev,
"Failed to add I3C device (err = %d)\n", ret);
+ put_device(&desc->dev->dev);
+ }
}
}
@@ -2340,7 +2342,7 @@ static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
adap->dev.parent = master->dev.parent;
adap->owner = master->dev.parent->driver->owner;
adap->algo = &i3c_master_i2c_algo;
- strncpy(adap->name, dev_name(master->dev.parent), sizeof(adap->name));
+ strscpy(adap->name, dev_name(master->dev.parent), sizeof(adap->name));
/* FIXME: Should we allow i3c masters to override these values? */
adap->timeout = 1000;
@@ -2403,7 +2405,7 @@ static void i3c_master_unregister_i3c_devs(struct i3c_master_controller *master)
void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot)
{
atomic_inc(&dev->ibi->pending_ibis);
- queue_work(dev->common.master->wq, &slot->work);
+ queue_work(dev->ibi->wq, &slot->work);
}
EXPORT_SYMBOL_GPL(i3c_master_queue_ibi);
@@ -2660,6 +2662,10 @@ int i3c_master_register(struct i3c_master_controller *master,
device_initialize(&master->dev);
dev_set_name(&master->dev, "i3c-%d", i3cbus->id);
+ master->dev.dma_mask = parent->dma_mask;
+ master->dev.coherent_dma_mask = parent->coherent_dma_mask;
+ master->dev.dma_parms = parent->dma_parms;
+
ret = of_populate_i3c_bus(master);
if (ret)
goto err_put_dev;
@@ -2848,6 +2854,12 @@ int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
if (!ibi)
return -ENOMEM;
+ ibi->wq = alloc_ordered_workqueue(dev_name(i3cdev_to_dev(dev->dev)), WQ_MEM_RECLAIM);
+ if (!ibi->wq) {
+ kfree(ibi);
+ return -ENOMEM;
+ }
+
atomic_set(&ibi->pending_ibis, 0);
init_completion(&ibi->all_ibis_handled);
ibi->handler = req->handler;
@@ -2875,6 +2887,12 @@ void i3c_dev_free_ibi_locked(struct i3c_dev_desc *dev)
WARN_ON(i3c_dev_disable_ibi_locked(dev));
master->ops->free_ibi(dev);
+
+ if (dev->ibi->wq) {
+ destroy_workqueue(dev->ibi->wq);
+ dev->ibi->wq = NULL;
+ }
+
kfree(dev->ibi);
dev->ibi = NULL;
}
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index 9332ae5f6419..ef5751e91cc9 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -233,7 +233,7 @@ struct dw_i3c_xfer {
struct completion comp;
int ret;
unsigned int ncmds;
- struct dw_i3c_cmd cmds[];
+ struct dw_i3c_cmd cmds[] __counted_by(ncmds);
};
struct dw_i3c_i2c_dev_data {
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index 49551db71bc9..bcbe8f914149 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -191,7 +191,7 @@
#define SLV_STATUS1_HJ_DIS BIT(18)
#define SLV_STATUS1_MR_DIS BIT(17)
#define SLV_STATUS1_PROT_ERR BIT(16)
-#define SLV_STATUS1_DA(x) (((s) & GENMASK(15, 9)) >> 9)
+#define SLV_STATUS1_DA(s) (((s) & GENMASK(15, 9)) >> 9)
#define SLV_STATUS1_HAS_DA BIT(8)
#define SLV_STATUS1_DDR_RX_FULL BIT(7)
#define SLV_STATUS1_DDR_TX_FULL BIT(6)
@@ -387,7 +387,7 @@ struct cdns_i3c_xfer {
struct completion comp;
int ret;
unsigned int ncmds;
- struct cdns_i3c_cmd cmds[];
+ struct cdns_i3c_cmd cmds[] __counted_by(ncmds);
};
struct cdns_i3c_data {
@@ -1623,13 +1623,13 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
/* Device ID0 is reserved to describe this master. */
master->maxdevs = CONF_STATUS0_DEVS_NUM(val);
master->free_rr_slots = GENMASK(master->maxdevs, 1);
+ master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
+ master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
val = readl(master->regs + CONF_STATUS1);
master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val);
master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val);
master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val);
- master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
- master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
spin_lock_init(&master->ibi.lock);
master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val);
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
index 6a781f89b0e4..2b2323aa6714 100644
--- a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
@@ -332,6 +332,7 @@ static int hci_cmd_v1_daa(struct i3c_hci *hci)
CMD_A0_DEV_COUNT(1) |
CMD_A0_ROC | CMD_A0_TOC;
xfer->cmd_desc[1] = 0;
+ xfer->completion = &done;
hci->io->queue_xfer(hci, xfer, 1);
if (!wait_for_completion_timeout(&done, HZ) &&
hci->io->dequeue_xfer(hci, xfer, 1)) {
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
index 837af83c85f4..1ae56a5699c6 100644
--- a/drivers/i3c/master/mipi-i3c-hci/core.c
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -161,10 +161,12 @@ static int i3c_hci_bus_init(struct i3c_master_controller *m)
static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
{
struct i3c_hci *hci = to_i3c_hci(m);
+ struct platform_device *pdev = to_platform_device(m->dev.parent);
DBG("");
reg_clear(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
+ synchronize_irq(platform_get_irq(pdev, 0));
hci->io->cleanup(hci);
if (hci->cmd == &mipi_i3c_hci_cmd_v1)
mipi_i3c_hci_dat_v1.cleanup(hci);
@@ -172,8 +174,7 @@ static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
void mipi_i3c_hci_resume(struct i3c_hci *hci)
{
- /* the HC_CONTROL_RESUME bit is R/W1C so just read and write back */
- reg_write(HC_CONTROL, reg_read(HC_CONTROL));
+ reg_set(HC_CONTROL, HC_CONTROL_RESUME);
}
/* located here rather than pio.c because needed bits are in core reg space */
@@ -610,17 +611,17 @@ static int i3c_hci_init(struct i3c_hci *hci)
offset = FIELD_GET(DAT_TABLE_OFFSET, regval);
hci->DAT_regs = offset ? hci->base_regs + offset : NULL;
hci->DAT_entries = FIELD_GET(DAT_TABLE_SIZE, regval);
- hci->DAT_entry_size = FIELD_GET(DAT_ENTRY_SIZE, regval);
+ hci->DAT_entry_size = FIELD_GET(DAT_ENTRY_SIZE, regval) ? 0 : 8;
dev_info(&hci->master.dev, "DAT: %u %u-bytes entries at offset %#x\n",
- hci->DAT_entries, hci->DAT_entry_size * 4, offset);
+ hci->DAT_entries, hci->DAT_entry_size, offset);
regval = reg_read(DCT_SECTION);
offset = FIELD_GET(DCT_TABLE_OFFSET, regval);
hci->DCT_regs = offset ? hci->base_regs + offset : NULL;
hci->DCT_entries = FIELD_GET(DCT_TABLE_SIZE, regval);
- hci->DCT_entry_size = FIELD_GET(DCT_ENTRY_SIZE, regval);
+ hci->DCT_entry_size = FIELD_GET(DCT_ENTRY_SIZE, regval) ? 0 : 16;
dev_info(&hci->master.dev, "DCT: %u %u-bytes entries at offset %#x\n",
- hci->DCT_entries, hci->DCT_entry_size * 4, offset);
+ hci->DCT_entries, hci->DCT_entry_size, offset);
regval = reg_read(RING_HEADERS_SECTION);
offset = FIELD_GET(RING_HEADERS_OFFSET, regval);
@@ -787,6 +788,7 @@ static struct platform_driver i3c_hci_driver = {
},
};
module_platform_driver(i3c_hci_driver);
+MODULE_ALIAS("platform:mipi-i3c-hci");
MODULE_AUTHOR("Nicolas Pitre <npitre@baylibre.com>");
MODULE_DESCRIPTION("MIPI I3C HCI driver");
diff --git a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
index 97bb49ff5b53..47b9b4d4ed3f 100644
--- a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
+++ b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
@@ -64,15 +64,17 @@ static int hci_dat_v1_init(struct i3c_hci *hci)
return -EOPNOTSUPP;
}
- /* use a bitmap for faster free slot search */
- hci->DAT_data = bitmap_zalloc(hci->DAT_entries, GFP_KERNEL);
- if (!hci->DAT_data)
- return -ENOMEM;
-
- /* clear them */
- for (dat_idx = 0; dat_idx < hci->DAT_entries; dat_idx++) {
- dat_w0_write(dat_idx, 0);
- dat_w1_write(dat_idx, 0);
+ if (!hci->DAT_data) {
+ /* use a bitmap for faster free slot search */
+ hci->DAT_data = bitmap_zalloc(hci->DAT_entries, GFP_KERNEL);
+ if (!hci->DAT_data)
+ return -ENOMEM;
+
+ /* clear them */
+ for (dat_idx = 0; dat_idx < hci->DAT_entries; dat_idx++) {
+ dat_w0_write(dat_idx, 0);
+ dat_w1_write(dat_idx, 0);
+ }
}
return 0;
@@ -87,7 +89,13 @@ static void hci_dat_v1_cleanup(struct i3c_hci *hci)
static int hci_dat_v1_alloc_entry(struct i3c_hci *hci)
{
unsigned int dat_idx;
+ int ret;
+ if (!hci->DAT_data) {
+ ret = hci_dat_v1_init(hci);
+ if (ret)
+ return ret;
+ }
dat_idx = find_first_zero_bit(hci->DAT_data, hci->DAT_entries);
if (dat_idx >= hci->DAT_entries)
return -ENOENT;
@@ -103,7 +111,8 @@ static void hci_dat_v1_free_entry(struct i3c_hci *hci, unsigned int dat_idx)
{
dat_w0_write(dat_idx, 0);
dat_w1_write(dat_idx, 0);
- __clear_bit(dat_idx, hci->DAT_data);
+ if (hci->DAT_data)
+ __clear_bit(dat_idx, hci->DAT_data);
}
static void hci_dat_v1_set_dynamic_addr(struct i3c_hci *hci,
diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
index 2990ac9eaade..c805a8497319 100644
--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
+++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
@@ -139,7 +139,7 @@ struct hci_rh_data {
struct hci_rings_data {
unsigned int total;
- struct hci_rh_data headers[];
+ struct hci_rh_data headers[] __counted_by(total);
};
struct hci_dma_dev_ibi_data {
@@ -229,6 +229,9 @@ static int hci_dma_init(struct i3c_hci *hci)
hci->io_data = rings;
rings->total = nr_rings;
+ regval = FIELD_PREP(MAX_HEADER_COUNT, rings->total);
+ rhs_reg_write(CONTROL, regval);
+
for (i = 0; i < rings->total; i++) {
u32 offset = rhs_reg_read(RHn_OFFSET(i));
@@ -325,11 +328,10 @@ static int hci_dma_init(struct i3c_hci *hci)
rh_reg_write(INTR_SIGNAL_ENABLE, regval);
ring_ready:
- rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
+ rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE |
+ RING_CTRL_RUN_STOP);
}
- regval = FIELD_PREP(MAX_HEADER_COUNT, rings->total);
- rhs_reg_write(CONTROL, regval);
return 0;
err_out:
@@ -345,6 +347,8 @@ static void hci_dma_unmap_xfer(struct i3c_hci *hci,
for (i = 0; i < n; i++) {
xfer = xfer_list + i;
+ if (!xfer->data)
+ continue;
dma_unmap_single(&hci->master.dev,
xfer->data_dma, xfer->data_len,
xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
@@ -450,10 +454,9 @@ static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
/*
* We're deep in it if this condition is ever met.
* Hardware might still be writing to memory, etc.
- * Better suspend the world than risking silent corruption.
*/
dev_crit(&hci->master.dev, "unable to abort the ring\n");
- BUG();
+ WARN_ON(1);
}
for (i = 0; i < n; i++) {
@@ -734,7 +737,7 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci, unsigned int mask)
unsigned int i;
bool handled = false;
- for (i = 0; mask && i < 8; i++) {
+ for (i = 0; mask && i < rings->total; i++) {
struct hci_rh_data *rh;
u32 status;
@@ -756,9 +759,11 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci, unsigned int mask)
if (status & INTR_RING_OP)
complete(&rh->op_done);
- if (status & INTR_TRANSFER_ABORT)
+ if (status & INTR_TRANSFER_ABORT) {
dev_notice_ratelimited(&hci->master.dev,
"ring %d: Transfer Aborted\n", i);
+ mipi_i3c_hci_resume(hci);
+ }
if (status & INTR_WARN_INS_STOP_MODE)
dev_warn_ratelimited(&hci->master.dev,
"ring %d: Inserted Stop on Mode Change\n", i);
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index 8f8295acdadb..cf703c00f633 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -93,6 +93,7 @@
#define SVC_I3C_MINTMASKED 0x098
#define SVC_I3C_MERRWARN 0x09C
#define SVC_I3C_MERRWARN_NACK BIT(2)
+#define SVC_I3C_MERRWARN_TIMEOUT BIT(20)
#define SVC_I3C_MDMACTRL 0x0A0
#define SVC_I3C_MDATACTRL 0x0AC
#define SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
@@ -143,7 +144,7 @@ struct svc_i3c_xfer {
int ret;
unsigned int type;
unsigned int ncmds;
- struct svc_i3c_cmd cmds[];
+ struct svc_i3c_cmd cmds[] __counted_by(ncmds);
};
struct svc_i3c_regs_save {
@@ -175,6 +176,7 @@ struct svc_i3c_regs_save {
* @ibi.slots: Available IBI slots
* @ibi.tbq_slot: To be queued IBI slot
* @ibi.lock: IBI lock
+ * @lock: Transfer lock, protects against races between the IBI work thread and transfer callbacks from the master
*/
struct svc_i3c_master {
struct i3c_master_controller base;
@@ -203,6 +205,7 @@ struct svc_i3c_master {
/* Prevent races within IBI handlers */
spinlock_t lock;
} ibi;
+ struct mutex lock;
};
/**
@@ -225,6 +228,14 @@ static bool svc_i3c_master_error(struct svc_i3c_master *master)
if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
+
+ /* Ignore timeout error */
+ if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
+ dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
+ mstatus, merrwarn);
+ return false;
+ }
+
dev_err(master->dev,
"Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
mstatus, merrwarn);
@@ -331,6 +342,7 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
struct i3c_ibi_slot *slot;
unsigned int count;
u32 mdatactrl;
+ int ret, val;
u8 *buf;
slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
@@ -340,6 +352,13 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
slot->len = 0;
buf = slot->data;
+ ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
+ SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
+ if (ret) {
+ dev_err(master->dev, "Timeout when polling for COMPLETE\n");
+ return ret;
+ }
+
while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) &&
slot->len < SVC_I3C_FIFO_SIZE) {
mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
@@ -384,6 +403,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
u32 status, val;
int ret;
+ mutex_lock(&master->lock);
/* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
SVC_I3C_MCTRL_IBIRESP_AUTO,
@@ -394,6 +414,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
if (ret) {
dev_err(master->dev, "Timeout when polling for IBIWON\n");
+ svc_i3c_master_emit_stop(master);
goto reenable_ibis;
}
@@ -460,12 +481,13 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
reenable_ibis:
svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
+ mutex_unlock(&master->lock);
}
static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
{
struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
- u32 active = readl(master->regs + SVC_I3C_MINTMASKED);
+ u32 active = readl(master->regs + SVC_I3C_MSTATUS);
if (!SVC_I3C_MSTATUS_SLVSTART(active))
return IRQ_NONE;
@@ -765,7 +787,7 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
u8 data[6];
/*
- * We only care about the 48-bit provisional ID yet to
+ * We only care about the 48-bit provisioned ID yet to
* be sure a device does not nack an address twice.
* Otherwise, we would just need to flush the RX FIFO.
*/
@@ -1007,6 +1029,9 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
u32 reg;
int ret;
+ /* clear the SVC_I3C_MINT_IBIWON w1c bits */
+ writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
+
writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
xfer_type |
SVC_I3C_MCTRL_IBIRESP_NACK |
@@ -1025,6 +1050,23 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
goto emit_stop;
}
+ /*
+ * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a Frame
+ * with I3C Target Address.
+ *
+ * The I3C Controller normally should start a Frame, the Address may be arbitrated, and so
+ * the Controller shall monitor to see whether an In-Band Interrupt request, a Controller
+ * Role Request (i.e., Secondary Controller requests to become the Active Controller), or
+ * a Hot-Join Request has been made.
+ *
+ * If the IBIWON check is missed, wrong data will be returned. When IBIWON happens, return
+ * failure and yield to the above event handlers.
+ */
+ if (SVC_I3C_MSTATUS_IBIWON(reg)) {
+ ret = -ENXIO;
+ goto emit_stop;
+ }
+
if (rnw)
ret = svc_i3c_master_read(master, in, xfer_len);
else
@@ -1204,9 +1246,11 @@ static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
cmd->read_len = 0;
cmd->continued = false;
+ mutex_lock(&master->lock);
svc_i3c_master_enqueue_xfer(master, xfer);
if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
svc_i3c_master_dequeue_xfer(master, xfer);
+ mutex_unlock(&master->lock);
ret = xfer->ret;
kfree(buf);
@@ -1250,9 +1294,11 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
cmd->read_len = read_len;
cmd->continued = false;
+ mutex_lock(&master->lock);
svc_i3c_master_enqueue_xfer(master, xfer);
if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
svc_i3c_master_dequeue_xfer(master, xfer);
+ mutex_unlock(&master->lock);
if (cmd->read_len != xfer_len)
ccc->dests[0].payload.len = cmd->read_len;
@@ -1309,9 +1355,11 @@ static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
cmd->continued = (i + 1) < nxfers;
}
+ mutex_lock(&master->lock);
svc_i3c_master_enqueue_xfer(master, xfer);
if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
svc_i3c_master_dequeue_xfer(master, xfer);
+ mutex_unlock(&master->lock);
ret = xfer->ret;
svc_i3c_master_free_xfer(xfer);
@@ -1347,9 +1395,11 @@ static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
cmd->continued = (i + 1 < nxfers);
}
+ mutex_lock(&master->lock);
svc_i3c_master_enqueue_xfer(master, xfer);
if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
svc_i3c_master_dequeue_xfer(master, xfer);
+ mutex_unlock(&master->lock);
ret = xfer->ret;
svc_i3c_master_free_xfer(xfer);
@@ -1540,6 +1590,8 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
+ mutex_init(&master->lock);
+
ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
IRQF_NO_SUSPEND, "svc-i3c-irq", master);
if (ret)
@@ -1651,7 +1703,7 @@ static const struct dev_pm_ops svc_i3c_pm_ops = {
};
static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
- { .compatible = "silvaco,i3c-master" },
+ { .compatible = "silvaco,i3c-master-v1"},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);
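The svc-i3c hunks above serialize the IBI work handler against every queued transfer and CCC command with the new per-controller mutex. A rough condensation of the locking pattern, not literal driver code (the helper name is invented; the calls are the ones used above):

	/* Sketch only: each transfer path now excludes svc_i3c_master_ibi_work(). */
	static int svc_i3c_do_xfer_locked(struct svc_i3c_master *master,
					  struct svc_i3c_xfer *xfer)
	{
		mutex_lock(&master->lock);
		svc_i3c_master_enqueue_xfer(master, xfer);
		if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
			svc_i3c_master_dequeue_xfer(master, xfer);
		mutex_unlock(&master->lock);
		return xfer->ret;
	}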
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index b6b45d359f28..f113dae59048 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -418,8 +418,9 @@ config IIO_KX022A_SPI
select IIO_KX022A
select REGMAP_SPI
help
- Enable support for the Kionix KX022A digital tri-axis
- accelerometer connected to I2C interface.
+ Enable support for the Kionix digital tri-axis accelerometers
+ connected to an SPI interface. Supported devices are:
+ KX022A, KX132-1211, KX132ACR-LBZ
config IIO_KX022A_I2C
tristate "Kionix KX022A tri-axis digital accelerometer I2C interface"
@@ -427,8 +428,9 @@ config IIO_KX022A_I2C
select IIO_KX022A
select REGMAP_I2C
help
- Enable support for the Kionix KX022A digital tri-axis
- accelerometer connected to I2C interface.
+ Enable support for the Kionix digital tri-axis accelerometers
+ connected to an I2C interface. Supported devices are:
+ KX022A, KX132-1211, KX132ACR-LBZ
config KXSD9
tristate "Kionix KXSD9 Accelerometer Driver"
diff --git a/drivers/iio/accel/adxl345.h b/drivers/iio/accel/adxl345.h
index d7e67cb08538..284bd387ce69 100644
--- a/drivers/iio/accel/adxl345.h
+++ b/drivers/iio/accel/adxl345.h
@@ -8,9 +8,24 @@
#ifndef _ADXL345_H_
#define _ADXL345_H_
-enum adxl345_device_type {
- ADXL345 = 1,
- ADXL375 = 2,
+/*
+ * In full-resolution mode, scale factor is maintained at ~4 mg/LSB
+ * in all g ranges.
+ *
+ * At +/- 16g with 13-bit resolution, scale is computed as:
+ * (16 + 16) * 9.81 / (2^13 - 1) = 0.0383
+ */
+#define ADXL345_USCALE 38300
+
+/*
+ * The datasheet lists a resolution of ~49 mg per LSB. That's
+ * ~480 mm/s**2 per LSB.
+ */
+#define ADXL375_USCALE 480000
+
+struct adxl345_chip_info {
+ const char *name;
+ int uscale;
};
int adxl345_core_probe(struct device *dev, struct regmap *regmap);
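As a quick sanity check on the two constants introduced above (plain arithmetic, not part of the patch):

	ADXL345: (16 + 16) * 9.81 / (2^13 - 1) = 313.92 / 8191 ~= 0.0383 m/s**2 per LSB ~= 38300 u(m/s**2)
	ADXL375: 0.049 g * 9.81 (m/s**2)/g                      ~= 0.4807 m/s**2 per LSB ~= 480000 u(m/s**2)

Both are reported through IIO_VAL_INT_PLUS_MICRO with a zero integer part, so the micro-unit values match ADXL345_USCALE and ADXL375_USCALE directly.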
diff --git a/drivers/iio/accel/adxl345_core.c b/drivers/iio/accel/adxl345_core.c
index 1919e0089c11..8bd30a23ed3b 100644
--- a/drivers/iio/accel/adxl345_core.c
+++ b/drivers/iio/accel/adxl345_core.c
@@ -45,25 +45,10 @@
#define ADXL345_DEVID 0xE5
-/*
- * In full-resolution mode, scale factor is maintained at ~4 mg/LSB
- * in all g ranges.
- *
- * At +/- 16g with 13-bit resolution, scale is computed as:
- * (16 + 16) * 9.81 / (2^13 - 1) = 0.0383
- */
-static const int adxl345_uscale = 38300;
-
-/*
- * The Datasheet lists a resolution of Resolution is ~49 mg per LSB. That's
- * ~480mm/s**2 per LSB.
- */
-static const int adxl375_uscale = 480000;
-
struct adxl345_data {
+ const struct adxl345_chip_info *info;
struct regmap *regmap;
u8 data_range;
- enum adxl345_device_type type;
};
#define ADXL345_CHANNEL(index, axis) { \
@@ -110,15 +95,7 @@ static int adxl345_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 0;
- switch (data->type) {
- case ADXL345:
- *val2 = adxl345_uscale;
- break;
- case ADXL375:
- *val2 = adxl375_uscale;
- break;
- }
-
+ *val2 = data->info->uscale;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_CALIBBIAS:
ret = regmap_read(data->regmap,
@@ -222,25 +199,11 @@ static void adxl345_powerdown(void *regmap)
int adxl345_core_probe(struct device *dev, struct regmap *regmap)
{
- enum adxl345_device_type type;
struct adxl345_data *data;
struct iio_dev *indio_dev;
- const char *name;
u32 regval;
int ret;
- type = (uintptr_t)device_get_match_data(dev);
- switch (type) {
- case ADXL345:
- name = "adxl345";
- break;
- case ADXL375:
- name = "adxl375";
- break;
- default:
- return -EINVAL;
- }
-
ret = regmap_read(regmap, ADXL345_REG_DEVID, &regval);
if (ret < 0)
return dev_err_probe(dev, ret, "Error reading device ID\n");
@@ -255,16 +218,18 @@ int adxl345_core_probe(struct device *dev, struct regmap *regmap)
data = iio_priv(indio_dev);
data->regmap = regmap;
- data->type = type;
/* Enable full-resolution mode */
data->data_range = ADXL345_DATA_FORMAT_FULL_RES;
+ data->info = device_get_match_data(dev);
+ if (!data->info)
+ return -ENODEV;
ret = regmap_write(data->regmap, ADXL345_REG_DATA_FORMAT,
data->data_range);
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to set data range\n");
- indio_dev->name = name;
+ indio_dev->name = data->info->name;
indio_dev->info = &adxl345_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = adxl345_channels;
diff --git a/drivers/iio/accel/adxl345_i2c.c b/drivers/iio/accel/adxl345_i2c.c
index e47d12f19602..a3084b0a8f78 100644
--- a/drivers/iio/accel/adxl345_i2c.c
+++ b/drivers/iio/accel/adxl345_i2c.c
@@ -30,22 +30,32 @@ static int adxl345_i2c_probe(struct i2c_client *client)
return adxl345_core_probe(&client->dev, regmap);
}
+static const struct adxl345_chip_info adxl345_i2c_info = {
+ .name = "adxl345",
+ .uscale = ADXL345_USCALE,
+};
+
+static const struct adxl345_chip_info adxl375_i2c_info = {
+ .name = "adxl375",
+ .uscale = ADXL375_USCALE,
+};
+
static const struct i2c_device_id adxl345_i2c_id[] = {
- { "adxl345", ADXL345 },
- { "adxl375", ADXL375 },
+ { "adxl345", (kernel_ulong_t)&adxl345_i2c_info },
+ { "adxl375", (kernel_ulong_t)&adxl375_i2c_info },
{ }
};
MODULE_DEVICE_TABLE(i2c, adxl345_i2c_id);
static const struct of_device_id adxl345_of_match[] = {
- { .compatible = "adi,adxl345", .data = (const void *)ADXL345 },
- { .compatible = "adi,adxl375", .data = (const void *)ADXL375 },
+ { .compatible = "adi,adxl345", .data = &adxl345_i2c_info },
+ { .compatible = "adi,adxl375", .data = &adxl375_i2c_info },
{ }
};
MODULE_DEVICE_TABLE(of, adxl345_of_match);
static const struct acpi_device_id adxl345_acpi_match[] = {
- { "ADS0345", ADXL345 },
+ { "ADS0345", (kernel_ulong_t)&adxl345_i2c_info },
{ }
};
MODULE_DEVICE_TABLE(acpi, adxl345_acpi_match);
diff --git a/drivers/iio/accel/adxl345_spi.c b/drivers/iio/accel/adxl345_spi.c
index aaade5808657..93ca349f1780 100644
--- a/drivers/iio/accel/adxl345_spi.c
+++ b/drivers/iio/accel/adxl345_spi.c
@@ -36,22 +36,32 @@ static int adxl345_spi_probe(struct spi_device *spi)
return adxl345_core_probe(&spi->dev, regmap);
}
+static const struct adxl345_chip_info adxl345_spi_info = {
+ .name = "adxl345",
+ .uscale = ADXL345_USCALE,
+};
+
+static const struct adxl345_chip_info adxl375_spi_info = {
+ .name = "adxl375",
+ .uscale = ADXL375_USCALE,
+};
+
static const struct spi_device_id adxl345_spi_id[] = {
- { "adxl345", ADXL345 },
- { "adxl375", ADXL375 },
+ { "adxl345", (kernel_ulong_t)&adxl345_spi_info },
+ { "adxl375", (kernel_ulong_t)&adxl375_spi_info },
{ }
};
MODULE_DEVICE_TABLE(spi, adxl345_spi_id);
static const struct of_device_id adxl345_of_match[] = {
- { .compatible = "adi,adxl345", .data = (const void *)ADXL345 },
- { .compatible = "adi,adxl375", .data = (const void *)ADXL375 },
+ { .compatible = "adi,adxl345", .data = &adxl345_spi_info },
+ { .compatible = "adi,adxl375", .data = &adxl375_spi_info },
{ }
};
MODULE_DEVICE_TABLE(of, adxl345_of_match);
static const struct acpi_device_id adxl345_acpi_match[] = {
- { "ADS0345", ADXL345 },
+ { "ADS0345", (kernel_ulong_t)&adxl345_spi_info },
{ }
};
MODULE_DEVICE_TABLE(acpi, adxl345_acpi_match);
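The same chip-info pattern recurs in the I2C, SPI and ACPI tables above: .driver_data is a kernel_ulong_t, so the const pointer is cast in when the table is defined and recovered as a plain pointer at probe time. A minimal illustration (the helper name is invented; the lookup call is the one the patch relies on):

	/* Sketch: recovering the chip_info pointer stored in the match tables above. */
	static const struct adxl345_chip_info *adxl345_get_info(struct device *dev)
	{
		/*
		 * Covers OF and ACPI matches; bare id-table matches need the bus
		 * helpers (i2c_get_match_data()/spi_get_device_match_data()), as
		 * the bma180 and kx022a hunks below use.
		 */
		return device_get_match_data(dev);
	}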
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 13439f52d26d..ab4fccb24b6c 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -926,7 +926,6 @@ static int bma180_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct bma180_data *data;
struct iio_dev *indio_dev;
- enum chip_ids chip;
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
@@ -936,11 +935,7 @@ static int bma180_probe(struct i2c_client *client)
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
- if (client->dev.of_node)
- chip = (uintptr_t)of_device_get_match_data(dev);
- else
- chip = id->driver_data;
- data->part_info = &bma180_part_info[chip];
+ data->part_info = i2c_get_match_data(client);
ret = iio_read_mount_matrix(dev, &data->orientation);
if (ret)
@@ -1092,11 +1087,11 @@ static int bma180_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(bma180_pm_ops, bma180_suspend, bma180_resume);
static const struct i2c_device_id bma180_ids[] = {
- { "bma023", BMA023 },
- { "bma150", BMA150 },
- { "bma180", BMA180 },
- { "bma250", BMA250 },
- { "smb380", BMA150 },
+ { "bma023", (kernel_ulong_t)&bma180_part_info[BMA023] },
+ { "bma150", (kernel_ulong_t)&bma180_part_info[BMA150] },
+ { "bma180", (kernel_ulong_t)&bma180_part_info[BMA180] },
+ { "bma250", (kernel_ulong_t)&bma180_part_info[BMA250] },
+ { "smb380", (kernel_ulong_t)&bma180_part_info[BMA150] },
{ }
};
@@ -1105,23 +1100,23 @@ MODULE_DEVICE_TABLE(i2c, bma180_ids);
static const struct of_device_id bma180_of_match[] = {
{
.compatible = "bosch,bma023",
- .data = (void *)BMA023
+ .data = &bma180_part_info[BMA023]
},
{
.compatible = "bosch,bma150",
- .data = (void *)BMA150
+ .data = &bma180_part_info[BMA150]
},
{
.compatible = "bosch,bma180",
- .data = (void *)BMA180
+ .data = &bma180_part_info[BMA180]
},
{
.compatible = "bosch,bma250",
- .data = (void *)BMA250
+ .data = &bma180_part_info[BMA250]
},
{
.compatible = "bosch,smb380",
- .data = (void *)BMA150
+ .data = &bma180_part_info[BMA150]
},
{ }
};
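The bma180 conversion relies on i2c_get_match_data(), which first tries the firmware-node match data and then falls back to the driver's i2c_device_id table, so the explicit of_node branch can go away. Roughly what the helper does for this driver (a sketch, not the helper's actual source):

	const void *data = device_get_match_data(&client->dev);
	if (!data) {
		const struct i2c_device_id *id = i2c_client_get_device_id(client);

		data = id ? (const void *)id->driver_data : NULL;
	}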
diff --git a/drivers/iio/accel/fxls8962af.h b/drivers/iio/accel/fxls8962af.h
index 9cbe98c3ba9a..6eaa2803b26f 100644
--- a/drivers/iio/accel/fxls8962af.h
+++ b/drivers/iio/accel/fxls8962af.h
@@ -14,7 +14,6 @@ enum {
};
int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq);
-int fxls8962af_core_remove(struct device *dev);
extern const struct dev_pm_ops fxls8962af_pm_ops;
extern const struct regmap_config fxls8962af_i2c_regmap_conf;
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index 5eac7ea19993..9b7a73a4c48a 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -422,7 +422,7 @@ error_remove_trigger:
}
/* Function to deinitialize the processing for usage id */
-static int hid_accel_3d_remove(struct platform_device *pdev)
+static void hid_accel_3d_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
@@ -431,8 +431,6 @@ static int hid_accel_3d_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, hsdev->usage);
iio_device_unregister(indio_dev);
hid_sensor_remove_trigger(indio_dev, &accel_state->common_attributes);
-
- return 0;
}
static const struct platform_device_id hid_accel_3d_ids[] = {
@@ -454,7 +452,7 @@ static struct platform_driver hid_accel_3d_platform_driver = {
.pm = &hid_sensor_pm_ops,
},
.probe = hid_accel_3d_probe,
- .remove = hid_accel_3d_remove,
+ .remove_new = hid_accel_3d_remove,
};
module_platform_driver(hid_accel_3d_platform_driver);
diff --git a/drivers/iio/accel/kionix-kx022a-i2c.c b/drivers/iio/accel/kionix-kx022a-i2c.c
index b0ac78e85dad..8a1d4fc28ddd 100644
--- a/drivers/iio/accel/kionix-kx022a-i2c.c
+++ b/drivers/iio/accel/kionix-kx022a-i2c.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022 ROHM Semiconductors
*
- * ROHM/KIONIX KX022A accelerometer driver
+ * ROHM/KIONIX accelerometer driver
*/
#include <linux/i2c.h>
@@ -15,6 +15,7 @@
static int kx022a_i2c_probe(struct i2c_client *i2c)
{
struct device *dev = &i2c->dev;
+ const struct kx022a_chip_info *chip_info;
struct regmap *regmap;
if (!i2c->irq) {
@@ -22,16 +23,30 @@ static int kx022a_i2c_probe(struct i2c_client *i2c)
return -EINVAL;
}
- regmap = devm_regmap_init_i2c(i2c, &kx022a_regmap);
+ chip_info = i2c_get_match_data(i2c);
+ if (!chip_info)
+ return -EINVAL;
+
+ regmap = devm_regmap_init_i2c(i2c, chip_info->regmap_config);
if (IS_ERR(regmap))
return dev_err_probe(dev, PTR_ERR(regmap),
"Failed to initialize Regmap\n");
- return kx022a_probe_internal(dev);
+ return kx022a_probe_internal(dev, chip_info);
}
+static const struct i2c_device_id kx022a_i2c_id[] = {
+ { .name = "kx022a", .driver_data = (kernel_ulong_t)&kx022a_chip_info },
+ { .name = "kx132-1211", .driver_data = (kernel_ulong_t)&kx132_chip_info },
+ { .name = "kx132acr-lbz", .driver_data = (kernel_ulong_t)&kx132acr_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, kx022a_i2c_id);
+
static const struct of_device_id kx022a_of_match[] = {
- { .compatible = "kionix,kx022a", },
+ { .compatible = "kionix,kx022a", .data = &kx022a_chip_info },
+ { .compatible = "kionix,kx132-1211", .data = &kx132_chip_info },
+ { .compatible = "rohm,kx132acr-lbz", .data = &kx132acr_chip_info },
{ }
};
MODULE_DEVICE_TABLE(of, kx022a_of_match);
@@ -43,6 +58,7 @@ static struct i2c_driver kx022a_i2c_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = kx022a_i2c_probe,
+ .id_table = kx022a_i2c_id,
};
module_i2c_driver(kx022a_i2c_driver);
diff --git a/drivers/iio/accel/kionix-kx022a-spi.c b/drivers/iio/accel/kionix-kx022a-spi.c
index f45a46899a5f..f798b964d0b5 100644
--- a/drivers/iio/accel/kionix-kx022a-spi.c
+++ b/drivers/iio/accel/kionix-kx022a-spi.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022 ROHM Semiconductors
*
- * ROHM/KIONIX KX022A accelerometer driver
+ * ROHM/KIONIX accelerometer driver
*/
#include <linux/interrupt.h>
@@ -15,6 +15,7 @@
static int kx022a_spi_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ const struct kx022a_chip_info *chip_info;
struct regmap *regmap;
if (!spi->irq) {
@@ -22,22 +23,30 @@ static int kx022a_spi_probe(struct spi_device *spi)
return -EINVAL;
}
- regmap = devm_regmap_init_spi(spi, &kx022a_regmap);
+ chip_info = spi_get_device_match_data(spi);
+ if (!chip_info)
+ return -EINVAL;
+
+ regmap = devm_regmap_init_spi(spi, chip_info->regmap_config);
if (IS_ERR(regmap))
return dev_err_probe(dev, PTR_ERR(regmap),
"Failed to initialize Regmap\n");
- return kx022a_probe_internal(dev);
+ return kx022a_probe_internal(dev, chip_info);
}
static const struct spi_device_id kx022a_id[] = {
- { "kx022a" },
+ { .name = "kx022a", .driver_data = (kernel_ulong_t)&kx022a_chip_info },
+ { .name = "kx132-1211", .driver_data = (kernel_ulong_t)&kx132_chip_info },
+ { .name = "kx132acr-lbz", .driver_data = (kernel_ulong_t)&kx132acr_chip_info },
{ }
};
MODULE_DEVICE_TABLE(spi, kx022a_id);
static const struct of_device_id kx022a_of_match[] = {
- { .compatible = "kionix,kx022a", },
+ { .compatible = "kionix,kx022a", .data = &kx022a_chip_info },
+ { .compatible = "kionix,kx132-1211", .data = &kx132_chip_info },
+ { .compatible = "rohm,kx132acr-lbz", .data = &kx132acr_chip_info },
{ }
};
MODULE_DEVICE_TABLE(of, kx022a_of_match);
diff --git a/drivers/iio/accel/kionix-kx022a.c b/drivers/iio/accel/kionix-kx022a.c
index 4ea3c6718ed4..60864be3a667 100644
--- a/drivers/iio/accel/kionix-kx022a.c
+++ b/drivers/iio/accel/kionix-kx022a.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022 ROHM Semiconductors
*
- * ROHM/KIONIX KX022A accelerometer driver
+ * ROHM/KIONIX accelerometer driver
*/
#include <linux/delay.h>
@@ -15,7 +15,7 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
-#include <linux/string_helpers.h>
+#include <linux/string_choices.h>
#include <linux/units.h>
#include <linux/iio/iio.h>
@@ -48,7 +48,7 @@ enum {
KX022A_STATE_FIFO,
};
-/* Regmap configs */
+/* kx022a Regmap configs */
static const struct regmap_range kx022a_volatile_ranges[] = {
{
.range_min = KX022A_REG_XHP_L,
@@ -138,7 +138,7 @@ static const struct regmap_access_table kx022a_nir_regs = {
.n_yes_ranges = ARRAY_SIZE(kx022a_noinc_read_ranges),
};
-const struct regmap_config kx022a_regmap = {
+static const struct regmap_config kx022a_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.volatile_table = &kx022a_volatile_regs,
@@ -149,10 +149,121 @@ const struct regmap_config kx022a_regmap = {
.max_register = KX022A_MAX_REGISTER,
.cache_type = REGCACHE_RBTREE,
};
-EXPORT_SYMBOL_NS_GPL(kx022a_regmap, IIO_KX022A);
+
+/* kx132 Regmap configs */
+static const struct regmap_range kx132_volatile_ranges[] = {
+ {
+ .range_min = KX132_REG_XADP_L,
+ .range_max = KX132_REG_COTR,
+ }, {
+ .range_min = KX132_REG_TSCP,
+ .range_max = KX132_REG_INT_REL,
+ }, {
+ /* The reset bit will be cleared by sensor */
+ .range_min = KX132_REG_CNTL2,
+ .range_max = KX132_REG_CNTL2,
+ }, {
+ .range_min = KX132_REG_CNTL5,
+ .range_max = KX132_REG_CNTL5,
+ }, {
+ .range_min = KX132_REG_BUF_STATUS_1,
+ .range_max = KX132_REG_BUF_READ,
+ },
+};
+
+static const struct regmap_access_table kx132_volatile_regs = {
+ .yes_ranges = &kx132_volatile_ranges[0],
+ .n_yes_ranges = ARRAY_SIZE(kx132_volatile_ranges),
+};
+
+static const struct regmap_range kx132_precious_ranges[] = {
+ {
+ .range_min = KX132_REG_INT_REL,
+ .range_max = KX132_REG_INT_REL,
+ },
+};
+
+static const struct regmap_access_table kx132_precious_regs = {
+ .yes_ranges = &kx132_precious_ranges[0],
+ .n_yes_ranges = ARRAY_SIZE(kx132_precious_ranges),
+};
+
+static const struct regmap_range kx132_read_only_ranges[] = {
+ {
+ .range_min = KX132_REG_XADP_L,
+ .range_max = KX132_REG_INT_REL,
+ }, {
+ .range_min = KX132_REG_BUF_STATUS_1,
+ .range_max = KX132_REG_BUF_STATUS_2,
+ }, {
+ .range_min = KX132_REG_BUF_READ,
+ .range_max = KX132_REG_BUF_READ,
+ }, {
+ /* Kionix reserved registers: should not be written */
+ .range_min = 0x28,
+ .range_max = 0x28,
+ }, {
+ .range_min = 0x35,
+ .range_max = 0x36,
+ }, {
+ .range_min = 0x3c,
+ .range_max = 0x48,
+ }, {
+ .range_min = 0x4e,
+ .range_max = 0x5c,
+ }, {
+ .range_min = 0x77,
+ .range_max = 0x7f,
+ },
+};
+
+static const struct regmap_access_table kx132_ro_regs = {
+ .no_ranges = &kx132_read_only_ranges[0],
+ .n_no_ranges = ARRAY_SIZE(kx132_read_only_ranges),
+};
+
+static const struct regmap_range kx132_write_only_ranges[] = {
+ {
+ .range_min = KX132_REG_SELF_TEST,
+ .range_max = KX132_REG_SELF_TEST,
+ }, {
+ .range_min = KX132_REG_BUF_CLEAR,
+ .range_max = KX132_REG_BUF_CLEAR,
+ },
+};
+
+static const struct regmap_access_table kx132_wo_regs = {
+ .no_ranges = &kx132_write_only_ranges[0],
+ .n_no_ranges = ARRAY_SIZE(kx132_write_only_ranges),
+};
+
+static const struct regmap_range kx132_noinc_read_ranges[] = {
+ {
+ .range_min = KX132_REG_BUF_READ,
+ .range_max = KX132_REG_BUF_READ,
+ },
+};
+
+static const struct regmap_access_table kx132_nir_regs = {
+ .yes_ranges = &kx132_noinc_read_ranges[0],
+ .n_yes_ranges = ARRAY_SIZE(kx132_noinc_read_ranges),
+};
+
+static const struct regmap_config kx132_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_table = &kx132_volatile_regs,
+ .rd_table = &kx132_wo_regs,
+ .wr_table = &kx132_ro_regs,
+ .rd_noinc_table = &kx132_nir_regs,
+ .precious_table = &kx132_precious_regs,
+ .max_register = KX132_MAX_REGISTER,
+ .cache_type = REGCACHE_RBTREE,
+};
struct kx022a_data {
struct regmap *regmap;
+ const struct kx022a_chip_info *chip_info;
struct iio_trigger *trig;
struct device *dev;
struct iio_mount_matrix orientation;
@@ -175,6 +286,8 @@ struct kx022a_data {
struct mutex mutex;
u8 watermark;
+ __le16 *fifo_buffer;
+
/* 3 x 16bit accel data + timestamp */
__le16 buffer[8] __aligned(IIO_DMA_MINALIGN);
struct {
@@ -208,7 +321,7 @@ static const struct iio_chan_spec_ext_info kx022a_ext_info[] = {
{ }
};
-#define KX022A_ACCEL_CHAN(axis, index) \
+#define KX022A_ACCEL_CHAN(axis, reg, index) \
{ \
.type = IIO_ACCEL, \
.modified = 1, \
@@ -220,7 +333,7 @@ static const struct iio_chan_spec_ext_info kx022a_ext_info[] = {
BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_SAMP_FREQ), \
.ext_info = kx022a_ext_info, \
- .address = KX022A_REG_##axis##OUT_L, \
+ .address = reg, \
.scan_index = index, \
.scan_type = { \
.sign = 's', \
@@ -231,9 +344,16 @@ static const struct iio_chan_spec_ext_info kx022a_ext_info[] = {
}
static const struct iio_chan_spec kx022a_channels[] = {
- KX022A_ACCEL_CHAN(X, 0),
- KX022A_ACCEL_CHAN(Y, 1),
- KX022A_ACCEL_CHAN(Z, 2),
+ KX022A_ACCEL_CHAN(X, KX022A_REG_XOUT_L, 0),
+ KX022A_ACCEL_CHAN(Y, KX022A_REG_YOUT_L, 1),
+ KX022A_ACCEL_CHAN(Z, KX022A_REG_ZOUT_L, 2),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+static const struct iio_chan_spec kx132_channels[] = {
+ KX022A_ACCEL_CHAN(X, KX132_REG_XOUT_L, 0),
+ KX022A_ACCEL_CHAN(Y, KX132_REG_YOUT_L, 1),
+ KX022A_ACCEL_CHAN(Z, KX132_REG_ZOUT_L, 2),
IIO_CHAN_SOFT_TIMESTAMP(3),
};
@@ -332,16 +452,15 @@ static int kx022a_turn_on_off_unlocked(struct kx022a_data *data, bool on)
int ret;
if (on)
- ret = regmap_set_bits(data->regmap, KX022A_REG_CNTL,
+ ret = regmap_set_bits(data->regmap, data->chip_info->cntl,
KX022A_MASK_PC1);
else
- ret = regmap_clear_bits(data->regmap, KX022A_REG_CNTL,
+ ret = regmap_clear_bits(data->regmap, data->chip_info->cntl,
KX022A_MASK_PC1);
if (ret)
dev_err(data->dev, "Turn %s fail %d\n", str_on_off(on), ret);
return ret;
-
}
static int kx022a_turn_off_lock(struct kx022a_data *data)
@@ -403,7 +522,7 @@ static int kx022a_write_raw(struct iio_dev *idev,
break;
ret = regmap_update_bits(data->regmap,
- KX022A_REG_ODCNTL,
+ data->chip_info->odcntl,
KX022A_MASK_ODR, n);
data->odr_ns = kx022a_odrs[n];
kx022a_turn_on_unlock(data);
@@ -424,7 +543,7 @@ static int kx022a_write_raw(struct iio_dev *idev,
if (ret)
break;
- ret = regmap_update_bits(data->regmap, KX022A_REG_CNTL,
+ ret = regmap_update_bits(data->regmap, data->chip_info->cntl,
KX022A_MASK_GSEL,
n << KX022A_GSEL_SHIFT);
kx022a_turn_on_unlock(data);
@@ -446,7 +565,7 @@ static int kx022a_fifo_set_wmi(struct kx022a_data *data)
threshold = data->watermark;
- return regmap_update_bits(data->regmap, KX022A_REG_BUF_CNTL1,
+ return regmap_update_bits(data->regmap, data->chip_info->buf_cntl1,
KX022A_MASK_WM_TH, threshold);
}
@@ -489,7 +608,7 @@ static int kx022a_read_raw(struct iio_dev *idev,
return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = regmap_read(data->regmap, KX022A_REG_ODCNTL, &regval);
+ ret = regmap_read(data->regmap, data->chip_info->odcntl, &regval);
if (ret)
return ret;
@@ -504,7 +623,7 @@ static int kx022a_read_raw(struct iio_dev *idev,
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SCALE:
- ret = regmap_read(data->regmap, KX022A_REG_CNTL, &regval);
+ ret = regmap_read(data->regmap, data->chip_info->cntl, &regval);
if (ret < 0)
return ret;
@@ -520,8 +639,7 @@ static int kx022a_set_watermark(struct iio_dev *idev, unsigned int val)
{
struct kx022a_data *data = iio_priv(idev);
- if (val > KX022A_FIFO_LENGTH)
- val = KX022A_FIFO_LENGTH;
+ val = min(data->chip_info->fifo_length, val);
mutex_lock(&data->mutex);
data->watermark = val;
@@ -582,30 +700,56 @@ static int kx022a_drop_fifo_contents(struct kx022a_data *data)
*/
data->timestamp = 0;
- return regmap_write(data->regmap, KX022A_REG_BUF_CLEAR, 0x0);
+ return regmap_write(data->regmap, data->chip_info->buf_clear, 0x0);
+}
+
+static int kx022a_get_fifo_bytes_available(struct kx022a_data *data)
+{
+ int ret, fifo_bytes;
+
+ ret = regmap_read(data->regmap, KX022A_REG_BUF_STATUS_1, &fifo_bytes);
+ if (ret) {
+ dev_err(data->dev, "Error reading buffer status\n");
+ return ret;
+ }
+
+ if (fifo_bytes == KX022A_FIFO_FULL_VALUE)
+ return KX022A_FIFO_MAX_BYTES;
+
+ return fifo_bytes;
+}
+
+static int kx132_get_fifo_bytes_available(struct kx022a_data *data)
+{
+ __le16 buf_status;
+ int ret, fifo_bytes;
+
+ ret = regmap_bulk_read(data->regmap, data->chip_info->buf_status1,
+ &buf_status, sizeof(buf_status));
+ if (ret) {
+ dev_err(data->dev, "Error reading buffer status\n");
+ return ret;
+ }
+
+ fifo_bytes = le16_to_cpu(buf_status);
+ fifo_bytes &= data->chip_info->buf_smp_lvl_mask;
+ fifo_bytes = min((unsigned int)fifo_bytes, data->chip_info->fifo_length *
+ KX022A_FIFO_SAMPLES_SIZE_BYTES);
+
+ return fifo_bytes;
}
static int __kx022a_fifo_flush(struct iio_dev *idev, unsigned int samples,
bool irq)
{
struct kx022a_data *data = iio_priv(idev);
- struct device *dev = regmap_get_device(data->regmap);
- __le16 buffer[KX022A_FIFO_LENGTH * 3];
uint64_t sample_period;
int count, fifo_bytes;
bool renable = false;
int64_t tstamp;
int ret, i;
- ret = regmap_read(data->regmap, KX022A_REG_BUF_STATUS_1, &fifo_bytes);
- if (ret) {
- dev_err(dev, "Error reading buffer status\n");
- return ret;
- }
-
- /* Let's not overflow if we for some reason get bogus value from i2c */
- if (fifo_bytes == KX022A_FIFO_FULL_VALUE)
- fifo_bytes = KX022A_FIFO_MAX_BYTES;
+ fifo_bytes = data->chip_info->get_fifo_bytes_available(data);
if (fifo_bytes % KX022A_FIFO_SAMPLES_SIZE_BYTES)
dev_warn(data->dev, "Bad FIFO alignment. Data may be corrupt\n");
@@ -669,13 +813,13 @@ static int __kx022a_fifo_flush(struct iio_dev *idev, unsigned int samples,
}
fifo_bytes = count * KX022A_FIFO_SAMPLES_SIZE_BYTES;
- ret = regmap_noinc_read(data->regmap, KX022A_REG_BUF_READ,
- &buffer[0], fifo_bytes);
+ ret = regmap_noinc_read(data->regmap, data->chip_info->buf_read,
+ data->fifo_buffer, fifo_bytes);
if (ret)
goto renable_out;
for (i = 0; i < count; i++) {
- __le16 *sam = &buffer[i * 3];
+ __le16 *sam = &data->fifo_buffer[i * 3];
__le16 *chs;
int bit;
@@ -722,10 +866,10 @@ static const struct iio_info kx022a_info = {
static int kx022a_set_drdy_irq(struct kx022a_data *data, bool en)
{
if (en)
- return regmap_set_bits(data->regmap, KX022A_REG_CNTL,
+ return regmap_set_bits(data->regmap, data->chip_info->cntl,
KX022A_MASK_DRDY);
- return regmap_clear_bits(data->regmap, KX022A_REG_CNTL,
+ return regmap_clear_bits(data->regmap, data->chip_info->cntl,
KX022A_MASK_DRDY);
}
@@ -760,7 +904,7 @@ static int kx022a_fifo_disable(struct kx022a_data *data)
if (ret)
goto unlock_out;
- ret = regmap_clear_bits(data->regmap, KX022A_REG_BUF_CNTL2,
+ ret = regmap_clear_bits(data->regmap, data->chip_info->buf_cntl2,
KX022A_MASK_BUF_EN);
if (ret)
goto unlock_out;
@@ -769,6 +913,8 @@ static int kx022a_fifo_disable(struct kx022a_data *data)
kx022a_drop_fifo_contents(data);
+ kfree(data->fifo_buffer);
+
return kx022a_turn_on_unlock(data);
unlock_out:
@@ -791,6 +937,12 @@ static int kx022a_fifo_enable(struct kx022a_data *data)
{
int ret;
+ data->fifo_buffer = kmalloc_array(data->chip_info->fifo_length,
+ KX022A_FIFO_SAMPLES_SIZE_BYTES,
+ GFP_KERNEL);
+ if (!data->fifo_buffer)
+ return -ENOMEM;
+
ret = kx022a_turn_off_lock(data);
if (ret)
return ret;
@@ -801,7 +953,7 @@ static int kx022a_fifo_enable(struct kx022a_data *data)
goto unlock_out;
/* Enable buffer */
- ret = regmap_set_bits(data->regmap, KX022A_REG_BUF_CNTL2,
+ ret = regmap_set_bits(data->regmap, data->chip_info->buf_cntl2,
KX022A_MASK_BUF_EN);
if (ret)
goto unlock_out;
@@ -847,7 +999,7 @@ static irqreturn_t kx022a_trigger_handler(int irq, void *p)
struct kx022a_data *data = iio_priv(idev);
int ret;
- ret = regmap_bulk_read(data->regmap, KX022A_REG_XOUT_L, data->buffer,
+ ret = regmap_bulk_read(data->regmap, data->chip_info->xout_l, data->buffer,
KX022A_FIFO_SAMPLES_SIZE_BYTES);
if (ret < 0)
goto err_read;
@@ -895,7 +1047,7 @@ static irqreturn_t kx022a_irq_thread_handler(int irq, void *private)
if (data->state & KX022A_STATE_FIFO) {
int ok;
- ok = __kx022a_fifo_flush(idev, KX022A_FIFO_LENGTH, true);
+ ok = __kx022a_fifo_flush(idev, data->chip_info->fifo_length, true);
if (ok > 0)
ret = IRQ_HANDLED;
}
@@ -948,7 +1100,7 @@ static int kx022a_chip_init(struct kx022a_data *data)
int ret, val;
/* Reset the sensor */
- ret = regmap_write(data->regmap, KX022A_REG_CNTL2, KX022A_MASK_SRST);
+ ret = regmap_write(data->regmap, data->chip_info->cntl2, KX022A_MASK_SRST);
if (ret)
return ret;
@@ -958,7 +1110,7 @@ static int kx022a_chip_init(struct kx022a_data *data)
*/
msleep(1);
- ret = regmap_read_poll_timeout(data->regmap, KX022A_REG_CNTL2, val,
+ ret = regmap_read_poll_timeout(data->regmap, data->chip_info->cntl2, val,
!(val & KX022A_MASK_SRST),
KX022A_SOFT_RESET_WAIT_TIME_US,
KX022A_SOFT_RESET_TOTAL_WAIT_TIME_US);
@@ -968,14 +1120,14 @@ static int kx022a_chip_init(struct kx022a_data *data)
return ret;
}
- ret = regmap_reinit_cache(data->regmap, &kx022a_regmap);
+ ret = regmap_reinit_cache(data->regmap, data->chip_info->regmap_config);
if (ret) {
dev_err(data->dev, "Failed to reinit reg cache\n");
return ret;
}
/* set data res 16bit */
- ret = regmap_set_bits(data->regmap, KX022A_REG_BUF_CNTL2,
+ ret = regmap_set_bits(data->regmap, data->chip_info->buf_cntl2,
KX022A_MASK_BRES16);
if (ret) {
dev_err(data->dev, "Failed to set data resolution\n");
@@ -985,7 +1137,90 @@ static int kx022a_chip_init(struct kx022a_data *data)
return kx022a_prepare_irq_pin(data);
}
-int kx022a_probe_internal(struct device *dev)
+const struct kx022a_chip_info kx022a_chip_info = {
+ .name = "kx022-accel",
+ .regmap_config = &kx022a_regmap_config,
+ .channels = kx022a_channels,
+ .num_channels = ARRAY_SIZE(kx022a_channels),
+ .fifo_length = KX022A_FIFO_LENGTH,
+ .who = KX022A_REG_WHO,
+ .id = KX022A_ID,
+ .cntl = KX022A_REG_CNTL,
+ .cntl2 = KX022A_REG_CNTL2,
+ .odcntl = KX022A_REG_ODCNTL,
+ .buf_cntl1 = KX022A_REG_BUF_CNTL1,
+ .buf_cntl2 = KX022A_REG_BUF_CNTL2,
+ .buf_clear = KX022A_REG_BUF_CLEAR,
+ .buf_status1 = KX022A_REG_BUF_STATUS_1,
+ .buf_read = KX022A_REG_BUF_READ,
+ .inc1 = KX022A_REG_INC1,
+ .inc4 = KX022A_REG_INC4,
+ .inc5 = KX022A_REG_INC5,
+ .inc6 = KX022A_REG_INC6,
+ .xout_l = KX022A_REG_XOUT_L,
+ .get_fifo_bytes_available = kx022a_get_fifo_bytes_available,
+};
+EXPORT_SYMBOL_NS_GPL(kx022a_chip_info, IIO_KX022A);
+
+const struct kx022a_chip_info kx132_chip_info = {
+ .name = "kx132-1211",
+ .regmap_config = &kx132_regmap_config,
+ .channels = kx132_channels,
+ .num_channels = ARRAY_SIZE(kx132_channels),
+ .fifo_length = KX132_FIFO_LENGTH,
+ .who = KX132_REG_WHO,
+ .id = KX132_ID,
+ .cntl = KX132_REG_CNTL,
+ .cntl2 = KX132_REG_CNTL2,
+ .odcntl = KX132_REG_ODCNTL,
+ .buf_cntl1 = KX132_REG_BUF_CNTL1,
+ .buf_cntl2 = KX132_REG_BUF_CNTL2,
+ .buf_clear = KX132_REG_BUF_CLEAR,
+ .buf_status1 = KX132_REG_BUF_STATUS_1,
+ .buf_smp_lvl_mask = KX132_MASK_BUF_SMP_LVL,
+ .buf_read = KX132_REG_BUF_READ,
+ .inc1 = KX132_REG_INC1,
+ .inc4 = KX132_REG_INC4,
+ .inc5 = KX132_REG_INC5,
+ .inc6 = KX132_REG_INC6,
+ .xout_l = KX132_REG_XOUT_L,
+ .get_fifo_bytes_available = kx132_get_fifo_bytes_available,
+};
+EXPORT_SYMBOL_NS_GPL(kx132_chip_info, IIO_KX022A);
+
+/*
+ * Despite the naming, the KX132ACR-LBZ is not similar to the KX132-1211 but is
+ * an exact subset of the KX022A. The KX132ACR-LBZ is meant for industrial
+ * applications; the tap/double-tap, free-fall and tilt engines were removed.
+ * The rest of the registers and functionality (excluding the ID register)
+ * match the KX022A exactly.
+ */
+const struct kx022a_chip_info kx132acr_chip_info = {
+ .name = "kx132acr-lbz",
+ .regmap_config = &kx022a_regmap_config,
+ .channels = kx022a_channels,
+ .num_channels = ARRAY_SIZE(kx022a_channels),
+ .fifo_length = KX022A_FIFO_LENGTH,
+ .who = KX022A_REG_WHO,
+ .id = KX132ACR_LBZ_ID,
+ .cntl = KX022A_REG_CNTL,
+ .cntl2 = KX022A_REG_CNTL2,
+ .odcntl = KX022A_REG_ODCNTL,
+ .buf_cntl1 = KX022A_REG_BUF_CNTL1,
+ .buf_cntl2 = KX022A_REG_BUF_CNTL2,
+ .buf_clear = KX022A_REG_BUF_CLEAR,
+ .buf_status1 = KX022A_REG_BUF_STATUS_1,
+ .buf_read = KX022A_REG_BUF_READ,
+ .inc1 = KX022A_REG_INC1,
+ .inc4 = KX022A_REG_INC4,
+ .inc5 = KX022A_REG_INC5,
+ .inc6 = KX022A_REG_INC6,
+ .xout_l = KX022A_REG_XOUT_L,
+ .get_fifo_bytes_available = kx022a_get_fifo_bytes_available,
+};
+EXPORT_SYMBOL_NS_GPL(kx132acr_chip_info, IIO_KX022A);
+
+int kx022a_probe_internal(struct device *dev, const struct kx022a_chip_info *chip_info)
{
static const char * const regulator_names[] = {"io-vdd", "vdd"};
struct iio_trigger *indio_trig;
@@ -1012,6 +1247,7 @@ int kx022a_probe_internal(struct device *dev)
return -ENOMEM;
data = iio_priv(idev);
+ data->chip_info = chip_info;
/*
* VDD is the analog and digital domain voltage supply and
@@ -1022,26 +1258,24 @@ int kx022a_probe_internal(struct device *dev)
if (ret && ret != -ENODEV)
return dev_err_probe(dev, ret, "failed to enable regulator\n");
- ret = regmap_read(regmap, KX022A_REG_WHO, &chip_id);
+ ret = regmap_read(regmap, chip_info->who, &chip_id);
if (ret)
return dev_err_probe(dev, ret, "Failed to access sensor\n");
- if (chip_id != KX022A_ID) {
- dev_err(dev, "unsupported device 0x%x\n", chip_id);
- return -EINVAL;
- }
+ if (chip_id != chip_info->id)
+ dev_warn(dev, "unknown device 0x%x\n", chip_id);
irq = fwnode_irq_get_byname(fwnode, "INT1");
if (irq > 0) {
- data->inc_reg = KX022A_REG_INC1;
- data->ien_reg = KX022A_REG_INC4;
+ data->inc_reg = chip_info->inc1;
+ data->ien_reg = chip_info->inc4;
} else {
irq = fwnode_irq_get_byname(fwnode, "INT2");
if (irq < 0)
return dev_err_probe(dev, irq, "No suitable IRQ\n");
- data->inc_reg = KX022A_REG_INC5;
- data->ien_reg = KX022A_REG_INC6;
+ data->inc_reg = chip_info->inc5;
+ data->ien_reg = chip_info->inc6;
}
data->regmap = regmap;
@@ -1050,9 +1284,9 @@ int kx022a_probe_internal(struct device *dev)
data->odr_ns = KX022A_DEFAULT_PERIOD_NS;
mutex_init(&data->mutex);
- idev->channels = kx022a_channels;
- idev->num_channels = ARRAY_SIZE(kx022a_channels);
- idev->name = "kx022-accel";
+ idev->channels = chip_info->channels;
+ idev->num_channels = chip_info->num_channels;
+ idev->name = chip_info->name;
idev->info = &kx022a_info;
idev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
idev->available_scan_masks = kx022a_scan_masks;
@@ -1110,7 +1344,6 @@ int kx022a_probe_internal(struct device *dev)
if (ret)
return dev_err_probe(data->dev, ret, "Could not request IRQ\n");
-
ret = devm_iio_trigger_register(dev, indio_trig);
if (ret)
return dev_err_probe(data->dev, ret,
diff --git a/drivers/iio/accel/kionix-kx022a.h b/drivers/iio/accel/kionix-kx022a.h
index 12424649d438..7060438ad88c 100644
--- a/drivers/iio/accel/kionix-kx022a.h
+++ b/drivers/iio/accel/kionix-kx022a.h
@@ -13,6 +13,7 @@
#define KX022A_REG_WHO 0x0f
#define KX022A_ID 0xc8
+#define KX132ACR_LBZ_ID 0xd8
#define KX022A_REG_CNTL2 0x19
#define KX022A_MASK_SRST BIT(7)
@@ -74,9 +75,118 @@
#define KX022A_REG_SELF_TEST 0x60
#define KX022A_MAX_REGISTER 0x60
+#define KX132_REG_WHO 0x13
+#define KX132_ID 0x3d
+
+#define KX132_FIFO_LENGTH 86
+
+#define KX132_REG_CNTL 0x1b
+#define KX132_REG_CNTL2 0x1c
+#define KX132_REG_CNTL5 0x1f
+#define KX132_MASK_RES BIT(6)
+#define KX132_GSEL_2 0x0
+#define KX132_GSEL_4 BIT(3)
+#define KX132_GSEL_8 BIT(4)
+#define KX132_GSEL_16 GENMASK(4, 3)
+
+#define KX132_REG_INS2 0x17
+#define KX132_MASK_INS2_WMI BIT(5)
+
+#define KX132_REG_XADP_L 0x02
+#define KX132_REG_XOUT_L 0x08
+#define KX132_REG_YOUT_L 0x0a
+#define KX132_REG_ZOUT_L 0x0c
+#define KX132_REG_COTR 0x12
+#define KX132_REG_TSCP 0x14
+#define KX132_REG_INT_REL 0x1a
+
+#define KX132_REG_ODCNTL 0x21
+
+#define KX132_REG_BTS_WUF_TH 0x4a
+
+#define KX132_REG_BUF_CNTL1 0x5e
+#define KX132_REG_BUF_CNTL2 0x5f
+#define KX132_REG_BUF_STATUS_1 0x60
+#define KX132_REG_BUF_STATUS_2 0x61
+#define KX132_MASK_BUF_SMP_LVL GENMASK(9, 0)
+#define KX132_REG_BUF_CLEAR 0x62
+#define KX132_REG_BUF_READ 0x63
+#define KX132_ODR_SHIFT 3
+#define KX132_FIFO_MAX_WMI_TH 86
+
+#define KX132_REG_INC1 0x22
+#define KX132_REG_INC5 0x26
+#define KX132_REG_INC6 0x27
+#define KX132_IPOL_LOW 0
+#define KX132_IPOL_HIGH KX022A_MASK_IPOL
+#define KX132_ITYP_PULSE KX022A_MASK_ITYP
+
+#define KX132_REG_INC4 0x25
+
+#define KX132_REG_SELF_TEST 0x5d
+#define KX132_MAX_REGISTER 0x76
+
struct device;
-int kx022a_probe_internal(struct device *dev);
-extern const struct regmap_config kx022a_regmap;
+struct kx022a_data;
+
+/**
+ * struct kx022a_chip_info - Kionix accelerometer chip specific information
+ *
+ * @name: name of the device
+ * @regmap_config: pointer to register map configuration
+ * @channels: pointer to iio_chan_spec array
+ * @num_channels: number of iio_chan_spec channels
+ * @fifo_length: number of 16-bit samples in a full buffer
+ * @buf_smp_lvl_mask: buffer sample level mask
+ * @who: WHO_AM_I register
+ * @id: WHO_AM_I register value
+ * @cntl: control register 1
+ * @cntl2: control register 2
+ * @odcntl: output data control register
+ * @buf_cntl1: buffer control register 1
+ * @buf_cntl2: buffer control register 2
+ * @buf_clear: buffer clear register
+ * @buf_status1: buffer status register 1
+ * @buf_read: buffer read register
+ * @inc1: interrupt control register 1
+ * @inc4: interrupt control register 4
+ * @inc5: interrupt control register 5
+ * @inc6: interrupt control register 6
+ * @xout_l: x-axis output least significant byte
+ * @get_fifo_bytes_available: function pointer to get the number of
+ * acceleration data bytes currently stored in
+ * the sensor's FIFO buffer
+ */
+struct kx022a_chip_info {
+ const char *name;
+ const struct regmap_config *regmap_config;
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+ unsigned int fifo_length;
+ u16 buf_smp_lvl_mask;
+ u8 who;
+ u8 id;
+ u8 cntl;
+ u8 cntl2;
+ u8 odcntl;
+ u8 buf_cntl1;
+ u8 buf_cntl2;
+ u8 buf_clear;
+ u8 buf_status1;
+ u8 buf_read;
+ u8 inc1;
+ u8 inc4;
+ u8 inc5;
+ u8 inc6;
+ u8 xout_l;
+ int (*get_fifo_bytes_available)(struct kx022a_data *);
+};
+
+int kx022a_probe_internal(struct device *dev, const struct kx022a_chip_info *chip_info);
+
+extern const struct kx022a_chip_info kx022a_chip_info;
+extern const struct kx022a_chip_info kx132_chip_info;
+extern const struct kx022a_chip_info kx132acr_chip_info;
#endif
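With the register layout captured in struct kx022a_chip_info, supporting a further Kionix variant becomes mostly declarative. A hypothetical sketch, placed in kionix-kx022a.c where the static tables are visible (the part name is invented; a real entry fills in every register field as kx022a_chip_info above does):

	static const struct kx022a_chip_info kx9999_chip_info = {
		.name		= "kx9999",		/* hypothetical part */
		.regmap_config	= &kx022a_regmap_config,
		.channels	= kx022a_channels,
		.num_channels	= ARRAY_SIZE(kx022a_channels),
		.fifo_length	= KX022A_FIFO_LENGTH,
		/* ... remaining register offsets as in kx022a_chip_info ... */
		.get_fifo_bytes_available = kx022a_get_fifo_bytes_available,
	};

plus matching .data/.driver_data entries in the I2C and SPI match tables.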
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index f42a88711486..d3fd0318e47b 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1535,19 +1535,18 @@ static int mma8452_reset(struct i2c_client *client)
}
static const struct of_device_id mma8452_dt_ids[] = {
+ { .compatible = "fsl,fxls8471", .data = &mma_chip_info_table[fxls8471] },
{ .compatible = "fsl,mma8451", .data = &mma_chip_info_table[mma8451] },
{ .compatible = "fsl,mma8452", .data = &mma_chip_info_table[mma8452] },
{ .compatible = "fsl,mma8453", .data = &mma_chip_info_table[mma8453] },
{ .compatible = "fsl,mma8652", .data = &mma_chip_info_table[mma8652] },
{ .compatible = "fsl,mma8653", .data = &mma_chip_info_table[mma8653] },
- { .compatible = "fsl,fxls8471", .data = &mma_chip_info_table[fxls8471] },
{ }
};
MODULE_DEVICE_TABLE(of, mma8452_dt_ids);
static int mma8452_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct mma8452_data *data;
struct iio_dev *indio_dev;
int ret;
@@ -1560,15 +1559,10 @@ static int mma8452_probe(struct i2c_client *client)
data->client = client;
mutex_init(&data->lock);
- data->chip_info = device_get_match_data(&client->dev);
- if (!data->chip_info) {
- if (id) {
- data->chip_info = &mma_chip_info_table[id->driver_data];
- } else {
- dev_err(&client->dev, "unknown device model\n");
- return -ENODEV;
- }
- }
+ data->chip_info = i2c_get_match_data(client);
+ if (!data->chip_info)
+ return dev_err_probe(&client->dev, -ENODEV,
+ "unknown device model\n");
ret = iio_read_mount_matrix(&client->dev, &data->orientation);
if (ret)
@@ -1830,12 +1824,12 @@ static const struct dev_pm_ops mma8452_pm_ops = {
};
static const struct i2c_device_id mma8452_id[] = {
- { "mma8451", mma8451 },
- { "mma8452", mma8452 },
- { "mma8453", mma8453 },
- { "mma8652", mma8652 },
- { "mma8653", mma8653 },
- { "fxls8471", fxls8471 },
+ { "fxls8471", (kernel_ulong_t)&mma_chip_info_table[fxls8471] },
+ { "mma8451", (kernel_ulong_t)&mma_chip_info_table[mma8451] },
+ { "mma8452", (kernel_ulong_t)&mma_chip_info_table[mma8452] },
+ { "mma8453", (kernel_ulong_t)&mma_chip_info_table[mma8453] },
+ { "mma8652", (kernel_ulong_t)&mma_chip_info_table[mma8652] },
+ { "mma8653", (kernel_ulong_t)&mma_chip_info_table[mma8653] },
{ }
};
MODULE_DEVICE_TABLE(i2c, mma8452_id);
diff --git a/drivers/iio/accel/msa311.c b/drivers/iio/accel/msa311.c
index 6ddcc3c2f840..b8ddbfd98f11 100644
--- a/drivers/iio/accel/msa311.c
+++ b/drivers/iio/accel/msa311.c
@@ -33,7 +33,7 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#include <linux/string_helpers.h>
+#include <linux/string_choices.h>
#include <linux/units.h>
#include <linux/iio/buffer.h>
diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
index 75d142bc14b4..82e8d0b39049 100644
--- a/drivers/iio/accel/mxc4005.c
+++ b/drivers/iio/accel/mxc4005.c
@@ -476,6 +476,13 @@ static const struct acpi_device_id mxc4005_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, mxc4005_acpi_match);
+static const struct of_device_id mxc4005_of_match[] = {
+ { .compatible = "memsic,mxc4005", },
+ { .compatible = "memsic,mxc6655", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mxc4005_of_match);
+
static const struct i2c_device_id mxc4005_id[] = {
{"mxc4005", 0},
{"mxc6655", 0},
@@ -487,6 +494,7 @@ static struct i2c_driver mxc4005_driver = {
.driver = {
.name = MXC4005_DRV_NAME,
.acpi_match_table = ACPI_PTR(mxc4005_acpi_match),
+ .of_match_table = mxc4005_of_match,
},
.probe = mxc4005_probe,
.id_table = mxc4005_id,
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 517b3db114b8..35f9867da12c 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -607,6 +607,16 @@ config LPC32XX_ADC
activate only one via device tree selection. Provides direct access
via sysfs.
+config LTC2309
+ tristate "Linear Technology LTC2309 ADC driver"
+ depends on I2C
+ help
+ Say yes here to build support for Linear Technology LTC2309, a low
+ noise, low power, 8-channel, 12-bit SAR ADC.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc2309.
+
config LTC2471
tristate "Linear Technology LTC2471 and LTC2473 ADC driver"
depends on I2C
@@ -779,14 +789,29 @@ config MCP3422
This driver can also be built as a module. If so, the module will be
called mcp3422.
+config MCP3564
+ tristate "Microchip Technology MCP3461/2/4/R, MCP3561/2/4/R driver"
+ depends on SPI
+ depends on IIO
+ help
+ Say yes here to build support for Microchip Technology's MCP3461,
+ MCP3462, MCP3464, MCP3461R, MCP3462R, MCP3464R, MCP3561, MCP3562,
+ MCP3564, MCP3561R, MCP3562R and MCP3564R analog to digital
+ converters.
+
+ This driver can also be built as a module. If so, the module will be
+ called mcp3564.
+
config MCP3911
tristate "Microchip Technology MCP3911 driver"
depends on SPI
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
- Say yes here to build support for Microchip Technology's MCP3911
- analog to digital converter.
+ Say yes here to build support for one of the following
+ Microchip Technology analog to digital converters:
+ MCP3910, MCP3911, MCP3912, MCP3913, MCP3914,
+ MCP3918 and MCP3919.
This driver can also be built as a module. If so, the module will be
called mcp3911.
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 2facf979327d..bee11d442af4 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_INTEL_MRFLD_ADC) += intel_mrfld_adc.o
obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
obj-$(CONFIG_LPC18XX_ADC) += lpc18xx_adc.o
obj-$(CONFIG_LPC32XX_ADC) += lpc32xx_adc.o
+obj-$(CONFIG_LTC2309) += ltc2309.o
obj-$(CONFIG_LTC2471) += ltc2471.o
obj-$(CONFIG_LTC2485) += ltc2485.o
obj-$(CONFIG_LTC2496) += ltc2496.o ltc2497-core.o
@@ -71,6 +72,7 @@ obj-$(CONFIG_MAX77541_ADC) += max77541-adc.o
obj-$(CONFIG_MAX9611) += max9611.o
obj-$(CONFIG_MCP320X) += mcp320x.o
obj-$(CONFIG_MCP3422) += mcp3422.o
+obj-$(CONFIG_MCP3564) += mcp3564.o
obj-$(CONFIG_MCP3911) += mcp3911.o
obj-$(CONFIG_MEDIATEK_MT6360_ADC) += mt6360-adc.o
obj-$(CONFIG_MEDIATEK_MT6370_ADC) += mt6370-adc.o
diff --git a/drivers/iio/adc/ab8500-gpadc.c b/drivers/iio/adc/ab8500-gpadc.c
index 3b1bdd0b531d..80645fee79a4 100644
--- a/drivers/iio/adc/ab8500-gpadc.c
+++ b/drivers/iio/adc/ab8500-gpadc.c
@@ -1179,7 +1179,7 @@ out_dis_pm:
return ret;
}
-static int ab8500_gpadc_remove(struct platform_device *pdev)
+static void ab8500_gpadc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct ab8500_gpadc *gpadc = iio_priv(indio_dev);
@@ -1188,8 +1188,6 @@ static int ab8500_gpadc_remove(struct platform_device *pdev)
pm_runtime_put_noidle(gpadc->dev);
pm_runtime_disable(gpadc->dev);
regulator_disable(gpadc->vddadc);
-
- return 0;
}
static DEFINE_RUNTIME_DEV_PM_OPS(ab8500_gpadc_pm_ops,
@@ -1198,7 +1196,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(ab8500_gpadc_pm_ops,
static struct platform_driver ab8500_gpadc_driver = {
.probe = ab8500_gpadc_probe,
- .remove = ab8500_gpadc_remove,
+ .remove_new = ab8500_gpadc_remove,
.driver = {
.name = "ab8500-gpadc",
.pm = pm_ptr(&ab8500_gpadc_pm_ops),
diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c
index 5a5dd5e87ffc..feb86fe6c422 100644
--- a/drivers/iio/adc/ad4130.c
+++ b/drivers/iio/adc/ad4130.c
@@ -1817,18 +1817,12 @@ static const struct clk_ops ad4130_int_clk_ops = {
.unprepare = ad4130_int_clk_unprepare,
};
-static void ad4130_clk_del_provider(void *of_node)
-{
- of_clk_del_provider(of_node);
-}
-
static int ad4130_setup_int_clk(struct ad4130_state *st)
{
struct device *dev = &st->spi->dev;
struct device_node *of_node = dev_of_node(dev);
struct clk_init_data init;
const char *clk_name;
- struct clk *clk;
int ret;
if (st->int_pin_sel == AD4130_INT_PIN_CLK ||
@@ -1845,15 +1839,12 @@ static int ad4130_setup_int_clk(struct ad4130_state *st)
init.ops = &ad4130_int_clk_ops;
st->int_clk_hw.init = &init;
- clk = devm_clk_register(dev, &st->int_clk_hw);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
-
- ret = of_clk_add_provider(of_node, of_clk_src_simple_get, clk);
+ ret = devm_clk_hw_register(dev, &st->int_clk_hw);
if (ret)
return ret;
- return devm_add_action_or_reset(dev, ad4130_clk_del_provider, of_node);
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &st->int_clk_hw);
}
static int ad4130_setup(struct iio_dev *indio_dev)
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index b64fd365f83f..adc3cbe92d6e 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -6,6 +6,7 @@
*/
#include <linux/interrupt.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>
@@ -43,7 +44,7 @@
#define AD7192_COMM_WEN BIT(7) /* Write Enable */
#define AD7192_COMM_WRITE 0 /* Write Operation */
#define AD7192_COMM_READ BIT(6) /* Read Operation */
-#define AD7192_COMM_ADDR(x) (((x) & 0x7) << 3) /* Register Address */
+#define AD7192_COMM_ADDR_MASK GENMASK(5, 3) /* Register Address Mask */
#define AD7192_COMM_CREAD BIT(2) /* Continuous Read of Data Register */
/* Status Register Bit Designations (AD7192_REG_STAT) */
@@ -56,17 +57,18 @@
#define AD7192_STAT_CH1 BIT(0) /* Channel 1 */
/* Mode Register Bit Designations (AD7192_REG_MODE) */
-#define AD7192_MODE_SEL(x) (((x) & 0x7) << 21) /* Operation Mode Select */
-#define AD7192_MODE_SEL_MASK (0x7 << 21) /* Operation Mode Select Mask */
-#define AD7192_MODE_STA(x) (((x) & 0x1) << 20) /* Status Register transmission */
+#define AD7192_MODE_SEL_MASK GENMASK(23, 21) /* Operation Mode Select Mask */
#define AD7192_MODE_STA_MASK BIT(20) /* Status Register transmission Mask */
-#define AD7192_MODE_CLKSRC(x) (((x) & 0x3) << 18) /* Clock Source Select */
+#define AD7192_MODE_CLKSRC_MASK GENMASK(19, 18) /* Clock Source Select Mask */
+#define AD7192_MODE_AVG_MASK GENMASK(17, 16)
+ /* Fast Settling Filter Average Select Mask (AD7193 only) */
#define AD7192_MODE_SINC3 BIT(15) /* SINC3 Filter Select */
#define AD7192_MODE_ENPAR BIT(13) /* Parity Enable */
#define AD7192_MODE_CLKDIV BIT(12) /* Clock divide by 2 (AD7190/2 only)*/
#define AD7192_MODE_SCYCLE BIT(11) /* Single cycle conversion */
#define AD7192_MODE_REJ60 BIT(10) /* 50/60Hz notch filter */
-#define AD7192_MODE_RATE(x) ((x) & 0x3FF) /* Filter Update Rate Select */
+ /* Filter Update Rate Select Mask */
+#define AD7192_MODE_RATE_MASK GENMASK(9, 0)
/* Mode Register: AD7192_MODE_SEL options */
#define AD7192_MODE_CONT 0 /* Continuous Conversion Mode */
@@ -92,13 +94,12 @@
#define AD7192_CONF_CHOP BIT(23) /* CHOP enable */
#define AD7192_CONF_ACX BIT(22) /* AC excitation enable(AD7195 only) */
#define AD7192_CONF_REFSEL BIT(20) /* REFIN1/REFIN2 Reference Select */
-#define AD7192_CONF_CHAN(x) ((x) << 8) /* Channel select */
-#define AD7192_CONF_CHAN_MASK (0x7FF << 8) /* Channel select mask */
+#define AD7192_CONF_CHAN_MASK GENMASK(18, 8) /* Channel select mask */
#define AD7192_CONF_BURN BIT(7) /* Burnout current enable */
#define AD7192_CONF_REFDET BIT(6) /* Reference detect enable */
#define AD7192_CONF_BUF BIT(4) /* Buffered Mode Enable */
#define AD7192_CONF_UNIPOLAR BIT(3) /* Unipolar/Bipolar Enable */
-#define AD7192_CONF_GAIN(x) ((x) & 0x7) /* Gain Select */
+#define AD7192_CONF_GAIN_MASK GENMASK(2, 0) /* Gain Select */
#define AD7192_CH_AIN1P_AIN2M BIT(0) /* AIN1(+) - AIN2(-) */
#define AD7192_CH_AIN3P_AIN4M BIT(1) /* AIN3(+) - AIN4(-) */
@@ -130,7 +131,7 @@
#define CHIPID_AD7192 0x0
#define CHIPID_AD7193 0x2
#define CHIPID_AD7195 0x6
-#define AD7192_ID_MASK 0x0F
+#define AD7192_ID_MASK GENMASK(3, 0)
/* GPOCON Register Bit Designations (AD7192_REG_GPOCON) */
#define AD7192_GPOCON_BPDSW BIT(6) /* Bridge power-down switch enable */
@@ -172,6 +173,9 @@ enum {
struct ad7192_chip_info {
unsigned int chip_id;
const char *name;
+ const struct iio_chan_spec *channels;
+ u8 num_channels;
+ const struct iio_info *info;
};
struct ad7192_state {
@@ -181,10 +185,10 @@ struct ad7192_state {
struct clk *mclk;
u16 int_vref_mv;
u32 fclk;
- u32 f_order;
u32 mode;
u32 conf;
u32 scale_avail[8][2];
+ u32 oversampling_ratio_avail[4];
u8 gpocon;
u8 clock_sel;
struct mutex lock; /* protect sensor state */
@@ -273,7 +277,7 @@ static int ad7192_set_channel(struct ad_sigma_delta *sd, unsigned int channel)
struct ad7192_state *st = ad_sigma_delta_to_ad7192(sd);
st->conf &= ~AD7192_CONF_CHAN_MASK;
- st->conf |= AD7192_CONF_CHAN(channel);
+ st->conf |= FIELD_PREP(AD7192_CONF_CHAN_MASK, channel);
return ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, st->conf);
}
@@ -284,7 +288,7 @@ static int ad7192_set_mode(struct ad_sigma_delta *sd,
struct ad7192_state *st = ad_sigma_delta_to_ad7192(sd);
st->mode &= ~AD7192_MODE_SEL_MASK;
- st->mode |= AD7192_MODE_SEL(mode);
+ st->mode |= FIELD_PREP(AD7192_MODE_SEL_MASK, mode);
return ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
}
@@ -296,7 +300,7 @@ static int ad7192_append_status(struct ad_sigma_delta *sd, bool append)
int ret;
mode &= ~AD7192_MODE_STA_MASK;
- mode |= AD7192_MODE_STA(append);
+ mode |= FIELD_PREP(AD7192_MODE_STA_MASK, append);
ret = ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, mode);
if (ret < 0)
@@ -400,17 +404,17 @@ static int ad7192_setup(struct iio_dev *indio_dev, struct device_node *np)
if (ret)
return ret;
- id &= AD7192_ID_MASK;
+ id = FIELD_GET(AD7192_ID_MASK, id);
if (id != st->chip_info->chip_id)
dev_warn(&st->sd.spi->dev, "device ID query failed (0x%X != 0x%X)\n",
id, st->chip_info->chip_id);
- st->mode = AD7192_MODE_SEL(AD7192_MODE_IDLE) |
- AD7192_MODE_CLKSRC(st->clock_sel) |
- AD7192_MODE_RATE(480);
+ st->mode = FIELD_PREP(AD7192_MODE_SEL_MASK, AD7192_MODE_IDLE) |
+ FIELD_PREP(AD7192_MODE_CLKSRC_MASK, st->clock_sel) |
+ FIELD_PREP(AD7192_MODE_RATE_MASK, 480);
- st->conf = AD7192_CONF_GAIN(0);
+ st->conf = FIELD_PREP(AD7192_CONF_GAIN_MASK, 0);
rej60_en = of_property_read_bool(np, "adi,rejection-60-Hz-enable");
if (rej60_en)
@@ -421,7 +425,6 @@ static int ad7192_setup(struct iio_dev *indio_dev, struct device_node *np)
st->conf |= AD7192_CONF_REFSEL;
st->conf &= ~AD7192_CONF_CHOP;
- st->f_order = AD7192_NO_SYNC_FILTER;
buf_en = of_property_read_bool(np, "adi,buffer-enable");
if (buf_en)
@@ -456,13 +459,18 @@ static int ad7192_setup(struct iio_dev *indio_dev, struct device_node *np)
for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++) {
scale_uv = ((u64)st->int_vref_mv * 100000000)
>> (indio_dev->channels[0].scan_type.realbits -
- ((st->conf & AD7192_CONF_UNIPOLAR) ? 0 : 1));
+ !FIELD_GET(AD7192_CONF_UNIPOLAR, st->conf));
scale_uv >>= i;
st->scale_avail[i][1] = do_div(scale_uv, 100000000) * 10;
st->scale_avail[i][0] = scale_uv;
}
+ st->oversampling_ratio_avail[0] = 1;
+ st->oversampling_ratio_avail[1] = 2;
+ st->oversampling_ratio_avail[2] = 8;
+ st->oversampling_ratio_avail[3] = 16;
+
return 0;
}
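For reference (not part of the patch), the bitfield.h conversion above is value-preserving. With AD7192_MODE_RATE_MASK = GENMASK(9, 0), AD7192_MODE_CLKSRC_MASK = GENMASK(19, 18) and AD7192_CONF_GAIN_MASK = GENMASK(2, 0):

	FIELD_PREP(AD7192_MODE_RATE_MASK, 480)       == (480 & 0x3FF) << 0
	FIELD_PREP(AD7192_MODE_CLKSRC_MASK, clk_sel) == (clk_sel & 0x3) << 18
	FIELD_GET(AD7192_CONF_GAIN_MASK, st->conf)   == st->conf & 0x7

i.e. exactly what the removed AD7192_MODE_RATE(), AD7192_MODE_CLKSRC() and AD7192_CONF_GAIN() helpers produced, with build-time mask/value checks added when the value is a constant.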
@@ -473,7 +481,7 @@ static ssize_t ad7192_show_ac_excitation(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7192_state *st = iio_priv(indio_dev);
- return sysfs_emit(buf, "%d\n", !!(st->conf & AD7192_CONF_ACX));
+ return sysfs_emit(buf, "%ld\n", FIELD_GET(AD7192_CONF_ACX, st->conf));
}
static ssize_t ad7192_show_bridge_switch(struct device *dev,
@@ -483,7 +491,8 @@ static ssize_t ad7192_show_bridge_switch(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7192_state *st = iio_priv(indio_dev);
- return sysfs_emit(buf, "%d\n", !!(st->gpocon & AD7192_GPOCON_BPDSW));
+ return sysfs_emit(buf, "%ld\n",
+ FIELD_GET(AD7192_GPOCON_BPDSW, st->gpocon));
}
static ssize_t ad7192_set(struct device *dev,
@@ -531,22 +540,66 @@ static ssize_t ad7192_set(struct device *dev,
return ret ? ret : len;
}
+static int ad7192_compute_f_order(struct ad7192_state *st, bool sinc3_en, bool chop_en)
+{
+ u8 avg_factor_selected, oversampling_ratio;
+
+ avg_factor_selected = FIELD_GET(AD7192_MODE_AVG_MASK, st->mode);
+
+ if (!avg_factor_selected && !chop_en)
+ return 1;
+
+ oversampling_ratio = st->oversampling_ratio_avail[avg_factor_selected];
+
+ if (sinc3_en)
+ return AD7192_SYNC3_FILTER + oversampling_ratio - 1;
+
+ return AD7192_SYNC4_FILTER + oversampling_ratio - 1;
+}
+
+static int ad7192_get_f_order(struct ad7192_state *st)
+{
+ bool sinc3_en, chop_en;
+
+ sinc3_en = FIELD_GET(AD7192_MODE_SINC3, st->mode);
+ chop_en = FIELD_GET(AD7192_CONF_CHOP, st->conf);
+
+ return ad7192_compute_f_order(st, sinc3_en, chop_en);
+}
+
+static int ad7192_compute_f_adc(struct ad7192_state *st, bool sinc3_en,
+ bool chop_en)
+{
+ unsigned int f_order = ad7192_compute_f_order(st, sinc3_en, chop_en);
+
+ return DIV_ROUND_CLOSEST(st->fclk,
+ f_order * FIELD_GET(AD7192_MODE_RATE_MASK, st->mode));
+}
+
+static int ad7192_get_f_adc(struct ad7192_state *st)
+{
+ unsigned int f_order = ad7192_get_f_order(st);
+
+ return DIV_ROUND_CLOSEST(st->fclk,
+ f_order * FIELD_GET(AD7192_MODE_RATE_MASK, st->mode));
+}
+
static void ad7192_get_available_filter_freq(struct ad7192_state *st,
int *freq)
{
unsigned int fadc;
/* Formulas for filter at page 25 of the datasheet */
- fadc = DIV_ROUND_CLOSEST(st->fclk,
- AD7192_SYNC4_FILTER * AD7192_MODE_RATE(st->mode));
+ fadc = ad7192_compute_f_adc(st, false, true);
freq[0] = DIV_ROUND_CLOSEST(fadc * 240, 1024);
- fadc = DIV_ROUND_CLOSEST(st->fclk,
- AD7192_SYNC3_FILTER * AD7192_MODE_RATE(st->mode));
+ fadc = ad7192_compute_f_adc(st, true, true);
freq[1] = DIV_ROUND_CLOSEST(fadc * 240, 1024);
- fadc = DIV_ROUND_CLOSEST(st->fclk, AD7192_MODE_RATE(st->mode));
+ fadc = ad7192_compute_f_adc(st, false, false);
freq[2] = DIV_ROUND_CLOSEST(fadc * 230, 1024);
+
+ fadc = ad7192_compute_f_adc(st, true, false);
freq[3] = DIV_ROUND_CLOSEST(fadc * 272, 1024);
}
@@ -629,25 +682,21 @@ static int ad7192_set_3db_filter_freq(struct ad7192_state *st,
switch (idx) {
case 0:
- st->f_order = AD7192_SYNC4_FILTER;
st->mode &= ~AD7192_MODE_SINC3;
st->conf |= AD7192_CONF_CHOP;
break;
case 1:
- st->f_order = AD7192_SYNC3_FILTER;
st->mode |= AD7192_MODE_SINC3;
st->conf |= AD7192_CONF_CHOP;
break;
case 2:
- st->f_order = AD7192_NO_SYNC_FILTER;
st->mode &= ~AD7192_MODE_SINC3;
st->conf &= ~AD7192_CONF_CHOP;
break;
case 3:
- st->f_order = AD7192_NO_SYNC_FILTER;
st->mode |= AD7192_MODE_SINC3;
st->conf &= ~AD7192_CONF_CHOP;
@@ -665,12 +714,11 @@ static int ad7192_get_3db_filter_freq(struct ad7192_state *st)
{
unsigned int fadc;
- fadc = DIV_ROUND_CLOSEST(st->fclk,
- st->f_order * AD7192_MODE_RATE(st->mode));
+ fadc = ad7192_get_f_adc(st);
- if (st->conf & AD7192_CONF_CHOP)
+ if (FIELD_GET(AD7192_CONF_CHOP, st->conf))
return DIV_ROUND_CLOSEST(fadc * 240, 1024);
- if (st->mode & AD7192_MODE_SINC3)
+ if (FIELD_GET(AD7192_MODE_SINC3, st->mode))
return DIV_ROUND_CLOSEST(fadc * 272, 1024);
else
return DIV_ROUND_CLOSEST(fadc * 230, 1024);
@@ -683,7 +731,8 @@ static int ad7192_read_raw(struct iio_dev *indio_dev,
long m)
{
struct ad7192_state *st = iio_priv(indio_dev);
- bool unipolar = !!(st->conf & AD7192_CONF_UNIPOLAR);
+ bool unipolar = FIELD_GET(AD7192_CONF_UNIPOLAR, st->conf);
+ u8 gain = FIELD_GET(AD7192_CONF_GAIN_MASK, st->conf);
switch (m) {
case IIO_CHAN_INFO_RAW:
@@ -692,8 +741,8 @@ static int ad7192_read_raw(struct iio_dev *indio_dev,
switch (chan->type) {
case IIO_VOLTAGE:
mutex_lock(&st->lock);
- *val = st->scale_avail[AD7192_CONF_GAIN(st->conf)][0];
- *val2 = st->scale_avail[AD7192_CONF_GAIN(st->conf)][1];
+ *val = st->scale_avail[gain][0];
+ *val2 = st->scale_avail[gain][1];
mutex_unlock(&st->lock);
return IIO_VAL_INT_PLUS_NANO;
case IIO_TEMP:
@@ -713,13 +762,15 @@ static int ad7192_read_raw(struct iio_dev *indio_dev,
*val -= 273 * ad7192_get_temp_scale(unipolar);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SAMP_FREQ:
- *val = st->fclk /
- (st->f_order * 1024 * AD7192_MODE_RATE(st->mode));
+ *val = DIV_ROUND_CLOSEST(ad7192_get_f_adc(st), 1024);
return IIO_VAL_INT;
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
*val = ad7192_get_3db_filter_freq(st);
*val2 = 1000;
return IIO_VAL_FRACTIONAL;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *val = st->oversampling_ratio_avail[FIELD_GET(AD7192_MODE_AVG_MASK, st->mode)];
+ return IIO_VAL_INT;
}
return -EINVAL;
@@ -747,8 +798,8 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
if (val2 == st->scale_avail[i][1]) {
ret = 0;
tmp = st->conf;
- st->conf &= ~AD7192_CONF_GAIN(-1);
- st->conf |= AD7192_CONF_GAIN(i);
+ st->conf &= ~AD7192_CONF_GAIN_MASK;
+ st->conf |= FIELD_PREP(AD7192_CONF_GAIN_MASK, i);
if (tmp == st->conf)
break;
ad_sd_write_reg(&st->sd, AD7192_REG_CONF,
@@ -764,19 +815,36 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
break;
}
- div = st->fclk / (val * st->f_order * 1024);
+ div = st->fclk / (val * ad7192_get_f_order(st) * 1024);
if (div < 1 || div > 1023) {
ret = -EINVAL;
break;
}
- st->mode &= ~AD7192_MODE_RATE(-1);
- st->mode |= AD7192_MODE_RATE(div);
+ st->mode &= ~AD7192_MODE_RATE_MASK;
+ st->mode |= FIELD_PREP(AD7192_MODE_RATE_MASK, div);
ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
break;
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
ret = ad7192_set_3db_filter_freq(st, val, val2 / 1000);
break;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ ret = -EINVAL;
+ mutex_lock(&st->lock);
+ for (i = 0; i < ARRAY_SIZE(st->oversampling_ratio_avail); i++)
+ if (val == st->oversampling_ratio_avail[i]) {
+ ret = 0;
+ tmp = st->mode;
+ st->mode &= ~AD7192_MODE_AVG_MASK;
+ st->mode |= FIELD_PREP(AD7192_MODE_AVG_MASK, i);
+ if (tmp == st->mode)
+ break;
+ ad_sd_write_reg(&st->sd, AD7192_REG_MODE,
+ 3, st->mode);
+ break;
+ }
+ mutex_unlock(&st->lock);
+ break;
default:
ret = -EINVAL;
}
@@ -797,6 +865,8 @@ static int ad7192_write_raw_get_fmt(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ return IIO_VAL_INT;
default:
return -EINVAL;
}
@@ -817,6 +887,12 @@ static int ad7192_read_avail(struct iio_dev *indio_dev,
*length = ARRAY_SIZE(st->scale_avail) * 2;
return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *vals = (int *)st->oversampling_ratio_avail;
+ *type = IIO_VAL_INT;
+ *length = ARRAY_SIZE(st->oversampling_ratio_avail);
+
+ return IIO_AVAIL_LIST;
}
return -EINVAL;
@@ -831,7 +907,7 @@ static int ad7192_update_scan_mode(struct iio_dev *indio_dev, const unsigned lon
conf &= ~AD7192_CONF_CHAN_MASK;
for_each_set_bit(i, scan_mask, 8)
- conf |= AD7192_CONF_CHAN(i);
+ conf |= FIELD_PREP(AD7192_CONF_CHAN_MASK, i);
ret = ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, conf);
if (ret < 0)
@@ -862,8 +938,8 @@ static const struct iio_info ad7195_info = {
.update_scan_mode = ad7192_update_scan_mode,
};
-#define __AD719x_CHANNEL(_si, _channel1, _channel2, _address, _extend_name, \
- _type, _mask_type_av, _ext_info) \
+#define __AD719x_CHANNEL(_si, _channel1, _channel2, _address, _type, \
+ _mask_all, _mask_type_av, _mask_all_av, _ext_info) \
{ \
.type = (_type), \
.differential = ((_channel2) == -1 ? 0 : 1), \
@@ -871,13 +947,14 @@ static const struct iio_info ad7195_info = {
.channel = (_channel1), \
.channel2 = (_channel2), \
.address = (_address), \
- .extend_name = (_extend_name), \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_OFFSET), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
- BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) | \
+ (_mask_all), \
.info_mask_shared_by_type_available = (_mask_type_av), \
+ .info_mask_shared_by_all_available = (_mask_all_av), \
.ext_info = (_ext_info), \
.scan_index = (_si), \
.scan_type = { \
@@ -889,16 +966,26 @@ static const struct iio_info ad7195_info = {
}
#define AD719x_DIFF_CHANNEL(_si, _channel1, _channel2, _address) \
- __AD719x_CHANNEL(_si, _channel1, _channel2, _address, NULL, \
- IIO_VOLTAGE, BIT(IIO_CHAN_INFO_SCALE), \
- ad7192_calibsys_ext_info)
+ __AD719x_CHANNEL(_si, _channel1, _channel2, _address, IIO_VOLTAGE, 0, \
+ BIT(IIO_CHAN_INFO_SCALE), 0, ad7192_calibsys_ext_info)
#define AD719x_CHANNEL(_si, _channel1, _address) \
- __AD719x_CHANNEL(_si, _channel1, -1, _address, NULL, IIO_VOLTAGE, \
- BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info)
+ __AD719x_CHANNEL(_si, _channel1, -1, _address, IIO_VOLTAGE, 0, \
+ BIT(IIO_CHAN_INFO_SCALE), 0, ad7192_calibsys_ext_info)
#define AD719x_TEMP_CHANNEL(_si, _address) \
- __AD719x_CHANNEL(_si, 0, -1, _address, NULL, IIO_TEMP, 0, NULL)
+ __AD719x_CHANNEL(_si, 0, -1, _address, IIO_TEMP, 0, 0, 0, NULL)
+
+#define AD7193_DIFF_CHANNEL(_si, _channel1, _channel2, _address) \
+ __AD719x_CHANNEL(_si, _channel1, _channel2, _address, \
+ IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ ad7192_calibsys_ext_info)
+
+#define AD7193_CHANNEL(_si, _channel1, _address) \
+ AD7193_DIFF_CHANNEL(_si, _channel1, -1, _address)
static const struct iio_chan_spec ad7192_channels[] = {
AD719x_DIFF_CHANNEL(0, 1, 2, AD7192_CH_AIN1P_AIN2M),
@@ -913,20 +1000,20 @@ static const struct iio_chan_spec ad7192_channels[] = {
};
static const struct iio_chan_spec ad7193_channels[] = {
- AD719x_DIFF_CHANNEL(0, 1, 2, AD7193_CH_AIN1P_AIN2M),
- AD719x_DIFF_CHANNEL(1, 3, 4, AD7193_CH_AIN3P_AIN4M),
- AD719x_DIFF_CHANNEL(2, 5, 6, AD7193_CH_AIN5P_AIN6M),
- AD719x_DIFF_CHANNEL(3, 7, 8, AD7193_CH_AIN7P_AIN8M),
+ AD7193_DIFF_CHANNEL(0, 1, 2, AD7193_CH_AIN1P_AIN2M),
+ AD7193_DIFF_CHANNEL(1, 3, 4, AD7193_CH_AIN3P_AIN4M),
+ AD7193_DIFF_CHANNEL(2, 5, 6, AD7193_CH_AIN5P_AIN6M),
+ AD7193_DIFF_CHANNEL(3, 7, 8, AD7193_CH_AIN7P_AIN8M),
AD719x_TEMP_CHANNEL(4, AD7193_CH_TEMP),
- AD719x_DIFF_CHANNEL(5, 2, 2, AD7193_CH_AIN2P_AIN2M),
- AD719x_CHANNEL(6, 1, AD7193_CH_AIN1),
- AD719x_CHANNEL(7, 2, AD7193_CH_AIN2),
- AD719x_CHANNEL(8, 3, AD7193_CH_AIN3),
- AD719x_CHANNEL(9, 4, AD7193_CH_AIN4),
- AD719x_CHANNEL(10, 5, AD7193_CH_AIN5),
- AD719x_CHANNEL(11, 6, AD7193_CH_AIN6),
- AD719x_CHANNEL(12, 7, AD7193_CH_AIN7),
- AD719x_CHANNEL(13, 8, AD7193_CH_AIN8),
+ AD7193_DIFF_CHANNEL(5, 2, 2, AD7193_CH_AIN2P_AIN2M),
+ AD7193_CHANNEL(6, 1, AD7193_CH_AIN1),
+ AD7193_CHANNEL(7, 2, AD7193_CH_AIN2),
+ AD7193_CHANNEL(8, 3, AD7193_CH_AIN3),
+ AD7193_CHANNEL(9, 4, AD7193_CH_AIN4),
+ AD7193_CHANNEL(10, 5, AD7193_CH_AIN5),
+ AD7193_CHANNEL(11, 6, AD7193_CH_AIN6),
+ AD7193_CHANNEL(12, 7, AD7193_CH_AIN7),
+ AD7193_CHANNEL(13, 8, AD7193_CH_AIN8),
IIO_CHAN_SOFT_TIMESTAMP(14),
};
@@ -934,39 +1021,33 @@ static const struct ad7192_chip_info ad7192_chip_info_tbl[] = {
[ID_AD7190] = {
.chip_id = CHIPID_AD7190,
.name = "ad7190",
+ .channels = ad7192_channels,
+ .num_channels = ARRAY_SIZE(ad7192_channels),
+ .info = &ad7192_info,
},
[ID_AD7192] = {
.chip_id = CHIPID_AD7192,
.name = "ad7192",
+ .channels = ad7192_channels,
+ .num_channels = ARRAY_SIZE(ad7192_channels),
+ .info = &ad7192_info,
},
[ID_AD7193] = {
.chip_id = CHIPID_AD7193,
.name = "ad7193",
+ .channels = ad7193_channels,
+ .num_channels = ARRAY_SIZE(ad7193_channels),
+ .info = &ad7192_info,
},
[ID_AD7195] = {
.chip_id = CHIPID_AD7195,
.name = "ad7195",
+ .channels = ad7192_channels,
+ .num_channels = ARRAY_SIZE(ad7192_channels),
+ .info = &ad7195_info,
},
};
-static int ad7192_channels_config(struct iio_dev *indio_dev)
-{
- struct ad7192_state *st = iio_priv(indio_dev);
-
- switch (st->chip_info->chip_id) {
- case CHIPID_AD7193:
- indio_dev->channels = ad7193_channels;
- indio_dev->num_channels = ARRAY_SIZE(ad7193_channels);
- break;
- default:
- indio_dev->channels = ad7192_channels;
- indio_dev->num_channels = ARRAY_SIZE(ad7192_channels);
- break;
- }
-
- return 0;
-}
-
static void ad7192_reg_disable(void *reg)
{
regulator_disable(reg);
@@ -1041,15 +1122,9 @@ static int ad7192_probe(struct spi_device *spi)
st->chip_info = (void *)spi_get_device_id(spi)->driver_data;
indio_dev->name = st->chip_info->name;
indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = ad7192_channels_config(indio_dev);
- if (ret < 0)
- return ret;
-
- if (st->chip_info->chip_id == CHIPID_AD7195)
- indio_dev->info = &ad7195_info;
- else
- indio_dev->info = &ad7192_info;
+ indio_dev->channels = st->chip_info->channels;
+ indio_dev->num_channels = st->chip_info->num_channels;
+ indio_dev->info = st->chip_info->info;
ret = ad_sd_init(&st->sd, indio_dev, spi, &ad7192_sigma_delta_info);
if (ret)
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index df67b63ccf69..d7fd21e7c6e2 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -2486,7 +2486,7 @@ reg_disable:
return ret;
}
-static int at91_adc_remove(struct platform_device *pdev)
+static void at91_adc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct at91_adc_state *st = iio_priv(indio_dev);
@@ -2501,8 +2501,6 @@ static int at91_adc_remove(struct platform_device *pdev)
regulator_disable(st->vref);
regulator_disable(st->reg);
-
- return 0;
}
static int at91_adc_suspend(struct device *dev)
@@ -2627,7 +2625,7 @@ MODULE_DEVICE_TABLE(of, at91_adc_dt_match);
static struct platform_driver at91_adc_driver = {
.probe = at91_adc_probe,
- .remove = at91_adc_remove,
+ .remove_new = at91_adc_remove,
.driver = {
.name = "at91-sama5d2_adc",
.of_match_table = at91_adc_dt_match,
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index de6650f9c4b1..eb501e3c86a5 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -1013,28 +1013,25 @@ static int at91_adc_probe(struct platform_device *pdev)
st->use_external = of_property_read_bool(node, "atmel,adc-use-external-triggers");
- if (of_property_read_u32(node, "atmel,adc-channels-used", &prop)) {
- dev_err(&idev->dev, "Missing adc-channels-used property in the DT.\n");
- return -EINVAL;
- }
+ if (of_property_read_u32(node, "atmel,adc-channels-used", &prop))
+ return dev_err_probe(&idev->dev, -EINVAL,
+ "Missing adc-channels-used property in the DT.\n");
st->channels_mask = prop;
st->sleep_mode = of_property_read_bool(node, "atmel,adc-sleep-mode");
- if (of_property_read_u32(node, "atmel,adc-startup-time", &prop)) {
- dev_err(&idev->dev, "Missing adc-startup-time property in the DT.\n");
- return -EINVAL;
- }
+ if (of_property_read_u32(node, "atmel,adc-startup-time", &prop))
+ return dev_err_probe(&idev->dev, -EINVAL,
+ "Missing adc-startup-time property in the DT.\n");
st->startup_time = prop;
prop = 0;
of_property_read_u32(node, "atmel,adc-sample-hold-time", &prop);
st->sample_hold_time = prop;
- if (of_property_read_u32(node, "atmel,adc-vref", &prop)) {
- dev_err(&idev->dev, "Missing adc-vref property in the DT.\n");
- return -EINVAL;
- }
+ if (of_property_read_u32(node, "atmel,adc-vref", &prop))
+ return dev_err_probe(&idev->dev, -EINVAL,
+ "Missing adc-vref property in the DT.\n");
st->vref_mv = prop;
st->res = st->caps->high_res_bits;
@@ -1069,7 +1066,6 @@ static int at91_adc_probe(struct platform_device *pdev)
if (IS_ERR(st->reg_base))
return PTR_ERR(st->reg_base);
-
/*
* Disable all IRQs before setting up the handler
*/
@@ -1077,43 +1073,26 @@ static int at91_adc_probe(struct platform_device *pdev)
at91_adc_writel(st, AT91_ADC_IDR, 0xFFFFFFFF);
if (st->caps->has_tsmr)
- ret = request_irq(st->irq, at91_adc_9x5_interrupt, 0,
- pdev->dev.driver->name, idev);
+ ret = devm_request_irq(&pdev->dev, st->irq,
+ at91_adc_9x5_interrupt, 0,
+ pdev->dev.driver->name, idev);
else
- ret = request_irq(st->irq, at91_adc_rl_interrupt, 0,
- pdev->dev.driver->name, idev);
- if (ret) {
- dev_err(&pdev->dev, "Failed to allocate IRQ.\n");
- return ret;
- }
-
- st->clk = devm_clk_get(&pdev->dev, "adc_clk");
- if (IS_ERR(st->clk)) {
- dev_err(&pdev->dev, "Failed to get the clock.\n");
- ret = PTR_ERR(st->clk);
- goto error_free_irq;
- }
-
- ret = clk_prepare_enable(st->clk);
- if (ret) {
- dev_err(&pdev->dev,
- "Could not prepare or enable the clock.\n");
- goto error_free_irq;
- }
+ ret = devm_request_irq(&pdev->dev, st->irq,
+ at91_adc_rl_interrupt, 0,
+ pdev->dev.driver->name, idev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to allocate IRQ.\n");
- st->adc_clk = devm_clk_get(&pdev->dev, "adc_op_clk");
- if (IS_ERR(st->adc_clk)) {
- dev_err(&pdev->dev, "Failed to get the ADC clock.\n");
- ret = PTR_ERR(st->adc_clk);
- goto error_disable_clk;
- }
+ st->clk = devm_clk_get_enabled(&pdev->dev, "adc_clk");
+ if (IS_ERR(st->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(st->clk),
+ "Could not prepare or enable the clock.\n");
- ret = clk_prepare_enable(st->adc_clk);
- if (ret) {
- dev_err(&pdev->dev,
- "Could not prepare or enable the ADC clock.\n");
- goto error_disable_clk;
- }
+ st->adc_clk = devm_clk_get_enabled(&pdev->dev, "adc_op_clk");
+ if (IS_ERR(st->adc_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(st->adc_clk),
+ "Could not prepare or enable the ADC clock.\n");
/*
* Prescaler rate computation using the formula from the Atmel's
@@ -1129,11 +1108,9 @@ static int at91_adc_probe(struct platform_device *pdev)
prsc = (mstrclk / (2 * adc_clk)) - 1;
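/*
 * Worked example (illustrative, hypothetical rates): a 100 MHz master
 * clock with a requested 5 MHz ADC clock gives
 * prsc = 100000000 / (2 * 5000000) - 1 = 9.
 */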
- if (!st->startup_time) {
- dev_err(&pdev->dev, "No startup time available.\n");
- ret = -EINVAL;
- goto error_disable_adc_clk;
- }
+ if (!st->startup_time)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "No startup time available.\n");
ticks = (*st->caps->calc_startup_ticks)(st->startup_time, adc_clk_khz);
/*
@@ -1158,10 +1135,9 @@ static int at91_adc_probe(struct platform_device *pdev)
/* Setup the ADC channels available on the board */
ret = at91_adc_channel_init(idev);
- if (ret < 0) {
- dev_err(&pdev->dev, "Couldn't initialize the channels.\n");
- goto error_disable_adc_clk;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Couldn't initialize the channels.\n");
init_waitqueue_head(&st->wq_data_avail);
mutex_init(&st->lock);
@@ -1173,21 +1149,20 @@ static int at91_adc_probe(struct platform_device *pdev)
*/
if (!st->touchscreen_type) {
ret = at91_adc_buffer_init(idev);
- if (ret < 0) {
- dev_err(&pdev->dev, "Couldn't initialize the buffer.\n");
- goto error_disable_adc_clk;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Couldn't initialize the buffer.\n");
ret = at91_adc_trigger_init(idev);
if (ret < 0) {
dev_err(&pdev->dev, "Couldn't setup the triggers.\n");
at91_adc_buffer_remove(idev);
- goto error_disable_adc_clk;
+ return ret;
}
} else {
ret = at91_ts_register(idev, pdev);
if (ret)
- goto error_disable_adc_clk;
+ return ret;
at91_ts_hw_init(idev, adc_clk_khz);
}
@@ -1207,16 +1182,10 @@ error_iio_device_register:
} else {
at91_ts_unregister(st);
}
-error_disable_adc_clk:
- clk_disable_unprepare(st->adc_clk);
-error_disable_clk:
- clk_disable_unprepare(st->clk);
-error_free_irq:
- free_irq(st->irq, idev);
return ret;
}
-static int at91_adc_remove(struct platform_device *pdev)
+static void at91_adc_remove(struct platform_device *pdev)
{
struct iio_dev *idev = platform_get_drvdata(pdev);
struct at91_adc_state *st = iio_priv(idev);
@@ -1228,11 +1197,6 @@ static int at91_adc_remove(struct platform_device *pdev)
} else {
at91_ts_unregister(st);
}
- clk_disable_unprepare(st->adc_clk);
- clk_disable_unprepare(st->clk);
- free_irq(st->irq, idev);
-
- return 0;
}
static int at91_adc_suspend(struct device *dev)
@@ -1382,7 +1346,7 @@ MODULE_DEVICE_TABLE(of, at91_adc_dt_ids);
static struct platform_driver at91_adc_driver = {
.probe = at91_adc_probe,
- .remove = at91_adc_remove,
+ .remove_new = at91_adc_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = at91_adc_dt_ids,
diff --git a/drivers/iio/adc/axp20x_adc.c b/drivers/iio/adc/axp20x_adc.c
index 75bda94dbce1..d6c51b0f48e3 100644
--- a/drivers/iio/adc/axp20x_adc.c
+++ b/drivers/iio/adc/axp20x_adc.c
@@ -745,7 +745,7 @@ fail_map:
return ret;
}
-static int axp20x_remove(struct platform_device *pdev)
+static void axp20x_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct axp20x_adc_iio *info = iio_priv(indio_dev);
@@ -757,8 +757,6 @@ static int axp20x_remove(struct platform_device *pdev)
if (info->data->adc_en2_mask)
regmap_write(info->regmap, AXP20X_ADC_EN2, 0);
-
- return 0;
}
static struct platform_driver axp20x_adc_driver = {
@@ -768,7 +766,7 @@ static struct platform_driver axp20x_adc_driver = {
},
.id_table = axp20x_adc_id_match,
.probe = axp20x_probe,
- .remove = axp20x_remove,
+ .remove_new = axp20x_remove,
};
module_platform_driver(axp20x_adc_driver);
diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c
index 0d6885413a7e..5bc514bd5ebc 100644
--- a/drivers/iio/adc/bcm_iproc_adc.c
+++ b/drivers/iio/adc/bcm_iproc_adc.c
@@ -594,7 +594,7 @@ err_adc_enable:
return ret;
}
-static int iproc_adc_remove(struct platform_device *pdev)
+static void iproc_adc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct iproc_adc_priv *adc_priv = iio_priv(indio_dev);
@@ -602,8 +602,6 @@ static int iproc_adc_remove(struct platform_device *pdev)
iio_device_unregister(indio_dev);
iproc_adc_disable(indio_dev);
clk_disable_unprepare(adc_priv->adc_clk);
-
- return 0;
}
static const struct of_device_id iproc_adc_of_match[] = {
@@ -614,7 +612,7 @@ MODULE_DEVICE_TABLE(of, iproc_adc_of_match);
static struct platform_driver iproc_adc_driver = {
.probe = iproc_adc_probe,
- .remove = iproc_adc_remove,
+ .remove_new = iproc_adc_remove,
.driver = {
.name = "iproc-static-adc",
.of_match_table = iproc_adc_of_match,
diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
index 97d162a3cba4..06cfbbabaf8d 100644
--- a/drivers/iio/adc/dln2-adc.c
+++ b/drivers/iio/adc/dln2-adc.c
@@ -691,19 +691,18 @@ unregister_event:
return ret;
}
-static int dln2_adc_remove(struct platform_device *pdev)
+static void dln2_adc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
iio_device_unregister(indio_dev);
dln2_unregister_event_cb(pdev, DLN2_ADC_CONDITION_MET_EV);
- return 0;
}
static struct platform_driver dln2_adc_driver = {
.driver.name = DLN2_ADC_MOD_NAME,
.probe = dln2_adc_probe,
- .remove = dln2_adc_remove,
+ .remove_new = dln2_adc_remove,
};
module_platform_driver(dln2_adc_driver);
diff --git a/drivers/iio/adc/ep93xx_adc.c b/drivers/iio/adc/ep93xx_adc.c
index a35e6cead67d..971942ce4c66 100644
--- a/drivers/iio/adc/ep93xx_adc.c
+++ b/drivers/iio/adc/ep93xx_adc.c
@@ -217,15 +217,13 @@ static int ep93xx_adc_probe(struct platform_device *pdev)
return ret;
}
-static int ep93xx_adc_remove(struct platform_device *pdev)
+static void ep93xx_adc_remove(struct platform_device *pdev)
{
struct iio_dev *iiodev = platform_get_drvdata(pdev);
struct ep93xx_adc_priv *priv = iio_priv(iiodev);
iio_device_unregister(iiodev);
clk_disable_unprepare(priv->clk);
-
- return 0;
}
static const struct of_device_id ep93xx_adc_of_ids[] = {
@@ -240,7 +238,7 @@ static struct platform_driver ep93xx_adc_driver = {
.of_match_table = ep93xx_adc_of_ids,
},
.probe = ep93xx_adc_probe,
- .remove = ep93xx_adc_remove,
+ .remove_new = ep93xx_adc_remove,
};
module_platform_driver(ep93xx_adc_driver);
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index 43c8af41b4a9..614de9644800 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -950,7 +950,7 @@ err_disable_reg:
return ret;
}
-static int exynos_adc_remove(struct platform_device *pdev)
+static void exynos_adc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct exynos_adc *info = iio_priv(indio_dev);
@@ -968,8 +968,6 @@ static int exynos_adc_remove(struct platform_device *pdev)
exynos_adc_disable_clk(info);
exynos_adc_unprepare_clk(info);
regulator_disable(info->vdd);
-
- return 0;
}
static int exynos_adc_suspend(struct device *dev)
@@ -1010,7 +1008,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(exynos_adc_pm_ops, exynos_adc_suspend,
static struct platform_driver exynos_adc_driver = {
.probe = exynos_adc_probe,
- .remove = exynos_adc_remove,
+ .remove_new = exynos_adc_remove,
.driver = {
.name = "exynos-adc",
.of_match_table = exynos_adc_match,
diff --git a/drivers/iio/adc/fsl-imx25-gcq.c b/drivers/iio/adc/fsl-imx25-gcq.c
index 551e83ae573c..68c813de0605 100644
--- a/drivers/iio/adc/fsl-imx25-gcq.c
+++ b/drivers/iio/adc/fsl-imx25-gcq.c
@@ -384,7 +384,7 @@ err_regulator_disable:
return ret;
}
-static int mx25_gcq_remove(struct platform_device *pdev)
+static void mx25_gcq_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct mx25_gcq_priv *priv = iio_priv(indio_dev);
@@ -397,8 +397,6 @@ static int mx25_gcq_remove(struct platform_device *pdev)
if (priv->vref[i])
regulator_disable(priv->vref[i]);
}
-
- return 0;
}
static const struct of_device_id mx25_gcq_ids[] = {
@@ -413,7 +411,7 @@ static struct platform_driver mx25_gcq_driver = {
.of_match_table = mx25_gcq_ids,
},
.probe = mx25_gcq_probe,
- .remove = mx25_gcq_remove,
+ .remove_new = mx25_gcq_remove,
};
module_platform_driver(mx25_gcq_driver);
diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c
index f7ee856a6b8b..c80c55fb8c6c 100644
--- a/drivers/iio/adc/hx711.c
+++ b/drivers/iio/adc/hx711.c
@@ -580,7 +580,7 @@ error_regulator:
return ret;
}
-static int hx711_remove(struct platform_device *pdev)
+static void hx711_remove(struct platform_device *pdev)
{
struct hx711_data *hx711_data;
struct iio_dev *indio_dev;
@@ -593,8 +593,6 @@ static int hx711_remove(struct platform_device *pdev)
iio_triggered_buffer_cleanup(indio_dev);
regulator_disable(hx711_data->reg_avdd);
-
- return 0;
}
static const struct of_device_id of_hx711_match[] = {
@@ -606,7 +604,7 @@ MODULE_DEVICE_TABLE(of, of_hx711_match);
static struct platform_driver hx711_driver = {
.probe = hx711_probe,
- .remove = hx711_remove,
+ .remove_new = hx711_remove,
.driver = {
.name = "hx711-gpio",
.of_match_table = of_hx711_match,
diff --git a/drivers/iio/adc/imx8qxp-adc.c b/drivers/iio/adc/imx8qxp-adc.c
index fff6e5a2d956..fe82198170d5 100644
--- a/drivers/iio/adc/imx8qxp-adc.c
+++ b/drivers/iio/adc/imx8qxp-adc.c
@@ -404,7 +404,7 @@ error_regulator_disable:
return ret;
}
-static int imx8qxp_adc_remove(struct platform_device *pdev)
+static void imx8qxp_adc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct imx8qxp_adc *adc = iio_priv(indio_dev);
@@ -422,8 +422,6 @@ static int imx8qxp_adc_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
-
- return 0;
}
static int imx8qxp_adc_runtime_suspend(struct device *dev)
@@ -489,7 +487,7 @@ MODULE_DEVICE_TABLE(of, imx8qxp_adc_match);
static struct platform_driver imx8qxp_adc_driver = {
.probe = imx8qxp_adc_probe,
- .remove = imx8qxp_adc_remove,
+ .remove_new = imx8qxp_adc_remove,
.driver = {
.name = ADC_DRIVER_NAME,
.of_match_table = imx8qxp_adc_match,
diff --git a/drivers/iio/adc/imx93_adc.c b/drivers/iio/adc/imx93_adc.c
index dce9ec91e4a7..9bb1e4ba1aee 100644
--- a/drivers/iio/adc/imx93_adc.c
+++ b/drivers/iio/adc/imx93_adc.c
@@ -392,7 +392,7 @@ error_regulator_disable:
return ret;
}
-static int imx93_adc_remove(struct platform_device *pdev)
+static void imx93_adc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct imx93_adc *adc = iio_priv(indio_dev);
@@ -410,8 +410,6 @@ static int imx93_adc_remove(struct platform_device *pdev)
free_irq(adc->irq, adc);
clk_disable_unprepare(adc->ipg_clk);
regulator_disable(adc->vref);
-
- return 0;
}
static int imx93_adc_runtime_suspend(struct device *dev)
@@ -468,7 +466,7 @@ MODULE_DEVICE_TABLE(of, imx93_adc_match);
static struct platform_driver imx93_adc_driver = {
.probe = imx93_adc_probe,
- .remove = imx93_adc_remove,
+ .remove_new = imx93_adc_remove,
.driver = {
.name = IMX93_ADC_DRIVER_NAME,
.of_match_table = imx93_adc_match,
diff --git a/drivers/iio/adc/ltc2309.c b/drivers/iio/adc/ltc2309.c
new file mode 100644
index 000000000000..8b3a89c1b840
--- /dev/null
+++ b/drivers/iio/adc/ltc2309.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The LTC2309 is an 8-Channel, 12-Bit SAR ADC with an I2C Interface.
+ *
+ * Datasheet:
+ * https://www.analog.com/media/en/technical-documentation/data-sheets/2309fd.pdf
+ *
+ * Copyright (c) 2023, Liam Beguin <liambeguin@gmail.com>
+ */
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+
+#define LTC2309_ADC_RESOLUTION 12
+
+#define LTC2309_DIN_CH_MASK GENMASK(7, 4)
+#define LTC2309_DIN_SDN BIT(7)
+#define LTC2309_DIN_OSN BIT(6)
+#define LTC2309_DIN_S1 BIT(5)
+#define LTC2309_DIN_S0 BIT(4)
+#define LTC2309_DIN_UNI BIT(3)
+#define LTC2309_DIN_SLEEP BIT(2)
+
+/**
+ * struct ltc2309 - internal device data structure
+ * @dev: Device reference
+ * @client: I2C reference
+ * @vref: External reference source
+ * @lock: Lock to serialize data access
+ * @vref_mv: Reference voltage in millivolts (internal reference by default)
+ */
+struct ltc2309 {
+ struct device *dev;
+ struct i2c_client *client;
+ struct regulator *vref;
+ struct mutex lock; /* serialize data access */
+ int vref_mv;
+};
+
+/* Order matches the expected channel address; see datasheet Table 1. */
+enum ltc2309_channels {
+ LTC2309_CH0_CH1 = 0,
+ LTC2309_CH2_CH3,
+ LTC2309_CH4_CH5,
+ LTC2309_CH6_CH7,
+ LTC2309_CH1_CH0,
+ LTC2309_CH3_CH2,
+ LTC2309_CH5_CH4,
+ LTC2309_CH7_CH6,
+ LTC2309_CH0,
+ LTC2309_CH2,
+ LTC2309_CH4,
+ LTC2309_CH6,
+ LTC2309_CH1,
+ LTC2309_CH3,
+ LTC2309_CH5,
+ LTC2309_CH7,
+};
+
+#define LTC2309_CHAN(_chan, _addr) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .address = _addr, \
+ .channel = _chan, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+#define LTC2309_DIFF_CHAN(_chan, _chan2, _addr) { \
+ .type = IIO_VOLTAGE, \
+ .differential = 1, \
+ .indexed = 1, \
+ .address = _addr, \
+ .channel = _chan, \
+ .channel2 = _chan2, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec ltc2309_channels[] = {
+ LTC2309_CHAN(0, LTC2309_CH0),
+ LTC2309_CHAN(1, LTC2309_CH1),
+ LTC2309_CHAN(2, LTC2309_CH2),
+ LTC2309_CHAN(3, LTC2309_CH3),
+ LTC2309_CHAN(4, LTC2309_CH4),
+ LTC2309_CHAN(5, LTC2309_CH5),
+ LTC2309_CHAN(6, LTC2309_CH6),
+ LTC2309_CHAN(7, LTC2309_CH7),
+ LTC2309_DIFF_CHAN(0, 1, LTC2309_CH0_CH1),
+ LTC2309_DIFF_CHAN(2, 3, LTC2309_CH2_CH3),
+ LTC2309_DIFF_CHAN(4, 5, LTC2309_CH4_CH5),
+ LTC2309_DIFF_CHAN(6, 7, LTC2309_CH6_CH7),
+ LTC2309_DIFF_CHAN(1, 0, LTC2309_CH1_CH0),
+ LTC2309_DIFF_CHAN(3, 2, LTC2309_CH3_CH2),
+ LTC2309_DIFF_CHAN(5, 4, LTC2309_CH5_CH4),
+ LTC2309_DIFF_CHAN(7, 6, LTC2309_CH7_CH6),
+};
+
+static int ltc2309_read_raw_channel(struct ltc2309 *ltc2309,
+ unsigned long address, int *val)
+{
+ int ret;
+ u16 buf;
+ u8 din;
+
+ din = FIELD_PREP(LTC2309_DIN_CH_MASK, address & 0x0f) |
+ FIELD_PREP(LTC2309_DIN_UNI, 1) |
+ FIELD_PREP(LTC2309_DIN_SLEEP, 0);
+
+ ret = i2c_smbus_write_byte(ltc2309->client, din);
+ if (ret < 0) {
+ dev_err(ltc2309->dev, "i2c command failed: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ ret = i2c_master_recv(ltc2309->client, (char *)&buf, 2);
+ if (ret < 0) {
+ dev_err(ltc2309->dev, "i2c read failed: %pe\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ *val = be16_to_cpu(buf) >> 4;
+
+ return ret;
+}
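+
+/*
+ * Worked example (illustrative): a single-ended read of CH5 uses channel
+ * address LTC2309_CH5 = 14 (0b1110), so din = (14 << 4) | LTC2309_DIN_UNI
+ * = 0xe8: S/D = 1, O/S = 1, S1 = 1, S0 = 0, unipolar conversion, no sleep.
+ * The 12-bit result comes back left justified in the 16-bit read, hence
+ * the shift by 4 above.
+ */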
+
+static int ltc2309_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct ltc2309 *ltc2309 = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&ltc2309->lock);
+ ret = ltc2309_read_raw_channel(ltc2309, chan->address, val);
+ mutex_unlock(&ltc2309->lock);
+ if (ret < 0)
+ return -EINVAL;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = ltc2309->vref_mv;
+ *val2 = LTC2309_ADC_RESOLUTION;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+}
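+
+/*
+ * Worked example (illustrative): with the default 4.096 V internal
+ * reference, scale = 4096 mV / 2^12 = 1 mV per LSB; with a hypothetical
+ * external 2.5 V reference it would be 2500 / 4096, roughly 0.61 mV per LSB.
+ */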
+
+static const struct iio_info ltc2309_info = {
+ .read_raw = ltc2309_read_raw,
+};
+
+static void ltc2309_regulator_disable(void *regulator)
+{
+ regulator_disable(regulator);
+}
+
+static int ltc2309_probe(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev;
+ struct ltc2309 *ltc2309;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*ltc2309));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ ltc2309 = iio_priv(indio_dev);
+ ltc2309->dev = &indio_dev->dev;
+ ltc2309->client = client;
+ ltc2309->vref_mv = 4096; /* Default to the internal ref */
+
+ indio_dev->name = "ltc2309";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ltc2309_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ltc2309_channels);
+ indio_dev->info = &ltc2309_info;
+
+ ltc2309->vref = devm_regulator_get_optional(&client->dev, "vref");
+ if (IS_ERR(ltc2309->vref)) {
+ ret = PTR_ERR(ltc2309->vref);
+ if (ret == -ENODEV)
+ ltc2309->vref = NULL;
+ else
+ return ret;
+ }
+
+ if (ltc2309->vref) {
+ ret = regulator_enable(ltc2309->vref);
+ if (ret)
+ return dev_err_probe(ltc2309->dev, ret,
+ "failed to enable vref\n");
+
+ ret = devm_add_action_or_reset(ltc2309->dev,
+ ltc2309_regulator_disable,
+ ltc2309->vref);
+ if (ret) {
+ return dev_err_probe(ltc2309->dev, ret,
+ "failed to add regulator_disable action: %d\n",
+ ret);
+ }
+
+ ret = regulator_get_voltage(ltc2309->vref);
+ if (ret < 0)
+ return ret;
+
+ ltc2309->vref_mv = ret / 1000;
+ }
+
+ mutex_init(&ltc2309->lock);
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct of_device_id ltc2309_of_match[] = {
+ { .compatible = "lltc,ltc2309" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ltc2309_of_match);
+
+static const struct i2c_device_id ltc2309_id[] = {
+ { "ltc2309" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ltc2309_id);
+
+static struct i2c_driver ltc2309_driver = {
+ .driver = {
+ .name = "ltc2309",
+ .of_match_table = ltc2309_of_match,
+ },
+ .probe = ltc2309_probe,
+ .id_table = ltc2309_id,
+};
+module_i2c_driver(ltc2309_driver);
+
+MODULE_AUTHOR("Liam Beguin <liambeguin@gmail.com>");
+MODULE_DESCRIPTION("Linear Technology LTC2309 ADC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c
index 5bdd40729611..6401a7727c31 100644
--- a/drivers/iio/adc/ltc2497.c
+++ b/drivers/iio/adc/ltc2497.c
@@ -95,7 +95,6 @@ static int ltc2497_result_and_measure(struct ltc2497core_driverdata *ddata,
static int ltc2497_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
const struct ltc2497_chip_info *chip_info;
struct iio_dev *indio_dev;
struct ltc2497_driverdata *st;
@@ -115,9 +114,7 @@ static int ltc2497_probe(struct i2c_client *client)
st->client = client;
st->common_ddata.result_and_measure = ltc2497_result_and_measure;
- chip_info = device_get_match_data(dev);
- if (!chip_info)
- chip_info = (const struct ltc2497_chip_info *)id->driver_data;
+ chip_info = i2c_get_match_data(client);
st->common_ddata.chip_info = chip_info;
resolution = chip_info->resolution;
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index b31581616ce3..7c2a98b8c3a9 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1599,9 +1599,7 @@ static int max1363_probe(struct i2c_client *client)
if (ret)
return ret;
- st->chip_info = device_get_match_data(&client->dev);
- if (!st->chip_info)
- st->chip_info = &max1363_chip_info_tbl[id->driver_data];
+ st->chip_info = i2c_get_match_data(client);
st->client = client;
st->vref_uv = st->chip_info->int_vref_mv * 1000;
@@ -1669,46 +1667,51 @@ static int max1363_probe(struct i2c_client *client)
return devm_iio_device_register(&client->dev, indio_dev);
}
+#define MAX1363_ID_TABLE(_name, cfg) { \
+ .name = _name, \
+ .driver_data = (kernel_ulong_t)&max1363_chip_info_tbl[cfg], \
+}
+
static const struct i2c_device_id max1363_id[] = {
- { "max1361", max1361 },
- { "max1362", max1362 },
- { "max1363", max1363 },
- { "max1364", max1364 },
- { "max1036", max1036 },
- { "max1037", max1037 },
- { "max1038", max1038 },
- { "max1039", max1039 },
- { "max1136", max1136 },
- { "max1137", max1137 },
- { "max1138", max1138 },
- { "max1139", max1139 },
- { "max1236", max1236 },
- { "max1237", max1237 },
- { "max1238", max1238 },
- { "max1239", max1239 },
- { "max11600", max11600 },
- { "max11601", max11601 },
- { "max11602", max11602 },
- { "max11603", max11603 },
- { "max11604", max11604 },
- { "max11605", max11605 },
- { "max11606", max11606 },
- { "max11607", max11607 },
- { "max11608", max11608 },
- { "max11609", max11609 },
- { "max11610", max11610 },
- { "max11611", max11611 },
- { "max11612", max11612 },
- { "max11613", max11613 },
- { "max11614", max11614 },
- { "max11615", max11615 },
- { "max11616", max11616 },
- { "max11617", max11617 },
- { "max11644", max11644 },
- { "max11645", max11645 },
- { "max11646", max11646 },
- { "max11647", max11647 },
- {}
+ MAX1363_ID_TABLE("max1361", max1361),
+ MAX1363_ID_TABLE("max1362", max1362),
+ MAX1363_ID_TABLE("max1363", max1363),
+ MAX1363_ID_TABLE("max1364", max1364),
+ MAX1363_ID_TABLE("max1036", max1036),
+ MAX1363_ID_TABLE("max1037", max1037),
+ MAX1363_ID_TABLE("max1038", max1038),
+ MAX1363_ID_TABLE("max1039", max1039),
+ MAX1363_ID_TABLE("max1136", max1136),
+ MAX1363_ID_TABLE("max1137", max1137),
+ MAX1363_ID_TABLE("max1138", max1138),
+ MAX1363_ID_TABLE("max1139", max1139),
+ MAX1363_ID_TABLE("max1236", max1236),
+ MAX1363_ID_TABLE("max1237", max1237),
+ MAX1363_ID_TABLE("max1238", max1238),
+ MAX1363_ID_TABLE("max1239", max1239),
+ MAX1363_ID_TABLE("max11600", max11600),
+ MAX1363_ID_TABLE("max11601", max11601),
+ MAX1363_ID_TABLE("max11602", max11602),
+ MAX1363_ID_TABLE("max11603", max11603),
+ MAX1363_ID_TABLE("max11604", max11604),
+ MAX1363_ID_TABLE("max11605", max11605),
+ MAX1363_ID_TABLE("max11606", max11606),
+ MAX1363_ID_TABLE("max11607", max11607),
+ MAX1363_ID_TABLE("max11608", max11608),
+ MAX1363_ID_TABLE("max11609", max11609),
+ MAX1363_ID_TABLE("max11610", max11610),
+ MAX1363_ID_TABLE("max11611", max11611),
+ MAX1363_ID_TABLE("max11612", max11612),
+ MAX1363_ID_TABLE("max11613", max11613),
+ MAX1363_ID_TABLE("max11614", max11614),
+ MAX1363_ID_TABLE("max11615", max11615),
+ MAX1363_ID_TABLE("max11616", max11616),
+ MAX1363_ID_TABLE("max11617", max11617),
+ MAX1363_ID_TABLE("max11644", max11644),
+ MAX1363_ID_TABLE("max11645", max11645),
+ MAX1363_ID_TABLE("max11646", max11646),
+ MAX1363_ID_TABLE("max11647", max11647),
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, max1363_id);
diff --git a/drivers/iio/adc/mcp3564.c b/drivers/iio/adc/mcp3564.c
new file mode 100644
index 000000000000..e3f1de5fcc5a
--- /dev/null
+++ b/drivers/iio/adc/mcp3564.c
@@ -0,0 +1,1513 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * IIO driver for MCP356X/MCP356XR and MCP346X/MCP346XR series ADC chip family
+ *
+ * Copyright (C) 2022-2023 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Marius Cristea <marius.cristea@microchip.com>
+ *
+ * Datasheet for MCP3561, MCP3562, MCP3564 can be found here:
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/MSLD/ProductDocuments/DataSheets/MCP3561-2-4-Family-Data-Sheet-DS20006181C.pdf
+ * Datasheet for MCP3561R, MCP3562R, MCP3564R can be found here:
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/APID/ProductDocuments/DataSheets/MCP3561_2_4R-Data-Sheet-DS200006391C.pdf
+ * Datasheet for MCP3461, MCP3462, MCP3464 can be found here:
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/APID/ProductDocuments/DataSheets/MCP3461-2-4-Two-Four-Eight-Channel-153.6-ksps-Low-Noise-16-Bit-Delta-Sigma-ADC-Data-Sheet-20006180D.pdf
+ * Datasheet for MCP3461R, MCP3462R, MCP3464R can be found here:
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/APID/ProductDocuments/DataSheets/MCP3461-2-4R-Family-Data-Sheet-DS20006404C.pdf
+ */
+
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/units.h>
+#include <linux/util_macros.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define MCP3564_ADCDATA_REG 0x00
+
+#define MCP3564_CONFIG0_REG 0x01
+#define MCP3564_CONFIG0_ADC_MODE_MASK GENMASK(1, 0)
+/* Current Source/Sink Selection Bits for Sensor Bias */
+#define MCP3564_CONFIG0_CS_SEL_MASK GENMASK(3, 2)
+/* Internal clock is selected and AMCLK is present on the analog master clock output pin */
+#define MCP3564_CONFIG0_USE_INT_CLK_OUTPUT_EN 0x03
+/* Internal clock is selected and no clock output is present on the CLK pin */
+#define MCP3564_CONFIG0_USE_INT_CLK 0x02
+/* External digital clock */
+#define MCP3564_CONFIG0_USE_EXT_CLK 0x01
+/* External digital clock (default) */
+#define MCP3564_CONFIG0_USE_EXT_CLK_DEFAULT 0x00
+#define MCP3564_CONFIG0_CLK_SEL_MASK GENMASK(5, 4)
+#define MCP3456_CONFIG0_BIT6_DEFAULT BIT(6)
+#define MCP3456_CONFIG0_VREF_MASK BIT(7)
+
+#define MCP3564_CONFIG1_REG 0x02
+#define MCP3564_CONFIG1_OVERSPL_RATIO_MASK GENMASK(5, 2)
+
+#define MCP3564_CONFIG2_REG 0x03
+#define MCP3564_CONFIG2_AZ_REF_MASK BIT(1)
+#define MCP3564_CONFIG2_AZ_MUX_MASK BIT(2)
+
+#define MCP3564_CONFIG2_HARDWARE_GAIN_MASK GENMASK(5, 3)
+#define MCP3564_DEFAULT_HARDWARE_GAIN 0x01
+#define MCP3564_CONFIG2_BOOST_CURRENT_MASK GENMASK(7, 6)
+
+#define MCP3564_CONFIG3_REG 0x04
+#define MCP3464_CONFIG3_EN_GAINCAL_MASK BIT(0)
+#define MCP3464_CONFIG3_EN_OFFCAL_MASK BIT(1)
+#define MCP3464_CONFIG3_EN_CRCCOM_MASK BIT(2)
+#define MCP3464_CONFIG3_CRC_FORMAT_MASK BIT(3)
+/*
+ * ADC Output Data Format 32-bit (25-bit right justified data + Channel ID):
+ * CHID[3:0] + SGN extension (4 bits) + 24-bit ADC data.
+ * It allows overrange with the SGN extension.
+ */
+#define MCP3464_CONFIG3_DATA_FMT_32B_WITH_CH_ID 3
+/*
+ * ADC Output Data Format 32-bit (25-bit right justified data):
+ * SGN extension (8-bit) + 24-bit ADC data.
+ * It allows overrange with the SGN extension.
+ */
+#define MCP3464_CONFIG3_DATA_FMT_32B_SGN_EXT 2
+/*
+ * ADC Output Data Format 32-bit (24-bit left justified data):
+ * 24-bit ADC data + 0x00 (8-bit).
+ * It does not allow overrange (ADC code locked to 0xFFFFFF or 0x800000).
+ */
+#define MCP3464_CONFIG3_DATA_FMT_32B_LEFT_JUSTIFIED 1
+/*
+ * ADC Output Data Format 24-bit (default ADC coding):
+ * 24-bit ADC data.
+ * It does not allow overrange (ADC code locked to 0xFFFFFF or 0x800000).
+ */
+#define MCP3464_CONFIG3_DATA_FMT_24B 0
+#define MCP3464_CONFIG3_DATA_FORMAT_MASK GENMASK(5, 4)
+
+/* Continuous Conversion mode or continuous conversion cycle in SCAN mode. */
+#define MCP3464_CONFIG3_CONV_MODE_CONTINUOUS 3
+/*
+ * One-shot conversion or one-shot cycle in SCAN mode. It sets ADC_MODE[1:0] to ‘10’
+ * (standby) at the end of the conversion or at the end of the conversion cycle in SCAN mode.
+ */
+#define MCP3464_CONFIG3_CONV_MODE_ONE_SHOT_STANDBY 2
+/*
+ * One-shot conversion or one-shot cycle in SCAN mode. It sets ADC_MODE[1:0] to ‘0x’ (ADC
+ * Shutdown) at the end of the conversion or at the end of the conversion cycle in SCAN
+ * mode (default).
+ */
+#define MCP3464_CONFIG3_CONV_MODE_ONE_SHOT_SHUTDOWN 0
+#define MCP3464_CONFIG3_CONV_MODE_MASK GENMASK(7, 6)
+
+#define MCP3564_IRQ_REG 0x05
+#define MCP3464_EN_STP_MASK BIT(0)
+#define MCP3464_EN_FASTCMD_MASK BIT(1)
+#define MCP3464_IRQ_MODE_0_MASK BIT(2)
+#define MCP3464_IRQ_MODE_1_MASK BIT(3)
+#define MCP3564_POR_STATUS_MASK BIT(4)
+#define MCP3564_CRCCFG_STATUS_MASK BIT(5)
+#define MCP3564_DATA_READY_MASK BIT(6)
+
+#define MCP3564_MUX_REG 0x06
+#define MCP3564_MUX_VIN_P_MASK GENMASK(7, 4)
+#define MCP3564_MUX_VIN_N_MASK GENMASK(3, 0)
+#define MCP3564_MUX_SET(x, y) (FIELD_PREP(MCP3564_MUX_VIN_P_MASK, (x)) | \
+ FIELD_PREP(MCP3564_MUX_VIN_N_MASK, (y)))
+
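+/*
+ * Worked example (illustrative): a single-ended measurement of CH0 against
+ * AGND would program MCP3564_MUX_SET(MCP3564_CH0, MCP3564_AGND), i.e.
+ * (0x0 << 4) | 0x8 = 0x08, into the MUX register (see the channel names
+ * enum below).
+ */
+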
+#define MCP3564_SCAN_REG 0x07
+#define MCP3564_SCAN_CH_SEL_MASK GENMASK(15, 0)
+#define MCP3564_SCAN_CH_SEL_SET(x) FIELD_PREP(MCP3564_SCAN_CH_SEL_MASK, (x))
+#define MCP3564_SCAN_DELAY_TIME_MASK GENMASK(23, 21)
+#define MCP3564_SCAN_DELAY_TIME_SET(x) FIELD_PREP(MCP3564_SCAN_DELAY_TIME_MASK, (x))
+#define MCP3564_SCAN_DEFAULT_VALUE 0
+
+#define MCP3564_TIMER_REG 0x08
+#define MCP3564_TIMER_DEFAULT_VALUE 0
+
+#define MCP3564_OFFSETCAL_REG 0x09
+#define MCP3564_DEFAULT_OFFSETCAL 0
+
+#define MCP3564_GAINCAL_REG 0x0A
+#define MCP3564_DEFAULT_GAINCAL 0x00800000
+
+#define MCP3564_RESERVED_B_REG 0x0B
+
+#define MCP3564_RESERVED_C_REG 0x0C
+#define MCP3564_C_REG_DEFAULT 0x50
+#define MCP3564R_C_REG_DEFAULT 0x30
+
+#define MCP3564_LOCK_REG 0x0D
+#define MCP3564_LOCK_WRITE_ACCESS_PASSWORD 0xA5
+#define MCP3564_RESERVED_E_REG 0x0E
+#define MCP3564_CRCCFG_REG 0x0F
+
+#define MCP3564_CMD_HW_ADDR_MASK GENMASK(7, 6)
+#define MCP3564_CMD_ADDR_MASK GENMASK(5, 2)
+
+#define MCP3564_HW_ADDR_MASK GENMASK(1, 0)
+
+#define MCP3564_FASTCMD_START 0x0A
+#define MCP3564_FASTCMD_RESET 0x0E
+
+#define MCP3461_HW_ID 0x0008
+#define MCP3462_HW_ID 0x0009
+#define MCP3464_HW_ID 0x000B
+
+#define MCP3561_HW_ID 0x000C
+#define MCP3562_HW_ID 0x000D
+#define MCP3564_HW_ID 0x000F
+#define MCP3564_HW_ID_MASK GENMASK(3, 0)
+
+#define MCP3564R_INT_VREF_MV 2400
+
+#define MCP3564_DATA_READY_TIMEOUT_MS 2000
+
+#define MCP3564_MAX_PGA 8
+#define MCP3564_MAX_BURNOUT_IDX 4
+#define MCP3564_MAX_CHANNELS 66
+
+enum mcp3564_ids {
+ mcp3461,
+ mcp3462,
+ mcp3464,
+ mcp3561,
+ mcp3562,
+ mcp3564,
+ mcp3461r,
+ mcp3462r,
+ mcp3464r,
+ mcp3561r,
+ mcp3562r,
+ mcp3564r,
+};
+
+enum mcp3564_delay_time {
+ MCP3564_NO_DELAY,
+ MCP3564_DELAY_8_DMCLK,
+ MCP3564_DELAY_16_DMCLK,
+ MCP3564_DELAY_32_DMCLK,
+ MCP3564_DELAY_64_DMCLK,
+ MCP3564_DELAY_128_DMCLK,
+ MCP3564_DELAY_256_DMCLK,
+ MCP3564_DELAY_512_DMCLK
+};
+
+enum mcp3564_adc_conversion_mode {
+ MCP3564_ADC_MODE_DEFAULT,
+ MCP3564_ADC_MODE_SHUTDOWN,
+ MCP3564_ADC_MODE_STANDBY,
+ MCP3564_ADC_MODE_CONVERSION
+};
+
+enum mcp3564_adc_bias_current {
+ MCP3564_BOOST_CURRENT_x0_50,
+ MCP3564_BOOST_CURRENT_x0_66,
+ MCP3564_BOOST_CURRENT_x1_00,
+ MCP3564_BOOST_CURRENT_x2_00
+};
+
+enum mcp3564_burnout {
+ MCP3564_CONFIG0_CS_SEL_0_0_uA,
+ MCP3564_CONFIG0_CS_SEL_0_9_uA,
+ MCP3564_CONFIG0_CS_SEL_3_7_uA,
+ MCP3564_CONFIG0_CS_SEL_15_uA
+};
+
+enum mcp3564_channel_names {
+ MCP3564_CH0,
+ MCP3564_CH1,
+ MCP3564_CH2,
+ MCP3564_CH3,
+ MCP3564_CH4,
+ MCP3564_CH5,
+ MCP3564_CH6,
+ MCP3564_CH7,
+ MCP3564_AGND,
+ MCP3564_AVDD,
+ MCP3564_RESERVED, /* do not use */
+ MCP3564_REFIN_POZ,
+ MCP3564_REFIN_NEG,
+ MCP3564_TEMP_DIODE_P,
+ MCP3564_TEMP_DIODE_M,
+ MCP3564_INTERNAL_VCM,
+};
+
+enum mcp3564_oversampling {
+ MCP3564_OVERSAMPLING_RATIO_32,
+ MCP3564_OVERSAMPLING_RATIO_64,
+ MCP3564_OVERSAMPLING_RATIO_128,
+ MCP3564_OVERSAMPLING_RATIO_256,
+ MCP3564_OVERSAMPLING_RATIO_512,
+ MCP3564_OVERSAMPLING_RATIO_1024,
+ MCP3564_OVERSAMPLING_RATIO_2048,
+ MCP3564_OVERSAMPLING_RATIO_4096,
+ MCP3564_OVERSAMPLING_RATIO_8192,
+ MCP3564_OVERSAMPLING_RATIO_16384,
+ MCP3564_OVERSAMPLING_RATIO_20480,
+ MCP3564_OVERSAMPLING_RATIO_24576,
+ MCP3564_OVERSAMPLING_RATIO_40960,
+ MCP3564_OVERSAMPLING_RATIO_49152,
+ MCP3564_OVERSAMPLING_RATIO_81920,
+ MCP3564_OVERSAMPLING_RATIO_98304
+};
+
+static const unsigned int mcp3564_oversampling_avail[] = {
+ [MCP3564_OVERSAMPLING_RATIO_32] = 32,
+ [MCP3564_OVERSAMPLING_RATIO_64] = 64,
+ [MCP3564_OVERSAMPLING_RATIO_128] = 128,
+ [MCP3564_OVERSAMPLING_RATIO_256] = 256,
+ [MCP3564_OVERSAMPLING_RATIO_512] = 512,
+ [MCP3564_OVERSAMPLING_RATIO_1024] = 1024,
+ [MCP3564_OVERSAMPLING_RATIO_2048] = 2048,
+ [MCP3564_OVERSAMPLING_RATIO_4096] = 4096,
+ [MCP3564_OVERSAMPLING_RATIO_8192] = 8192,
+ [MCP3564_OVERSAMPLING_RATIO_16384] = 16384,
+ [MCP3564_OVERSAMPLING_RATIO_20480] = 20480,
+ [MCP3564_OVERSAMPLING_RATIO_24576] = 24576,
+ [MCP3564_OVERSAMPLING_RATIO_40960] = 40960,
+ [MCP3564_OVERSAMPLING_RATIO_49152] = 49152,
+ [MCP3564_OVERSAMPLING_RATIO_81920] = 81920,
+ [MCP3564_OVERSAMPLING_RATIO_98304] = 98304
+};
+
+/*
+ * Current Source/Sink Selection Bits for Sensor Bias (source on VIN+/sink on VIN-)
+ */
+static const int mcp3564_burnout_avail[][2] = {
+ [MCP3564_CONFIG0_CS_SEL_0_0_uA] = { 0, 0 },
+ [MCP3564_CONFIG0_CS_SEL_0_9_uA] = { 0, 900 },
+ [MCP3564_CONFIG0_CS_SEL_3_7_uA] = { 0, 3700 },
+ [MCP3564_CONFIG0_CS_SEL_15_uA] = { 0, 15000 }
+};
+
+/*
+ * BOOST[1:0]: ADC Bias Current Selection
+ */
+static const char * const mcp3564_boost_current_avail[] = {
+ [MCP3564_BOOST_CURRENT_x0_50] = "0.5",
+ [MCP3564_BOOST_CURRENT_x0_66] = "0.66",
+ [MCP3564_BOOST_CURRENT_x1_00] = "1",
+ [MCP3564_BOOST_CURRENT_x2_00] = "2",
+};
+
+/*
+ * Calibration bias values
+ */
+static const int mcp3564_calib_bias[] = {
+ -8388608, /* min: -2^23 */
+ 1, /* step: 1 */
+ 8388607 /* max: 2^23 - 1 */
+};
+
+/*
+ * Calibration scale values
+ * The Gain Error Calibration register (GAINCAL) is an
+ * unsigned 24-bit register that holds the digital gain error
+ * calibration value, GAINCAL which could be calculated by
+ * GAINCAL (V/V) = (GAINCAL[23:0])/8388608
+ * The gain error calibration value range in equivalent voltage is [0; 2-2^(-23)]
+ */
+static const unsigned int mcp3564_calib_scale[] = {
+ 0, /* min: 0 */
+ 1, /* step: 1/8388608 */
+ 16777215 /* max: 2 - 2^(-23) */
+};
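+
+/*
+ * Worked example (illustrative): the power-on default GAINCAL of 0x800000
+ * (8388608) corresponds to a digital gain of 8388608 / 8388608 = 1 V/V,
+ * while 0x400000 would scale the conversion result by 0.5 V/V.
+ */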
+
+/* Programmable hardware gain x1/3, x1, x2, x4, x8, x16, x32, x64 */
+static const int mcp3564_hwgain_frac[] = {
+ 3, 10,
+ 1, 1,
+ 2, 1,
+ 4, 1,
+ 8, 1,
+ 16, 1,
+ 32, 1,
+ 64, 1
+};
+
+static const char *mcp3564_channel_labels[2] = {
+ "burnout_current", "temperature",
+};
+
+/**
+ * struct mcp3564_chip_info - chip specific data
+ * @name: device name
+ * @num_channels: number of channels
+ * @resolution: ADC resolution
+ * @have_vref: does the hardware have an internal voltage reference?
+ */
+struct mcp3564_chip_info {
+ const char *name;
+ unsigned int num_channels;
+ unsigned int resolution;
+ bool have_vref;
+};
+
+/**
+ * struct mcp3564_state - working data for an ADC device
+ * @chip_info: chip specific data
+ * @spi: SPI device structure
+ * @vref: the regulator device used as a voltage reference when an
+ * external voltage reference is used
+ * @vref_mv: voltage reference value in millivolts
+ * @lock: synchronize access to driver's state members
+ * @dev_addr: hardware device address
+ * @oversampling: index into the ADC's oversampling ratio list
+ * @hwgain: index into the ADC's hardware gain list
+ * @scale_tbls: table of precalculated scale values
+ * @calib_bias: calibration bias value
+ * @calib_scale: calibration scale value
+ * @current_boost_mode: index into the ADC's boost current list
+ * @burnout_mode: index into the ADC's burnout (bias) current list
+ * @auto_zeroing_mux: set if the ADC auto-zeroing algorithm is enabled
+ * @auto_zeroing_ref: set if the ADC auto-zeroing reference buffer setting is enabled
+ * @have_vref: does the ADC have an internal voltage reference?
+ * @labels: table with channel labels
+ */
+struct mcp3564_state {
+ const struct mcp3564_chip_info *chip_info;
+ struct spi_device *spi;
+ struct regulator *vref;
+ unsigned short vref_mv;
+ struct mutex lock; /* Synchronize access to driver's state members */
+ u8 dev_addr;
+ enum mcp3564_oversampling oversampling;
+ unsigned int hwgain;
+ unsigned int scale_tbls[MCP3564_MAX_PGA][2];
+ int calib_bias;
+ int calib_scale;
+ unsigned int current_boost_mode;
+ enum mcp3564_burnout burnout_mode;
+ bool auto_zeroing_mux;
+ bool auto_zeroing_ref;
+ bool have_vref;
+ const char *labels[MCP3564_MAX_CHANNELS];
+};
+
+static inline u8 mcp3564_cmd_write(u8 chip_addr, u8 reg)
+{
+ return FIELD_PREP(MCP3564_CMD_HW_ADDR_MASK, chip_addr) |
+ FIELD_PREP(MCP3564_CMD_ADDR_MASK, reg) |
+ BIT(1);
+}
+
+static inline u8 mcp3564_cmd_read(u8 chip_addr, u8 reg)
+{
+ return FIELD_PREP(MCP3564_CMD_HW_ADDR_MASK, chip_addr) |
+ FIELD_PREP(MCP3564_CMD_ADDR_MASK, reg) |
+ BIT(0);
+}
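+
+/*
+ * Worked example (illustrative): for a device at hardware address 1,
+ * writing CONFIG0 uses the command byte
+ * mcp3564_cmd_write(1, MCP3564_CONFIG0_REG) = (1 << 6) | (0x01 << 2) | BIT(1)
+ * = 0x46, and reading it back uses 0x45 (same fields, BIT(0) instead).
+ */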
+
+static int mcp3564_read_8bits(struct mcp3564_state *adc, u8 reg, u8 *val)
+{
+ int ret;
+ u8 tx_buf;
+ u8 rx_buf;
+
+ tx_buf = mcp3564_cmd_read(adc->dev_addr, reg);
+
+ ret = spi_write_then_read(adc->spi, &tx_buf, sizeof(tx_buf),
+ &rx_buf, sizeof(rx_buf));
+ *val = rx_buf;
+
+ return ret;
+}
+
+static int mcp3564_read_16bits(struct mcp3564_state *adc, u8 reg, u16 *val)
+{
+ int ret;
+ u8 tx_buf;
+ __be16 rx_buf;
+
+ tx_buf = mcp3564_cmd_read(adc->dev_addr, reg);
+
+ ret = spi_write_then_read(adc->spi, &tx_buf, sizeof(tx_buf),
+ &rx_buf, sizeof(rx_buf));
+ *val = be16_to_cpu(rx_buf);
+
+ return ret;
+}
+
+static int mcp3564_read_32bits(struct mcp3564_state *adc, u8 reg, u32 *val)
+{
+ int ret;
+ u8 tx_buf;
+ __be32 rx_buf;
+
+ tx_buf = mcp3564_cmd_read(adc->dev_addr, reg);
+
+ ret = spi_write_then_read(adc->spi, &tx_buf, sizeof(tx_buf),
+ &rx_buf, sizeof(rx_buf));
+ *val = be32_to_cpu(rx_buf);
+
+ return ret;
+}
+
+static int mcp3564_write_8bits(struct mcp3564_state *adc, u8 reg, u8 val)
+{
+ u8 tx_buf[2];
+
+ tx_buf[0] = mcp3564_cmd_write(adc->dev_addr, reg);
+ tx_buf[1] = val;
+
+ return spi_write_then_read(adc->spi, tx_buf, sizeof(tx_buf), NULL, 0);
+}
+
+static int mcp3564_write_24bits(struct mcp3564_state *adc, u8 reg, u32 val)
+{
+ __be32 val_be;
+
+ val |= (mcp3564_cmd_write(adc->dev_addr, reg) << 24);
+ val_be = cpu_to_be32(val);
+
+ return spi_write_then_read(adc->spi, &val_be, sizeof(val_be), NULL, 0);
+}
+
+static int mcp3564_fast_cmd(struct mcp3564_state *adc, u8 fast_cmd)
+{
+ u8 val;
+
+ val = FIELD_PREP(MCP3564_CMD_HW_ADDR_MASK, adc->dev_addr) |
+ FIELD_PREP(MCP3564_CMD_ADDR_MASK, fast_cmd);
+
+ return spi_write_then_read(adc->spi, &val, 1, NULL, 0);
+}
+
+static int mcp3564_update_8bits(struct mcp3564_state *adc, u8 reg, u32 mask, u8 val)
+{
+ u8 tmp;
+ int ret;
+
+ val &= mask;
+
+ ret = mcp3564_read_8bits(adc, reg, &tmp);
+ if (ret < 0)
+ return ret;
+
+ tmp &= ~mask;
+ tmp |= val;
+
+ return mcp3564_write_8bits(adc, reg, tmp);
+}
+
+static int mcp3564_set_current_boost_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int mode)
+{
+ struct mcp3564_state *adc = iio_priv(indio_dev);
+ int ret;
+
+ dev_dbg(&indio_dev->dev, "%s: %d\n", __func__, mode);
+
+ mutex_lock(&adc->lock);
+ ret = mcp3564_update_8bits(adc, MCP3564_CONFIG2_REG, MCP3564_CONFIG2_BOOST_CURRENT_MASK,
+ FIELD_PREP(MCP3564_CONFIG2_BOOST_CURRENT_MASK, mode));
+
+ if (ret)
+ dev_err(&indio_dev->dev, "Failed to configure CONFIG2 register\n");
+ else
+ adc->current_boost_mode = mode;
+
+ mutex_unlock(&adc->lock);
+
+ return ret;
+}
+
+static int mcp3564_get_current_boost_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct mcp3564_state *adc = iio_priv(indio_dev);
+
+ return adc->current_boost_mode;
+}
+
+static const struct iio_enum mcp3564_current_boost_mode_enum = {
+ .items = mcp3564_boost_current_avail,
+ .num_items = ARRAY_SIZE(mcp3564_boost_current_avail),
+ .set = mcp3564_set_current_boost_mode,
+ .get = mcp3564_get_current_boost_mode,
+};
+
+static const struct iio_chan_spec_ext_info mcp3564_ext_info[] = {
+ IIO_ENUM("boost_current_gain", IIO_SHARED_BY_ALL, &mcp3564_current_boost_mode_enum),
+ {
+ .name = "boost_current_gain_available",
+ .shared = IIO_SHARED_BY_ALL,
+ .read = iio_enum_available_read,
+ .private = (uintptr_t)&mcp3564_current_boost_mode_enum,
+ },
+ { }
+};
+
+static ssize_t mcp3564_auto_zeroing_mux_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct mcp3564_state *adc = iio_priv(indio_dev);
+
+ return sysfs_emit(buf, "%d\n", adc->auto_zeroing_mux);
+}
+
+static ssize_t mcp3564_auto_zeroing_mux_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct mcp3564_state *adc = iio_priv(indio_dev);
+ bool auto_zero;
+ int ret;
+
+ ret = kstrtobool(buf, &auto_zero);
+ if (ret)
+ return ret;
+
+ mutex_lock(&adc->lock);
+ ret = mcp3564_update_8bits(adc, MCP3564_CONFIG2_REG, MCP3564_CONFIG2_AZ_MUX_MASK,
+ FIELD_PREP(MCP3564_CONFIG2_AZ_MUX_MASK, auto_zero));
+
+ if (ret)
+ dev_err(&indio_dev->dev, "Failed to update CONFIG2 register\n");
+ else
+ adc->auto_zeroing_mux = auto_zero;
+
+ mutex_unlock(&adc->lock);
+
+ return ret ? ret : len;
+}
+
+static ssize_t mcp3564_auto_zeroing_ref_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct mcp3564_state *adc = iio_priv(indio_dev);
+
+ return sysfs_emit(buf, "%d\n", adc->auto_zeroing_ref);
+}
+
+static ssize_t mcp3564_auto_zeroing_ref_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct mcp3564_state *adc = iio_priv(indio_dev);
+ bool auto_zero;
+ int ret;
+
+ ret = kstrtobool(buf, &auto_zero);
+ if (ret)
+ return ret;
+
+ mutex_lock(&adc->lock);
+ ret = mcp3564_update_8bits(adc, MCP3564_CONFIG2_REG, MCP3564_CONFIG2_AZ_REF_MASK,
+ FIELD_PREP(MCP3564_CONFIG2_AZ_REF_MASK, auto_zero));
+
+ if (ret)
+ dev_err(&indio_dev->dev, "Failed to update CONFIG2 register\n");
+ else
+ adc->auto_zeroing_ref = auto_zero;
+
+ mutex_unlock(&adc->lock);
+
+ return ret ? ret : len;
+}
+
+static const struct iio_chan_spec mcp3564_channel_template = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .differential = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .ext_info = mcp3564_ext_info,
+};
+
+static const struct iio_chan_spec mcp3564_temp_channel_template = {
+ .type = IIO_TEMP,
+ .channel = 0,
+ .address = ((MCP3564_TEMP_DIODE_P << 4) | MCP3564_TEMP_DIODE_M),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+};
+
+static const struct iio_chan_spec mcp3564_burnout_channel_template = {
+ .type = IIO_CURRENT,
+ .output = true,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_RAW),
+};
+
+/*
+ * The number of channels can be calculated as:
+ * num_channels = single_ended_input + differential_input + temperature + burnout
+ * E.g. for the MCP3561 (only 2 channels available: CH0 and CH1):
+ * single_ended_input = (CH0 - GND), (CH1 - GND) = 2
+ * differential_input = (CH0 - CH1), (CH1 - CH0) = 2
+ * num_channels = 2 + 2 + 2
+ * Generic formula is:
+ * num_channels = P^R(Number_of_single_ended_channels, 2) + 2 (temperature + burnout channels)
+ * P^R(Number_of_single_ended_channels, 2) is Permutations with Replacement of
+ * Number_of_single_ended_channels taken by 2
+ */
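+/*
+ * Illustrative check of the formula above (not part of the driver logic):
+ * the 8-input parts (MCP3464/MCP3564 variants) give
+ * P^R(8, 2) = 8 * 8 = 64 input combinations, so
+ * num_channels = 64 + 2 = 66,
+ * matching the num_channels entries in the table below (and, likewise,
+ * 4 * 4 + 2 = 18 for the 4-input parts, 2 * 2 + 2 = 6 for the 2-input ones).
+ */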
+static const struct mcp3564_chip_info mcp3564_chip_infos_tbl[] = {
+ [mcp3461] = {
+ .name = "mcp3461",
+ .num_channels = 6,
+ .resolution = 16,
+ .have_vref = false,
+ },
+ [mcp3462] = {
+ .name = "mcp3462",
+ .num_channels = 18,
+ .resolution = 16,
+ .have_vref = false,
+ },
+ [mcp3464] = {
+ .name = "mcp3464",
+ .num_channels = 66,
+ .resolution = 16,
+ .have_vref = false,
+ },
+ [mcp3561] = {
+ .name = "mcp3561",
+ .num_channels = 6,
+ .resolution = 24,
+ .have_vref = false,
+ },
+ [mcp3562] = {
+ .name = "mcp3562",
+ .num_channels = 18,
+ .resolution = 24,
+ .have_vref = false,
+ },
+ [mcp3564] = {
+ .name = "mcp3564",
+ .num_channels = 66,
+ .resolution = 24,
+ .have_vref = false,
+ },
+ [mcp3461r] = {
+ .name = "mcp3461r",
+ .num_channels = 6,
+ .resolution = 16,
+ .have_vref = false,
+ },
+ [mcp3462r] = {
+ .name = "mcp3462r",
+ .num_channels = 18,
+ .resolution = 16,
+ .have_vref = true,
+ },
+ [mcp3464r] = {
+ .name = "mcp3464r",
+ .num_channels = 66,
+ .resolution = 16,
+ .have_vref = true,
+ },
+ [mcp3561r] = {
+ .name = "mcp3561r",
+ .num_channels = 6,
+ .resolution = 24,
+ .have_vref = true,
+ },
+ [mcp3562r] = {
+ .name = "mcp3562r",
+ .num_channels = 18,
+ .resolution = 24,
+ .have_vref = true,
+ },
+ [mcp3564r] = {
+ .name = "mcp3564r",
+ .num_channels = 66,
+ .resolution = 24,
+ .have_vref = true,
+ },
+};
+
+static int mcp3564_read_single_value(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *channel,
+ int *val)
+{
+ struct mcp3564_state *adc = iio_priv(indio_dev);
+ int ret;
+ u8 tmp;
+ int ret_read = 0;
+
+ ret = mcp3564_write_8bits(adc, MCP3564_MUX_REG, channel->address);
+ if (ret)
+ return ret;
+
+ /* Start ADC Conversion using fast command (overwrites ADC_MODE[1:0] = 11) */
+ ret = mcp3564_fast_cmd(adc, MCP3564_FASTCMD_START);
+ if (ret)
+ return ret;
+
+ /*
+ * Check if the conversion is ready. If not, wait a little bit, and
+ * in case of timeout exit with an error.
+ */
+ ret = read_poll_timeout(mcp3564_read_8bits, ret_read,
+ ret_read || !(tmp & MCP3564_DATA_READY_MASK),
+ 20000, MCP3564_DATA_READY_TIMEOUT_MS * 1000, true,
+ adc, MCP3564_IRQ_REG, &tmp);
+
+ /* failed to read status register */
+ if (ret_read)
+ return ret_read;
+
+ if (ret)
+ return ret;
+
+ if (tmp & MCP3564_DATA_READY_MASK)
+ /* conversion did not finish in time */
+ return -EBUSY;
+
+ return mcp3564_read_32bits(adc, MCP3564_ADCDATA_REG, val);
+}
+
+static int mcp3564_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *channel,
+ const int **vals, int *type,
+ int *length, long mask)
+{
+ struct mcp3564_state *adc = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (!channel->output)
+ return -EINVAL;
+
+ *vals = mcp3564_burnout_avail[0];
+ *length = ARRAY_SIZE(mcp3564_burnout_avail) * 2;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *vals = mcp3564_oversampling_avail;
+ *length = ARRAY_SIZE(mcp3564_oversampling_avail);
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_SCALE:
+ *vals = (int *)adc->scale_tbls;
+ *length = ARRAY_SIZE(adc->scale_tbls) * 2;
+ *type = IIO_VAL_INT_PLUS_NANO;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ *vals = mcp3564_calib_bias;
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_RANGE;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ *vals = mcp3564_calib_scale;
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_RANGE;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mcp3564_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *channel,
+ int *val, int *val2, long mask)
+{
+ struct mcp3564_state *adc = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (channel->output) {
+ mutex_lock(&adc->lock);
+ *val = mcp3564_burnout_avail[adc->burnout_mode][0];
+ *val2 = mcp3564_burnout_avail[adc->burnout_mode][1];
+ mutex_unlock(&adc->lock);
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ ret = mcp3564_read_single_value(indio_dev, channel, val);
+ if (ret)
+ return -EINVAL;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ mutex_lock(&adc->lock);
+ *val = adc->scale_tbls[adc->hwgain][0];
+ *val2 = adc->scale_tbls[adc->hwgain][1];
+ mutex_unlock(&adc->lock);
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *val = mcp3564_oversampling_avail[adc->oversampling];
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ *val = adc->calib_bias;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ *val = adc->calib_scale;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mcp3564_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long info)
+{
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ case IIO_CHAN_INFO_CALIBSCALE:
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mcp3564_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *channel, int val,
+ int val2, long mask)
+{
+ struct mcp3564_state *adc = iio_priv(indio_dev);
+ int tmp;
+ unsigned int hwgain;
+ enum mcp3564_burnout burnout;
+ int ret = 0;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (!channel->output)
+ return -EINVAL;
+
+ for (burnout = 0; burnout < MCP3564_MAX_BURNOUT_IDX; burnout++)
+ if (val == mcp3564_burnout_avail[burnout][0] &&
+ val2 == mcp3564_burnout_avail[burnout][1])
+ break;
+
+ if (burnout == MCP3564_MAX_BURNOUT_IDX)
+ return -EINVAL;
+
+ if (burnout == adc->burnout_mode)
+ return ret;
+
+ mutex_lock(&adc->lock);
+ ret = mcp3564_update_8bits(adc, MCP3564_CONFIG0_REG,
+ MCP3564_CONFIG0_CS_SEL_MASK,
+ FIELD_PREP(MCP3564_CONFIG0_CS_SEL_MASK, burnout));
+
+ if (ret)
+ dev_err(&indio_dev->dev, "Failed to configure burnout current\n");
+ else
+ adc->burnout_mode = burnout;
+ mutex_unlock(&adc->lock);
+ return ret;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (val < mcp3564_calib_bias[0] || val > mcp3564_calib_bias[2])
+ return -EINVAL;
+
+ mutex_lock(&adc->lock);
+ ret = mcp3564_write_24bits(adc, MCP3564_OFFSETCAL_REG, val);
+ if (!ret)
+ adc->calib_bias = val;
+ mutex_unlock(&adc->lock);
+ return ret;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ if (val < mcp3564_calib_scale[0] || val > mcp3564_calib_scale[2])
+ return -EINVAL;
+
+ if (adc->calib_scale == val)
+ return ret;
+
+ mutex_lock(&adc->lock);
+ ret = mcp3564_write_24bits(adc, MCP3564_GAINCAL_REG, val);
+ if (!ret)
+ adc->calib_scale = val;
+ mutex_unlock(&adc->lock);
+ return ret;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ if (val < 0)
+ return -EINVAL;
+
+ tmp = find_closest(val, mcp3564_oversampling_avail,
+ ARRAY_SIZE(mcp3564_oversampling_avail));
+
+ if (adc->oversampling == tmp)
+ return ret;
+
+ mutex_lock(&adc->lock);
+ ret = mcp3564_update_8bits(adc, MCP3564_CONFIG1_REG,
+ MCP3564_CONFIG1_OVERSPL_RATIO_MASK,
+ FIELD_PREP(MCP3564_CONFIG1_OVERSPL_RATIO_MASK,
+ tmp));
+ if (!ret)
+ adc->oversampling = tmp;
+ mutex_unlock(&adc->lock);
+ return ret;
+ case IIO_CHAN_INFO_SCALE:
+ for (hwgain = 0; hwgain < MCP3564_MAX_PGA; hwgain++)
+ if (val == adc->scale_tbls[hwgain][0] &&
+ val2 == adc->scale_tbls[hwgain][1])
+ break;
+
+ if (hwgain == MCP3564_MAX_PGA)
+ return -EINVAL;
+
+ if (hwgain == adc->hwgain)
+ return ret;
+
+ mutex_lock(&adc->lock);
+ ret = mcp3564_update_8bits(adc, MCP3564_CONFIG2_REG,
+ MCP3564_CONFIG2_HARDWARE_GAIN_MASK,
+ FIELD_PREP(MCP3564_CONFIG2_HARDWARE_GAIN_MASK, hwgain));
+ if (!ret)
+ adc->hwgain = hwgain;
+
+ mutex_unlock(&adc->lock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mcp3564_read_label(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, char *label)
+{
+ struct mcp3564_state *adc = iio_priv(indio_dev);
+
+ return sprintf(label, "%s\n", adc->labels[chan->scan_index]);
+}
+
+static int mcp3564_parse_fw_children(struct iio_dev *indio_dev)
+{
+ struct mcp3564_state *adc = iio_priv(indio_dev);
+ struct device *dev = &adc->spi->dev;
+ struct iio_chan_spec *channels;
+ struct fwnode_handle *child;
+ struct iio_chan_spec chanspec = mcp3564_channel_template;
+ struct iio_chan_spec temp_chanspec = mcp3564_temp_channel_template;
+ struct iio_chan_spec burnout_chanspec = mcp3564_burnout_channel_template;
+ int chan_idx = 0;
+ unsigned int num_ch;
+ u32 inputs[2];
+ const char *node_name;
+ const char *label;
+ int ret;
+
+ num_ch = device_get_child_node_count(dev);
+ if (num_ch == 0)
+ return dev_err_probe(&indio_dev->dev, -ENODEV,
+ "FW has no channels defined\n");
+
+ /* Reserve space for burnout and temperature channel */
+ num_ch += 2;
+
+ if (num_ch > adc->chip_info->num_channels)
+ return dev_err_probe(dev, -EINVAL, "Too many channels %d > %d\n",
+ num_ch, adc->chip_info->num_channels);
+
+ channels = devm_kcalloc(dev, num_ch, sizeof(*channels), GFP_KERNEL);
+ if (!channels)
+ return dev_err_probe(dev, -ENOMEM, "Can't allocate memory\n");
+
+ device_for_each_child_node(dev, child) {
+ node_name = fwnode_get_name(child);
+
+ if (fwnode_property_present(child, "diff-channels")) {
+ ret = fwnode_property_read_u32_array(child,
+ "diff-channels",
+ inputs,
+ ARRAY_SIZE(inputs));
+ chanspec.differential = 1;
+ } else {
+ ret = fwnode_property_read_u32(child, "reg", &inputs[0]);
+
+ chanspec.differential = 0;
+ inputs[1] = MCP3564_AGND;
+ }
+ if (ret) {
+ fwnode_handle_put(child);
+ return ret;
+ }
+
+ if (inputs[0] > MCP3564_INTERNAL_VCM ||
+ inputs[1] > MCP3564_INTERNAL_VCM) {
+ fwnode_handle_put(child);
+ return dev_err_probe(&indio_dev->dev, -EINVAL,
+ "Channel index > %d, for %s\n",
+ MCP3564_INTERNAL_VCM + 1,
+ node_name);
+ }
+
+ chanspec.address = (inputs[0] << 4) | inputs[1];
+ chanspec.channel = inputs[0];
+ chanspec.channel2 = inputs[1];
+ chanspec.scan_index = chan_idx;
+
+ if (fwnode_property_present(child, "label")) {
+ fwnode_property_read_string(child, "label", &label);
+ adc->labels[chan_idx] = label;
+ }
+
+ channels[chan_idx] = chanspec;
+ chan_idx++;
+ }
+
+ /* Add burnout current channel */
+ burnout_chanspec.scan_index = chan_idx;
+ channels[chan_idx] = burnout_chanspec;
+ adc->labels[chan_idx] = mcp3564_channel_labels[0];
+ chanspec.scan_index = chan_idx;
+ chan_idx++;
+
+ /* Add temperature channel */
+ temp_chanspec.scan_index = chan_idx;
+ channels[chan_idx] = temp_chanspec;
+ adc->labels[chan_idx] = mcp3564_channel_labels[1];
+ chan_idx++;
+
+ indio_dev->num_channels = chan_idx;
+ indio_dev->channels = channels;
+
+ return 0;
+}
+
+static void mcp3564_disable_reg(void *reg)
+{
+ regulator_disable(reg);
+}
+
+static void mcp3564_fill_scale_tbls(struct mcp3564_state *adc)
+{
+ unsigned int pow = adc->chip_info->resolution - 1;
+ int ref;
+ unsigned int i;
+ int tmp0;
+ u64 tmp1;
+
+ for (i = 0; i < MCP3564_MAX_PGA; i++) {
+ ref = adc->vref_mv;
+ tmp1 = ((u64)ref * NANO) >> pow;
+ div_u64_rem(tmp1, NANO, &tmp0);
+
+ tmp1 = tmp1 * mcp3564_hwgain_frac[(2 * i) + 1];
+ tmp0 = (int)div_u64(tmp1, mcp3564_hwgain_frac[2 * i]);
+
+ adc->scale_tbls[i][1] = tmp0;
+ }
+}
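+
+/*
+ * Rough worked example for the computation above (illustrative only; it
+ * assumes the 1x entry of mcp3564_hwgain_frac is 1/1 and that vref_mv is
+ * 2400, i.e. the internal reference of the R variants):
+ * (2400 * 10^9) >> 23 = 286102, so the 24-bit parts report a scale of
+ * roughly 0.000286 mV per LSB at gain 1x.
+ */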
+
+static int mcp3564_config(struct iio_dev *indio_dev)
+{
+ struct mcp3564_state *adc = iio_priv(indio_dev);
+ struct device *dev = &adc->spi->dev;
+ const struct spi_device_id *dev_id;
+ u8 tmp_reg;
+ u16 tmp_u16;
+ enum mcp3564_ids ids;
+ int ret = 0;
+ unsigned int tmp = 0x01;
+ bool err = false;
+
+ /*
+ * The address is set on a per-device basis by factory-programmed fuses.
+ * Unless a different address is requested at ordering time, the fuses
+ * are set to 0x1. The device address is part of the device markings to
+ * avoid potential confusion. The address is coded on two bits, so four
+ * addresses are available when multiple devices are present on the same
+ * SPI bus with only one Chip Select line for all of them.
+ */
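+ /*
+ * Hypothetical devicetree snippet selecting address 1 (for illustration
+ * only; the binding document is authoritative):
+ *
+ * adc@0 {
+ * compatible = "microchip,mcp3564r";
+ * reg = <0>;
+ * microchip,hw-device-address = <1>;
+ * };
+ */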
+ device_property_read_u32(dev, "microchip,hw-device-address", &tmp);
+
+ if (tmp > 3)
+ return dev_err_probe(dev, -EINVAL,
+ "invalid device address. Must be in range 0-3.\n");
+
+ adc->dev_addr = FIELD_GET(MCP3564_HW_ADDR_MASK, tmp);
+
+ dev_dbg(dev, "use HW device address %i\n", adc->dev_addr);
+
+ ret = mcp3564_read_8bits(adc, MCP3564_RESERVED_C_REG, &tmp_reg);
+ if (ret < 0)
+ return ret;
+
+ switch (tmp_reg) {
+ case MCP3564_C_REG_DEFAULT:
+ adc->have_vref = false;
+ break;
+ case MCP3564R_C_REG_DEFAULT:
+ adc->have_vref = true;
+ break;
+ default:
+ dev_info(dev, "Unknown chip found: %d\n", tmp_reg);
+ err = true;
+ }
+
+ if (!err) {
+ ret = mcp3564_read_16bits(adc, MCP3564_RESERVED_E_REG, &tmp_u16);
+ if (ret < 0)
+ return ret;
+
+ switch (tmp_u16 & MCP3564_HW_ID_MASK) {
+ case MCP3461_HW_ID:
+ if (adc->have_vref)
+ ids = mcp3461r;
+ else
+ ids = mcp3461;
+ break;
+ case MCP3462_HW_ID:
+ if (adc->have_vref)
+ ids = mcp3462r;
+ else
+ ids = mcp3462;
+ break;
+ case MCP3464_HW_ID:
+ if (adc->have_vref)
+ ids = mcp3464r;
+ else
+ ids = mcp3464;
+ break;
+ case MCP3561_HW_ID:
+ if (adc->have_vref)
+ ids = mcp3561r;
+ else
+ ids = mcp3561;
+ break;
+ case MCP3562_HW_ID:
+ if (adc->have_vref)
+ ids = mcp3562r;
+ else
+ ids = mcp3562;
+ break;
+ case MCP3564_HW_ID:
+ if (adc->have_vref)
+ ids = mcp3564r;
+ else
+ ids = mcp3564;
+ break;
+ default:
+ dev_info(dev, "Unknown chip found: %d\n", tmp_u16);
+ err = true;
+ }
+ }
+
+ if (err) {
+ /*
+ * If the hardware could not be identified from the internal registers,
+ * fall back to the compatible string / SPI device ID provided by the
+ * firmware, which also covers newer part numbers.
+ */
+ adc->chip_info = spi_get_device_match_data(adc->spi);
+ if (!adc->chip_info) {
+ dev_id = spi_get_device_id(adc->spi);
+ adc->chip_info = (const struct mcp3564_chip_info *)dev_id->driver_data;
+ }
+
+ adc->have_vref = adc->chip_info->have_vref;
+ } else {
+ adc->chip_info = &mcp3564_chip_infos_tbl[ids];
+ }
+
+ dev_dbg(dev, "Found %s chip\n", adc->chip_info->name);
+
+ adc->vref = devm_regulator_get_optional(dev, "vref");
+ if (IS_ERR(adc->vref)) {
+ if (PTR_ERR(adc->vref) != -ENODEV)
+ return dev_err_probe(dev, PTR_ERR(adc->vref),
+ "failed to get regulator\n");
+
+ /* Check if chip has internal vref */
+ if (!adc->have_vref)
+ return dev_err_probe(dev, PTR_ERR(adc->vref),
+ "Unknown Vref\n");
+ adc->vref = NULL;
+ dev_dbg(dev, "%s: Using internal Vref\n", __func__);
+ } else {
+ ret = regulator_enable(adc->vref);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, mcp3564_disable_reg,
+ adc->vref);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "%s: Using External Vref\n", __func__);
+
+ ret = regulator_get_voltage(adc->vref);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to read vref regulator\n");
+
+ adc->vref_mv = ret / MILLI;
+ }
+
+ ret = mcp3564_parse_fw_children(indio_dev);
+ if (ret)
+ return ret;
+
+ /*
+ * Command sequence that ensures recovery with the desired settings
+ * in any loss-of-power scenario (Full Chip Reset):
+ * - Write LOCK register to 0xA5
+ * - Write IRQ register to 0x03
+ * - Send "Device Full Reset" fast command
+ * - Wait 1ms for "Full Reset" to complete
+ */
+ ret = mcp3564_write_8bits(adc, MCP3564_LOCK_REG, MCP3564_LOCK_WRITE_ACCESS_PASSWORD);
+ if (ret)
+ return ret;
+
+ ret = mcp3564_write_8bits(adc, MCP3564_IRQ_REG, 0x03);
+ if (ret)
+ return ret;
+
+ ret = mcp3564_fast_cmd(adc, MCP3564_FASTCMD_RESET);
+ if (ret)
+ return ret;
+
+ /*
+ * After a Full Reset, wait some time for the part to fully reset and
+ * settle back into its default configuration.
+ * From the datasheet: POR (Power On Reset) time is ~1us, so 1ms should
+ * be more than enough.
+ */
+ mdelay(1);
+
+ /* set a gain of 1x for GAINCAL */
+ ret = mcp3564_write_24bits(adc, MCP3564_GAINCAL_REG, MCP3564_DEFAULT_GAINCAL);
+ if (ret)
+ return ret;
+
+ adc->calib_scale = MCP3564_DEFAULT_GAINCAL;
+
+ ret = mcp3564_write_24bits(adc, MCP3564_OFFSETCAL_REG, MCP3564_DEFAULT_OFFSETCAL);
+ if (ret)
+ return ret;
+
+ ret = mcp3564_write_24bits(adc, MCP3564_TIMER_REG, MCP3564_TIMER_DEFAULT_VALUE);
+ if (ret)
+ return ret;
+
+ ret = mcp3564_write_24bits(adc, MCP3564_SCAN_REG,
+ MCP3564_SCAN_DELAY_TIME_SET(MCP3564_NO_DELAY) |
+ MCP3564_SCAN_CH_SEL_SET(MCP3564_SCAN_DEFAULT_VALUE));
+ if (ret)
+ return ret;
+
+ ret = mcp3564_write_8bits(adc, MCP3564_MUX_REG, MCP3564_MUX_SET(MCP3564_CH0, MCP3564_CH1));
+ if (ret)
+ return ret;
+
+ ret = mcp3564_write_8bits(adc, MCP3564_IRQ_REG,
+ FIELD_PREP(MCP3464_EN_FASTCMD_MASK, 1) |
+ FIELD_PREP(MCP3464_EN_STP_MASK, 1));
+ if (ret)
+ return ret;
+
+ tmp_reg = FIELD_PREP(MCP3464_CONFIG3_CONV_MODE_MASK,
+ MCP3464_CONFIG3_CONV_MODE_ONE_SHOT_STANDBY);
+ tmp_reg |= FIELD_PREP(MCP3464_CONFIG3_DATA_FORMAT_MASK,
+ MCP3464_CONFIG3_DATA_FMT_32B_SGN_EXT);
+ tmp_reg |= MCP3464_CONFIG3_EN_OFFCAL_MASK;
+ tmp_reg |= MCP3464_CONFIG3_EN_GAINCAL_MASK;
+
+ ret = mcp3564_write_8bits(adc, MCP3564_CONFIG3_REG, tmp_reg);
+ if (ret)
+ return ret;
+
+ tmp_reg = FIELD_PREP(MCP3564_CONFIG2_BOOST_CURRENT_MASK, MCP3564_BOOST_CURRENT_x1_00);
+ tmp_reg |= FIELD_PREP(MCP3564_CONFIG2_HARDWARE_GAIN_MASK, 0x01);
+ tmp_reg |= FIELD_PREP(MCP3564_CONFIG2_AZ_MUX_MASK, 1);
+
+ ret = mcp3564_write_8bits(adc, MCP3564_CONFIG2_REG, tmp_reg);
+ if (ret)
+ return ret;
+
+ adc->hwgain = 0x01;
+ adc->auto_zeroing_mux = true;
+ adc->auto_zeroing_ref = false;
+ adc->current_boost_mode = MCP3564_BOOST_CURRENT_x1_00;
+
+ tmp_reg = FIELD_PREP(MCP3564_CONFIG1_OVERSPL_RATIO_MASK, MCP3564_OVERSAMPLING_RATIO_98304);
+
+ ret = mcp3564_write_8bits(adc, MCP3564_CONFIG1_REG, tmp_reg);
+ if (ret)
+ return ret;
+
+ adc->oversampling = MCP3564_OVERSAMPLING_RATIO_98304;
+
+ tmp_reg = FIELD_PREP(MCP3564_CONFIG0_ADC_MODE_MASK, MCP3564_ADC_MODE_STANDBY);
+ tmp_reg |= FIELD_PREP(MCP3564_CONFIG0_CS_SEL_MASK, MCP3564_CONFIG0_CS_SEL_0_0_uA);
+ tmp_reg |= FIELD_PREP(MCP3564_CONFIG0_CLK_SEL_MASK, MCP3564_CONFIG0_USE_INT_CLK);
+ tmp_reg |= MCP3456_CONFIG0_BIT6_DEFAULT;
+
+ if (!adc->vref) {
+ tmp_reg |= FIELD_PREP(MCP3456_CONFIG0_VREF_MASK, 1);
+ adc->vref_mv = MCP3564R_INT_VREF_MV;
+ }
+
+ ret = mcp3564_write_8bits(adc, MCP3564_CONFIG0_REG, tmp_reg);
+
+ adc->burnout_mode = MCP3564_CONFIG0_CS_SEL_0_0_uA;
+
+ return ret;
+}
+
+static IIO_DEVICE_ATTR(auto_zeroing_ref_enable, 0644,
+ mcp3564_auto_zeroing_ref_show,
+ mcp3564_auto_zeroing_ref_store, 0);
+
+static IIO_DEVICE_ATTR(auto_zeroing_mux_enable, 0644,
+ mcp3564_auto_zeroing_mux_show,
+ mcp3564_auto_zeroing_mux_store, 0);
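+
+/*
+ * Illustrative sketch of how these controls surface in userspace (the usual
+ * IIO sysfs layout; the device index is an assumption):
+ *
+ * cat /sys/bus/iio/devices/iio:device0/auto_zeroing_mux_enable
+ * echo 1 > /sys/bus/iio/devices/iio:device0/auto_zeroing_ref_enable
+ */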
+
+static struct attribute *mcp3564_attributes[] = {
+ &iio_dev_attr_auto_zeroing_mux_enable.dev_attr.attr,
+ NULL
+};
+
+static struct attribute *mcp3564r_attributes[] = {
+ &iio_dev_attr_auto_zeroing_mux_enable.dev_attr.attr,
+ &iio_dev_attr_auto_zeroing_ref_enable.dev_attr.attr,
+ NULL
+};
+
+static struct attribute_group mcp3564_attribute_group = {
+ .attrs = mcp3564_attributes,
+};
+
+static struct attribute_group mcp3564r_attribute_group = {
+ .attrs = mcp3564r_attributes,
+};
+
+static const struct iio_info mcp3564_info = {
+ .read_raw = mcp3564_read_raw,
+ .read_avail = mcp3564_read_avail,
+ .write_raw = mcp3564_write_raw,
+ .write_raw_get_fmt = mcp3564_write_raw_get_fmt,
+ .read_label = mcp3564_read_label,
+ .attrs = &mcp3564_attribute_group,
+};
+
+static const struct iio_info mcp3564r_info = {
+ .read_raw = mcp3564_read_raw,
+ .read_avail = mcp3564_read_avail,
+ .write_raw = mcp3564_write_raw,
+ .write_raw_get_fmt = mcp3564_write_raw_get_fmt,
+ .read_label = mcp3564_read_label,
+ .attrs = &mcp3564r_attribute_group,
+};
+
+static int mcp3564_probe(struct spi_device *spi)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct mcp3564_state *adc;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adc = iio_priv(indio_dev);
+ adc->spi = spi;
+
+ dev_dbg(&spi->dev, "%s: probe(spi = 0x%p)\n", __func__, spi);
+
+ /*
+ * Do any chip-specific initialization, e.g.:
+ * - read/write some registers
+ * - enable/disable certain channels
+ * - change the sampling rate to the requested value
+ */
+ ret = mcp3564_config(indio_dev);
+ if (ret)
+ return dev_err_probe(&spi->dev, ret,
+ "Can't configure MCP356X device\n");
+
+ dev_dbg(&spi->dev, "%s: Vref (mV): %d\n", __func__, adc->vref_mv);
+
+ mcp3564_fill_scale_tbls(adc);
+
+ indio_dev->name = adc->chip_info->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ if (!adc->vref)
+ indio_dev->info = &mcp3564r_info;
+ else
+ indio_dev->info = &mcp3564_info;
+
+ mutex_init(&adc->lock);
+
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
+ if (ret)
+ return dev_err_probe(&spi->dev, ret,
+ "Can't register IIO device\n");
+
+ return 0;
+}
+
+static const struct of_device_id mcp3564_dt_ids[] = {
+ { .compatible = "microchip,mcp3461", .data = &mcp3564_chip_infos_tbl[mcp3461] },
+ { .compatible = "microchip,mcp3462", .data = &mcp3564_chip_infos_tbl[mcp3462] },
+ { .compatible = "microchip,mcp3464", .data = &mcp3564_chip_infos_tbl[mcp3464] },
+ { .compatible = "microchip,mcp3561", .data = &mcp3564_chip_infos_tbl[mcp3561] },
+ { .compatible = "microchip,mcp3562", .data = &mcp3564_chip_infos_tbl[mcp3562] },
+ { .compatible = "microchip,mcp3564", .data = &mcp3564_chip_infos_tbl[mcp3564] },
+ { .compatible = "microchip,mcp3461r", .data = &mcp3564_chip_infos_tbl[mcp3461r] },
+ { .compatible = "microchip,mcp3462r", .data = &mcp3564_chip_infos_tbl[mcp3462r] },
+ { .compatible = "microchip,mcp3464r", .data = &mcp3564_chip_infos_tbl[mcp3464r] },
+ { .compatible = "microchip,mcp3561r", .data = &mcp3564_chip_infos_tbl[mcp3561r] },
+ { .compatible = "microchip,mcp3562r", .data = &mcp3564_chip_infos_tbl[mcp3562r] },
+ { .compatible = "microchip,mcp3564r", .data = &mcp3564_chip_infos_tbl[mcp3564r] },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mcp3564_dt_ids);
+
+static const struct spi_device_id mcp3564_id[] = {
+ { "mcp3461", (kernel_ulong_t)&mcp3564_chip_infos_tbl[mcp3461] },
+ { "mcp3462", (kernel_ulong_t)&mcp3564_chip_infos_tbl[mcp3462] },
+ { "mcp3464", (kernel_ulong_t)&mcp3564_chip_infos_tbl[mcp3464] },
+ { "mcp3561", (kernel_ulong_t)&mcp3564_chip_infos_tbl[mcp3561] },
+ { "mcp3562", (kernel_ulong_t)&mcp3564_chip_infos_tbl[mcp3562] },
+ { "mcp3564", (kernel_ulong_t)&mcp3564_chip_infos_tbl[mcp3564] },
+ { "mcp3461r", (kernel_ulong_t)&mcp3564_chip_infos_tbl[mcp3461r] },
+ { "mcp3462r", (kernel_ulong_t)&mcp3564_chip_infos_tbl[mcp3462r] },
+ { "mcp3464r", (kernel_ulong_t)&mcp3564_chip_infos_tbl[mcp3464r] },
+ { "mcp3561r", (kernel_ulong_t)&mcp3564_chip_infos_tbl[mcp3561r] },
+ { "mcp3562r", (kernel_ulong_t)&mcp3564_chip_infos_tbl[mcp3562r] },
+ { "mcp3564r", (kernel_ulong_t)&mcp3564_chip_infos_tbl[mcp3564r] },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, mcp3564_id);
+
+static struct spi_driver mcp3564_driver = {
+ .driver = {
+ .name = "mcp3564",
+ .of_match_table = mcp3564_dt_ids,
+ },
+ .probe = mcp3564_probe,
+ .id_table = mcp3564_id,
+};
+
+module_spi_driver(mcp3564_driver);
+
+MODULE_AUTHOR("Marius Cristea <marius.cristea@microchip.com>");
+MODULE_DESCRIPTION("Microchip MCP346x/MCP346xR and MCP356x/MCP346xR ADCs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c
index 974c5bd923a6..d864558bc087 100644
--- a/drivers/iio/adc/mcp3911.c
+++ b/drivers/iio/adc/mcp3911.c
@@ -29,11 +29,11 @@
#define MCP3911_REG_MOD 0x06
#define MCP3911_REG_PHASE 0x07
#define MCP3911_REG_GAIN 0x09
-#define MCP3911_GAIN_MASK(ch) (GENMASK(2, 0) << 3 * ch)
-#define MCP3911_GAIN_VAL(ch, val) ((val << 3 * ch) & MCP3911_GAIN_MASK(ch))
+#define MCP3911_GAIN_MASK(ch) (GENMASK(2, 0) << 3 * (ch))
+#define MCP3911_GAIN_VAL(ch, val) ((val << 3 * (ch)) & MCP3911_GAIN_MASK(ch))
#define MCP3911_REG_STATUSCOM 0x0a
-#define MCP3911_STATUSCOM_DRHIZ BIT(12)
+#define MCP3911_STATUSCOM_DRHIZ BIT(12)
#define MCP3911_STATUSCOM_READ GENMASK(7, 6)
#define MCP3911_STATUSCOM_CH1_24WIDTH BIT(4)
#define MCP3911_STATUSCOM_CH0_24WIDTH BIT(3)
@@ -51,8 +51,8 @@
#define MCP3911_REG_GAINCAL_CH1 0x17
#define MCP3911_REG_VREFCAL 0x1a
-#define MCP3911_CHANNEL(x) (MCP3911_REG_CHANNEL0 + x * 3)
-#define MCP3911_OFFCAL(x) (MCP3911_REG_OFFCAL_CH0 + x * 6)
+#define MCP3911_CHANNEL(ch) (MCP3911_REG_CHANNEL0 + (ch) * 3)
+#define MCP3911_OFFCAL(ch) (MCP3911_REG_OFFCAL_CH0 + (ch) * 6)
/* Internal voltage reference in mV */
#define MCP3911_INT_VREF_MV 1200
@@ -61,12 +61,56 @@
#define MCP3911_REG_WRITE(reg, id) ((((reg) << 1) | ((id) << 6) | (0 << 0)) & 0xff)
#define MCP3911_REG_MASK GENMASK(4, 1)
-#define MCP3911_NUM_CHANNELS 2
#define MCP3911_NUM_SCALES 6
+/* Registers compatible with MCP3910 */
+#define MCP3910_REG_STATUSCOM 0x0c
+#define MCP3910_STATUSCOM_READ GENMASK(23, 22)
+#define MCP3910_STATUSCOM_DRHIZ BIT(20)
+
+#define MCP3910_REG_GAIN 0x0b
+
+#define MCP3910_REG_CONFIG0 0x0d
+#define MCP3910_CONFIG0_EN_OFFCAL BIT(23)
+#define MCP3910_CONFIG0_OSR GENMASK(15, 13)
+
+#define MCP3910_REG_CONFIG1 0x0e
+#define MCP3910_CONFIG1_CLKEXT BIT(6)
+#define MCP3910_CONFIG1_VREFEXT BIT(7)
+
+#define MCP3910_REG_OFFCAL_CH0 0x0f
+#define MCP3910_OFFCAL(ch) (MCP3910_REG_OFFCAL_CH0 + (ch) * 6)
+
+/* Maximal number of channels used by the MCP39XX family */
+#define MCP39XX_MAX_NUM_CHANNELS 8
+
static const int mcp3911_osr_table[] = { 32, 64, 128, 256, 512, 1024, 2048, 4096 };
static u32 mcp3911_scale_table[MCP3911_NUM_SCALES][2];
+enum mcp3911_id {
+ MCP3910,
+ MCP3911,
+ MCP3912,
+ MCP3913,
+ MCP3914,
+ MCP3918,
+ MCP3919,
+};
+
+struct mcp3911;
+struct mcp3911_chip_info {
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+
+ int (*config)(struct mcp3911 *adc);
+ int (*get_osr)(struct mcp3911 *adc, u32 *val);
+ int (*set_osr)(struct mcp3911 *adc, u32 val);
+ int (*enable_offset)(struct mcp3911 *adc, bool enable);
+ int (*get_offset)(struct mcp3911 *adc, int channel, int *val);
+ int (*set_offset)(struct mcp3911 *adc, int channel, int val);
+ int (*set_scale)(struct mcp3911 *adc, int channel, u32 val);
+};
+
struct mcp3911 {
struct spi_device *spi;
struct mutex lock;
@@ -74,14 +118,15 @@ struct mcp3911 {
struct clk *clki;
u32 dev_addr;
struct iio_trigger *trig;
- u32 gain[MCP3911_NUM_CHANNELS];
+ u32 gain[MCP39XX_MAX_NUM_CHANNELS];
+ const struct mcp3911_chip_info *chip;
struct {
- u32 channels[MCP3911_NUM_CHANNELS];
+ u32 channels[MCP39XX_MAX_NUM_CHANNELS];
s64 ts __aligned(8);
} scan;
u8 tx_buf __aligned(IIO_DMA_MINALIGN);
- u8 rx_buf[MCP3911_NUM_CHANNELS * 3];
+ u8 rx_buf[MCP39XX_MAX_NUM_CHANNELS * 3];
};
static int mcp3911_read(struct mcp3911 *adc, u8 reg, u32 *val, u8 len)
@@ -111,8 +156,7 @@ static int mcp3911_write(struct mcp3911 *adc, u8 reg, u32 val, u8 len)
return spi_write(adc->spi, &val, len + 1);
}
-static int mcp3911_update(struct mcp3911 *adc, u8 reg, u32 mask,
- u32 val, u8 len)
+static int mcp3911_update(struct mcp3911 *adc, u8 reg, u32 mask, u32 val, u8 len)
{
u32 tmp;
int ret;
@@ -126,9 +170,115 @@ static int mcp3911_update(struct mcp3911 *adc, u8 reg, u32 mask,
return mcp3911_write(adc, reg, val, len);
}
+static int mcp3910_enable_offset(struct mcp3911 *adc, bool enable)
+{
+ unsigned int mask = MCP3910_CONFIG0_EN_OFFCAL;
+ unsigned int value = enable ? mask : 0;
+
+ return mcp3911_update(adc, MCP3910_REG_CONFIG0, mask, value, 3);
+}
+
+static int mcp3910_get_offset(struct mcp3911 *adc, int channel, int *val)
+{
+ return mcp3911_read(adc, MCP3910_OFFCAL(channel), val, 3);
+}
+
+static int mcp3910_set_offset(struct mcp3911 *adc, int channel, int val)
+{
+ int ret;
+
+ ret = mcp3911_write(adc, MCP3910_OFFCAL(channel), val, 3);
+ if (ret)
+ return ret;
+
+ return adc->chip->enable_offset(adc, 1);
+}
+
+static int mcp3911_enable_offset(struct mcp3911 *adc, bool enable)
+{
+ unsigned int mask = MCP3911_STATUSCOM_EN_OFFCAL;
+ unsigned int value = enable ? mask : 0;
+
+ return mcp3911_update(adc, MCP3911_REG_STATUSCOM, mask, value, 2);
+}
+
+static int mcp3911_get_offset(struct mcp3911 *adc, int channel, int *val)
+{
+ return mcp3911_read(adc, MCP3911_OFFCAL(channel), val, 3);
+}
+
+static int mcp3911_set_offset(struct mcp3911 *adc, int channel, int val)
+{
+ int ret;
+
+ ret = mcp3911_write(adc, MCP3911_OFFCAL(channel), val, 3);
+ if (ret)
+ return ret;
+
+ return adc->chip->enable_offset(adc, 1);
+}
+
+static int mcp3910_get_osr(struct mcp3911 *adc, u32 *val)
+{
+ int ret;
+ unsigned int osr;
+
+ ret = mcp3911_read(adc, MCP3910_REG_CONFIG0, val, 3);
+ if (ret)
+ return ret;
+
+ osr = FIELD_GET(MCP3910_CONFIG0_OSR, *val);
+ *val = 32 << osr;
+ return 0;
+}
+
+static int mcp3910_set_osr(struct mcp3911 *adc, u32 val)
+{
+ unsigned int osr = FIELD_PREP(MCP3910_CONFIG0_OSR, val);
+ unsigned int mask = MCP3910_CONFIG0_OSR;
+
+ return mcp3911_update(adc, MCP3910_REG_CONFIG0, mask, osr, 3);
+}
+
+static int mcp3911_set_osr(struct mcp3911 *adc, u32 val)
+{
+ unsigned int osr = FIELD_PREP(MCP3911_CONFIG_OSR, val);
+ unsigned int mask = MCP3911_CONFIG_OSR;
+
+ return mcp3911_update(adc, MCP3911_REG_CONFIG, mask, osr, 2);
+}
+
+static int mcp3911_get_osr(struct mcp3911 *adc, u32 *val)
+{
+ int ret;
+ unsigned int osr;
+
+ ret = mcp3911_read(adc, MCP3911_REG_CONFIG, val, 2);
+ if (ret)
+ return ret;
+
+ osr = FIELD_GET(MCP3911_CONFIG_OSR, *val);
+ *val = 32 << osr;
+ return ret;
+}
+
+static int mcp3910_set_scale(struct mcp3911 *adc, int channel, u32 val)
+{
+ return mcp3911_update(adc, MCP3910_REG_GAIN,
+ MCP3911_GAIN_MASK(channel),
+ MCP3911_GAIN_VAL(channel, val), 3);
+}
+
+static int mcp3911_set_scale(struct mcp3911 *adc, int channel, u32 val)
+{
+ return mcp3911_update(adc, MCP3911_REG_GAIN,
+ MCP3911_GAIN_MASK(channel),
+ MCP3911_GAIN_VAL(channel, val), 1);
+}
+
static int mcp3911_write_raw_get_fmt(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- long mask)
+ struct iio_chan_spec const *chan,
+ long mask)
{
switch (mask) {
case IIO_CHAN_INFO_SCALE:
@@ -141,9 +291,9 @@ static int mcp3911_write_raw_get_fmt(struct iio_dev *indio_dev,
}
static int mcp3911_read_avail(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- const int **vals, int *type, int *length,
- long info)
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long info)
{
switch (info) {
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
@@ -182,20 +332,18 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev,
break;
case IIO_CHAN_INFO_OFFSET:
- ret = mcp3911_read(adc,
- MCP3911_OFFCAL(channel->channel), val, 3);
+
+ ret = adc->chip->get_offset(adc, channel->channel, val);
if (ret)
goto out;
ret = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
- ret = mcp3911_read(adc, MCP3911_REG_CONFIG, val, 2);
+ ret = adc->chip->get_osr(adc, val);
if (ret)
goto out;
- *val = FIELD_GET(MCP3911_CONFIG_OSR, *val);
- *val = 32 << *val;
ret = IIO_VAL_INT;
break;
@@ -212,8 +360,8 @@ out:
}
static int mcp3911_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *channel, int val,
- int val2, long mask)
+ struct iio_chan_spec const *channel, int val,
+ int val2, long mask)
{
struct mcp3911 *adc = iio_priv(indio_dev);
int ret = -EINVAL;
@@ -223,12 +371,10 @@ static int mcp3911_write_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
for (int i = 0; i < MCP3911_NUM_SCALES; i++) {
if (val == mcp3911_scale_table[i][0] &&
- val2 == mcp3911_scale_table[i][1]) {
+ val2 == mcp3911_scale_table[i][1]) {
adc->gain[channel->channel] = BIT(i);
- ret = mcp3911_update(adc, MCP3911_REG_GAIN,
- MCP3911_GAIN_MASK(channel->channel),
- MCP3911_GAIN_VAL(channel->channel, i), 1);
+ ret = adc->chip->set_scale(adc, channel->channel, i);
}
}
break;
@@ -238,24 +384,13 @@ static int mcp3911_write_raw(struct iio_dev *indio_dev,
goto out;
}
- /* Write offset */
- ret = mcp3911_write(adc, MCP3911_OFFCAL(channel->channel), val,
- 3);
- if (ret)
- goto out;
-
- /* Enable offset*/
- ret = mcp3911_update(adc, MCP3911_REG_STATUSCOM,
- MCP3911_STATUSCOM_EN_OFFCAL,
- MCP3911_STATUSCOM_EN_OFFCAL, 2);
+ ret = adc->chip->set_offset(adc, channel->channel, val);
break;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
for (int i = 0; i < ARRAY_SIZE(mcp3911_osr_table); i++) {
if (val == mcp3911_osr_table[i]) {
- val = FIELD_PREP(MCP3911_CONFIG_OSR, i);
- ret = mcp3911_update(adc, MCP3911_REG_CONFIG, MCP3911_CONFIG_OSR,
- val, 2);
+ ret = adc->chip->set_osr(adc, i);
break;
}
}
@@ -269,6 +404,7 @@ out:
static int mcp3911_calc_scale_table(struct mcp3911 *adc)
{
+ struct device *dev = &adc->spi->dev;
u32 ref = MCP3911_INT_VREF_MV;
u32 div;
int ret;
@@ -277,10 +413,7 @@ static int mcp3911_calc_scale_table(struct mcp3911 *adc)
if (adc->vref) {
ret = regulator_get_voltage(adc->vref);
if (ret < 0) {
- dev_err(&adc->spi->dev,
- "failed to get vref voltage: %d\n",
- ret);
- return ret;
+ return dev_err_probe(dev, ret, "failed to get vref voltage\n");
}
ref = ret / 1000;
@@ -326,24 +459,73 @@ static int mcp3911_calc_scale_table(struct mcp3911 *adc)
}, \
}
+static const struct iio_chan_spec mcp3910_channels[] = {
+ MCP3911_CHAN(0),
+ MCP3911_CHAN(1),
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+};
+
static const struct iio_chan_spec mcp3911_channels[] = {
MCP3911_CHAN(0),
MCP3911_CHAN(1),
IIO_CHAN_SOFT_TIMESTAMP(2),
};
+static const struct iio_chan_spec mcp3912_channels[] = {
+ MCP3911_CHAN(0),
+ MCP3911_CHAN(1),
+ MCP3911_CHAN(2),
+ MCP3911_CHAN(3),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+static const struct iio_chan_spec mcp3913_channels[] = {
+ MCP3911_CHAN(0),
+ MCP3911_CHAN(1),
+ MCP3911_CHAN(2),
+ MCP3911_CHAN(3),
+ MCP3911_CHAN(4),
+ MCP3911_CHAN(5),
+ IIO_CHAN_SOFT_TIMESTAMP(6),
+};
+
+static const struct iio_chan_spec mcp3914_channels[] = {
+ MCP3911_CHAN(0),
+ MCP3911_CHAN(1),
+ MCP3911_CHAN(2),
+ MCP3911_CHAN(3),
+ MCP3911_CHAN(4),
+ MCP3911_CHAN(5),
+ MCP3911_CHAN(6),
+ MCP3911_CHAN(7),
+ IIO_CHAN_SOFT_TIMESTAMP(8),
+};
+
+static const struct iio_chan_spec mcp3918_channels[] = {
+ MCP3911_CHAN(0),
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct iio_chan_spec mcp3919_channels[] = {
+ MCP3911_CHAN(0),
+ MCP3911_CHAN(1),
+ MCP3911_CHAN(2),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
static irqreturn_t mcp3911_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct mcp3911 *adc = iio_priv(indio_dev);
+ struct device *dev = &adc->spi->dev;
struct spi_transfer xfer[] = {
{
.tx_buf = &adc->tx_buf,
.len = 1,
}, {
.rx_buf = adc->rx_buf,
- .len = sizeof(adc->rx_buf),
+ .len = (adc->chip->num_channels - 1) * 3,
},
};
int scan_index;
@@ -354,8 +536,7 @@ static irqreturn_t mcp3911_trigger_handler(int irq, void *p)
adc->tx_buf = MCP3911_REG_READ(MCP3911_CHANNEL(0), adc->dev_addr);
ret = spi_sync_transfer(adc->spi, xfer, ARRAY_SIZE(xfer));
if (ret < 0) {
- dev_warn(&adc->spi->dev,
- "failed to get conversion data\n");
+ dev_warn(dev, "failed to get conversion data\n");
goto out;
}
@@ -387,43 +568,25 @@ static int mcp3911_config(struct mcp3911 *adc)
u32 regval;
int ret;
- ret = device_property_read_u32(dev, "microchip,device-addr", &adc->dev_addr);
-
- /*
- * Fallback to "device-addr" due to historical mismatch between
- * dt-bindings and implementation
- */
- if (ret)
- device_property_read_u32(dev, "device-addr", &adc->dev_addr);
- if (adc->dev_addr > 3) {
- dev_err(&adc->spi->dev,
- "invalid device address (%i). Must be in range 0-3.\n",
- adc->dev_addr);
- return -EINVAL;
- }
- dev_dbg(&adc->spi->dev, "use device address %i\n", adc->dev_addr);
-
ret = mcp3911_read(adc, MCP3911_REG_CONFIG, &regval, 2);
if (ret)
return ret;
regval &= ~MCP3911_CONFIG_VREFEXT;
if (adc->vref) {
- dev_dbg(&adc->spi->dev, "use external voltage reference\n");
+ dev_dbg(dev, "use external voltage reference\n");
regval |= FIELD_PREP(MCP3911_CONFIG_VREFEXT, 1);
} else {
- dev_dbg(&adc->spi->dev,
- "use internal voltage reference (1.2V)\n");
+ dev_dbg(dev, "use internal voltage reference (1.2V)\n");
regval |= FIELD_PREP(MCP3911_CONFIG_VREFEXT, 0);
}
regval &= ~MCP3911_CONFIG_CLKEXT;
if (adc->clki) {
- dev_dbg(&adc->spi->dev, "use external clock as clocksource\n");
+ dev_dbg(dev, "use external clock as clocksource\n");
regval |= FIELD_PREP(MCP3911_CONFIG_CLKEXT, 1);
} else {
- dev_dbg(&adc->spi->dev,
- "use crystal oscillator as clocksource\n");
+ dev_dbg(dev, "use crystal oscillator as clocksource\n");
regval |= FIELD_PREP(MCP3911_CONFIG_CLKEXT, 0);
}
@@ -439,7 +602,97 @@ static int mcp3911_config(struct mcp3911 *adc)
regval &= ~MCP3911_STATUSCOM_READ;
regval |= FIELD_PREP(MCP3911_STATUSCOM_READ, 0x02);
- return mcp3911_write(adc, MCP3911_REG_STATUSCOM, regval, 2);
+ regval &= ~MCP3911_STATUSCOM_DRHIZ;
+ if (device_property_read_bool(dev, "microchip,data-ready-hiz"))
+ regval |= FIELD_PREP(MCP3911_STATUSCOM_DRHIZ, 0);
+ else
+ regval |= FIELD_PREP(MCP3911_STATUSCOM_DRHIZ, 1);
+
+ /* Disable offset to ignore any old values in offset register */
+ regval &= ~MCP3911_STATUSCOM_EN_OFFCAL;
+
+ ret = mcp3911_write(adc, MCP3911_REG_STATUSCOM, regval, 2);
+ if (ret)
+ return ret;
+
+ /* Set gain to 1 for all channels */
+ ret = mcp3911_read(adc, MCP3911_REG_GAIN, &regval, 1);
+ if (ret)
+ return ret;
+
+ for (int i = 0; i < adc->chip->num_channels - 1; i++) {
+ adc->gain[i] = 1;
+ regval &= ~MCP3911_GAIN_MASK(i);
+ }
+
+ return mcp3911_write(adc, MCP3911_REG_GAIN, regval, 1);
+}
+
+static int mcp3910_config(struct mcp3911 *adc)
+{
+ struct device *dev = &adc->spi->dev;
+ u32 regval;
+ int ret;
+
+ ret = mcp3911_read(adc, MCP3910_REG_CONFIG1, &regval, 3);
+ if (ret)
+ return ret;
+
+ regval &= ~MCP3910_CONFIG1_VREFEXT;
+ if (adc->vref) {
+ dev_dbg(dev, "use external voltage reference\n");
+ regval |= FIELD_PREP(MCP3910_CONFIG1_VREFEXT, 1);
+ } else {
+ dev_dbg(dev, "use internal voltage reference (1.2V)\n");
+ regval |= FIELD_PREP(MCP3910_CONFIG1_VREFEXT, 0);
+ }
+
+ regval &= ~MCP3910_CONFIG1_CLKEXT;
+ if (adc->clki) {
+ dev_dbg(dev, "use external clock as clocksource\n");
+ regval |= FIELD_PREP(MCP3910_CONFIG1_CLKEXT, 1);
+ } else {
+ dev_dbg(dev, "use crystal oscillator as clocksource\n");
+ regval |= FIELD_PREP(MCP3910_CONFIG1_CLKEXT, 0);
+ }
+
+ ret = mcp3911_write(adc, MCP3910_REG_CONFIG1, regval, 3);
+ if (ret)
+ return ret;
+
+ ret = mcp3911_read(adc, MCP3910_REG_STATUSCOM, &regval, 3);
+ if (ret)
+ return ret;
+
+ /* Address counter incremented, cycle through register types */
+ regval &= ~MCP3910_STATUSCOM_READ;
+ regval |= FIELD_PREP(MCP3910_STATUSCOM_READ, 0x02);
+
+ regval &= ~MCP3910_STATUSCOM_DRHIZ;
+ if (device_property_read_bool(dev, "microchip,data-ready-hiz"))
+ regval |= FIELD_PREP(MCP3910_STATUSCOM_DRHIZ, 0);
+ else
+ regval |= FIELD_PREP(MCP3910_STATUSCOM_DRHIZ, 1);
+
+ ret = mcp3911_write(adc, MCP3910_REG_STATUSCOM, regval, 3);
+ if (ret)
+ return ret;
+
+ /* Set gain to 1 for all channels */
+ ret = mcp3911_read(adc, MCP3910_REG_GAIN, &regval, 3);
+ if (ret)
+ return ret;
+
+ for (int i = 0; i < adc->chip->num_channels - 1; i++) {
+ adc->gain[i] = 1;
+ regval &= ~MCP3911_GAIN_MASK(i);
+ }
+ ret = mcp3911_write(adc, MCP3910_REG_GAIN, regval, 3);
+ if (ret)
+ return ret;
+
+ /* Disable offset to ignore any old values in offset register */
+ return adc->chip->enable_offset(adc, 0);
}
static void mcp3911_cleanup_regulator(void *vref)
@@ -466,26 +719,25 @@ static const struct iio_trigger_ops mcp3911_trigger_ops = {
static int mcp3911_probe(struct spi_device *spi)
{
+ struct device *dev = &spi->dev;
struct iio_dev *indio_dev;
struct mcp3911 *adc;
int ret;
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*adc));
if (!indio_dev)
return -ENOMEM;
adc = iio_priv(indio_dev);
adc->spi = spi;
+ adc->chip = spi_get_device_match_data(spi);
- adc->vref = devm_regulator_get_optional(&adc->spi->dev, "vref");
+ adc->vref = devm_regulator_get_optional(dev, "vref");
if (IS_ERR(adc->vref)) {
if (PTR_ERR(adc->vref) == -ENODEV) {
adc->vref = NULL;
} else {
- dev_err(&adc->spi->dev,
- "failed to get regulator (%ld)\n",
- PTR_ERR(adc->vref));
- return PTR_ERR(adc->vref);
+ return dev_err_probe(dev, PTR_ERR(adc->vref), "failed to get regulator\n");
}
} else {
@@ -493,34 +745,35 @@ static int mcp3911_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = devm_add_action_or_reset(&spi->dev,
- mcp3911_cleanup_regulator, adc->vref);
+ ret = devm_add_action_or_reset(dev, mcp3911_cleanup_regulator, adc->vref);
if (ret)
return ret;
}
- adc->clki = devm_clk_get_enabled(&adc->spi->dev, NULL);
+ adc->clki = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(adc->clki)) {
if (PTR_ERR(adc->clki) == -ENOENT) {
adc->clki = NULL;
} else {
- dev_err(&adc->spi->dev,
- "failed to get adc clk (%ld)\n",
- PTR_ERR(adc->clki));
- return PTR_ERR(adc->clki);
+ return dev_err_probe(dev, PTR_ERR(adc->clki), "failed to get adc clk\n");
}
}
- ret = mcp3911_config(adc);
+ /*
+ * Fallback to "device-addr" due to historical mismatch between
+ * dt-bindings and implementation.
+ */
+ ret = device_property_read_u32(dev, "microchip,device-addr", &adc->dev_addr);
if (ret)
- return ret;
+ device_property_read_u32(dev, "device-addr", &adc->dev_addr);
+ if (adc->dev_addr > 3) {
+ return dev_err_probe(dev, -EINVAL,
+ "invalid device address (%i). Must be in range 0-3.\n",
+ adc->dev_addr);
+ }
+ dev_dbg(dev, "use device address %i\n", adc->dev_addr);
- if (device_property_read_bool(&adc->spi->dev, "microchip,data-ready-hiz"))
- ret = mcp3911_update(adc, MCP3911_REG_STATUSCOM, MCP3911_STATUSCOM_DRHIZ,
- 0, 2);
- else
- ret = mcp3911_update(adc, MCP3911_REG_STATUSCOM, MCP3911_STATUSCOM_DRHIZ,
- MCP3911_STATUSCOM_DRHIZ, 2);
+ ret = adc->chip->config(adc);
if (ret)
return ret;
@@ -528,12 +781,12 @@ static int mcp3911_probe(struct spi_device *spi)
if (ret)
return ret;
- /* Set gain to 1 for all channels */
- for (int i = 0; i < MCP3911_NUM_CHANNELS; i++) {
+ /* Set gain to 1 for all channels */
+ for (int i = 0; i < adc->chip->num_channels - 1; i++) {
adc->gain[i] = 1;
ret = mcp3911_update(adc, MCP3911_REG_GAIN,
- MCP3911_GAIN_MASK(i),
- MCP3911_GAIN_VAL(i, 0), 1);
+ MCP3911_GAIN_MASK(i),
+ MCP3911_GAIN_VAL(i, 0), 1);
if (ret)
return ret;
}
@@ -543,21 +796,20 @@ static int mcp3911_probe(struct spi_device *spi)
indio_dev->info = &mcp3911_info;
spi_set_drvdata(spi, indio_dev);
- indio_dev->channels = mcp3911_channels;
- indio_dev->num_channels = ARRAY_SIZE(mcp3911_channels);
+ indio_dev->channels = adc->chip->channels;
+ indio_dev->num_channels = adc->chip->num_channels;
mutex_init(&adc->lock);
if (spi->irq > 0) {
- adc->trig = devm_iio_trigger_alloc(&spi->dev, "%s-dev%d",
- indio_dev->name,
- iio_device_id(indio_dev));
+ adc->trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
+ iio_device_id(indio_dev));
if (!adc->trig)
return -ENOMEM;
adc->trig->ops = &mcp3911_trigger_ops;
iio_trigger_set_drvdata(adc->trig, adc);
- ret = devm_iio_trigger_register(&spi->dev, adc->trig);
+ ret = devm_iio_trigger_register(dev, adc->trig);
if (ret)
return ret;
@@ -566,30 +818,120 @@ static int mcp3911_probe(struct spi_device *spi)
* Some platforms might not allow the option to power it down so
* don't enable the interrupt to avoid extra load on the system.
*/
- ret = devm_request_irq(&spi->dev, spi->irq,
- &iio_trigger_generic_data_rdy_poll, IRQF_NO_AUTOEN | IRQF_ONESHOT,
- indio_dev->name, adc->trig);
+ ret = devm_request_irq(dev, spi->irq, &iio_trigger_generic_data_rdy_poll,
+ IRQF_NO_AUTOEN | IRQF_ONESHOT,
+ indio_dev->name, adc->trig);
if (ret)
return ret;
}
- ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
- NULL,
- mcp3911_trigger_handler, NULL);
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
+ mcp3911_trigger_handler, NULL);
if (ret)
return ret;
- return devm_iio_device_register(&adc->spi->dev, indio_dev);
+ return devm_iio_device_register(dev, indio_dev);
}
+static const struct mcp3911_chip_info mcp3911_chip_info[] = {
+ [MCP3910] = {
+ .channels = mcp3910_channels,
+ .num_channels = ARRAY_SIZE(mcp3910_channels),
+ .config = mcp3910_config,
+ .get_osr = mcp3910_get_osr,
+ .set_osr = mcp3910_set_osr,
+ .enable_offset = mcp3910_enable_offset,
+ .get_offset = mcp3910_get_offset,
+ .set_offset = mcp3910_set_offset,
+ .set_scale = mcp3910_set_scale,
+ },
+ [MCP3911] = {
+ .channels = mcp3911_channels,
+ .num_channels = ARRAY_SIZE(mcp3911_channels),
+ .config = mcp3911_config,
+ .get_osr = mcp3911_get_osr,
+ .set_osr = mcp3911_set_osr,
+ .enable_offset = mcp3911_enable_offset,
+ .get_offset = mcp3911_get_offset,
+ .set_offset = mcp3911_set_offset,
+ .set_scale = mcp3911_set_scale,
+ },
+ [MCP3912] = {
+ .channels = mcp3912_channels,
+ .num_channels = ARRAY_SIZE(mcp3912_channels),
+ .config = mcp3910_config,
+ .get_osr = mcp3910_get_osr,
+ .set_osr = mcp3910_set_osr,
+ .enable_offset = mcp3910_enable_offset,
+ .get_offset = mcp3910_get_offset,
+ .set_offset = mcp3910_set_offset,
+ .set_scale = mcp3910_set_scale,
+ },
+ [MCP3913] = {
+ .channels = mcp3913_channels,
+ .num_channels = ARRAY_SIZE(mcp3913_channels),
+ .config = mcp3910_config,
+ .get_osr = mcp3910_get_osr,
+ .set_osr = mcp3910_set_osr,
+ .enable_offset = mcp3910_enable_offset,
+ .get_offset = mcp3910_get_offset,
+ .set_offset = mcp3910_set_offset,
+ .set_scale = mcp3910_set_scale,
+ },
+ [MCP3914] = {
+ .channels = mcp3914_channels,
+ .num_channels = ARRAY_SIZE(mcp3914_channels),
+ .config = mcp3910_config,
+ .get_osr = mcp3910_get_osr,
+ .set_osr = mcp3910_set_osr,
+ .enable_offset = mcp3910_enable_offset,
+ .get_offset = mcp3910_get_offset,
+ .set_offset = mcp3910_set_offset,
+ .set_scale = mcp3910_set_scale,
+ },
+ [MCP3918] = {
+ .channels = mcp3918_channels,
+ .num_channels = ARRAY_SIZE(mcp3918_channels),
+ .config = mcp3910_config,
+ .get_osr = mcp3910_get_osr,
+ .set_osr = mcp3910_set_osr,
+ .enable_offset = mcp3910_enable_offset,
+ .get_offset = mcp3910_get_offset,
+ .set_offset = mcp3910_set_offset,
+ .set_scale = mcp3910_set_scale,
+ },
+ [MCP3919] = {
+ .channels = mcp3919_channels,
+ .num_channels = ARRAY_SIZE(mcp3919_channels),
+ .config = mcp3910_config,
+ .get_osr = mcp3910_get_osr,
+ .set_osr = mcp3910_set_osr,
+ .enable_offset = mcp3910_enable_offset,
+ .get_offset = mcp3910_get_offset,
+ .set_offset = mcp3910_set_offset,
+ .set_scale = mcp3910_set_scale,
+ },
+};
static const struct of_device_id mcp3911_dt_ids[] = {
- { .compatible = "microchip,mcp3911" },
+ { .compatible = "microchip,mcp3910", .data = &mcp3911_chip_info[MCP3910] },
+ { .compatible = "microchip,mcp3911", .data = &mcp3911_chip_info[MCP3911] },
+ { .compatible = "microchip,mcp3912", .data = &mcp3911_chip_info[MCP3912] },
+ { .compatible = "microchip,mcp3913", .data = &mcp3911_chip_info[MCP3913] },
+ { .compatible = "microchip,mcp3914", .data = &mcp3911_chip_info[MCP3914] },
+ { .compatible = "microchip,mcp3918", .data = &mcp3911_chip_info[MCP3918] },
+ { .compatible = "microchip,mcp3919", .data = &mcp3911_chip_info[MCP3919] },
{ }
};
MODULE_DEVICE_TABLE(of, mcp3911_dt_ids);
static const struct spi_device_id mcp3911_id[] = {
- { "mcp3911", 0 },
+ { "mcp3910", (kernel_ulong_t)&mcp3911_chip_info[MCP3910] },
+ { "mcp3911", (kernel_ulong_t)&mcp3911_chip_info[MCP3911] },
+ { "mcp3912", (kernel_ulong_t)&mcp3911_chip_info[MCP3912] },
+ { "mcp3913", (kernel_ulong_t)&mcp3911_chip_info[MCP3913] },
+ { "mcp3914", (kernel_ulong_t)&mcp3911_chip_info[MCP3914] },
+ { "mcp3918", (kernel_ulong_t)&mcp3911_chip_info[MCP3918] },
+ { "mcp3919", (kernel_ulong_t)&mcp3911_chip_info[MCP3919] },
{ }
};
MODULE_DEVICE_TABLE(spi, mcp3911_id);
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 320e3e7e3d4d..950ff13e6dde 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -1045,8 +1045,10 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
u32 regval;
ret = meson_sar_adc_lock(indio_dev);
- if (ret)
+ if (ret) {
+ dev_err(dev, "failed to lock adc\n");
goto err_lock;
+ }
ret = regulator_enable(priv->vref);
if (ret < 0) {
@@ -1354,15 +1356,15 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
priv->regmap = devm_regmap_init_mmio(dev, base, priv->param->regmap_config);
if (IS_ERR(priv->regmap))
- return PTR_ERR(priv->regmap);
+ return dev_err_probe(dev, PTR_ERR(priv->regmap), "failed to init regmap\n");
irq = irq_of_parse_and_map(dev->of_node, 0);
if (!irq)
- return -EINVAL;
+ return dev_err_probe(dev, -EINVAL, "failed to get irq\n");
ret = devm_request_irq(dev, irq, meson_sar_adc_irq, IRQF_SHARED, dev_name(dev), indio_dev);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret, "failed to request irq\n");
priv->clkin = devm_clk_get(dev, "clkin");
if (IS_ERR(priv->clkin))
@@ -1384,7 +1386,7 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
if (!priv->adc_clk) {
ret = meson_sar_adc_clk_init(indio_dev, base);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret, "failed to init internal clk\n");
}
priv->vref = devm_regulator_get(dev, "vref");
@@ -1426,8 +1428,10 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, indio_dev);
ret = iio_device_register(indio_dev);
- if (ret)
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to register iio device\n");
goto err_hw;
+ }
return 0;
@@ -1437,15 +1441,13 @@ err:
return ret;
}
-static int meson_sar_adc_remove(struct platform_device *pdev)
+static void meson_sar_adc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
iio_device_unregister(indio_dev);
meson_sar_adc_hw_disable(indio_dev);
-
- return 0;
}
static int meson_sar_adc_suspend(struct device *dev)
@@ -1480,7 +1482,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(meson_sar_adc_pm_ops,
static struct platform_driver meson_sar_adc_driver = {
.probe = meson_sar_adc_probe,
- .remove = meson_sar_adc_remove,
+ .remove_new = meson_sar_adc_remove,
.driver = {
.name = "meson-saradc",
.of_match_table = meson_sar_adc_of_match,
diff --git a/drivers/iio/adc/mp2629_adc.c b/drivers/iio/adc/mp2629_adc.c
index 88e947f300cf..7c66c2cd5be2 100644
--- a/drivers/iio/adc/mp2629_adc.c
+++ b/drivers/iio/adc/mp2629_adc.c
@@ -171,7 +171,7 @@ fail_disable:
return ret;
}
-static int mp2629_adc_remove(struct platform_device *pdev)
+static void mp2629_adc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct mp2629_adc *info = iio_priv(indio_dev);
@@ -184,8 +184,6 @@ static int mp2629_adc_remove(struct platform_device *pdev)
MP2629_ADC_CONTINUOUS, 0);
regmap_update_bits(info->regmap, MP2629_REG_ADC_CTRL,
MP2629_ADC_START, 0);
-
- return 0;
}
static const struct of_device_id mp2629_adc_of_match[] = {
@@ -200,7 +198,7 @@ static struct platform_driver mp2629_adc_driver = {
.of_match_table = mp2629_adc_of_match,
},
.probe = mp2629_adc_probe,
- .remove = mp2629_adc_remove,
+ .remove_new = mp2629_adc_remove,
};
module_platform_driver(mp2629_adc_driver);
diff --git a/drivers/iio/adc/mt6577_auxadc.c b/drivers/iio/adc/mt6577_auxadc.c
index 0e134777bdd2..3343b54e8e44 100644
--- a/drivers/iio/adc/mt6577_auxadc.c
+++ b/drivers/iio/adc/mt6577_auxadc.c
@@ -246,6 +246,14 @@ static int mt6577_auxadc_suspend(struct device *dev)
return 0;
}
+static void mt6577_power_off(void *data)
+{
+ struct mt6577_auxadc_device *adc_dev = data;
+
+ mt6577_auxadc_mod_reg(adc_dev->reg_base + MT6577_AUXADC_MISC,
+ 0, MT6577_AUXADC_PDN_EN);
+}
+
static int mt6577_auxadc_probe(struct platform_device *pdev)
{
struct mt6577_auxadc_device *adc_dev;
@@ -265,29 +273,18 @@ static int mt6577_auxadc_probe(struct platform_device *pdev)
indio_dev->num_channels = ARRAY_SIZE(mt6577_auxadc_iio_channels);
adc_dev->reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(adc_dev->reg_base)) {
- dev_err(&pdev->dev, "failed to get auxadc base address\n");
- return PTR_ERR(adc_dev->reg_base);
- }
-
- adc_dev->adc_clk = devm_clk_get(&pdev->dev, "main");
- if (IS_ERR(adc_dev->adc_clk)) {
- dev_err(&pdev->dev, "failed to get auxadc clock\n");
- return PTR_ERR(adc_dev->adc_clk);
- }
+ if (IS_ERR(adc_dev->reg_base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(adc_dev->reg_base),
+ "failed to get auxadc base address\n");
- ret = clk_prepare_enable(adc_dev->adc_clk);
- if (ret) {
- dev_err(&pdev->dev, "failed to enable auxadc clock\n");
- return ret;
- }
+ adc_dev->adc_clk = devm_clk_get_enabled(&pdev->dev, "main");
+ if (IS_ERR(adc_dev->adc_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(adc_dev->adc_clk),
+ "failed to enable auxadc clock\n");
adc_clk_rate = clk_get_rate(adc_dev->adc_clk);
- if (!adc_clk_rate) {
- ret = -EINVAL;
- dev_err(&pdev->dev, "null clock rate\n");
- goto err_disable_clk;
- }
+ if (!adc_clk_rate)
+ return dev_err_probe(&pdev->dev, -EINVAL, "null clock rate\n");
adc_dev->dev_comp = device_get_match_data(&pdev->dev);
@@ -296,36 +293,16 @@ static int mt6577_auxadc_probe(struct platform_device *pdev)
mt6577_auxadc_mod_reg(adc_dev->reg_base + MT6577_AUXADC_MISC,
MT6577_AUXADC_PDN_EN, 0);
mdelay(MT6577_AUXADC_POWER_READY_MS);
-
platform_set_drvdata(pdev, indio_dev);
- ret = iio_device_register(indio_dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to register iio device\n");
- goto err_power_off;
- }
-
- return 0;
-
-err_power_off:
- mt6577_auxadc_mod_reg(adc_dev->reg_base + MT6577_AUXADC_MISC,
- 0, MT6577_AUXADC_PDN_EN);
-err_disable_clk:
- clk_disable_unprepare(adc_dev->adc_clk);
- return ret;
-}
-
-static int mt6577_auxadc_remove(struct platform_device *pdev)
-{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
- struct mt6577_auxadc_device *adc_dev = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
+ ret = devm_add_action_or_reset(&pdev->dev, mt6577_power_off, adc_dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to add action to managed power off\n");
- mt6577_auxadc_mod_reg(adc_dev->reg_base + MT6577_AUXADC_MISC,
- 0, MT6577_AUXADC_PDN_EN);
-
- clk_disable_unprepare(adc_dev->adc_clk);
+ ret = devm_iio_device_register(&pdev->dev, indio_dev);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "failed to register iio device\n");
return 0;
}
@@ -352,7 +329,6 @@ static struct platform_driver mt6577_auxadc_driver = {
.pm = pm_sleep_ptr(&mt6577_auxadc_pm_ops),
},
.probe = mt6577_auxadc_probe,
- .remove = mt6577_auxadc_remove,
};
module_platform_driver(mt6577_auxadc_driver);
diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c
index a50f39143d3e..2e60c10ee4ff 100644
--- a/drivers/iio/adc/mxs-lradc-adc.c
+++ b/drivers/iio/adc/mxs-lradc-adc.c
@@ -807,7 +807,7 @@ err_trig:
return ret;
}
-static int mxs_lradc_adc_remove(struct platform_device *pdev)
+static void mxs_lradc_adc_remove(struct platform_device *pdev)
{
struct iio_dev *iio = platform_get_drvdata(pdev);
struct mxs_lradc_adc *adc = iio_priv(iio);
@@ -816,8 +816,6 @@ static int mxs_lradc_adc_remove(struct platform_device *pdev)
mxs_lradc_adc_hw_stop(adc);
iio_triggered_buffer_cleanup(iio);
mxs_lradc_adc_trigger_remove(iio);
-
- return 0;
}
static struct platform_driver mxs_lradc_adc_driver = {
@@ -825,7 +823,7 @@ static struct platform_driver mxs_lradc_adc_driver = {
.name = "mxs-lradc-adc",
},
.probe = mxs_lradc_adc_probe,
- .remove = mxs_lradc_adc_remove,
+ .remove_new = mxs_lradc_adc_remove,
};
module_platform_driver(mxs_lradc_adc_driver);
diff --git a/drivers/iio/adc/npcm_adc.c b/drivers/iio/adc/npcm_adc.c
index 3d9207c160eb..3a55465951e7 100644
--- a/drivers/iio/adc/npcm_adc.c
+++ b/drivers/iio/adc/npcm_adc.c
@@ -320,7 +320,7 @@ err_disable_clk:
return ret;
}
-static int npcm_adc_remove(struct platform_device *pdev)
+static void npcm_adc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct npcm_adc *info = iio_priv(indio_dev);
@@ -333,13 +333,11 @@ static int npcm_adc_remove(struct platform_device *pdev)
if (!IS_ERR(info->vref))
regulator_disable(info->vref);
clk_disable_unprepare(info->adc_clk);
-
- return 0;
}
static struct platform_driver npcm_adc_driver = {
.probe = npcm_adc_probe,
- .remove = npcm_adc_remove,
+ .remove_new = npcm_adc_remove,
.driver = {
.name = "npcm_adc",
.of_match_table = npcm_adc_match,
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index e202ea18af10..203cbbc70719 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -457,7 +457,7 @@ static int palmas_gpadc_get_calibrated_code(struct palmas_gpadc *adc,
*
* The gain error include both gain error, as specified in the datasheet, and
* the gain error drift. These paramenters vary depending on device and whether
- * the the channel is calibrated (trimmed) or not.
+ * the channel is calibrated (trimmed) or not.
*/
static int palmas_gpadc_threshold_with_tolerance(int val, const int INL,
const int gain_error,
diff --git a/drivers/iio/adc/qcom-pm8xxx-xoadc.c b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
index 64a3aeb6261c..01c5586df56d 100644
--- a/drivers/iio/adc/qcom-pm8xxx-xoadc.c
+++ b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
@@ -957,7 +957,7 @@ out_disable_vref:
return ret;
}
-static int pm8xxx_xoadc_remove(struct platform_device *pdev)
+static void pm8xxx_xoadc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct pm8xxx_xoadc *adc = iio_priv(indio_dev);
@@ -965,8 +965,6 @@ static int pm8xxx_xoadc_remove(struct platform_device *pdev)
iio_device_unregister(indio_dev);
regulator_disable(adc->vref);
-
- return 0;
}
static const struct xoadc_variant pm8018_variant = {
@@ -1019,7 +1017,7 @@ static struct platform_driver pm8xxx_xoadc_driver = {
.of_match_table = pm8xxx_xoadc_id_table,
},
.probe = pm8xxx_xoadc_probe,
- .remove = pm8xxx_xoadc_remove,
+ .remove_new = pm8xxx_xoadc_remove,
};
module_platform_driver(pm8xxx_xoadc_driver);
diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c
index b8972f673c9d..d524f2e8e927 100644
--- a/drivers/iio/adc/rcar-gyroadc.c
+++ b/drivers/iio/adc/rcar-gyroadc.c
@@ -559,7 +559,7 @@ err_clk_if_enable:
return ret;
}
-static int rcar_gyroadc_remove(struct platform_device *pdev)
+static void rcar_gyroadc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct rcar_gyroadc *priv = iio_priv(indio_dev);
@@ -573,8 +573,6 @@ static int rcar_gyroadc_remove(struct platform_device *pdev)
pm_runtime_set_suspended(dev);
clk_disable_unprepare(priv->clk);
rcar_gyroadc_deinit_supplies(indio_dev);
-
- return 0;
}
static int rcar_gyroadc_suspend(struct device *dev)
@@ -603,7 +601,7 @@ static const struct dev_pm_ops rcar_gyroadc_pm_ops = {
static struct platform_driver rcar_gyroadc_driver = {
.probe = rcar_gyroadc_probe,
- .remove = rcar_gyroadc_remove,
+ .remove_new = rcar_gyroadc_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = rcar_gyroadc_match,
diff --git a/drivers/iio/adc/spear_adc.c b/drivers/iio/adc/spear_adc.c
index ad54ef798109..71362c2ddf89 100644
--- a/drivers/iio/adc/spear_adc.c
+++ b/drivers/iio/adc/spear_adc.c
@@ -274,10 +274,9 @@ static int spear_adc_probe(struct platform_device *pdev)
int irq;
indio_dev = devm_iio_device_alloc(dev, sizeof(struct spear_adc_state));
- if (!indio_dev) {
- dev_err(dev, "failed allocating iio device\n");
- return -ENOMEM;
- }
+ if (!indio_dev)
+ return dev_err_probe(dev, -ENOMEM,
+ "failed allocating iio device\n");
st = iio_priv(indio_dev);
@@ -297,37 +296,24 @@ static int spear_adc_probe(struct platform_device *pdev)
st->adc_base_spear3xx =
(struct adc_regs_spear3xx __iomem *)st->adc_base_spear6xx;
- st->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(st->clk)) {
- dev_err(dev, "failed getting clock\n");
- return PTR_ERR(st->clk);
- }
-
- ret = clk_prepare_enable(st->clk);
- if (ret) {
- dev_err(dev, "failed enabling clock\n");
- return ret;
- }
+ st->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(st->clk))
+ return dev_err_probe(dev, PTR_ERR(st->clk),
+ "failed enabling clock\n");
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto errout2;
- }
+ if (irq < 0)
+ return irq;
ret = devm_request_irq(dev, irq, spear_adc_isr, 0, SPEAR_ADC_MOD_NAME,
st);
- if (ret < 0) {
- dev_err(dev, "failed requesting interrupt\n");
- goto errout2;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed requesting interrupt\n");
if (of_property_read_u32(np, "sampling-frequency",
- &st->sampling_freq)) {
- dev_err(dev, "sampling-frequency missing in DT\n");
- ret = -EINVAL;
- goto errout2;
- }
+ &st->sampling_freq))
+ return dev_err_probe(dev, -EINVAL,
+ "sampling-frequency missing in DT\n");
/*
* Optional avg_samples defaults to 0, resulting in single data
@@ -343,8 +329,6 @@ static int spear_adc_probe(struct platform_device *pdev)
spear_adc_configure(st);
- platform_set_drvdata(pdev, indio_dev);
-
init_completion(&st->completion);
indio_dev->name = SPEAR_ADC_MOD_NAME;
@@ -353,28 +337,13 @@ static int spear_adc_probe(struct platform_device *pdev)
indio_dev->channels = spear_adc_iio_channels;
indio_dev->num_channels = ARRAY_SIZE(spear_adc_iio_channels);
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(dev, indio_dev);
if (ret)
- goto errout2;
+ return ret;
dev_info(dev, "SPEAR ADC driver loaded, IRQ %d\n", irq);
return 0;
-
-errout2:
- clk_disable_unprepare(st->clk);
- return ret;
-}
-
-static int spear_adc_remove(struct platform_device *pdev)
-{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
- struct spear_adc_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- clk_disable_unprepare(st->clk);
-
- return 0;
}
#ifdef CONFIG_OF
@@ -387,7 +356,6 @@ MODULE_DEVICE_TABLE(of, spear_adc_dt_ids);
static struct platform_driver spear_adc_driver = {
.probe = spear_adc_probe,
- .remove = spear_adc_remove,
.driver = {
.name = SPEAR_ADC_MOD_NAME,
.of_match_table = of_match_ptr(spear_adc_dt_ids),
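
[Editor's note] The spear_adc conversion drops the remove callback entirely by making every probe-time resource device-managed: devm_clk_get_enabled() replaces the get/prepare_enable pair, dev_err_probe() replaces the dev_err()-then-return idiom, and devm_iio_device_register() unregisters the IIO device automatically on unbind. A hedged sketch of that probe() shape with made-up "bar_adc" names:

#include <linux/clk.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
#include <linux/platform_device.h>

struct bar_adc_state {
	struct clk *clk;
};

static const struct iio_info bar_adc_info = { };

static int bar_adc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct bar_adc_state *st;
	struct iio_dev *indio_dev;

	indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
	if (!indio_dev)
		return dev_err_probe(dev, -ENOMEM,
				     "failed allocating iio device\n");
	st = iio_priv(indio_dev);

	/* clock is acquired, prepared and enabled in one call; all of it
	 * is undone automatically on detach, so no error labels and no
	 * remove() are needed */
	st->clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(st->clk))
		return dev_err_probe(dev, PTR_ERR(st->clk),
				     "failed enabling clock\n");

	indio_dev->name = "bar-adc";
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->info = &bar_adc_info;	/* real driver: channels + callbacks */

	return devm_iio_device_register(dev, indio_dev);
}

static struct platform_driver bar_adc_driver = {
	.driver = {
		.name = "bar-adc",
	},
	.probe = bar_adc_probe,
	/* fully devm-managed: no remove callback at all */
};
module_platform_driver(bar_adc_driver);
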
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 2f082006550f..616dd729666a 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -17,10 +17,11 @@
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -720,8 +721,7 @@ static int stm32_adc_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, &priv->common);
- priv->cfg = (const struct stm32_adc_priv_cfg *)
- of_match_device(dev->driver->of_match_table, dev)->data;
+ priv->cfg = device_get_match_data(dev);
priv->nb_adc_max = priv->cfg->num_adcs;
spin_lock_init(&priv->common.lock);
@@ -814,7 +814,7 @@ err_pm_stop:
return ret;
}
-static int stm32_adc_remove(struct platform_device *pdev)
+static void stm32_adc_remove(struct platform_device *pdev)
{
struct stm32_adc_common *common = platform_get_drvdata(pdev);
struct stm32_adc_priv *priv = to_stm32_adc_priv(common);
@@ -826,8 +826,6 @@ static int stm32_adc_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
-
- return 0;
}
static int stm32_adc_core_runtime_suspend(struct device *dev)
@@ -908,7 +906,7 @@ MODULE_DEVICE_TABLE(of, stm32_adc_of_match);
static struct platform_driver stm32_adc_driver = {
.probe = stm32_adc_probe,
- .remove = stm32_adc_remove,
+ .remove_new = stm32_adc_remove,
.driver = {
.name = "stm32-adc-core",
.of_match_table = stm32_adc_of_match,
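
[Editor's note] device_get_match_data() fetches the .data pointer of the matching entry for whatever firmware interface probed the device, replacing the open-coded of_match_device(dev->driver->of_match_table, dev)->data lookup, which also dereferences NULL when nothing matched. A small sketch with an invented compatible string and config struct:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct baz_cfg {
	unsigned int num_adcs;
};

static const struct baz_cfg baz_v1_cfg = { .num_adcs = 2 };

static const struct of_device_id baz_of_match[] = {
	{ .compatible = "vendor,baz-v1", .data = &baz_v1_cfg },
	{ }
};
MODULE_DEVICE_TABLE(of, baz_of_match);

static int baz_probe(struct platform_device *pdev)
{
	const struct baz_cfg *cfg;

	/* one call replaces of_match_device(...)->data and also works
	 * for ACPI/other fwnode-backed buses */
	cfg = device_get_match_data(&pdev->dev);
	if (!cfg)
		return -ENODEV;

	dev_info(&pdev->dev, "%u ADC instances\n", cfg->num_adcs);
	return 0;
}

static struct platform_driver baz_driver = {
	.driver = {
		.name = "baz",
		.of_match_table = baz_of_match,
	},
	.probe = baz_probe,
};
module_platform_driver(baz_driver);
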
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index f7613efb870d..b5d3c9cea5c4 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -2209,7 +2209,7 @@ static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev,
ret = -EINVAL;
goto err;
}
- strncpy(adc->chan_name[val], name, STM32_ADC_CH_SZ);
+ strscpy(adc->chan_name[val], name, STM32_ADC_CH_SZ);
ret = stm32_adc_populate_int_ch(indio_dev, name, val);
if (ret == -ENOENT)
continue;
@@ -2513,7 +2513,7 @@ err_dma_disable:
return ret;
}
-static int stm32_adc_remove(struct platform_device *pdev)
+static void stm32_adc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct stm32_adc *adc = iio_priv(indio_dev);
@@ -2532,8 +2532,6 @@ static int stm32_adc_remove(struct platform_device *pdev)
adc->rx_buf, adc->rx_dma_buf);
dma_release_channel(adc->dma_chan);
}
-
- return 0;
}
static int stm32_adc_suspend(struct device *dev)
@@ -2659,7 +2657,7 @@ MODULE_DEVICE_TABLE(of, stm32_adc_of_match);
static struct platform_driver stm32_adc_driver = {
.probe = stm32_adc_probe,
- .remove = stm32_adc_remove,
+ .remove_new = stm32_adc_remove,
.driver = {
.name = "stm32-adc",
.of_match_table = stm32_adc_of_match,
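
[Editor's note] The strncpy() to strscpy() change above is the usual hardening: strscpy() always NUL-terminates the destination and reports truncation, whereas strncpy() can leave the buffer unterminated when the source is as long as the buffer. A tiny illustration (buffer size and helper name are invented):

#include <linux/errno.h>
#include <linux/string.h>

#define CH_NAME_SZ 16

static int store_channel_name(char dst[CH_NAME_SZ], const char *name)
{
	ssize_t len;

	/* always NUL-terminates; returns -E2BIG instead of silently
	 * leaving an unterminated string behind */
	len = strscpy(dst, name, CH_NAME_SZ);
	if (len < 0)
		return -E2BIG;

	return 0;
}
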
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index b5cc43d12b6f..ca08ae3108b2 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -1620,7 +1620,7 @@ err_cleanup:
return ret;
}
-static int stm32_dfsdm_adc_remove(struct platform_device *pdev)
+static void stm32_dfsdm_adc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
@@ -1629,8 +1629,6 @@ static int stm32_dfsdm_adc_remove(struct platform_device *pdev)
of_platform_depopulate(&pdev->dev);
iio_device_unregister(indio_dev);
stm32_dfsdm_dma_release(indio_dev);
-
- return 0;
}
static int stm32_dfsdm_adc_suspend(struct device *dev)
@@ -1677,7 +1675,7 @@ static struct platform_driver stm32_dfsdm_adc_driver = {
.pm = pm_sleep_ptr(&stm32_dfsdm_adc_pm_ops),
},
.probe = stm32_dfsdm_adc_probe,
- .remove = stm32_dfsdm_adc_remove,
+ .remove_new = stm32_dfsdm_adc_remove,
};
module_platform_driver(stm32_dfsdm_adc_driver);
diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c
index 0f6ebb3061a0..a05d978b8cb8 100644
--- a/drivers/iio/adc/stm32-dfsdm-core.c
+++ b/drivers/iio/adc/stm32-dfsdm-core.c
@@ -436,7 +436,7 @@ pm_put:
return ret;
}
-static int stm32_dfsdm_core_remove(struct platform_device *pdev)
+static void stm32_dfsdm_core_remove(struct platform_device *pdev)
{
struct stm32_dfsdm *dfsdm = platform_get_drvdata(pdev);
@@ -446,8 +446,6 @@ static int stm32_dfsdm_core_remove(struct platform_device *pdev)
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
stm32_dfsdm_clk_disable_unprepare(dfsdm);
-
- return 0;
}
static int stm32_dfsdm_core_suspend(struct device *dev)
@@ -508,7 +506,7 @@ static const struct dev_pm_ops stm32_dfsdm_core_pm_ops = {
static struct platform_driver stm32_dfsdm_driver = {
.probe = stm32_dfsdm_probe,
- .remove = stm32_dfsdm_core_remove,
+ .remove_new = stm32_dfsdm_core_remove,
.driver = {
.name = "stm32-dfsdm",
.of_match_table = stm32_dfsdm_of_match,
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
index 25bba96367a8..100ecced5fc1 100644
--- a/drivers/iio/adc/sun4i-gpadc-iio.c
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
@@ -669,7 +669,7 @@ err_map:
return ret;
}
-static int sun4i_gpadc_remove(struct platform_device *pdev)
+static void sun4i_gpadc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct sun4i_gpadc_iio *info = iio_priv(indio_dev);
@@ -678,12 +678,10 @@ static int sun4i_gpadc_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
if (!IS_ENABLED(CONFIG_THERMAL_OF))
- return 0;
+ return;
if (!info->no_irq)
iio_map_array_unregister(indio_dev);
-
- return 0;
}
static const struct platform_device_id sun4i_gpadc_id[] = {
@@ -702,7 +700,7 @@ static struct platform_driver sun4i_gpadc_driver = {
},
.id_table = sun4i_gpadc_id,
.probe = sun4i_gpadc_probe,
- .remove = sun4i_gpadc_remove,
+ .remove_new = sun4i_gpadc_remove,
};
MODULE_DEVICE_TABLE(of, sun4i_gpadc_of_id);
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index 50c450e7a55f..6c2cb3dabbbf 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -154,7 +154,6 @@ static void adc081c_reg_disable(void *reg)
static int adc081c_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct iio_dev *iio;
struct adc081c *adc;
const struct adcxx1c_model *model;
@@ -163,10 +162,7 @@ static int adc081c_probe(struct i2c_client *client)
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
return -EOPNOTSUPP;
- if (dev_fwnode(&client->dev))
- model = device_get_match_data(&client->dev);
- else
- model = &adcxx1c_models[id->driver_data];
+ model = i2c_get_match_data(client);
iio = devm_iio_device_alloc(&client->dev, sizeof(*adc));
if (!iio)
@@ -207,9 +203,9 @@ static int adc081c_probe(struct i2c_client *client)
}
static const struct i2c_device_id adc081c_id[] = {
- { "adc081c", ADC081C },
- { "adc101c", ADC101C },
- { "adc121c", ADC121C },
+ { "adc081c", (kernel_ulong_t)&adcxx1c_models[ADC081C] },
+ { "adc101c", (kernel_ulong_t)&adcxx1c_models[ADC101C] },
+ { "adc121c", (kernel_ulong_t)&adcxx1c_models[ADC121C] },
{ }
};
MODULE_DEVICE_TABLE(i2c, adc081c_id);
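
[Editor's note] i2c_get_match_data() returns the per-chip data whether the client was matched through DT/ACPI or through the legacy I2C ID table, so both tables are converted to carry pointers to the model data; for i2c_device_id that means casting through kernel_ulong_t, the type of .driver_data. A hedged sketch with invented chip names:

#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>

struct qux_model {
	unsigned int resolution_bits;
};

static const struct qux_model qux_models[] = {
	{ .resolution_bits = 8 },
	{ .resolution_bits = 12 },
};

static const struct of_device_id qux_of_match[] = {
	{ .compatible = "vendor,qux08", .data = &qux_models[0] },
	{ .compatible = "vendor,qux12", .data = &qux_models[1] },
	{ }
};
MODULE_DEVICE_TABLE(of, qux_of_match);

static const struct i2c_device_id qux_id[] = {
	{ "qux08", (kernel_ulong_t)&qux_models[0] },
	{ "qux12", (kernel_ulong_t)&qux_models[1] },
	{ }
};
MODULE_DEVICE_TABLE(i2c, qux_id);

static int qux_probe(struct i2c_client *client)
{
	const struct qux_model *model;

	/* covers both firmware matches and plain i2c_device_id matches,
	 * replacing the dev_fwnode()/id->driver_data branching */
	model = i2c_get_match_data(client);
	if (!model)
		return -ENODEV;

	dev_info(&client->dev, "%u-bit device\n", model->resolution_bits);
	return 0;
}

static struct i2c_driver qux_driver = {
	.driver = {
		.name = "qux",
		.of_match_table = qux_of_match,
	},
	.probe = qux_probe,
	.id_table = qux_id,
};
module_i2c_driver(qux_driver);
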
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index 075c75a87544..6799ea49dbc7 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -976,16 +976,13 @@ static int ads1015_set_conv_mode(struct ads1015_data *data, int mode)
static int ads1015_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
const struct ads1015_chip_data *chip;
struct iio_dev *indio_dev;
struct ads1015_data *data;
int ret;
int i;
- chip = device_get_match_data(&client->dev);
- if (!chip)
- chip = (const struct ads1015_chip_data *)id->driver_data;
+ chip = i2c_get_match_data(client);
if (!chip)
return dev_err_probe(&client->dev, -EINVAL, "Unknown chip\n");
@@ -1047,11 +1044,13 @@ static int ads1015_probe(struct i2c_client *client)
1 << ADS1015_CFG_COMP_LAT_SHIFT;
switch (irq_trig) {
+ case IRQF_TRIGGER_FALLING:
case IRQF_TRIGGER_LOW:
cfg_comp |= ADS1015_CFG_COMP_POL_LOW <<
ADS1015_CFG_COMP_POL_SHIFT;
break;
case IRQF_TRIGGER_HIGH:
+ case IRQF_TRIGGER_RISING:
cfg_comp |= ADS1015_CFG_COMP_POL_HIGH <<
ADS1015_CFG_COMP_POL_SHIFT;
break;
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 8db7a01cb5fb..c755e8cd5220 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -681,7 +681,7 @@ err_dma:
return err;
}
-static int tiadc_remove(struct platform_device *pdev)
+static void tiadc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct tiadc_device *adc_dev = iio_priv(indio_dev);
@@ -697,8 +697,6 @@ static int tiadc_remove(struct platform_device *pdev)
step_en = get_adc_step_mask(adc_dev);
am335x_tsc_se_clr(adc_dev->mfd_tscadc, step_en);
-
- return 0;
}
static int tiadc_suspend(struct device *dev)
@@ -747,7 +745,7 @@ static struct platform_driver tiadc_driver = {
.of_match_table = ti_adc_dt_ids,
},
.probe = tiadc_probe,
- .remove = tiadc_remove,
+ .remove_new = tiadc_remove,
};
module_platform_driver(tiadc_driver);
diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
index c279c4f2c9b7..4a247ca25a44 100644
--- a/drivers/iio/adc/twl4030-madc.c
+++ b/drivers/iio/adc/twl4030-madc.c
@@ -892,7 +892,7 @@ err_current_generator:
return ret;
}
-static int twl4030_madc_remove(struct platform_device *pdev)
+static void twl4030_madc_remove(struct platform_device *pdev)
{
struct iio_dev *iio_dev = platform_get_drvdata(pdev);
struct twl4030_madc_data *madc = iio_priv(iio_dev);
@@ -903,8 +903,6 @@ static int twl4030_madc_remove(struct platform_device *pdev)
twl4030_madc_set_power(madc, 0);
regulator_disable(madc->usb3v1);
-
- return 0;
}
#ifdef CONFIG_OF
@@ -917,7 +915,7 @@ MODULE_DEVICE_TABLE(of, twl_madc_of_match);
static struct platform_driver twl4030_madc_driver = {
.probe = twl4030_madc_probe,
- .remove = twl4030_madc_remove,
+ .remove_new = twl4030_madc_remove,
.driver = {
.name = "twl4030_madc",
.of_match_table = of_match_ptr(twl_madc_of_match),
diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
index 32873fb5f367..78bf55438b2c 100644
--- a/drivers/iio/adc/twl6030-gpadc.c
+++ b/drivers/iio/adc/twl6030-gpadc.c
@@ -16,9 +16,10 @@
*/
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/of_platform.h>
+#include <linux/property.h>
#include <linux/mfd/twl.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -879,17 +880,14 @@ static int twl6030_gpadc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct twl6030_gpadc_data *gpadc;
const struct twl6030_gpadc_platform_data *pdata;
- const struct of_device_id *match;
struct iio_dev *indio_dev;
int irq;
int ret;
- match = of_match_device(of_twl6030_match_tbl, dev);
- if (!match)
+ pdata = device_get_match_data(&pdev->dev);
+ if (!pdata)
return -EINVAL;
- pdata = match->data;
-
indio_dev = devm_iio_device_alloc(dev, sizeof(*gpadc));
if (!indio_dev)
return -ENOMEM;
@@ -968,14 +966,12 @@ static int twl6030_gpadc_probe(struct platform_device *pdev)
return iio_device_register(indio_dev);
}
-static int twl6030_gpadc_remove(struct platform_device *pdev)
+static void twl6030_gpadc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
twl6030_gpadc_disable_irq(TWL6030_GPADC_RT_SW1_EOC_MASK);
iio_device_unregister(indio_dev);
-
- return 0;
}
static int twl6030_gpadc_suspend(struct device *pdev)
@@ -1007,7 +1003,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(twl6030_gpadc_pm_ops, twl6030_gpadc_suspend,
static struct platform_driver twl6030_gpadc_driver = {
.probe = twl6030_gpadc_probe,
- .remove = twl6030_gpadc_remove,
+ .remove_new = twl6030_gpadc_remove,
.driver = {
.name = DRIVER_NAME,
.pm = pm_sleep_ptr(&twl6030_gpadc_pm_ops),
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index ae31aafd2653..e4548df3f8fb 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -916,7 +916,7 @@ error_adc_clk_enable:
return ret;
}
-static int vf610_adc_remove(struct platform_device *pdev)
+static void vf610_adc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct vf610_adc *info = iio_priv(indio_dev);
@@ -925,8 +925,6 @@ static int vf610_adc_remove(struct platform_device *pdev)
iio_triggered_buffer_cleanup(indio_dev);
regulator_disable(info->vref);
clk_disable_unprepare(info->clk);
-
- return 0;
}
static int vf610_adc_suspend(struct device *dev)
@@ -974,7 +972,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(vf610_adc_pm_ops, vf610_adc_suspend,
static struct platform_driver vf610_adc_driver = {
.probe = vf610_adc_probe,
- .remove = vf610_adc_remove,
+ .remove_new = vf610_adc_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = vf610_adc_match,
diff --git a/drivers/iio/addac/Kconfig b/drivers/iio/addac/Kconfig
index 397544f23b85..16876b8b5c4e 100644
--- a/drivers/iio/addac/Kconfig
+++ b/drivers/iio/addac/Kconfig
@@ -40,6 +40,7 @@ config STX104
select REGMAP_MMIO
select GPIOLIB
select GPIO_REGMAP
+ select I8254
help
Say yes here to build support for the Apex Embedded Systems STX104
integrated analog PC/104 card.
diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c
index 6b0e8218f150..7af3e4b8fe3b 100644
--- a/drivers/iio/addac/ad74413r.c
+++ b/drivers/iio/addac/ad74413r.c
@@ -442,11 +442,29 @@ static int ad74413r_set_channel_function(struct ad74413r_state *st,
int ret;
ret = regmap_update_bits(st->regmap,
+ AD74413R_REG_CH_FUNC_SETUP_X(channel),
+ AD74413R_CH_FUNC_SETUP_MASK,
+ CH_FUNC_HIGH_IMPEDANCE);
+ if (ret)
+ return ret;
+
+ /* Set DAC code to 0 prior to changing channel function */
+ ret = ad74413r_set_channel_dac_code(st, channel, 0);
+ if (ret)
+ return ret;
+
+ /* Delay required before transition to new desired mode */
+ usleep_range(130, 150);
+
+ ret = regmap_update_bits(st->regmap,
AD74413R_REG_CH_FUNC_SETUP_X(channel),
AD74413R_CH_FUNC_SETUP_MASK, func);
if (ret)
return ret;
+ /* Delay required before updating the new DAC code */
+ usleep_range(150, 170);
+
if (func == CH_FUNC_CURRENT_INPUT_LOOP_POWER)
ret = regmap_set_bits(st->regmap,
AD74413R_REG_ADC_CONFIG_X(channel),
@@ -705,8 +723,8 @@ static int ad74413r_get_input_current_scale(struct ad74413r_state *st,
return IIO_VAL_FRACTIONAL;
}
-static int ad74413_get_input_current_offset(struct ad74413r_state *st,
- unsigned int channel, int *val)
+static int ad74413r_get_input_current_offset(struct ad74413r_state *st,
+ unsigned int channel, int *val)
{
unsigned int range;
int voltage_range;
@@ -991,7 +1009,7 @@ static int ad74413r_read_raw(struct iio_dev *indio_dev,
return ad74413r_get_input_voltage_offset(st,
chan->channel, val);
case IIO_CURRENT:
- return ad74413_get_input_current_offset(st,
+ return ad74413r_get_input_current_offset(st,
chan->channel, val);
default:
return -EINVAL;
diff --git a/drivers/iio/addac/stx104.c b/drivers/iio/addac/stx104.c
index d1f7ce033b46..6946a65512ca 100644
--- a/drivers/iio/addac/stx104.c
+++ b/drivers/iio/addac/stx104.c
@@ -8,6 +8,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/regmap.h>
+#include <linux/i8254.h>
#include <linux/iio/iio.h>
#include <linux/iio/types.h>
#include <linux/isa.h>
@@ -55,6 +56,7 @@ MODULE_PARM_DESC(base, "Apex Embedded Systems STX104 base addresses");
#define STX104_ADC_STATUS (STX104_AIO_BASE + 0x8)
#define STX104_ADC_CONTROL (STX104_AIO_BASE + 0x9)
#define STX104_ADC_CONFIGURATION (STX104_AIO_BASE + 0x11)
+#define STX104_I8254_BASE (STX104_AIO_BASE + 0x12)
#define STX104_AIO_DATA_STRIDE 2
#define STX104_DAC_OFFSET(_channel) (STX104_DAC_BASE + STX104_AIO_DATA_STRIDE * (_channel))
@@ -77,6 +79,7 @@ MODULE_PARM_DESC(base, "Apex Embedded Systems STX104 base addresses");
/* ADC Configuration */
#define STX104_GAIN GENMASK(1, 0)
#define STX104_ADBU BIT(2)
+#define STX104_RBK GENMASK(7, 4)
#define STX104_BIPOLAR 0
#define STX104_GAIN_X1 0
#define STX104_GAIN_X2 1
@@ -168,6 +171,32 @@ static const struct regmap_config dio_regmap_config = {
.io_port = true,
};
+static const struct regmap_range pit_wr_ranges[] = {
+ regmap_reg_range(0x0, 0x3),
+};
+static const struct regmap_range pit_rd_ranges[] = {
+ regmap_reg_range(0x0, 0x2),
+};
+static const struct regmap_access_table pit_wr_table = {
+ .yes_ranges = pit_wr_ranges,
+ .n_yes_ranges = ARRAY_SIZE(pit_wr_ranges),
+};
+static const struct regmap_access_table pit_rd_table = {
+ .yes_ranges = pit_rd_ranges,
+ .n_yes_ranges = ARRAY_SIZE(pit_rd_ranges),
+};
+
+static const struct regmap_config pit_regmap_config = {
+ .name = "i8254",
+ .reg_bits = 8,
+ .reg_stride = 1,
+ .reg_base = STX104_I8254_BASE,
+ .val_bits = 8,
+ .io_port = true,
+ .wr_table = &pit_wr_table,
+ .rd_table = &pit_rd_table,
+};
+
static int stx104_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val, int *val2, long mask)
{
@@ -339,6 +368,21 @@ static const char *stx104_names[STX104_NGPIO] = {
"DIN0", "DIN1", "DIN2", "DIN3", "DOUT0", "DOUT1", "DOUT2", "DOUT3"
};
+static int bank_select_i8254(struct regmap *map)
+{
+ const u8 select_i8254[] = { 0x3, 0xB, 0xA };
+ size_t i;
+ int err;
+
+ for (i = 0; i < ARRAY_SIZE(select_i8254); i++) {
+ err = regmap_write_bits(map, STX104_ADC_CONFIGURATION, STX104_RBK, select_i8254[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int stx104_init_hw(struct stx104_iio *const priv)
{
int err;
@@ -361,7 +405,7 @@ static int stx104_init_hw(struct stx104_iio *const priv)
if (err)
return err;
- return 0;
+ return bank_select_i8254(priv->aio_ctl_map);
}
static int stx104_probe(struct device *dev, unsigned int id)
@@ -369,6 +413,7 @@ static int stx104_probe(struct device *dev, unsigned int id)
struct iio_dev *indio_dev;
struct stx104_iio *priv;
struct gpio_regmap_config gpio_config;
+ struct i8254_regmap_config pit_config;
void __iomem *stx104_base;
struct regmap *aio_ctl_map;
struct regmap *aio_data_map;
@@ -406,6 +451,11 @@ static int stx104_probe(struct device *dev, unsigned int id)
return dev_err_probe(dev, PTR_ERR(dio_map),
"Unable to initialize dio register map\n");
+ pit_config.map = devm_regmap_init_mmio(dev, stx104_base, &pit_regmap_config);
+ if (IS_ERR(pit_config.map))
+ return dev_err_probe(dev, PTR_ERR(pit_config.map),
+ "Unable to initialize i8254 register map\n");
+
priv = iio_priv(indio_dev);
priv->aio_ctl_map = aio_ctl_map;
priv->aio_data_map = aio_data_map;
@@ -449,7 +499,13 @@ static int stx104_probe(struct device *dev, unsigned int id)
.drvdata = dio_map,
};
- return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
+ err = PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
+ if (err)
+ return err;
+
+ pit_config.parent = dev;
+
+ return devm_i8254_regmap_register(dev, &pit_config);
}
static struct isa_driver stx104_driver = {
@@ -464,3 +520,4 @@ module_isa_driver(stx104_driver, num_stx104);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("Apex Embedded Systems STX104 IIO driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(I8254);
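
[Editor's note] The stx104 change carves a second regmap out of the same I/O region for the on-board i8254 timer and uses regmap access tables to confine it to the offsets that are actually writable and readable. A sketch of the access-table idea in isolation; the device and offsets below are hypothetical, only the regmap_config fields mirror the ones used above:

#include <linux/regmap.h>

static const struct regmap_range ctl_wr_ranges[] = {
	regmap_reg_range(0x0, 0x3),	/* control/mode registers: writable */
};
static const struct regmap_range ctl_rd_ranges[] = {
	regmap_reg_range(0x0, 0x2),	/* counter latches: readable */
};
static const struct regmap_access_table ctl_wr_table = {
	.yes_ranges = ctl_wr_ranges,
	.n_yes_ranges = ARRAY_SIZE(ctl_wr_ranges),
};
static const struct regmap_access_table ctl_rd_table = {
	.yes_ranges = ctl_rd_ranges,
	.n_yes_ranges = ARRAY_SIZE(ctl_rd_ranges),
};

static const struct regmap_config ctl_regmap_config = {
	.reg_bits = 8,
	.reg_stride = 1,
	.reg_base = 0x12,	/* offsets are applied on top of this base */
	.val_bits = 8,
	.io_port = true,	/* backed by port I/O, not plain MMIO */
	.wr_table = &ctl_wr_table,	/* writes outside the table are rejected */
	.rd_table = &ctl_rd_table,	/* reads outside the table are rejected */
};

With the tables in place, regmap itself refuses accesses to registers the hardware does not implement, so consumers such as the i8254 library cannot stray outside the timer's window.
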
diff --git a/drivers/iio/amplifiers/hmc425a.c b/drivers/iio/amplifiers/hmc425a.c
index 108f0f1685ef..e87d35d50a95 100644
--- a/drivers/iio/amplifiers/hmc425a.c
+++ b/drivers/iio/amplifiers/hmc425a.c
@@ -21,6 +21,7 @@
enum hmc425a_type {
ID_HMC425A,
+ ID_HMC540S,
};
struct hmc425a_chip_info {
@@ -70,6 +71,9 @@ static int hmc425a_read_raw(struct iio_dev *indio_dev,
case ID_HMC425A:
gain = ~code * -500;
break;
+ case ID_HMC540S:
+ gain = ~code * -1000;
+ break;
}
*val = gain / 1000;
@@ -106,6 +110,9 @@ static int hmc425a_write_raw(struct iio_dev *indio_dev,
case ID_HMC425A:
code = ~((abs(gain) / 500) & 0x3F);
break;
+ case ID_HMC540S:
+ code = ~((abs(gain) / 1000) & 0xF);
+ break;
}
mutex_lock(&st->lock);
@@ -157,6 +164,7 @@ static const struct iio_chan_spec hmc425a_channels[] = {
/* Match table for of_platform binding */
static const struct of_device_id hmc425a_of_match[] = {
{ .compatible = "adi,hmc425a", .data = (void *)ID_HMC425A },
+ { .compatible = "adi,hmc540s", .data = (void *)ID_HMC540S },
{},
};
MODULE_DEVICE_TABLE(of, hmc425a_of_match);
@@ -171,6 +179,15 @@ static struct hmc425a_chip_info hmc425a_chip_info_tbl[] = {
.gain_max = 0,
.default_gain = -0x40, /* set default gain -31.5db*/
},
+ [ID_HMC540S] = {
+ .name = "hmc540s",
+ .channels = hmc425a_channels,
+ .num_channels = ARRAY_SIZE(hmc425a_channels),
+ .num_gpios = 4,
+ .gain_min = -15000,
+ .gain_max = 0,
+ .default_gain = -0x10, /* set default gain -15.0db*/
+ },
};
static int hmc425a_probe(struct platform_device *pdev)
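
[Editor's note] For the HMC540S addition it may help to spell out the control-word arithmetic shared with the HMC425A: the attenuation pins are active low, so the number of attenuation steps is the bitwise complement of the code word, with 0.5 dB steps over 6 bits for the HMC425A and 1 dB steps over 4 bits for the HMC540S. A hedged sketch of that conversion; struct and helper names are invented, only the encoding is taken from the driver:

#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct atten_encoding {
	unsigned int step_mdb;	/* milli-dB per LSB */
	u32 mask;		/* valid code bits */
};

static const struct atten_encoding hmc425a_enc = { .step_mdb = 500,  .mask = GENMASK(5, 0) };
static const struct atten_encoding hmc540s_enc = { .step_mdb = 1000, .mask = GENMASK(3, 0) };

/* gain is negative milli-dB, e.g. -15000 for 15 dB of attenuation */
static u32 atten_gain_to_code(const struct atten_encoding *enc, int gain_mdb)
{
	/* complement: all-ones code means 0 dB, all-zeroes means max */
	return ~((abs(gain_mdb) / enc->step_mdb) & enc->mask);
}

static int atten_code_to_gain(const struct atten_encoding *enc, u32 code)
{
	return -((~code & enc->mask) * enc->step_mdb);
}

For example, -15000 (15 dB) on the HMC540S encodes to ~0xF, i.e. all four control pins driven low, and decodes back to -15000.
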
diff --git a/drivers/iio/chemical/atlas-ezo-sensor.c b/drivers/iio/chemical/atlas-ezo-sensor.c
index 8fc926a2d33b..761a853a4d17 100644
--- a/drivers/iio/chemical/atlas-ezo-sensor.c
+++ b/drivers/iio/chemical/atlas-ezo-sensor.c
@@ -203,7 +203,6 @@ MODULE_DEVICE_TABLE(of, atlas_ezo_dt_ids);
static int atlas_ezo_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
const struct atlas_ezo_device *chip;
struct atlas_ezo_data *data;
struct iio_dev *indio_dev;
@@ -212,10 +211,7 @@ static int atlas_ezo_probe(struct i2c_client *client)
if (!indio_dev)
return -ENOMEM;
- if (dev_fwnode(&client->dev))
- chip = device_get_match_data(&client->dev);
- else
- chip = (const struct atlas_ezo_device *)id->driver_data;
+ chip = i2c_get_match_data(client);
if (!chip)
return -EINVAL;
diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c
index fb15bb216019..baf93e5e3ca7 100644
--- a/drivers/iio/chemical/atlas-sensor.c
+++ b/drivers/iio/chemical/atlas-sensor.c
@@ -87,7 +87,7 @@ enum {
struct atlas_data {
struct i2c_client *client;
struct iio_trigger *trig;
- struct atlas_device *chip;
+ const struct atlas_device *chip;
struct regmap *regmap;
struct irq_work work;
unsigned int interrupt_enabled;
@@ -353,7 +353,7 @@ struct atlas_device {
int delay;
};
-static struct atlas_device atlas_devices[] = {
+static const struct atlas_device atlas_devices[] = {
[ATLAS_PH_SM] = {
.channels = atlas_ph_channels,
.num_channels = 3,
@@ -589,30 +589,29 @@ static const struct iio_info atlas_info = {
};
static const struct i2c_device_id atlas_id[] = {
- { "atlas-ph-sm", ATLAS_PH_SM },
- { "atlas-ec-sm", ATLAS_EC_SM },
- { "atlas-orp-sm", ATLAS_ORP_SM },
- { "atlas-do-sm", ATLAS_DO_SM },
- { "atlas-rtd-sm", ATLAS_RTD_SM },
+ { "atlas-ph-sm", (kernel_ulong_t)&atlas_devices[ATLAS_PH_SM] },
+ { "atlas-ec-sm", (kernel_ulong_t)&atlas_devices[ATLAS_EC_SM] },
+ { "atlas-orp-sm", (kernel_ulong_t)&atlas_devices[ATLAS_ORP_SM] },
+ { "atlas-do-sm", (kernel_ulong_t)&atlas_devices[ATLAS_DO_SM] },
+ { "atlas-rtd-sm", (kernel_ulong_t)&atlas_devices[ATLAS_RTD_SM] },
{}
};
MODULE_DEVICE_TABLE(i2c, atlas_id);
static const struct of_device_id atlas_dt_ids[] = {
- { .compatible = "atlas,ph-sm", .data = (void *)ATLAS_PH_SM, },
- { .compatible = "atlas,ec-sm", .data = (void *)ATLAS_EC_SM, },
- { .compatible = "atlas,orp-sm", .data = (void *)ATLAS_ORP_SM, },
- { .compatible = "atlas,do-sm", .data = (void *)ATLAS_DO_SM, },
- { .compatible = "atlas,rtd-sm", .data = (void *)ATLAS_RTD_SM, },
+ { .compatible = "atlas,ph-sm", .data = &atlas_devices[ATLAS_PH_SM] },
+ { .compatible = "atlas,ec-sm", .data = &atlas_devices[ATLAS_EC_SM] },
+ { .compatible = "atlas,orp-sm", .data = &atlas_devices[ATLAS_ORP_SM] },
+ { .compatible = "atlas,do-sm", .data = &atlas_devices[ATLAS_DO_SM] },
+ { .compatible = "atlas,rtd-sm", .data = &atlas_devices[ATLAS_RTD_SM] },
{ }
};
MODULE_DEVICE_TABLE(of, atlas_dt_ids);
static int atlas_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct atlas_data *data;
- struct atlas_device *chip;
+ const struct atlas_device *chip;
struct iio_trigger *trig;
struct iio_dev *indio_dev;
int ret;
@@ -621,10 +620,7 @@ static int atlas_probe(struct i2c_client *client)
if (!indio_dev)
return -ENOMEM;
- if (!dev_fwnode(&client->dev))
- chip = &atlas_devices[id->driver_data];
- else
- chip = &atlas_devices[(unsigned long)device_get_match_data(&client->dev)];
+ chip = i2c_get_match_data(client);
indio_dev->info = &atlas_info;
indio_dev->name = ATLAS_DRV_NAME;
diff --git a/drivers/iio/chemical/sgp30.c b/drivers/iio/chemical/sgp30.c
index b509cff9ce37..21730d62b5c8 100644
--- a/drivers/iio/chemical/sgp30.c
+++ b/drivers/iio/chemical/sgp30.c
@@ -114,6 +114,7 @@ struct sgp_data {
};
struct sgp_device {
+ unsigned long product_id;
const struct iio_chan_spec *channels;
int num_channels;
};
@@ -182,10 +183,12 @@ static const struct iio_chan_spec sgpc3_channels[] = {
static const struct sgp_device sgp_devices[] = {
[SGP30] = {
+ .product_id = SGP30,
.channels = sgp30_channels,
.num_channels = ARRAY_SIZE(sgp30_channels),
},
[SGPC3] = {
+ .product_id = SGPC3,
.channels = sgpc3_channels,
.num_channels = ARRAY_SIZE(sgpc3_channels),
},
@@ -491,28 +494,25 @@ static const struct iio_info sgp_info = {
};
static const struct of_device_id sgp_dt_ids[] = {
- { .compatible = "sensirion,sgp30", .data = (void *)SGP30 },
- { .compatible = "sensirion,sgpc3", .data = (void *)SGPC3 },
+ { .compatible = "sensirion,sgp30", .data = &sgp_devices[SGP30] },
+ { .compatible = "sensirion,sgpc3", .data = &sgp_devices[SGPC3] },
{ }
};
static int sgp_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
+ const struct sgp_device *match_data;
struct device *dev = &client->dev;
struct iio_dev *indio_dev;
struct sgp_data *data;
- unsigned long product_id;
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
- if (dev_fwnode(dev))
- product_id = (unsigned long)device_get_match_data(dev);
- else
- product_id = id->driver_data;
+ match_data = i2c_get_match_data(client);
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
@@ -528,15 +528,15 @@ static int sgp_probe(struct i2c_client *client)
data->feature_set = be16_to_cpu(data->buffer.raw_words[0].value);
- ret = sgp_check_compat(data, product_id);
+ ret = sgp_check_compat(data, match_data->product_id);
if (ret)
return ret;
indio_dev->info = &sgp_info;
indio_dev->name = id->name;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = sgp_devices[product_id].channels;
- indio_dev->num_channels = sgp_devices[product_id].num_channels;
+ indio_dev->channels = match_data->channels;
+ indio_dev->num_channels = match_data->num_channels;
sgp_init(data);
@@ -562,8 +562,8 @@ static void sgp_remove(struct i2c_client *client)
}
static const struct i2c_device_id sgp_id[] = {
- { "sgp30", SGP30 },
- { "sgpc3", SGPC3 },
+ { "sgp30", (kernel_ulong_t)&sgp_devices[SGP30] },
+ { "sgpc3", (kernel_ulong_t)&sgp_devices[SGPC3] },
{ }
};
diff --git a/drivers/iio/chemical/vz89x.c b/drivers/iio/chemical/vz89x.c
index 13555f4f401a..5b358bcd311b 100644
--- a/drivers/iio/chemical/vz89x.c
+++ b/drivers/iio/chemical/vz89x.c
@@ -342,19 +342,17 @@ static const struct vz89x_chip_data vz89x_chips[] = {
};
static const struct of_device_id vz89x_dt_ids[] = {
- { .compatible = "sgx,vz89x", .data = (void *) VZ89X },
- { .compatible = "sgx,vz89te", .data = (void *) VZ89TE },
+ { .compatible = "sgx,vz89x", .data = &vz89x_chips[VZ89X] },
+ { .compatible = "sgx,vz89te", .data = &vz89x_chips[VZ89TE] },
{ }
};
MODULE_DEVICE_TABLE(of, vz89x_dt_ids);
static int vz89x_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct device *dev = &client->dev;
struct iio_dev *indio_dev;
struct vz89x_data *data;
- int chip_id;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
@@ -369,14 +367,10 @@ static int vz89x_probe(struct i2c_client *client)
else
return -EOPNOTSUPP;
- if (!dev_fwnode(dev))
- chip_id = id->driver_data;
- else
- chip_id = (unsigned long)device_get_match_data(dev);
+ data->chip = i2c_get_match_data(client);
i2c_set_clientdata(client, indio_dev);
data->client = client;
- data->chip = &vz89x_chips[chip_id];
data->last_update = jiffies - HZ;
mutex_init(&data->lock);
@@ -391,8 +385,8 @@ static int vz89x_probe(struct i2c_client *client)
}
static const struct i2c_device_id vz89x_id[] = {
- { "vz89x", VZ89X },
- { "vz89te", VZ89TE },
+ { "vz89x", (kernel_ulong_t)&vz89x_chips[VZ89X] },
+ { "vz89te", (kernel_ulong_t)&vz89x_chips[VZ89TE] },
{ }
};
MODULE_DEVICE_TABLE(i2c, vz89x_id);
diff --git a/drivers/iio/dac/dpot-dac.c b/drivers/iio/dac/dpot-dac.c
index 83ce9489259c..7332064d0852 100644
--- a/drivers/iio/dac/dpot-dac.c
+++ b/drivers/iio/dac/dpot-dac.c
@@ -226,15 +226,13 @@ disable_reg:
return ret;
}
-static int dpot_dac_remove(struct platform_device *pdev)
+static void dpot_dac_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct dpot_dac *dac = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
regulator_disable(dac->vref);
-
- return 0;
}
static const struct of_device_id dpot_dac_match[] = {
@@ -245,7 +243,7 @@ MODULE_DEVICE_TABLE(of, dpot_dac_match);
static struct platform_driver dpot_dac_driver = {
.probe = dpot_dac_probe,
- .remove = dpot_dac_remove,
+ .remove_new = dpot_dac_remove,
.driver = {
.name = "iio-dpot-dac",
.of_match_table = dpot_dac_match,
diff --git a/drivers/iio/dac/lpc18xx_dac.c b/drivers/iio/dac/lpc18xx_dac.c
index 60467c6f2c6e..b3aa4443a6a4 100644
--- a/drivers/iio/dac/lpc18xx_dac.c
+++ b/drivers/iio/dac/lpc18xx_dac.c
@@ -165,7 +165,7 @@ dis_reg:
return ret;
}
-static int lpc18xx_dac_remove(struct platform_device *pdev)
+static void lpc18xx_dac_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct lpc18xx_dac *dac = iio_priv(indio_dev);
@@ -175,8 +175,6 @@ static int lpc18xx_dac_remove(struct platform_device *pdev)
writel(0, dac->base + LPC18XX_DAC_CTRL);
clk_disable_unprepare(dac->clk);
regulator_disable(dac->vref);
-
- return 0;
}
static const struct of_device_id lpc18xx_dac_match[] = {
@@ -187,7 +185,7 @@ MODULE_DEVICE_TABLE(of, lpc18xx_dac_match);
static struct platform_driver lpc18xx_dac_driver = {
.probe = lpc18xx_dac_probe,
- .remove = lpc18xx_dac_remove,
+ .remove_new = lpc18xx_dac_remove,
.driver = {
.name = "lpc18xx-dac",
.of_match_table = lpc18xx_dac_match,
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index f4a3124d29f2..25bb1c0490af 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -30,9 +30,14 @@
#define MCP472X_REF_VREF_UNBUFFERED 0x02
#define MCP472X_REF_VREF_BUFFERED 0x03
+struct mcp4725_chip_info {
+ const struct iio_chan_spec *chan_spec;
+ u8 dac_reg_offset;
+ bool use_ext_ref_voltage;
+};
+
struct mcp4725_data {
struct i2c_client *client;
- int id;
unsigned ref_mode;
bool vref_buffered;
u16 dac_value;
@@ -384,6 +389,7 @@ static int mcp4725_probe_dt(struct device *dev,
static int mcp4725_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
+ const struct mcp4725_chip_info *info;
struct mcp4725_data *data;
struct iio_dev *indio_dev;
struct mcp4725_platform_data *pdata, pdata_dt;
@@ -398,10 +404,7 @@ static int mcp4725_probe(struct i2c_client *client)
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
- if (dev_fwnode(&client->dev))
- data->id = (uintptr_t)device_get_match_data(&client->dev);
- else
- data->id = id->driver_data;
+ info = i2c_get_match_data(client);
pdata = dev_get_platdata(&client->dev);
if (!pdata) {
@@ -414,7 +417,7 @@ static int mcp4725_probe(struct i2c_client *client)
pdata = &pdata_dt;
}
- if (data->id == MCP4725 && pdata->use_vref) {
+ if (info->use_ext_ref_voltage && pdata->use_vref) {
dev_err(&client->dev,
"external reference is unavailable on MCP4725");
return -EINVAL;
@@ -455,12 +458,12 @@ static int mcp4725_probe(struct i2c_client *client)
indio_dev->name = id->name;
indio_dev->info = &mcp4725_info;
- indio_dev->channels = &mcp472x_channel[id->driver_data];
+ indio_dev->channels = info->chan_spec;
indio_dev->num_channels = 1;
indio_dev->modes = INDIO_DIRECT_MODE;
/* read current DAC value and settings */
- err = i2c_master_recv(client, inbuf, data->id == MCP4725 ? 3 : 4);
+ err = i2c_master_recv(client, inbuf, info->dac_reg_offset);
if (err < 0) {
dev_err(&client->dev, "failed to read DAC value");
@@ -470,10 +473,10 @@ static int mcp4725_probe(struct i2c_client *client)
data->powerdown = pd > 0;
data->powerdown_mode = pd ? pd - 1 : 2; /* largest resistor to gnd */
data->dac_value = (inbuf[1] << 4) | (inbuf[2] >> 4);
- if (data->id == MCP4726)
+ if (!info->use_ext_ref_voltage)
ref = (inbuf[3] >> 3) & 0x3;
- if (data->id == MCP4726 && ref != data->ref_mode) {
+ if (!info->use_ext_ref_voltage && ref != data->ref_mode) {
dev_info(&client->dev,
"voltage reference mode differs (conf: %u, eeprom: %u), setting %u",
data->ref_mode, ref, data->ref_mode);
@@ -510,9 +513,20 @@ static void mcp4725_remove(struct i2c_client *client)
regulator_disable(data->vdd_reg);
}
+static const struct mcp4725_chip_info mcp4725 = {
+ .chan_spec = &mcp472x_channel[MCP4725],
+ .dac_reg_offset = 3,
+ .use_ext_ref_voltage = true,
+};
+
+static const struct mcp4725_chip_info mcp4726 = {
+ .chan_spec = &mcp472x_channel[MCP4726],
+ .dac_reg_offset = 4,
+};
+
static const struct i2c_device_id mcp4725_id[] = {
- { "mcp4725", MCP4725 },
- { "mcp4726", MCP4726 },
+ { "mcp4725", (kernel_ulong_t)&mcp4725 },
+ { "mcp4726", (kernel_ulong_t)&mcp4726 },
{ }
};
MODULE_DEVICE_TABLE(i2c, mcp4725_id);
@@ -520,11 +534,11 @@ MODULE_DEVICE_TABLE(i2c, mcp4725_id);
static const struct of_device_id mcp4725_of_match[] = {
{
.compatible = "microchip,mcp4725",
- .data = (void *)MCP4725
+ .data = &mcp4725
},
{
.compatible = "microchip,mcp4726",
- .data = (void *)MCP4726
+ .data = &mcp4726
},
{ }
};
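
[Editor's note] The mcp4725 rework replaces run-time `data->id == MCP4725` comparisons with a const per-chip description, so probe() reads behavioural differences out of data rather than branching on an enum (the real driver calls the reference flag use_ext_ref_voltage and the read length dac_reg_offset). A short sketch of the idea with illustrative field names:

#include <linux/errno.h>
#include <linux/types.h>

struct dac_chip_info {
	u8 readback_len;	/* bytes returned by a settings read */
	bool has_ext_vref;	/* chip supports an external reference */
};

static const struct dac_chip_info dac_a = {
	.readback_len = 3,
	.has_ext_vref = false,
};

static const struct dac_chip_info dac_b = {
	.readback_len = 4,
	.has_ext_vref = true,
};

static int dac_check_pdata(const struct dac_chip_info *info, bool want_ext_vref)
{
	/* reject a platform-data request the silicon cannot honour */
	if (want_ext_vref && !info->has_ext_vref)
		return -EINVAL;
	return 0;
}

Adding another variant then means adding one more const struct and a table entry, with no new branches in probe().
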
diff --git a/drivers/iio/dac/stm32-dac-core.c b/drivers/iio/dac/stm32-dac-core.c
index 83bf184e3adc..e150ac729154 100644
--- a/drivers/iio/dac/stm32-dac-core.c
+++ b/drivers/iio/dac/stm32-dac-core.c
@@ -9,9 +9,12 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -94,16 +97,12 @@ static int stm32_dac_probe(struct platform_device *pdev)
struct reset_control *rst;
int ret;
- if (!dev->of_node)
- return -ENODEV;
-
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
platform_set_drvdata(pdev, &priv->common);
- cfg = (const struct stm32_dac_cfg *)
- of_match_device(dev->driver->of_match_table, dev)->data;
+ cfg = device_get_match_data(dev);
mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mmio))
@@ -183,7 +182,7 @@ err_pm_stop:
return ret;
}
-static int stm32_dac_remove(struct platform_device *pdev)
+static void stm32_dac_remove(struct platform_device *pdev)
{
pm_runtime_get_sync(&pdev->dev);
of_platform_depopulate(&pdev->dev);
@@ -191,8 +190,6 @@ static int stm32_dac_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
-
- return 0;
}
static int stm32_dac_core_resume(struct device *dev)
@@ -249,7 +246,7 @@ MODULE_DEVICE_TABLE(of, stm32_dac_of_match);
static struct platform_driver stm32_dac_driver = {
.probe = stm32_dac_probe,
- .remove = stm32_dac_remove,
+ .remove_new = stm32_dac_remove,
.driver = {
.name = "stm32-dac-core",
.of_match_table = stm32_dac_of_match,
diff --git a/drivers/iio/dac/stm32-dac.c b/drivers/iio/dac/stm32-dac.c
index 15eb44075107..5a722f307e7e 100644
--- a/drivers/iio/dac/stm32-dac.c
+++ b/drivers/iio/dac/stm32-dac.c
@@ -11,12 +11,13 @@
#include <linux/delay.h>
#include <linux/iio/iio.h>
#include <linux/kernel.h>
+#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/string_helpers.h>
+#include <linux/string_choices.h>
#include "stm32-dac-core.h"
@@ -361,7 +362,7 @@ err_pm_put:
return ret;
}
-static int stm32_dac_remove(struct platform_device *pdev)
+static void stm32_dac_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
@@ -370,8 +371,6 @@ static int stm32_dac_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
-
- return 0;
}
static int stm32_dac_suspend(struct device *dev)
@@ -399,7 +398,7 @@ MODULE_DEVICE_TABLE(of, stm32_dac_of_match);
static struct platform_driver stm32_dac_driver = {
.probe = stm32_dac_probe,
- .remove = stm32_dac_remove,
+ .remove_new = stm32_dac_remove,
.driver = {
.name = "stm32-dac",
.of_match_table = stm32_dac_of_match,
diff --git a/drivers/iio/dac/ti-dac5571.c b/drivers/iio/dac/ti-dac5571.c
index bab11b9adc25..efb1269a77c1 100644
--- a/drivers/iio/dac/ti-dac5571.c
+++ b/drivers/iio/dac/ti-dac5571.c
@@ -313,7 +313,6 @@ static int dac5571_probe(struct i2c_client *client)
const struct dac5571_spec *spec;
struct dac5571_data *data;
struct iio_dev *indio_dev;
- enum chip_id chip_id;
int ret, i;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
@@ -329,12 +328,7 @@ static int dac5571_probe(struct i2c_client *client)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = dac5571_channels;
- if (dev_fwnode(dev))
- chip_id = (uintptr_t)device_get_match_data(dev);
- else
- chip_id = id->driver_data;
-
- spec = &dac5571_spec[chip_id];
+ spec = i2c_get_match_data(client);
indio_dev->num_channels = spec->num_channels;
data->spec = spec;
@@ -392,31 +386,31 @@ static void dac5571_remove(struct i2c_client *i2c)
}
static const struct of_device_id dac5571_of_id[] = {
- {.compatible = "ti,dac5571", .data = (void *)single_8bit},
- {.compatible = "ti,dac6571", .data = (void *)single_10bit},
- {.compatible = "ti,dac7571", .data = (void *)single_12bit},
- {.compatible = "ti,dac5574", .data = (void *)quad_8bit},
- {.compatible = "ti,dac6574", .data = (void *)quad_10bit},
- {.compatible = "ti,dac7574", .data = (void *)quad_12bit},
- {.compatible = "ti,dac5573", .data = (void *)quad_8bit},
- {.compatible = "ti,dac6573", .data = (void *)quad_10bit},
- {.compatible = "ti,dac7573", .data = (void *)quad_12bit},
- {.compatible = "ti,dac121c081", .data = (void *)single_12bit},
+ {.compatible = "ti,dac121c081", .data = &dac5571_spec[single_12bit] },
+ {.compatible = "ti,dac5571", .data = &dac5571_spec[single_8bit] },
+ {.compatible = "ti,dac6571", .data = &dac5571_spec[single_10bit] },
+ {.compatible = "ti,dac7571", .data = &dac5571_spec[single_12bit] },
+ {.compatible = "ti,dac5574", .data = &dac5571_spec[quad_8bit] },
+ {.compatible = "ti,dac6574", .data = &dac5571_spec[quad_10bit] },
+ {.compatible = "ti,dac7574", .data = &dac5571_spec[quad_12bit] },
+ {.compatible = "ti,dac5573", .data = &dac5571_spec[quad_8bit] },
+ {.compatible = "ti,dac6573", .data = &dac5571_spec[quad_10bit] },
+ {.compatible = "ti,dac7573", .data = &dac5571_spec[quad_12bit] },
{}
};
MODULE_DEVICE_TABLE(of, dac5571_of_id);
static const struct i2c_device_id dac5571_id[] = {
- {"dac5571", single_8bit},
- {"dac6571", single_10bit},
- {"dac7571", single_12bit},
- {"dac5574", quad_8bit},
- {"dac6574", quad_10bit},
- {"dac7574", quad_12bit},
- {"dac5573", quad_8bit},
- {"dac6573", quad_10bit},
- {"dac7573", quad_12bit},
- {"dac121c081", single_12bit},
+ {"dac121c081", (kernel_ulong_t)&dac5571_spec[single_12bit] },
+ {"dac5571", (kernel_ulong_t)&dac5571_spec[single_8bit] },
+ {"dac6571", (kernel_ulong_t)&dac5571_spec[single_10bit] },
+ {"dac7571", (kernel_ulong_t)&dac5571_spec[single_12bit] },
+ {"dac5574", (kernel_ulong_t)&dac5571_spec[quad_8bit] },
+ {"dac6574", (kernel_ulong_t)&dac5571_spec[quad_10bit] },
+ {"dac7574", (kernel_ulong_t)&dac5571_spec[quad_12bit] },
+ {"dac5573", (kernel_ulong_t)&dac5571_spec[quad_8bit] },
+ {"dac6573", (kernel_ulong_t)&dac5571_spec[quad_10bit] },
+ {"dac7573", (kernel_ulong_t)&dac5571_spec[quad_12bit] },
{}
};
MODULE_DEVICE_TABLE(i2c, dac5571_id);
diff --git a/drivers/iio/dac/vf610_dac.c b/drivers/iio/dac/vf610_dac.c
index fc182250c622..de73bc5a1c93 100644
--- a/drivers/iio/dac/vf610_dac.c
+++ b/drivers/iio/dac/vf610_dac.c
@@ -231,7 +231,7 @@ error_iio_device_register:
return ret;
}
-static int vf610_dac_remove(struct platform_device *pdev)
+static void vf610_dac_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct vf610_dac *info = iio_priv(indio_dev);
@@ -239,8 +239,6 @@ static int vf610_dac_remove(struct platform_device *pdev)
iio_device_unregister(indio_dev);
vf610_dac_exit(info);
clk_disable_unprepare(info->clk);
-
- return 0;
}
static int vf610_dac_suspend(struct device *dev)
@@ -274,7 +272,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(vf610_dac_pm_ops, vf610_dac_suspend,
static struct platform_driver vf610_dac_driver = {
.probe = vf610_dac_probe,
- .remove = vf610_dac_remove,
+ .remove_new = vf610_dac_remove,
.driver = {
.name = "vf610-dac",
.of_match_table = vf610_dac_match,
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index 85e289700c3c..4abf80f75ef5 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -33,7 +33,6 @@ enum {
struct adf4350_state {
struct spi_device *spi;
- struct regulator *reg;
struct gpio_desc *lock_detect_gpiod;
struct adf4350_platform_data *pdata;
struct clk *clk;
@@ -469,6 +468,15 @@ static struct adf4350_platform_data *adf4350_parse_dt(struct device *dev)
return pdata;
}
+static void adf4350_power_down(void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct adf4350_state *st = iio_priv(indio_dev);
+
+ st->regs[ADF4350_REG2] |= ADF4350_REG2_POWER_DOWN_EN;
+ adf4350_sync_config(st);
+}
+
static int adf4350_probe(struct spi_device *spi)
{
struct adf4350_platform_data *pdata;
@@ -491,31 +499,21 @@ static int adf4350_probe(struct spi_device *spi)
}
if (!pdata->clkin) {
- clk = devm_clk_get(&spi->dev, "clkin");
+ clk = devm_clk_get_enabled(&spi->dev, "clkin");
if (IS_ERR(clk))
- return -EPROBE_DEFER;
-
- ret = clk_prepare_enable(clk);
- if (ret < 0)
- return ret;
+ return PTR_ERR(clk);
}
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_disable_clk;
- }
+ if (indio_dev == NULL)
+ return -ENOMEM;
st = iio_priv(indio_dev);
- st->reg = devm_regulator_get(&spi->dev, "vcc");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
- if (ret)
- goto error_disable_clk;
- }
+ ret = devm_regulator_get_enable(&spi->dev, "vcc");
+ if (ret)
+ return ret;
- spi_set_drvdata(spi, indio_dev);
st->spi = spi;
st->pdata = pdata;
@@ -544,47 +542,21 @@ static int adf4350_probe(struct spi_device *spi)
st->lock_detect_gpiod = devm_gpiod_get_optional(&spi->dev, NULL,
GPIOD_IN);
- if (IS_ERR(st->lock_detect_gpiod)) {
- ret = PTR_ERR(st->lock_detect_gpiod);
- goto error_disable_reg;
- }
+ if (IS_ERR(st->lock_detect_gpiod))
+ return PTR_ERR(st->lock_detect_gpiod);
if (pdata->power_up_frequency) {
ret = adf4350_set_freq(st, pdata->power_up_frequency);
if (ret)
- goto error_disable_reg;
+ return ret;
}
- ret = iio_device_register(indio_dev);
+ ret = devm_add_action_or_reset(&spi->dev, adf4350_power_down, indio_dev);
if (ret)
- goto error_disable_reg;
-
- return 0;
-
-error_disable_reg:
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
-error_disable_clk:
- clk_disable_unprepare(clk);
-
- return ret;
-}
-
-static void adf4350_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adf4350_state *st = iio_priv(indio_dev);
- struct regulator *reg = st->reg;
-
- st->regs[ADF4350_REG2] |= ADF4350_REG2_POWER_DOWN_EN;
- adf4350_sync_config(st);
-
- iio_device_unregister(indio_dev);
-
- clk_disable_unprepare(st->clk);
+ return dev_err_probe(&spi->dev, ret,
+ "Failed to add action to managed power down\n");
- if (!IS_ERR(reg))
- regulator_disable(reg);
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct of_device_id adf4350_of_match[] = {
@@ -607,7 +579,6 @@ static struct spi_driver adf4350_driver = {
.of_match_table = adf4350_of_match,
},
.probe = adf4350_probe,
- .remove = adf4350_remove,
.id_table = adf4350_id,
};
module_spi_driver(adf4350_driver);
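
[Editor's note] The adf4350 conversion removes its spi remove() handler by making everything device-managed and registering a devm action that powers the synthesizer down on unbind; because the action is added before devm_iio_device_register(), devm's reverse-order teardown unregisters the IIO device first and powers the part down last. A hedged sketch of that ordering with invented "pll" names:

#include <linux/device.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>

struct pll_state {
	struct spi_device *spi;
	bool powered;
};

static const struct iio_info pll_info = { };

static void pll_power_down(void *data)
{
	struct pll_state *st = data;

	/* runs on unbind, after every devm resource registered later
	 * (including the IIO device) has already been released */
	st->powered = false;
	/* real driver: set the power-down bit and sync the registers */
}

static int pll_probe(struct spi_device *spi)
{
	struct iio_dev *indio_dev;
	struct pll_state *st;
	int ret;

	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
	if (!indio_dev)
		return -ENOMEM;
	st = iio_priv(indio_dev);
	st->spi = spi;

	/* supply is enabled now and disabled automatically on detach */
	ret = devm_regulator_get_enable(&spi->dev, "vcc");
	if (ret)
		return ret;

	st->powered = true;
	ret = devm_add_action_or_reset(&spi->dev, pll_power_down, st);
	if (ret)
		return dev_err_probe(&spi->dev, ret,
				     "Failed to add power-down action\n");

	indio_dev->name = "pll";
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->info = &pll_info;	/* real driver: channels + callbacks */

	return devm_iio_device_register(&spi->dev, indio_dev);
}

static struct spi_driver pll_driver = {
	.driver = {
		.name = "pll-synth",
	},
	.probe = pll_probe,
};
module_spi_driver(pll_driver);
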
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index 698c50da1f10..59a38bf9459b 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -359,7 +359,7 @@ error_remove_trigger:
}
/* Function to deinitialize the processing for usage id */
-static int hid_gyro_3d_remove(struct platform_device *pdev)
+static void hid_gyro_3d_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
@@ -368,8 +368,6 @@ static int hid_gyro_3d_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_GYRO_3D);
iio_device_unregister(indio_dev);
hid_sensor_remove_trigger(indio_dev, &gyro_state->common_attributes);
-
- return 0;
}
static const struct platform_device_id hid_gyro_3d_ids[] = {
@@ -388,7 +386,7 @@ static struct platform_driver hid_gyro_3d_platform_driver = {
.pm = &hid_sensor_pm_ops,
},
.probe = hid_gyro_3d_probe,
- .remove = hid_gyro_3d_remove,
+ .remove_new = hid_gyro_3d_remove,
};
module_platform_driver(hid_gyro_3d_platform_driver);
diff --git a/drivers/iio/humidity/hid-sensor-humidity.c b/drivers/iio/humidity/hid-sensor-humidity.c
index fa0fe404a70a..bf6d2636a85e 100644
--- a/drivers/iio/humidity/hid-sensor-humidity.c
+++ b/drivers/iio/humidity/hid-sensor-humidity.c
@@ -260,7 +260,7 @@ error_remove_trigger:
}
/* Function to deinitialize the processing for usage id */
-static int hid_humidity_remove(struct platform_device *pdev)
+static void hid_humidity_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = dev_get_platdata(&pdev->dev);
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
@@ -269,8 +269,6 @@ static int hid_humidity_remove(struct platform_device *pdev)
iio_device_unregister(indio_dev);
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_HUMIDITY);
hid_sensor_remove_trigger(indio_dev, &humid_st->common_attributes);
-
- return 0;
}
static const struct platform_device_id hid_humidity_ids[] = {
@@ -289,7 +287,7 @@ static struct platform_driver hid_humidity_platform_driver = {
.pm = &hid_sensor_pm_ops,
},
.probe = hid_humidity_probe,
- .remove = hid_humidity_remove,
+ .remove_new = hid_humidity_remove,
};
module_platform_driver(hid_humidity_platform_driver);
diff --git a/drivers/iio/humidity/si7005.c b/drivers/iio/humidity/si7005.c
index ebfb79bc9edc..9465908cc65e 100644
--- a/drivers/iio/humidity/si7005.c
+++ b/drivers/iio/humidity/si7005.c
@@ -169,9 +169,16 @@ static const struct i2c_device_id si7005_id[] = {
};
MODULE_DEVICE_TABLE(i2c, si7005_id);
+static const struct of_device_id si7005_dt_ids[] = {
+ { .compatible = "silabs,si7005" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, si7005_dt_ids);
+
static struct i2c_driver si7005_driver = {
.driver = {
.name = "si7005",
+ .of_match_table = si7005_dt_ids,
},
.probe = si7005_probe,
.id_table = si7005_id,
diff --git a/drivers/iio/iio_core.h b/drivers/iio/iio_core.h
index 501e286702ef..1a38b1915e7a 100644
--- a/drivers/iio/iio_core.h
+++ b/drivers/iio/iio_core.h
@@ -30,9 +30,6 @@ struct iio_ioctl_handler {
unsigned int cmd, unsigned long arg);
};
-long iio_device_ioctl(struct iio_dev *indio_dev, struct file *filp,
- unsigned int cmd, unsigned long arg);
-
void iio_device_ioctl_handler_register(struct iio_dev *indio_dev,
struct iio_ioctl_handler *h);
void iio_device_ioctl_handler_unregister(struct iio_ioctl_handler *h);
diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
index 17275a53ca2c..b7cbe1565aee 100644
--- a/drivers/iio/imu/adis16475.c
+++ b/drivers/iio/imu/adis16475.c
@@ -31,6 +31,12 @@
#define ADIS16475_REG_Y_ACCEL_L 0x14
#define ADIS16475_REG_Z_ACCEL_L 0x18
#define ADIS16475_REG_TEMP_OUT 0x1c
+#define ADIS16475_REG_X_DELTANG_L 0x24
+#define ADIS16475_REG_Y_DELTANG_L 0x28
+#define ADIS16475_REG_Z_DELTANG_L 0x2C
+#define ADIS16475_REG_X_DELTVEL_L 0x30
+#define ADIS16475_REG_Y_DELTVEL_L 0x34
+#define ADIS16475_REG_Z_DELTVEL_L 0x38
#define ADIS16475_REG_X_GYRO_BIAS_L 0x40
#define ADIS16475_REG_Y_GYRO_BIAS_L 0x44
#define ADIS16475_REG_Z_GYRO_BIAS_L 0x48
@@ -55,6 +61,7 @@
#define ADIS16475_REG_PROD_ID 0x72
#define ADIS16475_REG_SERIAL_NUM 0x74
#define ADIS16475_REG_FLASH_CNT 0x7c
+#define ADIS16500_BURST_DATA_SEL_MASK BIT(8)
#define ADIS16500_BURST32_MASK BIT(9)
#define ADIS16500_BURST32(x) FIELD_PREP(ADIS16500_BURST32_MASK, x)
/* number of data elements in burst mode */
@@ -65,6 +72,8 @@
#define ADIS16475_BURST_MAX_SPEED 1000000
#define ADIS16475_LSB_DEC_MASK BIT(0)
#define ADIS16475_LSB_FIR_MASK BIT(1)
+#define ADIS16500_BURST_DATA_SEL_0_CHN_MASK GENMASK(5, 0)
+#define ADIS16500_BURST_DATA_SEL_1_CHN_MASK GENMASK(12, 7)
enum {
ADIS16475_SYNC_DIRECT = 1,
@@ -84,16 +93,20 @@ struct adis16475_chip_info {
const struct adis16475_sync *sync;
const struct adis_data adis_data;
const char *name;
+#define ADIS16475_HAS_BURST32 BIT(0)
+#define ADIS16475_HAS_BURST_DELTA_DATA BIT(1)
+ const long flags;
u32 num_channels;
u32 gyro_max_val;
u32 gyro_max_scale;
u32 accel_max_val;
u32 accel_max_scale;
u32 temp_scale;
+ u32 deltang_max_val;
+ u32 deltvel_max_val;
u32 int_clk;
u16 max_dec;
u8 num_sync;
- bool has_burst32;
};
struct adis16475 {
@@ -115,6 +128,12 @@ enum {
ADIS16475_SCAN_ACCEL_Y,
ADIS16475_SCAN_ACCEL_Z,
ADIS16475_SCAN_TEMP,
+ ADIS16475_SCAN_DELTANG_X,
+ ADIS16475_SCAN_DELTANG_Y,
+ ADIS16475_SCAN_DELTANG_Z,
+ ADIS16475_SCAN_DELTVEL_X,
+ ADIS16475_SCAN_DELTVEL_Y,
+ ADIS16475_SCAN_DELTVEL_Z,
};
static bool low_rate_allow;
@@ -451,6 +470,14 @@ static int adis16475_read_raw(struct iio_dev *indio_dev,
case IIO_TEMP:
*val = st->info->temp_scale;
return IIO_VAL_INT;
+ case IIO_DELTA_ANGL:
+ *val = st->info->deltang_max_val;
+ *val2 = 31;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_DELTA_VELOCITY:
+ *val = st->info->deltvel_max_val;
+ *val2 = 31;
+ return IIO_VAL_FRACTIONAL_LOG2;
default:
return -EINVAL;
}
@@ -551,6 +578,57 @@ static int adis16475_write_raw(struct iio_dev *indio_dev,
}, \
}
+#define ADIS16475_MOD_CHAN_DELTA(_type, _mod, _address, _si, _r_bits, _s_bits) { \
+ .type = (_type), \
+ .modified = 1, \
+ .channel2 = (_mod), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ .address = (_address), \
+ .scan_index = _si, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (_r_bits), \
+ .storagebits = (_s_bits), \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define ADIS16475_DELTANG_CHAN(_mod) \
+ ADIS16475_MOD_CHAN_DELTA(IIO_DELTA_ANGL, IIO_MOD_ ## _mod, \
+ ADIS16475_REG_ ## _mod ## _DELTANG_L, ADIS16475_SCAN_DELTANG_ ## _mod, 32, 32)
+
+#define ADIS16475_DELTVEL_CHAN(_mod) \
+ ADIS16475_MOD_CHAN_DELTA(IIO_DELTA_VELOCITY, IIO_MOD_ ## _mod, \
+ ADIS16475_REG_ ## _mod ## _DELTVEL_L, ADIS16475_SCAN_DELTVEL_ ## _mod, 32, 32)
+
+#define ADIS16475_DELTANG_CHAN_NO_SCAN(_mod) \
+ ADIS16475_MOD_CHAN_DELTA(IIO_DELTA_ANGL, IIO_MOD_ ## _mod, \
+ ADIS16475_REG_ ## _mod ## _DELTANG_L, -1, 32, 32)
+
+#define ADIS16475_DELTVEL_CHAN_NO_SCAN(_mod) \
+ ADIS16475_MOD_CHAN_DELTA(IIO_DELTA_VELOCITY, IIO_MOD_ ## _mod, \
+ ADIS16475_REG_ ## _mod ## _DELTVEL_L, -1, 32, 32)
+
+static const struct iio_chan_spec adis16477_channels[] = {
+ ADIS16475_GYRO_CHANNEL(X),
+ ADIS16475_GYRO_CHANNEL(Y),
+ ADIS16475_GYRO_CHANNEL(Z),
+ ADIS16475_ACCEL_CHANNEL(X),
+ ADIS16475_ACCEL_CHANNEL(Y),
+ ADIS16475_ACCEL_CHANNEL(Z),
+ ADIS16475_TEMP_CHANNEL(),
+ ADIS16475_DELTANG_CHAN(X),
+ ADIS16475_DELTANG_CHAN(Y),
+ ADIS16475_DELTANG_CHAN(Z),
+ ADIS16475_DELTVEL_CHAN(X),
+ ADIS16475_DELTVEL_CHAN(Y),
+ ADIS16475_DELTVEL_CHAN(Z),
+ IIO_CHAN_SOFT_TIMESTAMP(13)
+};
+
static const struct iio_chan_spec adis16475_channels[] = {
ADIS16475_GYRO_CHANNEL(X),
ADIS16475_GYRO_CHANNEL(Y),
@@ -559,6 +637,12 @@ static const struct iio_chan_spec adis16475_channels[] = {
ADIS16475_ACCEL_CHANNEL(Y),
ADIS16475_ACCEL_CHANNEL(Z),
ADIS16475_TEMP_CHANNEL(),
+ ADIS16475_DELTANG_CHAN_NO_SCAN(X),
+ ADIS16475_DELTANG_CHAN_NO_SCAN(Y),
+ ADIS16475_DELTANG_CHAN_NO_SCAN(Z),
+ ADIS16475_DELTVEL_CHAN_NO_SCAN(X),
+ ADIS16475_DELTVEL_CHAN_NO_SCAN(Y),
+ ADIS16475_DELTVEL_CHAN_NO_SCAN(Z),
IIO_CHAN_SOFT_TIMESTAMP(7)
};
@@ -662,6 +746,8 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.accel_max_val = 1,
.accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
.temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(2160),
+ .deltvel_max_val = 400,
.int_clk = 2000,
.max_dec = 1999,
.sync = adis16475_sync_mode,
@@ -677,6 +763,8 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.accel_max_val = 1,
.accel_max_scale = IIO_M_S_2_TO_G(4000 << 16),
.temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(360),
+ .deltvel_max_val = 100,
.int_clk = 2000,
.max_dec = 1999,
.sync = adis16475_sync_mode,
@@ -692,6 +780,8 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.accel_max_val = 1,
.accel_max_scale = IIO_M_S_2_TO_G(4000 << 16),
.temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(720),
+ .deltvel_max_val = 100,
.int_clk = 2000,
.max_dec = 1999,
.sync = adis16475_sync_mode,
@@ -707,6 +797,8 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.accel_max_val = 1,
.accel_max_scale = IIO_M_S_2_TO_G(4000 << 16),
.temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(2160),
+ .deltvel_max_val = 100,
.int_clk = 2000,
.max_dec = 1999,
.sync = adis16475_sync_mode,
@@ -715,50 +807,56 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
},
[ADIS16477_1] = {
.name = "adis16477-1",
- .num_channels = ARRAY_SIZE(adis16475_channels),
- .channels = adis16475_channels,
+ .num_channels = ARRAY_SIZE(adis16477_channels),
+ .channels = adis16477_channels,
.gyro_max_val = 1,
.gyro_max_scale = IIO_RAD_TO_DEGREE(160 << 16),
.accel_max_val = 1,
.accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
.temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(360),
+ .deltvel_max_val = 400,
.int_clk = 2000,
.max_dec = 1999,
.sync = adis16475_sync_mode,
.num_sync = ARRAY_SIZE(adis16475_sync_mode),
- .has_burst32 = true,
+ .flags = ADIS16475_HAS_BURST32 | ADIS16475_HAS_BURST_DELTA_DATA,
.adis_data = ADIS16475_DATA(16477, &adis16475_timeouts),
},
[ADIS16477_2] = {
.name = "adis16477-2",
- .num_channels = ARRAY_SIZE(adis16475_channels),
- .channels = adis16475_channels,
+ .num_channels = ARRAY_SIZE(adis16477_channels),
+ .channels = adis16477_channels,
.gyro_max_val = 1,
.gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
.accel_max_val = 1,
.accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
.temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(720),
+ .deltvel_max_val = 400,
.int_clk = 2000,
.max_dec = 1999,
.sync = adis16475_sync_mode,
.num_sync = ARRAY_SIZE(adis16475_sync_mode),
- .has_burst32 = true,
+ .flags = ADIS16475_HAS_BURST32 | ADIS16475_HAS_BURST_DELTA_DATA,
.adis_data = ADIS16475_DATA(16477, &adis16475_timeouts),
},
[ADIS16477_3] = {
.name = "adis16477-3",
- .num_channels = ARRAY_SIZE(adis16475_channels),
- .channels = adis16475_channels,
+ .num_channels = ARRAY_SIZE(adis16477_channels),
+ .channels = adis16477_channels,
.gyro_max_val = 1,
.gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
.accel_max_val = 1,
.accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
.temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(2160),
+ .deltvel_max_val = 400,
.int_clk = 2000,
.max_dec = 1999,
.sync = adis16475_sync_mode,
.num_sync = ARRAY_SIZE(adis16475_sync_mode),
- .has_burst32 = true,
+ .flags = ADIS16475_HAS_BURST32 | ADIS16475_HAS_BURST_DELTA_DATA,
.adis_data = ADIS16475_DATA(16477, &adis16475_timeouts),
},
[ADIS16465_1] = {
@@ -770,6 +868,8 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.accel_max_val = 1,
.accel_max_scale = IIO_M_S_2_TO_G(4000 << 16),
.temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(360),
+ .deltvel_max_val = 100,
.int_clk = 2000,
.max_dec = 1999,
.sync = adis16475_sync_mode,
@@ -785,6 +885,8 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.accel_max_val = 1,
.accel_max_scale = IIO_M_S_2_TO_G(4000 << 16),
.temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(720),
+ .deltvel_max_val = 100,
.int_clk = 2000,
.max_dec = 1999,
.sync = adis16475_sync_mode,
@@ -800,6 +902,8 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.accel_max_val = 1,
.accel_max_scale = IIO_M_S_2_TO_G(4000 << 16),
.temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(2160),
+ .deltvel_max_val = 100,
.int_clk = 2000,
.max_dec = 1999,
.sync = adis16475_sync_mode,
@@ -815,6 +919,8 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.accel_max_val = 1,
.accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
.temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(360),
+ .deltvel_max_val = 400,
.int_clk = 2000,
.max_dec = 1999,
.sync = adis16475_sync_mode,
@@ -830,6 +936,8 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.accel_max_val = 1,
.accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
.temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(720),
+ .deltvel_max_val = 400,
.int_clk = 2000,
.max_dec = 1999,
.sync = adis16475_sync_mode,
@@ -845,6 +953,8 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.accel_max_val = 1,
.accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
.temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(2160),
+ .deltvel_max_val = 400,
.int_clk = 2000,
.max_dec = 1999,
.sync = adis16475_sync_mode,
@@ -853,129 +963,168 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
},
[ADIS16500] = {
.name = "adis16500",
- .num_channels = ARRAY_SIZE(adis16475_channels),
- .channels = adis16475_channels,
+ .num_channels = ARRAY_SIZE(adis16477_channels),
+ .channels = adis16477_channels,
.gyro_max_val = 1,
.gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
.accel_max_val = 392,
.accel_max_scale = 32000 << 16,
.temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(2160),
+ .deltvel_max_val = 400,
.int_clk = 2000,
.max_dec = 1999,
.sync = adis16475_sync_mode,
/* pulse sync not supported */
.num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
- .has_burst32 = true,
+ .flags = ADIS16475_HAS_BURST32 | ADIS16475_HAS_BURST_DELTA_DATA,
.adis_data = ADIS16475_DATA(16500, &adis1650x_timeouts),
},
[ADIS16505_1] = {
.name = "adis16505-1",
- .num_channels = ARRAY_SIZE(adis16475_channels),
- .channels = adis16475_channels,
+ .num_channels = ARRAY_SIZE(adis16477_channels),
+ .channels = adis16477_channels,
.gyro_max_val = 1,
.gyro_max_scale = IIO_RAD_TO_DEGREE(160 << 16),
.accel_max_val = 78,
.accel_max_scale = 32000 << 16,
.temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(360),
+ .deltvel_max_val = 100,
.int_clk = 2000,
.max_dec = 1999,
.sync = adis16475_sync_mode,
/* pulse sync not supported */
.num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
- .has_burst32 = true,
+ .flags = ADIS16475_HAS_BURST32 | ADIS16475_HAS_BURST_DELTA_DATA,
.adis_data = ADIS16475_DATA(16505, &adis1650x_timeouts),
},
[ADIS16505_2] = {
.name = "adis16505-2",
- .num_channels = ARRAY_SIZE(adis16475_channels),
- .channels = adis16475_channels,
+ .num_channels = ARRAY_SIZE(adis16477_channels),
+ .channels = adis16477_channels,
.gyro_max_val = 1,
.gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
.accel_max_val = 78,
.accel_max_scale = 32000 << 16,
.temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(720),
+ .deltvel_max_val = 100,
.int_clk = 2000,
.max_dec = 1999,
.sync = adis16475_sync_mode,
/* pulse sync not supported */
.num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
- .has_burst32 = true,
+ .flags = ADIS16475_HAS_BURST32 | ADIS16475_HAS_BURST_DELTA_DATA,
.adis_data = ADIS16475_DATA(16505, &adis1650x_timeouts),
},
[ADIS16505_3] = {
.name = "adis16505-3",
- .num_channels = ARRAY_SIZE(adis16475_channels),
- .channels = adis16475_channels,
+ .num_channels = ARRAY_SIZE(adis16477_channels),
+ .channels = adis16477_channels,
.gyro_max_val = 1,
.gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
.accel_max_val = 78,
.accel_max_scale = 32000 << 16,
.temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(2160),
+ .deltvel_max_val = 100,
.int_clk = 2000,
.max_dec = 1999,
.sync = adis16475_sync_mode,
/* pulse sync not supported */
.num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
- .has_burst32 = true,
+ .flags = ADIS16475_HAS_BURST32 | ADIS16475_HAS_BURST_DELTA_DATA,
.adis_data = ADIS16475_DATA(16505, &adis1650x_timeouts),
},
[ADIS16507_1] = {
.name = "adis16507-1",
- .num_channels = ARRAY_SIZE(adis16475_channels),
- .channels = adis16475_channels,
+ .num_channels = ARRAY_SIZE(adis16477_channels),
+ .channels = adis16477_channels,
.gyro_max_val = 1,
.gyro_max_scale = IIO_RAD_TO_DEGREE(160 << 16),
.accel_max_val = 392,
.accel_max_scale = 32000 << 16,
.temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(360),
+ .deltvel_max_val = 400,
.int_clk = 2000,
.max_dec = 1999,
.sync = adis16475_sync_mode,
/* pulse sync not supported */
.num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
- .has_burst32 = true,
+ .flags = ADIS16475_HAS_BURST32 | ADIS16475_HAS_BURST_DELTA_DATA,
.adis_data = ADIS16475_DATA(16507, &adis1650x_timeouts),
},
[ADIS16507_2] = {
.name = "adis16507-2",
- .num_channels = ARRAY_SIZE(adis16475_channels),
- .channels = adis16475_channels,
+ .num_channels = ARRAY_SIZE(adis16477_channels),
+ .channels = adis16477_channels,
.gyro_max_val = 1,
.gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
.accel_max_val = 392,
.accel_max_scale = 32000 << 16,
.temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(720),
+ .deltvel_max_val = 400,
.int_clk = 2000,
.max_dec = 1999,
.sync = adis16475_sync_mode,
/* pulse sync not supported */
.num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
- .has_burst32 = true,
+ .flags = ADIS16475_HAS_BURST32 | ADIS16475_HAS_BURST_DELTA_DATA,
.adis_data = ADIS16475_DATA(16507, &adis1650x_timeouts),
},
[ADIS16507_3] = {
.name = "adis16507-3",
- .num_channels = ARRAY_SIZE(adis16475_channels),
- .channels = adis16475_channels,
+ .num_channels = ARRAY_SIZE(adis16477_channels),
+ .channels = adis16477_channels,
.gyro_max_val = 1,
.gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
.accel_max_val = 392,
.accel_max_scale = 32000 << 16,
.temp_scale = 100,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(2160),
+ .deltvel_max_val = 400,
.int_clk = 2000,
.max_dec = 1999,
.sync = adis16475_sync_mode,
/* pulse sync not supported */
.num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
- .has_burst32 = true,
+ .flags = ADIS16475_HAS_BURST32 | ADIS16475_HAS_BURST_DELTA_DATA,
.adis_data = ADIS16475_DATA(16507, &adis1650x_timeouts),
},
};
+static int adis16475_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ u16 en;
+ int ret;
+ struct adis16475 *st = iio_priv(indio_dev);
+
+ if (st->info->flags & ADIS16475_HAS_BURST_DELTA_DATA) {
+ if ((*scan_mask & ADIS16500_BURST_DATA_SEL_0_CHN_MASK) &&
+ (*scan_mask & ADIS16500_BURST_DATA_SEL_1_CHN_MASK))
+ return -EINVAL;
+ if (*scan_mask & ADIS16500_BURST_DATA_SEL_0_CHN_MASK)
+ en = FIELD_PREP(ADIS16500_BURST_DATA_SEL_MASK, 0);
+ else
+ en = FIELD_PREP(ADIS16500_BURST_DATA_SEL_MASK, 1);
+
+ ret = __adis_update_bits(&st->adis, ADIS16475_REG_MSG_CTRL,
+ ADIS16500_BURST_DATA_SEL_MASK, en);
+ if (ret)
+ return ret;
+ }
+
+ return adis_update_scan_mode(indio_dev, scan_mask);
+}
+
static const struct iio_info adis16475_info = {
.read_raw = &adis16475_read_raw,
.write_raw = &adis16475_write_raw,
- .update_scan_mode = adis_update_scan_mode,
+ .update_scan_mode = adis16475_update_scan_mode,
.debugfs_reg_access = adis_debugfs_reg_access,
};
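A rough illustration of the burst data selection constraint enforced in adis16475_update_scan_mode() (assumes the usual ADIS16475_SCAN_GYRO_X == 0 scan index ordering; not part of the patch): the IMU can burst either the inertial data set or the delta data set, never both at once.

/* Illustrative only: GENMASK(5, 0) covers the gyro/accel scan bits and
 * GENMASK(12, 7) the delta-angle/velocity bits; temperature (bit 6) may
 * be combined with either group.
 */
#define SEL0_MASK 0x003fUL	/* gyro + accel scan bits */
#define SEL1_MASK 0x1f80UL	/* delta-angle/velocity scan bits */

static int burst_sel_valid(unsigned long scan_mask)
{
	/* Requesting both groups in one burst is impossible -> -EINVAL above */
	return !((scan_mask & SEL0_MASK) && (scan_mask & SEL1_MASK));
}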
@@ -998,7 +1147,7 @@ static void adis16475_burst32_check(struct adis16475 *st)
int ret;
struct adis *adis = &st->adis;
- if (!st->info->has_burst32)
+ if (!(st->info->flags & ADIS16475_HAS_BURST32))
return;
if (st->lsb_flag && !st->burst32) {
@@ -1044,7 +1193,7 @@ static irqreturn_t adis16475_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct adis16475 *st = iio_priv(indio_dev);
struct adis *adis = &st->adis;
- int ret, bit, i = 0;
+ int ret, bit, buff_offset = 0, i = 0;
__be16 *buffer;
u16 crc;
bool valid;
@@ -1073,7 +1222,20 @@ static irqreturn_t adis16475_trigger_handler(int irq, void *p)
switch (bit) {
case ADIS16475_SCAN_TEMP:
st->data[i++] = buffer[offset];
+ /*
+ * The temperature channel has a 16-bit storage size.
+ * Pad it so that the buffer elements stay naturally
+ * aligned whenever any 32-bit storage size channels
+ * with a scan index higher than the temperature
+ * channel are enabled.
+ */
+ if (*indio_dev->active_scan_mask & GENMASK(ADIS16475_SCAN_DELTVEL_Z, ADIS16475_SCAN_DELTANG_X))
+ st->data[i++] = 0;
break;
+ case ADIS16475_SCAN_DELTANG_X ... ADIS16475_SCAN_DELTVEL_Z:
+ buff_offset = ADIS16475_SCAN_DELTANG_X;
+ fallthrough;
case ADIS16475_SCAN_GYRO_X ... ADIS16475_SCAN_ACCEL_Z:
/*
* The first 2 bytes on the received data are the
@@ -1081,18 +1243,18 @@ static irqreturn_t adis16475_trigger_handler(int irq, void *p)
*/
if (st->burst32) {
/* upper 16 */
- st->data[i++] = buffer[bit * 2 + 2];
+ st->data[i++] = buffer[(bit - buff_offset) * 2 + 2];
/* lower 16 */
- st->data[i++] = buffer[bit * 2 + 1];
+ st->data[i++] = buffer[(bit - buff_offset) * 2 + 1];
} else {
- st->data[i++] = buffer[bit + 1];
+ st->data[i++] = buffer[(bit - buff_offset) + 1];
/*
* Don't bother doing the manual read if the
* device supports burst32. burst32 will be
* enabled in the next call to
* adis16475_burst32_check()...
*/
- if (st->lsb_flag && !st->info->has_burst32) {
+ if (st->lsb_flag && !(st->info->flags & ADIS16475_HAS_BURST32)) {
u16 val = 0;
const u32 reg = ADIS16475_REG_X_GYRO_L +
bit * 4;
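As a layout illustration of the temperature padding above (hypothetical example, not part of the patch):

/*
 * Example demuxed sample with temperature plus one delta channel enabled:
 *
 *   bytes 0-1: temperature (16-bit)
 *   bytes 2-3: zero padding (the extra st->data[i++] = 0)
 *   bytes 4-7: delta-angle X (32-bit, now naturally aligned)
 */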
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
index 7327e5723f96..8a7f2911905a 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
@@ -71,6 +71,19 @@ int inv_mpu_aux_init(const struct inv_mpu6050_state *st)
unsigned int val;
int ret;
+ /*
+ * Code based on the vendor Linux kernel v3.0;
+ * the exact meaning is unknown.
+ */
+ if (st->chip_type == INV_MPU9150) {
+ unsigned int mask = BIT(7);
+
+ val = st->level_shifter ? mask : 0;
+ ret = regmap_update_bits(st->map, 0x1, mask, val);
+ if (ret)
+ return ret;
+ }
+
/* configure i2c master */
val = INV_MPU6050_BITS_I2C_MST_CLK_400KHZ |
INV_MPU6050_BIT_WAIT_FOR_ES;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 29f906c884bd..3fbeef1a7018 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -17,6 +17,7 @@
#include <linux/regulator/consumer.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/iio/common/inv_sensors_timestamp.h>
#include <linux/iio/iio.h>
@@ -1495,6 +1496,8 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
st->irq = irq;
st->map = regmap;
+ st->level_shifter = device_property_read_bool(dev,
+ "invensense,level-shifter");
pdata = dev_get_platdata(dev);
if (!pdata) {
result = iio_read_mount_matrix(dev, &st->orientation);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index ed5a96e78df0..5950e2419ebb 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -203,6 +203,7 @@ struct inv_mpu6050_state {
s32 magn_raw_to_gauss[3];
struct iio_mount_matrix magn_orient;
unsigned int suspended_sensors;
+ bool level_shifter;
u8 *data;
};
@@ -464,7 +465,6 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev, int irq_type);
int inv_mpu6050_prepare_fifo(struct inv_mpu6050_state *st, bool enable);
int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en,
unsigned int mask);
-int inv_mpu6050_write_reg(struct inv_mpu6050_state *st, int reg, u8 val);
int inv_mpu_acpi_create_mux_client(struct i2c_client *client);
void inv_mpu_acpi_delete_mux_client(struct i2c_client *client);
int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index d752e9c0499b..c77745b594bd 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -90,6 +90,10 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_POSITIONRELATIVE] = "positionrelative",
[IIO_PHASE] = "phase",
[IIO_MASSCONCENTRATION] = "massconcentration",
+ [IIO_DELTA_ANGL] = "deltaangl",
+ [IIO_DELTA_VELOCITY] = "deltavelocity",
+ [IIO_COLORTEMP] = "colortemp",
+ [IIO_CHROMATICITY] = "chromaticity",
};
static const char * const iio_modifier_names[] = {
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 19f7a91157ee..910c1f14abd5 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -355,6 +355,21 @@ static ssize_t iio_ev_value_store(struct device *dev,
return len;
}
+static ssize_t iio_ev_label_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ if (indio_dev->info->read_event_label)
+ return indio_dev->info->read_event_label(indio_dev,
+ this_attr->c, iio_ev_attr_type(this_attr),
+ iio_ev_attr_dir(this_attr), buf);
+
+ return -EINVAL;
+}
+
static int iio_device_add_event(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, unsigned int spec_index,
enum iio_event_type type, enum iio_event_direction dir,
@@ -411,6 +426,41 @@ static int iio_device_add_event(struct iio_dev *indio_dev,
return attrcount;
}
+static int iio_device_add_event_label(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int spec_index,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+ char *postfix;
+ int ret;
+
+ if (!indio_dev->info->read_event_label)
+ return 0;
+
+ if (dir != IIO_EV_DIR_NONE)
+ postfix = kasprintf(GFP_KERNEL, "%s_%s_label",
+ iio_ev_type_text[type],
+ iio_ev_dir_text[dir]);
+ else
+ postfix = kasprintf(GFP_KERNEL, "%s_label",
+ iio_ev_type_text[type]);
+ if (postfix == NULL)
+ return -ENOMEM;
+
+ ret = __iio_add_chan_devattr(postfix, chan, &iio_ev_label_show, NULL,
+ spec_index, IIO_SEPARATE, &indio_dev->dev, NULL,
+ &iio_dev_opaque->event_interface->dev_attr_list);
+
+ kfree(postfix);
+
+ if (ret < 0)
+ return ret;
+
+ return 1;
+}
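A hypothetical driver-side sketch (not part of this patch) of the read_event_label() callback wired up above; the resulting attribute appears under the device's events/ directory, e.g. in_temp_thresh_rising_label:

static int foo_read_event_label(struct iio_dev *indio_dev,
				const struct iio_chan_spec *chan,
				enum iio_event_type type,
				enum iio_event_direction dir,
				char *label)
{
	/* Whatever string identifies this event to user space */
	return sysfs_emit(label, "over-temperature\n");
}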
+
static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan)
{
@@ -448,6 +498,11 @@ static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
attrcount += ret;
+
+ ret = iio_device_add_event_label(indio_dev, chan, i, type, dir);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
}
ret = attrcount;
return ret;
diff --git a/drivers/iio/light/cm3605.c b/drivers/iio/light/cm3605.c
index e7f0b81b7f5a..22a63a89f289 100644
--- a/drivers/iio/light/cm3605.c
+++ b/drivers/iio/light/cm3605.c
@@ -266,7 +266,7 @@ out_disable_vdd:
return ret;
}
-static int cm3605_remove(struct platform_device *pdev)
+static void cm3605_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct cm3605 *cm3605 = iio_priv(indio_dev);
@@ -276,8 +276,6 @@ static int cm3605_remove(struct platform_device *pdev)
gpiod_set_value_cansleep(cm3605->aset, 0);
iio_device_unregister(indio_dev);
regulator_disable(cm3605->vdd);
-
- return 0;
}
static int cm3605_pm_suspend(struct device *dev)
@@ -320,7 +318,7 @@ static struct platform_driver cm3605_driver = {
.pm = pm_sleep_ptr(&cm3605_dev_pm_ops),
},
.probe = cm3605_probe,
- .remove = cm3605_remove,
+ .remove_new = cm3605_remove,
};
module_platform_driver(cm3605_driver);
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index eb1aedad7edc..f17304b54468 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -14,8 +14,11 @@
#include "../common/hid-sensors/hid-sensor-trigger.h"
enum {
- CHANNEL_SCAN_INDEX_INTENSITY = 0,
- CHANNEL_SCAN_INDEX_ILLUM = 1,
+ CHANNEL_SCAN_INDEX_INTENSITY,
+ CHANNEL_SCAN_INDEX_ILLUM,
+ CHANNEL_SCAN_INDEX_COLOR_TEMP,
+ CHANNEL_SCAN_INDEX_CHROMATICITY_X,
+ CHANNEL_SCAN_INDEX_CHROMATICITY_Y,
CHANNEL_SCAN_INDEX_MAX
};
@@ -24,7 +27,7 @@ enum {
struct als_state {
struct hid_sensor_hub_callbacks callbacks;
struct hid_sensor_common common_attributes;
- struct hid_sensor_hub_attribute_info als_illum;
+ struct hid_sensor_hub_attribute_info als[CHANNEL_SCAN_INDEX_MAX];
struct {
u32 illum[CHANNEL_SCAN_INDEX_MAX];
u64 timestamp __aligned(8);
@@ -65,6 +68,40 @@ static const struct iio_chan_spec als_channels[] = {
BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE),
.scan_index = CHANNEL_SCAN_INDEX_ILLUM,
},
+ {
+ .type = IIO_COLORTEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE),
+ .scan_index = CHANNEL_SCAN_INDEX_COLOR_TEMP,
+ },
+ {
+ .type = IIO_CHROMATICITY,
+ .modified = 1,
+ .channel2 = IIO_MOD_X,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE),
+ .scan_index = CHANNEL_SCAN_INDEX_CHROMATICITY_X,
+ },
+ {
+ .type = IIO_CHROMATICITY,
+ .modified = 1,
+ .channel2 = IIO_MOD_Y,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE),
+ .scan_index = CHANNEL_SCAN_INDEX_CHROMATICITY_Y,
+ },
IIO_CHAN_SOFT_TIMESTAMP(CHANNEL_SCAN_INDEX_TIMESTAMP)
};
@@ -99,10 +136,25 @@ static int als_read_raw(struct iio_dev *indio_dev,
switch (chan->scan_index) {
case CHANNEL_SCAN_INDEX_INTENSITY:
case CHANNEL_SCAN_INDEX_ILLUM:
- report_id = als_state->als_illum.report_id;
- min = als_state->als_illum.logical_minimum;
+ report_id = als_state->als[chan->scan_index].report_id;
+ min = als_state->als[chan->scan_index].logical_minimum;
address = HID_USAGE_SENSOR_LIGHT_ILLUM;
break;
+ case CHANNEL_SCAN_INDEX_COLOR_TEMP:
+ report_id = als_state->als[chan->scan_index].report_id;
+ min = als_state->als[chan->scan_index].logical_minimum;
+ address = HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE;
+ break;
+ case CHANNEL_SCAN_INDEX_CHROMATICITY_X:
+ report_id = als_state->als[chan->scan_index].report_id;
+ min = als_state->als[chan->scan_index].logical_minimum;
+ address = HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X;
+ break;
+ case CHANNEL_SCAN_INDEX_CHROMATICITY_Y:
+ report_id = als_state->als[chan->scan_index].report_id;
+ min = als_state->als[chan->scan_index].logical_minimum;
+ address = HID_USAGE_SENSOR_LIGHT_CHROMATICITY_Y;
+ break;
default:
report_id = -1;
break;
@@ -223,6 +275,18 @@ static int als_capture_sample(struct hid_sensor_hub_device *hsdev,
als_state->scan.illum[CHANNEL_SCAN_INDEX_ILLUM] = sample_data;
ret = 0;
break;
+ case HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE:
+ als_state->scan.illum[CHANNEL_SCAN_INDEX_COLOR_TEMP] = sample_data;
+ ret = 0;
+ break;
+ case HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X:
+ als_state->scan.illum[CHANNEL_SCAN_INDEX_CHROMATICITY_X] = sample_data;
+ ret = 0;
+ break;
+ case HID_USAGE_SENSOR_LIGHT_CHROMATICITY_Y:
+ als_state->scan.illum[CHANNEL_SCAN_INDEX_CHROMATICITY_Y] = sample_data;
+ ret = 0;
+ break;
case HID_USAGE_SENSOR_TIME_TIMESTAMP:
als_state->timestamp = hid_sensor_convert_timestamp(&als_state->common_attributes,
*(s64 *)raw_data);
@@ -242,22 +306,56 @@ static int als_parse_report(struct platform_device *pdev,
struct als_state *st)
{
int ret;
+ int i;
+
+ for (i = 0; i <= CHANNEL_SCAN_INDEX_ILLUM; ++i) {
+ ret = sensor_hub_input_get_attribute_info(hsdev,
+ HID_INPUT_REPORT,
+ usage_id,
+ HID_USAGE_SENSOR_LIGHT_ILLUM,
+ &st->als[i]);
+ if (ret < 0)
+ return ret;
+ als_adjust_channel_bit_mask(channels, i, st->als[i].size);
+
+ dev_dbg(&pdev->dev, "als %x:%x\n", st->als[i].index,
+ st->als[i].report_id);
+ }
ret = sensor_hub_input_get_attribute_info(hsdev, HID_INPUT_REPORT,
- usage_id,
- HID_USAGE_SENSOR_LIGHT_ILLUM,
- &st->als_illum);
+ usage_id,
+ HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE,
+ &st->als[CHANNEL_SCAN_INDEX_COLOR_TEMP]);
if (ret < 0)
return ret;
- als_adjust_channel_bit_mask(channels, CHANNEL_SCAN_INDEX_INTENSITY,
- st->als_illum.size);
- als_adjust_channel_bit_mask(channels, CHANNEL_SCAN_INDEX_ILLUM,
- st->als_illum.size);
-
- dev_dbg(&pdev->dev, "als %x:%x\n", st->als_illum.index,
- st->als_illum.report_id);
+ als_adjust_channel_bit_mask(channels, CHANNEL_SCAN_INDEX_COLOR_TEMP,
+ st->als[CHANNEL_SCAN_INDEX_COLOR_TEMP].size);
+
+ dev_dbg(&pdev->dev, "als %x:%x\n",
+ st->als[CHANNEL_SCAN_INDEX_COLOR_TEMP].index,
+ st->als[CHANNEL_SCAN_INDEX_COLOR_TEMP].report_id);
+
+ for (i = 0; i < 2; i++) {
+ int next_scan_index = CHANNEL_SCAN_INDEX_CHROMATICITY_X + i;
+
+ ret = sensor_hub_input_get_attribute_info(hsdev,
+ HID_INPUT_REPORT, usage_id,
+ HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X + i,
+ &st->als[next_scan_index]);
+ if (ret < 0)
+ return ret;
+
+ als_adjust_channel_bit_mask(channels,
+ CHANNEL_SCAN_INDEX_CHROMATICITY_X + i,
+ st->als[next_scan_index].size);
+
+ dev_dbg(&pdev->dev, "als %x:%x\n",
+ st->als[next_scan_index].index,
+ st->als[next_scan_index].report_id);
+ }
- st->scale_precision = hid_sensor_format_scale(usage_id, &st->als_illum,
+ st->scale_precision = hid_sensor_format_scale(usage_id,
+ &st->als[CHANNEL_SCAN_INDEX_INTENSITY],
&st->scale_pre_decml, &st->scale_post_decml);
return ret;
@@ -347,7 +445,7 @@ error_remove_trigger:
}
/* Function to deinitialize the processing for usage id */
-static int hid_als_remove(struct platform_device *pdev)
+static void hid_als_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
@@ -356,8 +454,6 @@ static int hid_als_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, hsdev->usage);
iio_device_unregister(indio_dev);
hid_sensor_remove_trigger(indio_dev, &als_state->common_attributes);
-
- return 0;
}
static const struct platform_device_id hid_als_ids[] = {
@@ -380,7 +476,7 @@ static struct platform_driver hid_als_platform_driver = {
.pm = &hid_sensor_pm_ops,
},
.probe = hid_als_probe,
- .remove = hid_als_remove,
+ .remove_new = hid_als_remove,
};
module_platform_driver(hid_als_platform_driver);
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index a47591e1bad9..26c481d2998c 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -313,7 +313,7 @@ error_remove_trigger:
}
/* Function to deinitialize the processing for usage id */
-static int hid_prox_remove(struct platform_device *pdev)
+static void hid_prox_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
@@ -322,8 +322,6 @@ static int hid_prox_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, hsdev->usage);
iio_device_unregister(indio_dev);
hid_sensor_remove_trigger(indio_dev, &prox_state->common_attributes);
-
- return 0;
}
static const struct platform_device_id hid_prox_ids[] = {
@@ -346,7 +344,7 @@ static struct platform_driver hid_prox_platform_driver = {
.pm = &hid_sensor_pm_ops,
},
.probe = hid_prox_probe,
- .remove = hid_prox_remove,
+ .remove_new = hid_prox_remove,
};
module_platform_driver(hid_prox_platform_driver);
diff --git a/drivers/iio/light/lm3533-als.c b/drivers/iio/light/lm3533-als.c
index 827bc25269e9..7800f7fa51b7 100644
--- a/drivers/iio/light/lm3533-als.c
+++ b/drivers/iio/light/lm3533-als.c
@@ -895,7 +895,7 @@ err_free_irq:
return ret;
}
-static int lm3533_als_remove(struct platform_device *pdev)
+static void lm3533_als_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct lm3533_als *als = iio_priv(indio_dev);
@@ -905,8 +905,6 @@ static int lm3533_als_remove(struct platform_device *pdev)
lm3533_als_disable(als);
if (als->irq)
free_irq(als->irq, indio_dev);
-
- return 0;
}
static struct platform_driver lm3533_als_driver = {
@@ -914,7 +912,7 @@ static struct platform_driver lm3533_als_driver = {
.name = "lm3533-als",
},
.probe = lm3533_als_probe,
- .remove = lm3533_als_remove,
+ .remove_new = lm3533_als_remove,
};
module_platform_driver(lm3533_als_driver);
diff --git a/drivers/iio/light/opt4001.c b/drivers/iio/light/opt4001.c
index 502946bf9f94..6cf60151b3d8 100644
--- a/drivers/iio/light/opt4001.c
+++ b/drivers/iio/light/opt4001.c
@@ -412,7 +412,7 @@ static int opt4001_probe(struct i2c_client *client)
if (dev_id != OPT4001_DEVICE_ID_VAL)
dev_warn(&client->dev, "Device ID: %#04x unknown\n", dev_id);
- chip->chip_info = device_get_match_data(&client->dev);
+ chip->chip_info = i2c_get_match_data(client);
indio_dev->channels = opt4001_channels;
indio_dev->num_channels = ARRAY_SIZE(opt4001_channels);
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index eb706d0bf70b..dd466c5fa621 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -204,7 +204,6 @@ static long ak09912_raw_to_gauss(u16 data)
/* Compatible Asahi Kasei Compass parts */
enum asahi_compass_chipset {
- AKXXXX = 0,
AK8975,
AK8963,
AK09911,
@@ -248,7 +247,7 @@ struct ak_def {
};
static const struct ak_def ak_def_array[] = {
- {
+ [AK8975] = {
.type = AK8975,
.raw_to_gauss = ak8975_raw_to_gauss,
.range = 4096,
@@ -273,7 +272,7 @@ static const struct ak_def ak_def_array[] = {
AK8975_REG_HYL,
AK8975_REG_HZL},
},
- {
+ [AK8963] = {
.type = AK8963,
.raw_to_gauss = ak8963_09911_raw_to_gauss,
.range = 8190,
@@ -298,7 +297,7 @@ static const struct ak_def ak_def_array[] = {
AK8975_REG_HYL,
AK8975_REG_HZL},
},
- {
+ [AK09911] = {
.type = AK09911,
.raw_to_gauss = ak8963_09911_raw_to_gauss,
.range = 8192,
@@ -323,7 +322,7 @@ static const struct ak_def ak_def_array[] = {
AK09912_REG_HYL,
AK09912_REG_HZL},
},
- {
+ [AK09912] = {
.type = AK09912,
.raw_to_gauss = ak09912_raw_to_gauss,
.range = 32752,
@@ -348,7 +347,7 @@ static const struct ak_def ak_def_array[] = {
AK09912_REG_HYL,
AK09912_REG_HZL},
},
- {
+ [AK09916] = {
.type = AK09916,
.raw_to_gauss = ak09912_raw_to_gauss,
.range = 32752,
@@ -812,18 +811,6 @@ static const struct iio_info ak8975_info = {
.read_raw = &ak8975_read_raw,
};
-static const struct acpi_device_id ak_acpi_match[] = {
- {"AK8975", AK8975},
- {"AK8963", AK8963},
- {"INVN6500", AK8963},
- {"AK009911", AK09911},
- {"AK09911", AK09911},
- {"AKM9911", AK09911},
- {"AK09912", AK09912},
- { }
-};
-MODULE_DEVICE_TABLE(acpi, ak_acpi_match);
-
static void ak8975_fill_buffer(struct iio_dev *indio_dev)
{
struct ak8975_data *data = iio_priv(indio_dev);
@@ -883,10 +870,7 @@ static int ak8975_probe(struct i2c_client *client)
struct iio_dev *indio_dev;
struct gpio_desc *eoc_gpiod;
struct gpio_desc *reset_gpiod;
- const void *match;
- unsigned int i;
int err;
- enum asahi_compass_chipset chipset;
const char *name = NULL;
/*
@@ -928,27 +912,15 @@ static int ak8975_probe(struct i2c_client *client)
return err;
/* id will be NULL when enumerated via ACPI */
- match = device_get_match_data(&client->dev);
- if (match) {
- chipset = (uintptr_t)match;
- name = dev_name(&client->dev);
- } else if (id) {
- chipset = (enum asahi_compass_chipset)(id->driver_data);
- name = id->name;
- } else
- return -ENOSYS;
-
- for (i = 0; i < ARRAY_SIZE(ak_def_array); i++)
- if (ak_def_array[i].type == chipset)
- break;
-
- if (i == ARRAY_SIZE(ak_def_array)) {
- dev_err(&client->dev, "AKM device type unsupported: %d\n",
- chipset);
+ data->def = i2c_get_match_data(client);
+ if (!data->def)
return -ENODEV;
- }
- data->def = &ak_def_array[i];
+ /* If enumerated via firmware node, fix the ABI */
+ if (dev_fwnode(&client->dev))
+ name = dev_name(&client->dev);
+ else
+ name = id->name;
/* Fetch the regulators */
data->vdd = devm_regulator_get(&client->dev, "vdd");
@@ -1076,29 +1048,40 @@ static int ak8975_runtime_resume(struct device *dev)
static DEFINE_RUNTIME_DEV_PM_OPS(ak8975_dev_pm_ops, ak8975_runtime_suspend,
ak8975_runtime_resume, NULL);
+static const struct acpi_device_id ak_acpi_match[] = {
+ {"AK8963", (kernel_ulong_t)&ak_def_array[AK8963] },
+ {"AK8975", (kernel_ulong_t)&ak_def_array[AK8975] },
+ {"AK009911", (kernel_ulong_t)&ak_def_array[AK09911] },
+ {"AK09911", (kernel_ulong_t)&ak_def_array[AK09911] },
+ {"AK09912", (kernel_ulong_t)&ak_def_array[AK09912] },
+ {"AKM9911", (kernel_ulong_t)&ak_def_array[AK09911] },
+ {"INVN6500", (kernel_ulong_t)&ak_def_array[AK8963] },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, ak_acpi_match);
+
static const struct i2c_device_id ak8975_id[] = {
- {"ak8975", AK8975},
- {"ak8963", AK8963},
- {"AK8963", AK8963},
- {"ak09911", AK09911},
- {"ak09912", AK09912},
- {"ak09916", AK09916},
+ {"AK8963", (kernel_ulong_t)&ak_def_array[AK8963] },
+ {"ak8963", (kernel_ulong_t)&ak_def_array[AK8963] },
+ {"ak8975", (kernel_ulong_t)&ak_def_array[AK8975] },
+ {"ak09911", (kernel_ulong_t)&ak_def_array[AK09911] },
+ {"ak09912", (kernel_ulong_t)&ak_def_array[AK09912] },
+ {"ak09916", (kernel_ulong_t)&ak_def_array[AK09916] },
{}
};
-
MODULE_DEVICE_TABLE(i2c, ak8975_id);
static const struct of_device_id ak8975_of_match[] = {
- { .compatible = "asahi-kasei,ak8975", },
- { .compatible = "ak8975", },
- { .compatible = "asahi-kasei,ak8963", },
- { .compatible = "ak8963", },
- { .compatible = "asahi-kasei,ak09911", },
- { .compatible = "ak09911", },
- { .compatible = "asahi-kasei,ak09912", },
- { .compatible = "ak09912", },
- { .compatible = "asahi-kasei,ak09916", },
- { .compatible = "ak09916", },
+ { .compatible = "asahi-kasei,ak8975", .data = &ak_def_array[AK8975] },
+ { .compatible = "ak8975", .data = &ak_def_array[AK8975] },
+ { .compatible = "asahi-kasei,ak8963", .data = &ak_def_array[AK8963] },
+ { .compatible = "ak8963", .data = &ak_def_array[AK8963] },
+ { .compatible = "asahi-kasei,ak09911", .data = &ak_def_array[AK09911] },
+ { .compatible = "ak09911", .data = &ak_def_array[AK09911] },
+ { .compatible = "asahi-kasei,ak09912", .data = &ak_def_array[AK09912] },
+ { .compatible = "ak09912", .data = &ak_def_array[AK09912] },
+ { .compatible = "asahi-kasei,ak09916", .data = &ak_def_array[AK09916] },
+ { .compatible = "ak09916", .data = &ak_def_array[AK09916] },
{}
};
MODULE_DEVICE_TABLE(of, ak8975_of_match);
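For context, all three ID tables above now carry pointers into ak_def_array, so probe can fetch the chip definition with a single i2c_get_match_data() call regardless of how the device was enumerated. A rough sketch of the lookup order that helper performs (not the actual implementation):

static const void *match_data_lookup_order(struct i2c_client *client,
					    const struct i2c_device_id *id)
{
	/* Firmware node (OF/ACPI) match data is preferred... */
	const void *data = device_get_match_data(&client->dev);

	/* ...falling back to the plain I2C device id driver_data. */
	if (!data && id)
		data = (const void *)id->driver_data;
	return data;
}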
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index e85a3a8eea90..5c795a430d09 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -547,7 +547,7 @@ error_remove_trigger:
}
/* Function to deinitialize the processing for usage id */
-static int hid_magn_3d_remove(struct platform_device *pdev)
+static void hid_magn_3d_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
@@ -556,8 +556,6 @@ static int hid_magn_3d_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_COMPASS_3D);
iio_device_unregister(indio_dev);
hid_sensor_remove_trigger(indio_dev, &magn_state->magn_flux_attributes);
-
- return 0;
}
static const struct platform_device_id hid_magn_3d_ids[] = {
@@ -576,7 +574,7 @@ static struct platform_driver hid_magn_3d_platform_driver = {
.pm = &hid_sensor_pm_ops,
},
.probe = hid_magn_3d_probe,
- .remove = hid_magn_3d_remove,
+ .remove_new = hid_magn_3d_remove,
};
module_platform_driver(hid_magn_3d_platform_driver);
diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
index c5e485bfc6fc..7b041bb38693 100644
--- a/drivers/iio/magnetometer/yamaha-yas530.c
+++ b/drivers/iio/magnetometer/yamaha-yas530.c
@@ -1434,9 +1434,7 @@ static int yas5xx_probe(struct i2c_client *i2c)
goto assert_reset;
}
- ci = device_get_match_data(dev);
- if (!ci)
- ci = (const struct yas5xx_chip_info *)id->driver_data;
+ ci = i2c_get_match_data(i2c);
yas5xx->chip_info = ci;
ret = regmap_read(yas5xx->map, YAS5XX_DEVICE_ID, &id_check);
diff --git a/drivers/iio/orientation/hid-sensor-incl-3d.c b/drivers/iio/orientation/hid-sensor-incl-3d.c
index ba5b581d5b25..8943d5c78bc0 100644
--- a/drivers/iio/orientation/hid-sensor-incl-3d.c
+++ b/drivers/iio/orientation/hid-sensor-incl-3d.c
@@ -383,7 +383,7 @@ error_remove_trigger:
}
/* Function to deinitialize the processing for usage id */
-static int hid_incl_3d_remove(struct platform_device *pdev)
+static void hid_incl_3d_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
@@ -392,8 +392,6 @@ static int hid_incl_3d_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_INCLINOMETER_3D);
iio_device_unregister(indio_dev);
hid_sensor_remove_trigger(indio_dev, &incl_state->common_attributes);
-
- return 0;
}
static const struct platform_device_id hid_incl_3d_ids[] = {
@@ -412,7 +410,7 @@ static struct platform_driver hid_incl_3d_platform_driver = {
.pm = &hid_sensor_pm_ops,
},
.probe = hid_incl_3d_probe,
- .remove = hid_incl_3d_remove,
+ .remove_new = hid_incl_3d_remove,
};
module_platform_driver(hid_incl_3d_platform_driver);
diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
index a033699910e8..5e8cadd5177a 100644
--- a/drivers/iio/orientation/hid-sensor-rotation.c
+++ b/drivers/iio/orientation/hid-sensor-rotation.c
@@ -327,7 +327,7 @@ error_remove_trigger:
}
/* Function to deinitialize the processing for usage id */
-static int hid_dev_rot_remove(struct platform_device *pdev)
+static void hid_dev_rot_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
@@ -336,8 +336,6 @@ static int hid_dev_rot_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, hsdev->usage);
iio_device_unregister(indio_dev);
hid_sensor_remove_trigger(indio_dev, &rot_state->common_attributes);
-
- return 0;
}
static const struct platform_device_id hid_dev_rot_ids[] = {
@@ -364,7 +362,7 @@ static struct platform_driver hid_dev_rot_platform_driver = {
.pm = &hid_sensor_pm_ops,
},
.probe = hid_dev_rot_probe,
- .remove = hid_dev_rot_remove,
+ .remove_new = hid_dev_rot_remove,
};
module_platform_driver(hid_dev_rot_platform_driver);
diff --git a/drivers/iio/position/hid-sensor-custom-intel-hinge.c b/drivers/iio/position/hid-sensor-custom-intel-hinge.c
index 07c30d217255..76e173850a35 100644
--- a/drivers/iio/position/hid-sensor-custom-intel-hinge.c
+++ b/drivers/iio/position/hid-sensor-custom-intel-hinge.c
@@ -342,7 +342,7 @@ error_remove_trigger:
}
/* Function to deinitialize the processing for usage id */
-static int hid_hinge_remove(struct platform_device *pdev)
+static void hid_hinge_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
@@ -351,8 +351,6 @@ static int hid_hinge_remove(struct platform_device *pdev)
iio_device_unregister(indio_dev);
sensor_hub_remove_callback(hsdev, hsdev->usage);
hid_sensor_remove_trigger(indio_dev, &st->common_attributes);
-
- return 0;
}
static const struct platform_device_id hid_hinge_ids[] = {
@@ -371,7 +369,7 @@ static struct platform_driver hid_hinge_platform_driver = {
.pm = &hid_sensor_pm_ops,
},
.probe = hid_hinge_probe,
- .remove = hid_hinge_remove,
+ .remove_new = hid_hinge_remove,
};
module_platform_driver(hid_hinge_platform_driver);
diff --git a/drivers/iio/potentiometer/ad5110.c b/drivers/iio/potentiometer/ad5110.c
index 991e745c4f93..aaf02cc7aeba 100644
--- a/drivers/iio/potentiometer/ad5110.c
+++ b/drivers/iio/potentiometer/ad5110.c
@@ -278,14 +278,19 @@ static const struct of_device_id ad5110_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ad5110_of_match);
+#define AD5110_ID_TABLE(_name, cfg) { \
+ .name = _name, \
+ .driver_data = (kernel_ulong_t)&ad5110_cfg[cfg], \
+}
+
static const struct i2c_device_id ad5110_id[] = {
- { "ad5110-10", AD5110_10 },
- { "ad5110-80", AD5110_80 },
- { "ad5112-05", AD5112_05 },
- { "ad5112-10", AD5112_10 },
- { "ad5112-80", AD5112_80 },
- { "ad5114-10", AD5114_10 },
- { "ad5114-80", AD5114_80 },
+ AD5110_ID_TABLE("ad5110-10", AD5110_10),
+ AD5110_ID_TABLE("ad5110-80", AD5110_80),
+ AD5110_ID_TABLE("ad5112-05", AD5112_05),
+ AD5110_ID_TABLE("ad5112-10", AD5112_10),
+ AD5110_ID_TABLE("ad5112-80", AD5112_80),
+ AD5110_ID_TABLE("ad5114-10", AD5114_10),
+ AD5110_ID_TABLE("ad5114-80", AD5114_80),
{ }
};
MODULE_DEVICE_TABLE(i2c, ad5110_id);
@@ -305,7 +310,7 @@ static int ad5110_probe(struct i2c_client *client)
data->client = client;
mutex_init(&data->lock);
data->enable = 1;
- data->cfg = device_get_match_data(dev);
+ data->cfg = i2c_get_match_data(client);
/* refresh RDAC register with EEPROM */
ret = ad5110_write(data, AD5110_RESET, 0);
diff --git a/drivers/iio/potentiometer/ds1803.c b/drivers/iio/potentiometer/ds1803.c
index fc183e0790da..e0526dd0e3cb 100644
--- a/drivers/iio/potentiometer/ds1803.c
+++ b/drivers/iio/potentiometer/ds1803.c
@@ -204,7 +204,6 @@ static const struct iio_info ds1803_info = {
static int ds1803_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct device *dev = &client->dev;
struct ds1803_data *data;
struct iio_dev *indio_dev;
@@ -217,9 +216,7 @@ static int ds1803_probe(struct i2c_client *client)
data = iio_priv(indio_dev);
data->client = client;
- data->cfg = device_get_match_data(dev);
- if (!data->cfg)
- data->cfg = &ds1803_cfg[id->driver_data];
+ data->cfg = i2c_get_match_data(client);
indio_dev->info = &ds1803_info;
indio_dev->channels = data->cfg->channels;
@@ -239,10 +236,10 @@ static const struct of_device_id ds1803_dt_ids[] = {
MODULE_DEVICE_TABLE(of, ds1803_dt_ids);
static const struct i2c_device_id ds1803_id[] = {
- { "ds1803-010", DS1803_010 },
- { "ds1803-050", DS1803_050 },
- { "ds1803-100", DS1803_100 },
- { "ds3502", DS3502 },
+ { "ds1803-010", (kernel_ulong_t)&ds1803_cfg[DS1803_010] },
+ { "ds1803-050", (kernel_ulong_t)&ds1803_cfg[DS1803_050] },
+ { "ds1803-100", (kernel_ulong_t)&ds1803_cfg[DS1803_100] },
+ { "ds3502", (kernel_ulong_t)&ds1803_cfg[DS3502] },
{}
};
MODULE_DEVICE_TABLE(i2c, ds1803_id);
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 7b4c2af32852..95efa32e4289 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -16,6 +16,15 @@ config ABP060MG
To compile this driver as a module, choose M here: the module
will be called abp060mg.
+config ROHM_BM1390
+ tristate "ROHM BM1390GLV-Z pressure sensor driver"
+ depends on I2C
+ help
+ Support for the ROHM BM1390 pressure sensor. The BM1390GLV-Z
+ can measure pressures ranging from 300 hPa to 1300 hPa with
+ configurable measurement averaging and an internal FIFO. The
+ sensor also provides temperature measurements.
+
config BMP280
tristate "Bosch Sensortec BMP180/BMP280/BMP380/BMP580 pressure sensor driver"
depends on (I2C || SPI_MASTER)
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index c90f77210e94..436aec7e65f3 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -5,6 +5,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_ABP060MG) += abp060mg.o
+obj-$(CONFIG_ROHM_BM1390) += rohm-bm1390.o
obj-$(CONFIG_BMP280) += bmp280.o
bmp280-objs := bmp280-core.o bmp280-regmap.o
obj-$(CONFIG_BMP280_I2C) += bmp280-i2c.o
diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c
index a9215eb32d70..956045e2db29 100644
--- a/drivers/iio/pressure/hid-sensor-press.c
+++ b/drivers/iio/pressure/hid-sensor-press.c
@@ -323,7 +323,7 @@ error_remove_trigger:
}
/* Function to deinitialize the processing for usage id */
-static int hid_press_remove(struct platform_device *pdev)
+static void hid_press_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
@@ -332,8 +332,6 @@ static int hid_press_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_PRESSURE);
iio_device_unregister(indio_dev);
hid_sensor_remove_trigger(indio_dev, &press_state->common_attributes);
-
- return 0;
}
static const struct platform_device_id hid_press_ids[] = {
@@ -352,7 +350,7 @@ static struct platform_driver hid_press_platform_driver = {
.pm = &hid_sensor_pm_ops,
},
.probe = hid_press_probe,
- .remove = hid_press_remove,
+ .remove_new = hid_press_remove,
};
module_platform_driver(hid_press_platform_driver);
diff --git a/drivers/iio/pressure/ms5637.c b/drivers/iio/pressure/ms5637.c
index 9b3abffb724b..ac30d76285d1 100644
--- a/drivers/iio/pressure/ms5637.c
+++ b/drivers/iio/pressure/ms5637.c
@@ -144,7 +144,6 @@ static const struct iio_info ms5637_info = {
static int ms5637_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
const struct ms_tp_data *data;
struct ms_tp_dev *dev_data;
struct iio_dev *indio_dev;
@@ -159,10 +158,7 @@ static int ms5637_probe(struct i2c_client *client)
return -EOPNOTSUPP;
}
- if (id)
- data = (const struct ms_tp_data *)id->driver_data;
- else
- data = device_get_match_data(&client->dev);
+ data = i2c_get_match_data(client);
if (!data)
return -EINVAL;
diff --git a/drivers/iio/pressure/rohm-bm1390.c b/drivers/iio/pressure/rohm-bm1390.c
new file mode 100644
index 000000000000..ccaa07a569c9
--- /dev/null
+++ b/drivers/iio/pressure/rohm-bm1390.c
@@ -0,0 +1,934 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * BM1390 ROHM pressure sensor
+ *
+ * Copyright (c) 2023, ROHM Semiconductor.
+ * https://fscdn.rohm.com/en/products/databook/datasheet/ic/sensor/pressure/bm1390glv-z-e.pdf
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define BM1390_REG_MANUFACT_ID 0x0f
+#define BM1390_REG_PART_ID 0x10
+#define BM1390_REG_POWER 0x12
+#define BM1390_MASK_POWER BIT(0)
+#define BM1390_POWER_ON BM1390_MASK_POWER
+#define BM1390_POWER_OFF 0x00
+#define BM1390_REG_RESET 0x13
+#define BM1390_MASK_RESET BIT(0)
+#define BM1390_RESET_RELEASE BM1390_MASK_RESET
+#define BM1390_RESET 0x00
+#define BM1390_REG_MODE_CTRL 0x14
+#define BM1390_MASK_MEAS_MODE GENMASK(1, 0)
+#define BM1390_MASK_DRDY_EN BIT(4)
+#define BM1390_MASK_WMI_EN BIT(2)
+#define BM1390_MASK_AVE_NUM GENMASK(7, 5)
+
+/*
+ * Data-sheet states that when the IIR is used, the AVE_NUM must be set to
+ * value 110b
+ */
+#define BM1390_IIR_AVE_NUM 0x06
+#define BM1390_REG_FIFO_CTRL 0x15
+#define BM1390_MASK_IIR_MODE GENMASK(1, 0)
+#define BM1390_IIR_MODE_OFF 0x0
+#define BM1390_IIR_MODE_WEAK 0x1
+#define BM1390_IIR_MODE_MID 0x2
+#define BM1390_IIR_MODE_STRONG 0x3
+
+#define BM1390_MASK_FIFO_LEN BIT(6)
+#define BM1390_MASK_FIFO_EN BIT(7)
+#define BM1390_WMI_MIN 2
+#define BM1390_WMI_MAX 3
+
+#define BM1390_REG_FIFO_LVL 0x18
+#define BM1390_MASK_FIFO_LVL GENMASK(2, 0)
+#define BM1390_REG_STATUS 0x19
+#define BM1390_REG_PRESSURE_BASE 0x1a
+#define BM1390_REG_TEMP_HI 0x1d
+#define BM1390_REG_TEMP_LO 0x1e
+#define BM1390_MAX_REGISTER BM1390_REG_TEMP_LO
+
+#define BM1390_ID 0x34
+
+/* Regmap configs */
+static const struct regmap_range bm1390_volatile_ranges[] = {
+ {
+ .range_min = BM1390_REG_STATUS,
+ .range_max = BM1390_REG_STATUS,
+ },
+ {
+ .range_min = BM1390_REG_FIFO_LVL,
+ .range_max = BM1390_REG_TEMP_LO,
+ },
+};
+
+static const struct regmap_access_table bm1390_volatile_regs = {
+ .yes_ranges = &bm1390_volatile_ranges[0],
+ .n_yes_ranges = ARRAY_SIZE(bm1390_volatile_ranges),
+};
+
+static const struct regmap_range bm1390_precious_ranges[] = {
+ {
+ .range_min = BM1390_REG_STATUS,
+ .range_max = BM1390_REG_STATUS,
+ },
+};
+
+static const struct regmap_access_table bm1390_precious_regs = {
+ .yes_ranges = &bm1390_precious_ranges[0],
+ .n_yes_ranges = ARRAY_SIZE(bm1390_precious_ranges),
+};
+
+static const struct regmap_range bm1390_read_only_ranges[] = {
+ {
+ .range_min = BM1390_REG_MANUFACT_ID,
+ .range_max = BM1390_REG_PART_ID,
+ }, {
+ .range_min = BM1390_REG_FIFO_LVL,
+ .range_max = BM1390_REG_TEMP_LO,
+ },
+};
+
+static const struct regmap_access_table bm1390_ro_regs = {
+ .no_ranges = &bm1390_read_only_ranges[0],
+ .n_no_ranges = ARRAY_SIZE(bm1390_read_only_ranges),
+};
+
+static const struct regmap_range bm1390_noinc_read_ranges[] = {
+ {
+ .range_min = BM1390_REG_PRESSURE_BASE,
+ .range_max = BM1390_REG_TEMP_LO,
+ },
+};
+
+static const struct regmap_access_table bm1390_nir_regs = {
+ .yes_ranges = &bm1390_noinc_read_ranges[0],
+ .n_yes_ranges = ARRAY_SIZE(bm1390_noinc_read_ranges),
+};
+
+static const struct regmap_config bm1390_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_table = &bm1390_volatile_regs,
+ .wr_table = &bm1390_ro_regs,
+ .rd_noinc_table = &bm1390_nir_regs,
+ .precious_table = &bm1390_precious_regs,
+ .max_register = BM1390_MAX_REGISTER,
+ .cache_type = REGCACHE_RBTREE,
+ .disable_locking = true,
+};
+
+enum {
+ BM1390_STATE_SAMPLE,
+ BM1390_STATE_FIFO,
+};
+
+struct bm1390_data_buf {
+ u32 pressure;
+ __be16 temp;
+ s64 ts __aligned(8);
+};
+
+/* BM1390 has FIFO for 4 pressure samples */
+#define BM1390_FIFO_LENGTH 4
+
+struct bm1390_data {
+ s64 timestamp, old_timestamp;
+ struct iio_trigger *trig;
+ struct regmap *regmap;
+ struct device *dev;
+ struct bm1390_data_buf buf;
+ int irq;
+ unsigned int state;
+ bool trigger_enabled;
+ u8 watermark;
+
+ /* Prevent accessing sensor during FIFO read sequence */
+ struct mutex mutex;
+};
+
+enum {
+ BM1390_CHAN_PRESSURE,
+ BM1390_CHAN_TEMP,
+};
+
+static const struct iio_chan_spec bm1390_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ /*
+ * When the IIR filter is used, the number of averaged samples
+ * must be fixed. Thus we don't allow setting the oversampling
+ * ratio.
+ */
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = BM1390_CHAN_PRESSURE,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 22,
+ .storagebits = 32,
+ .endianness = IIO_LE,
+ },
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = BM1390_CHAN_TEMP,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+};
+
+/*
+ * We can't skip reading the pressure because the watermark IRQ is acked
+ * only when the pressure data is read from the FIFO.
+ */
+static const unsigned long bm1390_scan_masks[] = {
+ BIT(BM1390_CHAN_PRESSURE),
+ BIT(BM1390_CHAN_PRESSURE) | BIT(BM1390_CHAN_TEMP),
+ 0
+};
+
+static int bm1390_read_temp(struct bm1390_data *data, int *temp)
+{
+ __be16 temp_raw;
+ int ret;
+
+ ret = regmap_bulk_read(data->regmap, BM1390_REG_TEMP_HI, &temp_raw,
+ sizeof(temp_raw));
+ if (ret)
+ return ret;
+
+ *temp = be16_to_cpu(temp_raw);
+
+ return 0;
+}
+
+static int bm1390_pressure_read(struct bm1390_data *data, u32 *pressure)
+{
+ /* Pressure data is in 3 8-bit registers */
+ u8 raw[3];
+ int ret;
+
+ ret = regmap_bulk_read(data->regmap, BM1390_REG_PRESSURE_BASE,
+ raw, sizeof(raw));
+ if (ret < 0)
+ return ret;
+
+ *pressure = (u32)(raw[2] >> 2 | raw[1] << 6 | raw[0] << 14);
+
+ return 0;
+}
+
+ /* The enum values map directly to register bits */
+enum bm1390_meas_mode {
+ BM1390_MEAS_MODE_STOP = 0x0,
+ BM1390_MEAS_MODE_1SHOT = 0x1,
+ BM1390_MEAS_MODE_CONTINUOUS = 0x2,
+};
+
+static int bm1390_meas_set(struct bm1390_data *data, enum bm1390_meas_mode mode)
+{
+ return regmap_update_bits(data->regmap, BM1390_REG_MODE_CTRL,
+ BM1390_MASK_MEAS_MODE, mode);
+}
+
+/*
+ * If the trigger is not used we just wait until the measurement has
+ * completed. The data-sheet says the maximum measurement cycle (regardless
+ * of the AVE_NUM) is 200 ms, so let's just sleep at least that long. If
+ * speed is needed the trigger should be used.
+ */
+#define BM1390_MAX_MEAS_TIME_MS 205
+
+static int bm1390_read_data(struct bm1390_data *data,
+ struct iio_chan_spec const *chan, int *val, int *val2)
+{
+ int ret, warn;
+
+ mutex_lock(&data->mutex);
+ /*
+ * We use 'continuous mode' even for a raw read because, according to
+ * the data-sheet, one-shot mode can't be used with the IIR filter.
+ */
+ ret = bm1390_meas_set(data, BM1390_MEAS_MODE_CONTINUOUS);
+ if (ret)
+ goto unlock_out;
+
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ msleep(BM1390_MAX_MEAS_TIME_MS);
+ ret = bm1390_pressure_read(data, val);
+ break;
+ case IIO_TEMP:
+ msleep(BM1390_MAX_MEAS_TIME_MS);
+ ret = bm1390_read_temp(data, val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ warn = bm1390_meas_set(data, BM1390_MEAS_MODE_STOP);
+ if (warn)
+ dev_warn(data->dev, "Failed to stop measurement (%d)\n", warn);
+unlock_out:
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int bm1390_read_raw(struct iio_dev *idev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct bm1390_data *data = iio_priv(idev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->type == IIO_TEMP) {
+ *val = 31;
+ *val2 = 250000;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ } else if (chan->type == IIO_PRESSURE) {
+ /*
+ * Pressure in hPa is the register value divided by 2048,
+ * which means kPa is 1/20480 times the register value.
+ */
+ *val = 1;
+ *val2 = 2048;
+
+ return IIO_VAL_FRACTIONAL;
+ }
+
+ return -EINVAL;
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(idev);
+ if (ret)
+ return ret;
+
+ ret = bm1390_read_data(data, chan, val, val2);
+ iio_device_release_direct_mode(idev);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
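An illustrative user-space conversion following the comment above (helper name hypothetical, not part of the driver):

#include <stdint.h>

/* Raw 22-bit pressure sample to hPa: the register value divided by 2048. */
static double bm1390_raw_to_hpa(uint32_t raw)
{
	return (double)raw / 2048.0;	/* e.g. 2076672 / 2048 = 1014.0 hPa */
}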
+
+static int __bm1390_fifo_flush(struct iio_dev *idev, unsigned int samples,
+ s64 timestamp)
+{
+ /* BM1390_FIFO_LENGTH is small so we shouldn't run out of stack */
+ struct bm1390_data_buf buffer[BM1390_FIFO_LENGTH];
+ struct bm1390_data *data = iio_priv(idev);
+ int smp_lvl, ret, i, warn, dummy;
+ u64 sample_period;
+ __be16 temp = 0;
+
+ ret = regmap_read(data->regmap, BM1390_REG_FIFO_LVL, &smp_lvl);
+ if (ret)
+ return ret;
+
+ smp_lvl = FIELD_GET(BM1390_MASK_FIFO_LVL, smp_lvl);
+ if (!smp_lvl)
+ return 0;
+
+ if (smp_lvl > BM1390_FIFO_LENGTH) {
+ /*
+ * The FIFO holds a maximum of 4 samples, so the valid values
+ * are 0, 1, 2, 3, 4 - anything else is probably a bit error
+ * on the I2C line. Don't overflow if this happens.
+ */
+ dev_err(data->dev, "bad FIFO level %d\n", smp_lvl);
+ smp_lvl = BM1390_FIFO_LENGTH;
+ }
+
+ sample_period = timestamp - data->old_timestamp;
+ do_div(sample_period, smp_lvl);
+
+ if (samples && smp_lvl > samples)
+ smp_lvl = samples;
+
+ /*
+ * After some testing it appears that the temperature is not readable
+ * until the FIFO access has been done after the WMI. Thus, we need
+ * to read all the pressure values to memory and read the temperature
+ * only after that.
+ */
+ for (i = 0; i < smp_lvl; i++) {
+ /*
+ * When we start reading data from the FIFO the sensor enters a
+ * special FIFO reading mode. If any other register is accessed
+ * during the FIFO read, samples can be dropped. Prevent access
+ * until FIFO_LVL is read. We hold the mutex, and we still read
+ * FIFO_LVL even if reading a sample fails.
+ */
+ if (test_bit(BM1390_CHAN_PRESSURE, idev->active_scan_mask)) {
+ ret = bm1390_pressure_read(data, &buffer[i].pressure);
+ if (ret)
+ break;
+ }
+
+ /*
+ * The old timestamp is either the previous sample IRQ time,
+ * the previous flush time or, if this was the first sample,
+ * the enable time. Adding a sample period to that should give
+ * the best approximation of the timestamp we are handling.
+ *
+ * The idea is to always keep "old_timestamp" matching the
+ * timestamp which we are currently handling.
+ */
+ data->old_timestamp += sample_period;
+ buffer[i].ts = data->old_timestamp;
+ }
+ /* Reading the FIFO_LVL closes the FIFO access sequence */
+ warn = regmap_read(data->regmap, BM1390_REG_FIFO_LVL, &dummy);
+ if (warn)
+ dev_warn(data->dev, "Closing FIFO sequence failed\n");
+
+ if (ret)
+ return ret;
+
+ if (test_bit(BM1390_CHAN_TEMP, idev->active_scan_mask)) {
+ ret = regmap_bulk_read(data->regmap, BM1390_REG_TEMP_HI, &temp,
+ sizeof(temp));
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < smp_lvl; i++) {
+ buffer[i].temp = temp;
+ iio_push_to_buffers(idev, &buffer[i]);
+ }
+
+ return smp_lvl;
+}
+
+static int bm1390_fifo_flush(struct iio_dev *idev, unsigned int samples)
+{
+ struct bm1390_data *data = iio_priv(idev);
+ s64 timestamp;
+ int ret;
+
+ /*
+ * If fifo_flush is being called from the IRQ handler we know the
+ * stored timestamp is fairly accurate for the last stored sample. If
+ * we are called as a result of a read operation from userspace, and
+ * hence before the watermark interrupt was triggered, take a timestamp
+ * now. We can fall anywhere in between two samples so the error in
+ * this case is at most one sample period.
+ *
+ * We need to have the IRQ disabled or we risk messing up the
+ * timestamps. If we are run from the IRQ, then IRQF_ONESHOT has us
+ * covered - but if we are run by a user-space read we need to disable
+ * the IRQ to be on the safe side. We do this using a synchronous
+ * disable so that if the IRQ thread is running on another CPU we wait
+ * for it to finish.
+ */
+
+ timestamp = iio_get_time_ns(idev);
+ mutex_lock(&data->mutex);
+ ret = __bm1390_fifo_flush(idev, samples, timestamp);
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int bm1390_set_watermark(struct iio_dev *idev, unsigned int val)
+{
+ struct bm1390_data *data = iio_priv(idev);
+
+ if (val < BM1390_WMI_MIN || val > BM1390_WMI_MAX)
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+ data->watermark = val;
+ mutex_unlock(&data->mutex);
+
+ return 0;
+}
+
+static const struct iio_info bm1390_noirq_info = {
+ .read_raw = &bm1390_read_raw,
+};
+
+static const struct iio_info bm1390_info = {
+ .read_raw = &bm1390_read_raw,
+ .hwfifo_set_watermark = bm1390_set_watermark,
+ .hwfifo_flush_to_buffer = bm1390_fifo_flush,
+};
+
+static int bm1390_chip_init(struct bm1390_data *data)
+{
+ int ret;
+
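+ /* Power up the sensor, then assert and release soft reset to reach a known state. */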
+ ret = regmap_write_bits(data->regmap, BM1390_REG_POWER,
+ BM1390_MASK_POWER, BM1390_POWER_ON);
+ if (ret)
+ return ret;
+
+ msleep(1);
+
+ ret = regmap_write_bits(data->regmap, BM1390_REG_RESET,
+ BM1390_MASK_RESET, BM1390_RESET);
+ if (ret)
+ return ret;
+
+ msleep(1);
+
+ ret = regmap_write_bits(data->regmap, BM1390_REG_RESET,
+ BM1390_MASK_RESET, BM1390_RESET_RELEASE);
+ if (ret)
+ return ret;
+
+ msleep(1);
+
+ ret = regmap_reinit_cache(data->regmap, &bm1390_regmap);
+ if (ret) {
+ dev_err(data->dev, "Failed to reinit reg cache\n");
+ return ret;
+ }
+
+ /*
+ * Default to use IIR filter in "middle" mode. Also the AVE_NUM must
+ * be fixed when IIR is in use.
+ */
+ ret = regmap_update_bits(data->regmap, BM1390_REG_MODE_CTRL,
+ BM1390_MASK_AVE_NUM, BM1390_IIR_AVE_NUM);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(data->regmap, BM1390_REG_FIFO_CTRL,
+ BM1390_MASK_IIR_MODE, BM1390_IIR_MODE_MID);
+}
+
+static int bm1390_fifo_set_wmi(struct bm1390_data *data)
+{
+ u8 regval;
+
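+ /* The register field encodes the watermark as an offset from BM1390_WMI_MIN. */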
+ regval = FIELD_PREP(BM1390_MASK_FIFO_LEN,
+ data->watermark - BM1390_WMI_MIN);
+
+ return regmap_update_bits(data->regmap, BM1390_REG_FIFO_CTRL,
+ BM1390_MASK_FIFO_LEN, regval);
+}
+
+static int bm1390_fifo_enable(struct iio_dev *idev)
+{
+ struct bm1390_data *data = iio_priv(idev);
+ int ret;
+
+ /* We can't do buffered mode without an IRQ as we never get the WMI */
+ if (data->irq <= 0)
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+ if (data->trigger_enabled) {
+ ret = -EBUSY;
+ goto unlock_out;
+ }
+
+ /* Update watermark to HW */
+ ret = bm1390_fifo_set_wmi(data);
+ if (ret)
+ goto unlock_out;
+
+ /* Enable WMI_IRQ */
+ ret = regmap_set_bits(data->regmap, BM1390_REG_MODE_CTRL,
+ BM1390_MASK_WMI_EN);
+ if (ret)
+ goto unlock_out;
+
+ /* Enable FIFO */
+ ret = regmap_set_bits(data->regmap, BM1390_REG_FIFO_CTRL,
+ BM1390_MASK_FIFO_EN);
+ if (ret)
+ goto unlock_out;
+
+ data->state = BM1390_STATE_FIFO;
+
+ data->old_timestamp = iio_get_time_ns(idev);
+ ret = bm1390_meas_set(data, BM1390_MEAS_MODE_CONTINUOUS);
+
+unlock_out:
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int bm1390_fifo_disable(struct iio_dev *idev)
+{
+ struct bm1390_data *data = iio_priv(idev);
+ int ret;
+
+ msleep(1);
+
+ mutex_lock(&data->mutex);
+ ret = bm1390_meas_set(data, BM1390_MEAS_MODE_STOP);
+ if (ret)
+ goto unlock_out;
+
+ /* Disable FIFO */
+ ret = regmap_clear_bits(data->regmap, BM1390_REG_FIFO_CTRL,
+ BM1390_MASK_FIFO_EN);
+ if (ret)
+ goto unlock_out;
+
+ data->state = BM1390_STATE_SAMPLE;
+
+ /* Disable WMI_IRQ */
+ ret = regmap_clear_bits(data->regmap, BM1390_REG_MODE_CTRL,
+ BM1390_MASK_WMI_EN);
+
+unlock_out:
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int bm1390_buffer_postenable(struct iio_dev *idev)
+{
+ /*
+ * If we use the data-ready trigger, then the IRQ masks are handled by
+ * the trigger enable and the hardware FIFO is not used; we just push
+ * results to the IIO buffer when the data-ready trigger fires.
+ */
+ if (iio_device_get_current_mode(idev) == INDIO_BUFFER_TRIGGERED)
+ return 0;
+
+ return bm1390_fifo_enable(idev);
+}
+
+static int bm1390_buffer_predisable(struct iio_dev *idev)
+{
+ if (iio_device_get_current_mode(idev) == INDIO_BUFFER_TRIGGERED)
+ return 0;
+
+ return bm1390_fifo_disable(idev);
+}
+
+static const struct iio_buffer_setup_ops bm1390_buffer_ops = {
+ .postenable = bm1390_buffer_postenable,
+ .predisable = bm1390_buffer_predisable,
+};
+
+static irqreturn_t bm1390_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *idev = pf->indio_dev;
+ struct bm1390_data *data = iio_priv(idev);
+ int ret, status;
+
+ /* DRDY is acked by reading status reg */
+ ret = regmap_read(data->regmap, BM1390_REG_STATUS, &status);
+ if (ret || !status)
+ return IRQ_NONE;
+
+ dev_dbg(data->dev, "DRDY trig status 0x%x\n", status);
+
+ if (test_bit(BM1390_CHAN_PRESSURE, idev->active_scan_mask)) {
+ ret = bm1390_pressure_read(data, &data->buf.pressure);
+ if (ret) {
+ dev_warn(data->dev, "sample read failed %d\n", ret);
+ return IRQ_NONE;
+ }
+ }
+
+ if (test_bit(BM1390_CHAN_TEMP, idev->active_scan_mask)) {
+ ret = regmap_bulk_read(data->regmap, BM1390_REG_TEMP_HI,
+ &data->buf.temp, sizeof(data->buf.temp));
+ if (ret) {
+ dev_warn(data->dev, "temp read failed %d\n", ret);
+ return IRQ_HANDLED;
+ }
+ }
+
+ iio_push_to_buffers_with_timestamp(idev, &data->buf, data->timestamp);
+ iio_trigger_notify_done(idev->trig);
+
+ return IRQ_HANDLED;
+}
+
+/* Get timestamps and wake the thread if we need to read data */
+static irqreturn_t bm1390_irq_handler(int irq, void *private)
+{
+ struct iio_dev *idev = private;
+ struct bm1390_data *data = iio_priv(idev);
+
+ data->timestamp = iio_get_time_ns(idev);
+
+ if (data->state == BM1390_STATE_FIFO || data->trigger_enabled)
+ return IRQ_WAKE_THREAD;
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t bm1390_irq_thread_handler(int irq, void *private)
+{
+ struct iio_dev *idev = private;
+ struct bm1390_data *data = iio_priv(idev);
+ int ret = IRQ_NONE;
+
+ mutex_lock(&data->mutex);
+
+ if (data->trigger_enabled) {
+ iio_trigger_poll_nested(data->trig);
+ ret = IRQ_HANDLED;
+ } else if (data->state == BM1390_STATE_FIFO) {
+ int ok;
+
+ ok = __bm1390_fifo_flush(idev, BM1390_FIFO_LENGTH,
+ data->timestamp);
+ if (ok > 0)
+ ret = IRQ_HANDLED;
+ }
+
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int bm1390_set_drdy_irq(struct bm1390_data *data, bool en)
+{
+ if (en)
+ return regmap_set_bits(data->regmap, BM1390_REG_MODE_CTRL,
+ BM1390_MASK_DRDY_EN);
+ return regmap_clear_bits(data->regmap, BM1390_REG_MODE_CTRL,
+ BM1390_MASK_DRDY_EN);
+}
+
+static int bm1390_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct bm1390_data *data = iio_trigger_get_drvdata(trig);
+ int ret = 0;
+
+ mutex_lock(&data->mutex);
+
+ if (data->trigger_enabled == state)
+ goto unlock_out;
+
+ if (data->state == BM1390_STATE_FIFO) {
+ dev_warn(data->dev, "Can't set trigger when FIFO enabled\n");
+ ret = -EBUSY;
+ goto unlock_out;
+ }
+
+ data->trigger_enabled = state;
+
+ if (state) {
+ ret = bm1390_meas_set(data, BM1390_MEAS_MODE_CONTINUOUS);
+ if (ret)
+ goto unlock_out;
+ } else {
+ int dummy;
+
+ ret = bm1390_meas_set(data, BM1390_MEAS_MODE_STOP);
+ if (ret)
+ goto unlock_out;
+
+ /*
+ * We need to read the status register in order to ACK the
+ * data-ready which may have been generated just before we
+ * disabled the measurement.
+ */
+ ret = regmap_read(data->regmap, BM1390_REG_STATUS, &dummy);
+ if (ret)
+ dev_warn(data->dev, "status read failed\n");
+ }
+
+ ret = bm1390_set_drdy_irq(data, state);
+
+unlock_out:
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static const struct iio_trigger_ops bm1390_trigger_ops = {
+ .set_trigger_state = bm1390_trigger_set_state,
+};
+
+static int bm1390_setup_buffer(struct bm1390_data *data, struct iio_dev *idev)
+{
+ int ret;
+
+ ret = devm_iio_triggered_buffer_setup(data->dev, idev,
+ &iio_pollfunc_store_time,
+ &bm1390_trigger_handler,
+ &bm1390_buffer_ops);
+ if (ret)
+ return dev_err_probe(data->dev, ret,
+ "iio_triggered_buffer_setup FAIL\n");
+
+ idev->available_scan_masks = bm1390_scan_masks;
+
+ return 0;
+}
+
+static int bm1390_setup_trigger(struct bm1390_data *data, struct iio_dev *idev,
+ int irq)
+{
+ struct iio_trigger *itrig;
+ char *name;
+ int ret;
+
+ itrig = devm_iio_trigger_alloc(data->dev, "%sdata-rdy-dev%d", idev->name,
+ iio_device_id(idev));
+ if (!itrig)
+ return -ENOMEM;
+
+ data->trig = itrig;
+
+ itrig->ops = &bm1390_trigger_ops;
+ iio_trigger_set_drvdata(itrig, data);
+
+ name = devm_kasprintf(data->dev, GFP_KERNEL, "%s-bm1390",
+ dev_name(data->dev));
+ if (name == NULL)
+ return -ENOMEM;
+
+ ret = devm_request_threaded_irq(data->dev, irq, bm1390_irq_handler,
+ &bm1390_irq_thread_handler,
+ IRQF_ONESHOT, name, idev);
+ if (ret)
+ return dev_err_probe(data->dev, ret, "Could not request IRQ\n");
+
+ ret = devm_iio_trigger_register(data->dev, itrig);
+ if (ret)
+ return dev_err_probe(data->dev, ret,
+ "Trigger registration failed\n");
+
+ return 0;
+}
+
+static int bm1390_probe(struct i2c_client *i2c)
+{
+ struct bm1390_data *data;
+ struct regmap *regmap;
+ struct iio_dev *idev;
+ struct device *dev;
+ unsigned int part_id;
+ int ret;
+
+ dev = &i2c->dev;
+
+ regmap = devm_regmap_init_i2c(i2c, &bm1390_regmap);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "Failed to initialize Regmap\n");
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get regulator\n");
+
+ ret = regmap_read(regmap, BM1390_REG_PART_ID, &part_id);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to access sensor\n");
+
+ if (part_id != BM1390_ID)
+ dev_warn(dev, "unknown device 0x%x\n", part_id);
+
+ idev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!idev)
+ return -ENOMEM;
+
+ data = iio_priv(idev);
+ data->regmap = regmap;
+ data->dev = dev;
+ data->irq = i2c->irq;
+ /*
+ * For now we only allow watermarks from BM1390_WMI_MIN to
+ * BM1390_WMI_MAX and reject every other configuration when triggered
+ * mode is not used.
+ */
+ data->watermark = BM1390_WMI_MAX;
+ mutex_init(&data->mutex);
+
+ idev->channels = bm1390_channels;
+ idev->num_channels = ARRAY_SIZE(bm1390_channels);
+ idev->name = "bm1390";
+ idev->modes = INDIO_DIRECT_MODE;
+
+ ret = bm1390_chip_init(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "sensor init failed\n");
+
+ ret = bm1390_setup_buffer(data, idev);
+ if (ret)
+ return ret;
+
+ /* No trigger if we don't have IRQ for data-ready and WMI */
+ if (i2c->irq > 0) {
+ idev->info = &bm1390_info;
+ idev->modes |= INDIO_BUFFER_SOFTWARE;
+ ret = bm1390_setup_trigger(data, idev, i2c->irq);
+ if (ret)
+ return ret;
+ } else {
+ idev->info = &bm1390_noirq_info;
+ }
+
+ ret = devm_iio_device_register(dev, idev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Unable to register iio device\n");
+
+ return 0;
+}
+
+static const struct of_device_id bm1390_of_match[] = {
+ { .compatible = "rohm,bm1390glv-z" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bm1390_of_match);
+
+static const struct i2c_device_id bm1390_id[] = {
+ { "bm1390glv-z", },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, bm1390_id);
+
+static struct i2c_driver bm1390_driver = {
+ .driver = {
+ .name = "bm1390",
+ .of_match_table = bm1390_of_match,
+ /*
+ * Probing explicitly requires a few milliseconds of sleep.
+ * Enabling the VDD regulator may also add ramp-up delays.
+ */
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = bm1390_probe,
+ .id_table = bm1390_id,
+};
+module_i2c_driver(bm1390_driver);
+
+MODULE_AUTHOR("Matti Vaittinen <mazziesaccount@gmail.com>");
+MODULE_DESCRIPTION("Driver for ROHM BM1390 pressure sensor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/proximity/cros_ec_mkbp_proximity.c b/drivers/iio/proximity/cros_ec_mkbp_proximity.c
index 571ea1812246..4df506bb8b38 100644
--- a/drivers/iio/proximity/cros_ec_mkbp_proximity.c
+++ b/drivers/iio/proximity/cros_ec_mkbp_proximity.c
@@ -239,15 +239,13 @@ static int cros_ec_mkbp_proximity_probe(struct platform_device *pdev)
return 0;
}
-static int cros_ec_mkbp_proximity_remove(struct platform_device *pdev)
+static void cros_ec_mkbp_proximity_remove(struct platform_device *pdev)
{
struct cros_ec_mkbp_proximity_data *data = platform_get_drvdata(pdev);
struct cros_ec_device *ec = data->ec;
blocking_notifier_chain_unregister(&ec->event_notifier,
&data->notifier);
-
- return 0;
}
static const struct of_device_id cros_ec_mkbp_proximity_of_match[] = {
@@ -263,7 +261,7 @@ static struct platform_driver cros_ec_mkbp_proximity_driver = {
.pm = pm_sleep_ptr(&cros_ec_mkbp_proximity_pm_ops),
},
.probe = cros_ec_mkbp_proximity_probe,
- .remove = cros_ec_mkbp_proximity_remove,
+ .remove_new = cros_ec_mkbp_proximity_remove,
};
module_platform_driver(cros_ec_mkbp_proximity_driver);
diff --git a/drivers/iio/proximity/srf04.c b/drivers/iio/proximity/srf04.c
index faf2f806ce80..86c57672fc7e 100644
--- a/drivers/iio/proximity/srf04.c
+++ b/drivers/iio/proximity/srf04.c
@@ -344,7 +344,7 @@ static int srf04_probe(struct platform_device *pdev)
return ret;
}
-static int srf04_remove(struct platform_device *pdev)
+static void srf04_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct srf04_data *data = iio_priv(indio_dev);
@@ -355,8 +355,6 @@ static int srf04_remove(struct platform_device *pdev)
pm_runtime_disable(data->dev);
pm_runtime_set_suspended(data->dev);
}
-
- return 0;
}
static int srf04_pm_runtime_suspend(struct device *dev)
@@ -391,7 +389,7 @@ static const struct dev_pm_ops srf04_pm_ops = {
static struct platform_driver srf04_driver = {
.probe = srf04_probe,
- .remove = srf04_remove,
+ .remove_new = srf04_remove,
.driver = {
.name = "srf04-gpio",
.of_match_table = of_srf04_match,
diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
index d977aacb7491..0d230a0dff56 100644
--- a/drivers/iio/proximity/sx9310.c
+++ b/drivers/iio/proximity/sx9310.c
@@ -159,6 +159,11 @@ static_assert(SX9310_NUM_CHANNELS <= SX_COMMON_MAX_NUM_CHANNELS);
}
#define SX9310_CHANNEL(idx) SX9310_NAMED_CHANNEL(idx, NULL)
+struct sx931x_info {
+ const char *name;
+ unsigned int whoami;
+};
+
static const struct iio_chan_spec sx9310_channels[] = {
SX9310_CHANNEL(0), /* CS0 */
SX9310_CHANNEL(1), /* CS1 */
@@ -902,7 +907,7 @@ static int sx9310_check_whoami(struct device *dev,
struct iio_dev *indio_dev)
{
struct sx_common_data *data = iio_priv(indio_dev);
- unsigned int long ddata;
+ const struct sx931x_info *ddata;
unsigned int whoami;
int ret;
@@ -910,20 +915,11 @@ static int sx9310_check_whoami(struct device *dev,
if (ret)
return ret;
- ddata = (uintptr_t)device_get_match_data(dev);
- if (ddata != whoami)
- return -EINVAL;
-
- switch (whoami) {
- case SX9310_WHOAMI_VALUE:
- indio_dev->name = "sx9310";
- break;
- case SX9311_WHOAMI_VALUE:
- indio_dev->name = "sx9311";
- break;
- default:
+ ddata = device_get_match_data(dev);
+ if (ddata->whoami != whoami)
return -ENODEV;
- }
+
+ indio_dev->name = ddata->name;
return 0;
}
@@ -1015,23 +1011,33 @@ out:
static DEFINE_SIMPLE_DEV_PM_OPS(sx9310_pm_ops, sx9310_suspend, sx9310_resume);
+static const struct sx931x_info sx9310_info = {
+ .name = "sx9310",
+ .whoami = SX9310_WHOAMI_VALUE,
+};
+
+static const struct sx931x_info sx9311_info = {
+ .name = "sx9311",
+ .whoami = SX9311_WHOAMI_VALUE,
+};
+
static const struct acpi_device_id sx9310_acpi_match[] = {
- { "STH9310", SX9310_WHOAMI_VALUE },
- { "STH9311", SX9311_WHOAMI_VALUE },
+ { "STH9310", (kernel_ulong_t)&sx9310_info },
+ { "STH9311", (kernel_ulong_t)&sx9311_info },
{}
};
MODULE_DEVICE_TABLE(acpi, sx9310_acpi_match);
static const struct of_device_id sx9310_of_match[] = {
- { .compatible = "semtech,sx9310", (void *)SX9310_WHOAMI_VALUE },
- { .compatible = "semtech,sx9311", (void *)SX9311_WHOAMI_VALUE },
+ { .compatible = "semtech,sx9310", &sx9310_info },
+ { .compatible = "semtech,sx9311", &sx9311_info },
{}
};
MODULE_DEVICE_TABLE(of, sx9310_of_match);
static const struct i2c_device_id sx9310_id[] = {
- { "sx9310", SX9310_WHOAMI_VALUE },
- { "sx9311", SX9311_WHOAMI_VALUE },
+ { "sx9310", (kernel_ulong_t)&sx9310_info },
+ { "sx9311", (kernel_ulong_t)&sx9311_info },
{}
};
MODULE_DEVICE_TABLE(i2c, sx9310_id);
diff --git a/drivers/iio/resolver/Kconfig b/drivers/iio/resolver/Kconfig
index 47dbfead9b31..424529d36080 100644
--- a/drivers/iio/resolver/Kconfig
+++ b/drivers/iio/resolver/Kconfig
@@ -25,4 +25,17 @@ config AD2S1200
To compile this driver as a module, choose M here: the
module will be called ad2s1200.
+
+config AD2S1210
+ tristate "Analog Devices ad2s1210 driver"
+ depends on SPI
+ depends on COMMON_CLK
+ depends on GPIOLIB || COMPILE_TEST
+ help
+ Say yes here to build support for the Analog Devices AD2S1210
+ SPI resolver to digital converter. The driver provides direct
+ access via sysfs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad2s1210.
+
endmenu
diff --git a/drivers/iio/resolver/Makefile b/drivers/iio/resolver/Makefile
index fa558138ce45..7f6c876c35ae 100644
--- a/drivers/iio/resolver/Makefile
+++ b/drivers/iio/resolver/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_AD2S90) += ad2s90.o
obj-$(CONFIG_AD2S1200) += ad2s1200.o
+obj-$(CONFIG_AD2S1210) += ad2s1210.o
diff --git a/drivers/iio/resolver/ad2s1210.c b/drivers/iio/resolver/ad2s1210.c
new file mode 100644
index 000000000000..1bd1b950e7cc
--- /dev/null
+++ b/drivers/iio/resolver/ad2s1210.c
@@ -0,0 +1,1519 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ad2s1210.c support for the ADI Resolver to Digital Converters: AD2S1210
+ *
+ * Copyright (c) 2010-2010 Analog Devices Inc.
+ * Copyright (c) 2023 BayLibre, SAS
+ *
+ * Device register to IIO ABI mapping:
+ *
+ * Register | Addr | IIO ABI (sysfs)
+ * ----------------------------|------|-------------------------------------------
+ * DOS Overrange Threshold | 0x89 | events/in_altvoltage0_thresh_rising_value
+ * DOS Mismatch Threshold | 0x8A | events/in_altvoltage0_mag_rising_value
+ * DOS Reset Maximum Threshold | 0x8B | events/in_altvoltage0_mag_rising_reset_max
+ * DOS Reset Minimum Threshold | 0x8C | events/in_altvoltage0_mag_rising_reset_min
+ * LOT High Threshold | 0x8D | events/in_angl1_thresh_rising_value
+ * LOT Low Threshold [1] | 0x8E | events/in_angl1_thresh_rising_hysteresis
+ * Excitation Frequency | 0x91 | out_altvoltage0_frequency
+ * Control | 0x92 | *as bit fields*
+ * Phase lock range | D5 | events/in_phase0_mag_rising_value
+ * Hysteresis | D4 | in_angl0_hysteresis
+ * Encoder resolution | D3:2 | *not implemented*
+ * Resolution | D1:0 | *device tree: assigned-resolution-bits*
+ * Soft Reset | 0xF0 | [2]
+ * Fault | 0xFF | *not implemented*
+ *
+ * [1]: The value written to the LOT low register is the high value minus
+ * the hysteresis.
+ * [2]: Soft reset is performed when `out_altvoltage0_frequency` is written.
+ *
+ * Fault to event mapping:
+ *
+ * Fault | Bit | Channel | Type | Direction
+ * ----------------------------------------|----|---------------------------------
+ * Sine/cosine inputs clipped [3] | D7 | altvoltage1 | mag | either
+ * Sine/cosine inputs below LOS | D6 | altvoltage0 | thresh | falling
+ * Sine/cosine inputs exceed DOS overrange | D5 | altvoltage0 | thresh | rising
+ * Sine/cosine inputs exceed DOS mismatch | D4 | altvoltage0 | mag | rising
+ * Tracking error exceeds LOT | D3 | angl1 | thresh | rising
+ * Velocity exceeds maximum tracking rate | D2 | anglvel0 | mag | rising
+ * Phase error exceeds phase lock range | D1 | phase0 | mag | rising
+ * Configuration parity error | D0 | *writes to kernel log*
+ *
+ * [3]: The chip does not differentiate between fault on sine vs. cosine so
+ * there will also be an event on the altvoltage2 channel.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+/* control register flags */
+#define AD2S1210_ADDRESS_DATA BIT(7)
+#define AD2S1210_PHASE_LOCK_RANGE_44 BIT(5)
+#define AD2S1210_ENABLE_HYSTERESIS BIT(4)
+#define AD2S1210_SET_ENRES GENMASK(3, 2)
+#define AD2S1210_SET_RES GENMASK(1, 0)
+
+/* fault register flags */
+#define AD2S1210_FAULT_CLIP BIT(7)
+#define AD2S1210_FAULT_LOS BIT(6)
+#define AD2S1210_FAULT_DOS_OVR BIT(5)
+#define AD2S1210_FAULT_DOS_MIS BIT(4)
+#define AD2S1210_FAULT_LOT BIT(3)
+#define AD2S1210_FAULT_VELOCITY BIT(2)
+#define AD2S1210_FAULT_PHASE BIT(1)
+#define AD2S1210_FAULT_CONFIG_PARITY BIT(0)
+
+#define AD2S1210_REG_POSITION_MSB 0x80
+#define AD2S1210_REG_POSITION_LSB 0x81
+#define AD2S1210_REG_VELOCITY_MSB 0x82
+#define AD2S1210_REG_VELOCITY_LSB 0x83
+#define AD2S1210_REG_LOS_THRD 0x88
+#define AD2S1210_REG_DOS_OVR_THRD 0x89
+#define AD2S1210_REG_DOS_MIS_THRD 0x8A
+#define AD2S1210_REG_DOS_RST_MAX_THRD 0x8B
+#define AD2S1210_REG_DOS_RST_MIN_THRD 0x8C
+#define AD2S1210_REG_LOT_HIGH_THRD 0x8D
+#define AD2S1210_REG_LOT_LOW_THRD 0x8E
+#define AD2S1210_REG_EXCIT_FREQ 0x91
+#define AD2S1210_REG_CONTROL 0x92
+#define AD2S1210_REG_SOFT_RESET 0xF0
+#define AD2S1210_REG_FAULT 0xFF
+
+#define AD2S1210_MIN_CLKIN 6144000
+#define AD2S1210_MAX_CLKIN 10240000
+#define AD2S1210_MIN_EXCIT 2000
+#define AD2S1210_DEF_EXCIT 10000
+#define AD2S1210_MAX_EXCIT 20000
+#define AD2S1210_MIN_FCW 0x4
+#define AD2S1210_MAX_FCW 0x50
+
+/* 44 degrees ~= 0.767945 radians */
+#define PHASE_44_DEG_TO_RAD_INT 0
+#define PHASE_44_DEG_TO_RAD_MICRO 767945
+/* 360 degrees ~= 6.283185 radians */
+#define PHASE_360_DEG_TO_RAD_INT 6
+#define PHASE_360_DEG_TO_RAD_MICRO 283185
+
+/* Threshold voltage registers have 1 LSB == 38 mV */
+#define THRESHOLD_MILLIVOLT_PER_LSB 38
+/* max voltage for threshold registers is 0x7F * 38 mV */
+#define THRESHOLD_RANGE_STR "[0 38 4826]"
+
+#define FAULT_ONESHOT(bit, new, old) (new & bit && !(old & bit))
+
+enum ad2s1210_mode {
+ MOD_POS = 0b00,
+ MOD_VEL = 0b01,
+ MOD_RESERVED = 0b10,
+ MOD_CONFIG = 0b11,
+};
+
+enum ad2s1210_resolution {
+ AD2S1210_RES_10 = 0b00,
+ AD2S1210_RES_12 = 0b01,
+ AD2S1210_RES_14 = 0b10,
+ AD2S1210_RES_16 = 0b11,
+};
+
+struct ad2s1210_state {
+ struct mutex lock;
+ struct spi_device *sdev;
+ /** GPIO pin connected to SAMPLE line. */
+ struct gpio_desc *sample_gpio;
+ /** GPIO pins connected to A0 and A1 lines. */
+ struct gpio_descs *mode_gpios;
+ /** Used to access config registers. */
+ struct regmap *regmap;
+ /** The external oscillator frequency in Hz. */
+ unsigned long clkin_hz;
+ /** Available raw hysteresis values based on resolution. */
+ int hysteresis_available[2];
+ /** The selected resolution */
+ enum ad2s1210_resolution resolution;
+ /** Copy of fault register from the previous read. */
+ u8 prev_fault_flags;
+ /** For reading raw sample value via SPI. */
+ struct {
+ __be16 raw;
+ u8 fault;
+ } sample __aligned(IIO_DMA_MINALIGN);
+ /** Scan buffer */
+ struct {
+ __be16 chan[2];
+ /* Ensure timestamp is naturally aligned. */
+ s64 timestamp __aligned(8);
+ } scan;
+ /** SPI receive buffer. */
+ u8 rx[2];
+ /** SPI transmit buffer. */
+ u8 tx[2];
+};
+
+static int ad2s1210_set_mode(struct ad2s1210_state *st, enum ad2s1210_mode mode)
+{
+ struct gpio_descs *gpios = st->mode_gpios;
+ DECLARE_BITMAP(bitmap, 2);
+
+ bitmap[0] = mode;
+
+ return gpiod_set_array_value(gpios->ndescs, gpios->desc, gpios->info,
+ bitmap);
+}
+
+/*
+ * Writes the given data to the given register address.
+ *
+ * The device is first placed in configuration mode before the register
+ * is written.
+ */
+static int ad2s1210_regmap_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct ad2s1210_state *st = context;
+ struct spi_transfer xfers[] = {
+ {
+ .len = 1,
+ .rx_buf = &st->rx[0],
+ .tx_buf = &st->tx[0],
+ .cs_change = 1,
+ }, {
+ .len = 1,
+ .rx_buf = &st->rx[1],
+ .tx_buf = &st->tx[1],
+ },
+ };
+ int ret;
+
+ /* values can only be 7 bits, the MSB indicates an address */
+ if (val & ~0x7F)
+ return -EINVAL;
+
+ st->tx[0] = reg;
+ st->tx[1] = val;
+
+ ret = ad2s1210_set_mode(st, MOD_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ ret = spi_sync_transfer(st->sdev, xfers, ARRAY_SIZE(xfers));
+ if (ret < 0)
+ return ret;
+
+ /* soft reset also clears the fault register */
+ if (reg == AD2S1210_REG_SOFT_RESET)
+ st->prev_fault_flags = 0;
+
+ return 0;
+}
+
+/*
+ * Reads value from one of the registers.
+ *
+ * The device is first placed in configuration mode before the register
+ * is read.
+ */
+static int ad2s1210_regmap_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct ad2s1210_state *st = context;
+ struct spi_transfer xfers[] = {
+ {
+ .len = 1,
+ .rx_buf = &st->rx[0],
+ .tx_buf = &st->tx[0],
+ .cs_change = 1,
+ }, {
+ .len = 1,
+ .rx_buf = &st->rx[1],
+ .tx_buf = &st->tx[1],
+ },
+ };
+ int ret;
+
+ ret = ad2s1210_set_mode(st, MOD_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ st->tx[0] = reg;
+ /*
+ * Must be valid register address here otherwise this could write data.
+ * It doesn't matter which one as long as reading doesn't have side-
+ * effects.
+ */
+ st->tx[1] = AD2S1210_REG_CONTROL;
+
+ ret = spi_sync_transfer(st->sdev, xfers, ARRAY_SIZE(xfers));
+ if (ret < 0)
+ return ret;
+
+ /* reading the fault register also clears it */
+ if (reg == AD2S1210_REG_FAULT)
+ st->prev_fault_flags = 0;
+
+ /*
+ * If the D7 bit is set on any read/write register, it indicates a
+ * parity error. The fault register is read-only and the D7 bit means
+ * something else there.
+ */
+ if (reg != AD2S1210_REG_FAULT && st->rx[1] & AD2S1210_ADDRESS_DATA)
+ return -EBADMSG;
+
+ *val = st->rx[1];
+
+ return 0;
+}
+
+/*
+ * Toggles the SAMPLE line on the AD2S1210 to latch in the current position,
+ * velocity, and faults.
+ *
+ * Must be called with lock held.
+ */
+static void ad2s1210_toggle_sample_line(struct ad2s1210_state *st)
+{
+ /*
+ * Datasheet specifies minimum hold time t16 = 2 * tck + 20 ns. So the
+ * longest time needed is when CLKIN is 6.144 MHz, in which case t16
+ * ~= 350 ns. The same delay is also needed before re-asserting the
+ * SAMPLE line.
+ */
+ gpiod_set_value(st->sample_gpio, 1);
+ ndelay(350);
+ gpiod_set_value(st->sample_gpio, 0);
+ ndelay(350);
+}
+
+/*
+ * Sets the excitation frequency and performs software reset.
+ *
+ * Must be called with lock held.
+ */
+static int ad2s1210_reinit_excitation_frequency(struct ad2s1210_state *st,
+ u16 fexcit)
+{
+ /* Map resolution to settle time in milliseconds. */
+ static const int track_time_ms[] = { 10, 20, 25, 60 };
+ unsigned int ignored;
+ int ret;
+ u8 fcw;
+
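+ /*
+ * The excitation output frequency is FCW * CLKIN / 2^15, so derive the
+ * frequency control word from the requested frequency.
+ */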
+ fcw = fexcit * (1 << 15) / st->clkin_hz;
+ if (fcw < AD2S1210_MIN_FCW || fcw > AD2S1210_MAX_FCW)
+ return -ERANGE;
+
+ ret = regmap_write(st->regmap, AD2S1210_REG_EXCIT_FREQ, fcw);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Software reset reinitializes the excitation frequency output.
+ * It does not reset any of the configuration registers.
+ */
+ ret = regmap_write(st->regmap, AD2S1210_REG_SOFT_RESET, 0);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Soft reset always triggers some faults due to the change in the output
+ * signal so clear the faults too. We need to delay for some time
+ * (what datasheet calls t[track]) to allow things to settle before
+ * clearing the faults.
+ */
+ msleep(track_time_ms[st->resolution] * 8192000 / st->clkin_hz);
+
+ /* Reading the fault register clears the faults. */
+ ret = regmap_read(st->regmap, AD2S1210_REG_FAULT, &ignored);
+ if (ret < 0)
+ return ret;
+
+ /* Have to toggle sample line to get fault output pins to reset. */
+ ad2s1210_toggle_sample_line(st);
+
+ return 0;
+}
+
+static void ad2s1210_push_events(struct iio_dev *indio_dev,
+ u8 flags, s64 timestamp)
+{
+ struct ad2s1210_state *st = iio_priv(indio_dev);
+
+ /* Sine/cosine inputs clipped */
+ if (FAULT_ONESHOT(AD2S1210_FAULT_CLIP, flags, st->prev_fault_flags)) {
+ /*
+ * The chip does not differentiate between fault on sine vs.
+ * cosine channel so we just send an event on both channels.
+ */
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_ALTVOLTAGE, 1,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_EITHER),
+ timestamp);
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_ALTVOLTAGE, 2,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_EITHER),
+ timestamp);
+ }
+
+ /* Sine/cosine inputs below LOS threshold */
+ if (FAULT_ONESHOT(AD2S1210_FAULT_LOS, flags, st->prev_fault_flags))
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_ALTVOLTAGE, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ timestamp);
+
+ /* Sine/cosine inputs exceed DOS overrange threshold */
+ if (FAULT_ONESHOT(AD2S1210_FAULT_DOS_OVR, flags, st->prev_fault_flags))
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_ALTVOLTAGE, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ timestamp);
+
+ /* Sine/cosine inputs exceed DOS mismatch threshold */
+ if (FAULT_ONESHOT(AD2S1210_FAULT_DOS_MIS, flags, st->prev_fault_flags))
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_ALTVOLTAGE, 0,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_RISING),
+ timestamp);
+
+ /* Tracking error exceeds LOT threshold */
+ if (FAULT_ONESHOT(AD2S1210_FAULT_LOT, flags, st->prev_fault_flags))
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_ANGL, 1,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ timestamp);
+
+ /* Velocity exceeds maximum tracking rate */
+ if (FAULT_ONESHOT(AD2S1210_FAULT_VELOCITY, flags, st->prev_fault_flags))
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_ANGL_VEL, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ timestamp);
+
+ /* Phase error exceeds phase lock range */
+ if (FAULT_ONESHOT(AD2S1210_FAULT_PHASE, flags, st->prev_fault_flags))
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_PHASE, 0,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_RISING),
+ timestamp);
+
+ /* Configuration parity error */
+ if (FAULT_ONESHOT(AD2S1210_FAULT_CONFIG_PARITY, flags,
+ st->prev_fault_flags))
+ /*
+ * Userspace should also get notified of this via error return
+ * when trying to write to any attribute that writes a register.
+ */
+ dev_err_ratelimited(&indio_dev->dev,
+ "Configuration parity error\n");
+
+ st->prev_fault_flags = flags;
+}
+
+static int ad2s1210_single_conversion(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ struct ad2s1210_state *st = iio_priv(indio_dev);
+ s64 timestamp;
+ int ret;
+
+ guard(mutex)(&st->lock);
+
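+ /*
+ * Toggling the SAMPLE line latches position, velocity and fault flags;
+ * the mode pins then select which register is clocked out over SPI.
+ */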
+ ad2s1210_toggle_sample_line(st);
+ timestamp = iio_get_time_ns(indio_dev);
+
+ switch (chan->type) {
+ case IIO_ANGL:
+ ret = ad2s1210_set_mode(st, MOD_POS);
+ break;
+ case IIO_ANGL_VEL:
+ ret = ad2s1210_set_mode(st, MOD_VEL);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ret < 0)
+ return ret;
+ ret = spi_read(st->sdev, &st->sample, 3);
+ if (ret < 0)
+ return ret;
+
+ switch (chan->type) {
+ case IIO_ANGL:
+ *val = be16_to_cpu(st->sample.raw);
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_ANGL_VEL:
+ *val = (s16)be16_to_cpu(st->sample.raw);
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ad2s1210_push_events(indio_dev, st->sample.fault, timestamp);
+
+ return ret;
+}
+
+static int ad2s1210_get_hysteresis(struct ad2s1210_state *st, int *val)
+{
+ int ret;
+
+ guard(mutex)(&st->lock);
+ ret = regmap_test_bits(st->regmap, AD2S1210_REG_CONTROL,
+ AD2S1210_ENABLE_HYSTERESIS);
+ if (ret < 0)
+ return ret;
+
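+ /*
+ * regmap_test_bits() returns 0 or 1. When enabled, the hysteresis is
+ * +/- 1 LSB of the raw value and the LSB position depends on the
+ * configured resolution.
+ */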
+ *val = ret << (2 * (AD2S1210_RES_16 - st->resolution));
+ return IIO_VAL_INT;
+}
+
+static int ad2s1210_set_hysteresis(struct ad2s1210_state *st, int val)
+{
+ guard(mutex)(&st->lock);
+ return regmap_update_bits(st->regmap, AD2S1210_REG_CONTROL,
+ AD2S1210_ENABLE_HYSTERESIS,
+ val ? AD2S1210_ENABLE_HYSTERESIS : 0);
+}
+
+static int ad2s1210_get_phase_lock_range(struct ad2s1210_state *st,
+ int *val, int *val2)
+{
+ int ret;
+
+ guard(mutex)(&st->lock);
+ ret = regmap_test_bits(st->regmap, AD2S1210_REG_CONTROL,
+ AD2S1210_PHASE_LOCK_RANGE_44);
+ if (ret < 0)
+ return ret;
+
+ if (ret) {
+ /* 44 degrees as radians */
+ *val = PHASE_44_DEG_TO_RAD_INT;
+ *val2 = PHASE_44_DEG_TO_RAD_MICRO;
+ } else {
+ /* 360 degrees as radians */
+ *val = PHASE_360_DEG_TO_RAD_INT;
+ *val2 = PHASE_360_DEG_TO_RAD_MICRO;
+ }
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int ad2s1210_set_phase_lock_range(struct ad2s1210_state *st,
+ int val, int val2)
+{
+ int deg;
+
+ /* convert radians to degrees - only two allowable values */
+ if (val == PHASE_44_DEG_TO_RAD_INT && val2 == PHASE_44_DEG_TO_RAD_MICRO)
+ deg = 44;
+ else if (val == PHASE_360_DEG_TO_RAD_INT &&
+ val2 == PHASE_360_DEG_TO_RAD_MICRO)
+ deg = 360;
+ else
+ return -EINVAL;
+
+ guard(mutex)(&st->lock);
+ return regmap_update_bits(st->regmap, AD2S1210_REG_CONTROL,
+ AD2S1210_PHASE_LOCK_RANGE_44,
+ deg == 44 ? AD2S1210_PHASE_LOCK_RANGE_44 : 0);
+}
+
+/* map resolution to microradians/LSB for LOT registers */
+static const int ad2s1210_lot_threshold_urad_per_lsb[] = {
+ 6184, /* 10-bit: ~0.35 deg/LSB, 45 deg max */
+ 2473, /* 12-bit: ~0.14 deg/LSB, 18 deg max */
+ 1237, /* 14-bit: ~0.07 deg/LSB, 9 deg max */
+ 1237, /* 16-bit: same as 14-bit */
+};
+
+static int ad2s1210_get_voltage_threshold(struct ad2s1210_state *st,
+ unsigned int reg, int *val)
+{
+ unsigned int reg_val;
+ int ret;
+
+ guard(mutex)(&st->lock);
+ ret = regmap_read(st->regmap, reg, &reg_val);
+ if (ret < 0)
+ return ret;
+
+ *val = reg_val * THRESHOLD_MILLIVOLT_PER_LSB;
+ return IIO_VAL_INT;
+}
+
+static int ad2s1210_set_voltage_threshold(struct ad2s1210_state *st,
+ unsigned int reg, int val)
+{
+ unsigned int reg_val;
+
+ reg_val = val / THRESHOLD_MILLIVOLT_PER_LSB;
+
+ guard(mutex)(&st->lock);
+ return regmap_write(st->regmap, reg, reg_val);
+}
+
+static int ad2s1210_get_lot_high_threshold(struct ad2s1210_state *st,
+ int *val, int *val2)
+{
+ unsigned int reg_val;
+ int ret;
+
+ guard(mutex)(&st->lock);
+ ret = regmap_read(st->regmap, AD2S1210_REG_LOT_HIGH_THRD, &reg_val);
+ if (ret < 0)
+ return ret;
+
+ *val = 0;
+ *val2 = reg_val * ad2s1210_lot_threshold_urad_per_lsb[st->resolution];
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int ad2s1210_set_lot_high_threshold(struct ad2s1210_state *st,
+ int val, int val2)
+{
+ unsigned int high_reg_val, low_reg_val, hysteresis;
+ int ret;
+
+ /* all valid values are between 0 and pi/4 radians */
+ if (val != 0)
+ return -EINVAL;
+
+ guard(mutex)(&st->lock);
+ /*
+ * We need to read both high and low registers first so we can preserve
+ * the hysteresis.
+ */
+ ret = regmap_read(st->regmap, AD2S1210_REG_LOT_HIGH_THRD, &high_reg_val);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(st->regmap, AD2S1210_REG_LOT_LOW_THRD, &low_reg_val);
+ if (ret < 0)
+ return ret;
+
+ hysteresis = high_reg_val - low_reg_val;
+ high_reg_val = val2 / ad2s1210_lot_threshold_urad_per_lsb[st->resolution];
+ low_reg_val = high_reg_val - hysteresis;
+
+ ret = regmap_write(st->regmap, AD2S1210_REG_LOT_HIGH_THRD, high_reg_val);
+ if (ret < 0)
+ return ret;
+
+ return regmap_write(st->regmap, AD2S1210_REG_LOT_LOW_THRD, low_reg_val);
+}
+
+static int ad2s1210_get_lot_low_threshold(struct ad2s1210_state *st,
+ int *val, int *val2)
+{
+ unsigned int high_reg_val, low_reg_val;
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_read(st->regmap, AD2S1210_REG_LOT_HIGH_THRD, &high_reg_val);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(st->regmap, AD2S1210_REG_LOT_LOW_THRD, &low_reg_val);
+ if (ret < 0)
+ return ret;
+
+ /* sysfs value is hysteresis rather than actual low value */
+ *val = 0;
+ *val2 = (high_reg_val - low_reg_val) *
+ ad2s1210_lot_threshold_urad_per_lsb[st->resolution];
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int ad2s1210_set_lot_low_threshold(struct ad2s1210_state *st,
+ int val, int val2)
+{
+ unsigned int reg_val, hysteresis;
+ int ret;
+
+ /* all valid values are between 0 and pi/4 radians */
+ if (val != 0)
+ return -EINVAL;
+
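+ /*
+ * The sysfs value is the hysteresis; the LOT low register holds the
+ * high threshold minus the hysteresis.
+ */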
+ hysteresis = val2 / ad2s1210_lot_threshold_urad_per_lsb[st->resolution];
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_read(st->regmap, AD2S1210_REG_LOT_HIGH_THRD, &reg_val);
+ if (ret < 0)
+ return ret;
+
+ return regmap_write(st->regmap, AD2S1210_REG_LOT_LOW_THRD,
+ reg_val - hysteresis);
+}
+
+static int ad2s1210_get_excitation_frequency(struct ad2s1210_state *st, int *val)
+{
+ unsigned int reg_val;
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_read(st->regmap, AD2S1210_REG_EXCIT_FREQ, &reg_val);
+ if (ret < 0)
+ return ret;
+
+ *val = reg_val * st->clkin_hz / (1 << 15);
+ return IIO_VAL_INT;
+}
+
+static int ad2s1210_set_excitation_frequency(struct ad2s1210_state *st, int val)
+{
+ if (val < AD2S1210_MIN_EXCIT || val > AD2S1210_MAX_EXCIT)
+ return -EINVAL;
+
+ guard(mutex)(&st->lock);
+ return ad2s1210_reinit_excitation_frequency(st, val);
+}
+
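+/*
+ * Denominators for the angular velocity scale. The reported scale is
+ * CLKIN (Hz) divided by the entry matching the configured resolution,
+ * in rad/s per LSB.
+ */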
+static const int ad2s1210_velocity_scale[] = {
+ 17089132, /* 8.192MHz / (2*pi * 2500 / 2^15) */
+ 42722830, /* 8.192MHz / (2*pi * 1000 / 2^15) */
+ 85445659, /* 8.192MHz / (2*pi * 500 / 2^15) */
+ 341782638, /* 8.192MHz / (2*pi * 125 / 2^15) */
+};
+
+static int ad2s1210_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ struct ad2s1210_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return ad2s1210_single_conversion(indio_dev, chan, val);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL:
+ /* approx 0.3 arc min converted to radians */
+ *val = 0;
+ *val2 = 95874;
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_ANGL_VEL:
+ *val = st->clkin_hz;
+ *val2 = ad2s1210_velocity_scale[st->resolution];
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_FREQUENCY:
+ switch (chan->type) {
+ case IIO_ALTVOLTAGE:
+ return ad2s1210_get_excitation_frequency(st, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_HYSTERESIS:
+ switch (chan->type) {
+ case IIO_ANGL:
+ return ad2s1210_get_hysteresis(st, val);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad2s1210_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type,
+ int *length, long mask)
+{
+ static const int excitation_frequency_available[] = {
+ AD2S1210_MIN_EXCIT,
+ 250, /* step */
+ AD2S1210_MAX_EXCIT,
+ };
+
+ struct ad2s1210_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_FREQUENCY:
+ switch (chan->type) {
+ case IIO_ALTVOLTAGE:
+ *type = IIO_VAL_INT;
+ *vals = excitation_frequency_available;
+ return IIO_AVAIL_RANGE;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_HYSTERESIS:
+ switch (chan->type) {
+ case IIO_ANGL:
+ *vals = st->hysteresis_available;
+ *type = IIO_VAL_INT;
+ *length = ARRAY_SIZE(st->hysteresis_available);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad2s1210_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct ad2s1210_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_FREQUENCY:
+ switch (chan->type) {
+ case IIO_ALTVOLTAGE:
+ return ad2s1210_set_excitation_frequency(st, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_HYSTERESIS:
+ switch (chan->type) {
+ case IIO_ANGL:
+ return ad2s1210_set_hysteresis(st, val);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_event_spec ad2s1210_position_event_spec[] = {
+ {
+ /* Tracking error exceeds LOT threshold fault. */
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate =
+ /* Loss of tracking high threshold. */
+ BIT(IIO_EV_INFO_VALUE) |
+ /* Loss of tracking low threshold. */
+ BIT(IIO_EV_INFO_HYSTERESIS),
+ },
+};
+
+static const struct iio_event_spec ad2s1210_velocity_event_spec[] = {
+ {
+ /* Velocity exceeds maximum tracking rate fault. */
+ .type = IIO_EV_TYPE_MAG,
+ .dir = IIO_EV_DIR_RISING,
+ },
+};
+
+static const struct iio_event_spec ad2s1210_phase_event_spec[] = {
+ {
+ /* Phase error fault. */
+ .type = IIO_EV_TYPE_MAG,
+ .dir = IIO_EV_DIR_RISING,
+ /* Phase lock range. */
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ },
+};
+
+static const struct iio_event_spec ad2s1210_monitor_signal_event_spec[] = {
+ {
+ /* Sine/cosine below LOS threshold fault. */
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ /* Loss of signal threshold. */
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ /* Sine/cosine DOS overrange fault. */
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ /* Degradation of signal overrange threshold. */
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ /* Sine/cosine DOS mismatch fault. */
+ .type = IIO_EV_TYPE_MAG,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ },
+};
+
+static const struct iio_event_spec ad2s1210_sin_cos_event_spec[] = {
+ {
+ /* Sine/cosine clipping fault. */
+ .type = IIO_EV_TYPE_MAG,
+ .dir = IIO_EV_DIR_EITHER,
+ },
+};
+
+static const struct iio_chan_spec ad2s1210_channels[] = {
+ {
+ .type = IIO_ANGL,
+ .indexed = 1,
+ .channel = 0,
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .info_mask_separate_available =
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ }, {
+ .type = IIO_ANGL_VEL,
+ .indexed = 1,
+ .channel = 0,
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .event_spec = ad2s1210_velocity_event_spec,
+ .num_event_specs = ARRAY_SIZE(ad2s1210_velocity_event_spec),
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+ {
+ /* used to configure LOT thresholds and get tracking error */
+ .type = IIO_ANGL,
+ .indexed = 1,
+ .channel = 1,
+ .scan_index = -1,
+ .event_spec = ad2s1210_position_event_spec,
+ .num_event_specs = ARRAY_SIZE(ad2s1210_position_event_spec),
+ },
+ {
+ /* used to configure phase lock range and get phase lock error */
+ .type = IIO_PHASE,
+ .indexed = 1,
+ .channel = 0,
+ .scan_index = -1,
+ .event_spec = ad2s1210_phase_event_spec,
+ .num_event_specs = ARRAY_SIZE(ad2s1210_phase_event_spec),
+ }, {
+ /* excitation frequency output */
+ .type = IIO_ALTVOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .output = 1,
+ .scan_index = -1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_FREQUENCY),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_FREQUENCY),
+ }, {
+ /* monitor signal */
+ .type = IIO_ALTVOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .scan_index = -1,
+ .event_spec = ad2s1210_monitor_signal_event_spec,
+ .num_event_specs = ARRAY_SIZE(ad2s1210_monitor_signal_event_spec),
+ }, {
+ /* sine input */
+ .type = IIO_ALTVOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+ .scan_index = -1,
+ .event_spec = ad2s1210_sin_cos_event_spec,
+ .num_event_specs = ARRAY_SIZE(ad2s1210_sin_cos_event_spec),
+ }, {
+ /* cosine input */
+ .type = IIO_ALTVOLTAGE,
+ .indexed = 1,
+ .channel = 2,
+ .scan_index = -1,
+ .event_spec = ad2s1210_sin_cos_event_spec,
+ .num_event_specs = ARRAY_SIZE(ad2s1210_sin_cos_event_spec),
+ },
+};
+
+static ssize_t event_attr_voltage_reg_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
+ struct iio_dev_attr *iattr = to_iio_dev_attr(attr);
+ unsigned int value;
+ int ret;
+
+ guard(mutex)(&st->lock);
+ ret = regmap_read(st->regmap, iattr->address, &value);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", value * THRESHOLD_MILLIVOLT_PER_LSB);
+}
+
+static ssize_t event_attr_voltage_reg_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
+ struct iio_dev_attr *iattr = to_iio_dev_attr(attr);
+ u16 data;
+ int ret;
+
+ ret = kstrtou16(buf, 10, &data);
+ if (ret)
+ return -EINVAL;
+
+ guard(mutex)(&st->lock);
+ ret = regmap_write(st->regmap, iattr->address,
+ data / THRESHOLD_MILLIVOLT_PER_LSB);
+ if (ret < 0)
+ return ret;
+
+ return len;
+}
+
+static ssize_t
+in_angl1_thresh_rising_value_available_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
+ int step = ad2s1210_lot_threshold_urad_per_lsb[st->resolution];
+
+ return sysfs_emit(buf, "[0 0.%06d 0.%06d]\n", step, step * 0x7F);
+}
+
+static ssize_t
+in_angl1_thresh_rising_hysteresis_available_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
+ int step = ad2s1210_lot_threshold_urad_per_lsb[st->resolution];
+
+ return sysfs_emit(buf, "[0 0.%06d 0.%06d]\n", step, step * 0x7F);
+}
+
+static IIO_CONST_ATTR(in_phase0_mag_rising_value_available,
+ __stringify(PHASE_44_DEG_TO_RAD_INT) "."
+ __stringify(PHASE_44_DEG_TO_RAD_MICRO) " "
+ __stringify(PHASE_360_DEG_TO_RAD_INT) "."
+ __stringify(PHASE_360_DEG_TO_RAD_MICRO));
+static IIO_CONST_ATTR(in_altvoltage0_thresh_falling_value_available,
+ THRESHOLD_RANGE_STR);
+static IIO_CONST_ATTR(in_altvoltage0_thresh_rising_value_available,
+ THRESHOLD_RANGE_STR);
+static IIO_CONST_ATTR(in_altvoltage0_mag_rising_value_available,
+ THRESHOLD_RANGE_STR);
+static IIO_DEVICE_ATTR(in_altvoltage0_mag_rising_reset_max, 0644,
+ event_attr_voltage_reg_show, event_attr_voltage_reg_store,
+ AD2S1210_REG_DOS_RST_MAX_THRD);
+static IIO_CONST_ATTR(in_altvoltage0_mag_rising_reset_max_available, THRESHOLD_RANGE_STR);
+static IIO_DEVICE_ATTR(in_altvoltage0_mag_rising_reset_min, 0644,
+ event_attr_voltage_reg_show, event_attr_voltage_reg_store,
+ AD2S1210_REG_DOS_RST_MIN_THRD);
+static IIO_CONST_ATTR(in_altvoltage0_mag_rising_reset_min_available, THRESHOLD_RANGE_STR);
+static IIO_DEVICE_ATTR_RO(in_angl1_thresh_rising_value_available, 0);
+static IIO_DEVICE_ATTR_RO(in_angl1_thresh_rising_hysteresis_available, 0);
+
+static struct attribute *ad2s1210_event_attributes[] = {
+ &iio_const_attr_in_phase0_mag_rising_value_available.dev_attr.attr,
+ &iio_const_attr_in_altvoltage0_thresh_falling_value_available.dev_attr.attr,
+ &iio_const_attr_in_altvoltage0_thresh_rising_value_available.dev_attr.attr,
+ &iio_const_attr_in_altvoltage0_mag_rising_value_available.dev_attr.attr,
+ &iio_dev_attr_in_altvoltage0_mag_rising_reset_max.dev_attr.attr,
+ &iio_const_attr_in_altvoltage0_mag_rising_reset_max_available.dev_attr.attr,
+ &iio_dev_attr_in_altvoltage0_mag_rising_reset_min.dev_attr.attr,
+ &iio_const_attr_in_altvoltage0_mag_rising_reset_min_available.dev_attr.attr,
+ &iio_dev_attr_in_angl1_thresh_rising_value_available.dev_attr.attr,
+ &iio_dev_attr_in_angl1_thresh_rising_hysteresis_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad2s1210_event_attribute_group = {
+ .attrs = ad2s1210_event_attributes,
+};
+
+static int ad2s1210_initial(struct ad2s1210_state *st)
+{
+ unsigned int data;
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ /* Use default config register value plus resolution from devicetree. */
+ data = FIELD_PREP(AD2S1210_PHASE_LOCK_RANGE_44, 1);
+ data |= FIELD_PREP(AD2S1210_ENABLE_HYSTERESIS, 1);
+ data |= FIELD_PREP(AD2S1210_SET_ENRES, 0x3);
+ data |= FIELD_PREP(AD2S1210_SET_RES, st->resolution);
+
+ ret = regmap_write(st->regmap, AD2S1210_REG_CONTROL, data);
+ if (ret < 0)
+ return ret;
+
+ return ad2s1210_reinit_excitation_frequency(st, AD2S1210_DEF_EXCIT);
+}
+
+static int ad2s1210_read_label(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ char *label)
+{
+ if (chan->type == IIO_ANGL) {
+ if (chan->channel == 0)
+ return sprintf(label, "position\n");
+ if (chan->channel == 1)
+ return sprintf(label, "tracking error\n");
+ }
+ if (chan->type == IIO_ANGL_VEL)
+ return sprintf(label, "velocity\n");
+ if (chan->type == IIO_PHASE)
+ return sprintf(label, "synthetic reference\n");
+ if (chan->type == IIO_ALTVOLTAGE) {
+ if (chan->output)
+ return sprintf(label, "excitation\n");
+ if (chan->channel == 0)
+ return sprintf(label, "monitor signal\n");
+ if (chan->channel == 1)
+ return sprintf(label, "cosine\n");
+ if (chan->channel == 2)
+ return sprintf(label, "sine\n");
+ }
+
+ return -EINVAL;
+}
+
+static int ad2s1210_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct ad2s1210_state *st = iio_priv(indio_dev);
+
+ switch (chan->type) {
+ case IIO_ANGL:
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ return ad2s1210_get_lot_high_threshold(st, val, val2);
+ case IIO_EV_INFO_HYSTERESIS:
+ return ad2s1210_get_lot_low_threshold(st, val, val2);
+ default:
+ return -EINVAL;
+ }
+ case IIO_ALTVOLTAGE:
+ if (chan->output)
+ return -EINVAL;
+ if (type == IIO_EV_TYPE_THRESH && dir == IIO_EV_DIR_FALLING)
+ return ad2s1210_get_voltage_threshold(st,
+ AD2S1210_REG_LOS_THRD, val);
+ if (type == IIO_EV_TYPE_THRESH && dir == IIO_EV_DIR_RISING)
+ return ad2s1210_get_voltage_threshold(st,
+ AD2S1210_REG_DOS_OVR_THRD, val);
+ if (type == IIO_EV_TYPE_MAG)
+ return ad2s1210_get_voltage_threshold(st,
+ AD2S1210_REG_DOS_MIS_THRD, val);
+ return -EINVAL;
+ case IIO_PHASE:
+ return ad2s1210_get_phase_lock_range(st, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad2s1210_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct ad2s1210_state *st = iio_priv(indio_dev);
+
+ switch (chan->type) {
+ case IIO_ANGL:
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ return ad2s1210_set_lot_high_threshold(st, val, val2);
+ case IIO_EV_INFO_HYSTERESIS:
+ return ad2s1210_set_lot_low_threshold(st, val, val2);
+ default:
+ return -EINVAL;
+ }
+ case IIO_ALTVOLTAGE:
+ if (chan->output)
+ return -EINVAL;
+ if (type == IIO_EV_TYPE_THRESH && dir == IIO_EV_DIR_FALLING)
+ return ad2s1210_set_voltage_threshold(st,
+ AD2S1210_REG_LOS_THRD, val);
+ if (type == IIO_EV_TYPE_THRESH && dir == IIO_EV_DIR_RISING)
+ return ad2s1210_set_voltage_threshold(st,
+ AD2S1210_REG_DOS_OVR_THRD, val);
+ if (type == IIO_EV_TYPE_MAG)
+ return ad2s1210_set_voltage_threshold(st,
+ AD2S1210_REG_DOS_MIS_THRD, val);
+ return -EINVAL;
+ case IIO_PHASE:
+ return ad2s1210_set_phase_lock_range(st, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad2s1210_read_event_label(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ char *label)
+{
+ if (chan->type == IIO_ANGL)
+ return sprintf(label, "LOT\n");
+ if (chan->type == IIO_ANGL_VEL)
+ return sprintf(label, "max tracking rate\n");
+ if (chan->type == IIO_PHASE)
+ return sprintf(label, "phase lock\n");
+ if (chan->type == IIO_ALTVOLTAGE) {
+ if (chan->channel == 0) {
+ if (type == IIO_EV_TYPE_THRESH &&
+ dir == IIO_EV_DIR_FALLING)
+ return sprintf(label, "LOS\n");
+ if (type == IIO_EV_TYPE_THRESH &&
+ dir == IIO_EV_DIR_RISING)
+ return sprintf(label, "DOS overrange\n");
+ if (type == IIO_EV_TYPE_MAG)
+ return sprintf(label, "DOS mismatch\n");
+ }
+ if (chan->channel == 1 || chan->channel == 2)
+ return sprintf(label, "clipped\n");
+ }
+
+ return -EINVAL;
+}
+
+static int ad2s1210_debugfs_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg, unsigned int writeval,
+ unsigned int *readval)
+{
+ struct ad2s1210_state *st = iio_priv(indio_dev);
+
+ guard(mutex)(&st->lock);
+
+ if (readval)
+ return regmap_read(st->regmap, reg, readval);
+
+ return regmap_write(st->regmap, reg, writeval);
+}
+
+static irqreturn_t ad2s1210_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ad2s1210_state *st = iio_priv(indio_dev);
+ size_t chan = 0;
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ memset(&st->scan, 0, sizeof(st->scan));
+ ad2s1210_toggle_sample_line(st);
+
+ if (test_bit(0, indio_dev->active_scan_mask)) {
+ ret = ad2s1210_set_mode(st, MOD_POS);
+ if (ret < 0)
+ goto error_ret;
+
+ ret = spi_read(st->sdev, &st->sample, 3);
+ if (ret < 0)
+ goto error_ret;
+
+ memcpy(&st->scan.chan[chan++], &st->sample.raw, 2);
+ }
+
+ if (test_bit(1, indio_dev->active_scan_mask)) {
+ ret = ad2s1210_set_mode(st, MOD_VEL);
+ if (ret < 0)
+ goto error_ret;
+
+ ret = spi_read(st->sdev, &st->sample, 3);
+ if (ret < 0)
+ goto error_ret;
+
+ memcpy(&st->scan.chan[chan++], &st->sample.raw, 2);
+ }
+
+ ad2s1210_push_events(indio_dev, st->sample.fault, pf->timestamp);
+ iio_push_to_buffers_with_timestamp(indio_dev, &st->scan, pf->timestamp);
+
+error_ret:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_info ad2s1210_info = {
+ .event_attrs = &ad2s1210_event_attribute_group,
+ .read_raw = ad2s1210_read_raw,
+ .read_avail = ad2s1210_read_avail,
+ .write_raw = ad2s1210_write_raw,
+ .read_label = ad2s1210_read_label,
+ .read_event_value = ad2s1210_read_event_value,
+ .write_event_value = ad2s1210_write_event_value,
+ .read_event_label = ad2s1210_read_event_label,
+ .debugfs_reg_access = &ad2s1210_debugfs_reg_access,
+};
+
+static int ad2s1210_setup_properties(struct ad2s1210_state *st)
+{
+ struct device *dev = &st->sdev->dev;
+ u32 val;
+ int ret;
+
+ ret = device_property_read_u32(dev, "assigned-resolution-bits", &val);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to read assigned-resolution-bits property\n");
+
+ if (val < 10 || val > 16)
+ return dev_err_probe(dev, -EINVAL,
+ "resolution out of range: %u\n", val);
+
+ st->resolution = (val - 10) >> 1;
+ /*
+ * These are values that correlate to the hysteresis bit in the Control
+ * register. 0 = disabled, 1 = enabled. When enabled, the actual
+ * hysteresis is +/- 1 LSB of the raw position value. Which bit is the
+ * LSB depends on the specified resolution.
+ */
+ st->hysteresis_available[0] = 0;
+ st->hysteresis_available[1] = 1 << (2 * (AD2S1210_RES_16 -
+ st->resolution));
+
+ return 0;
+}
+
+static int ad2s1210_setup_clocks(struct ad2s1210_state *st)
+{
+ struct device *dev = &st->sdev->dev;
+ struct clk *clk;
+
+ clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "failed to get clock\n");
+
+ st->clkin_hz = clk_get_rate(clk);
+ if (st->clkin_hz < AD2S1210_MIN_CLKIN || st->clkin_hz > AD2S1210_MAX_CLKIN)
+ return dev_err_probe(dev, -EINVAL,
+ "clock frequency out of range: %lu\n",
+ st->clkin_hz);
+
+ return 0;
+}
+
+static int ad2s1210_setup_gpios(struct ad2s1210_state *st)
+{
+ struct device *dev = &st->sdev->dev;
+ struct gpio_descs *resolution_gpios;
+ DECLARE_BITMAP(bitmap, 2);
+ int ret;
+
+ /* should not be sampling on startup */
+ st->sample_gpio = devm_gpiod_get(dev, "sample", GPIOD_OUT_LOW);
+ if (IS_ERR(st->sample_gpio))
+ return dev_err_probe(dev, PTR_ERR(st->sample_gpio),
+ "failed to request sample GPIO\n");
+
+ /* both pins high means that we start in config mode */
+ st->mode_gpios = devm_gpiod_get_array(dev, "mode", GPIOD_OUT_HIGH);
+ if (IS_ERR(st->mode_gpios))
+ return dev_err_probe(dev, PTR_ERR(st->mode_gpios),
+ "failed to request mode GPIOs\n");
+
+ if (st->mode_gpios->ndescs != 2)
+ return dev_err_probe(dev, -EINVAL,
+ "requires exactly 2 mode-gpios\n");
+
+ /*
+ * If resolution gpios are provided, they get set to the required
+ * resolution, otherwise it is assumed the RES0 and RES1 pins are
+ * hard-wired to match the resolution indicated in the devicetree.
+ */
+ resolution_gpios = devm_gpiod_get_array_optional(dev, "resolution",
+ GPIOD_ASIS);
+ if (IS_ERR(resolution_gpios))
+ return dev_err_probe(dev, PTR_ERR(resolution_gpios),
+ "failed to request resolution GPIOs\n");
+
+ if (resolution_gpios) {
+ if (resolution_gpios->ndescs != 2)
+ return dev_err_probe(dev, -EINVAL,
+ "requires exactly 2 resolution-gpios\n");
+
+ bitmap[0] = st->resolution;
+
+ ret = gpiod_set_array_value(resolution_gpios->ndescs,
+ resolution_gpios->desc,
+ resolution_gpios->info,
+ bitmap);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to set resolution gpios\n");
+ }
+
+ return 0;
+}
+
+static const struct regmap_range ad2s1210_regmap_readable_ranges[] = {
+ regmap_reg_range(AD2S1210_REG_POSITION_MSB, AD2S1210_REG_VELOCITY_LSB),
+ regmap_reg_range(AD2S1210_REG_LOS_THRD, AD2S1210_REG_LOT_LOW_THRD),
+ regmap_reg_range(AD2S1210_REG_EXCIT_FREQ, AD2S1210_REG_CONTROL),
+ regmap_reg_range(AD2S1210_REG_FAULT, AD2S1210_REG_FAULT),
+};
+
+static const struct regmap_access_table ad2s1210_regmap_rd_table = {
+ .yes_ranges = ad2s1210_regmap_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ad2s1210_regmap_readable_ranges),
+};
+
+static const struct regmap_range ad2s1210_regmap_writeable_ranges[] = {
+ regmap_reg_range(AD2S1210_REG_LOS_THRD, AD2S1210_REG_LOT_LOW_THRD),
+ regmap_reg_range(AD2S1210_REG_EXCIT_FREQ, AD2S1210_REG_CONTROL),
+ regmap_reg_range(AD2S1210_REG_SOFT_RESET, AD2S1210_REG_SOFT_RESET),
+ regmap_reg_range(AD2S1210_REG_FAULT, AD2S1210_REG_FAULT),
+};
+
+static const struct regmap_access_table ad2s1210_regmap_wr_table = {
+ .yes_ranges = ad2s1210_regmap_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ad2s1210_regmap_writeable_ranges),
+};
+
+static int ad2s1210_setup_regmap(struct ad2s1210_state *st)
+{
+ struct device *dev = &st->sdev->dev;
+ const struct regmap_config config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .disable_locking = true,
+ .reg_read = ad2s1210_regmap_reg_read,
+ .reg_write = ad2s1210_regmap_reg_write,
+ .rd_table = &ad2s1210_regmap_rd_table,
+ .wr_table = &ad2s1210_regmap_wr_table,
+ .can_sleep = true,
+ };
+
+ st->regmap = devm_regmap_init(dev, NULL, st, &config);
+ if (IS_ERR(st->regmap))
+ return dev_err_probe(dev, PTR_ERR(st->regmap),
+ "failed to allocate register map\n");
+
+ return 0;
+}
+
+static int ad2s1210_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct ad2s1210_state *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+ st = iio_priv(indio_dev);
+
+ mutex_init(&st->lock);
+ st->sdev = spi;
+
+ ret = ad2s1210_setup_properties(st);
+ if (ret < 0)
+ return ret;
+
+ ret = ad2s1210_setup_clocks(st);
+ if (ret < 0)
+ return ret;
+
+ ret = ad2s1210_setup_gpios(st);
+ if (ret < 0)
+ return ret;
+
+ ret = ad2s1210_setup_regmap(st);
+ if (ret < 0)
+ return ret;
+
+ ret = ad2s1210_initial(st);
+ if (ret < 0)
+ return ret;
+
+ indio_dev->info = &ad2s1210_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ad2s1210_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad2s1210_channels);
+ indio_dev->name = spi_get_device_id(spi)->name;
+
+ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
+ &iio_pollfunc_store_time,
+ &ad2s1210_trigger_handler, NULL);
+ if (ret < 0)
+ return dev_err_probe(&spi->dev, ret,
+ "iio triggered buffer setup failed\n");
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct of_device_id ad2s1210_of_match[] = {
+ { .compatible = "adi,ad2s1210", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad2s1210_of_match);
+
+static const struct spi_device_id ad2s1210_id[] = {
+ { "ad2s1210" },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad2s1210_id);
+
+static struct spi_driver ad2s1210_driver = {
+ .driver = {
+ .name = "ad2s1210",
+ .of_match_table = ad2s1210_of_match,
+ },
+ .probe = ad2s1210_probe,
+ .id_table = ad2s1210_id,
+};
+module_spi_driver(ad2s1210_driver);
+
+MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices AD2S1210 Resolver to Digital SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/temperature/hid-sensor-temperature.c b/drivers/iio/temperature/hid-sensor-temperature.c
index d40f235af1d4..0143fd478933 100644
--- a/drivers/iio/temperature/hid-sensor-temperature.c
+++ b/drivers/iio/temperature/hid-sensor-temperature.c
@@ -257,7 +257,7 @@ error_remove_trigger:
}
/* Function to deinitialize the processing for usage id */
-static int hid_temperature_remove(struct platform_device *pdev)
+static void hid_temperature_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = dev_get_platdata(&pdev->dev);
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
@@ -265,8 +265,6 @@ static int hid_temperature_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_TEMPERATURE);
hid_sensor_remove_trigger(indio_dev, &temp_st->common_attributes);
-
- return 0;
}
static const struct platform_device_id hid_temperature_ids[] = {
@@ -285,7 +283,7 @@ static struct platform_driver hid_temperature_platform_driver = {
.pm = &hid_sensor_pm_ops,
},
.probe = hid_temperature_probe,
- .remove = hid_temperature_remove,
+ .remove_new = hid_temperature_remove,
};
module_platform_driver(hid_temperature_platform_driver);
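The hid-sensor-temperature change above is the first of many identical conversions in this section: platform drivers move from the int-returning .remove callback to .remove_new, which returns void because the platform core ignores the returned error anyway. A minimal sketch of the resulting shape, with hypothetical foo_* names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

static void foo_remove(struct platform_device *pdev)
{
	/* undo foo_probe(); nothing to return, so nothing to get wrong */
}

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
	},
	.probe = foo_probe,
	.remove_new = foo_remove,	/* was: .remove = foo_remove, returning int */
};
module_platform_driver(foo_driver);

MODULE_DESCRIPTION("Sketch of the .remove_new conversion pattern");
MODULE_LICENSE("GPL");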
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index 07bb5df24ab3..740018d4b3df 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -600,7 +600,7 @@ static int mlx90614_probe(struct i2c_client *client)
data->client = client;
mutex_init(&data->lock);
data->wakeup_gpio = mlx90614_probe_wakeup(client);
- data->chip_info = device_get_match_data(&client->dev);
+ data->chip_info = i2c_get_match_data(client);
mlx90614_wakeup(data);
diff --git a/drivers/iio/temperature/tmp117.c b/drivers/iio/temperature/tmp117.c
index fc02f491688b..059953015ae7 100644
--- a/drivers/iio/temperature/tmp117.c
+++ b/drivers/iio/temperature/tmp117.c
@@ -42,6 +42,12 @@ struct tmp117_data {
s16 calibbias;
};
+struct tmp11x_info {
+ const char *name;
+ struct iio_chan_spec const *channels;
+ int num_channels;
+};
+
static int tmp117_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *channel, int *val,
int *val2, long mask)
@@ -119,57 +125,54 @@ static const struct iio_chan_spec tmp116_channels[] = {
},
};
+static const struct tmp11x_info tmp116_channels_info = {
+ .name = "tmp116",
+ .channels = tmp116_channels,
+ .num_channels = ARRAY_SIZE(tmp116_channels)
+};
+
+static const struct tmp11x_info tmp117_channels_info = {
+ .name = "tmp117",
+ .channels = tmp117_channels,
+ .num_channels = ARRAY_SIZE(tmp117_channels)
+};
+
static const struct iio_info tmp117_info = {
.read_raw = tmp117_read_raw,
.write_raw = tmp117_write_raw,
};
-static int tmp117_identify(struct i2c_client *client)
+static int tmp117_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id;
- unsigned long match_data;
+ const struct tmp11x_info *match_data;
+ struct tmp117_data *data;
+ struct iio_dev *indio_dev;
int dev_id;
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
+ return -EOPNOTSUPP;
+
dev_id = i2c_smbus_read_word_swapped(client, TMP117_REG_DEVICE_ID);
if (dev_id < 0)
return dev_id;
switch (dev_id) {
case TMP116_DEVICE_ID:
+ match_data = &tmp116_channels_info;
+ break;
case TMP117_DEVICE_ID:
- return dev_id;
+ match_data = &tmp117_channels_info;
+ break;
+ default:
+ dev_info(&client->dev,
+ "Unknown device id (0x%x), use fallback compatible\n",
+ dev_id);
+ match_data = i2c_get_match_data(client);
}
- dev_info(&client->dev, "Unknown device id (0x%x), use fallback compatible\n",
- dev_id);
-
- match_data = (uintptr_t)device_get_match_data(&client->dev);
- if (match_data)
- return match_data;
-
- id = i2c_client_get_device_id(client);
- if (id)
- return id->driver_data;
-
- dev_err(&client->dev, "Failed to identify unsupported device\n");
-
- return -ENODEV;
-}
-
-static int tmp117_probe(struct i2c_client *client)
-{
- struct tmp117_data *data;
- struct iio_dev *indio_dev;
- int ret, dev_id;
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
- return -EOPNOTSUPP;
-
- ret = tmp117_identify(client);
- if (ret < 0)
- return ret;
-
- dev_id = ret;
+ if (!match_data)
+ return dev_err_probe(&client->dev, -ENODEV,
+ "Failed to identify unsupported device\n");
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
@@ -181,33 +184,24 @@ static int tmp117_probe(struct i2c_client *client)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &tmp117_info;
+ indio_dev->channels = match_data->channels;
+ indio_dev->num_channels = match_data->num_channels;
+ indio_dev->name = match_data->name;
- switch (dev_id) {
- case TMP116_DEVICE_ID:
- indio_dev->channels = tmp116_channels;
- indio_dev->num_channels = ARRAY_SIZE(tmp116_channels);
- indio_dev->name = "tmp116";
- break;
- case TMP117_DEVICE_ID:
- indio_dev->channels = tmp117_channels;
- indio_dev->num_channels = ARRAY_SIZE(tmp117_channels);
- indio_dev->name = "tmp117";
- break;
- }
return devm_iio_device_register(&client->dev, indio_dev);
}
static const struct of_device_id tmp117_of_match[] = {
- { .compatible = "ti,tmp116", .data = (void *)TMP116_DEVICE_ID },
- { .compatible = "ti,tmp117", .data = (void *)TMP117_DEVICE_ID },
+ { .compatible = "ti,tmp116", .data = &tmp116_channels_info },
+ { .compatible = "ti,tmp117", .data = &tmp117_channels_info },
{ }
};
MODULE_DEVICE_TABLE(of, tmp117_of_match);
static const struct i2c_device_id tmp117_id[] = {
- { "tmp116", TMP116_DEVICE_ID },
- { "tmp117", TMP117_DEVICE_ID },
+ { "tmp116", (kernel_ulong_t)&tmp116_channels_info },
+ { "tmp117", (kernel_ulong_t)&tmp117_channels_info },
{ }
};
MODULE_DEVICE_TABLE(i2c, tmp117_id);
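The tmp117 rework above follows a common pattern: per-chip data lives in a single struct that is referenced from both the OF table and the I2C ID table, and probe retrieves it with i2c_get_match_data() no matter how the device was instantiated. A minimal sketch with hypothetical names (struct bar_info, "vendor,bar-a"):

#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>

struct bar_info {
	const char *name;
};

static const struct bar_info bar_a_info = { .name = "bar-a" };

static int bar_probe(struct i2c_client *client)
{
	/* covers OF/ACPI matching and the plain i2c_device_id fallback */
	const struct bar_info *info = i2c_get_match_data(client);

	if (!info)
		return -ENODEV;

	dev_info(&client->dev, "probed %s\n", info->name);
	return 0;
}

static const struct of_device_id bar_of_match[] = {
	{ .compatible = "vendor,bar-a", .data = &bar_a_info },
	{ }
};
MODULE_DEVICE_TABLE(of, bar_of_match);

static const struct i2c_device_id bar_id[] = {
	{ "bar-a", (kernel_ulong_t)&bar_a_info },
	{ }
};
MODULE_DEVICE_TABLE(i2c, bar_id);

static struct i2c_driver bar_driver = {
	.driver = {
		.name = "bar",
		.of_match_table = bar_of_match,
	},
	.probe = bar_probe,
	.id_table = bar_id,
};
module_i2c_driver(bar_driver);

MODULE_DESCRIPTION("Sketch of the i2c_get_match_data() pattern");
MODULE_LICENSE("GPL");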
diff --git a/drivers/iio/trigger/iio-trig-interrupt.c b/drivers/iio/trigger/iio-trig-interrupt.c
index 5f49cd105fae..dec256bfbd73 100644
--- a/drivers/iio/trigger/iio-trig-interrupt.c
+++ b/drivers/iio/trigger/iio-trig-interrupt.c
@@ -81,7 +81,7 @@ error_ret:
return ret;
}
-static int iio_interrupt_trigger_remove(struct platform_device *pdev)
+static void iio_interrupt_trigger_remove(struct platform_device *pdev)
{
struct iio_trigger *trig;
struct iio_interrupt_trigger_info *trig_info;
@@ -92,13 +92,11 @@ static int iio_interrupt_trigger_remove(struct platform_device *pdev)
free_irq(trig_info->irq, trig);
kfree(trig_info);
iio_trigger_free(trig);
-
- return 0;
}
static struct platform_driver iio_interrupt_trigger_driver = {
.probe = iio_interrupt_trigger_probe,
- .remove = iio_interrupt_trigger_remove,
+ .remove_new = iio_interrupt_trigger_remove,
.driver = {
.name = "iio_interrupt_trigger",
},
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index 3643c4afae67..d76444030a28 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -809,7 +809,7 @@ static int stm32_timer_trigger_probe(struct platform_device *pdev)
return 0;
}
-static int stm32_timer_trigger_remove(struct platform_device *pdev)
+static void stm32_timer_trigger_remove(struct platform_device *pdev)
{
struct stm32_timer_trigger *priv = platform_get_drvdata(pdev);
u32 val;
@@ -824,8 +824,6 @@ static int stm32_timer_trigger_remove(struct platform_device *pdev)
if (priv->enabled)
clk_disable(priv->clk);
-
- return 0;
}
static int stm32_timer_trigger_suspend(struct device *dev)
@@ -904,7 +902,7 @@ MODULE_DEVICE_TABLE(of, stm32_trig_of_match);
static struct platform_driver stm32_timer_trigger_driver = {
.probe = stm32_timer_trigger_probe,
- .remove = stm32_timer_trigger_remove,
+ .remove_new = stm32_timer_trigger_remove,
.driver = {
.name = "stm32-timer-trigger",
.of_match_table = stm32_trig_of_match,
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 95f90699d2b1..51e0c4954600 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -50,7 +50,7 @@ struct evdev_client {
bool revoked;
unsigned long *evmasks[EV_CNT];
unsigned int bufsize;
- struct input_event buffer[];
+ struct input_event buffer[] __counted_by(bufsize);
};
static size_t evdev_get_mask_cnt(unsigned int type)
diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c
index 0b11990ade46..0e935914bc3a 100644
--- a/drivers/input/input-leds.c
+++ b/drivers/input/input-leds.c
@@ -44,7 +44,7 @@ struct input_led {
struct input_leds {
struct input_handle handle;
unsigned int num_leds;
- struct input_led leds[];
+ struct input_led leds[] __counted_by(num_leds);
};
static enum led_brightness input_leds_brightness_get(struct led_classdev *cdev)
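The evdev and input-leds annotations above use __counted_by() to tell the compiler (and FORTIFY_SOURCE/UBSAN bounds checking) which member holds the element count of a flexible array. A minimal sketch of the idiom with a hypothetical event_queue struct; the count member must be set to match the allocation:

#include <linux/input.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct event_queue {
	unsigned int bufsize;	/* number of elements in buffer[] */
	struct input_event buffer[] __counted_by(bufsize);
};

static struct event_queue *event_queue_alloc(unsigned int n)
{
	struct event_queue *q;

	q = kzalloc(struct_size(q, buffer, n), GFP_KERNEL);
	if (!q)
		return NULL;

	q->bufsize = n;		/* keeps the annotation honest for bounds checks */
	return q;
}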
diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c
index 56abc8c6c763..27d95d6cf56e 100644
--- a/drivers/input/joystick/walkera0701.c
+++ b/drivers/input/joystick/walkera0701.c
@@ -296,15 +296,4 @@ static struct parport_driver walkera0701_parport_driver = {
.devmodel = true,
};
-static int __init walkera0701_init(void)
-{
- return parport_register_driver(&walkera0701_parport_driver);
-}
-
-static void __exit walkera0701_exit(void)
-{
- parport_unregister_driver(&walkera0701_parport_driver);
-}
-
-module_init(walkera0701_init);
-module_exit(walkera0701_exit);
+module_parport_driver(walkera0701_parport_driver);
diff --git a/drivers/input/keyboard/adp5520-keys.c b/drivers/input/keyboard/adp5520-keys.c
index 7851ffd678a8..10c248f0c1fc 100644
--- a/drivers/input/keyboard/adp5520-keys.c
+++ b/drivers/input/keyboard/adp5520-keys.c
@@ -168,14 +168,12 @@ static int adp5520_keys_probe(struct platform_device *pdev)
return 0;
}
-static int adp5520_keys_remove(struct platform_device *pdev)
+static void adp5520_keys_remove(struct platform_device *pdev)
{
struct adp5520_keys *dev = platform_get_drvdata(pdev);
adp5520_unregister_notifier(dev->master, &dev->notifier,
ADP5520_KP_IEN | ADP5520_KR_IEN);
-
- return 0;
}
static struct platform_driver adp5520_keys_driver = {
@@ -183,7 +181,7 @@ static struct platform_driver adp5520_keys_driver = {
.name = "adp5520-keys",
},
.probe = adp5520_keys_probe,
- .remove = adp5520_keys_remove,
+ .remove_new = adp5520_keys_remove,
};
module_platform_driver(adp5520_keys_driver);
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index e7ecfca838df..30678a34cf64 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -686,10 +686,11 @@ static umode_t cros_ec_keyb_attr_is_visible(struct kobject *kobj,
return attr->mode;
}
-static const struct attribute_group cros_ec_keyb_attr_group = {
+static const struct attribute_group cros_ec_keyb_group = {
.is_visible = cros_ec_keyb_attr_is_visible,
.attrs = cros_ec_keyb_attrs,
};
+__ATTRIBUTE_GROUPS(cros_ec_keyb);
static int cros_ec_keyb_probe(struct platform_device *pdev)
{
@@ -730,12 +731,6 @@ static int cros_ec_keyb_probe(struct platform_device *pdev)
return err;
}
- err = devm_device_add_group(dev, &cros_ec_keyb_attr_group);
- if (err) {
- dev_err(dev, "failed to create attributes: %d\n", err);
- return err;
- }
-
ckdev->notifier.notifier_call = cros_ec_keyb_work;
err = blocking_notifier_chain_register(&ckdev->ec->event_notifier,
&ckdev->notifier);
@@ -748,14 +743,12 @@ static int cros_ec_keyb_probe(struct platform_device *pdev)
return 0;
}
-static int cros_ec_keyb_remove(struct platform_device *pdev)
+static void cros_ec_keyb_remove(struct platform_device *pdev)
{
struct cros_ec_keyb *ckdev = dev_get_drvdata(&pdev->dev);
blocking_notifier_chain_unregister(&ckdev->ec->event_notifier,
&ckdev->notifier);
-
- return 0;
}
#ifdef CONFIG_ACPI
@@ -779,9 +772,10 @@ static DEFINE_SIMPLE_DEV_PM_OPS(cros_ec_keyb_pm_ops, NULL, cros_ec_keyb_resume);
static struct platform_driver cros_ec_keyb_driver = {
.probe = cros_ec_keyb_probe,
- .remove = cros_ec_keyb_remove,
+ .remove_new = cros_ec_keyb_remove,
.driver = {
.name = "cros-ec-keyb",
+ .dev_groups = cros_ec_keyb_groups,
.of_match_table = of_match_ptr(cros_ec_keyb_of_match),
.acpi_match_table = ACPI_PTR(cros_ec_keyb_acpi_match),
.pm = pm_sleep_ptr(&cros_ec_keyb_pm_ops),
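The cros_ec_keyb change above (and several more below: iqs269a, kxtj9, cyapa, ad7877, ad7879) replaces a hand-rolled devm_device_add_group() call in probe with the driver-core .dev_groups hook, so the core creates and removes the sysfs files itself. A minimal sketch assuming a hypothetical baz platform driver; where an is_visible callback is needed, the group is written out by hand and __ATTRIBUTE_GROUPS() only generates the _groups array, as in those hunks:

#include <linux/device.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>

static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "1\n");
}
static DEVICE_ATTR_RO(version);

static struct attribute *baz_attrs[] = {
	&dev_attr_version.attr,
	NULL
};
ATTRIBUTE_GROUPS(baz);			/* defines baz_group and baz_groups */

static int baz_probe(struct platform_device *pdev)
{
	/* no devm_device_add_group() needed; the core handles baz_groups */
	return 0;
}

static struct platform_driver baz_driver = {
	.driver = {
		.name = "baz",
		.dev_groups = baz_groups,
	},
	.probe = baz_probe,
};
module_platform_driver(baz_driver);

MODULE_DESCRIPTION("Sketch of the .dev_groups conversion pattern");
MODULE_LICENSE("GPL");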
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index 55075addcac2..6b811d6bf625 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -308,11 +308,9 @@ static int ep93xx_keypad_probe(struct platform_device *pdev)
return 0;
}
-static int ep93xx_keypad_remove(struct platform_device *pdev)
+static void ep93xx_keypad_remove(struct platform_device *pdev)
{
dev_pm_clear_wake_irq(&pdev->dev);
-
- return 0;
}
static struct platform_driver ep93xx_keypad_driver = {
@@ -321,7 +319,7 @@ static struct platform_driver ep93xx_keypad_driver = {
.pm = pm_sleep_ptr(&ep93xx_keypad_pm_ops),
},
.probe = ep93xx_keypad_probe,
- .remove = ep93xx_keypad_remove,
+ .remove_new = ep93xx_keypad_remove,
};
module_platform_driver(ep93xx_keypad_driver);
diff --git a/drivers/input/keyboard/iqs62x-keys.c b/drivers/input/keyboard/iqs62x-keys.c
index 02ceebad7bda..688d61244b5f 100644
--- a/drivers/input/keyboard/iqs62x-keys.c
+++ b/drivers/input/keyboard/iqs62x-keys.c
@@ -310,7 +310,7 @@ static int iqs62x_keys_probe(struct platform_device *pdev)
return ret;
}
-static int iqs62x_keys_remove(struct platform_device *pdev)
+static void iqs62x_keys_remove(struct platform_device *pdev)
{
struct iqs62x_keys_private *iqs62x_keys = platform_get_drvdata(pdev);
int ret;
@@ -319,8 +319,6 @@ static int iqs62x_keys_remove(struct platform_device *pdev)
&iqs62x_keys->notifier);
if (ret)
dev_err(&pdev->dev, "Failed to unregister notifier: %d\n", ret);
-
- return 0;
}
static struct platform_driver iqs62x_keys_platform_driver = {
@@ -328,7 +326,7 @@ static struct platform_driver iqs62x_keys_platform_driver = {
.name = "iqs62x-keys",
},
.probe = iqs62x_keys_probe,
- .remove = iqs62x_keys_remove,
+ .remove_new = iqs62x_keys_remove,
};
module_platform_driver(iqs62x_keys_platform_driver);
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index a1b037891af2..50fa764c82d2 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -549,15 +549,13 @@ err_free_mem:
return err;
}
-static int matrix_keypad_remove(struct platform_device *pdev)
+static void matrix_keypad_remove(struct platform_device *pdev)
{
struct matrix_keypad *keypad = platform_get_drvdata(pdev);
matrix_keypad_free_gpio(keypad);
input_unregister_device(keypad->input_dev);
kfree(keypad);
-
- return 0;
}
#ifdef CONFIG_OF
@@ -570,7 +568,7 @@ MODULE_DEVICE_TABLE(of, matrix_keypad_dt_match);
static struct platform_driver matrix_keypad_driver = {
.probe = matrix_keypad_probe,
- .remove = matrix_keypad_remove,
+ .remove_new = matrix_keypad_remove,
.driver = {
.name = "matrix-keypad",
.pm = pm_sleep_ptr(&matrix_keypad_pm_ops),
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index 24440b498645..454fb8675657 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -287,7 +287,7 @@ err2:
return -EINVAL;
}
-static int omap_kp_remove(struct platform_device *pdev)
+static void omap_kp_remove(struct platform_device *pdev)
{
struct omap_kp *omap_kp = platform_get_drvdata(pdev);
@@ -303,13 +303,11 @@ static int omap_kp_remove(struct platform_device *pdev)
input_unregister_device(omap_kp->input);
kfree(omap_kp);
-
- return 0;
}
static struct platform_driver omap_kp_driver = {
.probe = omap_kp_probe,
- .remove = omap_kp_remove,
+ .remove_new = omap_kp_remove,
.driver = {
.name = "omap-keypad",
},
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index 773e55eed88b..d3f8688fdd9c 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -461,11 +461,9 @@ static int omap4_keypad_probe(struct platform_device *pdev)
return 0;
}
-static int omap4_keypad_remove(struct platform_device *pdev)
+static void omap4_keypad_remove(struct platform_device *pdev)
{
dev_pm_clear_wake_irq(&pdev->dev);
-
- return 0;
}
static const struct of_device_id omap_keypad_dt_match[] = {
@@ -476,7 +474,7 @@ MODULE_DEVICE_TABLE(of, omap_keypad_dt_match);
static struct platform_driver omap4_keypad_driver = {
.probe = omap4_keypad_probe,
- .remove = omap4_keypad_remove,
+ .remove_new = omap4_keypad_remove,
.driver = {
.name = "omap4-keypad",
.of_match_table = omap_keypad_dt_match,
diff --git a/drivers/input/keyboard/samsung-keypad.c b/drivers/input/keyboard/samsung-keypad.c
index d85dd2489293..e212eff7687c 100644
--- a/drivers/input/keyboard/samsung-keypad.c
+++ b/drivers/input/keyboard/samsung-keypad.c
@@ -444,7 +444,7 @@ err_unprepare_clk:
return error;
}
-static int samsung_keypad_remove(struct platform_device *pdev)
+static void samsung_keypad_remove(struct platform_device *pdev)
{
struct samsung_keypad *keypad = platform_get_drvdata(pdev);
@@ -453,8 +453,6 @@ static int samsung_keypad_remove(struct platform_device *pdev)
input_unregister_device(keypad->input_dev);
clk_unprepare(keypad->clk);
-
- return 0;
}
static int samsung_keypad_runtime_suspend(struct device *dev)
@@ -589,7 +587,7 @@ MODULE_DEVICE_TABLE(platform, samsung_keypad_driver_ids);
static struct platform_driver samsung_keypad_driver = {
.probe = samsung_keypad_probe,
- .remove = samsung_keypad_remove,
+ .remove_new = samsung_keypad_remove,
.driver = {
.name = "samsung-keypad",
.of_match_table = of_match_ptr(samsung_keypad_dt_match),
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index 2c00320f739f..4ea4fd25c5d2 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -265,7 +265,7 @@ static int sh_keysc_probe(struct platform_device *pdev)
return error;
}
-static int sh_keysc_remove(struct platform_device *pdev)
+static void sh_keysc_remove(struct platform_device *pdev)
{
struct sh_keysc_priv *priv = platform_get_drvdata(pdev);
@@ -279,8 +279,6 @@ static int sh_keysc_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
kfree(priv);
-
- return 0;
}
static int sh_keysc_suspend(struct device *dev)
@@ -321,7 +319,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(sh_keysc_dev_pm_ops,
static struct platform_driver sh_keysc_device_driver = {
.probe = sh_keysc_probe,
- .remove = sh_keysc_remove,
+ .remove_new = sh_keysc_remove,
.driver = {
.name = "sh_keysc",
.pm = pm_sleep_ptr(&sh_keysc_dev_pm_ops),
diff --git a/drivers/input/keyboard/spear-keyboard.c b/drivers/input/keyboard/spear-keyboard.c
index a50fa9915381..557d00a667ce 100644
--- a/drivers/input/keyboard/spear-keyboard.c
+++ b/drivers/input/keyboard/spear-keyboard.c
@@ -272,14 +272,12 @@ static int spear_kbd_probe(struct platform_device *pdev)
return 0;
}
-static int spear_kbd_remove(struct platform_device *pdev)
+static void spear_kbd_remove(struct platform_device *pdev)
{
struct spear_kbd *kbd = platform_get_drvdata(pdev);
input_unregister_device(kbd->input);
clk_unprepare(kbd->clk);
-
- return 0;
}
static int spear_kbd_suspend(struct device *dev)
@@ -375,7 +373,7 @@ MODULE_DEVICE_TABLE(of, spear_kbd_id_table);
static struct platform_driver spear_kbd_driver = {
.probe = spear_kbd_probe,
- .remove = spear_kbd_remove,
+ .remove_new = spear_kbd_remove,
.driver = {
.name = "keyboard",
.pm = pm_sleep_ptr(&spear_kbd_pm_ops),
diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c
index 2c6c53290cc0..2013c0afd0c3 100644
--- a/drivers/input/keyboard/stmpe-keypad.c
+++ b/drivers/input/keyboard/stmpe-keypad.c
@@ -404,20 +404,18 @@ static int stmpe_keypad_probe(struct platform_device *pdev)
return 0;
}
-static int stmpe_keypad_remove(struct platform_device *pdev)
+static void stmpe_keypad_remove(struct platform_device *pdev)
{
struct stmpe_keypad *keypad = platform_get_drvdata(pdev);
stmpe_disable(keypad->stmpe, STMPE_BLOCK_KEYPAD);
-
- return 0;
}
static struct platform_driver stmpe_keypad_driver = {
.driver.name = "stmpe-keypad",
.driver.owner = THIS_MODULE,
.probe = stmpe_keypad_probe,
- .remove = stmpe_keypad_remove,
+ .remove_new = stmpe_keypad_remove,
};
module_platform_driver(stmpe_keypad_driver);
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index c9a823ea45d0..a1765ed8c825 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -14,7 +14,7 @@
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/input/matrix_keypad.h>
@@ -602,9 +602,6 @@ static int tegra_kbc_probe(struct platform_device *pdev)
unsigned int debounce_cnt;
unsigned int scan_time_rows;
unsigned int keymap_rows;
- const struct of_device_id *match;
-
- match = of_match_device(tegra_kbc_of_match, &pdev->dev);
kbc = devm_kzalloc(&pdev->dev, sizeof(*kbc), GFP_KERNEL);
if (!kbc) {
@@ -613,7 +610,7 @@ static int tegra_kbc_probe(struct platform_device *pdev)
}
kbc->dev = &pdev->dev;
- kbc->hw_support = match->data;
+ kbc->hw_support = device_get_match_data(&pdev->dev);
kbc->max_keys = kbc->hw_support->max_rows *
kbc->hw_support->max_columns;
kbc->num_rows_and_columns = kbc->hw_support->max_rows +
diff --git a/drivers/input/misc/88pm80x_onkey.c b/drivers/input/misc/88pm80x_onkey.c
index 51c8a326fd06..31f0702c3d01 100644
--- a/drivers/input/misc/88pm80x_onkey.c
+++ b/drivers/input/misc/88pm80x_onkey.c
@@ -138,14 +138,13 @@ out:
return err;
}
-static int pm80x_onkey_remove(struct platform_device *pdev)
+static void pm80x_onkey_remove(struct platform_device *pdev)
{
struct pm80x_onkey_info *info = platform_get_drvdata(pdev);
pm80x_free_irq(info->pm80x, info->irq, info);
input_unregister_device(info->idev);
kfree(info);
- return 0;
}
static struct platform_driver pm80x_onkey_driver = {
@@ -154,7 +153,7 @@ static struct platform_driver pm80x_onkey_driver = {
.pm = &pm80x_onkey_pm_ops,
},
.probe = pm80x_onkey_probe,
- .remove = pm80x_onkey_remove,
+ .remove_new = pm80x_onkey_remove,
};
module_platform_driver(pm80x_onkey_driver);
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index 4581606a28d6..24f9e9d893de 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -133,20 +133,11 @@ static ssize_t axp20x_store_attr(struct device *dev,
size_t count)
{
struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev);
- char val_str[20];
- size_t len;
int ret, i;
unsigned int val, idx = 0;
unsigned int best_err = UINT_MAX;
- val_str[sizeof(val_str) - 1] = '\0';
- strncpy(val_str, buf, sizeof(val_str) - 1);
- len = strlen(val_str);
-
- if (len && val_str[len - 1] == '\n')
- val_str[len - 1] = '\0';
-
- ret = kstrtouint(val_str, 10, &val);
+ ret = kstrtouint(buf, 10, &val);
if (ret)
return ret;
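The axp20x-pek simplification above works because kstrtouint(), like the other kstrto*() helpers, already accepts a single trailing newline, so sysfs store() callbacks can parse buf directly instead of copying and stripping it first. A minimal sketch with a hypothetical threshold attribute:

#include <linux/device.h>
#include <linux/kstrtox.h>
#include <linux/sysfs.h>

static unsigned int threshold;

static ssize_t threshold_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);	/* "123\n" parses as 123 */
	if (ret)
		return ret;

	threshold = val;
	return count;
}
static DEVICE_ATTR_WO(threshold);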
diff --git a/drivers/input/misc/da9052_onkey.c b/drivers/input/misc/da9052_onkey.c
index 6d1152850a6d..7a1122e1efb9 100644
--- a/drivers/input/misc/da9052_onkey.c
+++ b/drivers/input/misc/da9052_onkey.c
@@ -127,7 +127,7 @@ err_free_mem:
return error;
}
-static int da9052_onkey_remove(struct platform_device *pdev)
+static void da9052_onkey_remove(struct platform_device *pdev)
{
struct da9052_onkey *onkey = platform_get_drvdata(pdev);
@@ -136,13 +136,11 @@ static int da9052_onkey_remove(struct platform_device *pdev)
input_unregister_device(onkey->input);
kfree(onkey);
-
- return 0;
}
static struct platform_driver da9052_onkey_driver = {
.probe = da9052_onkey_probe,
- .remove = da9052_onkey_remove,
+ .remove_new = da9052_onkey_remove,
.driver = {
.name = "da9052-onkey",
},
diff --git a/drivers/input/misc/da9055_onkey.c b/drivers/input/misc/da9055_onkey.c
index 7a0d3a1d503c..871812f1b398 100644
--- a/drivers/input/misc/da9055_onkey.c
+++ b/drivers/input/misc/da9055_onkey.c
@@ -132,7 +132,7 @@ err_free_input:
return err;
}
-static int da9055_onkey_remove(struct platform_device *pdev)
+static void da9055_onkey_remove(struct platform_device *pdev)
{
struct da9055_onkey *onkey = platform_get_drvdata(pdev);
int irq = platform_get_irq_byname(pdev, "ONKEY");
@@ -141,13 +141,11 @@ static int da9055_onkey_remove(struct platform_device *pdev)
free_irq(irq, onkey);
cancel_delayed_work_sync(&onkey->work);
input_unregister_device(onkey->input);
-
- return 0;
}
static struct platform_driver da9055_onkey_driver = {
.probe = da9055_onkey_probe,
- .remove = da9055_onkey_remove,
+ .remove_new = da9055_onkey_remove,
.driver = {
.name = "da9055-onkey",
},
diff --git a/drivers/input/misc/ideapad_slidebar.c b/drivers/input/misc/ideapad_slidebar.c
index 68f1c584da05..fa4e7f67d713 100644
--- a/drivers/input/misc/ideapad_slidebar.c
+++ b/drivers/input/misc/ideapad_slidebar.c
@@ -256,20 +256,18 @@ err_release_ports:
return err;
}
-static int ideapad_remove(struct platform_device *pdev)
+static void ideapad_remove(struct platform_device *pdev)
{
i8042_remove_filter(slidebar_i8042_filter);
input_unregister_device(slidebar_input_dev);
release_region(IDEAPAD_BASE, 3);
-
- return 0;
}
static struct platform_driver slidebar_drv = {
.driver = {
.name = "ideapad_slidebar",
},
- .remove = ideapad_remove,
+ .remove_new = ideapad_remove,
};
static int __init ideapad_dmi_check(const struct dmi_system_id *id)
diff --git a/drivers/input/misc/iqs269a.c b/drivers/input/misc/iqs269a.c
index c0a085639870..3c636c75e8a1 100644
--- a/drivers/input/misc/iqs269a.c
+++ b/drivers/input/misc/iqs269a.c
@@ -1586,10 +1586,7 @@ static struct attribute *iqs269_attrs[] = {
&dev_attr_ati_trigger.attr,
NULL,
};
-
-static const struct attribute_group iqs269_attr_group = {
- .attrs = iqs269_attrs,
-};
+ATTRIBUTE_GROUPS(iqs269);
static const struct regmap_config iqs269_regmap_config = {
.reg_bits = 8,
@@ -1671,10 +1668,6 @@ static int iqs269_probe(struct i2c_client *client)
return error;
}
- error = devm_device_add_group(&client->dev, &iqs269_attr_group);
- if (error)
- dev_err(&client->dev, "Failed to add attributes: %d\n", error);
-
return error;
}
@@ -1743,6 +1736,7 @@ MODULE_DEVICE_TABLE(of, iqs269_of_match);
static struct i2c_driver iqs269_i2c_driver = {
.driver = {
.name = "iqs269a",
+ .dev_groups = iqs269_groups,
.of_match_table = iqs269_of_match,
.pm = pm_sleep_ptr(&iqs269_pm),
},
diff --git a/drivers/input/misc/kxtj9.c b/drivers/input/misc/kxtj9.c
index 912e614d039d..d47269b10e9a 100644
--- a/drivers/input/misc/kxtj9.c
+++ b/drivers/input/misc/kxtj9.c
@@ -334,14 +334,25 @@ static ssize_t kxtj9_set_poll(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(poll, S_IRUGO|S_IWUSR, kxtj9_get_poll, kxtj9_set_poll);
-static struct attribute *kxtj9_attributes[] = {
+static struct attribute *kxtj9_attrs[] = {
&dev_attr_poll.attr,
NULL
};
-static struct attribute_group kxtj9_attribute_group = {
- .attrs = kxtj9_attributes
+static umode_t kxtj9_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct i2c_client *client = to_i2c_client(dev);
+
+ return client->irq ? attr->mode : 0;
+}
+
+static struct attribute_group kxtj9_group = {
+ .attrs = kxtj9_attrs,
+ .is_visible = kxtj9_attr_is_visible,
};
+__ATTRIBUTE_GROUPS(kxtj9);
static void kxtj9_poll(struct input_dev *input)
{
@@ -482,13 +493,6 @@ static int kxtj9_probe(struct i2c_client *client)
dev_err(&client->dev, "request irq failed: %d\n", err);
return err;
}
-
- err = devm_device_add_group(&client->dev,
- &kxtj9_attribute_group);
- if (err) {
- dev_err(&client->dev, "sysfs create failed: %d\n", err);
- return err;
- }
}
return 0;
@@ -535,8 +539,9 @@ MODULE_DEVICE_TABLE(i2c, kxtj9_id);
static struct i2c_driver kxtj9_driver = {
.driver = {
- .name = NAME,
- .pm = pm_sleep_ptr(&kxtj9_pm_ops),
+ .name = NAME,
+ .dev_groups = kxtj9_groups,
+ .pm = pm_sleep_ptr(&kxtj9_pm_ops),
},
.probe = kxtj9_probe,
.id_table = kxtj9_id,
diff --git a/drivers/input/misc/m68kspkr.c b/drivers/input/misc/m68kspkr.c
index 25fcf1467151..3fe0a85c45e0 100644
--- a/drivers/input/misc/m68kspkr.c
+++ b/drivers/input/misc/m68kspkr.c
@@ -75,15 +75,13 @@ static int m68kspkr_probe(struct platform_device *dev)
return 0;
}
-static int m68kspkr_remove(struct platform_device *dev)
+static void m68kspkr_remove(struct platform_device *dev)
{
struct input_dev *input_dev = platform_get_drvdata(dev);
input_unregister_device(input_dev);
/* turn off the speaker */
m68kspkr_event(NULL, EV_SND, SND_BELL, 0);
-
- return 0;
}
static void m68kspkr_shutdown(struct platform_device *dev)
@@ -97,7 +95,7 @@ static struct platform_driver m68kspkr_platform_driver = {
.name = "m68kspkr",
},
.probe = m68kspkr_probe,
- .remove = m68kspkr_remove,
+ .remove_new = m68kspkr_remove,
.shutdown = m68kspkr_shutdown,
};
diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
index c4dff476d479..8861a67be575 100644
--- a/drivers/input/misc/max8997_haptic.c
+++ b/drivers/input/misc/max8997_haptic.c
@@ -351,7 +351,7 @@ err_free_mem:
return error;
}
-static int max8997_haptic_remove(struct platform_device *pdev)
+static void max8997_haptic_remove(struct platform_device *pdev)
{
struct max8997_haptic *chip = platform_get_drvdata(pdev);
@@ -362,8 +362,6 @@ static int max8997_haptic_remove(struct platform_device *pdev)
pwm_put(chip->pwm);
kfree(chip);
-
- return 0;
}
static int max8997_haptic_suspend(struct device *dev)
@@ -391,7 +389,7 @@ static struct platform_driver max8997_haptic_driver = {
.pm = pm_sleep_ptr(&max8997_haptic_pm_ops),
},
.probe = max8997_haptic_probe,
- .remove = max8997_haptic_remove,
+ .remove_new = max8997_haptic_remove,
.id_table = max8997_haptic_id,
};
module_platform_driver(max8997_haptic_driver);
diff --git a/drivers/input/misc/mc13783-pwrbutton.c b/drivers/input/misc/mc13783-pwrbutton.c
index 0636eee4bb6c..1c8c939638f6 100644
--- a/drivers/input/misc/mc13783-pwrbutton.c
+++ b/drivers/input/misc/mc13783-pwrbutton.c
@@ -229,7 +229,7 @@ free_input_dev:
return err;
}
-static int mc13783_pwrbutton_remove(struct platform_device *pdev)
+static void mc13783_pwrbutton_remove(struct platform_device *pdev)
{
struct mc13783_pwrb *priv = platform_get_drvdata(pdev);
const struct mc13xxx_buttons_platform_data *pdata;
@@ -249,13 +249,11 @@ static int mc13783_pwrbutton_remove(struct platform_device *pdev)
input_unregister_device(priv->pwr);
kfree(priv);
-
- return 0;
}
static struct platform_driver mc13783_pwrbutton_driver = {
.probe = mc13783_pwrbutton_probe,
- .remove = mc13783_pwrbutton_remove,
+ .remove_new = mc13783_pwrbutton_remove,
.driver = {
.name = "mc13783-pwrbutton",
},
diff --git a/drivers/input/misc/palmas-pwrbutton.c b/drivers/input/misc/palmas-pwrbutton.c
index 7e361727b0d9..06d5972e8e84 100644
--- a/drivers/input/misc/palmas-pwrbutton.c
+++ b/drivers/input/misc/palmas-pwrbutton.c
@@ -245,7 +245,7 @@ err_free_mem:
*
* Return: 0
*/
-static int palmas_pwron_remove(struct platform_device *pdev)
+static void palmas_pwron_remove(struct platform_device *pdev)
{
struct palmas_pwron *pwron = platform_get_drvdata(pdev);
@@ -254,8 +254,6 @@ static int palmas_pwron_remove(struct platform_device *pdev)
input_unregister_device(pwron->input_dev);
kfree(pwron);
-
- return 0;
}
/**
@@ -312,7 +310,7 @@ MODULE_DEVICE_TABLE(of, of_palmas_pwr_match);
static struct platform_driver palmas_pwron_driver = {
.probe = palmas_pwron_probe,
- .remove = palmas_pwron_remove,
+ .remove_new = palmas_pwron_remove,
.driver = {
.name = "palmas_pwrbutton",
.of_match_table = of_match_ptr(of_palmas_pwr_match),
diff --git a/drivers/input/misc/pcap_keys.c b/drivers/input/misc/pcap_keys.c
index b5a53636d7e2..8a7e9ada5952 100644
--- a/drivers/input/misc/pcap_keys.c
+++ b/drivers/input/misc/pcap_keys.c
@@ -99,7 +99,7 @@ fail:
return err;
}
-static int pcap_keys_remove(struct platform_device *pdev)
+static void pcap_keys_remove(struct platform_device *pdev)
{
struct pcap_keys *pcap_keys = platform_get_drvdata(pdev);
@@ -108,13 +108,11 @@ static int pcap_keys_remove(struct platform_device *pdev)
input_unregister_device(pcap_keys->input);
kfree(pcap_keys);
-
- return 0;
}
static struct platform_driver pcap_keys_device_driver = {
.probe = pcap_keys_probe,
- .remove = pcap_keys_remove,
+ .remove_new = pcap_keys_remove,
.driver = {
.name = "pcap-keys",
}
diff --git a/drivers/input/misc/pcf50633-input.c b/drivers/input/misc/pcf50633-input.c
index 4c60c70c4c10..c5c5fe236c18 100644
--- a/drivers/input/misc/pcf50633-input.c
+++ b/drivers/input/misc/pcf50633-input.c
@@ -87,7 +87,7 @@ static int pcf50633_input_probe(struct platform_device *pdev)
return 0;
}
-static int pcf50633_input_remove(struct platform_device *pdev)
+static void pcf50633_input_remove(struct platform_device *pdev)
{
struct pcf50633_input *input = platform_get_drvdata(pdev);
@@ -96,8 +96,6 @@ static int pcf50633_input_remove(struct platform_device *pdev)
input_unregister_device(input->input_dev);
kfree(input);
-
- return 0;
}
static struct platform_driver pcf50633_input_driver = {
@@ -105,7 +103,7 @@ static struct platform_driver pcf50633_input_driver = {
.name = "pcf50633-input",
},
.probe = pcf50633_input_probe,
- .remove = pcf50633_input_remove,
+ .remove_new = pcf50633_input_remove,
};
module_platform_driver(pcf50633_input_driver);
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index 9c666b2f14fe..897854fd245f 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -95,15 +95,13 @@ static int pcspkr_probe(struct platform_device *dev)
return 0;
}
-static int pcspkr_remove(struct platform_device *dev)
+static void pcspkr_remove(struct platform_device *dev)
{
struct input_dev *pcspkr_dev = platform_get_drvdata(dev);
input_unregister_device(pcspkr_dev);
/* turn off the speaker */
pcspkr_event(NULL, EV_SND, SND_BELL, 0);
-
- return 0;
}
static int pcspkr_suspend(struct device *dev)
@@ -129,7 +127,7 @@ static struct platform_driver pcspkr_platform_driver = {
.pm = &pcspkr_pm_ops,
},
.probe = pcspkr_probe,
- .remove = pcspkr_remove,
+ .remove_new = pcspkr_remove,
.shutdown = pcspkr_shutdown,
};
module_platform_driver(pcspkr_platform_driver);
diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c
index ba747c5b2b5f..bab710023d8f 100644
--- a/drivers/input/misc/pm8941-pwrkey.c
+++ b/drivers/input/misc/pm8941-pwrkey.c
@@ -408,14 +408,12 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
return 0;
}
-static int pm8941_pwrkey_remove(struct platform_device *pdev)
+static void pm8941_pwrkey_remove(struct platform_device *pdev)
{
struct pm8941_pwrkey *pwrkey = platform_get_drvdata(pdev);
if (pwrkey->data->supports_ps_hold_poff_config)
unregister_reboot_notifier(&pwrkey->reboot_notifier);
-
- return 0;
}
static const struct pm8941_data pwrkey_data = {
@@ -467,7 +465,7 @@ MODULE_DEVICE_TABLE(of, pm8941_pwr_key_id_table);
static struct platform_driver pm8941_pwrkey_driver = {
.probe = pm8941_pwrkey_probe,
- .remove = pm8941_pwrkey_remove,
+ .remove_new = pm8941_pwrkey_remove,
.driver = {
.name = "pm8941-pwrkey",
.pm = pm_sleep_ptr(&pm8941_pwr_key_pm_ops),
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index e79f5497948b..08bcee3d6bcc 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -411,7 +411,7 @@ out:
return button_info;
}
-static int soc_button_remove(struct platform_device *pdev)
+static void soc_button_remove(struct platform_device *pdev)
{
struct soc_button_data *priv = platform_get_drvdata(pdev);
@@ -420,8 +420,6 @@ static int soc_button_remove(struct platform_device *pdev)
for (i = 0; i < BUTTON_TYPES; i++)
if (priv->children[i])
platform_device_unregister(priv->children[i]);
-
- return 0;
}
static int soc_button_probe(struct platform_device *pdev)
@@ -609,7 +607,7 @@ MODULE_DEVICE_TABLE(acpi, soc_button_acpi_match);
static struct platform_driver soc_button_driver = {
.probe = soc_button_probe,
- .remove = soc_button_remove,
+ .remove_new = soc_button_remove,
.driver = {
.name = KBUILD_MODNAME,
.acpi_match_table = ACPI_PTR(soc_button_acpi_match),
diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c
index e5dd84725c6e..20020cbc0752 100644
--- a/drivers/input/misc/sparcspkr.c
+++ b/drivers/input/misc/sparcspkr.c
@@ -231,7 +231,7 @@ out_err:
return err;
}
-static int bbc_remove(struct platform_device *op)
+static void bbc_remove(struct platform_device *op)
{
struct sparcspkr_state *state = platform_get_drvdata(op);
struct input_dev *input_dev = state->input_dev;
@@ -245,8 +245,6 @@ static int bbc_remove(struct platform_device *op)
of_iounmap(&op->resource[0], info->regs, 6);
kfree(state);
-
- return 0;
}
static const struct of_device_id bbc_beep_match[] = {
@@ -264,7 +262,7 @@ static struct platform_driver bbc_beep_driver = {
.of_match_table = bbc_beep_match,
},
.probe = bbc_beep_probe,
- .remove = bbc_remove,
+ .remove_new = bbc_remove,
.shutdown = sparcspkr_shutdown,
};
@@ -310,7 +308,7 @@ out_err:
return err;
}
-static int grover_remove(struct platform_device *op)
+static void grover_remove(struct platform_device *op)
{
struct sparcspkr_state *state = platform_get_drvdata(op);
struct grover_beep_info *info = &state->u.grover;
@@ -325,8 +323,6 @@ static int grover_remove(struct platform_device *op)
of_iounmap(&op->resource[2], info->freq_regs, 2);
kfree(state);
-
- return 0;
}
static const struct of_device_id grover_beep_match[] = {
@@ -344,7 +340,7 @@ static struct platform_driver grover_beep_driver = {
.of_match_table = grover_beep_match,
},
.probe = grover_beep_probe,
- .remove = grover_remove,
+ .remove_new = grover_remove,
.shutdown = sparcspkr_shutdown,
};
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index 111cb70cde46..5c4956678cd0 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -1286,13 +1286,11 @@ static int wistron_probe(struct platform_device *dev)
return 0;
}
-static int wistron_remove(struct platform_device *dev)
+static void wistron_remove(struct platform_device *dev)
{
wistron_led_remove();
input_unregister_device(wistron_idev);
bios_detach();
-
- return 0;
}
static int wistron_suspend(struct device *dev)
@@ -1336,7 +1334,7 @@ static struct platform_driver wistron_driver = {
.pm = pm_sleep_ptr(&wistron_pm_ops),
},
.probe = wistron_probe,
- .remove = wistron_remove,
+ .remove_new = wistron_remove,
};
static int __init wb_module_init(void)
diff --git a/drivers/input/misc/wm831x-on.c b/drivers/input/misc/wm831x-on.c
index a42fe041b73c..e4a06c73b72d 100644
--- a/drivers/input/misc/wm831x-on.c
+++ b/drivers/input/misc/wm831x-on.c
@@ -123,20 +123,18 @@ err:
return ret;
}
-static int wm831x_on_remove(struct platform_device *pdev)
+static void wm831x_on_remove(struct platform_device *pdev)
{
struct wm831x_on *wm831x_on = platform_get_drvdata(pdev);
int irq = platform_get_irq(pdev, 0);
free_irq(irq, wm831x_on);
cancel_delayed_work_sync(&wm831x_on->work);
-
- return 0;
}
static struct platform_driver wm831x_on_driver = {
.probe = wm831x_on_probe,
- .remove = wm831x_on_remove,
+ .remove_new = wm831x_on_remove,
.driver = {
.name = "wm831x-on",
},
diff --git a/drivers/input/mouse/cyapa.c b/drivers/input/mouse/cyapa.c
index 05851bc32541..a84098448f5b 100644
--- a/drivers/input/mouse/cyapa.c
+++ b/drivers/input/mouse/cyapa.c
@@ -1223,7 +1223,7 @@ static DEVICE_ATTR(baseline, S_IRUGO, cyapa_show_baseline, NULL);
static DEVICE_ATTR(calibrate, S_IWUSR, NULL, cyapa_calibrate_store);
static DEVICE_ATTR(mode, S_IRUGO, cyapa_show_mode, NULL);
-static struct attribute *cyapa_sysfs_entries[] = {
+static struct attribute *cyapa_attrs[] = {
&dev_attr_firmware_version.attr,
&dev_attr_product_id.attr,
&dev_attr_update_fw.attr,
@@ -1232,10 +1232,7 @@ static struct attribute *cyapa_sysfs_entries[] = {
&dev_attr_mode.attr,
NULL,
};
-
-static const struct attribute_group cyapa_sysfs_group = {
- .attrs = cyapa_sysfs_entries,
-};
+ATTRIBUTE_GROUPS(cyapa);
static void cyapa_disable_regulator(void *data)
{
@@ -1302,12 +1299,6 @@ static int cyapa_probe(struct i2c_client *client)
return error;
}
- error = devm_device_add_group(dev, &cyapa_sysfs_group);
- if (error) {
- dev_err(dev, "failed to create sysfs entries: %d\n", error);
- return error;
- }
-
error = cyapa_prepare_wakeup_controls(cyapa);
if (error) {
dev_err(dev, "failed to prepare wakeup controls: %d\n", error);
@@ -1484,6 +1475,7 @@ MODULE_DEVICE_TABLE(of, cyapa_of_match);
static struct i2c_driver cyapa_driver = {
.driver = {
.name = "cyapa",
+ .dev_groups = cyapa_groups,
.pm = pm_ptr(&cyapa_pm_ops),
.acpi_match_table = ACPI_PTR(cyapa_acpi_id),
.of_match_table = of_match_ptr(cyapa_of_match),
diff --git a/drivers/input/mouse/navpoint.c b/drivers/input/mouse/navpoint.c
index 2b7b86eef280..c00dc1275da2 100644
--- a/drivers/input/mouse/navpoint.c
+++ b/drivers/input/mouse/navpoint.c
@@ -295,7 +295,7 @@ err_free_gpio:
return error;
}
-static int navpoint_remove(struct platform_device *pdev)
+static void navpoint_remove(struct platform_device *pdev)
{
const struct navpoint_platform_data *pdata =
dev_get_platdata(&pdev->dev);
@@ -311,8 +311,6 @@ static int navpoint_remove(struct platform_device *pdev)
if (gpio_is_valid(pdata->gpio))
gpio_free(pdata->gpio);
-
- return 0;
}
static int navpoint_suspend(struct device *dev)
@@ -348,7 +346,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(navpoint_pm_ops,
static struct platform_driver navpoint_driver = {
.probe = navpoint_probe,
- .remove = navpoint_remove,
+ .remove_new = navpoint_remove,
.driver = {
.name = "navpoint",
.pm = pm_sleep_ptr(&navpoint_pm_ops),
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
index f2e093b0b998..1b45b1d3077d 100644
--- a/drivers/input/rmi4/rmi_bus.c
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -277,11 +277,11 @@ void rmi_unregister_function(struct rmi_function *fn)
device_del(&fn->dev);
of_node_put(fn->dev.of_node);
- put_device(&fn->dev);
for (i = 0; i < fn->num_of_irqs; i++)
irq_dispose_mapping(fn->irq[i]);
+ put_device(&fn->dev);
}
/**
diff --git a/drivers/input/rmi4/rmi_f34.c b/drivers/input/rmi4/rmi_f34.c
index 0d9a5756e3f5..3b3ac71e53dc 100644
--- a/drivers/input/rmi4/rmi_f34.c
+++ b/drivers/input/rmi4/rmi_f34.c
@@ -471,7 +471,7 @@ static ssize_t rmi_driver_update_fw_store(struct device *dev,
if (buf[count - 1] == '\0' || buf[count - 1] == '\n')
copy_count -= 1;
- strncpy(fw_name, buf, copy_count);
+ memcpy(fw_name, buf, copy_count);
fw_name[copy_count] = '\0';
ret = request_firmware(&fw, fw_name, dev);
diff --git a/drivers/input/serio/altera_ps2.c b/drivers/input/serio/altera_ps2.c
index 9f8d7b332d1b..c5b634940cfc 100644
--- a/drivers/input/serio/altera_ps2.c
+++ b/drivers/input/serio/altera_ps2.c
@@ -125,13 +125,11 @@ static int altera_ps2_probe(struct platform_device *pdev)
/*
* Remove one device from this driver.
*/
-static int altera_ps2_remove(struct platform_device *pdev)
+static void altera_ps2_remove(struct platform_device *pdev)
{
struct ps2if *ps2if = platform_get_drvdata(pdev);
serio_unregister_port(ps2if->io);
-
- return 0;
}
#ifdef CONFIG_OF
@@ -148,7 +146,7 @@ MODULE_DEVICE_TABLE(of, altera_ps2_match);
*/
static struct platform_driver altera_ps2_driver = {
.probe = altera_ps2_probe,
- .remove = altera_ps2_remove,
+ .remove_new = altera_ps2_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(altera_ps2_match),
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c
index ec93cb4573c3..0bd6ae106809 100644
--- a/drivers/input/serio/ams_delta_serio.c
+++ b/drivers/input/serio/ams_delta_serio.c
@@ -173,18 +173,16 @@ static int ams_delta_serio_init(struct platform_device *pdev)
return 0;
}
-static int ams_delta_serio_exit(struct platform_device *pdev)
+static void ams_delta_serio_exit(struct platform_device *pdev)
{
struct ams_delta_serio *priv = platform_get_drvdata(pdev);
serio_unregister_port(priv->serio);
-
- return 0;
}
static struct platform_driver ams_delta_serio_driver = {
.probe = ams_delta_serio_init,
- .remove = ams_delta_serio_exit,
+ .remove_new = ams_delta_serio_exit,
.driver = {
.name = DRIVER_NAME
},
diff --git a/drivers/input/serio/apbps2.c b/drivers/input/serio/apbps2.c
index 3f6866d39b86..dbbb10251520 100644
--- a/drivers/input/serio/apbps2.c
+++ b/drivers/input/serio/apbps2.c
@@ -187,13 +187,11 @@ static int apbps2_of_probe(struct platform_device *ofdev)
return 0;
}
-static int apbps2_of_remove(struct platform_device *of_dev)
+static void apbps2_of_remove(struct platform_device *of_dev)
{
struct apbps2_priv *priv = platform_get_drvdata(of_dev);
serio_unregister_port(priv->io);
-
- return 0;
}
static const struct of_device_id apbps2_of_match[] = {
@@ -210,7 +208,7 @@ static struct platform_driver apbps2_of_driver = {
.of_match_table = apbps2_of_match,
},
.probe = apbps2_of_probe,
- .remove = apbps2_of_remove,
+ .remove_new = apbps2_of_remove,
};
module_platform_driver(apbps2_of_driver);
diff --git a/drivers/input/serio/arc_ps2.c b/drivers/input/serio/arc_ps2.c
index a6debb13d527..9d8726830140 100644
--- a/drivers/input/serio/arc_ps2.c
+++ b/drivers/input/serio/arc_ps2.c
@@ -232,7 +232,7 @@ static int arc_ps2_probe(struct platform_device *pdev)
return 0;
}
-static int arc_ps2_remove(struct platform_device *pdev)
+static void arc_ps2_remove(struct platform_device *pdev)
{
struct arc_ps2_data *arc_ps2 = platform_get_drvdata(pdev);
int i;
@@ -244,8 +244,6 @@ static int arc_ps2_remove(struct platform_device *pdev)
dev_dbg(&pdev->dev, "frame error count = %i\n", arc_ps2->frame_error);
dev_dbg(&pdev->dev, "buffer overflow count = %i\n",
arc_ps2->buf_overflow);
-
- return 0;
}
#ifdef CONFIG_OF
@@ -262,7 +260,7 @@ static struct platform_driver arc_ps2_driver = {
.of_match_table = of_match_ptr(arc_ps2_match),
},
.probe = arc_ps2_probe,
- .remove = arc_ps2_remove,
+ .remove_new = arc_ps2_remove,
};
module_platform_driver(arc_ps2_driver);
diff --git a/drivers/input/serio/ct82c710.c b/drivers/input/serio/ct82c710.c
index 3da751f4a6bf..d5c9bb3d0103 100644
--- a/drivers/input/serio/ct82c710.c
+++ b/drivers/input/serio/ct82c710.c
@@ -180,11 +180,9 @@ static int ct82c710_probe(struct platform_device *dev)
return 0;
}
-static int ct82c710_remove(struct platform_device *dev)
+static void ct82c710_remove(struct platform_device *dev)
{
serio_unregister_port(ct82c710_port);
-
- return 0;
}
static struct platform_driver ct82c710_driver = {
@@ -192,7 +190,7 @@ static struct platform_driver ct82c710_driver = {
.name = "ct82c710",
},
.probe = ct82c710_probe,
- .remove = ct82c710_remove,
+ .remove_new = ct82c710_remove,
};
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index b68793bf05c8..c2fda54dc384 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -82,11 +82,9 @@ static int sparc_i8042_probe(struct platform_device *op)
return 0;
}
-static int sparc_i8042_remove(struct platform_device *op)
+static void sparc_i8042_remove(struct platform_device *op)
{
of_iounmap(kbd_res, kbd_iobase, 8);
-
- return 0;
}
static const struct of_device_id sparc_i8042_match[] = {
@@ -103,7 +101,7 @@ static struct platform_driver sparc_i8042_driver = {
.of_match_table = sparc_i8042_match,
},
.probe = sparc_i8042_probe,
- .remove = sparc_i8042_remove,
+ .remove_new = sparc_i8042_remove,
};
static bool i8042_is_mr_coffee(void)
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 6dac7c1853a5..9fbb8d31575a 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1584,13 +1584,11 @@ static int i8042_probe(struct platform_device *dev)
return error;
}
-static int i8042_remove(struct platform_device *dev)
+static void i8042_remove(struct platform_device *dev)
{
i8042_unregister_ports();
i8042_free_irqs();
i8042_controller_reset(false);
-
- return 0;
}
static struct platform_driver i8042_driver = {
@@ -1601,7 +1599,7 @@ static struct platform_driver i8042_driver = {
#endif
},
.probe = i8042_probe,
- .remove = i8042_remove,
+ .remove_new = i8042_remove,
.shutdown = i8042_shutdown,
};
diff --git a/drivers/input/serio/ioc3kbd.c b/drivers/input/serio/ioc3kbd.c
index d51bfe912db5..50552dc7b4f5 100644
--- a/drivers/input/serio/ioc3kbd.c
+++ b/drivers/input/serio/ioc3kbd.c
@@ -190,7 +190,7 @@ static int ioc3kbd_probe(struct platform_device *pdev)
return 0;
}
-static int ioc3kbd_remove(struct platform_device *pdev)
+static void ioc3kbd_remove(struct platform_device *pdev)
{
struct ioc3kbd_data *d = platform_get_drvdata(pdev);
@@ -198,13 +198,11 @@ static int ioc3kbd_remove(struct platform_device *pdev)
serio_unregister_port(d->kbd);
serio_unregister_port(d->aux);
-
- return 0;
}
static struct platform_driver ioc3kbd_driver = {
.probe = ioc3kbd_probe,
- .remove = ioc3kbd_remove,
+ .remove_new = ioc3kbd_remove,
.driver = {
.name = "ioc3-kbd",
},
diff --git a/drivers/input/serio/maceps2.c b/drivers/input/serio/maceps2.c
index 629e15089c21..5ccfb82759b3 100644
--- a/drivers/input/serio/maceps2.c
+++ b/drivers/input/serio/maceps2.c
@@ -148,12 +148,10 @@ static int maceps2_probe(struct platform_device *dev)
return 0;
}
-static int maceps2_remove(struct platform_device *dev)
+static void maceps2_remove(struct platform_device *dev)
{
serio_unregister_port(maceps2_port[0]);
serio_unregister_port(maceps2_port[1]);
-
- return 0;
}
static struct platform_driver maceps2_driver = {
@@ -161,7 +159,7 @@ static struct platform_driver maceps2_driver = {
.name = "maceps2",
},
.probe = maceps2_probe,
- .remove = maceps2_remove,
+ .remove_new = maceps2_remove,
};
static int __init maceps2_init(void)
diff --git a/drivers/input/serio/olpc_apsp.c b/drivers/input/serio/olpc_apsp.c
index 33a8e5889bd8..240a714f7081 100644
--- a/drivers/input/serio/olpc_apsp.c
+++ b/drivers/input/serio/olpc_apsp.c
@@ -238,7 +238,7 @@ err_pad:
return error;
}
-static int olpc_apsp_remove(struct platform_device *pdev)
+static void olpc_apsp_remove(struct platform_device *pdev)
{
struct olpc_apsp *priv = platform_get_drvdata(pdev);
@@ -246,8 +246,6 @@ static int olpc_apsp_remove(struct platform_device *pdev)
serio_unregister_port(priv->kbio);
serio_unregister_port(priv->padio);
-
- return 0;
}
static const struct of_device_id olpc_apsp_dt_ids[] = {
@@ -258,7 +256,7 @@ MODULE_DEVICE_TABLE(of, olpc_apsp_dt_ids);
static struct platform_driver olpc_apsp_driver = {
.probe = olpc_apsp_probe,
- .remove = olpc_apsp_remove,
+ .remove_new = olpc_apsp_remove,
.driver = {
.name = "olpc-apsp",
.of_match_table = olpc_apsp_dt_ids,
diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c
index bc1dc484389b..c3ff60859a03 100644
--- a/drivers/input/serio/ps2-gpio.c
+++ b/drivers/input/serio/ps2-gpio.c
@@ -476,12 +476,11 @@ err_free_serio:
return error;
}
-static int ps2_gpio_remove(struct platform_device *pdev)
+static void ps2_gpio_remove(struct platform_device *pdev)
{
struct ps2_gpio_data *drvdata = platform_get_drvdata(pdev);
serio_unregister_port(drvdata->serio);
- return 0;
}
#if defined(CONFIG_OF)
@@ -494,7 +493,7 @@ MODULE_DEVICE_TABLE(of, ps2_gpio_match);
static struct platform_driver ps2_gpio_driver = {
.probe = ps2_gpio_probe,
- .remove = ps2_gpio_remove,
+ .remove_new = ps2_gpio_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(ps2_gpio_match),
diff --git a/drivers/input/serio/q40kbd.c b/drivers/input/serio/q40kbd.c
index ba04058fc3cb..3f81f8749cd5 100644
--- a/drivers/input/serio/q40kbd.c
+++ b/drivers/input/serio/q40kbd.c
@@ -148,7 +148,7 @@ err_free_mem:
return error;
}
-static int q40kbd_remove(struct platform_device *pdev)
+static void q40kbd_remove(struct platform_device *pdev)
{
struct q40kbd *q40kbd = platform_get_drvdata(pdev);
@@ -160,15 +160,13 @@ static int q40kbd_remove(struct platform_device *pdev)
serio_unregister_port(q40kbd->port);
free_irq(Q40_IRQ_KEYBOARD, q40kbd);
kfree(q40kbd);
-
- return 0;
}
static struct platform_driver q40kbd_driver = {
.driver = {
.name = "q40kbd",
},
- .remove = q40kbd_remove,
+ .remove_new = q40kbd_remove,
};
module_platform_driver_probe(q40kbd_driver, q40kbd_probe);
diff --git a/drivers/input/serio/rpckbd.c b/drivers/input/serio/rpckbd.c
index e8a9709f32eb..9bbfefd092c0 100644
--- a/drivers/input/serio/rpckbd.c
+++ b/drivers/input/serio/rpckbd.c
@@ -133,20 +133,18 @@ static int rpckbd_probe(struct platform_device *dev)
return 0;
}
-static int rpckbd_remove(struct platform_device *dev)
+static void rpckbd_remove(struct platform_device *dev)
{
struct serio *serio = platform_get_drvdata(dev);
struct rpckbd_data *rpckbd = serio->port_data;
serio_unregister_port(serio);
kfree(rpckbd);
-
- return 0;
}
static struct platform_driver rpckbd_driver = {
.probe = rpckbd_probe,
- .remove = rpckbd_remove,
+ .remove_new = rpckbd_remove,
.driver = {
.name = "kart",
},
diff --git a/drivers/input/serio/sun4i-ps2.c b/drivers/input/serio/sun4i-ps2.c
index eb262640192e..aec66d9f5176 100644
--- a/drivers/input/serio/sun4i-ps2.c
+++ b/drivers/input/serio/sun4i-ps2.c
@@ -297,7 +297,7 @@ err_free_mem:
return error;
}
-static int sun4i_ps2_remove(struct platform_device *pdev)
+static void sun4i_ps2_remove(struct platform_device *pdev)
{
struct sun4i_ps2data *drvdata = platform_get_drvdata(pdev);
@@ -311,8 +311,6 @@ static int sun4i_ps2_remove(struct platform_device *pdev)
iounmap(drvdata->reg_base);
kfree(drvdata);
-
- return 0;
}
static const struct of_device_id sun4i_ps2_match[] = {
@@ -324,7 +322,7 @@ MODULE_DEVICE_TABLE(of, sun4i_ps2_match);
static struct platform_driver sun4i_ps2_driver = {
.probe = sun4i_ps2_probe,
- .remove = sun4i_ps2_remove,
+ .remove_new = sun4i_ps2_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = sun4i_ps2_match,
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index f3d28da70b75..d8f9faf2b529 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -329,7 +329,7 @@ failed1:
* if the driver module is being unloaded. It frees any resources allocated to
* the device.
*/
-static int xps2_of_remove(struct platform_device *of_dev)
+static void xps2_of_remove(struct platform_device *of_dev)
{
struct xps2data *drvdata = platform_get_drvdata(of_dev);
struct resource r_mem; /* IO mem resources */
@@ -344,8 +344,6 @@ static int xps2_of_remove(struct platform_device *of_dev)
release_mem_region(r_mem.start, resource_size(&r_mem));
kfree(drvdata);
-
- return 0;
}
/* Match table for of_platform binding */
@@ -361,7 +359,7 @@ static struct platform_driver xps2_of_driver = {
.of_match_table = xps2_of_match,
},
.probe = xps2_of_probe,
- .remove = xps2_of_remove,
+ .remove_new = xps2_of_remove,
};
module_platform_driver(xps2_of_driver);
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index edb36d663f22..a0598e9c7aff 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -612,10 +612,11 @@ static umode_t ad7877_attr_is_visible(struct kobject *kobj,
return mode;
}
-static const struct attribute_group ad7877_attr_group = {
+static const struct attribute_group ad7877_group = {
.is_visible = ad7877_attr_is_visible,
.attrs = ad7877_attributes,
};
+__ATTRIBUTE_GROUPS(ad7877);
static void ad7877_setup_ts_def_msg(struct spi_device *spi, struct ad7877 *ts)
{
@@ -777,10 +778,6 @@ static int ad7877_probe(struct spi_device *spi)
return err;
}
- err = devm_device_add_group(&spi->dev, &ad7877_attr_group);
- if (err)
- return err;
-
err = input_register_device(input_dev);
if (err)
return err;
@@ -810,8 +807,9 @@ static DEFINE_SIMPLE_DEV_PM_OPS(ad7877_pm, ad7877_suspend, ad7877_resume);
static struct spi_driver ad7877_driver = {
.driver = {
- .name = "ad7877",
- .pm = pm_sleep_ptr(&ad7877_pm),
+ .name = "ad7877",
+ .dev_groups = ad7877_groups,
+ .pm = pm_sleep_ptr(&ad7877_pm),
},
.probe = ad7877_probe,
};
diff --git a/drivers/input/touchscreen/ad7879-i2c.c b/drivers/input/touchscreen/ad7879-i2c.c
index feaa6f8b01ed..5c094ab74698 100644
--- a/drivers/input/touchscreen/ad7879-i2c.c
+++ b/drivers/input/touchscreen/ad7879-i2c.c
@@ -58,9 +58,10 @@ MODULE_DEVICE_TABLE(of, ad7879_i2c_dt_ids);
static struct i2c_driver ad7879_i2c_driver = {
.driver = {
- .name = "ad7879",
- .pm = &ad7879_pm_ops,
- .of_match_table = of_match_ptr(ad7879_i2c_dt_ids),
+ .name = "ad7879",
+ .dev_groups = ad7879_groups,
+ .pm = &ad7879_pm_ops,
+ .of_match_table = of_match_ptr(ad7879_i2c_dt_ids),
},
.probe = ad7879_i2c_probe,
.id_table = ad7879_id,
diff --git a/drivers/input/touchscreen/ad7879-spi.c b/drivers/input/touchscreen/ad7879-spi.c
index 50e889846800..064968fe57cf 100644
--- a/drivers/input/touchscreen/ad7879-spi.c
+++ b/drivers/input/touchscreen/ad7879-spi.c
@@ -56,9 +56,10 @@ MODULE_DEVICE_TABLE(of, ad7879_spi_dt_ids);
static struct spi_driver ad7879_spi_driver = {
.driver = {
- .name = "ad7879",
- .pm = &ad7879_pm_ops,
- .of_match_table = of_match_ptr(ad7879_spi_dt_ids),
+ .name = "ad7879",
+ .dev_groups = ad7879_groups,
+ .pm = &ad7879_pm_ops,
+ .of_match_table = of_match_ptr(ad7879_spi_dt_ids),
},
.probe = ad7879_spi_probe,
};
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index e850853328f1..e5d69bf2276e 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -391,6 +391,12 @@ static const struct attribute_group ad7879_attr_group = {
.attrs = ad7879_attributes,
};
+const struct attribute_group *ad7879_groups[] = {
+ &ad7879_attr_group,
+ NULL
+};
+EXPORT_SYMBOL_GPL(ad7879_groups);
+
#ifdef CONFIG_GPIOLIB
static int ad7879_gpio_direction_input(struct gpio_chip *chip,
unsigned gpio)
@@ -612,10 +618,6 @@ int ad7879_probe(struct device *dev, struct regmap *regmap,
__ad7879_disable(ts);
- err = devm_device_add_group(dev, &ad7879_attr_group);
- if (err)
- return err;
-
err = ad7879_gpio_add(ts);
if (err)
return err;
diff --git a/drivers/input/touchscreen/ad7879.h b/drivers/input/touchscreen/ad7879.h
index ae8aa1428e56..d71a8e787290 100644
--- a/drivers/input/touchscreen/ad7879.h
+++ b/drivers/input/touchscreen/ad7879.h
@@ -8,11 +8,14 @@
#ifndef _AD7879_H_
#define _AD7879_H_
+#include <linux/pm.h>
#include <linux/types.h>
+struct attribute_group;
struct device;
struct regmap;
+extern const struct attribute_group *ad7879_groups[];
extern const struct dev_pm_ops ad7879_pm_ops;
int ad7879_probe(struct device *dev, struct regmap *regmap,
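
The ad7877/ad7879 hunks above, and most of the touchscreen hunks below, move sysfs attribute registration out of probe(): instead of calling devm_device_add_group(), the attribute group array is handed to the driver core through the .dev_groups field, and ATTRIBUTE_GROUPS() generates that array from the attribute list. A hedged sketch with made-up bar_* names:

static struct attribute *bar_attrs[] = {
	&dev_attr_version.attr,		/* DEVICE_ATTR_RO(version) assumed to exist elsewhere */
	NULL
};
ATTRIBUTE_GROUPS(bar);			/* emits bar_group and bar_groups[] */

static struct i2c_driver bar_driver = {
	.driver = {
		.name       = "bar",
		.dev_groups = bar_groups,	/* core creates and removes the files for us */
	},
	.probe = bar_probe,
};

With .dev_groups set, the attributes exist for the whole time the device is bound, so the explicit error handling in probe() goes away.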
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index faea40dd66d0..d2bbb436a77d 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -625,15 +625,12 @@ static ssize_t ads7846_disable_store(struct device *dev,
static DEVICE_ATTR(disable, 0664, ads7846_disable_show, ads7846_disable_store);
-static struct attribute *ads784x_attributes[] = {
+static struct attribute *ads784x_attrs[] = {
&dev_attr_pen_down.attr,
&dev_attr_disable.attr,
NULL,
};
-
-static const struct attribute_group ads784x_attr_group = {
- .attrs = ads784x_attributes,
-};
+ATTRIBUTE_GROUPS(ads784x);
/*--------------------------------------------------------------------------*/
@@ -1357,10 +1354,6 @@ static int ads7846_probe(struct spi_device *spi)
else
(void) ads7846_read12_ser(dev, READ_12BIT_SER(vaux));
- err = devm_device_add_group(dev, &ads784x_attr_group);
- if (err)
- return err;
-
err = input_register_device(input_dev);
if (err)
return err;
@@ -1386,9 +1379,10 @@ static void ads7846_remove(struct spi_device *spi)
static struct spi_driver ads7846_driver = {
.driver = {
- .name = "ads7846",
- .pm = pm_sleep_ptr(&ads7846_pm),
- .of_match_table = ads7846_dt_ids,
+ .name = "ads7846",
+ .dev_groups = ads784x_groups,
+ .pm = pm_sleep_ptr(&ads7846_pm),
+ .of_match_table = ads7846_dt_ids,
},
.probe = ads7846_probe,
.remove = ads7846_remove,
diff --git a/drivers/input/touchscreen/cyttsp5.c b/drivers/input/touchscreen/cyttsp5.c
index db5a885ecd72..68527ede5c0e 100644
--- a/drivers/input/touchscreen/cyttsp5.c
+++ b/drivers/input/touchscreen/cyttsp5.c
@@ -207,7 +207,7 @@ struct cyttsp5 {
int num_prv_rec;
struct regmap *regmap;
struct touchscreen_properties prop;
- struct regulator *vdd;
+ struct regulator_bulk_data supplies[2];
};
/*
@@ -817,7 +817,7 @@ static void cyttsp5_cleanup(void *data)
{
struct cyttsp5 *ts = data;
- regulator_disable(ts->vdd);
+ regulator_bulk_disable(ARRAY_SIZE(ts->supplies), ts->supplies);
}
static int cyttsp5_probe(struct device *dev, struct regmap *regmap, int irq,
@@ -840,9 +840,12 @@ static int cyttsp5_probe(struct device *dev, struct regmap *regmap, int irq,
init_completion(&ts->cmd_done);
/* Power up the device */
- ts->vdd = devm_regulator_get(dev, "vdd");
- if (IS_ERR(ts->vdd)) {
- error = PTR_ERR(ts->vdd);
+ ts->supplies[0].supply = "vdd";
+ ts->supplies[1].supply = "vddio";
+ error = devm_regulator_bulk_get(dev, ARRAY_SIZE(ts->supplies),
+ ts->supplies);
+ if (error) {
+ dev_err(ts->dev, "Failed to get regulators, error %d\n", error);
return error;
}
@@ -850,9 +853,11 @@ static int cyttsp5_probe(struct device *dev, struct regmap *regmap, int irq,
if (error)
return error;
- error = regulator_enable(ts->vdd);
- if (error)
+ error = regulator_bulk_enable(ARRAY_SIZE(ts->supplies), ts->supplies);
+ if (error) {
+ dev_err(ts->dev, "Failed to enable regulators, error %d\n", error);
return error;
+ }
ts->input = devm_input_allocate_device(dev);
if (!ts->input) {
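
The cyttsp5 hunk replaces the single vdd regulator with the bulk API so that vdd and vddio are requested and switched together. In sketch form (error paths trimmed; only the supply names come from the patch):

	struct regulator_bulk_data supplies[] = {
		{ .supply = "vdd" },
		{ .supply = "vddio" },
	};
	int error;

	error = devm_regulator_bulk_get(dev, ARRAY_SIZE(supplies), supplies);
	if (!error)
		error = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
	/* on teardown: regulator_bulk_disable(ARRAY_SIZE(supplies), supplies); */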
diff --git a/drivers/input/touchscreen/da9052_tsi.c b/drivers/input/touchscreen/da9052_tsi.c
index f91d0e02ddae..d71690ce6463 100644
--- a/drivers/input/touchscreen/da9052_tsi.c
+++ b/drivers/input/touchscreen/da9052_tsi.c
@@ -311,7 +311,7 @@ err_free_mem:
return error;
}
-static int da9052_ts_remove(struct platform_device *pdev)
+static void da9052_ts_remove(struct platform_device *pdev)
{
struct da9052_tsi *tsi = platform_get_drvdata(pdev);
@@ -322,13 +322,11 @@ static int da9052_ts_remove(struct platform_device *pdev)
input_unregister_device(tsi->dev);
kfree(tsi);
-
- return 0;
}
static struct platform_driver da9052_tsi_driver = {
.probe = da9052_ts_probe,
- .remove = da9052_ts_remove,
+ .remove_new = da9052_ts_remove,
.driver = {
.name = "da9052-tsi",
},
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 457d53337fbb..3e102bcc4a1c 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -580,10 +580,7 @@ static struct attribute *edt_ft5x06_attrs[] = {
&dev_attr_crc_errors.attr,
NULL
};
-
-static const struct attribute_group edt_ft5x06_attr_group = {
- .attrs = edt_ft5x06_attrs,
-};
+ATTRIBUTE_GROUPS(edt_ft5x06);
static void edt_ft5x06_restore_reg_parameters(struct edt_ft5x06_ts_data *tsdata)
{
@@ -1330,10 +1327,6 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client)
return error;
}
- error = devm_device_add_group(&client->dev, &edt_ft5x06_attr_group);
- if (error)
- return error;
-
error = input_register_device(input);
if (error)
return error;
@@ -1502,6 +1495,7 @@ MODULE_DEVICE_TABLE(of, edt_ft5x06_of_match);
static struct i2c_driver edt_ft5x06_ts_driver = {
.driver = {
.name = "edt_ft5x06",
+ .dev_groups = edt_ft5x06_groups,
.of_match_table = edt_ft5x06_of_match,
.pm = pm_sleep_ptr(&edt_ft5x06_ts_pm_ops),
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index a1af3de9f310..365765d40e62 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -1299,7 +1299,7 @@ static ELANTS_VERSION_ATTR(solution_version);
static ELANTS_VERSION_ATTR(bc_version);
static ELANTS_VERSION_ATTR(iap_version);
-static struct attribute *elants_attributes[] = {
+static struct attribute *elants_i2c_attrs[] = {
&dev_attr_calibrate.attr,
&dev_attr_update_fw.attr,
&dev_attr_iap_mode.attr,
@@ -1313,10 +1313,7 @@ static struct attribute *elants_attributes[] = {
&elants_ver_attr_iap_version.dattr.attr,
NULL
};
-
-static const struct attribute_group elants_attribute_group = {
- .attrs = elants_attributes,
-};
+ATTRIBUTE_GROUPS(elants_i2c);
static int elants_i2c_power_on(struct elants_data *ts)
{
@@ -1552,13 +1549,6 @@ static int elants_i2c_probe(struct i2c_client *client)
return error;
}
- error = devm_device_add_group(&client->dev, &elants_attribute_group);
- if (error) {
- dev_err(&client->dev, "failed to create sysfs attributes: %d\n",
- error);
- return error;
- }
-
return 0;
}
@@ -1667,6 +1657,7 @@ static struct i2c_driver elants_i2c_driver = {
.id_table = elants_i2c_id,
.driver = {
.name = DEVICE_NAME,
+ .dev_groups = elants_i2c_groups,
.pm = pm_sleep_ptr(&elants_i2c_pm_ops),
.acpi_match_table = ACPI_PTR(elants_acpi_id),
.of_match_table = of_match_ptr(elants_of_match),
diff --git a/drivers/input/touchscreen/exc3000.c b/drivers/input/touchscreen/exc3000.c
index 4c0d99aae9e0..a4030cc9ff60 100644
--- a/drivers/input/touchscreen/exc3000.c
+++ b/drivers/input/touchscreen/exc3000.c
@@ -325,16 +325,13 @@ static ssize_t type_show(struct device *dev,
}
static DEVICE_ATTR_RO(type);
-static struct attribute *sysfs_attrs[] = {
+static struct attribute *exc3000_attrs[] = {
&dev_attr_fw_version.attr,
&dev_attr_model.attr,
&dev_attr_type.attr,
NULL
};
-
-static struct attribute_group exc3000_attribute_group = {
- .attrs = sysfs_attrs
-};
+ATTRIBUTE_GROUPS(exc3000);
static int exc3000_probe(struct i2c_client *client)
{
@@ -437,10 +434,6 @@ static int exc3000_probe(struct i2c_client *client)
i2c_set_clientdata(client, data);
- error = devm_device_add_group(&client->dev, &exc3000_attribute_group);
- if (error)
- return error;
-
return 0;
}
@@ -473,6 +466,7 @@ MODULE_DEVICE_TABLE(acpi, exc3000_acpi_match);
static struct i2c_driver exc3000_driver = {
.driver = {
.name = "exc3000",
+ .dev_groups = exc3000_groups,
.of_match_table = of_match_ptr(exc3000_of_match),
.acpi_match_table = ACPI_PTR(exc3000_acpi_match),
},
diff --git a/drivers/input/touchscreen/hideep.c b/drivers/input/touchscreen/hideep.c
index 404153338df7..0f58258306bf 100644
--- a/drivers/input/touchscreen/hideep.c
+++ b/drivers/input/touchscreen/hideep.c
@@ -954,16 +954,13 @@ static DEVICE_ATTR(version, 0664, hideep_fw_version_show, NULL);
static DEVICE_ATTR(product_id, 0664, hideep_product_id_show, NULL);
static DEVICE_ATTR(update_fw, 0664, NULL, hideep_update_fw);
-static struct attribute *hideep_ts_sysfs_entries[] = {
+static struct attribute *hideep_ts_attrs[] = {
&dev_attr_version.attr,
&dev_attr_product_id.attr,
&dev_attr_update_fw.attr,
NULL,
};
-
-static const struct attribute_group hideep_ts_attr_group = {
- .attrs = hideep_ts_sysfs_entries,
-};
+ATTRIBUTE_GROUPS(hideep_ts);
static void hideep_set_work_mode(struct hideep_ts *ts)
{
@@ -1096,13 +1093,6 @@ static int hideep_probe(struct i2c_client *client)
return error;
}
- error = devm_device_add_group(&client->dev, &hideep_ts_attr_group);
- if (error) {
- dev_err(&client->dev,
- "failed to add sysfs attributes: %d\n", error);
- return error;
- }
-
return 0;
}
@@ -1131,6 +1121,7 @@ MODULE_DEVICE_TABLE(of, hideep_match_table);
static struct i2c_driver hideep_driver = {
.driver = {
.name = HIDEEP_I2C_NAME,
+ .dev_groups = hideep_ts_groups,
.of_match_table = of_match_ptr(hideep_match_table),
.acpi_match_table = ACPI_PTR(hideep_acpi_id),
.pm = pm_sleep_ptr(&hideep_pm_ops),
diff --git a/drivers/input/touchscreen/hycon-hy46xx.c b/drivers/input/touchscreen/hycon-hy46xx.c
index 2450cfa14de9..d0f257989fd6 100644
--- a/drivers/input/touchscreen/hycon-hy46xx.c
+++ b/drivers/input/touchscreen/hycon-hy46xx.c
@@ -274,10 +274,7 @@ static struct attribute *hycon_hy46xx_attrs[] = {
&hycon_hy46xx_attr_bootloader_version.dattr.attr,
NULL
};
-
-static const struct attribute_group hycon_hy46xx_attr_group = {
- .attrs = hycon_hy46xx_attrs,
-};
+ATTRIBUTE_GROUPS(hycon_hy46xx);
static void hycon_hy46xx_get_defaults(struct device *dev, struct hycon_hy46xx_data *tsdata)
{
@@ -535,10 +532,6 @@ static int hycon_hy46xx_probe(struct i2c_client *client)
return error;
}
- error = devm_device_add_group(&client->dev, &hycon_hy46xx_attr_group);
- if (error)
- return error;
-
error = input_register_device(input);
if (error)
return error;
@@ -576,6 +569,7 @@ MODULE_DEVICE_TABLE(of, hycon_hy46xx_of_match);
static struct i2c_driver hycon_hy46xx_driver = {
.driver = {
.name = "hycon_hy46xx",
+ .dev_groups = hycon_hy46xx_groups,
.of_match_table = hycon_hy46xx_of_match,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index ad6828e4f2e2..31ffdc2a93f3 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -876,7 +876,7 @@ exit:
static DEVICE_ATTR(firmware_update, 0200, NULL, ili210x_firmware_update_store);
-static struct attribute *ili210x_attributes[] = {
+static struct attribute *ili210x_attrs[] = {
&dev_attr_calibrate.attr,
&dev_attr_firmware_update.attr,
&dev_attr_firmware_version.attr,
@@ -904,10 +904,11 @@ static umode_t ili210x_attributes_visible(struct kobject *kobj,
return attr->mode;
}
-static const struct attribute_group ili210x_attr_group = {
- .attrs = ili210x_attributes,
+static const struct attribute_group ili210x_group = {
+ .attrs = ili210x_attrs,
.is_visible = ili210x_attributes_visible,
};
+__ATTRIBUTE_GROUPS(ili210x);
static void ili210x_power_down(void *data)
{
@@ -1013,13 +1014,6 @@ static int ili210x_i2c_probe(struct i2c_client *client)
if (error)
return error;
- error = devm_device_add_group(dev, &ili210x_attr_group);
- if (error) {
- dev_err(dev, "Unable to create sysfs attributes, err: %d\n",
- error);
- return error;
- }
-
error = input_register_device(priv->input);
if (error) {
dev_err(dev, "Cannot register input device, err: %d\n", error);
@@ -1050,6 +1044,7 @@ MODULE_DEVICE_TABLE(of, ili210x_dt_ids);
static struct i2c_driver ili210x_ts_driver = {
.driver = {
.name = "ili210x_i2c",
+ .dev_groups = ili210x_groups,
.of_match_table = ili210x_dt_ids,
},
.id_table = ili210x_i2c_id,
diff --git a/drivers/input/touchscreen/ilitek_ts_i2c.c b/drivers/input/touchscreen/ilitek_ts_i2c.c
index 2f872e95fbba..90c4934e750a 100644
--- a/drivers/input/touchscreen/ilitek_ts_i2c.c
+++ b/drivers/input/touchscreen/ilitek_ts_i2c.c
@@ -537,10 +537,7 @@ static struct attribute *ilitek_sysfs_attrs[] = {
&dev_attr_product_id.attr,
NULL
};
-
-static struct attribute_group ilitek_attrs_group = {
- .attrs = ilitek_sysfs_attrs,
-};
+ATTRIBUTE_GROUPS(ilitek_sysfs);
static int ilitek_ts_i2c_probe(struct i2c_client *client)
{
@@ -595,12 +592,6 @@ static int ilitek_ts_i2c_probe(struct i2c_client *client)
return error;
}
- error = devm_device_add_group(dev, &ilitek_attrs_group);
- if (error) {
- dev_err(dev, "sysfs create group failed: %d\n", error);
- return error;
- }
-
return 0;
}
@@ -675,6 +666,7 @@ MODULE_DEVICE_TABLE(of, ilitek_ts_i2c_match);
static struct i2c_driver ilitek_ts_i2c_driver = {
.driver = {
.name = ILITEK_TS_NAME,
+ .dev_groups = ilitek_sysfs_groups,
.pm = pm_sleep_ptr(&ilitek_pm_ops),
.of_match_table = of_match_ptr(ilitek_ts_i2c_match),
.acpi_match_table = ACPI_PTR(ilitekts_acpi_id),
diff --git a/drivers/input/touchscreen/iqs5xx.c b/drivers/input/touchscreen/iqs5xx.c
index b4768b66eb10..a3f4fb85bee5 100644
--- a/drivers/input/touchscreen/iqs5xx.c
+++ b/drivers/input/touchscreen/iqs5xx.c
@@ -974,10 +974,11 @@ static umode_t iqs5xx_attr_is_visible(struct kobject *kobj,
return attr->mode;
}
-static const struct attribute_group iqs5xx_attr_group = {
+static const struct attribute_group iqs5xx_group = {
.is_visible = iqs5xx_attr_is_visible,
.attrs = iqs5xx_attrs,
};
+__ATTRIBUTE_GROUPS(iqs5xx);
static int iqs5xx_suspend(struct device *dev)
{
@@ -1053,12 +1054,6 @@ static int iqs5xx_probe(struct i2c_client *client)
return error;
}
- error = devm_device_add_group(&client->dev, &iqs5xx_attr_group);
- if (error) {
- dev_err(&client->dev, "Failed to add attributes: %d\n", error);
- return error;
- }
-
if (iqs5xx->input) {
error = input_register_device(iqs5xx->input);
if (error)
@@ -1089,6 +1084,7 @@ MODULE_DEVICE_TABLE(of, iqs5xx_of_match);
static struct i2c_driver iqs5xx_i2c_driver = {
.driver = {
.name = "iqs5xx",
+ .dev_groups = iqs5xx_groups,
.of_match_table = iqs5xx_of_match,
.pm = pm_sleep_ptr(&iqs5xx_pm),
},
diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c
index 85b95ed461e7..bfbebe245040 100644
--- a/drivers/input/touchscreen/mainstone-wm97xx.c
+++ b/drivers/input/touchscreen/mainstone-wm97xx.c
@@ -252,18 +252,16 @@ static int mainstone_wm97xx_probe(struct platform_device *pdev)
return wm97xx_register_mach_ops(wm, &mainstone_mach_ops);
}
-static int mainstone_wm97xx_remove(struct platform_device *pdev)
+static void mainstone_wm97xx_remove(struct platform_device *pdev)
{
struct wm97xx *wm = platform_get_drvdata(pdev);
wm97xx_unregister_mach_ops(wm);
-
- return 0;
}
static struct platform_driver mainstone_wm97xx_driver = {
.probe = mainstone_wm97xx_probe,
- .remove = mainstone_wm97xx_remove,
+ .remove_new = mainstone_wm97xx_remove,
.driver = {
.name = "wm97xx-touch",
},
diff --git a/drivers/input/touchscreen/mc13783_ts.c b/drivers/input/touchscreen/mc13783_ts.c
index ae0d978c83bf..cbcd6e34efb7 100644
--- a/drivers/input/touchscreen/mc13783_ts.c
+++ b/drivers/input/touchscreen/mc13783_ts.c
@@ -217,18 +217,16 @@ err_free_mem:
return ret;
}
-static int mc13783_ts_remove(struct platform_device *pdev)
+static void mc13783_ts_remove(struct platform_device *pdev)
{
struct mc13783_ts_priv *priv = platform_get_drvdata(pdev);
input_unregister_device(priv->idev);
kfree(priv);
-
- return 0;
}
static struct platform_driver mc13783_ts_driver = {
- .remove = mc13783_ts_remove,
+ .remove_new = mc13783_ts_remove,
.driver = {
.name = MC13783_TS_NAME,
},
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
index 2ac4483fbc25..aa325486f618 100644
--- a/drivers/input/touchscreen/melfas_mip4.c
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -1419,10 +1419,7 @@ static struct attribute *mip4_attrs[] = {
&dev_attr_update_fw.attr,
NULL,
};
-
-static const struct attribute_group mip4_attr_group = {
- .attrs = mip4_attrs,
-};
+ATTRIBUTE_GROUPS(mip4);
static int mip4_probe(struct i2c_client *client)
{
@@ -1514,13 +1511,6 @@ static int mip4_probe(struct i2c_client *client)
return error;
}
- error = devm_device_add_group(&client->dev, &mip4_attr_group);
- if (error) {
- dev_err(&client->dev,
- "Failed to create sysfs attribute group: %d\n", error);
- return error;
- }
-
return 0;
}
@@ -1589,6 +1579,7 @@ static struct i2c_driver mip4_driver = {
.probe = mip4_probe,
.driver = {
.name = MIP4_DEVICE_NAME,
+ .dev_groups = mip4_groups,
.of_match_table = of_match_ptr(mip4_of_match),
.acpi_match_table = ACPI_PTR(mip4_acpi_match),
.pm = pm_sleep_ptr(&mip4_pm_ops),
diff --git a/drivers/input/touchscreen/pcap_ts.c b/drivers/input/touchscreen/pcap_ts.c
index b2da0194e02a..821245019fea 100644
--- a/drivers/input/touchscreen/pcap_ts.c
+++ b/drivers/input/touchscreen/pcap_ts.c
@@ -197,7 +197,7 @@ fail:
return err;
}
-static int pcap_ts_remove(struct platform_device *pdev)
+static void pcap_ts_remove(struct platform_device *pdev)
{
struct pcap_ts *pcap_ts = platform_get_drvdata(pdev);
@@ -207,8 +207,6 @@ static int pcap_ts_remove(struct platform_device *pdev)
input_unregister_device(pcap_ts->input);
kfree(pcap_ts);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -240,7 +238,7 @@ static const struct dev_pm_ops pcap_ts_pm_ops = {
static struct platform_driver pcap_ts_driver = {
.probe = pcap_ts_probe,
- .remove = pcap_ts_remove,
+ .remove_new = pcap_ts_remove,
.driver = {
.name = "pcap-ts",
.pm = PCAP_TS_PM_OPS,
diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
index 78dd3059d585..13c500e776f6 100644
--- a/drivers/input/touchscreen/raydium_i2c_ts.c
+++ b/drivers/input/touchscreen/raydium_i2c_ts.c
@@ -1004,7 +1004,7 @@ static DEVICE_ATTR(boot_mode, S_IRUGO, raydium_i2c_boot_mode_show, NULL);
static DEVICE_ATTR(update_fw, S_IWUSR, NULL, raydium_i2c_update_fw_store);
static DEVICE_ATTR(calibrate, S_IWUSR, NULL, raydium_i2c_calibrate_store);
-static struct attribute *raydium_i2c_attributes[] = {
+static struct attribute *raydium_i2c_attrs[] = {
&dev_attr_update_fw.attr,
&dev_attr_boot_mode.attr,
&dev_attr_fw_version.attr,
@@ -1012,10 +1012,7 @@ static struct attribute *raydium_i2c_attributes[] = {
&dev_attr_calibrate.attr,
NULL
};
-
-static const struct attribute_group raydium_i2c_attribute_group = {
- .attrs = raydium_i2c_attributes,
-};
+ATTRIBUTE_GROUPS(raydium_i2c);
static int raydium_i2c_power_on(struct raydium_data *ts)
{
@@ -1174,14 +1171,6 @@ static int raydium_i2c_probe(struct i2c_client *client)
return error;
}
- error = devm_device_add_group(&client->dev,
- &raydium_i2c_attribute_group);
- if (error) {
- dev_err(&client->dev, "failed to create sysfs attributes: %d\n",
- error);
- return error;
- }
-
return 0;
}
@@ -1265,6 +1254,7 @@ static struct i2c_driver raydium_i2c_driver = {
.id_table = raydium_i2c_id,
.driver = {
.name = "raydium_ts",
+ .dev_groups = raydium_i2c_groups,
.pm = pm_sleep_ptr(&raydium_i2c_pm_ops),
.acpi_match_table = ACPI_PTR(raydium_acpi_id),
.of_match_table = of_match_ptr(raydium_of_match),
diff --git a/drivers/input/touchscreen/rohm_bu21023.c b/drivers/input/touchscreen/rohm_bu21023.c
index 240424f06b98..4493ad0c9322 100644
--- a/drivers/input/touchscreen/rohm_bu21023.c
+++ b/drivers/input/touchscreen/rohm_bu21023.c
@@ -854,10 +854,7 @@ static struct attribute *rohm_ts_attrs[] = {
&dev_attr_inv_y.attr,
NULL,
};
-
-static const struct attribute_group rohm_ts_attr_group = {
- .attrs = rohm_ts_attrs,
-};
+ATTRIBUTE_GROUPS(rohm_ts);
static int rohm_ts_device_init(struct i2c_client *client, u8 setup2)
{
@@ -1164,12 +1161,6 @@ static int rohm_bu21023_i2c_probe(struct i2c_client *client)
return error;
}
- error = devm_device_add_group(dev, &rohm_ts_attr_group);
- if (error) {
- dev_err(dev, "failed to create sysfs group: %d\n", error);
- return error;
- }
-
return error;
}
@@ -1182,6 +1173,7 @@ MODULE_DEVICE_TABLE(i2c, rohm_bu21023_i2c_id);
static struct i2c_driver rohm_bu21023_i2c_driver = {
.driver = {
.name = BU21023_NAME,
+ .dev_groups = rohm_ts_groups,
},
.probe = rohm_bu21023_i2c_probe,
.id_table = rohm_bu21023_i2c_id,
diff --git a/drivers/input/touchscreen/s6sy761.c b/drivers/input/touchscreen/s6sy761.c
index 998d99d18911..149cc2c4925e 100644
--- a/drivers/input/touchscreen/s6sy761.c
+++ b/drivers/input/touchscreen/s6sy761.c
@@ -286,10 +286,7 @@ static struct attribute *s6sy761_sysfs_attrs[] = {
&dev_attr_devid.attr,
NULL
};
-
-static struct attribute_group s6sy761_attribute_group = {
- .attrs = s6sy761_sysfs_attrs
-};
+ATTRIBUTE_GROUPS(s6sy761_sysfs);
static int s6sy761_power_on(struct s6sy761_data *sdata)
{
@@ -465,10 +462,6 @@ static int s6sy761_probe(struct i2c_client *client)
if (err)
return err;
- err = devm_device_add_group(&client->dev, &s6sy761_attribute_group);
- if (err)
- return err;
-
pm_runtime_enable(&client->dev);
return 0;
@@ -535,6 +528,7 @@ MODULE_DEVICE_TABLE(i2c, s6sy761_id);
static struct i2c_driver s6sy761_driver = {
.driver = {
.name = S6SY761_DEV_NAME,
+ .dev_groups = s6sy761_sysfs_groups,
.of_match_table = of_match_ptr(s6sy761_of_match),
.pm = pm_ptr(&s6sy761_pm_ops),
},
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index 56e371fd88fa..85010fa07908 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -517,10 +517,7 @@ static struct attribute *stmfts_sysfs_attrs[] = {
&dev_attr_hover_enable.attr,
NULL
};
-
-static struct attribute_group stmfts_attribute_group = {
- .attrs = stmfts_sysfs_attrs
-};
+ATTRIBUTE_GROUPS(stmfts_sysfs);
static int stmfts_power_on(struct stmfts_data *sdata)
{
@@ -727,10 +724,6 @@ static int stmfts_probe(struct i2c_client *client)
}
}
- err = devm_device_add_group(&client->dev, &stmfts_attribute_group);
- if (err)
- return err;
-
pm_runtime_enable(&client->dev);
device_enable_async_suspend(&client->dev);
@@ -804,6 +797,7 @@ MODULE_DEVICE_TABLE(i2c, stmfts_id);
static struct i2c_driver stmfts_driver = {
.driver = {
.name = STMFTS_DEV_NAME,
+ .dev_groups = stmfts_sysfs_groups,
.of_match_table = of_match_ptr(stmfts_of_match),
.pm = pm_ptr(&stmfts_pm_ops),
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/input/touchscreen/stmpe-ts.c b/drivers/input/touchscreen/stmpe-ts.c
index 25c45c3a3561..b204fdb2d22c 100644
--- a/drivers/input/touchscreen/stmpe-ts.c
+++ b/drivers/input/touchscreen/stmpe-ts.c
@@ -350,13 +350,11 @@ static int stmpe_input_probe(struct platform_device *pdev)
return 0;
}
-static int stmpe_ts_remove(struct platform_device *pdev)
+static void stmpe_ts_remove(struct platform_device *pdev)
{
struct stmpe_touch *ts = platform_get_drvdata(pdev);
stmpe_disable(ts->stmpe, STMPE_BLOCK_TOUCHSCREEN);
-
- return 0;
}
static struct platform_driver stmpe_ts_driver = {
@@ -364,7 +362,7 @@ static struct platform_driver stmpe_ts_driver = {
.name = STMPE_TS_NAME,
},
.probe = stmpe_input_probe,
- .remove = stmpe_ts_remove,
+ .remove_new = stmpe_ts_remove,
};
module_platform_driver(stmpe_ts_driver);
diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
index bb3c6072fc82..92b2b840b4b7 100644
--- a/drivers/input/touchscreen/sun4i-ts.c
+++ b/drivers/input/touchscreen/sun4i-ts.c
@@ -375,7 +375,7 @@ static int sun4i_ts_probe(struct platform_device *pdev)
return 0;
}
-static int sun4i_ts_remove(struct platform_device *pdev)
+static void sun4i_ts_remove(struct platform_device *pdev)
{
struct sun4i_ts_data *ts = platform_get_drvdata(pdev);
@@ -385,8 +385,6 @@ static int sun4i_ts_remove(struct platform_device *pdev)
/* Deactivate all IRQs */
writel(0, ts->base + TP_INT_FIFOC);
-
- return 0;
}
static const struct of_device_id sun4i_ts_of_match[] = {
@@ -403,7 +401,7 @@ static struct platform_driver sun4i_ts_driver = {
.of_match_table = sun4i_ts_of_match,
},
.probe = sun4i_ts_probe,
- .remove = sun4i_ts_remove,
+ .remove_new = sun4i_ts_remove,
};
module_platform_driver(sun4i_ts_driver);
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 9aa4e35fb4f5..34324f8512ac 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -491,7 +491,7 @@ err_free_mem:
return err;
}
-static int titsc_remove(struct platform_device *pdev)
+static void titsc_remove(struct platform_device *pdev)
{
struct titsc *ts_dev = platform_get_drvdata(pdev);
u32 steps;
@@ -508,7 +508,6 @@ static int titsc_remove(struct platform_device *pdev)
input_unregister_device(ts_dev->input);
kfree(ts_dev);
- return 0;
}
static int titsc_suspend(struct device *dev)
@@ -552,7 +551,7 @@ MODULE_DEVICE_TABLE(of, ti_tsc_dt_ids);
static struct platform_driver ti_tsc_driver = {
.probe = titsc_probe,
- .remove = titsc_remove,
+ .remove_new = titsc_remove,
.driver = {
.name = "TI-am335x-tsc",
.pm = pm_sleep_ptr(&titsc_pm_ops),
diff --git a/drivers/input/touchscreen/tsc2004.c b/drivers/input/touchscreen/tsc2004.c
index b5e904c5b7c4..89c5248f66f6 100644
--- a/drivers/input/touchscreen/tsc2004.c
+++ b/drivers/input/touchscreen/tsc2004.c
@@ -63,9 +63,10 @@ MODULE_DEVICE_TABLE(of, tsc2004_of_match);
static struct i2c_driver tsc2004_driver = {
.driver = {
- .name = "tsc2004",
- .of_match_table = of_match_ptr(tsc2004_of_match),
- .pm = pm_sleep_ptr(&tsc200x_pm_ops),
+ .name = "tsc2004",
+ .dev_groups = tsc200x_groups,
+ .of_match_table = of_match_ptr(tsc2004_of_match),
+ .pm = pm_sleep_ptr(&tsc200x_pm_ops),
},
.id_table = tsc2004_idtable,
.probe = tsc2004_probe,
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index b6dfbcfc8c19..1b40ce0ca1b9 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -79,9 +79,10 @@ MODULE_DEVICE_TABLE(of, tsc2005_of_match);
static struct spi_driver tsc2005_driver = {
.driver = {
- .name = "tsc2005",
- .of_match_table = of_match_ptr(tsc2005_of_match),
- .pm = pm_sleep_ptr(&tsc200x_pm_ops),
+ .name = "tsc2005",
+ .dev_groups = tsc200x_groups,
+ .of_match_table = of_match_ptr(tsc2005_of_match),
+ .pm = pm_sleep_ptr(&tsc200x_pm_ops),
},
.probe = tsc2005_probe,
.remove = tsc2005_remove,
diff --git a/drivers/input/touchscreen/tsc200x-core.c b/drivers/input/touchscreen/tsc200x-core.c
index b799f26fcf8f..a4c0e9db9bb9 100644
--- a/drivers/input/touchscreen/tsc200x-core.c
+++ b/drivers/input/touchscreen/tsc200x-core.c
@@ -356,6 +356,12 @@ static const struct attribute_group tsc200x_attr_group = {
.attrs = tsc200x_attrs,
};
+const struct attribute_group *tsc200x_groups[] = {
+ &tsc200x_attr_group,
+ NULL
+};
+EXPORT_SYMBOL_GPL(tsc200x_groups);
+
static void tsc200x_esd_work(struct work_struct *work)
{
struct tsc200x *ts = container_of(work, struct tsc200x, esd_work.work);
@@ -553,25 +559,17 @@ int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
return error;
dev_set_drvdata(dev, ts);
- error = sysfs_create_group(&dev->kobj, &tsc200x_attr_group);
- if (error) {
- dev_err(dev,
- "Failed to create sysfs attributes, err: %d\n", error);
- goto disable_regulator;
- }
error = input_register_device(ts->idev);
if (error) {
dev_err(dev,
"Failed to register input device, err: %d\n", error);
- goto err_remove_sysfs;
+ goto disable_regulator;
}
irq_set_irq_wake(irq, 1);
return 0;
-err_remove_sysfs:
- sysfs_remove_group(&dev->kobj, &tsc200x_attr_group);
disable_regulator:
regulator_disable(ts->vio);
return error;
@@ -582,8 +580,6 @@ void tsc200x_remove(struct device *dev)
{
struct tsc200x *ts = dev_get_drvdata(dev);
- sysfs_remove_group(&dev->kobj, &tsc200x_attr_group);
-
regulator_disable(ts->vio);
}
EXPORT_SYMBOL_GPL(tsc200x_remove);
diff --git a/drivers/input/touchscreen/tsc200x-core.h b/drivers/input/touchscreen/tsc200x-core.h
index 4ded34425b21..37de91efd78e 100644
--- a/drivers/input/touchscreen/tsc200x-core.h
+++ b/drivers/input/touchscreen/tsc200x-core.h
@@ -70,6 +70,7 @@
extern const struct regmap_config tsc200x_regmap_config;
extern const struct dev_pm_ops tsc200x_pm_ops;
+extern const struct attribute_group *tsc200x_groups[];
int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
struct regmap *regmap,
diff --git a/drivers/input/touchscreen/wdt87xx_i2c.c b/drivers/input/touchscreen/wdt87xx_i2c.c
index cbc4750c53f9..128341a6696b 100644
--- a/drivers/input/touchscreen/wdt87xx_i2c.c
+++ b/drivers/input/touchscreen/wdt87xx_i2c.c
@@ -944,10 +944,7 @@ static struct attribute *wdt87xx_attrs[] = {
&dev_attr_update_fw.attr,
NULL
};
-
-static const struct attribute_group wdt87xx_attr_group = {
- .attrs = wdt87xx_attrs,
-};
+ATTRIBUTE_GROUPS(wdt87xx);
static void wdt87xx_report_contact(struct input_dev *input,
struct wdt87xx_sys_param *param,
@@ -1104,12 +1101,6 @@ static int wdt87xx_ts_probe(struct i2c_client *client)
return error;
}
- error = devm_device_add_group(&client->dev, &wdt87xx_attr_group);
- if (error) {
- dev_err(&client->dev, "create sysfs failed: %d\n", error);
- return error;
- }
-
return 0;
}
@@ -1172,8 +1163,9 @@ static struct i2c_driver wdt87xx_driver = {
.probe = wdt87xx_ts_probe,
.id_table = wdt87xx_dev_id,
.driver = {
- .name = WDT87XX_NAME,
- .pm = pm_sleep_ptr(&wdt87xx_pm_ops),
+ .name = WDT87XX_NAME,
+ .dev_groups = wdt87xx_groups,
+ .pm = pm_sleep_ptr(&wdt87xx_pm_ops),
.acpi_match_table = ACPI_PTR(wdt87xx_acpi_id),
},
};
diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c
index 319f57fb9af5..9cee26b63341 100644
--- a/drivers/input/touchscreen/wm831x-ts.c
+++ b/drivers/input/touchscreen/wm831x-ts.c
@@ -374,14 +374,12 @@ err_alloc:
return error;
}
-static int wm831x_ts_remove(struct platform_device *pdev)
+static void wm831x_ts_remove(struct platform_device *pdev)
{
struct wm831x_ts *wm831x_ts = platform_get_drvdata(pdev);
free_irq(wm831x_ts->pd_irq, wm831x_ts);
free_irq(wm831x_ts->data_irq, wm831x_ts);
-
- return 0;
}
static struct platform_driver wm831x_ts_driver = {
@@ -389,7 +387,7 @@ static struct platform_driver wm831x_ts_driver = {
.name = "wm831x-touch",
},
.probe = wm831x_ts_probe,
- .remove = wm831x_ts_remove,
+ .remove_new = wm831x_ts_remove,
};
module_platform_driver(wm831x_ts_driver);
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index ac3b3dd59488..f01f6cc9b59f 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -756,11 +756,9 @@ batt_err:
return ret;
}
-static int wm97xx_mfd_remove(struct platform_device *pdev)
+static void wm97xx_mfd_remove(struct platform_device *pdev)
{
wm97xx_remove(&pdev->dev);
-
- return 0;
}
static int wm97xx_suspend(struct device *dev)
@@ -878,7 +876,7 @@ static struct platform_driver wm97xx_mfd_driver = {
.pm = pm_sleep_ptr(&wm97xx_pm_ops),
},
.probe = wm97xx_mfd_probe,
- .remove = wm97xx_mfd_remove,
+ .remove_new = wm97xx_mfd_remove,
};
static int __init wm97xx_init(void)
diff --git a/drivers/interconnect/imx/imx.h b/drivers/interconnect/imx/imx.h
index 895907cdcb3b..d4d0e9888655 100644
--- a/drivers/interconnect/imx/imx.h
+++ b/drivers/interconnect/imx/imx.h
@@ -10,8 +10,13 @@
#ifndef __DRIVERS_INTERCONNECT_IMX_H
#define __DRIVERS_INTERCONNECT_IMX_H
+#include <linux/args.h>
+#include <linux/bits.h>
+#include <linux/types.h>
+
#include <linux/interconnect-provider.h>
-#include <linux/kernel.h>
+
+struct platform_device;
#define IMX_ICC_MAX_LINKS 4
@@ -89,7 +94,7 @@ struct imx_icc_noc_setting {
.id = _id, \
.name = _name, \
.adj = _adj, \
- .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
+ .num_links = COUNT_ARGS(__VA_ARGS__), \
.links = { __VA_ARGS__ }, \
}
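
The same substitution appears again in msm8974.c and osm-l3.c below: COUNT_ARGS() from <linux/args.h> counts the variadic macro arguments directly, which is what the ARRAY_SIZE(((int[]){ __VA_ARGS__ })) compound-literal trick was emulating. Roughly, for a node-definition macro of this family (field list abridged for the sketch):

#include <linux/args.h>

#define DEFINE_QNODE(_name, _id, ...)				\
	{							\
		.name      = #_name,				\
		.id        = _id,				\
		.num_links = COUNT_ARGS(__VA_ARGS__),		\
		.links     = { __VA_ARGS__ },			\
	}
	/* COUNT_ARGS(A, B, C) expands to 3 */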
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 825b647d9169..62b516d38d03 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -182,6 +182,15 @@ config INTERCONNECT_QCOM_SDX65
This is a driver for the Qualcomm Network-on-Chip on sdx65-based
platforms.
+config INTERCONNECT_QCOM_SDX75
+ tristate "Qualcomm SDX75 interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on sdx75-based
+ platforms.
+
config INTERCONNECT_QCOM_SM6350
tristate "Qualcomm SM6350 interconnect driver"
depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index 80d9d2da95d1..c5320e293960 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -23,6 +23,7 @@ qnoc-sdm670-objs := sdm670.o
qnoc-sdm845-objs := sdm845.o
qnoc-sdx55-objs := sdx55.o
qnoc-sdx65-objs := sdx65.o
+qnoc-sdx75-objs := sdx75.o
qnoc-sm6350-objs := sm6350.o
qnoc-sm8150-objs := sm8150.o
qnoc-sm8250-objs := sm8250.o
@@ -51,6 +52,7 @@ obj-$(CONFIG_INTERCONNECT_QCOM_SDM670) += qnoc-sdm670.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDX55) += qnoc-sdx55.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDX65) += qnoc-sdx65.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SDX75) += qnoc-sdx75.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM6350) += qnoc-sm6350.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8150) += qnoc-sm8150.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8250) += qnoc-sm8250.o
diff --git a/drivers/interconnect/qcom/icc-rpm-clocks.c b/drivers/interconnect/qcom/icc-rpm-clocks.c
index 63c82a91bbc7..ac1677de7dfd 100644
--- a/drivers/interconnect/qcom/icc-rpm-clocks.c
+++ b/drivers/interconnect/qcom/icc-rpm-clocks.c
@@ -25,6 +25,12 @@ const struct rpm_clk_resource bimc_clk = {
};
EXPORT_SYMBOL_GPL(bimc_clk);
+const struct rpm_clk_resource mem_1_clk = {
+ .resource_type = QCOM_SMD_RPM_MEM_CLK,
+ .clock_id = 1,
+};
+EXPORT_SYMBOL_GPL(mem_1_clk);
+
const struct rpm_clk_resource bus_0_clk = {
.resource_type = QCOM_SMD_RPM_BUS_CLK,
.clock_id = 0,
diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
index 2c16917ba1fd..628e651c555c 100644
--- a/drivers/interconnect/qcom/icc-rpm.c
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -291,6 +291,32 @@ static int qcom_icc_bw_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
return 0;
}
+static u64 qcom_icc_calc_rate(struct qcom_icc_provider *qp, struct qcom_icc_node *qn, int ctx)
+{
+ u64 agg_avg_rate, agg_peak_rate, agg_rate;
+
+ if (qn->channels)
+ agg_avg_rate = div_u64(qn->sum_avg[ctx], qn->channels);
+ else
+ agg_avg_rate = qn->sum_avg[ctx];
+
+ if (qn->ab_coeff) {
+ agg_avg_rate = agg_avg_rate * qn->ab_coeff;
+ agg_avg_rate = div_u64(agg_avg_rate, 100);
+ }
+
+ if (qn->ib_coeff) {
+ agg_peak_rate = qn->max_peak[ctx] * 100;
+ agg_peak_rate = div_u64(agg_peak_rate, qn->ib_coeff);
+ } else {
+ agg_peak_rate = qn->max_peak[ctx];
+ }
+
+ agg_rate = max_t(u64, agg_avg_rate, agg_peak_rate);
+
+ return div_u64(agg_rate, qn->buswidth);
+}
+
/**
* qcom_icc_bus_aggregate - calculate bus clock rates by traversing all nodes
* @provider: generic interconnect provider
@@ -298,10 +324,10 @@ static int qcom_icc_bw_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
*/
static void qcom_icc_bus_aggregate(struct icc_provider *provider, u64 *agg_clk_rate)
{
- u64 agg_avg_rate, agg_rate;
+ struct qcom_icc_provider *qp = to_qcom_provider(provider);
struct qcom_icc_node *qn;
struct icc_node *node;
- int i;
+ int ctx;
/*
* Iterate nodes on the provider, aggregate bandwidth requests for
@@ -309,16 +335,9 @@ static void qcom_icc_bus_aggregate(struct icc_provider *provider, u64 *agg_clk_r
*/
list_for_each_entry(node, &provider->nodes, node_list) {
qn = node->data;
- for (i = 0; i < QCOM_SMD_RPM_STATE_NUM; i++) {
- if (qn->channels)
- agg_avg_rate = div_u64(qn->sum_avg[i], qn->channels);
- else
- agg_avg_rate = qn->sum_avg[i];
-
- agg_rate = max_t(u64, agg_avg_rate, qn->max_peak[i]);
- do_div(agg_rate, qn->buswidth);
-
- agg_clk_rate[i] = max_t(u64, agg_clk_rate[i], agg_rate);
+ for (ctx = 0; ctx < QCOM_SMD_RPM_STATE_NUM; ctx++) {
+ agg_clk_rate[ctx] = max_t(u64, agg_clk_rate[ctx],
+ qcom_icc_calc_rate(qp, qn, ctx));
}
}
}
@@ -395,6 +414,33 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
qp->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE] = sleep_rate;
}
+ /* Handle the node-specific clock */
+ if (!src_qn->bus_clk_desc)
+ return 0;
+
+ active_rate = qcom_icc_calc_rate(qp, src_qn, QCOM_SMD_RPM_ACTIVE_STATE);
+ sleep_rate = qcom_icc_calc_rate(qp, src_qn, QCOM_SMD_RPM_SLEEP_STATE);
+
+ if (active_rate != src_qn->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE]) {
+ ret = qcom_icc_rpm_set_bus_rate(src_qn->bus_clk_desc, QCOM_SMD_RPM_ACTIVE_STATE,
+ active_rate);
+ if (ret)
+ return ret;
+
+ /* Cache the rate after we've successfully committed it to RPM */
+ src_qn->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE] = active_rate;
+ }
+
+ if (sleep_rate != src_qn->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE]) {
+ ret = qcom_icc_rpm_set_bus_rate(src_qn->bus_clk_desc, QCOM_SMD_RPM_SLEEP_STATE,
+ sleep_rate);
+ if (ret)
+ return ret;
+
+ /* Cache the rate after we've successfully committed it to RPM */
+ src_qn->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE] = sleep_rate;
+ }
+
return 0;
}
@@ -497,7 +543,7 @@ regmap_done:
ret = devm_clk_bulk_get(dev, qp->num_intf_clks, qp->intf_clks);
if (ret)
- return ret;
+ goto err_disable_unprepare_clk;
provider = &qp->provider;
provider->dev = dev;
@@ -512,13 +558,21 @@ regmap_done:
/* If this fails, bus accesses will crash the platform! */
ret = clk_bulk_prepare_enable(qp->num_intf_clks, qp->intf_clks);
if (ret)
- return ret;
+ goto err_disable_unprepare_clk;
for (i = 0; i < num_nodes; i++) {
size_t j;
+ if (!qnodes[i]->ab_coeff)
+ qnodes[i]->ab_coeff = qp->ab_coeff;
+
+ if (!qnodes[i]->ib_coeff)
+ qnodes[i]->ib_coeff = qp->ib_coeff;
+
node = icc_node_create(qnodes[i]->id);
if (IS_ERR(node)) {
+ clk_bulk_disable_unprepare(qp->num_intf_clks,
+ qp->intf_clks);
ret = PTR_ERR(node);
goto err_remove_nodes;
}
@@ -534,8 +588,11 @@ regmap_done:
if (qnodes[i]->qos.ap_owned &&
qnodes[i]->qos.qos_mode != NOC_QOS_MODE_INVALID) {
ret = qcom_icc_qos_set(node);
- if (ret)
- return ret;
+ if (ret) {
+ clk_bulk_disable_unprepare(qp->num_intf_clks,
+ qp->intf_clks);
+ goto err_remove_nodes;
+ }
}
data->nodes[i] = node;
@@ -563,6 +620,7 @@ err_deregister_provider:
icc_provider_deregister(provider);
err_remove_nodes:
icc_nodes_remove(provider);
+err_disable_unprepare_clk:
clk_disable_unprepare(qp->bus_clk);
return ret;
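
qcom_icc_calc_rate() above is where the new coefficients bite: the per-channel average bandwidth is scaled by ab_coeff percent, the peak bandwidth is divided by ib_coeff percent (so values below 100 inflate the peak request), and the larger of the two is divided by the bus width to get the requested clock rate. A rough worked example with made-up numbers:

	sum_avg = 800, channels = 2, ab_coeff = 150  ->  avg  = (800 / 2) * 150 / 100 = 600
	max_peak = 1000, ib_coeff = 80               ->  peak = 1000 * 100 / 80       = 1250
	buswidth = 4                                 ->  rate = max(600, 1250) / 4    = 312 (integer division)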
diff --git a/drivers/interconnect/qcom/icc-rpm.h b/drivers/interconnect/qcom/icc-rpm.h
index eed3451af3e6..a13768cfd231 100644
--- a/drivers/interconnect/qcom/icc-rpm.h
+++ b/drivers/interconnect/qcom/icc-rpm.h
@@ -44,6 +44,8 @@ struct rpm_clk_resource {
* @type: the ICC provider type
* @regmap: regmap for QoS registers read/write access
* @qos_offset: offset to QoS registers
+ * @ab_coeff: a percentage-based coefficient for compensating the AB calculations
+ * @ib_coeff: an inverse-percentage-based coefficient for compensating the IB calculations
* @bus_clk_rate: bus clock rate in Hz
* @bus_clk_desc: a pointer to a rpm_clk_resource description of bus clocks
* @bus_clk: a pointer to a HLOS-owned bus clock
@@ -57,6 +59,8 @@ struct qcom_icc_provider {
enum qcom_icc_type type;
struct regmap *regmap;
unsigned int qos_offset;
+ u16 ab_coeff;
+ u16 ib_coeff;
u32 bus_clk_rate[QCOM_SMD_RPM_STATE_NUM];
const struct rpm_clk_resource *bus_clk_desc;
struct clk *bus_clk;
@@ -93,11 +97,15 @@ struct qcom_icc_qos {
* @num_links: the total number of @links
* @channels: number of channels at this node (e.g. DDR channels)
* @buswidth: width of the interconnect between a node and the bus (bytes)
+ * @bus_clk_desc: a pointer to a rpm_clk_resource description of bus clocks
* @sum_avg: current sum aggregate value of all avg bw requests
* @max_peak: current max aggregate value of all peak bw requests
* @mas_rpm_id: RPM id for devices that are bus masters
* @slv_rpm_id: RPM id for devices that are bus slaves
* @qos: NoC QoS setting parameters
+ * @ab_coeff: a percentage-based coefficient for compensating the AB calculations
+ * @ib_coeff: an inverse-percentage-based coefficient for compensating the IB calculations
+ * @bus_clk_rate: a pointer to an array containing bus clock rates in Hz
*/
struct qcom_icc_node {
unsigned char *name;
@@ -106,11 +114,15 @@ struct qcom_icc_node {
u16 num_links;
u16 channels;
u16 buswidth;
+ const struct rpm_clk_resource *bus_clk_desc;
u64 sum_avg[QCOM_SMD_RPM_STATE_NUM];
u64 max_peak[QCOM_SMD_RPM_STATE_NUM];
int mas_rpm_id;
int slv_rpm_id;
struct qcom_icc_qos qos;
+ u16 ab_coeff;
+ u16 ib_coeff;
+ u32 bus_clk_rate[QCOM_SMD_RPM_STATE_NUM];
};
struct qcom_icc_desc {
@@ -123,6 +135,8 @@ struct qcom_icc_desc {
enum qcom_icc_type type;
const struct regmap_config *regmap_cfg;
unsigned int qos_offset;
+ u16 ab_coeff;
+ u16 ib_coeff;
};
/* Valid for all bus types */
@@ -138,6 +152,7 @@ extern const struct rpm_clk_resource bimc_clk;
extern const struct rpm_clk_resource bus_0_clk;
extern const struct rpm_clk_resource bus_1_clk;
extern const struct rpm_clk_resource bus_2_clk;
+extern const struct rpm_clk_resource mem_1_clk;
extern const struct rpm_clk_resource mmaxi_0_clk;
extern const struct rpm_clk_resource mmaxi_1_clk;
extern const struct rpm_clk_resource qup_clk;
diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
index b9f27ce3b607..c1aa265c1f4e 100644
--- a/drivers/interconnect/qcom/icc-rpmh.c
+++ b/drivers/interconnect/qcom/icc-rpmh.c
@@ -253,14 +253,12 @@ err_remove_nodes:
}
EXPORT_SYMBOL_GPL(qcom_icc_rpmh_probe);
-int qcom_icc_rpmh_remove(struct platform_device *pdev)
+void qcom_icc_rpmh_remove(struct platform_device *pdev)
{
struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
icc_provider_deregister(&qp->provider);
icc_nodes_remove(&qp->provider);
-
- return 0;
}
EXPORT_SYMBOL_GPL(qcom_icc_rpmh_remove);
diff --git a/drivers/interconnect/qcom/icc-rpmh.h b/drivers/interconnect/qcom/icc-rpmh.h
index 5f0af8b1fc43..2de29460e808 100644
--- a/drivers/interconnect/qcom/icc-rpmh.h
+++ b/drivers/interconnect/qcom/icc-rpmh.h
@@ -126,6 +126,6 @@ int qcom_icc_set(struct icc_node *src, struct icc_node *dst);
int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev);
void qcom_icc_pre_aggregate(struct icc_node *node);
int qcom_icc_rpmh_probe(struct platform_device *pdev);
-int qcom_icc_rpmh_remove(struct platform_device *pdev);
+void qcom_icc_rpmh_remove(struct platform_device *pdev);
#endif
diff --git a/drivers/interconnect/qcom/msm8974.c b/drivers/interconnect/qcom/msm8974.c
index 885ca9d6d4ed..21f6c852141e 100644
--- a/drivers/interconnect/qcom/msm8974.c
+++ b/drivers/interconnect/qcom/msm8974.c
@@ -28,6 +28,8 @@
*/
#include <dt-bindings/interconnect/qcom,msm8974.h>
+
+#include <linux/args.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect-provider.h>
@@ -231,7 +233,7 @@ struct msm8974_icc_desc {
.buswidth = _buswidth, \
.mas_rpm_id = _mas_rpm_id, \
.slv_rpm_id = _slv_rpm_id, \
- .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
+ .num_links = COUNT_ARGS(__VA_ARGS__), \
.links = { __VA_ARGS__ }, \
}
diff --git a/drivers/interconnect/qcom/msm8996.c b/drivers/interconnect/qcom/msm8996.c
index 88683dfa468f..b73566c9b21f 100644
--- a/drivers/interconnect/qcom/msm8996.c
+++ b/drivers/interconnect/qcom/msm8996.c
@@ -448,6 +448,7 @@ static struct qcom_icc_node mas_mdp_p0 = {
.name = "mas_mdp_p0",
.id = MSM8996_MASTER_MDP_PORT0,
.buswidth = 32,
+ .ib_coeff = 25,
.mas_rpm_id = 8,
.slv_rpm_id = -1,
.qos.ap_owned = true,
@@ -463,6 +464,7 @@ static struct qcom_icc_node mas_mdp_p1 = {
.name = "mas_mdp_p1",
.id = MSM8996_MASTER_MDP_PORT1,
.buswidth = 32,
+ .ib_coeff = 25,
.mas_rpm_id = 61,
.slv_rpm_id = -1,
.qos.ap_owned = true,
@@ -1889,7 +1891,8 @@ static const struct qcom_icc_desc msm8996_bimc = {
.nodes = bimc_nodes,
.num_nodes = ARRAY_SIZE(bimc_nodes),
.bus_clk_desc = &bimc_clk,
- .regmap_cfg = &msm8996_bimc_regmap_config
+ .regmap_cfg = &msm8996_bimc_regmap_config,
+ .ab_coeff = 154,
};
static struct qcom_icc_node * const cnoc_nodes[] = {
@@ -2004,7 +2007,8 @@ static const struct qcom_icc_desc msm8996_mnoc = {
.bus_clk_desc = &mmaxi_0_clk,
.intf_clocks = mm_intf_clocks,
.num_intf_clocks = ARRAY_SIZE(mm_intf_clocks),
- .regmap_cfg = &msm8996_mnoc_regmap_config
+ .regmap_cfg = &msm8996_mnoc_regmap_config,
+ .ab_coeff = 154,
};
static struct qcom_icc_node * const pnoc_nodes[] = {
diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
index dc321bb86d0b..e97478bbc282 100644
--- a/drivers/interconnect/qcom/osm-l3.c
+++ b/drivers/interconnect/qcom/osm-l3.c
@@ -3,6 +3,7 @@
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*/
+#include <linux/args.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/interconnect-provider.h>
@@ -78,7 +79,7 @@ enum {
.name = #_name, \
.id = _id, \
.buswidth = _buswidth, \
- .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
+ .num_links = COUNT_ARGS(__VA_ARGS__), \
.links = { __VA_ARGS__ }, \
}
diff --git a/drivers/interconnect/qcom/qcm2290.c b/drivers/interconnect/qcom/qcm2290.c
index 5bc4b7516608..b88cf9a022e0 100644
--- a/drivers/interconnect/qcom/qcm2290.c
+++ b/drivers/interconnect/qcom/qcm2290.c
@@ -112,6 +112,9 @@ static struct qcom_icc_node mas_appss_proc = {
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.prio_level = 0,
.qos.areq_prio = 0,
+ .bus_clk_desc = &mem_1_clk,
+ .ab_coeff = 159,
+ .ib_coeff = 96,
.mas_rpm_id = 0,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_appss_proc_links),
@@ -675,7 +678,8 @@ static struct qcom_icc_node mas_gfx3d = {
static struct qcom_icc_node slv_ebi1 = {
.name = "slv_ebi1",
.id = QCM2290_SLAVE_EBI1,
- .buswidth = 8,
+ .buswidth = 4,
+ .channels = 2,
.mas_rpm_id = -1,
.slv_rpm_id = 0,
};
@@ -1199,6 +1203,7 @@ static const struct qcom_icc_desc qcm2290_bimc = {
.keep_alive = true,
/* M_REG_BASE() in vendor msm_bus_bimc_adhoc driver */
.qos_offset = 0x8000,
+ .ab_coeff = 153,
};
static struct qcom_icc_node * const qcm2290_cnoc_nodes[] = {
@@ -1329,6 +1334,7 @@ static const struct qcom_icc_desc qcm2290_mmnrt_virt = {
.regmap_cfg = &qcm2290_snoc_regmap_config,
.keep_alive = true,
.qos_offset = 0x15000,
+ .ab_coeff = 142,
};
static struct qcom_icc_node * const qcm2290_mmrt_virt_nodes[] = {
@@ -1345,6 +1351,7 @@ static const struct qcom_icc_desc qcm2290_mmrt_virt = {
.regmap_cfg = &qcm2290_snoc_regmap_config,
.keep_alive = true,
.qos_offset = 0x15000,
+ .ab_coeff = 139,
};
static const struct of_device_id qcm2290_noc_of_match[] = {
diff --git a/drivers/interconnect/qcom/qdu1000.c b/drivers/interconnect/qcom/qdu1000.c
index bf800dd7d4ba..9cb477d2bdfe 100644
--- a/drivers/interconnect/qcom/qdu1000.c
+++ b/drivers/interconnect/qcom/qdu1000.c
@@ -769,6 +769,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
+ .enable_mask = BIT(3),
.num_nodes = 1,
.nodes = { &ebi },
};
@@ -1045,7 +1046,7 @@ MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qnoc_probe,
- .remove = qcom_icc_rpmh_remove,
+ .remove_new = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-qdu1000",
.of_match_table = qnoc_of_match,
diff --git a/drivers/interconnect/qcom/sa8775p.c b/drivers/interconnect/qcom/sa8775p.c
index ef1b5e326089..dd6281db08ad 100644
--- a/drivers/interconnect/qcom/sa8775p.c
+++ b/drivers/interconnect/qcom/sa8775p.c
@@ -2519,7 +2519,7 @@ MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
- .remove = qcom_icc_rpmh_remove,
+ .remove_new = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sa8775p",
.of_match_table = qnoc_of_match,
diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c
index d94ab9b39f3d..34a1d163d6e1 100644
--- a/drivers/interconnect/qcom/sc7180.c
+++ b/drivers/interconnect/qcom/sc7180.c
@@ -1238,6 +1238,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
+ .enable_mask = BIT(3),
.keepalive = false,
.num_nodes = 1,
.nodes = { &ebi },
@@ -1806,7 +1807,7 @@ MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
- .remove = qcom_icc_rpmh_remove,
+ .remove_new = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sc7180",
.of_match_table = qnoc_of_match,
diff --git a/drivers/interconnect/qcom/sc7280.c b/drivers/interconnect/qcom/sc7280.c
index 6592839b4d94..7d33694368e8 100644
--- a/drivers/interconnect/qcom/sc7280.c
+++ b/drivers/interconnect/qcom/sc7280.c
@@ -1285,6 +1285,7 @@ static struct qcom_icc_node srvc_snoc = {
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
+ .enable_mask = BIT(3),
.num_nodes = 1,
.nodes = { &ebi },
};
@@ -1834,7 +1835,7 @@ MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
- .remove = qcom_icc_rpmh_remove,
+ .remove_new = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sc7280",
.of_match_table = qnoc_of_match,
diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c
index 0fb4898dabcf..20331e119beb 100644
--- a/drivers/interconnect/qcom/sc8180x.c
+++ b/drivers/interconnect/qcom/sc8180x.c
@@ -1345,6 +1345,7 @@ static struct qcom_icc_node slv_qup_core_2 = {
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
+ .enable_mask = BIT(3),
.num_nodes = 1,
.nodes = { &slv_ebi }
};
@@ -1887,7 +1888,7 @@ MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
- .remove = qcom_icc_rpmh_remove,
+ .remove_new = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sc8180x",
.of_match_table = qnoc_of_match,
diff --git a/drivers/interconnect/qcom/sc8280xp.c b/drivers/interconnect/qcom/sc8280xp.c
index b82c5493cbb5..7acd152bf0dd 100644
--- a/drivers/interconnect/qcom/sc8280xp.c
+++ b/drivers/interconnect/qcom/sc8280xp.c
@@ -1712,6 +1712,7 @@ static struct qcom_icc_node srvc_snoc = {
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
+ .enable_mask = BIT(3),
.num_nodes = 1,
.nodes = { &ebi },
};
@@ -2390,7 +2391,7 @@ MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
- .remove = qcom_icc_rpmh_remove,
+ .remove_new = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sc8280xp",
.of_match_table = qnoc_of_match,
diff --git a/drivers/interconnect/qcom/sdm660.c b/drivers/interconnect/qcom/sdm660.c
index 36962f7bd7bb..7392bebba334 100644
--- a/drivers/interconnect/qcom/sdm660.c
+++ b/drivers/interconnect/qcom/sdm660.c
@@ -602,6 +602,7 @@ static struct qcom_icc_node mas_mdp_p0 = {
.name = "mas_mdp_p0",
.id = SDM660_MASTER_MDP_P0,
.buswidth = 16,
+ .ib_coeff = 50,
.mas_rpm_id = 8,
.slv_rpm_id = -1,
.qos.ap_owned = true,
@@ -621,6 +622,7 @@ static struct qcom_icc_node mas_mdp_p1 = {
.name = "mas_mdp_p1",
.id = SDM660_MASTER_MDP_P1,
.buswidth = 16,
+ .ib_coeff = 50,
.mas_rpm_id = 61,
.slv_rpm_id = -1,
.qos.ap_owned = true,
@@ -1540,6 +1542,7 @@ static const struct qcom_icc_desc sdm660_bimc = {
.num_nodes = ARRAY_SIZE(sdm660_bimc_nodes),
.bus_clk_desc = &bimc_clk,
.regmap_cfg = &sdm660_bimc_regmap_config,
+ .ab_coeff = 153,
};
static struct qcom_icc_node * const sdm660_cnoc_nodes[] = {
@@ -1659,6 +1662,7 @@ static const struct qcom_icc_desc sdm660_mnoc = {
.intf_clocks = mm_intf_clocks,
.num_intf_clocks = ARRAY_SIZE(mm_intf_clocks),
.regmap_cfg = &sdm660_mnoc_regmap_config,
+ .ab_coeff = 153,
};
static struct qcom_icc_node * const sdm660_snoc_nodes[] = {
diff --git a/drivers/interconnect/qcom/sdm670.c b/drivers/interconnect/qcom/sdm670.c
index 540a2108b77c..e5ee7fbaa641 100644
--- a/drivers/interconnect/qcom/sdm670.c
+++ b/drivers/interconnect/qcom/sdm670.c
@@ -1047,6 +1047,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
+ .enable_mask = BIT(3),
.keepalive = false,
.num_nodes = 1,
.nodes = { &ebi },
@@ -1532,7 +1533,7 @@ MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
- .remove = qcom_icc_rpmh_remove,
+ .remove_new = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sdm670",
.of_match_table = qnoc_of_match,
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
index b9243c0aa626..584800ac871a 100644
--- a/drivers/interconnect/qcom/sdm845.c
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -1265,6 +1265,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
+ .enable_mask = BIT(3),
.keepalive = false,
.num_nodes = 1,
.nodes = { &ebi },
@@ -1801,7 +1802,7 @@ MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
- .remove = qcom_icc_rpmh_remove,
+ .remove_new = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sdm845",
.of_match_table = qnoc_of_match,
diff --git a/drivers/interconnect/qcom/sdx55.c b/drivers/interconnect/qcom/sdx55.c
index 4117db046fa0..e97f28b8d2b2 100644
--- a/drivers/interconnect/qcom/sdx55.c
+++ b/drivers/interconnect/qcom/sdx55.c
@@ -913,7 +913,7 @@ MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
- .remove = qcom_icc_rpmh_remove,
+ .remove_new = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sdx55",
.of_match_table = qnoc_of_match,
diff --git a/drivers/interconnect/qcom/sdx65.c b/drivers/interconnect/qcom/sdx65.c
index d3a6c6c148e5..2f3f5479d8a5 100644
--- a/drivers/interconnect/qcom/sdx65.c
+++ b/drivers/interconnect/qcom/sdx65.c
@@ -897,7 +897,7 @@ MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
- .remove = qcom_icc_rpmh_remove,
+ .remove_new = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sdx65",
.of_match_table = qnoc_of_match,
diff --git a/drivers/interconnect/qcom/sdx75.c b/drivers/interconnect/qcom/sdx75.c
new file mode 100644
index 000000000000..7f422c27488d
--- /dev/null
+++ b/drivers/interconnect/qcom/sdx75.c
@@ -0,0 +1,1107 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,sdx75.h>
+
+#include "bcm-voter.h"
+#include "icc-common.h"
+#include "icc-rpmh.h"
+#include "sdx75.h"
+
+static struct qcom_icc_node qpic_core_master = {
+ .name = "qpic_core_master",
+ .id = SDX75_MASTER_QPIC_CORE,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_QPIC_CORE },
+};
+
+static struct qcom_icc_node qup0_core_master = {
+ .name = "qup0_core_master",
+ .id = SDX75_MASTER_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_QUP_CORE_0 },
+};
+
+static struct qcom_icc_node qnm_cnoc = {
+ .name = "qnm_cnoc",
+ .id = SDX75_MASTER_CNOC_DC_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 4,
+ .links = { SDX75_SLAVE_LAGG_CFG, SDX75_SLAVE_MCCC_MASTER,
+ SDX75_SLAVE_GEM_NOC_CFG, SDX75_SLAVE_SNOOP_BWMON },
+};
+
+static struct qcom_icc_node alm_sys_tcu = {
+ .name = "alm_sys_tcu",
+ .id = SDX75_MASTER_SYS_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SDX75_SLAVE_GEM_NOC_CNOC, SDX75_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node chm_apps = {
+ .name = "chm_apps",
+ .id = SDX75_MASTER_APPSS_PROC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SDX75_SLAVE_GEM_NOC_CNOC, SDX75_SLAVE_LLCC,
+ SDX75_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cfg = {
+ .name = "qnm_gemnoc_cfg",
+ .id = SDX75_MASTER_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_SERVICE_GEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdsp = {
+ .name = "qnm_mdsp",
+ .id = SDX75_MASTER_MSS_PROC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SDX75_SLAVE_GEM_NOC_CNOC, SDX75_SLAVE_LLCC,
+ SDX75_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_pcie = {
+ .name = "qnm_pcie",
+ .id = SDX75_MASTER_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 2,
+ .links = { SDX75_SLAVE_GEM_NOC_CNOC, SDX75_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .id = SDX75_MASTER_SNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SDX75_SLAVE_GEM_NOC_CNOC, SDX75_SLAVE_LLCC,
+ SDX75_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .id = SDX75_MASTER_GIC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node xm_ipa2pcie = {
+ .name = "xm_ipa2pcie",
+ .id = SDX75_MASTER_IPA_PCIE,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .id = SDX75_MASTER_LLCC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_EBI1 },
+};
+
+static struct qcom_icc_node xm_pcie3_0 = {
+ .name = "xm_pcie3_0",
+ .id = SDX75_MASTER_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_1 = {
+ .name = "xm_pcie3_1",
+ .id = SDX75_MASTER_PCIE_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_2 = {
+ .name = "xm_pcie3_2",
+ .id = SDX75_MASTER_PCIE_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node qhm_audio = {
+ .name = "qhm_audio",
+ .id = SDX75_MASTER_AUDIO,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qhm_gic = {
+ .name = "qhm_gic",
+ .id = SDX75_MASTER_GIC_AHB,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qhm_pcie_rscc = {
+ .name = "qhm_pcie_rscc",
+ .id = SDX75_MASTER_PCIE_RSCC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 31,
+ .links = { SDX75_SLAVE_ETH0_CFG, SDX75_SLAVE_ETH1_CFG,
+ SDX75_SLAVE_AUDIO, SDX75_SLAVE_CLK_CTL,
+ SDX75_SLAVE_CRYPTO_0_CFG, SDX75_SLAVE_IMEM_CFG,
+ SDX75_SLAVE_IPA_CFG, SDX75_SLAVE_IPC_ROUTER_CFG,
+ SDX75_SLAVE_CNOC_MSS, SDX75_SLAVE_ICBDI_MVMSS_CFG,
+ SDX75_SLAVE_PCIE_0_CFG, SDX75_SLAVE_PCIE_1_CFG,
+ SDX75_SLAVE_PCIE_2_CFG, SDX75_SLAVE_PDM,
+ SDX75_SLAVE_PRNG, SDX75_SLAVE_QDSS_CFG,
+ SDX75_SLAVE_QPIC, SDX75_SLAVE_QUP_0,
+ SDX75_SLAVE_SDCC_1, SDX75_SLAVE_SDCC_4,
+ SDX75_SLAVE_SPMI_VGI_COEX, SDX75_SLAVE_TCSR,
+ SDX75_SLAVE_TLMM, SDX75_SLAVE_USB3,
+ SDX75_SLAVE_USB3_PHY_CFG, SDX75_SLAVE_DDRSS_CFG,
+ SDX75_SLAVE_SNOC_CFG, SDX75_SLAVE_PCIE_ANOC_CFG,
+ SDX75_SLAVE_IMEM, SDX75_SLAVE_QDSS_STM,
+ SDX75_SLAVE_TCU },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+ .name = "qhm_qdss_bam",
+ .id = SDX75_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_A1NOC_CFG },
+};
+
+static struct qcom_icc_node qhm_qpic = {
+ .name = "qhm_qpic",
+ .id = SDX75_MASTER_QPIC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_A1NOC_CFG },
+};
+
+static struct qcom_icc_node qhm_qup0 = {
+ .name = "qhm_qup0",
+ .id = SDX75_MASTER_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_A1NOC_CFG },
+};
+
+static struct qcom_icc_node qnm_aggre_noc = {
+ .name = "qnm_aggre_noc",
+ .id = SDX75_MASTER_ANOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cnoc = {
+ .name = "qnm_gemnoc_cnoc",
+ .id = SDX75_MASTER_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 32,
+ .links = { SDX75_SLAVE_ETH0_CFG, SDX75_SLAVE_ETH1_CFG,
+ SDX75_SLAVE_AUDIO, SDX75_SLAVE_CLK_CTL,
+ SDX75_SLAVE_CRYPTO_0_CFG, SDX75_SLAVE_IMEM_CFG,
+ SDX75_SLAVE_IPA_CFG, SDX75_SLAVE_IPC_ROUTER_CFG,
+ SDX75_SLAVE_CNOC_MSS, SDX75_SLAVE_ICBDI_MVMSS_CFG,
+ SDX75_SLAVE_PCIE_0_CFG, SDX75_SLAVE_PCIE_1_CFG,
+ SDX75_SLAVE_PCIE_2_CFG, SDX75_SLAVE_PCIE_RSC_CFG,
+ SDX75_SLAVE_PDM, SDX75_SLAVE_PRNG,
+ SDX75_SLAVE_QDSS_CFG, SDX75_SLAVE_QPIC,
+ SDX75_SLAVE_QUP_0, SDX75_SLAVE_SDCC_1,
+ SDX75_SLAVE_SDCC_4, SDX75_SLAVE_SPMI_VGI_COEX,
+ SDX75_SLAVE_TCSR, SDX75_SLAVE_TLMM,
+ SDX75_SLAVE_USB3, SDX75_SLAVE_USB3_PHY_CFG,
+ SDX75_SLAVE_DDRSS_CFG, SDX75_SLAVE_SNOC_CFG,
+ SDX75_SLAVE_PCIE_ANOC_CFG, SDX75_SLAVE_IMEM,
+ SDX75_SLAVE_QDSS_STM, SDX75_SLAVE_TCU },
+};
+
+static struct qcom_icc_node qnm_gemnoc_pcie = {
+ .name = "qnm_gemnoc_pcie",
+ .id = SDX75_MASTER_GEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SDX75_SLAVE_PCIE_0, SDX75_SLAVE_PCIE_1,
+ SDX75_SLAVE_PCIE_2 },
+};
+
+static struct qcom_icc_node qnm_system_noc_cfg = {
+ .name = "qnm_system_noc_cfg",
+ .id = SDX75_MASTER_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_SERVICE_SNOC },
+};
+
+static struct qcom_icc_node qnm_system_noc_pcie_cfg = {
+ .name = "qnm_system_noc_pcie_cfg",
+ .id = SDX75_MASTER_PCIE_ANOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_SERVICE_PCIE_ANOC },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .id = SDX75_MASTER_CRYPTO,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_A1NOC_CFG },
+};
+
+static struct qcom_icc_node qxm_ipa = {
+ .name = "qxm_ipa",
+ .id = SDX75_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qxm_mvmss = {
+ .name = "qxm_mvmss",
+ .id = SDX75_MASTER_MVMSS,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_A1NOC_CFG },
+};
+
+static struct qcom_icc_node xm_emac_0 = {
+ .name = "xm_emac_0",
+ .id = SDX75_MASTER_EMAC_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_A1NOC_CFG },
+};
+
+static struct qcom_icc_node xm_emac_1 = {
+ .name = "xm_emac_1",
+ .id = SDX75_MASTER_EMAC_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_A1NOC_CFG },
+};
+
+static struct qcom_icc_node xm_qdss_etr0 = {
+ .name = "xm_qdss_etr0",
+ .id = SDX75_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_A1NOC_CFG },
+};
+
+static struct qcom_icc_node xm_qdss_etr1 = {
+ .name = "xm_qdss_etr1",
+ .id = SDX75_MASTER_QDSS_ETR_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_A1NOC_CFG },
+};
+
+static struct qcom_icc_node xm_sdc1 = {
+ .name = "xm_sdc1",
+ .id = SDX75_MASTER_SDCC_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_A1NOC_CFG },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+ .name = "xm_sdc4",
+ .id = SDX75_MASTER_SDCC_4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_A1NOC_CFG },
+};
+
+static struct qcom_icc_node xm_usb3 = {
+ .name = "xm_usb3",
+ .id = SDX75_MASTER_USB3_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX75_SLAVE_A1NOC_CFG },
+};
+
+static struct qcom_icc_node qpic_core_slave = {
+ .name = "qpic_core_slave",
+ .id = SDX75_SLAVE_QPIC_CORE,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qup0_core_slave = {
+ .name = "qup0_core_slave",
+ .id = SDX75_SLAVE_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_lagg = {
+ .name = "qhs_lagg",
+ .id = SDX75_SLAVE_LAGG_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_mccc_master = {
+ .name = "qhs_mccc_master",
+ .id = SDX75_SLAVE_MCCC_MASTER,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_gemnoc = {
+ .name = "qns_gemnoc",
+ .id = SDX75_SLAVE_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qss_snoop_bwmon = {
+ .name = "qss_snoop_bwmon",
+ .id = SDX75_SLAVE_SNOOP_BWMON,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_gemnoc_cnoc = {
+ .name = "qns_gemnoc_cnoc",
+ .id = SDX75_SLAVE_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX75_MASTER_GEM_NOC_CNOC },
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .id = SDX75_SLAVE_LLCC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SDX75_MASTER_LLCC },
+};
+
+static struct qcom_icc_node qns_pcie = {
+ .name = "qns_pcie",
+ .id = SDX75_SLAVE_MEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SDX75_MASTER_GEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node srvc_gemnoc = {
+ .name = "srvc_gemnoc",
+ .id = SDX75_SLAVE_SERVICE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = SDX75_SLAVE_EBI1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_pcie_gemnoc = {
+ .name = "qns_pcie_gemnoc",
+ .id = SDX75_SLAVE_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SDX75_MASTER_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node ps_eth0_cfg = {
+ .name = "ps_eth0_cfg",
+ .id = SDX75_SLAVE_ETH0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node ps_eth1_cfg = {
+ .name = "ps_eth1_cfg",
+ .id = SDX75_SLAVE_ETH1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_audio = {
+ .name = "qhs_audio",
+ .id = SDX75_SLAVE_AUDIO,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = SDX75_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_crypto_cfg = {
+ .name = "qhs_crypto_cfg",
+ .id = SDX75_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = SDX75_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+ .name = "qhs_ipa",
+ .id = SDX75_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ipc_router = {
+ .name = "qhs_ipc_router",
+ .id = SDX75_SLAVE_IPC_ROUTER_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_mss_cfg = {
+ .name = "qhs_mss_cfg",
+ .id = SDX75_SLAVE_CNOC_MSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_mvmss_cfg = {
+ .name = "qhs_mvmss_cfg",
+ .id = SDX75_SLAVE_ICBDI_MVMSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie0_cfg = {
+ .name = "qhs_pcie0_cfg",
+ .id = SDX75_SLAVE_PCIE_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie1_cfg = {
+ .name = "qhs_pcie1_cfg",
+ .id = SDX75_SLAVE_PCIE_1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie2_cfg = {
+ .name = "qhs_pcie2_cfg",
+ .id = SDX75_SLAVE_PCIE_2_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie_rscc = {
+ .name = "qhs_pcie_rscc",
+ .id = SDX75_SLAVE_PCIE_RSC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .id = SDX75_SLAVE_PDM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_prng = {
+ .name = "qhs_prng",
+ .id = SDX75_SLAVE_PRNG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = SDX75_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qpic = {
+ .name = "qhs_qpic",
+ .id = SDX75_SLAVE_QPIC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup0 = {
+ .name = "qhs_qup0",
+ .id = SDX75_SLAVE_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_sdc1 = {
+ .name = "qhs_sdc1",
+ .id = SDX75_SLAVE_SDCC_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+ .name = "qhs_sdc4",
+ .id = SDX75_SLAVE_SDCC_4,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_spmi_vgi_coex = {
+ .name = "qhs_spmi_vgi_coex",
+ .id = SDX75_SLAVE_SPMI_VGI_COEX,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = SDX75_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+ .name = "qhs_tlmm",
+ .id = SDX75_SLAVE_TLMM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb3 = {
+ .name = "qhs_usb3",
+ .id = SDX75_SLAVE_USB3,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb3_phy = {
+ .name = "qhs_usb3_phy",
+ .id = SDX75_SLAVE_USB3_PHY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_a1noc = {
+ .name = "qns_a1noc",
+ .id = SDX75_SLAVE_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX75_MASTER_ANOC_SNOC },
+};
+
+static struct qcom_icc_node qns_ddrss_cfg = {
+ .name = "qns_ddrss_cfg",
+ .id = SDX75_SLAVE_DDRSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDX75_MASTER_CNOC_DC_NOC },
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+ .name = "qns_gemnoc_sf",
+ .id = SDX75_SLAVE_SNOC_GEM_NOC_SF,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SDX75_MASTER_SNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_system_noc_cfg = {
+ .name = "qns_system_noc_cfg",
+ .id = SDX75_SLAVE_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDX75_MASTER_SNOC_CFG },
+};
+
+static struct qcom_icc_node qns_system_noc_pcie_cfg = {
+ .name = "qns_system_noc_pcie_cfg",
+ .id = SDX75_SLAVE_PCIE_ANOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDX75_MASTER_PCIE_ANOC_CFG },
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = SDX75_SLAVE_IMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node srvc_pcie_system_noc = {
+ .name = "srvc_pcie_system_noc",
+ .id = SDX75_SLAVE_SERVICE_PCIE_ANOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node srvc_system_noc = {
+ .name = "srvc_system_noc",
+ .id = SDX75_SLAVE_SERVICE_SNOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_0 = {
+ .name = "xs_pcie_0",
+ .id = SDX75_SLAVE_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_1 = {
+ .name = "xs_pcie_1",
+ .id = SDX75_SLAVE_PCIE_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_2 = {
+ .name = "xs_pcie_2",
+ .id = SDX75_SLAVE_PCIE_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = SDX75_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = SDX75_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .keepalive = true,
+ .num_nodes = 39,
+ .nodes = { &qhm_pcie_rscc, &qnm_gemnoc_cnoc,
+ &ps_eth0_cfg, &ps_eth1_cfg,
+ &qhs_audio, &qhs_clk_ctl,
+ &qhs_crypto_cfg, &qhs_imem_cfg,
+ &qhs_ipa, &qhs_ipc_router,
+ &qhs_mss_cfg, &qhs_mvmss_cfg,
+ &qhs_pcie0_cfg, &qhs_pcie1_cfg,
+ &qhs_pcie2_cfg, &qhs_pcie_rscc,
+ &qhs_pdm, &qhs_prng,
+ &qhs_qdss_cfg, &qhs_qpic,
+ &qhs_qup0, &qhs_sdc1,
+ &qhs_sdc4, &qhs_spmi_vgi_coex,
+ &qhs_tcsr, &qhs_tlmm,
+ &qhs_usb3, &qhs_usb3_phy,
+ &qns_ddrss_cfg, &qns_system_noc_cfg,
+ &qns_system_noc_pcie_cfg, &qxs_imem,
+ &srvc_pcie_system_noc, &srvc_system_noc,
+ &xs_pcie_0, &xs_pcie_1,
+ &xs_pcie_2, &xs_qdss_stm,
+ &xs_sys_tcu_cfg },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_qp0 = {
+ .name = "QP0",
+ .num_nodes = 1,
+ .nodes = { &qpic_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup0_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_sh1 = {
+ .name = "SH1",
+ .num_nodes = 10,
+ .nodes = { &alm_sys_tcu, &chm_apps,
+ &qnm_gemnoc_cfg, &qnm_mdsp,
+ &qnm_snoc_sf, &xm_gic,
+ &xm_ipa2pcie, &qns_gemnoc_cnoc,
+ &qns_pcie, &srvc_gemnoc },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+ .name = "SN1",
+ .num_nodes = 21,
+ .nodes = { &xm_pcie3_0, &xm_pcie3_1,
+ &xm_pcie3_2, &qhm_audio,
+ &qhm_gic, &qhm_qdss_bam,
+ &qhm_qpic, &qhm_qup0,
+ &qnm_gemnoc_pcie, &qnm_system_noc_cfg,
+ &qnm_system_noc_pcie_cfg, &qxm_crypto,
+ &qxm_ipa, &qxm_mvmss,
+ &xm_emac_0, &xm_emac_1,
+ &xm_qdss_etr0, &xm_qdss_etr1,
+ &xm_sdc1, &xm_sdc4,
+ &xm_usb3 },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .num_nodes = 2,
+ .nodes = { &qnm_aggre_noc, &qns_a1noc },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .num_nodes = 2,
+ .nodes = { &qnm_pcie, &qns_pcie_gemnoc },
+};
+
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
+ &bcm_qp0,
+ &bcm_qup0,
+};
+
+static struct qcom_icc_node * const clk_virt_nodes[] = {
+ [MASTER_QPIC_CORE] = &qpic_core_master,
+ [MASTER_QUP_CORE_0] = &qup0_core_master,
+ [SLAVE_QPIC_CORE] = &qpic_core_slave,
+ [SLAVE_QUP_CORE_0] = &qup0_core_slave,
+};
+
+static const struct qcom_icc_desc sdx75_clk_virt = {
+ .nodes = clk_virt_nodes,
+ .num_nodes = ARRAY_SIZE(clk_virt_nodes),
+ .bcms = clk_virt_bcms,
+ .num_bcms = ARRAY_SIZE(clk_virt_bcms),
+};
+
+static struct qcom_icc_node * const dc_noc_nodes[] = {
+ [MASTER_CNOC_DC_NOC] = &qnm_cnoc,
+ [SLAVE_LAGG_CFG] = &qhs_lagg,
+ [SLAVE_MCCC_MASTER] = &qhs_mccc_master,
+ [SLAVE_GEM_NOC_CFG] = &qns_gemnoc,
+ [SLAVE_SNOOP_BWMON] = &qss_snoop_bwmon,
+};
+
+static const struct qcom_icc_desc sdx75_dc_noc = {
+ .nodes = dc_noc_nodes,
+ .num_nodes = ARRAY_SIZE(dc_noc_nodes),
+};
+
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh1,
+ &bcm_sn4,
+};
+
+static struct qcom_icc_node * const gem_noc_nodes[] = {
+ [MASTER_SYS_TCU] = &alm_sys_tcu,
+ [MASTER_APPSS_PROC] = &chm_apps,
+ [MASTER_GEM_NOC_CFG] = &qnm_gemnoc_cfg,
+ [MASTER_MSS_PROC] = &qnm_mdsp,
+ [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [MASTER_GIC] = &xm_gic,
+ [MASTER_IPA_PCIE] = &xm_ipa2pcie,
+ [SLAVE_GEM_NOC_CNOC] = &qns_gemnoc_cnoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie,
+ [SLAVE_SERVICE_GEM_NOC] = &srvc_gemnoc,
+};
+
+static const struct qcom_icc_desc sdx75_gem_noc = {
+ .nodes = gem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+ .bcms = gem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+ &bcm_mc0,
+};
+
+static struct qcom_icc_node * const mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI1] = &ebi,
+};
+
+static const struct qcom_icc_desc sdx75_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const pcie_anoc_bcms[] = {
+ &bcm_sn1,
+ &bcm_sn4,
+};
+
+static struct qcom_icc_node * const pcie_anoc_nodes[] = {
+ [MASTER_PCIE_0] = &xm_pcie3_0,
+ [MASTER_PCIE_1] = &xm_pcie3_1,
+ [MASTER_PCIE_2] = &xm_pcie3_2,
+ [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_gemnoc,
+};
+
+static const struct qcom_icc_desc sdx75_pcie_anoc = {
+ .nodes = pcie_anoc_nodes,
+ .num_nodes = ARRAY_SIZE(pcie_anoc_nodes),
+ .bcms = pcie_anoc_bcms,
+ .num_bcms = ARRAY_SIZE(pcie_anoc_bcms),
+};
+
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
+ &bcm_ce0,
+ &bcm_cn0,
+ &bcm_sn0,
+ &bcm_sn1,
+ &bcm_sn2,
+};
+
+static struct qcom_icc_node * const system_noc_nodes[] = {
+ [MASTER_AUDIO] = &qhm_audio,
+ [MASTER_GIC_AHB] = &qhm_gic,
+ [MASTER_PCIE_RSCC] = &qhm_pcie_rscc,
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_QPIC] = &qhm_qpic,
+ [MASTER_QUP_0] = &qhm_qup0,
+ [MASTER_ANOC_SNOC] = &qnm_aggre_noc,
+ [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
+ [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
+ [MASTER_SNOC_CFG] = &qnm_system_noc_cfg,
+ [MASTER_PCIE_ANOC_CFG] = &qnm_system_noc_pcie_cfg,
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_MVMSS] = &qxm_mvmss,
+ [MASTER_EMAC_0] = &xm_emac_0,
+ [MASTER_EMAC_1] = &xm_emac_1,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr0,
+ [MASTER_QDSS_ETR_1] = &xm_qdss_etr1,
+ [MASTER_SDCC_1] = &xm_sdc1,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_USB3_0] = &xm_usb3,
+ [SLAVE_ETH0_CFG] = &ps_eth0_cfg,
+ [SLAVE_ETH1_CFG] = &ps_eth1_cfg,
+ [SLAVE_AUDIO] = &qhs_audio,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
+ [SLAVE_CNOC_MSS] = &qhs_mss_cfg,
+ [SLAVE_ICBDI_MVMSS_CFG] = &qhs_mvmss_cfg,
+ [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
+ [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
+ [SLAVE_PCIE_2_CFG] = &qhs_pcie2_cfg,
+ [SLAVE_PCIE_RSC_CFG] = &qhs_pcie_rscc,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QPIC] = &qhs_qpic,
+ [SLAVE_QUP_0] = &qhs_qup0,
+ [SLAVE_SDCC_1] = &qhs_sdc1,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SPMI_VGI_COEX] = &qhs_spmi_vgi_coex,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM] = &qhs_tlmm,
+ [SLAVE_USB3] = &qhs_usb3,
+ [SLAVE_USB3_PHY_CFG] = &qhs_usb3_phy,
+ [SLAVE_A1NOC_CFG] = &qns_a1noc,
+ [SLAVE_DDRSS_CFG] = &qns_ddrss_cfg,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+ [SLAVE_SNOC_CFG] = &qns_system_noc_cfg,
+ [SLAVE_PCIE_ANOC_CFG] = &qns_system_noc_pcie_cfg,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_SERVICE_PCIE_ANOC] = &srvc_pcie_system_noc,
+ [SLAVE_SERVICE_SNOC] = &srvc_system_noc,
+ [SLAVE_PCIE_0] = &xs_pcie_0,
+ [SLAVE_PCIE_1] = &xs_pcie_1,
+ [SLAVE_PCIE_2] = &xs_pcie_2,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct qcom_icc_desc sdx75_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sdx75-clk-virt", .data = &sdx75_clk_virt },
+ { .compatible = "qcom,sdx75-dc-noc", .data = &sdx75_dc_noc },
+ { .compatible = "qcom,sdx75-gem-noc", .data = &sdx75_gem_noc },
+ { .compatible = "qcom,sdx75-mc-virt", .data = &sdx75_mc_virt },
+ { .compatible = "qcom,sdx75-pcie-anoc", .data = &sdx75_pcie_anoc },
+ { .compatible = "qcom,sdx75-system-noc", .data = &sdx75_system_noc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qcom_icc_rpmh_probe,
+ .remove_new = qcom_icc_rpmh_remove,
+ .driver = {
+ .name = "qnoc-sdx75",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+
+static int __init qnoc_driver_init(void)
+{
+ return platform_driver_register(&qnoc_driver);
+}
+core_initcall(qnoc_driver_init);
+
+static void __exit qnoc_driver_exit(void)
+{
+ platform_driver_unregister(&qnoc_driver);
+}
+module_exit(qnoc_driver_exit);
+
+MODULE_DESCRIPTION("SDX75 NoC driver");
+MODULE_LICENSE("GPL");
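The new SDX75 provider exposes the masters and slaves defined above as interconnect paths. Purely as an illustrative consumer sketch (the path name and bandwidth numbers below are made up, not part of this patch), a client driver would request bandwidth through the standard interconnect API:

#include <linux/interconnect.h>

static int example_request_ddr_bw(struct device *dev)
{
	struct icc_path *path;
	int ret;

	/* "memory" is a hypothetical interconnect-names entry */
	path = of_icc_get(dev, "memory");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* average / peak bandwidth in kBps; values are placeholders */
	ret = icc_set_bw(path, 100000, 200000);

	icc_put(path);
	return ret;
}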
diff --git a/drivers/interconnect/qcom/sdx75.h b/drivers/interconnect/qcom/sdx75.h
new file mode 100644
index 000000000000..24e887159920
--- /dev/null
+++ b/drivers/interconnect/qcom/sdx75.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SDX75_H
+#define __DRIVERS_INTERCONNECT_QCOM_SDX75_H
+
+#define SDX75_MASTER_ANOC_PCIE_GEM_NOC 0
+#define SDX75_MASTER_ANOC_SNOC 1
+#define SDX75_MASTER_APPSS_PROC 2
+#define SDX75_MASTER_AUDIO 3
+#define SDX75_MASTER_CNOC_DC_NOC 4
+#define SDX75_MASTER_CRYPTO 5
+#define SDX75_MASTER_EMAC_0 6
+#define SDX75_MASTER_EMAC_1 7
+#define SDX75_MASTER_GEM_NOC_CFG 8
+#define SDX75_MASTER_GEM_NOC_CNOC 9
+#define SDX75_MASTER_GEM_NOC_PCIE_SNOC 10
+#define SDX75_MASTER_GIC 11
+#define SDX75_MASTER_GIC_AHB 12
+#define SDX75_MASTER_IPA 13
+#define SDX75_MASTER_IPA_PCIE 14
+#define SDX75_MASTER_LLCC 15
+#define SDX75_MASTER_MSS_PROC 16
+#define SDX75_MASTER_MVMSS 17
+#define SDX75_MASTER_PCIE_0 18
+#define SDX75_MASTER_PCIE_1 19
+#define SDX75_MASTER_PCIE_2 20
+#define SDX75_MASTER_PCIE_ANOC_CFG 21
+#define SDX75_MASTER_PCIE_RSCC 22
+#define SDX75_MASTER_QDSS_BAM 23
+#define SDX75_MASTER_QDSS_ETR 24
+#define SDX75_MASTER_QDSS_ETR_1 25
+#define SDX75_MASTER_QPIC 26
+#define SDX75_MASTER_QPIC_CORE 27
+#define SDX75_MASTER_QUP_0 28
+#define SDX75_MASTER_QUP_CORE_0 29
+#define SDX75_MASTER_SDCC_1 30
+#define SDX75_MASTER_SDCC_4 31
+#define SDX75_MASTER_SNOC_CFG 32
+#define SDX75_MASTER_SNOC_SF_MEM_NOC 33
+#define SDX75_MASTER_SYS_TCU 34
+#define SDX75_MASTER_USB3_0 35
+#define SDX75_SLAVE_A1NOC_CFG 36
+#define SDX75_SLAVE_ANOC_PCIE_GEM_NOC 37
+#define SDX75_SLAVE_AUDIO 38
+#define SDX75_SLAVE_CLK_CTL 39
+#define SDX75_SLAVE_CRYPTO_0_CFG 40
+#define SDX75_SLAVE_CNOC_MSS 41
+#define SDX75_SLAVE_DDRSS_CFG 42
+#define SDX75_SLAVE_EBI1 43
+#define SDX75_SLAVE_ETH0_CFG 44
+#define SDX75_SLAVE_ETH1_CFG 45
+#define SDX75_SLAVE_GEM_NOC_CFG 46
+#define SDX75_SLAVE_GEM_NOC_CNOC 47
+#define SDX75_SLAVE_ICBDI_MVMSS_CFG 48
+#define SDX75_SLAVE_IMEM 49
+#define SDX75_SLAVE_IMEM_CFG 50
+#define SDX75_SLAVE_IPA_CFG 51
+#define SDX75_SLAVE_IPC_ROUTER_CFG 52
+#define SDX75_SLAVE_LAGG_CFG 53
+#define SDX75_SLAVE_LLCC 54
+#define SDX75_SLAVE_MCCC_MASTER 55
+#define SDX75_SLAVE_MEM_NOC_PCIE_SNOC 56
+#define SDX75_SLAVE_PCIE_0 57
+#define SDX75_SLAVE_PCIE_1 58
+#define SDX75_SLAVE_PCIE_2 59
+#define SDX75_SLAVE_PCIE_0_CFG 60
+#define SDX75_SLAVE_PCIE_1_CFG 61
+#define SDX75_SLAVE_PCIE_2_CFG 62
+#define SDX75_SLAVE_PCIE_ANOC_CFG 63
+#define SDX75_SLAVE_PCIE_RSC_CFG 64
+#define SDX75_SLAVE_PDM 65
+#define SDX75_SLAVE_PRNG 66
+#define SDX75_SLAVE_QDSS_CFG 67
+#define SDX75_SLAVE_QDSS_STM 68
+#define SDX75_SLAVE_QPIC 69
+#define SDX75_SLAVE_QPIC_CORE 70
+#define SDX75_SLAVE_QUP_0 71
+#define SDX75_SLAVE_QUP_CORE_0 72
+#define SDX75_SLAVE_SDCC_1 73
+#define SDX75_SLAVE_SDCC_4 74
+#define SDX75_SLAVE_SERVICE_GEM_NOC 75
+#define SDX75_SLAVE_SERVICE_PCIE_ANOC 76
+#define SDX75_SLAVE_SERVICE_SNOC 77
+#define SDX75_SLAVE_SNOC_CFG 78
+#define SDX75_SLAVE_SNOC_GEM_NOC_SF 79
+#define SDX75_SLAVE_SNOOP_BWMON 80
+#define SDX75_SLAVE_SPMI_VGI_COEX 81
+#define SDX75_SLAVE_TCSR 82
+#define SDX75_SLAVE_TCU 83
+#define SDX75_SLAVE_TLMM 84
+#define SDX75_SLAVE_USB3 85
+#define SDX75_SLAVE_USB3_PHY_CFG 86
+
+#endif
diff --git a/drivers/interconnect/qcom/sm6350.c b/drivers/interconnect/qcom/sm6350.c
index 49aed492e9b8..20923e8e6110 100644
--- a/drivers/interconnect/qcom/sm6350.c
+++ b/drivers/interconnect/qcom/sm6350.c
@@ -1164,6 +1164,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
+ .enable_mask = BIT(3),
.keepalive = false,
.num_nodes = 1,
.nodes = { &ebi },
@@ -1701,7 +1702,7 @@ MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
- .remove = qcom_icc_rpmh_remove,
+ .remove_new = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sm6350",
.of_match_table = qnoc_of_match,
diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c
index c7c9cf7f746b..f29b77556a79 100644
--- a/drivers/interconnect/qcom/sm8150.c
+++ b/drivers/interconnect/qcom/sm8150.c
@@ -1282,6 +1282,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
+ .enable_mask = BIT(3),
.keepalive = false,
.num_nodes = 1,
.nodes = { &ebi },
@@ -1863,7 +1864,7 @@ MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
- .remove = qcom_icc_rpmh_remove,
+ .remove_new = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sm8150",
.of_match_table = qnoc_of_match,
diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c
index d4a4ecef11f0..83aeb3eedc19 100644
--- a/drivers/interconnect/qcom/sm8250.c
+++ b/drivers/interconnect/qcom/sm8250.c
@@ -1397,6 +1397,7 @@ static struct qcom_icc_node qup2_core_slave = {
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
+ .enable_mask = BIT(3),
.keepalive = false,
.num_nodes = 1,
.nodes = { &ebi },
@@ -1990,7 +1991,7 @@ MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
- .remove = qcom_icc_rpmh_remove,
+ .remove_new = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sm8250",
.of_match_table = qnoc_of_match,
diff --git a/drivers/interconnect/qcom/sm8350.c b/drivers/interconnect/qcom/sm8350.c
index bdf75839e6d1..b321c3009acb 100644
--- a/drivers/interconnect/qcom/sm8350.c
+++ b/drivers/interconnect/qcom/sm8350.c
@@ -1356,6 +1356,7 @@ static struct qcom_icc_node qns_mem_noc_sf_disp = {
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
+ .enable_mask = BIT(3),
.keepalive = false,
.num_nodes = 1,
.nodes = { &ebi },
@@ -1960,7 +1961,7 @@ MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
- .remove = qcom_icc_rpmh_remove,
+ .remove_new = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sm8350",
.of_match_table = qnoc_of_match,
diff --git a/drivers/interconnect/qcom/sm8450.c b/drivers/interconnect/qcom/sm8450.c
index eb7e17df32ba..b3cd0087377c 100644
--- a/drivers/interconnect/qcom/sm8450.c
+++ b/drivers/interconnect/qcom/sm8450.c
@@ -1884,7 +1884,7 @@ MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
- .remove = qcom_icc_rpmh_remove,
+ .remove_new = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sm8450",
.of_match_table = qnoc_of_match,
diff --git a/drivers/interconnect/qcom/sm8550.c b/drivers/interconnect/qcom/sm8550.c
index a10c8b6549ee..629faa4c9aae 100644
--- a/drivers/interconnect/qcom/sm8550.c
+++ b/drivers/interconnect/qcom/sm8550.c
@@ -2219,7 +2219,7 @@ MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
- .remove = qcom_icc_rpmh_remove,
+ .remove_new = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sm8550",
.of_match_table = qnoc_of_match,
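The .remove to .remove_new conversions repeated across these providers switch to the platform-driver remove callback that returns void instead of int, whose return value the core largely ignores anyway. A minimal sketch of the two prototypes, with placeholder names (qcom_icc_rpmh_remove() itself lives in icc-rpmh.c):

#include <linux/platform_device.h>

/* Shown only for comparison of the two callback shapes. */
static int example_remove_old(struct platform_device *pdev)
{
	return 0;	/* legacy .remove: int return, largely ignored */
}

static void example_remove_new(struct platform_device *pdev)
{
	/* .remove_new: returns void, so cleanup must not "fail" */
}

static struct platform_driver example_driver = {
	.remove_new = example_remove_new,
	.driver = {
		.name = "example",
	},
};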
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index ee9e2a2edbf5..7673bb82945b 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -95,7 +95,7 @@ config IOMMU_DEBUGFS
choice
prompt "IOMMU default domain type"
depends on IOMMU_API
- default IOMMU_DEFAULT_DMA_LAZY if X86
+ default IOMMU_DEFAULT_DMA_LAZY if X86 || S390
default IOMMU_DEFAULT_DMA_STRICT
help
Choose the type of IOMMU domain used to manage DMA API usage by
@@ -150,7 +150,7 @@ config OF_IOMMU
# IOMMU-agnostic DMA-mapping layer
config IOMMU_DMA
- def_bool ARM64 || X86
+ def_bool ARM64 || X86 || S390
select DMA_OPS
select IOMMU_API
select IOMMU_IOVA
@@ -240,17 +240,6 @@ config SUN50I_IOMMU
help
Support for the IOMMU introduced in the Allwinner H6 SoCs.
-config TEGRA_IOMMU_GART
- bool "Tegra GART IOMMU Support"
- depends on ARCH_TEGRA_2x_SOC
- depends on TEGRA_MC
- select IOMMU_API
- help
- Enables support for remapping discontiguous physical memory
- shared with the operating system into contiguous I/O virtual
- space through the GART (Graphics Address Relocation Table)
- hardware included on Tegra SoCs.
-
config TEGRA_IOMMU_SMMU
bool "NVIDIA Tegra SMMU Support"
depends on ARCH_TEGRA
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 769e43d780ce..95ad9dbfbda0 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -20,7 +20,6 @@ obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
obj-$(CONFIG_SUN50I_IOMMU) += sun50i-iommu.o
-obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig
index 8bd4c3b183ec..443b2c13c37b 100644
--- a/drivers/iommu/amd/Kconfig
+++ b/drivers/iommu/amd/Kconfig
@@ -23,15 +23,6 @@ config AMD_IOMMU
your BIOS for an option to enable it or if you have an IVRS ACPI
table.
-config AMD_IOMMU_V2
- tristate "AMD IOMMU Version 2 driver"
- depends on AMD_IOMMU
- select MMU_NOTIFIER
- help
- This option enables support for the AMD IOMMUv2 features of the IOMMU
- hardware. Select this option if you want to use devices that support
- the PCI PRI and PASID interface.
-
config AMD_IOMMU_DEBUGFS
bool "Enable AMD IOMMU internals in DebugFS"
depends on AMD_IOMMU && IOMMU_DEBUGFS
diff --git a/drivers/iommu/amd/Makefile b/drivers/iommu/amd/Makefile
index 773d8aa00283..f454fbb1569e 100644
--- a/drivers/iommu/amd/Makefile
+++ b/drivers/iommu/amd/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
-obj-$(CONFIG_AMD_IOMMU_V2) += iommu_v2.o
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index e2857109e966..86be1edd50ee 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -38,9 +38,6 @@ extern int amd_iommu_guest_ir;
extern enum io_pgtable_fmt amd_iommu_pgtable;
extern int amd_iommu_gpt_level;
-/* IOMMUv2 specific functions */
-struct iommu_domain;
-
bool amd_iommu_v2_supported(void);
struct amd_iommu *get_amd_iommu(unsigned int idx);
u8 amd_iommu_pc_get_max_banks(unsigned int idx);
@@ -51,10 +48,10 @@ int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
u8 fxn, u64 *value);
-int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
-int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
-void amd_iommu_domain_direct_map(struct iommu_domain *dom);
-int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
+/* Device capabilities */
+int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev);
+void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev);
+
int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid, u64 address);
void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
void amd_iommu_domain_update(struct protection_domain *domain);
@@ -87,9 +84,25 @@ static inline bool is_rd890_iommu(struct pci_dev *pdev)
(pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
}
-static inline bool iommu_feature(struct amd_iommu *iommu, u64 mask)
+static inline bool check_feature(u64 mask)
+{
+ return (amd_iommu_efr & mask);
+}
+
+static inline bool check_feature2(u64 mask)
+{
+ return (amd_iommu_efr2 & mask);
+}
+
+static inline int check_feature_gpt_level(void)
+{
+ return ((amd_iommu_efr >> FEATURE_GATS_SHIFT) & FEATURE_GATS_MASK);
+}
+
+static inline bool amd_iommu_gt_ppr_supported(void)
{
- return !!(iommu->features & mask);
+ return (check_feature(FEATURE_GT) &&
+ check_feature(FEATURE_PPR));
}
static inline u64 iommu_virt_to_phys(void *vaddr)
@@ -105,7 +118,6 @@ static inline void *iommu_phys_to_virt(unsigned long paddr)
static inline
void amd_iommu_domain_set_pt_root(struct protection_domain *domain, u64 root)
{
- atomic64_set(&domain->iop.pt_root, root);
domain->iop.root = (u64 *)(root & PAGE_MASK);
domain->iop.mode = root & 7; /* lowest 3 bits encode pgtable mode */
}
@@ -146,8 +158,5 @@ void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
u64 *root, int mode);
struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);
-extern u64 amd_iommu_efr;
-extern u64 amd_iommu_efr2;
-
extern bool amd_iommu_snp_en;
#endif
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index dec4e5c2b66b..90b7d7950a9e 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -462,6 +462,10 @@
#define PD_IOMMUV2_MASK BIT(3) /* domain has gcr3 table */
#define PD_GIOV_MASK BIT(4) /* domain enable GIOV support */
+/* Timeout stuff */
+#define LOOP_TIMEOUT 100000
+#define MMIO_STATUS_TIMEOUT 2000000
+
extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...) \
do { \
@@ -516,19 +520,6 @@ extern struct kmem_cache *amd_iommu_irq_cache;
#define APERTURE_RANGE_INDEX(a) ((a) >> APERTURE_RANGE_SHIFT)
#define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL)
-/*
- * This struct is used to pass information about
- * incoming PPR faults around.
- */
-struct amd_iommu_fault {
- u64 address; /* IO virtual address of the fault*/
- u32 pasid; /* Address space identifier */
- u32 sbdf; /* Originating PCI device id */
- u16 tag; /* PPR tag */
- u16 flags; /* Fault flags */
-
-};
-
struct amd_iommu;
struct iommu_domain;
@@ -555,7 +546,6 @@ struct amd_io_pgtable {
struct io_pgtable iop;
int mode;
u64 *root;
- atomic64_t pt_root; /* pgtable root and pgtable mode */
u64 *pgd; /* v2 pgtable pgd pointer */
};
@@ -688,9 +678,6 @@ struct amd_iommu {
/* Extended features 2 */
u64 features2;
- /* IOMMUv2 */
- bool is_iommu_v2;
-
/* PCI device id of the IOMMU device */
u16 devid;
@@ -811,6 +798,14 @@ struct devid_map {
bool cmd_line;
};
+#define AMD_IOMMU_DEVICE_FLAG_ATS_SUP 0x1 /* ATS feature supported */
+#define AMD_IOMMU_DEVICE_FLAG_PRI_SUP 0x2 /* PRI feature supported */
+#define AMD_IOMMU_DEVICE_FLAG_PASID_SUP 0x4 /* PASID context supported */
+/* Device may request execution on memory pages */
+#define AMD_IOMMU_DEVICE_FLAG_EXEC_SUP 0x8
+/* Device may request super-user privileges */
+#define AMD_IOMMU_DEVICE_FLAG_PRIV_SUP 0x10
+
/*
* This struct contains device specific data for the IOMMU
*/
@@ -823,13 +818,15 @@ struct iommu_dev_data {
struct protection_domain *domain; /* Domain the device is bound to */
struct device *dev;
u16 devid; /* PCI Device ID */
- bool iommu_v2; /* Device can make use of IOMMUv2 */
- struct {
- bool enabled;
- int qdep;
- } ats; /* ATS state */
- bool pri_tlp; /* PASID TLB required for
+
+ u32 flags; /* Holds AMD_IOMMU_DEVICE_FLAG_<*> */
+ int ats_qdep;
+ u8 ats_enabled :1; /* ATS state */
+ u8 pri_enabled :1; /* PRI state */
+ u8 pasid_enabled:1; /* PASID state */
+ u8 pri_tlp :1; /* PASID TLB required for
PPR completions */
+ u8 ppr :1; /* Enable device PPR support */
bool use_vapic; /* Enable device to use vapic mode */
bool defer_attach;
@@ -896,16 +893,15 @@ extern unsigned amd_iommu_aperture_order;
/* allocation bitmap for domain ids */
extern unsigned long *amd_iommu_pd_alloc_bitmap;
-/* Smallest max PASID supported by any IOMMU in the system */
-extern u32 amd_iommu_max_pasid;
-
-extern bool amd_iommu_v2_present;
-
extern bool amd_iommu_force_isolation;
/* Max levels of glxval supported */
extern int amd_iommu_max_glx_val;
+/* Global EFR and EFR2 registers */
+extern u64 amd_iommu_efr;
+extern u64 amd_iommu_efr2;
+
/*
* This function flushes all internal caches of
* the IOMMU used by this driver.
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 45efb7e5d725..64bcf3df37ee 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -83,8 +83,6 @@
#define ACPI_DEVFLAG_LINT1 0x80
#define ACPI_DEVFLAG_ATSDIS 0x10000000
-#define LOOP_TIMEOUT 2000000
-
#define IVRS_GET_SBDF_ID(seg, bus, dev, fn) (((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \
| ((dev & 0x1f) << 3) | (fn & 0x7))
@@ -187,9 +185,6 @@ static int amd_iommus_present;
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;
-u32 amd_iommu_max_pasid __read_mostly = ~0;
-
-bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;
bool amdr_ivrs_remap_support __read_mostly;
@@ -272,7 +267,7 @@ int amd_iommu_get_num_iommus(void)
* Iterate through all the IOMMUs to get common EFR
* masks among all IOMMUs and warn if found inconsistency.
*/
-static void get_global_efr(void)
+static __init void get_global_efr(void)
{
struct amd_iommu *iommu;
@@ -304,16 +299,6 @@ static void get_global_efr(void)
pr_info("Using global IVHD EFR:%#llx, EFR2:%#llx\n", amd_iommu_efr, amd_iommu_efr2);
}
-static bool check_feature_on_all_iommus(u64 mask)
-{
- return !!(amd_iommu_efr & mask);
-}
-
-static inline int check_feature_gpt_level(void)
-{
- return ((amd_iommu_efr >> FEATURE_GATS_SHIFT) & FEATURE_GATS_MASK);
-}
-
/*
* For IVHD type 0x11/0x40, EFR is also available via IVHD.
* Default to IVHD EFR since it is available sooner
@@ -399,7 +384,7 @@ static void iommu_set_cwwb_range(struct amd_iommu *iommu)
u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
u64 entry = start & PM_ADDR_MASK;
- if (!check_feature_on_all_iommus(FEATURE_SNP))
+ if (!check_feature(FEATURE_SNP))
return;
/* Note:
@@ -869,7 +854,7 @@ static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
void *buf = (void *)__get_free_pages(gfp, order);
if (buf &&
- check_feature_on_all_iommus(FEATURE_SNP) &&
+ check_feature(FEATURE_SNP) &&
set_memory_4k((unsigned long)buf, (1 << order))) {
free_pages((unsigned long)buf, order);
buf = NULL;
@@ -985,14 +970,14 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
iommu_feature_enable(iommu, CONTROL_GAINT_EN);
iommu_feature_enable(iommu, CONTROL_GALOG_EN);
- for (i = 0; i < LOOP_TIMEOUT; ++i) {
+ for (i = 0; i < MMIO_STATUS_TIMEOUT; ++i) {
status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
if (status & (MMIO_STATUS_GALOG_RUN_MASK))
break;
udelay(10);
}
- if (WARN_ON(i >= LOOP_TIMEOUT))
+ if (WARN_ON(i >= MMIO_STATUS_TIMEOUT))
return -EINVAL;
return 0;
@@ -1048,7 +1033,7 @@ static void iommu_enable_xt(struct amd_iommu *iommu)
static void iommu_enable_gt(struct amd_iommu *iommu)
{
- if (!iommu_feature(iommu, FEATURE_GT))
+ if (!check_feature(FEATURE_GT))
return;
iommu_feature_enable(iommu, CONTROL_GT_EN);
@@ -1987,7 +1972,7 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
u64 val;
struct pci_dev *pdev = iommu->dev;
- if (!iommu_feature(iommu, FEATURE_PC))
+ if (!check_feature(FEATURE_PC))
return;
amd_iommu_pc_present = true;
@@ -2014,8 +1999,7 @@ static ssize_t amd_iommu_show_features(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct amd_iommu *iommu = dev_to_amd_iommu(dev);
- return sysfs_emit(buf, "%llx:%llx\n", iommu->features2, iommu->features);
+ return sysfs_emit(buf, "%llx:%llx\n", amd_iommu_efr, amd_iommu_efr2);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
@@ -2051,9 +2035,9 @@ static void __init late_iommu_features_init(struct amd_iommu *iommu)
features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2);
- if (!iommu->features) {
- iommu->features = features;
- iommu->features2 = features2;
+ if (!amd_iommu_efr) {
+ amd_iommu_efr = features;
+ amd_iommu_efr2 = features2;
return;
}
@@ -2061,12 +2045,12 @@ static void __init late_iommu_features_init(struct amd_iommu *iommu)
* Sanity check and warn if EFR values from
* IVHD and MMIO conflict.
*/
- if (features != iommu->features ||
- features2 != iommu->features2) {
+ if (features != amd_iommu_efr ||
+ features2 != amd_iommu_efr2) {
pr_warn(FW_WARN
"EFR mismatch. Use IVHD EFR (%#llx : %#llx), EFR2 (%#llx : %#llx).\n",
- features, iommu->features,
- features2, iommu->features2);
+ features, amd_iommu_efr,
+ features2, amd_iommu_efr2);
}
}
@@ -2092,20 +2076,17 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
late_iommu_features_init(iommu);
- if (iommu_feature(iommu, FEATURE_GT)) {
+ if (check_feature(FEATURE_GT)) {
int glxval;
- u32 max_pasid;
u64 pasmax;
- pasmax = iommu->features & FEATURE_PASID_MASK;
+ pasmax = amd_iommu_efr & FEATURE_PASID_MASK;
pasmax >>= FEATURE_PASID_SHIFT;
- max_pasid = (1 << (pasmax + 1)) - 1;
+ iommu->iommu.max_pasids = (1 << (pasmax + 1)) - 1;
- amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
+ BUG_ON(iommu->iommu.max_pasids & ~PASID_MASK);
- BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
-
- glxval = iommu->features & FEATURE_GLXVAL_MASK;
+ glxval = amd_iommu_efr & FEATURE_GLXVAL_MASK;
glxval >>= FEATURE_GLXVAL_SHIFT;
if (amd_iommu_max_glx_val == -1)
@@ -2114,13 +2095,7 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
}
- if (iommu_feature(iommu, FEATURE_GT) &&
- iommu_feature(iommu, FEATURE_PPR)) {
- iommu->is_iommu_v2 = true;
- amd_iommu_v2_present = true;
- }
-
- if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
+ if (check_feature(FEATURE_PPR) && alloc_ppr_log(iommu))
return -ENOMEM;
if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) {
@@ -2132,13 +2107,10 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
init_iommu_perf_ctr(iommu);
if (amd_iommu_pgtable == AMD_IOMMU_V2) {
- if (!iommu_feature(iommu, FEATURE_GIOSUP) ||
- !iommu_feature(iommu, FEATURE_GT)) {
+ if (!check_feature(FEATURE_GIOSUP) ||
+ !check_feature(FEATURE_GT)) {
pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
amd_iommu_pgtable = AMD_IOMMU_V1;
- } else if (iommu_default_passthrough()) {
- pr_warn("V2 page table doesn't support passthrough mode. Fallback to v1.\n");
- amd_iommu_pgtable = AMD_IOMMU_V1;
}
}
@@ -2186,35 +2158,29 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
static void print_iommu_info(void)
{
+ int i;
static const char * const feat_str[] = {
"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
"IA", "GA", "HE", "PC"
};
- struct amd_iommu *iommu;
-
- for_each_iommu(iommu) {
- struct pci_dev *pdev = iommu->dev;
- int i;
- pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr);
+ if (amd_iommu_efr) {
+ pr_info("Extended features (%#llx, %#llx):", amd_iommu_efr, amd_iommu_efr2);
- if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
- pr_info("Extended features (%#llx, %#llx):", iommu->features, iommu->features2);
-
- for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
- if (iommu_feature(iommu, (1ULL << i)))
- pr_cont(" %s", feat_str[i]);
- }
+ for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
+ if (check_feature(1ULL << i))
+ pr_cont(" %s", feat_str[i]);
+ }
- if (iommu->features & FEATURE_GAM_VAPIC)
- pr_cont(" GA_vAPIC");
+ if (check_feature(FEATURE_GAM_VAPIC))
+ pr_cont(" GA_vAPIC");
- if (iommu->features & FEATURE_SNP)
- pr_cont(" SNP");
+ if (check_feature(FEATURE_SNP))
+ pr_cont(" SNP");
- pr_cont("\n");
- }
+ pr_cont("\n");
}
+
if (irq_remapping_enabled) {
pr_info("Interrupt remapping enabled\n");
if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
@@ -2900,19 +2866,19 @@ static void enable_iommus_vapic(void)
* Need to set and poll check the GALOGRun bit to zero before
* we can set/ modify GA Log registers safely.
*/
- for (i = 0; i < LOOP_TIMEOUT; ++i) {
+ for (i = 0; i < MMIO_STATUS_TIMEOUT; ++i) {
status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
break;
udelay(10);
}
- if (WARN_ON(i >= LOOP_TIMEOUT))
+ if (WARN_ON(i >= MMIO_STATUS_TIMEOUT))
return;
}
if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
- !check_feature_on_all_iommus(FEATURE_GAM_VAPIC)) {
+ !check_feature(FEATURE_GAM_VAPIC)) {
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
return;
}
@@ -3698,9 +3664,8 @@ bool amd_iommu_v2_supported(void)
* (i.e. EFR[SNPSup]=1), IOMMUv2 page table cannot be used without
* setting up IOMMUv1 page table.
*/
- return amd_iommu_v2_present && !amd_iommu_snp_en;
+ return amd_iommu_gt_ppr_supported() && !amd_iommu_snp_en;
}
-EXPORT_SYMBOL(amd_iommu_v2_supported);
struct amd_iommu *get_amd_iommu(unsigned int idx)
{
@@ -3824,7 +3789,7 @@ int amd_iommu_snp_enable(void)
return -EINVAL;
}
- amd_iommu_snp_en = check_feature_on_all_iommus(FEATURE_SNP);
+ amd_iommu_snp_en = check_feature(FEATURE_SNP);
if (!amd_iommu_snp_en)
return -EINVAL;
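Both timeout constants introduced above guard the same readl()/udelay() polling loops on MMIO_STATUS_OFFSET. Purely for comparison, and not part of this patch, the GA-log wait could also be expressed with the generic iopoll helper; the 10 us poll interval and 2 s timeout below are placeholders.

#include <linux/iopoll.h>

/* Hypothetical wrapper, equivalent in spirit to the open-coded loop. */
static int example_wait_galog_running(struct amd_iommu *iommu)
{
	u32 status;

	/* poll every 10 us until GALOG_RUN is set, or time out (2 s) */
	return readl_poll_timeout(iommu->mmio_base + MMIO_STATUS_OFFSET,
				  status,
				  status & MMIO_STATUS_GALOG_RUN_MASK,
				  10, 2000000);
}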
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
index e9ef2e0a62f6..f818a7e254d4 100644
--- a/drivers/iommu/amd/io_pgtable_v2.c
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -363,10 +363,10 @@ static void v2_free_pgtable(struct io_pgtable *iop)
if (!(pdom->flags & PD_IOMMUV2_MASK))
return;
- /*
- * Make changes visible to IOMMUs. No need to clear gcr3 entry
- * as gcr3 table is already freed.
- */
+ /* Clear gcr3 entry */
+ amd_iommu_domain_clear_gcr3(&pdom->domain, 0);
+
+ /* Make changes visible to IOMMUs */
amd_iommu_domain_update(pdom);
/* Free page table */
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index b399c5741378..fcc987f5d4ed 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -45,8 +45,6 @@
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
-#define LOOP_TIMEOUT 100000
-
/* IO virtual address start page frame number */
#define IOVA_START_PFN (1)
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
@@ -68,7 +66,6 @@ LIST_HEAD(acpihid_map);
const struct iommu_ops amd_iommu_ops;
const struct iommu_dirty_ops amd_dirty_ops;
-static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1;
/*
@@ -81,7 +78,6 @@ struct iommu_cmd {
struct kmem_cache *amd_iommu_irq_cache;
static void detach_device(struct device *dev);
-static int domain_enable_v2(struct protection_domain *domain, int pasids);
/****************************************************************************
*
@@ -324,24 +320,141 @@ static struct iommu_group *acpihid_device_group(struct device *dev)
return entry->group;
}
-static bool pci_iommuv2_capable(struct pci_dev *pdev)
+static inline bool pdev_pasid_supported(struct iommu_dev_data *dev_data)
{
- static const int caps[] = {
- PCI_EXT_CAP_ID_PRI,
- PCI_EXT_CAP_ID_PASID,
- };
- int i, pos;
+ return (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP);
+}
- if (!pci_ats_supported(pdev))
- return false;
+static u32 pdev_get_caps(struct pci_dev *pdev)
+{
+ int features;
+ u32 flags = 0;
+
+ if (pci_ats_supported(pdev))
+ flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
+
+ if (pci_pri_supported(pdev))
+ flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
+
+ features = pci_pasid_features(pdev);
+ if (features >= 0) {
+ flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
+
+ if (features & PCI_PASID_CAP_EXEC)
+ flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
- for (i = 0; i < 2; ++i) {
- pos = pci_find_ext_capability(pdev, caps[i]);
- if (pos == 0)
- return false;
+ if (features & PCI_PASID_CAP_PRIV)
+ flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
}
- return true;
+ return flags;
+}
+
+static inline int pdev_enable_cap_ats(struct pci_dev *pdev)
+{
+ struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
+ int ret = -EINVAL;
+
+ if (dev_data->ats_enabled)
+ return 0;
+
+ if (amd_iommu_iotlb_sup &&
+ (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP)) {
+ ret = pci_enable_ats(pdev, PAGE_SHIFT);
+ if (!ret) {
+ dev_data->ats_enabled = 1;
+ dev_data->ats_qdep = pci_ats_queue_depth(pdev);
+ }
+ }
+
+ return ret;
+}
+
+static inline void pdev_disable_cap_ats(struct pci_dev *pdev)
+{
+ struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
+
+ if (dev_data->ats_enabled) {
+ pci_disable_ats(pdev);
+ dev_data->ats_enabled = 0;
+ }
+}
+
+int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev)
+{
+ struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
+ int ret = -EINVAL;
+
+ if (dev_data->pri_enabled)
+ return 0;
+
+ if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) {
+ /*
+ * First reset the PRI state of the device.
+ * FIXME: Hardcode number of outstanding requests for now
+ */
+ if (!pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32)) {
+ dev_data->pri_enabled = 1;
+ dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev);
+
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
+void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev)
+{
+ struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
+
+ if (dev_data->pri_enabled) {
+ pci_disable_pri(pdev);
+ dev_data->pri_enabled = 0;
+ }
+}
+
+static inline int pdev_enable_cap_pasid(struct pci_dev *pdev)
+{
+ struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
+ int ret = -EINVAL;
+
+ if (dev_data->pasid_enabled)
+ return 0;
+
+ if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) {
+ /* Only allow access to user-accessible pages */
+ ret = pci_enable_pasid(pdev, 0);
+ if (!ret)
+ dev_data->pasid_enabled = 1;
+ }
+
+ return ret;
+}
+
+static inline void pdev_disable_cap_pasid(struct pci_dev *pdev)
+{
+ struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
+
+ if (dev_data->pasid_enabled) {
+ pci_disable_pasid(pdev);
+ dev_data->pasid_enabled = 0;
+ }
+}
+
+static void pdev_enable_caps(struct pci_dev *pdev)
+{
+ pdev_enable_cap_ats(pdev);
+ pdev_enable_cap_pasid(pdev);
+ amd_iommu_pdev_enable_cap_pri(pdev);
+}
+
+static void pdev_disable_caps(struct pci_dev *pdev)
+{
+ pdev_disable_cap_ats(pdev);
+ pdev_disable_cap_pasid(pdev);
+ amd_iommu_pdev_disable_cap_pri(pdev);
}
/*
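
The hunk above replaces the old all-or-nothing pci_iommuv2_capable() test with per-capability flags in dev_data->flags plus paired enable/disable helpers for ATS, PRI and PASID. A minimal stand-alone sketch of that flag scheme (flag values and names here are illustrative, not the kernel's definitions):

#include <stdio.h>

#define DEV_FLAG_ATS_SUP	(1U << 0)
#define DEV_FLAG_PRI_SUP	(1U << 1)
#define DEV_FLAG_PASID_SUP	(1U << 2)

struct dev_data {
	unsigned int flags;
	unsigned int ats_enabled : 1;
	unsigned int pri_enabled : 1;
	unsigned int pasid_enabled : 1;
};

static void enable_caps(struct dev_data *dd)
{
	/* Each capability is enabled only if its probe-time flag was set. */
	if (dd->flags & DEV_FLAG_ATS_SUP)
		dd->ats_enabled = 1;
	if (dd->flags & DEV_FLAG_PASID_SUP)
		dd->pasid_enabled = 1;
	if (dd->flags & DEV_FLAG_PRI_SUP)
		dd->pri_enabled = 1;
}

int main(void)
{
	/* A device that supports ATS and PASID but not PRI. */
	struct dev_data dd = { .flags = DEV_FLAG_ATS_SUP | DEV_FLAG_PASID_SUP };

	enable_caps(&dd);
	printf("ats=%u pri=%u pasid=%u\n",
	       dd.ats_enabled, dd.pri_enabled, dd.pasid_enabled);
	return 0;
}

The point of the split is that a device lacking one capability, say PRI, can still have ATS and PASID enabled, whereas the old boolean required every capability to be present before any of them was used.
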
@@ -401,8 +514,8 @@ static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
* it'll be forced to go into translation mode.
*/
if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
- dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
- dev_data->iommu_v2 = iommu->is_iommu_v2;
+ dev_is_pci(dev) && amd_iommu_gt_ppr_supported()) {
+ dev_data->flags = pdev_get_caps(to_pci_dev(dev));
}
dev_iommu_priv_set(dev, dev_data);
@@ -703,24 +816,6 @@ static void iommu_poll_events(struct amd_iommu *iommu)
writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
}
-static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
-{
- struct amd_iommu_fault fault;
-
- if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
- pr_err_ratelimited("Unknown PPR request received\n");
- return;
- }
-
- fault.address = raw[1];
- fault.pasid = PPR_PASID(raw[0]);
- fault.sbdf = PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, PPR_DEVID(raw[0]));
- fault.tag = PPR_TAG(raw[0]);
- fault.flags = PPR_FLAGS(raw[0]);
-
- atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
-}
-
static void iommu_poll_ppr_log(struct amd_iommu *iommu)
{
u32 head, tail;
@@ -766,8 +861,7 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
- /* Handle PPR entry */
- iommu_handle_ppr_entry(iommu, entry);
+ /* TODO: PPR Handler will be added when we add IOPF support */
/* Refresh ring-buffer information */
head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
@@ -1096,7 +1190,7 @@ static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, u32 pasid,
}
static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, u32 pasid,
- int status, int tag, bool gn)
+ int status, int tag, u8 gn)
{
memset(cmd, 0, sizeof(*cmd));
@@ -1300,7 +1394,7 @@ static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
void iommu_flush_all_caches(struct amd_iommu *iommu)
{
- if (iommu_feature(iommu, FEATURE_IA)) {
+ if (check_feature(FEATURE_IA)) {
amd_iommu_flush_all(iommu);
} else {
amd_iommu_flush_dte_all(iommu);
@@ -1319,7 +1413,7 @@ static int device_flush_iotlb(struct iommu_dev_data *dev_data,
struct iommu_cmd cmd;
int qdep;
- qdep = dev_data->ats.qdep;
+ qdep = dev_data->ats_qdep;
iommu = rlookup_amd_iommu(dev_data->dev);
if (!iommu)
return -EINVAL;
@@ -1370,7 +1464,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
return ret;
}
- if (dev_data->ats.enabled)
+ if (dev_data->ats_enabled)
ret = device_flush_iotlb(dev_data, 0, ~0UL);
return ret;
@@ -1403,7 +1497,7 @@ static void __domain_flush_pages(struct protection_domain *domain,
list_for_each_entry(dev_data, &domain->dev_list, list) {
- if (!dev_data->ats.enabled)
+ if (!dev_data->ats_enabled)
continue;
ret |= device_flush_iotlb(dev_data, address, size);
@@ -1579,6 +1673,42 @@ static void free_gcr3_table(struct protection_domain *domain)
free_page((unsigned long)domain->gcr3_tbl);
}
+/*
+ * Number of GCR3 table levels required. Each level is a 4-Kbyte
+ * page and can hold up to 512 entries.
+ */
+static int get_gcr3_levels(int pasids)
+{
+ int levels;
+
+ if (pasids == -1)
+ return amd_iommu_max_glx_val;
+
+ levels = get_count_order(pasids);
+
+ return levels ? (DIV_ROUND_UP(levels, 9) - 1) : levels;
+}
+
+/* Note: This function expects iommu_domain->lock to be held prior to calling the function. */
+static int setup_gcr3_table(struct protection_domain *domain, int pasids)
+{
+ int levels = get_gcr3_levels(pasids);
+
+ if (levels > amd_iommu_max_glx_val)
+ return -EINVAL;
+
+ domain->gcr3_tbl = alloc_pgtable_page(domain->nid, GFP_ATOMIC);
+ if (domain->gcr3_tbl == NULL)
+ return -ENOMEM;
+
+ domain->glx = levels;
+ domain->flags |= PD_IOMMUV2_MASK;
+
+ amd_iommu_domain_update(domain);
+
+ return 0;
+}
+
static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
struct protection_domain *domain, bool ats, bool ppr)
{
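
The setup_gcr3_table()/get_gcr3_levels() helpers added above size the GCR3 table from the requested PASID count: each level is a 4-Kbyte page holding 512 (2^9) entries, so the level count is roughly ceil(log2(pasids) / 9), stored minus one (a value of 0 means a single-level table). A small stand-alone sketch of the same arithmetic, assuming the kernel's get_count_order() returns ceil(log2(n)):

#include <stdio.h>

/* ceil(log2(n)) for n >= 1, mirroring get_count_order(). */
static int count_order(unsigned int n)
{
	int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

static int gcr3_levels(unsigned int pasids)
{
	int order = count_order(pasids);

	/* Each table level is a 4-KiB page holding 512 (2^9) entries. */
	return order ? (order + 8) / 9 - 1 : 0;
}

int main(void)
{
	/* 512 PASIDs fit in one level (value 0); 2^20, the PASID maximum,
	 * needs three levels (value 2). */
	printf("%d %d %d\n", gcr3_levels(1), gcr3_levels(512), gcr3_levels(1u << 20));
	return 0;
}
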
@@ -1607,10 +1737,8 @@ static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
if (ats)
flags |= DTE_FLAG_IOTLB;
- if (ppr) {
- if (iommu_feature(iommu, FEATURE_EPHSUP))
- pte_root |= 1ULL << DEV_ENTRY_PPR;
- }
+ if (ppr)
+ pte_root |= 1ULL << DEV_ENTRY_PPR;
if (domain->dirty_tracking)
pte_root |= DTE_FLAG_HAD;
@@ -1690,7 +1818,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
iommu = rlookup_amd_iommu(dev_data->dev);
if (!iommu)
return;
- ats = dev_data->ats.enabled;
+ ats = dev_data->ats_enabled;
/* Update data structures */
dev_data->domain = domain;
@@ -1706,7 +1834,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
/* Update device table */
set_dte_entry(iommu, dev_data->devid, domain,
- ats, dev_data->iommu_v2);
+ ats, dev_data->ppr);
clone_aliases(iommu, dev_data->dev);
device_flush_dte(dev_data);
@@ -1741,48 +1869,6 @@ static void do_detach(struct iommu_dev_data *dev_data)
domain->dev_cnt -= 1;
}
-static void pdev_iommuv2_disable(struct pci_dev *pdev)
-{
- pci_disable_ats(pdev);
- pci_disable_pri(pdev);
- pci_disable_pasid(pdev);
-}
-
-static int pdev_pri_ats_enable(struct pci_dev *pdev)
-{
- int ret;
-
- /* Only allow access to user-accessible pages */
- ret = pci_enable_pasid(pdev, 0);
- if (ret)
- return ret;
-
- /* First reset the PRI state of the device */
- ret = pci_reset_pri(pdev);
- if (ret)
- goto out_err_pasid;
-
- /* Enable PRI */
- /* FIXME: Hardcode number of outstanding requests for now */
- ret = pci_enable_pri(pdev, 32);
- if (ret)
- goto out_err_pasid;
-
- ret = pci_enable_ats(pdev, PAGE_SHIFT);
- if (ret)
- goto out_err_pri;
-
- return 0;
-
-out_err_pri:
- pci_disable_pri(pdev);
-
-out_err_pasid:
- pci_disable_pasid(pdev);
-
- return ret;
-}
-
/*
* If a device is not yet associated with a domain, this function makes the
* device visible in the domain
@@ -1791,9 +1877,8 @@ static int attach_device(struct device *dev,
struct protection_domain *domain)
{
struct iommu_dev_data *dev_data;
- struct pci_dev *pdev;
unsigned long flags;
- int ret;
+ int ret = 0;
spin_lock_irqsave(&domain->lock, flags);
@@ -1801,45 +1886,13 @@ static int attach_device(struct device *dev,
spin_lock(&dev_data->lock);
- ret = -EBUSY;
- if (dev_data->domain != NULL)
+ if (dev_data->domain != NULL) {
+ ret = -EBUSY;
goto out;
-
- if (!dev_is_pci(dev))
- goto skip_ats_check;
-
- pdev = to_pci_dev(dev);
- if (domain->flags & PD_IOMMUV2_MASK) {
- struct iommu_domain *def_domain = iommu_get_dma_domain(dev);
-
- ret = -EINVAL;
-
- /*
- * In case of using AMD_IOMMU_V1 page table mode and the device
- * is enabling for PPR/ATS support (using v2 table),
- * we need to make sure that the domain type is identity map.
- */
- if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
- def_domain->type != IOMMU_DOMAIN_IDENTITY) {
- goto out;
- }
-
- if (dev_data->iommu_v2) {
- if (pdev_pri_ats_enable(pdev) != 0)
- goto out;
-
- dev_data->ats.enabled = true;
- dev_data->ats.qdep = pci_ats_queue_depth(pdev);
- dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev);
- }
- } else if (amd_iommu_iotlb_sup &&
- pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
- dev_data->ats.enabled = true;
- dev_data->ats.qdep = pci_ats_queue_depth(pdev);
}
-skip_ats_check:
- ret = 0;
+ if (dev_is_pci(dev))
+ pdev_enable_caps(to_pci_dev(dev));
do_attach(dev_data, domain);
@@ -1887,15 +1940,8 @@ static void detach_device(struct device *dev)
do_detach(dev_data);
- if (!dev_is_pci(dev))
- goto out;
-
- if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
- pdev_iommuv2_disable(to_pci_dev(dev));
- else if (dev_data->ats.enabled)
- pci_disable_ats(to_pci_dev(dev));
-
- dev_data->ats.enabled = false;
+ if (dev_is_pci(dev))
+ pdev_disable_caps(to_pci_dev(dev));
out:
spin_unlock(&dev_data->lock);
@@ -1985,7 +2031,7 @@ static void update_device_table(struct protection_domain *domain)
if (!iommu)
continue;
set_dte_entry(iommu, dev_data->devid, domain,
- dev_data->ats.enabled, dev_data->iommu_v2);
+ dev_data->ats_enabled, dev_data->ppr);
clone_aliases(iommu, dev_data->dev);
}
}
@@ -2019,9 +2065,11 @@ void amd_iommu_domain_update(struct protection_domain *domain)
static void cleanup_domain(struct protection_domain *domain)
{
struct iommu_dev_data *entry;
- unsigned long flags;
- spin_lock_irqsave(&domain->lock, flags);
+ lockdep_assert_held(&domain->lock);
+
+ if (!domain->dev_cnt)
+ return;
while (!list_empty(&domain->dev_list)) {
entry = list_first_entry(&domain->dev_list,
@@ -2029,8 +2077,7 @@ static void cleanup_domain(struct protection_domain *domain)
BUG_ON(!entry->domain);
do_detach(entry);
}
-
- spin_unlock_irqrestore(&domain->lock, flags);
+ WARN_ON(domain->dev_cnt != 0);
}
static void protection_domain_free(struct protection_domain *domain)
@@ -2041,6 +2088,12 @@ static void protection_domain_free(struct protection_domain *domain)
if (domain->iop.pgtbl_cfg.tlb)
free_io_pgtable_ops(&domain->iop.iop.ops);
+ if (domain->flags & PD_IOMMUV2_MASK)
+ free_gcr3_table(domain);
+
+ if (domain->iop.root)
+ free_page((unsigned long)domain->iop.root);
+
if (domain->id)
domain_id_free(domain->id);
@@ -2053,18 +2106,10 @@ static int protection_domain_init_v1(struct protection_domain *domain, int mode)
BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
- spin_lock_init(&domain->lock);
- domain->id = domain_id_alloc();
- if (!domain->id)
- return -ENOMEM;
- INIT_LIST_HEAD(&domain->dev_list);
-
if (mode != PAGE_MODE_NONE) {
pt_root = (void *)get_zeroed_page(GFP_KERNEL);
- if (!pt_root) {
- domain_id_free(domain->id);
+ if (!pt_root)
return -ENOMEM;
- }
}
amd_iommu_domain_set_pgtable(domain, pt_root, mode);
@@ -2074,20 +2119,12 @@ static int protection_domain_init_v1(struct protection_domain *domain, int mode)
static int protection_domain_init_v2(struct protection_domain *domain)
{
- spin_lock_init(&domain->lock);
- domain->id = domain_id_alloc();
- if (!domain->id)
- return -ENOMEM;
- INIT_LIST_HEAD(&domain->dev_list);
-
domain->flags |= PD_GIOV_MASK;
domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
- if (domain_enable_v2(domain, 1)) {
- domain_id_free(domain->id);
+ if (setup_gcr3_table(domain, 1))
return -ENOMEM;
- }
return 0;
}
@@ -2097,57 +2134,60 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
struct io_pgtable_ops *pgtbl_ops;
struct protection_domain *domain;
int pgtable;
- int mode = DEFAULT_PGTABLE_LEVEL;
int ret;
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ return NULL;
+
+ domain->id = domain_id_alloc();
+ if (!domain->id)
+ goto out_err;
+
+ spin_lock_init(&domain->lock);
+ INIT_LIST_HEAD(&domain->dev_list);
+ domain->nid = NUMA_NO_NODE;
+
+ switch (type) {
+ /* No need to allocate io pgtable ops in passthrough mode */
+ case IOMMU_DOMAIN_IDENTITY:
+ return domain;
+ case IOMMU_DOMAIN_DMA:
+ pgtable = amd_iommu_pgtable;
+ break;
/*
- * Force IOMMU v1 page table when iommu=pt and
- * when allocating domain for pass-through devices.
+ * Force IOMMU v1 page table when allocating
+ * domain for pass-through devices.
*/
- if (type == IOMMU_DOMAIN_IDENTITY) {
- pgtable = AMD_IOMMU_V1;
- mode = PAGE_MODE_NONE;
- } else if (type == IOMMU_DOMAIN_UNMANAGED) {
+ case IOMMU_DOMAIN_UNMANAGED:
pgtable = AMD_IOMMU_V1;
- } else if (type == IOMMU_DOMAIN_DMA || type == IOMMU_DOMAIN_DMA_FQ) {
- pgtable = amd_iommu_pgtable;
- } else {
- return NULL;
+ break;
+ default:
+ goto out_err;
}
- domain = kzalloc(sizeof(*domain), GFP_KERNEL);
- if (!domain)
- return NULL;
-
switch (pgtable) {
case AMD_IOMMU_V1:
- ret = protection_domain_init_v1(domain, mode);
+ ret = protection_domain_init_v1(domain, DEFAULT_PGTABLE_LEVEL);
break;
case AMD_IOMMU_V2:
ret = protection_domain_init_v2(domain);
break;
default:
ret = -EINVAL;
+ break;
}
if (ret)
goto out_err;
- /* No need to allocate io pgtable ops in passthrough mode */
- if (type == IOMMU_DOMAIN_IDENTITY)
- return domain;
-
- domain->nid = NUMA_NO_NODE;
-
pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
- if (!pgtbl_ops) {
- domain_id_free(domain->id);
+ if (!pgtbl_ops)
goto out_err;
- }
return domain;
out_err:
- kfree(domain);
+ protection_domain_free(domain);
return NULL;
}
@@ -2236,19 +2276,18 @@ amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
static void amd_iommu_domain_free(struct iommu_domain *dom)
{
struct protection_domain *domain;
+ unsigned long flags;
- domain = to_pdomain(dom);
+ if (!dom)
+ return;
- if (domain->dev_cnt > 0)
- cleanup_domain(domain);
+ domain = to_pdomain(dom);
- BUG_ON(domain->dev_cnt != 0);
+ spin_lock_irqsave(&domain->lock, flags);
- if (!dom)
- return;
+ cleanup_domain(domain);
- if (domain->flags & PD_IOMMUV2_MASK)
- free_gcr3_table(domain);
+ spin_unlock_irqrestore(&domain->lock, flags);
protection_domain_free(domain);
}
@@ -2296,14 +2335,15 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
return ret;
}
-static void amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
- unsigned long iova, size_t size)
+static int amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
+ unsigned long iova, size_t size)
{
struct protection_domain *domain = to_pdomain(dom);
struct io_pgtable_ops *ops = &domain->iop.iop.ops;
if (ops->map_pages)
domain_flush_np_cache(domain, iova, size);
+ return 0;
}
static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
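
Both this change and the matching Apple DART hunk further down switch iotlb_sync_map() from void to int, presumably so that map-time IOTLB maintenance failures can be reported to the caller rather than silently dropped. A stand-alone sketch of that calling convention (names are illustrative, not the iommu_ops API):

#include <stdio.h>

struct ops {
	int (*iotlb_sync_map)(unsigned long iova, size_t size);
};

static int sync_map(unsigned long iova, size_t size)
{
	/* A real driver would push the new mappings to the IOTLB here and
	 * return a negative errno on failure. */
	return 0;
}

static int map_pages(const struct ops *ops, unsigned long iova, size_t size)
{
	/* ... install the page-table entries ... */
	if (ops->iotlb_sync_map)
		return ops->iotlb_sync_map(iova, size);	/* failure now propagates */
	return 0;
}

int main(void)
{
	const struct ops ops = { .iotlb_sync_map = sync_map };

	printf("map: %d\n", map_pages(&ops, 0x1000, 0x1000));
	return 0;
}
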
@@ -2541,7 +2581,6 @@ bool amd_iommu_is_attach_deferred(struct device *dev)
return dev_data->defer_attach;
}
-EXPORT_SYMBOL_GPL(amd_iommu_is_attach_deferred);
static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
@@ -2581,7 +2620,7 @@ static int amd_iommu_def_domain_type(struct device *dev)
* and require remapping.
* - SNP is enabled, because it prohibits DTE[Mode]=0.
*/
- if (dev_data->iommu_v2 &&
+ if (pdev_pasid_supported(dev_data) &&
!cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
!amd_iommu_snp_en) {
return IOMMU_DOMAIN_IDENTITY;
@@ -2626,93 +2665,6 @@ const struct iommu_ops amd_iommu_ops = {
}
};
-/*****************************************************************************
- *
- * The next functions do a basic initialization of IOMMU for pass through
- * mode
- *
- * In passthrough mode the IOMMU is initialized and enabled but not used for
- * DMA-API translation.
- *
- *****************************************************************************/
-
-/* IOMMUv2 specific functions */
-int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_register(&ppr_notifier, nb);
-}
-EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
-
-int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_unregister(&ppr_notifier, nb);
-}
-EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
-
-void amd_iommu_domain_direct_map(struct iommu_domain *dom)
-{
- struct protection_domain *domain = to_pdomain(dom);
- unsigned long flags;
-
- spin_lock_irqsave(&domain->lock, flags);
-
- if (domain->iop.pgtbl_cfg.tlb)
- free_io_pgtable_ops(&domain->iop.iop.ops);
-
- spin_unlock_irqrestore(&domain->lock, flags);
-}
-EXPORT_SYMBOL(amd_iommu_domain_direct_map);
-
-/* Note: This function expects iommu_domain->lock to be held prior calling the function. */
-static int domain_enable_v2(struct protection_domain *domain, int pasids)
-{
- int levels;
-
- /* Number of GCR3 table levels required */
- for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
- levels += 1;
-
- if (levels > amd_iommu_max_glx_val)
- return -EINVAL;
-
- domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
- if (domain->gcr3_tbl == NULL)
- return -ENOMEM;
-
- domain->glx = levels;
- domain->flags |= PD_IOMMUV2_MASK;
-
- amd_iommu_domain_update(domain);
-
- return 0;
-}
-
-int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
-{
- struct protection_domain *pdom = to_pdomain(dom);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&pdom->lock, flags);
-
- /*
- * Save us all sanity checks whether devices already in the
- * domain support IOMMUv2. Just force that the domain has no
- * devices attached when it is switched into IOMMUv2 mode.
- */
- ret = -EBUSY;
- if (pdom->dev_cnt > 0 || pdom->flags & PD_IOMMUV2_MASK)
- goto out;
-
- if (!pdom->gcr3_tbl)
- ret = domain_enable_v2(pdom, pasids);
-
-out:
- spin_unlock_irqrestore(&pdom->lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
-
static int __flush_pasid(struct protection_domain *domain, u32 pasid,
u64 address, bool size)
{
@@ -2750,10 +2702,10 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
* There might be non-IOMMUv2 capable devices in an IOMMUv2
* domain.
*/
- if (!dev_data->ats.enabled)
+ if (!dev_data->ats_enabled)
continue;
- qdep = dev_data->ats.qdep;
+ qdep = dev_data->ats_qdep;
iommu = rlookup_amd_iommu(dev_data->dev);
if (!iommu)
continue;
@@ -2794,7 +2746,6 @@ int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
return ret;
}
-EXPORT_SYMBOL(amd_iommu_flush_page);
static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
{
@@ -2814,7 +2765,6 @@ int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
return ret;
}
-EXPORT_SYMBOL(amd_iommu_flush_tlb);
static u64 *__get_gcr3_pte(u64 *root, int level, u32 pasid, bool alloc)
{
@@ -2894,7 +2844,6 @@ int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
return ret;
}
-EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid)
{
@@ -2908,7 +2857,6 @@ int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid)
return ret;
}
-EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
int status, int tag)
@@ -2927,49 +2875,6 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
return iommu_queue_command(iommu, &cmd);
}
-EXPORT_SYMBOL(amd_iommu_complete_ppr);
-
-int amd_iommu_device_info(struct pci_dev *pdev,
- struct amd_iommu_device_info *info)
-{
- int max_pasids;
- int pos;
-
- if (pdev == NULL || info == NULL)
- return -EINVAL;
-
- if (!amd_iommu_v2_supported())
- return -EINVAL;
-
- memset(info, 0, sizeof(*info));
-
- if (pci_ats_supported(pdev))
- info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
- if (pos)
- info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
- if (pos) {
- int features;
-
- max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
- max_pasids = min(max_pasids, (1 << 20));
-
- info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
- info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
-
- features = pci_pasid_features(pdev);
- if (features & PCI_PASID_CAP_EXEC)
- info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
- if (features & PCI_PASID_CAP_PRIV)
- info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(amd_iommu_device_info);
#ifdef CONFIG_IRQ_REMAP
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
deleted file mode 100644
index 57c2fb1146e2..000000000000
--- a/drivers/iommu/amd/iommu_v2.c
+++ /dev/null
@@ -1,996 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <jroedel@suse.de>
- */
-
-#define pr_fmt(fmt) "AMD-Vi: " fmt
-
-#include <linux/refcount.h>
-#include <linux/mmu_notifier.h>
-#include <linux/amd-iommu.h>
-#include <linux/mm_types.h>
-#include <linux/profile.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/sched/mm.h>
-#include <linux/wait.h>
-#include <linux/pci.h>
-#include <linux/gfp.h>
-#include <linux/cc_platform.h>
-
-#include "amd_iommu.h"
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");
-
-#define PRI_QUEUE_SIZE 512
-
-struct pri_queue {
- atomic_t inflight;
- bool finish;
- int status;
-};
-
-struct pasid_state {
- struct list_head list; /* For global state-list */
- refcount_t count; /* Reference count */
- unsigned mmu_notifier_count; /* Counting nested mmu_notifier
- calls */
- struct mm_struct *mm; /* mm_struct for the faults */
- struct mmu_notifier mn; /* mmu_notifier handle */
- struct pri_queue pri[PRI_QUEUE_SIZE]; /* PRI tag states */
- struct device_state *device_state; /* Link to our device_state */
- u32 pasid; /* PASID index */
- bool invalid; /* Used during setup and
- teardown of the pasid */
- spinlock_t lock; /* Protect pri_queues and
- mmu_notifer_count */
- wait_queue_head_t wq; /* To wait for count == 0 */
-};
-
-struct device_state {
- struct list_head list;
- u32 sbdf;
- atomic_t count;
- struct pci_dev *pdev;
- struct pasid_state **states;
- struct iommu_domain *domain;
- int pasid_levels;
- int max_pasids;
- amd_iommu_invalid_ppr_cb inv_ppr_cb;
- amd_iommu_invalidate_ctx inv_ctx_cb;
- spinlock_t lock;
- wait_queue_head_t wq;
-};
-
-struct fault {
- struct work_struct work;
- struct device_state *dev_state;
- struct pasid_state *state;
- struct mm_struct *mm;
- u64 address;
- u32 pasid;
- u16 tag;
- u16 finish;
- u16 flags;
-};
-
-static LIST_HEAD(state_list);
-static DEFINE_SPINLOCK(state_lock);
-
-static struct workqueue_struct *iommu_wq;
-
-static void free_pasid_states(struct device_state *dev_state);
-
-static struct device_state *__get_device_state(u32 sbdf)
-{
- struct device_state *dev_state;
-
- list_for_each_entry(dev_state, &state_list, list) {
- if (dev_state->sbdf == sbdf)
- return dev_state;
- }
-
- return NULL;
-}
-
-static struct device_state *get_device_state(u32 sbdf)
-{
- struct device_state *dev_state;
- unsigned long flags;
-
- spin_lock_irqsave(&state_lock, flags);
- dev_state = __get_device_state(sbdf);
- if (dev_state != NULL)
- atomic_inc(&dev_state->count);
- spin_unlock_irqrestore(&state_lock, flags);
-
- return dev_state;
-}
-
-static void free_device_state(struct device_state *dev_state)
-{
- struct iommu_group *group;
-
- /* Get rid of any remaining pasid states */
- free_pasid_states(dev_state);
-
- /*
- * Wait until the last reference is dropped before freeing
- * the device state.
- */
- wait_event(dev_state->wq, !atomic_read(&dev_state->count));
-
- /*
- * First detach device from domain - No more PRI requests will arrive
- * from that device after it is unbound from the IOMMUv2 domain.
- */
- group = iommu_group_get(&dev_state->pdev->dev);
- if (WARN_ON(!group))
- return;
-
- iommu_detach_group(dev_state->domain, group);
-
- iommu_group_put(group);
-
- /* Everything is down now, free the IOMMUv2 domain */
- iommu_domain_free(dev_state->domain);
-
- /* Finally get rid of the device-state */
- kfree(dev_state);
-}
-
-static void put_device_state(struct device_state *dev_state)
-{
- if (atomic_dec_and_test(&dev_state->count))
- wake_up(&dev_state->wq);
-}
-
-/* Must be called under dev_state->lock */
-static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
- u32 pasid, bool alloc)
-{
- struct pasid_state **root, **ptr;
- int level, index;
-
- level = dev_state->pasid_levels;
- root = dev_state->states;
-
- while (true) {
-
- index = (pasid >> (9 * level)) & 0x1ff;
- ptr = &root[index];
-
- if (level == 0)
- break;
-
- if (*ptr == NULL) {
- if (!alloc)
- return NULL;
-
- *ptr = (void *)get_zeroed_page(GFP_ATOMIC);
- if (*ptr == NULL)
- return NULL;
- }
-
- root = (struct pasid_state **)*ptr;
- level -= 1;
- }
-
- return ptr;
-}
-
-static int set_pasid_state(struct device_state *dev_state,
- struct pasid_state *pasid_state,
- u32 pasid)
-{
- struct pasid_state **ptr;
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&dev_state->lock, flags);
- ptr = __get_pasid_state_ptr(dev_state, pasid, true);
-
- ret = -ENOMEM;
- if (ptr == NULL)
- goto out_unlock;
-
- ret = -ENOMEM;
- if (*ptr != NULL)
- goto out_unlock;
-
- *ptr = pasid_state;
-
- ret = 0;
-
-out_unlock:
- spin_unlock_irqrestore(&dev_state->lock, flags);
-
- return ret;
-}
-
-static void clear_pasid_state(struct device_state *dev_state, u32 pasid)
-{
- struct pasid_state **ptr;
- unsigned long flags;
-
- spin_lock_irqsave(&dev_state->lock, flags);
- ptr = __get_pasid_state_ptr(dev_state, pasid, true);
-
- if (ptr == NULL)
- goto out_unlock;
-
- *ptr = NULL;
-
-out_unlock:
- spin_unlock_irqrestore(&dev_state->lock, flags);
-}
-
-static struct pasid_state *get_pasid_state(struct device_state *dev_state,
- u32 pasid)
-{
- struct pasid_state **ptr, *ret = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&dev_state->lock, flags);
- ptr = __get_pasid_state_ptr(dev_state, pasid, false);
-
- if (ptr == NULL)
- goto out_unlock;
-
- ret = *ptr;
- if (ret)
- refcount_inc(&ret->count);
-
-out_unlock:
- spin_unlock_irqrestore(&dev_state->lock, flags);
-
- return ret;
-}
-
-static void free_pasid_state(struct pasid_state *pasid_state)
-{
- kfree(pasid_state);
-}
-
-static void put_pasid_state(struct pasid_state *pasid_state)
-{
- if (refcount_dec_and_test(&pasid_state->count))
- wake_up(&pasid_state->wq);
-}
-
-static void put_pasid_state_wait(struct pasid_state *pasid_state)
-{
- if (!refcount_dec_and_test(&pasid_state->count))
- wait_event(pasid_state->wq, !refcount_read(&pasid_state->count));
- free_pasid_state(pasid_state);
-}
-
-static void unbind_pasid(struct pasid_state *pasid_state)
-{
- struct iommu_domain *domain;
-
- domain = pasid_state->device_state->domain;
-
- /*
- * Mark pasid_state as invalid, no more faults will we added to the
- * work queue after this is visible everywhere.
- */
- pasid_state->invalid = true;
-
- /* Make sure this is visible */
- smp_wmb();
-
- /* After this the device/pasid can't access the mm anymore */
- amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);
-
- /* Make sure no more pending faults are in the queue */
- flush_workqueue(iommu_wq);
-}
-
-static void free_pasid_states_level1(struct pasid_state **tbl)
-{
- int i;
-
- for (i = 0; i < 512; ++i) {
- if (tbl[i] == NULL)
- continue;
-
- free_page((unsigned long)tbl[i]);
- }
-}
-
-static void free_pasid_states_level2(struct pasid_state **tbl)
-{
- struct pasid_state **ptr;
- int i;
-
- for (i = 0; i < 512; ++i) {
- if (tbl[i] == NULL)
- continue;
-
- ptr = (struct pasid_state **)tbl[i];
- free_pasid_states_level1(ptr);
- }
-}
-
-static void free_pasid_states(struct device_state *dev_state)
-{
- struct pasid_state *pasid_state;
- int i;
-
- for (i = 0; i < dev_state->max_pasids; ++i) {
- pasid_state = get_pasid_state(dev_state, i);
- if (pasid_state == NULL)
- continue;
-
- put_pasid_state(pasid_state);
-
- /* Clear the pasid state so that the pasid can be re-used */
- clear_pasid_state(dev_state, pasid_state->pasid);
-
- /*
- * This will call the mn_release function and
- * unbind the PASID
- */
- mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
-
- put_pasid_state_wait(pasid_state); /* Reference taken in
- amd_iommu_bind_pasid */
-
- /* Drop reference taken in amd_iommu_bind_pasid */
- put_device_state(dev_state);
- }
-
- if (dev_state->pasid_levels == 2)
- free_pasid_states_level2(dev_state->states);
- else if (dev_state->pasid_levels == 1)
- free_pasid_states_level1(dev_state->states);
- else
- BUG_ON(dev_state->pasid_levels != 0);
-
- free_page((unsigned long)dev_state->states);
-}
-
-static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
-{
- return container_of(mn, struct pasid_state, mn);
-}
-
-static void mn_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- struct pasid_state *pasid_state;
- struct device_state *dev_state;
-
- pasid_state = mn_to_state(mn);
- dev_state = pasid_state->device_state;
-
- if ((start ^ (end - 1)) < PAGE_SIZE)
- amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
- start);
- else
- amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
-}
-
-static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
-{
- struct pasid_state *pasid_state;
- struct device_state *dev_state;
- bool run_inv_ctx_cb;
-
- might_sleep();
-
- pasid_state = mn_to_state(mn);
- dev_state = pasid_state->device_state;
- run_inv_ctx_cb = !pasid_state->invalid;
-
- if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
- dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);
-
- unbind_pasid(pasid_state);
-}
-
-static const struct mmu_notifier_ops iommu_mn = {
- .release = mn_release,
- .arch_invalidate_secondary_tlbs = mn_arch_invalidate_secondary_tlbs,
-};
-
-static void set_pri_tag_status(struct pasid_state *pasid_state,
- u16 tag, int status)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&pasid_state->lock, flags);
- pasid_state->pri[tag].status = status;
- spin_unlock_irqrestore(&pasid_state->lock, flags);
-}
-
-static void finish_pri_tag(struct device_state *dev_state,
- struct pasid_state *pasid_state,
- u16 tag)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&pasid_state->lock, flags);
- if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
- pasid_state->pri[tag].finish) {
- amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
- pasid_state->pri[tag].status, tag);
- pasid_state->pri[tag].finish = false;
- pasid_state->pri[tag].status = PPR_SUCCESS;
- }
- spin_unlock_irqrestore(&pasid_state->lock, flags);
-}
-
-static void handle_fault_error(struct fault *fault)
-{
- int status;
-
- if (!fault->dev_state->inv_ppr_cb) {
- set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
- return;
- }
-
- status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
- fault->pasid,
- fault->address,
- fault->flags);
- switch (status) {
- case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
- set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
- break;
- case AMD_IOMMU_INV_PRI_RSP_INVALID:
- set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
- break;
- case AMD_IOMMU_INV_PRI_RSP_FAIL:
- set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
- break;
- default:
- BUG();
- }
-}
-
-static bool access_error(struct vm_area_struct *vma, struct fault *fault)
-{
- unsigned long requested = 0;
-
- if (fault->flags & PPR_FAULT_EXEC)
- requested |= VM_EXEC;
-
- if (fault->flags & PPR_FAULT_READ)
- requested |= VM_READ;
-
- if (fault->flags & PPR_FAULT_WRITE)
- requested |= VM_WRITE;
-
- return (requested & ~vma->vm_flags) != 0;
-}
-
-static void do_fault(struct work_struct *work)
-{
- struct fault *fault = container_of(work, struct fault, work);
- struct vm_area_struct *vma;
- vm_fault_t ret = VM_FAULT_ERROR;
- unsigned int flags = 0;
- struct mm_struct *mm;
- u64 address;
-
- mm = fault->state->mm;
- address = fault->address;
-
- if (fault->flags & PPR_FAULT_USER)
- flags |= FAULT_FLAG_USER;
- if (fault->flags & PPR_FAULT_WRITE)
- flags |= FAULT_FLAG_WRITE;
- flags |= FAULT_FLAG_REMOTE;
-
- mmap_read_lock(mm);
- vma = vma_lookup(mm, address);
- if (!vma)
- /* failed to get a vma in the right range */
- goto out;
-
- /* Check if we have the right permissions on the vma */
- if (access_error(vma, fault))
- goto out;
-
- ret = handle_mm_fault(vma, address, flags, NULL);
-out:
- mmap_read_unlock(mm);
-
- if (ret & VM_FAULT_ERROR)
- /* failed to service fault */
- handle_fault_error(fault);
-
- finish_pri_tag(fault->dev_state, fault->state, fault->tag);
-
- put_pasid_state(fault->state);
-
- kfree(fault);
-}
-
-static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
-{
- struct amd_iommu_fault *iommu_fault;
- struct pasid_state *pasid_state;
- struct device_state *dev_state;
- struct pci_dev *pdev = NULL;
- unsigned long flags;
- struct fault *fault;
- bool finish;
- u16 tag, devid, seg_id;
- int ret;
-
- iommu_fault = data;
- tag = iommu_fault->tag & 0x1ff;
- finish = (iommu_fault->tag >> 9) & 1;
-
- seg_id = PCI_SBDF_TO_SEGID(iommu_fault->sbdf);
- devid = PCI_SBDF_TO_DEVID(iommu_fault->sbdf);
- pdev = pci_get_domain_bus_and_slot(seg_id, PCI_BUS_NUM(devid),
- devid & 0xff);
- if (!pdev)
- return -ENODEV;
-
- ret = NOTIFY_DONE;
-
- /* In kdump kernel pci dev is not initialized yet -> send INVALID */
- if (amd_iommu_is_attach_deferred(&pdev->dev)) {
- amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
- PPR_INVALID, tag);
- goto out;
- }
-
- dev_state = get_device_state(iommu_fault->sbdf);
- if (dev_state == NULL)
- goto out;
-
- pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
- if (pasid_state == NULL || pasid_state->invalid) {
- /* We know the device but not the PASID -> send INVALID */
- amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
- PPR_INVALID, tag);
- goto out_drop_state;
- }
-
- spin_lock_irqsave(&pasid_state->lock, flags);
- atomic_inc(&pasid_state->pri[tag].inflight);
- if (finish)
- pasid_state->pri[tag].finish = true;
- spin_unlock_irqrestore(&pasid_state->lock, flags);
-
- fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
- if (fault == NULL) {
- /* We are OOM - send success and let the device re-fault */
- finish_pri_tag(dev_state, pasid_state, tag);
- goto out_drop_state;
- }
-
- fault->dev_state = dev_state;
- fault->address = iommu_fault->address;
- fault->state = pasid_state;
- fault->tag = tag;
- fault->finish = finish;
- fault->pasid = iommu_fault->pasid;
- fault->flags = iommu_fault->flags;
- INIT_WORK(&fault->work, do_fault);
-
- queue_work(iommu_wq, &fault->work);
-
- ret = NOTIFY_OK;
-
-out_drop_state:
-
- if (ret != NOTIFY_OK && pasid_state)
- put_pasid_state(pasid_state);
-
- put_device_state(dev_state);
-
-out:
- pci_dev_put(pdev);
- return ret;
-}
-
-static struct notifier_block ppr_nb = {
- .notifier_call = ppr_notifier,
-};
-
-int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
- struct task_struct *task)
-{
- struct pasid_state *pasid_state;
- struct device_state *dev_state;
- struct mm_struct *mm;
- u32 sbdf;
- int ret;
-
- might_sleep();
-
- if (!amd_iommu_v2_supported())
- return -ENODEV;
-
- sbdf = get_pci_sbdf_id(pdev);
- dev_state = get_device_state(sbdf);
-
- if (dev_state == NULL)
- return -EINVAL;
-
- ret = -EINVAL;
- if (pasid >= dev_state->max_pasids)
- goto out;
-
- ret = -ENOMEM;
- pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
- if (pasid_state == NULL)
- goto out;
-
-
- refcount_set(&pasid_state->count, 1);
- init_waitqueue_head(&pasid_state->wq);
- spin_lock_init(&pasid_state->lock);
-
- mm = get_task_mm(task);
- pasid_state->mm = mm;
- pasid_state->device_state = dev_state;
- pasid_state->pasid = pasid;
- pasid_state->invalid = true; /* Mark as valid only if we are
- done with setting up the pasid */
- pasid_state->mn.ops = &iommu_mn;
-
- if (pasid_state->mm == NULL)
- goto out_free;
-
- ret = mmu_notifier_register(&pasid_state->mn, mm);
- if (ret)
- goto out_free;
-
- ret = set_pasid_state(dev_state, pasid_state, pasid);
- if (ret)
- goto out_unregister;
-
- ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
- __pa(pasid_state->mm->pgd));
- if (ret)
- goto out_clear_state;
-
- /* Now we are ready to handle faults */
- pasid_state->invalid = false;
-
- /*
- * Drop the reference to the mm_struct here. We rely on the
- * mmu_notifier release call-back to inform us when the mm
- * is going away.
- */
- mmput(mm);
-
- return 0;
-
-out_clear_state:
- clear_pasid_state(dev_state, pasid);
-
-out_unregister:
- mmu_notifier_unregister(&pasid_state->mn, mm);
- mmput(mm);
-
-out_free:
- free_pasid_state(pasid_state);
-
-out:
- put_device_state(dev_state);
-
- return ret;
-}
-EXPORT_SYMBOL(amd_iommu_bind_pasid);
-
-void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid)
-{
- struct pasid_state *pasid_state;
- struct device_state *dev_state;
- u32 sbdf;
-
- might_sleep();
-
- if (!amd_iommu_v2_supported())
- return;
-
- sbdf = get_pci_sbdf_id(pdev);
- dev_state = get_device_state(sbdf);
- if (dev_state == NULL)
- return;
-
- if (pasid >= dev_state->max_pasids)
- goto out;
-
- pasid_state = get_pasid_state(dev_state, pasid);
- if (pasid_state == NULL)
- goto out;
- /*
- * Drop reference taken here. We are safe because we still hold
- * the reference taken in the amd_iommu_bind_pasid function.
- */
- put_pasid_state(pasid_state);
-
- /* Clear the pasid state so that the pasid can be re-used */
- clear_pasid_state(dev_state, pasid_state->pasid);
-
- /*
- * Call mmu_notifier_unregister to drop our reference
- * to pasid_state->mm
- */
- mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
-
- put_pasid_state_wait(pasid_state); /* Reference taken in
- amd_iommu_bind_pasid */
-out:
- /* Drop reference taken in this function */
- put_device_state(dev_state);
-
- /* Drop reference taken in amd_iommu_bind_pasid */
- put_device_state(dev_state);
-}
-EXPORT_SYMBOL(amd_iommu_unbind_pasid);
-
-int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
-{
- struct device_state *dev_state;
- struct iommu_group *group;
- unsigned long flags;
- int ret, tmp;
- u32 sbdf;
-
- might_sleep();
-
- /*
- * When memory encryption is active the device is likely not in a
- * direct-mapped domain. Forbid using IOMMUv2 functionality for now.
- */
- if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
- return -ENODEV;
-
- if (!amd_iommu_v2_supported())
- return -ENODEV;
-
- if (pasids <= 0 || pasids > (PASID_MASK + 1))
- return -EINVAL;
-
- sbdf = get_pci_sbdf_id(pdev);
-
- dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
- if (dev_state == NULL)
- return -ENOMEM;
-
- spin_lock_init(&dev_state->lock);
- init_waitqueue_head(&dev_state->wq);
- dev_state->pdev = pdev;
- dev_state->sbdf = sbdf;
-
- tmp = pasids;
- for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
- dev_state->pasid_levels += 1;
-
- atomic_set(&dev_state->count, 1);
- dev_state->max_pasids = pasids;
-
- ret = -ENOMEM;
- dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
- if (dev_state->states == NULL)
- goto out_free_dev_state;
-
- dev_state->domain = iommu_domain_alloc(&pci_bus_type);
- if (dev_state->domain == NULL)
- goto out_free_states;
-
- /* See iommu_is_default_domain() */
- dev_state->domain->type = IOMMU_DOMAIN_IDENTITY;
- amd_iommu_domain_direct_map(dev_state->domain);
-
- ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
- if (ret)
- goto out_free_domain;
-
- group = iommu_group_get(&pdev->dev);
- if (!group) {
- ret = -EINVAL;
- goto out_free_domain;
- }
-
- ret = iommu_attach_group(dev_state->domain, group);
- if (ret != 0)
- goto out_drop_group;
-
- iommu_group_put(group);
-
- spin_lock_irqsave(&state_lock, flags);
-
- if (__get_device_state(sbdf) != NULL) {
- spin_unlock_irqrestore(&state_lock, flags);
- ret = -EBUSY;
- goto out_free_domain;
- }
-
- list_add_tail(&dev_state->list, &state_list);
-
- spin_unlock_irqrestore(&state_lock, flags);
-
- return 0;
-
-out_drop_group:
- iommu_group_put(group);
-
-out_free_domain:
- iommu_domain_free(dev_state->domain);
-
-out_free_states:
- free_page((unsigned long)dev_state->states);
-
-out_free_dev_state:
- kfree(dev_state);
-
- return ret;
-}
-EXPORT_SYMBOL(amd_iommu_init_device);
-
-void amd_iommu_free_device(struct pci_dev *pdev)
-{
- struct device_state *dev_state;
- unsigned long flags;
- u32 sbdf;
-
- if (!amd_iommu_v2_supported())
- return;
-
- sbdf = get_pci_sbdf_id(pdev);
-
- spin_lock_irqsave(&state_lock, flags);
-
- dev_state = __get_device_state(sbdf);
- if (dev_state == NULL) {
- spin_unlock_irqrestore(&state_lock, flags);
- return;
- }
-
- list_del(&dev_state->list);
-
- spin_unlock_irqrestore(&state_lock, flags);
-
- put_device_state(dev_state);
- free_device_state(dev_state);
-}
-EXPORT_SYMBOL(amd_iommu_free_device);
-
-int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
- amd_iommu_invalid_ppr_cb cb)
-{
- struct device_state *dev_state;
- unsigned long flags;
- u32 sbdf;
- int ret;
-
- if (!amd_iommu_v2_supported())
- return -ENODEV;
-
- sbdf = get_pci_sbdf_id(pdev);
-
- spin_lock_irqsave(&state_lock, flags);
-
- ret = -EINVAL;
- dev_state = __get_device_state(sbdf);
- if (dev_state == NULL)
- goto out_unlock;
-
- dev_state->inv_ppr_cb = cb;
-
- ret = 0;
-
-out_unlock:
- spin_unlock_irqrestore(&state_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);
-
-int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
- amd_iommu_invalidate_ctx cb)
-{
- struct device_state *dev_state;
- unsigned long flags;
- u32 sbdf;
- int ret;
-
- if (!amd_iommu_v2_supported())
- return -ENODEV;
-
- sbdf = get_pci_sbdf_id(pdev);
-
- spin_lock_irqsave(&state_lock, flags);
-
- ret = -EINVAL;
- dev_state = __get_device_state(sbdf);
- if (dev_state == NULL)
- goto out_unlock;
-
- dev_state->inv_ctx_cb = cb;
-
- ret = 0;
-
-out_unlock:
- spin_unlock_irqrestore(&state_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);
-
-static int __init amd_iommu_v2_init(void)
-{
- int ret;
-
- if (!amd_iommu_v2_supported()) {
- pr_info("AMD IOMMUv2 functionality not available on this system - This is not a bug.\n");
- /*
- * Load anyway to provide the symbols to other modules
- * which may use AMD IOMMUv2 optionally.
- */
- return 0;
- }
-
- ret = -ENOMEM;
- iommu_wq = alloc_workqueue("amd_iommu_v2", WQ_MEM_RECLAIM, 0);
- if (iommu_wq == NULL)
- goto out;
-
- amd_iommu_register_ppr_notifier(&ppr_nb);
-
- pr_info("AMD IOMMUv2 loaded and initialized\n");
-
- return 0;
-
-out:
- return ret;
-}
-
-static void __exit amd_iommu_v2_exit(void)
-{
- struct device_state *dev_state, *next;
- unsigned long flags;
- LIST_HEAD(freelist);
-
- if (!amd_iommu_v2_supported())
- return;
-
- amd_iommu_unregister_ppr_notifier(&ppr_nb);
-
- flush_workqueue(iommu_wq);
-
- /*
- * The loop below might call flush_workqueue(), so call
- * destroy_workqueue() after it
- */
- spin_lock_irqsave(&state_lock, flags);
-
- list_for_each_entry_safe(dev_state, next, &state_list, list) {
- WARN_ON_ONCE(1);
-
- put_device_state(dev_state);
- list_del(&dev_state->list);
- list_add_tail(&dev_state->list, &freelist);
- }
-
- spin_unlock_irqrestore(&state_lock, flags);
-
- /*
- * Since free_device_state waits on the count to be zero,
- * we need to free dev_state outside the spinlock.
- */
- list_for_each_entry_safe(dev_state, next, &freelist, list) {
- list_del(&dev_state->list);
- free_device_state(dev_state);
- }
-
- destroy_workqueue(iommu_wq);
-}
-
-module_init(amd_iommu_v2_init);
-module_exit(amd_iommu_v2_exit);
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 0b8927508427..ee05f4824bfa 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -196,7 +196,6 @@ struct apple_dart_hw {
* @lock: lock for hardware operations involving this dart
* @pgsize: pagesize supported by this DART
* @supports_bypass: indicates if this DART supports bypass mode
- * @force_bypass: force bypass mode due to pagesize mismatch?
* @sid2group: maps stream ids to iommu_groups
* @iommu: iommu core device
*/
@@ -217,7 +216,6 @@ struct apple_dart {
u32 pgsize;
u32 num_streams;
u32 supports_bypass : 1;
- u32 force_bypass : 1;
struct iommu_group *sid2group[DART_MAX_STREAMS];
struct iommu_device iommu;
@@ -506,10 +504,11 @@ static void apple_dart_iotlb_sync(struct iommu_domain *domain,
apple_dart_domain_flush_tlb(to_dart_domain(domain));
}
-static void apple_dart_iotlb_sync_map(struct iommu_domain *domain,
- unsigned long iova, size_t size)
+static int apple_dart_iotlb_sync_map(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
{
apple_dart_domain_flush_tlb(to_dart_domain(domain));
+ return 0;
}
static phys_addr_t apple_dart_iova_to_phys(struct iommu_domain *domain,
@@ -568,15 +567,17 @@ apple_dart_setup_translation(struct apple_dart_domain *domain,
stream_map->dart->hw->invalidate_tlb(stream_map);
}
-static int apple_dart_finalize_domain(struct iommu_domain *domain,
+static int apple_dart_finalize_domain(struct apple_dart_domain *dart_domain,
struct apple_dart_master_cfg *cfg)
{
- struct apple_dart_domain *dart_domain = to_dart_domain(domain);
struct apple_dart *dart = cfg->stream_maps[0].dart;
struct io_pgtable_cfg pgtbl_cfg;
int ret = 0;
int i, j;
+ if (dart->pgsize > PAGE_SIZE)
+ return -EINVAL;
+
mutex_lock(&dart_domain->init_lock);
if (dart_domain->finalized)
@@ -597,17 +598,18 @@ static int apple_dart_finalize_domain(struct iommu_domain *domain,
.iommu_dev = dart->dev,
};
- dart_domain->pgtbl_ops =
- alloc_io_pgtable_ops(dart->hw->fmt, &pgtbl_cfg, domain);
+ dart_domain->pgtbl_ops = alloc_io_pgtable_ops(dart->hw->fmt, &pgtbl_cfg,
+ &dart_domain->domain);
if (!dart_domain->pgtbl_ops) {
ret = -ENOMEM;
goto done;
}
- domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
- domain->geometry.aperture_start = 0;
- domain->geometry.aperture_end = (dma_addr_t)DMA_BIT_MASK(dart->ias);
- domain->geometry.force_aperture = true;
+ dart_domain->domain.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+ dart_domain->domain.geometry.aperture_start = 0;
+ dart_domain->domain.geometry.aperture_end =
+ (dma_addr_t)DMA_BIT_MASK(dart->ias);
+ dart_domain->domain.geometry.force_aperture = true;
dart_domain->finalized = true;
@@ -651,47 +653,72 @@ static int apple_dart_domain_add_streams(struct apple_dart_domain *domain,
true);
}
-static int apple_dart_attach_dev(struct iommu_domain *domain,
- struct device *dev)
+static int apple_dart_attach_dev_paging(struct iommu_domain *domain,
+ struct device *dev)
{
int ret, i;
struct apple_dart_stream_map *stream_map;
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
struct apple_dart_domain *dart_domain = to_dart_domain(domain);
- if (cfg->stream_maps[0].dart->force_bypass &&
- domain->type != IOMMU_DOMAIN_IDENTITY)
- return -EINVAL;
- if (!cfg->stream_maps[0].dart->supports_bypass &&
- domain->type == IOMMU_DOMAIN_IDENTITY)
- return -EINVAL;
+ ret = apple_dart_finalize_domain(dart_domain, cfg);
+ if (ret)
+ return ret;
- ret = apple_dart_finalize_domain(domain, cfg);
+ ret = apple_dart_domain_add_streams(dart_domain, cfg);
if (ret)
return ret;
- switch (domain->type) {
- default:
- ret = apple_dart_domain_add_streams(dart_domain, cfg);
- if (ret)
- return ret;
+ for_each_stream_map(i, cfg, stream_map)
+ apple_dart_setup_translation(dart_domain, stream_map);
+ return 0;
+}
- for_each_stream_map(i, cfg, stream_map)
- apple_dart_setup_translation(dart_domain, stream_map);
- break;
- case IOMMU_DOMAIN_BLOCKED:
- for_each_stream_map(i, cfg, stream_map)
- apple_dart_hw_disable_dma(stream_map);
- break;
- case IOMMU_DOMAIN_IDENTITY:
- for_each_stream_map(i, cfg, stream_map)
- apple_dart_hw_enable_bypass(stream_map);
- break;
- }
+static int apple_dart_attach_dev_identity(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
+ struct apple_dart_stream_map *stream_map;
+ int i;
- return ret;
+ if (!cfg->stream_maps[0].dart->supports_bypass)
+ return -EINVAL;
+
+ for_each_stream_map(i, cfg, stream_map)
+ apple_dart_hw_enable_bypass(stream_map);
+ return 0;
}
+static const struct iommu_domain_ops apple_dart_identity_ops = {
+ .attach_dev = apple_dart_attach_dev_identity,
+};
+
+static struct iommu_domain apple_dart_identity_domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &apple_dart_identity_ops,
+};
+
+static int apple_dart_attach_dev_blocked(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
+ struct apple_dart_stream_map *stream_map;
+ int i;
+
+ for_each_stream_map(i, cfg, stream_map)
+ apple_dart_hw_disable_dma(stream_map);
+ return 0;
+}
+
+static const struct iommu_domain_ops apple_dart_blocked_ops = {
+ .attach_dev = apple_dart_attach_dev_blocked,
+};
+
+static struct iommu_domain apple_dart_blocked_domain = {
+ .type = IOMMU_DOMAIN_BLOCKED,
+ .ops = &apple_dart_blocked_ops,
+};
+
static struct iommu_device *apple_dart_probe_device(struct device *dev)
{
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
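
The identity and blocked domains above are statically allocated singletons whose ops carry only .attach_dev, replacing the old switch on domain->type inside attach. A stand-alone sketch of that pattern, with illustrative names rather than the kernel's structures:

#include <stdio.h>

struct domain;

struct domain_ops {
	int (*attach_dev)(struct domain *dom, const char *dev);
};

struct domain {
	const char *type;
	const struct domain_ops *ops;
};

static int attach_identity(struct domain *dom, const char *dev)
{
	printf("%s: bypass translation for %s\n", dom->type, dev);
	return 0;
}

static const struct domain_ops identity_ops = { .attach_dev = attach_identity };

/* One global instance is enough: the domain carries no per-allocation state,
 * so no allocation or finalization path is needed for it. */
static struct domain identity_domain = { .type = "identity", .ops = &identity_ops };

int main(void)
{
	return identity_domain.ops->attach_dev(&identity_domain, "dart0");
}
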
@@ -717,24 +744,26 @@ static void apple_dart_release_device(struct device *dev)
kfree(cfg);
}
-static struct iommu_domain *apple_dart_domain_alloc(unsigned int type)
+static struct iommu_domain *apple_dart_domain_alloc_paging(struct device *dev)
{
struct apple_dart_domain *dart_domain;
- if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED &&
- type != IOMMU_DOMAIN_IDENTITY && type != IOMMU_DOMAIN_BLOCKED)
- return NULL;
-
dart_domain = kzalloc(sizeof(*dart_domain), GFP_KERNEL);
if (!dart_domain)
return NULL;
mutex_init(&dart_domain->init_lock);
- /* no need to allocate pgtbl_ops or do any other finalization steps */
- if (type == IOMMU_DOMAIN_IDENTITY || type == IOMMU_DOMAIN_BLOCKED)
- dart_domain->finalized = true;
+ if (dev) {
+ struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
+ int ret;
+ ret = apple_dart_finalize_domain(dart_domain, cfg);
+ if (ret) {
+ kfree(dart_domain);
+ return ERR_PTR(ret);
+ }
+ }
return &dart_domain->domain;
}
@@ -770,8 +799,6 @@ static int apple_dart_of_xlate(struct device *dev, struct of_phandle_args *args)
if (cfg_dart) {
if (cfg_dart->supports_bypass != dart->supports_bypass)
return -EINVAL;
- if (cfg_dart->force_bypass != dart->force_bypass)
- return -EINVAL;
if (cfg_dart->pgsize != dart->pgsize)
return -EINVAL;
}
@@ -913,7 +940,7 @@ static int apple_dart_def_domain_type(struct device *dev)
{
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
- if (cfg->stream_maps[0].dart->force_bypass)
+ if (cfg->stream_maps[0].dart->pgsize > PAGE_SIZE)
return IOMMU_DOMAIN_IDENTITY;
if (!cfg->stream_maps[0].dart->supports_bypass)
return IOMMU_DOMAIN_DMA;
@@ -947,7 +974,9 @@ static void apple_dart_get_resv_regions(struct device *dev,
}
static const struct iommu_ops apple_dart_iommu_ops = {
- .domain_alloc = apple_dart_domain_alloc,
+ .identity_domain = &apple_dart_identity_domain,
+ .blocked_domain = &apple_dart_blocked_domain,
+ .domain_alloc_paging = apple_dart_domain_alloc_paging,
.probe_device = apple_dart_probe_device,
.release_device = apple_dart_release_device,
.device_group = apple_dart_device_group,
@@ -957,7 +986,7 @@ static const struct iommu_ops apple_dart_iommu_ops = {
.pgsize_bitmap = -1UL, /* Restricted during dart probe */
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
- .attach_dev = apple_dart_attach_dev,
+ .attach_dev = apple_dart_attach_dev_paging,
.map_pages = apple_dart_map_pages,
.unmap_pages = apple_dart_unmap_pages,
.flush_iotlb_all = apple_dart_flush_iotlb_all,
@@ -1111,8 +1140,6 @@ static int apple_dart_probe(struct platform_device *pdev)
goto err_clk_disable;
}
- dart->force_bypass = dart->pgsize > PAGE_SIZE;
-
ret = apple_dart_hw_reset(dart);
if (ret)
goto err_clk_disable;
@@ -1136,7 +1163,8 @@ static int apple_dart_probe(struct platform_device *pdev)
dev_info(
&pdev->dev,
"DART [pagesize %x, %d streams, bypass support: %d, bypass forced: %d] initialized\n",
- dart->pgsize, dart->num_streams, dart->supports_bypass, dart->force_bypass);
+ dart->pgsize, dart->num_streams, dart->supports_bypass,
+ dart->pgsize > PAGE_SIZE);
return 0;
err_sysfs_remove:
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 8a16cd3ef487..353248ab18e7 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -25,11 +25,9 @@ struct arm_smmu_mmu_notifier {
#define mn_to_smmu(mn) container_of(mn, struct arm_smmu_mmu_notifier, mn)
struct arm_smmu_bond {
- struct iommu_sva sva;
struct mm_struct *mm;
struct arm_smmu_mmu_notifier *smmu_mn;
struct list_head list;
- refcount_t refs;
};
#define sva_to_bond(handle) \
@@ -38,6 +36,25 @@ struct arm_smmu_bond {
static DEFINE_MUTEX(sva_lock);
/*
+ * Write the CD to the CD tables for all masters that this domain is attached
+ * to. Note that this is only used to update existing CD entries in the target
+ * CD table, for which it's assumed that arm_smmu_write_ctx_desc can't fail.
+ */
+static void arm_smmu_update_ctx_desc_devices(struct arm_smmu_domain *smmu_domain,
+ int ssid,
+ struct arm_smmu_ctx_desc *cd)
+{
+ struct arm_smmu_master *master;
+ unsigned long flags;
+
+ spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+ list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+ arm_smmu_write_ctx_desc(master, ssid, cd);
+ }
+ spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+}
+
+/*
* Check if the CPU ASID is available on the SMMU side. If a private context
* descriptor is using it, try to replace it.
*/
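
With the CD table now owned by the master rather than the domain, arm_smmu_write_ctx_desc() takes a master, and the SVA code walks every attached master under devices_lock; on a mid-list failure it unwinds the masters already written (see the list_for_each_entry_from_reverse() call in the set-PASID hunk below). A stand-alone sketch of that apply-then-unwind pattern, using a plain array instead of a kernel list:

#include <stdio.h>

static int write_cd(int master, int install)
{
	/* Pretend master 2 cannot accept the new context descriptor. */
	if (install && master == 2)
		return -1;
	printf("master %d: %s\n", master, install ? "install" : "clear");
	return 0;
}

static int update_all(int nr_masters)
{
	int i, ret = 0;

	for (i = 0; i < nr_masters; i++) {
		ret = write_cd(i, 1);
		if (ret) {
			/* Roll back the masters we already updated. */
			while (--i >= 0)
				write_cd(i, 0);
			break;
		}
	}
	return ret;
}

int main(void)
{
	return update_all(4) ? 1 : 0;
}
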
@@ -62,7 +79,7 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
return cd;
}
- smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
+ smmu_domain = container_of(cd, struct arm_smmu_domain, cd);
smmu = smmu_domain->smmu;
ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
@@ -80,7 +97,7 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
* be some overlap between use of both ASIDs, until we invalidate the
* TLB.
*/
- arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, cd);
+ arm_smmu_update_ctx_desc_devices(smmu_domain, IOMMU_NO_PASID, cd);
/* Invalidate TLB entries previously associated with that context */
arm_smmu_tlb_inv_asid(smmu, asid);
@@ -247,7 +264,7 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
* DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
* but disable translation.
*/
- arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd);
+ arm_smmu_update_ctx_desc_devices(smmu_domain, mm->pasid, &quiet_cd);
arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
@@ -273,8 +290,10 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
struct mm_struct *mm)
{
int ret;
+ unsigned long flags;
struct arm_smmu_ctx_desc *cd;
struct arm_smmu_mmu_notifier *smmu_mn;
+ struct arm_smmu_master *master;
list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
if (smmu_mn->mn.mm == mm) {
@@ -304,7 +323,16 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
goto err_free_cd;
}
- ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd);
+ spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+ list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+ ret = arm_smmu_write_ctx_desc(master, mm->pasid, cd);
+ if (ret) {
+ list_for_each_entry_from_reverse(master, &smmu_domain->devices, domain_head)
+ arm_smmu_write_ctx_desc(master, mm->pasid, NULL);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
if (ret)
goto err_put_notifier;
@@ -329,7 +357,8 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
return;
list_del(&smmu_mn->list);
- arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL);
+
+ arm_smmu_update_ctx_desc_devices(smmu_domain, mm->pasid, NULL);
/*
* If we went through clear(), we've already invalidated, and no
@@ -345,8 +374,7 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
arm_smmu_free_shared_cd(cd);
}
-static struct iommu_sva *
-__arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
+static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
{
int ret;
struct arm_smmu_bond *bond;
@@ -355,23 +383,13 @@ __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
if (!master || !master->sva_enabled)
- return ERR_PTR(-ENODEV);
-
- /* If bind() was already called for this {dev, mm} pair, reuse it. */
- list_for_each_entry(bond, &master->bonds, list) {
- if (bond->mm == mm) {
- refcount_inc(&bond->refs);
- return &bond->sva;
- }
- }
+ return -ENODEV;
bond = kzalloc(sizeof(*bond), GFP_KERNEL);
if (!bond)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
bond->mm = mm;
- bond->sva.dev = dev;
- refcount_set(&bond->refs, 1);
bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
if (IS_ERR(bond->smmu_mn)) {
@@ -380,11 +398,11 @@ __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
}
list_add(&bond->list, &master->bonds);
- return &bond->sva;
+ return 0;
err_free_bond:
kfree(bond);
- return ERR_PTR(ret);
+ return ret;
}
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
@@ -550,7 +568,7 @@ void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
}
}
- if (!WARN_ON(!bond) && refcount_dec_and_test(&bond->refs)) {
+ if (!WARN_ON(!bond)) {
list_del(&bond->list);
arm_smmu_mmu_notifier_put(bond->smmu_mn);
kfree(bond);
@@ -562,13 +580,10 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t id)
{
int ret = 0;
- struct iommu_sva *handle;
struct mm_struct *mm = domain->mm;
mutex_lock(&sva_lock);
- handle = __arm_smmu_sva_bind(dev, mm);
- if (IS_ERR(handle))
- ret = PTR_ERR(handle);
+ ret = __arm_smmu_sva_bind(dev, mm);
mutex_unlock(&sva_lock);
return ret;
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index bd0a596f9863..7445454c2af2 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -971,14 +971,12 @@ void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
}
-static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
+static void arm_smmu_sync_cd(struct arm_smmu_master *master,
int ssid, bool leaf)
{
size_t i;
- unsigned long flags;
- struct arm_smmu_master *master;
struct arm_smmu_cmdq_batch cmds;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_device *smmu = master->smmu;
struct arm_smmu_cmdq_ent cmd = {
.opcode = CMDQ_OP_CFGI_CD,
.cfgi = {
@@ -988,15 +986,10 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
};
cmds.num = 0;
-
- spin_lock_irqsave(&smmu_domain->devices_lock, flags);
- list_for_each_entry(master, &smmu_domain->devices, domain_head) {
- for (i = 0; i < master->num_streams; i++) {
- cmd.cfgi.sid = master->streams[i].id;
- arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
- }
+ for (i = 0; i < master->num_streams; i++) {
+ cmd.cfgi.sid = master->streams[i].id;
+ arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
}
- spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
arm_smmu_cmdq_batch_submit(smmu, &cmds);
}
@@ -1026,34 +1019,33 @@ static void arm_smmu_write_cd_l1_desc(__le64 *dst,
WRITE_ONCE(*dst, cpu_to_le64(val));
}
-static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_domain *smmu_domain,
- u32 ssid)
+static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_master *master, u32 ssid)
{
__le64 *l1ptr;
unsigned int idx;
struct arm_smmu_l1_ctx_desc *l1_desc;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
- struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
+ struct arm_smmu_device *smmu = master->smmu;
+ struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
- if (smmu_domain->s1_cfg.s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
- return cdcfg->cdtab + ssid * CTXDESC_CD_DWORDS;
+ if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
+ return cd_table->cdtab + ssid * CTXDESC_CD_DWORDS;
idx = ssid >> CTXDESC_SPLIT;
- l1_desc = &cdcfg->l1_desc[idx];
+ l1_desc = &cd_table->l1_desc[idx];
if (!l1_desc->l2ptr) {
if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc))
return NULL;
- l1ptr = cdcfg->cdtab + idx * CTXDESC_L1_DESC_DWORDS;
+ l1ptr = cd_table->cdtab + idx * CTXDESC_L1_DESC_DWORDS;
arm_smmu_write_cd_l1_desc(l1ptr, l1_desc);
/* An invalid L1CD can be cached */
- arm_smmu_sync_cd(smmu_domain, ssid, false);
+ arm_smmu_sync_cd(master, ssid, false);
}
idx = ssid & (CTXDESC_L2_ENTRIES - 1);
return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS;
}
-int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
+int arm_smmu_write_ctx_desc(struct arm_smmu_master *master, int ssid,
struct arm_smmu_ctx_desc *cd)
{
/*
@@ -1070,11 +1062,12 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
u64 val;
bool cd_live;
__le64 *cdptr;
+ struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
- if (WARN_ON(ssid >= (1 << smmu_domain->s1_cfg.s1cdmax)))
+ if (WARN_ON(ssid >= (1 << cd_table->s1cdmax)))
return -E2BIG;
- cdptr = arm_smmu_get_cd_ptr(smmu_domain, ssid);
+ cdptr = arm_smmu_get_cd_ptr(master, ssid);
if (!cdptr)
return -ENOMEM;
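
arm_smmu_get_cd_ptr() above resolves a CD slot in the two-level table by splitting the SSID: the high bits select an L1 descriptor (allocating its leaf on demand), the low bits select the CD inside that leaf. A standalone sketch of the index split, assuming the CTXDESC_SPLIT of 10 that CTXDESC_L2_ENTRIES implies; treat the constant as illustrative:

#include <stdio.h>

#define CTXDESC_SPLIT		10			/* assumed: 1024 CDs per leaf */
#define CTXDESC_L2_ENTRIES	(1 << CTXDESC_SPLIT)

int main(void)
{
	unsigned int ssid = 0x1234;
	unsigned int l1_idx = ssid >> CTXDESC_SPLIT;		/* which leaf table   */
	unsigned int l2_idx = ssid & (CTXDESC_L2_ENTRIES - 1);	/* CD within the leaf */

	printf("ssid 0x%x -> L1 index %u, L2 index %u\n", ssid, l1_idx, l2_idx);
	return 0;
}
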
@@ -1098,11 +1091,11 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
cdptr[3] = cpu_to_le64(cd->mair);
/*
- * STE is live, and the SMMU might read dwords of this CD in any
+ * STE may be live, and the SMMU might read dwords of this CD in any
* order. Ensure that it observes valid values before reading
* V=1.
*/
- arm_smmu_sync_cd(smmu_domain, ssid, true);
+ arm_smmu_sync_cd(master, ssid, true);
val = cd->tcr |
#ifdef __BIG_ENDIAN
@@ -1114,7 +1107,7 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) |
CTXDESC_CD_0_V;
- if (smmu_domain->stall_enabled)
+ if (cd_table->stall_enabled)
val |= CTXDESC_CD_0_S;
}
@@ -1128,44 +1121,45 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
* without first making the structure invalid.
*/
WRITE_ONCE(cdptr[0], cpu_to_le64(val));
- arm_smmu_sync_cd(smmu_domain, ssid, true);
+ arm_smmu_sync_cd(master, ssid, true);
return 0;
}
-static int arm_smmu_alloc_cd_tables(struct arm_smmu_domain *smmu_domain)
+static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master)
{
int ret;
size_t l1size;
size_t max_contexts;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
- struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
- struct arm_smmu_ctx_desc_cfg *cdcfg = &cfg->cdcfg;
+ struct arm_smmu_device *smmu = master->smmu;
+ struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
- max_contexts = 1 << cfg->s1cdmax;
+ cd_table->stall_enabled = master->stall_enabled;
+ cd_table->s1cdmax = master->ssid_bits;
+ max_contexts = 1 << cd_table->s1cdmax;
if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) ||
max_contexts <= CTXDESC_L2_ENTRIES) {
- cfg->s1fmt = STRTAB_STE_0_S1FMT_LINEAR;
- cdcfg->num_l1_ents = max_contexts;
+ cd_table->s1fmt = STRTAB_STE_0_S1FMT_LINEAR;
+ cd_table->num_l1_ents = max_contexts;
l1size = max_contexts * (CTXDESC_CD_DWORDS << 3);
} else {
- cfg->s1fmt = STRTAB_STE_0_S1FMT_64K_L2;
- cdcfg->num_l1_ents = DIV_ROUND_UP(max_contexts,
+ cd_table->s1fmt = STRTAB_STE_0_S1FMT_64K_L2;
+ cd_table->num_l1_ents = DIV_ROUND_UP(max_contexts,
CTXDESC_L2_ENTRIES);
- cdcfg->l1_desc = devm_kcalloc(smmu->dev, cdcfg->num_l1_ents,
- sizeof(*cdcfg->l1_desc),
+ cd_table->l1_desc = devm_kcalloc(smmu->dev, cd_table->num_l1_ents,
+ sizeof(*cd_table->l1_desc),
GFP_KERNEL);
- if (!cdcfg->l1_desc)
+ if (!cd_table->l1_desc)
return -ENOMEM;
- l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
+ l1size = cd_table->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
}
- cdcfg->cdtab = dmam_alloc_coherent(smmu->dev, l1size, &cdcfg->cdtab_dma,
+ cd_table->cdtab = dmam_alloc_coherent(smmu->dev, l1size, &cd_table->cdtab_dma,
GFP_KERNEL);
- if (!cdcfg->cdtab) {
+ if (!cd_table->cdtab) {
dev_warn(smmu->dev, "failed to allocate context descriptor\n");
ret = -ENOMEM;
goto err_free_l1;
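
arm_smmu_alloc_cd_tables() now sizes the table from the master's ssid_bits: up to CTXDESC_L2_ENTRIES contexts it stays linear, otherwise it allocates an L1 table of leaf pointers and defers the leaves. A small sketch of that sizing decision; the CD and L1 descriptor sizes follow the DWORDS << 3 expressions in the hunk, so treat the concrete numbers as illustrative:

#include <stdio.h>

#define CTXDESC_L2_ENTRIES	1024	/* CDs per leaf table (assumed)   */
#define CD_BYTES		64	/* CTXDESC_CD_DWORDS << 3         */
#define L1_DESC_BYTES		8	/* CTXDESC_L1_DESC_DWORDS << 3    */

static void size_cd_table(unsigned int ssid_bits, int have_2lvl)
{
	unsigned long max_contexts = 1UL << ssid_bits;
	unsigned long l1size;

	if (!have_2lvl || max_contexts <= CTXDESC_L2_ENTRIES) {
		/* Linear: one CD slot per possible SSID. */
		l1size = max_contexts * CD_BYTES;
		printf("ssid_bits=%u: linear table, %lu bytes\n", ssid_bits, l1size);
	} else {
		/* Two-level: one L1 descriptor per leaf, leaves allocated on demand. */
		unsigned long n_l1 = (max_contexts + CTXDESC_L2_ENTRIES - 1) /
				     CTXDESC_L2_ENTRIES;
		l1size = n_l1 * L1_DESC_BYTES;
		printf("ssid_bits=%u: 2-level, %lu L1 entries, %lu bytes up front\n",
		       ssid_bits, n_l1, l1size);
	}
}

int main(void)
{
	size_cd_table(8, 1);	/* 256 contexts   -> linear, 16 KiB      */
	size_cd_table(16, 1);	/* 65536 contexts -> 64 L1 entries       */
	return 0;
}
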
@@ -1174,42 +1168,42 @@ static int arm_smmu_alloc_cd_tables(struct arm_smmu_domain *smmu_domain)
return 0;
err_free_l1:
- if (cdcfg->l1_desc) {
- devm_kfree(smmu->dev, cdcfg->l1_desc);
- cdcfg->l1_desc = NULL;
+ if (cd_table->l1_desc) {
+ devm_kfree(smmu->dev, cd_table->l1_desc);
+ cd_table->l1_desc = NULL;
}
return ret;
}
-static void arm_smmu_free_cd_tables(struct arm_smmu_domain *smmu_domain)
+static void arm_smmu_free_cd_tables(struct arm_smmu_master *master)
{
int i;
size_t size, l1size;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
- struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
+ struct arm_smmu_device *smmu = master->smmu;
+ struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
- if (cdcfg->l1_desc) {
+ if (cd_table->l1_desc) {
size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3);
- for (i = 0; i < cdcfg->num_l1_ents; i++) {
- if (!cdcfg->l1_desc[i].l2ptr)
+ for (i = 0; i < cd_table->num_l1_ents; i++) {
+ if (!cd_table->l1_desc[i].l2ptr)
continue;
dmam_free_coherent(smmu->dev, size,
- cdcfg->l1_desc[i].l2ptr,
- cdcfg->l1_desc[i].l2ptr_dma);
+ cd_table->l1_desc[i].l2ptr,
+ cd_table->l1_desc[i].l2ptr_dma);
}
- devm_kfree(smmu->dev, cdcfg->l1_desc);
- cdcfg->l1_desc = NULL;
+ devm_kfree(smmu->dev, cd_table->l1_desc);
+ cd_table->l1_desc = NULL;
- l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
+ l1size = cd_table->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
} else {
- l1size = cdcfg->num_l1_ents * (CTXDESC_CD_DWORDS << 3);
+ l1size = cd_table->num_l1_ents * (CTXDESC_CD_DWORDS << 3);
}
- dmam_free_coherent(smmu->dev, l1size, cdcfg->cdtab, cdcfg->cdtab_dma);
- cdcfg->cdtab_dma = 0;
- cdcfg->cdtab = NULL;
+ dmam_free_coherent(smmu->dev, l1size, cd_table->cdtab, cd_table->cdtab_dma);
+ cd_table->cdtab_dma = 0;
+ cd_table->cdtab = NULL;
}
bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd)
@@ -1276,7 +1270,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
u64 val = le64_to_cpu(dst[0]);
bool ste_live = false;
struct arm_smmu_device *smmu = NULL;
- struct arm_smmu_s1_cfg *s1_cfg = NULL;
+ struct arm_smmu_ctx_desc_cfg *cd_table = NULL;
struct arm_smmu_s2_cfg *s2_cfg = NULL;
struct arm_smmu_domain *smmu_domain = NULL;
struct arm_smmu_cmdq_ent prefetch_cmd = {
@@ -1294,7 +1288,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
if (smmu_domain) {
switch (smmu_domain->stage) {
case ARM_SMMU_DOMAIN_S1:
- s1_cfg = &smmu_domain->s1_cfg;
+ cd_table = &master->cd_table;
break;
case ARM_SMMU_DOMAIN_S2:
case ARM_SMMU_DOMAIN_NESTED:
@@ -1325,7 +1319,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
val = STRTAB_STE_0_V;
/* Bypass/fault */
- if (!smmu_domain || !(s1_cfg || s2_cfg)) {
+ if (!smmu_domain || !(cd_table || s2_cfg)) {
if (!smmu_domain && disable_bypass)
val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
else
@@ -1344,7 +1338,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
return;
}
- if (s1_cfg) {
+ if (cd_table) {
u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ?
STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1;
@@ -1360,10 +1354,10 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
!master->stall_enabled)
dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
- val |= (s1_cfg->cdcfg.cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
+ val |= (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
- FIELD_PREP(STRTAB_STE_0_S1CDMAX, s1_cfg->s1cdmax) |
- FIELD_PREP(STRTAB_STE_0_S1FMT, s1_cfg->s1fmt);
+ FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax) |
+ FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt);
}
if (s2_cfg) {
@@ -1869,7 +1863,7 @@ static void arm_smmu_tlb_inv_context(void *cookie)
* careful, 007.
*/
if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
- arm_smmu_tlb_inv_asid(smmu, smmu_domain->s1_cfg.cd.asid);
+ arm_smmu_tlb_inv_asid(smmu, smmu_domain->cd.asid);
} else {
cmd.opcode = CMDQ_OP_TLBI_S12_VMALL;
cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
@@ -1962,7 +1956,7 @@ static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
cmd.opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
CMDQ_OP_TLBI_EL2_VA : CMDQ_OP_TLBI_NH_VA;
- cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
+ cmd.tlbi.asid = smmu_domain->cd.asid;
} else {
cmd.opcode = CMDQ_OP_TLBI_S2_IPA;
cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
@@ -2067,15 +2061,11 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
free_io_pgtable_ops(smmu_domain->pgtbl_ops);
- /* Free the CD and ASID, if we allocated them */
+ /* Free the ASID or VMID */
if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
- struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
-
/* Prevent SVA from touching the CD while we're freeing it */
mutex_lock(&arm_smmu_asid_lock);
- if (cfg->cdcfg.cdtab)
- arm_smmu_free_cd_tables(smmu_domain);
- arm_smmu_free_asid(&cfg->cd);
+ arm_smmu_free_asid(&smmu_domain->cd);
mutex_unlock(&arm_smmu_asid_lock);
} else {
struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
@@ -2087,66 +2077,43 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
}
static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
- struct arm_smmu_master *master,
struct io_pgtable_cfg *pgtbl_cfg)
{
int ret;
u32 asid;
struct arm_smmu_device *smmu = smmu_domain->smmu;
- struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
+ struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
- refcount_set(&cfg->cd.refs, 1);
+ refcount_set(&cd->refs, 1);
/* Prevent SVA from modifying the ASID until it is written to the CD */
mutex_lock(&arm_smmu_asid_lock);
- ret = xa_alloc(&arm_smmu_asid_xa, &asid, &cfg->cd,
+ ret = xa_alloc(&arm_smmu_asid_xa, &asid, cd,
XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
if (ret)
goto out_unlock;
- cfg->s1cdmax = master->ssid_bits;
-
- smmu_domain->stall_enabled = master->stall_enabled;
-
- ret = arm_smmu_alloc_cd_tables(smmu_domain);
- if (ret)
- goto out_free_asid;
-
- cfg->cd.asid = (u16)asid;
- cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
- cfg->cd.tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) |
+ cd->asid = (u16)asid;
+ cd->ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
+ cd->tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) |
FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) |
FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) |
FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) |
FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) |
FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) |
CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
- cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair;
-
- /*
- * Note that this will end up calling arm_smmu_sync_cd() before
- * the master has been added to the devices list for this domain.
- * This isn't an issue because the STE hasn't been installed yet.
- */
- ret = arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, &cfg->cd);
- if (ret)
- goto out_free_cd_tables;
+ cd->mair = pgtbl_cfg->arm_lpae_s1_cfg.mair;
mutex_unlock(&arm_smmu_asid_lock);
return 0;
-out_free_cd_tables:
- arm_smmu_free_cd_tables(smmu_domain);
-out_free_asid:
- arm_smmu_free_asid(&cfg->cd);
out_unlock:
mutex_unlock(&arm_smmu_asid_lock);
return ret;
}
static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
- struct arm_smmu_master *master,
struct io_pgtable_cfg *pgtbl_cfg)
{
int vmid;
@@ -2173,8 +2140,7 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
return 0;
}
-static int arm_smmu_domain_finalise(struct iommu_domain *domain,
- struct arm_smmu_master *master)
+static int arm_smmu_domain_finalise(struct iommu_domain *domain)
{
int ret;
unsigned long ias, oas;
@@ -2182,7 +2148,6 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain,
struct io_pgtable_cfg pgtbl_cfg;
struct io_pgtable_ops *pgtbl_ops;
int (*finalise_stage_fn)(struct arm_smmu_domain *,
- struct arm_smmu_master *,
struct io_pgtable_cfg *);
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
@@ -2234,7 +2199,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain,
domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
domain->geometry.force_aperture = true;
- ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg);
+ ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
if (ret < 0) {
free_io_pgtable_ops(pgtbl_ops);
return ret;
@@ -2403,6 +2368,14 @@ static void arm_smmu_detach_dev(struct arm_smmu_master *master)
master->domain = NULL;
master->ats_enabled = false;
arm_smmu_install_ste_for_dev(master);
+ /*
+ * Clearing the CD entry isn't strictly required to detach the domain
+ * since the table is uninstalled anyway, but it helps avoid confusion
+ * in the call to arm_smmu_write_ctx_desc on the next attach (which
+ * expects the entry to be empty).
+ */
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 && master->cd_table.cdtab)
+ arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, NULL);
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
@@ -2436,23 +2409,15 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
if (!smmu_domain->smmu) {
smmu_domain->smmu = smmu;
- ret = arm_smmu_domain_finalise(domain, master);
- if (ret) {
+ ret = arm_smmu_domain_finalise(domain);
+ if (ret)
smmu_domain->smmu = NULL;
- goto out_unlock;
- }
- } else if (smmu_domain->smmu != smmu) {
- ret = -EINVAL;
- goto out_unlock;
- } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
- master->ssid_bits != smmu_domain->s1_cfg.s1cdmax) {
+ } else if (smmu_domain->smmu != smmu)
ret = -EINVAL;
- goto out_unlock;
- } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
- smmu_domain->stall_enabled != master->stall_enabled) {
- ret = -EINVAL;
- goto out_unlock;
- }
+
+ mutex_unlock(&smmu_domain->init_mutex);
+ if (ret)
+ return ret;
master->domain = smmu_domain;
@@ -2466,16 +2431,42 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
master->ats_enabled = arm_smmu_ats_supported(master);
- arm_smmu_install_ste_for_dev(master);
-
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_add(&master->domain_head, &smmu_domain->devices);
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+ if (!master->cd_table.cdtab) {
+ ret = arm_smmu_alloc_cd_tables(master);
+ if (ret) {
+ master->domain = NULL;
+ goto out_list_del;
+ }
+ }
+
+ /*
+ * Prevent SVA from concurrently modifying the CD or writing to
+ * the CD entry
+ */
+ mutex_lock(&arm_smmu_asid_lock);
+ ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, &smmu_domain->cd);
+ mutex_unlock(&arm_smmu_asid_lock);
+ if (ret) {
+ master->domain = NULL;
+ goto out_list_del;
+ }
+ }
+
+ arm_smmu_install_ste_for_dev(master);
+
arm_smmu_enable_ats(master);
+ return 0;
+
+out_list_del:
+ spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+ list_del(&master->domain_head);
+ spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
-out_unlock:
- mutex_unlock(&smmu_domain->init_mutex);
return ret;
}

@@ -2720,6 +2711,8 @@ static void arm_smmu_release_device(struct device *dev)
arm_smmu_detach_dev(master);
arm_smmu_disable_pasid(master);
arm_smmu_remove_master(master);
+ if (master->cd_table.cdtab)
+ arm_smmu_free_cd_tables(master);
kfree(master);
}
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 9915850dd4db..961205ba86d2 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -595,13 +595,11 @@ struct arm_smmu_ctx_desc_cfg {
dma_addr_t cdtab_dma;
struct arm_smmu_l1_ctx_desc *l1_desc;
unsigned int num_l1_ents;
-};
-
-struct arm_smmu_s1_cfg {
- struct arm_smmu_ctx_desc_cfg cdcfg;
- struct arm_smmu_ctx_desc cd;
u8 s1fmt;
+ /* log2 of the maximum number of CDs supported by this table */
u8 s1cdmax;
+ /* Whether CD entries in this table have the stall bit set. */
+ u8 stall_enabled:1;
};
struct arm_smmu_s2_cfg {
@@ -697,6 +695,8 @@ struct arm_smmu_master {
struct arm_smmu_domain *domain;
struct list_head domain_head;
struct arm_smmu_stream *streams;
+ /* Locked by the iommu core using the group mutex */
+ struct arm_smmu_ctx_desc_cfg cd_table;
unsigned int num_streams;
bool ats_enabled;
bool stall_enabled;
@@ -719,13 +719,12 @@ struct arm_smmu_domain {
struct mutex init_mutex; /* Protects smmu pointer */
struct io_pgtable_ops *pgtbl_ops;
- bool stall_enabled;
atomic_t nr_ats_masters;
enum arm_smmu_domain_stage stage;
union {
- struct arm_smmu_s1_cfg s1_cfg;
- struct arm_smmu_s2_cfg s2_cfg;
+ struct arm_smmu_ctx_desc cd;
+ struct arm_smmu_s2_cfg s2_cfg;
};
struct iommu_domain domain;
@@ -745,7 +744,7 @@ extern struct xarray arm_smmu_asid_xa;
extern struct mutex arm_smmu_asid_lock;
extern struct arm_smmu_ctx_desc quiet_cd;
-int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
+int arm_smmu_write_ctx_desc(struct arm_smmu_master *smmu_master, int ssid,
struct arm_smmu_ctx_desc *cd);
void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid);
void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 7f52ac67495f..549ae4dba3a6 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -251,6 +251,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,sc7280-mss-pil" },
{ .compatible = "qcom,sc8180x-mdss" },
{ .compatible = "qcom,sc8280xp-mdss" },
+ { .compatible = "qcom,sdm670-mdss" },
{ .compatible = "qcom,sdm845-mdss" },
{ .compatible = "qcom,sdm845-mss-pil" },
{ .compatible = "qcom,sm6350-mdss" },
@@ -532,6 +533,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
{ .compatible = "qcom,sm6350-smmu-500", .data = &qcom_smmu_500_impl0_data },
{ .compatible = "qcom,sm6375-smmu-v2", .data = &qcom_smmu_v2_data },
{ .compatible = "qcom,sm6375-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sm7150-smmu-v2", .data = &qcom_smmu_v2_data },
{ .compatible = "qcom,sm8150-smmu-500", .data = &qcom_smmu_500_impl0_data },
{ .compatible = "qcom,sm8250-smmu-500", .data = &qcom_smmu_500_impl0_data },
{ .compatible = "qcom,sm8350-smmu-500", .data = &qcom_smmu_500_impl0_data },
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index 775a3cbaff4e..97b2122032b2 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -332,12 +332,10 @@ out_unlock:
return ret;
}
-static struct iommu_domain *qcom_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *qcom_iommu_domain_alloc_paging(struct device *dev)
{
struct qcom_iommu_domain *qcom_domain;
- if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
- return NULL;
/*
* Allocate the domain and initialise some of its data structures.
* We can't really do anything meaningful until we've added a
@@ -400,6 +398,44 @@ static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev
return 0;
}
+static int qcom_iommu_identity_attach(struct iommu_domain *identity_domain,
+ struct device *dev)
+{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ struct qcom_iommu_domain *qcom_domain;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
+ unsigned int i;
+
+ if (domain == identity_domain || !domain)
+ return 0;
+
+ qcom_domain = to_qcom_iommu_domain(domain);
+ if (WARN_ON(!qcom_domain->iommu))
+ return -EINVAL;
+
+ pm_runtime_get_sync(qcom_iommu->dev);
+ for (i = 0; i < fwspec->num_ids; i++) {
+ struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
+
+ /* Disable the context bank: */
+ iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
+
+ ctx->domain = NULL;
+ }
+ pm_runtime_put_sync(qcom_iommu->dev);
+ return 0;
+}
+
+static struct iommu_domain_ops qcom_iommu_identity_ops = {
+ .attach_dev = qcom_iommu_identity_attach,
+};
+
+static struct iommu_domain qcom_iommu_identity_domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &qcom_iommu_identity_ops,
+};
+
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
@@ -565,8 +601,9 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
}
static const struct iommu_ops qcom_iommu_ops = {
+ .identity_domain = &qcom_iommu_identity_domain,
.capable = qcom_iommu_capable,
- .domain_alloc = qcom_iommu_domain_alloc,
+ .domain_alloc_paging = qcom_iommu_domain_alloc_paging,
.probe_device = qcom_iommu_probe_device,
.device_group = generic_device_group,
.of_xlate = qcom_iommu_of_xlate,
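
The qcom conversion above stops rejecting unknown domain types and instead advertises a statically allocated identity domain whose only operation is attach_dev. A rough sketch of that static-singleton pattern; the structs and field names here are stand-ins for illustration, not the iommu core's real types:

#include <stdio.h>

/* Stand-ins for struct iommu_domain and struct iommu_domain_ops. */
struct dom;
struct dom_ops {
	int (*attach_dev)(struct dom *d, const char *dev);
};
struct dom {
	const char *type;
	const struct dom_ops *ops;
};

static int identity_attach(struct dom *d, const char *dev)
{
	/* A real driver would disable translation for every context bank here. */
	printf("%s domain: %s attached\n", d->type, dev);
	return 0;
}

static const struct dom_ops identity_ops = { .attach_dev = identity_attach };

/* One static instance shared by all devices, like qcom_iommu_identity_domain. */
static struct dom identity_domain = { .type = "identity", .ops = &identity_ops };

int main(void)
{
	return identity_domain.ops->attach_dev(&identity_domain, "0000:00:01.0");
}
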
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 4b1a88f514c9..85163a83df2f 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -43,14 +43,28 @@ enum iommu_dma_cookie_type {
IOMMU_DMA_MSI_COOKIE,
};
+enum iommu_dma_queue_type {
+ IOMMU_DMA_OPTS_PER_CPU_QUEUE,
+ IOMMU_DMA_OPTS_SINGLE_QUEUE,
+};
+
+struct iommu_dma_options {
+ enum iommu_dma_queue_type qt;
+ size_t fq_size;
+ unsigned int fq_timeout;
+};
+
struct iommu_dma_cookie {
enum iommu_dma_cookie_type type;
union {
/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
struct {
struct iova_domain iovad;
-
- struct iova_fq __percpu *fq; /* Flush queue */
+ /* Flush queue */
+ union {
+ struct iova_fq *single_fq;
+ struct iova_fq __percpu *percpu_fq;
+ };
/* Number of TLB flushes that have been started */
atomic64_t fq_flush_start_cnt;
/* Number of TLB flushes that have been finished */
@@ -67,6 +81,8 @@ struct iommu_dma_cookie {
/* Domain for flush queue callback; NULL if flush queue not in use */
struct iommu_domain *fq_domain;
+ /* Options for dma-iommu use */
+ struct iommu_dma_options options;
struct mutex mutex;
};
@@ -84,10 +100,12 @@ static int __init iommu_dma_forcedac_setup(char *str)
early_param("iommu.forcedac", iommu_dma_forcedac_setup);
/* Number of entries per flush queue */
-#define IOVA_FQ_SIZE 256
+#define IOVA_DEFAULT_FQ_SIZE 256
+#define IOVA_SINGLE_FQ_SIZE 32768
/* Timeout (in ms) after which entries are flushed from the queue */
-#define IOVA_FQ_TIMEOUT 10
+#define IOVA_DEFAULT_FQ_TIMEOUT 10
+#define IOVA_SINGLE_FQ_TIMEOUT 1000
/* Flush queue entry for deferred flushing */
struct iova_fq_entry {
@@ -99,18 +117,19 @@ struct iova_fq_entry {
/* Per-CPU flush queue structure */
struct iova_fq {
- struct iova_fq_entry entries[IOVA_FQ_SIZE];
- unsigned int head, tail;
spinlock_t lock;
+ unsigned int head, tail;
+ unsigned int mod_mask;
+ struct iova_fq_entry entries[];
};
#define fq_ring_for_each(i, fq) \
- for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
+ for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) & (fq)->mod_mask)
static inline bool fq_full(struct iova_fq *fq)
{
assert_spin_locked(&fq->lock);
- return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
+ return (((fq->tail + 1) & fq->mod_mask) == fq->head);
}
static inline unsigned int fq_ring_add(struct iova_fq *fq)
@@ -119,12 +138,12 @@ static inline unsigned int fq_ring_add(struct iova_fq *fq)
assert_spin_locked(&fq->lock);
- fq->tail = (idx + 1) % IOVA_FQ_SIZE;
+ fq->tail = (idx + 1) & fq->mod_mask;
return idx;
}
-static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
+static void fq_ring_free_locked(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
{
u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt);
unsigned int idx;
@@ -141,10 +160,19 @@ static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
fq->entries[idx].iova_pfn,
fq->entries[idx].pages);
- fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
+ fq->head = (fq->head + 1) & fq->mod_mask;
}
}
+static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fq->lock, flags);
+ fq_ring_free_locked(cookie, fq);
+ spin_unlock_irqrestore(&fq->lock, flags);
+}
+
static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
{
atomic64_inc(&cookie->fq_flush_start_cnt);
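
The flush-queue ring now wraps with a per-queue mod_mask instead of the compile-time IOVA_FQ_SIZE. This only works because the queue length is a power of two, so x & (size - 1) is equivalent to x % size. A standalone sketch of the head/tail arithmetic that fq_ring_for_each(), fq_full() and fq_ring_add() above rely on:

#include <stdio.h>

#define FQ_SIZE 8				/* must be a power of two */

struct ring {
	unsigned int head, tail, mod_mask;
};

static int ring_full(struct ring *r)
{
	return ((r->tail + 1) & r->mod_mask) == r->head;
}

static int ring_push(struct ring *r, unsigned int *slot)
{
	if (ring_full(r))
		return -1;
	*slot = r->tail;
	r->tail = (r->tail + 1) & r->mod_mask;	/* same as % FQ_SIZE */
	return 0;
}

static int ring_pop(struct ring *r, unsigned int *slot)
{
	if (r->head == r->tail)
		return -1;			/* empty */
	*slot = r->head;
	r->head = (r->head + 1) & r->mod_mask;
	return 0;
}

int main(void)
{
	struct ring r = { .head = 0, .tail = 0, .mod_mask = FQ_SIZE - 1 };
	unsigned int i, slot;

	for (i = 0; !ring_push(&r, &slot); i++)
		;
	printf("queued %u entries before full\n", i);	/* FQ_SIZE - 1 == 7 */
	while (!ring_pop(&r, &slot))
		;
	return 0;
}
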
@@ -160,14 +188,11 @@ static void fq_flush_timeout(struct timer_list *t)
atomic_set(&cookie->fq_timer_on, 0);
fq_flush_iotlb(cookie);
- for_each_possible_cpu(cpu) {
- unsigned long flags;
- struct iova_fq *fq;
-
- fq = per_cpu_ptr(cookie->fq, cpu);
- spin_lock_irqsave(&fq->lock, flags);
- fq_ring_free(cookie, fq);
- spin_unlock_irqrestore(&fq->lock, flags);
+ if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE) {
+ fq_ring_free(cookie, cookie->single_fq);
+ } else {
+ for_each_possible_cpu(cpu)
+ fq_ring_free(cookie, per_cpu_ptr(cookie->percpu_fq, cpu));
}
}
@@ -188,7 +213,11 @@ static void queue_iova(struct iommu_dma_cookie *cookie,
*/
smp_mb();
- fq = raw_cpu_ptr(cookie->fq);
+ if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
+ fq = cookie->single_fq;
+ else
+ fq = raw_cpu_ptr(cookie->percpu_fq);
+
spin_lock_irqsave(&fq->lock, flags);
/*
@@ -196,11 +225,11 @@ static void queue_iova(struct iommu_dma_cookie *cookie,
* flushed out on another CPU. This makes the fq_full() check below less
* likely to be true.
*/
- fq_ring_free(cookie, fq);
+ fq_ring_free_locked(cookie, fq);
if (fq_full(fq)) {
fq_flush_iotlb(cookie);
- fq_ring_free(cookie, fq);
+ fq_ring_free_locked(cookie, fq);
}
idx = fq_ring_add(fq);
@@ -216,34 +245,95 @@ static void queue_iova(struct iommu_dma_cookie *cookie,
if (!atomic_read(&cookie->fq_timer_on) &&
!atomic_xchg(&cookie->fq_timer_on, 1))
mod_timer(&cookie->fq_timer,
- jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
+ jiffies + msecs_to_jiffies(cookie->options.fq_timeout));
}
-static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
+static void iommu_dma_free_fq_single(struct iova_fq *fq)
{
- int cpu, idx;
+ int idx;
- if (!cookie->fq)
- return;
+ fq_ring_for_each(idx, fq)
+ put_pages_list(&fq->entries[idx].freelist);
+ vfree(fq);
+}
+
+static void iommu_dma_free_fq_percpu(struct iova_fq __percpu *percpu_fq)
+{
+ int cpu, idx;
- del_timer_sync(&cookie->fq_timer);
/* The IOVAs will be torn down separately, so just free our queued pages */
for_each_possible_cpu(cpu) {
- struct iova_fq *fq = per_cpu_ptr(cookie->fq, cpu);
+ struct iova_fq *fq = per_cpu_ptr(percpu_fq, cpu);
fq_ring_for_each(idx, fq)
put_pages_list(&fq->entries[idx].freelist);
}
- free_percpu(cookie->fq);
+ free_percpu(percpu_fq);
+}
+
+static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
+{
+ if (!cookie->fq_domain)
+ return;
+
+ del_timer_sync(&cookie->fq_timer);
+ if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
+ iommu_dma_free_fq_single(cookie->single_fq);
+ else
+ iommu_dma_free_fq_percpu(cookie->percpu_fq);
+}
+
+static void iommu_dma_init_one_fq(struct iova_fq *fq, size_t fq_size)
+{
+ int i;
+
+ fq->head = 0;
+ fq->tail = 0;
+ fq->mod_mask = fq_size - 1;
+
+ spin_lock_init(&fq->lock);
+
+ for (i = 0; i < fq_size; i++)
+ INIT_LIST_HEAD(&fq->entries[i].freelist);
+}
+
+static int iommu_dma_init_fq_single(struct iommu_dma_cookie *cookie)
+{
+ size_t fq_size = cookie->options.fq_size;
+ struct iova_fq *queue;
+
+ queue = vmalloc(struct_size(queue, entries, fq_size));
+ if (!queue)
+ return -ENOMEM;
+ iommu_dma_init_one_fq(queue, fq_size);
+ cookie->single_fq = queue;
+
+ return 0;
+}
+
+static int iommu_dma_init_fq_percpu(struct iommu_dma_cookie *cookie)
+{
+ size_t fq_size = cookie->options.fq_size;
+ struct iova_fq __percpu *queue;
+ int cpu;
+
+ queue = __alloc_percpu(struct_size(queue, entries, fq_size),
+ __alignof__(*queue));
+ if (!queue)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu)
+ iommu_dma_init_one_fq(per_cpu_ptr(queue, cpu), fq_size);
+ cookie->percpu_fq = queue;
+ return 0;
}
/* sysfs updates are serialised by the mutex of the group owning @domain */
int iommu_dma_init_fq(struct iommu_domain *domain)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
- struct iova_fq __percpu *queue;
- int i, cpu;
+ int rc;
if (cookie->fq_domain)
return 0;
@@ -251,26 +341,16 @@ int iommu_dma_init_fq(struct iommu_domain *domain)
atomic64_set(&cookie->fq_flush_start_cnt, 0);
atomic64_set(&cookie->fq_flush_finish_cnt, 0);
- queue = alloc_percpu(struct iova_fq);
- if (!queue) {
+ if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
+ rc = iommu_dma_init_fq_single(cookie);
+ else
+ rc = iommu_dma_init_fq_percpu(cookie);
+
+ if (rc) {
pr_warn("iova flush queue initialization failed\n");
return -ENOMEM;
}
- for_each_possible_cpu(cpu) {
- struct iova_fq *fq = per_cpu_ptr(queue, cpu);
-
- fq->head = 0;
- fq->tail = 0;
-
- spin_lock_init(&fq->lock);
-
- for (i = 0; i < IOVA_FQ_SIZE; i++)
- INIT_LIST_HEAD(&fq->entries[i].freelist);
- }
-
- cookie->fq = queue;
-
timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
atomic_set(&cookie->fq_timer_on, 0);
/*
@@ -555,6 +635,28 @@ static bool dev_use_sg_swiotlb(struct device *dev, struct scatterlist *sg,
}
/**
+ * iommu_dma_init_options - Initialize dma-iommu options
+ * @options: The options to be initialized
+ * @dev: Device the options are set for
+ *
+ * This allows tuning dma-iommu behaviour specific to device properties
+ */
+static void iommu_dma_init_options(struct iommu_dma_options *options,
+ struct device *dev)
+{
+ /* Shadowing IOTLB flushes do better with a single large queue */
+ if (dev->iommu->shadow_on_flush) {
+ options->qt = IOMMU_DMA_OPTS_SINGLE_QUEUE;
+ options->fq_timeout = IOVA_SINGLE_FQ_TIMEOUT;
+ options->fq_size = IOVA_SINGLE_FQ_SIZE;
+ } else {
+ options->qt = IOMMU_DMA_OPTS_PER_CPU_QUEUE;
+ options->fq_size = IOVA_DEFAULT_FQ_SIZE;
+ options->fq_timeout = IOVA_DEFAULT_FQ_TIMEOUT;
+ }
+}
+
+/**
* iommu_dma_init_domain - Initialise a DMA mapping domain
* @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
* @base: IOVA at which the mappable address space starts
@@ -614,6 +716,8 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
if (ret)
goto done_unlock;
+ iommu_dma_init_options(&cookie->options, dev);
+
/* If the FQ fails we can simply fall back to strict mode */
if (domain->type == IOMMU_DOMAIN_DMA_FQ &&
(!device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH) || iommu_dma_init_fq(domain)))
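
Because the queue length is now chosen at runtime (256 entries per CPU by default, one 32768-entry queue when the IOMMU shadows on flush), struct iova_fq gained a flexible entries[] array and is allocated with struct_size(). A userspace sketch of that sizing, using plain unchecked arithmetic in place of the kernel's overflow-safe helper:

#include <stdio.h>
#include <stdlib.h>

struct entry { unsigned long iova_pfn, pages; };

struct fq {
	unsigned int head, tail, mod_mask;
	struct entry entries[];			/* flexible array member */
};

static struct fq *fq_alloc(size_t n_entries)
{
	/* struct_size(fq, entries, n) ~= sizeof(struct fq) + n * sizeof(struct entry) */
	struct fq *fq = calloc(1, sizeof(struct fq) + n_entries * sizeof(struct entry));

	if (fq)
		fq->mod_mask = n_entries - 1;	/* n_entries must be a power of two */
	return fq;
}

int main(void)
{
	/* Sizes lifted from the diff: 256 per-CPU vs 32768 for the single queue. */
	struct fq *percpu_style = fq_alloc(256);
	struct fq *single_style = fq_alloc(32768);

	printf("per-CPU fq: %zu bytes, single fq: %zu bytes\n",
	       sizeof(struct fq) + 256 * sizeof(struct entry),
	       sizeof(struct fq) + 32768 * sizeof(struct entry));
	free(percpu_style);
	free(single_style);
	return 0;
}
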
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index c275fe71c4db..2c6e9094f1e9 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -24,6 +24,7 @@
typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;
+static struct iommu_domain exynos_identity_domain;
/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
@@ -829,7 +830,7 @@ static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
mutex_lock(&owner->rpm_lock);
- if (data->domain) {
+ if (&data->domain->domain != &exynos_identity_domain) {
dev_dbg(data->sysmmu, "saving state\n");
__sysmmu_disable(data);
}
@@ -847,7 +848,7 @@ static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
mutex_lock(&owner->rpm_lock);
- if (data->domain) {
+ if (&data->domain->domain != &exynos_identity_domain) {
dev_dbg(data->sysmmu, "restoring state\n");
__sysmmu_enable(data);
}
@@ -886,7 +887,7 @@ static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
DMA_TO_DEVICE);
}
-static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
{
struct exynos_iommu_domain *domain;
dma_addr_t handle;
@@ -895,9 +896,6 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
/* Check if correct PTE offsets are initialized */
BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);
- if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
- return NULL;
-
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return NULL;
@@ -980,17 +978,20 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
kfree(domain);
}
-static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
- struct device *dev)
+static int exynos_iommu_identity_attach(struct iommu_domain *identity_domain,
+ struct device *dev)
{
- struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
- phys_addr_t pagetable = virt_to_phys(domain->pgtable);
+ struct exynos_iommu_domain *domain;
+ phys_addr_t pagetable;
struct sysmmu_drvdata *data, *next;
unsigned long flags;
- if (!has_sysmmu(dev) || owner->domain != iommu_domain)
- return;
+ if (owner->domain == identity_domain)
+ return 0;
+
+ domain = to_exynos_domain(owner->domain);
+ pagetable = virt_to_phys(domain->pgtable);
mutex_lock(&owner->rpm_lock);
@@ -1009,15 +1010,25 @@ static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
list_del_init(&data->domain_node);
spin_unlock(&data->lock);
}
- owner->domain = NULL;
+ owner->domain = identity_domain;
spin_unlock_irqrestore(&domain->lock, flags);
mutex_unlock(&owner->rpm_lock);
- dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
- &pagetable);
+ dev_dbg(dev, "%s: Restored IOMMU to IDENTITY from pgtable %pa\n",
+ __func__, &pagetable);
+ return 0;
}
+static struct iommu_domain_ops exynos_identity_ops = {
+ .attach_dev = exynos_iommu_identity_attach,
+};
+
+static struct iommu_domain exynos_identity_domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &exynos_identity_ops,
+};
+
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
struct device *dev)
{
@@ -1026,12 +1037,11 @@ static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
struct sysmmu_drvdata *data;
phys_addr_t pagetable = virt_to_phys(domain->pgtable);
unsigned long flags;
+ int err;
- if (!has_sysmmu(dev))
- return -ENODEV;
-
- if (owner->domain)
- exynos_iommu_detach_device(owner->domain, dev);
+ err = exynos_iommu_identity_attach(&exynos_identity_domain, dev);
+ if (err)
+ return err;
mutex_lock(&owner->rpm_lock);
@@ -1219,7 +1229,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
*/
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
unsigned long l_iova, phys_addr_t paddr, size_t size,
- int prot, gfp_t gfp)
+ size_t count, int prot, gfp_t gfp, size_t *mapped)
{
struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
sysmmu_pte_t *entry;
@@ -1253,6 +1263,8 @@ static int exynos_iommu_map(struct iommu_domain *iommu_domain,
if (ret)
pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
__func__, ret, size, iova);
+ else
+ *mapped = size;
spin_unlock_irqrestore(&domain->pgtablelock, flags);
@@ -1274,7 +1286,7 @@ static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain
}
static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
- unsigned long l_iova, size_t size,
+ unsigned long l_iova, size_t size, size_t count,
struct iommu_iotlb_gather *gather)
{
struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
@@ -1407,26 +1419,12 @@ static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
return &data->iommu;
}
-static void exynos_iommu_set_platform_dma(struct device *dev)
-{
- struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
-
- if (owner->domain) {
- struct iommu_group *group = iommu_group_get(dev);
-
- if (group) {
- exynos_iommu_detach_device(owner->domain, dev);
- iommu_group_put(group);
- }
- }
-}
-
static void exynos_iommu_release_device(struct device *dev)
{
struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
struct sysmmu_drvdata *data;
- exynos_iommu_set_platform_dma(dev);
+ WARN_ON(exynos_iommu_identity_attach(&exynos_identity_domain, dev));
list_for_each_entry(data, &owner->controllers, owner_node)
device_link_del(data->link);
@@ -1457,6 +1455,7 @@ static int exynos_iommu_of_xlate(struct device *dev,
INIT_LIST_HEAD(&owner->controllers);
mutex_init(&owner->rpm_lock);
+ owner->domain = &exynos_identity_domain;
dev_iommu_priv_set(dev, owner);
}
@@ -1471,19 +1470,17 @@ static int exynos_iommu_of_xlate(struct device *dev,
}
static const struct iommu_ops exynos_iommu_ops = {
- .domain_alloc = exynos_iommu_domain_alloc,
+ .identity_domain = &exynos_identity_domain,
+ .domain_alloc_paging = exynos_iommu_domain_alloc_paging,
.device_group = generic_device_group,
-#ifdef CONFIG_ARM
- .set_platform_dma_ops = exynos_iommu_set_platform_dma,
-#endif
.probe_device = exynos_iommu_probe_device,
.release_device = exynos_iommu_release_device,
.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
.of_xlate = exynos_iommu_of_xlate,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = exynos_iommu_attach_device,
- .map = exynos_iommu_map,
- .unmap = exynos_iommu_unmap,
+ .map_pages = exynos_iommu_map,
+ .unmap_pages = exynos_iommu_unmap,
.iova_to_phys = exynos_iommu_iova_to_phys,
.free = exynos_iommu_domain_free,
}
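
exynos_iommu_map() is now registered as .map_pages but still maps a single region per call, so it simply reports the whole size through *mapped on success. For contrast, a generic sketch of how a single-unit mapper is usually wrapped to honour the pgsize/count contract (map_one() is a hypothetical helper, not an exynos function):

#include <stddef.h>
#include <stdio.h>

/* Hypothetical single-unit mapper standing in for the driver's map routine. */
static int map_one(unsigned long iova, unsigned long paddr, size_t pgsize)
{
	printf("map 0x%lx -> 0x%lx (%zu bytes)\n", iova, paddr, pgsize);
	return 0;
}

/*
 * map_pages-style wrapper: map up to 'count' units of 'pgsize' bytes and
 * report how much was actually mapped, so the core can unwind on failure.
 */
static int map_pages(unsigned long iova, unsigned long paddr,
		     size_t pgsize, size_t count, size_t *mapped)
{
	size_t i;
	int ret = 0;

	*mapped = 0;
	for (i = 0; i < count; i++) {
		ret = map_one(iova + i * pgsize, paddr + i * pgsize, pgsize);
		if (ret)
			break;
		*mapped += pgsize;
	}
	return ret;
}

int main(void)
{
	size_t mapped;

	map_pages(0x10000, 0x80000000, 0x1000, 4, &mapped);
	printf("mapped %zu bytes\n", mapped);
	return 0;
}
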
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 4ac0e247ec2b..e9d2bff4659b 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -196,6 +196,13 @@ static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
{
struct fsl_dma_domain *dma_domain;
+ /*
+ * FIXME: This isn't creating an unmanaged domain: since the
+ * default_domain_ops do not have any map/unmap functions, it doesn't meet
+ * the requirements for __IOMMU_DOMAIN_PAGING. The only purpose seems to be
+ * to allow drivers/soc/fsl/qbman/qman_portal.c to do
+ * fsl_pamu_configure_l1_stash().
+ */
if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
@@ -283,16 +290,34 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
return ret;
}
-static void fsl_pamu_set_platform_dma(struct device *dev)
+/*
+ * FIXME: fsl/pamu is completely broken in terms of how it works with the iommu
+ * API. Immediately after probe the HW is left in an IDENTITY translation and
+ * the driver provides a non-working UNMANAGED domain that it can switch over
+ * to. However it cannot switch back to an IDENTITY translation, instead it
+ * switches to what looks like BLOCKING.
+ */
+static int fsl_pamu_platform_attach(struct iommu_domain *platform_domain,
+ struct device *dev)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
+ struct fsl_dma_domain *dma_domain;
const u32 *prop;
int len;
struct pci_dev *pdev = NULL;
struct pci_controller *pci_ctl;
/*
+ * Hack to keep things working as they always have: only leaving an
+ * UNMANAGED domain makes it BLOCKING.
+ */
+ if (domain == platform_domain || !domain ||
+ domain->type != IOMMU_DOMAIN_UNMANAGED)
+ return 0;
+
+ dma_domain = to_fsl_dma_domain(domain);
+
+ /*
* Use LIODN of the PCI controller while detaching a
* PCI device.
*/
@@ -312,8 +337,18 @@ static void fsl_pamu_set_platform_dma(struct device *dev)
detach_device(dev, dma_domain);
else
pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
+ return 0;
}
+static struct iommu_domain_ops fsl_pamu_platform_ops = {
+ .attach_dev = fsl_pamu_platform_attach,
+};
+
+static struct iommu_domain fsl_pamu_platform_domain = {
+ .type = IOMMU_DOMAIN_PLATFORM,
+ .ops = &fsl_pamu_platform_ops,
+};
+
/* Set the domain stash attribute */
int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu)
{
@@ -395,11 +430,11 @@ static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
}
static const struct iommu_ops fsl_pamu_ops = {
+ .default_domain = &fsl_pamu_platform_domain,
.capable = fsl_pamu_capable,
.domain_alloc = fsl_pamu_domain_alloc,
.probe_device = fsl_pamu_probe_device,
.device_group = fsl_pamu_device_group,
- .set_platform_dma_ops = fsl_pamu_set_platform_dma,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = fsl_pamu_attach_device,
.iova_to_phys = fsl_pamu_iova_to_phys,
diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c
index 1f925285104e..dee61e513be6 100644
--- a/drivers/iommu/intel/debugfs.c
+++ b/drivers/iommu/intel/debugfs.c
@@ -111,6 +111,8 @@ static const struct iommu_regset iommu_regs_64[] = {
IOMMU_REGSET_ENTRY(VCRSP),
};
+static struct dentry *intel_iommu_debug;
+
static int iommu_regset_show(struct seq_file *m, void *unused)
{
struct dmar_drhd_unit *drhd;
@@ -311,9 +313,14 @@ static inline unsigned long level_to_directory_size(int level)
static inline void
dump_page_info(struct seq_file *m, unsigned long iova, u64 *path)
{
- seq_printf(m, "0x%013lx |\t0x%016llx\t0x%016llx\t0x%016llx\t0x%016llx\t0x%016llx\n",
- iova >> VTD_PAGE_SHIFT, path[5], path[4],
- path[3], path[2], path[1]);
+ seq_printf(m, "0x%013lx |\t0x%016llx\t0x%016llx\t0x%016llx",
+ iova >> VTD_PAGE_SHIFT, path[5], path[4], path[3]);
+ if (path[2]) {
+ seq_printf(m, "\t0x%016llx", path[2]);
+ if (path[1])
+ seq_printf(m, "\t0x%016llx", path[1]);
+ }
+ seq_putc(m, '\n');
}
static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde,
@@ -340,58 +347,140 @@ static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde,
}
}
-static int __show_device_domain_translation(struct device *dev, void *data)
+static int domain_translation_struct_show(struct seq_file *m,
+ struct device_domain_info *info,
+ ioasid_t pasid)
{
- struct dmar_domain *domain;
- struct seq_file *m = data;
- u64 path[6] = { 0 };
-
- domain = to_dmar_domain(iommu_get_domain_for_dev(dev));
- if (!domain)
- return 0;
+ bool scalable, found = false;
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ u16 devfn, bus, seg;
- seq_printf(m, "Device %s @0x%llx\n", dev_name(dev),
- (u64)virt_to_phys(domain->pgd));
- seq_puts(m, "IOVA_PFN\t\tPML5E\t\t\tPML4E\t\t\tPDPE\t\t\tPDE\t\t\tPTE\n");
+ bus = info->bus;
+ devfn = info->devfn;
+ seg = info->segment;
- pgtable_walk_level(m, domain->pgd, domain->agaw + 2, 0, path);
- seq_putc(m, '\n');
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ struct context_entry *context;
+ u64 pgd, path[6] = { 0 };
+ u32 sts, agaw;
- /* Don't iterate */
- return 1;
-}
+ if (seg != iommu->segment)
+ continue;
-static int show_device_domain_translation(struct device *dev, void *data)
-{
- struct iommu_group *group;
+ sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
+ if (!(sts & DMA_GSTS_TES)) {
+ seq_printf(m, "DMA Remapping is not enabled on %s\n",
+ iommu->name);
+ continue;
+ }
+ if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT)
+ scalable = true;
+ else
+ scalable = false;
- group = iommu_group_get(dev);
- if (group) {
/*
- * The group->mutex is held across the callback, which will
- * block calls to iommu_attach/detach_group/device. Hence,
+ * The iommu->lock is held across the callback, which will
+ * block calls to domain_attach/domain_detach. Hence,
* the domain of the device will not change during traversal.
*
- * All devices in an iommu group share a single domain, hence
- * we only dump the domain of the first device. Even though,
- * this code still possibly races with the iommu_unmap()
+ * Traversing the page table possibly races with the iommu_unmap()
* interface. This could be solved by RCU-freeing the page
* table pages in the iommu_unmap() path.
*/
- iommu_group_for_each_dev(group, data,
- __show_device_domain_translation);
- iommu_group_put(group);
+ spin_lock(&iommu->lock);
+
+ context = iommu_context_addr(iommu, bus, devfn, 0);
+ if (!context || !context_present(context))
+ goto iommu_unlock;
+
+ if (scalable) { /* scalable mode */
+ struct pasid_entry *pasid_tbl, *pasid_tbl_entry;
+ struct pasid_dir_entry *dir_tbl, *dir_entry;
+ u16 dir_idx, tbl_idx, pgtt;
+ u64 pasid_dir_ptr;
+
+ pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
+
+ /* Dump specified device domain mappings with PASID. */
+ dir_idx = pasid >> PASID_PDE_SHIFT;
+ tbl_idx = pasid & PASID_PTE_MASK;
+
+ dir_tbl = phys_to_virt(pasid_dir_ptr);
+ dir_entry = &dir_tbl[dir_idx];
+
+ pasid_tbl = get_pasid_table_from_pde(dir_entry);
+ if (!pasid_tbl)
+ goto iommu_unlock;
+
+ pasid_tbl_entry = &pasid_tbl[tbl_idx];
+ if (!pasid_pte_is_present(pasid_tbl_entry))
+ goto iommu_unlock;
+
+ /*
+ * According to the PASID Granular Translation Type (PGTT),
+ * get the page table pointer.
+ */
+ pgtt = (u16)(pasid_tbl_entry->val[0] & GENMASK_ULL(8, 6)) >> 6;
+ agaw = (u8)(pasid_tbl_entry->val[0] & GENMASK_ULL(4, 2)) >> 2;
+
+ switch (pgtt) {
+ case PASID_ENTRY_PGTT_FL_ONLY:
+ pgd = pasid_tbl_entry->val[2];
+ break;
+ case PASID_ENTRY_PGTT_SL_ONLY:
+ case PASID_ENTRY_PGTT_NESTED:
+ pgd = pasid_tbl_entry->val[0];
+ break;
+ default:
+ goto iommu_unlock;
+ }
+ pgd &= VTD_PAGE_MASK;
+ } else { /* legacy mode */
+ pgd = context->lo & VTD_PAGE_MASK;
+ agaw = context->hi & 7;
+ }
+
+ seq_printf(m, "Device %04x:%02x:%02x.%x ",
+ iommu->segment, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+
+ if (scalable)
+ seq_printf(m, "with pasid %x @0x%llx\n", pasid, pgd);
+ else
+ seq_printf(m, "@0x%llx\n", pgd);
+
+ seq_printf(m, "%-17s\t%-18s\t%-18s\t%-18s\t%-18s\t%-s\n",
+ "IOVA_PFN", "PML5E", "PML4E", "PDPE", "PDE", "PTE");
+ pgtable_walk_level(m, phys_to_virt(pgd), agaw + 2, 0, path);
+
+ found = true;
+iommu_unlock:
+ spin_unlock(&iommu->lock);
+ if (found)
+ break;
}
+ rcu_read_unlock();
return 0;
}
-static int domain_translation_struct_show(struct seq_file *m, void *unused)
+static int dev_domain_translation_struct_show(struct seq_file *m, void *unused)
+{
+ struct device_domain_info *info = (struct device_domain_info *)m->private;
+
+ return domain_translation_struct_show(m, info, IOMMU_NO_PASID);
+}
+DEFINE_SHOW_ATTRIBUTE(dev_domain_translation_struct);
+
+static int pasid_domain_translation_struct_show(struct seq_file *m, void *unused)
{
- return bus_for_each_dev(&pci_bus_type, NULL, m,
- show_device_domain_translation);
+ struct dev_pasid_info *dev_pasid = (struct dev_pasid_info *)m->private;
+ struct device_domain_info *info = dev_iommu_priv_get(dev_pasid->dev);
+
+ return domain_translation_struct_show(m, info, dev_pasid->pasid);
}
-DEFINE_SHOW_ATTRIBUTE(domain_translation_struct);
+DEFINE_SHOW_ATTRIBUTE(pasid_domain_translation_struct);
static void invalidation_queue_entry_show(struct seq_file *m,
struct intel_iommu *iommu)
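
In scalable mode the walk above extracts the PASID Granular Translation Type and AGAW from bits 8:6 and 4:2 of the PASID table entry before deciding which page-directory pointer to follow. A small plain-C sketch of that GENMASK-style field extraction, with a made-up entry value (PGTT = 1, i.e. first-level only, AGAW = 2):

#include <stdio.h>
#include <stdint.h>

/* Build a mask of bits hi..lo, like the kernel's GENMASK_ULL(hi, lo). */
#define MASK64(hi, lo)	(((~0ULL) >> (63 - (hi))) & ((~0ULL) << (lo)))

int main(void)
{
	/* Made-up PASID table entry word: PGTT = 1 (bits 8:6), AGAW = 2 (bits 4:2). */
	uint64_t val0 = 0x48;

	unsigned int pgtt = (unsigned int)((val0 & MASK64(8, 6)) >> 6);
	unsigned int agaw = (unsigned int)((val0 & MASK64(4, 2)) >> 2);

	printf("pgtt=%u agaw=%u\n", pgtt, agaw);
	return 0;
}
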
@@ -666,16 +755,12 @@ static const struct file_operations dmar_perf_latency_fops = {
void __init intel_iommu_debugfs_init(void)
{
- struct dentry *intel_iommu_debug = debugfs_create_dir("intel",
- iommu_debugfs_dir);
+ intel_iommu_debug = debugfs_create_dir("intel", iommu_debugfs_dir);
debugfs_create_file("iommu_regset", 0444, intel_iommu_debug, NULL,
&iommu_regset_fops);
debugfs_create_file("dmar_translation_struct", 0444, intel_iommu_debug,
NULL, &dmar_translation_struct_fops);
- debugfs_create_file("domain_translation_struct", 0444,
- intel_iommu_debug, NULL,
- &domain_translation_struct_fops);
debugfs_create_file("invalidation_queue", 0444, intel_iommu_debug,
NULL, &invalidation_queue_fops);
#ifdef CONFIG_IRQ_REMAP
@@ -685,3 +770,51 @@ void __init intel_iommu_debugfs_init(void)
debugfs_create_file("dmar_perf_latency", 0644, intel_iommu_debug,
NULL, &dmar_perf_latency_fops);
}
+
+/*
+ * Create a debugfs directory for each device, and then create a
+ * debugfs file in this directory for users to dump the page table
+ * of the default domain. e.g.
+ * /sys/kernel/debug/iommu/intel/0000:00:01.0/domain_translation_struct
+ */
+void intel_iommu_debugfs_create_dev(struct device_domain_info *info)
+{
+ info->debugfs_dentry = debugfs_create_dir(dev_name(info->dev), intel_iommu_debug);
+
+ debugfs_create_file("domain_translation_struct", 0444, info->debugfs_dentry,
+ info, &dev_domain_translation_struct_fops);
+}
+
+/* Remove the device debugfs directory. */
+void intel_iommu_debugfs_remove_dev(struct device_domain_info *info)
+{
+ debugfs_remove_recursive(info->debugfs_dentry);
+}
+
+/*
+ * Create a debugfs directory per pair of {device, pasid}, then create the
+ * corresponding debugfs file in this directory for users to dump its page
+ * table. e.g.
+ * /sys/kernel/debug/iommu/intel/0000:00:01.0/1/domain_translation_struct
+ *
+ * The debugfs only dumps the page tables whose mappings are created and
+ * destroyed by the iommu_map/unmap() interfaces. Check the mapping type
+ * of the domain before creating the debugfs directory.
+ */
+void intel_iommu_debugfs_create_dev_pasid(struct dev_pasid_info *dev_pasid)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev_pasid->dev);
+ char dir_name[10];
+
+ sprintf(dir_name, "%x", dev_pasid->pasid);
+ dev_pasid->debugfs_dentry = debugfs_create_dir(dir_name, info->debugfs_dentry);
+
+ debugfs_create_file("domain_translation_struct", 0444, dev_pasid->debugfs_dentry,
+ dev_pasid, &pasid_domain_translation_struct_fops);
+}
+
+/* Remove the device pasid debugfs directory. */
+void intel_iommu_debugfs_remove_dev_pasid(struct dev_pasid_info *dev_pasid)
+{
+ debugfs_remove_recursive(dev_pasid->debugfs_dentry);
+}
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index d1037280abf7..3531b956556c 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -4016,9 +4016,9 @@ static int blocking_domain_attach_dev(struct iommu_domain *domain,
}
static struct iommu_domain blocking_domain = {
+ .type = IOMMU_DOMAIN_BLOCKED,
.ops = &(const struct iommu_domain_ops) {
.attach_dev = blocking_domain_attach_dev,
- .free = intel_iommu_domain_free
}
};
@@ -4028,8 +4028,6 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
struct iommu_domain *domain;
switch (type) {
- case IOMMU_DOMAIN_BLOCKED:
- return &blocking_domain;
case IOMMU_DOMAIN_DMA:
case IOMMU_DOMAIN_UNMANAGED:
dmar_domain = alloc_domain(type);
@@ -4111,7 +4109,7 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
static void intel_iommu_domain_free(struct iommu_domain *domain)
{
- if (domain != &si_domain->domain && domain != &blocking_domain)
+ if (domain != &si_domain->domain)
domain_exit(to_dmar_domain(domain));
}
@@ -4465,6 +4463,8 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
}
}
+ intel_iommu_debugfs_create_dev(info);
+
return &iommu->iommu;
}
@@ -4474,6 +4474,7 @@ static void intel_iommu_release_device(struct device *dev)
dmar_remove_one_dev_info(dev);
intel_pasid_free_table(dev);
+ intel_iommu_debugfs_remove_dev(info);
dev_iommu_priv_set(dev, NULL);
kfree(info);
set_dma_ops(dev, NULL);
@@ -4718,8 +4719,8 @@ static bool risky_device(struct pci_dev *pdev)
return false;
}
-static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
- unsigned long iova, size_t size)
+static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
unsigned long pages = aligned_nrpages(iova, size);
@@ -4729,6 +4730,7 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
xa_for_each(&dmar_domain->iommu_array, i, info)
__mapping_notify_one(info->iommu, dmar_domain, pfn, pages);
+ return 0;
}
static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
@@ -4766,6 +4768,7 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
spin_unlock_irqrestore(&dmar_domain->lock, flags);
domain_detach_iommu(dmar_domain, iommu);
+ intel_iommu_debugfs_remove_dev_pasid(dev_pasid);
kfree(dev_pasid);
out_tear_down:
intel_pasid_tear_down_entry(iommu, dev, pasid, false);
@@ -4821,6 +4824,9 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
list_add(&dev_pasid->link_domain, &dmar_domain->dev_pasids);
spin_unlock_irqrestore(&dmar_domain->lock, flags);
+ if (domain->type & __IOMMU_DOMAIN_PAGING)
+ intel_iommu_debugfs_create_dev_pasid(dev_pasid);
+
return 0;
out_detach_iommu:
domain_detach_iommu(dmar_domain, iommu);
@@ -4925,6 +4931,7 @@ const struct iommu_dirty_ops intel_dirty_ops = {
};
const struct iommu_ops intel_iommu_ops = {
+ .blocked_domain = &blocking_domain,
.capable = intel_iommu_capable,
.hw_info = intel_iommu_hw_info,
.domain_alloc = intel_iommu_domain_alloc,
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index d796d0d9b114..65d37a138c75 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -749,12 +749,18 @@ struct device_domain_info {
struct intel_iommu *iommu; /* IOMMU used by this device */
struct dmar_domain *domain; /* pointer to domain */
struct pasid_table *pasid_table; /* pasid table */
+#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
+ struct dentry *debugfs_dentry; /* pointer to device directory dentry */
+#endif
};
struct dev_pasid_info {
struct list_head link_domain; /* link to domain siblings */
struct device *dev;
ioasid_t pasid;
+#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
+ struct dentry *debugfs_dentry; /* pointer to pasid directory dentry */
+#endif
};
static inline void __iommu_flush_cache(
@@ -935,8 +941,16 @@ static inline void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid
#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
void intel_iommu_debugfs_init(void);
+void intel_iommu_debugfs_create_dev(struct device_domain_info *info);
+void intel_iommu_debugfs_remove_dev(struct device_domain_info *info);
+void intel_iommu_debugfs_create_dev_pasid(struct dev_pasid_info *dev_pasid);
+void intel_iommu_debugfs_remove_dev_pasid(struct dev_pasid_info *dev_pasid);
#else
static inline void intel_iommu_debugfs_init(void) {}
+static inline void intel_iommu_debugfs_create_dev(struct device_domain_info *info) {}
+static inline void intel_iommu_debugfs_remove_dev(struct device_domain_info *info) {}
+static inline void intel_iommu_debugfs_create_dev_pasid(struct dev_pasid_info *dev_pasid) {}
+static inline void intel_iommu_debugfs_remove_dev_pasid(struct dev_pasid_info *dev_pasid) {}
#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
extern const struct attribute_group *intel_iommu_groups[];
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index c146378c7d03..f17a1113f3d6 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -37,7 +37,6 @@
#include "iommu-priv.h"
#include "iommu-sva.h"
-#include "iommu-priv.h"
static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
@@ -96,8 +95,8 @@ static const char * const iommu_group_resv_type_string[] = {
static int iommu_bus_notifier(struct notifier_block *nb,
unsigned long action, void *data);
static void iommu_release_device(struct device *dev);
-static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus,
- unsigned type);
+static struct iommu_domain *
+__iommu_group_domain_alloc(struct iommu_group *group, unsigned int type);
static int __iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
@@ -184,6 +183,8 @@ static const char *iommu_domain_type_str(unsigned int t)
case IOMMU_DOMAIN_DMA:
case IOMMU_DOMAIN_DMA_FQ:
return "Translated";
+ case IOMMU_DOMAIN_PLATFORM:
+ return "Platform";
default:
return "Unknown";
}
@@ -290,6 +291,10 @@ void iommu_device_unregister(struct iommu_device *iommu)
spin_lock(&iommu_device_lock);
list_del(&iommu->list);
spin_unlock(&iommu_device_lock);
+
+ /* Pairs with the alloc in generic_single_device_group() */
+ iommu_group_put(iommu->singleton_group);
+ iommu->singleton_group = NULL;
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);
@@ -404,6 +409,7 @@ static int iommu_init_device(struct device *dev, const struct iommu_ops *ops)
ret = PTR_ERR(iommu_dev);
goto err_module_put;
}
+ dev->iommu->iommu_dev = iommu_dev;
ret = iommu_device_link(iommu_dev, dev);
if (ret)
@@ -418,7 +424,6 @@ static int iommu_init_device(struct device *dev, const struct iommu_ops *ops)
}
dev->iommu_group = group;
- dev->iommu->iommu_dev = iommu_dev;
dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);
if (ops->is_attach_deferred)
dev->iommu->attach_deferred = ops->is_attach_deferred(dev);
@@ -432,6 +437,7 @@ err_release:
err_module_put:
module_put(ops->owner);
err_free:
+ dev->iommu->iommu_dev = NULL;
dev_iommu_free(dev);
return ret;
}
@@ -1637,6 +1643,27 @@ struct iommu_group *generic_device_group(struct device *dev)
EXPORT_SYMBOL_GPL(generic_device_group);
/*
+ * Generic device_group call-back function. It just allocates one
+ * iommu group per iommu driver instance, shared by every device
+ * probed by that iommu driver.
+ */
+struct iommu_group *generic_single_device_group(struct device *dev)
+{
+ struct iommu_device *iommu = dev->iommu->iommu_dev;
+
+ if (!iommu->singleton_group) {
+ struct iommu_group *group;
+
+ group = iommu_group_alloc();
+ if (IS_ERR(group))
+ return group;
+ iommu->singleton_group = group;
+ }
+ return iommu_group_ref_get(iommu->singleton_group);
+}
+EXPORT_SYMBOL_GPL(generic_single_device_group);
+
+/*
* Use standard PCI bus topology, isolation features, and DMA alias quirks
* to find or create an IOMMU group for a device.
*/
@@ -1717,26 +1744,29 @@ struct iommu_group *fsl_mc_device_group(struct device *dev)
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);
-static int iommu_get_def_domain_type(struct device *dev)
-{
- const struct iommu_ops *ops = dev_iommu_ops(dev);
-
- if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
- return IOMMU_DOMAIN_DMA;
-
- if (ops->def_domain_type)
- return ops->def_domain_type(dev);
-
- return 0;
-}
-
static struct iommu_domain *
-__iommu_group_alloc_default_domain(const struct bus_type *bus,
- struct iommu_group *group, int req_type)
+__iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
{
if (group->default_domain && group->default_domain->type == req_type)
return group->default_domain;
- return __iommu_domain_alloc(bus, req_type);
+ return __iommu_group_domain_alloc(group, req_type);
+}
+
+/*
+ * Returns the iommu_ops for the devices in an iommu group.
+ *
+ * It is assumed that all devices in an iommu group are managed by a single
+ * IOMMU unit. Therefore, this returns the dev_iommu_ops of the first device
+ * in the group.
+ */
+static const struct iommu_ops *group_iommu_ops(struct iommu_group *group)
+{
+ struct group_device *device =
+ list_first_entry(&group->devices, struct group_device, list);
+
+ lockdep_assert_held(&group->mutex);
+
+ return dev_iommu_ops(device->dev);
}
/*
@@ -1746,25 +1776,34 @@ __iommu_group_alloc_default_domain(const struct bus_type *bus,
static struct iommu_domain *
iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
{
- const struct bus_type *bus =
- list_first_entry(&group->devices, struct group_device, list)
- ->dev->bus;
+ const struct iommu_ops *ops = group_iommu_ops(group);
struct iommu_domain *dom;
lockdep_assert_held(&group->mutex);
+ /*
+ * Allow legacy drivers to specify the domain that will be the default
+ * domain. This should always be an IDENTITY, BLOCKED or PLATFORM
+ * domain. Do not use in new drivers.
+ */
+ if (ops->default_domain) {
+ if (req_type)
+ return NULL;
+ return ops->default_domain;
+ }
+
if (req_type)
- return __iommu_group_alloc_default_domain(bus, group, req_type);
+ return __iommu_group_alloc_default_domain(group, req_type);
/* The driver gave no guidance on what type to use, try the default */
- dom = __iommu_group_alloc_default_domain(bus, group, iommu_def_domain_type);
+ dom = __iommu_group_alloc_default_domain(group, iommu_def_domain_type);
if (dom)
return dom;
/* Otherwise IDENTITY and DMA_FQ defaults will try DMA */
if (iommu_def_domain_type == IOMMU_DOMAIN_DMA)
return NULL;
- dom = __iommu_group_alloc_default_domain(bus, group, IOMMU_DOMAIN_DMA);
+ dom = __iommu_group_alloc_default_domain(group, IOMMU_DOMAIN_DMA);
if (!dom)
return NULL;
@@ -1808,40 +1847,109 @@ static int iommu_bus_notifier(struct notifier_block *nb,
return 0;
}
-/* A target_type of 0 will select the best domain type and cannot fail */
+/*
+ * Combine the driver's chosen def_domain_type across all the devices in a
+ * group. Drivers must give a consistent result.
+ */
+static int iommu_get_def_domain_type(struct iommu_group *group,
+ struct device *dev, int cur_type)
+{
+ const struct iommu_ops *ops = group_iommu_ops(group);
+ int type;
+
+ if (!ops->def_domain_type)
+ return cur_type;
+
+ type = ops->def_domain_type(dev);
+ if (!type || cur_type == type)
+ return cur_type;
+ if (!cur_type)
+ return type;
+
+ dev_err_ratelimited(
+ dev,
+ "IOMMU driver error, requesting conflicting def_domain_type, %s and %s, for devices in group %u.\n",
+ iommu_domain_type_str(cur_type), iommu_domain_type_str(type),
+ group->id);
+
+ /*
+ * Try to recover; drivers are allowed to force IDENTITY or DMA, and
+ * IDENTITY takes precedence.
+ */
+ if (type == IOMMU_DOMAIN_IDENTITY)
+ return type;
+ return cur_type;
+}
+
+/*
+ * A target_type of 0 will select the best domain type. 0 can be returned in
+ * this case, meaning the global default should be used.
+ */
static int iommu_get_default_domain_type(struct iommu_group *group,
int target_type)
{
- int best_type = target_type;
+ struct device *untrusted = NULL;
struct group_device *gdev;
- struct device *last_dev;
+ int driver_type = 0;
lockdep_assert_held(&group->mutex);
+ /*
+ * ARM32 drivers supporting CONFIG_ARM_DMA_USE_IOMMU can declare an
+ * identity_domain and it will automatically become their default
+ * domain. Later on, ARM_DMA_USE_IOMMU will install its UNMANAGED domain.
+ * Override the selection to IDENTITY.
+ */
+ if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
+ static_assert(!(IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) &&
+ IS_ENABLED(CONFIG_IOMMU_DMA)));
+ driver_type = IOMMU_DOMAIN_IDENTITY;
+ }
+
for_each_group_device(group, gdev) {
- unsigned int type = iommu_get_def_domain_type(gdev->dev);
-
- if (best_type && type && best_type != type) {
- if (target_type) {
- dev_err_ratelimited(
- gdev->dev,
- "Device cannot be in %s domain\n",
- iommu_domain_type_str(target_type));
+ driver_type = iommu_get_def_domain_type(group, gdev->dev,
+ driver_type);
+
+ if (dev_is_pci(gdev->dev) && to_pci_dev(gdev->dev)->untrusted) {
+ /*
+ * No ARM32-using system will set untrusted; it cannot
+ * work.
+ */
+ if (WARN_ON(IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)))
return -1;
- }
+ untrusted = gdev->dev;
+ }
+ }
- dev_warn(
- gdev->dev,
- "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
- iommu_domain_type_str(type), dev_name(last_dev),
- iommu_domain_type_str(best_type));
- return 0;
+ /*
+ * If the common dma ops are not selected in kconfig then we cannot use
+ * IOMMU_DOMAIN_DMA at all. Force IDENTITY if nothing else has been
+ * selected.
+ */
+ if (!IS_ENABLED(CONFIG_IOMMU_DMA)) {
+ if (WARN_ON(driver_type == IOMMU_DOMAIN_DMA))
+ return -1;
+ if (!driver_type)
+ driver_type = IOMMU_DOMAIN_IDENTITY;
+ }
+
+ if (untrusted) {
+ if (driver_type && driver_type != IOMMU_DOMAIN_DMA) {
+ dev_err_ratelimited(
+ untrusted,
+ "Device is not trusted, but driver is overriding group %u to %s, refusing to probe.\n",
+ group->id, iommu_domain_type_str(driver_type));
+ return -1;
}
- if (!best_type)
- best_type = type;
- last_dev = gdev->dev;
+ driver_type = IOMMU_DOMAIN_DMA;
}
- return best_type;
+
+ if (target_type) {
+ if (driver_type && target_type != driver_type)
+ return -1;
+ return target_type;
+ }
+ return driver_type;
}
static void iommu_group_do_probe_finalize(struct device *dev)
@@ -1970,16 +2078,24 @@ void iommu_set_fault_handler(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
-static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus,
- unsigned type)
+static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
+ struct device *dev,
+ unsigned int type)
{
struct iommu_domain *domain;
unsigned int alloc_type = type & IOMMU_DOMAIN_ALLOC_FLAGS;
- if (bus == NULL || bus->iommu_ops == NULL)
+ if (alloc_type == IOMMU_DOMAIN_IDENTITY && ops->identity_domain)
+ return ops->identity_domain;
+ else if (alloc_type == IOMMU_DOMAIN_BLOCKED && ops->blocked_domain)
+ return ops->blocked_domain;
+ else if (type & __IOMMU_DOMAIN_PAGING && ops->domain_alloc_paging)
+ domain = ops->domain_alloc_paging(dev);
+ else if (ops->domain_alloc)
+ domain = ops->domain_alloc(alloc_type);
+ else
return NULL;
- domain = bus->iommu_ops->domain_alloc(alloc_type);
if (!domain)
return NULL;
@@ -1989,10 +2105,10 @@ static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus,
* may override this later
*/
if (!domain->pgsize_bitmap)
- domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
+ domain->pgsize_bitmap = ops->pgsize_bitmap;
if (!domain->ops)
- domain->ops = bus->iommu_ops->default_domain_ops;
+ domain->ops = ops->default_domain_ops;
if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) {
iommu_domain_free(domain);
@@ -2001,9 +2117,22 @@ static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus,
return domain;
}
+static struct iommu_domain *
+__iommu_group_domain_alloc(struct iommu_group *group, unsigned int type)
+{
+ struct device *dev =
+ list_first_entry(&group->devices, struct group_device, list)
+ ->dev;
+
+ return __iommu_domain_alloc(group_iommu_ops(group), dev, type);
+}
+
struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
{
- return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
+ if (bus == NULL || bus->iommu_ops == NULL)
+ return NULL;
+ return __iommu_domain_alloc(bus->iommu_ops, NULL,
+ IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);
@@ -2012,7 +2141,8 @@ void iommu_domain_free(struct iommu_domain *domain)
if (domain->type == IOMMU_DOMAIN_SVA)
mmdrop(domain->mm);
iommu_put_dma_cookie(domain);
- domain->ops->free(domain);
+ if (domain->ops->free)
+ domain->ops->free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);
@@ -2062,10 +2192,10 @@ static int __iommu_attach_device(struct iommu_domain *domain,
*/
int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
- struct iommu_group *group;
+ /* Caller must be a probed driver on dev */
+ struct iommu_group *group = dev->iommu_group;
int ret;
- group = iommu_group_get(dev);
if (!group)
return -ENODEV;
@@ -2082,8 +2212,6 @@ int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
out_unlock:
mutex_unlock(&group->mutex);
- iommu_group_put(group);
-
return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
@@ -2098,9 +2226,9 @@ int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
- struct iommu_group *group;
+ /* Caller must be a probed driver on dev */
+ struct iommu_group *group = dev->iommu_group;
- group = iommu_group_get(dev);
if (!group)
return;
@@ -2112,24 +2240,18 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
out_unlock:
mutex_unlock(&group->mutex);
- iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);
struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
- struct iommu_domain *domain;
- struct iommu_group *group;
+ /* Caller must be a probed driver on dev */
+ struct iommu_group *group = dev->iommu_group;
- group = iommu_group_get(dev);
if (!group)
return NULL;
- domain = group->domain;
-
- iommu_group_put(group);
-
- return domain;
+ return group->domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
@@ -2275,21 +2397,8 @@ static int __iommu_group_set_domain_internal(struct iommu_group *group,
if (group->domain == new_domain)
return 0;
- /*
- * New drivers should support default domains, so set_platform_dma()
- * op will never be called. Otherwise the NULL domain represents some
- * platform specific behavior.
- */
- if (!new_domain) {
- for_each_group_device(group, gdev) {
- const struct iommu_ops *ops = dev_iommu_ops(gdev->dev);
-
- if (!WARN_ON(!ops->set_platform_dma_ops))
- ops->set_platform_dma_ops(gdev->dev);
- }
- group->domain = NULL;
- return 0;
- }
+ if (WARN_ON(!new_domain))
+ return -EINVAL;
/*
* Changing the domain is done by calling attach_dev() on the new
@@ -2325,19 +2434,15 @@ err_revert:
*/
last_gdev = gdev;
for_each_group_device(group, gdev) {
- const struct iommu_ops *ops = dev_iommu_ops(gdev->dev);
-
/*
- * If set_platform_dma_ops is not present a NULL domain can
- * happen only for first probe, in which case we leave
- * group->domain as NULL and let release clean everything up.
+ * A NULL domain can happen only for first probe, in which case
+ * we leave group->domain as NULL and let release clean
+ * everything up.
*/
if (group->domain)
WARN_ON(__iommu_device_set_domain(
group, gdev->dev, group->domain,
IOMMU_SET_DOMAIN_MUST_SUCCEED));
- else if (ops->set_platform_dma_ops)
- ops->set_platform_dma_ops(gdev->dev);
if (gdev == last_gdev)
break;
}
@@ -2418,30 +2523,6 @@ out_set_count:
return pgsize;
}
-static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot,
- gfp_t gfp, size_t *mapped)
-{
- const struct iommu_domain_ops *ops = domain->ops;
- size_t pgsize, count;
- int ret;
-
- pgsize = iommu_pgsize(domain, iova, paddr, size, &count);
-
- pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
- iova, &paddr, pgsize, count);
-
- if (ops->map_pages) {
- ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
- gfp, mapped);
- } else {
- ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
- *mapped = ret ? 0 : pgsize;
- }
-
- return ret;
-}
-
static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
@@ -2452,13 +2533,12 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t orig_paddr = paddr;
int ret = 0;
- if (unlikely(!(ops->map || ops->map_pages) ||
- domain->pgsize_bitmap == 0UL))
- return -ENODEV;
-
if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
return -EINVAL;
+ if (WARN_ON(!ops->map_pages || domain->pgsize_bitmap == 0UL))
+ return -ENODEV;
+
/* find out the minimum page size supported */
min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
@@ -2476,10 +2556,14 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
while (size) {
- size_t mapped = 0;
+ size_t pgsize, count, mapped = 0;
+
+ pgsize = iommu_pgsize(domain, iova, paddr, size, &count);
- ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp,
- &mapped);
+ pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
+ iova, &paddr, pgsize, count);
+ ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
+ gfp, &mapped);
/*
* Some pages may have been mapped, even if an error occurred,
* so we should account for those so they can be unmapped.
@@ -2516,25 +2600,21 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
return -EINVAL;
ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
- if (ret == 0 && ops->iotlb_sync_map)
- ops->iotlb_sync_map(domain, iova, size);
+ if (ret == 0 && ops->iotlb_sync_map) {
+ ret = ops->iotlb_sync_map(domain, iova, size);
+ if (ret)
+ goto out_err;
+ }
return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_map);
-static size_t __iommu_unmap_pages(struct iommu_domain *domain,
- unsigned long iova, size_t size,
- struct iommu_iotlb_gather *iotlb_gather)
-{
- const struct iommu_domain_ops *ops = domain->ops;
- size_t pgsize, count;
+out_err:
+ /* undo mappings already done */
+ iommu_unmap(domain, iova, size);
- pgsize = iommu_pgsize(domain, iova, iova, size, &count);
- return ops->unmap_pages ?
- ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) :
- ops->unmap(domain, iova, pgsize, iotlb_gather);
+ return ret;
}
+EXPORT_SYMBOL_GPL(iommu_map);
static size_t __iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size,
@@ -2545,11 +2625,10 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
unsigned long orig_iova = iova;
unsigned int min_pagesz;
- if (unlikely(!(ops->unmap || ops->unmap_pages) ||
- domain->pgsize_bitmap == 0UL))
+ if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
return 0;
- if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
+ if (WARN_ON(!ops->unmap_pages || domain->pgsize_bitmap == 0UL))
return 0;
/* find out the minimum page size supported */
@@ -2573,9 +2652,10 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
* or we hit an area that isn't mapped.
*/
while (unmapped < size) {
- unmapped_page = __iommu_unmap_pages(domain, iova,
- size - unmapped,
- iotlb_gather);
+ size_t pgsize, count;
+
+ pgsize = iommu_pgsize(domain, iova, iova, size - unmapped, &count);
+ unmapped_page = ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather);
if (!unmapped_page)
break;
@@ -2658,8 +2738,11 @@ next:
sg = sg_next(sg);
}
- if (ops->iotlb_sync_map)
- ops->iotlb_sync_map(domain, iova, mapped);
+ if (ops->iotlb_sync_map) {
+ ret = ops->iotlb_sync_map(domain, iova, mapped);
+ if (ret)
+ goto out_err;
+ }
return mapped;
out_err:
@@ -2957,21 +3040,9 @@ static int iommu_setup_default_domain(struct iommu_group *group,
if (req_type < 0)
return -EINVAL;
- /*
- * There are still some drivers which don't support default domains, so
- * we ignore the failure and leave group->default_domain NULL.
- *
- * We assume that the iommu driver starts up the device in
- * 'set_platform_dma_ops' mode if it does not support default domains.
- */
dom = iommu_group_alloc_default_domain(group, req_type);
- if (!dom) {
- /* Once in default_domain mode we never leave */
- if (group->default_domain)
- return -ENODEV;
- group->default_domain = NULL;
- return 0;
- }
+ if (!dom)
+ return -ENODEV;
if (group->default_domain == dom)
return 0;
@@ -3114,24 +3185,6 @@ out_unlock:
return ret ?: count;
}
-static bool iommu_is_default_domain(struct iommu_group *group)
-{
- if (group->domain == group->default_domain)
- return true;
-
- /*
- * If the default domain was set to identity and it is still an identity
- * domain then we consider this a pass. This happens because of
- * amd_iommu_init_device() replacing the default idenytity domain with an
- * identity domain that has a different configuration for AMDGPU.
- */
- if (group->default_domain &&
- group->default_domain->type == IOMMU_DOMAIN_IDENTITY &&
- group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY)
- return true;
- return false;
-}
-
/**
* iommu_device_use_default_domain() - Device driver wants to handle device
* DMA through the kernel DMA API.
@@ -3142,7 +3195,8 @@ static bool iommu_is_default_domain(struct iommu_group *group)
*/
int iommu_device_use_default_domain(struct device *dev)
{
- struct iommu_group *group = iommu_group_get(dev);
+ /* Caller is the driver core during the pre-probe path */
+ struct iommu_group *group = dev->iommu_group;
int ret = 0;
if (!group)
@@ -3150,7 +3204,7 @@ int iommu_device_use_default_domain(struct device *dev)
mutex_lock(&group->mutex);
if (group->owner_cnt) {
- if (group->owner || !iommu_is_default_domain(group) ||
+ if (group->domain != group->default_domain || group->owner ||
!xa_empty(&group->pasid_array)) {
ret = -EBUSY;
goto unlock_out;
@@ -3161,8 +3215,6 @@ int iommu_device_use_default_domain(struct device *dev)
unlock_out:
mutex_unlock(&group->mutex);
- iommu_group_put(group);
-
return ret;
}
@@ -3176,7 +3228,8 @@ unlock_out:
*/
void iommu_device_unuse_default_domain(struct device *dev)
{
- struct iommu_group *group = iommu_group_get(dev);
+ /* Caller is the driver core during the post-probe path */
+ struct iommu_group *group = dev->iommu_group;
if (!group)
return;
@@ -3186,26 +3239,22 @@ void iommu_device_unuse_default_domain(struct device *dev)
group->owner_cnt--;
mutex_unlock(&group->mutex);
- iommu_group_put(group);
}
static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
{
- struct group_device *dev =
- list_first_entry(&group->devices, struct group_device, list);
-
if (group->blocking_domain)
return 0;
group->blocking_domain =
- __iommu_domain_alloc(dev->dev->bus, IOMMU_DOMAIN_BLOCKED);
+ __iommu_group_domain_alloc(group, IOMMU_DOMAIN_BLOCKED);
if (!group->blocking_domain) {
/*
* For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED
* create an empty domain instead.
*/
- group->blocking_domain = __iommu_domain_alloc(
- dev->dev->bus, IOMMU_DOMAIN_UNMANAGED);
+ group->blocking_domain = __iommu_group_domain_alloc(
+ group, IOMMU_DOMAIN_UNMANAGED);
if (!group->blocking_domain)
return -EINVAL;
}
@@ -3273,13 +3322,13 @@ EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner);
*/
int iommu_device_claim_dma_owner(struct device *dev, void *owner)
{
- struct iommu_group *group;
+ /* Caller must be a probed driver on dev */
+ struct iommu_group *group = dev->iommu_group;
int ret = 0;
if (WARN_ON(!owner))
return -EINVAL;
- group = iommu_group_get(dev);
if (!group)
return -ENODEV;
@@ -3296,8 +3345,6 @@ int iommu_device_claim_dma_owner(struct device *dev, void *owner)
ret = __iommu_take_dma_ownership(group, owner);
unlock_out:
mutex_unlock(&group->mutex);
- iommu_group_put(group);
-
return ret;
}
EXPORT_SYMBOL_GPL(iommu_device_claim_dma_owner);
@@ -3335,7 +3382,8 @@ EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner);
*/
void iommu_device_release_dma_owner(struct device *dev)
{
- struct iommu_group *group = iommu_group_get(dev);
+ /* Caller must be a probed driver on dev */
+ struct iommu_group *group = dev->iommu_group;
mutex_lock(&group->mutex);
if (group->owner_cnt > 1)
@@ -3343,7 +3391,6 @@ void iommu_device_release_dma_owner(struct device *dev)
else
__iommu_release_dma_ownership(group);
mutex_unlock(&group->mutex);
- iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_device_release_dma_owner);
@@ -3404,14 +3451,14 @@ static void __iommu_remove_group_pasid(struct iommu_group *group,
int iommu_attach_device_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid)
{
- struct iommu_group *group;
+ /* Caller must be a probed driver on dev */
+ struct iommu_group *group = dev->iommu_group;
void *curr;
int ret;
if (!domain->ops->set_dev_pasid)
return -EOPNOTSUPP;
- group = iommu_group_get(dev);
if (!group)
return -ENODEV;
@@ -3429,8 +3476,6 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
}
out_unlock:
mutex_unlock(&group->mutex);
- iommu_group_put(group);
-
return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device_pasid);
@@ -3447,14 +3492,13 @@ EXPORT_SYMBOL_GPL(iommu_attach_device_pasid);
void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev,
ioasid_t pasid)
{
- struct iommu_group *group = iommu_group_get(dev);
+ /* Caller must be a probed driver on dev */
+ struct iommu_group *group = dev->iommu_group;
mutex_lock(&group->mutex);
__iommu_remove_group_pasid(group, pasid);
WARN_ON(xa_erase(&group->pasid_array, pasid) != domain);
mutex_unlock(&group->mutex);
-
- iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device_pasid);
@@ -3476,10 +3520,10 @@ struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev,
ioasid_t pasid,
unsigned int type)
{
+ /* Caller must be a probed driver on dev */
+ struct iommu_group *group = dev->iommu_group;
struct iommu_domain *domain;
- struct iommu_group *group;
- group = iommu_group_get(dev);
if (!group)
return NULL;
@@ -3488,7 +3532,6 @@ struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev,
if (type && domain && domain->type != type)
domain = ERR_PTR(-EBUSY);
xa_unlock(&group->pasid_array);
- iommu_group_put(group);
return domain;
}
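Taken together, the iommu.c changes above move domain allocation from bus->iommu_ops to the ops of the group's first device, let drivers publish static identity/blocked/default domains instead of set_platform_dma_ops, and require map_pages/unmap_pages plus an iotlb_sync_map that can report failure (in which case iommu_map() unwinds what it already installed). The per-driver conversions below all land on the same shape; a minimal sketch of that shape, using illustrative foo_* names rather than any real driver:

#include <linux/iommu.h>

/* Translating domains are now allocated through domain_alloc_paging(). */
static struct iommu_domain *foo_domain_alloc_paging(struct device *dev);

/* The identity domain is a static singleton; attach_dev() undoes translation. */
static int foo_identity_attach(struct iommu_domain *identity_domain,
			       struct device *dev)
{
	/* disable translation for @dev in hardware here */
	return 0;
}

static struct iommu_domain_ops foo_identity_ops = {
	.attach_dev = foo_identity_attach,
};

static struct iommu_domain foo_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &foo_identity_ops,
};

static const struct iommu_ops foo_ops = {
	.identity_domain	= &foo_identity_domain,
	.domain_alloc_paging	= foo_domain_alloc_paging,
	.device_group		= generic_single_device_group,
};

Drivers that can really block DMA may additionally point .blocked_domain at a static domain, as the Intel driver does in the hunk above and the iommufd selftest does below.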
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index d43a87737c1e..5d93434003d8 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -123,10 +123,6 @@ struct selftest_obj {
};
};
-static void mock_domain_blocking_free(struct iommu_domain *domain)
-{
-}
-
static int mock_domain_nop_attach(struct iommu_domain *domain,
struct device *dev)
{
@@ -139,7 +135,6 @@ static int mock_domain_nop_attach(struct iommu_domain *domain,
}
static const struct iommu_domain_ops mock_blocking_ops = {
- .free = mock_domain_blocking_free,
.attach_dev = mock_domain_nop_attach,
};
@@ -258,15 +253,6 @@ __mock_domain_alloc_nested(struct mock_iommu_domain *mock_parent,
return &mock_nested->domain;
}
-static struct iommu_domain *mock_domain_alloc(unsigned int iommu_domain_type)
-{
- if (iommu_domain_type == IOMMU_DOMAIN_BLOCKED)
- return &mock_blocking_domain;
- if (iommu_domain_type == IOMMU_DOMAIN_UNMANAGED)
- return mock_domain_alloc_paging(NULL);
- return NULL;
-}
-
static struct iommu_domain *
mock_domain_alloc_user(struct device *dev, u32 flags,
struct iommu_domain *parent,
@@ -446,14 +432,6 @@ static bool mock_domain_capable(struct device *dev, enum iommu_cap cap)
return false;
}
-static void mock_domain_set_plaform_dma_ops(struct device *dev)
-{
- /*
- * mock doesn't setup default domains because we can't hook into the
- * normal probe path
- */
-}
-
static struct iommu_device mock_iommu_device = {
};
@@ -463,13 +441,18 @@ static struct iommu_device *mock_probe_device(struct device *dev)
}
static const struct iommu_ops mock_ops = {
+ /*
+ * IOMMU_DOMAIN_BLOCKED cannot be returned from def_domain_type()
+ * because it is zero.
+ */
+ .default_domain = &mock_blocking_domain,
+ .blocked_domain = &mock_blocking_domain,
.owner = THIS_MODULE,
.pgsize_bitmap = MOCK_IO_PAGE_SIZE,
.hw_info = mock_domain_hw_info,
- .domain_alloc = mock_domain_alloc,
+ .domain_alloc_paging = mock_domain_alloc_paging,
.domain_alloc_user = mock_domain_alloc_user,
.capable = mock_domain_capable,
- .set_platform_dma_ops = mock_domain_set_plaform_dma_ops,
.device_group = generic_device_group,
.probe_device = mock_probe_device,
.default_domain_ops =
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 10b964600948..d30e453d0fb4 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -11,6 +11,7 @@
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
+#include <linux/workqueue.h>
/* The anchor node sits above the top of the usable address space */
#define IOVA_ANCHOR ~0UL
@@ -622,15 +623,21 @@ EXPORT_SYMBOL_GPL(reserve_iova);
/*
* As kmalloc's buffer size is fixed to power of 2, 127 is chosen to
* assure size of 'iova_magazine' to be 1024 bytes, so that no memory
- * will be wasted.
+ * will be wasted. Since only full magazines are inserted into the depot,
+ * we don't need to waste PFN capacity on a separate list head either.
*/
#define IOVA_MAG_SIZE 127
-#define MAX_GLOBAL_MAGS 32 /* magazines per bin */
+
+#define IOVA_DEPOT_DELAY msecs_to_jiffies(100)
struct iova_magazine {
- unsigned long size;
+ union {
+ unsigned long size;
+ struct iova_magazine *next;
+ };
unsigned long pfns[IOVA_MAG_SIZE];
};
+static_assert(!(sizeof(struct iova_magazine) & (sizeof(struct iova_magazine) - 1)));
struct iova_cpu_rcache {
spinlock_t lock;
@@ -640,9 +647,11 @@ struct iova_cpu_rcache {
struct iova_rcache {
spinlock_t lock;
- unsigned long depot_size;
- struct iova_magazine *depot[MAX_GLOBAL_MAGS];
+ unsigned int depot_size;
+ struct iova_magazine *depot;
struct iova_cpu_rcache __percpu *cpu_rcaches;
+ struct iova_domain *iovad;
+ struct delayed_work work;
};
static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
@@ -717,6 +726,41 @@ static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
mag->pfns[mag->size++] = pfn;
}
+static struct iova_magazine *iova_depot_pop(struct iova_rcache *rcache)
+{
+ struct iova_magazine *mag = rcache->depot;
+
+ rcache->depot = mag->next;
+ mag->size = IOVA_MAG_SIZE;
+ rcache->depot_size--;
+ return mag;
+}
+
+static void iova_depot_push(struct iova_rcache *rcache, struct iova_magazine *mag)
+{
+ mag->next = rcache->depot;
+ rcache->depot = mag;
+ rcache->depot_size++;
+}
+
+static void iova_depot_work_func(struct work_struct *work)
+{
+ struct iova_rcache *rcache = container_of(work, typeof(*rcache), work.work);
+ struct iova_magazine *mag = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rcache->lock, flags);
+ if (rcache->depot_size > num_online_cpus())
+ mag = iova_depot_pop(rcache);
+ spin_unlock_irqrestore(&rcache->lock, flags);
+
+ if (mag) {
+ iova_magazine_free_pfns(mag, rcache->iovad);
+ iova_magazine_free(mag);
+ schedule_delayed_work(&rcache->work, IOVA_DEPOT_DELAY);
+ }
+}
+
int iova_domain_init_rcaches(struct iova_domain *iovad)
{
unsigned int cpu;
@@ -734,7 +778,8 @@ int iova_domain_init_rcaches(struct iova_domain *iovad)
rcache = &iovad->rcaches[i];
spin_lock_init(&rcache->lock);
- rcache->depot_size = 0;
+ rcache->iovad = iovad;
+ INIT_DELAYED_WORK(&rcache->work, iova_depot_work_func);
rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache),
cache_line_size());
if (!rcache->cpu_rcaches) {
@@ -776,7 +821,6 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
struct iova_rcache *rcache,
unsigned long iova_pfn)
{
- struct iova_magazine *mag_to_free = NULL;
struct iova_cpu_rcache *cpu_rcache;
bool can_insert = false;
unsigned long flags;
@@ -794,13 +838,9 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
if (new_mag) {
spin_lock(&rcache->lock);
- if (rcache->depot_size < MAX_GLOBAL_MAGS) {
- rcache->depot[rcache->depot_size++] =
- cpu_rcache->loaded;
- } else {
- mag_to_free = cpu_rcache->loaded;
- }
+ iova_depot_push(rcache, cpu_rcache->loaded);
spin_unlock(&rcache->lock);
+ schedule_delayed_work(&rcache->work, IOVA_DEPOT_DELAY);
cpu_rcache->loaded = new_mag;
can_insert = true;
@@ -812,11 +852,6 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
spin_unlock_irqrestore(&cpu_rcache->lock, flags);
- if (mag_to_free) {
- iova_magazine_free_pfns(mag_to_free, iovad);
- iova_magazine_free(mag_to_free);
- }
-
return can_insert;
}
@@ -854,9 +889,9 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
has_pfn = true;
} else {
spin_lock(&rcache->lock);
- if (rcache->depot_size > 0) {
+ if (rcache->depot) {
iova_magazine_free(cpu_rcache->loaded);
- cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
+ cpu_rcache->loaded = iova_depot_pop(rcache);
has_pfn = true;
}
spin_unlock(&rcache->lock);
@@ -895,9 +930,8 @@ static void free_iova_rcaches(struct iova_domain *iovad)
struct iova_rcache *rcache;
struct iova_cpu_rcache *cpu_rcache;
unsigned int cpu;
- int i, j;
- for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+ for (int i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
rcache = &iovad->rcaches[i];
if (!rcache->cpu_rcaches)
break;
@@ -907,8 +941,9 @@ static void free_iova_rcaches(struct iova_domain *iovad)
iova_magazine_free(cpu_rcache->prev);
}
free_percpu(rcache->cpu_rcaches);
- for (j = 0; j < rcache->depot_size; ++j)
- iova_magazine_free(rcache->depot[j]);
+ cancel_delayed_work_sync(&rcache->work);
+ while (rcache->depot)
+ iova_magazine_free(iova_depot_pop(rcache));
}
kfree(iovad->rcaches);
@@ -942,16 +977,16 @@ static void free_global_cached_iovas(struct iova_domain *iovad)
{
struct iova_rcache *rcache;
unsigned long flags;
- int i, j;
- for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+ for (int i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
rcache = &iovad->rcaches[i];
spin_lock_irqsave(&rcache->lock, flags);
- for (j = 0; j < rcache->depot_size; ++j) {
- iova_magazine_free_pfns(rcache->depot[j], iovad);
- iova_magazine_free(rcache->depot[j]);
+ while (rcache->depot) {
+ struct iova_magazine *mag = iova_depot_pop(rcache);
+
+ iova_magazine_free_pfns(mag, iovad);
+ iova_magazine_free(mag);
}
- rcache->depot_size = 0;
spin_unlock_irqrestore(&rcache->lock, flags);
}
}
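A quick size check on the reworked magazine, assuming a 64-bit kernel where unsigned long is 8 bytes: the size/next union takes one word and the 127 pfns take 127 more, so the struct is exactly 128 words (1024 bytes), a power of two, which is what the new static_assert verifies. Because only full magazines ever sit in the depot, the size field is redundant there and the same word can carry the next pointer; iova_depot_pop() restores size to IOVA_MAG_SIZE when a magazine leaves the depot. A standalone sketch of the arithmetic:

#include <assert.h>

#define IOVA_MAG_SIZE 127	/* value from the hunk above */

/* 1 word for the size/next union + 127 words of pfns = 128 words = 1 KiB. */
static_assert((1 + IOVA_MAG_SIZE) * sizeof(unsigned long) == 1024,
	      "iova_magazine is exactly 1 KiB on a 64-bit build");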
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 65ff69477c43..ace1fc4bd34b 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -64,7 +64,6 @@ struct ipmmu_vmsa_device {
struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
s8 utlb_ctx[IPMMU_UTLB_MAX];
- struct iommu_group *group;
struct dma_iommu_mapping *mapping;
};
@@ -295,6 +294,18 @@ static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
mmu->utlb_ctx[utlb] = domain->context_id;
}
+/*
+ * Disable MMU translation for the microTLB.
+ */
+static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
+ unsigned int utlb)
+{
+ struct ipmmu_vmsa_device *mmu = domain->mmu;
+
+ ipmmu_imuctr_write(mmu, utlb, 0);
+ mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
+}
+
static void ipmmu_tlb_flush_all(void *cookie)
{
struct ipmmu_vmsa_domain *domain = cookie;
@@ -551,13 +562,10 @@ static irqreturn_t ipmmu_irq(int irq, void *dev)
* IOMMU Operations
*/
-static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
+static struct iommu_domain *ipmmu_domain_alloc_paging(struct device *dev)
{
struct ipmmu_vmsa_domain *domain;
- if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
- return NULL;
-
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return NULL;
@@ -627,6 +635,36 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
return 0;
}
+static int ipmmu_iommu_identity_attach(struct iommu_domain *identity_domain,
+ struct device *dev)
+{
+ struct iommu_domain *io_domain = iommu_get_domain_for_dev(dev);
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct ipmmu_vmsa_domain *domain;
+ unsigned int i;
+
+ if (io_domain == identity_domain || !io_domain)
+ return 0;
+
+ domain = to_vmsa_domain(io_domain);
+ for (i = 0; i < fwspec->num_ids; ++i)
+ ipmmu_utlb_disable(domain, fwspec->ids[i]);
+
+ /*
+ * TODO: Optimize by disabling the context when no device is attached.
+ */
+ return 0;
+}
+
+static struct iommu_domain_ops ipmmu_iommu_identity_ops = {
+ .attach_dev = ipmmu_iommu_identity_attach,
+};
+
+static struct iommu_domain ipmmu_iommu_identity_domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &ipmmu_iommu_identity_ops,
+};
+
static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
@@ -833,28 +871,18 @@ static void ipmmu_release_device(struct device *dev)
arm_iommu_release_mapping(mmu->mapping);
}
-static struct iommu_group *ipmmu_find_group(struct device *dev)
-{
- struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
- struct iommu_group *group;
-
- if (mmu->group)
- return iommu_group_ref_get(mmu->group);
-
- group = iommu_group_alloc();
- if (!IS_ERR(group))
- mmu->group = group;
-
- return group;
-}
-
static const struct iommu_ops ipmmu_ops = {
- .domain_alloc = ipmmu_domain_alloc,
+ .identity_domain = &ipmmu_iommu_identity_domain,
+ .domain_alloc_paging = ipmmu_domain_alloc_paging,
.probe_device = ipmmu_probe_device,
.release_device = ipmmu_release_device,
.probe_finalize = ipmmu_probe_finalize,
+ /*
+ * FIXME: The device grouping is a fixed property of the hardware's
+ * ability to isolate and control DMA; it should not depend on kconfig.
+ */
.device_group = IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)
- ? generic_device_group : ipmmu_find_group,
+ ? generic_device_group : generic_single_device_group,
.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
.of_xlate = ipmmu_of_xlate,
.default_domain_ops = &(const struct iommu_domain_ops) {
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 79d89bad5132..f86af9815d6f 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -302,13 +302,10 @@ static void __program_context(void __iomem *base, int ctx,
SET_M(base, ctx, 1);
}
-static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *msm_iommu_domain_alloc_paging(struct device *dev)
{
struct msm_priv *priv;
- if (type != IOMMU_DOMAIN_UNMANAGED)
- return NULL;
-
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
goto fail_nomem;
@@ -443,15 +440,20 @@ fail:
return ret;
}
-static void msm_iommu_set_platform_dma(struct device *dev)
+static int msm_iommu_identity_attach(struct iommu_domain *identity_domain,
+ struct device *dev)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct msm_priv *priv = to_msm_priv(domain);
+ struct msm_priv *priv;
unsigned long flags;
struct msm_iommu_dev *iommu;
struct msm_iommu_ctx_dev *master;
- int ret;
+ int ret = 0;
+
+ if (domain == identity_domain || !domain)
+ return 0;
+ priv = to_msm_priv(domain);
free_io_pgtable_ops(priv->iop);
spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -468,8 +470,18 @@ static void msm_iommu_set_platform_dma(struct device *dev)
}
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
+ return ret;
}
+static struct iommu_domain_ops msm_iommu_identity_ops = {
+ .attach_dev = msm_iommu_identity_attach,
+};
+
+static struct iommu_domain msm_iommu_identity_domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &msm_iommu_identity_ops,
+};
+
static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t pa, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
@@ -486,12 +498,13 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
return ret;
}
-static void msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
- size_t size)
+static int msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
+ size_t size)
{
struct msm_priv *priv = to_msm_priv(domain);
__flush_iotlb_range(iova, size, SZ_4K, false, priv);
+ return 0;
}
static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
@@ -675,10 +688,10 @@ fail:
}
static struct iommu_ops msm_iommu_ops = {
- .domain_alloc = msm_iommu_domain_alloc,
+ .identity_domain = &msm_iommu_identity_domain,
+ .domain_alloc_paging = msm_iommu_domain_alloc_paging,
.probe_device = msm_iommu_probe_device,
.device_group = generic_device_group,
- .set_platform_dma_ops = msm_iommu_set_platform_dma,
.pgsize_bitmap = MSM_IOMMU_PGSIZES,
.of_xlate = qcom_iommu_of_xlate,
.default_domain_ops = &(const struct iommu_domain_ops) {
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index fab6c347ce57..75279500a4a8 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -688,13 +688,10 @@ update_iova_region:
return 0;
}
-static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *mtk_iommu_domain_alloc_paging(struct device *dev)
{
struct mtk_iommu_domain *dom;
- if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
- return NULL;
-
dom = kzalloc(sizeof(*dom), GFP_KERNEL);
if (!dom)
return NULL;
@@ -776,6 +773,28 @@ err_unlock:
return ret;
}
+static int mtk_iommu_identity_attach(struct iommu_domain *identity_domain,
+ struct device *dev)
+{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
+
+ if (domain == identity_domain || !domain)
+ return 0;
+
+ mtk_iommu_config(data, dev, false, 0);
+ return 0;
+}
+
+static struct iommu_domain_ops mtk_iommu_identity_ops = {
+ .attach_dev = mtk_iommu_identity_attach,
+};
+
+static struct iommu_domain mtk_iommu_identity_domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &mtk_iommu_identity_ops,
+};
+
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
@@ -817,12 +836,13 @@ static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
mtk_iommu_tlb_flush_range_sync(gather->start, length, dom->bank);
}
-static void mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
- size_t size)
+static int mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
+ size_t size)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
mtk_iommu_tlb_flush_range_sync(iova, size, dom->bank);
+ return 0;
}
static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -995,7 +1015,8 @@ static void mtk_iommu_get_resv_regions(struct device *dev,
}
static const struct iommu_ops mtk_iommu_ops = {
- .domain_alloc = mtk_iommu_domain_alloc,
+ .identity_domain = &mtk_iommu_identity_domain,
+ .domain_alloc_paging = mtk_iommu_domain_alloc_paging,
.probe_device = mtk_iommu_probe_device,
.release_device = mtk_iommu_release_device,
.device_group = mtk_iommu_device_group,
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 8a0a5e5d049f..67e044c1a7d9 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -270,13 +270,10 @@ static int mtk_iommu_v1_domain_finalise(struct mtk_iommu_v1_data *data)
return 0;
}
-static struct iommu_domain *mtk_iommu_v1_domain_alloc(unsigned type)
+static struct iommu_domain *mtk_iommu_v1_domain_alloc_paging(struct device *dev)
{
struct mtk_iommu_v1_domain *dom;
- if (type != IOMMU_DOMAIN_UNMANAGED)
- return NULL;
-
dom = kzalloc(sizeof(*dom), GFP_KERNEL);
if (!dom)
return NULL;
@@ -319,13 +316,24 @@ static int mtk_iommu_v1_attach_device(struct iommu_domain *domain, struct device
return 0;
}
-static void mtk_iommu_v1_set_platform_dma(struct device *dev)
+static int mtk_iommu_v1_identity_attach(struct iommu_domain *identity_domain,
+ struct device *dev)
{
struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);
mtk_iommu_v1_config(data, dev, false);
+ return 0;
}
+static struct iommu_domain_ops mtk_iommu_v1_identity_ops = {
+ .attach_dev = mtk_iommu_v1_identity_attach,
+};
+
+static struct iommu_domain mtk_iommu_v1_identity_domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &mtk_iommu_v1_identity_ops,
+};
+
static int mtk_iommu_v1_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
@@ -441,11 +449,6 @@ static int mtk_iommu_v1_create_mapping(struct device *dev, struct of_phandle_arg
return 0;
}
-static int mtk_iommu_v1_def_domain_type(struct device *dev)
-{
- return IOMMU_DOMAIN_UNMANAGED;
-}
-
static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
@@ -578,14 +581,13 @@ static int mtk_iommu_v1_hw_init(const struct mtk_iommu_v1_data *data)
}
static const struct iommu_ops mtk_iommu_v1_ops = {
- .domain_alloc = mtk_iommu_v1_domain_alloc,
+ .identity_domain = &mtk_iommu_v1_identity_domain,
+ .domain_alloc_paging = mtk_iommu_v1_domain_alloc_paging,
.probe_device = mtk_iommu_v1_probe_device,
.probe_finalize = mtk_iommu_v1_probe_finalize,
.release_device = mtk_iommu_v1_release_device,
- .def_domain_type = mtk_iommu_v1_def_domain_type,
.device_group = generic_device_group,
.pgsize_bitmap = MT2701_IOMMU_PAGE_SIZE,
- .set_platform_dma_ops = mtk_iommu_v1_set_platform_dma,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = mtk_iommu_v1_attach_device,
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 537e402f9bba..c66b070841dd 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1225,18 +1225,15 @@ static int omap_iommu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, obj);
if (omap_iommu_can_register(pdev)) {
- obj->group = iommu_group_alloc();
- if (IS_ERR(obj->group))
- return PTR_ERR(obj->group);
-
err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL,
obj->name);
if (err)
- goto out_group;
+ return err;
err = iommu_device_register(&obj->iommu, &omap_iommu_ops, &pdev->dev);
if (err)
goto out_sysfs;
+ obj->has_iommu_driver = true;
}
pm_runtime_enable(obj->dev);
@@ -1252,8 +1249,6 @@ static int omap_iommu_probe(struct platform_device *pdev)
out_sysfs:
iommu_device_sysfs_remove(&obj->iommu);
-out_group:
- iommu_group_put(obj->group);
return err;
}
@@ -1261,10 +1256,7 @@ static void omap_iommu_remove(struct platform_device *pdev)
{
struct omap_iommu *obj = platform_get_drvdata(pdev);
- if (obj->group) {
- iommu_group_put(obj->group);
- obj->group = NULL;
-
+ if (obj->has_iommu_driver) {
iommu_device_sysfs_remove(&obj->iommu);
iommu_device_unregister(&obj->iommu);
}
@@ -1318,7 +1310,8 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
}
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
- phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
+ phys_addr_t pa, size_t bytes, size_t count,
+ int prot, gfp_t gfp, size_t *mapped)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct device *dev = omap_domain->dev;
@@ -1356,13 +1349,15 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
oiommu = iommu->iommu_dev;
iopgtable_clear_entry(oiommu, da);
}
+ } else {
+ *mapped = bytes;
}
return ret;
}
static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
- size_t size, struct iommu_iotlb_gather *gather)
+ size_t size, size_t count, struct iommu_iotlb_gather *gather)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct device *dev = omap_domain->dev;
@@ -1555,23 +1550,35 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
omap_domain->dev = NULL;
}
-static void omap_iommu_set_platform_dma(struct device *dev)
+static int omap_iommu_identity_attach(struct iommu_domain *identity_domain,
+ struct device *dev)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
+ struct omap_iommu_domain *omap_domain;
+
+ if (domain == identity_domain || !domain)
+ return 0;
+ omap_domain = to_omap_domain(domain);
spin_lock(&omap_domain->lock);
_omap_iommu_detach_dev(omap_domain, dev);
spin_unlock(&omap_domain->lock);
+ return 0;
}
-static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
+static struct iommu_domain_ops omap_iommu_identity_ops = {
+ .attach_dev = omap_iommu_identity_attach,
+};
+
+static struct iommu_domain omap_iommu_identity_domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &omap_iommu_identity_ops,
+};
+
+static struct iommu_domain *omap_iommu_domain_alloc_paging(struct device *dev)
{
struct omap_iommu_domain *omap_domain;
- if (type != IOMMU_DOMAIN_UNMANAGED)
- return NULL;
-
omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
if (!omap_domain)
return NULL;
@@ -1717,31 +1724,17 @@ static void omap_iommu_release_device(struct device *dev)
}
-static struct iommu_group *omap_iommu_device_group(struct device *dev)
-{
- struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
- struct iommu_group *group = ERR_PTR(-EINVAL);
-
- if (!arch_data)
- return ERR_PTR(-ENODEV);
-
- if (arch_data->iommu_dev)
- group = iommu_group_ref_get(arch_data->iommu_dev->group);
-
- return group;
-}
-
static const struct iommu_ops omap_iommu_ops = {
- .domain_alloc = omap_iommu_domain_alloc,
+ .identity_domain = &omap_iommu_identity_domain,
+ .domain_alloc_paging = omap_iommu_domain_alloc_paging,
.probe_device = omap_iommu_probe_device,
.release_device = omap_iommu_release_device,
- .device_group = omap_iommu_device_group,
- .set_platform_dma_ops = omap_iommu_set_platform_dma,
+ .device_group = generic_single_device_group,
.pgsize_bitmap = OMAP_IOMMU_PGSIZES,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = omap_iommu_attach_dev,
- .map = omap_iommu_map,
- .unmap = omap_iommu_unmap,
+ .map_pages = omap_iommu_map,
+ .unmap_pages = omap_iommu_unmap,
.iova_to_phys = omap_iommu_iova_to_phys,
.free = omap_iommu_domain_free,
}
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index 18ee713ede78..27697109ec79 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -80,7 +80,7 @@ struct omap_iommu {
u32 id;
struct iommu_device iommu;
- struct iommu_group *group;
+ bool has_iommu_driver;
u8 pwrst;
};
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 8ff69fbf9f65..2685861c0a12 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -113,7 +113,6 @@ struct rk_iommu {
struct iommu_device iommu;
struct list_head node; /* entry in rk_iommu_domain.iommus */
struct iommu_domain *domain; /* domain to which iommu is attached */
- struct iommu_group *group;
};
struct rk_iommudata {
@@ -817,7 +816,8 @@ unwind:
}
static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+ phys_addr_t paddr, size_t size, size_t count,
+ int prot, gfp_t gfp, size_t *mapped)
{
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
@@ -850,12 +850,14 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
paddr, size, prot);
spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+ if (!ret)
+ *mapped = size;
return ret;
}
static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
- size_t size, struct iommu_iotlb_gather *gather)
+ size_t size, size_t count, struct iommu_iotlb_gather *gather)
{
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
@@ -989,13 +991,8 @@ static int rk_iommu_identity_attach(struct iommu_domain *identity_domain,
return 0;
}
-static void rk_iommu_identity_free(struct iommu_domain *domain)
-{
-}
-
static struct iommu_domain_ops rk_identity_ops = {
.attach_dev = rk_iommu_identity_attach,
- .free = rk_iommu_identity_free,
};
static struct iommu_domain rk_identity_domain = {
@@ -1003,13 +1000,6 @@ static struct iommu_domain rk_identity_domain = {
.ops = &rk_identity_ops,
};
-#ifdef CONFIG_ARM
-static void rk_iommu_set_platform_dma(struct device *dev)
-{
- WARN_ON(rk_iommu_identity_attach(&rk_identity_domain, dev));
-}
-#endif
-
static int rk_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
@@ -1055,16 +1045,10 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
return ret;
}
-static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
{
struct rk_iommu_domain *rk_domain;
- if (type == IOMMU_DOMAIN_IDENTITY)
- return &rk_identity_domain;
-
- if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
- return NULL;
-
if (!dma_dev)
return NULL;
@@ -1155,15 +1139,6 @@ static void rk_iommu_release_device(struct device *dev)
device_link_del(data->link);
}
-static struct iommu_group *rk_iommu_device_group(struct device *dev)
-{
- struct rk_iommu *iommu;
-
- iommu = rk_iommu_from_dev(dev);
-
- return iommu_group_ref_get(iommu->group);
-}
-
static int rk_iommu_of_xlate(struct device *dev,
struct of_phandle_args *args)
{
@@ -1186,19 +1161,17 @@ static int rk_iommu_of_xlate(struct device *dev,
}
static const struct iommu_ops rk_iommu_ops = {
- .domain_alloc = rk_iommu_domain_alloc,
+ .identity_domain = &rk_identity_domain,
+ .domain_alloc_paging = rk_iommu_domain_alloc_paging,
.probe_device = rk_iommu_probe_device,
.release_device = rk_iommu_release_device,
- .device_group = rk_iommu_device_group,
-#ifdef CONFIG_ARM
- .set_platform_dma_ops = rk_iommu_set_platform_dma,
-#endif
+ .device_group = generic_single_device_group,
.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
.of_xlate = rk_iommu_of_xlate,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = rk_iommu_attach_device,
- .map = rk_iommu_map,
- .unmap = rk_iommu_unmap,
+ .map_pages = rk_iommu_map,
+ .unmap_pages = rk_iommu_unmap,
.iova_to_phys = rk_iommu_iova_to_phys,
.free = rk_iommu_domain_free,
}
@@ -1280,15 +1253,9 @@ static int rk_iommu_probe(struct platform_device *pdev)
if (err)
return err;
- iommu->group = iommu_group_alloc();
- if (IS_ERR(iommu->group)) {
- err = PTR_ERR(iommu->group);
- goto err_unprepare_clocks;
- }
-
err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
if (err)
- goto err_put_group;
+ goto err_unprepare_clocks;
err = iommu_device_register(&iommu->iommu, &rk_iommu_ops, dev);
if (err)
@@ -1325,8 +1292,6 @@ err_pm_disable:
pm_runtime_disable(dev);
err_remove_sysfs:
iommu_device_sysfs_remove(&iommu->iommu);
-err_put_group:
- iommu_group_put(iommu->group);
err_unprepare_clocks:
clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
return err;
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index fbf59a8db29b..9a5196f523de 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -14,16 +14,300 @@
#include <linux/rcupdate.h>
#include <asm/pci_dma.h>
+#include "dma-iommu.h"
+
static const struct iommu_ops s390_iommu_ops;
+static struct kmem_cache *dma_region_table_cache;
+static struct kmem_cache *dma_page_table_cache;
+
+static u64 s390_iommu_aperture;
+static u32 s390_iommu_aperture_factor = 1;
+
struct s390_domain {
struct iommu_domain domain;
struct list_head devices;
+ struct zpci_iommu_ctrs ctrs;
unsigned long *dma_table;
spinlock_t list_lock;
struct rcu_head rcu;
};
+static inline unsigned int calc_rtx(dma_addr_t ptr)
+{
+ return ((unsigned long)ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
+}
+
+static inline unsigned int calc_sx(dma_addr_t ptr)
+{
+ return ((unsigned long)ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
+}
+
+static inline unsigned int calc_px(dma_addr_t ptr)
+{
+ return ((unsigned long)ptr >> PAGE_SHIFT) & ZPCI_PT_MASK;
+}
+
+static inline void set_pt_pfaa(unsigned long *entry, phys_addr_t pfaa)
+{
+ *entry &= ZPCI_PTE_FLAG_MASK;
+ *entry |= (pfaa & ZPCI_PTE_ADDR_MASK);
+}
+
+static inline void set_rt_sto(unsigned long *entry, phys_addr_t sto)
+{
+ *entry &= ZPCI_RTE_FLAG_MASK;
+ *entry |= (sto & ZPCI_RTE_ADDR_MASK);
+ *entry |= ZPCI_TABLE_TYPE_RTX;
+}
+
+static inline void set_st_pto(unsigned long *entry, phys_addr_t pto)
+{
+ *entry &= ZPCI_STE_FLAG_MASK;
+ *entry |= (pto & ZPCI_STE_ADDR_MASK);
+ *entry |= ZPCI_TABLE_TYPE_SX;
+}
+
+static inline void validate_rt_entry(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_VALID_MASK;
+ *entry &= ~ZPCI_TABLE_OFFSET_MASK;
+ *entry |= ZPCI_TABLE_VALID;
+ *entry |= ZPCI_TABLE_LEN_RTX;
+}
+
+static inline void validate_st_entry(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_VALID_MASK;
+ *entry |= ZPCI_TABLE_VALID;
+}
+
+static inline void invalidate_pt_entry(unsigned long *entry)
+{
+ WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_INVALID);
+ *entry &= ~ZPCI_PTE_VALID_MASK;
+ *entry |= ZPCI_PTE_INVALID;
+}
+
+static inline void validate_pt_entry(unsigned long *entry)
+{
+ WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID);
+ *entry &= ~ZPCI_PTE_VALID_MASK;
+ *entry |= ZPCI_PTE_VALID;
+}
+
+static inline void entry_set_protected(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_PROT_MASK;
+ *entry |= ZPCI_TABLE_PROTECTED;
+}
+
+static inline void entry_clr_protected(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_PROT_MASK;
+ *entry |= ZPCI_TABLE_UNPROTECTED;
+}
+
+static inline int reg_entry_isvalid(unsigned long entry)
+{
+ return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
+}
+
+static inline int pt_entry_isvalid(unsigned long entry)
+{
+ return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
+}
+
+static inline unsigned long *get_rt_sto(unsigned long entry)
+{
+ if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
+ return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
+ else
+ return NULL;
+}
+
+static inline unsigned long *get_st_pto(unsigned long entry)
+{
+ if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
+ return phys_to_virt(entry & ZPCI_STE_ADDR_MASK);
+ else
+ return NULL;
+}
+
+static int __init dma_alloc_cpu_table_caches(void)
+{
+ dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
+ ZPCI_TABLE_SIZE,
+ ZPCI_TABLE_ALIGN,
+ 0, NULL);
+ if (!dma_region_table_cache)
+ return -ENOMEM;
+
+ dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
+ ZPCI_PT_SIZE,
+ ZPCI_PT_ALIGN,
+ 0, NULL);
+ if (!dma_page_table_cache) {
+ kmem_cache_destroy(dma_region_table_cache);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static unsigned long *dma_alloc_cpu_table(gfp_t gfp)
+{
+ unsigned long *table, *entry;
+
+ table = kmem_cache_alloc(dma_region_table_cache, gfp);
+ if (!table)
+ return NULL;
+
+ for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
+ *entry = ZPCI_TABLE_INVALID;
+ return table;
+}
+
+static void dma_free_cpu_table(void *table)
+{
+ kmem_cache_free(dma_region_table_cache, table);
+}
+
+static void dma_free_page_table(void *table)
+{
+ kmem_cache_free(dma_page_table_cache, table);
+}
+
+static void dma_free_seg_table(unsigned long entry)
+{
+ unsigned long *sto = get_rt_sto(entry);
+ int sx;
+
+ for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
+ if (reg_entry_isvalid(sto[sx]))
+ dma_free_page_table(get_st_pto(sto[sx]));
+
+ dma_free_cpu_table(sto);
+}
+
+static void dma_cleanup_tables(unsigned long *table)
+{
+ int rtx;
+
+ if (!table)
+ return;
+
+ for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
+ if (reg_entry_isvalid(table[rtx]))
+ dma_free_seg_table(table[rtx]);
+
+ dma_free_cpu_table(table);
+}
+
+static unsigned long *dma_alloc_page_table(gfp_t gfp)
+{
+ unsigned long *table, *entry;
+
+ table = kmem_cache_alloc(dma_page_table_cache, gfp);
+ if (!table)
+ return NULL;
+
+ for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
+ *entry = ZPCI_PTE_INVALID;
+ return table;
+}
+
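+/*
+ * Return the segment table referenced by a region-table entry, allocating
+ * and installing a new one with cmpxchg() if the entry is still invalid.
+ * Losing the cmpxchg() race just means freeing our table and using the
+ * winner's instead.
+ */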
+static unsigned long *dma_get_seg_table_origin(unsigned long *rtep, gfp_t gfp)
+{
+ unsigned long old_rte, rte;
+ unsigned long *sto;
+
+ rte = READ_ONCE(*rtep);
+ if (reg_entry_isvalid(rte)) {
+ sto = get_rt_sto(rte);
+ } else {
+ sto = dma_alloc_cpu_table(gfp);
+ if (!sto)
+ return NULL;
+
+ set_rt_sto(&rte, virt_to_phys(sto));
+ validate_rt_entry(&rte);
+ entry_clr_protected(&rte);
+
+ old_rte = cmpxchg(rtep, ZPCI_TABLE_INVALID, rte);
+ if (old_rte != ZPCI_TABLE_INVALID) {
+ /* Someone else was faster, use theirs */
+ dma_free_cpu_table(sto);
+ sto = get_rt_sto(old_rte);
+ }
+ }
+ return sto;
+}
+
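+/* As above, one level down: get or allocate the page table for a segment-table entry. */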
+static unsigned long *dma_get_page_table_origin(unsigned long *step, gfp_t gfp)
+{
+ unsigned long old_ste, ste;
+ unsigned long *pto;
+
+ ste = READ_ONCE(*step);
+ if (reg_entry_isvalid(ste)) {
+ pto = get_st_pto(ste);
+ } else {
+ pto = dma_alloc_page_table(gfp);
+ if (!pto)
+ return NULL;
+ set_st_pto(&ste, virt_to_phys(pto));
+ validate_st_entry(&ste);
+ entry_clr_protected(&ste);
+
+ old_ste = cmpxchg(step, ZPCI_TABLE_INVALID, ste);
+ if (old_ste != ZPCI_TABLE_INVALID) {
+ /* Someone else was faster, use theirs */
+ dma_free_page_table(pto);
+ pto = get_st_pto(old_ste);
+ }
+ }
+ return pto;
+}
+
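+/*
+ * Walk the region -> segment -> page table hierarchy for a DMA address,
+ * allocating any missing intermediate tables, and return a pointer to the
+ * page-table entry covering that address.
+ */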
+static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr, gfp_t gfp)
+{
+ unsigned long *sto, *pto;
+ unsigned int rtx, sx, px;
+
+ rtx = calc_rtx(dma_addr);
+ sto = dma_get_seg_table_origin(&rto[rtx], gfp);
+ if (!sto)
+ return NULL;
+
+ sx = calc_sx(dma_addr);
+ pto = dma_get_page_table_origin(&sto[sx], gfp);
+ if (!pto)
+ return NULL;
+
+ px = calc_px(dma_addr);
+ return &pto[px];
+}
+
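+/*
+ * Compose the new page-table entry (address, validity and protection bits)
+ * locally and publish it atomically with xchg().
+ */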
+static void dma_update_cpu_trans(unsigned long *ptep, phys_addr_t page_addr, int flags)
+{
+ unsigned long pte;
+
+ pte = READ_ONCE(*ptep);
+ if (flags & ZPCI_PTE_INVALID) {
+ invalidate_pt_entry(&pte);
+ } else {
+ set_pt_pfaa(&pte, page_addr);
+ validate_pt_entry(&pte);
+ }
+
+ if (flags & ZPCI_TABLE_PROTECTED)
+ entry_set_protected(&pte);
+ else
+ entry_clr_protected(&pte);
+
+ xchg(ptep, pte);
+}
+
static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
{
return container_of(dom, struct s390_domain, domain);
@@ -31,21 +315,22 @@ static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap)
{
+ struct zpci_dev *zdev = to_zpci_dev(dev);
+
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
return true;
+ case IOMMU_CAP_DEFERRED_FLUSH:
+ return zdev->pft != PCI_FUNC_TYPE_ISM;
default:
return false;
}
}
-static struct iommu_domain *s390_domain_alloc(unsigned domain_type)
+static struct iommu_domain *s390_domain_alloc_paging(struct device *dev)
{
struct s390_domain *s390_domain;
- if (domain_type != IOMMU_DOMAIN_UNMANAGED)
- return NULL;
-
s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
if (!s390_domain)
return NULL;
@@ -84,14 +369,13 @@ static void s390_domain_free(struct iommu_domain *domain)
call_rcu(&s390_domain->rcu, s390_iommu_rcu_free_domain);
}
-static void __s390_iommu_detach_device(struct zpci_dev *zdev)
+static void s390_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
{
- struct s390_domain *s390_domain = zdev->s390_domain;
+ struct s390_domain *s390_domain = to_s390_domain(domain);
+ struct zpci_dev *zdev = to_zpci_dev(dev);
unsigned long flags;
- if (!s390_domain)
- return;
-
spin_lock_irqsave(&s390_domain->list_lock, flags);
list_del_rcu(&zdev->iommu_list);
spin_unlock_irqrestore(&s390_domain->list_lock, flags);
@@ -118,9 +402,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
return -EINVAL;
if (zdev->s390_domain)
- __s390_iommu_detach_device(zdev);
- else if (zdev->dma_table)
- zpci_dma_exit_device(zdev);
+ s390_iommu_detach_device(&zdev->s390_domain->domain, dev);
cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
virt_to_phys(s390_domain->dma_table), &status);
@@ -130,7 +412,6 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
*/
if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
return -EIO;
- zdev->dma_table = s390_domain->dma_table;
zdev->dma_table = s390_domain->dma_table;
zdev->s390_domain = s390_domain;
@@ -142,14 +423,6 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
return 0;
}
-static void s390_iommu_set_platform_dma(struct device *dev)
-{
- struct zpci_dev *zdev = to_zpci_dev(dev);
-
- __s390_iommu_detach_device(zdev);
- zpci_dma_init_device(zdev);
-}
-
static void s390_iommu_get_resv_regions(struct device *dev,
struct list_head *list)
{
@@ -190,6 +463,9 @@ static struct iommu_device *s390_iommu_probe_device(struct device *dev)
if (zdev->end_dma > ZPCI_TABLE_SIZE_RT - 1)
zdev->end_dma = ZPCI_TABLE_SIZE_RT - 1;
+ if (zdev->tlb_refresh)
+ dev->iommu->shadow_on_flush = 1;
+
return &zdev->iommu_dev;
}
@@ -202,7 +478,13 @@ static void s390_iommu_release_device(struct device *dev)
* to the device, but keep it attached to other devices in the group.
*/
if (zdev)
- __s390_iommu_detach_device(zdev);
+ s390_iommu_detach_device(&zdev->s390_domain->domain, dev);
+}
+
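+/* Refresh the device's IOTLB for its entire DMA aperture. */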
+static int zpci_refresh_all(struct zpci_dev *zdev)
+{
+ return zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
+ zdev->end_dma - zdev->start_dma + 1);
}
static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
@@ -212,8 +494,8 @@ static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
rcu_read_lock();
list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
- zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
- zdev->end_dma - zdev->start_dma + 1);
+ atomic64_inc(&s390_domain->ctrs.global_rpcits);
+ zpci_refresh_all(zdev);
}
rcu_read_unlock();
}
@@ -231,26 +513,40 @@ static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
rcu_read_lock();
list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
+ atomic64_inc(&s390_domain->ctrs.sync_rpcits);
zpci_refresh_trans((u64)zdev->fh << 32, gather->start,
size);
}
rcu_read_unlock();
}
-static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
- unsigned long iova, size_t size)
+static int s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
{
struct s390_domain *s390_domain = to_s390_domain(domain);
struct zpci_dev *zdev;
+ int ret = 0;
rcu_read_lock();
list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
if (!zdev->tlb_refresh)
continue;
- zpci_refresh_trans((u64)zdev->fh << 32,
- iova, size);
+ atomic64_inc(&s390_domain->ctrs.sync_map_rpcits);
+ ret = zpci_refresh_trans((u64)zdev->fh << 32,
+ iova, size);
+ /*
+ * On -ENOMEM fall back to a global refresh to let the hypervisor
+ * discover invalidated entries, allowing it to free IOVAs and
+ * unpin pages.
+ */
+ if (ret == -ENOMEM) {
+ ret = zpci_refresh_all(zdev);
+ if (ret)
+ break;
+ }
}
rcu_read_unlock();
+
+ return ret;
}
static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
@@ -330,16 +626,15 @@ static int s390_iommu_map_pages(struct iommu_domain *domain,
if (!IS_ALIGNED(iova | paddr, pgsize))
return -EINVAL;
- if (!(prot & IOMMU_READ))
- return -EINVAL;
-
if (!(prot & IOMMU_WRITE))
flags |= ZPCI_TABLE_PROTECTED;
rc = s390_iommu_validate_trans(s390_domain, paddr, iova,
- pgcount, flags, gfp);
- if (!rc)
+ pgcount, flags, gfp);
+ if (!rc) {
*mapped = size;
+ atomic64_add(pgcount, &s390_domain->ctrs.mapped_pages);
+ }
return rc;
}
@@ -395,12 +690,26 @@ static size_t s390_iommu_unmap_pages(struct iommu_domain *domain,
return 0;
iommu_iotlb_gather_add_range(gather, iova, size);
+ atomic64_add(pgcount, &s390_domain->ctrs.unmapped_pages);
return size;
}
+static void s390_iommu_probe_finalize(struct device *dev)
+{
+ iommu_setup_dma_ops(dev, 0, U64_MAX);
+}
+
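+/* Return the usage counters of the domain a device is attached to, if any. */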
+struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)
+{
+ if (!zdev || !zdev->s390_domain)
+ return NULL;
+ return &zdev->s390_domain->ctrs;
+}
+
int zpci_init_iommu(struct zpci_dev *zdev)
{
+ u64 aperture_size;
int rc = 0;
rc = iommu_device_sysfs_add(&zdev->iommu_dev, NULL, NULL,
@@ -412,6 +721,12 @@ int zpci_init_iommu(struct zpci_dev *zdev)
if (rc)
goto out_sysfs;
+ zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
+ aperture_size = min3(s390_iommu_aperture,
+ ZPCI_TABLE_SIZE_RT - zdev->start_dma,
+ zdev->end_dma - zdev->start_dma + 1);
+ zdev->end_dma = zdev->start_dma + aperture_size - 1;
+
return 0;
out_sysfs:
@@ -427,13 +742,52 @@ void zpci_destroy_iommu(struct zpci_dev *zdev)
iommu_device_sysfs_remove(&zdev->iommu_dev);
}
+static int __init s390_iommu_setup(char *str)
+{
+ if (!strcmp(str, "strict")) {
+ pr_warn("s390_iommu=strict deprecated; use iommu.strict=1 instead\n");
+ iommu_set_dma_strict();
+ }
+ return 1;
+}
+
+__setup("s390_iommu=", s390_iommu_setup);
+
+static int __init s390_iommu_aperture_setup(char *str)
+{
+ if (kstrtou32(str, 10, &s390_iommu_aperture_factor))
+ s390_iommu_aperture_factor = 1;
+ return 1;
+}
+
+__setup("s390_iommu_aperture=", s390_iommu_aperture_setup);
+
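+/*
+ * Force DAC (64-bit) DMA addressing, derive the maximum IOVA aperture from
+ * high_memory and s390_iommu_aperture_factor (a factor of 0 means no limit),
+ * and create the translation-table caches.
+ */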
+static int __init s390_iommu_init(void)
+{
+ int rc;
+
+ iommu_dma_forcedac = true;
+ s390_iommu_aperture = (u64)virt_to_phys(high_memory);
+ if (!s390_iommu_aperture_factor)
+ s390_iommu_aperture = ULONG_MAX;
+ else
+ s390_iommu_aperture *= s390_iommu_aperture_factor;
+
+ rc = dma_alloc_cpu_table_caches();
+ if (rc)
+ return rc;
+
+ return rc;
+}
+subsys_initcall(s390_iommu_init);
+
static const struct iommu_ops s390_iommu_ops = {
.capable = s390_iommu_capable,
- .domain_alloc = s390_domain_alloc,
+ .domain_alloc_paging = s390_domain_alloc_paging,
.probe_device = s390_iommu_probe_device,
+ .probe_finalize = s390_iommu_probe_finalize,
.release_device = s390_iommu_release_device,
.device_group = generic_device_group,
- .set_platform_dma_ops = s390_iommu_set_platform_dma,
.pgsize_bitmap = SZ_4K,
.get_resv_regions = s390_iommu_get_resv_regions,
.default_domain_ops = &(const struct iommu_domain_ops) {
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index 2fa9afebd4f5..2eb9fb46703b 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -70,7 +70,6 @@ struct sprd_iommu_device {
void __iomem *base;
struct device *dev;
struct iommu_device iommu;
- struct iommu_group *group;
struct clk *eb;
};
@@ -134,13 +133,10 @@ sprd_iommu_pgt_size(struct iommu_domain *domain)
SPRD_IOMMU_PAGE_SHIFT) * sizeof(u32);
}
-static struct iommu_domain *sprd_iommu_domain_alloc(unsigned int domain_type)
+static struct iommu_domain *sprd_iommu_domain_alloc_paging(struct device *dev)
{
struct sprd_iommu_domain *dom;
- if (domain_type != IOMMU_DOMAIN_DMA && domain_type != IOMMU_DOMAIN_UNMANAGED)
- return NULL;
-
dom = kzalloc(sizeof(*dom), GFP_KERNEL);
if (!dom)
return NULL;
@@ -345,8 +341,8 @@ static size_t sprd_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
return size;
}
-static void sprd_iommu_sync_map(struct iommu_domain *domain,
- unsigned long iova, size_t size)
+static int sprd_iommu_sync_map(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
{
struct sprd_iommu_domain *dom = to_sprd_domain(domain);
unsigned int reg;
@@ -358,6 +354,7 @@ static void sprd_iommu_sync_map(struct iommu_domain *domain,
/* clear IOMMU TLB buffer after page table updated */
sprd_iommu_write(dom->sdev, reg, 0xffffffff);
+ return 0;
}
static void sprd_iommu_sync(struct iommu_domain *domain,
@@ -399,13 +396,6 @@ static struct iommu_device *sprd_iommu_probe_device(struct device *dev)
return &sdev->iommu;
}
-static struct iommu_group *sprd_iommu_device_group(struct device *dev)
-{
- struct sprd_iommu_device *sdev = dev_iommu_priv_get(dev);
-
- return iommu_group_ref_get(sdev->group);
-}
-
static int sprd_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
struct platform_device *pdev;
@@ -421,9 +411,9 @@ static int sprd_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
static const struct iommu_ops sprd_iommu_ops = {
- .domain_alloc = sprd_iommu_domain_alloc,
+ .domain_alloc_paging = sprd_iommu_domain_alloc_paging,
.probe_device = sprd_iommu_probe_device,
- .device_group = sprd_iommu_device_group,
+ .device_group = generic_single_device_group,
.of_xlate = sprd_iommu_of_xlate,
.pgsize_bitmap = SPRD_IOMMU_PAGE_SIZE,
.owner = THIS_MODULE,
@@ -496,16 +486,9 @@ static int sprd_iommu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sdev);
sdev->dev = dev;
- /* All the client devices are in the same iommu-group */
- sdev->group = iommu_group_alloc();
- if (IS_ERR(sdev->group)) {
- ret = PTR_ERR(sdev->group);
- goto free_page;
- }
-
ret = iommu_device_sysfs_add(&sdev->iommu, dev, NULL, dev_name(dev));
if (ret)
- goto put_group;
+ goto free_page;
ret = iommu_device_register(&sdev->iommu, &sprd_iommu_ops, dev);
if (ret)
@@ -530,8 +513,6 @@ unregister_iommu:
iommu_device_unregister(&sdev->iommu);
remove_sysfs:
iommu_device_sysfs_remove(&sdev->iommu);
-put_group:
- iommu_group_put(sdev->group);
free_page:
dma_free_coherent(sdev->dev, SPRD_IOMMU_PAGE_SIZE, sdev->prot_page_va, sdev->prot_page_pa);
return ret;
@@ -543,9 +524,6 @@ static void sprd_iommu_remove(struct platform_device *pdev)
dma_free_coherent(sdev->dev, SPRD_IOMMU_PAGE_SIZE, sdev->prot_page_va, sdev->prot_page_pa);
- iommu_group_put(sdev->group);
- sdev->group = NULL;
-
platform_set_drvdata(pdev, NULL);
iommu_device_sysfs_remove(&sdev->iommu);
iommu_device_unregister(&sdev->iommu);
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index 74c5cb93e900..41484a5a399b 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -107,7 +107,6 @@ struct sun50i_iommu {
struct clk *clk;
struct iommu_domain *domain;
- struct iommu_group *group;
struct kmem_cache *pt_pool;
};
@@ -402,8 +401,8 @@ static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain)
spin_unlock_irqrestore(&iommu->iommu_lock, flags);
}
-static void sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
- unsigned long iova, size_t size)
+static int sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
{
struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
struct sun50i_iommu *iommu = sun50i_domain->iommu;
@@ -412,6 +411,8 @@ static void sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
spin_lock_irqsave(&iommu->iommu_lock, flags);
sun50i_iommu_zap_range(iommu, iova, size);
spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+
+ return 0;
}
static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain,
@@ -589,7 +590,8 @@ static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain,
}
static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+ phys_addr_t paddr, size_t size, size_t count,
+ int prot, gfp_t gfp, size_t *mapped)
{
struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
struct sun50i_iommu *iommu = sun50i_domain->iommu;
@@ -616,13 +618,14 @@ static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova,
*pte_addr = sun50i_mk_pte(paddr, prot);
sun50i_table_flush(sun50i_domain, pte_addr, 1);
+ *mapped = size;
out:
return ret;
}
static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *gather)
+ size_t size, size_t count, struct iommu_iotlb_gather *gather)
{
struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
phys_addr_t pt_phys;
@@ -667,14 +670,11 @@ static phys_addr_t sun50i_iommu_iova_to_phys(struct iommu_domain *domain,
sun50i_iova_get_page_offset(iova);
}
-static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *
+sun50i_iommu_domain_alloc_paging(struct device *dev)
{
struct sun50i_iommu_domain *sun50i_domain;
- if (type != IOMMU_DOMAIN_DMA &&
- type != IOMMU_DOMAIN_UNMANAGED)
- return NULL;
-
sun50i_domain = kzalloc(sizeof(*sun50i_domain), GFP_KERNEL);
if (!sun50i_domain)
return NULL;
@@ -757,21 +757,32 @@ static void sun50i_iommu_detach_domain(struct sun50i_iommu *iommu,
iommu->domain = NULL;
}
-static void sun50i_iommu_detach_device(struct iommu_domain *domain,
- struct device *dev)
+static int sun50i_iommu_identity_attach(struct iommu_domain *identity_domain,
+ struct device *dev)
{
- struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
struct sun50i_iommu *iommu = dev_iommu_priv_get(dev);
+ struct sun50i_iommu_domain *sun50i_domain;
dev_dbg(dev, "Detaching from IOMMU domain\n");
- if (iommu->domain != domain)
- return;
+ if (iommu->domain == identity_domain)
+ return 0;
+ sun50i_domain = to_sun50i_domain(iommu->domain);
if (refcount_dec_and_test(&sun50i_domain->refcnt))
sun50i_iommu_detach_domain(iommu, sun50i_domain);
+ return 0;
}
+static struct iommu_domain_ops sun50i_iommu_identity_ops = {
+ .attach_dev = sun50i_iommu_identity_attach,
+};
+
+static struct iommu_domain sun50i_iommu_identity_domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &sun50i_iommu_identity_ops,
+};
+
static int sun50i_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
@@ -789,8 +800,7 @@ static int sun50i_iommu_attach_device(struct iommu_domain *domain,
if (iommu->domain == domain)
return 0;
- if (iommu->domain)
- sun50i_iommu_detach_device(iommu->domain, dev);
+ sun50i_iommu_identity_attach(&sun50i_iommu_identity_domain, dev);
sun50i_iommu_attach_domain(iommu, sun50i_domain);
@@ -808,13 +818,6 @@ static struct iommu_device *sun50i_iommu_probe_device(struct device *dev)
return &iommu->iommu;
}
-static struct iommu_group *sun50i_iommu_device_group(struct device *dev)
-{
- struct sun50i_iommu *iommu = sun50i_iommu_from_dev(dev);
-
- return iommu_group_ref_get(iommu->group);
-}
-
static int sun50i_iommu_of_xlate(struct device *dev,
struct of_phandle_args *args)
{
@@ -827,9 +830,10 @@ static int sun50i_iommu_of_xlate(struct device *dev,
}
static const struct iommu_ops sun50i_iommu_ops = {
+ .identity_domain = &sun50i_iommu_identity_domain,
.pgsize_bitmap = SZ_4K,
- .device_group = sun50i_iommu_device_group,
- .domain_alloc = sun50i_iommu_domain_alloc,
+ .device_group = generic_single_device_group,
+ .domain_alloc_paging = sun50i_iommu_domain_alloc_paging,
.of_xlate = sun50i_iommu_of_xlate,
.probe_device = sun50i_iommu_probe_device,
.default_domain_ops = &(const struct iommu_domain_ops) {
@@ -838,8 +842,8 @@ static const struct iommu_ops sun50i_iommu_ops = {
.iotlb_sync_map = sun50i_iommu_iotlb_sync_map,
.iotlb_sync = sun50i_iommu_iotlb_sync,
.iova_to_phys = sun50i_iommu_iova_to_phys,
- .map = sun50i_iommu_map,
- .unmap = sun50i_iommu_unmap,
+ .map_pages = sun50i_iommu_map,
+ .unmap_pages = sun50i_iommu_unmap,
.free = sun50i_iommu_domain_free,
}
};
@@ -985,6 +989,7 @@ static int sun50i_iommu_probe(struct platform_device *pdev)
if (!iommu)
return -ENOMEM;
spin_lock_init(&iommu->iommu_lock);
+ iommu->domain = &sun50i_iommu_identity_domain;
platform_set_drvdata(pdev, iommu);
iommu->dev = &pdev->dev;
@@ -995,42 +1000,36 @@ static int sun50i_iommu_probe(struct platform_device *pdev)
if (!iommu->pt_pool)
return -ENOMEM;
- iommu->group = iommu_group_alloc();
- if (IS_ERR(iommu->group)) {
- ret = PTR_ERR(iommu->group);
- goto err_free_cache;
- }
-
iommu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(iommu->base)) {
ret = PTR_ERR(iommu->base);
- goto err_free_group;
+ goto err_free_cache;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
- goto err_free_group;
+ goto err_free_cache;
}
iommu->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(iommu->clk)) {
dev_err(&pdev->dev, "Couldn't get our clock.\n");
ret = PTR_ERR(iommu->clk);
- goto err_free_group;
+ goto err_free_cache;
}
iommu->reset = devm_reset_control_get(&pdev->dev, NULL);
if (IS_ERR(iommu->reset)) {
dev_err(&pdev->dev, "Couldn't get our reset line.\n");
ret = PTR_ERR(iommu->reset);
- goto err_free_group;
+ goto err_free_cache;
}
ret = iommu_device_sysfs_add(&iommu->iommu, &pdev->dev,
NULL, dev_name(&pdev->dev));
if (ret)
- goto err_free_group;
+ goto err_free_cache;
ret = iommu_device_register(&iommu->iommu, &sun50i_iommu_ops, &pdev->dev);
if (ret)
@@ -1049,9 +1048,6 @@ err_unregister:
err_remove_sysfs:
iommu_device_sysfs_remove(&iommu->iommu);
-err_free_group:
- iommu_group_put(iommu->group);
-
err_free_cache:
kmem_cache_destroy(iommu->pt_pool);
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
deleted file mode 100644
index a482ff838b53..000000000000
--- a/drivers/iommu/tegra-gart.c
+++ /dev/null
@@ -1,371 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * IOMMU API for Graphics Address Relocation Table on Tegra20
- *
- * Copyright (c) 2010-2012, NVIDIA CORPORATION. All rights reserved.
- *
- * Author: Hiroshi DOYU <hdoyu@nvidia.com>
- */
-
-#define dev_fmt(fmt) "gart: " fmt
-
-#include <linux/io.h>
-#include <linux/iommu.h>
-#include <linux/moduleparam.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/vmalloc.h>
-
-#include <soc/tegra/mc.h>
-
-#define GART_REG_BASE 0x24
-#define GART_CONFIG (0x24 - GART_REG_BASE)
-#define GART_ENTRY_ADDR (0x28 - GART_REG_BASE)
-#define GART_ENTRY_DATA (0x2c - GART_REG_BASE)
-
-#define GART_ENTRY_PHYS_ADDR_VALID BIT(31)
-
-#define GART_PAGE_SHIFT 12
-#define GART_PAGE_SIZE (1 << GART_PAGE_SHIFT)
-#define GART_PAGE_MASK GENMASK(30, GART_PAGE_SHIFT)
-
-/* bitmap of the page sizes currently supported */
-#define GART_IOMMU_PGSIZES (GART_PAGE_SIZE)
-
-struct gart_device {
- void __iomem *regs;
- u32 *savedata;
- unsigned long iovmm_base; /* offset to vmm_area start */
- unsigned long iovmm_end; /* offset to vmm_area end */
- spinlock_t pte_lock; /* for pagetable */
- spinlock_t dom_lock; /* for active domain */
- unsigned int active_devices; /* number of active devices */
- struct iommu_domain *active_domain; /* current active domain */
- struct iommu_device iommu; /* IOMMU Core handle */
- struct device *dev;
-};
-
-static struct gart_device *gart_handle; /* unique for a system */
-
-static bool gart_debug;
-
-/*
- * Any interaction between any block on PPSB and a block on APB or AHB
- * must have these read-back to ensure the APB/AHB bus transaction is
- * complete before initiating activity on the PPSB block.
- */
-#define FLUSH_GART_REGS(gart) readl_relaxed((gart)->regs + GART_CONFIG)
-
-#define for_each_gart_pte(gart, iova) \
- for (iova = gart->iovmm_base; \
- iova < gart->iovmm_end; \
- iova += GART_PAGE_SIZE)
-
-static inline void gart_set_pte(struct gart_device *gart,
- unsigned long iova, unsigned long pte)
-{
- writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
- writel_relaxed(pte, gart->regs + GART_ENTRY_DATA);
-}
-
-static inline unsigned long gart_read_pte(struct gart_device *gart,
- unsigned long iova)
-{
- unsigned long pte;
-
- writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
- pte = readl_relaxed(gart->regs + GART_ENTRY_DATA);
-
- return pte;
-}
-
-static void do_gart_setup(struct gart_device *gart, const u32 *data)
-{
- unsigned long iova;
-
- for_each_gart_pte(gart, iova)
- gart_set_pte(gart, iova, data ? *(data++) : 0);
-
- writel_relaxed(1, gart->regs + GART_CONFIG);
- FLUSH_GART_REGS(gart);
-}
-
-static inline bool gart_iova_range_invalid(struct gart_device *gart,
- unsigned long iova, size_t bytes)
-{
- return unlikely(iova < gart->iovmm_base || bytes != GART_PAGE_SIZE ||
- iova + bytes > gart->iovmm_end);
-}
-
-static inline bool gart_pte_valid(struct gart_device *gart, unsigned long iova)
-{
- return !!(gart_read_pte(gart, iova) & GART_ENTRY_PHYS_ADDR_VALID);
-}
-
-static int gart_iommu_attach_dev(struct iommu_domain *domain,
- struct device *dev)
-{
- struct gart_device *gart = gart_handle;
- int ret = 0;
-
- spin_lock(&gart->dom_lock);
-
- if (gart->active_domain && gart->active_domain != domain) {
- ret = -EINVAL;
- } else if (dev_iommu_priv_get(dev) != domain) {
- dev_iommu_priv_set(dev, domain);
- gart->active_domain = domain;
- gart->active_devices++;
- }
-
- spin_unlock(&gart->dom_lock);
-
- return ret;
-}
-
-static void gart_iommu_set_platform_dma(struct device *dev)
-{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct gart_device *gart = gart_handle;
-
- spin_lock(&gart->dom_lock);
-
- if (dev_iommu_priv_get(dev) == domain) {
- dev_iommu_priv_set(dev, NULL);
-
- if (--gart->active_devices == 0)
- gart->active_domain = NULL;
- }
-
- spin_unlock(&gart->dom_lock);
-}
-
-static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
-{
- struct iommu_domain *domain;
-
- if (type != IOMMU_DOMAIN_UNMANAGED)
- return NULL;
-
- domain = kzalloc(sizeof(*domain), GFP_KERNEL);
- if (domain) {
- domain->geometry.aperture_start = gart_handle->iovmm_base;
- domain->geometry.aperture_end = gart_handle->iovmm_end - 1;
- domain->geometry.force_aperture = true;
- }
-
- return domain;
-}
-
-static void gart_iommu_domain_free(struct iommu_domain *domain)
-{
- WARN_ON(gart_handle->active_domain == domain);
- kfree(domain);
-}
-
-static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova,
- unsigned long pa)
-{
- if (unlikely(gart_debug && gart_pte_valid(gart, iova))) {
- dev_err(gart->dev, "Page entry is in-use\n");
- return -EINVAL;
- }
-
- gart_set_pte(gart, iova, GART_ENTRY_PHYS_ADDR_VALID | pa);
-
- return 0;
-}
-
-static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
-{
- struct gart_device *gart = gart_handle;
- int ret;
-
- if (gart_iova_range_invalid(gart, iova, bytes))
- return -EINVAL;
-
- spin_lock(&gart->pte_lock);
- ret = __gart_iommu_map(gart, iova, (unsigned long)pa);
- spin_unlock(&gart->pte_lock);
-
- return ret;
-}
-
-static inline int __gart_iommu_unmap(struct gart_device *gart,
- unsigned long iova)
-{
- if (unlikely(gart_debug && !gart_pte_valid(gart, iova))) {
- dev_err(gart->dev, "Page entry is invalid\n");
- return -EINVAL;
- }
-
- gart_set_pte(gart, iova, 0);
-
- return 0;
-}
-
-static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t bytes, struct iommu_iotlb_gather *gather)
-{
- struct gart_device *gart = gart_handle;
- int err;
-
- if (gart_iova_range_invalid(gart, iova, bytes))
- return 0;
-
- spin_lock(&gart->pte_lock);
- err = __gart_iommu_unmap(gart, iova);
- spin_unlock(&gart->pte_lock);
-
- return err ? 0 : bytes;
-}
-
-static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
- dma_addr_t iova)
-{
- struct gart_device *gart = gart_handle;
- unsigned long pte;
-
- if (gart_iova_range_invalid(gart, iova, GART_PAGE_SIZE))
- return -EINVAL;
-
- spin_lock(&gart->pte_lock);
- pte = gart_read_pte(gart, iova);
- spin_unlock(&gart->pte_lock);
-
- return pte & GART_PAGE_MASK;
-}
-
-static struct iommu_device *gart_iommu_probe_device(struct device *dev)
-{
- if (!dev_iommu_fwspec_get(dev))
- return ERR_PTR(-ENODEV);
-
- return &gart_handle->iommu;
-}
-
-static int gart_iommu_of_xlate(struct device *dev,
- struct of_phandle_args *args)
-{
- return 0;
-}
-
-static void gart_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
- size_t size)
-{
- FLUSH_GART_REGS(gart_handle);
-}
-
-static void gart_iommu_sync(struct iommu_domain *domain,
- struct iommu_iotlb_gather *gather)
-{
- size_t length = gather->end - gather->start + 1;
-
- gart_iommu_sync_map(domain, gather->start, length);
-}
-
-static const struct iommu_ops gart_iommu_ops = {
- .domain_alloc = gart_iommu_domain_alloc,
- .probe_device = gart_iommu_probe_device,
- .device_group = generic_device_group,
- .set_platform_dma_ops = gart_iommu_set_platform_dma,
- .pgsize_bitmap = GART_IOMMU_PGSIZES,
- .of_xlate = gart_iommu_of_xlate,
- .default_domain_ops = &(const struct iommu_domain_ops) {
- .attach_dev = gart_iommu_attach_dev,
- .map = gart_iommu_map,
- .unmap = gart_iommu_unmap,
- .iova_to_phys = gart_iommu_iova_to_phys,
- .iotlb_sync_map = gart_iommu_sync_map,
- .iotlb_sync = gart_iommu_sync,
- .free = gart_iommu_domain_free,
- }
-};
-
-int tegra_gart_suspend(struct gart_device *gart)
-{
- u32 *data = gart->savedata;
- unsigned long iova;
-
- /*
- * All GART users shall be suspended at this point. Disable
- * address translation to trap all GART accesses as invalid
- * memory accesses.
- */
- writel_relaxed(0, gart->regs + GART_CONFIG);
- FLUSH_GART_REGS(gart);
-
- for_each_gart_pte(gart, iova)
- *(data++) = gart_read_pte(gart, iova);
-
- return 0;
-}
-
-int tegra_gart_resume(struct gart_device *gart)
-{
- do_gart_setup(gart, gart->savedata);
-
- return 0;
-}
-
-struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
-{
- struct gart_device *gart;
- struct resource *res;
- int err;
-
- BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);
-
- /* the GART memory aperture is required */
- res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 1);
- if (!res) {
- dev_err(dev, "Memory aperture resource unavailable\n");
- return ERR_PTR(-ENXIO);
- }
-
- gart = kzalloc(sizeof(*gart), GFP_KERNEL);
- if (!gart)
- return ERR_PTR(-ENOMEM);
-
- gart_handle = gart;
-
- gart->dev = dev;
- gart->regs = mc->regs + GART_REG_BASE;
- gart->iovmm_base = res->start;
- gart->iovmm_end = res->end + 1;
- spin_lock_init(&gart->pte_lock);
- spin_lock_init(&gart->dom_lock);
-
- do_gart_setup(gart, NULL);
-
- err = iommu_device_sysfs_add(&gart->iommu, dev, NULL, "gart");
- if (err)
- goto free_gart;
-
- err = iommu_device_register(&gart->iommu, &gart_iommu_ops, dev);
- if (err)
- goto remove_sysfs;
-
- gart->savedata = vmalloc(resource_size(res) / GART_PAGE_SIZE *
- sizeof(u32));
- if (!gart->savedata) {
- err = -ENOMEM;
- goto unregister_iommu;
- }
-
- return gart;
-
-unregister_iommu:
- iommu_device_unregister(&gart->iommu);
-remove_sysfs:
- iommu_device_sysfs_remove(&gart->iommu);
-free_gart:
- kfree(gart);
-
- return ERR_PTR(err);
-}
-
-module_param(gart_debug, bool, 0644);
-MODULE_PARM_DESC(gart_debug, "Enable GART debugging");
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index e445f80d0226..310871728ab4 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -272,13 +272,10 @@ static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
clear_bit(id, smmu->asids);
}
-static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
+static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
{
struct tegra_smmu_as *as;
- if (type != IOMMU_DOMAIN_UNMANAGED)
- return NULL;
-
as = kzalloc(sizeof(*as), GFP_KERNEL);
if (!as)
return NULL;
@@ -511,23 +508,39 @@ disable:
return err;
}
-static void tegra_smmu_set_platform_dma(struct device *dev)
+static int tegra_smmu_identity_attach(struct iommu_domain *identity_domain,
+ struct device *dev)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct tegra_smmu_as *as = to_smmu_as(domain);
- struct tegra_smmu *smmu = as->smmu;
+ struct tegra_smmu_as *as;
+ struct tegra_smmu *smmu;
unsigned int index;
if (!fwspec)
- return;
+ return -ENODEV;
+ if (domain == identity_domain || !domain)
+ return 0;
+
+ as = to_smmu_as(domain);
+ smmu = as->smmu;
for (index = 0; index < fwspec->num_ids; index++) {
tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
tegra_smmu_as_unprepare(smmu, as);
}
+ return 0;
}
+static struct iommu_domain_ops tegra_smmu_identity_ops = {
+ .attach_dev = tegra_smmu_identity_attach,
+};
+
+static struct iommu_domain tegra_smmu_identity_domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &tegra_smmu_identity_ops,
+};
+
static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
u32 value)
{
@@ -751,7 +764,8 @@ __tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
}
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+ phys_addr_t paddr, size_t size, size_t count,
+ int prot, gfp_t gfp, size_t *mapped)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
unsigned long flags;
@@ -761,11 +775,14 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
ret = __tegra_smmu_map(domain, iova, paddr, size, prot, gfp, &flags);
spin_unlock_irqrestore(&as->lock, flags);
+ if (!ret)
+ *mapped = size;
+
return ret;
}
static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *gather)
+ size_t size, size_t count, struct iommu_iotlb_gather *gather)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
unsigned long flags;
@@ -962,17 +979,28 @@ static int tegra_smmu_of_xlate(struct device *dev,
return iommu_fwspec_add_ids(dev, &id, 1);
}
+static int tegra_smmu_def_domain_type(struct device *dev)
+{
+ /*
+ * FIXME: For now we want to run all translation in IDENTITY mode, due
+ * to some device quirks. Better would be to just quirk the troubled
+ * devices.
+ */
+ return IOMMU_DOMAIN_IDENTITY;
+}
+
static const struct iommu_ops tegra_smmu_ops = {
- .domain_alloc = tegra_smmu_domain_alloc,
+ .identity_domain = &tegra_smmu_identity_domain,
+ .def_domain_type = &tegra_smmu_def_domain_type,
+ .domain_alloc_paging = tegra_smmu_domain_alloc_paging,
.probe_device = tegra_smmu_probe_device,
.device_group = tegra_smmu_device_group,
- .set_platform_dma_ops = tegra_smmu_set_platform_dma,
.of_xlate = tegra_smmu_of_xlate,
.pgsize_bitmap = SZ_4K,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = tegra_smmu_attach_dev,
- .map = tegra_smmu_map,
- .unmap = tegra_smmu_unmap,
+ .map_pages = tegra_smmu_map,
+ .unmap_pages = tegra_smmu_unmap,
.iova_to_phys = tegra_smmu_iova_to_phys,
.free = tegra_smmu_domain_free,
}
@@ -1056,8 +1084,6 @@ DEFINE_SHOW_ATTRIBUTE(tegra_smmu_clients);
static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
smmu->debugfs = debugfs_create_dir("smmu", NULL);
- if (!smmu->debugfs)
- return;
debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
&tegra_smmu_swgroups_fops);
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 17dcd826f5c2..379ebe03efb6 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -85,7 +85,7 @@ struct viommu_request {
void *writeback;
unsigned int write_offset;
unsigned int len;
- char buf[];
+ char buf[] __counted_by(len);
};
#define VIOMMU_FAULT_RESV_MASK 0xffffff00
@@ -230,7 +230,7 @@ static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
if (write_offset <= 0)
return -EINVAL;
- req = kzalloc(sizeof(*req) + len, GFP_ATOMIC);
+ req = kzalloc(struct_size(req, buf, len), GFP_ATOMIC);
if (!req)
return -ENOMEM;
diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c
index bf03abb94e68..68d82a682bf6 100644
--- a/drivers/leds/rgb/leds-qcom-lpg.c
+++ b/drivers/leds/rgb/leds-qcom-lpg.c
@@ -1085,7 +1085,6 @@ static const struct pwm_ops lpg_pwm_ops = {
.request = lpg_pwm_request,
.apply = lpg_pwm_apply,
.get_state = lpg_pwm_get_state,
- .owner = THIS_MODULE,
};
static int lpg_add_pwm(struct lpg *lpg)
diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c
index 55a9f8c3a150..779f1268286e 100644
--- a/drivers/macintosh/macio-adb.c
+++ b/drivers/macintosh/macio-adb.c
@@ -123,6 +123,7 @@ int macio_init(void)
irq = irq_of_parse_and_map(adbs, 0);
of_node_put(adbs);
if (request_irq(irq, macio_adb_interrupt, 0, "ADB", (void *)0)) {
+ iounmap(adb);
printk(KERN_ERR "ADB: can't get irq %d\n", irq);
return -EAGAIN;
}
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
index d67db63b482d..778faeced81e 100644
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -33,10 +33,9 @@
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/brcm-message.h>
@@ -1494,7 +1493,6 @@ static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
{
struct device *dev = &pdev->dev;
struct device_node *dn = pdev->dev.of_node;
- const struct of_device_id *match;
const int *hw_type;
int err;
@@ -1509,11 +1507,9 @@ static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
pdcs->hw_type = PDC_HW;
- match = of_match_device(of_match_ptr(pdc_mbox_of_match), dev);
- if (match != NULL) {
- hw_type = match->data;
+ hw_type = device_get_match_data(dev);
+ if (hw_type)
pdcs->hw_type = *hw_type;
- }
return 0;
}
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 3ef4dd8adf5d..0af739ab571c 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -20,7 +20,9 @@
#include <linux/suspend.h>
#include <linux/slab.h>
-#define IMX_MU_CHANS 17
+#include "mailbox.h"
+
+#define IMX_MU_CHANS 24
/* TX0/RX0/RXDB[0-3] */
#define IMX_MU_SCU_CHANS 6
/* TX0/RX0 */
@@ -39,6 +41,7 @@ enum imx_mu_chan_type {
IMX_MU_TYPE_TXDB = 2, /* Tx doorbell */
IMX_MU_TYPE_RXDB = 3, /* Rx doorbell */
IMX_MU_TYPE_RST = 4, /* Reset */
+ IMX_MU_TYPE_TXDB_V2 = 5, /* Tx doorbell with S/W ACK */
};
enum imx_mu_xcr {
@@ -226,6 +229,9 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv,
imx_mu_xcr_rmw(priv, IMX_MU_GCR, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);
tasklet_schedule(&cp->txdb_tasklet);
break;
+ case IMX_MU_TYPE_TXDB_V2:
+ imx_mu_xcr_rmw(priv, IMX_MU_GCR, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);
+ break;
default:
dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
return -EINVAL;
@@ -554,6 +560,9 @@ static int imx_mu_startup(struct mbox_chan *chan)
int ret;
pm_runtime_get_sync(priv->dev);
+ if (cp->type == IMX_MU_TYPE_TXDB_V2)
+ return 0;
+
if (cp->type == IMX_MU_TYPE_TXDB) {
/* Tx doorbell don't have ACK support */
tasklet_init(&cp->txdb_tasklet, imx_mu_txdb_tasklet,
@@ -595,6 +604,11 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
int ret;
u32 sr;
+ if (cp->type == IMX_MU_TYPE_TXDB_V2) {
+ pm_runtime_put_sync(priv->dev);
+ return;
+ }
+
if (cp->type == IMX_MU_TYPE_TXDB) {
tasklet_kill(&cp->txdb_tasklet);
pm_runtime_put_sync(priv->dev);
@@ -671,6 +685,7 @@ static struct mbox_chan *imx_mu_specific_xlate(struct mbox_controller *mbox,
static struct mbox_chan * imx_mu_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *sp)
{
+ struct mbox_chan *p_chan;
u32 type, idx, chan;
if (sp->args_count != 2) {
@@ -680,14 +695,25 @@ static struct mbox_chan * imx_mu_xlate(struct mbox_controller *mbox,
type = sp->args[0]; /* channel type */
idx = sp->args[1]; /* index */
- chan = type * 4 + idx;
+ /* RST only supports 1 channel */
+ if ((type == IMX_MU_TYPE_RST) && idx) {
+ dev_err(mbox->dev, "Invalid RST channel %d\n", idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ chan = type * 4 + idx;
if (chan >= mbox->num_chans) {
dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx);
return ERR_PTR(-EINVAL);
}
- return &mbox->chans[chan];
+ p_chan = &mbox->chans[chan];
+
+ if (type == IMX_MU_TYPE_TXDB_V2)
+ p_chan->txdone_method = TXDONE_BY_ACK;
+
+ return p_chan;
}
static struct mbox_chan *imx_mu_seco_xlate(struct mbox_controller *mbox,
diff --git a/drivers/mailbox/mailbox-sti.c b/drivers/mailbox/mailbox-sti.c
index 823061dd8c8e..b4b5bdd503cf 100644
--- a/drivers/mailbox/mailbox-sti.c
+++ b/drivers/mailbox/mailbox-sti.c
@@ -17,8 +17,8 @@
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include "mailbox.h"
@@ -403,7 +403,6 @@ MODULE_DEVICE_TABLE(of, sti_mailbox_match);
static int sti_mbox_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct mbox_controller *mbox;
struct sti_mbox_device *mdev;
struct device_node *np = pdev->dev.of_node;
@@ -411,12 +410,11 @@ static int sti_mbox_probe(struct platform_device *pdev)
int irq;
int ret;
- match = of_match_device(sti_mailbox_match, &pdev->dev);
- if (!match) {
+ pdev->dev.platform_data = (struct sti_mbox_pdata *)device_get_match_data(&pdev->dev);
+ if (!pdev->dev.platform_data) {
dev_err(&pdev->dev, "No configuration found\n");
return -ENODEV;
}
- pdev->dev.platform_data = (struct sti_mbox_pdata *) match->data;
mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
if (!mdev)
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 4d62b07c1411..de862e9137d5 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -13,10 +13,13 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>
#include <linux/of.h>
+#define CMDQ_MBOX_AUTOSUSPEND_DELAY_MS 100
+
#define CMDQ_OP_CODE_MASK (0xff << CMDQ_OP_CODE_SHIFT)
#define CMDQ_NUM_CMD(t) (t->cmd_buf_size / CMDQ_INST_SIZE)
#define CMDQ_GCE_NUM_MAX (2)
@@ -283,10 +286,8 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq,
break;
}
- if (list_empty(&thread->task_busy_list)) {
+ if (list_empty(&thread->task_busy_list))
cmdq_thread_disable(cmdq, thread);
- clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
- }
}
static irqreturn_t cmdq_irq_handler(int irq, void *dev)
@@ -307,9 +308,26 @@ static irqreturn_t cmdq_irq_handler(int irq, void *dev)
spin_unlock_irqrestore(&thread->chan->lock, flags);
}
+ pm_runtime_mark_last_busy(cmdq->mbox.dev);
+
return IRQ_HANDLED;
}
+static int cmdq_runtime_resume(struct device *dev)
+{
+ struct cmdq *cmdq = dev_get_drvdata(dev);
+
+ return clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks);
+}
+
+static int cmdq_runtime_suspend(struct device *dev)
+{
+ struct cmdq *cmdq = dev_get_drvdata(dev);
+
+ clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
+ return 0;
+}
+
static int cmdq_suspend(struct device *dev)
{
struct cmdq *cmdq = dev_get_drvdata(dev);
@@ -333,16 +351,14 @@ static int cmdq_suspend(struct device *dev)
if (cmdq->pdata->sw_ddr_en)
cmdq_sw_ddr_enable(cmdq, false);
- clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks);
-
- return 0;
+ return pm_runtime_force_suspend(dev);
}
static int cmdq_resume(struct device *dev)
{
struct cmdq *cmdq = dev_get_drvdata(dev);
- WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks));
+ WARN_ON(pm_runtime_force_resume(dev));
cmdq->suspended = false;
if (cmdq->pdata->sw_ddr_en)
@@ -358,6 +374,9 @@ static int cmdq_remove(struct platform_device *pdev)
if (cmdq->pdata->sw_ddr_en)
cmdq_sw_ddr_enable(cmdq, false);
+ if (!IS_ENABLED(CONFIG_PM))
+ cmdq_runtime_suspend(&pdev->dev);
+
clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks);
return 0;
}
@@ -369,13 +388,20 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
struct cmdq_task *task;
unsigned long curr_pa, end_pa;
+ int ret;
/* Client should not flush new tasks if suspended. */
WARN_ON(cmdq->suspended);
+ ret = pm_runtime_get_sync(cmdq->mbox.dev);
+ if (ret < 0)
+ return ret;
+
task = kzalloc(sizeof(*task), GFP_ATOMIC);
- if (!task)
+ if (!task) {
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
return -ENOMEM;
+ }
task->cmdq = cmdq;
INIT_LIST_HEAD(&task->list_entry);
@@ -384,8 +410,6 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
task->pkt = pkt;
if (list_empty(&thread->task_busy_list)) {
- WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
-
/*
* The thread reset will clear the thread-related registers to 0,
* including pc, end, priority, irq, suspend and enable. Thus
@@ -424,6 +448,9 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
}
list_move_tail(&task->list_entry, &thread->task_busy_list);
+ pm_runtime_mark_last_busy(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
+
return 0;
}
@@ -439,6 +466,8 @@ static void cmdq_mbox_shutdown(struct mbox_chan *chan)
struct cmdq_task *task, *tmp;
unsigned long flags;
+ WARN_ON(pm_runtime_get_sync(cmdq->mbox.dev));
+
spin_lock_irqsave(&thread->chan->lock, flags);
if (list_empty(&thread->task_busy_list))
goto done;
@@ -457,7 +486,6 @@ static void cmdq_mbox_shutdown(struct mbox_chan *chan)
}
cmdq_thread_disable(cmdq, thread);
- clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
done:
/*
@@ -467,6 +495,9 @@ done:
* to do any operation here, only unlock and leave.
*/
spin_unlock_irqrestore(&thread->chan->lock, flags);
+
+ pm_runtime_mark_last_busy(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
}
static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
@@ -477,6 +508,11 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
struct cmdq_task *task, *tmp;
unsigned long flags;
u32 enable;
+ int ret;
+
+ ret = pm_runtime_get_sync(cmdq->mbox.dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&thread->chan->lock, flags);
if (list_empty(&thread->task_busy_list))
@@ -497,10 +533,12 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
cmdq_thread_resume(thread);
cmdq_thread_disable(cmdq, thread);
- clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
out:
spin_unlock_irqrestore(&thread->chan->lock, flags);
+ pm_runtime_mark_last_busy(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
+
return 0;
wait:
@@ -513,6 +551,8 @@ wait:
return -EFAULT;
}
+ pm_runtime_mark_last_busy(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
return 0;
}
@@ -642,12 +682,28 @@ static int cmdq_probe(struct platform_device *pdev)
return err;
}
+ /* If Runtime PM is not available enable the clocks now. */
+ if (!IS_ENABLED(CONFIG_PM)) {
+ err = cmdq_runtime_resume(dev);
+ if (err)
+ return err;
+ }
+
+ err = devm_pm_runtime_enable(dev);
+ if (err)
+ return err;
+
+ pm_runtime_set_autosuspend_delay(dev, CMDQ_MBOX_AUTOSUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+
return 0;
}
static const struct dev_pm_ops cmdq_pm_ops = {
.suspend = cmdq_suspend,
.resume = cmdq_resume,
+ SET_RUNTIME_PM_OPS(cmdq_runtime_suspend,
+ cmdq_runtime_resume, NULL)
};
static const struct gce_plat gce_plat_v2 = {
diff --git a/drivers/mailbox/ti-msgmgr.c b/drivers/mailbox/ti-msgmgr.c
index a94577f16a47..9d2d4ff6cda4 100644
--- a/drivers/mailbox/ti-msgmgr.c
+++ b/drivers/mailbox/ti-msgmgr.c
@@ -15,10 +15,10 @@
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/soc/ti/ti-msgmgr.h>
#define Q_DATA_OFFSET(proxy, queue, reg) \
@@ -810,7 +810,6 @@ MODULE_DEVICE_TABLE(of, ti_msgmgr_of_match);
static int ti_msgmgr_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- const struct of_device_id *of_id;
struct device_node *np;
const struct ti_msgmgr_desc *desc;
struct ti_msgmgr_inst *inst;
@@ -828,19 +827,12 @@ static int ti_msgmgr_probe(struct platform_device *pdev)
}
np = dev->of_node;
- of_id = of_match_device(ti_msgmgr_of_match, dev);
- if (!of_id) {
- dev_err(dev, "OF data missing\n");
- return -EINVAL;
- }
- desc = of_id->data;
-
inst = devm_kzalloc(dev, sizeof(*inst), GFP_KERNEL);
if (!inst)
return -ENOMEM;
inst->dev = dev;
- inst->desc = desc;
+ inst->desc = desc = device_get_match_data(dev);
inst->queue_proxy_region =
devm_platform_ioremap_resource_byname(pdev, desc->data_region_name);
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
index 0cac5bead84f..ba4530459de8 100644
--- a/drivers/mcb/mcb-core.c
+++ b/drivers/mcb/mcb-core.c
@@ -246,6 +246,7 @@ int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev)
return 0;
out:
+ put_device(&dev->dev);
return ret;
}
@@ -288,7 +289,7 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier)
bus->dev.parent = carrier;
bus->dev.bus = &mcb_bus_type;
bus->dev.type = &mcb_carrier_device_type;
- bus->dev.release = &mcb_free_bus;
+ bus->dev.release = mcb_free_bus;
dev_set_name(&bus->dev, "mcb:%d", bus_nr);
rc = device_add(&bus->dev);
@@ -311,7 +312,7 @@ static int __mcb_devices_unregister(struct device *dev, void *data)
static void mcb_devices_unregister(struct mcb_bus *bus)
{
- bus_for_each_dev(&mcb_bus_type, NULL, NULL, __mcb_devices_unregister);
+ bus_for_each_dev(bus->dev.bus, NULL, NULL, __mcb_devices_unregister);
}
/**
* mcb_release_bus() - Free a @mcb_bus
@@ -406,7 +407,7 @@ static int __mcb_bus_add_devices(struct device *dev, void *data)
*/
void mcb_bus_add_devices(const struct mcb_bus *bus)
{
- bus_for_each_dev(&mcb_bus_type, NULL, NULL, __mcb_bus_add_devices);
+ bus_for_each_dev(bus->dev.bus, NULL, NULL, __mcb_bus_add_devices);
}
EXPORT_SYMBOL_NS_GPL(mcb_bus_add_devices, MCB);
diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
index 656b6b71c768..1ae37e693de0 100644
--- a/drivers/mcb/mcb-parse.c
+++ b/drivers/mcb/mcb-parse.c
@@ -106,7 +106,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
return 0;
err:
- put_device(&mdev->dev);
+ mcb_free_dev(mdev);
return ret;
}
diff --git a/drivers/media/cec/platform/Makefile b/drivers/media/cec/platform/Makefile
index 26d2bc778394..a51e98ab4958 100644
--- a/drivers/media/cec/platform/Makefile
+++ b/drivers/media/cec/platform/Makefile
@@ -6,7 +6,7 @@
# Please keep it in alphabetic order
obj-$(CONFIG_CEC_CROS_EC) += cros-ec/
obj-$(CONFIG_CEC_GPIO) += cec-gpio/
-obj-$(CONFIG_CEC_MESON_AO) += meson/
+obj-y += meson/
obj-$(CONFIG_CEC_SAMSUNG_S5P) += s5p/
obj-$(CONFIG_CEC_SECO) += seco/
obj-$(CONFIG_CEC_STI) += sti/
diff --git a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
index c17faf002877..42dde3f0dbde 100644
--- a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
+++ b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
@@ -22,50 +22,124 @@
#define DRV_NAME "cros-ec-cec"
/**
- * struct cros_ec_cec - Driver data for EC CEC
+ * struct cros_ec_cec_port - Driver data for a single EC CEC port
*
- * @cros_ec: Pointer to EC device
- * @notifier: Notifier info for responding to EC events
+ * @port_num: port number
* @adap: CEC adapter
* @notify: CEC notifier pointer
* @rx_msg: storage for a received message
+ * @cros_ec_cec: pointer to the parent struct
*/
-struct cros_ec_cec {
- struct cros_ec_device *cros_ec;
- struct notifier_block notifier;
+struct cros_ec_cec_port {
+ int port_num;
struct cec_adapter *adap;
struct cec_notifier *notify;
struct cec_msg rx_msg;
+ struct cros_ec_cec *cros_ec_cec;
+};
+
+/**
+ * struct cros_ec_cec - Driver data for EC CEC
+ *
+ * @cros_ec: Pointer to EC device
+ * @notifier: Notifier info for responding to EC events
+ * @write_cmd_version: Highest supported version of EC_CMD_CEC_WRITE_MSG.
+ * @num_ports: Number of CEC ports
+ * @ports: Array of ports
+ */
+struct cros_ec_cec {
+ struct cros_ec_device *cros_ec;
+ struct notifier_block notifier;
+ int write_cmd_version;
+ int num_ports;
+ struct cros_ec_cec_port *ports[EC_CEC_MAX_PORTS];
};
+static void cros_ec_cec_received_message(struct cros_ec_cec_port *port,
+ uint8_t *msg, uint8_t len)
+{
+ if (len > CEC_MAX_MSG_SIZE)
+ len = CEC_MAX_MSG_SIZE;
+
+ port->rx_msg.len = len;
+ memcpy(port->rx_msg.msg, msg, len);
+
+ cec_received_msg(port->adap, &port->rx_msg);
+}
+
static void handle_cec_message(struct cros_ec_cec *cros_ec_cec)
{
struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
uint8_t *cec_message = cros_ec->event_data.data.cec_message;
unsigned int len = cros_ec->event_size;
+ struct cros_ec_cec_port *port;
+ /*
+ * There are two ways of receiving CEC messages:
+ * 1. Old EC firmware which only supports one port sends the data in a
+ * cec_message MKBP event.
+ * 2. New EC firmware which supports multiple ports uses
+ * EC_MKBP_CEC_HAVE_DATA to notify that data is ready and
+ * EC_CMD_CEC_READ_MSG to read it.
+ * Check that the EC only has one CEC port, and then we can assume the
+ * message is from port 0.
+ */
+ if (cros_ec_cec->num_ports != 1) {
+ dev_err(cros_ec->dev,
+ "received cec_message on device with %d ports\n",
+ cros_ec_cec->num_ports);
+ return;
+ }
+ port = cros_ec_cec->ports[0];
- if (len > CEC_MAX_MSG_SIZE)
- len = CEC_MAX_MSG_SIZE;
- cros_ec_cec->rx_msg.len = len;
- memcpy(cros_ec_cec->rx_msg.msg, cec_message, len);
+ cros_ec_cec_received_message(port, cec_message, len);
+}
+
+static void cros_ec_cec_read_message(struct cros_ec_cec_port *port)
+{
+ struct cros_ec_device *cros_ec = port->cros_ec_cec->cros_ec;
+ struct ec_params_cec_read params = {
+ .port = port->port_num,
+ };
+ struct ec_response_cec_read response;
+ int ret;
- cec_received_msg(cros_ec_cec->adap, &cros_ec_cec->rx_msg);
+ ret = cros_ec_cmd(cros_ec, 0, EC_CMD_CEC_READ_MSG, &params,
+ sizeof(params), &response, sizeof(response));
+ if (ret < 0) {
+ dev_err(cros_ec->dev,
+ "error reading CEC message on EC: %d\n", ret);
+ return;
+ }
+
+ cros_ec_cec_received_message(port, response.msg, response.msg_len);
}
static void handle_cec_event(struct cros_ec_cec *cros_ec_cec)
{
struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
- uint32_t events = cros_ec->event_data.data.cec_events;
+ uint32_t cec_events = cros_ec->event_data.data.cec_events;
+ uint32_t port_num = EC_MKBP_EVENT_CEC_GET_PORT(cec_events);
+ uint32_t events = EC_MKBP_EVENT_CEC_GET_EVENTS(cec_events);
+ struct cros_ec_cec_port *port;
+
+ if (port_num >= cros_ec_cec->num_ports) {
+ dev_err(cros_ec->dev,
+ "received CEC event for invalid port %d\n", port_num);
+ return;
+ }
+ port = cros_ec_cec->ports[port_num];
if (events & EC_MKBP_CEC_SEND_OK)
- cec_transmit_attempt_done(cros_ec_cec->adap,
- CEC_TX_STATUS_OK);
+ cec_transmit_attempt_done(port->adap, CEC_TX_STATUS_OK);
/* FW takes care of all retries, tell core to avoid more retries */
if (events & EC_MKBP_CEC_SEND_FAILED)
- cec_transmit_attempt_done(cros_ec_cec->adap,
+ cec_transmit_attempt_done(port->adap,
CEC_TX_STATUS_MAX_RETRIES |
CEC_TX_STATUS_NACK);
+
+ if (events & EC_MKBP_CEC_HAVE_DATA)
+ cros_ec_cec_read_message(port);
}
static int cros_ec_cec_event(struct notifier_block *nb,
@@ -93,20 +167,18 @@ static int cros_ec_cec_event(struct notifier_block *nb,
static int cros_ec_cec_set_log_addr(struct cec_adapter *adap, u8 logical_addr)
{
- struct cros_ec_cec *cros_ec_cec = adap->priv;
+ struct cros_ec_cec_port *port = adap->priv;
+ struct cros_ec_cec *cros_ec_cec = port->cros_ec_cec;
struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
- struct {
- struct cros_ec_command msg;
- struct ec_params_cec_set data;
- } __packed msg = {};
+ struct ec_params_cec_set params = {
+ .cmd = CEC_CMD_LOGICAL_ADDRESS,
+ .port = port->port_num,
+ .val = logical_addr,
+ };
int ret;
- msg.msg.command = EC_CMD_CEC_SET;
- msg.msg.outsize = sizeof(msg.data);
- msg.data.cmd = CEC_CMD_LOGICAL_ADDRESS;
- msg.data.val = logical_addr;
-
- ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg);
+ ret = cros_ec_cmd(cros_ec, 0, EC_CMD_CEC_SET, &params, sizeof(params),
+ NULL, 0);
if (ret < 0) {
dev_err(cros_ec->dev,
"error setting CEC logical address on EC: %d\n", ret);
@@ -119,19 +191,26 @@ static int cros_ec_cec_set_log_addr(struct cec_adapter *adap, u8 logical_addr)
static int cros_ec_cec_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *cec_msg)
{
- struct cros_ec_cec *cros_ec_cec = adap->priv;
+ struct cros_ec_cec_port *port = adap->priv;
+ struct cros_ec_cec *cros_ec_cec = port->cros_ec_cec;
struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
- struct {
- struct cros_ec_command msg;
- struct ec_params_cec_write data;
- } __packed msg = {};
+ struct ec_params_cec_write params;
+ struct ec_params_cec_write_v1 params_v1;
int ret;
- msg.msg.command = EC_CMD_CEC_WRITE_MSG;
- msg.msg.outsize = cec_msg->len;
- memcpy(msg.data.msg, cec_msg->msg, cec_msg->len);
+ if (cros_ec_cec->write_cmd_version == 0) {
+ memcpy(params.msg, cec_msg->msg, cec_msg->len);
+ ret = cros_ec_cmd(cros_ec, 0, EC_CMD_CEC_WRITE_MSG, &params,
+ cec_msg->len, NULL, 0);
+ } else {
+ params_v1.port = port->port_num;
+ params_v1.msg_len = cec_msg->len;
+ memcpy(params_v1.msg, cec_msg->msg, cec_msg->len);
+ ret = cros_ec_cmd(cros_ec, cros_ec_cec->write_cmd_version,
+ EC_CMD_CEC_WRITE_MSG, &params_v1,
+ sizeof(params_v1), NULL, 0);
+ }
- ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg);
if (ret < 0) {
dev_err(cros_ec->dev,
"error writing CEC msg on EC: %d\n", ret);
@@ -143,20 +222,18 @@ static int cros_ec_cec_transmit(struct cec_adapter *adap, u8 attempts,
static int cros_ec_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
- struct cros_ec_cec *cros_ec_cec = adap->priv;
+ struct cros_ec_cec_port *port = adap->priv;
+ struct cros_ec_cec *cros_ec_cec = port->cros_ec_cec;
struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
- struct {
- struct cros_ec_command msg;
- struct ec_params_cec_set data;
- } __packed msg = {};
+ struct ec_params_cec_set params = {
+ .cmd = CEC_CMD_ENABLE,
+ .port = port->port_num,
+ .val = enable,
+ };
int ret;
- msg.msg.command = EC_CMD_CEC_SET;
- msg.msg.outsize = sizeof(msg.data);
- msg.data.cmd = CEC_CMD_ENABLE;
- msg.data.val = enable;
-
- ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg);
+ ret = cros_ec_cmd(cros_ec, 0, EC_CMD_CEC_SET, &params, sizeof(params),
+ NULL, 0);
if (ret < 0) {
dev_err(cros_ec->dev,
"error %sabling CEC on EC: %d\n",
@@ -203,38 +280,54 @@ static SIMPLE_DEV_PM_OPS(cros_ec_cec_pm_ops,
#if IS_ENABLED(CONFIG_PCI) && IS_ENABLED(CONFIG_DMI)
/*
- * The Firmware only handles a single CEC interface tied to a single HDMI
- * connector we specify along with the DRM device name handling the HDMI output
+ * Specify the DRM device name handling the HDMI output and the HDMI connector
+ * corresponding to each CEC port. The order of connectors must match the order
+ * in the EC (first connector is EC port 0, ...), and the number of connectors
+ * must match the number of ports in the EC (which can be queried using the
+ * EC_CMD_CEC_PORT_COUNT host command).
*/
struct cec_dmi_match {
const char *sys_vendor;
const char *product_name;
const char *devname;
- const char *conn;
+ const char *const *conns;
};
+static const char *const port_b_conns[] = { "Port B", NULL };
+static const char *const port_db_conns[] = { "Port D", "Port B", NULL };
+static const char *const port_ba_conns[] = { "Port B", "Port A", NULL };
+static const char *const port_d_conns[] = { "Port D", NULL };
+
static const struct cec_dmi_match cec_dmi_match_table[] = {
/* Google Fizz */
- { "Google", "Fizz", "0000:00:02.0", "Port B" },
+ { "Google", "Fizz", "0000:00:02.0", port_b_conns },
/* Google Brask */
- { "Google", "Brask", "0000:00:02.0", "Port B" },
+ { "Google", "Brask", "0000:00:02.0", port_b_conns },
/* Google Moli */
- { "Google", "Moli", "0000:00:02.0", "Port B" },
+ { "Google", "Moli", "0000:00:02.0", port_b_conns },
/* Google Kinox */
- { "Google", "Kinox", "0000:00:02.0", "Port B" },
+ { "Google", "Kinox", "0000:00:02.0", port_b_conns },
/* Google Kuldax */
- { "Google", "Kuldax", "0000:00:02.0", "Port B" },
+ { "Google", "Kuldax", "0000:00:02.0", port_b_conns },
/* Google Aurash */
- { "Google", "Aurash", "0000:00:02.0", "Port B" },
+ { "Google", "Aurash", "0000:00:02.0", port_b_conns },
/* Google Gladios */
- { "Google", "Gladios", "0000:00:02.0", "Port B" },
+ { "Google", "Gladios", "0000:00:02.0", port_b_conns },
/* Google Lisbon */
- { "Google", "Lisbon", "0000:00:02.0", "Port B" },
+ { "Google", "Lisbon", "0000:00:02.0", port_b_conns },
+ /* Google Dibbi */
+ { "Google", "Dibbi", "0000:00:02.0", port_db_conns },
+ /* Google Constitution */
+ { "Google", "Constitution", "0000:00:02.0", port_ba_conns },
+ /* Google Boxy */
+ { "Google", "Boxy", "0000:00:02.0", port_d_conns },
+ /* Google Taranza */
+ { "Google", "Taranza", "0000:00:02.0", port_db_conns },
};
static struct device *cros_ec_cec_find_hdmi_dev(struct device *dev,
- const char **conn)
+ const char * const **conns)
{
int i;
@@ -251,7 +344,7 @@ static struct device *cros_ec_cec_find_hdmi_dev(struct device *dev,
if (!d)
return ERR_PTR(-EPROBE_DEFER);
put_device(d);
- *conn = m->conn;
+ *conns = m->conns;
return d;
}
}
@@ -265,23 +358,137 @@ static struct device *cros_ec_cec_find_hdmi_dev(struct device *dev,
#else
static struct device *cros_ec_cec_find_hdmi_dev(struct device *dev,
- const char **conn)
+ const char * const **conns)
{
return ERR_PTR(-ENODEV);
}
#endif
+static int cros_ec_cec_get_num_ports(struct cros_ec_cec *cros_ec_cec)
+{
+ struct ec_response_cec_port_count response;
+ int ret;
+
+ ret = cros_ec_cmd(cros_ec_cec->cros_ec, 0, EC_CMD_CEC_PORT_COUNT, NULL,
+ 0, &response, sizeof(response));
+ if (ret < 0) {
+ /*
+ * Old EC firmware only supports one port and does not support
+ * the port count command, so fall back to assuming one port.
+ */
+ cros_ec_cec->num_ports = 1;
+ return 0;
+ }
+
+ if (response.port_count == 0) {
+ dev_err(cros_ec_cec->cros_ec->dev,
+ "EC reports 0 CEC ports\n");
+ return -ENODEV;
+ }
+
+ if (response.port_count > EC_CEC_MAX_PORTS) {
+ dev_err(cros_ec_cec->cros_ec->dev,
+ "EC reports too many ports: %d\n", response.port_count);
+ return -EINVAL;
+ }
+
+ cros_ec_cec->num_ports = response.port_count;
+ return 0;
+}
+
+static int cros_ec_cec_get_write_cmd_version(struct cros_ec_cec *cros_ec_cec)
+{
+ struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
+ struct ec_params_get_cmd_versions_v1 params = {
+ .cmd = EC_CMD_CEC_WRITE_MSG,
+ };
+ struct ec_response_get_cmd_versions response;
+ int ret;
+
+ ret = cros_ec_cmd(cros_ec, 1, EC_CMD_GET_CMD_VERSIONS, &params,
+ sizeof(params), &response, sizeof(response));
+ if (ret < 0) {
+ dev_err(cros_ec->dev,
+ "error getting CEC write command version: %d\n", ret);
+ return ret;
+ }
+
+ if (response.version_mask & EC_VER_MASK(1)) {
+ cros_ec_cec->write_cmd_version = 1;
+ } else {
+ if (cros_ec_cec->num_ports != 1) {
+ dev_err(cros_ec->dev,
+ "v0 write command only supports 1 port, %d reported\n",
+ cros_ec_cec->num_ports);
+ return -EINVAL;
+ }
+ cros_ec_cec->write_cmd_version = 0;
+ }
+
+ return 0;
+}
+
+static int cros_ec_cec_init_port(struct device *dev,
+ struct cros_ec_cec *cros_ec_cec,
+ int port_num, struct device *hdmi_dev,
+ const char * const *conns)
+{
+ struct cros_ec_cec_port *port;
+ int ret;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->cros_ec_cec = cros_ec_cec;
+ port->port_num = port_num;
+
+ port->adap = cec_allocate_adapter(&cros_ec_cec_ops, port, DRV_NAME,
+ CEC_CAP_DEFAULTS |
+ CEC_CAP_CONNECTOR_INFO, 1);
+ if (IS_ERR(port->adap))
+ return PTR_ERR(port->adap);
+
+ if (!conns[port_num]) {
+ dev_err(dev, "no conn for port %d\n", port_num);
+ ret = -ENODEV;
+ goto out_probe_adapter;
+ }
+
+ port->notify = cec_notifier_cec_adap_register(hdmi_dev, conns[port_num],
+ port->adap);
+ if (!port->notify) {
+ ret = -ENOMEM;
+ goto out_probe_adapter;
+ }
+
+ ret = cec_register_adapter(port->adap, dev);
+ if (ret < 0)
+ goto out_probe_notify;
+
+ cros_ec_cec->ports[port_num] = port;
+
+ return 0;
+
+out_probe_notify:
+ cec_notifier_cec_adap_unregister(port->notify, port->adap);
+out_probe_adapter:
+ cec_delete_adapter(port->adap);
+ return ret;
+}
+
static int cros_ec_cec_probe(struct platform_device *pdev)
{
struct cros_ec_dev *ec_dev = dev_get_drvdata(pdev->dev.parent);
struct cros_ec_device *cros_ec = ec_dev->ec_dev;
struct cros_ec_cec *cros_ec_cec;
+ struct cros_ec_cec_port *port;
struct device *hdmi_dev;
- const char *conn = NULL;
+ const char * const *conns = NULL;
int ret;
- hdmi_dev = cros_ec_cec_find_hdmi_dev(&pdev->dev, &conn);
+ hdmi_dev = cros_ec_cec_find_hdmi_dev(&pdev->dev, &conns);
if (IS_ERR(hdmi_dev))
return PTR_ERR(hdmi_dev);
@@ -295,18 +502,19 @@ static int cros_ec_cec_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 1);
- cros_ec_cec->adap = cec_allocate_adapter(&cros_ec_cec_ops, cros_ec_cec,
- DRV_NAME,
- CEC_CAP_DEFAULTS |
- CEC_CAP_CONNECTOR_INFO, 1);
- if (IS_ERR(cros_ec_cec->adap))
- return PTR_ERR(cros_ec_cec->adap);
+ ret = cros_ec_cec_get_num_ports(cros_ec_cec);
+ if (ret)
+ return ret;
- cros_ec_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, conn,
- cros_ec_cec->adap);
- if (!cros_ec_cec->notify) {
- ret = -ENOMEM;
- goto out_probe_adapter;
+ ret = cros_ec_cec_get_write_cmd_version(cros_ec_cec);
+ if (ret)
+ return ret;
+
+ for (int i = 0; i < cros_ec_cec->num_ports; i++) {
+ ret = cros_ec_cec_init_port(&pdev->dev, cros_ec_cec, i,
+ hdmi_dev, conns);
+ if (ret)
+ goto unregister_ports;
}
/* Get CEC events from the EC. */
@@ -315,20 +523,24 @@ static int cros_ec_cec_probe(struct platform_device *pdev)
&cros_ec_cec->notifier);
if (ret) {
dev_err(&pdev->dev, "failed to register notifier\n");
- goto out_probe_notify;
+ goto unregister_ports;
}
- ret = cec_register_adapter(cros_ec_cec->adap, &pdev->dev);
- if (ret < 0)
- goto out_probe_notify;
-
return 0;
-out_probe_notify:
- cec_notifier_cec_adap_unregister(cros_ec_cec->notify,
- cros_ec_cec->adap);
-out_probe_adapter:
- cec_delete_adapter(cros_ec_cec->adap);
+unregister_ports:
+ /*
+ * Unregister any adapters which have been registered. We don't add the
+ * port to the array until the adapter has been registered successfully,
+ * so any non-NULL ports must have been registered.
+ */
+ for (int i = 0; i < cros_ec_cec->num_ports; i++) {
+ port = cros_ec_cec->ports[i];
+ if (!port)
+ break;
+ cec_notifier_cec_adap_unregister(port->notify, port->adap);
+ cec_unregister_adapter(port->adap);
+ }
return ret;
}
@@ -336,6 +548,7 @@ static void cros_ec_cec_remove(struct platform_device *pdev)
{
struct cros_ec_cec *cros_ec_cec = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
+ struct cros_ec_cec_port *port;
int ret;
/*
@@ -349,9 +562,11 @@ static void cros_ec_cec_remove(struct platform_device *pdev)
if (ret)
dev_err(dev, "failed to unregister notifier\n");
- cec_notifier_cec_adap_unregister(cros_ec_cec->notify,
- cros_ec_cec->adap);
- cec_unregister_adapter(cros_ec_cec->adap);
+ for (int i = 0; i < cros_ec_cec->num_ports; i++) {
+ port = cros_ec_cec->ports[i];
+ cec_notifier_cec_adap_unregister(port->notify, port->adap);
+ cec_unregister_adapter(port->adap);
+ }
}
static struct platform_driver cros_ec_cec_driver = {
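The probe path above queries the EC for its CEC port count, falls back to a single port when old firmware does not implement the command, and rejects out-of-range answers before initialising each port. A small standalone sketch of that flow, using stand-in helpers rather than the real cros_ec_cmd() transaction:

/* Standalone sketch, not the driver: port-count query with the
 * old-firmware fallback. demo_query_port_count() stands in for the
 * EC_CMD_CEC_PORT_COUNT host command. */
#include <stdio.h>

#define DEMO_MAX_PORTS 4

static int demo_query_port_count(int *count)
{
	*count = 2;		/* pretend the EC answered with two ports */
	return 0;
}

static int demo_get_num_ports(int *num_ports)
{
	int count, ret = demo_query_port_count(&count);

	if (ret < 0) {
		*num_ports = 1;	/* old firmware: assume a single port */
		return 0;
	}
	if (count == 0 || count > DEMO_MAX_PORTS)
		return -1;	/* reject nonsense answers */
	*num_ports = count;
	return 0;
}

int main(void)
{
	int n;

	if (!demo_get_num_ports(&n))
		printf("initialising %d CEC port(s)\n", n);
	return 0;
}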
diff --git a/drivers/media/common/siano/smsdvb-debugfs.c b/drivers/media/common/siano/smsdvb-debugfs.c
index e0beefd80d7b..73990e469df9 100644
--- a/drivers/media/common/siano/smsdvb-debugfs.c
+++ b/drivers/media/common/siano/smsdvb-debugfs.c
@@ -353,31 +353,21 @@ static const struct file_operations debugfs_stats_ops = {
int smsdvb_debugfs_create(struct smsdvb_client_t *client)
{
struct smscore_device_t *coredev = client->coredev;
- struct dentry *d;
struct smsdvb_debugfs *debug_data;
if (!smsdvb_debugfs_usb_root || !coredev->is_usb_device)
return -ENODEV;
- client->debugfs = debugfs_create_dir(coredev->devpath,
- smsdvb_debugfs_usb_root);
- if (IS_ERR_OR_NULL(client->debugfs)) {
- pr_info("Unable to create debugfs %s directory.\n",
- coredev->devpath);
- return -ENODEV;
- }
-
- d = debugfs_create_file("stats", S_IRUGO | S_IWUSR, client->debugfs,
- client, &debugfs_stats_ops);
- if (!d) {
- debugfs_remove(client->debugfs);
- return -ENOMEM;
- }
-
debug_data = kzalloc(sizeof(*client->debug_data), GFP_KERNEL);
if (!debug_data)
return -ENOMEM;
+ client->debugfs = debugfs_create_dir(coredev->devpath,
+ smsdvb_debugfs_usb_root);
+
+ debugfs_create_file("stats", S_IRUGO | S_IWUSR, client->debugfs,
+ client, &debugfs_stats_ops);
+
client->debug_data = debug_data;
client->prt_dvb_stats = smsdvb_print_dvb_stats;
client->prt_isdb_stats = smsdvb_print_isdb_stats;
diff --git a/drivers/media/common/videobuf2/frame_vector.c b/drivers/media/common/videobuf2/frame_vector.c
index fd87747be9b1..41f289c75cbb 100644
--- a/drivers/media/common/videobuf2/frame_vector.c
+++ b/drivers/media/common/videobuf2/frame_vector.c
@@ -159,7 +159,7 @@ EXPORT_SYMBOL(frame_vector_to_pfns);
struct frame_vector *frame_vector_create(unsigned int nr_frames)
{
struct frame_vector *vec;
- int size = sizeof(struct frame_vector) + sizeof(void *) * nr_frames;
+ int size = struct_size(vec, ptrs, nr_frames);
if (WARN_ON_ONCE(nr_frames == 0))
return NULL;
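The hunk above swaps the open-coded sizeof(struct frame_vector) + sizeof(void *) * nr_frames for struct_size(), which computes the same flexible-array allocation size while saturating rather than wrapping on overflow. A standalone sketch of the equivalent calculation (a simplified stand-in, not the kernel macro):

/* Simplified stand-in for a struct_size()-style helper on a struct with
 * a flexible array member. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_vector {
	unsigned int nr_allocated;
	unsigned int nr_frames;
	void *ptrs[];		/* flexible array member */
};

static size_t demo_struct_size(size_t nmemb)
{
	size_t base = offsetof(struct demo_vector, ptrs);

	if (nmemb && nmemb > (SIZE_MAX - base) / sizeof(void *))
		return SIZE_MAX;	/* saturate instead of wrapping */
	return base + nmemb * sizeof(void *);
}

int main(void)
{
	printf("size for 8 frames: %zu bytes\n", demo_struct_size(8));
	return 0;
}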
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index cf6727d9c81f..27aee92f3eea 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -2890,7 +2890,7 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
if (copy_timestamp)
b->timestamp = ktime_get_ns();
ret = vb2_core_qbuf(q, index, NULL, NULL);
- dprintk(q, 5, "vb2_dbuf result: %d\n", ret);
+ dprintk(q, 5, "vb2_qbuf result: %d\n", ret);
if (ret)
return ret;
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index 2fa455d4a048..3d4fd4ef5310 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -542,13 +542,14 @@ static void vb2_dc_put_userptr(void *buf_priv)
*/
dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
DMA_ATTR_SKIP_CPU_SYNC);
- pages = frame_vector_pages(buf->vec);
- /* sgt should exist only if vector contains pages... */
- BUG_ON(IS_ERR(pages));
if (buf->dma_dir == DMA_FROM_DEVICE ||
- buf->dma_dir == DMA_BIDIRECTIONAL)
- for (i = 0; i < frame_vector_count(buf->vec); i++)
- set_page_dirty_lock(pages[i]);
+ buf->dma_dir == DMA_BIDIRECTIONAL) {
+ pages = frame_vector_pages(buf->vec);
+ /* sgt should exist only if vector contains pages... */
+ if (!WARN_ON_ONCE(IS_ERR(pages)))
+ for (i = 0; i < frame_vector_count(buf->vec); i++)
+ set_page_dirty_lock(pages[i]);
+ }
sg_free_table(sgt);
kfree(sgt);
} else {
diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
index 7c635e292106..7d953706f3f8 100644
--- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c
+++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
@@ -133,13 +133,15 @@ static void vb2_vmalloc_put_userptr(void *buf_priv)
if (!buf->vec->is_pfns) {
n_pages = frame_vector_count(buf->vec);
- pages = frame_vector_pages(buf->vec);
if (vaddr)
vm_unmap_ram((void *)vaddr, n_pages);
if (buf->dma_dir == DMA_FROM_DEVICE ||
- buf->dma_dir == DMA_BIDIRECTIONAL)
- for (i = 0; i < n_pages; i++)
- set_page_dirty_lock(pages[i]);
+ buf->dma_dir == DMA_BIDIRECTIONAL) {
+ pages = frame_vector_pages(buf->vec);
+ if (!WARN_ON_ONCE(IS_ERR(pages)))
+ for (i = 0; i < n_pages; i++)
+ set_page_dirty_lock(pages[i]);
+ }
} else {
iounmap((__force void __iomem *)buf->vaddr);
}
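Both videobuf2 hunks above converge on the same rule: user pages are only marked dirty when the DMA direction means the device may have written to them, and the hard BUG_ON() becomes a WARN_ON_ONCE() guard. A tiny standalone sketch of the direction check, with stand-in names:

/* Stand-in enum and helper, not the kernel DMA API: dirty pages only for
 * device-to-CPU directions. */
#include <stdbool.h>
#include <stdio.h>

enum demo_dma_dir { DEMO_TO_DEVICE, DEMO_FROM_DEVICE, DEMO_BIDIRECTIONAL };

static bool demo_device_may_have_written(enum demo_dma_dir dir)
{
	return dir == DEMO_FROM_DEVICE || dir == DEMO_BIDIRECTIONAL;
}

int main(void)
{
	printf("TO_DEVICE dirties pages: %d\n",
	       demo_device_may_have_written(DEMO_TO_DEVICE));
	printf("FROM_DEVICE dirties pages: %d\n",
	       demo_device_may_have_written(DEMO_FROM_DEVICE));
	return 0;
}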
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index a738573c8cd7..19d8de400a68 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -4779,8 +4779,8 @@ set_frequency(struct drx_demod_instance *demod,
bool image_to_select;
s32 fm_frequency_shift = 0;
- rf_mirror = (ext_attr->mirror == DRX_MIRROR_YES) ? true : false;
- tuner_mirror = demod->my_common_attr->mirror_freq_spect ? false : true;
+ rf_mirror = ext_attr->mirror == DRX_MIRROR_YES;
+ tuner_mirror = !demod->my_common_attr->mirror_freq_spect;
/*
Program frequency shifter
No need to account for mirroring on RF
@@ -8765,7 +8765,7 @@ static int qam_flip_spec(struct drx_demod_instance *demod, struct drx_channel *c
goto rw_error;
}
ext_attr->iqm_fs_rate_ofs = iqm_fs_rate_ofs;
- ext_attr->pos_image = (ext_attr->pos_image) ? false : true;
+ ext_attr->pos_image = !ext_attr->pos_image;
/* freeze dq/fq updating */
rc = drxj_dap_read_reg16(dev_addr, QAM_DQ_MODE__A, &data, 0);
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index cf037b61b226..26c67ef05d13 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1920,8 +1920,7 @@ static void m88ds3103_remove(struct i2c_client *client)
dev_dbg(&client->dev, "\n");
- if (dev->dt_client)
- i2c_unregister_device(dev->dt_client);
+ i2c_unregister_device(dev->dt_client);
i2c_mux_del_adapters(dev->muxc);
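The m88ds3103 hunk relies on i2c_unregister_device() tolerating a NULL argument, which lets the caller drop its own check. A standalone sketch of that NULL-tolerant teardown idiom, with stand-in types:

/* Stand-in destroy helper: accepting NULL means callers need no guard. */
#include <stdio.h>
#include <stdlib.h>

struct demo_client { int addr; };

static void demo_unregister(struct demo_client *c)
{
	if (!c)			/* tolerate NULL so callers need no check */
		return;
	printf("unregistering client at 0x%02x\n", c->addr);
	free(c);
}

int main(void)
{
	struct demo_client *c = calloc(1, sizeof(*c));

	demo_unregister(c);	/* valid pointer */
	demo_unregister(NULL);	/* also fine: no-op */
	return 0;
}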
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 74ff833ff48c..59ee0ca2c978 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -99,6 +99,7 @@ config VIDEO_IMX214
config VIDEO_IMX219
tristate "Sony IMX219 sensor support"
+ select V4L2_CCI_I2C
help
This is a Video4Linux2 sensor driver for the Sony
IMX219 camera.
@@ -215,6 +216,16 @@ config VIDEO_MT9M111
This driver supports MT9M111, MT9M112 and MT9M131 cameras from
Micron/Aptina
+config VIDEO_MT9M114
+ tristate "onsemi MT9M114 sensor support"
+ select V4L2_CCI_I2C
+ help
+ This is a Video4Linux2 sensor-level driver for the onsemi MT9M114
+ camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mt9m114.
+
config VIDEO_MT9P031
tristate "Aptina MT9P031 support"
select VIDEO_APTINA_PLL
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 80b00d39b48f..f5010f80a21f 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_VIDEO_ML86V7667) += ml86v7667.o
obj-$(CONFIG_VIDEO_MSP3400) += msp3400.o
obj-$(CONFIG_VIDEO_MT9M001) += mt9m001.o
obj-$(CONFIG_VIDEO_MT9M111) += mt9m111.o
+obj-$(CONFIG_VIDEO_MT9M114) += mt9m114.o
obj-$(CONFIG_VIDEO_MT9P031) += mt9p031.o
obj-$(CONFIG_VIDEO_MT9T112) += mt9t112.o
obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c
index 98ca417b8004..5ace7b5804d4 100644
--- a/drivers/media/i2c/adp1653.c
+++ b/drivers/media/i2c/adp1653.c
@@ -411,43 +411,44 @@ static int adp1653_of_init(struct i2c_client *client,
struct device_node *node)
{
struct adp1653_platform_data *pd;
- struct device_node *child;
+ struct device_node *node_indicator = NULL;
+ struct device_node *node_flash;
pd = devm_kzalloc(&client->dev, sizeof(*pd), GFP_KERNEL);
if (!pd)
return -ENOMEM;
flash->platform_data = pd;
- child = of_get_child_by_name(node, "flash");
- if (!child)
+ node_flash = of_get_child_by_name(node, "flash");
+ if (!node_flash)
return -EINVAL;
- if (of_property_read_u32(child, "flash-timeout-us",
+ if (of_property_read_u32(node_flash, "flash-timeout-us",
&pd->max_flash_timeout))
goto err;
- if (of_property_read_u32(child, "flash-max-microamp",
+ if (of_property_read_u32(node_flash, "flash-max-microamp",
&pd->max_flash_intensity))
goto err;
pd->max_flash_intensity /= 1000;
- if (of_property_read_u32(child, "led-max-microamp",
+ if (of_property_read_u32(node_flash, "led-max-microamp",
&pd->max_torch_intensity))
goto err;
pd->max_torch_intensity /= 1000;
- of_node_put(child);
- child = of_get_child_by_name(node, "indicator");
- if (!child)
- return -EINVAL;
+ node_indicator = of_get_child_by_name(node, "indicator");
+ if (!node_indicator)
+ goto err;
- if (of_property_read_u32(child, "led-max-microamp",
+ if (of_property_read_u32(node_indicator, "led-max-microamp",
&pd->max_indicator_intensity))
goto err;
- of_node_put(child);
+ of_node_put(node_flash);
+ of_node_put(node_indicator);
pd->enable_gpio = devm_gpiod_get(&client->dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(pd->enable_gpio)) {
@@ -458,7 +459,8 @@ static int adp1653_of_init(struct i2c_client *client,
return 0;
err:
dev_err(&client->dev, "Required property not found\n");
- of_node_put(child);
+ of_node_put(node_flash);
+ of_node_put(node_indicator);
return -EINVAL;
}
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 99ba925e8ec8..54134473186b 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -5,6 +5,7 @@
* Copyright (C) 2013 Cogent Embedded, Inc.
* Copyright (C) 2013 Renesas Solutions Corp.
*/
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
@@ -1395,7 +1396,6 @@ out_unlock:
static int adv7180_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct device_node *np = client->dev.of_node;
struct adv7180_state *state;
struct v4l2_subdev *sd;
@@ -1411,7 +1411,7 @@ static int adv7180_probe(struct i2c_client *client)
state->client = client;
state->field = V4L2_FIELD_ALTERNATE;
- state->chip_info = (struct adv7180_chip_info *)id->driver_data;
+ state->chip_info = i2c_get_match_data(client);
state->pwdn_gpio = devm_gpiod_get_optional(&client->dev, "powerdown",
GPIOD_OUT_HIGH);
@@ -1536,22 +1536,6 @@ static void adv7180_remove(struct i2c_client *client)
mutex_destroy(&state->mutex);
}
-static const struct i2c_device_id adv7180_id[] = {
- { "adv7180", (kernel_ulong_t)&adv7180_info },
- { "adv7180cp", (kernel_ulong_t)&adv7180_info },
- { "adv7180st", (kernel_ulong_t)&adv7180_info },
- { "adv7182", (kernel_ulong_t)&adv7182_info },
- { "adv7280", (kernel_ulong_t)&adv7280_info },
- { "adv7280-m", (kernel_ulong_t)&adv7280_m_info },
- { "adv7281", (kernel_ulong_t)&adv7281_info },
- { "adv7281-m", (kernel_ulong_t)&adv7281_m_info },
- { "adv7281-ma", (kernel_ulong_t)&adv7281_ma_info },
- { "adv7282", (kernel_ulong_t)&adv7282_info },
- { "adv7282-m", (kernel_ulong_t)&adv7282_m_info },
- {},
-};
-MODULE_DEVICE_TABLE(i2c, adv7180_id);
-
#ifdef CONFIG_PM_SLEEP
static int adv7180_suspend(struct device *dev)
{
@@ -1585,30 +1569,43 @@ static SIMPLE_DEV_PM_OPS(adv7180_pm_ops, adv7180_suspend, adv7180_resume);
#define ADV7180_PM_OPS NULL
#endif
-#ifdef CONFIG_OF
-static const struct of_device_id adv7180_of_id[] = {
- { .compatible = "adi,adv7180", },
- { .compatible = "adi,adv7180cp", },
- { .compatible = "adi,adv7180st", },
- { .compatible = "adi,adv7182", },
- { .compatible = "adi,adv7280", },
- { .compatible = "adi,adv7280-m", },
- { .compatible = "adi,adv7281", },
- { .compatible = "adi,adv7281-m", },
- { .compatible = "adi,adv7281-ma", },
- { .compatible = "adi,adv7282", },
- { .compatible = "adi,adv7282-m", },
- { },
+static const struct i2c_device_id adv7180_id[] = {
+ { "adv7180", (kernel_ulong_t)&adv7180_info },
+ { "adv7180cp", (kernel_ulong_t)&adv7180_info },
+ { "adv7180st", (kernel_ulong_t)&adv7180_info },
+ { "adv7182", (kernel_ulong_t)&adv7182_info },
+ { "adv7280", (kernel_ulong_t)&adv7280_info },
+ { "adv7280-m", (kernel_ulong_t)&adv7280_m_info },
+ { "adv7281", (kernel_ulong_t)&adv7281_info },
+ { "adv7281-m", (kernel_ulong_t)&adv7281_m_info },
+ { "adv7281-ma", (kernel_ulong_t)&adv7281_ma_info },
+ { "adv7282", (kernel_ulong_t)&adv7282_info },
+ { "adv7282-m", (kernel_ulong_t)&adv7282_m_info },
+ {}
};
+MODULE_DEVICE_TABLE(i2c, adv7180_id);
+static const struct of_device_id adv7180_of_id[] = {
+ { .compatible = "adi,adv7180", &adv7180_info },
+ { .compatible = "adi,adv7180cp", &adv7180_info },
+ { .compatible = "adi,adv7180st", &adv7180_info },
+ { .compatible = "adi,adv7182", &adv7182_info },
+ { .compatible = "adi,adv7280", &adv7280_info },
+ { .compatible = "adi,adv7280-m", &adv7280_m_info },
+ { .compatible = "adi,adv7281", &adv7281_info },
+ { .compatible = "adi,adv7281-m", &adv7281_m_info },
+ { .compatible = "adi,adv7281-ma", &adv7281_ma_info },
+ { .compatible = "adi,adv7282", &adv7282_info },
+ { .compatible = "adi,adv7282-m", &adv7282_m_info },
+ {}
+};
MODULE_DEVICE_TABLE(of, adv7180_of_id);
-#endif
static struct i2c_driver adv7180_driver = {
.driver = {
.name = KBUILD_MODNAME,
.pm = ADV7180_PM_OPS,
- .of_match_table = of_match_ptr(adv7180_of_id),
+ .of_match_table = adv7180_of_id,
},
.probe = adv7180_probe,
.remove = adv7180_remove,
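The adv7180 hunk switches probe() to i2c_get_match_data(), so per-variant chip info hangs off both match tables and is looked up by the core instead of being cast out of id->driver_data by hand. A rough standalone analogue of that lookup, with made-up table entries:

/* Illustrative table lookup only; the entries and struct fields here are
 * invented for the example. */
#include <stdio.h>
#include <string.h>

struct demo_chip_info { int max_width; };

static const struct demo_chip_info demo_adv7180_info = { .max_width = 720 };
static const struct demo_chip_info demo_adv7282_info = { .max_width = 720 };

struct demo_match {
	const char *compatible;
	const struct demo_chip_info *data;
};

static const struct demo_match demo_table[] = {
	{ "adi,adv7180", &demo_adv7180_info },
	{ "adi,adv7282-m", &demo_adv7282_info },
	{ NULL, NULL },
};

static const struct demo_chip_info *demo_get_match_data(const char *compat)
{
	const struct demo_match *m;

	for (m = demo_table; m->compatible; m++)
		if (!strcmp(m->compatible, compat))
			return m->data;
	return NULL;
}

int main(void)
{
	const struct demo_chip_info *info = demo_get_match_data("adi,adv7180");

	printf("max width: %d\n", info ? info->max_width : -1);
	return 0;
}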
diff --git a/drivers/media/i2c/ar0521.c b/drivers/media/i2c/ar0521.c
index a4e39871e8f7..701f36345f1e 100644
--- a/drivers/media/i2c/ar0521.c
+++ b/drivers/media/i2c/ar0521.c
@@ -133,8 +133,6 @@ struct ar0521_dev {
u16 mult2;
u16 vt_pix;
} pll;
-
- bool streaming;
};
static inline struct ar0521_dev *to_ar0521_dev(struct v4l2_subdev *sd)
@@ -991,12 +989,9 @@ static int ar0521_s_stream(struct v4l2_subdev *sd, int enable)
int ret;
mutex_lock(&sensor->lock);
-
ret = ar0521_set_stream(sensor, enable);
- if (!ret)
- sensor->streaming = enable;
-
mutex_unlock(&sensor->lock);
+
return ret;
}
@@ -1023,28 +1018,6 @@ static const struct v4l2_subdev_ops ar0521_subdev_ops = {
.pad = &ar0521_pad_ops,
};
-static int __maybe_unused ar0521_suspend(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ar0521_dev *sensor = to_ar0521_dev(sd);
-
- if (sensor->streaming)
- ar0521_set_stream(sensor, 0);
-
- return 0;
-}
-
-static int __maybe_unused ar0521_resume(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ar0521_dev *sensor = to_ar0521_dev(sd);
-
- if (sensor->streaming)
- return ar0521_set_stream(sensor, 1);
-
- return 0;
-}
-
static int ar0521_probe(struct i2c_client *client)
{
struct v4l2_fwnode_endpoint ep = {
@@ -1183,7 +1156,6 @@ static void ar0521_remove(struct i2c_client *client)
}
static const struct dev_pm_ops ar0521_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ar0521_suspend, ar0521_resume)
SET_RUNTIME_PM_OPS(ar0521_power_off, ar0521_power_on, NULL)
};
static const struct of_device_id ar0521_dt_ids[] = {
diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
index 49e0d9a09530..12e6f0a26fc8 100644
--- a/drivers/media/i2c/ccs/ccs-core.c
+++ b/drivers/media/i2c/ccs/ccs-core.c
@@ -508,9 +508,8 @@ static void __ccs_update_exposure_limits(struct ccs_sensor *sensor)
struct v4l2_ctrl *ctrl = sensor->exposure;
int max;
- max = sensor->pixel_array->crop[CCS_PA_PAD_SRC].height
- + sensor->vblank->val
- - CCS_LIM(sensor, COARSE_INTEGRATION_TIME_MAX_MARGIN);
+ max = sensor->pa_src.height + sensor->vblank->val -
+ CCS_LIM(sensor, COARSE_INTEGRATION_TIME_MAX_MARGIN);
__v4l2_ctrl_modify_range(ctrl, ctrl->minimum, max, ctrl->step, max);
}
@@ -728,15 +727,12 @@ static int ccs_set_ctrl(struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_VBLANK:
rval = ccs_write(sensor, FRAME_LENGTH_LINES,
- sensor->pixel_array->crop[
- CCS_PA_PAD_SRC].height
- + ctrl->val);
+ sensor->pa_src.height + ctrl->val);
break;
case V4L2_CID_HBLANK:
rval = ccs_write(sensor, LINE_LENGTH_PCK,
- sensor->pixel_array->crop[CCS_PA_PAD_SRC].width
- + ctrl->val);
+ sensor->pa_src.width + ctrl->val);
break;
case V4L2_CID_TEST_PATTERN:
@@ -1214,15 +1210,13 @@ static void ccs_update_blanking(struct ccs_sensor *sensor)
min = max_t(int,
CCS_LIM(sensor, MIN_FRAME_BLANKING_LINES),
- min_fll - sensor->pixel_array->crop[CCS_PA_PAD_SRC].height);
- max = max_fll - sensor->pixel_array->crop[CCS_PA_PAD_SRC].height;
+ min_fll - sensor->pa_src.height);
+ max = max_fll - sensor->pa_src.height;
__v4l2_ctrl_modify_range(vblank, min, max, vblank->step, min);
- min = max_t(int,
- min_llp - sensor->pixel_array->crop[CCS_PA_PAD_SRC].width,
- min_lbp);
- max = max_llp - sensor->pixel_array->crop[CCS_PA_PAD_SRC].width;
+ min = max_t(int, min_llp - sensor->pa_src.width, min_lbp);
+ max = max_llp - sensor->pa_src.width;
__v4l2_ctrl_modify_range(hblank, min, max, hblank->step, min);
@@ -1246,10 +1240,8 @@ static int ccs_pll_blanking_update(struct ccs_sensor *sensor)
dev_dbg(&client->dev, "real timeperframe\t100/%d\n",
sensor->pll.pixel_rate_pixel_array /
- ((sensor->pixel_array->crop[CCS_PA_PAD_SRC].width
- + sensor->hblank->val) *
- (sensor->pixel_array->crop[CCS_PA_PAD_SRC].height
- + sensor->vblank->val) / 100));
+ ((sensor->pa_src.width + sensor->hblank->val) *
+ (sensor->pa_src.height + sensor->vblank->val) / 100));
return 0;
}
@@ -1756,28 +1748,22 @@ static int ccs_start_streaming(struct ccs_sensor *sensor)
goto out;
/* Analog crop start coordinates */
- rval = ccs_write(sensor, X_ADDR_START,
- sensor->pixel_array->crop[CCS_PA_PAD_SRC].left);
+ rval = ccs_write(sensor, X_ADDR_START, sensor->pa_src.left);
if (rval < 0)
goto out;
- rval = ccs_write(sensor, Y_ADDR_START,
- sensor->pixel_array->crop[CCS_PA_PAD_SRC].top);
+ rval = ccs_write(sensor, Y_ADDR_START, sensor->pa_src.top);
if (rval < 0)
goto out;
/* Analog crop end coordinates */
- rval = ccs_write(
- sensor, X_ADDR_END,
- sensor->pixel_array->crop[CCS_PA_PAD_SRC].left
- + sensor->pixel_array->crop[CCS_PA_PAD_SRC].width - 1);
+ rval = ccs_write(sensor, X_ADDR_END,
+ sensor->pa_src.left + sensor->pa_src.width - 1);
if (rval < 0)
goto out;
- rval = ccs_write(
- sensor, Y_ADDR_END,
- sensor->pixel_array->crop[CCS_PA_PAD_SRC].top
- + sensor->pixel_array->crop[CCS_PA_PAD_SRC].height - 1);
+ rval = ccs_write(sensor, Y_ADDR_END,
+ sensor->pa_src.top + sensor->pa_src.height - 1);
if (rval < 0)
goto out;
@@ -1789,27 +1775,23 @@ static int ccs_start_streaming(struct ccs_sensor *sensor)
/* Digital crop */
if (CCS_LIM(sensor, DIGITAL_CROP_CAPABILITY)
== CCS_DIGITAL_CROP_CAPABILITY_INPUT_CROP) {
- rval = ccs_write(
- sensor, DIGITAL_CROP_X_OFFSET,
- sensor->scaler->crop[CCS_PAD_SINK].left);
+ rval = ccs_write(sensor, DIGITAL_CROP_X_OFFSET,
+ sensor->scaler_sink.left);
if (rval < 0)
goto out;
- rval = ccs_write(
- sensor, DIGITAL_CROP_Y_OFFSET,
- sensor->scaler->crop[CCS_PAD_SINK].top);
+ rval = ccs_write(sensor, DIGITAL_CROP_Y_OFFSET,
+ sensor->scaler_sink.top);
if (rval < 0)
goto out;
- rval = ccs_write(
- sensor, DIGITAL_CROP_IMAGE_WIDTH,
- sensor->scaler->crop[CCS_PAD_SINK].width);
+ rval = ccs_write(sensor, DIGITAL_CROP_IMAGE_WIDTH,
+ sensor->scaler_sink.width);
if (rval < 0)
goto out;
- rval = ccs_write(
- sensor, DIGITAL_CROP_IMAGE_HEIGHT,
- sensor->scaler->crop[CCS_PAD_SINK].height);
+ rval = ccs_write(sensor, DIGITAL_CROP_IMAGE_HEIGHT,
+ sensor->scaler_sink.height);
if (rval < 0)
goto out;
}
@@ -1827,12 +1809,10 @@ static int ccs_start_streaming(struct ccs_sensor *sensor)
}
/* Output size from sensor */
- rval = ccs_write(sensor, X_OUTPUT_SIZE,
- sensor->src->crop[CCS_PAD_SRC].width);
+ rval = ccs_write(sensor, X_OUTPUT_SIZE, sensor->src_src.width);
if (rval < 0)
goto out;
- rval = ccs_write(sensor, Y_OUTPUT_SIZE,
- sensor->src->crop[CCS_PAD_SRC].height);
+ rval = ccs_write(sensor, Y_OUTPUT_SIZE, sensor->src_src.height);
if (rval < 0)
goto out;
@@ -1923,9 +1903,6 @@ static int ccs_set_stream(struct v4l2_subdev *subdev, int enable)
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
int rval;
- if (sensor->streaming == enable)
- return 0;
-
if (!enable) {
ccs_stop_streaming(sensor);
sensor->streaming = false;
@@ -2053,24 +2030,8 @@ static int __ccs_get_format(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
- struct ccs_subdev *ssd = to_ccs_subdev(subdev);
-
- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- fmt->format = *v4l2_subdev_get_try_format(subdev, sd_state,
- fmt->pad);
- } else {
- struct v4l2_rect *r;
-
- if (fmt->pad == ssd->source_pad)
- r = &ssd->crop[ssd->source_pad];
- else
- r = &ssd->sink_fmt;
-
- fmt->format.code = __ccs_get_mbus_code(subdev, fmt->pad);
- fmt->format.width = r->width;
- fmt->format.height = r->height;
- fmt->format.field = V4L2_FIELD_NONE;
- }
+ fmt->format = *v4l2_subdev_get_pad_format(subdev, sd_state, fmt->pad);
+ fmt->format.code = __ccs_get_mbus_code(subdev, fmt->pad);
return 0;
}
@@ -2092,28 +2053,18 @@ static int ccs_get_format(struct v4l2_subdev *subdev,
static void ccs_get_crop_compose(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *sd_state,
struct v4l2_rect **crops,
- struct v4l2_rect **comps, int which)
+ struct v4l2_rect **comps)
{
struct ccs_subdev *ssd = to_ccs_subdev(subdev);
unsigned int i;
- if (which == V4L2_SUBDEV_FORMAT_ACTIVE) {
- if (crops)
- for (i = 0; i < subdev->entity.num_pads; i++)
- crops[i] = &ssd->crop[i];
- if (comps)
- *comps = &ssd->compose;
- } else {
- if (crops) {
- for (i = 0; i < subdev->entity.num_pads; i++)
- crops[i] = v4l2_subdev_get_try_crop(subdev,
- sd_state,
- i);
- }
- if (comps)
- *comps = v4l2_subdev_get_try_compose(subdev, sd_state,
- CCS_PAD_SINK);
- }
+ if (crops)
+ for (i = 0; i < subdev->entity.num_pads; i++)
+ crops[i] =
+ v4l2_subdev_get_pad_crop(subdev, sd_state, i);
+ if (comps)
+ *comps = v4l2_subdev_get_pad_compose(subdev, sd_state,
+ ssd->sink_pad);
}
/* Changes require propagation only on sink pad. */
@@ -2124,8 +2075,9 @@ static void ccs_propagate(struct v4l2_subdev *subdev,
struct ccs_sensor *sensor = to_ccs_sensor(subdev);
struct ccs_subdev *ssd = to_ccs_subdev(subdev);
struct v4l2_rect *comp, *crops[CCS_PADS];
+ struct v4l2_mbus_framefmt *fmt;
- ccs_get_crop_compose(subdev, sd_state, crops, &comp, which);
+ ccs_get_crop_compose(subdev, sd_state, crops, &comp);
switch (target) {
case V4L2_SEL_TGT_CROP:
@@ -2136,6 +2088,7 @@ static void ccs_propagate(struct v4l2_subdev *subdev,
sensor->scale_m = CCS_LIM(sensor, SCALER_N_MIN);
sensor->scaling_mode =
CCS_SCALING_MODE_NO_SCALING;
+ sensor->scaler_sink = *comp;
} else if (ssd == sensor->binner) {
sensor->binning_horizontal = 1;
sensor->binning_vertical = 1;
@@ -2144,6 +2097,11 @@ static void ccs_propagate(struct v4l2_subdev *subdev,
fallthrough;
case V4L2_SEL_TGT_COMPOSE:
*crops[CCS_PAD_SRC] = *comp;
+ fmt = v4l2_subdev_get_pad_format(subdev, sd_state, CCS_PAD_SRC);
+ fmt->width = comp->width;
+ fmt->height = comp->height;
+ if (which == V4L2_SUBDEV_FORMAT_ACTIVE && ssd == sensor->src)
+ sensor->src_src = *crops[CCS_PAD_SRC];
break;
default:
WARN_ON_ONCE(1);
@@ -2252,14 +2210,12 @@ static int ccs_set_format(struct v4l2_subdev *subdev,
CCS_LIM(sensor, MIN_Y_OUTPUT_SIZE),
CCS_LIM(sensor, MAX_Y_OUTPUT_SIZE));
- ccs_get_crop_compose(subdev, sd_state, crops, NULL, fmt->which);
+ ccs_get_crop_compose(subdev, sd_state, crops, NULL);
crops[ssd->sink_pad]->left = 0;
crops[ssd->sink_pad]->top = 0;
crops[ssd->sink_pad]->width = fmt->format.width;
crops[ssd->sink_pad]->height = fmt->format.height;
- if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- ssd->sink_fmt = *crops[ssd->sink_pad];
ccs_propagate(subdev, sd_state, fmt->which, V4L2_SEL_TGT_CROP);
mutex_unlock(&sensor->mutex);
@@ -2482,7 +2438,7 @@ static int ccs_set_compose(struct v4l2_subdev *subdev,
struct ccs_subdev *ssd = to_ccs_subdev(subdev);
struct v4l2_rect *comp, *crops[CCS_PADS];
- ccs_get_crop_compose(subdev, sd_state, crops, &comp, sel->which);
+ ccs_get_crop_compose(subdev, sd_state, crops, &comp);
sel->r.top = 0;
sel->r.left = 0;
@@ -2501,8 +2457,8 @@ static int ccs_set_compose(struct v4l2_subdev *subdev,
return 0;
}
-static int __ccs_sel_supported(struct v4l2_subdev *subdev,
- struct v4l2_subdev_selection *sel)
+static int ccs_sel_supported(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_selection *sel)
{
struct ccs_sensor *sensor = to_ccs_sensor(subdev);
struct ccs_subdev *ssd = to_ccs_subdev(subdev);
@@ -2545,33 +2501,18 @@ static int ccs_set_crop(struct v4l2_subdev *subdev,
{
struct ccs_sensor *sensor = to_ccs_sensor(subdev);
struct ccs_subdev *ssd = to_ccs_subdev(subdev);
- struct v4l2_rect *src_size, *crops[CCS_PADS];
- struct v4l2_rect _r;
+ struct v4l2_rect src_size = { 0 }, *crops[CCS_PADS], *comp;
- ccs_get_crop_compose(subdev, sd_state, crops, NULL, sel->which);
+ ccs_get_crop_compose(subdev, sd_state, crops, &comp);
- if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
- if (sel->pad == ssd->sink_pad)
- src_size = &ssd->sink_fmt;
- else
- src_size = &ssd->compose;
+ if (sel->pad == ssd->sink_pad) {
+ struct v4l2_mbus_framefmt *mfmt =
+ v4l2_subdev_get_pad_format(subdev, sd_state, sel->pad);
+
+ src_size.width = mfmt->width;
+ src_size.height = mfmt->height;
} else {
- if (sel->pad == ssd->sink_pad) {
- _r.left = 0;
- _r.top = 0;
- _r.width = v4l2_subdev_get_try_format(subdev,
- sd_state,
- sel->pad)
- ->width;
- _r.height = v4l2_subdev_get_try_format(subdev,
- sd_state,
- sel->pad)
- ->height;
- src_size = &_r;
- } else {
- src_size = v4l2_subdev_get_try_compose(
- subdev, sd_state, ssd->sink_pad);
- }
+ src_size = *comp;
}
if (ssd == sensor->src && sel->pad == CCS_PAD_SRC) {
@@ -2579,16 +2520,19 @@ static int ccs_set_crop(struct v4l2_subdev *subdev,
sel->r.top = 0;
}
- sel->r.width = min(sel->r.width, src_size->width);
- sel->r.height = min(sel->r.height, src_size->height);
+ sel->r.width = min(sel->r.width, src_size.width);
+ sel->r.height = min(sel->r.height, src_size.height);
- sel->r.left = min_t(int, sel->r.left, src_size->width - sel->r.width);
- sel->r.top = min_t(int, sel->r.top, src_size->height - sel->r.height);
+ sel->r.left = min_t(int, sel->r.left, src_size.width - sel->r.width);
+ sel->r.top = min_t(int, sel->r.top, src_size.height - sel->r.height);
*crops[sel->pad] = sel->r;
if (ssd != sensor->pixel_array && sel->pad == CCS_PAD_SINK)
ccs_propagate(subdev, sd_state, sel->which, V4L2_SEL_TGT_CROP);
+ else if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE &&
+ ssd == sensor->pixel_array)
+ sensor->pa_src = sel->r;
return 0;
}
@@ -2601,44 +2545,36 @@ static void ccs_get_native_size(struct ccs_subdev *ssd, struct v4l2_rect *r)
r->height = CCS_LIM(ssd->sensor, Y_ADDR_MAX) + 1;
}
-static int __ccs_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_selection *sel)
+static int ccs_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
{
struct ccs_sensor *sensor = to_ccs_sensor(subdev);
struct ccs_subdev *ssd = to_ccs_subdev(subdev);
struct v4l2_rect *comp, *crops[CCS_PADS];
- struct v4l2_rect sink_fmt;
int ret;
- ret = __ccs_sel_supported(subdev, sel);
+ ret = ccs_sel_supported(subdev, sel);
if (ret)
return ret;
- ccs_get_crop_compose(subdev, sd_state, crops, &comp, sel->which);
-
- if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
- sink_fmt = ssd->sink_fmt;
- } else {
- struct v4l2_mbus_framefmt *fmt =
- v4l2_subdev_get_try_format(subdev, sd_state,
- ssd->sink_pad);
-
- sink_fmt.left = 0;
- sink_fmt.top = 0;
- sink_fmt.width = fmt->width;
- sink_fmt.height = fmt->height;
- }
+ ccs_get_crop_compose(subdev, sd_state, crops, &comp);
switch (sel->target) {
case V4L2_SEL_TGT_CROP_BOUNDS:
case V4L2_SEL_TGT_NATIVE_SIZE:
- if (ssd == sensor->pixel_array)
+ if (ssd == sensor->pixel_array) {
ccs_get_native_size(ssd, &sel->r);
- else if (sel->pad == ssd->sink_pad)
- sel->r = sink_fmt;
- else
+ } else if (sel->pad == ssd->sink_pad) {
+ struct v4l2_mbus_framefmt *sink_fmt =
+ v4l2_subdev_get_pad_format(subdev, sd_state,
+ ssd->sink_pad);
+ sel->r.top = sel->r.left = 0;
+ sel->r.width = sink_fmt->width;
+ sel->r.height = sink_fmt->height;
+ } else {
sel->r = *comp;
+ }
break;
case V4L2_SEL_TGT_CROP:
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
@@ -2652,20 +2588,6 @@ static int __ccs_get_selection(struct v4l2_subdev *subdev,
return 0;
}
-static int ccs_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_selection *sel)
-{
- struct ccs_sensor *sensor = to_ccs_sensor(subdev);
- int rval;
-
- mutex_lock(&sensor->mutex);
- rval = __ccs_get_selection(subdev, sd_state, sel);
- mutex_unlock(&sensor->mutex);
-
- return rval;
-}
-
static int ccs_set_selection(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
@@ -2673,7 +2595,7 @@ static int ccs_set_selection(struct v4l2_subdev *subdev,
struct ccs_sensor *sensor = to_ccs_sensor(subdev);
int ret;
- ret = __ccs_sel_supported(subdev, sel);
+ ret = ccs_sel_supported(subdev, sel);
if (ret)
return ret;
@@ -2945,7 +2867,6 @@ static int ccs_identify_module(struct ccs_sensor *sensor)
}
static const struct v4l2_subdev_ops ccs_ops;
-static const struct v4l2_subdev_internal_ops ccs_internal_ops;
static const struct media_entity_operations ccs_entity_ops;
static int ccs_register_subdev(struct ccs_sensor *sensor,
@@ -2959,12 +2880,6 @@ static int ccs_register_subdev(struct ccs_sensor *sensor,
if (!sink_ssd)
return 0;
- rval = media_entity_pads_init(&ssd->sd.entity, ssd->npads, ssd->pads);
- if (rval) {
- dev_err(&client->dev, "media_entity_pads_init failed\n");
- return rval;
- }
-
rval = v4l2_device_register_subdev(sensor->src->sd.v4l2_dev, &ssd->sd);
if (rval) {
dev_err(&client->dev, "v4l2_device_register_subdev failed\n");
@@ -3025,6 +2940,12 @@ out_err:
static void ccs_cleanup(struct ccs_sensor *sensor)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ unsigned int i;
+
+ for (i = 0; i < sensor->ssds_used; i++) {
+ v4l2_subdev_cleanup(&sensor->ssds[i].sd);

+ media_entity_cleanup(&sensor->ssds[i].sd.entity);
+ }
device_remove_file(&client->dev, &dev_attr_nvm);
device_remove_file(&client->dev, &dev_attr_ident);
@@ -3032,14 +2953,17 @@ static void ccs_cleanup(struct ccs_sensor *sensor)
ccs_free_controls(sensor);
}
-static void ccs_create_subdev(struct ccs_sensor *sensor,
- struct ccs_subdev *ssd, const char *name,
- unsigned short num_pads, u32 function)
+static int ccs_init_subdev(struct ccs_sensor *sensor,
+ struct ccs_subdev *ssd, const char *name,
+ unsigned short num_pads, u32 function,
+ const char *lock_name,
+ struct lock_class_key *lock_key)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ int rval;
if (!ssd)
- return;
+ return 0;
if (ssd != sensor->src)
v4l2_subdev_init(&ssd->sd, &ccs_ops);
@@ -3053,57 +2977,70 @@ static void ccs_create_subdev(struct ccs_sensor *sensor,
v4l2_i2c_subdev_set_name(&ssd->sd, client, sensor->minfo.name, name);
- ccs_get_native_size(ssd, &ssd->sink_fmt);
-
- ssd->compose.width = ssd->sink_fmt.width;
- ssd->compose.height = ssd->sink_fmt.height;
- ssd->crop[ssd->source_pad] = ssd->compose;
ssd->pads[ssd->source_pad].flags = MEDIA_PAD_FL_SOURCE;
- if (ssd != sensor->pixel_array) {
- ssd->crop[ssd->sink_pad] = ssd->compose;
+ if (ssd != sensor->pixel_array)
ssd->pads[ssd->sink_pad].flags = MEDIA_PAD_FL_SINK;
- }
ssd->sd.entity.ops = &ccs_entity_ops;
- if (ssd == sensor->src)
- return;
+ if (ssd != sensor->src) {
+ ssd->sd.owner = THIS_MODULE;
+ ssd->sd.dev = &client->dev;
+ v4l2_set_subdevdata(&ssd->sd, client);
+ }
+
+ rval = media_entity_pads_init(&ssd->sd.entity, ssd->npads, ssd->pads);
+ if (rval) {
+ dev_err(&client->dev, "media_entity_pads_init failed\n");
+ return rval;
+ }
- ssd->sd.internal_ops = &ccs_internal_ops;
- ssd->sd.owner = THIS_MODULE;
- ssd->sd.dev = &client->dev;
- v4l2_set_subdevdata(&ssd->sd, client);
+ rval = __v4l2_subdev_init_finalize(&ssd->sd, lock_name, lock_key);
+ if (rval) {
+ media_entity_cleanup(&ssd->sd.entity);
+ return rval;
+ }
+
+ return 0;
}
-static int ccs_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+static int ccs_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct ccs_subdev *ssd = to_ccs_subdev(sd);
struct ccs_sensor *sensor = ssd->sensor;
- unsigned int i;
+ unsigned int pad = ssd == sensor->pixel_array ?
+ CCS_PA_PAD_SRC : CCS_PAD_SINK;
+ struct v4l2_mbus_framefmt *fmt =
+ v4l2_subdev_get_pad_format(sd, sd_state, pad);
+ struct v4l2_rect *crop =
+ v4l2_subdev_get_pad_crop(sd, sd_state, pad);
+ bool is_active = !sd->active_state || sd->active_state == sd_state;
mutex_lock(&sensor->mutex);
- for (i = 0; i < ssd->npads; i++) {
- struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->state, i);
- struct v4l2_rect *try_crop =
- v4l2_subdev_get_try_crop(sd, fh->state, i);
- struct v4l2_rect *try_comp;
+ ccs_get_native_size(ssd, crop);
- ccs_get_native_size(ssd, try_crop);
+ fmt->width = crop->width;
+ fmt->height = crop->height;
+ fmt->code = sensor->internal_csi_format->code;
+ fmt->field = V4L2_FIELD_NONE;
- try_fmt->width = try_crop->width;
- try_fmt->height = try_crop->height;
- try_fmt->code = sensor->internal_csi_format->code;
- try_fmt->field = V4L2_FIELD_NONE;
+ if (ssd == sensor->pixel_array) {
+ if (is_active)
+ sensor->pa_src = *crop;
- if (ssd != sensor->pixel_array)
- continue;
-
- try_comp = v4l2_subdev_get_try_compose(sd, fh->state, i);
- *try_comp = *try_crop;
+ mutex_unlock(&sensor->mutex);
+ return 0;
}
+ fmt = v4l2_subdev_get_pad_format(sd, sd_state, CCS_PAD_SRC);
+ fmt->code = ssd == sensor->src ?
+ sensor->csi_format->code : sensor->internal_csi_format->code;
+ fmt->field = V4L2_FIELD_NONE;
+
+ ccs_propagate(sd, sd_state, is_active, V4L2_SEL_TGT_CROP);
+
mutex_unlock(&sensor->mutex);
return 0;
@@ -3116,6 +3053,7 @@ static const struct v4l2_subdev_video_ops ccs_video_ops = {
};
static const struct v4l2_subdev_pad_ops ccs_pad_ops = {
+ .init_cfg = ccs_init_cfg,
.enum_mbus_code = ccs_enum_mbus_code,
.get_fmt = ccs_get_format,
.set_fmt = ccs_set_format,
@@ -3141,53 +3079,12 @@ static const struct media_entity_operations ccs_entity_ops = {
static const struct v4l2_subdev_internal_ops ccs_internal_src_ops = {
.registered = ccs_registered,
.unregistered = ccs_unregistered,
- .open = ccs_open,
-};
-
-static const struct v4l2_subdev_internal_ops ccs_internal_ops = {
- .open = ccs_open,
};
/* -----------------------------------------------------------------------------
* I2C Driver
*/
-static int __maybe_unused ccs_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct v4l2_subdev *subdev = i2c_get_clientdata(client);
- struct ccs_sensor *sensor = to_ccs_sensor(subdev);
- bool streaming = sensor->streaming;
- int rval;
-
- rval = pm_runtime_resume_and_get(dev);
- if (rval < 0)
- return rval;
-
- if (sensor->streaming)
- ccs_stop_streaming(sensor);
-
- /* save state for resume */
- sensor->streaming = streaming;
-
- return 0;
-}
-
-static int __maybe_unused ccs_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct v4l2_subdev *subdev = i2c_get_clientdata(client);
- struct ccs_sensor *sensor = to_ccs_sensor(subdev);
- int rval = 0;
-
- pm_runtime_put(dev);
-
- if (sensor->streaming)
- rval = ccs_start_streaming(sensor);
-
- return rval;
-}
-
static int ccs_get_hwconfig(struct ccs_sensor *sensor, struct device *dev)
{
struct ccs_hwconfig *hwcfg = &sensor->hwcfg;
@@ -3311,6 +3208,8 @@ static int ccs_firmware_name(struct i2c_client *client,
static int ccs_probe(struct i2c_client *client)
{
+ static struct lock_class_key pixel_array_lock_key, binner_lock_key,
+ scaler_lock_key;
const struct ccs_device *ccsdev = device_get_match_data(&client->dev);
struct ccs_sensor *sensor;
const struct firmware *fw;
@@ -3587,12 +3486,27 @@ static int ccs_probe(struct i2c_client *client)
sensor->pll.ext_clk_freq_hz = sensor->hwcfg.ext_clk;
sensor->pll.scale_n = CCS_LIM(sensor, SCALER_N_MIN);
- ccs_create_subdev(sensor, sensor->scaler, " scaler", 2,
- MEDIA_ENT_F_PROC_VIDEO_SCALER);
- ccs_create_subdev(sensor, sensor->binner, " binner", 2,
- MEDIA_ENT_F_PROC_VIDEO_SCALER);
- ccs_create_subdev(sensor, sensor->pixel_array, " pixel_array", 1,
- MEDIA_ENT_F_CAM_SENSOR);
+ rval = ccs_get_mbus_formats(sensor);
+ if (rval) {
+ rval = -ENODEV;
+ goto out_cleanup;
+ }
+
+ rval = ccs_init_subdev(sensor, sensor->scaler, " scaler", 2,
+ MEDIA_ENT_F_PROC_VIDEO_SCALER,
+ "ccs scaler mutex", &scaler_lock_key);
+ if (rval)
+ goto out_cleanup;
+ rval = ccs_init_subdev(sensor, sensor->binner, " binner", 2,
+ MEDIA_ENT_F_PROC_VIDEO_SCALER,
+ "ccs binner mutex", &binner_lock_key);
+ if (rval)
+ goto out_cleanup;
+ rval = ccs_init_subdev(sensor, sensor->pixel_array, " pixel_array", 1,
+ MEDIA_ENT_F_CAM_SENSOR, "ccs pixel array mutex",
+ &pixel_array_lock_key);
+ if (rval)
+ goto out_cleanup;
rval = ccs_init_controls(sensor);
if (rval < 0)
@@ -3602,12 +3516,6 @@ static int ccs_probe(struct i2c_client *client)
if (rval)
goto out_cleanup;
- rval = ccs_get_mbus_formats(sensor);
- if (rval) {
- rval = -ENODEV;
- goto out_cleanup;
- }
-
rval = ccs_init_late_controls(sensor);
if (rval) {
rval = -ENODEV;
@@ -3625,14 +3533,9 @@ static int ccs_probe(struct i2c_client *client)
sensor->streaming = false;
sensor->dev_init_done = true;
- rval = media_entity_pads_init(&sensor->src->sd.entity, 2,
- sensor->src->pads);
- if (rval < 0)
- goto out_media_entity_cleanup;
-
rval = ccs_write_msr_regs(sensor);
if (rval)
- goto out_media_entity_cleanup;
+ goto out_cleanup;
pm_runtime_set_active(&client->dev);
pm_runtime_get_noresume(&client->dev);
@@ -3652,9 +3555,6 @@ out_disable_runtime_pm:
pm_runtime_put_noidle(&client->dev);
pm_runtime_disable(&client->dev);
-out_media_entity_cleanup:
- media_entity_cleanup(&sensor->src->sd.entity);
-
out_cleanup:
ccs_cleanup(sensor);
@@ -3687,10 +3587,8 @@ static void ccs_remove(struct i2c_client *client)
ccs_power_off(&client->dev);
pm_runtime_set_suspended(&client->dev);
- for (i = 0; i < sensor->ssds_used; i++) {
+ for (i = 0; i < sensor->ssds_used; i++)
v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
- media_entity_cleanup(&sensor->ssds[i].sd.entity);
- }
ccs_cleanup(sensor);
mutex_destroy(&sensor->mutex);
kfree(sensor->ccs_limits);
@@ -3720,7 +3618,6 @@ static const struct of_device_id ccs_of_table[] = {
MODULE_DEVICE_TABLE(of, ccs_of_table);
static const struct dev_pm_ops ccs_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ccs_suspend, ccs_resume)
SET_RUNTIME_PM_OPS(ccs_power_off, ccs_power_on, NULL)
};
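The ccs conversion above replaces per-call TRY/ACTIVE branching with a single init_cfg-style hook that seeds every sub-device state with the same defaults derived from the native sensor size. A standalone sketch of that idea, using stand-in types and example numbers rather than the V4L2 state API:

/* Stand-in types only: seed a state (TRY or ACTIVE alike) once, so later
 * get/set paths can treat both kinds of state identically. The sizes and
 * media bus code below are arbitrary example values. */
#include <stdio.h>

struct demo_rect { int left, top, width, height; };
struct demo_fmt { int width, height, code; };
struct demo_state { struct demo_fmt fmt; struct demo_rect crop; };

static void demo_init_state(struct demo_state *s, int native_w, int native_h,
			    int default_code)
{
	s->crop = (struct demo_rect){ 0, 0, native_w, native_h };
	s->fmt = (struct demo_fmt){ native_w, native_h, default_code };
}

int main(void)
{
	struct demo_state try_state, active_state;

	demo_init_state(&try_state, 4208, 3120, 1);
	demo_init_state(&active_state, 4208, 3120, 1);
	printf("default crop: %dx%d\n", active_state.crop.width,
	       active_state.crop.height);
	return 0;
}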
diff --git a/drivers/media/i2c/ccs/ccs-quirk.h b/drivers/media/i2c/ccs/ccs-quirk.h
index 5838fcda92fd..0b1a64958d71 100644
--- a/drivers/media/i2c/ccs/ccs-quirk.h
+++ b/drivers/media/i2c/ccs/ccs-quirk.h
@@ -32,12 +32,10 @@ struct ccs_sensor;
* @reg: Pointer to the register to access
* @value: Register value, set by the caller on write, or
* by the quirk on read
- *
- * @flags: Quirk flags
- *
* @return: 0 on success, -ENOIOCTLCMD if no register
* access may be done by the caller (default read
* value is zero), else negative error code on error
+ * @flags: Quirk flags
*/
struct ccs_quirk {
int (*limits)(struct ccs_sensor *sensor);
diff --git a/drivers/media/i2c/ccs/ccs.h b/drivers/media/i2c/ccs/ccs.h
index a94c796cea48..9c3587b2fbe7 100644
--- a/drivers/media/i2c/ccs/ccs.h
+++ b/drivers/media/i2c/ccs/ccs.h
@@ -182,9 +182,6 @@ struct ccs_binning_subtype {
struct ccs_subdev {
struct v4l2_subdev sd;
struct media_pad pads[CCS_PADS];
- struct v4l2_rect sink_fmt;
- struct v4l2_rect crop[CCS_PADS];
- struct v4l2_rect compose; /* compose on sink */
unsigned short sink_pad;
unsigned short source_pad;
int npads;
@@ -220,6 +217,7 @@ struct ccs_sensor {
u32 mbus_frame_fmts;
const struct ccs_csi_data_format *csi_format;
const struct ccs_csi_data_format *internal_csi_format;
+ struct v4l2_rect pa_src, scaler_sink, src_src;
u32 default_mbus_frame_fmts;
int default_pixel_order;
struct ccs_data_container sdata, mdata;
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index 5aec25289062..04461c893d90 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -2738,10 +2738,801 @@ static int cx25840_irq_handler(struct v4l2_subdev *sd, u32 status,
#define DIF_BPF_COEFF3435 (0x38c)
#define DIF_BPF_COEFF36 (0x390)
+static const u32 ifhz_coeffs[][19] = {
+ { // 3.0 MHz
+ 0x00000002, 0x00080012, 0x001e0024, 0x001bfff8,
+ 0xffb4ff50, 0xfed8fe68, 0xfe24fe34, 0xfebaffc7,
+ 0x014d031f, 0x04f0065d, 0x07010688, 0x04c901d6,
+ 0xfe00f9d3, 0xf600f342, 0xf235f337, 0xf64efb22,
+ 0x0105070f, 0x0c460fce, 0x110d0000,
+ }, { // 3.1 MHz
+ 0x00000001, 0x00070012, 0x00220032, 0x00370026,
+ 0xfff0ff91, 0xff0efe7c, 0xfe01fdcc, 0xfe0afedb,
+ 0x00440224, 0x0434060c, 0x0738074e, 0x06090361,
+ 0xff99fb39, 0xf6fef3b6, 0xf21af2a5, 0xf573fa33,
+ 0x0034067d, 0x0bfb0fb9, 0x110d0000,
+ }, { // 3.2 MHz
+ 0x00000000, 0x0004000e, 0x00200038, 0x004c004f,
+ 0x002fffdf, 0xff5cfeb6, 0xfe0dfd92, 0xfd7ffe03,
+ 0xff36010a, 0x03410575, 0x072607d2, 0x071804d5,
+ 0x0134fcb7, 0xf81ff451, 0xf223f22e, 0xf4a7f94b,
+ 0xff6405e8, 0x0bae0fa4, 0x110d0000,
+ }, { // 3.3 MHz
+ 0x0000ffff, 0x00000008, 0x001a0036, 0x0056006d,
+ 0x00670030, 0xffbdff10, 0xfe46fd8d, 0xfd25fd4f,
+ 0xfe35ffe0, 0x0224049f, 0x06c9080e, 0x07ef0627,
+ 0x02c9fe45, 0xf961f513, 0xf250f1d2, 0xf3ecf869,
+ 0xfe930552, 0x0b5f0f8f, 0x110d0000,
+ }, { // 3.4 MHz
+ 0xfffffffe, 0xfffd0001, 0x000f002c, 0x0054007d,
+ 0x0093007c, 0x0024ff82, 0xfea6fdbb, 0xfd03fcca,
+ 0xfd51feb9, 0x00eb0392, 0x06270802, 0x08880750,
+ 0x044dffdb, 0xfabdf5f8, 0xf2a0f193, 0xf342f78f,
+ 0xfdc404b9, 0x0b0e0f78, 0x110d0000,
+ }, { // 3.5 MHz
+ 0xfffffffd, 0xfffafff9, 0x0002001b, 0x0046007d,
+ 0x00ad00ba, 0x00870000, 0xff26fe1a, 0xfd1bfc7e,
+ 0xfc99fda4, 0xffa5025c, 0x054507ad, 0x08dd0847,
+ 0x05b80172, 0xfc2ef6ff, 0xf313f170, 0xf2abf6bd,
+ 0xfcf6041f, 0x0abc0f61, 0x110d0000,
+ }, { // 3.6 MHz
+ 0xfffffffd, 0xfff8fff3, 0xfff50006, 0x002f006c,
+ 0x00b200e3, 0x00dc007e, 0xffb9fea0, 0xfd6bfc71,
+ 0xfc17fcb1, 0xfe65010b, 0x042d0713, 0x08ec0906,
+ 0x07020302, 0xfdaff823, 0xf3a7f16a, 0xf228f5f5,
+ 0xfc2a0384, 0x0a670f4a, 0x110d0000,
+ }, { // 3.7 MHz
+ 0x0000fffd, 0xfff7ffef, 0xffe9fff1, 0x0010004d,
+ 0x00a100f2, 0x011a00f0, 0x0053ff44, 0xfdedfca2,
+ 0xfbd3fbef, 0xfd39ffae, 0x02ea0638, 0x08b50987,
+ 0x08230483, 0xff39f960, 0xf45bf180, 0xf1b8f537,
+ 0xfb6102e7, 0x0a110f32, 0x110d0000,
+ }, { // 3.8 MHz
+ 0x0000fffe, 0xfff9ffee, 0xffe1ffdd, 0xfff00024,
+ 0x007c00e5, 0x013a014a, 0x00e6fff8, 0xfe98fd0f,
+ 0xfbd3fb67, 0xfc32fe54, 0x01880525, 0x083909c7,
+ 0x091505ee, 0x00c7fab3, 0xf52df1b4, 0xf15df484,
+ 0xfa9b0249, 0x09ba0f19, 0x110d0000,
+ }, { // 3.9 MHz
+ 0x00000000, 0xfffbfff0, 0xffdeffcf, 0xffd1fff6,
+ 0x004800be, 0x01390184, 0x016300ac, 0xff5efdb1,
+ 0xfc17fb23, 0xfb5cfd0d, 0x001703e4, 0x077b09c4,
+ 0x09d2073c, 0x0251fc18, 0xf61cf203, 0xf118f3dc,
+ 0xf9d801aa, 0x09600eff, 0x110d0000,
+ }, { // 4.0 MHz
+ 0x00000001, 0xfffefff4, 0xffe1ffc8, 0xffbaffca,
+ 0x000b0082, 0x01170198, 0x01c10152, 0x0030fe7b,
+ 0xfc99fb24, 0xfac3fbe9, 0xfea5027f, 0x0683097f,
+ 0x0a560867, 0x03d2fd89, 0xf723f26f, 0xf0e8f341,
+ 0xf919010a, 0x09060ee5, 0x110d0000,
+ }, { // 4.1 MHz
+ 0x00010002, 0x0002fffb, 0xffe8ffca, 0xffacffa4,
+ 0xffcd0036, 0x00d70184, 0x01f601dc, 0x00ffff60,
+ 0xfd51fb6d, 0xfa6efaf5, 0xfd410103, 0x055708f9,
+ 0x0a9e0969, 0x0543ff02, 0xf842f2f5, 0xf0cef2b2,
+ 0xf85e006b, 0x08aa0ecb, 0x110d0000,
+ }, { // 4.2 MHz
+ 0x00010003, 0x00050003, 0xfff3ffd3, 0xffaaff8b,
+ 0xff95ffe5, 0x0080014a, 0x01fe023f, 0x01ba0050,
+ 0xfe35fbf8, 0xfa62fa3b, 0xfbf9ff7e, 0x04010836,
+ 0x0aa90a3d, 0x069f007f, 0xf975f395, 0xf0cbf231,
+ 0xf7a9ffcb, 0x084c0eaf, 0x110d0000,
+ }, { // 4.3 MHz
+ 0x00010003, 0x0008000a, 0x0000ffe4, 0xffb4ff81,
+ 0xff6aff96, 0x001c00f0, 0x01d70271, 0x0254013b,
+ 0xff36fcbd, 0xfa9ff9c5, 0xfadbfdfe, 0x028c073b,
+ 0x0a750adf, 0x07e101fa, 0xfab8f44e, 0xf0ddf1be,
+ 0xf6f9ff2b, 0x07ed0e94, 0x110d0000,
+ }, { // 4.4 MHz
+ 0x00000003, 0x0009000f, 0x000efff8, 0xffc9ff87,
+ 0xff52ff54, 0xffb5007e, 0x01860270, 0x02c00210,
+ 0x0044fdb2, 0xfb22f997, 0xf9f2fc90, 0x0102060f,
+ 0x0a050b4c, 0x0902036e, 0xfc0af51e, 0xf106f15a,
+ 0xf64efe8b, 0x078d0e77, 0x110d0000,
+ }, { // 4.5 MHz
+ 0x00000002, 0x00080012, 0x0019000e, 0xffe5ff9e,
+ 0xff4fff25, 0xff560000, 0x0112023b, 0x02f702c0,
+ 0x014dfec8, 0xfbe5f9b3, 0xf947fb41, 0xff7004b9,
+ 0x095a0b81, 0x0a0004d8, 0xfd65f603, 0xf144f104,
+ 0xf5aafdec, 0x072b0e5a, 0x110d0000,
+ }, { // 4.6 MHz
+ 0x00000001, 0x00060012, 0x00200022, 0x0005ffc1,
+ 0xff61ff10, 0xff09ff82, 0x008601d7, 0x02f50340,
+ 0x0241fff0, 0xfcddfa19, 0xf8e2fa1e, 0xfde30343,
+ 0x08790b7f, 0x0ad50631, 0xfec7f6fc, 0xf198f0bd,
+ 0xf50dfd4e, 0x06c90e3d, 0x110d0000,
+ }, { // 4.7 MHz
+ 0x0000ffff, 0x0003000f, 0x00220030, 0x0025ffed,
+ 0xff87ff15, 0xfed6ff10, 0xffed014c, 0x02b90386,
+ 0x03110119, 0xfdfefac4, 0xf8c6f92f, 0xfc6701b7,
+ 0x07670b44, 0x0b7e0776, 0x002df807, 0xf200f086,
+ 0xf477fcb1, 0x06650e1e, 0x110d0000,
+ }, { // 4.8 MHz
+ 0xfffffffe, 0xffff0009, 0x001e0038, 0x003f001b,
+ 0xffbcff36, 0xfec2feb6, 0xff5600a5, 0x0248038d,
+ 0x03b00232, 0xff39fbab, 0xf8f4f87f, 0xfb060020,
+ 0x062a0ad2, 0x0bf908a3, 0x0192f922, 0xf27df05e,
+ 0xf3e8fc14, 0x06000e00, 0x110d0000,
+ }, { // 4.9 MHz
+ 0xfffffffd, 0xfffc0002, 0x00160037, 0x00510046,
+ 0xfff9ff6d, 0xfed0fe7c, 0xfecefff0, 0x01aa0356,
+ 0x0413032b, 0x007ffcc5, 0xf96cf812, 0xf9cefe87,
+ 0x04c90a2c, 0x0c4309b4, 0x02f3fa4a, 0xf30ef046,
+ 0xf361fb7a, 0x059b0de0, 0x110d0000,
+ }, { // 5.0 MHz
+ 0xfffffffd, 0xfff9fffa, 0x000a002d, 0x00570067,
+ 0x0037ffb5, 0xfefffe68, 0xfe62ff3d, 0x00ec02e3,
+ 0x043503f6, 0x01befe05, 0xfa27f7ee, 0xf8c6fcf8,
+ 0x034c0954, 0x0c5c0aa4, 0x044cfb7e, 0xf3b1f03f,
+ 0xf2e2fae1, 0x05340dc0, 0x110d0000,
+ }, { // 5.1 MHz
+ 0x0000fffd, 0xfff8fff4, 0xfffd001e, 0x0051007b,
+ 0x006e0006, 0xff48fe7c, 0xfe1bfe9a, 0x001d023e,
+ 0x04130488, 0x02e6ff5b, 0xfb1ef812, 0xf7f7fb7f,
+ 0x01bc084e, 0x0c430b72, 0x059afcba, 0xf467f046,
+ 0xf26cfa4a, 0x04cd0da0, 0x110d0000,
+ }, { // 5.2 MHz
+ 0x0000fffe, 0xfff8ffef, 0xfff00009, 0x003f007f,
+ 0x00980056, 0xffa5feb6, 0xfe00fe15, 0xff4b0170,
+ 0x03b004d7, 0x03e800b9, 0xfc48f87f, 0xf768fa23,
+ 0x0022071f, 0x0bf90c1b, 0x06dafdfd, 0xf52df05e,
+ 0xf1fef9b5, 0x04640d7f, 0x110d0000,
+ }, { // 5.3 MHz
+ 0x0000ffff, 0xfff9ffee, 0xffe6fff3, 0x00250072,
+ 0x00af009c, 0x000cff10, 0xfe13fdb8, 0xfe870089,
+ 0x031104e1, 0x04b8020f, 0xfd98f92f, 0xf71df8f0,
+ 0xfe8805ce, 0x0b7e0c9c, 0x0808ff44, 0xf603f086,
+ 0xf19af922, 0x03fb0d5e, 0x110d0000,
+ }, { // 5.4 MHz
+ 0x00000001, 0xfffcffef, 0xffe0ffe0, 0x00050056,
+ 0x00b000d1, 0x0071ff82, 0xfe53fd8c, 0xfddfff99,
+ 0x024104a3, 0x054a034d, 0xff01fa1e, 0xf717f7ed,
+ 0xfcf50461, 0x0ad50cf4, 0x0921008d, 0xf6e7f0bd,
+ 0xf13ff891, 0x03920d3b, 0x110d0000,
+ }, { // 5.5 MHz
+ 0x00010002, 0xfffffff3, 0xffdeffd1, 0xffe5002f,
+ 0x009c00ed, 0x00cb0000, 0xfebafd94, 0xfd61feb0,
+ 0x014d0422, 0x05970464, 0x0074fb41, 0xf759f721,
+ 0xfb7502de, 0x0a000d21, 0x0a2201d4, 0xf7d9f104,
+ 0xf0edf804, 0x03280d19, 0x110d0000,
+ }, { // 5.6 MHz
+ 0x00010003, 0x0003fffa, 0xffe3ffc9, 0xffc90002,
+ 0x007500ef, 0x010e007e, 0xff3dfdcf, 0xfd16fddd,
+ 0x00440365, 0x059b0548, 0x01e3fc90, 0xf7dff691,
+ 0xfa0f014d, 0x09020d23, 0x0b0a0318, 0xf8d7f15a,
+ 0xf0a5f779, 0x02bd0cf6, 0x110d0000,
+ }, { // 5.7 MHz
+ 0x00010003, 0x00060001, 0xffecffc9, 0xffb4ffd4,
+ 0x004000d5, 0x013600f0, 0xffd3fe39, 0xfd04fd31,
+ 0xff360277, 0x055605ef, 0x033efdfe, 0xf8a5f642,
+ 0xf8cbffb6, 0x07e10cfb, 0x0bd50456, 0xf9dff1be,
+ 0xf067f6f2, 0x02520cd2, 0x110d0000,
+ }, { // 5.8 MHz
+ 0x00000003, 0x00080009, 0xfff8ffd2, 0xffaaffac,
+ 0x000200a3, 0x013c014a, 0x006dfec9, 0xfd2bfcb7,
+ 0xfe350165, 0x04cb0651, 0x0477ff7e, 0xf9a5f635,
+ 0xf7b1fe20, 0x069f0ca8, 0x0c81058b, 0xfaf0f231,
+ 0xf033f66d, 0x01e60cae, 0x110d0000,
+ }, { // 5.9 MHz
+ 0x00000002, 0x0009000e, 0x0005ffe1, 0xffacff90,
+ 0xffc5005f, 0x01210184, 0x00fcff72, 0xfd8afc77,
+ 0xfd51003f, 0x04020669, 0x05830103, 0xfad7f66b,
+ 0xf6c8fc93, 0x05430c2b, 0x0d0d06b5, 0xfc08f2b2,
+ 0xf00af5ec, 0x017b0c89, 0x110d0000,
+ }, { // 6.0 MHz
+ 0x00000001, 0x00070012, 0x0012fff5, 0xffbaff82,
+ 0xff8e000f, 0x00e80198, 0x01750028, 0xfe18fc75,
+ 0xfc99ff15, 0x03050636, 0x0656027f, 0xfc32f6e2,
+ 0xf614fb17, 0x03d20b87, 0x0d7707d2, 0xfd26f341,
+ 0xefeaf56f, 0x010f0c64, 0x110d0000,
+ }, { // 6.1 MHz
+ 0xffff0000, 0x00050012, 0x001c000b, 0xffd1ff84,
+ 0xff66ffbe, 0x00960184, 0x01cd00da, 0xfeccfcb2,
+ 0xfc17fdf9, 0x01e005bc, 0x06e703e4, 0xfdabf798,
+ 0xf599f9b3, 0x02510abd, 0x0dbf08df, 0xfe48f3dc,
+ 0xefd5f4f6, 0x00a20c3e, 0x110d0000,
+ }, { // 6.2 MHz
+ 0xfffffffe, 0x0002000f, 0x0021001f, 0xfff0ff97,
+ 0xff50ff74, 0x0034014a, 0x01fa0179, 0xff97fd2a,
+ 0xfbd3fcfa, 0x00a304fe, 0x07310525, 0xff37f886,
+ 0xf55cf86e, 0x00c709d0, 0x0de209db, 0xff6df484,
+ 0xefcbf481, 0x00360c18, 0x110d0000,
+ }, { // 6.3 MHz
+ 0xfffffffd, 0xfffe000a, 0x0021002f, 0x0010ffb8,
+ 0xff50ff3b, 0xffcc00f0, 0x01fa01fa, 0x0069fdd4,
+ 0xfbd3fc26, 0xff5d0407, 0x07310638, 0x00c9f9a8,
+ 0xf55cf74e, 0xff3908c3, 0x0de20ac3, 0x0093f537,
+ 0xefcbf410, 0xffca0bf2, 0x110d0000,
+ }, { // 6.4 MHz
+ 0xfffffffd, 0xfffb0003, 0x001c0037, 0x002fffe2,
+ 0xff66ff17, 0xff6a007e, 0x01cd0251, 0x0134fea5,
+ 0xfc17fb8b, 0xfe2002e0, 0x06e70713, 0x0255faf5,
+ 0xf599f658, 0xfdaf0799, 0x0dbf0b96, 0x01b8f5f5,
+ 0xefd5f3a3, 0xff5e0bca, 0x110d0000,
+ }, { // 6.5 MHz
+ 0x0000fffd, 0xfff9fffb, 0x00120037, 0x00460010,
+ 0xff8eff0f, 0xff180000, 0x01750276, 0x01e8ff8d,
+ 0xfc99fb31, 0xfcfb0198, 0x065607ad, 0x03cefc64,
+ 0xf614f592, 0xfc2e0656, 0x0d770c52, 0x02daf6bd,
+ 0xefeaf33b, 0xfef10ba3, 0x110d0000,
+ }, { // 6.6 MHz
+ 0x0000fffe, 0xfff7fff5, 0x0005002f, 0x0054003c,
+ 0xffc5ff22, 0xfedfff82, 0x00fc0267, 0x0276007e,
+ 0xfd51fb1c, 0xfbfe003e, 0x05830802, 0x0529fdec,
+ 0xf6c8f4fe, 0xfabd04ff, 0x0d0d0cf6, 0x03f8f78f,
+ 0xf00af2d7, 0xfe850b7b, 0x110d0000,
+ }, { // 6.7 MHz
+ 0x0000ffff, 0xfff8fff0, 0xfff80020, 0x00560060,
+ 0x0002ff4e, 0xfec4ff10, 0x006d0225, 0x02d50166,
+ 0xfe35fb4e, 0xfb35fee1, 0x0477080e, 0x065bff82,
+ 0xf7b1f4a0, 0xf9610397, 0x0c810d80, 0x0510f869,
+ 0xf033f278, 0xfe1a0b52, 0x110d0000,
+ }, { // 6.8 MHz
+ 0x00010000, 0xfffaffee, 0xffec000c, 0x004c0078,
+ 0x0040ff8e, 0xfecafeb6, 0xffd301b6, 0x02fc0235,
+ 0xff36fbc5, 0xfaaafd90, 0x033e07d2, 0x075b011b,
+ 0xf8cbf47a, 0xf81f0224, 0x0bd50def, 0x0621f94b,
+ 0xf067f21e, 0xfdae0b29, 0x110d0000,
+ }, { // 6.9 MHz
+ 0x00010001, 0xfffdffef, 0xffe3fff6, 0x0037007f,
+ 0x0075ffdc, 0xfef2fe7c, 0xff3d0122, 0x02ea02dd,
+ 0x0044fc79, 0xfa65fc5d, 0x01e3074e, 0x082102ad,
+ 0xfa0ff48c, 0xf6fe00a9, 0x0b0a0e43, 0x0729fa33,
+ 0xf0a5f1c9, 0xfd430b00, 0x110d0000,
+ }, { // 7.0 MHz
+ 0x00010002, 0x0001fff3, 0xffdeffe2, 0x001b0076,
+ 0x009c002d, 0xff35fe68, 0xfeba0076, 0x029f0352,
+ 0x014dfd60, 0xfa69fb53, 0x00740688, 0x08a7042d,
+ 0xfb75f4d6, 0xf600ff2d, 0x0a220e7a, 0x0827fb22,
+ 0xf0edf17a, 0xfcd80ad6, 0x110d0000,
+ }, { // 7.1 MHz
+ 0x00000003, 0x0004fff9, 0xffe0ffd2, 0xfffb005e,
+ 0x00b0007a, 0xff8ffe7c, 0xfe53ffc1, 0x0221038c,
+ 0x0241fe6e, 0xfab6fa80, 0xff010587, 0x08e90590,
+ 0xfcf5f556, 0xf52bfdb3, 0x09210e95, 0x0919fc15,
+ 0xf13ff12f, 0xfc6e0aab, 0x110d0000,
+ }, { // 7.2 MHz
+ 0x00000003, 0x00070000, 0xffe6ffc9, 0xffdb0039,
+ 0x00af00b8, 0xfff4feb6, 0xfe13ff10, 0x01790388,
+ 0x0311ff92, 0xfb48f9ed, 0xfd980453, 0x08e306cd,
+ 0xfe88f60a, 0xf482fc40, 0x08080e93, 0x09fdfd0c,
+ 0xf19af0ea, 0xfc050a81, 0x110d0000,
+ }, { // 7.3 MHz
+ 0x00000002, 0x00080008, 0xfff0ffc9, 0xffc1000d,
+ 0x009800e2, 0x005bff10, 0xfe00fe74, 0x00b50345,
+ 0x03b000bc, 0xfc18f9a1, 0xfc4802f9, 0x089807dc,
+ 0x0022f6f0, 0xf407fada, 0x06da0e74, 0x0ad3fe06,
+ 0xf1fef0ab, 0xfb9c0a55, 0x110d0000,
+ }, { // 7.4 MHz
+ 0x00000001, 0x0008000e, 0xfffdffd0, 0xffafffdf,
+ 0x006e00f2, 0x00b8ff82, 0xfe1bfdf8, 0xffe302c8,
+ 0x041301dc, 0xfd1af99e, 0xfb1e0183, 0x080908b5,
+ 0x01bcf801, 0xf3bdf985, 0x059a0e38, 0x0b99ff03,
+ 0xf26cf071, 0xfb330a2a, 0x110d0000,
+ }, { // 7.5 MHz
+ 0xffff0000, 0x00070011, 0x000affdf, 0xffa9ffb5,
+ 0x003700e6, 0x01010000, 0xfe62fda8, 0xff140219,
+ 0x043502e1, 0xfe42f9e6, 0xfa270000, 0x073a0953,
+ 0x034cf939, 0xf3a4f845, 0x044c0de1, 0x0c4f0000,
+ 0xf2e2f03c, 0xfacc09fe, 0x110d0000,
+ }, { // 7.6 MHz
+ 0xffffffff, 0x00040012, 0x0016fff3, 0xffafff95,
+ 0xfff900c0, 0x0130007e, 0xfecefd89, 0xfe560146,
+ 0x041303bc, 0xff81fa76, 0xf96cfe7d, 0x063209b1,
+ 0x04c9fa93, 0xf3bdf71e, 0x02f30d6e, 0x0cf200fd,
+ 0xf361f00e, 0xfa6509d1, 0x110d0000,
+ }, { // 7.7 MHz
+ 0xfffffffe, 0x00010010, 0x001e0008, 0xffc1ff84,
+ 0xffbc0084, 0x013e00f0, 0xff56fd9f, 0xfdb8005c,
+ 0x03b00460, 0x00c7fb45, 0xf8f4fd07, 0x04fa09ce,
+ 0x062afc07, 0xf407f614, 0x01920ce0, 0x0d8301fa,
+ 0xf3e8efe5, 0xfa0009a4, 0x110d0000,
+ }, { // 7.8 MHz
+ 0x0000fffd, 0xfffd000b, 0x0022001d, 0xffdbff82,
+ 0xff870039, 0x012a014a, 0xffedfde7, 0xfd47ff6b,
+ 0x031104c6, 0x0202fc4c, 0xf8c6fbad, 0x039909a7,
+ 0x0767fd8e, 0xf482f52b, 0x002d0c39, 0x0e0002f4,
+ 0xf477efc2, 0xf99b0977, 0x110d0000,
+ }, { // 7.9 MHz
+ 0x0000fffd, 0xfffa0004, 0x0020002d, 0xfffbff91,
+ 0xff61ffe8, 0x00f70184, 0x0086fe5c, 0xfd0bfe85,
+ 0x024104e5, 0x0323fd7d, 0xf8e2fa79, 0x021d093f,
+ 0x0879ff22, 0xf52bf465, 0xfec70b79, 0x0e6803eb,
+ 0xf50defa5, 0xf937094a, 0x110d0000,
+ }, { // 8.0 MHz
+ 0x0000fffe, 0xfff8fffd, 0x00190036, 0x001bffaf,
+ 0xff4fff99, 0x00aa0198, 0x0112fef3, 0xfd09fdb9,
+ 0x014d04be, 0x041bfecc, 0xf947f978, 0x00900897,
+ 0x095a00b9, 0xf600f3c5, 0xfd650aa3, 0x0ebc04de,
+ 0xf5aaef8e, 0xf8d5091c, 0x110d0000,
+ }, { // 8.1 MHz
+ 0x0000ffff, 0xfff7fff6, 0x000e0038, 0x0037ffd7,
+ 0xff52ff56, 0x004b0184, 0x0186ffa1, 0xfd40fd16,
+ 0x00440452, 0x04de0029, 0xf9f2f8b2, 0xfefe07b5,
+ 0x0a05024d, 0xf6fef34d, 0xfc0a09b8, 0x0efa05cd,
+ 0xf64eef7d, 0xf87308ed, 0x110d0000,
+ }, { // 8.2 MHz
+ 0x00010000, 0xfff8fff0, 0x00000031, 0x004c0005,
+ 0xff6aff27, 0xffe4014a, 0x01d70057, 0xfdacfca6,
+ 0xff3603a7, 0x05610184, 0xfadbf82e, 0xfd74069f,
+ 0x0a7503d6, 0xf81ff2ff, 0xfab808b9, 0x0f2306b5,
+ 0xf6f9ef72, 0xf81308bf, 0x110d0000,
+ }, { // 8.3 MHz
+ 0x00010001, 0xfffbffee, 0xfff30022, 0x00560032,
+ 0xff95ff10, 0xff8000f0, 0x01fe0106, 0xfe46fc71,
+ 0xfe3502c7, 0x059e02ce, 0xfbf9f7f2, 0xfbff055b,
+ 0x0aa9054c, 0xf961f2db, 0xf97507aa, 0x0f350797,
+ 0xf7a9ef6d, 0xf7b40890, 0x110d0000,
+ }, { // 8.4 MHz
+ 0x00010002, 0xfffeffee, 0xffe8000f, 0x00540058,
+ 0xffcdff14, 0xff29007e, 0x01f6019e, 0xff01fc7c,
+ 0xfd5101bf, 0x059203f6, 0xfd41f7fe, 0xfaa903f3,
+ 0x0a9e06a9, 0xfabdf2e2, 0xf842068b, 0x0f320871,
+ 0xf85eef6e, 0xf7560860, 0x110d0000,
+ }, { // 8.5 MHz
+ 0x00000003, 0x0002fff2, 0xffe1fff9, 0x00460073,
+ 0x000bff34, 0xfee90000, 0x01c10215, 0xffd0fcc5,
+ 0xfc99009d, 0x053d04f1, 0xfea5f853, 0xf97d0270,
+ 0x0a5607e4, 0xfc2ef314, 0xf723055f, 0x0f180943,
+ 0xf919ef75, 0xf6fa0830, 0x110d0000,
+ }, { // 8.6 MHz
+ 0x00000003, 0x0005fff8, 0xffdeffe4, 0x002f007f,
+ 0x0048ff6b, 0xfec7ff82, 0x0163025f, 0x00a2fd47,
+ 0xfc17ff73, 0x04a405b2, 0x0017f8ed, 0xf88500dc,
+ 0x09d208f9, 0xfdaff370, 0xf61c0429, 0x0ee80a0b,
+ 0xf9d8ef82, 0xf6a00800, 0x110d0000,
+ }, { // 8.7 MHz
+ 0x00000003, 0x0007ffff, 0xffe1ffd4, 0x0010007a,
+ 0x007cffb2, 0xfec6ff10, 0x00e60277, 0x0168fdf9,
+ 0xfbd3fe50, 0x03ce0631, 0x0188f9c8, 0xf7c7ff43,
+ 0x091509e3, 0xff39f3f6, 0xf52d02ea, 0x0ea30ac9,
+ 0xfa9bef95, 0xf64607d0, 0x110d0000,
+ }, { // 8.8 MHz
+ 0x00000002, 0x00090007, 0xffe9ffca, 0xfff00065,
+ 0x00a10003, 0xfee6feb6, 0x0053025b, 0x0213fed0,
+ 0xfbd3fd46, 0x02c70668, 0x02eafadb, 0xf74bfdae,
+ 0x08230a9c, 0x00c7f4a3, 0xf45b01a6, 0x0e480b7c,
+ 0xfb61efae, 0xf5ef079f, 0x110d0000,
+ }, { // 8.9 MHz
+ 0xffff0000, 0x0008000d, 0xfff5ffc8, 0xffd10043,
+ 0x00b20053, 0xff24fe7c, 0xffb9020c, 0x0295ffbb,
+ 0xfc17fc64, 0x019b0654, 0x042dfc1c, 0xf714fc2a,
+ 0x07020b21, 0x0251f575, 0xf3a7005e, 0x0dd80c24,
+ 0xfc2aefcd, 0xf599076e, 0x110d0000,
+ }, { // 9.0 MHz
+ 0xffffffff, 0x00060011, 0x0002ffcf, 0xffba0018,
+ 0x00ad009a, 0xff79fe68, 0xff260192, 0x02e500ab,
+ 0xfc99fbb6, 0x005b05f7, 0x0545fd81, 0xf723fabf,
+ 0x05b80b70, 0x03d2f669, 0xf313ff15, 0x0d550cbf,
+ 0xfcf6eff2, 0xf544073d, 0x110d0000,
+ }, { // 9.1 MHz
+ 0xfffffffe, 0x00030012, 0x000fffdd, 0xffacffea,
+ 0x009300cf, 0xffdcfe7c, 0xfea600f7, 0x02fd0190,
+ 0xfd51fb46, 0xff150554, 0x0627fefd, 0xf778f978,
+ 0x044d0b87, 0x0543f77d, 0xf2a0fdcf, 0x0cbe0d4e,
+ 0xfdc4f01d, 0xf4f2070b, 0x110d0000,
+ }, { // 9.2 MHz
+ 0x0000fffd, 0x00000010, 0x001afff0, 0xffaaffbf,
+ 0x006700ed, 0x0043feb6, 0xfe460047, 0x02db0258,
+ 0xfe35fb1b, 0xfddc0473, 0x06c90082, 0xf811f85e,
+ 0x02c90b66, 0x069ff8ad, 0xf250fc8d, 0x0c140dcf,
+ 0xfe93f04d, 0xf4a106d9, 0x110d0000,
+ }, { // 9.3 MHz
+ 0x0000fffd, 0xfffc000c, 0x00200006, 0xffb4ff9c,
+ 0x002f00ef, 0x00a4ff10, 0xfe0dff92, 0x028102f7,
+ 0xff36fb37, 0xfcbf035e, 0x07260202, 0xf8e8f778,
+ 0x01340b0d, 0x07e1f9f4, 0xf223fb51, 0x0b590e42,
+ 0xff64f083, 0xf45206a7, 0x110d0000,
+ }, { // 9.4 MHz
+ 0x0000fffd, 0xfff90005, 0x0022001a, 0xffc9ff86,
+ 0xfff000d7, 0x00f2ff82, 0xfe01fee5, 0x01f60362,
+ 0x0044fb99, 0xfbcc0222, 0x07380370, 0xf9f7f6cc,
+ 0xff990a7e, 0x0902fb50, 0xf21afa1f, 0x0a8d0ea6,
+ 0x0034f0bf, 0xf4050675, 0x110d0000,
+ }, { // 9.5 MHz
+ 0x0000fffe, 0xfff8fffe, 0x001e002b, 0xffe5ff81,
+ 0xffb400a5, 0x01280000, 0xfe24fe50, 0x01460390,
+ 0x014dfc3a, 0xfb1000ce, 0x070104bf, 0xfb37f65f,
+ 0xfe0009bc, 0x0a00fcbb, 0xf235f8f8, 0x09b20efc,
+ 0x0105f101, 0xf3ba0642, 0x110d0000,
+ }, { // 9.6 MHz
+ 0x0001ffff, 0xfff8fff7, 0x00150036, 0x0005ff8c,
+ 0xff810061, 0x013d007e, 0xfe71fddf, 0x007c0380,
+ 0x0241fd13, 0xfa94ff70, 0x068005e2, 0xfc9bf633,
+ 0xfc7308ca, 0x0ad5fe30, 0xf274f7e0, 0x08c90f43,
+ 0x01d4f147, 0xf371060f, 0x110d0000,
+ }, { // 9.7 MHz
+ 0x00010001, 0xfff9fff1, 0x00090038, 0x0025ffa7,
+ 0xff5e0012, 0x013200f0, 0xfee3fd9b, 0xffaa0331,
+ 0x0311fe15, 0xfa60fe18, 0x05bd06d1, 0xfe1bf64a,
+ 0xfafa07ae, 0x0b7effab, 0xf2d5f6d7, 0x07d30f7a,
+ 0x02a3f194, 0xf32905dc, 0x110d0000,
+ }, { // 9.8 MHz
+ 0x00010002, 0xfffcffee, 0xfffb0032, 0x003fffcd,
+ 0xff4effc1, 0x0106014a, 0xff6efd8a, 0xfedd02aa,
+ 0x03b0ff34, 0xfa74fcd7, 0x04bf0781, 0xffaaf6a3,
+ 0xf99e066b, 0x0bf90128, 0xf359f5e1, 0x06d20fa2,
+ 0x0370f1e5, 0xf2e405a8, 0x110d0000,
+ }, { // 9.9 MHz
+ 0x00000003, 0xffffffee, 0xffef0024, 0x0051fffa,
+ 0xff54ff77, 0x00be0184, 0x0006fdad, 0xfe2701f3,
+ 0x0413005e, 0xfad1fbba, 0x039007ee, 0x013bf73d,
+ 0xf868050a, 0x0c4302a1, 0xf3fdf4fe, 0x05c70fba,
+ 0x043bf23c, 0xf2a10575, 0x110d0000,
+ }, { // 10.0 MHz
+ 0x00000003, 0x0003fff1, 0xffe50011, 0x00570027,
+ 0xff70ff3c, 0x00620198, 0x009efe01, 0xfd95011a,
+ 0x04350183, 0xfb71fad0, 0x023c0812, 0x02c3f811,
+ 0xf75e0390, 0x0c5c0411, 0xf4c1f432, 0x04b30fc1,
+ 0x0503f297, 0xf2610541, 0x110d0000,
+ }, { // 10.1 MHz
+ 0x00000003, 0x0006fff7, 0xffdffffc, 0x00510050,
+ 0xff9dff18, 0xfffc0184, 0x0128fe80, 0xfd32002e,
+ 0x04130292, 0xfc4dfa21, 0x00d107ee, 0x0435f91c,
+ 0xf6850205, 0x0c430573, 0xf5a1f37d, 0x03990fba,
+ 0x05c7f2f8, 0xf222050d, 0x110d0000,
+ }, { // 10.2 MHz
+ 0x00000002, 0x0008fffe, 0xffdfffe7, 0x003f006e,
+ 0xffd6ff0f, 0xff96014a, 0x0197ff1f, 0xfd05ff3e,
+ 0x03b0037c, 0xfd59f9b7, 0xff5d0781, 0x0585fa56,
+ 0xf5e4006f, 0x0bf906c4, 0xf69df2e0, 0x02790fa2,
+ 0x0688f35d, 0xf1e604d8, 0x110d0000,
+ }, { // 10.3 MHz
+ 0xffff0001, 0x00090005, 0xffe4ffd6, 0x0025007e,
+ 0x0014ff20, 0xff3c00f0, 0x01e1ffd0, 0xfd12fe5c,
+ 0x03110433, 0xfe88f996, 0xfdf106d1, 0x06aafbb7,
+ 0xf57efed8, 0x0b7e07ff, 0xf7b0f25e, 0x01560f7a,
+ 0x0745f3c7, 0xf1ac04a4, 0x110d0000,
+ }, { // 10.4 MHz
+ 0xffffffff, 0x0008000c, 0xffedffcb, 0x0005007d,
+ 0x0050ff4c, 0xfef6007e, 0x01ff0086, 0xfd58fd97,
+ 0x024104ad, 0xffcaf9c0, 0xfc9905e2, 0x079afd35,
+ 0xf555fd46, 0x0ad50920, 0xf8d9f1f6, 0x00310f43,
+ 0x07fdf435, 0xf174046f, 0x110d0000,
+ }, { // 10.5 MHz
+ 0xfffffffe, 0x00050011, 0xfffaffc8, 0xffe5006b,
+ 0x0082ff8c, 0xfecc0000, 0x01f00130, 0xfdd2fcfc,
+ 0x014d04e3, 0x010efa32, 0xfb6404bf, 0x084efec5,
+ 0xf569fbc2, 0x0a000a23, 0xfa15f1ab, 0xff0b0efc,
+ 0x08b0f4a7, 0xf13f043a, 0x110d0000,
+ }, { // 10.6 MHz
+ 0x0000fffd, 0x00020012, 0x0007ffcd, 0xffc9004c,
+ 0x00a4ffd9, 0xfec3ff82, 0x01b401c1, 0xfe76fc97,
+ 0x004404d2, 0x0245fae8, 0xfa5f0370, 0x08c1005f,
+ 0xf5bcfa52, 0x09020b04, 0xfb60f17b, 0xfde70ea6,
+ 0x095df51e, 0xf10c0405, 0x110d0000,
+ }, { // 10.7 MHz
+ 0x0000fffd, 0xffff0011, 0x0014ffdb, 0xffb40023,
+ 0x00b2002a, 0xfedbff10, 0x0150022d, 0xff38fc6f,
+ 0xff36047b, 0x035efbda, 0xf9940202, 0x08ee01f5,
+ 0xf649f8fe, 0x07e10bc2, 0xfcb6f169, 0xfcc60e42,
+ 0x0a04f599, 0xf0db03d0, 0x110d0000,
+ }, { // 10.8 MHz
+ 0x0000fffd, 0xfffb000d, 0x001dffed, 0xffaafff5,
+ 0x00aa0077, 0xff13feb6, 0x00ce026b, 0x000afc85,
+ 0xfe3503e3, 0x044cfcfb, 0xf90c0082, 0x08d5037f,
+ 0xf710f7cc, 0x069f0c59, 0xfe16f173, 0xfbaa0dcf,
+ 0x0aa5f617, 0xf0ad039b, 0x110d0000,
+ }, { // 10.9 MHz
+ 0x0000fffe, 0xfff90006, 0x00210003, 0xffacffc8,
+ 0x008e00b6, 0xff63fe7c, 0x003a0275, 0x00dafcda,
+ 0xfd510313, 0x0501fe40, 0xf8cbfefd, 0x087604f0,
+ 0xf80af6c2, 0x05430cc8, 0xff7af19a, 0xfa940d4e,
+ 0x0b3ff699, 0xf0810365, 0x110d0000,
+ }, { // 11.0 MHz
+ 0x0001ffff, 0xfff8ffff, 0x00210018, 0xffbaffa3,
+ 0x006000e1, 0xffc4fe68, 0xffa0024b, 0x019afd66,
+ 0xfc990216, 0x0575ff99, 0xf8d4fd81, 0x07d40640,
+ 0xf932f5e6, 0x03d20d0d, 0x00dff1de, 0xf9860cbf,
+ 0x0bd1f71e, 0xf058032f, 0x110d0000,
+ }, { // 11.1 MHz
+ 0x00010000, 0xfff8fff8, 0x001b0029, 0xffd1ff8a,
+ 0x002600f2, 0x002cfe7c, 0xff0f01f0, 0x023bfe20,
+ 0xfc1700fa, 0x05a200f7, 0xf927fc1c, 0x06f40765,
+ 0xfa82f53b, 0x02510d27, 0x0243f23d, 0xf8810c24,
+ 0x0c5cf7a7, 0xf03102fa, 0x110d0000,
+ }, { // 11.2 MHz
+ 0x00010002, 0xfffafff2, 0x00110035, 0xfff0ff81,
+ 0xffe700e7, 0x008ffeb6, 0xfe94016d, 0x02b0fefb,
+ 0xfbd3ffd1, 0x05850249, 0xf9c1fadb, 0x05de0858,
+ 0xfbf2f4c4, 0x00c70d17, 0x03a0f2b8, 0xf7870b7c,
+ 0x0cdff833, 0xf00d02c4, 0x110d0000,
+ }, { // 11.3 MHz
+ 0x00000003, 0xfffdffee, 0x00040038, 0x0010ff88,
+ 0xffac00c2, 0x00e2ff10, 0xfe3900cb, 0x02f1ffe9,
+ 0xfbd3feaa, 0x05210381, 0xfa9cf9c8, 0x04990912,
+ 0xfd7af484, 0xff390cdb, 0x04f4f34d, 0xf69a0ac9,
+ 0x0d5af8c1, 0xefec028e, 0x110d0000,
+ }, { // 11.4 MHz
+ 0x00000003, 0x0000ffee, 0xfff60033, 0x002fff9f,
+ 0xff7b0087, 0x011eff82, 0xfe080018, 0x02f900d8,
+ 0xfc17fd96, 0x04790490, 0xfbadf8ed, 0x032f098e,
+ 0xff10f47d, 0xfdaf0c75, 0x063cf3fc, 0xf5ba0a0b,
+ 0x0dccf952, 0xefcd0258, 0x110d0000,
+ }, { // 11.5 MHz
+ 0x00000003, 0x0004fff1, 0xffea0026, 0x0046ffc3,
+ 0xff5a003c, 0x013b0000, 0xfe04ff63, 0x02c801b8,
+ 0xfc99fca6, 0x0397056a, 0xfcecf853, 0x01ad09c9,
+ 0x00acf4ad, 0xfc2e0be7, 0x0773f4c2, 0xf4e90943,
+ 0x0e35f9e6, 0xefb10221, 0x110d0000,
+ }, { // 11.6 MHz
+ 0x00000002, 0x0007fff6, 0xffe20014, 0x0054ffee,
+ 0xff4effeb, 0x0137007e, 0xfe2efebb, 0x0260027a,
+ 0xfd51fbe6, 0x02870605, 0xfe4af7fe, 0x001d09c1,
+ 0x0243f515, 0xfabd0b32, 0x0897f59e, 0xf4280871,
+ 0x0e95fa7c, 0xef9701eb, 0x110d0000,
+ }, { // 11.7 MHz
+ 0xffff0001, 0x0008fffd, 0xffdeffff, 0x0056001d,
+ 0xff57ff9c, 0x011300f0, 0xfe82fe2e, 0x01ca0310,
+ 0xfe35fb62, 0x0155065a, 0xffbaf7f2, 0xfe8c0977,
+ 0x03cef5b2, 0xf9610a58, 0x09a5f68f, 0xf3790797,
+ 0x0eebfb14, 0xef8001b5, 0x110d0000,
+ }, { // 11.8 MHz
+ 0xffff0000, 0x00080004, 0xffe0ffe9, 0x004c0047,
+ 0xff75ff58, 0x00d1014a, 0xfef9fdc8, 0x0111036f,
+ 0xff36fb21, 0x00120665, 0x012df82e, 0xfd0708ec,
+ 0x0542f682, 0xf81f095c, 0x0a9af792, 0xf2db06b5,
+ 0x0f38fbad, 0xef6c017e, 0x110d0000,
+ }, { // 11.9 MHz
+ 0xffffffff, 0x0007000b, 0xffe7ffd8, 0x00370068,
+ 0xffa4ff28, 0x00790184, 0xff87fd91, 0x00430392,
+ 0x0044fb26, 0xfece0626, 0x0294f8b2, 0xfb990825,
+ 0x0698f77f, 0xf6fe0842, 0x0b73f8a7, 0xf25105cd,
+ 0x0f7bfc48, 0xef5a0148, 0x110d0000,
+ }, { // 12.0 MHz
+ 0x0000fffe, 0x00050010, 0xfff2ffcc, 0x001b007b,
+ 0xffdfff10, 0x00140198, 0x0020fd8e, 0xff710375,
+ 0x014dfb73, 0xfd9a059f, 0x03e0f978, 0xfa4e0726,
+ 0x07c8f8a7, 0xf600070c, 0x0c2ff9c9, 0xf1db04de,
+ 0x0fb4fce5, 0xef4b0111, 0x110d0000,
+ }, { // 12.1 MHz
+ 0x0000fffd, 0x00010012, 0xffffffc8, 0xfffb007e,
+ 0x001dff14, 0xffad0184, 0x00b7fdbe, 0xfea9031b,
+ 0x0241fc01, 0xfc8504d6, 0x0504fa79, 0xf93005f6,
+ 0x08caf9f2, 0xf52b05c0, 0x0ccbfaf9, 0xf17903eb,
+ 0x0fe3fd83, 0xef3f00db, 0x110d0000,
+ }, { // 12.2 MHz
+ 0x0000fffd, 0xfffe0011, 0x000cffcc, 0xffdb0071,
+ 0x0058ff32, 0xff4f014a, 0x013cfe1f, 0xfdfb028a,
+ 0x0311fcc9, 0xfb9d03d6, 0x05f4fbad, 0xf848049d,
+ 0x0999fb5b, 0xf4820461, 0x0d46fc32, 0xf12d02f4,
+ 0x1007fe21, 0xef3600a4, 0x110d0000,
+ }, { // 12.3 MHz
+ 0x0000fffe, 0xfffa000e, 0x0017ffd9, 0xffc10055,
+ 0x0088ff68, 0xff0400f0, 0x01a6fea7, 0xfd7501cc,
+ 0x03b0fdc0, 0xfaef02a8, 0x06a7fd07, 0xf79d0326,
+ 0x0a31fcda, 0xf40702f3, 0x0d9ffd72, 0xf0f601fa,
+ 0x1021fec0, 0xef2f006d, 0x110d0000,
+ }, { // 12.4 MHz
+ 0x0001ffff, 0xfff80007, 0x001fffeb, 0xffaf002d,
+ 0x00a8ffb0, 0xfed3007e, 0x01e9ff4c, 0xfd2000ee,
+ 0x0413fed8, 0xfa82015c, 0x0715fe7d, 0xf7340198,
+ 0x0a8dfe69, 0xf3bd017c, 0x0dd5feb8, 0xf0d500fd,
+ 0x1031ff60, 0xef2b0037, 0x110d0000,
+ }, { // 12.5 MHz
+ 0x00010000, 0xfff70000, 0x00220000, 0xffa90000,
+ 0x00b30000, 0xfec20000, 0x02000000, 0xfd030000,
+ 0x04350000, 0xfa5e0000, 0x073b0000, 0xf7110000,
+ 0x0aac0000, 0xf3a40000, 0x0de70000, 0xf0c90000,
+ 0x10360000, 0xef290000, 0x110d0000,
+ }, { // 12.6 MHz
+ 0x00010001, 0xfff8fff9, 0x001f0015, 0xffafffd3,
+ 0x00a80050, 0xfed3ff82, 0x01e900b4, 0xfd20ff12,
+ 0x04130128, 0xfa82fea4, 0x07150183, 0xf734fe68,
+ 0x0a8d0197, 0xf3bdfe84, 0x0dd50148, 0xf0d5ff03,
+ 0x103100a0, 0xef2bffc9, 0x110d0000,
+ }, { // 12.7 MHz
+ 0x00000002, 0xfffafff2, 0x00170027, 0xffc1ffab,
+ 0x00880098, 0xff04ff10, 0x01a60159, 0xfd75fe34,
+ 0x03b00240, 0xfaeffd58, 0x06a702f9, 0xf79dfcda,
+ 0x0a310326, 0xf407fd0d, 0x0d9f028e, 0xf0f6fe06,
+ 0x10210140, 0xef2fff93, 0x110d0000,
+ }, { // 12.8 MHz
+ 0x00000003, 0xfffeffef, 0x000c0034, 0xffdbff8f,
+ 0x005800ce, 0xff4ffeb6, 0x013c01e1, 0xfdfbfd76,
+ 0x03110337, 0xfb9dfc2a, 0x05f40453, 0xf848fb63,
+ 0x099904a5, 0xf482fb9f, 0x0d4603ce, 0xf12dfd0c,
+ 0x100701df, 0xef36ff5c, 0x110d0000,
+ }, { // 12.9 MHz
+ 0x00000003, 0x0001ffee, 0xffff0038, 0xfffbff82,
+ 0x001d00ec, 0xffadfe7c, 0x00b70242, 0xfea9fce5,
+ 0x024103ff, 0xfc85fb2a, 0x05040587, 0xf930fa0a,
+ 0x08ca060e, 0xf52bfa40, 0x0ccb0507, 0xf179fc15,
+ 0x0fe3027d, 0xef3fff25, 0x110d0000,
+ }, { // 13.0 MHz
+ 0x00000002, 0x0005fff0, 0xfff20034, 0x001bff85,
+ 0xffdf00f0, 0x0014fe68, 0x00200272, 0xff71fc8b,
+ 0x014d048d, 0xfd9afa61, 0x03e00688, 0xfa4ef8da,
+ 0x07c80759, 0xf600f8f4, 0x0c2f0637, 0xf1dbfb22,
+ 0x0fb4031b, 0xef4bfeef, 0x110d0000,
+ }, { // 13.1 MHz
+ 0xffff0001, 0x0007fff5, 0xffe70028, 0x0037ff98,
+ 0xffa400d8, 0x0079fe7c, 0xff87026f, 0x0043fc6e,
+ 0x004404da, 0xfecef9da, 0x0294074e, 0xfb99f7db,
+ 0x06980881, 0xf6fef7be, 0x0b730759, 0xf251fa33,
+ 0x0f7b03b8, 0xef5afeb8, 0x110d0000,
+ }, { // 13.2 MHz
+ 0xffff0000, 0x0008fffc, 0xffe00017, 0x004cffb9,
+ 0xff7500a8, 0x00d1feb6, 0xfef90238, 0x0111fc91,
+ 0xff3604df, 0x0012f99b, 0x012d07d2, 0xfd07f714,
+ 0x0542097e, 0xf81ff6a4, 0x0a9a086e, 0xf2dbf94b,
+ 0x0f380453, 0xef6cfe82, 0x110d0000,
+ }, { // 13.3 MHz
+ 0xffffffff, 0x00080003, 0xffde0001, 0x0056ffe3,
+ 0xff570064, 0x0113ff10, 0xfe8201d2, 0x01cafcf0,
+ 0xfe35049e, 0x0155f9a6, 0xffba080e, 0xfe8cf689,
+ 0x03ce0a4e, 0xf961f5a8, 0x09a50971, 0xf379f869,
+ 0x0eeb04ec, 0xef80fe4b, 0x110d0000,
+ }, { // 13.4 MHz
+ 0x0000fffe, 0x0007000a, 0xffe2ffec, 0x00540012,
+ 0xff4e0015, 0x0137ff82, 0xfe2e0145, 0x0260fd86,
+ 0xfd51041a, 0x0287f9fb, 0xfe4a0802, 0x001df63f,
+ 0x02430aeb, 0xfabdf4ce, 0x08970a62, 0xf428f78f,
+ 0x0e950584, 0xef97fe15, 0x110d0000,
+ }, { // 13.5 MHz
+ 0x0000fffd, 0x0004000f, 0xffeaffda, 0x0046003d,
+ 0xff5affc4, 0x013b0000, 0xfe04009d, 0x02c8fe48,
+ 0xfc99035a, 0x0397fa96, 0xfcec07ad, 0x01adf637,
+ 0x00ac0b53, 0xfc2ef419, 0x07730b3e, 0xf4e9f6bd,
+ 0x0e35061a, 0xefb1fddf, 0x110d0000,
+ }, { // 13.6 MHz
+ 0x0000fffd, 0x00000012, 0xfff6ffcd, 0x002f0061,
+ 0xff7bff79, 0x011e007e, 0xfe08ffe8, 0x02f9ff28,
+ 0xfc17026a, 0x0479fb70, 0xfbad0713, 0x032ff672,
+ 0xff100b83, 0xfdaff38b, 0x063c0c04, 0xf5baf5f5,
+ 0x0dcc06ae, 0xefcdfda8, 0x110d0000,
+ }, { // 13.7 MHz
+ 0x0000fffd, 0xfffd0012, 0x0004ffc8, 0x00100078,
+ 0xffacff3e, 0x00e200f0, 0xfe39ff35, 0x02f10017,
+ 0xfbd30156, 0x0521fc7f, 0xfa9c0638, 0x0499f6ee,
+ 0xfd7a0b7c, 0xff39f325, 0x04f40cb3, 0xf69af537,
+ 0x0d5a073f, 0xefecfd72, 0x110d0000,
+ }, { // 13.8 MHz
+ 0x0001fffe, 0xfffa000e, 0x0011ffcb, 0xfff0007f,
+ 0xffe7ff19, 0x008f014a, 0xfe94fe93, 0x02b00105,
+ 0xfbd3002f, 0x0585fdb7, 0xf9c10525, 0x05def7a8,
+ 0xfbf20b3c, 0x00c7f2e9, 0x03a00d48, 0xf787f484,
+ 0x0cdf07cd, 0xf00dfd3c, 0x110d0000,
+ }, { // 13.9 MHz
+ 0x00010000, 0xfff80008, 0x001bffd7, 0xffd10076,
+ 0x0026ff0e, 0x002c0184, 0xff0ffe10, 0x023b01e0,
+ 0xfc17ff06, 0x05a2ff09, 0xf92703e4, 0x06f4f89b,
+ 0xfa820ac5, 0x0251f2d9, 0x02430dc3, 0xf881f3dc,
+ 0x0c5c0859, 0xf031fd06, 0x110d0000,
+ }, { // 14.0 MHz
+ 0x00010001, 0xfff80001, 0x0021ffe8, 0xffba005d,
+ 0x0060ff1f, 0xffc40198, 0xffa0fdb5, 0x019a029a,
+ 0xfc99fdea, 0x05750067, 0xf8d4027f, 0x07d4f9c0,
+ 0xf9320a1a, 0x03d2f2f3, 0x00df0e22, 0xf986f341,
+ 0x0bd108e2, 0xf058fcd1, 0x110d0000,
+ }, { // 14.1 MHz
+ 0x00000002, 0xfff9fffa, 0x0021fffd, 0xffac0038,
+ 0x008eff4a, 0xff630184, 0x003afd8b, 0x00da0326,
+ 0xfd51fced, 0x050101c0, 0xf8cb0103, 0x0876fb10,
+ 0xf80a093e, 0x0543f338, 0xff7a0e66, 0xfa94f2b2,
+ 0x0b3f0967, 0xf081fc9b, 0x110d0000,
+ }, { // 14.2 MHz
+ 0x00000003, 0xfffbfff3, 0x001d0013, 0xffaa000b,
+ 0x00aaff89, 0xff13014a, 0x00cefd95, 0x000a037b,
+ 0xfe35fc1d, 0x044c0305, 0xf90cff7e, 0x08d5fc81,
+ 0xf7100834, 0x069ff3a7, 0xfe160e8d, 0xfbaaf231,
+ 0x0aa509e9, 0xf0adfc65, 0x110d0000,
+ }, { // 14.3 MHz
+ 0x00000003, 0xffffffef, 0x00140025, 0xffb4ffdd,
+ 0x00b2ffd6, 0xfedb00f0, 0x0150fdd3, 0xff380391,
+ 0xff36fb85, 0x035e0426, 0xf994fdfe, 0x08eefe0b,
+ 0xf6490702, 0x07e1f43e, 0xfcb60e97, 0xfcc6f1be,
+ 0x0a040a67, 0xf0dbfc30, 0x110d0000,
+ }, { // 14.4 MHz
+ 0x00000003, 0x0002ffee, 0x00070033, 0xffc9ffb4,
+ 0x00a40027, 0xfec3007e, 0x01b4fe3f, 0xfe760369,
+ 0x0044fb2e, 0x02450518, 0xfa5ffc90, 0x08c1ffa1,
+ 0xf5bc05ae, 0x0902f4fc, 0xfb600e85, 0xfde7f15a,
+ 0x095d0ae2, 0xf10cfbfb, 0x110d0000,
+ }, { // 14.5 MHz
+ 0xffff0002, 0x0005ffef, 0xfffa0038, 0xffe5ff95,
+ 0x00820074, 0xfecc0000, 0x01f0fed0, 0xfdd20304,
+ 0x014dfb1d, 0x010e05ce, 0xfb64fb41, 0x084e013b,
+ 0xf569043e, 0x0a00f5dd, 0xfa150e55, 0xff0bf104,
+ 0x08b00b59, 0xf13ffbc6, 0x110d0000,
+ }, { // 14.6 MHz
+ 0xffff0001, 0x0008fff4, 0xffed0035, 0x0005ff83,
+ 0x005000b4, 0xfef6ff82, 0x01ffff7a, 0xfd580269,
+ 0x0241fb53, 0xffca0640, 0xfc99fa1e, 0x079a02cb,
+ 0xf55502ba, 0x0ad5f6e0, 0xf8d90e0a, 0x0031f0bd,
+ 0x07fd0bcb, 0xf174fb91, 0x110d0000,
+ }, { // 14.7 MHz
+ 0xffffffff, 0x0009fffb, 0xffe4002a, 0x0025ff82,
+ 0x001400e0, 0xff3cff10, 0x01e10030, 0xfd1201a4,
+ 0x0311fbcd, 0xfe88066a, 0xfdf1f92f, 0x06aa0449,
+ 0xf57e0128, 0x0b7ef801, 0xf7b00da2, 0x0156f086,
+ 0x07450c39, 0xf1acfb5c, 0x110d0000,
+ }, { // 14.8 MHz
+ 0x0000fffe, 0x00080002, 0xffdf0019, 0x003fff92,
+ 0xffd600f1, 0xff96feb6, 0x019700e1, 0xfd0500c2,
+ 0x03b0fc84, 0xfd590649, 0xff5df87f, 0x058505aa,
+ 0xf5e4ff91, 0x0bf9f93c, 0xf69d0d20, 0x0279f05e,
+ 0x06880ca3, 0xf1e6fb28, 0x110d0000,
+ }, { // 14.9 MHz
+ 0x0000fffd, 0x00060009, 0xffdf0004, 0x0051ffb0,
+ 0xff9d00e8, 0xfffcfe7c, 0x01280180, 0xfd32ffd2,
+ 0x0413fd6e, 0xfc4d05df, 0x00d1f812, 0x043506e4,
+ 0xf685fdfb, 0x0c43fa8d, 0xf5a10c83, 0x0399f046,
+ 0x05c70d08, 0xf222faf3, 0x110d0000,
+ }, { // 15.0 MHz
+ 0x0000fffd, 0x0003000f, 0xffe5ffef, 0x0057ffd9,
+ 0xff7000c4, 0x0062fe68, 0x009e01ff, 0xfd95fee6,
+ 0x0435fe7d, 0xfb710530, 0x023cf7ee, 0x02c307ef,
+ 0xf75efc70, 0x0c5cfbef, 0xf4c10bce, 0x04b3f03f,
+ 0x05030d69, 0xf261fabf, 0x110d0000,
+ }, { // 15.1 MHz
+ 0x0000fffd, 0xffff0012, 0xffefffdc, 0x00510006,
+ 0xff540089, 0x00befe7c, 0x00060253, 0xfe27fe0d,
+ 0x0413ffa2, 0xfad10446, 0x0390f812, 0x013b08c3,
+ 0xf868faf6, 0x0c43fd5f, 0xf3fd0b02, 0x05c7f046,
+ 0x043b0dc4, 0xf2a1fa8b, 0x110d0000,
+ }, { // 15.2 MHz
+ 0x0001fffe, 0xfffc0012, 0xfffbffce, 0x003f0033,
+ 0xff4e003f, 0x0106feb6, 0xff6e0276, 0xfeddfd56,
+ 0x03b000cc, 0xfa740329, 0x04bff87f, 0xffaa095d,
+ 0xf99ef995, 0x0bf9fed8, 0xf3590a1f, 0x06d2f05e,
+ 0x03700e1b, 0xf2e4fa58, 0x110d0000,
+ }, { // 15.3 MHz
+ 0x0001ffff, 0xfff9000f, 0x0009ffc8, 0x00250059,
+ 0xff5effee, 0x0132ff10, 0xfee30265, 0xffaafccf,
+ 0x031101eb, 0xfa6001e8, 0x05bdf92f, 0xfe1b09b6,
+ 0xfafaf852, 0x0b7e0055, 0xf2d50929, 0x07d3f086,
+ 0x02a30e6c, 0xf329fa24, 0x110d0000,
+ }, { // 15.4 MHz
+ 0x00010001, 0xfff80009, 0x0015ffca, 0x00050074,
+ 0xff81ff9f, 0x013dff82, 0xfe710221, 0x007cfc80,
+ 0x024102ed, 0xfa940090, 0x0680fa1e, 0xfc9b09cd,
+ 0xfc73f736, 0x0ad501d0, 0xf2740820, 0x08c9f0bd,
+ 0x01d40eb9, 0xf371f9f1, 0x110d0000,
+ }, { // 15.5 MHz
+ 0x00000002, 0xfff80002, 0x001effd5, 0xffe5007f,
+ 0xffb4ff5b, 0x01280000, 0xfe2401b0, 0x0146fc70,
+ 0x014d03c6, 0xfb10ff32, 0x0701fb41, 0xfb3709a1,
+ 0xfe00f644, 0x0a000345, 0xf2350708, 0x09b2f104,
+ 0x01050eff, 0xf3baf9be, 0x110d0000,
+ }, { // 15.6 MHz
+ 0x00000003, 0xfff9fffb, 0x0022ffe6, 0xffc9007a,
+ 0xfff0ff29, 0x00f2007e, 0xfe01011b, 0x01f6fc9e,
+ 0x00440467, 0xfbccfdde, 0x0738fc90, 0xf9f70934,
+ 0xff99f582, 0x090204b0, 0xf21a05e1, 0x0a8df15a,
+ 0x00340f41, 0xf405f98b, 0x110d0000,
+ }, { // 15.7 MHz
+ 0x00000003, 0xfffcfff4, 0x0020fffa, 0xffb40064,
+ 0x002fff11, 0x00a400f0, 0xfe0d006e, 0x0281fd09,
+ 0xff3604c9, 0xfcbffca2, 0x0726fdfe, 0xf8e80888,
+ 0x0134f4f3, 0x07e1060c, 0xf22304af, 0x0b59f1be,
+ 0xff640f7d, 0xf452f959, 0x110d0000,
+ }, { // 15.8 MHz
+ 0x00000003, 0x0000fff0, 0x001a0010, 0xffaa0041,
+ 0x0067ff13, 0x0043014a, 0xfe46ffb9, 0x02dbfda8,
+ 0xfe3504e5, 0xfddcfb8d, 0x06c9ff7e, 0xf81107a2,
+ 0x02c9f49a, 0x069f0753, 0xf2500373, 0x0c14f231,
+ 0xfe930fb3, 0xf4a1f927, 0x110d0000,
+ }, { // 15.9 MHz
+ 0xffff0002, 0x0003ffee, 0x000f0023, 0xffac0016,
+ 0x0093ff31, 0xffdc0184, 0xfea6ff09, 0x02fdfe70,
+ 0xfd5104ba, 0xff15faac, 0x06270103, 0xf7780688,
+ 0x044df479, 0x05430883, 0xf2a00231, 0x0cbef2b2,
+ 0xfdc40fe3, 0xf4f2f8f5, 0x110d0000,
+ }, { // 16.0 MHz
+ 0xffff0001, 0x0006ffef, 0x00020031, 0xffbaffe8,
+ 0x00adff66, 0xff790198, 0xff26fe6e, 0x02e5ff55,
+ 0xfc99044a, 0x005bfa09, 0x0545027f, 0xf7230541,
+ 0x05b8f490, 0x03d20997, 0xf31300eb, 0x0d55f341,
+ 0xfcf6100e, 0xf544f8c3, 0x110d0000,
+ }
+};
+
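For readers of this hunk, a minimal sketch of how the new table is likely consumed once the switch below is gone (the table name and index math are assumptions for illustration; the real lookup lives outside the lines shown here):

	/*
	 * Illustrative sketch only, not necessarily the exact upstream code:
	 * with one 19-entry row per 100 kHz step from 3.0 MHz to 16.0 MHz,
	 * the old switch (ifHz) collapses to a table lookup plus a short
	 * write loop. "ifhz_coeffs" is an assumed name for the table added
	 * above, and the loop assumes DIF_BPF_COEFF01..DIF_BPF_COEFF36 are
	 * consecutive 32-bit registers, matching the 19 writes each removed
	 * case performed.
	 */
	if (ifHz >= 3000000 && ifHz <= 16000000) {
		unsigned int idx = (ifHz - 3000000) / 100000;
		int i;

		coeffs = ifhz_coeffs[idx];
		for (i = 0; i < 19; i++)
			cx25840_write4(client, DIF_BPF_COEFF01 + 4 * i,
				       coeffs[i]);
	}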
static void cx23885_dif_setup(struct i2c_client *client, u32 ifHz)
{
u64 pll_freq;
u32 pll_freq_word;
+ const u32 *coeffs;
v4l_dbg(1, cx25840_debug, client, "%s(%d)\n", __func__, ifHz);
@@ -2763,2889 +3554,26 @@ static void cx23885_dif_setup(struct i2c_client *client, u32 ifHz)
v4l_dbg(1, cx25840_debug, client, "%s(%d) again\n", __func__, ifHz);
- switch (ifHz) {
- case 3000000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00080012);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x001e0024);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x001bfff8);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xffb4ff50);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfed8fe68);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe24fe34);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfebaffc7);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x014d031f);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x04f0065d);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x07010688);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x04c901d6);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xfe00f9d3);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf600f342);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf235f337);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf64efb22);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0105070f);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x0c460fce);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 3100000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000001);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00070012);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x00220032);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x00370026);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xfff0ff91);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff0efe7c);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe01fdcc);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe0afedb);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x00440224);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x0434060c);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x0738074e);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x06090361);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xff99fb39);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf6fef3b6);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf21af2a5);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf573fa33);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0034067d);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x0bfb0fb9);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 3200000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000000);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0004000e);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x00200038);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x004c004f);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x002fffdf);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff5cfeb6);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe0dfd92);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd7ffe03);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36010a);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x03410575);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x072607d2);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x071804d5);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0134fcb7);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf81ff451);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf223f22e);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf4a7f94b);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xff6405e8);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x0bae0fa4);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 3300000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000ffff);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00000008);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x001a0036);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0056006d);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x00670030);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xffbdff10);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe46fd8d);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd25fd4f);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35ffe0);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x0224049f);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x06c9080e);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x07ef0627);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x02c9fe45);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf961f513);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf250f1d2);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf3ecf869);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xfe930552);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x0b5f0f8f);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 3400000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffe);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffd0001);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x000f002c);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0054007d);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x0093007c);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x0024ff82);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfea6fdbb);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd03fcca);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51feb9);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x00eb0392);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x06270802);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x08880750);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x044dffdb);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xfabdf5f8);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2a0f193);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf342f78f);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xfdc404b9);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x0b0e0f78);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 3500000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffafff9);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x0002001b);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0046007d);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x00ad00ba);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x00870000);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xff26fe1a);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd1bfc7e);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99fda4);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xffa5025c);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x054507ad);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x08dd0847);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x05b80172);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xfc2ef6ff);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf313f170);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf2abf6bd);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xfcf6041f);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x0abc0f61);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 3600000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff3);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xfff50006);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x002f006c);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x00b200e3);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x00dc007e);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xffb9fea0);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd6bfc71);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17fcb1);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfe65010b);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x042d0713);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x08ec0906);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x07020302);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xfdaff823);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf3a7f16a);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf228f5f5);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xfc2a0384);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x0a670f4a);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 3700000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff7ffef);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffe9fff1);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0010004d);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x00a100f2);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x011a00f0);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x0053ff44);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdedfca2);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3fbef);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfd39ffae);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x02ea0638);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x08b50987);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x08230483);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xff39f960);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf45bf180);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf1b8f537);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xfb6102e7);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x0a110f32);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 3800000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9ffee);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffe1ffdd);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xfff00024);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x007c00e5);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x013a014a);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x00e6fff8);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe98fd0f);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3fb67);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfc32fe54);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x01880525);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x083909c7);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x091505ee);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x00c7fab3);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf52df1b4);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf15df484);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xfa9b0249);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x09ba0f19);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 3900000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000000);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffbfff0);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffdeffcf);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffd1fff6);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x004800be);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x01390184);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x016300ac);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xff5efdb1);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17fb23);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb5cfd0d);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x001703e4);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x077b09c4);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x09d2073c);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0251fc18);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf61cf203);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf118f3dc);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf9d801aa);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x09600eff);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 4000000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000001);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffefff4);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffe1ffc8);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffbaffca);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x000b0082);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x01170198);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01c10152);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x0030fe7b);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99fb24);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfac3fbe9);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfea5027f);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x0683097f);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a560867);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x03d2fd89);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf723f26f);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0e8f341);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf919010a);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x09060ee5);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 4100000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00010002);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0002fffb);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffe8ffca);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffacffa4);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xffcd0036);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x00d70184);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01f601dc);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x00ffff60);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51fb6d);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa6efaf5);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfd410103);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x055708f9);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a9e0969);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0543ff02);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf842f2f5);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0cef2b2);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf85e006b);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x08aa0ecb);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 4200000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00010003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00050003);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xfff3ffd3);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffaaff8b);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff95ffe5);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x0080014a);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01fe023f);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x01ba0050);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35fbf8);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa62fa3b);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfbf9ff7e);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x04010836);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0aa90a3d);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x069f007f);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf975f395);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0cbf231);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf7a9ffcb);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x084c0eaf);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 4300000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00010003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0008000a);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x0000ffe4);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffb4ff81);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff6aff96);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x001c00f0);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01d70271);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x0254013b);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36fcbd);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa9ff9c5);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfadbfdfe);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x028c073b);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a750adf);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x07e101fa);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xfab8f44e);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0ddf1be);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf6f9ff2b);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x07ed0e94);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 4400000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0009000f);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x000efff8);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffc9ff87);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff52ff54);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xffb5007e);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01860270);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x02c00210);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x0044fdb2);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb22f997);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xf9f2fc90);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x0102060f);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a050b4c);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0902036e);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xfc0af51e);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf106f15a);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf64efe8b);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x078d0e77);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 4500000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00080012);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x0019000e);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffe5ff9e);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff4fff25);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff560000);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x0112023b);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x02f702c0);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x014dfec8);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfbe5f9b3);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xf947fb41);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xff7004b9);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x095a0b81);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0a0004d8);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xfd65f603);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf144f104);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf5aafdec);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x072b0e5a);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 4600000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000001);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00060012);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x00200022);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0005ffc1);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff61ff10);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff09ff82);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x008601d7);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x02f50340);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x0241fff0);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfcddfa19);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8e2fa1e);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xfde30343);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x08790b7f);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0ad50631);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xfec7f6fc);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf198f0bd);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf50dfd4e);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x06c90e3d);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 4700000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000ffff);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0003000f);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x00220030);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0025ffed);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff87ff15);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfed6ff10);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xffed014c);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x02b90386);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x03110119);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfdfefac4);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8c6f92f);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xfc6701b7);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x07670b44);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0b7e0776);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x002df807);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf200f086);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf477fcb1);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x06650e1e);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 4800000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffe);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xffff0009);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x001e0038);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x003f001b);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xffbcff36);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec2feb6);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xff5600a5);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x0248038d);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b00232);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xff39fbab);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8f4f87f);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xfb060020);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x062a0ad2);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0bf908a3);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0192f922);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf27df05e);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf3e8fc14);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x06000e00);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 4900000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffc0002);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x00160037);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x00510046);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xfff9ff6d);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfed0fe7c);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfecefff0);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x01aa0356);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x0413032b);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x007ffcc5);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xf96cf812);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf9cefe87);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x04c90a2c);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c4309b4);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x02f3fa4a);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf30ef046);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf361fb7a);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x059b0de0);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 5000000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9fffa);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x000a002d);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x00570067);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x0037ffb5);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfefffe68);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe62ff3d);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x00ec02e3);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x043503f6);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x01befe05);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfa27f7ee);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf8c6fcf8);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x034c0954);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c5c0aa4);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x044cfb7e);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf3b1f03f);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf2e2fae1);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x05340dc0);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 5100000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff4);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xfffd001e);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0051007b);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x006e0006);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff48fe7c);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe1bfe9a);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x001d023e);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x04130488);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x02e6ff5b);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfb1ef812);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7f7fb7f);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x01bc084e);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c430b72);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x059afcba);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf467f046);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf26cfa4a);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x04cd0da0);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 5200000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8ffef);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xfff00009);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x003f007f);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x00980056);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xffa5feb6);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe00fe15);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xff4b0170);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b004d7);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x03e800b9);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfc48f87f);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf768fa23);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0022071f);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0bf90c1b);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x06dafdfd);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf52df05e);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf1fef9b5);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x04640d7f);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 5300000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000ffff);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9ffee);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffe6fff3);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x00250072);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x00af009c);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x000cff10);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe13fdb8);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe870089);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x031104e1);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x04b8020f);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfd98f92f);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf71df8f0);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xfe8805ce);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0b7e0c9c);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0808ff44);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf603f086);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf19af922);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x03fb0d5e);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 5400000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000001);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffcffef);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffe0ffe0);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x00050056);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x00b000d1);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x0071ff82);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe53fd8c);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfddfff99);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x024104a3);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x054a034d);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xff01fa1e);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf717f7ed);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xfcf50461);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0ad50cf4);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0921008d);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf6e7f0bd);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf13ff891);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x03920d3b);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 5500000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00010002);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffffff3);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffdeffd1);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffe5002f);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x009c00ed);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x00cb0000);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfebafd94);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd61feb0);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x014d0422);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x05970464);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x0074fb41);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf759f721);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xfb7502de);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0a000d21);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0a2201d4);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf7d9f104);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf0edf804);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x03280d19);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 5600000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00010003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0003fffa);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffe3ffc9);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffc90002);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x007500ef);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x010e007e);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xff3dfdcf);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd16fddd);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x00440365);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x059b0548);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x01e3fc90);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7dff691);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xfa0f014d);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x09020d23);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0b0a0318);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf8d7f15a);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf0a5f779);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x02bd0cf6);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 5700000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00010003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00060001);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffecffc9);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffb4ffd4);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x004000d5);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x013600f0);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xffd3fe39);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd04fd31);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xff360277);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x055605ef);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x033efdfe);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf8a5f642);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf8cbffb6);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x07e10cfb);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0bd50456);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf9dff1be);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf067f6f2);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x02520cd2);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 5800000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00080009);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xfff8ffd2);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffaaffac);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x000200a3);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x013c014a);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x006dfec9);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd2bfcb7);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe350165);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x04cb0651);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x0477ff7e);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf9a5f635);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf7b1fe20);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x069f0ca8);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0c81058b);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xfaf0f231);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf033f66d);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x01e60cae);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 5900000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0009000e);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x0005ffe1);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffacff90);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xffc5005f);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x01210184);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x00fcff72);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd8afc77);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51003f);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x04020669);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x05830103);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xfad7f66b);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf6c8fc93);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x05430c2b);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d0d06b5);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xfc08f2b2);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf00af5ec);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x017b0c89);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 6000000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000001);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00070012);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x0012fff5);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffbaff82);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff8e000f);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x00e80198);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01750028);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe18fc75);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99ff15);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x03050636);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x0656027f);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xfc32f6e2);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf614fb17);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x03d20b87);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d7707d2);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xfd26f341);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xefeaf56f);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x010f0c64);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 6100000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0000);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00050012);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x001c000b);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffd1ff84);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff66ffbe);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x00960184);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01cd00da);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfeccfcb2);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17fdf9);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x01e005bc);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x06e703e4);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xfdabf798);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf599f9b3);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x02510abd);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0dbf08df);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xfe48f3dc);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xefd5f4f6);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x00a20c3e);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 6200000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffe);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0002000f);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x0021001f);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xfff0ff97);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff50ff74);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x0034014a);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01fa0179);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xff97fd2a);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3fcfa);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x00a304fe);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x07310525);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xff37f886);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf55cf86e);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x00c709d0);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0de209db);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xff6df484);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xefcbf481);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0x00360c18);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 6300000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffe000a);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x0021002f);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0010ffb8);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff50ff3b);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xffcc00f0);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01fa01fa);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x0069fdd4);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3fc26);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xff5d0407);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x07310638);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x00c9f9a8);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf55cf74e);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xff3908c3);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0de20ac3);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0093f537);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xefcbf410);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xffca0bf2);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 6400000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffb0003);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x001c0037);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x002fffe2);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff66ff17);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff6a007e);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01cd0251);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x0134fea5);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17fb8b);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfe2002e0);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x06e70713);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x0255faf5);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf599f658);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xfdaf0799);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0dbf0b96);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x01b8f5f5);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xefd5f3a3);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xff5e0bca);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 6500000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9fffb);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x00120037);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x00460010);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff8eff0f);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff180000);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01750276);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x01e8ff8d);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99fb31);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfcfb0198);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x065607ad);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x03cefc64);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf614f592);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xfc2e0656);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d770c52);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x02daf6bd);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xefeaf33b);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xfef10ba3);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 6600000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff7fff5);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x0005002f);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0054003c);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xffc5ff22);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfedfff82);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x00fc0267);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x0276007e);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51fb1c);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfbfe003e);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x05830802);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x0529fdec);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf6c8f4fe);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xfabd04ff);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d0d0cf6);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x03f8f78f);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf00af2d7);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xfe850b7b);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 6700000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000ffff);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff0);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xfff80020);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x00560060);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x0002ff4e);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec4ff10);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x006d0225);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x02d50166);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35fb4e);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb35fee1);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x0477080e);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x065bff82);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf7b1f4a0);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf9610397);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0c810d80);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0510f869);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf033f278);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xfe1a0b52);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 6800000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00010000);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffaffee);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffec000c);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x004c0078);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x0040ff8e);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfecafeb6);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xffd301b6);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x02fc0235);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36fbc5);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfaaafd90);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x033e07d2);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x075b011b);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf8cbf47a);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf81f0224);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0bd50def);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0621f94b);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf067f21e);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xfdae0b29);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 6900000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00010001);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffdffef);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffe3fff6);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0037007f);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x0075ffdc);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfef2fe7c);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xff3d0122);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x02ea02dd);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x0044fc79);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa65fc5d);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x01e3074e);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x082102ad);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xfa0ff48c);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf6fe00a9);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0b0a0e43);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0729fa33);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf0a5f1c9);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xfd430b00);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 7000000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00010002);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0001fff3);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffdeffe2);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x001b0076);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x009c002d);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff35fe68);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfeba0076);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x029f0352);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x014dfd60);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa69fb53);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x00740688);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x08a7042d);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xfb75f4d6);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf600ff2d);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0a220e7a);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0827fb22);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf0edf17a);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xfcd80ad6);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 7100000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0004fff9);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffe0ffd2);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xfffb005e);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x00b0007a);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff8ffe7c);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe53ffc1);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x0221038c);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x0241fe6e);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfab6fa80);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xff010587);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x08e90590);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xfcf5f556);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf52bfdb3);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x09210e95);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0919fc15);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf13ff12f);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xfc6e0aab);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 7200000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00070000);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffe6ffc9);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffdb0039);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x00af00b8);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfff4feb6);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe13ff10);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x01790388);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x0311ff92);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb48f9ed);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfd980453);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x08e306cd);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xfe88f60a);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf482fc40);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x08080e93);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x09fdfd0c);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf19af0ea);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xfc050a81);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 7300000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00080008);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xfff0ffc9);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffc1000d);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x009800e2);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x005bff10);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe00fe74);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x00b50345);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b000bc);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfc18f9a1);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfc4802f9);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x089807dc);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0022f6f0);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf407fada);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x06da0e74);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0ad3fe06);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf1fef0ab);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xfb9c0a55);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 7400000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000001);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0008000e);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xfffdffd0);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffafffdf);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x006e00f2);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x00b8ff82);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe1bfdf8);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xffe302c8);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x041301dc);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfd1af99e);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfb1e0183);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x080908b5);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x01bcf801);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf3bdf985);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x059a0e38);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0b99ff03);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf26cf071);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xfb330a2a);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 7500000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0000);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00070011);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x000affdf);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffa9ffb5);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x003700e6);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x01010000);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe62fda8);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xff140219);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x043502e1);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfe42f9e6);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfa270000);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x073a0953);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x034cf939);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf3a4f845);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x044c0de1);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0c4f0000);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf2e2f03c);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xfacc09fe);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 7600000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xffffffff);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00040012);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x0016fff3);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffafff95);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xfff900c0);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x0130007e);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfecefd89);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe560146);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x041303bc);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xff81fa76);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xf96cfe7d);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x063209b1);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x04c9fa93);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf3bdf71e);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x02f30d6e);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0cf200fd);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf361f00e);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xfa6509d1);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 7700000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffe);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00010010);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x001e0008);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffc1ff84);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xffbc0084);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x013e00f0);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xff56fd9f);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdb8005c);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b00460);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x00c7fb45);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8f4fd07);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x04fa09ce);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x062afc07);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf407f614);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x01920ce0);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0d8301fa);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf3e8efe5);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xfa0009a4);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 7800000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffd000b);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x0022001d);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffdbff82);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff870039);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x012a014a);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xffedfde7);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd47ff6b);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x031104c6);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x0202fc4c);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8c6fbad);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x039909a7);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0767fd8e);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf482f52b);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x002d0c39);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0e0002f4);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf477efc2);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf99b0977);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 7900000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffa0004);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x0020002d);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xfffbff91);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff61ffe8);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x00f70184);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x0086fe5c);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd0bfe85);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x024104e5);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x0323fd7d);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8e2fa79);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x021d093f);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0879ff22);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf52bf465);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xfec70b79);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0e6803eb);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf50defa5);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf937094a);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 8000000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fffd);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x00190036);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x001bffaf);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff4fff99);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x00aa0198);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x0112fef3);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd09fdb9);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x014d04be);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x041bfecc);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xf947f978);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x00900897);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x095a00b9);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf600f3c5);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xfd650aa3);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0ebc04de);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf5aaef8e);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf8d5091c);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 8100000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000ffff);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff7fff6);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x000e0038);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0037ffd7);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff52ff56);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x004b0184);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x0186ffa1);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd40fd16);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x00440452);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x04de0029);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xf9f2f8b2);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xfefe07b5);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a05024d);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf6fef34d);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xfc0a09b8);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0efa05cd);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf64eef7d);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf87308ed);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 8200000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00010000);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff0);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x00000031);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x004c0005);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff6aff27);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xffe4014a);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01d70057);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdacfca6);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xff3603a7);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x05610184);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfadbf82e);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xfd74069f);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a7503d6);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf81ff2ff);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xfab808b9);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0f2306b5);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf6f9ef72);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf81308bf);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 8300000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00010001);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffbffee);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xfff30022);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x00560032);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff95ff10);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff8000f0);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01fe0106);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe46fc71);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe3502c7);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x059e02ce);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfbf9f7f2);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xfbff055b);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0aa9054c);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf961f2db);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf97507aa);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0f350797);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf7a9ef6d);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf7b40890);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 8400000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00010002);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffeffee);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffe8000f);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x00540058);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xffcdff14);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff29007e);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01f6019e);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xff01fc7c);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd5101bf);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x059203f6);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfd41f7fe);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xfaa903f3);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a9e06a9);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xfabdf2e2);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf842068b);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0f320871);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf85eef6e);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf7560860);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 8500000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0002fff2);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffe1fff9);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x00460073);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x000bff34);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfee90000);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01c10215);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xffd0fcc5);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99009d);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x053d04f1);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfea5f853);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf97d0270);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a5607e4);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xfc2ef314);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf723055f);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0f180943);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf919ef75);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf6fa0830);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 8600000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0005fff8);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffdeffe4);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x002f007f);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x0048ff6b);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec7ff82);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x0163025f);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x00a2fd47);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17ff73);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x04a405b2);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x0017f8ed);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf88500dc);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x09d208f9);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xfdaff370);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf61c0429);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0ee80a0b);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xf9d8ef82);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf6a00800);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 8700000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0007ffff);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffe1ffd4);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0010007a);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x007cffb2);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec6ff10);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x00e60277);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x0168fdf9);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3fe50);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x03ce0631);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x0188f9c8);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7c7ff43);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x091509e3);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xff39f3f6);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf52d02ea);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0ea30ac9);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xfa9bef95);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf64607d0);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 8800000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00090007);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffe9ffca);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xfff00065);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x00a10003);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfee6feb6);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x0053025b);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x0213fed0);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3fd46);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x02c70668);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x02eafadb);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf74bfdae);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x08230a9c);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x00c7f4a3);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf45b01a6);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0e480b7c);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xfb61efae);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf5ef079f);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 8900000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0000);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0008000d);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xfff5ffc8);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffd10043);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x00b20053);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff24fe7c);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xffb9020c);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x0295ffbb);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17fc64);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x019b0654);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x042dfc1c);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf714fc2a);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x07020b21);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0251f575);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf3a7005e);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0dd80c24);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xfc2aefcd);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf599076e);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 9000000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xffffffff);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00060011);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x0002ffcf);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffba0018);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x00ad009a);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff79fe68);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xff260192);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x02e500ab);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99fbb6);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x005b05f7);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x0545fd81);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf723fabf);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x05b80b70);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x03d2f669);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf313ff15);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0d550cbf);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xfcf6eff2);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf544073d);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 9100000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffe);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00030012);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x000fffdd);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffacffea);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x009300cf);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xffdcfe7c);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfea600f7);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x02fd0190);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51fb46);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xff150554);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x0627fefd);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf778f978);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x044d0b87);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0543f77d);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2a0fdcf);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0cbe0d4e);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xfdc4f01d);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf4f2070b);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 9200000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00000010);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x001afff0);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffaaffbf);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x006700ed);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x0043feb6);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe460047);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x02db0258);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35fb1b);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfddc0473);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x06c90082);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf811f85e);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x02c90b66);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x069ff8ad);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf250fc8d);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0c140dcf);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xfe93f04d);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf4a106d9);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 9300000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffc000c);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x00200006);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffb4ff9c);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x002f00ef);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x00a4ff10);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe0dff92);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x028102f7);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36fb37);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfcbf035e);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x07260202);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf8e8f778);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x01340b0d);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x07e1f9f4);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf223fb51);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0b590e42);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xff64f083);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf45206a7);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 9400000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff90005);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x0022001a);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffc9ff86);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xfff000d7);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x00f2ff82);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe01fee5);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x01f60362);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x0044fb99);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfbcc0222);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x07380370);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf9f7f6cc);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xff990a7e);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0902fb50);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf21afa1f);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0a8d0ea6);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0034f0bf);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf4050675);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 9500000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fffe);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x001e002b);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffe5ff81);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xffb400a5);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x01280000);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe24fe50);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x01460390);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x014dfc3a);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb1000ce);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x070104bf);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xfb37f65f);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xfe0009bc);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0a00fcbb);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf235f8f8);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x09b20efc);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0105f101);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf3ba0642);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 9600000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0001ffff);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff7);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x00150036);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0005ff8c);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff810061);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x013d007e);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe71fddf);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x007c0380);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x0241fd13);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa94ff70);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x068005e2);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xfc9bf633);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xfc7308ca);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0ad5fe30);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf274f7e0);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x08c90f43);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x01d4f147);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf371060f);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 9700000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00010001);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9fff1);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x00090038);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0025ffa7);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff5e0012);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x013200f0);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfee3fd9b);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xffaa0331);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x0311fe15);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa60fe18);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x05bd06d1);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xfe1bf64a);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xfafa07ae);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0b7effab);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2d5f6d7);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x07d30f7a);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x02a3f194);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf32905dc);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 9800000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00010002);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffcffee);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xfffb0032);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x003fffcd);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff4effc1);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x0106014a);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xff6efd8a);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfedd02aa);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b0ff34);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa74fcd7);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x04bf0781);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xffaaf6a3);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf99e066b);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0bf90128);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf359f5e1);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x06d20fa2);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0370f1e5);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf2e405a8);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 9900000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xffffffee);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffef0024);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0051fffa);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff54ff77);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x00be0184);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x0006fdad);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe2701f3);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x0413005e);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfad1fbba);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x039007ee);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x013bf73d);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf868050a);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c4302a1);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf3fdf4fe);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x05c70fba);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x043bf23c);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf2a10575);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 10000000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0003fff1);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffe50011);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x00570027);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff70ff3c);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x00620198);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x009efe01);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd95011a);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x04350183);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb71fad0);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x023c0812);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x02c3f811);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf75e0390);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c5c0411);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf4c1f432);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x04b30fc1);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0503f297);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf2610541);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 10100000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0006fff7);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffdffffc);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x00510050);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff9dff18);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfffc0184);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x0128fe80);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd32002e);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x04130292);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfc4dfa21);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x00d107ee);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x0435f91c);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf6850205);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c430573);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf5a1f37d);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x03990fba);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x05c7f2f8);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf222050d);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 10200000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0008fffe);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffdfffe7);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x003f006e);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xffd6ff0f);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff96014a);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x0197ff1f);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd05ff3e);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b0037c);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfd59f9b7);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xff5d0781);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x0585fa56);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf5e4006f);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0bf906c4);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf69df2e0);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x02790fa2);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0688f35d);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf1e604d8);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 10300000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0001);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00090005);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffe4ffd6);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0025007e);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x0014ff20);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff3c00f0);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01e1ffd0);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd12fe5c);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x03110433);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfe88f996);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfdf106d1);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x06aafbb7);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf57efed8);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0b7e07ff);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf7b0f25e);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x01560f7a);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0745f3c7);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf1ac04a4);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 10400000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xffffffff);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0008000c);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffedffcb);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0005007d);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x0050ff4c);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfef6007e);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01ff0086);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd58fd97);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x024104ad);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xffcaf9c0);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfc9905e2);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x079afd35);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf555fd46);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0ad50920);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf8d9f1f6);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x00310f43);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x07fdf435);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf174046f);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 10500000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffe);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00050011);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xfffaffc8);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffe5006b);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x0082ff8c);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfecc0000);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01f00130);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdd2fcfc);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x014d04e3);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x010efa32);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfb6404bf);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x084efec5);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf569fbc2);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0a000a23);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xfa15f1ab);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xff0b0efc);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x08b0f4a7);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf13f043a);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 10600000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00020012);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x0007ffcd);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffc9004c);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x00a4ffd9);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec3ff82);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01b401c1);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe76fc97);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x004404d2);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x0245fae8);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfa5f0370);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x08c1005f);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf5bcfa52);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x09020b04);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xfb60f17b);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xfde70ea6);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x095df51e);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf10c0405);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 10700000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xffff0011);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x0014ffdb);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffb40023);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x00b2002a);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfedbff10);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x0150022d);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xff38fc6f);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36047b);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x035efbda);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xf9940202);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x08ee01f5);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf649f8fe);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x07e10bc2);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xfcb6f169);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xfcc60e42);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0a04f599);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf0db03d0);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 10800000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffb000d);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x001dffed);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffaafff5);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x00aa0077);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff13feb6);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x00ce026b);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x000afc85);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe3503e3);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x044cfcfb);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xf90c0082);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x08d5037f);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf710f7cc);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x069f0c59);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xfe16f173);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xfbaa0dcf);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0aa5f617);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf0ad039b);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 10900000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff90006);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x00210003);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffacffc8);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x008e00b6);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff63fe7c);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x003a0275);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x00dafcda);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd510313);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x0501fe40);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8cbfefd);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x087604f0);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf80af6c2);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x05430cc8);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xff7af19a);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xfa940d4e);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0b3ff699);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf0810365);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 11000000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0001ffff);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8ffff);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x00210018);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffbaffa3);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x006000e1);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xffc4fe68);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xffa0024b);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x019afd66);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc990216);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x0575ff99);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8d4fd81);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x07d40640);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf932f5e6);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x03d20d0d);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x00dff1de);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf9860cbf);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0bd1f71e);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf058032f);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 11100000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00010000);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff8);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x001b0029);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffd1ff8a);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x002600f2);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x002cfe7c);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xff0f01f0);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x023bfe20);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc1700fa);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x05a200f7);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xf927fc1c);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x06f40765);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xfa82f53b);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x02510d27);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0243f23d);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf8810c24);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0c5cf7a7);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf03102fa);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 11200000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00010002);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffafff2);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x00110035);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xfff0ff81);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xffe700e7);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x008ffeb6);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe94016d);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x02b0fefb);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3ffd1);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x05850249);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xf9c1fadb);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x05de0858);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xfbf2f4c4);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x00c70d17);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x03a0f2b8);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf7870b7c);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0cdff833);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf00d02c4);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 11300000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffdffee);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x00040038);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0010ff88);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xffac00c2);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x00e2ff10);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe3900cb);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x02f1ffe9);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3feaa);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x05210381);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfa9cf9c8);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x04990912);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xfd7af484);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xff390cdb);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x04f4f34d);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf69a0ac9);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0d5af8c1);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xefec028e);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 11400000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0000ffee);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xfff60033);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x002fff9f);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff7b0087);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x011eff82);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe080018);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x02f900d8);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17fd96);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x04790490);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfbadf8ed);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x032f098e);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xff10f47d);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xfdaf0c75);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x063cf3fc);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf5ba0a0b);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0dccf952);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xefcd0258);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 11500000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0004fff1);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffea0026);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0046ffc3);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff5a003c);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x013b0000);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe04ff63);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x02c801b8);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99fca6);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x0397056a);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfcecf853);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x01ad09c9);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x00acf4ad);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xfc2e0be7);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0773f4c2);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf4e90943);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0e35f9e6);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xefb10221);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 11600000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0007fff6);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffe20014);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0054ffee);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff4effeb);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x0137007e);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe2efebb);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x0260027a);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51fbe6);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x02870605);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfe4af7fe);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x001d09c1);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0243f515);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xfabd0b32);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0897f59e);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf4280871);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0e95fa7c);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xef9701eb);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 11700000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0001);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0008fffd);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffdeffff);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0056001d);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff57ff9c);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x011300f0);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe82fe2e);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x01ca0310);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35fb62);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x0155065a);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xffbaf7f2);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xfe8c0977);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x03cef5b2);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf9610a58);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x09a5f68f);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf3790797);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0eebfb14);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xef8001b5);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 11800000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0000);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00080004);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffe0ffe9);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x004c0047);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff75ff58);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x00d1014a);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfef9fdc8);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x0111036f);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36fb21);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x00120665);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x012df82e);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xfd0708ec);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0542f682);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf81f095c);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0a9af792);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf2db06b5);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0f38fbad);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xef6c017e);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 11900000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xffffffff);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0007000b);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffe7ffd8);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x00370068);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xffa4ff28);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x00790184);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xff87fd91);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x00430392);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x0044fb26);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfece0626);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x0294f8b2);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xfb990825);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0698f77f);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf6fe0842);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0b73f8a7);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf25105cd);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0f7bfc48);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xef5a0148);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 12000000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00050010);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xfff2ffcc);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x001b007b);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xffdfff10);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x00140198);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x0020fd8e);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xff710375);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x014dfb73);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfd9a059f);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x03e0f978);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xfa4e0726);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x07c8f8a7);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf600070c);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0c2ff9c9);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf1db04de);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0fb4fce5);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xef4b0111);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 12100000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00010012);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffffffc8);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xfffb007e);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x001dff14);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xffad0184);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x00b7fdbe);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfea9031b);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x0241fc01);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfc8504d6);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x0504fa79);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf93005f6);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x08caf9f2);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf52b05c0);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0ccbfaf9);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf17903eb);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0fe3fd83);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xef3f00db);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 12200000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffe0011);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x000cffcc);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffdb0071);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x0058ff32);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff4f014a);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x013cfe1f);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdfb028a);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x0311fcc9);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb9d03d6);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x05f4fbad);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf848049d);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0999fb5b);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf4820461);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d46fc32);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf12d02f4);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x1007fe21);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xef3600a4);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 12300000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffa000e);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x0017ffd9);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffc10055);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x0088ff68);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff0400f0);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01a6fea7);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd7501cc);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b0fdc0);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfaef02a8);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x06a7fd07);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf79d0326);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a31fcda);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf40702f3);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d9ffd72);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0f601fa);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x1021fec0);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xef2f006d);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 12400000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0001ffff);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff80007);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x001fffeb);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffaf002d);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x00a8ffb0);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfed3007e);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01e9ff4c);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd2000ee);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x0413fed8);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa82015c);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x0715fe7d);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7340198);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a8dfe69);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf3bd017c);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0dd5feb8);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0d500fd);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x1031ff60);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xef2b0037);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 12500000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00010000);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff70000);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x00220000);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffa90000);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x00b30000);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec20000);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x02000000);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd030000);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x04350000);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa5e0000);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x073b0000);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7110000);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0aac0000);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf3a40000);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0de70000);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0c90000);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x10360000);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xef290000);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 12600000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00010001);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff9);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x001f0015);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffafffd3);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x00a80050);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfed3ff82);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01e900b4);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd20ff12);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x04130128);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa82fea4);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x07150183);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf734fe68);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a8d0197);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf3bdfe84);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0dd50148);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0d5ff03);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x103100a0);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xef2bffc9);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 12700000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffafff2);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x00170027);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffc1ffab);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x00880098);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff04ff10);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01a60159);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd75fe34);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b00240);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfaeffd58);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x06a702f9);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf79dfcda);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a310326);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf407fd0d);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d9f028e);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0f6fe06);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x10210140);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xef2fff93);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 12800000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffeffef);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x000c0034);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffdbff8f);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x005800ce);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff4ffeb6);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x013c01e1);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdfbfd76);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x03110337);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb9dfc2a);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x05f40453);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf848fb63);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x099904a5);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf482fb9f);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d4603ce);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf12dfd0c);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x100701df);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xef36ff5c);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 12900000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0001ffee);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffff0038);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xfffbff82);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x001d00ec);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xffadfe7c);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x00b70242);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfea9fce5);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x024103ff);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfc85fb2a);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x05040587);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf930fa0a);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x08ca060e);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf52bfa40);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0ccb0507);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf179fc15);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0fe3027d);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xef3fff25);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 13000000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0005fff0);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xfff20034);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x001bff85);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xffdf00f0);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x0014fe68);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x00200272);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xff71fc8b);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x014d048d);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfd9afa61);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x03e00688);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xfa4ef8da);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x07c80759);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf600f8f4);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0c2f0637);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf1dbfb22);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0fb4031b);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xef4bfeef);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 13100000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0001);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0007fff5);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffe70028);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0037ff98);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xffa400d8);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x0079fe7c);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xff87026f);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x0043fc6e);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x004404da);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfecef9da);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x0294074e);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xfb99f7db);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x06980881);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf6fef7be);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0b730759);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf251fa33);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0f7b03b8);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xef5afeb8);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 13200000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0000);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0008fffc);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffe00017);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x004cffb9);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff7500a8);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x00d1feb6);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfef90238);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x0111fc91);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xff3604df);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x0012f99b);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x012d07d2);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xfd07f714);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0542097e);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf81ff6a4);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x0a9a086e);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf2dbf94b);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0f380453);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xef6cfe82);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 13300000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xffffffff);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00080003);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffde0001);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0056ffe3);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff570064);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x0113ff10);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe8201d2);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x01cafcf0);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35049e);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x0155f9a6);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xffba080e);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xfe8cf689);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x03ce0a4e);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xf961f5a8);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x09a50971);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf379f869);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0eeb04ec);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xef80fe4b);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 13400000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0007000a);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffe2ffec);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x00540012);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff4e0015);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x0137ff82);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe2e0145);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x0260fd86);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51041a);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x0287f9fb);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfe4a0802);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x001df63f);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x02430aeb);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xfabdf4ce);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x08970a62);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf428f78f);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0e950584);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xef97fe15);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 13500000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0004000f);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffeaffda);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0046003d);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff5affc4);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x013b0000);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe04009d);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x02c8fe48);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99035a);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x0397fa96);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfcec07ad);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x01adf637);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x00ac0b53);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xfc2ef419);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x07730b3e);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf4e9f6bd);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0e35061a);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xefb1fddf);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 13600000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00000012);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xfff6ffcd);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x002f0061);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff7bff79);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x011e007e);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe08ffe8);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x02f9ff28);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17026a);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x0479fb70);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfbad0713);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x032ff672);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xff100b83);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xfdaff38b);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x063c0c04);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf5baf5f5);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0dcc06ae);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xefcdfda8);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 13700000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffd0012);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x0004ffc8);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x00100078);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xffacff3e);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x00e200f0);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe39ff35);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x02f10017);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd30156);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x0521fc7f);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfa9c0638);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x0499f6ee);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xfd7a0b7c);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0xff39f325);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x04f40cb3);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf69af537);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0d5a073f);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xefecfd72);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 13800000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0001fffe);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffa000e);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x0011ffcb);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xfff0007f);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xffe7ff19);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x008f014a);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe94fe93);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x02b00105);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3002f);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x0585fdb7);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xf9c10525);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x05def7a8);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xfbf20b3c);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x00c7f2e9);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x03a00d48);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf787f484);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0cdf07cd);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf00dfd3c);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 13900000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00010000);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff80008);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x001bffd7);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffd10076);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x0026ff0e);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x002c0184);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xff0ffe10);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x023b01e0);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17ff06);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x05a2ff09);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xf92703e4);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x06f4f89b);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xfa820ac5);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0251f2d9);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x02430dc3);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf881f3dc);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0c5c0859);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf031fd06);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 14000000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00010001);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff80001);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x0021ffe8);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffba005d);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x0060ff1f);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xffc40198);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xffa0fdb5);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x019a029a);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99fdea);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x05750067);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8d4027f);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x07d4f9c0);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf9320a1a);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x03d2f2f3);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0x00df0e22);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xf986f341);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0bd108e2);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf058fcd1);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 14100000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9fffa);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x0021fffd);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffac0038);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x008eff4a);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff630184);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x003afd8b);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x00da0326);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51fced);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x050101c0);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8cb0103);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x0876fb10);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf80a093e);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0543f338);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xff7a0e66);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xfa94f2b2);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0b3f0967);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf081fc9b);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 14200000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffbfff3);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x001d0013);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffaa000b);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x00aaff89);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff13014a);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x00cefd95);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x000a037b);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35fc1d);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x044c0305);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xf90cff7e);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x08d5fc81);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf7100834);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x069ff3a7);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xfe160e8d);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xfbaaf231);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0aa509e9);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf0adfc65);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 14300000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xffffffef);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x00140025);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffb4ffdd);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x00b2ffd6);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfedb00f0);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x0150fdd3);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xff380391);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36fb85);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x035e0426);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xf994fdfe);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x08eefe0b);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf6490702);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x07e1f43e);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xfcb60e97);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xfcc6f1be);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x0a040a67);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf0dbfc30);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 14400000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0002ffee);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x00070033);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffc9ffb4);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x00a40027);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec3007e);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01b4fe3f);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe760369);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x0044fb2e);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x02450518);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfa5ffc90);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x08c1ffa1);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf5bc05ae);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0902f4fc);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xfb600e85);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xfde7f15a);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x095d0ae2);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf10cfbfb);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 14500000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0002);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0005ffef);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xfffa0038);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffe5ff95);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x00820074);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfecc0000);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01f0fed0);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdd20304);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x014dfb1d);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x010e05ce);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfb64fb41);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x084e013b);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf569043e);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0a00f5dd);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xfa150e55);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0xff0bf104);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x08b00b59);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf13ffbc6);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 14600000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0001);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0008fff4);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffed0035);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0005ff83);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x005000b4);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfef6ff82);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01ffff7a);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd580269);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x0241fb53);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xffca0640);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfc99fa1e);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x079a02cb);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf55502ba);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0ad5f6e0);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf8d90e0a);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0031f0bd);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x07fd0bcb);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf174fb91);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 14700000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xffffffff);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0009fffb);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffe4002a);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0025ff82);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x001400e0);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff3cff10);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01e10030);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd1201a4);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x0311fbcd);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfe88066a);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xfdf1f92f);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x06aa0449);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf57e0128);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0b7ef801);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf7b00da2);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0156f086);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x07450c39);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf1acfb5c);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 14800000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00080002);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffdf0019);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x003fff92);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xffd600f1);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff96feb6);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x019700e1);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd0500c2);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b0fc84);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfd590649);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0xff5df87f);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x058505aa);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf5e4ff91);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0bf9f93c);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf69d0d20);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0279f05e);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x06880ca3);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf1e6fb28);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 14900000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x00060009);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffdf0004);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0051ffb0);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff9d00e8);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xfffcfe7c);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x01280180);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd32ffd2);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x0413fd6e);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfc4d05df);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x00d1f812);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x043506e4);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf685fdfb);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c43fa8d);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf5a10c83);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0399f046);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x05c70d08);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf222faf3);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 15000000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0003000f);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffe5ffef);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x0057ffd9);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff7000c4);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x0062fe68);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x009e01ff);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd95fee6);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x0435fe7d);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb710530);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x023cf7ee);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x02c307ef);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf75efc70);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c5cfbef);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf4c10bce);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x04b3f03f);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x05030d69);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf261fabf);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 15100000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xffff0012);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xffefffdc);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x00510006);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff540089);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x00befe7c);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0x00060253);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe27fe0d);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x0413ffa2);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfad10446);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x0390f812);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0x013b08c3);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf868faf6);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c43fd5f);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf3fd0b02);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x05c7f046);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x043b0dc4);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf2a1fa8b);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 15200000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0001fffe);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffc0012);
- cx25840_write4(client, DIF_BPF_COEFF45, 0xfffbffce);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x003f0033);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff4e003f);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x0106feb6);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xff6e0276);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xfeddfd56);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b000cc);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa740329);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x04bff87f);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xffaa095d);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xf99ef995);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0bf9fed8);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf3590a1f);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x06d2f05e);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x03700e1b);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf2e4fa58);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 15300000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x0001ffff);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9000f);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x0009ffc8);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x00250059);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff5effee);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x0132ff10);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfee30265);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0xffaafccf);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x031101eb);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa6001e8);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x05bdf92f);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xfe1b09b6);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xfafaf852);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0b7e0055);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2d50929);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x07d3f086);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x02a30e6c);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf329fa24);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 15400000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00010001);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff80009);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x0015ffca);
- cx25840_write4(client, DIF_BPF_COEFF67, 0x00050074);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xff81ff9f);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x013dff82);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe710221);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x007cfc80);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x024102ed);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa940090);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x0680fa1e);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xfc9b09cd);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xfc73f736);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0ad501d0);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2740820);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x08c9f0bd);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x01d40eb9);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf371f9f1);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 15500000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff80002);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x001effd5);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffe5007f);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xffb4ff5b);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x01280000);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe2401b0);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x0146fc70);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x014d03c6);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb10ff32);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x0701fb41);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xfb3709a1);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xfe00f644);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x0a000345);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2350708);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x09b2f104);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x01050eff);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf3baf9be);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 15600000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9fffb);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x0022ffe6);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffc9007a);
- cx25840_write4(client, DIF_BPF_COEFF89, 0xfff0ff29);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x00f2007e);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe01011b);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x01f6fc9e);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0x00440467);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfbccfdde);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x0738fc90);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf9f70934);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0xff99f582);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x090204b0);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf21a05e1);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0a8df15a);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0x00340f41);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf405f98b);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 15700000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0xfffcfff4);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x0020fffa);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffb40064);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x002fff11);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x00a400f0);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe0d006e);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x0281fd09);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xff3604c9);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfcbffca2);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x0726fdfe);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf8e80888);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x0134f4f3);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x07e1060c);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf22304af);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0b59f1be);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xff640f7d);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf452f959);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 15800000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0000fff0);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x001a0010);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffaa0041);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x0067ff13);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0x0043014a);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe46ffb9);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x02dbfda8);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe3504e5);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xfddcfb8d);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x06c9ff7e);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf81107a2);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x02c9f49a);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x069f0753);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2500373);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0c14f231);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xfe930fb3);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf4a1f927);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 15900000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0002);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0003ffee);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x000f0023);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffac0016);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x0093ff31);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xffdc0184);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xfea6ff09);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x02fdfe70);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd5104ba);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0xff15faac);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x06270103);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7780688);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x044df479);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x05430883);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2a00231);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0cbef2b2);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xfdc40fe3);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf4f2f8f5);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
-
- case 16000000:
- cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0001);
- cx25840_write4(client, DIF_BPF_COEFF23, 0x0006ffef);
- cx25840_write4(client, DIF_BPF_COEFF45, 0x00020031);
- cx25840_write4(client, DIF_BPF_COEFF67, 0xffbaffe8);
- cx25840_write4(client, DIF_BPF_COEFF89, 0x00adff66);
- cx25840_write4(client, DIF_BPF_COEFF1011, 0xff790198);
- cx25840_write4(client, DIF_BPF_COEFF1213, 0xff26fe6e);
- cx25840_write4(client, DIF_BPF_COEFF1415, 0x02e5ff55);
- cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99044a);
- cx25840_write4(client, DIF_BPF_COEFF1819, 0x005bfa09);
- cx25840_write4(client, DIF_BPF_COEFF2021, 0x0545027f);
- cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7230541);
- cx25840_write4(client, DIF_BPF_COEFF2425, 0x05b8f490);
- cx25840_write4(client, DIF_BPF_COEFF2627, 0x03d20997);
- cx25840_write4(client, DIF_BPF_COEFF2829, 0xf31300eb);
- cx25840_write4(client, DIF_BPF_COEFF3031, 0x0d55f341);
- cx25840_write4(client, DIF_BPF_COEFF3233, 0xfcf6100e);
- cx25840_write4(client, DIF_BPF_COEFF3435, 0xf544f8c3);
- cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
- break;
- }
+ coeffs = ifhz_coeffs[(ifHz - 3000000) / 100000];
+ cx25840_write4(client, DIF_BPF_COEFF01, coeffs[0]);
+ cx25840_write4(client, DIF_BPF_COEFF23, coeffs[1]);
+ cx25840_write4(client, DIF_BPF_COEFF45, coeffs[2]);
+ cx25840_write4(client, DIF_BPF_COEFF67, coeffs[3]);
+ cx25840_write4(client, DIF_BPF_COEFF89, coeffs[4]);
+ cx25840_write4(client, DIF_BPF_COEFF1011, coeffs[5]);
+ cx25840_write4(client, DIF_BPF_COEFF1213, coeffs[6]);
+ cx25840_write4(client, DIF_BPF_COEFF1415, coeffs[7]);
+ cx25840_write4(client, DIF_BPF_COEFF1617, coeffs[8]);
+ cx25840_write4(client, DIF_BPF_COEFF1819, coeffs[9]);
+ cx25840_write4(client, DIF_BPF_COEFF2021, coeffs[10]);
+ cx25840_write4(client, DIF_BPF_COEFF2223, coeffs[11]);
+ cx25840_write4(client, DIF_BPF_COEFF2425, coeffs[12]);
+ cx25840_write4(client, DIF_BPF_COEFF2627, coeffs[13]);
+ cx25840_write4(client, DIF_BPF_COEFF2829, coeffs[14]);
+ cx25840_write4(client, DIF_BPF_COEFF3031, coeffs[15]);
+ cx25840_write4(client, DIF_BPF_COEFF3233, coeffs[16]);
+ cx25840_write4(client, DIF_BPF_COEFF3435, coeffs[17]);
+ cx25840_write4(client, DIF_BPF_COEFF36, coeffs[18]);
}
static void cx23888_std_setup(struct i2c_client *client)
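The cx25840 hunk above finishes replacing a 131-case switch (one case per 100 kHz of IF from 3.0 MHz to 16.0 MHz) with a single table lookup: `ifhz_coeffs[(ifHz - 3000000) / 100000]` selects a row of 19 coefficient words that are then written to the DIF band-pass filter registers. A minimal sketch of that lookup follows; the clamp is an editorial addition for illustration only, since the patch itself assumes callers pass an in-range IF, and the table contents are simply the words from the old case bodies.

```c
/*
 * Sketch only: one row per 100 kHz step from 3.0 MHz to 16.0 MHz,
 * 19 coefficient words per row (DIF_BPF_COEFF01..DIF_BPF_COEFF36).
 */
extern const u32 ifhz_coeffs[131][19];

static void cx25840_write_dif_bpf(struct i2c_client *client, u32 ifHz)
{
	const u32 *coeffs;

	/* Editorial guard, not part of the patch. */
	ifHz = clamp(ifHz, 3000000U, 16000000U);
	coeffs = ifhz_coeffs[(ifHz - 3000000) / 100000];

	cx25840_write4(client, DIF_BPF_COEFF01, coeffs[0]);
	cx25840_write4(client, DIF_BPF_COEFF23, coeffs[1]);
	/* ...remaining writes stay explicit per register, as in the patch... */
	cx25840_write4(client, DIF_BPF_COEFF36, coeffs[18]);
}
```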
diff --git a/drivers/media/i2c/ds90ub913.c b/drivers/media/i2c/ds90ub913.c
index 4bfa3b3cf619..8e9ebed09f64 100644
--- a/drivers/media/i2c/ds90ub913.c
+++ b/drivers/media/i2c/ds90ub913.c
@@ -362,8 +362,6 @@ static int ub913_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
if (ret)
return ret;
- memset(fd, 0, sizeof(*fd));
-
fd->type = V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL;
state = v4l2_subdev_lock_and_get_active_state(sd);
diff --git a/drivers/media/i2c/ds90ub953.c b/drivers/media/i2c/ds90ub953.c
index dc394e22a42c..644022312833 100644
--- a/drivers/media/i2c/ds90ub953.c
+++ b/drivers/media/i2c/ds90ub953.c
@@ -499,8 +499,6 @@ static int ub953_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
if (ret)
return ret;
- memset(fd, 0, sizeof(*fd));
-
fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2;
state = v4l2_subdev_lock_and_get_active_state(sd);
diff --git a/drivers/media/i2c/ds90ub960.c b/drivers/media/i2c/ds90ub960.c
index 8ba5750f5a23..b8f3e5ca03ef 100644
--- a/drivers/media/i2c/ds90ub960.c
+++ b/drivers/media/i2c/ds90ub960.c
@@ -2786,8 +2786,6 @@ static int ub960_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
if (!ub960_pad_is_source(priv, pad))
return -EINVAL;
- memset(fd, 0, sizeof(*fd));
-
fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2;
state = v4l2_subdev_lock_and_get_active_state(&priv->sd);
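The three FPD-Link drivers above (ds90ub913, ds90ub953, ds90ub960) drop the same memset() from their .get_frame_desc() handlers. The core-side change is not shown in this section, but the apparent rationale is that the v4l2-subdev call wrapper now zeroes the frame descriptor before invoking the driver op, so per-driver clearing is redundant. A sketch of that assumed caller-side behaviour, with an illustrative wrapper name:

```c
/*
 * Assumed core-side behaviour (not part of this diff): the subdev call
 * wrapper clears the descriptor once, and drivers only fill in fields.
 */
static int call_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
			       struct v4l2_mbus_frame_desc *fd)
{
	memset(fd, 0, sizeof(*fd));

	return sd->ops->pad->get_frame_desc(sd, pad, fd);
}
```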
diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
index fd56ba138739..f6ea9b7b9700 100644
--- a/drivers/media/i2c/hi556.c
+++ b/drivers/media/i2c/hi556.c
@@ -477,6 +477,50 @@ static const struct hi556_reg mode_1296x972_regs[] = {
{0x0958, 0xbb80},
};
+static const struct hi556_reg mode_1296x722_regs[] = {
+ {0x0a00, 0x0000},
+ {0x0b0a, 0x8259},
+ {0x0f30, 0x5b15},
+ {0x0f32, 0x7167},
+ {0x004a, 0x0100},
+ {0x004c, 0x0000},
+ {0x004e, 0x0100},
+ {0x000c, 0x0122},
+ {0x0008, 0x0b00},
+ {0x005a, 0x0404},
+ {0x0012, 0x000c},
+ {0x0018, 0x0a33},
+ {0x0022, 0x0008},
+ {0x0028, 0x0017},
+ {0x0024, 0x0022},
+ {0x002a, 0x002b},
+ {0x0026, 0x012a},
+ {0x002c, 0x06cf},
+ {0x002e, 0x3311},
+ {0x0030, 0x3311},
+ {0x0032, 0x3311},
+ {0x0006, 0x0814},
+ {0x0a22, 0x0000},
+ {0x0a12, 0x0510},
+ {0x0a14, 0x02d2},
+ {0x003e, 0x0000},
+ {0x0074, 0x0812},
+ {0x0070, 0x0409},
+ {0x0804, 0x0308},
+ {0x0806, 0x0100},
+ {0x0a04, 0x016a},
+ {0x090c, 0x09c0},
+ {0x090e, 0x0010},
+ {0x0902, 0x4319},
+ {0x0914, 0xc106},
+ {0x0916, 0x040e},
+ {0x0918, 0x0304},
+ {0x091a, 0x0708},
+ {0x091c, 0x0e06},
+ {0x091e, 0x0300},
+ {0x0958, 0xbb80},
+};
+
static const char * const hi556_test_pattern_menu[] = {
"Disabled",
"Solid Colour",
@@ -556,7 +600,25 @@ static const struct hi556_mode supported_modes[] = {
.regs = mode_1296x972_regs,
},
.link_freq_index = HI556_LINK_FREQ_437MHZ_INDEX,
- }
+ },
+ {
+ .width = 1296,
+ .height = 722,
+ .crop = {
+ .left = HI556_PIXEL_ARRAY_LEFT,
+ .top = 250,
+ .width = HI556_PIXEL_ARRAY_WIDTH,
+ .height = 1444
+ },
+ .fll_def = HI556_FLL_30FPS,
+ .fll_min = HI556_FLL_30FPS_MIN,
+ .llp = 0x0b00,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1296x722_regs),
+ .regs = mode_1296x722_regs,
+ },
+ .link_freq_index = HI556_LINK_FREQ_437MHZ_INDEX,
+ },
};
struct hi556 {
@@ -577,9 +639,6 @@ struct hi556 {
/* To serialize asynchronus callbacks */
struct mutex mutex;
- /* Streaming on/off */
- bool streaming;
-
/* True if the device has been identified */
bool identified;
};
@@ -976,9 +1035,6 @@ static int hi556_set_stream(struct v4l2_subdev *sd, int enable)
struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
- if (hi556->streaming == enable)
- return 0;
-
mutex_lock(&hi556->mutex);
if (enable) {
ret = pm_runtime_resume_and_get(&client->dev);
@@ -998,47 +1054,8 @@ static int hi556_set_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_put(&client->dev);
}
- hi556->streaming = enable;
- mutex_unlock(&hi556->mutex);
-
- return ret;
-}
-
-static int __maybe_unused hi556_suspend(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct hi556 *hi556 = to_hi556(sd);
-
- mutex_lock(&hi556->mutex);
- if (hi556->streaming)
- hi556_stop_streaming(hi556);
-
- mutex_unlock(&hi556->mutex);
-
- return 0;
-}
-
-static int __maybe_unused hi556_resume(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct hi556 *hi556 = to_hi556(sd);
- int ret;
-
- mutex_lock(&hi556->mutex);
- if (hi556->streaming) {
- ret = hi556_start_streaming(hi556);
- if (ret)
- goto error;
- }
-
mutex_unlock(&hi556->mutex);
- return 0;
-
-error:
- hi556_stop_streaming(hi556);
- hi556->streaming = 0;
- mutex_unlock(&hi556->mutex);
return ret;
}
@@ -1331,10 +1348,6 @@ probe_error_v4l2_ctrl_handler_free:
return ret;
}
-static const struct dev_pm_ops hi556_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(hi556_suspend, hi556_resume)
-};
-
#ifdef CONFIG_ACPI
static const struct acpi_device_id hi556_acpi_ids[] = {
{"INT3537"},
@@ -1347,7 +1360,6 @@ MODULE_DEVICE_TABLE(acpi, hi556_acpi_ids);
static struct i2c_driver hi556_i2c_driver = {
.driver = {
.name = "hi556",
- .pm = &hi556_pm_ops,
.acpi_match_table = ACPI_PTR(hi556_acpi_ids),
},
.probe = hi556_probe,
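The hi556 changes above follow a pattern repeated in hi847, imx208, imx214 and imx219 below: the driver-private streaming flag goes away together with the system-sleep PM handlers that used it to restart streaming across suspend/resume, and the pm_runtime_get_sync()/pm_runtime_put_noidle() error handling is replaced by pm_runtime_resume_and_get(), which drops the usage count itself on failure. (hi846 below keeps its runtime PM callbacks but likewise stops restarting streaming from resume.) A minimal sketch of the resulting s_stream shape, using hypothetical names rather than any one driver:

```c
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <media/v4l2-subdev.h>

/* Hypothetical driver state; mirrors the pattern, not a specific driver. */
struct my_sensor {
	struct v4l2_subdev sd;
	struct mutex lock;	/* serialises s_stream against ctrl callbacks */
};

static int my_sensor_start_streaming(struct my_sensor *sensor) { return 0; }
static void my_sensor_stop_streaming(struct my_sensor *sensor) { }

static int my_sensor_set_stream(struct v4l2_subdev *sd, int enable)
{
	struct my_sensor *sensor = container_of(sd, struct my_sensor, sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int ret = 0;

	mutex_lock(&sensor->lock);
	if (enable) {
		/* Drops the usage count itself on failure. */
		ret = pm_runtime_resume_and_get(&client->dev);
		if (ret)
			goto unlock;

		ret = my_sensor_start_streaming(sensor);
		if (ret)
			pm_runtime_put(&client->dev);
	} else {
		my_sensor_stop_streaming(sensor);
		pm_runtime_put(&client->dev);
	}

unlock:
	mutex_unlock(&sensor->lock);
	return ret;
}
```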
diff --git a/drivers/media/i2c/hi846.c b/drivers/media/i2c/hi846.c
index fa0038749a3b..825fc8dc48f5 100644
--- a/drivers/media/i2c/hi846.c
+++ b/drivers/media/i2c/hi846.c
@@ -1607,17 +1607,12 @@ static int hi846_set_stream(struct v4l2_subdev *sd, int enable)
struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
- if (hi846->streaming == enable)
- return 0;
-
mutex_lock(&hi846->mutex);
if (enable) {
- ret = pm_runtime_get_sync(&client->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret)
goto out;
- }
ret = hi846_start_streaming(hi846);
}
@@ -1680,9 +1675,6 @@ static int __maybe_unused hi846_suspend(struct device *dev)
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct hi846 *hi846 = to_hi846(sd);
- if (hi846->streaming)
- hi846_stop_streaming(hi846);
-
return hi846_power_off(hi846);
}
@@ -1691,26 +1683,8 @@ static int __maybe_unused hi846_resume(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct hi846 *hi846 = to_hi846(sd);
- int ret;
-
- ret = hi846_power_on(hi846);
- if (ret)
- return ret;
- if (hi846->streaming) {
- ret = hi846_start_streaming(hi846);
- if (ret) {
- dev_err(dev, "%s: start streaming failed: %d\n",
- __func__, ret);
- goto error;
- }
- }
-
- return 0;
-
-error:
- hi846_power_off(hi846);
- return ret;
+ return hi846_power_on(hi846);
}
static int hi846_set_format(struct v4l2_subdev *sd,
@@ -2173,8 +2147,6 @@ static void hi846_remove(struct i2c_client *client)
}
static const struct dev_pm_ops hi846_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(hi846_suspend, hi846_resume, NULL)
};
diff --git a/drivers/media/i2c/hi847.c b/drivers/media/i2c/hi847.c
index 32547d7a2659..4075c389804c 100644
--- a/drivers/media/i2c/hi847.c
+++ b/drivers/media/i2c/hi847.c
@@ -2184,9 +2184,6 @@ struct hi847 {
/* To serialize asynchronus callbacks */
struct mutex mutex;
-
- /* Streaming on/off */
- bool streaming;
};
static u64 to_pixel_rate(u32 f_index)
@@ -2618,14 +2615,10 @@ static int hi847_set_stream(struct v4l2_subdev *sd, int enable)
struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
- if (hi847->streaming == enable)
- return 0;
-
mutex_lock(&hi847->mutex);
if (enable) {
- ret = pm_runtime_get_sync(&client->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret) {
mutex_unlock(&hi847->mutex);
return ret;
}
@@ -2641,49 +2634,8 @@ static int hi847_set_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_put(&client->dev);
}
- hi847->streaming = enable;
- mutex_unlock(&hi847->mutex);
-
- return ret;
-}
-
-static int __maybe_unused hi847_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct hi847 *hi847 = to_hi847(sd);
-
- mutex_lock(&hi847->mutex);
- if (hi847->streaming)
- hi847_stop_streaming(hi847);
-
- mutex_unlock(&hi847->mutex);
-
- return 0;
-}
-
-static int __maybe_unused hi847_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct hi847 *hi847 = to_hi847(sd);
- int ret;
-
- mutex_lock(&hi847->mutex);
- if (hi847->streaming) {
- ret = hi847_start_streaming(hi847);
- if (ret)
- goto error;
- }
-
mutex_unlock(&hi847->mutex);
- return 0;
-
-error:
- hi847_stop_streaming(hi847);
- hi847->streaming = 0;
- mutex_unlock(&hi847->mutex);
return ret;
}
@@ -2980,10 +2932,6 @@ probe_error_v4l2_ctrl_handler_free:
return ret;
}
-static const struct dev_pm_ops hi847_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(hi847_suspend, hi847_resume)
-};
-
#ifdef CONFIG_ACPI
static const struct acpi_device_id hi847_acpi_ids[] = {
{"HYV0847"},
@@ -2996,7 +2944,6 @@ MODULE_DEVICE_TABLE(acpi, hi847_acpi_ids);
static struct i2c_driver hi847_i2c_driver = {
.driver = {
.name = "hi847",
- .pm = &hi847_pm_ops,
.acpi_match_table = ACPI_PTR(hi847_acpi_ids),
},
.probe = hi847_probe,
diff --git a/drivers/media/i2c/imx208.c b/drivers/media/i2c/imx208.c
index ee5a28675388..a9b0aea1ae3b 100644
--- a/drivers/media/i2c/imx208.c
+++ b/drivers/media/i2c/imx208.c
@@ -290,9 +290,6 @@ struct imx208 {
*/
struct mutex imx208_mx;
- /* Streaming on/off */
- bool streaming;
-
/* OTP data */
bool otp_read;
char otp_data[IMX208_OTP_SIZE];
@@ -714,15 +711,13 @@ static int imx208_set_stream(struct v4l2_subdev *sd, int enable)
int ret = 0;
mutex_lock(&imx208->imx208_mx);
- if (imx208->streaming == enable) {
- mutex_unlock(&imx208->imx208_mx);
- return 0;
- }
if (enable) {
- ret = pm_runtime_get_sync(&client->dev);
- if (ret < 0)
- goto err_rpm_put;
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret) {
+ mutex_unlock(&imx208->imx208_mx);
+ return ret;
+ }
/*
* Apply default & customized values
@@ -736,7 +731,6 @@ static int imx208_set_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_put(&client->dev);
}
- imx208->streaming = enable;
mutex_unlock(&imx208->imx208_mx);
/* vflip and hflip cannot change during streaming */
@@ -752,40 +746,6 @@ err_rpm_put:
return ret;
}
-static int __maybe_unused imx208_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct imx208 *imx208 = to_imx208(sd);
-
- if (imx208->streaming)
- imx208_stop_streaming(imx208);
-
- return 0;
-}
-
-static int __maybe_unused imx208_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct imx208 *imx208 = to_imx208(sd);
- int ret;
-
- if (imx208->streaming) {
- ret = imx208_start_streaming(imx208);
- if (ret)
- goto error;
- }
-
- return 0;
-
-error:
- imx208_stop_streaming(imx208);
- imx208->streaming = 0;
-
- return ret;
-}
-
/* Verify chip ID */
static const struct v4l2_subdev_video_ops imx208_video_ops = {
.s_stream = imx208_set_stream,
@@ -819,11 +779,9 @@ static int imx208_read_otp(struct imx208 *imx208)
if (imx208->otp_read)
goto out_unlock;
- ret = pm_runtime_get_sync(&client->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret)
goto out_unlock;
- }
ret = imx208_identify_module(imx208);
if (ret)
@@ -1081,10 +1039,6 @@ static void imx208_remove(struct i2c_client *client)
mutex_destroy(&imx208->imx208_mx);
}
-static const struct dev_pm_ops imx208_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(imx208_suspend, imx208_resume)
-};
-
#ifdef CONFIG_ACPI
static const struct acpi_device_id imx208_acpi_ids[] = {
{ "INT3478" },
@@ -1097,7 +1051,6 @@ MODULE_DEVICE_TABLE(acpi, imx208_acpi_ids);
static struct i2c_driver imx208_i2c_driver = {
.driver = {
.name = "imx208",
- .pm = &imx208_pm_ops,
.acpi_match_table = ACPI_PTR(imx208_acpi_ids),
},
.probe = imx208_probe,
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index 2f9c8582f940..4f77ea02cc27 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -58,8 +58,6 @@ struct imx214 {
* and start streaming.
*/
struct mutex mutex;
-
- bool streaming;
};
struct reg_8 {
@@ -775,9 +773,6 @@ static int imx214_s_stream(struct v4l2_subdev *subdev, int enable)
struct imx214 *imx214 = to_imx214(subdev);
int ret;
- if (imx214->streaming == enable)
- return 0;
-
if (enable) {
ret = pm_runtime_resume_and_get(imx214->dev);
if (ret < 0)
@@ -793,7 +788,6 @@ static int imx214_s_stream(struct v4l2_subdev *subdev, int enable)
pm_runtime_put(imx214->dev);
}
- imx214->streaming = enable;
return 0;
err_rpm_put:
@@ -909,39 +903,6 @@ done:
return ret;
}
-static int __maybe_unused imx214_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct imx214 *imx214 = to_imx214(sd);
-
- if (imx214->streaming)
- imx214_stop_streaming(imx214);
-
- return 0;
-}
-
-static int __maybe_unused imx214_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct imx214 *imx214 = to_imx214(sd);
- int ret;
-
- if (imx214->streaming) {
- ret = imx214_start_streaming(imx214);
- if (ret)
- goto error;
- }
-
- return 0;
-
-error:
- imx214_stop_streaming(imx214);
- imx214->streaming = 0;
- return ret;
-}
-
static int imx214_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -1102,7 +1063,6 @@ static const struct of_device_id imx214_of_match[] = {
MODULE_DEVICE_TABLE(of, imx214_of_match);
static const struct dev_pm_ops imx214_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(imx214_suspend, imx214_resume)
SET_RUNTIME_PM_OPS(imx214_power_off, imx214_power_on, NULL)
};
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
index ec53abe2e84e..8436880dcf7a 100644
--- a/drivers/media/i2c/imx219.c
+++ b/drivers/media/i2c/imx219.c
@@ -18,91 +18,99 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+
+#include <media/v4l2-cci.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-mediabus.h>
-#include <asm/unaligned.h>
-
-#define IMX219_REG_VALUE_08BIT 1
-#define IMX219_REG_VALUE_16BIT 2
-
-#define IMX219_REG_MODE_SELECT 0x0100
-#define IMX219_MODE_STANDBY 0x00
-#define IMX219_MODE_STREAMING 0x01
/* Chip ID */
-#define IMX219_REG_CHIP_ID 0x0000
+#define IMX219_REG_CHIP_ID CCI_REG16(0x0000)
#define IMX219_CHIP_ID 0x0219
-/* External clock frequency is 24.0M */
-#define IMX219_XCLK_FREQ 24000000
-
-/* Pixel rate is fixed for all the modes */
-#define IMX219_PIXEL_RATE 182400000
-#define IMX219_PIXEL_RATE_4LANE 280800000
-
-#define IMX219_DEFAULT_LINK_FREQ 456000000
-#define IMX219_DEFAULT_LINK_FREQ_4LANE 363000000
+#define IMX219_REG_MODE_SELECT CCI_REG8(0x0100)
+#define IMX219_MODE_STANDBY 0x00
+#define IMX219_MODE_STREAMING 0x01
-#define IMX219_REG_CSI_LANE_MODE 0x0114
+#define IMX219_REG_CSI_LANE_MODE CCI_REG8(0x0114)
#define IMX219_CSI_2_LANE_MODE 0x01
#define IMX219_CSI_4_LANE_MODE 0x03
-/* V_TIMING internal */
-#define IMX219_REG_VTS 0x0160
-#define IMX219_VTS_15FPS 0x0dc6
-#define IMX219_VTS_30FPS_1080P 0x06e3
-#define IMX219_VTS_30FPS_BINNED 0x06e3
-#define IMX219_VTS_30FPS_640x480 0x06e3
-#define IMX219_VTS_MAX 0xffff
+#define IMX219_REG_DPHY_CTRL CCI_REG8(0x0128)
+#define IMX219_DPHY_CTRL_TIMING_AUTO 0
+#define IMX219_DPHY_CTRL_TIMING_MANUAL 1
-#define IMX219_VBLANK_MIN 4
-
-/*Frame Length Line*/
-#define IMX219_FLL_MIN 0x08a6
-#define IMX219_FLL_MAX 0xffff
-#define IMX219_FLL_STEP 1
-#define IMX219_FLL_DEFAULT 0x0c98
-
-/* HBLANK control - read only */
-#define IMX219_PPL_DEFAULT 3448
-
-/* Exposure control */
-#define IMX219_REG_EXPOSURE 0x015a
-#define IMX219_EXPOSURE_MIN 4
-#define IMX219_EXPOSURE_STEP 1
-#define IMX219_EXPOSURE_DEFAULT 0x640
-#define IMX219_EXPOSURE_MAX 65535
+#define IMX219_REG_EXCK_FREQ CCI_REG16(0x012a)
+#define IMX219_EXCK_FREQ(n) ((n) * 256) /* n expressed in MHz */
/* Analog gain control */
-#define IMX219_REG_ANALOG_GAIN 0x0157
+#define IMX219_REG_ANALOG_GAIN CCI_REG8(0x0157)
#define IMX219_ANA_GAIN_MIN 0
#define IMX219_ANA_GAIN_MAX 232
#define IMX219_ANA_GAIN_STEP 1
#define IMX219_ANA_GAIN_DEFAULT 0x0
/* Digital gain control */
-#define IMX219_REG_DIGITAL_GAIN 0x0158
+#define IMX219_REG_DIGITAL_GAIN CCI_REG16(0x0158)
#define IMX219_DGTL_GAIN_MIN 0x0100
#define IMX219_DGTL_GAIN_MAX 0x0fff
#define IMX219_DGTL_GAIN_DEFAULT 0x0100
#define IMX219_DGTL_GAIN_STEP 1
-#define IMX219_REG_ORIENTATION 0x0172
+/* Exposure control */
+#define IMX219_REG_EXPOSURE CCI_REG16(0x015a)
+#define IMX219_EXPOSURE_MIN 4
+#define IMX219_EXPOSURE_STEP 1
+#define IMX219_EXPOSURE_DEFAULT 0x640
+#define IMX219_EXPOSURE_MAX 65535
+
+/* V_TIMING internal */
+#define IMX219_REG_VTS CCI_REG16(0x0160)
+#define IMX219_VTS_MAX 0xffff
+
+#define IMX219_VBLANK_MIN 4
+
+/* HBLANK control - read only */
+#define IMX219_PPL_DEFAULT 3448
+
+#define IMX219_REG_LINE_LENGTH_A CCI_REG16(0x0162)
+#define IMX219_REG_X_ADD_STA_A CCI_REG16(0x0164)
+#define IMX219_REG_X_ADD_END_A CCI_REG16(0x0166)
+#define IMX219_REG_Y_ADD_STA_A CCI_REG16(0x0168)
+#define IMX219_REG_Y_ADD_END_A CCI_REG16(0x016a)
+#define IMX219_REG_X_OUTPUT_SIZE CCI_REG16(0x016c)
+#define IMX219_REG_Y_OUTPUT_SIZE CCI_REG16(0x016e)
+#define IMX219_REG_X_ODD_INC_A CCI_REG8(0x0170)
+#define IMX219_REG_Y_ODD_INC_A CCI_REG8(0x0171)
+#define IMX219_REG_ORIENTATION CCI_REG8(0x0172)
/* Binning Mode */
-#define IMX219_REG_BINNING_MODE 0x0174
-#define IMX219_BINNING_NONE 0x0000
-#define IMX219_BINNING_2X2 0x0101
-#define IMX219_BINNING_2X2_ANALOG 0x0303
+#define IMX219_REG_BINNING_MODE_H CCI_REG8(0x0174)
+#define IMX219_REG_BINNING_MODE_V CCI_REG8(0x0175)
+#define IMX219_BINNING_NONE 0x00
+#define IMX219_BINNING_X2 0x01
+#define IMX219_BINNING_X2_ANALOG 0x03
+
+#define IMX219_REG_CSI_DATA_FORMAT_A CCI_REG16(0x018c)
+
+/* PLL Settings */
+#define IMX219_REG_VTPXCK_DIV CCI_REG8(0x0301)
+#define IMX219_REG_VTSYCK_DIV CCI_REG8(0x0303)
+#define IMX219_REG_PREPLLCK_VT_DIV CCI_REG8(0x0304)
+#define IMX219_REG_PREPLLCK_OP_DIV CCI_REG8(0x0305)
+#define IMX219_REG_PLL_VT_MPY CCI_REG16(0x0306)
+#define IMX219_REG_OPPXCK_DIV CCI_REG8(0x0309)
+#define IMX219_REG_OPSYCK_DIV CCI_REG8(0x030b)
+#define IMX219_REG_PLL_OP_MPY CCI_REG16(0x030c)
/* Test Pattern Control */
-#define IMX219_REG_TEST_PATTERN 0x0600
+#define IMX219_REG_TEST_PATTERN CCI_REG16(0x0600)
#define IMX219_TEST_PATTERN_DISABLE 0
#define IMX219_TEST_PATTERN_SOLID_COLOR 1
#define IMX219_TEST_PATTERN_COLOR_BARS 2
@@ -110,17 +118,26 @@
#define IMX219_TEST_PATTERN_PN9 4
/* Test pattern colour components */
-#define IMX219_REG_TESTP_RED 0x0602
-#define IMX219_REG_TESTP_GREENR 0x0604
-#define IMX219_REG_TESTP_BLUE 0x0606
-#define IMX219_REG_TESTP_GREENB 0x0608
+#define IMX219_REG_TESTP_RED CCI_REG16(0x0602)
+#define IMX219_REG_TESTP_GREENR CCI_REG16(0x0604)
+#define IMX219_REG_TESTP_BLUE CCI_REG16(0x0606)
+#define IMX219_REG_TESTP_GREENB CCI_REG16(0x0608)
#define IMX219_TESTP_COLOUR_MIN 0
#define IMX219_TESTP_COLOUR_MAX 0x03ff
#define IMX219_TESTP_COLOUR_STEP 1
-#define IMX219_TESTP_RED_DEFAULT IMX219_TESTP_COLOUR_MAX
-#define IMX219_TESTP_GREENR_DEFAULT 0
-#define IMX219_TESTP_BLUE_DEFAULT 0
-#define IMX219_TESTP_GREENB_DEFAULT 0
+
+#define IMX219_REG_TP_WINDOW_WIDTH CCI_REG16(0x0624)
+#define IMX219_REG_TP_WINDOW_HEIGHT CCI_REG16(0x0626)
+
+/* External clock frequency is 24.0M */
+#define IMX219_XCLK_FREQ 24000000
+
+/* Pixel rate is fixed for all the modes */
+#define IMX219_PIXEL_RATE 182400000
+#define IMX219_PIXEL_RATE_4LANE 280800000
+
+#define IMX219_DEFAULT_LINK_FREQ 456000000
+#define IMX219_DEFAULT_LINK_FREQ_4LANE 363000000
/* IMX219 native and active pixel array size. */
#define IMX219_NATIVE_WIDTH 3296U
@@ -130,16 +147,6 @@
#define IMX219_PIXEL_ARRAY_WIDTH 3280U
#define IMX219_PIXEL_ARRAY_HEIGHT 2464U
-struct imx219_reg {
- u16 address;
- u8 val;
-};
-
-struct imx219_reg_list {
- unsigned int num_of_regs;
- const struct imx219_reg *regs;
-};
-
/* Mode : resolution and related config&values */
struct imx219_mode {
/* Frame width */
@@ -147,159 +154,52 @@ struct imx219_mode {
/* Frame height */
unsigned int height;
- /* Analog crop rectangle. */
- struct v4l2_rect crop;
-
/* V-timing */
unsigned int vts_def;
-
- /* Default register values */
- struct imx219_reg_list reg_list;
-
- /* 2x2 binning is used */
- bool binning;
};
-static const struct imx219_reg imx219_common_regs[] = {
- {0x0100, 0x00}, /* Mode Select */
+static const struct cci_reg_sequence imx219_common_regs[] = {
+ { IMX219_REG_MODE_SELECT, 0x00 }, /* Mode Select */
/* To Access Addresses 3000-5fff, send the following commands */
- {0x30eb, 0x0c},
- {0x30eb, 0x05},
- {0x300a, 0xff},
- {0x300b, 0xff},
- {0x30eb, 0x05},
- {0x30eb, 0x09},
+ { CCI_REG8(0x30eb), 0x0c },
+ { CCI_REG8(0x30eb), 0x05 },
+ { CCI_REG8(0x300a), 0xff },
+ { CCI_REG8(0x300b), 0xff },
+ { CCI_REG8(0x30eb), 0x05 },
+ { CCI_REG8(0x30eb), 0x09 },
/* PLL Clock Table */
- {0x0301, 0x05}, /* VTPXCK_DIV */
- {0x0303, 0x01}, /* VTSYSCK_DIV */
- {0x0304, 0x03}, /* PREPLLCK_VT_DIV 0x03 = AUTO set */
- {0x0305, 0x03}, /* PREPLLCK_OP_DIV 0x03 = AUTO set */
- {0x0306, 0x00}, /* PLL_VT_MPY */
- {0x0307, 0x39},
- {0x030b, 0x01}, /* OP_SYS_CLK_DIV */
- {0x030c, 0x00}, /* PLL_OP_MPY */
- {0x030d, 0x72},
+ { IMX219_REG_VTPXCK_DIV, 5 },
+ { IMX219_REG_VTSYCK_DIV, 1 },
+ { IMX219_REG_PREPLLCK_VT_DIV, 3 }, /* 0x03 = AUTO set */
+ { IMX219_REG_PREPLLCK_OP_DIV, 3 }, /* 0x03 = AUTO set */
+ { IMX219_REG_PLL_VT_MPY, 57 },
+ { IMX219_REG_OPSYCK_DIV, 1 },
+ { IMX219_REG_PLL_OP_MPY, 114 },
/* Undocumented registers */
- {0x455e, 0x00},
- {0x471e, 0x4b},
- {0x4767, 0x0f},
- {0x4750, 0x14},
- {0x4540, 0x00},
- {0x47b4, 0x14},
- {0x4713, 0x30},
- {0x478b, 0x10},
- {0x478f, 0x10},
- {0x4793, 0x10},
- {0x4797, 0x0e},
- {0x479b, 0x0e},
+ { CCI_REG8(0x455e), 0x00 },
+ { CCI_REG8(0x471e), 0x4b },
+ { CCI_REG8(0x4767), 0x0f },
+ { CCI_REG8(0x4750), 0x14 },
+ { CCI_REG8(0x4540), 0x00 },
+ { CCI_REG8(0x47b4), 0x14 },
+ { CCI_REG8(0x4713), 0x30 },
+ { CCI_REG8(0x478b), 0x10 },
+ { CCI_REG8(0x478f), 0x10 },
+ { CCI_REG8(0x4793), 0x10 },
+ { CCI_REG8(0x4797), 0x0e },
+ { CCI_REG8(0x479b), 0x0e },
/* Frame Bank Register Group "A" */
- {0x0162, 0x0d}, /* Line_Length_A */
- {0x0163, 0x78},
- {0x0170, 0x01}, /* X_ODD_INC_A */
- {0x0171, 0x01}, /* Y_ODD_INC_A */
+ { IMX219_REG_LINE_LENGTH_A, 3448 },
+ { IMX219_REG_X_ODD_INC_A, 1 },
+ { IMX219_REG_Y_ODD_INC_A, 1 },
/* Output setup registers */
- {0x0114, 0x01}, /* CSI 2-Lane Mode */
- {0x0128, 0x00}, /* DPHY Auto Mode */
- {0x012a, 0x18}, /* EXCK_Freq */
- {0x012b, 0x00},
-};
-
-/*
- * Register sets lifted off the i2C interface from the Raspberry Pi firmware
- * driver.
- * 3280x2464 = mode 2, 1920x1080 = mode 1, 1640x1232 = mode 4, 640x480 = mode 7.
- */
-static const struct imx219_reg mode_3280x2464_regs[] = {
- {0x0164, 0x00},
- {0x0165, 0x00},
- {0x0166, 0x0c},
- {0x0167, 0xcf},
- {0x0168, 0x00},
- {0x0169, 0x00},
- {0x016a, 0x09},
- {0x016b, 0x9f},
- {0x016c, 0x0c},
- {0x016d, 0xd0},
- {0x016e, 0x09},
- {0x016f, 0xa0},
- {0x0624, 0x0c},
- {0x0625, 0xd0},
- {0x0626, 0x09},
- {0x0627, 0xa0},
-};
-
-static const struct imx219_reg mode_1920_1080_regs[] = {
- {0x0164, 0x02},
- {0x0165, 0xa8},
- {0x0166, 0x0a},
- {0x0167, 0x27},
- {0x0168, 0x02},
- {0x0169, 0xb4},
- {0x016a, 0x06},
- {0x016b, 0xeb},
- {0x016c, 0x07},
- {0x016d, 0x80},
- {0x016e, 0x04},
- {0x016f, 0x38},
- {0x0624, 0x07},
- {0x0625, 0x80},
- {0x0626, 0x04},
- {0x0627, 0x38},
-};
-
-static const struct imx219_reg mode_1640_1232_regs[] = {
- {0x0164, 0x00},
- {0x0165, 0x00},
- {0x0166, 0x0c},
- {0x0167, 0xcf},
- {0x0168, 0x00},
- {0x0169, 0x00},
- {0x016a, 0x09},
- {0x016b, 0x9f},
- {0x016c, 0x06},
- {0x016d, 0x68},
- {0x016e, 0x04},
- {0x016f, 0xd0},
- {0x0624, 0x06},
- {0x0625, 0x68},
- {0x0626, 0x04},
- {0x0627, 0xd0},
-};
-
-static const struct imx219_reg mode_640_480_regs[] = {
- {0x0164, 0x03},
- {0x0165, 0xe8},
- {0x0166, 0x08},
- {0x0167, 0xe7},
- {0x0168, 0x02},
- {0x0169, 0xf0},
- {0x016a, 0x06},
- {0x016b, 0xaf},
- {0x016c, 0x02},
- {0x016d, 0x80},
- {0x016e, 0x01},
- {0x016f, 0xe0},
- {0x0624, 0x06},
- {0x0625, 0x68},
- {0x0626, 0x04},
- {0x0627, 0xd0},
-};
-
-static const struct imx219_reg raw8_framefmt_regs[] = {
- {0x018c, 0x08},
- {0x018d, 0x08},
- {0x0309, 0x08},
-};
-
-static const struct imx219_reg raw10_framefmt_regs[] = {
- {0x018c, 0x0a},
- {0x018d, 0x0a},
- {0x0309, 0x0a},
+ { IMX219_REG_DPHY_CTRL, IMX219_DPHY_CTRL_TIMING_AUTO },
+ { IMX219_REG_EXCK_FREQ, IMX219_EXCK_FREQ(IMX219_XCLK_FREQ / 1000000) },
};
static const s64 imx219_link_freq_menu[] = {
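From this point the imx219 driver is converted to the v4l2-cci register helpers: each register macro encodes its width (CCI_REG8()/CCI_REG16()), register lists become struct cci_reg_sequence arrays written with cci_multi_reg_write(), and cci_write()/cci_read() take an optional error pointer so a run of accesses can share one final check, as the rewritten imx219_set_ctrl() and imx219_set_framefmt() hunks below show. A small sketch of that chaining style, with hypothetical register names:

```c
#include <linux/regmap.h>
#include <media/v4l2-cci.h>

/*
 * Hypothetical registers; the access width travels with the macro,
 * so call sites need no separate length argument.
 */
#define MY_REG_GAIN		CCI_REG8(0x0157)
#define MY_REG_EXPOSURE		CCI_REG16(0x015a)

static int my_apply_exposure(struct regmap *map, u64 gain, u64 exposure)
{
	int ret = 0;

	/* Once ret holds an error, the later calls become no-ops. */
	cci_write(map, MY_REG_GAIN, gain, &ret);
	cci_write(map, MY_REG_EXPOSURE, exposure, &ret);

	return ret;
}
```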
@@ -390,69 +290,25 @@ static const struct imx219_mode supported_modes[] = {
/* 8MPix 15fps mode */
.width = 3280,
.height = 2464,
- .crop = {
- .left = IMX219_PIXEL_ARRAY_LEFT,
- .top = IMX219_PIXEL_ARRAY_TOP,
- .width = 3280,
- .height = 2464
- },
- .vts_def = IMX219_VTS_15FPS,
- .reg_list = {
- .num_of_regs = ARRAY_SIZE(mode_3280x2464_regs),
- .regs = mode_3280x2464_regs,
- },
- .binning = false,
+ .vts_def = 3526,
},
{
/* 1080P 30fps cropped */
.width = 1920,
.height = 1080,
- .crop = {
- .left = 688,
- .top = 700,
- .width = 1920,
- .height = 1080
- },
- .vts_def = IMX219_VTS_30FPS_1080P,
- .reg_list = {
- .num_of_regs = ARRAY_SIZE(mode_1920_1080_regs),
- .regs = mode_1920_1080_regs,
- },
- .binning = false,
+ .vts_def = 1763,
},
{
/* 2x2 binned 30fps mode */
.width = 1640,
.height = 1232,
- .crop = {
- .left = IMX219_PIXEL_ARRAY_LEFT,
- .top = IMX219_PIXEL_ARRAY_TOP,
- .width = 3280,
- .height = 2464
- },
- .vts_def = IMX219_VTS_30FPS_BINNED,
- .reg_list = {
- .num_of_regs = ARRAY_SIZE(mode_1640_1232_regs),
- .regs = mode_1640_1232_regs,
- },
- .binning = true,
+ .vts_def = 1763,
},
{
/* 640x480 30fps mode */
.width = 640,
.height = 480,
- .crop = {
- .left = 1008,
- .top = 760,
- .width = 1280,
- .height = 960
- },
- .vts_def = IMX219_VTS_30FPS_640x480,
- .reg_list = {
- .num_of_regs = ARRAY_SIZE(mode_640_480_regs),
- .regs = mode_640_480_regs,
- },
- .binning = true,
+ .vts_def = 1763,
},
};
@@ -460,6 +316,7 @@ struct imx219 {
struct v4l2_subdev sd;
struct media_pad pad;
+ struct regmap *regmap;
struct clk *xclk; /* system clock to IMX219 */
u32 xclk_freq;
@@ -476,12 +333,6 @@ struct imx219 {
struct v4l2_ctrl *vblank;
struct v4l2_ctrl *hblank;
- /* Current mode */
- const struct imx219_mode *mode;
-
- /* Streaming on/off */
- bool streaming;
-
/* Two or Four lanes */
u8 lanes;
};
@@ -491,78 +342,6 @@ static inline struct imx219 *to_imx219(struct v4l2_subdev *_sd)
return container_of(_sd, struct imx219, sd);
}
-/* Read registers up to 2 at a time */
-static int imx219_read_reg(struct imx219 *imx219, u16 reg, u32 len, u32 *val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
- struct i2c_msg msgs[2];
- u8 addr_buf[2] = { reg >> 8, reg & 0xff };
- u8 data_buf[4] = { 0, };
- int ret;
-
- if (len > 4)
- return -EINVAL;
-
- /* Write register address */
- msgs[0].addr = client->addr;
- msgs[0].flags = 0;
- msgs[0].len = ARRAY_SIZE(addr_buf);
- msgs[0].buf = addr_buf;
-
- /* Read data from register */
- msgs[1].addr = client->addr;
- msgs[1].flags = I2C_M_RD;
- msgs[1].len = len;
- msgs[1].buf = &data_buf[4 - len];
-
- ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (ret != ARRAY_SIZE(msgs))
- return -EIO;
-
- *val = get_unaligned_be32(data_buf);
-
- return 0;
-}
-
-/* Write registers up to 2 at a time */
-static int imx219_write_reg(struct imx219 *imx219, u16 reg, u32 len, u32 val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
- u8 buf[6];
-
- if (len > 4)
- return -EINVAL;
-
- put_unaligned_be16(reg, buf);
- put_unaligned_be32(val << (8 * (4 - len)), buf + 2);
- if (i2c_master_send(client, buf, len + 2) != len + 2)
- return -EIO;
-
- return 0;
-}
-
-/* Write a list of registers */
-static int imx219_write_regs(struct imx219 *imx219,
- const struct imx219_reg *regs, u32 len)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
- unsigned int i;
- int ret;
-
- for (i = 0; i < len; i++) {
- ret = imx219_write_reg(imx219, regs[i].address, 1, regs[i].val);
- if (ret) {
- dev_err_ratelimited(&client->dev,
- "Failed to write reg 0x%4.4x. error = %d\n",
- regs[i].address, ret);
-
- return ret;
- }
- }
-
- return 0;
-}
-
/* Get bayer order based on flip setting. */
static u32 imx219_get_format_code(struct imx219 *imx219, u32 code)
{
@@ -581,18 +360,27 @@ static u32 imx219_get_format_code(struct imx219 *imx219, u32 code)
return imx219_mbus_formats[i];
}
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
static int imx219_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct imx219 *imx219 =
container_of(ctrl->handler, struct imx219, ctrl_handler);
struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
- int ret;
+ const struct v4l2_mbus_framefmt *format;
+ struct v4l2_subdev_state *state;
+ int ret = 0;
+
+ state = v4l2_subdev_get_locked_active_state(&imx219->sd);
+ format = v4l2_subdev_get_pad_format(&imx219->sd, state, 0);
if (ctrl->id == V4L2_CID_VBLANK) {
int exposure_max, exposure_def;
/* Update max exposure while meeting expected vblanking */
- exposure_max = imx219->mode->height + ctrl->val - 4;
+ exposure_max = format->height + ctrl->val - 4;
exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ?
exposure_max : IMX219_EXPOSURE_DEFAULT;
__v4l2_ctrl_modify_range(imx219->exposure,
@@ -610,48 +398,45 @@ static int imx219_set_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_ANALOGUE_GAIN:
- ret = imx219_write_reg(imx219, IMX219_REG_ANALOG_GAIN,
- IMX219_REG_VALUE_08BIT, ctrl->val);
+ cci_write(imx219->regmap, IMX219_REG_ANALOG_GAIN,
+ ctrl->val, &ret);
break;
case V4L2_CID_EXPOSURE:
- ret = imx219_write_reg(imx219, IMX219_REG_EXPOSURE,
- IMX219_REG_VALUE_16BIT, ctrl->val);
+ cci_write(imx219->regmap, IMX219_REG_EXPOSURE,
+ ctrl->val, &ret);
break;
case V4L2_CID_DIGITAL_GAIN:
- ret = imx219_write_reg(imx219, IMX219_REG_DIGITAL_GAIN,
- IMX219_REG_VALUE_16BIT, ctrl->val);
+ cci_write(imx219->regmap, IMX219_REG_DIGITAL_GAIN,
+ ctrl->val, &ret);
break;
case V4L2_CID_TEST_PATTERN:
- ret = imx219_write_reg(imx219, IMX219_REG_TEST_PATTERN,
- IMX219_REG_VALUE_16BIT,
- imx219_test_pattern_val[ctrl->val]);
+ cci_write(imx219->regmap, IMX219_REG_TEST_PATTERN,
+ imx219_test_pattern_val[ctrl->val], &ret);
break;
case V4L2_CID_HFLIP:
case V4L2_CID_VFLIP:
- ret = imx219_write_reg(imx219, IMX219_REG_ORIENTATION, 1,
- imx219->hflip->val |
- imx219->vflip->val << 1);
+ cci_write(imx219->regmap, IMX219_REG_ORIENTATION,
+ imx219->hflip->val | imx219->vflip->val << 1, &ret);
break;
case V4L2_CID_VBLANK:
- ret = imx219_write_reg(imx219, IMX219_REG_VTS,
- IMX219_REG_VALUE_16BIT,
- imx219->mode->height + ctrl->val);
+ cci_write(imx219->regmap, IMX219_REG_VTS,
+ format->height + ctrl->val, &ret);
break;
case V4L2_CID_TEST_PATTERN_RED:
- ret = imx219_write_reg(imx219, IMX219_REG_TESTP_RED,
- IMX219_REG_VALUE_16BIT, ctrl->val);
+ cci_write(imx219->regmap, IMX219_REG_TESTP_RED,
+ ctrl->val, &ret);
break;
case V4L2_CID_TEST_PATTERN_GREENR:
- ret = imx219_write_reg(imx219, IMX219_REG_TESTP_GREENR,
- IMX219_REG_VALUE_16BIT, ctrl->val);
+ cci_write(imx219->regmap, IMX219_REG_TESTP_GREENR,
+ ctrl->val, &ret);
break;
case V4L2_CID_TEST_PATTERN_BLUE:
- ret = imx219_write_reg(imx219, IMX219_REG_TESTP_BLUE,
- IMX219_REG_VALUE_16BIT, ctrl->val);
+ cci_write(imx219->regmap, IMX219_REG_TESTP_BLUE,
+ ctrl->val, &ret);
break;
case V4L2_CID_TEST_PATTERN_GREENB:
- ret = imx219_write_reg(imx219, IMX219_REG_TESTP_GREENB,
- IMX219_REG_VALUE_16BIT, ctrl->val);
+ cci_write(imx219->regmap, IMX219_REG_TESTP_GREENB,
+ ctrl->val, &ret);
break;
default:
dev_info(&client->dev,
@@ -670,226 +455,224 @@ static const struct v4l2_ctrl_ops imx219_ctrl_ops = {
.s_ctrl = imx219_set_ctrl,
};
-static void imx219_update_pad_format(struct imx219 *imx219,
- const struct imx219_mode *mode,
- struct v4l2_mbus_framefmt *fmt, u32 code)
+static unsigned long imx219_get_pixel_rate(struct imx219 *imx219)
{
- /* Bayer order varies with flips */
- fmt->code = imx219_get_format_code(imx219, code);
- fmt->width = mode->width;
- fmt->height = mode->height;
- fmt->field = V4L2_FIELD_NONE;
- fmt->colorspace = V4L2_COLORSPACE_RAW;
- fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
- fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+ return (imx219->lanes == 2) ? IMX219_PIXEL_RATE : IMX219_PIXEL_RATE_4LANE;
}
-static int imx219_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state)
+/* Initialize control handlers */
+static int imx219_init_controls(struct imx219 *imx219)
{
- struct imx219 *imx219 = to_imx219(sd);
- struct v4l2_mbus_framefmt *format;
- struct v4l2_rect *crop;
-
- /* Initialize the format. */
- format = v4l2_subdev_get_pad_format(sd, state, 0);
- imx219_update_pad_format(imx219, &supported_modes[0], format,
- MEDIA_BUS_FMT_SRGGB10_1X10);
-
- /* Initialize the crop rectangle. */
- crop = v4l2_subdev_get_pad_crop(sd, state, 0);
- crop->top = IMX219_PIXEL_ARRAY_TOP;
- crop->left = IMX219_PIXEL_ARRAY_LEFT;
- crop->width = IMX219_PIXEL_ARRAY_WIDTH;
- crop->height = IMX219_PIXEL_ARRAY_HEIGHT;
-
- return 0;
-}
+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+ const struct imx219_mode *mode = &supported_modes[0];
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ struct v4l2_fwnode_device_properties props;
+ int exposure_max, exposure_def, hblank;
+ int i, ret;
-static int imx219_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- struct imx219 *imx219 = to_imx219(sd);
+ ctrl_hdlr = &imx219->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 12);
+ if (ret)
+ return ret;
- if (code->index >= (ARRAY_SIZE(imx219_mbus_formats) / 4))
- return -EINVAL;
+ /* By default, PIXEL_RATE is read only */
+ imx219->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
+ V4L2_CID_PIXEL_RATE,
+ imx219_get_pixel_rate(imx219),
+ imx219_get_pixel_rate(imx219), 1,
+ imx219_get_pixel_rate(imx219));
- code->code = imx219_get_format_code(imx219, imx219_mbus_formats[code->index * 4]);
+ imx219->link_freq =
+ v4l2_ctrl_new_int_menu(ctrl_hdlr, &imx219_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(imx219_link_freq_menu) - 1, 0,
+ (imx219->lanes == 2) ? imx219_link_freq_menu :
+ imx219_link_freq_4lane_menu);
+ if (imx219->link_freq)
+ imx219->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
- return 0;
-}
+ /* Initial vblank/hblank/exposure parameters based on current mode */
+ imx219->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
+ V4L2_CID_VBLANK, IMX219_VBLANK_MIN,
+ IMX219_VTS_MAX - mode->height, 1,
+ mode->vts_def - mode->height);
+ hblank = IMX219_PPL_DEFAULT - mode->width;
+ imx219->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
+ V4L2_CID_HBLANK, hblank, hblank,
+ 1, hblank);
+ if (imx219->hblank)
+ imx219->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ exposure_max = mode->vts_def - 4;
+ exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ?
+ exposure_max : IMX219_EXPOSURE_DEFAULT;
+ imx219->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ IMX219_EXPOSURE_MIN, exposure_max,
+ IMX219_EXPOSURE_STEP,
+ exposure_def);
-static int imx219_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- struct imx219 *imx219 = to_imx219(sd);
- u32 code;
+ v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ IMX219_ANA_GAIN_MIN, IMX219_ANA_GAIN_MAX,
+ IMX219_ANA_GAIN_STEP, IMX219_ANA_GAIN_DEFAULT);
- if (fse->index >= ARRAY_SIZE(supported_modes))
- return -EINVAL;
+ v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ IMX219_DGTL_GAIN_MIN, IMX219_DGTL_GAIN_MAX,
+ IMX219_DGTL_GAIN_STEP, IMX219_DGTL_GAIN_DEFAULT);
- code = imx219_get_format_code(imx219, fse->code);
- if (fse->code != code)
- return -EINVAL;
+ imx219->hflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ if (imx219->hflip)
+ imx219->hflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
- fse->min_width = supported_modes[fse->index].width;
- fse->max_width = fse->min_width;
- fse->min_height = supported_modes[fse->index].height;
- fse->max_height = fse->min_height;
+ imx219->vflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ if (imx219->vflip)
+ imx219->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
- return 0;
-}
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &imx219_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(imx219_test_pattern_menu) - 1,
+ 0, 0, imx219_test_pattern_menu);
+ for (i = 0; i < 4; i++) {
+ /*
+ * The assumption is that
+ * V4L2_CID_TEST_PATTERN_GREENR == V4L2_CID_TEST_PATTERN_RED + 1
+ * V4L2_CID_TEST_PATTERN_BLUE == V4L2_CID_TEST_PATTERN_RED + 2
+ * V4L2_CID_TEST_PATTERN_GREENB == V4L2_CID_TEST_PATTERN_RED + 3
+ */
+ v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
+ V4L2_CID_TEST_PATTERN_RED + i,
+ IMX219_TESTP_COLOUR_MIN,
+ IMX219_TESTP_COLOUR_MAX,
+ IMX219_TESTP_COLOUR_STEP,
+ IMX219_TESTP_COLOUR_MAX);
+ /* The "Solid color" pattern is white by default */
+ }
-static int imx219_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *fmt)
-{
- struct imx219 *imx219 = to_imx219(sd);
- const struct imx219_mode *mode;
- int exposure_max, exposure_def, hblank;
- struct v4l2_mbus_framefmt *format;
- struct v4l2_rect *crop;
+ if (ctrl_hdlr->error) {
+ ret = ctrl_hdlr->error;
+ dev_err(&client->dev, "%s control init failed (%d)\n",
+ __func__, ret);
+ goto error;
+ }
- mode = v4l2_find_nearest_size(supported_modes,
- ARRAY_SIZE(supported_modes),
- width, height,
- fmt->format.width, fmt->format.height);
+ ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ if (ret)
+ goto error;
- imx219_update_pad_format(imx219, mode, &fmt->format, fmt->format.code);
+ ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &imx219_ctrl_ops,
+ &props);
+ if (ret)
+ goto error;
- format = v4l2_subdev_get_pad_format(sd, sd_state, 0);
- crop = v4l2_subdev_get_pad_crop(sd, sd_state, 0);
+ imx219->sd.ctrl_handler = ctrl_hdlr;
- *format = fmt->format;
- *crop = mode->crop;
+ return 0;
- if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
- imx219->mode = mode;
- /* Update limits and set FPS to default */
- __v4l2_ctrl_modify_range(imx219->vblank, IMX219_VBLANK_MIN,
- IMX219_VTS_MAX - mode->height, 1,
- mode->vts_def - mode->height);
- __v4l2_ctrl_s_ctrl(imx219->vblank,
- mode->vts_def - mode->height);
- /* Update max exposure while meeting expected vblanking */
- exposure_max = mode->vts_def - 4;
- exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ?
- exposure_max : IMX219_EXPOSURE_DEFAULT;
- __v4l2_ctrl_modify_range(imx219->exposure,
- imx219->exposure->minimum,
- exposure_max, imx219->exposure->step,
- exposure_def);
- /*
- * Currently PPL is fixed to IMX219_PPL_DEFAULT, so hblank
- * depends on mode->width only, and is not changeble in any
- * way other than changing the mode.
- */
- hblank = IMX219_PPL_DEFAULT - mode->width;
- __v4l2_ctrl_modify_range(imx219->hblank, hblank, hblank, 1,
- hblank);
- }
+error:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
- return 0;
+ return ret;
}
-static int imx219_set_framefmt(struct imx219 *imx219,
- const struct v4l2_mbus_framefmt *format)
+static void imx219_free_controls(struct imx219 *imx219)
{
- switch (format->code) {
- case MEDIA_BUS_FMT_SRGGB8_1X8:
- case MEDIA_BUS_FMT_SGRBG8_1X8:
- case MEDIA_BUS_FMT_SGBRG8_1X8:
- case MEDIA_BUS_FMT_SBGGR8_1X8:
- return imx219_write_regs(imx219, raw8_framefmt_regs,
- ARRAY_SIZE(raw8_framefmt_regs));
-
- case MEDIA_BUS_FMT_SRGGB10_1X10:
- case MEDIA_BUS_FMT_SGRBG10_1X10:
- case MEDIA_BUS_FMT_SGBRG10_1X10:
- case MEDIA_BUS_FMT_SBGGR10_1X10:
- return imx219_write_regs(imx219, raw10_framefmt_regs,
- ARRAY_SIZE(raw10_framefmt_regs));
- }
-
- return -EINVAL;
+ v4l2_ctrl_handler_free(imx219->sd.ctrl_handler);
}
-static int imx219_set_binning(struct imx219 *imx219,
- const struct v4l2_mbus_framefmt *format)
+/* -----------------------------------------------------------------------------
+ * Subdev operations
+ */
+
+static int imx219_set_framefmt(struct imx219 *imx219,
+ struct v4l2_subdev_state *state)
{
- if (!imx219->mode->binning) {
- return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
- IMX219_REG_VALUE_16BIT,
- IMX219_BINNING_NONE);
- }
+ const struct v4l2_mbus_framefmt *format;
+ const struct v4l2_rect *crop;
+ unsigned int bpp;
+ u64 bin_h, bin_v;
+ int ret = 0;
+
+ format = v4l2_subdev_get_pad_format(&imx219->sd, state, 0);
+ crop = v4l2_subdev_get_pad_crop(&imx219->sd, state, 0);
switch (format->code) {
case MEDIA_BUS_FMT_SRGGB8_1X8:
case MEDIA_BUS_FMT_SGRBG8_1X8:
case MEDIA_BUS_FMT_SGBRG8_1X8:
case MEDIA_BUS_FMT_SBGGR8_1X8:
- return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
- IMX219_REG_VALUE_16BIT,
- IMX219_BINNING_2X2_ANALOG);
+ bpp = 8;
+ break;
case MEDIA_BUS_FMT_SRGGB10_1X10:
case MEDIA_BUS_FMT_SGRBG10_1X10:
case MEDIA_BUS_FMT_SGBRG10_1X10:
case MEDIA_BUS_FMT_SBGGR10_1X10:
- return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
- IMX219_REG_VALUE_16BIT,
- IMX219_BINNING_2X2);
+ default:
+ bpp = 10;
+ break;
}
- return -EINVAL;
-}
+ cci_write(imx219->regmap, IMX219_REG_X_ADD_STA_A,
+ crop->left - IMX219_PIXEL_ARRAY_LEFT, &ret);
+ cci_write(imx219->regmap, IMX219_REG_X_ADD_END_A,
+ crop->left - IMX219_PIXEL_ARRAY_LEFT + crop->width - 1, &ret);
+ cci_write(imx219->regmap, IMX219_REG_Y_ADD_STA_A,
+ crop->top - IMX219_PIXEL_ARRAY_TOP, &ret);
+ cci_write(imx219->regmap, IMX219_REG_Y_ADD_END_A,
+ crop->top - IMX219_PIXEL_ARRAY_TOP + crop->height - 1, &ret);
+
+ switch (crop->width / format->width) {
+ case 1:
+ default:
+ bin_h = IMX219_BINNING_NONE;
+ break;
+ case 2:
+ bin_h = bpp == 8 ? IMX219_BINNING_X2_ANALOG : IMX219_BINNING_X2;
+ break;
+ }
-static int imx219_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_selection *sel)
-{
- switch (sel->target) {
- case V4L2_SEL_TGT_CROP: {
- sel->r = *v4l2_subdev_get_pad_crop(sd, sd_state, 0);
- return 0;
+ switch (crop->height / format->height) {
+ case 1:
+ default:
+ bin_v = IMX219_BINNING_NONE;
+ break;
+ case 2:
+ bin_v = bpp == 8 ? IMX219_BINNING_X2_ANALOG : IMX219_BINNING_X2;
+ break;
}
- case V4L2_SEL_TGT_NATIVE_SIZE:
- sel->r.top = 0;
- sel->r.left = 0;
- sel->r.width = IMX219_NATIVE_WIDTH;
- sel->r.height = IMX219_NATIVE_HEIGHT;
+ cci_write(imx219->regmap, IMX219_REG_BINNING_MODE_H, bin_h, &ret);
+ cci_write(imx219->regmap, IMX219_REG_BINNING_MODE_V, bin_v, &ret);
- return 0;
+ cci_write(imx219->regmap, IMX219_REG_X_OUTPUT_SIZE,
+ format->width, &ret);
+ cci_write(imx219->regmap, IMX219_REG_Y_OUTPUT_SIZE,
+ format->height, &ret);
- case V4L2_SEL_TGT_CROP_DEFAULT:
- case V4L2_SEL_TGT_CROP_BOUNDS:
- sel->r.top = IMX219_PIXEL_ARRAY_TOP;
- sel->r.left = IMX219_PIXEL_ARRAY_LEFT;
- sel->r.width = IMX219_PIXEL_ARRAY_WIDTH;
- sel->r.height = IMX219_PIXEL_ARRAY_HEIGHT;
+ cci_write(imx219->regmap, IMX219_REG_TP_WINDOW_WIDTH,
+ format->width, &ret);
+ cci_write(imx219->regmap, IMX219_REG_TP_WINDOW_HEIGHT,
+ format->height, &ret);
- return 0;
- }
+ cci_write(imx219->regmap, IMX219_REG_CSI_DATA_FORMAT_A,
+ (bpp << 8) | bpp, &ret);
+ cci_write(imx219->regmap, IMX219_REG_OPPXCK_DIV, bpp, &ret);
- return -EINVAL;
+ return ret;
}
static int imx219_configure_lanes(struct imx219 *imx219)
{
- return imx219_write_reg(imx219, IMX219_REG_CSI_LANE_MODE,
- IMX219_REG_VALUE_08BIT, (imx219->lanes == 2) ?
- IMX219_CSI_2_LANE_MODE : IMX219_CSI_4_LANE_MODE);
+ return cci_write(imx219->regmap, IMX219_REG_CSI_LANE_MODE,
+ imx219->lanes == 2 ? IMX219_CSI_2_LANE_MODE :
+ IMX219_CSI_4_LANE_MODE, NULL);
};
static int imx219_start_streaming(struct imx219 *imx219,
struct v4l2_subdev_state *state)
{
struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
- const struct v4l2_mbus_framefmt *format;
- const struct imx219_reg_list *reg_list;
int ret;
ret = pm_runtime_resume_and_get(&client->dev);
@@ -897,7 +680,8 @@ static int imx219_start_streaming(struct imx219 *imx219,
return ret;
/* Send all registers that are common to all modes */
- ret = imx219_write_regs(imx219, imx219_common_regs, ARRAY_SIZE(imx219_common_regs));
+ ret = cci_multi_reg_write(imx219->regmap, imx219_common_regs,
+ ARRAY_SIZE(imx219_common_regs), NULL);
if (ret) {
dev_err(&client->dev, "%s failed to send mfg header\n", __func__);
goto err_rpm_put;
@@ -910,37 +694,22 @@ static int imx219_start_streaming(struct imx219 *imx219,
goto err_rpm_put;
}
- /* Apply default values of current mode */
- reg_list = &imx219->mode->reg_list;
- ret = imx219_write_regs(imx219, reg_list->regs, reg_list->num_of_regs);
- if (ret) {
- dev_err(&client->dev, "%s failed to set mode\n", __func__);
- goto err_rpm_put;
- }
-
- format = v4l2_subdev_get_pad_format(&imx219->sd, state, 0);
- ret = imx219_set_framefmt(imx219, format);
+ /* Apply format and crop settings. */
+ ret = imx219_set_framefmt(imx219, state);
if (ret) {
dev_err(&client->dev, "%s failed to set frame format: %d\n",
__func__, ret);
goto err_rpm_put;
}
- ret = imx219_set_binning(imx219, format);
- if (ret) {
- dev_err(&client->dev, "%s failed to set binning: %d\n",
- __func__, ret);
- goto err_rpm_put;
- }
-
/* Apply customized values from user */
ret = __v4l2_ctrl_handler_setup(imx219->sd.ctrl_handler);
if (ret)
goto err_rpm_put;
/* set stream on register */
- ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
- IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
+ ret = cci_write(imx219->regmap, IMX219_REG_MODE_SELECT,
+ IMX219_MODE_STREAMING, NULL);
if (ret)
goto err_rpm_put;
@@ -961,8 +730,8 @@ static void imx219_stop_streaming(struct imx219 *imx219)
int ret;
/* set stream off register */
- ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
- IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY);
+ ret = cci_write(imx219->regmap, IMX219_REG_MODE_SELECT,
+ IMX219_MODE_STANDBY, NULL);
if (ret)
dev_err(&client->dev, "%s failed to set stream\n", __func__);
@@ -980,142 +749,176 @@ static int imx219_set_stream(struct v4l2_subdev *sd, int enable)
state = v4l2_subdev_lock_and_get_active_state(sd);
- if (imx219->streaming == enable)
- goto unlock;
-
- if (enable) {
- /*
- * Apply default & customized values
- * and then start streaming.
- */
+ if (enable)
ret = imx219_start_streaming(imx219, state);
- if (ret)
- goto unlock;
- } else {
+ else
imx219_stop_streaming(imx219);
- }
-
- imx219->streaming = enable;
-unlock:
v4l2_subdev_unlock_state(state);
return ret;
}
-/* Power/clock management functions */
-static int imx219_power_on(struct device *dev)
+static void imx219_update_pad_format(struct imx219 *imx219,
+ const struct imx219_mode *mode,
+ struct v4l2_mbus_framefmt *fmt, u32 code)
{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct imx219 *imx219 = to_imx219(sd);
- int ret;
+ /* Bayer order varies with flips */
+ fmt->code = imx219_get_format_code(imx219, code);
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_RAW;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+}
- ret = regulator_bulk_enable(IMX219_NUM_SUPPLIES,
- imx219->supplies);
- if (ret) {
- dev_err(dev, "%s: failed to enable regulators\n",
- __func__);
- return ret;
- }
+static int imx219_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct imx219 *imx219 = to_imx219(sd);
- ret = clk_prepare_enable(imx219->xclk);
- if (ret) {
- dev_err(dev, "%s: failed to enable clock\n",
- __func__);
- goto reg_off;
- }
+ if (code->index >= (ARRAY_SIZE(imx219_mbus_formats) / 4))
+ return -EINVAL;
- gpiod_set_value_cansleep(imx219->reset_gpio, 1);
- usleep_range(IMX219_XCLR_MIN_DELAY_US,
- IMX219_XCLR_MIN_DELAY_US + IMX219_XCLR_DELAY_RANGE_US);
+ code->code = imx219_get_format_code(imx219, imx219_mbus_formats[code->index * 4]);
return 0;
-
-reg_off:
- regulator_bulk_disable(IMX219_NUM_SUPPLIES, imx219->supplies);
-
- return ret;
}
-static int imx219_power_off(struct device *dev)
+static int imx219_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_size_enum *fse)
{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct imx219 *imx219 = to_imx219(sd);
+ u32 code;
- gpiod_set_value_cansleep(imx219->reset_gpio, 0);
- regulator_bulk_disable(IMX219_NUM_SUPPLIES, imx219->supplies);
- clk_disable_unprepare(imx219->xclk);
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ code = imx219_get_format_code(imx219, fse->code);
+ if (fse->code != code)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
return 0;
}
-static int __maybe_unused imx219_suspend(struct device *dev)
+static int imx219_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *fmt)
{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct imx219 *imx219 = to_imx219(sd);
+ const struct imx219_mode *mode;
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+ unsigned int bin_h, bin_v;
- if (imx219->streaming)
- imx219_stop_streaming(imx219);
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height,
+ fmt->format.width, fmt->format.height);
- return 0;
-}
+ imx219_update_pad_format(imx219, mode, &fmt->format, fmt->format.code);
-static int __maybe_unused imx219_resume(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct imx219 *imx219 = to_imx219(sd);
- struct v4l2_subdev_state *state;
- int ret;
+ format = v4l2_subdev_get_pad_format(sd, state, 0);
+ *format = fmt->format;
- if (imx219->streaming) {
- state = v4l2_subdev_lock_and_get_active_state(sd);
- ret = imx219_start_streaming(imx219, state);
- v4l2_subdev_unlock_state(state);
- if (ret)
- goto error;
- }
+ /*
+ * Use binning to maximize the crop rectangle size, and centre it in the
+ * sensor.
+ */
+ bin_h = min(IMX219_PIXEL_ARRAY_WIDTH / format->width, 2U);
+ bin_v = min(IMX219_PIXEL_ARRAY_HEIGHT / format->height, 2U);
- return 0;
+ crop = v4l2_subdev_get_pad_crop(sd, state, 0);
+ crop->width = format->width * bin_h;
+ crop->height = format->height * bin_v;
+ crop->left = (IMX219_NATIVE_WIDTH - crop->width) / 2;
+ crop->top = (IMX219_NATIVE_HEIGHT - crop->height) / 2;
-error:
- imx219_stop_streaming(imx219);
- imx219->streaming = false;
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ int exposure_max;
+ int exposure_def;
+ int hblank;
- return ret;
+ /* Update limits and set FPS to default */
+ __v4l2_ctrl_modify_range(imx219->vblank, IMX219_VBLANK_MIN,
+ IMX219_VTS_MAX - mode->height, 1,
+ mode->vts_def - mode->height);
+ __v4l2_ctrl_s_ctrl(imx219->vblank,
+ mode->vts_def - mode->height);
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = mode->vts_def - 4;
+ exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ?
+ exposure_max : IMX219_EXPOSURE_DEFAULT;
+ __v4l2_ctrl_modify_range(imx219->exposure,
+ imx219->exposure->minimum,
+ exposure_max, imx219->exposure->step,
+ exposure_def);
+ /*
+ * Currently PPL is fixed to IMX219_PPL_DEFAULT, so hblank
+ * depends on mode->width only, and is not changeble in any
+ * way other than changing the mode.
+ */
+ hblank = IMX219_PPL_DEFAULT - mode->width;
+ __v4l2_ctrl_modify_range(imx219->hblank, hblank, hblank, 1,
+ hblank);
+ }
+
+ return 0;
}
-static int imx219_get_regulators(struct imx219 *imx219)
+static int imx219_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
- unsigned int i;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP: {
+ sel->r = *v4l2_subdev_get_pad_crop(sd, state, 0);
+ return 0;
+ }
- for (i = 0; i < IMX219_NUM_SUPPLIES; i++)
- imx219->supplies[i].supply = imx219_supply_name[i];
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = IMX219_NATIVE_WIDTH;
+ sel->r.height = IMX219_NATIVE_HEIGHT;
- return devm_regulator_bulk_get(&client->dev,
- IMX219_NUM_SUPPLIES,
- imx219->supplies);
-}
+ return 0;
-/* Verify chip ID */
-static int imx219_identify_module(struct imx219 *imx219)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
- int ret;
- u32 val;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.top = IMX219_PIXEL_ARRAY_TOP;
+ sel->r.left = IMX219_PIXEL_ARRAY_LEFT;
+ sel->r.width = IMX219_PIXEL_ARRAY_WIDTH;
+ sel->r.height = IMX219_PIXEL_ARRAY_HEIGHT;
- ret = imx219_read_reg(imx219, IMX219_REG_CHIP_ID,
- IMX219_REG_VALUE_16BIT, &val);
- if (ret) {
- dev_err(&client->dev, "failed to read chip id %x\n",
- IMX219_CHIP_ID);
- return ret;
+ return 0;
}
- if (val != IMX219_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
- IMX219_CHIP_ID, val);
- return -EIO;
- }
+ return -EINVAL;
+}
+
+static int imx219_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ .pad = 0,
+ .format = {
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .width = supported_modes[0].width,
+ .height = supported_modes[0].height,
+ },
+ };
+
+ imx219_set_pad_format(sd, state, &fmt);
return 0;
}
@@ -1145,129 +948,93 @@ static const struct v4l2_subdev_ops imx219_subdev_ops = {
};
-static unsigned long imx219_get_pixel_rate(struct imx219 *imx219)
-{
- return (imx219->lanes == 2) ? IMX219_PIXEL_RATE : IMX219_PIXEL_RATE_4LANE;
-}
+/* -----------------------------------------------------------------------------
+ * Power management
+ */
-/* Initialize control handlers */
-static int imx219_init_controls(struct imx219 *imx219)
+static int imx219_power_on(struct device *dev)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
- struct v4l2_ctrl_handler *ctrl_hdlr;
- unsigned int height = imx219->mode->height;
- struct v4l2_fwnode_device_properties props;
- int exposure_max, exposure_def, hblank;
- int i, ret;
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct imx219 *imx219 = to_imx219(sd);
+ int ret;
- ctrl_hdlr = &imx219->ctrl_handler;
- ret = v4l2_ctrl_handler_init(ctrl_hdlr, 12);
- if (ret)
+ ret = regulator_bulk_enable(IMX219_NUM_SUPPLIES,
+ imx219->supplies);
+ if (ret) {
+ dev_err(dev, "%s: failed to enable regulators\n",
+ __func__);
return ret;
+ }
- /* By default, PIXEL_RATE is read only */
- imx219->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
- V4L2_CID_PIXEL_RATE,
- imx219_get_pixel_rate(imx219),
- imx219_get_pixel_rate(imx219), 1,
- imx219_get_pixel_rate(imx219));
+ ret = clk_prepare_enable(imx219->xclk);
+ if (ret) {
+ dev_err(dev, "%s: failed to enable clock\n",
+ __func__);
+ goto reg_off;
+ }
- imx219->link_freq =
- v4l2_ctrl_new_int_menu(ctrl_hdlr, &imx219_ctrl_ops,
- V4L2_CID_LINK_FREQ,
- ARRAY_SIZE(imx219_link_freq_menu) - 1, 0,
- (imx219->lanes == 2) ? imx219_link_freq_menu :
- imx219_link_freq_4lane_menu);
- if (imx219->link_freq)
- imx219->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ gpiod_set_value_cansleep(imx219->reset_gpio, 1);
+ usleep_range(IMX219_XCLR_MIN_DELAY_US,
+ IMX219_XCLR_MIN_DELAY_US + IMX219_XCLR_DELAY_RANGE_US);
- /* Initial vblank/hblank/exposure parameters based on current mode */
- imx219->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
- V4L2_CID_VBLANK, IMX219_VBLANK_MIN,
- IMX219_VTS_MAX - height, 1,
- imx219->mode->vts_def - height);
- hblank = IMX219_PPL_DEFAULT - imx219->mode->width;
- imx219->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
- V4L2_CID_HBLANK, hblank, hblank,
- 1, hblank);
- if (imx219->hblank)
- imx219->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
- exposure_max = imx219->mode->vts_def - 4;
- exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ?
- exposure_max : IMX219_EXPOSURE_DEFAULT;
- imx219->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
- V4L2_CID_EXPOSURE,
- IMX219_EXPOSURE_MIN, exposure_max,
- IMX219_EXPOSURE_STEP,
- exposure_def);
+ return 0;
- v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
- IMX219_ANA_GAIN_MIN, IMX219_ANA_GAIN_MAX,
- IMX219_ANA_GAIN_STEP, IMX219_ANA_GAIN_DEFAULT);
+reg_off:
+ regulator_bulk_disable(IMX219_NUM_SUPPLIES, imx219->supplies);
- v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
- IMX219_DGTL_GAIN_MIN, IMX219_DGTL_GAIN_MAX,
- IMX219_DGTL_GAIN_STEP, IMX219_DGTL_GAIN_DEFAULT);
+ return ret;
+}
- imx219->hflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
- V4L2_CID_HFLIP, 0, 1, 1, 0);
- if (imx219->hflip)
- imx219->hflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+static int imx219_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct imx219 *imx219 = to_imx219(sd);
- imx219->vflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
- V4L2_CID_VFLIP, 0, 1, 1, 0);
- if (imx219->vflip)
- imx219->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+ gpiod_set_value_cansleep(imx219->reset_gpio, 0);
+ regulator_bulk_disable(IMX219_NUM_SUPPLIES, imx219->supplies);
+ clk_disable_unprepare(imx219->xclk);
- v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &imx219_ctrl_ops,
- V4L2_CID_TEST_PATTERN,
- ARRAY_SIZE(imx219_test_pattern_menu) - 1,
- 0, 0, imx219_test_pattern_menu);
- for (i = 0; i < 4; i++) {
- /*
- * The assumption is that
- * V4L2_CID_TEST_PATTERN_GREENR == V4L2_CID_TEST_PATTERN_RED + 1
- * V4L2_CID_TEST_PATTERN_BLUE == V4L2_CID_TEST_PATTERN_RED + 2
- * V4L2_CID_TEST_PATTERN_GREENB == V4L2_CID_TEST_PATTERN_RED + 3
- */
- v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
- V4L2_CID_TEST_PATTERN_RED + i,
- IMX219_TESTP_COLOUR_MIN,
- IMX219_TESTP_COLOUR_MAX,
- IMX219_TESTP_COLOUR_STEP,
- IMX219_TESTP_COLOUR_MAX);
- /* The "Solid color" pattern is white by default */
- }
+ return 0;
+}
- if (ctrl_hdlr->error) {
- ret = ctrl_hdlr->error;
- dev_err(&client->dev, "%s control init failed (%d)\n",
- __func__, ret);
- goto error;
- }
+/* -----------------------------------------------------------------------------
+ * Probe & remove
+ */
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
- if (ret)
- goto error;
+static int imx219_get_regulators(struct imx219 *imx219)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+ unsigned int i;
- ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &imx219_ctrl_ops,
- &props);
- if (ret)
- goto error;
+ for (i = 0; i < IMX219_NUM_SUPPLIES; i++)
+ imx219->supplies[i].supply = imx219_supply_name[i];
- imx219->sd.ctrl_handler = ctrl_hdlr;
+ return devm_regulator_bulk_get(&client->dev,
+ IMX219_NUM_SUPPLIES,
+ imx219->supplies);
+}
- return 0;
+/* Verify chip ID */
+static int imx219_identify_module(struct imx219 *imx219)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+ int ret;
+ u64 val;
-error:
- v4l2_ctrl_handler_free(ctrl_hdlr);
+ ret = cci_read(imx219->regmap, IMX219_REG_CHIP_ID, &val, NULL);
+ if (ret) {
+ dev_err(&client->dev, "failed to read chip id %x\n",
+ IMX219_CHIP_ID);
+ return ret;
+ }
- return ret;
-}
+ if (val != IMX219_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%llx\n",
+ IMX219_CHIP_ID, val);
+ return -EIO;
+ }
-static void imx219_free_controls(struct imx219 *imx219)
-{
- v4l2_ctrl_handler_free(imx219->sd.ctrl_handler);
+ return 0;
}
static int imx219_check_hwcfg(struct device *dev, struct imx219 *imx219)
@@ -1336,6 +1103,13 @@ static int imx219_probe(struct i2c_client *client)
if (imx219_check_hwcfg(dev, imx219))
return -EINVAL;
+ imx219->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(imx219->regmap)) {
+ ret = PTR_ERR(imx219->regmap);
+ dev_err(dev, "failed to initialize CCI: %d\n", ret);
+ return ret;
+ }
+
/* Get system clock (xclk) */
imx219->xclk = devm_clk_get(dev, NULL);
if (IS_ERR(imx219->xclk)) {
@@ -1372,24 +1146,24 @@ static int imx219_probe(struct i2c_client *client)
if (ret)
goto error_power_off;
- /* Set default mode to max resolution */
- imx219->mode = &supported_modes[0];
-
- /* sensor doesn't enter LP-11 state upon power up until and unless
+ /*
+ * Sensor doesn't enter LP-11 state upon power up until and unless
* streaming is started, so upon power up switch the modes to:
* streaming -> standby
*/
- ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
- IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
+ ret = cci_write(imx219->regmap, IMX219_REG_MODE_SELECT,
+ IMX219_MODE_STREAMING, NULL);
if (ret < 0)
goto error_power_off;
+
usleep_range(100, 110);
/* put sensor back to standby mode */
- ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
- IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY);
+ ret = cci_write(imx219->regmap, IMX219_REG_MODE_SELECT,
+ IMX219_MODE_STANDBY, NULL);
if (ret < 0)
goto error_power_off;
+
usleep_range(100, 110);
ret = imx219_init_controls(imx219);
@@ -1468,7 +1242,6 @@ static const struct of_device_id imx219_dt_ids[] = {
MODULE_DEVICE_TABLE(of, imx219_dt_ids);
static const struct dev_pm_ops imx219_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(imx219_suspend, imx219_resume)
SET_RUNTIME_PM_OPS(imx219_power_off, imx219_power_on, NULL)
};
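
The imx219 hunks above replace the driver's private register I/O (imx219_read_reg()/imx219_write_reg()) with the v4l2-cci helpers set up by devm_cci_regmap_init_i2c(). A minimal sketch of that access pattern, with a hypothetical function name and made-up register address and chip ID (only cci_read() and CCI_REG16(), both visible in this patch, are assumed):

#include <media/v4l2-cci.h>

/* Hypothetical chip-ID check using the v4l2-cci helpers. */
static int foo_identify(struct regmap *cci)
{
	u64 val;
	int ret;

	/* Regmap created with devm_cci_regmap_init_i2c(client, 16). */
	ret = cci_read(cci, CCI_REG16(0x0000), &val, NULL);
	if (ret)
		return ret;

	/* 0x0219 is a made-up expected chip ID, for illustration only. */
	return val == 0x0219 ? 0 : -ENODEV;
}
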
diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c
index e196565e846e..b3827f4bc0eb 100644
--- a/drivers/media/i2c/imx258.c
+++ b/drivers/media/i2c/imx258.c
@@ -622,9 +622,6 @@ struct imx258 {
*/
struct mutex mutex;
- /* Streaming on/off */
- bool streaming;
-
struct clk *clk;
};
@@ -1035,10 +1032,6 @@ static int imx258_set_stream(struct v4l2_subdev *sd, int enable)
int ret = 0;
mutex_lock(&imx258->mutex);
- if (imx258->streaming == enable) {
- mutex_unlock(&imx258->mutex);
- return 0;
- }
if (enable) {
ret = pm_runtime_resume_and_get(&client->dev);
@@ -1057,7 +1050,6 @@ static int imx258_set_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_put(&client->dev);
}
- imx258->streaming = enable;
mutex_unlock(&imx258->mutex);
return ret;
@@ -1070,37 +1062,6 @@ err_unlock:
return ret;
}
-static int __maybe_unused imx258_suspend(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct imx258 *imx258 = to_imx258(sd);
-
- if (imx258->streaming)
- imx258_stop_streaming(imx258);
-
- return 0;
-}
-
-static int __maybe_unused imx258_resume(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct imx258 *imx258 = to_imx258(sd);
- int ret;
-
- if (imx258->streaming) {
- ret = imx258_start_streaming(imx258);
- if (ret)
- goto error;
- }
-
- return 0;
-
-error:
- imx258_stop_streaming(imx258);
- imx258->streaming = 0;
- return ret;
-}
-
/* Verify chip ID */
static int imx258_identify_module(struct imx258 *imx258)
{
@@ -1369,7 +1330,6 @@ static void imx258_remove(struct i2c_client *client)
}
static const struct dev_pm_ops imx258_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(imx258_suspend, imx258_resume)
SET_RUNTIME_PM_OPS(imx258_power_off, imx258_power_on, NULL)
};
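
The hunks above (imx258 here, and the other Sony sensors below) drop the driver-internal streaming flag and the system-sleep suspend/resume handlers, leaving runtime PM as the only power handling. A minimal sketch, with hypothetical foo_* names and the register writes elided, of the s_stream() shape these drivers converge on:

static int foo_set_stream(struct v4l2_subdev *sd, int enable)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int ret = 0;

	if (enable) {
		/* Power the sensor up through runtime PM before writing registers. */
		ret = pm_runtime_resume_and_get(&client->dev);
		if (ret < 0)
			return ret;
		/* ... apply the mode and start streaming here ... */
	} else {
		/* ... stop streaming here ... */
		pm_runtime_put(&client->dev);
	}

	return ret;
}
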
diff --git a/drivers/media/i2c/imx296.c b/drivers/media/i2c/imx296.c
index 3b4539b622b4..94aac9d2732f 100644
--- a/drivers/media/i2c/imx296.c
+++ b/drivers/media/i2c/imx296.c
@@ -201,8 +201,6 @@ struct imx296 {
const struct imx296_clk_params *clk_params;
bool mono;
- bool streaming;
-
struct v4l2_subdev subdev;
struct media_pad pad;
@@ -321,7 +319,7 @@ static int imx296_s_ctrl(struct v4l2_ctrl *ctrl)
unsigned int vmax;
int ret = 0;
- if (!sensor->streaming)
+ if (!pm_runtime_get_if_in_use(sensor->dev))
return 0;
state = v4l2_subdev_get_locked_active_state(&sensor->subdev);
@@ -376,6 +374,8 @@ static int imx296_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
+ pm_runtime_put(sensor->dev);
+
return ret;
}
@@ -607,8 +607,6 @@ static int imx296_s_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
- sensor->streaming = false;
-
goto unlock;
}
@@ -620,13 +618,6 @@ static int imx296_s_stream(struct v4l2_subdev *sd, int enable)
if (ret < 0)
goto err_pm;
- /*
- * Set streaming to true to ensure __v4l2_ctrl_handler_setup() will set
- * the controls. The flag is reset to false further down if an error
- * occurs.
- */
- sensor->streaming = true;
-
ret = __v4l2_ctrl_handler_setup(&sensor->ctrls);
if (ret < 0)
goto err_pm;
@@ -646,7 +637,6 @@ err_pm:
* likely has no other chance to recover.
*/
pm_runtime_put_sync(sensor->dev);
- sensor->streaming = false;
goto unlock;
}
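
With the streaming flag gone, imx296 (and imx415 further down) instead guard the control handler with pm_runtime_get_if_in_use(), so register writes are skipped while the sensor is powered down and re-applied by __v4l2_ctrl_handler_setup() at stream-on. A condensed sketch of that guard, with hypothetical foo_* helpers:

static int foo_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct foo *sensor = ctrl_to_foo(ctrl);	/* hypothetical container_of() wrapper */
	int ret;

	/* Not powered: report success, the value is applied at stream-on. */
	if (!pm_runtime_get_if_in_use(sensor->dev))
		return 0;

	switch (ctrl->id) {
	case V4L2_CID_ANALOGUE_GAIN:
		ret = foo_write_gain(sensor, ctrl->val);	/* hypothetical register write */
		break;
	default:
		ret = -EINVAL;
		break;
	}

	pm_runtime_put(sensor->dev);

	return ret;
}
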
diff --git a/drivers/media/i2c/imx319.c b/drivers/media/i2c/imx319.c
index 52ebb096e107..5378f607f340 100644
--- a/drivers/media/i2c/imx319.c
+++ b/drivers/media/i2c/imx319.c
@@ -138,8 +138,6 @@ struct imx319 {
*/
struct mutex mutex;
- /* Streaming on/off */
- bool streaming;
/* True if the device has been identified */
bool identified;
};
@@ -2166,10 +2164,6 @@ static int imx319_set_stream(struct v4l2_subdev *sd, int enable)
int ret = 0;
mutex_lock(&imx319->mutex);
- if (imx319->streaming == enable) {
- mutex_unlock(&imx319->mutex);
- return 0;
- }
if (enable) {
ret = pm_runtime_resume_and_get(&client->dev);
@@ -2188,8 +2182,6 @@ static int imx319_set_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_put(&client->dev);
}
- imx319->streaming = enable;
-
/* vflip and hflip cannot change during streaming */
__v4l2_ctrl_grab(imx319->vflip, enable);
__v4l2_ctrl_grab(imx319->hflip, enable);
@@ -2206,37 +2198,6 @@ err_unlock:
return ret;
}
-static int __maybe_unused imx319_suspend(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct imx319 *imx319 = to_imx319(sd);
-
- if (imx319->streaming)
- imx319_stop_streaming(imx319);
-
- return 0;
-}
-
-static int __maybe_unused imx319_resume(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct imx319 *imx319 = to_imx319(sd);
- int ret;
-
- if (imx319->streaming) {
- ret = imx319_start_streaming(imx319);
- if (ret)
- goto error;
- }
-
- return 0;
-
-error:
- imx319_stop_streaming(imx319);
- imx319->streaming = 0;
- return ret;
-}
-
static const struct v4l2_subdev_core_ops imx319_subdev_core_ops = {
.subscribe_event = v4l2_ctrl_subdev_subscribe_event,
.unsubscribe_event = v4l2_event_subdev_unsubscribe,
@@ -2542,10 +2503,6 @@ static void imx319_remove(struct i2c_client *client)
mutex_destroy(&imx319->mutex);
}
-static const struct dev_pm_ops imx319_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(imx319_suspend, imx319_resume)
-};
-
static const struct acpi_device_id imx319_acpi_ids[] __maybe_unused = {
{ "SONY319A" },
{ /* sentinel */ }
@@ -2555,7 +2512,6 @@ MODULE_DEVICE_TABLE(acpi, imx319_acpi_ids);
static struct i2c_driver imx319_i2c_driver = {
.driver = {
.name = "imx319",
- .pm = &imx319_pm_ops,
.acpi_match_table = ACPI_PTR(imx319_acpi_ids),
},
.probe = imx319_probe,
diff --git a/drivers/media/i2c/imx334.c b/drivers/media/i2c/imx334.c
index d722c9b7cd31..1196fe93506b 100644
--- a/drivers/media/i2c/imx334.c
+++ b/drivers/media/i2c/imx334.c
@@ -56,6 +56,24 @@
#define IMX334_REG_MIN 0x00
#define IMX334_REG_MAX 0xfffff
+/* Test Pattern Control */
+#define IMX334_REG_TP 0x329e
+#define IMX334_TP_COLOR_HBARS 0xA
+#define IMX334_TP_COLOR_VBARS 0xB
+
+#define IMX334_TPG_EN_DOUT 0x329c
+#define IMX334_TP_ENABLE 0x1
+#define IMX334_TP_DISABLE 0x0
+
+#define IMX334_TPG_COLORW 0x32a0
+#define IMX334_TPG_COLORW_120P 0x13
+
+#define IMX334_TP_CLK_EN 0x3148
+#define IMX334_TP_CLK_EN_VAL 0x10
+#define IMX334_TP_CLK_DIS_VAL 0x0
+
+#define IMX334_DIG_CLP_MODE 0x3280
+
/**
* struct imx334_reg - imx334 sensor register
* @address: Register address
@@ -120,7 +138,6 @@ struct imx334_mode {
* @mutex: Mutex for serializing sensor controls
* @menu_skip_mask: Menu skip mask for link_freq_ctrl
* @cur_code: current selected format code
- * @streaming: Flag indicating streaming state
*/
struct imx334 {
struct device *dev;
@@ -143,7 +160,6 @@ struct imx334 {
struct mutex mutex;
unsigned long menu_skip_mask;
u32 cur_code;
- bool streaming;
};
static const s64 link_freq[] = {
@@ -430,6 +446,18 @@ static const struct imx334_reg mode_3840x2160_regs[] = {
{0x3a29, 0x00},
};
+static const char * const imx334_test_pattern_menu[] = {
+ "Disabled",
+ "Vertical Color Bars",
+ "Horizontal Color Bars",
+};
+
+static const int imx334_test_pattern_val[] = {
+ IMX334_TP_DISABLE,
+ IMX334_TP_COLOR_HBARS,
+ IMX334_TP_COLOR_VBARS,
+};
+
static const struct imx334_reg raw10_framefmt_regs[] = {
{0x3050, 0x00},
{0x319d, 0x00},
@@ -716,6 +744,26 @@ static int imx334_set_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_HBLANK:
ret = 0;
break;
+ case V4L2_CID_TEST_PATTERN:
+ if (ctrl->val) {
+ imx334_write_reg(imx334, IMX334_TP_CLK_EN, 1,
+ IMX334_TP_CLK_EN_VAL);
+ imx334_write_reg(imx334, IMX334_DIG_CLP_MODE, 1, 0x0);
+ imx334_write_reg(imx334, IMX334_TPG_COLORW, 1,
+ IMX334_TPG_COLORW_120P);
+ imx334_write_reg(imx334, IMX334_REG_TP, 1,
+ imx334_test_pattern_val[ctrl->val]);
+ imx334_write_reg(imx334, IMX334_TPG_EN_DOUT, 1,
+ IMX334_TP_ENABLE);
+ } else {
+ imx334_write_reg(imx334, IMX334_DIG_CLP_MODE, 1, 0x1);
+ imx334_write_reg(imx334, IMX334_TP_CLK_EN, 1,
+ IMX334_TP_CLK_DIS_VAL);
+ imx334_write_reg(imx334, IMX334_TPG_EN_DOUT, 1,
+ IMX334_TP_DISABLE);
+ }
+ ret = 0;
+ break;
default:
dev_err(imx334->dev, "Invalid control %d", ctrl->id);
ret = -EINVAL;
@@ -1001,11 +1049,6 @@ static int imx334_set_stream(struct v4l2_subdev *sd, int enable)
mutex_lock(&imx334->mutex);
- if (imx334->streaming == enable) {
- mutex_unlock(&imx334->mutex);
- return 0;
- }
-
if (enable) {
ret = pm_runtime_resume_and_get(imx334->dev);
if (ret < 0)
@@ -1019,8 +1062,6 @@ static int imx334_set_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_put(imx334->dev);
}
- imx334->streaming = enable;
-
mutex_unlock(&imx334->mutex);
return 0;
@@ -1222,7 +1263,7 @@ static int imx334_init_controls(struct imx334 *imx334)
u32 lpfr;
int ret;
- ret = v4l2_ctrl_handler_init(ctrl_hdlr, 6);
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 7);
if (ret)
return ret;
@@ -1282,6 +1323,11 @@ static int imx334_init_controls(struct imx334 *imx334)
if (imx334->hblank_ctrl)
imx334->hblank_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &imx334_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(imx334_test_pattern_menu) - 1,
+ 0, 0, imx334_test_pattern_menu);
+
if (ctrl_hdlr->error) {
dev_err(imx334->dev, "control init failed: %d",
ctrl_hdlr->error);
diff --git a/drivers/media/i2c/imx335.c b/drivers/media/i2c/imx335.c
index 482a0b7f040a..ec729126274b 100644
--- a/drivers/media/i2c/imx335.c
+++ b/drivers/media/i2c/imx335.c
@@ -119,7 +119,6 @@ struct imx335_mode {
* @vblank: Vertical blanking in lines
* @cur_mode: Pointer to current selected sensor mode
* @mutex: Mutex for serializing sensor controls
- * @streaming: Flag indicating streaming state
*/
struct imx335 {
struct device *dev;
@@ -140,7 +139,6 @@ struct imx335 {
u32 vblank;
const struct imx335_mode *cur_mode;
struct mutex mutex;
- bool streaming;
};
static const s64 link_freq[] = {
@@ -705,11 +703,6 @@ static int imx335_set_stream(struct v4l2_subdev *sd, int enable)
mutex_lock(&imx335->mutex);
- if (imx335->streaming == enable) {
- mutex_unlock(&imx335->mutex);
- return 0;
- }
-
if (enable) {
ret = pm_runtime_resume_and_get(imx335->dev);
if (ret)
@@ -723,8 +716,6 @@ static int imx335_set_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_put(imx335->dev);
}
- imx335->streaming = enable;
-
mutex_unlock(&imx335->mutex);
return 0;
diff --git a/drivers/media/i2c/imx355.c b/drivers/media/i2c/imx355.c
index 9c79ae8dc842..9c58c1a80cba 100644
--- a/drivers/media/i2c/imx355.c
+++ b/drivers/media/i2c/imx355.c
@@ -123,9 +123,6 @@ struct imx355 {
* Protect access to sensor v4l2 controls.
*/
struct mutex mutex;
-
- /* Streaming on/off */
- bool streaming;
};
static const struct imx355_reg imx355_global_regs[] = {
@@ -1436,10 +1433,6 @@ static int imx355_set_stream(struct v4l2_subdev *sd, int enable)
int ret = 0;
mutex_lock(&imx355->mutex);
- if (imx355->streaming == enable) {
- mutex_unlock(&imx355->mutex);
- return 0;
- }
if (enable) {
ret = pm_runtime_resume_and_get(&client->dev);
@@ -1458,8 +1451,6 @@ static int imx355_set_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_put(&client->dev);
}
- imx355->streaming = enable;
-
/* vflip and hflip cannot change during streaming */
__v4l2_ctrl_grab(imx355->vflip, enable);
__v4l2_ctrl_grab(imx355->hflip, enable);
@@ -1476,37 +1467,6 @@ err_unlock:
return ret;
}
-static int __maybe_unused imx355_suspend(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct imx355 *imx355 = to_imx355(sd);
-
- if (imx355->streaming)
- imx355_stop_streaming(imx355);
-
- return 0;
-}
-
-static int __maybe_unused imx355_resume(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct imx355 *imx355 = to_imx355(sd);
- int ret;
-
- if (imx355->streaming) {
- ret = imx355_start_streaming(imx355);
- if (ret)
- goto error;
- }
-
- return 0;
-
-error:
- imx355_stop_streaming(imx355);
- imx355->streaming = 0;
- return ret;
-}
-
/* Verify chip ID */
static int imx355_identify_module(struct imx355 *imx355)
{
@@ -1829,10 +1789,6 @@ static void imx355_remove(struct i2c_client *client)
mutex_destroy(&imx355->mutex);
}
-static const struct dev_pm_ops imx355_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(imx355_suspend, imx355_resume)
-};
-
static const struct acpi_device_id imx355_acpi_ids[] __maybe_unused = {
{ "SONY355A" },
{ /* sentinel */ }
@@ -1842,7 +1798,6 @@ MODULE_DEVICE_TABLE(acpi, imx355_acpi_ids);
static struct i2c_driver imx355_i2c_driver = {
.driver = {
.name = "imx355",
- .pm = &imx355_pm_ops,
.acpi_match_table = ACPI_PTR(imx355_acpi_ids),
},
.probe = imx355_probe,
diff --git a/drivers/media/i2c/imx412.c b/drivers/media/i2c/imx412.c
index c7e862ae4040..962b3136c31e 100644
--- a/drivers/media/i2c/imx412.c
+++ b/drivers/media/i2c/imx412.c
@@ -127,7 +127,6 @@ static const char * const imx412_supply_names[] = {
* @vblank: Vertical blanking in lines
* @cur_mode: Pointer to current selected sensor mode
* @mutex: Mutex for serializing sensor controls
- * @streaming: Flag indicating streaming state
*/
struct imx412 {
struct device *dev;
@@ -149,7 +148,6 @@ struct imx412 {
u32 vblank;
const struct imx412_mode *cur_mode;
struct mutex mutex;
- bool streaming;
};
static const s64 link_freq[] = {
@@ -857,11 +855,6 @@ static int imx412_set_stream(struct v4l2_subdev *sd, int enable)
mutex_lock(&imx412->mutex);
- if (imx412->streaming == enable) {
- mutex_unlock(&imx412->mutex);
- return 0;
- }
-
if (enable) {
ret = pm_runtime_resume_and_get(imx412->dev);
if (ret)
@@ -875,8 +868,6 @@ static int imx412_set_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_put(imx412->dev);
}
- imx412->streaming = enable;
-
mutex_unlock(&imx412->mutex);
return 0;
diff --git a/drivers/media/i2c/imx415.c b/drivers/media/i2c/imx415.c
index 3f00172df3cc..b3fa71a16839 100644
--- a/drivers/media/i2c/imx415.c
+++ b/drivers/media/i2c/imx415.c
@@ -353,8 +353,6 @@ struct imx415 {
const struct imx415_clk_params *clk_params;
- bool streaming;
-
struct v4l2_subdev subdev;
struct media_pad pad;
@@ -542,8 +540,9 @@ static int imx415_s_ctrl(struct v4l2_ctrl *ctrl)
struct v4l2_subdev_state *state;
unsigned int vmax;
unsigned int flip;
+ int ret;
- if (!sensor->streaming)
+ if (!pm_runtime_get_if_in_use(sensor->dev))
return 0;
state = v4l2_subdev_get_locked_active_state(&sensor->subdev);
@@ -554,24 +553,33 @@ static int imx415_s_ctrl(struct v4l2_ctrl *ctrl)
/* clamp the exposure value to VMAX. */
vmax = format->height + sensor->vblank->cur.val;
ctrl->val = min_t(int, ctrl->val, vmax);
- return imx415_write(sensor, IMX415_SHR0, vmax - ctrl->val);
+ ret = imx415_write(sensor, IMX415_SHR0, vmax - ctrl->val);
+ break;
case V4L2_CID_ANALOGUE_GAIN:
/* analogue gain in 0.3 dB step size */
- return imx415_write(sensor, IMX415_GAIN_PCG_0, ctrl->val);
+ ret = imx415_write(sensor, IMX415_GAIN_PCG_0, ctrl->val);
+ break;
case V4L2_CID_HFLIP:
case V4L2_CID_VFLIP:
flip = (sensor->hflip->val << IMX415_HREVERSE_SHIFT) |
(sensor->vflip->val << IMX415_VREVERSE_SHIFT);
- return imx415_write(sensor, IMX415_REVERSE, flip);
+ ret = imx415_write(sensor, IMX415_REVERSE, flip);
+ break;
case V4L2_CID_TEST_PATTERN:
- return imx415_set_testpattern(sensor, ctrl->val);
+ ret = imx415_set_testpattern(sensor, ctrl->val);
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+
+ pm_runtime_put(sensor->dev);
+
+ return ret;
}
static const struct v4l2_ctrl_ops imx415_ctrl_ops = {
@@ -766,8 +774,6 @@ static int imx415_s_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
- sensor->streaming = false;
-
goto unlock;
}
@@ -779,13 +785,6 @@ static int imx415_s_stream(struct v4l2_subdev *sd, int enable)
if (ret)
goto err_pm;
- /*
- * Set streaming to true to ensure __v4l2_ctrl_handler_setup() will set
- * the controls. The flag is reset to false further down if an error
- * occurs.
- */
- sensor->streaming = true;
-
ret = __v4l2_ctrl_handler_setup(&sensor->ctrls);
if (ret < 0)
goto err_pm;
@@ -807,7 +806,6 @@ err_pm:
* likely has no other chance to recover.
*/
pm_runtime_put_sync(sensor->dev);
- sensor->streaming = false;
goto unlock;
}
@@ -842,15 +840,6 @@ static int imx415_enum_frame_size(struct v4l2_subdev *sd,
return 0;
}
-static int imx415_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state,
- struct v4l2_subdev_format *fmt)
-{
- fmt->format = *v4l2_subdev_get_pad_format(sd, state, fmt->pad);
-
- return 0;
-}
-
static int imx415_set_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_format *fmt)
@@ -913,7 +902,7 @@ static const struct v4l2_subdev_video_ops imx415_subdev_video_ops = {
static const struct v4l2_subdev_pad_ops imx415_subdev_pad_ops = {
.enum_mbus_code = imx415_enum_mbus_code,
.enum_frame_size = imx415_enum_frame_size,
- .get_fmt = imx415_get_format,
+ .get_fmt = v4l2_subdev_get_fmt,
.set_fmt = imx415_set_format,
.get_selection = imx415_get_selection,
.init_cfg = imx415_init_cfg,
diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
index be84ff1e2b17..fc1cf196ef01 100644
--- a/drivers/media/i2c/max9286.c
+++ b/drivers/media/i2c/max9286.c
@@ -1449,7 +1449,6 @@ static int max9286_parse_dt(struct max9286_priv *priv)
i2c_mux_mask |= BIT(id);
}
- of_node_put(node);
of_node_put(i2c_mux);
/* Parse the endpoints */
@@ -1513,7 +1512,6 @@ static int max9286_parse_dt(struct max9286_priv *priv)
priv->source_mask |= BIT(ep.port);
priv->nsources++;
}
- of_node_put(node);
of_property_read_u32(dev->of_node, "maxim,bus-width", &priv->bus_width);
switch (priv->bus_width) {
diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
index bec76801487a..0ed8561edfee 100644
--- a/drivers/media/i2c/msp3400-driver.c
+++ b/drivers/media/i2c/msp3400-driver.c
@@ -561,7 +561,7 @@ static int msp_log_status(struct v4l2_subdev *sd)
struct msp_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
const char *p;
- char prefix[V4L2_SUBDEV_NAME_SIZE + 20];
+ char prefix[sizeof(sd->name) + 20];
if (state->opmode == OPMODE_AUTOSELECT)
msp_detect_stereo(client);
diff --git a/drivers/media/i2c/mt9m001.c b/drivers/media/i2c/mt9m001.c
index ce9568e8391c..79192cf79d28 100644
--- a/drivers/media/i2c/mt9m001.c
+++ b/drivers/media/i2c/mt9m001.c
@@ -93,7 +93,6 @@ struct mt9m001 {
struct v4l2_ctrl *autoexposure;
struct v4l2_ctrl *exposure;
};
- bool streaming;
struct mutex mutex;
struct v4l2_rect rect; /* Sensor window */
struct clk *clk;
@@ -213,9 +212,6 @@ static int mt9m001_s_stream(struct v4l2_subdev *sd, int enable)
mutex_lock(&mt9m001->mutex);
- if (mt9m001->streaming == enable)
- goto done;
-
if (enable) {
ret = pm_runtime_resume_and_get(&client->dev);
if (ret < 0)
@@ -239,8 +235,6 @@ static int mt9m001_s_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_put(&client->dev);
}
- mt9m001->streaming = enable;
-done:
mutex_unlock(&mt9m001->mutex);
return 0;
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index df8d9c9e6a96..1f44b72e8a70 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -244,9 +244,7 @@ struct mt9m111 {
bool is_streaming;
/* user point of view - 0: falling 1: rising edge */
unsigned int pclk_sample:1;
-#ifdef CONFIG_MEDIA_CONTROLLER
struct media_pad pad;
-#endif
};
static const struct mt9m111_mode_info mt9m111_mode_data[MT9M111_NUM_MODES] = {
@@ -527,13 +525,9 @@ static int mt9m111_get_fmt(struct v4l2_subdev *sd,
return -EINVAL;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
mf = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
format->format = *mf;
return 0;
-#else
- return -EINVAL;
-#endif
}
mf->width = mt9m111->width;
@@ -1120,7 +1114,6 @@ static int mt9m111_s_stream(struct v4l2_subdev *sd, int enable)
static int mt9m111_init_cfg(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state)
{
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
struct v4l2_mbus_framefmt *format =
v4l2_subdev_get_try_format(sd, sd_state, 0);
@@ -1132,7 +1125,7 @@ static int mt9m111_init_cfg(struct v4l2_subdev *sd,
format->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
format->quantization = V4L2_QUANTIZATION_DEFAULT;
format->xfer_func = V4L2_XFER_FUNC_DEFAULT;
-#endif
+
return 0;
}
@@ -1315,13 +1308,11 @@ static int mt9m111_probe(struct i2c_client *client)
return ret;
}
-#ifdef CONFIG_MEDIA_CONTROLLER
mt9m111->pad.flags = MEDIA_PAD_FL_SOURCE;
mt9m111->subdev.entity.function = MEDIA_ENT_F_CAM_SENSOR;
ret = media_entity_pads_init(&mt9m111->subdev.entity, 1, &mt9m111->pad);
if (ret < 0)
goto out_hdlfree;
-#endif
mt9m111->current_mode = &mt9m111_mode_data[MT9M111_MODE_SXGA_15FPS];
mt9m111->frame_interval.numerator = 1;
@@ -1350,10 +1341,8 @@ static int mt9m111_probe(struct i2c_client *client)
return 0;
out_entityclean:
-#ifdef CONFIG_MEDIA_CONTROLLER
media_entity_cleanup(&mt9m111->subdev.entity);
out_hdlfree:
-#endif
v4l2_ctrl_handler_free(&mt9m111->hdl);
return ret;
diff --git a/drivers/media/i2c/mt9m114.c b/drivers/media/i2c/mt9m114.c
new file mode 100644
index 000000000000..ac19078ceda3
--- /dev/null
+++ b/drivers/media/i2c/mt9m114.c
@@ -0,0 +1,2481 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * mt9m114.c onsemi MT9M114 sensor driver
+ *
+ * Copyright (c) 2020-2023 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Copyright (c) 2012 Analog Devices Inc.
+ *
+ * Almost complete rewrite of work by Scott Jiang <Scott.Jiang.Linux@gmail.com>
+ * itself based on work from Andrew Chew <achew@nvidia.com>.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mediabus.h>
+#include <media/v4l2-subdev.h>
+
+/* Sysctl registers */
+#define MT9M114_CHIP_ID CCI_REG16(0x0000)
+#define MT9M114_COMMAND_REGISTER CCI_REG16(0x0080)
+#define MT9M114_COMMAND_REGISTER_APPLY_PATCH BIT(0)
+#define MT9M114_COMMAND_REGISTER_SET_STATE BIT(1)
+#define MT9M114_COMMAND_REGISTER_REFRESH BIT(2)
+#define MT9M114_COMMAND_REGISTER_WAIT_FOR_EVENT BIT(3)
+#define MT9M114_COMMAND_REGISTER_OK BIT(15)
+#define MT9M114_RESET_AND_MISC_CONTROL CCI_REG16(0x001a)
+#define MT9M114_RESET_SOC BIT(0)
+#define MT9M114_PAD_SLEW CCI_REG16(0x001e)
+#define MT9M114_PAD_CONTROL CCI_REG16(0x0032)
+
+/* XDMA registers */
+#define MT9M114_ACCESS_CTL_STAT CCI_REG16(0x0982)
+#define MT9M114_PHYSICAL_ADDRESS_ACCESS CCI_REG16(0x098a)
+#define MT9M114_LOGICAL_ADDRESS_ACCESS CCI_REG16(0x098e)
+
+/* Sensor Core registers */
+#define MT9M114_COARSE_INTEGRATION_TIME CCI_REG16(0x3012)
+#define MT9M114_FINE_INTEGRATION_TIME CCI_REG16(0x3014)
+#define MT9M114_RESET_REGISTER CCI_REG16(0x301a)
+#define MT9M114_RESET_REGISTER_LOCK_REG BIT(3)
+#define MT9M114_RESET_REGISTER_MASK_BAD BIT(9)
+#define MT9M114_FLASH CCI_REG16(0x3046)
+#define MT9M114_GREEN1_GAIN CCI_REG16(0x3056)
+#define MT9M114_BLUE_GAIN CCI_REG16(0x3058)
+#define MT9M114_RED_GAIN CCI_REG16(0x305a)
+#define MT9M114_GREEN2_GAIN CCI_REG16(0x305c)
+#define MT9M114_GLOBAL_GAIN CCI_REG16(0x305e)
+#define MT9M114_GAIN_DIGITAL_GAIN(n) ((n) << 12)
+#define MT9M114_GAIN_DIGITAL_GAIN_MASK (0xf << 12)
+#define MT9M114_GAIN_ANALOG_GAIN(n) ((n) << 0)
+#define MT9M114_GAIN_ANALOG_GAIN_MASK (0xff << 0)
+#define MT9M114_CUSTOMER_REV CCI_REG16(0x31fe)
+
+/* Monitor registers */
+#define MT9M114_MON_MAJOR_VERSION CCI_REG16(0x8000)
+#define MT9M114_MON_MINOR_VERSION CCI_REG16(0x8002)
+#define MT9M114_MON_RELEASE_VERSION CCI_REG16(0x8004)
+
+/* Auto-Exposure Track registers */
+#define MT9M114_AE_TRACK_ALGO CCI_REG16(0xa804)
+#define MT9M114_AE_TRACK_EXEC_AUTOMATIC_EXPOSURE BIT(0)
+#define MT9M114_AE_TRACK_AE_TRACKING_DAMPENING_SPEED CCI_REG8(0xa80a)
+
+/* Color Correction Matrix registers */
+#define MT9M114_CCM_ALGO CCI_REG16(0xb404)
+#define MT9M114_CCM_EXEC_CALC_CCM_MATRIX BIT(4)
+#define MT9M114_CCM_DELTA_GAIN CCI_REG8(0xb42a)
+
+/* Camera Control registers */
+#define MT9M114_CAM_SENSOR_CFG_Y_ADDR_START CCI_REG16(0xc800)
+#define MT9M114_CAM_SENSOR_CFG_X_ADDR_START CCI_REG16(0xc802)
+#define MT9M114_CAM_SENSOR_CFG_Y_ADDR_END CCI_REG16(0xc804)
+#define MT9M114_CAM_SENSOR_CFG_X_ADDR_END CCI_REG16(0xc806)
+#define MT9M114_CAM_SENSOR_CFG_PIXCLK CCI_REG32(0xc808)
+#define MT9M114_CAM_SENSOR_CFG_ROW_SPEED CCI_REG16(0xc80c)
+#define MT9M114_CAM_SENSOR_CFG_FINE_INTEG_TIME_MIN CCI_REG16(0xc80e)
+#define MT9M114_CAM_SENSOR_CFG_FINE_INTEG_TIME_MAX CCI_REG16(0xc810)
+#define MT9M114_CAM_SENSOR_CFG_FRAME_LENGTH_LINES CCI_REG16(0xc812)
+#define MT9M114_CAM_SENSOR_CFG_FRAME_LENGTH_LINES_MAX 65535
+#define MT9M114_CAM_SENSOR_CFG_LINE_LENGTH_PCK CCI_REG16(0xc814)
+#define MT9M114_CAM_SENSOR_CFG_LINE_LENGTH_PCK_MAX 8191
+#define MT9M114_CAM_SENSOR_CFG_FINE_CORRECTION CCI_REG16(0xc816)
+#define MT9M114_CAM_SENSOR_CFG_CPIPE_LAST_ROW CCI_REG16(0xc818)
+#define MT9M114_CAM_SENSOR_CFG_REG_0_DATA CCI_REG16(0xc826)
+#define MT9M114_CAM_SENSOR_CONTROL_READ_MODE CCI_REG16(0xc834)
+#define MT9M114_CAM_SENSOR_CONTROL_HORZ_MIRROR_EN BIT(0)
+#define MT9M114_CAM_SENSOR_CONTROL_VERT_FLIP_EN BIT(1)
+#define MT9M114_CAM_SENSOR_CONTROL_X_READ_OUT_NORMAL (0 << 4)
+#define MT9M114_CAM_SENSOR_CONTROL_X_READ_OUT_SKIPPING (1 << 4)
+#define MT9M114_CAM_SENSOR_CONTROL_X_READ_OUT_AVERAGE (2 << 4)
+#define MT9M114_CAM_SENSOR_CONTROL_X_READ_OUT_SUMMING (3 << 4)
+#define MT9M114_CAM_SENSOR_CONTROL_X_READ_OUT_MASK (3 << 4)
+#define MT9M114_CAM_SENSOR_CONTROL_Y_READ_OUT_NORMAL (0 << 8)
+#define MT9M114_CAM_SENSOR_CONTROL_Y_READ_OUT_SKIPPING (1 << 8)
+#define MT9M114_CAM_SENSOR_CONTROL_Y_READ_OUT_SUMMING (3 << 8)
+#define MT9M114_CAM_SENSOR_CONTROL_Y_READ_OUT_MASK (3 << 8)
+#define MT9M114_CAM_SENSOR_CONTROL_ANALOG_GAIN CCI_REG16(0xc836)
+#define MT9M114_CAM_SENSOR_CONTROL_COARSE_INTEGRATION_TIME CCI_REG16(0xc83c)
+#define MT9M114_CAM_SENSOR_CONTROL_FINE_INTEGRATION_TIME CCI_REG16(0xc83e)
+#define MT9M114_CAM_MODE_SELECT CCI_REG8(0xc84c)
+#define MT9M114_CAM_MODE_SELECT_NORMAL (0 << 0)
+#define MT9M114_CAM_MODE_SELECT_LENS_CALIBRATION (1 << 0)
+#define MT9M114_CAM_MODE_SELECT_TEST_PATTERN (2 << 0)
+#define MT9M114_CAM_MODE_TEST_PATTERN_SELECT CCI_REG8(0xc84d)
+#define MT9M114_CAM_MODE_TEST_PATTERN_SELECT_SOLID (1 << 0)
+#define MT9M114_CAM_MODE_TEST_PATTERN_SELECT_SOLID_BARS (4 << 0)
+#define MT9M114_CAM_MODE_TEST_PATTERN_SELECT_RANDOM (5 << 0)
+#define MT9M114_CAM_MODE_TEST_PATTERN_SELECT_FADING_BARS (8 << 0)
+#define MT9M114_CAM_MODE_TEST_PATTERN_SELECT_WALKING_1S_10B (10 << 0)
+#define MT9M114_CAM_MODE_TEST_PATTERN_SELECT_WALKING_1S_8B (11 << 0)
+#define MT9M114_CAM_MODE_TEST_PATTERN_RED CCI_REG16(0xc84e)
+#define MT9M114_CAM_MODE_TEST_PATTERN_GREEN CCI_REG16(0xc850)
+#define MT9M114_CAM_MODE_TEST_PATTERN_BLUE CCI_REG16(0xc852)
+#define MT9M114_CAM_CROP_WINDOW_XOFFSET CCI_REG16(0xc854)
+#define MT9M114_CAM_CROP_WINDOW_YOFFSET CCI_REG16(0xc856)
+#define MT9M114_CAM_CROP_WINDOW_WIDTH CCI_REG16(0xc858)
+#define MT9M114_CAM_CROP_WINDOW_HEIGHT CCI_REG16(0xc85a)
+#define MT9M114_CAM_CROP_CROPMODE CCI_REG8(0xc85c)
+#define MT9M114_CAM_CROP_MODE_AE_AUTO_CROP_EN BIT(0)
+#define MT9M114_CAM_CROP_MODE_AWB_AUTO_CROP_EN BIT(1)
+#define MT9M114_CAM_OUTPUT_WIDTH CCI_REG16(0xc868)
+#define MT9M114_CAM_OUTPUT_HEIGHT CCI_REG16(0xc86a)
+#define MT9M114_CAM_OUTPUT_FORMAT CCI_REG16(0xc86c)
+#define MT9M114_CAM_OUTPUT_FORMAT_SWAP_RED_BLUE BIT(0)
+#define MT9M114_CAM_OUTPUT_FORMAT_SWAP_BYTES BIT(1)
+#define MT9M114_CAM_OUTPUT_FORMAT_MONO_ENABLE BIT(2)
+#define MT9M114_CAM_OUTPUT_FORMAT_BT656_ENABLE BIT(3)
+#define MT9M114_CAM_OUTPUT_FORMAT_BT656_CROP_SCALE_DISABLE BIT(4)
+#define MT9M114_CAM_OUTPUT_FORMAT_FVLV_DISABLE BIT(5)
+#define MT9M114_CAM_OUTPUT_FORMAT_FORMAT_YUV (0 << 8)
+#define MT9M114_CAM_OUTPUT_FORMAT_FORMAT_RGB (1 << 8)
+#define MT9M114_CAM_OUTPUT_FORMAT_FORMAT_BAYER (2 << 8)
+#define MT9M114_CAM_OUTPUT_FORMAT_FORMAT_NONE (3 << 8)
+#define MT9M114_CAM_OUTPUT_FORMAT_FORMAT_MASK (3 << 8)
+#define MT9M114_CAM_OUTPUT_FORMAT_BAYER_FORMAT_RAWR10 (0 << 10)
+#define MT9M114_CAM_OUTPUT_FORMAT_BAYER_FORMAT_PRELSC_8_2 (1 << 10)
+#define MT9M114_CAM_OUTPUT_FORMAT_BAYER_FORMAT_POSTLSC_8_2 (2 << 10)
+#define MT9M114_CAM_OUTPUT_FORMAT_BAYER_FORMAT_PROCESSED8 (3 << 10)
+#define MT9M114_CAM_OUTPUT_FORMAT_BAYER_FORMAT_MASK (3 << 10)
+#define MT9M114_CAM_OUTPUT_FORMAT_RGB_FORMAT_565RGB (0 << 12)
+#define MT9M114_CAM_OUTPUT_FORMAT_RGB_FORMAT_555RGB (1 << 12)
+#define MT9M114_CAM_OUTPUT_FORMAT_RGB_FORMAT_444xRGB (2 << 12)
+#define MT9M114_CAM_OUTPUT_FORMAT_RGB_FORMAT_444RGBx (3 << 12)
+#define MT9M114_CAM_OUTPUT_FORMAT_RGB_FORMAT_MASK (3 << 12)
+#define MT9M114_CAM_OUTPUT_FORMAT_YUV CCI_REG16(0xc86e)
+#define MT9M114_CAM_OUTPUT_FORMAT_YUV_CLIP BIT(5)
+#define MT9M114_CAM_OUTPUT_FORMAT_YUV_AUV_OFFSET BIT(4)
+#define MT9M114_CAM_OUTPUT_FORMAT_YUV_SELECT_601 BIT(3)
+#define MT9M114_CAM_OUTPUT_FORMAT_YUV_NORMALISE BIT(2)
+#define MT9M114_CAM_OUTPUT_FORMAT_YUV_SAMPLING_EVEN_UV (0 << 0)
+#define MT9M114_CAM_OUTPUT_FORMAT_YUV_SAMPLING_ODD_UV (1 << 0)
+#define MT9M114_CAM_OUTPUT_FORMAT_YUV_SAMPLING_EVENU_ODDV (2 << 0)
+#define MT9M114_CAM_OUTPUT_Y_OFFSET CCI_REG8(0xc870)
+#define MT9M114_CAM_AET_AEMODE CCI_REG8(0xc878)
+#define MT9M114_CAM_AET_EXEC_SET_INDOOR BIT(0)
+#define MT9M114_CAM_AET_DISCRETE_FRAMERATE BIT(1)
+#define MT9M114_CAM_AET_ADAPTATIVE_TARGET_LUMA BIT(2)
+#define MT9M114_CAM_AET_ADAPTATIVE_SKIP_FRAMES BIT(3)
+#define MT9M114_CAM_AET_SKIP_FRAMES CCI_REG8(0xc879)
+#define MT9M114_CAM_AET_TARGET_AVERAGE_LUMA CCI_REG8(0xc87a)
+#define MT9M114_CAM_AET_TARGET_AVERAGE_LUMA_DARK CCI_REG8(0xc87b)
+#define MT9M114_CAM_AET_BLACK_CLIPPING_TARGET CCI_REG16(0xc87c)
+#define MT9M114_CAM_AET_AE_MIN_VIRT_INT_TIME_PCLK CCI_REG16(0xc87e)
+#define MT9M114_CAM_AET_AE_MIN_VIRT_DGAIN CCI_REG16(0xc880)
+#define MT9M114_CAM_AET_AE_MAX_VIRT_DGAIN CCI_REG16(0xc882)
+#define MT9M114_CAM_AET_AE_MIN_VIRT_AGAIN CCI_REG16(0xc884)
+#define MT9M114_CAM_AET_AE_MAX_VIRT_AGAIN CCI_REG16(0xc886)
+#define MT9M114_CAM_AET_AE_VIRT_GAIN_TH_EG CCI_REG16(0xc888)
+#define MT9M114_CAM_AET_AE_EG_GATE_PERCENTAGE CCI_REG8(0xc88a)
+#define MT9M114_CAM_AET_FLICKER_FREQ_HZ CCI_REG8(0xc88b)
+#define MT9M114_CAM_AET_MAX_FRAME_RATE CCI_REG16(0xc88c)
+#define MT9M114_CAM_AET_MIN_FRAME_RATE CCI_REG16(0xc88e)
+#define MT9M114_CAM_AET_TARGET_GAIN CCI_REG16(0xc890)
+#define MT9M114_CAM_AWB_CCM_L(n) CCI_REG16(0xc892 + (n) * 2)
+#define MT9M114_CAM_AWB_CCM_M(n) CCI_REG16(0xc8a4 + (n) * 2)
+#define MT9M114_CAM_AWB_CCM_R(n) CCI_REG16(0xc8b6 + (n) * 2)
+#define MT9M114_CAM_AWB_CCM_L_RG_GAIN CCI_REG16(0xc8c8)
+#define MT9M114_CAM_AWB_CCM_L_BG_GAIN CCI_REG16(0xc8ca)
+#define MT9M114_CAM_AWB_CCM_M_RG_GAIN CCI_REG16(0xc8cc)
+#define MT9M114_CAM_AWB_CCM_M_BG_GAIN CCI_REG16(0xc8ce)
+#define MT9M114_CAM_AWB_CCM_R_RG_GAIN CCI_REG16(0xc8d0)
+#define MT9M114_CAM_AWB_CCM_R_BG_GAIN CCI_REG16(0xc8d2)
+#define MT9M114_CAM_AWB_CCM_L_CTEMP CCI_REG16(0xc8d4)
+#define MT9M114_CAM_AWB_CCM_M_CTEMP CCI_REG16(0xc8d6)
+#define MT9M114_CAM_AWB_CCM_R_CTEMP CCI_REG16(0xc8d8)
+#define MT9M114_CAM_AWB_AWB_XSCALE CCI_REG8(0xc8f2)
+#define MT9M114_CAM_AWB_AWB_YSCALE CCI_REG8(0xc8f3)
+#define MT9M114_CAM_AWB_AWB_WEIGHTS(n) CCI_REG16(0xc8f4 + (n) * 2)
+#define MT9M114_CAM_AWB_AWB_XSHIFT_PRE_ADJ CCI_REG16(0xc904)
+#define MT9M114_CAM_AWB_AWB_YSHIFT_PRE_ADJ CCI_REG16(0xc906)
+#define MT9M114_CAM_AWB_AWBMODE CCI_REG8(0xc909)
+#define MT9M114_CAM_AWB_MODE_AUTO BIT(1)
+#define MT9M114_CAM_AWB_MODE_EXCLUSIVE_AE BIT(0)
+#define MT9M114_CAM_AWB_K_R_L CCI_REG8(0xc90c)
+#define MT9M114_CAM_AWB_K_G_L CCI_REG8(0xc90d)
+#define MT9M114_CAM_AWB_K_B_L CCI_REG8(0xc90e)
+#define MT9M114_CAM_AWB_K_R_R CCI_REG8(0xc90f)
+#define MT9M114_CAM_AWB_K_G_R CCI_REG8(0xc910)
+#define MT9M114_CAM_AWB_K_B_R CCI_REG8(0xc911)
+#define MT9M114_CAM_STAT_AWB_CLIP_WINDOW_XSTART CCI_REG16(0xc914)
+#define MT9M114_CAM_STAT_AWB_CLIP_WINDOW_YSTART CCI_REG16(0xc916)
+#define MT9M114_CAM_STAT_AWB_CLIP_WINDOW_XEND CCI_REG16(0xc918)
+#define MT9M114_CAM_STAT_AWB_CLIP_WINDOW_YEND CCI_REG16(0xc91a)
+#define MT9M114_CAM_STAT_AE_INITIAL_WINDOW_XSTART CCI_REG16(0xc91c)
+#define MT9M114_CAM_STAT_AE_INITIAL_WINDOW_YSTART CCI_REG16(0xc91e)
+#define MT9M114_CAM_STAT_AE_INITIAL_WINDOW_XEND CCI_REG16(0xc920)
+#define MT9M114_CAM_STAT_AE_INITIAL_WINDOW_YEND CCI_REG16(0xc922)
+#define MT9M114_CAM_LL_LLMODE CCI_REG16(0xc924)
+#define MT9M114_CAM_LL_START_BRIGHTNESS CCI_REG16(0xc926)
+#define MT9M114_CAM_LL_STOP_BRIGHTNESS CCI_REG16(0xc928)
+#define MT9M114_CAM_LL_START_SATURATION CCI_REG8(0xc92a)
+#define MT9M114_CAM_LL_END_SATURATION CCI_REG8(0xc92b)
+#define MT9M114_CAM_LL_START_DESATURATION CCI_REG8(0xc92c)
+#define MT9M114_CAM_LL_END_DESATURATION CCI_REG8(0xc92d)
+#define MT9M114_CAM_LL_START_DEMOSAICING CCI_REG8(0xc92e)
+#define MT9M114_CAM_LL_START_AP_GAIN CCI_REG8(0xc92f)
+#define MT9M114_CAM_LL_START_AP_THRESH CCI_REG8(0xc930)
+#define MT9M114_CAM_LL_STOP_DEMOSAICING CCI_REG8(0xc931)
+#define MT9M114_CAM_LL_STOP_AP_GAIN CCI_REG8(0xc932)
+#define MT9M114_CAM_LL_STOP_AP_THRESH CCI_REG8(0xc933)
+#define MT9M114_CAM_LL_START_NR_RED CCI_REG8(0xc934)
+#define MT9M114_CAM_LL_START_NR_GREEN CCI_REG8(0xc935)
+#define MT9M114_CAM_LL_START_NR_BLUE CCI_REG8(0xc936)
+#define MT9M114_CAM_LL_START_NR_THRESH CCI_REG8(0xc937)
+#define MT9M114_CAM_LL_STOP_NR_RED CCI_REG8(0xc938)
+#define MT9M114_CAM_LL_STOP_NR_GREEN CCI_REG8(0xc939)
+#define MT9M114_CAM_LL_STOP_NR_BLUE CCI_REG8(0xc93a)
+#define MT9M114_CAM_LL_STOP_NR_THRESH CCI_REG8(0xc93b)
+#define MT9M114_CAM_LL_START_CONTRAST_BM CCI_REG16(0xc93c)
+#define MT9M114_CAM_LL_STOP_CONTRAST_BM CCI_REG16(0xc93e)
+#define MT9M114_CAM_LL_GAMMA CCI_REG16(0xc940)
+#define MT9M114_CAM_LL_START_CONTRAST_GRADIENT CCI_REG8(0xc942)
+#define MT9M114_CAM_LL_STOP_CONTRAST_GRADIENT CCI_REG8(0xc943)
+#define MT9M114_CAM_LL_START_CONTRAST_LUMA_PERCENTAGE CCI_REG8(0xc944)
+#define MT9M114_CAM_LL_STOP_CONTRAST_LUMA_PERCENTAGE CCI_REG8(0xc945)
+#define MT9M114_CAM_LL_START_GAIN_METRIC CCI_REG16(0xc946)
+#define MT9M114_CAM_LL_STOP_GAIN_METRIC CCI_REG16(0xc948)
+#define MT9M114_CAM_LL_START_FADE_TO_BLACK_LUMA CCI_REG16(0xc94a)
+#define MT9M114_CAM_LL_STOP_FADE_TO_BLACK_LUMA CCI_REG16(0xc94c)
+#define MT9M114_CAM_LL_CLUSTER_DC_TH_BM CCI_REG16(0xc94e)
+#define MT9M114_CAM_LL_CLUSTER_DC_GATE_PERCENTAGE CCI_REG8(0xc950)
+#define MT9M114_CAM_LL_SUMMING_SENSITIVITY_FACTOR CCI_REG8(0xc951)
+#define MT9M114_CAM_LL_START_TARGET_LUMA_BM CCI_REG16(0xc952)
+#define MT9M114_CAM_LL_STOP_TARGET_LUMA_BM CCI_REG16(0xc954)
+#define MT9M114_CAM_PGA_PGA_CONTROL CCI_REG16(0xc95e)
+#define MT9M114_CAM_SYSCTL_PLL_ENABLE CCI_REG8(0xc97e)
+#define MT9M114_CAM_SYSCTL_PLL_ENABLE_VALUE BIT(0)
+#define MT9M114_CAM_SYSCTL_PLL_DIVIDER_M_N CCI_REG16(0xc980)
+#define MT9M114_CAM_SYSCTL_PLL_DIVIDER_VALUE(m, n) (((n) << 8) | (m))
+#define MT9M114_CAM_SYSCTL_PLL_DIVIDER_P CCI_REG16(0xc982)
+#define MT9M114_CAM_SYSCTL_PLL_DIVIDER_P_VALUE(p) ((p) << 8)
+#define MT9M114_CAM_PORT_OUTPUT_CONTROL CCI_REG16(0xc984)
+#define MT9M114_CAM_PORT_PORT_SELECT_PARALLEL (0 << 0)
+#define MT9M114_CAM_PORT_PORT_SELECT_MIPI (1 << 0)
+#define MT9M114_CAM_PORT_CLOCK_SLOWDOWN BIT(3)
+#define MT9M114_CAM_PORT_TRUNCATE_RAW_BAYER BIT(4)
+#define MT9M114_CAM_PORT_PIXCLK_GATE BIT(5)
+#define MT9M114_CAM_PORT_CONT_MIPI_CLK BIT(6)
+#define MT9M114_CAM_PORT_CHAN_NUM(vc) ((vc) << 8)
+#define MT9M114_CAM_PORT_MIPI_TIMING_T_HS_ZERO CCI_REG16(0xc988)
+#define MT9M114_CAM_PORT_MIPI_TIMING_T_HS_ZERO_VALUE(n) ((n) << 8)
+#define MT9M114_CAM_PORT_MIPI_TIMING_T_HS_EXIT_TRAIL CCI_REG16(0xc98a)
+#define MT9M114_CAM_PORT_MIPI_TIMING_T_HS_EXIT_VALUE(n) ((n) << 8)
+#define MT9M114_CAM_PORT_MIPI_TIMING_T_HS_TRAIL_VALUE(n) ((n) << 0)
+#define MT9M114_CAM_PORT_MIPI_TIMING_T_CLK_POST_PRE CCI_REG16(0xc98c)
+#define MT9M114_CAM_PORT_MIPI_TIMING_T_CLK_POST_VALUE(n) ((n) << 8)
+#define MT9M114_CAM_PORT_MIPI_TIMING_T_CLK_PRE_VALUE(n) ((n) << 0)
+#define MT9M114_CAM_PORT_MIPI_TIMING_T_CLK_TRAIL_ZERO CCI_REG16(0xc98e)
+#define MT9M114_CAM_PORT_MIPI_TIMING_T_CLK_TRAIL_VALUE(n) ((n) << 8)
+#define MT9M114_CAM_PORT_MIPI_TIMING_T_CLK_ZERO_VALUE(n) ((n) << 0)
+
+/* System Manager registers */
+#define MT9M114_SYSMGR_NEXT_STATE CCI_REG8(0xdc00)
+#define MT9M114_SYSMGR_CURRENT_STATE CCI_REG8(0xdc01)
+#define MT9M114_SYSMGR_CMD_STATUS CCI_REG8(0xdc02)
+
+/* Patch Loader registers */
+#define MT9M114_PATCHLDR_LOADER_ADDRESS CCI_REG16(0xe000)
+#define MT9M114_PATCHLDR_PATCH_ID CCI_REG16(0xe002)
+#define MT9M114_PATCHLDR_FIRMWARE_ID CCI_REG32(0xe004)
+#define MT9M114_PATCHLDR_APPLY_STATUS CCI_REG8(0xe008)
+#define MT9M114_PATCHLDR_NUM_PATCHES CCI_REG8(0xe009)
+#define MT9M114_PATCHLDR_PATCH_ID_0 CCI_REG16(0xe00a)
+#define MT9M114_PATCHLDR_PATCH_ID_1 CCI_REG16(0xe00c)
+#define MT9M114_PATCHLDR_PATCH_ID_2 CCI_REG16(0xe00e)
+#define MT9M114_PATCHLDR_PATCH_ID_3 CCI_REG16(0xe010)
+#define MT9M114_PATCHLDR_PATCH_ID_4 CCI_REG16(0xe012)
+#define MT9M114_PATCHLDR_PATCH_ID_5 CCI_REG16(0xe014)
+#define MT9M114_PATCHLDR_PATCH_ID_6 CCI_REG16(0xe016)
+#define MT9M114_PATCHLDR_PATCH_ID_7 CCI_REG16(0xe018)
+
+/* SYS_STATE values (for SYSMGR_NEXT_STATE and SYSMGR_CURRENT_STATE) */
+#define MT9M114_SYS_STATE_ENTER_CONFIG_CHANGE 0x28
+#define MT9M114_SYS_STATE_STREAMING 0x31
+#define MT9M114_SYS_STATE_START_STREAMING 0x34
+#define MT9M114_SYS_STATE_ENTER_SUSPEND 0x40
+#define MT9M114_SYS_STATE_SUSPENDED 0x41
+#define MT9M114_SYS_STATE_ENTER_STANDBY 0x50
+#define MT9M114_SYS_STATE_STANDBY 0x52
+#define MT9M114_SYS_STATE_LEAVE_STANDBY 0x54
+
+/* Result status of last SET_STATE command */
+#define MT9M114_SET_STATE_RESULT_ENOERR 0x00
+#define MT9M114_SET_STATE_RESULT_EINVAL 0x0c
+#define MT9M114_SET_STATE_RESULT_ENOSPC 0x0d
+
+/*
+ * The minimum amount of horizontal and vertical blanking is undocumented. The
+ * minimum values seen in register lists are 303 and 38, so use them.
+ *
+ * Set the defaults to achieve 1280x960 at 30fps (a worked timing calculation
+ * follows the frame-rate defines below).
+ */
+#define MT9M114_MIN_HBLANK 303
+#define MT9M114_MIN_VBLANK 38
+#define MT9M114_DEF_HBLANK 323
+#define MT9M114_DEF_VBLANK 39
+
+#define MT9M114_DEF_FRAME_RATE 30
+#define MT9M114_MAX_FRAME_RATE 120
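
As a rough sanity check of the defaults above (assuming a 48 MHz pixel clock; the actual rate is whatever MT9M114_CAM_SENSOR_CFG_PIXCLK gets programmed to from the PLL settings later in this file):

	frame rate = pixclk / ((width + hblank) * (height + vblank))
	           = 48,000,000 / ((1280 + 323) * (960 + 39))
	           = 48,000,000 / 1,601,397
	           ≈ 30 fps
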
+
+#define MT9M114_PIXEL_ARRAY_WIDTH 1296U
+#define MT9M114_PIXEL_ARRAY_HEIGHT 976U
+
+/*
+ * These values are not well documented and are semi-arbitrary. The pixel array
+ * minimum output size is 8 pixels larger than the minimum scaler cropped input
+ * width to account for the demosaicing.
+ */
+#define MT9M114_PIXEL_ARRAY_MIN_OUTPUT_WIDTH (32U + 8U)
+#define MT9M114_PIXEL_ARRAY_MIN_OUTPUT_HEIGHT (32U + 8U)
+#define MT9M114_SCALER_CROPPED_INPUT_WIDTH 32U
+#define MT9M114_SCALER_CROPPED_INPUT_HEIGHT 32U
+
+/* Indices into the mt9m114.ifp.tpg array. */
+#define MT9M114_TPG_PATTERN 0
+#define MT9M114_TPG_RED 1
+#define MT9M114_TPG_GREEN 2
+#define MT9M114_TPG_BLUE 3
+
+/* -----------------------------------------------------------------------------
+ * Data Structures
+ */
+
+enum mt9m114_format_flag {
+ MT9M114_FMT_FLAG_PARALLEL = BIT(0),
+ MT9M114_FMT_FLAG_CSI2 = BIT(1),
+};
+
+struct mt9m114_format_info {
+ u32 code;
+ u32 output_format;
+ u32 flags;
+};
+
+struct mt9m114 {
+ struct i2c_client *client;
+ struct regmap *regmap;
+
+ struct clk *clk;
+ struct gpio_desc *reset;
+ struct regulator_bulk_data supplies[3];
+ struct v4l2_fwnode_endpoint bus_cfg;
+
+ struct {
+ unsigned int m;
+ unsigned int n;
+ unsigned int p;
+ } pll;
+
+ unsigned int pixrate;
+ bool streaming;
+
+ /* Pixel Array */
+ struct {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
+ struct v4l2_ctrl_handler hdl;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *gain;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vblank;
+ } pa;
+
+ /* Image Flow Processor */
+ struct {
+ struct v4l2_subdev sd;
+ struct media_pad pads[2];
+
+ struct v4l2_ctrl_handler hdl;
+ unsigned int frame_rate;
+
+ struct v4l2_ctrl *tpg[4];
+ } ifp;
+};
+
+/* -----------------------------------------------------------------------------
+ * Formats
+ */
+
+static const struct mt9m114_format_info mt9m114_format_infos[] = {
+ {
+ /*
+ * The first two entries are used as defaults, for parallel and
+ * CSI-2 buses respectively. Keep them in that order.
+ */
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .flags = MT9M114_FMT_FLAG_PARALLEL,
+ .output_format = MT9M114_CAM_OUTPUT_FORMAT_FORMAT_YUV,
+ }, {
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .flags = MT9M114_FMT_FLAG_CSI2,
+ .output_format = MT9M114_CAM_OUTPUT_FORMAT_FORMAT_YUV,
+ }, {
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .flags = MT9M114_FMT_FLAG_PARALLEL,
+ .output_format = MT9M114_CAM_OUTPUT_FORMAT_FORMAT_YUV
+ | MT9M114_CAM_OUTPUT_FORMAT_SWAP_BYTES,
+ }, {
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .flags = MT9M114_FMT_FLAG_CSI2,
+ .output_format = MT9M114_CAM_OUTPUT_FORMAT_FORMAT_YUV
+ | MT9M114_CAM_OUTPUT_FORMAT_SWAP_BYTES,
+ }, {
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .flags = MT9M114_FMT_FLAG_PARALLEL,
+ .output_format = MT9M114_CAM_OUTPUT_FORMAT_RGB_FORMAT_565RGB
+ | MT9M114_CAM_OUTPUT_FORMAT_FORMAT_RGB
+ | MT9M114_CAM_OUTPUT_FORMAT_SWAP_BYTES,
+ }, {
+ .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
+ .flags = MT9M114_FMT_FLAG_PARALLEL,
+ .output_format = MT9M114_CAM_OUTPUT_FORMAT_RGB_FORMAT_565RGB
+ | MT9M114_CAM_OUTPUT_FORMAT_FORMAT_RGB,
+ }, {
+ .code = MEDIA_BUS_FMT_RGB565_1X16,
+ .flags = MT9M114_FMT_FLAG_CSI2,
+ .output_format = MT9M114_CAM_OUTPUT_FORMAT_RGB_FORMAT_565RGB
+ | MT9M114_CAM_OUTPUT_FORMAT_FORMAT_RGB,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .output_format = MT9M114_CAM_OUTPUT_FORMAT_BAYER_FORMAT_PROCESSED8
+ | MT9M114_CAM_OUTPUT_FORMAT_FORMAT_BAYER,
+ .flags = MT9M114_FMT_FLAG_PARALLEL | MT9M114_FMT_FLAG_CSI2,
+ }, {
+ /* Keep the format compatible with the IFP sink pad last. */
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .output_format = MT9M114_CAM_OUTPUT_FORMAT_BAYER_FORMAT_RAWR10
+ | MT9M114_CAM_OUTPUT_FORMAT_FORMAT_BAYER,
+ .flags = MT9M114_FMT_FLAG_PARALLEL | MT9M114_FMT_FLAG_CSI2,
+ }
+};
+
+static const struct mt9m114_format_info *
+mt9m114_default_format_info(struct mt9m114 *sensor)
+{
+ if (sensor->bus_cfg.bus_type == V4L2_MBUS_CSI2_DPHY)
+ return &mt9m114_format_infos[1];
+ else
+ return &mt9m114_format_infos[0];
+}
+
+static const struct mt9m114_format_info *
+mt9m114_format_info(struct mt9m114 *sensor, unsigned int pad, u32 code)
+{
+ const unsigned int num_formats = ARRAY_SIZE(mt9m114_format_infos);
+ unsigned int flag;
+ unsigned int i;
+
+ switch (pad) {
+ case 0:
+ return &mt9m114_format_infos[num_formats - 1];
+
+ case 1:
+ if (sensor->bus_cfg.bus_type == V4L2_MBUS_CSI2_DPHY)
+ flag = MT9M114_FMT_FLAG_CSI2;
+ else
+ flag = MT9M114_FMT_FLAG_PARALLEL;
+
+ for (i = 0; i < num_formats; ++i) {
+ const struct mt9m114_format_info *info =
+ &mt9m114_format_infos[i];
+
+ if (info->code == code && info->flags & flag)
+ return info;
+ }
+
+ return mt9m114_default_format_info(sensor);
+
+ default:
+ return NULL;
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * Initialization
+ */
+
+static const struct cci_reg_sequence mt9m114_init[] = {
+ { MT9M114_RESET_REGISTER, MT9M114_RESET_REGISTER_MASK_BAD |
+ MT9M114_RESET_REGISTER_LOCK_REG |
+ 0x0010 },
+
+ /* Sensor optimization */
+ { CCI_REG16(0x316a), 0x8270 },
+ { CCI_REG16(0x316c), 0x8270 },
+ { CCI_REG16(0x3ed0), 0x2305 },
+ { CCI_REG16(0x3ed2), 0x77cf },
+ { CCI_REG16(0x316e), 0x8202 },
+ { CCI_REG16(0x3180), 0x87ff },
+ { CCI_REG16(0x30d4), 0x6080 },
+ { CCI_REG16(0xa802), 0x0008 },
+
+ { CCI_REG16(0x3e14), 0xff39 },
+
+ /* APGA */
+ { MT9M114_CAM_PGA_PGA_CONTROL, 0x0000 },
+
+ /* Automatic White balance */
+ { MT9M114_CAM_AWB_CCM_L(0), 0x0267 },
+ { MT9M114_CAM_AWB_CCM_L(1), 0xff1a },
+ { MT9M114_CAM_AWB_CCM_L(2), 0xffb3 },
+ { MT9M114_CAM_AWB_CCM_L(3), 0xff80 },
+ { MT9M114_CAM_AWB_CCM_L(4), 0x0166 },
+ { MT9M114_CAM_AWB_CCM_L(5), 0x0003 },
+ { MT9M114_CAM_AWB_CCM_L(6), 0xff9a },
+ { MT9M114_CAM_AWB_CCM_L(7), 0xfeb4 },
+ { MT9M114_CAM_AWB_CCM_L(8), 0x024d },
+ { MT9M114_CAM_AWB_CCM_M(0), 0x01bf },
+ { MT9M114_CAM_AWB_CCM_M(1), 0xff01 },
+ { MT9M114_CAM_AWB_CCM_M(2), 0xfff3 },
+ { MT9M114_CAM_AWB_CCM_M(3), 0xff75 },
+ { MT9M114_CAM_AWB_CCM_M(4), 0x0198 },
+ { MT9M114_CAM_AWB_CCM_M(5), 0xfffd },
+ { MT9M114_CAM_AWB_CCM_M(6), 0xff9a },
+ { MT9M114_CAM_AWB_CCM_M(7), 0xfee7 },
+ { MT9M114_CAM_AWB_CCM_M(8), 0x02a8 },
+ { MT9M114_CAM_AWB_CCM_R(0), 0x01d9 },
+ { MT9M114_CAM_AWB_CCM_R(1), 0xff26 },
+ { MT9M114_CAM_AWB_CCM_R(2), 0xfff3 },
+ { MT9M114_CAM_AWB_CCM_R(3), 0xffb3 },
+ { MT9M114_CAM_AWB_CCM_R(4), 0x0132 },
+ { MT9M114_CAM_AWB_CCM_R(5), 0xffe8 },
+ { MT9M114_CAM_AWB_CCM_R(6), 0xffda },
+ { MT9M114_CAM_AWB_CCM_R(7), 0xfecd },
+ { MT9M114_CAM_AWB_CCM_R(8), 0x02c2 },
+ { MT9M114_CAM_AWB_CCM_L_RG_GAIN, 0x0075 },
+ { MT9M114_CAM_AWB_CCM_L_BG_GAIN, 0x011c },
+ { MT9M114_CAM_AWB_CCM_M_RG_GAIN, 0x009a },
+ { MT9M114_CAM_AWB_CCM_M_BG_GAIN, 0x0105 },
+ { MT9M114_CAM_AWB_CCM_R_RG_GAIN, 0x00a4 },
+ { MT9M114_CAM_AWB_CCM_R_BG_GAIN, 0x00ac },
+ { MT9M114_CAM_AWB_CCM_L_CTEMP, 0x0a8c },
+ { MT9M114_CAM_AWB_CCM_M_CTEMP, 0x0f0a },
+ { MT9M114_CAM_AWB_CCM_R_CTEMP, 0x1964 },
+ { MT9M114_CAM_AWB_AWB_XSHIFT_PRE_ADJ, 51 },
+ { MT9M114_CAM_AWB_AWB_YSHIFT_PRE_ADJ, 60 },
+ { MT9M114_CAM_AWB_AWB_XSCALE, 3 },
+ { MT9M114_CAM_AWB_AWB_YSCALE, 2 },
+ { MT9M114_CAM_AWB_AWB_WEIGHTS(0), 0x0000 },
+ { MT9M114_CAM_AWB_AWB_WEIGHTS(1), 0x0000 },
+ { MT9M114_CAM_AWB_AWB_WEIGHTS(2), 0x0000 },
+ { MT9M114_CAM_AWB_AWB_WEIGHTS(3), 0xe724 },
+ { MT9M114_CAM_AWB_AWB_WEIGHTS(4), 0x1583 },
+ { MT9M114_CAM_AWB_AWB_WEIGHTS(5), 0x2045 },
+ { MT9M114_CAM_AWB_AWB_WEIGHTS(6), 0x03ff },
+ { MT9M114_CAM_AWB_AWB_WEIGHTS(7), 0x007c },
+ { MT9M114_CAM_AWB_K_R_L, 0x80 },
+ { MT9M114_CAM_AWB_K_G_L, 0x80 },
+ { MT9M114_CAM_AWB_K_B_L, 0x80 },
+ { MT9M114_CAM_AWB_K_R_R, 0x88 },
+ { MT9M114_CAM_AWB_K_G_R, 0x80 },
+ { MT9M114_CAM_AWB_K_B_R, 0x80 },
+
+ /* Low-Light Image Enhancements */
+ { MT9M114_CAM_LL_START_BRIGHTNESS, 0x0020 },
+ { MT9M114_CAM_LL_STOP_BRIGHTNESS, 0x009a },
+ { MT9M114_CAM_LL_START_GAIN_METRIC, 0x0070 },
+ { MT9M114_CAM_LL_STOP_GAIN_METRIC, 0x00f3 },
+ { MT9M114_CAM_LL_START_CONTRAST_LUMA_PERCENTAGE, 0x20 },
+ { MT9M114_CAM_LL_STOP_CONTRAST_LUMA_PERCENTAGE, 0x9a },
+ { MT9M114_CAM_LL_START_SATURATION, 0x80 },
+ { MT9M114_CAM_LL_END_SATURATION, 0x4b },
+ { MT9M114_CAM_LL_START_DESATURATION, 0x00 },
+ { MT9M114_CAM_LL_END_DESATURATION, 0xff },
+ { MT9M114_CAM_LL_START_DEMOSAICING, 0x3c },
+ { MT9M114_CAM_LL_START_AP_GAIN, 0x02 },
+ { MT9M114_CAM_LL_START_AP_THRESH, 0x06 },
+ { MT9M114_CAM_LL_STOP_DEMOSAICING, 0x64 },
+ { MT9M114_CAM_LL_STOP_AP_GAIN, 0x01 },
+ { MT9M114_CAM_LL_STOP_AP_THRESH, 0x0c },
+ { MT9M114_CAM_LL_START_NR_RED, 0x3c },
+ { MT9M114_CAM_LL_START_NR_GREEN, 0x3c },
+ { MT9M114_CAM_LL_START_NR_BLUE, 0x3c },
+ { MT9M114_CAM_LL_START_NR_THRESH, 0x0f },
+ { MT9M114_CAM_LL_STOP_NR_RED, 0x64 },
+ { MT9M114_CAM_LL_STOP_NR_GREEN, 0x64 },
+ { MT9M114_CAM_LL_STOP_NR_BLUE, 0x64 },
+ { MT9M114_CAM_LL_STOP_NR_THRESH, 0x32 },
+ { MT9M114_CAM_LL_START_CONTRAST_BM, 0x0020 },
+ { MT9M114_CAM_LL_STOP_CONTRAST_BM, 0x009a },
+ { MT9M114_CAM_LL_GAMMA, 0x00dc },
+ { MT9M114_CAM_LL_START_CONTRAST_GRADIENT, 0x38 },
+ { MT9M114_CAM_LL_STOP_CONTRAST_GRADIENT, 0x30 },
+ { MT9M114_CAM_LL_START_CONTRAST_LUMA_PERCENTAGE, 0x50 },
+ { MT9M114_CAM_LL_STOP_CONTRAST_LUMA_PERCENTAGE, 0x19 },
+ { MT9M114_CAM_LL_START_FADE_TO_BLACK_LUMA, 0x0230 },
+ { MT9M114_CAM_LL_STOP_FADE_TO_BLACK_LUMA, 0x0010 },
+ { MT9M114_CAM_LL_CLUSTER_DC_TH_BM, 0x01cd },
+ { MT9M114_CAM_LL_CLUSTER_DC_GATE_PERCENTAGE, 0x05 },
+ { MT9M114_CAM_LL_SUMMING_SENSITIVITY_FACTOR, 0x40 },
+
+ /* Auto-Exposure */
+ { MT9M114_CAM_AET_TARGET_AVERAGE_LUMA_DARK, 0x1b },
+ { MT9M114_CAM_AET_AEMODE, 0x00 },
+ { MT9M114_CAM_AET_TARGET_GAIN, 0x0080 },
+ { MT9M114_CAM_AET_AE_MAX_VIRT_AGAIN, 0x0100 },
+ { MT9M114_CAM_AET_BLACK_CLIPPING_TARGET, 0x005a },
+
+ { MT9M114_CCM_DELTA_GAIN, 0x05 },
+ { MT9M114_AE_TRACK_AE_TRACKING_DAMPENING_SPEED, 0x20 },
+
+ /* Pixel array timings and integration time */
+ { MT9M114_CAM_SENSOR_CFG_ROW_SPEED, 1 },
+ { MT9M114_CAM_SENSOR_CFG_FINE_INTEG_TIME_MIN, 219 },
+ { MT9M114_CAM_SENSOR_CFG_FINE_INTEG_TIME_MAX, 1459 },
+ { MT9M114_CAM_SENSOR_CFG_FINE_CORRECTION, 96 },
+ { MT9M114_CAM_SENSOR_CFG_REG_0_DATA, 32 },
+
+ /* Miscellaneous settings */
+ { MT9M114_PAD_SLEW, 0x0777 },
+};
+
+/* -----------------------------------------------------------------------------
+ * Hardware Configuration
+ */
+
+/* Wait for a command to complete. */
+static int mt9m114_poll_command(struct mt9m114 *sensor, u32 command)
+{
+ unsigned int i;
+ u64 value;
+ int ret;
+
+ for (i = 0; i < 100; ++i) {
+ ret = cci_read(sensor->regmap, MT9M114_COMMAND_REGISTER, &value,
+ NULL);
+ if (ret < 0)
+ return ret;
+
+ if (!(value & command))
+ break;
+
+ usleep_range(5000, 6000);
+ }
+
+ if (value & command) {
+ dev_err(&sensor->client->dev, "Command %u completion timeout\n",
+ command);
+ return -ETIMEDOUT;
+ }
+
+ if (!(value & MT9M114_COMMAND_REGISTER_OK)) {
+ dev_err(&sensor->client->dev, "Command %u failed\n", command);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* Wait for a state to be entered. */
+static int mt9m114_poll_state(struct mt9m114 *sensor, u32 state)
+{
+ unsigned int i;
+ u64 value;
+ int ret;
+
+ for (i = 0; i < 100; ++i) {
+ ret = cci_read(sensor->regmap, MT9M114_SYSMGR_CURRENT_STATE,
+ &value, NULL);
+ if (ret < 0)
+ return ret;
+
+ if (value == state)
+ return 0;
+
+ usleep_range(1000, 1500);
+ }
+
+ dev_err(&sensor->client->dev, "Timeout waiting for state 0x%02x\n",
+ state);
+ return -ETIMEDOUT;
+}
+
+static int mt9m114_set_state(struct mt9m114 *sensor, u8 next_state)
+{
+ int ret = 0;
+
+ /* Set the next desired state and start the state transition. */
+ cci_write(sensor->regmap, MT9M114_SYSMGR_NEXT_STATE, next_state, &ret);
+ cci_write(sensor->regmap, MT9M114_COMMAND_REGISTER,
+ MT9M114_COMMAND_REGISTER_OK |
+ MT9M114_COMMAND_REGISTER_SET_STATE, &ret);
+ if (ret < 0)
+ return ret;
+
+ /* Wait for the state transition to complete. */
+ ret = mt9m114_poll_command(sensor, MT9M114_COMMAND_REGISTER_SET_STATE);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
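
mt9m114_set_state() above is the building block for all state transitions; a hypothetical caller (not part of this patch), using only the states and the helper defined above, would look like:

static int foo_stream_on(struct mt9m114 *sensor)
{
	return mt9m114_set_state(sensor, MT9M114_SYS_STATE_START_STREAMING);
}

static int foo_stream_off(struct mt9m114 *sensor)
{
	return mt9m114_set_state(sensor, MT9M114_SYS_STATE_ENTER_SUSPEND);
}
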
+
+static int mt9m114_initialize(struct mt9m114 *sensor)
+{
+ u32 value;
+ int ret;
+
+ ret = cci_multi_reg_write(sensor->regmap, mt9m114_init,
+ ARRAY_SIZE(mt9m114_init), NULL);
+ if (ret < 0) {
+ dev_err(&sensor->client->dev,
+ "Failed to initialize the sensor\n");
+ return ret;
+ }
+
+ /* Configure the PLL. */
+ cci_write(sensor->regmap, MT9M114_CAM_SYSCTL_PLL_ENABLE,
+ MT9M114_CAM_SYSCTL_PLL_ENABLE_VALUE, &ret);
+ cci_write(sensor->regmap, MT9M114_CAM_SYSCTL_PLL_DIVIDER_M_N,
+ MT9M114_CAM_SYSCTL_PLL_DIVIDER_VALUE(sensor->pll.m,
+ sensor->pll.n),
+ &ret);
+ cci_write(sensor->regmap, MT9M114_CAM_SYSCTL_PLL_DIVIDER_P,
+ MT9M114_CAM_SYSCTL_PLL_DIVIDER_P_VALUE(sensor->pll.p), &ret);
+ cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CFG_PIXCLK,
+ sensor->pixrate, &ret);
+
+ /* Configure the output mode. */
+ if (sensor->bus_cfg.bus_type == V4L2_MBUS_CSI2_DPHY) {
+ value = MT9M114_CAM_PORT_PORT_SELECT_MIPI
+ | MT9M114_CAM_PORT_CHAN_NUM(0)
+ | 0x8000;
+ if (!(sensor->bus_cfg.bus.mipi_csi2.flags &
+ V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK))
+ value |= MT9M114_CAM_PORT_CONT_MIPI_CLK;
+ } else {
+ value = MT9M114_CAM_PORT_PORT_SELECT_PARALLEL
+ | 0x8000;
+ }
+ cci_write(sensor->regmap, MT9M114_CAM_PORT_OUTPUT_CONTROL, value, &ret);
+ if (ret < 0)
+ return ret;
+
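+ /*
+ * Apply the configuration with a Config-Change command, then leave the
+ * sensor in the suspend state until streaming is started.
+ */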
+ ret = mt9m114_set_state(sensor, MT9M114_SYS_STATE_ENTER_CONFIG_CHANGE);
+ if (ret < 0)
+ return ret;
+
+ ret = mt9m114_set_state(sensor, MT9M114_SYS_STATE_ENTER_SUSPEND);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int mt9m114_configure(struct mt9m114 *sensor,
+ struct v4l2_subdev_state *pa_state,
+ struct v4l2_subdev_state *ifp_state)
+{
+ const struct v4l2_mbus_framefmt *pa_format;
+ const struct v4l2_rect *pa_crop;
+ const struct mt9m114_format_info *ifp_info;
+ const struct v4l2_mbus_framefmt *ifp_format;
+ const struct v4l2_rect *ifp_crop;
+ const struct v4l2_rect *ifp_compose;
+ unsigned int hratio, vratio;
+ u64 output_format;
+ u64 read_mode;
+ int ret = 0;
+
+ pa_format = v4l2_subdev_get_pad_format(&sensor->pa.sd, pa_state, 0);
+ pa_crop = v4l2_subdev_get_pad_crop(&sensor->pa.sd, pa_state, 0);
+
+ ifp_format = v4l2_subdev_get_pad_format(&sensor->ifp.sd, ifp_state, 1);
+ ifp_info = mt9m114_format_info(sensor, 1, ifp_format->code);
+ ifp_crop = v4l2_subdev_get_pad_crop(&sensor->ifp.sd, ifp_state, 0);
+ ifp_compose = v4l2_subdev_get_pad_compose(&sensor->ifp.sd, ifp_state, 0);
+
+ ret = cci_read(sensor->regmap, MT9M114_CAM_SENSOR_CONTROL_READ_MODE,
+ &read_mode, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = cci_read(sensor->regmap, MT9M114_CAM_OUTPUT_FORMAT,
+ &output_format, NULL);
+ if (ret < 0)
+ return ret;
+
+ hratio = pa_crop->width / pa_format->width;
+ vratio = pa_crop->height / pa_format->height;
+
+ /*
+ * Pixel array crop and binning. The CAM_SENSOR_CFG_CPIPE_LAST_ROW
+ * register isn't clearly documented, but is always set to (the number
+ * of active rows minus 4) divided by the vertical binning factor in all
+ * example sensor modes.
+ */
+ cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CFG_X_ADDR_START,
+ pa_crop->left, &ret);
+ cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CFG_Y_ADDR_START,
+ pa_crop->top, &ret);
+ cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CFG_X_ADDR_END,
+ pa_crop->width + pa_crop->left - 1, &ret);
+ cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CFG_Y_ADDR_END,
+ pa_crop->height + pa_crop->top - 1, &ret);
+ cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CFG_CPIPE_LAST_ROW,
+ (pa_crop->height - 4) / vratio - 1, &ret);
+
+ read_mode &= ~(MT9M114_CAM_SENSOR_CONTROL_X_READ_OUT_MASK |
+ MT9M114_CAM_SENSOR_CONTROL_Y_READ_OUT_MASK);
+
+ if (hratio > 1)
+ read_mode |= MT9M114_CAM_SENSOR_CONTROL_X_READ_OUT_SUMMING;
+ if (vratio > 1)
+ read_mode |= MT9M114_CAM_SENSOR_CONTROL_Y_READ_OUT_SUMMING;
+
+ cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CONTROL_READ_MODE,
+ read_mode, &ret);
+
+ /*
+ * Color pipeline (IFP) cropping and scaling. Subtract 4 from the left
+ * and top coordinates to compensate for the lines and columns removed
+ * by demosaicing that are taken into account in the crop rectangle but
+ * not in the hardware.
+ */
+ cci_write(sensor->regmap, MT9M114_CAM_CROP_WINDOW_XOFFSET,
+ ifp_crop->left - 4, &ret);
+ cci_write(sensor->regmap, MT9M114_CAM_CROP_WINDOW_YOFFSET,
+ ifp_crop->top - 4, &ret);
+ cci_write(sensor->regmap, MT9M114_CAM_CROP_WINDOW_WIDTH,
+ ifp_crop->width, &ret);
+ cci_write(sensor->regmap, MT9M114_CAM_CROP_WINDOW_HEIGHT,
+ ifp_crop->height, &ret);
+
+ cci_write(sensor->regmap, MT9M114_CAM_OUTPUT_WIDTH,
+ ifp_compose->width, &ret);
+ cci_write(sensor->regmap, MT9M114_CAM_OUTPUT_HEIGHT,
+ ifp_compose->height, &ret);
+
+ /*
+ * Statistics windows: the AWB clip window covers the full frame, and
+ * the initial AE window covers its top-left fifth in each dimension.
+ */
+ cci_write(sensor->regmap, MT9M114_CAM_STAT_AWB_CLIP_WINDOW_XSTART,
+ 0, &ret);
+ cci_write(sensor->regmap, MT9M114_CAM_STAT_AWB_CLIP_WINDOW_YSTART,
+ 0, &ret);
+ cci_write(sensor->regmap, MT9M114_CAM_STAT_AWB_CLIP_WINDOW_XEND,
+ ifp_compose->width - 1, &ret);
+ cci_write(sensor->regmap, MT9M114_CAM_STAT_AWB_CLIP_WINDOW_YEND,
+ ifp_compose->height - 1, &ret);
+
+ cci_write(sensor->regmap, MT9M114_CAM_STAT_AE_INITIAL_WINDOW_XSTART,
+ 0, &ret);
+ cci_write(sensor->regmap, MT9M114_CAM_STAT_AE_INITIAL_WINDOW_YSTART,
+ 0, &ret);
+ cci_write(sensor->regmap, MT9M114_CAM_STAT_AE_INITIAL_WINDOW_XEND,
+ ifp_compose->width / 5 - 1, &ret);
+ cci_write(sensor->regmap, MT9M114_CAM_STAT_AE_INITIAL_WINDOW_YEND,
+ ifp_compose->height / 5 - 1, &ret);
+
+ cci_write(sensor->regmap, MT9M114_CAM_CROP_CROPMODE,
+ MT9M114_CAM_CROP_MODE_AWB_AUTO_CROP_EN |
+ MT9M114_CAM_CROP_MODE_AE_AUTO_CROP_EN, &ret);
+
+ /* Set the media bus code. */
+ output_format &= ~(MT9M114_CAM_OUTPUT_FORMAT_RGB_FORMAT_MASK |
+ MT9M114_CAM_OUTPUT_FORMAT_BAYER_FORMAT_MASK |
+ MT9M114_CAM_OUTPUT_FORMAT_FORMAT_MASK |
+ MT9M114_CAM_OUTPUT_FORMAT_SWAP_BYTES |
+ MT9M114_CAM_OUTPUT_FORMAT_SWAP_RED_BLUE);
+ output_format |= ifp_info->output_format;
+
+ cci_write(sensor->regmap, MT9M114_CAM_OUTPUT_FORMAT,
+ output_format, &ret);
+
+ return ret;
+}
+
+static int mt9m114_set_frame_rate(struct mt9m114 *sensor)
+{
+ u16 frame_rate = sensor->ifp.frame_rate << 8;
+ int ret = 0;
+
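+ /*
+ * The AET frame rate limit registers take an 8.8 fixed-point value in
+ * frames per second (hence the << 8 above). Programming the same value
+ * as both the minimum and the maximum pins the frame rate.
+ */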
+ cci_write(sensor->regmap, MT9M114_CAM_AET_MIN_FRAME_RATE,
+ frame_rate, &ret);
+ cci_write(sensor->regmap, MT9M114_CAM_AET_MAX_FRAME_RATE,
+ frame_rate, &ret);
+
+ return ret;
+}
+
+static int mt9m114_start_streaming(struct mt9m114 *sensor,
+ struct v4l2_subdev_state *pa_state,
+ struct v4l2_subdev_state *ifp_state)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(&sensor->client->dev);
+ if (ret)
+ return ret;
+
+ ret = mt9m114_configure(sensor, pa_state, ifp_state);
+ if (ret)
+ goto error;
+
+ ret = mt9m114_set_frame_rate(sensor);
+ if (ret)
+ goto error;
+
+ ret = __v4l2_ctrl_handler_setup(&sensor->pa.hdl);
+ if (ret)
+ goto error;
+
+ ret = __v4l2_ctrl_handler_setup(&sensor->ifp.hdl);
+ if (ret)
+ goto error;
+
+ /*
+ * The Change-Config state is transient and moves to the streaming
+ * state automatically.
+ */
+ ret = mt9m114_set_state(sensor, MT9M114_SYS_STATE_ENTER_CONFIG_CHANGE);
+ if (ret)
+ goto error;
+
+ sensor->streaming = true;
+
+ return 0;
+
+error:
+ pm_runtime_mark_last_busy(&sensor->client->dev);
+ pm_runtime_put_autosuspend(&sensor->client->dev);
+
+ return ret;
+}
+
+static int mt9m114_stop_streaming(struct mt9m114 *sensor)
+{
+ int ret;
+
+ sensor->streaming = false;
+
+ ret = mt9m114_set_state(sensor, MT9M114_SYS_STATE_ENTER_SUSPEND);
+
+ pm_runtime_mark_last_busy(&sensor->client->dev);
+ pm_runtime_put_autosuspend(&sensor->client->dev);
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Common Subdev Operations
+ */
+
+static const struct media_entity_operations mt9m114_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/* -----------------------------------------------------------------------------
+ * Pixel Array Control Operations
+ */
+
+static inline struct mt9m114 *pa_ctrl_to_mt9m114(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct mt9m114, pa.hdl);
+}
+
+static int mt9m114_pa_g_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mt9m114 *sensor = pa_ctrl_to_mt9m114(ctrl);
+ u64 value;
+ int ret;
+
+ if (!pm_runtime_get_if_in_use(&sensor->client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE:
+ ret = cci_read(sensor->regmap,
+ MT9M114_CAM_SENSOR_CONTROL_COARSE_INTEGRATION_TIME,
+ &value, NULL);
+ if (ret)
+ break;
+
+ ctrl->val = value;
+ break;
+
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = cci_read(sensor->regmap,
+ MT9M114_CAM_SENSOR_CONTROL_ANALOG_GAIN,
+ &value, NULL);
+ if (ret)
+ break;
+
+ ctrl->val = value;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_mark_last_busy(&sensor->client->dev);
+ pm_runtime_put_autosuspend(&sensor->client->dev);
+
+ return ret;
+}
+
+static int mt9m114_pa_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mt9m114 *sensor = pa_ctrl_to_mt9m114(ctrl);
+ const struct v4l2_mbus_framefmt *format;
+ struct v4l2_subdev_state *state;
+ int ret = 0;
+ u64 mask;
+
+ /* V4L2 control values are applied only when power is up. */
+ if (!pm_runtime_get_if_in_use(&sensor->client->dev))
+ return 0;
+
+ state = v4l2_subdev_get_locked_active_state(&sensor->pa.sd);
+ format = v4l2_subdev_get_pad_format(&sensor->pa.sd, state, 0);
+
+ switch (ctrl->id) {
+ case V4L2_CID_HBLANK:
+ cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CFG_LINE_LENGTH_PCK,
+ ctrl->val + format->width, &ret);
+ break;
+
+ case V4L2_CID_VBLANK:
+ cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CFG_FRAME_LENGTH_LINES,
+ ctrl->val + format->height, &ret);
+ break;
+
+ case V4L2_CID_EXPOSURE:
+ cci_write(sensor->regmap,
+ MT9M114_CAM_SENSOR_CONTROL_COARSE_INTEGRATION_TIME,
+ ctrl->val, &ret);
+ break;
+
+ case V4L2_CID_ANALOGUE_GAIN:
+ /*
+ * The CAM_SENSOR_CONTROL_ANALOG_GAIN contains linear analog
+ * gain values that are mapped to the GLOBAL_GAIN register
+ * values by the sensor firmware.
+ */
+ cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CONTROL_ANALOG_GAIN,
+ ctrl->val, &ret);
+ break;
+
+ case V4L2_CID_HFLIP:
+ mask = MT9M114_CAM_SENSOR_CONTROL_HORZ_MIRROR_EN;
+ ret = cci_update_bits(sensor->regmap,
+ MT9M114_CAM_SENSOR_CONTROL_READ_MODE,
+ mask, ctrl->val ? mask : 0, NULL);
+ break;
+
+ case V4L2_CID_VFLIP:
+ mask = MT9M114_CAM_SENSOR_CONTROL_VERT_FLIP_EN;
+ ret = cci_update_bits(sensor->regmap,
+ MT9M114_CAM_SENSOR_CONTROL_READ_MODE,
+ mask, ctrl->val ? mask : 0, NULL);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_mark_last_busy(&sensor->client->dev);
+ pm_runtime_put_autosuspend(&sensor->client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops mt9m114_pa_ctrl_ops = {
+ .g_volatile_ctrl = mt9m114_pa_g_ctrl,
+ .s_ctrl = mt9m114_pa_s_ctrl,
+};
+
+static void mt9m114_pa_ctrl_update_exposure(struct mt9m114 *sensor, bool manual)
+{
+ /*
+ * Update the volatile flag on the manual exposure and gain controls.
+ * If the controls have switched to manual, read their current value
+ * from the hardware to ensure that control read and write operations
+ * will behave correctly.
+ */
+ if (manual) {
+ mt9m114_pa_g_ctrl(sensor->pa.exposure);
+ sensor->pa.exposure->cur.val = sensor->pa.exposure->val;
+ sensor->pa.exposure->flags &= ~V4L2_CTRL_FLAG_VOLATILE;
+
+ mt9m114_pa_g_ctrl(sensor->pa.gain);
+ sensor->pa.gain->cur.val = sensor->pa.gain->val;
+ sensor->pa.gain->flags &= ~V4L2_CTRL_FLAG_VOLATILE;
+ } else {
+ sensor->pa.exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ sensor->pa.gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ }
+}
+
+static void mt9m114_pa_ctrl_update_blanking(struct mt9m114 *sensor,
+ const struct v4l2_mbus_framefmt *format)
+{
+ unsigned int max_blank;
+
+ /* Update the blanking controls ranges based on the output size. */
+ max_blank = MT9M114_CAM_SENSOR_CFG_LINE_LENGTH_PCK_MAX
+ - format->width;
+ __v4l2_ctrl_modify_range(sensor->pa.hblank, MT9M114_MIN_HBLANK,
+ max_blank, 1, MT9M114_DEF_HBLANK);
+
+ max_blank = MT9M114_CAM_SENSOR_CFG_FRAME_LENGTH_LINES_MAX
+ - format->height;
+ __v4l2_ctrl_modify_range(sensor->pa.vblank, MT9M114_MIN_VBLANK,
+ max_blank, 1, MT9M114_DEF_VBLANK);
+}
+
+/* -----------------------------------------------------------------------------
+ * Pixel Array Subdev Operations
+ */
+
+static inline struct mt9m114 *pa_to_mt9m114(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct mt9m114, pa.sd);
+}
+
+static int mt9m114_pa_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+
+ crop = v4l2_subdev_get_pad_crop(sd, state, 0);
+
+ crop->left = 0;
+ crop->top = 0;
+ crop->width = MT9M114_PIXEL_ARRAY_WIDTH;
+ crop->height = MT9M114_PIXEL_ARRAY_HEIGHT;
+
+ format = v4l2_subdev_get_pad_format(sd, state, 0);
+
+ format->width = MT9M114_PIXEL_ARRAY_WIDTH;
+ format->height = MT9M114_PIXEL_ARRAY_HEIGHT;
+ format->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_RAW;
+ format->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ format->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ format->xfer_func = V4L2_XFER_FUNC_NONE;
+
+ return 0;
+}
+
+static int mt9m114_pa_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int mt9m114_pa_enum_framesizes(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index > 1)
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ /* Report binning capability through frame size enumeration. */
+ fse->min_width = MT9M114_PIXEL_ARRAY_WIDTH / (fse->index + 1);
+ fse->max_width = MT9M114_PIXEL_ARRAY_WIDTH / (fse->index + 1);
+ fse->min_height = MT9M114_PIXEL_ARRAY_HEIGHT / (fse->index + 1);
+ fse->max_height = MT9M114_PIXEL_ARRAY_HEIGHT / (fse->index + 1);
+
+ return 0;
+}
+
+static int mt9m114_pa_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct mt9m114 *sensor = pa_to_mt9m114(sd);
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+ unsigned int hscale;
+ unsigned int vscale;
+
+ crop = v4l2_subdev_get_pad_crop(sd, state, fmt->pad);
+ format = v4l2_subdev_get_pad_format(sd, state, fmt->pad);
+
+ /* The sensor can bin horizontally and vertically. */
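+ /*
+ * E.g. requesting half of the crop width selects a horizontal binning
+ * factor of 2, while sizes close to the full crop keep a factor of 1.
+ */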
+ hscale = DIV_ROUND_CLOSEST(crop->width, fmt->format.width ? : 1);
+ vscale = DIV_ROUND_CLOSEST(crop->height, fmt->format.height ? : 1);
+ format->width = crop->width / clamp(hscale, 1U, 2U);
+ format->height = crop->height / clamp(vscale, 1U, 2U);
+
+ fmt->format = *format;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ mt9m114_pa_ctrl_update_blanking(sensor, format);
+
+ return 0;
+}
+
+static int mt9m114_pa_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *v4l2_subdev_get_pad_crop(sd, state, sel->pad);
+ return 0;
+
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = MT9M114_PIXEL_ARRAY_WIDTH;
+ sel->r.height = MT9M114_PIXEL_ARRAY_HEIGHT;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mt9m114_pa_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct mt9m114 *sensor = pa_to_mt9m114(sd);
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ crop = v4l2_subdev_get_pad_crop(sd, state, sel->pad);
+ format = v4l2_subdev_get_pad_format(sd, state, sel->pad);
+
+ /*
+ * Clamp the crop rectangle. The vertical coordinates must be even, and
+ * the horizontal coordinates must be a multiple of 4.
+ *
+ * FIXME: The horizontal coordinates must be a multiple of 8 when
+ * binning, but binning is configured after setting the selection, so
+ * we can't tell here whether it will be used.
+ */
+ crop->left = ALIGN(sel->r.left, 4);
+ crop->top = ALIGN(sel->r.top, 2);
+ crop->width = clamp_t(unsigned int, ALIGN(sel->r.width, 4),
+ MT9M114_PIXEL_ARRAY_MIN_OUTPUT_WIDTH,
+ MT9M114_PIXEL_ARRAY_WIDTH - crop->left);
+ crop->height = clamp_t(unsigned int, ALIGN(sel->r.height, 2),
+ MT9M114_PIXEL_ARRAY_MIN_OUTPUT_HEIGHT,
+ MT9M114_PIXEL_ARRAY_HEIGHT - crop->top);
+
+ sel->r = *crop;
+
+ /* Reset the format. */
+ format->width = crop->width;
+ format->height = crop->height;
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ mt9m114_pa_ctrl_update_blanking(sensor, format);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops mt9m114_pa_pad_ops = {
+ .init_cfg = mt9m114_pa_init_cfg,
+ .enum_mbus_code = mt9m114_pa_enum_mbus_code,
+ .enum_frame_size = mt9m114_pa_enum_framesizes,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = mt9m114_pa_set_fmt,
+ .get_selection = mt9m114_pa_get_selection,
+ .set_selection = mt9m114_pa_set_selection,
+};
+
+static const struct v4l2_subdev_ops mt9m114_pa_ops = {
+ .pad = &mt9m114_pa_pad_ops,
+};
+
+static int mt9m114_pa_init(struct mt9m114 *sensor)
+{
+ struct v4l2_ctrl_handler *hdl = &sensor->pa.hdl;
+ struct v4l2_subdev *sd = &sensor->pa.sd;
+ struct media_pad *pads = &sensor->pa.pad;
+ const struct v4l2_mbus_framefmt *format;
+ struct v4l2_subdev_state *state;
+ unsigned int max_exposure;
+ int ret;
+
+ /* Initialize the subdev. */
+ v4l2_subdev_init(sd, &mt9m114_pa_ops);
+ v4l2_i2c_subdev_set_name(sd, sensor->client, NULL, " pixel array");
+
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->owner = THIS_MODULE;
+ sd->dev = &sensor->client->dev;
+ v4l2_set_subdevdata(sd, sensor->client);
+
+ /* Initialize the media entity. */
+ sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ sd->entity.ops = &mt9m114_entity_ops;
+ pads[0].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&sd->entity, 1, pads);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize the control handler. */
+ v4l2_ctrl_handler_init(hdl, 7);
+
+ /* The range of the HBLANK and VBLANK controls will be updated below. */
+ sensor->pa.hblank = v4l2_ctrl_new_std(hdl, &mt9m114_pa_ctrl_ops,
+ V4L2_CID_HBLANK,
+ MT9M114_DEF_HBLANK,
+ MT9M114_DEF_HBLANK, 1,
+ MT9M114_DEF_HBLANK);
+ sensor->pa.vblank = v4l2_ctrl_new_std(hdl, &mt9m114_pa_ctrl_ops,
+ V4L2_CID_VBLANK,
+ MT9M114_DEF_VBLANK,
+ MT9M114_DEF_VBLANK, 1,
+ MT9M114_DEF_VBLANK);
+
+ /*
+ * The maximum coarse integration time is the frame length in lines
+ * minus two. The default is taken directly from the datasheet, but
+ * makes little sense as auto-exposure is enabled by default.
+ */
+ max_exposure = MT9M114_PIXEL_ARRAY_HEIGHT + MT9M114_MIN_VBLANK - 2;
+ sensor->pa.exposure = v4l2_ctrl_new_std(hdl, &mt9m114_pa_ctrl_ops,
+ V4L2_CID_EXPOSURE, 1,
+ max_exposure, 1, 16);
+ if (sensor->pa.exposure)
+ sensor->pa.exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ sensor->pa.gain = v4l2_ctrl_new_std(hdl, &mt9m114_pa_ctrl_ops,
+ V4L2_CID_ANALOGUE_GAIN, 1,
+ 511, 1, 32);
+ if (sensor->pa.gain)
+ sensor->pa.gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ v4l2_ctrl_new_std(hdl, &mt9m114_pa_ctrl_ops,
+ V4L2_CID_PIXEL_RATE,
+ sensor->pixrate, sensor->pixrate, 1,
+ sensor->pixrate);
+
+ v4l2_ctrl_new_std(hdl, &mt9m114_pa_ctrl_ops,
+ V4L2_CID_HFLIP,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdl, &mt9m114_pa_ctrl_ops,
+ V4L2_CID_VFLIP,
+ 0, 1, 1, 0);
+
+ if (hdl->error) {
+ ret = hdl->error;
+ goto error;
+ }
+
+ sd->state_lock = hdl->lock;
+
+ ret = v4l2_subdev_init_finalize(sd);
+ if (ret)
+ goto error;
+
+ /* Update the range of the blanking controls based on the format. */
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+ format = v4l2_subdev_get_pad_format(sd, state, 0);
+ mt9m114_pa_ctrl_update_blanking(sensor, format);
+ v4l2_subdev_unlock_state(state);
+
+ sd->ctrl_handler = hdl;
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(&sensor->pa.hdl);
+ media_entity_cleanup(&sensor->pa.sd.entity);
+ return ret;
+}
+
+static void mt9m114_pa_cleanup(struct mt9m114 *sensor)
+{
+ v4l2_ctrl_handler_free(&sensor->pa.hdl);
+ media_entity_cleanup(&sensor->pa.sd.entity);
+}
+
+/* -----------------------------------------------------------------------------
+ * Image Flow Processor Control Operations
+ */
+
+static const char * const mt9m114_test_pattern_menu[] = {
+ "Disabled",
+ "Solid Color",
+ "100% Color Bars",
+ "Pseudo-Random",
+ "Fade-to-Gray Color Bars",
+ "Walking Ones 10-bit",
+ "Walking Ones 8-bit",
+};
+
+/*
+ * Keep in sync with mt9m114_test_pattern_menu. Entry 0 of the menu
+ * ("Disabled") has no register value, hence the pattern - 1 indexing.
+ */
+static const unsigned int mt9m114_test_pattern_value[] = {
+ MT9M114_CAM_MODE_TEST_PATTERN_SELECT_SOLID,
+ MT9M114_CAM_MODE_TEST_PATTERN_SELECT_SOLID_BARS,
+ MT9M114_CAM_MODE_TEST_PATTERN_SELECT_RANDOM,
+ MT9M114_CAM_MODE_TEST_PATTERN_SELECT_FADING_BARS,
+ MT9M114_CAM_MODE_TEST_PATTERN_SELECT_WALKING_1S_10B,
+ MT9M114_CAM_MODE_TEST_PATTERN_SELECT_WALKING_1S_8B,
+};
+
+static inline struct mt9m114 *ifp_ctrl_to_mt9m114(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct mt9m114, ifp.hdl);
+}
+
+static int mt9m114_ifp_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mt9m114 *sensor = ifp_ctrl_to_mt9m114(ctrl);
+ u32 value;
+ int ret = 0;
+
+ if (ctrl->id == V4L2_CID_EXPOSURE_AUTO)
+ mt9m114_pa_ctrl_update_exposure(sensor,
+ ctrl->val != V4L2_EXPOSURE_AUTO);
+
+ /* V4L2 control values are applied only when power is up. */
+ if (!pm_runtime_get_if_in_use(&sensor->client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ /* Control both the AWB mode and the CCM algorithm. */
+ if (ctrl->val)
+ value = MT9M114_CAM_AWB_MODE_AUTO
+ | MT9M114_CAM_AWB_MODE_EXCLUSIVE_AE;
+ else
+ value = 0;
+
+ cci_write(sensor->regmap, MT9M114_CAM_AWB_AWBMODE, value, &ret);
+
+ if (ctrl->val)
+ value = MT9M114_CCM_EXEC_CALC_CCM_MATRIX | 0x22;
+ else
+ value = 0;
+
+ cci_write(sensor->regmap, MT9M114_CCM_ALGO, value, &ret);
+ break;
+
+ case V4L2_CID_EXPOSURE_AUTO:
+ if (ctrl->val == V4L2_EXPOSURE_AUTO)
+ value = MT9M114_AE_TRACK_EXEC_AUTOMATIC_EXPOSURE
+ | 0x00fe;
+ else
+ value = 0;
+
+ cci_write(sensor->regmap, MT9M114_AE_TRACK_ALGO, value, &ret);
+ if (ret)
+ break;
+
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ case V4L2_CID_TEST_PATTERN_RED:
+ case V4L2_CID_TEST_PATTERN_GREENR:
+ case V4L2_CID_TEST_PATTERN_BLUE: {
+ unsigned int pattern = sensor->ifp.tpg[MT9M114_TPG_PATTERN]->val;
+
+ if (pattern) {
+ cci_write(sensor->regmap, MT9M114_CAM_MODE_SELECT,
+ MT9M114_CAM_MODE_SELECT_TEST_PATTERN, &ret);
+ cci_write(sensor->regmap,
+ MT9M114_CAM_MODE_TEST_PATTERN_SELECT,
+ mt9m114_test_pattern_value[pattern - 1], &ret);
+ cci_write(sensor->regmap,
+ MT9M114_CAM_MODE_TEST_PATTERN_RED,
+ sensor->ifp.tpg[MT9M114_TPG_RED]->val, &ret);
+ cci_write(sensor->regmap,
+ MT9M114_CAM_MODE_TEST_PATTERN_GREEN,
+ sensor->ifp.tpg[MT9M114_TPG_GREEN]->val, &ret);
+ cci_write(sensor->regmap,
+ MT9M114_CAM_MODE_TEST_PATTERN_BLUE,
+ sensor->ifp.tpg[MT9M114_TPG_BLUE]->val, &ret);
+ } else {
+ cci_write(sensor->regmap, MT9M114_CAM_MODE_SELECT,
+ MT9M114_CAM_MODE_SELECT_NORMAL, &ret);
+ }
+
+ /*
+ * A Config-Change command needs to be issued for the change to take
+ * effect. If we're not streaming, skip it here; the change will be
+ * applied when the stream is started.
+ */
+ if (ret || !sensor->streaming)
+ break;
+
+ ret = mt9m114_set_state(sensor,
+ MT9M114_SYS_STATE_ENTER_CONFIG_CHANGE);
+ break;
+ }
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_mark_last_busy(&sensor->client->dev);
+ pm_runtime_put_autosuspend(&sensor->client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops mt9m114_ifp_ctrl_ops = {
+ .s_ctrl = mt9m114_ifp_s_ctrl,
+};
+
+/* -----------------------------------------------------------------------------
+ * Image Flow Processor Subdev Operations
+ */
+
+static inline struct mt9m114 *ifp_to_mt9m114(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct mt9m114, ifp.sd);
+}
+
+static int mt9m114_ifp_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+ struct v4l2_subdev_state *pa_state;
+ struct v4l2_subdev_state *ifp_state;
+ int ret;
+
+ if (!enable)
+ return mt9m114_stop_streaming(sensor);
+
+ ifp_state = v4l2_subdev_lock_and_get_active_state(&sensor->ifp.sd);
+ pa_state = v4l2_subdev_lock_and_get_active_state(&sensor->pa.sd);
+
+ ret = mt9m114_start_streaming(sensor, pa_state, ifp_state);
+
+ v4l2_subdev_unlock_state(pa_state);
+ v4l2_subdev_unlock_state(ifp_state);
+
+ return ret;
+}
+
+static int mt9m114_ifp_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct v4l2_fract *ival = &interval->interval;
+ struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+
+ mutex_lock(sensor->ifp.hdl.lock);
+
+ ival->numerator = 1;
+ ival->denominator = sensor->ifp.frame_rate;
+
+ mutex_unlock(sensor->ifp.hdl.lock);
+
+ return 0;
+}
+
+static int mt9m114_ifp_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct v4l2_fract *ival = &interval->interval;
+ struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+ int ret = 0;
+
+ mutex_lock(sensor->ifp.hdl.lock);
+
+ if (ival->numerator != 0 && ival->denominator != 0)
+ sensor->ifp.frame_rate = min_t(unsigned int,
+ ival->denominator / ival->numerator,
+ MT9M114_MAX_FRAME_RATE);
+ else
+ sensor->ifp.frame_rate = MT9M114_MAX_FRAME_RATE;
+
+ ival->numerator = 1;
+ ival->denominator = sensor->ifp.frame_rate;
+
+ if (sensor->streaming)
+ ret = mt9m114_set_frame_rate(sensor);
+
+ mutex_unlock(sensor->ifp.hdl.lock);
+
+ return ret;
+}
+
+static int mt9m114_ifp_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+ struct v4l2_rect *compose;
+
+ format = v4l2_subdev_get_pad_format(sd, state, 0);
+
+ format->width = MT9M114_PIXEL_ARRAY_WIDTH;
+ format->height = MT9M114_PIXEL_ARRAY_HEIGHT;
+ format->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_RAW;
+ format->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ format->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ format->xfer_func = V4L2_XFER_FUNC_NONE;
+
+ crop = v4l2_subdev_get_pad_crop(sd, state, 0);
+
+ crop->left = 4;
+ crop->top = 4;
+ crop->width = format->width - 8;
+ crop->height = format->height - 8;
+
+ compose = v4l2_subdev_get_pad_compose(sd, state, 0);
+
+ compose->left = 0;
+ compose->top = 0;
+ compose->width = crop->width;
+ compose->height = crop->height;
+
+ format = v4l2_subdev_get_pad_format(sd, state, 1);
+
+ format->width = compose->width;
+ format->height = compose->height;
+ format->code = mt9m114_default_format_info(sensor)->code;
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+ format->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ format->quantization = V4L2_QUANTIZATION_DEFAULT;
+ format->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ return 0;
+}
+
+static int mt9m114_ifp_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ const unsigned int num_formats = ARRAY_SIZE(mt9m114_format_infos);
+ struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+ unsigned int index = 0;
+ unsigned int flag;
+ unsigned int i;
+
+ switch (code->pad) {
+ case 0:
+ if (code->index != 0)
+ return -EINVAL;
+
+ code->code = mt9m114_format_infos[num_formats - 1].code;
+ return 0;
+
+ case 1:
+ if (sensor->bus_cfg.bus_type == V4L2_MBUS_CSI2_DPHY)
+ flag = MT9M114_FMT_FLAG_CSI2;
+ else
+ flag = MT9M114_FMT_FLAG_PARALLEL;
+
+ for (i = 0; i < num_formats; ++i) {
+ const struct mt9m114_format_info *info =
+ &mt9m114_format_infos[i];
+
+ if (info->flags & flag) {
+ if (index == code->index) {
+ code->code = info->code;
+ return 0;
+ }
+
+ index++;
+ }
+ }
+
+ return -EINVAL;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mt9m114_ifp_enum_framesizes(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+ const struct mt9m114_format_info *info;
+
+ if (fse->index > 0)
+ return -EINVAL;
+
+ info = mt9m114_format_info(sensor, fse->pad, fse->code);
+ if (!info || info->code != fse->code)
+ return -EINVAL;
+
+ if (fse->pad == 0) {
+ fse->min_width = MT9M114_PIXEL_ARRAY_MIN_OUTPUT_WIDTH;
+ fse->max_width = MT9M114_PIXEL_ARRAY_WIDTH;
+ fse->min_height = MT9M114_PIXEL_ARRAY_MIN_OUTPUT_HEIGHT;
+ fse->max_height = MT9M114_PIXEL_ARRAY_HEIGHT;
+ } else {
+ const struct v4l2_rect *crop;
+
+ crop = v4l2_subdev_get_pad_crop(sd, state, 0);
+
+ fse->max_width = crop->width;
+ fse->max_height = crop->height;
+
+ fse->min_width = fse->max_width / 4;
+ fse->min_height = fse->max_height / 4;
+ }
+
+ return 0;
+}
+
+static int mt9m114_ifp_enum_frameintervals(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+ const struct mt9m114_format_info *info;
+
+ if (fie->index > 0)
+ return -EINVAL;
+
+ info = mt9m114_format_info(sensor, fie->pad, fie->code);
+ if (!info || info->code != fie->code)
+ return -EINVAL;
+
+ fie->interval.numerator = 1;
+ fie->interval.denominator = MT9M114_MAX_FRAME_RATE;
+
+ return 0;
+}
+
+static int mt9m114_ifp_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_pad_format(sd, state, fmt->pad);
+
+ if (fmt->pad == 0) {
+ /* Only the size can be changed on the sink pad. */
+ format->width = clamp(ALIGN(fmt->format.width, 8),
+ MT9M114_PIXEL_ARRAY_MIN_OUTPUT_WIDTH,
+ MT9M114_PIXEL_ARRAY_WIDTH);
+ format->height = clamp(ALIGN(fmt->format.height, 8),
+ MT9M114_PIXEL_ARRAY_MIN_OUTPUT_HEIGHT,
+ MT9M114_PIXEL_ARRAY_HEIGHT);
+ } else {
+ const struct mt9m114_format_info *info;
+
+ /* Only the media bus code can be changed on the source pad. */
+ info = mt9m114_format_info(sensor, 1, fmt->format.code);
+
+ format->code = info->code;
+
+ /* If the output format is RAW10, bypass the scaler. */
+ if (format->code == MEDIA_BUS_FMT_SGRBG10_1X10)
+ *format = *v4l2_subdev_get_pad_format(sd, state, 0);
+ }
+
+ fmt->format = *format;
+
+ return 0;
+}
+
+static int mt9m114_ifp_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ const struct v4l2_mbus_framefmt *format;
+ const struct v4l2_rect *crop;
+ int ret = 0;
+
+ /* Crop and compose are only supported on the sink pad. */
+ if (sel->pad != 0)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *v4l2_subdev_get_pad_crop(sd, state, 0);
+ break;
+
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ /*
+ * The crop default and bounds are equal to the sink
+ * format size minus 4 pixels on each side for demosaicing.
+ */
+ format = v4l2_subdev_get_pad_format(sd, state, 0);
+
+ sel->r.left = 4;
+ sel->r.top = 4;
+ sel->r.width = format->width - 8;
+ sel->r.height = format->height - 8;
+ break;
+
+ case V4L2_SEL_TGT_COMPOSE:
+ sel->r = *v4l2_subdev_get_pad_compose(sd, state, 0);
+ break;
+
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ /*
+ * The compose default and bounds sizes are equal to the sink
+ * crop rectangle size.
+ */
+ crop = v4l2_subdev_get_pad_crop(sd, state, 0);
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = crop->width;
+ sel->r.height = crop->height;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int mt9m114_ifp_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+ struct v4l2_rect *compose;
+
+ if (sel->target != V4L2_SEL_TGT_CROP &&
+ sel->target != V4L2_SEL_TGT_COMPOSE)
+ return -EINVAL;
+
+ /* Crop and compose are only supported on the sink pad. */
+ if (sel->pad != 0)
+ return -EINVAL;
+
+ format = v4l2_subdev_get_pad_format(sd, state, 0);
+ crop = v4l2_subdev_get_pad_crop(sd, state, 0);
+ compose = v4l2_subdev_get_pad_compose(sd, state, 0);
+
+ if (sel->target == V4L2_SEL_TGT_CROP) {
+ /*
+ * Clamp the crop rectangle. Demosaicing removes 4 pixels on
+ * each side of the image.
+ */
+ crop->left = clamp_t(unsigned int, ALIGN(sel->r.left, 2), 4,
+ format->width - 4 -
+ MT9M114_SCALER_CROPPED_INPUT_WIDTH);
+ crop->top = clamp_t(unsigned int, ALIGN(sel->r.top, 2), 4,
+ format->height - 4 -
+ MT9M114_SCALER_CROPPED_INPUT_HEIGHT);
+ crop->width = clamp_t(unsigned int, ALIGN(sel->r.width, 2),
+ MT9M114_SCALER_CROPPED_INPUT_WIDTH,
+ format->width - 4 - crop->left);
+ crop->height = clamp_t(unsigned int, ALIGN(sel->r.height, 2),
+ MT9M114_SCALER_CROPPED_INPUT_HEIGHT,
+ format->height - 4 - crop->top);
+
+ sel->r = *crop;
+
+ /* Propagate to the compose rectangle. */
+ compose->width = crop->width;
+ compose->height = crop->height;
+ } else {
+ /*
+ * Clamp the compose rectangle. The scaler can only downscale.
+ */
+ compose->left = 0;
+ compose->top = 0;
+ compose->width = clamp_t(unsigned int, ALIGN(sel->r.width, 2),
+ MT9M114_SCALER_CROPPED_INPUT_WIDTH,
+ crop->width);
+ compose->height = clamp_t(unsigned int, ALIGN(sel->r.height, 2),
+ MT9M114_SCALER_CROPPED_INPUT_HEIGHT,
+ crop->height);
+
+ sel->r = *compose;
+ }
+
+ /* Propagate the compose rectangle to the source format. */
+ format = v4l2_subdev_get_pad_format(sd, state, 1);
+ format->width = compose->width;
+ format->height = compose->height;
+
+ return 0;
+}
+
+static void mt9m114_ifp_unregistered(struct v4l2_subdev *sd)
+{
+ struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+
+ v4l2_device_unregister_subdev(&sensor->pa.sd);
+}
+
+static int mt9m114_ifp_registered(struct v4l2_subdev *sd)
+{
+ struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+ int ret;
+
+ ret = v4l2_device_register_subdev(sd->v4l2_dev, &sensor->pa.sd);
+ if (ret < 0) {
+ dev_err(&sensor->client->dev,
+ "Failed to register pixel array subdev\n");
+ return ret;
+ }
+
+ ret = media_create_pad_link(&sensor->pa.sd.entity, 0,
+ &sensor->ifp.sd.entity, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret < 0) {
+ dev_err(&sensor->client->dev,
+ "Failed to link pixel array to ifp\n");
+ v4l2_device_unregister_subdev(&sensor->pa.sd);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops mt9m114_ifp_video_ops = {
+ .s_stream = mt9m114_ifp_s_stream,
+ .g_frame_interval = mt9m114_ifp_g_frame_interval,
+ .s_frame_interval = mt9m114_ifp_s_frame_interval,
+};
+
+static const struct v4l2_subdev_pad_ops mt9m114_ifp_pad_ops = {
+ .init_cfg = mt9m114_ifp_init_cfg,
+ .enum_mbus_code = mt9m114_ifp_enum_mbus_code,
+ .enum_frame_size = mt9m114_ifp_enum_framesizes,
+ .enum_frame_interval = mt9m114_ifp_enum_frameintervals,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = mt9m114_ifp_set_fmt,
+ .get_selection = mt9m114_ifp_get_selection,
+ .set_selection = mt9m114_ifp_set_selection,
+};
+
+static const struct v4l2_subdev_ops mt9m114_ifp_ops = {
+ .video = &mt9m114_ifp_video_ops,
+ .pad = &mt9m114_ifp_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops mt9m114_ifp_internal_ops = {
+ .registered = mt9m114_ifp_registered,
+ .unregistered = mt9m114_ifp_unregistered,
+};
+
+static int mt9m114_ifp_init(struct mt9m114 *sensor)
+{
+ struct v4l2_subdev *sd = &sensor->ifp.sd;
+ struct media_pad *pads = sensor->ifp.pads;
+ struct v4l2_ctrl_handler *hdl = &sensor->ifp.hdl;
+ struct v4l2_ctrl *link_freq;
+ int ret;
+
+ /* Initialize the subdev. */
+ v4l2_i2c_subdev_init(sd, sensor->client, &mt9m114_ifp_ops);
+ v4l2_i2c_subdev_set_name(sd, sensor->client, NULL, " ifp");
+
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->internal_ops = &mt9m114_ifp_internal_ops;
+
+ /* Initialize the media entity. */
+ sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_ISP;
+ sd->entity.ops = &mt9m114_entity_ops;
+ pads[0].flags = MEDIA_PAD_FL_SINK;
+ pads[1].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&sd->entity, 2, pads);
+ if (ret < 0)
+ return ret;
+
+ sensor->ifp.frame_rate = MT9M114_DEF_FRAME_RATE;
+
+ /* Initialize the control handler. */
+ v4l2_ctrl_handler_init(hdl, 8);
+ v4l2_ctrl_new_std(hdl, &mt9m114_ifp_ctrl_ops,
+ V4L2_CID_AUTO_WHITE_BALANCE,
+ 0, 1, 1, 1);
+ v4l2_ctrl_new_std_menu(hdl, &mt9m114_ifp_ctrl_ops,
+ V4L2_CID_EXPOSURE_AUTO,
+ V4L2_EXPOSURE_MANUAL, 0,
+ V4L2_EXPOSURE_AUTO);
+
+ link_freq = v4l2_ctrl_new_int_menu(hdl, &mt9m114_ifp_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ sensor->bus_cfg.nr_of_link_frequencies - 1,
+ 0, sensor->bus_cfg.link_frequencies);
+ if (link_freq)
+ link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(hdl, &mt9m114_ifp_ctrl_ops,
+ V4L2_CID_PIXEL_RATE,
+ sensor->pixrate, sensor->pixrate, 1,
+ sensor->pixrate);
+
+ sensor->ifp.tpg[MT9M114_TPG_PATTERN] =
+ v4l2_ctrl_new_std_menu_items(hdl, &mt9m114_ifp_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(mt9m114_test_pattern_menu) - 1,
+ 0, 0, mt9m114_test_pattern_menu);
+ sensor->ifp.tpg[MT9M114_TPG_RED] =
+ v4l2_ctrl_new_std(hdl, &mt9m114_ifp_ctrl_ops,
+ V4L2_CID_TEST_PATTERN_RED,
+ 0, 1023, 1, 1023);
+ sensor->ifp.tpg[MT9M114_TPG_GREEN] =
+ v4l2_ctrl_new_std(hdl, &mt9m114_ifp_ctrl_ops,
+ V4L2_CID_TEST_PATTERN_GREENR,
+ 0, 1023, 1, 1023);
+ sensor->ifp.tpg[MT9M114_TPG_BLUE] =
+ v4l2_ctrl_new_std(hdl, &mt9m114_ifp_ctrl_ops,
+ V4L2_CID_TEST_PATTERN_BLUE,
+ 0, 1023, 1, 1023);
+
+ v4l2_ctrl_cluster(ARRAY_SIZE(sensor->ifp.tpg), sensor->ifp.tpg);
+
+ if (hdl->error) {
+ ret = hdl->error;
+ goto error;
+ }
+
+ sd->ctrl_handler = hdl;
+ sd->state_lock = hdl->lock;
+
+ ret = v4l2_subdev_init_finalize(sd);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(&sensor->ifp.hdl);
+ media_entity_cleanup(&sensor->ifp.sd.entity);
+ return ret;
+}
+
+static void mt9m114_ifp_cleanup(struct mt9m114 *sensor)
+{
+ v4l2_ctrl_handler_free(&sensor->ifp.hdl);
+ media_entity_cleanup(&sensor->ifp.sd.entity);
+}
+
+/* -----------------------------------------------------------------------------
+ * Power Management
+ */
+
+static int mt9m114_power_on(struct mt9m114 *sensor)
+{
+ int ret;
+
+ /* Enable power and clocks. */
+ ret = regulator_bulk_enable(ARRAY_SIZE(sensor->supplies),
+ sensor->supplies);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(sensor->clk);
+ if (ret < 0)
+ goto error_regulator;
+
+ /* Perform a hard reset if available, or a soft reset otherwise. */
+ if (sensor->reset) {
+ long freq = clk_get_rate(sensor->clk);
+ unsigned int duration;
+
+ /*
+ * The minimum duration is 50 clock cycles, thus typically
+ * around 2µs. Double it to be safe.
+ */
+ duration = DIV_ROUND_UP(2 * 50 * 1000000, freq);
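+ /*
+ * With a 24 MHz external clock, for instance, this evaluates to
+ * DIV_ROUND_UP(100000000, 24000000) = 5, i.e. a 5 µs pulse.
+ */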
+
+ gpiod_set_value(sensor->reset, 1);
+ udelay(duration);
+ gpiod_set_value(sensor->reset, 0);
+ } else {
+ /*
+ * The power may have just been turned on; wait for the sensor to be
+ * ready to accept I2C commands.
+ */
+ usleep_range(44500, 50000);
+
+ cci_write(sensor->regmap, MT9M114_RESET_AND_MISC_CONTROL,
+ MT9M114_RESET_SOC, &ret);
+ cci_write(sensor->regmap, MT9M114_RESET_AND_MISC_CONTROL, 0,
+ &ret);
+
+ if (ret < 0) {
+ dev_err(&sensor->client->dev, "Soft reset failed\n");
+ goto error_clock;
+ }
+ }
+
+ /*
+ * Wait for the sensor initialization to complete by sleeping through
+ * the startup delay, then poll the command register until the firmware
+ * is ready to accept commands.
+ */
+ usleep_range(44500, 50000);
+
+ ret = mt9m114_poll_command(sensor, MT9M114_COMMAND_REGISTER_SET_STATE);
+ if (ret < 0)
+ goto error_clock;
+
+ if (sensor->bus_cfg.bus_type == V4L2_MBUS_PARALLEL) {
+ /*
+ * In parallel mode (OE set to low), the sensor will enter the
+ * streaming state after initialization. Enter the standby state
+ * manually to stop streaming.
+ */
+ ret = mt9m114_set_state(sensor,
+ MT9M114_SYS_STATE_ENTER_STANDBY);
+ if (ret < 0)
+ goto error_clock;
+ }
+
+ /*
+ * Before issuing any Set-State command, we must ensure that the sensor
+ * reaches the standby mode (either initiated manually above in
+ * parallel mode, or automatically after reset in MIPI mode).
+ */
+ ret = mt9m114_poll_state(sensor, MT9M114_SYS_STATE_STANDBY);
+ if (ret < 0)
+ goto error_clock;
+
+ return 0;
+
+error_clock:
+ clk_disable_unprepare(sensor->clk);
+error_regulator:
+ regulator_bulk_disable(ARRAY_SIZE(sensor->supplies), sensor->supplies);
+ return ret;
+}
+
+static void mt9m114_power_off(struct mt9m114 *sensor)
+{
+ clk_disable_unprepare(sensor->clk);
+ regulator_bulk_disable(ARRAY_SIZE(sensor->supplies), sensor->supplies);
+}
+
+static int __maybe_unused mt9m114_runtime_resume(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+ int ret;
+
+ ret = mt9m114_power_on(sensor);
+ if (ret)
+ return ret;
+
+ ret = mt9m114_initialize(sensor);
+ if (ret) {
+ mt9m114_power_off(sensor);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused mt9m114_runtime_suspend(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+
+ mt9m114_power_off(sensor);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mt9m114_pm_ops = {
+ SET_RUNTIME_PM_OPS(mt9m114_runtime_suspend, mt9m114_runtime_resume, NULL)
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe & Remove
+ */
+
+static int mt9m114_clk_init(struct mt9m114 *sensor)
+{
+ unsigned int link_freq;
+
+ /* Hardcode the PLL multiplier and dividers to default settings. */
+ sensor->pll.m = 32;
+ sensor->pll.n = 1;
+ sensor->pll.p = 7;
+
+ /*
+ * Calculate the pixel rate and link frequency. The CSI-2 bus is clocked
+ * for 16 bits per pixel, transmitted in DDR over a single lane. For
+ * parallel mode, the sensor outputs one pixel in two PIXCLK cycles.
+ */
+ sensor->pixrate = clk_get_rate(sensor->clk) * sensor->pll.m
+ / ((sensor->pll.n + 1) * (sensor->pll.p + 1));
+
+ link_freq = sensor->bus_cfg.bus_type == V4L2_MBUS_CSI2_DPHY
+ ? sensor->pixrate * 8 : sensor->pixrate * 2;
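+ /*
+ * With a 24 MHz input clock, for instance, the hardcoded PLL settings
+ * give a 48 MHz pixel rate (24 MHz * 32 / (2 * 8)), i.e. a 384 MHz
+ * link frequency in CSI-2 mode or a 96 MHz PIXCLK in parallel mode.
+ */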
+
+ if (sensor->bus_cfg.nr_of_link_frequencies != 1 ||
+ sensor->bus_cfg.link_frequencies[0] != link_freq) {
+ dev_err(&sensor->client->dev, "Unsupported DT link-frequencies\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mt9m114_identify(struct mt9m114 *sensor)
+{
+ u64 major, minor, release, customer;
+ u64 value;
+ int ret;
+
+ ret = cci_read(sensor->regmap, MT9M114_CHIP_ID, &value, NULL);
+ if (ret) {
+ dev_err(&sensor->client->dev, "Failed to read chip ID\n");
+ return -ENXIO;
+ }
+
+ if (value != 0x2481) {
+ dev_err(&sensor->client->dev, "Invalid chip ID 0x%04llx\n",
+ value);
+ return -ENXIO;
+ }
+
+ cci_read(sensor->regmap, MT9M114_MON_MAJOR_VERSION, &major, &ret);
+ cci_read(sensor->regmap, MT9M114_MON_MINOR_VERSION, &minor, &ret);
+ cci_read(sensor->regmap, MT9M114_MON_RELEASE_VERSION, &release, &ret);
+ cci_read(sensor->regmap, MT9M114_CUSTOMER_REV, &customer, &ret);
+ if (ret) {
+ dev_err(&sensor->client->dev, "Failed to read version\n");
+ return -ENXIO;
+ }
+
+ dev_dbg(&sensor->client->dev,
+ "monitor v%llu.%llu.%04llx customer rev 0x%04llx\n",
+ major, minor, release, customer);
+
+ return 0;
+}
+
+static int mt9m114_parse_dt(struct mt9m114 *sensor)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(&sensor->client->dev);
+ struct fwnode_handle *ep;
+ int ret;
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep) {
+ dev_err(&sensor->client->dev, "No endpoint found\n");
+ return -EINVAL;
+ }
+
+ sensor->bus_cfg.bus_type = V4L2_MBUS_UNKNOWN;
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &sensor->bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret < 0) {
+ dev_err(&sensor->client->dev, "Failed to parse endpoint\n");
+ goto error;
+ }
+
+ switch (sensor->bus_cfg.bus_type) {
+ case V4L2_MBUS_CSI2_DPHY:
+ case V4L2_MBUS_PARALLEL:
+ break;
+
+ default:
+ dev_err(&sensor->client->dev, "unsupported bus type %u\n",
+ sensor->bus_cfg.bus_type);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ return 0;
+
+error:
+ v4l2_fwnode_endpoint_free(&sensor->bus_cfg);
+ return ret;
+}
+
+static int mt9m114_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct mt9m114 *sensor;
+ int ret;
+
+ sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return -ENOMEM;
+
+ sensor->client = client;
+
+ sensor->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(sensor->regmap)) {
+ dev_err(dev, "Unable to initialize I2C\n");
+ return -ENODEV;
+ }
+
+ ret = mt9m114_parse_dt(sensor);
+ if (ret < 0)
+ return ret;
+
+ /* Acquire clocks, GPIOs and regulators. */
+ sensor->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(sensor->clk)) {
+ ret = PTR_ERR(sensor->clk);
+ dev_err_probe(dev, ret, "Failed to get clock\n");
+ goto error_ep_free;
+ }
+
+ sensor->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(sensor->reset)) {
+ ret = PTR_ERR(sensor->reset);
+ dev_err_probe(dev, ret, "Failed to get reset GPIO\n");
+ goto error_ep_free;
+ }
+
+ sensor->supplies[0].supply = "vddio";
+ sensor->supplies[1].supply = "vdd";
+ sensor->supplies[2].supply = "vaa";
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(sensor->supplies),
+ sensor->supplies);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to get regulators\n");
+ goto error_ep_free;
+ }
+
+ ret = mt9m114_clk_init(sensor);
+ if (ret)
+ goto error_ep_free;
+
+ /*
+ * Identify the sensor. The driver supports runtime PM, but needs to
+ * work when runtime PM is disabled in the kernel. To that end, power
+ * the sensor on manually here, and initialize it after identification
+ * to reach the same state as if resumed through runtime PM.
+ */
+ ret = mt9m114_power_on(sensor);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Could not power on the device\n");
+ goto error_ep_free;
+ }
+
+ ret = mt9m114_identify(sensor);
+ if (ret < 0)
+ goto error_power_off;
+
+ ret = mt9m114_initialize(sensor);
+ if (ret < 0)
+ goto error_power_off;
+
+ /*
+ * Enable runtime PM with autosuspend. As the device has been powered
+ * manually, mark it as active, and increase the usage count without
+ * resuming the device.
+ */
+ pm_runtime_set_active(dev);
+ pm_runtime_get_noresume(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_use_autosuspend(dev);
+
+ /* Initialize the subdevices. */
+ ret = mt9m114_pa_init(sensor);
+ if (ret < 0)
+ goto error_pm_cleanup;
+
+ ret = mt9m114_ifp_init(sensor);
+ if (ret < 0)
+ goto error_pa_cleanup;
+
+ ret = v4l2_async_register_subdev(&sensor->ifp.sd);
+ if (ret < 0)
+ goto error_ifp_cleanup;
+
+ /*
+ * Decrease the PM usage count. The device will get suspended after the
+ * autosuspend delay, turning the power off.
+ */
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+
+error_ifp_cleanup:
+ mt9m114_ifp_cleanup(sensor);
+error_pa_cleanup:
+ mt9m114_pa_cleanup(sensor);
+error_pm_cleanup:
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+error_power_off:
+ mt9m114_power_off(sensor);
+error_ep_free:
+ v4l2_fwnode_endpoint_free(&sensor->bus_cfg);
+ return ret;
+}
+
+static void mt9m114_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+ struct device *dev = &client->dev;
+
+ v4l2_async_unregister_subdev(&sensor->ifp.sd);
+
+ mt9m114_ifp_cleanup(sensor);
+ mt9m114_pa_cleanup(sensor);
+ v4l2_fwnode_endpoint_free(&sensor->bus_cfg);
+
+ /*
+ * Disable runtime PM. In case runtime PM is disabled in the kernel,
+ * make sure to turn power off manually.
+ */
+ pm_runtime_disable(dev);
+ if (!pm_runtime_status_suspended(dev))
+ mt9m114_power_off(sensor);
+ pm_runtime_set_suspended(dev);
+}
+
+static const struct of_device_id mt9m114_of_ids[] = {
+ { .compatible = "onnn,mt9m114" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mt9m114_of_ids);
+
+static struct i2c_driver mt9m114_driver = {
+ .driver = {
+ .name = "mt9m114",
+ .pm = &mt9m114_pm_ops,
+ .of_match_table = mt9m114_of_ids,
+ },
+ .probe = mt9m114_probe,
+ .remove = mt9m114_remove,
+};
+
+module_i2c_driver(mt9m114_driver);
+
+MODULE_DESCRIPTION("onsemi MT9M114 Sensor Driver");
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/mt9v011.c b/drivers/media/i2c/mt9v011.c
index 774861ba7747..37a634b92cd5 100644
--- a/drivers/media/i2c/mt9v011.c
+++ b/drivers/media/i2c/mt9v011.c
@@ -49,9 +49,7 @@ MODULE_PARM_DESC(debug, "Debug level (0-2)");
struct mt9v011 {
struct v4l2_subdev sd;
-#ifdef CONFIG_MEDIA_CONTROLLER
struct media_pad pad;
-#endif
struct v4l2_ctrl_handler ctrls;
unsigned width, height;
unsigned xtal;
@@ -483,9 +481,7 @@ static int mt9v011_probe(struct i2c_client *c)
u16 version;
struct mt9v011 *core;
struct v4l2_subdev *sd;
-#ifdef CONFIG_MEDIA_CONTROLLER
int ret;
-#endif
/* Check if the adapter supports the needed features */
if (!i2c_check_functionality(c->adapter,
@@ -499,14 +495,12 @@ static int mt9v011_probe(struct i2c_client *c)
sd = &core->sd;
v4l2_i2c_subdev_init(sd, c, &mt9v011_ops);
-#ifdef CONFIG_MEDIA_CONTROLLER
core->pad.flags = MEDIA_PAD_FL_SOURCE;
sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
ret = media_entity_pads_init(&sd->entity, 1, &core->pad);
if (ret < 0)
return ret;
-#endif
/* Check if the sensor is really a MT9V011 */
version = mt9v011_read(sd, R00_MT9V011_CHIP_VERSION);
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 00e7bc6e3235..1c6f6cea1204 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -14,6 +14,7 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/log2.h>
+#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_graph.h>
@@ -1046,7 +1047,6 @@ done:
static int mt9v032_probe(struct i2c_client *client)
{
- const struct i2c_device_id *did = i2c_client_get_device_id(client);
struct mt9v032_platform_data *pdata = mt9v032_get_pdata(client);
struct mt9v032 *mt9v032;
unsigned int i;
@@ -1076,7 +1076,7 @@ static int mt9v032_probe(struct i2c_client *client)
mutex_init(&mt9v032->power_lock);
mt9v032->pdata = pdata;
- mt9v032->model = (const void *)did->driver_data;
+ mt9v032->model = i2c_get_match_data(client);
v4l2_ctrl_handler_init(&mt9v032->ctrls, 11 +
ARRAY_SIZE(mt9v032_aegc_controls));
@@ -1272,29 +1272,27 @@ static const struct i2c_device_id mt9v032_id[] = {
{ "mt9v032m", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V032_MONO] },
{ "mt9v034", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V034_COLOR] },
{ "mt9v034m", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V034_MONO] },
- { }
+ { /* Sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, mt9v032_id);
-#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id mt9v032_of_match[] = {
- { .compatible = "aptina,mt9v022" },
- { .compatible = "aptina,mt9v022m" },
- { .compatible = "aptina,mt9v024" },
- { .compatible = "aptina,mt9v024m" },
- { .compatible = "aptina,mt9v032" },
- { .compatible = "aptina,mt9v032m" },
- { .compatible = "aptina,mt9v034" },
- { .compatible = "aptina,mt9v034m" },
+ { .compatible = "aptina,mt9v022", .data = &mt9v032_models[MT9V032_MODEL_V022_COLOR] },
+ { .compatible = "aptina,mt9v022m", .data = &mt9v032_models[MT9V032_MODEL_V022_MONO] },
+ { .compatible = "aptina,mt9v024", .data = &mt9v032_models[MT9V032_MODEL_V024_COLOR] },
+ { .compatible = "aptina,mt9v024m", .data = &mt9v032_models[MT9V032_MODEL_V024_MONO] },
+ { .compatible = "aptina,mt9v032", .data = &mt9v032_models[MT9V032_MODEL_V032_COLOR] },
+ { .compatible = "aptina,mt9v032m", .data = &mt9v032_models[MT9V032_MODEL_V032_MONO] },
+ { .compatible = "aptina,mt9v034", .data = &mt9v032_models[MT9V032_MODEL_V034_COLOR] },
+ { .compatible = "aptina,mt9v034m", .data = &mt9v032_models[MT9V032_MODEL_V034_MONO] },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, mt9v032_of_match);
-#endif
static struct i2c_driver mt9v032_driver = {
.driver = {
.name = "mt9v032",
- .of_match_table = of_match_ptr(mt9v032_of_match),
+ .of_match_table = mt9v032_of_match,
},
.probe = mt9v032_probe,
.remove = mt9v032_remove,
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c
index 1f7edc0f5b1a..f859b49e13bf 100644
--- a/drivers/media/i2c/mt9v111.c
+++ b/drivers/media/i2c/mt9v111.c
@@ -121,9 +121,7 @@ struct mt9v111_dev {
u8 addr_space;
struct v4l2_subdev sd;
-#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
struct media_pad pad;
-#endif
struct v4l2_ctrl *auto_awb;
struct v4l2_ctrl *auto_exp;
@@ -797,11 +795,7 @@ static struct v4l2_mbus_framefmt *__mt9v111_get_pad_format(
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
-#if IS_ENABLED(CONFIG_VIDEO_V4L2_SUBDEV_API)
return v4l2_subdev_get_try_format(&mt9v111->sd, sd_state, pad);
-#else
- return &sd_state->pads->try_fmt;
-#endif
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9v111->fmt;
default:
@@ -987,11 +981,9 @@ static const struct v4l2_subdev_ops mt9v111_ops = {
.pad = &mt9v111_pad_ops,
};
-#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
static const struct media_entity_operations mt9v111_subdev_entity_ops = {
.link_validate = v4l2_subdev_link_validate,
};
-#endif
/* --- V4L2 ctrl --- */
static int mt9v111_s_ctrl(struct v4l2_ctrl *ctrl)
@@ -1203,7 +1195,6 @@ static int mt9v111_probe(struct i2c_client *client)
v4l2_i2c_subdev_init(&mt9v111->sd, client, &mt9v111_ops);
-#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
mt9v111->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
mt9v111->sd.entity.ops = &mt9v111_subdev_entity_ops;
mt9v111->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
@@ -1212,7 +1203,6 @@ static int mt9v111_probe(struct i2c_client *client)
ret = media_entity_pads_init(&mt9v111->sd.entity, 1, &mt9v111->pad);
if (ret)
goto error_free_entity;
-#endif
ret = mt9v111_chip_probe(mt9v111);
if (ret)
@@ -1225,9 +1215,7 @@ static int mt9v111_probe(struct i2c_client *client)
return 0;
error_free_entity:
-#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
media_entity_cleanup(&mt9v111->sd.entity);
-#endif
error_free_ctrls:
v4l2_ctrl_handler_free(&mt9v111->ctrls);
@@ -1245,9 +1233,7 @@ static void mt9v111_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
-#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
media_entity_cleanup(&sd->entity);
-#endif
v4l2_ctrl_handler_free(&mt9v111->ctrls);
diff --git a/drivers/media/i2c/og01a1b.c b/drivers/media/i2c/og01a1b.c
index 365ce5684583..51378ba16a5d 100644
--- a/drivers/media/i2c/og01a1b.c
+++ b/drivers/media/i2c/og01a1b.c
@@ -434,9 +434,6 @@ struct og01a1b {
/* To serialize asynchronus callbacks */
struct mutex mutex;
-
- /* Streaming on/off */
- bool streaming;
};
static u64 to_pixel_rate(u32 f_index)
@@ -732,14 +729,10 @@ static int og01a1b_set_stream(struct v4l2_subdev *sd, int enable)
struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
- if (og01a1b->streaming == enable)
- return 0;
-
mutex_lock(&og01a1b->mutex);
if (enable) {
- ret = pm_runtime_get_sync(&client->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret) {
mutex_unlock(&og01a1b->mutex);
return ret;
}
@@ -755,50 +748,11 @@ static int og01a1b_set_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_put(&client->dev);
}
- og01a1b->streaming = enable;
mutex_unlock(&og01a1b->mutex);
return ret;
}
-static int __maybe_unused og01a1b_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct og01a1b *og01a1b = to_og01a1b(sd);
-
- mutex_lock(&og01a1b->mutex);
- if (og01a1b->streaming)
- og01a1b_stop_streaming(og01a1b);
-
- mutex_unlock(&og01a1b->mutex);
-
- return 0;
-}
-
-static int __maybe_unused og01a1b_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct og01a1b *og01a1b = to_og01a1b(sd);
- int ret;
-
- mutex_lock(&og01a1b->mutex);
- if (og01a1b->streaming) {
- ret = og01a1b_start_streaming(og01a1b);
- if (ret) {
- og01a1b->streaming = false;
- og01a1b_stop_streaming(og01a1b);
- mutex_unlock(&og01a1b->mutex);
- return ret;
- }
- }
-
- mutex_unlock(&og01a1b->mutex);
-
- return 0;
-}
-
static int og01a1b_set_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
@@ -1096,10 +1050,6 @@ probe_error_v4l2_ctrl_handler_free:
return ret;
}
-static const struct dev_pm_ops og01a1b_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(og01a1b_suspend, og01a1b_resume)
-};
-
#ifdef CONFIG_ACPI
static const struct acpi_device_id og01a1b_acpi_ids[] = {
{"OVTI01AC"},
@@ -1112,7 +1062,6 @@ MODULE_DEVICE_TABLE(acpi, og01a1b_acpi_ids);
static struct i2c_driver og01a1b_i2c_driver = {
.driver = {
.name = "og01a1b",
- .pm = &og01a1b_pm_ops,
.acpi_match_table = ACPI_PTR(og01a1b_acpi_ids),
},
.probe = og01a1b_probe,
diff --git a/drivers/media/i2c/ov01a10.c b/drivers/media/i2c/ov01a10.c
index 2b9e1b3a3bf4..bbd5740d2280 100644
--- a/drivers/media/i2c/ov01a10.c
+++ b/drivers/media/i2c/ov01a10.c
@@ -287,9 +287,6 @@ struct ov01a10 {
struct v4l2_ctrl *exposure;
const struct ov01a10_mode *cur_mode;
-
- /* streaming state */
- bool streaming;
};
static inline struct ov01a10 *to_ov01a10(struct v4l2_subdev *subdev)
@@ -672,8 +669,6 @@ static int ov01a10_set_stream(struct v4l2_subdev *sd, int enable)
int ret = 0;
state = v4l2_subdev_lock_and_get_active_state(sd);
- if (ov01a10->streaming == enable)
- goto unlock;
if (enable) {
ret = pm_runtime_resume_and_get(&client->dev);
@@ -685,55 +680,12 @@ static int ov01a10_set_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_put(&client->dev);
goto unlock;
}
-
- goto done;
- }
-
- ov01a10_stop_streaming(ov01a10);
- pm_runtime_put(&client->dev);
-done:
- ov01a10->streaming = enable;
-unlock:
- v4l2_subdev_unlock_state(state);
-
- return ret;
-}
-
-static int __maybe_unused ov01a10_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct ov01a10 *ov01a10 = to_ov01a10(sd);
- struct v4l2_subdev_state *state;
-
- state = v4l2_subdev_lock_and_get_active_state(sd);
- if (ov01a10->streaming)
- ov01a10_stop_streaming(ov01a10);
-
- v4l2_subdev_unlock_state(state);
-
- return 0;
-}
-
-static int __maybe_unused ov01a10_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct ov01a10 *ov01a10 = to_ov01a10(sd);
- struct v4l2_subdev_state *state;
- int ret = 0;
-
- state = v4l2_subdev_lock_and_get_active_state(sd);
- if (!ov01a10->streaming)
- goto exit;
-
- ret = ov01a10_start_streaming(ov01a10);
- if (ret) {
- ov01a10->streaming = false;
+ } else {
ov01a10_stop_streaming(ov01a10);
+ pm_runtime_put(&client->dev);
}
-exit:
+unlock:
v4l2_subdev_unlock_state(state);
return ret;
@@ -973,10 +925,6 @@ err_handler_free:
return ret;
}
-static const struct dev_pm_ops ov01a10_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ov01a10_suspend, ov01a10_resume)
-};
-
#ifdef CONFIG_ACPI
static const struct acpi_device_id ov01a10_acpi_ids[] = {
{ "OVTI01A0" },
@@ -989,7 +937,6 @@ MODULE_DEVICE_TABLE(acpi, ov01a10_acpi_ids);
static struct i2c_driver ov01a10_i2c_driver = {
.driver = {
.name = "ov01a10",
- .pm = &ov01a10_pm_ops,
.acpi_match_table = ACPI_PTR(ov01a10_acpi_ids),
},
.probe = ov01a10_probe,
diff --git a/drivers/media/i2c/ov02a10.c b/drivers/media/i2c/ov02a10.c
index 741d977a76f3..848e47a464ac 100644
--- a/drivers/media/i2c/ov02a10.c
+++ b/drivers/media/i2c/ov02a10.c
@@ -570,8 +570,6 @@ unlock_and_return:
}
static const struct dev_pm_ops ov02a10_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(ov02a10_power_off, ov02a10_power_on, NULL)
};
diff --git a/drivers/media/i2c/ov08d10.c b/drivers/media/i2c/ov08d10.c
index 7d55d4ca24de..3d49e3fa8e56 100644
--- a/drivers/media/i2c/ov08d10.c
+++ b/drivers/media/i2c/ov08d10.c
@@ -536,9 +536,6 @@ struct ov08d10 {
/* To serialize asynchronous callbacks */
struct mutex mutex;
- /* Streaming on/off */
- bool streaming;
-
/* lanes index */
u8 nlanes;
@@ -1103,9 +1100,6 @@ static int ov08d10_set_stream(struct v4l2_subdev *sd, int enable)
struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
- if (ov08d10->streaming == enable)
- return 0;
-
mutex_lock(&ov08d10->mutex);
if (enable) {
ret = pm_runtime_resume_and_get(&client->dev);
@@ -1125,8 +1119,6 @@ static int ov08d10_set_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_put(&client->dev);
}
- ov08d10->streaming = enable;
-
/* vflip and hflip cannot change during streaming */
__v4l2_ctrl_grab(ov08d10->vflip, enable);
__v4l2_ctrl_grab(ov08d10->hflip, enable);
@@ -1136,45 +1128,6 @@ static int ov08d10_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
}
-static int __maybe_unused ov08d10_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct ov08d10 *ov08d10 = to_ov08d10(sd);
-
- mutex_lock(&ov08d10->mutex);
- if (ov08d10->streaming)
- ov08d10_stop_streaming(ov08d10);
-
- mutex_unlock(&ov08d10->mutex);
-
- return 0;
-}
-
-static int __maybe_unused ov08d10_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct ov08d10 *ov08d10 = to_ov08d10(sd);
- int ret;
-
- mutex_lock(&ov08d10->mutex);
-
- if (ov08d10->streaming) {
- ret = ov08d10_start_streaming(ov08d10);
- if (ret) {
- ov08d10->streaming = false;
- ov08d10_stop_streaming(ov08d10);
- mutex_unlock(&ov08d10->mutex);
- return ret;
- }
- }
-
- mutex_unlock(&ov08d10->mutex);
-
- return 0;
-}
-
static int ov08d10_set_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
@@ -1501,10 +1454,6 @@ probe_error_v4l2_ctrl_handler_free:
return ret;
}
-static const struct dev_pm_ops ov08d10_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ov08d10_suspend, ov08d10_resume)
-};
-
#ifdef CONFIG_ACPI
static const struct acpi_device_id ov08d10_acpi_ids[] = {
{ "OVTI08D1" },
@@ -1517,7 +1466,6 @@ MODULE_DEVICE_TABLE(acpi, ov08d10_acpi_ids);
static struct i2c_driver ov08d10_i2c_driver = {
.driver = {
.name = "ov08d10",
- .pm = &ov08d10_pm_ops,
.acpi_match_table = ACPI_PTR(ov08d10_acpi_ids),
},
.probe = ov08d10_probe,
diff --git a/drivers/media/i2c/ov08x40.c b/drivers/media/i2c/ov08x40.c
index 637da4df6901..b41b6866a0ab 100644
--- a/drivers/media/i2c/ov08x40.c
+++ b/drivers/media/i2c/ov08x40.c
@@ -2432,9 +2432,6 @@ struct ov08x40 {
/* Mutex for serialized access */
struct mutex mutex;
-
- /* Streaming on/off */
- bool streaming;
};
#define to_ov08x40(_sd) container_of(_sd, struct ov08x40, sd)
@@ -2915,10 +2912,6 @@ static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable)
int ret = 0;
mutex_lock(&ov08x->mutex);
- if (ov08x->streaming == enable) {
- mutex_unlock(&ov08x->mutex);
- return 0;
- }
if (enable) {
ret = pm_runtime_resume_and_get(&client->dev);
@@ -2937,7 +2930,6 @@ static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_put(&client->dev);
}
- ov08x->streaming = enable;
mutex_unlock(&ov08x->mutex);
return ret;
@@ -2950,37 +2942,6 @@ err_unlock:
return ret;
}
-static int __maybe_unused ov08x40_suspend(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ov08x40 *ov08x = to_ov08x40(sd);
-
- if (ov08x->streaming)
- ov08x40_stop_streaming(ov08x);
-
- return 0;
-}
-
-static int __maybe_unused ov08x40_resume(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ov08x40 *ov08x = to_ov08x40(sd);
- int ret;
-
- if (ov08x->streaming) {
- ret = ov08x40_start_streaming(ov08x);
- if (ret)
- goto error;
- }
-
- return 0;
-
-error:
- ov08x40_stop_streaming(ov08x);
- ov08x->streaming = false;
- return ret;
-}
-
/* Verify chip ID */
static int ov08x40_identify_module(struct ov08x40 *ov08x)
{
@@ -3294,10 +3255,6 @@ static void ov08x40_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
}
-static const struct dev_pm_ops ov08x40_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ov08x40_suspend, ov08x40_resume)
-};
-
#ifdef CONFIG_ACPI
static const struct acpi_device_id ov08x40_acpi_ids[] = {
{"OVTI08F4"},
@@ -3310,7 +3267,6 @@ MODULE_DEVICE_TABLE(acpi, ov08x40_acpi_ids);
static struct i2c_driver ov08x40_i2c_driver = {
.driver = {
.name = "ov08x40",
- .pm = &ov08x40_pm_ops,
.acpi_match_table = ACPI_PTR(ov08x40_acpi_ids),
},
.probe = ov08x40_probe,
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index 35652b362347..4c419014dd7b 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -1044,9 +1044,6 @@ struct ov13858 {
/* Mutex for serialized access */
struct mutex mutex;
-
- /* Streaming on/off */
- bool streaming;
};
#define to_ov13858(_sd) container_of(_sd, struct ov13858, sd)
@@ -1467,10 +1464,6 @@ static int ov13858_set_stream(struct v4l2_subdev *sd, int enable)
int ret = 0;
mutex_lock(&ov13858->mutex);
- if (ov13858->streaming == enable) {
- mutex_unlock(&ov13858->mutex);
- return 0;
- }
if (enable) {
ret = pm_runtime_resume_and_get(&client->dev);
@@ -1489,7 +1482,6 @@ static int ov13858_set_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_put(&client->dev);
}
- ov13858->streaming = enable;
mutex_unlock(&ov13858->mutex);
return ret;
@@ -1502,37 +1494,6 @@ err_unlock:
return ret;
}
-static int __maybe_unused ov13858_suspend(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ov13858 *ov13858 = to_ov13858(sd);
-
- if (ov13858->streaming)
- ov13858_stop_streaming(ov13858);
-
- return 0;
-}
-
-static int __maybe_unused ov13858_resume(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ov13858 *ov13858 = to_ov13858(sd);
- int ret;
-
- if (ov13858->streaming) {
- ret = ov13858_start_streaming(ov13858);
- if (ret)
- goto error;
- }
-
- return 0;
-
-error:
- ov13858_stop_streaming(ov13858);
- ov13858->streaming = false;
- return ret;
-}
-
/* Verify chip ID */
static int ov13858_identify_module(struct ov13858 *ov13858)
{
@@ -1787,10 +1748,6 @@ static const struct i2c_device_id ov13858_id_table[] = {
MODULE_DEVICE_TABLE(i2c, ov13858_id_table);
-static const struct dev_pm_ops ov13858_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ov13858_suspend, ov13858_resume)
-};
-
#ifdef CONFIG_ACPI
static const struct acpi_device_id ov13858_acpi_ids[] = {
{"OVTID858"},
@@ -1803,7 +1760,6 @@ MODULE_DEVICE_TABLE(acpi, ov13858_acpi_ids);
static struct i2c_driver ov13858_i2c_driver = {
.driver = {
.name = "ov13858",
- .pm = &ov13858_pm_ops,
.acpi_match_table = ACPI_PTR(ov13858_acpi_ids),
},
.probe = ov13858_probe,
diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c
index dbc642c5995b..970d2caeb3d6 100644
--- a/drivers/media/i2c/ov13b10.c
+++ b/drivers/media/i2c/ov13b10.c
@@ -31,6 +31,7 @@
#define OV13B10_REG_VTS 0x380e
#define OV13B10_VTS_30FPS 0x0c7c
#define OV13B10_VTS_60FPS 0x063e
+#define OV13B10_VTS_120FPS 0x0320
#define OV13B10_VTS_MAX 0x7fff
/* HBLANK control - read only */
@@ -468,6 +469,50 @@ static const struct ov13b10_reg mode_2080x1170_regs[] = {
{0x5001, 0x0d},
};
+static const struct ov13b10_reg mode_1364x768_120fps_regs[] = {
+ {0x0305, 0xaf},
+ {0x3011, 0x7c},
+ {0x3501, 0x03},
+ {0x3502, 0x00},
+ {0x3662, 0x88},
+ {0x3714, 0x28},
+ {0x3739, 0x10},
+ {0x37c2, 0x14},
+ {0x37d9, 0x06},
+ {0x37e2, 0x0c},
+ {0x37e4, 0x00},
+ {0x3800, 0x02},
+ {0x3801, 0xe4},
+ {0x3802, 0x03},
+ {0x3803, 0x48},
+ {0x3804, 0x0d},
+ {0x3805, 0xab},
+ {0x3806, 0x09},
+ {0x3807, 0x60},
+ {0x3808, 0x05},
+ {0x3809, 0x54},
+ {0x380a, 0x03},
+ {0x380b, 0x00},
+ {0x380c, 0x04},
+ {0x380d, 0x8e},
+ {0x380e, 0x03},
+ {0x380f, 0x20},
+ {0x3811, 0x07},
+ {0x3813, 0x07},
+ {0x3814, 0x03},
+ {0x3816, 0x03},
+ {0x3820, 0x8b},
+ {0x3c8c, 0x18},
+ {0x4008, 0x00},
+ {0x4009, 0x05},
+ {0x4050, 0x00},
+ {0x4051, 0x05},
+ {0x4501, 0x08},
+ {0x4505, 0x04},
+ {0x5000, 0xfd},
+ {0x5001, 0x0d},
+};
+
static const char * const ov13b10_test_pattern_menu[] = {
"Disabled",
"Vertical Color Bar Type 1",
@@ -568,7 +613,18 @@ static const struct ov13b10_mode supported_modes[] = {
.regs = mode_2080x1170_regs,
},
.link_freq_index = OV13B10_LINK_FREQ_INDEX_0,
- }
+ },
+ {
+ .width = 1364,
+ .height = 768,
+ .vts_def = OV13B10_VTS_120FPS,
+ .vts_min = OV13B10_VTS_120FPS,
+ .link_freq_index = OV13B10_LINK_FREQ_INDEX_0,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1364x768_120fps_regs),
+ .regs = mode_1364x768_120fps_regs,
+ },
+ },
};
struct ov13b10 {
@@ -594,9 +650,6 @@ struct ov13b10 {
/* Mutex for serialized access */
struct mutex mutex;
- /* Streaming on/off */
- bool streaming;
-
/* True if the device has been identified */
bool identified;
};
@@ -1161,10 +1214,6 @@ static int ov13b10_set_stream(struct v4l2_subdev *sd, int enable)
int ret = 0;
mutex_lock(&ov13b->mutex);
- if (ov13b->streaming == enable) {
- mutex_unlock(&ov13b->mutex);
- return 0;
- }
if (enable) {
ret = pm_runtime_resume_and_get(&client->dev);
@@ -1183,7 +1232,6 @@ static int ov13b10_set_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_put(&client->dev);
}
- ov13b->streaming = enable;
mutex_unlock(&ov13b->mutex);
return ret;
@@ -1198,12 +1246,6 @@ err_unlock:
static int ov13b10_suspend(struct device *dev)
{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ov13b10 *ov13b = to_ov13b10(sd);
-
- if (ov13b->streaming)
- ov13b10_stop_streaming(ov13b);
-
ov13b10_power_off(dev);
return 0;
@@ -1211,29 +1253,7 @@ static int ov13b10_suspend(struct device *dev)
static int ov13b10_resume(struct device *dev)
{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ov13b10 *ov13b = to_ov13b10(sd);
- int ret;
-
- ret = ov13b10_power_on(dev);
- if (ret)
- goto pm_fail;
-
- if (ov13b->streaming) {
- ret = ov13b10_start_streaming(ov13b);
- if (ret)
- goto stop_streaming;
- }
-
- return 0;
-
-stop_streaming:
- ov13b10_stop_streaming(ov13b);
- ov13b10_power_off(dev);
-pm_fail:
- ov13b->streaming = false;
-
- return ret;
+ return ov13b10_power_on(dev);
}
static const struct v4l2_subdev_video_ops ov13b10_video_ops = {
@@ -1501,7 +1521,7 @@ static int ov13b10_probe(struct i2c_client *client)
full_power = acpi_dev_state_d0(&client->dev);
if (full_power) {
- ov13b10_power_on(&client->dev);
+ ret = ov13b10_power_on(&client->dev);
if (ret) {
dev_err(&client->dev, "failed to power on\n");
return ret;
diff --git a/drivers/media/i2c/ov2640.c b/drivers/media/i2c/ov2640.c
index bb6c9863a546..28a01c6eff64 100644
--- a/drivers/media/i2c/ov2640.c
+++ b/drivers/media/i2c/ov2640.c
@@ -293,9 +293,7 @@ struct ov2640_win_size {
struct ov2640_priv {
struct v4l2_subdev subdev;
-#if defined(CONFIG_MEDIA_CONTROLLER)
struct media_pad pad;
-#endif
struct v4l2_ctrl_handler hdl;
u32 cfmt_code;
struct clk *clk;
@@ -922,13 +920,9 @@ static int ov2640_get_fmt(struct v4l2_subdev *sd,
return -EINVAL;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
format->format = *mf;
return 0;
-#else
- return -EINVAL;
-#endif
}
mf->width = priv->win->width;
@@ -1005,7 +999,6 @@ out:
static int ov2640_init_cfg(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state)
{
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
struct v4l2_mbus_framefmt *try_fmt =
v4l2_subdev_get_try_format(sd, sd_state, 0);
const struct ov2640_win_size *win =
@@ -1019,7 +1012,7 @@ static int ov2640_init_cfg(struct v4l2_subdev *sd,
try_fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
try_fmt->quantization = V4L2_QUANTIZATION_DEFAULT;
try_fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
-#endif
+
return 0;
}
@@ -1205,17 +1198,14 @@ static int ov2640_probe(struct i2c_client *client)
return -ENOMEM;
if (client->dev.of_node) {
- priv->clk = devm_clk_get(&client->dev, "xvclk");
+ priv->clk = devm_clk_get_enabled(&client->dev, "xvclk");
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
}
ret = ov2640_probe_dt(client, priv);
if (ret)
- goto err_clk;
+ return ret;
priv->win = ov2640_select_win(SVGA_WIDTH, SVGA_HEIGHT);
priv->cfmt_code = MEDIA_BUS_FMT_UYVY8_2X8;
@@ -1239,13 +1229,11 @@ static int ov2640_probe(struct i2c_client *client)
ret = priv->hdl.error;
goto err_hdl;
}
-#if defined(CONFIG_MEDIA_CONTROLLER)
priv->pad.flags = MEDIA_PAD_FL_SOURCE;
priv->subdev.entity.function = MEDIA_ENT_F_CAM_SENSOR;
ret = media_entity_pads_init(&priv->subdev.entity, 1, &priv->pad);
if (ret < 0)
goto err_hdl;
-#endif
ret = ov2640_video_probe(client);
if (ret < 0)
@@ -1264,8 +1252,6 @@ err_videoprobe:
err_hdl:
v4l2_ctrl_handler_free(&priv->hdl);
mutex_destroy(&priv->lock);
-err_clk:
- clk_disable_unprepare(priv->clk);
return ret;
}
@@ -1278,7 +1264,6 @@ static void ov2640_remove(struct i2c_client *client)
mutex_destroy(&priv->lock);
media_entity_cleanup(&priv->subdev.entity);
v4l2_device_unregister_subdev(&priv->subdev);
- clk_disable_unprepare(priv->clk);
}
static const struct i2c_device_id ov2640_id[] = {
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index 5429bd2eb053..2c3dbe164eb6 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -1031,7 +1031,6 @@ static int ov2659_get_fmt(struct v4l2_subdev *sd,
dev_dbg(&client->dev, "ov2659_get_fmt\n");
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
struct v4l2_mbus_framefmt *mf;
mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
@@ -1039,9 +1038,6 @@ static int ov2659_get_fmt(struct v4l2_subdev *sd,
fmt->format = *mf;
mutex_unlock(&ov2659->lock);
return 0;
-#else
- return -EINVAL;
-#endif
}
mutex_lock(&ov2659->lock);
@@ -1113,10 +1109,8 @@ static int ov2659_set_fmt(struct v4l2_subdev *sd,
mutex_lock(&ov2659->lock);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*mf = fmt->format;
-#endif
} else {
s64 val;
@@ -1306,7 +1300,6 @@ static int ov2659_power_on(struct device *dev)
* V4L2 subdev internal operations
*/
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
static int ov2659_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -1319,7 +1312,6 @@ static int ov2659_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
return 0;
}
-#endif
static const struct v4l2_subdev_core_ops ov2659_subdev_core_ops = {
.log_status = v4l2_ctrl_subdev_log_status,
@@ -1338,7 +1330,6 @@ static const struct v4l2_subdev_pad_ops ov2659_subdev_pad_ops = {
.set_fmt = ov2659_set_fmt,
};
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
static const struct v4l2_subdev_ops ov2659_subdev_ops = {
.core = &ov2659_subdev_core_ops,
.video = &ov2659_subdev_video_ops,
@@ -1348,7 +1339,6 @@ static const struct v4l2_subdev_ops ov2659_subdev_ops = {
static const struct v4l2_subdev_internal_ops ov2659_subdev_internal_ops = {
.open = ov2659_open,
};
-#endif
static int ov2659_detect(struct v4l2_subdev *sd)
{
@@ -1489,15 +1479,12 @@ static int ov2659_probe(struct i2c_client *client)
sd = &ov2659->sd;
client->flags |= I2C_CLIENT_SCCB;
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- v4l2_i2c_subdev_init(sd, client, &ov2659_subdev_ops);
+ v4l2_i2c_subdev_init(sd, client, &ov2659_subdev_ops);
sd->internal_ops = &ov2659_subdev_internal_ops;
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
V4L2_SUBDEV_FL_HAS_EVENTS;
-#endif
-#if defined(CONFIG_MEDIA_CONTROLLER)
ov2659->pad.flags = MEDIA_PAD_FL_SOURCE;
sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
ret = media_entity_pads_init(&sd->entity, 1, &ov2659->pad);
@@ -1505,7 +1492,6 @@ static int ov2659_probe(struct i2c_client *client)
v4l2_ctrl_handler_free(&ov2659->ctrls);
return ret;
}
-#endif
mutex_init(&ov2659->lock);
diff --git a/drivers/media/i2c/ov2685.c b/drivers/media/i2c/ov2685.c
index 303793e1f97d..396583826ae9 100644
--- a/drivers/media/i2c/ov2685.c
+++ b/drivers/media/i2c/ov2685.c
@@ -91,7 +91,6 @@ struct ov2685 {
struct gpio_desc *reset_gpio;
struct regulator_bulk_data supplies[OV2685_NUM_SUPPLIES];
- bool streaming;
struct mutex mutex;
struct v4l2_subdev subdev;
struct media_pad pad;
@@ -513,10 +512,6 @@ static int ov2685_s_stream(struct v4l2_subdev *sd, int on)
mutex_lock(&ov2685->mutex);
- on = !!on;
- if (on == ov2685->streaming)
- goto unlock_and_return;
-
if (on) {
ret = pm_runtime_resume_and_get(&ov2685->client->dev);
if (ret < 0)
@@ -539,15 +534,12 @@ static int ov2685_s_stream(struct v4l2_subdev *sd, int on)
pm_runtime_put(&ov2685->client->dev);
}
- ov2685->streaming = on;
-
unlock_and_return:
mutex_unlock(&ov2685->mutex);
return ret;
}
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
static int ov2685_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct ov2685 *ov2685 = to_ov2685(sd);
@@ -563,7 +555,6 @@ static int ov2685_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
return 0;
}
-#endif
static int __maybe_unused ov2685_runtime_resume(struct device *dev)
{
@@ -660,11 +651,9 @@ static const struct v4l2_subdev_ops ov2685_subdev_ops = {
.pad = &ov2685_pad_ops,
};
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
static const struct v4l2_subdev_internal_ops ov2685_internal_ops = {
.open = ov2685_open,
};
-#endif
static const struct v4l2_ctrl_ops ov2685_ctrl_ops = {
.s_ctrl = ov2685_set_ctrl,
@@ -833,17 +822,13 @@ static int ov2685_probe(struct i2c_client *client)
if (ret)
goto err_power_off;
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
ov2685->subdev.internal_ops = &ov2685_internal_ops;
ov2685->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-#endif
-#if defined(CONFIG_MEDIA_CONTROLLER)
ov2685->pad.flags = MEDIA_PAD_FL_SOURCE;
ov2685->subdev.entity.function = MEDIA_ENT_F_CAM_SENSOR;
ret = media_entity_pads_init(&ov2685->subdev.entity, 1, &ov2685->pad);
if (ret < 0)
goto err_power_off;
-#endif
ret = v4l2_async_register_subdev(&ov2685->subdev);
if (ret) {
@@ -858,9 +843,7 @@ static int ov2685_probe(struct i2c_client *client)
return 0;
err_clean_entity:
-#if defined(CONFIG_MEDIA_CONTROLLER)
media_entity_cleanup(&ov2685->subdev.entity);
-#endif
err_power_off:
__ov2685_power_off(ov2685);
err_free_handler:
@@ -877,9 +860,7 @@ static void ov2685_remove(struct i2c_client *client)
struct ov2685 *ov2685 = to_ov2685(sd);
v4l2_async_unregister_subdev(sd);
-#if defined(CONFIG_MEDIA_CONTROLLER)
media_entity_cleanup(&sd->entity);
-#endif
v4l2_ctrl_handler_free(&ov2685->ctrl_handler);
mutex_destroy(&ov2685->mutex);
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index 41d4f85470fd..24e468485fbf 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -336,12 +336,6 @@ struct ov2740 {
/* Current mode */
const struct ov2740_mode *cur_mode;
- /* To serialize asynchronus callbacks */
- struct mutex mutex;
-
- /* Streaming on/off */
- bool streaming;
-
/* NVM data information */
struct nvm_data *nvm;
@@ -582,7 +576,6 @@ static int ov2740_init_controls(struct ov2740 *ov2740)
if (ret)
return ret;
- ctrl_hdlr->lock = &ov2740->mutex;
cur_mode = ov2740->cur_mode;
size = ARRAY_SIZE(link_freq_menu_items);
@@ -792,18 +785,15 @@ static int ov2740_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov2740 *ov2740 = to_ov2740(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev_state *sd_state;
int ret = 0;
- if (ov2740->streaming == enable)
- return 0;
+ sd_state = v4l2_subdev_lock_and_get_active_state(&ov2740->sd);
- mutex_lock(&ov2740->mutex);
if (enable) {
ret = pm_runtime_resume_and_get(&client->dev);
- if (ret < 0) {
- mutex_unlock(&ov2740->mutex);
- return ret;
- }
+ if (ret < 0)
+ goto out_unlock;
ret = ov2740_start_streaming(ov2740);
if (ret) {
@@ -816,44 +806,9 @@ static int ov2740_set_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_put(&client->dev);
}
- ov2740->streaming = enable;
- mutex_unlock(&ov2740->mutex);
-
- return ret;
-}
-
-static int ov2740_suspend(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ov2740 *ov2740 = to_ov2740(sd);
-
- mutex_lock(&ov2740->mutex);
- if (ov2740->streaming)
- ov2740_stop_streaming(ov2740);
-
- mutex_unlock(&ov2740->mutex);
-
- return 0;
-}
-
-static int ov2740_resume(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ov2740 *ov2740 = to_ov2740(sd);
- int ret = 0;
-
- mutex_lock(&ov2740->mutex);
- if (!ov2740->streaming)
- goto exit;
-
- ret = ov2740_start_streaming(ov2740);
- if (ret) {
- ov2740->streaming = false;
- ov2740_stop_streaming(ov2740);
- }
+out_unlock:
+ v4l2_subdev_unlock_state(sd_state);
-exit:
- mutex_unlock(&ov2740->mutex);
return ret;
}
@@ -870,48 +825,26 @@ static int ov2740_set_format(struct v4l2_subdev *sd,
height, fmt->format.width,
fmt->format.height);
- mutex_lock(&ov2740->mutex);
ov2740_update_pad_format(mode, &fmt->format);
- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
- } else {
- ov2740->cur_mode = mode;
- __v4l2_ctrl_s_ctrl(ov2740->link_freq, mode->link_freq_index);
- __v4l2_ctrl_s_ctrl_int64(ov2740->pixel_rate,
- to_pixel_rate(mode->link_freq_index));
-
- /* Update limits and set FPS to default */
- vblank_def = mode->vts_def - mode->height;
- __v4l2_ctrl_modify_range(ov2740->vblank,
- mode->vts_min - mode->height,
- OV2740_VTS_MAX - mode->height, 1,
- vblank_def);
- __v4l2_ctrl_s_ctrl(ov2740->vblank, vblank_def);
- h_blank = to_pixels_per_line(mode->hts, mode->link_freq_index) -
- mode->width;
- __v4l2_ctrl_modify_range(ov2740->hblank, h_blank, h_blank, 1,
- h_blank);
- }
- mutex_unlock(&ov2740->mutex);
-
- return 0;
-}
-
-static int ov2740_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *fmt)
-{
- struct ov2740 *ov2740 = to_ov2740(sd);
+ *v4l2_subdev_get_pad_format(sd, sd_state, fmt->pad) = fmt->format;
- mutex_lock(&ov2740->mutex);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(&ov2740->sd,
- sd_state,
- fmt->pad);
- else
- ov2740_update_pad_format(ov2740->cur_mode, &fmt->format);
+ return 0;
- mutex_unlock(&ov2740->mutex);
+ ov2740->cur_mode = mode;
+ __v4l2_ctrl_s_ctrl(ov2740->link_freq, mode->link_freq_index);
+ __v4l2_ctrl_s_ctrl_int64(ov2740->pixel_rate,
+ to_pixel_rate(mode->link_freq_index));
+
+ /* Update limits and set FPS to default */
+ vblank_def = mode->vts_def - mode->height;
+ __v4l2_ctrl_modify_range(ov2740->vblank,
+ mode->vts_min - mode->height,
+ OV2740_VTS_MAX - mode->height, 1, vblank_def);
+ __v4l2_ctrl_s_ctrl(ov2740->vblank, vblank_def);
+ h_blank = to_pixels_per_line(mode->hts, mode->link_freq_index) -
+ mode->width;
+ __v4l2_ctrl_modify_range(ov2740->hblank, h_blank, h_blank, 1, h_blank);
return 0;
}
@@ -946,14 +879,11 @@ static int ov2740_enum_frame_size(struct v4l2_subdev *sd,
return 0;
}
-static int ov2740_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+static int ov2740_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
- struct ov2740 *ov2740 = to_ov2740(sd);
-
- mutex_lock(&ov2740->mutex);
ov2740_update_pad_format(&supported_modes[0],
- v4l2_subdev_get_try_format(sd, fh->state, 0));
- mutex_unlock(&ov2740->mutex);
+ v4l2_subdev_get_pad_format(sd, sd_state, 0));
return 0;
}
@@ -963,10 +893,11 @@ static const struct v4l2_subdev_video_ops ov2740_video_ops = {
};
static const struct v4l2_subdev_pad_ops ov2740_pad_ops = {
+ .get_fmt = v4l2_subdev_get_fmt,
.set_fmt = ov2740_set_format,
- .get_fmt = ov2740_get_format,
.enum_mbus_code = ov2740_enum_mbus_code,
.enum_frame_size = ov2740_enum_frame_size,
+ .init_cfg = ov2740_init_cfg,
};
static const struct v4l2_subdev_ops ov2740_subdev_ops = {
@@ -978,10 +909,6 @@ static const struct media_entity_operations ov2740_subdev_entity_ops = {
.link_validate = v4l2_subdev_link_validate,
};
-static const struct v4l2_subdev_internal_ops ov2740_internal_ops = {
- .open = ov2740_open,
-};
-
static int ov2740_check_hwcfg(struct device *dev)
{
struct fwnode_handle *ep;
@@ -1004,7 +931,7 @@ static int ov2740_check_hwcfg(struct device *dev)
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (!ep)
- return -ENXIO;
+ return -EPROBE_DEFER;
ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
fwnode_handle_put(ep);
@@ -1047,13 +974,12 @@ check_hwcfg_error:
static void ov2740_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct ov2740 *ov2740 = to_ov2740(sd);
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
+ v4l2_subdev_cleanup(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
pm_runtime_disable(&client->dev);
- mutex_destroy(&ov2740->mutex);
}
static int ov2740_nvmem_read(void *priv, unsigned int off, void *val,
@@ -1062,9 +988,11 @@ static int ov2740_nvmem_read(void *priv, unsigned int off, void *val,
struct nvm_data *nvm = priv;
struct device *dev = regmap_get_device(nvm->regmap);
struct ov2740 *ov2740 = to_ov2740(dev_get_drvdata(dev));
+ struct v4l2_subdev_state *sd_state;
int ret = 0;
- mutex_lock(&ov2740->mutex);
+ /* Serialise sensor access */
+ sd_state = v4l2_subdev_lock_and_get_active_state(&ov2740->sd);
if (nvm->nvm_buffer) {
memcpy(val, nvm->nvm_buffer + off, count);
@@ -1082,7 +1010,7 @@ static int ov2740_nvmem_read(void *priv, unsigned int off, void *val,
pm_runtime_put(dev);
exit:
- mutex_unlock(&ov2740->mutex);
+ v4l2_subdev_unlock_state(sd_state);
return ret;
}
@@ -1153,7 +1081,6 @@ static int ov2740_probe(struct i2c_client *client)
return dev_err_probe(dev, ret, "failed to find sensor\n");
}
- mutex_init(&ov2740->mutex);
ov2740->cur_mode = &supported_modes[0];
ret = ov2740_init_controls(ov2740);
if (ret) {
@@ -1161,7 +1088,7 @@ static int ov2740_probe(struct i2c_client *client)
goto probe_error_v4l2_ctrl_handler_free;
}
- ov2740->sd.internal_ops = &ov2740_internal_ops;
+ ov2740->sd.state_lock = ov2740->ctrl_handler.lock;
ov2740->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
ov2740->sd.entity.ops = &ov2740_subdev_entity_ops;
ov2740->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
@@ -1172,36 +1099,42 @@ static int ov2740_probe(struct i2c_client *client)
goto probe_error_v4l2_ctrl_handler_free;
}
+ ret = v4l2_subdev_init_finalize(&ov2740->sd);
+ if (ret)
+ goto probe_error_media_entity_cleanup;
+
+ /* Set the device's state to active if it's in D0 state. */
+ if (full_power)
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
ret = v4l2_async_register_subdev_sensor(&ov2740->sd);
if (ret < 0) {
dev_err_probe(dev, ret, "failed to register V4L2 subdev\n");
- goto probe_error_media_entity_cleanup;
+ goto probe_error_v4l2_subdev_cleanup;
}
ret = ov2740_register_nvmem(client, ov2740);
if (ret)
dev_warn(&client->dev, "register nvmem failed, ret %d\n", ret);
- /* Set the device's state to active if it's in D0 state. */
- if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
-
return 0;
+probe_error_v4l2_subdev_cleanup:
+ v4l2_subdev_cleanup(&ov2740->sd);
+
probe_error_media_entity_cleanup:
media_entity_cleanup(&ov2740->sd.entity);
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
probe_error_v4l2_ctrl_handler_free:
v4l2_ctrl_handler_free(ov2740->sd.ctrl_handler);
- mutex_destroy(&ov2740->mutex);
return ret;
}
-static DEFINE_SIMPLE_DEV_PM_OPS(ov2740_pm_ops, ov2740_suspend, ov2740_resume);
-
static const struct acpi_device_id ov2740_acpi_ids[] = {
{"INT3474"},
{}
@@ -1212,7 +1145,6 @@ MODULE_DEVICE_TABLE(acpi, ov2740_acpi_ids);
static struct i2c_driver ov2740_i2c_driver = {
.driver = {
.name = "ov2740",
- .pm = pm_sleep_ptr(&ov2740_pm_ops),
.acpi_match_table = ov2740_acpi_ids,
},
.probe = ov2740_probe,
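
The ov2740 conversion above goes further than most drivers in this series: the private mutex is removed entirely and serialisation is delegated to the subdev active state, whose lock is shared with the control handler through sd->state_lock, while try/active format handling collapses into v4l2_subdev_get_pad_format() plus an init_cfg callback and the stock v4l2_subdev_get_fmt helper. A rough sketch of that shape, with placeholder names (not the exact ov2740 symbols):

static int sensor_init_cfg(struct v4l2_subdev *sd,
			   struct v4l2_subdev_state *sd_state)
{
	/* Seed the state (try or active) with the default mode. */
	sensor_update_pad_format(&supported_modes[0],
				 v4l2_subdev_get_pad_format(sd, sd_state, 0));
	return 0;
}

static const struct v4l2_subdev_pad_ops sensor_pad_ops = {
	.get_fmt	 = v4l2_subdev_get_fmt,	/* served straight from the state */
	.set_fmt	 = sensor_set_format,
	.enum_mbus_code	 = sensor_enum_mbus_code,
	.enum_frame_size = sensor_enum_frame_size,
	.init_cfg	 = sensor_init_cfg,
};

/* In probe(), before registering the subdev: */
sd->state_lock = sensor->ctrl_handler.lock;
ret = v4l2_subdev_init_finalize(sd);

Anything that previously took the driver mutex (s_stream(), the nvmem read hook) now brackets the access with v4l2_subdev_lock_and_get_active_state() / v4l2_subdev_unlock_state() instead.
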
diff --git a/drivers/media/i2c/ov4689.c b/drivers/media/i2c/ov4689.c
index fda217d2cb10..3bd972a822e7 100644
--- a/drivers/media/i2c/ov4689.c
+++ b/drivers/media/i2c/ov4689.c
@@ -99,8 +99,7 @@ struct ov4689 {
u32 clock_rate;
- struct mutex mutex; /* lock to protect streaming, ctrls and cur_mode */
- bool streaming;
+ struct mutex mutex; /* lock to protect ctrls and cur_mode */
struct v4l2_ctrl_handler ctrl_handler;
struct v4l2_ctrl *exposure;
@@ -468,10 +467,6 @@ static int ov4689_s_stream(struct v4l2_subdev *sd, int on)
mutex_lock(&ov4689->mutex);
- on = !!on;
- if (on == ov4689->streaming)
- goto unlock_and_return;
-
if (on) {
ret = pm_runtime_resume_and_get(&client->dev);
if (ret < 0)
@@ -504,8 +499,6 @@ static int ov4689_s_stream(struct v4l2_subdev *sd, int on)
pm_runtime_put(&client->dev);
}
- ov4689->streaming = on;
-
unlock_and_return:
mutex_unlock(&ov4689->mutex);
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 5fe85aa2d2ec..40532f7bcabe 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -2850,12 +2850,22 @@ static int ov5640_try_fmt_internal(struct v4l2_subdev *sd,
return 0;
}
+static void __v4l2_ctrl_vblank_update(struct ov5640_dev *sensor, u32 vblank)
+{
+ const struct ov5640_mode_info *mode = sensor->current_mode;
+
+ __v4l2_ctrl_modify_range(sensor->ctrls.vblank, OV5640_MIN_VBLANK,
+ OV5640_MAX_VTS - mode->height, 1, vblank);
+
+ __v4l2_ctrl_s_ctrl(sensor->ctrls.vblank, vblank);
+}
+
static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
{
const struct ov5640_mode_info *mode = sensor->current_mode;
enum ov5640_pixel_rate_id pixel_rate_id = mode->pixel_rate;
struct v4l2_mbus_framefmt *fmt = &sensor->fmt;
- const struct ov5640_timings *timings;
+ const struct ov5640_timings *timings = ov5640_timings(sensor, mode);
s32 exposure_val, exposure_max;
unsigned int hblank;
unsigned int i = 0;
@@ -2874,6 +2884,8 @@ static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
__v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
ov5640_calc_pixel_rate(sensor));
+ __v4l2_ctrl_vblank_update(sensor, timings->vblank_def);
+
return 0;
}
@@ -2916,15 +2928,12 @@ static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
__v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate, pixel_rate);
__v4l2_ctrl_s_ctrl(sensor->ctrls.link_freq, i);
- timings = ov5640_timings(sensor, mode);
hblank = timings->htot - mode->width;
__v4l2_ctrl_modify_range(sensor->ctrls.hblank,
hblank, hblank, 1, hblank);
vblank = timings->vblank_def;
- __v4l2_ctrl_modify_range(sensor->ctrls.vblank, OV5640_MIN_VBLANK,
- OV5640_MAX_VTS - mode->height, 1, vblank);
- __v4l2_ctrl_s_ctrl(sensor->ctrls.vblank, vblank);
+ __v4l2_ctrl_vblank_update(sensor, vblank);
exposure_max = timings->crop.height + vblank - 4;
exposure_val = clamp_t(s32, sensor->ctrls.exposure->val,
@@ -3919,7 +3928,7 @@ static int ov5640_probe(struct i2c_client *client)
ret = ov5640_sensor_resume(dev);
if (ret) {
dev_err(dev, "failed to power on\n");
- goto entity_cleanup;
+ goto free_ctrls;
}
pm_runtime_set_active(dev);
@@ -3944,8 +3953,9 @@ static int ov5640_probe(struct i2c_client *client)
err_pm_runtime:
pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
- v4l2_ctrl_handler_free(&sensor->ctrls.handler);
ov5640_sensor_suspend(dev);
+free_ctrls:
+ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
entity_cleanup:
media_entity_cleanup(&sensor->sd.entity);
mutex_destroy(&sensor->lock);
diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c
index 8de398423b7c..dcfe3129c63a 100644
--- a/drivers/media/i2c/ov5647.c
+++ b/drivers/media/i2c/ov5647.c
@@ -109,7 +109,6 @@ struct ov5647 {
struct v4l2_ctrl *hblank;
struct v4l2_ctrl *vblank;
struct v4l2_ctrl *exposure;
- bool streaming;
};
static inline struct ov5647 *to_sensor(struct v4l2_subdev *sd)
@@ -898,10 +897,6 @@ static int ov5647_s_stream(struct v4l2_subdev *sd, int enable)
int ret;
mutex_lock(&sensor->lock);
- if (sensor->streaming == enable) {
- mutex_unlock(&sensor->lock);
- return 0;
- }
if (enable) {
ret = pm_runtime_resume_and_get(&client->dev);
@@ -922,7 +917,6 @@ static int ov5647_s_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_put(&client->dev);
}
- sensor->streaming = enable;
mutex_unlock(&sensor->lock);
return 0;
diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
index 29e773a997dd..e80db3ecd4f8 100644
--- a/drivers/media/i2c/ov5670.c
+++ b/drivers/media/i2c/ov5670.c
@@ -1882,8 +1882,6 @@ struct ov5670 {
/* To serialize asynchronous callbacks */
struct mutex mutex;
- /* Streaming on/off */
- bool streaming;
/* True if the device has been identified */
bool identified;
};
@@ -2471,8 +2469,6 @@ static int ov5670_set_stream(struct v4l2_subdev *sd, int enable)
int ret = 0;
mutex_lock(&ov5670->mutex);
- if (ov5670->streaming == enable)
- goto unlock_and_return;
if (enable) {
ret = pm_runtime_resume_and_get(&client->dev);
@@ -2486,7 +2482,6 @@ static int ov5670_set_stream(struct v4l2_subdev *sd, int enable)
ret = ov5670_stop_streaming(ov5670);
pm_runtime_put(&client->dev);
}
- ov5670->streaming = enable;
goto unlock_and_return;
error:
@@ -2541,34 +2536,6 @@ static int __maybe_unused ov5670_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused ov5670_suspend(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ov5670 *ov5670 = to_ov5670(sd);
-
- if (ov5670->streaming)
- ov5670_stop_streaming(ov5670);
-
- return 0;
-}
-
-static int __maybe_unused ov5670_resume(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ov5670 *ov5670 = to_ov5670(sd);
- int ret;
-
- if (ov5670->streaming) {
- ret = ov5670_start_streaming(ov5670);
- if (ret) {
- ov5670_stop_streaming(ov5670);
- return ret;
- }
- }
-
- return 0;
-}
-
static const struct v4l2_subdev_core_ops ov5670_core_ops = {
.log_status = v4l2_ctrl_subdev_log_status,
.subscribe_event = v4l2_ctrl_subdev_subscribe_event,
@@ -2771,8 +2738,6 @@ static int ov5670_probe(struct i2c_client *client)
goto error_handler_free;
}
- ov5670->streaming = false;
-
/* Set the device's state to active if it's in D0 state. */
if (full_power)
pm_runtime_set_active(&client->dev);
@@ -2827,7 +2792,6 @@ static void ov5670_remove(struct i2c_client *client)
}
static const struct dev_pm_ops ov5670_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ov5670_suspend, ov5670_resume)
SET_RUNTIME_PM_OPS(ov5670_runtime_suspend, ov5670_runtime_resume, NULL)
};
diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
index d5a2a5f82312..e63d9d402d34 100644
--- a/drivers/media/i2c/ov5675.c
+++ b/drivers/media/i2c/ov5675.c
@@ -513,9 +513,6 @@ struct ov5675 {
/* To serialize asynchronous callbacks */
struct mutex mutex;
- /* Streaming on/off */
- bool streaming;
-
/* True if the device has been identified */
bool identified;
};
@@ -949,9 +946,6 @@ static int ov5675_set_stream(struct v4l2_subdev *sd, int enable)
struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
- if (ov5675->streaming == enable)
- return 0;
-
mutex_lock(&ov5675->mutex);
if (enable) {
ret = pm_runtime_resume_and_get(&client->dev);
@@ -971,7 +965,6 @@ static int ov5675_set_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_put(&client->dev);
}
- ov5675->streaming = enable;
mutex_unlock(&ov5675->mutex);
return ret;
@@ -1027,42 +1020,6 @@ static int ov5675_power_on(struct device *dev)
return 0;
}
-static int __maybe_unused ov5675_suspend(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ov5675 *ov5675 = to_ov5675(sd);
-
- mutex_lock(&ov5675->mutex);
- if (ov5675->streaming)
- ov5675_stop_streaming(ov5675);
-
- mutex_unlock(&ov5675->mutex);
-
- return 0;
-}
-
-static int __maybe_unused ov5675_resume(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ov5675 *ov5675 = to_ov5675(sd);
- int ret;
-
- mutex_lock(&ov5675->mutex);
- if (ov5675->streaming) {
- ret = ov5675_start_streaming(ov5675);
- if (ret) {
- ov5675->streaming = false;
- ov5675_stop_streaming(ov5675);
- mutex_unlock(&ov5675->mutex);
- return ret;
- }
- }
-
- mutex_unlock(&ov5675->mutex);
-
- return 0;
-}
-
static int ov5675_set_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
@@ -1409,7 +1366,6 @@ probe_power_off:
}
static const struct dev_pm_ops ov5675_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ov5675_suspend, ov5675_resume)
SET_RUNTIME_PM_OPS(ov5675_power_off, ov5675_power_on, NULL)
};
diff --git a/drivers/media/i2c/ov5693.c b/drivers/media/i2c/ov5693.c
index 488ee6d9d301..819425e21349 100644
--- a/drivers/media/i2c/ov5693.c
+++ b/drivers/media/i2c/ov5693.c
@@ -154,7 +154,6 @@ struct ov5693_device {
unsigned int inc_y_odd;
unsigned int vts;
} mode;
- bool streaming;
struct v4l2_subdev sd;
struct media_pad pad;
@@ -975,9 +974,9 @@ static int ov5693_s_stream(struct v4l2_subdev *sd, int enable)
int ret;
if (enable) {
- ret = pm_runtime_get_sync(ov5693->dev);
- if (ret < 0)
- goto err_power_down;
+ ret = pm_runtime_resume_and_get(ov5693->dev);
+ if (ret)
+ return ret;
mutex_lock(&ov5693->lock);
ret = __v4l2_ctrl_handler_setup(&ov5693->ctrls.handler);
@@ -996,8 +995,6 @@ static int ov5693_s_stream(struct v4l2_subdev *sd, int enable)
if (ret)
goto err_power_down;
- ov5693->streaming = !!enable;
-
if (!enable)
pm_runtime_put(ov5693->dev);
diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c
index 3023b7254167..c8f57ce1578d 100644
--- a/drivers/media/i2c/ov5695.c
+++ b/drivers/media/i2c/ov5695.c
@@ -108,7 +108,6 @@ struct ov5695 {
struct v4l2_ctrl *vblank;
struct v4l2_ctrl *test_pattern;
struct mutex mutex;
- bool streaming;
const struct ov5695_mode *cur_mode;
};
@@ -821,9 +820,7 @@ static int ov5695_set_fmt(struct v4l2_subdev *sd,
fmt->format.height = mode->height;
fmt->format.field = V4L2_FIELD_NONE;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
*v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
-#endif
} else {
ov5695->cur_mode = mode;
h_blank = mode->hts_def - mode->width;
@@ -849,13 +846,8 @@ static int ov5695_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&ov5695->mutex);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
fmt->pad);
-#else
- mutex_unlock(&ov5695->mutex);
- return -EINVAL;
-#endif
} else {
fmt->format.width = mode->width;
fmt->format.height = mode->height;
@@ -942,9 +934,6 @@ static int ov5695_s_stream(struct v4l2_subdev *sd, int on)
int ret = 0;
mutex_lock(&ov5695->mutex);
- on = !!on;
- if (on == ov5695->streaming)
- goto unlock_and_return;
if (on) {
ret = pm_runtime_resume_and_get(&client->dev);
@@ -962,8 +951,6 @@ static int ov5695_s_stream(struct v4l2_subdev *sd, int on)
pm_runtime_put(&client->dev);
}
- ov5695->streaming = on;
-
unlock_and_return:
mutex_unlock(&ov5695->mutex);
@@ -1048,7 +1035,6 @@ static int __maybe_unused ov5695_runtime_suspend(struct device *dev)
return 0;
}
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
static int ov5695_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct ov5695 *ov5695 = to_ov5695(sd);
@@ -1068,18 +1054,15 @@ static int ov5695_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
return 0;
}
-#endif
static const struct dev_pm_ops ov5695_pm_ops = {
SET_RUNTIME_PM_OPS(ov5695_runtime_suspend,
ov5695_runtime_resume, NULL)
};
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
static const struct v4l2_subdev_internal_ops ov5695_internal_ops = {
.open = ov5695_open,
};
-#endif
static const struct v4l2_subdev_video_ops ov5695_video_ops = {
.s_stream = ov5695_s_stream,
@@ -1322,17 +1305,13 @@ static int ov5695_probe(struct i2c_client *client)
if (ret)
goto err_power_off;
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
sd->internal_ops = &ov5695_internal_ops;
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-#endif
-#if defined(CONFIG_MEDIA_CONTROLLER)
ov5695->pad.flags = MEDIA_PAD_FL_SOURCE;
sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
ret = media_entity_pads_init(&sd->entity, 1, &ov5695->pad);
if (ret < 0)
goto err_power_off;
-#endif
ret = v4l2_async_register_subdev_sensor(sd);
if (ret) {
@@ -1347,9 +1326,7 @@ static int ov5695_probe(struct i2c_client *client)
return 0;
err_clean_entity:
-#if defined(CONFIG_MEDIA_CONTROLLER)
media_entity_cleanup(&sd->entity);
-#endif
err_power_off:
__ov5695_power_off(ov5695);
err_free_handler:
@@ -1366,9 +1343,7 @@ static void ov5695_remove(struct i2c_client *client)
struct ov5695 *ov5695 = to_ov5695(sd);
v4l2_async_unregister_subdev(sd);
-#if defined(CONFIG_MEDIA_CONTROLLER)
media_entity_cleanup(&sd->entity);
-#endif
v4l2_ctrl_handler_free(&ov5695->ctrl_handler);
mutex_destroy(&ov5695->mutex);
diff --git a/drivers/media/i2c/ov7251.c b/drivers/media/i2c/ov7251.c
index 675fb37a6fea..6582cc0e2384 100644
--- a/drivers/media/i2c/ov7251.c
+++ b/drivers/media/i2c/ov7251.c
@@ -1340,9 +1340,11 @@ static int ov7251_s_stream(struct v4l2_subdev *subdev, int enable)
mutex_lock(&ov7251->lock);
if (enable) {
- ret = pm_runtime_get_sync(ov7251->dev);
- if (ret < 0)
- goto err_power_down;
+ ret = pm_runtime_resume_and_get(ov7251->dev);
+ if (ret) {
+ mutex_unlock(&ov7251->lock);
+ return ret;
+ }
ret = ov7251_pll_configure(ov7251);
if (ret) {
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index 2f55491ef571..172483597c54 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -10,6 +10,7 @@
*/
#include <linux/clk.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
@@ -186,11 +187,6 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
#define REG_HAECC7 0xaa /* Hist AEC/AGC control 7 */
#define REG_BD60MAX 0xab /* 60hz banding step limit */
-enum ov7670_model {
- MODEL_OV7670 = 0,
- MODEL_OV7675,
-};
-
struct ov7670_win_size {
int width;
int height;
@@ -217,9 +213,7 @@ struct ov7670_devtype {
struct ov7670_format_struct; /* coming later */
struct ov7670_info {
struct v4l2_subdev sd;
-#if defined(CONFIG_MEDIA_CONTROLLER)
struct media_pad pad;
-#endif
struct v4l2_ctrl_handler hdl;
struct {
/* gain cluster */
@@ -1108,9 +1102,7 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_format *format)
{
struct ov7670_info *info = to_state(sd);
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
struct v4l2_mbus_framefmt *mbus_fmt;
-#endif
int ret;
if (format->pad)
@@ -1120,11 +1112,9 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd,
ret = ov7670_try_fmt_internal(sd, &format->format, NULL, NULL);
if (ret)
return ret;
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
mbus_fmt = v4l2_subdev_get_try_format(sd, sd_state,
format->pad);
*mbus_fmt = format->format;
-#endif
return 0;
}
@@ -1148,18 +1138,12 @@ static int ov7670_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_format *format)
{
struct ov7670_info *info = to_state(sd);
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
struct v4l2_mbus_framefmt *mbus_fmt;
-#endif
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
mbus_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
format->format = *mbus_fmt;
return 0;
-#else
- return -EINVAL;
-#endif
} else {
format->format = info->format;
}
@@ -1720,7 +1704,6 @@ static void ov7670_get_default_format(struct v4l2_subdev *sd,
format->field = V4L2_FIELD_NONE;
}
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
static int ov7670_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *format =
@@ -1730,7 +1713,6 @@ static int ov7670_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
return 0;
}
-#endif
/* ----------------------------------------------------------------------- */
@@ -1766,29 +1748,12 @@ static const struct v4l2_subdev_ops ov7670_ops = {
.pad = &ov7670_pad_ops,
};
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
static const struct v4l2_subdev_internal_ops ov7670_subdev_internal_ops = {
.open = ov7670_open,
};
-#endif
/* ----------------------------------------------------------------------- */
-static const struct ov7670_devtype ov7670_devdata[] = {
- [MODEL_OV7670] = {
- .win_sizes = ov7670_win_sizes,
- .n_win_sizes = ARRAY_SIZE(ov7670_win_sizes),
- .set_framerate = ov7670_set_framerate_legacy,
- .get_framerate = ov7670_get_framerate_legacy,
- },
- [MODEL_OV7675] = {
- .win_sizes = ov7675_win_sizes,
- .n_win_sizes = ARRAY_SIZE(ov7675_win_sizes),
- .set_framerate = ov7675_set_framerate,
- .get_framerate = ov7675_get_framerate,
- },
-};
-
static int ov7670_init_gpio(struct i2c_client *client, struct ov7670_info *info)
{
info->pwdn_gpio = devm_gpiod_get_optional(&client->dev, "powerdown",
@@ -1849,7 +1814,6 @@ static int ov7670_parse_dt(struct device *dev,
static int ov7670_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct v4l2_fract tpf;
struct v4l2_subdev *sd;
struct ov7670_info *info;
@@ -1861,10 +1825,8 @@ static int ov7670_probe(struct i2c_client *client)
sd = &info->sd;
v4l2_i2c_subdev_init(sd, client, &ov7670_ops);
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
sd->internal_ops = &ov7670_subdev_internal_ops;
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
-#endif
info->clock_speed = 30; /* default: a guess */
@@ -1923,7 +1885,7 @@ static int ov7670_probe(struct i2c_client *client)
v4l_info(client, "chip found @ 0x%02x (%s)\n",
client->addr << 1, client->adapter->name);
- info->devtype = &ov7670_devdata[id->driver_data];
+ info->devtype = i2c_get_match_data(client);
info->fmt = &ov7670_formats[0];
info->wsize = &info->devtype->win_sizes[0];
@@ -1977,13 +1939,11 @@ static int ov7670_probe(struct i2c_client *client)
V4L2_EXPOSURE_MANUAL, false);
v4l2_ctrl_cluster(2, &info->saturation);
-#if defined(CONFIG_MEDIA_CONTROLLER)
info->pad.flags = MEDIA_PAD_FL_SOURCE;
info->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
ret = media_entity_pads_init(&info->sd.entity, 1, &info->pad);
if (ret < 0)
goto hdl_free;
-#endif
v4l2_ctrl_handler_setup(&info->hdl);
@@ -2013,25 +1973,37 @@ static void ov7670_remove(struct i2c_client *client)
media_entity_cleanup(&info->sd.entity);
}
+static const struct ov7670_devtype ov7670_devdata = {
+ .win_sizes = ov7670_win_sizes,
+ .n_win_sizes = ARRAY_SIZE(ov7670_win_sizes),
+ .set_framerate = ov7670_set_framerate_legacy,
+ .get_framerate = ov7670_get_framerate_legacy,
+};
+
+static const struct ov7670_devtype ov7675_devdata = {
+ .win_sizes = ov7675_win_sizes,
+ .n_win_sizes = ARRAY_SIZE(ov7675_win_sizes),
+ .set_framerate = ov7675_set_framerate,
+ .get_framerate = ov7675_get_framerate,
+};
+
static const struct i2c_device_id ov7670_id[] = {
- { "ov7670", MODEL_OV7670 },
- { "ov7675", MODEL_OV7675 },
- { }
+ { "ov7670", (kernel_ulong_t)&ov7670_devdata },
+ { "ov7675", (kernel_ulong_t)&ov7675_devdata },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, ov7670_id);
-#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id ov7670_of_match[] = {
- { .compatible = "ovti,ov7670", },
- { /* sentinel */ },
+ { .compatible = "ovti,ov7670", &ov7670_devdata },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ov7670_of_match);
-#endif
static struct i2c_driver ov7670_driver = {
.driver = {
.name = "ov7670",
- .of_match_table = of_match_ptr(ov7670_of_match),
+ .of_match_table = ov7670_of_match,
},
.probe = ov7670_probe,
.remove = ov7670_remove,
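
The ov7670 hunks above also fold the per-variant data into the match tables: instead of keeping an enum index in i2c_device_id->driver_data and indexing a static array, each table entry carries a pointer to its devtype structure and probe() fetches it with i2c_get_match_data(), which covers both OF and plain I2C-ID matching, so the of_match_ptr()/CONFIG_OF guards can go. A sketch of the pattern with hypothetical names:

static const struct sensor_devtype sensor_a_devdata = {
	.win_sizes = sensor_a_win_sizes,
	.n_win_sizes = ARRAY_SIZE(sensor_a_win_sizes),
};

static const struct i2c_device_id sensor_id[] = {
	{ "sensor-a", (kernel_ulong_t)&sensor_a_devdata },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, sensor_id);

static const struct of_device_id sensor_of_match[] = {
	{ .compatible = "vendor,sensor-a", .data = &sensor_a_devdata },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sensor_of_match);

/* In probe(): returns the matched entry's data pointer. */
info->devtype = i2c_get_match_data(client);
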
diff --git a/drivers/media/i2c/ov772x.c b/drivers/media/i2c/ov772x.c
index 386d69c8e074..7618b58a7ad0 100644
--- a/drivers/media/i2c/ov772x.c
+++ b/drivers/media/i2c/ov772x.c
@@ -433,9 +433,7 @@ struct ov772x_priv {
struct mutex lock;
int power_count;
int streaming;
-#ifdef CONFIG_MEDIA_CONTROLLER
struct media_pad pad;
-#endif
enum v4l2_mbus_type bus_type;
};
@@ -1488,13 +1486,11 @@ static int ov772x_probe(struct i2c_client *client)
if (ret < 0)
goto error_gpio_put;
-#ifdef CONFIG_MEDIA_CONTROLLER
priv->pad.flags = MEDIA_PAD_FL_SOURCE;
priv->subdev.entity.function = MEDIA_ENT_F_CAM_SENSOR;
ret = media_entity_pads_init(&priv->subdev.entity, 1, &priv->pad);
if (ret < 0)
goto error_gpio_put;
-#endif
priv->cfmt = &ov772x_cfmts[0];
priv->win = &ov772x_win_sizes[0];
diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
index dffdb475e433..356a45e65b81 100644
--- a/drivers/media/i2c/ov7740.c
+++ b/drivers/media/i2c/ov7740.c
@@ -83,9 +83,7 @@
struct ov7740 {
struct v4l2_subdev subdev;
-#if defined(CONFIG_MEDIA_CONTROLLER)
struct media_pad pad;
-#endif
struct v4l2_mbus_framefmt format;
const struct ov7740_pixfmt *fmt; /* Current format */
const struct ov7740_framesize *frmsize;
@@ -120,7 +118,6 @@ struct ov7740 {
struct v4l2_ctrl *contrast;
struct mutex mutex; /* To serialize asynchronous callbacks */
- bool streaming; /* Streaming on/off */
struct gpio_desc *resetb_gpio;
struct gpio_desc *pwdn_gpio;
@@ -618,10 +615,6 @@ static int ov7740_set_stream(struct v4l2_subdev *sd, int enable)
int ret = 0;
mutex_lock(&ov7740->mutex);
- if (ov7740->streaming == enable) {
- mutex_unlock(&ov7740->mutex);
- return 0;
- }
if (enable) {
ret = pm_runtime_resume_and_get(&client->dev);
@@ -635,8 +628,6 @@ static int ov7740_set_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_put(&client->dev);
}
- ov7740->streaming = enable;
-
mutex_unlock(&ov7740->mutex);
return ret;
@@ -807,9 +798,7 @@ static int ov7740_set_fmt(struct v4l2_subdev *sd,
struct ov7740 *ov7740 = container_of(sd, struct ov7740, subdev);
const struct ov7740_pixfmt *ovfmt;
const struct ov7740_framesize *fsize;
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
struct v4l2_mbus_framefmt *mbus_fmt;
-#endif
int ret;
mutex_lock(&ov7740->mutex);
@@ -822,11 +811,10 @@ static int ov7740_set_fmt(struct v4l2_subdev *sd,
ret = ov7740_try_fmt_internal(sd, &format->format, NULL, NULL);
if (ret)
goto error;
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+
mbus_fmt = v4l2_subdev_get_try_format(sd, sd_state,
format->pad);
*mbus_fmt = format->format;
-#endif
mutex_unlock(&ov7740->mutex);
return 0;
}
@@ -851,26 +839,18 @@ static int ov7740_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_format *format)
{
struct ov7740 *ov7740 = container_of(sd, struct ov7740, subdev);
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
struct v4l2_mbus_framefmt *mbus_fmt;
-#endif
- int ret = 0;
mutex_lock(&ov7740->mutex);
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
mbus_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
format->format = *mbus_fmt;
- ret = 0;
-#else
- ret = -EINVAL;
-#endif
} else {
format->format = ov7740->format;
}
mutex_unlock(&ov7740->mutex);
- return ret;
+ return 0;
}
static const struct v4l2_subdev_pad_ops ov7740_subdev_pad_ops = {
@@ -899,7 +879,6 @@ static void ov7740_get_default_format(struct v4l2_subdev *sd,
format->field = V4L2_FIELD_NONE;
}
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
static int ov7740_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct ov7740 *ov7740 = container_of(sd, struct ov7740, subdev);
@@ -916,7 +895,6 @@ static int ov7740_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
static const struct v4l2_subdev_internal_ops ov7740_subdev_internal_ops = {
.open = ov7740_open,
};
-#endif
static int ov7740_probe_dt(struct i2c_client *client,
struct ov7740 *ov7740)
@@ -1094,18 +1072,14 @@ static int ov7740_probe(struct i2c_client *client)
sd = &ov7740->subdev;
v4l2_i2c_subdev_init(sd, client, &ov7740_subdev_ops);
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
sd->internal_ops = &ov7740_subdev_internal_ops;
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
-#endif
-#if defined(CONFIG_MEDIA_CONTROLLER)
ov7740->pad.flags = MEDIA_PAD_FL_SOURCE;
sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
ret = media_entity_pads_init(&sd->entity, 1, &ov7740->pad);
if (ret)
return ret;
-#endif
ret = ov7740_set_power(ov7740, 1);
if (ret)
diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
index f053c3a7676a..a0f673a24e52 100644
--- a/drivers/media/i2c/ov8856.c
+++ b/drivers/media/i2c/ov8856.c
@@ -1438,9 +1438,6 @@ struct ov8856 {
/* To serialize asynchronous callbacks */
struct mutex mutex;
- /* Streaming on/off */
- bool streaming;
-
/* lanes index */
u8 nlanes;
@@ -2042,9 +2039,6 @@ static int ov8856_set_stream(struct v4l2_subdev *sd, int enable)
struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
- if (ov8856->streaming == enable)
- return 0;
-
mutex_lock(&ov8856->mutex);
if (enable) {
ret = pm_runtime_resume_and_get(&client->dev);
@@ -2064,7 +2058,6 @@ static int ov8856_set_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_put(&client->dev);
}
- ov8856->streaming = enable;
mutex_unlock(&ov8856->mutex);
return ret;
@@ -2125,45 +2118,6 @@ static int ov8856_power_off(struct device *dev)
return 0;
}
-static int __maybe_unused ov8856_suspend(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ov8856 *ov8856 = to_ov8856(sd);
-
- mutex_lock(&ov8856->mutex);
- if (ov8856->streaming)
- ov8856_stop_streaming(ov8856);
-
- ov8856_power_off(dev);
- mutex_unlock(&ov8856->mutex);
-
- return 0;
-}
-
-static int __maybe_unused ov8856_resume(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ov8856 *ov8856 = to_ov8856(sd);
- int ret;
-
- mutex_lock(&ov8856->mutex);
-
- ov8856_power_on(dev);
- if (ov8856->streaming) {
- ret = ov8856_start_streaming(ov8856);
- if (ret) {
- ov8856->streaming = false;
- ov8856_stop_streaming(ov8856);
- mutex_unlock(&ov8856->mutex);
- return ret;
- }
- }
-
- mutex_unlock(&ov8856->mutex);
-
- return 0;
-}
-
static int ov8856_set_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
@@ -2501,7 +2455,6 @@ probe_power_off:
}
static const struct dev_pm_ops ov8856_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ov8856_suspend, ov8856_resume)
SET_RUNTIME_PM_OPS(ov8856_power_off, ov8856_power_on, NULL)
};
diff --git a/drivers/media/i2c/ov9282.c b/drivers/media/i2c/ov9282.c
index 068c7449f50e..bf6dfce1b5dd 100644
--- a/drivers/media/i2c/ov9282.c
+++ b/drivers/media/i2c/ov9282.c
@@ -165,7 +165,6 @@ struct ov9282_mode {
* @cur_mode: Pointer to current selected sensor mode
* @code: Mbus code currently selected
* @mutex: Mutex for serializing sensor controls
- * @streaming: Flag indicating streaming state
*/
struct ov9282 {
struct device *dev;
@@ -188,7 +187,6 @@ struct ov9282 {
const struct ov9282_mode *cur_mode;
u32 code;
struct mutex mutex;
- bool streaming;
};
static const s64 link_freq[] = {
@@ -1037,11 +1035,6 @@ static int ov9282_set_stream(struct v4l2_subdev *sd, int enable)
mutex_lock(&ov9282->mutex);
- if (ov9282->streaming == enable) {
- mutex_unlock(&ov9282->mutex);
- return 0;
- }
-
if (enable) {
ret = pm_runtime_resume_and_get(ov9282->dev);
if (ret)
@@ -1055,8 +1048,6 @@ static int ov9282_set_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_put(ov9282->dev);
}
- ov9282->streaming = enable;
-
mutex_unlock(&ov9282->mutex);
return 0;
diff --git a/drivers/media/i2c/ov9734.c b/drivers/media/i2c/ov9734.c
index b6244772bc59..ee3315299605 100644
--- a/drivers/media/i2c/ov9734.c
+++ b/drivers/media/i2c/ov9734.c
@@ -337,9 +337,6 @@ struct ov9734 {
/* To serialize asynchronous callbacks */
struct mutex mutex;
-
- /* Streaming on/off */
- bool streaming;
};
static inline struct ov9734 *to_ov9734(struct v4l2_subdev *subdev)
@@ -660,10 +657,6 @@ static int ov9734_set_stream(struct v4l2_subdev *sd, int enable)
int ret = 0;
mutex_lock(&ov9734->mutex);
- if (ov9734->streaming == enable) {
- mutex_unlock(&ov9734->mutex);
- return 0;
- }
if (enable) {
ret = pm_runtime_resume_and_get(&client->dev);
@@ -683,46 +676,8 @@ static int ov9734_set_stream(struct v4l2_subdev *sd, int enable)
pm_runtime_put(&client->dev);
}
- ov9734->streaming = enable;
- mutex_unlock(&ov9734->mutex);
-
- return ret;
-}
-
-static int __maybe_unused ov9734_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct ov9734 *ov9734 = to_ov9734(sd);
-
- mutex_lock(&ov9734->mutex);
- if (ov9734->streaming)
- ov9734_stop_streaming(ov9734);
-
mutex_unlock(&ov9734->mutex);
- return 0;
-}
-
-static int __maybe_unused ov9734_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct ov9734 *ov9734 = to_ov9734(sd);
- int ret = 0;
-
- mutex_lock(&ov9734->mutex);
- if (!ov9734->streaming)
- goto exit;
-
- ret = ov9734_start_streaming(ov9734);
- if (ret) {
- ov9734->streaming = false;
- ov9734_stop_streaming(ov9734);
- }
-
-exit:
- mutex_unlock(&ov9734->mutex);
return ret;
}
@@ -1011,10 +966,6 @@ probe_error_v4l2_ctrl_handler_free:
return ret;
}
-static const struct dev_pm_ops ov9734_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ov9734_suspend, ov9734_resume)
-};
-
static const struct acpi_device_id ov9734_acpi_ids[] = {
{ "OVTI9734", },
{}
@@ -1025,7 +976,6 @@ MODULE_DEVICE_TABLE(acpi, ov9734_acpi_ids);
static struct i2c_driver ov9734_i2c_driver = {
.driver = {
.name = "ov9734",
- .pm = &ov9734_pm_ops,
.acpi_match_table = ov9734_acpi_ids,
},
.probe = ov9734_probe,
diff --git a/drivers/media/i2c/rdacm20.c b/drivers/media/i2c/rdacm20.c
index f4e2e2f3972a..b4647bda8c21 100644
--- a/drivers/media/i2c/rdacm20.c
+++ b/drivers/media/i2c/rdacm20.c
@@ -625,8 +625,7 @@ error_free_ctrls:
v4l2_ctrl_handler_free(&dev->ctrls);
error:
media_entity_cleanup(&dev->sd.entity);
- if (dev->sensor)
- i2c_unregister_device(dev->sensor);
+ i2c_unregister_device(dev->sensor);
dev_err(&client->dev, "probe failed\n");
diff --git a/drivers/media/i2c/st-vgxy61.c b/drivers/media/i2c/st-vgxy61.c
index 30f82ca344c4..5dbfb04b3124 100644
--- a/drivers/media/i2c/st-vgxy61.c
+++ b/drivers/media/i2c/st-vgxy61.c
@@ -1170,14 +1170,9 @@ static int vgxy61_stream_enable(struct vgxy61_dev *sensor)
if (ret)
return ret;
- ret = pm_runtime_get_sync(&client->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret)
return ret;
- }
-
- /* pm_runtime_get_sync() can return 1 as a valid return code */
- ret = 0;
vgxy61_write_reg(sensor, VGXY61_REG_FORMAT_CTRL,
get_bpp_by_code(sensor->fmt.code), &ret);
diff --git a/drivers/media/i2c/tc358746.c b/drivers/media/i2c/tc358746.c
index 566f5eaddd57..ce612a47ba84 100644
--- a/drivers/media/i2c/tc358746.c
+++ b/drivers/media/i2c/tc358746.c
@@ -784,8 +784,12 @@ static int tc358746_set_fmt(struct v4l2_subdev *sd,
sink_fmt = v4l2_subdev_get_pad_format(sd, sd_state, TC358746_SINK);
fmt = tc358746_get_format_by_code(format->pad, format->format.code);
- if (IS_ERR(fmt))
+ if (IS_ERR(fmt)) {
fmt = tc358746_get_format_by_code(format->pad, tc358746_def_fmt.code);
+ // Can't happen, but just in case...
+ if (WARN_ON(IS_ERR(fmt)))
+ return -EINVAL;
+ }
format->format.code = fmt->code;
format->format.field = V4L2_FIELD_NONE;
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index aa6d4b67b6d5..c37f605cb75f 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -16,23 +16,24 @@
* Prabhakar Lad <prabhakar.lad@ti.com>
*/
-#include <linux/i2c.h>
-#include <linux/slab.h>
#include <linux/delay.h>
-#include <linux/videodev2.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/v4l2-mediabus.h>
#include <linux/of.h>
#include <linux/of_graph.h>
+#include <linux/slab.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/videodev2.h>
+#include <media/i2c/tvp514x.h>
+#include <media/media-entity.h>
#include <media/v4l2-async.h>
-#include <media/v4l2-device.h>
#include <media/v4l2-common.h>
-#include <media/v4l2-mediabus.h>
-#include <media/v4l2-fwnode.h>
#include <media/v4l2-ctrls.h>
-#include <media/i2c/tvp514x.h>
-#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mediabus.h>
#include "tvp514x_regs.h"
@@ -118,7 +119,7 @@ struct tvp514x_decoder {
struct media_pad pad;
struct v4l2_mbus_framefmt format;
- struct tvp514x_reg *int_seq;
+ const struct tvp514x_reg *int_seq;
};
/* TVP514x default register values */
@@ -1024,7 +1025,6 @@ done:
static int
tvp514x_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct tvp514x_platform_data *pdata = tvp514x_get_pdata(client);
struct tvp514x_decoder *decoder;
struct v4l2_subdev *sd;
@@ -1049,7 +1049,7 @@ tvp514x_probe(struct i2c_client *client)
memcpy(decoder->tvp514x_regs, tvp514x_reg_list_default,
sizeof(tvp514x_reg_list_default));
- decoder->int_seq = (struct tvp514x_reg *)id->driver_data;
+ decoder->int_seq = i2c_get_match_data(client);
/* Copy board specific information here */
decoder->pdata = pdata;
@@ -1183,29 +1183,26 @@ static const struct tvp514x_reg tvp514xm_init_reg_seq[] = {
* driver_data - Driver data
*/
static const struct i2c_device_id tvp514x_id[] = {
- {"tvp5146", (unsigned long)tvp5146_init_reg_seq},
- {"tvp5146m2", (unsigned long)tvp514xm_init_reg_seq},
- {"tvp5147", (unsigned long)tvp5147_init_reg_seq},
- {"tvp5147m1", (unsigned long)tvp514xm_init_reg_seq},
- {},
+ {"tvp5146", (kernel_ulong_t)tvp5146_init_reg_seq },
+ {"tvp5146m2", (kernel_ulong_t)tvp514xm_init_reg_seq },
+ {"tvp5147", (kernel_ulong_t)tvp5147_init_reg_seq },
+ {"tvp5147m1", (kernel_ulong_t)tvp514xm_init_reg_seq },
+ { /* sentinel */ }
};
-
MODULE_DEVICE_TABLE(i2c, tvp514x_id);
-#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id tvp514x_of_match[] = {
- { .compatible = "ti,tvp5146", },
- { .compatible = "ti,tvp5146m2", },
- { .compatible = "ti,tvp5147", },
- { .compatible = "ti,tvp5147m1", },
- { /* sentinel */ },
+ { .compatible = "ti,tvp5146", .data = tvp5146_init_reg_seq },
+ { .compatible = "ti,tvp5146m2", .data = tvp514xm_init_reg_seq },
+ { .compatible = "ti,tvp5147", .data = tvp5147_init_reg_seq },
+ { .compatible = "ti,tvp5147m1", .data = tvp514xm_init_reg_seq },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tvp514x_of_match);
-#endif
static struct i2c_driver tvp514x_driver = {
.driver = {
- .of_match_table = of_match_ptr(tvp514x_of_match),
+ .of_match_table = tvp514x_of_match,
.name = TVP514X_MODULE_NAME,
},
.probe = tvp514x_probe,
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
index 537ebd9fa8d7..178bd06cc2ed 100644
--- a/drivers/media/i2c/video-i2c.c
+++ b/drivers/media/i2c/video-i2c.c
@@ -759,7 +759,6 @@ static void video_i2c_release(struct video_device *vdev)
static int video_i2c_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct video_i2c_data *data;
struct v4l2_device *v4l2_dev;
struct vb2_queue *queue;
@@ -769,11 +768,8 @@ static int video_i2c_probe(struct i2c_client *client)
if (!data)
return -ENOMEM;
- if (dev_fwnode(&client->dev))
- data->chip = device_get_match_data(&client->dev);
- else if (id)
- data->chip = &video_i2c_chip[id->driver_data];
- else
+ data->chip = i2c_get_match_data(client);
+ if (!data->chip)
goto error_free_device;
data->regmap = regmap_init_i2c(client, data->chip->regmap_config);
@@ -940,8 +936,8 @@ static const struct dev_pm_ops video_i2c_pm_ops = {
};
static const struct i2c_device_id video_i2c_id_table[] = {
- { "amg88xx", AMG88XX },
- { "mlx90640", MLX90640 },
+ { "amg88xx", (kernel_ulong_t)&video_i2c_chip[AMG88XX] },
+ { "mlx90640", (kernel_ulong_t)&video_i2c_chip[MLX90640] },
{}
};
MODULE_DEVICE_TABLE(i2c, video_i2c_id_table);
diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
index 83468d4a440b..543a392f8635 100644
--- a/drivers/media/mc/mc-entity.c
+++ b/drivers/media/mc/mc-entity.c
@@ -197,6 +197,7 @@ int media_entity_pads_init(struct media_entity *entity, u16 num_pads,
struct media_device *mdev = entity->graph_obj.mdev;
struct media_pad *iter;
unsigned int i = 0;
+ int ret = 0;
if (num_pads >= MEDIA_ENTITY_MAX_PADS)
return -E2BIG;
@@ -210,15 +211,27 @@ int media_entity_pads_init(struct media_entity *entity, u16 num_pads,
media_entity_for_each_pad(entity, iter) {
iter->entity = entity;
iter->index = i++;
+
+ if (hweight32(iter->flags & (MEDIA_PAD_FL_SINK |
+ MEDIA_PAD_FL_SOURCE)) != 1) {
+ ret = -EINVAL;
+ break;
+ }
+
if (mdev)
media_gobj_create(mdev, MEDIA_GRAPH_PAD,
&iter->graph_obj);
}
+ if (ret && mdev) {
+ media_entity_for_each_pad(entity, iter)
+ media_gobj_destroy(&iter->graph_obj);
+ }
+
if (mdev)
mutex_unlock(&mdev->graph_mutex);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(media_entity_pads_init);
diff --git a/drivers/media/pci/Kconfig b/drivers/media/pci/Kconfig
index ee095bde0b68..7f65aa609388 100644
--- a/drivers/media/pci/Kconfig
+++ b/drivers/media/pci/Kconfig
@@ -13,6 +13,7 @@ if MEDIA_PCI_SUPPORT
if MEDIA_CAMERA_SUPPORT
comment "Media capture support"
+source "drivers/media/pci/mgb4/Kconfig"
source "drivers/media/pci/solo6x10/Kconfig"
source "drivers/media/pci/sta2x11/Kconfig"
source "drivers/media/pci/tw5864/Kconfig"
diff --git a/drivers/media/pci/Makefile b/drivers/media/pci/Makefile
index 8bed619b7130..f18c7e15abe3 100644
--- a/drivers/media/pci/Makefile
+++ b/drivers/media/pci/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_VIDEO_CX25821) += cx25821/
obj-$(CONFIG_VIDEO_CX88) += cx88/
obj-$(CONFIG_VIDEO_DT3155) += dt3155/
obj-$(CONFIG_VIDEO_IVTV) += ivtv/
+obj-$(CONFIG_VIDEO_MGB4) += mgb4/
obj-$(CONFIG_VIDEO_SAA7134) += saa7134/
obj-$(CONFIG_VIDEO_SAA7164) += saa7164/
obj-$(CONFIG_VIDEO_SOLO6X10) += solo6x10/
diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c
index ec78f7fc5e1b..867c1308de23 100644
--- a/drivers/media/pci/bt8xx/bttv-cards.c
+++ b/drivers/media/pci/bt8xx/bttv-cards.c
@@ -126,6 +126,7 @@ MODULE_PARM_DESC(audiodev, "specify audio device:\n"
"\t\t 3 = tvaudio");
MODULE_PARM_DESC(saa6588, "if 1, then load the saa6588 RDS module, default (0) is to use the card definition.");
+MODULE_FIRMWARE("hcwamc.rbf");
/* I2C addresses list */
#define I2C_ADDR_TDA7432 0x8a
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index aa708a0e5eac..09a193bb87df 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -3474,6 +3474,7 @@ static void bttv_remove(struct pci_dev *pci_dev)
/* free resources */
free_irq(btv->c.pci->irq,btv);
+ del_timer_sync(&btv->timeout);
iounmap(btv->bt848_mmio);
release_mem_region(pci_resource_start(btv->c.pci,0),
pci_resource_len(btv->c.pci,0));
diff --git a/drivers/media/pci/bt8xx/dvb-bt8xx.c b/drivers/media/pci/bt8xx/dvb-bt8xx.c
index 4cb890b949c3..390cbba6c065 100644
--- a/drivers/media/pci/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/pci/bt8xx/dvb-bt8xx.c
@@ -190,11 +190,15 @@ static int cx24108_tuner_set_params(struct dvb_frontend *fe)
u32 freq = c->frequency;
int i, a, n, pump;
u32 band, pll;
- u32 osci[]={950000,1019000,1075000,1178000,1296000,1432000,
- 1576000,1718000,1856000,2036000,2150000};
- u32 bandsel[]={0,0x00020000,0x00040000,0x00100800,0x00101000,
- 0x00102000,0x00104000,0x00108000,0x00110000,
- 0x00120000,0x00140000};
+ static const u32 osci[] = {
+ 950000, 1019000, 1075000, 1178000, 1296000, 1432000,
+ 1576000, 1718000, 1856000, 2036000, 2150000
+ };
+ static const u32 bandsel[] = {
+ 0, 0x00020000, 0x00040000, 0x00100800, 0x00101000,
+ 0x00102000, 0x00104000, 0x00108000, 0x00110000,
+ 0x00120000, 0x00140000
+ };
#define XTAL 1011100 /* Hz, really 1.0111 MHz and a /10 prescaler */
dprintk("cx24108 debug: entering SetTunerFreq, freq=%d\n", freq);
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index 74edcc76d12f..6e1a0614e6d0 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -8,6 +8,7 @@
* All rights reserved.
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <media/i2c/adv7604.h>
#include <media/i2c/adv7842.h>
@@ -210,17 +211,17 @@ void cobalt_pcie_status_show(struct cobalt *cobalt)
pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &stat);
cobalt_info("PCIe link capability 0x%08x: %s per lane and %u lanes\n",
capa, get_link_speed(capa),
- (capa & PCI_EXP_LNKCAP_MLW) >> 4);
+ FIELD_GET(PCI_EXP_LNKCAP_MLW, capa));
cobalt_info("PCIe link control 0x%04x\n", ctrl);
cobalt_info("PCIe link status 0x%04x: %s per lane and %u lanes\n",
stat, get_link_speed(stat),
- (stat & PCI_EXP_LNKSTA_NLW) >> 4);
+ FIELD_GET(PCI_EXP_LNKSTA_NLW, stat));
/* Bus */
pcie_capability_read_dword(pci_bus_dev, PCI_EXP_LNKCAP, &capa);
cobalt_info("PCIe bus link capability 0x%08x: %s per lane and %u lanes\n",
capa, get_link_speed(capa),
- (capa & PCI_EXP_LNKCAP_MLW) >> 4);
+ FIELD_GET(PCI_EXP_LNKCAP_MLW, capa));
/* Slot */
pcie_capability_read_dword(pci_dev, PCI_EXP_SLTCAP, &capa);
@@ -239,7 +240,7 @@ static unsigned pcie_link_get_lanes(struct cobalt *cobalt)
if (!pci_is_pcie(pci_dev))
return 0;
pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &link);
- return (link & PCI_EXP_LNKSTA_NLW) >> 4;
+ return FIELD_GET(PCI_EXP_LNKSTA_NLW, link);
}
static unsigned pcie_bus_link_get_lanes(struct cobalt *cobalt)
@@ -250,7 +251,7 @@ static unsigned pcie_bus_link_get_lanes(struct cobalt *cobalt)
if (!pci_is_pcie(pci_dev))
return 0;
pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &link);
- return (link & PCI_EXP_LNKCAP_MLW) >> 4;
+ return FIELD_GET(PCI_EXP_LNKCAP_MLW, link);
}
static void msi_config_show(struct cobalt *cobalt, struct pci_dev *pci_dev)
diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c
index 4bfbcca14f60..26bf58d17a3d 100644
--- a/drivers/media/pci/cobalt/cobalt-v4l2.c
+++ b/drivers/media/pci/cobalt/cobalt-v4l2.c
@@ -107,11 +107,9 @@ static void chain_all_buffers(struct cobalt_stream *s)
{
struct sg_dma_desc_info *desc[NR_BUFS];
struct cobalt_buffer *cb;
- struct list_head *p;
int i = 0;
- list_for_each(p, &s->bufs) {
- cb = list_entry(p, struct cobalt_buffer, list);
+ list_for_each_entry(cb, &s->bufs, list) {
desc[i] = &s->dma_desc_info[cb->vb.vb2_buf.index];
if (i > 0)
descriptor_list_chain(desc[i-1], desc[i]);
@@ -348,7 +346,6 @@ static void cobalt_dma_stop_streaming(struct cobalt_stream *s)
struct cobalt *cobalt = s->cobalt;
struct sg_dma_desc_info *desc;
struct cobalt_buffer *cb;
- struct list_head *p;
unsigned long flags;
int timeout_msec = 100;
int rx = s->video_channel;
@@ -367,8 +364,7 @@ static void cobalt_dma_stop_streaming(struct cobalt_stream *s)
/* Try to stop the DMA engine gracefully */
spin_lock_irqsave(&s->irqlock, flags);
- list_for_each(p, &s->bufs) {
- cb = list_entry(p, struct cobalt_buffer, list);
+ list_for_each_entry(cb, &s->bufs, list) {
desc = &s->dma_desc_info[cb->vb.vb2_buf.index];
/* Stop DMA after this descriptor chain */
descriptor_list_end_of_chain(desc);
diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h
index 887d2aa36447..af05bde75816 100644
--- a/drivers/media/pci/cx18/cx18-driver.h
+++ b/drivers/media/pci/cx18/cx18-driver.h
@@ -631,7 +631,7 @@ struct cx18 {
u32 hw2_irq_mask;
struct workqueue_struct *in_work_queue;
- char in_workq_name[11]; /* "cx18-NN-in" */
+ char in_workq_name[39]; /* "cx18-NN-in" */
struct cx18_in_work_order in_work_order[CX18_MAX_IN_WORK_ORDERS];
char epu_debug_str[256]; /* CX18_EPU_DEBUG is rare: use shared space */
diff --git a/drivers/media/pci/cx18/cx18-mailbox.c b/drivers/media/pci/cx18/cx18-mailbox.c
index 3b283f3c6726..a6457c23d18c 100644
--- a/drivers/media/pci/cx18/cx18-mailbox.c
+++ b/drivers/media/pci/cx18/cx18-mailbox.c
@@ -831,7 +831,7 @@ int cx18_vapi(struct cx18 *cx, u32 cmd, int args, ...)
int i;
if (cx == NULL) {
- CX18_ERR("cx == NULL (cmd=%x)\n", cmd);
+ pr_err("cx == NULL (cmd=%x)\n", cmd);
return 0;
}
if (args > MAX_MB_ARGUMENTS) {
diff --git a/drivers/media/pci/intel/ivsc/Kconfig b/drivers/media/pci/intel/ivsc/Kconfig
index a8cb981544f7..407a800c81bc 100644
--- a/drivers/media/pci/intel/ivsc/Kconfig
+++ b/drivers/media/pci/intel/ivsc/Kconfig
@@ -10,6 +10,12 @@ config INTEL_VSC
help
This adds support for Intel Visual Sensing Controller (IVSC).
- Enables the IVSC firmware services required for controlling
- camera sensor ownership and CSI-2 link through Image Processing
- Unit(IPU) driver of Intel.
+ The IVSC support is split into two devices, ACE (Algorithm
+ Context Engine) and CSI (Camera Serial Interface), each of which
+ has its own driver. The ACE selects whether the sensor is owned
+ by the IVSC or by the host CPU. The CSI selects the routing
+ destination for the data the sensor transmits over the CSI-2 bus
+ (either the IVSC or the host CPU) and also configures the CSI-2
+ bus itself.
+
+ The modules will be called ivsc-ace and ivsc-csi.
diff --git a/drivers/media/pci/intel/ivsc/mei_ace.c b/drivers/media/pci/intel/ivsc/mei_ace.c
index a0491f307831..3622271c71c8 100644
--- a/drivers/media/pci/intel/ivsc/mei_ace.c
+++ b/drivers/media/pci/intel/ivsc/mei_ace.c
@@ -30,8 +30,6 @@
#include <linux/uuid.h>
#include <linux/workqueue.h>
-#define MEI_ACE_DRIVER_NAME "ivsc_ace"
-
/* indicating driver message */
#define ACE_DRV_MSG 1
/* indicating set command */
@@ -408,6 +406,9 @@ static int mei_ace_setup_dev_link(struct mei_ace *ace)
if (!csi_dev) {
ret = -EPROBE_DEFER;
goto err;
+ } else if (!dev_fwnode(csi_dev)) {
+ ret = -EPROBE_DEFER;
+ goto err_put;
}
/* setup link between mei_ace and mei_csi */
@@ -554,14 +555,14 @@ static const struct dev_pm_ops mei_ace_pm_ops = {
0x9B, 0x78, 0x03, 0x61, 0x63, 0x5E, 0x24, 0x47)
static const struct mei_cl_device_id mei_ace_tbl[] = {
- { MEI_ACE_DRIVER_NAME, MEI_ACE_UUID, MEI_CL_VERSION_ANY },
+ { .uuid = MEI_ACE_UUID, .version = MEI_CL_VERSION_ANY },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(mei, mei_ace_tbl);
static struct mei_cl_driver mei_ace_driver = {
.id_table = mei_ace_tbl,
- .name = MEI_ACE_DRIVER_NAME,
+ .name = KBUILD_MODNAME,
.probe = mei_ace_probe,
.remove = mei_ace_remove,
diff --git a/drivers/media/pci/intel/ivsc/mei_csi.c b/drivers/media/pci/intel/ivsc/mei_csi.c
index 00ba611e0f68..2a6b828fd8dd 100644
--- a/drivers/media/pci/intel/ivsc/mei_csi.c
+++ b/drivers/media/pci/intel/ivsc/mei_csi.c
@@ -30,7 +30,6 @@
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
-#define MEI_CSI_DRIVER_NAME "ivsc_csi"
#define MEI_CSI_ENTITY_NAME "Intel IVSC CSI"
#define MEI_CSI_LINK_FREQ_400MHZ 400000000ULL
@@ -804,14 +803,14 @@ static void mei_csi_remove(struct mei_cl_device *cldev)
0xAF, 0x93, 0x7b, 0x44, 0x53, 0xAC, 0x29, 0xDA)
static const struct mei_cl_device_id mei_csi_tbl[] = {
- { MEI_CSI_DRIVER_NAME, MEI_CSI_UUID, MEI_CL_VERSION_ANY },
+ { .uuid = MEI_CSI_UUID, .version = MEI_CL_VERSION_ANY },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(mei, mei_csi_tbl);
static struct mei_cl_driver mei_csi_driver = {
.id_table = mei_csi_tbl,
- .name = MEI_CSI_DRIVER_NAME,
+ .name = KBUILD_MODNAME,
.probe = mei_csi_probe,
.remove = mei_csi_remove,
diff --git a/drivers/media/pci/mgb4/Kconfig b/drivers/media/pci/mgb4/Kconfig
new file mode 100644
index 000000000000..13fad15a434c
--- /dev/null
+++ b/drivers/media/pci/mgb4/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_MGB4
+ tristate "Digiteq Automotive MGB4 support"
+ depends on VIDEO_DEV && PCI && I2C && DMADEVICES && SPI && MTD && IIO
+ select VIDEOBUF2_DMA_SG
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select I2C_XILINX
+ select SPI_XILINX
+ select MTD_SPI_NOR
+ select XILINX_XDMA
+ help
+ This is a video4linux driver for Digiteq Automotive MGB4 grabber
+ cards.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mgb4.
diff --git a/drivers/media/pci/mgb4/Makefile b/drivers/media/pci/mgb4/Makefile
new file mode 100644
index 000000000000..e92ead18bed0
--- /dev/null
+++ b/drivers/media/pci/mgb4/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+mgb4-objs := mgb4_regs.o mgb4_core.o mgb4_vin.o mgb4_vout.o \
+ mgb4_sysfs_pci.o mgb4_sysfs_in.o mgb4_sysfs_out.o \
+ mgb4_i2c.o mgb4_cmt.o mgb4_trigger.o mgb4_dma.o
+
+obj-$(CONFIG_VIDEO_MGB4) += mgb4.o
diff --git a/drivers/media/pci/mgb4/mgb4_cmt.c b/drivers/media/pci/mgb4/mgb4_cmt.c
new file mode 100644
index 000000000000..70dc78ef193c
--- /dev/null
+++ b/drivers/media/pci/mgb4/mgb4_cmt.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ * author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ *
+ * The CMT module configures the FPGA Clock Management Tile (CMT) registers. For
+ * different video signal frequencies (FPGA input signal frequencies), the FPGA
+ * CMT registers need to be adjusted for the FPGA to work properly. The values
+ * are precomputed based on formulas given by Xilinx in their FPGA documentation
+ * (which are in turn full of some magic values/tables...).
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include "mgb4_core.h"
+#include "mgb4_cmt.h"
+
+static const u16 cmt_vals_out[][15] = {
+ {0x1208, 0x0000, 0x171C, 0x0000, 0x1E38, 0x0000, 0x11C7, 0x0000, 0x1041, 0x01BC, 0x7C01, 0x7DE9, 0xFFFF, 0x9900, 0x8100, },
+ {0x11C7, 0x0000, 0x1619, 0x0080, 0x1C71, 0x0000, 0x130D, 0x0080, 0x0041, 0x0090, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x9000, },
+ {0x11C7, 0x0000, 0x1619, 0x0080, 0x1C71, 0x0000, 0x165A, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x9000, },
+ {0x11C7, 0x0000, 0x1619, 0x0080, 0x1C71, 0x0000, 0x1187, 0x0080, 0x1041, 0x01EE, 0x7C01, 0x7DE9, 0xFFFF, 0x9900, 0x8100, },
+ {0x1186, 0x0000, 0x1555, 0x0000, 0x1AAA, 0x0000, 0x1451, 0x0000, 0x0042, 0x0013, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x11C7, 0x0000, 0x1619, 0x0080, 0x1C71, 0x0000, 0x134E, 0x0080, 0x0041, 0x005E, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x1619, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x9000, },
+ {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x179E, 0x0000, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+ {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x179F, 0x0080, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+ {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x17DF, 0x0000, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x8800, },
+ {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x128B, 0x0080, 0x0041, 0x00DB, 0x7C01, 0x7DE9, 0xFFFF, 0x9000, 0x0100, },
+ {0x1186, 0x0000, 0x1555, 0x0000, 0x1AAA, 0x0000, 0x1820, 0x0000, 0x0083, 0x00FA, 0x7DE9, 0x7DE8, 0xFFFF, 0x0900, 0x9000, },
+ {0x1186, 0x0000, 0x1555, 0x0000, 0x1AAA, 0x0000, 0x1187, 0x0080, 0x1041, 0x01EE, 0x7C01, 0x7DE9, 0xFFFF, 0x9900, 0x8100, },
+ {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x169B, 0x0080, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1800, 0x0100, },
+ {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x171C, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x1800, },
+ {0x1186, 0x0000, 0x1555, 0x0000, 0x1AAA, 0x0000, 0x1515, 0x0080, 0x0042, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x1493, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x15D8, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1900, 0x0100, },
+ {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x124A, 0x0080, 0x0041, 0x010D, 0x7C01, 0x7DE9, 0xFFFF, 0x9000, 0x0100, },
+ {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x175D, 0x0000, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+ {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x1619, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x9000, },
+ {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x17DF, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x8800, },
+ {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x17E0, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x9000, },
+ {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x1820, 0x0000, 0x0083, 0x00FA, 0x7DE9, 0x7DE8, 0xFFFF, 0x0900, 0x9000, },
+ {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x13D0, 0x0080, 0x0042, 0x002C, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x128B, 0x0080, 0x0041, 0x00DB, 0x7C01, 0x7DE9, 0xFFFF, 0x9000, 0x0100, },
+ {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x1820, 0x0000, 0x00C3, 0x00FA, 0x7DE9, 0x7DE8, 0xFFFF, 0x0900, 0x9000, },
+ {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x134E, 0x0080, 0x0041, 0x005E, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x1515, 0x0080, 0x0042, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x175D, 0x0000, 0x00C4, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+ {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x11C7, 0x0000, 0x1041, 0x01BC, 0x7C01, 0x7DE9, 0xFFFF, 0x9900, 0x8100, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x1209, 0x0080, 0x0041, 0x013F, 0x7C01, 0x7DE9, 0xFFFF, 0x9900, 0x1100, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x1556, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x8000, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x179F, 0x0080, 0x00C4, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+ {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x15D8, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1900, 0x0100, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x1105, 0x0080, 0x1041, 0x01E8, 0x6401, 0x65E9, 0xFFFF, 0x9800, 0x1100, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x1820, 0x0000, 0x00C4, 0x00FA, 0x7DE9, 0x7DE8, 0xFFFF, 0x0900, 0x9000, },
+ {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x1493, 0x0080, 0x0042, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x138E, 0x0000, 0x0042, 0x005E, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x17E0, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x9000, },
+ {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x165A, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x9000, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x175D, 0x0000, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+ {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x1187, 0x0080, 0x1041, 0x01EE, 0x7C01, 0x7DE9, 0xFFFF, 0x9900, 0x8100, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x175E, 0x0080, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x179E, 0x0000, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+ {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x134E, 0x0080, 0x0041, 0x005E, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x165A, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x9000, },
+ {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x16DC, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x1800, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x169A, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x9000, },
+ {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x11C7, 0x0000, 0x1041, 0x01BC, 0x7C01, 0x7DE9, 0xFFFF, 0x9900, 0x8100, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x169B, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1800, 0x0100, },
+ {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x171D, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x1800, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x16DB, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1800, 0x0100, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x1146, 0x0080, 0x1041, 0x0184, 0x7C01, 0x7DE9, 0xFFFF, 0x9900, 0x8100, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x171C, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x1800, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x1451, 0x0000, 0x0042, 0x0013, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x171D, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x1800, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x175D, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x1452, 0x0080, 0x0042, 0x0013, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x15D8, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1900, 0x0100, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1104, 0x0000, 0x1041, 0x01E8, 0x5801, 0x59E9, 0xFFFF, 0x9900, 0x0900, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x179F, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1515, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x17DF, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x8800, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1659, 0x0000, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x9000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1555, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x8000, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x14D3, 0x0000, 0x0042, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x1820, 0x0000, 0x0083, 0x00FA, 0x7DE9, 0x7DE8, 0xFFFF, 0x0900, 0x9000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1556, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x8000, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x1187, 0x0080, 0x1041, 0x01EE, 0x7C01, 0x7DE9, 0xFFFF, 0x9900, 0x8100, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1452, 0x0080, 0x0082, 0x0013, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x169B, 0x0080, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1800, 0x0100, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x1514, 0x0000, 0x0042, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x17E0, 0x0080, 0x00C4, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x9000, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x1515, 0x0080, 0x0042, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x16DC, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x1800, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1493, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x1800, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x15D8, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1900, 0x0100, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x171D, 0x0080, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x1800, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1618, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1900, 0x0100, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x175D, 0x0000, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x14D4, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1619, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x9000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x179E, 0x0000, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x179F, 0x0080, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1515, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x13D0, 0x0080, 0x0042, 0x002C, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x169A, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x9000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x128B, 0x0080, 0x0041, 0x00DB, 0x7C01, 0x7DE9, 0xFFFF, 0x9000, 0x0100, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x169B, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1800, 0x0100, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1820, 0x0000, 0x00C3, 0x00FA, 0x7DE9, 0x7DE8, 0xFFFF, 0x0900, 0x9000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1556, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x8000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x16DB, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1800, 0x0100, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1411, 0x0080, 0x0042, 0x002C, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x1800, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1597, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x8000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1451, 0x0000, 0x0042, 0x0013, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x171D, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x1800, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x12CC, 0x0080, 0x0041, 0x00A9, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x9000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x175D, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1452, 0x0080, 0x0042, 0x0013, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x15D8, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1900, 0x0100, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x175E, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1492, 0x0000, 0x0042, 0x0013, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x179F, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1619, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x9000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1493, 0x0080, 0x0042, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x17DF, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x8800, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x130D, 0x0080, 0x0041, 0x0090, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x9000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x17E0, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x9000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x14D3, 0x0000, 0x0042, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x165A, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x9000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1820, 0x0000, 0x0083, 0x00FA, 0x7DE9, 0x7DE8, 0xFFFF, 0x0900, 0x9000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x14D4, 0x0080, 0x0042, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+ {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x169B, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1800, 0x0100, },
+};
+
+static const u16 cmt_vals_in[][13] = {
+ {0x1082, 0x0000, 0x5104, 0x0000, 0x11C7, 0x0000, 0x1041, 0x02BC, 0x7C01, 0xFFE9, 0x9900, 0x9908, 0x8100},
+ {0x1104, 0x0000, 0x9208, 0x0000, 0x138E, 0x0000, 0x1041, 0x015E, 0x7C01, 0xFFE9, 0x0100, 0x0908, 0x1000},
+};
+
+static const u32 cmt_addrs_out[][15] = {
+ {0x420, 0x424, 0x428, 0x42C, 0x430, 0x434, 0x450, 0x454, 0x458, 0x460, 0x464, 0x468, 0x4A0, 0x538, 0x53C},
+ {0x620, 0x624, 0x628, 0x62C, 0x630, 0x634, 0x650, 0x654, 0x658, 0x660, 0x664, 0x668, 0x6A0, 0x738, 0x73C},
+};
+
+static const u32 cmt_addrs_in[][13] = {
+ {0x020, 0x024, 0x028, 0x02C, 0x050, 0x054, 0x058, 0x060, 0x064, 0x068, 0x0A0, 0x138, 0x13C},
+ {0x220, 0x224, 0x228, 0x22C, 0x250, 0x254, 0x258, 0x260, 0x264, 0x268, 0x2A0, 0x338, 0x33C},
+};
+
+static const u32 cmt_freq[] = {
+ 25000, 25510, 26020, 26530, 26983, 27551, 28000, 28570,
+ 29046, 29522, 30000, 30476, 30952, 31546, 32000, 32539,
+ 33035, 33571, 33928, 34522, 35000, 35428, 36000, 36571,
+ 36904, 37500, 38093, 38571, 39047, 39453, 40000, 40476,
+ 40952, 41494, 41964, 42857, 43535, 44047, 44444, 45000,
+ 45535, 46029, 46428, 46823, 47617, 48214, 48571, 49107,
+ 49523, 50000, 50476, 50892, 51428, 52380, 53333, 53967,
+ 54285, 55238, 55555, 55952, 57142, 58095, 58571, 59047,
+ 59521, 60000, 60316, 60952, 61428, 61904, 62500, 63092,
+ 63491, 64282, 65078, 65476, 66071, 66664, 67142, 67854,
+ 68571, 69044, 69642, 70000, 71425, 72616, 73214, 73808,
+ 74285, 75000, 75714, 76187, 76785, 77142, 78570, 80000,
+ 80357, 80951, 81428, 82142, 82857, 83332, 83928, 84285,
+ 85713, 87142, 87500, 88094, 88571, 89285, 90000, 90475,
+ 91071, 91428, 92856, 94642,
+};
+
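+/*
+ * Nearest-match binary search: returns the index of the array entry
+ * closest to the requested key (the exact index on a direct hit).
+ */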
+static size_t freq_srch(u32 key, const u32 *array, size_t size)
+{
+ int l = 0;
+ int r = size - 1;
+ int m = 0;
+
+ while (l <= r) {
+ m = (l + r) / 2;
+ if (array[m] < key)
+ l = m + 1;
+ else if (array[m] > key)
+ r = m - 1;
+ else
+ return m;
+ }
+
+ if (r < 0 || l > size - 1)
+ return m;
+ else
+ return (abs(key - array[l]) < abs(key - array[r])) ? l : r;
+}
+
+u32 mgb4_cmt_set_vout_freq(struct mgb4_vout_dev *voutdev, unsigned int freq)
+{
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
+ const struct mgb4_vout_regs *regs = &voutdev->config->regs;
+ const u16 *reg_set;
+ const u32 *addr;
+ u32 config;
+ size_t i, index;
+
+ index = freq_srch(freq, cmt_freq, ARRAY_SIZE(cmt_freq));
+ addr = cmt_addrs_out[voutdev->config->id];
+ reg_set = cmt_vals_out[index];
+
+ config = mgb4_read_reg(video, regs->config);
+
+ mgb4_write_reg(video, regs->config, 0x1 | (config & ~0x3));
+
+ for (i = 0; i < ARRAY_SIZE(cmt_addrs_out[0]); i++)
+ mgb4_write_reg(&voutdev->mgbdev->cmt, addr[i], reg_set[i]);
+
+ mgb4_mask_reg(video, regs->config, 0x100, 0x100);
+ mgb4_mask_reg(video, regs->config, 0x100, 0x0);
+
+ mgb4_write_reg(video, regs->config, config & ~0x1);
+
+ return cmt_freq[index];
+}
+
+void mgb4_cmt_set_vin_freq_range(struct mgb4_vin_dev *vindev,
+ unsigned int freq_range)
+{
+ struct mgb4_regs *video = &vindev->mgbdev->video;
+ const struct mgb4_vin_regs *regs = &vindev->config->regs;
+ const u16 *reg_set;
+ const u32 *addr;
+ u32 config;
+ size_t i;
+
+ addr = cmt_addrs_in[vindev->config->id];
+ reg_set = cmt_vals_in[freq_range];
+
+ config = mgb4_read_reg(video, regs->config);
+
+ mgb4_write_reg(video, regs->config, 0x1 | (config & ~0x3));
+
+ for (i = 0; i < ARRAY_SIZE(cmt_addrs_in[0]); i++)
+ mgb4_write_reg(&vindev->mgbdev->cmt, addr[i], reg_set[i]);
+
+ mgb4_mask_reg(video, regs->config, 0x1000, 0x1000);
+ mgb4_mask_reg(video, regs->config, 0x1000, 0x0);
+
+ mgb4_write_reg(video, regs->config, config & ~0x1);
+}
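For context, a minimal usage sketch of the interface added above, from a hypothetical caller (an already initialized struct mgb4_vout_dev pointer named voutdev is assumed; the snippet is illustrative only and not part of this patch):

	/*
	 * Ask for an output clock of 61000 (same units as the cmt_freq[]
	 * table). There is no exact match in the table, so the nearest
	 * entry, 60952, is programmed and returned.
	 */
	u32 actual = mgb4_cmt_set_vout_freq(voutdev, 61000);

	dev_dbg(&voutdev->mgbdev->pdev->dev,
		"requested 61000, got %u\n", actual);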
diff --git a/drivers/media/pci/mgb4/mgb4_cmt.h b/drivers/media/pci/mgb4/mgb4_cmt.h
new file mode 100644
index 000000000000..b15df56ca059
--- /dev/null
+++ b/drivers/media/pci/mgb4/mgb4_cmt.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ * author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ */
+
+#ifndef __MGB4_CMT_H__
+#define __MGB4_CMT_H__
+
+#include "mgb4_vout.h"
+#include "mgb4_vin.h"
+
+u32 mgb4_cmt_set_vout_freq(struct mgb4_vout_dev *voutdev, unsigned int freq);
+void mgb4_cmt_set_vin_freq_range(struct mgb4_vin_dev *vindev,
+ unsigned int freq_range);
+
+#endif
diff --git a/drivers/media/pci/mgb4/mgb4_core.c b/drivers/media/pci/mgb4/mgb4_core.c
new file mode 100644
index 000000000000..3efb33fbf40c
--- /dev/null
+++ b/drivers/media/pci/mgb4/mgb4_core.c
@@ -0,0 +1,686 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This is the driver for the MGB4 video grabber card by Digiteq Automotive.
+ *
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ * author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ *
+ * This is the main driver module. The DMA, I2C and SPI sub-drivers are
+ * initialized here and the input/output v4l2 devices are created.
+ *
+ * The mgb4 card uses different expansion modules for different video sources
+ * (GMSL and FPDL3 for now) so in probe() we detect the module type based on
+ * what we see on the I2C bus and check if it matches the FPGA bitstream (there
+ * are different bitstreams for different expansion modules). When no expansion
+ * module is present, we still let the driver initialize to allow flashing of
+ * the FPGA firmware using the SPI FLASH device. No v4l2 video devices are
+ * created in this case.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/dma/amd_xdma.h>
+#include <linux/platform_data/amd_xdma.h>
+#include <linux/spi/xilinx_spi.h>
+#include <linux/mtd/mtd.h>
+#include <linux/hwmon.h>
+#include <linux/debugfs.h>
+#include "mgb4_dma.h"
+#include "mgb4_i2c.h"
+#include "mgb4_sysfs.h"
+#include "mgb4_vout.h"
+#include "mgb4_vin.h"
+#include "mgb4_trigger.h"
+#include "mgb4_core.h"
+
+#define MGB4_USER_IRQS 16
+
+ATTRIBUTE_GROUPS(mgb4_pci);
+
+static int flashid;
+
+static struct xdma_chan_info h2c_chan_info = {
+ .dir = DMA_MEM_TO_DEV,
+};
+
+static struct xdma_chan_info c2h_chan_info = {
+ .dir = DMA_DEV_TO_MEM,
+};
+
+static struct xspi_platform_data spi_platform_data = {
+ .num_chipselect = 1,
+ .bits_per_word = 8
+};
+
+static const struct i2c_board_info extender_info = {
+ I2C_BOARD_INFO("extender", 0x21)
+};
+
+#if IS_REACHABLE(CONFIG_HWMON)
+static umode_t temp_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type == hwmon_temp &&
+ (attr == hwmon_temp_input || attr == hwmon_temp_label))
+ return 0444;
+ else
+ return 0;
+}
+
+static int temp_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct mgb4_dev *mgbdev = dev_get_drvdata(dev);
+ u32 val10, raw;
+
+ if (type != hwmon_temp || attr != hwmon_temp_input)
+ return -EOPNOTSUPP;
+
+ raw = mgb4_read_reg(&mgbdev->video, 0xD0);
+ /* register value -> Celsius degrees formula given by Xilinx */
+ val10 = ((((raw >> 20) & 0xFFF) * 503975) - 1118822400) / 409600;
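+ /*
+ * val10 is the temperature in tenths of a degree Celsius (the Xilinx
+ * XADC transfer function T = code * 503.975 / 4096 - 273.15, scaled
+ * by ten); the *100 below converts it to the millidegrees expected
+ * by hwmon.
+ */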
+ *val = val10 * 100;
+
+ return 0;
+}
+
+static int temp_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ if (type != hwmon_temp || attr != hwmon_temp_label)
+ return -EOPNOTSUPP;
+
+ *str = "FPGA Temperature";
+
+ return 0;
+}
+
+static const struct hwmon_ops temp_ops = {
+ .is_visible = temp_is_visible,
+ .read = temp_read,
+ .read_string = temp_read_string
+};
+
+static const struct hwmon_channel_info *temp_channel_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_LABEL),
+ NULL
+};
+
+static const struct hwmon_chip_info temp_chip_info = {
+ .ops = &temp_ops,
+ .info = temp_channel_info,
+};
+#endif
+
+static int match_i2c_adap(struct device *dev, void *data)
+{
+ return i2c_verify_adapter(dev) ? 1 : 0;
+}
+
+static struct i2c_adapter *get_i2c_adap(struct platform_device *pdev)
+{
+ struct device *dev;
+
+ mutex_lock(&pdev->dev.mutex);
+ dev = device_find_child(&pdev->dev, NULL, match_i2c_adap);
+ mutex_unlock(&pdev->dev.mutex);
+
+ return dev ? to_i2c_adapter(dev) : NULL;
+}
+
+static int match_spi_adap(struct device *dev, void *data)
+{
+ return to_spi_device(dev) ? 1 : 0;
+}
+
+static struct spi_master *get_spi_adap(struct platform_device *pdev)
+{
+ struct device *dev;
+
+ mutex_lock(&pdev->dev.mutex);
+ dev = device_find_child(&pdev->dev, NULL, match_spi_adap);
+ mutex_unlock(&pdev->dev.mutex);
+
+ return dev ? container_of(dev, struct spi_master, dev) : NULL;
+}
+
+static int init_spi(struct mgb4_dev *mgbdev)
+{
+ struct resource spi_resources[] = {
+ {
+ .start = 0x400,
+ .end = 0x47f,
+ .flags = IORESOURCE_MEM,
+ .name = "io-memory",
+ },
+ {
+ .start = 14,
+ .end = 14,
+ .flags = IORESOURCE_IRQ,
+ .name = "irq",
+ },
+ };
+ struct spi_board_info spi_info = {
+ .max_speed_hz = 10000000,
+ .modalias = "m25p80",
+ .chip_select = 0,
+ .mode = SPI_MODE_3,
+ };
+ struct pci_dev *pdev = mgbdev->pdev;
+ struct device *dev = &pdev->dev;
+ struct spi_master *master;
+ struct spi_device *spi_dev;
+ u32 irq;
+ int rv, id;
+ resource_size_t mapbase = pci_resource_start(pdev, MGB4_MGB4_BAR_ID);
+
+ request_module("platform:xilinx_spi");
+
+ irq = xdma_get_user_irq(mgbdev->xdev, 14);
+ xdma_enable_user_irq(mgbdev->xdev, irq);
+
+ spi_resources[0].parent = &pdev->resource[MGB4_MGB4_BAR_ID];
+ spi_resources[0].start += mapbase;
+ spi_resources[0].end += mapbase;
+ spi_resources[1].start = irq;
+ spi_resources[1].end = irq;
+
+ id = pci_dev_id(pdev);
+ mgbdev->spi_pdev = platform_device_register_resndata(dev, "xilinx_spi",
+ id, spi_resources,
+ ARRAY_SIZE(spi_resources),
+ &spi_platform_data,
+ sizeof(spi_platform_data));
+ if (IS_ERR(mgbdev->spi_pdev)) {
+ dev_err(dev, "failed to register SPI device\n");
+ return PTR_ERR(mgbdev->spi_pdev);
+ }
+
+ master = get_spi_adap(mgbdev->spi_pdev);
+ if (!master) {
+ dev_err(dev, "failed to get SPI adapter\n");
+ rv = -EINVAL;
+ goto err_pdev;
+ }
+
+ snprintf(mgbdev->fw_part_name, sizeof(mgbdev->fw_part_name),
+ "mgb4-fw.%d", flashid);
+ mgbdev->partitions[0].name = mgbdev->fw_part_name;
+ mgbdev->partitions[0].size = 0x400000;
+ mgbdev->partitions[0].offset = 0x400000;
+ mgbdev->partitions[0].mask_flags = 0;
+
+ snprintf(mgbdev->data_part_name, sizeof(mgbdev->data_part_name),
+ "mgb4-data.%d", flashid);
+ mgbdev->partitions[1].name = mgbdev->data_part_name;
+ mgbdev->partitions[1].size = 0x10000;
+ mgbdev->partitions[1].offset = 0xFF0000;
+ mgbdev->partitions[1].mask_flags = MTD_CAP_NORFLASH;
+
+ snprintf(mgbdev->flash_name, sizeof(mgbdev->flash_name),
+ "mgb4-flash.%d", flashid);
+ mgbdev->flash_data.name = mgbdev->flash_name;
+ mgbdev->flash_data.parts = mgbdev->partitions;
+ mgbdev->flash_data.nr_parts = ARRAY_SIZE(mgbdev->partitions);
+ mgbdev->flash_data.type = "spi-nor";
+
+ spi_info.platform_data = &mgbdev->flash_data;
+
+ spi_dev = spi_new_device(master, &spi_info);
+ put_device(&master->dev);
+ if (!spi_dev) {
+ dev_err(dev, "failed to create MTD device\n");
+ rv = -EINVAL;
+ goto err_pdev;
+ }
+
+ return 0;
+
+err_pdev:
+ platform_device_unregister(mgbdev->spi_pdev);
+
+ return rv;
+}
+
+static void free_spi(struct mgb4_dev *mgbdev)
+{
+ platform_device_unregister(mgbdev->spi_pdev);
+}
+
+static int init_i2c(struct mgb4_dev *mgbdev)
+{
+ struct resource i2c_resources[] = {
+ {
+ .start = 0x200,
+ .end = 0x3ff,
+ .flags = IORESOURCE_MEM,
+ .name = "io-memory",
+ },
+ {
+ .start = 15,
+ .end = 15,
+ .flags = IORESOURCE_IRQ,
+ .name = "irq",
+ },
+ };
+ struct pci_dev *pdev = mgbdev->pdev;
+ struct device *dev = &pdev->dev;
+ char clk_name[16];
+ u32 irq;
+ int rv, id;
+ resource_size_t mapbase = pci_resource_start(pdev, MGB4_MGB4_BAR_ID);
+
+ request_module("platform:xiic-i2c");
+
+ irq = xdma_get_user_irq(mgbdev->xdev, 15);
+ xdma_enable_user_irq(mgbdev->xdev, irq);
+
+ i2c_resources[0].parent = &pdev->resource[MGB4_MGB4_BAR_ID];
+ i2c_resources[0].start += mapbase;
+ i2c_resources[0].end += mapbase;
+ i2c_resources[1].start = irq;
+ i2c_resources[1].end = irq;
+
+ id = pci_dev_id(pdev);
+
+ /* create dummy clock required by the xiic-i2c adapter */
+ snprintf(clk_name, sizeof(clk_name), "xiic-i2c.%d", id);
+ mgbdev->i2c_clk = clk_hw_register_fixed_rate(NULL, clk_name, NULL,
+ 0, 125000000);
+ if (IS_ERR(mgbdev->i2c_clk)) {
+ dev_err(dev, "failed to register I2C clock\n");
+ return PTR_ERR(mgbdev->i2c_clk);
+ }
+ mgbdev->i2c_cl = clkdev_hw_create(mgbdev->i2c_clk, NULL, "xiic-i2c.%d",
+ id);
+ if (!mgbdev->i2c_cl) {
+ dev_err(dev, "failed to register I2C clockdev\n");
+ rv = -ENOMEM;
+ goto err_clk;
+ }
+
+ mgbdev->i2c_pdev = platform_device_register_resndata(dev, "xiic-i2c",
+ id, i2c_resources,
+ ARRAY_SIZE(i2c_resources),
+ NULL, 0);
+ if (IS_ERR(mgbdev->i2c_pdev)) {
+ dev_err(dev, "failed to register I2C device\n");
+ rv = PTR_ERR(mgbdev->i2c_pdev);
+ goto err_clkdev;
+ }
+
+ mgbdev->i2c_adap = get_i2c_adap(mgbdev->i2c_pdev);
+ if (!mgbdev->i2c_adap) {
+ dev_err(dev, "failed to get I2C adapter\n");
+ rv = -EINVAL;
+ goto err_pdev;
+ }
+
+ mutex_init(&mgbdev->i2c_lock);
+
+ return 0;
+
+err_pdev:
+ platform_device_unregister(mgbdev->i2c_pdev);
+err_clkdev:
+ clkdev_drop(mgbdev->i2c_cl);
+err_clk:
+ clk_hw_unregister(mgbdev->i2c_clk);
+
+ return rv;
+}
+
+static void free_i2c(struct mgb4_dev *mgbdev)
+{
+ put_device(&mgbdev->i2c_adap->dev);
+ platform_device_unregister(mgbdev->i2c_pdev);
+ clkdev_drop(mgbdev->i2c_cl);
+ clk_hw_unregister(mgbdev->i2c_clk);
+}
+
+static int get_serial_number(struct mgb4_dev *mgbdev)
+{
+ struct device *dev = &mgbdev->pdev->dev;
+ struct mtd_info *mtd;
+ size_t rs;
+ int rv;
+
+ mgbdev->serial_number = 0;
+
+ mtd = get_mtd_device_nm(mgbdev->data_part_name);
+ if (IS_ERR(mtd)) {
+ dev_warn(dev, "failed to get data MTD device\n");
+ return -ENOENT;
+ }
+ rv = mtd_read(mtd, 0, sizeof(mgbdev->serial_number), &rs,
+ (u_char *)&mgbdev->serial_number);
+ put_mtd_device(mtd);
+ if (rv < 0 || rs != sizeof(mgbdev->serial_number)) {
+ dev_warn(dev, "error reading MTD device\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int get_module_version(struct mgb4_dev *mgbdev)
+{
+ struct device *dev = &mgbdev->pdev->dev;
+ struct mgb4_i2c_client extender;
+ s32 version;
+ u32 fw_version;
+ int rv;
+
+ rv = mgb4_i2c_init(&extender, mgbdev->i2c_adap, &extender_info, 8);
+ if (rv < 0) {
+ dev_err(dev, "failed to create extender I2C device\n");
+ return rv;
+ }
+ version = mgb4_i2c_read_byte(&extender, 0x00);
+ mgb4_i2c_free(&extender);
+ if (version < 0) {
+ dev_err(dev, "error reading module version\n");
+ return -EIO;
+ }
+
+ mgbdev->module_version = ~((u32)version) & 0xff;
+ if (!(MGB4_IS_FPDL3(mgbdev) || MGB4_IS_GMSL(mgbdev))) {
+ dev_err(dev, "unknown module type\n");
+ return -EINVAL;
+ }
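+ /*
+ * The top nibble of the module version encodes the module type
+ * (1 = FPDL3, 2 = GMSL, see the MGB4_IS_* macros) and must match
+ * the firmware type reported in the top byte of the FW version
+ * register.
+ */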
+ fw_version = mgb4_read_reg(&mgbdev->video, 0xC4);
+ if (fw_version >> 24 != mgbdev->module_version >> 4) {
+ dev_err(dev, "module/firmware type mismatch\n");
+ return -EINVAL;
+ }
+
+ dev_info(dev, "%s module detected\n",
+ MGB4_IS_FPDL3(mgbdev) ? "FPDL3" : "GMSL");
+
+ return 0;
+}
+
+static int map_regs(struct pci_dev *pdev, struct resource *res,
+ struct mgb4_regs *regs)
+{
+ int rv;
+ resource_size_t mapbase = pci_resource_start(pdev, MGB4_MGB4_BAR_ID);
+
+ res->start += mapbase;
+ res->end += mapbase;
+
+ rv = mgb4_regs_map(res, regs);
+ if (rv < 0) {
+ dev_err(&pdev->dev, "failed to map %s registers\n", res->name);
+ return rv;
+ }
+
+ return 0;
+}
+
+static int init_xdma(struct mgb4_dev *mgbdev)
+{
+ struct xdma_platdata data;
+ struct resource res[2] = { 0 };
+ struct dma_slave_map *map;
+ struct pci_dev *pdev = mgbdev->pdev;
+ struct device *dev = &pdev->dev;
+ int i;
+
+ res[0].start = pci_resource_start(pdev, MGB4_XDMA_BAR_ID);
+ res[0].end = pci_resource_end(pdev, MGB4_XDMA_BAR_ID);
+ res[0].flags = IORESOURCE_MEM;
+ res[0].parent = &pdev->resource[MGB4_XDMA_BAR_ID];
+ res[1].start = pci_irq_vector(pdev, 0);
+ res[1].end = res[1].start + MGB4_VIN_DEVICES + MGB4_VOUT_DEVICES
+ + MGB4_USER_IRQS - 1;
+ res[1].flags = IORESOURCE_IRQ;
+
+ data.max_dma_channels = MGB4_VIN_DEVICES + MGB4_VOUT_DEVICES;
+ data.device_map = mgbdev->slave_map;
+ data.device_map_cnt = MGB4_VIN_DEVICES + MGB4_VOUT_DEVICES;
+
+ for (i = 0; i < MGB4_VIN_DEVICES; i++) {
+ sprintf(mgbdev->channel_names[i], "c2h%d", i);
+ map = &data.device_map[i];
+ map->slave = mgbdev->channel_names[i];
+ map->devname = dev_name(dev);
+ map->param = XDMA_FILTER_PARAM(&c2h_chan_info);
+ }
+ for (i = 0; i < MGB4_VOUT_DEVICES; i++) {
+ sprintf(mgbdev->channel_names[i + MGB4_VIN_DEVICES], "h2c%d", i);
+ map = &data.device_map[i + MGB4_VIN_DEVICES];
+ map->slave = mgbdev->channel_names[i + MGB4_VIN_DEVICES];
+ map->devname = dev_name(dev);
+ map->param = XDMA_FILTER_PARAM(&h2c_chan_info);
+ }
+
+ mgbdev->xdev = platform_device_register_resndata(dev, "xdma",
+ PLATFORM_DEVID_AUTO, res,
+ 2, &data, sizeof(data));
+ if (IS_ERR(mgbdev->xdev)) {
+ dev_err(dev, "failed to register XDMA device\n");
+ return PTR_ERR(mgbdev->xdev);
+ }
+
+ return 0;
+}
+
+static void free_xdma(struct mgb4_dev *mgbdev)
+{
+ platform_device_unregister(mgbdev->xdev);
+}
+
+static int mgb4_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int i, rv;
+ struct mgb4_dev *mgbdev;
+ struct resource video = {
+ .start = 0x0,
+ .end = 0x100,
+ .flags = IORESOURCE_MEM,
+ .name = "mgb4-video",
+ };
+ struct resource cmt = {
+ .start = 0x1000,
+ .end = 0x1800,
+ .flags = IORESOURCE_MEM,
+ .name = "mgb4-cmt",
+ };
+ int irqs = pci_msix_vec_count(pdev);
+
+ mgbdev = kzalloc(sizeof(*mgbdev), GFP_KERNEL);
+ if (!mgbdev)
+ return -ENOMEM;
+
+ mgbdev->pdev = pdev;
+ pci_set_drvdata(pdev, mgbdev);
+
+ /* PCIe related stuff */
+ rv = pci_enable_device(pdev);
+ if (rv) {
+ dev_err(&pdev->dev, "error enabling PCI device\n");
+ goto err_mgbdev;
+ }
+
+ rv = pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
+ if (rv)
+ dev_warn(&pdev->dev, "error enabling PCIe relaxed ordering\n");
+ rv = pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_EXT_TAG);
+ if (rv)
+ dev_warn(&pdev->dev, "error enabling PCIe extended tag field\n");
+ rv = pcie_set_readrq(pdev, 512);
+ if (rv)
+ dev_warn(&pdev->dev, "error setting PCIe max. memory read size\n");
+ pci_set_master(pdev);
+
+ rv = pci_alloc_irq_vectors(pdev, irqs, irqs, PCI_IRQ_MSIX);
+ if (rv < 0) {
+ dev_err(&pdev->dev, "error allocating MSI-X IRQs\n");
+ goto err_enable_pci;
+ }
+
+ rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rv) {
+ dev_err(&pdev->dev, "error setting DMA mask\n");
+ goto err_enable_pci;
+ }
+
+ /* DMA + IRQ engine */
+ rv = init_xdma(mgbdev);
+ if (rv)
+ goto err_alloc_irq;
+ rv = mgb4_dma_channel_init(mgbdev);
+ if (rv)
+ goto err_dma_chan;
+
+ /* mgb4 video registers */
+ rv = map_regs(pdev, &video, &mgbdev->video);
+ if (rv < 0)
+ goto err_dma_chan;
+ /* mgb4 cmt registers */
+ rv = map_regs(pdev, &cmt, &mgbdev->cmt);
+ if (rv < 0)
+ goto err_video_regs;
+
+ /* SPI FLASH */
+ rv = init_spi(mgbdev);
+ if (rv < 0)
+ goto err_cmt_regs;
+
+ /* I2C controller */
+ rv = init_i2c(mgbdev);
+ if (rv < 0)
+ goto err_spi;
+
+ /* PCI card related sysfs attributes */
+ rv = device_add_groups(&pdev->dev, mgb4_pci_groups);
+ if (rv < 0)
+ goto err_i2c;
+
+#if IS_REACHABLE(CONFIG_HWMON)
+ /* HWmon (card temperature) */
+ mgbdev->hwmon_dev = hwmon_device_register_with_info(&pdev->dev, "mgb4",
+ mgbdev,
+ &temp_chip_info,
+ NULL);
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+ mgbdev->debugfs = debugfs_create_dir(dev_name(&pdev->dev), NULL);
+#endif
+
+ /* Get the card serial number. On systems without MTD flash support this
+ * may fail, so ignore the return value. An invalid serial number
+ * should not break anything...
+ */
+ if (get_serial_number(mgbdev) < 0)
+ dev_warn(&pdev->dev, "error reading card serial number\n");
+
+ /* Get module type. If no valid module is found, skip the video device
+ * creation part but do not exit with error to allow flashing the card.
+ */
+ rv = get_module_version(mgbdev);
+ if (rv < 0)
+ goto exit;
+
+ /* Video input v4l2 devices */
+ for (i = 0; i < MGB4_VIN_DEVICES; i++)
+ mgbdev->vin[i] = mgb4_vin_create(mgbdev, i);
+
+ /* Video output v4l2 devices */
+ for (i = 0; i < MGB4_VOUT_DEVICES; i++)
+ mgbdev->vout[i] = mgb4_vout_create(mgbdev, i);
+
+ /* Triggers */
+ mgbdev->indio_dev = mgb4_trigger_create(mgbdev);
+
+exit:
+ flashid++;
+
+ return 0;
+
+err_i2c:
+ free_i2c(mgbdev);
+err_spi:
+ free_spi(mgbdev);
+err_cmt_regs:
+ mgb4_regs_free(&mgbdev->cmt);
+err_video_regs:
+ mgb4_regs_free(&mgbdev->video);
+err_dma_chan:
+ mgb4_dma_channel_free(mgbdev);
+ free_xdma(mgbdev);
+err_alloc_irq:
+ pci_disable_msix(pdev);
+err_enable_pci:
+ pci_disable_device(pdev);
+err_mgbdev:
+ kfree(mgbdev);
+
+ return rv;
+}
+
+static void mgb4_remove(struct pci_dev *pdev)
+{
+ struct mgb4_dev *mgbdev = pci_get_drvdata(pdev);
+ int i;
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(mgbdev->debugfs);
+#endif
+#if IS_REACHABLE(CONFIG_HWMON)
+ hwmon_device_unregister(mgbdev->hwmon_dev);
+#endif
+
+ if (mgbdev->indio_dev)
+ mgb4_trigger_free(mgbdev->indio_dev);
+
+ for (i = 0; i < MGB4_VOUT_DEVICES; i++)
+ if (mgbdev->vout[i])
+ mgb4_vout_free(mgbdev->vout[i]);
+ for (i = 0; i < MGB4_VIN_DEVICES; i++)
+ if (mgbdev->vin[i])
+ mgb4_vin_free(mgbdev->vin[i]);
+
+ device_remove_groups(&mgbdev->pdev->dev, mgb4_pci_groups);
+ free_spi(mgbdev);
+ free_i2c(mgbdev);
+ mgb4_regs_free(&mgbdev->video);
+ mgb4_regs_free(&mgbdev->cmt);
+
+ mgb4_dma_channel_free(mgbdev);
+ free_xdma(mgbdev);
+
+ pci_disable_msix(mgbdev->pdev);
+ pci_disable_device(mgbdev->pdev);
+
+ kfree(mgbdev);
+}
+
+static const struct pci_device_id mgb4_pci_ids[] = {
+ { PCI_DEVICE(0x1ed8, 0x0101), },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, mgb4_pci_ids);
+
+static struct pci_driver mgb4_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mgb4_pci_ids,
+ .probe = mgb4_probe,
+ .remove = mgb4_remove,
+};
+
+module_pci_driver(mgb4_pci_driver);
+
+MODULE_AUTHOR("Digiteq Automotive s.r.o.");
+MODULE_DESCRIPTION("Digiteq Automotive MGB4 Driver");
+MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: platform:xiic-i2c platform:xilinx_spi spi-nor");
diff --git a/drivers/media/pci/mgb4/mgb4_core.h b/drivers/media/pci/mgb4/mgb4_core.h
new file mode 100644
index 000000000000..2a946e46aec1
--- /dev/null
+++ b/drivers/media/pci/mgb4/mgb4_core.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ * author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ */
+
+#ifndef __MGB4_CORE_H__
+#define __MGB4_CORE_H__
+
+#include <linux/spi/flash.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mutex.h>
+#include <linux/dmaengine.h>
+#include "mgb4_regs.h"
+
+#define MGB4_VIN_DEVICES 2
+#define MGB4_VOUT_DEVICES 2
+
+#define MGB4_MGB4_BAR_ID 0
+#define MGB4_XDMA_BAR_ID 1
+
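+/*
+ * The module type is encoded in the upper nibble of module_version
+ * (1 = FPDL3, 2 = GMSL), the lower nibble holds the module version itself.
+ */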
+#define MGB4_IS_GMSL(mgbdev) \
+ ((mgbdev)->module_version >> 4 == 2)
+#define MGB4_IS_FPDL3(mgbdev) \
+ ((mgbdev)->module_version >> 4 == 1)
+
+struct mgb4_dma_channel {
+ struct dma_chan *chan;
+ struct completion req_compl;
+};
+
+struct mgb4_dev {
+ struct pci_dev *pdev;
+ struct platform_device *xdev;
+ struct mgb4_vin_dev *vin[MGB4_VIN_DEVICES];
+ struct mgb4_vout_dev *vout[MGB4_VOUT_DEVICES];
+
+ struct mgb4_dma_channel c2h_chan[MGB4_VIN_DEVICES];
+ struct mgb4_dma_channel h2c_chan[MGB4_VOUT_DEVICES];
+ struct dma_slave_map slave_map[MGB4_VIN_DEVICES + MGB4_VOUT_DEVICES];
+
+ struct mgb4_regs video;
+ struct mgb4_regs cmt;
+
+ struct clk_hw *i2c_clk;
+ struct clk_lookup *i2c_cl;
+ struct platform_device *i2c_pdev;
+ struct i2c_adapter *i2c_adap;
+ struct mutex i2c_lock; /* I2C bus access lock */
+
+ struct platform_device *spi_pdev;
+ struct flash_platform_data flash_data;
+ struct mtd_partition partitions[2];
+ char flash_name[16];
+ char fw_part_name[16];
+ char data_part_name[16];
+ char channel_names[MGB4_VIN_DEVICES + MGB4_VOUT_DEVICES][16];
+
+ struct iio_dev *indio_dev;
+#if IS_REACHABLE(CONFIG_HWMON)
+ struct device *hwmon_dev;
+#endif
+
+ unsigned long io_reconfig;
+
+ u8 module_version;
+ u32 serial_number;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+#endif
+};
+
+#endif
diff --git a/drivers/media/pci/mgb4/mgb4_dma.c b/drivers/media/pci/mgb4/mgb4_dma.c
new file mode 100644
index 000000000000..cae888e6504b
--- /dev/null
+++ b/drivers/media/pci/mgb4/mgb4_dma.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021-2022 Digiteq Automotive
+ * author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ *
+ * This module handles the DMA transfers. The standard dmaengine API, as
+ * provided by the XDMA module, is used.
+ */
+
+#include <linux/pci.h>
+#include <linux/dma-direction.h>
+#include "mgb4_core.h"
+#include "mgb4_dma.h"
+
+static void chan_irq(void *param)
+{
+ struct mgb4_dma_channel *chan = param;
+
+ complete(&chan->req_compl);
+}
+
+int mgb4_dma_transfer(struct mgb4_dev *mgbdev, u32 channel, bool write,
+ u64 paddr, struct sg_table *sgt)
+{
+ struct dma_slave_config cfg;
+ struct mgb4_dma_channel *chan;
+ struct dma_async_tx_descriptor *tx;
+ struct pci_dev *pdev = mgbdev->pdev;
+ int ret;
+
+ memset(&cfg, 0, sizeof(cfg));
+
+ if (write) {
+ cfg.direction = DMA_MEM_TO_DEV;
+ cfg.dst_addr = paddr;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ chan = &mgbdev->h2c_chan[channel];
+ } else {
+ cfg.direction = DMA_DEV_TO_MEM;
+ cfg.src_addr = paddr;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ chan = &mgbdev->c2h_chan[channel];
+ }
+
+ ret = dmaengine_slave_config(chan->chan, &cfg);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to config dma: %d\n", ret);
+ return ret;
+ }
+
+ tx = dmaengine_prep_slave_sg(chan->chan, sgt->sgl, sgt->nents,
+ cfg.direction, 0);
+ if (!tx) {
+ dev_err(&pdev->dev, "failed to prep slave sg\n");
+ return -EIO;
+ }
+
+ tx->callback = chan_irq;
+ tx->callback_param = chan;
+
+ ret = dma_submit_error(dmaengine_submit(tx));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to submit sg\n");
+ return -EIO;
+ }
+
+ dma_async_issue_pending(chan->chan);
+
+ if (!wait_for_completion_timeout(&chan->req_compl,
+ msecs_to_jiffies(10000))) {
+ dev_err(&pdev->dev, "dma timeout\n");
+ dmaengine_terminate_sync(chan->chan);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int mgb4_dma_channel_init(struct mgb4_dev *mgbdev)
+{
+ int i, ret;
+ char name[16];
+ struct pci_dev *pdev = mgbdev->pdev;
+
+ for (i = 0; i < MGB4_VIN_DEVICES; i++) {
+ sprintf(name, "c2h%d", i);
+ mgbdev->c2h_chan[i].chan = dma_request_chan(&pdev->dev, name);
+ if (IS_ERR(mgbdev->c2h_chan[i].chan)) {
+ dev_err(&pdev->dev, "failed to initialize %s", name);
+ ret = PTR_ERR(mgbdev->c2h_chan[i].chan);
+ mgbdev->c2h_chan[i].chan = NULL;
+ return ret;
+ }
+ init_completion(&mgbdev->c2h_chan[i].req_compl);
+ }
+ for (i = 0; i < MGB4_VOUT_DEVICES; i++) {
+ sprintf(name, "h2c%d", i);
+ mgbdev->h2c_chan[i].chan = dma_request_chan(&pdev->dev, name);
+ if (IS_ERR(mgbdev->h2c_chan[i].chan)) {
+ dev_err(&pdev->dev, "failed to initialize %s", name);
+ ret = PTR_ERR(mgbdev->h2c_chan[i].chan);
+ mgbdev->h2c_chan[i].chan = NULL;
+ return ret;
+ }
+ init_completion(&mgbdev->h2c_chan[i].req_compl);
+ }
+
+ return 0;
+}
+
+void mgb4_dma_channel_free(struct mgb4_dev *mgbdev)
+{
+ int i;
+
+ for (i = 0; i < MGB4_VIN_DEVICES; i++) {
+ if (mgbdev->c2h_chan[i].chan)
+ dma_release_channel(mgbdev->c2h_chan[i].chan);
+ }
+ for (i = 0; i < MGB4_VOUT_DEVICES; i++) {
+ if (mgbdev->h2c_chan[i].chan)
+ dma_release_channel(mgbdev->h2c_chan[i].chan);
+ }
+}
diff --git a/drivers/media/pci/mgb4/mgb4_dma.h b/drivers/media/pci/mgb4/mgb4_dma.h
new file mode 100644
index 000000000000..4ebc2b1ce9b7
--- /dev/null
+++ b/drivers/media/pci/mgb4/mgb4_dma.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ * author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ */
+
+#ifndef __MGB4_DMA_H__
+#define __MGB4_DMA_H__
+
+#include "mgb4_core.h"
+
+int mgb4_dma_channel_init(struct mgb4_dev *mgbdev);
+void mgb4_dma_channel_free(struct mgb4_dev *mgbdev);
+
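+/*
+ * Synchronous scatter-gather transfer between host memory (sgt) and the card
+ * address paddr on the given channel. The direction is selected by "write"
+ * (true = host to card) and the call blocks until the transfer completes or
+ * a 10 s timeout expires.
+ */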
+int mgb4_dma_transfer(struct mgb4_dev *mgbdev, u32 channel, bool write,
+ u64 paddr, struct sg_table *sgt);
+
+#endif
diff --git a/drivers/media/pci/mgb4/mgb4_i2c.c b/drivers/media/pci/mgb4/mgb4_i2c.c
new file mode 100644
index 000000000000..2697b67e290e
--- /dev/null
+++ b/drivers/media/pci/mgb4/mgb4_i2c.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ * author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ *
+ * The I2C module unifies the I2C access to the serializers/deserializers. The
+ * I2C chips on the GMSL module use 16-bit addressing, while the FPDL3 chips
+ * use standard 8-bit addressing.
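+ * The address width is selected per client via the addr_size parameter of
+ * mgb4_i2c_init() and the read/write helpers pick the matching access method
+ * (SMBus for 8-bit, raw i2c_transfer() for 16-bit).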
+ */
+
+#include "mgb4_i2c.h"
+
+static int read_r16(struct i2c_client *client, u16 reg, u8 *val, int len)
+{
+ int ret;
+ u8 buf[2];
+ struct i2c_msg msg[2] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 2,
+ .buf = buf,
+ }, {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = val,
+ }
+ };
+
+ buf[0] = (reg >> 8) & 0xff;
+ buf[1] = (reg >> 0) & 0xff;
+
+ ret = i2c_transfer(client->adapter, msg, 2);
+ if (ret < 0)
+ return ret;
+ else if (ret != 2)
+ return -EREMOTEIO;
+ else
+ return 0;
+}
+
+static int write_r16(struct i2c_client *client, u16 reg, const u8 *val, int len)
+{
+ int ret;
+ u8 buf[4];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 2 + len,
+ .buf = buf,
+ }
+ };
+
+ if (2 + len > sizeof(buf))
+ return -EINVAL;
+
+ buf[0] = (reg >> 8) & 0xff;
+ buf[1] = (reg >> 0) & 0xff;
+ memcpy(&buf[2], val, len);
+
+ ret = i2c_transfer(client->adapter, msg, 1);
+ if (ret < 0)
+ return ret;
+ else if (ret != 1)
+ return -EREMOTEIO;
+ else
+ return 0;
+}
+
+int mgb4_i2c_init(struct mgb4_i2c_client *client, struct i2c_adapter *adap,
+ struct i2c_board_info const *info, int addr_size)
+{
+ client->client = i2c_new_client_device(adap, info);
+ if (IS_ERR(client->client))
+ return PTR_ERR(client->client);
+
+ client->addr_size = addr_size;
+
+ return 0;
+}
+
+void mgb4_i2c_free(struct mgb4_i2c_client *client)
+{
+ i2c_unregister_device(client->client);
+}
+
+s32 mgb4_i2c_read_byte(struct mgb4_i2c_client *client, u16 reg)
+{
+ int ret;
+ u8 b;
+
+ if (client->addr_size == 8)
+ return i2c_smbus_read_byte_data(client->client, reg);
+
+ ret = read_r16(client->client, reg, &b, 1);
+ if (ret < 0)
+ return ret;
+
+ return (s32)b;
+}
+
+s32 mgb4_i2c_write_byte(struct mgb4_i2c_client *client, u16 reg, u8 val)
+{
+ if (client->addr_size == 8)
+ return i2c_smbus_write_byte_data(client->client, reg, val);
+ else
+ return write_r16(client->client, reg, &val, 1);
+}
+
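+/*
+ * Read-modify-write helper - only the bits selected by mask are changed, the
+ * remaining bits keep their current value (read back from the device when
+ * needed).
+ */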
+s32 mgb4_i2c_mask_byte(struct mgb4_i2c_client *client, u16 reg, u8 mask, u8 val)
+{
+ s32 ret;
+
+ if (mask != 0xFF) {
+ ret = mgb4_i2c_read_byte(client, reg);
+ if (ret < 0)
+ return ret;
+ val |= (u8)ret & ~mask;
+ }
+
+ return mgb4_i2c_write_byte(client, reg, val);
+}
+
+int mgb4_i2c_configure(struct mgb4_i2c_client *client,
+ const struct mgb4_i2c_kv *values, size_t count)
+{
+ size_t i;
+ s32 res;
+
+ for (i = 0; i < count; i++) {
+ res = mgb4_i2c_mask_byte(client, values[i].reg, values[i].mask,
+ values[i].val);
+ if (res < 0)
+ return res;
+ }
+
+ return 0;
+}
diff --git a/drivers/media/pci/mgb4/mgb4_i2c.h b/drivers/media/pci/mgb4/mgb4_i2c.h
new file mode 100644
index 000000000000..fac6a1634474
--- /dev/null
+++ b/drivers/media/pci/mgb4/mgb4_i2c.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ * author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ */
+
+#ifndef __MGB4_I2C_H__
+#define __MGB4_I2C_H__
+
+#include <linux/i2c.h>
+
+struct mgb4_i2c_client {
+ struct i2c_client *client;
+ int addr_size;
+};
+
+struct mgb4_i2c_kv {
+ u16 reg;
+ u8 mask;
+ u8 val;
+};
+
+int mgb4_i2c_init(struct mgb4_i2c_client *client, struct i2c_adapter *adap,
+ struct i2c_board_info const *info, int addr_size);
+void mgb4_i2c_free(struct mgb4_i2c_client *client);
+
+s32 mgb4_i2c_read_byte(struct mgb4_i2c_client *client, u16 reg);
+s32 mgb4_i2c_write_byte(struct mgb4_i2c_client *client, u16 reg, u8 val);
+s32 mgb4_i2c_mask_byte(struct mgb4_i2c_client *client, u16 reg, u8 mask,
+ u8 val);
+
+int mgb4_i2c_configure(struct mgb4_i2c_client *client,
+ const struct mgb4_i2c_kv *values, size_t count);
+
+#endif
diff --git a/drivers/media/pci/mgb4/mgb4_io.h b/drivers/media/pci/mgb4/mgb4_io.h
new file mode 100644
index 000000000000..8698db1be4a9
--- /dev/null
+++ b/drivers/media/pci/mgb4/mgb4_io.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021-2022 Digiteq Automotive
+ * author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ */
+
+#ifndef __MGB4_IO_H__
+#define __MGB4_IO_H__
+
+#include <media/v4l2-dev.h>
+#include <media/videobuf2-v4l2.h>
+
+#define MGB4_DEFAULT_WIDTH 1280
+#define MGB4_DEFAULT_HEIGHT 640
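+/* Default frame period in 125 MHz clock ticks (60 FPS) */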
+#define MGB4_DEFAULT_PERIOD (125000000 / 60)
+
+/* Register access error indication */
+#define MGB4_ERR_NO_REG 0xFFFFFFFE
+/* Frame buffer addresses greater than 0xFFFFFFFA indicate HW errors */
+#define MGB4_ERR_QUEUE_TIMEOUT 0xFFFFFFFD
+#define MGB4_ERR_QUEUE_EMPTY 0xFFFFFFFC
+#define MGB4_ERR_QUEUE_FULL 0xFFFFFFFB
+
+struct mgb4_frame_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+static inline struct mgb4_frame_buffer *to_frame_buffer(struct vb2_v4l2_buffer *vbuf)
+{
+ return container_of(vbuf, struct mgb4_frame_buffer, vb);
+}
+
+#endif
diff --git a/drivers/media/pci/mgb4/mgb4_regs.c b/drivers/media/pci/mgb4/mgb4_regs.c
new file mode 100644
index 000000000000..53d4e4503a74
--- /dev/null
+++ b/drivers/media/pci/mgb4/mgb4_regs.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021-2022 Digiteq Automotive
+ * author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ */
+
+#include <linux/ioport.h>
+#include "mgb4_regs.h"
+
+int mgb4_regs_map(struct resource *res, struct mgb4_regs *regs)
+{
+ regs->mapbase = res->start;
+ regs->mapsize = res->end - res->start;
+
+ if (!request_mem_region(regs->mapbase, regs->mapsize, res->name))
+ return -EINVAL;
+ regs->membase = ioremap(regs->mapbase, regs->mapsize);
+ if (!regs->membase) {
+ release_mem_region(regs->mapbase, regs->mapsize);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void mgb4_regs_free(struct mgb4_regs *regs)
+{
+ iounmap(regs->membase);
+ release_mem_region(regs->mapbase, regs->mapsize);
+}
diff --git a/drivers/media/pci/mgb4/mgb4_regs.h b/drivers/media/pci/mgb4/mgb4_regs.h
new file mode 100644
index 000000000000..c45180890730
--- /dev/null
+++ b/drivers/media/pci/mgb4/mgb4_regs.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021-2022 Digiteq Automotive
+ * author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ */
+
+#ifndef __MGB4_REGS_H__
+#define __MGB4_REGS_H__
+
+#include <linux/io.h>
+
+struct mgb4_regs {
+ resource_size_t mapbase;
+ resource_size_t mapsize;
+ void __iomem *membase;
+};
+
+#define mgb4_write_reg(regs, offset, val) \
+ iowrite32(val, (regs)->membase + (offset))
+#define mgb4_read_reg(regs, offset) \
+ ioread32((regs)->membase + (offset))
+
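+/*
+ * Read-modify-write register update - only the bits selected by mask are
+ * changed, the remaining register bits are preserved.
+ */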
+static inline void mgb4_mask_reg(struct mgb4_regs *regs, u32 reg, u32 mask,
+ u32 val)
+{
+ u32 ret = mgb4_read_reg(regs, reg);
+
+ val |= ret & ~mask;
+ mgb4_write_reg(regs, reg, val);
+}
+
+int mgb4_regs_map(struct resource *res, struct mgb4_regs *regs);
+void mgb4_regs_free(struct mgb4_regs *regs);
+
+#endif
diff --git a/drivers/media/pci/mgb4/mgb4_sysfs.h b/drivers/media/pci/mgb4/mgb4_sysfs.h
new file mode 100644
index 000000000000..017d82c0624e
--- /dev/null
+++ b/drivers/media/pci/mgb4/mgb4_sysfs.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021-2022 Digiteq Automotive
+ * author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ */
+
+#ifndef __MGB4_SYSFS_H__
+#define __MGB4_SYSFS_H__
+
+#include <linux/sysfs.h>
+
+extern struct attribute *mgb4_pci_attrs[];
+extern struct attribute *mgb4_fpdl3_in_attrs[];
+extern struct attribute *mgb4_gmsl_in_attrs[];
+extern struct attribute *mgb4_fpdl3_out_attrs[];
+extern struct attribute *mgb4_gmsl_out_attrs[];
+
+#endif
diff --git a/drivers/media/pci/mgb4/mgb4_sysfs_in.c b/drivers/media/pci/mgb4/mgb4_sysfs_in.c
new file mode 100644
index 000000000000..0ba66a2cf145
--- /dev/null
+++ b/drivers/media/pci/mgb4/mgb4_sysfs_in.c
@@ -0,0 +1,772 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ * author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ *
+ * This module handles all the sysfs info/configuration that is related to the
+ * v4l2 input devices.
+ */
+
+#include <linux/device.h>
+#include "mgb4_core.h"
+#include "mgb4_i2c.h"
+#include "mgb4_vin.h"
+#include "mgb4_cmt.h"
+#include "mgb4_sysfs.h"
+
+/* Common for both FPDL3 and GMSL */
+
+static ssize_t input_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+
+ return sprintf(buf, "%d\n", vindev->config->id);
+}
+
+static ssize_t oldi_lane_width_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ struct mgb4_dev *mgbdev = vindev->mgbdev;
+ u16 i2c_reg;
+ u8 i2c_mask, i2c_single_val, i2c_dual_val;
+ u32 config;
+ int ret;
+
+ i2c_reg = MGB4_IS_GMSL(mgbdev) ? 0x1CE : 0x49;
+ i2c_mask = MGB4_IS_GMSL(mgbdev) ? 0x0E : 0x03;
+ i2c_single_val = MGB4_IS_GMSL(mgbdev) ? 0x00 : 0x02;
+ i2c_dual_val = MGB4_IS_GMSL(mgbdev) ? 0x0E : 0x00;
+
+ mutex_lock(&mgbdev->i2c_lock);
+ ret = mgb4_i2c_read_byte(&vindev->deser, i2c_reg);
+ mutex_unlock(&mgbdev->i2c_lock);
+ if (ret < 0)
+ return -EIO;
+
+ config = mgb4_read_reg(&mgbdev->video, vindev->config->regs.config);
+
+ if (((config & (1U << 9)) && ((ret & i2c_mask) != i2c_dual_val)) ||
+ (!(config & (1U << 9)) && ((ret & i2c_mask) != i2c_single_val))) {
+ dev_err(dev, "I2C/FPGA register value mismatch\n");
+ return -EINVAL;
+ }
+
+ return sprintf(buf, "%s\n", config & (1U << 9) ? "1" : "0");
+}
+
+/*
+ * OLDI lane width change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t oldi_lane_width_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ struct mgb4_dev *mgbdev = vindev->mgbdev;
+ u32 fpga_data;
+ u16 i2c_reg;
+ u8 i2c_mask, i2c_data;
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ switch (val) {
+ case 0: /* single */
+ fpga_data = 0;
+ i2c_data = MGB4_IS_GMSL(mgbdev) ? 0x00 : 0x02;
+ break;
+ case 1: /* dual */
+ fpga_data = 1U << 9;
+ i2c_data = MGB4_IS_GMSL(mgbdev) ? 0x0E : 0x00;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ i2c_reg = MGB4_IS_GMSL(mgbdev) ? 0x1CE : 0x49;
+ i2c_mask = MGB4_IS_GMSL(mgbdev) ? 0x0E : 0x03;
+
+ mutex_lock(&mgbdev->i2c_lock);
+ ret = mgb4_i2c_mask_byte(&vindev->deser, i2c_reg, i2c_mask, i2c_data);
+ mutex_unlock(&mgbdev->i2c_lock);
+ if (ret < 0)
+ return -EIO;
+ mgb4_mask_reg(&mgbdev->video, vindev->config->regs.config, 1U << 9,
+ fpga_data);
+ if (MGB4_IS_GMSL(mgbdev)) {
+ /* reset input link */
+ mutex_lock(&mgbdev->i2c_lock);
+ ret = mgb4_i2c_mask_byte(&vindev->deser, 0x10, 1U << 5, 1U << 5);
+ mutex_unlock(&mgbdev->i2c_lock);
+ if (ret < 0)
+ return -EIO;
+ }
+
+ return count;
+}
+
+static ssize_t color_mapping_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ u32 config = mgb4_read_reg(&vindev->mgbdev->video,
+ vindev->config->regs.config);
+
+ return sprintf(buf, "%s\n", config & (1U << 8) ? "0" : "1");
+}
+
+/*
+ * Color mapping change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t color_mapping_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ u32 fpga_data;
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ switch (val) {
+ case 0: /* OLDI/JEIDA */
+ fpga_data = (1U << 8);
+ break;
+ case 1: /* SPWG/VESA */
+ fpga_data = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mgb4_mask_reg(&vindev->mgbdev->video, vindev->config->regs.config,
+ 1U << 8, fpga_data);
+
+ return count;
+}
+
+static ssize_t link_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ u32 status = mgb4_read_reg(&vindev->mgbdev->video,
+ vindev->config->regs.status);
+
+ return sprintf(buf, "%s\n", status & (1U << 2) ? "1" : "0");
+}
+
+static ssize_t stream_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ u32 status = mgb4_read_reg(&vindev->mgbdev->video,
+ vindev->config->regs.status);
+
+ return sprintf(buf, "%s\n", ((status & (1 << 14)) &&
+ (status & (1 << 2)) && (status & (3 << 9))) ? "1" : "0");
+}
+
+static ssize_t video_width_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ u32 config = mgb4_read_reg(&vindev->mgbdev->video,
+ vindev->config->regs.resolution);
+
+ return sprintf(buf, "%u\n", config >> 16);
+}
+
+static ssize_t video_height_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ u32 config = mgb4_read_reg(&vindev->mgbdev->video,
+ vindev->config->regs.resolution);
+
+ return sprintf(buf, "%u\n", config & 0xFFFF);
+}
+
+static ssize_t hsync_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ u32 status = mgb4_read_reg(&vindev->mgbdev->video,
+ vindev->config->regs.status);
+ u32 res;
+
+ if (!(status & (1U << 11)))
+ res = 0x02; // not available
+ else if (status & (1U << 12))
+ res = 0x01; // active high
+ else
+ res = 0x00; // active low
+
+ return sprintf(buf, "%u\n", res);
+}
+
+static ssize_t vsync_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ u32 status = mgb4_read_reg(&vindev->mgbdev->video,
+ vindev->config->regs.status);
+ u32 res;
+
+ if (!(status & (1U << 11)))
+ res = 0x02; // not available
+ else if (status & (1U << 13))
+ res = 0x01; // active high
+ else
+ res = 0x00; // active low
+
+ return sprintf(buf, "%u\n", res);
+}
+
+static ssize_t hsync_gap_length_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ u32 sync = mgb4_read_reg(&vindev->mgbdev->video,
+ vindev->config->regs.sync);
+
+ return sprintf(buf, "%u\n", sync >> 16);
+}
+
+/*
+ * HSYNC gap length change is expected to be called on live streams. Video
+ * device locking/queue check is not needed.
+ */
+static ssize_t hsync_gap_length_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+ if (val > 0xFFFF)
+ return -EINVAL;
+
+ mgb4_mask_reg(&vindev->mgbdev->video, vindev->config->regs.sync,
+ 0xFFFF0000, val << 16);
+
+ return count;
+}
+
+static ssize_t vsync_gap_length_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ u32 sync = mgb4_read_reg(&vindev->mgbdev->video,
+ vindev->config->regs.sync);
+
+ return sprintf(buf, "%u\n", sync & 0xFFFF);
+}
+
+/*
+ * VSYNC gap length change is expected to be called on live streams. Video
+ * device locking/queue check is not needed.
+ */
+static ssize_t vsync_gap_length_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+ if (val > 0xFFFF)
+ return -EINVAL;
+
+ mgb4_mask_reg(&vindev->mgbdev->video, vindev->config->regs.sync, 0xFFFF,
+ val);
+
+ return count;
+}
+
+static ssize_t pclk_frequency_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ u32 freq = mgb4_read_reg(&vindev->mgbdev->video,
+ vindev->config->regs.pclk);
+
+ return sprintf(buf, "%u\n", freq);
+}
+
+static ssize_t hsync_width_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ u32 sig = mgb4_read_reg(&vindev->mgbdev->video,
+ vindev->config->regs.signal);
+
+ return sprintf(buf, "%u\n", (sig & 0x00FF0000) >> 16);
+}
+
+static ssize_t vsync_width_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ u32 sig = mgb4_read_reg(&vindev->mgbdev->video,
+ vindev->config->regs.signal2);
+
+ return sprintf(buf, "%u\n", (sig & 0x00FF0000) >> 16);
+}
+
+static ssize_t hback_porch_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ u32 sig = mgb4_read_reg(&vindev->mgbdev->video,
+ vindev->config->regs.signal);
+
+ return sprintf(buf, "%u\n", (sig & 0x0000FF00) >> 8);
+}
+
+static ssize_t hfront_porch_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ u32 sig = mgb4_read_reg(&vindev->mgbdev->video,
+ vindev->config->regs.signal);
+
+ return sprintf(buf, "%u\n", (sig & 0x000000FF));
+}
+
+static ssize_t vback_porch_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ u32 sig = mgb4_read_reg(&vindev->mgbdev->video,
+ vindev->config->regs.signal2);
+
+ return sprintf(buf, "%u\n", (sig & 0x0000FF00) >> 8);
+}
+
+static ssize_t vfront_porch_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ u32 sig = mgb4_read_reg(&vindev->mgbdev->video,
+ vindev->config->regs.signal2);
+
+ return sprintf(buf, "%u\n", (sig & 0x000000FF));
+}
+
+static ssize_t frequency_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+
+ return sprintf(buf, "%d\n", vindev->freq_range);
+}
+
+static ssize_t frequency_range_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+ if (val > 1)
+ return -EINVAL;
+
+ mutex_lock(vindev->vdev.lock);
+ if (vb2_is_busy(vindev->vdev.queue)) {
+ mutex_unlock(vindev->vdev.lock);
+ return -EBUSY;
+ }
+
+ mgb4_cmt_set_vin_freq_range(vindev, val);
+ vindev->freq_range = val;
+
+ mutex_unlock(vindev->vdev.lock);
+
+ return count;
+}
+
+/* FPDL3 only */
+
+static ssize_t fpdl3_input_width_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ s32 ret;
+
+ mutex_lock(&vindev->mgbdev->i2c_lock);
+ ret = mgb4_i2c_read_byte(&vindev->deser, 0x34);
+ mutex_unlock(&vindev->mgbdev->i2c_lock);
+ if (ret < 0)
+ return -EIO;
+
+ switch ((u8)ret & 0x18) {
+ case 0:
+ return sprintf(buf, "0\n");
+ case 0x10:
+ return sprintf(buf, "1\n");
+ case 0x08:
+ return sprintf(buf, "2\n");
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * FPD-Link width change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t fpdl3_input_width_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ u8 i2c_data;
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ switch (val) {
+ case 0: /* auto */
+ i2c_data = 0x00;
+ break;
+ case 1: /* single */
+ i2c_data = 0x10;
+ break;
+ case 2: /* dual */
+ i2c_data = 0x08;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&vindev->mgbdev->i2c_lock);
+ ret = mgb4_i2c_mask_byte(&vindev->deser, 0x34, 0x18, i2c_data);
+ mutex_unlock(&vindev->mgbdev->i2c_lock);
+ if (ret < 0)
+ return -EIO;
+
+ return count;
+}
+
+/* GMSL only */
+
+static ssize_t gmsl_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ s32 r1, r300, r3;
+
+ mutex_lock(&vindev->mgbdev->i2c_lock);
+ r1 = mgb4_i2c_read_byte(&vindev->deser, 0x01);
+ r300 = mgb4_i2c_read_byte(&vindev->deser, 0x300);
+ r3 = mgb4_i2c_read_byte(&vindev->deser, 0x03);
+ mutex_unlock(&vindev->mgbdev->i2c_lock);
+ if (r1 < 0 || r300 < 0 || r3 < 0)
+ return -EIO;
+
+ if ((r1 & 0x03) == 0x03 && (r300 & 0x0C) == 0x0C && (r3 & 0xC0) == 0xC0)
+ return sprintf(buf, "0\n");
+ else if ((r1 & 0x03) == 0x02 && (r300 & 0x0C) == 0x08 && (r3 & 0xC0) == 0x00)
+ return sprintf(buf, "1\n");
+ else if ((r1 & 0x03) == 0x01 && (r300 & 0x0C) == 0x04 && (r3 & 0xC0) == 0x00)
+ return sprintf(buf, "2\n");
+ else if ((r1 & 0x03) == 0x00 && (r300 & 0x0C) == 0x00 && (r3 & 0xC0) == 0x00)
+ return sprintf(buf, "3\n");
+ else
+ return -EINVAL;
+}
+
+/*
+ * GMSL mode change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t gmsl_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ static const struct mgb4_i2c_kv G12[] = {
+ {0x01, 0x03, 0x03}, {0x300, 0x0C, 0x0C}, {0x03, 0xC0, 0xC0}};
+ static const struct mgb4_i2c_kv G6[] = {
+ {0x01, 0x03, 0x02}, {0x300, 0x0C, 0x08}, {0x03, 0xC0, 0x00}};
+ static const struct mgb4_i2c_kv G3[] = {
+ {0x01, 0x03, 0x01}, {0x300, 0x0C, 0x04}, {0x03, 0xC0, 0x00}};
+ static const struct mgb4_i2c_kv G1[] = {
+ {0x01, 0x03, 0x00}, {0x300, 0x0C, 0x00}, {0x03, 0xC0, 0x00}};
+ static const struct mgb4_i2c_kv reset[] = {
+ {0x10, 1U << 5, 1U << 5}, {0x300, 1U << 6, 1U << 6}};
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ const struct mgb4_i2c_kv *values;
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ switch (val) {
+ case 0: /* 12Gb/s */
+ values = G12;
+ break;
+ case 1: /* 6Gb/s */
+ values = G6;
+ break;
+ case 2: /* 3Gb/s */
+ values = G3;
+ break;
+ case 3: /* 1.5Gb/s */
+ values = G1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&vindev->mgbdev->i2c_lock);
+ ret = mgb4_i2c_configure(&vindev->deser, values, 3);
+ ret |= mgb4_i2c_configure(&vindev->deser, reset, 2);
+ mutex_unlock(&vindev->mgbdev->i2c_lock);
+ if (ret < 0)
+ return -EIO;
+
+ return count;
+}
+
+static ssize_t gmsl_stream_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ s32 ret;
+
+ mutex_lock(&vindev->mgbdev->i2c_lock);
+ ret = mgb4_i2c_read_byte(&vindev->deser, 0xA0);
+ mutex_unlock(&vindev->mgbdev->i2c_lock);
+ if (ret < 0)
+ return -EIO;
+
+ return sprintf(buf, "%d\n", ret & 0x03);
+}
+
+static ssize_t gmsl_stream_id_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+ if (val > 3)
+ return -EINVAL;
+
+ mutex_lock(vindev->vdev.lock);
+ if (vb2_is_busy(vindev->vdev.queue)) {
+ mutex_unlock(vindev->vdev.lock);
+ return -EBUSY;
+ }
+
+ mutex_lock(&vindev->mgbdev->i2c_lock);
+ ret = mgb4_i2c_mask_byte(&vindev->deser, 0xA0, 0x03, (u8)val);
+ mutex_unlock(&vindev->mgbdev->i2c_lock);
+
+ mutex_unlock(vindev->vdev.lock);
+
+ return (ret < 0) ? -EIO : count;
+}
+
+static ssize_t gmsl_fec_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ s32 r3e0, r308;
+
+ mutex_lock(&vindev->mgbdev->i2c_lock);
+ r3e0 = mgb4_i2c_read_byte(&vindev->deser, 0x3E0);
+ r308 = mgb4_i2c_read_byte(&vindev->deser, 0x308);
+ mutex_unlock(&vindev->mgbdev->i2c_lock);
+ if (r3e0 < 0 || r308 < 0)
+ return -EIO;
+
+ if ((r3e0 & 0x07) == 0x00 && (r308 & 0x01) == 0x00)
+ return sprintf(buf, "0\n");
+ else if ((r3e0 & 0x07) == 0x07 && (r308 & 0x01) == 0x01)
+ return sprintf(buf, "1\n");
+ else
+ return -EINVAL;
+}
+
+/*
+ * GMSL FEC change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t gmsl_fec_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+ static const struct mgb4_i2c_kv enable[] = {
+ {0x3E0, 0x07, 0x07}, {0x308, 0x01, 0x01}};
+ static const struct mgb4_i2c_kv disable[] = {
+ {0x3E0, 0x07, 0x00}, {0x308, 0x01, 0x00}};
+ static const struct mgb4_i2c_kv reset[] = {
+ {0x10, 1U << 5, 1U << 5}, {0x300, 1U << 6, 1U << 6}};
+ const struct mgb4_i2c_kv *values;
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ switch (val) {
+ case 0: /* disabled */
+ values = disable;
+ break;
+ case 1: /* enabled */
+ values = enable;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&vindev->mgbdev->i2c_lock);
+ ret = mgb4_i2c_configure(&vindev->deser, values, 2);
+ ret |= mgb4_i2c_configure(&vindev->deser, reset, 2);
+ mutex_unlock(&vindev->mgbdev->i2c_lock);
+ if (ret < 0)
+ return -EIO;
+
+ return count;
+}
+
+static DEVICE_ATTR_RO(input_id);
+static DEVICE_ATTR_RW(oldi_lane_width);
+static DEVICE_ATTR_RW(color_mapping);
+static DEVICE_ATTR_RO(link_status);
+static DEVICE_ATTR_RO(stream_status);
+static DEVICE_ATTR_RO(video_width);
+static DEVICE_ATTR_RO(video_height);
+static DEVICE_ATTR_RO(hsync_status);
+static DEVICE_ATTR_RO(vsync_status);
+static DEVICE_ATTR_RW(hsync_gap_length);
+static DEVICE_ATTR_RW(vsync_gap_length);
+static DEVICE_ATTR_RO(pclk_frequency);
+static DEVICE_ATTR_RO(hsync_width);
+static DEVICE_ATTR_RO(vsync_width);
+static DEVICE_ATTR_RO(hback_porch);
+static DEVICE_ATTR_RO(hfront_porch);
+static DEVICE_ATTR_RO(vback_porch);
+static DEVICE_ATTR_RO(vfront_porch);
+static DEVICE_ATTR_RW(frequency_range);
+
+static DEVICE_ATTR_RW(fpdl3_input_width);
+
+static DEVICE_ATTR_RW(gmsl_mode);
+static DEVICE_ATTR_RW(gmsl_stream_id);
+static DEVICE_ATTR_RW(gmsl_fec);
+
+struct attribute *mgb4_fpdl3_in_attrs[] = {
+ &dev_attr_input_id.attr,
+ &dev_attr_link_status.attr,
+ &dev_attr_stream_status.attr,
+ &dev_attr_video_width.attr,
+ &dev_attr_video_height.attr,
+ &dev_attr_hsync_status.attr,
+ &dev_attr_vsync_status.attr,
+ &dev_attr_oldi_lane_width.attr,
+ &dev_attr_color_mapping.attr,
+ &dev_attr_hsync_gap_length.attr,
+ &dev_attr_vsync_gap_length.attr,
+ &dev_attr_pclk_frequency.attr,
+ &dev_attr_hsync_width.attr,
+ &dev_attr_vsync_width.attr,
+ &dev_attr_hback_porch.attr,
+ &dev_attr_hfront_porch.attr,
+ &dev_attr_vback_porch.attr,
+ &dev_attr_vfront_porch.attr,
+ &dev_attr_frequency_range.attr,
+ &dev_attr_fpdl3_input_width.attr,
+ NULL
+};
+
+struct attribute *mgb4_gmsl_in_attrs[] = {
+ &dev_attr_input_id.attr,
+ &dev_attr_link_status.attr,
+ &dev_attr_stream_status.attr,
+ &dev_attr_video_width.attr,
+ &dev_attr_video_height.attr,
+ &dev_attr_hsync_status.attr,
+ &dev_attr_vsync_status.attr,
+ &dev_attr_oldi_lane_width.attr,
+ &dev_attr_color_mapping.attr,
+ &dev_attr_hsync_gap_length.attr,
+ &dev_attr_vsync_gap_length.attr,
+ &dev_attr_pclk_frequency.attr,
+ &dev_attr_hsync_width.attr,
+ &dev_attr_vsync_width.attr,
+ &dev_attr_hback_porch.attr,
+ &dev_attr_hfront_porch.attr,
+ &dev_attr_vback_porch.attr,
+ &dev_attr_vfront_porch.attr,
+ &dev_attr_frequency_range.attr,
+ &dev_attr_gmsl_mode.attr,
+ &dev_attr_gmsl_stream_id.attr,
+ &dev_attr_gmsl_fec.attr,
+ NULL
+};
diff --git a/drivers/media/pci/mgb4/mgb4_sysfs_out.c b/drivers/media/pci/mgb4/mgb4_sysfs_out.c
new file mode 100644
index 000000000000..9f6e81c57726
--- /dev/null
+++ b/drivers/media/pci/mgb4/mgb4_sysfs_out.c
@@ -0,0 +1,740 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ * author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ *
+ * This module handles all the sysfs info/configuration that is related to the
+ * v4l2 output devices.
+ */
+
+#include <linux/device.h>
+#include <linux/nospec.h>
+#include "mgb4_core.h"
+#include "mgb4_i2c.h"
+#include "mgb4_vout.h"
+#include "mgb4_vin.h"
+#include "mgb4_cmt.h"
+#include "mgb4_sysfs.h"
+
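+/* Count the outputs that use the given input as their loopback video source */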
+static int loopin_cnt(struct mgb4_vin_dev *vindev)
+{
+ struct mgb4_vout_dev *voutdev;
+ u32 config;
+ int i, cnt = 0;
+
+ for (i = 0; i < MGB4_VOUT_DEVICES; i++) {
+ voutdev = vindev->mgbdev->vout[i];
+ if (!voutdev)
+ continue;
+
+ config = mgb4_read_reg(&voutdev->mgbdev->video,
+ voutdev->config->regs.config);
+ if ((config & 0xc) >> 2 == vindev->config->id)
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static bool is_busy(struct video_device *dev)
+{
+ bool ret;
+
+ mutex_lock(dev->lock);
+ ret = vb2_is_busy(dev->queue);
+ mutex_unlock(dev->lock);
+
+ return ret;
+}
+
+/* Common for both FPDL3 and GMSL */
+
+static ssize_t output_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+
+ return sprintf(buf, "%d\n", voutdev->config->id);
+}
+
+static ssize_t video_source_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ u32 config = mgb4_read_reg(&voutdev->mgbdev->video,
+ voutdev->config->regs.config);
+
+ return sprintf(buf, "%u\n", (config & 0xc) >> 2);
+}
+
+/*
+ * A video source change may affect the buffer queue of ANY video input/output
+ * on the card, thus if any of the inputs/outputs is in use, we do not allow
+ * the change.
+ *
+ * As we do not want to lock all the video devices at the same time, a two-stage
+ * locking strategy is used. In addition to the video device locking there is
+ * a global (PCI device) variable "io_reconfig" atomically checked/set when
+ * the reconfiguration is running. All the video devices check the variable in
+ * their queue_setup() functions and do not allow the queue to be started when
+ * the reconfiguration is in progress.
+ */
+static ssize_t video_source_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ struct mgb4_dev *mgbdev = voutdev->mgbdev;
+ struct mgb4_vin_dev *loopin_new = NULL, *loopin_old = NULL;
+ unsigned long val;
+ ssize_t ret;
+ u32 config;
+ int i;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+ if (val > 3)
+ return -EINVAL;
+
+ if (test_and_set_bit(0, &mgbdev->io_reconfig))
+ return -EBUSY;
+
+ ret = -EBUSY;
+ for (i = 0; i < MGB4_VIN_DEVICES; i++)
+ if (mgbdev->vin[i] && is_busy(&mgbdev->vin[i]->vdev))
+ goto end;
+ for (i = 0; i < MGB4_VOUT_DEVICES; i++)
+ if (mgbdev->vout[i] && is_busy(&mgbdev->vout[i]->vdev))
+ goto end;
+
+ config = mgb4_read_reg(&mgbdev->video, voutdev->config->regs.config);
+
+ if (((config & 0xc) >> 2) < MGB4_VIN_DEVICES)
+ loopin_old = mgbdev->vin[(config & 0xc) >> 2];
+ if (val < MGB4_VIN_DEVICES) {
+ val = array_index_nospec(val, MGB4_VIN_DEVICES);
+ loopin_new = mgbdev->vin[val];
+ }
+ if (loopin_old && loopin_cnt(loopin_old) == 1)
+ mgb4_mask_reg(&mgbdev->video, loopin_old->config->regs.config,
+ 0x2, 0x0);
+ if (loopin_new)
+ mgb4_mask_reg(&mgbdev->video, loopin_new->config->regs.config,
+ 0x2, 0x2);
+
+ if (val == voutdev->config->id + MGB4_VIN_DEVICES)
+ mgb4_write_reg(&mgbdev->video, voutdev->config->regs.config,
+ config & ~(1 << 1));
+ else
+ mgb4_write_reg(&mgbdev->video, voutdev->config->regs.config,
+ config | (1U << 1));
+
+ mgb4_mask_reg(&mgbdev->video, voutdev->config->regs.config, 0xc,
+ val << 2);
+
+ ret = count;
+end:
+ clear_bit(0, &mgbdev->io_reconfig);
+
+ return ret;
+}
+
+static ssize_t display_width_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ u32 config = mgb4_read_reg(&voutdev->mgbdev->video,
+ voutdev->config->regs.resolution);
+
+ return sprintf(buf, "%u\n", config >> 16);
+}
+
+static ssize_t display_width_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+ if (val > 0xFFFF)
+ return -EINVAL;
+
+ mutex_lock(voutdev->vdev.lock);
+ if (vb2_is_busy(voutdev->vdev.queue)) {
+ mutex_unlock(voutdev->vdev.lock);
+ return -EBUSY;
+ }
+
+ mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.resolution,
+ 0xFFFF0000, val << 16);
+
+ mutex_unlock(voutdev->vdev.lock);
+
+ return count;
+}
+
+static ssize_t display_height_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ u32 config = mgb4_read_reg(&voutdev->mgbdev->video,
+ voutdev->config->regs.resolution);
+
+ return sprintf(buf, "%u\n", config & 0xFFFF);
+}
+
+static ssize_t display_height_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+ if (val > 0xFFFF)
+ return -EINVAL;
+
+ mutex_lock(voutdev->vdev.lock);
+ if (vb2_is_busy(voutdev->vdev.queue)) {
+ mutex_unlock(voutdev->vdev.lock);
+ return -EBUSY;
+ }
+
+ mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.resolution,
+ 0xFFFF, val);
+
+ mutex_unlock(voutdev->vdev.lock);
+
+ return count;
+}
+
+static ssize_t frame_rate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ u32 period = mgb4_read_reg(&voutdev->mgbdev->video,
+ voutdev->config->regs.frame_period);
+
+ return sprintf(buf, "%u\n", 125000000 / period);
+}
+
+/*
+ * Frame rate change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t frame_rate_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+ if (!val)
+ return -EINVAL;
+
+ mgb4_write_reg(&voutdev->mgbdev->video,
+ voutdev->config->regs.frame_period, 125000000 / val);
+
+ return count;
+}
+
+static ssize_t hsync_width_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ u32 sig = mgb4_read_reg(&voutdev->mgbdev->video,
+ voutdev->config->regs.hsync);
+
+ return sprintf(buf, "%u\n", (sig & 0x00FF0000) >> 16);
+}
+
+/*
+ * HSYNC width change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t hsync_width_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+ if (val > 0xFF)
+ return -EINVAL;
+
+ mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.hsync,
+ 0x00FF0000, val << 16);
+
+ return count;
+}
+
+static ssize_t vsync_width_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ u32 sig = mgb4_read_reg(&voutdev->mgbdev->video,
+ voutdev->config->regs.vsync);
+
+ return sprintf(buf, "%u\n", (sig & 0x00FF0000) >> 16);
+}
+
+/*
+ * VSYNC width change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t vsync_width_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+ if (val > 0xFF)
+ return -EINVAL;
+
+ mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.vsync,
+ 0x00FF0000, val << 16);
+
+ return count;
+}
+
+static ssize_t hback_porch_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ u32 sig = mgb4_read_reg(&voutdev->mgbdev->video,
+ voutdev->config->regs.hsync);
+
+ return sprintf(buf, "%u\n", (sig & 0x0000FF00) >> 8);
+}
+
+/*
+ * hback porch change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t hback_porch_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+ if (val > 0xFF)
+ return -EINVAL;
+
+ mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.hsync,
+ 0x0000FF00, val << 8);
+
+ return count;
+}
+
+static ssize_t vback_porch_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ u32 sig = mgb4_read_reg(&voutdev->mgbdev->video,
+ voutdev->config->regs.vsync);
+
+ return sprintf(buf, "%u\n", (sig & 0x0000FF00) >> 8);
+}
+
+/*
+ * vback porch change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t vback_porch_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+ if (val > 0xFF)
+ return -EINVAL;
+
+ mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.vsync,
+ 0x0000FF00, val << 8);
+
+ return count;
+}
+
+static ssize_t hfront_porch_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ u32 sig = mgb4_read_reg(&voutdev->mgbdev->video,
+ voutdev->config->regs.hsync);
+
+ return sprintf(buf, "%u\n", (sig & 0x000000FF));
+}
+
+/*
+ * hfront porch change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t hfront_porch_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+ if (val > 0xFF)
+ return -EINVAL;
+
+ mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.hsync,
+ 0x000000FF, val);
+
+ return count;
+}
+
+static ssize_t vfront_porch_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ u32 sig = mgb4_read_reg(&voutdev->mgbdev->video,
+ voutdev->config->regs.vsync);
+
+ return sprintf(buf, "%u\n", (sig & 0x000000FF));
+}
+
+/*
+ * vfront porch change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t vfront_porch_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+ if (val > 0xFF)
+ return -EINVAL;
+
+ mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.vsync,
+ 0x000000FF, val);
+
+ return count;
+}
+
+/* FPDL3 only */
+
+static ssize_t hsync_polarity_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ u32 config = mgb4_read_reg(&voutdev->mgbdev->video,
+ voutdev->config->regs.hsync);
+
+ return sprintf(buf, "%u\n", (config & (1U << 31)) >> 31);
+}
+
+/*
+ * HSYNC polarity change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t hsync_polarity_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+ if (val > 1)
+ return -EINVAL;
+
+ mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.hsync,
+ (1U << 31), val << 31);
+
+ return count;
+}
+
+static ssize_t vsync_polarity_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ u32 config = mgb4_read_reg(&voutdev->mgbdev->video,
+ voutdev->config->regs.vsync);
+
+ return sprintf(buf, "%u\n", (config & (1U << 31)) >> 31);
+}
+
+/*
+ * VSYNC polarity change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t vsync_polarity_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+ if (val > 1)
+ return -EINVAL;
+
+ mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.vsync,
+ (1U << 31), val << 31);
+
+ return count;
+}
+
+static ssize_t de_polarity_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ u32 config = mgb4_read_reg(&voutdev->mgbdev->video,
+ voutdev->config->regs.vsync);
+
+ return sprintf(buf, "%u\n", (config & (1U << 30)) >> 30);
+}
+
+/*
+ * DE polarity change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t de_polarity_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+ if (val > 1)
+ return -EINVAL;
+
+ mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.vsync,
+ (1U << 30), val << 30);
+
+ return count;
+}
+
+static ssize_t fpdl3_output_width_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ s32 ret;
+
+ mutex_lock(&voutdev->mgbdev->i2c_lock);
+ ret = mgb4_i2c_read_byte(&voutdev->ser, 0x5B);
+ mutex_unlock(&voutdev->mgbdev->i2c_lock);
+ if (ret < 0)
+ return -EIO;
+
+ switch ((u8)ret & 0x03) {
+ case 0:
+ return sprintf(buf, "0\n");
+ case 1:
+ return sprintf(buf, "1\n");
+ case 3:
+ return sprintf(buf, "2\n");
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * FPD-Link width change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t fpdl3_output_width_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ u8 i2c_data;
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ switch (val) {
+ case 0: /* auto */
+ i2c_data = 0x00;
+ break;
+ case 1: /* single */
+ i2c_data = 0x01;
+ break;
+ case 2: /* dual */
+ i2c_data = 0x03;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&voutdev->mgbdev->i2c_lock);
+ ret = mgb4_i2c_mask_byte(&voutdev->ser, 0x5B, 0x03, i2c_data);
+ mutex_unlock(&voutdev->mgbdev->i2c_lock);
+ if (ret < 0)
+ return -EIO;
+
+ return count;
+}
+
+static ssize_t pclk_frequency_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+
+ return sprintf(buf, "%u\n", voutdev->freq);
+}
+
+static ssize_t pclk_frequency_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+ unsigned long val;
+ int ret;
+ unsigned int dp;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(voutdev->vdev.lock);
+ if (vb2_is_busy(voutdev->vdev.queue)) {
+ mutex_unlock(voutdev->vdev.lock);
+ return -EBUSY;
+ }
+
+ dp = (val > 50000) ? 1 : 0;
+ voutdev->freq = mgb4_cmt_set_vout_freq(voutdev, val >> dp) << dp;
+
+ mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.config,
+ 0x10, dp << 4);
+ mutex_lock(&voutdev->mgbdev->i2c_lock);
+ ret = mgb4_i2c_mask_byte(&voutdev->ser, 0x4F, 1 << 6, ((~dp) & 1) << 6);
+ mutex_unlock(&voutdev->mgbdev->i2c_lock);
+
+ mutex_unlock(voutdev->vdev.lock);
+
+ return (ret < 0) ? -EIO : count;
+}
+
+static DEVICE_ATTR_RO(output_id);
+static DEVICE_ATTR_RW(video_source);
+static DEVICE_ATTR_RW(display_width);
+static DEVICE_ATTR_RW(display_height);
+static DEVICE_ATTR_RW(frame_rate);
+static DEVICE_ATTR_RW(hsync_polarity);
+static DEVICE_ATTR_RW(vsync_polarity);
+static DEVICE_ATTR_RW(de_polarity);
+static DEVICE_ATTR_RW(pclk_frequency);
+static DEVICE_ATTR_RW(hsync_width);
+static DEVICE_ATTR_RW(vsync_width);
+static DEVICE_ATTR_RW(hback_porch);
+static DEVICE_ATTR_RW(hfront_porch);
+static DEVICE_ATTR_RW(vback_porch);
+static DEVICE_ATTR_RW(vfront_porch);
+
+static DEVICE_ATTR_RW(fpdl3_output_width);
+
+struct attribute *mgb4_fpdl3_out_attrs[] = {
+ &dev_attr_output_id.attr,
+ &dev_attr_video_source.attr,
+ &dev_attr_display_width.attr,
+ &dev_attr_display_height.attr,
+ &dev_attr_frame_rate.attr,
+ &dev_attr_hsync_polarity.attr,
+ &dev_attr_vsync_polarity.attr,
+ &dev_attr_de_polarity.attr,
+ &dev_attr_pclk_frequency.attr,
+ &dev_attr_hsync_width.attr,
+ &dev_attr_vsync_width.attr,
+ &dev_attr_hback_porch.attr,
+ &dev_attr_hfront_porch.attr,
+ &dev_attr_vback_porch.attr,
+ &dev_attr_vfront_porch.attr,
+ &dev_attr_fpdl3_output_width.attr,
+ NULL
+};
+
+struct attribute *mgb4_gmsl_out_attrs[] = {
+ &dev_attr_output_id.attr,
+ &dev_attr_video_source.attr,
+ &dev_attr_display_width.attr,
+ &dev_attr_display_height.attr,
+ &dev_attr_frame_rate.attr,
+ NULL
+};
diff --git a/drivers/media/pci/mgb4/mgb4_sysfs_pci.c b/drivers/media/pci/mgb4/mgb4_sysfs_pci.c
new file mode 100644
index 000000000000..d26935ff956b
--- /dev/null
+++ b/drivers/media/pci/mgb4/mgb4_sysfs_pci.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021-2022 Digiteq Automotive
+ * author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ *
+ * This module handles all the sysfs info/configuration that is related to the
+ * PCI card device.
+ */
+
+#include <linux/device.h>
+#include "mgb4_core.h"
+#include "mgb4_sysfs.h"
+
+static ssize_t module_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mgb4_dev *mgbdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", mgbdev->module_version & 0x0F);
+}
+
+static ssize_t module_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mgb4_dev *mgbdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", mgbdev->module_version >> 4);
+}
+
+static ssize_t fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mgb4_dev *mgbdev = dev_get_drvdata(dev);
+ u32 config = mgb4_read_reg(&mgbdev->video, 0xC4);
+
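+	/* Register 0xC4: bits [15:0] hold the firmware version, bits [31:24] the firmware type */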
+ return sprintf(buf, "%u\n", config & 0xFFFF);
+}
+
+static ssize_t fw_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mgb4_dev *mgbdev = dev_get_drvdata(dev);
+ u32 config = mgb4_read_reg(&mgbdev->video, 0xC4);
+
+ return sprintf(buf, "%u\n", config >> 24);
+}
+
+static ssize_t serial_number_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mgb4_dev *mgbdev = dev_get_drvdata(dev);
+ u32 sn = mgbdev->serial_number;
+
+ return sprintf(buf, "%03d-%03d-%03d-%03d\n", sn >> 24, (sn >> 16) & 0xFF,
+ (sn >> 8) & 0xFF, sn & 0xFF);
+}
+
+static DEVICE_ATTR_RO(module_version);
+static DEVICE_ATTR_RO(module_type);
+static DEVICE_ATTR_RO(fw_version);
+static DEVICE_ATTR_RO(fw_type);
+static DEVICE_ATTR_RO(serial_number);
+
+struct attribute *mgb4_pci_attrs[] = {
+ &dev_attr_module_type.attr,
+ &dev_attr_module_version.attr,
+ &dev_attr_fw_type.attr,
+ &dev_attr_fw_version.attr,
+ &dev_attr_serial_number.attr,
+ NULL
+};
diff --git a/drivers/media/pci/mgb4/mgb4_trigger.c b/drivers/media/pci/mgb4/mgb4_trigger.c
new file mode 100644
index 000000000000..923650d53d4c
--- /dev/null
+++ b/drivers/media/pci/mgb4/mgb4_trigger.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ * author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ *
+ * This module handles the IIO trigger device. The card has two signal inputs
+ * for event triggers that can be used to record events related to the video
+ * stream. A standard Linux IIO device with triggered buffer capability is
+ * created and configured so that the events can be fetched with the same
+ * clock source as the video frames.
+ */
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/pci.h>
+#include <linux/dma/amd_xdma.h>
+#include "mgb4_core.h"
+#include "mgb4_trigger.h"
+
+struct trigger_data {
+ struct mgb4_dev *mgbdev;
+ struct iio_trigger *trig;
+};
+
+static int trigger_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct trigger_data *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+ *val = mgb4_read_reg(&st->mgbdev->video, 0xA0);
+
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static int trigger_set_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct trigger_data *st = iio_priv(indio_dev);
+ int irq = xdma_get_user_irq(st->mgbdev->xdev, 11);
+
+ if (state)
+ xdma_enable_user_irq(st->mgbdev->xdev, irq);
+ else
+ xdma_disable_user_irq(st->mgbdev->xdev, irq);
+
+ return 0;
+}
+
+static const struct iio_trigger_ops trigger_ops = {
+ .set_trigger_state = &trigger_set_state,
+};
+
+static const struct iio_info trigger_info = {
+ .read_raw = trigger_read_raw,
+};
+
+#define TRIGGER_CHANNEL(_si) { \
+ .type = IIO_ACTIVITY, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .scan_index = _si, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 32, \
+ .storagebits = 32, \
+ .shift = 0, \
+ .endianness = IIO_CPU \
+ }, \
+}
+
+static const struct iio_chan_spec trigger_channels[] = {
+ TRIGGER_CHANNEL(0),
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static irqreturn_t trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct trigger_data *st = iio_priv(indio_dev);
+ struct {
+ u32 data;
+ s64 ts __aligned(8);
+ } scan;
+
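+	/* Read the captured trigger events and write the value back to clear them */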
+ scan.data = mgb4_read_reg(&st->mgbdev->video, 0xA0);
+ mgb4_write_reg(&st->mgbdev->video, 0xA0, scan.data);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);
+ iio_trigger_notify_done(indio_dev->trig);
+
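+	/* Acknowledge the trigger interrupt (user IRQ #11) in register 0xB4 */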
+ mgb4_write_reg(&st->mgbdev->video, 0xB4, 1U << 11);
+
+ return IRQ_HANDLED;
+}
+
+static int probe_trigger(struct iio_dev *indio_dev, int irq)
+{
+ int ret;
+ struct trigger_data *st = iio_priv(indio_dev);
+
+ st->trig = iio_trigger_alloc(&st->mgbdev->pdev->dev, "%s-dev%d",
+ indio_dev->name, iio_device_id(indio_dev));
+ if (!st->trig)
+ return -ENOMEM;
+
+ ret = request_irq(irq, &iio_trigger_generic_data_rdy_poll, 0,
+ "mgb4-trigger", st->trig);
+ if (ret)
+ goto error_free_trig;
+
+ st->trig->ops = &trigger_ops;
+ iio_trigger_set_drvdata(st->trig, indio_dev);
+ ret = iio_trigger_register(st->trig);
+ if (ret)
+ goto error_free_irq;
+
+ indio_dev->trig = iio_trigger_get(st->trig);
+
+ return 0;
+
+error_free_irq:
+ free_irq(irq, st->trig);
+error_free_trig:
+ iio_trigger_free(st->trig);
+
+ return ret;
+}
+
+static void remove_trigger(struct iio_dev *indio_dev, int irq)
+{
+ struct trigger_data *st = iio_priv(indio_dev);
+
+ iio_trigger_unregister(st->trig);
+ free_irq(irq, st->trig);
+ iio_trigger_free(st->trig);
+}
+
+struct iio_dev *mgb4_trigger_create(struct mgb4_dev *mgbdev)
+{
+ struct iio_dev *indio_dev;
+ struct trigger_data *data;
+ struct pci_dev *pdev = mgbdev->pdev;
+ struct device *dev = &pdev->dev;
+ int rv, irq;
+
+ indio_dev = iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return NULL;
+
+ indio_dev->info = &trigger_info;
+ indio_dev->name = "mgb4";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = trigger_channels;
+ indio_dev->num_channels = ARRAY_SIZE(trigger_channels);
+
+ data = iio_priv(indio_dev);
+ data->mgbdev = mgbdev;
+
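+	/* The trigger inputs are signalled via XDMA user IRQ #11 */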
+ irq = xdma_get_user_irq(mgbdev->xdev, 11);
+ rv = probe_trigger(indio_dev, irq);
+ if (rv < 0) {
+ dev_err(dev, "iio triggered setup failed\n");
+ goto error_alloc;
+ }
+ rv = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ trigger_handler, NULL);
+ if (rv < 0) {
+ dev_err(dev, "iio triggered buffer setup failed\n");
+ goto error_trigger;
+ }
+ rv = iio_device_register(indio_dev);
+ if (rv < 0) {
+ dev_err(dev, "iio device register failed\n");
+ goto error_buffer;
+ }
+
+ return indio_dev;
+
+error_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+error_trigger:
+ remove_trigger(indio_dev, irq);
+error_alloc:
+ iio_device_free(indio_dev);
+
+ return NULL;
+}
+
+void mgb4_trigger_free(struct iio_dev *indio_dev)
+{
+ struct trigger_data *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ remove_trigger(indio_dev, xdma_get_user_irq(st->mgbdev->xdev, 11));
+ iio_device_free(indio_dev);
+}
diff --git a/drivers/media/pci/mgb4/mgb4_trigger.h b/drivers/media/pci/mgb4/mgb4_trigger.h
new file mode 100644
index 000000000000..6c25bc4576f6
--- /dev/null
+++ b/drivers/media/pci/mgb4/mgb4_trigger.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021-2022 Digiteq Automotive
+ * author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ */
+
+struct iio_dev *mgb4_trigger_create(struct mgb4_dev *mgbdev);
+void mgb4_trigger_free(struct iio_dev *indio_dev);
diff --git a/drivers/media/pci/mgb4/mgb4_vin.c b/drivers/media/pci/mgb4/mgb4_vin.c
new file mode 100644
index 000000000000..d72b07b87cd1
--- /dev/null
+++ b/drivers/media/pci/mgb4/mgb4_vin.c
@@ -0,0 +1,939 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ * author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ *
+ * This is the v4l2 input device module. It initializes the signal deserializers
+ * and creates the v4l2 video devices. The input signal can change at any
+ * time; this is handled by the "timings" callbacks and an IRQ-based watcher
+ * that emits the V4L2_EVENT_SOURCE_CHANGE event on a signal source change.
+ *
+ * When the device is in loopback mode (a direct, in HW, in->out frame passing
+ * mode), the card's frame queue must be kept running regardless of whether a
+ * v4l2 stream is running, and the output parameters, such as the frame buffer
+ * padding, must be kept in sync with the input parameters.
+ */
+
+#include <linux/pci.h>
+#include <linux/workqueue.h>
+#include <linux/align.h>
+#include <linux/dma/amd_xdma.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-sg.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-event.h>
+#include "mgb4_core.h"
+#include "mgb4_dma.h"
+#include "mgb4_sysfs.h"
+#include "mgb4_io.h"
+#include "mgb4_vout.h"
+#include "mgb4_vin.h"
+
+ATTRIBUTE_GROUPS(mgb4_fpdl3_in);
+ATTRIBUTE_GROUPS(mgb4_gmsl_in);
+
+static const struct mgb4_vin_config vin_cfg[] = {
+ {0, 0, 0, 6, {0x10, 0x00, 0x04, 0x08, 0x1C, 0x14, 0x18, 0x20, 0x24, 0x28}},
+ {1, 1, 1, 7, {0x40, 0x30, 0x34, 0x38, 0x4C, 0x44, 0x48, 0x50, 0x54, 0x58}}
+};
+
+static const struct i2c_board_info fpdl3_deser_info[] = {
+ {I2C_BOARD_INFO("deserializer1", 0x38)},
+ {I2C_BOARD_INFO("deserializer2", 0x36)},
+};
+
+static const struct i2c_board_info gmsl_deser_info[] = {
+ {I2C_BOARD_INFO("deserializer1", 0x4C)},
+ {I2C_BOARD_INFO("deserializer2", 0x2A)},
+};
+
+static const struct mgb4_i2c_kv fpdl3_i2c[] = {
+ {0x06, 0xFF, 0x04}, {0x07, 0xFF, 0x01}, {0x45, 0xFF, 0xE8},
+ {0x49, 0xFF, 0x00}, {0x34, 0xFF, 0x00}, {0x23, 0xFF, 0x00}
+};
+
+static const struct mgb4_i2c_kv gmsl_i2c[] = {
+ {0x01, 0x03, 0x03}, {0x300, 0x0C, 0x0C}, {0x03, 0xC0, 0xC0},
+ {0x1CE, 0x0E, 0x0E}, {0x11, 0x05, 0x00}, {0x05, 0xC0, 0x40},
+ {0x307, 0x0F, 0x00}, {0xA0, 0x03, 0x00}, {0x3E0, 0x07, 0x07},
+ {0x308, 0x01, 0x01}, {0x10, 0x20, 0x20}, {0x300, 0x40, 0x40}
+};
+
+static const struct v4l2_dv_timings_cap video_timings_cap = {
+ .type = V4L2_DV_BT_656_1120,
+ .bt = {
+ .min_width = 320,
+ .max_width = 4096,
+ .min_height = 240,
+ .max_height = 2160,
+ .min_pixelclock = 1843200, /* 320 x 240 x 24Hz */
+ .max_pixelclock = 530841600, /* 4096 x 2160 x 60Hz */
+ .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF,
+ .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
+ V4L2_DV_BT_CAP_CUSTOM,
+ },
+};
+
+/*
+ * Returns the video output connected to the given video input if the input
+ * is in loopback mode, or NULL otherwise.
+ */
+static struct mgb4_vout_dev *loopback_dev(struct mgb4_vin_dev *vindev, int i)
+{
+ struct mgb4_vout_dev *voutdev;
+ u32 config;
+
+ voutdev = vindev->mgbdev->vout[i];
+ if (!voutdev)
+ return NULL;
+
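+	/*
+	 * Bits [3:2] of the output CONFIG register select the output's video
+	 * source; if they match the input's ID, the output is fed directly by
+	 * this input (loopback mode).
+	 */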
+ config = mgb4_read_reg(&voutdev->mgbdev->video,
+ voutdev->config->regs.config);
+ if ((config & 0xc) >> 2 == vindev->config->id)
+ return voutdev;
+
+ return NULL;
+}
+
+/*
+ * Check whether the loopback mode (a HW INPUT->OUTPUT transmission) is
+ * enabled on the given input.
+ */
+static int loopback_active(struct mgb4_vin_dev *vindev)
+{
+ int i;
+
+ for (i = 0; i < MGB4_VOUT_DEVICES; i++)
+ if (loopback_dev(vindev, i))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Set the output frame buffer padding of all outputs connected with the given
+ * input when the video input is set to loopback mode. The paddings must be
+ * the same for the loopback to work properly.
+ */
+static void set_loopback_padding(struct mgb4_vin_dev *vindev, u32 padding)
+{
+ struct mgb4_regs *video = &vindev->mgbdev->video;
+ struct mgb4_vout_dev *voutdev;
+ int i;
+
+ for (i = 0; i < MGB4_VOUT_DEVICES; i++) {
+ voutdev = loopback_dev(vindev, i);
+ if (voutdev)
+ mgb4_write_reg(video, voutdev->config->regs.padding,
+ padding);
+ }
+}
+
+static int get_timings(struct mgb4_vin_dev *vindev,
+ struct v4l2_dv_timings *timings)
+{
+ struct mgb4_regs *video = &vindev->mgbdev->video;
+ const struct mgb4_vin_regs *regs = &vindev->config->regs;
+
+ u32 status = mgb4_read_reg(video, regs->status);
+ u32 pclk = mgb4_read_reg(video, regs->pclk);
+ u32 signal = mgb4_read_reg(video, regs->signal);
+ u32 signal2 = mgb4_read_reg(video, regs->signal2);
+ u32 resolution = mgb4_read_reg(video, regs->resolution);
+
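+	/* Bit 2 = sync lock, bits 9-10 = signal presence (see also vidioc_enum_input()) */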
+ if (!(status & (1U << 2)))
+ return -ENOLCK;
+ if (!(status & (3 << 9)))
+ return -ENOLINK;
+
+ memset(timings, 0, sizeof(*timings));
+ timings->type = V4L2_DV_BT_656_1120;
+ timings->bt.width = resolution >> 16;
+ timings->bt.height = resolution & 0xFFFF;
+ if (status & (1U << 12))
+ timings->bt.polarities |= V4L2_DV_HSYNC_POS_POL;
+ if (status & (1U << 13))
+ timings->bt.polarities |= V4L2_DV_VSYNC_POS_POL;
+ timings->bt.pixelclock = pclk * 1000;
+ timings->bt.hsync = (signal & 0x00FF0000) >> 16;
+ timings->bt.vsync = (signal2 & 0x00FF0000) >> 16;
+ timings->bt.hbackporch = (signal & 0x0000FF00) >> 8;
+ timings->bt.hfrontporch = signal & 0x000000FF;
+ timings->bt.vbackporch = (signal2 & 0x0000FF00) >> 8;
+ timings->bt.vfrontporch = signal2 & 0x000000FF;
+
+ return 0;
+}
+
+static void return_all_buffers(struct mgb4_vin_dev *vindev,
+ enum vb2_buffer_state state)
+{
+ struct mgb4_frame_buffer *buf, *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vindev->qlock, flags);
+ list_for_each_entry_safe(buf, node, &vindev->buf_list, list) {
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ list_del(&buf->list);
+ }
+ spin_unlock_irqrestore(&vindev->qlock, flags);
+}
+
+static int queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct mgb4_vin_dev *vindev = vb2_get_drv_priv(q);
+ unsigned int size = (vindev->timings.bt.width + vindev->padding)
+ * vindev->timings.bt.height * 4;
+
+ /*
+	 * If I/O reconfiguration is in progress, do not allow the queue
+	 * to start. See video_source_store() in mgb4_sysfs_out.c for
+ * details.
+ */
+ if (test_bit(0, &vindev->mgbdev->io_reconfig))
+ return -EBUSY;
+
+ if (!size)
+ return -EINVAL;
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+ *nplanes = 1;
+ sizes[0] = size;
+
+ return 0;
+}
+
+static int buffer_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct mgb4_frame_buffer *buf = to_frame_buffer(vbuf);
+
+ INIT_LIST_HEAD(&buf->list);
+
+ return 0;
+}
+
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+ struct mgb4_vin_dev *vindev = vb2_get_drv_priv(vb->vb2_queue);
+ struct device *dev = &vindev->mgbdev->pdev->dev;
+ unsigned int size = (vindev->timings.bt.width + vindev->padding)
+ * vindev->timings.bt.height * 4;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_err(dev, "buffer too small (%lu < %u)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ struct mgb4_vin_dev *vindev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct mgb4_frame_buffer *buf = to_frame_buffer(vbuf);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vindev->qlock, flags);
+ list_add_tail(&buf->list, &vindev->buf_list);
+ spin_unlock_irqrestore(&vindev->qlock, flags);
+}
+
+static void stop_streaming(struct vb2_queue *vq)
+{
+ struct mgb4_vin_dev *vindev = vb2_get_drv_priv(vq);
+ const struct mgb4_vin_config *config = vindev->config;
+ int irq = xdma_get_user_irq(vindev->mgbdev->xdev, config->vin_irq);
+
+ xdma_disable_user_irq(vindev->mgbdev->xdev, irq);
+
+ /*
+ * In loopback mode, the HW frame queue must be left running for
+ * the IN->OUT transmission to work!
+ */
+ if (!loopback_active(vindev))
+ mgb4_mask_reg(&vindev->mgbdev->video, config->regs.config, 0x2,
+ 0x0);
+
+ cancel_work_sync(&vindev->dma_work);
+ return_all_buffers(vindev, VB2_BUF_STATE_ERROR);
+}
+
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct mgb4_vin_dev *vindev = vb2_get_drv_priv(vq);
+ const struct mgb4_vin_config *config = vindev->config;
+ int irq = xdma_get_user_irq(vindev->mgbdev->xdev, config->vin_irq);
+
+ vindev->sequence = 0;
+
+ /*
+ * In loopback mode, the HW frame queue is already running.
+ */
+ if (!loopback_active(vindev))
+ mgb4_mask_reg(&vindev->mgbdev->video, config->regs.config, 0x2,
+ 0x2);
+
+ xdma_enable_user_irq(vindev->mgbdev->xdev, irq);
+
+ return 0;
+}
+
+static const struct vb2_ops queue_ops = {
+ .queue_setup = queue_setup,
+ .buf_init = buffer_init,
+ .buf_prepare = buffer_prepare,
+ .buf_queue = buffer_queue,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish
+};
+
+static int fh_open(struct file *file)
+{
+ struct mgb4_vin_dev *vindev = video_drvdata(file);
+ int rv;
+
+ mutex_lock(&vindev->lock);
+
+ rv = v4l2_fh_open(file);
+ if (rv)
+ goto out;
+
+ if (!v4l2_fh_is_singular_file(file))
+ goto out;
+
+ get_timings(vindev, &vindev->timings);
+ set_loopback_padding(vindev, vindev->padding);
+
+out:
+ mutex_unlock(&vindev->lock);
+ return rv;
+}
+
+static int fh_release(struct file *file)
+{
+ struct mgb4_vin_dev *vindev = video_drvdata(file);
+ int rv;
+
+ mutex_lock(&vindev->lock);
+
+ if (v4l2_fh_is_singular_file(file))
+ set_loopback_padding(vindev, 0);
+
+ rv = _vb2_fop_release(file, NULL);
+
+ mutex_unlock(&vindev->lock);
+
+ return rv;
+}
+
+static const struct v4l2_file_operations video_fops = {
+ .owner = THIS_MODULE,
+ .open = fh_open,
+ .release = fh_release,
+ .unlocked_ioctl = video_ioctl2,
+ .read = vb2_fop_read,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
+};
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
+ strscpy(cap->card, "MGB4 PCIe Card", sizeof(cap->card));
+
+ return 0;
+}
+
+static int vidioc_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index != 0)
+ return -EINVAL;
+
+ f->pixelformat = V4L2_PIX_FMT_ABGR32;
+
+ return 0;
+}
+
+static int vidioc_enum_frameintervals(struct file *file, void *priv,
+ struct v4l2_frmivalenum *ival)
+{
+ struct mgb4_vin_dev *vindev = video_drvdata(file);
+
+ if (ival->index != 0)
+ return -EINVAL;
+ if (ival->pixel_format != V4L2_PIX_FMT_ABGR32)
+ return -EINVAL;
+ if (ival->width != vindev->timings.bt.width ||
+ ival->height != vindev->timings.bt.height)
+ return -EINVAL;
+
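+	/* Report a continuous frame interval range from 1/60 s up to 1 s */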
+ ival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
+ ival->stepwise.min.denominator = 60;
+ ival->stepwise.min.numerator = 1;
+ ival->stepwise.max.denominator = 1;
+ ival->stepwise.max.numerator = 1;
+ ival->stepwise.step = ival->stepwise.max;
+
+ return 0;
+}
+
+static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct mgb4_vin_dev *vindev = video_drvdata(file);
+
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32;
+ f->fmt.pix.width = vindev->timings.bt.width;
+ f->fmt.pix.height = vindev->timings.bt.height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
+ f->fmt.pix.bytesperline = (f->fmt.pix.width + vindev->padding) * 4;
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
+
+ return 0;
+}
+
+static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct mgb4_vin_dev *vindev = video_drvdata(file);
+
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32;
+ f->fmt.pix.width = vindev->timings.bt.width;
+ f->fmt.pix.height = vindev->timings.bt.height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
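+	/*
+	 * Accept the requested bytesperline as line padding, but keep it
+	 * 4-byte aligned and at least as wide as one full line of pixels.
+	 */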
+ f->fmt.pix.bytesperline = max(f->fmt.pix.width * 4,
+ ALIGN_DOWN(f->fmt.pix.bytesperline, 4));
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
+
+ return 0;
+}
+
+static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct mgb4_vin_dev *vindev = video_drvdata(file);
+ struct mgb4_regs *video = &vindev->mgbdev->video;
+
+ if (vb2_is_busy(&vindev->queue))
+ return -EBUSY;
+
+ vidioc_try_fmt(file, priv, f);
+
+ vindev->padding = (f->fmt.pix.bytesperline - (f->fmt.pix.width * 4)) / 4;
+ mgb4_write_reg(video, vindev->config->regs.padding, vindev->padding);
+ set_loopback_padding(vindev, vindev->padding);
+
+ return 0;
+}
+
+static int vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ struct mgb4_vin_dev *vindev = video_drvdata(file);
+ struct mgb4_regs *video = &vindev->mgbdev->video;
+ u32 status;
+
+ if (i->index != 0)
+ return -EINVAL;
+
+ strscpy(i->name, "MGB4", sizeof(i->name));
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+ i->capabilities = V4L2_IN_CAP_DV_TIMINGS;
+ i->status = 0;
+
+ status = mgb4_read_reg(video, vindev->config->regs.status);
+ if (!(status & (1U << 2)))
+ i->status |= V4L2_IN_ST_NO_SYNC;
+ if (!(status & (3 << 9)))
+ i->status |= V4L2_IN_ST_NO_SIGNAL;
+
+ return 0;
+}
+
+static int vidioc_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct mgb4_vin_dev *vindev = video_drvdata(file);
+
+ if (fsize->index != 0 || fsize->pixel_format != V4L2_PIX_FMT_ABGR32)
+ return -EINVAL;
+
+ fsize->discrete.width = vindev->timings.bt.width;
+ fsize->discrete.height = vindev->timings.bt.height;
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+
+ return 0;
+}
+
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+ return (i == 0) ? 0 : -EINVAL;
+}
+
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int vidioc_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct mgb4_vin_dev *vindev = video_drvdata(file);
+ struct mgb4_regs *video = &vindev->mgbdev->video;
+ const struct mgb4_vin_regs *regs = &vindev->config->regs;
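+	/* The FRAME_PERIOD register counts 125 MHz clock ticks per frame */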
+ struct v4l2_fract timeperframe = {
+ .numerator = mgb4_read_reg(video, regs->frame_period),
+ .denominator = 125000000,
+ };
+
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ parm->parm.capture.readbuffers = 2;
+ parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ parm->parm.capture.timeperframe = timeperframe;
+
+ return 0;
+}
+
+static int vidioc_s_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct mgb4_vin_dev *vindev = video_drvdata(file);
+
+ if (timings->bt.width < video_timings_cap.bt.min_width ||
+ timings->bt.width > video_timings_cap.bt.max_width ||
+ timings->bt.height < video_timings_cap.bt.min_height ||
+ timings->bt.height > video_timings_cap.bt.max_height)
+ return -EINVAL;
+ if (timings->bt.width == vindev->timings.bt.width &&
+ timings->bt.height == vindev->timings.bt.height)
+ return 0;
+ if (vb2_is_busy(&vindev->queue))
+ return -EBUSY;
+
+ vindev->timings = *timings;
+
+ return 0;
+}
+
+static int vidioc_g_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct mgb4_vin_dev *vindev = video_drvdata(file);
+ *timings = vindev->timings;
+
+ return 0;
+}
+
+static int vidioc_query_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct mgb4_vin_dev *vindev = video_drvdata(file);
+
+ return get_timings(vindev, timings);
+}
+
+static int vidioc_enum_dv_timings(struct file *file, void *fh,
+ struct v4l2_enum_dv_timings *timings)
+{
+ return v4l2_enum_dv_timings_cap(timings, &video_timings_cap, NULL, NULL);
+}
+
+static int vidioc_dv_timings_cap(struct file *file, void *fh,
+ struct v4l2_dv_timings_cap *cap)
+{
+ *cap = video_timings_cap;
+
+ return 0;
+}
+
+static int vidioc_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_src_change_event_subscribe(fh, sub);
+ }
+
+ return v4l2_ctrl_subscribe_event(fh, sub);
+}
+
+static const struct v4l2_ioctl_ops video_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt,
+ .vidioc_enum_framesizes = vidioc_enum_framesizes,
+ .vidioc_enum_frameintervals = vidioc_enum_frameintervals,
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_s_input = vidioc_s_input,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_g_parm = vidioc_parm,
+ .vidioc_s_parm = vidioc_parm,
+ .vidioc_dv_timings_cap = vidioc_dv_timings_cap,
+ .vidioc_enum_dv_timings = vidioc_enum_dv_timings,
+ .vidioc_g_dv_timings = vidioc_g_dv_timings,
+ .vidioc_s_dv_timings = vidioc_s_dv_timings,
+ .vidioc_query_dv_timings = vidioc_query_dv_timings,
+ .vidioc_subscribe_event = vidioc_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static void dma_transfer(struct work_struct *work)
+{
+ struct mgb4_vin_dev *vindev = container_of(work, struct mgb4_vin_dev,
+ dma_work);
+ struct mgb4_regs *video = &vindev->mgbdev->video;
+ struct device *dev = &vindev->mgbdev->pdev->dev;
+ struct mgb4_frame_buffer *buf = NULL;
+ unsigned long flags;
+ u32 addr;
+ int rv;
+
+ spin_lock_irqsave(&vindev->qlock, flags);
+ if (!list_empty(&vindev->buf_list)) {
+ buf = list_first_entry(&vindev->buf_list,
+ struct mgb4_frame_buffer, list);
+ list_del_init(vindev->buf_list.next);
+ }
+ spin_unlock_irqrestore(&vindev->qlock, flags);
+
+ if (!buf)
+ return;
+
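+	/* Addresses above MGB4_ERR_QUEUE_FULL are frame queue error codes, not frame addresses */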
+ addr = mgb4_read_reg(video, vindev->config->regs.address);
+ if (addr >= MGB4_ERR_QUEUE_FULL) {
+ dev_dbg(dev, "frame queue error (%d)\n", (int)addr);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ return;
+ }
+
+ rv = mgb4_dma_transfer(vindev->mgbdev, vindev->config->dma_channel,
+ false, addr,
+ vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0));
+ if (rv < 0) {
+ dev_warn(dev, "DMA transfer error\n");
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ } else {
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ buf->vb.sequence = vindev->sequence++;
+ buf->vb.field = V4L2_FIELD_NONE;
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ }
+}
+
+static void signal_change(struct work_struct *work)
+{
+ struct mgb4_vin_dev *vindev = container_of(work, struct mgb4_vin_dev,
+ err_work);
+ struct mgb4_regs *video = &vindev->mgbdev->video;
+ struct v4l2_bt_timings *timings = &vindev->timings.bt;
+ struct device *dev = &vindev->mgbdev->pdev->dev;
+
+ u32 resolution = mgb4_read_reg(video, vindev->config->regs.resolution);
+ u32 width = resolution >> 16;
+ u32 height = resolution & 0xFFFF;
+
+ if (timings->width != width || timings->height != height) {
+ static const struct v4l2_event ev = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+
+ v4l2_event_queue(&vindev->vdev, &ev);
+
+ if (vb2_is_streaming(&vindev->queue))
+ vb2_queue_error(&vindev->queue);
+ }
+
+ dev_dbg(dev, "stream changed to %ux%u\n", width, height);
+}
+
+static irqreturn_t vin_handler(int irq, void *ctx)
+{
+ struct mgb4_vin_dev *vindev = (struct mgb4_vin_dev *)ctx;
+ struct mgb4_regs *video = &vindev->mgbdev->video;
+
+ schedule_work(&vindev->dma_work);
+
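+	/* Acknowledge the IRQ by writing its bit to register 0xB4 */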
+ mgb4_write_reg(video, 0xB4, 1U << vindev->config->vin_irq);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t err_handler(int irq, void *ctx)
+{
+ struct mgb4_vin_dev *vindev = (struct mgb4_vin_dev *)ctx;
+ struct mgb4_regs *video = &vindev->mgbdev->video;
+
+ schedule_work(&vindev->err_work);
+
+ mgb4_write_reg(video, 0xB4, 1U << vindev->config->err_irq);
+
+ return IRQ_HANDLED;
+}
+
+static int deser_init(struct mgb4_vin_dev *vindev, int id)
+{
+ int rv, addr_size;
+ size_t values_count;
+ const struct mgb4_i2c_kv *values;
+ const struct i2c_board_info *info;
+ struct device *dev = &vindev->mgbdev->pdev->dev;
+
+ if (MGB4_IS_GMSL(vindev->mgbdev)) {
+ info = &gmsl_deser_info[id];
+ addr_size = 16;
+ values = gmsl_i2c;
+ values_count = ARRAY_SIZE(gmsl_i2c);
+ } else {
+ info = &fpdl3_deser_info[id];
+ addr_size = 8;
+ values = fpdl3_i2c;
+ values_count = ARRAY_SIZE(fpdl3_i2c);
+ }
+
+ rv = mgb4_i2c_init(&vindev->deser, vindev->mgbdev->i2c_adap, info,
+ addr_size);
+ if (rv < 0) {
+ dev_err(dev, "failed to create deserializer\n");
+ return rv;
+ }
+ rv = mgb4_i2c_configure(&vindev->deser, values, values_count);
+ if (rv < 0) {
+ dev_err(dev, "failed to configure deserializer\n");
+ goto err_i2c_dev;
+ }
+
+ return 0;
+
+err_i2c_dev:
+ mgb4_i2c_free(&vindev->deser);
+
+ return rv;
+}
+
+static void fpga_init(struct mgb4_vin_dev *vindev)
+{
+ struct mgb4_regs *video = &vindev->mgbdev->video;
+ const struct mgb4_vin_regs *regs = &vindev->config->regs;
+
+ mgb4_write_reg(video, regs->config, 0x00000001);
+ mgb4_write_reg(video, regs->sync, 0x03E80002);
+ mgb4_write_reg(video, regs->padding, 0x00000000);
+ mgb4_write_reg(video, regs->config, 1U << 9);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void debugfs_init(struct mgb4_vin_dev *vindev)
+{
+ struct mgb4_regs *video = &vindev->mgbdev->video;
+
+ vindev->debugfs = debugfs_create_dir(vindev->vdev.name,
+ vindev->mgbdev->debugfs);
+ if (!vindev->debugfs)
+ return;
+
+ vindev->regs[0].name = "CONFIG";
+ vindev->regs[0].offset = vindev->config->regs.config;
+ vindev->regs[1].name = "STATUS";
+ vindev->regs[1].offset = vindev->config->regs.status;
+ vindev->regs[2].name = "RESOLUTION";
+ vindev->regs[2].offset = vindev->config->regs.resolution;
+ vindev->regs[3].name = "FRAME_PERIOD";
+ vindev->regs[3].offset = vindev->config->regs.frame_period;
+ vindev->regs[4].name = "HS_VS_GENER_SETTINGS";
+ vindev->regs[4].offset = vindev->config->regs.sync;
+ vindev->regs[5].name = "PCLK_FREQUENCY";
+ vindev->regs[5].offset = vindev->config->regs.pclk;
+ vindev->regs[6].name = "VIDEO_PARAMS_1";
+ vindev->regs[6].offset = vindev->config->regs.signal;
+ vindev->regs[7].name = "VIDEO_PARAMS_2";
+ vindev->regs[7].offset = vindev->config->regs.signal2;
+ vindev->regs[8].name = "PADDING_PIXELS";
+ vindev->regs[8].offset = vindev->config->regs.padding;
+
+ vindev->regset.base = video->membase;
+ vindev->regset.regs = vindev->regs;
+ vindev->regset.nregs = ARRAY_SIZE(vindev->regs);
+
+ debugfs_create_regset32("registers", 0444, vindev->debugfs,
+ &vindev->regset);
+}
+#endif
+
+struct mgb4_vin_dev *mgb4_vin_create(struct mgb4_dev *mgbdev, int id)
+{
+ int rv;
+ const struct attribute_group **groups;
+ struct mgb4_vin_dev *vindev;
+ struct pci_dev *pdev = mgbdev->pdev;
+ struct device *dev = &pdev->dev;
+ int vin_irq, err_irq;
+
+ vindev = kzalloc(sizeof(*vindev), GFP_KERNEL);
+ if (!vindev)
+ return NULL;
+
+ vindev->mgbdev = mgbdev;
+ vindev->config = &vin_cfg[id];
+
+	/* Frame queue */
+ INIT_LIST_HEAD(&vindev->buf_list);
+ spin_lock_init(&vindev->qlock);
+
+ /* Work queues */
+ INIT_WORK(&vindev->dma_work, dma_transfer);
+ INIT_WORK(&vindev->err_work, signal_change);
+
+ /* IRQ callback */
+ vin_irq = xdma_get_user_irq(mgbdev->xdev, vindev->config->vin_irq);
+ rv = request_irq(vin_irq, vin_handler, 0, "mgb4-vin", vindev);
+ if (rv) {
+ dev_err(dev, "failed to register vin irq handler\n");
+ goto err_alloc;
+ }
+ /* Error IRQ callback */
+ err_irq = xdma_get_user_irq(mgbdev->xdev, vindev->config->err_irq);
+ rv = request_irq(err_irq, err_handler, 0, "mgb4-err", vindev);
+ if (rv) {
+ dev_err(dev, "failed to register err irq handler\n");
+ goto err_vin_irq;
+ }
+
+ /* Set the FPGA registers default values */
+ fpga_init(vindev);
+
+ /* Set the deserializer default values */
+ rv = deser_init(vindev, id);
+ if (rv)
+ goto err_err_irq;
+
+ /* V4L2 stuff init */
+ rv = v4l2_device_register(dev, &vindev->v4l2dev);
+ if (rv) {
+ dev_err(dev, "failed to register v4l2 device\n");
+ goto err_err_irq;
+ }
+
+ mutex_init(&vindev->lock);
+
+ vindev->queue.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vindev->queue.io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+ vindev->queue.buf_struct_size = sizeof(struct mgb4_frame_buffer);
+ vindev->queue.ops = &queue_ops;
+ vindev->queue.mem_ops = &vb2_dma_sg_memops;
+ vindev->queue.gfp_flags = GFP_DMA32;
+ vindev->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vindev->queue.min_buffers_needed = 2;
+ vindev->queue.drv_priv = vindev;
+ vindev->queue.lock = &vindev->lock;
+ vindev->queue.dev = dev;
+ rv = vb2_queue_init(&vindev->queue);
+ if (rv) {
+ dev_err(dev, "failed to initialize vb2 queue\n");
+ goto err_v4l2_dev;
+ }
+
+ snprintf(vindev->vdev.name, sizeof(vindev->vdev.name), "mgb4-in%d",
+ id + 1);
+ vindev->vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE
+ | V4L2_CAP_STREAMING;
+ vindev->vdev.fops = &video_fops;
+ vindev->vdev.ioctl_ops = &video_ioctl_ops;
+ vindev->vdev.release = video_device_release_empty;
+ vindev->vdev.v4l2_dev = &vindev->v4l2dev;
+ vindev->vdev.lock = &vindev->lock;
+ vindev->vdev.queue = &vindev->queue;
+ video_set_drvdata(&vindev->vdev, vindev);
+
+ /* Enable the video signal change watcher */
+ xdma_enable_user_irq(vindev->mgbdev->xdev, err_irq);
+
+ /* Register the video device */
+ rv = video_register_device(&vindev->vdev, VFL_TYPE_VIDEO, -1);
+ if (rv) {
+ dev_err(dev, "failed to register video device\n");
+ goto err_v4l2_dev;
+ }
+
+ /* Module sysfs attributes */
+ groups = MGB4_IS_GMSL(mgbdev)
+ ? mgb4_gmsl_in_groups : mgb4_fpdl3_in_groups;
+ rv = device_add_groups(&vindev->vdev.dev, groups);
+ if (rv) {
+ dev_err(dev, "failed to create sysfs attributes\n");
+ goto err_video_dev;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_init(vindev);
+#endif
+
+ return vindev;
+
+err_video_dev:
+ video_unregister_device(&vindev->vdev);
+err_v4l2_dev:
+ v4l2_device_unregister(&vindev->v4l2dev);
+err_err_irq:
+ free_irq(err_irq, vindev);
+err_vin_irq:
+ free_irq(vin_irq, vindev);
+err_alloc:
+ kfree(vindev);
+
+ return NULL;
+}
+
+void mgb4_vin_free(struct mgb4_vin_dev *vindev)
+{
+ const struct attribute_group **groups;
+ int vin_irq = xdma_get_user_irq(vindev->mgbdev->xdev,
+ vindev->config->vin_irq);
+ int err_irq = xdma_get_user_irq(vindev->mgbdev->xdev,
+ vindev->config->err_irq);
+
+ xdma_disable_user_irq(vindev->mgbdev->xdev, err_irq);
+
+ free_irq(vin_irq, vindev);
+ free_irq(err_irq, vindev);
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(vindev->debugfs);
+#endif
+
+ groups = MGB4_IS_GMSL(vindev->mgbdev)
+ ? mgb4_gmsl_in_groups : mgb4_fpdl3_in_groups;
+ device_remove_groups(&vindev->vdev.dev, groups);
+
+ mgb4_i2c_free(&vindev->deser);
+ video_unregister_device(&vindev->vdev);
+ v4l2_device_unregister(&vindev->v4l2dev);
+
+ kfree(vindev);
+}
diff --git a/drivers/media/pci/mgb4/mgb4_vin.h b/drivers/media/pci/mgb4/mgb4_vin.h
new file mode 100644
index 000000000000..0249b400ad4d
--- /dev/null
+++ b/drivers/media/pci/mgb4/mgb4_vin.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ * author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ */
+
+#ifndef __MGB4_VIN_H__
+#define __MGB4_VIN_H__
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ctrls.h>
+#include <media/videobuf2-core.h>
+#include <linux/debugfs.h>
+#include "mgb4_i2c.h"
+
+struct mgb4_vin_regs {
+ u32 address;
+ u32 config;
+ u32 status;
+ u32 resolution;
+ u32 frame_period;
+ u32 sync;
+ u32 pclk;
+ u32 signal;
+ u32 signal2;
+ u32 padding;
+};
+
+struct mgb4_vin_config {
+ int id;
+ int dma_channel;
+ int vin_irq;
+ int err_irq;
+ struct mgb4_vin_regs regs;
+};
+
+struct mgb4_vin_dev {
+ struct mgb4_dev *mgbdev;
+ struct v4l2_device v4l2dev;
+ struct video_device vdev;
+ struct vb2_queue queue;
+ struct mutex lock; /* vdev lock */
+
+ spinlock_t qlock; /* video buffer queue lock */
+ struct list_head buf_list;
+ struct work_struct dma_work, err_work;
+
+ unsigned int sequence;
+
+ struct v4l2_dv_timings timings;
+ u32 freq_range;
+ u32 padding;
+
+ struct mgb4_i2c_client deser;
+
+ const struct mgb4_vin_config *config;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+ struct debugfs_regset32 regset;
+ struct debugfs_reg32 regs[9];
+#endif
+};
+
+struct mgb4_vin_dev *mgb4_vin_create(struct mgb4_dev *mgbdev, int id);
+void mgb4_vin_free(struct mgb4_vin_dev *vindev);
+
+#endif
diff --git a/drivers/media/pci/mgb4/mgb4_vout.c b/drivers/media/pci/mgb4/mgb4_vout.c
new file mode 100644
index 000000000000..857fc7bbd21a
--- /dev/null
+++ b/drivers/media/pci/mgb4/mgb4_vout.c
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ * author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ *
+ * This is the v4l2 output device module. It initializes the signal serializers
+ * and creates the v4l2 video devices.
+ *
+ * When the device is in loopback mode (a direct, in HW, in->out frame passing
+ * mode), the v4l2 output is disabled by returning -EBUSY from the open()
+ * syscall.
+ */
+
+#include <linux/pci.h>
+#include <linux/align.h>
+#include <linux/dma/amd_xdma.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-sg.h>
+#include "mgb4_core.h"
+#include "mgb4_dma.h"
+#include "mgb4_sysfs.h"
+#include "mgb4_io.h"
+#include "mgb4_cmt.h"
+#include "mgb4_vout.h"
+
+ATTRIBUTE_GROUPS(mgb4_fpdl3_out);
+ATTRIBUTE_GROUPS(mgb4_gmsl_out);
+
+static const struct mgb4_vout_config vout_cfg[] = {
+ {0, 0, 8, {0x78, 0x60, 0x64, 0x68, 0x74, 0x6C, 0x70, 0x7c}},
+ {1, 1, 9, {0x98, 0x80, 0x84, 0x88, 0x94, 0x8c, 0x90, 0x9c}}
+};
+
+static const struct i2c_board_info fpdl3_ser_info[] = {
+ {I2C_BOARD_INFO("serializer1", 0x14)},
+ {I2C_BOARD_INFO("serializer2", 0x16)},
+};
+
+static const struct mgb4_i2c_kv fpdl3_i2c[] = {
+ {0x05, 0xFF, 0x04}, {0x06, 0xFF, 0x01}, {0xC2, 0xFF, 0x80}
+};
+
+static void return_all_buffers(struct mgb4_vout_dev *voutdev,
+ enum vb2_buffer_state state)
+{
+ struct mgb4_frame_buffer *buf, *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&voutdev->qlock, flags);
+ list_for_each_entry_safe(buf, node, &voutdev->buf_list, list) {
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ list_del(&buf->list);
+ }
+ spin_unlock_irqrestore(&voutdev->qlock, flags);
+}
+
+static int queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct mgb4_vout_dev *voutdev = vb2_get_drv_priv(q);
+ unsigned int size;
+
+ /*
+	 * If I/O reconfiguration is in progress, do not allow the queue
+	 * to start. See video_source_store() in mgb4_sysfs_out.c for
+ * details.
+ */
+ if (test_bit(0, &voutdev->mgbdev->io_reconfig))
+ return -EBUSY;
+
+ size = (voutdev->width + voutdev->padding) * voutdev->height * 4;
+
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+ *nplanes = 1;
+ sizes[0] = size;
+
+ return 0;
+}
+
+static int buffer_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct mgb4_frame_buffer *buf = to_frame_buffer(vbuf);
+
+ INIT_LIST_HEAD(&buf->list);
+
+ return 0;
+}
+
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+ struct mgb4_vout_dev *voutdev = vb2_get_drv_priv(vb->vb2_queue);
+ struct device *dev = &voutdev->mgbdev->pdev->dev;
+ unsigned int size;
+
+ size = (voutdev->width + voutdev->padding) * voutdev->height * 4;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_err(dev, "buffer too small (%lu < %u)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+	struct mgb4_vout_dev *voutdev = vb2_get_drv_priv(vb->vb2_queue);
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+	struct mgb4_frame_buffer *buf = to_frame_buffer(vbuf);
+	unsigned long flags;
+
+	spin_lock_irqsave(&voutdev->qlock, flags);
+	list_add_tail(&buf->list, &voutdev->buf_list);
+	spin_unlock_irqrestore(&voutdev->qlock, flags);
+}
+
+static void stop_streaming(struct vb2_queue *vq)
+{
+ struct mgb4_vout_dev *voutdev = vb2_get_drv_priv(vq);
+ struct mgb4_dev *mgbdev = voutdev->mgbdev;
+ int irq = xdma_get_user_irq(mgbdev->xdev, voutdev->config->irq);
+
+ xdma_disable_user_irq(mgbdev->xdev, irq);
+ cancel_work_sync(&voutdev->dma_work);
+ mgb4_mask_reg(&mgbdev->video, voutdev->config->regs.config, 0x2, 0x0);
+ return_all_buffers(voutdev, VB2_BUF_STATE_ERROR);
+}
+
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct mgb4_vout_dev *voutdev = vb2_get_drv_priv(vq);
+ struct mgb4_dev *mgbdev = voutdev->mgbdev;
+ struct device *dev = &mgbdev->pdev->dev;
+ struct mgb4_frame_buffer *buf;
+ struct mgb4_regs *video = &mgbdev->video;
+ const struct mgb4_vout_config *config = voutdev->config;
+ int irq = xdma_get_user_irq(mgbdev->xdev, config->irq);
+ int rv;
+ u32 addr;
+
+ mgb4_mask_reg(video, config->regs.config, 0x2, 0x2);
+
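+	/* Addresses above MGB4_ERR_QUEUE_FULL indicate a frame queue error, do not start streaming */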
+ addr = mgb4_read_reg(video, config->regs.address);
+ if (addr >= MGB4_ERR_QUEUE_FULL) {
+ dev_dbg(dev, "frame queue error (%d)\n", (int)addr);
+ return_all_buffers(voutdev, VB2_BUF_STATE_QUEUED);
+ return -EBUSY;
+ }
+
+ buf = list_first_entry(&voutdev->buf_list, struct mgb4_frame_buffer,
+ list);
+ list_del_init(voutdev->buf_list.next);
+
+ rv = mgb4_dma_transfer(mgbdev, config->dma_channel, true, addr,
+ vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0));
+ if (rv < 0) {
+ dev_warn(dev, "DMA transfer error\n");
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ } else {
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ }
+
+ xdma_enable_user_irq(mgbdev->xdev, irq);
+
+ return 0;
+}
+
+static const struct vb2_ops queue_ops = {
+ .queue_setup = queue_setup,
+ .buf_init = buffer_init,
+ .buf_prepare = buffer_prepare,
+ .buf_queue = buffer_queue,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish
+};
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
+ strscpy(cap->card, "MGB4 PCIe Card", sizeof(cap->card));
+
+ return 0;
+}
+
+static int vidioc_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index != 0)
+ return -EINVAL;
+
+ f->pixelformat = V4L2_PIX_FMT_ABGR32;
+
+ return 0;
+}
+
+static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct mgb4_vout_dev *voutdev = video_drvdata(file);
+
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32;
+ f->fmt.pix.width = voutdev->width;
+ f->fmt.pix.height = voutdev->height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
+ f->fmt.pix.bytesperline = (f->fmt.pix.width + voutdev->padding) * 4;
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
+
+ return 0;
+}
+
+static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct mgb4_vout_dev *voutdev = video_drvdata(file);
+
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32;
+ f->fmt.pix.width = voutdev->width;
+ f->fmt.pix.height = voutdev->height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
+ f->fmt.pix.bytesperline = max(f->fmt.pix.width * 4,
+ ALIGN_DOWN(f->fmt.pix.bytesperline, 4));
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
+
+ return 0;
+}
+
+static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct mgb4_vout_dev *voutdev = video_drvdata(file);
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
+
+ if (vb2_is_busy(&voutdev->queue))
+ return -EBUSY;
+
+ vidioc_try_fmt(file, priv, f);
+
+ voutdev->padding = (f->fmt.pix.bytesperline - (f->fmt.pix.width * 4)) / 4;
+ mgb4_write_reg(video, voutdev->config->regs.padding, voutdev->padding);
+
+ return 0;
+}
+
+static int vidioc_g_output(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int vidioc_s_output(struct file *file, void *priv, unsigned int i)
+{
+ return i ? -EINVAL : 0;
+}
+
+static int vidioc_enum_output(struct file *file, void *priv,
+ struct v4l2_output *out)
+{
+ if (out->index != 0)
+ return -EINVAL;
+
+ out->type = V4L2_OUTPUT_TYPE_ANALOG;
+ strscpy(out->name, "MGB4", sizeof(out->name));
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops video_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt,
+ .vidioc_enum_output = vidioc_enum_output,
+ .vidioc_g_output = vidioc_g_output,
+ .vidioc_s_output = vidioc_s_output,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+static int fh_open(struct file *file)
+{
+ struct mgb4_vout_dev *voutdev = video_drvdata(file);
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
+ struct device *dev = &voutdev->mgbdev->pdev->dev;
+ u32 config, resolution;
+ int rv;
+
+ /* Return EBUSY when the device is in loopback mode */
+ config = mgb4_read_reg(video, voutdev->config->regs.config);
+ if ((config & 0xc) >> 2 != voutdev->config->id + MGB4_VIN_DEVICES) {
+		dev_dbg(dev, "cannot open - device in loopback mode\n");
+ return -EBUSY;
+ }
+
+ mutex_lock(&voutdev->lock);
+
+ rv = v4l2_fh_open(file);
+ if (rv)
+ goto out;
+
+ if (!v4l2_fh_is_singular_file(file))
+ goto out;
+
+ resolution = mgb4_read_reg(video, voutdev->config->regs.resolution);
+ voutdev->width = resolution >> 16;
+ voutdev->height = resolution & 0xFFFF;
+
+out:
+ mutex_unlock(&voutdev->lock);
+ return rv;
+}
+
+static const struct v4l2_file_operations video_fops = {
+ .owner = THIS_MODULE,
+ .open = fh_open,
+ .release = vb2_fop_release,
+ .unlocked_ioctl = video_ioctl2,
+ .write = vb2_fop_write,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
+};
+
+static void dma_transfer(struct work_struct *work)
+{
+ struct mgb4_vout_dev *voutdev = container_of(work, struct mgb4_vout_dev,
+ dma_work);
+ struct device *dev = &voutdev->mgbdev->pdev->dev;
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
+ struct mgb4_frame_buffer *buf = NULL;
+ unsigned long flags;
+ u32 addr;
+ int rv;
+
+ spin_lock_irqsave(&voutdev->qlock, flags);
+ if (!list_empty(&voutdev->buf_list)) {
+ buf = list_first_entry(&voutdev->buf_list,
+ struct mgb4_frame_buffer, list);
+ list_del_init(voutdev->buf_list.next);
+ }
+ spin_unlock_irqrestore(&voutdev->qlock, flags);
+
+ if (!buf)
+ return;
+
+ addr = mgb4_read_reg(video, voutdev->config->regs.address);
+ if (addr >= MGB4_ERR_QUEUE_FULL) {
+ dev_dbg(dev, "frame queue error (%d)\n", (int)addr);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ return;
+ }
+
+ rv = mgb4_dma_transfer(voutdev->mgbdev, voutdev->config->dma_channel,
+ true, addr,
+ vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0));
+ if (rv < 0) {
+ dev_warn(dev, "DMA transfer error\n");
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ } else {
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ }
+}
+
+static irqreturn_t handler(int irq, void *ctx)
+{
+ struct mgb4_vout_dev *voutdev = (struct mgb4_vout_dev *)ctx;
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
+
+ schedule_work(&voutdev->dma_work);
+
+ mgb4_write_reg(video, 0xB4, 1U << voutdev->config->irq);
+
+ return IRQ_HANDLED;
+}
+
+static int ser_init(struct mgb4_vout_dev *voutdev, int id)
+{
+ int rv;
+ const struct i2c_board_info *info = &fpdl3_ser_info[id];
+ struct mgb4_i2c_client *ser = &voutdev->ser;
+ struct device *dev = &voutdev->mgbdev->pdev->dev;
+
+ if (MGB4_IS_GMSL(voutdev->mgbdev))
+ return 0;
+
+ rv = mgb4_i2c_init(ser, voutdev->mgbdev->i2c_adap, info, 8);
+ if (rv < 0) {
+ dev_err(dev, "failed to create serializer\n");
+ return rv;
+ }
+ rv = mgb4_i2c_configure(ser, fpdl3_i2c, ARRAY_SIZE(fpdl3_i2c));
+ if (rv < 0) {
+ dev_err(dev, "failed to configure serializer\n");
+ goto err_i2c_dev;
+ }
+
+ return 0;
+
+err_i2c_dev:
+ mgb4_i2c_free(ser);
+
+ return rv;
+}
+
+static void fpga_init(struct mgb4_vout_dev *voutdev)
+{
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
+ const struct mgb4_vout_regs *regs = &voutdev->config->regs;
+
+ mgb4_write_reg(video, regs->config, 0x00000011);
+ mgb4_write_reg(video, regs->resolution,
+ (MGB4_DEFAULT_WIDTH << 16) | MGB4_DEFAULT_HEIGHT);
+ mgb4_write_reg(video, regs->hsync, 0x00102020);
+ mgb4_write_reg(video, regs->vsync, 0x40020202);
+ mgb4_write_reg(video, regs->frame_period, MGB4_DEFAULT_PERIOD);
+ mgb4_write_reg(video, regs->padding, 0x00000000);
+
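+	/* Default pixel clock is 70 MHz, set at half rate with the doubling bit (see pclk_frequency_store()) */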
+ voutdev->freq = mgb4_cmt_set_vout_freq(voutdev, 70000 >> 1) << 1;
+
+ mgb4_write_reg(video, regs->config,
+ (voutdev->config->id + MGB4_VIN_DEVICES) << 2 | 1 << 4);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void debugfs_init(struct mgb4_vout_dev *voutdev)
+{
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
+
+ voutdev->debugfs = debugfs_create_dir(voutdev->vdev.name,
+ voutdev->mgbdev->debugfs);
+ if (!voutdev->debugfs)
+ return;
+
+ voutdev->regs[0].name = "CONFIG";
+ voutdev->regs[0].offset = voutdev->config->regs.config;
+ voutdev->regs[1].name = "STATUS";
+ voutdev->regs[1].offset = voutdev->config->regs.status;
+ voutdev->regs[2].name = "RESOLUTION";
+ voutdev->regs[2].offset = voutdev->config->regs.resolution;
+ voutdev->regs[3].name = "VIDEO_PARAMS_1";
+ voutdev->regs[3].offset = voutdev->config->regs.hsync;
+ voutdev->regs[4].name = "VIDEO_PARAMS_2";
+ voutdev->regs[4].offset = voutdev->config->regs.vsync;
+ voutdev->regs[5].name = "FRAME_PERIOD";
+ voutdev->regs[5].offset = voutdev->config->regs.frame_period;
+ voutdev->regs[6].name = "PADDING";
+ voutdev->regs[6].offset = voutdev->config->regs.padding;
+
+ voutdev->regset.base = video->membase;
+ voutdev->regset.regs = voutdev->regs;
+ voutdev->regset.nregs = ARRAY_SIZE(voutdev->regs);
+
+ debugfs_create_regset32("registers", 0444, voutdev->debugfs,
+ &voutdev->regset);
+}
+#endif
+
+struct mgb4_vout_dev *mgb4_vout_create(struct mgb4_dev *mgbdev, int id)
+{
+ int rv, irq;
+ const struct attribute_group **groups;
+ struct mgb4_vout_dev *voutdev;
+ struct pci_dev *pdev = mgbdev->pdev;
+ struct device *dev = &pdev->dev;
+
+ voutdev = kzalloc(sizeof(*voutdev), GFP_KERNEL);
+ if (!voutdev)
+ return NULL;
+
+ voutdev->mgbdev = mgbdev;
+ voutdev->config = &vout_cfg[id];
+
+ /* Frame queue */
+ INIT_LIST_HEAD(&voutdev->buf_list);
+ spin_lock_init(&voutdev->qlock);
+
+ /* DMA transfer stuff */
+ INIT_WORK(&voutdev->dma_work, dma_transfer);
+
+ /* IRQ callback */
+ irq = xdma_get_user_irq(mgbdev->xdev, voutdev->config->irq);
+ rv = request_irq(irq, handler, 0, "mgb4-vout", voutdev);
+ if (rv) {
+ dev_err(dev, "failed to register irq handler\n");
+ goto err_alloc;
+ }
+
+ /* Set the FPGA registers default values */
+ fpga_init(voutdev);
+
+ /* Set the serializer default values */
+ rv = ser_init(voutdev, id);
+ if (rv)
+ goto err_irq;
+
+ /* V4L2 stuff init */
+ rv = v4l2_device_register(dev, &voutdev->v4l2dev);
+ if (rv) {
+ dev_err(dev, "failed to register v4l2 device\n");
+ goto err_irq;
+ }
+
+ mutex_init(&voutdev->lock);
+
+ voutdev->queue.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ voutdev->queue.io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE;
+ voutdev->queue.buf_struct_size = sizeof(struct mgb4_frame_buffer);
+ voutdev->queue.ops = &queue_ops;
+ voutdev->queue.mem_ops = &vb2_dma_sg_memops;
+ voutdev->queue.gfp_flags = GFP_DMA32;
+ voutdev->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ voutdev->queue.min_buffers_needed = 2;
+ voutdev->queue.drv_priv = voutdev;
+ voutdev->queue.lock = &voutdev->lock;
+ voutdev->queue.dev = dev;
+ rv = vb2_queue_init(&voutdev->queue);
+ if (rv) {
+ dev_err(dev, "failed to initialize vb2 queue\n");
+ goto err_v4l2_dev;
+ }
+
+ snprintf(voutdev->vdev.name, sizeof(voutdev->vdev.name), "mgb4-out%d",
+ id + 1);
+ voutdev->vdev.device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_READWRITE
+ | V4L2_CAP_STREAMING;
+ voutdev->vdev.vfl_dir = VFL_DIR_TX;
+ voutdev->vdev.fops = &video_fops;
+ voutdev->vdev.ioctl_ops = &video_ioctl_ops;
+ voutdev->vdev.release = video_device_release_empty;
+ voutdev->vdev.v4l2_dev = &voutdev->v4l2dev;
+ voutdev->vdev.lock = &voutdev->lock;
+ voutdev->vdev.queue = &voutdev->queue;
+ video_set_drvdata(&voutdev->vdev, voutdev);
+
+ rv = video_register_device(&voutdev->vdev, VFL_TYPE_VIDEO, -1);
+ if (rv) {
+ dev_err(dev, "failed to register video device\n");
+ goto err_v4l2_dev;
+ }
+
+ /* Module sysfs attributes */
+ groups = MGB4_IS_GMSL(mgbdev)
+ ? mgb4_gmsl_out_groups : mgb4_fpdl3_out_groups;
+ rv = device_add_groups(&voutdev->vdev.dev, groups);
+ if (rv) {
+ dev_err(dev, "failed to create sysfs attributes\n");
+ goto err_video_dev;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_init(voutdev);
+#endif
+
+ return voutdev;
+
+err_video_dev:
+ video_unregister_device(&voutdev->vdev);
+err_v4l2_dev:
+ v4l2_device_unregister(&voutdev->v4l2dev);
+err_irq:
+ free_irq(irq, voutdev);
+err_alloc:
+ kfree(voutdev);
+
+ return NULL;
+}
+
+void mgb4_vout_free(struct mgb4_vout_dev *voutdev)
+{
+ const struct attribute_group **groups;
+ int irq = xdma_get_user_irq(voutdev->mgbdev->xdev, voutdev->config->irq);
+
+ free_irq(irq, voutdev);
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(voutdev->debugfs);
+#endif
+
+ groups = MGB4_IS_GMSL(voutdev->mgbdev)
+ ? mgb4_gmsl_out_groups : mgb4_fpdl3_out_groups;
+ device_remove_groups(&voutdev->vdev.dev, groups);
+
+ mgb4_i2c_free(&voutdev->ser);
+ video_unregister_device(&voutdev->vdev);
+ v4l2_device_unregister(&voutdev->v4l2dev);
+
+ kfree(voutdev);
+}
diff --git a/drivers/media/pci/mgb4/mgb4_vout.h b/drivers/media/pci/mgb4/mgb4_vout.h
new file mode 100644
index 000000000000..b163dee711fd
--- /dev/null
+++ b/drivers/media/pci/mgb4/mgb4_vout.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ * author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ */
+
+#ifndef __MGB4_VOUT_H__
+#define __MGB4_VOUT_H__
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ctrls.h>
+#include <media/videobuf2-core.h>
+#include <linux/debugfs.h>
+#include "mgb4_i2c.h"
+
+struct mgb4_vout_regs {
+ u32 address;
+ u32 config;
+ u32 status;
+ u32 resolution;
+ u32 frame_period;
+ u32 hsync;
+ u32 vsync;
+ u32 padding;
+};
+
+struct mgb4_vout_config {
+ int id;
+ int dma_channel;
+ int irq;
+ struct mgb4_vout_regs regs;
+};
+
+struct mgb4_vout_dev {
+ struct mgb4_dev *mgbdev;
+ struct v4l2_device v4l2dev;
+ struct video_device vdev;
+ struct vb2_queue queue;
+ struct mutex lock; /* vdev lock */
+
+ spinlock_t qlock; /* buffer queue lock */
+ struct list_head buf_list;
+ struct work_struct dma_work;
+
+ u32 width;
+ u32 height;
+ u32 freq;
+ u32 padding;
+
+ struct mgb4_i2c_client ser;
+
+ const struct mgb4_vout_config *config;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+ struct debugfs_regset32 regset;
+ struct debugfs_reg32 regs[7];
+#endif
+};
+
+struct mgb4_vout_dev *mgb4_vout_create(struct mgb4_dev *mgbdev, int id);
+void mgb4_vout_free(struct mgb4_vout_dev *voutdev);
+
+#endif
diff --git a/drivers/media/pci/zoran/zoran.h b/drivers/media/pci/zoran/zoran.h
index 56340553b282..1cd990468d3d 100644
--- a/drivers/media/pci/zoran/zoran.h
+++ b/drivers/media/pci/zoran/zoran.h
@@ -219,7 +219,7 @@ struct zoran {
const struct tvnorm *timing;
unsigned short id; /* number of this device */
- char name[32]; /* name of this device */
+ char name[40]; /* name of this device */
struct pci_dev *pci_dev; /* PCI device */
unsigned char revision; /* revision of zr36057 */
unsigned char __iomem *zr36057_mem;/* pointer to mapped IO memory */
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index ee579916f874..91e54215de3a 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -73,6 +73,7 @@ source "drivers/media/platform/intel/Kconfig"
source "drivers/media/platform/marvell/Kconfig"
source "drivers/media/platform/mediatek/Kconfig"
source "drivers/media/platform/microchip/Kconfig"
+source "drivers/media/platform/nuvoton/Kconfig"
source "drivers/media/platform/nvidia/Kconfig"
source "drivers/media/platform/nxp/Kconfig"
source "drivers/media/platform/qcom/Kconfig"
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 5453bb868e67..3296ec1ebe16 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -16,6 +16,7 @@ obj-y += intel/
obj-y += marvell/
obj-y += mediatek/
obj-y += microchip/
+obj-y += nuvoton/
obj-y += nvidia/
obj-y += nxp/
obj-y += qcom/
diff --git a/drivers/media/platform/allegro-dvt/allegro-mail.c b/drivers/media/platform/allegro-dvt/allegro-mail.c
index 16effad10746..aadc947a77ae 100644
--- a/drivers/media/platform/allegro-dvt/allegro-mail.c
+++ b/drivers/media/platform/allegro-dvt/allegro-mail.c
@@ -16,7 +16,7 @@
const char *msg_type_name(enum mcu_msg_type type)
{
- static char buf[9];
+ static char buf[13];
switch (type) {
case MCU_MSG_TYPE_INIT:
diff --git a/drivers/media/platform/allegro-dvt/allegro-mail.h b/drivers/media/platform/allegro-dvt/allegro-mail.h
index a5686058d754..c0c9013f1aab 100644
--- a/drivers/media/platform/allegro-dvt/allegro-mail.h
+++ b/drivers/media/platform/allegro-dvt/allegro-mail.h
@@ -184,7 +184,7 @@ struct mcu_msg_push_buffers_internal {
struct mcu_msg_header header;
u32 channel_id;
size_t num_buffers;
- struct mcu_msg_push_buffers_internal_buffer buffer[];
+ struct mcu_msg_push_buffers_internal_buffer buffer[] __counted_by(num_buffers);
};
struct mcu_msg_put_stream_buffer {
diff --git a/drivers/media/platform/amphion/vpu_defs.h b/drivers/media/platform/amphion/vpu_defs.h
index 667637eedb5d..7320852668d6 100644
--- a/drivers/media/platform/amphion/vpu_defs.h
+++ b/drivers/media/platform/amphion/vpu_defs.h
@@ -71,6 +71,7 @@ enum {
VPU_MSG_ID_TIMESTAMP_INFO,
VPU_MSG_ID_FIRMWARE_XCPT,
VPU_MSG_ID_PIC_SKIPPED,
+ VPU_MSG_ID_DBG_MSG,
};
enum VPU_ENC_MEMORY_RESOURSE {
diff --git a/drivers/media/platform/amphion/vpu_helpers.c b/drivers/media/platform/amphion/vpu_helpers.c
index af3b336e5dc3..d12310af9ebc 100644
--- a/drivers/media/platform/amphion/vpu_helpers.c
+++ b/drivers/media/platform/amphion/vpu_helpers.c
@@ -489,6 +489,7 @@ const char *vpu_id_name(u32 id)
case VPU_MSG_ID_UNSUPPORTED: return "unsupported";
case VPU_MSG_ID_FIRMWARE_XCPT: return "exception";
case VPU_MSG_ID_PIC_SKIPPED: return "skipped";
+ case VPU_MSG_ID_DBG_MSG: return "debug msg";
}
return "<unknown>";
}
diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c
index f771661980c0..d3425de7bccd 100644
--- a/drivers/media/platform/amphion/vpu_malone.c
+++ b/drivers/media/platform/amphion/vpu_malone.c
@@ -745,6 +745,7 @@ static struct vpu_pair malone_msgs[] = {
{VPU_MSG_ID_UNSUPPORTED, VID_API_EVENT_UNSUPPORTED_STREAM},
{VPU_MSG_ID_FIRMWARE_XCPT, VID_API_EVENT_FIRMWARE_XCPT},
{VPU_MSG_ID_PIC_SKIPPED, VID_API_EVENT_PIC_SKIPPED},
+ {VPU_MSG_ID_DBG_MSG, VID_API_EVENT_DBG_MSG_DEC},
};
static void vpu_malone_pack_fs_alloc(struct vpu_rpc_event *pkt,
diff --git a/drivers/media/platform/amphion/vpu_msgs.c b/drivers/media/platform/amphion/vpu_msgs.c
index d0ead051f7d1..b74a407a19f2 100644
--- a/drivers/media/platform/amphion/vpu_msgs.c
+++ b/drivers/media/platform/amphion/vpu_msgs.c
@@ -23,6 +23,7 @@
struct vpu_msg_handler {
u32 id;
void (*done)(struct vpu_inst *inst, struct vpu_rpc_event *pkt);
+ u32 is_str;
};
static void vpu_session_handle_start_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
@@ -154,7 +155,7 @@ static void vpu_session_handle_error(struct vpu_inst *inst, struct vpu_rpc_event
{
char *str = (char *)pkt->data;
- if (strlen(str))
+ if (*str)
dev_err(inst->dev, "instance %d firmware error : %s\n", inst->id, str);
else
dev_err(inst->dev, "instance %d is unsupported stream\n", inst->id);
@@ -180,6 +181,21 @@ static void vpu_session_handle_pic_skipped(struct vpu_inst *inst, struct vpu_rpc
vpu_inst_unlock(inst);
}
+static void vpu_session_handle_dbg_msg(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+{
+ char *str = (char *)pkt->data;
+
+ if (*str)
+ dev_info(inst->dev, "instance %d firmware dbg msg : %s\n", inst->id, str);
+}
+
+static void vpu_terminate_string_msg(struct vpu_rpc_event *pkt)
+{
+ if (pkt->hdr.num == ARRAY_SIZE(pkt->data))
+ pkt->hdr.num--;
+ pkt->data[pkt->hdr.num] = 0;
+}
+
static struct vpu_msg_handler handlers[] = {
{VPU_MSG_ID_START_DONE, vpu_session_handle_start_done},
{VPU_MSG_ID_STOP_DONE, vpu_session_handle_stop_done},
@@ -193,9 +209,10 @@ static struct vpu_msg_handler handlers[] = {
{VPU_MSG_ID_PIC_DECODED, vpu_session_handle_pic_decoded},
{VPU_MSG_ID_DEC_DONE, vpu_session_handle_pic_done},
{VPU_MSG_ID_PIC_EOS, vpu_session_handle_eos},
- {VPU_MSG_ID_UNSUPPORTED, vpu_session_handle_error},
- {VPU_MSG_ID_FIRMWARE_XCPT, vpu_session_handle_firmware_xcpt},
+ {VPU_MSG_ID_UNSUPPORTED, vpu_session_handle_error, true},
+ {VPU_MSG_ID_FIRMWARE_XCPT, vpu_session_handle_firmware_xcpt, true},
{VPU_MSG_ID_PIC_SKIPPED, vpu_session_handle_pic_skipped},
+ {VPU_MSG_ID_DBG_MSG, vpu_session_handle_dbg_msg, true},
};
static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *msg)
@@ -219,8 +236,12 @@ static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *m
}
}
- if (handler && handler->done)
- handler->done(inst, msg);
+ if (handler) {
+ if (handler->is_str)
+ vpu_terminate_string_msg(msg);
+ if (handler->done)
+ handler->done(inst, msg);
+ }
vpu_response_cmd(inst, msg_id, 1);
diff --git a/drivers/media/platform/aspeed/aspeed-video.c b/drivers/media/platform/aspeed/aspeed-video.c
index a9c2c69b2ed9..d08aa7f73d4f 100644
--- a/drivers/media/platform/aspeed/aspeed-video.c
+++ b/drivers/media/platform/aspeed/aspeed-video.c
@@ -1970,22 +1970,15 @@ static void aspeed_video_debugfs_remove(struct aspeed_video *video)
debugfs_entry = NULL;
}
-static int aspeed_video_debugfs_create(struct aspeed_video *video)
+static void aspeed_video_debugfs_create(struct aspeed_video *video)
{
debugfs_entry = debugfs_create_file(DEVICE_NAME, 0444, NULL,
video,
&aspeed_video_debugfs_fops);
- if (!debugfs_entry)
- aspeed_video_debugfs_remove(video);
-
- return !debugfs_entry ? -EIO : 0;
}
#else
static void aspeed_video_debugfs_remove(struct aspeed_video *video) { }
-static int aspeed_video_debugfs_create(struct aspeed_video *video)
-{
- return 0;
-}
+static void aspeed_video_debugfs_create(struct aspeed_video *video) { }
#endif /* CONFIG_DEBUG_FS */
static int aspeed_video_setup_video(struct aspeed_video *video)
@@ -2198,9 +2191,7 @@ static int aspeed_video_probe(struct platform_device *pdev)
return rc;
}
- rc = aspeed_video_debugfs_create(video);
- if (rc)
- dev_err(video->dev, "debugfs create failed\n");
+ aspeed_video_debugfs_create(video);
return 0;
}
diff --git a/drivers/media/platform/cadence/Kconfig b/drivers/media/platform/cadence/Kconfig
index 480325d053de..1aa608c00dbc 100644
--- a/drivers/media/platform/cadence/Kconfig
+++ b/drivers/media/platform/cadence/Kconfig
@@ -8,6 +8,8 @@ config VIDEO_CADENCE_CSI2RX
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
+ select GENERIC_PHY
+ select GENERIC_PHY_MIPI_DPHY
help
Support for the Cadence MIPI CSI2 Receiver controller.
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
index 0d879d71d818..889f4fbbafb3 100644
--- a/drivers/media/platform/cadence/cdns-csi2rx.c
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
@@ -40,10 +41,14 @@
#define CSI2RX_STREAM_BASE(n) (((n) + 1) * 0x100)
#define CSI2RX_STREAM_CTRL_REG(n) (CSI2RX_STREAM_BASE(n) + 0x000)
+#define CSI2RX_STREAM_CTRL_SOFT_RST BIT(4)
+#define CSI2RX_STREAM_CTRL_STOP BIT(1)
#define CSI2RX_STREAM_CTRL_START BIT(0)
+#define CSI2RX_STREAM_STATUS_REG(n) (CSI2RX_STREAM_BASE(n) + 0x004)
+#define CSI2RX_STREAM_STATUS_RDY BIT(31)
+
#define CSI2RX_STREAM_DATA_CFG_REG(n) (CSI2RX_STREAM_BASE(n) + 0x008)
-#define CSI2RX_STREAM_DATA_CFG_EN_VC_SELECT BIT(31)
#define CSI2RX_STREAM_DATA_CFG_VC_SELECT(n) BIT((n) + 16)
#define CSI2RX_STREAM_CFG_REG(n) (CSI2RX_STREAM_BASE(n) + 0x00c)
@@ -61,6 +66,11 @@ enum csi2rx_pads {
CSI2RX_PAD_MAX,
};
+struct csi2rx_fmt {
+ u32 code;
+ u8 bpp;
+};
+
struct csi2rx_priv {
struct device *dev;
unsigned int count;
@@ -95,6 +105,32 @@ struct csi2rx_priv {
int source_pad;
};
+static const struct csi2rx_fmt formats[] = {
+ { .code = MEDIA_BUS_FMT_YUYV8_1X16, .bpp = 16, },
+ { .code = MEDIA_BUS_FMT_UYVY8_1X16, .bpp = 16, },
+ { .code = MEDIA_BUS_FMT_YVYU8_1X16, .bpp = 16, },
+ { .code = MEDIA_BUS_FMT_VYUY8_1X16, .bpp = 16, },
+ { .code = MEDIA_BUS_FMT_SBGGR8_1X8, .bpp = 8, },
+ { .code = MEDIA_BUS_FMT_SGBRG8_1X8, .bpp = 8, },
+ { .code = MEDIA_BUS_FMT_SGRBG8_1X8, .bpp = 8, },
+ { .code = MEDIA_BUS_FMT_SRGGB8_1X8, .bpp = 8, },
+ { .code = MEDIA_BUS_FMT_SBGGR10_1X10, .bpp = 10, },
+ { .code = MEDIA_BUS_FMT_SGBRG10_1X10, .bpp = 10, },
+ { .code = MEDIA_BUS_FMT_SGRBG10_1X10, .bpp = 10, },
+ { .code = MEDIA_BUS_FMT_SRGGB10_1X10, .bpp = 10, },
+};
+
+static const struct csi2rx_fmt *csi2rx_get_fmt_by_code(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++)
+ if (formats[i].code == code)
+ return &formats[i];
+
+ return NULL;
+}
+
static inline
struct csi2rx_priv *v4l2_subdev_to_csi2rx(struct v4l2_subdev *subdev)
{
@@ -103,19 +139,54 @@ struct csi2rx_priv *v4l2_subdev_to_csi2rx(struct v4l2_subdev *subdev)
static void csi2rx_reset(struct csi2rx_priv *csi2rx)
{
+ unsigned int i;
+
+ /* Reset module */
writel(CSI2RX_SOFT_RESET_PROTOCOL | CSI2RX_SOFT_RESET_FRONT,
csi2rx->base + CSI2RX_SOFT_RESET_REG);
+ /* Reset individual streams. */
+ for (i = 0; i < csi2rx->max_streams; i++) {
+ writel(CSI2RX_STREAM_CTRL_SOFT_RST,
+ csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
+ }
- udelay(10);
+ usleep_range(10, 20);
+ /* Clear resets */
writel(0, csi2rx->base + CSI2RX_SOFT_RESET_REG);
+ for (i = 0; i < csi2rx->max_streams; i++)
+ writel(0, csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
}
static int csi2rx_configure_ext_dphy(struct csi2rx_priv *csi2rx)
{
union phy_configure_opts opts = { };
+ struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
+ struct v4l2_subdev_format sd_fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .pad = CSI2RX_PAD_SINK,
+ };
+ const struct csi2rx_fmt *fmt;
+ s64 link_freq;
int ret;
+ ret = v4l2_subdev_call_state_active(&csi2rx->subdev, pad, get_fmt,
+ &sd_fmt);
+ if (ret < 0)
+ return ret;
+
+ fmt = csi2rx_get_fmt_by_code(sd_fmt.format.code);
+
+ link_freq = v4l2_get_link_freq(csi2rx->source_subdev->ctrl_handler,
+ fmt->bpp, 2 * csi2rx->num_lanes);
+ if (link_freq < 0)
+ return link_freq;
+
+ ret = phy_mipi_dphy_get_default_config_for_hsclk(link_freq,
+ csi2rx->num_lanes, cfg);
+ if (ret)
+ return ret;
+
ret = phy_power_on(csi2rx->dphy);
if (ret)
return ret;
@@ -199,8 +270,11 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
writel(CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF,
csi2rx->base + CSI2RX_STREAM_CFG_REG(i));
- writel(CSI2RX_STREAM_DATA_CFG_EN_VC_SELECT |
- CSI2RX_STREAM_DATA_CFG_VC_SELECT(i),
+ /*
+	 * Enable one virtual channel. When multiple virtual channels
+	 * are supported, this will have to be changed.
+ */
+ writel(CSI2RX_STREAM_DATA_CFG_VC_SELECT(0),
csi2rx->base + CSI2RX_STREAM_DATA_CFG_REG(i));
writel(CSI2RX_STREAM_CTRL_START,
@@ -243,13 +317,25 @@ err_disable_pclk:
static void csi2rx_stop(struct csi2rx_priv *csi2rx)
{
unsigned int i;
+ u32 val;
+ int ret;
clk_prepare_enable(csi2rx->p_clk);
reset_control_assert(csi2rx->sys_rst);
clk_disable_unprepare(csi2rx->sys_clk);
for (i = 0; i < csi2rx->max_streams; i++) {
- writel(0, csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
+ writel(CSI2RX_STREAM_CTRL_STOP,
+ csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
+
+ ret = readl_relaxed_poll_timeout(csi2rx->base +
+ CSI2RX_STREAM_STATUS_REG(i),
+ val,
+ !(val & CSI2RX_STREAM_STATUS_RDY),
+ 10, 10000);
+ if (ret)
+ dev_warn(csi2rx->dev,
+ "Failed to stop streaming on pad%u\n", i);
reset_control_assert(csi2rx->pixel_rst[i]);
clk_disable_unprepare(csi2rx->pixel_clk[i]);
@@ -303,12 +389,72 @@ out:
return ret;
}
+static int csi2rx_set_fmt(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt;
+ unsigned int i;
+
+	/* No transcoding; source and sink formats must match. */
+ if (format->pad != CSI2RX_PAD_SINK)
+ return v4l2_subdev_get_fmt(subdev, state, format);
+
+ if (!csi2rx_get_fmt_by_code(format->format.code))
+ format->format.code = formats[0].code;
+
+ format->format.field = V4L2_FIELD_NONE;
+
+ /* Set sink format */
+ fmt = v4l2_subdev_get_pad_format(subdev, state, format->pad);
+ *fmt = format->format;
+
+ /* Propagate to source formats */
+ for (i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++) {
+ fmt = v4l2_subdev_get_pad_format(subdev, state, i);
+ *fmt = format->format;
+ }
+
+ return 0;
+}
+
+static int csi2rx_init_cfg(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_subdev_format format = {
+ .pad = CSI2RX_PAD_SINK,
+ .format = {
+ .width = 640,
+ .height = 480,
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .ycbcr_enc = V4L2_YCBCR_ENC_601,
+ .quantization = V4L2_QUANTIZATION_LIM_RANGE,
+ .xfer_func = V4L2_XFER_FUNC_SRGB,
+ },
+ };
+
+ return csi2rx_set_fmt(subdev, state, &format);
+}
+
+static const struct v4l2_subdev_pad_ops csi2rx_pad_ops = {
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = csi2rx_set_fmt,
+ .init_cfg = csi2rx_init_cfg,
+};
+
static const struct v4l2_subdev_video_ops csi2rx_video_ops = {
.s_stream = csi2rx_s_stream,
};
static const struct v4l2_subdev_ops csi2rx_subdev_ops = {
.video = &csi2rx_video_ops,
+ .pad = &csi2rx_pad_ops,
+};
+
+static const struct media_entity_operations csi2rx_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
};
static int csi2rx_async_bound(struct v4l2_async_notifier *notifier,
@@ -479,8 +625,10 @@ static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx)
asd = v4l2_async_nf_add_fwnode_remote(&csi2rx->notifier, fwh,
struct v4l2_async_connection);
of_node_put(ep);
- if (IS_ERR(asd))
+ if (IS_ERR(asd)) {
+ v4l2_async_nf_cleanup(&csi2rx->notifier);
return PTR_ERR(asd);
+ }
csi2rx->notifier.ops = &csi2rx_notifier_ops;
@@ -516,23 +664,29 @@ static int csi2rx_probe(struct platform_device *pdev)
csi2rx->subdev.dev = &pdev->dev;
v4l2_subdev_init(&csi2rx->subdev, &csi2rx_subdev_ops);
v4l2_set_subdevdata(&csi2rx->subdev, &pdev->dev);
- snprintf(csi2rx->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s.%s",
- KBUILD_MODNAME, dev_name(&pdev->dev));
+ snprintf(csi2rx->subdev.name, sizeof(csi2rx->subdev.name),
+ "%s.%s", KBUILD_MODNAME, dev_name(&pdev->dev));
/* Create our media pads */
csi2rx->subdev.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
csi2rx->pads[CSI2RX_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
for (i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++)
csi2rx->pads[i].flags = MEDIA_PAD_FL_SOURCE;
+ csi2rx->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ csi2rx->subdev.entity.ops = &csi2rx_media_ops;
ret = media_entity_pads_init(&csi2rx->subdev.entity, CSI2RX_PAD_MAX,
csi2rx->pads);
if (ret)
goto err_cleanup;
+ ret = v4l2_subdev_init_finalize(&csi2rx->subdev);
+ if (ret)
+ goto err_cleanup;
+
ret = v4l2_async_register_subdev(&csi2rx->subdev);
if (ret < 0)
- goto err_cleanup;
+ goto err_free_state;
dev_info(&pdev->dev,
"Probed CSI2RX with %u/%u lanes, %u streams, %s D-PHY\n",
@@ -542,8 +696,12 @@ static int csi2rx_probe(struct platform_device *pdev)
return 0;
+err_free_state:
+ v4l2_subdev_cleanup(&csi2rx->subdev);
err_cleanup:
+ v4l2_async_nf_unregister(&csi2rx->notifier);
v4l2_async_nf_cleanup(&csi2rx->notifier);
+ media_entity_cleanup(&csi2rx->subdev.entity);
err_free_priv:
kfree(csi2rx);
return ret;
@@ -553,7 +711,11 @@ static void csi2rx_remove(struct platform_device *pdev)
{
struct csi2rx_priv *csi2rx = platform_get_drvdata(pdev);
+ v4l2_async_nf_unregister(&csi2rx->notifier);
+ v4l2_async_nf_cleanup(&csi2rx->notifier);
v4l2_async_unregister_subdev(&csi2rx->subdev);
+ v4l2_subdev_cleanup(&csi2rx->subdev);
+ media_entity_cleanup(&csi2rx->subdev.entity);
kfree(csi2rx);
}
diff --git a/drivers/media/platform/cadence/cdns-csi2tx.c b/drivers/media/platform/cadence/cdns-csi2tx.c
index 1e0400b7803e..c115742f347f 100644
--- a/drivers/media/platform/cadence/cdns-csi2tx.c
+++ b/drivers/media/platform/cadence/cdns-csi2tx.c
@@ -480,7 +480,7 @@ static int csi2tx_get_resources(struct csi2tx_priv *csi2tx,
csi2tx->has_internal_dphy = !!(dev_cfg & CSI2TX_DEVICE_CONFIG_HAS_DPHY);
for (i = 0; i < csi2tx->max_streams; i++) {
- char clk_name[16];
+ char clk_name[23];
snprintf(clk_name, sizeof(clk_name), "pixel_if%u_clk", i);
csi2tx->pixel_clk[i] = devm_clk_get(&pdev->dev, clk_name);
@@ -592,8 +592,8 @@ static int csi2tx_probe(struct platform_device *pdev)
csi2tx->subdev.owner = THIS_MODULE;
csi2tx->subdev.dev = &pdev->dev;
csi2tx->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- snprintf(csi2tx->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s.%s",
- KBUILD_MODNAME, dev_name(&pdev->dev));
+ snprintf(csi2tx->subdev.name, sizeof(csi2tx->subdev.name),
+ "%s.%s", KBUILD_MODNAME, dev_name(&pdev->dev));
ret = csi2tx_check_lanes(csi2tx);
if (ret)
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
index 2bbc48c7402c..f8fa3b841ccf 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
@@ -127,6 +127,7 @@ void mtk_jpeg_set_enc_params(struct mtk_jpeg_ctx *ctx, void __iomem *base)
u32 img_stride;
u32 mem_stride;
u32 i, enc_quality;
+ u32 nr_enc_quality = ARRAY_SIZE(mtk_jpeg_enc_quality);
value = width << 16 | height;
writel(value, base + JPEG_ENC_IMG_SIZE);
@@ -157,8 +158,8 @@ void mtk_jpeg_set_enc_params(struct mtk_jpeg_ctx *ctx, void __iomem *base)
writel(img_stride, base + JPEG_ENC_IMG_STRIDE);
writel(mem_stride, base + JPEG_ENC_STRIDE);
- enc_quality = mtk_jpeg_enc_quality[0].hardware_value;
- for (i = 0; i < ARRAY_SIZE(mtk_jpeg_enc_quality); i++) {
+ enc_quality = mtk_jpeg_enc_quality[nr_enc_quality - 1].hardware_value;
+ for (i = 0; i < nr_enc_quality; i++) {
if (ctx->enc_quality <= mtk_jpeg_enc_quality[i].quality_param) {
enc_quality = mtk_jpeg_enc_quality[i].hardware_value;
break;
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
index 3177592490be..6adac857a477 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
@@ -261,11 +261,11 @@ static int mdp_path_config(struct mdp_dev *mdp, struct mdp_cmdq_cmd *cmd,
const struct v4l2_rect *compose;
u32 out = 0;
+ ctx = &path->comps[index];
if (CFG_CHECK(MT8183, p_id))
out = CFG_COMP(MT8183, ctx->param, outputs[0]);
compose = path->composes[out];
- ctx = &path->comps[index];
ret = call_op(ctx, config_frame, cmd, compose);
if (ret)
return ret;
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
index 667933ea15f4..575c8d52acd1 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
@@ -1137,6 +1137,7 @@ int mdp_comp_config(struct mdp_dev *mdp)
comp = mdp_comp_create(mdp, node, id);
if (IS_ERR(comp)) {
ret = PTR_ERR(comp);
+ of_node_put(node);
goto err_init_comps;
}
diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
index 9e744d07a1e8..6bbe55de6ce9 100644
--- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
@@ -68,7 +68,7 @@ struct mtk_vcodec_fw *mtk_vcodec_fw_scp_init(void *priv, enum mtk_vcodec_fw_use
plat_dev = dec_dev->plat_dev;
} else {
- pr_err("Invalid fw_use %d (use a resonable fw id here)\n", fw_use);
+ pr_err("Invalid fw_use %d (use a reasonable fw id here)\n", fw_use);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
index 5e03b0886559..9f6e4b59455d 100644
--- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
@@ -109,7 +109,7 @@ struct mtk_vcodec_fw *mtk_vcodec_fw_vpu_init(void *priv, enum mtk_vcodec_fw_use
plat_dev = dec_dev->plat_dev;
rst_id = VPU_RST_DEC;
} else {
- pr_err("Invalid fw_use %d (use a resonable fw id here)\n", fw_use);
+ pr_err("Invalid fw_use %d (use a reasonable fw id here)\n", fw_use);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c
index 908602031fd0..9ce34a3b5ee6 100644
--- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c
@@ -47,20 +47,32 @@ EXPORT_SYMBOL(mtk_vcodec_write_vdecsys);
int mtk_vcodec_mem_alloc(void *priv, struct mtk_vcodec_mem *mem)
{
+ enum mtk_instance_type inst_type = *((unsigned int *)priv);
+ struct platform_device *plat_dev;
unsigned long size = mem->size;
- struct mtk_vcodec_dec_ctx *ctx = priv;
- struct device *dev = &ctx->dev->plat_dev->dev;
+ int id;
- mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
+ if (inst_type == MTK_INST_ENCODER) {
+ struct mtk_vcodec_enc_ctx *enc_ctx = priv;
+
+ plat_dev = enc_ctx->dev->plat_dev;
+ id = enc_ctx->id;
+ } else {
+ struct mtk_vcodec_dec_ctx *dec_ctx = priv;
+
+ plat_dev = dec_ctx->dev->plat_dev;
+ id = dec_ctx->id;
+ }
+
+ mem->va = dma_alloc_coherent(&plat_dev->dev, size, &mem->dma_addr, GFP_KERNEL);
if (!mem->va) {
- mtk_v4l2_vdec_err(ctx, "%s dma_alloc size=%ld failed!", dev_name(dev), size);
+ mtk_v4l2_err(plat_dev, "%s dma_alloc size=%ld failed!",
+ dev_name(&plat_dev->dev), size);
return -ENOMEM;
}
- mtk_v4l2_vdec_dbg(3, ctx, "[%d] - va = %p", ctx->id, mem->va);
- mtk_v4l2_vdec_dbg(3, ctx, "[%d] - dma = 0x%lx", ctx->id,
- (unsigned long)mem->dma_addr);
- mtk_v4l2_vdec_dbg(3, ctx, "[%d] size = 0x%lx", ctx->id, size);
+ mtk_v4l2_debug(plat_dev, 3, "[%d] - va = %p dma = 0x%lx size = 0x%lx", id, mem->va,
+ (unsigned long)mem->dma_addr, size);
return 0;
}
@@ -68,21 +80,33 @@ EXPORT_SYMBOL(mtk_vcodec_mem_alloc);
void mtk_vcodec_mem_free(void *priv, struct mtk_vcodec_mem *mem)
{
+ enum mtk_instance_type inst_type = *((unsigned int *)priv);
+ struct platform_device *plat_dev;
unsigned long size = mem->size;
- struct mtk_vcodec_dec_ctx *ctx = priv;
- struct device *dev = &ctx->dev->plat_dev->dev;
+ int id;
+
+ if (inst_type == MTK_INST_ENCODER) {
+ struct mtk_vcodec_enc_ctx *enc_ctx = priv;
+
+ plat_dev = enc_ctx->dev->plat_dev;
+ id = enc_ctx->id;
+ } else {
+ struct mtk_vcodec_dec_ctx *dec_ctx = priv;
+
+ plat_dev = dec_ctx->dev->plat_dev;
+ id = dec_ctx->id;
+ }
if (!mem->va) {
- mtk_v4l2_vdec_err(ctx, "%s dma_free size=%ld failed!", dev_name(dev), size);
+ mtk_v4l2_err(plat_dev, "%s dma_free size=%ld failed!",
+ dev_name(&plat_dev->dev), size);
return;
}
- mtk_v4l2_vdec_dbg(3, ctx, "[%d] - va = %p", ctx->id, mem->va);
- mtk_v4l2_vdec_dbg(3, ctx, "[%d] - dma = 0x%lx", ctx->id,
- (unsigned long)mem->dma_addr);
- mtk_v4l2_vdec_dbg(3, ctx, "[%d] size = 0x%lx", ctx->id, size);
+ mtk_v4l2_debug(plat_dev, 3, "[%d] - va = %p dma = 0x%lx size = 0x%lx", id, mem->va,
+ (unsigned long)mem->dma_addr, size);
- dma_free_coherent(dev, size, mem->va, mem->dma_addr);
+ dma_free_coherent(&plat_dev->dev, size, mem->va, mem->dma_addr);
mem->va = NULL;
mem->dma_addr = 0;
mem->size = 0;
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
index 04948d3eb011..eb381fa6e7d1 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
@@ -866,7 +866,7 @@ static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct mtk_vcodec_enc_ctx *ctx = vb2_get_drv_priv(q);
struct venc_enc_param param;
- int ret, pm_ret;
+ int ret;
int i;
/* Once state turn into MTK_STATE_ABORT, we need stop_streaming
@@ -886,18 +886,12 @@ static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
return 0;
}
- ret = pm_runtime_resume_and_get(&ctx->dev->plat_dev->dev);
- if (ret < 0) {
- mtk_v4l2_venc_err(ctx, "pm_runtime_resume_and_get fail %d", ret);
- goto err_start_stream;
- }
-
mtk_venc_set_param(ctx, &param);
ret = venc_if_set_param(ctx, VENC_SET_PARAM_ENC, &param);
if (ret) {
mtk_v4l2_venc_err(ctx, "venc_if_set_param failed=%d", ret);
ctx->state = MTK_STATE_ABORT;
- goto err_set_param;
+ goto err_start_stream;
}
ctx->param_change = MTK_ENCODE_PARAM_NONE;
@@ -910,18 +904,13 @@ static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
if (ret) {
mtk_v4l2_venc_err(ctx, "venc_if_set_param failed=%d", ret);
ctx->state = MTK_STATE_ABORT;
- goto err_set_param;
+ goto err_start_stream;
}
ctx->state = MTK_STATE_HEADER;
}
return 0;
-err_set_param:
- pm_ret = pm_runtime_put(&ctx->dev->plat_dev->dev);
- if (pm_ret < 0)
- mtk_v4l2_venc_err(ctx, "pm_runtime_put fail %d", pm_ret);
-
err_start_stream:
for (i = 0; i < q->num_buffers; ++i) {
struct vb2_buffer *buf = vb2_get_buffer(q, i);
@@ -1004,10 +993,6 @@ static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
if (ret)
mtk_v4l2_venc_err(ctx, "venc_if_deinit failed=%d", ret);
- ret = pm_runtime_put(&ctx->dev->plat_dev->dev);
- if (ret < 0)
- mtk_v4l2_venc_err(ctx, "pm_runtime_put fail %d", ret);
-
ctx->state = MTK_STATE_FREE;
}
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c
index 3fce936e61b9..a22b7dfc656e 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c
@@ -58,6 +58,24 @@ int mtk_vcodec_init_enc_clk(struct mtk_vcodec_enc_dev *mtkdev)
return 0;
}
+void mtk_vcodec_enc_pw_on(struct mtk_vcodec_pm *pm)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(pm->dev);
+ if (ret)
+ dev_err(pm->dev, "pm_runtime_resume_and_get fail: %d", ret);
+}
+
+void mtk_vcodec_enc_pw_off(struct mtk_vcodec_pm *pm)
+{
+ int ret;
+
+ ret = pm_runtime_put(pm->dev);
+ if (ret && ret != -EAGAIN)
+ dev_err(pm->dev, "pm_runtime_put fail %d", ret);
+}
+
void mtk_vcodec_enc_clock_on(struct mtk_vcodec_pm *pm)
{
struct mtk_vcodec_clk *enc_clk = &pm->venc_clk;
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h
index e50be0575190..157ea08ba9e3 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h
@@ -10,7 +10,8 @@
#include "mtk_vcodec_enc_drv.h"
int mtk_vcodec_init_enc_clk(struct mtk_vcodec_enc_dev *dev);
-
+void mtk_vcodec_enc_pw_on(struct mtk_vcodec_pm *pm);
+void mtk_vcodec_enc_pw_off(struct mtk_vcodec_pm *pm);
void mtk_vcodec_enc_clock_on(struct mtk_vcodec_pm *pm);
void mtk_vcodec_enc_clock_off(struct mtk_vcodec_pm *pm);
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c
index 1bdaecdd64a7..c402a686f3cb 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c
@@ -32,9 +32,7 @@ int venc_if_init(struct mtk_vcodec_enc_ctx *ctx, unsigned int fourcc)
}
mtk_venc_lock(ctx);
- mtk_vcodec_enc_clock_on(&ctx->dev->pm);
ret = ctx->enc_if->init(ctx);
- mtk_vcodec_enc_clock_off(&ctx->dev->pm);
mtk_venc_unlock(ctx);
return ret;
@@ -46,9 +44,7 @@ int venc_if_set_param(struct mtk_vcodec_enc_ctx *ctx,
int ret = 0;
mtk_venc_lock(ctx);
- mtk_vcodec_enc_clock_on(&ctx->dev->pm);
ret = ctx->enc_if->set_param(ctx->drv_handle, type, in);
- mtk_vcodec_enc_clock_off(&ctx->dev->pm);
mtk_venc_unlock(ctx);
return ret;
@@ -68,10 +64,12 @@ int venc_if_encode(struct mtk_vcodec_enc_ctx *ctx,
ctx->dev->curr_ctx = ctx;
spin_unlock_irqrestore(&ctx->dev->irqlock, flags);
+ mtk_vcodec_enc_pw_on(&ctx->dev->pm);
mtk_vcodec_enc_clock_on(&ctx->dev->pm);
ret = ctx->enc_if->encode(ctx->drv_handle, opt, frm_buf,
bs_buf, result);
mtk_vcodec_enc_clock_off(&ctx->dev->pm);
+ mtk_vcodec_enc_pw_off(&ctx->dev->pm);
spin_lock_irqsave(&ctx->dev->irqlock, flags);
ctx->dev->curr_ctx = NULL;
@@ -89,9 +87,7 @@ int venc_if_deinit(struct mtk_vcodec_enc_ctx *ctx)
return 0;
mtk_venc_lock(ctx);
- mtk_vcodec_enc_clock_on(&ctx->dev->pm);
ret = ctx->enc_if->deinit(ctx->drv_handle);
- mtk_vcodec_enc_clock_off(&ctx->dev->pm);
mtk_venc_unlock(ctx);
ctx->drv_handle = NULL;
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
index ae6290d28f8e..84ad1cc6ad17 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
@@ -154,6 +154,11 @@ int vpu_enc_init(struct venc_vpu_inst *vpu)
return -EINVAL;
}
+ if (IS_ERR_OR_NULL(vpu->vsi)) {
+ mtk_venc_err(vpu->ctx, "invalid venc vsi");
+ return -EINVAL;
+ }
+
return 0;
}
diff --git a/drivers/media/platform/microchip/microchip-isc-base.c b/drivers/media/platform/microchip/microchip-isc-base.c
index 8dbf7bc1e863..1f8528844497 100644
--- a/drivers/media/platform/microchip/microchip-isc-base.c
+++ b/drivers/media/platform/microchip/microchip-isc-base.c
@@ -478,12 +478,8 @@ static const struct vb2_ops isc_vb2_ops = {
static int isc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct isc_device *isc = video_drvdata(file);
-
strscpy(cap->driver, "microchip-isc", sizeof(cap->driver));
strscpy(cap->card, "Microchip Image Sensor Controller", sizeof(cap->card));
- snprintf(cap->bus_info, sizeof(cap->bus_info),
- "platform:%s", isc->v4l2_dev.name);
return 0;
}
@@ -1993,8 +1989,6 @@ int isc_mc_init(struct isc_device *isc, u32 ver)
strscpy(isc->mdev.driver_name, KBUILD_MODNAME,
sizeof(isc->mdev.driver_name));
strscpy(isc->mdev.model, match->compatible, sizeof(isc->mdev.model));
- snprintf(isc->mdev.bus_info, sizeof(isc->mdev.bus_info), "platform:%s",
- isc->v4l2_dev.name);
isc->mdev.hw_revision = ver;
media_device_init(&isc->mdev);
diff --git a/drivers/media/platform/nuvoton/Kconfig b/drivers/media/platform/nuvoton/Kconfig
new file mode 100644
index 000000000000..40b36d1be8dc
--- /dev/null
+++ b/drivers/media/platform/nuvoton/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "Nuvoton media platform drivers"
+
+config VIDEO_NPCM_VCD_ECE
+ tristate "Nuvoton NPCM Video Capture/Encode Engine driver"
+ depends on V4L_PLATFORM_DRIVERS && VIDEO_DEV
+ depends on ARCH_NPCM || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ Support for the Video Capture/Differentiation Engine (VCD) and
+ Encoding Compression Engine (ECE) present on Nuvoton NPCM SoCs.
+ The VCD can capture a frame from digital video input and compare
+ two frames in memory, and then the ECE can compress the frame
+ data into HEXTILE format.
diff --git a/drivers/media/platform/nuvoton/Makefile b/drivers/media/platform/nuvoton/Makefile
new file mode 100644
index 000000000000..74a4e3fc8555
--- /dev/null
+++ b/drivers/media/platform/nuvoton/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_VIDEO_NPCM_VCD_ECE) += npcm-video.o
diff --git a/drivers/media/platform/nuvoton/npcm-regs.h b/drivers/media/platform/nuvoton/npcm-regs.h
new file mode 100644
index 000000000000..4a44f47f026e
--- /dev/null
+++ b/drivers/media/platform/nuvoton/npcm-regs.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Register definition header for NPCM video driver
+ *
+ * Copyright (C) 2022 Nuvoton Technologies
+ */
+
+#ifndef _NPCM_REGS_H
+#define _NPCM_REGS_H
+
+/* VCD Registers */
+#define VCD_DIFF_TBL 0x0000
+#define VCD_FBA_ADR 0x8000
+#define VCD_FBB_ADR 0x8004
+
+#define VCD_FB_LP 0x8008
+#define VCD_FBA_LP GENMASK(15, 0)
+#define VCD_FBB_LP GENMASK(31, 16)
+
+#define VCD_CAP_RES 0x800c
+#define VCD_CAP_RES_VERT_RES GENMASK(10, 0)
+#define VCD_CAP_RES_HOR_RES GENMASK(26, 16)
+
+#define VCD_MODE 0x8014
+#define VCD_MODE_VCDE BIT(0)
+#define VCD_MODE_CM565 BIT(1)
+#define VCD_MODE_IDBC BIT(3)
+#define VCD_MODE_KVM_BW_SET BIT(16)
+
+#define VCD_CMD 0x8018
+#define VCD_CMD_GO BIT(0)
+#define VCD_CMD_RST BIT(1)
+#define VCD_CMD_OPERATION GENMASK(6, 4)
+#define VCD_CMD_OPERATION_CAPTURE 0
+#define VCD_CMD_OPERATION_COMPARE 2
+
+#define VCD_STAT 0x801c
+#define VCD_STAT_DONE BIT(0)
+#define VCD_STAT_IFOT BIT(2)
+#define VCD_STAT_IFOR BIT(3)
+#define VCD_STAT_VHT_CHG BIT(5)
+#define VCD_STAT_HAC_CHG BIT(8)
+#define VCD_STAT_BUSY BIT(30)
+#define VCD_STAT_CLEAR 0x3fff
+
+#define VCD_INTE 0x8020
+#define VCD_INTE_DONE_IE BIT(0)
+#define VCD_INTE_IFOT_IE BIT(2)
+#define VCD_INTE_IFOR_IE BIT(3)
+#define VCD_INTE_VHT_IE BIT(5)
+#define VCD_INTE_HAC_IE BIT(8)
+
+#define VCD_RCHG 0x8028
+#define VCD_RCHG_IG_CHG0 GENMASK(2, 0)
+#define VCD_RCHG_TIM_PRSCL GENMASK(12, 9)
+
+#define VCD_VER_HI_TIM 0x8044
+#define VCD_VER_HI_TIME GENMASK(23, 0)
+
+#define VCD_VER_HI_LST 0x8048
+#define VCD_VER_HI_LAST GENMASK(23, 0)
+
+#define VCD_HOR_AC_TIM 0x804c
+#define VCD_HOR_AC_TIME GENMASK(13, 0)
+
+#define VCD_HOR_AC_LST 0x8050
+#define VCD_HOR_AC_LAST GENMASK(13, 0)
+
+#define VCD_FIFO 0x805c
+#define VCD_FIFO_TH 0x100350ff
+
+#define VCD_FB_SIZE 0x500000 /* support up to 1920 x 1200 */
+#define VCD_KVM_BW_PCLK 120000000UL
+#define VCD_TIMEOUT_US 300000
+
+/* ECE Registers */
+#define ECE_DDA_CTRL 0x0000
+#define ECE_DDA_CTRL_ECEEN BIT(0)
+#define ECE_DDA_CTRL_INTEN BIT(8)
+
+#define ECE_DDA_STS 0x0004
+#define ECE_DDA_STS_CDREADY BIT(8)
+#define ECE_DDA_STS_ACDRDY BIT(10)
+
+#define ECE_FBR_BA 0x0008
+#define ECE_ED_BA 0x000c
+#define ECE_RECT_XY 0x0010
+
+#define ECE_RECT_DIMEN 0x0014
+#define ECE_RECT_DIMEN_WR GENMASK(10, 0)
+#define ECE_RECT_DIMEN_WLTR GENMASK(14, 11)
+#define ECE_RECT_DIMEN_HR GENMASK(26, 16)
+#define ECE_RECT_DIMEN_HLTR GENMASK(30, 27)
+
+#define ECE_RESOL 0x001c
+#define ECE_RESOL_FB_LP_512 0
+#define ECE_RESOL_FB_LP_1024 1
+#define ECE_RESOL_FB_LP_2048 2
+#define ECE_RESOL_FB_LP_2560 3
+#define ECE_RESOL_FB_LP_4096 4
+
+#define ECE_HEX_CTRL 0x0040
+#define ECE_HEX_CTRL_ENCDIS BIT(0)
+#define ECE_HEX_CTRL_ENC_GAP GENMASK(12, 8)
+
+#define ECE_HEX_RECT_OFFSET 0x0048
+#define ECE_HEX_RECT_OFFSET_MASK GENMASK(22, 0)
+
+#define ECE_TILE_W 16
+#define ECE_TILE_H 16
+#define ECE_POLL_TIMEOUT_US 300000
+
+/* GCR Registers */
+#define INTCR 0x3c
+#define INTCR_GFXIFDIS GENMASK(9, 8)
+#define INTCR_DEHS BIT(27)
+
+#define INTCR2 0x60
+#define INTCR2_GIRST2 BIT(2)
+#define INTCR2_GIHCRST BIT(5)
+#define INTCR2_GIVCRST BIT(6)
+
+/* GFXI Register */
+#define DISPST 0x00
+#define DISPST_HSCROFF BIT(1)
+#define DISPST_MGAMODE BIT(7)
+
+#define HVCNTL 0x10
+#define HVCNTL_MASK GENMASK(7, 0)
+
+#define HVCNTH 0x14
+#define HVCNTH_MASK GENMASK(2, 0)
+
+#define VVCNTL 0x20
+#define VVCNTL_MASK GENMASK(7, 0)
+
+#define VVCNTH 0x24
+#define VVCNTH_MASK GENMASK(2, 0)
+
+#define GPLLINDIV 0x40
+#define GPLLINDIV_MASK GENMASK(5, 0)
+#define GPLLINDIV_GPLLFBDV8 BIT(7)
+
+#define GPLLFBDIV 0x44
+#define GPLLFBDIV_MASK GENMASK(7, 0)
+
+#define GPLLST 0x48
+#define GPLLST_PLLOTDIV1 GENMASK(2, 0)
+#define GPLLST_PLLOTDIV2 GENMASK(5, 3)
+#define GPLLST_GPLLFBDV109 GENMASK(7, 6)
+
+#endif /* _NPCM_REGS_H */
diff --git a/drivers/media/platform/nuvoton/npcm-video.c b/drivers/media/platform/nuvoton/npcm-video.c
new file mode 100644
index 000000000000..b9e6782f59b4
--- /dev/null
+++ b/drivers/media/platform/nuvoton/npcm-video.c
@@ -0,0 +1,1831 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for Video Capture/Differentiation Engine (VCD) and Encoding
+ * Compression Engine (ECE) present on Nuvoton NPCM SoCs.
+ *
+ * Copyright (C) 2022 Nuvoton Technologies
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/v4l2-controls.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+#include <uapi/linux/npcm-video.h>
+#include "npcm-regs.h"
+
+#define DEVICE_NAME "npcm-video"
+#define MAX_WIDTH 1920
+#define MAX_HEIGHT 1200
+#define MIN_WIDTH 320
+#define MIN_HEIGHT 240
+#define MIN_LP 512
+#define MAX_LP 4096
+#define RECT_W 16
+#define RECT_H 16
+#define BITMAP_SIZE 32
+
+struct npcm_video_addr {
+ size_t size;
+ dma_addr_t dma;
+ void *virt;
+};
+
+struct npcm_video_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head link;
+};
+
+#define to_npcm_video_buffer(x) \
+ container_of((x), struct npcm_video_buffer, vb)
+
+/*
+ * VIDEO_STREAMING: a flag indicating if the video has started streaming
+ * VIDEO_CAPTURING: a flag indicating if the VCD is capturing a frame
+ * VIDEO_RES_CHANGING: a flag indicating if the resolution is changing
+ * VIDEO_STOPPED: a flag indicating if the video has stopped streaming
+ */
+enum {
+ VIDEO_STREAMING,
+ VIDEO_CAPTURING,
+ VIDEO_RES_CHANGING,
+ VIDEO_STOPPED,
+};
+
+struct rect_list {
+ struct v4l2_clip clip;
+ struct list_head list;
+};
+
+struct rect_list_info {
+ struct rect_list *list;
+ struct rect_list *first;
+ struct list_head *head;
+ unsigned int index;
+ unsigned int tile_perline;
+ unsigned int tile_perrow;
+ unsigned int offset_perline;
+ unsigned int tile_size;
+ unsigned int tile_cnt;
+};
+
+struct npcm_ece {
+ struct regmap *regmap;
+ atomic_t clients;
+ struct reset_control *reset;
+ bool enable;
+};
+
+struct npcm_video {
+ struct regmap *gcr_regmap;
+ struct regmap *gfx_regmap;
+ struct regmap *vcd_regmap;
+
+ struct device *dev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *rect_cnt_ctrl;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_pix_format pix_fmt;
+ struct v4l2_bt_timings active_timings;
+ struct v4l2_bt_timings detected_timings;
+ unsigned int v4l2_input_status;
+ struct vb2_queue queue;
+ struct video_device vdev;
+ struct mutex video_lock; /* v4l2 and videobuf2 lock */
+
+ struct list_head buffers;
+ spinlock_t lock; /* buffer list lock */
+ unsigned long flags;
+ unsigned int sequence;
+
+ struct npcm_video_addr src;
+ struct reset_control *reset;
+ struct npcm_ece ece;
+
+ unsigned int bytesperline;
+ unsigned int bytesperpixel;
+ unsigned int rect_cnt;
+ struct list_head list[VIDEO_MAX_FRAME];
+ unsigned int rect[VIDEO_MAX_FRAME];
+ unsigned int ctrl_cmd;
+ unsigned int op_cmd;
+};
+
+#define to_npcm_video(x) container_of((x), struct npcm_video, v4l2_dev)
+
+struct npcm_fmt {
+ unsigned int fourcc;
+ unsigned int bpp; /* bytes per pixel */
+};
+
+static const struct npcm_fmt npcm_fmt_list[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .bpp = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_HEXTILE,
+ .bpp = 2,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(npcm_fmt_list)
+
+static const struct v4l2_dv_timings_cap npcm_video_timings_cap = {
+ .type = V4L2_DV_BT_656_1120,
+ .bt = {
+ .min_width = MIN_WIDTH,
+ .max_width = MAX_WIDTH,
+ .min_height = MIN_HEIGHT,
+ .max_height = MAX_HEIGHT,
+ .min_pixelclock = 6574080, /* 640 x 480 x 24Hz */
+ .max_pixelclock = 138240000, /* 1920 x 1200 x 60Hz */
+ .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF,
+ .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
+ V4L2_DV_BT_CAP_REDUCED_BLANKING |
+ V4L2_DV_BT_CAP_CUSTOM,
+ },
+};
+
+static DECLARE_BITMAP(bitmap, BITMAP_SIZE);
+
+static const struct npcm_fmt *npcm_video_find_format(struct v4l2_format *f)
+{
+ const struct npcm_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < NUM_FORMATS; k++) {
+ fmt = &npcm_fmt_list[k];
+ if (fmt->fourcc == f->fmt.pix.pixelformat)
+ break;
+ }
+
+ if (k == NUM_FORMATS)
+ return NULL;
+
+ return &npcm_fmt_list[k];
+}
+
+static void npcm_video_ece_prepend_rect_header(void *addr, u16 x, u16 y, u16 w, u16 h)
+{
+ __be16 x_pos = cpu_to_be16(x);
+ __be16 y_pos = cpu_to_be16(y);
+ __be16 width = cpu_to_be16(w);
+ __be16 height = cpu_to_be16(h);
+ __be32 encoding = cpu_to_be32(5); /* Hextile encoding */
+
+ memcpy(addr, &x_pos, 2);
+ memcpy(addr + 2, &y_pos, 2);
+ memcpy(addr + 4, &width, 2);
+ memcpy(addr + 6, &height, 2);
+ memcpy(addr + 8, &encoding, 4);
+}
+
+static unsigned int npcm_video_ece_get_ed_size(struct npcm_video *video,
+ unsigned int offset, void *addr)
+{
+ struct regmap *ece = video->ece.regmap;
+ unsigned int size, gap, val;
+ int ret;
+
+ ret = regmap_read_poll_timeout(ece, ECE_DDA_STS, val,
+ (val & ECE_DDA_STS_CDREADY), 0,
+ ECE_POLL_TIMEOUT_US);
+
+ if (ret) {
+ dev_warn(video->dev, "Wait for ECE_DDA_STS_CDREADY timeout\n");
+ return 0;
+ }
+
+ size = readl((void __iomem *)addr + offset);
+ regmap_read(ece, ECE_HEX_CTRL, &val);
+ gap = FIELD_GET(ECE_HEX_CTRL_ENC_GAP, val);
+
+ dev_dbg(video->dev, "offset = %u, ed_size = %u, gap = %u\n", offset,
+ size, gap);
+
+ return size + gap;
+}
+
+static void npcm_video_ece_enc_rect(struct npcm_video *video,
+ unsigned int r_off_x, unsigned int r_off_y,
+ unsigned int r_w, unsigned int r_h)
+{
+ struct regmap *ece = video->ece.regmap;
+ unsigned int rect_offset = (r_off_y * video->bytesperline) + (r_off_x * 2);
+ unsigned int w_size = ECE_TILE_W, h_size = ECE_TILE_H;
+ unsigned int temp, w_tile, h_tile;
+
+ regmap_update_bits(ece, ECE_DDA_CTRL, ECE_DDA_CTRL_ECEEN, 0);
+ regmap_update_bits(ece, ECE_DDA_CTRL, ECE_DDA_CTRL_ECEEN, ECE_DDA_CTRL_ECEEN);
+ regmap_write(ece, ECE_DDA_STS, ECE_DDA_STS_CDREADY | ECE_DDA_STS_ACDRDY);
+ regmap_write(ece, ECE_RECT_XY, rect_offset);
+
+ w_tile = r_w / ECE_TILE_W;
+ h_tile = r_h / ECE_TILE_H;
+
+ if (r_w % ECE_TILE_W) {
+ w_tile += 1;
+ w_size = r_w % ECE_TILE_W;
+ }
+ if (r_h % ECE_TILE_H || !h_tile) {
+ h_tile += 1;
+ h_size = r_h % ECE_TILE_H;
+ }
+
+ temp = FIELD_PREP(ECE_RECT_DIMEN_WLTR, w_size - 1) |
+ FIELD_PREP(ECE_RECT_DIMEN_HLTR, h_size - 1) |
+ FIELD_PREP(ECE_RECT_DIMEN_WR, w_tile - 1) |
+ FIELD_PREP(ECE_RECT_DIMEN_HR, h_tile - 1);
+
+ regmap_write(ece, ECE_RECT_DIMEN, temp);
+}
+
+static unsigned int npcm_video_ece_read_rect_offset(struct npcm_video *video)
+{
+ struct regmap *ece = video->ece.regmap;
+ unsigned int offset;
+
+ regmap_read(ece, ECE_HEX_RECT_OFFSET, &offset);
+ return FIELD_GET(ECE_HEX_RECT_OFFSET_MASK, offset);
+}
+
+/*
+ * Set the line pitch (in bytes) for the frame buffers.
+ * Can be one of these values: 512, 1024, 2048, 2560 or 4096 bytes.
+ */
+static void npcm_video_ece_set_lp(struct npcm_video *video, unsigned int pitch)
+{
+ struct regmap *ece = video->ece.regmap;
+ unsigned int lp;
+
+ switch (pitch) {
+ case 512:
+ lp = ECE_RESOL_FB_LP_512;
+ break;
+ case 1024:
+ lp = ECE_RESOL_FB_LP_1024;
+ break;
+ case 2048:
+ lp = ECE_RESOL_FB_LP_2048;
+ break;
+ case 2560:
+ lp = ECE_RESOL_FB_LP_2560;
+ break;
+ case 4096:
+ lp = ECE_RESOL_FB_LP_4096;
+ break;
+ default:
+ return;
+ }
+
+ regmap_write(ece, ECE_RESOL, lp);
+}
+
+static inline void npcm_video_ece_set_fb_addr(struct npcm_video *video,
+ unsigned int buffer)
+{
+ struct regmap *ece = video->ece.regmap;
+
+ regmap_write(ece, ECE_FBR_BA, buffer);
+}
+
+static inline void npcm_video_ece_set_enc_dba(struct npcm_video *video,
+ unsigned int addr)
+{
+ struct regmap *ece = video->ece.regmap;
+
+ regmap_write(ece, ECE_ED_BA, addr);
+}
+
+static inline void npcm_video_ece_clear_rect_offset(struct npcm_video *video)
+{
+ struct regmap *ece = video->ece.regmap;
+
+ regmap_write(ece, ECE_HEX_RECT_OFFSET, 0);
+}
+
+static void npcm_video_ece_ctrl_reset(struct npcm_video *video)
+{
+ struct regmap *ece = video->ece.regmap;
+
+ regmap_update_bits(ece, ECE_DDA_CTRL, ECE_DDA_CTRL_ECEEN, 0);
+ regmap_update_bits(ece, ECE_HEX_CTRL, ECE_HEX_CTRL_ENCDIS, ECE_HEX_CTRL_ENCDIS);
+ regmap_update_bits(ece, ECE_DDA_CTRL, ECE_DDA_CTRL_ECEEN, ECE_DDA_CTRL_ECEEN);
+ regmap_update_bits(ece, ECE_HEX_CTRL, ECE_HEX_CTRL_ENCDIS, 0);
+
+ npcm_video_ece_clear_rect_offset(video);
+}
+
+static void npcm_video_ece_ip_reset(struct npcm_video *video)
+{
+ /*
+	 * After resetting a module and clearing the reset bit, the driver
+	 * should wait at least 10 us before accessing the module.
+ */
+ reset_control_assert(video->ece.reset);
+ usleep_range(10, 20);
+ reset_control_deassert(video->ece.reset);
+ usleep_range(10, 20);
+}
+
+static void npcm_video_ece_stop(struct npcm_video *video)
+{
+ struct regmap *ece = video->ece.regmap;
+
+ regmap_update_bits(ece, ECE_DDA_CTRL, ECE_DDA_CTRL_ECEEN, 0);
+ regmap_update_bits(ece, ECE_DDA_CTRL, ECE_DDA_CTRL_INTEN, 0);
+ regmap_update_bits(ece, ECE_HEX_CTRL, ECE_HEX_CTRL_ENCDIS, ECE_HEX_CTRL_ENCDIS);
+ npcm_video_ece_clear_rect_offset(video);
+}
+
+static bool npcm_video_alloc_fb(struct npcm_video *video,
+ struct npcm_video_addr *addr)
+{
+ addr->virt = dma_alloc_coherent(video->dev, VCD_FB_SIZE, &addr->dma,
+ GFP_KERNEL);
+ if (!addr->virt)
+ return false;
+
+ addr->size = VCD_FB_SIZE;
+ return true;
+}
+
+static void npcm_video_free_fb(struct npcm_video *video,
+ struct npcm_video_addr *addr)
+{
+ dma_free_coherent(video->dev, addr->size, addr->virt, addr->dma);
+ addr->size = 0;
+ addr->dma = 0ULL;
+ addr->virt = NULL;
+}
+
+static void npcm_video_free_diff_table(struct npcm_video *video)
+{
+ struct list_head *head, *pos, *nx;
+ struct rect_list *tmp;
+ unsigned int i;
+
+ for (i = 0; i < video->queue.num_buffers; i++) {
+ head = &video->list[i];
+ list_for_each_safe(pos, nx, head) {
+ tmp = list_entry(pos, struct rect_list, list);
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+ }
+}
+
+static unsigned int npcm_video_add_rect(struct npcm_video *video,
+ unsigned int index,
+ unsigned int x, unsigned int y,
+ unsigned int w, unsigned int h)
+{
+ struct list_head *head = &video->list[index];
+ struct rect_list *list = NULL;
+ struct v4l2_rect *r;
+
+ list = kzalloc(sizeof(*list), GFP_KERNEL);
+ if (!list)
+ return 0;
+
+ r = &list->clip.c;
+ r->left = x;
+ r->top = y;
+ r->width = w;
+ r->height = h;
+
+ list_add_tail(&list->list, head);
+ return 1;
+}
+
+static void npcm_video_merge_rect(struct npcm_video *video,
+ struct rect_list_info *info)
+{
+ struct list_head *head = info->head;
+ struct rect_list *list = info->list, *first = info->first;
+ struct v4l2_rect *r = &list->clip.c, *f = &first->clip.c;
+
+ if (!first) {
+ first = list;
+ info->first = first;
+ list_add_tail(&list->list, head);
+ video->rect_cnt++;
+ } else {
+ if ((r->left == (f->left + f->width)) && r->top == f->top) {
+ f->width += r->width;
+ kfree(list);
+ } else if ((r->top == (f->top + f->height)) &&
+ (r->left == f->left)) {
+ f->height += r->height;
+ kfree(list);
+ } else if (((r->top > f->top) &&
+ (r->top < (f->top + f->height))) &&
+ ((r->left > f->left) &&
+ (r->left < (f->left + f->width)))) {
+ kfree(list);
+ } else {
+ list_add_tail(&list->list, head);
+ video->rect_cnt++;
+ info->first = list;
+ }
+ }
+}
+
+static struct rect_list *npcm_video_new_rect(struct npcm_video *video,
+ unsigned int offset,
+ unsigned int index)
+{
+ struct v4l2_bt_timings *act = &video->active_timings;
+ struct rect_list *list = NULL;
+ struct v4l2_rect *r;
+
+ list = kzalloc(sizeof(*list), GFP_KERNEL);
+ if (!list)
+ return NULL;
+
+ r = &list->clip.c;
+
+ r->left = (offset << 4);
+ r->top = (index >> 2);
+ r->width = RECT_W;
+ r->height = RECT_H;
+ if ((r->left + RECT_W) > act->width)
+ r->width = act->width - r->left;
+ if ((r->top + RECT_H) > act->height)
+ r->height = act->height - r->top;
+
+ return list;
+}
+
+static int npcm_video_find_rect(struct npcm_video *video,
+ struct rect_list_info *info,
+ unsigned int offset)
+{
+ if (offset < info->tile_perline) {
+ info->list = npcm_video_new_rect(video, offset, info->index);
+ if (!info->list) {
+ dev_err(video->dev, "Failed to allocate rect_list\n");
+ return -ENOMEM;
+ }
+
+ npcm_video_merge_rect(video, info);
+ }
+ return 0;
+}
+
+static int npcm_video_build_table(struct npcm_video *video,
+ struct rect_list_info *info)
+{
+ struct regmap *vcd = video->vcd_regmap;
+ unsigned int j, bit, value;
+ int ret;
+
+ for (j = 0; j < info->offset_perline; j += 4) {
+ regmap_read(vcd, VCD_DIFF_TBL + (j + info->index), &value);
+
+ bitmap_from_arr32(bitmap, &value, BITMAP_SIZE);
+
+ for_each_set_bit(bit, bitmap, BITMAP_SIZE) {
+ ret = npcm_video_find_rect(video, info, bit + (j << 3));
+ if (ret)
+ return ret;
+ }
+ }
+ info->index += 64;
+ return info->tile_perline;
+}
+
+static void npcm_video_get_rect_list(struct npcm_video *video, unsigned int index)
+{
+ struct v4l2_bt_timings *act = &video->active_timings;
+ struct rect_list_info info;
+ unsigned int tile_cnt = 0, mod;
+ int ret = 0;
+
+ memset(&info, 0, sizeof(struct rect_list_info));
+ info.head = &video->list[index];
+
+ info.tile_perline = act->width >> 4;
+ mod = act->width % RECT_W;
+ if (mod != 0)
+ info.tile_perline += 1;
+
+ info.tile_perrow = act->height >> 4;
+ mod = act->height % RECT_H;
+ if (mod != 0)
+ info.tile_perrow += 1;
+
+ info.tile_size = info.tile_perrow * info.tile_perline;
+
+ info.offset_perline = info.tile_perline >> 5;
+ mod = info.tile_perline % 32;
+ if (mod != 0)
+ info.offset_perline += 1;
+
+ info.offset_perline *= 4;
+
+ do {
+ ret = npcm_video_build_table(video, &info);
+ if (ret < 0)
+ return;
+
+ tile_cnt += ret;
+ } while (tile_cnt < info.tile_size);
+}
+
+static unsigned int npcm_video_is_mga(struct npcm_video *video)
+{
+ struct regmap *gfxi = video->gfx_regmap;
+ unsigned int dispst;
+
+ regmap_read(gfxi, DISPST, &dispst);
+ return ((dispst & DISPST_MGAMODE) == DISPST_MGAMODE);
+}
+
+static unsigned int npcm_video_hres(struct npcm_video *video)
+{
+ struct regmap *gfxi = video->gfx_regmap;
+ unsigned int hvcnth, hvcntl, apb_hor_res;
+
+ regmap_read(gfxi, HVCNTH, &hvcnth);
+ regmap_read(gfxi, HVCNTL, &hvcntl);
+ apb_hor_res = (((hvcnth & HVCNTH_MASK) << 8) + (hvcntl & HVCNTL_MASK) + 1);
+
+ return apb_hor_res;
+}
+
+static unsigned int npcm_video_vres(struct npcm_video *video)
+{
+ struct regmap *gfxi = video->gfx_regmap;
+ unsigned int vvcnth, vvcntl, apb_ver_res;
+
+ regmap_read(gfxi, VVCNTH, &vvcnth);
+ regmap_read(gfxi, VVCNTL, &vvcntl);
+
+ apb_ver_res = (((vvcnth & VVCNTH_MASK) << 8) + (vvcntl & VVCNTL_MASK));
+
+ return apb_ver_res;
+}
+
+static int npcm_video_capres(struct npcm_video *video, unsigned int hor_res,
+ unsigned int vert_res)
+{
+ struct regmap *vcd = video->vcd_regmap;
+ unsigned int res, cap_res;
+
+ if (hor_res > MAX_WIDTH || vert_res > MAX_HEIGHT)
+ return -EINVAL;
+
+ res = FIELD_PREP(VCD_CAP_RES_VERT_RES, vert_res) |
+ FIELD_PREP(VCD_CAP_RES_HOR_RES, hor_res);
+
+ regmap_write(vcd, VCD_CAP_RES, res);
+ regmap_read(vcd, VCD_CAP_RES, &cap_res);
+
+ if (cap_res != res)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void npcm_video_vcd_ip_reset(struct npcm_video *video)
+{
+ /*
+	 * After resetting a module and clearing the reset bit, the driver
+	 * should wait at least 10 us before accessing the module.
+ */
+ reset_control_assert(video->reset);
+ usleep_range(10, 20);
+ reset_control_deassert(video->reset);
+ usleep_range(10, 20);
+}
+
+static void npcm_video_vcd_state_machine_reset(struct npcm_video *video)
+{
+ struct regmap *vcd = video->vcd_regmap;
+
+ regmap_update_bits(vcd, VCD_MODE, VCD_MODE_VCDE, 0);
+ regmap_update_bits(vcd, VCD_MODE, VCD_MODE_IDBC, 0);
+ regmap_update_bits(vcd, VCD_CMD, VCD_CMD_RST, VCD_CMD_RST);
+
+ /*
+	 * VCD_CMD_RST resets the VCD internal state machines and clears the FIFOs;
+	 * the driver should wait at least 800 us for the reset operations to complete.
+ */
+ usleep_range(800, 1000);
+
+ regmap_write(vcd, VCD_STAT, VCD_STAT_CLEAR);
+ regmap_update_bits(vcd, VCD_MODE, VCD_MODE_VCDE, VCD_MODE_VCDE);
+ regmap_update_bits(vcd, VCD_MODE, VCD_MODE_IDBC, VCD_MODE_IDBC);
+}
+
+static void npcm_video_gfx_reset(struct npcm_video *video)
+{
+ struct regmap *gcr = video->gcr_regmap;
+
+ regmap_update_bits(gcr, INTCR2, INTCR2_GIRST2, INTCR2_GIRST2);
+ npcm_video_vcd_state_machine_reset(video);
+ regmap_update_bits(gcr, INTCR2, INTCR2_GIRST2, 0);
+}
+
+static void npcm_video_kvm_bw(struct npcm_video *video, bool set_bw)
+{
+ struct regmap *vcd = video->vcd_regmap;
+
+ if (set_bw || !npcm_video_is_mga(video))
+ regmap_update_bits(vcd, VCD_MODE, VCD_MODE_KVM_BW_SET,
+ VCD_MODE_KVM_BW_SET);
+ else
+ regmap_update_bits(vcd, VCD_MODE, VCD_MODE_KVM_BW_SET, 0);
+}
+
+static unsigned int npcm_video_pclk(struct npcm_video *video)
+{
+ struct regmap *gfxi = video->gfx_regmap;
+ unsigned int tmp, pllfbdiv, pllinotdiv, gpllfbdiv;
+ unsigned int gpllfbdv109, gpllfbdv8, gpllindiv;
+ unsigned int gpllst_pllotdiv1, gpllst_pllotdiv2;
+
+ regmap_read(gfxi, GPLLST, &tmp);
+ gpllfbdv109 = FIELD_GET(GPLLST_GPLLFBDV109, tmp);
+ gpllst_pllotdiv1 = FIELD_GET(GPLLST_PLLOTDIV1, tmp);
+ gpllst_pllotdiv2 = FIELD_GET(GPLLST_PLLOTDIV2, tmp);
+
+ regmap_read(gfxi, GPLLINDIV, &tmp);
+ gpllfbdv8 = FIELD_GET(GPLLINDIV_GPLLFBDV8, tmp);
+ gpllindiv = FIELD_GET(GPLLINDIV_MASK, tmp);
+
+ regmap_read(gfxi, GPLLFBDIV, &tmp);
+ gpllfbdiv = FIELD_GET(GPLLFBDIV_MASK, tmp);
+
+ pllfbdiv = (512 * gpllfbdv109 + 256 * gpllfbdv8 + gpllfbdiv);
+ pllinotdiv = (gpllindiv * gpllst_pllotdiv1 * gpllst_pllotdiv2);
+ if (pllfbdiv == 0 || pllinotdiv == 0)
+ return 0;
+
+ return ((pllfbdiv * 25000) / pllinotdiv) * 1000;
+}
+
+static unsigned int npcm_video_get_bpp(struct npcm_video *video)
+{
+ const struct npcm_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < NUM_FORMATS; k++) {
+ fmt = &npcm_fmt_list[k];
+ if (fmt->fourcc == video->pix_fmt.pixelformat)
+ break;
+ }
+
+ return fmt->bpp;
+}
+
+/*
+ * Pitch must be a power of 2, >= linebytes,
+ * at least 512, and no more than 4096.
+ */
+static void npcm_video_set_linepitch(struct npcm_video *video,
+ unsigned int linebytes)
+{
+ struct regmap *vcd = video->vcd_regmap;
+ unsigned int pitch = MIN_LP;
+
+ while ((pitch < linebytes) && (pitch < MAX_LP))
+ pitch *= 2;
+
+ regmap_write(vcd, VCD_FB_LP, FIELD_PREP(VCD_FBA_LP, pitch) |
+ FIELD_PREP(VCD_FBB_LP, pitch));
+}
+
+static unsigned int npcm_video_get_linepitch(struct npcm_video *video)
+{
+ struct regmap *vcd = video->vcd_regmap;
+ unsigned int linepitch;
+
+ regmap_read(vcd, VCD_FB_LP, &linepitch);
+ return FIELD_GET(VCD_FBA_LP, linepitch);
+}
+
+static void npcm_video_command(struct npcm_video *video, unsigned int value)
+{
+ struct regmap *vcd = video->vcd_regmap;
+ unsigned int cmd;
+
+ regmap_write(vcd, VCD_STAT, VCD_STAT_CLEAR);
+ regmap_read(vcd, VCD_CMD, &cmd);
+ cmd |= FIELD_PREP(VCD_CMD_OPERATION, value);
+
+ regmap_write(vcd, VCD_CMD, cmd);
+ regmap_update_bits(vcd, VCD_CMD, VCD_CMD_GO, VCD_CMD_GO);
+ video->op_cmd = value;
+}
+
+static void npcm_video_init_reg(struct npcm_video *video)
+{
+ struct regmap *gcr = video->gcr_regmap, *vcd = video->vcd_regmap;
+
+ /* Selects Data Enable */
+ regmap_update_bits(gcr, INTCR, INTCR_DEHS, 0);
+
+ /* Enable display of KVM GFX and access to memory */
+ regmap_update_bits(gcr, INTCR, INTCR_GFXIFDIS, 0);
+
+ /* Active Vertical/Horizontal Counters Reset */
+ regmap_update_bits(gcr, INTCR2, INTCR2_GIHCRST | INTCR2_GIVCRST,
+ INTCR2_GIHCRST | INTCR2_GIVCRST);
+
+ /* Reset video modules */
+ npcm_video_vcd_ip_reset(video);
+ npcm_video_gfx_reset(video);
+
+ /* Set the FIFO thresholds */
+ regmap_write(vcd, VCD_FIFO, VCD_FIFO_TH);
+
+ /* Set RCHG timer */
+ regmap_write(vcd, VCD_RCHG, FIELD_PREP(VCD_RCHG_TIM_PRSCL, 0xf) |
+ FIELD_PREP(VCD_RCHG_IG_CHG0, 0x3));
+
+ /* Set video mode */
+ regmap_write(vcd, VCD_MODE, VCD_MODE_VCDE | VCD_MODE_CM565 |
+ VCD_MODE_IDBC | VCD_MODE_KVM_BW_SET);
+}
+
+static int npcm_video_start_frame(struct npcm_video *video)
+{
+ struct npcm_video_buffer *buf;
+ struct regmap *vcd = video->vcd_regmap;
+ unsigned long flags;
+ unsigned int val;
+ int ret;
+
+ if (video->v4l2_input_status) {
+ dev_dbg(video->dev, "No video signal; skip capture frame\n");
+ return 0;
+ }
+
+ ret = regmap_read_poll_timeout(vcd, VCD_STAT, val, !(val & VCD_STAT_BUSY),
+ 1000, VCD_TIMEOUT_US);
+ if (ret) {
+ dev_err(video->dev, "Wait for VCD_STAT_BUSY timeout\n");
+ return -EBUSY;
+ }
+
+ spin_lock_irqsave(&video->lock, flags);
+ buf = list_first_entry_or_null(&video->buffers,
+ struct npcm_video_buffer, link);
+ if (!buf) {
+ spin_unlock_irqrestore(&video->lock, flags);
+ dev_dbg(video->dev, "No empty buffers; skip capture frame\n");
+ return 0;
+ }
+
+ set_bit(VIDEO_CAPTURING, &video->flags);
+ spin_unlock_irqrestore(&video->lock, flags);
+
+ npcm_video_vcd_state_machine_reset(video);
+
+ regmap_read(vcd, VCD_HOR_AC_TIM, &val);
+ regmap_update_bits(vcd, VCD_HOR_AC_LST, VCD_HOR_AC_LAST,
+ FIELD_GET(VCD_HOR_AC_TIME, val));
+
+ regmap_read(vcd, VCD_VER_HI_TIM, &val);
+ regmap_update_bits(vcd, VCD_VER_HI_LST, VCD_VER_HI_LAST,
+ FIELD_GET(VCD_VER_HI_TIME, val));
+
+ regmap_update_bits(vcd, VCD_INTE, VCD_INTE_DONE_IE | VCD_INTE_IFOT_IE |
+ VCD_INTE_IFOR_IE | VCD_INTE_HAC_IE | VCD_INTE_VHT_IE,
+ VCD_INTE_DONE_IE | VCD_INTE_IFOT_IE | VCD_INTE_IFOR_IE |
+ VCD_INTE_HAC_IE | VCD_INTE_VHT_IE);
+
+ npcm_video_command(video, video->ctrl_cmd);
+
+ return 0;
+}
+
+static void npcm_video_bufs_done(struct npcm_video *video,
+ enum vb2_buffer_state state)
+{
+ struct npcm_video_buffer *buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&video->lock, flags);
+ list_for_each_entry(buf, &video->buffers, link)
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+
+ INIT_LIST_HEAD(&video->buffers);
+ spin_unlock_irqrestore(&video->lock, flags);
+}
+
+static void npcm_video_get_diff_rect(struct npcm_video *video, unsigned int index)
+{
+ unsigned int width = video->active_timings.width;
+ unsigned int height = video->active_timings.height;
+
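+ /*
+ * In COMPARE (diff) mode the VCD reports only the changed rectangles;
+ * otherwise the whole frame is treated as a single rectangle.
+ */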
+ if (video->op_cmd != VCD_CMD_OPERATION_CAPTURE) {
+ video->rect_cnt = 0;
+ npcm_video_get_rect_list(video, index);
+ video->rect[index] = video->rect_cnt;
+ } else {
+ video->rect[index] = npcm_video_add_rect(video, index, 0, 0,
+ width, height);
+ }
+}
+
+static void npcm_video_detect_resolution(struct npcm_video *video)
+{
+ struct v4l2_bt_timings *act = &video->active_timings;
+ struct v4l2_bt_timings *det = &video->detected_timings;
+ struct regmap *gfxi = video->gfx_regmap;
+ unsigned int dispst;
+
+ video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL;
+ det->width = npcm_video_hres(video);
+ det->height = npcm_video_vres(video);
+
+ if (act->width != det->width || act->height != det->height) {
+ dev_dbg(video->dev, "Resolution changed\n");
+
+ if (npcm_video_hres(video) > 0 && npcm_video_vres(video) > 0) {
+ if (test_bit(VIDEO_STREAMING, &video->flags)) {
+ /*
+ * Wait until the resolution is available and
+ * has been captured by the host.
+ */
+ do {
+ mdelay(100);
+ regmap_read(gfxi, DISPST, &dispst);
+ } while (npcm_video_vres(video) < 100 ||
+ npcm_video_pclk(video) == 0 ||
+ (dispst & DISPST_HSCROFF));
+ }
+
+ det->width = npcm_video_hres(video);
+ det->height = npcm_video_vres(video);
+ det->pixelclock = npcm_video_pclk(video);
+ }
+
+ clear_bit(VIDEO_RES_CHANGING, &video->flags);
+ }
+
+ if (det->width && det->height)
+ video->v4l2_input_status = 0;
+
+ dev_dbg(video->dev, "Got resolution[%dx%d] -> [%dx%d], status %d\n",
+ act->width, act->height, det->width, det->height,
+ video->v4l2_input_status);
+}
+
+static int npcm_video_set_resolution(struct npcm_video *video,
+ struct v4l2_bt_timings *timing)
+{
+ struct regmap *vcd = video->vcd_regmap;
+ unsigned int mode;
+
+ if (npcm_video_capres(video, timing->width, timing->height)) {
+ dev_err(video->dev, "Failed to set VCD_CAP_RES\n");
+ return -EINVAL;
+ }
+
+ video->active_timings = *timing;
+ video->bytesperpixel = npcm_video_get_bpp(video);
+ npcm_video_set_linepitch(video, timing->width * video->bytesperpixel);
+ video->bytesperline = npcm_video_get_linepitch(video);
+ video->pix_fmt.width = timing->width ? timing->width : MIN_WIDTH;
+ video->pix_fmt.height = timing->height ? timing->height : MIN_HEIGHT;
+ video->pix_fmt.sizeimage = video->pix_fmt.width * video->pix_fmt.height *
+ video->bytesperpixel;
+ video->pix_fmt.bytesperline = video->bytesperline;
+
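+ /* Select the KVM bandwidth setting based on whether the pixel clock exceeds VCD_KVM_BW_PCLK */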
+ npcm_video_kvm_bw(video, timing->pixelclock > VCD_KVM_BW_PCLK);
+ npcm_video_gfx_reset(video);
+ regmap_read(vcd, VCD_MODE, &mode);
+
+ dev_dbg(video->dev, "VCD mode = 0x%x, %s mode\n", mode,
+ npcm_video_is_mga(video) ? "Hi Res" : "VGA");
+
+ dev_dbg(video->dev,
+ "Digital mode: %d x %d x %d, pixelclock %lld, bytesperline %d\n",
+ timing->width, timing->height, video->bytesperpixel,
+ timing->pixelclock, video->bytesperline);
+
+ return 0;
+}
+
+static void npcm_video_start(struct npcm_video *video)
+{
+ npcm_video_init_reg(video);
+
+ if (!npcm_video_alloc_fb(video, &video->src)) {
+ dev_err(video->dev, "Failed to allocate VCD frame buffer\n");
+ return;
+ }
+
+ npcm_video_detect_resolution(video);
+ if (npcm_video_set_resolution(video, &video->detected_timings)) {
+ dev_err(video->dev, "Failed to set resolution\n");
+ return;
+ }
+
+ /* Set frame buffer physical address */
+ regmap_write(video->vcd_regmap, VCD_FBA_ADR, video->src.dma);
+ regmap_write(video->vcd_regmap, VCD_FBB_ADR, video->src.dma);
+
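+ /* Initialize the ECE when the first client starts using it */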
+ if (video->ece.enable && atomic_inc_return(&video->ece.clients) == 1) {
+ npcm_video_ece_ip_reset(video);
+ npcm_video_ece_ctrl_reset(video);
+ npcm_video_ece_set_fb_addr(video, video->src.dma);
+ npcm_video_ece_set_lp(video, video->bytesperline);
+
+ dev_dbg(video->dev, "ECE open: client %d\n",
+ atomic_read(&video->ece.clients));
+ }
+}
+
+static void npcm_video_stop(struct npcm_video *video)
+{
+ struct regmap *vcd = video->vcd_regmap;
+
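+ /* Mark the device stopped so the IRQ handler ignores any late interrupts */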
+ set_bit(VIDEO_STOPPED, &video->flags);
+
+ regmap_write(vcd, VCD_INTE, 0);
+ regmap_write(vcd, VCD_MODE, 0);
+ regmap_write(vcd, VCD_RCHG, 0);
+ regmap_write(vcd, VCD_STAT, VCD_STAT_CLEAR);
+
+ if (video->src.size)
+ npcm_video_free_fb(video, &video->src);
+
+ npcm_video_free_diff_table(video);
+ video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL;
+ video->flags = 0;
+ video->ctrl_cmd = VCD_CMD_OPERATION_CAPTURE;
+
+ if (video->ece.enable && atomic_dec_return(&video->ece.clients) == 0) {
+ npcm_video_ece_stop(video);
+ dev_dbg(video->dev, "ECE close: client %d\n",
+ atomic_read(&video->ece.clients));
+ }
+}
+
+static unsigned int npcm_video_raw(struct npcm_video *video, int index, void *addr)
+{
+ unsigned int width = video->active_timings.width;
+ unsigned int height = video->active_timings.height;
+ unsigned int i, len, offset, bytes = 0;
+
+ video->rect[index] = npcm_video_add_rect(video, index, 0, 0, width, height);
+
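+ /* Copy line by line since the framebuffer line pitch may exceed the visible line width */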
+ for (i = 0; i < height; i++) {
+ len = width * video->bytesperpixel;
+ offset = i * video->bytesperline;
+
+ memcpy(addr + bytes, video->src.virt + offset, len);
+ bytes += len;
+ }
+
+ return bytes;
+}
+
+static unsigned int npcm_video_hextile(struct npcm_video *video, unsigned int index,
+ unsigned int dma_addr, void *vaddr)
+{
+ struct rect_list *rect_list;
+ struct v4l2_rect *rect;
+ unsigned int offset, len, bytes = 0;
+
+ npcm_video_ece_ctrl_reset(video);
+ npcm_video_ece_clear_rect_offset(video);
+ npcm_video_ece_set_fb_addr(video, video->src.dma);
+
+ /* Set the base address of the encoded data to the video buffer */
+ npcm_video_ece_set_enc_dba(video, dma_addr);
+
+ npcm_video_ece_set_lp(video, video->bytesperline);
+ npcm_video_get_diff_rect(video, index);
+
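+ /* Encode each changed rectangle with the ECE and prepend a hextile rectangle header */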
+ list_for_each_entry(rect_list, &video->list[index], list) {
+ rect = &rect_list->clip.c;
+ offset = npcm_video_ece_read_rect_offset(video);
+ npcm_video_ece_enc_rect(video, rect->left, rect->top,
+ rect->width, rect->height);
+
+ len = npcm_video_ece_get_ed_size(video, offset, vaddr);
+ npcm_video_ece_prepend_rect_header(vaddr + offset,
+ rect->left, rect->top,
+ rect->width, rect->height);
+ bytes += len;
+ }
+
+ return bytes;
+}
+
+static irqreturn_t npcm_video_irq(int irq, void *arg)
+{
+ struct npcm_video *video = arg;
+ struct regmap *vcd = video->vcd_regmap;
+ struct npcm_video_buffer *buf;
+ unsigned int index, size, status, fmt;
+ dma_addr_t dma_addr;
+ void *addr;
+ static const struct v4l2_event ev = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+
+ regmap_read(vcd, VCD_STAT, &status);
+ dev_dbg(video->dev, "VCD irq status 0x%x\n", status);
+
+ regmap_write(vcd, VCD_STAT, VCD_STAT_CLEAR);
+
+ if (test_bit(VIDEO_STOPPED, &video->flags) ||
+ !test_bit(VIDEO_STREAMING, &video->flags))
+ return IRQ_NONE;
+
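+ /* Capture completed: copy or encode the frame into the next queued buffer */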
+ if (status & VCD_STAT_DONE) {
+ regmap_write(vcd, VCD_INTE, 0);
+ spin_lock(&video->lock);
+ clear_bit(VIDEO_CAPTURING, &video->flags);
+ buf = list_first_entry_or_null(&video->buffers,
+ struct npcm_video_buffer, link);
+ if (!buf) {
+ spin_unlock(&video->lock);
+ return IRQ_NONE;
+ }
+
+ addr = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
+ index = buf->vb.vb2_buf.index;
+ fmt = video->pix_fmt.pixelformat;
+
+ switch (fmt) {
+ case V4L2_PIX_FMT_RGB565:
+ size = npcm_video_raw(video, index, addr);
+ break;
+ case V4L2_PIX_FMT_HEXTILE:
+ dma_addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
+ size = npcm_video_hextile(video, index, dma_addr, addr);
+ break;
+ default:
+ spin_unlock(&video->lock);
+ return IRQ_NONE;
+ }
+
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ buf->vb.sequence = video->sequence++;
+ buf->vb.field = V4L2_FIELD_NONE;
+
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ list_del(&buf->link);
+ spin_unlock(&video->lock);
+
+ if (npcm_video_start_frame(video))
+ dev_err(video->dev, "Failed to capture next frame\n");
+ }
+
+ /* Resolution changed */
+ if (status & VCD_STAT_VHT_CHG || status & VCD_STAT_HAC_CHG) {
+ if (!test_bit(VIDEO_RES_CHANGING, &video->flags)) {
+ set_bit(VIDEO_RES_CHANGING, &video->flags);
+
+ vb2_queue_error(&video->queue);
+ v4l2_event_queue(&video->vdev, &ev);
+ }
+ }
+
+ if (status & VCD_STAT_IFOR || status & VCD_STAT_IFOT) {
+ dev_warn(video->dev, "VCD FIFO overrun or over thresholds\n");
+ if (npcm_video_start_frame(video))
+ dev_err(video->dev, "Failed to recover from FIFO overrun\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int npcm_video_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, DEVICE_NAME, sizeof(cap->driver));
+ strscpy(cap->card, "NPCM Video Engine", sizeof(cap->card));
+
+ return 0;
+}
+
+static int npcm_video_enum_format(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct npcm_video *video = video_drvdata(file);
+ const struct npcm_fmt *fmt;
+
+ if (f->index >= NUM_FORMATS)
+ return -EINVAL;
+
+ fmt = &npcm_fmt_list[f->index];
+ if (fmt->fourcc == V4L2_PIX_FMT_HEXTILE && !video->ece.enable)
+ return -EINVAL;
+
+ f->pixelformat = fmt->fourcc;
+ return 0;
+}
+
+static int npcm_video_try_format(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct npcm_video *video = video_drvdata(file);
+ const struct npcm_fmt *fmt;
+
+ fmt = npcm_video_find_format(f);
+
+ /* If format not found or HEXTILE not supported, use RGB565 as default */
+ if (!fmt || (fmt->fourcc == V4L2_PIX_FMT_HEXTILE && !video->ece.enable))
+ f->fmt.pix.pixelformat = npcm_fmt_list[0].fourcc;
+
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+ f->fmt.pix.quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ f->fmt.pix.width = video->pix_fmt.width;
+ f->fmt.pix.height = video->pix_fmt.height;
+ f->fmt.pix.bytesperline = video->bytesperline;
+ f->fmt.pix.sizeimage = video->pix_fmt.sizeimage;
+
+ return 0;
+}
+
+static int npcm_video_get_format(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct npcm_video *video = video_drvdata(file);
+
+ f->fmt.pix = video->pix_fmt;
+ return 0;
+}
+
+static int npcm_video_set_format(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct npcm_video *video = video_drvdata(file);
+ int ret;
+
+ ret = npcm_video_try_format(file, fh, f);
+ if (ret)
+ return ret;
+
+ if (vb2_is_busy(&video->queue)) {
+ dev_err(video->dev, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ video->pix_fmt.pixelformat = f->fmt.pix.pixelformat;
+ return 0;
+}
+
+static int npcm_video_enum_input(struct file *file, void *fh,
+ struct v4l2_input *inp)
+{
+ struct npcm_video *video = video_drvdata(file);
+
+ if (inp->index)
+ return -EINVAL;
+
+ strscpy(inp->name, "Host VGA capture", sizeof(inp->name));
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
+ inp->status = video->v4l2_input_status;
+
+ return 0;
+}
+
+static int npcm_video_get_input(struct file *file, void *fh, unsigned int *i)
+{
+ *i = 0;
+
+ return 0;
+}
+
+static int npcm_video_set_input(struct file *file, void *fh, unsigned int i)
+{
+ if (i)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int npcm_video_set_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct npcm_video *video = video_drvdata(file);
+ int rc;
+
+ if (timings->bt.width == video->active_timings.width &&
+ timings->bt.height == video->active_timings.height)
+ return 0;
+
+ if (vb2_is_busy(&video->queue)) {
+ dev_err(video->dev, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ rc = npcm_video_set_resolution(video, &timings->bt);
+ if (rc)
+ return rc;
+
+ timings->type = V4L2_DV_BT_656_1120;
+
+ return 0;
+}
+
+static int npcm_video_get_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct npcm_video *video = video_drvdata(file);
+
+ timings->type = V4L2_DV_BT_656_1120;
+ timings->bt = video->active_timings;
+
+ return 0;
+}
+
+static int npcm_video_query_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct npcm_video *video = video_drvdata(file);
+
+ npcm_video_detect_resolution(video);
+ timings->type = V4L2_DV_BT_656_1120;
+ timings->bt = video->detected_timings;
+
+ return video->v4l2_input_status ? -ENOLINK : 0;
+}
+
+static int npcm_video_enum_dv_timings(struct file *file, void *fh,
+ struct v4l2_enum_dv_timings *timings)
+{
+ return v4l2_enum_dv_timings_cap(timings, &npcm_video_timings_cap,
+ NULL, NULL);
+}
+
+static int npcm_video_dv_timings_cap(struct file *file, void *fh,
+ struct v4l2_dv_timings_cap *cap)
+{
+ *cap = npcm_video_timings_cap;
+
+ return 0;
+}
+
+static int npcm_video_sub_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_src_change_event_subscribe(fh, sub);
+ }
+
+ return v4l2_ctrl_subscribe_event(fh, sub);
+}
+
+static const struct v4l2_ioctl_ops npcm_video_ioctls = {
+ .vidioc_querycap = npcm_video_querycap,
+
+ .vidioc_enum_fmt_vid_cap = npcm_video_enum_format,
+ .vidioc_g_fmt_vid_cap = npcm_video_get_format,
+ .vidioc_s_fmt_vid_cap = npcm_video_set_format,
+ .vidioc_try_fmt_vid_cap = npcm_video_try_format,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_enum_input = npcm_video_enum_input,
+ .vidioc_g_input = npcm_video_get_input,
+ .vidioc_s_input = npcm_video_set_input,
+
+ .vidioc_s_dv_timings = npcm_video_set_dv_timings,
+ .vidioc_g_dv_timings = npcm_video_get_dv_timings,
+ .vidioc_query_dv_timings = npcm_video_query_dv_timings,
+ .vidioc_enum_dv_timings = npcm_video_enum_dv_timings,
+ .vidioc_dv_timings_cap = npcm_video_dv_timings_cap,
+
+ .vidioc_subscribe_event = npcm_video_sub_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int npcm_video_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct npcm_video *video = container_of(ctrl->handler, struct npcm_video,
+ ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_NPCM_CAPTURE_MODE:
+ if (ctrl->val == V4L2_NPCM_CAPTURE_MODE_COMPLETE)
+ video->ctrl_cmd = VCD_CMD_OPERATION_CAPTURE;
+ else if (ctrl->val == V4L2_NPCM_CAPTURE_MODE_DIFF)
+ video->ctrl_cmd = VCD_CMD_OPERATION_COMPARE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops npcm_video_ctrl_ops = {
+ .s_ctrl = npcm_video_set_ctrl,
+};
+
+static const char * const npcm_ctrl_capture_mode_menu[] = {
+ "COMPLETE",
+ "DIFF",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config npcm_ctrl_capture_mode = {
+ .ops = &npcm_video_ctrl_ops,
+ .id = V4L2_CID_NPCM_CAPTURE_MODE,
+ .name = "NPCM Video Capture Mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 0,
+ .max = V4L2_NPCM_CAPTURE_MODE_DIFF,
+ .def = 0,
+ .qmenu = npcm_ctrl_capture_mode_menu,
+};
+
+/*
+ * This control value is set when a buffer is dequeued by userspace, i.e. in
+ * the npcm_video_buf_finish() function.
+ */
+static const struct v4l2_ctrl_config npcm_ctrl_rect_count = {
+ .id = V4L2_CID_NPCM_RECT_COUNT,
+ .name = "NPCM Hextile Rectangle Count",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (MAX_WIDTH / RECT_W) * (MAX_HEIGHT / RECT_H),
+ .step = 1,
+ .def = 0,
+};
+
+static int npcm_video_open(struct file *file)
+{
+ struct npcm_video *video = video_drvdata(file);
+ int rc;
+
+ mutex_lock(&video->video_lock);
+ rc = v4l2_fh_open(file);
+ if (rc) {
+ mutex_unlock(&video->video_lock);
+ return rc;
+ }
+
+ if (v4l2_fh_is_singular_file(file))
+ npcm_video_start(video);
+
+ mutex_unlock(&video->video_lock);
+ return 0;
+}
+
+static int npcm_video_release(struct file *file)
+{
+ struct npcm_video *video = video_drvdata(file);
+ int rc;
+
+ mutex_lock(&video->video_lock);
+ if (v4l2_fh_is_singular_file(file))
+ npcm_video_stop(video);
+
+ rc = _vb2_fop_release(file, NULL);
+
+ mutex_unlock(&video->video_lock);
+ return rc;
+}
+
+static const struct v4l2_file_operations npcm_video_v4l2_fops = {
+ .owner = THIS_MODULE,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+ .open = npcm_video_open,
+ .release = npcm_video_release,
+};
+
+static int npcm_video_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct npcm_video *video = vb2_get_drv_priv(q);
+ unsigned int i;
+
+ if (*num_planes) {
+ if (sizes[0] < video->pix_fmt.sizeimage)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ *num_planes = 1;
+ sizes[0] = video->pix_fmt.sizeimage;
+
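+ /* Initialize the per-buffer lists used to track hextile difference rectangles */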
+ for (i = 0; i < VIDEO_MAX_FRAME; i++)
+ INIT_LIST_HEAD(&video->list[i]);
+
+ return 0;
+}
+
+static int npcm_video_buf_prepare(struct vb2_buffer *vb)
+{
+ struct npcm_video *video = vb2_get_drv_priv(vb->vb2_queue);
+
+ if (vb2_plane_size(vb, 0) < video->pix_fmt.sizeimage)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int npcm_video_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct npcm_video *video = vb2_get_drv_priv(q);
+ int rc;
+
+ video->sequence = 0;
+ rc = npcm_video_start_frame(video);
+ if (rc) {
+ npcm_video_bufs_done(video, VB2_BUF_STATE_QUEUED);
+ return rc;
+ }
+
+ set_bit(VIDEO_STREAMING, &video->flags);
+ return 0;
+}
+
+static void npcm_video_stop_streaming(struct vb2_queue *q)
+{
+ struct npcm_video *video = vb2_get_drv_priv(q);
+ struct regmap *vcd = video->vcd_regmap;
+
+ clear_bit(VIDEO_STREAMING, &video->flags);
+ regmap_write(vcd, VCD_INTE, 0);
+ regmap_write(vcd, VCD_STAT, VCD_STAT_CLEAR);
+ npcm_video_gfx_reset(video);
+ npcm_video_bufs_done(video, VB2_BUF_STATE_ERROR);
+ video->ctrl_cmd = VCD_CMD_OPERATION_CAPTURE;
+ v4l2_ctrl_s_ctrl(video->rect_cnt_ctrl, 0);
+}
+
+static void npcm_video_buf_queue(struct vb2_buffer *vb)
+{
+ struct npcm_video *video = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct npcm_video_buffer *nvb = to_npcm_video_buffer(vbuf);
+ unsigned long flags;
+ bool empty;
+
+ spin_lock_irqsave(&video->lock, flags);
+ empty = list_empty(&video->buffers);
+ list_add_tail(&nvb->link, &video->buffers);
+ spin_unlock_irqrestore(&video->lock, flags);
+
+ if (test_bit(VIDEO_STREAMING, &video->flags) &&
+ !test_bit(VIDEO_CAPTURING, &video->flags) && empty) {
+ if (npcm_video_start_frame(video))
+ dev_err(video->dev, "Failed to capture next frame\n");
+ }
+}
+
+static void npcm_video_buf_finish(struct vb2_buffer *vb)
+{
+ struct npcm_video *video = vb2_get_drv_priv(vb->vb2_queue);
+ struct list_head *head, *pos, *nx;
+ struct rect_list *tmp;
+
+ /*
+ * This callback is called when the buffer is dequeued, so update the
+ * V4L2_CID_NPCM_RECT_COUNT control value with the number of rectangles
+ * in this buffer and free the associated rect_list.
+ */
+ if (test_bit(VIDEO_STREAMING, &video->flags)) {
+ v4l2_ctrl_s_ctrl(video->rect_cnt_ctrl, video->rect[vb->index]);
+
+ head = &video->list[vb->index];
+ list_for_each_safe(pos, nx, head) {
+ tmp = list_entry(pos, struct rect_list, list);
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+ }
+}
+
+static const struct regmap_config npcm_video_regmap_cfg = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = VCD_FIFO,
+};
+
+static const struct regmap_config npcm_video_ece_regmap_cfg = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = ECE_HEX_RECT_OFFSET,
+};
+
+static const struct vb2_ops npcm_video_vb2_ops = {
+ .queue_setup = npcm_video_queue_setup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .buf_prepare = npcm_video_buf_prepare,
+ .buf_finish = npcm_video_buf_finish,
+ .start_streaming = npcm_video_start_streaming,
+ .stop_streaming = npcm_video_stop_streaming,
+ .buf_queue = npcm_video_buf_queue,
+};
+
+static int npcm_video_setup_video(struct npcm_video *video)
+{
+ struct v4l2_device *v4l2_dev = &video->v4l2_dev;
+ struct video_device *vdev = &video->vdev;
+ struct vb2_queue *vbq = &video->queue;
+ int rc;
+
+ if (video->ece.enable)
+ video->pix_fmt.pixelformat = V4L2_PIX_FMT_HEXTILE;
+ else
+ video->pix_fmt.pixelformat = V4L2_PIX_FMT_RGB565;
+
+ video->pix_fmt.field = V4L2_FIELD_NONE;
+ video->pix_fmt.colorspace = V4L2_COLORSPACE_SRGB;
+ video->pix_fmt.quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL;
+
+ rc = v4l2_device_register(video->dev, v4l2_dev);
+ if (rc) {
+ dev_err(video->dev, "Failed to register v4l2 device\n");
+ return rc;
+ }
+
+ v4l2_ctrl_handler_init(&video->ctrl_handler, 2);
+ v4l2_ctrl_new_custom(&video->ctrl_handler, &npcm_ctrl_capture_mode, NULL);
+ video->rect_cnt_ctrl = v4l2_ctrl_new_custom(&video->ctrl_handler,
+ &npcm_ctrl_rect_count, NULL);
+ if (video->ctrl_handler.error) {
+ dev_err(video->dev, "Failed to init controls: %d\n",
+ video->ctrl_handler.error);
+
+ rc = video->ctrl_handler.error;
+ goto rel_ctrl_handler;
+ }
+ v4l2_dev->ctrl_handler = &video->ctrl_handler;
+
+ vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vbq->io_modes = VB2_MMAP | VB2_DMABUF;
+ vbq->dev = v4l2_dev->dev;
+ vbq->lock = &video->video_lock;
+ vbq->ops = &npcm_video_vb2_ops;
+ vbq->mem_ops = &vb2_dma_contig_memops;
+ vbq->drv_priv = video;
+ vbq->buf_struct_size = sizeof(struct npcm_video_buffer);
+ vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vbq->min_buffers_needed = 3;
+
+ rc = vb2_queue_init(vbq);
+ if (rc) {
+ dev_err(video->dev, "Failed to init vb2 queue\n");
+ goto rel_ctrl_handler;
+ }
+ vdev->queue = vbq;
+ vdev->fops = &npcm_video_v4l2_fops;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ vdev->v4l2_dev = v4l2_dev;
+ strscpy(vdev->name, DEVICE_NAME, sizeof(vdev->name));
+ vdev->vfl_type = VFL_TYPE_VIDEO;
+ vdev->vfl_dir = VFL_DIR_RX;
+ vdev->release = video_device_release_empty;
+ vdev->ioctl_ops = &npcm_video_ioctls;
+ vdev->lock = &video->video_lock;
+
+ video_set_drvdata(vdev, video);
+ rc = video_register_device(vdev, VFL_TYPE_VIDEO, 0);
+ if (rc) {
+ dev_err(video->dev, "Failed to register video device\n");
+ goto rel_vb_queue;
+ }
+
+ return 0;
+
+rel_vb_queue:
+ vb2_queue_release(vbq);
+rel_ctrl_handler:
+ v4l2_ctrl_handler_free(&video->ctrl_handler);
+ v4l2_device_unregister(v4l2_dev);
+
+ return rc;
+}
+
+static int npcm_video_ece_init(struct npcm_video *video)
+{
+ struct device *dev = video->dev;
+ struct device_node *ece_node;
+ struct platform_device *ece_pdev;
+ void __iomem *regs;
+
+ ece_node = of_parse_phandle(video->dev->of_node, "nuvoton,ece", 0);
+ if (!ece_node) {
+ dev_err(dev, "Failed to get ECE phandle in DTS\n");
+ return -ENODEV;
+ }
+
+ video->ece.enable = of_device_is_available(ece_node);
+
+ if (video->ece.enable) {
+ dev_info(dev, "Support HEXTILE pixel format\n");
+
+ ece_pdev = of_find_device_by_node(ece_node);
+ if (IS_ERR(ece_pdev)) {
+ dev_err(dev, "Failed to find ECE device\n");
+ return PTR_ERR(ece_pdev);
+ }
+ of_node_put(ece_node);
+
+ regs = devm_platform_ioremap_resource(ece_pdev, 0);
+ if (IS_ERR(regs)) {
+ dev_err(dev, "Failed to parse ECE reg in DTS\n");
+ return PTR_ERR(regs);
+ }
+
+ video->ece.regmap = devm_regmap_init_mmio(dev, regs,
+ &npcm_video_ece_regmap_cfg);
+ if (IS_ERR(video->ece.regmap)) {
+ dev_err(dev, "Failed to initialize ECE regmap\n");
+ return PTR_ERR(video->ece.regmap);
+ }
+
+ video->ece.reset = devm_reset_control_get(&ece_pdev->dev, NULL);
+ if (IS_ERR(video->ece.reset)) {
+ dev_err(dev, "Failed to get ECE reset control in DTS\n");
+ return PTR_ERR(video->ece.reset);
+ }
+ }
+
+ return 0;
+}
+
+static int npcm_video_init(struct npcm_video *video)
+{
+ struct device *dev = video->dev;
+ int irq, rc;
+
+ irq = irq_of_parse_and_map(dev->of_node, 0);
+ if (!irq) {
+ dev_err(dev, "Failed to find VCD IRQ\n");
+ return -ENODEV;
+ }
+
+ rc = devm_request_threaded_irq(dev, irq, NULL, npcm_video_irq,
+ IRQF_ONESHOT, DEVICE_NAME, video);
+ if (rc < 0) {
+ dev_err(dev, "Failed to request IRQ %d\n", irq);
+ return rc;
+ }
+
+ of_reserved_mem_device_init(dev);
+ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(dev, "Failed to set DMA mask\n");
+ of_reserved_mem_device_release(dev);
+ }
+
+ rc = npcm_video_ece_init(video);
+ if (rc) {
+ dev_err(dev, "Failed to initialize ECE\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int npcm_video_probe(struct platform_device *pdev)
+{
+ struct npcm_video *video = kzalloc(sizeof(*video), GFP_KERNEL);
+ int rc;
+ void __iomem *regs;
+
+ if (!video)
+ return -ENOMEM;
+
+ video->dev = &pdev->dev;
+ spin_lock_init(&video->lock);
+ mutex_init(&video->video_lock);
+ INIT_LIST_HEAD(&video->buffers);
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs)) {
+ dev_err(&pdev->dev, "Failed to parse VCD reg in DTS\n");
+ return PTR_ERR(regs);
+ }
+
+ video->vcd_regmap = devm_regmap_init_mmio(&pdev->dev, regs,
+ &npcm_video_regmap_cfg);
+ if (IS_ERR(video->vcd_regmap)) {
+ dev_err(&pdev->dev, "Failed to initialize VCD regmap\n");
+ return PTR_ERR(video->vcd_regmap);
+ }
+
+ video->reset = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(video->reset)) {
+ dev_err(&pdev->dev, "Failed to get VCD reset control in DTS\n");
+ return PTR_ERR(video->reset);
+ }
+
+ video->gcr_regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "nuvoton,sysgcr");
+ if (IS_ERR(video->gcr_regmap))
+ return PTR_ERR(video->gcr_regmap);
+
+ video->gfx_regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "nuvoton,sysgfxi");
+ if (IS_ERR(video->gfx_regmap))
+ return PTR_ERR(video->gfx_regmap);
+
+ rc = npcm_video_init(video);
+ if (rc)
+ return rc;
+
+ rc = npcm_video_setup_video(video);
+ if (rc)
+ return rc;
+
+ dev_info(video->dev, "NPCM video driver probed\n");
+ return 0;
+}
+
+static int npcm_video_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct v4l2_device *v4l2_dev = dev_get_drvdata(dev);
+ struct npcm_video *video = to_npcm_video(v4l2_dev);
+
+ video_unregister_device(&video->vdev);
+ vb2_queue_release(&video->queue);
+ v4l2_ctrl_handler_free(&video->ctrl_handler);
+ v4l2_device_unregister(v4l2_dev);
+ if (video->ece.enable)
+ npcm_video_ece_stop(video);
+ of_reserved_mem_device_release(dev);
+
+ return 0;
+}
+
+static const struct of_device_id npcm_video_match[] = {
+ { .compatible = "nuvoton,npcm750-vcd" },
+ { .compatible = "nuvoton,npcm845-vcd" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, npcm_video_match);
+
+static struct platform_driver npcm_video_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = npcm_video_match,
+ },
+ .probe = npcm_video_probe,
+ .remove = npcm_video_remove,
+};
+
+module_platform_driver(npcm_video_driver);
+
+MODULE_AUTHOR("Joseph Liu <kwliu@nuvoton.com>");
+MODULE_AUTHOR("Marvin Lin <kflin@nuvoton.com>");
+MODULE_DESCRIPTION("Driver for Nuvoton NPCM Video Capture/Encode Engine");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h
index a2b4fb9e29e7..d579c804b047 100644
--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h
@@ -115,28 +115,17 @@ void print_cast_status(struct device *dev, void __iomem *reg,
void print_wrapper_info(struct device *dev, void __iomem *reg);
void mxc_jpeg_sw_reset(void __iomem *reg);
int mxc_jpeg_enable(void __iomem *reg);
-void wait_frmdone(struct device *dev, void __iomem *reg);
void mxc_jpeg_enc_mode_conf(struct device *dev, void __iomem *reg, u8 extseq);
void mxc_jpeg_enc_mode_go(struct device *dev, void __iomem *reg, u8 extseq);
void mxc_jpeg_enc_set_quality(struct device *dev, void __iomem *reg, u8 quality);
void mxc_jpeg_dec_mode_go(struct device *dev, void __iomem *reg);
-int mxc_jpeg_get_slot(void __iomem *reg);
-u32 mxc_jpeg_get_offset(void __iomem *reg, int slot);
void mxc_jpeg_enable_slot(void __iomem *reg, int slot);
void mxc_jpeg_set_l_endian(void __iomem *reg, int le);
void mxc_jpeg_enable_irq(void __iomem *reg, int slot);
void mxc_jpeg_disable_irq(void __iomem *reg, int slot);
-int mxc_jpeg_set_input(void __iomem *reg, u32 in_buf, u32 bufsize);
-int mxc_jpeg_set_output(void __iomem *reg, u16 out_pitch, u32 out_buf,
- u16 w, u16 h);
-void mxc_jpeg_set_config_mode(void __iomem *reg, int config_mode);
-int mxc_jpeg_set_params(struct mxc_jpeg_desc *desc, u32 bufsize, u16
- out_pitch, u32 format);
void mxc_jpeg_set_bufsize(struct mxc_jpeg_desc *desc, u32 bufsize);
void mxc_jpeg_set_res(struct mxc_jpeg_desc *desc, u16 w, u16 h);
void mxc_jpeg_set_line_pitch(struct mxc_jpeg_desc *desc, u32 line_pitch);
void mxc_jpeg_set_desc(u32 desc, void __iomem *reg, int slot);
void mxc_jpeg_clr_desc(void __iomem *reg, int slot);
-void mxc_jpeg_set_regs_from_desc(struct mxc_jpeg_desc *desc,
- void __iomem *reg);
#endif
diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
index b7a720198ce5..64112b63298c 100644
--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
@@ -1322,6 +1322,20 @@ static bool mxc_jpeg_compare_format(const struct mxc_jpeg_fmt *fmt1,
return false;
}
+static void mxc_jpeg_set_last_buffer(struct mxc_jpeg_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *next_dst_buf;
+
+ next_dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (!next_dst_buf) {
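+ /* No capture buffer queued; mark the context as draining so the next queued buffer is flagged last */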
+ ctx->fh.m2m_ctx->is_draining = true;
+ ctx->fh.m2m_ctx->next_buf_last = true;
+ return;
+ }
+
+ v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, next_dst_buf);
+}
+
static bool mxc_jpeg_source_change(struct mxc_jpeg_ctx *ctx,
struct mxc_jpeg_src_buf *jpeg_src_buf)
{
@@ -1334,7 +1348,8 @@ static bool mxc_jpeg_source_change(struct mxc_jpeg_ctx *ctx,
q_data_cap = mxc_jpeg_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
if (mxc_jpeg_compare_format(q_data_cap->fmt, jpeg_src_buf->fmt))
jpeg_src_buf->fmt = q_data_cap->fmt;
- if (q_data_cap->fmt != jpeg_src_buf->fmt ||
+ if (ctx->need_initial_source_change_evt ||
+ q_data_cap->fmt != jpeg_src_buf->fmt ||
q_data_cap->w != jpeg_src_buf->w ||
q_data_cap->h != jpeg_src_buf->h) {
dev_dbg(dev, "Detected jpeg res=(%dx%d)->(%dx%d), pixfmt=%c%c%c%c\n",
@@ -1378,6 +1393,9 @@ static bool mxc_jpeg_source_change(struct mxc_jpeg_ctx *ctx,
mxc_jpeg_sizeimage(q_data_cap);
notify_src_chg(ctx);
ctx->source_change = 1;
+ ctx->need_initial_source_change_evt = false;
+ if (vb2_is_streaming(v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx)))
+ mxc_jpeg_set_last_buffer(ctx);
}
return ctx->source_change ? true : false;
@@ -1595,6 +1613,9 @@ static int mxc_jpeg_queue_setup(struct vb2_queue *q,
for (i = 0; i < *nplanes; i++)
sizes[i] = mxc_jpeg_get_plane_size(q_data, i);
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ ctx->need_initial_source_change_evt = true;
+
return 0;
}
@@ -1638,8 +1659,13 @@ static void mxc_jpeg_stop_streaming(struct vb2_queue *q)
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
}
- if (V4L2_TYPE_IS_OUTPUT(q->type) || !ctx->source_change)
- v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
+ v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
+ /* if V4L2_DEC_CMD_STOP is sent before the source change is triggered,
+ * restore the is_draining flag
+ */
+ if (V4L2_TYPE_IS_CAPTURE(q->type) && ctx->source_change && ctx->fh.m2m_ctx->last_src_buf)
+ ctx->fh.m2m_ctx->is_draining = true;
+
if (V4L2_TYPE_IS_OUTPUT(q->type) &&
v4l2_m2m_has_stopped(ctx->fh.m2m_ctx)) {
notify_eos(ctx);
@@ -1916,7 +1942,7 @@ static int mxc_jpeg_buf_prepare(struct vb2_buffer *vb)
return -EINVAL;
for (i = 0; i < q_data->fmt->mem_planes; i++) {
sizeimage = mxc_jpeg_get_plane_size(q_data, i);
- if (vb2_plane_size(vb, i) < sizeimage) {
+ if (!ctx->source_change && vb2_plane_size(vb, i) < sizeimage) {
dev_err(dev, "plane %d too small (%lu < %lu)",
i, vb2_plane_size(vb, i), sizeimage);
return -EINVAL;
@@ -2768,7 +2794,7 @@ static int mxc_jpeg_probe(struct platform_device *pdev)
ret = mxc_jpeg_attach_pm_domains(jpeg);
if (ret < 0) {
dev_err(dev, "failed to attach power domains %d\n", ret);
- return ret;
+ goto err_clk;
}
/* v4l2 */
diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
index d80e94cc9d99..dc4afeeff5b6 100644
--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
@@ -99,6 +99,7 @@ struct mxc_jpeg_ctx {
enum mxc_jpeg_enc_state enc_state;
int slot;
unsigned int source_change;
+ bool need_initial_source_change_evt;
bool header_parsed;
struct v4l2_ctrl_handler ctrl_handler;
u8 jpeg_quality;
diff --git a/drivers/media/platform/nxp/imx-mipi-csis.c b/drivers/media/platform/nxp/imx-mipi-csis.c
index 5f93712bf485..6cb20b45e0a1 100644
--- a/drivers/media/platform/nxp/imx-mipi-csis.c
+++ b/drivers/media/platform/nxp/imx-mipi-csis.c
@@ -1114,8 +1114,6 @@ static int mipi_csis_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
fd->type = V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL;
fd->num_entries = 1;
- memset(entry, 0, sizeof(*entry));
-
entry->flags = 0;
entry->pixelcode = csis_fmt->code;
entry->bus.csi2.vc = 0;
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
index 81be744e9f1b..f73facb97dc5 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
@@ -406,12 +406,10 @@ static int mxc_isi_clk_get(struct mxc_isi_dev *isi)
* sizeof(*isi->clks);
int ret;
- isi->clks = devm_kmalloc(isi->dev, size, GFP_KERNEL);
+ isi->clks = devm_kmemdup(isi->dev, isi->pdata->clks, size, GFP_KERNEL);
if (!isi->clks)
return -ENOMEM;
- memcpy(isi->clks, isi->pdata->clks, size);
-
ret = devm_clk_bulk_get(isi->dev, isi->pdata->num_clks,
isi->clks);
if (ret < 0) {
diff --git a/drivers/media/platform/qcom/camss/camss-csid-4-1.c b/drivers/media/platform/qcom/camss/camss-csid-4-1.c
index d2aec0679dfc..dd49a40e6a70 100644
--- a/drivers/media/platform/qcom/camss/camss-csid-4-1.c
+++ b/drivers/media/platform/qcom/camss/camss-csid-4-1.c
@@ -47,28 +47,28 @@
static const struct csid_format csid_formats[] = {
{
- MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_UYVY8_1X16,
DATA_TYPE_YUV422_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
- MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_VYUY8_1X16,
DATA_TYPE_YUV422_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
- MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YUYV8_1X16,
DATA_TYPE_YUV422_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
- MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_YVYU8_1X16,
DATA_TYPE_YUV422_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
diff --git a/drivers/media/platform/qcom/camss/camss-csid-4-7.c b/drivers/media/platform/qcom/camss/camss-csid-4-7.c
index e7436ec6d02b..6b26e036294e 100644
--- a/drivers/media/platform/qcom/camss/camss-csid-4-7.c
+++ b/drivers/media/platform/qcom/camss/camss-csid-4-7.c
@@ -46,28 +46,28 @@
static const struct csid_format csid_formats[] = {
{
- MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_UYVY8_1X16,
DATA_TYPE_YUV422_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
- MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_VYUY8_1X16,
DATA_TYPE_YUV422_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
- MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YUYV8_1X16,
DATA_TYPE_YUV422_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
- MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_YVYU8_1X16,
DATA_TYPE_YUV422_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
diff --git a/drivers/media/platform/qcom/camss/camss-csid-gen2.c b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
index 0f8ac29d038d..05ff5fa8095a 100644
--- a/drivers/media/platform/qcom/camss/camss-csid-gen2.c
+++ b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
@@ -179,28 +179,28 @@
static const struct csid_format csid_formats[] = {
{
- MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_UYVY8_1X16,
DATA_TYPE_YUV422_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
- MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_VYUY8_1X16,
DATA_TYPE_YUV422_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
- MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YUYV8_1X16,
DATA_TYPE_YUV422_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
- MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_YVYU8_1X16,
DATA_TYPE_YUV422_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
@@ -352,12 +352,21 @@ static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc)
phy_sel = csid->phy.csiphy_id;
if (enable) {
- u8 dt_id = vc;
+ /*
+ * DT_ID is a two-bit bitfield that is concatenated with
+ * the four least significant bits of the five-bit VC
+ * bitfield to generate an internal CID value.
+ *
+ * CSID_RDI_CFG0(vc)
+ * DT_ID : 28:27
+ * VC : 26:22
+ * DT : 21:16
+ *
+ * CID : VC 3:0 << 2 | DT_ID 1:0
+ */
+ u8 dt_id = vc & 0x03;
if (tg->enabled) {
- /* Config Test Generator */
- vc = 0xa;
-
/* configure one DT, infinite frames */
val = vc << TPG_VC_CFG0_VC_NUM;
val |= INTELEAVING_MODE_ONE_SHOT << TPG_VC_CFG0_LINE_INTERLEAVING_MODE;
@@ -370,14 +379,14 @@ static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc)
writel_relaxed(0x12345678, csid->base + CSID_TPG_LFSR_SEED);
- val = input_format->height & 0x1fff << TPG_DT_n_CFG_0_FRAME_HEIGHT;
- val |= input_format->width & 0x1fff << TPG_DT_n_CFG_0_FRAME_WIDTH;
+ val = (input_format->height & 0x1fff) << TPG_DT_n_CFG_0_FRAME_HEIGHT;
+ val |= (input_format->width & 0x1fff) << TPG_DT_n_CFG_0_FRAME_WIDTH;
writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_0(0));
val = format->data_type << TPG_DT_n_CFG_1_DATA_TYPE;
writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_1(0));
- val = tg->mode << TPG_DT_n_CFG_2_PAYLOAD_MODE;
+ val = (tg->mode - 1) << TPG_DT_n_CFG_2_PAYLOAD_MODE;
val |= 0xBE << TPG_DT_n_CFG_2_USER_SPECIFIED_PAYLOAD;
val |= format->decode_format << TPG_DT_n_CFG_2_ENCODE_FORMAT;
writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_2(0));
@@ -449,6 +458,8 @@ static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc)
writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG0);
val = 1 << CSI2_RX_CFG1_PACKET_ECC_CORRECTION_EN;
+ if (vc > 3)
+ val |= 1 << CSI2_RX_CFG1_VC_MODE;
val |= 1 << CSI2_RX_CFG1_MISR_EN;
writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG1);
diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c
index 6360314f04a6..95873f988f7e 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.c
+++ b/drivers/media/platform/qcom/camss/camss-csid.c
@@ -159,15 +159,17 @@ static int csid_set_power(struct v4l2_subdev *sd, int on)
struct camss *camss = csid->camss;
struct device *dev = camss->dev;
struct vfe_device *vfe = &camss->vfe[csid->id];
- u32 version = camss->version;
int ret = 0;
if (on) {
- if (version == CAMSS_8250 || version == CAMSS_845) {
- ret = vfe_get(vfe);
- if (ret < 0)
- return ret;
- }
+ /*
+ * From SDM845 onwards, the VFE needs to be powered on before
+ * switching on the CSID. Do so unconditionally, as there is no
+ * drawback in following the same powering order on older SoCs.
+ */
+ ret = vfe_get(vfe);
+ if (ret < 0)
+ return ret;
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
@@ -217,8 +219,7 @@ static int csid_set_power(struct v4l2_subdev *sd, int on)
regulator_bulk_disable(csid->num_supplies,
csid->supplies);
pm_runtime_put_sync(dev);
- if (version == CAMSS_8250 || version == CAMSS_845)
- vfe_put(vfe);
+ vfe_put(vfe);
}
return ret;
@@ -307,7 +308,7 @@ static void csid_try_format(struct csid_device *csid,
/* If not found, use UYVY as default */
if (i >= csid->nformats)
- fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
+ fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
fmt->width = clamp_t(u32, fmt->width, 1, 8191);
fmt->height = clamp_t(u32, fmt->height, 1, 8191);
@@ -336,7 +337,7 @@ static void csid_try_format(struct csid_device *csid,
/* If not found, use UYVY as default */
if (i >= csid->nformats)
- fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
+ fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
fmt->width = clamp_t(u32, fmt->width, 1, 8191);
fmt->height = clamp_t(u32, fmt->height, 1, 8191);
@@ -503,7 +504,7 @@ static int csid_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
.which = fh ? V4L2_SUBDEV_FORMAT_TRY :
V4L2_SUBDEV_FORMAT_ACTIVE,
.format = {
- .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
.width = 1920,
.height = 1080
}
@@ -566,7 +567,7 @@ static const struct v4l2_ctrl_ops csid_ctrl_ops = {
* Return 0 on success or a negative error code otherwise
*/
int msm_csid_subdev_init(struct camss *camss, struct csid_device *csid,
- const struct resources *res, u8 id)
+ const struct camss_subdev_resources *res, u8 id)
{
struct device *dev = camss->dev;
struct platform_device *pdev = to_platform_device(dev);
@@ -575,23 +576,13 @@ int msm_csid_subdev_init(struct camss *camss, struct csid_device *csid,
csid->camss = camss;
csid->id = id;
+ csid->ops = res->ops;
- if (camss->version == CAMSS_8x16) {
- csid->ops = &csid_ops_4_1;
- } else if (camss->version == CAMSS_8x96 ||
- camss->version == CAMSS_660) {
- csid->ops = &csid_ops_4_7;
- } else if (camss->version == CAMSS_845 ||
- camss->version == CAMSS_8250) {
- csid->ops = &csid_ops_gen2;
- } else {
- return -EINVAL;
- }
csid->ops->subdev_init(csid);
/* Memory */
- if (camss->version == CAMSS_8250) {
+ if (camss->res->version == CAMSS_8250) {
/* for titan 480, CSID registers are inside the VFE region,
* between the VFE "top" and "bus" registers. this requires
* VFE to be initialized before CSID
diff --git a/drivers/media/platform/qcom/camss/camss-csid.h b/drivers/media/platform/qcom/camss/camss-csid.h
index d4b48432a097..30d94eb2eb04 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.h
+++ b/drivers/media/platform/qcom/camss/camss-csid.h
@@ -172,7 +172,7 @@ struct csid_device {
const struct csid_hw_ops *ops;
};
-struct resources;
+struct camss_subdev_resources;
/*
* csid_find_code - Find a format code in an array using array index or format code
@@ -200,7 +200,7 @@ const struct csid_format *csid_get_fmt_entry(const struct csid_format *formats,
u32 code);
int msm_csid_subdev_init(struct camss *camss, struct csid_device *csid,
- const struct resources *res, u8 id);
+ const struct camss_subdev_resources *res, u8 id);
int msm_csid_register_entity(struct csid_device *csid,
struct v4l2_device *v4l2_dev);
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
index 04baa80494c6..f50e2235c37f 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
@@ -402,7 +402,7 @@ static void csiphy_gen1_config_lanes(struct csiphy_device *csiphy,
val = CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG;
writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG1(l));
- if (csiphy->camss->version == CAMSS_660)
+ if (csiphy->camss->res->version == CAMSS_660)
val = CSIPHY_3PH_LNn_CFG4_T_HS_CLK_MISS_660;
else
val = CSIPHY_3PH_LNn_CFG4_T_HS_CLK_MISS;
@@ -419,7 +419,7 @@ static void csiphy_gen2_config_lanes(struct csiphy_device *csiphy,
int i, l, array_size;
u32 val;
- switch (csiphy->camss->version) {
+ switch (csiphy->camss->res->version) {
case CAMSS_845:
r = &lane_regs_sdm845[0][0];
array_size = ARRAY_SIZE(lane_regs_sdm845[0]);
@@ -468,15 +468,15 @@ static void csiphy_lanes_enable(struct csiphy_device *csiphy,
s64 link_freq, u8 lane_mask)
{
struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg;
- bool is_gen2 = (csiphy->camss->version == CAMSS_845 ||
- csiphy->camss->version == CAMSS_8250);
+ bool is_gen2 = (csiphy->camss->res->version == CAMSS_845 ||
+ csiphy->camss->res->version == CAMSS_8250);
u8 settle_cnt;
u8 val;
int i;
settle_cnt = csiphy_settle_cnt_calc(link_freq, csiphy->timer_clk_rate);
- val = is_gen2 ? BIT(7) : CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE;
+ val = CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE;
for (i = 0; i < c->num_data; i++)
val |= BIT(c->data[i].pos * 2);
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
index 3f726a7237f5..edd573606a6a 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
@@ -30,10 +30,10 @@ struct csiphy_format {
};
static const struct csiphy_format csiphy_formats_8x16[] = {
- { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
- { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
- { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
- { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+ { MEDIA_BUS_FMT_UYVY8_1X16, 8 },
+ { MEDIA_BUS_FMT_VYUY8_1X16, 8 },
+ { MEDIA_BUS_FMT_YUYV8_1X16, 8 },
+ { MEDIA_BUS_FMT_YVYU8_1X16, 8 },
{ MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
{ MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
{ MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
@@ -50,10 +50,10 @@ static const struct csiphy_format csiphy_formats_8x16[] = {
};
static const struct csiphy_format csiphy_formats_8x96[] = {
- { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
- { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
- { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
- { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+ { MEDIA_BUS_FMT_UYVY8_1X16, 8 },
+ { MEDIA_BUS_FMT_VYUY8_1X16, 8 },
+ { MEDIA_BUS_FMT_YUYV8_1X16, 8 },
+ { MEDIA_BUS_FMT_YVYU8_1X16, 8 },
{ MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
{ MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
{ MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
@@ -74,10 +74,10 @@ static const struct csiphy_format csiphy_formats_8x96[] = {
};
static const struct csiphy_format csiphy_formats_sdm845[] = {
- { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
- { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
- { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
- { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+ { MEDIA_BUS_FMT_UYVY8_1X16, 8 },
+ { MEDIA_BUS_FMT_VYUY8_1X16, 8 },
+ { MEDIA_BUS_FMT_YUYV8_1X16, 8 },
+ { MEDIA_BUS_FMT_YVYU8_1X16, 8 },
{ MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
{ MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
{ MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
@@ -357,7 +357,7 @@ static void csiphy_try_format(struct csiphy_device *csiphy,
/* If not found, use UYVY as default */
if (i >= csiphy->nformats)
- fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
+ fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
fmt->width = clamp_t(u32, fmt->width, 1, 8191);
fmt->height = clamp_t(u32, fmt->height, 1, 8191);
@@ -527,7 +527,7 @@ static int csiphy_init_formats(struct v4l2_subdev *sd,
.which = fh ? V4L2_SUBDEV_FORMAT_TRY :
V4L2_SUBDEV_FORMAT_ACTIVE,
.format = {
- .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
.width = 1920,
.height = 1080
}
@@ -536,6 +536,15 @@ static int csiphy_init_formats(struct v4l2_subdev *sd,
return csiphy_set_format(sd, fh ? fh->state : NULL, &format);
}
+static bool csiphy_match_clock_name(const char *clock_name, const char *format,
+ int index)
+{
+ char name[16]; /* csiphyXXX_timer\0 */
+
+ snprintf(name, sizeof(name), format, index);
+ return !strcmp(clock_name, name);
+}
+
/*
* msm_csiphy_subdev_init - Initialize CSIPHY device structure and resources
* @csiphy: CSIPHY device
@@ -546,33 +555,33 @@ static int csiphy_init_formats(struct v4l2_subdev *sd,
*/
int msm_csiphy_subdev_init(struct camss *camss,
struct csiphy_device *csiphy,
- const struct resources *res, u8 id)
+ const struct camss_subdev_resources *res, u8 id)
{
struct device *dev = camss->dev;
struct platform_device *pdev = to_platform_device(dev);
- int i, j;
+ int i, j, k;
int ret;
csiphy->camss = camss;
csiphy->id = id;
csiphy->cfg.combo_mode = 0;
+ csiphy->ops = res->ops;
- if (camss->version == CAMSS_8x16) {
- csiphy->ops = &csiphy_ops_2ph_1_0;
+ switch (camss->res->version) {
+ case CAMSS_8x16:
csiphy->formats = csiphy_formats_8x16;
csiphy->nformats = ARRAY_SIZE(csiphy_formats_8x16);
- } else if (camss->version == CAMSS_8x96 ||
- camss->version == CAMSS_660) {
- csiphy->ops = &csiphy_ops_3ph_1_0;
+ break;
+ case CAMSS_8x96:
+ case CAMSS_660:
csiphy->formats = csiphy_formats_8x96;
csiphy->nformats = ARRAY_SIZE(csiphy_formats_8x96);
- } else if (camss->version == CAMSS_845 ||
- camss->version == CAMSS_8250) {
- csiphy->ops = &csiphy_ops_3ph_1_0;
+ break;
+ case CAMSS_845:
+ case CAMSS_8250:
csiphy->formats = csiphy_formats_sdm845;
csiphy->nformats = ARRAY_SIZE(csiphy_formats_sdm845);
- } else {
- return -EINVAL;
+ break;
}
/* Memory */
@@ -581,8 +590,8 @@ int msm_csiphy_subdev_init(struct camss *camss,
if (IS_ERR(csiphy->base))
return PTR_ERR(csiphy->base);
- if (camss->version == CAMSS_8x16 ||
- camss->version == CAMSS_8x96) {
+ if (camss->res->version == CAMSS_8x16 ||
+ camss->res->version == CAMSS_8x96) {
csiphy->base_clk_mux =
devm_platform_ioremap_resource_byname(pdev, res->reg[1]);
if (IS_ERR(csiphy->base_clk_mux))
@@ -656,19 +665,23 @@ int msm_csiphy_subdev_init(struct camss *camss,
for (j = 0; j < clock->nfreqs; j++)
clock->freq[j] = res->clock_rate[i][j];
- if (!strcmp(clock->name, "csiphy0_timer") ||
- !strcmp(clock->name, "csiphy1_timer") ||
- !strcmp(clock->name, "csiphy2_timer") ||
- !strcmp(clock->name, "csiphy3_timer") ||
- !strcmp(clock->name, "csiphy4_timer") ||
- !strcmp(clock->name, "csiphy5_timer"))
- csiphy->rate_set[i] = true;
-
- if (camss->version == CAMSS_660 &&
- (!strcmp(clock->name, "csi0_phy") ||
- !strcmp(clock->name, "csi1_phy") ||
- !strcmp(clock->name, "csi2_phy")))
- csiphy->rate_set[i] = true;
+ for (k = 0; k < camss->res->csiphy_num; k++) {
+ csiphy->rate_set[i] = csiphy_match_clock_name(clock->name,
+ "csiphy%d_timer", k);
+ if (csiphy->rate_set[i])
+ break;
+
+ if (camss->res->version == CAMSS_660) {
+ csiphy->rate_set[i] = csiphy_match_clock_name(clock->name,
+ "csi%d_phy", k);
+ if (csiphy->rate_set[i])
+ break;
+ }
+
+ csiphy->rate_set[i] = csiphy_match_clock_name(clock->name, "csiphy%d", k);
+ if (csiphy->rate_set[i])
+ break;
+ }
}
return 0;
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.h b/drivers/media/platform/qcom/camss/camss-csiphy.h
index 1c14947f92d3..c9b7fe82b1f0 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.h
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.h
@@ -83,11 +83,11 @@ struct csiphy_device {
unsigned int nformats;
};
-struct resources;
+struct camss_subdev_resources;
int msm_csiphy_subdev_init(struct camss *camss,
struct csiphy_device *csiphy,
- const struct resources *res, u8 id);
+ const struct camss_subdev_resources *res, u8 id);
int msm_csiphy_register_entity(struct csiphy_device *csiphy,
struct v4l2_device *v4l2_dev);
diff --git a/drivers/media/platform/qcom/camss/camss-ispif.c b/drivers/media/platform/qcom/camss/camss-ispif.c
index b713f5b86aba..be9d2f0a10c1 100644
--- a/drivers/media/platform/qcom/camss/camss-ispif.c
+++ b/drivers/media/platform/qcom/camss/camss-ispif.c
@@ -106,10 +106,10 @@ enum ispif_intf_cmd {
};
static const u32 ispif_formats_8x16[] = {
- MEDIA_BUS_FMT_UYVY8_2X8,
- MEDIA_BUS_FMT_VYUY8_2X8,
- MEDIA_BUS_FMT_YUYV8_2X8,
- MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_VYUY8_1X16,
+ MEDIA_BUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_YVYU8_1X16,
MEDIA_BUS_FMT_SBGGR8_1X8,
MEDIA_BUS_FMT_SGBRG8_1X8,
MEDIA_BUS_FMT_SGRBG8_1X8,
@@ -126,10 +126,10 @@ static const u32 ispif_formats_8x16[] = {
};
static const u32 ispif_formats_8x96[] = {
- MEDIA_BUS_FMT_UYVY8_2X8,
- MEDIA_BUS_FMT_VYUY8_2X8,
- MEDIA_BUS_FMT_YUYV8_2X8,
- MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_VYUY8_1X16,
+ MEDIA_BUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_YVYU8_1X16,
MEDIA_BUS_FMT_SBGGR8_1X8,
MEDIA_BUS_FMT_SGBRG8_1X8,
MEDIA_BUS_FMT_SGRBG8_1X8,
@@ -270,7 +270,7 @@ static int ispif_vfe_reset(struct ispif_device *ispif, u8 vfe_id)
unsigned long time;
u32 val;
- if (vfe_id > (camss->vfe_num - 1)) {
+ if (vfe_id > camss->res->vfe_num - 1) {
dev_err(camss->dev,
"Error: asked reset for invalid VFE%d\n", vfe_id);
return -ENOENT;
@@ -829,8 +829,8 @@ static int ispif_set_stream(struct v4l2_subdev *sd, int enable)
ispif_select_csid(ispif, intf, csid, vfe, 1);
ispif_select_cid(ispif, intf, cid, vfe, 1);
ispif_config_irq(ispif, intf, vfe, 1);
- if (camss->version == CAMSS_8x96 ||
- camss->version == CAMSS_660)
+ if (camss->res->version == CAMSS_8x96 ||
+ camss->res->version == CAMSS_660)
ispif_config_pack(ispif,
line->fmt[MSM_ISPIF_PAD_SINK].code,
intf, cid, vfe, 1);
@@ -847,8 +847,8 @@ static int ispif_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
mutex_lock(&ispif->config_lock);
- if (camss->version == CAMSS_8x96 ||
- camss->version == CAMSS_660)
+ if (camss->res->version == CAMSS_8x96 ||
+ camss->res->version == CAMSS_660)
ispif_config_pack(ispif,
line->fmt[MSM_ISPIF_PAD_SINK].code,
intf, cid, vfe, 0);
@@ -911,7 +911,7 @@ static void ispif_try_format(struct ispif_line *line,
/* If not found, use UYVY as default */
if (i >= line->nformats)
- fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
+ fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
fmt->width = clamp_t(u32, fmt->width, 1, 8191);
fmt->height = clamp_t(u32, fmt->height, 1, 8191);
@@ -1078,7 +1078,7 @@ static int ispif_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
.which = fh ? V4L2_SUBDEV_FORMAT_TRY :
V4L2_SUBDEV_FORMAT_ACTIVE,
.format = {
- .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
.width = 1920,
.height = 1080
}
@@ -1095,7 +1095,7 @@ static int ispif_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
* Return 0 on success or a negative error code otherwise
*/
int msm_ispif_subdev_init(struct camss *camss,
- const struct resources_ispif *res)
+ const struct camss_subdev_resources *res)
{
struct device *dev = camss->dev;
struct ispif_device *ispif = camss->ispif;
@@ -1109,10 +1109,10 @@ int msm_ispif_subdev_init(struct camss *camss,
ispif->camss = camss;
/* Number of ISPIF lines - same as number of CSID hardware modules */
- if (camss->version == CAMSS_8x16)
+ if (camss->res->version == CAMSS_8x16)
ispif->line_num = 2;
- else if (camss->version == CAMSS_8x96 ||
- camss->version == CAMSS_660)
+ else if (camss->res->version == CAMSS_8x96 ||
+ camss->res->version == CAMSS_660)
ispif->line_num = 4;
else
return -EINVAL;
@@ -1126,12 +1126,12 @@ int msm_ispif_subdev_init(struct camss *camss,
ispif->line[i].ispif = ispif;
ispif->line[i].id = i;
- if (camss->version == CAMSS_8x16) {
+ if (camss->res->version == CAMSS_8x16) {
ispif->line[i].formats = ispif_formats_8x16;
ispif->line[i].nformats =
ARRAY_SIZE(ispif_formats_8x16);
- } else if (camss->version == CAMSS_8x96 ||
- camss->version == CAMSS_660) {
+ } else if (camss->res->version == CAMSS_8x96 ||
+ camss->res->version == CAMSS_660) {
ispif->line[i].formats = ispif_formats_8x96;
ispif->line[i].nformats =
ARRAY_SIZE(ispif_formats_8x96);
@@ -1152,18 +1152,18 @@ int msm_ispif_subdev_init(struct camss *camss,
/* Interrupt */
- ret = platform_get_irq_byname(pdev, res->interrupt);
+ ret = platform_get_irq_byname(pdev, res->interrupt[0]);
if (ret < 0)
return ret;
ispif->irq = ret;
snprintf(ispif->irq_name, sizeof(ispif->irq_name), "%s_%s",
dev_name(dev), MSM_ISPIF_NAME);
- if (camss->version == CAMSS_8x16)
+ if (camss->res->version == CAMSS_8x16)
ret = devm_request_irq(dev, ispif->irq, ispif_isr_8x16,
IRQF_TRIGGER_RISING, ispif->irq_name, ispif);
- else if (camss->version == CAMSS_8x96 ||
- camss->version == CAMSS_660)
+ else if (camss->res->version == CAMSS_8x96 ||
+ camss->res->version == CAMSS_660)
ret = devm_request_irq(dev, ispif->irq, ispif_isr_8x96,
IRQF_TRIGGER_RISING, ispif->irq_name, ispif);
else
diff --git a/drivers/media/platform/qcom/camss/camss-ispif.h b/drivers/media/platform/qcom/camss/camss-ispif.h
index fdf28e68cc7d..dff6d5b35c72 100644
--- a/drivers/media/platform/qcom/camss/camss-ispif.h
+++ b/drivers/media/platform/qcom/camss/camss-ispif.h
@@ -66,10 +66,10 @@ struct ispif_device {
struct camss *camss;
};
-struct resources_ispif;
+struct camss_subdev_resources;
int msm_ispif_subdev_init(struct camss *camss,
- const struct resources_ispif *res);
+ const struct camss_subdev_resources *res);
int msm_ispif_register_entities(struct ispif_device *ispif,
struct v4l2_device *v4l2_dev);
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-170.c b/drivers/media/platform/qcom/camss/camss-vfe-170.c
index 02494c89da91..0b211fed1276 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-170.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-170.c
@@ -7,7 +7,6 @@
* Copyright (C) 2020-2021 Linaro Ltd.
*/
-#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -210,7 +209,8 @@ static void vfe_global_reset(struct vfe_device *vfe)
GLOBAL_RESET_CMD_IDLE_CGC |
GLOBAL_RESET_CMD_RDI0 |
GLOBAL_RESET_CMD_RDI1 |
- GLOBAL_RESET_CMD_RDI2;
+ GLOBAL_RESET_CMD_RDI2 |
+ GLOBAL_RESET_CMD_RDI3;
writel_relaxed(BIT(31), vfe->base + VFE_IRQ_MASK_0);
@@ -344,7 +344,7 @@ static void vfe_violation_read(struct vfe_device *vfe)
static irqreturn_t vfe_isr(int irq, void *dev)
{
struct vfe_device *vfe = dev;
- u32 status0, status1, vfe_bus_status[3];
+ u32 status0, status1, vfe_bus_status[VFE_LINE_NUM_MAX];
int i, wm;
status0 = readl_relaxed(vfe->base + VFE_IRQ_STATUS_0);
@@ -353,7 +353,7 @@ static irqreturn_t vfe_isr(int irq, void *dev)
writel_relaxed(status0, vfe->base + VFE_IRQ_CLEAR_0);
writel_relaxed(status1, vfe->base + VFE_IRQ_CLEAR_1);
- for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++) {
+ for (i = VFE_LINE_RDI0; i < vfe->line_num; i++) {
vfe_bus_status[i] = readl_relaxed(vfe->base + VFE_BUS_IRQ_STATUS(i));
writel_relaxed(vfe_bus_status[i], vfe->base + VFE_BUS_IRQ_CLEAR(i));
}
@@ -367,11 +367,11 @@ static irqreturn_t vfe_isr(int irq, void *dev)
if (status0 & STATUS_0_RESET_ACK)
vfe->isr_ops.reset_ack(vfe);
- for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++)
+ for (i = VFE_LINE_RDI0; i < vfe->line_num; i++)
if (status0 & STATUS_0_RDI_REG_UPDATE(i))
vfe->isr_ops.reg_update(vfe, i);
- for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++)
+ for (i = VFE_LINE_RDI0; i < vfe->line_num; i++)
if (status0 & STATUS_1_RDI_SOF(i))
vfe->isr_ops.sof(vfe, i);
@@ -494,37 +494,6 @@ static int vfe_enable_output(struct vfe_line *line)
return 0;
}
-static int vfe_disable_output(struct vfe_line *line)
-{
- struct vfe_device *vfe = to_vfe(line);
- struct vfe_output *output = &line->output;
- unsigned long flags;
- unsigned int i;
- bool done;
- int timeout = 0;
-
- do {
- spin_lock_irqsave(&vfe->output_lock, flags);
- done = !output->gen2.active_num;
- spin_unlock_irqrestore(&vfe->output_lock, flags);
- usleep_range(10000, 20000);
-
- if (timeout++ == 100) {
- dev_err(vfe->camss->dev, "VFE idle timeout - resetting\n");
- vfe_reset(vfe);
- output->gen2.active_num = 0;
- return 0;
- }
- } while (!done);
-
- spin_lock_irqsave(&vfe->output_lock, flags);
- for (i = 0; i < output->wm_num; i++)
- vfe_wm_stop(vfe, output->wm_idx[i]);
- spin_unlock_irqrestore(&vfe->output_lock, flags);
-
- return 0;
-}
-
/*
* vfe_enable - Enable streaming on VFE line
* @line: VFE line
@@ -571,29 +540,6 @@ error_get_output:
}
/*
- * vfe_disable - Disable streaming on VFE line
- * @line: VFE line
- *
- * Return 0 on success or a negative error code otherwise
- */
-static int vfe_disable(struct vfe_line *line)
-{
- struct vfe_device *vfe = to_vfe(line);
-
- vfe_disable_output(line);
-
- vfe_put_output(line);
-
- mutex_lock(&vfe->stream_lock);
-
- vfe->stream_count--;
-
- mutex_unlock(&vfe->stream_lock);
-
- return 0;
-}
-
-/*
* vfe_isr_sof - Process start of frame interrupt
* @vfe: VFE Device
* @line_id: VFE line
@@ -689,7 +635,7 @@ static void vfe_pm_domain_off(struct vfe_device *vfe)
{
struct camss *camss = vfe->camss;
- if (vfe->id >= camss->vfe_num)
+ if (vfe->id >= camss->res->vfe_num)
return;
device_link_del(camss->genpd_link[vfe->id]);
@@ -704,7 +650,7 @@ static int vfe_pm_domain_on(struct vfe_device *vfe)
struct camss *camss = vfe->camss;
enum vfe_line_id id = vfe->id;
- if (id >= camss->vfe_num)
+ if (id >= camss->res->vfe_num)
return 0;
camss->genpd_link[id] = device_link_add(camss->dev, camss->genpd[id],
@@ -769,8 +715,6 @@ static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
{
vfe->isr_ops = vfe_isr_ops_170;
vfe->video_ops = vfe_video_ops_170;
-
- vfe->line_num = VFE_LINE_NUM_GEN2;
}
const struct vfe_hw_ops vfe_ops_170 = {
@@ -787,4 +731,5 @@ const struct vfe_hw_ops vfe_ops_170 = {
.vfe_enable = vfe_enable,
.vfe_halt = vfe_halt,
.violation_read = vfe_violation_read,
+ .vfe_wm_stop = vfe_wm_stop,
};
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
index 42047b11ba52..2911e4126e7a 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
@@ -614,20 +614,20 @@ static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line)
writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1);
switch (line->fmt[MSM_VFE_PAD_SINK].code) {
- case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV;
odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV;
break;
- case MEDIA_BUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_1X16:
even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU;
odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU;
break;
- case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
default:
even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY;
odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY;
break;
- case MEDIA_BUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_1X16:
even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY;
odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY;
break;
@@ -775,17 +775,17 @@ static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line)
u32 val;
switch (line->fmt[MSM_VFE_PAD_SINK].code) {
- case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR;
break;
- case MEDIA_BUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_1X16:
val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB;
break;
- case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
default:
val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY;
break;
- case MEDIA_BUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_1X16:
val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY;
break;
}
@@ -992,8 +992,6 @@ static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
vfe->isr_ops = vfe_isr_ops_gen1;
vfe->ops_gen1 = &vfe_ops_gen1_4_1;
vfe->video_ops = vfe_video_ops_gen1;
-
- vfe->line_num = VFE_LINE_NUM_GEN1;
}
const struct vfe_hw_ops vfe_ops_4_1 = {
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
index ab2d57bdf5e7..b65ed0fef595 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
@@ -768,20 +768,20 @@ static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line)
writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1);
switch (line->fmt[MSM_VFE_PAD_SINK].code) {
- case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV;
odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV;
break;
- case MEDIA_BUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_1X16:
even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU;
odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU;
break;
- case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
default:
even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY;
odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY;
break;
- case MEDIA_BUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_1X16:
even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY;
odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY;
break;
@@ -941,17 +941,17 @@ static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line)
u32 val;
switch (line->fmt[MSM_VFE_PAD_SINK].code) {
- case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR;
break;
- case MEDIA_BUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_1X16:
val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB;
break;
- case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
default:
val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY;
break;
- case MEDIA_BUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_1X16:
val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY;
break;
}
@@ -1188,8 +1188,6 @@ static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
vfe->isr_ops = vfe_isr_ops_gen1;
vfe->ops_gen1 = &vfe_ops_gen1_4_7;
vfe->video_ops = vfe_video_ops_gen1;
-
- vfe->line_num = VFE_LINE_NUM_GEN1;
}
const struct vfe_hw_ops vfe_ops_4_7 = {
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-8.c b/drivers/media/platform/qcom/camss/camss-vfe-4-8.c
index 7e6b62c930ac..7b3805177f03 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-8.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-8.c
@@ -739,20 +739,20 @@ static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line)
writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1);
switch (line->fmt[MSM_VFE_PAD_SINK].code) {
- case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV;
odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV;
break;
- case MEDIA_BUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_1X16:
even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU;
odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU;
break;
- case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
default:
even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY;
odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY;
break;
- case MEDIA_BUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_1X16:
even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY;
odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY;
break;
@@ -873,17 +873,17 @@ static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line)
u32 val;
switch (line->fmt[MSM_VFE_PAD_SINK].code) {
- case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR;
break;
- case MEDIA_BUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_1X16:
val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB;
break;
- case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
default:
val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY;
break;
- case MEDIA_BUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_1X16:
val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY;
break;
}
@@ -1173,8 +1173,6 @@ static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
vfe->isr_ops = vfe_isr_ops_gen1;
vfe->ops_gen1 = &vfe_ops_gen1_4_8;
vfe->video_ops = vfe_video_ops_gen1;
-
- vfe->line_num = VFE_LINE_NUM_GEN1;
}
const struct vfe_hw_ops vfe_ops_4_8 = {
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-480.c b/drivers/media/platform/qcom/camss/camss-vfe-480.c
index f70aad2e8c23..f2368b77fc6d 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-480.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-480.c
@@ -8,7 +8,6 @@
* Copyright (C) 2021 Jonathan Marek
*/
-#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -328,37 +327,6 @@ static int vfe_enable_output(struct vfe_line *line)
return 0;
}
-static int vfe_disable_output(struct vfe_line *line)
-{
- struct vfe_device *vfe = to_vfe(line);
- struct vfe_output *output = &line->output;
- unsigned long flags;
- unsigned int i;
- bool done;
- int timeout = 0;
-
- do {
- spin_lock_irqsave(&vfe->output_lock, flags);
- done = !output->gen2.active_num;
- spin_unlock_irqrestore(&vfe->output_lock, flags);
- usleep_range(10000, 20000);
-
- if (timeout++ == 100) {
- dev_err(vfe->camss->dev, "VFE idle timeout - resetting\n");
- vfe_reset(vfe);
- output->gen2.active_num = 0;
- return 0;
- }
- } while (!done);
-
- spin_lock_irqsave(&vfe->output_lock, flags);
- for (i = 0; i < output->wm_num; i++)
- vfe_wm_stop(vfe, output->wm_idx[i]);
- spin_unlock_irqrestore(&vfe->output_lock, flags);
-
- return 0;
-}
-
/*
* vfe_enable - Enable streaming on VFE line
* @line: VFE line
@@ -407,29 +375,6 @@ error_get_output:
}
/*
- * vfe_disable - Disable streaming on VFE line
- * @line: VFE line
- *
- * Return 0 on success or a negative error code otherwise
- */
-static int vfe_disable(struct vfe_line *line)
-{
- struct vfe_device *vfe = to_vfe(line);
-
- vfe_disable_output(line);
-
- vfe_put_output(line);
-
- mutex_lock(&vfe->stream_lock);
-
- vfe->stream_count--;
-
- mutex_unlock(&vfe->stream_lock);
-
- return 0;
-}
-
-/*
* vfe_isr_reg_update - Process reg update interrupt
* @vfe: VFE Device
* @line_id: VFE line
@@ -515,7 +460,7 @@ static void vfe_pm_domain_off(struct vfe_device *vfe)
{
struct camss *camss = vfe->camss;
- if (vfe->id >= camss->vfe_num)
+ if (vfe->id >= camss->res->vfe_num)
return;
device_link_del(camss->genpd_link[vfe->id]);
@@ -530,7 +475,7 @@ static int vfe_pm_domain_on(struct vfe_device *vfe)
struct camss *camss = vfe->camss;
enum vfe_line_id id = vfe->id;
- if (id >= camss->vfe_num)
+ if (id >= camss->res->vfe_num)
return 0;
camss->genpd_link[id] = device_link_add(camss->dev, camss->genpd[id],
@@ -585,7 +530,6 @@ static const struct camss_video_ops vfe_video_ops_480 = {
static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
{
vfe->video_ops = vfe_video_ops_480;
- vfe->line_num = MAX_VFE_OUTPUT_LINES;
}
const struct vfe_hw_ops vfe_ops_480 = {
@@ -598,4 +542,5 @@ const struct vfe_hw_ops vfe_ops_480 = {
.vfe_disable = vfe_disable,
.vfe_enable = vfe_enable,
.vfe_halt = vfe_halt,
+ .vfe_wm_stop = vfe_wm_stop,
};
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
index 06c95568e5af..4839e2cedfe5 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -37,10 +37,10 @@ struct vfe_format {
};
static const struct vfe_format formats_rdi_8x16[] = {
- { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
- { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
- { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
- { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+ { MEDIA_BUS_FMT_UYVY8_1X16, 8 },
+ { MEDIA_BUS_FMT_VYUY8_1X16, 8 },
+ { MEDIA_BUS_FMT_YUYV8_1X16, 8 },
+ { MEDIA_BUS_FMT_YVYU8_1X16, 8 },
{ MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
{ MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
{ MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
@@ -57,17 +57,17 @@ static const struct vfe_format formats_rdi_8x16[] = {
};
static const struct vfe_format formats_pix_8x16[] = {
- { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
- { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
- { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
- { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+ { MEDIA_BUS_FMT_UYVY8_1X16, 8 },
+ { MEDIA_BUS_FMT_VYUY8_1X16, 8 },
+ { MEDIA_BUS_FMT_YUYV8_1X16, 8 },
+ { MEDIA_BUS_FMT_YVYU8_1X16, 8 },
};
static const struct vfe_format formats_rdi_8x96[] = {
- { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
- { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
- { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
- { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+ { MEDIA_BUS_FMT_UYVY8_1X16, 8 },
+ { MEDIA_BUS_FMT_VYUY8_1X16, 8 },
+ { MEDIA_BUS_FMT_YUYV8_1X16, 8 },
+ { MEDIA_BUS_FMT_YVYU8_1X16, 8 },
{ MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
{ MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
{ MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
@@ -90,17 +90,17 @@ static const struct vfe_format formats_rdi_8x96[] = {
};
static const struct vfe_format formats_pix_8x96[] = {
- { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
- { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
- { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
- { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+ { MEDIA_BUS_FMT_UYVY8_1X16, 8 },
+ { MEDIA_BUS_FMT_VYUY8_1X16, 8 },
+ { MEDIA_BUS_FMT_YUYV8_1X16, 8 },
+ { MEDIA_BUS_FMT_YVYU8_1X16, 8 },
};
static const struct vfe_format formats_rdi_845[] = {
- { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
- { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
- { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
- { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+ { MEDIA_BUS_FMT_UYVY8_1X16, 8 },
+ { MEDIA_BUS_FMT_VYUY8_1X16, 8 },
+ { MEDIA_BUS_FMT_YUYV8_1X16, 8 },
+ { MEDIA_BUS_FMT_YVYU8_1X16, 8 },
{ MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
{ MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
{ MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
@@ -170,42 +170,43 @@ static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
{
struct vfe_device *vfe = to_vfe(line);
- if (vfe->camss->version == CAMSS_8x16)
+ switch (vfe->camss->res->version) {
+ case CAMSS_8x16:
switch (sink_code) {
- case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
{
u32 src_code[] = {
- MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YUYV8_1X16,
MEDIA_BUS_FMT_YUYV8_1_5X8,
};
return vfe_find_code(src_code, ARRAY_SIZE(src_code),
index, src_req_code);
}
- case MEDIA_BUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_1X16:
{
u32 src_code[] = {
- MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_YVYU8_1X16,
MEDIA_BUS_FMT_YVYU8_1_5X8,
};
return vfe_find_code(src_code, ARRAY_SIZE(src_code),
index, src_req_code);
}
- case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
{
u32 src_code[] = {
- MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_UYVY8_1X16,
MEDIA_BUS_FMT_UYVY8_1_5X8,
};
return vfe_find_code(src_code, ARRAY_SIZE(src_code),
index, src_req_code);
}
- case MEDIA_BUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_1X16:
{
u32 src_code[] = {
- MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_VYUY8_1X16,
MEDIA_BUS_FMT_VYUY8_1_5X8,
};
@@ -218,57 +219,58 @@ static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
return sink_code;
}
- else if (vfe->camss->version == CAMSS_8x96 ||
- vfe->camss->version == CAMSS_660 ||
- vfe->camss->version == CAMSS_845 ||
- vfe->camss->version == CAMSS_8250)
+ break;
+ case CAMSS_8x96:
+ case CAMSS_660:
+ case CAMSS_845:
+ case CAMSS_8250:
switch (sink_code) {
- case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
{
u32 src_code[] = {
- MEDIA_BUS_FMT_YUYV8_2X8,
- MEDIA_BUS_FMT_YVYU8_2X8,
- MEDIA_BUS_FMT_UYVY8_2X8,
- MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_YVYU8_1X16,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_VYUY8_1X16,
MEDIA_BUS_FMT_YUYV8_1_5X8,
};
return vfe_find_code(src_code, ARRAY_SIZE(src_code),
index, src_req_code);
}
- case MEDIA_BUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_1X16:
{
u32 src_code[] = {
- MEDIA_BUS_FMT_YVYU8_2X8,
- MEDIA_BUS_FMT_YUYV8_2X8,
- MEDIA_BUS_FMT_UYVY8_2X8,
- MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_YVYU8_1X16,
+ MEDIA_BUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_VYUY8_1X16,
MEDIA_BUS_FMT_YVYU8_1_5X8,
};
return vfe_find_code(src_code, ARRAY_SIZE(src_code),
index, src_req_code);
}
- case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
{
u32 src_code[] = {
- MEDIA_BUS_FMT_UYVY8_2X8,
- MEDIA_BUS_FMT_YUYV8_2X8,
- MEDIA_BUS_FMT_YVYU8_2X8,
- MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_YVYU8_1X16,
+ MEDIA_BUS_FMT_VYUY8_1X16,
MEDIA_BUS_FMT_UYVY8_1_5X8,
};
return vfe_find_code(src_code, ARRAY_SIZE(src_code),
index, src_req_code);
}
- case MEDIA_BUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_1X16:
{
u32 src_code[] = {
- MEDIA_BUS_FMT_VYUY8_2X8,
- MEDIA_BUS_FMT_YUYV8_2X8,
- MEDIA_BUS_FMT_YVYU8_2X8,
- MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_VYUY8_1X16,
+ MEDIA_BUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_YVYU8_1X16,
+ MEDIA_BUS_FMT_UYVY8_1X16,
MEDIA_BUS_FMT_VYUY8_1_5X8,
};
@@ -281,8 +283,9 @@ static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
return sink_code;
}
- else
- return 0;
+ break;
+ }
+ return 0;
}
int vfe_reset(struct vfe_device *vfe)
@@ -407,6 +410,49 @@ int vfe_put_output(struct vfe_line *line)
return 0;
}
+static int vfe_disable_output(struct vfe_line *line)
+{
+ struct vfe_device *vfe = to_vfe(line);
+ struct vfe_output *output = &line->output;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+ for (i = 0; i < output->wm_num; i++)
+ vfe->ops->vfe_wm_stop(vfe, output->wm_idx[i]);
+ output->gen2.active_num = 0;
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ return vfe_reset(vfe);
+}
+
+/*
+ * vfe_disable - Disable streaming on VFE line
+ * @line: VFE line
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int vfe_disable(struct vfe_line *line)
+{
+ struct vfe_device *vfe = to_vfe(line);
+ int ret;
+
+ ret = vfe_disable_output(line);
+ if (ret)
+ goto error;
+
+ vfe_put_output(line);
+
+ mutex_lock(&vfe->stream_lock);
+
+ vfe->stream_count--;
+
+ mutex_unlock(&vfe->stream_lock);
+
+error:
+ return ret;
+}
+
/**
* vfe_isr_comp_done() - Process composite image done interrupt
* @vfe: VFE Device
@@ -428,6 +474,20 @@ void vfe_isr_reset_ack(struct vfe_device *vfe)
complete(&vfe->reset_complete);
}
+static int vfe_match_clock_names(struct vfe_device *vfe,
+ struct camss_clock *clock)
+{
+ char vfe_name[7]; /* vfeXXX\0 */
+ char vfe_lite_name[12]; /* vfe_liteXXX\0 */
+
+ snprintf(vfe_name, sizeof(vfe_name), "vfe%d", vfe->id);
+ snprintf(vfe_lite_name, sizeof(vfe_lite_name), "vfe_lite%d", vfe->id);
+
+ return (!strcmp(clock->name, vfe_name) ||
+ !strcmp(clock->name, vfe_lite_name) ||
+ !strcmp(clock->name, "vfe_lite"));
+}
+
/*
* vfe_set_clock_rates - Calculate and set clock rates on VFE module
* @vfe: VFE device
@@ -451,9 +511,7 @@ static int vfe_set_clock_rates(struct vfe_device *vfe)
for (i = 0; i < vfe->nclocks; i++) {
struct camss_clock *clock = &vfe->clock[i];
- if (!strcmp(clock->name, "vfe0") ||
- !strcmp(clock->name, "vfe1") ||
- !strcmp(clock->name, "vfe_lite")) {
+ if (vfe_match_clock_names(vfe, clock)) {
u64 min_rate = 0;
long rate;
@@ -534,8 +592,7 @@ static int vfe_check_clock_rates(struct vfe_device *vfe)
for (i = 0; i < vfe->nclocks; i++) {
struct camss_clock *clock = &vfe->clock[i];
- if (!strcmp(clock->name, "vfe0") ||
- !strcmp(clock->name, "vfe1")) {
+ if (vfe_match_clock_names(vfe, clock)) {
u64 min_rate = 0;
unsigned long rate;
@@ -611,7 +668,7 @@ int vfe_get(struct vfe_device *vfe)
} else {
ret = vfe_check_clock_rates(vfe);
if (ret < 0)
- goto error_pm_runtime_get;
+ goto error_pm_domain;
}
vfe->power_count++;
@@ -844,7 +901,7 @@ static void vfe_try_format(struct vfe_line *line,
/* If not found, use UYVY as default */
if (i >= line->nformats)
- fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
+ fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
fmt->width = clamp_t(u32, fmt->width, 1, 8191);
fmt->height = clamp_t(u32, fmt->height, 1, 8191);
@@ -1261,7 +1318,7 @@ static int vfe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
.which = fh ? V4L2_SUBDEV_FORMAT_TRY :
V4L2_SUBDEV_FORMAT_ACTIVE,
.format = {
- .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
.width = 1920,
.height = 1080
}
@@ -1278,32 +1335,19 @@ static int vfe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
* Return 0 on success or a negative error code otherwise
*/
int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
- const struct resources *res, u8 id)
+ const struct camss_subdev_resources *res, u8 id)
{
struct device *dev = camss->dev;
struct platform_device *pdev = to_platform_device(dev);
int i, j;
int ret;
- switch (camss->version) {
- case CAMSS_8x16:
- vfe->ops = &vfe_ops_4_1;
- break;
- case CAMSS_8x96:
- vfe->ops = &vfe_ops_4_7;
- break;
- case CAMSS_660:
- vfe->ops = &vfe_ops_4_8;
- break;
- case CAMSS_845:
- vfe->ops = &vfe_ops_170;
- break;
- case CAMSS_8250:
- vfe->ops = &vfe_ops_480;
- break;
- default:
+ vfe->ops = res->ops;
+
+ if (!res->line_num)
return -EINVAL;
- }
+
+ vfe->line_num = res->line_num;
vfe->ops->subdev_init(dev, vfe);
/* Memory */
@@ -1391,7 +1435,8 @@ int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
init_completion(&l->output.sof);
init_completion(&l->output.reg_update);
- if (camss->version == CAMSS_8x16) {
+ switch (camss->res->version) {
+ case CAMSS_8x16:
if (i == VFE_LINE_PIX) {
l->formats = formats_pix_8x16;
l->nformats = ARRAY_SIZE(formats_pix_8x16);
@@ -1399,8 +1444,9 @@ int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
l->formats = formats_rdi_8x16;
l->nformats = ARRAY_SIZE(formats_rdi_8x16);
}
- } else if (camss->version == CAMSS_8x96 ||
- camss->version == CAMSS_660) {
+ break;
+ case CAMSS_8x96:
+ case CAMSS_660:
if (i == VFE_LINE_PIX) {
l->formats = formats_pix_8x96;
l->nformats = ARRAY_SIZE(formats_pix_8x96);
@@ -1408,12 +1454,12 @@ int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
l->formats = formats_rdi_8x96;
l->nformats = ARRAY_SIZE(formats_rdi_8x96);
}
- } else if (camss->version == CAMSS_845 ||
- camss->version == CAMSS_8250) {
+ break;
+ case CAMSS_845:
+ case CAMSS_8250:
l->formats = formats_rdi_845;
l->nformats = ARRAY_SIZE(formats_rdi_845);
- } else {
- return -EINVAL;
+ break;
}
}
@@ -1541,8 +1587,8 @@ int msm_vfe_register_entities(struct vfe_device *vfe,
}
video_out->ops = &vfe->video_ops;
- if (vfe->camss->version == CAMSS_845 ||
- vfe->camss->version == CAMSS_8250)
+ if (vfe->camss->res->version == CAMSS_845 ||
+ vfe->camss->res->version == CAMSS_8250)
video_out->bpl_alignment = 16;
else
video_out->bpl_alignment = 8;
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.h b/drivers/media/platform/qcom/camss/camss-vfe.h
index cbc314c4e244..09baded0dcdd 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.h
+++ b/drivers/media/platform/qcom/camss/camss-vfe.h
@@ -52,9 +52,7 @@ enum vfe_line_id {
VFE_LINE_RDI0 = 0,
VFE_LINE_RDI1 = 1,
VFE_LINE_RDI2 = 2,
- VFE_LINE_NUM_GEN2 = 3,
VFE_LINE_PIX = 3,
- VFE_LINE_NUM_GEN1 = 4,
VFE_LINE_NUM_MAX = 4
};
@@ -116,6 +114,7 @@ struct vfe_hw_ops {
int (*vfe_enable)(struct vfe_line *line);
int (*vfe_halt)(struct vfe_device *vfe);
void (*violation_read)(struct vfe_device *vfe);
+ void (*vfe_wm_stop)(struct vfe_device *vfe, u8 wm);
};
struct vfe_isr_ops {
@@ -153,10 +152,10 @@ struct vfe_device {
struct camss_video_ops video_ops;
};
-struct resources;
+struct camss_subdev_resources;
int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
- const struct resources *res, u8 id);
+ const struct camss_subdev_resources *res, u8 id);
int msm_vfe_register_entities(struct vfe_device *vfe,
struct v4l2_device *v4l2_dev);
@@ -194,6 +193,14 @@ int vfe_reserve_wm(struct vfe_device *vfe, enum vfe_line_id line_id);
*/
int vfe_reset(struct vfe_device *vfe);
+/*
+ * vfe_disable - Disable streaming on VFE line
+ * @line: VFE line
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int vfe_disable(struct vfe_line *line);
+
extern const struct vfe_hw_ops vfe_ops_4_1;
extern const struct vfe_hw_ops vfe_ops_4_7;
extern const struct vfe_hw_ops vfe_ops_4_8;
diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
index 8640db306026..a89da5ef4710 100644
--- a/drivers/media/platform/qcom/camss/camss-video.c
+++ b/drivers/media/platform/qcom/camss/camss-video.c
@@ -48,13 +48,13 @@ struct camss_format_info {
};
static const struct camss_format_info formats_rdi_8x16[] = {
- { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1,
+ { MEDIA_BUS_FMT_UYVY8_1X16, V4L2_PIX_FMT_UYVY, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
- { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1,
+ { MEDIA_BUS_FMT_VYUY8_1X16, V4L2_PIX_FMT_VYUY, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
- { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1,
+ { MEDIA_BUS_FMT_YUYV8_1X16, V4L2_PIX_FMT_YUYV, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
- { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_YVYU, 1,
+ { MEDIA_BUS_FMT_YVYU8_1X16, V4L2_PIX_FMT_YVYU, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
{ MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR8, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 8 } },
@@ -85,13 +85,13 @@ static const struct camss_format_info formats_rdi_8x16[] = {
};
static const struct camss_format_info formats_rdi_8x96[] = {
- { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1,
+ { MEDIA_BUS_FMT_UYVY8_1X16, V4L2_PIX_FMT_UYVY, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
- { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1,
+ { MEDIA_BUS_FMT_VYUY8_1X16, V4L2_PIX_FMT_VYUY, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
- { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1,
+ { MEDIA_BUS_FMT_YUYV8_1X16, V4L2_PIX_FMT_YUYV, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
- { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_YVYU, 1,
+ { MEDIA_BUS_FMT_YVYU8_1X16, V4L2_PIX_FMT_YVYU, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
{ MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR8, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 8 } },
@@ -134,13 +134,13 @@ static const struct camss_format_info formats_rdi_8x96[] = {
};
static const struct camss_format_info formats_rdi_845[] = {
- { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1,
+ { MEDIA_BUS_FMT_UYVY8_1X16, V4L2_PIX_FMT_UYVY, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
- { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1,
+ { MEDIA_BUS_FMT_VYUY8_1X16, V4L2_PIX_FMT_VYUY, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
- { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1,
+ { MEDIA_BUS_FMT_YUYV8_1X16, V4L2_PIX_FMT_YUYV, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
- { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_YVYU, 1,
+ { MEDIA_BUS_FMT_YVYU8_1X16, V4L2_PIX_FMT_YVYU, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
{ MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR8, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 8 } },
@@ -201,21 +201,21 @@ static const struct camss_format_info formats_pix_8x16[] = {
{ { 1, 1 } }, { { 2, 3 } }, { 8 } },
{ MEDIA_BUS_FMT_VYUY8_1_5X8, V4L2_PIX_FMT_NV21, 1,
{ { 1, 1 } }, { { 2, 3 } }, { 8 } },
- { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV16, 1,
+ { MEDIA_BUS_FMT_YUYV8_1X16, V4L2_PIX_FMT_NV16, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
- { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV16, 1,
+ { MEDIA_BUS_FMT_YVYU8_1X16, V4L2_PIX_FMT_NV16, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
- { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV16, 1,
+ { MEDIA_BUS_FMT_UYVY8_1X16, V4L2_PIX_FMT_NV16, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
- { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV16, 1,
+ { MEDIA_BUS_FMT_VYUY8_1X16, V4L2_PIX_FMT_NV16, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
- { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV61, 1,
+ { MEDIA_BUS_FMT_YUYV8_1X16, V4L2_PIX_FMT_NV61, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
- { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV61, 1,
+ { MEDIA_BUS_FMT_YVYU8_1X16, V4L2_PIX_FMT_NV61, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
- { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV61, 1,
+ { MEDIA_BUS_FMT_UYVY8_1X16, V4L2_PIX_FMT_NV61, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
- { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV61, 1,
+ { MEDIA_BUS_FMT_VYUY8_1X16, V4L2_PIX_FMT_NV61, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
};
@@ -236,29 +236,29 @@ static const struct camss_format_info formats_pix_8x96[] = {
{ { 1, 1 } }, { { 2, 3 } }, { 8 } },
{ MEDIA_BUS_FMT_VYUY8_1_5X8, V4L2_PIX_FMT_NV21, 1,
{ { 1, 1 } }, { { 2, 3 } }, { 8 } },
- { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV16, 1,
+ { MEDIA_BUS_FMT_YUYV8_1X16, V4L2_PIX_FMT_NV16, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
- { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV16, 1,
+ { MEDIA_BUS_FMT_YVYU8_1X16, V4L2_PIX_FMT_NV16, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
- { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV16, 1,
+ { MEDIA_BUS_FMT_UYVY8_1X16, V4L2_PIX_FMT_NV16, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
- { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV16, 1,
+ { MEDIA_BUS_FMT_VYUY8_1X16, V4L2_PIX_FMT_NV16, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
- { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV61, 1,
+ { MEDIA_BUS_FMT_YUYV8_1X16, V4L2_PIX_FMT_NV61, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
- { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV61, 1,
+ { MEDIA_BUS_FMT_YVYU8_1X16, V4L2_PIX_FMT_NV61, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
- { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV61, 1,
+ { MEDIA_BUS_FMT_UYVY8_1X16, V4L2_PIX_FMT_NV61, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
- { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV61, 1,
+ { MEDIA_BUS_FMT_VYUY8_1X16, V4L2_PIX_FMT_NV61, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
- { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1,
+ { MEDIA_BUS_FMT_UYVY8_1X16, V4L2_PIX_FMT_UYVY, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
- { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1,
+ { MEDIA_BUS_FMT_VYUY8_1X16, V4L2_PIX_FMT_VYUY, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
- { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1,
+ { MEDIA_BUS_FMT_YUYV8_1X16, V4L2_PIX_FMT_YUYV, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
- { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_YVYU, 1,
+ { MEDIA_BUS_FMT_YVYU8_1X16, V4L2_PIX_FMT_YVYU, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
};
@@ -1006,7 +1006,8 @@ int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
mutex_init(&video->lock);
- if (video->camss->version == CAMSS_8x16) {
+ switch (video->camss->res->version) {
+ case CAMSS_8x16:
if (is_pix) {
video->formats = formats_pix_8x16;
video->nformats = ARRAY_SIZE(formats_pix_8x16);
@@ -1014,8 +1015,9 @@ int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
video->formats = formats_rdi_8x16;
video->nformats = ARRAY_SIZE(formats_rdi_8x16);
}
- } else if (video->camss->version == CAMSS_8x96 ||
- video->camss->version == CAMSS_660) {
+ break;
+ case CAMSS_8x96:
+ case CAMSS_660:
if (is_pix) {
video->formats = formats_pix_8x96;
video->nformats = ARRAY_SIZE(formats_pix_8x96);
@@ -1023,13 +1025,12 @@ int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
video->formats = formats_rdi_8x96;
video->nformats = ARRAY_SIZE(formats_rdi_8x96);
}
- } else if (video->camss->version == CAMSS_845 ||
- video->camss->version == CAMSS_8250) {
+ break;
+ case CAMSS_845:
+ case CAMSS_8250:
video->formats = formats_rdi_845;
video->nformats = ARRAY_SIZE(formats_rdi_845);
- } else {
- ret = -EINVAL;
- goto error_video_register;
+ break;
}
ret = msm_video_init_format(video);
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index f11dc59135a5..8e78dd8d5961 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
@@ -31,7 +32,7 @@
#define CAMSS_CLOCK_MARGIN_NUMERATOR 105
#define CAMSS_CLOCK_MARGIN_DENOMINATOR 100
-static const struct resources csiphy_res_8x16[] = {
+static const struct camss_subdev_resources csiphy_res_8x16[] = {
/* CSIPHY0 */
{
.regulators = {},
@@ -41,7 +42,8 @@ static const struct resources csiphy_res_8x16[] = {
{ 0 },
{ 100000000, 200000000 } },
.reg = { "csiphy0", "csiphy0_clk_mux" },
- .interrupt = { "csiphy0" }
+ .interrupt = { "csiphy0" },
+ .ops = &csiphy_ops_2ph_1_0
},
/* CSIPHY1 */
@@ -53,11 +55,12 @@ static const struct resources csiphy_res_8x16[] = {
{ 0 },
{ 100000000, 200000000 } },
.reg = { "csiphy1", "csiphy1_clk_mux" },
- .interrupt = { "csiphy1" }
+ .interrupt = { "csiphy1" },
+ .ops = &csiphy_ops_2ph_1_0
}
};
-static const struct resources csid_res_8x16[] = {
+static const struct camss_subdev_resources csid_res_8x16[] = {
/* CSID0 */
{
.regulators = { "vdda" },
@@ -72,7 +75,8 @@ static const struct resources csid_res_8x16[] = {
{ 0 },
{ 0 } },
.reg = { "csid0" },
- .interrupt = { "csid0" }
+ .interrupt = { "csid0" },
+ .ops = &csid_ops_4_1,
},
/* CSID1 */
@@ -89,22 +93,23 @@ static const struct resources csid_res_8x16[] = {
{ 0 },
{ 0 } },
.reg = { "csid1" },
- .interrupt = { "csid1" }
+ .interrupt = { "csid1" },
+ .ops = &csid_ops_4_1,
},
};
-static const struct resources_ispif ispif_res_8x16 = {
+static const struct camss_subdev_resources ispif_res_8x16 = {
/* ISPIF */
.clock = { "top_ahb", "ahb", "ispif_ahb",
"csi0", "csi0_pix", "csi0_rdi",
"csi1", "csi1_pix", "csi1_rdi" },
.clock_for_reset = { "vfe0", "csi_vfe0" },
.reg = { "ispif", "csi_clk_mux" },
- .interrupt = "ispif"
+ .interrupt = { "ispif" }
};
-static const struct resources vfe_res_8x16[] = {
+static const struct camss_subdev_resources vfe_res_8x16[] = {
/* VFE0 */
{
.regulators = {},
@@ -122,11 +127,13 @@ static const struct resources vfe_res_8x16[] = {
{ 0 },
{ 0 } },
.reg = { "vfe0" },
- .interrupt = { "vfe0" }
+ .interrupt = { "vfe0" },
+ .line_num = 3,
+ .ops = &vfe_ops_4_1
}
};
-static const struct resources csiphy_res_8x96[] = {
+static const struct camss_subdev_resources csiphy_res_8x96[] = {
/* CSIPHY0 */
{
.regulators = {},
@@ -136,7 +143,8 @@ static const struct resources csiphy_res_8x96[] = {
{ 0 },
{ 100000000, 200000000, 266666667 } },
.reg = { "csiphy0", "csiphy0_clk_mux" },
- .interrupt = { "csiphy0" }
+ .interrupt = { "csiphy0" },
+ .ops = &csiphy_ops_3ph_1_0
},
/* CSIPHY1 */
@@ -148,7 +156,8 @@ static const struct resources csiphy_res_8x96[] = {
{ 0 },
{ 100000000, 200000000, 266666667 } },
.reg = { "csiphy1", "csiphy1_clk_mux" },
- .interrupt = { "csiphy1" }
+ .interrupt = { "csiphy1" },
+ .ops = &csiphy_ops_3ph_1_0
},
/* CSIPHY2 */
@@ -160,11 +169,12 @@ static const struct resources csiphy_res_8x96[] = {
{ 0 },
{ 100000000, 200000000, 266666667 } },
.reg = { "csiphy2", "csiphy2_clk_mux" },
- .interrupt = { "csiphy2" }
+ .interrupt = { "csiphy2" },
+ .ops = &csiphy_ops_3ph_1_0
}
};
-static const struct resources csid_res_8x96[] = {
+static const struct camss_subdev_resources csid_res_8x96[] = {
/* CSID0 */
{
.regulators = { "vdda" },
@@ -179,7 +189,8 @@ static const struct resources csid_res_8x96[] = {
{ 0 },
{ 0 } },
.reg = { "csid0" },
- .interrupt = { "csid0" }
+ .interrupt = { "csid0" },
+ .ops = &csid_ops_4_7,
},
/* CSID1 */
@@ -196,7 +207,8 @@ static const struct resources csid_res_8x96[] = {
{ 0 },
{ 0 } },
.reg = { "csid1" },
- .interrupt = { "csid1" }
+ .interrupt = { "csid1" },
+ .ops = &csid_ops_4_7,
},
/* CSID2 */
@@ -213,7 +225,8 @@ static const struct resources csid_res_8x96[] = {
{ 0 },
{ 0 } },
.reg = { "csid2" },
- .interrupt = { "csid2" }
+ .interrupt = { "csid2" },
+ .ops = &csid_ops_4_7,
},
/* CSID3 */
@@ -230,11 +243,12 @@ static const struct resources csid_res_8x96[] = {
{ 0 },
{ 0 } },
.reg = { "csid3" },
- .interrupt = { "csid3" }
+ .interrupt = { "csid3" },
+ .ops = &csid_ops_4_7,
}
};
-static const struct resources_ispif ispif_res_8x96 = {
+static const struct camss_subdev_resources ispif_res_8x96 = {
/* ISPIF */
.clock = { "top_ahb", "ahb", "ispif_ahb",
"csi0", "csi0_pix", "csi0_rdi",
@@ -243,10 +257,10 @@ static const struct resources_ispif ispif_res_8x96 = {
"csi3", "csi3_pix", "csi3_rdi" },
.clock_for_reset = { "vfe0", "csi_vfe0", "vfe1", "csi_vfe1" },
.reg = { "ispif", "csi_clk_mux" },
- .interrupt = "ispif"
+ .interrupt = { "ispif" }
};
-static const struct resources vfe_res_8x96[] = {
+static const struct camss_subdev_resources vfe_res_8x96[] = {
/* VFE0 */
{
.regulators = {},
@@ -262,7 +276,9 @@ static const struct resources vfe_res_8x96[] = {
{ 0 },
{ 0 } },
.reg = { "vfe0" },
- .interrupt = { "vfe0" }
+ .interrupt = { "vfe0" },
+ .line_num = 3,
+ .ops = &vfe_ops_4_7
},
/* VFE1 */
@@ -280,11 +296,13 @@ static const struct resources vfe_res_8x96[] = {
{ 0 },
{ 0 } },
.reg = { "vfe1" },
- .interrupt = { "vfe1" }
+ .interrupt = { "vfe1" },
+ .line_num = 3,
+ .ops = &vfe_ops_4_7
}
};
-static const struct resources csiphy_res_660[] = {
+static const struct camss_subdev_resources csiphy_res_660[] = {
/* CSIPHY0 */
{
.regulators = {},
@@ -296,7 +314,8 @@ static const struct resources csiphy_res_660[] = {
{ 100000000, 200000000, 269333333 },
{ 0 } },
.reg = { "csiphy0", "csiphy0_clk_mux" },
- .interrupt = { "csiphy0" }
+ .interrupt = { "csiphy0" },
+ .ops = &csiphy_ops_3ph_1_0
},
/* CSIPHY1 */
@@ -310,7 +329,8 @@ static const struct resources csiphy_res_660[] = {
{ 100000000, 200000000, 269333333 },
{ 0 } },
.reg = { "csiphy1", "csiphy1_clk_mux" },
- .interrupt = { "csiphy1" }
+ .interrupt = { "csiphy1" },
+ .ops = &csiphy_ops_3ph_1_0
},
/* CSIPHY2 */
@@ -324,11 +344,12 @@ static const struct resources csiphy_res_660[] = {
{ 100000000, 200000000, 269333333 },
{ 0 } },
.reg = { "csiphy2", "csiphy2_clk_mux" },
- .interrupt = { "csiphy2" }
+ .interrupt = { "csiphy2" },
+ .ops = &csiphy_ops_3ph_1_0
}
};
-static const struct resources csid_res_660[] = {
+static const struct camss_subdev_resources csid_res_660[] = {
/* CSID0 */
{
.regulators = { "vdda", "vdd_sec" },
@@ -346,7 +367,8 @@ static const struct resources csid_res_660[] = {
{ 0 },
{ 0 } },
.reg = { "csid0" },
- .interrupt = { "csid0" }
+ .interrupt = { "csid0" },
+ .ops = &csid_ops_4_7,
},
/* CSID1 */
@@ -366,7 +388,8 @@ static const struct resources csid_res_660[] = {
{ 0 },
{ 0 } },
.reg = { "csid1" },
- .interrupt = { "csid1" }
+ .interrupt = { "csid1" },
+ .ops = &csid_ops_4_7,
},
/* CSID2 */
@@ -386,7 +409,8 @@ static const struct resources csid_res_660[] = {
{ 0 },
{ 0 } },
.reg = { "csid2" },
- .interrupt = { "csid2" }
+ .interrupt = { "csid2" },
+ .ops = &csid_ops_4_7,
},
/* CSID3 */
@@ -406,11 +430,12 @@ static const struct resources csid_res_660[] = {
{ 0 },
{ 0 } },
.reg = { "csid3" },
- .interrupt = { "csid3" }
+ .interrupt = { "csid3" },
+ .ops = &csid_ops_4_7,
}
};
-static const struct resources_ispif ispif_res_660 = {
+static const struct camss_subdev_resources ispif_res_660 = {
/* ISPIF */
.clock = { "top_ahb", "ahb", "ispif_ahb",
"csi0", "csi0_pix", "csi0_rdi",
@@ -419,10 +444,10 @@ static const struct resources_ispif ispif_res_660 = {
"csi3", "csi3_pix", "csi3_rdi" },
.clock_for_reset = { "vfe0", "csi_vfe0", "vfe1", "csi_vfe1" },
.reg = { "ispif", "csi_clk_mux" },
- .interrupt = "ispif"
+ .interrupt = { "ispif" }
};
-static const struct resources vfe_res_660[] = {
+static const struct camss_subdev_resources vfe_res_660[] = {
/* VFE0 */
{
.regulators = {},
@@ -441,7 +466,9 @@ static const struct resources vfe_res_660[] = {
{ 0 },
{ 0 } },
.reg = { "vfe0" },
- .interrupt = { "vfe0" }
+ .interrupt = { "vfe0" },
+ .line_num = 3,
+ .ops = &vfe_ops_4_8
},
/* VFE1 */
@@ -462,11 +489,13 @@ static const struct resources vfe_res_660[] = {
{ 0 },
{ 0 } },
.reg = { "vfe1" },
- .interrupt = { "vfe1" }
+ .interrupt = { "vfe1" },
+ .line_num = 3,
+ .ops = &vfe_ops_4_8
}
};
-static const struct resources csiphy_res_845[] = {
+static const struct camss_subdev_resources csiphy_res_845[] = {
/* CSIPHY0 */
{
.regulators = {},
@@ -482,7 +511,8 @@ static const struct resources csiphy_res_845[] = {
{ 0 },
{ 19200000, 240000000, 269333333 } },
.reg = { "csiphy0" },
- .interrupt = { "csiphy0" }
+ .interrupt = { "csiphy0" },
+ .ops = &csiphy_ops_3ph_1_0
},
/* CSIPHY1 */
@@ -500,7 +530,8 @@ static const struct resources csiphy_res_845[] = {
{ 0 },
{ 19200000, 240000000, 269333333 } },
.reg = { "csiphy1" },
- .interrupt = { "csiphy1" }
+ .interrupt = { "csiphy1" },
+ .ops = &csiphy_ops_3ph_1_0
},
/* CSIPHY2 */
@@ -518,7 +549,8 @@ static const struct resources csiphy_res_845[] = {
{ 0 },
{ 19200000, 240000000, 269333333 } },
.reg = { "csiphy2" },
- .interrupt = { "csiphy2" }
+ .interrupt = { "csiphy2" },
+ .ops = &csiphy_ops_3ph_1_0
},
/* CSIPHY3 */
@@ -536,11 +568,12 @@ static const struct resources csiphy_res_845[] = {
{ 0 },
{ 19200000, 240000000, 269333333 } },
.reg = { "csiphy3" },
- .interrupt = { "csiphy3" }
+ .interrupt = { "csiphy3" },
+ .ops = &csiphy_ops_3ph_1_0
}
};
-static const struct resources csid_res_845[] = {
+static const struct camss_subdev_resources csid_res_845[] = {
/* CSID0 */
{
.regulators = { "vdda-phy", "vdda-pll" },
@@ -558,7 +591,8 @@ static const struct resources csid_res_845[] = {
{ 19200000, 75000000, 384000000, 538666667 },
{ 384000000 } },
.reg = { "csid0" },
- .interrupt = { "csid0" }
+ .interrupt = { "csid0" },
+ .ops = &csid_ops_gen2
},
/* CSID1 */
@@ -578,7 +612,8 @@ static const struct resources csid_res_845[] = {
{ 19200000, 75000000, 384000000, 538666667 },
{ 384000000 } },
.reg = { "csid1" },
- .interrupt = { "csid1" }
+ .interrupt = { "csid1" },
+ .ops = &csid_ops_gen2
},
/* CSID2 */
@@ -598,11 +633,12 @@ static const struct resources csid_res_845[] = {
{ 19200000, 75000000, 384000000, 538666667 },
{ 384000000 } },
.reg = { "csid2" },
- .interrupt = { "csid2" }
+ .interrupt = { "csid2" },
+ .ops = &csid_ops_gen2
}
};
-static const struct resources vfe_res_845[] = {
+static const struct camss_subdev_resources vfe_res_845[] = {
/* VFE0 */
{
.regulators = {},
@@ -620,7 +656,9 @@ static const struct resources vfe_res_845[] = {
{ 19200000, 75000000, 384000000, 538666667 },
{ 384000000 } },
.reg = { "vfe0" },
- .interrupt = { "vfe0" }
+ .interrupt = { "vfe0" },
+ .line_num = 4,
+ .ops = &vfe_ops_170
},
/* VFE1 */
@@ -640,7 +678,9 @@ static const struct resources vfe_res_845[] = {
{ 19200000, 75000000, 384000000, 538666667 },
{ 384000000 } },
.reg = { "vfe1" },
- .interrupt = { "vfe1" }
+ .interrupt = { "vfe1" },
+ .line_num = 4,
+ .ops = &vfe_ops_170
},
/* VFE-lite */
@@ -659,11 +699,13 @@ static const struct resources vfe_res_845[] = {
{ 19200000, 75000000, 384000000, 538666667 },
{ 384000000 } },
.reg = { "vfe_lite" },
- .interrupt = { "vfe_lite" }
+ .interrupt = { "vfe_lite" },
+ .line_num = 4,
+ .ops = &vfe_ops_170
}
};
-static const struct resources csiphy_res_8250[] = {
+static const struct camss_subdev_resources csiphy_res_8250[] = {
/* CSIPHY0 */
{
.regulators = {},
@@ -671,7 +713,8 @@ static const struct resources csiphy_res_8250[] = {
.clock_rate = { { 400000000 },
{ 300000000 } },
.reg = { "csiphy0" },
- .interrupt = { "csiphy0" }
+ .interrupt = { "csiphy0" },
+ .ops = &csiphy_ops_3ph_1_0
},
/* CSIPHY1 */
{
@@ -680,7 +723,8 @@ static const struct resources csiphy_res_8250[] = {
.clock_rate = { { 400000000 },
{ 300000000 } },
.reg = { "csiphy1" },
- .interrupt = { "csiphy1" }
+ .interrupt = { "csiphy1" },
+ .ops = &csiphy_ops_3ph_1_0
},
/* CSIPHY2 */
{
@@ -689,7 +733,8 @@ static const struct resources csiphy_res_8250[] = {
.clock_rate = { { 400000000 },
{ 300000000 } },
.reg = { "csiphy2" },
- .interrupt = { "csiphy2" }
+ .interrupt = { "csiphy2" },
+ .ops = &csiphy_ops_3ph_1_0
},
/* CSIPHY3 */
{
@@ -698,7 +743,8 @@ static const struct resources csiphy_res_8250[] = {
.clock_rate = { { 400000000 },
{ 300000000 } },
.reg = { "csiphy3" },
- .interrupt = { "csiphy3" }
+ .interrupt = { "csiphy3" },
+ .ops = &csiphy_ops_3ph_1_0
},
/* CSIPHY4 */
{
@@ -707,7 +753,8 @@ static const struct resources csiphy_res_8250[] = {
.clock_rate = { { 400000000 },
{ 300000000 } },
.reg = { "csiphy4" },
- .interrupt = { "csiphy4" }
+ .interrupt = { "csiphy4" },
+ .ops = &csiphy_ops_3ph_1_0
},
/* CSIPHY5 */
{
@@ -716,11 +763,12 @@ static const struct resources csiphy_res_8250[] = {
.clock_rate = { { 400000000 },
{ 300000000 } },
.reg = { "csiphy5" },
- .interrupt = { "csiphy5" }
+ .interrupt = { "csiphy5" },
+ .ops = &csiphy_ops_3ph_1_0
}
};
-static const struct resources csid_res_8250[] = {
+static const struct camss_subdev_resources csid_res_8250[] = {
/* CSID0 */
{
.regulators = { "vdda-phy", "vdda-pll" },
@@ -731,7 +779,8 @@ static const struct resources csid_res_8250[] = {
{ 100000000, 200000000, 300000000, 400000000 },
{ 0 } },
.reg = { "csid0" },
- .interrupt = { "csid0" }
+ .interrupt = { "csid0" },
+ .ops = &csid_ops_gen2
},
/* CSID1 */
{
@@ -743,7 +792,8 @@ static const struct resources csid_res_8250[] = {
{ 100000000, 200000000, 300000000, 400000000 },
{ 0 } },
.reg = { "csid1" },
- .interrupt = { "csid1" }
+ .interrupt = { "csid1" },
+ .ops = &csid_ops_gen2
},
/* CSID2 */
{
@@ -754,7 +804,8 @@ static const struct resources csid_res_8250[] = {
{ 400000000, 480000000 },
{ 0 } },
.reg = { "csid2" },
- .interrupt = { "csid2" }
+ .interrupt = { "csid2" },
+ .ops = &csid_ops_gen2
},
/* CSID3 */
{
@@ -765,11 +816,12 @@ static const struct resources csid_res_8250[] = {
{ 400000000, 480000000 },
{ 0 } },
.reg = { "csid3" },
- .interrupt = { "csid3" }
+ .interrupt = { "csid3" },
+ .ops = &csid_ops_gen2
}
};
-static const struct resources vfe_res_8250[] = {
+static const struct camss_subdev_resources vfe_res_8250[] = {
/* VFE0 */
{
.regulators = {},
@@ -786,7 +838,9 @@ static const struct resources vfe_res_8250[] = {
{ 0 },
{ 0 } },
.reg = { "vfe0" },
- .interrupt = { "vfe0" }
+ .interrupt = { "vfe0" },
+ .line_num = 3,
+ .ops = &vfe_ops_480
},
/* VFE1 */
{
@@ -804,7 +858,9 @@ static const struct resources vfe_res_8250[] = {
{ 0 },
{ 0 } },
.reg = { "vfe1" },
- .interrupt = { "vfe1" }
+ .interrupt = { "vfe1" },
+ .line_num = 3,
+ .ops = &vfe_ops_480
},
/* VFE2 (lite) */
{
@@ -821,7 +877,9 @@ static const struct resources vfe_res_8250[] = {
{ 400000000, 480000000 },
{ 0 } },
.reg = { "vfe_lite0" },
- .interrupt = { "vfe_lite0" }
+ .interrupt = { "vfe_lite0" },
+ .line_num = 4,
+ .ops = &vfe_ops_480
},
/* VFE3 (lite) */
{
@@ -838,7 +896,9 @@ static const struct resources vfe_res_8250[] = {
{ 400000000, 480000000 },
{ 0 } },
.reg = { "vfe_lite1" },
- .interrupt = { "vfe_lite1" }
+ .interrupt = { "vfe_lite1" },
+ .line_num = 4,
+ .ops = &vfe_ops_480
},
};
@@ -1004,7 +1064,7 @@ int camss_pm_domain_on(struct camss *camss, int id)
{
int ret = 0;
- if (id < camss->vfe_num) {
+ if (id < camss->res->vfe_num) {
struct vfe_device *vfe = &camss->vfe[id];
ret = vfe->ops->pm_domain_on(vfe);
@@ -1015,7 +1075,7 @@ int camss_pm_domain_on(struct camss *camss, int id)
void camss_pm_domain_off(struct camss *camss, int id)
{
- if (id < camss->vfe_num) {
+ if (id < camss->res->vfe_num) {
struct vfe_device *vfe = &camss->vfe[id];
vfe->ops->pm_domain_off(vfe);
@@ -1120,47 +1180,13 @@ err_cleanup:
*/
static int camss_init_subdevices(struct camss *camss)
{
- const struct resources *csiphy_res;
- const struct resources *csid_res;
- const struct resources_ispif *ispif_res;
- const struct resources *vfe_res;
+ const struct camss_resources *res = camss->res;
unsigned int i;
int ret;
- if (camss->version == CAMSS_8x16) {
- csiphy_res = csiphy_res_8x16;
- csid_res = csid_res_8x16;
- ispif_res = &ispif_res_8x16;
- vfe_res = vfe_res_8x16;
- } else if (camss->version == CAMSS_8x96) {
- csiphy_res = csiphy_res_8x96;
- csid_res = csid_res_8x96;
- ispif_res = &ispif_res_8x96;
- vfe_res = vfe_res_8x96;
- } else if (camss->version == CAMSS_660) {
- csiphy_res = csiphy_res_660;
- csid_res = csid_res_660;
- ispif_res = &ispif_res_660;
- vfe_res = vfe_res_660;
- } else if (camss->version == CAMSS_845) {
- csiphy_res = csiphy_res_845;
- csid_res = csid_res_845;
- /* Titan VFEs don't have an ISPIF */
- ispif_res = NULL;
- vfe_res = vfe_res_845;
- } else if (camss->version == CAMSS_8250) {
- csiphy_res = csiphy_res_8250;
- csid_res = csid_res_8250;
- /* Titan VFEs don't have an ISPIF */
- ispif_res = NULL;
- vfe_res = vfe_res_8250;
- } else {
- return -EINVAL;
- }
-
- for (i = 0; i < camss->csiphy_num; i++) {
+ for (i = 0; i < camss->res->csiphy_num; i++) {
ret = msm_csiphy_subdev_init(camss, &camss->csiphy[i],
- &csiphy_res[i], i);
+ &res->csiphy_res[i], i);
if (ret < 0) {
dev_err(camss->dev,
"Failed to init csiphy%d sub-device: %d\n",
@@ -1170,9 +1196,9 @@ static int camss_init_subdevices(struct camss *camss)
}
/* note: SM8250 requires VFE to be initialized before CSID */
- for (i = 0; i < camss->vfe_num + camss->vfe_lite_num; i++) {
+ for (i = 0; i < camss->vfe_total_num; i++) {
ret = msm_vfe_subdev_init(camss, &camss->vfe[i],
- &vfe_res[i], i);
+ &res->vfe_res[i], i);
if (ret < 0) {
dev_err(camss->dev,
"Fail to init vfe%d sub-device: %d\n", i, ret);
@@ -1180,9 +1206,9 @@ static int camss_init_subdevices(struct camss *camss)
}
}
- for (i = 0; i < camss->csid_num; i++) {
+ for (i = 0; i < camss->res->csid_num; i++) {
ret = msm_csid_subdev_init(camss, &camss->csid[i],
- &csid_res[i], i);
+ &res->csid_res[i], i);
if (ret < 0) {
dev_err(camss->dev,
"Failed to init csid%d sub-device: %d\n",
@@ -1191,7 +1217,7 @@ static int camss_init_subdevices(struct camss *camss)
}
}
- ret = msm_ispif_subdev_init(camss, ispif_res);
+ ret = msm_ispif_subdev_init(camss, res->ispif_res);
if (ret < 0) {
dev_err(camss->dev, "Failed to init ispif sub-device: %d\n",
ret);
@@ -1212,7 +1238,7 @@ static int camss_register_entities(struct camss *camss)
int i, j, k;
int ret;
- for (i = 0; i < camss->csiphy_num; i++) {
+ for (i = 0; i < camss->res->csiphy_num; i++) {
ret = msm_csiphy_register_entity(&camss->csiphy[i],
&camss->v4l2_dev);
if (ret < 0) {
@@ -1223,7 +1249,7 @@ static int camss_register_entities(struct camss *camss)
}
}
- for (i = 0; i < camss->csid_num; i++) {
+ for (i = 0; i < camss->res->csid_num; i++) {
ret = msm_csid_register_entity(&camss->csid[i],
&camss->v4l2_dev);
if (ret < 0) {
@@ -1242,7 +1268,7 @@ static int camss_register_entities(struct camss *camss)
goto err_reg_ispif;
}
- for (i = 0; i < camss->vfe_num + camss->vfe_lite_num; i++) {
+ for (i = 0; i < camss->vfe_total_num; i++) {
ret = msm_vfe_register_entities(&camss->vfe[i],
&camss->v4l2_dev);
if (ret < 0) {
@@ -1253,8 +1279,8 @@ static int camss_register_entities(struct camss *camss)
}
}
- for (i = 0; i < camss->csiphy_num; i++) {
- for (j = 0; j < camss->csid_num; j++) {
+ for (i = 0; i < camss->res->csiphy_num; i++) {
+ for (j = 0; j < camss->res->csid_num; j++) {
ret = media_create_pad_link(
&camss->csiphy[i].subdev.entity,
MSM_CSIPHY_PAD_SRC,
@@ -1273,7 +1299,7 @@ static int camss_register_entities(struct camss *camss)
}
if (camss->ispif) {
- for (i = 0; i < camss->csid_num; i++) {
+ for (i = 0; i < camss->res->csid_num; i++) {
for (j = 0; j < camss->ispif->line_num; j++) {
ret = media_create_pad_link(
&camss->csid[i].subdev.entity,
@@ -1293,7 +1319,7 @@ static int camss_register_entities(struct camss *camss)
}
for (i = 0; i < camss->ispif->line_num; i++)
- for (k = 0; k < camss->vfe_num; k++)
+ for (k = 0; k < camss->res->vfe_num; k++)
for (j = 0; j < camss->vfe[k].line_num; j++) {
struct v4l2_subdev *ispif = &camss->ispif->line[i].subdev;
struct v4l2_subdev *vfe = &camss->vfe[k].line[j].subdev;
@@ -1313,8 +1339,8 @@ static int camss_register_entities(struct camss *camss)
}
}
} else {
- for (i = 0; i < camss->csid_num; i++)
- for (k = 0; k < camss->vfe_num + camss->vfe_lite_num; k++)
+ for (i = 0; i < camss->res->csid_num; i++)
+ for (k = 0; k < camss->vfe_total_num; k++)
for (j = 0; j < camss->vfe[k].line_num; j++) {
struct v4l2_subdev *csid = &camss->csid[i].subdev;
struct v4l2_subdev *vfe = &camss->vfe[k].line[j].subdev;
@@ -1338,7 +1364,7 @@ static int camss_register_entities(struct camss *camss)
return 0;
err_link:
- i = camss->vfe_num + camss->vfe_lite_num;
+ i = camss->vfe_total_num;
err_reg_vfe:
for (i--; i >= 0; i--)
msm_vfe_unregister_entities(&camss->vfe[i]);
@@ -1346,12 +1372,12 @@ err_reg_vfe:
err_reg_ispif:
msm_ispif_unregister_entities(camss->ispif);
- i = camss->csid_num;
+ i = camss->res->csid_num;
err_reg_csid:
for (i--; i >= 0; i--)
msm_csid_unregister_entity(&camss->csid[i]);
- i = camss->csiphy_num;
+ i = camss->res->csiphy_num;
err_reg_csiphy:
for (i--; i >= 0; i--)
msm_csiphy_unregister_entity(&camss->csiphy[i]);
@@ -1369,15 +1395,15 @@ static void camss_unregister_entities(struct camss *camss)
{
unsigned int i;
- for (i = 0; i < camss->csiphy_num; i++)
+ for (i = 0; i < camss->res->csiphy_num; i++)
msm_csiphy_unregister_entity(&camss->csiphy[i]);
- for (i = 0; i < camss->csid_num; i++)
+ for (i = 0; i < camss->res->csid_num; i++)
msm_csid_unregister_entity(&camss->csid[i]);
msm_ispif_unregister_entities(camss->ispif);
- for (i = 0; i < camss->vfe_num + camss->vfe_lite_num; i++)
+ for (i = 0; i < camss->vfe_total_num; i++)
msm_vfe_unregister_entities(&camss->vfe[i]);
}
@@ -1496,7 +1522,7 @@ static int camss_configure_pd(struct camss *camss)
}
}
- if (i > camss->vfe_num) {
+ if (i > camss->res->vfe_num) {
camss->genpd_link[i - 1] = device_link_add(camss->dev, camss->genpd[i - 1],
DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
@@ -1518,26 +1544,34 @@ fail_pm:
static int camss_icc_get(struct camss *camss)
{
const struct resources_icc *icc_res;
- int nbr_icc_paths = 0;
int i;
- if (camss->version == CAMSS_8250) {
- icc_res = &icc_res_sm8250[0];
- nbr_icc_paths = ICC_SM8250_COUNT;
- }
+ icc_res = camss->res->icc_res;
- for (i = 0; i < nbr_icc_paths; i++) {
+ for (i = 0; i < camss->res->icc_path_num; i++) {
camss->icc_path[i] = devm_of_icc_get(camss->dev,
icc_res[i].name);
if (IS_ERR(camss->icc_path[i]))
return PTR_ERR(camss->icc_path[i]);
-
- camss->icc_bw_tbl[i] = icc_res[i].icc_bw_tbl;
}
return 0;
}
+static void camss_genpd_cleanup(struct camss *camss)
+{
+ int i;
+
+ if (camss->genpd_num == 1)
+ return;
+
+ if (camss->genpd_num > camss->res->vfe_num)
+ device_link_del(camss->genpd_link[camss->genpd_num - 1]);
+
+ for (i = 0; i < camss->genpd_num; i++)
+ dev_pm_domain_detach(camss->genpd[i], true);
+}
+
/*
* camss_probe - Probe CAMSS platform device
* @pdev: Pointer to CAMSS platform device
@@ -1548,84 +1582,59 @@ static int camss_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct camss *camss;
- int num_subdevs, ret;
+ int num_subdevs;
+ int ret;
camss = devm_kzalloc(dev, sizeof(*camss), GFP_KERNEL);
if (!camss)
return -ENOMEM;
+ camss->res = of_device_get_match_data(dev);
+
atomic_set(&camss->ref_count, 0);
camss->dev = dev;
platform_set_drvdata(pdev, camss);
- if (of_device_is_compatible(dev->of_node, "qcom,msm8916-camss")) {
- camss->version = CAMSS_8x16;
- camss->csiphy_num = 2;
- camss->csid_num = 2;
- camss->vfe_num = 1;
- } else if (of_device_is_compatible(dev->of_node,
- "qcom,msm8996-camss")) {
- camss->version = CAMSS_8x96;
- camss->csiphy_num = 3;
- camss->csid_num = 4;
- camss->vfe_num = 2;
- } else if (of_device_is_compatible(dev->of_node,
- "qcom,sdm660-camss")) {
- camss->version = CAMSS_660;
- camss->csiphy_num = 3;
- camss->csid_num = 4;
- camss->vfe_num = 2;
- } else if (of_device_is_compatible(dev->of_node,
- "qcom,sdm845-camss")) {
- camss->version = CAMSS_845;
- camss->csiphy_num = 4;
- camss->csid_num = 3;
- camss->vfe_num = 2;
- camss->vfe_lite_num = 1;
- } else if (of_device_is_compatible(dev->of_node,
- "qcom,sm8250-camss")) {
- camss->version = CAMSS_8250;
- camss->csiphy_num = 6;
- camss->csid_num = 4;
- camss->vfe_num = 2;
- camss->vfe_lite_num = 2;
- } else {
- return -EINVAL;
- }
-
- camss->csiphy = devm_kcalloc(dev, camss->csiphy_num,
+ camss->csiphy = devm_kcalloc(dev, camss->res->csiphy_num,
sizeof(*camss->csiphy), GFP_KERNEL);
if (!camss->csiphy)
return -ENOMEM;
- camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid),
+ camss->csid = devm_kcalloc(dev, camss->res->csid_num, sizeof(*camss->csid),
GFP_KERNEL);
if (!camss->csid)
return -ENOMEM;
- if (camss->version == CAMSS_8x16 ||
- camss->version == CAMSS_8x96) {
+ if (camss->res->version == CAMSS_8x16 ||
+ camss->res->version == CAMSS_8x96) {
camss->ispif = devm_kcalloc(dev, 1, sizeof(*camss->ispif), GFP_KERNEL);
if (!camss->ispif)
return -ENOMEM;
}
- camss->vfe = devm_kcalloc(dev, camss->vfe_num + camss->vfe_lite_num,
+ camss->vfe_total_num = camss->res->vfe_num + camss->res->vfe_lite_num;
+ camss->vfe = devm_kcalloc(dev, camss->vfe_total_num,
sizeof(*camss->vfe), GFP_KERNEL);
if (!camss->vfe)
return -ENOMEM;
ret = camss_icc_get(camss);
if (ret < 0)
- goto err_cleanup;
+ return ret;
+
+ ret = camss_configure_pd(camss);
+ if (ret < 0) {
+ dev_err(dev, "Failed to configure power domains: %d\n", ret);
+ return ret;
+ }
ret = camss_init_subdevices(camss);
if (ret < 0)
- goto err_cleanup;
+ goto err_genpd_cleanup;
ret = dma_set_mask_and_coherent(dev, 0xffffffff);
if (ret)
- goto err_cleanup;
+ goto err_genpd_cleanup;
camss->media_dev.dev = camss->dev;
strscpy(camss->media_dev.model, "Qualcomm Camera Subsystem",
@@ -1637,7 +1646,7 @@ static int camss_probe(struct platform_device *pdev)
ret = v4l2_device_register(camss->dev, &camss->v4l2_dev);
if (ret < 0) {
dev_err(dev, "Failed to register V4L2 device: %d\n", ret);
- goto err_cleanup;
+ goto err_genpd_cleanup;
}
v4l2_async_nf_init(&camss->notifier, &camss->v4l2_dev);
@@ -1645,12 +1654,12 @@ static int camss_probe(struct platform_device *pdev)
num_subdevs = camss_of_parse_ports(camss);
if (num_subdevs < 0) {
ret = num_subdevs;
- goto err_cleanup;
+ goto err_v4l2_device_unregister;
}
ret = camss_register_entities(camss);
if (ret < 0)
- goto err_cleanup;
+ goto err_v4l2_device_unregister;
if (num_subdevs) {
camss->notifier.ops = &camss_subdev_notifier_ops;
@@ -1678,43 +1687,28 @@ static int camss_probe(struct platform_device *pdev)
}
}
- ret = camss_configure_pd(camss);
- if (ret < 0) {
- dev_err(dev, "Failed to configure power domains: %d\n", ret);
- return ret;
- }
-
pm_runtime_enable(dev);
return 0;
err_register_subdevs:
camss_unregister_entities(camss);
-err_cleanup:
+err_v4l2_device_unregister:
v4l2_device_unregister(&camss->v4l2_dev);
v4l2_async_nf_cleanup(&camss->notifier);
+err_genpd_cleanup:
+ camss_genpd_cleanup(camss);
return ret;
}
void camss_delete(struct camss *camss)
{
- int i;
-
v4l2_device_unregister(&camss->v4l2_dev);
media_device_unregister(&camss->media_dev);
media_device_cleanup(&camss->media_dev);
pm_runtime_disable(camss->dev);
-
- if (camss->genpd_num == 1)
- return;
-
- if (camss->genpd_num > camss->vfe_num)
- device_link_del(camss->genpd_link[camss->genpd_num - 1]);
-
- for (i = 0; i < camss->genpd_num; i++)
- dev_pm_domain_detach(camss->genpd[i], true);
}
/*
@@ -1733,14 +1727,73 @@ static void camss_remove(struct platform_device *pdev)
if (atomic_read(&camss->ref_count) == 0)
camss_delete(camss);
+
+ camss_genpd_cleanup(camss);
}
+static const struct camss_resources msm8916_resources = {
+ .version = CAMSS_8x16,
+ .csiphy_res = csiphy_res_8x16,
+ .csid_res = csid_res_8x16,
+ .ispif_res = &ispif_res_8x16,
+ .vfe_res = vfe_res_8x16,
+ .csiphy_num = ARRAY_SIZE(csiphy_res_8x16),
+ .csid_num = ARRAY_SIZE(csid_res_8x16),
+ .vfe_num = ARRAY_SIZE(vfe_res_8x16),
+};
+
+static const struct camss_resources msm8996_resources = {
+ .version = CAMSS_8x96,
+ .csiphy_res = csiphy_res_8x96,
+ .csid_res = csid_res_8x96,
+ .ispif_res = &ispif_res_8x96,
+ .vfe_res = vfe_res_8x96,
+ .csiphy_num = ARRAY_SIZE(csiphy_res_8x96),
+ .csid_num = ARRAY_SIZE(csid_res_8x96),
+ .vfe_num = ARRAY_SIZE(vfe_res_8x96),
+};
+
+static const struct camss_resources sdm660_resources = {
+ .version = CAMSS_660,
+ .csiphy_res = csiphy_res_660,
+ .csid_res = csid_res_660,
+ .ispif_res = &ispif_res_660,
+ .vfe_res = vfe_res_660,
+ .csiphy_num = ARRAY_SIZE(csiphy_res_660),
+ .csid_num = ARRAY_SIZE(csid_res_660),
+ .vfe_num = ARRAY_SIZE(vfe_res_660),
+};
+
+static const struct camss_resources sdm845_resources = {
+ .version = CAMSS_845,
+ .csiphy_res = csiphy_res_845,
+ .csid_res = csid_res_845,
+ .vfe_res = vfe_res_845,
+ .csiphy_num = ARRAY_SIZE(csiphy_res_845),
+ .csid_num = ARRAY_SIZE(csid_res_845),
+ .vfe_num = 2,
+ .vfe_lite_num = 1,
+};
+
+static const struct camss_resources sm8250_resources = {
+ .version = CAMSS_8250,
+ .csiphy_res = csiphy_res_8250,
+ .csid_res = csid_res_8250,
+ .vfe_res = vfe_res_8250,
+ .icc_res = icc_res_sm8250,
+ .icc_path_num = ARRAY_SIZE(icc_res_sm8250),
+ .csiphy_num = ARRAY_SIZE(csiphy_res_8250),
+ .csid_num = ARRAY_SIZE(csid_res_8250),
+ .vfe_num = 2,
+ .vfe_lite_num = 2,
+};
+
static const struct of_device_id camss_dt_match[] = {
- { .compatible = "qcom,msm8916-camss" },
- { .compatible = "qcom,msm8996-camss" },
- { .compatible = "qcom,sdm660-camss" },
- { .compatible = "qcom,sdm845-camss" },
- { .compatible = "qcom,sm8250-camss" },
+ { .compatible = "qcom,msm8916-camss", .data = &msm8916_resources },
+ { .compatible = "qcom,msm8996-camss", .data = &msm8996_resources },
+ { .compatible = "qcom,sdm660-camss", .data = &sdm660_resources },
+ { .compatible = "qcom,sdm845-camss", .data = &sdm845_resources },
+ { .compatible = "qcom,sm8250-camss", .data = &sm8250_resources },
{ }
};
@@ -1749,14 +1802,10 @@ MODULE_DEVICE_TABLE(of, camss_dt_match);
static int __maybe_unused camss_runtime_suspend(struct device *dev)
{
struct camss *camss = dev_get_drvdata(dev);
- int nbr_icc_paths = 0;
int i;
int ret;
- if (camss->version == CAMSS_8250)
- nbr_icc_paths = ICC_SM8250_COUNT;
-
- for (i = 0; i < nbr_icc_paths; i++) {
+ for (i = 0; i < camss->res->icc_path_num; i++) {
ret = icc_set_bw(camss->icc_path[i], 0, 0);
if (ret)
return ret;
@@ -1768,17 +1817,14 @@ static int __maybe_unused camss_runtime_suspend(struct device *dev)
static int __maybe_unused camss_runtime_resume(struct device *dev)
{
struct camss *camss = dev_get_drvdata(dev);
- int nbr_icc_paths = 0;
+ const struct resources_icc *icc_res = camss->res->icc_res;
int i;
int ret;
- if (camss->version == CAMSS_8250)
- nbr_icc_paths = ICC_SM8250_COUNT;
-
- for (i = 0; i < nbr_icc_paths; i++) {
+ for (i = 0; i < camss->res->icc_path_num; i++) {
ret = icc_set_bw(camss->icc_path[i],
- camss->icc_bw_tbl[i].avg,
- camss->icc_bw_tbl[i].peak);
+ icc_res[i].icc_bw_tbl.avg,
+ icc_res[i].icc_bw_tbl.peak);
if (ret)
return ret;
}
diff --git a/drivers/media/platform/qcom/camss/camss.h b/drivers/media/platform/qcom/camss/camss.h
index f6c326cb853b..8acad7321c09 100644
--- a/drivers/media/platform/qcom/camss/camss.h
+++ b/drivers/media/platform/qcom/camss/camss.h
@@ -41,19 +41,15 @@
#define CAMSS_RES_MAX 17
-struct resources {
+struct camss_subdev_resources {
char *regulators[CAMSS_RES_MAX];
char *clock[CAMSS_RES_MAX];
+ char *clock_for_reset[CAMSS_RES_MAX];
u32 clock_rate[CAMSS_RES_MAX][CAMSS_RES_MAX];
char *reg[CAMSS_RES_MAX];
char *interrupt[CAMSS_RES_MAX];
-};
-
-struct resources_ispif {
- char *clock[CAMSS_RES_MAX];
- char *clock_for_reset[CAMSS_RES_MAX];
- char *reg[CAMSS_RES_MAX];
- char *interrupt;
+ u8 line_num;
+ const void *ops;
};
struct icc_bw_tbl {
@@ -85,26 +81,36 @@ enum icc_count {
ICC_SM8250_COUNT = 4,
};
-struct camss {
+struct camss_resources {
enum camss_version version;
+ const struct camss_subdev_resources *csiphy_res;
+ const struct camss_subdev_resources *csid_res;
+ const struct camss_subdev_resources *ispif_res;
+ const struct camss_subdev_resources *vfe_res;
+ const struct resources_icc *icc_res;
+ const unsigned int icc_path_num;
+ const unsigned int csiphy_num;
+ const unsigned int csid_num;
+ const unsigned int vfe_num;
+ const unsigned int vfe_lite_num;
+};
+
+struct camss {
struct v4l2_device v4l2_dev;
struct v4l2_async_notifier notifier;
struct media_device media_dev;
struct device *dev;
- int csiphy_num;
struct csiphy_device *csiphy;
- int csid_num;
struct csid_device *csid;
struct ispif_device *ispif;
- int vfe_num;
- int vfe_lite_num;
struct vfe_device *vfe;
atomic_t ref_count;
int genpd_num;
struct device **genpd;
struct device_link **genpd_link;
struct icc_path *icc_path[ICC_SM8250_COUNT];
- struct icc_bw_tbl icc_bw_tbl[ICC_SM8250_COUNT];
+ const struct camss_resources *res;
+ unsigned int vfe_total_num;
};
struct camss_camera_interface {
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 054b8e74ba4f..9cffe975581b 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -549,7 +549,7 @@ static const struct venus_resources msm8916_res = {
.vmem_size = 0,
.vmem_addr = 0,
.dma_mask = 0xddc00000 - 1,
- .fwname = "qcom/venus-1.8/venus.mdt",
+ .fwname = "qcom/venus-1.8/venus.mbn",
};
static const struct freq_tbl msm8996_freq_table[] = {
@@ -582,7 +582,7 @@ static const struct venus_resources msm8996_res = {
.vmem_size = 0,
.vmem_addr = 0,
.dma_mask = 0xddc00000 - 1,
- .fwname = "qcom/venus-4.2/venus.mdt",
+ .fwname = "qcom/venus-4.2/venus.mbn",
};
static const struct freq_tbl sdm660_freq_table[] = {
@@ -690,7 +690,7 @@ static const struct venus_resources sdm845_res = {
.vmem_size = 0,
.vmem_addr = 0,
.dma_mask = 0xe0000000 - 1,
- .fwname = "qcom/venus-5.2/venus.mdt",
+ .fwname = "qcom/venus-5.2/venus.mbn",
};
static const struct venus_resources sdm845_res_v2 = {
@@ -720,7 +720,7 @@ static const struct venus_resources sdm845_res_v2 = {
.cp_size = 0x70800000,
.cp_nonpixel_start = 0x1000000,
.cp_nonpixel_size = 0x24800000,
- .fwname = "qcom/venus-5.2/venus.mdt",
+ .fwname = "qcom/venus-5.2/venus.mbn",
};
static const struct freq_tbl sc7180_freq_table[] = {
@@ -768,7 +768,7 @@ static const struct venus_resources sc7180_res = {
.cp_size = 0x70800000,
.cp_nonpixel_start = 0x1000000,
.cp_nonpixel_size = 0x24800000,
- .fwname = "qcom/venus-5.4/venus.mdt",
+ .fwname = "qcom/venus-5.4/venus.mbn",
};
static const struct freq_tbl sm8250_freq_table[] = {
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.h b/drivers/media/platform/qcom/venus/hfi_cmds.h
index dd9c5066442d..20acd412ee7b 100644
--- a/drivers/media/platform/qcom/venus/hfi_cmds.h
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.h
@@ -242,7 +242,7 @@ struct hfi_session_parse_sequence_header_pkt {
struct hfi_sfr {
u32 buf_size;
- u8 data[1];
+ u8 data[] __counted_by(buf_size);
};
struct hfi_sys_test_ssr_pkt {
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
index 7cab685a2ec8..0a041b4db9ef 100644
--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
+++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
@@ -398,7 +398,7 @@ session_get_prop_buf_req(struct hfi_msg_session_property_info_pkt *pkt,
memcpy(&bufreq[idx], buf_req, sizeof(*bufreq));
idx++;
- if (idx > HFI_BUFFER_TYPE_MAX)
+ if (idx >= HFI_BUFFER_TYPE_MAX)
return HFI_ERR_SESSION_INVALID_PARAMETER;
req_bytes -= sizeof(struct hfi_buffer_requirements);
diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
index 6cf74b2bc5ae..c43839539d4d 100644
--- a/drivers/media/platform/qcom/venus/hfi_parser.c
+++ b/drivers/media/platform/qcom/venus/hfi_parser.c
@@ -19,6 +19,9 @@ static void init_codecs(struct venus_core *core)
struct hfi_plat_caps *caps = core->caps, *cap;
unsigned long bit;
+ if (hweight_long(core->dec_codecs) + hweight_long(core->enc_codecs) > MAX_CODEC_NUM)
+ return;
+
for_each_set_bit(bit, &core->dec_codecs, MAX_CODEC_NUM) {
cap = &caps[core->codecs_count++];
cap->codec = BIT(bit);
@@ -86,6 +89,9 @@ static void fill_profile_level(struct hfi_plat_caps *cap, const void *data,
{
const struct hfi_profile_level *pl = data;
+ if (cap->num_pl + num >= HFI_MAX_PROFILE_COUNT)
+ return;
+
memcpy(&cap->pl[cap->num_pl], pl, num * sizeof(*pl));
cap->num_pl += num;
}
@@ -111,6 +117,9 @@ fill_caps(struct hfi_plat_caps *cap, const void *data, unsigned int num)
{
const struct hfi_capability *caps = data;
+ if (cap->num_caps + num >= MAX_CAP_ENTRIES)
+ return;
+
memcpy(&cap->caps[cap->num_caps], caps, num * sizeof(*caps));
cap->num_caps += num;
}
@@ -137,6 +146,9 @@ static void fill_raw_fmts(struct hfi_plat_caps *cap, const void *fmts,
{
const struct raw_formats *formats = fmts;
+ if (cap->num_fmts + num_fmts >= MAX_FMT_ENTRIES)
+ return;
+
memcpy(&cap->fmts[cap->num_fmts], formats, num_fmts * sizeof(*formats));
cap->num_fmts += num_fmts;
}
@@ -159,6 +171,9 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
rawfmts[i].buftype = fmt->buffer_type;
i++;
+ if (i >= MAX_FMT_ENTRIES)
+ return;
+
if (pinfo->num_planes > MAX_PLANES)
break;
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index 19fc6575a489..f9437b6412b9 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -205,6 +205,11 @@ static int venus_write_queue(struct venus_hfi_device *hdev,
new_wr_idx = wr_idx + dwords;
wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2));
+
+ if (wr_ptr < (u32 *)queue->qmem.kva ||
+ wr_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*wr_ptr)))
+ return -EINVAL;
+
if (new_wr_idx < qsize) {
memcpy(wr_ptr, packet, dwords << 2);
} else {
@@ -272,6 +277,11 @@ static int venus_read_queue(struct venus_hfi_device *hdev,
}
rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2));
+
+ if (rd_ptr < (u32 *)queue->qmem.kva ||
+ rd_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*rd_ptr)))
+ return -EINVAL;
+
dwords = *rd_ptr >> 2;
if (!dwords)
return -EINVAL;
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
index 48c9084bb4db..a1b127caa90a 100644
--- a/drivers/media/platform/qcom/venus/pm_helpers.c
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -870,7 +870,7 @@ static int vcodec_domains_get(struct venus_core *core)
pd = dev_pm_domain_attach_by_name(dev,
res->vcodec_pmdomains[i]);
if (IS_ERR_OR_NULL(pd))
- return PTR_ERR(pd) ? : -ENODATA;
+ return pd ? PTR_ERR(pd) : -ENODATA;
core->pmdomains[i] = pd;
}
diff --git a/drivers/media/platform/renesas/rcar-isp.c b/drivers/media/platform/renesas/rcar-isp.c
index 7360cf3863f2..19a005d83733 100644
--- a/drivers/media/platform/renesas/rcar-isp.c
+++ b/drivers/media/platform/renesas/rcar-isp.c
@@ -467,7 +467,7 @@ static int risp_probe(struct platform_device *pdev)
isp->subdev.dev = &pdev->dev;
v4l2_subdev_init(&isp->subdev, &rcar_isp_subdev_ops);
v4l2_set_subdevdata(&isp->subdev, &pdev->dev);
- snprintf(isp->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s %s",
+ snprintf(isp->subdev.name, sizeof(isp->subdev.name), "%s %s",
KBUILD_MODNAME, dev_name(&pdev->dev));
isp->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c b/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
index f6326df0b09b..66fe553a00e7 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
@@ -1889,7 +1889,7 @@ static int rcsi2_probe(struct platform_device *pdev)
priv->subdev.dev = &pdev->dev;
v4l2_subdev_init(&priv->subdev, &rcar_csi2_subdev_ops);
v4l2_set_subdevdata(&priv->subdev, &pdev->dev);
- snprintf(priv->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s %s",
+ snprintf(priv->subdev.name, sizeof(priv->subdev.name), "%s %s",
KBUILD_MODNAME, dev_name(&pdev->dev));
priv->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
diff --git a/drivers/media/platform/renesas/rcar_drif.c b/drivers/media/platform/renesas/rcar_drif.c
index 163a4ba61c17..292c5bf9e50c 100644
--- a/drivers/media/platform/renesas/rcar_drif.c
+++ b/drivers/media/platform/renesas/rcar_drif.c
@@ -871,8 +871,7 @@ static int rcar_drif_querycap(struct file *file, void *fh,
strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
strscpy(cap->card, sdr->vdev->name, sizeof(cap->card));
- snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
- sdr->vdev->name);
+ strscpy(cap->bus_info, "platform:R-Car DRIF", sizeof(cap->bus_info));
return 0;
}
diff --git a/drivers/media/platform/renesas/renesas-ceu.c b/drivers/media/platform/renesas/renesas-ceu.c
index ec631c6e2a57..2562b30acfb9 100644
--- a/drivers/media/platform/renesas/renesas-ceu.c
+++ b/drivers/media/platform/renesas/renesas-ceu.c
@@ -1183,17 +1183,13 @@ static int ceu_enum_input(struct file *file, void *priv,
struct v4l2_input *inp)
{
struct ceu_device *ceudev = video_drvdata(file);
- struct ceu_subdev *ceusd;
if (inp->index >= ceudev->num_sd)
return -EINVAL;
- ceusd = ceudev->subdevs[inp->index];
-
inp->type = V4L2_INPUT_TYPE_CAMERA;
inp->std = 0;
- snprintf(inp->name, sizeof(inp->name), "Camera%u: %s",
- inp->index, ceusd->v4l2_sd->name);
+ snprintf(inp->name, sizeof(inp->name), "Camera %u", inp->index);
return 0;
}
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
index 8f3cba319762..c6d7e01c8949 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
@@ -479,9 +479,11 @@ static void rkisp1_sp_config(struct rkisp1_capture *cap)
rkisp1_write(rkisp1, cap->config->mi.cr_size_init,
rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CR));
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_SP_Y_LLENGTH, cap->sp_y_stride);
rkisp1_write(rkisp1, RKISP1_CIF_MI_SP_Y_PIC_WIDTH, pixm->width);
rkisp1_write(rkisp1, RKISP1_CIF_MI_SP_Y_PIC_HEIGHT, pixm->height);
- rkisp1_write(rkisp1, RKISP1_CIF_MI_SP_Y_LLENGTH, cap->sp_y_stride);
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_SP_Y_PIC_SIZE,
+ cap->sp_y_stride * pixm->height);
rkisp1_irq_frame_end_enable(cap);
@@ -1101,14 +1103,20 @@ rkisp1_fill_pixfmt(struct v4l2_pix_format_mplane *pixm,
memset(pixm->plane_fmt, 0, sizeof(pixm->plane_fmt));
info = v4l2_format_info(pixm->pixelformat);
pixm->num_planes = info->mem_planes;
- stride = info->bpp[0] * pixm->width;
- /* Self path supports custom stride but Main path doesn't */
- if (id == RKISP1_MAINPATH || plane_y->bytesperline < stride)
- plane_y->bytesperline = stride;
- plane_y->sizeimage = plane_y->bytesperline * pixm->height;
- /* normalize stride to pixels per line */
- stride = DIV_ROUND_UP(plane_y->bytesperline, info->bpp[0]);
+ /*
+ * The SP supports custom strides, expressed as a number of pixels for
+ * the Y plane. Clamp the stride to a reasonable value to avoid integer
+ * overflows when calculating the bytesperline and sizeimage values.
+ */
+ if (id == RKISP1_SELFPATH)
+ stride = clamp(DIV_ROUND_UP(plane_y->bytesperline, info->bpp[0]),
+ pixm->width, 65536U);
+ else
+ stride = pixm->width;
+
+ plane_y->bytesperline = stride * info->bpp[0];
+ plane_y->sizeimage = plane_y->bytesperline * pixm->height;
for (i = 1; i < info->comp_planes; i++) {
struct v4l2_plane_pix_format *plane = &pixm->plane_fmt[i];
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
index d30f0ecb1bfd..1e7cea1bea5e 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
@@ -167,9 +167,6 @@ struct rkisp1_sensor_async {
* @is_dphy_errctrl_disabled: if dphy errctrl is disabled (avoid endless interrupt)
* @sd: v4l2_subdev variable
* @pads: media pads
- * @pad_cfg: configurations for the pads
- * @sink_fmt: input format
- * @lock: protects pad_cfg and sink_fmt
* @source: source in-use, set when starting streaming
*/
struct rkisp1_csi {
@@ -178,9 +175,6 @@ struct rkisp1_csi {
bool is_dphy_errctrl_disabled;
struct v4l2_subdev sd;
struct media_pad pads[RKISP1_CSI_PAD_NUM];
- struct v4l2_subdev_pad_config pad_cfg[RKISP1_CSI_PAD_NUM];
- const struct rkisp1_mbus_info *sink_fmt;
- struct mutex lock;
struct v4l2_subdev *source;
};
@@ -190,20 +184,14 @@ struct rkisp1_csi {
* @sd: v4l2_subdev variable
* @rkisp1: pointer to rkisp1_device
* @pads: media pads
- * @pad_cfg: pads configurations
* @sink_fmt: input format
- * @src_fmt: output format
- * @ops_lock: ops serialization
* @frame_sequence: used to synchronize frame_id between video devices.
*/
struct rkisp1_isp {
struct v4l2_subdev sd;
struct rkisp1_device *rkisp1;
struct media_pad pads[RKISP1_ISP_PAD_MAX];
- struct v4l2_subdev_pad_config pad_cfg[RKISP1_ISP_PAD_MAX];
const struct rkisp1_mbus_info *sink_fmt;
- const struct rkisp1_mbus_info *src_fmt;
- struct mutex ops_lock; /* serialize the subdevice ops */
__u32 frame_sequence;
};
@@ -390,10 +378,7 @@ struct rkisp1_params {
* @id: id of the resizer, one of RKISP1_SELFPATH, RKISP1_MAINPATH
* @rkisp1: pointer to the rkisp1 device
* @pads: media pads
- * @pad_cfg: configurations for the pads
* @config: the set of registers to configure the resizer
- * @pixel_enc: pixel encoding of the resizer
- * @ops_lock: a lock for the subdev ops
*/
struct rkisp1_resizer {
struct v4l2_subdev sd;
@@ -401,10 +386,7 @@ struct rkisp1_resizer {
enum rkisp1_stream_id id;
struct rkisp1_device *rkisp1;
struct media_pad pads[RKISP1_RSZ_PAD_MAX];
- struct v4l2_subdev_pad_config pad_cfg[RKISP1_RSZ_PAD_MAX];
const struct rkisp1_rsz_config *config;
- enum v4l2_pixel_encoding pixel_enc;
- struct mutex ops_lock; /* serialize the subdevice ops */
};
/*
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
index fdff3d0da4e5..6e17b2817e61 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
@@ -30,23 +30,6 @@ static inline struct rkisp1_csi *to_rkisp1_csi(struct v4l2_subdev *sd)
return container_of(sd, struct rkisp1_csi, sd);
}
-static struct v4l2_mbus_framefmt *
-rkisp1_csi_get_pad_fmt(struct rkisp1_csi *csi,
- struct v4l2_subdev_state *sd_state,
- unsigned int pad, u32 which)
-{
- struct v4l2_subdev_state state = {
- .pads = csi->pad_cfg
- };
-
- lockdep_assert_held(&csi->lock);
-
- if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&csi->sd, sd_state, pad);
- else
- return v4l2_subdev_get_try_format(&csi->sd, &state, pad);
-}
-
int rkisp1_csi_link_sensor(struct rkisp1_device *rkisp1, struct v4l2_subdev *sd,
struct rkisp1_sensor_async *s_asd,
unsigned int source_pad)
@@ -76,7 +59,8 @@ int rkisp1_csi_link_sensor(struct rkisp1_device *rkisp1, struct v4l2_subdev *sd,
}
static int rkisp1_csi_config(struct rkisp1_csi *csi,
- const struct rkisp1_sensor_async *sensor)
+ const struct rkisp1_sensor_async *sensor,
+ const struct rkisp1_mbus_info *format)
{
struct rkisp1_device *rkisp1 = csi->rkisp1;
unsigned int lanes = sensor->lanes;
@@ -98,7 +82,7 @@ static int rkisp1_csi_config(struct rkisp1_csi *csi,
/* Configure Data Type and Virtual Channel */
rkisp1_write(rkisp1, RKISP1_CIF_MIPI_IMG_DATA_SEL,
- RKISP1_CIF_MIPI_DATA_SEL_DT(csi->sink_fmt->mipi_dt) |
+ RKISP1_CIF_MIPI_DATA_SEL_DT(format->mipi_dt) |
RKISP1_CIF_MIPI_DATA_SEL_VC(0));
/* Clear MIPI interrupts */
@@ -151,7 +135,8 @@ static void rkisp1_csi_disable(struct rkisp1_csi *csi)
}
static int rkisp1_csi_start(struct rkisp1_csi *csi,
- const struct rkisp1_sensor_async *sensor)
+ const struct rkisp1_sensor_async *sensor,
+ const struct rkisp1_mbus_info *format)
{
struct rkisp1_device *rkisp1 = csi->rkisp1;
union phy_configure_opts opts;
@@ -159,7 +144,7 @@ static int rkisp1_csi_start(struct rkisp1_csi *csi,
s64 pixel_clock;
int ret;
- ret = rkisp1_csi_config(csi, sensor);
+ ret = rkisp1_csi_config(csi, sensor, format);
if (ret)
return ret;
@@ -169,7 +154,7 @@ static int rkisp1_csi_start(struct rkisp1_csi *csi,
return -EINVAL;
}
- phy_mipi_dphy_get_default_config(pixel_clock, csi->sink_fmt->bus_width,
+ phy_mipi_dphy_get_default_config(pixel_clock, format->bus_width,
sensor->lanes, cfg);
phy_set_mode(csi->dphy, PHY_MODE_MIPI_DPHY);
phy_configure(csi->dphy, &opts);
@@ -248,7 +233,6 @@ static int rkisp1_csi_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
- struct rkisp1_csi *csi = to_rkisp1_csi(sd);
unsigned int i;
int pos = 0;
@@ -258,15 +242,10 @@ static int rkisp1_csi_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index)
return -EINVAL;
- mutex_lock(&csi->lock);
-
- sink_fmt = rkisp1_csi_get_pad_fmt(csi, sd_state,
- RKISP1_CSI_PAD_SINK,
- code->which);
+ sink_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
+ RKISP1_CSI_PAD_SINK);
code->code = sink_fmt->code;
- mutex_unlock(&csi->lock);
-
return 0;
}
@@ -296,9 +275,9 @@ static int rkisp1_csi_init_config(struct v4l2_subdev *sd,
{
struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
- sink_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+ sink_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
RKISP1_CSI_PAD_SINK);
- src_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+ src_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
RKISP1_CSI_PAD_SRC);
sink_fmt->width = RKISP1_DEFAULT_WIDTH;
@@ -311,36 +290,18 @@ static int rkisp1_csi_init_config(struct v4l2_subdev *sd,
return 0;
}
-static int rkisp1_csi_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *fmt)
-{
- struct rkisp1_csi *csi = to_rkisp1_csi(sd);
-
- mutex_lock(&csi->lock);
- fmt->format = *rkisp1_csi_get_pad_fmt(csi, sd_state, fmt->pad,
- fmt->which);
- mutex_unlock(&csi->lock);
-
- return 0;
-}
-
static int rkisp1_csi_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
- struct rkisp1_csi *csi = to_rkisp1_csi(sd);
const struct rkisp1_mbus_info *mbus_info;
struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
/* The format on the source pad always matches the sink pad. */
if (fmt->pad == RKISP1_CSI_PAD_SRC)
- return rkisp1_csi_get_fmt(sd, sd_state, fmt);
+ return v4l2_subdev_get_fmt(sd, sd_state, fmt);
- mutex_lock(&csi->lock);
-
- sink_fmt = rkisp1_csi_get_pad_fmt(csi, sd_state, RKISP1_CSI_PAD_SINK,
- fmt->which);
+ sink_fmt = v4l2_subdev_get_pad_format(sd, sd_state, RKISP1_CSI_PAD_SINK);
sink_fmt->code = fmt->format.code;
@@ -359,16 +320,10 @@ static int rkisp1_csi_set_fmt(struct v4l2_subdev *sd,
fmt->format = *sink_fmt;
- if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- csi->sink_fmt = mbus_info;
-
/* Propagate the format to the source pad. */
- src_fmt = rkisp1_csi_get_pad_fmt(csi, sd_state, RKISP1_CSI_PAD_SRC,
- fmt->which);
+ src_fmt = v4l2_subdev_get_pad_format(sd, sd_state, RKISP1_CSI_PAD_SRC);
*src_fmt = *sink_fmt;
- mutex_unlock(&csi->lock);
-
return 0;
}
@@ -380,8 +335,11 @@ static int rkisp1_csi_s_stream(struct v4l2_subdev *sd, int enable)
{
struct rkisp1_csi *csi = to_rkisp1_csi(sd);
struct rkisp1_device *rkisp1 = csi->rkisp1;
+ const struct v4l2_mbus_framefmt *sink_fmt;
+ const struct rkisp1_mbus_info *format;
struct rkisp1_sensor_async *source_asd;
struct v4l2_async_connection *asc;
+ struct v4l2_subdev_state *sd_state;
struct media_pad *source_pad;
struct v4l2_subdev *source;
int ret;
@@ -415,9 +373,12 @@ static int rkisp1_csi_s_stream(struct v4l2_subdev *sd, int enable)
if (source_asd->mbus_type != V4L2_MBUS_CSI2_DPHY)
return -EINVAL;
- mutex_lock(&csi->lock);
- ret = rkisp1_csi_start(csi, source_asd);
- mutex_unlock(&csi->lock);
+ sd_state = v4l2_subdev_lock_and_get_active_state(sd);
+ sink_fmt = v4l2_subdev_get_pad_format(sd, sd_state, RKISP1_CSI_PAD_SINK);
+ format = rkisp1_mbus_info_get_by_code(sink_fmt->code);
+ v4l2_subdev_unlock_state(sd_state);
+
+ ret = rkisp1_csi_start(csi, source_asd, format);
if (ret)
return ret;
@@ -447,7 +408,7 @@ static const struct v4l2_subdev_video_ops rkisp1_csi_video_ops = {
static const struct v4l2_subdev_pad_ops rkisp1_csi_pad_ops = {
.enum_mbus_code = rkisp1_csi_enum_mbus_code,
.init_cfg = rkisp1_csi_init_config,
- .get_fmt = rkisp1_csi_get_fmt,
+ .get_fmt = v4l2_subdev_get_fmt,
.set_fmt = rkisp1_csi_set_fmt,
};
@@ -459,13 +420,11 @@ static const struct v4l2_subdev_ops rkisp1_csi_ops = {
int rkisp1_csi_register(struct rkisp1_device *rkisp1)
{
struct rkisp1_csi *csi = &rkisp1->csi;
- struct v4l2_subdev_state state = {};
struct media_pad *pads;
struct v4l2_subdev *sd;
int ret;
csi->rkisp1 = rkisp1;
- mutex_init(&csi->lock);
sd = &csi->sd;
v4l2_subdev_init(sd, &rkisp1_csi_ops);
@@ -481,26 +440,26 @@ int rkisp1_csi_register(struct rkisp1_device *rkisp1)
pads[RKISP1_CSI_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE |
MEDIA_PAD_FL_MUST_CONNECT;
- csi->sink_fmt = rkisp1_mbus_info_get_by_code(RKISP1_CSI_DEF_FMT);
-
ret = media_entity_pads_init(&sd->entity, RKISP1_CSI_PAD_NUM, pads);
if (ret)
- goto error;
+ goto err_entity_cleanup;
- state.pads = csi->pad_cfg;
- rkisp1_csi_init_config(sd, &state);
+ ret = v4l2_subdev_init_finalize(sd);
+ if (ret)
+ goto err_entity_cleanup;
ret = v4l2_device_register_subdev(&csi->rkisp1->v4l2_dev, sd);
if (ret) {
dev_err(sd->dev, "Failed to register csi receiver subdev\n");
- goto error;
+ goto err_subdev_cleanup;
}
return 0;
-error:
+err_subdev_cleanup:
+ v4l2_subdev_cleanup(sd);
+err_entity_cleanup:
media_entity_cleanup(&sd->entity);
- mutex_destroy(&csi->lock);
csi->rkisp1 = NULL;
return ret;
}
@@ -513,8 +472,8 @@ void rkisp1_csi_unregister(struct rkisp1_device *rkisp1)
return;
v4l2_device_unregister_subdev(&csi->sd);
+ v4l2_subdev_cleanup(&csi->sd);
media_entity_cleanup(&csi->sd.entity);
- mutex_destroy(&csi->lock);
}
int rkisp1_csi_init(struct rkisp1_device *rkisp1)
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
index 07fbb77ce234..88ca8b2283b7 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
@@ -54,40 +54,6 @@
*/
/* ----------------------------------------------------------------------------
- * Helpers
- */
-
-static struct v4l2_mbus_framefmt *
-rkisp1_isp_get_pad_fmt(struct rkisp1_isp *isp,
- struct v4l2_subdev_state *sd_state,
- unsigned int pad, u32 which)
-{
- struct v4l2_subdev_state state = {
- .pads = isp->pad_cfg
- };
-
- if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&isp->sd, sd_state, pad);
- else
- return v4l2_subdev_get_try_format(&isp->sd, &state, pad);
-}
-
-static struct v4l2_rect *
-rkisp1_isp_get_pad_crop(struct rkisp1_isp *isp,
- struct v4l2_subdev_state *sd_state,
- unsigned int pad, u32 which)
-{
- struct v4l2_subdev_state state = {
- .pads = isp->pad_cfg
- };
-
- if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&isp->sd, sd_state, pad);
- else
- return v4l2_subdev_get_try_crop(&isp->sd, &state, pad);
-}
-
-/* ----------------------------------------------------------------------------
* Camera Interface registers configurations
*/
@@ -96,12 +62,12 @@ rkisp1_isp_get_pad_crop(struct rkisp1_isp *isp,
* This should only be called when configuring CIF
* or at the frame end interrupt
*/
-static void rkisp1_config_ism(struct rkisp1_isp *isp)
+static void rkisp1_config_ism(struct rkisp1_isp *isp,
+ struct v4l2_subdev_state *sd_state)
{
const struct v4l2_rect *src_crop =
- rkisp1_isp_get_pad_crop(isp, NULL,
- RKISP1_ISP_PAD_SOURCE_VIDEO,
- V4L2_SUBDEV_FORMAT_ACTIVE);
+ v4l2_subdev_get_pad_crop(&isp->sd, sd_state,
+ RKISP1_ISP_PAD_SOURCE_VIDEO);
struct rkisp1_device *rkisp1 = isp->rkisp1;
u32 val;
@@ -125,21 +91,26 @@ static void rkisp1_config_ism(struct rkisp1_isp *isp)
* configure ISP blocks with input format, size......
*/
static int rkisp1_config_isp(struct rkisp1_isp *isp,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_mbus_type mbus_type, u32 mbus_flags)
{
struct rkisp1_device *rkisp1 = isp->rkisp1;
u32 isp_ctrl = 0, irq_mask = 0, acq_mult = 0, acq_prop = 0;
- const struct rkisp1_mbus_info *sink_fmt = isp->sink_fmt;
- const struct rkisp1_mbus_info *src_fmt = isp->src_fmt;
+ const struct rkisp1_mbus_info *sink_fmt;
+ const struct rkisp1_mbus_info *src_fmt;
+ const struct v4l2_mbus_framefmt *src_frm;
const struct v4l2_mbus_framefmt *sink_frm;
const struct v4l2_rect *sink_crop;
- sink_frm = rkisp1_isp_get_pad_fmt(isp, NULL,
- RKISP1_ISP_PAD_SINK_VIDEO,
- V4L2_SUBDEV_FORMAT_ACTIVE);
- sink_crop = rkisp1_isp_get_pad_crop(isp, NULL,
- RKISP1_ISP_PAD_SINK_VIDEO,
- V4L2_SUBDEV_FORMAT_ACTIVE);
+ sink_frm = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO);
+ sink_crop = v4l2_subdev_get_pad_crop(&isp->sd, sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO);
+ src_frm = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
+ RKISP1_ISP_PAD_SOURCE_VIDEO);
+
+ sink_fmt = rkisp1_mbus_info_get_by_code(sink_frm->code);
+ src_fmt = rkisp1_mbus_info_get_by_code(src_frm->code);
if (sink_fmt->pixel_enc == V4L2_PIXEL_ENC_BAYER) {
acq_mult = 1;
@@ -230,14 +201,15 @@ static int rkisp1_config_isp(struct rkisp1_isp *isp,
} else {
struct v4l2_mbus_framefmt *src_frm;
- src_frm = rkisp1_isp_get_pad_fmt(isp, NULL,
- RKISP1_ISP_PAD_SOURCE_VIDEO,
- V4L2_SUBDEV_FORMAT_ACTIVE);
+ src_frm = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
+ RKISP1_ISP_PAD_SOURCE_VIDEO);
rkisp1_params_pre_configure(&rkisp1->params, sink_fmt->bayer_pat,
src_frm->quantization,
src_frm->ycbcr_enc);
}
+ isp->sink_fmt = sink_fmt;
+
return 0;
}
@@ -258,16 +230,17 @@ static void rkisp1_config_path(struct rkisp1_isp *isp,
/* Hardware configure Entry */
static int rkisp1_config_cif(struct rkisp1_isp *isp,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_mbus_type mbus_type, u32 mbus_flags)
{
int ret;
- ret = rkisp1_config_isp(isp, mbus_type, mbus_flags);
+ ret = rkisp1_config_isp(isp, sd_state, mbus_type, mbus_flags);
if (ret)
return ret;
rkisp1_config_path(isp, mbus_type);
- rkisp1_config_ism(isp);
+ rkisp1_config_ism(isp, sd_state);
return 0;
}
@@ -328,9 +301,12 @@ static void rkisp1_config_clk(struct rkisp1_isp *isp)
}
}
-static void rkisp1_isp_start(struct rkisp1_isp *isp)
+static void rkisp1_isp_start(struct rkisp1_isp *isp,
+ struct v4l2_subdev_state *sd_state)
{
struct rkisp1_device *rkisp1 = isp->rkisp1;
+ const struct v4l2_mbus_framefmt *src_fmt;
+ const struct rkisp1_mbus_info *src_info;
u32 val;
rkisp1_config_clk(isp);
@@ -342,7 +318,11 @@ static void rkisp1_isp_start(struct rkisp1_isp *isp)
RKISP1_CIF_ISP_CTRL_ISP_INFORM_ENABLE;
rkisp1_write(rkisp1, RKISP1_CIF_ISP_CTRL, val);
- if (isp->src_fmt->pixel_enc != V4L2_PIXEL_ENC_BAYER)
+ src_fmt = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
+ RKISP1_ISP_PAD_SOURCE_VIDEO);
+ src_info = rkisp1_mbus_info_get_by_code(src_fmt->code);
+
+ if (src_info->pixel_enc != V4L2_PIXEL_ENC_BAYER)
rkisp1_params_post_configure(&rkisp1->params);
}
@@ -436,7 +416,7 @@ static int rkisp1_isp_init_config(struct v4l2_subdev *sd,
struct v4l2_rect *sink_crop, *src_crop;
/* Video. */
- sink_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+ sink_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
RKISP1_ISP_PAD_SINK_VIDEO);
sink_fmt->width = RKISP1_DEFAULT_WIDTH;
sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
@@ -447,14 +427,14 @@ static int rkisp1_isp_init_config(struct v4l2_subdev *sd,
sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
sink_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
- sink_crop = v4l2_subdev_get_try_crop(sd, sd_state,
+ sink_crop = v4l2_subdev_get_pad_crop(sd, sd_state,
RKISP1_ISP_PAD_SINK_VIDEO);
sink_crop->width = RKISP1_DEFAULT_WIDTH;
sink_crop->height = RKISP1_DEFAULT_HEIGHT;
sink_crop->left = 0;
sink_crop->top = 0;
- src_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+ src_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
RKISP1_ISP_PAD_SOURCE_VIDEO);
*src_fmt = *sink_fmt;
src_fmt->code = RKISP1_DEF_SRC_PAD_FMT;
@@ -463,14 +443,14 @@ static int rkisp1_isp_init_config(struct v4l2_subdev *sd,
src_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
src_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
- src_crop = v4l2_subdev_get_try_crop(sd, sd_state,
+ src_crop = v4l2_subdev_get_pad_crop(sd, sd_state,
RKISP1_ISP_PAD_SOURCE_VIDEO);
*src_crop = *sink_crop;
/* Parameters and statistics. */
- sink_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+ sink_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
RKISP1_ISP_PAD_SINK_PARAMS);
- src_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+ src_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
RKISP1_ISP_PAD_SOURCE_STATS);
sink_fmt->width = 0;
sink_fmt->height = 0;
@@ -483,8 +463,7 @@ static int rkisp1_isp_init_config(struct v4l2_subdev *sd,
static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp,
struct v4l2_subdev_state *sd_state,
- struct v4l2_mbus_framefmt *format,
- unsigned int which)
+ struct v4l2_mbus_framefmt *format)
{
const struct rkisp1_mbus_info *sink_info;
const struct rkisp1_mbus_info *src_info;
@@ -493,12 +472,12 @@ static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp,
const struct v4l2_rect *src_crop;
bool set_csc;
- sink_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
- RKISP1_ISP_PAD_SINK_VIDEO, which);
- src_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
- RKISP1_ISP_PAD_SOURCE_VIDEO, which);
- src_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
- RKISP1_ISP_PAD_SOURCE_VIDEO, which);
+ sink_fmt = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO);
+ src_fmt = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
+ RKISP1_ISP_PAD_SOURCE_VIDEO);
+ src_crop = v4l2_subdev_get_pad_crop(&isp->sd, sd_state,
+ RKISP1_ISP_PAD_SOURCE_VIDEO);
/*
* Media bus code. The ISP can operate in pass-through mode (Bayer in,
@@ -581,26 +560,20 @@ static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp,
*/
if (set_csc)
format->flags |= V4L2_MBUS_FRAMEFMT_SET_CSC;
-
- /* Store the source format info when setting the active format. */
- if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
- isp->src_fmt = src_info;
}
static void rkisp1_isp_set_src_crop(struct rkisp1_isp *isp,
struct v4l2_subdev_state *sd_state,
- struct v4l2_rect *r, unsigned int which)
+ struct v4l2_rect *r)
{
struct v4l2_mbus_framefmt *src_fmt;
const struct v4l2_rect *sink_crop;
struct v4l2_rect *src_crop;
- src_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
- RKISP1_ISP_PAD_SOURCE_VIDEO,
- which);
- sink_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
- RKISP1_ISP_PAD_SINK_VIDEO,
- which);
+ src_crop = v4l2_subdev_get_pad_crop(&isp->sd, sd_state,
+ RKISP1_ISP_PAD_SOURCE_VIDEO);
+ sink_crop = v4l2_subdev_get_pad_crop(&isp->sd, sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO);
src_crop->left = ALIGN(r->left, 2);
src_crop->width = ALIGN(r->width, 2);
@@ -611,24 +584,22 @@ static void rkisp1_isp_set_src_crop(struct rkisp1_isp *isp,
*r = *src_crop;
/* Propagate to out format */
- src_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
- RKISP1_ISP_PAD_SOURCE_VIDEO, which);
- rkisp1_isp_set_src_fmt(isp, sd_state, src_fmt, which);
+ src_fmt = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
+ RKISP1_ISP_PAD_SOURCE_VIDEO);
+ rkisp1_isp_set_src_fmt(isp, sd_state, src_fmt);
}
static void rkisp1_isp_set_sink_crop(struct rkisp1_isp *isp,
struct v4l2_subdev_state *sd_state,
- struct v4l2_rect *r, unsigned int which)
+ struct v4l2_rect *r)
{
struct v4l2_rect *sink_crop, *src_crop;
const struct v4l2_mbus_framefmt *sink_fmt;
- sink_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
- RKISP1_ISP_PAD_SINK_VIDEO,
- which);
- sink_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
- RKISP1_ISP_PAD_SINK_VIDEO,
- which);
+ sink_crop = v4l2_subdev_get_pad_crop(&isp->sd, sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO);
+ sink_fmt = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO);
sink_crop->left = ALIGN(r->left, 2);
sink_crop->width = ALIGN(r->width, 2);
@@ -639,32 +610,28 @@ static void rkisp1_isp_set_sink_crop(struct rkisp1_isp *isp,
*r = *sink_crop;
/* Propagate to out crop */
- src_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
- RKISP1_ISP_PAD_SOURCE_VIDEO, which);
- rkisp1_isp_set_src_crop(isp, sd_state, src_crop, which);
+ src_crop = v4l2_subdev_get_pad_crop(&isp->sd, sd_state,
+ RKISP1_ISP_PAD_SOURCE_VIDEO);
+ rkisp1_isp_set_src_crop(isp, sd_state, src_crop);
}
static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp,
struct v4l2_subdev_state *sd_state,
- struct v4l2_mbus_framefmt *format,
- unsigned int which)
+ struct v4l2_mbus_framefmt *format)
{
const struct rkisp1_mbus_info *mbus_info;
struct v4l2_mbus_framefmt *sink_fmt;
struct v4l2_rect *sink_crop;
bool is_yuv;
- sink_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
- RKISP1_ISP_PAD_SINK_VIDEO,
- which);
+ sink_fmt = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO);
sink_fmt->code = format->code;
mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
if (!mbus_info || !(mbus_info->direction & RKISP1_ISP_SD_SINK)) {
sink_fmt->code = RKISP1_DEF_SINK_PAD_FMT;
mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
}
- if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
- isp->sink_fmt = mbus_info;
sink_fmt->width = clamp_t(u32, format->width,
RKISP1_ISP_MIN_WIDTH,
@@ -706,23 +673,9 @@ static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp,
*format = *sink_fmt;
/* Propagate to in crop */
- sink_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
- RKISP1_ISP_PAD_SINK_VIDEO,
- which);
- rkisp1_isp_set_sink_crop(isp, sd_state, sink_crop, which);
-}
-
-static int rkisp1_isp_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *fmt)
-{
- struct rkisp1_isp *isp = to_rkisp1_isp(sd);
-
- mutex_lock(&isp->ops_lock);
- fmt->format = *rkisp1_isp_get_pad_fmt(isp, sd_state, fmt->pad,
- fmt->which);
- mutex_unlock(&isp->ops_lock);
- return 0;
+ sink_crop = v4l2_subdev_get_pad_crop(&isp->sd, sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO);
+ rkisp1_isp_set_sink_crop(isp, sd_state, sink_crop);
}
static int rkisp1_isp_set_fmt(struct v4l2_subdev *sd,
@@ -731,18 +684,13 @@ static int rkisp1_isp_set_fmt(struct v4l2_subdev *sd,
{
struct rkisp1_isp *isp = to_rkisp1_isp(sd);
- mutex_lock(&isp->ops_lock);
if (fmt->pad == RKISP1_ISP_PAD_SINK_VIDEO)
- rkisp1_isp_set_sink_fmt(isp, sd_state, &fmt->format,
- fmt->which);
+ rkisp1_isp_set_sink_fmt(isp, sd_state, &fmt->format);
else if (fmt->pad == RKISP1_ISP_PAD_SOURCE_VIDEO)
- rkisp1_isp_set_src_fmt(isp, sd_state, &fmt->format,
- fmt->which);
+ rkisp1_isp_set_src_fmt(isp, sd_state, &fmt->format);
else
- fmt->format = *rkisp1_isp_get_pad_fmt(isp, sd_state, fmt->pad,
- fmt->which);
+ fmt->format = *v4l2_subdev_get_pad_format(sd, sd_state, fmt->pad);
- mutex_unlock(&isp->ops_lock);
return 0;
}
@@ -750,39 +698,37 @@ static int rkisp1_isp_get_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
- struct rkisp1_isp *isp = to_rkisp1_isp(sd);
int ret = 0;
if (sel->pad != RKISP1_ISP_PAD_SOURCE_VIDEO &&
sel->pad != RKISP1_ISP_PAD_SINK_VIDEO)
return -EINVAL;
- mutex_lock(&isp->ops_lock);
switch (sel->target) {
case V4L2_SEL_TGT_CROP_BOUNDS:
if (sel->pad == RKISP1_ISP_PAD_SINK_VIDEO) {
struct v4l2_mbus_framefmt *fmt;
- fmt = rkisp1_isp_get_pad_fmt(isp, sd_state, sel->pad,
- sel->which);
+ fmt = v4l2_subdev_get_pad_format(sd, sd_state, sel->pad);
sel->r.height = fmt->height;
sel->r.width = fmt->width;
sel->r.left = 0;
sel->r.top = 0;
} else {
- sel->r = *rkisp1_isp_get_pad_crop(isp, sd_state,
- RKISP1_ISP_PAD_SINK_VIDEO,
- sel->which);
+ sel->r = *v4l2_subdev_get_pad_crop(sd, sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO);
}
break;
+
case V4L2_SEL_TGT_CROP:
- sel->r = *rkisp1_isp_get_pad_crop(isp, sd_state, sel->pad,
- sel->which);
+ sel->r = *v4l2_subdev_get_pad_crop(sd, sd_state, sel->pad);
break;
+
default:
ret = -EINVAL;
+ break;
}
- mutex_unlock(&isp->ops_lock);
+
return ret;
}
@@ -798,15 +744,14 @@ static int rkisp1_isp_set_selection(struct v4l2_subdev *sd,
dev_dbg(isp->rkisp1->dev, "%s: pad: %d sel(%d,%d)/%dx%d\n", __func__,
sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
- mutex_lock(&isp->ops_lock);
+
if (sel->pad == RKISP1_ISP_PAD_SINK_VIDEO)
- rkisp1_isp_set_sink_crop(isp, sd_state, &sel->r, sel->which);
+ rkisp1_isp_set_sink_crop(isp, sd_state, &sel->r);
else if (sel->pad == RKISP1_ISP_PAD_SOURCE_VIDEO)
- rkisp1_isp_set_src_crop(isp, sd_state, &sel->r, sel->which);
+ rkisp1_isp_set_src_crop(isp, sd_state, &sel->r);
else
ret = -EINVAL;
- mutex_unlock(&isp->ops_lock);
return ret;
}
@@ -824,7 +769,7 @@ static const struct v4l2_subdev_pad_ops rkisp1_isp_pad_ops = {
.get_selection = rkisp1_isp_get_selection,
.set_selection = rkisp1_isp_set_selection,
.init_cfg = rkisp1_isp_init_config,
- .get_fmt = rkisp1_isp_get_fmt,
+ .get_fmt = v4l2_subdev_get_fmt,
.set_fmt = rkisp1_isp_set_fmt,
.link_validate = v4l2_subdev_link_validate_default,
};
@@ -837,6 +782,7 @@ static int rkisp1_isp_s_stream(struct v4l2_subdev *sd, int enable)
{
struct rkisp1_isp *isp = to_rkisp1_isp(sd);
struct rkisp1_device *rkisp1 = isp->rkisp1;
+ struct v4l2_subdev_state *sd_state;
struct media_pad *source_pad;
struct media_pad *sink_pad;
enum v4l2_mbus_type mbus_type;
@@ -881,21 +827,23 @@ static int rkisp1_isp_s_stream(struct v4l2_subdev *sd, int enable)
}
isp->frame_sequence = -1;
- mutex_lock(&isp->ops_lock);
- ret = rkisp1_config_cif(isp, mbus_type, mbus_flags);
+
+ sd_state = v4l2_subdev_lock_and_get_active_state(sd);
+
+ ret = rkisp1_config_cif(isp, sd_state, mbus_type, mbus_flags);
if (ret)
- goto mutex_unlock;
+ goto out_unlock;
- rkisp1_isp_start(isp);
+ rkisp1_isp_start(isp, sd_state);
ret = v4l2_subdev_call(rkisp1->source, video, s_stream, true);
if (ret) {
rkisp1_isp_stop(isp);
- goto mutex_unlock;
+ goto out_unlock;
}
-mutex_unlock:
- mutex_unlock(&isp->ops_lock);
+out_unlock:
+ v4l2_subdev_unlock_state(sd_state);
return ret;
}
@@ -933,9 +881,6 @@ static const struct v4l2_subdev_ops rkisp1_isp_ops = {
int rkisp1_isp_register(struct rkisp1_device *rkisp1)
{
- struct v4l2_subdev_state state = {
- .pads = rkisp1->isp.pad_cfg
- };
struct rkisp1_isp *isp = &rkisp1->isp;
struct media_pad *pads = isp->pads;
struct v4l2_subdev *sd = &isp->sd;
@@ -956,27 +901,26 @@ int rkisp1_isp_register(struct rkisp1_device *rkisp1)
pads[RKISP1_ISP_PAD_SOURCE_VIDEO].flags = MEDIA_PAD_FL_SOURCE;
pads[RKISP1_ISP_PAD_SOURCE_STATS].flags = MEDIA_PAD_FL_SOURCE;
- isp->sink_fmt = rkisp1_mbus_info_get_by_code(RKISP1_DEF_SINK_PAD_FMT);
- isp->src_fmt = rkisp1_mbus_info_get_by_code(RKISP1_DEF_SRC_PAD_FMT);
-
- mutex_init(&isp->ops_lock);
ret = media_entity_pads_init(&sd->entity, RKISP1_ISP_PAD_MAX, pads);
if (ret)
- goto error;
+ goto err_entity_cleanup;
+
+ ret = v4l2_subdev_init_finalize(sd);
+ if (ret)
+ goto err_subdev_cleanup;
ret = v4l2_device_register_subdev(&rkisp1->v4l2_dev, sd);
if (ret) {
dev_err(rkisp1->dev, "Failed to register isp subdev\n");
- goto error;
+ goto err_subdev_cleanup;
}
- rkisp1_isp_init_config(sd, &state);
-
return 0;
-error:
+err_subdev_cleanup:
+ v4l2_subdev_cleanup(sd);
+err_entity_cleanup:
media_entity_cleanup(&sd->entity);
- mutex_destroy(&isp->ops_lock);
isp->sd.v4l2_dev = NULL;
return ret;
}
@@ -990,7 +934,6 @@ void rkisp1_isp_unregister(struct rkisp1_device *rkisp1)
v4l2_device_unregister_subdev(&isp->sd);
media_entity_cleanup(&isp->sd.entity);
- mutex_destroy(&isp->ops_lock);
}
/* ----------------------------------------------------------------------------
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
index 3482f7d707b7..173d1ea41874 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
@@ -812,7 +812,7 @@ static void rkisp1_hst_config_v10(struct rkisp1_params *params,
weight[2], weight[3]));
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_HIST_WEIGHT_44_V10,
- weight[0] & 0x1F);
+ weight[0] & 0x1f);
}
static void rkisp1_hst_config_v12(struct rkisp1_params *params,
@@ -1726,7 +1726,7 @@ static const struct rkisp1_params_ops rkisp1_v10_params_ops = {
.afm_config = rkisp1_afm_config_v10,
};
-static struct rkisp1_params_ops rkisp1_v12_params_ops = {
+static const struct rkisp1_params_ops rkisp1_v12_params_ops = {
.lsc_matrix_config = rkisp1_lsc_matrix_config_v12,
.goc_config = rkisp1_goc_config_v12,
.awb_meas_config = rkisp1_awb_meas_config_v12,
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
index 421cc73355db..350f452e676f 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
@@ -273,13 +273,13 @@
#define RKISP1_CIF_C_PROC_YOUT_FULL BIT(1)
#define RKISP1_CIF_C_PROC_YIN_FULL BIT(2)
#define RKISP1_CIF_C_PROC_COUT_FULL BIT(3)
-#define RKISP1_CIF_C_PROC_CTRL_RESERVED 0xFFFFFFFE
-#define RKISP1_CIF_C_PROC_CONTRAST_RESERVED 0xFFFFFF00
-#define RKISP1_CIF_C_PROC_BRIGHTNESS_RESERVED 0xFFFFFF00
-#define RKISP1_CIF_C_PROC_HUE_RESERVED 0xFFFFFF00
-#define RKISP1_CIF_C_PROC_SATURATION_RESERVED 0xFFFFFF00
-#define RKISP1_CIF_C_PROC_MACC_RESERVED 0xE000E000
-#define RKISP1_CIF_C_PROC_TONE_RESERVED 0xF000
+#define RKISP1_CIF_C_PROC_CTRL_RESERVED 0xfffffffe
+#define RKISP1_CIF_C_PROC_CONTRAST_RESERVED 0xffffff00
+#define RKISP1_CIF_C_PROC_BRIGHTNESS_RESERVED 0xffffff00
+#define RKISP1_CIF_C_PROC_HUE_RESERVED 0xffffff00
+#define RKISP1_CIF_C_PROC_SATURATION_RESERVED 0xffffff00
+#define RKISP1_CIF_C_PROC_MACC_RESERVED 0xe000e000
+#define RKISP1_CIF_C_PROC_TONE_RESERVED 0xf000
/* DUAL_CROP_CTRL */
#define RKISP1_CIF_DUAL_CROP_MP_MODE_BYPASS (0 << 0)
#define RKISP1_CIF_DUAL_CROP_MP_MODE_YUV (1 << 0)
@@ -310,7 +310,7 @@
#define RKISP1_CIF_IMG_EFF_CTRL_MODE_EMBOSS_SHIFT 4
#define RKISP1_CIF_IMG_EFF_CTRL_MODE_SKETCH_SHIFT 5
#define RKISP1_CIF_IMG_EFF_CTRL_MODE_SHARPEN_SHIFT 6
-#define RKISP1_CIF_IMG_EFF_CTRL_MODE_MASK 0xE
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_MASK 0xe
/* IMG_EFF_COLOR_SEL */
#define RKISP1_CIF_IMG_EFF_COLOR_RGB 0
@@ -324,7 +324,7 @@
/* MIPI_CTRL */
#define RKISP1_CIF_MIPI_CTRL_OUTPUT_ENA BIT(0)
-#define RKISP1_CIF_MIPI_CTRL_SHUTDOWNLANES(a) (((a) & 0xF) << 8)
+#define RKISP1_CIF_MIPI_CTRL_SHUTDOWNLANES(a) (((a) & 0xf) << 8)
#define RKISP1_CIF_MIPI_CTRL_NUM_LANES(a) (((a) & 0x3) << 12)
#define RKISP1_CIF_MIPI_CTRL_ERR_SOT_HS_SKIP BIT(16)
#define RKISP1_CIF_MIPI_CTRL_ERR_SOT_SYNC_HS_SKIP BIT(17)
@@ -332,14 +332,14 @@
/* MIPI_DATA_SEL */
#define RKISP1_CIF_MIPI_DATA_SEL_VC(a) (((a) & 0x3) << 6)
-#define RKISP1_CIF_MIPI_DATA_SEL_DT(a) (((a) & 0x3F) << 0)
+#define RKISP1_CIF_MIPI_DATA_SEL_DT(a) (((a) & 0x3f) << 0)
/* MIPI_IMSC, MIPI_RIS, MIPI_MIS, MIPI_ICR, MIPI_ISR */
-#define RKISP1_CIF_MIPI_SYNC_FIFO_OVFLW(a) (((a) & 0xF) << 0)
-#define RKISP1_CIF_MIPI_ERR_SOT(a) (((a) & 0xF) << 4)
-#define RKISP1_CIF_MIPI_ERR_SOT_SYNC(a) (((a) & 0xF) << 8)
-#define RKISP1_CIF_MIPI_ERR_EOT_SYNC(a) (((a) & 0xF) << 12)
-#define RKISP1_CIF_MIPI_ERR_CTRL(a) (((a) & 0xF) << 16)
+#define RKISP1_CIF_MIPI_SYNC_FIFO_OVFLW(a) (((a) & 0xf) << 0)
+#define RKISP1_CIF_MIPI_ERR_SOT(a) (((a) & 0xf) << 4)
+#define RKISP1_CIF_MIPI_ERR_SOT_SYNC(a) (((a) & 0xf) << 8)
+#define RKISP1_CIF_MIPI_ERR_EOT_SYNC(a) (((a) & 0xf) << 12)
+#define RKISP1_CIF_MIPI_ERR_CTRL(a) (((a) & 0xf) << 16)
#define RKISP1_CIF_MIPI_ERR_PROTOCOL BIT(20)
#define RKISP1_CIF_MIPI_ERR_ECC1 BIT(21)
#define RKISP1_CIF_MIPI_ERR_ECC2 BIT(22)
@@ -371,28 +371,28 @@
#define RKISP1_CIF_ISP_HIST_PROP_MODE_BLUE_V10 (4 << 0)
#define RKISP1_CIF_ISP_HIST_PROP_MODE_LUM_V10 (5 << 0)
#define RKISP1_CIF_ISP_HIST_PROP_MODE_MASK_V10 0x7
-#define RKISP1_CIF_ISP_HIST_PREDIV_SET_V10(x) (((x) & 0x7F) << 3)
+#define RKISP1_CIF_ISP_HIST_PREDIV_SET_V10(x) (((x) & 0x7f) << 3)
#define RKISP1_CIF_ISP_HIST_WEIGHT_SET_V10(v0, v1, v2, v3) \
- (((v0) & 0x1F) | (((v1) & 0x1F) << 8) |\
- (((v2) & 0x1F) << 16) | \
- (((v3) & 0x1F) << 24))
-
-#define RKISP1_CIF_ISP_HIST_WINDOW_OFFSET_RESERVED_V10 0xFFFFF000
-#define RKISP1_CIF_ISP_HIST_WINDOW_SIZE_RESERVED_V10 0xFFFFF800
-#define RKISP1_CIF_ISP_HIST_WEIGHT_RESERVED_V10 0xE0E0E0E0
-#define RKISP1_CIF_ISP_MAX_HIST_PREDIVIDER_V10 0x0000007F
+ (((v0) & 0x1f) | (((v1) & 0x1f) << 8) |\
+ (((v2) & 0x1f) << 16) | \
+ (((v3) & 0x1f) << 24))
+
+#define RKISP1_CIF_ISP_HIST_WINDOW_OFFSET_RESERVED_V10 0xfffff000
+#define RKISP1_CIF_ISP_HIST_WINDOW_SIZE_RESERVED_V10 0xfffff800
+#define RKISP1_CIF_ISP_HIST_WEIGHT_RESERVED_V10 0xe0e0e0e0
+#define RKISP1_CIF_ISP_MAX_HIST_PREDIVIDER_V10 0x0000007f
#define RKISP1_CIF_ISP_HIST_ROW_NUM_V10 5
#define RKISP1_CIF_ISP_HIST_COLUMN_NUM_V10 5
-#define RKISP1_CIF_ISP_HIST_GET_BIN_V10(x) ((x) & 0x000FFFFF)
+#define RKISP1_CIF_ISP_HIST_GET_BIN_V10(x) ((x) & 0x000fffff)
/* ISP HISTOGRAM CALCULATION : CIF_ISP_HIST */
#define RKISP1_CIF_ISP_HIST_CTRL_EN_SET_V12(x) (((x) & 0x01) << 0)
#define RKISP1_CIF_ISP_HIST_CTRL_EN_MASK_V12 RKISP1_CIF_ISP_HIST_CTRL_EN_SET_V12(0x01)
-#define RKISP1_CIF_ISP_HIST_CTRL_STEPSIZE_SET_V12(x) (((x) & 0x7F) << 1)
+#define RKISP1_CIF_ISP_HIST_CTRL_STEPSIZE_SET_V12(x) (((x) & 0x7f) << 1)
#define RKISP1_CIF_ISP_HIST_CTRL_MODE_SET_V12(x) (((x) & 0x07) << 8)
#define RKISP1_CIF_ISP_HIST_CTRL_MODE_MASK_V12 RKISP1_CIF_ISP_HIST_CTRL_MODE_SET_V12(0x07)
#define RKISP1_CIF_ISP_HIST_CTRL_AUTOSTOP_SET_V12(x) (((x) & 0x01) << 11)
-#define RKISP1_CIF_ISP_HIST_CTRL_WATERLINE_SET_V12(x) (((x) & 0xFFF) << 12)
+#define RKISP1_CIF_ISP_HIST_CTRL_WATERLINE_SET_V12(x) (((x) & 0xfff) << 12)
#define RKISP1_CIF_ISP_HIST_CTRL_DATASEL_SET_V12(x) (((x) & 0x07) << 24)
#define RKISP1_CIF_ISP_HIST_CTRL_INTRSEL_SET_V12(x) (((x) & 0x01) << 27)
#define RKISP1_CIF_ISP_HIST_CTRL_WNDNUM_SET_V12(x) (((x) & 0x03) << 28)
@@ -403,19 +403,19 @@
(RKISP1_CIF_ISP_HIST_ROW_NUM_V12 * RKISP1_CIF_ISP_HIST_COLUMN_NUM_V12)
#define RKISP1_CIF_ISP_HIST_WEIGHT_SET_V12(v0, v1, v2, v3) \
- (((v0) & 0x3F) | (((v1) & 0x3F) << 8) |\
- (((v2) & 0x3F) << 16) |\
- (((v3) & 0x3F) << 24))
+ (((v0) & 0x3f) | (((v1) & 0x3f) << 8) |\
+ (((v2) & 0x3f) << 16) |\
+ (((v3) & 0x3f) << 24))
#define RKISP1_CIF_ISP_HIST_OFFS_SET_V12(v0, v1) \
- (((v0) & 0x1FFF) | (((v1) & 0x1FFF) << 16))
+ (((v0) & 0x1fff) | (((v1) & 0x1fff) << 16))
#define RKISP1_CIF_ISP_HIST_SIZE_SET_V12(v0, v1) \
- (((v0) & 0x7FF) | (((v1) & 0x7FF) << 16))
+ (((v0) & 0x7ff) | (((v1) & 0x7ff) << 16))
#define RKISP1_CIF_ISP_HIST_GET_BIN0_V12(x) \
- ((x) & 0xFFFF)
+ ((x) & 0xffff)
#define RKISP1_CIF_ISP_HIST_GET_BIN1_V12(x) \
- (((x) >> 16) & 0xFFFF)
+ (((x) >> 16) & 0xffff)
/* AUTO FOCUS MEASUREMENT: ISP_AFM_CTRL */
#define RKISP1_ISP_AFM_CTRL_ENABLE BIT(0)
@@ -437,11 +437,11 @@
#define RKISP1_CIFFLASH_CONFIG_VSYNC_POS BIT(1)
#define RKISP1_CIFFLASH_CONFIG_PRELIGHT_LOW BIT(2)
#define RKISP1_CIFFLASH_CONFIG_SRC_FL_TRIG BIT(3)
-#define RKISP1_CIFFLASH_CONFIG_DELAY(a) (((a) & 0xF) << 4)
+#define RKISP1_CIFFLASH_CONFIG_DELAY(a) (((a) & 0xf) << 4)
/* Demosaic: ISP_DEMOSAIC */
#define RKISP1_CIF_ISP_DEMOSAIC_BYPASS BIT(10)
-#define RKISP1_CIF_ISP_DEMOSAIC_TH(x) ((x) & 0xFF)
+#define RKISP1_CIF_ISP_DEMOSAIC_TH(x) ((x) & 0xff)
/* ISP_FLAGS_SHD */
#define RKISP1_CIF_ISP_FLAGS_SHD_ISP_ENABLE_SHD BIT(0)
@@ -458,39 +458,39 @@
#define RKISP1_CIF_ISP_AWB_YMAX_READ(x) (((x) >> 2) & 1)
#define RKISP1_CIF_ISP_AWB_MODE_RGB_EN ((1 << 31) | (0x2 << 0))
#define RKISP1_CIF_ISP_AWB_MODE_YCBCR_EN ((0 << 31) | (0x2 << 0))
-#define RKISP1_CIF_ISP_AWB_MODE_MASK_NONE 0xFFFFFFFC
+#define RKISP1_CIF_ISP_AWB_MODE_MASK_NONE 0xfffffffc
#define RKISP1_CIF_ISP_AWB_MODE_READ(x) ((x) & 3)
#define RKISP1_CIF_ISP_AWB_SET_FRAMES_V12(x) (((x) & 0x07) << 28)
#define RKISP1_CIF_ISP_AWB_SET_FRAMES_MASK_V12 RKISP1_CIF_ISP_AWB_SET_FRAMES_V12(0x07)
/* ISP_AWB_GAIN_RB, ISP_AWB_GAIN_G */
-#define RKISP1_CIF_ISP_AWB_GAIN_R_SET(x) (((x) & 0x3FF) << 16)
-#define RKISP1_CIF_ISP_AWB_GAIN_R_READ(x) (((x) >> 16) & 0x3FF)
-#define RKISP1_CIF_ISP_AWB_GAIN_B_SET(x) ((x) & 0x3FFF)
-#define RKISP1_CIF_ISP_AWB_GAIN_B_READ(x) ((x) & 0x3FFF)
+#define RKISP1_CIF_ISP_AWB_GAIN_R_SET(x) (((x) & 0x3ff) << 16)
+#define RKISP1_CIF_ISP_AWB_GAIN_R_READ(x) (((x) >> 16) & 0x3ff)
+#define RKISP1_CIF_ISP_AWB_GAIN_B_SET(x) ((x) & 0x3fff)
+#define RKISP1_CIF_ISP_AWB_GAIN_B_READ(x) ((x) & 0x3fff)
/* ISP_AWB_REF */
-#define RKISP1_CIF_ISP_AWB_REF_CR_SET(x) (((x) & 0xFF) << 8)
-#define RKISP1_CIF_ISP_AWB_REF_CR_READ(x) (((x) >> 8) & 0xFF)
-#define RKISP1_CIF_ISP_AWB_REF_CB_READ(x) ((x) & 0xFF)
+#define RKISP1_CIF_ISP_AWB_REF_CR_SET(x) (((x) & 0xff) << 8)
+#define RKISP1_CIF_ISP_AWB_REF_CR_READ(x) (((x) >> 8) & 0xff)
+#define RKISP1_CIF_ISP_AWB_REF_CB_READ(x) ((x) & 0xff)
/* ISP_AWB_THRESH */
-#define RKISP1_CIF_ISP_AWB_MAX_CS_SET(x) (((x) & 0xFF) << 8)
-#define RKISP1_CIF_ISP_AWB_MAX_CS_READ(x) (((x) >> 8) & 0xFF)
-#define RKISP1_CIF_ISP_AWB_MIN_C_READ(x) ((x) & 0xFF)
-#define RKISP1_CIF_ISP_AWB_MIN_Y_SET(x) (((x) & 0xFF) << 16)
-#define RKISP1_CIF_ISP_AWB_MIN_Y_READ(x) (((x) >> 16) & 0xFF)
-#define RKISP1_CIF_ISP_AWB_MAX_Y_SET(x) (((x) & 0xFF) << 24)
-#define RKISP1_CIF_ISP_AWB_MAX_Y_READ(x) (((x) >> 24) & 0xFF)
+#define RKISP1_CIF_ISP_AWB_MAX_CS_SET(x) (((x) & 0xff) << 8)
+#define RKISP1_CIF_ISP_AWB_MAX_CS_READ(x) (((x) >> 8) & 0xff)
+#define RKISP1_CIF_ISP_AWB_MIN_C_READ(x) ((x) & 0xff)
+#define RKISP1_CIF_ISP_AWB_MIN_Y_SET(x) (((x) & 0xff) << 16)
+#define RKISP1_CIF_ISP_AWB_MIN_Y_READ(x) (((x) >> 16) & 0xff)
+#define RKISP1_CIF_ISP_AWB_MAX_Y_SET(x) (((x) & 0xff) << 24)
+#define RKISP1_CIF_ISP_AWB_MAX_Y_READ(x) (((x) >> 24) & 0xff)
/* ISP_AWB_MEAN */
-#define RKISP1_CIF_ISP_AWB_GET_MEAN_CR_R(x) ((x) & 0xFF)
-#define RKISP1_CIF_ISP_AWB_GET_MEAN_CB_B(x) (((x) >> 8) & 0xFF)
-#define RKISP1_CIF_ISP_AWB_GET_MEAN_Y_G(x) (((x) >> 16) & 0xFF)
+#define RKISP1_CIF_ISP_AWB_GET_MEAN_CR_R(x) ((x) & 0xff)
+#define RKISP1_CIF_ISP_AWB_GET_MEAN_CB_B(x) (((x) >> 8) & 0xff)
+#define RKISP1_CIF_ISP_AWB_GET_MEAN_Y_G(x) (((x) >> 16) & 0xff)
/* ISP_AWB_WHITE_CNT */
-#define RKISP1_CIF_ISP_AWB_GET_PIXEL_CNT(x) ((x) & 0x3FFFFFF)
+#define RKISP1_CIF_ISP_AWB_GET_PIXEL_CNT(x) ((x) & 0x3ffffff)
-#define RKISP1_CIF_ISP_AWB_GAINS_MAX_VAL 0x000003FF
-#define RKISP1_CIF_ISP_AWB_WINDOW_OFFSET_MAX 0x00000FFF
-#define RKISP1_CIF_ISP_AWB_WINDOW_MAX_SIZE 0x00001FFF
-#define RKISP1_CIF_ISP_AWB_CBCR_MAX_REF 0x000000FF
-#define RKISP1_CIF_ISP_AWB_THRES_MAX_YC 0x000000FF
+#define RKISP1_CIF_ISP_AWB_GAINS_MAX_VAL 0x000003ff
+#define RKISP1_CIF_ISP_AWB_WINDOW_OFFSET_MAX 0x00000fff
+#define RKISP1_CIF_ISP_AWB_WINDOW_MAX_SIZE 0x00001fff
+#define RKISP1_CIF_ISP_AWB_CBCR_MAX_REF 0x000000ff
+#define RKISP1_CIF_ISP_AWB_THRES_MAX_YC 0x000000ff
/* AE */
/* ISP_EXP_CTRL */
@@ -504,24 +504,24 @@
#define RKISP1_CIF_ISP_EXP_CTRL_MEASMODE_1 BIT(31)
/* ISP_EXP_H_SIZE */
-#define RKISP1_CIF_ISP_EXP_H_SIZE_SET_V10(x) ((x) & 0x7FF)
-#define RKISP1_CIF_ISP_EXP_HEIGHT_MASK_V10 0x000007FF
-#define RKISP1_CIF_ISP_EXP_H_SIZE_SET_V12(x) ((x) & 0x7FF)
-#define RKISP1_CIF_ISP_EXP_HEIGHT_MASK_V12 0x000007FF
+#define RKISP1_CIF_ISP_EXP_H_SIZE_SET_V10(x) ((x) & 0x7ff)
+#define RKISP1_CIF_ISP_EXP_HEIGHT_MASK_V10 0x000007ff
+#define RKISP1_CIF_ISP_EXP_H_SIZE_SET_V12(x) ((x) & 0x7ff)
+#define RKISP1_CIF_ISP_EXP_HEIGHT_MASK_V12 0x000007ff
/* ISP_EXP_V_SIZE : vertical size must be a multiple of 2). */
-#define RKISP1_CIF_ISP_EXP_V_SIZE_SET_V10(x) ((x) & 0x7FE)
-#define RKISP1_CIF_ISP_EXP_V_SIZE_SET_V12(x) (((x) & 0x7FE) << 16)
+#define RKISP1_CIF_ISP_EXP_V_SIZE_SET_V10(x) ((x) & 0x7fe)
+#define RKISP1_CIF_ISP_EXP_V_SIZE_SET_V12(x) (((x) & 0x7fe) << 16)
/* ISP_EXP_H_OFFSET */
-#define RKISP1_CIF_ISP_EXP_H_OFFSET_SET_V10(x) ((x) & 0x1FFF)
+#define RKISP1_CIF_ISP_EXP_H_OFFSET_SET_V10(x) ((x) & 0x1fff)
#define RKISP1_CIF_ISP_EXP_MAX_HOFFS_V10 2424
-#define RKISP1_CIF_ISP_EXP_H_OFFSET_SET_V12(x) ((x) & 0x1FFF)
-#define RKISP1_CIF_ISP_EXP_MAX_HOFFS_V12 0x1FFF
+#define RKISP1_CIF_ISP_EXP_H_OFFSET_SET_V12(x) ((x) & 0x1fff)
+#define RKISP1_CIF_ISP_EXP_MAX_HOFFS_V12 0x1fff
/* ISP_EXP_V_OFFSET */
-#define RKISP1_CIF_ISP_EXP_V_OFFSET_SET_V10(x) ((x) & 0x1FFF)
+#define RKISP1_CIF_ISP_EXP_V_OFFSET_SET_V10(x) ((x) & 0x1fff)
#define RKISP1_CIF_ISP_EXP_MAX_VOFFS_V10 1806
-#define RKISP1_CIF_ISP_EXP_V_OFFSET_SET_V12(x) (((x) & 0x1FFF) << 16)
-#define RKISP1_CIF_ISP_EXP_MAX_VOFFS_V12 0x1FFF
+#define RKISP1_CIF_ISP_EXP_V_OFFSET_SET_V12(x) (((x) & 0x1fff) << 16)
+#define RKISP1_CIF_ISP_EXP_MAX_VOFFS_V12 0x1fff
#define RKISP1_CIF_ISP_EXP_ROW_NUM_V10 5
#define RKISP1_CIF_ISP_EXP_COLUMN_NUM_V10 5
@@ -545,10 +545,10 @@
#define RKISP1_CIF_ISP_EXP_NUM_LUMA_REGS_V12 \
(RKISP1_CIF_ISP_EXP_ROW_NUM_V12 * RKISP1_CIF_ISP_EXP_COLUMN_NUM_V12)
-#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE_V12 0x7FF
-#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE_V12 0xE
-#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE_V12 0x7FE
-#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE_V12 0xE
+#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE_V12 0x7ff
+#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE_V12 0xe
+#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE_V12 0x7fe
+#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE_V12 0xe
#define RKISP1_CIF_ISP_EXP_MAX_HSIZE_V12 \
(RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE_V12 * RKISP1_CIF_ISP_EXP_COLUMN_NUM_V12 + 1)
#define RKISP1_CIF_ISP_EXP_MIN_HSIZE_V12 \
@@ -558,26 +558,26 @@
#define RKISP1_CIF_ISP_EXP_MIN_VSIZE_V12 \
(RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE_V12 * RKISP1_CIF_ISP_EXP_ROW_NUM_V12 + 1)
-#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy0_V12(x) ((x) & 0xFF)
-#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy1_V12(x) (((x) >> 8) & 0xFF)
-#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy2_V12(x) (((x) >> 16) & 0xFF)
-#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy3_V12(x) (((x) >> 24) & 0xFF)
+#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy0_V12(x) ((x) & 0xff)
+#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy1_V12(x) (((x) >> 8) & 0xff)
+#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy2_V12(x) (((x) >> 16) & 0xff)
+#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy3_V12(x) (((x) >> 24) & 0xff)
/* LSC: ISP_LSC_CTRL */
#define RKISP1_CIF_ISP_LSC_CTRL_ENA BIT(0)
-#define RKISP1_CIF_ISP_LSC_SECT_SIZE_RESERVED 0xFC00FC00
-#define RKISP1_CIF_ISP_LSC_GRAD_RESERVED_V10 0xF000F000
-#define RKISP1_CIF_ISP_LSC_SAMPLE_RESERVED_V10 0xF000F000
-#define RKISP1_CIF_ISP_LSC_GRAD_RESERVED_V12 0xE000E000
-#define RKISP1_CIF_ISP_LSC_SAMPLE_RESERVED_V12 0xE000E000
+#define RKISP1_CIF_ISP_LSC_SECT_SIZE_RESERVED 0xfc00fc00
+#define RKISP1_CIF_ISP_LSC_GRAD_RESERVED_V10 0xf000f000
+#define RKISP1_CIF_ISP_LSC_SAMPLE_RESERVED_V10 0xf000f000
+#define RKISP1_CIF_ISP_LSC_GRAD_RESERVED_V12 0xe000e000
+#define RKISP1_CIF_ISP_LSC_SAMPLE_RESERVED_V12 0xe000e000
#define RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(v0, v1) \
- (((v0) & 0xFFF) | (((v1) & 0xFFF) << 12))
+ (((v0) & 0xfff) | (((v1) & 0xfff) << 12))
#define RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(v0, v1) \
- (((v0) & 0x1FFF) | (((v1) & 0x1FFF) << 13))
+ (((v0) & 0x1fff) | (((v1) & 0x1fff) << 13))
#define RKISP1_CIF_ISP_LSC_SECT_SIZE(v0, v1) \
- (((v0) & 0xFFF) | (((v1) & 0xFFF) << 16))
+ (((v0) & 0xfff) | (((v1) & 0xfff) << 16))
#define RKISP1_CIF_ISP_LSC_SECT_GRAD(v0, v1) \
- (((v0) & 0xFFF) | (((v1) & 0xFFF) << 16))
+ (((v0) & 0xfff) | (((v1) & 0xfff) << 16))
/* LSC: ISP_LSC_TABLE_SEL */
#define RKISP1_CIF_ISP_LSC_TABLE_0 0
@@ -601,19 +601,19 @@
#define RKISP1_CIF_ISP_FLT_CHROMA_V_MODE(x) (((x) & 0x3) << 4)
#define RKISP1_CIF_ISP_FLT_CHROMA_H_MODE(x) (((x) & 0x3) << 6)
#define RKISP1_CIF_ISP_FLT_CHROMA_MODE_MAX 3
-#define RKISP1_CIF_ISP_FLT_GREEN_STAGE1(x) (((x) & 0xF) << 8)
+#define RKISP1_CIF_ISP_FLT_GREEN_STAGE1(x) (((x) & 0xf) << 8)
#define RKISP1_CIF_ISP_FLT_GREEN_STAGE1_MAX 8
-#define RKISP1_CIF_ISP_FLT_THREAD_RESERVED 0xFFFFFC00
-#define RKISP1_CIF_ISP_FLT_FAC_RESERVED 0xFFFFFFC0
-#define RKISP1_CIF_ISP_FLT_LUM_WEIGHT_RESERVED 0xFFF80000
+#define RKISP1_CIF_ISP_FLT_THREAD_RESERVED 0xfffffc00
+#define RKISP1_CIF_ISP_FLT_FAC_RESERVED 0xffffffc0
+#define RKISP1_CIF_ISP_FLT_LUM_WEIGHT_RESERVED 0xfff80000
-#define RKISP1_CIF_ISP_CTK_COEFF_RESERVED 0xFFFFF800
-#define RKISP1_CIF_ISP_XTALK_OFFSET_RESERVED 0xFFFFF000
+#define RKISP1_CIF_ISP_CTK_COEFF_RESERVED 0xfffff800
+#define RKISP1_CIF_ISP_XTALK_OFFSET_RESERVED 0xfffff000
/* GOC */
#define RKISP1_CIF_ISP_GAMMA_OUT_MODE_EQU BIT(0)
#define RKISP1_CIF_ISP_GOC_MODE_MAX 1
-#define RKISP1_CIF_ISP_GOC_RESERVED 0xFFFFF800
+#define RKISP1_CIF_ISP_GOC_RESERVED 0xfffff800
/* ISP_CTRL BIT 11*/
#define RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA_READ(x) (((x) >> 11) & 1)
@@ -643,22 +643,22 @@
#define RKISP1_CIFISP_DEGAMMA_X_RESERVED \
((1 << 31) | (1 << 27) | (1 << 23) | (1 << 19) |\
(1 << 15) | (1 << 11) | (1 << 7) | (1 << 3))
-#define RKISP1_CIFISP_DEGAMMA_Y_RESERVED 0xFFFFF000
+#define RKISP1_CIFISP_DEGAMMA_Y_RESERVED 0xfffff000
/* GAMMA-OUT */
#define RKISP1_CIF_ISP_GAMMA_VALUE_V12(x, y) \
- (((x) & 0xFFF) << 16 | ((y) & 0xFFF) << 0)
+ (((x) & 0xfff) << 16 | ((y) & 0xfff) << 0)
/* AFM */
#define RKISP1_CIF_ISP_AFM_ENA BIT(0)
-#define RKISP1_CIF_ISP_AFM_THRES_RESERVED 0xFFFF0000
-#define RKISP1_CIF_ISP_AFM_VAR_SHIFT_RESERVED 0xFFF8FFF8
-#define RKISP1_CIF_ISP_AFM_WINDOW_X_RESERVED 0xE000
-#define RKISP1_CIF_ISP_AFM_WINDOW_Y_RESERVED 0xF000
+#define RKISP1_CIF_ISP_AFM_THRES_RESERVED 0xffff0000
+#define RKISP1_CIF_ISP_AFM_VAR_SHIFT_RESERVED 0xfff8fff8
+#define RKISP1_CIF_ISP_AFM_WINDOW_X_RESERVED 0xe000
+#define RKISP1_CIF_ISP_AFM_WINDOW_Y_RESERVED 0xf000
#define RKISP1_CIF_ISP_AFM_WINDOW_X_MIN 0x5
#define RKISP1_CIF_ISP_AFM_WINDOW_Y_MIN 0x2
-#define RKISP1_CIF_ISP_AFM_WINDOW_X(x) (((x) & 0x1FFF) << 16)
-#define RKISP1_CIF_ISP_AFM_WINDOW_Y(x) ((x) & 0x1FFF)
+#define RKISP1_CIF_ISP_AFM_WINDOW_X(x) (((x) & 0x1fff) << 16)
+#define RKISP1_CIF_ISP_AFM_WINDOW_Y(x) ((x) & 0x1fff)
#define RKISP1_CIF_ISP_AFM_SET_SHIFT_a_V12(x, y) (((x) & 0x7) << 16 | ((y) & 0x7) << 0)
#define RKISP1_CIF_ISP_AFM_SET_SHIFT_b_V12(x, y) (((x) & 0x7) << 20 | ((y) & 0x7) << 4)
#define RKISP1_CIF_ISP_AFM_SET_SHIFT_c_V12(x, y) (((x) & 0x7) << 24 | ((y) & 0x7) << 8)
@@ -676,9 +676,9 @@
#define RKISP1_CIF_ISP_DPF_MODE_AWB_GAIN_COMP BIT(7)
#define RKISP1_CIF_ISP_DPF_MODE_LSC_GAIN_COMP BIT(8)
#define RKISP1_CIF_ISP_DPF_MODE_USE_NF_GAIN BIT(9)
-#define RKISP1_CIF_ISP_DPF_NF_GAIN_RESERVED 0xFFFFF000
-#define RKISP1_CIF_ISP_DPF_SPATIAL_COEFF_MAX 0x1F
-#define RKISP1_CIF_ISP_DPF_NLL_COEFF_N_MAX 0x3FF
+#define RKISP1_CIF_ISP_DPF_NF_GAIN_RESERVED 0xfffff000
+#define RKISP1_CIF_ISP_DPF_SPATIAL_COEFF_MAX 0x1f
+#define RKISP1_CIF_ISP_DPF_NLL_COEFF_N_MAX 0x3ff
/* =================================================================== */
/* CIF Registers */
@@ -686,7 +686,7 @@
#define RKISP1_CIF_CTRL_BASE 0x00000000
#define RKISP1_CIF_VI_CCL (RKISP1_CIF_CTRL_BASE + 0x00000000)
#define RKISP1_CIF_VI_ID (RKISP1_CIF_CTRL_BASE + 0x00000008)
-#define RKISP1_CIF_VI_ISP_CLK_CTRL_V12 (RKISP1_CIF_CTRL_BASE + 0x0000000C)
+#define RKISP1_CIF_VI_ISP_CLK_CTRL_V12 (RKISP1_CIF_CTRL_BASE + 0x0000000c)
#define RKISP1_CIF_VI_ICCL (RKISP1_CIF_CTRL_BASE + 0x00000010)
#define RKISP1_CIF_VI_IRCL (RKISP1_CIF_CTRL_BASE + 0x00000014)
#define RKISP1_CIF_VI_DPCL (RKISP1_CIF_CTRL_BASE + 0x00000018)
@@ -695,11 +695,11 @@
#define RKISP1_CIF_IMG_EFF_CTRL (RKISP1_CIF_IMG_EFF_BASE + 0x00000000)
#define RKISP1_CIF_IMG_EFF_COLOR_SEL (RKISP1_CIF_IMG_EFF_BASE + 0x00000004)
#define RKISP1_CIF_IMG_EFF_MAT_1 (RKISP1_CIF_IMG_EFF_BASE + 0x00000008)
-#define RKISP1_CIF_IMG_EFF_MAT_2 (RKISP1_CIF_IMG_EFF_BASE + 0x0000000C)
+#define RKISP1_CIF_IMG_EFF_MAT_2 (RKISP1_CIF_IMG_EFF_BASE + 0x0000000c)
#define RKISP1_CIF_IMG_EFF_MAT_3 (RKISP1_CIF_IMG_EFF_BASE + 0x00000010)
#define RKISP1_CIF_IMG_EFF_MAT_4 (RKISP1_CIF_IMG_EFF_BASE + 0x00000014)
#define RKISP1_CIF_IMG_EFF_MAT_5 (RKISP1_CIF_IMG_EFF_BASE + 0x00000018)
-#define RKISP1_CIF_IMG_EFF_TINT (RKISP1_CIF_IMG_EFF_BASE + 0x0000001C)
+#define RKISP1_CIF_IMG_EFF_TINT (RKISP1_CIF_IMG_EFF_BASE + 0x0000001c)
#define RKISP1_CIF_IMG_EFF_CTRL_SHD (RKISP1_CIF_IMG_EFF_BASE + 0x00000020)
#define RKISP1_CIF_IMG_EFF_SHARPEN (RKISP1_CIF_IMG_EFF_BASE + 0x00000024)
@@ -707,7 +707,7 @@
#define RKISP1_CIF_SUPER_IMP_CTRL (RKISP1_CIF_SUPER_IMP_BASE + 0x00000000)
#define RKISP1_CIF_SUPER_IMP_OFFSET_X (RKISP1_CIF_SUPER_IMP_BASE + 0x00000004)
#define RKISP1_CIF_SUPER_IMP_OFFSET_Y (RKISP1_CIF_SUPER_IMP_BASE + 0x00000008)
-#define RKISP1_CIF_SUPER_IMP_COLOR_Y (RKISP1_CIF_SUPER_IMP_BASE + 0x0000000C)
+#define RKISP1_CIF_SUPER_IMP_COLOR_Y (RKISP1_CIF_SUPER_IMP_BASE + 0x0000000c)
#define RKISP1_CIF_SUPER_IMP_COLOR_CB (RKISP1_CIF_SUPER_IMP_BASE + 0x00000010)
#define RKISP1_CIF_SUPER_IMP_COLOR_CR (RKISP1_CIF_SUPER_IMP_BASE + 0x00000014)
@@ -715,148 +715,148 @@
#define RKISP1_CIF_ISP_CTRL (RKISP1_CIF_ISP_BASE + 0x00000000)
#define RKISP1_CIF_ISP_ACQ_PROP (RKISP1_CIF_ISP_BASE + 0x00000004)
#define RKISP1_CIF_ISP_ACQ_H_OFFS (RKISP1_CIF_ISP_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_ACQ_V_OFFS (RKISP1_CIF_ISP_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_ACQ_V_OFFS (RKISP1_CIF_ISP_BASE + 0x0000000c)
#define RKISP1_CIF_ISP_ACQ_H_SIZE (RKISP1_CIF_ISP_BASE + 0x00000010)
#define RKISP1_CIF_ISP_ACQ_V_SIZE (RKISP1_CIF_ISP_BASE + 0x00000014)
#define RKISP1_CIF_ISP_ACQ_NR_FRAMES (RKISP1_CIF_ISP_BASE + 0x00000018)
-#define RKISP1_CIF_ISP_GAMMA_DX_LO (RKISP1_CIF_ISP_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_GAMMA_DX_LO (RKISP1_CIF_ISP_BASE + 0x0000001c)
#define RKISP1_CIF_ISP_GAMMA_DX_HI (RKISP1_CIF_ISP_BASE + 0x00000020)
#define RKISP1_CIF_ISP_GAMMA_R_Y0 (RKISP1_CIF_ISP_BASE + 0x00000024)
#define RKISP1_CIF_ISP_GAMMA_R_Y1 (RKISP1_CIF_ISP_BASE + 0x00000028)
-#define RKISP1_CIF_ISP_GAMMA_R_Y2 (RKISP1_CIF_ISP_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_GAMMA_R_Y2 (RKISP1_CIF_ISP_BASE + 0x0000002c)
#define RKISP1_CIF_ISP_GAMMA_R_Y3 (RKISP1_CIF_ISP_BASE + 0x00000030)
#define RKISP1_CIF_ISP_GAMMA_R_Y4 (RKISP1_CIF_ISP_BASE + 0x00000034)
#define RKISP1_CIF_ISP_GAMMA_R_Y5 (RKISP1_CIF_ISP_BASE + 0x00000038)
-#define RKISP1_CIF_ISP_GAMMA_R_Y6 (RKISP1_CIF_ISP_BASE + 0x0000003C)
+#define RKISP1_CIF_ISP_GAMMA_R_Y6 (RKISP1_CIF_ISP_BASE + 0x0000003c)
#define RKISP1_CIF_ISP_GAMMA_R_Y7 (RKISP1_CIF_ISP_BASE + 0x00000040)
#define RKISP1_CIF_ISP_GAMMA_R_Y8 (RKISP1_CIF_ISP_BASE + 0x00000044)
#define RKISP1_CIF_ISP_GAMMA_R_Y9 (RKISP1_CIF_ISP_BASE + 0x00000048)
-#define RKISP1_CIF_ISP_GAMMA_R_Y10 (RKISP1_CIF_ISP_BASE + 0x0000004C)
+#define RKISP1_CIF_ISP_GAMMA_R_Y10 (RKISP1_CIF_ISP_BASE + 0x0000004c)
#define RKISP1_CIF_ISP_GAMMA_R_Y11 (RKISP1_CIF_ISP_BASE + 0x00000050)
#define RKISP1_CIF_ISP_GAMMA_R_Y12 (RKISP1_CIF_ISP_BASE + 0x00000054)
#define RKISP1_CIF_ISP_GAMMA_R_Y13 (RKISP1_CIF_ISP_BASE + 0x00000058)
-#define RKISP1_CIF_ISP_GAMMA_R_Y14 (RKISP1_CIF_ISP_BASE + 0x0000005C)
+#define RKISP1_CIF_ISP_GAMMA_R_Y14 (RKISP1_CIF_ISP_BASE + 0x0000005c)
#define RKISP1_CIF_ISP_GAMMA_R_Y15 (RKISP1_CIF_ISP_BASE + 0x00000060)
#define RKISP1_CIF_ISP_GAMMA_R_Y16 (RKISP1_CIF_ISP_BASE + 0x00000064)
#define RKISP1_CIF_ISP_GAMMA_G_Y0 (RKISP1_CIF_ISP_BASE + 0x00000068)
-#define RKISP1_CIF_ISP_GAMMA_G_Y1 (RKISP1_CIF_ISP_BASE + 0x0000006C)
+#define RKISP1_CIF_ISP_GAMMA_G_Y1 (RKISP1_CIF_ISP_BASE + 0x0000006c)
#define RKISP1_CIF_ISP_GAMMA_G_Y2 (RKISP1_CIF_ISP_BASE + 0x00000070)
#define RKISP1_CIF_ISP_GAMMA_G_Y3 (RKISP1_CIF_ISP_BASE + 0x00000074)
#define RKISP1_CIF_ISP_GAMMA_G_Y4 (RKISP1_CIF_ISP_BASE + 0x00000078)
-#define RKISP1_CIF_ISP_GAMMA_G_Y5 (RKISP1_CIF_ISP_BASE + 0x0000007C)
+#define RKISP1_CIF_ISP_GAMMA_G_Y5 (RKISP1_CIF_ISP_BASE + 0x0000007c)
#define RKISP1_CIF_ISP_GAMMA_G_Y6 (RKISP1_CIF_ISP_BASE + 0x00000080)
#define RKISP1_CIF_ISP_GAMMA_G_Y7 (RKISP1_CIF_ISP_BASE + 0x00000084)
#define RKISP1_CIF_ISP_GAMMA_G_Y8 (RKISP1_CIF_ISP_BASE + 0x00000088)
-#define RKISP1_CIF_ISP_GAMMA_G_Y9 (RKISP1_CIF_ISP_BASE + 0x0000008C)
+#define RKISP1_CIF_ISP_GAMMA_G_Y9 (RKISP1_CIF_ISP_BASE + 0x0000008c)
#define RKISP1_CIF_ISP_GAMMA_G_Y10 (RKISP1_CIF_ISP_BASE + 0x00000090)
#define RKISP1_CIF_ISP_GAMMA_G_Y11 (RKISP1_CIF_ISP_BASE + 0x00000094)
#define RKISP1_CIF_ISP_GAMMA_G_Y12 (RKISP1_CIF_ISP_BASE + 0x00000098)
-#define RKISP1_CIF_ISP_GAMMA_G_Y13 (RKISP1_CIF_ISP_BASE + 0x0000009C)
-#define RKISP1_CIF_ISP_GAMMA_G_Y14 (RKISP1_CIF_ISP_BASE + 0x000000A0)
-#define RKISP1_CIF_ISP_GAMMA_G_Y15 (RKISP1_CIF_ISP_BASE + 0x000000A4)
-#define RKISP1_CIF_ISP_GAMMA_G_Y16 (RKISP1_CIF_ISP_BASE + 0x000000A8)
-#define RKISP1_CIF_ISP_GAMMA_B_Y0 (RKISP1_CIF_ISP_BASE + 0x000000AC)
-#define RKISP1_CIF_ISP_GAMMA_B_Y1 (RKISP1_CIF_ISP_BASE + 0x000000B0)
-#define RKISP1_CIF_ISP_GAMMA_B_Y2 (RKISP1_CIF_ISP_BASE + 0x000000B4)
-#define RKISP1_CIF_ISP_GAMMA_B_Y3 (RKISP1_CIF_ISP_BASE + 0x000000B8)
-#define RKISP1_CIF_ISP_GAMMA_B_Y4 (RKISP1_CIF_ISP_BASE + 0x000000BC)
-#define RKISP1_CIF_ISP_GAMMA_B_Y5 (RKISP1_CIF_ISP_BASE + 0x000000C0)
-#define RKISP1_CIF_ISP_GAMMA_B_Y6 (RKISP1_CIF_ISP_BASE + 0x000000C4)
-#define RKISP1_CIF_ISP_GAMMA_B_Y7 (RKISP1_CIF_ISP_BASE + 0x000000C8)
-#define RKISP1_CIF_ISP_GAMMA_B_Y8 (RKISP1_CIF_ISP_BASE + 0x000000CC)
-#define RKISP1_CIF_ISP_GAMMA_B_Y9 (RKISP1_CIF_ISP_BASE + 0x000000D0)
-#define RKISP1_CIF_ISP_GAMMA_B_Y10 (RKISP1_CIF_ISP_BASE + 0x000000D4)
-#define RKISP1_CIF_ISP_GAMMA_B_Y11 (RKISP1_CIF_ISP_BASE + 0x000000D8)
-#define RKISP1_CIF_ISP_GAMMA_B_Y12 (RKISP1_CIF_ISP_BASE + 0x000000DC)
-#define RKISP1_CIF_ISP_GAMMA_B_Y13 (RKISP1_CIF_ISP_BASE + 0x000000E0)
-#define RKISP1_CIF_ISP_GAMMA_B_Y14 (RKISP1_CIF_ISP_BASE + 0x000000E4)
-#define RKISP1_CIF_ISP_GAMMA_B_Y15 (RKISP1_CIF_ISP_BASE + 0x000000E8)
-#define RKISP1_CIF_ISP_GAMMA_B_Y16 (RKISP1_CIF_ISP_BASE + 0x000000EC)
+#define RKISP1_CIF_ISP_GAMMA_G_Y13 (RKISP1_CIF_ISP_BASE + 0x0000009c)
+#define RKISP1_CIF_ISP_GAMMA_G_Y14 (RKISP1_CIF_ISP_BASE + 0x000000a0)
+#define RKISP1_CIF_ISP_GAMMA_G_Y15 (RKISP1_CIF_ISP_BASE + 0x000000a4)
+#define RKISP1_CIF_ISP_GAMMA_G_Y16 (RKISP1_CIF_ISP_BASE + 0x000000a8)
+#define RKISP1_CIF_ISP_GAMMA_B_Y0 (RKISP1_CIF_ISP_BASE + 0x000000ac)
+#define RKISP1_CIF_ISP_GAMMA_B_Y1 (RKISP1_CIF_ISP_BASE + 0x000000b0)
+#define RKISP1_CIF_ISP_GAMMA_B_Y2 (RKISP1_CIF_ISP_BASE + 0x000000b4)
+#define RKISP1_CIF_ISP_GAMMA_B_Y3 (RKISP1_CIF_ISP_BASE + 0x000000b8)
+#define RKISP1_CIF_ISP_GAMMA_B_Y4 (RKISP1_CIF_ISP_BASE + 0x000000bc)
+#define RKISP1_CIF_ISP_GAMMA_B_Y5 (RKISP1_CIF_ISP_BASE + 0x000000c0)
+#define RKISP1_CIF_ISP_GAMMA_B_Y6 (RKISP1_CIF_ISP_BASE + 0x000000c4)
+#define RKISP1_CIF_ISP_GAMMA_B_Y7 (RKISP1_CIF_ISP_BASE + 0x000000c8)
+#define RKISP1_CIF_ISP_GAMMA_B_Y8 (RKISP1_CIF_ISP_BASE + 0x000000cc)
+#define RKISP1_CIF_ISP_GAMMA_B_Y9 (RKISP1_CIF_ISP_BASE + 0x000000d0)
+#define RKISP1_CIF_ISP_GAMMA_B_Y10 (RKISP1_CIF_ISP_BASE + 0x000000d4)
+#define RKISP1_CIF_ISP_GAMMA_B_Y11 (RKISP1_CIF_ISP_BASE + 0x000000d8)
+#define RKISP1_CIF_ISP_GAMMA_B_Y12 (RKISP1_CIF_ISP_BASE + 0x000000dc)
+#define RKISP1_CIF_ISP_GAMMA_B_Y13 (RKISP1_CIF_ISP_BASE + 0x000000e0)
+#define RKISP1_CIF_ISP_GAMMA_B_Y14 (RKISP1_CIF_ISP_BASE + 0x000000e4)
+#define RKISP1_CIF_ISP_GAMMA_B_Y15 (RKISP1_CIF_ISP_BASE + 0x000000e8)
+#define RKISP1_CIF_ISP_GAMMA_B_Y16 (RKISP1_CIF_ISP_BASE + 0x000000ec)
#define RKISP1_CIF_ISP_AWB_PROP_V10 (RKISP1_CIF_ISP_BASE + 0x00000110)
#define RKISP1_CIF_ISP_AWB_WND_H_OFFS_V10 (RKISP1_CIF_ISP_BASE + 0x00000114)
#define RKISP1_CIF_ISP_AWB_WND_V_OFFS_V10 (RKISP1_CIF_ISP_BASE + 0x00000118)
-#define RKISP1_CIF_ISP_AWB_WND_H_SIZE_V10 (RKISP1_CIF_ISP_BASE + 0x0000011C)
+#define RKISP1_CIF_ISP_AWB_WND_H_SIZE_V10 (RKISP1_CIF_ISP_BASE + 0x0000011c)
#define RKISP1_CIF_ISP_AWB_WND_V_SIZE_V10 (RKISP1_CIF_ISP_BASE + 0x00000120)
#define RKISP1_CIF_ISP_AWB_FRAMES_V10 (RKISP1_CIF_ISP_BASE + 0x00000124)
#define RKISP1_CIF_ISP_AWB_REF_V10 (RKISP1_CIF_ISP_BASE + 0x00000128)
-#define RKISP1_CIF_ISP_AWB_THRESH_V10 (RKISP1_CIF_ISP_BASE + 0x0000012C)
+#define RKISP1_CIF_ISP_AWB_THRESH_V10 (RKISP1_CIF_ISP_BASE + 0x0000012c)
#define RKISP1_CIF_ISP_AWB_GAIN_G_V10 (RKISP1_CIF_ISP_BASE + 0x00000138)
-#define RKISP1_CIF_ISP_AWB_GAIN_RB_V10 (RKISP1_CIF_ISP_BASE + 0x0000013C)
+#define RKISP1_CIF_ISP_AWB_GAIN_RB_V10 (RKISP1_CIF_ISP_BASE + 0x0000013c)
#define RKISP1_CIF_ISP_AWB_WHITE_CNT_V10 (RKISP1_CIF_ISP_BASE + 0x00000140)
#define RKISP1_CIF_ISP_AWB_MEAN_V10 (RKISP1_CIF_ISP_BASE + 0x00000144)
#define RKISP1_CIF_ISP_AWB_PROP_V12 (RKISP1_CIF_ISP_BASE + 0x00000110)
#define RKISP1_CIF_ISP_AWB_SIZE_V12 (RKISP1_CIF_ISP_BASE + 0x00000114)
#define RKISP1_CIF_ISP_AWB_OFFS_V12 (RKISP1_CIF_ISP_BASE + 0x00000118)
-#define RKISP1_CIF_ISP_AWB_REF_V12 (RKISP1_CIF_ISP_BASE + 0x0000011C)
+#define RKISP1_CIF_ISP_AWB_REF_V12 (RKISP1_CIF_ISP_BASE + 0x0000011c)
#define RKISP1_CIF_ISP_AWB_THRESH_V12 (RKISP1_CIF_ISP_BASE + 0x00000120)
#define RKISP1_CIF_ISP_X_COOR12_V12 (RKISP1_CIF_ISP_BASE + 0x00000124)
#define RKISP1_CIF_ISP_X_COOR34_V12 (RKISP1_CIF_ISP_BASE + 0x00000128)
-#define RKISP1_CIF_ISP_AWB_WHITE_CNT_V12 (RKISP1_CIF_ISP_BASE + 0x0000012C)
+#define RKISP1_CIF_ISP_AWB_WHITE_CNT_V12 (RKISP1_CIF_ISP_BASE + 0x0000012c)
#define RKISP1_CIF_ISP_AWB_MEAN_V12 (RKISP1_CIF_ISP_BASE + 0x00000130)
#define RKISP1_CIF_ISP_DEGAIN_V12 (RKISP1_CIF_ISP_BASE + 0x00000134)
#define RKISP1_CIF_ISP_AWB_GAIN_G_V12 (RKISP1_CIF_ISP_BASE + 0x00000138)
-#define RKISP1_CIF_ISP_AWB_GAIN_RB_V12 (RKISP1_CIF_ISP_BASE + 0x0000013C)
+#define RKISP1_CIF_ISP_AWB_GAIN_RB_V12 (RKISP1_CIF_ISP_BASE + 0x0000013c)
#define RKISP1_CIF_ISP_REGION_LINE_V12 (RKISP1_CIF_ISP_BASE + 0x00000140)
#define RKISP1_CIF_ISP_WP_CNT_REGION0_V12 (RKISP1_CIF_ISP_BASE + 0x00000160)
#define RKISP1_CIF_ISP_WP_CNT_REGION1_V12 (RKISP1_CIF_ISP_BASE + 0x00000164)
#define RKISP1_CIF_ISP_WP_CNT_REGION2_V12 (RKISP1_CIF_ISP_BASE + 0x00000168)
-#define RKISP1_CIF_ISP_WP_CNT_REGION3_V12 (RKISP1_CIF_ISP_BASE + 0x0000016C)
+#define RKISP1_CIF_ISP_WP_CNT_REGION3_V12 (RKISP1_CIF_ISP_BASE + 0x0000016c)
#define RKISP1_CIF_ISP_CC_COEFF_0 (RKISP1_CIF_ISP_BASE + 0x00000170)
#define RKISP1_CIF_ISP_CC_COEFF_1 (RKISP1_CIF_ISP_BASE + 0x00000174)
#define RKISP1_CIF_ISP_CC_COEFF_2 (RKISP1_CIF_ISP_BASE + 0x00000178)
-#define RKISP1_CIF_ISP_CC_COEFF_3 (RKISP1_CIF_ISP_BASE + 0x0000017C)
+#define RKISP1_CIF_ISP_CC_COEFF_3 (RKISP1_CIF_ISP_BASE + 0x0000017c)
#define RKISP1_CIF_ISP_CC_COEFF_4 (RKISP1_CIF_ISP_BASE + 0x00000180)
#define RKISP1_CIF_ISP_CC_COEFF_5 (RKISP1_CIF_ISP_BASE + 0x00000184)
#define RKISP1_CIF_ISP_CC_COEFF_6 (RKISP1_CIF_ISP_BASE + 0x00000188)
-#define RKISP1_CIF_ISP_CC_COEFF_7 (RKISP1_CIF_ISP_BASE + 0x0000018C)
+#define RKISP1_CIF_ISP_CC_COEFF_7 (RKISP1_CIF_ISP_BASE + 0x0000018c)
#define RKISP1_CIF_ISP_CC_COEFF_8 (RKISP1_CIF_ISP_BASE + 0x00000190)
#define RKISP1_CIF_ISP_OUT_H_OFFS (RKISP1_CIF_ISP_BASE + 0x00000194)
#define RKISP1_CIF_ISP_OUT_V_OFFS (RKISP1_CIF_ISP_BASE + 0x00000198)
-#define RKISP1_CIF_ISP_OUT_H_SIZE (RKISP1_CIF_ISP_BASE + 0x0000019C)
-#define RKISP1_CIF_ISP_OUT_V_SIZE (RKISP1_CIF_ISP_BASE + 0x000001A0)
-#define RKISP1_CIF_ISP_DEMOSAIC (RKISP1_CIF_ISP_BASE + 0x000001A4)
-#define RKISP1_CIF_ISP_FLAGS_SHD (RKISP1_CIF_ISP_BASE + 0x000001A8)
-#define RKISP1_CIF_ISP_OUT_H_OFFS_SHD (RKISP1_CIF_ISP_BASE + 0x000001AC)
-#define RKISP1_CIF_ISP_OUT_V_OFFS_SHD (RKISP1_CIF_ISP_BASE + 0x000001B0)
-#define RKISP1_CIF_ISP_OUT_H_SIZE_SHD (RKISP1_CIF_ISP_BASE + 0x000001B4)
-#define RKISP1_CIF_ISP_OUT_V_SIZE_SHD (RKISP1_CIF_ISP_BASE + 0x000001B8)
-#define RKISP1_CIF_ISP_IMSC (RKISP1_CIF_ISP_BASE + 0x000001BC)
-#define RKISP1_CIF_ISP_RIS (RKISP1_CIF_ISP_BASE + 0x000001C0)
-#define RKISP1_CIF_ISP_MIS (RKISP1_CIF_ISP_BASE + 0x000001C4)
-#define RKISP1_CIF_ISP_ICR (RKISP1_CIF_ISP_BASE + 0x000001C8)
-#define RKISP1_CIF_ISP_ISR (RKISP1_CIF_ISP_BASE + 0x000001CC)
-#define RKISP1_CIF_ISP_CT_COEFF_0 (RKISP1_CIF_ISP_BASE + 0x000001D0)
-#define RKISP1_CIF_ISP_CT_COEFF_1 (RKISP1_CIF_ISP_BASE + 0x000001D4)
-#define RKISP1_CIF_ISP_CT_COEFF_2 (RKISP1_CIF_ISP_BASE + 0x000001D8)
-#define RKISP1_CIF_ISP_CT_COEFF_3 (RKISP1_CIF_ISP_BASE + 0x000001DC)
-#define RKISP1_CIF_ISP_CT_COEFF_4 (RKISP1_CIF_ISP_BASE + 0x000001E0)
-#define RKISP1_CIF_ISP_CT_COEFF_5 (RKISP1_CIF_ISP_BASE + 0x000001E4)
-#define RKISP1_CIF_ISP_CT_COEFF_6 (RKISP1_CIF_ISP_BASE + 0x000001E8)
-#define RKISP1_CIF_ISP_CT_COEFF_7 (RKISP1_CIF_ISP_BASE + 0x000001EC)
-#define RKISP1_CIF_ISP_CT_COEFF_8 (RKISP1_CIF_ISP_BASE + 0x000001F0)
-#define RKISP1_CIF_ISP_GAMMA_OUT_MODE_V10 (RKISP1_CIF_ISP_BASE + 0x000001F4)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_0_V10 (RKISP1_CIF_ISP_BASE + 0x000001F8)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_1_V10 (RKISP1_CIF_ISP_BASE + 0x000001FC)
+#define RKISP1_CIF_ISP_OUT_H_SIZE (RKISP1_CIF_ISP_BASE + 0x0000019c)
+#define RKISP1_CIF_ISP_OUT_V_SIZE (RKISP1_CIF_ISP_BASE + 0x000001a0)
+#define RKISP1_CIF_ISP_DEMOSAIC (RKISP1_CIF_ISP_BASE + 0x000001a4)
+#define RKISP1_CIF_ISP_FLAGS_SHD (RKISP1_CIF_ISP_BASE + 0x000001a8)
+#define RKISP1_CIF_ISP_OUT_H_OFFS_SHD (RKISP1_CIF_ISP_BASE + 0x000001ac)
+#define RKISP1_CIF_ISP_OUT_V_OFFS_SHD (RKISP1_CIF_ISP_BASE + 0x000001b0)
+#define RKISP1_CIF_ISP_OUT_H_SIZE_SHD (RKISP1_CIF_ISP_BASE + 0x000001b4)
+#define RKISP1_CIF_ISP_OUT_V_SIZE_SHD (RKISP1_CIF_ISP_BASE + 0x000001b8)
+#define RKISP1_CIF_ISP_IMSC (RKISP1_CIF_ISP_BASE + 0x000001bc)
+#define RKISP1_CIF_ISP_RIS (RKISP1_CIF_ISP_BASE + 0x000001c0)
+#define RKISP1_CIF_ISP_MIS (RKISP1_CIF_ISP_BASE + 0x000001c4)
+#define RKISP1_CIF_ISP_ICR (RKISP1_CIF_ISP_BASE + 0x000001c8)
+#define RKISP1_CIF_ISP_ISR (RKISP1_CIF_ISP_BASE + 0x000001cc)
+#define RKISP1_CIF_ISP_CT_COEFF_0 (RKISP1_CIF_ISP_BASE + 0x000001d0)
+#define RKISP1_CIF_ISP_CT_COEFF_1 (RKISP1_CIF_ISP_BASE + 0x000001d4)
+#define RKISP1_CIF_ISP_CT_COEFF_2 (RKISP1_CIF_ISP_BASE + 0x000001d8)
+#define RKISP1_CIF_ISP_CT_COEFF_3 (RKISP1_CIF_ISP_BASE + 0x000001dc)
+#define RKISP1_CIF_ISP_CT_COEFF_4 (RKISP1_CIF_ISP_BASE + 0x000001e0)
+#define RKISP1_CIF_ISP_CT_COEFF_5 (RKISP1_CIF_ISP_BASE + 0x000001e4)
+#define RKISP1_CIF_ISP_CT_COEFF_6 (RKISP1_CIF_ISP_BASE + 0x000001e8)
+#define RKISP1_CIF_ISP_CT_COEFF_7 (RKISP1_CIF_ISP_BASE + 0x000001ec)
+#define RKISP1_CIF_ISP_CT_COEFF_8 (RKISP1_CIF_ISP_BASE + 0x000001f0)
+#define RKISP1_CIF_ISP_GAMMA_OUT_MODE_V10 (RKISP1_CIF_ISP_BASE + 0x000001f4)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_0_V10 (RKISP1_CIF_ISP_BASE + 0x000001f8)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_1_V10 (RKISP1_CIF_ISP_BASE + 0x000001fc)
#define RKISP1_CIF_ISP_GAMMA_OUT_Y_2_V10 (RKISP1_CIF_ISP_BASE + 0x00000200)
#define RKISP1_CIF_ISP_GAMMA_OUT_Y_3_V10 (RKISP1_CIF_ISP_BASE + 0x00000204)
#define RKISP1_CIF_ISP_GAMMA_OUT_Y_4_V10 (RKISP1_CIF_ISP_BASE + 0x00000208)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_5_V10 (RKISP1_CIF_ISP_BASE + 0x0000020C)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_5_V10 (RKISP1_CIF_ISP_BASE + 0x0000020c)
#define RKISP1_CIF_ISP_GAMMA_OUT_Y_6_V10 (RKISP1_CIF_ISP_BASE + 0x00000210)
#define RKISP1_CIF_ISP_GAMMA_OUT_Y_7_V10 (RKISP1_CIF_ISP_BASE + 0x00000214)
#define RKISP1_CIF_ISP_GAMMA_OUT_Y_8_V10 (RKISP1_CIF_ISP_BASE + 0x00000218)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_9_V10 (RKISP1_CIF_ISP_BASE + 0x0000021C)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_9_V10 (RKISP1_CIF_ISP_BASE + 0x0000021c)
#define RKISP1_CIF_ISP_GAMMA_OUT_Y_10_V10 (RKISP1_CIF_ISP_BASE + 0x00000220)
#define RKISP1_CIF_ISP_GAMMA_OUT_Y_11_V10 (RKISP1_CIF_ISP_BASE + 0x00000224)
#define RKISP1_CIF_ISP_GAMMA_OUT_Y_12_V10 (RKISP1_CIF_ISP_BASE + 0x00000228)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_13_V10 (RKISP1_CIF_ISP_BASE + 0x0000022C)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_13_V10 (RKISP1_CIF_ISP_BASE + 0x0000022c)
#define RKISP1_CIF_ISP_GAMMA_OUT_Y_14_V10 (RKISP1_CIF_ISP_BASE + 0x00000230)
#define RKISP1_CIF_ISP_GAMMA_OUT_Y_15_V10 (RKISP1_CIF_ISP_BASE + 0x00000234)
#define RKISP1_CIF_ISP_GAMMA_OUT_Y_16_V10 (RKISP1_CIF_ISP_BASE + 0x00000238)
-#define RKISP1_CIF_ISP_ERR (RKISP1_CIF_ISP_BASE + 0x0000023C)
+#define RKISP1_CIF_ISP_ERR (RKISP1_CIF_ISP_BASE + 0x0000023c)
#define RKISP1_CIF_ISP_ERR_CLR (RKISP1_CIF_ISP_BASE + 0x00000240)
#define RKISP1_CIF_ISP_FRAME_COUNT (RKISP1_CIF_ISP_BASE + 0x00000244)
#define RKISP1_CIF_ISP_CT_OFFSET_R (RKISP1_CIF_ISP_BASE + 0x00000248)
-#define RKISP1_CIF_ISP_CT_OFFSET_G (RKISP1_CIF_ISP_BASE + 0x0000024C)
+#define RKISP1_CIF_ISP_CT_OFFSET_G (RKISP1_CIF_ISP_BASE + 0x0000024c)
#define RKISP1_CIF_ISP_CT_OFFSET_B (RKISP1_CIF_ISP_BASE + 0x00000250)
#define RKISP1_CIF_ISP_GAMMA_OUT_MODE_V12 (RKISP1_CIF_ISP_BASE + 0x00000300)
#define RKISP1_CIF_ISP_GAMMA_OUT_Y_0_V12 (RKISP1_CIF_ISP_BASE + 0x00000304)
@@ -865,7 +865,7 @@
#define RKISP1_CIF_ISP_FLASH_CMD (RKISP1_CIF_ISP_FLASH_BASE + 0x00000000)
#define RKISP1_CIF_ISP_FLASH_CONFIG (RKISP1_CIF_ISP_FLASH_BASE + 0x00000004)
#define RKISP1_CIF_ISP_FLASH_PREDIV (RKISP1_CIF_ISP_FLASH_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_FLASH_DELAY (RKISP1_CIF_ISP_FLASH_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_FLASH_DELAY (RKISP1_CIF_ISP_FLASH_BASE + 0x0000000c)
#define RKISP1_CIF_ISP_FLASH_TIME (RKISP1_CIF_ISP_FLASH_BASE + 0x00000010)
#define RKISP1_CIF_ISP_FLASH_MAXP (RKISP1_CIF_ISP_FLASH_BASE + 0x00000014)
@@ -873,56 +873,56 @@
#define RKISP1_CIF_ISP_SH_CTRL (RKISP1_CIF_ISP_SH_BASE + 0x00000000)
#define RKISP1_CIF_ISP_SH_PREDIV (RKISP1_CIF_ISP_SH_BASE + 0x00000004)
#define RKISP1_CIF_ISP_SH_DELAY (RKISP1_CIF_ISP_SH_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_SH_TIME (RKISP1_CIF_ISP_SH_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_SH_TIME (RKISP1_CIF_ISP_SH_BASE + 0x0000000c)
#define RKISP1_CIF_C_PROC_BASE 0x00000800
#define RKISP1_CIF_C_PROC_CTRL (RKISP1_CIF_C_PROC_BASE + 0x00000000)
#define RKISP1_CIF_C_PROC_CONTRAST (RKISP1_CIF_C_PROC_BASE + 0x00000004)
#define RKISP1_CIF_C_PROC_BRIGHTNESS (RKISP1_CIF_C_PROC_BASE + 0x00000008)
-#define RKISP1_CIF_C_PROC_SATURATION (RKISP1_CIF_C_PROC_BASE + 0x0000000C)
+#define RKISP1_CIF_C_PROC_SATURATION (RKISP1_CIF_C_PROC_BASE + 0x0000000c)
#define RKISP1_CIF_C_PROC_HUE (RKISP1_CIF_C_PROC_BASE + 0x00000010)
#define RKISP1_CIF_DUAL_CROP_BASE 0x00000880
#define RKISP1_CIF_DUAL_CROP_CTRL (RKISP1_CIF_DUAL_CROP_BASE + 0x00000000)
#define RKISP1_CIF_DUAL_CROP_M_H_OFFS (RKISP1_CIF_DUAL_CROP_BASE + 0x00000004)
#define RKISP1_CIF_DUAL_CROP_M_V_OFFS (RKISP1_CIF_DUAL_CROP_BASE + 0x00000008)
-#define RKISP1_CIF_DUAL_CROP_M_H_SIZE (RKISP1_CIF_DUAL_CROP_BASE + 0x0000000C)
+#define RKISP1_CIF_DUAL_CROP_M_H_SIZE (RKISP1_CIF_DUAL_CROP_BASE + 0x0000000c)
#define RKISP1_CIF_DUAL_CROP_M_V_SIZE (RKISP1_CIF_DUAL_CROP_BASE + 0x00000010)
#define RKISP1_CIF_DUAL_CROP_S_H_OFFS (RKISP1_CIF_DUAL_CROP_BASE + 0x00000014)
#define RKISP1_CIF_DUAL_CROP_S_V_OFFS (RKISP1_CIF_DUAL_CROP_BASE + 0x00000018)
-#define RKISP1_CIF_DUAL_CROP_S_H_SIZE (RKISP1_CIF_DUAL_CROP_BASE + 0x0000001C)
+#define RKISP1_CIF_DUAL_CROP_S_H_SIZE (RKISP1_CIF_DUAL_CROP_BASE + 0x0000001c)
#define RKISP1_CIF_DUAL_CROP_S_V_SIZE (RKISP1_CIF_DUAL_CROP_BASE + 0x00000020)
#define RKISP1_CIF_DUAL_CROP_M_H_OFFS_SHD (RKISP1_CIF_DUAL_CROP_BASE + 0x00000024)
#define RKISP1_CIF_DUAL_CROP_M_V_OFFS_SHD (RKISP1_CIF_DUAL_CROP_BASE + 0x00000028)
-#define RKISP1_CIF_DUAL_CROP_M_H_SIZE_SHD (RKISP1_CIF_DUAL_CROP_BASE + 0x0000002C)
+#define RKISP1_CIF_DUAL_CROP_M_H_SIZE_SHD (RKISP1_CIF_DUAL_CROP_BASE + 0x0000002c)
#define RKISP1_CIF_DUAL_CROP_M_V_SIZE_SHD (RKISP1_CIF_DUAL_CROP_BASE + 0x00000030)
#define RKISP1_CIF_DUAL_CROP_S_H_OFFS_SHD (RKISP1_CIF_DUAL_CROP_BASE + 0x00000034)
#define RKISP1_CIF_DUAL_CROP_S_V_OFFS_SHD (RKISP1_CIF_DUAL_CROP_BASE + 0x00000038)
-#define RKISP1_CIF_DUAL_CROP_S_H_SIZE_SHD (RKISP1_CIF_DUAL_CROP_BASE + 0x0000003C)
+#define RKISP1_CIF_DUAL_CROP_S_H_SIZE_SHD (RKISP1_CIF_DUAL_CROP_BASE + 0x0000003c)
#define RKISP1_CIF_DUAL_CROP_S_V_SIZE_SHD (RKISP1_CIF_DUAL_CROP_BASE + 0x00000040)
-#define RKISP1_CIF_MRSZ_BASE 0x00000C00
+#define RKISP1_CIF_MRSZ_BASE 0x00000c00
#define RKISP1_CIF_SRSZ_BASE 0x00001000
#define RKISP1_CIF_RSZ_CTRL 0x0000
#define RKISP1_CIF_RSZ_SCALE_HY 0x0004
#define RKISP1_CIF_RSZ_SCALE_HCB 0x0008
-#define RKISP1_CIF_RSZ_SCALE_HCR 0x000C
+#define RKISP1_CIF_RSZ_SCALE_HCR 0x000c
#define RKISP1_CIF_RSZ_SCALE_VY 0x0010
#define RKISP1_CIF_RSZ_SCALE_VC 0x0014
#define RKISP1_CIF_RSZ_PHASE_HY 0x0018
-#define RKISP1_CIF_RSZ_PHASE_HC 0x001C
+#define RKISP1_CIF_RSZ_PHASE_HC 0x001c
#define RKISP1_CIF_RSZ_PHASE_VY 0x0020
#define RKISP1_CIF_RSZ_PHASE_VC 0x0024
#define RKISP1_CIF_RSZ_SCALE_LUT_ADDR 0x0028
-#define RKISP1_CIF_RSZ_SCALE_LUT 0x002C
+#define RKISP1_CIF_RSZ_SCALE_LUT 0x002c
#define RKISP1_CIF_RSZ_CTRL_SHD 0x0030
#define RKISP1_CIF_RSZ_SCALE_HY_SHD 0x0034
#define RKISP1_CIF_RSZ_SCALE_HCB_SHD 0x0038
-#define RKISP1_CIF_RSZ_SCALE_HCR_SHD 0x003C
+#define RKISP1_CIF_RSZ_SCALE_HCR_SHD 0x003c
#define RKISP1_CIF_RSZ_SCALE_VY_SHD 0x0040
#define RKISP1_CIF_RSZ_SCALE_VC_SHD 0x0044
#define RKISP1_CIF_RSZ_PHASE_HY_SHD 0x0048
-#define RKISP1_CIF_RSZ_PHASE_HC_SHD 0x004C
+#define RKISP1_CIF_RSZ_PHASE_HC_SHD 0x004c
#define RKISP1_CIF_RSZ_PHASE_VY_SHD 0x0050
#define RKISP1_CIF_RSZ_PHASE_VC_SHD 0x0054
@@ -930,89 +930,89 @@
#define RKISP1_CIF_MI_CTRL (RKISP1_CIF_MI_BASE + 0x00000000)
#define RKISP1_CIF_MI_INIT (RKISP1_CIF_MI_BASE + 0x00000004)
#define RKISP1_CIF_MI_MP_Y_BASE_AD_INIT (RKISP1_CIF_MI_BASE + 0x00000008)
-#define RKISP1_CIF_MI_MP_Y_SIZE_INIT (RKISP1_CIF_MI_BASE + 0x0000000C)
+#define RKISP1_CIF_MI_MP_Y_SIZE_INIT (RKISP1_CIF_MI_BASE + 0x0000000c)
#define RKISP1_CIF_MI_MP_Y_OFFS_CNT_INIT (RKISP1_CIF_MI_BASE + 0x00000010)
#define RKISP1_CIF_MI_MP_Y_OFFS_CNT_START (RKISP1_CIF_MI_BASE + 0x00000014)
#define RKISP1_CIF_MI_MP_Y_IRQ_OFFS_INIT (RKISP1_CIF_MI_BASE + 0x00000018)
-#define RKISP1_CIF_MI_MP_CB_BASE_AD_INIT (RKISP1_CIF_MI_BASE + 0x0000001C)
+#define RKISP1_CIF_MI_MP_CB_BASE_AD_INIT (RKISP1_CIF_MI_BASE + 0x0000001c)
#define RKISP1_CIF_MI_MP_CB_SIZE_INIT (RKISP1_CIF_MI_BASE + 0x00000020)
#define RKISP1_CIF_MI_MP_CB_OFFS_CNT_INIT (RKISP1_CIF_MI_BASE + 0x00000024)
#define RKISP1_CIF_MI_MP_CB_OFFS_CNT_START (RKISP1_CIF_MI_BASE + 0x00000028)
-#define RKISP1_CIF_MI_MP_CR_BASE_AD_INIT (RKISP1_CIF_MI_BASE + 0x0000002C)
+#define RKISP1_CIF_MI_MP_CR_BASE_AD_INIT (RKISP1_CIF_MI_BASE + 0x0000002c)
#define RKISP1_CIF_MI_MP_CR_SIZE_INIT (RKISP1_CIF_MI_BASE + 0x00000030)
#define RKISP1_CIF_MI_MP_CR_OFFS_CNT_INIT (RKISP1_CIF_MI_BASE + 0x00000034)
#define RKISP1_CIF_MI_MP_CR_OFFS_CNT_START (RKISP1_CIF_MI_BASE + 0x00000038)
-#define RKISP1_CIF_MI_SP_Y_BASE_AD_INIT (RKISP1_CIF_MI_BASE + 0x0000003C)
+#define RKISP1_CIF_MI_SP_Y_BASE_AD_INIT (RKISP1_CIF_MI_BASE + 0x0000003c)
#define RKISP1_CIF_MI_SP_Y_SIZE_INIT (RKISP1_CIF_MI_BASE + 0x00000040)
#define RKISP1_CIF_MI_SP_Y_OFFS_CNT_INIT (RKISP1_CIF_MI_BASE + 0x00000044)
#define RKISP1_CIF_MI_SP_Y_OFFS_CNT_START (RKISP1_CIF_MI_BASE + 0x00000048)
-#define RKISP1_CIF_MI_SP_Y_LLENGTH (RKISP1_CIF_MI_BASE + 0x0000004C)
+#define RKISP1_CIF_MI_SP_Y_LLENGTH (RKISP1_CIF_MI_BASE + 0x0000004c)
#define RKISP1_CIF_MI_SP_CB_BASE_AD_INIT (RKISP1_CIF_MI_BASE + 0x00000050)
#define RKISP1_CIF_MI_SP_CB_SIZE_INIT (RKISP1_CIF_MI_BASE + 0x00000054)
#define RKISP1_CIF_MI_SP_CB_OFFS_CNT_INIT (RKISP1_CIF_MI_BASE + 0x00000058)
-#define RKISP1_CIF_MI_SP_CB_OFFS_CNT_START (RKISP1_CIF_MI_BASE + 0x0000005C)
+#define RKISP1_CIF_MI_SP_CB_OFFS_CNT_START (RKISP1_CIF_MI_BASE + 0x0000005c)
#define RKISP1_CIF_MI_SP_CR_BASE_AD_INIT (RKISP1_CIF_MI_BASE + 0x00000060)
#define RKISP1_CIF_MI_SP_CR_SIZE_INIT (RKISP1_CIF_MI_BASE + 0x00000064)
#define RKISP1_CIF_MI_SP_CR_OFFS_CNT_INIT (RKISP1_CIF_MI_BASE + 0x00000068)
-#define RKISP1_CIF_MI_SP_CR_OFFS_CNT_START (RKISP1_CIF_MI_BASE + 0x0000006C)
+#define RKISP1_CIF_MI_SP_CR_OFFS_CNT_START (RKISP1_CIF_MI_BASE + 0x0000006c)
#define RKISP1_CIF_MI_BYTE_CNT (RKISP1_CIF_MI_BASE + 0x00000070)
#define RKISP1_CIF_MI_CTRL_SHD (RKISP1_CIF_MI_BASE + 0x00000074)
#define RKISP1_CIF_MI_MP_Y_BASE_AD_SHD (RKISP1_CIF_MI_BASE + 0x00000078)
-#define RKISP1_CIF_MI_MP_Y_SIZE_SHD (RKISP1_CIF_MI_BASE + 0x0000007C)
+#define RKISP1_CIF_MI_MP_Y_SIZE_SHD (RKISP1_CIF_MI_BASE + 0x0000007c)
#define RKISP1_CIF_MI_MP_Y_OFFS_CNT_SHD (RKISP1_CIF_MI_BASE + 0x00000080)
#define RKISP1_CIF_MI_MP_Y_IRQ_OFFS_SHD (RKISP1_CIF_MI_BASE + 0x00000084)
#define RKISP1_CIF_MI_MP_CB_BASE_AD_SHD (RKISP1_CIF_MI_BASE + 0x00000088)
-#define RKISP1_CIF_MI_MP_CB_SIZE_SHD (RKISP1_CIF_MI_BASE + 0x0000008C)
+#define RKISP1_CIF_MI_MP_CB_SIZE_SHD (RKISP1_CIF_MI_BASE + 0x0000008c)
#define RKISP1_CIF_MI_MP_CB_OFFS_CNT_SHD (RKISP1_CIF_MI_BASE + 0x00000090)
#define RKISP1_CIF_MI_MP_CR_BASE_AD_SHD (RKISP1_CIF_MI_BASE + 0x00000094)
#define RKISP1_CIF_MI_MP_CR_SIZE_SHD (RKISP1_CIF_MI_BASE + 0x00000098)
-#define RKISP1_CIF_MI_MP_CR_OFFS_CNT_SHD (RKISP1_CIF_MI_BASE + 0x0000009C)
-#define RKISP1_CIF_MI_SP_Y_BASE_AD_SHD (RKISP1_CIF_MI_BASE + 0x000000A0)
-#define RKISP1_CIF_MI_SP_Y_SIZE_SHD (RKISP1_CIF_MI_BASE + 0x000000A4)
-#define RKISP1_CIF_MI_SP_Y_OFFS_CNT_SHD (RKISP1_CIF_MI_BASE + 0x000000A8)
-#define RKISP1_CIF_MI_SP_CB_BASE_AD_SHD (RKISP1_CIF_MI_BASE + 0x000000B0)
-#define RKISP1_CIF_MI_SP_CB_SIZE_SHD (RKISP1_CIF_MI_BASE + 0x000000B4)
-#define RKISP1_CIF_MI_SP_CB_OFFS_CNT_SHD (RKISP1_CIF_MI_BASE + 0x000000B8)
-#define RKISP1_CIF_MI_SP_CR_BASE_AD_SHD (RKISP1_CIF_MI_BASE + 0x000000BC)
-#define RKISP1_CIF_MI_SP_CR_SIZE_SHD (RKISP1_CIF_MI_BASE + 0x000000C0)
-#define RKISP1_CIF_MI_SP_CR_OFFS_CNT_SHD (RKISP1_CIF_MI_BASE + 0x000000C4)
-#define RKISP1_CIF_MI_DMA_Y_PIC_START_AD (RKISP1_CIF_MI_BASE + 0x000000C8)
-#define RKISP1_CIF_MI_DMA_Y_PIC_WIDTH (RKISP1_CIF_MI_BASE + 0x000000CC)
-#define RKISP1_CIF_MI_DMA_Y_LLENGTH (RKISP1_CIF_MI_BASE + 0x000000D0)
-#define RKISP1_CIF_MI_DMA_Y_PIC_SIZE (RKISP1_CIF_MI_BASE + 0x000000D4)
-#define RKISP1_CIF_MI_DMA_CB_PIC_START_AD (RKISP1_CIF_MI_BASE + 0x000000D8)
-#define RKISP1_CIF_MI_DMA_CR_PIC_START_AD (RKISP1_CIF_MI_BASE + 0x000000E8)
-#define RKISP1_CIF_MI_IMSC (RKISP1_CIF_MI_BASE + 0x000000F8)
-#define RKISP1_CIF_MI_RIS (RKISP1_CIF_MI_BASE + 0x000000FC)
+#define RKISP1_CIF_MI_MP_CR_OFFS_CNT_SHD (RKISP1_CIF_MI_BASE + 0x0000009c)
+#define RKISP1_CIF_MI_SP_Y_BASE_AD_SHD (RKISP1_CIF_MI_BASE + 0x000000a0)
+#define RKISP1_CIF_MI_SP_Y_SIZE_SHD (RKISP1_CIF_MI_BASE + 0x000000a4)
+#define RKISP1_CIF_MI_SP_Y_OFFS_CNT_SHD (RKISP1_CIF_MI_BASE + 0x000000a8)
+#define RKISP1_CIF_MI_SP_CB_BASE_AD_SHD (RKISP1_CIF_MI_BASE + 0x000000b0)
+#define RKISP1_CIF_MI_SP_CB_SIZE_SHD (RKISP1_CIF_MI_BASE + 0x000000b4)
+#define RKISP1_CIF_MI_SP_CB_OFFS_CNT_SHD (RKISP1_CIF_MI_BASE + 0x000000b8)
+#define RKISP1_CIF_MI_SP_CR_BASE_AD_SHD (RKISP1_CIF_MI_BASE + 0x000000bc)
+#define RKISP1_CIF_MI_SP_CR_SIZE_SHD (RKISP1_CIF_MI_BASE + 0x000000c0)
+#define RKISP1_CIF_MI_SP_CR_OFFS_CNT_SHD (RKISP1_CIF_MI_BASE + 0x000000c4)
+#define RKISP1_CIF_MI_DMA_Y_PIC_START_AD (RKISP1_CIF_MI_BASE + 0x000000c8)
+#define RKISP1_CIF_MI_DMA_Y_PIC_WIDTH (RKISP1_CIF_MI_BASE + 0x000000cc)
+#define RKISP1_CIF_MI_DMA_Y_LLENGTH (RKISP1_CIF_MI_BASE + 0x000000d0)
+#define RKISP1_CIF_MI_DMA_Y_PIC_SIZE (RKISP1_CIF_MI_BASE + 0x000000d4)
+#define RKISP1_CIF_MI_DMA_CB_PIC_START_AD (RKISP1_CIF_MI_BASE + 0x000000d8)
+#define RKISP1_CIF_MI_DMA_CR_PIC_START_AD (RKISP1_CIF_MI_BASE + 0x000000e8)
+#define RKISP1_CIF_MI_IMSC (RKISP1_CIF_MI_BASE + 0x000000f8)
+#define RKISP1_CIF_MI_RIS (RKISP1_CIF_MI_BASE + 0x000000fc)
#define RKISP1_CIF_MI_MIS (RKISP1_CIF_MI_BASE + 0x00000100)
#define RKISP1_CIF_MI_ICR (RKISP1_CIF_MI_BASE + 0x00000104)
#define RKISP1_CIF_MI_ISR (RKISP1_CIF_MI_BASE + 0x00000108)
-#define RKISP1_CIF_MI_STATUS (RKISP1_CIF_MI_BASE + 0x0000010C)
+#define RKISP1_CIF_MI_STATUS (RKISP1_CIF_MI_BASE + 0x0000010c)
#define RKISP1_CIF_MI_STATUS_CLR (RKISP1_CIF_MI_BASE + 0x00000110)
#define RKISP1_CIF_MI_SP_Y_PIC_WIDTH (RKISP1_CIF_MI_BASE + 0x00000114)
#define RKISP1_CIF_MI_SP_Y_PIC_HEIGHT (RKISP1_CIF_MI_BASE + 0x00000118)
-#define RKISP1_CIF_MI_SP_Y_PIC_SIZE (RKISP1_CIF_MI_BASE + 0x0000011C)
+#define RKISP1_CIF_MI_SP_Y_PIC_SIZE (RKISP1_CIF_MI_BASE + 0x0000011c)
#define RKISP1_CIF_MI_DMA_CTRL (RKISP1_CIF_MI_BASE + 0x00000120)
#define RKISP1_CIF_MI_DMA_START (RKISP1_CIF_MI_BASE + 0x00000124)
#define RKISP1_CIF_MI_DMA_STATUS (RKISP1_CIF_MI_BASE + 0x00000128)
-#define RKISP1_CIF_MI_PIXEL_COUNT (RKISP1_CIF_MI_BASE + 0x0000012C)
+#define RKISP1_CIF_MI_PIXEL_COUNT (RKISP1_CIF_MI_BASE + 0x0000012c)
#define RKISP1_CIF_MI_MP_Y_BASE_AD_INIT2 (RKISP1_CIF_MI_BASE + 0x00000130)
#define RKISP1_CIF_MI_MP_CB_BASE_AD_INIT2 (RKISP1_CIF_MI_BASE + 0x00000134)
#define RKISP1_CIF_MI_MP_CR_BASE_AD_INIT2 (RKISP1_CIF_MI_BASE + 0x00000138)
-#define RKISP1_CIF_MI_SP_Y_BASE_AD_INIT2 (RKISP1_CIF_MI_BASE + 0x0000013C)
+#define RKISP1_CIF_MI_SP_Y_BASE_AD_INIT2 (RKISP1_CIF_MI_BASE + 0x0000013c)
#define RKISP1_CIF_MI_SP_CB_BASE_AD_INIT2 (RKISP1_CIF_MI_BASE + 0x00000140)
#define RKISP1_CIF_MI_SP_CR_BASE_AD_INIT2 (RKISP1_CIF_MI_BASE + 0x00000144)
#define RKISP1_CIF_MI_XTD_FORMAT_CTRL (RKISP1_CIF_MI_BASE + 0x00000148)
-#define RKISP1_CIF_SMIA_BASE 0x00001A00
+#define RKISP1_CIF_SMIA_BASE 0x00001a00
#define RKISP1_CIF_SMIA_CTRL (RKISP1_CIF_SMIA_BASE + 0x00000000)
#define RKISP1_CIF_SMIA_STATUS (RKISP1_CIF_SMIA_BASE + 0x00000004)
#define RKISP1_CIF_SMIA_IMSC (RKISP1_CIF_SMIA_BASE + 0x00000008)
-#define RKISP1_CIF_SMIA_RIS (RKISP1_CIF_SMIA_BASE + 0x0000000C)
+#define RKISP1_CIF_SMIA_RIS (RKISP1_CIF_SMIA_BASE + 0x0000000c)
#define RKISP1_CIF_SMIA_MIS (RKISP1_CIF_SMIA_BASE + 0x00000010)
#define RKISP1_CIF_SMIA_ICR (RKISP1_CIF_SMIA_BASE + 0x00000014)
#define RKISP1_CIF_SMIA_ISR (RKISP1_CIF_SMIA_BASE + 0x00000018)
-#define RKISP1_CIF_SMIA_DATA_FORMAT_SEL (RKISP1_CIF_SMIA_BASE + 0x0000001C)
+#define RKISP1_CIF_SMIA_DATA_FORMAT_SEL (RKISP1_CIF_SMIA_BASE + 0x0000001c)
#define RKISP1_CIF_SMIA_SOF_EMB_DATA_LINES (RKISP1_CIF_SMIA_BASE + 0x00000020)
#define RKISP1_CIF_SMIA_EMB_HSTART (RKISP1_CIF_SMIA_BASE + 0x00000024)
#define RKISP1_CIF_SMIA_EMB_HSIZE (RKISP1_CIF_SMIA_BASE + 0x00000028)
@@ -1021,27 +1021,27 @@
#define RKISP1_CIF_SMIA_EMB_DATA_FIFO (RKISP1_CIF_SMIA_BASE + 0x00000034)
#define RKISP1_CIF_SMIA_EMB_DATA_WATERMARK (RKISP1_CIF_SMIA_BASE + 0x00000038)
-#define RKISP1_CIF_MIPI_BASE 0x00001C00
+#define RKISP1_CIF_MIPI_BASE 0x00001c00
#define RKISP1_CIF_MIPI_CTRL (RKISP1_CIF_MIPI_BASE + 0x00000000)
#define RKISP1_CIF_MIPI_STATUS (RKISP1_CIF_MIPI_BASE + 0x00000004)
#define RKISP1_CIF_MIPI_IMSC (RKISP1_CIF_MIPI_BASE + 0x00000008)
-#define RKISP1_CIF_MIPI_RIS (RKISP1_CIF_MIPI_BASE + 0x0000000C)
+#define RKISP1_CIF_MIPI_RIS (RKISP1_CIF_MIPI_BASE + 0x0000000c)
#define RKISP1_CIF_MIPI_MIS (RKISP1_CIF_MIPI_BASE + 0x00000010)
#define RKISP1_CIF_MIPI_ICR (RKISP1_CIF_MIPI_BASE + 0x00000014)
#define RKISP1_CIF_MIPI_ISR (RKISP1_CIF_MIPI_BASE + 0x00000018)
-#define RKISP1_CIF_MIPI_CUR_DATA_ID (RKISP1_CIF_MIPI_BASE + 0x0000001C)
+#define RKISP1_CIF_MIPI_CUR_DATA_ID (RKISP1_CIF_MIPI_BASE + 0x0000001c)
#define RKISP1_CIF_MIPI_IMG_DATA_SEL (RKISP1_CIF_MIPI_BASE + 0x00000020)
#define RKISP1_CIF_MIPI_ADD_DATA_SEL_1 (RKISP1_CIF_MIPI_BASE + 0x00000024)
#define RKISP1_CIF_MIPI_ADD_DATA_SEL_2 (RKISP1_CIF_MIPI_BASE + 0x00000028)
-#define RKISP1_CIF_MIPI_ADD_DATA_SEL_3 (RKISP1_CIF_MIPI_BASE + 0x0000002C)
+#define RKISP1_CIF_MIPI_ADD_DATA_SEL_3 (RKISP1_CIF_MIPI_BASE + 0x0000002c)
#define RKISP1_CIF_MIPI_ADD_DATA_SEL_4 (RKISP1_CIF_MIPI_BASE + 0x00000030)
#define RKISP1_CIF_MIPI_ADD_DATA_FIFO (RKISP1_CIF_MIPI_BASE + 0x00000034)
#define RKISP1_CIF_MIPI_FIFO_FILL_LEVEL (RKISP1_CIF_MIPI_BASE + 0x00000038)
-#define RKISP1_CIF_MIPI_COMPRESSED_MODE (RKISP1_CIF_MIPI_BASE + 0x0000003C)
+#define RKISP1_CIF_MIPI_COMPRESSED_MODE (RKISP1_CIF_MIPI_BASE + 0x0000003c)
#define RKISP1_CIF_MIPI_FRAME (RKISP1_CIF_MIPI_BASE + 0x00000040)
#define RKISP1_CIF_MIPI_GEN_SHORT_DT (RKISP1_CIF_MIPI_BASE + 0x00000044)
#define RKISP1_CIF_MIPI_GEN_SHORT_8_9 (RKISP1_CIF_MIPI_BASE + 0x00000048)
-#define RKISP1_CIF_MIPI_GEN_SHORT_A_B (RKISP1_CIF_MIPI_BASE + 0x0000004C)
+#define RKISP1_CIF_MIPI_GEN_SHORT_A_B (RKISP1_CIF_MIPI_BASE + 0x0000004c)
#define RKISP1_CIF_MIPI_GEN_SHORT_C_D (RKISP1_CIF_MIPI_BASE + 0x00000050)
#define RKISP1_CIF_MIPI_GEN_SHORT_E_F (RKISP1_CIF_MIPI_BASE + 0x00000054)
@@ -1049,15 +1049,15 @@
#define RKISP1_CIF_ISP_AFM_CTRL (RKISP1_CIF_ISP_AFM_BASE + 0x00000000)
#define RKISP1_CIF_ISP_AFM_LT_A (RKISP1_CIF_ISP_AFM_BASE + 0x00000004)
#define RKISP1_CIF_ISP_AFM_RB_A (RKISP1_CIF_ISP_AFM_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_AFM_LT_B (RKISP1_CIF_ISP_AFM_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_AFM_LT_B (RKISP1_CIF_ISP_AFM_BASE + 0x0000000c)
#define RKISP1_CIF_ISP_AFM_RB_B (RKISP1_CIF_ISP_AFM_BASE + 0x00000010)
#define RKISP1_CIF_ISP_AFM_LT_C (RKISP1_CIF_ISP_AFM_BASE + 0x00000014)
#define RKISP1_CIF_ISP_AFM_RB_C (RKISP1_CIF_ISP_AFM_BASE + 0x00000018)
-#define RKISP1_CIF_ISP_AFM_THRES (RKISP1_CIF_ISP_AFM_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_AFM_THRES (RKISP1_CIF_ISP_AFM_BASE + 0x0000001c)
#define RKISP1_CIF_ISP_AFM_VAR_SHIFT (RKISP1_CIF_ISP_AFM_BASE + 0x00000020)
#define RKISP1_CIF_ISP_AFM_SUM_A (RKISP1_CIF_ISP_AFM_BASE + 0x00000024)
#define RKISP1_CIF_ISP_AFM_SUM_B (RKISP1_CIF_ISP_AFM_BASE + 0x00000028)
-#define RKISP1_CIF_ISP_AFM_SUM_C (RKISP1_CIF_ISP_AFM_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_AFM_SUM_C (RKISP1_CIF_ISP_AFM_BASE + 0x0000002c)
#define RKISP1_CIF_ISP_AFM_LUM_A (RKISP1_CIF_ISP_AFM_BASE + 0x00000030)
#define RKISP1_CIF_ISP_AFM_LUM_B (RKISP1_CIF_ISP_AFM_BASE + 0x00000034)
#define RKISP1_CIF_ISP_AFM_LUM_C (RKISP1_CIF_ISP_AFM_BASE + 0x00000038)
@@ -1066,11 +1066,11 @@
#define RKISP1_CIF_ISP_LSC_CTRL (RKISP1_CIF_ISP_LSC_BASE + 0x00000000)
#define RKISP1_CIF_ISP_LSC_R_TABLE_ADDR (RKISP1_CIF_ISP_LSC_BASE + 0x00000004)
#define RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR (RKISP1_CIF_ISP_LSC_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_LSC_B_TABLE_ADDR (RKISP1_CIF_ISP_LSC_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_LSC_B_TABLE_ADDR (RKISP1_CIF_ISP_LSC_BASE + 0x0000000c)
#define RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR (RKISP1_CIF_ISP_LSC_BASE + 0x00000010)
#define RKISP1_CIF_ISP_LSC_R_TABLE_DATA (RKISP1_CIF_ISP_LSC_BASE + 0x00000014)
#define RKISP1_CIF_ISP_LSC_GR_TABLE_DATA (RKISP1_CIF_ISP_LSC_BASE + 0x00000018)
-#define RKISP1_CIF_ISP_LSC_B_TABLE_DATA (RKISP1_CIF_ISP_LSC_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_LSC_B_TABLE_DATA (RKISP1_CIF_ISP_LSC_BASE + 0x0000001c)
#define RKISP1_CIF_ISP_LSC_GB_TABLE_DATA (RKISP1_CIF_ISP_LSC_BASE + 0x00000020)
#define RKISP1_CIF_ISP_LSC_XGRAD(n) (RKISP1_CIF_ISP_LSC_BASE + 0x00000024 + (n) * 4)
#define RKISP1_CIF_ISP_LSC_YGRAD(n) (RKISP1_CIF_ISP_LSC_BASE + 0x00000034 + (n) * 4)
@@ -1083,46 +1083,46 @@
#define RKISP1_CIF_ISP_IS_CTRL (RKISP1_CIF_ISP_IS_BASE + 0x00000000)
#define RKISP1_CIF_ISP_IS_RECENTER (RKISP1_CIF_ISP_IS_BASE + 0x00000004)
#define RKISP1_CIF_ISP_IS_H_OFFS (RKISP1_CIF_ISP_IS_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_IS_V_OFFS (RKISP1_CIF_ISP_IS_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_IS_V_OFFS (RKISP1_CIF_ISP_IS_BASE + 0x0000000c)
#define RKISP1_CIF_ISP_IS_H_SIZE (RKISP1_CIF_ISP_IS_BASE + 0x00000010)
#define RKISP1_CIF_ISP_IS_V_SIZE (RKISP1_CIF_ISP_IS_BASE + 0x00000014)
#define RKISP1_CIF_ISP_IS_MAX_DX (RKISP1_CIF_ISP_IS_BASE + 0x00000018)
-#define RKISP1_CIF_ISP_IS_MAX_DY (RKISP1_CIF_ISP_IS_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_IS_MAX_DY (RKISP1_CIF_ISP_IS_BASE + 0x0000001c)
#define RKISP1_CIF_ISP_IS_DISPLACE (RKISP1_CIF_ISP_IS_BASE + 0x00000020)
#define RKISP1_CIF_ISP_IS_H_OFFS_SHD (RKISP1_CIF_ISP_IS_BASE + 0x00000024)
#define RKISP1_CIF_ISP_IS_V_OFFS_SHD (RKISP1_CIF_ISP_IS_BASE + 0x00000028)
-#define RKISP1_CIF_ISP_IS_H_SIZE_SHD (RKISP1_CIF_ISP_IS_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_IS_H_SIZE_SHD (RKISP1_CIF_ISP_IS_BASE + 0x0000002c)
#define RKISP1_CIF_ISP_IS_V_SIZE_SHD (RKISP1_CIF_ISP_IS_BASE + 0x00000030)
#define RKISP1_CIF_ISP_HIST_BASE_V10 0x00002400
#define RKISP1_CIF_ISP_HIST_PROP_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000000)
#define RKISP1_CIF_ISP_HIST_H_OFFS_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000004)
#define RKISP1_CIF_ISP_HIST_V_OFFS_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000008)
-#define RKISP1_CIF_ISP_HIST_H_SIZE_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000000C)
+#define RKISP1_CIF_ISP_HIST_H_SIZE_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000000c)
#define RKISP1_CIF_ISP_HIST_V_SIZE_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000010)
#define RKISP1_CIF_ISP_HIST_BIN_0_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000014)
#define RKISP1_CIF_ISP_HIST_BIN_1_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000018)
-#define RKISP1_CIF_ISP_HIST_BIN_2_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000001C)
+#define RKISP1_CIF_ISP_HIST_BIN_2_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000001c)
#define RKISP1_CIF_ISP_HIST_BIN_3_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000020)
#define RKISP1_CIF_ISP_HIST_BIN_4_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000024)
#define RKISP1_CIF_ISP_HIST_BIN_5_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000028)
-#define RKISP1_CIF_ISP_HIST_BIN_6_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000002C)
+#define RKISP1_CIF_ISP_HIST_BIN_6_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000002c)
#define RKISP1_CIF_ISP_HIST_BIN_7_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000030)
#define RKISP1_CIF_ISP_HIST_BIN_8_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000034)
#define RKISP1_CIF_ISP_HIST_BIN_9_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000038)
-#define RKISP1_CIF_ISP_HIST_BIN_10_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000003C)
+#define RKISP1_CIF_ISP_HIST_BIN_10_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000003c)
#define RKISP1_CIF_ISP_HIST_BIN_11_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000040)
#define RKISP1_CIF_ISP_HIST_BIN_12_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000044)
#define RKISP1_CIF_ISP_HIST_BIN_13_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000048)
-#define RKISP1_CIF_ISP_HIST_BIN_14_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000004C)
+#define RKISP1_CIF_ISP_HIST_BIN_14_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000004c)
#define RKISP1_CIF_ISP_HIST_BIN_15_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000050)
#define RKISP1_CIF_ISP_HIST_WEIGHT_00TO30_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000054)
#define RKISP1_CIF_ISP_HIST_WEIGHT_40TO21_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000058)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_31TO12_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000005C)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_31TO12_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000005c)
#define RKISP1_CIF_ISP_HIST_WEIGHT_22TO03_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000060)
#define RKISP1_CIF_ISP_HIST_WEIGHT_13TO43_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000064)
#define RKISP1_CIF_ISP_HIST_WEIGHT_04TO34_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000068)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_44_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000006C)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_44_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000006c)
#define RKISP1_CIF_ISP_FILT_BASE 0x00002500
#define RKISP1_CIF_ISP_FILT_MODE (RKISP1_CIF_ISP_FILT_BASE + 0x00000000)
@@ -1135,13 +1135,13 @@
#define RKISP1_CIF_ISP_FILT_FAC_SH0 (RKISP1_CIF_ISP_FILT_BASE + 0x00000040)
#define RKISP1_CIF_ISP_FILT_FAC_MID (RKISP1_CIF_ISP_FILT_BASE + 0x00000044)
#define RKISP1_CIF_ISP_FILT_FAC_BL0 (RKISP1_CIF_ISP_FILT_BASE + 0x00000048)
-#define RKISP1_CIF_ISP_FILT_FAC_BL1 (RKISP1_CIF_ISP_FILT_BASE + 0x0000004C)
+#define RKISP1_CIF_ISP_FILT_FAC_BL1 (RKISP1_CIF_ISP_FILT_BASE + 0x0000004c)
#define RKISP1_CIF_ISP_CAC_BASE 0x00002580
#define RKISP1_CIF_ISP_CAC_CTRL (RKISP1_CIF_ISP_CAC_BASE + 0x00000000)
#define RKISP1_CIF_ISP_CAC_COUNT_START (RKISP1_CIF_ISP_CAC_BASE + 0x00000004)
#define RKISP1_CIF_ISP_CAC_A (RKISP1_CIF_ISP_CAC_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_CAC_B (RKISP1_CIF_ISP_CAC_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_CAC_B (RKISP1_CIF_ISP_CAC_BASE + 0x0000000c)
#define RKISP1_CIF_ISP_CAC_C (RKISP1_CIF_ISP_CAC_BASE + 0x00000010)
#define RKISP1_CIF_ISP_X_NORM (RKISP1_CIF_ISP_CAC_BASE + 0x00000014)
#define RKISP1_CIF_ISP_Y_NORM (RKISP1_CIF_ISP_CAC_BASE + 0x00000018)
@@ -1150,7 +1150,7 @@
#define RKISP1_CIF_ISP_EXP_CTRL (RKISP1_CIF_ISP_EXP_BASE + 0x00000000)
#define RKISP1_CIF_ISP_EXP_H_OFFSET_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000004)
#define RKISP1_CIF_ISP_EXP_V_OFFSET_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_EXP_H_SIZE_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_EXP_H_SIZE_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x0000000c)
#define RKISP1_CIF_ISP_EXP_V_SIZE_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000010)
#define RKISP1_CIF_ISP_EXP_MEAN_00_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000014)
#define RKISP1_CIF_ISP_EXP_MEAN_10_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000018)
@@ -1205,160 +1205,160 @@
#define RKISP1_CIF_ISP_DPF_MODE (RKISP1_CIF_ISP_DPF_BASE + 0x00000000)
#define RKISP1_CIF_ISP_DPF_STRENGTH_R (RKISP1_CIF_ISP_DPF_BASE + 0x00000004)
#define RKISP1_CIF_ISP_DPF_STRENGTH_G (RKISP1_CIF_ISP_DPF_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_DPF_STRENGTH_B (RKISP1_CIF_ISP_DPF_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_DPF_STRENGTH_B (RKISP1_CIF_ISP_DPF_BASE + 0x0000000c)
#define RKISP1_CIF_ISP_DPF_S_WEIGHT_G_1_4 (RKISP1_CIF_ISP_DPF_BASE + 0x00000010)
#define RKISP1_CIF_ISP_DPF_S_WEIGHT_G_5_6 (RKISP1_CIF_ISP_DPF_BASE + 0x00000014)
#define RKISP1_CIF_ISP_DPF_S_WEIGHT_RB_1_4 (RKISP1_CIF_ISP_DPF_BASE + 0x00000018)
-#define RKISP1_CIF_ISP_DPF_S_WEIGHT_RB_5_6 (RKISP1_CIF_ISP_DPF_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_DPF_S_WEIGHT_RB_5_6 (RKISP1_CIF_ISP_DPF_BASE + 0x0000001c)
#define RKISP1_CIF_ISP_DPF_NULL_COEFF_0 (RKISP1_CIF_ISP_DPF_BASE + 0x00000020)
#define RKISP1_CIF_ISP_DPF_NULL_COEFF_1 (RKISP1_CIF_ISP_DPF_BASE + 0x00000024)
#define RKISP1_CIF_ISP_DPF_NULL_COEFF_2 (RKISP1_CIF_ISP_DPF_BASE + 0x00000028)
-#define RKISP1_CIF_ISP_DPF_NULL_COEFF_3 (RKISP1_CIF_ISP_DPF_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_3 (RKISP1_CIF_ISP_DPF_BASE + 0x0000002c)
#define RKISP1_CIF_ISP_DPF_NULL_COEFF_4 (RKISP1_CIF_ISP_DPF_BASE + 0x00000030)
#define RKISP1_CIF_ISP_DPF_NULL_COEFF_5 (RKISP1_CIF_ISP_DPF_BASE + 0x00000034)
#define RKISP1_CIF_ISP_DPF_NULL_COEFF_6 (RKISP1_CIF_ISP_DPF_BASE + 0x00000038)
-#define RKISP1_CIF_ISP_DPF_NULL_COEFF_7 (RKISP1_CIF_ISP_DPF_BASE + 0x0000003C)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_7 (RKISP1_CIF_ISP_DPF_BASE + 0x0000003c)
#define RKISP1_CIF_ISP_DPF_NULL_COEFF_8 (RKISP1_CIF_ISP_DPF_BASE + 0x00000040)
#define RKISP1_CIF_ISP_DPF_NULL_COEFF_9 (RKISP1_CIF_ISP_DPF_BASE + 0x00000044)
#define RKISP1_CIF_ISP_DPF_NULL_COEFF_10 (RKISP1_CIF_ISP_DPF_BASE + 0x00000048)
-#define RKISP1_CIF_ISP_DPF_NULL_COEFF_11 (RKISP1_CIF_ISP_DPF_BASE + 0x0000004C)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_11 (RKISP1_CIF_ISP_DPF_BASE + 0x0000004c)
#define RKISP1_CIF_ISP_DPF_NULL_COEFF_12 (RKISP1_CIF_ISP_DPF_BASE + 0x00000050)
#define RKISP1_CIF_ISP_DPF_NULL_COEFF_13 (RKISP1_CIF_ISP_DPF_BASE + 0x00000054)
#define RKISP1_CIF_ISP_DPF_NULL_COEFF_14 (RKISP1_CIF_ISP_DPF_BASE + 0x00000058)
-#define RKISP1_CIF_ISP_DPF_NULL_COEFF_15 (RKISP1_CIF_ISP_DPF_BASE + 0x0000005C)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_15 (RKISP1_CIF_ISP_DPF_BASE + 0x0000005c)
#define RKISP1_CIF_ISP_DPF_NULL_COEFF_16 (RKISP1_CIF_ISP_DPF_BASE + 0x00000060)
#define RKISP1_CIF_ISP_DPF_NF_GAIN_R (RKISP1_CIF_ISP_DPF_BASE + 0x00000064)
#define RKISP1_CIF_ISP_DPF_NF_GAIN_GR (RKISP1_CIF_ISP_DPF_BASE + 0x00000068)
-#define RKISP1_CIF_ISP_DPF_NF_GAIN_GB (RKISP1_CIF_ISP_DPF_BASE + 0x0000006C)
+#define RKISP1_CIF_ISP_DPF_NF_GAIN_GB (RKISP1_CIF_ISP_DPF_BASE + 0x0000006c)
#define RKISP1_CIF_ISP_DPF_NF_GAIN_B (RKISP1_CIF_ISP_DPF_BASE + 0x00000070)
#define RKISP1_CIF_ISP_DPCC_BASE 0x00002900
#define RKISP1_CIF_ISP_DPCC_MODE (RKISP1_CIF_ISP_DPCC_BASE + 0x00000000)
#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE (RKISP1_CIF_ISP_DPCC_BASE + 0x00000004)
#define RKISP1_CIF_ISP_DPCC_SET_USE (RKISP1_CIF_ISP_DPCC_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_DPCC_METHODS_SET_1 (RKISP1_CIF_ISP_DPCC_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_1 (RKISP1_CIF_ISP_DPCC_BASE + 0x0000000c)
#define RKISP1_CIF_ISP_DPCC_METHODS_SET_2 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000010)
#define RKISP1_CIF_ISP_DPCC_METHODS_SET_3 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000014)
#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_1 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000018)
-#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_1 (RKISP1_CIF_ISP_DPCC_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_1 (RKISP1_CIF_ISP_DPCC_BASE + 0x0000001c)
#define RKISP1_CIF_ISP_DPCC_PG_FAC_1 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000020)
#define RKISP1_CIF_ISP_DPCC_RND_THRESH_1 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000024)
#define RKISP1_CIF_ISP_DPCC_RG_FAC_1 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000028)
-#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_2 (RKISP1_CIF_ISP_DPCC_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_2 (RKISP1_CIF_ISP_DPCC_BASE + 0x0000002c)
#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_2 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000030)
#define RKISP1_CIF_ISP_DPCC_PG_FAC_2 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000034)
#define RKISP1_CIF_ISP_DPCC_RND_THRESH_2 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000038)
-#define RKISP1_CIF_ISP_DPCC_RG_FAC_2 (RKISP1_CIF_ISP_DPCC_BASE + 0x0000003C)
+#define RKISP1_CIF_ISP_DPCC_RG_FAC_2 (RKISP1_CIF_ISP_DPCC_BASE + 0x0000003c)
#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_3 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000040)
#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_3 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000044)
#define RKISP1_CIF_ISP_DPCC_PG_FAC_3 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000048)
-#define RKISP1_CIF_ISP_DPCC_RND_THRESH_3 (RKISP1_CIF_ISP_DPCC_BASE + 0x0000004C)
+#define RKISP1_CIF_ISP_DPCC_RND_THRESH_3 (RKISP1_CIF_ISP_DPCC_BASE + 0x0000004c)
#define RKISP1_CIF_ISP_DPCC_RG_FAC_3 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000050)
#define RKISP1_CIF_ISP_DPCC_RO_LIMITS (RKISP1_CIF_ISP_DPCC_BASE + 0x00000054)
#define RKISP1_CIF_ISP_DPCC_RND_OFFS (RKISP1_CIF_ISP_DPCC_BASE + 0x00000058)
-#define RKISP1_CIF_ISP_DPCC_BPT_CTRL (RKISP1_CIF_ISP_DPCC_BASE + 0x0000005C)
+#define RKISP1_CIF_ISP_DPCC_BPT_CTRL (RKISP1_CIF_ISP_DPCC_BASE + 0x0000005c)
#define RKISP1_CIF_ISP_DPCC_BPT_NUMBER (RKISP1_CIF_ISP_DPCC_BASE + 0x00000060)
#define RKISP1_CIF_ISP_DPCC_BPT_ADDR (RKISP1_CIF_ISP_DPCC_BASE + 0x00000064)
#define RKISP1_CIF_ISP_DPCC_BPT_DATA (RKISP1_CIF_ISP_DPCC_BASE + 0x00000068)
-#define RKISP1_CIF_ISP_WDR_BASE 0x00002A00
+#define RKISP1_CIF_ISP_WDR_BASE 0x00002a00
#define RKISP1_CIF_ISP_WDR_CTRL (RKISP1_CIF_ISP_WDR_BASE + 0x00000000)
#define RKISP1_CIF_ISP_WDR_TONECURVE_1 (RKISP1_CIF_ISP_WDR_BASE + 0x00000004)
#define RKISP1_CIF_ISP_WDR_TONECURVE_2 (RKISP1_CIF_ISP_WDR_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_3 (RKISP1_CIF_ISP_WDR_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_3 (RKISP1_CIF_ISP_WDR_BASE + 0x0000000c)
#define RKISP1_CIF_ISP_WDR_TONECURVE_4 (RKISP1_CIF_ISP_WDR_BASE + 0x00000010)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_0 (RKISP1_CIF_ISP_WDR_BASE + 0x00000014)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_1 (RKISP1_CIF_ISP_WDR_BASE + 0x00000018)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_2 (RKISP1_CIF_ISP_WDR_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_2 (RKISP1_CIF_ISP_WDR_BASE + 0x0000001c)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_3 (RKISP1_CIF_ISP_WDR_BASE + 0x00000020)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_4 (RKISP1_CIF_ISP_WDR_BASE + 0x00000024)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_5 (RKISP1_CIF_ISP_WDR_BASE + 0x00000028)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_6 (RKISP1_CIF_ISP_WDR_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_6 (RKISP1_CIF_ISP_WDR_BASE + 0x0000002c)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_7 (RKISP1_CIF_ISP_WDR_BASE + 0x00000030)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_8 (RKISP1_CIF_ISP_WDR_BASE + 0x00000034)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_9 (RKISP1_CIF_ISP_WDR_BASE + 0x00000038)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_10 (RKISP1_CIF_ISP_WDR_BASE + 0x0000003C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_10 (RKISP1_CIF_ISP_WDR_BASE + 0x0000003c)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_11 (RKISP1_CIF_ISP_WDR_BASE + 0x00000040)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_12 (RKISP1_CIF_ISP_WDR_BASE + 0x00000044)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_13 (RKISP1_CIF_ISP_WDR_BASE + 0x00000048)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_14 (RKISP1_CIF_ISP_WDR_BASE + 0x0000004C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_14 (RKISP1_CIF_ISP_WDR_BASE + 0x0000004c)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_15 (RKISP1_CIF_ISP_WDR_BASE + 0x00000050)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_16 (RKISP1_CIF_ISP_WDR_BASE + 0x00000054)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_17 (RKISP1_CIF_ISP_WDR_BASE + 0x00000058)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_18 (RKISP1_CIF_ISP_WDR_BASE + 0x0000005C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_18 (RKISP1_CIF_ISP_WDR_BASE + 0x0000005c)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_19 (RKISP1_CIF_ISP_WDR_BASE + 0x00000060)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_20 (RKISP1_CIF_ISP_WDR_BASE + 0x00000064)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_21 (RKISP1_CIF_ISP_WDR_BASE + 0x00000068)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_22 (RKISP1_CIF_ISP_WDR_BASE + 0x0000006C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_22 (RKISP1_CIF_ISP_WDR_BASE + 0x0000006c)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_23 (RKISP1_CIF_ISP_WDR_BASE + 0x00000070)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_24 (RKISP1_CIF_ISP_WDR_BASE + 0x00000074)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_25 (RKISP1_CIF_ISP_WDR_BASE + 0x00000078)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_26 (RKISP1_CIF_ISP_WDR_BASE + 0x0000007C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_26 (RKISP1_CIF_ISP_WDR_BASE + 0x0000007c)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_27 (RKISP1_CIF_ISP_WDR_BASE + 0x00000080)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_28 (RKISP1_CIF_ISP_WDR_BASE + 0x00000084)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_29 (RKISP1_CIF_ISP_WDR_BASE + 0x00000088)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_30 (RKISP1_CIF_ISP_WDR_BASE + 0x0000008C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_30 (RKISP1_CIF_ISP_WDR_BASE + 0x0000008c)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_31 (RKISP1_CIF_ISP_WDR_BASE + 0x00000090)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_32 (RKISP1_CIF_ISP_WDR_BASE + 0x00000094)
#define RKISP1_CIF_ISP_WDR_OFFSET (RKISP1_CIF_ISP_WDR_BASE + 0x00000098)
-#define RKISP1_CIF_ISP_WDR_DELTAMIN (RKISP1_CIF_ISP_WDR_BASE + 0x0000009C)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_1_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000A0)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_2_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000A4)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_3_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000A8)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_4_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000AC)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_0_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000B0)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_1_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000B4)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_2_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000B8)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_3_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000BC)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_4_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000C0)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_5_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000C4)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_6_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000C8)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_7_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000CC)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_8_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000D0)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_9_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000D4)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_10_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000D8)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_11_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000DC)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_12_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000E0)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_13_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000E4)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_14_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000E8)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_15_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000EC)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_16_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000F0)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_17_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000F4)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_18_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000F8)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_19_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000FC)
+#define RKISP1_CIF_ISP_WDR_DELTAMIN (RKISP1_CIF_ISP_WDR_BASE + 0x0000009c)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_1_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000a0)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_2_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000a4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_3_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000a8)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_4_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000ac)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_0_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000b0)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_1_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000b4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_2_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000b8)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_3_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000bc)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_4_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000c0)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_5_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000c4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_6_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000c8)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_7_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000cc)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_8_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000d0)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_9_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000d4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_10_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000d8)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_11_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000dc)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_12_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000e0)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_13_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000e4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_14_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000e8)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_15_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000ec)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_16_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000f0)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_17_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000f4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_18_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000f8)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_19_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000fc)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_20_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000100)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_21_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000104)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_22_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000108)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_23_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x0000010C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_23_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x0000010c)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_24_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000110)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_25_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000114)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_26_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000118)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_27_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x0000011C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_27_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x0000011c)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_28_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000120)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_29_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000124)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_30_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000128)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_31_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x0000012C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_31_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x0000012c)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_32_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000130)
-#define RKISP1_CIF_ISP_HIST_BASE_V12 0x00002C00
+#define RKISP1_CIF_ISP_HIST_BASE_V12 0x00002c00
#define RKISP1_CIF_ISP_HIST_CTRL_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x00000000)
#define RKISP1_CIF_ISP_HIST_SIZE_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x00000004)
#define RKISP1_CIF_ISP_HIST_OFFS_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x00000008)
-#define RKISP1_CIF_ISP_HIST_DBG1_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000000C)
-#define RKISP1_CIF_ISP_HIST_DBG2_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000001C)
-#define RKISP1_CIF_ISP_HIST_DBG3_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000002C)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000003C)
+#define RKISP1_CIF_ISP_HIST_DBG1_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000000c)
+#define RKISP1_CIF_ISP_HIST_DBG2_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000001c)
+#define RKISP1_CIF_ISP_HIST_DBG3_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000002c)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000003c)
#define RKISP1_CIF_ISP_HIST_BIN_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x00000120)
-#define RKISP1_CIF_ISP_VSM_BASE 0x00002F00
+#define RKISP1_CIF_ISP_VSM_BASE 0x00002f00
#define RKISP1_CIF_ISP_VSM_MODE (RKISP1_CIF_ISP_VSM_BASE + 0x00000000)
#define RKISP1_CIF_ISP_VSM_H_OFFS (RKISP1_CIF_ISP_VSM_BASE + 0x00000004)
#define RKISP1_CIF_ISP_VSM_V_OFFS (RKISP1_CIF_ISP_VSM_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_VSM_H_SIZE (RKISP1_CIF_ISP_VSM_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_VSM_H_SIZE (RKISP1_CIF_ISP_VSM_BASE + 0x0000000c)
#define RKISP1_CIF_ISP_VSM_V_SIZE (RKISP1_CIF_ISP_VSM_BASE + 0x00000010)
#define RKISP1_CIF_ISP_VSM_H_SEGMENTS (RKISP1_CIF_ISP_VSM_BASE + 0x00000014)
#define RKISP1_CIF_ISP_VSM_V_SEGMENTS (RKISP1_CIF_ISP_VSM_BASE + 0x00000018)
-#define RKISP1_CIF_ISP_VSM_DELTA_H (RKISP1_CIF_ISP_VSM_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_VSM_DELTA_H (RKISP1_CIF_ISP_VSM_BASE + 0x0000001c)
#define RKISP1_CIF_ISP_VSM_DELTA_V (RKISP1_CIF_ISP_VSM_BASE + 0x00000020)
#define RKISP1_CIF_ISP_CSI0_BASE 0x00007000
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
index c15ae0218118..28ecc7347d54 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
@@ -60,7 +60,6 @@ struct rkisp1_rsz_config {
const int min_rsz_height;
/* registers */
struct {
- u32 ctrl;
u32 yuvmode_mask;
u32 rawmode_mask;
u32 h_offset;
@@ -78,7 +77,6 @@ static const struct rkisp1_rsz_config rkisp1_rsz_config_mp = {
.min_rsz_height = RKISP1_RSZ_SRC_MIN_HEIGHT,
/* registers */
.dual_crop = {
- .ctrl = RKISP1_CIF_DUAL_CROP_CTRL,
.yuvmode_mask = RKISP1_CIF_DUAL_CROP_MP_MODE_YUV,
.rawmode_mask = RKISP1_CIF_DUAL_CROP_MP_MODE_RAW,
.h_offset = RKISP1_CIF_DUAL_CROP_M_H_OFFS,
@@ -96,7 +94,6 @@ static const struct rkisp1_rsz_config rkisp1_rsz_config_sp = {
.min_rsz_height = RKISP1_RSZ_SRC_MIN_HEIGHT,
/* registers */
.dual_crop = {
- .ctrl = RKISP1_CIF_DUAL_CROP_CTRL,
.yuvmode_mask = RKISP1_CIF_DUAL_CROP_SP_MODE_YUV,
.rawmode_mask = RKISP1_CIF_DUAL_CROP_SP_MODE_RAW,
.h_offset = RKISP1_CIF_DUAL_CROP_S_H_OFFS,
@@ -117,34 +114,6 @@ static inline void rkisp1_rsz_write(struct rkisp1_resizer *rsz, u32 offset,
rkisp1_write(rsz->rkisp1, rsz->regs_base + offset, value);
}
-static struct v4l2_mbus_framefmt *
-rkisp1_rsz_get_pad_fmt(struct rkisp1_resizer *rsz,
- struct v4l2_subdev_state *sd_state,
- unsigned int pad, u32 which)
-{
- struct v4l2_subdev_state state = {
- .pads = rsz->pad_cfg,
- };
- if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&rsz->sd, sd_state, pad);
- else
- return v4l2_subdev_get_try_format(&rsz->sd, &state, pad);
-}
-
-static struct v4l2_rect *
-rkisp1_rsz_get_pad_crop(struct rkisp1_resizer *rsz,
- struct v4l2_subdev_state *sd_state,
- unsigned int pad, u32 which)
-{
- struct v4l2_subdev_state state = {
- .pads = rsz->pad_cfg,
- };
- if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&rsz->sd, sd_state, pad);
- else
- return v4l2_subdev_get_try_crop(&rsz->sd, &state, pad);
-}
-
/* ----------------------------------------------------------------------------
* Dual crop hw configs
*/
@@ -152,7 +121,7 @@ rkisp1_rsz_get_pad_crop(struct rkisp1_resizer *rsz,
static void rkisp1_dcrop_disable(struct rkisp1_resizer *rsz,
enum rkisp1_shadow_regs_when when)
{
- u32 dc_ctrl = rkisp1_read(rsz->rkisp1, rsz->config->dual_crop.ctrl);
+ u32 dc_ctrl = rkisp1_read(rsz->rkisp1, RKISP1_CIF_DUAL_CROP_CTRL);
u32 mask = ~(rsz->config->dual_crop.yuvmode_mask |
rsz->config->dual_crop.rawmode_mask);
@@ -161,21 +130,22 @@ static void rkisp1_dcrop_disable(struct rkisp1_resizer *rsz,
dc_ctrl |= RKISP1_CIF_DUAL_CROP_GEN_CFG_UPD;
else
dc_ctrl |= RKISP1_CIF_DUAL_CROP_CFG_UPD;
- rkisp1_write(rsz->rkisp1, rsz->config->dual_crop.ctrl, dc_ctrl);
+ rkisp1_write(rsz->rkisp1, RKISP1_CIF_DUAL_CROP_CTRL, dc_ctrl);
}
/* configure dual-crop unit */
-static void rkisp1_dcrop_config(struct rkisp1_resizer *rsz)
+static void rkisp1_dcrop_config(struct rkisp1_resizer *rsz,
+ struct v4l2_subdev_state *sd_state)
{
struct rkisp1_device *rkisp1 = rsz->rkisp1;
struct v4l2_mbus_framefmt *sink_fmt;
struct v4l2_rect *sink_crop;
u32 dc_ctrl;
- sink_crop = rkisp1_rsz_get_pad_crop(rsz, NULL, RKISP1_RSZ_PAD_SINK,
- V4L2_SUBDEV_FORMAT_ACTIVE);
- sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, NULL, RKISP1_RSZ_PAD_SINK,
- V4L2_SUBDEV_FORMAT_ACTIVE);
+ sink_crop = v4l2_subdev_get_pad_crop(&rsz->sd, sd_state,
+ RKISP1_RSZ_PAD_SINK);
+ sink_fmt = v4l2_subdev_get_pad_format(&rsz->sd, sd_state,
+ RKISP1_RSZ_PAD_SINK);
if (sink_crop->width == sink_fmt->width &&
sink_crop->height == sink_fmt->height &&
@@ -185,14 +155,14 @@ static void rkisp1_dcrop_config(struct rkisp1_resizer *rsz)
return;
}
- dc_ctrl = rkisp1_read(rkisp1, rsz->config->dual_crop.ctrl);
+ dc_ctrl = rkisp1_read(rkisp1, RKISP1_CIF_DUAL_CROP_CTRL);
rkisp1_write(rkisp1, rsz->config->dual_crop.h_offset, sink_crop->left);
rkisp1_write(rkisp1, rsz->config->dual_crop.v_offset, sink_crop->top);
rkisp1_write(rkisp1, rsz->config->dual_crop.h_size, sink_crop->width);
rkisp1_write(rkisp1, rsz->config->dual_crop.v_size, sink_crop->height);
dc_ctrl |= rsz->config->dual_crop.yuvmode_mask;
dc_ctrl |= RKISP1_CIF_DUAL_CROP_CFG_UPD;
- rkisp1_write(rkisp1, rsz->config->dual_crop.ctrl, dc_ctrl);
+ rkisp1_write(rkisp1, RKISP1_CIF_DUAL_CROP_CTRL, dc_ctrl);
dev_dbg(rkisp1->dev, "stream %d crop: %dx%d -> %dx%d\n", rsz->id,
sink_fmt->width, sink_fmt->height,
@@ -236,10 +206,10 @@ static void rkisp1_rsz_disable(struct rkisp1_resizer *rsz,
}
static void rkisp1_rsz_config_regs(struct rkisp1_resizer *rsz,
- struct v4l2_rect *sink_y,
- struct v4l2_rect *sink_c,
- struct v4l2_rect *src_y,
- struct v4l2_rect *src_c,
+ const struct v4l2_rect *sink_y,
+ const struct v4l2_rect *sink_c,
+ const struct v4l2_area *src_y,
+ const struct v4l2_area *src_c,
enum rkisp1_shadow_regs_when when)
{
u32 ratio, rsz_ctrl = 0;
@@ -296,61 +266,63 @@ static void rkisp1_rsz_config_regs(struct rkisp1_resizer *rsz,
}
static void rkisp1_rsz_config(struct rkisp1_resizer *rsz,
+ struct v4l2_subdev_state *sd_state,
enum rkisp1_shadow_regs_when when)
{
const struct rkisp1_rsz_yuv_mbus_info *sink_yuv_info, *src_yuv_info;
- struct v4l2_rect sink_y, sink_c, src_y, src_c;
- struct v4l2_mbus_framefmt *src_fmt, *sink_fmt;
- struct v4l2_rect *sink_crop;
+ const struct v4l2_mbus_framefmt *src_fmt, *sink_fmt;
+ const struct v4l2_rect *sink_y;
+ struct v4l2_area src_y, src_c;
+ struct v4l2_rect sink_c;
+
+ sink_fmt = v4l2_subdev_get_pad_format(&rsz->sd, sd_state,
+ RKISP1_RSZ_PAD_SINK);
+ src_fmt = v4l2_subdev_get_pad_format(&rsz->sd, sd_state,
+ RKISP1_RSZ_PAD_SRC);
- sink_crop = rkisp1_rsz_get_pad_crop(rsz, NULL, RKISP1_RSZ_PAD_SINK,
- V4L2_SUBDEV_FORMAT_ACTIVE);
- src_fmt = rkisp1_rsz_get_pad_fmt(rsz, NULL, RKISP1_RSZ_PAD_SRC,
- V4L2_SUBDEV_FORMAT_ACTIVE);
- src_yuv_info = rkisp1_rsz_get_yuv_mbus_info(src_fmt->code);
- sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, NULL, RKISP1_RSZ_PAD_SINK,
- V4L2_SUBDEV_FORMAT_ACTIVE);
sink_yuv_info = rkisp1_rsz_get_yuv_mbus_info(sink_fmt->code);
+ src_yuv_info = rkisp1_rsz_get_yuv_mbus_info(src_fmt->code);
/*
- * The resizer only works on yuv formats,
- * so return if it is bayer format.
+	 * The resizer only works on YUV formats, so return if it is a Bayer
+	 * format.
*/
- if (rsz->pixel_enc == V4L2_PIXEL_ENC_BAYER) {
+ if (!sink_yuv_info) {
rkisp1_rsz_disable(rsz, when);
return;
}
- sink_y.width = sink_crop->width;
- sink_y.height = sink_crop->height;
+ sink_y = v4l2_subdev_get_pad_crop(&rsz->sd, sd_state,
+ RKISP1_RSZ_PAD_SINK);
+ sink_c.width = sink_y->width / sink_yuv_info->hdiv;
+ sink_c.height = sink_y->height / sink_yuv_info->vdiv;
+
src_y.width = src_fmt->width;
src_y.height = src_fmt->height;
-
- sink_c.width = sink_y.width / sink_yuv_info->hdiv;
- sink_c.height = sink_y.height / sink_yuv_info->vdiv;
+ src_c.width = src_y.width / src_yuv_info->hdiv;
+ src_c.height = src_y.height / src_yuv_info->vdiv;
/*
* The resizer is used not only to change the dimensions of the frame
- * but also to change the scale for YUV formats,
- * (4:2:2 -> 4:2:0 for example). So the width/height of the CbCr
- * streams should be set according to the media bus format in the src pad.
+ * but also to change the subsampling for YUV formats (for instance
+ * converting from 4:2:2 to 4:2:0). Check both the luma and chroma
+ * dimensions to decide whether or not to enable the resizer.
*/
- src_c.width = src_y.width / src_yuv_info->hdiv;
- src_c.height = src_y.height / src_yuv_info->vdiv;
- if (sink_c.width == src_c.width && sink_c.height == src_c.height) {
+ dev_dbg(rsz->rkisp1->dev,
+ "stream %u rsz/scale: Y %ux%u -> %ux%u, CbCr %ux%u -> %ux%u\n",
+ rsz->id, sink_y->width, sink_y->height,
+ src_fmt->width, src_fmt->height,
+ sink_c.width, sink_c.height, src_c.width, src_c.height);
+
+ if (sink_y->width == src_y.width && sink_y->height == src_y.height &&
+ sink_c.width == src_c.width && sink_c.height == src_c.height) {
rkisp1_rsz_disable(rsz, when);
return;
}
- dev_dbg(rsz->rkisp1->dev, "stream %d rsz/scale: %dx%d -> %dx%d\n",
- rsz->id, sink_crop->width, sink_crop->height,
- src_fmt->width, src_fmt->height);
- dev_dbg(rsz->rkisp1->dev, "chroma scaling %dx%d -> %dx%d\n",
- sink_c.width, sink_c.height, src_c.width, src_c.height);
-
- /* set values in the hw */
- rkisp1_rsz_config_regs(rsz, &sink_y, &sink_c, &src_y, &src_c, when);
+ /* Set values in the hardware. */
+ rkisp1_rsz_config_regs(rsz, sink_y, &sink_c, &src_y, &src_c, when);
}
/* ----------------------------------------------------------------------------
@@ -405,7 +377,7 @@ static int rkisp1_rsz_init_config(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
struct v4l2_rect *sink_crop;
- sink_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+ sink_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
RKISP1_RSZ_PAD_SRC);
sink_fmt->width = RKISP1_DEFAULT_WIDTH;
sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
@@ -423,7 +395,7 @@ static int rkisp1_rsz_init_config(struct v4l2_subdev *sd,
sink_crop->left = 0;
sink_crop->top = 0;
- src_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+ src_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
RKISP1_RSZ_PAD_SINK);
*src_fmt = *sink_fmt;
@@ -434,16 +406,16 @@ static int rkisp1_rsz_init_config(struct v4l2_subdev *sd,
static void rkisp1_rsz_set_src_fmt(struct rkisp1_resizer *rsz,
struct v4l2_subdev_state *sd_state,
- struct v4l2_mbus_framefmt *format,
- unsigned int which)
+ struct v4l2_mbus_framefmt *format)
{
const struct rkisp1_mbus_info *sink_mbus_info;
struct v4l2_mbus_framefmt *src_fmt, *sink_fmt;
- sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SINK,
- which);
- src_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SRC,
- which);
+ sink_fmt = v4l2_subdev_get_pad_format(&rsz->sd, sd_state,
+ RKISP1_RSZ_PAD_SINK);
+ src_fmt = v4l2_subdev_get_pad_format(&rsz->sd, sd_state,
+ RKISP1_RSZ_PAD_SRC);
+
sink_mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
/* for YUV formats, userspace can change the mbus code on the src pad if it is supported */
@@ -463,18 +435,16 @@ static void rkisp1_rsz_set_src_fmt(struct rkisp1_resizer *rsz,
static void rkisp1_rsz_set_sink_crop(struct rkisp1_resizer *rsz,
struct v4l2_subdev_state *sd_state,
- struct v4l2_rect *r,
- unsigned int which)
+ struct v4l2_rect *r)
{
const struct rkisp1_mbus_info *mbus_info;
struct v4l2_mbus_framefmt *sink_fmt;
struct v4l2_rect *sink_crop;
- sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SINK,
- which);
- sink_crop = rkisp1_rsz_get_pad_crop(rsz, sd_state,
- RKISP1_RSZ_PAD_SINK,
- which);
+ sink_fmt = v4l2_subdev_get_pad_format(&rsz->sd, sd_state,
+ RKISP1_RSZ_PAD_SINK);
+ sink_crop = v4l2_subdev_get_pad_crop(&rsz->sd, sd_state,
+ RKISP1_RSZ_PAD_SINK);
/* Not crop for MP bayer raw data */
mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
@@ -501,21 +471,20 @@ static void rkisp1_rsz_set_sink_crop(struct rkisp1_resizer *rsz,
static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
struct v4l2_subdev_state *sd_state,
- struct v4l2_mbus_framefmt *format,
- unsigned int which)
+ struct v4l2_mbus_framefmt *format)
{
const struct rkisp1_mbus_info *mbus_info;
struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
struct v4l2_rect *sink_crop;
bool is_yuv;
- sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SINK,
- which);
- src_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SRC,
- which);
- sink_crop = rkisp1_rsz_get_pad_crop(rsz, sd_state,
- RKISP1_RSZ_PAD_SINK,
- which);
+ sink_fmt = v4l2_subdev_get_pad_format(&rsz->sd, sd_state,
+ RKISP1_RSZ_PAD_SINK);
+ src_fmt = v4l2_subdev_get_pad_format(&rsz->sd, sd_state,
+ RKISP1_RSZ_PAD_SRC);
+ sink_crop = v4l2_subdev_get_pad_crop(&rsz->sd, sd_state,
+ RKISP1_RSZ_PAD_SINK);
+
if (rsz->id == RKISP1_SELFPATH)
sink_fmt->code = MEDIA_BUS_FMT_YUYV8_2X8;
else
@@ -526,8 +495,6 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
sink_fmt->code = RKISP1_DEF_FMT;
mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
}
- if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
- rsz->pixel_enc = mbus_info->pixel_enc;
sink_fmt->width = clamp_t(u32, format->width,
RKISP1_ISP_MIN_WIDTH,
@@ -576,21 +543,7 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
src_fmt->quantization = sink_fmt->quantization;
/* Update sink crop */
- rkisp1_rsz_set_sink_crop(rsz, sd_state, sink_crop, which);
-}
-
-static int rkisp1_rsz_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *fmt)
-{
- struct rkisp1_resizer *rsz =
- container_of(sd, struct rkisp1_resizer, sd);
-
- mutex_lock(&rsz->ops_lock);
- fmt->format = *rkisp1_rsz_get_pad_fmt(rsz, sd_state, fmt->pad,
- fmt->which);
- mutex_unlock(&rsz->ops_lock);
- return 0;
+ rkisp1_rsz_set_sink_crop(rsz, sd_state, sink_crop);
}
static int rkisp1_rsz_set_fmt(struct v4l2_subdev *sd,
@@ -600,15 +553,11 @@ static int rkisp1_rsz_set_fmt(struct v4l2_subdev *sd,
struct rkisp1_resizer *rsz =
container_of(sd, struct rkisp1_resizer, sd);
- mutex_lock(&rsz->ops_lock);
if (fmt->pad == RKISP1_RSZ_PAD_SINK)
- rkisp1_rsz_set_sink_fmt(rsz, sd_state, &fmt->format,
- fmt->which);
+ rkisp1_rsz_set_sink_fmt(rsz, sd_state, &fmt->format);
else
- rkisp1_rsz_set_src_fmt(rsz, sd_state, &fmt->format,
- fmt->which);
+ rkisp1_rsz_set_src_fmt(rsz, sd_state, &fmt->format);
- mutex_unlock(&rsz->ops_lock);
return 0;
}
@@ -616,35 +565,32 @@ static int rkisp1_rsz_get_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
- struct rkisp1_resizer *rsz =
- container_of(sd, struct rkisp1_resizer, sd);
struct v4l2_mbus_framefmt *mf_sink;
int ret = 0;
if (sel->pad == RKISP1_RSZ_PAD_SRC)
return -EINVAL;
- mutex_lock(&rsz->ops_lock);
switch (sel->target) {
case V4L2_SEL_TGT_CROP_BOUNDS:
- mf_sink = rkisp1_rsz_get_pad_fmt(rsz, sd_state,
- RKISP1_RSZ_PAD_SINK,
- sel->which);
+ mf_sink = v4l2_subdev_get_pad_format(sd, sd_state,
+ RKISP1_RSZ_PAD_SINK);
sel->r.height = mf_sink->height;
sel->r.width = mf_sink->width;
sel->r.left = 0;
sel->r.top = 0;
break;
+
case V4L2_SEL_TGT_CROP:
- sel->r = *rkisp1_rsz_get_pad_crop(rsz, sd_state,
- RKISP1_RSZ_PAD_SINK,
- sel->which);
+ sel->r = *v4l2_subdev_get_pad_crop(sd, sd_state,
+ RKISP1_RSZ_PAD_SINK);
break;
+
default:
ret = -EINVAL;
+ break;
}
- mutex_unlock(&rsz->ops_lock);
return ret;
}
@@ -661,9 +607,7 @@ static int rkisp1_rsz_set_selection(struct v4l2_subdev *sd,
dev_dbg(rsz->rkisp1->dev, "%s: pad: %d sel(%d,%d)/%dx%d\n", __func__,
sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
- mutex_lock(&rsz->ops_lock);
- rkisp1_rsz_set_sink_crop(rsz, sd_state, &sel->r, sel->which);
- mutex_unlock(&rsz->ops_lock);
+ rkisp1_rsz_set_sink_crop(rsz, sd_state, &sel->r);
return 0;
}
@@ -677,7 +621,7 @@ static const struct v4l2_subdev_pad_ops rkisp1_rsz_pad_ops = {
.get_selection = rkisp1_rsz_get_selection,
.set_selection = rkisp1_rsz_set_selection,
.init_cfg = rkisp1_rsz_init_config,
- .get_fmt = rkisp1_rsz_get_fmt,
+ .get_fmt = v4l2_subdev_get_fmt,
.set_fmt = rkisp1_rsz_set_fmt,
.link_validate = v4l2_subdev_link_validate_default,
};
@@ -693,6 +637,7 @@ static int rkisp1_rsz_s_stream(struct v4l2_subdev *sd, int enable)
struct rkisp1_device *rkisp1 = rsz->rkisp1;
struct rkisp1_capture *other = &rkisp1->capture_devs[rsz->id ^ 1];
enum rkisp1_shadow_regs_when when = RKISP1_SHADOW_REGS_SYNC;
+ struct v4l2_subdev_state *sd_state;
if (!enable) {
rkisp1_dcrop_disable(rsz, RKISP1_SHADOW_REGS_ASYNC);
@@ -703,11 +648,13 @@ static int rkisp1_rsz_s_stream(struct v4l2_subdev *sd, int enable)
if (other->is_streaming)
when = RKISP1_SHADOW_REGS_ASYNC;
- mutex_lock(&rsz->ops_lock);
- rkisp1_rsz_config(rsz, when);
- rkisp1_dcrop_config(rsz);
+ sd_state = v4l2_subdev_lock_and_get_active_state(sd);
+
+ rkisp1_rsz_config(rsz, sd_state, when);
+ rkisp1_dcrop_config(rsz, sd_state);
+
+ v4l2_subdev_unlock_state(sd_state);
- mutex_unlock(&rsz->ops_lock);
return 0;
}
@@ -726,15 +673,12 @@ static void rkisp1_rsz_unregister(struct rkisp1_resizer *rsz)
return;
v4l2_device_unregister_subdev(&rsz->sd);
+ v4l2_subdev_cleanup(&rsz->sd);
media_entity_cleanup(&rsz->sd.entity);
- mutex_destroy(&rsz->ops_lock);
}
static int rkisp1_rsz_register(struct rkisp1_resizer *rsz)
{
- struct v4l2_subdev_state state = {
- .pads = rsz->pad_cfg,
- };
static const char * const dev_names[] = {
RKISP1_RSZ_MP_DEV_NAME,
RKISP1_RSZ_SP_DEV_NAME
@@ -763,25 +707,26 @@ static int rkisp1_rsz_register(struct rkisp1_resizer *rsz)
pads[RKISP1_RSZ_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE |
MEDIA_PAD_FL_MUST_CONNECT;
- rsz->pixel_enc = RKISP1_DEF_PIXEL_ENC;
-
- mutex_init(&rsz->ops_lock);
ret = media_entity_pads_init(&sd->entity, RKISP1_RSZ_PAD_MAX, pads);
if (ret)
- goto error;
+ goto err_entity_cleanup;
+
+ ret = v4l2_subdev_init_finalize(sd);
+ if (ret)
+ goto err_entity_cleanup;
ret = v4l2_device_register_subdev(&rsz->rkisp1->v4l2_dev, sd);
if (ret) {
dev_err(sd->dev, "Failed to register resizer subdev\n");
- goto error;
+ goto err_subdev_cleanup;
}
- rkisp1_rsz_init_config(sd, &state);
return 0;
-error:
+err_subdev_cleanup:
+ v4l2_subdev_cleanup(sd);
+err_entity_cleanup:
media_entity_cleanup(&sd->entity);
- mutex_destroy(&rsz->ops_lock);
return ret;
}
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-is.c b/drivers/media/platform/samsung/exynos4-is/fimc-is.c
index 530a148fe4d3..a08c87ef6e2d 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is.c
@@ -767,12 +767,32 @@ static void fimc_is_debugfs_create(struct fimc_is *is)
static int fimc_is_runtime_resume(struct device *dev);
static int fimc_is_runtime_suspend(struct device *dev);
+static void __iomem *fimc_is_get_pmu_regs(struct device *dev)
+{
+ struct device_node *node;
+ void __iomem *regs;
+
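+	/* Prefer the "samsung,pmu-syscon" phandle; fall back to the deprecated "pmu" child node. */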
+ node = of_parse_phandle(dev->of_node, "samsung,pmu-syscon", 0);
+ if (!node) {
+ node = of_get_child_by_name(dev->of_node, "pmu");
+ if (!node)
+ return IOMEM_ERR_PTR(-ENODEV);
+ dev_warn(dev, "Found PMU node via deprecated method, update your DTB\n");
+ }
+
+ regs = of_iomap(node, 0);
+ of_node_put(node);
+ if (!regs)
+ return IOMEM_ERR_PTR(-ENOMEM);
+
+ return regs;
+}
+
static int fimc_is_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimc_is *is;
struct resource res;
- struct device_node *node;
int ret;
is = devm_kzalloc(&pdev->dev, sizeof(*is), GFP_KERNEL);
@@ -794,14 +814,9 @@ static int fimc_is_probe(struct platform_device *pdev)
if (IS_ERR(is->regs))
return PTR_ERR(is->regs);
- node = of_get_child_by_name(dev->of_node, "pmu");
- if (!node)
- return -ENODEV;
-
- is->pmu_regs = of_iomap(node, 0);
- of_node_put(node);
- if (!is->pmu_regs)
- return -ENOMEM;
+ is->pmu_regs = fimc_is_get_pmu_regs(dev);
+ if (IS_ERR(is->pmu_regs))
+ return PTR_ERR(is->pmu_regs);
is->irq = irq_of_parse_and_map(dev->of_node, 0);
if (!is->irq) {
diff --git a/drivers/media/platform/samsung/s3c-camif/camif-capture.c b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
index 76634d242b10..0f5b3845d7b9 100644
--- a/drivers/media/platform/samsung/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
@@ -1133,12 +1133,12 @@ int s3c_camif_register_video_node(struct camif_dev *camif, int idx)
ret = vb2_queue_init(q);
if (ret)
- goto err_vd_rel;
+ return ret;
vp->pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&vfd->entity, 1, &vp->pad);
if (ret)
- goto err_vd_rel;
+ return ret;
video_set_drvdata(vfd, vp);
@@ -1171,8 +1171,6 @@ err_ctrlh_free:
v4l2_ctrl_handler_free(&vp->ctrl_handler);
err_me_cleanup:
media_entity_cleanup(&vfd->entity);
-err_vd_rel:
- video_device_release(vfd);
return ret;
}
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
index f62703cebb77..4b4c129c09e7 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
@@ -1297,7 +1297,7 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
if (ctx->state == MFCINST_FINISHING && ctx->ref_queue_cnt == 0)
src_ready = false;
if (!src_ready || ctx->dst_queue_cnt == 0)
- clear_work_bit(ctx);
+ clear_work_bit_irqsave(ctx);
return 0;
}
diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
index 5dc1f908b49b..e4cf27b5a072 100644
--- a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
@@ -695,16 +695,10 @@ static int c8sectpfe_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fei);
- fei->c8sectpfeclk = devm_clk_get(dev, "c8sectpfe");
+ fei->c8sectpfeclk = devm_clk_get_enabled(dev, "c8sectpfe");
if (IS_ERR(fei->c8sectpfeclk)) {
- dev_err(dev, "c8sectpfe clk not found\n");
- return PTR_ERR(fei->c8sectpfeclk);
- }
-
- ret = clk_prepare_enable(fei->c8sectpfeclk);
- if (ret) {
dev_err(dev, "Failed to enable c8sectpfe clock\n");
- return ret;
+ return PTR_ERR(fei->c8sectpfeclk);
}
/* to save power disable all IP's (on by default) */
@@ -722,7 +716,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
0, "c8sectpfe-idle-irq", fei);
if (ret) {
dev_err(dev, "Can't register c8sectpfe-idle-irq IRQ.\n");
- goto err_clk_disable;
+ return ret;
}
ret = devm_request_irq(dev, fei->error_irq,
@@ -730,7 +724,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
"c8sectpfe-error-irq", fei);
if (ret) {
dev_err(dev, "Can't register c8sectpfe-error-irq IRQ.\n");
- goto err_clk_disable;
+ return ret;
}
fei->tsin_count = of_get_child_count(np);
@@ -739,16 +733,14 @@ static int c8sectpfe_probe(struct platform_device *pdev)
fei->tsin_count > fei->hw_stats.num_ib) {
dev_err(dev, "More tsin declared than exist on SoC!\n");
- ret = -EINVAL;
- goto err_clk_disable;
+ return -EINVAL;
}
fei->pinctrl = devm_pinctrl_get(dev);
if (IS_ERR(fei->pinctrl)) {
dev_err(dev, "Error getting tsin pins\n");
- ret = PTR_ERR(fei->pinctrl);
- goto err_clk_disable;
+ return PTR_ERR(fei->pinctrl);
}
for_each_child_of_node(np, child) {
@@ -859,7 +851,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "c8sectpfe_tuner_register_frontend failed (%d)\n",
ret);
- goto err_clk_disable;
+ return ret;
}
c8sectpfe_debugfs_init(fei);
@@ -868,8 +860,6 @@ static int c8sectpfe_probe(struct platform_device *pdev)
err_node_put:
of_node_put(child);
-err_clk_disable:
- clk_disable_unprepare(fei->c8sectpfeclk);
return ret;
}
@@ -903,8 +893,6 @@ static void c8sectpfe_remove(struct platform_device *pdev)
if (readl(fei->io + SYS_OTHER_CLKEN))
writel(0, fei->io + SYS_OTHER_CLKEN);
-
- clk_disable_unprepare(fei->c8sectpfeclk);
}
diff --git a/drivers/media/platform/ti/Kconfig b/drivers/media/platform/ti/Kconfig
index e1ab56c3be1f..bab998c4179a 100644
--- a/drivers/media/platform/ti/Kconfig
+++ b/drivers/media/platform/ti/Kconfig
@@ -63,6 +63,18 @@ config VIDEO_TI_VPE_DEBUG
help
Enable debug messages on VPE driver.
+config VIDEO_TI_J721E_CSI2RX
+ tristate "TI J721E CSI2RX wrapper layer driver"
+ depends on VIDEO_DEV && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_SUPPORT && MEDIA_CONTROLLER
+ depends on (PHY_CADENCE_DPHY_RX && VIDEO_CADENCE_CSI2RX) || COMPILE_TEST
+ depends on ARCH_K3 || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
+ help
+	  Support for the TI CSI2RX wrapper layer. This only enables the wrapper
+	  driver; the Cadence CSI2RX bridge driver needs to be enabled separately.
+
source "drivers/media/platform/ti/am437x/Kconfig"
source "drivers/media/platform/ti/davinci/Kconfig"
source "drivers/media/platform/ti/omap/Kconfig"
diff --git a/drivers/media/platform/ti/Makefile b/drivers/media/platform/ti/Makefile
index 98c5fe5c40d6..8a2f74c9380e 100644
--- a/drivers/media/platform/ti/Makefile
+++ b/drivers/media/platform/ti/Makefile
@@ -3,5 +3,6 @@ obj-y += am437x/
obj-y += cal/
obj-y += vpe/
obj-y += davinci/
+obj-y += j721e-csi2rx/
obj-y += omap/
obj-y += omap3isp/
diff --git a/drivers/media/platform/ti/am437x/am437x-vpfe.c b/drivers/media/platform/ti/am437x/am437x-vpfe.c
index 63092013d476..5fa2ea9025d9 100644
--- a/drivers/media/platform/ti/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/ti/am437x/am437x-vpfe.c
@@ -1271,12 +1271,8 @@ static inline void vpfe_attach_irq(struct vpfe_device *vpfe)
static int vpfe_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct vpfe_device *vpfe = video_drvdata(file);
-
strscpy(cap->driver, VPFE_MODULE_NAME, sizeof(cap->driver));
strscpy(cap->card, "TI AM437x VPFE", sizeof(cap->card));
- snprintf(cap->bus_info, sizeof(cap->bus_info),
- "platform:%s", vpfe->v4l2_dev.name);
return 0;
}
diff --git a/drivers/media/platform/ti/j721e-csi2rx/Makefile b/drivers/media/platform/ti/j721e-csi2rx/Makefile
new file mode 100644
index 000000000000..377afc1d6280
--- /dev/null
+++ b/drivers/media/platform/ti/j721e-csi2rx/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VIDEO_TI_J721E_CSI2RX) += j721e-csi2rx.o
diff --git a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
new file mode 100644
index 000000000000..ada61391c8d2
--- /dev/null
+++ b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
@@ -0,0 +1,1159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI CSI2RX Shim Wrapper Driver
+ *
+ * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ * Author: Pratyush Yadav <p.yadav@ti.com>
+ * Author: Jai Luthra <j-luthra@ti.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <media/mipi-csi2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/videobuf2-dma-contig.h>
+
+#define TI_CSI2RX_MODULE_NAME "j721e-csi2rx"
+
+#define SHIM_CNTL 0x10
+#define SHIM_CNTL_PIX_RST BIT(0)
+
+#define SHIM_DMACNTX 0x20
+#define SHIM_DMACNTX_EN BIT(31)
+#define SHIM_DMACNTX_YUV422 GENMASK(27, 26)
+#define SHIM_DMACNTX_SIZE GENMASK(21, 20)
+#define SHIM_DMACNTX_FMT GENMASK(5, 0)
+#define SHIM_DMACNTX_YUV422_MODE_11 3
+#define SHIM_DMACNTX_SIZE_8 0
+#define SHIM_DMACNTX_SIZE_16 1
+#define SHIM_DMACNTX_SIZE_32 2
+
+#define SHIM_PSI_CFG0 0x24
+#define SHIM_PSI_CFG0_SRC_TAG GENMASK(15, 0)
+#define SHIM_PSI_CFG0_DST_TAG GENMASK(31, 16)
+
+#define PSIL_WORD_SIZE_BYTES 16
+/*
+ * There are no hard limits on the width or height; the DMA engine can handle
+ * all sizes. The maximum width and height are arbitrary limits chosen for this
+ * driver. Use 16K x 16K, which is large enough that the limits are unlikely to
+ * be hit in practice.
+ */
+#define MAX_WIDTH_BYTES SZ_16K
+#define MAX_HEIGHT_LINES SZ_16K
+
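+/* Timeout and scratch buffer size used to drain stale data from the PSI-L endpoint. */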
+#define DRAIN_TIMEOUT_MS 50
+#define DRAIN_BUFFER_SIZE SZ_32K
+
+struct ti_csi2rx_fmt {
+ u32 fourcc; /* Four character code. */
+ u32 code; /* Mbus code. */
+ u32 csi_dt; /* CSI Data type. */
+ u8 bpp; /* Bits per pixel. */
+ u8 size; /* Data size shift when unpacking. */
+};
+
+struct ti_csi2rx_buffer {
+ /* Common v4l2 buffer. Must be first. */
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+ struct ti_csi2rx_dev *csi;
+};
+
+enum ti_csi2rx_dma_state {
+ TI_CSI2RX_DMA_STOPPED, /* Streaming not started yet. */
+ TI_CSI2RX_DMA_IDLE, /* Streaming but no pending DMA operation. */
+ TI_CSI2RX_DMA_ACTIVE, /* Streaming and pending DMA operation. */
+};
+
+struct ti_csi2rx_dma {
+ /* Protects all fields in this struct. */
+ spinlock_t lock;
+ struct dma_chan *chan;
+ /* Buffers queued to the driver, waiting to be processed by DMA. */
+ struct list_head queue;
+ enum ti_csi2rx_dma_state state;
+ /*
+	 * Queue of buffers submitted to the DMA engine.
+ */
+ struct list_head submitted;
+ /* Buffer to drain stale data from PSI-L endpoint */
+ struct {
+ void *vaddr;
+ dma_addr_t paddr;
+ size_t len;
+ } drain;
+};
+
+struct ti_csi2rx_dev {
+ struct device *dev;
+ void __iomem *shim;
+ struct v4l2_device v4l2_dev;
+ struct video_device vdev;
+ struct media_device mdev;
+ struct media_pipeline pipe;
+ struct media_pad pad;
+ struct v4l2_async_notifier notifier;
+ struct v4l2_subdev *source;
+ struct vb2_queue vidq;
+ struct mutex mutex; /* To serialize ioctls. */
+ struct v4l2_format v_fmt;
+ struct ti_csi2rx_dma dma;
+ u32 sequence;
+};
+
+static const struct ti_csi2rx_fmt ti_csi2rx_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .csi_dt = MIPI_CSI2_DT_YUV422_8B,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .csi_dt = MIPI_CSI2_DT_YUV422_8B,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .code = MEDIA_BUS_FMT_YVYU8_1X16,
+ .csi_dt = MIPI_CSI2_DT_YUV422_8B,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .code = MEDIA_BUS_FMT_VYUY8_1X16,
+ .csi_dt = MIPI_CSI2_DT_YUV422_8B,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .csi_dt = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ .size = SHIM_DMACNTX_SIZE_8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .csi_dt = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ .size = SHIM_DMACNTX_SIZE_8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .csi_dt = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ .size = SHIM_DMACNTX_SIZE_8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .csi_dt = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ .size = SHIM_DMACNTX_SIZE_8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .csi_dt = MIPI_CSI2_DT_RAW10,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .csi_dt = MIPI_CSI2_DT_RAW10,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .csi_dt = MIPI_CSI2_DT_RAW10,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .csi_dt = MIPI_CSI2_DT_RAW10,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_16,
+ },
+
+ /* More formats can be supported but they are not listed for now. */
+};
+
+/* Forward declaration needed by ti_csi2rx_dma_callback. */
+static int ti_csi2rx_start_dma(struct ti_csi2rx_dev *csi,
+ struct ti_csi2rx_buffer *buf);
+
+static const struct ti_csi2rx_fmt *find_format_by_fourcc(u32 pixelformat)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ti_csi2rx_formats); i++) {
+ if (ti_csi2rx_formats[i].fourcc == pixelformat)
+ return &ti_csi2rx_formats[i];
+ }
+
+ return NULL;
+}
+
+static const struct ti_csi2rx_fmt *find_format_by_code(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ti_csi2rx_formats); i++) {
+ if (ti_csi2rx_formats[i].code == code)
+ return &ti_csi2rx_formats[i];
+ }
+
+ return NULL;
+}
+
+static void ti_csi2rx_fill_fmt(const struct ti_csi2rx_fmt *csi_fmt,
+ struct v4l2_format *v4l2_fmt)
+{
+ struct v4l2_pix_format *pix = &v4l2_fmt->fmt.pix;
+ unsigned int pixels_in_word;
+
+ pixels_in_word = PSIL_WORD_SIZE_BYTES * 8 / csi_fmt->bpp;
+
+ /* Clamp width and height to sensible maximums (16K x 16K) */
+ pix->width = clamp_t(unsigned int, pix->width,
+ pixels_in_word,
+ MAX_WIDTH_BYTES * 8 / csi_fmt->bpp);
+ pix->height = clamp_t(unsigned int, pix->height, 1, MAX_HEIGHT_LINES);
+
+	/* Width should be a multiple of the transfer word size. */
+ pix->width = rounddown(pix->width, pixels_in_word);
+
+ v4l2_fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ pix->pixelformat = csi_fmt->fourcc;
+ pix->bytesperline = pix->width * (csi_fmt->bpp / 8);
+ pix->sizeimage = pix->bytesperline * pix->height;
+}
+
+static int ti_csi2rx_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, TI_CSI2RX_MODULE_NAME, sizeof(cap->driver));
+ strscpy(cap->card, TI_CSI2RX_MODULE_NAME, sizeof(cap->card));
+
+ return 0;
+}
+
+static int ti_csi2rx_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ const struct ti_csi2rx_fmt *fmt = NULL;
+
+ if (f->mbus_code) {
+ /* 1-to-1 mapping between bus formats and pixel formats */
+ if (f->index > 0)
+ return -EINVAL;
+
+ fmt = find_format_by_code(f->mbus_code);
+ } else {
+ if (f->index >= ARRAY_SIZE(ti_csi2rx_formats))
+ return -EINVAL;
+
+ fmt = &ti_csi2rx_formats[f->index];
+ }
+
+ if (!fmt)
+ return -EINVAL;
+
+ f->pixelformat = fmt->fourcc;
+ memset(f->reserved, 0, sizeof(f->reserved));
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ return 0;
+}
+
+static int ti_csi2rx_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct ti_csi2rx_dev *csi = video_drvdata(file);
+
+ *f = csi->v_fmt;
+
+ return 0;
+}
+
+static int ti_csi2rx_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ const struct ti_csi2rx_fmt *fmt;
+
+ /*
+ * Default to the first format if the requested pixel format code isn't
+ * supported.
+ */
+ fmt = find_format_by_fourcc(f->fmt.pix.pixelformat);
+ if (!fmt)
+ fmt = &ti_csi2rx_formats[0];
+
+ /* Interlaced formats are not supported. */
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ ti_csi2rx_fill_fmt(fmt, f);
+
+ return 0;
+}
+
+static int ti_csi2rx_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct ti_csi2rx_dev *csi = video_drvdata(file);
+ struct vb2_queue *q = &csi->vidq;
+ int ret;
+
+ if (vb2_is_busy(q))
+ return -EBUSY;
+
+ ret = ti_csi2rx_try_fmt_vid_cap(file, priv, f);
+ if (ret < 0)
+ return ret;
+
+ csi->v_fmt = *f;
+
+ return 0;
+}
+
+static int ti_csi2rx_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ const struct ti_csi2rx_fmt *fmt;
+ unsigned int pixels_in_word;
+
+ fmt = find_format_by_fourcc(fsize->pixel_format);
+ if (!fmt || fsize->index != 0)
+ return -EINVAL;
+
+ /*
+	 * Number of pixels in one PSI-L word. Transfers happen in multiples of
+	 * the PSI-L word size.
+ */
+ pixels_in_word = PSIL_WORD_SIZE_BYTES * 8 / fmt->bpp;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = pixels_in_word;
+ fsize->stepwise.max_width = rounddown(MAX_WIDTH_BYTES * 8 / fmt->bpp,
+ pixels_in_word);
+ fsize->stepwise.step_width = pixels_in_word;
+ fsize->stepwise.min_height = 1;
+ fsize->stepwise.max_height = MAX_HEIGHT_LINES;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops csi_ioctl_ops = {
+ .vidioc_querycap = ti_csi2rx_querycap,
+ .vidioc_enum_fmt_vid_cap = ti_csi2rx_enum_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = ti_csi2rx_try_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = ti_csi2rx_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = ti_csi2rx_s_fmt_vid_cap,
+ .vidioc_enum_framesizes = ti_csi2rx_enum_framesizes,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+static const struct v4l2_file_operations csi_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+static int csi_async_notifier_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asc)
+{
+ struct ti_csi2rx_dev *csi = dev_get_drvdata(notifier->v4l2_dev->dev);
+
+ csi->source = subdev;
+
+ return 0;
+}
+
+static int csi_async_notifier_complete(struct v4l2_async_notifier *notifier)
+{
+ struct ti_csi2rx_dev *csi = dev_get_drvdata(notifier->v4l2_dev->dev);
+ struct video_device *vdev = &csi->vdev;
+ int ret;
+
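+	/* Register the video node and link the bound source subdev to the capture pad. */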
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret)
+ return ret;
+
+ ret = v4l2_create_fwnode_links_to_pad(csi->source, &csi->pad,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+
+ if (ret) {
+ video_unregister_device(vdev);
+ return ret;
+ }
+
+ ret = v4l2_device_register_subdev_nodes(&csi->v4l2_dev);
+ if (ret)
+ video_unregister_device(vdev);
+
+ return ret;
+}
+
+static const struct v4l2_async_notifier_operations csi_async_notifier_ops = {
+ .bound = csi_async_notifier_bound,
+ .complete = csi_async_notifier_complete,
+};
+
+static int ti_csi2rx_notifier_register(struct ti_csi2rx_dev *csi)
+{
+ struct fwnode_handle *fwnode;
+ struct v4l2_async_connection *asc;
+ struct device_node *node;
+ int ret;
+
+ node = of_get_child_by_name(csi->dev->of_node, "csi-bridge");
+ if (!node)
+ return -EINVAL;
+
+ fwnode = of_fwnode_handle(node);
+ if (!fwnode) {
+ of_node_put(node);
+ return -EINVAL;
+ }
+
+ v4l2_async_nf_init(&csi->notifier, &csi->v4l2_dev);
+ csi->notifier.ops = &csi_async_notifier_ops;
+
+ asc = v4l2_async_nf_add_fwnode(&csi->notifier, fwnode,
+ struct v4l2_async_connection);
+ of_node_put(node);
+ if (IS_ERR(asc)) {
+ v4l2_async_nf_cleanup(&csi->notifier);
+ return PTR_ERR(asc);
+ }
+
+ ret = v4l2_async_nf_register(&csi->notifier);
+ if (ret) {
+ v4l2_async_nf_cleanup(&csi->notifier);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ti_csi2rx_setup_shim(struct ti_csi2rx_dev *csi)
+{
+ const struct ti_csi2rx_fmt *fmt;
+ unsigned int reg;
+
+ fmt = find_format_by_fourcc(csi->v_fmt.fmt.pix.pixelformat);
+
+ /* De-assert the pixel interface reset. */
+ reg = SHIM_CNTL_PIX_RST;
+ writel(reg, csi->shim + SHIM_CNTL);
+
+ reg = SHIM_DMACNTX_EN;
+ reg |= FIELD_PREP(SHIM_DMACNTX_FMT, fmt->csi_dt);
+
+ /*
+	 * The hardware assumes that incoming YUV422 8-bit data on the MIPI
+	 * CSI-2 bus follows the spec and is packed in the order U0 -> Y0 ->
+	 * V0 -> Y1 -> ...
+ *
+ * There is an option to swap the bytes around before storing in
+ * memory, to achieve different pixel formats:
+ *
+ * Byte3 <----------- Byte0
+ * [ Y1 ][ V0 ][ Y0 ][ U0 ] MODE 11
+ * [ Y1 ][ U0 ][ Y0 ][ V0 ] MODE 10
+ * [ V0 ][ Y1 ][ U0 ][ Y0 ] MODE 01
+ * [ U0 ][ Y1 ][ V0 ][ Y0 ] MODE 00
+ *
+	 * There is no requirement to change the pixel format from what comes
+	 * from the source, so keep MODE 11, which does not swap any bytes
+	 * when storing to memory.
+ */
+ switch (fmt->fourcc) {
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ reg |= FIELD_PREP(SHIM_DMACNTX_YUV422,
+ SHIM_DMACNTX_YUV422_MODE_11);
+ break;
+ default:
+ /* Ignore if not YUV 4:2:2 */
+ break;
+ }
+
+ reg |= FIELD_PREP(SHIM_DMACNTX_SIZE, fmt->size);
+
+ writel(reg, csi->shim + SHIM_DMACNTX);
+
+ reg = FIELD_PREP(SHIM_PSI_CFG0_SRC_TAG, 0) |
+ FIELD_PREP(SHIM_PSI_CFG0_DST_TAG, 0);
+ writel(reg, csi->shim + SHIM_PSI_CFG0);
+}
+
+static void ti_csi2rx_drain_callback(void *param)
+{
+ struct completion *drain_complete = param;
+
+ complete(drain_complete);
+}
+
+/*
+ * Drain the stale data left at the PSI-L endpoint.
+ *
+ * This might happen if no buffers are queued in time but the source is still
+ * streaming. In multi-stream scenarios this can happen when one stream is
+ * stopped but the other is still streaming, and thus the module-level pixel
+ * reset is not asserted.
+ *
+ * To prevent that stale data from corrupting subsequent transactions, DMA
+ * requests must be issued to drain it out.
+ */
+static int ti_csi2rx_drain_dma(struct ti_csi2rx_dev *csi)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct completion drain_complete;
+ dma_cookie_t cookie;
+ int ret;
+
+ init_completion(&drain_complete);
+
+ desc = dmaengine_prep_slave_single(csi->dma.chan, csi->dma.drain.paddr,
+ csi->dma.drain.len, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ ret = -EIO;
+ goto out;
+ }
+
+ desc->callback = ti_csi2rx_drain_callback;
+ desc->callback_param = &drain_complete;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret)
+ goto out;
+
+ dma_async_issue_pending(csi->dma.chan);
+
+ if (!wait_for_completion_timeout(&drain_complete,
+ msecs_to_jiffies(DRAIN_TIMEOUT_MS))) {
+ dmaengine_terminate_sync(csi->dma.chan);
+ dev_dbg(csi->dev, "DMA transfer timed out for drain buffer\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+out:
+ return ret;
+}
+
+static void ti_csi2rx_dma_callback(void *param)
+{
+ struct ti_csi2rx_buffer *buf = param;
+ struct ti_csi2rx_dev *csi = buf->csi;
+ struct ti_csi2rx_dma *dma = &csi->dma;
+ unsigned long flags;
+
+ /*
+ * TODO: Derive the sequence number from the CSI2RX frame number
+ * hardware monitor registers.
+ */
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ buf->vb.sequence = csi->sequence++;
+
+ spin_lock_irqsave(&dma->lock, flags);
+
+ WARN_ON(!list_is_first(&buf->list, &dma->submitted));
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ list_del(&buf->list);
+
+	/* If there are more buffers to process, start their transfers. */
+ while (!list_empty(&dma->queue)) {
+ buf = list_entry(dma->queue.next, struct ti_csi2rx_buffer, list);
+
+ if (ti_csi2rx_start_dma(csi, buf)) {
+ dev_err(csi->dev, "Failed to queue the next buffer for DMA\n");
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ } else {
+ list_move_tail(&buf->list, &dma->submitted);
+ }
+ }
+
+ if (list_empty(&dma->submitted))
+ dma->state = TI_CSI2RX_DMA_IDLE;
+
+ spin_unlock_irqrestore(&dma->lock, flags);
+}
+
+static int ti_csi2rx_start_dma(struct ti_csi2rx_dev *csi,
+ struct ti_csi2rx_buffer *buf)
+{
+ unsigned long addr;
+ struct dma_async_tx_descriptor *desc;
+ size_t len = csi->v_fmt.fmt.pix.sizeimage;
+ dma_cookie_t cookie;
+ int ret = 0;
+
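+	/* Submit one full-frame (sizeimage) transfer; completion is handled by ti_csi2rx_dma_callback(). */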
+ addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
+ desc = dmaengine_prep_slave_single(csi->dma.chan, addr, len,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+ return -EIO;
+
+ desc->callback = ti_csi2rx_dma_callback;
+ desc->callback_param = buf;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret)
+ return ret;
+
+ dma_async_issue_pending(csi->dma.chan);
+
+ return 0;
+}
+
+static void ti_csi2rx_stop_dma(struct ti_csi2rx_dev *csi)
+{
+ struct ti_csi2rx_dma *dma = &csi->dma;
+ enum ti_csi2rx_dma_state state;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dma->lock, flags);
+ state = csi->dma.state;
+ dma->state = TI_CSI2RX_DMA_STOPPED;
+ spin_unlock_irqrestore(&dma->lock, flags);
+
+ if (state != TI_CSI2RX_DMA_STOPPED) {
+ /*
+ * Normal DMA termination does not clean up pending data on
+ * the endpoint if multiple streams are running and only one
+ * is stopped, as the module-level pixel reset cannot be
+ * enforced before terminating DMA.
+ */
+ ret = ti_csi2rx_drain_dma(csi);
+ if (ret && ret != -ETIMEDOUT)
+ dev_warn(csi->dev,
+ "Failed to drain DMA. Next frame might be bogus\n");
+ }
+
+ ret = dmaengine_terminate_sync(csi->dma.chan);
+ if (ret)
+ dev_err(csi->dev, "Failed to stop DMA: %d\n", ret);
+}
+
+static void ti_csi2rx_cleanup_buffers(struct ti_csi2rx_dev *csi,
+ enum vb2_buffer_state state)
+{
+ struct ti_csi2rx_dma *dma = &csi->dma;
+ struct ti_csi2rx_buffer *buf, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dma->lock, flags);
+ list_for_each_entry_safe(buf, tmp, &csi->dma.queue, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ }
+ list_for_each_entry_safe(buf, tmp, &csi->dma.submitted, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ }
+ spin_unlock_irqrestore(&dma->lock, flags);
+}
+
+static int ti_csi2rx_queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct ti_csi2rx_dev *csi = vb2_get_drv_priv(q);
+ unsigned int size = csi->v_fmt.fmt.pix.sizeimage;
+
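+	/* Honour a larger size requested via VIDIOC_CREATE_BUFS, but never one smaller than the image size. */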
+ if (*nplanes) {
+ if (sizes[0] < size)
+ return -EINVAL;
+ size = sizes[0];
+ }
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ return 0;
+}
+
+static int ti_csi2rx_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct ti_csi2rx_dev *csi = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size = csi->v_fmt.fmt.pix.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_err(csi->dev, "Data will not fit into plane\n");
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+ return 0;
+}
+
+static void ti_csi2rx_buffer_queue(struct vb2_buffer *vb)
+{
+ struct ti_csi2rx_dev *csi = vb2_get_drv_priv(vb->vb2_queue);
+ struct ti_csi2rx_buffer *buf;
+ struct ti_csi2rx_dma *dma = &csi->dma;
+ bool restart_dma = false;
+ unsigned long flags = 0;
+ int ret;
+
+ buf = container_of(vb, struct ti_csi2rx_buffer, vb.vb2_buf);
+ buf->csi = csi;
+
+ spin_lock_irqsave(&dma->lock, flags);
+ /*
+ * Usually the DMA callback takes care of queueing the pending buffers.
+ * But if DMA has stalled due to lack of buffers, restart it now.
+ */
+ if (dma->state == TI_CSI2RX_DMA_IDLE) {
+ /*
+ * Do not restart DMA with the lock held because
+	 * ti_csi2rx_drain_dma() might block waiting for completion.
+ * There won't be a race on queueing DMA anyway since the
+ * callback is not being fired.
+ */
+ restart_dma = true;
+ dma->state = TI_CSI2RX_DMA_ACTIVE;
+ } else {
+ list_add_tail(&buf->list, &dma->queue);
+ }
+ spin_unlock_irqrestore(&dma->lock, flags);
+
+ if (restart_dma) {
+ /*
+ * Once frames start dropping, some data gets stuck in the DMA
+ * pipeline somewhere. So the first DMA transfer after frame
+ * drops gives a partial frame. This is obviously not useful to
+ * the application and will only confuse it. Issue a DMA
+		 * transaction to drain it out.
+ */
+ ret = ti_csi2rx_drain_dma(csi);
+ if (ret && ret != -ETIMEDOUT)
+ dev_warn(csi->dev,
+ "Failed to drain DMA. Next frame might be bogus\n");
+
+ ret = ti_csi2rx_start_dma(csi, buf);
+ if (ret) {
+ dev_err(csi->dev, "Failed to start DMA: %d\n", ret);
+ spin_lock_irqsave(&dma->lock, flags);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ dma->state = TI_CSI2RX_DMA_IDLE;
+ spin_unlock_irqrestore(&dma->lock, flags);
+ } else {
+ spin_lock_irqsave(&dma->lock, flags);
+ list_add_tail(&buf->list, &dma->submitted);
+ spin_unlock_irqrestore(&dma->lock, flags);
+ }
+ }
+}
+
+static int ti_csi2rx_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct ti_csi2rx_dev *csi = vb2_get_drv_priv(vq);
+ struct ti_csi2rx_dma *dma = &csi->dma;
+ struct ti_csi2rx_buffer *buf;
+ unsigned long flags;
+ int ret = 0;
+
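+	/* Streaming cannot start without at least one queued buffer. */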
+ spin_lock_irqsave(&dma->lock, flags);
+ if (list_empty(&dma->queue))
+ ret = -EIO;
+ spin_unlock_irqrestore(&dma->lock, flags);
+ if (ret)
+ return ret;
+
+ ret = video_device_pipeline_start(&csi->vdev, &csi->pipe);
+ if (ret)
+ goto err;
+
+ ti_csi2rx_setup_shim(csi);
+
+ csi->sequence = 0;
+
+ spin_lock_irqsave(&dma->lock, flags);
+ buf = list_entry(dma->queue.next, struct ti_csi2rx_buffer, list);
+
+ ret = ti_csi2rx_start_dma(csi, buf);
+ if (ret) {
+ dev_err(csi->dev, "Failed to start DMA: %d\n", ret);
+ spin_unlock_irqrestore(&dma->lock, flags);
+ goto err_pipeline;
+ }
+
+ list_move_tail(&buf->list, &dma->submitted);
+ dma->state = TI_CSI2RX_DMA_ACTIVE;
+ spin_unlock_irqrestore(&dma->lock, flags);
+
+ ret = v4l2_subdev_call(csi->source, video, s_stream, 1);
+ if (ret)
+ goto err_dma;
+
+ return 0;
+
+err_dma:
+ ti_csi2rx_stop_dma(csi);
+err_pipeline:
+ video_device_pipeline_stop(&csi->vdev);
+ writel(0, csi->shim + SHIM_CNTL);
+ writel(0, csi->shim + SHIM_DMACNTX);
+err:
+ ti_csi2rx_cleanup_buffers(csi, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static void ti_csi2rx_stop_streaming(struct vb2_queue *vq)
+{
+ struct ti_csi2rx_dev *csi = vb2_get_drv_priv(vq);
+ int ret;
+
+ video_device_pipeline_stop(&csi->vdev);
+
+ writel(0, csi->shim + SHIM_CNTL);
+ writel(0, csi->shim + SHIM_DMACNTX);
+
+ ret = v4l2_subdev_call(csi->source, video, s_stream, 0);
+ if (ret)
+ dev_err(csi->dev, "Failed to stop subdev stream\n");
+
+ ti_csi2rx_stop_dma(csi);
+ ti_csi2rx_cleanup_buffers(csi, VB2_BUF_STATE_ERROR);
+}
+
+static const struct vb2_ops csi_vb2_qops = {
+ .queue_setup = ti_csi2rx_queue_setup,
+ .buf_prepare = ti_csi2rx_buffer_prepare,
+ .buf_queue = ti_csi2rx_buffer_queue,
+ .start_streaming = ti_csi2rx_start_streaming,
+ .stop_streaming = ti_csi2rx_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int ti_csi2rx_init_vb2q(struct ti_csi2rx_dev *csi)
+{
+ struct vb2_queue *q = &csi->vidq;
+ int ret;
+
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
+ q->drv_priv = csi;
+ q->buf_struct_size = sizeof(struct ti_csi2rx_buffer);
+ q->ops = &csi_vb2_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->dev = dmaengine_get_dma_device(csi->dma.chan);
+ q->lock = &csi->mutex;
+ q->min_buffers_needed = 1;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ return ret;
+
+ csi->vdev.queue = q;
+
+ return 0;
+}
+
+static int ti_csi2rx_link_validate(struct media_link *link)
+{
+ struct media_entity *entity = link->sink->entity;
+ struct video_device *vdev = media_entity_to_video_device(entity);
+ struct ti_csi2rx_dev *csi = container_of(vdev, struct ti_csi2rx_dev, vdev);
+ struct v4l2_pix_format *csi_fmt = &csi->v_fmt.fmt.pix;
+ struct v4l2_subdev_format source_fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .pad = link->source->index,
+ };
+ const struct ti_csi2rx_fmt *ti_fmt;
+ int ret;
+
+ ret = v4l2_subdev_call_state_active(csi->source, pad,
+ get_fmt, &source_fmt);
+ if (ret)
+ return ret;
+
+ if (source_fmt.format.width != csi_fmt->width) {
+ dev_dbg(csi->dev, "Width does not match (source %u, sink %u)\n",
+ source_fmt.format.width, csi_fmt->width);
+ return -EPIPE;
+ }
+
+ if (source_fmt.format.height != csi_fmt->height) {
+ dev_dbg(csi->dev, "Height does not match (source %u, sink %u)\n",
+ source_fmt.format.height, csi_fmt->height);
+ return -EPIPE;
+ }
+
+ if (source_fmt.format.field != csi_fmt->field &&
+ csi_fmt->field != V4L2_FIELD_NONE) {
+ dev_dbg(csi->dev, "Field does not match (source %u, sink %u)\n",
+ source_fmt.format.field, csi_fmt->field);
+ return -EPIPE;
+ }
+
+ ti_fmt = find_format_by_code(source_fmt.format.code);
+ if (!ti_fmt) {
+ dev_dbg(csi->dev, "Media bus format 0x%x not supported\n",
+ source_fmt.format.code);
+ return -EPIPE;
+ }
+
+ if (ti_fmt->fourcc != csi_fmt->pixelformat) {
+ dev_dbg(csi->dev,
+ "Cannot transform source fmt 0x%x to sink fmt 0x%x\n",
+ ti_fmt->fourcc, csi_fmt->pixelformat);
+ return -EPIPE;
+ }
+
+ return 0;
+}
+
+static const struct media_entity_operations ti_csi2rx_video_entity_ops = {
+ .link_validate = ti_csi2rx_link_validate,
+};
+
+static int ti_csi2rx_init_dma(struct ti_csi2rx_dev *csi)
+{
+ struct dma_slave_config cfg = {
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_16_BYTES,
+ };
+ int ret;
+
+ INIT_LIST_HEAD(&csi->dma.queue);
+ INIT_LIST_HEAD(&csi->dma.submitted);
+ spin_lock_init(&csi->dma.lock);
+
+ csi->dma.state = TI_CSI2RX_DMA_STOPPED;
+
+ csi->dma.chan = dma_request_chan(csi->dev, "rx0");
+ if (IS_ERR(csi->dma.chan))
+ return PTR_ERR(csi->dma.chan);
+
+ ret = dmaengine_slave_config(csi->dma.chan, &cfg);
+ if (ret) {
+ dma_release_channel(csi->dma.chan);
+ return ret;
+ }
+
+ csi->dma.drain.len = DRAIN_BUFFER_SIZE;
+ csi->dma.drain.vaddr = dma_alloc_coherent(csi->dev, csi->dma.drain.len,
+ &csi->dma.drain.paddr,
+ GFP_KERNEL);
+ if (!csi->dma.drain.vaddr) {
+ dma_release_channel(csi->dma.chan);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int ti_csi2rx_v4l2_init(struct ti_csi2rx_dev *csi)
+{
+ struct media_device *mdev = &csi->mdev;
+ struct video_device *vdev = &csi->vdev;
+ const struct ti_csi2rx_fmt *fmt;
+ struct v4l2_pix_format *pix_fmt = &csi->v_fmt.fmt.pix;
+ int ret;
+
+ fmt = find_format_by_fourcc(V4L2_PIX_FMT_UYVY);
+ if (!fmt)
+ return -EINVAL;
+
+ pix_fmt->width = 640;
+ pix_fmt->height = 480;
+ pix_fmt->field = V4L2_FIELD_NONE;
+ pix_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ pix_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ pix_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
+ pix_fmt->xfer_func = V4L2_XFER_FUNC_SRGB;
+
+ ti_csi2rx_fill_fmt(fmt, &csi->v_fmt);
+
+ mdev->dev = csi->dev;
+ mdev->hw_revision = 1;
+ strscpy(mdev->model, "TI-CSI2RX", sizeof(mdev->model));
+
+ media_device_init(mdev);
+
+ strscpy(vdev->name, TI_CSI2RX_MODULE_NAME, sizeof(vdev->name));
+ vdev->v4l2_dev = &csi->v4l2_dev;
+ vdev->vfl_dir = VFL_DIR_RX;
+ vdev->fops = &csi_fops;
+ vdev->ioctl_ops = &csi_ioctl_ops;
+ vdev->release = video_device_release_empty;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_IO_MC;
+ vdev->lock = &csi->mutex;
+ video_set_drvdata(vdev, csi);
+
+ csi->pad.flags = MEDIA_PAD_FL_SINK;
+ vdev->entity.ops = &ti_csi2rx_video_entity_ops;
+ ret = media_entity_pads_init(&csi->vdev.entity, 1, &csi->pad);
+ if (ret)
+ return ret;
+
+ csi->v4l2_dev.mdev = mdev;
+
+ ret = v4l2_device_register(csi->dev, &csi->v4l2_dev);
+ if (ret)
+ return ret;
+
+ ret = media_device_register(mdev);
+ if (ret) {
+ v4l2_device_unregister(&csi->v4l2_dev);
+ media_device_cleanup(mdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ti_csi2rx_cleanup_dma(struct ti_csi2rx_dev *csi)
+{
+ dma_free_coherent(csi->dev, csi->dma.drain.len,
+ csi->dma.drain.vaddr, csi->dma.drain.paddr);
+ csi->dma.drain.vaddr = NULL;
+ dma_release_channel(csi->dma.chan);
+}
+
+static void ti_csi2rx_cleanup_v4l2(struct ti_csi2rx_dev *csi)
+{
+ media_device_unregister(&csi->mdev);
+ v4l2_device_unregister(&csi->v4l2_dev);
+ media_device_cleanup(&csi->mdev);
+}
+
+static void ti_csi2rx_cleanup_subdev(struct ti_csi2rx_dev *csi)
+{
+ v4l2_async_nf_unregister(&csi->notifier);
+ v4l2_async_nf_cleanup(&csi->notifier);
+}
+
+static void ti_csi2rx_cleanup_vb2q(struct ti_csi2rx_dev *csi)
+{
+ vb2_queue_release(&csi->vidq);
+}
+
+static int ti_csi2rx_probe(struct platform_device *pdev)
+{
+ struct ti_csi2rx_dev *csi;
+ struct resource *res;
+ int ret;
+
+ csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
+ if (!csi)
+ return -ENOMEM;
+
+ csi->dev = &pdev->dev;
+ platform_set_drvdata(pdev, csi);
+
+ mutex_init(&csi->mutex);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ csi->shim = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(csi->shim)) {
+ ret = PTR_ERR(csi->shim);
+ goto err_mutex;
+ }
+
+ ret = ti_csi2rx_init_dma(csi);
+ if (ret)
+ goto err_mutex;
+
+ ret = ti_csi2rx_v4l2_init(csi);
+ if (ret)
+ goto err_dma;
+
+ ret = ti_csi2rx_init_vb2q(csi);
+ if (ret)
+ goto err_v4l2;
+
+ ret = ti_csi2rx_notifier_register(csi);
+ if (ret)
+ goto err_vb2q;
+
+ ret = of_platform_populate(csi->dev->of_node, NULL, NULL, csi->dev);
+ if (ret) {
+ dev_err(csi->dev, "Failed to create children: %d\n", ret);
+ goto err_subdev;
+ }
+
+ return 0;
+
+err_subdev:
+ ti_csi2rx_cleanup_subdev(csi);
+err_vb2q:
+ ti_csi2rx_cleanup_vb2q(csi);
+err_v4l2:
+ ti_csi2rx_cleanup_v4l2(csi);
+err_dma:
+ ti_csi2rx_cleanup_dma(csi);
+err_mutex:
+ mutex_destroy(&csi->mutex);
+ return ret;
+}
+
+static int ti_csi2rx_remove(struct platform_device *pdev)
+{
+ struct ti_csi2rx_dev *csi = platform_get_drvdata(pdev);
+
+ video_unregister_device(&csi->vdev);
+
+ ti_csi2rx_cleanup_vb2q(csi);
+ ti_csi2rx_cleanup_subdev(csi);
+ ti_csi2rx_cleanup_v4l2(csi);
+ ti_csi2rx_cleanup_dma(csi);
+
+ mutex_destroy(&csi->mutex);
+
+ return 0;
+}
+
+static const struct of_device_id ti_csi2rx_of_match[] = {
+ { .compatible = "ti,j721e-csi2rx-shim", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ti_csi2rx_of_match);
+
+static struct platform_driver ti_csi2rx_pdrv = {
+ .probe = ti_csi2rx_probe,
+ .remove = ti_csi2rx_remove,
+ .driver = {
+ .name = TI_CSI2RX_MODULE_NAME,
+ .of_match_table = ti_csi2rx_of_match,
+ },
+};
+
+module_platform_driver(ti_csi2rx_pdrv);
+
+MODULE_DESCRIPTION("TI J721E CSI2 RX Driver");
+MODULE_AUTHOR("Jai Luthra <j-luthra@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/ti/omap3isp/ispstat.c b/drivers/media/platform/ti/omap3isp/ispstat.c
index 68cf68dbcace..359a846205b0 100644
--- a/drivers/media/platform/ti/omap3isp/ispstat.c
+++ b/drivers/media/platform/ti/omap3isp/ispstat.c
@@ -1039,7 +1039,7 @@ static int isp_stat_init_entities(struct ispstat *stat, const char *name,
struct media_entity *me = &subdev->entity;
v4l2_subdev_init(subdev, sd_ops);
- snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "OMAP3 ISP %s", name);
+ snprintf(subdev->name, sizeof(subdev->name), "OMAP3 ISP %s", name);
subdev->grp_id = BIT(16); /* group ID for isp subdevs */
subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
v4l2_set_subdevdata(subdev, stat);
diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
index 423fc85d79ee..a9fa05ac56a9 100644
--- a/drivers/media/platform/verisilicon/hantro_drv.c
+++ b/drivers/media/platform/verisilicon/hantro_drv.c
@@ -125,7 +125,8 @@ void hantro_watchdog(struct work_struct *work)
ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);
if (ctx) {
vpu_err("frame processing timed out!\n");
- ctx->codec_ops->reset(ctx);
+ if (ctx->codec_ops->reset)
+ ctx->codec_ops->reset(ctx);
hantro_job_finish(vpu, ctx, VB2_BUF_STATE_ERROR);
}
}
@@ -898,8 +899,9 @@ static int hantro_add_func(struct hantro_dev *vpu, unsigned int funcid)
vfd->vfl_dir = VFL_DIR_M2M;
vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
vfd->ioctl_ops = &hantro_ioctl_ops;
- snprintf(vfd->name, sizeof(vfd->name), "%s-%s", match->compatible,
- funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER ? "enc" : "dec");
+ strscpy(vfd->name, match->compatible, sizeof(vfd->name));
+ strlcat(vfd->name, funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER ?
+ "-enc" : "-dec", sizeof(vfd->name));
if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
vpu->encoder = func;
diff --git a/drivers/media/platform/verisilicon/hantro_postproc.c b/drivers/media/platform/verisilicon/hantro_postproc.c
index 0224ff68ab3f..64d6fb852ae9 100644
--- a/drivers/media/platform/verisilicon/hantro_postproc.c
+++ b/drivers/media/platform/verisilicon/hantro_postproc.c
@@ -107,7 +107,7 @@ static void hantro_postproc_g1_enable(struct hantro_ctx *ctx)
static int down_scale_factor(struct hantro_ctx *ctx)
{
- if (ctx->src_fmt.width == ctx->dst_fmt.width)
+ if (ctx->src_fmt.width <= ctx->dst_fmt.width)
return 0;
return DIV_ROUND_CLOSEST(ctx->src_fmt.width, ctx->dst_fmt.width);
diff --git a/drivers/media/platform/verisilicon/rockchip_av1_filmgrain.c b/drivers/media/platform/verisilicon/rockchip_av1_filmgrain.c
index f2ae84f0b436..f64dea797eff 100644
--- a/drivers/media/platform/verisilicon/rockchip_av1_filmgrain.c
+++ b/drivers/media/platform/verisilicon/rockchip_av1_filmgrain.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only or Apache-2.0
+// SPDX-License-Identifier: GPL-2.0-only OR Apache-2.0
#include "rockchip_av1_filmgrain.h"
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
index 816ffa905a4b..f97527670783 100644
--- a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
+++ b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
@@ -648,7 +648,7 @@ static const char * const rockchip_vpu_clk_names[] = {
};
static const char * const rk3588_vpu981_vpu_clk_names[] = {
- "aclk", "hclk", "aclk_vdpu_root", "hclk_vdpu_root"
+ "aclk", "hclk",
};
/* VDPU1/VEPU1 */
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
index 80d6f5b072ea..a96de5d388a1 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.c
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -708,9 +708,8 @@ int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
snprintf(name, sizeof(name), "port%u", port);
dma->dma = dma_request_chan(dma->xdev->dev, name);
if (IS_ERR(dma->dma)) {
- ret = PTR_ERR(dma->dma);
- if (ret != -EPROBE_DEFER)
- dev_err(dma->xdev->dev, "no VDMA channel found\n");
+ ret = dev_err_probe(dma->xdev->dev, PTR_ERR(dma->dma),
+ "no VDMA channel found\n");
goto error;
}
diff --git a/drivers/media/radio/radio-isa.c b/drivers/media/radio/radio-isa.c
index c591c0851fa2..ad49151f5ff0 100644
--- a/drivers/media/radio/radio-isa.c
+++ b/drivers/media/radio/radio-isa.c
@@ -36,7 +36,7 @@ static int radio_isa_querycap(struct file *file, void *priv,
strscpy(v->driver, isa->drv->driver.driver.name, sizeof(v->driver));
strscpy(v->card, isa->drv->card, sizeof(v->card));
- snprintf(v->bus_info, sizeof(v->bus_info), "ISA:%s", isa->v4l2_dev.name);
+ snprintf(v->bus_info, sizeof(v->bus_info), "ISA:%s", dev_name(isa->v4l2_dev.dev));
return 0;
}
diff --git a/drivers/media/radio/radio-miropcm20.c b/drivers/media/radio/radio-miropcm20.c
index 99788834c646..08be77b8f3b7 100644
--- a/drivers/media/radio/radio-miropcm20.c
+++ b/drivers/media/radio/radio-miropcm20.c
@@ -199,11 +199,9 @@ static int pcm20_setfreq(struct pcm20 *dev, unsigned long freq)
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *v)
{
- struct pcm20 *dev = video_drvdata(file);
-
strscpy(v->driver, "Miro PCM20", sizeof(v->driver));
strscpy(v->card, "Miro PCM20", sizeof(v->card));
- snprintf(v->bus_info, sizeof(v->bus_info), "ISA:%s", dev->v4l2_dev.name);
+ strscpy(v->bus_info, "ISA:radio-miropcm20", sizeof(v->bus_info));
return 0;
}
diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
index 6061506159f1..b2c5809a8bc7 100644
--- a/drivers/media/radio/radio-si476x.c
+++ b/drivers/media/radio/radio-si476x.c
@@ -328,9 +328,7 @@ static int si476x_radio_querycap(struct file *file, void *priv,
strscpy(capability->driver, radio->v4l2dev.name,
sizeof(capability->driver));
- strscpy(capability->card, DRIVER_CARD, sizeof(capability->card));
- snprintf(capability->bus_info, sizeof(capability->bus_info),
- "platform:%s", radio->v4l2dev.name);
+ strscpy(capability->card, DRIVER_CARD, sizeof(capability->card));
return 0;
}
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index e8166eac9efe..f6b98c304b72 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1020,7 +1020,7 @@ static int wl1273_fm_set_rds(struct wl1273_device *radio, unsigned int new_mode)
}
if (!r)
- radio->rds_on = (new_mode == WL1273_RDS_ON) ? true : false;
+ radio->rds_on = new_mode == WL1273_RDS_ON;
return r;
}
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 07bdf649c60d..2afe67ffa285 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -338,16 +338,6 @@ config IR_REDRAT3
To compile this driver as a module, choose M here: the
module will be called redrat3.
-config IR_RX51
- tristate "Nokia N900 IR transmitter diode"
- depends on (OMAP_DM_TIMER && PWM_OMAP_DMTIMER && ARCH_OMAP2PLUS || COMPILE_TEST) && RC_CORE
- help
- Say Y or M here if you want to enable support for the IR
- transmitter diode built in the Nokia N900 (RX51) device.
-
- The driver uses omap DM timers for generating the carrier
- wave and pulses.
-
config IR_SERIAL
tristate "Homebrew Serial Port Receiver"
depends on HAS_IOPORT
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index a9285266e944..2bca6f7f07bc 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -43,7 +43,6 @@ obj-$(CONFIG_IR_MTK) += mtk-cir.o
obj-$(CONFIG_IR_NUVOTON) += nuvoton-cir.o
obj-$(CONFIG_IR_PWM_TX) += pwm-ir-tx.o
obj-$(CONFIG_IR_REDRAT3) += redrat3.o
-obj-$(CONFIG_IR_RX51) += ir-rx51.o
obj-$(CONFIG_IR_SERIAL) += serial_ir.o
obj-$(CONFIG_IR_SPI) += ir-spi.o
obj-$(CONFIG_IR_STREAMZAP) += streamzap.o
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index fff4dd48eaca..d7721e60776e 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -251,7 +251,7 @@ struct ati_remote {
char rc_name[NAME_BUFSIZE];
char rc_phys[NAME_BUFSIZE];
- char mouse_name[NAME_BUFSIZE];
+ char mouse_name[NAME_BUFSIZE + 6];
char mouse_phys[NAME_BUFSIZE];
wait_queue_head_t wait;
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 74546f7e3469..5719dda6e0f0 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -2427,6 +2427,12 @@ static int imon_probe(struct usb_interface *interface,
goto fail;
}
+ if (first_if->dev.driver != interface->dev.driver) {
+ dev_err(&interface->dev, "inconsistent driver matching\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
if (ifnum == 0) {
ictx = imon_init_intf0(interface, id);
if (!ictx) {
diff --git a/drivers/media/rc/ir-rx51.c b/drivers/media/rc/ir-rx51.c
deleted file mode 100644
index 13e81bf8005d..000000000000
--- a/drivers/media/rc/ir-rx51.c
+++ /dev/null
@@ -1,285 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2008 Nokia Corporation
- *
- * Based on lirc_serial.c
- */
-#include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/wait.h>
-#include <linux/pwm.h>
-#include <linux/of.h>
-#include <linux/hrtimer.h>
-
-#include <media/rc-core.h>
-
-#define WBUF_LEN 256
-
-struct ir_rx51 {
- struct rc_dev *rcdev;
- struct pwm_device *pwm;
- struct pwm_state state;
- struct hrtimer timer;
- struct device *dev;
- wait_queue_head_t wqueue;
-
- unsigned int freq; /* carrier frequency */
- unsigned int duty_cycle; /* carrier duty cycle */
- int wbuf[WBUF_LEN];
- int wbuf_index;
- unsigned long device_is_open;
-};
-
-static inline void ir_rx51_on(struct ir_rx51 *ir_rx51)
-{
- ir_rx51->state.enabled = true;
- pwm_apply_state(ir_rx51->pwm, &ir_rx51->state);
-}
-
-static inline void ir_rx51_off(struct ir_rx51 *ir_rx51)
-{
- ir_rx51->state.enabled = false;
- pwm_apply_state(ir_rx51->pwm, &ir_rx51->state);
-}
-
-static int init_timing_params(struct ir_rx51 *ir_rx51)
-{
- ir_rx51->state.period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, ir_rx51->freq);
- pwm_set_relative_duty_cycle(&ir_rx51->state, ir_rx51->duty_cycle, 100);
-
- return 0;
-}
-
-static enum hrtimer_restart ir_rx51_timer_cb(struct hrtimer *timer)
-{
- struct ir_rx51 *ir_rx51 = container_of(timer, struct ir_rx51, timer);
- ktime_t now;
-
- if (ir_rx51->wbuf_index < 0) {
- dev_err_ratelimited(ir_rx51->dev,
- "BUG wbuf_index has value of %i\n",
- ir_rx51->wbuf_index);
- goto end;
- }
-
- /*
- * If we happen to hit an odd latency spike, loop through the
- * pulses until we catch up.
- */
- do {
- u64 ns;
-
- if (ir_rx51->wbuf_index >= WBUF_LEN)
- goto end;
- if (ir_rx51->wbuf[ir_rx51->wbuf_index] == -1)
- goto end;
-
- if (ir_rx51->wbuf_index % 2)
- ir_rx51_off(ir_rx51);
- else
- ir_rx51_on(ir_rx51);
-
- ns = US_TO_NS(ir_rx51->wbuf[ir_rx51->wbuf_index]);
- hrtimer_add_expires_ns(timer, ns);
-
- ir_rx51->wbuf_index++;
-
- now = timer->base->get_time();
-
- } while (hrtimer_get_expires_tv64(timer) < now);
-
- return HRTIMER_RESTART;
-end:
- /* Stop TX here */
- ir_rx51_off(ir_rx51);
- ir_rx51->wbuf_index = -1;
-
- wake_up_interruptible(&ir_rx51->wqueue);
-
- return HRTIMER_NORESTART;
-}
-
-static int ir_rx51_tx(struct rc_dev *dev, unsigned int *buffer,
- unsigned int count)
-{
- struct ir_rx51 *ir_rx51 = dev->priv;
-
- if (count > WBUF_LEN)
- return -EINVAL;
-
- memcpy(ir_rx51->wbuf, buffer, count * sizeof(unsigned int));
-
- /* Wait any pending transfers to finish */
- wait_event_interruptible(ir_rx51->wqueue, ir_rx51->wbuf_index < 0);
-
- init_timing_params(ir_rx51);
- if (count < WBUF_LEN)
- ir_rx51->wbuf[count] = -1; /* Insert termination mark */
-
- /*
- * REVISIT: Adjust latency requirements so the device doesn't go in too
- * deep sleep states with pm_qos_add_request().
- */
-
- ir_rx51_on(ir_rx51);
- ir_rx51->wbuf_index = 1;
- hrtimer_start(&ir_rx51->timer,
- ns_to_ktime(US_TO_NS(ir_rx51->wbuf[0])),
- HRTIMER_MODE_REL);
- /*
- * Don't return back to the userspace until the transfer has
- * finished
- */
- wait_event_interruptible(ir_rx51->wqueue, ir_rx51->wbuf_index < 0);
-
- /* REVISIT: Remove pm_qos constraint, we can sleep again */
-
- return count;
-}
-
-static int ir_rx51_open(struct rc_dev *dev)
-{
- struct ir_rx51 *ir_rx51 = dev->priv;
-
- if (test_and_set_bit(1, &ir_rx51->device_is_open))
- return -EBUSY;
-
- ir_rx51->pwm = pwm_get(ir_rx51->dev, NULL);
- if (IS_ERR(ir_rx51->pwm)) {
- int res = PTR_ERR(ir_rx51->pwm);
-
- dev_err(ir_rx51->dev, "pwm_get failed: %d\n", res);
- return res;
- }
-
- return 0;
-}
-
-static void ir_rx51_release(struct rc_dev *dev)
-{
- struct ir_rx51 *ir_rx51 = dev->priv;
-
- hrtimer_cancel(&ir_rx51->timer);
- ir_rx51_off(ir_rx51);
- pwm_put(ir_rx51->pwm);
-
- clear_bit(1, &ir_rx51->device_is_open);
-}
-
-static struct ir_rx51 ir_rx51 = {
- .duty_cycle = 50,
- .wbuf_index = -1,
-};
-
-static int ir_rx51_set_duty_cycle(struct rc_dev *dev, u32 duty)
-{
- struct ir_rx51 *ir_rx51 = dev->priv;
-
- ir_rx51->duty_cycle = duty;
-
- return 0;
-}
-
-static int ir_rx51_set_tx_carrier(struct rc_dev *dev, u32 carrier)
-{
- struct ir_rx51 *ir_rx51 = dev->priv;
-
- if (carrier > 500000 || carrier < 20000)
- return -EINVAL;
-
- ir_rx51->freq = carrier;
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-
-static int ir_rx51_suspend(struct platform_device *dev, pm_message_t state)
-{
- /*
- * In case the device is still open, do not suspend. Normally
- * this should not be a problem as lircd only keeps the device
- * open only for short periods of time. We also don't want to
- * get involved with race conditions that might happen if we
- * were in a middle of a transmit. Thus, we defer any suspend
- * actions until transmit has completed.
- */
- if (test_and_set_bit(1, &ir_rx51.device_is_open))
- return -EAGAIN;
-
- clear_bit(1, &ir_rx51.device_is_open);
-
- return 0;
-}
-
-static int ir_rx51_resume(struct platform_device *dev)
-{
- return 0;
-}
-
-#else
-
-#define ir_rx51_suspend NULL
-#define ir_rx51_resume NULL
-
-#endif /* CONFIG_PM */
-
-static int ir_rx51_probe(struct platform_device *dev)
-{
- struct pwm_device *pwm;
- struct rc_dev *rcdev;
-
- pwm = pwm_get(&dev->dev, NULL);
- if (IS_ERR(pwm))
- return dev_err_probe(&dev->dev, PTR_ERR(pwm), "pwm_get failed\n");
-
- /* Use default, in case userspace does not set the carrier */
- ir_rx51.freq = DIV_ROUND_CLOSEST_ULL(pwm_get_period(pwm), NSEC_PER_SEC);
- pwm_init_state(pwm, &ir_rx51.state);
- pwm_put(pwm);
-
- hrtimer_init(&ir_rx51.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ir_rx51.timer.function = ir_rx51_timer_cb;
-
- ir_rx51.dev = &dev->dev;
-
- rcdev = devm_rc_allocate_device(&dev->dev, RC_DRIVER_IR_RAW_TX);
- if (!rcdev)
- return -ENOMEM;
-
- rcdev->priv = &ir_rx51;
- rcdev->open = ir_rx51_open;
- rcdev->close = ir_rx51_release;
- rcdev->tx_ir = ir_rx51_tx;
- rcdev->s_tx_duty_cycle = ir_rx51_set_duty_cycle;
- rcdev->s_tx_carrier = ir_rx51_set_tx_carrier;
- rcdev->driver_name = KBUILD_MODNAME;
-
- ir_rx51.rcdev = rcdev;
-
- return devm_rc_register_device(&dev->dev, ir_rx51.rcdev);
-}
-
-static const struct of_device_id ir_rx51_match[] = {
- {
- .compatible = "nokia,n900-ir",
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, ir_rx51_match);
-
-static struct platform_driver ir_rx51_platform_driver = {
- .probe = ir_rx51_probe,
- .suspend = ir_rx51_suspend,
- .resume = ir_rx51_resume,
- .driver = {
- .name = KBUILD_MODNAME,
- .of_match_table = ir_rx51_match,
- },
-};
-module_platform_driver(ir_rx51_platform_driver);
-
-MODULE_DESCRIPTION("IR TX driver for Nokia RX51");
-MODULE_AUTHOR("Nokia Corporation");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/ir-sharp-decoder.c b/drivers/media/rc/ir-sharp-decoder.c
index 3d8488c39c56..3311099cbd57 100644
--- a/drivers/media/rc/ir-sharp-decoder.c
+++ b/drivers/media/rc/ir-sharp-decoder.c
@@ -15,7 +15,9 @@
#define SHARP_UNIT 40 /* us */
#define SHARP_BIT_PULSE (8 * SHARP_UNIT) /* 320us */
#define SHARP_BIT_0_PERIOD (25 * SHARP_UNIT) /* 1ms (680us space) */
-#define SHARP_BIT_1_PERIOD (50 * SHARP_UNIT) /* 2ms (1680ms space) */
+#define SHARP_BIT_1_PERIOD (50 * SHARP_UNIT) /* 2ms (1680us space) */
+#define SHARP_BIT_0_SPACE (17 * SHARP_UNIT) /* 680us space */
+#define SHARP_BIT_1_SPACE (42 * SHARP_UNIT) /* 1680us space */
#define SHARP_ECHO_SPACE (1000 * SHARP_UNIT) /* 40 ms */
#define SHARP_TRAILER_SPACE (125 * SHARP_UNIT) /* 5 ms (even longer) */
@@ -168,8 +170,8 @@ static const struct ir_raw_timings_pd ir_sharp_timings = {
.header_pulse = 0,
.header_space = 0,
.bit_pulse = SHARP_BIT_PULSE,
- .bit_space[0] = SHARP_BIT_0_PERIOD,
- .bit_space[1] = SHARP_BIT_1_PERIOD,
+ .bit_space[0] = SHARP_BIT_0_SPACE,
+ .bit_space[1] = SHARP_BIT_1_SPACE,
.trailer_pulse = SHARP_BIT_PULSE,
.trailer_space = SHARP_ECHO_SPACE,
.msb_first = 1,
diff --git a/drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c b/drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c
index 0a867ca90038..e24946c8fe20 100644
--- a/drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c
+++ b/drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c
@@ -83,3 +83,4 @@ module_exit(exit_rc_map_adstech_dvb_t_pci)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("ADS Tech Instant TV DVB-T PCI Remote");
diff --git a/drivers/media/rc/keymaps/rc-alink-dtu-m.c b/drivers/media/rc/keymaps/rc-alink-dtu-m.c
index 8a2ccaf3b817..9926259b43ee 100644
--- a/drivers/media/rc/keymaps/rc-alink-dtu-m.c
+++ b/drivers/media/rc/keymaps/rc-alink-dtu-m.c
@@ -54,3 +54,4 @@ module_exit(exit_rc_map_alink_dtu_m)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("A-Link DTU(m) slim remote, 6 rows, 3 columns.");
diff --git a/drivers/media/rc/keymaps/rc-anysee.c b/drivers/media/rc/keymaps/rc-anysee.c
index 34da03c46104..e4bcbf889fed 100644
--- a/drivers/media/rc/keymaps/rc-anysee.c
+++ b/drivers/media/rc/keymaps/rc-anysee.c
@@ -79,3 +79,4 @@ module_exit(exit_rc_map_anysee)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("Anysee remote keytable");
diff --git a/drivers/media/rc/keymaps/rc-apac-viewcomp.c b/drivers/media/rc/keymaps/rc-apac-viewcomp.c
index bdc47e25d46e..80b096f02e99 100644
--- a/drivers/media/rc/keymaps/rc-apac-viewcomp.c
+++ b/drivers/media/rc/keymaps/rc-apac-viewcomp.c
@@ -74,3 +74,4 @@ module_exit(exit_rc_map_apac_viewcomp)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("apac-viewcomp remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c b/drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c
index 1d322137898e..212b0d920901 100644
--- a/drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c
+++ b/drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c
@@ -62,3 +62,4 @@ module_exit(exit_rc_map_t2hybrid)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Oleh Kravchenko <oleg@kaa.org.ua>");
+MODULE_DESCRIPTION("Astrometa T2hybrid remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-asus-pc39.c b/drivers/media/rc/keymaps/rc-asus-pc39.c
index 7a4b3a6e3a49..bd55b7c6f82a 100644
--- a/drivers/media/rc/keymaps/rc-asus-pc39.c
+++ b/drivers/media/rc/keymaps/rc-asus-pc39.c
@@ -85,3 +85,4 @@ module_exit(exit_rc_map_asus_pc39)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Model PC-39 keytable for asus-pc39 remote controller");
diff --git a/drivers/media/rc/keymaps/rc-asus-ps3-100.c b/drivers/media/rc/keymaps/rc-asus-ps3-100.c
index 09b60fa335e3..9d63c1e4a17a 100644
--- a/drivers/media/rc/keymaps/rc-asus-ps3-100.c
+++ b/drivers/media/rc/keymaps/rc-asus-ps3-100.c
@@ -84,3 +84,4 @@ module_exit(exit_rc_map_asus_ps3_100)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Asus My Cinema PS3-100 remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c b/drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c
index b4b7932c0c5a..063237f0d2e2 100644
--- a/drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c
+++ b/drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c
@@ -63,3 +63,4 @@ module_exit(exit_rc_map_ati_tv_wonder_hd_600)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("ati-tv-wonder-hd-600 remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-ati-x10.c b/drivers/media/rc/keymaps/rc-ati-x10.c
index 31fe1106b708..9f7cbe9a1ac8 100644
--- a/drivers/media/rc/keymaps/rc-ati-x10.c
+++ b/drivers/media/rc/keymaps/rc-ati-x10.c
@@ -123,3 +123,4 @@ module_exit(exit_rc_map_ati_x10)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
+MODULE_DESCRIPTION("ATI X10 RF remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-avermedia-a16d.c b/drivers/media/rc/keymaps/rc-avermedia-a16d.c
index 6467ff6e48d7..98497f4f6f92 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-a16d.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-a16d.c
@@ -69,3 +69,4 @@ module_exit(exit_rc_map_avermedia_a16d)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("avermedia-a16d remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-avermedia-cardbus.c b/drivers/media/rc/keymaps/rc-avermedia-cardbus.c
index 54fc6d9022c2..5832c2f8ab3f 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-cardbus.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-cardbus.c
@@ -91,3 +91,4 @@ module_exit(exit_rc_map_avermedia_cardbus)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("avermedia-cardbus remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-avermedia-dvbt.c b/drivers/media/rc/keymaps/rc-avermedia-dvbt.c
index 92c6df3360b3..3157d0c1cee9 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-dvbt.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-dvbt.c
@@ -72,3 +72,4 @@ module_exit(exit_rc_map_avermedia_dvbt)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("avermedia-dvbt remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-avermedia-m135a.c b/drivers/media/rc/keymaps/rc-avermedia-m135a.c
index 311ddeb061ca..cc1318ad09d9 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-m135a.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-m135a.c
@@ -142,3 +142,4 @@ module_exit(exit_rc_map_avermedia_m135a)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Avermedia M135A with RM-JX and RM-K6 remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c b/drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c
index a970ed5a090b..ec6c866c9f5d 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c
@@ -90,3 +90,4 @@ module_exit(exit_rc_map_avermedia_m733a_rm_k6)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Avermedia M733A with IR model RM-K6 remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-avermedia-rm-ks.c b/drivers/media/rc/keymaps/rc-avermedia-rm-ks.c
index cf8a4fd107f4..ee4fe5791add 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-rm-ks.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-rm-ks.c
@@ -65,3 +65,4 @@ module_exit(exit_rc_map_avermedia_rm_ks)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("AverMedia RM-KS remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-avermedia.c b/drivers/media/rc/keymaps/rc-avermedia.c
index f96f229b70bb..b827536a1f5f 100644
--- a/drivers/media/rc/keymaps/rc-avermedia.c
+++ b/drivers/media/rc/keymaps/rc-avermedia.c
@@ -80,3 +80,4 @@ module_exit(exit_rc_map_avermedia)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("avermedia remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-avertv-303.c b/drivers/media/rc/keymaps/rc-avertv-303.c
index a3e2e945c769..71d1da42528d 100644
--- a/drivers/media/rc/keymaps/rc-avertv-303.c
+++ b/drivers/media/rc/keymaps/rc-avertv-303.c
@@ -79,3 +79,4 @@ module_exit(exit_rc_map_avertv_303)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("AVERTV STUDIO 303 Remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c b/drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c
index 5fc8e4cd102e..56f8eb1f0d01 100644
--- a/drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c
+++ b/drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c
@@ -88,3 +88,4 @@ module_exit(exit_rc_map_azurewave_ad_tu700)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("TwinHan AzureWave AD-TU700(704J) remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-beelink-gs1.c b/drivers/media/rc/keymaps/rc-beelink-gs1.c
index cedbd5d20bc7..6e767d88c707 100644
--- a/drivers/media/rc/keymaps/rc-beelink-gs1.c
+++ b/drivers/media/rc/keymaps/rc-beelink-gs1.c
@@ -82,3 +82,4 @@ module_exit(exit_rc_map_beelink_gs1)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Clément Péron <peron.clem@gmail.com>");
+MODULE_DESCRIPTION("Beelink GS1 remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-beelink-mxiii.c b/drivers/media/rc/keymaps/rc-beelink-mxiii.c
index 01180cd92205..88fad9959a86 100644
--- a/drivers/media/rc/keymaps/rc-beelink-mxiii.c
+++ b/drivers/media/rc/keymaps/rc-beelink-mxiii.c
@@ -55,3 +55,4 @@ module_exit(exit_rc_map_beelink_mxiii)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com");
+MODULE_DESCRIPTION("Beelink Mini MXIII remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-behold-columbus.c b/drivers/media/rc/keymaps/rc-behold-columbus.c
index 8579b3d5128d..6bdc924ac3cf 100644
--- a/drivers/media/rc/keymaps/rc-behold-columbus.c
+++ b/drivers/media/rc/keymaps/rc-behold-columbus.c
@@ -102,3 +102,4 @@ module_exit(exit_rc_map_behold_columbus)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("BeholdTV Columbus remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-behold.c b/drivers/media/rc/keymaps/rc-behold.c
index 28397ce05a7f..0251ce835f79 100644
--- a/drivers/media/rc/keymaps/rc-behold.c
+++ b/drivers/media/rc/keymaps/rc-behold.c
@@ -135,3 +135,4 @@ module_exit(exit_rc_map_behold)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("BeholdTV 60x series remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-budget-ci-old.c b/drivers/media/rc/keymaps/rc-budget-ci-old.c
index 6ca822256862..8fda5d1e140b 100644
--- a/drivers/media/rc/keymaps/rc-budget-ci-old.c
+++ b/drivers/media/rc/keymaps/rc-budget-ci-old.c
@@ -87,3 +87,4 @@ module_exit(exit_rc_map_budget_ci_old)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("budget-ci-old remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-cinergy-1400.c b/drivers/media/rc/keymaps/rc-cinergy-1400.c
index 4433d28b219c..092c3533d712 100644
--- a/drivers/media/rc/keymaps/rc-cinergy-1400.c
+++ b/drivers/media/rc/keymaps/rc-cinergy-1400.c
@@ -78,3 +78,4 @@ module_exit(exit_rc_map_cinergy_1400)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Cinergy 1400 DVB-T remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-cinergy.c b/drivers/media/rc/keymaps/rc-cinergy.c
index b34a37b8fe61..334a290a3b91 100644
--- a/drivers/media/rc/keymaps/rc-cinergy.c
+++ b/drivers/media/rc/keymaps/rc-cinergy.c
@@ -72,3 +72,4 @@ module_exit(exit_rc_map_cinergy)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("cinergy remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-ct-90405.c b/drivers/media/rc/keymaps/rc-ct-90405.c
index 8914c83c9d9f..d4638df37c53 100644
--- a/drivers/media/rc/keymaps/rc-ct-90405.c
+++ b/drivers/media/rc/keymaps/rc-ct-90405.c
@@ -84,3 +84,4 @@ module_exit(exit_rc_map_ct_90405)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Voronov <avv.0@ya.ru>");
+MODULE_DESCRIPTION("Toshiba CT-90405 remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-d680-dmb.c b/drivers/media/rc/keymaps/rc-d680-dmb.c
index d491a5e9750f..7870d36f2c69 100644
--- a/drivers/media/rc/keymaps/rc-d680-dmb.c
+++ b/drivers/media/rc/keymaps/rc-d680-dmb.c
@@ -70,3 +70,4 @@ module_exit(exit_rc_map_d680_dmb)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("d680-dmb remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-dib0700-nec.c b/drivers/media/rc/keymaps/rc-dib0700-nec.c
index f1fcdf16f485..0323049fd2b1 100644
--- a/drivers/media/rc/keymaps/rc-dib0700-nec.c
+++ b/drivers/media/rc/keymaps/rc-dib0700-nec.c
@@ -118,3 +118,4 @@ module_exit(exit_rc_map)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("dib0700-nec remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-dib0700-rc5.c b/drivers/media/rc/keymaps/rc-dib0700-rc5.c
index 002fffcba95d..d34e92eb92be 100644
--- a/drivers/media/rc/keymaps/rc-dib0700-rc5.c
+++ b/drivers/media/rc/keymaps/rc-dib0700-rc5.c
@@ -229,3 +229,4 @@ module_exit(exit_rc_map)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("dib0700-rc5 remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c b/drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c
index 2466d8c50226..d18b8f93a018 100644
--- a/drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c
+++ b/drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c
@@ -84,3 +84,4 @@ module_exit(exit_rc_map_digitalnow_tinytwin)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("DigitalNow TinyTwin remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-digittrade.c b/drivers/media/rc/keymaps/rc-digittrade.c
index 65bc8ad7e52c..129a81f59b4a 100644
--- a/drivers/media/rc/keymaps/rc-digittrade.c
+++ b/drivers/media/rc/keymaps/rc-digittrade.c
@@ -68,3 +68,4 @@ module_exit(exit_rc_map_digittrade)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("Digittrade DVB-T USB Stick remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-dm1105-nec.c b/drivers/media/rc/keymaps/rc-dm1105-nec.c
index cd0b985c994d..b82290ce925b 100644
--- a/drivers/media/rc/keymaps/rc-dm1105-nec.c
+++ b/drivers/media/rc/keymaps/rc-dm1105-nec.c
@@ -70,3 +70,4 @@ module_exit(exit_rc_map_dm1105_nec)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("dm1105-nec remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c b/drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c
index a82f64dc9411..4b23335615cf 100644
--- a/drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c
+++ b/drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c
@@ -72,3 +72,4 @@ module_exit(exit_rc_map_dntv_live_dvb_t)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("dntv-live-dvb-t remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c b/drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c
index d3f5048a0220..46d8ea1b49a3 100644
--- a/drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c
+++ b/drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c
@@ -91,3 +91,4 @@ module_exit(exit_rc_map_dntv_live_dvbt_pro)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("DigitalNow DNTV Live DVB-T Remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-dreambox.c b/drivers/media/rc/keymaps/rc-dreambox.c
index dea024fa3a22..e1ec99ce3105 100644
--- a/drivers/media/rc/keymaps/rc-dreambox.c
+++ b/drivers/media/rc/keymaps/rc-dreambox.c
@@ -149,3 +149,4 @@ module_exit(exit_rc_map_dreambox)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Emanuel Strobel <emanuel.strobel@yahoo.com>");
+MODULE_DESCRIPTION("Dreambox RC10/RC0 and RC20/RC-BT remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-dtt200u.c b/drivers/media/rc/keymaps/rc-dtt200u.c
index e7f87baa3212..eeb2f6e303d7 100644
--- a/drivers/media/rc/keymaps/rc-dtt200u.c
+++ b/drivers/media/rc/keymaps/rc-dtt200u.c
@@ -53,3 +53,4 @@ module_exit(exit_rc_map_dtt200u)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
+MODULE_DESCRIPTION("Wideview WT-220U remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-dvbsky.c b/drivers/media/rc/keymaps/rc-dvbsky.c
index f5063af2e5bc..1fcd47bd8595 100644
--- a/drivers/media/rc/keymaps/rc-dvbsky.c
+++ b/drivers/media/rc/keymaps/rc-dvbsky.c
@@ -71,3 +71,4 @@ module_exit(exit_rc_map_rc5_dvbsky)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nibble Max <nibble.max@gmail.com>");
+MODULE_DESCRIPTION("DVBSky remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-dvico-mce.c b/drivers/media/rc/keymaps/rc-dvico-mce.c
index b1bb8cdb3705..4bb4222d259e 100644
--- a/drivers/media/rc/keymaps/rc-dvico-mce.c
+++ b/drivers/media/rc/keymaps/rc-dvico-mce.c
@@ -80,3 +80,4 @@ module_exit(exit_rc_map_dvico_mce)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("dvico-mce remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-dvico-portable.c b/drivers/media/rc/keymaps/rc-dvico-portable.c
index ec12ba6995dc..ba9ef9b75777 100644
--- a/drivers/media/rc/keymaps/rc-dvico-portable.c
+++ b/drivers/media/rc/keymaps/rc-dvico-portable.c
@@ -71,3 +71,4 @@ module_exit(exit_rc_map_dvico_portable)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("dvico-portable remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-em-terratec.c b/drivers/media/rc/keymaps/rc-em-terratec.c
index a1f59aa6ff23..8a51fe632840 100644
--- a/drivers/media/rc/keymaps/rc-em-terratec.c
+++ b/drivers/media/rc/keymaps/rc-em-terratec.c
@@ -63,3 +63,4 @@ module_exit(exit_rc_map_em_terratec)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("em-terratec remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c b/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c
index 7a00471b6005..320e184f4298 100644
--- a/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c
+++ b/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c
@@ -75,3 +75,4 @@ module_exit(exit_rc_map_encore_enltv_fm53)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Encore ENLTV-FM v5.3 remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-encore-enltv.c b/drivers/media/rc/keymaps/rc-encore-enltv.c
index 712210097b4d..0b235d72e57d 100644
--- a/drivers/media/rc/keymaps/rc-encore-enltv.c
+++ b/drivers/media/rc/keymaps/rc-encore-enltv.c
@@ -106,3 +106,4 @@ module_exit(exit_rc_map_encore_enltv)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Encore ENLTV-FM remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-encore-enltv2.c b/drivers/media/rc/keymaps/rc-encore-enltv2.c
index a08470b4f187..d8057f41252d 100644
--- a/drivers/media/rc/keymaps/rc-encore-enltv2.c
+++ b/drivers/media/rc/keymaps/rc-encore-enltv2.c
@@ -84,3 +84,4 @@ module_exit(exit_rc_map_encore_enltv2)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Encore ENLTV2-FM remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-evga-indtube.c b/drivers/media/rc/keymaps/rc-evga-indtube.c
index f4398444330b..95295f6882b1 100644
--- a/drivers/media/rc/keymaps/rc-evga-indtube.c
+++ b/drivers/media/rc/keymaps/rc-evga-indtube.c
@@ -55,3 +55,4 @@ module_exit(exit_rc_map_evga_indtube)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("EVGA inDtube remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-eztv.c b/drivers/media/rc/keymaps/rc-eztv.c
index 4e494d953e33..522e772f7c2b 100644
--- a/drivers/media/rc/keymaps/rc-eztv.c
+++ b/drivers/media/rc/keymaps/rc-eztv.c
@@ -90,3 +90,4 @@ module_exit(exit_rc_map_eztv)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("eztv remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-flydvb.c b/drivers/media/rc/keymaps/rc-flydvb.c
index 202a1fbd1935..fcb3bcadd82d 100644
--- a/drivers/media/rc/keymaps/rc-flydvb.c
+++ b/drivers/media/rc/keymaps/rc-flydvb.c
@@ -71,3 +71,4 @@ module_exit(exit_rc_map_flydvb)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("flydvb remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-flyvideo.c b/drivers/media/rc/keymaps/rc-flyvideo.c
index a44467fb15cb..fcb70c9507cf 100644
--- a/drivers/media/rc/keymaps/rc-flyvideo.c
+++ b/drivers/media/rc/keymaps/rc-flyvideo.c
@@ -64,3 +64,4 @@ module_exit(exit_rc_map_flyvideo)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("flyvideo remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-fusionhdtv-mce.c b/drivers/media/rc/keymaps/rc-fusionhdtv-mce.c
index 253199f5531a..43f73db91098 100644
--- a/drivers/media/rc/keymaps/rc-fusionhdtv-mce.c
+++ b/drivers/media/rc/keymaps/rc-fusionhdtv-mce.c
@@ -92,3 +92,4 @@ module_exit(exit_rc_map_fusionhdtv_mce)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("DViCO FUSION HDTV MCE remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-gadmei-rm008z.c b/drivers/media/rc/keymaps/rc-gadmei-rm008z.c
index c630ef306f11..8a446d125789 100644
--- a/drivers/media/rc/keymaps/rc-gadmei-rm008z.c
+++ b/drivers/media/rc/keymaps/rc-gadmei-rm008z.c
@@ -75,3 +75,4 @@ module_exit(exit_rc_map_gadmei_rm008z)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("GADMEI UTV330+ RM008Z remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-geekbox.c b/drivers/media/rc/keymaps/rc-geekbox.c
index 11735ad36c6a..d3f2e960c925 100644
--- a/drivers/media/rc/keymaps/rc-geekbox.c
+++ b/drivers/media/rc/keymaps/rc-geekbox.c
@@ -47,3 +47,4 @@ module_exit(exit_rc_map_geekbox)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_DESCRIPTION("GeekBox remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c b/drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c
index c966c130b05d..e49828ea2b80 100644
--- a/drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c
+++ b/drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c
@@ -78,3 +78,4 @@ module_exit(exit_rc_map_genius_tvgo_a11mce)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Genius TVGO A11MCE remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-gotview7135.c b/drivers/media/rc/keymaps/rc-gotview7135.c
index 0dc4ef36d76f..a044991e30ba 100644
--- a/drivers/media/rc/keymaps/rc-gotview7135.c
+++ b/drivers/media/rc/keymaps/rc-gotview7135.c
@@ -73,3 +73,4 @@ module_exit(exit_rc_map_gotview7135)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("gotview7135 remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-hauppauge.c b/drivers/media/rc/keymaps/rc-hauppauge.c
index 82552360c3c3..d7156774aa0e 100644
--- a/drivers/media/rc/keymaps/rc-hauppauge.c
+++ b/drivers/media/rc/keymaps/rc-hauppauge.c
@@ -287,3 +287,4 @@ module_exit(exit_rc_map_rc5_hauppauge_new)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Hauppauge remote controllers keytable");
diff --git a/drivers/media/rc/keymaps/rc-hisi-poplar.c b/drivers/media/rc/keymaps/rc-hisi-poplar.c
index 49a18e916915..b10ad674c32a 100644
--- a/drivers/media/rc/keymaps/rc-hisi-poplar.c
+++ b/drivers/media/rc/keymaps/rc-hisi-poplar.c
@@ -63,3 +63,4 @@ module_init(init_rc_map_hisi_poplar)
module_exit(exit_rc_map_hisi_poplar)
MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("HiSilicon poplar remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-hisi-tv-demo.c b/drivers/media/rc/keymaps/rc-hisi-tv-demo.c
index c73068b653f7..24dcb38df27c 100644
--- a/drivers/media/rc/keymaps/rc-hisi-tv-demo.c
+++ b/drivers/media/rc/keymaps/rc-hisi-tv-demo.c
@@ -75,3 +75,4 @@ module_init(init_rc_map_hisi_tv_demo)
module_exit(exit_rc_map_hisi_tv_demo)
MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("HiSilicon tv demo remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-imon-mce.c b/drivers/media/rc/keymaps/rc-imon-mce.c
index b89e3569e76a..130f685ae30e 100644
--- a/drivers/media/rc/keymaps/rc-imon-mce.c
+++ b/drivers/media/rc/keymaps/rc-imon-mce.c
@@ -137,3 +137,4 @@ module_exit(exit_rc_map_imon_mce)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
+MODULE_DESCRIPTION("iMON MCE remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-imon-pad.c b/drivers/media/rc/keymaps/rc-imon-pad.c
index bceb4e7726b6..cd5ba44d033c 100644
--- a/drivers/media/rc/keymaps/rc-imon-pad.c
+++ b/drivers/media/rc/keymaps/rc-imon-pad.c
@@ -150,3 +150,4 @@ module_exit(exit_rc_map_imon_pad)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
+MODULE_DESCRIPTION("iMON PAD remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-imon-rsc.c b/drivers/media/rc/keymaps/rc-imon-rsc.c
index 38787dd0e4a0..e4124fadf705 100644
--- a/drivers/media/rc/keymaps/rc-imon-rsc.c
+++ b/drivers/media/rc/keymaps/rc-imon-rsc.c
@@ -80,3 +80,4 @@ module_exit(exit_rc_map_imon_rsc)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sean Young <sean@mess.org>");
+MODULE_DESCRIPTION("iMON RSC remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-iodata-bctv7e.c b/drivers/media/rc/keymaps/rc-iodata-bctv7e.c
index 9cc6ea0f4226..95256e85458a 100644
--- a/drivers/media/rc/keymaps/rc-iodata-bctv7e.c
+++ b/drivers/media/rc/keymaps/rc-iodata-bctv7e.c
@@ -82,3 +82,4 @@ module_exit(exit_rc_map_iodata_bctv7e)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("IO-DATA BCTV7E remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-it913x-v1.c b/drivers/media/rc/keymaps/rc-it913x-v1.c
index 1e049f26a246..d80764c98f44 100644
--- a/drivers/media/rc/keymaps/rc-it913x-v1.c
+++ b/drivers/media/rc/keymaps/rc-it913x-v1.c
@@ -89,3 +89,4 @@ module_exit(exit_rc_it913x_v1_map)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Malcolm Priestley tvboxspy@gmail.com");
+MODULE_DESCRIPTION("it913x-v1 remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-it913x-v2.c b/drivers/media/rc/keymaps/rc-it913x-v2.c
index da3107da26b7..c37358cf8b86 100644
--- a/drivers/media/rc/keymaps/rc-it913x-v2.c
+++ b/drivers/media/rc/keymaps/rc-it913x-v2.c
@@ -88,3 +88,4 @@ module_exit(exit_rc_it913x_v2_map)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Malcolm Priestley tvboxspy@gmail.com");
+MODULE_DESCRIPTION("it913x-v2 remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-kaiomy.c b/drivers/media/rc/keymaps/rc-kaiomy.c
index 548760e86a2d..bea50c6f7e99 100644
--- a/drivers/media/rc/keymaps/rc-kaiomy.c
+++ b/drivers/media/rc/keymaps/rc-kaiomy.c
@@ -81,3 +81,4 @@ module_exit(exit_rc_map_kaiomy)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Kaiomy TVnPC U2 remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-khadas.c b/drivers/media/rc/keymaps/rc-khadas.c
index ce4938444d90..2b7161e1bc3f 100644
--- a/drivers/media/rc/keymaps/rc-khadas.c
+++ b/drivers/media/rc/keymaps/rc-khadas.c
@@ -52,3 +52,4 @@ module_exit(exit_rc_map_khadas)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com>");
+MODULE_DESCRIPTION("Khadas VIM/EDGE SBC remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-khamsin.c b/drivers/media/rc/keymaps/rc-khamsin.c
index 0c98c2faacff..2121cad8d3df 100644
--- a/drivers/media/rc/keymaps/rc-khamsin.c
+++ b/drivers/media/rc/keymaps/rc-khamsin.c
@@ -73,3 +73,4 @@ module_exit(exit_rc_map_khamsin)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com>");
+MODULE_DESCRIPTION("KHAMSIN remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-kworld-315u.c b/drivers/media/rc/keymaps/rc-kworld-315u.c
index f5aed4b96019..f849dd6b7ef2 100644
--- a/drivers/media/rc/keymaps/rc-kworld-315u.c
+++ b/drivers/media/rc/keymaps/rc-kworld-315u.c
@@ -77,3 +77,4 @@ module_exit(exit_rc_map_kworld_315u)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Kworld 315U remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-kworld-pc150u.c b/drivers/media/rc/keymaps/rc-kworld-pc150u.c
index 7938761eb994..630ef7c330d9 100644
--- a/drivers/media/rc/keymaps/rc-kworld-pc150u.c
+++ b/drivers/media/rc/keymaps/rc-kworld-pc150u.c
@@ -96,3 +96,4 @@ module_exit(exit_rc_map_kworld_pc150u)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kyle Strickland <kyle@kyle.strickland.name>");
+MODULE_DESCRIPTION("Kworld PC150-U remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c b/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c
index 75389b74e02d..1fb9dc434685 100644
--- a/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c
+++ b/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c
@@ -97,3 +97,4 @@ module_exit(exit_rc_map_kworld_plus_tv_analog)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Kworld Plus TV Analog Lite PCI IR remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-leadtek-y04g0051.c b/drivers/media/rc/keymaps/rc-leadtek-y04g0051.c
index 2f2b981e1995..c637312643b7 100644
--- a/drivers/media/rc/keymaps/rc-leadtek-y04g0051.c
+++ b/drivers/media/rc/keymaps/rc-leadtek-y04g0051.c
@@ -85,3 +85,4 @@ module_exit(exit_rc_map_leadtek_y04g0051)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("LeadTek Y04G0051 remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-lme2510.c b/drivers/media/rc/keymaps/rc-lme2510.c
index 181e48f0cb67..575485655a85 100644
--- a/drivers/media/rc/keymaps/rc-lme2510.c
+++ b/drivers/media/rc/keymaps/rc-lme2510.c
@@ -104,3 +104,4 @@ module_exit(exit_rc_lme2510_map)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Malcolm Priestley tvboxspy@gmail.com");
+MODULE_DESCRIPTION("LME2510 remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-manli.c b/drivers/media/rc/keymaps/rc-manli.c
index e884aeb5c3d6..b81149a0dfd8 100644
--- a/drivers/media/rc/keymaps/rc-manli.c
+++ b/drivers/media/rc/keymaps/rc-manli.c
@@ -128,3 +128,4 @@ module_exit(exit_rc_map_manli)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("MANLI MTV00[0x0c] and BeholdTV 40[13] remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-mecool-kii-pro.c b/drivers/media/rc/keymaps/rc-mecool-kii-pro.c
index 77ca8a8fade8..273fe1a304f0 100644
--- a/drivers/media/rc/keymaps/rc-mecool-kii-pro.c
+++ b/drivers/media/rc/keymaps/rc-mecool-kii-pro.c
@@ -89,3 +89,4 @@ module_exit(exit_rc_map_mecool_kii_pro)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com");
+MODULE_DESCRIPTION("Mecool Kii Pro remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-mecool-kiii-pro.c b/drivers/media/rc/keymaps/rc-mecool-kiii-pro.c
index 8e99686fd6b1..53fd7c895ddc 100644
--- a/drivers/media/rc/keymaps/rc-mecool-kiii-pro.c
+++ b/drivers/media/rc/keymaps/rc-mecool-kiii-pro.c
@@ -86,3 +86,4 @@ module_exit(exit_rc_map_mecool_kiii_pro)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com");
+MODULE_DESCRIPTION("Mecool Kiii Pro remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-medion-x10.c b/drivers/media/rc/keymaps/rc-medion-x10.c
index 843dba3bad73..3ea8fdbaf18c 100644
--- a/drivers/media/rc/keymaps/rc-medion-x10.c
+++ b/drivers/media/rc/keymaps/rc-medion-x10.c
@@ -102,3 +102,4 @@ module_exit(exit_rc_map_medion_x10)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
+MODULE_DESCRIPTION("Medion X10 RF remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-minix-neo.c b/drivers/media/rc/keymaps/rc-minix-neo.c
index 9165af548ff1..ce16e964e72e 100644
--- a/drivers/media/rc/keymaps/rc-minix-neo.c
+++ b/drivers/media/rc/keymaps/rc-minix-neo.c
@@ -53,3 +53,4 @@ module_exit(exit_rc_map_minix_neo)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com");
+MODULE_DESCRIPTION("Minix NEO remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-msi-digivox-ii.c b/drivers/media/rc/keymaps/rc-msi-digivox-ii.c
index ab001d2dac67..f152626fd802 100644
--- a/drivers/media/rc/keymaps/rc-msi-digivox-ii.c
+++ b/drivers/media/rc/keymaps/rc-msi-digivox-ii.c
@@ -53,3 +53,4 @@ module_exit(exit_rc_map_msi_digivox_ii)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("MSI DIGIVOX mini II remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-msi-digivox-iii.c b/drivers/media/rc/keymaps/rc-msi-digivox-iii.c
index 6129d3e925e5..1250cde3367d 100644
--- a/drivers/media/rc/keymaps/rc-msi-digivox-iii.c
+++ b/drivers/media/rc/keymaps/rc-msi-digivox-iii.c
@@ -71,3 +71,4 @@ module_exit(exit_rc_map_msi_digivox_iii)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("MSI DIGIVOX mini III remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c b/drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c
index 42270a7ef3ee..648bac448f29 100644
--- a/drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c
+++ b/drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c
@@ -117,3 +117,4 @@ module_exit(exit_rc_map_msi_tvanywhere_plus)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("MSI TV@nywhere Plus remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-msi-tvanywhere.c b/drivers/media/rc/keymaps/rc-msi-tvanywhere.c
index 45793c641009..b59af39ba005 100644
--- a/drivers/media/rc/keymaps/rc-msi-tvanywhere.c
+++ b/drivers/media/rc/keymaps/rc-msi-tvanywhere.c
@@ -63,3 +63,4 @@ module_exit(exit_rc_map_msi_tvanywhere)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("MSI TV@nywhere MASTER remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-nebula.c b/drivers/media/rc/keymaps/rc-nebula.c
index 2dc6061f69b3..23b75269d307 100644
--- a/drivers/media/rc/keymaps/rc-nebula.c
+++ b/drivers/media/rc/keymaps/rc-nebula.c
@@ -90,3 +90,4 @@ module_exit(exit_rc_map_nebula)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("nebula remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c b/drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c
index b12c54d47db3..94340a1864a0 100644
--- a/drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c
+++ b/drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c
@@ -151,3 +151,4 @@ module_exit(exit_rc_map_nec_terratec_cinergy_xs)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Terratec Cinergy Hybrid T USB XS FM remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-norwood.c b/drivers/media/rc/keymaps/rc-norwood.c
index acd5b1ccf8d0..da00003a5e79 100644
--- a/drivers/media/rc/keymaps/rc-norwood.c
+++ b/drivers/media/rc/keymaps/rc-norwood.c
@@ -79,3 +79,4 @@ module_exit(exit_rc_map_norwood)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Norwood Micro (non-Pro) TV Tuner remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-npgtech.c b/drivers/media/rc/keymaps/rc-npgtech.c
index 98a755e8bc18..6f4412922e0c 100644
--- a/drivers/media/rc/keymaps/rc-npgtech.c
+++ b/drivers/media/rc/keymaps/rc-npgtech.c
@@ -74,3 +74,4 @@ module_exit(exit_rc_map_npgtech)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("npgtech remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-odroid.c b/drivers/media/rc/keymaps/rc-odroid.c
index c6fbb64b5c41..0353229a4915 100644
--- a/drivers/media/rc/keymaps/rc-odroid.c
+++ b/drivers/media/rc/keymaps/rc-odroid.c
@@ -52,3 +52,4 @@ module_exit(exit_rc_map_odroid)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com");
+MODULE_DESCRIPTION("HardKernel ODROID remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-pctv-sedna.c b/drivers/media/rc/keymaps/rc-pctv-sedna.c
index c3bb1ecdd0ca..6583bf4fcb04 100644
--- a/drivers/media/rc/keymaps/rc-pctv-sedna.c
+++ b/drivers/media/rc/keymaps/rc-pctv-sedna.c
@@ -74,3 +74,4 @@ module_exit(exit_rc_map_pctv_sedna)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("pctv-sedna remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-pine64.c b/drivers/media/rc/keymaps/rc-pine64.c
index 9b2bdbbce04e..bcdb99997d4d 100644
--- a/drivers/media/rc/keymaps/rc-pine64.c
+++ b/drivers/media/rc/keymaps/rc-pine64.c
@@ -63,3 +63,4 @@ module_exit(exit_rc_map_pine64)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jonas Karlman");
+MODULE_DESCRIPTION("Pine64 IR remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-pinnacle-color.c b/drivers/media/rc/keymaps/rc-pinnacle-color.c
index b862725635b9..f33c38644f83 100644
--- a/drivers/media/rc/keymaps/rc-pinnacle-color.c
+++ b/drivers/media/rc/keymaps/rc-pinnacle-color.c
@@ -88,3 +88,4 @@ module_exit(exit_rc_map_pinnacle_color)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("pinnacle-color remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-pinnacle-grey.c b/drivers/media/rc/keymaps/rc-pinnacle-grey.c
index 3853b653cee6..22ef3d4e2e71 100644
--- a/drivers/media/rc/keymaps/rc-pinnacle-grey.c
+++ b/drivers/media/rc/keymaps/rc-pinnacle-grey.c
@@ -83,3 +83,4 @@ module_exit(exit_rc_map_pinnacle_grey)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("pinnacle-grey remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c b/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
index 96d8112fb468..35f0c790cc0c 100644
--- a/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
+++ b/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
@@ -64,3 +64,4 @@ module_exit(exit_rc_map_pinnacle_pctv_hd)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Pinnacle PCTV HD 800i mini remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-pixelview-002t.c b/drivers/media/rc/keymaps/rc-pixelview-002t.c
index c3439c46644c..0966ebf0903a 100644
--- a/drivers/media/rc/keymaps/rc-pixelview-002t.c
+++ b/drivers/media/rc/keymaps/rc-pixelview-002t.c
@@ -71,3 +71,4 @@ module_exit(exit_rc_map_pixelview)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("002-T IR remote keytable");
diff --git a/drivers/media/rc/keymaps/rc-pixelview-mk12.c b/drivers/media/rc/keymaps/rc-pixelview-mk12.c
index ea11ccde8442..e18774473809 100644
--- a/drivers/media/rc/keymaps/rc-pixelview-mk12.c
+++ b/drivers/media/rc/keymaps/rc-pixelview-mk12.c
@@ -77,3 +77,4 @@ module_exit(exit_rc_map_pixelview)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("MK-F12 IR remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-pixelview-new.c b/drivers/media/rc/keymaps/rc-pixelview-new.c
index 0259666831b0..cf7f1cf8c3b5 100644
--- a/drivers/media/rc/keymaps/rc-pixelview-new.c
+++ b/drivers/media/rc/keymaps/rc-pixelview-new.c
@@ -77,3 +77,4 @@ module_exit(exit_rc_map_pixelview_new)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("pixelview-new remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-pixelview.c b/drivers/media/rc/keymaps/rc-pixelview.c
index 29f6d2c013e4..567ad0a076fc 100644
--- a/drivers/media/rc/keymaps/rc-pixelview.c
+++ b/drivers/media/rc/keymaps/rc-pixelview.c
@@ -76,3 +76,4 @@ module_exit(exit_rc_map_pixelview)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("pixelview remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-powercolor-real-angel.c b/drivers/media/rc/keymaps/rc-powercolor-real-angel.c
index 66fe2e52e7c8..e7a6add1df26 100644
--- a/drivers/media/rc/keymaps/rc-powercolor-real-angel.c
+++ b/drivers/media/rc/keymaps/rc-powercolor-real-angel.c
@@ -75,3 +75,4 @@ module_exit(exit_rc_map_powercolor_real_angel)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Powercolor Real Angel 330 remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-proteus-2309.c b/drivers/media/rc/keymaps/rc-proteus-2309.c
index 36eebefd975c..1300482a6119 100644
--- a/drivers/media/rc/keymaps/rc-proteus-2309.c
+++ b/drivers/media/rc/keymaps/rc-proteus-2309.c
@@ -63,3 +63,4 @@ module_exit(exit_rc_map_proteus_2309)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("proteus-2309 remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-purpletv.c b/drivers/media/rc/keymaps/rc-purpletv.c
index bf4543fecb6f..9f6ee0be1347 100644
--- a/drivers/media/rc/keymaps/rc-purpletv.c
+++ b/drivers/media/rc/keymaps/rc-purpletv.c
@@ -75,3 +75,4 @@ module_exit(exit_rc_map_purpletv)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("purpletv remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-pv951.c b/drivers/media/rc/keymaps/rc-pv951.c
index 69db55463000..539e8573eb19 100644
--- a/drivers/media/rc/keymaps/rc-pv951.c
+++ b/drivers/media/rc/keymaps/rc-pv951.c
@@ -72,3 +72,4 @@ module_exit(exit_rc_map_pv951)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("pv951 remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-rc6-mce.c b/drivers/media/rc/keymaps/rc-rc6-mce.c
index d491e0fa8605..ef1c61eb99b2 100644
--- a/drivers/media/rc/keymaps/rc-rc6-mce.c
+++ b/drivers/media/rc/keymaps/rc-rc6-mce.c
@@ -114,3 +114,4 @@ module_exit(exit_rc_map_rc6_mce)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
+MODULE_DESCRIPTION("rc6 MCE remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c b/drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c
index 33bb458b81fd..088ead8f736b 100644
--- a/drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c
+++ b/drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c
@@ -72,3 +72,4 @@ module_exit(exit_rc_map_real_audio_220_32_keys)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Zogis Real Audio 220 - 32 keys remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-reddo.c b/drivers/media/rc/keymaps/rc-reddo.c
index b70390d19e78..af50d1ca4b15 100644
--- a/drivers/media/rc/keymaps/rc-reddo.c
+++ b/drivers/media/rc/keymaps/rc-reddo.c
@@ -71,3 +71,4 @@ module_exit(exit_rc_map_reddo)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("reddo remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-snapstream-firefly.c b/drivers/media/rc/keymaps/rc-snapstream-firefly.c
index e3d5bff3bd9e..826f44595e70 100644
--- a/drivers/media/rc/keymaps/rc-snapstream-firefly.c
+++ b/drivers/media/rc/keymaps/rc-snapstream-firefly.c
@@ -92,3 +92,4 @@ module_exit(exit_rc_map_snapstream_firefly)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
+MODULE_DESCRIPTION("SnapStream Firefly X10 RF remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-streamzap.c b/drivers/media/rc/keymaps/rc-streamzap.c
index 6684e2e86bc9..b82c3cdfca3b 100644
--- a/drivers/media/rc/keymaps/rc-streamzap.c
+++ b/drivers/media/rc/keymaps/rc-streamzap.c
@@ -75,3 +75,4 @@ module_exit(exit_rc_map_streamzap)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
+MODULE_DESCRIPTION("Streamzap remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-su3000.c b/drivers/media/rc/keymaps/rc-su3000.c
index 64cfc01aa48f..a333ade3b1d2 100644
--- a/drivers/media/rc/keymaps/rc-su3000.c
+++ b/drivers/media/rc/keymaps/rc-su3000.c
@@ -69,3 +69,4 @@ module_exit(exit_rc_map_su3000)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeny Plehov <Evgeny Plehov@ukr.net>");
+MODULE_DESCRIPTION("Geniatech HDStar remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-tanix-tx3mini.c b/drivers/media/rc/keymaps/rc-tanix-tx3mini.c
index d486cd69afb2..b5d77a0c94ed 100644
--- a/drivers/media/rc/keymaps/rc-tanix-tx3mini.c
+++ b/drivers/media/rc/keymaps/rc-tanix-tx3mini.c
@@ -75,3 +75,4 @@ module_exit(exit_rc_map_tanix_tx3mini)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com>");
+MODULE_DESCRIPTION("Tanix TX3 mini STB remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-tanix-tx5max.c b/drivers/media/rc/keymaps/rc-tanix-tx5max.c
index 59aaabed80dd..91db9017845a 100644
--- a/drivers/media/rc/keymaps/rc-tanix-tx5max.c
+++ b/drivers/media/rc/keymaps/rc-tanix-tx5max.c
@@ -66,3 +66,4 @@ module_exit(exit_rc_map_tanix_tx5max)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com>");
+MODULE_DESCRIPTION("Tanix TX5 max STB remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-tbs-nec.c b/drivers/media/rc/keymaps/rc-tbs-nec.c
index 420980925f29..426c767c907f 100644
--- a/drivers/media/rc/keymaps/rc-tbs-nec.c
+++ b/drivers/media/rc/keymaps/rc-tbs-nec.c
@@ -69,3 +69,4 @@ module_exit(exit_rc_map_tbs_nec)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("tbs-nec remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-technisat-ts35.c b/drivers/media/rc/keymaps/rc-technisat-ts35.c
index 9a917ea0ceba..07d5e0884eb9 100644
--- a/drivers/media/rc/keymaps/rc-technisat-ts35.c
+++ b/drivers/media/rc/keymaps/rc-technisat-ts35.c
@@ -70,3 +70,4 @@ module_init(init_rc_map)
module_exit(exit_rc_map)
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TechniSat TS35 remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-technisat-usb2.c b/drivers/media/rc/keymaps/rc-technisat-usb2.c
index 942100686c82..74ac89d37966 100644
--- a/drivers/media/rc/keymaps/rc-technisat-usb2.c
+++ b/drivers/media/rc/keymaps/rc-technisat-usb2.c
@@ -88,3 +88,4 @@ module_exit(exit_rc_map)
MODULE_AUTHOR("Patrick Boettcher <pboettcher@kernellabs.com>");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TechniSat TS35 remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-terratec-cinergy-c-pci.c b/drivers/media/rc/keymaps/rc-terratec-cinergy-c-pci.c
index da06f844d8fb..d44891308193 100644
--- a/drivers/media/rc/keymaps/rc-terratec-cinergy-c-pci.c
+++ b/drivers/media/rc/keymaps/rc-terratec-cinergy-c-pci.c
@@ -82,3 +82,4 @@ module_init(init_rc_map_terratec_cinergy_c_pci);
module_exit(exit_rc_map_terratec_cinergy_c_pci);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Terratec Cinergy C PCI remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-terratec-cinergy-s2-hd.c b/drivers/media/rc/keymaps/rc-terratec-cinergy-s2-hd.c
index a1844b531572..dbbb1ba0247b 100644
--- a/drivers/media/rc/keymaps/rc-terratec-cinergy-s2-hd.c
+++ b/drivers/media/rc/keymaps/rc-terratec-cinergy-s2-hd.c
@@ -80,3 +80,4 @@ module_init(init_rc_map_terratec_cinergy_s2_hd);
module_exit(exit_rc_map_terratec_cinergy_s2_hd);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Terratec Cinergy S2 HD remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c b/drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c
index fe587e3f0240..a9452d581339 100644
--- a/drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c
+++ b/drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c
@@ -86,3 +86,4 @@ module_exit(exit_rc_map_terratec_cinergy_xs)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Terratec Cinergy Hybrid T USB XS remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-terratec-slim-2.c b/drivers/media/rc/keymaps/rc-terratec-slim-2.c
index a54a59f90313..ea259d88769b 100644
--- a/drivers/media/rc/keymaps/rc-terratec-slim-2.c
+++ b/drivers/media/rc/keymaps/rc-terratec-slim-2.c
@@ -58,3 +58,4 @@ module_exit(exit_rc_map_terratec_slim_2)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("TerraTec slim remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-terratec-slim.c b/drivers/media/rc/keymaps/rc-terratec-slim.c
index 146e3a3480dc..bb40cbd58ebe 100644
--- a/drivers/media/rc/keymaps/rc-terratec-slim.c
+++ b/drivers/media/rc/keymaps/rc-terratec-slim.c
@@ -65,3 +65,4 @@ module_exit(exit_rc_map_terratec_slim)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("TerraTec slim remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-tevii-nec.c b/drivers/media/rc/keymaps/rc-tevii-nec.c
index 5b96e9a38e9d..ee7f8014742b 100644
--- a/drivers/media/rc/keymaps/rc-tevii-nec.c
+++ b/drivers/media/rc/keymaps/rc-tevii-nec.c
@@ -82,3 +82,4 @@ module_exit(exit_rc_map_tevii_nec)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("tevii-nec remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-tivo.c b/drivers/media/rc/keymaps/rc-tivo.c
index c51606a3be68..c02b8c8abd5c 100644
--- a/drivers/media/rc/keymaps/rc-tivo.c
+++ b/drivers/media/rc/keymaps/rc-tivo.c
@@ -93,3 +93,4 @@ module_exit(exit_rc_map_tivo)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
+MODULE_DESCRIPTION("TiVo remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-total-media-in-hand-02.c b/drivers/media/rc/keymaps/rc-total-media-in-hand-02.c
index 40b773ba45b9..290d1cc8577c 100644
--- a/drivers/media/rc/keymaps/rc-total-media-in-hand-02.c
+++ b/drivers/media/rc/keymaps/rc-total-media-in-hand-02.c
@@ -71,3 +71,4 @@ module_exit(exit_rc_map_total_media_in_hand_02)
MODULE_LICENSE("GPL");
MODULE_AUTHOR(" Alfredo J. Delaiti <alfredodelaiti@netscape.net>");
+MODULE_DESCRIPTION("Total Media In Hand_02 remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-total-media-in-hand.c b/drivers/media/rc/keymaps/rc-total-media-in-hand.c
index 2144db485d83..7f4b31b98f35 100644
--- a/drivers/media/rc/keymaps/rc-total-media-in-hand.c
+++ b/drivers/media/rc/keymaps/rc-total-media-in-hand.c
@@ -71,3 +71,4 @@ module_exit(exit_rc_map_total_media_in_hand)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("Total Media In Hand remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-trekstor.c b/drivers/media/rc/keymaps/rc-trekstor.c
index e938e0da51a6..ff01de550904 100644
--- a/drivers/media/rc/keymaps/rc-trekstor.c
+++ b/drivers/media/rc/keymaps/rc-trekstor.c
@@ -66,3 +66,4 @@ module_exit(exit_rc_map_trekstor)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("TrekStor remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-tt-1500.c b/drivers/media/rc/keymaps/rc-tt-1500.c
index ff70aab13b48..eb8d7fc5061a 100644
--- a/drivers/media/rc/keymaps/rc-tt-1500.c
+++ b/drivers/media/rc/keymaps/rc-tt-1500.c
@@ -76,3 +76,4 @@ module_exit(exit_rc_map_tt_1500)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Technotrend 1500 remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-twinhan-dtv-cab-ci.c b/drivers/media/rc/keymaps/rc-twinhan-dtv-cab-ci.c
index 5fc696d9e583..8e5cf8eb0db9 100644
--- a/drivers/media/rc/keymaps/rc-twinhan-dtv-cab-ci.c
+++ b/drivers/media/rc/keymaps/rc-twinhan-dtv-cab-ci.c
@@ -92,3 +92,4 @@ module_init(init_rc_map_twinhan_dtv_cab_ci);
module_exit(exit_rc_map_twinhan_dtv_cab_ci);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Twinhan DTV CAB CI remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-twinhan1027.c b/drivers/media/rc/keymaps/rc-twinhan1027.c
index e1cdcfa792dc..411ce3c8cbd0 100644
--- a/drivers/media/rc/keymaps/rc-twinhan1027.c
+++ b/drivers/media/rc/keymaps/rc-twinhan1027.c
@@ -87,3 +87,4 @@ module_exit(exit_rc_map_twinhan_vp1027)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sergey Ivanov <123kash@gmail.com>");
+MODULE_DESCRIPTION("twinhan1027 remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-vega-s9x.c b/drivers/media/rc/keymaps/rc-vega-s9x.c
index bf210c4dc535..40fbf408bf65 100644
--- a/drivers/media/rc/keymaps/rc-vega-s9x.c
+++ b/drivers/media/rc/keymaps/rc-vega-s9x.c
@@ -52,3 +52,4 @@ module_exit(exit_rc_map_vega_s9x)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com");
+MODULE_DESCRIPTION("Tronsmart Vega S9x remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-videomate-m1f.c b/drivers/media/rc/keymaps/rc-videomate-m1f.c
index e16b9b851c72..1f9be84ff27b 100644
--- a/drivers/media/rc/keymaps/rc-videomate-m1f.c
+++ b/drivers/media/rc/keymaps/rc-videomate-m1f.c
@@ -87,3 +87,4 @@ module_exit(exit_rc_map_videomate_k100)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pavel Osnova <pvosnova@gmail.com>");
+MODULE_DESCRIPTION("videomate-m1f remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-videomate-s350.c b/drivers/media/rc/keymaps/rc-videomate-s350.c
index a867d7a08055..281cc747229f 100644
--- a/drivers/media/rc/keymaps/rc-videomate-s350.c
+++ b/drivers/media/rc/keymaps/rc-videomate-s350.c
@@ -79,3 +79,4 @@ module_exit(exit_rc_map_videomate_s350)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("videomate-s350 remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-videomate-tv-pvr.c b/drivers/media/rc/keymaps/rc-videomate-tv-pvr.c
index fdc3b0e1350f..829842425fae 100644
--- a/drivers/media/rc/keymaps/rc-videomate-tv-pvr.c
+++ b/drivers/media/rc/keymaps/rc-videomate-tv-pvr.c
@@ -81,3 +81,4 @@ module_exit(exit_rc_map_videomate_tv_pvr)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("videomate-tv-pvr remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-videostrong-kii-pro.c b/drivers/media/rc/keymaps/rc-videostrong-kii-pro.c
index 414d4d231e7e..10cbc2c781d2 100644
--- a/drivers/media/rc/keymaps/rc-videostrong-kii-pro.c
+++ b/drivers/media/rc/keymaps/rc-videostrong-kii-pro.c
@@ -81,3 +81,4 @@ module_exit(exit_rc_map_kii_pro)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mohammad Rasim <mohammad.rasim96@gmail.com>");
+MODULE_DESCRIPTION("Videostrong KII Pro STB remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-wetek-hub.c b/drivers/media/rc/keymaps/rc-wetek-hub.c
index b5a21aff45f5..591ec20399f4 100644
--- a/drivers/media/rc/keymaps/rc-wetek-hub.c
+++ b/drivers/media/rc/keymaps/rc-wetek-hub.c
@@ -51,3 +51,4 @@ module_exit(exit_rc_map_wetek_hub)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com>");
+MODULE_DESCRIPTION("WeTek Hub STB remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-wetek-play2.c b/drivers/media/rc/keymaps/rc-wetek-play2.c
index bbbb11fa3c11..ce3b1029df23 100644
--- a/drivers/media/rc/keymaps/rc-wetek-play2.c
+++ b/drivers/media/rc/keymaps/rc-wetek-play2.c
@@ -91,3 +91,4 @@ module_exit(exit_rc_map_wetek_play2)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com");
+MODULE_DESCRIPTION("WeTek Play 2 STB remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c b/drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c
index 999ba4e084ae..edfba31f9ae6 100644
--- a/drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c
+++ b/drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c
@@ -76,3 +76,4 @@ module_exit(exit_rc_map_winfast_usbii_deluxe)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Leadtek Winfast TV USB II Deluxe remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-winfast.c b/drivers/media/rc/keymaps/rc-winfast.c
index be52a3e1f8ae..89649c8cdee5 100644
--- a/drivers/media/rc/keymaps/rc-winfast.c
+++ b/drivers/media/rc/keymaps/rc-winfast.c
@@ -96,3 +96,4 @@ module_exit(exit_rc_map_winfast)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Leadtek Winfast remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-x96max.c b/drivers/media/rc/keymaps/rc-x96max.c
index 0998ec3320e4..a22fcbbfa383 100644
--- a/drivers/media/rc/keymaps/rc-x96max.c
+++ b/drivers/media/rc/keymaps/rc-x96max.c
@@ -81,3 +81,4 @@ module_exit(exit_rc_map_x96max)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com");
+MODULE_DESCRIPTION("X96-max STB remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-xbox-360.c b/drivers/media/rc/keymaps/rc-xbox-360.c
index 231aa00514af..1364daf3ae6f 100644
--- a/drivers/media/rc/keymaps/rc-xbox-360.c
+++ b/drivers/media/rc/keymaps/rc-xbox-360.c
@@ -81,3 +81,4 @@ module_init(init_rc_map)
module_exit(exit_rc_map)
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Xbox 360 Universal Media remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-xbox-dvd.c b/drivers/media/rc/keymaps/rc-xbox-dvd.c
index 9d656042a81f..11ab134b05b5 100644
--- a/drivers/media/rc/keymaps/rc-xbox-dvd.c
+++ b/drivers/media/rc/keymaps/rc-xbox-dvd.c
@@ -61,3 +61,4 @@ module_init(init_rc_map)
module_exit(exit_rc_map)
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Xbox DVD remote controller keytable");
diff --git a/drivers/media/rc/keymaps/rc-zx-irdec.c b/drivers/media/rc/keymaps/rc-zx-irdec.c
index 7bb0c05eb759..e4bea7b39fd1 100644
--- a/drivers/media/rc/keymaps/rc-zx-irdec.c
+++ b/drivers/media/rc/keymaps/rc-zx-irdec.c
@@ -74,3 +74,4 @@ module_exit(exit_rc_map_zx_irdec)
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("zx-irdec remote controller keytable");
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 043d23aaa3cb..a537734832c5 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -276,7 +276,11 @@ static ssize_t lirc_transmit(struct file *file, const char __user *buf,
if (ret < 0)
goto out_kfree_raw;
- count = ret;
+ /* drop trailing space */
+ if (!(ret % 2))
+ count = ret - 1;
+ else
+ count = ret;
txbuf = kmalloc_array(count, sizeof(unsigned int), GFP_KERNEL);
if (!txbuf) {
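The hunk above makes lirc_transmit() discard the last entry when the parsed count is even. LIRC transmit data alternates pulse/space starting with a pulse, so an even count would end on a space that carries no signal. A stand-alone sketch of the same parity rule, with made-up values:

#include <stdio.h>

/* An IR transmit buffer alternates pulse/space and must end on a pulse,
 * so an even entry count means the final entry is a trailing space. */
static unsigned int usable_count(unsigned int ret)
{
	return (ret % 2) ? ret : ret - 1;	/* drop trailing space */
}

int main(void)
{
	printf("6 entries -> %u used\n", usable_count(6));	/* 5 */
	printf("7 entries -> %u used\n", usable_count(7));	/* 7 */
	return 0;
}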
diff --git a/drivers/media/rc/meson-ir.c b/drivers/media/rc/meson-ir.c
index 70322fab34ac..5303e6da5809 100644
--- a/drivers/media/rc/meson-ir.c
+++ b/drivers/media/rc/meson-ir.c
@@ -20,48 +20,196 @@
#define DRIVER_NAME "meson-ir"
-#define IR_DEC_LDR_ACTIVE 0x00
-#define IR_DEC_LDR_IDLE 0x04
-#define IR_DEC_LDR_REPEAT 0x08
-#define IR_DEC_BIT_0 0x0c
-#define IR_DEC_REG0 0x10
-#define IR_DEC_REG0_BASE_TIME GENMASK(11, 0)
-#define IR_DEC_FRAME 0x14
-#define IR_DEC_STATUS 0x18
-#define IR_DEC_STATUS_PULSE BIT(8)
-#define IR_DEC_REG1 0x1c
-#define IR_DEC_REG1_TIME_IV GENMASK(28, 16)
-#define IR_DEC_REG1_ENABLE BIT(15)
-#define IR_DEC_REG1_MODE GENMASK(8, 7)
-#define IR_DEC_REG1_IRQSEL GENMASK(3, 2)
-#define IR_DEC_REG1_RESET BIT(0)
-/* The following regs are only available on Meson 8b and newer */
-#define IR_DEC_REG2 0x20
-#define IR_DEC_REG2_MODE GENMASK(3, 0)
-
-#define DEC_MODE_NEC 0x0
-#define DEC_MODE_RAW 0x2
-
-#define IRQSEL_NEC_MODE 0
-#define IRQSEL_RISE_FALL 1
-#define IRQSEL_FALL 2
-#define IRQSEL_RISE 3
-
-#define MESON_RAW_TRATE 10 /* us */
-#define MESON_HW_TRATE 20 /* us */
+#define IR_DEC_LDR_ACTIVE 0x00
+#define IR_DEC_LDR_ACTIVE_MAX GENMASK(28, 16)
+#define IR_DEC_LDR_ACTIVE_MIN GENMASK(12, 0)
+#define IR_DEC_LDR_IDLE 0x04
+#define IR_DEC_LDR_IDLE_MAX GENMASK(28, 16)
+#define IR_DEC_LDR_IDLE_MIN GENMASK(12, 0)
+#define IR_DEC_LDR_REPEAT 0x08
+#define IR_DEC_LDR_REPEAT_MAX GENMASK(25, 16)
+#define IR_DEC_LDR_REPEAT_MIN GENMASK(9, 0)
+#define IR_DEC_BIT_0 0x0c
+#define IR_DEC_BIT_0_MAX GENMASK(25, 16)
+#define IR_DEC_BIT_0_MIN GENMASK(9, 0)
+#define IR_DEC_REG0 0x10
+#define IR_DEC_REG0_FILTER GENMASK(30, 28)
+#define IR_DEC_REG0_FRAME_TIME_MAX GENMASK(24, 12)
+#define IR_DEC_REG0_BASE_TIME GENMASK(11, 0)
+#define IR_DEC_FRAME 0x14
+#define IR_DEC_STATUS 0x18
+#define IR_DEC_STATUS_BIT_1_ENABLE BIT(30)
+#define IR_DEC_STATUS_BIT_1_MAX GENMASK(29, 20)
+#define IR_DEC_STATUS_BIT_1_MIN GENMASK(19, 10)
+#define IR_DEC_STATUS_PULSE BIT(8)
+#define IR_DEC_STATUS_BUSY BIT(7)
+#define IR_DEC_STATUS_FRAME_STATUS GENMASK(3, 0)
+#define IR_DEC_REG1 0x1c
+#define IR_DEC_REG1_TIME_IV GENMASK(28, 16)
+#define IR_DEC_REG1_FRAME_LEN GENMASK(13, 8)
+#define IR_DEC_REG1_ENABLE BIT(15)
+#define IR_DEC_REG1_HOLD_CODE BIT(6)
+#define IR_DEC_REG1_IRQSEL GENMASK(3, 2)
+#define IR_DEC_REG1_RESET BIT(0)
+/* Meson 6b uses REG1 to configure IR mode */
+#define IR_DEC_REG1_MODE GENMASK(8, 7)
+
+/* The following registers are only available on Meson 8b and newer */
+#define IR_DEC_REG2 0x20
+#define IR_DEC_REG2_TICK_MODE BIT(15)
+#define IR_DEC_REG2_REPEAT_COUNTER BIT(13)
+#define IR_DEC_REG2_REPEAT_TIME BIT(12)
+#define IR_DEC_REG2_COMPARE_FRAME BIT(11)
+#define IR_DEC_REG2_BIT_ORDER BIT(8)
+/* Meson 8b / GXBB use REG2 to configure IR mode */
+#define IR_DEC_REG2_MODE GENMASK(3, 0)
+#define IR_DEC_DURATN2 0x24
+#define IR_DEC_DURATN2_MAX GENMASK(25, 16)
+#define IR_DEC_DURATN2_MIN GENMASK(9, 0)
+#define IR_DEC_DURATN3 0x28
+#define IR_DEC_DURATN3_MAX GENMASK(25, 16)
+#define IR_DEC_DURATN3_MIN GENMASK(9, 0)
+#define IR_DEC_FRAME1 0x2c
+
+#define FRAME_MSB_FIRST true
+#define FRAME_LSB_FIRST false
+
+#define DEC_MODE_NEC 0x0
+#define DEC_MODE_RAW 0x2
+#define DEC_MODE_RC6 0x9
+#define DEC_MODE_XMP 0xE
+#define DEC_MODE_UNKNOW 0xFF
+
+#define DEC_STATUS_VALID BIT(3)
+#define DEC_STATUS_DATA_CODE_ERR BIT(2)
+#define DEC_STATUS_CUSTOM_CODE_ERR BIT(1)
+#define DEC_STATUS_REPEAT BIT(0)
+
+#define IRQSEL_DEC_MODE 0
+#define IRQSEL_RISE_FALL 1
+#define IRQSEL_FALL 2
+#define IRQSEL_RISE 3
+
+#define MESON_RAW_TRATE 10 /* us */
+#define MESON_HW_TRATE 20 /* us */
+
+/**
+ * struct meson_ir_protocol - describe IR Protocol parameter
+ *
+ * @hw_protocol: select IR Protocol from IR Controller
+ * @repeat_counter_enable: enable the frame-to-frame time counter; it works
+ *                         together with @repeat_compare_enable to detect
+ *                         repeat frames
+ * @repeat_check_enable: enable the repeat time check for repeat detection
+ * @repeat_compare_enable: enable frame comparison for repeat frame detection.
+ *                         Some IR protocols send the same data as the repeat
+ *                         frame; in that case this works together with
+ *                         @repeat_counter_enable to detect repeat frames.
+ * @bit_order: bit order, LSB or MSB
+ * @bit1_match_enable: enable matching of the logic '1' bit timing
+ * @hold_code_enable: hold the frame code in register IR_DEC_FRAME1; a new
+ *                    frame code will not be stored in IR_DEC_FRAME1 until
+ *                    IR_DEC_FRAME1 has been read
+ * @count_tick_mode: tick period of the frame-to-frame time counter,
+ *                   0 = 100us, 1 = 10us
+ * @code_length: length N of the data frame; the hardware is programmed
+ *               with N-1
+ * @frame_time_max: max time for whole frame. Unit: MESON_HW_TRATE
+ * @leader_active_max: max time for NEC/RC6 leader active part. Unit: MESON_HW_TRATE
+ * @leader_active_min: min time for NEC/RC6 leader active part. Unit: MESON_HW_TRATE
+ * @leader_idle_max: max time for NEC/RC6 leader idle part. Unit: MESON_HW_TRATE
+ * @leader_idle_min: min time for NEC/RC6 leader idle part. Unit: MESON_HW_TRATE
+ * @repeat_leader_max: max time for NEC repeat leader idle part. Unit: MESON_HW_TRATE
+ * @repeat_leader_min: min time for NEC repeat leader idle part. Unit: MESON_HW_TRATE
+ * @bit0_max: max time for NEC Logic '0', half of RC6 trailer bit, XMP Logic '00'
+ * @bit0_min: min time for NEC Logic '0', half of RC6 trailer bit, XMP Logic '00'
+ * @bit1_max: max time for NEC Logic '1', whole of RC6 trailer bit, XMP Logic '01'
+ * @bit1_min: min time for NEC Logic '1', whole of RC6 trailer bit, XMP Logic '01'
+ * @duration2_max: max time for half of RC6 normal bit, XMP Logic '10'
+ * @duration2_min: min time for half of RC6 normal bit, XMP Logic '10'
+ * @duration3_max: max time for whole of RC6 normal bit, XMP Logic '11'
+ * @duration3_min: min time for whole of RC6 normal bit, XMP Logic '11'
+ */
+
+struct meson_ir_protocol {
+ u8 hw_protocol;
+ bool repeat_counter_enable;
+ bool repeat_check_enable;
+ bool repeat_compare_enable;
+ bool bit_order;
+ bool bit1_match_enable;
+ bool hold_code_enable;
+ bool count_tick_mode;
+ u8 code_length;
+ u16 frame_time_max;
+ u16 leader_active_max;
+ u16 leader_active_min;
+ u16 leader_idle_max;
+ u16 leader_idle_min;
+ u16 repeat_leader_max;
+ u16 repeat_leader_min;
+ u16 bit0_max;
+ u16 bit0_min;
+ u16 bit1_max;
+ u16 bit1_min;
+ u16 duration2_max;
+ u16 duration2_min;
+ u16 duration3_max;
+ u16 duration3_min;
+};
+
+struct meson_ir_param {
+ bool support_hw_decoder;
+ unsigned int max_register;
+};
struct meson_ir {
+ const struct meson_ir_param *param;
struct regmap *reg;
struct rc_dev *rc;
spinlock_t lock;
};
-static const struct regmap_config meson_ir_regmap_config = {
+static struct regmap_config meson_ir_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
};
+static const struct meson_ir_protocol protocol_timings[] = {
+ /* protocol, repeat counter, repeat check, repeat compare, order */
+ {DEC_MODE_NEC, false, false, false, FRAME_LSB_FIRST,
+ /* bit 1 match, hold code, count tick, len, frame time */
+ true, false, false, 32, 4000,
+ /* leader active max/min, leader idle max/min, repeat leader max/min */
+ 500, 400, 300, 200, 150, 80,
+ /* bit0 max/min, bit1 max/min, duration2 max/min, duration3 max/min */
+ 72, 40, 134, 90, 0, 0, 0, 0}
+};
+
+static void meson_ir_nec_handler(struct meson_ir *ir)
+{
+ u32 code = 0;
+ u32 status = 0;
+ enum rc_proto proto;
+
+ regmap_read(ir->reg, IR_DEC_STATUS, &status);
+
+ if (status & DEC_STATUS_REPEAT) {
+ rc_repeat(ir->rc);
+ } else {
+ regmap_read(ir->reg, IR_DEC_FRAME, &code);
+
+ code = ir_nec_bytes_to_scancode(code, code >> 8,
+ code >> 16, code >> 24, &proto);
+ rc_keydown(ir->rc, proto, code, 0);
+ }
+}
+
+static void meson_ir_hw_handler(struct meson_ir *ir)
+{
+ if (ir->rc->enabled_protocols & RC_PROTO_BIT_NEC)
+ meson_ir_nec_handler(ir);
+}
+
static irqreturn_t meson_ir_irq(int irqno, void *dev_id)
{
struct meson_ir *ir = dev_id;
@@ -70,22 +218,232 @@ static irqreturn_t meson_ir_irq(int irqno, void *dev_id)
spin_lock(&ir->lock);
- regmap_read(ir->reg, IR_DEC_REG1, &duration);
- duration = FIELD_GET(IR_DEC_REG1_TIME_IV, duration);
- rawir.duration = duration * MESON_RAW_TRATE;
-
regmap_read(ir->reg, IR_DEC_STATUS, &status);
- rawir.pulse = !!(status & IR_DEC_STATUS_PULSE);
- ir_raw_event_store_with_timeout(ir->rc, &rawir);
+ if (ir->rc->driver_type == RC_DRIVER_IR_RAW) {
+ rawir.pulse = !!(status & IR_DEC_STATUS_PULSE);
+
+ regmap_read(ir->reg, IR_DEC_REG1, &duration);
+ duration = FIELD_GET(IR_DEC_REG1_TIME_IV, duration);
+ rawir.duration = duration * MESON_RAW_TRATE;
+
+ ir_raw_event_store_with_timeout(ir->rc, &rawir);
+ } else if (ir->rc->driver_type == RC_DRIVER_SCANCODE) {
+ if (status & DEC_STATUS_VALID)
+ meson_ir_hw_handler(ir);
+ }
spin_unlock(&ir->lock);
return IRQ_HANDLED;
}
+static int meson_ir_hw_decoder_init(struct rc_dev *dev, u64 *rc_type)
+{
+ u8 protocol;
+ u32 regval;
+ int i;
+ unsigned long flags;
+ const struct meson_ir_protocol *timings;
+ struct meson_ir *ir = dev->priv;
+
+ if (*rc_type & RC_PROTO_BIT_NEC)
+ protocol = DEC_MODE_NEC;
+ else
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(protocol_timings); i++)
+ if (protocol_timings[i].hw_protocol == protocol)
+ break;
+
+ if (i == ARRAY_SIZE(protocol_timings)) {
+ dev_err(&dev->dev, "hw protocol isn't supported: %d\n",
+ protocol);
+ return -EINVAL;
+ }
+ timings = &protocol_timings[i];
+
+ spin_lock_irqsave(&ir->lock, flags);
+
+ /* Clear controller status */
+ regmap_read(ir->reg, IR_DEC_STATUS, &regval);
+ regmap_read(ir->reg, IR_DEC_FRAME, &regval);
+
+ /* Reset ir decoder and disable decoder */
+ regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_ENABLE, 0);
+ regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_RESET,
+ IR_DEC_REG1_RESET);
+
+ /* Base time resolution, (19+1)*1us=20us */
+ regval = FIELD_PREP(IR_DEC_REG0_BASE_TIME, MESON_HW_TRATE - 1);
+ regmap_update_bits(ir->reg, IR_DEC_REG0, IR_DEC_REG0_BASE_TIME, regval);
+
+ /* Monitor timing for input filter */
+ regmap_update_bits(ir->reg, IR_DEC_REG0, IR_DEC_REG0_FILTER,
+ FIELD_PREP(IR_DEC_REG0_FILTER, 7));
+
+ /* HW protocol */
+ regval = FIELD_PREP(IR_DEC_REG2_MODE, timings->hw_protocol);
+ regmap_update_bits(ir->reg, IR_DEC_REG2, IR_DEC_REG2_MODE, regval);
+
+ /* Hold frame data until register was read */
+ regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_HOLD_CODE,
+ timings->hold_code_enable ?
+ IR_DEC_REG1_HOLD_CODE : 0);
+
+ /* Bit order */
+ regmap_update_bits(ir->reg, IR_DEC_REG2, IR_DEC_REG2_BIT_ORDER,
+ timings->bit_order ? IR_DEC_REG2_BIT_ORDER : 0);
+
+ /* Select tick mode */
+ regmap_update_bits(ir->reg, IR_DEC_REG2, IR_DEC_REG2_TICK_MODE,
+ timings->count_tick_mode ?
+ IR_DEC_REG2_TICK_MODE : 0);
+
+ /*
+	 * Some protocols transmit the same data frame again as the repeat
+	 * frame while the key is held down. In that case it can be detected
+	 * as a repeat frame if the frame-compare checker is enabled.
+ */
+ regmap_update_bits(ir->reg, IR_DEC_REG2, IR_DEC_REG2_REPEAT_COUNTER,
+ timings->repeat_counter_enable ?
+ IR_DEC_REG2_REPEAT_COUNTER : 0);
+ regmap_update_bits(ir->reg, IR_DEC_REG2, IR_DEC_REG2_REPEAT_TIME,
+ timings->repeat_check_enable ?
+ IR_DEC_REG2_REPEAT_TIME : 0);
+ regmap_update_bits(ir->reg, IR_DEC_REG2, IR_DEC_REG2_COMPARE_FRAME,
+ timings->repeat_compare_enable ?
+ IR_DEC_REG2_COMPARE_FRAME : 0);
+
+ /*
+ * FRAME_TIME_MAX should be larger than the time between
+ * data frame and repeat frame
+ */
+ regval = FIELD_PREP(IR_DEC_REG0_FRAME_TIME_MAX,
+ timings->frame_time_max);
+ regmap_update_bits(ir->reg, IR_DEC_REG0, IR_DEC_REG0_FRAME_TIME_MAX,
+ regval);
+
+ /* Length(N-1) of data frame */
+ regval = FIELD_PREP(IR_DEC_REG1_FRAME_LEN, timings->code_length - 1);
+ regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_FRAME_LEN, regval);
+
+ /* Time for leader active part */
+ regval = FIELD_PREP(IR_DEC_LDR_ACTIVE_MAX,
+ timings->leader_active_max) |
+ FIELD_PREP(IR_DEC_LDR_ACTIVE_MIN,
+ timings->leader_active_min);
+ regmap_update_bits(ir->reg, IR_DEC_LDR_ACTIVE, IR_DEC_LDR_ACTIVE_MAX |
+ IR_DEC_LDR_ACTIVE_MIN, regval);
+
+ /* Time for leader idle part */
+ regval = FIELD_PREP(IR_DEC_LDR_IDLE_MAX, timings->leader_idle_max) |
+ FIELD_PREP(IR_DEC_LDR_IDLE_MIN, timings->leader_idle_min);
+ regmap_update_bits(ir->reg, IR_DEC_LDR_IDLE,
+ IR_DEC_LDR_IDLE_MAX | IR_DEC_LDR_IDLE_MIN, regval);
+
+ /* Time for repeat leader idle part */
+ regval = FIELD_PREP(IR_DEC_LDR_REPEAT_MAX, timings->repeat_leader_max) |
+ FIELD_PREP(IR_DEC_LDR_REPEAT_MIN, timings->repeat_leader_min);
+ regmap_update_bits(ir->reg, IR_DEC_LDR_REPEAT, IR_DEC_LDR_REPEAT_MAX |
+ IR_DEC_LDR_REPEAT_MIN, regval);
+
+ /*
+ * NEC: Time for logic '0'
+ * RC6: Time for half of trailer bit
+ */
+ regval = FIELD_PREP(IR_DEC_BIT_0_MAX, timings->bit0_max) |
+ FIELD_PREP(IR_DEC_BIT_0_MIN, timings->bit0_min);
+ regmap_update_bits(ir->reg, IR_DEC_BIT_0,
+ IR_DEC_BIT_0_MAX | IR_DEC_BIT_0_MIN, regval);
+
+ /*
+ * NEC: Time for logic '1'
+ * RC6: Time for whole of trailer bit
+ */
+ regval = FIELD_PREP(IR_DEC_STATUS_BIT_1_MAX, timings->bit1_max) |
+ FIELD_PREP(IR_DEC_STATUS_BIT_1_MIN, timings->bit1_min);
+ regmap_update_bits(ir->reg, IR_DEC_STATUS, IR_DEC_STATUS_BIT_1_MAX |
+ IR_DEC_STATUS_BIT_1_MIN, regval);
+
+ /* Enable to match logic '1' */
+ regmap_update_bits(ir->reg, IR_DEC_STATUS, IR_DEC_STATUS_BIT_1_ENABLE,
+ timings->bit1_match_enable ?
+ IR_DEC_STATUS_BIT_1_ENABLE : 0);
+
+ /*
+ * NEC: Unused
+	 * RC6: Time for half of logic 0/1
+ */
+ regval = FIELD_PREP(IR_DEC_DURATN2_MAX, timings->duration2_max) |
+ FIELD_PREP(IR_DEC_DURATN2_MIN, timings->duration2_min);
+ regmap_update_bits(ir->reg, IR_DEC_DURATN2,
+ IR_DEC_DURATN2_MAX | IR_DEC_DURATN2_MIN, regval);
+
+ /*
+ * NEC: Unused
+ * RC6: Time for whole logic 0/1
+ */
+ regval = FIELD_PREP(IR_DEC_DURATN3_MAX, timings->duration3_max) |
+ FIELD_PREP(IR_DEC_DURATN3_MIN, timings->duration3_min);
+ regmap_update_bits(ir->reg, IR_DEC_DURATN3,
+ IR_DEC_DURATN3_MAX | IR_DEC_DURATN3_MIN, regval);
+
+ /* Reset ir decoder and enable decode */
+ regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_RESET,
+ IR_DEC_REG1_RESET);
+ regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_RESET, 0);
+ regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_ENABLE,
+ IR_DEC_REG1_ENABLE);
+
+ spin_unlock_irqrestore(&ir->lock, flags);
+
+ dev_info(&dev->dev, "hw decoder init, protocol: %d\n", protocol);
+
+ return 0;
+}
+
+static void meson_ir_sw_decoder_init(struct rc_dev *dev)
+{
+ unsigned long flags;
+ struct meson_ir *ir = dev->priv;
+
+ spin_lock_irqsave(&ir->lock, flags);
+
+ /* Reset the decoder */
+ regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_RESET,
+ IR_DEC_REG1_RESET);
+ regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_RESET, 0);
+
+ /* Set general operation mode (= raw/software decoding) */
+ if (of_device_is_compatible(dev->dev.of_node, "amlogic,meson6-ir"))
+ regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_MODE,
+ FIELD_PREP(IR_DEC_REG1_MODE,
+ DEC_MODE_RAW));
+ else
+ regmap_update_bits(ir->reg, IR_DEC_REG2, IR_DEC_REG2_MODE,
+ FIELD_PREP(IR_DEC_REG2_MODE,
+ DEC_MODE_RAW));
+
+ /* Set rate */
+ regmap_update_bits(ir->reg, IR_DEC_REG0, IR_DEC_REG0_BASE_TIME,
+ FIELD_PREP(IR_DEC_REG0_BASE_TIME,
+ MESON_RAW_TRATE - 1));
+ /* IRQ on rising and falling edges */
+ regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_IRQSEL,
+ FIELD_PREP(IR_DEC_REG1_IRQSEL, IRQSEL_RISE_FALL));
+ /* Enable the decoder */
+ regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_ENABLE,
+ IR_DEC_REG1_ENABLE);
+
+ spin_unlock_irqrestore(&ir->lock, flags);
+
+ dev_info(&dev->dev, "sw decoder init\n");
+}
+
static int meson_ir_probe(struct platform_device *pdev)
{
+ const struct meson_ir_param *match_data;
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
void __iomem *res_start;
@@ -97,10 +455,17 @@ static int meson_ir_probe(struct platform_device *pdev)
if (!ir)
return -ENOMEM;
+ match_data = of_device_get_match_data(dev);
+ if (!match_data)
+ return dev_err_probe(dev, -ENODEV, "failed to get match data\n");
+
+ ir->param = match_data;
+
res_start = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(res_start))
return PTR_ERR(res_start);
+ meson_ir_regmap_config.max_register = ir->param->max_register;
ir->reg = devm_regmap_init_mmio(&pdev->dev, res_start,
&meson_ir_regmap_config);
if (IS_ERR(ir->reg))
@@ -110,23 +475,34 @@ static int meson_ir_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- ir->rc = devm_rc_allocate_device(dev, RC_DRIVER_IR_RAW);
+ if (ir->param->support_hw_decoder)
+ ir->rc = devm_rc_allocate_device(&pdev->dev,
+ RC_DRIVER_SCANCODE);
+ else
+ ir->rc = devm_rc_allocate_device(&pdev->dev, RC_DRIVER_IR_RAW);
+
if (!ir->rc) {
dev_err(dev, "failed to allocate rc device\n");
return -ENOMEM;
}
+ if (ir->rc->driver_type == RC_DRIVER_IR_RAW) {
+ ir->rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
+ ir->rc->rx_resolution = MESON_RAW_TRATE;
+ ir->rc->min_timeout = 1;
+ ir->rc->timeout = IR_DEFAULT_TIMEOUT;
+ ir->rc->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
+ } else if (ir->rc->driver_type == RC_DRIVER_SCANCODE) {
+ ir->rc->allowed_protocols = RC_PROTO_BIT_NEC;
+ ir->rc->change_protocol = meson_ir_hw_decoder_init;
+ }
+
ir->rc->priv = ir;
ir->rc->device_name = DRIVER_NAME;
ir->rc->input_phys = DRIVER_NAME "/input0";
ir->rc->input_id.bustype = BUS_HOST;
map_name = of_get_property(node, "linux,rc-map-name", NULL);
ir->rc->map_name = map_name ? map_name : RC_MAP_EMPTY;
- ir->rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
- ir->rc->rx_resolution = MESON_RAW_TRATE;
- ir->rc->min_timeout = 1;
- ir->rc->timeout = IR_DEFAULT_TIMEOUT;
- ir->rc->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
ir->rc->driver_name = DRIVER_NAME;
spin_lock_init(&ir->lock);
@@ -138,36 +514,15 @@ static int meson_ir_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_request_irq(dev, irq, meson_ir_irq, 0, NULL, ir);
+ if (ir->rc->driver_type == RC_DRIVER_IR_RAW)
+ meson_ir_sw_decoder_init(ir->rc);
+
+ ret = devm_request_irq(dev, irq, meson_ir_irq, 0, "meson_ir", ir);
if (ret) {
dev_err(dev, "failed to request irq\n");
return ret;
}
- /* Reset the decoder */
- regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_RESET,
- IR_DEC_REG1_RESET);
- regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_RESET, 0);
-
- /* Set general operation mode (= raw/software decoding) */
- if (of_device_is_compatible(node, "amlogic,meson6-ir"))
- regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_MODE,
- FIELD_PREP(IR_DEC_REG1_MODE, DEC_MODE_RAW));
- else
- regmap_update_bits(ir->reg, IR_DEC_REG2, IR_DEC_REG2_MODE,
- FIELD_PREP(IR_DEC_REG2_MODE, DEC_MODE_RAW));
-
- /* Set rate */
- regmap_update_bits(ir->reg, IR_DEC_REG0, IR_DEC_REG0_BASE_TIME,
- FIELD_PREP(IR_DEC_REG0_BASE_TIME,
- MESON_RAW_TRATE - 1));
- /* IRQ on rising and falling edges */
- regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_IRQSEL,
- FIELD_PREP(IR_DEC_REG1_IRQSEL, IRQSEL_RISE_FALL));
- /* Enable the decoder */
- regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_ENABLE,
- IR_DEC_REG1_ENABLE);
-
dev_info(dev, "receiver initialized\n");
return 0;
@@ -212,11 +567,36 @@ static void meson_ir_shutdown(struct platform_device *pdev)
spin_unlock_irqrestore(&ir->lock, flags);
}
+static const struct meson_ir_param meson6_ir_param = {
+ .support_hw_decoder = false,
+ .max_register = IR_DEC_REG1,
+};
+
+static const struct meson_ir_param meson8b_ir_param = {
+ .support_hw_decoder = false,
+ .max_register = IR_DEC_REG2,
+};
+
+static const struct meson_ir_param meson_s4_ir_param = {
+ .support_hw_decoder = true,
+ .max_register = IR_DEC_FRAME1,
+};
+
static const struct of_device_id meson_ir_match[] = {
- { .compatible = "amlogic,meson6-ir" },
- { .compatible = "amlogic,meson8b-ir" },
- { .compatible = "amlogic,meson-gxbb-ir" },
- { },
+ {
+ .compatible = "amlogic,meson6-ir",
+ .data = &meson6_ir_param,
+ }, {
+ .compatible = "amlogic,meson8b-ir",
+ .data = &meson8b_ir_param,
+ }, {
+ .compatible = "amlogic,meson-gxbb-ir",
+ .data = &meson8b_ir_param,
+ }, {
+ .compatible = "amlogic,meson-s4-ir",
+ .data = &meson_s4_ir_param,
+ },
+ {},
};
MODULE_DEVICE_TABLE(of, meson_ir_match);
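meson_ir_hw_decoder_init() above programs every timing window with the same idiom: pack a max and a min field into one register value with FIELD_PREP() and write only those bits with regmap_update_bits(). A minimal sketch of that idiom, reusing the driver's IR_DEC_LDR_ACTIVE defines; the helper itself is illustrative, not part of the driver:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/regmap.h>

#define IR_DEC_LDR_ACTIVE	0x00
#define IR_DEC_LDR_ACTIVE_MAX	GENMASK(28, 16)
#define IR_DEC_LDR_ACTIVE_MIN	GENMASK(12, 0)

/* Pack the max/min leader-active window into one value and update only
 * those bits, leaving the rest of the register untouched. */
static void set_leader_active(struct regmap *reg, u16 max, u16 min)
{
	u32 val = FIELD_PREP(IR_DEC_LDR_ACTIVE_MAX, max) |
		  FIELD_PREP(IR_DEC_LDR_ACTIVE_MIN, min);

	regmap_update_bits(reg, IR_DEC_LDR_ACTIVE,
			   IR_DEC_LDR_ACTIVE_MAX | IR_DEC_LDR_ACTIVE_MIN, val);
}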
diff --git a/drivers/media/rc/pwm-ir-tx.c b/drivers/media/rc/pwm-ir-tx.c
index 7732054c4621..c5f37c03af9c 100644
--- a/drivers/media/rc/pwm-ir-tx.c
+++ b/drivers/media/rc/pwm-ir-tx.c
@@ -23,6 +23,7 @@ struct pwm_ir {
static const struct of_device_id pwm_ir_of_match[] = {
{ .compatible = "pwm-ir-tx", },
+ { .compatible = "nokia,n900-ir" },
{ },
};
MODULE_DEVICE_TABLE(of, pwm_ir_of_match);
diff --git a/drivers/media/test-drivers/vidtv/vidtv_mux.c b/drivers/media/test-drivers/vidtv/vidtv_mux.c
index b51e6a3b8cbe..f99878eff7ac 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_mux.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_mux.c
@@ -504,13 +504,16 @@ struct vidtv_mux *vidtv_mux_init(struct dvb_frontend *fe,
m->priv = args->priv;
m->network_id = args->network_id;
m->network_name = kstrdup(args->network_name, GFP_KERNEL);
+ if (!m->network_name)
+ goto free_mux_buf;
+
m->timing.current_jiffies = get_jiffies_64();
if (args->channels)
m->channels = args->channels;
else
if (vidtv_channels_init(m) < 0)
- goto free_mux_buf;
+ goto free_mux_network_name;
/* will alloc data for pmt_sections after initializing pat */
if (vidtv_channel_si_init(m) < 0)
@@ -527,6 +530,8 @@ free_channel_si:
vidtv_channel_si_destroy(m);
free_channels:
vidtv_channels_destroy(m);
+free_mux_network_name:
+ kfree(m->network_name);
free_mux_buf:
vfree(m->mux_buf);
free_mux:
diff --git a/drivers/media/test-drivers/vidtv/vidtv_psi.c b/drivers/media/test-drivers/vidtv/vidtv_psi.c
index ce0b7a6e92dc..2a51c898c11e 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_psi.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_psi.c
@@ -301,16 +301,29 @@ struct vidtv_psi_desc_service *vidtv_psi_service_desc_init(struct vidtv_psi_desc
desc->service_name_len = service_name_len;
- if (service_name && service_name_len)
+ if (service_name && service_name_len) {
desc->service_name = kstrdup(service_name, GFP_KERNEL);
+ if (!desc->service_name)
+ goto free_desc;
+ }
desc->provider_name_len = provider_name_len;
- if (provider_name && provider_name_len)
+ if (provider_name && provider_name_len) {
desc->provider_name = kstrdup(provider_name, GFP_KERNEL);
+ if (!desc->provider_name)
+ goto free_desc_service_name;
+ }
vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
return desc;
+
+free_desc_service_name:
+ if (service_name && service_name_len)
+ kfree(desc->service_name);
+free_desc:
+ kfree(desc);
+ return NULL;
}
struct vidtv_psi_desc_registration
@@ -355,8 +368,13 @@ struct vidtv_psi_desc_network_name
desc->length = network_name_len;
- if (network_name && network_name_len)
+ if (network_name && network_name_len) {
desc->network_name = kstrdup(network_name, GFP_KERNEL);
+ if (!desc->network_name) {
+ kfree(desc);
+ return NULL;
+ }
+ }
vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
return desc;
@@ -442,15 +460,32 @@ struct vidtv_psi_desc_short_event
iso_language_code = "eng";
desc->iso_language_code = kstrdup(iso_language_code, GFP_KERNEL);
+ if (!desc->iso_language_code)
+ goto free_desc;
- if (event_name && event_name_len)
+ if (event_name && event_name_len) {
desc->event_name = kstrdup(event_name, GFP_KERNEL);
+ if (!desc->event_name)
+ goto free_desc_language_code;
+ }
- if (text && text_len)
+ if (text && text_len) {
desc->text = kstrdup(text, GFP_KERNEL);
+ if (!desc->text)
+ goto free_desc_event_name;
+ }
vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
return desc;
+
+free_desc_event_name:
+ if (event_name && event_name_len)
+ kfree(desc->event_name);
+free_desc_language_code:
+ kfree(desc->iso_language_code);
+free_desc:
+ kfree(desc);
+ return NULL;
}
struct vidtv_psi_desc *vidtv_psi_desc_clone(struct vidtv_psi_desc *desc)
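The vidtv_psi and vidtv_mux hunks above add the usual kstrdup() failure handling: check each duplication and unwind earlier allocations through goto labels so nothing leaks on the error path. A generic sketch of the pattern, with illustrative names:

#include <linux/slab.h>
#include <linux/string.h>

struct two_names {
	char *first;
	char *second;
};

static struct two_names *two_names_new(const char *a, const char *b)
{
	struct two_names *n = kzalloc(sizeof(*n), GFP_KERNEL);

	if (!n)
		return NULL;

	n->first = kstrdup(a, GFP_KERNEL);
	if (!n->first)
		goto free_n;

	n->second = kstrdup(b, GFP_KERNEL);
	if (!n->second)
		goto free_first;

	return n;

free_first:
	kfree(n->first);	/* undo only what already succeeded */
free_n:
	kfree(n);
	return NULL;
}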
diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
index e95bdccfc18e..394c9f81ea72 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.c
+++ b/drivers/media/test-drivers/vivid/vivid-core.c
@@ -240,7 +240,7 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(cap->driver, "vivid", sizeof(cap->driver));
strscpy(cap->card, "vivid", sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
- "platform:%s", dev->v4l2_dev.name);
+ "platform:%s-%03d", VIVID_MODULE_NAME, dev->inst);
cap->capabilities = dev->vid_cap_caps | dev->vid_out_caps |
dev->vbi_cap_caps | dev->vbi_out_caps |
diff --git a/drivers/media/test-drivers/vivid/vivid-rds-gen.c b/drivers/media/test-drivers/vivid/vivid-rds-gen.c
index b5b104ee64c9..c57771119a34 100644
--- a/drivers/media/test-drivers/vivid/vivid-rds-gen.c
+++ b/drivers/media/test-drivers/vivid/vivid-rds-gen.c
@@ -145,7 +145,7 @@ void vivid_rds_gen_fill(struct vivid_rds_gen *rds, unsigned freq,
rds->ta = alt;
rds->ms = true;
snprintf(rds->psname, sizeof(rds->psname), "%6d.%1d",
- freq / 16, ((freq & 0xf) * 10) / 16);
+ (freq / 16) % 1000000, (((freq & 0xf) * 10) / 16) % 10);
if (alt)
strscpy(rds->radiotext,
" The Radio Data System can switch between different Radio Texts ",
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index c5e21785fafe..fe4410a5e128 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -937,7 +937,6 @@ static int cx231xx_load_firmware(struct cx231xx *dev)
u32 *p_current_fw, *p_fw;
u32 *p_fw_data;
int frame = 0;
- u16 _buffer_size = 4096;
u8 *p_buffer;
p_current_fw = vmalloc(1884180 * 4);
@@ -947,7 +946,7 @@ static int cx231xx_load_firmware(struct cx231xx *dev)
return -ENOMEM;
}
- p_buffer = vmalloc(4096);
+ p_buffer = vmalloc(EP5_BUF_SIZE);
if (p_buffer == NULL) {
dprintk(2, "FAIL!!!\n");
vfree(p_current_fw);
@@ -1030,9 +1029,9 @@ static int cx231xx_load_firmware(struct cx231xx *dev)
/*download the firmware by ep5-out*/
- for (frame = 0; frame < (int)(CX231xx_FIRM_IMAGE_SIZE*20/_buffer_size);
+ for (frame = 0; frame < (int)(CX231xx_FIRM_IMAGE_SIZE*20/EP5_BUF_SIZE);
frame++) {
- for (i = 0; i < _buffer_size; i++) {
+ for (i = 0; i < EP5_BUF_SIZE; i++) {
*(p_buffer + i) = (u8)(*(p_fw + (frame * 128 * 8 + (i / 4))) & 0x000000FF);
i++;
*(p_buffer + i) = (u8)((*(p_fw + (frame * 128 * 8 + (i / 4))) & 0x0000FF00) >> 8);
@@ -1041,7 +1040,7 @@ static int cx231xx_load_firmware(struct cx231xx *dev)
i++;
*(p_buffer + i) = (u8)((*(p_fw + (frame * 128 * 8 + (i / 4))) & 0xFF000000) >> 24);
}
- cx231xx_ep5_bulkout(dev, p_buffer, _buffer_size);
+ cx231xx_ep5_bulkout(dev, p_buffer, EP5_BUF_SIZE);
}
p_current_fw = p_fw;
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index 727e6268567f..7b7e2a26ef93 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -751,13 +751,12 @@ int cx231xx_ep5_bulkout(struct cx231xx *dev, u8 *firmware, u16 size)
int ret = -ENOMEM;
u32 *buffer;
- buffer = kzalloc(4096, GFP_KERNEL);
+ buffer = kmemdup(firmware, EP5_BUF_SIZE, GFP_KERNEL);
if (buffer == NULL)
return -ENOMEM;
- memcpy(&buffer[0], firmware, 4096);
ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 5),
- buffer, 4096, &actlen, 2000);
+ buffer, EP5_BUF_SIZE, &actlen, EP5_TIMEOUT_MS);
if (ret)
dev_err(dev->dev,
@@ -994,7 +993,7 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
/* De-allocates all pending stuff */
cx231xx_uninit_isoc(dev);
- dma_q->p_left_data = kzalloc(4096, GFP_KERNEL);
+ dma_q->p_left_data = kzalloc(EP5_BUF_SIZE, GFP_KERNEL);
if (dma_q->p_left_data == NULL)
return -ENOMEM;
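Both here and in the smsusb.c hunk further down, kmemdup() replaces an allocate-then-memcpy() pair with a single call. A minimal sketch of the equivalence, with illustrative helper names:

#include <linux/slab.h>
#include <linux/string.h>

/* Old shape: allocate, then copy by hand. */
static void *copy_buf_old(const void *src, size_t len)
{
	void *buf = kmalloc(len, GFP_KERNEL);

	if (buf)
		memcpy(buf, src, len);
	return buf;
}

/* New shape: kmemdup() does both steps and keeps the same error handling. */
static void *copy_buf_new(const void *src, size_t len)
{
	return kmemdup(src, len, GFP_KERNEL);
}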
diff --git a/drivers/media/usb/cx231xx/cx231xx.h b/drivers/media/usb/cx231xx/cx231xx.h
index 6929e4d97067..74339a6a2f71 100644
--- a/drivers/media/usb/cx231xx/cx231xx.h
+++ b/drivers/media/usb/cx231xx/cx231xx.h
@@ -120,6 +120,9 @@
#define CX23417_OSC_EN 8
#define CX23417_RESET 9
+#define EP5_BUF_SIZE 4096
+#define EP5_TIMEOUT_MS 2000
+
struct cx23417_fmt {
u32 fourcc; /* v4l2 format id */
int depth;
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 33a2aa8907e6..4eb7dd4599b7 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -322,8 +322,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
ret = -EOPNOTSUPP;
} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
(msg[0].addr == state->af9033_i2c_addr[1])) {
- if (msg[0].len < 3 || msg[1].len < 1)
- return -EOPNOTSUPP;
+ if (msg[0].len < 3 || msg[1].len < 1) {
+ ret = -EOPNOTSUPP;
+ goto unlock;
+ }
/* demod access via firmware interface */
u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
msg[0].buf[2];
@@ -383,8 +385,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
ret = -EOPNOTSUPP;
} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
(msg[0].addr == state->af9033_i2c_addr[1])) {
- if (msg[0].len < 3)
- return -EOPNOTSUPP;
+ if (msg[0].len < 3) {
+ ret = -EOPNOTSUPP;
+ goto unlock;
+ }
/* demod access via firmware interface */
u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
msg[0].buf[2];
@@ -459,6 +463,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
ret = -EOPNOTSUPP;
}
+unlock:
mutex_unlock(&d->i2c_mutex);
if (ret < 0)
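The af9035 hunks above replace early returns taken while d->i2c_mutex is held with jumps to a new unlock label, so the mutex is always released. A stand-alone sketch of that error-path shape, with illustrative names:

#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(xfer_lock);

static int do_xfer(int len)
{
	int ret = 0;

	mutex_lock(&xfer_lock);

	if (len < 3) {
		ret = -EOPNOTSUPP;	/* was "return -EOPNOTSUPP;", which left the lock held */
		goto unlock;
	}

	/* ... perform the transfer ... */

unlock:
	mutex_unlock(&xfer_lock);
	return ret;
}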
diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c
index b2b27a86dfe5..4cd21bb8805e 100644
--- a/drivers/media/usb/dvb-usb/gp8psk.c
+++ b/drivers/media/usb/dvb-usb/gp8psk.c
@@ -287,7 +287,7 @@ static int gp8psk_frontend_attach(struct dvb_usb_adapter *adap)
int id = le16_to_cpu(d->udev->descriptor.idProduct);
int is_rev1;
- is_rev1 = (id == USB_PID_GENPIX_8PSK_REV_1_WARM) ? true : false;
+ is_rev1 = id == USB_PID_GENPIX_8PSK_REV_1_WARM;
adap->fe_adap[0].fe = dvb_attach(gp8psk_fe_attach,
&gp8psk_fe_ops, d, is_rev1);
diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c
index 46ed95483e22..5f5fa851ca64 100644
--- a/drivers/media/usb/gspca/cpia1.c
+++ b/drivers/media/usb/gspca/cpia1.c
@@ -18,6 +18,7 @@
#include <linux/input.h>
#include <linux/sched/signal.h>
+#include <linux/bitops.h>
#include "gspca.h"
@@ -1028,6 +1029,8 @@ static int set_flicker(struct gspca_dev *gspca_dev, int on, int apply)
sd->params.exposure.expMode = 2;
sd->exposure_status = EXPOSURE_NORMAL;
}
+ if (sd->params.exposure.gain >= BITS_PER_TYPE(currentexp))
+ return -EINVAL;
currentexp = currentexp << sd->params.exposure.gain;
sd->params.exposure.gain = 0;
/* round down current exposure to nearest value */
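The cpia1 hunk above rejects gain values that would shift currentexp by its full width or more, since such shifts are undefined behaviour in C. A stand-alone illustration of the same guard:

#include <limits.h>
#include <stdio.h>

/* Mirrors the BITS_PER_TYPE(currentexp) check: a shift count equal to or
 * larger than the operand width is undefined, so reject it up front. */
static int shift_checked(unsigned int exp, unsigned int gain, unsigned int *out)
{
	if (gain >= sizeof(exp) * CHAR_BIT)
		return -1;
	*out = exp << gain;
	return 0;
}

int main(void)
{
	unsigned int v;

	printf("gain 4:  %s\n", shift_checked(100, 4, &v) ? "rejected" : "ok");
	printf("gain 32: %s\n", shift_checked(100, 32, &v) ? "rejected" : "ok");
	return 0;
}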
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index 8a39cac76c58..9d9e14c858e6 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -279,10 +279,8 @@ static int smsusb1_load_firmware(struct usb_device *udev, int id, int board_id)
}
}
- fw_buffer = kmalloc(fw->size, GFP_KERNEL);
+ fw_buffer = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (fw_buffer) {
- memcpy(fw_buffer, fw->data, fw->size);
-
rc = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 2),
fw_buffer, fw->size, &dummy, 1000);
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index f77ebd688cde..331b8e535e5b 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -82,19 +82,3 @@ config V4L2_CCI_I2C
depends on I2C
select REGMAP_I2C
select V4L2_CCI
-
-# Used by drivers that need Videobuf modules
-config VIDEOBUF_GEN
- tristate
-
-config VIDEOBUF_DMA_SG
- tristate
- select VIDEOBUF_GEN
-
-config VIDEOBUF_VMALLOC
- tristate
- select VIDEOBUF_GEN
-
-config VIDEOBUF_DMA_CONTIG
- tristate
- select VIDEOBUF_GEN
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index be2551705755..2177b9d63a8f 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -33,10 +33,5 @@ obj-$(CONFIG_V4L2_JPEG_HELPER) += v4l2-jpeg.o
obj-$(CONFIG_V4L2_MEM2MEM_DEV) += v4l2-mem2mem.o
obj-$(CONFIG_V4L2_VP9) += v4l2-vp9.o
-obj-$(CONFIG_VIDEOBUF_DMA_CONTIG) += videobuf-dma-contig.o
-obj-$(CONFIG_VIDEOBUF_DMA_SG) += videobuf-dma-sg.o
-obj-$(CONFIG_VIDEOBUF_GEN) += videobuf-core.o
-obj-$(CONFIG_VIDEOBUF_VMALLOC) += videobuf-vmalloc.o
-
obj-$(CONFIG_VIDEO_TUNER) += tuner.o
obj-$(CONFIG_VIDEO_DEV) += v4l2-dv-timings.o videodev.o
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index c5ce9f11ad7b..3898ff7edddb 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -238,6 +238,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
sev = kvzalloc(struct_size(sev, events, elems), GFP_KERNEL);
if (!sev)
return -ENOMEM;
+ sev->elems = elems;
for (i = 0; i < elems; i++)
sev->events[i].sev = sev;
sev->type = sub->type;
@@ -245,7 +246,6 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
sev->flags = sub->flags;
sev->fh = fh;
sev->ops = ops;
- sev->elems = elems;
mutex_lock(&fh->subscribe_lock);
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index f4d9d6279094..9b1de54ce379 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1510,6 +1510,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_AV1_FRAME: descr = "AV1 Frame"; break;
case V4L2_PIX_FMT_MT2110T: descr = "Mediatek 10bit Tile Mode"; break;
case V4L2_PIX_FMT_MT2110R: descr = "Mediatek 10bit Raster Mode"; break;
+ case V4L2_PIX_FMT_HEXTILE: descr = "Hextile Compressed Format"; break;
default:
if (fmt->description[0])
return;
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 31752c06d1f0..be86b906c985 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/videodev2.h>
@@ -306,6 +307,42 @@ static int call_set_selection(struct v4l2_subdev *sd,
sd->ops->pad->set_selection(sd, state, sel);
}
+static int call_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd)
+{
+ unsigned int i;
+ int ret;
+
+ memset(fd, 0, sizeof(*fd));
+
+ ret = sd->ops->pad->get_frame_desc(sd, pad, fd);
+ if (ret)
+ return ret;
+
+ dev_dbg(sd->dev, "Frame descriptor on pad %u, type %s\n", pad,
+ fd->type == V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL ? "parallel" :
+ fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2 ? "CSI-2" :
+ "unknown");
+
+ for (i = 0; i < fd->num_entries; i++) {
+ struct v4l2_mbus_frame_desc_entry *entry = &fd->entry[i];
+ char buf[20] = "";
+
+ if (fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2)
+ WARN_ON(snprintf(buf, sizeof(buf),
+ ", vc %u, dt 0x%02x",
+ entry->bus.csi2.vc,
+ entry->bus.csi2.dt) >= sizeof(buf));
+
+ dev_dbg(sd->dev,
+ "\tstream %u, code 0x%04x, length %u, flags 0x%04x%s\n",
+ entry->stream, entry->pixelcode, entry->length,
+ entry->flags, buf);
+ }
+
+ return 0;
+}
+
static inline int check_edid(struct v4l2_subdev *sd,
struct v4l2_subdev_edid *edid)
{
@@ -359,6 +396,18 @@ static int call_s_stream(struct v4l2_subdev *sd, int enable)
{
int ret;
+ /*
+ * The .s_stream() operation must never be called to start or stop an
+ * already started or stopped subdev. Catch offenders but don't return
+ * an error yet to avoid regressions.
+ *
+ * As .s_stream() is mutually exclusive with the .enable_streams() and
+ * .disable_streams() operations, we can use the enabled_streams field
+ * to store the subdev streaming state.
+ */
+ if (WARN_ON(!!sd->enabled_streams == !!enable))
+ return 0;
+
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
if (!IS_ERR_OR_NULL(sd->privacy_led)) {
if (enable)
@@ -372,9 +421,12 @@ static int call_s_stream(struct v4l2_subdev *sd, int enable)
if (!enable && ret < 0) {
dev_warn(sd->dev, "disabling streaming failed (%d)\n", ret);
- return 0;
+ ret = 0;
}
+ if (!ret)
+ sd->enabled_streams = enable ? BIT(0) : 0;
+
return ret;
}
@@ -431,6 +483,7 @@ static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = {
.set_edid = call_set_edid,
.dv_timings_cap = call_dv_timings_cap,
.enum_dv_timings = call_enum_dv_timings,
+ .get_frame_desc = call_get_frame_desc,
.get_mbus_config = call_get_mbus_config,
};
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
deleted file mode 100644
index 606a271bdd2d..000000000000
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ /dev/null
@@ -1,1198 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * generic helper functions for handling video4linux capture buffers
- *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
- *
- * Highly based on video-buf written originally by:
- * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
- * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
- * (c) 2006 Ted Walther and John Sokol
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-
-#include <media/videobuf-core.h>
-#include <media/v4l2-common.h>
-
-#define MAGIC_BUFFER 0x20070728
-#define MAGIC_CHECK(is, should) \
- do { \
- if (unlikely((is) != (should))) { \
- printk(KERN_ERR \
- "magic mismatch: %x (expected %x)\n", \
- is, should); \
- BUG(); \
- } \
- } while (0)
-
-static int debug;
-module_param(debug, int, 0644);
-
-MODULE_DESCRIPTION("helper module to manage video4linux buffers");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
-MODULE_LICENSE("GPL");
-
-#define dprintk(level, fmt, arg...) \
- do { \
- if (debug >= level) \
- printk(KERN_DEBUG "vbuf: " fmt, ## arg); \
- } while (0)
-
-/* --------------------------------------------------------------------- */
-
-#define CALL(q, f, arg...) \
- ((q->int_ops->f) ? q->int_ops->f(arg) : 0)
-#define CALLPTR(q, f, arg...) \
- ((q->int_ops->f) ? q->int_ops->f(arg) : NULL)
-
-struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
-{
- struct videobuf_buffer *vb;
-
- BUG_ON(q->msize < sizeof(*vb));
-
- if (!q->int_ops || !q->int_ops->alloc_vb) {
- printk(KERN_ERR "No specific ops defined!\n");
- BUG();
- }
-
- vb = q->int_ops->alloc_vb(q->msize);
- if (NULL != vb) {
- init_waitqueue_head(&vb->done);
- vb->magic = MAGIC_BUFFER;
- }
-
- return vb;
-}
-EXPORT_SYMBOL_GPL(videobuf_alloc_vb);
-
-static int state_neither_active_nor_queued(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
-{
- unsigned long flags;
- bool rc;
-
- spin_lock_irqsave(q->irqlock, flags);
- rc = vb->state != VIDEOBUF_ACTIVE && vb->state != VIDEOBUF_QUEUED;
- spin_unlock_irqrestore(q->irqlock, flags);
- return rc;
-};
-
-int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
- int non_blocking, int intr)
-{
- bool is_ext_locked;
- int ret = 0;
-
- MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
-
- if (non_blocking) {
- if (state_neither_active_nor_queued(q, vb))
- return 0;
- return -EAGAIN;
- }
-
- is_ext_locked = q->ext_lock && mutex_is_locked(q->ext_lock);
-
- /* Release vdev lock to prevent this wait from blocking outside access to
- the device. */
- if (is_ext_locked)
- mutex_unlock(q->ext_lock);
- if (intr)
- ret = wait_event_interruptible(vb->done,
- state_neither_active_nor_queued(q, vb));
- else
- wait_event(vb->done, state_neither_active_nor_queued(q, vb));
- /* Relock */
- if (is_ext_locked)
- mutex_lock(q->ext_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(videobuf_waiton);
-
-int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
- struct v4l2_framebuffer *fbuf)
-{
- MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
- MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- return CALL(q, iolock, q, vb, fbuf);
-}
-EXPORT_SYMBOL_GPL(videobuf_iolock);
-
-void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
- struct videobuf_buffer *buf)
-{
- if (q->int_ops->vaddr)
- return q->int_ops->vaddr(buf);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr);
-
-/* --------------------------------------------------------------------- */
-
-
-void videobuf_queue_core_init(struct videobuf_queue *q,
- const struct videobuf_queue_ops *ops,
- struct device *dev,
- spinlock_t *irqlock,
- enum v4l2_buf_type type,
- enum v4l2_field field,
- unsigned int msize,
- void *priv,
- struct videobuf_qtype_ops *int_ops,
- struct mutex *ext_lock)
-{
- BUG_ON(!q);
- memset(q, 0, sizeof(*q));
- q->irqlock = irqlock;
- q->ext_lock = ext_lock;
- q->dev = dev;
- q->type = type;
- q->field = field;
- q->msize = msize;
- q->ops = ops;
- q->priv_data = priv;
- q->int_ops = int_ops;
-
- /* All buffer operations are mandatory */
- BUG_ON(!q->ops->buf_setup);
- BUG_ON(!q->ops->buf_prepare);
- BUG_ON(!q->ops->buf_queue);
- BUG_ON(!q->ops->buf_release);
-
- /* Lock is mandatory for queue_cancel to work */
- BUG_ON(!irqlock);
-
- /* Having implementations for abstract methods is mandatory */
- BUG_ON(!q->int_ops);
-
- mutex_init(&q->vb_lock);
- init_waitqueue_head(&q->wait);
- INIT_LIST_HEAD(&q->stream);
-}
-EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
-
-/* Locking: Only used by bttv; unsafe, find a way to remove */
-int videobuf_queue_is_busy(struct videobuf_queue *q)
-{
- int i;
-
- MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- if (q->streaming) {
- dprintk(1, "busy: streaming active\n");
- return 1;
- }
- if (q->reading) {
- dprintk(1, "busy: pending read #1\n");
- return 1;
- }
- if (q->read_buf) {
- dprintk(1, "busy: pending read #2\n");
- return 1;
- }
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (NULL == q->bufs[i])
- continue;
- if (q->bufs[i]->map) {
- dprintk(1, "busy: buffer #%d mapped\n", i);
- return 1;
- }
- if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
- dprintk(1, "busy: buffer #%d queued\n", i);
- return 1;
- }
- if (q->bufs[i]->state == VIDEOBUF_ACTIVE) {
- dprintk(1, "busy: buffer #%d active\n", i);
- return 1;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
-
-/*
- * __videobuf_free() - free all the buffers and their control structures
- *
- * This function can only be called if streaming/reading is off, i.e. no buffers
- * are under control of the driver.
- */
-/* Locking: Caller holds q->vb_lock */
-static int __videobuf_free(struct videobuf_queue *q)
-{
- int i;
-
- dprintk(1, "%s\n", __func__);
- if (!q)
- return 0;
-
- if (q->streaming || q->reading) {
- dprintk(1, "Cannot free buffers when streaming or reading\n");
- return -EBUSY;
- }
-
- MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- for (i = 0; i < VIDEO_MAX_FRAME; i++)
- if (q->bufs[i] && q->bufs[i]->map) {
- dprintk(1, "Cannot free mmapped buffers\n");
- return -EBUSY;
- }
-
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (NULL == q->bufs[i])
- continue;
- q->ops->buf_release(q, q->bufs[i]);
- kfree(q->bufs[i]);
- q->bufs[i] = NULL;
- }
-
- return 0;
-}
-
-/* Locking: Caller holds q->vb_lock */
-void videobuf_queue_cancel(struct videobuf_queue *q)
-{
- unsigned long flags = 0;
- int i;
-
- q->streaming = 0;
- q->reading = 0;
- wake_up_interruptible_sync(&q->wait);
-
- /* remove queued buffers from list */
- spin_lock_irqsave(q->irqlock, flags);
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (NULL == q->bufs[i])
- continue;
- if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
- list_del(&q->bufs[i]->queue);
- q->bufs[i]->state = VIDEOBUF_ERROR;
- wake_up_all(&q->bufs[i]->done);
- }
- }
- spin_unlock_irqrestore(q->irqlock, flags);
-
- /* free all buffers + clear queue */
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (NULL == q->bufs[i])
- continue;
- q->ops->buf_release(q, q->bufs[i]);
- }
- INIT_LIST_HEAD(&q->stream);
-}
-EXPORT_SYMBOL_GPL(videobuf_queue_cancel);
-
-/* --------------------------------------------------------------------- */
-
-/* Locking: Caller holds q->vb_lock */
-enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
-{
- enum v4l2_field field = q->field;
-
- BUG_ON(V4L2_FIELD_ANY == field);
-
- if (V4L2_FIELD_ALTERNATE == field) {
- if (V4L2_FIELD_TOP == q->last) {
- field = V4L2_FIELD_BOTTOM;
- q->last = V4L2_FIELD_BOTTOM;
- } else {
- field = V4L2_FIELD_TOP;
- q->last = V4L2_FIELD_TOP;
- }
- }
- return field;
-}
-EXPORT_SYMBOL_GPL(videobuf_next_field);
-
-/* Locking: Caller holds q->vb_lock */
-static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
- struct videobuf_buffer *vb, enum v4l2_buf_type type)
-{
- MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
- MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- b->index = vb->i;
- b->type = type;
-
- b->memory = vb->memory;
- switch (b->memory) {
- case V4L2_MEMORY_MMAP:
- b->m.offset = vb->boff;
- b->length = vb->bsize;
- break;
- case V4L2_MEMORY_USERPTR:
- b->m.userptr = vb->baddr;
- b->length = vb->bsize;
- break;
- case V4L2_MEMORY_OVERLAY:
- b->m.offset = vb->boff;
- break;
- case V4L2_MEMORY_DMABUF:
- /* DMABUF is not handled in videobuf framework */
- break;
- }
-
- b->flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- if (vb->map)
- b->flags |= V4L2_BUF_FLAG_MAPPED;
-
- switch (vb->state) {
- case VIDEOBUF_PREPARED:
- case VIDEOBUF_QUEUED:
- case VIDEOBUF_ACTIVE:
- b->flags |= V4L2_BUF_FLAG_QUEUED;
- break;
- case VIDEOBUF_ERROR:
- b->flags |= V4L2_BUF_FLAG_ERROR;
- fallthrough;
- case VIDEOBUF_DONE:
- b->flags |= V4L2_BUF_FLAG_DONE;
- break;
- case VIDEOBUF_NEEDS_INIT:
- case VIDEOBUF_IDLE:
- /* nothing */
- break;
- }
-
- b->field = vb->field;
- v4l2_buffer_set_timestamp(b, vb->ts);
- b->bytesused = vb->size;
- b->sequence = vb->field_count >> 1;
-}
-
-int videobuf_mmap_free(struct videobuf_queue *q)
-{
- int ret;
- videobuf_queue_lock(q);
- ret = __videobuf_free(q);
- videobuf_queue_unlock(q);
- return ret;
-}
-EXPORT_SYMBOL_GPL(videobuf_mmap_free);
-
-/* Locking: Caller holds q->vb_lock */
-int __videobuf_mmap_setup(struct videobuf_queue *q,
- unsigned int bcount, unsigned int bsize,
- enum v4l2_memory memory)
-{
- unsigned int i;
- int err;
-
- MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- err = __videobuf_free(q);
- if (0 != err)
- return err;
-
- /* Allocate and initialize buffers */
- for (i = 0; i < bcount; i++) {
- q->bufs[i] = videobuf_alloc_vb(q);
-
- if (NULL == q->bufs[i])
- break;
-
- q->bufs[i]->i = i;
- q->bufs[i]->memory = memory;
- q->bufs[i]->bsize = bsize;
- switch (memory) {
- case V4L2_MEMORY_MMAP:
- q->bufs[i]->boff = PAGE_ALIGN(bsize) * i;
- break;
- case V4L2_MEMORY_USERPTR:
- case V4L2_MEMORY_OVERLAY:
- case V4L2_MEMORY_DMABUF:
- /* nothing */
- break;
- }
- }
-
- if (!i)
- return -ENOMEM;
-
- dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize);
-
- return i;
-}
-EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);
-
-int videobuf_mmap_setup(struct videobuf_queue *q,
- unsigned int bcount, unsigned int bsize,
- enum v4l2_memory memory)
-{
- int ret;
- videobuf_queue_lock(q);
- ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
- videobuf_queue_unlock(q);
- return ret;
-}
-EXPORT_SYMBOL_GPL(videobuf_mmap_setup);
-
-int videobuf_reqbufs(struct videobuf_queue *q,
- struct v4l2_requestbuffers *req)
-{
- unsigned int size, count;
- int retval;
-
- if (req->memory != V4L2_MEMORY_MMAP &&
- req->memory != V4L2_MEMORY_USERPTR &&
- req->memory != V4L2_MEMORY_OVERLAY) {
- dprintk(1, "reqbufs: memory type invalid\n");
- return -EINVAL;
- }
-
- videobuf_queue_lock(q);
- if (req->type != q->type) {
- dprintk(1, "reqbufs: queue type invalid\n");
- retval = -EINVAL;
- goto done;
- }
-
- if (q->streaming) {
- dprintk(1, "reqbufs: streaming already exists\n");
- retval = -EBUSY;
- goto done;
- }
- if (!list_empty(&q->stream)) {
- dprintk(1, "reqbufs: stream running\n");
- retval = -EBUSY;
- goto done;
- }
-
- if (req->count == 0) {
- dprintk(1, "reqbufs: count invalid (%d)\n", req->count);
- retval = __videobuf_free(q);
- goto done;
- }
-
- count = req->count;
- if (count > VIDEO_MAX_FRAME)
- count = VIDEO_MAX_FRAME;
- size = 0;
- q->ops->buf_setup(q, &count, &size);
- dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
- count, size,
- (unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT));
-
- retval = __videobuf_mmap_setup(q, count, size, req->memory);
- if (retval < 0) {
- dprintk(1, "reqbufs: mmap setup returned %d\n", retval);
- goto done;
- }
-
- req->count = retval;
- retval = 0;
-
- done:
- videobuf_queue_unlock(q);
- return retval;
-}
-EXPORT_SYMBOL_GPL(videobuf_reqbufs);
-
-int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
-{
- int ret = -EINVAL;
-
- videobuf_queue_lock(q);
- if (unlikely(b->type != q->type)) {
- dprintk(1, "querybuf: Wrong type.\n");
- goto done;
- }
- if (unlikely(b->index >= VIDEO_MAX_FRAME)) {
- dprintk(1, "querybuf: index out of range.\n");
- goto done;
- }
- if (unlikely(NULL == q->bufs[b->index])) {
- dprintk(1, "querybuf: buffer is null.\n");
- goto done;
- }
-
- videobuf_status(q, b, q->bufs[b->index], q->type);
-
- ret = 0;
-done:
- videobuf_queue_unlock(q);
- return ret;
-}
-EXPORT_SYMBOL_GPL(videobuf_querybuf);
-
-int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
-{
- struct videobuf_buffer *buf;
- enum v4l2_field field;
- unsigned long flags = 0;
- int retval;
-
- MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- if (b->memory == V4L2_MEMORY_MMAP)
- mmap_read_lock(current->mm);
-
- videobuf_queue_lock(q);
- retval = -EBUSY;
- if (q->reading) {
- dprintk(1, "qbuf: Reading running...\n");
- goto done;
- }
- retval = -EINVAL;
- if (b->type != q->type) {
- dprintk(1, "qbuf: Wrong type.\n");
- goto done;
- }
- if (b->index >= VIDEO_MAX_FRAME) {
- dprintk(1, "qbuf: index out of range.\n");
- goto done;
- }
- buf = q->bufs[b->index];
- if (NULL == buf) {
- dprintk(1, "qbuf: buffer is null.\n");
- goto done;
- }
- MAGIC_CHECK(buf->magic, MAGIC_BUFFER);
- if (buf->memory != b->memory) {
- dprintk(1, "qbuf: memory type is wrong.\n");
- goto done;
- }
- if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {
- dprintk(1, "qbuf: buffer is already queued or active.\n");
- goto done;
- }
-
- switch (b->memory) {
- case V4L2_MEMORY_MMAP:
- if (0 == buf->baddr) {
- dprintk(1, "qbuf: mmap requested but buffer addr is zero!\n");
- goto done;
- }
- if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
- || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
- || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT
- || q->type == V4L2_BUF_TYPE_SDR_OUTPUT) {
- buf->size = b->bytesused;
- buf->field = b->field;
- buf->ts = v4l2_buffer_get_timestamp(b);
- }
- break;
- case V4L2_MEMORY_USERPTR:
- if (b->length < buf->bsize) {
- dprintk(1, "qbuf: buffer length is not enough\n");
- goto done;
- }
- if (VIDEOBUF_NEEDS_INIT != buf->state &&
- buf->baddr != b->m.userptr)
- q->ops->buf_release(q, buf);
- buf->baddr = b->m.userptr;
- break;
- case V4L2_MEMORY_OVERLAY:
- buf->boff = b->m.offset;
- break;
- default:
- dprintk(1, "qbuf: wrong memory type\n");
- goto done;
- }
-
- dprintk(1, "qbuf: requesting next field\n");
- field = videobuf_next_field(q);
- retval = q->ops->buf_prepare(q, buf, field);
- if (0 != retval) {
- dprintk(1, "qbuf: buffer_prepare returned %d\n", retval);
- goto done;
- }
-
- list_add_tail(&buf->stream, &q->stream);
- if (q->streaming) {
- spin_lock_irqsave(q->irqlock, flags);
- q->ops->buf_queue(q, buf);
- spin_unlock_irqrestore(q->irqlock, flags);
- }
- dprintk(1, "qbuf: succeeded\n");
- retval = 0;
- wake_up_interruptible_sync(&q->wait);
-
-done:
- videobuf_queue_unlock(q);
-
- if (b->memory == V4L2_MEMORY_MMAP)
- mmap_read_unlock(current->mm);
-
- return retval;
-}
-EXPORT_SYMBOL_GPL(videobuf_qbuf);
-
-/* Locking: Caller holds q->vb_lock */
-static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
-{
- int retval;
-
-checks:
- if (!q->streaming) {
- dprintk(1, "next_buffer: Not streaming\n");
- retval = -EINVAL;
- goto done;
- }
-
- if (list_empty(&q->stream)) {
- if (noblock) {
- retval = -EAGAIN;
- dprintk(2, "next_buffer: no buffers to dequeue\n");
- goto done;
- } else {
- dprintk(2, "next_buffer: waiting on buffer\n");
-
- /* Drop lock to avoid deadlock with qbuf */
- videobuf_queue_unlock(q);
-
- /* Checking list_empty and streaming without locks is safe
- * here because we jump back to "checks" and re-validate
- * while holding the lock before proceeding */
- retval = wait_event_interruptible(q->wait,
- !list_empty(&q->stream) || !q->streaming);
- videobuf_queue_lock(q);
-
- if (retval)
- goto done;
-
- goto checks;
- }
- }
-
- retval = 0;
-
-done:
- return retval;
-}
-
-/* Locking: Caller holds q->vb_lock */
-static int stream_next_buffer(struct videobuf_queue *q,
- struct videobuf_buffer **vb, int nonblocking)
-{
- int retval;
- struct videobuf_buffer *buf = NULL;
-
- retval = stream_next_buffer_check_queue(q, nonblocking);
- if (retval)
- goto done;
-
- buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
- retval = videobuf_waiton(q, buf, nonblocking, 1);
- if (retval < 0)
- goto done;
-
- *vb = buf;
-done:
- return retval;
-}
-
-int videobuf_dqbuf(struct videobuf_queue *q,
- struct v4l2_buffer *b, int nonblocking)
-{
- struct videobuf_buffer *buf = NULL;
- int retval;
-
- MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- memset(b, 0, sizeof(*b));
- videobuf_queue_lock(q);
-
- retval = stream_next_buffer(q, &buf, nonblocking);
- if (retval < 0) {
- dprintk(1, "dqbuf: next_buffer error: %i\n", retval);
- goto done;
- }
-
- switch (buf->state) {
- case VIDEOBUF_ERROR:
- dprintk(1, "dqbuf: state is error\n");
- break;
- case VIDEOBUF_DONE:
- dprintk(1, "dqbuf: state is done\n");
- break;
- default:
- dprintk(1, "dqbuf: state invalid\n");
- retval = -EINVAL;
- goto done;
- }
- CALL(q, sync, q, buf);
- videobuf_status(q, b, buf, q->type);
- list_del(&buf->stream);
- buf->state = VIDEOBUF_IDLE;
- b->flags &= ~V4L2_BUF_FLAG_DONE;
-done:
- videobuf_queue_unlock(q);
- return retval;
-}
-EXPORT_SYMBOL_GPL(videobuf_dqbuf);
-
-int videobuf_streamon(struct videobuf_queue *q)
-{
- struct videobuf_buffer *buf;
- unsigned long flags = 0;
- int retval;
-
- videobuf_queue_lock(q);
- retval = -EBUSY;
- if (q->reading)
- goto done;
- retval = 0;
- if (q->streaming)
- goto done;
- q->streaming = 1;
- spin_lock_irqsave(q->irqlock, flags);
- list_for_each_entry(buf, &q->stream, stream)
- if (buf->state == VIDEOBUF_PREPARED)
- q->ops->buf_queue(q, buf);
- spin_unlock_irqrestore(q->irqlock, flags);
-
- wake_up_interruptible_sync(&q->wait);
-done:
- videobuf_queue_unlock(q);
- return retval;
-}
-EXPORT_SYMBOL_GPL(videobuf_streamon);
-
-/* Locking: Caller holds q->vb_lock */
-static int __videobuf_streamoff(struct videobuf_queue *q)
-{
- if (!q->streaming)
- return -EINVAL;
-
- videobuf_queue_cancel(q);
-
- return 0;
-}
-
-int videobuf_streamoff(struct videobuf_queue *q)
-{
- int retval;
-
- videobuf_queue_lock(q);
- retval = __videobuf_streamoff(q);
- videobuf_queue_unlock(q);
-
- return retval;
-}
-EXPORT_SYMBOL_GPL(videobuf_streamoff);
-
-/* Locking: Caller holds q->vb_lock */
-static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
- char __user *data,
- size_t count, loff_t *ppos)
-{
- enum v4l2_field field;
- unsigned long flags = 0;
- int retval;
-
- MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- /* setup stuff */
- q->read_buf = videobuf_alloc_vb(q);
- if (NULL == q->read_buf)
- return -ENOMEM;
-
- q->read_buf->memory = V4L2_MEMORY_USERPTR;
- q->read_buf->baddr = (unsigned long)data;
- q->read_buf->bsize = count;
-
- field = videobuf_next_field(q);
- retval = q->ops->buf_prepare(q, q->read_buf, field);
- if (0 != retval)
- goto done;
-
- /* start capture & wait */
- spin_lock_irqsave(q->irqlock, flags);
- q->ops->buf_queue(q, q->read_buf);
- spin_unlock_irqrestore(q->irqlock, flags);
- retval = videobuf_waiton(q, q->read_buf, 0, 0);
- if (0 == retval) {
- CALL(q, sync, q, q->read_buf);
- if (VIDEOBUF_ERROR == q->read_buf->state)
- retval = -EIO;
- else
- retval = q->read_buf->size;
- }
-
-done:
- /* cleanup */
- q->ops->buf_release(q, q->read_buf);
- kfree(q->read_buf);
- q->read_buf = NULL;
- return retval;
-}
-
-static int __videobuf_copy_to_user(struct videobuf_queue *q,
- struct videobuf_buffer *buf,
- char __user *data, size_t count,
- int nonblocking)
-{
- void *vaddr = CALLPTR(q, vaddr, buf);
-
- /* copy to userspace */
- if (count > buf->size - q->read_off)
- count = buf->size - q->read_off;
-
- if (copy_to_user(data, vaddr + q->read_off, count))
- return -EFAULT;
-
- return count;
-}
-
-static int __videobuf_copy_stream(struct videobuf_queue *q,
- struct videobuf_buffer *buf,
- char __user *data, size_t count, size_t pos,
- int vbihack, int nonblocking)
-{
- unsigned int *fc = CALLPTR(q, vaddr, buf);
-
- if (vbihack) {
- /* dirty, undocumented hack -- pass the frame counter
- * within the last four bytes of each vbi data block.
- * We need that one to maintain backward compatibility
- * with all vbi decoding software out there ... */
- fc += (buf->size >> 2) - 1;
- *fc = buf->field_count >> 1;
- dprintk(1, "vbihack: %d\n", *fc);
- }
-
- /* copy stuff using the common method */
- count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);
-
- if ((count == -EFAULT) && (pos == 0))
- return -EFAULT;
-
- return count;
-}
-
-ssize_t videobuf_read_one(struct videobuf_queue *q,
- char __user *data, size_t count, loff_t *ppos,
- int nonblocking)
-{
- enum v4l2_field field;
- unsigned long flags = 0;
- unsigned size = 0, nbufs = 1;
- int retval;
-
- MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- videobuf_queue_lock(q);
-
- q->ops->buf_setup(q, &nbufs, &size);
-
- if (NULL == q->read_buf &&
- count >= size &&
- !nonblocking) {
- retval = videobuf_read_zerocopy(q, data, count, ppos);
- if (retval >= 0 || retval == -EIO)
- /* ok, all done */
- goto done;
- /* fallback to kernel bounce buffer on failures */
- }
-
- if (NULL == q->read_buf) {
- /* need to capture a new frame */
- retval = -ENOMEM;
- q->read_buf = videobuf_alloc_vb(q);
-
- dprintk(1, "video alloc=0x%p\n", q->read_buf);
- if (NULL == q->read_buf)
- goto done;
- q->read_buf->memory = V4L2_MEMORY_USERPTR;
- q->read_buf->bsize = count; /* preferred size */
- field = videobuf_next_field(q);
- retval = q->ops->buf_prepare(q, q->read_buf, field);
-
- if (0 != retval) {
- kfree(q->read_buf);
- q->read_buf = NULL;
- goto done;
- }
-
- spin_lock_irqsave(q->irqlock, flags);
- q->ops->buf_queue(q, q->read_buf);
- spin_unlock_irqrestore(q->irqlock, flags);
-
- q->read_off = 0;
- }
-
- /* wait until capture is done */
- retval = videobuf_waiton(q, q->read_buf, nonblocking, 1);
- if (0 != retval)
- goto done;
-
- CALL(q, sync, q, q->read_buf);
-
- if (VIDEOBUF_ERROR == q->read_buf->state) {
- /* catch I/O errors */
- q->ops->buf_release(q, q->read_buf);
- kfree(q->read_buf);
- q->read_buf = NULL;
- retval = -EIO;
- goto done;
- }
-
- /* Copy to userspace */
- retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
- if (retval < 0)
- goto done;
-
- q->read_off += retval;
- if (q->read_off == q->read_buf->size) {
- /* all data copied, cleanup */
- q->ops->buf_release(q, q->read_buf);
- kfree(q->read_buf);
- q->read_buf = NULL;
- }
-
-done:
- videobuf_queue_unlock(q);
- return retval;
-}
-EXPORT_SYMBOL_GPL(videobuf_read_one);
-
-/* Locking: Caller holds q->vb_lock */
-static int __videobuf_read_start(struct videobuf_queue *q)
-{
- enum v4l2_field field;
- unsigned long flags = 0;
- unsigned int count = 0, size = 0;
- int err, i;
-
- q->ops->buf_setup(q, &count, &size);
- if (count < 2)
- count = 2;
- if (count > VIDEO_MAX_FRAME)
- count = VIDEO_MAX_FRAME;
- size = PAGE_ALIGN(size);
-
- err = __videobuf_mmap_setup(q, count, size, V4L2_MEMORY_USERPTR);
- if (err < 0)
- return err;
-
- count = err;
-
- for (i = 0; i < count; i++) {
- field = videobuf_next_field(q);
- err = q->ops->buf_prepare(q, q->bufs[i], field);
- if (err)
- return err;
- list_add_tail(&q->bufs[i]->stream, &q->stream);
- }
- spin_lock_irqsave(q->irqlock, flags);
- for (i = 0; i < count; i++)
- q->ops->buf_queue(q, q->bufs[i]);
- spin_unlock_irqrestore(q->irqlock, flags);
- q->reading = 1;
- return 0;
-}
-
-static void __videobuf_read_stop(struct videobuf_queue *q)
-{
- int i;
-
- videobuf_queue_cancel(q);
- __videobuf_free(q);
- INIT_LIST_HEAD(&q->stream);
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (NULL == q->bufs[i])
- continue;
- kfree(q->bufs[i]);
- q->bufs[i] = NULL;
- }
- q->read_buf = NULL;
-}
-
-int videobuf_read_start(struct videobuf_queue *q)
-{
- int rc;
-
- videobuf_queue_lock(q);
- rc = __videobuf_read_start(q);
- videobuf_queue_unlock(q);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(videobuf_read_start);
-
-void videobuf_read_stop(struct videobuf_queue *q)
-{
- videobuf_queue_lock(q);
- __videobuf_read_stop(q);
- videobuf_queue_unlock(q);
-}
-EXPORT_SYMBOL_GPL(videobuf_read_stop);
-
-void videobuf_stop(struct videobuf_queue *q)
-{
- videobuf_queue_lock(q);
-
- if (q->streaming)
- __videobuf_streamoff(q);
-
- if (q->reading)
- __videobuf_read_stop(q);
-
- videobuf_queue_unlock(q);
-}
-EXPORT_SYMBOL_GPL(videobuf_stop);
-
-ssize_t videobuf_read_stream(struct videobuf_queue *q,
- char __user *data, size_t count, loff_t *ppos,
- int vbihack, int nonblocking)
-{
- int rc, retval;
- unsigned long flags = 0;
-
- MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- dprintk(2, "%s\n", __func__);
- videobuf_queue_lock(q);
- retval = -EBUSY;
- if (q->streaming)
- goto done;
- if (!q->reading) {
- retval = __videobuf_read_start(q);
- if (retval < 0)
- goto done;
- }
-
- retval = 0;
- while (count > 0) {
- /* get / wait for data */
- if (NULL == q->read_buf) {
- q->read_buf = list_entry(q->stream.next,
- struct videobuf_buffer,
- stream);
- list_del(&q->read_buf->stream);
- q->read_off = 0;
- }
- rc = videobuf_waiton(q, q->read_buf, nonblocking, 1);
- if (rc < 0) {
- if (0 == retval)
- retval = rc;
- break;
- }
-
- if (q->read_buf->state == VIDEOBUF_DONE) {
- rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
- retval, vbihack, nonblocking);
- if (rc < 0) {
- retval = rc;
- break;
- }
- retval += rc;
- count -= rc;
- q->read_off += rc;
- } else {
- /* some error */
- q->read_off = q->read_buf->size;
- if (0 == retval)
- retval = -EIO;
- }
-
- /* requeue buffer when done with copying */
- if (q->read_off == q->read_buf->size) {
- list_add_tail(&q->read_buf->stream,
- &q->stream);
- spin_lock_irqsave(q->irqlock, flags);
- q->ops->buf_queue(q, q->read_buf);
- spin_unlock_irqrestore(q->irqlock, flags);
- q->read_buf = NULL;
- }
- if (retval < 0)
- break;
- }
-
-done:
- videobuf_queue_unlock(q);
- return retval;
-}
-EXPORT_SYMBOL_GPL(videobuf_read_stream);
-
-__poll_t videobuf_poll_stream(struct file *file,
- struct videobuf_queue *q,
- poll_table *wait)
-{
- __poll_t req_events = poll_requested_events(wait);
- struct videobuf_buffer *buf = NULL;
- __poll_t rc = 0;
-
- videobuf_queue_lock(q);
- if (q->streaming) {
- if (!list_empty(&q->stream))
- buf = list_entry(q->stream.next,
- struct videobuf_buffer, stream);
- } else if (req_events & (EPOLLIN | EPOLLRDNORM)) {
- if (!q->reading)
- __videobuf_read_start(q);
- if (!q->reading) {
- rc = EPOLLERR;
- } else if (NULL == q->read_buf) {
- q->read_buf = list_entry(q->stream.next,
- struct videobuf_buffer,
- stream);
- list_del(&q->read_buf->stream);
- q->read_off = 0;
- }
- buf = q->read_buf;
- }
- if (buf)
- poll_wait(file, &buf->done, wait);
- else
- rc = EPOLLERR;
-
- if (0 == rc) {
- if (buf->state == VIDEOBUF_DONE ||
- buf->state == VIDEOBUF_ERROR) {
- switch (q->type) {
- case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- case V4L2_BUF_TYPE_VBI_OUTPUT:
- case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
- case V4L2_BUF_TYPE_SDR_OUTPUT:
- rc = EPOLLOUT | EPOLLWRNORM;
- break;
- default:
- rc = EPOLLIN | EPOLLRDNORM;
- break;
- }
- }
- }
- videobuf_queue_unlock(q);
- return rc;
-}
-EXPORT_SYMBOL_GPL(videobuf_poll_stream);
-
-int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
-{
- int rc = -EINVAL;
- int i;
-
- MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
- dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
- return -EINVAL;
- }
-
- videobuf_queue_lock(q);
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- struct videobuf_buffer *buf = q->bufs[i];
-
- if (buf && buf->memory == V4L2_MEMORY_MMAP &&
- buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
- rc = CALL(q, mmap_mapper, q, buf, vma);
- break;
- }
- }
- videobuf_queue_unlock(q);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
deleted file mode 100644
index 4c2ec7a0d804..000000000000
--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
+++ /dev/null
@@ -1,402 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * helper functions for physically contiguous capture buffers
- *
- * The functions support hardware lacking scatter gather support
- * (i.e. the buffers must be linear in physical memory)
- *
- * Copyright (c) 2008 Magnus Damm
- *
- * Based on videobuf-vmalloc.c,
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/dma-mapping.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <media/videobuf-dma-contig.h>
-
-struct videobuf_dma_contig_memory {
- u32 magic;
- void *vaddr;
- dma_addr_t dma_handle;
- unsigned long size;
-};
-
-#define MAGIC_DC_MEM 0x0733ac61
-#define MAGIC_CHECK(is, should) \
- if (unlikely((is) != (should))) { \
- pr_err("magic mismatch: %x expected %x\n", (is), (should)); \
- BUG(); \
- }
-
-static int __videobuf_dc_alloc(struct device *dev,
- struct videobuf_dma_contig_memory *mem,
- unsigned long size)
-{
- mem->size = size;
- mem->vaddr = dma_alloc_coherent(dev, mem->size, &mem->dma_handle,
- GFP_KERNEL);
- if (!mem->vaddr) {
- dev_err(dev, "memory alloc size %ld failed\n", mem->size);
- return -ENOMEM;
- }
-
- dev_dbg(dev, "dma mapped data is at %p (%ld)\n", mem->vaddr, mem->size);
-
- return 0;
-}
-
-static void __videobuf_dc_free(struct device *dev,
- struct videobuf_dma_contig_memory *mem)
-{
- dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);
-
- mem->vaddr = NULL;
-}
-
-static void videobuf_vm_open(struct vm_area_struct *vma)
-{
- struct videobuf_mapping *map = vma->vm_private_data;
-
- dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
- map, map->count, vma->vm_start, vma->vm_end);
-
- map->count++;
-}
-
-static void videobuf_vm_close(struct vm_area_struct *vma)
-{
- struct videobuf_mapping *map = vma->vm_private_data;
- struct videobuf_queue *q = map->q;
- int i;
-
- dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
- map, map->count, vma->vm_start, vma->vm_end);
-
- map->count--;
- if (0 == map->count) {
- struct videobuf_dma_contig_memory *mem;
-
- dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
- videobuf_queue_lock(q);
-
- /* We need first to cancel streams, before unmapping */
- if (q->streaming)
- videobuf_queue_cancel(q);
-
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (NULL == q->bufs[i])
- continue;
-
- if (q->bufs[i]->map != map)
- continue;
-
- mem = q->bufs[i]->priv;
- if (mem) {
- /* This callback is called only if the kernel has
- allocated memory and this memory is mmapped.
- In this case, the memory should be freed
- in order to unmap it.
- */
-
- MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
-
- /* vfree is not atomic - can't be
- called with IRQ's disabled
- */
- dev_dbg(q->dev, "buf[%d] freeing %p\n",
- i, mem->vaddr);
-
- __videobuf_dc_free(q->dev, mem);
- mem->vaddr = NULL;
- }
-
- q->bufs[i]->map = NULL;
- q->bufs[i]->baddr = 0;
- }
-
- kfree(map);
-
- videobuf_queue_unlock(q);
- }
-}
-
-static const struct vm_operations_struct videobuf_vm_ops = {
- .open = videobuf_vm_open,
- .close = videobuf_vm_close,
-};
-
-/**
- * videobuf_dma_contig_user_put() - reset pointer to user space buffer
- * @mem: per-buffer private videobuf-dma-contig data
- *
- * This function resets the user space pointer
- */
-static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
-{
- mem->dma_handle = 0;
- mem->size = 0;
-}
-
-/**
- * videobuf_dma_contig_user_get() - setup user space memory pointer
- * @mem: per-buffer private videobuf-dma-contig data
- * @vb: video buffer to map
- *
- * This function validates and sets up a pointer to user space memory.
- * Only physically contiguous pfn-mapped memory is accepted.
- *
- * Returns 0 if successful.
- */
-static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
- struct videobuf_buffer *vb)
-{
- unsigned long untagged_baddr = untagged_addr(vb->baddr);
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long prev_pfn, this_pfn;
- unsigned long pages_done, user_address;
- unsigned int offset;
- int ret;
-
- offset = untagged_baddr & ~PAGE_MASK;
- mem->size = PAGE_ALIGN(vb->size + offset);
- ret = -EINVAL;
-
- mmap_read_lock(mm);
-
- vma = find_vma(mm, untagged_baddr);
- if (!vma)
- goto out_up;
-
- if ((untagged_baddr + mem->size) > vma->vm_end)
- goto out_up;
-
- pages_done = 0;
- prev_pfn = 0; /* kill warning */
- user_address = untagged_baddr;
-
- while (pages_done < (mem->size >> PAGE_SHIFT)) {
- ret = follow_pfn(vma, user_address, &this_pfn);
- if (ret)
- break;
-
- if (pages_done == 0)
- mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
- else if (this_pfn != (prev_pfn + 1))
- ret = -EFAULT;
-
- if (ret)
- break;
-
- prev_pfn = this_pfn;
- user_address += PAGE_SIZE;
- pages_done++;
- }
-
-out_up:
- mmap_read_unlock(current->mm);
-
- return ret;
-}
-
-static struct videobuf_buffer *__videobuf_alloc(size_t size)
-{
- struct videobuf_dma_contig_memory *mem;
- struct videobuf_buffer *vb;
-
- vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
- if (vb) {
- vb->priv = ((char *)vb) + size;
- mem = vb->priv;
- mem->magic = MAGIC_DC_MEM;
- }
-
- return vb;
-}
-
-static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
-{
- struct videobuf_dma_contig_memory *mem = buf->priv;
-
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
-
- return mem->vaddr;
-}
-
-static int __videobuf_iolock(struct videobuf_queue *q,
- struct videobuf_buffer *vb,
- struct v4l2_framebuffer *fbuf)
-{
- struct videobuf_dma_contig_memory *mem = vb->priv;
-
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
-
- switch (vb->memory) {
- case V4L2_MEMORY_MMAP:
- dev_dbg(q->dev, "%s memory method MMAP\n", __func__);
-
- /* All handling should be done by __videobuf_mmap_mapper() */
- if (!mem->vaddr) {
- dev_err(q->dev, "memory is not allocated/mmapped.\n");
- return -EINVAL;
- }
- break;
- case V4L2_MEMORY_USERPTR:
- dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);
-
- /* handle pointer from user space */
- if (vb->baddr)
- return videobuf_dma_contig_user_get(mem, vb);
-
- /* allocate memory for the read() method */
- if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size)))
- return -ENOMEM;
- break;
- case V4L2_MEMORY_OVERLAY:
- default:
- dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n", __func__);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int __videobuf_mmap_mapper(struct videobuf_queue *q,
- struct videobuf_buffer *buf,
- struct vm_area_struct *vma)
-{
- struct videobuf_dma_contig_memory *mem;
- struct videobuf_mapping *map;
- int retval;
-
- dev_dbg(q->dev, "%s\n", __func__);
-
- /* create mapping + update buffer list */
- map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
- if (!map)
- return -ENOMEM;
-
- buf->map = map;
- map->q = q;
-
- buf->baddr = vma->vm_start;
-
- mem = buf->priv;
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
-
- if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize)))
- goto error;
-
- /* the "vm_pgoff" is just used in v4l2 to find the
- * corresponding buffer data structure which is allocated
- * earlier and it does not mean the offset from the physical
- * buffer start address as usual. So set it to 0 to pass
- * the sanity check in dma_mmap_coherent().
- */
- vma->vm_pgoff = 0;
- retval = dma_mmap_coherent(q->dev, vma, mem->vaddr, mem->dma_handle,
- mem->size);
- if (retval) {
- dev_err(q->dev, "mmap: remap failed with error %d. ",
- retval);
- dma_free_coherent(q->dev, mem->size,
- mem->vaddr, mem->dma_handle);
- goto error;
- }
-
- vma->vm_ops = &videobuf_vm_ops;
- vm_flags_set(vma, VM_DONTEXPAND);
- vma->vm_private_data = map;
-
- dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
- map, q, vma->vm_start, vma->vm_end,
- (long int)buf->bsize, vma->vm_pgoff, buf->i);
-
- videobuf_vm_open(vma);
-
- return 0;
-
-error:
- kfree(map);
- return -ENOMEM;
-}
-
-static struct videobuf_qtype_ops qops = {
- .magic = MAGIC_QTYPE_OPS,
- .alloc_vb = __videobuf_alloc,
- .iolock = __videobuf_iolock,
- .mmap_mapper = __videobuf_mmap_mapper,
- .vaddr = __videobuf_to_vaddr,
-};
-
-void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
- const struct videobuf_queue_ops *ops,
- struct device *dev,
- spinlock_t *irqlock,
- enum v4l2_buf_type type,
- enum v4l2_field field,
- unsigned int msize,
- void *priv,
- struct mutex *ext_lock)
-{
- videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
- priv, &qops, ext_lock);
-}
-EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
-
-dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
-{
- struct videobuf_dma_contig_memory *mem = buf->priv;
-
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
-
- return mem->dma_handle;
-}
-EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);
-
-void videobuf_dma_contig_free(struct videobuf_queue *q,
- struct videobuf_buffer *buf)
-{
- struct videobuf_dma_contig_memory *mem = buf->priv;
-
- /* mmapped memory can't be freed here, otherwise mmapped region
- would be released, while still needed. In this case, the memory
- release should happen inside videobuf_vm_close().
- So, it should free memory only if the memory was allocated for
- the read() operation.
- */
- if (buf->memory != V4L2_MEMORY_USERPTR)
- return;
-
- if (!mem)
- return;
-
- MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
-
- /* handle user space pointer case */
- if (buf->baddr) {
- videobuf_dma_contig_user_put(mem);
- return;
- }
-
- /* read() method */
- if (mem->vaddr) {
- __videobuf_dc_free(q->dev, mem);
- mem->vaddr = NULL;
- }
-}
-EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
-
-MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
-MODULE_AUTHOR("Magnus Damm");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
deleted file mode 100644
index 405b89ea1054..000000000000
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ /dev/null
@@ -1,681 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * helper functions for SG DMA video4linux capture buffers
- *
- * The functions expect the hardware to be able to scatter-gather
- * (i.e. the buffers are not linear in physical memory, but fragmented
- * into PAGE_SIZE chunks). They also assume the driver does not need
- * to touch the video data.
- *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
- *
- * Highly based on video-buf written originally by:
- * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
- * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
- * (c) 2006 Ted Walther and John Sokol
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/sched/mm.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/pgtable.h>
-
-#include <linux/dma-mapping.h>
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
-#include <linux/scatterlist.h>
-#include <asm/page.h>
-
-#include <media/videobuf-dma-sg.h>
-
-#define MAGIC_DMABUF 0x19721112
-#define MAGIC_SG_MEM 0x17890714
-
-#define MAGIC_CHECK(is, should) \
- if (unlikely((is) != (should))) { \
- printk(KERN_ERR "magic mismatch: %x (expected %x)\n", \
- is, should); \
- BUG(); \
- }
-
-static int debug;
-module_param(debug, int, 0644);
-
-MODULE_DESCRIPTION("helper module to manage video4linux dma sg buffers");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
-MODULE_LICENSE("GPL");
-
-#define dprintk(level, fmt, arg...) \
- if (debug >= level) \
- printk(KERN_DEBUG "vbuf-sg: " fmt , ## arg)
-
-/* --------------------------------------------------------------------- */
-
-/*
- * Return a scatterlist for some page-aligned vmalloc()'ed memory
- * block (NULL on errors). Memory for the scatterlist is allocated
- * using kmalloc. The caller must free the memory.
- */
-static struct scatterlist *videobuf_vmalloc_to_sg(unsigned char *virt,
- int nr_pages)
-{
- struct scatterlist *sglist;
- struct page *pg;
- int i;
-
- sglist = vzalloc(array_size(nr_pages, sizeof(*sglist)));
- if (NULL == sglist)
- return NULL;
- sg_init_table(sglist, nr_pages);
- for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
- pg = vmalloc_to_page(virt);
- if (NULL == pg)
- goto err;
- BUG_ON(PageHighMem(pg));
- sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
- }
- return sglist;
-
-err:
- vfree(sglist);
- return NULL;
-}
-
-/*
- * Return a scatterlist for an array of userpages (NULL on errors).
- * Memory for the scatterlist is allocated using kmalloc. The caller
- * must free the memory.
- */
-static struct scatterlist *videobuf_pages_to_sg(struct page **pages,
- int nr_pages, int offset, size_t size)
-{
- struct scatterlist *sglist;
- int i;
-
- if (NULL == pages[0])
- return NULL;
- sglist = vmalloc(array_size(nr_pages, sizeof(*sglist)));
- if (NULL == sglist)
- return NULL;
- sg_init_table(sglist, nr_pages);
-
- if (PageHighMem(pages[0]))
- /* DMA to highmem pages might not work */
- goto highmem;
- sg_set_page(&sglist[0], pages[0],
- min_t(size_t, PAGE_SIZE - offset, size), offset);
- size -= min_t(size_t, PAGE_SIZE - offset, size);
- for (i = 1; i < nr_pages; i++) {
- if (NULL == pages[i])
- goto nopage;
- if (PageHighMem(pages[i]))
- goto highmem;
- sg_set_page(&sglist[i], pages[i], min_t(size_t, PAGE_SIZE, size), 0);
- size -= min_t(size_t, PAGE_SIZE, size);
- }
- return sglist;
-
-nopage:
- dprintk(2, "sgl: oops - no page\n");
- vfree(sglist);
- return NULL;
-
-highmem:
- dprintk(2, "sgl: oops - highmem page\n");
- vfree(sglist);
- return NULL;
-}
-
-/* --------------------------------------------------------------------- */
-
-struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf)
-{
- struct videobuf_dma_sg_memory *mem = buf->priv;
- BUG_ON(!mem);
-
- MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
-
- return &mem->dma;
-}
-EXPORT_SYMBOL_GPL(videobuf_to_dma);
-
-static void videobuf_dma_init(struct videobuf_dmabuf *dma)
-{
- memset(dma, 0, sizeof(*dma));
- dma->magic = MAGIC_DMABUF;
-}
-
-static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
- int direction, unsigned long data, unsigned long size)
-{
- unsigned int gup_flags = FOLL_LONGTERM;
- unsigned long first, last;
- int err;
-
- dma->direction = direction;
- switch (dma->direction) {
- case DMA_FROM_DEVICE:
- gup_flags |= FOLL_WRITE;
- break;
- case DMA_TO_DEVICE:
- break;
- default:
- BUG();
- }
-
- first = (data & PAGE_MASK) >> PAGE_SHIFT;
- last = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT;
- dma->offset = data & ~PAGE_MASK;
- dma->size = size;
- dma->nr_pages = last-first+1;
- dma->pages = kmalloc_array(dma->nr_pages, sizeof(struct page *),
- GFP_KERNEL);
- if (NULL == dma->pages)
- return -ENOMEM;
-
- dprintk(1, "init user [0x%lx+0x%lx => %lu pages]\n",
- data, size, dma->nr_pages);
-
- err = pin_user_pages(data & PAGE_MASK, dma->nr_pages, gup_flags,
- dma->pages);
-
- if (err != dma->nr_pages) {
- dma->nr_pages = (err >= 0) ? err : 0;
- dprintk(1, "pin_user_pages: err=%d [%lu]\n", err,
- dma->nr_pages);
- return err < 0 ? err : -EINVAL;
- }
- return 0;
-}
-
-static int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
- unsigned long data, unsigned long size)
-{
- int ret;
-
- mmap_read_lock(current->mm);
- ret = videobuf_dma_init_user_locked(dma, direction, data, size);
- mmap_read_unlock(current->mm);
-
- return ret;
-}
-
-static int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction,
- unsigned long nr_pages)
-{
- int i;
-
- dprintk(1, "init kernel [%lu pages]\n", nr_pages);
-
- dma->direction = direction;
- dma->vaddr_pages = kcalloc(nr_pages, sizeof(*dma->vaddr_pages),
- GFP_KERNEL);
- if (!dma->vaddr_pages)
- return -ENOMEM;
-
- dma->dma_addr = kcalloc(nr_pages, sizeof(*dma->dma_addr), GFP_KERNEL);
- if (!dma->dma_addr) {
- kfree(dma->vaddr_pages);
- return -ENOMEM;
- }
- for (i = 0; i < nr_pages; i++) {
- void *addr;
-
- addr = dma_alloc_coherent(dma->dev, PAGE_SIZE,
- &(dma->dma_addr[i]), GFP_KERNEL);
- if (addr == NULL)
- goto out_free_pages;
-
- dma->vaddr_pages[i] = virt_to_page(addr);
- }
- dma->vaddr = vmap(dma->vaddr_pages, nr_pages, VM_MAP | VM_IOREMAP,
- PAGE_KERNEL);
- if (NULL == dma->vaddr) {
- dprintk(1, "vmalloc_32(%lu pages) failed\n", nr_pages);
- goto out_free_pages;
- }
-
- dprintk(1, "vmalloc is at addr %p, size=%lu\n",
- dma->vaddr, nr_pages << PAGE_SHIFT);
-
- memset(dma->vaddr, 0, nr_pages << PAGE_SHIFT);
- dma->nr_pages = nr_pages;
-
- return 0;
-out_free_pages:
- while (i > 0) {
- void *addr;
-
- i--;
- addr = page_address(dma->vaddr_pages[i]);
- dma_free_coherent(dma->dev, PAGE_SIZE, addr, dma->dma_addr[i]);
- }
- kfree(dma->dma_addr);
- dma->dma_addr = NULL;
- kfree(dma->vaddr_pages);
- dma->vaddr_pages = NULL;
-
- return -ENOMEM;
-
-}
-
-static int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction,
- dma_addr_t addr, unsigned long nr_pages)
-{
- dprintk(1, "init overlay [%lu pages @ bus 0x%lx]\n",
- nr_pages, (unsigned long)addr);
- dma->direction = direction;
-
- if (0 == addr)
- return -EINVAL;
-
- dma->bus_addr = addr;
- dma->nr_pages = nr_pages;
-
- return 0;
-}
-
-static int videobuf_dma_map(struct device *dev, struct videobuf_dmabuf *dma)
-{
- MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
- BUG_ON(0 == dma->nr_pages);
-
- if (dma->pages) {
- dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages,
- dma->offset, dma->size);
- }
- if (dma->vaddr) {
- dma->sglist = videobuf_vmalloc_to_sg(dma->vaddr,
- dma->nr_pages);
- }
- if (dma->bus_addr) {
- dma->sglist = vmalloc(sizeof(*dma->sglist));
- if (NULL != dma->sglist) {
- dma->sglen = 1;
- sg_dma_address(&dma->sglist[0]) = dma->bus_addr
- & PAGE_MASK;
- dma->sglist[0].offset = dma->bus_addr & ~PAGE_MASK;
- sg_dma_len(&dma->sglist[0]) = dma->nr_pages * PAGE_SIZE;
- }
- }
- if (NULL == dma->sglist) {
- dprintk(1, "scatterlist is NULL\n");
- return -ENOMEM;
- }
- if (!dma->bus_addr) {
- dma->sglen = dma_map_sg(dev, dma->sglist,
- dma->nr_pages, dma->direction);
- if (0 == dma->sglen) {
- printk(KERN_WARNING
- "%s: videobuf_map_sg failed\n", __func__);
- vfree(dma->sglist);
- dma->sglist = NULL;
- dma->sglen = 0;
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-int videobuf_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma)
-{
- MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
-
- if (!dma->sglen)
- return 0;
-
- dma_unmap_sg(dev, dma->sglist, dma->nr_pages, dma->direction);
-
- vfree(dma->sglist);
- dma->sglist = NULL;
- dma->sglen = 0;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(videobuf_dma_unmap);
-
-int videobuf_dma_free(struct videobuf_dmabuf *dma)
-{
- int i;
- MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
- BUG_ON(dma->sglen);
-
- if (dma->pages) {
- unpin_user_pages_dirty_lock(dma->pages, dma->nr_pages,
- dma->direction == DMA_FROM_DEVICE);
- kfree(dma->pages);
- dma->pages = NULL;
- }
-
- if (dma->dma_addr) {
- for (i = 0; i < dma->nr_pages; i++) {
- void *addr;
-
- addr = page_address(dma->vaddr_pages[i]);
- dma_free_coherent(dma->dev, PAGE_SIZE, addr,
- dma->dma_addr[i]);
- }
- kfree(dma->dma_addr);
- dma->dma_addr = NULL;
- kfree(dma->vaddr_pages);
- dma->vaddr_pages = NULL;
- vunmap(dma->vaddr);
- dma->vaddr = NULL;
- }
-
- if (dma->bus_addr)
- dma->bus_addr = 0;
- dma->direction = DMA_NONE;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(videobuf_dma_free);
-
-/* --------------------------------------------------------------------- */
-
-static void videobuf_vm_open(struct vm_area_struct *vma)
-{
- struct videobuf_mapping *map = vma->vm_private_data;
-
- dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map,
- map->count, vma->vm_start, vma->vm_end);
-
- map->count++;
-}
-
-static void videobuf_vm_close(struct vm_area_struct *vma)
-{
- struct videobuf_mapping *map = vma->vm_private_data;
- struct videobuf_queue *q = map->q;
- struct videobuf_dma_sg_memory *mem;
- int i;
-
- dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map,
- map->count, vma->vm_start, vma->vm_end);
-
- map->count--;
- if (0 == map->count) {
- dprintk(1, "munmap %p q=%p\n", map, q);
- videobuf_queue_lock(q);
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (NULL == q->bufs[i])
- continue;
- mem = q->bufs[i]->priv;
- if (!mem)
- continue;
-
- MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
-
- if (q->bufs[i]->map != map)
- continue;
- q->bufs[i]->map = NULL;
- q->bufs[i]->baddr = 0;
- q->ops->buf_release(q, q->bufs[i]);
- }
- videobuf_queue_unlock(q);
- kfree(map);
- }
-}
-
-/*
- * Get an anonymous page for the mapping. Make sure we can DMA to that
- * memory location with 32bit PCI devices (i.e. don't use highmem for
- * now ...). Bounce buffers don't work very well for the data rates
- * video capture has.
- */
-static vm_fault_t videobuf_vm_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct page *page;
-
- dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n",
- vmf->address, vma->vm_start, vma->vm_end);
-
- page = alloc_page(GFP_USER | __GFP_DMA32);
- if (!page)
- return VM_FAULT_OOM;
- clear_user_highpage(page, vmf->address);
- vmf->page = page;
-
- return 0;
-}
-
-static const struct vm_operations_struct videobuf_vm_ops = {
- .open = videobuf_vm_open,
- .close = videobuf_vm_close,
- .fault = videobuf_vm_fault,
-};
-
-/* ---------------------------------------------------------------------
- * SG handlers for the generic methods
- */
-
-/* Allocated area consists of 3 parts:
- struct video_buffer
- struct <driver>_buffer (cx88_buffer, saa7134_buf, ...)
- struct videobuf_dma_sg_memory
- */
-
-static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
-{
- struct videobuf_dma_sg_memory *mem;
- struct videobuf_buffer *vb;
-
- vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
- if (!vb)
- return vb;
-
- mem = vb->priv = ((char *)vb) + size;
- mem->magic = MAGIC_SG_MEM;
-
- videobuf_dma_init(&mem->dma);
-
- dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
- __func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb),
- mem, (long)sizeof(*mem));
-
- return vb;
-}
-
-static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
-{
- struct videobuf_dma_sg_memory *mem = buf->priv;
- BUG_ON(!mem);
-
- MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
-
- return mem->dma.vaddr;
-}
-
-static int __videobuf_iolock(struct videobuf_queue *q,
- struct videobuf_buffer *vb,
- struct v4l2_framebuffer *fbuf)
-{
- struct videobuf_dma_sg_memory *mem = vb->priv;
- unsigned long pages;
- dma_addr_t bus;
- int err;
-
- BUG_ON(!mem);
-
- MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
-
- if (!mem->dma.dev)
- mem->dma.dev = q->dev;
- else
- WARN_ON(mem->dma.dev != q->dev);
-
- switch (vb->memory) {
- case V4L2_MEMORY_MMAP:
- case V4L2_MEMORY_USERPTR:
- if (0 == vb->baddr) {
- /* no userspace addr -- kernel bounce buffer */
- pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
- err = videobuf_dma_init_kernel(&mem->dma,
- DMA_FROM_DEVICE,
- pages);
- if (0 != err)
- return err;
- } else if (vb->memory == V4L2_MEMORY_USERPTR) {
- /* dma directly to userspace */
- err = videobuf_dma_init_user(&mem->dma,
- DMA_FROM_DEVICE,
- vb->baddr, vb->bsize);
- if (0 != err)
- return err;
- } else {
- /* NOTE: HACK: videobuf_iolock on V4L2_MEMORY_MMAP
- buffers can only be called from videobuf_qbuf;
- we take current->mm->mmap_lock there to prevent
- locking inversion, so don't take it here */
-
- err = videobuf_dma_init_user_locked(&mem->dma,
- DMA_FROM_DEVICE,
- vb->baddr, vb->bsize);
- if (0 != err)
- return err;
- }
- break;
- case V4L2_MEMORY_OVERLAY:
- if (NULL == fbuf)
- return -EINVAL;
- /* FIXME: need sanity checks for vb->boff */
- /*
- * Using a double cast to avoid compiler warnings when
- * building for PAE. Compiler doesn't like direct casting
- * of a 32 bit ptr to 64 bit integer.
- */
- bus = (dma_addr_t)(unsigned long)fbuf->base + vb->boff;
- pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
- err = videobuf_dma_init_overlay(&mem->dma, DMA_FROM_DEVICE,
- bus, pages);
- if (0 != err)
- return err;
- break;
- default:
- BUG();
- }
- err = videobuf_dma_map(q->dev, &mem->dma);
- if (0 != err)
- return err;
-
- return 0;
-}
-
-static int __videobuf_sync(struct videobuf_queue *q,
- struct videobuf_buffer *buf)
-{
- struct videobuf_dma_sg_memory *mem = buf->priv;
- BUG_ON(!mem || !mem->dma.sglen);
-
- MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
- MAGIC_CHECK(mem->dma.magic, MAGIC_DMABUF);
-
- dma_sync_sg_for_cpu(q->dev, mem->dma.sglist,
- mem->dma.nr_pages, mem->dma.direction);
-
- return 0;
-}
-
-static int __videobuf_mmap_mapper(struct videobuf_queue *q,
- struct videobuf_buffer *buf,
- struct vm_area_struct *vma)
-{
- struct videobuf_dma_sg_memory *mem = buf->priv;
- struct videobuf_mapping *map;
- unsigned int first, last, size = 0, i;
- int retval;
-
- retval = -EINVAL;
-
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
-
- /* look for first buffer to map */
- for (first = 0; first < VIDEO_MAX_FRAME; first++) {
- if (buf == q->bufs[first]) {
- size = PAGE_ALIGN(q->bufs[first]->bsize);
- break;
- }
- }
-
- /* paranoia, should never happen since buf is always valid. */
- if (!size) {
- dprintk(1, "mmap app bug: offset invalid [offset=0x%lx]\n",
- (vma->vm_pgoff << PAGE_SHIFT));
- goto done;
- }
-
- last = first;
-
- /* create mapping + update buffer list */
- retval = -ENOMEM;
- map = kmalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
- if (NULL == map)
- goto done;
-
- size = 0;
- for (i = first; i <= last; i++) {
- if (NULL == q->bufs[i])
- continue;
- q->bufs[i]->map = map;
- q->bufs[i]->baddr = vma->vm_start + size;
- size += PAGE_ALIGN(q->bufs[i]->bsize);
- }
-
- map->count = 1;
- map->q = q;
- vma->vm_ops = &videobuf_vm_ops;
- /* using shared anonymous pages */
- vm_flags_mod(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_IO);
- vma->vm_private_data = map;
- dprintk(1, "mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
- map, q, vma->vm_start, vma->vm_end, vma->vm_pgoff, first, last);
- retval = 0;
-
-done:
- return retval;
-}
-
-static struct videobuf_qtype_ops sg_ops = {
- .magic = MAGIC_QTYPE_OPS,
-
- .alloc_vb = __videobuf_alloc_vb,
- .iolock = __videobuf_iolock,
- .sync = __videobuf_sync,
- .mmap_mapper = __videobuf_mmap_mapper,
- .vaddr = __videobuf_to_vaddr,
-};
-
-void *videobuf_sg_alloc(size_t size)
-{
- struct videobuf_queue q;
-
- /* Required to make generic handler to call __videobuf_alloc */
- q.int_ops = &sg_ops;
-
- q.msize = size;
-
- return videobuf_alloc_vb(&q);
-}
-EXPORT_SYMBOL_GPL(videobuf_sg_alloc);
-
-void videobuf_queue_sg_init(struct videobuf_queue *q,
- const struct videobuf_queue_ops *ops,
- struct device *dev,
- spinlock_t *irqlock,
- enum v4l2_buf_type type,
- enum v4l2_field field,
- unsigned int msize,
- void *priv,
- struct mutex *ext_lock)
-{
- videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
- priv, &sg_ops, ext_lock);
-}
-EXPORT_SYMBOL_GPL(videobuf_queue_sg_init);
-
diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c
deleted file mode 100644
index 85c7090606d6..000000000000
--- a/drivers/media/v4l2-core/videobuf-vmalloc.c
+++ /dev/null
@@ -1,326 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * helper functions for vmalloc video4linux capture buffers
- *
- * The functions expect the hardware being able to scatter gather
- * (i.e. the buffers are not linear in physical memory, but fragmented
- * into PAGE_SIZE chunks). They also assume the driver does not need
- * to touch the video data.
- *
- * (c) 2007 Mauro Carvalho Chehab <mchehab@kernel.org>
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/pgtable.h>
-
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
-#include <asm/page.h>
-
-#include <media/videobuf-vmalloc.h>
-
-#define MAGIC_DMABUF 0x17760309
-#define MAGIC_VMAL_MEM 0x18221223
-
-#define MAGIC_CHECK(is, should) \
- if (unlikely((is) != (should))) { \
- printk(KERN_ERR "magic mismatch: %x (expected %x)\n", \
- is, should); \
- BUG(); \
- }
-
-static int debug;
-module_param(debug, int, 0644);
-
-MODULE_DESCRIPTION("helper module to manage video4linux vmalloc buffers");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
-MODULE_LICENSE("GPL");
-
-#define dprintk(level, fmt, arg...) \
- if (debug >= level) \
- printk(KERN_DEBUG "vbuf-vmalloc: " fmt , ## arg)
-
-
-/***************************************************************************/
-
-static void videobuf_vm_open(struct vm_area_struct *vma)
-{
- struct videobuf_mapping *map = vma->vm_private_data;
-
- dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map,
- map->count, vma->vm_start, vma->vm_end);
-
- map->count++;
-}
-
-static void videobuf_vm_close(struct vm_area_struct *vma)
-{
- struct videobuf_mapping *map = vma->vm_private_data;
- struct videobuf_queue *q = map->q;
- int i;
-
- dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
- map->count, vma->vm_start, vma->vm_end);
-
- map->count--;
- if (0 == map->count) {
- struct videobuf_vmalloc_memory *mem;
-
- dprintk(1, "munmap %p q=%p\n", map, q);
- videobuf_queue_lock(q);
-
- /* We need first to cancel streams, before unmapping */
- if (q->streaming)
- videobuf_queue_cancel(q);
-
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (NULL == q->bufs[i])
- continue;
-
- if (q->bufs[i]->map != map)
- continue;
-
- mem = q->bufs[i]->priv;
- if (mem) {
- /* This callback is called only if kernel has
- allocated memory and this memory is mmapped.
- In this case, memory should be freed,
- in order to do memory unmap.
- */
-
- MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
-
- /* vfree is not atomic - can't be
- called with IRQ's disabled
- */
- dprintk(1, "%s: buf[%d] freeing (%p)\n",
- __func__, i, mem->vaddr);
-
- vfree(mem->vaddr);
- mem->vaddr = NULL;
- }
-
- q->bufs[i]->map = NULL;
- q->bufs[i]->baddr = 0;
- }
-
- kfree(map);
-
- videobuf_queue_unlock(q);
- }
-
- return;
-}
-
-static const struct vm_operations_struct videobuf_vm_ops = {
- .open = videobuf_vm_open,
- .close = videobuf_vm_close,
-};
-
-/* ---------------------------------------------------------------------
- * vmalloc handlers for the generic methods
- */
-
-/* Allocated area consists on 3 parts:
- struct video_buffer
- struct <driver>_buffer (cx88_buffer, saa7134_buf, ...)
- struct videobuf_dma_sg_memory
- */
-
-static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
-{
- struct videobuf_vmalloc_memory *mem;
- struct videobuf_buffer *vb;
-
- vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
- if (!vb)
- return vb;
-
- mem = vb->priv = ((char *)vb) + size;
- mem->magic = MAGIC_VMAL_MEM;
-
- dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
- __func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb),
- mem, (long)sizeof(*mem));
-
- return vb;
-}
-
-static int __videobuf_iolock(struct videobuf_queue *q,
- struct videobuf_buffer *vb,
- struct v4l2_framebuffer *fbuf)
-{
- struct videobuf_vmalloc_memory *mem = vb->priv;
- int pages;
-
- BUG_ON(!mem);
-
- MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
-
- switch (vb->memory) {
- case V4L2_MEMORY_MMAP:
- dprintk(1, "%s memory method MMAP\n", __func__);
-
- /* All handling should be done by __videobuf_mmap_mapper() */
- if (!mem->vaddr) {
- printk(KERN_ERR "memory is not allocated/mmapped.\n");
- return -EINVAL;
- }
- break;
- case V4L2_MEMORY_USERPTR:
- pages = PAGE_ALIGN(vb->size);
-
- dprintk(1, "%s memory method USERPTR\n", __func__);
-
- if (vb->baddr) {
- printk(KERN_ERR "USERPTR is currently not supported\n");
- return -EINVAL;
- }
-
- /* The only USERPTR currently supported is the one needed for
- * read() method.
- */
-
- mem->vaddr = vmalloc_user(pages);
- if (!mem->vaddr) {
- printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
- return -ENOMEM;
- }
- dprintk(1, "vmalloc is at addr %p (%d pages)\n",
- mem->vaddr, pages);
- break;
- case V4L2_MEMORY_OVERLAY:
- default:
- dprintk(1, "%s memory method OVERLAY/unknown\n", __func__);
-
- /* Currently, doesn't support V4L2_MEMORY_OVERLAY */
- printk(KERN_ERR "Memory method currently unsupported.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int __videobuf_mmap_mapper(struct videobuf_queue *q,
- struct videobuf_buffer *buf,
- struct vm_area_struct *vma)
-{
- struct videobuf_vmalloc_memory *mem;
- struct videobuf_mapping *map;
- int retval, pages;
-
- dprintk(1, "%s\n", __func__);
-
- /* create mapping + update buffer list */
- map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
- if (NULL == map)
- return -ENOMEM;
-
- buf->map = map;
- map->q = q;
-
- buf->baddr = vma->vm_start;
-
- mem = buf->priv;
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
-
- pages = PAGE_ALIGN(vma->vm_end - vma->vm_start);
- mem->vaddr = vmalloc_user(pages);
- if (!mem->vaddr) {
- printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
- goto error;
- }
- dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vaddr, pages);
-
- /* Try to remap memory */
- retval = remap_vmalloc_range(vma, mem->vaddr, 0);
- if (retval < 0) {
- printk(KERN_ERR "mmap: remap failed with error %d. ", retval);
- vfree(mem->vaddr);
- goto error;
- }
-
- vma->vm_ops = &videobuf_vm_ops;
- vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
- vma->vm_private_data = map;
-
- dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
- map, q, vma->vm_start, vma->vm_end,
- (long int)buf->bsize,
- vma->vm_pgoff, buf->i);
-
- videobuf_vm_open(vma);
-
- return 0;
-
-error:
- mem = NULL;
- kfree(map);
- return -ENOMEM;
-}
-
-static struct videobuf_qtype_ops qops = {
- .magic = MAGIC_QTYPE_OPS,
-
- .alloc_vb = __videobuf_alloc_vb,
- .iolock = __videobuf_iolock,
- .mmap_mapper = __videobuf_mmap_mapper,
- .vaddr = videobuf_to_vmalloc,
-};
-
-void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
- const struct videobuf_queue_ops *ops,
- struct device *dev,
- spinlock_t *irqlock,
- enum v4l2_buf_type type,
- enum v4l2_field field,
- unsigned int msize,
- void *priv,
- struct mutex *ext_lock)
-{
- videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
- priv, &qops, ext_lock);
-}
-EXPORT_SYMBOL_GPL(videobuf_queue_vmalloc_init);
-
-void *videobuf_to_vmalloc(struct videobuf_buffer *buf)
-{
- struct videobuf_vmalloc_memory *mem = buf->priv;
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
-
- return mem->vaddr;
-}
-EXPORT_SYMBOL_GPL(videobuf_to_vmalloc);
-
-void videobuf_vmalloc_free(struct videobuf_buffer *buf)
-{
- struct videobuf_vmalloc_memory *mem = buf->priv;
-
- /* mmapped memory can't be freed here, otherwise mmapped region
- would be released, while still needed. In this case, the memory
- release should happen inside videobuf_vm_close().
- So, it should free memory only if the memory were allocated for
- read() operation.
- */
- if ((buf->memory != V4L2_MEMORY_USERPTR) || buf->baddr)
- return;
-
- if (!mem)
- return;
-
- MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
-
- vfree(mem->vaddr);
- mem->vaddr = NULL;
-
- return;
-}
-EXPORT_SYMBOL_GPL(videobuf_vmalloc_free);
-
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index 67d6e70b4eab..a083921a8968 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -979,35 +979,6 @@ static int tegra_mc_probe(struct platform_device *pdev)
}
}
- if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && !mc->soc->smmu) {
- mc->gart = tegra_gart_probe(&pdev->dev, mc);
- if (IS_ERR(mc->gart)) {
- dev_err(&pdev->dev, "failed to probe GART: %ld\n",
- PTR_ERR(mc->gart));
- mc->gart = NULL;
- }
- }
-
- return 0;
-}
-
-static int __maybe_unused tegra_mc_suspend(struct device *dev)
-{
- struct tegra_mc *mc = dev_get_drvdata(dev);
-
- if (mc->soc->ops && mc->soc->ops->suspend)
- return mc->soc->ops->suspend(mc);
-
- return 0;
-}
-
-static int __maybe_unused tegra_mc_resume(struct device *dev)
-{
- struct tegra_mc *mc = dev_get_drvdata(dev);
-
- if (mc->soc->ops && mc->soc->ops->resume)
- return mc->soc->ops->resume(mc);
-
return 0;
}
@@ -1020,15 +991,10 @@ static void tegra_mc_sync_state(struct device *dev)
icc_sync_state(dev);
}
-static const struct dev_pm_ops tegra_mc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(tegra_mc_suspend, tegra_mc_resume)
-};
-
static struct platform_driver tegra_mc_driver = {
.driver = {
.name = "tegra-mc",
.of_match_table = tegra_mc_of_match,
- .pm = &tegra_mc_pm_ops,
.suppress_bind_attrs = true,
.sync_state = tegra_mc_sync_state,
},
diff --git a/drivers/memory/tegra/tegra20.c b/drivers/memory/tegra/tegra20.c
index 544bfd216a22..aa4b97d5e732 100644
--- a/drivers/memory/tegra/tegra20.c
+++ b/drivers/memory/tegra/tegra20.c
@@ -688,32 +688,6 @@ static int tegra20_mc_probe(struct tegra_mc *mc)
return 0;
}
-static int tegra20_mc_suspend(struct tegra_mc *mc)
-{
- int err;
-
- if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && mc->gart) {
- err = tegra_gart_suspend(mc->gart);
- if (err < 0)
- return err;
- }
-
- return 0;
-}
-
-static int tegra20_mc_resume(struct tegra_mc *mc)
-{
- int err;
-
- if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && mc->gart) {
- err = tegra_gart_resume(mc->gart);
- if (err < 0)
- return err;
- }
-
- return 0;
-}
-
static irqreturn_t tegra20_mc_handle_irq(int irq, void *data)
{
struct tegra_mc *mc = data;
@@ -789,8 +763,6 @@ static irqreturn_t tegra20_mc_handle_irq(int irq, void *data)
static const struct tegra_mc_ops tegra20_mc_ops = {
.probe = tegra20_mc_probe,
- .suspend = tegra20_mc_suspend,
- .resume = tegra20_mc_resume,
.handle_irq = tegra20_mc_handle_irq,
};
diff --git a/drivers/misc/bcm-vk/bcm_vk_msg.c b/drivers/misc/bcm-vk/bcm_vk_msg.c
index e17d81231ea6..1f42d1d5a630 100644
--- a/drivers/misc/bcm-vk/bcm_vk_msg.c
+++ b/drivers/misc/bcm-vk/bcm_vk_msg.c
@@ -703,12 +703,12 @@ int bcm_vk_send_shutdown_msg(struct bcm_vk *vk, u32 shut_type,
entry = kzalloc(struct_size(entry, to_v_msg, 1), GFP_KERNEL);
if (!entry)
return -ENOMEM;
+ entry->to_v_blks = 1; /* always 1 block */
/* fill up necessary data */
entry->to_v_msg[0].function_id = VK_FID_SHUTDOWN;
set_q_num(&entry->to_v_msg[0], q_num);
set_msg_id(&entry->to_v_msg[0], VK_SIMPLEX_MSG_ID);
- entry->to_v_blks = 1; /* always 1 block */
entry->to_v_msg[0].cmd = shut_type;
entry->to_v_msg[0].arg = pid;
diff --git a/drivers/misc/bcm-vk/bcm_vk_msg.h b/drivers/misc/bcm-vk/bcm_vk_msg.h
index 56784c8896d8..157495e48f15 100644
--- a/drivers/misc/bcm-vk/bcm_vk_msg.h
+++ b/drivers/misc/bcm-vk/bcm_vk_msg.h
@@ -116,7 +116,7 @@ struct bcm_vk_wkent {
u32 usr_msg_id;
u32 to_v_blks;
u32 seq_num;
- struct vk_msg_blk to_v_msg[];
+ struct vk_msg_blk to_v_msg[] __counted_by(to_v_blks);
};
/* queue stats counters */
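
For reference, the technique the two bcm-vk hunks combine is: size the allocation with struct_size(), assign the counter field before the first array access, and let __counted_by() tie run-time bounds checking (FORTIFY_SOURCE/UBSAN) to that counter. A minimal sketch under those assumptions, with illustrative type and field names rather than the bcm-vk ones:

struct blk { u64 payload[4]; };                  /* stand-in element type */

struct msg_entry {
        u32 nr_blks;                             /* counter named by __counted_by() */
        struct blk blks[] __counted_by(nr_blks);
};

static struct msg_entry *msg_entry_alloc(u32 nr_blks, gfp_t gfp)
{
        /* struct_size() computes header + nr_blks elements without overflow */
        struct msg_entry *e = kzalloc(struct_size(e, blks, nr_blks), gfp);

        if (!e)
                return NULL;
        e->nr_blks = nr_blks;   /* must hold a valid count before blks[i] is touched */
        return e;
}
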
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index f574c83b82cf..2bb1dd2511f9 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -923,7 +923,7 @@ struct c2port_device *c2port_device_register(char *name,
}
dev_set_drvdata(c2dev->dev, c2dev);
- strncpy(c2dev->name, name, C2PORT_NAME_LEN - 1);
+ strscpy(c2dev->name, name, sizeof(c2dev->name));
c2dev->ops = ops;
mutex_init(&c2dev->mutex);
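
The conversion above relies on strscpy() guaranteeing NUL termination, which strncpy() does not when the source fills the destination. A small sketch of the idiom (src is a hypothetical input string; the c2port code copies into c2dev->name the same way):

        char name[32];
        ssize_t len;

        /*
         * Copies at most sizeof(name) - 1 bytes, always NUL-terminates, and
         * returns the copied length or -E2BIG if src had to be truncated.
         */
        len = strscpy(name, src, sizeof(name));
        if (len == -E2BIG)
                pr_warn("name truncated to \"%s\"\n", name);
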
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 144d1f2d78ce..012e11b959bc 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -38,8 +38,6 @@
static dev_t cxl_dev;
-static struct class *cxl_class;
-
static int __afu_open(struct inode *inode, struct file *file, bool master)
{
struct cxl *adapter;
@@ -559,7 +557,10 @@ static char *cxl_devnode(const struct device *dev, umode_t *mode)
return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
}
-extern struct class *cxl_class;
+static const struct class cxl_class = {
+ .name = "cxl",
+ .devnode = cxl_devnode,
+};
static int cxl_add_chardev(struct cxl_afu *afu, dev_t devt, struct cdev *cdev,
struct device **chardev, char *postfix, char *desc,
@@ -575,7 +576,7 @@ static int cxl_add_chardev(struct cxl_afu *afu, dev_t devt, struct cdev *cdev,
return rc;
}
- dev = device_create(cxl_class, &afu->dev, devt, afu,
+ dev = device_create(&cxl_class, &afu->dev, devt, afu,
"afu%i.%i%s", afu->adapter->adapter_num, afu->slice, postfix);
if (IS_ERR(dev)) {
rc = PTR_ERR(dev);
@@ -633,14 +634,14 @@ void cxl_chardev_afu_remove(struct cxl_afu *afu)
int cxl_register_afu(struct cxl_afu *afu)
{
- afu->dev.class = cxl_class;
+ afu->dev.class = &cxl_class;
return device_register(&afu->dev);
}
int cxl_register_adapter(struct cxl *adapter)
{
- adapter->dev.class = cxl_class;
+ adapter->dev.class = &cxl_class;
/*
* Future: When we support dynamically reprogramming the PSL & AFU we
@@ -678,13 +679,11 @@ int __init cxl_file_init(void)
pr_devel("CXL device allocated, MAJOR %i\n", MAJOR(cxl_dev));
- cxl_class = class_create("cxl");
- if (IS_ERR(cxl_class)) {
+ rc = class_register(&cxl_class);
+ if (rc) {
pr_err("Unable to create CXL class\n");
- rc = PTR_ERR(cxl_class);
goto err;
}
- cxl_class->devnode = cxl_devnode;
return 0;
@@ -696,5 +695,5 @@ err:
void cxl_file_exit(void)
{
unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS);
- class_destroy(cxl_class);
+ class_unregister(&cxl_class);
}
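
The cxl conversion above is an instance of the generic move from a heap-allocated class_create() handle to a constant, statically defined struct class: .name and .devnode are set at definition time, class_register()/class_unregister() bracket its lifetime, and device_create()/dev.class take the structure's address. A condensed sketch with illustrative names:

static char *foo_devnode(const struct device *dev, umode_t *mode)
{
        return kasprintf(GFP_KERNEL, "foo/%s", dev_name(dev));
}

static const struct class foo_class = {
        .name    = "foo",
        .devnode = foo_devnode,
};

static int __init foo_init(void)
{
        /* paired with class_unregister(&foo_class) in the exit path */
        return class_register(&foo_class);
}
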
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 2d240bfa819f..4e61ac18cc96 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -46,20 +46,6 @@ config EEPROM_AT25
This driver can also be built as a module. If so, the module
will be called at25.
-config EEPROM_LEGACY
- tristate "Old I2C EEPROM reader (DEPRECATED)"
- depends on I2C && SYSFS
- help
- If you say yes here you get read-only access to the EEPROM data
- available on modern memory DIMMs and Sony Vaio laptops via I2C. Such
- EEPROMs could theoretically be available on other devices as well.
-
- This driver is deprecated and will be removed soon, please use the
- better at24 driver instead.
-
- This driver can also be built as a module. If so, the module
- will be called eeprom.
-
config EEPROM_MAX6875
tristate "Maxim MAX6874/5 power supply supervisor"
depends on I2C
diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile
index a9b4b6579b75..65794e526d5d 100644
--- a/drivers/misc/eeprom/Makefile
+++ b/drivers/misc/eeprom/Makefile
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_EEPROM_AT24) += at24.o
obj-$(CONFIG_EEPROM_AT25) += at25.o
-obj-$(CONFIG_EEPROM_LEGACY) += eeprom.o
obj-$(CONFIG_EEPROM_MAX6875) += max6875.o
obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index dbbf7db4ff2f..f61a80597a22 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -92,7 +92,7 @@ struct at24_data {
* them for us.
*/
u8 bank_addr_shift;
- struct regmap *client_regmaps[];
+ struct regmap *client_regmaps[] __counted_by(num_addresses);
};
/*
@@ -191,9 +191,13 @@ AT24_CHIP_DATA(at24_data_24c16, 16384 / 8, 0);
AT24_CHIP_DATA(at24_data_24cs16, 16,
AT24_FLAG_SERIAL | AT24_FLAG_READONLY);
AT24_CHIP_DATA(at24_data_24c32, 32768 / 8, AT24_FLAG_ADDR16);
+/* M24C32-D Additional Write lockable page (M24C32-D order codes) */
+AT24_CHIP_DATA(at24_data_24c32d_wlp, 32, AT24_FLAG_ADDR16);
AT24_CHIP_DATA(at24_data_24cs32, 16,
AT24_FLAG_ADDR16 | AT24_FLAG_SERIAL | AT24_FLAG_READONLY);
AT24_CHIP_DATA(at24_data_24c64, 65536 / 8, AT24_FLAG_ADDR16);
+/* M24C64-D Additional Write lockable page (M24C64-D order codes) */
+AT24_CHIP_DATA(at24_data_24c64d_wlp, 32, AT24_FLAG_ADDR16);
AT24_CHIP_DATA(at24_data_24cs64, 16,
AT24_FLAG_ADDR16 | AT24_FLAG_SERIAL | AT24_FLAG_READONLY);
AT24_CHIP_DATA(at24_data_24c128, 131072 / 8, AT24_FLAG_ADDR16);
@@ -222,8 +226,10 @@ static const struct i2c_device_id at24_ids[] = {
{ "24c16", (kernel_ulong_t)&at24_data_24c16 },
{ "24cs16", (kernel_ulong_t)&at24_data_24cs16 },
{ "24c32", (kernel_ulong_t)&at24_data_24c32 },
+ { "24c32d-wl", (kernel_ulong_t)&at24_data_24c32d_wlp },
{ "24cs32", (kernel_ulong_t)&at24_data_24cs32 },
{ "24c64", (kernel_ulong_t)&at24_data_24c64 },
+ { "24c64-wl", (kernel_ulong_t)&at24_data_24c64d_wlp },
{ "24cs64", (kernel_ulong_t)&at24_data_24cs64 },
{ "24c128", (kernel_ulong_t)&at24_data_24c128 },
{ "24c256", (kernel_ulong_t)&at24_data_24c256 },
@@ -252,8 +258,10 @@ static const struct of_device_id at24_of_match[] = {
{ .compatible = "atmel,24c16", .data = &at24_data_24c16 },
{ .compatible = "atmel,24cs16", .data = &at24_data_24cs16 },
{ .compatible = "atmel,24c32", .data = &at24_data_24c32 },
+ { .compatible = "atmel,24c32d-wl", .data = &at24_data_24c32d_wlp },
{ .compatible = "atmel,24cs32", .data = &at24_data_24cs32 },
{ .compatible = "atmel,24c64", .data = &at24_data_24c64 },
+ { .compatible = "atmel,24c64d-wl", .data = &at24_data_24c64d_wlp },
{ .compatible = "atmel,24cs64", .data = &at24_data_24cs64 },
{ .compatible = "atmel,24c128", .data = &at24_data_24c128 },
{ .compatible = "atmel,24c256", .data = &at24_data_24c256 },
@@ -509,32 +517,6 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
return 0;
}
-static const struct at24_chip_data *at24_get_chip_data(struct device *dev)
-{
- struct device_node *of_node = dev->of_node;
- const struct at24_chip_data *cdata;
- const struct i2c_device_id *id;
-
- id = i2c_match_id(at24_ids, to_i2c_client(dev));
-
- /*
- * The I2C core allows OF nodes compatibles to match against the
- * I2C device ID table as a fallback, so check not only if an OF
- * node is present but also if it matches an OF device ID entry.
- */
- if (of_node && of_match_device(at24_of_match, dev))
- cdata = of_device_get_match_data(dev);
- else if (id)
- cdata = (void *)id->driver_data;
- else
- cdata = acpi_device_get_match_data(dev);
-
- if (!cdata)
- return ERR_PTR(-ENODEV);
-
- return cdata;
-}
-
static int at24_make_dummy_client(struct at24_data *at24, unsigned int index,
struct i2c_client *base_client,
struct regmap_config *regmap_config)
@@ -601,9 +583,9 @@ static int at24_probe(struct i2c_client *client)
i2c_fn_block = i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WRITE_I2C_BLOCK);
- cdata = at24_get_chip_data(dev);
- if (IS_ERR(cdata))
- return PTR_ERR(cdata);
+ cdata = i2c_get_match_data(client);
+ if (!cdata)
+ return -ENODEV;
err = device_property_read_u32(dev, "pagesize", &page_size);
if (err)
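
i2c_get_match_data() collapses the lookup that the removed at24_get_chip_data() open-coded: it returns firmware-node (OF/ACPI) match data when available and otherwise falls back to the driver_data of the matching i2c_device_id entry, or NULL. A probe sketch with a hypothetical chip-data type:

struct foo_chip_data { u32 byte_len; };          /* illustrative per-chip parameters */

static int foo_probe(struct i2c_client *client)
{
        const struct foo_chip_data *cdata;

        /* OF/ACPI match data first, i2c_device_id driver_data as fallback */
        cdata = i2c_get_match_data(client);
        if (!cdata)
                return -ENODEV;

        dev_dbg(&client->dev, "chip size %u bytes\n", cdata->byte_len);
        return 0;
}
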
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
deleted file mode 100644
index ccb7c2f7ee2f..000000000000
--- a/drivers/misc/eeprom/eeprom.c
+++ /dev/null
@@ -1,214 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl> and
- * Philip Edelbrock <phil@netroedge.com>
- * Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com>
- * Copyright (C) 2003 IBM Corp.
- * Copyright (C) 2004 Jean Delvare <jdelvare@suse.de>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/capability.h>
-#include <linux/jiffies.h>
-#include <linux/i2c.h>
-#include <linux/mutex.h>
-
-/* Addresses to scan */
-static const unsigned short normal_i2c[] = { 0x50, 0x51, 0x52, 0x53, 0x54,
- 0x55, 0x56, 0x57, I2C_CLIENT_END };
-
-
-/* Size of EEPROM in bytes */
-#define EEPROM_SIZE 256
-
-/* possible types of eeprom devices */
-enum eeprom_nature {
- UNKNOWN,
- VAIO,
-};
-
-/* Each client has this additional data */
-struct eeprom_data {
- struct mutex update_lock;
- u8 valid; /* bitfield, bit!=0 if slice is valid */
- unsigned long last_updated[8]; /* In jiffies, 8 slices */
- u8 data[EEPROM_SIZE]; /* Register values */
- enum eeprom_nature nature;
-};
-
-
-static void eeprom_update_client(struct i2c_client *client, u8 slice)
-{
- struct eeprom_data *data = i2c_get_clientdata(client);
- int i;
-
- mutex_lock(&data->update_lock);
-
- if (!(data->valid & (1 << slice)) ||
- time_after(jiffies, data->last_updated[slice] + 300 * HZ)) {
- dev_dbg(&client->dev, "Starting eeprom update, slice %u\n", slice);
-
- if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
- for (i = slice << 5; i < (slice + 1) << 5; i += 32)
- if (i2c_smbus_read_i2c_block_data(client, i,
- 32, data->data + i)
- != 32)
- goto exit;
- } else {
- for (i = slice << 5; i < (slice + 1) << 5; i += 2) {
- int word = i2c_smbus_read_word_data(client, i);
- if (word < 0)
- goto exit;
- data->data[i] = word & 0xff;
- data->data[i + 1] = word >> 8;
- }
- }
- data->last_updated[slice] = jiffies;
- data->valid |= (1 << slice);
- }
-exit:
- mutex_unlock(&data->update_lock);
-}
-
-static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
-{
- struct i2c_client *client = kobj_to_i2c_client(kobj);
- struct eeprom_data *data = i2c_get_clientdata(client);
- u8 slice;
-
- /* Only refresh slices which contain requested bytes */
- for (slice = off >> 5; slice <= (off + count - 1) >> 5; slice++)
- eeprom_update_client(client, slice);
-
- /* Hide Vaio private settings to regular users:
- - BIOS passwords: bytes 0x00 to 0x0f
- - UUID: bytes 0x10 to 0x1f
- - Serial number: 0xc0 to 0xdf */
- if (data->nature == VAIO && !capable(CAP_SYS_ADMIN)) {
- int i;
-
- for (i = 0; i < count; i++) {
- if ((off + i <= 0x1f) ||
- (off + i >= 0xc0 && off + i <= 0xdf))
- buf[i] = 0;
- else
- buf[i] = data->data[off + i];
- }
- } else {
- memcpy(buf, &data->data[off], count);
- }
-
- return count;
-}
-
-static const struct bin_attribute eeprom_attr = {
- .attr = {
- .name = "eeprom",
- .mode = S_IRUGO,
- },
- .size = EEPROM_SIZE,
- .read = eeprom_read,
-};
-
-/* Return 0 if detection is successful, -ENODEV otherwise */
-static int eeprom_detect(struct i2c_client *client, struct i2c_board_info *info)
-{
- struct i2c_adapter *adapter = client->adapter;
-
- /* EDID EEPROMs are often 24C00 EEPROMs, which answer to all
- addresses 0x50-0x57, but we only care about 0x50. So decline
- attaching to addresses >= 0x51 on DDC buses */
- if (!(adapter->class & I2C_CLASS_SPD) && client->addr >= 0x51)
- return -ENODEV;
-
- /* There are four ways we can read the EEPROM data:
- (1) I2C block reads (faster, but unsupported by most adapters)
- (2) Word reads (128% overhead)
- (3) Consecutive byte reads (88% overhead, unsafe)
- (4) Regular byte data reads (265% overhead)
- The third and fourth methods are not implemented by this driver
- because all known adapters support one of the first two. */
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_WORD_DATA)
- && !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_I2C_BLOCK))
- return -ENODEV;
-
- strscpy(info->type, "eeprom", I2C_NAME_SIZE);
-
- return 0;
-}
-
-static int eeprom_probe(struct i2c_client *client)
-{
- struct i2c_adapter *adapter = client->adapter;
- struct eeprom_data *data;
-
- data = devm_kzalloc(&client->dev, sizeof(struct eeprom_data),
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- memset(data->data, 0xff, EEPROM_SIZE);
- i2c_set_clientdata(client, data);
- mutex_init(&data->update_lock);
- data->nature = UNKNOWN;
-
- /* Detect the Vaio nature of EEPROMs.
- We use the "PCG-" or "VGN-" prefix as the signature. */
- if (client->addr == 0x57
- && i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
- char name[4];
-
- name[0] = i2c_smbus_read_byte_data(client, 0x80);
- name[1] = i2c_smbus_read_byte_data(client, 0x81);
- name[2] = i2c_smbus_read_byte_data(client, 0x82);
- name[3] = i2c_smbus_read_byte_data(client, 0x83);
-
- if (!memcmp(name, "PCG-", 4) || !memcmp(name, "VGN-", 4)) {
- dev_info(&client->dev, "Vaio EEPROM detected, "
- "enabling privacy protection\n");
- data->nature = VAIO;
- }
- }
-
- /* Let the users know they are using deprecated driver */
- dev_notice(&client->dev,
- "eeprom driver is deprecated, please use at24 instead\n");
-
- /* create the sysfs eeprom file */
- return sysfs_create_bin_file(&client->dev.kobj, &eeprom_attr);
-}
-
-static void eeprom_remove(struct i2c_client *client)
-{
- sysfs_remove_bin_file(&client->dev.kobj, &eeprom_attr);
-}
-
-static const struct i2c_device_id eeprom_id[] = {
- { "eeprom", 0 },
- { }
-};
-
-static struct i2c_driver eeprom_driver = {
- .driver = {
- .name = "eeprom",
- },
- .probe = eeprom_probe,
- .remove = eeprom_remove,
- .id_table = eeprom_id,
-
- .class = I2C_CLASS_DDC | I2C_CLASS_SPD,
- .detect = eeprom_detect,
- .address_list = normal_i2c,
-};
-
-module_i2c_driver(eeprom_driver);
-
-MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and "
- "Philip Edelbrock <phil@netroedge.com> and "
- "Greg Kroah-Hartman <greg@kroah.com>");
-MODULE_DESCRIPTION("I2C EEPROM driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
index 1d1f30b5c426..d807d08e2614 100644
--- a/drivers/misc/eeprom/idt_89hpesx.c
+++ b/drivers/misc/eeprom/idt_89hpesx.c
@@ -905,7 +905,7 @@ static ssize_t idt_dbgfs_csr_write(struct file *filep, const char __user *ubuf,
{
struct idt_89hpesx_dev *pdev = filep->private_data;
char *colon_ch, *csraddr_str, *csrval_str;
- int ret, csraddr_len;
+ int ret;
u32 csraddr, csrval;
char *buf;
@@ -927,21 +927,16 @@ static ssize_t idt_dbgfs_csr_write(struct file *filep, const char __user *ubuf,
* no new CSR value
*/
if (colon_ch != NULL) {
- csraddr_len = colon_ch - buf;
- csraddr_str =
- kmalloc(csraddr_len + 1, GFP_KERNEL);
+ /* Copy the register address to the substring buffer */
+ csraddr_str = kmemdup_nul(buf, colon_ch - buf, GFP_KERNEL);
if (csraddr_str == NULL) {
ret = -ENOMEM;
goto free_buf;
}
- /* Copy the register address to the substring buffer */
- strncpy(csraddr_str, buf, csraddr_len);
- csraddr_str[csraddr_len] = '\0';
/* Register value must follow the colon */
csrval_str = colon_ch + 1;
} else /* if (str_colon == NULL) */ {
csraddr_str = (char *)buf; /* Just to shut warning up */
- csraddr_len = strnlen(csraddr_str, count);
csrval_str = NULL;
}
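
kmemdup_nul(src, len, gfp) allocates len + 1 bytes, copies len bytes and appends the terminating NUL, which is exactly the kmalloc + strncpy + manual-termination sequence removed above. A sketch of splitting an "<addr>:<value>" string around the colon (buf and count mirror the debugfs write handler; variable names are illustrative):

        char *colon, *addr_str, *val_str = NULL;

        colon = strnchr(buf, count, ':');
        if (colon) {
                /* duplicate only the address substring, NUL-terminated */
                addr_str = kmemdup_nul(buf, colon - buf, GFP_KERNEL);
                if (!addr_str)
                        return -ENOMEM;
                val_str = colon + 1;    /* value follows the colon */
        } else {
                addr_str = buf;         /* whole buffer is the address */
        }
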
diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
index 7739b783c2db..e5f935b5249d 100644
--- a/drivers/misc/ibmvmc.c
+++ b/drivers/misc/ibmvmc.c
@@ -1249,9 +1249,7 @@ static long ibmvmc_ioctl_sethmcid(struct ibmvmc_file_session *session,
return -EIO;
}
- /* Make sure buffer is NULL terminated before trying to print it */
- memset(print_buffer, 0, HMC_ID_LEN + 1);
- strncpy(print_buffer, hmc->hmc_id, HMC_ID_LEN);
+ strscpy(print_buffer, hmc->hmc_id, sizeof(print_buffer));
pr_info("ibmvmc: sethmcid: Set HMC ID: \"%s\"\n", print_buffer);
memcpy(buffer->real_addr_local, hmc->hmc_id, HMC_ID_LEN);
diff --git a/drivers/misc/lkdtm/cfi.c b/drivers/misc/lkdtm/cfi.c
index fc28714ae3a6..6a33889d0902 100644
--- a/drivers/misc/lkdtm/cfi.c
+++ b/drivers/misc/lkdtm/cfi.c
@@ -68,12 +68,20 @@ static void lkdtm_CFI_FORWARD_PROTO(void)
#define no_pac_addr(addr) \
((__force __typeof__(addr))((uintptr_t)(addr) | PAGE_OFFSET))
+#ifdef CONFIG_RISCV
+/* https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#frame-pointer-convention */
+#define FRAME_RA_OFFSET (-1)
+#else
+#define FRAME_RA_OFFSET 1
+#endif
+
/* The ultimate ROP gadget. */
static noinline __no_ret_protection
void set_return_addr_unchecked(unsigned long *expected, unsigned long *addr)
{
/* Use of volatile is to make sure final write isn't seen as a dead store. */
- unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;
+ unsigned long * volatile *ret_addr =
+ (unsigned long **)__builtin_frame_address(0) + FRAME_RA_OFFSET;
/* Make sure we've found the right place on the stack before writing it. */
if (no_pac_addr(*ret_addr) == expected)
@@ -88,7 +96,8 @@ static noinline
void set_return_addr(unsigned long *expected, unsigned long *addr)
{
/* Use of volatile is to make sure final write isn't seen as a dead store. */
- unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;
+ unsigned long * volatile *ret_addr =
+ (unsigned long **)__builtin_frame_address(0) + FRAME_RA_OFFSET;
/* Make sure we've found the right place on the stack before writing it. */
if (no_pac_addr(*ret_addr) == expected)
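
Both helpers above locate the saved return address relative to __builtin_frame_address(0); FRAME_RA_OFFSET captures the ABI difference: one word above the frame pointer on most architectures, one word below it under the RISC-V frame-pointer convention. The shared check-then-write step, shown in isolation (expected and addr are the function arguments, as in the patch):

        unsigned long * volatile *ret_addr =
                (unsigned long **)__builtin_frame_address(0) + FRAME_RA_OFFSET;

        /* only overwrite the slot if it really holds this frame's return address */
        if (no_pac_addr(*ret_addr) == expected)
                *ret_addr = addr;
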
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 2e65ce6bdec7..f9bcff197615 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -257,7 +257,7 @@ out:
}
/**
- * mei_cldev_send_vtag - me device send with vtag (write)
+ * mei_cldev_send_vtag - me device send with vtag (write)
*
* @cldev: me client device
* @buf: buffer to send
@@ -279,6 +279,29 @@ ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
EXPORT_SYMBOL_GPL(mei_cldev_send_vtag);
/**
+ * mei_cldev_send_vtag_timeout - me device send with vtag and timeout (write)
+ *
+ * @cldev: me client device
+ * @buf: buffer to send
+ * @length: buffer length
+ * @vtag: virtual tag
+ * @timeout: send timeout in milliseconds, 0 for infinite timeout
+ *
+ * Return:
+ * * written size in bytes
+ * * < 0 on error
+ */
+
+ssize_t mei_cldev_send_vtag_timeout(struct mei_cl_device *cldev, const u8 *buf,
+ size_t length, u8 vtag, unsigned long timeout)
+{
+ struct mei_cl *cl = cldev->cl;
+
+ return __mei_cl_send_timeout(cl, buf, length, vtag, MEI_CL_IO_TX_BLOCKING, timeout);
+}
+EXPORT_SYMBOL_GPL(mei_cldev_send_vtag_timeout);
+
+/**
* mei_cldev_recv_vtag - client receive with vtag (read)
*
* @cldev: me client device
@@ -323,7 +346,49 @@ ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock_vtag);
/**
- * mei_cldev_send - me device send (write)
+ * mei_cldev_recv_timeout - client receive with timeout (read)
+ *
+ * @cldev: me client device
+ * @buf: buffer to receive
+ * @length: buffer length
+ * @timeout: send timeout in milliseconds, 0 for infinite timeout
+ * @timeout: recv timeout in milliseconds, 0 for infinite timeout
+ *
+ * Return:
+ * * read size in bytes
+ * * < 0 on error
+ */
+ssize_t mei_cldev_recv_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
+ unsigned long timeout)
+{
+ return mei_cldev_recv_vtag_timeout(cldev, buf, length, NULL, timeout);
+}
+EXPORT_SYMBOL_GPL(mei_cldev_recv_timeout);
+
+/**
+ * mei_cldev_recv_vtag_timeout - client receive with vtag (read)
+ *
+ * @cldev: me client device
+ * @buf: buffer to receive
+ * @length: buffer length
+ * @vtag: virtual tag
+ * @timeout: recv timeout in milliseconds, 0 for infinite timeout
+ *
+ * Return:
+ * * read size in bytes
+ * * < 0 on error
+ */
+
+ssize_t mei_cldev_recv_vtag_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
+ u8 *vtag, unsigned long timeout)
+{
+ struct mei_cl *cl = cldev->cl;
+
+ return __mei_cl_recv(cl, buf, length, vtag, 0, timeout);
+}
+EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag_timeout);
+
+/**
+ * mei_cldev_send - me device send (write)
*
* @cldev: me client device
* @buf: buffer to send
@@ -340,6 +405,25 @@ ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, size_t length
EXPORT_SYMBOL_GPL(mei_cldev_send);
/**
+ * mei_cldev_send_timeout - me device send with timeout (write)
+ *
+ * @cldev: me client device
+ * @buf: buffer to send
+ * @length: buffer length
+ * @timeout: send timeout in milliseconds, 0 for infinite timeout
+ *
+ * Return:
+ * * written size in bytes
+ * * < 0 on error
+ */
+ssize_t mei_cldev_send_timeout(struct mei_cl_device *cldev, const u8 *buf, size_t length,
+ unsigned long timeout)
+{
+ return mei_cldev_send_vtag_timeout(cldev, buf, length, 0, timeout);
+}
+EXPORT_SYMBOL_GPL(mei_cldev_send_timeout);
+
+/**
* mei_cldev_recv - client receive (read)
*
* @cldev: me client device
@@ -1301,7 +1385,7 @@ static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
* @bus: mei device
* @me_cl: me client
*
- * Return: allocated device structur or NULL on allocation failure
+ * Return: allocated device structure or NULL on allocation failure
*/
static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
struct mei_me_client *me_cl)
@@ -1361,7 +1445,7 @@ static bool mei_cl_bus_dev_setup(struct mei_device *bus,
*
* @cldev: me client device
*
- * Return: 0 on success; < 0 on failre
+ * Return: 0 on success; < 0 on failure
*/
static int mei_cl_bus_dev_add(struct mei_cl_device *cldev)
{
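
The new *_timeout exports give mei bus clients bounded blocking I/O; a timeout of 0 preserves the old wait-forever behaviour. A usage sketch from a hypothetical client driver (buffer sizes and the 2000 ms value are illustrative):

        u8 req[16] = {}, resp[64];
        ssize_t ret;

        /* give the firmware up to two seconds to accept the request ... */
        ret = mei_cldev_send_timeout(cldev, req, sizeof(req), 2000);
        if (ret < 0)
                return ret;

        /* ... and up to two seconds to produce the reply */
        ret = mei_cldev_recv_timeout(cldev, resp, sizeof(resp), 2000);
        return ret < 0 ? ret : 0;
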
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 5c19097266fe..9c8fc87938a7 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -48,9 +48,9 @@ struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
/**
* mei_me_cl_release - free me client
*
- * Locking: called under "dev->device_lock" lock
- *
* @ref: me_client refcount
+ *
+ * Locking: called under "dev->device_lock" lock
*/
static void mei_me_cl_release(struct kref *ref)
{
@@ -63,9 +63,9 @@ static void mei_me_cl_release(struct kref *ref)
/**
* mei_me_cl_put - decrease me client refcount and free client if necessary
*
- * Locking: called under "dev->device_lock" lock
- *
* @me_cl: me client
+ *
+ * Locking: called under "dev->device_lock" lock
*/
void mei_me_cl_put(struct mei_me_client *me_cl)
{
@@ -329,10 +329,10 @@ void mei_io_cb_free(struct mei_cl_cb *cb)
/**
* mei_tx_cb_enqueue - queue tx callback
*
- * Locking: called under "dev->device_lock" lock
- *
* @cb: mei callback struct
* @head: an instance of list to queue on
+ *
+ * Locking: called under "dev->device_lock" lock
*/
static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
struct list_head *head)
@@ -344,9 +344,9 @@ static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
/**
* mei_tx_cb_dequeue - dequeue tx callback
*
- * Locking: called under "dev->device_lock" lock
- *
* @cb: mei callback struct to dequeue and free
+ *
+ * Locking: called under "dev->device_lock" lock
*/
static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
{
@@ -359,10 +359,10 @@ static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
/**
* mei_cl_set_read_by_fp - set pending_read flag to vtag struct for given fp
*
- * Locking: called under "dev->device_lock" lock
- *
* @cl: mei client
* @fp: pointer to file structure
+ *
+ * Locking: called under "dev->device_lock" lock
*/
static void mei_cl_set_read_by_fp(const struct mei_cl *cl,
const struct file *fp)
diff --git a/drivers/misc/mei/dma-ring.c b/drivers/misc/mei/dma-ring.c
index ef56f849b251..651e77ef82bd 100644
--- a/drivers/misc/mei/dma-ring.c
+++ b/drivers/misc/mei/dma-ring.c
@@ -124,6 +124,8 @@ void mei_dma_ring_reset(struct mei_device *dev)
* @buf: data buffer
* @offset: offset in slots.
* @n: number of slots to copy.
+ *
+ * Return: number of bytes copied
*/
static size_t mei_dma_copy_from(struct mei_device *dev, unsigned char *buf,
u32 offset, u32 n)
@@ -144,6 +146,8 @@ static size_t mei_dma_copy_from(struct mei_device *dev, unsigned char *buf,
* @buf: data buffer
* @offset: offset in slots.
* @n: number of slots to copy.
+ *
+ * Return: number of bytes copied
*/
static size_t mei_dma_copy_to(struct mei_device *dev, unsigned char *buf,
u32 offset, u32 n)
@@ -161,7 +165,7 @@ static size_t mei_dma_copy_to(struct mei_device *dev, unsigned char *buf,
/**
* mei_dma_ring_read() - read data from the ring
* @dev: mei device
- * @buf: buffer to read into: may be NULL in case of droping the data.
+ * @buf: buffer to read into: may be NULL in case of dropping the data.
* @len: length to read.
*/
void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len)
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 12a62a911e42..026b1f686c16 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -91,6 +91,8 @@ static int mei_cl_conn_status_to_errno(enum mei_cl_connect_status status)
* @dev: mei device
* @hdr: mei header
* @data: payload
+ *
+ * Return: >=0 on success, <0 on error
*/
static inline int mei_hbm_write_message(struct mei_device *dev,
struct mei_msg_hdr *hdr,
@@ -111,7 +113,7 @@ void mei_hbm_idle(struct mei_device *dev)
}
/**
- * mei_hbm_reset - reset hbm counters and book keeping data structurs
+ * mei_hbm_reset - reset hbm counters and book keeping data structures
*
* @dev: the device structure
*/
@@ -907,7 +909,7 @@ int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl)
}
/**
- * mei_hbm_cl_disconnect_rsp - sends disconnect respose to the FW
+ * mei_hbm_cl_disconnect_rsp - sends disconnect response to the FW
*
* @dev: the device structure
* @cl: a client to disconnect from
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index bdc65d50b945..961e5d53a27a 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -123,6 +123,9 @@
# define PCI_CFG_HFS_1_OPMODE_MSK 0xf0000 /* OP MODE Mask: SPS <= 4.0 */
# define PCI_CFG_HFS_1_OPMODE_SPS 0xf0000 /* SPS SKU : SPS <= 4.0 */
#define PCI_CFG_HFS_2 0x48
+# define PCI_CFG_HFS_2_PM_CMOFF_TO_CMX_ERROR 0x1000000 /* CMoff->CMx wake after an error */
+# define PCI_CFG_HFS_2_PM_CM_RESET_ERROR 0x5000000 /* CME reset due to exception */
+# define PCI_CFG_HFS_2_PM_EVENT_MASK 0xf000000
#define PCI_CFG_HFS_3 0x60
# define PCI_CFG_HFS_3_FW_SKU_MSK 0x00000070
# define PCI_CFG_HFS_3_FW_SKU_IGN 0x00000000
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index da4ef0b51954..d11a0740b47c 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -443,11 +443,22 @@ static void mei_gsc_pxp_check(struct mei_device *dev)
struct mei_me_hw *hw = to_me_hw(dev);
u32 fwsts5 = 0;
- if (dev->pxp_mode == MEI_DEV_PXP_DEFAULT)
+ if (!kind_is_gsc(dev) && !kind_is_gscfi(dev))
return;
hw->read_fws(dev, PCI_CFG_HFS_5, &fwsts5);
trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_5", PCI_CFG_HFS_5, fwsts5);
+
+ if ((fwsts5 & GSC_CFG_HFS_5_BOOT_TYPE_MSK) == GSC_CFG_HFS_5_BOOT_TYPE_PXP) {
+ if (dev->gsc_reset_to_pxp == MEI_DEV_RESET_TO_PXP_DEFAULT)
+ dev->gsc_reset_to_pxp = MEI_DEV_RESET_TO_PXP_PERFORMED;
+ } else {
+ dev->gsc_reset_to_pxp = MEI_DEV_RESET_TO_PXP_DEFAULT;
+ }
+
+ if (dev->pxp_mode == MEI_DEV_PXP_DEFAULT)
+ return;
+
if ((fwsts5 & GSC_CFG_HFS_5_BOOT_TYPE_MSK) == GSC_CFG_HFS_5_BOOT_TYPE_PXP) {
dev_dbg(dev->dev, "pxp mode is ready 0x%08x\n", fwsts5);
dev->pxp_mode = MEI_DEV_PXP_READY;
@@ -483,6 +494,43 @@ static int mei_me_hw_ready_wait(struct mei_device *dev)
}
/**
+ * mei_me_check_fw_reset - check for the firmware reset error and exception conditions
+ *
+ * @dev: mei device
+ */
+static void mei_me_check_fw_reset(struct mei_device *dev)
+{
+ struct mei_fw_status fw_status;
+ char fw_sts_str[MEI_FW_STATUS_STR_SZ] = {0};
+ int ret;
+ u32 fw_pm_event = 0;
+
+ if (!dev->saved_fw_status_flag)
+ goto end;
+
+ if (dev->gsc_reset_to_pxp == MEI_DEV_RESET_TO_PXP_PERFORMED) {
+ ret = mei_fw_status(dev, &fw_status);
+ if (!ret) {
+ fw_pm_event = fw_status.status[1] & PCI_CFG_HFS_2_PM_EVENT_MASK;
+ if (fw_pm_event != PCI_CFG_HFS_2_PM_CMOFF_TO_CMX_ERROR &&
+ fw_pm_event != PCI_CFG_HFS_2_PM_CM_RESET_ERROR)
+ goto end;
+ } else {
+ dev_err(dev->dev, "failed to read firmware status: %d\n", ret);
+ }
+ }
+
+ mei_fw_status2str(&dev->saved_fw_status, fw_sts_str, sizeof(fw_sts_str));
+ dev_warn(dev->dev, "unexpected reset: fw_pm_event = 0x%x, dev_state = %u fw status = %s\n",
+ fw_pm_event, dev->saved_dev_state, fw_sts_str);
+
+end:
+ if (dev->gsc_reset_to_pxp == MEI_DEV_RESET_TO_PXP_PERFORMED)
+ dev->gsc_reset_to_pxp = MEI_DEV_RESET_TO_PXP_DONE;
+ dev->saved_fw_status_flag = false;
+}
+
+/**
* mei_me_hw_start - hw start routine
*
* @dev: mei device
@@ -492,6 +540,8 @@ static int mei_me_hw_start(struct mei_device *dev)
{
int ret = mei_me_hw_ready_wait(dev);
+ if (kind_is_gsc(dev) || kind_is_gscfi(dev))
+ mei_me_check_fw_reset(dev);
if (ret)
return ret;
dev_dbg(dev->dev, "hw is ready\n");
@@ -1300,8 +1350,13 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
/* check if ME wants a reset */
if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
- dev_warn(dev->dev, "FW not ready: resetting: dev_state = %d pxp = %d\n",
- dev->dev_state, dev->pxp_mode);
+ if (kind_is_gsc(dev) || kind_is_gscfi(dev)) {
+ dev_dbg(dev->dev, "FW not ready: resetting: dev_state = %d\n",
+ dev->dev_state);
+ } else {
+ dev_warn(dev->dev, "FW not ready: resetting: dev_state = %d\n",
+ dev->dev_state);
+ }
if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
dev->dev_state == MEI_DEV_POWER_DOWN)
mei_cl_all_disconnect(dev);
@@ -1379,6 +1434,8 @@ EXPORT_SYMBOL_GPL(mei_me_irq_thread_handler);
/**
* mei_me_polling_thread - interrupt register polling thread
*
+ * @_dev: mei device
+ *
* The thread monitors the interrupt source register and calls
* mei_me_irq_thread_handler() to handle the firmware
* input.
@@ -1388,8 +1445,6 @@ EXPORT_SYMBOL_GPL(mei_me_irq_thread_handler);
* time increases yet again by MEI_POLLING_TIMEOUT_ACTIVE
* up to MEI_POLLING_TIMEOUT_IDLE.
*
- * @_dev: mei device
- *
* Return: always 0
*/
int mei_me_polling_thread(void *_dev)
@@ -1468,12 +1523,12 @@ static const struct mei_hw_ops mei_me_hw_ops = {
/**
* mei_me_fw_type_nm() - check for nm sku
*
+ * @pdev: pci device
+ *
* Read ME FW Status register to check for the Node Manager (NM) Firmware.
* The NM FW is only signaled in PCI function 0.
* __Note__: Deprecated by PCH8 and newer.
*
- * @pdev: pci device
- *
* Return: true in case of NM firmware
*/
static bool mei_me_fw_type_nm(const struct pci_dev *pdev)
@@ -1494,12 +1549,12 @@ static bool mei_me_fw_type_nm(const struct pci_dev *pdev)
/**
* mei_me_fw_type_sps_4() - check for sps 4.0 sku
*
+ * @pdev: pci device
+ *
* Read ME FW Status register to check for SPS Firmware.
* The SPS FW is only signaled in the PCI function 0.
* __Note__: Deprecated by SPS 5.0 and newer.
*
- * @pdev: pci device
- *
* Return: true in case of SPS firmware
*/
static bool mei_me_fw_type_sps_4(const struct pci_dev *pdev)
@@ -1519,11 +1574,11 @@ static bool mei_me_fw_type_sps_4(const struct pci_dev *pdev)
/**
* mei_me_fw_type_sps_ign() - check for sps or ign sku
*
+ * @pdev: pci device
+ *
* Read ME FW Status register to check for SPS or IGN Firmware.
* The SPS/IGN FW is only signaled in pci function 0
*
- * @pdev: pci device
- *
* Return: true in case of SPS/IGN firmware
*/
static bool mei_me_fw_type_sps_ign(const struct pci_dev *pdev)
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index 95cf830b7c7b..204b92af6c47 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -102,10 +102,14 @@ static inline bool mei_me_hw_use_polling(const struct mei_me_hw *hw)
* @MEI_ME_PCH12_SPS_CFG: Platform Controller Hub Gen12 5.0 and newer
* servers platforms with quirk for
* SPS firmware exclusion.
+ * @MEI_ME_PCH12_SPS_ITOUCH_CFG: Platform Controller Hub Gen12
+ * client platforms (iTouch)
* @MEI_ME_PCH15_CFG: Platform Controller Hub Gen15 and newer
* @MEI_ME_PCH15_SPS_CFG: Platform Controller Hub Gen15 and newer
* servers platforms with quirk for
* SPS firmware exclusion.
+ * @MEI_ME_GSC_CFG: Graphics System Controller
+ * @MEI_ME_GSCFI_CFG: Graphics System Controller Firmware Interface
* @MEI_ME_NUM_CFG: Upper Sentinel.
*/
enum mei_cfg_idx {
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index e910302fcd1f..eb800a07a84b 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -429,7 +429,7 @@ struct mei_bus_message {
} __packed;
/**
- * struct hbm_cl_cmd - client specific host bus command
+ * struct mei_hbm_cl_cmd - client specific host bus command
* CONNECT, DISCONNECT, and FlOW CONTROL
*
* @hbm_cmd: bus message command header
@@ -733,7 +733,7 @@ struct hbm_dma_setup_response {
} __packed;
/**
- * struct mei_dma_ring_ctrl - dma ring control block
+ * struct hbm_dma_ring_ctrl - dma ring control block
*
* @hbuf_wr_idx: host circular buffer write index in slots
* @reserved1: reserved for alignment
@@ -806,8 +806,8 @@ struct hbm_client_dma_map_request {
} __packed;
/**
- * struct hbm_client_dma_unmap_request
- * client dma unmap request from the host to the firmware
+ * struct hbm_client_dma_unmap_request - client dma unmap request
+ * from the host to the firmware
*
* @hbm_cmd: bus message command header
* @status: unmap status
@@ -822,8 +822,8 @@ struct hbm_client_dma_unmap_request {
} __packed;
/**
- * struct hbm_client_dma_response
- * client dma unmap response from the firmware to the host
+ * struct hbm_client_dma_response - client dma unmap response
+ * from the firmware to the host
*
* @hbm_cmd: bus message command header
* @status: command status
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index c35e005b26be..8ef2b1df8ac7 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -89,6 +89,22 @@ void mei_cancel_work(struct mei_device *dev)
}
EXPORT_SYMBOL_GPL(mei_cancel_work);
+static void mei_save_fw_status(struct mei_device *dev)
+{
+ struct mei_fw_status fw_status;
+ int ret;
+
+ ret = mei_fw_status(dev, &fw_status);
+ if (ret) {
+ dev_err(dev->dev, "failed to read firmware status: %d\n", ret);
+ return;
+ }
+
+ dev->saved_dev_state = dev->dev_state;
+ dev->saved_fw_status_flag = true;
+ memcpy(&dev->saved_fw_status, &fw_status, sizeof(fw_status));
+}
+
/**
* mei_reset - resets host and fw.
*
@@ -109,8 +125,14 @@ int mei_reset(struct mei_device *dev)
char fw_sts_str[MEI_FW_STATUS_STR_SZ];
mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ);
- dev_warn(dev->dev, "unexpected reset: dev_state = %s fw status = %s\n",
- mei_dev_state_str(state), fw_sts_str);
+ if (kind_is_gsc(dev) || kind_is_gscfi(dev)) {
+ dev_dbg(dev->dev, "unexpected reset: dev_state = %s fw status = %s\n",
+ mei_dev_state_str(state), fw_sts_str);
+ mei_save_fw_status(dev);
+ } else {
+ dev_warn(dev->dev, "unexpected reset: dev_state = %s fw status = %s\n",
+ mei_dev_state_str(state), fw_sts_str);
+ }
}
mei_clear_interrupts(dev);
@@ -394,6 +416,7 @@ void mei_device_init(struct mei_device *dev,
dev->open_handle_count = 0;
dev->pxp_mode = MEI_DEV_PXP_DEFAULT;
+ dev->gsc_reset_to_pxp = MEI_DEV_RESET_TO_PXP_DEFAULT;
/*
* Reserving the first client ID
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 0a0e984e5673..b09b79fedaba 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -72,7 +72,7 @@ static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr,
discard_len = 0;
}
/*
- * no need to check for size as it is guarantied
+ * no need to check for size as it is guaranteed
* that length fits into rd_msg_buf
*/
mei_read_slots(dev, dev->rd_msg_buf, discard_len);
@@ -626,9 +626,9 @@ static void mei_connect_timeout(struct mei_cl *cl)
/**
* mei_schedule_stall_timer - re-arm stall_timer work
*
- * Schedule stall timer
- *
* @dev: the device structure
+ *
+ * Schedule stall timer
*/
void mei_schedule_stall_timer(struct mei_device *dev)
{
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index bb4e9eabda97..79e6f3c1341f 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -460,11 +460,11 @@ end:
/**
* mei_vt_support_check - check if client support vtags
*
- * Locking: called under "dev->device_lock" lock
- *
* @dev: mei_device
* @uuid: client UUID
*
+ * Locking: called under "dev->device_lock" lock
+ *
* Return:
* 0 - supported
* -ENOTTY - no such client
@@ -587,8 +587,8 @@ static int mei_ioctl_connect_vtag(struct file *file,
}
/**
- * mei_ioctl_client_notify_request -
- * propagate event notification request to client
+ * mei_ioctl_client_notify_request - propagate event notification
+ * request to client
*
* @file: pointer to file structure
* @request: 0 - disable, 1 - enable
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index cdf8a2edf0b3..37d7fb15cad7 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -70,9 +70,9 @@ enum mei_dev_state {
/**
* enum mei_dev_pxp_mode - MEI PXP mode state
*
- * @MEI_DEV_PXP_DEFAULT: PCH based device, no initailization required
+ * @MEI_DEV_PXP_DEFAULT: PCH based device, no initialization required
* @MEI_DEV_PXP_INIT: device requires initialization, send setup message to firmware
- * @MEI_DEV_PXP_SETUP: device is in setup stage, waiting for firmware repsonse
+ * @MEI_DEV_PXP_SETUP: device is in setup stage, waiting for firmware response
* @MEI_DEV_PXP_READY: device initialized
*/
enum mei_dev_pxp_mode {
@@ -82,6 +82,19 @@ enum mei_dev_pxp_mode {
MEI_DEV_PXP_READY = 3,
};
+/**
+ * enum mei_dev_reset_to_pxp - reset to PXP mode performed
+ *
+ * @MEI_DEV_RESET_TO_PXP_DEFAULT: before reset
+ * @MEI_DEV_RESET_TO_PXP_PERFORMED: reset performed
+ * @MEI_DEV_RESET_TO_PXP_DONE: reset processed
+ */
+enum mei_dev_reset_to_pxp {
+ MEI_DEV_RESET_TO_PXP_DEFAULT = 0,
+ MEI_DEV_RESET_TO_PXP_PERFORMED = 1,
+ MEI_DEV_RESET_TO_PXP_DONE = 2,
+};
+
const char *mei_dev_state_str(int state);
enum mei_file_transaction_states {
@@ -534,6 +547,11 @@ struct mei_dev_timeouts {
*
* @dbgfs_dir : debugfs mei root directory
*
+ * @saved_fw_status : saved firmware status
+ * @saved_dev_state : saved device state
+ * @saved_fw_status_flag : flag indicating that firmware status was saved
+ * @gsc_reset_to_pxp : state of reset to the PXP mode
+ *
* @ops: : hw specific operations
* @hw : hw specific data
*/
@@ -630,6 +648,11 @@ struct mei_device {
struct dentry *dbgfs_dir;
#endif /* CONFIG_DEBUG_FS */
+ struct mei_fw_status saved_fw_status;
+ enum mei_dev_state saved_dev_state;
+ bool saved_fw_status_flag;
+ enum mei_dev_reset_to_pxp gsc_reset_to_pxp;
+
const struct mei_hw_ops *ops;
char hw[] __aligned(sizeof(void *));
};
@@ -874,5 +897,29 @@ static inline ssize_t mei_fw_status_str(struct mei_device *dev,
return ret;
}
+/**
+ * kind_is_gsc - checks whether the device is gsc
+ *
+ * @dev: the device structure
+ *
+ * Return: whether the device is gsc
+ */
+static inline bool kind_is_gsc(struct mei_device *dev)
+{
+ /* check kind for NULL because it may not be set yet, e.g. at the first call to hw_start */
+ return dev->kind && (strcmp(dev->kind, "gsc") == 0);
+}
+/**
+ * kind_is_gscfi - checks whether the device is gscfi
+ *
+ * @dev: the device structure
+ *
+ * Return: whether the device is gscfi
+ */
+static inline bool kind_is_gscfi(struct mei_device *dev)
+{
+ /* check kind for NULL because it may not be set yet, e.g. at the first call to hw_start */
+ return dev->kind && (strcmp(dev->kind, "gscfi") == 0);
+}
#endif
diff --git a/drivers/misc/mei/pxp/mei_pxp.c b/drivers/misc/mei/pxp/mei_pxp.c
index 2dcb9169e404..f77d78fa5054 100644
--- a/drivers/misc/mei/pxp/mei_pxp.c
+++ b/drivers/misc/mei/pxp/mei_pxp.c
@@ -11,6 +11,7 @@
* negotiation messages to ME FW command payloads and vice versa.
*/
+#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mei.h>
@@ -22,31 +23,70 @@
#include "mei_pxp.h"
+static inline int mei_pxp_reenable(const struct device *dev, struct mei_cl_device *cldev)
+{
+ int ret;
+
+ dev_warn(dev, "Trying to reset the channel...\n");
+ ret = mei_cldev_disable(cldev);
+ if (ret < 0)
+ dev_warn(dev, "mei_cldev_disable failed. %d\n", ret);
+ /*
+ * Explicitly ignoring disable failure,
+ * enable may fix the states and succeed
+ */
+ ret = mei_cldev_enable(cldev);
+ if (ret < 0)
+ dev_err(dev, "mei_cldev_enable failed. %d\n", ret);
+ return ret;
+}
+
/**
* mei_pxp_send_message() - Sends a PXP message to ME FW.
* @dev: device corresponding to the mei_cl_device
* @message: a message buffer to send
* @size: size of the message
- * Return: 0 on Success, <0 on Failure
+ * @timeout_ms: timeout in milliseconds, zero means wait indefinitely.
+ *
+ * Returns: 0 on Success, <0 on Failure with the following defined failures.
+ * -ENODEV: Client was not connected.
+ * Caller may attempt to try again immediately.
+ * -ENOMEM: Internal memory allocation failure experienced.
+ * Caller may sleep to allow kernel reclaim before retrying.
+ * -EINTR : Calling thread received a signal. Caller may choose
+ * to abandon with the same thread id.
+ * -ETIME : Request is timed out.
+ * Caller may attempt to try again immediately.
*/
static int
-mei_pxp_send_message(struct device *dev, const void *message, size_t size)
+mei_pxp_send_message(struct device *dev, const void *message, size_t size, unsigned long timeout_ms)
{
struct mei_cl_device *cldev;
ssize_t byte;
+ int ret;
if (!dev || !message)
return -EINVAL;
cldev = to_mei_cl_device(dev);
- byte = mei_cldev_send(cldev, message, size);
+ byte = mei_cldev_send_timeout(cldev, message, size, timeout_ms);
if (byte < 0) {
dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
- return byte;
+ switch (byte) {
+ case -ENOMEM:
+ fallthrough;
+ case -ENODEV:
+ fallthrough;
+ case -ETIME:
+ ret = mei_pxp_reenable(dev, cldev);
+ if (ret)
+ byte = ret;
+ break;
+ }
}
- return 0;
+ return byte;
}
/**
@@ -54,23 +94,53 @@ mei_pxp_send_message(struct device *dev, const void *message, size_t size)
* @dev: device corresponding to the mei_cl_device
* @buffer: a message buffer to contain the received message
* @size: size of the buffer
- * Return: bytes sent on Success, <0 on Failure
+ * @timeout_ms: timeout in milliseconds, zero means wait indefinitely.
+ *
+ * Returns: number of bytes received on Success, <0 on Failure with the following defined failures.
+ * -ENODEV: Client was not connected.
+ * Caller may attempt to try again from send immediately.
+ * -ENOMEM: Internal memory allocation failure experienced.
+ * Caller may sleep to allow kernel reclaim before retrying.
+ * -EINTR : Calling thread received a signal. Caller will need to repeat calling
+ * (with a different owning thread) to retrieve existing unclaimed response
+ * (and may discard it).
+ * -ETIME : Request is timed out.
+ * Caller may attempt to try again from send immediately.
*/
static int
-mei_pxp_receive_message(struct device *dev, void *buffer, size_t size)
+mei_pxp_receive_message(struct device *dev, void *buffer, size_t size, unsigned long timeout_ms)
{
struct mei_cl_device *cldev;
ssize_t byte;
+ bool retry = false;
+ int ret;
if (!dev || !buffer)
return -EINVAL;
cldev = to_mei_cl_device(dev);
- byte = mei_cldev_recv(cldev, buffer, size);
+retry:
+ byte = mei_cldev_recv_timeout(cldev, buffer, size, timeout_ms);
if (byte < 0) {
dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
- return byte;
+ switch (byte) {
+ case -ENOMEM:
+ /* Retry the read once after a short sleep to allow page reclaim */
+ msleep(20);
+ if (!retry) {
+ retry = true;
+ goto retry;
+ }
+ fallthrough;
+ case -ENODEV:
+ fallthrough;
+ case -ETIME:
+ ret = mei_pxp_reenable(dev, cldev);
+ if (ret)
+ byte = ret;
+ break;
+ }
}
return byte;
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
index 6e63f060e4cc..ac69b7f361f5 100644
--- a/drivers/misc/ocxl/file.c
+++ b/drivers/misc/ocxl/file.c
@@ -14,7 +14,6 @@
#define OCXL_NUM_MINORS 256 /* Total to reserve */
static dev_t ocxl_dev;
-static struct class *ocxl_class;
static DEFINE_MUTEX(minors_idr_lock);
static struct idr minors_idr;
@@ -509,6 +508,16 @@ static void ocxl_file_make_invisible(struct ocxl_file_info *info)
cdev_del(&info->cdev);
}
+static char *ocxl_devnode(const struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "ocxl/%s", dev_name(dev));
+}
+
+static const struct class ocxl_class = {
+ .name = "ocxl",
+ .devnode = ocxl_devnode,
+};
+
int ocxl_file_register_afu(struct ocxl_afu *afu)
{
int minor;
@@ -529,7 +538,7 @@ int ocxl_file_register_afu(struct ocxl_afu *afu)
info->dev.parent = &fn->dev;
info->dev.devt = MKDEV(MAJOR(ocxl_dev), minor);
- info->dev.class = ocxl_class;
+ info->dev.class = &ocxl_class;
info->dev.release = info_release;
info->afu = afu;
@@ -584,11 +593,6 @@ void ocxl_file_unregister_afu(struct ocxl_afu *afu)
device_unregister(&info->dev);
}
-static char *ocxl_devnode(const struct device *dev, umode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "ocxl/%s", dev_name(dev));
-}
-
int ocxl_file_init(void)
{
int rc;
@@ -601,20 +605,19 @@ int ocxl_file_init(void)
return rc;
}
- ocxl_class = class_create("ocxl");
- if (IS_ERR(ocxl_class)) {
+ rc = class_register(&ocxl_class);
+ if (rc) {
pr_err("Unable to create ocxl class\n");
unregister_chrdev_region(ocxl_dev, OCXL_NUM_MINORS);
- return PTR_ERR(ocxl_class);
+ return rc;
}
- ocxl_class->devnode = ocxl_devnode;
return 0;
}
void ocxl_file_exit(void)
{
- class_destroy(ocxl_class);
+ class_unregister(&ocxl_class);
unregister_chrdev_region(ocxl_dev, OCXL_NUM_MINORS);
idr_destroy(&minors_idr);
}
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 150083dab71a..af519088732d 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -71,6 +71,7 @@
#define PCI_DEVICE_ID_TI_AM654 0xb00c
#define PCI_DEVICE_ID_TI_J7200 0xb00f
#define PCI_DEVICE_ID_TI_AM64 0xb010
+#define PCI_DEVICE_ID_TI_J721S2 0xb013
#define PCI_DEVICE_ID_LS1088A 0x80c0
#define PCI_DEVICE_ID_IMX8 0x0808
@@ -1003,6 +1004,9 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
.driver_data = (kernel_ulong_t)&j721e_data,
},
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
+ .driver_data = (kernel_ulong_t)&j721e_data,
+ },
{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index 7966a6b8b5b3..30bd7c39c261 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -35,9 +35,12 @@
#define PHB_NOT_OH 2
static DEFINE_MUTEX(phantom_mutex);
-static struct class *phantom_class;
static int phantom_major;
+static const struct class phantom_class = {
+ .name = "phantom",
+};
+
struct phantom_device {
unsigned int opened;
void __iomem *caddr;
@@ -403,7 +406,7 @@ static int phantom_probe(struct pci_dev *pdev,
goto err_irq;
}
- if (IS_ERR(device_create(phantom_class, &pdev->dev,
+ if (IS_ERR(device_create(&phantom_class, &pdev->dev,
MKDEV(phantom_major, minor), NULL,
"phantom%u", minor)))
dev_err(&pdev->dev, "can't create device\n");
@@ -436,7 +439,7 @@ static void phantom_remove(struct pci_dev *pdev)
struct phantom_device *pht = pci_get_drvdata(pdev);
unsigned int minor = MINOR(pht->cdev.dev);
- device_destroy(phantom_class, MKDEV(phantom_major, minor));
+ device_destroy(&phantom_class, MKDEV(phantom_major, minor));
cdev_del(&pht->cdev);
@@ -503,13 +506,12 @@ static int __init phantom_init(void)
int retval;
dev_t dev;
- phantom_class = class_create("phantom");
- if (IS_ERR(phantom_class)) {
- retval = PTR_ERR(phantom_class);
+ retval = class_register(&phantom_class);
+ if (retval) {
printk(KERN_ERR "phantom: can't register phantom class\n");
goto err;
}
- retval = class_create_file(phantom_class, &class_attr_version.attr);
+ retval = class_create_file(&phantom_class, &class_attr_version.attr);
if (retval) {
printk(KERN_ERR "phantom: can't create sysfs version file\n");
goto err_class;
@@ -535,9 +537,9 @@ static int __init phantom_init(void)
err_unchr:
unregister_chrdev_region(dev, PHANTOM_MAX_MINORS);
err_attr:
- class_remove_file(phantom_class, &class_attr_version.attr);
+ class_remove_file(&phantom_class, &class_attr_version.attr);
err_class:
- class_destroy(phantom_class);
+ class_unregister(&phantom_class);
err:
return retval;
}
@@ -548,8 +550,8 @@ static void __exit phantom_exit(void)
unregister_chrdev_region(MKDEV(phantom_major, 0), PHANTOM_MAX_MINORS);
- class_remove_file(phantom_class, &class_attr_version.attr);
- class_destroy(phantom_class);
+ class_remove_file(&phantom_class, &class_attr_version.attr);
+ class_unregister(&phantom_class);
pr_debug("phantom: module successfully removed\n");
}
diff --git a/drivers/misc/pvpanic/pvpanic-mmio.c b/drivers/misc/pvpanic/pvpanic-mmio.c
index eb97167c03fb..9715798acce3 100644
--- a/drivers/misc/pvpanic/pvpanic-mmio.c
+++ b/drivers/misc/pvpanic/pvpanic-mmio.c
@@ -24,52 +24,9 @@ MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>");
MODULE_DESCRIPTION("pvpanic-mmio device driver");
MODULE_LICENSE("GPL");
-static ssize_t capability_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct pvpanic_instance *pi = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%x\n", pi->capability);
-}
-static DEVICE_ATTR_RO(capability);
-
-static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct pvpanic_instance *pi = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%x\n", pi->events);
-}
-
-static ssize_t events_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct pvpanic_instance *pi = dev_get_drvdata(dev);
- unsigned int tmp;
- int err;
-
- err = kstrtouint(buf, 16, &tmp);
- if (err)
- return err;
-
- if ((tmp & pi->capability) != tmp)
- return -EINVAL;
-
- pi->events = tmp;
-
- return count;
-}
-static DEVICE_ATTR_RW(events);
-
-static struct attribute *pvpanic_mmio_dev_attrs[] = {
- &dev_attr_capability.attr,
- &dev_attr_events.attr,
- NULL
-};
-ATTRIBUTE_GROUPS(pvpanic_mmio_dev);
-
static int pvpanic_mmio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct pvpanic_instance *pi;
struct resource *res;
void __iomem *base;
@@ -92,18 +49,7 @@ static int pvpanic_mmio_probe(struct platform_device *pdev)
return -EINVAL;
}
- pi = devm_kmalloc(dev, sizeof(*pi), GFP_KERNEL);
- if (!pi)
- return -ENOMEM;
-
- pi->base = base;
- pi->capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
-
- /* initialize capability by RDPT */
- pi->capability &= ioread8(base);
- pi->events = pi->capability;
-
- return devm_pvpanic_probe(dev, pi);
+ return devm_pvpanic_probe(dev, base);
}
static const struct of_device_id pvpanic_mmio_match[] = {
@@ -123,7 +69,7 @@ static struct platform_driver pvpanic_mmio_driver = {
.name = "pvpanic-mmio",
.of_match_table = pvpanic_mmio_match,
.acpi_match_table = pvpanic_device_ids,
- .dev_groups = pvpanic_mmio_dev_groups,
+ .dev_groups = pvpanic_dev_groups,
},
.probe = pvpanic_mmio_probe,
};
diff --git a/drivers/misc/pvpanic/pvpanic-pci.c b/drivers/misc/pvpanic/pvpanic-pci.c
index 07eddb5ea30f..689af4c28c2a 100644
--- a/drivers/misc/pvpanic/pvpanic-pci.c
+++ b/drivers/misc/pvpanic/pvpanic-pci.c
@@ -22,51 +22,8 @@ MODULE_AUTHOR("Mihai Carabas <mihai.carabas@oracle.com>");
MODULE_DESCRIPTION("pvpanic device driver");
MODULE_LICENSE("GPL");
-static ssize_t capability_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct pvpanic_instance *pi = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%x\n", pi->capability);
-}
-static DEVICE_ATTR_RO(capability);
-
-static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct pvpanic_instance *pi = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%x\n", pi->events);
-}
-
-static ssize_t events_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct pvpanic_instance *pi = dev_get_drvdata(dev);
- unsigned int tmp;
- int err;
-
- err = kstrtouint(buf, 16, &tmp);
- if (err)
- return err;
-
- if ((tmp & pi->capability) != tmp)
- return -EINVAL;
-
- pi->events = tmp;
-
- return count;
-}
-static DEVICE_ATTR_RW(events);
-
-static struct attribute *pvpanic_pci_dev_attrs[] = {
- &dev_attr_capability.attr,
- &dev_attr_events.attr,
- NULL
-};
-ATTRIBUTE_GROUPS(pvpanic_pci_dev);
-
static int pvpanic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct pvpanic_instance *pi;
void __iomem *base;
int ret;
@@ -78,18 +35,7 @@ static int pvpanic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
if (!base)
return -ENOMEM;
- pi = devm_kmalloc(&pdev->dev, sizeof(*pi), GFP_KERNEL);
- if (!pi)
- return -ENOMEM;
-
- pi->base = base;
- pi->capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
-
- /* initlize capability by RDPT */
- pi->capability &= ioread8(base);
- pi->events = pi->capability;
-
- return devm_pvpanic_probe(&pdev->dev, pi);
+ return devm_pvpanic_probe(&pdev->dev, base);
}
static const struct pci_device_id pvpanic_pci_id_tbl[] = {
@@ -103,7 +49,7 @@ static struct pci_driver pvpanic_pci_driver = {
.id_table = pvpanic_pci_id_tbl,
.probe = pvpanic_pci_probe,
.driver = {
- .dev_groups = pvpanic_pci_dev_groups,
+ .dev_groups = pvpanic_dev_groups,
},
};
module_pci_driver(pvpanic_pci_driver);
diff --git a/drivers/misc/pvpanic/pvpanic.c b/drivers/misc/pvpanic/pvpanic.c
index 049a12006348..305b367e0ce3 100644
--- a/drivers/misc/pvpanic/pvpanic.c
+++ b/drivers/misc/pvpanic/pvpanic.c
@@ -7,6 +7,7 @@
* Copyright (C) 2021 Oracle.
*/
+#include <linux/device.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
@@ -26,6 +27,13 @@ MODULE_AUTHOR("Mihai Carabas <mihai.carabas@oracle.com>");
MODULE_DESCRIPTION("pvpanic device driver");
MODULE_LICENSE("GPL");
+struct pvpanic_instance {
+ void __iomem *base;
+ unsigned int capability;
+ unsigned int events;
+ struct list_head list;
+};
+
static struct list_head pvpanic_list;
static spinlock_t pvpanic_lock;
@@ -81,11 +89,75 @@ static void pvpanic_remove(void *param)
spin_unlock(&pvpanic_lock);
}
-int devm_pvpanic_probe(struct device *dev, struct pvpanic_instance *pi)
+static ssize_t capability_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pvpanic_instance *pi = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%x\n", pi->capability);
+}
+static DEVICE_ATTR_RO(capability);
+
+static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pvpanic_instance *pi = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%x\n", pi->events);
+}
+
+static ssize_t events_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pvpanic_instance *pi = dev_get_drvdata(dev);
+ unsigned int tmp;
+ int err;
+
+ err = kstrtouint(buf, 16, &tmp);
+ if (err)
+ return err;
+
+ if ((tmp & pi->capability) != tmp)
+ return -EINVAL;
+
+ pi->events = tmp;
+
+ return count;
+}
+static DEVICE_ATTR_RW(events);
+
+static struct attribute *pvpanic_dev_attrs[] = {
+ &dev_attr_capability.attr,
+ &dev_attr_events.attr,
+ NULL
+};
+
+static const struct attribute_group pvpanic_dev_group = {
+ .attrs = pvpanic_dev_attrs,
+};
+
+const struct attribute_group *pvpanic_dev_groups[] = {
+ &pvpanic_dev_group,
+ NULL
+};
+EXPORT_SYMBOL_GPL(pvpanic_dev_groups);
+
+int devm_pvpanic_probe(struct device *dev, void __iomem *base)
{
- if (!pi || !pi->base)
+ struct pvpanic_instance *pi;
+
+ if (!base)
return -EINVAL;
+ pi = devm_kmalloc(dev, sizeof(*pi), GFP_KERNEL);
+ if (!pi)
+ return -ENOMEM;
+
+ pi->base = base;
+ pi->capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
+
+ /* initialize capability by RDPT */
+ pi->capability &= ioread8(base);
+ pi->events = pi->capability;
+
spin_lock(&pvpanic_lock);
list_add(&pi->list, &pvpanic_list);
spin_unlock(&pvpanic_lock);
diff --git a/drivers/misc/pvpanic/pvpanic.h b/drivers/misc/pvpanic/pvpanic.h
index 493545951754..46ffb10438ad 100644
--- a/drivers/misc/pvpanic/pvpanic.h
+++ b/drivers/misc/pvpanic/pvpanic.h
@@ -8,13 +8,7 @@
#ifndef PVPANIC_H_
#define PVPANIC_H_
-struct pvpanic_instance {
- void __iomem *base;
- unsigned int capability;
- unsigned int events;
- struct list_head list;
-};
-
-int devm_pvpanic_probe(struct device *dev, struct pvpanic_instance *pi);
+int devm_pvpanic_probe(struct device *dev, void __iomem *base);
+extern const struct attribute_group *pvpanic_dev_groups[];
#endif /* PVPANIC_H_ */
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index c1a134bd8ba7..b878431553ab 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -15,6 +15,7 @@
#include <linux/skbuff.h>
#include <linux/ti_wilink_st.h>
+#include <linux/netdevice.h>
/*
* function pointer pointing to either,
@@ -429,7 +430,7 @@ static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
case ST_LL_AWAKE_TO_ASLEEP:
pr_err("ST LL is illegal state(%ld),"
"purging received skb.", st_ll_getstate(st_gdata));
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
break;
case ST_LL_ASLEEP:
skb_queue_tail(&st_gdata->tx_waitq, skb);
@@ -438,7 +439,7 @@ static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
default:
pr_err("ST LL is illegal state(%ld),"
"purging received skb.", st_ll_getstate(st_gdata));
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
break;
}
@@ -492,7 +493,7 @@ void st_tx_wakeup(struct st_data_s *st_data)
spin_unlock_irqrestore(&st_data->lock, flags);
break;
}
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
spin_unlock_irqrestore(&st_data->lock, flags);
}
/* if wake-up is set in another context- restart sending */
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index fe682e0553b2..4b1be0bb6ac0 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -590,7 +590,7 @@ static ssize_t store_dev_name(struct device *dev,
{
struct kim_data_s *kim_data = dev_get_drvdata(dev);
pr_debug("storing dev name >%s<", buf);
- strncpy(kim_data->dev_name, buf, count);
+ strscpy(kim_data->dev_name, buf, sizeof(kim_data->dev_name));
pr_debug("stored dev name >%s<", kim_data->dev_name);
return count;
}
@@ -751,7 +751,8 @@ static int kim_probe(struct platform_device *pdev)
}
/* copying platform data */
- strncpy(kim_gdata->dev_name, pdata->dev_name, UART_DEV_NAME_LEN);
+ strscpy(kim_gdata->dev_name, pdata->dev_name,
+ sizeof(kim_gdata->dev_name));
kim_gdata->flow_cntrl = pdata->flow_cntrl;
kim_gdata->baud_rate = pdata->baud_rate;
pr_info("sysfs entries created\n");
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index 930c252753a0..bdc2e6fda782 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -7,10 +7,13 @@
#include <linux/slab.h>
#include <linux/uacce.h>
-static struct class *uacce_class;
static dev_t uacce_devt;
static DEFINE_XARRAY_ALLOC(uacce_xa);
+static const struct class uacce_class = {
+ .name = UACCE_NAME,
+};
+
/*
* If the parent driver or the device disappears, the queue state is invalid and
* ops are not usable anymore.
@@ -530,7 +533,7 @@ struct uacce_device *uacce_alloc(struct device *parent,
mutex_init(&uacce->mutex);
device_initialize(&uacce->dev);
uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
- uacce->dev.class = uacce_class;
+ uacce->dev.class = &uacce_class;
uacce->dev.groups = uacce_dev_groups;
uacce->dev.parent = uacce->parent;
uacce->dev.release = uacce_release;
@@ -623,13 +626,13 @@ static int __init uacce_init(void)
{
int ret;
- uacce_class = class_create(UACCE_NAME);
- if (IS_ERR(uacce_class))
- return PTR_ERR(uacce_class);
+ ret = class_register(&uacce_class);
+ if (ret)
+ return ret;
ret = alloc_chrdev_region(&uacce_devt, 0, MINORMASK, UACCE_NAME);
if (ret)
- class_destroy(uacce_class);
+ class_unregister(&uacce_class);
return ret;
}
@@ -637,7 +640,7 @@ static int __init uacce_init(void)
static __exit void uacce_exit(void)
{
unregister_chrdev_region(uacce_devt, MINORMASK);
- class_destroy(uacce_class);
+ class_unregister(&uacce_class);
}
subsys_initcall(uacce_init);
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 11b06fefaa0e..c10693ba265b 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -422,9 +422,25 @@ read_pri_intelext(struct map_info *map, __u16 adr)
extra_size = 0;
/* Protection Register info */
- if (extp->NumProtectionFields)
+ if (extp->NumProtectionFields) {
+ struct cfi_intelext_otpinfo *otp =
+ (struct cfi_intelext_otpinfo *)&extp->extra[0];
+
extra_size += (extp->NumProtectionFields - 1) *
- sizeof(struct cfi_intelext_otpinfo);
+ sizeof(struct cfi_intelext_otpinfo);
+
+ if (extp_size >= sizeof(*extp) + extra_size) {
+ int i;
+
+ /* Do some byteswapping if necessary */
+ for (i = 0; i < extp->NumProtectionFields - 1; i++) {
+ otp->ProtRegAddr = le32_to_cpu(otp->ProtRegAddr);
+ otp->FactGroups = le16_to_cpu(otp->FactGroups);
+ otp->UserGroups = le16_to_cpu(otp->UserGroups);
+ otp++;
+ }
+ }
+ }
}
if (extp->MinorVersion >= '1') {
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
index e8dd6496927e..f9d3e32ef8e9 100644
--- a/drivers/mtd/chips/map_ram.c
+++ b/drivers/mtd/chips/map_ram.c
@@ -70,12 +70,16 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
mtd->_read = mapram_read;
mtd->_write = mapram_write;
mtd->_panic_write = mapram_write;
- mtd->_point = mapram_point;
mtd->_sync = mapram_nop;
- mtd->_unpoint = mapram_unpoint;
mtd->flags = MTD_CAP_RAM;
mtd->writesize = 1;
+ /* Disable direct access when NO_XIP is set */
+ if (map->phys != NO_XIP) {
+ mtd->_point = mapram_point;
+ mtd->_unpoint = mapram_unpoint;
+ }
+
mtd->erasesize = PAGE_SIZE;
while(mtd->size & (mtd->erasesize - 1))
mtd->erasesize >>= 1;
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
index 3af50db8b21b..74f559bf8dfb 100644
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -357,19 +357,17 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
return 0;
}
-static int bcm47xxsflash_bcma_remove(struct platform_device *pdev)
+static void bcm47xxsflash_bcma_remove(struct platform_device *pdev)
{
struct bcm47xxsflash *b47s = platform_get_drvdata(pdev);
mtd_device_unregister(&b47s->mtd);
iounmap(b47s->window);
-
- return 0;
}
static struct platform_driver bcma_sflash_driver = {
.probe = bcm47xxsflash_bcma_probe,
- .remove = bcm47xxsflash_bcma_remove,
+ .remove_new = bcm47xxsflash_bcma_remove,
.driver = {
.name = "bcma_sflash",
},
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 22e73dd6118b..a2b643af7019 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -2046,7 +2046,7 @@ err_probe:
*
* Returns 0
*/
-static int docg3_release(struct platform_device *pdev)
+static void docg3_release(struct platform_device *pdev)
{
struct docg3_cascade *cascade = platform_get_drvdata(pdev);
struct docg3 *docg3 = cascade->floors[0]->priv;
@@ -2058,7 +2058,6 @@ static int docg3_release(struct platform_device *pdev)
doc_release_device(cascade->floors[floor]);
bch_free(docg3->cascade->bch);
- return 0;
}
#ifdef CONFIG_OF
@@ -2076,7 +2075,7 @@ static struct platform_driver g3_driver = {
},
.suspend = docg3_suspend,
.resume = docg3_resume,
- .remove = docg3_release,
+ .remove_new = docg3_release,
};
module_platform_driver_probe(g3_driver, docg3_probe);
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 208bd4d871f4..1bf192f229d7 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -388,20 +388,18 @@ static int phram_probe(struct platform_device *pdev)
PAGE_SIZE);
}
-static int phram_remove(struct platform_device *pdev)
+static void phram_remove(struct platform_device *pdev)
{
struct phram_mtd_list *phram = platform_get_drvdata(pdev);
mtd_device_unregister(&phram->mtd);
phram_unmap(phram);
kfree(phram);
-
- return 0;
}
static struct platform_driver phram_driver = {
.probe = phram_probe,
- .remove = phram_remove,
+ .remove_new = phram_remove,
.driver = {
.name = "phram",
.of_match_table = of_match_ptr(phram_of_match),
diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c
index 36e060386e59..66044f4f5bad 100644
--- a/drivers/mtd/devices/powernv_flash.c
+++ b/drivers/mtd/devices/powernv_flash.c
@@ -265,14 +265,12 @@ static int powernv_flash_probe(struct platform_device *pdev)
*
* Returns 0
*/
-static int powernv_flash_release(struct platform_device *pdev)
+static void powernv_flash_release(struct platform_device *pdev)
{
struct powernv_flash *data = dev_get_drvdata(&(pdev->dev));
/* All resources should be freed automatically */
WARN_ON(mtd_device_unregister(&data->mtd));
-
- return 0;
}
static const struct of_device_id powernv_flash_match[] = {
@@ -285,7 +283,7 @@ static struct platform_driver powernv_flash_driver = {
.name = "powernv_flash",
.of_match_table = powernv_flash_match,
},
- .remove = powernv_flash_release,
+ .remove_new = powernv_flash_release,
.probe = powernv_flash_probe,
};
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index 0a35e5236ae5..1574296d47e2 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -1031,7 +1031,7 @@ err:
*
* free all allocations and delete the partitions.
*/
-static int spear_smi_remove(struct platform_device *pdev)
+static void spear_smi_remove(struct platform_device *pdev)
{
struct spear_smi *dev;
struct spear_snor_flash *flash;
@@ -1048,8 +1048,6 @@ static int spear_smi_remove(struct platform_device *pdev)
/* clean up mtd stuff */
WARN_ON(mtd_device_unregister(&flash->mtd));
}
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1095,7 +1093,7 @@ static struct platform_driver spear_smi_driver = {
.pm = &spear_smi_pm_ops,
},
.probe = spear_smi_probe,
- .remove = spear_smi_remove,
+ .remove_new = spear_smi_remove,
};
module_platform_driver(spear_smi_driver);
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index 95530cbbb1e0..3268de5fc780 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -2097,13 +2097,11 @@ static int stfsm_probe(struct platform_device *pdev)
return mtd_device_register(&fsm->mtd, NULL, 0);
}
-static int stfsm_remove(struct platform_device *pdev)
+static void stfsm_remove(struct platform_device *pdev)
{
struct stfsm *fsm = platform_get_drvdata(pdev);
WARN_ON(mtd_device_unregister(&fsm->mtd));
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -2134,7 +2132,7 @@ MODULE_DEVICE_TABLE(of, stfsm_match);
static struct platform_driver stfsm_driver = {
.probe = stfsm_probe,
- .remove = stfsm_remove,
+ .remove_new = stfsm_remove,
.driver = {
.name = "st-spi-fsm",
.of_match_table = stfsm_match,
diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c
index a6161ce340d4..dbe3eb361cca 100644
--- a/drivers/mtd/hyperbus/hbmc-am654.c
+++ b/drivers/mtd/hyperbus/hbmc-am654.c
@@ -229,7 +229,7 @@ disable_mux:
return ret;
}
-static int am654_hbmc_remove(struct platform_device *pdev)
+static void am654_hbmc_remove(struct platform_device *pdev)
{
struct am654_hbmc_priv *priv = platform_get_drvdata(pdev);
struct am654_hbmc_device_priv *dev_priv = priv->hbdev.priv;
@@ -241,8 +241,6 @@ static int am654_hbmc_remove(struct platform_device *pdev)
if (dev_priv->rx_chan)
dma_release_channel(dev_priv->rx_chan);
-
- return 0;
}
static const struct of_device_id am654_hbmc_dt_ids[] = {
@@ -256,7 +254,7 @@ MODULE_DEVICE_TABLE(of, am654_hbmc_dt_ids);
static struct platform_driver am654_hbmc_platform_driver = {
.probe = am654_hbmc_probe,
- .remove = am654_hbmc_remove,
+ .remove_new = am654_hbmc_remove,
.driver = {
.name = "hbmc-am654",
.of_match_table = am654_hbmc_dt_ids,
diff --git a/drivers/mtd/hyperbus/rpc-if.c b/drivers/mtd/hyperbus/rpc-if.c
index ef32fca5f785..b22aa57119f2 100644
--- a/drivers/mtd/hyperbus/rpc-if.c
+++ b/drivers/mtd/hyperbus/rpc-if.c
@@ -154,20 +154,18 @@ out_disable_rpm:
return error;
}
-static int rpcif_hb_remove(struct platform_device *pdev)
+static void rpcif_hb_remove(struct platform_device *pdev)
{
struct rpcif_hyperbus *hyperbus = platform_get_drvdata(pdev);
hyperbus_unregister_device(&hyperbus->hbdev);
pm_runtime_disable(hyperbus->rpc.dev);
-
- return 0;
}
static struct platform_driver rpcif_platform_driver = {
.probe = rpcif_hb_probe,
- .remove = rpcif_hb_remove,
+ .remove_new = rpcif_hb_remove,
.driver = {
.name = "rpc-if-hyperflash",
},
diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
index f4e5174b2449..9169e1155dbb 100644
--- a/drivers/mtd/lpddr/lpddr2_nvm.c
+++ b/drivers/mtd/lpddr/lpddr2_nvm.c
@@ -476,11 +476,9 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
/*
* lpddr2_nvm driver remove method
*/
-static int lpddr2_nvm_remove(struct platform_device *pdev)
+static void lpddr2_nvm_remove(struct platform_device *pdev)
{
WARN_ON(mtd_device_unregister(dev_get_drvdata(&pdev->dev)));
-
- return 0;
}
/* Initialize platform_driver data structure for lpddr2_nvm */
@@ -489,7 +487,7 @@ static struct platform_driver lpddr2_nvm_drv = {
.name = "lpddr2_nvm",
},
.probe = lpddr2_nvm_probe,
- .remove = lpddr2_nvm_remove,
+ .remove_new = lpddr2_nvm_remove,
};
module_platform_driver(lpddr2_nvm_drv);
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index 3c3939bc2dad..14e36ae71958 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -61,7 +61,7 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
mtd->_point = lpddr_point;
mtd->_unpoint = lpddr_unpoint;
}
- mtd->size = 1 << lpddr->qinfo->DevSizeShift;
+ mtd->size = 1ULL << lpddr->qinfo->DevSizeShift;
mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index a1da1c8973c0..124b13c5d747 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -166,8 +166,7 @@ err_destroy:
return err;
}
-static int
-ltq_mtd_remove(struct platform_device *pdev)
+static void ltq_mtd_remove(struct platform_device *pdev)
{
struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev);
@@ -175,7 +174,6 @@ ltq_mtd_remove(struct platform_device *pdev)
mtd_device_unregister(ltq_mtd->mtd);
map_destroy(ltq_mtd->mtd);
}
- return 0;
}
static const struct of_device_id ltq_mtd_match[] = {
@@ -186,7 +184,7 @@ MODULE_DEVICE_TABLE(of, ltq_mtd_match);
static struct platform_driver ltq_mtd_driver = {
.probe = ltq_mtd_probe,
- .remove = ltq_mtd_remove,
+ .remove_new = ltq_mtd_remove,
.driver = {
.name = "ltq-nor",
.of_match_table = ltq_mtd_match,
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index fc8721339282..746a27d15d44 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -30,6 +30,7 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
@@ -37,7 +38,7 @@
#include <linux/mtd/concat.h>
#include <linux/mtd/cfi_endian.h>
#include <linux/io.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/gpio/consumer.h>
@@ -62,7 +63,7 @@ struct physmap_flash_info {
unsigned int win_order;
};
-static int physmap_flash_remove(struct platform_device *dev)
+static void physmap_flash_remove(struct platform_device *dev)
{
struct physmap_flash_info *info;
struct physmap_flash_data *physmap_data;
@@ -88,7 +89,6 @@ static int physmap_flash_remove(struct platform_device *dev)
pm_runtime_put(&dev->dev);
pm_runtime_disable(&dev->dev);
- return 0;
}
static void physmap_set_vpp(struct map_info *map, int state)
@@ -296,14 +296,9 @@ static const char * const *of_get_part_probes(struct platform_device *dev)
static const char *of_select_probe_type(struct platform_device *dev)
{
struct device_node *dp = dev->dev.of_node;
- const struct of_device_id *match;
const char *probe_type;
- match = of_match_device(of_flash_match, &dev->dev);
- if (!match)
- return NULL;
-
- probe_type = match->data;
+ probe_type = device_get_match_data(&dev->dev);
if (probe_type)
return probe_type;
@@ -626,7 +621,7 @@ static void physmap_flash_shutdown(struct platform_device *dev)
static struct platform_driver physmap_flash_driver = {
.probe = physmap_flash_probe,
- .remove = physmap_flash_remove,
+ .remove_new = physmap_flash_remove,
.shutdown = physmap_flash_shutdown,
.driver = {
.name = "physmap-flash",
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 4c921dce7396..8b736f029f81 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -65,14 +65,14 @@ static inline void platram_setrw(struct platram_info *info, int to)
* called to remove the device from the driver's control
*/
-static int platram_remove(struct platform_device *pdev)
+static void platram_remove(struct platform_device *pdev)
{
struct platram_info *info = to_platram_info(pdev);
dev_dbg(&pdev->dev, "removing device\n");
if (info == NULL)
- return 0;
+ return;
if (info->mtd) {
mtd_device_unregister(info->mtd);
@@ -84,8 +84,6 @@ static int platram_remove(struct platform_device *pdev)
platram_setrw(info, PLATRAM_RO);
kfree(info);
-
- return 0;
}
/* platram_probe
@@ -207,7 +205,7 @@ MODULE_ALIAS("platform:mtd-ram");
static struct platform_driver platram_driver = {
.probe = platram_probe,
- .remove = platram_remove,
+ .remove_new = platram_remove,
.driver = {
.name = "mtd-ram",
},
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 62a5bf41a6d7..f2a2d4706f1f 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -98,7 +98,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
return 0;
}
-static int pxa2xx_flash_remove(struct platform_device *dev)
+static void pxa2xx_flash_remove(struct platform_device *dev)
{
struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
@@ -109,7 +109,6 @@ static int pxa2xx_flash_remove(struct platform_device *dev)
if (info->map.cached)
iounmap(info->map.cached);
kfree(info);
- return 0;
}
#ifdef CONFIG_PM
@@ -129,7 +128,7 @@ static struct platform_driver pxa2xx_flash_driver = {
.name = "pxa2xx-flash",
},
.probe = pxa2xx_flash_probe,
- .remove = pxa2xx_flash_remove,
+ .remove_new = pxa2xx_flash_remove,
.shutdown = pxa2xx_flash_shutdown,
};
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index d3d4e987c163..d4ce2376d33f 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -285,19 +285,17 @@ static int sa1100_mtd_probe(struct platform_device *pdev)
return err;
}
-static int sa1100_mtd_remove(struct platform_device *pdev)
+static void sa1100_mtd_remove(struct platform_device *pdev)
{
struct sa_info *info = platform_get_drvdata(pdev);
struct flash_platform_data *plat = dev_get_platdata(&pdev->dev);
sa1100_destroy(info, plat);
-
- return 0;
}
static struct platform_driver sa1100_mtd_driver = {
.probe = sa1100_mtd_probe,
- .remove = sa1100_mtd_remove,
+ .remove_new = sa1100_mtd_remove,
.driver = {
.name = "sa1100-mtd",
},
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 2bfdf1b7e18a..f58cfb15d6e8 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -118,7 +118,7 @@ static int uflash_probe(struct platform_device *op)
return uflash_devinit(op, dp);
}
-static int uflash_remove(struct platform_device *op)
+static void uflash_remove(struct platform_device *op)
{
struct uflash_dev *up = dev_get_drvdata(&op->dev);
@@ -132,8 +132,6 @@ static int uflash_remove(struct platform_device *op)
}
kfree(up);
-
- return 0;
}
static const struct of_device_id uflash_match[] = {
@@ -151,7 +149,7 @@ static struct platform_driver uflash_driver = {
.of_match_table = uflash_match,
},
.probe = uflash_probe,
- .remove = uflash_remove,
+ .remove_new = uflash_remove,
};
module_platform_driver(uflash_driver);
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 9bd661be3ae9..bb0759ca12f1 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -552,6 +552,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
config.dev = &mtd->dev;
config.name = dev_name(&mtd->dev);
config.owner = THIS_MODULE;
+ config.add_legacy_fixed_of_cells = of_device_is_compatible(node, "nvmem-cells");
config.reg_read = mtd_nvmem_reg_read;
config.size = mtd->size;
config.word_size = 1;
@@ -559,7 +560,6 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
config.read_only = true;
config.root_only = true;
config.ignore_wp = true;
- config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
config.priv = mtd;
mtd->nvmem = nvmem_register(&config);
@@ -898,6 +898,7 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
config.name = compatible;
config.id = NVMEM_DEVID_AUTO;
config.owner = THIS_MODULE;
+ config.add_legacy_fixed_of_cells = true;
config.type = NVMEM_TYPE_OTP;
config.root_only = true;
config.ignore_wp = true;
@@ -1505,6 +1506,8 @@ int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
ret = mtd_read_oob(mtd, from, &ops);
*retlen = ops.retlen;
+ WARN_ON_ONCE(*retlen != len && mtd_is_bitflip_or_eccerr(ret));
+
return ret;
}
EXPORT_SYMBOL_GPL(mtd_read);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 23483db8f30c..6811a714349d 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -426,7 +426,11 @@ int add_mtd_partitions(struct mtd_info *parent,
mtd_add_partition_attrs(child);
/* Look for subpartitions */
- parse_mtd_partitions(child, parts[i].types, NULL);
+ ret = parse_mtd_partitions(child, parts[i].types, NULL);
+ if (ret < 0) {
+ pr_err("Failed to parse subpartitions: %d\n", ret);
+ goto err_del_partitions;
+ }
cur_offset = child->part.offset + child->part.size;
}
diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
index a492051c46f5..2ff1d2b13e3c 100644
--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
+++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
@@ -481,7 +481,7 @@ static int anfc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
}
bf = nand_check_erased_ecc_chunk(raw_buf, chip->ecc.size,
- NULL, 0, NULL, 0,
+ anand->hw_ecc, chip->ecc.bytes, NULL, 0,
chip->ecc.strength);
if (bf > 0) {
mtd->ecc_stats.corrected += bf;
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index 3f494f7c7ecb..4cb478bbee4a 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -165,7 +165,7 @@ struct atmel_nand {
struct atmel_pmecc_user *pmecc;
struct gpio_desc *cdgpio;
int numcs;
- struct atmel_nand_cs cs[];
+ struct atmel_nand_cs cs[] __counted_by(numcs);
};
static inline struct atmel_nand *to_atmel_nand(struct nand_chip *chip)
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
index 034ec564c2ed..04f84d87c657 100644
--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -15,8 +15,10 @@
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
-#include <linux/of_device.h>
#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
/*
@@ -526,7 +528,7 @@ struct cdns_nand_chip {
/* ECC strength index. */
u8 corr_str_idx;
- u8 cs[];
+ u8 cs[] __counted_by(nsels);
};
struct ecc_info {
@@ -2995,15 +2997,11 @@ static int cadence_nand_dt_probe(struct platform_device *ofdev)
struct cadence_nand_dt *dt;
struct cdns_nand_ctrl *cdns_ctrl;
int ret;
- const struct of_device_id *of_id;
const struct cadence_nand_dt_devdata *devdata;
u32 val;
- of_id = of_match_device(cadence_nand_dt_ids, &ofdev->dev);
- if (of_id) {
- ofdev->id_entry = of_id->data;
- devdata = of_id->data;
- } else {
+ devdata = device_get_match_data(&ofdev->dev);
+ if (!devdata) {
pr_err("Failed to find the right device id.\n");
return -ENOMEM;
}
diff --git a/drivers/mtd/nand/raw/denali.h b/drivers/mtd/nand/raw/denali.h
index ac46eb7956ce..5f2fab022fc5 100644
--- a/drivers/mtd/nand/raw/denali.h
+++ b/drivers/mtd/nand/raw/denali.h
@@ -328,7 +328,7 @@ struct denali_chip {
struct nand_chip chip;
struct list_head node;
unsigned int nsels;
- struct denali_chip_sel sels[];
+ struct denali_chip_sel sels[] __counted_by(nsels);
};
/**
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
index c816dc137245..0e7dd9ca4b2b 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
@@ -46,7 +46,7 @@ struct ingenic_nfc {
struct nand_controller controller;
unsigned int num_banks;
struct list_head chips;
- struct ingenic_nand_cs cs[];
+ struct ingenic_nand_cs cs[] __counted_by(num_banks);
};
struct ingenic_nand {
diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
index cb5d88f42297..f0ad2308f6d5 100644
--- a/drivers/mtd/nand/raw/intel-nand-controller.c
+++ b/drivers/mtd/nand/raw/intel-nand-controller.c
@@ -619,6 +619,11 @@ static int ebu_nand_probe(struct platform_device *pdev)
ebu_host->cs_num = cs;
resname = devm_kasprintf(dev, GFP_KERNEL, "nand_cs%d", cs);
+ if (!resname) {
+ ret = -ENOMEM;
+ goto err_of_node_put;
+ }
+
ebu_host->cs[cs].chipaddr = devm_platform_ioremap_resource_byname(pdev,
resname);
if (IS_ERR(ebu_host->cs[cs].chipaddr)) {
@@ -649,6 +654,11 @@ static int ebu_nand_probe(struct platform_device *pdev)
}
resname = devm_kasprintf(dev, GFP_KERNEL, "addr_sel%d", cs);
+ if (!resname) {
+ ret = -ENOMEM;
+ goto err_cleanup_dma;
+ }
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
if (!res) {
ret = -EINVAL;
diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h
index e9932da18bdd..b7162ced9efa 100644
--- a/drivers/mtd/nand/raw/internals.h
+++ b/drivers/mtd/nand/raw/internals.h
@@ -106,7 +106,6 @@ int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
int oob_required, int page);
int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
int oob_required, int page);
-int nand_exit_status_op(struct nand_chip *chip);
int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
unsigned int len);
void nand_decode_ext_id(struct nand_chip *chip);
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index b841a81cb128..a46698744850 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -348,7 +348,7 @@ struct marvell_nand_chip {
int addr_cyc;
int selected_die;
unsigned int nsels;
- struct marvell_nand_chip_sel sels[];
+ struct marvell_nand_chip_sel sels[] __counted_by(nsels);
};
static inline struct marvell_nand_chip *to_marvell_nand(struct nand_chip *chip)
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index 25e3c1cb605e..71ec4052e52a 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -128,7 +128,7 @@ struct meson_nfc_nand_chip {
u8 *data_buf;
__le64 *info_buf;
u32 nsels;
- u8 sels[];
+ u8 sels[] __counted_by(nsels);
};
struct meson_nand_ecc {
@@ -1134,6 +1134,9 @@ static int meson_nfc_clk_init(struct meson_nfc *nfc)
init.name = devm_kasprintf(nfc->dev,
GFP_KERNEL, "%s#div",
dev_name(nfc->dev));
+ if (!init.name)
+ return -ENOMEM;
+
init.ops = &clk_divider_ops;
nfc_divider_parent_data[0].fw_name = "device";
init.parent_data = nfc_divider_parent_data;
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index 29c8bddde67f..60198e33d2d5 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -130,7 +130,7 @@ struct mtk_nfc_nand_chip {
u32 spare_per_sector;
int nsels;
- u8 sels[];
+ u8 sels[] __counted_by(nsels);
/* nothing after this field */
};
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 1fcac403cee6..9e24bedffd89 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -42,7 +42,6 @@
#include <linux/io.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include "internals.h"
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index c45bef6158e7..cf76afc6c0ed 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -1881,8 +1881,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
case NAND_OMAP_PREFETCH_IRQ:
info->gpmc_irq_fifo = platform_get_irq(info->pdev, 0);
- if (info->gpmc_irq_fifo <= 0)
- return -ENODEV;
+ if (info->gpmc_irq_fifo < 0)
+ return info->gpmc_irq_fifo;
err = devm_request_irq(dev, info->gpmc_irq_fifo,
omap_nand_irq, IRQF_SHARED,
"gpmc-nand-fifo", info);
@@ -1894,8 +1894,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
}
info->gpmc_irq_count = platform_get_irq(info->pdev, 1);
- if (info->gpmc_irq_count <= 0)
- return -ENODEV;
+ if (info->gpmc_irq_count < 0)
+ return info->gpmc_irq_count;
err = devm_request_irq(dev, info->gpmc_irq_count,
omap_nand_irq, IRQF_SHARED,
"gpmc-nand-count", info);
diff --git a/drivers/mtd/nand/raw/renesas-nand-controller.c b/drivers/mtd/nand/raw/renesas-nand-controller.c
index 589021ea9eb2..c9a01feff8df 100644
--- a/drivers/mtd/nand/raw/renesas-nand-controller.c
+++ b/drivers/mtd/nand/raw/renesas-nand-controller.c
@@ -210,7 +210,7 @@ struct rnand_chip {
u32 tim_gen_seq1;
u32 tim_gen_seq2;
u32 tim_gen_seq3;
- struct rnand_chip_sel sels[];
+ struct rnand_chip_sel sels[] __counted_by(nsels);
};
struct rnandc {
diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c
index 5bc90ffa721f..596cf9a78274 100644
--- a/drivers/mtd/nand/raw/rockchip-nand-controller.c
+++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c
@@ -158,8 +158,7 @@ struct rk_nfc_nand_chip {
u32 timing;
u8 nsels;
- u8 sels[];
- /* Nothing after this field. */
+ u8 sels[] __counted_by(nsels);
};
struct rk_nfc {
@@ -1119,7 +1118,7 @@ static int rk_nfc_nand_chip_init(struct device *dev, struct rk_nfc *nfc,
return -EINVAL;
}
- rknand = devm_kzalloc(dev, sizeof(*rknand) + nsels * sizeof(u8),
+ rknand = devm_kzalloc(dev, struct_size(rknand, sels, nsels),
GFP_KERNEL);
if (!rknand)
return -ENOMEM;
diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
index 3e5df75cbc98..2a8164efb273 100644
--- a/drivers/mtd/nand/raw/sh_flctl.c
+++ b/drivers/mtd/nand/raw/sh_flctl.c
@@ -1215,6 +1215,7 @@ static void flctl_remove(struct platform_device *pdev)
}
static struct platform_driver flctl_driver = {
+ .probe = flctl_probe,
.remove_new = flctl_remove,
.driver = {
.name = "sh_flctl",
@@ -1222,7 +1223,7 @@ static struct platform_driver flctl_driver = {
},
};
-module_platform_driver_probe(flctl_driver, flctl_probe);
+module_platform_driver(flctl_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoshihiro Shimoda");
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 9abf38049d35..4ec17c8bce5a 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -197,7 +197,7 @@ struct sunxi_nand_chip {
u32 timing_cfg;
u32 timing_ctl;
int nsels;
- struct sunxi_nand_chip_sel sels[];
+ struct sunxi_nand_chip_sel sels[] __counted_by(nsels);
};
static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
index eb0b9d16e8da..a553e3ac8ff4 100644
--- a/drivers/mtd/nand/raw/tegra_nand.c
+++ b/drivers/mtd/nand/raw/tegra_nand.c
@@ -1197,6 +1197,10 @@ static int tegra_nand_probe(struct platform_device *pdev)
init_completion(&ctrl->dma_complete);
ctrl->irq = platform_get_irq(pdev, 0);
+ if (ctrl->irq < 0) {
+ err = ctrl->irq;
+ goto err_put_pm;
+ }
err = devm_request_irq(&pdev->dev, ctrl->irq, tegra_nand_irq, 0,
dev_name(&pdev->dev), ctrl);
if (err) {
diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c
index 3f783b8f76c9..f31d23219f91 100644
--- a/drivers/mtd/nand/raw/vf610_nfc.c
+++ b/drivers/mtd/nand/raw/vf610_nfc.c
@@ -29,8 +29,9 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/swab.h>
@@ -810,7 +811,6 @@ static int vf610_nfc_probe(struct platform_device *pdev)
struct mtd_info *mtd;
struct nand_chip *chip;
struct device_node *child;
- const struct of_device_id *of_id;
int err;
int irq;
@@ -840,12 +840,10 @@ static int vf610_nfc_probe(struct platform_device *pdev)
return PTR_ERR(nfc->clk);
}
- of_id = of_match_device(vf610_nfc_dt_ids, &pdev->dev);
- if (!of_id)
+ nfc->variant = (enum vf610_nfc_variant)device_get_match_data(&pdev->dev);
+ if (!nfc->variant)
return -ENODEV;
- nfc->variant = (uintptr_t)of_id->data;
-
for_each_available_child_of_node(nfc->dev->of_node, child) {
if (of_device_is_compatible(child, "fsl,vf610-nfc-nandcs")) {
diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c
index 51d802a165ed..008549011fb9 100644
--- a/drivers/mtd/nand/raw/xway_nand.c
+++ b/drivers/mtd/nand/raw/xway_nand.c
@@ -6,7 +6,6 @@
*/
#include <linux/mtd/rawnand.h>
-#include <linux/of_gpio.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/mtd/nand/spi/Makefile b/drivers/mtd/nand/spi/Makefile
index cd8b66bf7740..19cc77288ebb 100644
--- a/drivers/mtd/nand/spi/Makefile
+++ b/drivers/mtd/nand/spi/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-spinand-objs := core.o alliancememory.o ato.o esmt.o gigadevice.o macronix.o
+spinand-objs := core.o alliancememory.o ato.o esmt.o foresee.o gigadevice.o macronix.o
spinand-objs += micron.o paragon.o toshiba.o winbond.o xtx.o
obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 393ff37f0d23..849ccfedbc72 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -940,6 +940,7 @@ static const struct spinand_manufacturer *spinand_manufacturers[] = {
&alliancememory_spinand_manufacturer,
&ato_spinand_manufacturer,
&esmt_c8_spinand_manufacturer,
+ &foresee_spinand_manufacturer,
&gigadevice_spinand_manufacturer,
&macronix_spinand_manufacturer,
&micron_spinand_manufacturer,
diff --git a/drivers/mtd/nand/spi/foresee.c b/drivers/mtd/nand/spi/foresee.c
new file mode 100644
index 000000000000..e0d2d9257045
--- /dev/null
+++ b/drivers/mtd/nand/spi/foresee.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2023, SberDevices. All Rights Reserved.
+ *
+ * Author: Martin Kurbanov <mmkurbanov@salutedevices.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_FORESEE 0xCD
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int f35sqa002g_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ return -ERANGE;
+}
+
+static int f35sqa002g_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* Reserve 2 bytes for the BBM. */
+ region->offset = 2;
+ region->length = 62;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops f35sqa002g_ooblayout = {
+ .ecc = f35sqa002g_ooblayout_ecc,
+ .free = f35sqa002g_ooblayout_free,
+};
+
+static int f35sqa002g_ecc_get_status(struct spinand_device *spinand, u8 status)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_HAS_BITFLIPS:
+ return nanddev_get_ecc_conf(nand)->strength;
+
+ default:
+ break;
+ }
+
+ /* More than 1-bit error was detected in one or more sectors and
+ * cannot be corrected.
+ */
+ return -EBADMSG;
+}
+
+static const struct spinand_info foresee_spinand_table[] = {
+ SPINAND_INFO("F35SQA002G",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x72, 0x72),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&f35sqa002g_ooblayout,
+ f35sqa002g_ecc_get_status)),
+};
+
+static const struct spinand_manufacturer_ops foresee_spinand_manuf_ops = {
+};
+
+const struct spinand_manufacturer foresee_spinand_manufacturer = {
+ .id = SPINAND_MFR_FORESEE,
+ .name = "FORESEE",
+ .chips = foresee_spinand_table,
+ .nchips = ARRAY_SIZE(foresee_spinand_table),
+ .ops = &foresee_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
index f507e3759301..1a473021cca5 100644
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -169,6 +169,51 @@ static const struct spinand_info winbond_spinand_table[] = {
&update_cache_variants),
0,
SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)),
+ SPINAND_INFO("W25N01JW",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbc, 0x21),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25m02gv_ooblayout, w25n02kv_ecc_get_status)),
+ SPINAND_INFO("W25N02JWZEIF",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbf, 0x22),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 2, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)),
+ SPINAND_INFO("W25N512GW",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xba, 0x20),
+ NAND_MEMORG(1, 2048, 64, 64, 512, 10, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)),
+ SPINAND_INFO("W25N02KWZEIR",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xba, 0x22),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)),
+ SPINAND_INFO("W25N01GWZEIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xba, 0x21),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25m02gv_ooblayout, w25n02kv_ecc_get_status)),
};
static int winbond_spinand_init(struct spinand_device *spinand)
diff --git a/drivers/mtd/nand/spi/xtx.c b/drivers/mtd/nand/spi/xtx.c
index 3911520f718c..66a4255bdf06 100644
--- a/drivers/mtd/nand/spi/xtx.c
+++ b/drivers/mtd/nand/spi/xtx.c
@@ -4,6 +4,7 @@
* Felix Matouschek <felix@matouschek.org>
*/
+#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
@@ -15,6 +16,12 @@
#define XT26G0XA_STATUS_ECC_8_CORRECTED (3 << 4)
#define XT26G0XA_STATUS_ECC_UNCOR_ERROR (2 << 4)
+#define XT26XXXD_STATUS_ECC3_ECC2_MASK GENMASK(7, 6)
+#define XT26XXXD_STATUS_ECC_NO_DETECTED (0)
+#define XT26XXXD_STATUS_ECC_1_7_CORRECTED (1)
+#define XT26XXXD_STATUS_ECC_8_CORRECTED (3)
+#define XT26XXXD_STATUS_ECC_UNCOR_ERROR (2)
+
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
@@ -84,6 +91,53 @@ static int xt26g0xa_ecc_get_status(struct spinand_device *spinand,
return status >> 2;
}
+static int xt26xxxd_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ region->offset = mtd->oobsize / 2;
+ region->length = mtd->oobsize / 2;
+
+ return 0;
+}
+
+static int xt26xxxd_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ region->offset = 2;
+ region->length = mtd->oobsize / 2 - 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops xt26xxxd_ooblayout = {
+ .ecc = xt26xxxd_ooblayout_ecc,
+ .free = xt26xxxd_ooblayout_free,
+};
+
+static int xt26xxxd_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ switch (FIELD_GET(STATUS_ECC_MASK, status)) {
+ case XT26XXXD_STATUS_ECC_NO_DETECTED:
+ return 0;
+ case XT26XXXD_STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+ case XT26XXXD_STATUS_ECC_1_7_CORRECTED:
+ return 4 + FIELD_GET(XT26XXXD_STATUS_ECC3_ECC2_MASK, status);
+ case XT26XXXD_STATUS_ECC_8_CORRECTED:
+ return 8;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
static const struct spinand_info xtx_spinand_table[] = {
SPINAND_INFO("XT26G01A",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xE1),
@@ -115,6 +169,86 @@ static const struct spinand_info xtx_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&xt26g0xa_ooblayout,
xt26g0xa_ecc_get_status)),
+ SPINAND_INFO("XT26G01D",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x31),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&xt26xxxd_ooblayout,
+ xt26xxxd_ecc_get_status)),
+ SPINAND_INFO("XT26G11D",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x34),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&xt26xxxd_ooblayout,
+ xt26xxxd_ecc_get_status)),
+ SPINAND_INFO("XT26Q01D",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x51),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&xt26xxxd_ooblayout,
+ xt26xxxd_ecc_get_status)),
+ SPINAND_INFO("XT26G02D",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x32),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&xt26xxxd_ooblayout,
+ xt26xxxd_ecc_get_status)),
+ SPINAND_INFO("XT26G12D",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x35),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&xt26xxxd_ooblayout,
+ xt26xxxd_ecc_get_status)),
+ SPINAND_INFO("XT26Q02D",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x52),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&xt26xxxd_ooblayout,
+ xt26xxxd_ecc_get_status)),
+ SPINAND_INFO("XT26G04D",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x33),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&xt26xxxd_ooblayout,
+ xt26xxxd_ecc_get_status)),
+ SPINAND_INFO("XT26Q04D",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x53),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&xt26xxxd_ooblayout,
+ xt26xxxd_ecc_get_status)),
};
static const struct spinand_manufacturer_ops xtx_spinand_manuf_ops = {
diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
index 60738edcd5d5..da03ab6efe04 100644
--- a/drivers/mtd/parsers/Kconfig
+++ b/drivers/mtd/parsers/Kconfig
@@ -1,9 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-config MTD_AR7_PARTS
- tristate "TI AR7 partitioning parser"
- help
- TI AR7 partitioning parser support
-
config MTD_BCM47XX_PARTS
tristate "BCM47XX partitioning parser"
depends on BCM47XX || ARCH_BCM_5301X
diff --git a/drivers/mtd/parsers/Makefile b/drivers/mtd/parsers/Makefile
index 0e70b621a1d8..9b00c62b837a 100644
--- a/drivers/mtd/parsers/Makefile
+++ b/drivers/mtd/parsers/Makefile
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o
obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
obj-$(CONFIG_MTD_BRCM_U_BOOT) += brcm_u-boot.o
diff --git a/drivers/mtd/parsers/ar7part.c b/drivers/mtd/parsers/ar7part.c
deleted file mode 100644
index 8cd683711ac6..000000000000
--- a/drivers/mtd/parsers/ar7part.c
+++ /dev/null
@@ -1,129 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright © 2007 Eugene Konev <ejka@openwrt.org>
- *
- * TI AR7 flash partition table.
- * Based on ar7 map by Felix Fietkau <nbd@openwrt.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/memblock.h>
-#include <linux/module.h>
-
-#include <uapi/linux/magic.h>
-
-#define AR7_PARTS 4
-#define ROOT_OFFSET 0xe0000
-
-#define LOADER_MAGIC1 le32_to_cpu(0xfeedfa42)
-#define LOADER_MAGIC2 le32_to_cpu(0xfeed1281)
-
-struct ar7_bin_rec {
- unsigned int checksum;
- unsigned int length;
- unsigned int address;
-};
-
-static int create_mtd_partitions(struct mtd_info *master,
- const struct mtd_partition **pparts,
- struct mtd_part_parser_data *data)
-{
- struct ar7_bin_rec header;
- unsigned int offset;
- size_t len;
- unsigned int pre_size = master->erasesize, post_size = 0;
- unsigned int root_offset = ROOT_OFFSET;
-
- int retries = 10;
- struct mtd_partition *ar7_parts;
-
- ar7_parts = kcalloc(AR7_PARTS, sizeof(*ar7_parts), GFP_KERNEL);
- if (!ar7_parts)
- return -ENOMEM;
- ar7_parts[0].name = "loader";
- ar7_parts[0].offset = 0;
- ar7_parts[0].size = master->erasesize;
- ar7_parts[0].mask_flags = MTD_WRITEABLE;
-
- ar7_parts[1].name = "config";
- ar7_parts[1].offset = 0;
- ar7_parts[1].size = master->erasesize;
- ar7_parts[1].mask_flags = 0;
-
- do { /* Try 10 blocks starting from master->erasesize */
- offset = pre_size;
- mtd_read(master, offset, sizeof(header), &len,
- (uint8_t *)&header);
- if (!strncmp((char *)&header, "TIENV0.8", 8))
- ar7_parts[1].offset = pre_size;
- if (header.checksum == LOADER_MAGIC1)
- break;
- if (header.checksum == LOADER_MAGIC2)
- break;
- pre_size += master->erasesize;
- } while (retries--);
-
- pre_size = offset;
-
- if (!ar7_parts[1].offset) {
- ar7_parts[1].offset = master->size - master->erasesize;
- post_size = master->erasesize;
- }
-
- switch (header.checksum) {
- case LOADER_MAGIC1:
- while (header.length) {
- offset += sizeof(header) + header.length;
- mtd_read(master, offset, sizeof(header), &len,
- (uint8_t *)&header);
- }
- root_offset = offset + sizeof(header) + 4;
- break;
- case LOADER_MAGIC2:
- while (header.length) {
- offset += sizeof(header) + header.length;
- mtd_read(master, offset, sizeof(header), &len,
- (uint8_t *)&header);
- }
- root_offset = offset + sizeof(header) + 4 + 0xff;
- root_offset &= ~(uint32_t)0xff;
- break;
- default:
- printk(KERN_WARNING "Unknown magic: %08x\n", header.checksum);
- break;
- }
-
- mtd_read(master, root_offset, sizeof(header), &len, (u8 *)&header);
- if (header.checksum != SQUASHFS_MAGIC) {
- root_offset += master->erasesize - 1;
- root_offset &= ~(master->erasesize - 1);
- }
-
- ar7_parts[2].name = "linux";
- ar7_parts[2].offset = pre_size;
- ar7_parts[2].size = master->size - pre_size - post_size;
- ar7_parts[2].mask_flags = 0;
-
- ar7_parts[3].name = "rootfs";
- ar7_parts[3].offset = root_offset;
- ar7_parts[3].size = master->size - root_offset - post_size;
- ar7_parts[3].mask_flags = 0;
-
- *pparts = ar7_parts;
- return AR7_PARTS;
-}
-
-static struct mtd_part_parser ar7_parser = {
- .parse_fn = create_mtd_partitions,
- .name = "ar7part",
-};
-module_mtd_part_parser(ar7_parser);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR( "Felix Fietkau <nbd@openwrt.org>, "
- "Eugene Konev <ejka@openwrt.org>");
-MODULE_DESCRIPTION("MTD partitioning for TI AR7");
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
index e347b435a038..5e68468b72fc 100644
--- a/drivers/mtd/spi-nor/Makefile
+++ b/drivers/mtd/spi-nor/Makefile
@@ -2,11 +2,9 @@
spi-nor-objs := core.o sfdp.o swp.o otp.o sysfs.o
spi-nor-objs += atmel.o
-spi-nor-objs += catalyst.o
spi-nor-objs += eon.o
spi-nor-objs += esmt.o
spi-nor-objs += everspin.o
-spi-nor-objs += fujitsu.o
spi-nor-objs += gigadevice.o
spi-nor-objs += intel.o
spi-nor-objs += issi.o
diff --git a/drivers/mtd/spi-nor/atmel.c b/drivers/mtd/spi-nor/atmel.c
index 58968c1e7d2f..e13b8d2dd50a 100644
--- a/drivers/mtd/spi-nor/atmel.c
+++ b/drivers/mtd/spi-nor/atmel.c
@@ -163,49 +163,84 @@ static const struct spi_nor_fixups atmel_nor_global_protection_fixups = {
};
static const struct flash_info atmel_nor_parts[] = {
- /* Atmel -- some are (confusingly) marketed as "DataFlash" */
- { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4)
- FLAGS(SPI_NOR_HAS_LOCK)
- NO_SFDP_FLAGS(SECT_4K)
- .fixups = &at25fs_nor_fixups },
- { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8)
- FLAGS(SPI_NOR_HAS_LOCK)
- NO_SFDP_FLAGS(SECT_4K)
- .fixups = &at25fs_nor_fixups },
- { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- .fixups = &atmel_nor_global_protection_fixups },
- { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- .fixups = &atmel_nor_global_protection_fixups },
- { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- .fixups = &atmel_nor_global_protection_fixups },
- { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- .fixups = &atmel_nor_global_protection_fixups },
- { "at25sl321", INFO(0x1f4216, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8)
- NO_SFDP_FLAGS(SECT_4K) },
- { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- .fixups = &atmel_nor_global_protection_fixups },
- { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- .fixups = &atmel_nor_global_protection_fixups },
- { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- .fixups = &atmel_nor_global_protection_fixups },
- { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16)
- NO_SFDP_FLAGS(SECT_4K) },
+ {
+ .id = SNOR_ID(0x1f, 0x04, 0x00),
+ .name = "at26f004",
+ .size = SZ_512K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x1f, 0x25, 0x00),
+ .name = "at45db081d",
+ .size = SZ_1M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x1f, 0x42, 0x16),
+ .name = "at25sl321",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x1f, 0x44, 0x01),
+ .name = "at25df041a",
+ .size = SZ_512K,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .fixups = &atmel_nor_global_protection_fixups,
+ }, {
+ .id = SNOR_ID(0x1f, 0x45, 0x01),
+ .name = "at26df081a",
+ .size = SZ_1M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .fixups = &atmel_nor_global_protection_fixups
+ }, {
+ .id = SNOR_ID(0x1f, 0x46, 0x01),
+ .name = "at26df161a",
+ .size = SZ_2M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .fixups = &atmel_nor_global_protection_fixups
+ }, {
+ .id = SNOR_ID(0x1f, 0x47, 0x00),
+ .name = "at25df321",
+ .size = SZ_4M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .fixups = &atmel_nor_global_protection_fixups
+ }, {
+ .id = SNOR_ID(0x1f, 0x47, 0x01),
+ .name = "at25df321a",
+ .size = SZ_4M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .fixups = &atmel_nor_global_protection_fixups
+ }, {
+ .id = SNOR_ID(0x1f, 0x47, 0x08),
+ .name = "at25ff321a",
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .fixups = &atmel_nor_global_protection_fixups
+ }, {
+ .id = SNOR_ID(0x1f, 0x48, 0x00),
+ .name = "at25df641",
+ .size = SZ_8M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .fixups = &atmel_nor_global_protection_fixups
+ }, {
+ .id = SNOR_ID(0x1f, 0x66, 0x01),
+ .name = "at25fs010",
+ .sector_size = SZ_32K,
+ .size = SZ_128K,
+ .flags = SPI_NOR_HAS_LOCK,
+ .no_sfdp_flags = SECT_4K,
+ .fixups = &at25fs_nor_fixups
+ }, {
+ .id = SNOR_ID(0x1f, 0x66, 0x04),
+ .name = "at25fs040",
+ .size = SZ_512K,
+ .flags = SPI_NOR_HAS_LOCK,
+ .no_sfdp_flags = SECT_4K,
+ .fixups = &at25fs_nor_fixups
+ },
};
const struct spi_nor_manufacturer spi_nor_atmel = {
diff --git a/drivers/mtd/spi-nor/catalyst.c b/drivers/mtd/spi-nor/catalyst.c
deleted file mode 100644
index 6d310815fb12..000000000000
--- a/drivers/mtd/spi-nor/catalyst.c
+++ /dev/null
@@ -1,24 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2005, Intec Automation Inc.
- * Copyright (C) 2014, Freescale Semiconductor, Inc.
- */
-
-#include <linux/mtd/spi-nor.h>
-
-#include "core.h"
-
-static const struct flash_info catalyst_nor_parts[] = {
- /* Catalyst / On Semiconductor -- non-JEDEC */
- { "cat25c11", CAT25_INFO(16, 8, 16, 1) },
- { "cat25c03", CAT25_INFO(32, 8, 16, 2) },
- { "cat25c09", CAT25_INFO(128, 8, 32, 2) },
- { "cat25c17", CAT25_INFO(256, 8, 32, 2) },
- { "cat25128", CAT25_INFO(2048, 8, 64, 2) },
-};
-
-const struct spi_nor_manufacturer spi_nor_catalyst = {
- .name = "catalyst",
- .parts = catalyst_nor_parts,
- .nparts = ARRAY_SIZE(catalyst_nor_parts),
-};
diff --git a/drivers/mtd/spi-nor/controllers/hisi-sfc.c b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
index 5070d72835ec..89a7f0bbc4b3 100644
--- a/drivers/mtd/spi-nor/controllers/hisi-sfc.c
+++ b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
@@ -468,13 +468,12 @@ static int hisi_spi_nor_probe(struct platform_device *pdev)
return ret;
}
-static int hisi_spi_nor_remove(struct platform_device *pdev)
+static void hisi_spi_nor_remove(struct platform_device *pdev)
{
struct hifmc_host *host = platform_get_drvdata(pdev);
hisi_spi_nor_unregister_all(host);
mutex_destroy(&host->lock);
- return 0;
}
static const struct of_device_id hisi_spi_nor_dt_ids[] = {
@@ -489,7 +488,7 @@ static struct platform_driver hisi_spi_nor_driver = {
.of_match_table = hisi_spi_nor_dt_ids,
},
.probe = hisi_spi_nor_probe,
- .remove = hisi_spi_nor_remove,
+ .remove_new = hisi_spi_nor_remove,
};
module_platform_driver(hisi_spi_nor_driver);
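Both this hisi-sfc hunk and the nxp-spifi hunk that follows are the same mechanical conversion: the platform driver's remove callback now returns void, so the trailing "return 0;" disappears and the function is wired up through .remove_new instead of .remove. A hedged sketch of the pattern; the foo_* names are hypothetical placeholders, not anything from this series:

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	/* ... allocate resources, platform_set_drvdata(), register devices ... */
	return 0;
}

static void foo_remove(struct platform_device *pdev)
{
	/* ... tear down whatever probe set up ... */
	/* No return value: remove-time errors could never be acted on anyway. */
}

static struct platform_driver foo_driver = {
	.probe		= foo_probe,
	.remove_new	= foo_remove,
	.driver		= {
		.name = "foo",
	},
};
module_platform_driver(foo_driver);

MODULE_DESCRIPTION("Sketch of the .remove_new conversion pattern");
MODULE_LICENSE("GPL");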
diff --git a/drivers/mtd/spi-nor/controllers/nxp-spifi.c b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
index 5d8f47ab146f..5aee62f51031 100644
--- a/drivers/mtd/spi-nor/controllers/nxp-spifi.c
+++ b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
@@ -431,13 +431,11 @@ static int nxp_spifi_probe(struct platform_device *pdev)
return 0;
}
-static int nxp_spifi_remove(struct platform_device *pdev)
+static void nxp_spifi_remove(struct platform_device *pdev)
{
struct nxp_spifi *spifi = platform_get_drvdata(pdev);
mtd_device_unregister(&spifi->nor.mtd);
-
- return 0;
}
static const struct of_device_id nxp_spifi_match[] = {
@@ -448,7 +446,7 @@ MODULE_DEVICE_TABLE(of, nxp_spifi_match);
static struct platform_driver nxp_spifi_driver = {
.probe = nxp_spifi_probe,
- .remove = nxp_spifi_remove,
+ .remove_new = nxp_spifi_remove,
.driver = {
.name = "nxp-spifi",
.of_match_table = nxp_spifi_match,
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 1b0c6770c14e..1c443fe568cf 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -1999,11 +1999,9 @@ int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
static const struct spi_nor_manufacturer *manufacturers[] = {
&spi_nor_atmel,
- &spi_nor_catalyst,
&spi_nor_eon,
&spi_nor_esmt,
&spi_nor_everspin,
- &spi_nor_fujitsu,
&spi_nor_gigadevice,
&spi_nor_intel,
&spi_nor_issi,
@@ -2019,13 +2017,6 @@ static const struct spi_nor_manufacturer *manufacturers[] = {
static const struct flash_info spi_nor_generic_flash = {
.name = "spi-nor-generic",
- .n_banks = 1,
- /*
- * JESD216 rev A doesn't specify the page size, therefore we need a
- * sane default.
- */
- .page_size = 256,
- .parse_sfdp = true,
};
static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
@@ -2037,8 +2028,8 @@ static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
for (j = 0; j < manufacturers[i]->nparts; j++) {
part = &manufacturers[i]->parts[j];
- if (part->id_len &&
- !memcmp(part->id, id, part->id_len)) {
+ if (part->id &&
+ !memcmp(part->id->bytes, id, part->id->len)) {
nor->manufacturer = manufacturers[i];
return part;
}
@@ -2520,13 +2511,6 @@ static int spi_nor_select_pp(struct spi_nor *nor,
/**
* spi_nor_select_uniform_erase() - select optimum uniform erase type
* @map: the erase map of the SPI NOR
- * @wanted_size: the erase type size to search for. Contains the value of
- * info->sector_size, the "small sector" size in case
- * CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined or 0 if
- * there is no information about the sector size. The
- * latter is the case if the flash parameters are parsed
- * solely by SFDP, then the largest supported erase type
- * is selected.
*
* Once the optimum uniform sector erase command is found, disable all the
* other.
@@ -2534,13 +2518,16 @@ static int spi_nor_select_pp(struct spi_nor *nor,
* Return: pointer to erase type on success, NULL otherwise.
*/
static const struct spi_nor_erase_type *
-spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
- const u32 wanted_size)
+spi_nor_select_uniform_erase(struct spi_nor_erase_map *map)
{
const struct spi_nor_erase_type *tested_erase, *erase = NULL;
int i;
u8 uniform_erase_type = map->uniform_erase_type;
+ /*
+ * Search for the biggest erase size, except for when compiled
+ * to use 4k erases.
+ */
for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
if (!(uniform_erase_type & BIT(i)))
continue;
@@ -2552,10 +2539,11 @@ spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
continue;
/*
- * If the current erase size is the one, stop here:
+ * If the current erase size is the 4k one, stop here,
* we have found the right uniform Sector Erase command.
*/
- if (tested_erase->size == wanted_size) {
+ if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS) &&
+ tested_erase->size == SZ_4K) {
erase = tested_erase;
break;
}
@@ -2583,7 +2571,6 @@ static int spi_nor_select_erase(struct spi_nor *nor)
struct spi_nor_erase_map *map = &nor->params->erase_map;
const struct spi_nor_erase_type *erase = NULL;
struct mtd_info *mtd = &nor->mtd;
- u32 wanted_size = nor->info->sector_size;
int i;
/*
@@ -2594,13 +2581,8 @@ static int spi_nor_select_erase(struct spi_nor *nor)
* manage the SPI flash memory as uniform with a single erase sector
* size, when possible.
*/
-#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
- /* prefer "small sector" erase if possible */
- wanted_size = 4096u;
-#endif
-
if (spi_nor_has_uniform_erase(nor)) {
- erase = spi_nor_select_uniform_erase(map, wanted_size);
+ erase = spi_nor_select_uniform_erase(map);
if (!erase)
return -EINVAL;
nor->erase_opcode = erase->opcode;
@@ -2773,7 +2755,8 @@ static void spi_nor_no_sfdp_init_params(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
struct spi_nor_erase_map *map = &params->erase_map;
- const u8 no_sfdp_flags = nor->info->no_sfdp_flags;
+ const struct flash_info *info = nor->info;
+ const u8 no_sfdp_flags = info->no_sfdp_flags;
u8 i, erase_mask;
if (no_sfdp_flags & SPI_NOR_DUAL_READ) {
@@ -2827,7 +2810,8 @@ static void spi_nor_no_sfdp_init_params(struct spi_nor *nor)
i++;
}
erase_mask |= BIT(i);
- spi_nor_set_erase_type(&map->erase_type[i], nor->info->sector_size,
+ spi_nor_set_erase_type(&map->erase_type[i],
+ info->sector_size ?: SPI_NOR_DEFAULT_SECTOR_SIZE,
SPINOR_OP_SE);
spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
}
@@ -2869,7 +2853,7 @@ static void spi_nor_init_flags(struct spi_nor *nor)
if (flags & NO_CHIP_ERASE)
nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
- if (flags & SPI_NOR_RWW && nor->info->n_banks > 1 &&
+ if (flags & SPI_NOR_RWW && nor->params->n_banks > 1 &&
!nor->controller_ops)
nor->flags |= SNOR_F_RWW;
}
@@ -2933,8 +2917,8 @@ static int spi_nor_late_init_params(struct spi_nor *nor)
if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
spi_nor_init_default_locking_ops(nor);
- if (nor->info->n_banks > 1)
- params->bank_size = div64_u64(params->size, nor->info->n_banks);
+ if (params->n_banks > 1)
+ params->bank_size = div64_u64(params->size, params->n_banks);
return 0;
}
@@ -2994,16 +2978,17 @@ static void spi_nor_init_default_params(struct spi_nor *nor)
struct device_node *np = spi_nor_get_flash_node(nor);
params->quad_enable = spi_nor_sr2_bit1_quad_enable;
- params->otp.org = &info->otp_org;
+ params->otp.org = info->otp;
/* Default to 16-bit Write Status (01h) Command */
nor->flags |= SNOR_F_HAS_16BIT_SR;
/* Set SPI NOR sizes. */
params->writesize = 1;
- params->size = (u64)info->sector_size * info->n_sectors;
+ params->size = info->size;
params->bank_size = params->size;
- params->page_size = info->page_size;
+ params->page_size = info->page_size ?: SPI_NOR_DEFAULT_PAGE_SIZE;
+ params->n_banks = info->n_banks ?: SPI_NOR_DEFAULT_N_BANKS;
if (!(info->flags & SPI_NOR_NO_FR)) {
/* Default to Fast Read for DT and non-DT platform devices. */
@@ -3083,7 +3068,7 @@ static int spi_nor_init_params(struct spi_nor *nor)
spi_nor_init_default_params(nor);
- if (nor->info->parse_sfdp) {
+ if (spi_nor_needs_sfdp(nor)) {
ret = spi_nor_parse_sfdp(nor);
if (ret) {
dev_err(nor->dev, "BFPT parsing failed. Please consider using SPI_NOR_SKIP_SFDP when declaring the flash\n");
@@ -3385,7 +3370,7 @@ static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
* If caller has specified name of flash model that can normally be
* detected using JEDEC, let's verify it.
*/
- if (name && info->id_len) {
+ if (name && info->id) {
const struct flash_info *jinfo;
jinfo = spi_nor_detect(nor);
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index 9217379b9cfe..93cd2fc3606d 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -10,6 +10,13 @@
#include "sfdp.h"
#define SPI_NOR_MAX_ID_LEN 6
+/*
+ * 256 bytes is a sane default for most older flashes. Newer flashes will
+ * have the page size defined within their SFDP tables.
+ */
+#define SPI_NOR_DEFAULT_PAGE_SIZE 256
+#define SPI_NOR_DEFAULT_N_BANKS 1
+#define SPI_NOR_DEFAULT_SECTOR_SIZE SZ_64K
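These defaults are applied in the core.c hunks above with GNU C's "a ?: b" short form, so a flash_info entry only needs to spell out a field when it differs from 256-byte pages, one bank, or 64 KiB sectors. A tiny standalone illustration (compiles with gcc or clang, which support the extension the kernel uses):

#include <stdio.h>

#define SPI_NOR_DEFAULT_PAGE_SIZE	256

int main(void)
{
	unsigned int info_page_size = 0;	/* entry left .page_size unset */

	/* "a ?: b" is shorthand for "a ? a : b" */
	unsigned int page_size = info_page_size ?: SPI_NOR_DEFAULT_PAGE_SIZE;

	printf("%u\n", page_size);		/* prints 256 */
	return 0;
}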
/* Standard SPI NOR flash operations. */
#define SPI_NOR_READID_OP(naddr, ndummy, buf, len) \
@@ -353,6 +360,7 @@ struct spi_nor_otp {
* in octal DTR mode.
* @rdsr_addr_nbytes: dummy address bytes needed for Read Status Register
* command in octal DTR mode.
+ * @n_banks: number of banks.
* @n_dice: number of dice in the flash memory.
* @vreg_offset: volatile register offset for each die.
* @hwcaps: describes the read and page program hardware
@@ -389,6 +397,7 @@ struct spi_nor_flash_parameter {
u8 addr_mode_nbytes;
u8 rdsr_dummy;
u8 rdsr_addr_nbytes;
+ u8 n_banks;
u8 n_dice;
u32 *vreg_offset;
@@ -438,21 +447,31 @@ struct spi_nor_fixups {
};
/**
+ * struct spi_nor_id - SPI NOR flash ID.
+ *
+ * @bytes: the bytes returned by the flash when issuing command 9F. Typically,
+ * the first byte is the manufacturer ID code (see JEP106) and the next
+ * two bytes are a flash part specific ID.
+ * @len: the number of bytes of ID.
+ */
+struct spi_nor_id {
+ const u8 *bytes;
+ u8 len;
+};
+
+/**
* struct flash_info - SPI NOR flash_info entry.
+ * @id: pointer to struct spi_nor_id or NULL, which means "no ID" (mostly
+ * older chips).
* @name: the name of the flash.
- * @id: the flash's ID bytes. The first three bytes are the
- * JEDIC ID. JEDEC ID zero means "no ID" (mostly older chips).
- * @id_len: the number of bytes of ID.
- * @sector_size: the size listed here is what works with SPINOR_OP_SE, which
- * isn't necessarily called a "sector" by the vendor.
- * @n_sectors: the number of sectors.
- * @n_banks: the number of banks.
- * @page_size: the flash's page size.
+ * @size: the size of the flash in bytes.
+ * @sector_size: (optional) the size listed here is what works with
+ * SPINOR_OP_SE, which isn't necessarily called a "sector" by
+ * the vendor. Defaults to 64k.
+ * @n_banks: (optional) the number of banks. Defaults to 1.
+ * @page_size: (optional) the flash's page size. Defaults to 256.
* @addr_nbytes: number of address bytes to send.
*
- * @parse_sfdp: true when flash supports SFDP tables. The false value has no
- * meaning. If one wants to skip the SFDP tables, one should
- * instead use the SPI_NOR_SKIP_SFDP sfdp_flag.
* @flags: flags that indicate support that is not defined by the
* JESD216 standard in its SFDP tables. Flag meanings:
* SPI_NOR_HAS_LOCK: flash supports lock/unlock via SR
@@ -503,15 +522,13 @@ struct spi_nor_fixups {
*/
struct flash_info {
char *name;
- u8 id[SPI_NOR_MAX_ID_LEN];
- u8 id_len;
+ const struct spi_nor_id *id;
+ size_t size;
unsigned sector_size;
- u16 n_sectors;
u16 page_size;
u8 n_banks;
u8 addr_nbytes;
- bool parse_sfdp;
u16 flags;
#define SPI_NOR_HAS_LOCK BIT(0)
#define SPI_NOR_HAS_TB BIT(1)
@@ -540,70 +557,23 @@ struct flash_info {
u8 mfr_flags;
- const struct spi_nor_otp_organization otp_org;
+ const struct spi_nor_otp_organization *otp;
const struct spi_nor_fixups *fixups;
};
-#define SPI_NOR_ID_2ITEMS(_id) ((_id) >> 8) & 0xff, (_id) & 0xff
-#define SPI_NOR_ID_3ITEMS(_id) ((_id) >> 16) & 0xff, SPI_NOR_ID_2ITEMS(_id)
-
-#define SPI_NOR_ID(_jedec_id, _ext_id) \
- .id = { SPI_NOR_ID_3ITEMS(_jedec_id), SPI_NOR_ID_2ITEMS(_ext_id) }, \
- .id_len = !(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))
-
-#define SPI_NOR_ID6(_jedec_id, _ext_id) \
- .id = { SPI_NOR_ID_3ITEMS(_jedec_id), SPI_NOR_ID_3ITEMS(_ext_id) }, \
- .id_len = 6
-
-#define SPI_NOR_GEOMETRY(_sector_size, _n_sectors, _n_banks) \
- .sector_size = (_sector_size), \
- .n_sectors = (_n_sectors), \
- .page_size = 256, \
- .n_banks = (_n_banks)
-
-/* Used when the "_ext_id" is two bytes at most */
-#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors) \
- SPI_NOR_ID((_jedec_id), (_ext_id)), \
- SPI_NOR_GEOMETRY((_sector_size), (_n_sectors), 1),
-
-#define INFOB(_jedec_id, _ext_id, _sector_size, _n_sectors, _n_banks) \
- SPI_NOR_ID((_jedec_id), (_ext_id)), \
- SPI_NOR_GEOMETRY((_sector_size), (_n_sectors), (_n_banks)),
-
-#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors) \
- SPI_NOR_ID6((_jedec_id), (_ext_id)), \
- SPI_NOR_GEOMETRY((_sector_size), (_n_sectors), 1),
+#define SNOR_ID(...) \
+ (&(const struct spi_nor_id){ \
+ .bytes = (const u8[]){ __VA_ARGS__ }, \
+ .len = sizeof((u8[]){ __VA_ARGS__ }), \
+ })
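SNOR_ID() relies on C99 compound literals: the argument list becomes an anonymous, statically allocated byte array and its length is taken with sizeof, which is why the old separate id/id_len pair can go away. Roughly what SNOR_ID(0x1f, 0x44, 0x01) (the at25df041a entry above) boils down to, written out by hand as a sketch using the struct spi_nor_id declared above:

static const unsigned char at25df041a_id_bytes[] = { 0x1f, 0x44, 0x01 };

static const struct spi_nor_id at25df041a_id = {
	.bytes	= at25df041a_id_bytes,
	.len	= sizeof(at25df041a_id_bytes),	/* 3 */
};

/* ...which a flash_info entry then references as .id = &at25df041a_id */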
-#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_nbytes) \
- .sector_size = (_sector_size), \
- .n_sectors = (_n_sectors), \
- .page_size = (_page_size), \
- .n_banks = 1, \
- .addr_nbytes = (_addr_nbytes), \
- .flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR, \
-
-#define OTP_INFO(_len, _n_regions, _base, _offset) \
- .otp_org = { \
- .len = (_len), \
- .base = (_base), \
- .offset = (_offset), \
- .n_regions = (_n_regions), \
- },
-
-#define PARSE_SFDP \
- .parse_sfdp = true, \
-
-#define FLAGS(_flags) \
- .flags = (_flags), \
-
-#define NO_SFDP_FLAGS(_no_sfdp_flags) \
- .no_sfdp_flags = (_no_sfdp_flags), \
-
-#define FIXUP_FLAGS(_fixup_flags) \
- .fixup_flags = (_fixup_flags), \
-
-#define MFR_FLAGS(_mfr_flags) \
- .mfr_flags = (_mfr_flags), \
+#define SNOR_OTP(_len, _n_regions, _base, _offset) \
+ (&(const struct spi_nor_otp_organization){ \
+ .len = (_len), \
+ .base = (_base), \
+ .offset = (_offset), \
+ .n_regions = (_n_regions), \
+ })
/**
* struct spi_nor_manufacturer - SPI NOR manufacturer object
@@ -631,11 +601,9 @@ struct sfdp {
/* Manufacturer drivers. */
extern const struct spi_nor_manufacturer spi_nor_atmel;
-extern const struct spi_nor_manufacturer spi_nor_catalyst;
extern const struct spi_nor_manufacturer spi_nor_eon;
extern const struct spi_nor_manufacturer spi_nor_esmt;
extern const struct spi_nor_manufacturer spi_nor_everspin;
-extern const struct spi_nor_manufacturer spi_nor_fujitsu;
extern const struct spi_nor_manufacturer spi_nor_gigadevice;
extern const struct spi_nor_manufacturer spi_nor_intel;
extern const struct spi_nor_manufacturer spi_nor_issi;
@@ -734,6 +702,22 @@ static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
return container_of(mtd, struct spi_nor, mtd);
}
+/**
+ * spi_nor_needs_sfdp() - returns true if SFDP parsing is used for this flash.
+ *
+ * Return: true if SFDP parsing is needed
+ */
+static inline bool spi_nor_needs_sfdp(const struct spi_nor *nor)
+{
+ /*
+ * The flash size is one property parsed by the SFDP. We use it as an
+ * indicator whether we need SFDP parsing for a particular flash. I.e.
+ * non-legacy flash entries in flash_info will have a size of zero iff
+ * SFDP should be used.
+ */
+ return !nor->info->size;
+}
+
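With this helper the rule is simply: a flash_info entry without a .size must describe itself through SFDP, which is why entries such as at25ff321a above and en25qh256, gd25q256, is25lp256, mx25uw51245g or mt25qu512a further down no longer carry a size at all. A small standalone sketch of the rule, using two entries from the eon table below:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct flash_info_sketch {
	const char *name;
	size_t size;			/* 0 when the part is described by SFDP */
};

static bool needs_sfdp(const struct flash_info_sketch *info)
{
	return !info->size;		/* same test as spi_nor_needs_sfdp() above */
}

int main(void)
{
	/* sizes taken from the eon table further down */
	struct flash_info_sketch legacy = { .name = "en25qh128", .size = 16u << 20 };
	struct flash_info_sketch modern = { .name = "en25qh256" };

	printf("%s: %s\n", legacy.name,
	       needs_sfdp(&legacy) ? "parse SFDP" : "use flash_info geometry");
	printf("%s: %s\n", modern.name,
	       needs_sfdp(&modern) ? "parse SFDP" : "use flash_info geometry");
	return 0;
}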
#ifdef CONFIG_DEBUG_FS
void spi_nor_debugfs_register(struct spi_nor *nor);
void spi_nor_debugfs_shutdown(void);
diff --git a/drivers/mtd/spi-nor/eon.c b/drivers/mtd/spi-nor/eon.c
index 50a11053711f..c1ddf662f782 100644
--- a/drivers/mtd/spi-nor/eon.c
+++ b/drivers/mtd/spi-nor/eon.c
@@ -9,26 +9,60 @@
#include "core.h"
static const struct flash_info eon_nor_parts[] = {
- /* EON -- en25xxx */
- { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K) },
- { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64) },
- { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64) },
- { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128) },
- { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K) },
- { "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
- { "en25qh16", INFO(0x1c7015, 0, 64 * 1024, 32)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
- { "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64) },
- { "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
- { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256) },
- { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512)
- PARSE_SFDP },
- { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K) },
+ {
+ .id = SNOR_ID(0x1c, 0x20, 0x16),
+ .name = "en25p32",
+ .size = SZ_4M,
+ }, {
+ .id = SNOR_ID(0x1c, 0x20, 0x17),
+ .name = "en25p64",
+ .size = SZ_8M,
+ }, {
+ .id = SNOR_ID(0x1c, 0x30, 0x14),
+ .name = "en25q80a",
+ .size = SZ_1M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+ }, {
+ .id = SNOR_ID(0x1c, 0x30, 0x16),
+ .name = "en25q32b",
+ .size = SZ_4M,
+ }, {
+ .id = SNOR_ID(0x1c, 0x30, 0x17),
+ .name = "en25q64",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x1c, 0x31, 0x16),
+ .name = "en25f32",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .name = "en25s64",
+ .id = SNOR_ID(0x1c, 0x38, 0x17),
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x1c, 0x70, 0x15),
+ .name = "en25qh16",
+ .size = SZ_2M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+ }, {
+ .id = SNOR_ID(0x1c, 0x70, 0x16),
+ .name = "en25qh32",
+ .size = SZ_4M,
+ }, {
+ .id = SNOR_ID(0x1c, 0x70, 0x17),
+ .name = "en25qh64",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+ }, {
+ .id = SNOR_ID(0x1c, 0x70, 0x18),
+ .name = "en25qh128",
+ .size = SZ_16M,
+ }, {
+ .id = SNOR_ID(0x1c, 0x70, 0x19),
+ .name = "en25qh256",
+ },
};
const struct spi_nor_manufacturer spi_nor_eon = {
diff --git a/drivers/mtd/spi-nor/esmt.c b/drivers/mtd/spi-nor/esmt.c
index fcc3b0e7cda9..089fcd1aa794 100644
--- a/drivers/mtd/spi-nor/esmt.c
+++ b/drivers/mtd/spi-nor/esmt.c
@@ -9,16 +9,25 @@
#include "core.h"
static const struct flash_info esmt_nor_parts[] = {
- /* ESMT */
- { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K) },
- { "f25l32qa-2s", INFO(0x8c4116, 0, 64 * 1024, 64)
- FLAGS(SPI_NOR_HAS_LOCK)
- NO_SFDP_FLAGS(SECT_4K) },
- { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128)
- FLAGS(SPI_NOR_HAS_LOCK)
- NO_SFDP_FLAGS(SECT_4K) },
+ {
+ .id = SNOR_ID(0x8c, 0x20, 0x16),
+ .name = "f25l32pa",
+ .size = SZ_4M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x8c, 0x41, 0x16),
+ .name = "f25l32qa-2s",
+ .size = SZ_4M,
+ .flags = SPI_NOR_HAS_LOCK,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x8c, 0x41, 0x17),
+ .name = "f25l64qa",
+ .size = SZ_8M,
+ .flags = SPI_NOR_HAS_LOCK,
+ .no_sfdp_flags = SECT_4K,
+ }
};
const struct spi_nor_manufacturer spi_nor_esmt = {
diff --git a/drivers/mtd/spi-nor/everspin.c b/drivers/mtd/spi-nor/everspin.c
index 84a07c2e0536..5f321e24ae7d 100644
--- a/drivers/mtd/spi-nor/everspin.c
+++ b/drivers/mtd/spi-nor/everspin.c
@@ -9,11 +9,29 @@
#include "core.h"
static const struct flash_info everspin_nor_parts[] = {
- /* Everspin */
- { "mr25h128", CAT25_INFO(16 * 1024, 1, 256, 2) },
- { "mr25h256", CAT25_INFO(32 * 1024, 1, 256, 2) },
- { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3) },
- { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3) },
+ {
+ .name = "mr25h128",
+ .size = SZ_16K,
+ .sector_size = SZ_16K,
+ .addr_nbytes = 2,
+ .flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR,
+ }, {
+ .name = "mr25h256",
+ .size = SZ_32K,
+ .sector_size = SZ_32K,
+ .addr_nbytes = 2,
+ .flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR,
+ }, {
+ .name = "mr25h10",
+ .size = SZ_128K,
+ .sector_size = SZ_128K,
+ .flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR,
+ }, {
+ .name = "mr25h40",
+ .size = SZ_512K,
+ .sector_size = SZ_512K,
+ .flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR,
+ }
};
const struct spi_nor_manufacturer spi_nor_everspin = {
diff --git a/drivers/mtd/spi-nor/fujitsu.c b/drivers/mtd/spi-nor/fujitsu.c
deleted file mode 100644
index 69cffc5c73ef..000000000000
--- a/drivers/mtd/spi-nor/fujitsu.c
+++ /dev/null
@@ -1,21 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2005, Intec Automation Inc.
- * Copyright (C) 2014, Freescale Semiconductor, Inc.
- */
-
-#include <linux/mtd/spi-nor.h>
-
-#include "core.h"
-
-static const struct flash_info fujitsu_nor_parts[] = {
- /* Fujitsu */
- { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1)
- FLAGS(SPI_NOR_NO_ERASE) },
-};
-
-const struct spi_nor_manufacturer spi_nor_fujitsu = {
- .name = "fujitsu",
- .parts = fujitsu_nor_parts,
- .nparts = ARRAY_SIZE(fujitsu_nor_parts),
-};
diff --git a/drivers/mtd/spi-nor/gigadevice.c b/drivers/mtd/spi-nor/gigadevice.c
index d57ddaf1525b..ef1edd0add70 100644
--- a/drivers/mtd/spi-nor/gigadevice.c
+++ b/drivers/mtd/spi-nor/gigadevice.c
@@ -34,39 +34,55 @@ static const struct spi_nor_fixups gd25q256_fixups = {
};
static const struct flash_info gigadevice_nor_parts[] = {
- { "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512)
- PARSE_SFDP
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
- .fixups = &gd25q256_fixups },
+ {
+ .id = SNOR_ID(0xc8, 0x40, 0x15),
+ .name = "gd25q16",
+ .size = SZ_2M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xc8, 0x40, 0x16),
+ .name = "gd25q32",
+ .size = SZ_4M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xc8, 0x40, 0x17),
+ .name = "gd25q64",
+ .size = SZ_8M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xc8, 0x40, 0x18),
+ .name = "gd25q128",
+ .size = SZ_16M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xc8, 0x40, 0x19),
+ .name = "gd25q256",
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6,
+ .fixups = &gd25q256_fixups,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
+ }, {
+ .id = SNOR_ID(0xc8, 0x60, 0x16),
+ .name = "gd25lq32",
+ .size = SZ_4M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xc8, 0x60, 0x17),
+ .name = "gd25lq64c",
+ .size = SZ_8M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xc8, 0x60, 0x18),
+ .name = "gd25lq128d",
+ .size = SZ_16M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ },
};
const struct spi_nor_manufacturer spi_nor_gigadevice = {
diff --git a/drivers/mtd/spi-nor/intel.c b/drivers/mtd/spi-nor/intel.c
index 9179f2d09cba..f647359fee7a 100644
--- a/drivers/mtd/spi-nor/intel.c
+++ b/drivers/mtd/spi-nor/intel.c
@@ -9,13 +9,22 @@
#include "core.h"
static const struct flash_info intel_nor_parts[] = {
- /* Intel/Numonyx -- xxxs33b */
- { "160s33b", INFO(0x898911, 0, 64 * 1024, 32)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "320s33b", INFO(0x898912, 0, 64 * 1024, 64)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "640s33b", INFO(0x898913, 0, 64 * 1024, 128)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
+ {
+ .id = SNOR_ID(0x89, 0x89, 0x11),
+ .name = "160s33b",
+ .size = SZ_2M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ }, {
+ .id = SNOR_ID(0x89, 0x89, 0x12),
+ .name = "320s33b",
+ .size = SZ_4M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ }, {
+ .id = SNOR_ID(0x89, 0x89, 0x13),
+ .name = "640s33b",
+ .size = SZ_8M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ }
};
const struct spi_nor_manufacturer spi_nor_intel = {
diff --git a/drivers/mtd/spi-nor/issi.c b/drivers/mtd/spi-nor/issi.c
index accdf7aa2bfd..18d9a00aa22e 100644
--- a/drivers/mtd/spi-nor/issi.c
+++ b/drivers/mtd/spi-nor/issi.c
@@ -47,48 +47,86 @@ static const struct spi_nor_fixups pm25lv_nor_fixups = {
};
static const struct flash_info issi_nor_parts[] = {
- /* ISSI */
- { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2)
- NO_SFDP_FLAGS(SECT_4K) },
- { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
- { "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
- { "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
- { "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512)
- PARSE_SFDP
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
- .fixups = &is25lp256_fixups },
- { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25wp256", INFO(0x9d7019, 0, 0, 0)
- PARSE_SFDP
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
- FLAGS(SPI_NOR_QUAD_PP)
- .fixups = &is25lp256_fixups },
-
- /* PMC */
- { "pm25lv512", INFO(0, 0, 32 * 1024, 2)
- NO_SFDP_FLAGS(SECT_4K)
+ {
+ .name = "pm25lv512",
+ .sector_size = SZ_32K,
+ .size = SZ_64K,
+ .no_sfdp_flags = SECT_4K,
.fixups = &pm25lv_nor_fixups
- },
- { "pm25lv010", INFO(0, 0, 32 * 1024, 4)
- NO_SFDP_FLAGS(SECT_4K)
+ }, {
+ .name = "pm25lv010",
+ .sector_size = SZ_32K,
+ .size = SZ_128K,
+ .no_sfdp_flags = SECT_4K,
.fixups = &pm25lv_nor_fixups
- },
- { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K) },
+ }, {
+ .id = SNOR_ID(0x7f, 0x9d, 0x20),
+ .name = "is25cd512",
+ .sector_size = SZ_32K,
+ .size = SZ_64K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x7f, 0x9d, 0x46),
+ .name = "pm25lq032",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x9d, 0x40, 0x13),
+ .name = "is25lq040b",
+ .size = SZ_512K,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x9d, 0x60, 0x14),
+ .name = "is25lp080d",
+ .size = SZ_1M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x9d, 0x60, 0x15),
+ .name = "is25lp016d",
+ .size = SZ_2M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x9d, 0x60, 0x16),
+ .name = "is25lp032",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+ }, {
+ .id = SNOR_ID(0x9d, 0x60, 0x17),
+ .name = "is25lp064",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+ }, {
+ .id = SNOR_ID(0x9d, 0x60, 0x18),
+ .name = "is25lp128",
+ .size = SZ_16M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+ }, {
+ .id = SNOR_ID(0x9d, 0x60, 0x19),
+ .name = "is25lp256",
+ .fixups = &is25lp256_fixups,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
+ }, {
+ .id = SNOR_ID(0x9d, 0x70, 0x16),
+ .name = "is25wp032",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x9d, 0x70, 0x17),
+ .size = SZ_8M,
+ .name = "is25wp064",
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x9d, 0x70, 0x18),
+ .name = "is25wp128",
+ .size = SZ_16M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x9d, 0x70, 0x19),
+ .name = "is25wp256",
+ .flags = SPI_NOR_QUAD_PP,
+ .fixups = &is25lp256_fixups,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
+ }
};
static void issi_nor_default_init(struct spi_nor *nor)
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
index eb149e517c1f..ea6be95e75a5 100644
--- a/drivers/mtd/spi-nor/macronix.c
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -33,76 +33,156 @@ static const struct spi_nor_fixups mx25l25635_fixups = {
};
static const struct flash_info macronix_nor_parts[] = {
- /* Macronix */
- { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1)
- NO_SFDP_FLAGS(SECT_4K) },
- { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4)
- NO_SFDP_FLAGS(SECT_4K) },
- { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8)
- NO_SFDP_FLAGS(SECT_4K) },
- { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16) },
- { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32)
- NO_SFDP_FLAGS(SECT_4K) },
- { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K) },
- { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K) },
- { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K) },
- { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4)
- NO_SFDP_FLAGS(SECT_4K) },
- { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8)
- NO_SFDP_FLAGS(SECT_4K) },
- { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16)
- NO_SFDP_FLAGS(SECT_4K) },
- { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K) },
- { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP)
- NO_SFDP_FLAGS(SECT_4K) },
- { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256) },
- { "mx25r1635f", INFO(0xc22815, 0, 64 * 1024, 32)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "mx25r3235f", INFO(0xc22816, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- .fixups = &mx25l25635_fixups },
- { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512)
- NO_SFDP_FLAGS(SECT_4K)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
- { "mx25u51245g", INFO(0xc2253a, 0, 64 * 1024, 1024)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
- { "mx25uw51245g", INFOB(0xc2813a, 0, 0, 0, 4)
- PARSE_SFDP
- FLAGS(SPI_NOR_RWW) },
- { "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512) },
- { "mx66l51235f", INFO(0xc2201a, 0, 64 * 1024, 1024)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
- { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
- { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048)
- NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) },
- { "mx66u2g45g", INFO(0xc2253c, 0, 64 * 1024, 4096)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ {
+ .id = SNOR_ID(0xc2, 0x20, 0x10),
+ .name = "mx25l512e",
+ .size = SZ_64K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xc2, 0x20, 0x12),
+ .name = "mx25l2005a",
+ .size = SZ_256K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xc2, 0x20, 0x13),
+ .name = "mx25l4005a",
+ .size = SZ_512K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xc2, 0x20, 0x14),
+ .name = "mx25l8005",
+ .size = SZ_1M,
+ }, {
+ .id = SNOR_ID(0xc2, 0x20, 0x15),
+ .name = "mx25l1606e",
+ .size = SZ_2M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xc2, 0x20, 0x16),
+ .name = "mx25l3205d",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xc2, 0x20, 0x17),
+ .name = "mx25l6405d",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xc2, 0x20, 0x18),
+ .name = "mx25l12805d",
+ .size = SZ_16M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xc2, 0x20, 0x19),
+ .name = "mx25l25635e",
+ .size = SZ_32M,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixups = &mx25l25635_fixups
+ }, {
+ .id = SNOR_ID(0xc2, 0x20, 0x1a),
+ .name = "mx66l51235f",
+ .size = SZ_64M,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
+ }, {
+ .id = SNOR_ID(0xc2, 0x20, 0x1b),
+ .name = "mx66l1g45g",
+ .size = SZ_128M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xc2, 0x23, 0x14),
+ .name = "mx25v8035f",
+ .size = SZ_1M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xc2, 0x25, 0x32),
+ .name = "mx25u2033e",
+ .size = SZ_256K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xc2, 0x25, 0x33),
+ .name = "mx25u4035",
+ .size = SZ_512K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xc2, 0x25, 0x34),
+ .name = "mx25u8035",
+ .size = SZ_1M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xc2, 0x25, 0x36),
+ .name = "mx25u3235f",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xc2, 0x25, 0x37),
+ .name = "mx25u6435f",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xc2, 0x25, 0x38),
+ .name = "mx25u12835f",
+ .size = SZ_16M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xc2, 0x25, 0x39),
+ .name = "mx25u25635f",
+ .size = SZ_32M,
+ .no_sfdp_flags = SECT_4K,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
+ }, {
+ .id = SNOR_ID(0xc2, 0x25, 0x3a),
+ .name = "mx25u51245g",
+ .size = SZ_64M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
+ }, {
+ .id = SNOR_ID(0xc2, 0x25, 0x3a),
+ .name = "mx66u51235f",
+ .size = SZ_64M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
+ }, {
+ .id = SNOR_ID(0xc2, 0x25, 0x3c),
+ .name = "mx66u2g45g",
+ .size = SZ_256M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
+ }, {
+ .id = SNOR_ID(0xc2, 0x26, 0x18),
+ .name = "mx25l12855e",
+ .size = SZ_16M,
+ }, {
+ .id = SNOR_ID(0xc2, 0x26, 0x19),
+ .name = "mx25l25655e",
+ .size = SZ_32M,
+ }, {
+ .id = SNOR_ID(0xc2, 0x26, 0x1b),
+ .name = "mx66l1g55g",
+ .size = SZ_128M,
+ .no_sfdp_flags = SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xc2, 0x28, 0x15),
+ .name = "mx25r1635f",
+ .size = SZ_2M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xc2, 0x28, 0x16),
+ .name = "mx25r3235f",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xc2, 0x81, 0x3a),
+ .name = "mx25uw51245g",
+ .n_banks = 4,
+ .flags = SPI_NOR_RWW,
+ }, {
+ .id = SNOR_ID(0xc2, 0x9e, 0x16),
+ .name = "mx25l3255e",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K,
+ }
};
static void macronix_nor_default_init(struct spi_nor *nor)
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
index 6ad080c52ab5..8920547c12bf 100644
--- a/drivers/mtd/spi-nor/micron-st.c
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -78,7 +78,7 @@ static int micron_st_nor_octal_dtr_en(struct spi_nor *nor)
return ret;
}
- if (memcmp(buf, nor->info->id, nor->info->id_len))
+ if (memcmp(buf, nor->info->id->bytes, nor->info->id->len))
return -EINVAL;
return 0;
@@ -114,7 +114,7 @@ static int micron_st_nor_octal_dtr_dis(struct spi_nor *nor)
return ret;
}
- if (memcmp(buf, nor->info->id, nor->info->id_len))
+ if (memcmp(buf, nor->info->id->bytes, nor->info->id->len))
return -EINVAL;
return 0;
@@ -159,148 +159,291 @@ static const struct spi_nor_fixups mt35xu512aba_fixups = {
};
static const struct flash_info micron_nor_parts[] = {
- { "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_OCTAL_READ |
- SPI_NOR_OCTAL_DTR_READ | SPI_NOR_OCTAL_DTR_PP)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES | SPI_NOR_IO_MODE_EN_VOLATILE)
- MFR_FLAGS(USE_FSR)
- .fixups = &mt35xu512aba_fixups
- },
- { "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_OCTAL_READ)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
- MFR_FLAGS(USE_FSR)
+ {
+ .id = SNOR_ID(0x2c, 0x5b, 0x1a),
+ .name = "mt35xu512aba",
+ .sector_size = SZ_128K,
+ .size = SZ_64M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_OCTAL_READ |
+ SPI_NOR_OCTAL_DTR_READ | SPI_NOR_OCTAL_DTR_PP,
+ .mfr_flags = USE_FSR,
+ .fixup_flags = SPI_NOR_4B_OPCODES | SPI_NOR_IO_MODE_EN_VOLATILE,
+ .fixups = &mt35xu512aba_fixups,
+ }, {
+ .id = SNOR_ID(0x2c, 0x5b, 0x1c),
+ .name = "mt35xu02g",
+ .sector_size = SZ_128K,
+ .size = SZ_256M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_OCTAL_READ,
+ .mfr_flags = USE_FSR,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
},
};
-static const struct flash_info st_nor_parts[] = {
- { "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) },
- { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) },
- { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
- SPI_NOR_BP3_SR_BIT6)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_FSR)
- },
- { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
- SPI_NOR_BP3_SR_BIT6)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_FSR)
- },
- { "mt25ql256a", INFO6(0x20ba19, 0x104400, 64 * 1024, 512)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
- MFR_FLAGS(USE_FSR)
- },
- { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_FSR)
- },
- { "mt25qu256a", INFO6(0x20bb19, 0x104400, 64 * 1024, 512)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
- SPI_NOR_BP3_SR_BIT6)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
- MFR_FLAGS(USE_FSR)
- },
- { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_FSR)
- },
- { "mt25ql512a", INFO6(0x20ba20, 0x104400, 64 * 1024, 1024)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
- MFR_FLAGS(USE_FSR)
- },
- { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
- SPI_NOR_BP3_SR_BIT6)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_FSR)
- },
- { "mt25qu512a", INFO6(0x20bb20, 0x104400, 64 * 1024, 1024)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
- MFR_FLAGS(USE_FSR)
- },
- { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
- SPI_NOR_BP3_SR_BIT6)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_FSR)
- },
- { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
- SPI_NOR_BP3_SR_BIT6 | NO_CHIP_ERASE)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_FSR)
- },
- { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048)
- FLAGS(NO_CHIP_ERASE)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_FSR)
- },
- { "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096)
- FLAGS(NO_CHIP_ERASE)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_FSR)
- },
- { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096)
- FLAGS(NO_CHIP_ERASE)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_FSR)
- },
+static int mt25qu512a_post_bfpt_fixup(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt)
+{
+ nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+ return 0;
+}
+
+static struct spi_nor_fixups mt25qu512a_fixups = {
+ .post_bfpt = mt25qu512a_post_bfpt_fixup,
+};
- { "m25p05", INFO(0x202010, 0, 32 * 1024, 2) },
- { "m25p10", INFO(0x202011, 0, 32 * 1024, 4) },
- { "m25p20", INFO(0x202012, 0, 64 * 1024, 4) },
- { "m25p40", INFO(0x202013, 0, 64 * 1024, 8) },
- { "m25p80", INFO(0x202014, 0, 64 * 1024, 16) },
- { "m25p16", INFO(0x202015, 0, 64 * 1024, 32) },
- { "m25p32", INFO(0x202016, 0, 64 * 1024, 64) },
- { "m25p64", INFO(0x202017, 0, 64 * 1024, 128) },
- { "m25p128", INFO(0x202018, 0, 256 * 1024, 64) },
-
- { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2) },
- { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4) },
- { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4) },
- { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8) },
- { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16) },
- { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32) },
- { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64) },
- { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128) },
- { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64) },
-
- { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2) },
- { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16) },
- { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32) },
-
- { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4) },
- { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16) },
- { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32)
- NO_SFDP_FLAGS(SECT_4K) },
-
- { "m25px16", INFO(0x207115, 0, 64 * 1024, 32)
- NO_SFDP_FLAGS(SECT_4K) },
- { "m25px32", INFO(0x207116, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K) },
- { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K) },
- { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K) },
- { "m25px64", INFO(0x207117, 0, 64 * 1024, 128) },
- { "m25px80", INFO(0x207114, 0, 64 * 1024, 16) },
+static const struct flash_info st_nor_parts[] = {
+ {
+ .name = "m25p05-nonjedec",
+ .sector_size = SZ_32K,
+ .size = SZ_64K,
+ }, {
+ .name = "m25p10-nonjedec",
+ .sector_size = SZ_32K,
+ .size = SZ_128K,
+ }, {
+ .name = "m25p20-nonjedec",
+ .size = SZ_256K,
+ }, {
+ .name = "m25p40-nonjedec",
+ .size = SZ_512K,
+ }, {
+ .name = "m25p80-nonjedec",
+ .size = SZ_1M,
+ }, {
+ .name = "m25p16-nonjedec",
+ .size = SZ_2M,
+ }, {
+ .name = "m25p32-nonjedec",
+ .size = SZ_4M,
+ }, {
+ .name = "m25p64-nonjedec",
+ .size = SZ_8M,
+ }, {
+ .name = "m25p128-nonjedec",
+ .sector_size = SZ_256K,
+ .size = SZ_16M,
+ }, {
+ .id = SNOR_ID(0x20, 0x20, 0x10),
+ .name = "m25p05",
+ .sector_size = SZ_32K,
+ .size = SZ_64K,
+ }, {
+ .id = SNOR_ID(0x20, 0x20, 0x11),
+ .name = "m25p10",
+ .sector_size = SZ_32K,
+ .size = SZ_128K,
+ }, {
+ .id = SNOR_ID(0x20, 0x20, 0x12),
+ .name = "m25p20",
+ .size = SZ_256K,
+ }, {
+ .id = SNOR_ID(0x20, 0x20, 0x13),
+ .name = "m25p40",
+ .size = SZ_512K,
+ }, {
+ .id = SNOR_ID(0x20, 0x20, 0x14),
+ .name = "m25p80",
+ .size = SZ_1M,
+ }, {
+ .id = SNOR_ID(0x20, 0x20, 0x15),
+ .name = "m25p16",
+ .size = SZ_2M,
+ }, {
+ .id = SNOR_ID(0x20, 0x20, 0x16),
+ .name = "m25p32",
+ .size = SZ_4M,
+ }, {
+ .id = SNOR_ID(0x20, 0x20, 0x17),
+ .name = "m25p64",
+ .size = SZ_8M,
+ }, {
+ .id = SNOR_ID(0x20, 0x20, 0x18),
+ .name = "m25p128",
+ .sector_size = SZ_256K,
+ .size = SZ_16M,
+ }, {
+ .id = SNOR_ID(0x20, 0x40, 0x11),
+ .name = "m45pe10",
+ .size = SZ_128K,
+ }, {
+ .id = SNOR_ID(0x20, 0x40, 0x14),
+ .name = "m45pe80",
+ .size = SZ_1M,
+ }, {
+ .id = SNOR_ID(0x20, 0x40, 0x15),
+ .name = "m45pe16",
+ .size = SZ_2M,
+ }, {
+ .id = SNOR_ID(0x20, 0x63, 0x16),
+ .name = "m25px32-s1",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x20, 0x71, 0x14),
+ .name = "m25px80",
+ .size = SZ_1M,
+ }, {
+ .id = SNOR_ID(0x20, 0x71, 0x15),
+ .name = "m25px16",
+ .size = SZ_2M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x20, 0x71, 0x16),
+ .name = "m25px32",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x20, 0x71, 0x17),
+ .name = "m25px64",
+ .size = SZ_8M,
+ }, {
+ .id = SNOR_ID(0x20, 0x73, 0x16),
+ .name = "m25px32-s0",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x20, 0x80, 0x12),
+ .name = "m25pe20",
+ .size = SZ_256K,
+ }, {
+ .id = SNOR_ID(0x20, 0x80, 0x14),
+ .name = "m25pe80",
+ .size = SZ_1M,
+ }, {
+ .id = SNOR_ID(0x20, 0x80, 0x15),
+ .name = "m25pe16",
+ .size = SZ_2M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x20, 0xba, 0x16),
+ .name = "n25q032",
+ .size = SZ_4M,
+ .no_sfdp_flags = SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x20, 0xba, 0x17),
+ .name = "n25q064",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x20, 0xba, 0x18),
+ .name = "n25q128a13",
+ .size = SZ_16M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_FSR,
+ }, {
+ .id = SNOR_ID(0x20, 0xba, 0x19, 0x10, 0x44, 0x00),
+ .name = "mt25ql256a",
+ .size = SZ_32M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
+ .mfr_flags = USE_FSR,
+ }, {
+ .id = SNOR_ID(0x20, 0xba, 0x19),
+ .name = "n25q256a",
+ .size = SZ_32M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_FSR,
+ }, {
+ .id = SNOR_ID(0x20, 0xba, 0x20, 0x10, 0x44, 0x00),
+ .name = "mt25ql512a",
+ .size = SZ_64M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
+ .mfr_flags = USE_FSR,
+ }, {
+ .id = SNOR_ID(0x20, 0xba, 0x20),
+ .name = "n25q512ax3",
+ .size = SZ_64M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_FSR,
+ }, {
+ .id = SNOR_ID(0x20, 0xba, 0x21),
+ .name = "n25q00",
+ .size = SZ_128M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6 | NO_CHIP_ERASE,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_FSR,
+ }, {
+ .id = SNOR_ID(0x20, 0xba, 0x22),
+ .name = "mt25ql02g",
+ .size = SZ_256M,
+ .flags = NO_CHIP_ERASE,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_FSR,
+ }, {
+ .id = SNOR_ID(0x20, 0xbb, 0x15),
+ .name = "n25q016a",
+ .size = SZ_2M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x20, 0xbb, 0x16),
+ .name = "n25q032a",
+ .size = SZ_4M,
+ .no_sfdp_flags = SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x20, 0xbb, 0x17),
+ .name = "n25q064a",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x20, 0xbb, 0x18),
+ .name = "n25q128a11",
+ .size = SZ_16M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_FSR,
+ }, {
+ .id = SNOR_ID(0x20, 0xbb, 0x19, 0x10, 0x44, 0x00),
+ .name = "mt25qu256a",
+ .size = SZ_32M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
+ .mfr_flags = USE_FSR,
+ }, {
+ .id = SNOR_ID(0x20, 0xbb, 0x19),
+ .name = "n25q256ax1",
+ .size = SZ_32M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_FSR,
+ }, {
+ .id = SNOR_ID(0x20, 0xbb, 0x20, 0x10, 0x44, 0x00),
+ .name = "mt25qu512a",
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6,
+ .mfr_flags = USE_FSR,
+ .fixups = &mt25qu512a_fixups,
+ }, {
+ .id = SNOR_ID(0x20, 0xbb, 0x20),
+ .name = "n25q512a",
+ .size = SZ_64M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_FSR,
+ }, {
+ .id = SNOR_ID(0x20, 0xbb, 0x21),
+ .name = "n25q00a",
+ .size = SZ_128M,
+ .flags = NO_CHIP_ERASE,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_FSR,
+ }, {
+ .id = SNOR_ID(0x20, 0xbb, 0x22),
+ .name = "mt25qu02g",
+ .size = SZ_256M,
+ .flags = NO_CHIP_ERASE,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_FSR,
+ }
};
/**
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index 709822fced86..12921344373d 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -228,7 +228,7 @@ static int cypress_nor_octal_dtr_en(struct spi_nor *nor)
return ret;
}
- if (memcmp(buf, nor->info->id, nor->info->id_len))
+ if (memcmp(buf, nor->info->id->bytes, nor->info->id->len))
return -EINVAL;
return 0;
@@ -272,7 +272,7 @@ static int cypress_nor_octal_dtr_dis(struct spi_nor *nor)
return ret;
}
- if (memcmp(buf, nor->info->id, nor->info->id_len))
+ if (memcmp(buf, nor->info->id->bytes, nor->info->id->len))
return -EINVAL;
return 0;
@@ -756,155 +756,252 @@ static const struct spi_nor_fixups s25fs_s_nor_fixups = {
};
static const struct flash_info spansion_nor_parts[] = {
- /* Spansion/Cypress -- single (large) sector size only, at least
- * for the chips listed here (without boot sectors).
- */
- { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_CLSR)
- },
- { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_CLSR)
- },
- { "s25fl256s0", INFO6(0x010219, 0x4d0080, 256 * 1024, 128)
- NO_SFDP_FLAGS(SPI_NOR_SKIP_SFDP | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_CLSR)
- },
- { "s25fl256s1", INFO6(0x010219, 0x4d0180, 64 * 1024, 512)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_CLSR)
- },
- { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256)
- FLAGS(SPI_NOR_HAS_LOCK)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_CLSR)
- },
- { "s25fs128s1", INFO6(0x012018, 0x4d0181, 64 * 1024, 256)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_CLSR)
- .fixups = &s25fs_s_nor_fixups, },
- { "s25fs256s0", INFO6(0x010219, 0x4d0081, 256 * 1024, 128)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_CLSR)
- },
- { "s25fs256s1", INFO6(0x010219, 0x4d0181, 64 * 1024, 512)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_CLSR)
- },
- { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_CLSR)
- .fixups = &s25fs_s_nor_fixups, },
- { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64) },
- { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256) },
- { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_CLSR)
- },
- { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256)
- NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- MFR_FLAGS(USE_CLSR)
- },
- { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8) },
- { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16) },
- { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32) },
- { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64) },
- { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128) },
- { "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K) },
- { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K) },
- { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
- { "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
- { "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
- { "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
- { "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
- { "s25fs256t", INFO6(0x342b19, 0x0f0890, 0, 0)
- PARSE_SFDP
- MFR_FLAGS(USE_CLPEF)
- .fixups = &s25fs256t_fixups },
- { "s25hl512t", INFO6(0x342a1a, 0x0f0390, 0, 0)
- PARSE_SFDP
- MFR_FLAGS(USE_CLPEF)
- .fixups = &s25hx_t_fixups },
- { "s25hl01gt", INFO6(0x342a1b, 0x0f0390, 0, 0)
- PARSE_SFDP
- MFR_FLAGS(USE_CLPEF)
- .fixups = &s25hx_t_fixups },
- { "s25hl02gt", INFO6(0x342a1c, 0x0f0090, 0, 0)
- PARSE_SFDP
- MFR_FLAGS(USE_CLPEF)
- FLAGS(NO_CHIP_ERASE)
- .fixups = &s25hx_t_fixups },
- { "s25hs512t", INFO6(0x342b1a, 0x0f0390, 0, 0)
- PARSE_SFDP
- MFR_FLAGS(USE_CLPEF)
- .fixups = &s25hx_t_fixups },
- { "s25hs01gt", INFO6(0x342b1b, 0x0f0390, 0, 0)
- PARSE_SFDP
- MFR_FLAGS(USE_CLPEF)
- .fixups = &s25hx_t_fixups },
- { "s25hs02gt", INFO6(0x342b1c, 0x0f0090, 0, 0)
- PARSE_SFDP
- MFR_FLAGS(USE_CLPEF)
- FLAGS(NO_CHIP_ERASE)
- .fixups = &s25hx_t_fixups },
- { "cy15x104q", INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1)
- FLAGS(SPI_NOR_NO_ERASE) },
- { "s28hl512t", INFO(0x345a1a, 0, 0, 0)
- PARSE_SFDP
- MFR_FLAGS(USE_CLPEF)
+ {
+ .id = SNOR_ID(0x01, 0x02, 0x12),
+ .name = "s25sl004a",
+ .size = SZ_512K,
+ }, {
+ .id = SNOR_ID(0x01, 0x02, 0x13),
+ .name = "s25sl008a",
+ .size = SZ_1M,
+ }, {
+ .id = SNOR_ID(0x01, 0x02, 0x14),
+ .name = "s25sl016a",
+ .size = SZ_2M,
+ }, {
+ .id = SNOR_ID(0x01, 0x02, 0x15, 0x4d, 0x00),
+ .name = "s25sl032p",
+ .size = SZ_4M,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x01, 0x02, 0x15),
+ .name = "s25sl032a",
+ .size = SZ_4M,
+ }, {
+ .id = SNOR_ID(0x01, 0x02, 0x16, 0x4d, 0x00),
+ .name = "s25sl064p",
+ .size = SZ_8M,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x01, 0x02, 0x16),
+ .name = "s25sl064a",
+ .size = SZ_8M,
+ }, {
+ .id = SNOR_ID(0x01, 0x02, 0x19, 0x4d, 0x00, 0x80),
+ .name = "s25fl256s0",
+ .size = SZ_32M,
+ .sector_size = SZ_256K,
+ .no_sfdp_flags = SPI_NOR_SKIP_SFDP | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_CLSR,
+ }, {
+ .id = SNOR_ID(0x01, 0x02, 0x19, 0x4d, 0x00, 0x81),
+ .name = "s25fs256s0",
+ .size = SZ_32M,
+ .sector_size = SZ_256K,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_CLSR,
+ }, {
+ .id = SNOR_ID(0x01, 0x02, 0x19, 0x4d, 0x01, 0x80),
+ .name = "s25fl256s1",
+ .size = SZ_32M,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_CLSR,
+ }, {
+ .id = SNOR_ID(0x01, 0x02, 0x19, 0x4d, 0x01, 0x81),
+ .name = "s25fs256s1",
+ .size = SZ_32M,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_CLSR,
+ }, {
+ .id = SNOR_ID(0x01, 0x02, 0x20, 0x4d, 0x00, 0x80),
+ .name = "s25fl512s",
+ .size = SZ_64M,
+ .sector_size = SZ_256K,
+ .flags = SPI_NOR_HAS_LOCK,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_CLSR,
+ }, {
+ .id = SNOR_ID(0x01, 0x02, 0x20, 0x4d, 0x00, 0x81),
+ .name = "s25fs512s",
+ .size = SZ_64M,
+ .sector_size = SZ_256K,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_CLSR,
+ .fixups = &s25fs_s_nor_fixups,
+ }, {
+ .id = SNOR_ID(0x01, 0x20, 0x18, 0x03, 0x00),
+ .name = "s25sl12800",
+ .size = SZ_16M,
+ .sector_size = SZ_256K,
+ }, {
+ .id = SNOR_ID(0x01, 0x20, 0x18, 0x03, 0x01),
+ .name = "s25sl12801",
+ .size = SZ_16M,
+ }, {
+ .id = SNOR_ID(0x01, 0x20, 0x18, 0x4d, 0x00, 0x80),
+ .name = "s25fl128s0",
+ .size = SZ_16M,
+ .sector_size = SZ_256K,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_CLSR,
+ }, {
+ .id = SNOR_ID(0x01, 0x20, 0x18, 0x4d, 0x00),
+ .name = "s25fl129p0",
+ .size = SZ_16M,
+ .sector_size = SZ_256K,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_CLSR,
+ }, {
+ .id = SNOR_ID(0x01, 0x20, 0x18, 0x4d, 0x01, 0x80),
+ .name = "s25fl128s1",
+ .size = SZ_16M,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_CLSR,
+ }, {
+ .id = SNOR_ID(0x01, 0x20, 0x18, 0x4d, 0x01, 0x81),
+ .name = "s25fs128s1",
+ .size = SZ_16M,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_CLSR,
+ .fixups = &s25fs_s_nor_fixups,
+ }, {
+ .id = SNOR_ID(0x01, 0x20, 0x18, 0x4d, 0x01),
+ .name = "s25fl129p1",
+ .size = SZ_16M,
+ .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .mfr_flags = USE_CLSR,
+ }, {
+ .id = SNOR_ID(0x01, 0x40, 0x13),
+ .name = "s25fl204k",
+ .size = SZ_512K,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+ }, {
+ .id = SNOR_ID(0x01, 0x40, 0x14),
+ .name = "s25fl208k",
+ .size = SZ_1M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+ }, {
+ .id = SNOR_ID(0x01, 0x40, 0x15),
+ .name = "s25fl116k",
+ .size = SZ_2M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x01, 0x40, 0x16),
+ .name = "s25fl132k",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x01, 0x40, 0x17),
+ .name = "s25fl164k",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x01, 0x60, 0x17),
+ .name = "s25fl064l",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
+ }, {
+ .id = SNOR_ID(0x01, 0x60, 0x18),
+ .name = "s25fl128l",
+ .size = SZ_16M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
+ }, {
+ .id = SNOR_ID(0x01, 0x60, 0x19),
+ .name = "s25fl256l",
+ .size = SZ_32M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixup_flags = SPI_NOR_4B_OPCODES,
+ }, {
+ .id = SNOR_ID(0x04, 0x2c, 0xc2, 0x7f, 0x7f, 0x7f),
+ .name = "cy15x104q",
+ .size = SZ_512K,
+ .sector_size = SZ_512K,
+ .flags = SPI_NOR_NO_ERASE,
+ }, {
+ .id = SNOR_ID(0x34, 0x2a, 0x1a, 0x0f, 0x03, 0x90),
+ .name = "s25hl512t",
+ .mfr_flags = USE_CLPEF,
+ .fixups = &s25hx_t_fixups
+ }, {
+ .id = SNOR_ID(0x34, 0x2a, 0x1b, 0x0f, 0x03, 0x90),
+ .name = "s25hl01gt",
+ .mfr_flags = USE_CLPEF,
+ .fixups = &s25hx_t_fixups
+ }, {
+ .id = SNOR_ID(0x34, 0x2a, 0x1c, 0x0f, 0x00, 0x90),
+ .name = "s25hl02gt",
+ .mfr_flags = USE_CLPEF,
+ .flags = NO_CHIP_ERASE,
+ .fixups = &s25hx_t_fixups
+ }, {
+ .id = SNOR_ID(0x34, 0x2b, 0x19, 0x0f, 0x08, 0x90),
+ .name = "s25fs256t",
+ .mfr_flags = USE_CLPEF,
+ .fixups = &s25fs256t_fixups
+ }, {
+ .id = SNOR_ID(0x34, 0x2b, 0x1a, 0x0f, 0x03, 0x90),
+ .name = "s25hs512t",
+ .mfr_flags = USE_CLPEF,
+ .fixups = &s25hx_t_fixups
+ }, {
+ .id = SNOR_ID(0x34, 0x2b, 0x1b, 0x0f, 0x03, 0x90),
+ .name = "s25hs01gt",
+ .mfr_flags = USE_CLPEF,
+ .fixups = &s25hx_t_fixups
+ }, {
+ .id = SNOR_ID(0x34, 0x2b, 0x1c, 0x0f, 0x00, 0x90),
+ .name = "s25hs02gt",
+ .mfr_flags = USE_CLPEF,
+ .flags = NO_CHIP_ERASE,
+ .fixups = &s25hx_t_fixups
+ }, {
+ .id = SNOR_ID(0x34, 0x5a, 0x1a),
+ .name = "s28hl512t",
+ .mfr_flags = USE_CLPEF,
.fixups = &s28hx_t_fixups,
- },
- { "s28hl01gt", INFO(0x345a1b, 0, 0, 0)
- PARSE_SFDP
- MFR_FLAGS(USE_CLPEF)
+ }, {
+ .id = SNOR_ID(0x34, 0x5a, 0x1b),
+ .name = "s28hl01gt",
+ .mfr_flags = USE_CLPEF,
.fixups = &s28hx_t_fixups,
- },
- { "s28hs512t", INFO(0x345b1a, 0, 0, 0)
- PARSE_SFDP
- MFR_FLAGS(USE_CLPEF)
+ }, {
+ .id = SNOR_ID(0x34, 0x5b, 0x1a),
+ .name = "s28hs512t",
+ .mfr_flags = USE_CLPEF,
.fixups = &s28hx_t_fixups,
- },
- { "s28hs01gt", INFO(0x345b1b, 0, 0, 0)
- PARSE_SFDP
- MFR_FLAGS(USE_CLPEF)
+ }, {
+ .id = SNOR_ID(0x34, 0x5b, 0x1b),
+ .name = "s28hs01gt",
+ .mfr_flags = USE_CLPEF,
.fixups = &s28hx_t_fixups,
- },
- { "s28hs02gt", INFO(0x345b1c, 0, 0, 0)
- PARSE_SFDP
- MFR_FLAGS(USE_CLPEF)
+ }, {
+ .id = SNOR_ID(0x34, 0x5b, 0x1c),
+ .name = "s28hs02gt",
+ .mfr_flags = USE_CLPEF,
.fixups = &s28hx_t_fixups,
- },
+ }, {
+ .id = SNOR_ID(0xef, 0x40, 0x13),
+ .name = "s25fl004k",
+ .size = SZ_512K,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x40, 0x14),
+ .name = "s25fl008k",
+ .size = SZ_1M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x40, 0x15),
+ .name = "s25fl016k",
+ .size = SZ_2M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x40, 0x17),
+ .name = "s25fl064k",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }
};
/**
@@ -956,7 +1053,8 @@ static int spansion_nor_late_init(struct spi_nor *nor)
nor->flags |= SNOR_F_4B_OPCODES;
/* No small sector erase for 4-byte command set */
nor->erase_opcode = SPINOR_OP_SE;
- nor->mtd.erasesize = nor->info->sector_size;
+ nor->mtd.erasesize = nor->info->sector_size ?:
+ SPI_NOR_DEFAULT_SECTOR_SIZE;
}
if (mfr_flags & (USE_CLSR | USE_CLPEF)) {
diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c
index 197d2c1101ed..44d2a546bf17 100644
--- a/drivers/mtd/spi-nor/sst.c
+++ b/drivers/mtd/spi-nor/sst.c
@@ -61,66 +61,110 @@ static const struct spi_nor_fixups sst26vf_nor_fixups = {
};
static const struct flash_info sst_nor_parts[] = {
- /* SST -- large erase sizes are "overlays", "sectors" are 4K */
- { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- MFR_FLAGS(SST_WRITE) },
- { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- MFR_FLAGS(SST_WRITE) },
- { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- MFR_FLAGS(SST_WRITE) },
- { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- MFR_FLAGS(SST_WRITE) },
- { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP |
- SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K) },
- { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- MFR_FLAGS(SST_WRITE) },
- { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- MFR_FLAGS(SST_WRITE) },
- { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- MFR_FLAGS(SST_WRITE) },
- { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4)
- FLAGS(SPI_NOR_HAS_LOCK)
- NO_SFDP_FLAGS(SECT_4K) },
- { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8)
- FLAGS(SPI_NOR_HAS_LOCK)
- NO_SFDP_FLAGS(SECT_4K) },
- { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- MFR_FLAGS(SST_WRITE) },
- { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K)
- MFR_FLAGS(SST_WRITE) },
- { "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
- { "sst26vf032b", INFO(0xbf2642, 0, 0, 0)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- PARSE_SFDP
- .fixups = &sst26vf_nor_fixups },
- { "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- .fixups = &sst26vf_nor_fixups },
+ {
+ .id = SNOR_ID(0x62, 0x16, 0x12),
+ .name = "sst25wf020a",
+ .size = SZ_256K,
+ .flags = SPI_NOR_HAS_LOCK,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0x62, 0x16, 0x13),
+ .name = "sst25wf040b",
+ .size = SZ_512K,
+ .flags = SPI_NOR_HAS_LOCK,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xbf, 0x25, 0x01),
+ .name = "sst25wf512",
+ .size = SZ_64K,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .mfr_flags = SST_WRITE,
+ }, {
+ .id = SNOR_ID(0xbf, 0x25, 0x02),
+ .name = "sst25wf010",
+ .size = SZ_128K,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .mfr_flags = SST_WRITE,
+ }, {
+ .id = SNOR_ID(0xbf, 0x25, 0x03),
+ .name = "sst25wf020",
+ .size = SZ_256K,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .mfr_flags = SST_WRITE,
+ }, {
+ .id = SNOR_ID(0xbf, 0x25, 0x04),
+ .name = "sst25wf040",
+ .size = SZ_512K,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .mfr_flags = SST_WRITE,
+ }, {
+ .id = SNOR_ID(0xbf, 0x25, 0x05),
+ .name = "sst25wf080",
+ .size = SZ_1M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .mfr_flags = SST_WRITE,
+ }, {
+ .id = SNOR_ID(0xbf, 0x25, 0x41),
+ .name = "sst25vf016b",
+ .size = SZ_2M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .mfr_flags = SST_WRITE,
+ }, {
+ .id = SNOR_ID(0xbf, 0x25, 0x4a),
+ .name = "sst25vf032b",
+ .size = SZ_4M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .mfr_flags = SST_WRITE,
+ }, {
+ .id = SNOR_ID(0xbf, 0x25, 0x4b),
+ .name = "sst25vf064c",
+ .size = SZ_8M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xbf, 0x25, 0x8d),
+ .name = "sst25vf040b",
+ .size = SZ_512K,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .mfr_flags = SST_WRITE,
+ }, {
+ .id = SNOR_ID(0xbf, 0x25, 0x8e),
+ .name = "sst25vf080b",
+ .size = SZ_1M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K,
+ .mfr_flags = SST_WRITE,
+ }, {
+ .id = SNOR_ID(0xbf, 0x26, 0x41),
+ .name = "sst26vf016b",
+ .size = SZ_2M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+ }, {
+ .id = SNOR_ID(0xbf, 0x26, 0x42),
+ .name = "sst26vf032b",
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .fixups = &sst26vf_nor_fixups,
+ }, {
+ .id = SNOR_ID(0xbf, 0x26, 0x43),
+ .name = "sst26vf064b",
+ .size = SZ_8M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixups = &sst26vf_nor_fixups,
+ }, {
+ .id = SNOR_ID(0xbf, 0x26, 0x51),
+ .name = "sst26wf016b",
+ .size = SZ_2M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }
};
static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
diff --git a/drivers/mtd/spi-nor/swp.c b/drivers/mtd/spi-nor/swp.c
index 5ab9d5324860..585813310ee1 100644
--- a/drivers/mtd/spi-nor/swp.c
+++ b/drivers/mtd/spi-nor/swp.c
@@ -34,17 +34,22 @@ static u8 spi_nor_get_sr_tb_mask(struct spi_nor *nor)
static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor)
{
unsigned int bp_slots, bp_slots_needed;
+ /*
+ * sector_size will eventually be replaced with the max erase size of
+ * the flash. For now, we need to have that ugly default.
+ */
+ unsigned int sector_size = nor->info->sector_size ?: SPI_NOR_DEFAULT_SECTOR_SIZE;
+ u64 n_sectors = div_u64(nor->params->size, sector_size);
u8 mask = spi_nor_get_sr_bp_mask(nor);
/* Reserved one for "protect none" and one for "protect all". */
bp_slots = (1 << hweight8(mask)) - 2;
- bp_slots_needed = ilog2(nor->info->n_sectors);
+ bp_slots_needed = ilog2(n_sectors);
if (bp_slots_needed > bp_slots)
- return nor->info->sector_size <<
- (bp_slots_needed - bp_slots);
+ return sector_size << (bp_slots_needed - bp_slots);
else
- return nor->info->sector_size;
+ return sector_size;
}
static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
diff --git a/drivers/mtd/spi-nor/sysfs.c b/drivers/mtd/spi-nor/sysfs.c
index c09bb832b3b9..2dfdc555a69f 100644
--- a/drivers/mtd/spi-nor/sysfs.c
+++ b/drivers/mtd/spi-nor/sysfs.c
@@ -35,8 +35,8 @@ static ssize_t jedec_id_show(struct device *dev,
struct spi_device *spi = to_spi_device(dev);
struct spi_mem *spimem = spi_get_drvdata(spi);
struct spi_nor *nor = spi_mem_get_drvdata(spimem);
- const u8 *id = nor->info->id_len ? nor->info->id : nor->id;
- u8 id_len = nor->info->id_len ?: SPI_NOR_MAX_ID_LEN;
+ const u8 *id = nor->info->id ? nor->info->id->bytes : nor->id;
+ u8 id_len = nor->info->id ? nor->info->id->len : SPI_NOR_MAX_ID_LEN;
return sysfs_emit(buf, "%*phN\n", id_len, id);
}
@@ -78,7 +78,7 @@ static umode_t spi_nor_sysfs_is_visible(struct kobject *kobj,
if (attr == &dev_attr_manufacturer.attr && !nor->manufacturer)
return 0;
- if (attr == &dev_attr_jedec_id.attr && !nor->info->id_len && !nor->id)
+ if (attr == &dev_attr_jedec_id.attr && !nor->info->id && !nor->id)
return 0;
return 0444;
diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
index cd99c9a1c568..142fb27b2ea9 100644
--- a/drivers/mtd/spi-nor/winbond.c
+++ b/drivers/mtd/spi-nor/winbond.c
@@ -42,107 +42,191 @@ static const struct spi_nor_fixups w25q256_fixups = {
};
static const struct flash_info winbond_nor_parts[] = {
- /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
- { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024, 32)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- OTP_INFO(256, 3, 0x1000, 0x1000) },
- { "w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "w25q32jwm", INFO(0xef8016, 0, 64 * 1024, 64)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- OTP_INFO(256, 3, 0x1000, 0x1000) },
- { "w25q64jwm", INFO(0xef8017, 0, 64 * 1024, 128)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "w25q128jwm", INFO(0xef8018, 0, 64 * 1024, 256)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "w25q256jwm", INFO(0xef8019, 0, 64 * 1024, 512)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "w25q64jvm", INFO(0xef7017, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256)
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16)
- NO_SFDP_FLAGS(SECT_4K) },
- { "w25q128", INFO(0xef4018, 0, 0, 0)
- PARSE_SFDP
- FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- .fixups = &w25q256_fixups },
- { "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512)
- PARSE_SFDP },
- { "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ |
- SPI_NOR_DUAL_READ) },
- { "w25q512nwq", INFO(0xef6020, 0, 0, 0)
- PARSE_SFDP
- OTP_INFO(256, 3, 0x1000, 0x1000) },
- { "w25q512nwm", INFO(0xef8020, 0, 64 * 1024, 1024)
- PARSE_SFDP
- OTP_INFO(256, 3, 0x1000, 0x1000) },
- { "w25q512jvq", INFO(0xef4020, 0, 64 * 1024, 1024)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
+ {
+ .id = SNOR_ID(0xef, 0x30, 0x10),
+ .name = "w25x05",
+ .size = SZ_64K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x30, 0x11),
+ .name = "w25x10",
+ .size = SZ_128K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x30, 0x12),
+ .name = "w25x20",
+ .size = SZ_256K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x30, 0x13),
+ .name = "w25x40",
+ .size = SZ_512K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x30, 0x14),
+ .name = "w25x80",
+ .size = SZ_1M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x30, 0x15),
+ .name = "w25x16",
+ .size = SZ_2M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x30, 0x16),
+ .name = "w25x32",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x30, 0x17),
+ .name = "w25x64",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x40, 0x12),
+ .name = "w25q20cl",
+ .size = SZ_256K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x40, 0x14),
+ .name = "w25q80bl",
+ .size = SZ_1M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x40, 0x16),
+ .name = "w25q32",
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x40, 0x17),
+ .name = "w25q64",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x40, 0x18),
+ .name = "w25q128",
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ }, {
+ .id = SNOR_ID(0xef, 0x40, 0x19),
+ .name = "w25q256",
+ .size = SZ_32M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixups = &w25q256_fixups,
+ }, {
+ .id = SNOR_ID(0xef, 0x40, 0x20),
+ .name = "w25q512jvq",
+ .size = SZ_64M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x50, 0x12),
+ .name = "w25q20bw",
+ .size = SZ_256K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x50, 0x14),
+ .name = "w25q80",
+ .size = SZ_1M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x60, 0x12),
+ .name = "w25q20ew",
+ .size = SZ_256K,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x60, 0x15),
+ .name = "w25q16dw",
+ .size = SZ_2M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x60, 0x16),
+ .name = "w25q32dw",
+ .size = SZ_4M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .otp = SNOR_OTP(256, 3, 0x1000, 0x1000),
+ }, {
+ .id = SNOR_ID(0xef, 0x60, 0x17),
+ .name = "w25q64dw",
+ .size = SZ_8M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x60, 0x18),
+ .name = "w25q128fw",
+ .size = SZ_16M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x60, 0x19),
+ .name = "w25q256jw",
+ .size = SZ_32M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x60, 0x20),
+ .name = "w25q512nwq",
+ .otp = SNOR_OTP(256, 3, 0x1000, 0x1000),
+ }, {
+ .id = SNOR_ID(0xef, 0x70, 0x15),
+ .name = "w25q16jv-im/jm",
+ .size = SZ_2M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x70, 0x16),
+ .name = "w25q32jv",
+ .size = SZ_4M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x70, 0x17),
+ .name = "w25q64jvm",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K,
+ }, {
+ .id = SNOR_ID(0xef, 0x70, 0x18),
+ .name = "w25q128jv",
+ .size = SZ_16M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x70, 0x19),
+ .name = "w25q256jvm",
+ }, {
+ .id = SNOR_ID(0xef, 0x71, 0x19),
+ .name = "w25m512jv",
+ .size = SZ_64M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x80, 0x16),
+ .name = "w25q32jwm",
+ .size = SZ_4M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .otp = SNOR_OTP(256, 3, 0x1000, 0x1000),
+ }, {
+ .id = SNOR_ID(0xef, 0x80, 0x17),
+ .name = "w25q64jwm",
+ .size = SZ_8M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x80, 0x18),
+ .name = "w25q128jwm",
+ .size = SZ_16M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x80, 0x19),
+ .name = "w25q256jwm",
+ .size = SZ_32M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0xef, 0x80, 0x20),
+ .name = "w25q512nwm",
+ .otp = SNOR_OTP(256, 3, 0x1000, 0x1000),
+ },
};
/**
@@ -221,7 +305,7 @@ static int winbond_nor_late_init(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
- if (params->otp.org->n_regions)
+ if (params->otp.org)
params->otp.ops = &winbond_nor_otp_ops;
/*
diff --git a/drivers/mtd/spi-nor/xilinx.c b/drivers/mtd/spi-nor/xilinx.c
index 00d53eae5ee8..f99118c691b0 100644
--- a/drivers/mtd/spi-nor/xilinx.c
+++ b/drivers/mtd/spi-nor/xilinx.c
@@ -21,28 +21,22 @@
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_IN(1, buf, 0))
-#define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \
- .id = { \
- ((_jedec_id) >> 16) & 0xff, \
- ((_jedec_id) >> 8) & 0xff, \
- (_jedec_id) & 0xff \
- }, \
- .id_len = 3, \
- .sector_size = (8 * (_page_size)), \
- .n_sectors = (_n_sectors), \
- .page_size = (_page_size), \
- .n_banks = 1, \
- .addr_nbytes = 3, \
- .flags = SPI_NOR_NO_FR
+#define S3AN_FLASH(_id, _name, _n_sectors, _page_size) \
+ .id = _id, \
+ .name = _name, \
+ .size = 8 * (_page_size) * (_n_sectors), \
+ .sector_size = (8 * (_page_size)), \
+ .page_size = (_page_size), \
+ .flags = SPI_NOR_NO_FR
/* Xilinx S3AN share MFR with Atmel SPI NOR */
static const struct flash_info xilinx_nor_parts[] = {
/* Xilinx S3AN Internal Flash */
- { "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
- { "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
- { "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
- { "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
- { "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },
+ { S3AN_FLASH(SNOR_ID(0x1f, 0x22, 0x00), "3S50AN", 64, 264) },
+ { S3AN_FLASH(SNOR_ID(0x1f, 0x24, 0x00), "3S200AN", 256, 264) },
+ { S3AN_FLASH(SNOR_ID(0x1f, 0x24, 0x00), "3S400AN", 256, 264) },
+ { S3AN_FLASH(SNOR_ID(0x1f, 0x25, 0x00), "3S700AN", 512, 264) },
+ { S3AN_FLASH(SNOR_ID(0x1f, 0x26, 0x00), "3S1400AN", 512, 528) },
};
/*
@@ -144,7 +138,7 @@ static int xilinx_nor_setup(struct spi_nor *nor,
page_size = (nor->params->page_size == 264) ? 256 : 512;
nor->params->page_size = page_size;
nor->mtd.writebufsize = page_size;
- nor->params->size = 8 * page_size * nor->info->n_sectors;
+ nor->params->size = nor->info->size;
nor->mtd.erasesize = 8 * page_size;
} else {
/* Flash in Default addressing mode */
diff --git a/drivers/mtd/spi-nor/xmc.c b/drivers/mtd/spi-nor/xmc.c
index 051411e86339..d5a06054b0dd 100644
--- a/drivers/mtd/spi-nor/xmc.c
+++ b/drivers/mtd/spi-nor/xmc.c
@@ -9,15 +9,20 @@
#include "core.h"
static const struct flash_info xmc_nor_parts[] = {
- /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
- { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256)
- NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
+ {
+ .id = SNOR_ID(0x20, 0x70, 0x17),
+ .name = "XM25QH64A",
+ .size = SZ_8M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ }, {
+ .id = SNOR_ID(0x20, 0x70, 0x18),
+ .name = "XM25QH128A",
+ .size = SZ_16M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ },
};
+/* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
const struct spi_nor_manufacturer spi_nor_xmc = {
.name = "xmc",
.parts = xmc_nor_parts,
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 437c5b83ffe5..309a42aeaa4c 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -447,13 +447,15 @@ out_unlock:
static void ubiblock_cleanup(struct ubiblock *dev)
{
+ int id = dev->gd->first_minor;
+
/* Stop new requests to arrive */
del_gendisk(dev->gd);
/* Finally destroy the blk queue */
dev_info(disk_to_dev(dev->gd), "released");
put_disk(dev->gd);
blk_mq_free_tag_set(&dev->tag_set);
- idr_remove(&ubiblock_minor_idr, dev->gd->first_minor);
+ idr_remove(&ubiblock_minor_idr, id);
}
int ubiblock_remove(struct ubi_volume_info *vi)
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 8ee51e49fced..7d4ff1193db6 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -35,7 +35,7 @@
#define MTD_PARAM_LEN_MAX 64
/* Maximum number of comma-separated items in the 'mtd=' parameter */
-#define MTD_PARAM_MAX_COUNT 5
+#define MTD_PARAM_MAX_COUNT 6
/* Maximum value for the number of bad PEBs per 1024 PEBs */
#define MAX_MTD_UBI_BEB_LIMIT 768
@@ -54,6 +54,7 @@
* @vid_hdr_offs: VID header offset
* @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
* @enable_fm: enable fastmap when value is non-zero
+ * @need_resv_pool: reserve pool->max_size PEBs when value is non-zero
*/
struct mtd_dev_param {
char name[MTD_PARAM_LEN_MAX];
@@ -61,6 +62,7 @@ struct mtd_dev_param {
int vid_hdr_offs;
int max_beb_per1024;
int enable_fm;
+ int need_resv_pool;
};
/* Numbers of elements set in the @mtd_dev_param array */
@@ -825,6 +827,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
* @vid_hdr_offset: VID header offset
* @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
* @disable_fm: whether disable fastmap
+ * @need_resv_pool: whether to reserve PEBs to fill fm_pool
*
* This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number
* to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
@@ -840,7 +843,8 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
* @ubi_devices_mutex.
*/
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
- int vid_hdr_offset, int max_beb_per1024, bool disable_fm)
+ int vid_hdr_offset, int max_beb_per1024, bool disable_fm,
+ bool need_resv_pool)
{
struct ubi_device *ubi;
int i, err;
@@ -951,6 +955,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
UBI_FM_MIN_POOL_SIZE);
ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
+ ubi->fm_pool_rsv_cnt = need_resv_pool ? ubi->fm_pool.max_size : 0;
ubi->fm_disabled = (!fm_autoconvert || disable_fm) ? 1 : 0;
if (fm_debug)
ubi_enable_dbg_chk_fastmap(ubi);
@@ -1273,7 +1278,8 @@ static int __init ubi_init(void)
mutex_lock(&ubi_devices_mutex);
err = ubi_attach_mtd_dev(mtd, p->ubi_num,
p->vid_hdr_offs, p->max_beb_per1024,
- p->enable_fm == 0);
+ p->enable_fm == 0,
+ p->need_resv_pool != 0);
mutex_unlock(&ubi_devices_mutex);
if (err < 0) {
pr_err("UBI error: cannot attach mtd%d\n",
@@ -1482,6 +1488,18 @@ static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
} else
p->enable_fm = 0;
+ token = tokens[5];
+ if (token) {
+ int err = kstrtoint(token, 10, &p->need_resv_pool);
+
+ if (err) {
+ pr_err("UBI error: bad value for need_resv_pool parameter: %s\n",
+ token);
+ return -EINVAL;
+ }
+ } else
+ p->need_resv_pool = 0;
+
mtd_devs += 1;
return 0;
}
@@ -1495,6 +1513,7 @@ MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|pa
__stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
"Optional \"ubi_num\" parameter specifies UBI device number which have to be assigned to the newly created UBI device (assigned automatically by default)\n"
"Optional \"enable_fm\" parameter determines whether to enable fastmap during attach. If the value is non-zero, fastmap is enabled. Default value is 0.\n"
+ "Optional \"need_resv_pool\" parameter determines whether to reserve pool->max_size pebs during attach. If the value is non-zero, peb reservation is enabled. Default value is 0.\n"
"\n"
"Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
"Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index f43430b9c1e6..0d8f04cf03c5 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -1041,7 +1041,8 @@ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
*/
mutex_lock(&ubi_devices_mutex);
err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset,
- req.max_beb_per1024, !!req.disable_fm);
+ req.max_beb_per1024, !!req.disable_fm,
+ !!req.need_resv_pool);
mutex_unlock(&ubi_devices_mutex);
if (err < 0)
put_mtd_device(mtd);
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 655ff41863e2..8d1f0e05892c 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -33,9 +33,6 @@
#include <linux/err.h>
#include "ubi.h"
-/* Number of physical eraseblocks reserved for atomic LEB change operation */
-#define EBA_RESERVED_PEBS 1
-
/**
* struct ubi_eba_entry - structure encoding a single LEB -> PEB association
* @pnum: the physical eraseblock number attached to the LEB
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 863f571f1adb..2a9cc9413c42 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -76,7 +76,7 @@ struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
struct ubi_wl_entry *e = NULL;
- if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
+ if (!ubi->free.rb_node)
goto out;
if (anchor)
@@ -98,43 +98,104 @@ out:
}
/*
- * has_enough_free_count - whether ubi has enough free pebs to fill fm pools
+ * wait_free_pebs_for_pool - wait until there are enough free PEBs
+ * @ubi: UBI device description object
+ *
+ * Wait and execute do_work until there are enough free PEBs, then fill
+ * the pool as much as we can. This reduces the number of pool refills,
+ * which in turn reduces the fastmap update frequency.
+ */
+static void wait_free_pebs_for_pool(struct ubi_device *ubi)
+{
+ struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
+ int free, expect_free, executed;
+ /*
+ * At least the following free PEBs are reserved by UBI:
+ * 1. WL_RESERVED_PEBS[1]
+ * 2. EBA_RESERVED_PEBS[1]
+ * 3. fm pebs - 1: twice the fastmap size, minus the fastmap and fm_anchor
+ * 4. beb_rsvd_pebs: this value must be read under ubi->wl_lock
+ */
+ int reserved = WL_RESERVED_PEBS + EBA_RESERVED_PEBS +
+ ubi->fm_size / ubi->leb_size - 1 + ubi->fm_pool_rsv_cnt;
+
+ do {
+ spin_lock(&ubi->wl_lock);
+ free = ubi->free_count;
+ free += pool->size - pool->used + wl_pool->size - wl_pool->used;
+ expect_free = reserved + ubi->beb_rsvd_pebs;
+ spin_unlock(&ubi->wl_lock);
+
+ /*
+ * Break out if there is no pending work or the work failed to
+ * execute, given that erase_worker reschedules itself when the
+ * MTD layer returns -EBUSY due to system shutdown.
+ */
+ if (do_work(ubi, &executed) || !executed)
+ break;
+ } while (free < expect_free);
+}
+
+/*
+ * left_free_count - returns the number of free pebs to fill fm pools
* @ubi: UBI device description object
- * @is_wl_pool: whether UBI is filling wear leveling pool
*
- * This helper function checks whether there are enough free pebs (deducted
- * by fastmap pebs) to fill fm_pool and fm_wl_pool, above rule works after
- * there is at least one of free pebs is filled into fm_wl_pool.
- * For wear leveling pool, UBI should also reserve free pebs for bad pebs
- * handling, because there maybe no enough free pebs for user volumes after
- * producing new bad pebs.
+ * This helper function returns the number of free PEBs, minus the
+ * PEBs reserved for fastmap, available to fill fm_pool and fm_wl_pool.
*/
-static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool)
+static int left_free_count(struct ubi_device *ubi)
{
int fm_used = 0; // fastmap non anchor pebs.
- int beb_rsvd_pebs;
if (!ubi->free.rb_node)
- return false;
+ return 0;
- beb_rsvd_pebs = is_wl_pool ? ubi->beb_rsvd_pebs : 0;
- if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled))
+ if (!ubi->ro_mode && !ubi->fm_disabled)
fm_used = ubi->fm_size / ubi->leb_size - 1;
- return ubi->free_count - beb_rsvd_pebs > fm_used;
+ return ubi->free_count - fm_used;
+}
+
+/*
+ * can_fill_pools - whether free PEBs will be left after filling pools
+ * @ubi: UBI device description object
+ * @free: current number of free PEBs
+ *
+ * Return %1 if free PEBs remain after filling the pools,
+ * otherwise %0 is returned.
+ */
+static int can_fill_pools(struct ubi_device *ubi, int free)
+{
+ struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
+ int pool_need = pool->max_size - pool->size +
+ wl_pool->max_size - wl_pool->size;
+
+ if (free - pool_need < 1)
+ return 0;
+
+ return 1;
}
/**
- * ubi_refill_pools - refills all fastmap PEB pools.
+ * ubi_refill_pools_and_lock - refills all fastmap PEB pools and takes fm locks.
* @ubi: UBI device description object
*/
-void ubi_refill_pools(struct ubi_device *ubi)
+void ubi_refill_pools_and_lock(struct ubi_device *ubi)
{
struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
struct ubi_fm_pool *pool = &ubi->fm_pool;
struct ubi_wl_entry *e;
int enough;
+ if (!ubi->ro_mode && !ubi->fm_disabled)
+ wait_free_pebs_for_pool(ubi);
+
+ down_write(&ubi->fm_protect);
+ down_write(&ubi->work_sem);
+ down_write(&ubi->fm_eba_sem);
+
spin_lock(&ubi->wl_lock);
return_unused_pool_pebs(ubi, wl_pool);
@@ -159,7 +220,7 @@ void ubi_refill_pools(struct ubi_device *ubi)
for (;;) {
enough = 0;
if (pool->size < pool->max_size) {
- if (!has_enough_free_count(ubi, false))
+ if (left_free_count(ubi) <= 0)
break;
e = wl_get_wle(ubi);
@@ -172,10 +233,13 @@ void ubi_refill_pools(struct ubi_device *ubi)
enough++;
if (wl_pool->size < wl_pool->max_size) {
- if (!has_enough_free_count(ubi, true))
+ int left_free = left_free_count(ubi);
+
+ if (left_free <= 0)
break;
- e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF,
+ !can_fill_pools(ubi, left_free));
self_check_in_wl_tree(ubi, e, &ubi->free);
rb_erase(&e->u.rb, &ubi->free);
ubi->free_count--;
@@ -210,7 +274,7 @@ static int produce_free_peb(struct ubi_device *ubi)
while (!ubi->free.rb_node && ubi->works_count) {
dbg_wl("do one work synchronously");
- err = do_work(ubi);
+ err = do_work(ubi, NULL);
if (err)
return err;
@@ -315,12 +379,12 @@ static bool need_wear_leveling(struct ubi_device *ubi)
if (!e) {
if (!ubi->free.rb_node)
return false;
- e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
ec = e->ec;
} else {
ec = e->ec;
if (ubi->free.rb_node) {
- e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
ec = max(ec, e->ec);
}
}
@@ -481,7 +545,7 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
struct ubi_wl_entry *e,
struct rb_root *root) {
- if (e && !ubi->fm_disabled && !ubi->fm &&
+ if (e && !ubi->fm_disabled && !ubi->fm && !ubi->fm_anchor &&
e->pnum < UBI_FM_MAX_START)
e = rb_entry(rb_next(root->rb_node),
struct ubi_wl_entry, u.rb);
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 28c8151a0725..2a728c31e6b8 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -20,7 +20,7 @@ static inline unsigned long *init_seen(struct ubi_device *ubi)
if (!ubi_dbg_chk_fastmap(ubi))
return NULL;
- ret = bitmap_zalloc(ubi->peb_count, GFP_KERNEL);
+ ret = bitmap_zalloc(ubi->peb_count, GFP_NOFS);
if (!ret)
return ERR_PTR(-ENOMEM);
@@ -105,7 +105,7 @@ static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
struct ubi_vid_io_buf *new;
struct ubi_vid_hdr *vh;
- new = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
+ new = ubi_alloc_vid_buf(ubi, GFP_NOFS);
if (!new)
goto out;
@@ -1390,53 +1390,6 @@ out:
}
/**
- * erase_block - Manually erase a PEB.
- * @ubi: UBI device object
- * @pnum: PEB to be erased
- *
- * Returns the new EC value on success, < 0 indicates an internal error.
- */
-static int erase_block(struct ubi_device *ubi, int pnum)
-{
- int ret;
- struct ubi_ec_hdr *ec_hdr;
- long long ec;
-
- ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
- if (!ec_hdr)
- return -ENOMEM;
-
- ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
- if (ret < 0)
- goto out;
- else if (ret && ret != UBI_IO_BITFLIPS) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = ubi_io_sync_erase(ubi, pnum, 0);
- if (ret < 0)
- goto out;
-
- ec = be64_to_cpu(ec_hdr->ec);
- ec += ret;
- if (ec > UBI_MAX_ERASECOUNTER) {
- ret = -EINVAL;
- goto out;
- }
-
- ec_hdr->ec = cpu_to_be64(ec);
- ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
- if (ret < 0)
- goto out;
-
- ret = ec;
-out:
- kfree(ec_hdr);
- return ret;
-}
-
-/**
* invalidate_fastmap - destroys a fastmap.
* @ubi: UBI device object
*
@@ -1462,7 +1415,7 @@ static int invalidate_fastmap(struct ubi_device *ubi)
ubi->fm = NULL;
ret = -ENOMEM;
- fm = kzalloc(sizeof(*fm), GFP_KERNEL);
+ fm = kzalloc(sizeof(*fm), GFP_NOFS);
if (!fm)
goto out;
@@ -1538,11 +1491,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
struct ubi_fastmap_layout *new_fm, *old_fm;
struct ubi_wl_entry *tmp_e;
- down_write(&ubi->fm_protect);
- down_write(&ubi->work_sem);
- down_write(&ubi->fm_eba_sem);
-
- ubi_refill_pools(ubi);
+ ubi_refill_pools_and_lock(ubi);
if (ubi->ro_mode || ubi->fm_disabled) {
up_write(&ubi->fm_eba_sem);
@@ -1551,7 +1500,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
return 0;
}
- new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
+ new_fm = kzalloc(sizeof(*new_fm), GFP_NOFS);
if (!new_fm) {
up_write(&ubi->fm_eba_sem);
up_write(&ubi->work_sem);
@@ -1576,7 +1525,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
if (!tmp_e) {
if (old_fm && old_fm->e[i]) {
- ret = erase_block(ubi, old_fm->e[i]->pnum);
+ ret = ubi_sync_erase(ubi, old_fm->e[i], 0);
if (ret < 0) {
ubi_err(ubi, "could not erase old fastmap PEB");
@@ -1628,7 +1577,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
if (old_fm) {
/* no fresh anchor PEB was found, reuse the old one */
if (!tmp_e) {
- ret = erase_block(ubi, old_fm->e[0]->pnum);
+ ret = ubi_sync_erase(ubi, old_fm->e[0], 0);
if (ret < 0) {
ubi_err(ubi, "could not erase old anchor PEB");
@@ -1640,7 +1589,6 @@ int ubi_update_fastmap(struct ubi_device *ubi)
goto err;
}
new_fm->e[0] = old_fm->e[0];
- new_fm->e[0]->ec = ret;
old_fm->e[0] = NULL;
} else {
/* we've got a new anchor PEB, return the old one */
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index c8f1bd4fa100..a5ec566df0d7 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -82,6 +82,9 @@ void ubi_err(const struct ubi_device *ubi, const char *fmt, ...);
#define UBI_DFS_DIR_NAME "ubi%d"
#define UBI_DFS_DIR_LEN (3 + 2 + 1)
+/* Number of physical eraseblocks reserved for atomic LEB change operation */
+#define EBA_RESERVED_PEBS 1
+
/*
* Error codes returned by the I/O sub-system.
*
@@ -491,6 +494,7 @@ struct ubi_debug_info {
* @fast_attach: non-zero if UBI was attached by fastmap
* @fm_anchor: The next anchor PEB to use for fastmap
* @fm_do_produce_anchor: If true produce an anchor PEB in wl
+ * @fm_pool_rsv_cnt: Number of reserved PEBs for filling pool/wl_pool
*
* @used: RB-tree of used physical eraseblocks
* @erroneous: RB-tree of erroneous used physical eraseblocks
@@ -601,6 +605,7 @@ struct ubi_device {
int fast_attach;
struct ubi_wl_entry *fm_anchor;
int fm_do_produce_anchor;
+ int fm_pool_rsv_cnt;
/* Wear-leveling sub-system's stuff */
struct rb_root used;
@@ -902,6 +907,7 @@ int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
struct ubi_attach_info *ai_scan);
/* wl.c */
+int ubi_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture);
int ubi_wl_get_peb(struct ubi_device *ubi);
int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
int pnum, int torture);
@@ -914,7 +920,7 @@ struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor);
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e,
int lnum, int torture);
int ubi_is_erase_work(struct ubi_work *wrk);
-void ubi_refill_pools(struct ubi_device *ubi);
+void ubi_refill_pools_and_lock(struct ubi_device *ubi);
int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force_scrub);
@@ -938,7 +944,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
/* build.c */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
int vid_hdr_offset, int max_beb_per1024,
- bool disable_fm);
+ bool disable_fm, bool need_resv_pool);
int ubi_detach_mtd_dev(int ubi_num, int anyway);
struct ubi_device *ubi_get_device(int ubi_num);
void ubi_put_device(struct ubi_device *ubi);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 26a214f016c1..a357f3d27f2f 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -181,11 +181,13 @@ static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
/**
* do_work - do one pending work.
* @ubi: UBI device description object
+ * @executed: whether a pending work was executed
*
* This function returns zero in case of success and a negative error code in
- * case of failure.
+ * case of failure. If @executed is not NULL and a work was executed,
+ * @executed is set to %1, otherwise it is set to %0.
*/
-static int do_work(struct ubi_device *ubi)
+static int do_work(struct ubi_device *ubi, int *executed)
{
int err;
struct ubi_work *wrk;
@@ -203,9 +205,13 @@ static int do_work(struct ubi_device *ubi)
if (list_empty(&ubi->works)) {
spin_unlock(&ubi->wl_lock);
up_read(&ubi->work_sem);
+ if (executed)
+ *executed = 0;
return 0;
}
+ if (executed)
+ *executed = 1;
wrk = list_entry(ubi->works.next, struct ubi_work, list);
list_del(&wrk->list);
ubi->works_count -= 1;
@@ -311,12 +317,14 @@ static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
* @ubi: UBI device description object
* @root: the RB-tree where to look for
* @diff: maximum possible difference from the smallest erase counter
+ * @pick_max: pick the PEB even if its erase counter exceeds 'min_ec + @diff'
*
* This function looks for a wear leveling entry with erase counter closest to
* min + @diff, where min is the smallest erase counter.
*/
static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
- struct rb_root *root, int diff)
+ struct rb_root *root, int diff,
+ int pick_max)
{
struct rb_node *p;
struct ubi_wl_entry *e;
@@ -330,9 +338,11 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
struct ubi_wl_entry *e1;
e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
- if (e1->ec >= max)
+ if (e1->ec >= max) {
+ if (pick_max)
+ e = e1;
p = p->rb_left;
- else {
+ } else {
p = p->rb_right;
e = e1;
}
@@ -361,12 +371,15 @@ static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
- /* If no fastmap has been written and this WL entry can be used
- * as anchor PEB, hold it back and return the second best
- * WL entry such that fastmap can use the anchor PEB later. */
+ /*
+ * If no fastmap has been written, fm_anchor is not reserved,
+ * and this WL entry can be used as anchor PEB, hold it back
+ * and return the second best WL entry so that fastmap can use
+ * the anchor PEB later.
+ */
e = may_reserve_for_fm(ubi, e, root);
} else
- e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
+ e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2, 0);
return e;
}
@@ -427,7 +440,7 @@ static int prot_queue_del(struct ubi_device *ubi, int pnum)
}
/**
- * sync_erase - synchronously erase a physical eraseblock.
+ * ubi_sync_erase - synchronously erase a physical eraseblock.
* @ubi: UBI device description object
* @e: the physical eraseblock to erase
* @torture: if the physical eraseblock has to be tortured
@@ -435,8 +448,7 @@ static int prot_queue_del(struct ubi_device *ubi, int pnum)
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
- int torture)
+int ubi_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture)
{
int err;
struct ubi_ec_hdr *ec_hdr;
@@ -1040,7 +1052,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
* %UBI_WL_THRESHOLD.
*/
e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
- e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
goto out_unlock;
@@ -1094,7 +1106,7 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
dbg_wl("erase PEB %d EC %d LEB %d:%d",
pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
- err = sync_erase(ubi, e, wl_wrk->torture);
+ err = ubi_sync_erase(ubi, e, wl_wrk->torture);
if (!err) {
spin_lock(&ubi->wl_lock);
@@ -1686,7 +1698,7 @@ int ubi_thread(void *u)
}
spin_unlock(&ubi->wl_lock);
- err = do_work(ubi);
+ err = do_work(ubi, NULL);
if (err) {
ubi_err(ubi, "%s: work failed with error code %d",
ubi->bgt_name, err);
@@ -1749,7 +1761,7 @@ static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync
ubi->lookuptbl[e->pnum] = e;
if (sync) {
- err = sync_erase(ubi, e, false);
+ err = ubi_sync_erase(ubi, e, false);
if (err)
goto out_free;
@@ -2071,7 +2083,7 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
struct ubi_wl_entry *e;
- e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
self_check_in_wl_tree(ubi, e, &ubi->free);
ubi->free_count--;
ubi_assert(ubi->free_count >= 0);
@@ -2097,7 +2109,7 @@ static int produce_free_peb(struct ubi_device *ubi)
spin_unlock(&ubi->wl_lock);
dbg_wl("do one work synchronously");
- err = do_work(ubi);
+ err = do_work(ubi, NULL);
spin_lock(&ubi->wl_lock);
if (err)
diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
index 5ebe374a08ae..7b6715ef6d4a 100644
--- a/drivers/mtd/ubi/wl.h
+++ b/drivers/mtd/ubi/wl.h
@@ -10,8 +10,10 @@ static bool need_wear_leveling(struct ubi_device *ubi);
static void ubi_fastmap_close(struct ubi_device *ubi);
static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count)
{
- /* Reserve enough LEBs to store two fastmaps. */
- *count += (ubi->fm_size / ubi->leb_size) * 2;
+ if (ubi->fm_disabled)
+ ubi->fm_pool_rsv_cnt = 0;
+ /* Reserve enough LEBs to store two fastmaps and to fill pools. */
+ *count += (ubi->fm_size / ubi->leb_size) * 2 + ubi->fm_pool_rsv_cnt;
INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
}
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c
index d8ab2b77d201..167a86f39f27 100644
--- a/drivers/net/dsa/lan9303_mdio.c
+++ b/drivers/net/dsa/lan9303_mdio.c
@@ -32,7 +32,7 @@ static int lan9303_mdio_write(void *ctx, uint32_t reg, uint32_t val)
struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
reg <<= 2; /* reg num to offset */
- mutex_lock(&sw_dev->device->bus->mdio_lock);
+ mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED);
lan9303_mdio_real_write(sw_dev->device, reg, val & 0xffff);
lan9303_mdio_real_write(sw_dev->device, reg + 2, (val >> 16) & 0xffff);
mutex_unlock(&sw_dev->device->bus->mdio_lock);
@@ -50,7 +50,7 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val)
struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
reg <<= 2; /* reg num to offset */
- mutex_lock(&sw_dev->device->bus->mdio_lock);
+ mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED);
*val = lan9303_mdio_real_read(sw_dev->device, reg);
*val |= (lan9303_mdio_real_read(sw_dev->device, reg + 2) << 16);
mutex_unlock(&sw_dev->device->bus->mdio_lock);
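A minimal, driver-agnostic sketch of the lockdep-subclass pattern the two lan9303 hunks above switch to: the same MDIO bus mutex is taken, but with an explicit nesting level so lockdep knows the recursive acquisition (switch register access layered on top of an MDIO bus) is intentional. The helper below is illustrative and not part of the driver; mutex_lock_nested(), MDIO_MUTEX_NESTED and __mdiobus_read() are the real kernel primitives.

#include <linux/mdio.h>
#include <linux/mutex.h>
#include <linux/phy.h>

static int example_switch_reg_read(struct mii_bus *bus, int reg, u16 *val)
{
	int ret;

	/* Same lock as the plain mdiobus accessors, annotated with a
	 * subclass so lockdep does not report a false self-deadlock.
	 */
	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
	ret = __mdiobus_read(bus, 0, reg);	/* unlocked accessor, lock held */
	mutex_unlock(&bus->mdio_lock);

	if (ret < 0)
		return ret;

	*val = ret;
	return 0;
}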
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 5665d0c3668f..1dee27349367 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6647,9 +6647,9 @@ static void tg3_tx(struct tg3_napi *tnapi)
tnapi->tx_cons = sw_idx;
- /* Need to make the tx_cons update visible to tg3_start_xmit()
+ /* Need to make the tx_cons update visible to __tg3_start_xmit()
* before checking for netif_queue_stopped(). Without the
- * memory barrier, there is a small possibility that tg3_start_xmit()
+ * memory barrier, there is a small possibility that __tg3_start_xmit()
* will miss it and cause the queue to be stopped forever.
*/
smp_mb();
@@ -7889,7 +7889,7 @@ static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
}
-static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
+static netdev_tx_t __tg3_start_xmit(struct sk_buff *, struct net_device *);
/* Use GSO to workaround all TSO packets that meet HW bug conditions
* indicated in tg3_tx_frag_set()
@@ -7923,7 +7923,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
skb_list_walk_safe(segs, seg, next) {
skb_mark_not_on_list(seg);
- tg3_start_xmit(seg, tp->dev);
+ __tg3_start_xmit(seg, tp->dev);
}
tg3_tso_bug_end:
@@ -7933,7 +7933,7 @@ tg3_tso_bug_end:
}
/* hard_start_xmit for all devices */
-static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t __tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
u32 len, entry, base_flags, mss, vlan = 0;
@@ -8182,11 +8182,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_wake_queue(txq);
}
- if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
- /* Packets are ready, update Tx producer idx on card. */
- tw32_tx_mbox(tnapi->prodmbox, entry);
- }
-
return NETDEV_TX_OK;
dma_error:
@@ -8199,6 +8194,42 @@ drop_nofree:
return NETDEV_TX_OK;
}
+static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_queue *txq;
+ u16 skb_queue_mapping;
+ netdev_tx_t ret;
+
+ skb_queue_mapping = skb_get_queue_mapping(skb);
+ txq = netdev_get_tx_queue(dev, skb_queue_mapping);
+
+ ret = __tg3_start_xmit(skb, dev);
+
+ /* Notify the hardware that packets are ready by updating the TX ring
+ * tail pointer. We respect netdev_xmit_more() thus avoiding poking
+ * the hardware for every packet. To guarantee forward progress the TX
+ * ring must be drained when it is full as indicated by
+ * netif_xmit_stopped(). This needs to happen even when the current
+ * skb was dropped or rejected with NETDEV_TX_BUSY. Otherwise packets
+ * queued by previous __tg3_start_xmit() calls might get stuck in
+ * the queue forever.
+ */
+ if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
+ struct tg3_napi *tnapi;
+ struct tg3 *tp;
+
+ tp = netdev_priv(dev);
+ tnapi = &tp->napi[skb_queue_mapping];
+
+ if (tg3_flag(tp, ENABLE_TSS))
+ tnapi++;
+
+ tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
+ }
+
+ return ret;
+}
+
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
if (enable) {
@@ -17729,7 +17760,7 @@ static int tg3_init_one(struct pci_dev *pdev,
* device behind the EPB cannot support DMA addresses > 40-bit.
* On 64-bit systems with IOMMU, use 40-bit dma_mask.
* On 64-bit systems without IOMMU, use 64-bit dma_mask and
- * do DMA address check in tg3_start_xmit().
+ * do DMA address check in __tg3_start_xmit().
*/
if (tg3_flag(tp, IS_5788))
persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
@@ -18127,7 +18158,8 @@ static void tg3_shutdown(struct pci_dev *pdev)
if (netif_running(dev))
dev_close(dev);
- tg3_power_down(tp);
+ if (system_state == SYSTEM_POWER_OFF)
+ tg3_power_down(tp);
rtnl_unlock();
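The new tg3_start_xmit() wrapper above is an instance of the common xmit_more doorbell-batching pattern. A condensed, self-contained sketch of that pattern follows; example_queue_descriptors() and example_write_tx_doorbell() are placeholder stubs, not tg3 functions.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Placeholder standing in for the driver's real descriptor enqueue. */
static netdev_tx_t example_queue_descriptors(struct sk_buff *skb,
					     struct net_device *dev)
{
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/* Placeholder standing in for the driver's real producer-index write. */
static void example_write_tx_doorbell(struct net_device *dev, u16 queue)
{
}

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 q = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(dev, q);
	netdev_tx_t ret;

	ret = example_queue_descriptors(skb, dev);

	/* Notify the hardware only when the stack has no more packets
	 * queued, or when the queue is stopped and must be drained to
	 * guarantee forward progress, even if this skb was dropped.
	 */
	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		example_write_tx_doorbell(dev, q);

	return ret;
}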
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 30bec47bc665..cffbf27c4656 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2769,7 +2769,7 @@ static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog,
if (priv->min_num_stack_tx_queues + num_xdp_tx_queues >
priv->num_tx_rings) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Reserving %d XDP TXQs does not leave a minimum of %d TXQs for network stack (total %d available)",
+ "Reserving %d XDP TXQs does not leave a minimum of %d for stack (total %d)",
num_xdp_tx_queues,
priv->min_num_stack_tx_queues,
priv->num_tx_rings);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devlink.c b/drivers/net/ethernet/intel/i40e/i40e_devlink.c
index 74bc111b4849..cc4e9e2addb7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devlink.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_devlink.c
@@ -231,6 +231,5 @@ int i40e_devlink_create_port(struct i40e_pf *pf)
**/
void i40e_devlink_destroy_port(struct i40e_pf *pf)
{
- devlink_port_type_clear(&pf->devlink_port);
devlink_port_unregister(&pf->devlink_port);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 3157d14d9b12..f7a332e51524 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -14213,8 +14213,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
}
set_bit(__I40E_VSI_RELEASING, vsi->state);
uplink_seid = vsi->uplink_seid;
- if (vsi->type == I40E_VSI_MAIN)
- i40e_devlink_destroy_port(pf);
+
if (vsi->type != I40E_VSI_SRIOV) {
if (vsi->netdev_registered) {
vsi->netdev_registered = false;
@@ -14228,6 +14227,9 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
i40e_vsi_disable_irq(vsi);
}
+ if (vsi->type == I40E_VSI_MAIN)
+ i40e_devlink_destroy_port(pf);
+
spin_lock_bh(&vsi->mac_filter_hash_lock);
/* clear the sync flag on all filters */
@@ -14402,14 +14404,14 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
err_rings:
i40e_vsi_free_q_vectors(vsi);
- if (vsi->type == I40E_VSI_MAIN)
- i40e_devlink_destroy_port(pf);
if (vsi->netdev_registered) {
vsi->netdev_registered = false;
unregister_netdev(vsi->netdev);
free_netdev(vsi->netdev);
vsi->netdev = NULL;
}
+ if (vsi->type == I40E_VSI_MAIN)
+ i40e_devlink_destroy_port(pf);
i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
i40e_vsi_clear(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index b980f89dc892..cd065ec48c87 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -628,7 +628,7 @@ void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
INIT_LIST_HEAD(&ndlist.node);
rcu_read_lock();
for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
- nl = kzalloc(sizeof(*nl), GFP_KERNEL);
+ nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
if (!nl)
break;
@@ -1555,18 +1555,12 @@ static void ice_lag_chk_disabled_bond(struct ice_lag *lag, void *ptr)
*/
static void ice_lag_disable_sriov_bond(struct ice_lag *lag)
{
- struct ice_lag_netdev_list *entry;
struct ice_netdev_priv *np;
- struct net_device *netdev;
struct ice_pf *pf;
- list_for_each_entry(entry, lag->netdev_head, node) {
- netdev = entry->netdev;
- np = netdev_priv(netdev);
- pf = np->vsi->back;
-
- ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
- }
+ np = netdev_priv(lag->netdev);
+ pf = np->vsi->back;
+ ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
}
/**
@@ -1698,7 +1692,7 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
rcu_read_lock();
for_each_netdev_in_bond_rcu(upper_netdev, tmp_nd) {
- nd_list = kzalloc(sizeof(*nd_list), GFP_KERNEL);
+ nd_list = kzalloc(sizeof(*nd_list), GFP_ATOMIC);
if (!nd_list)
break;
@@ -2075,7 +2069,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
INIT_LIST_HEAD(&ndlist.node);
rcu_read_lock();
for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
- nl = kzalloc(sizeof(*nl), GFP_KERNEL);
+ nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
if (!nl)
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index 37b54db91df2..dd03cb69ad26 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -630,32 +630,83 @@ bool ice_is_tunnel_supported(struct net_device *dev)
return ice_tc_tun_get_type(dev) != TNL_LAST;
}
-static int
-ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
- struct flow_action_entry *act)
+static bool ice_tc_is_dev_uplink(struct net_device *dev)
+{
+ return netif_is_ice(dev) || ice_is_tunnel_supported(dev);
+}
+
+static int ice_tc_setup_redirect_action(struct net_device *filter_dev,
+ struct ice_tc_flower_fltr *fltr,
+ struct net_device *target_dev)
{
struct ice_repr *repr;
+ fltr->action.fltr_act = ICE_FWD_TO_VSI;
+
+ if (ice_is_port_repr_netdev(filter_dev) &&
+ ice_is_port_repr_netdev(target_dev)) {
+ repr = ice_netdev_to_repr(target_dev);
+
+ fltr->dest_vsi = repr->src_vsi;
+ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+ } else if (ice_is_port_repr_netdev(filter_dev) &&
+ ice_tc_is_dev_uplink(target_dev)) {
+ repr = ice_netdev_to_repr(filter_dev);
+
+ fltr->dest_vsi = repr->src_vsi->back->switchdev.uplink_vsi;
+ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+ } else if (ice_tc_is_dev_uplink(filter_dev) &&
+ ice_is_port_repr_netdev(target_dev)) {
+ repr = ice_netdev_to_repr(target_dev);
+
+ fltr->dest_vsi = repr->src_vsi;
+ fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
+ } else {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unsupported netdevice in switchdev mode");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+ice_tc_setup_drop_action(struct net_device *filter_dev,
+ struct ice_tc_flower_fltr *fltr)
+{
+ fltr->action.fltr_act = ICE_DROP_PACKET;
+
+ if (ice_is_port_repr_netdev(filter_dev)) {
+ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+ } else if (ice_tc_is_dev_uplink(filter_dev)) {
+ fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
+ } else {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unsupported netdevice in switchdev mode");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
+ struct ice_tc_flower_fltr *fltr,
+ struct flow_action_entry *act)
+{
+ int err;
+
switch (act->id) {
case FLOW_ACTION_DROP:
- fltr->action.fltr_act = ICE_DROP_PACKET;
+ err = ice_tc_setup_drop_action(filter_dev, fltr);
+ if (err)
+ return err;
+
break;
case FLOW_ACTION_REDIRECT:
- fltr->action.fltr_act = ICE_FWD_TO_VSI;
-
- if (ice_is_port_repr_netdev(act->dev)) {
- repr = ice_netdev_to_repr(act->dev);
-
- fltr->dest_vsi = repr->src_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
- } else if (netif_is_ice(act->dev) ||
- ice_is_tunnel_supported(act->dev)) {
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
- } else {
- NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
- return -EINVAL;
- }
+ err = ice_tc_setup_redirect_action(filter_dev, fltr, act->dev);
+ if (err)
+ return err;
break;
@@ -696,10 +747,6 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
goto exit;
}
- /* egress traffic is always redirect to uplink */
- if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
- fltr->dest_vsi = vsi->back->switchdev.uplink_vsi;
-
rule_info.sw_act.fltr_act = fltr->action.fltr_act;
if (fltr->action.fltr_act != ICE_DROP_PACKET)
rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
@@ -713,13 +760,21 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
rule_info.flags_info.act_valid = true;
if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
+ /* Uplink to VF */
rule_info.sw_act.flag |= ICE_FLTR_RX;
rule_info.sw_act.src = hw->pf_id;
rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
- } else {
+ } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+ fltr->dest_vsi == vsi->back->switchdev.uplink_vsi) {
+ /* VF to Uplink */
rule_info.sw_act.flag |= ICE_FLTR_TX;
rule_info.sw_act.src = vsi->idx;
rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
+ } else {
+ /* VF to VF */
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
}
/* specify the cookie as filter_rule_id */
@@ -1745,16 +1800,17 @@ ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
/**
* ice_parse_tc_flower_actions - Parse the actions for a TC filter
+ * @filter_dev: Pointer to device on which filter is being added
* @vsi: Pointer to VSI
* @cls_flower: Pointer to TC flower offload structure
* @fltr: Pointer to TC flower filter structure
*
* Parse the actions for a TC filter
*/
-static int
-ice_parse_tc_flower_actions(struct ice_vsi *vsi,
- struct flow_cls_offload *cls_flower,
- struct ice_tc_flower_fltr *fltr)
+static int ice_parse_tc_flower_actions(struct net_device *filter_dev,
+ struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower,
+ struct ice_tc_flower_fltr *fltr)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
struct flow_action *flow_action = &rule->action;
@@ -1769,7 +1825,7 @@ ice_parse_tc_flower_actions(struct ice_vsi *vsi,
flow_action_for_each(i, act, flow_action) {
if (ice_is_eswitch_mode_switchdev(vsi->back))
- err = ice_eswitch_tc_parse_action(fltr, act);
+ err = ice_eswitch_tc_parse_action(filter_dev, fltr, act);
else
err = ice_tc_parse_action(vsi, fltr, act);
if (err)
@@ -1856,7 +1912,7 @@ ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
if (err < 0)
goto err;
- err = ice_parse_tc_flower_actions(vsi, f, fltr);
+ err = ice_parse_tc_flower_actions(netdev, vsi, f, fltr);
if (err < 0)
goto err;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 5e1ef70d54fe..1f728a9004d9 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -2365,7 +2365,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
*/
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
{
- const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ const struct skb_shared_info *shinfo;
union {
struct iphdr *v4;
struct ipv6hdr *v6;
@@ -2379,13 +2379,15 @@ int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
u32 paylen, l4_start;
int err;
- if (!shinfo->gso_size)
+ if (!skb_is_gso(skb))
return 0;
err = skb_cow_head(skb, 0);
if (err < 0)
return err;
+ shinfo = skb_shinfo(skb);
+
ip.hdr = skb_network_header(skb);
l4.hdr = skb_transport_header(skb);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 1a42bfded872..7ca6941ea0b9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -818,7 +818,6 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
int qidx, sqe_tail, sqe_head;
struct otx2_snd_queue *sq;
u64 incr, *ptr, val;
- int timeout = 1000;
ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
@@ -827,15 +826,11 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
continue;
incr = (u64)qidx << 32;
- while (timeout) {
- val = otx2_atomic64_add(incr, ptr);
- sqe_head = (val >> 20) & 0x3F;
- sqe_tail = (val >> 28) & 0x3F;
- if (sqe_head == sqe_tail)
- break;
- usleep_range(1, 3);
- timeout--;
- }
+ val = otx2_atomic64_add(incr, ptr);
+ sqe_head = (val >> 20) & 0x3F;
+ sqe_tail = (val >> 28) & 0x3F;
+ if (sqe_head != sqe_tail)
+ usleep_range(50, 60);
}
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index c04a8ee53a82..e7c69b57147e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -977,6 +977,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
void otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
+void otx2_free_pending_sqe(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
dma_addr_t *dma);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 6daf4d58c25d..91b99fd70361 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1193,31 +1193,32 @@ static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = {
};
static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] = {
- "NIX_SND_STATUS_GOOD",
- "NIX_SND_STATUS_SQ_CTX_FAULT",
- "NIX_SND_STATUS_SQ_CTX_POISON",
- "NIX_SND_STATUS_SQB_FAULT",
- "NIX_SND_STATUS_SQB_POISON",
- "NIX_SND_STATUS_HDR_ERR",
- "NIX_SND_STATUS_EXT_ERR",
- "NIX_SND_STATUS_JUMP_FAULT",
- "NIX_SND_STATUS_JUMP_POISON",
- "NIX_SND_STATUS_CRC_ERR",
- "NIX_SND_STATUS_IMM_ERR",
- "NIX_SND_STATUS_SG_ERR",
- "NIX_SND_STATUS_MEM_ERR",
- "NIX_SND_STATUS_INVALID_SUBDC",
- "NIX_SND_STATUS_SUBDC_ORDER_ERR",
- "NIX_SND_STATUS_DATA_FAULT",
- "NIX_SND_STATUS_DATA_POISON",
- "NIX_SND_STATUS_NPC_DROP_ACTION",
- "NIX_SND_STATUS_LOCK_VIOL",
- "NIX_SND_STATUS_NPC_UCAST_CHAN_ERR",
- "NIX_SND_STATUS_NPC_MCAST_CHAN_ERR",
- "NIX_SND_STATUS_NPC_MCAST_ABORT",
- "NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
- "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
- "NIX_SND_STATUS_SEND_STATS_ERR",
+ [NIX_SND_STATUS_GOOD] = "NIX_SND_STATUS_GOOD",
+ [NIX_SND_STATUS_SQ_CTX_FAULT] = "NIX_SND_STATUS_SQ_CTX_FAULT",
+ [NIX_SND_STATUS_SQ_CTX_POISON] = "NIX_SND_STATUS_SQ_CTX_POISON",
+ [NIX_SND_STATUS_SQB_FAULT] = "NIX_SND_STATUS_SQB_FAULT",
+ [NIX_SND_STATUS_SQB_POISON] = "NIX_SND_STATUS_SQB_POISON",
+ [NIX_SND_STATUS_HDR_ERR] = "NIX_SND_STATUS_HDR_ERR",
+ [NIX_SND_STATUS_EXT_ERR] = "NIX_SND_STATUS_EXT_ERR",
+ [NIX_SND_STATUS_JUMP_FAULT] = "NIX_SND_STATUS_JUMP_FAULT",
+ [NIX_SND_STATUS_JUMP_POISON] = "NIX_SND_STATUS_JUMP_POISON",
+ [NIX_SND_STATUS_CRC_ERR] = "NIX_SND_STATUS_CRC_ERR",
+ [NIX_SND_STATUS_IMM_ERR] = "NIX_SND_STATUS_IMM_ERR",
+ [NIX_SND_STATUS_SG_ERR] = "NIX_SND_STATUS_SG_ERR",
+ [NIX_SND_STATUS_MEM_ERR] = "NIX_SND_STATUS_MEM_ERR",
+ [NIX_SND_STATUS_INVALID_SUBDC] = "NIX_SND_STATUS_INVALID_SUBDC",
+ [NIX_SND_STATUS_SUBDC_ORDER_ERR] = "NIX_SND_STATUS_SUBDC_ORDER_ERR",
+ [NIX_SND_STATUS_DATA_FAULT] = "NIX_SND_STATUS_DATA_FAULT",
+ [NIX_SND_STATUS_DATA_POISON] = "NIX_SND_STATUS_DATA_POISON",
+ [NIX_SND_STATUS_NPC_DROP_ACTION] = "NIX_SND_STATUS_NPC_DROP_ACTION",
+ [NIX_SND_STATUS_LOCK_VIOL] = "NIX_SND_STATUS_LOCK_VIOL",
+ [NIX_SND_STATUS_NPC_UCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_UCAST_CHAN_ERR",
+ [NIX_SND_STATUS_NPC_MCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_MCAST_CHAN_ERR",
+ [NIX_SND_STATUS_NPC_MCAST_ABORT] = "NIX_SND_STATUS_NPC_MCAST_ABORT",
+ [NIX_SND_STATUS_NPC_VTAG_PTR_ERR] = "NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
+ [NIX_SND_STATUS_NPC_VTAG_SIZE_ERR] = "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
+ [NIX_SND_STATUS_SEND_MEM_FAULT] = "NIX_SND_STATUS_SEND_MEM_FAULT",
+ [NIX_SND_STATUS_SEND_STATS_ERR] = "NIX_SND_STATUS_SEND_STATS_ERR",
};
static irqreturn_t otx2_q_intr_handler(int irq, void *data)
@@ -1238,14 +1239,16 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
continue;
if (val & BIT_ULL(42)) {
- netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
+ netdev_err(pf->netdev,
+ "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
qidx, otx2_read64(pf, NIX_LF_ERR_INT));
} else {
if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
netdev_err(pf->netdev, "CQ%lld: Doorbell error",
qidx);
if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
- netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM",
+ netdev_err(pf->netdev,
+ "CQ%lld: Memory fault on CQE write to LLC/DRAM",
qidx);
}
@@ -1272,7 +1275,8 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
(val & NIX_SQINT_BITS));
if (val & BIT_ULL(42)) {
- netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
+ netdev_err(pf->netdev,
+ "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
qidx, otx2_read64(pf, NIX_LF_ERR_INT));
goto done;
}
@@ -1282,8 +1286,11 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
goto chk_mnq_err_dbg;
sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg);
- netdev_err(pf->netdev, "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(%llx) err=%s\n",
- qidx, sq_op_err_dbg, nix_sqoperr_e_str[sq_op_err_code]);
+ netdev_err(pf->netdev,
+ "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(0x%llx) err=%s(%#x)\n",
+ qidx, sq_op_err_dbg,
+ nix_sqoperr_e_str[sq_op_err_code],
+ sq_op_err_code);
otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44));
@@ -1300,16 +1307,21 @@ chk_mnq_err_dbg:
goto chk_snd_err_dbg;
mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg);
- netdev_err(pf->netdev, "SQ%lld: NIX_LF_MNQ_ERR_DBG(%llx) err=%s\n",
- qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code]);
+ netdev_err(pf->netdev,
+ "SQ%lld: NIX_LF_MNQ_ERR_DBG(0x%llx) err=%s(%#x)\n",
+ qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code],
+ mnq_err_code);
otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44));
chk_snd_err_dbg:
snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG);
if (snd_err_dbg & BIT(44)) {
snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg);
- netdev_err(pf->netdev, "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s\n",
- qidx, snd_err_dbg, nix_snd_status_e_str[snd_err_code]);
+ netdev_err(pf->netdev,
+ "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s(%#x)\n",
+ qidx, snd_err_dbg,
+ nix_snd_status_e_str[snd_err_code],
+ snd_err_code);
otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44));
}
@@ -1589,6 +1601,7 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
else
otx2_cleanup_tx_cqes(pf, cq);
}
+ otx2_free_pending_sqe(pf);
otx2_free_sq_res(pf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
index fa37b9f312ca..4e5899d8fa2e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
@@ -318,23 +318,23 @@ enum nix_snd_status_e {
NIX_SND_STATUS_EXT_ERR = 0x6,
NIX_SND_STATUS_JUMP_FAULT = 0x7,
NIX_SND_STATUS_JUMP_POISON = 0x8,
- NIX_SND_STATUS_CRC_ERR = 0x9,
- NIX_SND_STATUS_IMM_ERR = 0x10,
- NIX_SND_STATUS_SG_ERR = 0x11,
- NIX_SND_STATUS_MEM_ERR = 0x12,
- NIX_SND_STATUS_INVALID_SUBDC = 0x13,
- NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x14,
- NIX_SND_STATUS_DATA_FAULT = 0x15,
- NIX_SND_STATUS_DATA_POISON = 0x16,
- NIX_SND_STATUS_NPC_DROP_ACTION = 0x17,
- NIX_SND_STATUS_LOCK_VIOL = 0x18,
- NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x19,
- NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x20,
- NIX_SND_STATUS_NPC_MCAST_ABORT = 0x21,
- NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x22,
- NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x23,
- NIX_SND_STATUS_SEND_MEM_FAULT = 0x24,
- NIX_SND_STATUS_SEND_STATS_ERR = 0x25,
+ NIX_SND_STATUS_CRC_ERR = 0x10,
+ NIX_SND_STATUS_IMM_ERR = 0x11,
+ NIX_SND_STATUS_SG_ERR = 0x12,
+ NIX_SND_STATUS_MEM_ERR = 0x13,
+ NIX_SND_STATUS_INVALID_SUBDC = 0x14,
+ NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x15,
+ NIX_SND_STATUS_DATA_FAULT = 0x16,
+ NIX_SND_STATUS_DATA_POISON = 0x17,
+ NIX_SND_STATUS_NPC_DROP_ACTION = 0x20,
+ NIX_SND_STATUS_LOCK_VIOL = 0x21,
+ NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x22,
+ NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x23,
+ NIX_SND_STATUS_NPC_MCAST_ABORT = 0x24,
+ NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x25,
+ NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x26,
+ NIX_SND_STATUS_SEND_MEM_FAULT = 0x27,
+ NIX_SND_STATUS_SEND_STATS_ERR = 0x28,
NIX_SND_STATUS_MAX,
};
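The renumbered NIX_SND_STATUS_* values above are deliberately sparse (0x8 jumps to 0x10, and 0x17 to 0x20), which is why the matching string table in otx2_pf.c now uses designated initializers: positional initialization would silently shift every name after a gap. Below is a generic, self-contained sketch of the idiom; the enum and strings are made up, not octeontx2 definitions.

enum example_err {
	EXAMPLE_ERR_NONE	= 0x0,
	EXAMPLE_ERR_FAULT	= 0x1,
	/* gap in the hardware encoding */
	EXAMPLE_ERR_POISON	= 0x10,
	EXAMPLE_ERR_MAX,
};

/* Designated initializers key each string to its enum value, so sparse
 * encodings leave NULL holes instead of shifting later entries.
 */
static const char * const example_err_str[EXAMPLE_ERR_MAX] = {
	[EXAMPLE_ERR_NONE]	= "EXAMPLE_ERR_NONE",
	[EXAMPLE_ERR_FAULT]	= "EXAMPLE_ERR_FAULT",
	[EXAMPLE_ERR_POISON]	= "EXAMPLE_ERR_POISON",
};

static const char *example_err_name(unsigned int code)
{
	if (code >= EXAMPLE_ERR_MAX || !example_err_str[code])
		return "UNKNOWN";

	return example_err_str[code];
}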
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 53b2a4ef5298..6ee15f3c25ed 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -1247,9 +1247,11 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int q
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
{
+ int tx_pkts = 0, tx_bytes = 0;
struct sk_buff *skb = NULL;
struct otx2_snd_queue *sq;
struct nix_cqe_tx_s *cqe;
+ struct netdev_queue *txq;
int processed_cqe = 0;
struct sg_list *sg;
int qidx;
@@ -1270,12 +1272,20 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
sg = &sq->sg[cqe->comp.sqe_id];
skb = (struct sk_buff *)sg->skb;
if (skb) {
+ tx_bytes += skb->len;
+ tx_pkts++;
otx2_dma_unmap_skb_frags(pfvf, sg);
dev_kfree_skb_any(skb);
sg->skb = (u64)NULL;
}
}
+ if (likely(tx_pkts)) {
+ if (qidx >= pfvf->hw.tx_queues)
+ qidx -= pfvf->hw.xdp_queues;
+ txq = netdev_get_tx_queue(pfvf->netdev, qidx);
+ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
+ }
/* Free CQEs to HW */
otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
((u64)cq->cq_idx << 32) | processed_cqe);
@@ -1302,6 +1312,38 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
return err;
}
+void otx2_free_pending_sqe(struct otx2_nic *pfvf)
+{
+ int tx_pkts = 0, tx_bytes = 0;
+ struct sk_buff *skb = NULL;
+ struct otx2_snd_queue *sq;
+ struct netdev_queue *txq;
+ struct sg_list *sg;
+ int sq_idx, sqe;
+
+ for (sq_idx = 0; sq_idx < pfvf->hw.tx_queues; sq_idx++) {
+ sq = &pfvf->qset.sq[sq_idx];
+ for (sqe = 0; sqe < sq->sqe_cnt; sqe++) {
+ sg = &sq->sg[sqe];
+ skb = (struct sk_buff *)sg->skb;
+ if (skb) {
+ tx_bytes += skb->len;
+ tx_pkts++;
+ otx2_dma_unmap_skb_frags(pfvf, sg);
+ dev_kfree_skb_any(skb);
+ sg->skb = (u64)NULL;
+ }
+ }
+
+ if (!tx_pkts)
+ continue;
+ txq = netdev_get_tx_queue(pfvf->netdev, sq_idx);
+ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
+ tx_pkts = 0;
+ tx_bytes = 0;
+ }
+}
+
static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
int len, int *offset)
{
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index a987defb575c..0c76c162b8a9 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -2582,9 +2582,13 @@ static void rtl_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
rx_mode |= AcceptAllPhys;
+ } else if (!(dev->flags & IFF_MULTICAST)) {
+ rx_mode &= ~AcceptMulticast;
} else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
dev->flags & IFF_ALLMULTI ||
- tp->mac_version == RTL_GIGA_MAC_VER_35) {
+ tp->mac_version == RTL_GIGA_MAC_VER_35 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_46 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_48) {
/* accept all multicasts */
} else if (netdev_mc_empty(dev)) {
rx_mode &= ~AcceptMulticast;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 7a8f47e7b728..a4e8b498dea9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -259,7 +259,7 @@
((val) << XGMAC_PPS_MINIDX(x))
#define XGMAC_PPSCMD_START 0x2
#define XGMAC_PPSCMD_STOP 0x5
-#define XGMAC_PPSEN0 BIT(4)
+#define XGMAC_PPSENx(x) BIT(4 + (x) * 8)
#define XGMAC_PPSx_TARGET_TIME_SEC(x) (0x00000d80 + (x) * 0x10)
#define XGMAC_PPSx_TARGET_TIME_NSEC(x) (0x00000d84 + (x) * 0x10)
#define XGMAC_TRGTBUSY0 BIT(31)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index f352be269deb..453e88b75be0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -1178,7 +1178,19 @@ static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
- val |= XGMAC_PPSEN0;
+
+ /* XGMAC Core has 4 PPS outputs at most.
+ *
+ * Prior to XGMAC Core 3.20, Fixed or Flexible mode is selectable for
+ * PPS0 only, via PPSEN0. PPS{1,2,3} are in Flexible mode by default
+ * and cannot be switched to Fixed mode, since PPSEN{1,2,3} are
+ * read-only and reserved to 0. Setting PPSEN{1,2,3} anyway does not
+ * make things worse ;-)
+ *
+ * From XGMAC Core 3.20 onwards, PPSEN{0,1,2,3} are writable and must
+ * be set, or the PPS outputs stay in Fixed PPS mode by default.
+ */
+ val |= XGMAC_PPSENx(index);
writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));
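For reference, the new XGMAC_PPSENx() macro above places the enable bit at an 8-bit stride per PPS output, so indices 0..3 resolve to bits 4, 12, 20 and 28. A trivial illustration, assuming the kernel's BIT() helper; only the macro expression is copied, the comment values are just the evaluated results.

#include <linux/bits.h>

#define EXAMPLE_PPSENx(x)	BIT(4 + (x) * 8)

/* EXAMPLE_PPSENx(0) == BIT(4)  == 0x00000010
 * EXAMPLE_PPSENx(1) == BIT(12) == 0x00001000
 * EXAMPLE_PPSENx(2) == BIT(20) == 0x00100000
 * EXAMPLE_PPSENx(3) == BIT(28) == 0x10000000
 */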
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 24120605502f..ece9f8df98ae 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -1588,10 +1588,10 @@ static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy
/* rx_pause/tx_pause */
if (rx_pause)
- mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
+ mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
if (tx_pause)
- mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
+ mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
index 4cf2a52e4378..3025e9c18970 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -177,7 +177,7 @@ static void icss_iep_set_counter(struct icss_iep *iep, u64 ns)
if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
writel(upper_32_bits(ns), iep->base +
iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);
- writel(upper_32_bits(ns), iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
+ writel(lower_32_bits(ns), iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
}
static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns);
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 531bf919aef5..e0d26148dfd9 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -163,7 +163,6 @@ typedef void buffer_t;
/* Information about built-in Ethernet MAC interfaces */
struct eth_plat_info {
- u8 phy; /* MII PHY ID, 0 - 31 */
u8 rxq; /* configurable, currently 0 - 31 only */
u8 txreadyq;
u8 hwaddr[ETH_ALEN];
@@ -1583,7 +1582,7 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
if ((err = register_netdev(ndev)))
goto err_phy_dis;
- netdev_info(ndev, "%s: MII PHY %i on %s\n", ndev->name, plat->phy,
+ netdev_info(ndev, "%s: MII PHY %s on %s\n", ndev->name, phydev_name(phydev),
npe_name(port->npe));
return 0;
diff --git a/drivers/net/mdio/acpi_mdio.c b/drivers/net/mdio/acpi_mdio.c
index 4630dde01974..5d0f11f280cf 100644
--- a/drivers/net/mdio/acpi_mdio.c
+++ b/drivers/net/mdio/acpi_mdio.c
@@ -16,6 +16,7 @@
MODULE_AUTHOR("Calvin Johnson <calvin.johnson@oss.nxp.com>");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ACPI MDIO bus (Ethernet PHY) accessors");
/**
* __acpi_mdiobus_register - Register mii_bus and create PHYs from the ACPI ASL.
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
index 1183ef5e203e..fd02f5cbc853 100644
--- a/drivers/net/mdio/fwnode_mdio.c
+++ b/drivers/net/mdio/fwnode_mdio.c
@@ -14,6 +14,7 @@
MODULE_AUTHOR("Calvin Johnson <calvin.johnson@oss.nxp.com>");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FWNODE MDIO bus (Ethernet PHY) accessors");
static struct pse_control *
fwnode_find_pse_control(struct fwnode_handle *fwnode)
diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c
index 70edeeb7771e..c2170650415c 100644
--- a/drivers/net/mdio/mdio-aspeed.c
+++ b/drivers/net/mdio/mdio-aspeed.c
@@ -205,3 +205,4 @@ module_platform_driver(aspeed_mdio_driver);
MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ASPEED MDIO bus controller");
diff --git a/drivers/net/mdio/mdio-bitbang.c b/drivers/net/mdio/mdio-bitbang.c
index 81b7748c10ce..f88639297ff2 100644
--- a/drivers/net/mdio/mdio-bitbang.c
+++ b/drivers/net/mdio/mdio-bitbang.c
@@ -263,3 +263,4 @@ void free_mdio_bitbang(struct mii_bus *bus)
EXPORT_SYMBOL(free_mdio_bitbang);
MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Bitbanged MDIO buses");
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 7eb32ebb846d..64ebcb6d235c 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -25,6 +25,7 @@
MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("OpenFirmware MDIO bus (Ethernet PHY) accessors");
/* Extract the clause 22 phy ID from the compatible string of the form
* ethernet-phy-idAAAA.BBBB */
diff --git a/drivers/net/phy/bcm-phy-ptp.c b/drivers/net/phy/bcm-phy-ptp.c
index ef00d6163061..cb4b91af5e17 100644
--- a/drivers/net/phy/bcm-phy-ptp.c
+++ b/drivers/net/phy/bcm-phy-ptp.c
@@ -942,3 +942,4 @@ struct bcm_ptp_private *bcm_ptp_probe(struct phy_device *phydev)
EXPORT_SYMBOL_GPL(bcm_ptp_probe);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Broadcom PHY PTP driver");
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index cc2858107668..e81404bf8994 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -223,3 +223,4 @@ static struct phy_driver bcm87xx_driver[] = {
module_phy_driver(bcm87xx_driver);
MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Broadcom BCM87xx PHY driver");
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 6712883498bb..25c19496a336 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1616,6 +1616,7 @@ struct phylink *phylink_create(struct phylink_config *config,
pl->config = config;
if (config->type == PHYLINK_NETDEV) {
pl->netdev = to_net_dev(config->dev);
+ netif_carrier_off(pl->netdev);
} else if (config->type == PHYLINK_DEV) {
pl->dev = config->dev;
} else {
@@ -3726,3 +3727,4 @@ static int __init phylink_init(void)
module_init(phylink_init);
MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("phylink models the MAC to optional PHY connection");
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index b8c0961daf53..5468bd209fab 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -3153,3 +3153,4 @@ module_exit(sfp_exit);
MODULE_ALIAS("platform:sfp");
MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SFP cage support");
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index a9beacd552cf..0193af2d31c9 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -570,8 +570,8 @@ static struct bpf_prog *get_filter(struct sock_fprog *uprog)
/* uprog->len is unsigned short, so no overflow here */
fprog.len = uprog->len;
- fprog.filter = memdup_user(uprog->filter,
- uprog->len * sizeof(struct sock_filter));
+ fprog.filter = memdup_array_user(uprog->filter,
+ uprog->len, sizeof(struct sock_filter));
if (IS_ERR(fprog.filter))
return ERR_CAST(fprog.filter);
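The ppp_generic hunk above switches to memdup_array_user(), which performs the element-count multiplication with overflow checking before copying from userspace. A hedged sketch of what that helper does internally; the function name below is made up, while memdup_user() and check_mul_overflow() are the real primitives.

#include <linux/err.h>
#include <linux/overflow.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Illustrative only: copy an n-element array from userspace without an
 * unchecked n * size multiplication.
 */
static void *example_copy_user_array(const void __user *src, size_t n,
				     size_t size)
{
	size_t total;

	if (check_mul_overflow(n, size, &total))
		return ERR_PTR(-EOVERFLOW);

	return memdup_user(src, total);
}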
diff --git a/drivers/nvme/Makefile b/drivers/nvme/Makefile
index eedca8c72098..74f59ceed3d5 100644
--- a/drivers/nvme/Makefile
+++ b/drivers/nvme/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_NVME_COMMON) += common/
+obj-y += common/
obj-y += host/
obj-y += target/
diff --git a/drivers/nvme/common/Kconfig b/drivers/nvme/common/Kconfig
index 06c8df00d1e2..244432e0b73d 100644
--- a/drivers/nvme/common/Kconfig
+++ b/drivers/nvme/common/Kconfig
@@ -1,14 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
-config NVME_COMMON
- tristate
-
config NVME_KEYRING
- bool
+ tristate
select KEYS
config NVME_AUTH
- bool
+ tristate
select CRYPTO
select CRYPTO_HMAC
select CRYPTO_SHA256
diff --git a/drivers/nvme/common/Makefile b/drivers/nvme/common/Makefile
index 0cbd0b0b8d49..681514cf2e2f 100644
--- a/drivers/nvme/common/Makefile
+++ b/drivers/nvme/common/Makefile
@@ -2,7 +2,8 @@
ccflags-y += -I$(src)
-obj-$(CONFIG_NVME_COMMON) += nvme-common.o
+obj-$(CONFIG_NVME_AUTH) += nvme-auth.o
+obj-$(CONFIG_NVME_KEYRING) += nvme-keyring.o
-nvme-common-$(CONFIG_NVME_AUTH) += auth.o
-nvme-common-$(CONFIG_NVME_KEYRING) += keyring.o
+nvme-auth-y += auth.o
+nvme-keyring-y += keyring.o
diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c
index a8e87dfbeab2..a23ab5c968b9 100644
--- a/drivers/nvme/common/auth.c
+++ b/drivers/nvme/common/auth.c
@@ -341,7 +341,6 @@ int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
u8 *challenge, u8 *aug, size_t hlen)
{
struct crypto_shash *tfm;
- struct shash_desc *desc;
u8 *hashed_key;
const char *hmac_name;
int ret;
@@ -369,29 +368,11 @@ int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
goto out_free_key;
}
- desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
- GFP_KERNEL);
- if (!desc) {
- ret = -ENOMEM;
- goto out_free_hash;
- }
- desc->tfm = tfm;
-
ret = crypto_shash_setkey(tfm, hashed_key, hlen);
if (ret)
- goto out_free_desc;
-
- ret = crypto_shash_init(desc);
- if (ret)
- goto out_free_desc;
-
- ret = crypto_shash_update(desc, challenge, hlen);
- if (ret)
- goto out_free_desc;
+ goto out_free_hash;
- ret = crypto_shash_final(desc, aug);
-out_free_desc:
- kfree_sensitive(desc);
+ ret = crypto_shash_tfm_digest(tfm, challenge, hlen, aug);
out_free_hash:
crypto_free_shash(tfm);
out_free_key:
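The auth.c hunk above replaces a manually managed shash_desc (allocate, init, update, final, free) with a single crypto_shash_tfm_digest() call. Here is a minimal sketch of the resulting flow, assuming a keyed shash tfm has already been allocated; the helper name is illustrative.

#include <crypto/hash.h>

/* Illustrative one-shot keyed digest over a single buffer.
 * crypto_shash_tfm_digest() allocates a temporary descriptor internally
 * and performs init + update + final, so no shash_desc handling is
 * needed by the caller.
 */
static int example_keyed_digest(struct crypto_shash *tfm,
				const u8 *key, unsigned int keylen,
				const u8 *msg, unsigned int msglen, u8 *out)
{
	int ret;

	ret = crypto_shash_setkey(tfm, key, keylen);
	if (ret)
		return ret;

	return crypto_shash_tfm_digest(tfm, msg, msglen, out);
}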
diff --git a/drivers/nvme/common/keyring.c b/drivers/nvme/common/keyring.c
index f8d9a208397b..ee341b83eeba 100644
--- a/drivers/nvme/common/keyring.c
+++ b/drivers/nvme/common/keyring.c
@@ -151,7 +151,7 @@ key_serial_t nvme_tls_psk_default(struct key *keyring,
}
EXPORT_SYMBOL_GPL(nvme_tls_psk_default);
-int nvme_keyring_init(void)
+static int __init nvme_keyring_init(void)
{
int err;
@@ -171,12 +171,15 @@ int nvme_keyring_init(void)
}
return 0;
}
-EXPORT_SYMBOL_GPL(nvme_keyring_init);
-void nvme_keyring_exit(void)
+static void __exit nvme_keyring_exit(void)
{
unregister_key_type(&nvme_tls_psk_key_type);
key_revoke(nvme_keyring);
key_put(nvme_keyring);
}
-EXPORT_SYMBOL_GPL(nvme_keyring_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
+module_init(nvme_keyring_init);
+module_exit(nvme_keyring_exit);
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 48f7d72de5e9..8fe2dd619e80 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -95,7 +95,6 @@ config NVME_TCP
config NVME_TCP_TLS
bool "NVMe over Fabrics TCP TLS encryption support"
depends on NVME_TCP
- select NVME_COMMON
select NVME_KEYRING
select NET_HANDSHAKE
select KEYS
@@ -110,7 +109,6 @@ config NVME_TCP_TLS
config NVME_HOST_AUTH
bool "NVM Express over Fabrics In-Band Authentication"
depends on NVME_CORE
- select NVME_COMMON
select NVME_AUTH
help
This provides support for NVMe over Fabrics In-Band Authentication.
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index eaefebb2a799..48328e36e93b 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -29,6 +29,7 @@ struct nvme_dhchap_queue_context {
int error;
u32 s1;
u32 s2;
+ bool bi_directional;
u16 transaction;
u8 status;
u8 dhgroup_id;
@@ -312,17 +313,17 @@ static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
data->dhvlen = cpu_to_le16(chap->host_key_len);
memcpy(data->rval, chap->response, chap->hash_len);
if (ctrl->ctrl_key) {
+ chap->bi_directional = true;
get_random_bytes(chap->c2, chap->hash_len);
data->cvalid = 1;
- chap->s2 = nvme_auth_get_seqnum();
memcpy(data->rval + chap->hash_len, chap->c2,
chap->hash_len);
dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
__func__, chap->qid, (int)chap->hash_len, chap->c2);
} else {
memset(chap->c2, 0, chap->hash_len);
- chap->s2 = 0;
}
+ chap->s2 = nvme_auth_get_seqnum();
data->seqnum = cpu_to_le32(chap->s2);
if (chap->host_key_len) {
dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
@@ -339,10 +340,7 @@ static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
struct nvme_dhchap_queue_context *chap)
{
struct nvmf_auth_dhchap_success1_data *data = chap->buf;
- size_t size = sizeof(*data);
-
- if (chap->s2)
- size += chap->hash_len;
+ size_t size = sizeof(*data) + chap->hash_len;
if (size > CHAP_BUF_SIZE) {
chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
@@ -663,6 +661,7 @@ static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap)
chap->error = 0;
chap->s1 = 0;
chap->s2 = 0;
+ chap->bi_directional = false;
chap->transaction = 0;
memset(chap->c1, 0, sizeof(chap->c1));
memset(chap->c2, 0, sizeof(chap->c2));
@@ -825,7 +824,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
goto fail2;
}
- if (chap->s2) {
+ if (chap->bi_directional) {
/* DH-HMAC-CHAP Step 5: send success2 */
dev_dbg(ctrl->device, "%s: qid %d send success2\n",
__func__, chap->qid);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 62612f87aafa..88b54cdcbd68 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -25,7 +25,6 @@
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-auth.h>
-#include <linux/nvme-keyring.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -483,6 +482,7 @@ EXPORT_SYMBOL_GPL(nvme_cancel_tagset);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
{
+ nvme_stop_keep_alive(ctrl);
if (ctrl->admin_tagset) {
blk_mq_tagset_busy_iter(ctrl->admin_tagset,
nvme_cancel_request, ctrl);
@@ -3200,6 +3200,8 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended)
clear_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags);
ctrl->identified = true;
+ nvme_start_keep_alive(ctrl);
+
return 0;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish);
@@ -4074,8 +4076,21 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
return;
if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM,
- log, sizeof(*log), 0))
+ log, sizeof(*log), 0)) {
dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
+ goto out_free_log;
+ }
+
+ if (log->afi & 0x70 || !(log->afi & 0x7)) {
+ dev_info(ctrl->device,
+ "Firmware is activated after next Controller Level Reset\n");
+ goto out_free_log;
+ }
+
+ memcpy(ctrl->subsys->firmware_rev, &log->frs[(log->afi & 0x7) - 1],
+ sizeof(ctrl->subsys->firmware_rev));
+
+out_free_log:
kfree(log);
}
@@ -4333,7 +4348,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
nvme_mpath_stop(ctrl);
nvme_auth_stop(ctrl);
- nvme_stop_keep_alive(ctrl);
nvme_stop_failfast_work(ctrl);
flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fw_act_work);
@@ -4344,8 +4358,6 @@ EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{
- nvme_start_keep_alive(ctrl);
-
nvme_enable_aen(ctrl);
/*
@@ -4724,16 +4736,11 @@ static int __init nvme_core_init(void)
result = PTR_ERR(nvme_ns_chr_class);
goto unregister_generic_ns;
}
- result = nvme_keyring_init();
- if (result)
- goto destroy_ns_chr;
result = nvme_init_auth();
if (result)
- goto keyring_exit;
+ goto destroy_ns_chr;
return 0;
-keyring_exit:
- nvme_keyring_exit();
destroy_ns_chr:
class_destroy(nvme_ns_chr_class);
unregister_generic_ns:
@@ -4757,7 +4764,6 @@ out:
static void __exit nvme_core_exit(void)
{
nvme_exit_auth();
- nvme_keyring_exit();
class_destroy(nvme_ns_chr_class);
class_destroy(nvme_subsys_class);
class_destroy(nvme_class);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index a15b37750d6e..49c3e46eaa1e 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2530,6 +2530,12 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
* clean up the admin queue. Same thing as above.
*/
nvme_quiesce_admin_queue(&ctrl->ctrl);
+
+ /*
+ * Open-coding nvme_cancel_admin_tagset() as fc
+ * is not using nvme_cancel_request().
+ */
+ nvme_stop_keep_alive(&ctrl->ctrl);
blk_sync_queue(ctrl->ctrl.admin_q);
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_fc_terminate_exchange, &ctrl->ctrl);
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 747c879e8982..529b9954d2b8 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -510,10 +510,13 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
req->bio = pdu->bio;
- if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
+ if (nvme_req(req)->flags & NVME_REQ_CANCELLED) {
pdu->nvme_status = -EINTR;
- else
+ } else {
pdu->nvme_status = nvme_req(req)->status;
+ if (!pdu->nvme_status)
+ pdu->nvme_status = blk_status_to_errno(err);
+ }
pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);
/*
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 4714a902f4ca..89661a9cf850 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1423,13 +1423,14 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
nvme_tcp_queue_id(queue), ret);
goto free_icresp;
}
+ ret = -ENOTCONN;
if (queue->ctrl->ctrl.opts->tls) {
ctype = tls_get_record_type(queue->sock->sk,
(struct cmsghdr *)cbuf);
if (ctype != TLS_RECORD_TYPE_DATA) {
pr_err("queue %d: unhandled TLS record %d\n",
nvme_tcp_queue_id(queue), ctype);
- return -ENOTCONN;
+ goto free_icresp;
}
}
ret = -EINVAL;
@@ -2236,11 +2237,7 @@ destroy_io:
nvme_tcp_destroy_io_queues(ctrl, new);
}
destroy_admin:
- nvme_quiesce_admin_queue(ctrl);
- blk_sync_queue(ctrl->admin_q);
- nvme_tcp_stop_queue(ctrl, 0);
- nvme_cancel_admin_tagset(ctrl);
- nvme_tcp_destroy_admin_queue(ctrl, new);
+ nvme_tcp_teardown_admin_queue(ctrl, false);
return ret;
}
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index fa479c9f5c3d..31633da9427c 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -87,7 +87,6 @@ config NVME_TARGET_TCP
config NVME_TARGET_TCP_TLS
bool "NVMe over Fabrics TCP target TLS encryption support"
depends on NVME_TARGET_TCP
- select NVME_COMMON
select NVME_KEYRING
select NET_HANDSHAKE
select KEYS
@@ -102,7 +101,6 @@ config NVME_TARGET_TCP_TLS
config NVME_TARGET_AUTH
bool "NVMe over Fabrics In-band Authentication support"
depends on NVME_TARGET
- select NVME_COMMON
select NVME_AUTH
help
This enables support for NVMe over Fabrics In-band Authentication
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
index 1d9854484e2e..eb7785be0ca7 100644
--- a/drivers/nvme/target/fabrics-cmd-auth.c
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -163,11 +163,11 @@ static u16 nvmet_auth_reply(struct nvmet_req *req, void *d)
pr_debug("%s: ctrl %d qid %d challenge %*ph\n",
__func__, ctrl->cntlid, req->sq->qid, data->hl,
req->sq->dhchap_c2);
- req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
} else {
req->sq->authenticated = true;
req->sq->dhchap_c2 = NULL;
}
+ req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
return 0;
}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 48d5df054cd0..9cb434c58075 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -466,6 +466,8 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
out_destroy_io:
nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
+ nvme_quiesce_admin_queue(&ctrl->ctrl);
+ nvme_cancel_admin_tagset(&ctrl->ctrl);
nvme_loop_destroy_admin_queue(ctrl);
out_disable:
dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
@@ -600,6 +602,8 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
return &ctrl->ctrl;
out_remove_admin_queue:
+ nvme_quiesce_admin_queue(&ctrl->ctrl);
+ nvme_cancel_admin_tagset(&ctrl->ctrl);
nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
kfree(ctrl->queues);
diff --git a/drivers/nvmem/apple-efuses.c b/drivers/nvmem/apple-efuses.c
index 9b7c87102104..d3d49d22338b 100644
--- a/drivers/nvmem/apple-efuses.c
+++ b/drivers/nvmem/apple-efuses.c
@@ -36,6 +36,7 @@ static int apple_efuses_probe(struct platform_device *pdev)
struct resource *res;
struct nvmem_config config = {
.dev = &pdev->dev,
+ .add_legacy_fixed_of_cells = true,
.read_only = true,
.reg_read = apple_efuses_read,
.stride = sizeof(u32),
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index eaf6a3fe8ca6..bf42b7e826db 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -935,7 +935,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
nvmem->nkeepout = config->nkeepout;
if (config->of_node)
nvmem->dev.of_node = config->of_node;
- else if (!config->no_of_node)
+ else
nvmem->dev.of_node = config->dev->of_node;
switch (config->id) {
@@ -997,9 +997,11 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
if (rval)
goto err_remove_cells;
- rval = nvmem_add_cells_from_legacy_of(nvmem);
- if (rval)
- goto err_remove_cells;
+ if (config->add_legacy_fixed_of_cells) {
+ rval = nvmem_add_cells_from_legacy_of(nvmem);
+ if (rval)
+ goto err_remove_cells;
+ }
rval = nvmem_add_cells_from_fixed_layout(nvmem);
if (rval)
diff --git a/drivers/nvmem/imx-ocotp-scu.c b/drivers/nvmem/imx-ocotp-scu.c
index c38d9c1c3f48..517d83e11af2 100644
--- a/drivers/nvmem/imx-ocotp-scu.c
+++ b/drivers/nvmem/imx-ocotp-scu.c
@@ -220,6 +220,7 @@ static int imx_scu_ocotp_write(void *context, unsigned int offset,
static struct nvmem_config imx_scu_ocotp_nvmem_config = {
.name = "imx-scu-ocotp",
+ .add_legacy_fixed_of_cells = true,
.read_only = false,
.word_size = 4,
.stride = 1,
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index e8b6f194925d..f1e202efaa49 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -615,6 +615,7 @@ static int imx_ocotp_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
priv->params = of_device_get_match_data(&pdev->dev);
+ imx_ocotp_nvmem_config.add_legacy_fixed_of_cells = true;
imx_ocotp_nvmem_config.size = 4 * priv->params->nregs;
imx_ocotp_nvmem_config.dev = dev;
imx_ocotp_nvmem_config.priv = priv;
diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c
index d6b533497ce1..b922df99f9bc 100644
--- a/drivers/nvmem/meson-efuse.c
+++ b/drivers/nvmem/meson-efuse.c
@@ -93,6 +93,7 @@ static int meson_efuse_probe(struct platform_device *pdev)
econfig->dev = dev;
econfig->name = dev_name(dev);
+ econfig->add_legacy_fixed_of_cells = true;
econfig->stride = 1;
econfig->word_size = 1;
econfig->reg_read = meson_efuse_read;
diff --git a/drivers/nvmem/meson-mx-efuse.c b/drivers/nvmem/meson-mx-efuse.c
index d6d7aeda31f9..3ff04d5ca8f8 100644
--- a/drivers/nvmem/meson-mx-efuse.c
+++ b/drivers/nvmem/meson-mx-efuse.c
@@ -210,6 +210,7 @@ static int meson_mx_efuse_probe(struct platform_device *pdev)
efuse->config.owner = THIS_MODULE;
efuse->config.dev = &pdev->dev;
efuse->config.priv = efuse;
+ efuse->config.add_legacy_fixed_of_cells = true;
efuse->config.stride = drvdata->word_size;
efuse->config.word_size = drvdata->word_size;
efuse->config.size = SZ_512;
diff --git a/drivers/nvmem/microchip-otpc.c b/drivers/nvmem/microchip-otpc.c
index 436e0dc4f337..7cf81738a3e0 100644
--- a/drivers/nvmem/microchip-otpc.c
+++ b/drivers/nvmem/microchip-otpc.c
@@ -261,6 +261,7 @@ static int mchp_otpc_probe(struct platform_device *pdev)
return ret;
mchp_nvmem_config.dev = otpc->dev;
+ mchp_nvmem_config.add_legacy_fixed_of_cells = true;
mchp_nvmem_config.size = size;
mchp_nvmem_config.priv = otpc;
nvmem = devm_nvmem_register(&pdev->dev, &mchp_nvmem_config);
diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c
index b36cd0dcc8c7..87c94686cfd2 100644
--- a/drivers/nvmem/mtk-efuse.c
+++ b/drivers/nvmem/mtk-efuse.c
@@ -83,6 +83,7 @@ static int mtk_efuse_probe(struct platform_device *pdev)
return PTR_ERR(priv->base);
pdata = device_get_match_data(dev);
+ econfig.add_legacy_fixed_of_cells = true;
econfig.stride = 1;
econfig.word_size = 1;
econfig.reg_read = mtk_reg_read;
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
index 588ab56d75b7..7b78f18f9545 100644
--- a/drivers/nvmem/mxs-ocotp.c
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -13,8 +13,9 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/stmp_device.h>
@@ -140,11 +141,10 @@ static int mxs_ocotp_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct mxs_data *data;
struct mxs_ocotp *otp;
- const struct of_device_id *match;
int ret;
- match = of_match_device(dev->driver->of_match_table, dev);
- if (!match || !match->data)
+ data = device_get_match_data(dev);
+ if (!data)
return -EINVAL;
otp = devm_kzalloc(dev, sizeof(*otp), GFP_KERNEL);
@@ -169,8 +169,6 @@ static int mxs_ocotp_probe(struct platform_device *pdev)
if (ret)
return ret;
- data = match->data;
-
ocotp_config.size = data->size;
ocotp_config.priv = otp;
ocotp_config.dev = dev;
diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c
index 70f2d4f2efbf..9aa8f42faa4c 100644
--- a/drivers/nvmem/qcom-spmi-sdam.c
+++ b/drivers/nvmem/qcom-spmi-sdam.c
@@ -142,6 +142,7 @@ static int sdam_probe(struct platform_device *pdev)
sdam->sdam_config.name = "spmi_sdam";
sdam->sdam_config.id = NVMEM_DEVID_AUTO;
sdam->sdam_config.owner = THIS_MODULE;
+ sdam->sdam_config.add_legacy_fixed_of_cells = true;
sdam->sdam_config.stride = 1;
sdam->sdam_config.word_size = 1;
sdam->sdam_config.reg_read = sdam_read;
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index 14814cba2dd6..116a39e804c7 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -357,6 +357,7 @@ static int qfprom_probe(struct platform_device *pdev)
{
struct nvmem_config econfig = {
.name = "qfprom",
+ .add_legacy_fixed_of_cells = true,
.stride = 1,
.word_size = 1,
.id = NVMEM_DEVID_AUTO,
@@ -423,12 +424,12 @@ static int qfprom_probe(struct platform_device *pdev)
if (IS_ERR(priv->vcc))
return PTR_ERR(priv->vcc);
- priv->secclk = devm_clk_get(dev, "core");
+ priv->secclk = devm_clk_get_optional(dev, "core");
if (IS_ERR(priv->secclk))
return dev_err_probe(dev, PTR_ERR(priv->secclk), "Error getting clock\n");
- /* Only enable writing if we have SoC data. */
- if (priv->soc_data)
+ /* Only enable writing if we have SoC data and a valid clock */
+ if (priv->soc_data && priv->secclk)
econfig.reg_write = qfprom_reg_write;
}
diff --git a/drivers/nvmem/rave-sp-eeprom.c b/drivers/nvmem/rave-sp-eeprom.c
index df6a1c594b78..9ecf3873cbb7 100644
--- a/drivers/nvmem/rave-sp-eeprom.c
+++ b/drivers/nvmem/rave-sp-eeprom.c
@@ -328,6 +328,7 @@ static int rave_sp_eeprom_probe(struct platform_device *pdev)
of_property_read_string(np, "zii,eeprom-name", &config.name);
config.priv = eeprom;
config.dev = dev;
+ config.add_legacy_fixed_of_cells = true;
config.size = size;
config.reg_read = rave_sp_eeprom_reg_read;
config.reg_write = rave_sp_eeprom_reg_write;
diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c
index 4004c5bece42..2b40978ddb18 100644
--- a/drivers/nvmem/rockchip-efuse.c
+++ b/drivers/nvmem/rockchip-efuse.c
@@ -205,6 +205,7 @@ static int rockchip_rk3399_efuse_read(void *context, unsigned int offset,
static struct nvmem_config econfig = {
.name = "rockchip-efuse",
+ .add_legacy_fixed_of_cells = true,
.stride = 1,
.word_size = 1,
.read_only = true,
diff --git a/drivers/nvmem/sc27xx-efuse.c b/drivers/nvmem/sc27xx-efuse.c
index 2210da40dfbd..bff27011f4ff 100644
--- a/drivers/nvmem/sc27xx-efuse.c
+++ b/drivers/nvmem/sc27xx-efuse.c
@@ -247,6 +247,7 @@ static int sc27xx_efuse_probe(struct platform_device *pdev)
econfig.reg_read = sc27xx_efuse_read;
econfig.priv = efuse;
econfig.dev = &pdev->dev;
+ econfig.add_legacy_fixed_of_cells = true;
nvmem = devm_nvmem_register(&pdev->dev, &econfig);
if (IS_ERR(nvmem)) {
dev_err(&pdev->dev, "failed to register nvmem config\n");
diff --git a/drivers/nvmem/sec-qfprom.c b/drivers/nvmem/sec-qfprom.c
index e48c2dc0c44b..19799b3fe00a 100644
--- a/drivers/nvmem/sec-qfprom.c
+++ b/drivers/nvmem/sec-qfprom.c
@@ -47,6 +47,7 @@ static int sec_qfprom_probe(struct platform_device *pdev)
{
struct nvmem_config econfig = {
.name = "sec-qfprom",
+ .add_legacy_fixed_of_cells = true,
.stride = 1,
.word_size = 1,
.id = NVMEM_DEVID_AUTO,
diff --git a/drivers/nvmem/sprd-efuse.c b/drivers/nvmem/sprd-efuse.c
index 7e6e31db4baa..bb3105f3291f 100644
--- a/drivers/nvmem/sprd-efuse.c
+++ b/drivers/nvmem/sprd-efuse.c
@@ -408,6 +408,7 @@ static int sprd_efuse_probe(struct platform_device *pdev)
econfig.read_only = false;
econfig.name = "sprd-efuse";
econfig.size = efuse->data->blk_nums * SPRD_EFUSE_BLOCK_WIDTH;
+ econfig.add_legacy_fixed_of_cells = true;
econfig.reg_read = sprd_efuse_read;
econfig.reg_write = sprd_efuse_write;
econfig.priv = efuse;
diff --git a/drivers/nvmem/stm32-romem.c b/drivers/nvmem/stm32-romem.c
index 0f84044bd1ad..8a553b1799a8 100644
--- a/drivers/nvmem/stm32-romem.c
+++ b/drivers/nvmem/stm32-romem.c
@@ -10,7 +10,9 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/tee_drv.h>
#include "stm32-bsec-optee-ta.h"
@@ -207,11 +209,11 @@ static int stm32_romem_probe(struct platform_device *pdev)
priv->cfg.priv = priv;
priv->cfg.owner = THIS_MODULE;
priv->cfg.type = NVMEM_TYPE_OTP;
+ priv->cfg.add_legacy_fixed_of_cells = true;
priv->lower = 0;
- cfg = (const struct stm32_romem_cfg *)
- of_match_device(dev->driver->of_match_table, dev)->data;
+ cfg = device_get_match_data(dev);
if (!cfg) {
priv->cfg.read_only = true;
priv->cfg.size = resource_size(res);
diff --git a/drivers/nvmem/sunplus-ocotp.c b/drivers/nvmem/sunplus-ocotp.c
index f3a18aa0a6c7..38f5d9df39cd 100644
--- a/drivers/nvmem/sunplus-ocotp.c
+++ b/drivers/nvmem/sunplus-ocotp.c
@@ -145,6 +145,7 @@ disable_clk:
static struct nvmem_config sp_ocotp_nvmem_config = {
.name = "sp-ocotp",
+ .add_legacy_fixed_of_cells = true,
.read_only = true,
.word_size = 1,
.size = QAC628_OTP_SIZE,
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index 5d364d85347f..ba14a76208ab 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -153,6 +153,7 @@ static int sunxi_sid_probe(struct platform_device *pdev)
nvmem_cfg->dev = dev;
nvmem_cfg->name = "sunxi-sid";
nvmem_cfg->type = NVMEM_TYPE_OTP;
+ nvmem_cfg->add_legacy_fixed_of_cells = true;
nvmem_cfg->read_only = true;
nvmem_cfg->size = cfg->size;
nvmem_cfg->word_size = 1;
diff --git a/drivers/nvmem/uniphier-efuse.c b/drivers/nvmem/uniphier-efuse.c
index 0a1dbb80537e..6ad3295d3195 100644
--- a/drivers/nvmem/uniphier-efuse.c
+++ b/drivers/nvmem/uniphier-efuse.c
@@ -52,6 +52,7 @@ static int uniphier_efuse_probe(struct platform_device *pdev)
econfig.size = resource_size(res);
econfig.priv = priv;
econfig.dev = dev;
+ econfig.add_legacy_fixed_of_cells = true;
nvmem = devm_nvmem_register(dev, &econfig);
return PTR_ERR_OR_ZERO(nvmem);
diff --git a/drivers/nvmem/zynqmp_nvmem.c b/drivers/nvmem/zynqmp_nvmem.c
index f49bb9a26d05..7f15aa89a9d0 100644
--- a/drivers/nvmem/zynqmp_nvmem.c
+++ b/drivers/nvmem/zynqmp_nvmem.c
@@ -58,6 +58,7 @@ static int zynqmp_nvmem_probe(struct platform_device *pdev)
priv->dev = dev;
econfig.dev = dev;
+ econfig.add_legacy_fixed_of_cells = true;
econfig.reg_read = zynqmp_nvmem_read;
econfig.priv = priv;
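
The nvmem provider changes above share one pattern: each driver that still expects fixed NVMEM cells to be described as child nodes of its device-tree node now opts in explicitly by setting the new add_legacy_fixed_of_cells flag in struct nvmem_config, since the core no longer parses those child nodes unconditionally. A minimal sketch of an affected provider — driver name and read callback are hypothetical — could look like this:

	#include <linux/nvmem-provider.h>
	#include <linux/platform_device.h>

	/* hypothetical read callback: copy 'bytes' of OTP data at 'offset' into 'val' */
	static int foo_otp_read(void *priv, unsigned int offset, void *val, size_t bytes)
	{
		return 0;
	}

	static int foo_otp_probe(struct platform_device *pdev)
	{
		struct nvmem_config config = {
			.name = "foo-otp",
			.dev = &pdev->dev,
			/* keep parsing fixed cells from DT child nodes */
			.add_legacy_fixed_of_cells = true,
			.stride = 4,
			.word_size = 4,
			.reg_read = foo_otp_read,
		};
		struct nvmem_device *nvmem;

		nvmem = devm_nvmem_register(&pdev->dev, &config);
		return PTR_ERR_OR_ZERO(nvmem);
	}
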
diff --git a/drivers/of/property.c b/drivers/of/property.c
index cf8dacf3e3b8..afdaefbd03f6 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -1267,6 +1267,7 @@ DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells")
DEFINE_SIMPLE_PROP(leds, "leds", NULL)
DEFINE_SIMPLE_PROP(backlight, "backlight", NULL)
DEFINE_SIMPLE_PROP(panel, "panel", NULL)
+DEFINE_SIMPLE_PROP(msi_parent, "msi-parent", "#msi-cells")
DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
@@ -1356,6 +1357,7 @@ static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_leds, },
{ .parse_prop = parse_backlight, },
{ .parse_prop = parse_panel, },
+ { .parse_prop = parse_msi_parent, },
{ .parse_prop = parse_gpio_compat, },
{ .parse_prop = parse_interrupts, },
{ .parse_prop = parse_regulators, },
diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
index 532d5cbbd344..bd388560ed59 100644
--- a/drivers/parport/procfs.c
+++ b/drivers/parport/procfs.c
@@ -32,13 +32,6 @@
#define PARPORT_MAX_TIMESLICE_VALUE ((unsigned long) HZ)
#define PARPORT_MIN_SPINTIME_VALUE 1
#define PARPORT_MAX_SPINTIME_VALUE 1000
-/*
- * PARPORT_BASE_* is the size of the known parts of the sysctl path
- * in dev/partport/%s/devices/%s. "dev/parport/"(12), "/devices/"(9
- * and null char(1).
- */
-#define PARPORT_BASE_PATH_SIZE 13
-#define PARPORT_BASE_DEVICES_PATH_SIZE 22
static int do_active_device(struct ctl_table *table, int write,
void *result, size_t *lenp, loff_t *ppos)
@@ -423,8 +416,7 @@ int parport_proc_register(struct parport *port)
{
struct parport_sysctl_table *t;
char *tmp_dir_path;
- size_t tmp_path_len, port_name_len;
- int bytes_written, i, err = 0;
+ int i, err = 0;
t = kmemdup(&parport_sysctl_template, sizeof(*t), GFP_KERNEL);
if (t == NULL)
@@ -440,35 +432,23 @@ int parport_proc_register(struct parport *port)
#endif /* IEEE 1284 support */
}
- port_name_len = strnlen(port->name, PARPORT_NAME_MAX_LEN);
- /*
- * Allocate a buffer for two paths: dev/parport/PORT and dev/parport/PORT/devices.
- * We calculate for the second as that will give us enough for the first.
- */
- tmp_path_len = PARPORT_BASE_DEVICES_PATH_SIZE + port_name_len;
- tmp_dir_path = kzalloc(tmp_path_len, GFP_KERNEL);
+ tmp_dir_path = kasprintf(GFP_KERNEL, "dev/parport/%s/devices", port->name);
if (!tmp_dir_path) {
err = -ENOMEM;
goto exit_free_t;
}
- bytes_written = snprintf(tmp_dir_path, tmp_path_len,
- "dev/parport/%s/devices", port->name);
- if (tmp_path_len <= bytes_written) {
- err = -ENOENT;
- goto exit_free_tmp_dir_path;
- }
t->devices_header = register_sysctl(tmp_dir_path, t->device_dir);
if (t->devices_header == NULL) {
err = -ENOENT;
goto exit_free_tmp_dir_path;
}
- tmp_path_len = PARPORT_BASE_PATH_SIZE + port_name_len;
- bytes_written = snprintf(tmp_dir_path, tmp_path_len,
- "dev/parport/%s", port->name);
- if (tmp_path_len <= bytes_written) {
- err = -ENOENT;
+ kfree(tmp_dir_path);
+
+ tmp_dir_path = kasprintf(GFP_KERNEL, "dev/parport/%s", port->name);
+ if (!tmp_dir_path) {
+ err = -ENOMEM;
goto unregister_devices_h;
}
@@ -508,34 +488,22 @@ int parport_proc_unregister(struct parport *port)
int parport_device_proc_register(struct pardevice *device)
{
- int bytes_written, err = 0;
struct parport_device_sysctl_table *t;
struct parport * port = device->port;
- size_t port_name_len, device_name_len, tmp_dir_path_len;
char *tmp_dir_path;
+ int err = 0;
t = kmemdup(&parport_device_sysctl_template, sizeof(*t), GFP_KERNEL);
if (t == NULL)
return -ENOMEM;
- port_name_len = strnlen(port->name, PARPORT_NAME_MAX_LEN);
- device_name_len = strnlen(device->name, PATH_MAX);
-
/* Allocate a buffer for two paths: dev/parport/PORT/devices/DEVICE. */
- tmp_dir_path_len = PARPORT_BASE_DEVICES_PATH_SIZE + port_name_len + device_name_len;
- tmp_dir_path = kzalloc(tmp_dir_path_len, GFP_KERNEL);
+ tmp_dir_path = kasprintf(GFP_KERNEL, "dev/parport/%s/devices/%s", port->name, device->name);
if (!tmp_dir_path) {
err = -ENOMEM;
goto exit_free_t;
}
- bytes_written = snprintf(tmp_dir_path, tmp_dir_path_len, "dev/parport/%s/devices/%s",
- port->name, device->name);
- if (tmp_dir_path_len <= bytes_written) {
- err = -ENOENT;
- goto exit_free_path;
- }
-
t->vars[0].data = &device->timeslice;
t->sysctl_header = register_sysctl(tmp_dir_path, t->vars);
@@ -548,9 +516,6 @@ int parport_device_proc_register(struct pardevice *device)
kfree(tmp_dir_path);
return 0;
-exit_free_path:
- kfree(tmp_dir_path);
-
exit_free_t:
kfree(t);
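
The parport procfs changes above replace hand-sized buffers filled with snprintf() by kasprintf(), which computes the required length and allocates the formatted string in one step, so the caller only has to check for NULL and kfree() the result. A minimal sketch of the pattern, with a hypothetical helper name:

	#include <linux/slab.h>

	static char *build_parport_sysctl_path(const char *port_name)
	{
		/*
		 * kasprintf() returns a buffer sized exactly for the formatted
		 * string, or NULL on allocation failure; the caller frees it.
		 */
		return kasprintf(GFP_KERNEL, "dev/parport/%s/devices", port_name);
	}

As in the driver, the path string can be freed once register_sysctl() has consumed it.
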
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 2d46b1d4fd69..e21831d93305 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -361,7 +361,6 @@ static void free_port(struct device *dev)
kfree(port->probe_info[d].description);
}
- kfree(port->name);
kfree(port);
}
@@ -438,7 +437,6 @@ struct parport *parport_register_port(unsigned long base, int irq, int dma,
struct parport *tmp;
int num;
int device;
- char *name;
int ret;
tmp = kzalloc(sizeof(struct parport), GFP_KERNEL);
@@ -450,13 +448,9 @@ struct parport *parport_register_port(unsigned long base, int irq, int dma,
tmp->irq = irq;
tmp->dma = dma;
tmp->muxport = tmp->daisy = tmp->muxsel = -1;
- tmp->modes = 0;
INIT_LIST_HEAD(&tmp->list);
- tmp->devices = tmp->cad = NULL;
- tmp->flags = 0;
tmp->ops = ops;
tmp->physport = tmp;
- memset(tmp->probe_info, 0, 5 * sizeof(struct parport_device_info));
rwlock_init(&tmp->cad_lock);
spin_lock_init(&tmp->waitlist_lock);
spin_lock_init(&tmp->pardevice_lock);
@@ -465,19 +459,15 @@ struct parport *parport_register_port(unsigned long base, int irq, int dma,
sema_init(&tmp->ieee1284.irq, 0);
tmp->spintime = parport_default_spintime;
atomic_set(&tmp->ref_count, 1);
- INIT_LIST_HEAD(&tmp->full_list);
- name = kmalloc(PARPORT_NAME_MAX_LEN, GFP_KERNEL);
- if (!name) {
- kfree(tmp);
- return NULL;
- }
/* Search for the lowest free parport number. */
spin_lock(&full_list_lock);
- for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) {
+ num = 0;
+ list_for_each(l, &all_ports) {
struct parport *p = list_entry(l, struct parport, full_list);
- if (p->number != num)
+
+ if (p->number != num++)
break;
}
tmp->portnum = tmp->number = num;
@@ -487,19 +477,17 @@ struct parport *parport_register_port(unsigned long base, int irq, int dma,
/*
* Now that the portnum is known finish doing the Init.
*/
- sprintf(name, "parport%d", tmp->portnum = tmp->number);
- tmp->name = name;
+ dev_set_name(&tmp->bus_dev, "parport%d", tmp->portnum);
tmp->bus_dev.bus = &parport_bus_type;
tmp->bus_dev.release = free_port;
- dev_set_name(&tmp->bus_dev, name);
tmp->bus_dev.type = &parport_device_type;
+ tmp->name = dev_name(&tmp->bus_dev);
+
for (device = 0; device < 5; device++)
/* assume the worst */
tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;
- tmp->waithead = tmp->waittail = NULL;
-
ret = device_register(&tmp->bus_dev);
if (ret) {
put_device(&tmp->bus_dev);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 8e6d1031510b..2321fdfefd7d 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -831,6 +831,19 @@ static const struct attribute_group pci_dev_config_attr_group = {
.is_bin_visible = pci_dev_config_attr_is_visible,
};
+/*
+ * llseek operation for mmappable PCI resources.
+ * May be left unused if the arch doesn't provide them.
+ */
+static __maybe_unused loff_t
+pci_llseek_resource(struct file *filep,
+ struct kobject *kobj __always_unused,
+ struct bin_attribute *attr,
+ loff_t offset, int whence)
+{
+ return fixed_size_llseek(filep, offset, whence, attr->size);
+}
+
#ifdef HAVE_PCI_LEGACY
/**
* pci_read_legacy_io - read byte(s) from legacy I/O port space
@@ -963,6 +976,8 @@ void pci_create_legacy_files(struct pci_bus *b)
b->legacy_io->attr.mode = 0600;
b->legacy_io->read = pci_read_legacy_io;
b->legacy_io->write = pci_write_legacy_io;
+ /* See pci_create_attr() for motivation */
+ b->legacy_io->llseek = pci_llseek_resource;
b->legacy_io->mmap = pci_mmap_legacy_io;
b->legacy_io->f_mapping = iomem_get_mapping;
pci_adjust_legacy_attr(b, pci_mmap_io);
@@ -977,6 +992,8 @@ void pci_create_legacy_files(struct pci_bus *b)
b->legacy_mem->size = 1024*1024;
b->legacy_mem->attr.mode = 0600;
b->legacy_mem->mmap = pci_mmap_legacy_mem;
+ /* See pci_create_attr() for motivation */
+ b->legacy_mem->llseek = pci_llseek_resource;
b->legacy_mem->f_mapping = iomem_get_mapping;
pci_adjust_legacy_attr(b, pci_mmap_mem);
error = device_create_bin_file(&b->dev, b->legacy_mem);
@@ -1195,8 +1212,15 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
res_attr->mmap = pci_mmap_resource_uc;
}
}
- if (res_attr->mmap)
+ if (res_attr->mmap) {
res_attr->f_mapping = iomem_get_mapping;
+ /*
+ * generic_file_llseek() consults f_mapping->host to determine
+ * the file size. As iomem_inode knows nothing about the
+ * attribute, it's not going to work, so override it as well.
+ */
+ res_attr->llseek = pci_llseek_resource;
+ }
res_attr->attr.name = res_attr_name;
res_attr->attr.mode = 0600;
res_attr->size = pci_resource_len(pdev, num);
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 228652a59f27..8999fcebde6a 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -49,6 +49,15 @@ config PCIEAER_INJECT
gotten from:
https://git.kernel.org/cgit/linux/kernel/git/gong.chen/aer-inject.git/
+config PCIEAER_CXL
+ bool "PCI Express CXL RAS support"
+ default y
+ depends on PCIEAER && CXL_PCI
+ help
+ Enables CXL error handling.
+
+ If unsure, say Y.
+
#
# PCI Express ECRC
#
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index dcd35993004e..42a3bd35a3e1 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -760,9 +760,10 @@ int cper_severity_to_aer(int cper_severity)
}
}
EXPORT_SYMBOL_GPL(cper_severity_to_aer);
+#endif
-void cper_print_aer(struct pci_dev *dev, int aer_severity,
- struct aer_capability_regs *aer)
+void pci_print_aer(struct pci_dev *dev, int aer_severity,
+ struct aer_capability_regs *aer)
{
int layer, agent, tlp_header_valid = 0;
u32 status, mask;
@@ -801,7 +802,7 @@ void cper_print_aer(struct pci_dev *dev, int aer_severity,
trace_aer_event(dev_name(&dev->dev), (status & ~mask),
aer_severity, tlp_header_valid, &aer->header_log);
}
-#endif
+EXPORT_SYMBOL_NS_GPL(pci_print_aer, CXL);
/**
* add_error_device - list device to be handled
@@ -934,14 +935,153 @@ static bool find_source_device(struct pci_dev *parent,
return true;
}
+#ifdef CONFIG_PCIEAER_CXL
+
/**
- * handle_error_source - handle logging error into an event log
+ * pci_aer_unmask_internal_errors - unmask internal errors
+ * @dev: pointer to the pci_dev data structure
+ *
+ * Unmasks internal errors in the Uncorrectable and Correctable Error
+ * Mask registers.
+ *
+ * Note: AER must be enabled and supported by the device which must be
+ * checked in advance, e.g. with pcie_aer_is_native().
+ */
+static void pci_aer_unmask_internal_errors(struct pci_dev *dev)
+{
+ int aer = dev->aer_cap;
+ u32 mask;
+
+ pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &mask);
+ mask &= ~PCI_ERR_UNC_INTN;
+ pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, mask);
+
+ pci_read_config_dword(dev, aer + PCI_ERR_COR_MASK, &mask);
+ mask &= ~PCI_ERR_COR_INTERNAL;
+ pci_write_config_dword(dev, aer + PCI_ERR_COR_MASK, mask);
+}
+
+static bool is_cxl_mem_dev(struct pci_dev *dev)
+{
+ /*
+ * The capability, status, and control fields in Device 0,
+ * Function 0 DVSEC control the CXL functionality of the
+ * entire device (CXL 3.0, 8.1.3).
+ */
+ if (dev->devfn != PCI_DEVFN(0, 0))
+ return false;
+
+ /*
+ * CXL Memory Devices must have the 502h class code set (CXL
+ * 3.0, 8.1.12.1).
+ */
+ if ((dev->class >> 8) != PCI_CLASS_MEMORY_CXL)
+ return false;
+
+ return true;
+}
+
+static bool cxl_error_is_native(struct pci_dev *dev)
+{
+ struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
+
+ return (pcie_ports_native || host->native_aer);
+}
+
+static bool is_internal_error(struct aer_err_info *info)
+{
+ if (info->severity == AER_CORRECTABLE)
+ return info->status & PCI_ERR_COR_INTERNAL;
+
+ return info->status & PCI_ERR_UNC_INTN;
+}
+
+static int cxl_rch_handle_error_iter(struct pci_dev *dev, void *data)
+{
+ struct aer_err_info *info = (struct aer_err_info *)data;
+ const struct pci_error_handlers *err_handler;
+
+ if (!is_cxl_mem_dev(dev) || !cxl_error_is_native(dev))
+ return 0;
+
+ /* protect dev->driver */
+ device_lock(&dev->dev);
+
+ err_handler = dev->driver ? dev->driver->err_handler : NULL;
+ if (!err_handler)
+ goto out;
+
+ if (info->severity == AER_CORRECTABLE) {
+ if (err_handler->cor_error_detected)
+ err_handler->cor_error_detected(dev);
+ } else if (err_handler->error_detected) {
+ if (info->severity == AER_NONFATAL)
+ err_handler->error_detected(dev, pci_channel_io_normal);
+ else if (info->severity == AER_FATAL)
+ err_handler->error_detected(dev, pci_channel_io_frozen);
+ }
+out:
+ device_unlock(&dev->dev);
+ return 0;
+}
+
+static void cxl_rch_handle_error(struct pci_dev *dev, struct aer_err_info *info)
+{
+ /*
+ * Internal errors of an RCEC indicate an AER error in an
+ * RCH's downstream port. Check and handle them in the CXL.mem
+ * device driver.
+ */
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_EC &&
+ is_internal_error(info))
+ pcie_walk_rcec(dev, cxl_rch_handle_error_iter, info);
+}
+
+static int handles_cxl_error_iter(struct pci_dev *dev, void *data)
+{
+ bool *handles_cxl = data;
+
+ if (!*handles_cxl)
+ *handles_cxl = is_cxl_mem_dev(dev) && cxl_error_is_native(dev);
+
+ /* Non-zero terminates iteration */
+ return *handles_cxl;
+}
+
+static bool handles_cxl_errors(struct pci_dev *rcec)
+{
+ bool handles_cxl = false;
+
+ if (pci_pcie_type(rcec) == PCI_EXP_TYPE_RC_EC &&
+ pcie_aer_is_native(rcec))
+ pcie_walk_rcec(rcec, handles_cxl_error_iter, &handles_cxl);
+
+ return handles_cxl;
+}
+
+static void cxl_rch_enable_rcec(struct pci_dev *rcec)
+{
+ if (!handles_cxl_errors(rcec))
+ return;
+
+ pci_aer_unmask_internal_errors(rcec);
+ pci_info(rcec, "CXL: Internal errors unmasked");
+}
+
+#else
+static inline void cxl_rch_enable_rcec(struct pci_dev *dev) { }
+static inline void cxl_rch_handle_error(struct pci_dev *dev,
+ struct aer_err_info *info) { }
+#endif
+
+/**
+ * pci_aer_handle_error - handle logging error into an event log
* @dev: pointer to pci_dev data structure of error source device
* @info: comprehensive error information
*
* Invoked when an error being detected by Root Port.
*/
-static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
+static void pci_aer_handle_error(struct pci_dev *dev, struct aer_err_info *info)
{
int aer = dev->aer_cap;
@@ -965,6 +1105,12 @@ static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
pcie_do_recovery(dev, pci_channel_io_normal, aer_root_reset);
else if (info->severity == AER_FATAL)
pcie_do_recovery(dev, pci_channel_io_frozen, aer_root_reset);
+}
+
+static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
+{
+ cxl_rch_handle_error(dev, info);
+ pci_aer_handle_error(dev, info);
pci_dev_put(dev);
}
@@ -997,7 +1143,7 @@ static void aer_recover_work_func(struct work_struct *work)
PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
continue;
}
- cper_print_aer(pdev, entry.severity, entry.regs);
+ pci_print_aer(pdev, entry.severity, entry.regs);
/*
* Memory for aer_capability_regs(entry.regs) is being allocated from the
* ghes_estatus_pool to protect it from overwriting when multiple sections
@@ -1348,6 +1494,7 @@ static int aer_probe(struct pcie_device *dev)
return status;
}
+ cxl_rch_enable_rcec(port);
aer_enable_rootport(rpc);
pci_info(port, "enabled with IRQ %d\n", dev->irq);
return 0;
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 5658745c398f..b33be1e63c98 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -605,6 +605,7 @@ static int pccardd(void *__skt)
dev_warn(&skt->dev, "PCMCIA: unable to register socket\n");
skt->thread = NULL;
complete(&skt->thread_done);
+ put_device(&skt->dev);
return 0;
}
ret = pccard_sysfs_add_socket(&skt->dev);
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index d500e5dbbc3f..b4b8363d1de2 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -513,9 +513,6 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s,
/* by default don't allow DMA */
p_dev->dma_mask = 0;
p_dev->dev.dma_mask = &p_dev->dma_mask;
- dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no);
- if (!dev_name(&p_dev->dev))
- goto err_free;
p_dev->devname = kasprintf(GFP_KERNEL, "pcmcia%s", dev_name(&p_dev->dev));
if (!p_dev->devname)
goto err_free;
@@ -573,8 +570,15 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s,
pcmcia_device_query(p_dev);
- if (device_register(&p_dev->dev))
- goto err_unreg;
+ dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no);
+ if (device_register(&p_dev->dev)) {
+ mutex_lock(&s->ops_mutex);
+ list_del(&p_dev->socket_device_list);
+ s->device_count--;
+ mutex_unlock(&s->ops_mutex);
+ put_device(&p_dev->dev);
+ return NULL;
+ }
return p_dev;
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index d78091e79a0f..e9e31c638a67 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -684,7 +684,7 @@ EXPORT_SYMBOL(pcmcia_request_io);
* pcmcia_request_irq() is a wrapper around request_irq() which allows
* the PCMCIA core to clean up the registration in pcmcia_disable_device().
* Drivers are free to use request_irq() directly, but then they need to
- * call free_irq() themselfves, too. Also, only %IRQF_SHARED capable IRQ
+ * call free_irq() themselves, too. Also, only %IRQF_SHARED capable IRQ
* handlers are allowed.
*/
int __must_check pcmcia_request_irq(struct pcmcia_device *p_dev,
diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c
index 1a0e3f098759..5ef888688e23 100644
--- a/drivers/pcmcia/tcic.c
+++ b/drivers/pcmcia/tcic.c
@@ -435,7 +435,7 @@ static int __init init_tcic(void)
}
/* Set up polling */
- timer_setup(&poll_timer, &tcic_timer, 0);
+ timer_setup(&poll_timer, tcic_timer, 0);
/* Build interrupt mask */
printk(KERN_CONT ", %d sockets\n", sockets);
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 96c7f670c8f0..16acd4dcdb96 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -22,7 +22,7 @@
#include <asm/errata_list.h>
#include <asm/sbi.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
#define SYSCTL_NO_USER_ACCESS 0
#define SYSCTL_USER_ACCESS 1
@@ -543,8 +543,7 @@ static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
(hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
- on_each_cpu_mask(mm_cpumask(event->owner->mm),
- pmu_sbi_set_scounteren, (void *)event, 1);
+ pmu_sbi_set_scounteren((void *)event);
}
static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
@@ -554,8 +553,7 @@ static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
(hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
- on_each_cpu_mask(mm_cpumask(event->owner->mm),
- pmu_sbi_reset_scounteren, (void *)event, 1);
+ pmu_sbi_reset_scounteren((void *)event);
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0);
if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) &&
@@ -689,6 +687,11 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
/* Firmware counter don't support overflow yet */
fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS);
+ if (fidx == RISCV_MAX_COUNTERS) {
+ csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
+ return IRQ_NONE;
+ }
+
event = cpu_hw_evt->events[fidx];
if (!event) {
csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index d1670bbe6d6b..787354b849c7 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -18,7 +18,7 @@ config GENERIC_PHY
config GENERIC_PHY_MIPI_DPHY
bool
- depends on GENERIC_PHY
+ select GENERIC_PHY
help
Generic MIPI D-PHY support.
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index ec551464dd4f..e53a9a9317bc 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -782,7 +782,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
for (i = 0; i < data->cfg->num_phys; i++) {
struct sun4i_usb_phy *phy = data->phys + i;
- char name[16];
+ char name[32];
if (data->cfg->missing_phys & BIT(i))
continue;
diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb3.c b/drivers/phy/broadcom/phy-bcm-ns-usb3.c
index 69584b685edb..2c8b1b7dda5b 100644
--- a/drivers/phy/broadcom/phy-bcm-ns-usb3.c
+++ b/drivers/phy/broadcom/phy-bcm-ns-usb3.c
@@ -16,10 +16,11 @@
#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
+#include <linux/property.h>
#include <linux/slab.h>
#define BCM_NS_USB3_PHY_BASE_ADDR_REG 0x1f
@@ -189,7 +190,6 @@ static int bcm_ns_usb3_mdio_phy_write(struct bcm_ns_usb3 *usb3, u16 reg,
static int bcm_ns_usb3_mdio_probe(struct mdio_device *mdiodev)
{
struct device *dev = &mdiodev->dev;
- const struct of_device_id *of_id;
struct phy_provider *phy_provider;
struct device_node *syscon_np;
struct bcm_ns_usb3 *usb3;
@@ -203,10 +203,7 @@ static int bcm_ns_usb3_mdio_probe(struct mdio_device *mdiodev)
usb3->dev = dev;
usb3->mdiodev = mdiodev;
- of_id = of_match_device(bcm_ns_usb3_id_table, dev);
- if (!of_id)
- return -EINVAL;
- usb3->family = (uintptr_t)of_id->data;
+ usb3->family = (enum bcm_ns_family)device_get_match_data(dev);
syscon_np = of_parse_phandle(dev->of_node, "usb3-dmp-syscon", 0);
err = of_address_to_resource(syscon_np, 0, &res);
diff --git a/drivers/phy/marvell/phy-berlin-usb.c b/drivers/phy/marvell/phy-berlin-usb.c
index 78ef6ae72a9a..f26bf630da2c 100644
--- a/drivers/phy/marvell/phy-berlin-usb.c
+++ b/drivers/phy/marvell/phy-berlin-usb.c
@@ -8,9 +8,10 @@
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/reset.h>
#define USB_PHY_PLL 0x04
@@ -162,8 +163,6 @@ MODULE_DEVICE_TABLE(of, phy_berlin_usb_of_match);
static int phy_berlin_usb_probe(struct platform_device *pdev)
{
- const struct of_device_id *match =
- of_match_device(phy_berlin_usb_of_match, &pdev->dev);
struct phy_berlin_usb_priv *priv;
struct phy *phy;
struct phy_provider *phy_provider;
@@ -180,7 +179,7 @@ static int phy_berlin_usb_probe(struct platform_device *pdev)
if (IS_ERR(priv->rst_ctrl))
return PTR_ERR(priv->rst_ctrl);
- priv->pll_divider = *((u32 *)match->data);
+ priv->pll_divider = *((u32 *)device_get_match_data(&pdev->dev));
phy = devm_phy_create(&pdev->dev, NULL, &phy_berlin_usb_ops);
if (IS_ERR(phy)) {
diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c
index 74333e814221..7bbf729a7c90 100644
--- a/drivers/phy/motorola/phy-cpcap-usb.c
+++ b/drivers/phy/motorola/phy-cpcap-usb.c
@@ -15,7 +15,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/iio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
@@ -612,14 +611,8 @@ static int cpcap_usb_phy_probe(struct platform_device *pdev)
struct phy *generic_phy;
struct phy_provider *phy_provider;
struct usb_otg *otg;
- const struct of_device_id *of_id;
int error;
- of_id = of_match_device(of_match_ptr(cpcap_usb_phy_id_table),
- &pdev->dev);
- if (!of_id)
- return -EINVAL;
-
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
index 52c275fbb2a1..a623f092b11f 100644
--- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
@@ -24,23 +24,73 @@
#define EUSB2_FORCE_VAL_5 0xeD
#define V_CLK_19P2M_EN BIT(6)
+#define EUSB2_TUNE_USB2_CROSSOVER 0x50
#define EUSB2_TUNE_IUSB2 0x51
+#define EUSB2_TUNE_RES_FSDIF 0x52
+#define EUSB2_TUNE_HSDISC 0x53
#define EUSB2_TUNE_SQUELCH_U 0x54
+#define EUSB2_TUNE_USB2_SLEW 0x55
+#define EUSB2_TUNE_USB2_EQU 0x56
#define EUSB2_TUNE_USB2_PREEM 0x57
+#define EUSB2_TUNE_USB2_HS_COMP_CUR 0x58
+#define EUSB2_TUNE_EUSB_SLEW 0x59
+#define EUSB2_TUNE_EUSB_EQU 0x5A
+#define EUSB2_TUNE_EUSB_HS_COMP_CUR 0x5B
-#define QCOM_EUSB2_REPEATER_INIT_CFG(o, v) \
+#define QCOM_EUSB2_REPEATER_INIT_CFG(r, v) \
{ \
- .offset = o, \
+ .reg = r, \
.val = v, \
}
-struct eusb2_repeater_init_tbl {
- unsigned int offset;
- unsigned int val;
+enum reg_fields {
+ F_TUNE_EUSB_HS_COMP_CUR,
+ F_TUNE_EUSB_EQU,
+ F_TUNE_EUSB_SLEW,
+ F_TUNE_USB2_HS_COMP_CUR,
+ F_TUNE_USB2_PREEM,
+ F_TUNE_USB2_EQU,
+ F_TUNE_USB2_SLEW,
+ F_TUNE_SQUELCH_U,
+ F_TUNE_HSDISC,
+ F_TUNE_RES_FSDIF,
+ F_TUNE_IUSB2,
+ F_TUNE_USB2_CROSSOVER,
+ F_NUM_TUNE_FIELDS,
+
+ F_FORCE_VAL_5 = F_NUM_TUNE_FIELDS,
+ F_FORCE_EN_5,
+
+ F_EN_CTL1,
+
+ F_RPTR_STATUS,
+ F_NUM_FIELDS,
+};
+
+static struct reg_field eusb2_repeater_tune_reg_fields[F_NUM_FIELDS] = {
+ [F_TUNE_EUSB_HS_COMP_CUR] = REG_FIELD(EUSB2_TUNE_EUSB_HS_COMP_CUR, 0, 1),
+ [F_TUNE_EUSB_EQU] = REG_FIELD(EUSB2_TUNE_EUSB_EQU, 0, 1),
+ [F_TUNE_EUSB_SLEW] = REG_FIELD(EUSB2_TUNE_EUSB_SLEW, 0, 1),
+ [F_TUNE_USB2_HS_COMP_CUR] = REG_FIELD(EUSB2_TUNE_USB2_HS_COMP_CUR, 0, 1),
+ [F_TUNE_USB2_PREEM] = REG_FIELD(EUSB2_TUNE_USB2_PREEM, 0, 2),
+ [F_TUNE_USB2_EQU] = REG_FIELD(EUSB2_TUNE_USB2_EQU, 0, 1),
+ [F_TUNE_USB2_SLEW] = REG_FIELD(EUSB2_TUNE_USB2_SLEW, 0, 1),
+ [F_TUNE_SQUELCH_U] = REG_FIELD(EUSB2_TUNE_SQUELCH_U, 0, 2),
+ [F_TUNE_HSDISC] = REG_FIELD(EUSB2_TUNE_HSDISC, 0, 2),
+ [F_TUNE_RES_FSDIF] = REG_FIELD(EUSB2_TUNE_RES_FSDIF, 0, 2),
+ [F_TUNE_IUSB2] = REG_FIELD(EUSB2_TUNE_IUSB2, 0, 3),
+ [F_TUNE_USB2_CROSSOVER] = REG_FIELD(EUSB2_TUNE_USB2_CROSSOVER, 0, 2),
+
+ [F_FORCE_VAL_5] = REG_FIELD(EUSB2_FORCE_VAL_5, 0, 7),
+ [F_FORCE_EN_5] = REG_FIELD(EUSB2_FORCE_EN_5, 0, 7),
+
+ [F_EN_CTL1] = REG_FIELD(EUSB2_EN_CTL1, 0, 7),
+
+ [F_RPTR_STATUS] = REG_FIELD(EUSB2_RPTR_STATUS, 0, 7),
};
struct eusb2_repeater_cfg {
- const struct eusb2_repeater_init_tbl *init_tbl;
+ const u32 *init_tbl;
int init_tbl_num;
const char * const *vreg_list;
int num_vregs;
@@ -48,11 +98,10 @@ struct eusb2_repeater_cfg {
struct eusb2_repeater {
struct device *dev;
- struct regmap *regmap;
+ struct regmap_field *regs[F_NUM_FIELDS];
struct phy *phy;
struct regulator_bulk_data *vregs;
const struct eusb2_repeater_cfg *cfg;
- u16 base;
enum phy_mode mode;
};
@@ -60,10 +109,10 @@ static const char * const pm8550b_vreg_l[] = {
"vdd18", "vdd3",
};
-static const struct eusb2_repeater_init_tbl pm8550b_init_tbl[] = {
- QCOM_EUSB2_REPEATER_INIT_CFG(EUSB2_TUNE_IUSB2, 0x8),
- QCOM_EUSB2_REPEATER_INIT_CFG(EUSB2_TUNE_SQUELCH_U, 0x3),
- QCOM_EUSB2_REPEATER_INIT_CFG(EUSB2_TUNE_USB2_PREEM, 0x5),
+static const u32 pm8550b_init_tbl[F_NUM_TUNE_FIELDS] = {
+ [F_TUNE_IUSB2] = 0x8,
+ [F_TUNE_SQUELCH_U] = 0x3,
+ [F_TUNE_USB2_PREEM] = 0x5,
};
static const struct eusb2_repeater_cfg pm8550b_eusb2_cfg = {
@@ -91,9 +140,11 @@ static int eusb2_repeater_init_vregs(struct eusb2_repeater *rptr)
static int eusb2_repeater_init(struct phy *phy)
{
+ struct reg_field *regfields = eusb2_repeater_tune_reg_fields;
struct eusb2_repeater *rptr = phy_get_drvdata(phy);
- const struct eusb2_repeater_init_tbl *init_tbl = rptr->cfg->init_tbl;
- int num = rptr->cfg->init_tbl_num;
+ struct device_node *np = rptr->dev->of_node;
+ u32 init_tbl[F_NUM_TUNE_FIELDS] = { 0 };
+ u8 override;
u32 val;
int ret;
int i;
@@ -102,17 +153,34 @@ static int eusb2_repeater_init(struct phy *phy)
if (ret)
return ret;
- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_EN_CTL1,
- EUSB2_RPTR_EN, EUSB2_RPTR_EN);
+ regmap_field_update_bits(rptr->regs[F_EN_CTL1], EUSB2_RPTR_EN, EUSB2_RPTR_EN);
- for (i = 0; i < num; i++)
- regmap_update_bits(rptr->regmap,
- rptr->base + init_tbl[i].offset,
- init_tbl[i].val, init_tbl[i].val);
+ for (i = 0; i < F_NUM_TUNE_FIELDS; i++) {
+ if (init_tbl[i]) {
+ regmap_field_update_bits(rptr->regs[i], init_tbl[i], init_tbl[i]);
+ } else {
+ /* Write 0 if there's no value set */
+ u32 mask = GENMASK(regfields[i].msb, regfields[i].lsb);
+
+ regmap_field_update_bits(rptr->regs[i], mask, 0);
+ }
+ }
+ memcpy(init_tbl, rptr->cfg->init_tbl, sizeof(init_tbl));
+
+ if (!of_property_read_u8(np, "qcom,tune-usb2-amplitude", &override))
+ init_tbl[F_TUNE_IUSB2] = override;
+
+ if (!of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &override))
+ init_tbl[F_TUNE_HSDISC] = override;
- ret = regmap_read_poll_timeout(rptr->regmap,
- rptr->base + EUSB2_RPTR_STATUS, val,
- val & RPTR_OK, 10, 5);
+ if (!of_property_read_u8(np, "qcom,tune-usb2-preem", &override))
+ init_tbl[F_TUNE_USB2_PREEM] = override;
+
+ for (i = 0; i < F_NUM_TUNE_FIELDS; i++)
+ regmap_field_update_bits(rptr->regs[i], init_tbl[i], init_tbl[i]);
+
+ ret = regmap_field_read_poll_timeout(rptr->regs[F_RPTR_STATUS],
+ val, val & RPTR_OK, 10, 5);
if (ret)
dev_err(rptr->dev, "initialization timed-out\n");
@@ -131,10 +199,10 @@ static int eusb2_repeater_set_mode(struct phy *phy,
* per eUSB 1.2 Spec. Below implement software workaround until
* PHY and controller is fixing seen observation.
*/
- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_EN_5,
- F_CLK_19P2M_EN, F_CLK_19P2M_EN);
- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_VAL_5,
- V_CLK_19P2M_EN, V_CLK_19P2M_EN);
+ regmap_field_update_bits(rptr->regs[F_FORCE_EN_5],
+ F_CLK_19P2M_EN, F_CLK_19P2M_EN);
+ regmap_field_update_bits(rptr->regs[F_FORCE_VAL_5],
+ V_CLK_19P2M_EN, V_CLK_19P2M_EN);
break;
case PHY_MODE_USB_DEVICE:
/*
@@ -143,10 +211,10 @@ static int eusb2_repeater_set_mode(struct phy *phy,
* repeater doesn't clear previous value due to shared
* regulators (say host <-> device mode switch).
*/
- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_EN_5,
- F_CLK_19P2M_EN, 0);
- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_VAL_5,
- V_CLK_19P2M_EN, 0);
+ regmap_field_update_bits(rptr->regs[F_FORCE_EN_5],
+ F_CLK_19P2M_EN, 0);
+ regmap_field_update_bits(rptr->regs[F_FORCE_VAL_5],
+ V_CLK_19P2M_EN, 0);
break;
default:
return -EINVAL;
@@ -175,8 +243,9 @@ static int eusb2_repeater_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct phy_provider *phy_provider;
struct device_node *np = dev->of_node;
+ struct regmap *regmap;
+ int i, ret;
u32 res;
- int ret;
rptr = devm_kzalloc(dev, sizeof(*rptr), GFP_KERNEL);
if (!rptr)
@@ -189,15 +258,22 @@ static int eusb2_repeater_probe(struct platform_device *pdev)
if (!rptr->cfg)
return -EINVAL;
- rptr->regmap = dev_get_regmap(dev->parent, NULL);
- if (!rptr->regmap)
+ regmap = dev_get_regmap(dev->parent, NULL);
+ if (!regmap)
return -ENODEV;
ret = of_property_read_u32(np, "reg", &res);
if (ret < 0)
return ret;
- rptr->base = res;
+ for (i = 0; i < F_NUM_FIELDS; i++)
+ eusb2_repeater_tune_reg_fields[i].reg += res;
+
+ ret = devm_regmap_field_bulk_alloc(dev, regmap, rptr->regs,
+ eusb2_repeater_tune_reg_fields,
+ F_NUM_FIELDS);
+ if (ret)
+ return ret;
ret = eusb2_repeater_init_vregs(rptr);
if (ret < 0) {
diff --git a/drivers/phy/qualcomm/phy-qcom-m31.c b/drivers/phy/qualcomm/phy-qcom-m31.c
index 5cb7e79b99b3..c2590579190a 100644
--- a/drivers/phy/qualcomm/phy-qcom-m31.c
+++ b/drivers/phy/qualcomm/phy-qcom-m31.c
@@ -82,6 +82,50 @@ struct m31_priv_data {
unsigned int nregs;
};
+static const struct m31_phy_regs m31_ipq5018_regs[] = {
+ {
+ .off = USB_PHY_CFG0,
+ .val = UTMI_PHY_OVERRIDE_EN
+ },
+ {
+ .off = USB_PHY_UTMI_CTRL5,
+ .val = POR_EN,
+ .delay = 15
+ },
+ {
+ .off = USB_PHY_FSEL_SEL,
+ .val = FREQ_SEL
+ },
+ {
+ .off = USB_PHY_HS_PHY_CTRL_COMMON0,
+ .val = COMMONONN | FSEL | RETENABLEN
+ },
+ {
+ .off = USB_PHY_REFCLK_CTRL,
+ .val = CLKCORE
+ },
+ {
+ .off = USB_PHY_UTMI_CTRL5,
+ .val = POR_EN
+ },
+ {
+ .off = USB_PHY_HS_PHY_CTRL2,
+ .val = USB2_SUSPEND_N_SEL | USB2_SUSPEND_N | USB2_UTMI_CLK_EN
+ },
+ {
+ .off = USB_PHY_UTMI_CTRL5,
+ .val = 0x0
+ },
+ {
+ .off = USB_PHY_HS_PHY_CTRL2,
+ .val = USB2_SUSPEND_N | USB2_UTMI_CLK_EN
+ },
+ {
+ .off = USB_PHY_CFG0,
+ .val = 0x0
+ },
+};
+
static struct m31_phy_regs m31_ipq5332_regs[] = {
{
USB_PHY_CFG0,
@@ -241,7 +285,7 @@ static int m31usb_phy_probe(struct platform_device *pdev)
qphy->clk = devm_clk_get(dev, NULL);
if (IS_ERR(qphy->clk))
return dev_err_probe(dev, PTR_ERR(qphy->clk),
- "failed to get clk\n");
+ "failed to get clk\n");
data = of_device_get_match_data(dev);
qphy->regs = data->regs;
@@ -251,12 +295,12 @@ static int m31usb_phy_probe(struct platform_device *pdev)
qphy->phy = devm_phy_create(dev, NULL, &m31usb_phy_gen_ops);
if (IS_ERR(qphy->phy))
return dev_err_probe(dev, PTR_ERR(qphy->phy),
- "failed to create phy\n");
+ "failed to create phy\n");
qphy->vreg = devm_regulator_get(dev, "vdda-phy");
if (IS_ERR(qphy->vreg))
- return dev_err_probe(dev, PTR_ERR(qphy->vreg),
- "failed to get vreg\n");
+ return dev_err_probe(dev, PTR_ERR(qphy->vreg),
+ "failed to get vreg\n");
phy_set_drvdata(qphy->phy, qphy);
@@ -267,6 +311,12 @@ static int m31usb_phy_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(phy_provider);
}
+static const struct m31_priv_data m31_ipq5018_data = {
+ .ulpi_mode = false,
+ .regs = m31_ipq5018_regs,
+ .nregs = ARRAY_SIZE(m31_ipq5018_regs),
+};
+
static const struct m31_priv_data m31_ipq5332_data = {
.ulpi_mode = false,
.regs = m31_ipq5332_regs,
@@ -274,6 +324,7 @@ static const struct m31_priv_data m31_ipq5332_data = {
};
static const struct of_device_id m31usb_phy_id_table[] = {
+ { .compatible = "qcom,ipq5018-usb-hsphy", .data = &m31_ipq5018_data },
{ .compatible = "qcom,ipq5332-usb-hsphy", .data = &m31_ipq5332_data },
{ },
};
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 5e6fc8103e9d..9c87845c78ec 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -194,14 +194,14 @@ static const unsigned int qmp_v5_5nm_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
};
static const unsigned int qmp_v6_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_SW_RESET] = QPHY_V5_PCS_SW_RESET,
- [QPHY_START_CTRL] = QPHY_V5_PCS_START_CONTROL,
- [QPHY_PCS_STATUS] = QPHY_V5_PCS_PCS_STATUS1,
- [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V5_PCS_POWER_DOWN_CONTROL,
+ [QPHY_SW_RESET] = QPHY_V6_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V6_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V6_PCS_PCS_STATUS1,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V6_PCS_POWER_DOWN_CONTROL,
/* In PCS_USB */
- [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V5_PCS_USB3_AUTONOMOUS_MODE_CTRL,
- [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V5_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V6_PCS_USB3_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V6_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
[QPHY_COM_RESETSM_CNTRL] = QSERDES_V6_COM_RESETSM_CNTRL,
[QPHY_COM_C_READY_STATUS] = QSERDES_V6_COM_C_READY_STATUS,
@@ -845,28 +845,28 @@ static const struct qmp_phy_init_tbl sm8550_usb3_rx_tbl[] = {
};
static const struct qmp_phy_init_tbl sm8550_usb3_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_LOCK_DETECT_CONFIG1, 0xc4),
- QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_LOCK_DETECT_CONFIG2, 0x89),
- QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_LOCK_DETECT_CONFIG3, 0x20),
- QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_LOCK_DETECT_CONFIG6, 0x13),
- QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_REFGEN_REQ_CONFIG1, 0x21),
- QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_RX_SIGDET_LVL, 0x99),
- QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
- QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
- QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_CDR_RESET_TIME, 0x0a),
- QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_ALIGN_DETECT_CONFIG1, 0x88),
- QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_ALIGN_DETECT_CONFIG2, 0x13),
- QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_PCS_TX_RX_CONFIG, 0x0c),
- QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_EQ_CONFIG1, 0x4b),
- QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG1, 0xc4),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG2, 0x89),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_RX_SIGDET_LVL, 0x99),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG5, 0x10),
};
static const struct qmp_phy_init_tbl sm8550_usb3_pcs_usb_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_POWER_STATE_CONFIG1, 0x68),
- QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
- QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
- QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_RCVR_DTCT_DLY_U3_L, 0x40),
- QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_USB3_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_USB3_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_USB3_POWER_STATE_CONFIG1, 0x68),
};
static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl[] = {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
index a63ca7424974..b64598ac59f4 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
@@ -2147,6 +2147,34 @@ static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x4_pcie_rc_serdes_alt_tbl[]
QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_SELECT, 0x34),
};
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_ep_serdes_alt_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BG_TIMER, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYS_CLK_CTRL, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x27),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_INTEGLOOP_GAIN0_MODE0, 0xfb),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_INTEGLOOP_GAIN1_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_INTEGLOOP_GAIN0_MODE1, 0xfb),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_INTEGLOOP_GAIN1_MODE1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MODE, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0x28),
+};
+
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_ep_pcs_alt_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_INSIG_MX_CTRL7, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_INSIG_SW_CTRL7, 0x00),
+};
+
struct qmp_pcie_offsets {
u16 serdes;
u16 pcs;
@@ -3043,6 +3071,15 @@ static const struct qmp_phy_cfg sa8775p_qmp_gen4x2_pciephy_cfg = {
.pcs_misc_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_rc_pcs_misc_tbl),
},
+ .tbls_ep = &(const struct qmp_phy_cfg_tbls) {
+ .serdes = sa8775p_qmp_gen4x2_pcie_ep_serdes_alt_tbl,
+ .serdes_num = ARRAY_SIZE(sa8775p_qmp_gen4x2_pcie_ep_serdes_alt_tbl),
+ .pcs_misc = sm8450_qmp_gen4x2_pcie_ep_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_ep_pcs_misc_tbl),
+ .pcs = sa8775p_qmp_gen4x2_pcie_ep_pcs_alt_tbl,
+ .pcs_num = ARRAY_SIZE(sa8775p_qmp_gen4x2_pcie_ep_pcs_alt_tbl),
+ },
+
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-misc-v4.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-misc-v4.h
new file mode 100644
index 000000000000..e256a089f228
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-misc-v4.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_MISC_V4_H_
+#define QCOM_PHY_QMP_PCS_MISC_V4_H_
+
+/* Only for QMP V4 PHY - PCS_MISC registers */
+#define QPHY_V4_PCS_MISC_TYPEC_CTRL 0x00
+#define QPHY_V4_PCS_MISC_TYPEC_PWRDN_CTRL 0x04
+#define QPHY_V4_PCS_MISC_PCS_MISC_CONFIG1 0x08
+#define QPHY_V4_PCS_MISC_CLAMP_ENABLE 0x0c
+#define QPHY_V4_PCS_MISC_TYPEC_STATUS 0x10
+#define QPHY_V4_PCS_MISC_PLACEHOLDER_STATUS 0x14
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h
index c38530d6776b..df670143feb1 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h
@@ -6,27 +6,12 @@
#ifndef QCOM_PHY_QMP_PCS_USB_V6_H_
#define QCOM_PHY_QMP_PCS_USB_V6_H_
-/* Only for QMP V6 PHY - USB3 have different offsets than V5 */
-#define QPHY_USB_V6_PCS_LOCK_DETECT_CONFIG1 0xc4
-#define QPHY_USB_V6_PCS_LOCK_DETECT_CONFIG2 0xc8
-#define QPHY_USB_V6_PCS_LOCK_DETECT_CONFIG3 0xcc
-#define QPHY_USB_V6_PCS_LOCK_DETECT_CONFIG6 0xd8
-#define QPHY_USB_V6_PCS_REFGEN_REQ_CONFIG1 0xdc
-#define QPHY_USB_V6_PCS_POWER_STATE_CONFIG1 0x90
-#define QPHY_USB_V6_PCS_RX_SIGDET_LVL 0x188
-#define QPHY_USB_V6_PCS_RCVR_DTCT_DLY_P1U2_L 0x190
-#define QPHY_USB_V6_PCS_RCVR_DTCT_DLY_P1U2_H 0x194
-#define QPHY_USB_V6_PCS_CDR_RESET_TIME 0x1b0
-#define QPHY_USB_V6_PCS_ALIGN_DETECT_CONFIG1 0x1c0
-#define QPHY_USB_V6_PCS_ALIGN_DETECT_CONFIG2 0x1c4
-#define QPHY_USB_V6_PCS_PCS_TX_RX_CONFIG 0x1d0
-#define QPHY_USB_V6_PCS_EQ_CONFIG1 0x1dc
-#define QPHY_USB_V6_PCS_EQ_CONFIG5 0x1ec
-
-#define QPHY_USB_V6_PCS_USB3_POWER_STATE_CONFIG1 0x00
-#define QPHY_USB_V6_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL 0x18
-#define QPHY_USB_V6_PCS_USB3_RXEQTRAINING_DFE_TIME_S2 0x3c
-#define QPHY_USB_V6_PCS_USB3_RCVR_DTCT_DLY_U3_L 0x40
-#define QPHY_USB_V6_PCS_USB3_RCVR_DTCT_DLY_U3_H 0x44
+#define QPHY_V6_PCS_USB3_POWER_STATE_CONFIG1 0x00
+#define QPHY_V6_PCS_USB3_AUTONOMOUS_MODE_CTRL 0x08
+#define QPHY_V6_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR 0x14
+#define QPHY_V6_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL 0x18
+#define QPHY_V6_PCS_USB3_RXEQTRAINING_DFE_TIME_S2 0x3c
+#define QPHY_V6_PCS_USB3_RCVR_DTCT_DLY_U3_L 0x40
+#define QPHY_V6_PCS_USB3_RCVR_DTCT_DLY_U3_H 0x44
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h
index f0754b6f9e3a..d3ad5b7f5425 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h
@@ -6,6 +6,8 @@
#ifndef QCOM_PHY_QMP_PCS_V5_20_H_
#define QCOM_PHY_QMP_PCS_V5_20_H_
+#define QPHY_V5_20_PCS_INSIG_SW_CTRL7 0x060
+#define QPHY_V5_20_PCS_INSIG_MX_CTRL7 0x07c
#define QPHY_V5_20_PCS_G3S2_PRE_GAIN 0x170
#define QPHY_V5_20_PCS_RX_SIGDET_LVL 0x188
#define QPHY_V5_20_PCS_EQ_CONFIG2 0x1d8
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6.h
index 18c4a3abe590..08299d2b78f0 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6.h
@@ -7,10 +7,26 @@
#define QCOM_PHY_QMP_PCS_V6_H_
/* Only for QMP V6 PHY - USB/PCIe PCS registers */
-#define QPHY_V6_PCS_REFGEN_REQ_CONFIG1 0xdc
+#define QPHY_V6_PCS_SW_RESET 0x000
+#define QPHY_V6_PCS_PCS_STATUS1 0x014
+#define QPHY_V6_PCS_POWER_DOWN_CONTROL 0x040
+#define QPHY_V6_PCS_START_CONTROL 0x044
+#define QPHY_V6_PCS_POWER_STATE_CONFIG1 0x090
+#define QPHY_V6_PCS_LOCK_DETECT_CONFIG1 0x0c4
+#define QPHY_V6_PCS_LOCK_DETECT_CONFIG2 0x0c8
+#define QPHY_V6_PCS_LOCK_DETECT_CONFIG3 0x0cc
+#define QPHY_V6_PCS_LOCK_DETECT_CONFIG6 0x0d8
+#define QPHY_V6_PCS_REFGEN_REQ_CONFIG1 0x0dc
#define QPHY_V6_PCS_RX_SIGDET_LVL 0x188
+#define QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_L 0x190
+#define QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_H 0x194
#define QPHY_V6_PCS_RATE_SLEW_CNTRL1 0x198
-#define QPHY_V6_PCS_EQ_CONFIG2 0x1e0
+#define QPHY_V6_PCS_CDR_RESET_TIME 0x1b0
+#define QPHY_V6_PCS_ALIGN_DETECT_CONFIG1 0x1c0
+#define QPHY_V6_PCS_ALIGN_DETECT_CONFIG2 0x1c4
#define QPHY_V6_PCS_PCS_TX_RX_CONFIG 0x1d0
+#define QPHY_V6_PCS_EQ_CONFIG1 0x1dc
+#define QPHY_V6_PCS_EQ_CONFIG2 0x1e0
+#define QPHY_V6_PCS_EQ_CONFIG5 0x1ec
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
index 3927eba8e468..514fa14df634 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
@@ -177,6 +177,111 @@ static const struct qmp_phy_init_tbl msm8996_ufsphy_rx[] = {
QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0E),
};
+static const struct qmp_phy_init_tbl sc7280_ufsphy_tx[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_1_DIVIDER_BAND0_1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_2_DIVIDER_BAND0_1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_3_DIVIDER_BAND0_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_4_DIVIDER_BAND0_1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x35),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_TRAN_DRVR_EMP_EN, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sc7280_ufsphy_rx[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_LVL, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_BAND, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x5a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0xf1),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_TERM_BW, 0x1b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_MEASURE_TIME, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x6d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x6d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xed),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x3c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xe0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb1),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0xe0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0xb1),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sc7280_ufsphy_pcs[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_RX_SIGDET_CTRL2, 0x6d),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TX_LARGE_AMP_DRV_LVL, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TX_SMALL_AMP_DRV_LVL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TX_MID_TERM_CTRL1, 0x43),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_DEBUG_BUS_CLKSEL, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_RX_MIN_HIBERN8_TIME, 0xff),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_MULTI_LANE_CTRL1, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_PLL_CNTL, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TIMER_20US_CORECLK_STEPS_MSB, 0x16),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TIMER_20US_CORECLK_STEPS_LSB, 0xd8),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TX_PWM_GEAR_BAND, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TX_HS_GEAR_BAND, 0x06),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TX_HSGEAR_CAPABILITY, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_RX_HSGEAR_CAPABILITY, 0x03),
+};
+
+static const struct qmp_phy_init_tbl sc7280_ufsphy_hs_g4_rx[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_LVL, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_BAND, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x5a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0xf1),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CTRL2, 0x81),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_TERM_BW, 0x6f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_MEASURE_TIME, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x2c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0x6d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0x6d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xed),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0x3c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0xe0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0xb1),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x0f),
+};
+
static const struct qmp_phy_init_tbl sm6115_ufsphy_serdes[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x0e),
QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
@@ -888,6 +993,40 @@ static const struct qmp_phy_cfg sa8775p_ufsphy_cfg = {
.regs = ufsphy_v5_regs_layout,
};
+static const struct qmp_phy_cfg sc7280_ufsphy_cfg = {
+ .lanes = 2,
+
+ .offsets = &qmp_ufs_offsets,
+
+ .tbls = {
+ .serdes = sm8150_ufsphy_serdes,
+ .serdes_num = ARRAY_SIZE(sm8150_ufsphy_serdes),
+ .tx = sc7280_ufsphy_tx,
+ .tx_num = ARRAY_SIZE(sc7280_ufsphy_tx),
+ .rx = sc7280_ufsphy_rx,
+ .rx_num = ARRAY_SIZE(sc7280_ufsphy_rx),
+ .pcs = sc7280_ufsphy_pcs,
+ .pcs_num = ARRAY_SIZE(sc7280_ufsphy_pcs),
+ },
+ .tbls_hs_b = {
+ .serdes = sm8150_ufsphy_hs_b_serdes,
+ .serdes_num = ARRAY_SIZE(sm8150_ufsphy_hs_b_serdes),
+ },
+ .tbls_hs_g4 = {
+ .tx = sm8250_ufsphy_hs_g4_tx,
+ .tx_num = ARRAY_SIZE(sm8250_ufsphy_hs_g4_tx),
+ .rx = sc7280_ufsphy_hs_g4_rx,
+ .rx_num = ARRAY_SIZE(sc7280_ufsphy_hs_g4_rx),
+ .pcs = sm8150_ufsphy_hs_g4_pcs,
+ .pcs_num = ARRAY_SIZE(sm8150_ufsphy_hs_g4_pcs),
+ },
+ .clk_list = sm8450_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(sm8450_ufs_phy_clk_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = ufsphy_v4_regs_layout,
+};
+
static const struct qmp_phy_cfg sc8280xp_ufsphy_cfg = {
.lanes = 2,
@@ -1649,6 +1788,9 @@ static const struct of_device_id qmp_ufs_of_match_table[] = {
.compatible = "qcom,sa8775p-qmp-ufs-phy",
.data = &sa8775p_ufsphy_cfg,
}, {
+ .compatible = "qcom,sc7280-qmp-ufs-phy",
+ .data = &sc7280_ufsphy_cfg,
+ }, {
.compatible = "qcom,sc8180x-qmp-ufs-phy",
.data = &sm8150_ufsphy_cfg,
}, {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index c69577601ae0..02f156298e77 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -21,6 +21,7 @@
#include "phy-qcom-qmp.h"
#include "phy-qcom-qmp-pcs-misc-v3.h"
+#include "phy-qcom-qmp-pcs-misc-v4.h"
#include "phy-qcom-qmp-pcs-usb-v4.h"
#include "phy-qcom-qmp-pcs-usb-v5.h"
@@ -94,6 +95,7 @@ enum qphy_reg_layout {
QPHY_PCS_AUTONOMOUS_MODE_CTRL,
QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
QPHY_PCS_POWER_DOWN_CONTROL,
+ QPHY_PCS_MISC_CLAMP_ENABLE,
/* Keep last to ensure regs_layout arrays are properly initialized */
QPHY_LAYOUT_SIZE
};
@@ -114,6 +116,16 @@ static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V3_PCS_AUTONOMOUS_MODE_CTRL,
[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V3_PCS_LFPS_RXTERM_IRQ_CLEAR,
[QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V3_PCS_POWER_DOWN_CONTROL,
+ [QPHY_PCS_MISC_CLAMP_ENABLE] = QPHY_V3_PCS_MISC_CLAMP_ENABLE,
+};
+
+static const unsigned int qmp_v3_usb3phy_regs_layout_qcm2290[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V3_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V3_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V3_PCS_PCS_STATUS,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V3_PCS_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V3_PCS_LFPS_RXTERM_IRQ_CLEAR,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V3_PCS_POWER_DOWN_CONTROL,
};
static const unsigned int qmp_v4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
@@ -125,6 +137,7 @@ static const unsigned int qmp_v4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
/* In PCS_USB */
[QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V4_PCS_USB3_AUTONOMOUS_MODE_CTRL,
[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V4_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
+ [QPHY_PCS_MISC_CLAMP_ENABLE] = QPHY_V4_PCS_MISC_CLAMP_ENABLE,
};
static const unsigned int qmp_v5_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
@@ -1178,12 +1191,6 @@ struct qmp_phy_cfg {
const struct qmp_phy_init_tbl *pcs_usb_tbl;
int pcs_usb_tbl_num;
- /* clock ids to be requested */
- const char * const *clk_list;
- int num_clks;
- /* resets to be requested */
- const char * const *reset_list;
- int num_resets;
/* regulators to be requested */
const char * const *vreg_list;
int num_vregs;
@@ -1214,6 +1221,8 @@ struct qmp_usb {
struct clk *pipe_clk;
struct clk_bulk_data *clks;
+ int num_clks;
+ int num_resets;
struct reset_control_bulk_data *resets;
struct regulator_bulk_data *vregs;
@@ -1249,37 +1258,16 @@ static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
}
/* list of clocks required by phy */
-static const char * const msm8996_phy_clk_l[] = {
- "aux", "cfg_ahb", "ref",
-};
-
-static const char * const qmp_v3_phy_clk_l[] = {
+static const char * const qmp_usb_phy_clk_l[] = {
"aux", "cfg_ahb", "ref", "com_aux",
};
-static const char * const qmp_v4_phy_clk_l[] = {
- "aux", "ref", "com_aux",
-};
-
-static const char * const qmp_v4_ref_phy_clk_l[] = {
- "aux", "ref_clk_src", "ref", "com_aux",
-};
-
-/* usb3 phy on sdx55 doesn't have com_aux clock */
-static const char * const qmp_v4_sdx55_usbphy_clk_l[] = {
- "aux", "cfg_ahb", "ref"
-};
-
-static const char * const qcm2290_usb3phy_clk_l[] = {
- "cfg_ahb", "ref", "com_aux",
-};
-
/* list of resets */
-static const char * const msm8996_usb3phy_reset_l[] = {
+static const char * const usb3phy_legacy_reset_l[] = {
"phy", "common",
};
-static const char * const qcm2290_usb3phy_reset_l[] = {
+static const char * const usb3phy_reset_l[] = {
"phy_phy", "phy",
};
@@ -1298,6 +1286,13 @@ static const struct qmp_usb_offsets qmp_usb_offsets_ipq9574 = {
static const struct qmp_usb_offsets qmp_usb_offsets_v3 = {
.serdes = 0,
+ .pcs = 0x600,
+ .tx = 0x200,
+ .rx = 0x400,
+};
+
+static const struct qmp_usb_offsets qmp_usb_offsets_v3_qcm2290 = {
+ .serdes = 0x0,
.pcs = 0xc00,
.pcs_misc = 0xa00,
.tx = 0x200,
@@ -1306,6 +1301,14 @@ static const struct qmp_usb_offsets qmp_usb_offsets_v3 = {
.rx2 = 0x800,
};
+static const struct qmp_usb_offsets qmp_usb_offsets_v4 = {
+ .serdes = 0,
+ .pcs = 0x0800,
+ .pcs_usb = 0x0e00,
+ .tx = 0x0200,
+ .rx = 0x0400,
+};
+
static const struct qmp_usb_offsets qmp_usb_offsets_v5 = {
.serdes = 0,
.pcs = 0x0200,
@@ -1317,6 +1320,8 @@ static const struct qmp_usb_offsets qmp_usb_offsets_v5 = {
static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = {
.lanes = 1,
+ .offsets = &qmp_usb_offsets_v3,
+
.serdes_tbl = ipq8074_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(ipq8074_usb3_serdes_tbl),
.tx_tbl = msm8996_usb3_tx_tbl,
@@ -1325,10 +1330,6 @@ static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = {
.rx_tbl_num = ARRAY_SIZE(ipq8074_usb3_rx_tbl),
.pcs_tbl = ipq8074_usb3_pcs_tbl,
.pcs_tbl_num = ARRAY_SIZE(ipq8074_usb3_pcs_tbl),
- .clk_list = msm8996_phy_clk_l,
- .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = qmp_v3_usb3phy_regs_layout,
@@ -1347,10 +1348,6 @@ static const struct qmp_phy_cfg ipq9574_usb3phy_cfg = {
.rx_tbl_num = ARRAY_SIZE(ipq9574_usb3_rx_tbl),
.pcs_tbl = ipq9574_usb3_pcs_tbl,
.pcs_tbl_num = ARRAY_SIZE(ipq9574_usb3_pcs_tbl),
- .clk_list = msm8996_phy_clk_l,
- .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
- .reset_list = qcm2290_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(qcm2290_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = qmp_v3_usb3phy_regs_layout,
@@ -1359,6 +1356,8 @@ static const struct qmp_phy_cfg ipq9574_usb3phy_cfg = {
static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
.lanes = 1,
+ .offsets = &qmp_usb_offsets_v3,
+
.serdes_tbl = msm8996_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(msm8996_usb3_serdes_tbl),
.tx_tbl = msm8996_usb3_tx_tbl,
@@ -1367,10 +1366,6 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
.rx_tbl_num = ARRAY_SIZE(msm8996_usb3_rx_tbl),
.pcs_tbl = msm8996_usb3_pcs_tbl,
.pcs_tbl_num = ARRAY_SIZE(msm8996_usb3_pcs_tbl),
- .clk_list = msm8996_phy_clk_l,
- .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = qmp_v2_usb3phy_regs_layout,
@@ -1391,10 +1386,6 @@ static const struct qmp_phy_cfg sa8775p_usb3_uniphy_cfg = {
.pcs_tbl_num = ARRAY_SIZE(sa8775p_usb3_uniphy_pcs_tbl),
.pcs_usb_tbl = sa8775p_usb3_uniphy_pcs_usb_tbl,
.pcs_usb_tbl_num = ARRAY_SIZE(sa8775p_usb3_uniphy_pcs_usb_tbl),
- .clk_list = qmp_v4_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
- .reset_list = qcm2290_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(qcm2290_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = qmp_v5_usb3phy_regs_layout,
@@ -1415,10 +1406,6 @@ static const struct qmp_phy_cfg sc8280xp_usb3_uniphy_cfg = {
.pcs_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_pcs_tbl),
.pcs_usb_tbl = sc8280xp_usb3_uniphy_pcs_usb_tbl,
.pcs_usb_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_pcs_usb_tbl),
- .clk_list = qmp_v4_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
- .reset_list = qcm2290_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(qcm2290_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = qmp_v5_usb3phy_regs_layout,
@@ -1427,6 +1414,8 @@ static const struct qmp_phy_cfg sc8280xp_usb3_uniphy_cfg = {
static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
.lanes = 1,
+ .offsets = &qmp_usb_offsets_v3,
+
.serdes_tbl = qmp_v3_usb3_uniphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_serdes_tbl),
.tx_tbl = qmp_v3_usb3_uniphy_tx_tbl,
@@ -1435,10 +1424,6 @@ static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
.rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_rx_tbl),
.pcs_tbl = qmp_v3_usb3_uniphy_pcs_tbl,
.pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_pcs_tbl),
- .clk_list = qmp_v3_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = qmp_v3_usb3phy_regs_layout,
@@ -1449,6 +1434,8 @@ static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
.lanes = 2,
+ .offsets = &qmp_usb_offsets_v3_qcm2290,
+
.serdes_tbl = msm8998_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(msm8998_usb3_serdes_tbl),
.tx_tbl = msm8998_usb3_tx_tbl,
@@ -1457,10 +1444,6 @@ static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
.rx_tbl_num = ARRAY_SIZE(msm8998_usb3_rx_tbl),
.pcs_tbl = msm8998_usb3_pcs_tbl,
.pcs_tbl_num = ARRAY_SIZE(msm8998_usb3_pcs_tbl),
- .clk_list = msm8996_phy_clk_l,
- .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = qmp_v3_usb3phy_regs_layout,
@@ -1469,6 +1452,8 @@ static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = {
.lanes = 1,
+ .offsets = &qmp_usb_offsets_v4,
+
.serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
.tx_tbl = sm8150_usb3_uniphy_tx_tbl,
@@ -1479,10 +1464,6 @@ static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = {
.pcs_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_pcs_tbl),
.pcs_usb_tbl = sm8150_usb3_uniphy_pcs_usb_tbl,
.pcs_usb_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_pcs_usb_tbl),
- .clk_list = qmp_v4_ref_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_ref_phy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = qmp_v4_usb3phy_regs_layout,
@@ -1494,6 +1475,8 @@ static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = {
static const struct qmp_phy_cfg sm8250_usb3_uniphy_cfg = {
.lanes = 1,
+ .offsets = &qmp_usb_offsets_v4,
+
.serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
.tx_tbl = sm8250_usb3_uniphy_tx_tbl,
@@ -1504,10 +1487,6 @@ static const struct qmp_phy_cfg sm8250_usb3_uniphy_cfg = {
.pcs_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_pcs_tbl),
.pcs_usb_tbl = sm8250_usb3_uniphy_pcs_usb_tbl,
.pcs_usb_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_pcs_usb_tbl),
- .clk_list = qmp_v4_ref_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_ref_phy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = qmp_v4_usb3phy_regs_layout,
@@ -1519,6 +1498,8 @@ static const struct qmp_phy_cfg sm8250_usb3_uniphy_cfg = {
static const struct qmp_phy_cfg sdx55_usb3_uniphy_cfg = {
.lanes = 1,
+ .offsets = &qmp_usb_offsets_v4,
+
.serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
.tx_tbl = sdx55_usb3_uniphy_tx_tbl,
@@ -1529,10 +1510,6 @@ static const struct qmp_phy_cfg sdx55_usb3_uniphy_cfg = {
.pcs_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_pcs_tbl),
.pcs_usb_tbl = sm8250_usb3_uniphy_pcs_usb_tbl,
.pcs_usb_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_pcs_usb_tbl),
- .clk_list = qmp_v4_sdx55_usbphy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_sdx55_usbphy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = qmp_v4_usb3phy_regs_layout,
@@ -1544,6 +1521,8 @@ static const struct qmp_phy_cfg sdx55_usb3_uniphy_cfg = {
static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = {
.lanes = 1,
+ .offsets = &qmp_usb_offsets_v5,
+
.serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
.tx_tbl = sdx65_usb3_uniphy_tx_tbl,
@@ -1554,10 +1533,6 @@ static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = {
.pcs_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_pcs_tbl),
.pcs_usb_tbl = sm8350_usb3_uniphy_pcs_usb_tbl,
.pcs_usb_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_pcs_usb_tbl),
- .clk_list = qmp_v4_sdx55_usbphy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_sdx55_usbphy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = qmp_v5_usb3phy_regs_layout,
@@ -1569,6 +1544,8 @@ static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = {
static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
.lanes = 1,
+ .offsets = &qmp_usb_offsets_v5,
+
.serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
.tx_tbl = sm8350_usb3_uniphy_tx_tbl,
@@ -1579,10 +1556,6 @@ static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
.pcs_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_pcs_tbl),
.pcs_usb_tbl = sm8350_usb3_uniphy_pcs_usb_tbl,
.pcs_usb_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_pcs_usb_tbl),
- .clk_list = qmp_v4_ref_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_ref_phy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = qmp_v5_usb3phy_regs_layout,
@@ -1594,7 +1567,7 @@ static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = {
.lanes = 2,
- .offsets = &qmp_usb_offsets_v3,
+ .offsets = &qmp_usb_offsets_v3_qcm2290,
.serdes_tbl = qcm2290_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qcm2290_usb3_serdes_tbl),
@@ -1604,13 +1577,9 @@ static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = {
.rx_tbl_num = ARRAY_SIZE(qcm2290_usb3_rx_tbl),
.pcs_tbl = qcm2290_usb3_pcs_tbl,
.pcs_tbl_num = ARRAY_SIZE(qcm2290_usb3_pcs_tbl),
- .clk_list = qcm2290_usb3phy_clk_l,
- .num_clks = ARRAY_SIZE(qcm2290_usb3phy_clk_l),
- .reset_list = qcm2290_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(qcm2290_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v3_usb3phy_regs_layout,
+ .regs = qmp_v3_usb3phy_regs_layout_qcm2290,
};
static void qmp_usb_configure_lane(void __iomem *base,
@@ -1664,19 +1633,19 @@ static int qmp_usb_init(struct phy *phy)
return ret;
}
- ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+ ret = reset_control_bulk_assert(qmp->num_resets, qmp->resets);
if (ret) {
dev_err(qmp->dev, "reset assert failed\n");
goto err_disable_regulators;
}
- ret = reset_control_bulk_deassert(cfg->num_resets, qmp->resets);
+ ret = reset_control_bulk_deassert(qmp->num_resets, qmp->resets);
if (ret) {
dev_err(qmp->dev, "reset deassert failed\n");
goto err_disable_regulators;
}
- ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ ret = clk_bulk_prepare_enable(qmp->num_clks, qmp->clks);
if (ret)
goto err_assert_reset;
@@ -1685,7 +1654,7 @@ static int qmp_usb_init(struct phy *phy)
return 0;
err_assert_reset:
- reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+ reset_control_bulk_assert(qmp->num_resets, qmp->resets);
err_disable_regulators:
regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
@@ -1697,9 +1666,9 @@ static int qmp_usb_exit(struct phy *phy)
struct qmp_usb *qmp = phy_get_drvdata(phy);
const struct qmp_phy_cfg *cfg = qmp->cfg;
- reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+ reset_control_bulk_assert(qmp->num_resets, qmp->resets);
- clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+ clk_bulk_disable_unprepare(qmp->num_clks, qmp->clks);
regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
@@ -1852,8 +1821,8 @@ static void qmp_usb_enable_autonomous_mode(struct qmp_usb *qmp)
qphy_setbits(pcs_usb, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL], intr_mask);
/* Enable i/o clamp_n for autonomous mode */
- if (pcs_misc)
- qphy_clrbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
+ if (pcs_misc && cfg->regs[QPHY_PCS_MISC_CLAMP_ENABLE])
+ qphy_clrbits(pcs_misc, cfg->regs[QPHY_PCS_MISC_CLAMP_ENABLE], CLAMP_EN);
}
static void qmp_usb_disable_autonomous_mode(struct qmp_usb *qmp)
@@ -1863,8 +1832,8 @@ static void qmp_usb_disable_autonomous_mode(struct qmp_usb *qmp)
void __iomem *pcs_misc = qmp->pcs_misc;
/* Disable i/o clamp_n on resume for normal mode */
- if (pcs_misc)
- qphy_setbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
+ if (pcs_misc && cfg->regs[QPHY_PCS_MISC_CLAMP_ENABLE])
+ qphy_setbits(pcs_misc, cfg->regs[QPHY_PCS_MISC_CLAMP_ENABLE], CLAMP_EN);
qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL],
ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL | ALFPS_DTCT_EN);
@@ -1877,7 +1846,6 @@ static void qmp_usb_disable_autonomous_mode(struct qmp_usb *qmp)
static int __maybe_unused qmp_usb_runtime_suspend(struct device *dev)
{
struct qmp_usb *qmp = dev_get_drvdata(dev);
- const struct qmp_phy_cfg *cfg = qmp->cfg;
dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qmp->mode);
@@ -1889,7 +1857,7 @@ static int __maybe_unused qmp_usb_runtime_suspend(struct device *dev)
qmp_usb_enable_autonomous_mode(qmp);
clk_disable_unprepare(qmp->pipe_clk);
- clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+ clk_bulk_disable_unprepare(qmp->num_clks, qmp->clks);
return 0;
}
@@ -1897,7 +1865,6 @@ static int __maybe_unused qmp_usb_runtime_suspend(struct device *dev)
static int __maybe_unused qmp_usb_runtime_resume(struct device *dev)
{
struct qmp_usb *qmp = dev_get_drvdata(dev);
- const struct qmp_phy_cfg *cfg = qmp->cfg;
int ret = 0;
dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qmp->mode);
@@ -1907,14 +1874,14 @@ static int __maybe_unused qmp_usb_runtime_resume(struct device *dev)
return 0;
}
- ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ ret = clk_bulk_prepare_enable(qmp->num_clks, qmp->clks);
if (ret)
return ret;
ret = clk_prepare_enable(qmp->pipe_clk);
if (ret) {
dev_err(dev, "pipe_clk enable failed, err=%d\n", ret);
- clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+ clk_bulk_disable_unprepare(qmp->num_clks, qmp->clks);
return ret;
}
@@ -1945,22 +1912,25 @@ static int qmp_usb_vreg_init(struct qmp_usb *qmp)
return devm_regulator_bulk_get(dev, num, qmp->vregs);
}
-static int qmp_usb_reset_init(struct qmp_usb *qmp)
+static int qmp_usb_reset_init(struct qmp_usb *qmp,
+ const char *const *reset_list,
+ int num_resets)
{
- const struct qmp_phy_cfg *cfg = qmp->cfg;
struct device *dev = qmp->dev;
int i;
int ret;
- qmp->resets = devm_kcalloc(dev, cfg->num_resets,
+ qmp->resets = devm_kcalloc(dev, num_resets,
sizeof(*qmp->resets), GFP_KERNEL);
if (!qmp->resets)
return -ENOMEM;
- for (i = 0; i < cfg->num_resets; i++)
- qmp->resets[i].id = cfg->reset_list[i];
+ for (i = 0; i < num_resets; i++)
+ qmp->resets[i].id = reset_list[i];
+
+ qmp->num_resets = num_resets;
- ret = devm_reset_control_bulk_get_exclusive(dev, cfg->num_resets, qmp->resets);
+ ret = devm_reset_control_bulk_get_exclusive(dev, num_resets, qmp->resets);
if (ret)
return dev_err_probe(dev, ret, "failed to get resets\n");
@@ -1969,9 +1939,8 @@ static int qmp_usb_reset_init(struct qmp_usb *qmp)
static int qmp_usb_clk_init(struct qmp_usb *qmp)
{
- const struct qmp_phy_cfg *cfg = qmp->cfg;
struct device *dev = qmp->dev;
- int num = cfg->num_clks;
+ int num = ARRAY_SIZE(qmp_usb_phy_clk_l);
int i;
qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL);
@@ -1979,9 +1948,11 @@ static int qmp_usb_clk_init(struct qmp_usb *qmp)
return -ENOMEM;
for (i = 0; i < num; i++)
- qmp->clks[i].id = cfg->clk_list[i];
+ qmp->clks[i].id = qmp_usb_phy_clk_l[i];
+
+ qmp->num_clks = num;
- return devm_clk_bulk_get(dev, num, qmp->clks);
+ return devm_clk_bulk_get_optional(dev, num, qmp->clks);
}
static void phy_clk_release_provider(void *res)
@@ -2061,6 +2032,7 @@ static int qmp_usb_parse_dt_legacy(struct qmp_usb *qmp, struct device_node *np)
const struct qmp_phy_cfg *cfg = qmp->cfg;
struct device *dev = qmp->dev;
bool exclusive = true;
+ int ret;
qmp->serdes = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(qmp->serdes))
@@ -2121,6 +2093,17 @@ static int qmp_usb_parse_dt_legacy(struct qmp_usb *qmp, struct device_node *np)
"failed to get pipe clock\n");
}
+ ret = devm_clk_bulk_get_all(qmp->dev, &qmp->clks);
+ if (ret < 0)
+ return ret;
+
+ qmp->num_clks = ret;
+
+ ret = qmp_usb_reset_init(qmp, usb3phy_legacy_reset_l,
+ ARRAY_SIZE(usb3phy_legacy_reset_l));
+ if (ret)
+ return ret;
+
return 0;
}
@@ -2131,6 +2114,7 @@ static int qmp_usb_parse_dt(struct qmp_usb *qmp)
const struct qmp_usb_offsets *offs = cfg->offsets;
struct device *dev = qmp->dev;
void __iomem *base;
+ int ret;
if (!offs)
return -EINVAL;
@@ -2141,8 +2125,10 @@ static int qmp_usb_parse_dt(struct qmp_usb *qmp)
qmp->serdes = base + offs->serdes;
qmp->pcs = base + offs->pcs;
- qmp->pcs_misc = base + offs->pcs_misc;
- qmp->pcs_usb = base + offs->pcs_usb;
+ if (offs->pcs_usb)
+ qmp->pcs_usb = base + offs->pcs_usb;
+ if (offs->pcs_misc)
+ qmp->pcs_misc = base + offs->pcs_misc;
qmp->tx = base + offs->tx;
qmp->rx = base + offs->rx;
@@ -2151,12 +2137,21 @@ static int qmp_usb_parse_dt(struct qmp_usb *qmp)
qmp->rx2 = base + offs->rx2;
}
+ ret = qmp_usb_clk_init(qmp);
+ if (ret)
+ return ret;
+
qmp->pipe_clk = devm_clk_get(dev, "pipe");
if (IS_ERR(qmp->pipe_clk)) {
return dev_err_probe(dev, PTR_ERR(qmp->pipe_clk),
"failed to get pipe clock\n");
}
+ ret = qmp_usb_reset_init(qmp, usb3phy_reset_l,
+ ARRAY_SIZE(usb3phy_reset_l));
+ if (ret)
+ return ret;
+
return 0;
}
@@ -2178,14 +2173,6 @@ static int qmp_usb_probe(struct platform_device *pdev)
if (!qmp->cfg)
return -EINVAL;
- ret = qmp_usb_clk_init(qmp);
- if (ret)
- return ret;
-
- ret = qmp_usb_reset_init(qmp);
- if (ret)
- return ret;
-
ret = qmp_usb_vreg_init(qmp);
if (ret)
return ret;
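
The qmp-usb hunks above move the clock and reset bookkeeping out of the per-SoC struct qmp_phy_cfg and into the runtime state, with the legacy devicetree path taking every clock the node provides. A rough illustration of that shape, using hypothetical names (my_phy, my_phy_get_clks_resets) rather than the driver's own:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/reset.h>
#include <linux/slab.h>

struct my_phy {
	struct device *dev;
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control_bulk_data *resets;
	int num_resets;
};

static int my_phy_get_clks_resets(struct my_phy *p,
				  const char *const *reset_names,
				  int num_resets)
{
	int i, ret;

	/* Legacy bindings: request whatever clocks the DT node provides. */
	ret = devm_clk_bulk_get_all(p->dev, &p->clks);
	if (ret < 0)
		return ret;
	p->num_clks = ret;

	/* Resets are still requested by a fixed list of names. */
	p->resets = devm_kcalloc(p->dev, num_resets, sizeof(*p->resets),
				 GFP_KERNEL);
	if (!p->resets)
		return -ENOMEM;

	for (i = 0; i < num_resets; i++)
		p->resets[i].id = reset_names[i];
	p->num_resets = num_resets;

	return devm_reset_control_bulk_get_exclusive(p->dev, num_resets,
						     p->resets);
}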
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 32d897684755..71f063f4a56e 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -126,14 +126,6 @@
#define QSERDES_V4_DP_PHY_AUX_INTERRUPT_STATUS 0x0d8
#define QSERDES_V4_DP_PHY_STATUS 0x0dc
-/* Only for QMP V4 PHY - PCS_MISC registers */
-#define QPHY_V4_PCS_MISC_TYPEC_CTRL 0x00
-#define QPHY_V4_PCS_MISC_TYPEC_PWRDN_CTRL 0x04
-#define QPHY_V4_PCS_MISC_PCS_MISC_CONFIG1 0x08
-#define QPHY_V4_PCS_MISC_CLAMP_ENABLE 0x0c
-#define QPHY_V4_PCS_MISC_TYPEC_STATUS 0x10
-#define QPHY_V4_PCS_MISC_PLACEHOLDER_STATUS 0x14
-
#define QSERDES_V5_DP_PHY_STATUS 0x0dc
/* Only for QMP V6 PHY - DP PHY registers */
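
The clamp-enable hunks earlier route the optional PCS_MISC register through the per-revision regs[] layout table, so revisions that lack it leave the slot at zero and the access is skipped at runtime. A condensed sketch of that guard with made-up names (MY_CLAMP, my_layout_*), not the driver's actual tables:

#include <linux/io.h>

enum my_reg { MY_SW_RESET, MY_CLAMP, MY_LAYOUT_SIZE };

/* Revision A exposes a clamp register; revision B leaves the slot at 0. */
static const unsigned int my_layout_rev_a[MY_LAYOUT_SIZE] = {
	[MY_SW_RESET] = 0x00,
	[MY_CLAMP]    = 0x0c,
};

static const unsigned int my_layout_rev_b[MY_LAYOUT_SIZE] = {
	[MY_SW_RESET] = 0x00,
};

static void my_set_clamp(void __iomem *base, const unsigned int *regs, u32 bit)
{
	/* An offset of 0 means "this revision has no clamp register". */
	if (regs[MY_CLAMP])
		writel(readl(base + regs[MY_CLAMP]) | bit,
		       base + regs[MY_CLAMP]);
}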
diff --git a/drivers/phy/ralink/phy-ralink-usb.c b/drivers/phy/ralink/phy-ralink-usb.c
index 2bd8ad2e76ed..0ff07e210769 100644
--- a/drivers/phy/ralink/phy-ralink-usb.c
+++ b/drivers/phy/ralink/phy-ralink-usb.c
@@ -13,7 +13,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -171,18 +171,13 @@ static int ralink_usb_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct phy_provider *phy_provider;
- const struct of_device_id *match;
struct ralink_usb_phy *phy;
- match = of_match_device(ralink_usb_phy_of_match, &pdev->dev);
- if (!match)
- return -ENODEV;
-
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
- phy->clk = (uintptr_t)match->data;
+ phy->clk = (uintptr_t)device_get_match_data(&pdev->dev);
phy->base = NULL;
phy->sysctl = syscon_regmap_lookup_by_phandle(dev->of_node, "ralink,sysctl");
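
The same conversion repeats in several drivers below: instead of re-running the OF match table with of_match_device(), probe reads the per-compatible data through device_get_match_data(), which also works for non-OF firmware nodes. A generic sketch with hypothetical names (my_variant, my_of_match):

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct my_variant {
	unsigned long clk_rate;
};

static const struct my_variant my_variant_a = { .clk_rate = 48000000 };

static const struct of_device_id my_of_match[] = {
	{ .compatible = "vendor,my-phy-a", .data = &my_variant_a },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, my_of_match);

static int my_probe(struct platform_device *pdev)
{
	const struct my_variant *variant;

	variant = device_get_match_data(&pdev->dev);
	if (!variant)
		return -EINVAL;

	dev_info(&pdev->dev, "clk rate %lu\n", variant->clk_rate);
	return 0;
}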
diff --git a/drivers/phy/realtek/phy-rtk-usb2.c b/drivers/phy/realtek/phy-rtk-usb2.c
index aedc78bd37f7..0a6426285c67 100644
--- a/drivers/phy/realtek/phy-rtk-usb2.c
+++ b/drivers/phy/realtek/phy-rtk-usb2.c
@@ -8,8 +8,8 @@
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/nvmem-consumer.h>
diff --git a/drivers/phy/realtek/phy-rtk-usb3.c b/drivers/phy/realtek/phy-rtk-usb3.c
index dfb3122f3f11..67446a85e968 100644
--- a/drivers/phy/realtek/phy-rtk-usb3.c
+++ b/drivers/phy/realtek/phy-rtk-usb3.c
@@ -8,8 +8,8 @@
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/nvmem-consumer.h>
diff --git a/drivers/phy/renesas/r8a779f0-ether-serdes.c b/drivers/phy/renesas/r8a779f0-ether-serdes.c
index 683b19bc411a..fc6e398fa3bf 100644
--- a/drivers/phy/renesas/r8a779f0-ether-serdes.c
+++ b/drivers/phy/renesas/r8a779f0-ether-serdes.c
@@ -214,6 +214,10 @@ static int r8a779f0_eth_serdes_hw_init(struct r8a779f0_eth_serdes_channel *chann
if (dd->initialized)
return 0;
+ reset_control_reset(dd->reset);
+
+ usleep_range(1000, 2000);
+
ret = r8a779f0_eth_serdes_common_init_ram(dd);
if (ret)
return ret;
@@ -257,6 +261,15 @@ static int r8a779f0_eth_serdes_init(struct phy *p)
return ret;
}
+static int r8a779f0_eth_serdes_exit(struct phy *p)
+{
+ struct r8a779f0_eth_serdes_channel *channel = phy_get_drvdata(p);
+
+ channel->dd->initialized = false;
+
+ return 0;
+}
+
static int r8a779f0_eth_serdes_hw_init_late(struct r8a779f0_eth_serdes_channel
*channel)
{
@@ -314,6 +327,7 @@ static int r8a779f0_eth_serdes_set_speed(struct phy *p, int speed)
static const struct phy_ops r8a779f0_eth_serdes_ops = {
.init = r8a779f0_eth_serdes_init,
+ .exit = r8a779f0_eth_serdes_exit,
.power_on = r8a779f0_eth_serdes_power_on,
.set_mode = r8a779f0_eth_serdes_set_mode,
.set_speed = r8a779f0_eth_serdes_set_speed,
@@ -356,8 +370,6 @@ static int r8a779f0_eth_serdes_probe(struct platform_device *pdev)
if (IS_ERR(dd->reset))
return PTR_ERR(dd->reset);
- reset_control_reset(dd->reset);
-
for (i = 0; i < R8A779F0_ETH_SERDES_NUM; i++) {
struct r8a779f0_eth_serdes_channel *channel = &dd->channel[i];
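
The Ether SERDES change moves the reset from probe into the first .init and adds an .exit that clears the shared initialized flag, so a consumer that exits and re-inits the PHY gets a full hardware re-initialization. A stripped-down sketch of that init/exit pairing, using hypothetical my_serdes types rather than the driver's:

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/reset.h>

struct my_serdes {
	struct reset_control *reset;
	bool initialized;		/* shared by all channels */
};

static int my_serdes_init(struct phy *p)
{
	struct my_serdes *sd = phy_get_drvdata(p);

	if (sd->initialized)
		return 0;

	/* Reset only on the first init after probe or after an exit. */
	reset_control_reset(sd->reset);
	usleep_range(1000, 2000);

	sd->initialized = true;
	return 0;
}

static int my_serdes_exit(struct phy *p)
{
	struct my_serdes *sd = phy_get_drvdata(p);

	/* Force a full re-init the next time a consumer calls phy_init(). */
	sd->initialized = false;
	return 0;
}

static const struct phy_ops my_serdes_ops = {
	.init  = my_serdes_init,
	.exit  = my_serdes_exit,
	.owner = THIS_MODULE,
};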
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index b982c3f0d4b5..a24d2af154df 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -17,9 +17,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
diff --git a/drivers/phy/rockchip/phy-rockchip-pcie.c b/drivers/phy/rockchip/phy-rockchip-pcie.c
index 8234b83fdd88..1bbd6be2a584 100644
--- a/drivers/phy/rockchip/phy-rockchip-pcie.c
+++ b/drivers/phy/rockchip/phy-rockchip-pcie.c
@@ -12,10 +12,9 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -63,7 +62,7 @@ struct rockchip_pcie_data {
};
struct rockchip_pcie_phy {
- struct rockchip_pcie_data *phy_data;
+ const struct rockchip_pcie_data *phy_data;
struct regmap *reg_base;
struct phy_pcie_instance {
struct phy *phy;
@@ -350,7 +349,6 @@ static int rockchip_pcie_phy_probe(struct platform_device *pdev)
struct rockchip_pcie_phy *rk_phy;
struct phy_provider *phy_provider;
struct regmap *grf;
- const struct of_device_id *of_id;
int i;
u32 phy_num;
@@ -364,11 +362,10 @@ static int rockchip_pcie_phy_probe(struct platform_device *pdev)
if (!rk_phy)
return -ENOMEM;
- of_id = of_match_device(rockchip_pcie_phy_dt_ids, &pdev->dev);
- if (!of_id)
+ rk_phy->phy_data = device_get_match_data(&pdev->dev);
+ if (!rk_phy->phy_data)
return -EINVAL;
- rk_phy->phy_data = (struct rockchip_pcie_data *)of_id->data;
rk_phy->reg_base = grf;
mutex_init(&rk_phy->pcie_mutex);
diff --git a/drivers/phy/rockchip/phy-rockchip-usb.c b/drivers/phy/rockchip/phy-rockchip-usb.c
index 8454285977eb..666a896c8f0a 100644
--- a/drivers/phy/rockchip/phy-rockchip-usb.c
+++ b/drivers/phy/rockchip/phy-rockchip-usb.c
@@ -13,10 +13,9 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/regmap.h>
@@ -458,7 +457,6 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct rockchip_usb_phy_base *phy_base;
struct phy_provider *phy_provider;
- const struct of_device_id *match;
struct device_node *child;
int err;
@@ -466,14 +464,12 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
if (!phy_base)
return -ENOMEM;
- match = of_match_device(dev->driver->of_match_table, dev);
- if (!match || !match->data) {
+ phy_base->pdata = device_get_match_data(dev);
+ if (!phy_base->pdata) {
dev_err(dev, "missing phy data\n");
return -EINVAL;
}
- phy_base->pdata = match->data;
-
phy_base->dev = dev;
phy_base->reg_base = ERR_PTR(-ENODEV);
if (dev->parent && dev->parent->of_node)
diff --git a/drivers/phy/ti/phy-dm816x-usb.c b/drivers/phy/ti/phy-dm816x-usb.c
index db153a55f4e1..d5ae972a31fb 100644
--- a/drivers/phy/ti/phy-dm816x-usb.c
+++ b/drivers/phy/ti/phy-dm816x-usb.c
@@ -13,7 +13,6 @@
#include <linux/pm_runtime.h>
#include <linux/delay.h>
#include <linux/phy/phy.h>
-#include <linux/of_platform.h>
#include <linux/mfd/syscon.h>
@@ -161,7 +160,6 @@ static UNIVERSAL_DEV_PM_OPS(dm816x_usb_phy_pm_ops,
dm816x_usb_phy_runtime_resume,
NULL);
-#ifdef CONFIG_OF
static const struct of_device_id dm816x_usb_phy_id_table[] = {
{
.compatible = "ti,dm8168-usb-phy",
@@ -169,7 +167,6 @@ static const struct of_device_id dm816x_usb_phy_id_table[] = {
{},
};
MODULE_DEVICE_TABLE(of, dm816x_usb_phy_id_table);
-#endif
static int dm816x_usb_phy_probe(struct platform_device *pdev)
{
@@ -178,14 +175,8 @@ static int dm816x_usb_phy_probe(struct platform_device *pdev)
struct phy *generic_phy;
struct phy_provider *phy_provider;
struct usb_otg *otg;
- const struct of_device_id *of_id;
int error;
- of_id = of_match_device(of_match_ptr(dm816x_usb_phy_id_table),
- &pdev->dev);
- if (!of_id)
- return -EINVAL;
-
phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
@@ -272,7 +263,7 @@ static struct platform_driver dm816x_usb_phy_driver = {
.driver = {
.name = "dm816x-usb-phy",
.pm = &dm816x_usb_phy_pm_ops,
- .of_match_table = of_match_ptr(dm816x_usb_phy_id_table),
+ .of_match_table = dm816x_usb_phy_id_table,
},
};
diff --git a/drivers/phy/ti/phy-omap-control.c b/drivers/phy/ti/phy-omap-control.c
index 76c5595f0859..2fdb8f4241c7 100644
--- a/drivers/phy/ti/phy-omap-control.c
+++ b/drivers/phy/ti/phy-omap-control.c
@@ -8,9 +8,9 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
@@ -268,20 +268,15 @@ MODULE_DEVICE_TABLE(of, omap_control_phy_id_table);
static int omap_control_phy_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id;
struct omap_control_phy *control_phy;
- of_id = of_match_device(omap_control_phy_id_table, &pdev->dev);
- if (!of_id)
- return -EINVAL;
-
control_phy = devm_kzalloc(&pdev->dev, sizeof(*control_phy),
GFP_KERNEL);
if (!control_phy)
return -ENOMEM;
control_phy->dev = &pdev->dev;
- control_phy->type = *(enum omap_control_phy_type *)of_id->data;
+ control_phy->type = *(enum omap_control_phy_type *)device_get_match_data(&pdev->dev);
if (control_phy->type == OMAP_CTRL_TYPE_OTGHS) {
control_phy->otghs_control =
diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
index 762d3de8b3c5..dd2913ac0fa2 100644
--- a/drivers/phy/ti/phy-omap-usb2.c
+++ b/drivers/phy/ti/phy-omap-usb2.c
@@ -19,6 +19,7 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
@@ -371,16 +372,12 @@ static int omap_usb2_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct device_node *control_node;
struct platform_device *control_pdev;
- const struct of_device_id *of_id;
- struct usb_phy_data *phy_data;
+ const struct usb_phy_data *phy_data;
- of_id = of_match_device(omap_usb2_id_table, &pdev->dev);
-
- if (!of_id)
+ phy_data = device_get_match_data(&pdev->dev);
+ if (!phy_data)
return -EINVAL;
- phy_data = (struct usb_phy_data *)of_id->data;
-
phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
index 507e1552db5e..874c1a25ce36 100644
--- a/drivers/phy/ti/phy-ti-pipe3.c
+++ b/drivers/phy/ti/phy-ti-pipe3.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/phy/phy.h>
#include <linux/of.h>
@@ -778,23 +779,16 @@ static int ti_pipe3_probe(struct platform_device *pdev)
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
int ret;
- const struct of_device_id *match;
- struct pipe3_data *data;
+ const struct pipe3_data *data;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
- match = of_match_device(ti_pipe3_id_table, dev);
- if (!match)
+ data = device_get_match_data(dev);
+ if (!data)
return -EINVAL;
- data = (struct pipe3_data *)match->data;
- if (!data) {
- dev_err(dev, "no driver data\n");
- return -EINVAL;
- }
-
phy->dev = dev;
phy->mode = data->mode;
phy->dpll_map = data->dpll_map;
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 7dfb7190580e..1de4e1edede0 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -520,6 +520,7 @@ source "drivers/pinctrl/nuvoton/Kconfig"
source "drivers/pinctrl/nxp/Kconfig"
source "drivers/pinctrl/pxa/Kconfig"
source "drivers/pinctrl/qcom/Kconfig"
+source "drivers/pinctrl/realtek/Kconfig"
source "drivers/pinctrl/renesas/Kconfig"
source "drivers/pinctrl/samsung/Kconfig"
source "drivers/pinctrl/spear/Kconfig"
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index dd6cda270294..37575deb7a69 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -66,6 +66,7 @@ obj-y += nuvoton/
obj-y += nxp/
obj-$(CONFIG_PINCTRL_PXA) += pxa/
obj-y += qcom/
+obj-$(CONFIG_ARCH_REALTEK) += realtek/
obj-$(CONFIG_PINCTRL_RENESAS) += renesas/
obj-$(CONFIG_PINCTRL_SAMSUNG) += samsung/
obj-$(CONFIG_PINCTRL_SPEAR) += spear/
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
index bfed0e274643..774f8d05142f 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
@@ -2563,15 +2563,20 @@ static int aspeed_g4_sig_expr_set(struct aspeed_pinmux_data *ctx,
* deconfigured and is the reason we re-evaluate after writing
* all descriptor bits.
*
- * Port D and port E GPIO loopback modes are the only exception
- * as those are commonly used with front-panel buttons to allow
- * normal operation of the host when the BMC is powered off or
- * fails to boot. Once the BMC has booted, the loopback mode
- * must be disabled for the BMC to control host power-on and
- * reset.
+ * We make two exceptions to the read-only rule:
+ *
+ * - The passthrough mode of GPIO ports D and E is commonly
+ * used with front-panel buttons to allow normal operation
+ * of the host if the BMC is powered off or fails to boot.
+ * Once the BMC has booted, the loopback mode must be
+ * disabled for the BMC to control host power-on and reset.
+ *
+ * - The operating mode of the SPI1 interface is simply
+ * strapped incorrectly on some systems and requires a
+ * software fixup, which we allow to be done via pinctrl.
*/
if (desc->ip == ASPEED_IP_SCU && desc->reg == HW_STRAP1 &&
- !(desc->mask & (BIT(21) | BIT(22))))
+ !(desc->mask & (BIT(22) | BIT(21) | BIT(13) | BIT(12))))
continue;
if (desc->ip == ASPEED_IP_SCU && desc->reg == HW_STRAP2)
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index 4c0d26606b6c..5bb8fd0d1e41 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -2742,15 +2742,20 @@ static int aspeed_g5_sig_expr_set(struct aspeed_pinmux_data *ctx,
* deconfigured and is the reason we re-evaluate after writing
* all descriptor bits.
*
- * Port D and port E GPIO loopback modes are the only exception
- * as those are commonly used with front-panel buttons to allow
- * normal operation of the host when the BMC is powered off or
- * fails to boot. Once the BMC has booted, the loopback mode
- * must be disabled for the BMC to control host power-on and
- * reset.
+ * We make two exceptions to the read-only rule:
+ *
+ * - The passthrough mode of GPIO ports D and E is commonly
+ * used with front-panel buttons to allow normal operation
+ * of the host if the BMC is powered off or fails to boot.
+ * Once the BMC has booted, the loopback mode must be
+ * disabled for the BMC to control host power-on and reset.
+ *
+ * - The operating mode of the SPI1 interface is simply
+ * strapped incorrectly on some systems and requires a
+ * software fixup, which we allow to be done via pinctrl.
*/
if (desc->ip == ASPEED_IP_SCU && desc->reg == HW_STRAP1 &&
- !(desc->mask & (BIT(21) | BIT(22))))
+ !(desc->mask & (BIT(22) | BIT(21) | BIT(13) | BIT(12))))
continue;
if (desc->ip == ASPEED_IP_SCU && desc->reg == HW_STRAP2)
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index 80838dc54b3a..d376fa7114d1 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -1592,9 +1592,10 @@ SIG_EXPR_LIST_DECL_SEMG(A4, USB2ADPDP, USBA, USB2ADP, USB2ADP_DESC,
SIG_DESC_SET(SCUC20, 16));
SIG_EXPR_LIST_DECL_SEMG(A4, USB2ADDP, USBA, USB2AD, USB2AD_DESC);
SIG_EXPR_LIST_DECL_SEMG(A4, USB2AHDP, USBA, USB2AH, USB2AH_DESC);
-SIG_EXPR_LIST_DECL_SEMG(A4, USB2AHPDP, USBA, USB2AHP, USB2AHP_DESC);
+SIG_EXPR_LIST_DECL_SEMG(A4, USB2AHPDP, USBA, USB2AHP, USB2AHP_DESC,
+ SIG_DESC_SET(SCUC20, 16));
PIN_DECL_(A4, SIG_EXPR_LIST_PTR(A4, USB2ADPDP), SIG_EXPR_LIST_PTR(A4, USB2ADDP),
- SIG_EXPR_LIST_PTR(A4, USB2AHDP));
+ SIG_EXPR_LIST_PTR(A4, USB2AHDP), SIG_EXPR_LIST_PTR(A4, USB2AHPDP));
#define B4 253
SIG_EXPR_LIST_DECL_SEMG(B4, USB2ADPDN, USBA, USB2ADP, USB2ADP_DESC);
@@ -1602,7 +1603,7 @@ SIG_EXPR_LIST_DECL_SEMG(B4, USB2ADDN, USBA, USB2AD, USB2AD_DESC);
SIG_EXPR_LIST_DECL_SEMG(B4, USB2AHDN, USBA, USB2AH, USB2AH_DESC);
SIG_EXPR_LIST_DECL_SEMG(B4, USB2AHPDN, USBA, USB2AHP, USB2AHP_DESC);
PIN_DECL_(B4, SIG_EXPR_LIST_PTR(B4, USB2ADPDN), SIG_EXPR_LIST_PTR(B4, USB2ADDN),
- SIG_EXPR_LIST_PTR(B4, USB2AHDN));
+ SIG_EXPR_LIST_PTR(B4, USB2AHDN), SIG_EXPR_LIST_PTR(B4, USB2AHPDN));
GROUP_DECL(USBA, A4, B4);
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index bc7bb9876e57..fd5ce52d05b1 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -330,24 +330,22 @@ static const struct irq_chip iproc_gpio_irq_chip = {
static int iproc_gpio_request(struct gpio_chip *gc, unsigned offset)
{
struct iproc_gpio *chip = gpiochip_get_data(gc);
- unsigned gpio = gc->base + offset;
/* not all Iproc GPIO pins can be muxed individually */
if (!chip->pinmux_is_supported)
return 0;
- return pinctrl_gpio_request(gpio);
+ return pinctrl_gpio_request(gc, offset);
}
static void iproc_gpio_free(struct gpio_chip *gc, unsigned offset)
{
struct iproc_gpio *chip = gpiochip_get_data(gc);
- unsigned gpio = gc->base + offset;
if (!chip->pinmux_is_supported)
return;
- pinctrl_gpio_free(gpio);
+ pinctrl_gpio_free(gc, offset);
}
static int iproc_gpio_direction_input(struct gpio_chip *gc, unsigned gpio)
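
The gpiolib helpers used above now take the gpio_chip and a controller-relative offset instead of a number in the global GPIO space, so chip callbacks can forward their arguments as-is and, as the cs42l43 hunk further down shows, pinctrl_gpio_direction_input() can even be plugged in directly. A minimal sketch of a chip wired that way (hypothetical my_gpio_* names):

#include <linux/gpio/driver.h>
#include <linux/pinctrl/consumer.h>

static int my_gpio_request(struct gpio_chip *gc, unsigned int offset)
{
	/* No gc->base arithmetic needed any more. */
	return pinctrl_gpio_request(gc, offset);
}

static void my_gpio_free(struct gpio_chip *gc, unsigned int offset)
{
	pinctrl_gpio_free(gc, offset);
}

static int my_gpio_direction_output(struct gpio_chip *gc, unsigned int offset,
				    int value)
{
	/* Driver-specific code to drive the line to 'value' would go here. */
	return pinctrl_gpio_direction_output(gc, offset);
}

static const struct gpio_chip my_gpio_chip_template = {
	.label			= "my-gpio",
	.request		= my_gpio_request,
	.free			= my_gpio_free,
	.direction_input	= pinctrl_gpio_direction_input,
	.direction_output	= my_gpio_direction_output,
};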
diff --git a/drivers/pinctrl/bcm/pinctrl-ns.c b/drivers/pinctrl/bcm/pinctrl-ns.c
index f80630a74d34..d099a7f25f64 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns.c
@@ -7,11 +7,11 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include "../core.h"
@@ -208,7 +208,6 @@ static const struct of_device_id ns_pinctrl_of_match_table[] = {
static int ns_pinctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- const struct of_device_id *of_id;
struct ns_pinctrl *ns_pinctrl;
struct pinctrl_desc *pctldesc;
struct pinctrl_pin_desc *pin;
@@ -225,10 +224,7 @@ static int ns_pinctrl_probe(struct platform_device *pdev)
ns_pinctrl->dev = dev;
- of_id = of_match_device(ns_pinctrl_of_match_table, dev);
- if (!of_id)
- return -EINVAL;
- ns_pinctrl->chipset_flag = (uintptr_t)of_id->data;
+ ns_pinctrl->chipset_flag = (uintptr_t)device_get_match_data(dev);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"cru_gpio_control");
diff --git a/drivers/pinctrl/berlin/berlin-bg2.c b/drivers/pinctrl/berlin/berlin-bg2.c
index acbd413340e8..15aed4467627 100644
--- a/drivers/pinctrl/berlin/berlin-bg2.c
+++ b/drivers/pinctrl/berlin/berlin-bg2.c
@@ -8,8 +8,9 @@
*/
#include <linux/init.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include "berlin.h"
@@ -227,10 +228,7 @@ static const struct of_device_id berlin2_pinctrl_match[] = {
static int berlin2_pinctrl_probe(struct platform_device *pdev)
{
- const struct of_device_id *match =
- of_match_device(berlin2_pinctrl_match, &pdev->dev);
-
- return berlin_pinctrl_probe(pdev, match->data);
+ return berlin_pinctrl_probe(pdev, device_get_match_data(&pdev->dev));
}
static struct platform_driver berlin2_pinctrl_driver = {
diff --git a/drivers/pinctrl/berlin/berlin-bg2cd.c b/drivers/pinctrl/berlin/berlin-bg2cd.c
index c0f5d86d5d01..73a1d8c23088 100644
--- a/drivers/pinctrl/berlin/berlin-bg2cd.c
+++ b/drivers/pinctrl/berlin/berlin-bg2cd.c
@@ -8,8 +8,9 @@
*/
#include <linux/init.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include "berlin.h"
@@ -172,10 +173,7 @@ static const struct of_device_id berlin2cd_pinctrl_match[] = {
static int berlin2cd_pinctrl_probe(struct platform_device *pdev)
{
- const struct of_device_id *match =
- of_match_device(berlin2cd_pinctrl_match, &pdev->dev);
-
- return berlin_pinctrl_probe(pdev, match->data);
+ return berlin_pinctrl_probe(pdev, device_get_match_data(&pdev->dev));
}
static struct platform_driver berlin2cd_pinctrl_driver = {
diff --git a/drivers/pinctrl/berlin/berlin-bg2q.c b/drivers/pinctrl/berlin/berlin-bg2q.c
index 20a3216ede07..a5dbc8f279e7 100644
--- a/drivers/pinctrl/berlin/berlin-bg2q.c
+++ b/drivers/pinctrl/berlin/berlin-bg2q.c
@@ -8,8 +8,9 @@
*/
#include <linux/init.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include "berlin.h"
@@ -389,10 +390,7 @@ static const struct of_device_id berlin2q_pinctrl_match[] = {
static int berlin2q_pinctrl_probe(struct platform_device *pdev)
{
- const struct of_device_id *match =
- of_match_device(berlin2q_pinctrl_match, &pdev->dev);
-
- return berlin_pinctrl_probe(pdev, match->data);
+ return berlin_pinctrl_probe(pdev, device_get_match_data(&pdev->dev));
}
static struct platform_driver berlin2q_pinctrl_driver = {
diff --git a/drivers/pinctrl/berlin/berlin-bg4ct.c b/drivers/pinctrl/berlin/berlin-bg4ct.c
index 3026a3b3da2d..9bf0a54f2798 100644
--- a/drivers/pinctrl/berlin/berlin-bg4ct.c
+++ b/drivers/pinctrl/berlin/berlin-bg4ct.c
@@ -8,8 +8,9 @@
*/
#include <linux/init.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include "berlin.h"
@@ -449,8 +450,8 @@ static const struct of_device_id berlin4ct_pinctrl_match[] = {
static int berlin4ct_pinctrl_probe(struct platform_device *pdev)
{
- const struct of_device_id *match =
- of_match_device(berlin4ct_pinctrl_match, &pdev->dev);
+ const struct berlin_pinctrl_desc *desc =
+ device_get_match_data(&pdev->dev);
struct regmap_config *rmconfig;
struct regmap *regmap;
struct resource *res;
@@ -473,7 +474,7 @@ static int berlin4ct_pinctrl_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- return berlin_pinctrl_probe_regmap(pdev, match->data, regmap);
+ return berlin_pinctrl_probe_regmap(pdev, desc, regmap);
}
static struct platform_driver berlin4ct_pinctrl_driver = {
diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c
index 1e427ea4d31b..9550cc8095c2 100644
--- a/drivers/pinctrl/berlin/berlin.c
+++ b/drivers/pinctrl/berlin/berlin.c
@@ -96,10 +96,10 @@ static int berlin_pinctrl_dt_node_to_map(struct pinctrl_dev *pctrl_dev,
}
static const struct pinctrl_ops berlin_pinctrl_ops = {
- .get_groups_count = &berlin_pinctrl_get_group_count,
- .get_group_name = &berlin_pinctrl_get_group_name,
- .dt_node_to_map = &berlin_pinctrl_dt_node_to_map,
- .dt_free_map = &pinctrl_utils_free_map,
+ .get_groups_count = berlin_pinctrl_get_group_count,
+ .get_group_name = berlin_pinctrl_get_group_name,
+ .dt_node_to_map = berlin_pinctrl_dt_node_to_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
static int berlin_pinmux_get_functions_count(struct pinctrl_dev *pctrl_dev)
diff --git a/drivers/pinctrl/berlin/pinctrl-as370.c b/drivers/pinctrl/berlin/pinctrl-as370.c
index b631c14813a7..fc0daec94e10 100644
--- a/drivers/pinctrl/berlin/pinctrl-as370.c
+++ b/drivers/pinctrl/berlin/pinctrl-as370.c
@@ -8,8 +8,9 @@
*/
#include <linux/init.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include "berlin.h"
@@ -330,8 +331,8 @@ static const struct of_device_id as370_pinctrl_match[] = {
static int as370_pinctrl_probe(struct platform_device *pdev)
{
- const struct of_device_id *match =
- of_match_device(as370_pinctrl_match, &pdev->dev);
+ const struct berlin_pinctrl_desc *desc =
+ device_get_match_data(&pdev->dev);
struct regmap_config *rmconfig;
struct regmap *regmap;
struct resource *res;
@@ -354,7 +355,7 @@ static int as370_pinctrl_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- return berlin_pinctrl_probe_regmap(pdev, match->data, regmap);
+ return berlin_pinctrl_probe_regmap(pdev, desc, regmap);
}
static struct platform_driver as370_pinctrl_driver = {
diff --git a/drivers/pinctrl/cirrus/pinctrl-cs42l43.c b/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
index c09646318419..012b0a3bad5a 100644
--- a/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
+++ b/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
@@ -506,17 +506,12 @@ static void cs42l43_gpio_set(struct gpio_chip *chip, unsigned int offset, int va
pm_runtime_put(priv->dev);
}
-static int cs42l43_gpio_direction_in(struct gpio_chip *chip, unsigned int offset)
-{
- return pinctrl_gpio_direction_input(chip->base + offset);
-}
-
static int cs42l43_gpio_direction_out(struct gpio_chip *chip,
unsigned int offset, int value)
{
cs42l43_gpio_set(chip, offset, value);
- return pinctrl_gpio_direction_output(chip->base + offset);
+ return pinctrl_gpio_direction_output(chip, offset);
}
static int cs42l43_gpio_add_pin_ranges(struct gpio_chip *chip)
@@ -551,7 +546,7 @@ static int cs42l43_pin_probe(struct platform_device *pdev)
priv->gpio_chip.request = gpiochip_generic_request;
priv->gpio_chip.free = gpiochip_generic_free;
- priv->gpio_chip.direction_input = cs42l43_gpio_direction_in;
+ priv->gpio_chip.direction_input = pinctrl_gpio_direction_input;
priv->gpio_chip.direction_output = cs42l43_gpio_direction_out;
priv->gpio_chip.add_pin_ranges = cs42l43_gpio_add_pin_ranges;
priv->gpio_chip.get = cs42l43_gpio_get;
diff --git a/drivers/pinctrl/cirrus/pinctrl-lochnagar.c b/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
index 0b78cf611afe..014297a3fbd2 100644
--- a/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
+++ b/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
@@ -1098,7 +1098,7 @@ static int lochnagar_gpio_direction_out(struct gpio_chip *chip,
{
lochnagar_gpio_set(chip, offset, value);
- return pinctrl_gpio_direction_output(chip->base + offset);
+ return pinctrl_gpio_direction_output(chip, offset);
}
static int lochnagar_fill_func_groups(struct lochnagar_pin_priv *priv)
diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
index bb589922d8c5..898b197c3738 100644
--- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c
+++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
@@ -1084,19 +1084,17 @@ static int madera_pin_probe(struct platform_device *pdev)
return 0;
}
-static int madera_pin_remove(struct platform_device *pdev)
+static void madera_pin_remove(struct platform_device *pdev)
{
struct madera_pin_private *priv = platform_get_drvdata(pdev);
if (priv->madera->pdata.gpio_configs)
pinctrl_unregister_mappings(priv->madera->pdata.gpio_configs);
-
- return 0;
}
static struct platform_driver madera_pin_driver = {
.probe = madera_pin_probe,
- .remove = madera_pin_remove,
+ .remove_new = madera_pin_remove,
.driver = {
.name = "madera-pinctrl",
},
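
The madera hunk is a straightforward instance of the .remove_new conversion: the remove callback returns void, since an error return from the old int callback could not stop device removal anyway. A bare-bones sketch with hypothetical my_pin_* names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int my_pin_probe(struct platform_device *pdev)
{
	return 0;
}

static void my_pin_remove(struct platform_device *pdev)
{
	/* Tear-down that cannot meaningfully fail goes here. */
}

static struct platform_driver my_pin_driver = {
	.probe      = my_pin_probe,
	.remove_new = my_pin_remove,
	.driver = {
		.name = "my-pinctrl",
	},
};
module_platform_driver(my_pin_driver);

MODULE_LICENSE("GPL");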
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index e9dc9638120a..1fa89be29b8f 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -12,17 +12,19 @@
*/
#define pr_fmt(fmt) "pinctrl core: " fmt
+#include <linux/array_size.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
-#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/gpio/driver.h>
+
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/pinctrl/machine.h>
@@ -267,7 +269,8 @@ static int pinctrl_register_pins(struct pinctrl_dev *pctldev,
/**
* gpio_to_pin() - GPIO range GPIO number to pin number translation
* @range: GPIO range used for the translation
- * @gpio: gpio pin to translate to a pin number
+ * @gc: GPIO chip structure from the GPIO subsystem
+ * @offset: hardware offset of the GPIO relative to the controller
*
* Finds the pin number for a given GPIO using the specified GPIO range
* as a base for translation. The distinction between linear GPIO ranges
@@ -278,25 +281,27 @@ static int pinctrl_register_pins(struct pinctrl_dev *pctldev,
* result of successful pinctrl_get_device_gpio_range calls)!
*/
static inline int gpio_to_pin(struct pinctrl_gpio_range *range,
- unsigned int gpio)
+ struct gpio_chip *gc, unsigned int offset)
{
- unsigned int offset = gpio - range->base;
+ unsigned int pin = gc->base + offset - range->base;
if (range->pins)
- return range->pins[offset];
+ return range->pins[pin];
else
- return range->pin_base + offset;
+ return range->pin_base + pin;
}
/**
* pinctrl_match_gpio_range() - check if a certain GPIO pin is in range
* @pctldev: pin controller device to check
- * @gpio: gpio pin to check taken from the global GPIO pin space
+ * @gc: GPIO chip structure from the GPIO subsystem
+ * @offset: hardware offset of the GPIO relative to the controller
*
* Tries to match a GPIO pin number to the ranges handled by a certain pin
* controller, return the range or NULL
*/
static struct pinctrl_gpio_range *
-pinctrl_match_gpio_range(struct pinctrl_dev *pctldev, unsigned gpio)
+pinctrl_match_gpio_range(struct pinctrl_dev *pctldev, struct gpio_chip *gc,
+ unsigned int offset)
{
struct pinctrl_gpio_range *range;
@@ -304,8 +309,8 @@ pinctrl_match_gpio_range(struct pinctrl_dev *pctldev, unsigned gpio)
/* Loop over the ranges */
list_for_each_entry(range, &pctldev->gpio_ranges, node) {
/* Check if we're in the valid range */
- if (gpio >= range->base &&
- gpio < range->base + range->npins) {
+ if ((gc->base + offset) >= range->base &&
+ (gc->base + offset) < range->base + range->npins) {
mutex_unlock(&pctldev->mutex);
return range;
}
@@ -317,7 +322,8 @@ pinctrl_match_gpio_range(struct pinctrl_dev *pctldev, unsigned gpio)
/**
* pinctrl_ready_for_gpio_range() - check if other GPIO pins of
* the same GPIO chip are in range
- * @gpio: gpio pin to check taken from the global GPIO pin space
+ * @gc: GPIO chip structure from the GPIO subsystem
+ * @offset: hardware offset of the GPIO relative to the controller
*
 * This function is the complement of pinctrl_match_gpio_range(). If the return
* value of pinctrl_match_gpio_range() is NULL, this function could be used
@@ -328,19 +334,11 @@ pinctrl_match_gpio_range(struct pinctrl_dev *pctldev, unsigned gpio)
* is false, it means that pinctrl device may not be ready.
*/
#ifdef CONFIG_GPIOLIB
-static bool pinctrl_ready_for_gpio_range(unsigned gpio)
+static bool pinctrl_ready_for_gpio_range(struct gpio_chip *gc,
+ unsigned int offset)
{
struct pinctrl_dev *pctldev;
struct pinctrl_gpio_range *range = NULL;
- /*
- * FIXME: "gpio" here is a number in the global GPIO numberspace.
- * get rid of this from the ranges eventually and get the GPIO
- * descriptor from the gpio_chip.
- */
- struct gpio_chip *chip = gpiod_to_chip(gpio_to_desc(gpio));
-
- if (WARN(!chip, "no gpio_chip for gpio%i?", gpio))
- return false;
mutex_lock(&pinctrldev_list_mutex);
@@ -350,8 +348,8 @@ static bool pinctrl_ready_for_gpio_range(unsigned gpio)
mutex_lock(&pctldev->mutex);
list_for_each_entry(range, &pctldev->gpio_ranges, node) {
/* Check if any gpio range overlapped with gpio chip */
- if (range->base + range->npins - 1 < chip->base ||
- range->base > chip->base + chip->ngpio - 1)
+ if (range->base + range->npins - 1 < gc->base ||
+ range->base > gc->base + gc->ngpio - 1)
continue;
mutex_unlock(&pctldev->mutex);
mutex_unlock(&pinctrldev_list_mutex);
@@ -365,12 +363,17 @@ static bool pinctrl_ready_for_gpio_range(unsigned gpio)
return false;
}
#else
-static bool pinctrl_ready_for_gpio_range(unsigned gpio) { return true; }
+static inline bool
+pinctrl_ready_for_gpio_range(struct gpio_chip *gc, unsigned int offset)
+{
+ return true;
+}
#endif
/**
* pinctrl_get_device_gpio_range() - find device for GPIO range
- * @gpio: the pin to locate the pin controller for
+ * @gc: GPIO chip structure from the GPIO subsystem
+ * @offset: hardware offset of the GPIO relative to the controller
* @outdev: the pin control device if found
* @outrange: the GPIO range if found
*
@@ -379,7 +382,8 @@ static bool pinctrl_ready_for_gpio_range(unsigned gpio) { return true; }
* -EPROBE_DEFER if the GPIO range could not be found in any device since it
* may still have not been registered.
*/
-static int pinctrl_get_device_gpio_range(unsigned gpio,
+static int pinctrl_get_device_gpio_range(struct gpio_chip *gc,
+ unsigned int offset,
struct pinctrl_dev **outdev,
struct pinctrl_gpio_range **outrange)
{
@@ -391,7 +395,7 @@ static int pinctrl_get_device_gpio_range(unsigned gpio,
list_for_each_entry(pctldev, &pinctrldev_list, node) {
struct pinctrl_gpio_range *range;
- range = pinctrl_match_gpio_range(pctldev, gpio);
+ range = pinctrl_match_gpio_range(pctldev, gc, offset);
if (range) {
*outdev = pctldev;
*outrange = range;
@@ -445,9 +449,9 @@ struct pinctrl_dev *pinctrl_find_and_add_gpio_range(const char *devname,
* it has not probed yet, so the driver trying to register this
* range need to defer probing.
*/
- if (!pctldev) {
+ if (!pctldev)
return ERR_PTR(-EPROBE_DEFER);
- }
+
pinctrl_add_gpio_range(pctldev, range);
return pctldev;
@@ -753,7 +757,7 @@ int pinctrl_get_group_selector(struct pinctrl_dev *pctldev,
return -EINVAL;
}
-bool pinctrl_gpio_can_use_line(unsigned gpio)
+bool pinctrl_gpio_can_use_line(struct gpio_chip *gc, unsigned int offset)
{
struct pinctrl_dev *pctldev;
struct pinctrl_gpio_range *range;
@@ -765,13 +769,13 @@ bool pinctrl_gpio_can_use_line(unsigned gpio)
* we're probably dealing with GPIO driver
* without a backing pin controller - bail out.
*/
- if (pinctrl_get_device_gpio_range(gpio, &pctldev, &range))
+ if (pinctrl_get_device_gpio_range(gc, offset, &pctldev, &range))
return true;
mutex_lock(&pctldev->mutex);
/* Convert to the pin controllers number space */
- pin = gpio_to_pin(range, gpio);
+ pin = gpio_to_pin(range, gc, offset);
result = pinmux_can_be_used_for_gpio(pctldev, pin);
@@ -783,22 +787,22 @@ EXPORT_SYMBOL_GPL(pinctrl_gpio_can_use_line);
/**
* pinctrl_gpio_request() - request a single pin to be used as GPIO
- * @gpio: the GPIO pin number from the GPIO subsystem number space
+ * @gc: GPIO chip structure from the GPIO subsystem
+ * @offset: hardware offset of the GPIO relative to the controller
*
* This function should *ONLY* be used from gpiolib-based GPIO drivers,
* as part of their gpio_request() semantics, platforms and individual drivers
* shall *NOT* request GPIO pins to be muxed in.
*/
-int pinctrl_gpio_request(unsigned gpio)
+int pinctrl_gpio_request(struct gpio_chip *gc, unsigned int offset)
{
- struct pinctrl_dev *pctldev;
struct pinctrl_gpio_range *range;
- int ret;
- int pin;
+ struct pinctrl_dev *pctldev;
+ int ret, pin;
- ret = pinctrl_get_device_gpio_range(gpio, &pctldev, &range);
+ ret = pinctrl_get_device_gpio_range(gc, offset, &pctldev, &range);
if (ret) {
- if (pinctrl_ready_for_gpio_range(gpio))
+ if (pinctrl_ready_for_gpio_range(gc, offset))
ret = 0;
return ret;
}
@@ -806,9 +810,9 @@ int pinctrl_gpio_request(unsigned gpio)
mutex_lock(&pctldev->mutex);
/* Convert to the pin controllers number space */
- pin = gpio_to_pin(range, gpio);
+ pin = gpio_to_pin(range, gc, offset);
- ret = pinmux_request_gpio(pctldev, range, pin, gpio);
+ ret = pinmux_request_gpio(pctldev, range, pin, gc->base + offset);
mutex_unlock(&pctldev->mutex);
@@ -818,27 +822,27 @@ EXPORT_SYMBOL_GPL(pinctrl_gpio_request);
/**
* pinctrl_gpio_free() - free control on a single pin, currently used as GPIO
- * @gpio: the GPIO pin number from the GPIO subsystem number space
+ * @gc: GPIO chip structure from the GPIO subsystem
+ * @offset: hardware offset of the GPIO relative to the controller
*
* This function should *ONLY* be used from gpiolib-based GPIO drivers,
- * as part of their gpio_free() semantics, platforms and individual drivers
- * shall *NOT* request GPIO pins to be muxed out.
+ * as part of their gpio_free() semantics, platforms and individual drivers
+ * shall *NOT* request GPIO pins to be muxed out.
*/
-void pinctrl_gpio_free(unsigned gpio)
+void pinctrl_gpio_free(struct gpio_chip *gc, unsigned int offset)
{
- struct pinctrl_dev *pctldev;
struct pinctrl_gpio_range *range;
- int ret;
- int pin;
+ struct pinctrl_dev *pctldev;
+ int ret, pin;
- ret = pinctrl_get_device_gpio_range(gpio, &pctldev, &range);
- if (ret) {
+ ret = pinctrl_get_device_gpio_range(gc, offset, &pctldev, &range);
+ if (ret)
return;
- }
+
mutex_lock(&pctldev->mutex);
/* Convert to the pin controllers number space */
- pin = gpio_to_pin(range, gpio);
+ pin = gpio_to_pin(range, gc, offset);
pinmux_free_gpio(pctldev, pin, range);
@@ -846,14 +850,15 @@ void pinctrl_gpio_free(unsigned gpio)
}
EXPORT_SYMBOL_GPL(pinctrl_gpio_free);
-static int pinctrl_gpio_direction(unsigned gpio, bool input)
+static int pinctrl_gpio_direction(struct gpio_chip *gc, unsigned int offset,
+ bool input)
{
struct pinctrl_dev *pctldev;
struct pinctrl_gpio_range *range;
int ret;
int pin;
- ret = pinctrl_get_device_gpio_range(gpio, &pctldev, &range);
+ ret = pinctrl_get_device_gpio_range(gc, offset, &pctldev, &range);
if (ret) {
return ret;
}
@@ -861,7 +866,7 @@ static int pinctrl_gpio_direction(unsigned gpio, bool input)
mutex_lock(&pctldev->mutex);
/* Convert to the pin controllers number space */
- pin = gpio_to_pin(range, gpio);
+ pin = gpio_to_pin(range, gc, offset);
ret = pinmux_gpio_direction(pctldev, range, pin, input);
mutex_unlock(&pctldev->mutex);
@@ -871,54 +876,58 @@ static int pinctrl_gpio_direction(unsigned gpio, bool input)
/**
* pinctrl_gpio_direction_input() - request a GPIO pin to go into input mode
- * @gpio: the GPIO pin number from the GPIO subsystem number space
+ * @gc: GPIO chip structure from the GPIO subsystem
+ * @offset: hardware offset of the GPIO relative to the controller
*
* This function should *ONLY* be used from gpiolib-based GPIO drivers,
* as part of their gpio_direction_input() semantics, platforms and individual
* drivers shall *NOT* touch pin control GPIO calls.
*/
-int pinctrl_gpio_direction_input(unsigned gpio)
+int pinctrl_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
{
- return pinctrl_gpio_direction(gpio, true);
+ return pinctrl_gpio_direction(gc, offset, true);
}
EXPORT_SYMBOL_GPL(pinctrl_gpio_direction_input);
/**
* pinctrl_gpio_direction_output() - request a GPIO pin to go into output mode
- * @gpio: the GPIO pin number from the GPIO subsystem number space
+ * @gc: GPIO chip structure from the GPIO subsystem
+ * @offset: hardware offset of the GPIO relative to the controller
*
* This function should *ONLY* be used from gpiolib-based GPIO drivers,
* as part of their gpio_direction_output() semantics, platforms and individual
* drivers shall *NOT* touch pin control GPIO calls.
*/
-int pinctrl_gpio_direction_output(unsigned gpio)
+int pinctrl_gpio_direction_output(struct gpio_chip *gc, unsigned int offset)
{
- return pinctrl_gpio_direction(gpio, false);
+ return pinctrl_gpio_direction(gc, offset, false);
}
EXPORT_SYMBOL_GPL(pinctrl_gpio_direction_output);
/**
* pinctrl_gpio_set_config() - Apply config to given GPIO pin
- * @gpio: the GPIO pin number from the GPIO subsystem number space
+ * @gc: GPIO chip structure from the GPIO subsystem
+ * @offset: hardware offset of the GPIO relative to the controller
* @config: the configuration to apply to the GPIO
*
* This function should *ONLY* be used from gpiolib-based GPIO drivers, if
* they need to call the underlying pin controller to change GPIO config
* (for example set debounce time).
*/
-int pinctrl_gpio_set_config(unsigned gpio, unsigned long config)
+int pinctrl_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config)
{
unsigned long configs[] = { config };
struct pinctrl_gpio_range *range;
struct pinctrl_dev *pctldev;
int ret, pin;
- ret = pinctrl_get_device_gpio_range(gpio, &pctldev, &range);
+ ret = pinctrl_get_device_gpio_range(gc, offset, &pctldev, &range);
if (ret)
return ret;
mutex_lock(&pctldev->mutex);
- pin = gpio_to_pin(range, gpio);
+ pin = gpio_to_pin(range, gc, offset);
ret = pinconf_set_config(pctldev, pin, configs, ARRAY_SIZE(configs));
mutex_unlock(&pctldev->mutex);
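For reference, the net effect of the core.c changes above is that the gpiolib-facing helpers now take the consumer's gpio_chip plus a hardware offset, and the number in the global GPIO space is only reconstructed internally (gc->base + offset) where the ranges still store it. Below is a minimal sketch of a GPIO driver's request/free callbacks under the new convention; the foo_* names are placeholders, not code from this series.

#include <linux/gpio/driver.h>
#include <linux/pinctrl/consumer.h>

static int foo_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
	/* was: pinctrl_gpio_request(chip->base + offset) */
	return pinctrl_gpio_request(chip, offset);
}

static void foo_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
	/* was: pinctrl_gpio_free(chip->base + offset) */
	pinctrl_gpio_free(chip, offset);
}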
diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c
index cf3f4d2e0c16..e77311f26262 100644
--- a/drivers/pinctrl/freescale/pinctrl-mxs.c
+++ b/drivers/pinctrl/freescale/pinctrl-mxs.c
@@ -395,6 +395,12 @@ static int mxs_pinctrl_parse_group(struct platform_device *pdev,
return 0;
}
+static bool is_mxs_gpio(struct device_node *child)
+{
+ return of_device_is_compatible(child, "fsl,imx23-gpio") ||
+ of_device_is_compatible(child, "fsl,imx28-gpio");
+}
+
static int mxs_pinctrl_probe_dt(struct platform_device *pdev,
struct mxs_pinctrl_data *d)
{
@@ -402,7 +408,6 @@ static int mxs_pinctrl_probe_dt(struct platform_device *pdev,
struct device_node *np = pdev->dev.of_node;
struct device_node *child;
struct mxs_function *f;
- const char *gpio_compat = "fsl,mxs-gpio";
const char *fn, *fnull = "";
int i = 0, idxf = 0, idxg = 0;
int ret;
@@ -417,7 +422,7 @@ static int mxs_pinctrl_probe_dt(struct platform_device *pdev,
/* Count total functions and groups */
fn = fnull;
for_each_child_of_node(np, child) {
- if (of_device_is_compatible(child, gpio_compat))
+ if (is_mxs_gpio(child))
continue;
soc->ngroups++;
/* Skip pure pinconf node */
@@ -446,7 +451,7 @@ static int mxs_pinctrl_probe_dt(struct platform_device *pdev,
fn = fnull;
f = &soc->functions[idxf];
for_each_child_of_node(np, child) {
- if (of_device_is_compatible(child, gpio_compat))
+ if (is_mxs_gpio(child))
continue;
if (of_property_read_u32(child, "reg", &val))
continue;
@@ -486,7 +491,7 @@ static int mxs_pinctrl_probe_dt(struct platform_device *pdev,
idxf = 0;
fn = fnull;
for_each_child_of_node(np, child) {
- if (of_device_is_compatible(child, gpio_compat))
+ if (is_mxs_gpio(child))
continue;
if (of_property_read_u32(child, "reg", &val)) {
ret = mxs_pinctrl_parse_group(pdev, child,
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index faa8b7ff5bcf..3cd0798ee631 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -7,16 +7,16 @@
*/
#include <linux/acpi.h>
+#include <linux/array_size.h>
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
+#include <linux/pm.h>
#include <linux/property.h>
#include <linux/seq_file.h>
#include <linux/string_helpers.h>
@@ -722,8 +722,6 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
raw_spin_unlock_irqrestore(&byt_lock, flags);
- pm_runtime_get(vg->dev);
-
return 0;
}
@@ -734,7 +732,6 @@ static void byt_gpio_disable_free(struct pinctrl_dev *pctl_dev,
struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
byt_gpio_clear_triggering(vg, offset);
- pm_runtime_put(vg->dev);
}
static void byt_gpio_direct_irq_check(struct intel_pinctrl *vg,
@@ -983,11 +980,18 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
break;
case PIN_CONFIG_INPUT_DEBOUNCE:
- if (arg)
+ if (arg) {
conf |= BYT_DEBOUNCE_EN;
- else
+ } else {
conf &= ~BYT_DEBOUNCE_EN;
+ /*
+ * No need to update the pulse value.
+ * Debounce is going to be disabled.
+ */
+ break;
+ }
+
switch (arg) {
case 375:
db_pulse = BYT_DEBOUNCE_PULSE_375US;
@@ -1654,7 +1658,6 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
return ret;
platform_set_drvdata(pdev, vg);
- pm_runtime_enable(dev);
return 0;
}
@@ -1743,26 +1746,15 @@ static int byt_gpio_resume(struct device *dev)
return 0;
}
-static int byt_gpio_runtime_suspend(struct device *dev)
-{
- return 0;
-}
-
-static int byt_gpio_runtime_resume(struct device *dev)
-{
- return 0;
-}
-
static const struct dev_pm_ops byt_gpio_pm_ops = {
LATE_SYSTEM_SLEEP_PM_OPS(byt_gpio_suspend, byt_gpio_resume)
- RUNTIME_PM_OPS(byt_gpio_runtime_suspend, byt_gpio_runtime_resume, NULL)
};
static struct platform_driver byt_gpio_driver = {
.probe = byt_pinctrl_probe,
.driver = {
.name = "byt_gpio",
- .pm = pm_ptr(&byt_gpio_pm_ops),
+ .pm = pm_sleep_ptr(&byt_gpio_pm_ops),
.acpi_match_table = byt_gpio_acpi_match,
.suppress_bind_attrs = true,
},
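With the pm_runtime_get()/put() calls and the empty runtime PM callbacks gone, the Baytrail driver's PM ops only describe system sleep, so pm_sleep_ptr() lets the compiler drop them entirely when CONFIG_PM_SLEEP is disabled. A hedged sketch of the resulting shape, with foo_* names as placeholders:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev) { /* save pad state */ return 0; }
static int foo_resume(struct device *dev)  { /* restore pad state */ return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		/* NULL (and foo_pm_ops unreferenced) when CONFIG_PM_SLEEP=n */
		.pm = pm_sleep_ptr(&foo_pm_ops),
	},
};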
diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c
index 4d5ddb297909..3118c7c8842f 100644
--- a/drivers/pinctrl/intel/pinctrl-broxton.c
+++ b/drivers/pinctrl/intel/pinctrl-broxton.c
@@ -998,6 +998,7 @@ static const struct platform_device_id bxt_pinctrl_platform_ids[] = {
{ "broxton-pinctrl", (kernel_ulong_t)bxt_pinctrl_soc_data },
{ }
};
+MODULE_DEVICE_TABLE(platform, bxt_pinctrl_platform_ids);
static INTEL_PINCTRL_PM_OPS(bxt_pinctrl_pm_ops);
@@ -1026,6 +1027,4 @@ module_exit(bxt_pinctrl_exit);
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("Intel Broxton SoC pinctrl/GPIO driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:apollolake-pinctrl");
-MODULE_ALIAS("platform:broxton-pinctrl");
MODULE_IMPORT_NS(PINCTRL_INTEL);
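The Broxton change above replaces hand-written MODULE_ALIAS() strings with a MODULE_DEVICE_TABLE() entry for the already-present platform id table, so the autoload aliases are generated from the same table the driver core uses for matching. A small sketch of the pattern, with placeholder names:

#include <linux/mod_devicetable.h>
#include <linux/module.h>

static const struct platform_device_id foo_platform_ids[] = {
	{ .name = "foo-pinctrl" },
	{ }	/* sentinel */
};
/* Emits the "platform:foo-pinctrl" alias that MODULE_ALIAS() used to provide. */
MODULE_DEVICE_TABLE(platform, foo_platform_ids);

The table is then hooked up through the driver's .id_table field, as the Denverton hunk further below does explicitly.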
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 81ee949b946d..067b0d344f0e 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -11,9 +11,10 @@
*/
#include <linux/acpi.h>
+#include <linux/array_size.h>
+#include <linux/cleanup.h>
#include <linux/dmi.h>
#include <linux/gpio/driver.h>
-#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
@@ -612,26 +613,26 @@ static void chv_writel(struct intel_pinctrl *pctrl, unsigned int pin, unsigned i
}
/* When Pad Cfg is locked, driver can only change GPIOTXState or GPIORXState */
+static bool chv_pad_is_locked(u32 ctrl1)
+{
+ return ctrl1 & CHV_PADCTRL1_CFGLOCK;
+}
+
static bool chv_pad_locked(struct intel_pinctrl *pctrl, unsigned int offset)
{
- return chv_readl(pctrl, offset, CHV_PADCTRL1) & CHV_PADCTRL1_CFGLOCK;
+ return chv_pad_is_locked(chv_readl(pctrl, offset, CHV_PADCTRL1));
}
static void chv_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
unsigned int offset)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- unsigned long flags;
u32 ctrl0, ctrl1;
- bool locked;
-
- raw_spin_lock_irqsave(&chv_lock, flags);
-
- ctrl0 = chv_readl(pctrl, offset, CHV_PADCTRL0);
- ctrl1 = chv_readl(pctrl, offset, CHV_PADCTRL1);
- locked = chv_pad_locked(pctrl, offset);
- raw_spin_unlock_irqrestore(&chv_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &chv_lock) {
+ ctrl0 = chv_readl(pctrl, offset, CHV_PADCTRL0);
+ ctrl1 = chv_readl(pctrl, offset, CHV_PADCTRL1);
+ }
if (ctrl0 & CHV_PADCTRL0_GPIOEN) {
seq_puts(s, "GPIO ");
@@ -646,7 +647,7 @@ static void chv_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
seq_printf(s, "0x%08x 0x%08x", ctrl0, ctrl1);
- if (locked)
+ if (chv_pad_is_locked(ctrl1))
seq_puts(s, " [LOCKED]");
}
@@ -663,17 +664,15 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
struct device *dev = pctrl->dev;
const struct intel_pingroup *grp;
- unsigned long flags;
int i;
grp = &pctrl->soc->groups[group];
- raw_spin_lock_irqsave(&chv_lock, flags);
+ guard(raw_spinlock_irqsave)(&chv_lock);
/* Check first that the pad is not locked */
for (i = 0; i < grp->grp.npins; i++) {
if (chv_pad_locked(pctrl, grp->grp.pins[i])) {
- raw_spin_unlock_irqrestore(&chv_lock, flags);
dev_warn(dev, "unable to set mode for locked pin %u\n", grp->grp.pins[i]);
return -EBUSY;
}
@@ -713,8 +712,6 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
invert_oe ? "" : "not ");
}
- raw_spin_unlock_irqrestore(&chv_lock, flags);
-
return 0;
}
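The Cherryview conversions above and below swap open-coded raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pairs for the scope-based guards from <linux/cleanup.h>: the lock is dropped automatically when the guard goes out of scope, so early returns no longer need explicit unlocking. A self-contained sketch of the two forms used in this file (the lock and data are placeholders, not driver state):

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(foo_lock);
static u32 foo_shadow;

static int foo_update(u32 mask)
{
	/* Held until the end of the function, released on every return path. */
	guard(raw_spinlock_irqsave)(&foo_lock);

	if (!mask)
		return -EINVAL;		/* unlock happens automatically */

	foo_shadow |= mask;
	return 0;
}

static u32 foo_read(void)
{
	u32 val;

	/* Held only for the duration of the statement that follows. */
	scoped_guard(raw_spinlock_irqsave, &foo_lock)
		val = foo_shadow;

	return val;
}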
@@ -745,16 +742,14 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
unsigned int offset)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- unsigned long flags;
u32 value;
- raw_spin_lock_irqsave(&chv_lock, flags);
+ guard(raw_spinlock_irqsave)(&chv_lock);
if (chv_pad_locked(pctrl, offset)) {
value = chv_readl(pctrl, offset, CHV_PADCTRL0);
if (!(value & CHV_PADCTRL0_GPIOEN)) {
/* Locked so cannot enable */
- raw_spin_unlock_irqrestore(&chv_lock, flags);
return -EBUSY;
}
} else {
@@ -789,8 +784,6 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
chv_writel(pctrl, offset, CHV_PADCTRL0, value);
}
- raw_spin_unlock_irqrestore(&chv_lock, flags);
-
return 0;
}
@@ -799,14 +792,13 @@ static void chv_gpio_disable_free(struct pinctrl_dev *pctldev,
unsigned int offset)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- unsigned long flags;
- raw_spin_lock_irqsave(&chv_lock, flags);
+ guard(raw_spinlock_irqsave)(&chv_lock);
- if (!chv_pad_locked(pctrl, offset))
- chv_gpio_clear_triggering(pctrl, offset);
+ if (chv_pad_locked(pctrl, offset))
+ return;
- raw_spin_unlock_irqrestore(&chv_lock, flags);
+ chv_gpio_clear_triggering(pctrl, offset);
}
static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
@@ -814,10 +806,9 @@ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
unsigned int offset, bool input)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- unsigned long flags;
u32 ctrl0;
- raw_spin_lock_irqsave(&chv_lock, flags);
+ guard(raw_spinlock_irqsave)(&chv_lock);
ctrl0 = chv_readl(pctrl, offset, CHV_PADCTRL0) & ~CHV_PADCTRL0_GPIOCFG_MASK;
if (input)
@@ -826,8 +817,6 @@ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
ctrl0 |= CHV_PADCTRL0_GPIOCFG_GPO << CHV_PADCTRL0_GPIOCFG_SHIFT;
chv_writel(pctrl, offset, CHV_PADCTRL0, ctrl0);
- raw_spin_unlock_irqrestore(&chv_lock, flags);
-
return 0;
}
@@ -846,15 +835,14 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param = pinconf_to_config_param(*config);
- unsigned long flags;
u32 ctrl0, ctrl1;
u16 arg = 0;
u32 term;
- raw_spin_lock_irqsave(&chv_lock, flags);
- ctrl0 = chv_readl(pctrl, pin, CHV_PADCTRL0);
- ctrl1 = chv_readl(pctrl, pin, CHV_PADCTRL1);
- raw_spin_unlock_irqrestore(&chv_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &chv_lock) {
+ ctrl0 = chv_readl(pctrl, pin, CHV_PADCTRL0);
+ ctrl1 = chv_readl(pctrl, pin, CHV_PADCTRL1);
+ }
term = (ctrl0 & CHV_PADCTRL0_TERM_MASK) >> CHV_PADCTRL0_TERM_SHIFT;
@@ -906,6 +894,7 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
return -EINVAL;
break;
+ }
case PIN_CONFIG_DRIVE_PUSH_PULL:
if (ctrl1 & CHV_PADCTRL1_ODEN)
@@ -916,7 +905,6 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
if (!(ctrl1 & CHV_PADCTRL1_ODEN))
return -EINVAL;
break;
- }
default:
return -ENOTSUPP;
@@ -929,10 +917,10 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
static int chv_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
enum pin_config_param param, u32 arg)
{
- unsigned long flags;
u32 ctrl0, pull;
- raw_spin_lock_irqsave(&chv_lock, flags);
+ guard(raw_spinlock_irqsave)(&chv_lock);
+
ctrl0 = chv_readl(pctrl, pin, CHV_PADCTRL0);
switch (param) {
@@ -955,7 +943,6 @@ static int chv_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
break;
default:
- raw_spin_unlock_irqrestore(&chv_lock, flags);
return -EINVAL;
}
@@ -973,7 +960,6 @@ static int chv_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
break;
default:
- raw_spin_unlock_irqrestore(&chv_lock, flags);
return -EINVAL;
}
@@ -981,12 +967,10 @@ static int chv_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
break;
default:
- raw_spin_unlock_irqrestore(&chv_lock, flags);
return -EINVAL;
}
chv_writel(pctrl, pin, CHV_PADCTRL0, ctrl0);
- raw_spin_unlock_irqrestore(&chv_lock, flags);
return 0;
}
@@ -994,10 +978,10 @@ static int chv_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
static int chv_config_set_oden(struct intel_pinctrl *pctrl, unsigned int pin,
bool enable)
{
- unsigned long flags;
u32 ctrl1;
- raw_spin_lock_irqsave(&chv_lock, flags);
+ guard(raw_spinlock_irqsave)(&chv_lock);
+
ctrl1 = chv_readl(pctrl, pin, CHV_PADCTRL1);
if (enable)
@@ -1006,7 +990,6 @@ static int chv_config_set_oden(struct intel_pinctrl *pctrl, unsigned int pin,
ctrl1 &= ~CHV_PADCTRL1_ODEN;
chv_writel(pctrl, pin, CHV_PADCTRL1, ctrl1);
- raw_spin_unlock_irqrestore(&chv_lock, flags);
return 0;
}
@@ -1116,12 +1099,10 @@ static struct pinctrl_desc chv_pinctrl_desc = {
static int chv_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
- unsigned long flags;
u32 ctrl0, cfg;
- raw_spin_lock_irqsave(&chv_lock, flags);
- ctrl0 = chv_readl(pctrl, offset, CHV_PADCTRL0);
- raw_spin_unlock_irqrestore(&chv_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &chv_lock)
+ ctrl0 = chv_readl(pctrl, offset, CHV_PADCTRL0);
cfg = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
cfg >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
@@ -1134,10 +1115,9 @@ static int chv_gpio_get(struct gpio_chip *chip, unsigned int offset)
static void chv_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
- unsigned long flags;
u32 ctrl0;
- raw_spin_lock_irqsave(&chv_lock, flags);
+ guard(raw_spinlock_irqsave)(&chv_lock);
ctrl0 = chv_readl(pctrl, offset, CHV_PADCTRL0);
@@ -1147,19 +1127,15 @@ static void chv_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
ctrl0 &= ~CHV_PADCTRL0_GPIOTXSTATE;
chv_writel(pctrl, offset, CHV_PADCTRL0, ctrl0);
-
- raw_spin_unlock_irqrestore(&chv_lock, flags);
}
static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
u32 ctrl0, direction;
- unsigned long flags;
- raw_spin_lock_irqsave(&chv_lock, flags);
- ctrl0 = chv_readl(pctrl, offset, CHV_PADCTRL0);
- raw_spin_unlock_irqrestore(&chv_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &chv_lock)
+ ctrl0 = chv_readl(pctrl, offset, CHV_PADCTRL0);
direction = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
direction >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
@@ -1172,14 +1148,14 @@ static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
static int chv_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
- return pinctrl_gpio_direction_input(chip->base + offset);
+ return pinctrl_gpio_direction_input(chip, offset);
}
static int chv_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
int value)
{
chv_gpio_set(chip, offset, value);
- return pinctrl_gpio_direction_output(chip->base + offset);
+ return pinctrl_gpio_direction_output(chip, offset);
}
static const struct gpio_chip chv_gpio_chip = {
@@ -1200,23 +1176,20 @@ static void chv_gpio_irq_ack(struct irq_data *d)
irq_hw_number_t hwirq = irqd_to_hwirq(d);
u32 intr_line;
- raw_spin_lock(&chv_lock);
+ guard(raw_spinlock)(&chv_lock);
intr_line = chv_readl(pctrl, hwirq, CHV_PADCTRL0);
intr_line &= CHV_PADCTRL0_INTSEL_MASK;
intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT;
chv_pctrl_writel(pctrl, CHV_INTSTAT, BIT(intr_line));
-
- raw_spin_unlock(&chv_lock);
}
static void chv_gpio_irq_mask_unmask(struct gpio_chip *gc, irq_hw_number_t hwirq, bool mask)
{
struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
u32 value, intr_line;
- unsigned long flags;
- raw_spin_lock_irqsave(&chv_lock, flags);
+ guard(raw_spinlock_irqsave)(&chv_lock);
intr_line = chv_readl(pctrl, hwirq, CHV_PADCTRL0);
intr_line &= CHV_PADCTRL0_INTSEL_MASK;
@@ -1228,8 +1201,6 @@ static void chv_gpio_irq_mask_unmask(struct gpio_chip *gc, irq_hw_number_t hwirq
else
value |= BIT(intr_line);
chv_pctrl_writel(pctrl, CHV_INTMASK, value);
-
- raw_spin_unlock_irqrestore(&chv_lock, flags);
}
static void chv_gpio_irq_mask(struct irq_data *d)
@@ -1254,7 +1225,15 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
{
/*
* Check if the interrupt has been requested with 0 as triggering
- * type. In that case it is assumed that the current values
+ * type. If not, bail out, ...
+ */
+ if (irqd_get_trigger_type(d) != IRQ_TYPE_NONE) {
+ chv_gpio_irq_unmask(d);
+ return 0;
+ }
+
+ /*
+ * ...otherwise it is assumed that the current values
* programmed to the hardware are used (e.g BIOS configured
* defaults).
*
@@ -1262,17 +1241,15 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
* read back the values from hardware now, set correct flow handler
* and update mappings before the interrupt is being used.
*/
- if (irqd_get_trigger_type(d) == IRQ_TYPE_NONE) {
+ scoped_guard(raw_spinlock_irqsave, &chv_lock) {
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
struct device *dev = pctrl->dev;
struct intel_community_context *cctx = &pctrl->context.communities[0];
irq_hw_number_t hwirq = irqd_to_hwirq(d);
irq_flow_handler_t handler;
- unsigned long flags;
u32 intsel, value;
- raw_spin_lock_irqsave(&chv_lock, flags);
intsel = chv_readl(pctrl, hwirq, CHV_PADCTRL0);
intsel &= CHV_PADCTRL0_INTSEL_MASK;
intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
@@ -1289,7 +1266,6 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
intsel, hwirq);
cctx->intr_lines[intsel] = hwirq;
}
- raw_spin_unlock_irqrestore(&chv_lock, flags);
}
chv_gpio_irq_unmask(d);
@@ -1354,17 +1330,14 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned int type)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- unsigned long flags;
u32 value;
int ret;
- raw_spin_lock_irqsave(&chv_lock, flags);
+ guard(raw_spinlock_irqsave)(&chv_lock);
ret = chv_gpio_set_intr_line(pctrl, hwirq);
- if (ret) {
- raw_spin_unlock_irqrestore(&chv_lock, flags);
+ if (ret)
return ret;
- }
/*
* Pins which can be used as shared interrupt are configured in
@@ -1405,8 +1378,6 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned int type)
else if (type & IRQ_TYPE_LEVEL_MASK)
irq_set_handler_locked(d, handle_level_irq);
- raw_spin_unlock_irqrestore(&chv_lock, flags);
-
return 0;
}
@@ -1430,14 +1401,12 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
struct intel_community_context *cctx = &pctrl->context.communities[0];
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long pending;
- unsigned long flags;
u32 intr_line;
chained_irq_enter(chip, desc);
- raw_spin_lock_irqsave(&chv_lock, flags);
- pending = chv_pctrl_readl(pctrl, CHV_INTSTAT);
- raw_spin_unlock_irqrestore(&chv_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &chv_lock)
+ pending = chv_pctrl_readl(pctrl, CHV_INTSTAT);
for_each_set_bit(intr_line, &pending, community->nirqs) {
unsigned int offset;
@@ -1626,21 +1595,17 @@ static acpi_status chv_pinctrl_mmio_access_handler(u32 function,
void *handler_context, void *region_context)
{
struct intel_pinctrl *pctrl = region_context;
- unsigned long flags;
- acpi_status ret = AE_OK;
- raw_spin_lock_irqsave(&chv_lock, flags);
+ guard(raw_spinlock_irqsave)(&chv_lock);
if (function == ACPI_WRITE)
chv_pctrl_writel(pctrl, address, *value);
else if (function == ACPI_READ)
*value = chv_pctrl_readl(pctrl, address);
else
- ret = AE_BAD_PARAMETER;
-
- raw_spin_unlock_irqrestore(&chv_lock, flags);
+ return AE_BAD_PARAMETER;
- return ret;
+ return AE_OK;
}
static int chv_pinctrl_probe(struct platform_device *pdev)
@@ -1728,7 +1693,7 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
return 0;
}
-static int chv_pinctrl_remove(struct platform_device *pdev)
+static void chv_pinctrl_remove(struct platform_device *pdev)
{
struct intel_pinctrl *pctrl = platform_get_drvdata(pdev);
const struct intel_community *community = &pctrl->communities[0];
@@ -1736,18 +1701,15 @@ static int chv_pinctrl_remove(struct platform_device *pdev)
acpi_remove_address_space_handler(ACPI_HANDLE(&pdev->dev),
community->acpi_space_id,
chv_pinctrl_mmio_access_handler);
-
- return 0;
}
static int chv_pinctrl_suspend_noirq(struct device *dev)
{
struct intel_pinctrl *pctrl = dev_get_drvdata(dev);
struct intel_community_context *cctx = &pctrl->context.communities[0];
- unsigned long flags;
int i;
- raw_spin_lock_irqsave(&chv_lock, flags);
+ guard(raw_spinlock_irqsave)(&chv_lock);
cctx->saved_intmask = chv_pctrl_readl(pctrl, CHV_INTMASK);
@@ -1765,8 +1727,6 @@ static int chv_pinctrl_suspend_noirq(struct device *dev)
ctx->padctrl1 = chv_readl(pctrl, desc->number, CHV_PADCTRL1);
}
- raw_spin_unlock_irqrestore(&chv_lock, flags);
-
return 0;
}
@@ -1774,10 +1734,9 @@ static int chv_pinctrl_resume_noirq(struct device *dev)
{
struct intel_pinctrl *pctrl = dev_get_drvdata(dev);
struct intel_community_context *cctx = &pctrl->context.communities[0];
- unsigned long flags;
int i;
- raw_spin_lock_irqsave(&chv_lock, flags);
+ guard(raw_spinlock_irqsave)(&chv_lock);
/*
* Mask all interrupts before restoring per-pin configuration
@@ -1819,8 +1778,6 @@ static int chv_pinctrl_resume_noirq(struct device *dev)
chv_pctrl_writel(pctrl, CHV_INTSTAT, 0xffff);
chv_pctrl_writel(pctrl, CHV_INTMASK, cctx->saved_intmask);
- raw_spin_unlock_irqrestore(&chv_lock, flags);
-
return 0;
}
@@ -1835,7 +1792,7 @@ MODULE_DEVICE_TABLE(acpi, chv_pinctrl_acpi_match);
static struct platform_driver chv_pinctrl_driver = {
.probe = chv_pinctrl_probe,
- .remove = chv_pinctrl_remove,
+ .remove_new = chv_pinctrl_remove,
.driver = {
.name = "cherryview-pinctrl",
.pm = pm_sleep_ptr(&chv_pinctrl_pm_ops),
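chv_pinctrl_remove() above also moves to the void-returning remove callback registered via .remove_new, part of the ongoing conversion away from int-returning platform remove callbacks whose return value the driver core ignores anyway. A short sketch of the pattern (foo_* names are placeholders):

#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

static void foo_remove(struct platform_device *pdev)
{
	/* teardown only; there is no error code the core could act on */
}

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	/* transitional member for the void-returning callback */
	.remove_new = foo_remove,
	.driver = {
		.name = "foo",
	},
};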
diff --git a/drivers/pinctrl/intel/pinctrl-denverton.c b/drivers/pinctrl/intel/pinctrl-denverton.c
index 0c4694cfa594..562a4f9188e4 100644
--- a/drivers/pinctrl/intel/pinctrl-denverton.c
+++ b/drivers/pinctrl/intel/pinctrl-denverton.c
@@ -257,6 +257,12 @@ static const struct acpi_device_id dnv_pinctrl_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, dnv_pinctrl_acpi_match);
+static const struct platform_device_id dnv_pinctrl_platform_ids[] = {
+ { "denverton-pinctrl", (kernel_ulong_t)&dnv_soc_data },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, dnv_pinctrl_platform_ids);
+
static struct platform_driver dnv_pinctrl_driver = {
.probe = intel_pinctrl_probe_by_hid,
.driver = {
@@ -264,6 +270,7 @@ static struct platform_driver dnv_pinctrl_driver = {
.acpi_match_table = dnv_pinctrl_acpi_match,
.pm = &dnv_pinctrl_pm_ops,
},
+ .id_table = dnv_pinctrl_platform_ids,
};
static int __init dnv_pinctrl_init(void)
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 999f453344d2..652ba451f885 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -8,6 +8,7 @@
*/
#include <linux/acpi.h>
+#include <linux/cleanup.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/log2.h>
@@ -393,20 +394,17 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
const struct intel_pingroup *grp = &pctrl->soc->groups[group];
- unsigned long flags;
int i;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ guard(raw_spinlock_irqsave)(&pctrl->lock);
/*
* All pins in the groups needs to be accessible and writable
* before we can enable the mux for this group.
*/
for (i = 0; i < grp->grp.npins; i++) {
- if (!intel_pad_usable(pctrl, grp->grp.pins[i])) {
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ if (!intel_pad_usable(pctrl, grp->grp.pins[i]))
return -EBUSY;
- }
}
/* Now enable the mux setting for each pin in the group */
@@ -428,8 +426,6 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev,
writel(value, padcfg0);
}
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
-
return 0;
}
@@ -485,21 +481,16 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
void __iomem *padcfg0;
- unsigned long flags;
padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ guard(raw_spinlock_irqsave)(&pctrl->lock);
- if (!intel_pad_owned_by_host(pctrl, pin)) {
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ if (!intel_pad_owned_by_host(pctrl, pin))
return -EBUSY;
- }
- if (!intel_pad_is_unlocked(pctrl, pin)) {
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ if (!intel_pad_is_unlocked(pctrl, pin))
return 0;
- }
/*
* If pin is already configured in GPIO mode, we assume that
@@ -507,15 +498,11 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
* potential glitches on the pin. Otherwise, for the pin in
* alternative mode, consumer has to supply respective flags.
*/
- if (intel_gpio_get_gpio_mode(padcfg0) == PADCFG0_PMODE_GPIO) {
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ if (intel_gpio_get_gpio_mode(padcfg0) == PADCFG0_PMODE_GPIO)
return 0;
- }
intel_gpio_set_gpio_mode(padcfg0);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
-
return 0;
}
@@ -525,13 +512,12 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
void __iomem *padcfg0;
- unsigned long flags;
padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ guard(raw_spinlock_irqsave)(&pctrl->lock);
+
__intel_gpio_set_direction(padcfg0, input);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
@@ -548,17 +534,13 @@ static const struct pinmux_ops intel_pinmux_ops = {
static int intel_config_get_pull(struct intel_pinctrl *pctrl, unsigned int pin,
enum pin_config_param param, u32 *arg)
{
- const struct intel_community *community;
void __iomem *padcfg1;
- unsigned long flags;
u32 value, term;
- community = intel_get_community(pctrl, pin);
padcfg1 = intel_get_padcfg(pctrl, pin, PADCFG1);
- raw_spin_lock_irqsave(&pctrl->lock, flags);
- value = readl(padcfg1);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &pctrl->lock)
+ value = readl(padcfg1);
term = (value & PADCFG1_TERM_MASK) >> PADCFG1_TERM_SHIFT;
@@ -592,7 +574,9 @@ static int intel_config_get_pull(struct intel_pinctrl *pctrl, unsigned int pin,
break;
- case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_PULL_DOWN: {
+ const struct intel_community *community = intel_get_community(pctrl, pin);
+
if (!term || value & PADCFG1_TERM_UP)
return -EINVAL;
@@ -619,6 +603,7 @@ static int intel_config_get_pull(struct intel_pinctrl *pctrl, unsigned int pin,
}
break;
+ }
default:
return -EINVAL;
@@ -631,7 +616,6 @@ static int intel_config_get_debounce(struct intel_pinctrl *pctrl, unsigned int p
enum pin_config_param param, u32 *arg)
{
void __iomem *padcfg2;
- unsigned long flags;
unsigned long v;
u32 value2;
@@ -639,9 +623,9 @@ static int intel_config_get_debounce(struct intel_pinctrl *pctrl, unsigned int p
if (!padcfg2)
return -ENOTSUPP;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
- value2 = readl(padcfg2);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &pctrl->lock)
+ value2 = readl(padcfg2);
+
if (!(value2 & PADCFG2_DEBEN))
return -EINVAL;
@@ -690,19 +674,8 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
{
unsigned int param = pinconf_to_config_param(config);
unsigned int arg = pinconf_to_config_argument(config);
- const struct intel_community *community;
+ u32 term = 0, up = 0, value;
void __iomem *padcfg1;
- unsigned long flags;
- int ret = 0;
- u32 value;
-
- community = intel_get_community(pctrl, pin);
- padcfg1 = intel_get_padcfg(pctrl, pin, PADCFG1);
-
- raw_spin_lock_irqsave(&pctrl->lock, flags);
-
- value = readl(padcfg1);
- value &= ~(PADCFG1_TERM_MASK | PADCFG1_TERM_UP);
/* Set default strength value in case none is given */
if (arg == 1)
@@ -715,78 +688,77 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
case PIN_CONFIG_BIAS_PULL_UP:
switch (arg) {
case 20000:
- value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
+ term = PADCFG1_TERM_20K;
break;
case 5000:
- value |= PADCFG1_TERM_5K << PADCFG1_TERM_SHIFT;
+ term = PADCFG1_TERM_5K;
break;
case 4000:
- value |= PADCFG1_TERM_4K << PADCFG1_TERM_SHIFT;
+ term = PADCFG1_TERM_4K;
break;
case 1000:
- value |= PADCFG1_TERM_1K << PADCFG1_TERM_SHIFT;
+ term = PADCFG1_TERM_1K;
break;
case 833:
- value |= PADCFG1_TERM_833 << PADCFG1_TERM_SHIFT;
+ term = PADCFG1_TERM_833;
break;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- value |= PADCFG1_TERM_UP;
+ up = PADCFG1_TERM_UP;
break;
- case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_PULL_DOWN: {
+ const struct intel_community *community = intel_get_community(pctrl, pin);
+
switch (arg) {
case 20000:
- value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
+ term = PADCFG1_TERM_20K;
break;
case 5000:
- value |= PADCFG1_TERM_5K << PADCFG1_TERM_SHIFT;
+ term = PADCFG1_TERM_5K;
break;
case 4000:
- value |= PADCFG1_TERM_4K << PADCFG1_TERM_SHIFT;
+ term = PADCFG1_TERM_4K;
break;
case 1000:
- if (!(community->features & PINCTRL_FEATURE_1K_PD)) {
- ret = -EINVAL;
- break;
- }
- value |= PADCFG1_TERM_1K << PADCFG1_TERM_SHIFT;
+ if (!(community->features & PINCTRL_FEATURE_1K_PD))
+ return -EINVAL;
+ term = PADCFG1_TERM_1K;
break;
case 833:
- if (!(community->features & PINCTRL_FEATURE_1K_PD)) {
- ret = -EINVAL;
- break;
- }
- value |= PADCFG1_TERM_833 << PADCFG1_TERM_SHIFT;
+ if (!(community->features & PINCTRL_FEATURE_1K_PD))
+ return -EINVAL;
+ term = PADCFG1_TERM_833;
break;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
break;
+ }
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- if (!ret)
- writel(value, padcfg1);
+ padcfg1 = intel_get_padcfg(pctrl, pin, PADCFG1);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ guard(raw_spinlock_irqsave)(&pctrl->lock);
- return ret;
+ value = readl(padcfg1);
+ value = (value & ~PADCFG1_TERM_MASK) | (term << PADCFG1_TERM_SHIFT);
+ value = (value & ~PADCFG1_TERM_UP) | up;
+ writel(value, padcfg1);
+
+ return 0;
}
static int intel_config_set_debounce(struct intel_pinctrl *pctrl,
unsigned int pin, unsigned int debounce)
{
void __iomem *padcfg0, *padcfg2;
- unsigned long flags;
u32 value0, value2;
padcfg2 = intel_get_padcfg(pctrl, pin, PADCFG2);
@@ -795,7 +767,7 @@ static int intel_config_set_debounce(struct intel_pinctrl *pctrl,
padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ guard(raw_spinlock_irqsave)(&pctrl->lock);
value0 = readl(padcfg0);
value2 = readl(padcfg2);
@@ -808,10 +780,8 @@ static int intel_config_set_debounce(struct intel_pinctrl *pctrl,
unsigned long v;
v = order_base_2(debounce * NSEC_PER_USEC / DEBOUNCE_PERIOD_NSEC);
- if (v < 3 || v > 15) {
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ if (v < 3 || v > 15)
return -EINVAL;
- }
/* Enable glitch filter and debouncer */
value0 |= PADCFG0_PREGFRXSEL;
@@ -822,8 +792,6 @@ static int intel_config_set_debounce(struct intel_pinctrl *pctrl,
writel(value0, padcfg0);
writel(value2, padcfg2);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
-
return 0;
}
@@ -973,7 +941,6 @@ static void intel_gpio_set(struct gpio_chip *chip, unsigned int offset,
int value)
{
struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
- unsigned long flags;
void __iomem *reg;
u32 padcfg0;
int pin;
@@ -986,20 +953,19 @@ static void intel_gpio_set(struct gpio_chip *chip, unsigned int offset,
if (!reg)
return;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ guard(raw_spinlock_irqsave)(&pctrl->lock);
+
padcfg0 = readl(reg);
if (value)
padcfg0 |= PADCFG0_GPIOTXSTATE;
else
padcfg0 &= ~PADCFG0_GPIOTXSTATE;
writel(padcfg0, reg);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
- unsigned long flags;
void __iomem *reg;
u32 padcfg0;
int pin;
@@ -1012,9 +978,9 @@ static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
if (!reg)
return -EINVAL;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
- padcfg0 = readl(reg);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &pctrl->lock)
+ padcfg0 = readl(reg);
+
if (padcfg0 & PADCFG0_PMODE_MASK)
return -EINVAL;
@@ -1026,14 +992,14 @@ static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
static int intel_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
- return pinctrl_gpio_direction_input(chip->base + offset);
+ return pinctrl_gpio_direction_input(chip, offset);
}
static int intel_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
int value)
{
intel_gpio_set(chip, offset, value);
- return pinctrl_gpio_direction_output(chip->base + offset);
+ return pinctrl_gpio_direction_output(chip, offset);
}
static const struct gpio_chip intel_gpio_chip = {
@@ -1058,15 +1024,17 @@ static void intel_gpio_irq_ack(struct irq_data *d)
pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), &community, &padgrp);
if (pin >= 0) {
- unsigned int gpp, gpp_offset, is_offset;
+ unsigned int gpp, gpp_offset;
+ void __iomem *is;
gpp = padgrp->reg_num;
gpp_offset = padgroup_offset(padgrp, pin);
- is_offset = community->is_offset + gpp * 4;
- raw_spin_lock(&pctrl->lock);
- writel(BIT(gpp_offset), community->regs + is_offset);
- raw_spin_unlock(&pctrl->lock);
+ is = community->regs + community->is_offset + gpp * 4;
+
+ guard(raw_spinlock)(&pctrl->lock);
+
+ writel(BIT(gpp_offset), is);
}
}
@@ -1080,7 +1048,6 @@ static void intel_gpio_irq_mask_unmask(struct gpio_chip *gc, irq_hw_number_t hwi
pin = intel_gpio_to_pin(pctrl, hwirq, &community, &padgrp);
if (pin >= 0) {
unsigned int gpp, gpp_offset;
- unsigned long flags;
void __iomem *reg, *is;
u32 value;
@@ -1090,7 +1057,7 @@ static void intel_gpio_irq_mask_unmask(struct gpio_chip *gc, irq_hw_number_t hwi
reg = community->regs + community->ie_offset + gpp * 4;
is = community->regs + community->is_offset + gpp * 4;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ guard(raw_spinlock_irqsave)(&pctrl->lock);
/* Clear interrupt status first to avoid unexpected interrupt */
writel(BIT(gpp_offset), is);
@@ -1101,7 +1068,6 @@ static void intel_gpio_irq_mask_unmask(struct gpio_chip *gc, irq_hw_number_t hwi
else
value |= BIT(gpp_offset);
writel(value, reg);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
}
@@ -1129,7 +1095,6 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned int type)
struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
unsigned int pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
u32 rxevcfg, rxinv, value;
- unsigned long flags;
void __iomem *reg;
reg = intel_get_padcfg(pctrl, pin, PADCFG0);
@@ -1163,7 +1128,7 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned int type)
else
rxinv = 0;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ guard(raw_spinlock_irqsave)(&pctrl->lock);
intel_gpio_set_gpio_mode(reg);
@@ -1179,8 +1144,6 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned int type)
else if (type & IRQ_TYPE_LEVEL_MASK)
irq_set_handler_locked(d, handle_level_irq);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
-
return 0;
}
@@ -1219,16 +1182,19 @@ static int intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
for (gpp = 0; gpp < community->ngpps; gpp++) {
const struct intel_padgroup *padgrp = &community->gpps[gpp];
- unsigned long pending, enabled, gpp_offset;
+ unsigned long pending, enabled;
+ unsigned int gpp, gpp_offset;
+ void __iomem *reg, *is;
- raw_spin_lock(&pctrl->lock);
+ gpp = padgrp->reg_num;
- pending = readl(community->regs + community->is_offset +
- padgrp->reg_num * 4);
- enabled = readl(community->regs + community->ie_offset +
- padgrp->reg_num * 4);
+ reg = community->regs + community->ie_offset + gpp * 4;
+ is = community->regs + community->is_offset + gpp * 4;
- raw_spin_unlock(&pctrl->lock);
+ scoped_guard(raw_spinlock, &pctrl->lock) {
+ pending = readl(is);
+ enabled = readl(reg);
+ }
/* Only interrupts that are enabled */
pending &= enabled;
@@ -1264,16 +1230,18 @@ static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
for (i = 0; i < pctrl->ncommunities; i++) {
const struct intel_community *community;
- void __iomem *base;
+ void __iomem *reg, *is;
unsigned int gpp;
community = &pctrl->communities[i];
- base = community->regs;
for (gpp = 0; gpp < community->ngpps; gpp++) {
+ reg = community->regs + community->ie_offset + gpp * 4;
+ is = community->regs + community->is_offset + gpp * 4;
+
/* Mask and clear all interrupts */
- writel(0, base + community->ie_offset + gpp * 4);
- writel(0xffff, base + community->is_offset + gpp * 4);
+ writel(0, reg);
+ writel(0xffff, is);
}
}
}
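The intel_config_set_pull() rewrite earlier in this file follows a pattern worth calling out: the argument is validated and translated into register field values before the lock is taken, so every error path is a plain return and the critical section shrinks to a single read-modify-write. A reduced sketch of the same shape; the register layout and names here are invented purely for illustration:

#include <linux/cleanup.h>
#include <linux/io.h>
#include <linux/spinlock.h>

/* Placeholder register layout for the sketch. */
#define FOO_TERM_SHIFT	10
#define FOO_TERM_MASK	(0xf << FOO_TERM_SHIFT)
#define FOO_TERM_5K	2
#define FOO_TERM_20K	4

static int foo_set_term(void __iomem *reg, raw_spinlock_t *lock, unsigned int ohms)
{
	u32 term, value;

	/* Validate and translate outside the lock. */
	switch (ohms) {
	case 5000:
		term = FOO_TERM_5K;
		break;
	case 20000:
		term = FOO_TERM_20K;
		break;
	default:
		return -EINVAL;
	}

	guard(raw_spinlock_irqsave)(lock);

	value = readl(reg);
	value = (value & ~FOO_TERM_MASK) | (term << FOO_TERM_SHIFT);
	writel(value, reg);

	return 0;
}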
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index cee512f97b56..2bb553598e8b 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -10,11 +10,11 @@
#ifndef PINCTRL_INTEL_H
#define PINCTRL_INTEL_H
+#include <linux/array_size.h>
#include <linux/bits.h>
#include <linux/compiler_types.h>
#include <linux/gpio/driver.h>
#include <linux/irq.h>
-#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/spinlock_types.h>
diff --git a/drivers/pinctrl/intel/pinctrl-lynxpoint.c b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
index c3732a9f0658..e6878e4cf20c 100644
--- a/drivers/pinctrl/intel/pinctrl-lynxpoint.c
+++ b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
@@ -8,14 +8,14 @@
*/
#include <linux/acpi.h>
+#include <linux/array_size.h>
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
+#include <linux/pm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -337,8 +337,6 @@ static int lp_gpio_request_enable(struct pinctrl_dev *pctldev,
unsigned long flags;
u32 value;
- pm_runtime_get(lg->dev);
-
raw_spin_lock_irqsave(&lg->lock, flags);
/*
@@ -373,8 +371,6 @@ static void lp_gpio_disable_free(struct pinctrl_dev *pctldev,
lp_gpio_disable_input(conf2);
raw_spin_unlock_irqrestore(&lg->lock, flags);
-
- pm_runtime_put(lg->dev);
}
static int lp_gpio_set_direction(struct pinctrl_dev *pctldev,
@@ -545,7 +541,7 @@ static void lp_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
static int lp_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
- return pinctrl_gpio_direction_input(chip->base + offset);
+ return pinctrl_gpio_direction_input(chip, offset);
}
static int lp_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
@@ -553,7 +549,7 @@ static int lp_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
{
lp_gpio_set(chip, offset, value);
- return pinctrl_gpio_direction_output(chip->base + offset);
+ return pinctrl_gpio_direction_output(chip, offset);
}
static int lp_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
@@ -841,24 +837,6 @@ static int lp_gpio_probe(struct platform_device *pdev)
return ret;
}
- pm_runtime_enable(dev);
-
- return 0;
-}
-
-static int lp_gpio_remove(struct platform_device *pdev)
-{
- pm_runtime_disable(&pdev->dev);
- return 0;
-}
-
-static int lp_gpio_runtime_suspend(struct device *dev)
-{
- return 0;
-}
-
-static int lp_gpio_runtime_resume(struct device *dev)
-{
return 0;
}
@@ -876,10 +854,7 @@ static int lp_gpio_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops lp_gpio_pm_ops = {
- SYSTEM_SLEEP_PM_OPS(NULL, lp_gpio_resume)
- RUNTIME_PM_OPS(lp_gpio_runtime_suspend, lp_gpio_runtime_resume, NULL)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(lp_gpio_pm_ops, NULL, lp_gpio_resume);
static const struct acpi_device_id lynxpoint_gpio_acpi_match[] = {
{ "INT33C7", (kernel_ulong_t)&lptlp_soc_data },
@@ -890,10 +865,9 @@ MODULE_DEVICE_TABLE(acpi, lynxpoint_gpio_acpi_match);
static struct platform_driver lp_gpio_driver = {
.probe = lp_gpio_probe,
- .remove = lp_gpio_remove,
.driver = {
.name = "lp_gpio",
- .pm = pm_ptr(&lp_gpio_pm_ops),
+ .pm = pm_sleep_ptr(&lp_gpio_pm_ops),
.acpi_match_table = lynxpoint_gpio_acpi_match,
},
};
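Lynxpoint ends up in the same place as Baytrail: the empty runtime PM handlers and the now-trivial remove callback disappear, and the remaining resume-only hook is wrapped in DEFINE_SIMPLE_DEV_PM_OPS(), the one-liner form of a sleep-only dev_pm_ops. A minimal sketch with hypothetical names:

#include <linux/pm.h>

static int foo_resume(struct device *dev)
{
	/* re-program state lost over suspend */
	return 0;
}

/* Declares "foo_pm_ops" with only the system-sleep resume hook populated. */
static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, NULL, foo_resume);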
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index d809680a09c9..1a556f5822b6 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -6,8 +6,8 @@
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
*/
+#include <linux/array_size.h>
#include <linux/init.h>
-#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/pinctrl/intel/pinctrl-moorefield.c b/drivers/pinctrl/intel/pinctrl-moorefield.c
index 807a694b818b..7b995fbf5c84 100644
--- a/drivers/pinctrl/intel/pinctrl-moorefield.c
+++ b/drivers/pinctrl/intel/pinctrl-moorefield.c
@@ -6,8 +6,8 @@
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
*/
+#include <linux/array_size.h>
#include <linux/init.h>
-#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.c b/drivers/pinctrl/mediatek/pinctrl-moore.c
index 8649a2f9d324..c3d59eddd994 100644
--- a/drivers/pinctrl/mediatek/pinctrl-moore.c
+++ b/drivers/pinctrl/mediatek/pinctrl-moore.c
@@ -45,7 +45,7 @@ static int mtk_pinmux_set_mux(struct pinctrl_dev *pctldev,
struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
struct function_desc *func;
struct group_desc *grp;
- int i;
+ int i, err;
func = pinmux_generic_get_function(pctldev, selector);
if (!func)
@@ -67,8 +67,11 @@ static int mtk_pinmux_set_mux(struct pinctrl_dev *pctldev,
if (!desc->name)
return -ENOTSUPP;
- mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_MODE,
- pin_modes[i]);
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_MODE,
+ pin_modes[i]);
+
+ if (err)
+ return err;
}
return 0;
@@ -507,17 +510,12 @@ static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO, !!value);
}
-static int mtk_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio)
-{
- return pinctrl_gpio_direction_input(chip->base + gpio);
-}
-
static int mtk_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
int value)
{
mtk_gpio_set(chip, gpio, value);
- return pinctrl_gpio_direction_output(chip->base + gpio);
+ return pinctrl_gpio_direction_output(chip, gpio);
}
static int mtk_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
@@ -566,7 +564,7 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw)
chip->parent = hw->dev;
chip->request = gpiochip_generic_request;
chip->free = gpiochip_generic_free;
- chip->direction_input = mtk_gpio_direction_input;
+ chip->direction_input = pinctrl_gpio_direction_input;
chip->direction_output = mtk_gpio_direction_output;
chip->get = mtk_gpio_get;
chip->set = mtk_gpio_set;
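Because pinctrl_gpio_direction_input() now has exactly the gpio_chip callback prototype, the MediaTek drivers can delete their one-line wrappers and plug the pinctrl helper straight into the chip; only direction_output keeps a wrapper, since the output level must be set before the direction change. Sketch with placeholder names:

#include <linux/gpio/driver.h>
#include <linux/pinctrl/consumer.h>

static void foo_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
	/* hypothetical register write driving the output level */
}

static int foo_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
				     int value)
{
	foo_gpio_set(chip, offset, value);
	return pinctrl_gpio_direction_output(chip, offset);
}

static const struct gpio_chip foo_gpio_chip = {
	.request = gpiochip_generic_request,
	.free = gpiochip_generic_free,
	/* prototype now matches int (*)(struct gpio_chip *, unsigned int) */
	.direction_input = pinctrl_gpio_direction_input,
	.direction_output = foo_gpio_direction_output,
};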
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7981.c b/drivers/pinctrl/mediatek/pinctrl-mt7981.c
index 0fd2c0c451f9..7e59a4407859 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7981.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7981.c
@@ -611,6 +611,9 @@ static int mt7981_wo0_jtag_1_funcs[] = { 5, 5, 5, 5, 5, };
static int mt7981_uart2_0_pins[] = { 4, 5, 6, 7, };
static int mt7981_uart2_0_funcs[] = { 3, 3, 3, 3, };
+static int mt7981_uart2_0_tx_rx_pins[] = { 4, 5, };
+static int mt7981_uart2_0_tx_rx_funcs[] = { 3, 3, };
+
/* GBE_LED0 */
static int mt7981_gbe_led0_pins[] = { 8, };
static int mt7981_gbe_led0_funcs[] = { 3, };
@@ -731,6 +734,9 @@ static int mt7981_uart1_0_funcs[] = { 4, 4, 4, 4, };
static int mt7981_uart1_1_pins[] = { 26, 27, 28, 29, };
static int mt7981_uart1_1_funcs[] = { 2, 2, 2, 2, };
+static int mt7981_uart1_2_pins[] = { 9, 10, };
+static int mt7981_uart1_2_funcs[] = { 2, 2, };
+
/* UART2 */
static int mt7981_uart2_1_pins[] = { 22, 23, 24, 25, };
static int mt7981_uart2_1_funcs[] = { 3, 3, 3, 3, };
@@ -805,6 +811,8 @@ static const struct group_desc mt7981_groups[] = {
PINCTRL_PIN_GROUP("wo0_jtag_0", mt7981_wo0_jtag_0),
/* @GPIO(4,7) WM_JTAG(3) */
PINCTRL_PIN_GROUP("uart2_0", mt7981_uart2_0),
+ /* @GPIO(4,5) WM_JTAG(4) */
+ PINCTRL_PIN_GROUP("uart2_0_tx_rx", mt7981_uart2_0_tx_rx),
/* @GPIO(8) GBE_LED0(3) */
PINCTRL_PIN_GROUP("gbe_led0", mt7981_gbe_led0),
/* @GPIO(4,6) PTA_EXT(4) */
@@ -861,6 +869,8 @@ static const struct group_desc mt7981_groups[] = {
PINCTRL_PIN_GROUP("uart1_0", mt7981_uart1_0),
/* @GPIO(26,29): UART1(2) */
PINCTRL_PIN_GROUP("uart1_1", mt7981_uart1_1),
+ /* @GPIO(9,10): UART1(2) */
+ PINCTRL_PIN_GROUP("uart1_2", mt7981_uart1_2),
/* @GPIO(22,25): UART1(3) */
PINCTRL_PIN_GROUP("uart2_1", mt7981_uart2_1),
/* @GPIO(22,24) PTA_EXT(4) */
@@ -922,9 +932,9 @@ static const struct group_desc mt7981_groups[] = {
*/
static const char *mt7981_wa_aice_groups[] = { "wa_aice1", "wa_aice2", "wm_aice1_1",
"wa_aice3", "wm_aice1_2", };
-static const char *mt7981_uart_groups[] = { "wm_uart_0", "uart2_0",
- "net_wo0_uart_txd_0", "net_wo0_uart_txd_1", "net_wo0_uart_txd_2",
- "uart1_0", "uart1_1", "uart2_1", "wm_aurt_1", "wm_aurt_2", "uart0", };
+static const char *mt7981_uart_groups[] = { "net_wo0_uart_txd_0", "net_wo0_uart_txd_1",
+ "net_wo0_uart_txd_2", "uart0", "uart1_0", "uart1_1", "uart1_2", "uart2_0",
+ "uart2_0_tx_rx", "uart2_1", "wm_uart_0", "wm_aurt_1", "wm_aurt_2", };
static const char *mt7981_dfd_groups[] = { "dfd", "dfd_ntrst", };
static const char *mt7981_wdt_groups[] = { "watchdog", "watchdog1", };
static const char *mt7981_pcie_groups[] = { "pcie_pereset", "pcie_clk", "pcie_wake", };
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 74b15952b742..e79d66a04194 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -808,17 +808,11 @@ static const struct pinmux_ops mtk_pmx_ops = {
.gpio_request_enable = mtk_pmx_gpio_request_enable,
};
-static int mtk_gpio_direction_input(struct gpio_chip *chip,
- unsigned offset)
-{
- return pinctrl_gpio_direction_input(chip->base + offset);
-}
-
static int mtk_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
mtk_gpio_set(chip, offset, value);
- return pinctrl_gpio_direction_output(chip->base + offset);
+ return pinctrl_gpio_direction_output(chip, offset);
}
static int mtk_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -898,7 +892,7 @@ static const struct gpio_chip mtk_gpio_chip = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.get_direction = mtk_gpio_get_direction,
- .direction_input = mtk_gpio_direction_input,
+ .direction_input = pinctrl_gpio_direction_input,
.direction_output = mtk_gpio_direction_output,
.get = mtk_gpio_get,
.set = mtk_gpio_set,
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index 33d6c3fb7908..6392f1e05d02 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -779,9 +779,7 @@ static int mtk_pmx_set_mux(struct pinctrl_dev *pctldev,
return -EINVAL;
desc = (const struct mtk_pin_desc *)&hw->soc->pins[grp->pin];
- mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_MODE, desc_func->muxval);
-
- return 0;
+ return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_MODE, desc_func->muxval);
}
static const struct pinmux_ops mtk_pmxops = {
@@ -918,7 +916,7 @@ static int mtk_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio)
if (gpio >= hw->soc->npins)
return -EINVAL;
- return pinctrl_gpio_direction_input(chip->base + gpio);
+ return pinctrl_gpio_direction_input(chip, gpio);
}
static int mtk_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
@@ -931,7 +929,7 @@ static int mtk_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
mtk_gpio_set(chip, gpio, value);
- return pinctrl_gpio_direction_output(chip->base + gpio);
+ return pinctrl_gpio_direction_output(chip, gpio);
}
static int mtk_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/pinctrl/meson/Kconfig b/drivers/pinctrl/meson/Kconfig
index 71fa7431df67..cc397896762c 100644
--- a/drivers/pinctrl/meson/Kconfig
+++ b/drivers/pinctrl/meson/Kconfig
@@ -73,4 +73,10 @@ config PINCTRL_AMLOGIC_C3
select PINCTRL_MESON_AXG_PMX
default y
+config PINCTRL_AMLOGIC_T7
+ tristate "Amlogic T7 SoC pinctrl driver"
+ depends on ARM64
+ select PINCTRL_MESON_AXG_PMX
+ default y
+
endif
diff --git a/drivers/pinctrl/meson/Makefile b/drivers/pinctrl/meson/Makefile
index 7ecddf7f683e..9e538b9ffb9b 100644
--- a/drivers/pinctrl/meson/Makefile
+++ b/drivers/pinctrl/meson/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_PINCTRL_MESON_G12A) += pinctrl-meson-g12a.o
obj-$(CONFIG_PINCTRL_MESON_A1) += pinctrl-meson-a1.o
obj-$(CONFIG_PINCTRL_MESON_S4) += pinctrl-meson-s4.o
obj-$(CONFIG_PINCTRL_AMLOGIC_C3) += pinctrl-amlogic-c3.o
+obj-$(CONFIG_PINCTRL_AMLOGIC_T7) += pinctrl-amlogic-t7.o
diff --git a/drivers/pinctrl/meson/pinctrl-amlogic-t7.c b/drivers/pinctrl/meson/pinctrl-amlogic-t7.c
new file mode 100644
index 000000000000..0aed5de3f068
--- /dev/null
+++ b/drivers/pinctrl/meson/pinctrl-amlogic-t7.c
@@ -0,0 +1,1611 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+/*
+ * Pin controller and GPIO driver for Amlogic T7 SoC.
+ *
+ * Copyright (c) 2023 Amlogic, Inc. All rights reserved.
+ * Author: Huqiang Qin <huqiang.qin@amlogic.com>
+ */
+
+#include <dt-bindings/gpio/amlogic,t7-periphs-pinctrl.h>
+#include "pinctrl-meson.h"
+#include "pinctrl-meson-axg-pmx.h"
+
+static const struct pinctrl_pin_desc t7_periphs_pins[] = {
+ MESON_PIN(GPIOB_0),
+ MESON_PIN(GPIOB_1),
+ MESON_PIN(GPIOB_2),
+ MESON_PIN(GPIOB_3),
+ MESON_PIN(GPIOB_4),
+ MESON_PIN(GPIOB_5),
+ MESON_PIN(GPIOB_6),
+ MESON_PIN(GPIOB_7),
+ MESON_PIN(GPIOB_8),
+ MESON_PIN(GPIOB_9),
+ MESON_PIN(GPIOB_10),
+ MESON_PIN(GPIOB_11),
+ MESON_PIN(GPIOB_12),
+
+ MESON_PIN(GPIOC_0),
+ MESON_PIN(GPIOC_1),
+ MESON_PIN(GPIOC_2),
+ MESON_PIN(GPIOC_3),
+ MESON_PIN(GPIOC_4),
+ MESON_PIN(GPIOC_5),
+ MESON_PIN(GPIOC_6),
+
+ MESON_PIN(GPIOX_0),
+ MESON_PIN(GPIOX_1),
+ MESON_PIN(GPIOX_2),
+ MESON_PIN(GPIOX_3),
+ MESON_PIN(GPIOX_4),
+ MESON_PIN(GPIOX_5),
+ MESON_PIN(GPIOX_6),
+ MESON_PIN(GPIOX_7),
+ MESON_PIN(GPIOX_8),
+ MESON_PIN(GPIOX_9),
+ MESON_PIN(GPIOX_10),
+ MESON_PIN(GPIOX_11),
+ MESON_PIN(GPIOX_12),
+ MESON_PIN(GPIOX_13),
+ MESON_PIN(GPIOX_14),
+ MESON_PIN(GPIOX_15),
+ MESON_PIN(GPIOX_16),
+ MESON_PIN(GPIOX_17),
+ MESON_PIN(GPIOX_18),
+ MESON_PIN(GPIOX_19),
+
+ MESON_PIN(GPIOW_0),
+ MESON_PIN(GPIOW_1),
+ MESON_PIN(GPIOW_2),
+ MESON_PIN(GPIOW_3),
+ MESON_PIN(GPIOW_4),
+ MESON_PIN(GPIOW_5),
+ MESON_PIN(GPIOW_6),
+ MESON_PIN(GPIOW_7),
+ MESON_PIN(GPIOW_8),
+ MESON_PIN(GPIOW_9),
+ MESON_PIN(GPIOW_10),
+ MESON_PIN(GPIOW_11),
+ MESON_PIN(GPIOW_12),
+ MESON_PIN(GPIOW_13),
+ MESON_PIN(GPIOW_14),
+ MESON_PIN(GPIOW_15),
+ MESON_PIN(GPIOW_16),
+
+ MESON_PIN(GPIOD_0),
+ MESON_PIN(GPIOD_1),
+ MESON_PIN(GPIOD_2),
+ MESON_PIN(GPIOD_3),
+ MESON_PIN(GPIOD_4),
+ MESON_PIN(GPIOD_5),
+ MESON_PIN(GPIOD_6),
+ MESON_PIN(GPIOD_7),
+ MESON_PIN(GPIOD_8),
+ MESON_PIN(GPIOD_9),
+ MESON_PIN(GPIOD_10),
+ MESON_PIN(GPIOD_11),
+ MESON_PIN(GPIOD_12),
+
+ MESON_PIN(GPIOE_0),
+ MESON_PIN(GPIOE_1),
+ MESON_PIN(GPIOE_2),
+ MESON_PIN(GPIOE_3),
+ MESON_PIN(GPIOE_4),
+ MESON_PIN(GPIOE_5),
+ MESON_PIN(GPIOE_6),
+
+ MESON_PIN(GPIOZ_0),
+ MESON_PIN(GPIOZ_1),
+ MESON_PIN(GPIOZ_2),
+ MESON_PIN(GPIOZ_3),
+ MESON_PIN(GPIOZ_4),
+ MESON_PIN(GPIOZ_5),
+ MESON_PIN(GPIOZ_6),
+ MESON_PIN(GPIOZ_7),
+ MESON_PIN(GPIOZ_8),
+ MESON_PIN(GPIOZ_9),
+ MESON_PIN(GPIOZ_10),
+ MESON_PIN(GPIOZ_11),
+ MESON_PIN(GPIOZ_12),
+ MESON_PIN(GPIOZ_13),
+
+ MESON_PIN(GPIOT_0),
+ MESON_PIN(GPIOT_1),
+ MESON_PIN(GPIOT_2),
+ MESON_PIN(GPIOT_3),
+ MESON_PIN(GPIOT_4),
+ MESON_PIN(GPIOT_5),
+ MESON_PIN(GPIOT_6),
+ MESON_PIN(GPIOT_7),
+ MESON_PIN(GPIOT_8),
+ MESON_PIN(GPIOT_9),
+ MESON_PIN(GPIOT_10),
+ MESON_PIN(GPIOT_11),
+ MESON_PIN(GPIOT_12),
+ MESON_PIN(GPIOT_13),
+ MESON_PIN(GPIOT_14),
+ MESON_PIN(GPIOT_15),
+ MESON_PIN(GPIOT_16),
+ MESON_PIN(GPIOT_17),
+ MESON_PIN(GPIOT_18),
+ MESON_PIN(GPIOT_19),
+ MESON_PIN(GPIOT_20),
+ MESON_PIN(GPIOT_21),
+ MESON_PIN(GPIOT_22),
+ MESON_PIN(GPIOT_23),
+
+ MESON_PIN(GPIOM_0),
+ MESON_PIN(GPIOM_1),
+ MESON_PIN(GPIOM_2),
+ MESON_PIN(GPIOM_3),
+ MESON_PIN(GPIOM_4),
+ MESON_PIN(GPIOM_5),
+ MESON_PIN(GPIOM_6),
+ MESON_PIN(GPIOM_7),
+ MESON_PIN(GPIOM_8),
+ MESON_PIN(GPIOM_9),
+ MESON_PIN(GPIOM_10),
+ MESON_PIN(GPIOM_11),
+ MESON_PIN(GPIOM_12),
+ MESON_PIN(GPIOM_13),
+
+ MESON_PIN(GPIOY_0),
+ MESON_PIN(GPIOY_1),
+ MESON_PIN(GPIOY_2),
+ MESON_PIN(GPIOY_3),
+ MESON_PIN(GPIOY_4),
+ MESON_PIN(GPIOY_5),
+ MESON_PIN(GPIOY_6),
+ MESON_PIN(GPIOY_7),
+ MESON_PIN(GPIOY_8),
+ MESON_PIN(GPIOY_9),
+ MESON_PIN(GPIOY_10),
+ MESON_PIN(GPIOY_11),
+ MESON_PIN(GPIOY_12),
+ MESON_PIN(GPIOY_13),
+ MESON_PIN(GPIOY_14),
+ MESON_PIN(GPIOY_15),
+ MESON_PIN(GPIOY_16),
+ MESON_PIN(GPIOY_17),
+ MESON_PIN(GPIOY_18),
+
+ MESON_PIN(GPIOH_0),
+ MESON_PIN(GPIOH_1),
+ MESON_PIN(GPIOH_2),
+ MESON_PIN(GPIOH_3),
+ MESON_PIN(GPIOH_4),
+ MESON_PIN(GPIOH_5),
+ MESON_PIN(GPIOH_6),
+ MESON_PIN(GPIOH_7),
+
+ MESON_PIN(GPIO_TEST_N),
+};
+
+/* Bank B func1 */
+static const unsigned int emmc_nand_d0_pins[] = { GPIOB_0 };
+static const unsigned int emmc_nand_d1_pins[] = { GPIOB_1 };
+static const unsigned int emmc_nand_d2_pins[] = { GPIOB_2 };
+static const unsigned int emmc_nand_d3_pins[] = { GPIOB_3 };
+static const unsigned int emmc_nand_d4_pins[] = { GPIOB_4 };
+static const unsigned int emmc_nand_d5_pins[] = { GPIOB_5 };
+static const unsigned int emmc_nand_d6_pins[] = { GPIOB_6 };
+static const unsigned int emmc_nand_d7_pins[] = { GPIOB_7 };
+static const unsigned int emmc_clk_pins[] = { GPIOB_8 };
+static const unsigned int emmc_cmd_pins[] = { GPIOB_10 };
+static const unsigned int emmc_nand_ds_pins[] = { GPIOB_11 };
+
+/* Bank B func2 */
+static const unsigned int nor_hold_pins[] = { GPIOB_3 };
+static const unsigned int nor_d_pins[] = { GPIOB_4 };
+static const unsigned int nor_q_pins[] = { GPIOB_5 };
+static const unsigned int nor_c_pins[] = { GPIOB_6 };
+static const unsigned int nor_wp_pins[] = { GPIOB_7 };
+static const unsigned int nor_cs_pins[] = { GPIOB_12 };
+
+/* Bank C func1 */
+static const unsigned int sdcard_d0_pins[] = { GPIOC_0 };
+static const unsigned int sdcard_d1_pins[] = { GPIOC_1 };
+static const unsigned int sdcard_d2_pins[] = { GPIOC_2 };
+static const unsigned int sdcard_d3_pins[] = { GPIOC_3 };
+static const unsigned int sdcard_clk_pins[] = { GPIOC_4 };
+static const unsigned int sdcard_cmd_pins[] = { GPIOC_5 };
+static const unsigned int gen_clk_out_c_pins[] = { GPIOC_6 };
+
+/* Bank C func2 */
+static const unsigned int jtag_b_tdo_pins[] = { GPIOC_0 };
+static const unsigned int jtag_b_tdi_pins[] = { GPIOC_1 };
+static const unsigned int uart_ao_a_rx_c_pins[] = { GPIOC_2 };
+static const unsigned int uart_ao_a_tx_c_pins[] = { GPIOC_3 };
+static const unsigned int jtag_b_clk_pins[] = { GPIOC_4 };
+static const unsigned int jtag_b_tms_pins[] = { GPIOC_5 };
+
+/* Bank C func3 */
+static const unsigned int spi1_mosi_c_pins[] = { GPIOC_0 };
+static const unsigned int spi1_miso_c_pins[] = { GPIOC_1 };
+static const unsigned int spi1_sclk_c_pins[] = { GPIOC_2 };
+static const unsigned int spi1_ss0_c_pins[] = { GPIOC_3 };
+
+/* Bank X func1 */
+static const unsigned int sdio_d0_pins[] = { GPIOX_0 };
+static const unsigned int sdio_d1_pins[] = { GPIOX_1 };
+static const unsigned int sdio_d2_pins[] = { GPIOX_2 };
+static const unsigned int sdio_d3_pins[] = { GPIOX_3 };
+static const unsigned int sdio_clk_pins[] = { GPIOX_4 };
+static const unsigned int sdio_cmd_pins[] = { GPIOX_5 };
+static const unsigned int pwm_b_pins[] = { GPIOX_6 };
+static const unsigned int pwm_c_pins[] = { GPIOX_7 };
+static const unsigned int tdm_d0_pins[] = { GPIOX_8 };
+static const unsigned int tdm_d1_pins[] = { GPIOX_9 };
+static const unsigned int tdm_fs0_pins[] = { GPIOX_10 };
+static const unsigned int tdm_sclk0_pins[] = { GPIOX_11 };
+static const unsigned int uart_c_tx_pins[] = { GPIOX_12 };
+static const unsigned int uart_c_rx_pins[] = { GPIOX_13 };
+static const unsigned int uart_c_cts_pins[] = { GPIOX_14 };
+static const unsigned int uart_c_rts_pins[] = { GPIOX_15 };
+static const unsigned int pwm_a_pins[] = { GPIOX_16 };
+static const unsigned int i2c2_sda_x_pins[] = { GPIOX_17 };
+static const unsigned int i2c2_sck_x_pins[] = { GPIOX_18 };
+static const unsigned int pwm_d_pins[] = { GPIOX_19 };
+
+/* Bank X func2 */
+static const unsigned int clk12_24_x_pins[] = { GPIOX_14 };
+
+/* Bank W func1 */
+static const unsigned int hdmirx_a_hpd_pins[] = { GPIOW_0 };
+static const unsigned int hdmirx_a_det_pins[] = { GPIOW_1 };
+static const unsigned int hdmirx_a_sda_pins[] = { GPIOW_2 };
+static const unsigned int hdmirx_a_sck_pins[] = { GPIOW_3 };
+static const unsigned int hdmirx_c_hpd_pins[] = { GPIOW_4 };
+static const unsigned int hdmirx_c_det_pins[] = { GPIOW_5 };
+static const unsigned int hdmirx_c_sda_pins[] = { GPIOW_6 };
+static const unsigned int hdmirx_c_sck_pins[] = { GPIOW_7 };
+static const unsigned int hdmirx_b_hpd_pins[] = { GPIOW_8 };
+static const unsigned int hdmirx_b_det_pins[] = { GPIOW_9 };
+static const unsigned int hdmirx_b_sda_pins[] = { GPIOW_10 };
+static const unsigned int hdmirx_b_sck_pins[] = { GPIOW_11 };
+static const unsigned int cec_a_pins[] = { GPIOW_12 };
+static const unsigned int hdmitx_sda_w13_pins[] = { GPIOW_13 };
+static const unsigned int hdmitx_sck_w14_pins[] = { GPIOW_14 };
+static const unsigned int hdmitx_hpd_in_pins[] = { GPIOW_15 };
+static const unsigned int cec_b_pins[] = { GPIOW_16 };
+
+/* Bank W func2 */
+static const unsigned int uart_ao_a_tx_w2_pins[] = { GPIOW_2 };
+static const unsigned int uart_ao_a_rx_w3_pins[] = { GPIOW_3 };
+static const unsigned int uart_ao_a_tx_w6_pins[] = { GPIOW_6 };
+static const unsigned int uart_ao_a_rx_w7_pins[] = { GPIOW_7 };
+static const unsigned int uart_ao_a_tx_w10_pins[] = { GPIOW_10 };
+static const unsigned int uart_ao_a_rx_w11_pins[] = { GPIOW_11 };
+
+/* Bank W func3 */
+static const unsigned int hdmitx_sda_w2_pins[] = { GPIOW_2 };
+static const unsigned int hdmitx_sck_w3_pins[] = { GPIOW_3 };
+
+/* Bank D func1 */
+static const unsigned int uart_ao_a_tx_d0_pins[] = { GPIOD_0 };
+static const unsigned int uart_ao_a_rx_d1_pins[] = { GPIOD_1 };
+static const unsigned int i2c0_ao_sck_d_pins[] = { GPIOD_2 };
+static const unsigned int i2c0_ao_sda_d_pins[] = { GPIOD_3 };
+static const unsigned int remote_out_d4_pins[] = { GPIOD_4 };
+static const unsigned int remote_in_pins[] = { GPIOD_5 };
+static const unsigned int jtag_a_clk_pins[] = { GPIOD_6 };
+static const unsigned int jtag_a_tms_pins[] = { GPIOD_7 };
+static const unsigned int jtag_a_tdi_pins[] = { GPIOD_8 };
+static const unsigned int jtag_a_tdo_pins[] = { GPIOD_9 };
+static const unsigned int gen_clk_out_d_pins[] = { GPIOD_10 };
+static const unsigned int pwm_ao_g_d11_pins[] = { GPIOD_11 };
+static const unsigned int wd_rsto_pins[] = { GPIOD_12 };
+
+/* Bank D func2 */
+static const unsigned int i2c0_slave_ao_sck_pins[] = { GPIOD_2 };
+static const unsigned int i2c0_slave_ao_sda_pins[] = { GPIOD_3 };
+static const unsigned int rtc_clk_in_pins[] = { GPIOD_4 };
+static const unsigned int pwm_ao_h_d5_pins[] = { GPIOD_5 };
+static const unsigned int pwm_ao_c_d_pins[] = { GPIOD_6 };
+static const unsigned int pwm_ao_g_d7_pins[] = { GPIOD_7 };
+static const unsigned int spdif_out_d_pins[] = { GPIOD_8 };
+static const unsigned int spdif_in_d_pins[] = { GPIOD_9 };
+static const unsigned int pwm_ao_h_d10_pins[] = { GPIOD_10 };
+
+/* Bank D func3 */
+static const unsigned int uart_ao_b_tx_pins[] = { GPIOD_2 };
+static const unsigned int uart_ao_b_rx_pins[] = { GPIOD_3 };
+static const unsigned int uart_ao_b_cts_pins[] = { GPIOD_4 };
+static const unsigned int pwm_ao_c_hiz_pins[] = { GPIOD_6 };
+static const unsigned int pwm_ao_g_hiz_pins[] = { GPIOD_7 };
+static const unsigned int uart_ao_b_rts_pins[] = { GPIOD_10 };
+
+/* Bank D func4 */
+static const unsigned int remote_out_d6_pins[] = { GPIOD_6 };
+
+/* Bank E func1 */
+static const unsigned int pwm_ao_a_pins[] = { GPIOE_0 };
+static const unsigned int pwm_ao_b_pins[] = { GPIOE_1 };
+static const unsigned int pwm_ao_c_e_pins[] = { GPIOE_2 };
+static const unsigned int pwm_ao_d_pins[] = { GPIOE_3 };
+static const unsigned int pwm_ao_e_pins[] = { GPIOE_4 };
+static const unsigned int pwm_ao_f_pins[] = { GPIOE_5 };
+static const unsigned int pwm_ao_g_e_pins[] = { GPIOE_6 };
+
+/* Bank E func2 */
+static const unsigned int i2c0_ao_sck_e_pins[] = { GPIOE_0 };
+static const unsigned int i2c0_ao_sda_e_pins[] = { GPIOE_1 };
+static const unsigned int clk25m_pins[] = { GPIOE_2 };
+static const unsigned int i2c1_ao_sck_pins[] = { GPIOE_3 };
+static const unsigned int i2c1_ao_sda_pins[] = { GPIOE_4 };
+static const unsigned int rtc_clk_out_pins[] = { GPIOE_5 };
+
+/* Bank E func3 */
+static const unsigned int clk12_24_e_pins[] = { GPIOE_4 };
+
+/* Bank Z func1 */
+static const unsigned int eth_mdio_pins[] = { GPIOZ_0 };
+static const unsigned int eth_mdc_pins[] = { GPIOZ_1 };
+static const unsigned int eth_rgmii_rx_clk_pins[] = { GPIOZ_2 };
+static const unsigned int eth_rx_dv_pins[] = { GPIOZ_3 };
+static const unsigned int eth_rxd0_pins[] = { GPIOZ_4 };
+static const unsigned int eth_rxd1_pins[] = { GPIOZ_5 };
+static const unsigned int eth_rxd2_rgmii_pins[] = { GPIOZ_6 };
+static const unsigned int eth_rxd3_rgmii_pins[] = { GPIOZ_7 };
+static const unsigned int eth_rgmii_tx_clk_pins[] = { GPIOZ_8 };
+static const unsigned int eth_txen_pins[] = { GPIOZ_9 };
+static const unsigned int eth_txd0_pins[] = { GPIOZ_10 };
+static const unsigned int eth_txd1_pins[] = { GPIOZ_11 };
+static const unsigned int eth_txd2_rgmii_pins[] = { GPIOZ_12 };
+static const unsigned int eth_txd3_rgmii_pins[] = { GPIOZ_13 };
+
+/* Bank Z func2 */
+static const unsigned int iso7816_clk_z_pins[] = { GPIOZ_0 };
+static const unsigned int iso7816_data_z_pins[] = { GPIOZ_1 };
+static const unsigned int tsin_b_valid_pins[] = { GPIOZ_2 };
+static const unsigned int tsin_b_sop_pins[] = { GPIOZ_3 };
+static const unsigned int tsin_b_din0_pins[] = { GPIOZ_4 };
+static const unsigned int tsin_b_clk_pins[] = { GPIOZ_5 };
+static const unsigned int tsin_b_fail_pins[] = { GPIOZ_6 };
+static const unsigned int tsin_b_din1_pins[] = { GPIOZ_7 };
+static const unsigned int tsin_b_din2_pins[] = { GPIOZ_8 };
+static const unsigned int tsin_b_din3_pins[] = { GPIOZ_9 };
+static const unsigned int tsin_b_din4_pins[] = { GPIOZ_10 };
+static const unsigned int tsin_b_din5_pins[] = { GPIOZ_11 };
+static const unsigned int tsin_b_din6_pins[] = { GPIOZ_12 };
+static const unsigned int tsin_b_din7_pins[] = { GPIOZ_13 };
+
+/* Bank Z func3 */
+static const unsigned int tsin_c_z_valid_pins[] = { GPIOZ_6 };
+static const unsigned int tsin_c_z_sop_pins[] = { GPIOZ_7 };
+static const unsigned int tsin_c_z_din0_pins[] = { GPIOZ_8 };
+static const unsigned int tsin_c_z_clk_pins[] = { GPIOZ_9 };
+static const unsigned int tsin_d_z_valid_pins[] = { GPIOZ_10 };
+static const unsigned int tsin_d_z_sop_pins[] = { GPIOZ_11 };
+static const unsigned int tsin_d_z_din0_pins[] = { GPIOZ_12 };
+static const unsigned int tsin_d_z_clk_pins[] = { GPIOZ_13 };
+
+/* Bank Z func4 */
+static const unsigned int spi4_mosi_pins[] = { GPIOZ_0 };
+static const unsigned int spi4_miso_pins[] = { GPIOZ_1 };
+static const unsigned int spi4_sclk_pins[] = { GPIOZ_2 };
+static const unsigned int spi4_ss0_pins[] = { GPIOZ_3 };
+static const unsigned int spi5_mosi_pins[] = { GPIOZ_4 };
+static const unsigned int spi5_miso_pins[] = { GPIOZ_5 };
+static const unsigned int spi5_sclk_pins[] = { GPIOZ_6 };
+static const unsigned int spi5_ss0_pins[] = { GPIOZ_7 };
+
+/* Bank T func1 */
+static const unsigned int mclk1_pins[] = { GPIOT_0 };
+static const unsigned int tdm_sclk1_pins[] = { GPIOT_1 };
+static const unsigned int tdm_fs1_pins[] = { GPIOT_2 };
+static const unsigned int tdm_d2_pins[] = { GPIOT_3 };
+static const unsigned int tdm_d3_pins[] = { GPIOT_4 };
+static const unsigned int tdm_d4_pins[] = { GPIOT_5 };
+static const unsigned int tdm_d5_pins[] = { GPIOT_6 };
+static const unsigned int tdm_d6_pins[] = { GPIOT_7 };
+static const unsigned int tdm_d7_pins[] = { GPIOT_8 };
+static const unsigned int tdm_d8_pins[] = { GPIOT_9 };
+static const unsigned int tdm_d9_pins[] = { GPIOT_10 };
+static const unsigned int tdm_d10_pins[] = { GPIOT_11 };
+static const unsigned int tdm_d11_pins[] = { GPIOT_12 };
+static const unsigned int mclk2_pins[] = { GPIOT_13 };
+static const unsigned int tdm_sclk2_pins[] = { GPIOT_14 };
+static const unsigned int tdm_fs2_pins[] = { GPIOT_15 };
+static const unsigned int i2c1_sck_pins[] = { GPIOT_16 };
+static const unsigned int i2c1_sda_pins[] = { GPIOT_17 };
+static const unsigned int spi0_mosi_pins[] = { GPIOT_18 };
+static const unsigned int spi0_miso_pins[] = { GPIOT_19 };
+static const unsigned int spi0_sclk_pins[] = { GPIOT_20 };
+static const unsigned int spi0_ss0_pins[] = { GPIOT_21 };
+static const unsigned int spi0_ss1_pins[] = { GPIOT_22 };
+static const unsigned int spi0_ss2_pins[] = { GPIOT_23 };
+
+/* Bank T func2 */
+static const unsigned int spdif_in_t_pins[] = { GPIOT_3 };
+static const unsigned int spdif_out_t_pins[] = { GPIOT_4 };
+static const unsigned int iso7816_clk_t_pins[] = { GPIOT_5 };
+static const unsigned int iso7816_data_t_pins[] = { GPIOT_6 };
+static const unsigned int tsin_a_sop_t_pins[] = { GPIOT_7 };
+static const unsigned int tsin_a_din0_t_pins[] = { GPIOT_8 };
+static const unsigned int tsin_a_clk_t_pins[] = { GPIOT_9 };
+static const unsigned int tsin_a_valid_t_pins[] = { GPIOT_10 };
+static const unsigned int i2c0_sck_t_pins[] = { GPIOT_20 };
+static const unsigned int i2c0_sda_t_pins[] = { GPIOT_21 };
+static const unsigned int i2c2_sck_t_pins[] = { GPIOT_22 };
+static const unsigned int i2c2_sda_t_pins[] = { GPIOT_23 };
+
+/* Bank T func3 */
+static const unsigned int spi3_mosi_pins[] = { GPIOT_6 };
+static const unsigned int spi3_miso_pins[] = { GPIOT_7 };
+static const unsigned int spi3_sclk_pins[] = { GPIOT_8 };
+static const unsigned int spi3_ss0_pins[] = { GPIOT_9 };
+
+/* Bank M func1 */
+static const unsigned int tdm_d12_pins[] = { GPIOM_0 };
+static const unsigned int tdm_d13_pins[] = { GPIOM_1 };
+static const unsigned int tdm_d14_pins[] = { GPIOM_2 };
+static const unsigned int tdm_d15_pins[] = { GPIOM_3 };
+static const unsigned int tdm_sclk3_pins[] = { GPIOM_4 };
+static const unsigned int tdm_fs3_pins[] = { GPIOM_5 };
+static const unsigned int i2c3_sda_m_pins[] = { GPIOM_6 };
+static const unsigned int i2c3_sck_m_pins[] = { GPIOM_7 };
+static const unsigned int spi1_mosi_m_pins[] = { GPIOM_8 };
+static const unsigned int spi1_miso_m_pins[] = { GPIOM_9 };
+static const unsigned int spi1_sclk_m_pins[] = { GPIOM_10 };
+static const unsigned int spi1_ss0_m_pins[] = { GPIOM_11 };
+static const unsigned int spi1_ss1_m_pins[] = { GPIOM_12 };
+static const unsigned int spi1_ss2_m_pins[] = { GPIOM_13 };
+
+/* Bank M func2 */
+static const unsigned int pdm_din1_m0_pins[] = { GPIOM_0 };
+static const unsigned int pdm_din2_pins[] = { GPIOM_1 };
+static const unsigned int pdm_din3_pins[] = { GPIOM_2 };
+static const unsigned int pdm_dclk_pins[] = { GPIOM_3 };
+static const unsigned int pdm_din0_pins[] = { GPIOM_4 };
+static const unsigned int pdm_din1_m5_pins[] = { GPIOM_5 };
+static const unsigned int uart_d_tx_m_pins[] = { GPIOM_8 };
+static const unsigned int uart_d_rx_m_pins[] = { GPIOM_9 };
+static const unsigned int uart_d_cts_m_pins[] = { GPIOM_10 };
+static const unsigned int uart_d_rts_m_pins[] = { GPIOM_11 };
+static const unsigned int i2c2_sda_m_pins[] = { GPIOM_12 };
+static const unsigned int i2c2_sck_m_pins[] = { GPIOM_13 };
+
+/* Bank Y func1 */
+static const unsigned int spi2_mosi_pins[] = { GPIOY_0 };
+static const unsigned int spi2_miso_pins[] = { GPIOY_1 };
+static const unsigned int spi2_sclk_pins[] = { GPIOY_2 };
+static const unsigned int spi2_ss0_pins[] = { GPIOY_3 };
+static const unsigned int spi2_ss1_pins[] = { GPIOY_4 };
+static const unsigned int spi2_ss2_pins[] = { GPIOY_5 };
+static const unsigned int uart_e_tx_pins[] = { GPIOY_6 };
+static const unsigned int uart_e_rx_pins[] = { GPIOY_7 };
+static const unsigned int uart_e_cts_pins[] = { GPIOY_8 };
+static const unsigned int uart_e_rts_pins[] = { GPIOY_9 };
+static const unsigned int uart_d_cts_y_pins[] = { GPIOY_10 };
+static const unsigned int uart_d_rts_y_pins[] = { GPIOY_11 };
+static const unsigned int uart_d_tx_y_pins[] = { GPIOY_12 };
+static const unsigned int uart_d_rx_y_pins[] = { GPIOY_13 };
+static const unsigned int i2c4_sck_y_pins[] = { GPIOY_15 };
+static const unsigned int i2c4_sda_y_pins[] = { GPIOY_16 };
+static const unsigned int i2c5_sck_pins[] = { GPIOY_17 };
+static const unsigned int i2c5_sda_pins[] = { GPIOY_18 };
+
+/* Bank Y func2 */
+static const unsigned int tsin_c_y_sop_pins[] = { GPIOY_4 };
+static const unsigned int tsin_c_y_din0_pins[] = { GPIOY_5 };
+static const unsigned int tsin_c_y_clk_pins[] = { GPIOY_6 };
+static const unsigned int tsin_c_y_valid_pins[] = { GPIOY_7 };
+static const unsigned int tsin_d_y_sop_pins[] = { GPIOY_8 };
+static const unsigned int tsin_d_y_din0_pins[] = { GPIOY_9 };
+static const unsigned int tsin_d_y_clk_pins[] = { GPIOY_10 };
+static const unsigned int tsin_d_y_valid_pins[] = { GPIOY_11 };
+static const unsigned int pcieck_reqn_y_pins[] = { GPIOY_18 };
+
+/* Bank Y func3 */
+static const unsigned int pwm_e_pins[] = { GPIOY_1 };
+static const unsigned int hsync_pins[] = { GPIOY_4 };
+static const unsigned int vsync_pins[] = { GPIOY_5 };
+static const unsigned int pwm_f_pins[] = { GPIOY_8 };
+static const unsigned int sync_3d_out_pins[] = { GPIOY_9 };
+static const unsigned int vx1_a_htpdn_pins[] = { GPIOY_10 };
+static const unsigned int vx1_b_htpdn_pins[] = { GPIOY_11 };
+static const unsigned int vx1_a_lockn_pins[] = { GPIOY_12 };
+static const unsigned int vx1_b_lockn_pins[] = { GPIOY_13 };
+static const unsigned int pwm_vs_y_pins[] = { GPIOY_14 };
+
+/* Bank Y func4 */
+static const unsigned int edp_a_hpd_pins[] = { GPIOY_10 };
+static const unsigned int edp_b_hpd_pins[] = { GPIOY_11 };
+
+/* Bank H func1 */
+static const unsigned int mic_mute_key_pins[] = { GPIOH_0 };
+static const unsigned int mic_mute_led_pins[] = { GPIOH_1 };
+static const unsigned int i2c3_sck_h_pins[] = { GPIOH_2 };
+static const unsigned int i2c3_sda_h_pins[] = { GPIOH_3 };
+static const unsigned int i2c4_sck_h_pins[] = { GPIOH_4 };
+static const unsigned int i2c4_sda_h_pins[] = { GPIOH_5 };
+static const unsigned int eth_link_led_pins[] = { GPIOH_6 };
+static const unsigned int eth_act_led_pins[] = { GPIOH_7 };
+
+/* Bank H func2 */
+static const unsigned int pwm_vs_h_pins[] = { GPIOH_1 };
+static const unsigned int uart_f_tx_pins[] = { GPIOH_2 };
+static const unsigned int uart_f_rx_pins[] = { GPIOH_3 };
+static const unsigned int uart_f_cts_pins[] = { GPIOH_4 };
+static const unsigned int uart_f_rts_pins[] = { GPIOH_5 };
+static const unsigned int i2c0_sda_h_pins[] = { GPIOH_6 };
+static const unsigned int i2c0_sck_h_pins[] = { GPIOH_7 };
+
+/* Bank H func3 */
+static const unsigned int pcieck_reqn_h_pins[] = { GPIOH_2 };
+
+static struct meson_pmx_group t7_periphs_groups[] = {
+ GPIO_GROUP(GPIOB_0),
+ GPIO_GROUP(GPIOB_1),
+ GPIO_GROUP(GPIOB_2),
+ GPIO_GROUP(GPIOB_3),
+ GPIO_GROUP(GPIOB_4),
+ GPIO_GROUP(GPIOB_5),
+ GPIO_GROUP(GPIOB_6),
+ GPIO_GROUP(GPIOB_7),
+ GPIO_GROUP(GPIOB_8),
+ GPIO_GROUP(GPIOB_9),
+ GPIO_GROUP(GPIOB_10),
+ GPIO_GROUP(GPIOB_11),
+ GPIO_GROUP(GPIOB_12),
+
+ GPIO_GROUP(GPIOC_0),
+ GPIO_GROUP(GPIOC_1),
+ GPIO_GROUP(GPIOC_2),
+ GPIO_GROUP(GPIOC_3),
+ GPIO_GROUP(GPIOC_4),
+ GPIO_GROUP(GPIOC_5),
+ GPIO_GROUP(GPIOC_6),
+
+ GPIO_GROUP(GPIOX_0),
+ GPIO_GROUP(GPIOX_1),
+ GPIO_GROUP(GPIOX_2),
+ GPIO_GROUP(GPIOX_3),
+ GPIO_GROUP(GPIOX_4),
+ GPIO_GROUP(GPIOX_5),
+ GPIO_GROUP(GPIOX_6),
+ GPIO_GROUP(GPIOX_7),
+ GPIO_GROUP(GPIOX_8),
+ GPIO_GROUP(GPIOX_9),
+ GPIO_GROUP(GPIOX_10),
+ GPIO_GROUP(GPIOX_11),
+ GPIO_GROUP(GPIOX_12),
+ GPIO_GROUP(GPIOX_13),
+ GPIO_GROUP(GPIOX_14),
+ GPIO_GROUP(GPIOX_15),
+ GPIO_GROUP(GPIOX_16),
+ GPIO_GROUP(GPIOX_17),
+ GPIO_GROUP(GPIOX_18),
+ GPIO_GROUP(GPIOX_19),
+
+ GPIO_GROUP(GPIOW_0),
+ GPIO_GROUP(GPIOW_1),
+ GPIO_GROUP(GPIOW_2),
+ GPIO_GROUP(GPIOW_3),
+ GPIO_GROUP(GPIOW_4),
+ GPIO_GROUP(GPIOW_5),
+ GPIO_GROUP(GPIOW_6),
+ GPIO_GROUP(GPIOW_7),
+ GPIO_GROUP(GPIOW_8),
+ GPIO_GROUP(GPIOW_9),
+ GPIO_GROUP(GPIOW_10),
+ GPIO_GROUP(GPIOW_11),
+ GPIO_GROUP(GPIOW_12),
+ GPIO_GROUP(GPIOW_13),
+ GPIO_GROUP(GPIOW_14),
+ GPIO_GROUP(GPIOW_15),
+ GPIO_GROUP(GPIOW_16),
+
+ GPIO_GROUP(GPIOD_0),
+ GPIO_GROUP(GPIOD_1),
+ GPIO_GROUP(GPIOD_2),
+ GPIO_GROUP(GPIOD_3),
+ GPIO_GROUP(GPIOD_4),
+ GPIO_GROUP(GPIOD_5),
+ GPIO_GROUP(GPIOD_6),
+ GPIO_GROUP(GPIOD_7),
+ GPIO_GROUP(GPIOD_8),
+ GPIO_GROUP(GPIOD_9),
+ GPIO_GROUP(GPIOD_10),
+ GPIO_GROUP(GPIOD_11),
+ GPIO_GROUP(GPIOD_12),
+
+ GPIO_GROUP(GPIOE_0),
+ GPIO_GROUP(GPIOE_1),
+ GPIO_GROUP(GPIOE_2),
+ GPIO_GROUP(GPIOE_3),
+ GPIO_GROUP(GPIOE_4),
+ GPIO_GROUP(GPIOE_5),
+ GPIO_GROUP(GPIOE_6),
+
+ GPIO_GROUP(GPIOZ_0),
+ GPIO_GROUP(GPIOZ_1),
+ GPIO_GROUP(GPIOZ_2),
+ GPIO_GROUP(GPIOZ_3),
+ GPIO_GROUP(GPIOZ_4),
+ GPIO_GROUP(GPIOZ_5),
+ GPIO_GROUP(GPIOZ_6),
+ GPIO_GROUP(GPIOZ_7),
+ GPIO_GROUP(GPIOZ_8),
+ GPIO_GROUP(GPIOZ_9),
+ GPIO_GROUP(GPIOZ_10),
+ GPIO_GROUP(GPIOZ_11),
+ GPIO_GROUP(GPIOZ_12),
+ GPIO_GROUP(GPIOZ_13),
+
+ GPIO_GROUP(GPIOT_0),
+ GPIO_GROUP(GPIOT_1),
+ GPIO_GROUP(GPIOT_2),
+ GPIO_GROUP(GPIOT_3),
+ GPIO_GROUP(GPIOT_4),
+ GPIO_GROUP(GPIOT_5),
+ GPIO_GROUP(GPIOT_6),
+ GPIO_GROUP(GPIOT_7),
+ GPIO_GROUP(GPIOT_8),
+ GPIO_GROUP(GPIOT_9),
+ GPIO_GROUP(GPIOT_10),
+ GPIO_GROUP(GPIOT_11),
+ GPIO_GROUP(GPIOT_12),
+ GPIO_GROUP(GPIOT_13),
+ GPIO_GROUP(GPIOT_14),
+ GPIO_GROUP(GPIOT_15),
+ GPIO_GROUP(GPIOT_16),
+ GPIO_GROUP(GPIOT_17),
+ GPIO_GROUP(GPIOT_18),
+ GPIO_GROUP(GPIOT_19),
+ GPIO_GROUP(GPIOT_20),
+ GPIO_GROUP(GPIOT_21),
+ GPIO_GROUP(GPIOT_22),
+ GPIO_GROUP(GPIOT_23),
+
+ GPIO_GROUP(GPIOM_0),
+ GPIO_GROUP(GPIOM_1),
+ GPIO_GROUP(GPIOM_2),
+ GPIO_GROUP(GPIOM_3),
+ GPIO_GROUP(GPIOM_4),
+ GPIO_GROUP(GPIOM_5),
+ GPIO_GROUP(GPIOM_6),
+ GPIO_GROUP(GPIOM_7),
+ GPIO_GROUP(GPIOM_8),
+ GPIO_GROUP(GPIOM_9),
+ GPIO_GROUP(GPIOM_10),
+ GPIO_GROUP(GPIOM_11),
+ GPIO_GROUP(GPIOM_12),
+ GPIO_GROUP(GPIOM_13),
+
+ GPIO_GROUP(GPIOY_0),
+ GPIO_GROUP(GPIOY_1),
+ GPIO_GROUP(GPIOY_2),
+ GPIO_GROUP(GPIOY_3),
+ GPIO_GROUP(GPIOY_4),
+ GPIO_GROUP(GPIOY_5),
+ GPIO_GROUP(GPIOY_6),
+ GPIO_GROUP(GPIOY_7),
+ GPIO_GROUP(GPIOY_8),
+ GPIO_GROUP(GPIOY_9),
+ GPIO_GROUP(GPIOY_10),
+ GPIO_GROUP(GPIOY_11),
+ GPIO_GROUP(GPIOY_12),
+ GPIO_GROUP(GPIOY_13),
+ GPIO_GROUP(GPIOY_14),
+ GPIO_GROUP(GPIOY_15),
+ GPIO_GROUP(GPIOY_16),
+ GPIO_GROUP(GPIOY_17),
+ GPIO_GROUP(GPIOY_18),
+
+ GPIO_GROUP(GPIOH_0),
+ GPIO_GROUP(GPIOH_1),
+ GPIO_GROUP(GPIOH_2),
+ GPIO_GROUP(GPIOH_3),
+ GPIO_GROUP(GPIOH_4),
+ GPIO_GROUP(GPIOH_5),
+ GPIO_GROUP(GPIOH_6),
+ GPIO_GROUP(GPIOH_7),
+ GPIO_GROUP(GPIO_TEST_N),
+
+ /* Bank B func1 */
+ GROUP(emmc_nand_d0, 1),
+ GROUP(emmc_nand_d1, 1),
+ GROUP(emmc_nand_d2, 1),
+ GROUP(emmc_nand_d3, 1),
+ GROUP(emmc_nand_d4, 1),
+ GROUP(emmc_nand_d5, 1),
+ GROUP(emmc_nand_d6, 1),
+ GROUP(emmc_nand_d7, 1),
+ GROUP(emmc_clk, 1),
+ GROUP(emmc_cmd, 1),
+ GROUP(emmc_nand_ds, 1),
+
+ /* Bank B func2 */
+ GROUP(nor_hold, 2),
+ GROUP(nor_d, 2),
+ GROUP(nor_q, 2),
+ GROUP(nor_c, 2),
+ GROUP(nor_wp, 2),
+ GROUP(nor_cs, 2),
+
+ /* Bank C func1 */
+ GROUP(sdcard_d0, 1),
+ GROUP(sdcard_d1, 1),
+ GROUP(sdcard_d2, 1),
+ GROUP(sdcard_d3, 1),
+ GROUP(sdcard_clk, 1),
+ GROUP(sdcard_cmd, 1),
+ GROUP(gen_clk_out_c, 1),
+
+ /* Bank C func2 */
+ GROUP(jtag_b_tdo, 2),
+ GROUP(jtag_b_tdi, 2),
+ GROUP(uart_ao_a_rx_c, 2),
+ GROUP(uart_ao_a_tx_c, 2),
+ GROUP(jtag_b_clk, 2),
+ GROUP(jtag_b_tms, 2),
+
+ /* Bank C func3 */
+ GROUP(spi1_mosi_c, 3),
+ GROUP(spi1_miso_c, 3),
+ GROUP(spi1_sclk_c, 3),
+ GROUP(spi1_ss0_c, 3),
+
+ /* Bank X func1 */
+ GROUP(sdio_d0, 1),
+ GROUP(sdio_d1, 1),
+ GROUP(sdio_d2, 1),
+ GROUP(sdio_d3, 1),
+ GROUP(sdio_clk, 1),
+ GROUP(sdio_cmd, 1),
+ GROUP(pwm_b, 1),
+ GROUP(pwm_c, 1),
+ GROUP(tdm_d0, 1),
+ GROUP(tdm_d1, 1),
+ GROUP(tdm_fs0, 1),
+ GROUP(tdm_sclk0, 1),
+ GROUP(uart_c_tx, 1),
+ GROUP(uart_c_rx, 1),
+ GROUP(uart_c_cts, 1),
+ GROUP(uart_c_rts, 1),
+ GROUP(pwm_a, 1),
+ GROUP(i2c2_sda_x, 1),
+ GROUP(i2c2_sck_x, 1),
+ GROUP(pwm_d, 1),
+
+ /* Bank X func2 */
+ GROUP(clk12_24_x, 2),
+
+ /* Bank W func1 */
+ GROUP(hdmirx_a_hpd, 1),
+ GROUP(hdmirx_a_det, 1),
+ GROUP(hdmirx_a_sda, 1),
+ GROUP(hdmirx_a_sck, 1),
+ GROUP(hdmirx_c_hpd, 1),
+ GROUP(hdmirx_c_det, 1),
+ GROUP(hdmirx_c_sda, 1),
+ GROUP(hdmirx_c_sck, 1),
+ GROUP(hdmirx_b_hpd, 1),
+ GROUP(hdmirx_b_det, 1),
+ GROUP(hdmirx_b_sda, 1),
+ GROUP(hdmirx_b_sck, 1),
+ GROUP(cec_a, 1),
+ GROUP(hdmitx_sda_w13, 1),
+ GROUP(hdmitx_sck_w14, 1),
+ GROUP(hdmitx_hpd_in, 1),
+ GROUP(cec_b, 1),
+
+ /* Bank W func2 */
+ GROUP(uart_ao_a_tx_w2, 2),
+ GROUP(uart_ao_a_rx_w3, 2),
+ GROUP(uart_ao_a_tx_w6, 2),
+ GROUP(uart_ao_a_rx_w7, 2),
+ GROUP(uart_ao_a_tx_w10, 2),
+ GROUP(uart_ao_a_rx_w11, 2),
+
+ /* Bank W func3 */
+ GROUP(hdmitx_sda_w2, 3),
+ GROUP(hdmitx_sck_w3, 3),
+
+ /* Bank D func1 */
+ GROUP(uart_ao_a_tx_d0, 1),
+ GROUP(uart_ao_a_rx_d1, 1),
+ GROUP(i2c0_ao_sck_d, 1),
+ GROUP(i2c0_ao_sda_d, 1),
+ GROUP(remote_out_d4, 1),
+ GROUP(remote_in, 1),
+ GROUP(jtag_a_clk, 1),
+ GROUP(jtag_a_tms, 1),
+ GROUP(jtag_a_tdi, 1),
+ GROUP(jtag_a_tdo, 1),
+ GROUP(gen_clk_out_d, 1),
+ GROUP(pwm_ao_g_d11, 1),
+ GROUP(wd_rsto, 1),
+
+ /* Bank D func2 */
+ GROUP(i2c0_slave_ao_sck, 2),
+ GROUP(i2c0_slave_ao_sda, 2),
+ GROUP(rtc_clk_in, 2),
+ GROUP(pwm_ao_h_d5, 2),
+ GROUP(pwm_ao_c_d, 2),
+ GROUP(pwm_ao_g_d7, 2),
+ GROUP(spdif_out_d, 2),
+ GROUP(spdif_in_d, 2),
+ GROUP(pwm_ao_h_d10, 2),
+
+ /* Bank D func3 */
+ GROUP(uart_ao_b_tx, 3),
+ GROUP(uart_ao_b_rx, 3),
+ GROUP(uart_ao_b_cts, 3),
+ GROUP(pwm_ao_c_hiz, 3),
+ GROUP(pwm_ao_g_hiz, 3),
+ GROUP(uart_ao_b_rts, 3),
+
+ /* Bank D func4 */
+ GROUP(remote_out_d6, 4),
+
+ /* Bank E func1 */
+ GROUP(pwm_ao_a, 1),
+ GROUP(pwm_ao_b, 1),
+ GROUP(pwm_ao_c_e, 1),
+ GROUP(pwm_ao_d, 1),
+ GROUP(pwm_ao_e, 1),
+ GROUP(pwm_ao_f, 1),
+ GROUP(pwm_ao_g_e, 1),
+
+ /* Bank E func2 */
+ GROUP(i2c0_ao_sck_e, 2),
+ GROUP(i2c0_ao_sda_e, 2),
+ GROUP(clk25m, 2),
+ GROUP(i2c1_ao_sck, 2),
+ GROUP(i2c1_ao_sda, 2),
+ GROUP(rtc_clk_out, 2),
+
+ /* Bank E func3 */
+ GROUP(clk12_24_e, 3),
+
+ /* Bank Z func1 */
+ GROUP(eth_mdio, 1),
+ GROUP(eth_mdc, 1),
+ GROUP(eth_rgmii_rx_clk, 1),
+ GROUP(eth_rx_dv, 1),
+ GROUP(eth_rxd0, 1),
+ GROUP(eth_rxd1, 1),
+ GROUP(eth_rxd2_rgmii, 1),
+ GROUP(eth_rxd3_rgmii, 1),
+ GROUP(eth_rgmii_tx_clk, 1),
+ GROUP(eth_txen, 1),
+ GROUP(eth_txd0, 1),
+ GROUP(eth_txd1, 1),
+ GROUP(eth_txd2_rgmii, 1),
+ GROUP(eth_txd3_rgmii, 1),
+
+ /* Bank Z func2 */
+ GROUP(iso7816_clk_z, 2),
+ GROUP(iso7816_data_z, 2),
+ GROUP(tsin_b_valid, 2),
+ GROUP(tsin_b_sop, 2),
+ GROUP(tsin_b_din0, 2),
+ GROUP(tsin_b_clk, 2),
+ GROUP(tsin_b_fail, 2),
+ GROUP(tsin_b_din1, 2),
+ GROUP(tsin_b_din2, 2),
+ GROUP(tsin_b_din3, 2),
+ GROUP(tsin_b_din4, 2),
+ GROUP(tsin_b_din5, 2),
+ GROUP(tsin_b_din6, 2),
+ GROUP(tsin_b_din7, 2),
+
+ /* Bank Z func3 */
+ GROUP(tsin_c_z_valid, 3),
+ GROUP(tsin_c_z_sop, 3),
+ GROUP(tsin_c_z_din0, 3),
+ GROUP(tsin_c_z_clk, 3),
+ GROUP(tsin_d_z_valid, 3),
+ GROUP(tsin_d_z_sop, 3),
+ GROUP(tsin_d_z_din0, 3),
+ GROUP(tsin_d_z_clk, 3),
+
+ /* Bank Z func4 */
+ GROUP(spi4_mosi, 4),
+ GROUP(spi4_miso, 4),
+ GROUP(spi4_sclk, 4),
+ GROUP(spi4_ss0, 4),
+ GROUP(spi5_mosi, 4),
+ GROUP(spi5_miso, 4),
+ GROUP(spi5_sclk, 4),
+ GROUP(spi5_ss0, 4),
+
+ /* Bank T func1 */
+ GROUP(mclk1, 1),
+ GROUP(tdm_sclk1, 1),
+ GROUP(tdm_fs1, 1),
+ GROUP(tdm_d2, 1),
+ GROUP(tdm_d3, 1),
+ GROUP(tdm_d4, 1),
+ GROUP(tdm_d5, 1),
+ GROUP(tdm_d6, 1),
+ GROUP(tdm_d7, 1),
+ GROUP(tdm_d8, 1),
+ GROUP(tdm_d9, 1),
+ GROUP(tdm_d10, 1),
+ GROUP(tdm_d11, 1),
+ GROUP(mclk2, 1),
+ GROUP(tdm_sclk2, 1),
+ GROUP(tdm_fs2, 1),
+ GROUP(i2c1_sck, 1),
+ GROUP(i2c1_sda, 1),
+ GROUP(spi0_mosi, 1),
+ GROUP(spi0_miso, 1),
+ GROUP(spi0_sclk, 1),
+ GROUP(spi0_ss0, 1),
+ GROUP(spi0_ss1, 1),
+ GROUP(spi0_ss2, 1),
+
+ /* Bank T func2 */
+ GROUP(spdif_in_t, 2),
+ GROUP(spdif_out_t, 2),
+ GROUP(iso7816_clk_t, 2),
+ GROUP(iso7816_data_t, 2),
+ GROUP(tsin_a_sop_t, 2),
+ GROUP(tsin_a_din0_t, 2),
+ GROUP(tsin_a_clk_t, 2),
+ GROUP(tsin_a_valid_t, 2),
+ GROUP(i2c0_sck_t, 2),
+ GROUP(i2c0_sda_t, 2),
+ GROUP(i2c2_sck_t, 2),
+ GROUP(i2c2_sda_t, 2),
+
+ /* Bank T func3 */
+ GROUP(spi3_mosi, 3),
+ GROUP(spi3_miso, 3),
+ GROUP(spi3_sclk, 3),
+ GROUP(spi3_ss0, 3),
+
+ /* Bank M func1 */
+ GROUP(tdm_d12, 1),
+ GROUP(tdm_d13, 1),
+ GROUP(tdm_d14, 1),
+ GROUP(tdm_d15, 1),
+ GROUP(tdm_sclk3, 1),
+ GROUP(tdm_fs3, 1),
+ GROUP(i2c3_sda_m, 1),
+ GROUP(i2c3_sck_m, 1),
+ GROUP(spi1_mosi_m, 1),
+ GROUP(spi1_miso_m, 1),
+ GROUP(spi1_sclk_m, 1),
+ GROUP(spi1_ss0_m, 1),
+ GROUP(spi1_ss1_m, 1),
+ GROUP(spi1_ss2_m, 1),
+
+ /* Bank M func2 */
+ GROUP(pdm_din1_m0, 2),
+ GROUP(pdm_din2, 2),
+ GROUP(pdm_din3, 2),
+ GROUP(pdm_dclk, 2),
+ GROUP(pdm_din0, 2),
+ GROUP(pdm_din1_m5, 2),
+ GROUP(uart_d_tx_m, 2),
+ GROUP(uart_d_rx_m, 2),
+ GROUP(uart_d_cts_m, 2),
+ GROUP(uart_d_rts_m, 2),
+ GROUP(i2c2_sda_m, 2),
+ GROUP(i2c2_sck_m, 2),
+
+ /* Bank Y func1 */
+ GROUP(spi2_mosi, 1),
+ GROUP(spi2_miso, 1),
+ GROUP(spi2_sclk, 1),
+ GROUP(spi2_ss0, 1),
+ GROUP(spi2_ss1, 1),
+ GROUP(spi2_ss2, 1),
+ GROUP(uart_e_tx, 1),
+ GROUP(uart_e_rx, 1),
+ GROUP(uart_e_cts, 1),
+ GROUP(uart_e_rts, 1),
+ GROUP(uart_d_cts_y, 1),
+ GROUP(uart_d_rts_y, 1),
+ GROUP(uart_d_tx_y, 1),
+ GROUP(uart_d_rx_y, 1),
+ GROUP(i2c4_sck_y, 1),
+ GROUP(i2c4_sda_y, 1),
+ GROUP(i2c5_sck, 1),
+ GROUP(i2c5_sda, 1),
+
+ /* Bank Y func2 */
+ GROUP(tsin_c_y_sop, 2),
+ GROUP(tsin_c_y_din0, 2),
+ GROUP(tsin_c_y_clk, 2),
+ GROUP(tsin_c_y_valid, 2),
+ GROUP(tsin_d_y_sop, 2),
+ GROUP(tsin_d_y_din0, 2),
+ GROUP(tsin_d_y_clk, 2),
+ GROUP(tsin_d_y_valid, 2),
+ GROUP(pcieck_reqn_y, 2),
+
+ /* Bank Y func3 */
+ GROUP(pwm_e, 3),
+ GROUP(hsync, 3),
+ GROUP(vsync, 3),
+ GROUP(pwm_f, 3),
+ GROUP(sync_3d_out, 3),
+ GROUP(vx1_a_htpdn, 3),
+ GROUP(vx1_b_htpdn, 3),
+ GROUP(vx1_a_lockn, 3),
+ GROUP(vx1_b_lockn, 3),
+ GROUP(pwm_vs_y, 3),
+
+ /* Bank Y func4 */
+ GROUP(edp_a_hpd, 4),
+ GROUP(edp_b_hpd, 4),
+
+ /* Bank H func1 */
+ GROUP(mic_mute_key, 1),
+ GROUP(mic_mute_led, 1),
+ GROUP(i2c3_sck_h, 1),
+ GROUP(i2c3_sda_h, 1),
+ GROUP(i2c4_sck_h, 1),
+ GROUP(i2c4_sda_h, 1),
+ GROUP(eth_link_led, 1),
+ GROUP(eth_act_led, 1),
+
+ /* Bank H func2 */
+ GROUP(pwm_vs_h, 2),
+ GROUP(uart_f_tx, 2),
+ GROUP(uart_f_rx, 2),
+ GROUP(uart_f_cts, 2),
+ GROUP(uart_f_rts, 2),
+ GROUP(i2c0_sda_h, 2),
+ GROUP(i2c0_sck_h, 2),
+
+ /* Bank H func3 */
+ GROUP(pcieck_reqn_h, 3),
+};
+
+static const char * const gpio_periphs_groups[] = {
+ "GPIOB_0", "GPIOB_1", "GPIOB_2", "GPIOB_3", "GPIOB_4", "GPIOB_5",
+ "GPIOB_6", "GPIOB_7", "GPIOB_8", "GPIOB_9", "GPIOB_10",
+ "GPIOB_11", "GPIOB_12",
+
+ "GPIOC_0", "GPIOC_1", "GPIOC_2", "GPIOC_3", "GPIOC_4", "GPIOC_5",
+ "GPIOC_6",
+
+ "GPIOX_0", "GPIOX_1", "GPIOX_2", "GPIOX_3", "GPIOX_4", "GPIOX_5",
+ "GPIOX_6", "GPIOX_7", "GPIOX_8", "GPIOX_9", "GPIOX_10", "GPIOX_11",
+ "GPIOX_12", "GPIOX_13", "GPIOX_14", "GPIOX_15", "GPIOX_16", "GPIOX_17",
+ "GPIOX_18", "GPIOX_19",
+
+ "GPIOW_0", "GPIOW_1", "GPIOW_2", "GPIOW_3", "GPIOW_4", "GPIOW_5",
+ "GPIOW_6", "GPIOW_7", "GPIOW_8", "GPIOW_9", "GPIOW_10", "GPIOW_11",
+ "GPIOW_12", "GPIOW_13", "GPIOW_14", "GPIOW_15", "GPIOW_16",
+
+ "GPIOD_0", "GPIOD_1", "GPIOD_2", "GPIOD_3", "GPIOD_4", "GPIOD_5",
+ "GPIOD_6", "GPIOD_7", "GPIOD_8", "GPIOD_9", "GPIOD_10", "GPIOD_11",
+ "GPIOD_12",
+
+ "GPIOE_0", "GPIOE_1", "GPIOE_2", "GPIOE_3", "GPIOE_4", "GPIOE_5",
+ "GPIOE_6",
+
+ "GPIOZ_0", "GPIOZ_1", "GPIOZ_2", "GPIOZ_3", "GPIOZ_4", "GPIOZ_5",
+ "GPIOZ_6", "GPIOZ_7", "GPIOZ_8", "GPIOZ_9", "GPIOZ_10", "GPIOZ_11",
+ "GPIOZ_12", "GPIOZ_13",
+
+ "GPIOT_0", "GPIOT_1", "GPIOT_2", "GPIOT_3", "GPIOT_4", "GPIOT_5",
+ "GPIOT_6", "GPIOT_7", "GPIOT_8", "GPIOT_9", "GPIOT_10", "GPIOT_11",
+ "GPIOT_12", "GPIOT_13", "GPIOT_14", "GPIOT_15", "GPIOT_16",
+ "GPIOT_17", "GPIOT_18", "GPIOT_19", "GPIOT_20", "GPIOT_21",
+ "GPIOT_22", "GPIOT_23",
+
+ "GPIOM_0", "GPIOM_1", "GPIOM_2", "GPIOM_3", "GPIOM_4", "GPIOM_5",
+ "GPIOM_6", "GPIOM_7", "GPIOM_8", "GPIOM_9", "GPIOM_10", "GPIOM_11",
+ "GPIOM_12", "GPIOM_13",
+
+ "GPIOY_0", "GPIOY_1", "GPIOY_2", "GPIOY_3", "GPIOY_4", "GPIOY_5",
+ "GPIOY_6", "GPIOY_7", "GPIOY_8", "GPIOY_9", "GPIOY_10", "GPIOY_11",
+ "GPIOY_12", "GPIOY_13", "GPIOY_14", "GPIOY_15", "GPIOY_16",
+ "GPIOY_17", "GPIOY_18",
+
+ "GPIOH_0", "GPIOH_1", "GPIOH_2", "GPIOH_3", "GPIOH_4", "GPIOH_5",
+ "GPIOH_6", "GPIOH_7",
+
+ "GPIO_TEST_N",
+};
+
+static const char * const emmc_groups[] = {
+ "emmc_nand_d0", "emmc_nand_d1", "emmc_nand_d2", "emmc_nand_d3",
+ "emmc_nand_d4", "emmc_nand_d5", "emmc_nand_d6", "emmc_nand_d7",
+ "emmc_clk", "emmc_cmd", "emmc_nand_ds",
+};
+
+static const char * const nor_groups[] = {
+ "nor_hold", "nor_d", "nor_q", "nor_c", "nor_wp", "nor_cs",
+};
+
+static const char * const sdcard_groups[] = {
+ "sdcard_d0", "sdcard_d1", "sdcard_d2", "sdcard_d3", "sdcard_clk",
+ "sdcard_cmd",
+};
+
+static const char * const sdio_groups[] = {
+ "sdio_d0", "sdio_d1", "sdio_d2", "sdio_d3", "sdio_clk", "sdio_cmd",
+};
+
+static const char * const gen_clk_groups[] = {
+ "gen_clk_out_c", "gen_clk_out_d",
+};
+
+static const char * const jtag_a_groups[] = {
+ "jtag_a_clk", "jtag_a_tms", "jtag_a_tdi", "jtag_a_tdo",
+};
+
+static const char * const jtag_b_groups[] = {
+ "jtag_b_tdo", "jtag_b_tdi", "jtag_b_clk", "jtag_b_tms",
+};
+
+static const char * const uart_c_groups[] = {
+ "uart_c_tx", "uart_c_rx", "uart_c_cts", "uart_c_rts",
+};
+
+static const char * const uart_d_groups[] = {
+ "uart_d_tx_m", "uart_d_rx_m", "uart_d_cts_m", "uart_d_rts_m",
+ "uart_d_rts_y", "uart_d_tx_y", "uart_d_rx_y", "uart_d_cts_y",
+};
+
+static const char * const uart_e_groups[] = {
+ "uart_e_tx", "uart_e_rx", "uart_e_cts", "uart_e_rts",
+};
+
+static const char * const uart_f_groups[] = {
+ "uart_f_tx", "uart_f_rx", "uart_f_cts", "uart_f_rts",
+};
+
+static const char * const uart_ao_a_groups[] = {
+ "uart_ao_a_rx_c", "uart_ao_a_tx_c", "uart_ao_a_tx_w2",
+ "uart_ao_a_rx_w3", "uart_ao_a_tx_w6", "uart_ao_a_rx_w7",
+ "uart_ao_a_tx_w10", "uart_ao_a_rx_w11", "uart_ao_a_tx_d0",
+ "uart_ao_a_rx_d1",
+};
+
+static const char * const uart_ao_b_groups[] = {
+ "uart_ao_b_tx", "uart_ao_b_rx", "uart_ao_b_cts", "uart_ao_b_rts",
+};
+
+static const char * const spi0_groups[] = {
+ "spi0_mosi", "spi0_miso", "spi0_sclk", "spi0_ss0", "spi0_ss1",
+ "spi0_ss2",
+};
+
+static const char * const spi1_groups[] = {
+ "spi1_mosi_c", "spi1_miso_c", "spi1_sclk_c", "spi1_ss0_c",
+ "spi1_mosi_m", "spi1_miso_m", "spi1_sclk_m", "spi1_ss0_m",
+ "spi1_ss1_m", "spi1_ss2_m",
+};
+
+static const char * const spi2_groups[] = {
+ "spi2_mosi", "spi2_miso", "spi2_sclk", "spi2_ss0", "spi2_ss1",
+ "spi2_ss2",
+};
+
+static const char * const spi3_groups[] = {
+ "spi3_mosi", "spi3_miso", "spi3_sclk", "spi3_ss0",
+};
+
+static const char * const spi4_groups[] = {
+ "spi4_mosi", "spi4_miso", "spi4_sclk", "spi4_ss0",
+};
+
+static const char * const spi5_groups[] = {
+ "spi5_mosi", "spi5_miso", "spi5_sclk", "spi5_ss0",
+};
+
+static const char * const pwm_a_groups[] = {
+ "pwm_a",
+};
+
+static const char * const pwm_b_groups[] = {
+ "pwm_b",
+};
+
+static const char * const pwm_c_groups[] = {
+ "pwm_c",
+};
+
+static const char * const pwm_d_groups[] = {
+ "pwm_d",
+};
+
+static const char * const pwm_e_groups[] = {
+ "pwm_e",
+};
+
+static const char * const pwm_f_groups[] = {
+ "pwm_f",
+};
+
+static const char * const pwm_ao_c_hiz_groups[] = {
+ "pwm_ao_c_hiz",
+};
+
+static const char * const pwm_ao_g_hiz_groups[] = {
+ "pwm_ao_g_hiz",
+};
+
+static const char * const pwm_ao_a_groups[] = {
+ "pwm_ao_a",
+};
+
+static const char * const pwm_ao_b_groups[] = {
+ "pwm_ao_b",
+};
+
+static const char * const pwm_ao_c_groups[] = {
+ "pwm_ao_c_d", "pwm_ao_c_e",
+};
+
+static const char * const pwm_ao_d_groups[] = {
+ "pwm_ao_d",
+};
+
+static const char * const pwm_ao_e_groups[] = {
+ "pwm_ao_e",
+};
+
+static const char * const pwm_ao_f_groups[] = {
+ "pwm_ao_f",
+};
+
+static const char * const pwm_ao_h_groups[] = {
+ "pwm_ao_h_d5", "pwm_ao_h_d10",
+};
+
+static const char * const pwm_ao_g_groups[] = {
+ "pwm_ao_g_d11", "pwm_ao_g_d7", "pwm_ao_g_e",
+};
+
+static const char * const pwm_vs_groups[] = {
+ "pwm_vs_y", "pwm_vs_h",
+};
+
+static const char * const tdm_groups[] = {
+ "tdm_d0", "tdm_d1", "tdm_fs0", "tdm_sclk0", "tdm_sclk1", "tdm_fs1",
+ "tdm_d2", "tdm_d3", "tdm_d4", "tdm_d5", "tdm_d6", "tdm_d7",
+ "tdm_d8", "tdm_d9", "tdm_d10", "tdm_d11", "tdm_sclk2", "tdm_fs2",
+ "tdm_d12", "tdm_d13", "tdm_d14", "tdm_d15", "tdm_sclk3", "tdm_fs3",
+};
+
+static const char * const i2c0_slave_ao_groups[] = {
+ "i2c0_slave_ao_sck", "i2c0_slave_ao_sda",
+};
+
+static const char * const i2c0_ao_groups[] = {
+ "i2c0_ao_sck_d", "i2c0_ao_sda_d",
+ "i2c0_ao_sck_e", "i2c0_ao_sda_e",
+};
+
+static const char * const i2c1_ao_groups[] = {
+ "i2c1_ao_sck", "i2c1_ao_sda",
+};
+
+static const char * const i2c0_groups[] = {
+ "i2c0_sck_t", "i2c0_sda_t", "i2c0_sck_h", "i2c0_sda_h",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c1_sck", "i2c1_sda",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2_sda_x", "i2c2_sck_x",
+ "i2c2_sda_t", "i2c2_sck_t",
+ "i2c2_sda_m", "i2c2_sck_m",
+};
+
+static const char * const i2c3_groups[] = {
+ "i2c3_sda_m", "i2c3_sck_m", "i2c3_sck_h", "i2c3_sda_h",
+};
+
+static const char * const i2c4_groups[] = {
+ "i2c4_sck_y", "i2c4_sda_y", "i2c4_sck_h", "i2c4_sda_h",
+};
+
+static const char * const i2c5_groups[] = {
+ "i2c5_sck", "i2c5_sda",
+};
+
+static const char * const clk12_24_groups[] = {
+ "clk12_24_x", "clk12_24_e",
+};
+
+static const char * const hdmirx_a_groups[] = {
+ "hdmirx_a_hpd", "hdmirx_a_det", "hdmirx_a_sda", "hdmirx_a_sck",
+};
+
+static const char * const hdmirx_b_groups[] = {
+ "hdmirx_b_hpd", "hdmirx_b_det", "hdmirx_b_sda", "hdmirx_b_sck",
+};
+
+static const char * const hdmirx_c_groups[] = {
+ "hdmirx_c_hpd", "hdmirx_c_det", "hdmirx_c_sda", "hdmirx_c_sck",
+};
+
+static const char * const cec_a_groups[] = {
+ "cec_a",
+};
+
+static const char * const cec_b_groups[] = {
+ "cec_b",
+};
+
+static const char * const hdmitx_groups[] = {
+ "hdmitx_sda_w13", "hdmitx_sck_w14", "hdmitx_hpd_in",
+ "hdmitx_sda_w2", "hdmitx_sck_w3",
+};
+
+static const char * const remote_out_groups[] = {
+ "remote_out_d4", "remote_out_d6",
+};
+
+static const char * const remote_in_groups[] = {
+ "remote_in",
+};
+
+static const char * const wd_rsto_groups[] = {
+ "wd_rsto",
+};
+
+static const char * const rtc_clk_groups[] = {
+ "rtc_clk_in", "rtc_clk_out",
+};
+
+static const char * const spdif_out_groups[] = {
+ "spdif_out_d", "spdif_out_t",
+};
+
+static const char * const spdif_in_groups[] = {
+ "spdif_in_d", "spdif_in_t",
+};
+
+static const char * const clk25m_groups[] = {
+ "clk25m",
+};
+
+static const char * const eth_groups[] = {
+ "eth_mdio", "eth_mdc", "eth_rgmii_rx_clk", "eth_rx_dv", "eth_rxd0",
+ "eth_rxd1", "eth_rxd2_rgmii", "eth_rxd3_rgmii", "eth_rgmii_tx_clk",
+ "eth_txen", "eth_txd0", "eth_txd1", "eth_txd2_rgmii",
+ "eth_txd3_rgmii", "eth_link_led", "eth_act_led",
+};
+
+static const char * const iso7816_groups[] = {
+ "iso7816_clk_z", "iso7816_data_z",
+ "iso7816_clk_t", "iso7816_data_t",
+};
+
+static const char * const tsin_a_groups[] = {
+ "tsin_a_sop_t", "tsin_a_din0_t", "tsin_a_clk_t", "tsin_a_valid_t",
+};
+
+static const char * const tsin_b_groups[] = {
+ "tsin_b_valid", "tsin_b_sop", "tsin_b_din0", "tsin_b_clk",
+ "tsin_b_fail", "tsin_b_din1", "tsin_b_din2", "tsin_b_din3",
+ "tsin_b_din4", "tsin_b_din5", "tsin_b_din6", "tsin_b_din7",
+};
+
+static const char * const tsin_c_groups[] = {
+ "tsin_c_z_valid", "tsin_c_z_sop", "tsin_c_z_din0", "tsin_c_z_clk",
+ "tsin_c_y_sop", "tsin_c_y_din0", "tsin_c_y_clk", "tsin_c_y_valid",
+};
+
+static const char * const tsin_d_groups[] = {
+ "tsin_d_z_valid", "tsin_d_z_sop", "tsin_d_z_din0", "tsin_d_z_clk",
+ "tsin_d_y_sop", "tsin_d_y_din0", "tsin_d_y_clk", "tsin_d_y_valid",
+};
+
+static const char * const mclk_groups[] = {
+ "mclk1", "mclk2",
+};
+
+static const char * const pdm_groups[] = {
+ "pdm_din1_m0", "pdm_din2", "pdm_din3", "pdm_dclk", "pdm_din0",
+ "pdm_din1_m5",
+};
+
+static const char * const pcieck_groups[] = {
+ "pcieck_reqn_y", "pcieck_reqn_h",
+};
+
+static const char * const hsync_groups[] = {
+ "hsync",
+};
+
+static const char * const vsync_groups[] = {
+ "vsync",
+};
+
+static const char * const sync_3d_groups[] = {
+ "sync_3d_out",
+};
+
+static const char * const vx1_a_groups[] = {
+ "vx1_a_htpdn", "vx1_a_lockn",
+};
+
+static const char * const vx1_b_groups[] = {
+ "vx1_b_htpdn", "vx1_b_lockn",
+};
+
+static const char * const edp_a_groups[] = {
+ "edp_a_hpd",
+};
+
+static const char * const edp_b_groups[] = {
+ "edp_b_hpd",
+};
+
+static const char * const mic_mute_groups[] = {
+ "mic_mute_key", "mic_mute_led",
+};
+
+static struct meson_pmx_func t7_periphs_functions[] = {
+ FUNCTION(gpio_periphs),
+ FUNCTION(emmc),
+ FUNCTION(nor),
+ FUNCTION(sdcard),
+ FUNCTION(sdio),
+ FUNCTION(gen_clk),
+ FUNCTION(jtag_a),
+ FUNCTION(jtag_b),
+ FUNCTION(uart_c),
+ FUNCTION(uart_d),
+ FUNCTION(uart_e),
+ FUNCTION(uart_f),
+ FUNCTION(uart_ao_a),
+ FUNCTION(uart_ao_b),
+ FUNCTION(spi0),
+ FUNCTION(spi1),
+ FUNCTION(spi2),
+ FUNCTION(spi3),
+ FUNCTION(spi4),
+ FUNCTION(spi5),
+ FUNCTION(pwm_a),
+ FUNCTION(pwm_b),
+ FUNCTION(pwm_c),
+ FUNCTION(pwm_d),
+ FUNCTION(pwm_e),
+ FUNCTION(pwm_f),
+ FUNCTION(pwm_ao_c_hiz),
+ FUNCTION(pwm_ao_g_hiz),
+ FUNCTION(pwm_ao_a),
+ FUNCTION(pwm_ao_b),
+ FUNCTION(pwm_ao_c),
+ FUNCTION(pwm_ao_d),
+ FUNCTION(pwm_ao_e),
+ FUNCTION(pwm_ao_f),
+ FUNCTION(pwm_ao_h),
+ FUNCTION(pwm_ao_g),
+ FUNCTION(pwm_vs),
+ FUNCTION(tdm),
+ FUNCTION(i2c0_slave_ao),
+ FUNCTION(i2c0_ao),
+ FUNCTION(i2c1_ao),
+ FUNCTION(i2c0),
+ FUNCTION(i2c1),
+ FUNCTION(i2c2),
+ FUNCTION(i2c3),
+ FUNCTION(i2c4),
+ FUNCTION(i2c5),
+ FUNCTION(clk12_24),
+ FUNCTION(hdmirx_a),
+ FUNCTION(hdmirx_b),
+ FUNCTION(hdmirx_c),
+ FUNCTION(cec_a),
+ FUNCTION(cec_b),
+ FUNCTION(hdmitx),
+ FUNCTION(remote_out),
+ FUNCTION(remote_in),
+ FUNCTION(wd_rsto),
+ FUNCTION(rtc_clk),
+ FUNCTION(spdif_out),
+ FUNCTION(spdif_in),
+ FUNCTION(clk25m),
+ FUNCTION(eth),
+ FUNCTION(iso7816),
+ FUNCTION(tsin_a),
+ FUNCTION(tsin_b),
+ FUNCTION(tsin_c),
+ FUNCTION(tsin_d),
+ FUNCTION(mclk),
+ FUNCTION(pdm),
+ FUNCTION(pcieck),
+ FUNCTION(hsync),
+ FUNCTION(vsync),
+ FUNCTION(sync_3d),
+ FUNCTION(vx1_a),
+ FUNCTION(vx1_b),
+ FUNCTION(edp_a),
+ FUNCTION(edp_b),
+ FUNCTION(mic_mute),
+};
+
+static struct meson_bank t7_periphs_banks[] = {
+ /* name first last irq pullen pull dir out in ds */
+ BANK_DS("D", GPIOD_0, GPIOD_12, 57, 69,
+ 0x03, 0, 0x04, 0, 0x02, 0, 0x01, 0, 0x00, 0, 0x07, 0),
+ BANK_DS("E", GPIOE_0, GPIOE_6, 70, 76,
+ 0x0b, 0, 0x0c, 0, 0x0a, 0, 0x09, 0, 0x08, 0, 0x0f, 0),
+ BANK_DS("Z", GPIOZ_0, GPIOZ_13, 77, 90,
+ 0x13, 0, 0x14, 0, 0x12, 0, 0x11, 0, 0x10, 0, 0x17, 0),
+ BANK_DS("H", GPIOH_0, GPIOH_7, 148, 155,
+ 0x1b, 0, 0x1c, 0, 0x1a, 0, 0x19, 0, 0x18, 0, 0x1f, 0),
+ BANK_DS("C", GPIOC_0, GPIOC_6, 13, 19,
+ 0x23, 0, 0x24, 0, 0x22, 0, 0x21, 0, 0x20, 0, 0x27, 0),
+ BANK_DS("B", GPIOB_0, GPIOB_12, 0, 12,
+ 0x2b, 0, 0x2c, 0, 0x2a, 0, 0x29, 0, 0x28, 0, 0x2f, 0),
+ BANK_DS("X", GPIOX_0, GPIOX_19, 20, 39,
+ 0x33, 0, 0x34, 0, 0x32, 0, 0x31, 0, 0x30, 0, 0x37, 0),
+ BANK_DS("T", GPIOT_0, GPIOT_23, 91, 114,
+ 0x43, 0, 0x44, 0, 0x42, 0, 0x41, 0, 0x40, 0, 0x47, 0),
+ BANK_DS("Y", GPIOY_0, GPIOY_18, 129, 147,
+ 0x53, 0, 0x54, 0, 0x52, 0, 0x51, 0, 0x50, 0, 0x57, 0),
+ BANK_DS("W", GPIOW_0, GPIOW_16, 40, 56,
+ 0x63, 0, 0x64, 0, 0x62, 0, 0x61, 0, 0x60, 0, 0x67, 0),
+ BANK_DS("M", GPIOM_0, GPIOM_13, 115, 128,
+ 0x73, 0, 0x74, 0, 0x72, 0, 0x71, 0, 0x70, 0, 0x77, 0),
+ BANK_DS("TEST_N", GPIO_TEST_N, GPIO_TEST_N, 156, 156,
+ 0x83, 0, 0x84, 0, 0x82, 0, 0x81, 0, 0x80, 0, 0x87, 0),
+};
+
+static struct meson_pmx_bank t7_periphs_pmx_banks[] = {
+ /* name first last reg offset */
+ BANK_PMX("D", GPIOD_0, GPIOD_12, 0x0a, 0),
+ BANK_PMX("E", GPIOE_0, GPIOE_6, 0x0c, 0),
+ BANK_PMX("Z", GPIOZ_0, GPIOZ_13, 0x05, 0),
+ BANK_PMX("H", GPIOH_0, GPIOH_7, 0x08, 0),
+ BANK_PMX("C", GPIOC_0, GPIOC_6, 0x07, 0),
+ BANK_PMX("B", GPIOB_0, GPIOB_12, 0x00, 0),
+ BANK_PMX("X", GPIOX_0, GPIOX_19, 0x02, 0),
+ BANK_PMX("T", GPIOT_0, GPIOT_23, 0x0f, 0),
+ BANK_PMX("Y", GPIOY_0, GPIOY_18, 0x13, 0),
+ BANK_PMX("W", GPIOW_0, GPIOW_16, 0x16, 0),
+ BANK_PMX("M", GPIOM_0, GPIOM_13, 0x0d, 0),
+ BANK_PMX("TEST_N", GPIO_TEST_N, GPIO_TEST_N, 0x09, 0),
+};
+
+static struct meson_axg_pmx_data t7_periphs_pmx_banks_data = {
+ .pmx_banks = t7_periphs_pmx_banks,
+ .num_pmx_banks = ARRAY_SIZE(t7_periphs_pmx_banks),
+};
+
+static struct meson_pinctrl_data t7_periphs_pinctrl_data = {
+ .name = "periphs-banks",
+ .pins = t7_periphs_pins,
+ .groups = t7_periphs_groups,
+ .funcs = t7_periphs_functions,
+ .banks = t7_periphs_banks,
+ .num_pins = ARRAY_SIZE(t7_periphs_pins),
+ .num_groups = ARRAY_SIZE(t7_periphs_groups),
+ .num_funcs = ARRAY_SIZE(t7_periphs_functions),
+ .num_banks = ARRAY_SIZE(t7_periphs_banks),
+ .pmx_ops = &meson_axg_pmx_ops,
+ .pmx_data = &t7_periphs_pmx_banks_data,
+ .parse_dt = &meson_a1_parse_dt_extra,
+};
+
+static const struct of_device_id t7_pinctrl_dt_match[] = {
+ {
+ .compatible = "amlogic,t7-periphs-pinctrl",
+ .data = &t7_periphs_pinctrl_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, t7_pinctrl_dt_match);
+
+static struct platform_driver t7_pinctrl_driver = {
+ .probe = meson_pinctrl_probe,
+ .driver = {
+ .name = "amlogic-t7-pinctrl",
+ .of_match_table = t7_pinctrl_dt_match,
+ },
+};
+module_platform_driver(t7_pinctrl_driver);
+
+MODULE_AUTHOR("Huqiang Qin <huqiang.qin@amlogic.com>");
+MODULE_DESCRIPTION("Pin controller and GPIO driver for Amlogic T7 SoC");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
index 040e418dbfc1..162dfc213669 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
@@ -12,8 +12,8 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
+#include <linux/property.h>
#include "pinctrl-mvebu.h"
@@ -404,13 +404,8 @@ static struct pinctrl_gpio_range armada_38x_mpp_gpio_ranges[] = {
static int armada_38x_pinctrl_probe(struct platform_device *pdev)
{
struct mvebu_pinctrl_soc_info *soc = &armada_38x_pinctrl_info;
- const struct of_device_id *match =
- of_match_device(armada_38x_pinctrl_of_match, &pdev->dev);
- if (!match)
- return -ENODEV;
-
- soc->variant = (unsigned) match->data & 0xff;
+ soc->variant = (unsigned)device_get_match_data(&pdev->dev) & 0xff;
soc->controls = armada_38x_mpp_controls;
soc->ncontrols = ARRAY_SIZE(armada_38x_mpp_controls);
soc->gpioranges = armada_38x_mpp_gpio_ranges;
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-39x.c b/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
index c33f1cbaf661..d9c98faa7b0e 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
@@ -12,8 +12,8 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
+#include <linux/property.h>
#include "pinctrl-mvebu.h"
@@ -386,13 +386,8 @@ static struct pinctrl_gpio_range armada_39x_mpp_gpio_ranges[] = {
static int armada_39x_pinctrl_probe(struct platform_device *pdev)
{
struct mvebu_pinctrl_soc_info *soc = &armada_39x_pinctrl_info;
- const struct of_device_id *match =
- of_match_device(armada_39x_pinctrl_of_match, &pdev->dev);
- if (!match)
- return -ENODEV;
-
- soc->variant = (unsigned) match->data & 0xff;
+ soc->variant = (unsigned)device_get_match_data(&pdev->dev) & 0xff;
soc->controls = armada_39x_mpp_controls;
soc->ncontrols = ARRAY_SIZE(armada_39x_mpp_controls);
soc->gpioranges = armada_39x_mpp_gpio_ranges;
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-ap806.c b/drivers/pinctrl/mvebu/pinctrl-armada-ap806.c
index 89bab536717d..7becf2781a0b 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-ap806.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-ap806.c
@@ -13,7 +13,6 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
#include "pinctrl-mvebu.h"
@@ -106,10 +105,8 @@ static struct pinctrl_gpio_range armada_ap806_mpp_gpio_ranges[] = {
static int armada_ap806_pinctrl_probe(struct platform_device *pdev)
{
struct mvebu_pinctrl_soc_info *soc = &armada_ap806_pinctrl_info;
- const struct of_device_id *match =
- of_match_device(armada_ap806_pinctrl_of_match, &pdev->dev);
- if (!match || !pdev->dev.parent)
+ if (!pdev->dev.parent)
return -ENODEV;
soc->variant = 0; /* no variants for Armada AP806 */
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c b/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c
index 8ba8f3e9121f..9a250c491f33 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c
@@ -12,9 +12,9 @@
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include "pinctrl-mvebu.h"
@@ -638,8 +638,6 @@ static void mvebu_pinctrl_assign_variant(struct mvebu_mpp_mode *m,
static int armada_cp110_pinctrl_probe(struct platform_device *pdev)
{
struct mvebu_pinctrl_soc_info *soc;
- const struct of_device_id *match =
- of_match_device(armada_cp110_pinctrl_of_match, &pdev->dev);
int i;
if (!pdev->dev.parent)
@@ -650,7 +648,7 @@ static int armada_cp110_pinctrl_probe(struct platform_device *pdev)
if (!soc)
return -ENOMEM;
- soc->variant = (unsigned long) match->data & 0xff;
+ soc->variant = (unsigned long)device_get_match_data(&pdev->dev) & 0xff;
soc->controls = armada_cp110_mpp_controls;
soc->ncontrols = ARRAY_SIZE(armada_cp110_mpp_controls);
soc->modes = armada_cp110_mpp_modes;
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
index 48e2a6c56a83..487825bfd125 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
@@ -19,8 +19,8 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
+#include <linux/property.h>
#include <linux/bitops.h>
#include "pinctrl-mvebu.h"
@@ -568,14 +568,9 @@ static int armada_xp_pinctrl_resume(struct platform_device *pdev)
static int armada_xp_pinctrl_probe(struct platform_device *pdev)
{
struct mvebu_pinctrl_soc_info *soc = &armada_xp_pinctrl_info;
- const struct of_device_id *match =
- of_match_device(armada_xp_pinctrl_of_match, &pdev->dev);
int nregs;
- if (!match)
- return -ENODEV;
-
- soc->variant = (unsigned) match->data & 0xff;
+ soc->variant = (unsigned)device_get_match_data(&pdev->dev) & 0xff;
switch (soc->variant) {
case V_MV78230:
diff --git a/drivers/pinctrl/mvebu/pinctrl-dove.c b/drivers/pinctrl/mvebu/pinctrl-dove.c
index bd74daa9ed66..1947da73e512 100644
--- a/drivers/pinctrl/mvebu/pinctrl-dove.c
+++ b/drivers/pinctrl/mvebu/pinctrl-dove.c
@@ -12,9 +12,9 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/pinctrl/pinctrl.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include "pinctrl-mvebu.h"
@@ -765,13 +765,11 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
{
struct resource *res, *mpp_res;
struct resource fb_res;
- const struct of_device_id *match =
- of_match_device(dove_pinctrl_of_match, &pdev->dev);
struct mvebu_mpp_ctrl_data *mpp_data;
void __iomem *base;
int i;
- pdev->dev.platform_data = (void *)match->data;
+ pdev->dev.platform_data = (void *)device_get_match_data(&pdev->dev);
/*
* General MPP Configuration Register is part of pdma registers.
diff --git a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
index d45c31f281c8..4789d7442f78 100644
--- a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
+++ b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
@@ -11,8 +11,8 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
+#include <linux/property.h>
#include "pinctrl-mvebu.h"
@@ -470,10 +470,7 @@ static const struct of_device_id kirkwood_pinctrl_of_match[] = {
static int kirkwood_pinctrl_probe(struct platform_device *pdev)
{
- const struct of_device_id *match =
- of_match_device(kirkwood_pinctrl_of_match, &pdev->dev);
-
- pdev->dev.platform_data = (void *)match->data;
+ pdev->dev.platform_data = (void *)device_get_match_data(&pdev->dev);
return mvebu_pinctrl_simple_mmio_probe(pdev);
}
diff --git a/drivers/pinctrl/mvebu/pinctrl-orion.c b/drivers/pinctrl/mvebu/pinctrl-orion.c
index cc97d270be61..2b6ab7f2afc7 100644
--- a/drivers/pinctrl/mvebu/pinctrl-orion.c
+++ b/drivers/pinctrl/mvebu/pinctrl-orion.c
@@ -19,8 +19,8 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
+#include <linux/property.h>
#include "pinctrl-mvebu.h"
@@ -218,10 +218,7 @@ static const struct of_device_id orion_pinctrl_of_match[] = {
static int orion_pinctrl_probe(struct platform_device *pdev)
{
- const struct of_device_id *match =
- of_match_device(orion_pinctrl_of_match, &pdev->dev);
-
- pdev->dev.platform_data = (void*)match->data;
+ pdev->dev.platform_data = (void*)device_get_match_data(&pdev->dev);
mpp_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mpp_base))
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 6b90051af206..d3c32d809bac 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -17,6 +17,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -985,7 +986,6 @@ static const struct of_device_id abx500_gpio_match[] = {
static int abx500_gpio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- const struct of_device_id *match;
struct abx500_pinctrl *pct;
unsigned int id = -1;
int ret;
@@ -1006,12 +1006,7 @@ static int abx500_gpio_probe(struct platform_device *pdev)
pct->chip.parent = &pdev->dev;
pct->chip.base = -1; /* Dynamic allocation */
- match = of_match_device(abx500_gpio_match, &pdev->dev);
- if (!match) {
- dev_err(&pdev->dev, "gpio dt not matching\n");
- return -ENODEV;
- }
- id = (unsigned long)match->data;
+ id = (unsigned long)device_get_match_data(&pdev->dev);
/* Poke in other ASIC variants here */
switch (id) {
@@ -1079,12 +1074,11 @@ out_rem_chip:
* abx500_gpio_remove() - remove Ab8500-gpio driver
* @pdev: Platform device registered
*/
-static int abx500_gpio_remove(struct platform_device *pdev)
+static void abx500_gpio_remove(struct platform_device *pdev)
{
struct abx500_pinctrl *pct = platform_get_drvdata(pdev);
gpiochip_remove(&pct->chip);
- return 0;
}
static struct platform_driver abx500_gpio_driver = {
@@ -1093,7 +1087,7 @@ static struct platform_driver abx500_gpio_driver = {
.of_match_table = abx500_gpio_match,
},
.probe = abx500_gpio_probe,
- .remove = abx500_gpio_remove,
+ .remove_new = abx500_gpio_remove,
};
static int __init abx500_gpio_init(void)
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 86a638077a69..863732287b1e 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -16,9 +16,11 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -1838,7 +1840,6 @@ static int nmk_pinctrl_resume(struct device *dev)
static int nmk_pinctrl_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct device_node *np = pdev->dev.of_node;
struct device_node *prcm_np;
struct nmk_pinctrl *npct;
@@ -1849,10 +1850,7 @@ static int nmk_pinctrl_probe(struct platform_device *pdev)
if (!npct)
return -ENOMEM;
- match = of_match_device(nmk_pinctrl_match, &pdev->dev);
- if (!match)
- return -ENODEV;
- version = (unsigned int) match->data;
+ version = (unsigned int)device_get_match_data(&pdev->dev);
/* Poke in other ASIC variants here */
if (version == PINCTRL_NMK_STN8815)
diff --git a/drivers/pinctrl/nuvoton/Kconfig b/drivers/pinctrl/nuvoton/Kconfig
index 8fe61b348181..2abbfcec1fae 100644
--- a/drivers/pinctrl/nuvoton/Kconfig
+++ b/drivers/pinctrl/nuvoton/Kconfig
@@ -2,8 +2,7 @@
config PINCTRL_WPCM450
tristate "Pinctrl and GPIO driver for Nuvoton WPCM450"
- depends on ARCH_WPCM450 || COMPILE_TEST
- depends on OF
+ depends on (ARCH_WPCM450 || COMPILE_TEST) && OF
select PINMUX
select PINCONF
select GENERIC_PINCONF
@@ -32,3 +31,17 @@ config PINCTRL_NPCM7XX
help
Say Y here to enable pin controller and GPIO support
for Nuvoton NPCM750/730/715/705 SoCs.
+
+config PINCTRL_NPCM8XX
+ tristate "Pinctrl and GPIO driver for Nuvoton NPCM8XX"
+ depends on (ARCH_NPCM || COMPILE_TEST) && OF
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+ Say Y or M here to enable pin controller and GPIO support for
+ the Nuvoton NPCM8XX SoC. This is strongly recommended when
+ building a kernel that will run on this chip.
diff --git a/drivers/pinctrl/nuvoton/Makefile b/drivers/pinctrl/nuvoton/Makefile
index 9e66f5dc74bf..08031eab0af6 100644
--- a/drivers/pinctrl/nuvoton/Makefile
+++ b/drivers/pinctrl/nuvoton/Makefile
@@ -3,3 +3,4 @@
obj-$(CONFIG_PINCTRL_WPCM450) += pinctrl-wpcm450.o
obj-$(CONFIG_PINCTRL_NPCM7XX) += pinctrl-npcm7xx.o
+obj-$(CONFIG_PINCTRL_NPCM8XX) += pinctrl-npcm8xx.o
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
index 843ffcd96877..1e658721aaba 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
@@ -171,7 +171,7 @@ static int npcmgpio_direction_input(struct gpio_chip *chip, unsigned int offset)
struct npcm7xx_gpio *bank = gpiochip_get_data(chip);
int ret;
- ret = pinctrl_gpio_direction_input(offset + chip->base);
+ ret = pinctrl_gpio_direction_input(chip, offset);
if (ret)
return ret;
@@ -188,7 +188,7 @@ static int npcmgpio_direction_output(struct gpio_chip *chip,
dev_dbg(chip->parent, "gpio_direction_output: offset%d = %x\n", offset,
value);
- ret = pinctrl_gpio_direction_output(offset + chip->base);
+ ret = pinctrl_gpio_direction_output(chip, offset);
if (ret)
return ret;
@@ -201,19 +201,13 @@ static int npcmgpio_gpio_request(struct gpio_chip *chip, unsigned int offset)
int ret;
dev_dbg(chip->parent, "gpio_request: offset%d\n", offset);
- ret = pinctrl_gpio_request(offset + chip->base);
+ ret = pinctrl_gpio_request(chip, offset);
if (ret)
return ret;
return bank->request(chip, offset);
}
-static void npcmgpio_gpio_free(struct gpio_chip *chip, unsigned int offset)
-{
- dev_dbg(chip->parent, "gpio_free: offset%d\n", offset);
- pinctrl_gpio_free(offset + chip->base);
-}
-
static void npcmgpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc;
@@ -1588,19 +1582,6 @@ static int npcm7xx_get_group_pins(struct pinctrl_dev *pctldev,
return 0;
}
-static int npcm7xx_dt_node_to_map(struct pinctrl_dev *pctldev,
- struct device_node *np_config,
- struct pinctrl_map **map,
- u32 *num_maps)
-{
- struct npcm7xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
-
- dev_dbg(npcm->dev, "dt_node_to_map: %s\n", np_config->name);
- return pinconf_generic_dt_node_to_map(pctldev, np_config,
- map, num_maps,
- PIN_MAP_TYPE_INVALID);
-}
-
static void npcm7xx_dt_free_map(struct pinctrl_dev *pctldev,
struct pinctrl_map *map, u32 num_maps)
{
@@ -1612,7 +1593,7 @@ static const struct pinctrl_ops npcm7xx_pinctrl_ops = {
.get_group_name = npcm7xx_get_group_name,
.get_group_pins = npcm7xx_get_group_pins,
.pin_dbg_show = npcm7xx_pin_dbg_show,
- .dt_node_to_map = npcm7xx_dt_node_to_map,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
.dt_free_map = npcm7xx_dt_free_map,
};
@@ -1929,7 +1910,7 @@ static int npcm7xx_gpio_of(struct npcm7xx_pinctrl *pctrl)
pctrl->gpio_bank[id].gc.direction_output = npcmgpio_direction_output;
pctrl->gpio_bank[id].request = pctrl->gpio_bank[id].gc.request;
pctrl->gpio_bank[id].gc.request = npcmgpio_gpio_request;
- pctrl->gpio_bank[id].gc.free = npcmgpio_gpio_free;
+ pctrl->gpio_bank[id].gc.free = pinctrl_gpio_free;
id++;
}
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
new file mode 100644
index 000000000000..a377d36b0eb0
--- /dev/null
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
@@ -0,0 +1,2486 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Nuvoton Technology corporation.
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+/* GCR registers */
+#define NPCM8XX_GCR_SRCNT 0x068
+#define NPCM8XX_GCR_FLOCKR1 0x074
+#define NPCM8XX_GCR_DSCNT 0x078
+#define NPCM8XX_GCR_I2CSEGSEL 0x0e0
+#define NPCM8XX_GCR_MFSEL1 0x260
+#define NPCM8XX_GCR_MFSEL2 0x264
+#define NPCM8XX_GCR_MFSEL3 0x268
+#define NPCM8XX_GCR_MFSEL4 0x26c
+#define NPCM8XX_GCR_MFSEL5 0x270
+#define NPCM8XX_GCR_MFSEL6 0x274
+#define NPCM8XX_GCR_MFSEL7 0x278
+
+#define SRCNT_ESPI BIT(3)
+
+/* GPIO registers */
+#define NPCM8XX_GP_N_TLOCK1 0x00
+#define NPCM8XX_GP_N_DIN 0x04
+#define NPCM8XX_GP_N_POL 0x08
+#define NPCM8XX_GP_N_DOUT 0x0c
+#define NPCM8XX_GP_N_OE 0x10
+#define NPCM8XX_GP_N_OTYP 0x14
+#define NPCM8XX_GP_N_MP 0x18
+#define NPCM8XX_GP_N_PU 0x1c
+#define NPCM8XX_GP_N_PD 0x20
+#define NPCM8XX_GP_N_DBNC 0x24
+#define NPCM8XX_GP_N_EVTYP 0x28
+#define NPCM8XX_GP_N_EVBE 0x2c
+#define NPCM8XX_GP_N_OBL0 0x30
+#define NPCM8XX_GP_N_OBL1 0x34
+#define NPCM8XX_GP_N_OBL2 0x38
+#define NPCM8XX_GP_N_OBL3 0x3c
+#define NPCM8XX_GP_N_EVEN 0x40
+#define NPCM8XX_GP_N_EVENS 0x44
+#define NPCM8XX_GP_N_EVENC 0x48
+#define NPCM8XX_GP_N_EVST 0x4c
+#define NPCM8XX_GP_N_SPLCK 0x50
+#define NPCM8XX_GP_N_MPLCK 0x54
+#define NPCM8XX_GP_N_IEM 0x58
+#define NPCM8XX_GP_N_OSRC 0x5c
+#define NPCM8XX_GP_N_ODSC 0x60
+#define NPCM8XX_GP_N_DOS 0x68
+#define NPCM8XX_GP_N_DOC 0x6c
+#define NPCM8XX_GP_N_OES 0x70
+#define NPCM8XX_GP_N_OEC 0x74
+#define NPCM8XX_GP_N_DBNCS0 0x80
+#define NPCM8XX_GP_N_DBNCS1 0x84
+#define NPCM8XX_GP_N_DBNCP0 0x88
+#define NPCM8XX_GP_N_DBNCP1 0x8c
+#define NPCM8XX_GP_N_DBNCP2 0x90
+#define NPCM8XX_GP_N_DBNCP3 0x94
+#define NPCM8XX_GP_N_TLOCK2 0xac
+
+#define NPCM8XX_GPIO_PER_BANK 32
+#define NPCM8XX_GPIO_BANK_NUM 8
+#define NPCM8XX_GCR_NONE 0
+
+#define NPCM8XX_DEBOUNCE_MAX 4
+#define NPCM8XX_DEBOUNCE_NSEC 40
+#define NPCM8XX_DEBOUNCE_VAL_MASK GENMASK(23, 4)
+#define NPCM8XX_DEBOUNCE_MAX_VAL 0xFFFFF7
+
+/* Structure for register banks */
+struct debounce_time {
+ bool set_val[NPCM8XX_DEBOUNCE_MAX];
+ u32 nanosec_val[NPCM8XX_DEBOUNCE_MAX];
+};
+
+struct npcm8xx_gpio {
+ struct gpio_chip gc;
+ void __iomem *base;
+ struct debounce_time debounce;
+ int irqbase;
+ int irq;
+ struct irq_chip irq_chip;
+ u32 pinctrl_id;
+ int (*direction_input)(struct gpio_chip *chip, unsigned int offset);
+ int (*direction_output)(struct gpio_chip *chip, unsigned int offset,
+ int value);
+ int (*request)(struct gpio_chip *chip, unsigned int offset);
+ void (*free)(struct gpio_chip *chip, unsigned int offset);
+};
+
+struct npcm8xx_pinctrl {
+ struct pinctrl_dev *pctldev;
+ struct device *dev;
+ struct npcm8xx_gpio gpio_bank[NPCM8XX_GPIO_BANK_NUM];
+ struct irq_domain *domain;
+ struct regmap *gcr_regmap;
+ void __iomem *regs;
+ u32 bank_num;
+};
+
+/* GPIO handling in the pinctrl driver */
+static void npcm_gpio_set(struct gpio_chip *gc, void __iomem *reg,
+ unsigned int pinmask)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ iowrite32(ioread32(reg) | pinmask, reg);
+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+}
+
+static void npcm_gpio_clr(struct gpio_chip *gc, void __iomem *reg,
+ unsigned int pinmask)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ iowrite32(ioread32(reg) & ~pinmask, reg);
+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+}
+
+static void npcmgpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ struct npcm8xx_gpio *bank = gpiochip_get_data(chip);
+
+ seq_printf(s, "DIN :%.8x DOUT:%.8x IE :%.8x OE :%.8x\n",
+ ioread32(bank->base + NPCM8XX_GP_N_DIN),
+ ioread32(bank->base + NPCM8XX_GP_N_DOUT),
+ ioread32(bank->base + NPCM8XX_GP_N_IEM),
+ ioread32(bank->base + NPCM8XX_GP_N_OE));
+ seq_printf(s, "PU :%.8x PD :%.8x DB :%.8x POL :%.8x\n",
+ ioread32(bank->base + NPCM8XX_GP_N_PU),
+ ioread32(bank->base + NPCM8XX_GP_N_PD),
+ ioread32(bank->base + NPCM8XX_GP_N_DBNC),
+ ioread32(bank->base + NPCM8XX_GP_N_POL));
+ seq_printf(s, "ETYP:%.8x EVBE:%.8x EVEN:%.8x EVST:%.8x\n",
+ ioread32(bank->base + NPCM8XX_GP_N_EVTYP),
+ ioread32(bank->base + NPCM8XX_GP_N_EVBE),
+ ioread32(bank->base + NPCM8XX_GP_N_EVEN),
+ ioread32(bank->base + NPCM8XX_GP_N_EVST));
+ seq_printf(s, "OTYP:%.8x OSRC:%.8x ODSC:%.8x\n",
+ ioread32(bank->base + NPCM8XX_GP_N_OTYP),
+ ioread32(bank->base + NPCM8XX_GP_N_OSRC),
+ ioread32(bank->base + NPCM8XX_GP_N_ODSC));
+ seq_printf(s, "OBL0:%.8x OBL1:%.8x OBL2:%.8x OBL3:%.8x\n",
+ ioread32(bank->base + NPCM8XX_GP_N_OBL0),
+ ioread32(bank->base + NPCM8XX_GP_N_OBL1),
+ ioread32(bank->base + NPCM8XX_GP_N_OBL2),
+ ioread32(bank->base + NPCM8XX_GP_N_OBL3));
+ seq_printf(s, "SLCK:%.8x MLCK:%.8x\n",
+ ioread32(bank->base + NPCM8XX_GP_N_SPLCK),
+ ioread32(bank->base + NPCM8XX_GP_N_MPLCK));
+}
+
+static int npcmgpio_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ struct npcm8xx_gpio *bank = gpiochip_get_data(chip);
+ int ret;
+
+ ret = pinctrl_gpio_direction_input(chip, offset);
+ if (ret)
+ return ret;
+
+ return bank->direction_input(chip, offset);
+}
+
+static int npcmgpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct npcm8xx_gpio *bank = gpiochip_get_data(chip);
+ int ret;
+
+ ret = pinctrl_gpio_direction_output(chip, offset);
+ if (ret)
+ return ret;
+
+ return bank->direction_output(chip, offset, value);
+}
+
+static int npcmgpio_gpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+ struct npcm8xx_gpio *bank = gpiochip_get_data(chip);
+ int ret;
+
+ ret = pinctrl_gpio_request(chip, offset);
+ if (ret)
+ return ret;
+
+ return bank->request(chip, offset);
+}
+
+static void npcmgpio_irq_handler(struct irq_desc *desc)
+{
+ unsigned long sts, en, bit;
+ struct npcm8xx_gpio *bank;
+ struct irq_chip *chip;
+ struct gpio_chip *gc;
+
+ gc = irq_desc_get_handler_data(desc);
+ bank = gpiochip_get_data(gc);
+ chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+ sts = ioread32(bank->base + NPCM8XX_GP_N_EVST);
+ en = ioread32(bank->base + NPCM8XX_GP_N_EVEN);
+ sts &= en;
+ for_each_set_bit(bit, &sts, NPCM8XX_GPIO_PER_BANK)
+ generic_handle_domain_irq(gc->irq.domain, bit);
+ chained_irq_exit(chip, desc);
+}
+
+static int npcmgpio_set_irq_type(struct irq_data *d, unsigned int type)
+{
+ struct npcm8xx_gpio *bank =
+ gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ unsigned int gpio = BIT(irqd_to_hwirq(d));
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_EVBE, gpio);
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_POL, gpio);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_EVBE, gpio);
+ npcm_gpio_set(&bank->gc, bank->base + NPCM8XX_GP_N_POL, gpio);
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ npcm_gpio_set(&bank->gc, bank->base + NPCM8XX_GP_N_EVBE, gpio);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ npcm_gpio_set(&bank->gc, bank->base + NPCM8XX_GP_N_POL, gpio);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_POL, gpio);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (type & IRQ_TYPE_LEVEL_MASK) {
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_EVTYP, gpio);
+ irq_set_handler_locked(d, handle_level_irq);
+ } else if (type & IRQ_TYPE_EDGE_BOTH) {
+ npcm_gpio_set(&bank->gc, bank->base + NPCM8XX_GP_N_EVTYP, gpio);
+ irq_set_handler_locked(d, handle_edge_irq);
+ }
+
+ return 0;
+}
+
+static void npcmgpio_irq_ack(struct irq_data *d)
+{
+ struct npcm8xx_gpio *bank =
+ gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ unsigned int gpio = irqd_to_hwirq(d);
+
+ iowrite32(BIT(gpio), bank->base + NPCM8XX_GP_N_EVST);
+}
+
+static void npcmgpio_irq_mask(struct irq_data *d)
+{
+ struct npcm8xx_gpio *bank =
+ gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ unsigned int gpio = irqd_to_hwirq(d);
+
+ iowrite32(BIT(gpio), bank->base + NPCM8XX_GP_N_EVENC);
+}
+
+static void npcmgpio_irq_unmask(struct irq_data *d)
+{
+ struct npcm8xx_gpio *bank =
+ gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ unsigned int gpio = irqd_to_hwirq(d);
+
+ iowrite32(BIT(gpio), bank->base + NPCM8XX_GP_N_EVENS);
+}
+
+static unsigned int npcmgpio_irq_startup(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ unsigned int gpio = irqd_to_hwirq(d);
+
+ /* active-high, input, clear interrupt, enable interrupt */
+ npcmgpio_direction_input(gc, gpio);
+ npcmgpio_irq_ack(d);
+ npcmgpio_irq_unmask(d);
+
+ return 0;
+}
+
+static struct irq_chip npcmgpio_irqchip = {
+ .name = "NPCM8XX-GPIO-IRQ",
+ .irq_ack = npcmgpio_irq_ack,
+ .irq_unmask = npcmgpio_irq_unmask,
+ .irq_mask = npcmgpio_irq_mask,
+ .irq_set_type = npcmgpio_set_irq_type,
+ .irq_startup = npcmgpio_irq_startup,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static const int gpi36_pins[] = { 58 };
+static const int gpi35_pins[] = { 58 };
+
+static const int tp_jtag3_pins[] = { 44, 62, 45, 46 };
+static const int tp_uart_pins[] = { 50, 51 };
+
+static const int tp_smb2_pins[] = { 24, 25 };
+static const int tp_smb1_pins[] = { 142, 143 };
+
+static const int tp_gpio7_pins[] = { 96 };
+static const int tp_gpio6_pins[] = { 97 };
+static const int tp_gpio5_pins[] = { 98 };
+static const int tp_gpio4_pins[] = { 99 };
+static const int tp_gpio3_pins[] = { 100 };
+static const int tp_gpio2_pins[] = { 16 };
+static const int tp_gpio1_pins[] = { 9 };
+static const int tp_gpio0_pins[] = { 8 };
+
+static const int tp_gpio2b_pins[] = { 101 };
+static const int tp_gpio1b_pins[] = { 92 };
+static const int tp_gpio0b_pins[] = { 91 };
+
+static const int vgadig_pins[] = { 102, 103, 104, 105 };
+
+static const int nbu1crts_pins[] = { 44, 62 };
+
+static const int fm2_pins[] = { 224, 225, 226, 227, 228, 229, 230 };
+static const int fm1_pins[] = { 175, 176, 177, 203, 191, 192, 233 };
+static const int fm0_pins[] = { 194, 195, 196, 202, 199, 198, 197 };
+
+static const int gpio1836_pins[] = { 183, 184, 185, 186 };
+static const int gpio1889_pins[] = { 188, 189 };
+static const int gpo187_pins[] = { 187 };
+
+static const int cp1urxd_pins[] = { 41 };
+static const int r3rxer_pins[] = { 212 };
+
+static const int cp1gpio2c_pins[] = { 101 };
+static const int cp1gpio3c_pins[] = { 100 };
+
+static const int cp1gpio0b_pins[] = { 127 };
+static const int cp1gpio1b_pins[] = { 126 };
+static const int cp1gpio2b_pins[] = { 125 };
+static const int cp1gpio3b_pins[] = { 124 };
+static const int cp1gpio4b_pins[] = { 99 };
+static const int cp1gpio5b_pins[] = { 98 };
+static const int cp1gpio6b_pins[] = { 97 };
+static const int cp1gpio7b_pins[] = { 96 };
+
+static const int cp1gpio0_pins[] = { };
+static const int cp1gpio1_pins[] = { };
+static const int cp1gpio2_pins[] = { };
+static const int cp1gpio3_pins[] = { };
+static const int cp1gpio4_pins[] = { };
+static const int cp1gpio5_pins[] = { 17 };
+static const int cp1gpio6_pins[] = { 91 };
+static const int cp1gpio7_pins[] = { 92 };
+
+static const int cp1utxd_pins[] = { 42 };
+
+static const int spi1cs3_pins[] = { 192 };
+static const int spi1cs2_pins[] = { 191 };
+static const int spi1cs1_pins[] = { 233 };
+static const int spi1cs0_pins[] = { 203 };
+
+static const int spi1d23_pins[] = { 191, 192 };
+
+static const int j2j3_pins[] = { 44, 62, 45, 46 };
+
+static const int r3oen_pins[] = { 213 };
+static const int r2oen_pins[] = { 90 };
+static const int r1oen_pins[] = { 56 };
+static const int bu4b_pins[] = { 98, 99 };
+static const int bu4_pins[] = { 54, 55 };
+static const int bu5b_pins[] = { 100, 101 };
+static const int bu5_pins[] = { 52, 53 };
+static const int bu6_pins[] = { 50, 51 };
+static const int rmii3_pins[] = { 110, 111, 209, 211, 210, 214, 215 };
+
+static const int jm1_pins[] = { 136, 137, 138, 139, 140 };
+static const int jm2_pins[] = { 251 };
+
+static const int tpgpio5b_pins[] = { 58 };
+static const int tpgpio4b_pins[] = { 57 };
+
+static const int clkrun_pins[] = { 162 };
+
+static const int i3c5_pins[] = { 106, 107 };
+static const int i3c4_pins[] = { 33, 34 };
+static const int i3c3_pins[] = { 246, 247 };
+static const int i3c2_pins[] = { 244, 245 };
+static const int i3c1_pins[] = { 242, 243 };
+static const int i3c0_pins[] = { 240, 241 };
+
+static const int hsi1a_pins[] = { 43, 63 };
+static const int hsi2a_pins[] = { 48, 49 };
+static const int hsi1b_pins[] = { 44, 62 };
+static const int hsi2b_pins[] = { 50, 51 };
+static const int hsi1c_pins[] = { 45, 46, 47, 61 };
+static const int hsi2c_pins[] = { 45, 46, 47, 61 };
+
+static const int smb0_pins[] = { 115, 114 };
+static const int smb0b_pins[] = { 195, 194 };
+static const int smb0c_pins[] = { 202, 196 };
+static const int smb0d_pins[] = { 198, 199 };
+static const int smb0den_pins[] = { 197 };
+static const int smb1_pins[] = { 117, 116 };
+static const int smb1b_pins[] = { 126, 127 };
+static const int smb1c_pins[] = { 124, 125 };
+static const int smb1d_pins[] = { 4, 5 };
+static const int smb2_pins[] = { 119, 118 };
+static const int smb2b_pins[] = { 122, 123 };
+static const int smb2c_pins[] = { 120, 121 };
+static const int smb2d_pins[] = { 6, 7 };
+static const int smb3_pins[] = { 30, 31 };
+static const int smb3b_pins[] = { 39, 40 };
+static const int smb3c_pins[] = { 37, 38 };
+static const int smb3d_pins[] = { 59, 60 };
+static const int smb4_pins[] = { 28, 29 };
+static const int smb4b_pins[] = { 18, 19 };
+static const int smb4c_pins[] = { 20, 21 };
+static const int smb4d_pins[] = { 22, 23 };
+static const int smb4den_pins[] = { 17 };
+static const int smb5_pins[] = { 26, 27 };
+static const int smb5b_pins[] = { 13, 12 };
+static const int smb5c_pins[] = { 15, 14 };
+static const int smb5d_pins[] = { 94, 93 };
+static const int ga20kbc_pins[] = { 94, 93 };
+
+static const int smb6_pins[] = { 172, 171 };
+static const int smb6b_pins[] = { 2, 3 };
+static const int smb6c_pins[] = { 0, 1 };
+static const int smb6d_pins[] = { 10, 11 };
+static const int smb7_pins[] = { 174, 173 };
+static const int smb7b_pins[] = { 16, 141 };
+static const int smb7c_pins[] = { 24, 25 };
+static const int smb7d_pins[] = { 142, 143 };
+static const int smb8_pins[] = { 129, 128 };
+static const int smb9_pins[] = { 131, 130 };
+static const int smb10_pins[] = { 133, 132 };
+static const int smb11_pins[] = { 135, 134 };
+static const int smb12_pins[] = { 221, 220 };
+static const int smb13_pins[] = { 223, 222 };
+static const int smb14_pins[] = { 22, 23 };
+static const int smb14b_pins[] = { 32, 187 };
+static const int smb15_pins[] = { 20, 21 };
+static const int smb15b_pins[] = { 192, 191 };
+
+static const int smb16_pins[] = { 10, 11 };
+static const int smb16b_pins[] = { 218, 219 };
+static const int smb17_pins[] = { 3, 2 };
+static const int smb18_pins[] = { 0, 1 };
+static const int smb19_pins[] = { 60, 59 };
+static const int smb20_pins[] = { 234, 235 };
+static const int smb21_pins[] = { 169, 170 };
+static const int smb22_pins[] = { 40, 39 };
+static const int smb23_pins[] = { 38, 37 };
+static const int smb23b_pins[] = { 134, 135 };
+
+static const int fanin0_pins[] = { 64 };
+static const int fanin1_pins[] = { 65 };
+static const int fanin2_pins[] = { 66 };
+static const int fanin3_pins[] = { 67 };
+static const int fanin4_pins[] = { 68 };
+static const int fanin5_pins[] = { 69 };
+static const int fanin6_pins[] = { 70 };
+static const int fanin7_pins[] = { 71 };
+static const int fanin8_pins[] = { 72 };
+static const int fanin9_pins[] = { 73 };
+static const int fanin10_pins[] = { 74 };
+static const int fanin11_pins[] = { 75 };
+static const int fanin12_pins[] = { 76 };
+static const int fanin13_pins[] = { 77 };
+static const int fanin14_pins[] = { 78 };
+static const int fanin15_pins[] = { 79 };
+static const int faninx_pins[] = { 175, 176, 177, 203 };
+
+static const int pwm0_pins[] = { 80 };
+static const int pwm1_pins[] = { 81 };
+static const int pwm2_pins[] = { 82 };
+static const int pwm3_pins[] = { 83 };
+static const int pwm4_pins[] = { 144 };
+static const int pwm5_pins[] = { 145 };
+static const int pwm6_pins[] = { 146 };
+static const int pwm7_pins[] = { 147 };
+static const int pwm8_pins[] = { 220 };
+static const int pwm9_pins[] = { 221 };
+static const int pwm10_pins[] = { 234 };
+static const int pwm11_pins[] = { 235 };
+
+static const int uart1_pins[] = { 43, 45, 46, 47, 61, 62, 63 };
+static const int uart2_pins[] = { 48, 49, 50, 51, 52, 53, 54, 55 };
+
+static const int sg1mdio_pins[] = { 108, 109 };
+
+static const int rg2_pins[] = { 110, 111, 112, 113, 208, 209, 210, 211, 212,
+ 213, 214, 215 };
+static const int rg2mdio_pins[] = { 216, 217 };
+
+static const int ddr_pins[] = { 110, 111, 112, 113, 208, 209, 210, 211, 212,
+ 213, 214, 215, 216, 217 };
+
+static const int iox1_pins[] = { 0, 1, 2, 3 };
+static const int iox2_pins[] = { 4, 5, 6, 7 };
+static const int ioxh_pins[] = { 10, 11, 24, 25 };
+
+static const int mmc_pins[] = { 152, 154, 156, 157, 158, 159 };
+static const int mmcwp_pins[] = { 153 };
+static const int mmccd_pins[] = { 155 };
+static const int mmcrst_pins[] = { 155 };
+static const int mmc8_pins[] = { 148, 149, 150, 151 };
+
+static const int r1_pins[] = { 178, 179, 180, 181, 182, 193, 201 };
+static const int r1err_pins[] = { 56 };
+static const int r1md_pins[] = { 57, 58 };
+static const int r2_pins[] = { 84, 85, 86, 87, 88, 89, 200 };
+static const int r2err_pins[] = { 90 };
+static const int r2md_pins[] = { 91, 92 };
+static const int sd1_pins[] = { 136, 137, 138, 139, 140, 141, 142, 143 };
+static const int sd1pwr_pins[] = { 143 };
+
+static const int wdog1_pins[] = { 218 };
+static const int wdog2_pins[] = { 219 };
+
+static const int bmcuart0a_pins[] = { 41, 42 };
+static const int bmcuart0b_pins[] = { 48, 49 };
+static const int bmcuart1_pins[] = { 43, 44, 62, 63 };
+
+static const int scipme_pins[] = { 169 };
+static const int smi_pins[] = { 170 };
+static const int serirq_pins[] = { 168 };
+
+static const int clkout_pins[] = { 160 };
+static const int clkreq_pins[] = { 231 };
+
+static const int jtag2_pins[] = { 43, 44, 45, 46, 47 };
+static const int gspi_pins[] = { 12, 13, 14, 15 };
+
+static const int spix_pins[] = { 224, 225, 226, 227, 229, 230 };
+static const int spixcs1_pins[] = { 228 };
+
+static const int spi1_pins[] = { 175, 176, 177 };
+static const int pspi_pins[] = { 17, 18, 19 };
+
+static const int spi0cs1_pins[] = { 32 };
+
+static const int spi3_pins[] = { 183, 184, 185, 186 };
+static const int spi3cs1_pins[] = { 187 };
+static const int spi3quad_pins[] = { 188, 189 };
+static const int spi3cs2_pins[] = { 188 };
+static const int spi3cs3_pins[] = { 189 };
+
+static const int ddc_pins[] = { 204, 205, 206, 207 };
+
+static const int lpc_pins[] = { 95, 161, 163, 164, 165, 166, 167 };
+static const int lpcclk_pins[] = { 168 };
+static const int espi_pins[] = { 95, 161, 163, 164, 165, 166, 167, 168 };
+
+static const int lkgpo0_pins[] = { 16 };
+static const int lkgpo1_pins[] = { 8 };
+static const int lkgpo2_pins[] = { 9 };
+
+static const int nprd_smi_pins[] = { 190 };
+
+static const int hgpio0_pins[] = { 20 };
+static const int hgpio1_pins[] = { 21 };
+static const int hgpio2_pins[] = { 22 };
+static const int hgpio3_pins[] = { 23 };
+static const int hgpio4_pins[] = { 24 };
+static const int hgpio5_pins[] = { 25 };
+static const int hgpio6_pins[] = { 59 };
+static const int hgpio7_pins[] = { 60 };
+
+/*
+ * pin: name, number
+ * group: name, npins, pins
+ * function: name, ngroups, groups
+ */
+struct npcm8xx_pingroup {
+ const char *name;
+ const unsigned int *pins;
+ int npins;
+};
+
+#define NPCM8XX_GRPS \
+ NPCM8XX_GRP(gpi36), \
+ NPCM8XX_GRP(gpi35), \
+ NPCM8XX_GRP(tp_jtag3), \
+ NPCM8XX_GRP(tp_uart), \
+ NPCM8XX_GRP(tp_smb2), \
+ NPCM8XX_GRP(tp_smb1), \
+ NPCM8XX_GRP(tp_gpio7), \
+ NPCM8XX_GRP(tp_gpio6), \
+ NPCM8XX_GRP(tp_gpio5), \
+ NPCM8XX_GRP(tp_gpio4), \
+ NPCM8XX_GRP(tp_gpio3), \
+ NPCM8XX_GRP(tp_gpio2), \
+ NPCM8XX_GRP(tp_gpio1), \
+ NPCM8XX_GRP(tp_gpio0), \
+ NPCM8XX_GRP(tp_gpio2b), \
+ NPCM8XX_GRP(tp_gpio1b), \
+ NPCM8XX_GRP(tp_gpio0b), \
+ NPCM8XX_GRP(vgadig), \
+ NPCM8XX_GRP(nbu1crts), \
+ NPCM8XX_GRP(fm2), \
+ NPCM8XX_GRP(fm1), \
+ NPCM8XX_GRP(fm0), \
+ NPCM8XX_GRP(gpio1836), \
+ NPCM8XX_GRP(gpio1889), \
+ NPCM8XX_GRP(gpo187), \
+ NPCM8XX_GRP(cp1urxd), \
+ NPCM8XX_GRP(r3rxer), \
+ NPCM8XX_GRP(cp1gpio2c), \
+ NPCM8XX_GRP(cp1gpio3c), \
+ NPCM8XX_GRP(cp1gpio0b), \
+ NPCM8XX_GRP(cp1gpio1b), \
+ NPCM8XX_GRP(cp1gpio2b), \
+ NPCM8XX_GRP(cp1gpio3b), \
+ NPCM8XX_GRP(cp1gpio4b), \
+ NPCM8XX_GRP(cp1gpio5b), \
+ NPCM8XX_GRP(cp1gpio6b), \
+ NPCM8XX_GRP(cp1gpio7b), \
+ NPCM8XX_GRP(cp1gpio0), \
+ NPCM8XX_GRP(cp1gpio1), \
+ NPCM8XX_GRP(cp1gpio2), \
+ NPCM8XX_GRP(cp1gpio3), \
+ NPCM8XX_GRP(cp1gpio4), \
+ NPCM8XX_GRP(cp1gpio5), \
+ NPCM8XX_GRP(cp1gpio6), \
+ NPCM8XX_GRP(cp1gpio7), \
+ NPCM8XX_GRP(cp1utxd), \
+ NPCM8XX_GRP(spi1cs3), \
+ NPCM8XX_GRP(spi1cs2), \
+ NPCM8XX_GRP(spi1cs1), \
+ NPCM8XX_GRP(spi1cs0), \
+ NPCM8XX_GRP(spi1d23), \
+ NPCM8XX_GRP(j2j3), \
+ NPCM8XX_GRP(r3oen), \
+ NPCM8XX_GRP(r2oen), \
+ NPCM8XX_GRP(r1oen), \
+ NPCM8XX_GRP(bu4b), \
+ NPCM8XX_GRP(bu4), \
+ NPCM8XX_GRP(bu5b), \
+ NPCM8XX_GRP(bu5), \
+ NPCM8XX_GRP(bu6), \
+ NPCM8XX_GRP(rmii3), \
+ NPCM8XX_GRP(jm1), \
+ NPCM8XX_GRP(jm2), \
+ NPCM8XX_GRP(tpgpio5b), \
+ NPCM8XX_GRP(tpgpio4b), \
+ NPCM8XX_GRP(clkrun), \
+ NPCM8XX_GRP(i3c5), \
+ NPCM8XX_GRP(i3c4), \
+ NPCM8XX_GRP(i3c3), \
+ NPCM8XX_GRP(i3c2), \
+ NPCM8XX_GRP(i3c1), \
+ NPCM8XX_GRP(i3c0), \
+ NPCM8XX_GRP(hsi1a), \
+ NPCM8XX_GRP(hsi2a), \
+ NPCM8XX_GRP(hsi1b), \
+ NPCM8XX_GRP(hsi2b), \
+ NPCM8XX_GRP(hsi1c), \
+ NPCM8XX_GRP(hsi2c), \
+ NPCM8XX_GRP(smb0), \
+ NPCM8XX_GRP(smb0b), \
+ NPCM8XX_GRP(smb0c), \
+ NPCM8XX_GRP(smb0d), \
+ NPCM8XX_GRP(smb0den), \
+ NPCM8XX_GRP(smb1), \
+ NPCM8XX_GRP(smb1b), \
+ NPCM8XX_GRP(smb1c), \
+ NPCM8XX_GRP(smb1d), \
+ NPCM8XX_GRP(smb2), \
+ NPCM8XX_GRP(smb2b), \
+ NPCM8XX_GRP(smb2c), \
+ NPCM8XX_GRP(smb2d), \
+ NPCM8XX_GRP(smb3), \
+ NPCM8XX_GRP(smb3b), \
+ NPCM8XX_GRP(smb3c), \
+ NPCM8XX_GRP(smb3d), \
+ NPCM8XX_GRP(smb4), \
+ NPCM8XX_GRP(smb4b), \
+ NPCM8XX_GRP(smb4c), \
+ NPCM8XX_GRP(smb4d), \
+ NPCM8XX_GRP(smb4den), \
+ NPCM8XX_GRP(smb5), \
+ NPCM8XX_GRP(smb5b), \
+ NPCM8XX_GRP(smb5c), \
+ NPCM8XX_GRP(smb5d), \
+ NPCM8XX_GRP(ga20kbc), \
+ NPCM8XX_GRP(smb6), \
+ NPCM8XX_GRP(smb6b), \
+ NPCM8XX_GRP(smb6c), \
+ NPCM8XX_GRP(smb6d), \
+ NPCM8XX_GRP(smb7), \
+ NPCM8XX_GRP(smb7b), \
+ NPCM8XX_GRP(smb7c), \
+ NPCM8XX_GRP(smb7d), \
+ NPCM8XX_GRP(smb8), \
+ NPCM8XX_GRP(smb9), \
+ NPCM8XX_GRP(smb10), \
+ NPCM8XX_GRP(smb11), \
+ NPCM8XX_GRP(smb12), \
+ NPCM8XX_GRP(smb13), \
+ NPCM8XX_GRP(smb14), \
+ NPCM8XX_GRP(smb14b), \
+ NPCM8XX_GRP(smb15), \
+ NPCM8XX_GRP(smb15b), \
+ NPCM8XX_GRP(smb16), \
+ NPCM8XX_GRP(smb16b), \
+ NPCM8XX_GRP(smb17), \
+ NPCM8XX_GRP(smb18), \
+ NPCM8XX_GRP(smb19), \
+ NPCM8XX_GRP(smb20), \
+ NPCM8XX_GRP(smb21), \
+ NPCM8XX_GRP(smb22), \
+ NPCM8XX_GRP(smb23), \
+ NPCM8XX_GRP(smb23b), \
+ NPCM8XX_GRP(fanin0), \
+ NPCM8XX_GRP(fanin1), \
+ NPCM8XX_GRP(fanin2), \
+ NPCM8XX_GRP(fanin3), \
+ NPCM8XX_GRP(fanin4), \
+ NPCM8XX_GRP(fanin5), \
+ NPCM8XX_GRP(fanin6), \
+ NPCM8XX_GRP(fanin7), \
+ NPCM8XX_GRP(fanin8), \
+ NPCM8XX_GRP(fanin9), \
+ NPCM8XX_GRP(fanin10), \
+ NPCM8XX_GRP(fanin11), \
+ NPCM8XX_GRP(fanin12), \
+ NPCM8XX_GRP(fanin13), \
+ NPCM8XX_GRP(fanin14), \
+ NPCM8XX_GRP(fanin15), \
+ NPCM8XX_GRP(faninx), \
+ NPCM8XX_GRP(pwm0), \
+ NPCM8XX_GRP(pwm1), \
+ NPCM8XX_GRP(pwm2), \
+ NPCM8XX_GRP(pwm3), \
+ NPCM8XX_GRP(pwm4), \
+ NPCM8XX_GRP(pwm5), \
+ NPCM8XX_GRP(pwm6), \
+ NPCM8XX_GRP(pwm7), \
+ NPCM8XX_GRP(pwm8), \
+ NPCM8XX_GRP(pwm9), \
+ NPCM8XX_GRP(pwm10), \
+ NPCM8XX_GRP(pwm11), \
+ NPCM8XX_GRP(sg1mdio), \
+ NPCM8XX_GRP(rg2), \
+ NPCM8XX_GRP(rg2mdio), \
+ NPCM8XX_GRP(ddr), \
+ NPCM8XX_GRP(uart1), \
+ NPCM8XX_GRP(uart2), \
+ NPCM8XX_GRP(bmcuart0a), \
+ NPCM8XX_GRP(bmcuart0b), \
+ NPCM8XX_GRP(bmcuart1), \
+ NPCM8XX_GRP(iox1), \
+ NPCM8XX_GRP(iox2), \
+ NPCM8XX_GRP(ioxh), \
+ NPCM8XX_GRP(gspi), \
+ NPCM8XX_GRP(mmc), \
+ NPCM8XX_GRP(mmcwp), \
+ NPCM8XX_GRP(mmccd), \
+ NPCM8XX_GRP(mmcrst), \
+ NPCM8XX_GRP(mmc8), \
+ NPCM8XX_GRP(r1), \
+ NPCM8XX_GRP(r1err), \
+ NPCM8XX_GRP(r1md), \
+ NPCM8XX_GRP(r2), \
+ NPCM8XX_GRP(r2err), \
+ NPCM8XX_GRP(r2md), \
+ NPCM8XX_GRP(sd1), \
+ NPCM8XX_GRP(sd1pwr), \
+ NPCM8XX_GRP(wdog1), \
+ NPCM8XX_GRP(wdog2), \
+ NPCM8XX_GRP(scipme), \
+ NPCM8XX_GRP(smi), \
+ NPCM8XX_GRP(serirq), \
+ NPCM8XX_GRP(jtag2), \
+ NPCM8XX_GRP(spix), \
+ NPCM8XX_GRP(spixcs1), \
+ NPCM8XX_GRP(spi1), \
+ NPCM8XX_GRP(pspi), \
+ NPCM8XX_GRP(ddc), \
+ NPCM8XX_GRP(clkreq), \
+ NPCM8XX_GRP(clkout), \
+ NPCM8XX_GRP(spi3), \
+ NPCM8XX_GRP(spi3cs1), \
+ NPCM8XX_GRP(spi3quad), \
+ NPCM8XX_GRP(spi3cs2), \
+ NPCM8XX_GRP(spi3cs3), \
+ NPCM8XX_GRP(spi0cs1), \
+ NPCM8XX_GRP(lpc), \
+ NPCM8XX_GRP(lpcclk), \
+ NPCM8XX_GRP(espi), \
+ NPCM8XX_GRP(lkgpo0), \
+ NPCM8XX_GRP(lkgpo1), \
+ NPCM8XX_GRP(lkgpo2), \
+ NPCM8XX_GRP(nprd_smi), \
+ NPCM8XX_GRP(hgpio0), \
+ NPCM8XX_GRP(hgpio1), \
+ NPCM8XX_GRP(hgpio2), \
+ NPCM8XX_GRP(hgpio3), \
+ NPCM8XX_GRP(hgpio4), \
+ NPCM8XX_GRP(hgpio5), \
+ NPCM8XX_GRP(hgpio6), \
+ NPCM8XX_GRP(hgpio7), \
+ \
+
+enum {
+#define NPCM8XX_GRP(x) fn_ ## x
+ NPCM8XX_GRPS
+ NPCM8XX_GRP(none),
+ NPCM8XX_GRP(gpio),
+#undef NPCM8XX_GRP
+};
+
+static struct npcm8xx_pingroup npcm8xx_pingroups[] = {
+#define NPCM8XX_GRP(x) { .name = #x, .pins = x ## _pins, \
+ .npins = ARRAY_SIZE(x ## _pins) }
+ NPCM8XX_GRPS
+#undef NPCM8XX_GRP
+};
+
+#define NPCM8XX_SFUNC(a) NPCM8XX_FUNC(a, #a)
+#define NPCM8XX_FUNC(a, b...) static const char *a ## _grp[] = { b }
+#define NPCM8XX_MKFUNC(nm) { .name = #nm, .ngroups = ARRAY_SIZE(nm ## _grp), \
+ .groups = nm ## _grp }
+struct npcm8xx_func {
+ const char *name;
+ const unsigned int ngroups;
+ const char *const *groups;
+};
+
+NPCM8XX_SFUNC(gpi36);
+NPCM8XX_SFUNC(gpi35);
+NPCM8XX_SFUNC(tp_jtag3);
+NPCM8XX_SFUNC(tp_uart);
+NPCM8XX_SFUNC(tp_smb2);
+NPCM8XX_SFUNC(tp_smb1);
+NPCM8XX_SFUNC(tp_gpio7);
+NPCM8XX_SFUNC(tp_gpio6);
+NPCM8XX_SFUNC(tp_gpio5);
+NPCM8XX_SFUNC(tp_gpio4);
+NPCM8XX_SFUNC(tp_gpio3);
+NPCM8XX_SFUNC(tp_gpio2);
+NPCM8XX_SFUNC(tp_gpio1);
+NPCM8XX_SFUNC(tp_gpio0);
+NPCM8XX_SFUNC(tp_gpio2b);
+NPCM8XX_SFUNC(tp_gpio1b);
+NPCM8XX_SFUNC(tp_gpio0b);
+NPCM8XX_SFUNC(vgadig);
+NPCM8XX_SFUNC(nbu1crts);
+NPCM8XX_SFUNC(fm2);
+NPCM8XX_SFUNC(fm1);
+NPCM8XX_SFUNC(fm0);
+NPCM8XX_SFUNC(gpio1836);
+NPCM8XX_SFUNC(gpio1889);
+NPCM8XX_SFUNC(gpo187);
+NPCM8XX_SFUNC(cp1urxd);
+NPCM8XX_SFUNC(r3rxer);
+NPCM8XX_SFUNC(cp1gpio2c);
+NPCM8XX_SFUNC(cp1gpio3c);
+NPCM8XX_SFUNC(cp1gpio0b);
+NPCM8XX_SFUNC(cp1gpio1b);
+NPCM8XX_SFUNC(cp1gpio2b);
+NPCM8XX_SFUNC(cp1gpio3b);
+NPCM8XX_SFUNC(cp1gpio4b);
+NPCM8XX_SFUNC(cp1gpio5b);
+NPCM8XX_SFUNC(cp1gpio6b);
+NPCM8XX_SFUNC(cp1gpio7b);
+NPCM8XX_SFUNC(cp1gpio0);
+NPCM8XX_SFUNC(cp1gpio1);
+NPCM8XX_SFUNC(cp1gpio2);
+NPCM8XX_SFUNC(cp1gpio3);
+NPCM8XX_SFUNC(cp1gpio4);
+NPCM8XX_SFUNC(cp1gpio5);
+NPCM8XX_SFUNC(cp1gpio6);
+NPCM8XX_SFUNC(cp1gpio7);
+NPCM8XX_SFUNC(cp1utxd);
+NPCM8XX_SFUNC(spi1cs3);
+NPCM8XX_SFUNC(spi1cs2);
+NPCM8XX_SFUNC(spi1cs1);
+NPCM8XX_SFUNC(spi1cs0);
+NPCM8XX_SFUNC(spi1d23);
+NPCM8XX_SFUNC(j2j3);
+NPCM8XX_SFUNC(r3oen);
+NPCM8XX_SFUNC(r2oen);
+NPCM8XX_SFUNC(r1oen);
+NPCM8XX_SFUNC(bu4b);
+NPCM8XX_SFUNC(bu4);
+NPCM8XX_SFUNC(bu5b);
+NPCM8XX_SFUNC(bu5);
+NPCM8XX_SFUNC(bu6);
+NPCM8XX_SFUNC(rmii3);
+NPCM8XX_SFUNC(jm1);
+NPCM8XX_SFUNC(jm2);
+NPCM8XX_SFUNC(tpgpio5b);
+NPCM8XX_SFUNC(tpgpio4b);
+NPCM8XX_SFUNC(clkrun);
+NPCM8XX_SFUNC(i3c5);
+NPCM8XX_SFUNC(i3c4);
+NPCM8XX_SFUNC(i3c3);
+NPCM8XX_SFUNC(i3c2);
+NPCM8XX_SFUNC(i3c1);
+NPCM8XX_SFUNC(i3c0);
+NPCM8XX_SFUNC(hsi1a);
+NPCM8XX_SFUNC(hsi2a);
+NPCM8XX_SFUNC(hsi1b);
+NPCM8XX_SFUNC(hsi2b);
+NPCM8XX_SFUNC(hsi1c);
+NPCM8XX_SFUNC(hsi2c);
+NPCM8XX_SFUNC(smb0);
+NPCM8XX_SFUNC(smb0b);
+NPCM8XX_SFUNC(smb0c);
+NPCM8XX_SFUNC(smb0d);
+NPCM8XX_SFUNC(smb0den);
+NPCM8XX_SFUNC(smb1);
+NPCM8XX_SFUNC(smb1b);
+NPCM8XX_SFUNC(smb1c);
+NPCM8XX_SFUNC(smb1d);
+NPCM8XX_SFUNC(smb2);
+NPCM8XX_SFUNC(smb2b);
+NPCM8XX_SFUNC(smb2c);
+NPCM8XX_SFUNC(smb2d);
+NPCM8XX_SFUNC(smb3);
+NPCM8XX_SFUNC(smb3b);
+NPCM8XX_SFUNC(smb3c);
+NPCM8XX_SFUNC(smb3d);
+NPCM8XX_SFUNC(smb4);
+NPCM8XX_SFUNC(smb4b);
+NPCM8XX_SFUNC(smb4c);
+NPCM8XX_SFUNC(smb4d);
+NPCM8XX_SFUNC(smb4den);
+NPCM8XX_SFUNC(smb5);
+NPCM8XX_SFUNC(smb5b);
+NPCM8XX_SFUNC(smb5c);
+NPCM8XX_SFUNC(smb5d);
+NPCM8XX_SFUNC(ga20kbc);
+NPCM8XX_SFUNC(smb6);
+NPCM8XX_SFUNC(smb6b);
+NPCM8XX_SFUNC(smb6c);
+NPCM8XX_SFUNC(smb6d);
+NPCM8XX_SFUNC(smb7);
+NPCM8XX_SFUNC(smb7b);
+NPCM8XX_SFUNC(smb7c);
+NPCM8XX_SFUNC(smb7d);
+NPCM8XX_SFUNC(smb8);
+NPCM8XX_SFUNC(smb9);
+NPCM8XX_SFUNC(smb10);
+NPCM8XX_SFUNC(smb11);
+NPCM8XX_SFUNC(smb12);
+NPCM8XX_SFUNC(smb13);
+NPCM8XX_SFUNC(smb14);
+NPCM8XX_SFUNC(smb14b);
+NPCM8XX_SFUNC(smb15);
+NPCM8XX_SFUNC(smb16);
+NPCM8XX_SFUNC(smb16b);
+NPCM8XX_SFUNC(smb17);
+NPCM8XX_SFUNC(smb18);
+NPCM8XX_SFUNC(smb19);
+NPCM8XX_SFUNC(smb20);
+NPCM8XX_SFUNC(smb21);
+NPCM8XX_SFUNC(smb22);
+NPCM8XX_SFUNC(smb23);
+NPCM8XX_SFUNC(smb23b);
+NPCM8XX_SFUNC(fanin0);
+NPCM8XX_SFUNC(fanin1);
+NPCM8XX_SFUNC(fanin2);
+NPCM8XX_SFUNC(fanin3);
+NPCM8XX_SFUNC(fanin4);
+NPCM8XX_SFUNC(fanin5);
+NPCM8XX_SFUNC(fanin6);
+NPCM8XX_SFUNC(fanin7);
+NPCM8XX_SFUNC(fanin8);
+NPCM8XX_SFUNC(fanin9);
+NPCM8XX_SFUNC(fanin10);
+NPCM8XX_SFUNC(fanin11);
+NPCM8XX_SFUNC(fanin12);
+NPCM8XX_SFUNC(fanin13);
+NPCM8XX_SFUNC(fanin14);
+NPCM8XX_SFUNC(fanin15);
+NPCM8XX_SFUNC(faninx);
+NPCM8XX_SFUNC(pwm0);
+NPCM8XX_SFUNC(pwm1);
+NPCM8XX_SFUNC(pwm2);
+NPCM8XX_SFUNC(pwm3);
+NPCM8XX_SFUNC(pwm4);
+NPCM8XX_SFUNC(pwm5);
+NPCM8XX_SFUNC(pwm6);
+NPCM8XX_SFUNC(pwm7);
+NPCM8XX_SFUNC(pwm8);
+NPCM8XX_SFUNC(pwm9);
+NPCM8XX_SFUNC(pwm10);
+NPCM8XX_SFUNC(pwm11);
+NPCM8XX_SFUNC(sg1mdio);
+NPCM8XX_SFUNC(rg2);
+NPCM8XX_SFUNC(rg2mdio);
+NPCM8XX_SFUNC(ddr);
+NPCM8XX_SFUNC(uart1);
+NPCM8XX_SFUNC(uart2);
+NPCM8XX_SFUNC(bmcuart0a);
+NPCM8XX_SFUNC(bmcuart0b);
+NPCM8XX_SFUNC(bmcuart1);
+NPCM8XX_SFUNC(iox1);
+NPCM8XX_SFUNC(iox2);
+NPCM8XX_SFUNC(ioxh);
+NPCM8XX_SFUNC(gspi);
+NPCM8XX_SFUNC(mmc);
+NPCM8XX_SFUNC(mmcwp);
+NPCM8XX_SFUNC(mmccd);
+NPCM8XX_SFUNC(mmcrst);
+NPCM8XX_SFUNC(mmc8);
+NPCM8XX_SFUNC(r1);
+NPCM8XX_SFUNC(r1err);
+NPCM8XX_SFUNC(r1md);
+NPCM8XX_SFUNC(r2);
+NPCM8XX_SFUNC(r2err);
+NPCM8XX_SFUNC(r2md);
+NPCM8XX_SFUNC(sd1);
+NPCM8XX_SFUNC(sd1pwr);
+NPCM8XX_SFUNC(wdog1);
+NPCM8XX_SFUNC(wdog2);
+NPCM8XX_SFUNC(scipme);
+NPCM8XX_SFUNC(smi);
+NPCM8XX_SFUNC(serirq);
+NPCM8XX_SFUNC(jtag2);
+NPCM8XX_SFUNC(spix);
+NPCM8XX_SFUNC(spixcs1);
+NPCM8XX_SFUNC(spi1);
+NPCM8XX_SFUNC(pspi);
+NPCM8XX_SFUNC(ddc);
+NPCM8XX_SFUNC(clkreq);
+NPCM8XX_SFUNC(clkout);
+NPCM8XX_SFUNC(spi3);
+NPCM8XX_SFUNC(spi3cs1);
+NPCM8XX_SFUNC(spi3quad);
+NPCM8XX_SFUNC(spi3cs2);
+NPCM8XX_SFUNC(spi3cs3);
+NPCM8XX_SFUNC(spi0cs1);
+NPCM8XX_SFUNC(lpc);
+NPCM8XX_SFUNC(lpcclk);
+NPCM8XX_SFUNC(espi);
+NPCM8XX_SFUNC(lkgpo0);
+NPCM8XX_SFUNC(lkgpo1);
+NPCM8XX_SFUNC(lkgpo2);
+NPCM8XX_SFUNC(nprd_smi);
+NPCM8XX_SFUNC(hgpio0);
+NPCM8XX_SFUNC(hgpio1);
+NPCM8XX_SFUNC(hgpio2);
+NPCM8XX_SFUNC(hgpio3);
+NPCM8XX_SFUNC(hgpio4);
+NPCM8XX_SFUNC(hgpio5);
+NPCM8XX_SFUNC(hgpio6);
+NPCM8XX_SFUNC(hgpio7);
+
+/* Function names */
+static struct npcm8xx_func npcm8xx_funcs[] = {
+ NPCM8XX_MKFUNC(gpi36),
+ NPCM8XX_MKFUNC(gpi35),
+ NPCM8XX_MKFUNC(tp_jtag3),
+ NPCM8XX_MKFUNC(tp_uart),
+ NPCM8XX_MKFUNC(tp_smb2),
+ NPCM8XX_MKFUNC(tp_smb1),
+ NPCM8XX_MKFUNC(tp_gpio7),
+ NPCM8XX_MKFUNC(tp_gpio6),
+ NPCM8XX_MKFUNC(tp_gpio5),
+ NPCM8XX_MKFUNC(tp_gpio4),
+ NPCM8XX_MKFUNC(tp_gpio3),
+ NPCM8XX_MKFUNC(tp_gpio2),
+ NPCM8XX_MKFUNC(tp_gpio1),
+ NPCM8XX_MKFUNC(tp_gpio0),
+ NPCM8XX_MKFUNC(tp_gpio2b),
+ NPCM8XX_MKFUNC(tp_gpio1b),
+ NPCM8XX_MKFUNC(tp_gpio0b),
+ NPCM8XX_MKFUNC(vgadig),
+ NPCM8XX_MKFUNC(nbu1crts),
+ NPCM8XX_MKFUNC(fm2),
+ NPCM8XX_MKFUNC(fm1),
+ NPCM8XX_MKFUNC(fm0),
+ NPCM8XX_MKFUNC(gpio1836),
+ NPCM8XX_MKFUNC(gpio1889),
+ NPCM8XX_MKFUNC(gpo187),
+ NPCM8XX_MKFUNC(cp1urxd),
+ NPCM8XX_MKFUNC(r3rxer),
+ NPCM8XX_MKFUNC(cp1gpio2c),
+ NPCM8XX_MKFUNC(cp1gpio3c),
+ NPCM8XX_MKFUNC(cp1gpio0b),
+ NPCM8XX_MKFUNC(cp1gpio1b),
+ NPCM8XX_MKFUNC(cp1gpio2b),
+ NPCM8XX_MKFUNC(cp1gpio3b),
+ NPCM8XX_MKFUNC(cp1gpio4b),
+ NPCM8XX_MKFUNC(cp1gpio5b),
+ NPCM8XX_MKFUNC(cp1gpio6b),
+ NPCM8XX_MKFUNC(cp1gpio7b),
+ NPCM8XX_MKFUNC(cp1gpio0),
+ NPCM8XX_MKFUNC(cp1gpio1),
+ NPCM8XX_MKFUNC(cp1gpio2),
+ NPCM8XX_MKFUNC(cp1gpio3),
+ NPCM8XX_MKFUNC(cp1gpio4),
+ NPCM8XX_MKFUNC(cp1gpio5),
+ NPCM8XX_MKFUNC(cp1gpio6),
+ NPCM8XX_MKFUNC(cp1gpio7),
+ NPCM8XX_MKFUNC(cp1utxd),
+ NPCM8XX_MKFUNC(spi1cs3),
+ NPCM8XX_MKFUNC(spi1cs2),
+ NPCM8XX_MKFUNC(spi1cs1),
+ NPCM8XX_MKFUNC(spi1cs0),
+ NPCM8XX_MKFUNC(spi1d23),
+ NPCM8XX_MKFUNC(j2j3),
+ NPCM8XX_MKFUNC(r3oen),
+ NPCM8XX_MKFUNC(r2oen),
+ NPCM8XX_MKFUNC(r1oen),
+ NPCM8XX_MKFUNC(bu4b),
+ NPCM8XX_MKFUNC(bu4),
+ NPCM8XX_MKFUNC(bu5b),
+ NPCM8XX_MKFUNC(bu5),
+ NPCM8XX_MKFUNC(bu6),
+ NPCM8XX_MKFUNC(rmii3),
+ NPCM8XX_MKFUNC(jm1),
+ NPCM8XX_MKFUNC(jm2),
+ NPCM8XX_MKFUNC(tpgpio5b),
+ NPCM8XX_MKFUNC(tpgpio4b),
+ NPCM8XX_MKFUNC(clkrun),
+ NPCM8XX_MKFUNC(i3c5),
+ NPCM8XX_MKFUNC(i3c4),
+ NPCM8XX_MKFUNC(i3c3),
+ NPCM8XX_MKFUNC(i3c2),
+ NPCM8XX_MKFUNC(i3c1),
+ NPCM8XX_MKFUNC(i3c0),
+ NPCM8XX_MKFUNC(hsi1a),
+ NPCM8XX_MKFUNC(hsi2a),
+ NPCM8XX_MKFUNC(hsi1b),
+ NPCM8XX_MKFUNC(hsi2b),
+ NPCM8XX_MKFUNC(hsi1c),
+ NPCM8XX_MKFUNC(hsi2c),
+ NPCM8XX_MKFUNC(smb0),
+ NPCM8XX_MKFUNC(smb0b),
+ NPCM8XX_MKFUNC(smb0c),
+ NPCM8XX_MKFUNC(smb0d),
+ NPCM8XX_MKFUNC(smb0den),
+ NPCM8XX_MKFUNC(smb1),
+ NPCM8XX_MKFUNC(smb1b),
+ NPCM8XX_MKFUNC(smb1c),
+ NPCM8XX_MKFUNC(smb1d),
+ NPCM8XX_MKFUNC(smb2),
+ NPCM8XX_MKFUNC(smb2b),
+ NPCM8XX_MKFUNC(smb2c),
+ NPCM8XX_MKFUNC(smb2d),
+ NPCM8XX_MKFUNC(smb3),
+ NPCM8XX_MKFUNC(smb3b),
+ NPCM8XX_MKFUNC(smb3c),
+ NPCM8XX_MKFUNC(smb3d),
+ NPCM8XX_MKFUNC(smb4),
+ NPCM8XX_MKFUNC(smb4b),
+ NPCM8XX_MKFUNC(smb4c),
+ NPCM8XX_MKFUNC(smb4d),
+ NPCM8XX_MKFUNC(smb4den),
+ NPCM8XX_MKFUNC(smb5),
+ NPCM8XX_MKFUNC(smb5b),
+ NPCM8XX_MKFUNC(smb5c),
+ NPCM8XX_MKFUNC(smb5d),
+ NPCM8XX_MKFUNC(ga20kbc),
+ NPCM8XX_MKFUNC(smb6),
+ NPCM8XX_MKFUNC(smb6b),
+ NPCM8XX_MKFUNC(smb6c),
+ NPCM8XX_MKFUNC(smb6d),
+ NPCM8XX_MKFUNC(smb7),
+ NPCM8XX_MKFUNC(smb7b),
+ NPCM8XX_MKFUNC(smb7c),
+ NPCM8XX_MKFUNC(smb7d),
+ NPCM8XX_MKFUNC(smb8),
+ NPCM8XX_MKFUNC(smb9),
+ NPCM8XX_MKFUNC(smb10),
+ NPCM8XX_MKFUNC(smb11),
+ NPCM8XX_MKFUNC(smb12),
+ NPCM8XX_MKFUNC(smb13),
+ NPCM8XX_MKFUNC(smb14),
+ NPCM8XX_MKFUNC(smb14b),
+ NPCM8XX_MKFUNC(smb15),
+ NPCM8XX_MKFUNC(smb16),
+ NPCM8XX_MKFUNC(smb16b),
+ NPCM8XX_MKFUNC(smb17),
+ NPCM8XX_MKFUNC(smb18),
+ NPCM8XX_MKFUNC(smb19),
+ NPCM8XX_MKFUNC(smb20),
+ NPCM8XX_MKFUNC(smb21),
+ NPCM8XX_MKFUNC(smb22),
+ NPCM8XX_MKFUNC(smb23),
+ NPCM8XX_MKFUNC(smb23b),
+ NPCM8XX_MKFUNC(fanin0),
+ NPCM8XX_MKFUNC(fanin1),
+ NPCM8XX_MKFUNC(fanin2),
+ NPCM8XX_MKFUNC(fanin3),
+ NPCM8XX_MKFUNC(fanin4),
+ NPCM8XX_MKFUNC(fanin5),
+ NPCM8XX_MKFUNC(fanin6),
+ NPCM8XX_MKFUNC(fanin7),
+ NPCM8XX_MKFUNC(fanin8),
+ NPCM8XX_MKFUNC(fanin9),
+ NPCM8XX_MKFUNC(fanin10),
+ NPCM8XX_MKFUNC(fanin11),
+ NPCM8XX_MKFUNC(fanin12),
+ NPCM8XX_MKFUNC(fanin13),
+ NPCM8XX_MKFUNC(fanin14),
+ NPCM8XX_MKFUNC(fanin15),
+ NPCM8XX_MKFUNC(faninx),
+ NPCM8XX_MKFUNC(pwm0),
+ NPCM8XX_MKFUNC(pwm1),
+ NPCM8XX_MKFUNC(pwm2),
+ NPCM8XX_MKFUNC(pwm3),
+ NPCM8XX_MKFUNC(pwm4),
+ NPCM8XX_MKFUNC(pwm5),
+ NPCM8XX_MKFUNC(pwm6),
+ NPCM8XX_MKFUNC(pwm7),
+ NPCM8XX_MKFUNC(pwm8),
+ NPCM8XX_MKFUNC(pwm9),
+ NPCM8XX_MKFUNC(pwm10),
+ NPCM8XX_MKFUNC(pwm11),
+ NPCM8XX_MKFUNC(sg1mdio),
+ NPCM8XX_MKFUNC(rg2),
+ NPCM8XX_MKFUNC(rg2mdio),
+ NPCM8XX_MKFUNC(ddr),
+ NPCM8XX_MKFUNC(uart1),
+ NPCM8XX_MKFUNC(uart2),
+ NPCM8XX_MKFUNC(bmcuart0a),
+ NPCM8XX_MKFUNC(bmcuart0b),
+ NPCM8XX_MKFUNC(bmcuart1),
+ NPCM8XX_MKFUNC(iox1),
+ NPCM8XX_MKFUNC(iox2),
+ NPCM8XX_MKFUNC(ioxh),
+ NPCM8XX_MKFUNC(gspi),
+ NPCM8XX_MKFUNC(mmc),
+ NPCM8XX_MKFUNC(mmcwp),
+ NPCM8XX_MKFUNC(mmccd),
+ NPCM8XX_MKFUNC(mmcrst),
+ NPCM8XX_MKFUNC(mmc8),
+ NPCM8XX_MKFUNC(r1),
+ NPCM8XX_MKFUNC(r1err),
+ NPCM8XX_MKFUNC(r1md),
+ NPCM8XX_MKFUNC(r2),
+ NPCM8XX_MKFUNC(r2err),
+ NPCM8XX_MKFUNC(r2md),
+ NPCM8XX_MKFUNC(sd1),
+ NPCM8XX_MKFUNC(sd1pwr),
+ NPCM8XX_MKFUNC(wdog1),
+ NPCM8XX_MKFUNC(wdog2),
+ NPCM8XX_MKFUNC(scipme),
+ NPCM8XX_MKFUNC(smi),
+ NPCM8XX_MKFUNC(serirq),
+ NPCM8XX_MKFUNC(jtag2),
+ NPCM8XX_MKFUNC(spix),
+ NPCM8XX_MKFUNC(spixcs1),
+ NPCM8XX_MKFUNC(spi1),
+ NPCM8XX_MKFUNC(pspi),
+ NPCM8XX_MKFUNC(ddc),
+ NPCM8XX_MKFUNC(clkreq),
+ NPCM8XX_MKFUNC(clkout),
+ NPCM8XX_MKFUNC(spi3),
+ NPCM8XX_MKFUNC(spi3cs1),
+ NPCM8XX_MKFUNC(spi3quad),
+ NPCM8XX_MKFUNC(spi3cs2),
+ NPCM8XX_MKFUNC(spi3cs3),
+ NPCM8XX_MKFUNC(spi0cs1),
+ NPCM8XX_MKFUNC(lpc),
+ NPCM8XX_MKFUNC(lpcclk),
+ NPCM8XX_MKFUNC(espi),
+ NPCM8XX_MKFUNC(lkgpo0),
+ NPCM8XX_MKFUNC(lkgpo1),
+ NPCM8XX_MKFUNC(lkgpo2),
+ NPCM8XX_MKFUNC(nprd_smi),
+ NPCM8XX_MKFUNC(hgpio0),
+ NPCM8XX_MKFUNC(hgpio1),
+ NPCM8XX_MKFUNC(hgpio2),
+ NPCM8XX_MKFUNC(hgpio3),
+ NPCM8XX_MKFUNC(hgpio4),
+ NPCM8XX_MKFUNC(hgpio5),
+ NPCM8XX_MKFUNC(hgpio6),
+ NPCM8XX_MKFUNC(hgpio7),
+};
+
+#define NPCM8XX_PINCFG(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q) \
+ [a] { .fn0 = fn_ ## b, .reg0 = NPCM8XX_GCR_ ## c, .bit0 = d, \
+ .fn1 = fn_ ## e, .reg1 = NPCM8XX_GCR_ ## f, .bit1 = g, \
+ .fn2 = fn_ ## h, .reg2 = NPCM8XX_GCR_ ## i, .bit2 = j, \
+ .fn3 = fn_ ## k, .reg3 = NPCM8XX_GCR_ ## l, .bit3 = m, \
+ .fn4 = fn_ ## n, .reg4 = NPCM8XX_GCR_ ## o, .bit4 = p, \
+ .flag = q }
+
+/* Drive strength controlled by NPCM8XX_GP_N_ODSC */
+#define DRIVE_STRENGTH_LO_SHIFT 8
+#define DRIVE_STRENGTH_HI_SHIFT 12
+#define DRIVE_STRENGTH_MASK GENMASK(15, 8)
+
+#define DSTR(lo, hi) (((lo) << DRIVE_STRENGTH_LO_SHIFT) | \
+ ((hi) << DRIVE_STRENGTH_HI_SHIFT))
+#define DSLO(x) (((x) >> DRIVE_STRENGTH_LO_SHIFT) & GENMASK(3, 0))
+#define DSHI(x) (((x) >> DRIVE_STRENGTH_HI_SHIFT) & GENMASK(3, 0))
+
+#define GPI BIT(0) /* Not GPO */
+#define GPO BIT(1) /* Not GPI */
+#define SLEW BIT(2) /* Has Slew Control, NPCM8XX_GP_N_OSRC */
+#define SLEWLPC BIT(3) /* Has Slew Control, SRCNT.3 */
+
+struct npcm8xx_pincfg {
+ int flag;
+ int fn0, reg0, bit0;
+ int fn1, reg1, bit1;
+ int fn2, reg2, bit2;
+ int fn3, reg3, bit3;
+ int fn4, reg4, bit4;
+};
+
+static const struct npcm8xx_pincfg pincfg[] = {
+ /* PIN FUNCTION 1 FUNCTION 2 FUNCTION 3 FUNCTION 4 FUNCTION 5 FLAGS */
+ NPCM8XX_PINCFG(0, iox1, MFSEL1, 30, smb6c, I2CSEGSEL, 25, smb18, MFSEL5, 26, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(1, iox1, MFSEL1, 30, smb6c, I2CSEGSEL, 25, smb18, MFSEL5, 26, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(2, iox1, MFSEL1, 30, smb6b, I2CSEGSEL, 24, smb17, MFSEL5, 25, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(3, iox1, MFSEL1, 30, smb6b, I2CSEGSEL, 24, smb17, MFSEL5, 25, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(4, iox2, MFSEL3, 14, smb1d, I2CSEGSEL, 7, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(5, iox2, MFSEL3, 14, smb1d, I2CSEGSEL, 7, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(6, iox2, MFSEL3, 14, smb2d, I2CSEGSEL, 10, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(7, iox2, MFSEL3, 14, smb2d, I2CSEGSEL, 10, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(8, lkgpo1, FLOCKR1, 4, tp_gpio0b, MFSEL7, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
+ NPCM8XX_PINCFG(9, lkgpo2, FLOCKR1, 8, tp_gpio1b, MFSEL7, 9, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
+ NPCM8XX_PINCFG(10, ioxh, MFSEL3, 18, smb6d, I2CSEGSEL, 26, smb16, MFSEL5, 24, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(11, ioxh, MFSEL3, 18, smb6d, I2CSEGSEL, 26, smb16, MFSEL5, 24, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(12, gspi, MFSEL1, 24, smb5b, I2CSEGSEL, 19, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(13, gspi, MFSEL1, 24, smb5b, I2CSEGSEL, 19, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(14, gspi, MFSEL1, 24, smb5c, I2CSEGSEL, 20, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(15, gspi, MFSEL1, 24, smb5c, I2CSEGSEL, 20, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(16, lkgpo0, FLOCKR1, 0, smb7b, I2CSEGSEL, 27, tp_gpio2b, MFSEL7, 10, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(17, pspi, MFSEL3, 13, cp1gpio5, MFSEL6, 7, smb4den, I2CSEGSEL, 23, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(18, pspi, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(19, pspi, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(20, hgpio0, MFSEL2, 24, smb15, MFSEL3, 8, smb4c, I2CSEGSEL, 15, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(21, hgpio1, MFSEL2, 25, smb15, MFSEL3, 8, smb4c, I2CSEGSEL, 15, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(22, hgpio2, MFSEL2, 26, smb14, MFSEL3, 7, smb4d, I2CSEGSEL, 16, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(23, hgpio3, MFSEL2, 27, smb14, MFSEL3, 7, smb4d, I2CSEGSEL, 16, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(24, hgpio4, MFSEL2, 28, ioxh, MFSEL3, 18, smb7c, I2CSEGSEL, 28, tp_smb2, MFSEL7, 28, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(25, hgpio5, MFSEL2, 29, ioxh, MFSEL3, 18, smb7c, I2CSEGSEL, 28, tp_smb2, MFSEL7, 28, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(26, smb5, MFSEL1, 2, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(27, smb5, MFSEL1, 2, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(28, smb4, MFSEL1, 1, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(29, smb4, MFSEL1, 1, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(30, smb3, MFSEL1, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(31, smb3, MFSEL1, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(32, spi0cs1, MFSEL1, 3, smb14b, MFSEL7, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(33, i3c4, MFSEL6, 10, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(34, i3c4, MFSEL6, 10, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(37, smb3c, I2CSEGSEL, 12, smb23, MFSEL5, 31, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(38, smb3c, I2CSEGSEL, 12, smb23, MFSEL5, 31, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(39, smb3b, I2CSEGSEL, 11, smb22, MFSEL5, 30, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(40, smb3b, I2CSEGSEL, 11, smb22, MFSEL5, 30, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(41, bmcuart0a, MFSEL1, 9, cp1urxd, MFSEL6, 31, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(42, bmcuart0a, MFSEL1, 9, cp1utxd, MFSEL6, 1, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(2, 4) | GPO),
+ NPCM8XX_PINCFG(43, uart1, MFSEL1, 10, bmcuart1, MFSEL3, 24, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(44, hsi1b, MFSEL1, 28, nbu1crts, MFSEL6, 15, jtag2, MFSEL4, 0, tp_jtag3, MFSEL7, 13, j2j3, MFSEL5, 2, GPO),
+ NPCM8XX_PINCFG(45, hsi1c, MFSEL1, 4, jtag2, MFSEL4, 0, j2j3, MFSEL5, 2, tp_jtag3, MFSEL7, 13, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(46, hsi1c, MFSEL1, 4, jtag2, MFSEL4, 0, j2j3, MFSEL5, 2, tp_jtag3, MFSEL7, 13, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(47, hsi1c, MFSEL1, 4, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(2, 8)),
+ NPCM8XX_PINCFG(48, hsi2a, MFSEL1, 11, bmcuart0b, MFSEL4, 1, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(49, hsi2a, MFSEL1, 11, bmcuart0b, MFSEL4, 1, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(50, hsi2b, MFSEL1, 29, bu6, MFSEL5, 6, tp_uart, MFSEL7, 12, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(51, hsi2b, MFSEL1, 29, bu6, MFSEL5, 6, tp_uart, MFSEL7, 12, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(52, hsi2c, MFSEL1, 5, bu5, MFSEL5, 7, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(53, hsi2c, MFSEL1, 5, bu5, MFSEL5, 7, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(54, hsi2c, MFSEL1, 5, bu4, MFSEL5, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(55, hsi2c, MFSEL1, 5, bu4, MFSEL5, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(56, r1err, MFSEL1, 12, r1oen, MFSEL5, 9, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(57, r1md, MFSEL1, 13, tpgpio4b, MFSEL5, 20, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(2, 4)),
+ NPCM8XX_PINCFG(58, r1md, MFSEL1, 13, tpgpio5b, MFSEL5, 22, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(2, 4)),
+ NPCM8XX_PINCFG(59, hgpio6, MFSEL2, 30, smb3d, I2CSEGSEL, 13, smb19, MFSEL5, 27, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(60, hgpio7, MFSEL2, 31, smb3d, I2CSEGSEL, 13, smb19, MFSEL5, 27, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(61, hsi1c, MFSEL1, 4, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(62, hsi1b, MFSEL1, 28, jtag2, MFSEL4, 0, j2j3, MFSEL5, 2, nbu1crts, MFSEL6, 15, tp_jtag3, MFSEL7, 13, GPO),
+ NPCM8XX_PINCFG(63, hsi1a, MFSEL1, 10, bmcuart1, MFSEL3, 24, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(64, fanin0, MFSEL2, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(65, fanin1, MFSEL2, 1, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(66, fanin2, MFSEL2, 2, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(67, fanin3, MFSEL2, 3, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(68, fanin4, MFSEL2, 4, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(69, fanin5, MFSEL2, 5, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(70, fanin6, MFSEL2, 6, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(71, fanin7, MFSEL2, 7, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(72, fanin8, MFSEL2, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(73, fanin9, MFSEL2, 9, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(74, fanin10, MFSEL2, 10, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(75, fanin11, MFSEL2, 11, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(76, fanin12, MFSEL2, 12, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(77, fanin13, MFSEL2, 13, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(78, fanin14, MFSEL2, 14, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(79, fanin15, MFSEL2, 15, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(80, pwm0, MFSEL2, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
+ NPCM8XX_PINCFG(81, pwm1, MFSEL2, 17, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
+ NPCM8XX_PINCFG(82, pwm2, MFSEL2, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
+ NPCM8XX_PINCFG(83, pwm3, MFSEL2, 19, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
+ NPCM8XX_PINCFG(84, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(85, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(86, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(87, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(88, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(89, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(90, r2err, MFSEL1, 15, r2oen, MFSEL5, 10, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(91, r2md, MFSEL1, 16, cp1gpio6, MFSEL6, 8, tp_gpio0, MFSEL7, 0, none, NONE, 0, none, NONE, 0, DSTR(2, 4)),
+ NPCM8XX_PINCFG(92, r2md, MFSEL1, 16, cp1gpio7, MFSEL6, 9, tp_gpio1, MFSEL7, 1, none, NONE, 0, none, NONE, 0, DSTR(2, 4)),
+ NPCM8XX_PINCFG(93, ga20kbc, MFSEL1, 17, smb5d, I2CSEGSEL, 21, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(94, ga20kbc, MFSEL1, 17, smb5d, I2CSEGSEL, 21, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(95, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(96, cp1gpio7b, MFSEL6, 24, tp_gpio7, MFSEL7, 7, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(97, cp1gpio6b, MFSEL6, 25, tp_gpio6, MFSEL7, 6, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(98, bu4b, MFSEL5, 13, cp1gpio5b, MFSEL6, 26, tp_gpio5, MFSEL7, 5, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(99, bu4b, MFSEL5, 13, cp1gpio4b, MFSEL6, 27, tp_gpio4, MFSEL7, 4, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(100, bu5b, MFSEL5, 12, cp1gpio3c, MFSEL6, 28, tp_gpio3, MFSEL7, 3, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(101, bu5b, MFSEL5, 12, cp1gpio2c, MFSEL6, 29, tp_gpio2, MFSEL7, 2, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(102, vgadig, MFSEL7, 29, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
+ NPCM8XX_PINCFG(103, vgadig, MFSEL7, 29, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
+ NPCM8XX_PINCFG(104, vgadig, MFSEL7, 29, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(105, vgadig, MFSEL7, 29, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(106, i3c5, MFSEL3, 22, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(107, i3c5, MFSEL3, 22, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(108, sg1mdio, MFSEL4, 21, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(109, sg1mdio, MFSEL4, 21, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(110, rg2, MFSEL4, 24, ddr, MFSEL3, 26, rmii3, MFSEL5, 11, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(111, rg2, MFSEL4, 24, ddr, MFSEL3, 26, rmii3, MFSEL5, 11, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(112, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(113, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(114, smb0, MFSEL1, 6, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(115, smb0, MFSEL1, 6, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(116, smb1, MFSEL1, 7, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(117, smb1, MFSEL1, 7, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(118, smb2, MFSEL1, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(119, smb2, MFSEL1, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(120, smb2c, I2CSEGSEL, 9, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(121, smb2c, I2CSEGSEL, 9, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(122, smb2b, I2CSEGSEL, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(123, smb2b, I2CSEGSEL, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(124, smb1c, I2CSEGSEL, 6, cp1gpio3b, MFSEL6, 23, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(125, smb1c, I2CSEGSEL, 6, cp1gpio2b, MFSEL6, 22, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(126, smb1b, I2CSEGSEL, 5, cp1gpio1b, MFSEL6, 21, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(127, smb1b, I2CSEGSEL, 5, cp1gpio0b, MFSEL6, 20, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(128, smb8, MFSEL4, 11, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(129, smb8, MFSEL4, 11, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(130, smb9, MFSEL4, 12, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(131, smb9, MFSEL4, 12, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(132, smb10, MFSEL4, 13, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(133, smb10, MFSEL4, 13, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(134, smb11, MFSEL4, 14, smb23b, MFSEL6, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(135, smb11, MFSEL4, 14, smb23b, MFSEL6, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(136, jm1, MFSEL5, 15, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(137, jm1, MFSEL5, 15, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(138, jm1, MFSEL5, 15, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(139, jm1, MFSEL5, 15, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(140, jm1, MFSEL5, 15, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(141, smb7b, I2CSEGSEL, 27, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(142, smb7d, I2CSEGSEL, 29, tp_smb1, MFSEL7, 11, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(143, smb7d, I2CSEGSEL, 29, tp_smb1, MFSEL7, 11, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(144, pwm4, MFSEL2, 20, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
+ NPCM8XX_PINCFG(145, pwm5, MFSEL2, 21, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
+ NPCM8XX_PINCFG(146, pwm6, MFSEL2, 22, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
+ NPCM8XX_PINCFG(147, pwm7, MFSEL2, 23, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
+ NPCM8XX_PINCFG(148, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(149, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(150, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(151, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(152, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(153, mmcwp, FLOCKR1, 24, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(154, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(155, mmccd, MFSEL3, 25, mmcrst, MFSEL4, 6, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(156, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(157, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(158, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(159, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(160, clkout, MFSEL1, 21, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(161, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(162, serirq, MFSEL1, 31, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
+ NPCM8XX_PINCFG(163, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(164, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(165, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(166, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(167, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(168, lpcclk, MFSEL1, 31, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(169, scipme, MFSEL3, 0, smb21, MFSEL5, 29, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(170, smi, MFSEL1, 22, smb21, MFSEL5, 29, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(171, smb6, MFSEL3, 1, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(172, smb6, MFSEL3, 1, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(173, smb7, MFSEL3, 2, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(174, smb7, MFSEL3, 2, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(175, spi1, MFSEL3, 4, faninx, MFSEL3, 3, fm1, MFSEL6, 17, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
+ NPCM8XX_PINCFG(176, spi1, MFSEL3, 4, faninx, MFSEL3, 3, fm1, MFSEL6, 17, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
+ NPCM8XX_PINCFG(177, spi1, MFSEL3, 4, faninx, MFSEL3, 3, fm1, MFSEL6, 17, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
+ NPCM8XX_PINCFG(178, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(179, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(180, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(181, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(182, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(183, gpio1836, MFSEL6, 19, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(184, gpio1836, MFSEL6, 19, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(185, gpio1836, MFSEL6, 19, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(186, gpio1836, MFSEL6, 19, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
+ NPCM8XX_PINCFG(187, gpo187, MFSEL7, 24, smb14b, MFSEL7, 26, spi3cs1, MFSEL4, 17, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(188, gpio1889, MFSEL7, 25, spi3cs2, MFSEL4, 18, spi3quad, MFSEL4, 20, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(189, gpio1889, MFSEL7, 25, spi3cs3, MFSEL4, 19, spi3quad, MFSEL4, 20, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(190, nprd_smi, FLOCKR1, 20, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(2, 4)),
+ NPCM8XX_PINCFG(191, spi1d23, MFSEL5, 3, spi1cs2, MFSEL5, 4, fm1, MFSEL6, 17, smb15, MFSEL7, 27, none, NONE, 0, DSTR(0, 2)), /* XX */
+ NPCM8XX_PINCFG(192, spi1d23, MFSEL5, 3, spi1cs3, MFSEL5, 5, fm1, MFSEL6, 17, smb15, MFSEL7, 27, none, NONE, 0, DSTR(0, 2)), /* XX */
+ NPCM8XX_PINCFG(193, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(194, smb0b, I2CSEGSEL, 0, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)),
+ NPCM8XX_PINCFG(195, smb0b, I2CSEGSEL, 0, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)),
+ NPCM8XX_PINCFG(196, smb0c, I2CSEGSEL, 1, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)),
+ NPCM8XX_PINCFG(197, smb0den, I2CSEGSEL, 22, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)),
+ NPCM8XX_PINCFG(198, smb0d, I2CSEGSEL, 2, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)),
+ NPCM8XX_PINCFG(199, smb0d, I2CSEGSEL, 2, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)),
+ NPCM8XX_PINCFG(200, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(201, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM8XX_PINCFG(202, smb0c, I2CSEGSEL, 1, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)),
+ NPCM8XX_PINCFG(203, faninx, MFSEL3, 3, spi1cs0, MFSEL3, 4, fm1, MFSEL6, 17, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
+ NPCM8XX_PINCFG(208, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), /* DSCNT */
+ NPCM8XX_PINCFG(209, rg2, MFSEL4, 24, ddr, MFSEL3, 26, rmii3, MFSEL5, 11, none, NONE, 0, none, NONE, 0, SLEW), /* DSCNT */
+ NPCM8XX_PINCFG(210, rg2, MFSEL4, 24, ddr, MFSEL3, 26, rmii3, MFSEL5, 11, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(211, rg2, MFSEL4, 24, ddr, MFSEL3, 26, rmii3, MFSEL5, 11, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(212, rg2, MFSEL4, 24, ddr, MFSEL3, 26, r3rxer, MFSEL6, 30, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(213, rg2, MFSEL4, 24, ddr, MFSEL3, 26, r3oen, MFSEL5, 14, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(214, rg2, MFSEL4, 24, ddr, MFSEL3, 26, rmii3, MFSEL5, 11, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(215, rg2, MFSEL4, 24, ddr, MFSEL3, 26, rmii3, MFSEL5, 11, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(216, rg2mdio, MFSEL4, 23, ddr, MFSEL3, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(217, rg2mdio, MFSEL4, 23, ddr, MFSEL3, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(218, wdog1, MFSEL3, 19, smb16b, MFSEL7, 30, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(219, wdog2, MFSEL3, 20, smb16b, MFSEL7, 30, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(220, smb12, MFSEL3, 5, pwm8, MFSEL6, 11, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(221, smb12, MFSEL3, 5, pwm9, MFSEL6, 12, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(222, smb13, MFSEL3, 6, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(223, smb13, MFSEL3, 6, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(224, spix, MFSEL4, 27, fm2, MFSEL6, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(225, spix, MFSEL4, 27, fm2, MFSEL6, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(226, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO | DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(227, spix, MFSEL4, 27, fm2, MFSEL6, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(228, spixcs1, MFSEL4, 28, fm2, MFSEL6, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(229, spix, MFSEL4, 27, fm2, MFSEL6, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(230, spix, MFSEL4, 27, fm2, MFSEL6, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(231, clkreq, MFSEL4, 9, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(4, 12) | SLEW),
+ NPCM8XX_PINCFG(233, spi1cs1, MFSEL5, 0, fm1, MFSEL6, 17, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEWLPC), /* slewlpc ? */
+ NPCM8XX_PINCFG(234, pwm10, MFSEL6, 13, smb20, MFSEL5, 28, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(235, pwm11, MFSEL6, 14, smb20, MFSEL5, 28, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(240, i3c0, MFSEL5, 17, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(241, i3c0, MFSEL5, 17, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(242, i3c1, MFSEL5, 19, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(243, i3c1, MFSEL5, 19, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(244, i3c2, MFSEL5, 21, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(245, i3c2, MFSEL5, 21, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(246, i3c3, MFSEL5, 23, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(247, i3c3, MFSEL5, 23, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(251, jm2, MFSEL5, 1, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(253, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* SDHC1 power */
+ NPCM8XX_PINCFG(254, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* SDHC2 power */
+ NPCM8XX_PINCFG(255, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* DACOSEL */
+};
+
+/* number, name, drv_data */
+static const struct pinctrl_pin_desc npcm8xx_pins[] = {
+ PINCTRL_PIN(0, "GPIO0/IOX1_DI/SMB6C_SDA/SMB18_SDA"),
+ PINCTRL_PIN(1, "GPIO1/IOX1_LD/SMB6C_SCL/SMB18_SCL"),
+ PINCTRL_PIN(2, "GPIO2/IOX1_CK/SMB6B_SDA/SMB17_SDA"),
+ PINCTRL_PIN(3, "GPIO3/IOX1_DO/SMB6B_SCL/SMB17_SCL"),
+ PINCTRL_PIN(4, "GPIO4/IOX2_DI/SMB1D_SDA"),
+ PINCTRL_PIN(5, "GPIO5/IOX2_LD/SMB1D_SCL"),
+ PINCTRL_PIN(6, "GPIO6/IOX2_CK/SMB2D_SDA"),
+ PINCTRL_PIN(7, "GPIO7/IOX2_D0/SMB2D_SCL"),
+ PINCTRL_PIN(8, "GPIO8/LKGPO1/TP_GPIO0"),
+ PINCTRL_PIN(9, "GPIO9/LKGPO2/TP_GPIO1"),
+ PINCTRL_PIN(10, "GPIO10/IOXH_LD/SMB6D_SCL/SMB16_SCL"),
+ PINCTRL_PIN(11, "GPIO11/IOXH_CK/SMB6D_SDA/SMB16_SDA"),
+ PINCTRL_PIN(12, "GPIO12/GSPI_CK/SMB5B_SCL"),
+ PINCTRL_PIN(13, "GPIO13/GSPI_DO/SMB5B_SDA"),
+ PINCTRL_PIN(14, "GPIO14/GSPI_DI/SMB5C_SCL"),
+ PINCTRL_PIN(15, "GPIO15/GSPI_CS/SMB5C_SDA"),
+ PINCTRL_PIN(16, "GPIO16/SMB7B_SDA/LKGPO0/TP_GPIO2"),
+ PINCTRL_PIN(17, "GPIO17/PSPI_DI/CP1_GPIO5"),
+ PINCTRL_PIN(18, "GPIO18/PSPI_D0/SMB4B_SDA"),
+ PINCTRL_PIN(19, "GPIO19/PSPI_CK/SMB4B_SCL"),
+ PINCTRL_PIN(20, "GPIO20/H_GPIO0/SMB4C_SDA/SMB15_SDA"),
+ PINCTRL_PIN(21, "GPIO21/H_GPIO1/SMB4C_SCL/SMB15_SCL"),
+ PINCTRL_PIN(22, "GPIO22/H_GPIO2/SMB4D_SDA/SMB14_SDA"),
+ PINCTRL_PIN(23, "GPIO23/H_GPIO3/SMB4D_SCL/SMB14_SCL"),
+ PINCTRL_PIN(24, "GPIO24/IOXH_DO/H_GPIO4/SMB7C_SCL/TP_SMB2_SCL"),
+ PINCTRL_PIN(25, "GPIO25/IOXH_DI/H_GPIO4/SMB7C_SDA/TP_SMB2_SDA"),
+ PINCTRL_PIN(26, "GPIO26/SMB5_SDA"),
+ PINCTRL_PIN(27, "GPIO27/SMB5_SCL"),
+ PINCTRL_PIN(28, "GPIO28/SMB4_SDA"),
+ PINCTRL_PIN(29, "GPIO29/SMB4_SCL"),
+ PINCTRL_PIN(30, "GPIO30/SMB3_SDA"),
+ PINCTRL_PIN(31, "GPIO31/SMB3_SCL"),
+ PINCTRL_PIN(32, "GPIO32/SMB14B_SCL/SPI0_nCS1"),
+ PINCTRL_PIN(33, "GPIO33/I3C4_SCL"),
+ PINCTRL_PIN(34, "GPIO34/I3C4_SDA"),
+ PINCTRL_PIN(37, "GPIO37/SMB3C_SDA/SMB23_SDA"),
+ PINCTRL_PIN(38, "GPIO38/SMB3C_SCL/SMB23_SCL"),
+ PINCTRL_PIN(39, "GPIO39/SMB3B_SDA/SMB22_SDA"),
+ PINCTRL_PIN(40, "GPIO40/SMB3B_SCL/SMB22_SCL"),
+ PINCTRL_PIN(41, "GPIO41/BU0_RXD/CP1U_RXD"),
+ PINCTRL_PIN(42, "GPIO42/BU0_TXD/CP1U_TXD"),
+ PINCTRL_PIN(43, "GPIO43/SI1_RXD/BU1_RXD"),
+ PINCTRL_PIN(44, "GPIO44/SI1_nCTS/BU1_nCTS/CP_TDI/TP_TDI/CP_TP_TDI"),
+ PINCTRL_PIN(45, "GPIO45/SI1_nDCD/CP_TMS_SWIO/TP_TMS_SWIO/CP_TP_TMS_SWIO"),
+ PINCTRL_PIN(46, "GPIO46/SI1_nDSR/CP_TCK_SWCLK/TP_TCK_SWCLK/CP_TP_TCK_SWCLK"),
+ PINCTRL_PIN(47, "GPIO47/SI1n_RI1"),
+ PINCTRL_PIN(48, "GPIO48/SI2_TXD/BU0_TXD/STRAP5"),
+ PINCTRL_PIN(49, "GPIO49/SI2_RXD/BU0_RXD"),
+ PINCTRL_PIN(50, "GPIO50/SI2_nCTS/BU6_TXD/TPU_TXD"),
+ PINCTRL_PIN(51, "GPIO51/SI2_nRTS/BU6_RXD/TPU_RXD"),
+ PINCTRL_PIN(52, "GPIO52/SI2_nDCD/BU5_RXD"),
+ PINCTRL_PIN(53, "GPIO53/SI2_nDTR_BOUT2/BU5_TXD"),
+ PINCTRL_PIN(54, "GPIO54/SI2_nDSR/BU4_TXD"),
+ PINCTRL_PIN(55, "GPIO55/SI2_RI2/BU4_RXD"),
+ PINCTRL_PIN(56, "GPIO56/R1_RXERR/R1_OEN"),
+ PINCTRL_PIN(57, "GPIO57/R1_MDC/TP_GPIO4"),
+ PINCTRL_PIN(58, "GPIO58/R1_MDIO/TP_GPIO5"),
+ PINCTRL_PIN(59, "GPIO59/H_GPIO06/SMB3D_SDA/SMB19_SDA"),
+ PINCTRL_PIN(60, "GPIO60/H_GPIO07/SMB3D_SCL/SMB19_SCL"),
+ PINCTRL_PIN(61, "GPIO61/SI1_nDTR_BOUT"),
+ PINCTRL_PIN(62, "GPIO62/SI1_nRTS/BU1_nRTS/CP_TDO_SWO/TP_TDO_SWO/CP_TP_TDO_SWO"),
+ PINCTRL_PIN(63, "GPIO63/BU1_TXD1/SI1_TXD"),
+ PINCTRL_PIN(64, "GPIO64/FANIN0"),
+ PINCTRL_PIN(65, "GPIO65/FANIN1"),
+ PINCTRL_PIN(66, "GPIO66/FANIN2"),
+ PINCTRL_PIN(67, "GPIO67/FANIN3"),
+ PINCTRL_PIN(68, "GPIO68/FANIN4"),
+ PINCTRL_PIN(69, "GPIO69/FANIN5"),
+ PINCTRL_PIN(70, "GPIO70/FANIN6"),
+ PINCTRL_PIN(71, "GPIO71/FANIN7"),
+ PINCTRL_PIN(72, "GPIO72/FANIN8"),
+ PINCTRL_PIN(73, "GPIO73/FANIN9"),
+ PINCTRL_PIN(74, "GPIO74/FANIN10"),
+ PINCTRL_PIN(75, "GPIO75/FANIN11"),
+ PINCTRL_PIN(76, "GPIO76/FANIN12"),
+ PINCTRL_PIN(77, "GPIO77/FANIN13"),
+ PINCTRL_PIN(78, "GPIO78/FANIN14"),
+ PINCTRL_PIN(79, "GPIO79/FANIN15"),
+ PINCTRL_PIN(80, "GPIO80/PWM0"),
+ PINCTRL_PIN(81, "GPIO81/PWM1"),
+ PINCTRL_PIN(82, "GPIO82/PWM2"),
+ PINCTRL_PIN(83, "GPIO83/PWM3"),
+ PINCTRL_PIN(84, "GPIO84/R2_TXD0"),
+ PINCTRL_PIN(85, "GPIO85/R2_TXD1"),
+ PINCTRL_PIN(86, "GPIO86/R2_TXEN"),
+ PINCTRL_PIN(87, "GPIO87/R2_RXD0"),
+ PINCTRL_PIN(88, "GPIO88/R2_RXD1"),
+ PINCTRL_PIN(89, "GPIO89/R2_CRSDV"),
+ PINCTRL_PIN(90, "GPIO90/R2_RXERR/R2_OEN"),
+ PINCTRL_PIN(91, "GPIO91/R2_MDC/CP1_GPIO6/TP_GPIO0"),
+ PINCTRL_PIN(92, "GPIO92/R2_MDIO/CP1_GPIO7/TP_GPIO1"),
+ PINCTRL_PIN(93, "GPIO93/GA20/SMB5D_SCL"),
+ PINCTRL_PIN(94, "GPIO94/nKBRST/SMB5D_SDA"),
+ PINCTRL_PIN(95, "GPIO95/nESPIRST/LPC_nLRESET"),
+ PINCTRL_PIN(96, "GPIO96/CP1_GPIO7/BU2_TXD/TP_GPIO7"),
+ PINCTRL_PIN(97, "GPIO97/CP1_GPIO6/BU2_RXD/TP_GPIO6"),
+ PINCTRL_PIN(98, "GPIO98/CP1_GPIO5/BU4_TXD/TP_GPIO5"),
+ PINCTRL_PIN(99, "GPIO99/CP1_GPIO4/BU4_RXD/TP_GPIO4"),
+ PINCTRL_PIN(100, "GPIO100/CP1_GPIO3/BU5_TXD/TP_GPIO3"),
+ PINCTRL_PIN(101, "GPIO101/CP1_GPIO2/BU5_RXD/TP_GPIO2"),
+ PINCTRL_PIN(102, "GPIO102/HSYNC"),
+ PINCTRL_PIN(103, "GPIO103/VSYNC"),
+ PINCTRL_PIN(104, "GPIO104/DDC_SCL"),
+ PINCTRL_PIN(105, "GPIO105/DDC_SDA"),
+ PINCTRL_PIN(106, "GPIO106/I3C5_SCL"),
+ PINCTRL_PIN(107, "GPIO107/I3C5_SDA"),
+ PINCTRL_PIN(108, "GPIO108/SG1_MDC"),
+ PINCTRL_PIN(109, "GPIO109/SG1_MDIO"),
+ PINCTRL_PIN(110, "GPIO110/RG2_TXD0/DDRV0/R3_TXD0"),
+ PINCTRL_PIN(111, "GPIO111/RG2_TXD1/DDRV1/R3_TXD1"),
+ PINCTRL_PIN(112, "GPIO112/RG2_TXD2/DDRV2"),
+ PINCTRL_PIN(113, "GPIO113/RG2_TXD3/DDRV3"),
+ PINCTRL_PIN(114, "GPIO114/SMB0_SCL"),
+ PINCTRL_PIN(115, "GPIO115/SMB0_SDA"),
+ PINCTRL_PIN(116, "GPIO116/SMB1_SCL"),
+ PINCTRL_PIN(117, "GPIO117/SMB1_SDA"),
+ PINCTRL_PIN(118, "GPIO118/SMB2_SCL"),
+ PINCTRL_PIN(119, "GPIO119/SMB2_SDA"),
+ PINCTRL_PIN(120, "GPIO120/SMB2C_SDA"),
+ PINCTRL_PIN(121, "GPIO121/SMB2C_SCL"),
+ PINCTRL_PIN(122, "GPIO122/SMB2B_SDA"),
+ PINCTRL_PIN(123, "GPIO123/SMB2B_SCL"),
+ PINCTRL_PIN(124, "GPIO124/SMB1C_SDA/CP1_GPIO3"),
+ PINCTRL_PIN(125, "GPIO125/SMB1C_SCL/CP1_GPIO2"),
+ PINCTRL_PIN(126, "GPIO126/SMB1B_SDA/CP1_GPIO1"),
+ PINCTRL_PIN(127, "GPIO127/SMB1B_SCL/CP1_GPIO0"),
+ PINCTRL_PIN(128, "GPIO128/SMB824_SCL"),
+ PINCTRL_PIN(129, "GPIO129/SMB824_SDA"),
+ PINCTRL_PIN(130, "GPIO130/SMB925_SCL"),
+ PINCTRL_PIN(131, "GPIO131/SMB925_SDA"),
+ PINCTRL_PIN(132, "GPIO132/SMB1026_SCL"),
+ PINCTRL_PIN(133, "GPIO133/SMB1026_SDA"),
+ PINCTRL_PIN(134, "GPIO134/SMB11_SCL/SMB23B_SCL"),
+ PINCTRL_PIN(135, "GPIO135/SMB11_SDA/SMB23B_SDA"),
+ PINCTRL_PIN(136, "GPIO136/JM1_TCK"),
+ PINCTRL_PIN(137, "GPIO137/JM1_TDO"),
+ PINCTRL_PIN(138, "GPIO138/JM1_TMS"),
+ PINCTRL_PIN(139, "GPIO139/JM1_TDI"),
+ PINCTRL_PIN(140, "GPIO140/JM1_nTRST"),
+ PINCTRL_PIN(141, "GPIO141/SMB7B_SCL"),
+ PINCTRL_PIN(142, "GPIO142/SMB7D_SCL/TPSMB1_SCL"),
+ PINCTRL_PIN(143, "GPIO143/SMB7D_SDA/TPSMB1_SDA"),
+ PINCTRL_PIN(144, "GPIO144/PWM4"),
+ PINCTRL_PIN(145, "GPIO145/PWM5"),
+ PINCTRL_PIN(146, "GPIO146/PWM6"),
+ PINCTRL_PIN(147, "GPIO147/PWM7"),
+ PINCTRL_PIN(148, "GPIO148/MMC_DT4"),
+ PINCTRL_PIN(149, "GPIO149/MMC_DT5"),
+ PINCTRL_PIN(150, "GPIO150/MMC_DT6"),
+ PINCTRL_PIN(151, "GPIO151/MMC_DT7"),
+ PINCTRL_PIN(152, "GPIO152/MMC_CLK"),
+ PINCTRL_PIN(153, "GPIO153/MMC_WP"),
+ PINCTRL_PIN(154, "GPIO154/MMC_CMD"),
+ PINCTRL_PIN(155, "GPIO155/MMC_nCD/MMC_nRSTLK"),
+ PINCTRL_PIN(156, "GPIO156/MMC_DT0"),
+ PINCTRL_PIN(157, "GPIO157/MMC_DT1"),
+ PINCTRL_PIN(158, "GPIO158/MMC_DT2"),
+ PINCTRL_PIN(159, "GPIO159/MMC_DT3"),
+ PINCTRL_PIN(160, "GPIO160/CLKOUT/RNGOSCOUT/GFXBYPCK"),
+ PINCTRL_PIN(161, "GPIO161/ESPI_nCS/LPC_nLFRAME"),
+ PINCTRL_PIN(162, "GPIO162/SERIRQ"),
+ PINCTRL_PIN(163, "GPIO163/ESPI_CK/LPC_LCLK"),
+ PINCTRL_PIN(164, "GPIO164/ESPI_IO0/LPC_LAD0"),
+ PINCTRL_PIN(165, "GPIO165/ESPI_IO1/LPC_LAD1"),
+ PINCTRL_PIN(166, "GPIO166/ESPI_IO2/LPC_LAD2"),
+ PINCTRL_PIN(167, "GPIO167/ESPI_IO3/LPC_LAD3"),
+ PINCTRL_PIN(168, "GPIO168/ESPI_nALERT/LPC_nCLKRUN"),
+ PINCTRL_PIN(169, "GPIO169/nSCIPME/SMB21_SCL"),
+ PINCTRL_PIN(170, "GPIO170/nSMI/SMB21_SDA"),
+ PINCTRL_PIN(171, "GPIO171/SMB6_SCL"),
+ PINCTRL_PIN(172, "GPIO172/SMB6_SDA"),
+ PINCTRL_PIN(173, "GPIO173/SMB7_SCL"),
+ PINCTRL_PIN(174, "GPIO174/SMB7_SDA"),
+ PINCTRL_PIN(175, "GPIO175/SPI1_CK/FANIN19/FM1_CK"),
+ PINCTRL_PIN(176, "GPIO176/SPI1_DO/FANIN18/FM1_DO/STRAP9"),
+ PINCTRL_PIN(177, "GPIO177/SPI1_DI/FANIN17/FM1_D1/STRAP10"),
+ PINCTRL_PIN(178, "GPIO178/R1_TXD0"),
+ PINCTRL_PIN(179, "GPIO179/R1_TXD1"),
+ PINCTRL_PIN(180, "GPIO180/R1_TXEN"),
+ PINCTRL_PIN(181, "GPIO181/R1_RXD0"),
+ PINCTRL_PIN(182, "GPIO182/R1_RXD1"),
+ PINCTRL_PIN(183, "GPIO183/SPI3_SEL"),
+ PINCTRL_PIN(184, "GPIO184/SPI3_D0/STRAP13"),
+ PINCTRL_PIN(185, "GPIO185/SPI3_D1"),
+ PINCTRL_PIN(186, "GPIO186/SPI3_nCS0"),
+ PINCTRL_PIN(187, "GPO187/SPI3_nCS1_SMB14B_SDA"),
+ PINCTRL_PIN(188, "GPIO188/SPI3_D2/SPI3_nCS2"),
+ PINCTRL_PIN(189, "GPIO189/SPI3_D3/SPI3_nCS3"),
+ PINCTRL_PIN(190, "GPIO190/nPRD_SMI"),
+ PINCTRL_PIN(191, "GPIO191/SPI1_D1/FANIN17/FM1_D1/STRAP10"),
+ PINCTRL_PIN(192, "GPIO192/SPI1_D3/SPI_nCS3/FM1_D3/SMB15_SCL"),
+ PINCTRL_PIN(193, "GPIO193/R1_CRSDV"),
+ PINCTRL_PIN(194, "GPIO194/SMB0B_SCL/FM0_CK"),
+ PINCTRL_PIN(195, "GPIO195/SMB0B_SDA/FM0_D0"),
+ PINCTRL_PIN(196, "GPIO196/SMB0C_SCL/FM0_D1"),
+ PINCTRL_PIN(197, "GPIO197/SMB0DEN/FM0_D3"),
+ PINCTRL_PIN(198, "GPIO198/SMB0D_SDA/FM0_D2"),
+ PINCTRL_PIN(199, "GPIO199/SMB0D_SCL/FM0_CSO"),
+ PINCTRL_PIN(200, "GPIO200/R2_CK"),
+ PINCTRL_PIN(201, "GPIO201/R1_CK"),
+ PINCTRL_PIN(202, "GPIO202/SMB0C_SDA/FM0_CSI"),
+ PINCTRL_PIN(203, "GPIO203/SPI1_nCS0/FANIN16/FM1_CSI"),
+ PINCTRL_PIN(208, "GPIO208/RG2_TXC/DVCK"),
+ PINCTRL_PIN(209, "GPIO209/RG2_TXCTL/DDRV4/R3_TXEN"),
+ PINCTRL_PIN(210, "GPIO210/RG2_RXD0/DDRV5/R3_RXD0"),
+ PINCTRL_PIN(211, "GPIO211/RG2_RXD1/DDRV6/R3_RXD1"),
+ PINCTRL_PIN(212, "GPIO212/RG2_RXD2/DDRV7/R3_RXD2"),
+ PINCTRL_PIN(213, "GPIO213/RG2_RXD3/DDRV8/R3_OEN"),
+ PINCTRL_PIN(214, "GPIO214/RG2_RXC/DDRV9/R3_CK"),
+ PINCTRL_PIN(215, "GPIO215/RG2_RXCTL/DDRV10/R3_CRSDV"),
+ PINCTRL_PIN(216, "GPIO216/RG2_MDC/DDRV11"),
+ PINCTRL_PIN(217, "GPIO217/RG2_MDIO/DVHSYNC"),
+ PINCTRL_PIN(218, "GPIO218/nWDO1/SMB16_SCL"),
+ PINCTRL_PIN(219, "GPIO219/nWDO2/SMB16_SDA"),
+ PINCTRL_PIN(220, "GPIO220/SMB12_SCL/PWM8"),
+ PINCTRL_PIN(221, "GPIO221/SMB12_SDA/PWM9"),
+ PINCTRL_PIN(222, "GPIO222/SMB13_SCL"),
+ PINCTRL_PIN(223, "GPIO223/SMB13_SDA"),
+ PINCTRL_PIN(224, "GPIO224/SPIX_CK/FM2_CK"),
+ PINCTRL_PIN(225, "GPO225/SPIX_D0/FM2_D0/STRAP1"),
+ PINCTRL_PIN(226, "GPO226/SPIX_D1/FM2_D1/STRAP2"),
+ PINCTRL_PIN(227, "GPIO227/SPIX_nCS0/FM2_CSI"),
+ PINCTRL_PIN(228, "GPIO228/SPIX_nCS1/FM2_CSO"),
+ PINCTRL_PIN(229, "GPO229/SPIX_D2/FM2_D2/STRAP3"),
+ PINCTRL_PIN(230, "GPO230/SPIX_D3/FM2_D3/STRAP6"),
+ PINCTRL_PIN(231, "GPIO231/EP_nCLKREQ"),
+ PINCTRL_PIN(233, "GPIO233/SPI1_nCS1/FM1_CSO"),
+ PINCTRL_PIN(234, "GPIO234/PWM10/SMB20_SCL"),
+ PINCTRL_PIN(235, "GPIO235/PWM11/SMB20_SDA"),
+ PINCTRL_PIN(240, "GPIO240/I3C0_SCL"),
+ PINCTRL_PIN(241, "GPIO241/I3C0_SDA"),
+ PINCTRL_PIN(242, "GPIO242/I3C1_SCL"),
+ PINCTRL_PIN(243, "GPIO243/I3C1_SDA"),
+ PINCTRL_PIN(244, "GPIO244/I3C2_SCL"),
+ PINCTRL_PIN(245, "GPIO245/I3C2_SDA"),
+ PINCTRL_PIN(246, "GPIO246/I3C3_SCL"),
+ PINCTRL_PIN(247, "GPIO247/I3C3_SDA"),
+ PINCTRL_PIN(250, "GPIO250/RG2_REFCK/DVVSYNC"),
+ PINCTRL_PIN(251, "JM2/CP1_GPIO"),
+};
+
+/* Enable mode in pin group */
+static void npcm8xx_setfunc(struct regmap *gcr_regmap, const unsigned int *pin,
+ int pin_number, int mode)
+{
+ const struct npcm8xx_pincfg *cfg;
+ int i;
+
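+ /*
+  * Set the mux select bit of the alternate function that matches the
+  * requested mode and clear the select bits of the pin's other
+  * functions, so only one function is routed to the pin.
+  */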
+ for (i = 0 ; i < pin_number ; i++) {
+ cfg = &pincfg[pin[i]];
+ if (mode == fn_gpio || cfg->fn0 == mode || cfg->fn1 == mode ||
+ cfg->fn2 == mode || cfg->fn3 == mode || cfg->fn4 == mode) {
+ if (cfg->reg0)
+ regmap_update_bits(gcr_regmap, cfg->reg0,
+ BIT(cfg->bit0),
+ (cfg->fn0 == mode) ?
+ BIT(cfg->bit0) : 0);
+ if (cfg->reg1)
+ regmap_update_bits(gcr_regmap, cfg->reg1,
+ BIT(cfg->bit1),
+ (cfg->fn1 == mode) ?
+ BIT(cfg->bit1) : 0);
+ if (cfg->reg2)
+ regmap_update_bits(gcr_regmap, cfg->reg2,
+ BIT(cfg->bit2),
+ (cfg->fn2 == mode) ?
+ BIT(cfg->bit2) : 0);
+ if (cfg->reg3)
+ regmap_update_bits(gcr_regmap, cfg->reg3,
+ BIT(cfg->bit3),
+ (cfg->fn3 == mode) ?
+ BIT(cfg->bit3) : 0);
+ if (cfg->reg4)
+ regmap_update_bits(gcr_regmap, cfg->reg4,
+ BIT(cfg->bit4),
+ (cfg->fn4 == mode) ?
+ BIT(cfg->bit4) : 0);
+ }
+ }
+}
+
+static int npcm8xx_get_slew_rate(struct npcm8xx_gpio *bank,
+ struct regmap *gcr_regmap, unsigned int pin)
+{
+ int gpio = pin % bank->gc.ngpio;
+ unsigned long pinmask = BIT(gpio);
+ u32 val;
+
+ if (pincfg[pin].flag & SLEW)
+ return ioread32(bank->base + NPCM8XX_GP_N_OSRC) & pinmask;
+ /* LPC Slew rate in SRCNT register */
+ if (pincfg[pin].flag & SLEWLPC) {
+ regmap_read(gcr_regmap, NPCM8XX_GCR_SRCNT, &val);
+ return !!(val & SRCNT_ESPI);
+ }
+
+ return -EINVAL;
+}
+
+static int npcm8xx_set_slew_rate(struct npcm8xx_gpio *bank,
+ struct regmap *gcr_regmap, unsigned int pin,
+ int arg)
+{
+ void __iomem *OSRC_Offset = bank->base + NPCM8XX_GP_N_OSRC;
+ int gpio = BIT(pin % bank->gc.ngpio);
+
+ if (pincfg[pin].flag & SLEW) {
+ switch (arg) {
+ case 0:
+ npcm_gpio_clr(&bank->gc, OSRC_Offset, gpio);
+ return 0;
+ case 1:
+ npcm_gpio_set(&bank->gc, OSRC_Offset, gpio);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (!(pincfg[pin].flag & SLEWLPC))
+ return -EINVAL;
+
+ switch (arg) {
+ case 0:
+ regmap_update_bits(gcr_regmap, NPCM8XX_GCR_SRCNT,
+ SRCNT_ESPI, 0);
+ break;
+ case 1:
+ regmap_update_bits(gcr_regmap, NPCM8XX_GCR_SRCNT,
+ SRCNT_ESPI, SRCNT_ESPI);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int npcm8xx_get_drive_strength(struct pinctrl_dev *pctldev,
+ unsigned int pin)
+{
+ struct npcm8xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
+ struct npcm8xx_gpio *bank =
+ &npcm->gpio_bank[pin / NPCM8XX_GPIO_PER_BANK];
+ int gpio = pin % bank->gc.ngpio;
+ unsigned long pinmask = BIT(gpio);
+ int flg, val;
+ u32 ds = 0;
+
+ flg = pincfg[pin].flag;
+ if (!(flg & DRIVE_STRENGTH_MASK))
+ return -EINVAL;
+
+ val = ioread32(bank->base + NPCM8XX_GP_N_ODSC) & pinmask;
+ ds = val ? DSHI(flg) : DSLO(flg);
+ dev_dbg(bank->gc.parent, "pin %d strength %d = %d\n", pin, val, ds);
+
+ return ds;
+}
+
+static int npcm8xx_set_drive_strength(struct npcm8xx_pinctrl *npcm,
+ unsigned int pin, int nval)
+{
+ struct npcm8xx_gpio *bank =
+ &npcm->gpio_bank[pin / NPCM8XX_GPIO_PER_BANK];
+ int gpio = BIT(pin % bank->gc.ngpio);
+ int v;
+
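+ /* Each pin offers two drive strengths: DSLO() with ODSC clear, DSHI() with ODSC set */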
+ v = pincfg[pin].flag & DRIVE_STRENGTH_MASK;
+
+ if (DSLO(v) == nval)
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_ODSC, gpio);
+ else if (DSHI(v) == nval)
+ npcm_gpio_set(&bank->gc, bank->base + NPCM8XX_GP_N_ODSC, gpio);
+ else
+ return -ENOTSUPP;
+
+ return 0;
+}
+
+/* pinctrl_ops */
+static int npcm8xx_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(npcm8xx_pingroups);
+}
+
+static const char *npcm8xx_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ return npcm8xx_pingroups[selector].name;
+}
+
+static int npcm8xx_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *npins)
+{
+ *npins = npcm8xx_pingroups[selector].npins;
+ *pins = npcm8xx_pingroups[selector].pins;
+
+ return 0;
+}
+
+static int npcm8xx_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np_config,
+ struct pinctrl_map **map,
+ u32 *num_maps)
+{
+ return pinconf_generic_dt_node_to_map(pctldev, np_config,
+ map, num_maps,
+ PIN_MAP_TYPE_INVALID);
+}
+
+static void npcm8xx_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map, u32 num_maps)
+{
+ kfree(map);
+}
+
+static const struct pinctrl_ops npcm8xx_pinctrl_ops = {
+ .get_groups_count = npcm8xx_get_groups_count,
+ .get_group_name = npcm8xx_get_group_name,
+ .get_group_pins = npcm8xx_get_group_pins,
+ .dt_node_to_map = npcm8xx_dt_node_to_map,
+ .dt_free_map = npcm8xx_dt_free_map,
+};
+
+static int npcm8xx_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(npcm8xx_funcs);
+}
+
+static const char *npcm8xx_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int function)
+{
+ return npcm8xx_funcs[function].name;
+}
+
+static int npcm8xx_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int function,
+ const char * const **groups,
+ unsigned int * const ngroups)
+{
+ *ngroups = npcm8xx_funcs[function].ngroups;
+ *groups = npcm8xx_funcs[function].groups;
+
+ return 0;
+}
+
+static int npcm8xx_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int function,
+ unsigned int group)
+{
+ struct npcm8xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
+
+ npcm8xx_setfunc(npcm->gcr_regmap, npcm8xx_pingroups[group].pins,
+ npcm8xx_pingroups[group].npins, group);
+
+ return 0;
+}
+
+static int npcm8xx_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ struct npcm8xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
+ const unsigned int *pin = &offset;
+ int mode = fn_gpio;
+
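+ /*
+  * Pins 183-189 select GPIO through a dedicated mux function
+  * (gpio1836/gpo187/gpio1889), so enable that function instead of
+  * the generic GPIO mode.
+  */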
+ if (pin[0] >= 183 && pin[0] <= 189)
+ mode = pincfg[pin[0]].fn0;
+
+ npcm8xx_setfunc(npcm->gcr_regmap, &offset, 1, mode);
+
+ return 0;
+}
+
+static void npcm8xx_gpio_request_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ struct npcm8xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
+ int virq;
+
+ virq = irq_find_mapping(npcm->domain, offset);
+ if (virq)
+ irq_dispose_mapping(virq);
+}
+
+static int npcm_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset, bool input)
+{
+ struct npcm8xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
+ struct npcm8xx_gpio *bank =
+ &npcm->gpio_bank[offset / NPCM8XX_GPIO_PER_BANK];
+ int gpio = BIT(offset % bank->gc.ngpio);
+
+ if (input)
+ iowrite32(gpio, bank->base + NPCM8XX_GP_N_OEC);
+ else
+ iowrite32(gpio, bank->base + NPCM8XX_GP_N_OES);
+
+ return 0;
+}
+
+static const struct pinmux_ops npcm8xx_pinmux_ops = {
+ .get_functions_count = npcm8xx_get_functions_count,
+ .get_function_name = npcm8xx_get_function_name,
+ .get_function_groups = npcm8xx_get_function_groups,
+ .set_mux = npcm8xx_pinmux_set_mux,
+ .gpio_request_enable = npcm8xx_gpio_request_enable,
+ .gpio_disable_free = npcm8xx_gpio_request_free,
+ .gpio_set_direction = npcm_gpio_set_direction,
+};
+
+static int debounce_timing_setting(struct npcm8xx_gpio *bank, u32 gpio,
+ u32 nanosecs)
+{
+ void __iomem *DBNCS_offset = bank->base + NPCM8XX_GP_N_DBNCS0 + (gpio / 4);
+ int gpio_debounce = (gpio % 16) * 2, debounce_select, i;
+ u32 dbncp_val, dbncp_val_mod;
+
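+ /*
+  * Each bank shares four debounce period registers (DBNCP0-3); every
+  * GPIO selects one of them via a 2-bit field in DBNCS0/DBNCS1. Reuse
+  * a period register that already holds the requested time, otherwise
+  * claim a free one and program it.
+  */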
+ for (i = 0 ; i < NPCM8XX_DEBOUNCE_MAX ; i++) {
+ if (bank->debounce.set_val[i]) {
+ if (bank->debounce.nanosec_val[i] == nanosecs) {
+ debounce_select = i << gpio_debounce;
+ npcm_gpio_set(&bank->gc, DBNCS_offset,
+ debounce_select);
+ break;
+ }
+ } else {
+ bank->debounce.set_val[i] = true;
+ bank->debounce.nanosec_val[i] = nanosecs;
+ debounce_select = i << gpio_debounce;
+ npcm_gpio_set(&bank->gc, DBNCS_offset, debounce_select);
+ switch (nanosecs) {
+ case 1 ... 1040:
+ iowrite32(0, bank->base + NPCM8XX_GP_N_DBNCP0 + (i * 4));
+ break;
+ case 1041 ... 1640:
+ iowrite32(0x10, bank->base + NPCM8XX_GP_N_DBNCP0 + (i * 4));
+ break;
+ case 1641 ... 2280:
+ iowrite32(0x20, bank->base + NPCM8XX_GP_N_DBNCP0 + (i * 4));
+ break;
+ case 2281 ... 2700:
+ iowrite32(0x30, bank->base + NPCM8XX_GP_N_DBNCP0 + (i * 4));
+ break;
+ case 2701 ... 2856:
+ iowrite32(0x40, bank->base + NPCM8XX_GP_N_DBNCP0 + (i * 4));
+ break;
+ case 2857 ... 3496:
+ iowrite32(0x50, bank->base + NPCM8XX_GP_N_DBNCP0 + (i * 4));
+ break;
+ case 3497 ... 4136:
+ iowrite32(0x60, bank->base + NPCM8XX_GP_N_DBNCP0 + (i * 4));
+ break;
+ case 4137 ... 5025:
+ iowrite32(0x70, bank->base + NPCM8XX_GP_N_DBNCP0 + (i * 4));
+ break;
+ default:
+ dbncp_val = DIV_ROUND_CLOSEST(nanosecs, NPCM8XX_DEBOUNCE_NSEC);
+ if (dbncp_val > NPCM8XX_DEBOUNCE_MAX_VAL)
+ return -ENOTSUPP;
+ dbncp_val_mod = dbncp_val & GENMASK(3, 0);
+ if (dbncp_val_mod > GENMASK(2, 0))
+ dbncp_val += 0x10;
+ iowrite32(dbncp_val & NPCM8XX_DEBOUNCE_VAL_MASK,
+ bank->base + NPCM8XX_GP_N_DBNCP0 + (i * 4));
+ break;
+ }
+ break;
+ }
+ }
+
+ if (i == 4)
+ return -ENOTSUPP;
+
+ return 0;
+}
+
+static int npcm_set_debounce(struct npcm8xx_pinctrl *npcm, unsigned int pin,
+ u32 nanosecs)
+{
+ struct npcm8xx_gpio *bank =
+ &npcm->gpio_bank[pin / NPCM8XX_GPIO_PER_BANK];
+ int gpio = BIT(pin % bank->gc.ngpio);
+ int ret;
+
+ if (nanosecs) {
+ ret = debounce_timing_setting(bank, pin % bank->gc.ngpio,
+ nanosecs);
+ if (ret)
+ dev_err(npcm->dev, "Pin %d: all four debounce timing values are in use, please use one of the existing values\n", pin);
+ else
+ npcm_gpio_set(&bank->gc, bank->base + NPCM8XX_GP_N_DBNC,
+ gpio);
+ return ret;
+ }
+
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_DBNC, gpio);
+
+ return 0;
+}
+
+/* pinconf_ops */
+static int npcm8xx_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ struct npcm8xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
+ struct npcm8xx_gpio *bank =
+ &npcm->gpio_bank[pin / NPCM8XX_GPIO_PER_BANK];
+ int gpio = pin % bank->gc.ngpio;
+ unsigned long pinmask = BIT(gpio);
+ u32 ie, oe, pu, pd;
+ int rc = 0;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ pu = ioread32(bank->base + NPCM8XX_GP_N_PU) & pinmask;
+ pd = ioread32(bank->base + NPCM8XX_GP_N_PD) & pinmask;
+ if (param == PIN_CONFIG_BIAS_DISABLE)
+ rc = !pu && !pd;
+ else if (param == PIN_CONFIG_BIAS_PULL_UP)
+ rc = pu && !pd;
+ else if (param == PIN_CONFIG_BIAS_PULL_DOWN)
+ rc = !pu && pd;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_INPUT_ENABLE:
+ ie = ioread32(bank->base + NPCM8XX_GP_N_IEM) & pinmask;
+ oe = ioread32(bank->base + NPCM8XX_GP_N_OE) & pinmask;
+ if (param == PIN_CONFIG_INPUT_ENABLE)
+ rc = (ie && !oe);
+ else if (param == PIN_CONFIG_OUTPUT)
+ rc = (!ie && oe);
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ rc = !(ioread32(bank->base + NPCM8XX_GP_N_OTYP) & pinmask);
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ rc = ioread32(bank->base + NPCM8XX_GP_N_OTYP) & pinmask;
+ break;
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ rc = ioread32(bank->base + NPCM8XX_GP_N_DBNC) & pinmask;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ rc = npcm8xx_get_drive_strength(pctldev, pin);
+ if (rc)
+ *config = pinconf_to_config_packed(param, rc);
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ rc = npcm8xx_get_slew_rate(bank, npcm->gcr_regmap, pin);
+ if (rc >= 0)
+ *config = pinconf_to_config_packed(param, rc);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (!rc)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int npcm8xx_config_set_one(struct npcm8xx_pinctrl *npcm,
+ unsigned int pin, unsigned long config)
+{
+ enum pin_config_param param = pinconf_to_config_param(config);
+ struct npcm8xx_gpio *bank =
+ &npcm->gpio_bank[pin / NPCM8XX_GPIO_PER_BANK];
+ u32 arg = pinconf_to_config_argument(config);
+ int gpio = BIT(pin % bank->gc.ngpio);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_PU, gpio);
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_PD, gpio);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_PU, gpio);
+ npcm_gpio_set(&bank->gc, bank->base + NPCM8XX_GP_N_PD, gpio);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_PD, gpio);
+ npcm_gpio_set(&bank->gc, bank->base + NPCM8XX_GP_N_PU, gpio);
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ iowrite32(gpio, bank->base + NPCM8XX_GP_N_OEC);
+ bank->direction_input(&bank->gc, pin % bank->gc.ngpio);
+ break;
+ case PIN_CONFIG_OUTPUT:
+ bank->direction_output(&bank->gc, pin % bank->gc.ngpio, arg);
+ iowrite32(gpio, bank->base + NPCM8XX_GP_N_OES);
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_OTYP, gpio);
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ npcm_gpio_set(&bank->gc, bank->base + NPCM8XX_GP_N_OTYP, gpio);
+ break;
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ return npcm_set_debounce(npcm, pin, arg * 1000);
+ case PIN_CONFIG_SLEW_RATE:
+ return npcm8xx_set_slew_rate(bank, npcm->gcr_regmap, pin, arg);
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ return npcm8xx_set_drive_strength(npcm, pin, arg);
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int npcm8xx_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct npcm8xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
+ int rc;
+
+ while (num_configs--) {
+ rc = npcm8xx_config_set_one(npcm, pin, *configs++);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops npcm8xx_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = npcm8xx_config_get,
+ .pin_config_set = npcm8xx_config_set,
+};
+
+/* pinctrl_desc */
+static struct pinctrl_desc npcm8xx_pinctrl_desc = {
+ .name = "npcm8xx-pinctrl",
+ .pins = npcm8xx_pins,
+ .npins = ARRAY_SIZE(npcm8xx_pins),
+ .pctlops = &npcm8xx_pinctrl_ops,
+ .pmxops = &npcm8xx_pinmux_ops,
+ .confops = &npcm8xx_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+static int npcmgpio_add_pin_ranges(struct gpio_chip *chip)
+{
+ struct npcm8xx_gpio *bank = gpiochip_get_data(chip);
+
+ return gpiochip_add_pin_range(&bank->gc, dev_name(chip->parent),
+ bank->pinctrl_id, bank->gc.base,
+ bank->gc.ngpio);
+}
+
+static int npcm8xx_gpio_fw(struct npcm8xx_pinctrl *pctrl)
+{
+ struct fwnode_reference_args args;
+ struct device *dev = pctrl->dev;
+ struct fwnode_handle *child;
+ int ret = -ENXIO;
+ int id = 0, i;
+
+ for_each_gpiochip_node(dev, child) {
+ pctrl->gpio_bank[id].base = fwnode_iomap(child, 0);
+ if (!pctrl->gpio_bank[id].base)
+ return dev_err_probe(dev, -ENXIO, "fwnode_iomap id %d failed\n", id);
+
+ ret = bgpio_init(&pctrl->gpio_bank[id].gc, dev, 4,
+ pctrl->gpio_bank[id].base + NPCM8XX_GP_N_DIN,
+ pctrl->gpio_bank[id].base + NPCM8XX_GP_N_DOUT,
+ NULL,
+ NULL,
+ pctrl->gpio_bank[id].base + NPCM8XX_GP_N_IEM,
+ BGPIOF_READ_OUTPUT_REG_SET);
+ if (ret)
+ return dev_err_probe(dev, ret, "bgpio_init() failed\n");
+
+ ret = fwnode_property_get_reference_args(child, "gpio-ranges", NULL, 3, 0, &args);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "gpio-ranges fail for GPIO bank %u\n", id);
+
+ ret = fwnode_irq_get(child, 0);
+ if (!ret)
+ return dev_err_probe(dev, ret, "No IRQ for GPIO bank %u\n", id);
+
+ pctrl->gpio_bank[id].irq = ret;
+ pctrl->gpio_bank[id].irq_chip = npcmgpio_irqchip;
+ pctrl->gpio_bank[id].irqbase = id * NPCM8XX_GPIO_PER_BANK;
+ pctrl->gpio_bank[id].pinctrl_id = args.args[0];
+ pctrl->gpio_bank[id].gc.base = -1;
+ pctrl->gpio_bank[id].gc.ngpio = args.args[2];
+ pctrl->gpio_bank[id].gc.owner = THIS_MODULE;
+ pctrl->gpio_bank[id].gc.parent = dev;
+ pctrl->gpio_bank[id].gc.fwnode = child;
+ pctrl->gpio_bank[id].gc.label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", child);
+ pctrl->gpio_bank[id].gc.dbg_show = npcmgpio_dbg_show;
+ pctrl->gpio_bank[id].direction_input = pctrl->gpio_bank[id].gc.direction_input;
+ pctrl->gpio_bank[id].gc.direction_input = npcmgpio_direction_input;
+ pctrl->gpio_bank[id].direction_output = pctrl->gpio_bank[id].gc.direction_output;
+ pctrl->gpio_bank[id].gc.direction_output = npcmgpio_direction_output;
+ pctrl->gpio_bank[id].request = pctrl->gpio_bank[id].gc.request;
+ pctrl->gpio_bank[id].gc.request = npcmgpio_gpio_request;
+ pctrl->gpio_bank[id].gc.free = pinctrl_gpio_free;
+ for (i = 0 ; i < NPCM8XX_DEBOUNCE_MAX ; i++)
+ pctrl->gpio_bank[id].debounce.set_val[i] = false;
+ pctrl->gpio_bank[id].gc.add_pin_ranges = npcmgpio_add_pin_ranges;
+ id++;
+ }
+
+ pctrl->bank_num = id;
+ return ret;
+}
+
+static int npcm8xx_gpio_register(struct npcm8xx_pinctrl *pctrl)
+{
+ int ret, id;
+
+ for (id = 0 ; id < pctrl->bank_num ; id++) {
+ struct gpio_irq_chip *girq;
+
+ girq = &pctrl->gpio_bank[id].gc.irq;
+ girq->chip = &pctrl->gpio_bank[id].irq_chip;
+ girq->parent_handler = npcmgpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(pctrl->dev, girq->num_parents,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+
+ girq->parents[0] = pctrl->gpio_bank[id].irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+ ret = devm_gpiochip_add_data(pctrl->dev,
+ &pctrl->gpio_bank[id].gc,
+ &pctrl->gpio_bank[id]);
+ if (ret)
+ return dev_err_probe(pctrl->dev, ret, "Failed to add GPIO chip %u\n", id);
+ }
+
+ return 0;
+}
+
+static int npcm8xx_pinctrl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct npcm8xx_pinctrl *pctrl;
+ int ret;
+
+ pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ pctrl->dev = dev;
+ platform_set_drvdata(pdev, pctrl);
+
+ pctrl->gcr_regmap =
+ syscon_regmap_lookup_by_phandle(dev->of_node, "nuvoton,sysgcr");
+ if (IS_ERR(pctrl->gcr_regmap))
+ return dev_err_probe(dev, PTR_ERR(pctrl->gcr_regmap),
+ "Failed to find nuvoton,sysgcr property\n");
+
+ ret = npcm8xx_gpio_fw(pctrl);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to gpio dt-binding\n");
+
+ pctrl->pctldev = devm_pinctrl_register(dev, &npcm8xx_pinctrl_desc, pctrl);
+ if (IS_ERR(pctrl->pctldev))
+ return dev_err_probe(dev, PTR_ERR(pctrl->pctldev),
+ "Failed to register pinctrl device\n");
+
+ ret = npcm8xx_gpio_register(pctrl);
+ if (ret < 0)
+ dev_err_probe(dev, ret, "Failed to register gpio\n");
+
+ return 0;
+}
+
+static const struct of_device_id npcm8xx_pinctrl_match[] = {
+ { .compatible = "nuvoton,npcm845-pinctrl" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, npcm8xx_pinctrl_match);
+
+static struct platform_driver npcm8xx_pinctrl_driver = {
+ .probe = npcm8xx_pinctrl_probe,
+ .driver = {
+ .name = "npcm8xx-pinctrl",
+ .of_match_table = npcm8xx_pinctrl_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init npcm8xx_pinctrl_register(void)
+{
+ return platform_driver_register(&npcm8xx_pinctrl_driver);
+}
+arch_initcall(npcm8xx_pinctrl_register);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("tomer.maimon@nuvoton.com");
+MODULE_DESCRIPTION("Nuvoton NPCM8XX Pinctrl and GPIO driver");
diff --git a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
index 8a9961ac8712..0cff44b07b29 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
@@ -858,16 +858,6 @@ static int wpcm450_get_group_pins(struct pinctrl_dev *pctldev,
return 0;
}
-static int wpcm450_dt_node_to_map(struct pinctrl_dev *pctldev,
- struct device_node *np_config,
- struct pinctrl_map **map,
- u32 *num_maps)
-{
- return pinconf_generic_dt_node_to_map(pctldev, np_config,
- map, num_maps,
- PIN_MAP_TYPE_INVALID);
-}
-
static void wpcm450_dt_free_map(struct pinctrl_dev *pctldev,
struct pinctrl_map *map, u32 num_maps)
{
@@ -878,7 +868,7 @@ static const struct pinctrl_ops wpcm450_pinctrl_ops = {
.get_groups_count = wpcm450_get_groups_count,
.get_group_name = wpcm450_get_group_name,
.get_group_pins = wpcm450_get_group_pins,
- .dt_node_to_map = wpcm450_dt_node_to_map,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
.dt_free_map = wpcm450_dt_free_map,
};
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 365c4b0ca465..8313cb5f3b3c 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -10,17 +10,19 @@
#define pr_fmt(fmt) "generic pinconfig core: " fmt
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
+#include <linux/array_size.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
#include <linux/slab.h>
-#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinconf.h>
+
#include <linux/pinctrl/pinconf-generic.h>
-#include <linux/of.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+
#include "core.h"
#include "pinconf.h"
#include "pinctrl-utils.h"
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index d9d54065472e..96d853a8f339 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -9,16 +9,18 @@
*/
#define pr_fmt(fmt) "pinconfig core: " fmt
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/slab.h>
+#include <linux/array_size.h>
#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/module.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
+
#include <linux/pinctrl/machine.h>
-#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+
#include "core.h"
#include "pinconf.h"
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 74241b2ff21e..fb58acb843d9 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -1166,7 +1166,7 @@ out2:
return ret;
}
-static int amd_gpio_remove(struct platform_device *pdev)
+static void amd_gpio_remove(struct platform_device *pdev)
{
struct amd_gpio *gpio_dev;
@@ -1174,8 +1174,6 @@ static int amd_gpio_remove(struct platform_device *pdev)
gpiochip_remove(&gpio_dev->gc);
acpi_unregister_wakeup_handler(amd_gpio_check_wake, gpio_dev);
-
- return 0;
}
#ifdef CONFIG_ACPI
@@ -1197,7 +1195,7 @@ static struct platform_driver amd_gpio_driver = {
#endif
},
.probe = amd_gpio_probe,
- .remove = amd_gpio_remove,
+ .remove_new = amd_gpio_remove,
};
module_platform_driver(amd_gpio_driver);
diff --git a/drivers/pinctrl/pinctrl-artpec6.c b/drivers/pinctrl/pinctrl-artpec6.c
index d6c9f9dcff97..dd93f124e0a0 100644
--- a/drivers/pinctrl/pinctrl-artpec6.c
+++ b/drivers/pinctrl/pinctrl-artpec6.c
@@ -970,13 +970,11 @@ static int artpec6_pmx_probe(struct platform_device *pdev)
return 0;
}
-static int artpec6_pmx_remove(struct platform_device *pdev)
+static void artpec6_pmx_remove(struct platform_device *pdev)
{
struct artpec6_pmx *pmx = platform_get_drvdata(pdev);
pinctrl_unregister(pmx->pctl);
-
- return 0;
}
static const struct of_device_id artpec6_pinctrl_match[] = {
@@ -990,7 +988,7 @@ static struct platform_driver artpec6_pmx_driver = {
.of_match_table = artpec6_pinctrl_match,
},
.probe = artpec6_pmx_probe,
- .remove = artpec6_pmx_remove,
+ .remove_new = artpec6_pmx_remove,
};
static int __init artpec6_pmx_init(void)
diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c
index f0e5d87ac50b..6a5f23cf7a2a 100644
--- a/drivers/pinctrl/pinctrl-as3722.c
+++ b/drivers/pinctrl/pinctrl-as3722.c
@@ -500,16 +500,11 @@ static void as3722_gpio_set(struct gpio_chip *chip, unsigned offset,
"GPIO_SIGNAL_OUT_REG update failed: %d\n", ret);
}
-static int as3722_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- return pinctrl_gpio_direction_input(chip->base + offset);
-}
-
static int as3722_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
as3722_gpio_set(chip, offset, value);
- return pinctrl_gpio_direction_output(chip->base + offset);
+ return pinctrl_gpio_direction_output(chip, offset);
}
static int as3722_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
@@ -526,7 +521,7 @@ static const struct gpio_chip as3722_gpio_chip = {
.free = gpiochip_generic_free,
.get = as3722_gpio_get,
.set = as3722_gpio_set,
- .direction_input = as3722_gpio_direction_input,
+ .direction_input = pinctrl_gpio_direction_input,
.direction_output = as3722_gpio_direction_output,
.to_irq = as3722_gpio_to_irq,
.can_sleep = true,
@@ -587,12 +582,11 @@ fail_range_add:
return ret;
}
-static int as3722_pinctrl_remove(struct platform_device *pdev)
+static void as3722_pinctrl_remove(struct platform_device *pdev)
{
struct as3722_pctrl_info *as_pci = platform_get_drvdata(pdev);
gpiochip_remove(&as_pci->gpio_chip);
- return 0;
}
static const struct of_device_id as3722_pinctrl_of_match[] = {
@@ -607,7 +601,7 @@ static struct platform_driver as3722_pinctrl_driver = {
.of_match_table = as3722_pinctrl_of_match,
},
.probe = as3722_pinctrl_probe,
- .remove = as3722_pinctrl_remove,
+ .remove_new = as3722_pinctrl_remove,
};
module_platform_driver(as3722_pinctrl_driver);
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 608f55c5ba5f..5aa9d5c533c6 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -12,10 +12,9 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <linux/pm.h>
+#include <linux/property.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
@@ -1302,8 +1301,8 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
if (!np)
return -ENODEV;
- info->dev = dev;
- info->ops = of_device_get_match_data(dev);
+ info->dev = &pdev->dev;
+ info->ops = device_get_match_data(&pdev->dev);
at91_pinctrl_child_count(info, np);
/*
@@ -1845,7 +1844,7 @@ static int at91_gpio_probe(struct platform_device *pdev)
if (IS_ERR(at91_chip->regbase))
return PTR_ERR(at91_chip->regbase);
- at91_chip->ops = of_device_get_match_data(dev);
+ at91_chip->ops = device_get_match_data(dev);
at91_chip->pioc_virq = irq;
at91_chip->clock = devm_clk_get_enabled(dev, NULL);
diff --git a/drivers/pinctrl/pinctrl-axp209.c b/drivers/pinctrl/pinctrl-axp209.c
index 9f5b3ab8e184..2b4805e74eed 100644
--- a/drivers/pinctrl/pinctrl-axp209.c
+++ b/drivers/pinctrl/pinctrl-axp209.c
@@ -124,11 +124,6 @@ static int axp20x_gpio_get_reg(unsigned int offset)
return -EINVAL;
}
-static int axp20x_gpio_input(struct gpio_chip *chip, unsigned int offset)
-{
- return pinctrl_gpio_direction_input(chip->base + offset);
-}
-
static int axp20x_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct axp20x_pctl *pctl = gpiochip_get_data(chip);
@@ -474,7 +469,7 @@ static int axp20x_pctl_probe(struct platform_device *pdev)
pctl->chip.get = axp20x_gpio_get;
pctl->chip.get_direction = axp20x_gpio_get_direction;
pctl->chip.set = axp20x_gpio_set;
- pctl->chip.direction_input = axp20x_gpio_input;
+ pctl->chip.direction_input = pinctrl_gpio_direction_input;
pctl->chip.direction_output = axp20x_gpio_output;
pctl->desc = of_device_get_match_data(dev);
diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
index 58ca6fac7849..04285c930e94 100644
--- a/drivers/pinctrl/pinctrl-cy8c95x0.c
+++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
@@ -554,7 +554,7 @@ out:
static int cy8c95x0_gpio_direction_input(struct gpio_chip *gc, unsigned int off)
{
- return pinctrl_gpio_direction_input(gc->base + off);
+ return pinctrl_gpio_direction_input(gc, off);
}
static int cy8c95x0_gpio_direction_output(struct gpio_chip *gc,
@@ -571,7 +571,7 @@ static int cy8c95x0_gpio_direction_output(struct gpio_chip *gc,
if (ret)
return ret;
- return pinctrl_gpio_direction_output(gc->base + off);
+ return pinctrl_gpio_direction_output(gc, off);
}
static int cy8c95x0_gpio_get_value(struct gpio_chip *gc, unsigned int off)
@@ -1346,9 +1346,7 @@ static int cy8c95x0_probe(struct i2c_client *client)
chip->dev = &client->dev;
/* Set the device type */
- chip->driver_data = (unsigned long)device_get_match_data(&client->dev);
- if (!chip->driver_data)
- chip->driver_data = i2c_match_id(cy8c95x0_id, client)->driver_data;
+ chip->driver_data = (uintptr_t)i2c_get_match_data(client);
if (!chip->driver_data)
return -ENODEV;
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 2f220a47b749..ee718f6e2556 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -133,6 +133,8 @@ struct ingenic_pinctrl {
struct pinctrl_pin_desc *pdesc;
const struct ingenic_chip_info *info;
+
+ struct gpio_chip *gc;
};
struct ingenic_gpio_chip {
@@ -3558,17 +3560,11 @@ static int ingenic_gpio_get(struct gpio_chip *gc, unsigned int offset)
return (int) ingenic_gpio_get_value(jzgc, offset);
}
-static int ingenic_gpio_direction_input(struct gpio_chip *gc,
- unsigned int offset)
-{
- return pinctrl_gpio_direction_input(gc->base + offset);
-}
-
static int ingenic_gpio_direction_output(struct gpio_chip *gc,
unsigned int offset, int value)
{
ingenic_gpio_set(gc, offset, value);
- return pinctrl_gpio_direction_output(gc->base + offset);
+ return pinctrl_gpio_direction_output(gc, offset);
}
static inline void ingenic_config_pin(struct ingenic_pinctrl *jzpc,
@@ -3678,7 +3674,7 @@ static int ingenic_gpio_irq_request(struct irq_data *data)
irq_hw_number_t irq = irqd_to_hwirq(data);
int ret;
- ret = ingenic_gpio_direction_input(gpio_chip, irq);
+ ret = pinctrl_gpio_direction_input(gpio_chip, irq);
if (ret)
return ret;
@@ -4052,7 +4048,8 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
break;
case PIN_CONFIG_OUTPUT:
- ret = pinctrl_gpio_direction_output(pin);
+ ret = pinctrl_gpio_direction_output(jzpc->gc,
+ pin - jzpc->gc->base);
if (ret)
return ret;
@@ -4172,6 +4169,8 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
if (!jzgc)
return -ENOMEM;
+ jzpc->gc = &jzgc->gc;
+
jzgc->jzpc = jzpc;
jzgc->reg_base = bank * jzpc->info->reg_offset;
@@ -4192,7 +4191,7 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
jzgc->gc.set = ingenic_gpio_set;
jzgc->gc.get = ingenic_gpio_get;
- jzgc->gc.direction_input = ingenic_gpio_direction_input;
+ jzgc->gc.direction_input = pinctrl_gpio_direction_input;
jzgc->gc.direction_output = ingenic_gpio_direction_output;
jzgc->gc.get_direction = ingenic_gpio_get_direction;
jzgc->gc.request = gpiochip_generic_request;
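
[annotation] With the helpers taking (gpio_chip, offset), a pinconf path that only has a flat pin number needs a way back to the chip; the ingenic driver caches a gpio_chip pointer at probe time and subtracts gc->base. A sketch of that translation under the same assumption; names are hypothetical:

#include <linux/gpio/driver.h>
#include <linux/pinctrl/consumer.h>

struct demo_pinctrl {
	struct gpio_chip *gc;	/* cached when the gpiochip is registered */
};

static int demo_pin_to_output(struct demo_pinctrl *priv, unsigned int pin)
{
	/* Translate the flat pin number into a chip-relative offset. */
	return pinctrl_gpio_direction_output(priv->gc, pin - priv->gc->base);
}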
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.h b/drivers/pinctrl/pinctrl-mcp23s08.h
index b15516af7783..105bce9e0f50 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.h
+++ b/drivers/pinctrl/pinctrl-mcp23s08.h
@@ -22,6 +22,14 @@ struct regmap;
struct pinctrl_dev;
+struct mcp23s08_info {
+ const struct regmap_config *regmap;
+ const char *label;
+ unsigned int type;
+ u16 ngpio;
+ bool reg_shift;
+};
+
struct mcp23s08 {
u8 addr;
bool irq_active_high;
diff --git a/drivers/pinctrl/pinctrl-mcp23s08_i2c.c b/drivers/pinctrl/pinctrl-mcp23s08_i2c.c
index 3dd1bd8e73eb..04e8e7d079f0 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08_i2c.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08_i2c.c
@@ -10,9 +10,8 @@
static int mcp230xx_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
+ const struct mcp23s08_info *info;
struct device *dev = &client->dev;
- unsigned int type = id->driver_data;
struct mcp23s08 *mcp;
int ret;
@@ -20,40 +19,21 @@ static int mcp230xx_probe(struct i2c_client *client)
if (!mcp)
return -ENOMEM;
- switch (type) {
- case MCP_TYPE_008:
- mcp->regmap = devm_regmap_init_i2c(client, &mcp23x08_regmap);
- mcp->reg_shift = 0;
- mcp->chip.ngpio = 8;
- mcp->chip.label = "mcp23008";
- break;
-
- case MCP_TYPE_017:
- mcp->regmap = devm_regmap_init_i2c(client, &mcp23x17_regmap);
- mcp->reg_shift = 1;
- mcp->chip.ngpio = 16;
- mcp->chip.label = "mcp23017";
- break;
-
- case MCP_TYPE_018:
- mcp->regmap = devm_regmap_init_i2c(client, &mcp23x17_regmap);
- mcp->reg_shift = 1;
- mcp->chip.ngpio = 16;
- mcp->chip.label = "mcp23018";
- break;
-
- default:
- dev_err(dev, "invalid device type (%d)\n", type);
- return -EINVAL;
- }
+ info = i2c_get_match_data(client);
+ if (!info)
+ return dev_err_probe(dev, -EINVAL, "invalid device type\n");
+ mcp->reg_shift = info->reg_shift;
+ mcp->chip.ngpio = info->ngpio;
+ mcp->chip.label = info->label;
+ mcp->regmap = devm_regmap_init_i2c(client, info->regmap);
if (IS_ERR(mcp->regmap))
return PTR_ERR(mcp->regmap);
mcp->irq = client->irq;
mcp->pinctrl_desc.name = "mcp23xxx-pinctrl";
- ret = mcp23s08_probe_one(mcp, dev, client->addr, type, -1);
+ ret = mcp23s08_probe_one(mcp, dev, client->addr, info->type, -1);
if (ret)
return ret;
@@ -62,36 +42,45 @@ static int mcp230xx_probe(struct i2c_client *client)
return 0;
}
+static const struct mcp23s08_info mcp23008_i2c = {
+ .regmap = &mcp23x08_regmap,
+ .label = "mcp23008",
+ .type = MCP_TYPE_008,
+ .ngpio = 8,
+ .reg_shift = 0,
+};
+
+static const struct mcp23s08_info mcp23017_i2c = {
+ .regmap = &mcp23x17_regmap,
+ .label = "mcp23017",
+ .type = MCP_TYPE_017,
+ .ngpio = 16,
+ .reg_shift = 1,
+};
+
+static const struct mcp23s08_info mcp23018_i2c = {
+ .regmap = &mcp23x17_regmap,
+ .label = "mcp23018",
+ .type = MCP_TYPE_018,
+ .ngpio = 16,
+ .reg_shift = 1,
+};
+
static const struct i2c_device_id mcp230xx_id[] = {
- { "mcp23008", MCP_TYPE_008 },
- { "mcp23017", MCP_TYPE_017 },
- { "mcp23018", MCP_TYPE_018 },
+ { "mcp23008", (kernel_ulong_t)&mcp23008_i2c },
+ { "mcp23017", (kernel_ulong_t)&mcp23017_i2c },
+ { "mcp23018", (kernel_ulong_t)&mcp23018_i2c },
{ }
};
MODULE_DEVICE_TABLE(i2c, mcp230xx_id);
static const struct of_device_id mcp23s08_i2c_of_match[] = {
- {
- .compatible = "microchip,mcp23008",
- .data = (void *) MCP_TYPE_008,
- },
- {
- .compatible = "microchip,mcp23017",
- .data = (void *) MCP_TYPE_017,
- },
- {
- .compatible = "microchip,mcp23018",
- .data = (void *) MCP_TYPE_018,
- },
+ { .compatible = "microchip,mcp23008", .data = &mcp23008_i2c },
+ { .compatible = "microchip,mcp23017", .data = &mcp23017_i2c },
+ { .compatible = "microchip,mcp23018", .data = &mcp23018_i2c },
/* NOTE: The use of the mcp prefix is deprecated and will be removed. */
- {
- .compatible = "mcp,mcp23008",
- .data = (void *) MCP_TYPE_008,
- },
- {
- .compatible = "mcp,mcp23017",
- .data = (void *) MCP_TYPE_017,
- },
+ { .compatible = "mcp,mcp23008", .data = &mcp23008_i2c },
+ { .compatible = "mcp,mcp23017", .data = &mcp23017_i2c },
{ }
};
MODULE_DEVICE_TABLE(of, mcp23s08_i2c_of_match);
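
[annotation] The switch over MCP_TYPE_* becomes a table lookup: each variant gets a const info struct, and both the I2C ID table (via a kernel_ulong_t cast) and the OF table point at it, so probe only dereferences whatever i2c_get_match_data() returns. Sketch of the arrangement with hypothetical names:

#include <linux/i2c.h>
#include <linux/mod_devicetable.h>

struct demo_info {
	const char *label;
	unsigned int ngpio;
};

static const struct demo_info demo_variant_a = {
	.label = "variant-a",
	.ngpio = 8,
};

static const struct i2c_device_id demo_i2c_ids[] = {
	{ "variant-a", (kernel_ulong_t)&demo_variant_a },
	{ }
};

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,variant-a", .data = &demo_variant_a },
	{ }
};

static int demo_probe(struct i2c_client *client)
{
	const struct demo_info *info = i2c_get_match_data(client);

	if (!info)
		return -EINVAL;
	/* ... use info->label, info->ngpio ... */
	return 0;
}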
diff --git a/drivers/pinctrl/pinctrl-mcp23s08_spi.c b/drivers/pinctrl/pinctrl-mcp23s08_spi.c
index ea059b9c5542..4a872fff5fe8 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08_spi.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08_spi.c
@@ -80,21 +80,18 @@ static const struct regmap_bus mcp23sxx_spi_regmap = {
};
static int mcp23s08_spi_regmap_init(struct mcp23s08 *mcp, struct device *dev,
- unsigned int addr, unsigned int type)
+ unsigned int addr,
+ const struct mcp23s08_info *info)
{
- const struct regmap_config *config;
struct regmap_config *copy;
const char *name;
- switch (type) {
+ switch (info->type) {
case MCP_TYPE_S08:
- mcp->reg_shift = 0;
- mcp->chip.ngpio = 8;
mcp->chip.label = devm_kasprintf(dev, GFP_KERNEL, "mcp23s08.%d", addr);
if (!mcp->chip.label)
return -ENOMEM;
- config = &mcp23x08_regmap;
name = devm_kasprintf(dev, GFP_KERNEL, "%d", addr);
if (!name)
return -ENOMEM;
@@ -102,13 +99,10 @@ static int mcp23s08_spi_regmap_init(struct mcp23s08 *mcp, struct device *dev,
break;
case MCP_TYPE_S17:
- mcp->reg_shift = 1;
- mcp->chip.ngpio = 16;
mcp->chip.label = devm_kasprintf(dev, GFP_KERNEL, "mcp23s17.%d", addr);
if (!mcp->chip.label)
return -ENOMEM;
- config = &mcp23x17_regmap;
name = devm_kasprintf(dev, GFP_KERNEL, "%d", addr);
if (!name)
return -ENOMEM;
@@ -116,20 +110,18 @@ static int mcp23s08_spi_regmap_init(struct mcp23s08 *mcp, struct device *dev,
break;
case MCP_TYPE_S18:
- mcp->reg_shift = 1;
- mcp->chip.ngpio = 16;
- mcp->chip.label = "mcp23s18";
-
- config = &mcp23x17_regmap;
- name = config->name;
+ mcp->chip.label = info->label;
+ name = info->regmap->name;
break;
default:
- dev_err(dev, "invalid device type (%d)\n", type);
+ dev_err(dev, "invalid device type (%d)\n", info->type);
return -EINVAL;
}
- copy = devm_kmemdup(dev, config, sizeof(*config), GFP_KERNEL);
+ mcp->reg_shift = info->reg_shift;
+ mcp->chip.ngpio = info->ngpio;
+ copy = devm_kmemdup(dev, info->regmap, sizeof(*info->regmap), GFP_KERNEL);
if (!copy)
return -ENOMEM;
@@ -143,22 +135,17 @@ static int mcp23s08_spi_regmap_init(struct mcp23s08 *mcp, struct device *dev,
static int mcp23s08_probe(struct spi_device *spi)
{
- struct device *dev = &spi->dev;
struct mcp23s08_driver_data *data;
+ const struct mcp23s08_info *info;
+ struct device *dev = &spi->dev;
unsigned long spi_present_mask;
- const void *match;
- unsigned int addr;
unsigned int ngpio = 0;
+ unsigned int addr;
int chips;
- int type;
int ret;
u32 v;
- match = device_get_match_data(dev);
- if (match)
- type = (int)(uintptr_t)match;
- else
- type = spi_get_device_id(spi)->driver_data;
+ info = spi_get_device_match_data(spi);
ret = device_property_read_u32(dev, "microchip,spi-present-mask", &v);
if (ret) {
@@ -187,7 +174,7 @@ static int mcp23s08_probe(struct spi_device *spi)
data->mcp[addr] = &data->chip[--chips];
data->mcp[addr]->irq = spi->irq;
- ret = mcp23s08_spi_regmap_init(data->mcp[addr], dev, addr, type);
+ ret = mcp23s08_spi_regmap_init(data->mcp[addr], dev, addr, info);
if (ret)
return ret;
@@ -197,7 +184,8 @@ static int mcp23s08_probe(struct spi_device *spi)
if (!data->mcp[addr]->pinctrl_desc.name)
return -ENOMEM;
- ret = mcp23s08_probe_one(data->mcp[addr], dev, 0x40 | (addr << 1), type, -1);
+ ret = mcp23s08_probe_one(data->mcp[addr], dev, 0x40 | (addr << 1),
+ info->type, -1);
if (ret < 0)
return ret;
@@ -208,36 +196,43 @@ static int mcp23s08_probe(struct spi_device *spi)
return 0;
}
+static const struct mcp23s08_info mcp23s08_spi = {
+ .regmap = &mcp23x08_regmap,
+ .type = MCP_TYPE_S08,
+ .ngpio = 8,
+ .reg_shift = 0,
+};
+
+static const struct mcp23s08_info mcp23s17_spi = {
+ .regmap = &mcp23x17_regmap,
+ .type = MCP_TYPE_S17,
+ .ngpio = 16,
+ .reg_shift = 1,
+};
+
+static const struct mcp23s08_info mcp23s18_spi = {
+ .regmap = &mcp23x17_regmap,
+ .label = "mcp23s18",
+ .type = MCP_TYPE_S18,
+ .ngpio = 16,
+ .reg_shift = 1,
+};
+
static const struct spi_device_id mcp23s08_ids[] = {
- { "mcp23s08", MCP_TYPE_S08 },
- { "mcp23s17", MCP_TYPE_S17 },
- { "mcp23s18", MCP_TYPE_S18 },
+ { "mcp23s08", (kernel_ulong_t)&mcp23s08_spi },
+ { "mcp23s17", (kernel_ulong_t)&mcp23s17_spi },
+ { "mcp23s18", (kernel_ulong_t)&mcp23s18_spi },
{ }
};
MODULE_DEVICE_TABLE(spi, mcp23s08_ids);
static const struct of_device_id mcp23s08_spi_of_match[] = {
- {
- .compatible = "microchip,mcp23s08",
- .data = (void *) MCP_TYPE_S08,
- },
- {
- .compatible = "microchip,mcp23s17",
- .data = (void *) MCP_TYPE_S17,
- },
- {
- .compatible = "microchip,mcp23s18",
- .data = (void *) MCP_TYPE_S18,
- },
+ { .compatible = "microchip,mcp23s08", .data = &mcp23s08_spi },
+ { .compatible = "microchip,mcp23s17", .data = &mcp23s17_spi },
+ { .compatible = "microchip,mcp23s18", .data = &mcp23s18_spi },
/* NOTE: The use of the mcp prefix is deprecated and will be removed. */
- {
- .compatible = "mcp,mcp23s08",
- .data = (void *) MCP_TYPE_S08,
- },
- {
- .compatible = "mcp,mcp23s17",
- .data = (void *) MCP_TYPE_S17,
- },
+ { .compatible = "mcp,mcp23s08", .data = &mcp23s08_spi },
+ { .compatible = "mcp,mcp23s17", .data = &mcp23s17_spi },
{ }
};
MODULE_DEVICE_TABLE(of, mcp23s08_spi_of_match);
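
[annotation] The SPI side uses the equivalent spi_get_device_match_data(), which also checks firmware match data first and falls back to the spi_device_id table, so the manual device_get_match_data()/spi_get_device_id() dance goes away. Minimal sketch, hypothetical names:

#include <linux/spi/spi.h>

struct demo_info {
	unsigned int ngpio;
};

static int demo_spi_probe(struct spi_device *spi)
{
	const struct demo_info *info = spi_get_device_match_data(spi);

	if (!info)
		return -ENODEV;
	/* ... per-variant setup based on info ... */
	return 0;
}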
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index f8ae2e974221..52aadd6d72a8 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -1776,12 +1776,6 @@ static int ocelot_gpio_get_direction(struct gpio_chip *chip,
return GPIO_LINE_DIRECTION_IN;
}
-static int ocelot_gpio_direction_input(struct gpio_chip *chip,
- unsigned int offset)
-{
- return pinctrl_gpio_direction_input(chip->base + offset);
-}
-
static int ocelot_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
@@ -1795,7 +1789,7 @@ static int ocelot_gpio_direction_output(struct gpio_chip *chip,
regmap_write(info->map, REG(OCELOT_GPIO_OUT_CLR, info, offset),
pin);
- return pinctrl_gpio_direction_output(chip->base + offset);
+ return pinctrl_gpio_direction_output(chip, offset);
}
static const struct gpio_chip ocelot_gpiolib_chip = {
@@ -1804,7 +1798,7 @@ static const struct gpio_chip ocelot_gpiolib_chip = {
.set = ocelot_gpio_set,
.get = ocelot_gpio_get,
.get_direction = ocelot_gpio_get_direction,
- .direction_input = ocelot_gpio_direction_input,
+ .direction_input = pinctrl_gpio_direction_input,
.direction_output = ocelot_gpio_direction_output,
.owner = THIS_MODULE,
};
diff --git a/drivers/pinctrl/pinctrl-rk805.c b/drivers/pinctrl/pinctrl-rk805.c
index 2639a9ee82cd..56d916f2cee6 100644
--- a/drivers/pinctrl/pinctrl-rk805.c
+++ b/drivers/pinctrl/pinctrl-rk805.c
@@ -286,17 +286,11 @@ static void rk805_gpio_set(struct gpio_chip *chip,
offset, value);
}
-static int rk805_gpio_direction_input(struct gpio_chip *chip,
- unsigned int offset)
-{
- return pinctrl_gpio_direction_input(chip->base + offset);
-}
-
static int rk805_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
rk805_gpio_set(chip, offset, value);
- return pinctrl_gpio_direction_output(chip->base + offset);
+ return pinctrl_gpio_direction_output(chip, offset);
}
static int rk805_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
@@ -330,7 +324,7 @@ static const struct gpio_chip rk805_gpio_chip = {
.get_direction = rk805_gpio_get_direction,
.get = rk805_gpio_get,
.set = rk805_gpio_set,
- .direction_input = rk805_gpio_direction_input,
+ .direction_input = pinctrl_gpio_direction_input,
.direction_output = rk805_gpio_direction_output,
.can_sleep = true,
.base = -1,
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 45e416f68e74..3bedf36a0019 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -3429,7 +3429,7 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
return 0;
}
-static int rockchip_pinctrl_remove(struct platform_device *pdev)
+static void rockchip_pinctrl_remove(struct platform_device *pdev)
{
struct rockchip_pinctrl *info = platform_get_drvdata(pdev);
struct rockchip_pin_bank *bank;
@@ -3450,8 +3450,6 @@ static int rockchip_pinctrl_remove(struct platform_device *pdev)
}
mutex_unlock(&bank->deferred_lock);
}
-
- return 0;
}
static struct rockchip_pin_bank px30_pin_banks[] = {
@@ -3982,7 +3980,7 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = {
static struct platform_driver rockchip_pinctrl_driver = {
.probe = rockchip_pinctrl_probe,
- .remove = rockchip_pinctrl_remove,
+ .remove_new = rockchip_pinctrl_remove,
.driver = {
.name = "rockchip-pinctrl",
.pm = &rockchip_pinctrl_dev_pm_ops,
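
[annotation] The rockchip hunks show the standard remove-callback transition: the callback returns void (the returned int was ignored by the platform core anyway) and is wired up through .remove_new during the migration period. A sketch of the shape; names are hypothetical:

#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

static void demo_remove(struct platform_device *pdev)
{
	/* tear-down only; there is nothing useful to return */
}

static struct platform_driver demo_driver = {
	.probe      = demo_probe,
	.remove_new = demo_remove,
	.driver     = {
		.name = "demo",
	},
};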
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 461a7c02d4a3..8267be769635 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -239,32 +239,32 @@ static struct lock_class_key pcs_request_class;
* does not help in this case.
*/
-static unsigned __maybe_unused pcs_readb(void __iomem *reg)
+static unsigned int pcs_readb(void __iomem *reg)
{
return readb(reg);
}
-static unsigned __maybe_unused pcs_readw(void __iomem *reg)
+static unsigned int pcs_readw(void __iomem *reg)
{
return readw(reg);
}
-static unsigned __maybe_unused pcs_readl(void __iomem *reg)
+static unsigned int pcs_readl(void __iomem *reg)
{
return readl(reg);
}
-static void __maybe_unused pcs_writeb(unsigned val, void __iomem *reg)
+static void pcs_writeb(unsigned int val, void __iomem *reg)
{
writeb(val, reg);
}
-static void __maybe_unused pcs_writew(unsigned val, void __iomem *reg)
+static void pcs_writew(unsigned int val, void __iomem *reg)
{
writew(val, reg);
}
-static void __maybe_unused pcs_writel(unsigned val, void __iomem *reg)
+static void pcs_writel(unsigned int val, void __iomem *reg)
{
writel(val, reg);
}
@@ -1925,16 +1925,11 @@ free:
return ret;
}
-static int pcs_remove(struct platform_device *pdev)
+static void pcs_remove(struct platform_device *pdev)
{
struct pcs_device *pcs = platform_get_drvdata(pdev);
- if (!pcs)
- return 0;
-
pcs_free_resources(pcs);
-
- return 0;
}
static const struct pcs_soc_data pinctrl_single_omap_wkup = {
@@ -1982,7 +1977,7 @@ MODULE_DEVICE_TABLE(of, pcs_of_match);
static struct platform_driver pcs_driver = {
.probe = pcs_probe,
- .remove = pcs_remove,
+ .remove_new = pcs_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = pcs_of_match,
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index c1f36b164ea5..1485573b523c 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -717,20 +717,13 @@ static void st_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
__st_gpio_set(bank, offset, value);
}
-static int st_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- pinctrl_gpio_direction_input(chip->base + offset);
-
- return 0;
-}
-
static int st_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
struct st_gpio_bank *bank = gpiochip_get_data(chip);
__st_gpio_set(bank, offset, value);
- pinctrl_gpio_direction_output(chip->base + offset);
+ pinctrl_gpio_direction_output(chip, offset);
return 0;
}
@@ -1330,7 +1323,7 @@ static int st_gpio_irq_request_resources(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- st_gpio_direction_input(gc, d->hwirq);
+ pinctrl_gpio_direction_input(gc, d->hwirq);
return gpiochip_reqres_irq(gc, d->hwirq);
}
@@ -1488,7 +1481,7 @@ static const struct gpio_chip st_gpio_template = {
.free = gpiochip_generic_free,
.get = st_gpio_get,
.set = st_gpio_set,
- .direction_input = st_gpio_direction_input,
+ .direction_input = pinctrl_gpio_direction_input,
.direction_output = st_gpio_direction_output,
.get_direction = st_gpio_get_direction,
.ngpio = ST_GPIO_PINS_PER_BANK,
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index 0974bbf57b54..6313be370eb7 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -734,14 +734,18 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
return 0;
}
-static int stmfx_pinctrl_remove(struct platform_device *pdev)
+static void stmfx_pinctrl_remove(struct platform_device *pdev)
{
struct stmfx *stmfx = dev_get_drvdata(pdev->dev.parent);
+ int ret;
- return stmfx_function_disable(stmfx,
- STMFX_FUNC_GPIO |
- STMFX_FUNC_ALTGPIO_LOW |
- STMFX_FUNC_ALTGPIO_HIGH);
+ ret = stmfx_function_disable(stmfx,
+ STMFX_FUNC_GPIO |
+ STMFX_FUNC_ALTGPIO_LOW |
+ STMFX_FUNC_ALTGPIO_HIGH);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to disable pins (%pe)\n",
+ ERR_PTR(ret));
}
#ifdef CONFIG_PM_SLEEP
@@ -850,7 +854,7 @@ static struct platform_driver stmfx_pinctrl_driver = {
.pm = &stmfx_pinctrl_dev_pm_ops,
},
.probe = stmfx_pinctrl_probe,
- .remove = stmfx_pinctrl_remove,
+ .remove_new = stmfx_pinctrl_remove,
};
module_platform_driver(stmfx_pinctrl_driver);
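
[annotation] Because the remove callback can no longer propagate a failure, stmfx now reports it on the spot; %pe together with ERR_PTR() prints the symbolic error name. A sketch of that reporting idiom, with a stand-in error value and hypothetical names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static void demo_remove(struct platform_device *pdev)
{
	int ret = -EIO;	/* stand-in for the result of a real teardown call */

	if (ret)
		dev_err(&pdev->dev, "teardown failed (%pe)\n", ERR_PTR(ret));
}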
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
index fef1ee7b7945..fd0331a87cda 100644
--- a/drivers/pinctrl/pinctrl-sx150x.c
+++ b/drivers/pinctrl/pinctrl-sx150x.c
@@ -1116,7 +1116,6 @@ static const struct regmap_config sx150x_regmap_config = {
static int sx150x_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
static const u32 i2c_funcs = I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_WRITE_WORD_DATA;
struct device *dev = &client->dev;
@@ -1135,11 +1134,7 @@ static int sx150x_probe(struct i2c_client *client)
pctl->dev = dev;
pctl->client = client;
- if (dev->of_node)
- pctl->data = of_device_get_match_data(dev);
- else
- pctl->data = (struct sx150x_device_data *)id->driver_data;
-
+ pctl->data = i2c_get_match_data(client);
if (!pctl->data)
return -EINVAL;
diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c
index ab49bd708969..c3b76e6511ac 100644
--- a/drivers/pinctrl/pinctrl-tb10x.c
+++ b/drivers/pinctrl/pinctrl-tb10x.c
@@ -804,13 +804,11 @@ fail:
return ret;
}
-static int tb10x_pinctrl_remove(struct platform_device *pdev)
+static void tb10x_pinctrl_remove(struct platform_device *pdev)
{
struct tb10x_pinctrl *state = platform_get_drvdata(pdev);
mutex_destroy(&state->mutex);
-
- return 0;
}
@@ -822,7 +820,7 @@ MODULE_DEVICE_TABLE(of, tb10x_pinctrl_dt_ids);
static struct platform_driver tb10x_pinctrl_pdrv = {
.probe = tb10x_pinctrl_probe,
- .remove = tb10x_pinctrl_remove,
+ .remove_new = tb10x_pinctrl_remove,
.driver = {
.name = "tb10x_pinctrl",
.of_match_table = of_match_ptr(tb10x_pinctrl_dt_ids),
diff --git a/drivers/pinctrl/pinctrl-utils.c b/drivers/pinctrl/pinctrl-utils.c
index 3580e0fd94ed..40862f7bd6ca 100644
--- a/drivers/pinctrl/pinctrl-utils.c
+++ b/drivers/pinctrl/pinctrl-utils.c
@@ -6,12 +6,14 @@
*
* Author: Laxman Dewangan <ldewangan@nvidia.com>
*/
+#include <linux/array_size.h>
#include <linux/device.h>
#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/pinctrl/pinctrl.h>
#include <linux/of.h>
#include <linux/slab.h>
+
+#include <linux/pinctrl/pinctrl.h>
+
#include "core.h"
#include "pinctrl-utils.h"
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index cf0383f575d9..f4256a918165 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -11,12 +11,12 @@
#include <linux/gpio/driver.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include "pinctrl-lantiq.h"
@@ -1451,7 +1451,6 @@ MODULE_DEVICE_TABLE(of, xway_match);
static int pinmux_xway_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
const struct pinctrl_xway_soc *xway_soc;
int ret, i;
@@ -1460,10 +1459,8 @@ static int pinmux_xway_probe(struct platform_device *pdev)
if (IS_ERR(xway_info.membase[0]))
return PTR_ERR(xway_info.membase[0]);
- match = of_match_device(xway_match, &pdev->dev);
- if (match)
- xway_soc = (const struct pinctrl_xway_soc *) match->data;
- else
+ xway_soc = device_get_match_data(&pdev->dev);
+ if (!xway_soc)
xway_soc = &danube_pinctrl;
/* find out how many pads we have */
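
[annotation] of_match_device() plus a manual cast becomes device_get_match_data(), which works for any firmware node type; the driver keeps its historical default when no match data is present. Sketch under the same assumption, hypothetical names:

#include <linux/platform_device.h>
#include <linux/property.h>

struct demo_soc {
	unsigned int npads;
};

static const struct demo_soc demo_default_soc = { .npads = 32 };

static int demo_probe(struct platform_device *pdev)
{
	const struct demo_soc *soc = device_get_match_data(&pdev->dev);

	if (!soc)
		soc = &demo_default_soc;	/* historical fallback */
	/* ... */
	return 0;
}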
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 2a180a5d64a4..23d2da0b99b9 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -12,12 +12,12 @@
*/
#define pr_fmt(fmt) "pinmux core: " fmt
+#include <linux/array_size.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
-#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/radix-tree.h>
@@ -173,10 +173,8 @@ static int pin_request(struct pinctrl_dev *pctldev,
else
status = 0;
- if (status) {
- dev_err(pctldev->dev, "request() failed for pin %d\n", pin);
+ if (status)
module_put(pctldev->owner);
- }
out_free_pin:
if (status) {
diff --git a/drivers/pinctrl/qcom/pinctrl-apq8064.c b/drivers/pinctrl/qcom/pinctrl-apq8064.c
index 20c3b9025044..a18df4162299 100644
--- a/drivers/pinctrl/qcom/pinctrl-apq8064.c
+++ b/drivers/pinctrl/qcom/pinctrl-apq8064.c
@@ -629,7 +629,7 @@ static struct platform_driver apq8064_pinctrl_driver = {
.of_match_table = apq8064_pinctrl_of_match,
},
.probe = apq8064_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init apq8064_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-apq8084.c b/drivers/pinctrl/qcom/pinctrl-apq8084.c
index 3fc0a40762b6..afada80e52a2 100644
--- a/drivers/pinctrl/qcom/pinctrl-apq8084.c
+++ b/drivers/pinctrl/qcom/pinctrl-apq8084.c
@@ -1207,7 +1207,7 @@ static struct platform_driver apq8084_pinctrl_driver = {
.of_match_table = apq8084_pinctrl_of_match,
},
.probe = apq8084_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init apq8084_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
index 1f7944dd829d..cb13576ad6cf 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
@@ -710,7 +710,7 @@ static struct platform_driver ipq4019_pinctrl_driver = {
.of_match_table = ipq4019_pinctrl_of_match,
},
.probe = ipq4019_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init ipq4019_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5018.c b/drivers/pinctrl/qcom/pinctrl-ipq5018.c
index e2951f81c3ee..68f65b57003e 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq5018.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq5018.c
@@ -754,7 +754,7 @@ static struct platform_driver ipq5018_pinctrl_driver = {
.of_match_table = ipq5018_pinctrl_of_match,
},
.probe = ipq5018_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init ipq5018_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5332.c b/drivers/pinctrl/qcom/pinctrl-ipq5332.c
index 625f8014051f..882175118970 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq5332.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq5332.c
@@ -834,7 +834,7 @@ static struct platform_driver ipq5332_pinctrl_driver = {
.of_match_table = ipq5332_pinctrl_of_match,
},
.probe = ipq5332_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init ipq5332_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq6018.c b/drivers/pinctrl/qcom/pinctrl-ipq6018.c
index 0ad08647dbcd..ac330d8712b5 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq6018.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq6018.c
@@ -1080,7 +1080,7 @@ static struct platform_driver ipq6018_pinctrl_driver = {
.of_match_table = ipq6018_pinctrl_of_match,
},
.probe = ipq6018_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init ipq6018_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq8064.c b/drivers/pinctrl/qcom/pinctrl-ipq8064.c
index e2bb94e86aef..e10e1bc4c911 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq8064.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq8064.c
@@ -631,7 +631,7 @@ static struct platform_driver ipq8064_pinctrl_driver = {
.of_match_table = ipq8064_pinctrl_of_match,
},
.probe = ipq8064_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init ipq8064_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq8074.c b/drivers/pinctrl/qcom/pinctrl-ipq8074.c
index 337f3a1c92c1..fee32c1d1d3e 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq8074.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq8074.c
@@ -1041,7 +1041,7 @@ static struct platform_driver ipq8074_pinctrl_driver = {
.of_match_table = ipq8074_pinctrl_of_match,
},
.probe = ipq8074_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init ipq8074_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq9574.c b/drivers/pinctrl/qcom/pinctrl-ipq9574.c
index e2491617b236..20ab59cb621b 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq9574.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq9574.c
@@ -799,7 +799,7 @@ static struct platform_driver ipq9574_pinctrl_driver = {
.of_match_table = ipq9574_pinctrl_of_match,
},
.probe = ipq9574_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init ipq9574_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
index 0b2839d27fd6..9651aed048cf 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
@@ -495,7 +495,7 @@ err_pinctrl:
}
EXPORT_SYMBOL_GPL(lpi_pinctrl_probe);
-int lpi_pinctrl_remove(struct platform_device *pdev)
+void lpi_pinctrl_remove(struct platform_device *pdev)
{
struct lpi_pinctrl *pctrl = platform_get_drvdata(pdev);
int i;
@@ -505,8 +505,6 @@ int lpi_pinctrl_remove(struct platform_device *pdev)
for (i = 0; i < pctrl->data->npins; i++)
pinctrl_generic_remove_group(pctrl->ctrl, i);
-
- return 0;
}
EXPORT_SYMBOL_GPL(lpi_pinctrl_remove);
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
index 29047bb80bb8..387d83ee95b5 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
@@ -85,6 +85,6 @@ struct lpi_pinctrl_variant_data {
};
int lpi_pinctrl_probe(struct platform_device *pdev);
-int lpi_pinctrl_remove(struct platform_device *pdev);
+void lpi_pinctrl_remove(struct platform_device *pdev);
#endif /*__PINCTRL_LPASS_LPI_H__*/
diff --git a/drivers/pinctrl/qcom/pinctrl-mdm9607.c b/drivers/pinctrl/qcom/pinctrl-mdm9607.c
index e7cd3ef1cf3e..415d24e16267 100644
--- a/drivers/pinctrl/qcom/pinctrl-mdm9607.c
+++ b/drivers/pinctrl/qcom/pinctrl-mdm9607.c
@@ -1059,7 +1059,7 @@ static struct platform_driver mdm9607_pinctrl_driver = {
.of_match_table = mdm9607_pinctrl_of_match,
},
.probe = mdm9607_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init mdm9607_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-mdm9615.c b/drivers/pinctrl/qcom/pinctrl-mdm9615.c
index 0a2ae383d3d5..3f2eafea0b24 100644
--- a/drivers/pinctrl/qcom/pinctrl-mdm9615.c
+++ b/drivers/pinctrl/qcom/pinctrl-mdm9615.c
@@ -446,7 +446,7 @@ static struct platform_driver mdm9615_pinctrl_driver = {
.of_match_table = mdm9615_pinctrl_of_match,
},
.probe = mdm9615_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init mdm9615_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 115b83e2d8e6..395040346d0f 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -1547,15 +1547,13 @@ int msm_pinctrl_probe(struct platform_device *pdev,
}
EXPORT_SYMBOL(msm_pinctrl_probe);
-int msm_pinctrl_remove(struct platform_device *pdev)
+void msm_pinctrl_remove(struct platform_device *pdev)
{
struct msm_pinctrl *pctrl = platform_get_drvdata(pdev);
gpiochip_remove(&pctrl->chip);
unregister_restart_handler(&pctrl->restart_nb);
-
- return 0;
}
EXPORT_SYMBOL(msm_pinctrl_remove);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index 1d2f2e904da1..4968d08a384d 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -166,6 +166,6 @@ extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops;
int msm_pinctrl_probe(struct platform_device *pdev,
const struct msm_pinctrl_soc_data *soc_data);
-int msm_pinctrl_remove(struct platform_device *pdev);
+void msm_pinctrl_remove(struct platform_device *pdev);
#endif
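
[annotation] msm_pinctrl_remove() is a shared helper exported to all the Qualcomm TLMM drivers, so changing its prototype to void is what drives the long run of ".remove = msm_pinctrl_remove" to ".remove_new = msm_pinctrl_remove" updates below. A sketch of the library-style arrangement; names are hypothetical:

#include <linux/export.h>
#include <linux/platform_device.h>

/* demo-core.c: shared helper, exported once */
void demo_core_remove(struct platform_device *pdev)
{
	/* common teardown for every SoC driver built on this core */
}
EXPORT_SYMBOL(demo_core_remove);

/* demo-socx.c: each SoC driver just points .remove_new at the helper */
static int demo_socx_probe(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver demo_socx_driver = {
	.probe      = demo_socx_probe,
	.remove_new = demo_core_remove,
	.driver     = { .name = "demo-socx" },
};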
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8226.c b/drivers/pinctrl/qcom/pinctrl-msm8226.c
index 994619840a70..40806c0650ef 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8226.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8226.c
@@ -336,6 +336,7 @@ enum msm8226_functions {
msm_mux_blsp_i2c3,
msm_mux_blsp_i2c4,
msm_mux_blsp_i2c5,
+ msm_mux_blsp_i2c6,
msm_mux_blsp_spi1,
msm_mux_blsp_spi2,
msm_mux_blsp_spi3,
@@ -436,6 +437,8 @@ static const char * const blsp_spi5_groups[] = {
"gpio16", "gpio17", "gpio18", "gpio19"
};
+static const char * const blsp_i2c6_groups[] = { "gpio22", "gpio23" };
+
static const char * const cci_i2c0_groups[] = { "gpio29", "gpio30" };
static const char * const cam_mclk0_groups[] = { "gpio26" };
@@ -459,6 +462,7 @@ static const struct pinfunction msm8226_functions[] = {
MSM_PIN_FUNCTION(blsp_i2c3),
MSM_PIN_FUNCTION(blsp_i2c4),
MSM_PIN_FUNCTION(blsp_i2c5),
+ MSM_PIN_FUNCTION(blsp_i2c6),
MSM_PIN_FUNCTION(blsp_spi1),
MSM_PIN_FUNCTION(blsp_spi2),
MSM_PIN_FUNCTION(blsp_spi3),
@@ -507,8 +511,8 @@ static const struct msm_pingroup msm8226_groups[] = {
PINGROUP(19, blsp_spi5, blsp_uart5, blsp_i2c5, NA, NA, NA, NA),
PINGROUP(20, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(21, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(22, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(23, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(22, NA, NA, blsp_i2c6, NA, NA, NA, NA),
+ PINGROUP(23, NA, NA, blsp_i2c6, NA, NA, NA, NA),
PINGROUP(24, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(25, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(26, cam_mclk0, NA, NA, NA, NA, NA, NA),
@@ -612,6 +616,16 @@ static const struct msm_pingroup msm8226_groups[] = {
#define NUM_GPIO_PINGROUPS 117
+static const struct msm_gpio_wakeirq_map msm8226_mpm_map[] = {
+ { 1, 3 }, { 4, 4 }, { 5, 5 }, { 9, 6 }, { 13, 7 }, { 17, 8 },
+ { 21, 9 }, { 27, 10 }, { 29, 11 }, { 31, 12 }, { 33, 13 }, { 35, 14 },
+ { 37, 15 }, { 38, 16 }, { 39, 17 }, { 41, 18 }, { 46, 19 }, { 48, 20 },
+ { 49, 21 }, { 50, 22 }, { 51, 23 }, { 52, 24 }, { 54, 25 }, { 62, 26 },
+ { 63, 27 }, { 64, 28 }, { 65, 29 }, { 66, 30 }, { 67, 31 }, { 68, 32 },
+ { 69, 33 }, { 71, 34 }, { 72, 35 }, { 106, 36 }, { 107, 37 }, { 108, 38 },
+ { 109, 39 }, { 110, 40 }, { 111, 54 }, { 113, 55 }, { 115, 41 },
+};
+
static const struct msm_pinctrl_soc_data msm8226_pinctrl = {
.pins = msm8226_pins,
.npins = ARRAY_SIZE(msm8226_pins),
@@ -620,6 +634,8 @@ static const struct msm_pinctrl_soc_data msm8226_pinctrl = {
.groups = msm8226_groups,
.ngroups = ARRAY_SIZE(msm8226_groups),
.ngpios = NUM_GPIO_PINGROUPS,
+ .wakeirq_map = msm8226_mpm_map,
+ .nwakeirq_map = ARRAY_SIZE(msm8226_mpm_map),
};
static int msm8226_pinctrl_probe(struct platform_device *pdev)
@@ -638,7 +654,7 @@ static struct platform_driver msm8226_pinctrl_driver = {
.of_match_table = msm8226_pinctrl_of_match,
},
.probe = msm8226_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init msm8226_pinctrl_init(void)
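
[annotation] The new msm8226_mpm_map pairs TLMM GPIO numbers with MPM wake-interrupt indices, and the table is hooked into the SoC data via .wakeirq_map/.nwakeirq_map so a GPIO used as a wake source can arm the matching always-on MPM pin. A sketch of the same shape with made-up numbers and a locally defined struct (the real one lives in pinctrl-msm.h):

#include <linux/array_size.h>

struct demo_wakeirq_map {
	unsigned int gpio;	/* TLMM GPIO number */
	unsigned int wakeirq;	/* MPM pin that can wake the system */
};

static const struct demo_wakeirq_map demo_mpm_map[] = {
	{ 1, 3 },	/* GPIO 1 wakes via MPM pin 3 */
	{ 4, 4 },	/* GPIO 4 wakes via MPM pin 4 */
};

static const unsigned int demo_nwakeirq_map = ARRAY_SIZE(demo_mpm_map);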
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8660.c b/drivers/pinctrl/qcom/pinctrl-msm8660.c
index 999a5f867eb5..dba6d531b4a1 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8660.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8660.c
@@ -981,7 +981,7 @@ static struct platform_driver msm8660_pinctrl_driver = {
.of_match_table = msm8660_pinctrl_of_match,
},
.probe = msm8660_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init msm8660_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8909.c b/drivers/pinctrl/qcom/pinctrl-msm8909.c
index 756856d20d6b..14b17ba9f906 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8909.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8909.c
@@ -929,7 +929,7 @@ static struct platform_driver msm8909_pinctrl_driver = {
.of_match_table = msm8909_pinctrl_of_match,
},
.probe = msm8909_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init msm8909_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8916.c b/drivers/pinctrl/qcom/pinctrl-msm8916.c
index cea5c54f92fe..184dcf842273 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8916.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8916.c
@@ -969,7 +969,7 @@ static struct platform_driver msm8916_pinctrl_driver = {
.of_match_table = msm8916_pinctrl_of_match,
},
.probe = msm8916_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init msm8916_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8953.c b/drivers/pinctrl/qcom/pinctrl-msm8953.c
index 998351bdfee1..c2253821ae8d 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8953.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8953.c
@@ -1816,7 +1816,7 @@ static struct platform_driver msm8953_pinctrl_driver = {
.of_match_table = msm8953_pinctrl_of_match,
},
.probe = msm8953_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init msm8953_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8960.c b/drivers/pinctrl/qcom/pinctrl-msm8960.c
index ebe230b3b437..6b9148d226e9 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8960.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8960.c
@@ -1246,7 +1246,7 @@ static struct platform_driver msm8960_pinctrl_driver = {
.of_match_table = msm8960_pinctrl_of_match,
},
.probe = msm8960_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init msm8960_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8976.c b/drivers/pinctrl/qcom/pinctrl-msm8976.c
index c30d80e4e98c..9a951888e8a1 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8976.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8976.c
@@ -1096,7 +1096,7 @@ static struct platform_driver msm8976_pinctrl_driver = {
.of_match_table = msm8976_pinctrl_of_match,
},
.probe = msm8976_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init msm8976_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8994.c b/drivers/pinctrl/qcom/pinctrl-msm8994.c
index b1a6759ab4a5..1ed1dd32d6c7 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8994.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8994.c
@@ -1343,7 +1343,7 @@ static struct platform_driver msm8994_pinctrl_driver = {
.of_match_table = msm8994_pinctrl_of_match,
},
.probe = msm8994_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init msm8994_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8996.c b/drivers/pinctrl/qcom/pinctrl-msm8996.c
index 46cc0b49dbab..777c2a74036e 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8996.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8996.c
@@ -1880,6 +1880,18 @@ static const struct msm_pingroup msm8996_groups[] = {
SDC_QDSD_PINGROUP(sdc1_rclk, 0x12c000, 15, 0),
};
+static const struct msm_gpio_wakeirq_map msm8996_mpm_map[] = {
+ { 1, 3 }, { 5, 4 }, { 9, 5 }, { 11, 6 }, { 66, 7 }, { 22, 8 }, { 24, 9 }, { 26, 10 },
+ { 34, 11 }, { 36, 12 }, { 37, 13 }, { 38, 14 }, { 40, 15 }, { 42, 16 }, { 46, 17 },
+ { 50, 18 }, { 53, 19 }, { 54, 20 }, { 56, 21 }, { 57, 22 }, { 58, 23 }, { 59, 24 },
+ { 60, 25 }, { 61, 26 }, { 62, 27 }, { 63, 28 }, { 64, 29 }, { 71, 30 }, { 73, 31 },
+ { 77, 32 }, { 78, 33 }, { 79, 34 }, { 80, 35 }, { 82, 36 }, { 86, 37 }, { 91, 38 },
+ { 92, 39 }, { 95, 40 }, { 97, 41 }, { 101, 42 }, { 104, 43 }, { 106, 44 }, { 108, 45 },
+ { 112, 46 }, { 113, 47 }, { 110, 48 }, { 127, 50 }, { 115, 51 }, { 116, 54 }, { 117, 55 },
+ { 118, 56 }, { 119, 57 }, { 120, 58 }, { 121, 59 }, { 122, 60 }, { 123, 61 }, { 124, 62 },
+ { 125, 63 }, { 126, 64 }, { 129, 65 }, { 131, 66 }, { 132, 67 }, { 133, 68 }, { 145, 69 },
+};
+
static const struct msm_pinctrl_soc_data msm8996_pinctrl = {
.pins = msm8996_pins,
.npins = ARRAY_SIZE(msm8996_pins),
@@ -1888,6 +1900,8 @@ static const struct msm_pinctrl_soc_data msm8996_pinctrl = {
.groups = msm8996_groups,
.ngroups = ARRAY_SIZE(msm8996_groups),
.ngpios = 150,
+ .wakeirq_map = msm8996_mpm_map,
+ .nwakeirq_map = ARRAY_SIZE(msm8996_mpm_map),
};
static int msm8996_pinctrl_probe(struct platform_device *pdev)
@@ -1906,7 +1920,7 @@ static struct platform_driver msm8996_pinctrl_driver = {
.of_match_table = msm8996_pinctrl_of_match,
},
.probe = msm8996_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init msm8996_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8998.c b/drivers/pinctrl/qcom/pinctrl-msm8998.c
index b7cbf32b3125..4aaf45e54f3a 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8998.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8998.c
@@ -1535,7 +1535,7 @@ static struct platform_driver msm8998_pinctrl_driver = {
.of_match_table = msm8998_pinctrl_of_match,
},
.probe = msm8998_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init msm8998_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8x74.c b/drivers/pinctrl/qcom/pinctrl-msm8x74.c
index d5fe62992849..750a8272ded7 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8x74.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8x74.c
@@ -1045,6 +1045,16 @@ static const struct msm_pingroup msm8x74_groups[] = {
#define NUM_GPIO_PINGROUPS 146
+static const struct msm_gpio_wakeirq_map msm8x74_mpm_map[] = {
+ { 1, 4 }, { 5, 5 }, { 9, 6 }, { 18, 7 }, { 20, 8 }, { 24, 9 },
+ { 27, 10 }, { 28, 11 }, { 34, 12 }, { 35, 13 }, { 37, 14 }, { 42, 15 },
+ { 44, 16 }, { 46, 17 }, { 50, 18 }, { 54, 19 }, { 59, 20 }, { 61, 21 },
+ { 62, 22 }, { 64, 23 }, { 65, 24 }, { 66, 25 }, { 67, 26 }, { 68, 27 },
+ { 71, 28 }, { 72, 29 }, { 73, 30 }, { 74, 31 }, { 75, 32 }, { 77, 33 },
+ { 79, 34 }, { 80, 35 }, { 82, 36 }, { 86, 37 }, { 92, 38 }, { 93, 39 },
+ { 95, 40 }, { 102, 3 }, { 144, 41 },
+};
+
static const struct msm_pinctrl_soc_data msm8x74_pinctrl = {
.pins = msm8x74_pins,
.npins = ARRAY_SIZE(msm8x74_pins),
@@ -1053,6 +1063,8 @@ static const struct msm_pinctrl_soc_data msm8x74_pinctrl = {
.groups = msm8x74_groups,
.ngroups = ARRAY_SIZE(msm8x74_groups),
.ngpios = NUM_GPIO_PINGROUPS,
+ .wakeirq_map = msm8x74_mpm_map,
+ .nwakeirq_map = ARRAY_SIZE(msm8x74_mpm_map),
};
static int msm8x74_pinctrl_probe(struct platform_device *pdev)
@@ -1071,7 +1083,7 @@ static struct platform_driver msm8x74_pinctrl_driver = {
.of_match_table = msm8x74_pinctrl_of_match,
},
.probe = msm8x74_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init msm8x74_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-qcm2290.c b/drivers/pinctrl/qcom/pinctrl-qcm2290.c
index ba699eac9ee8..f5c1c427b44e 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcm2290.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcm2290.c
@@ -1113,7 +1113,7 @@ static struct platform_driver qcm2290_pinctrl_driver = {
.of_match_table = qcm2290_pinctrl_of_match,
},
.probe = qcm2290_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init qcm2290_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs404.c b/drivers/pinctrl/qcom/pinctrl-qcs404.c
index ae7224012f8a..9a875b7dc998 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs404.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs404.c
@@ -1644,7 +1644,7 @@ static struct platform_driver qcs404_pinctrl_driver = {
.of_match_table = qcs404_pinctrl_of_match,
},
.probe = qcs404_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init qcs404_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
index b5808fcfb13c..4d2f6f495163 100644
--- a/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
+++ b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
@@ -145,7 +145,7 @@ static struct platform_driver qdf2xxx_pinctrl_driver = {
.acpi_match_table = qdf2xxx_acpi_ids,
},
.probe = qdf2xxx_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init qdf2xxx_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-qdu1000.c b/drivers/pinctrl/qcom/pinctrl-qdu1000.c
index 47bc529ef550..da4f940bc8d4 100644
--- a/drivers/pinctrl/qcom/pinctrl-qdu1000.c
+++ b/drivers/pinctrl/qcom/pinctrl-qdu1000.c
@@ -1248,7 +1248,7 @@ static struct platform_driver qdu1000_tlmm_driver = {
.of_match_table = qdu1000_tlmm_of_match,
},
.probe = qdu1000_tlmm_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init qdu1000_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sa8775p.c b/drivers/pinctrl/qcom/pinctrl-sa8775p.c
index 8fdea25d8d67..5459c0c681a2 100644
--- a/drivers/pinctrl/qcom/pinctrl-sa8775p.c
+++ b/drivers/pinctrl/qcom/pinctrl-sa8775p.c
@@ -1530,7 +1530,7 @@ static struct platform_driver sa8775p_pinctrl_driver = {
.of_match_table = sa8775p_pinctrl_of_match,
},
.probe = sa8775p_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init sa8775p_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c
index 6eb0c73791c0..c27aaa599b91 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7180.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c
@@ -1159,7 +1159,7 @@ static struct platform_driver sc7180_pinctrl_driver = {
.of_match_table = sc7180_pinctrl_of_match,
},
.probe = sc7180_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init sc7180_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c
index d615b6c55b89..99156217c6a5 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c
@@ -158,7 +158,7 @@ static struct platform_driver lpi_pinctrl_driver = {
.of_match_table = lpi_pinctrl_of_match,
},
.probe = lpi_pinctrl_probe,
- .remove = lpi_pinctrl_remove,
+ .remove_new = lpi_pinctrl_remove,
};
module_platform_driver(lpi_pinctrl_driver);
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280.c b/drivers/pinctrl/qcom/pinctrl-sc7280.c
index 0c10eeb60b55..c2db663e396e 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7280.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7280.c
@@ -1505,7 +1505,7 @@ static struct platform_driver sc7280_pinctrl_driver = {
.of_match_table = sc7280_pinctrl_of_match,
},
.probe = sc7280_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init sc7280_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8180x.c b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
index d6a79ad41a40..cfa7c8be9770 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc8180x.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
@@ -1720,7 +1720,7 @@ static struct platform_driver sc8180x_pinctrl_driver = {
.acpi_match_table = sc8180x_pinctrl_acpi_match,
},
.probe = sc8180x_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init sc8180x_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c
index 4b9c0beac32e..b33483056f42 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c
@@ -199,7 +199,7 @@ static struct platform_driver lpi_pinctrl_driver = {
.of_match_table = lpi_pinctrl_of_match,
},
.probe = lpi_pinctrl_probe,
- .remove = lpi_pinctrl_remove,
+ .remove_new = lpi_pinctrl_remove,
};
module_platform_driver(lpi_pinctrl_driver);
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8280xp.c b/drivers/pinctrl/qcom/pinctrl-sc8280xp.c
index 96f4fb5a5d29..4b1c49697698 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc8280xp.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc8280xp.c
@@ -1926,7 +1926,7 @@ static struct platform_driver sc8280xp_pinctrl_driver = {
.of_match_table = sc8280xp_pinctrl_of_match,
},
.probe = sc8280xp_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init sc8280xp_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm660.c b/drivers/pinctrl/qcom/pinctrl-sdm660.c
index c2e0d5c034ac..b93837c00954 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm660.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm660.c
@@ -1399,6 +1399,18 @@ static const struct msm_pingroup sdm660_groups[] = {
SDC_QDSD_PINGROUP(sdc1_rclk, 0x9a000, 15, 0),
};
+static const struct msm_gpio_wakeirq_map sdm660_mpm_map[] = {
+ { 1, 3 }, { 5, 4 }, { 9, 5 }, { 10, 6 }, { 66, 7 }, { 22, 8 }, { 25, 9 }, { 28, 10 },
+ { 58, 11 }, { 41, 13 }, { 43, 14 }, { 40, 15 }, { 42, 16 }, { 46, 17 }, { 50, 18 },
+ { 44, 19 }, { 56, 21 }, { 45, 22 }, { 68, 23 }, { 69, 24 }, { 70, 25 }, { 71, 26 },
+ { 72, 27 }, { 73, 28 }, { 64, 29 }, { 2, 30 }, { 13, 31 }, { 111, 32 }, { 74, 33 },
+ { 75, 34 }, { 76, 35 }, { 82, 36 }, { 17, 37 }, { 77, 38 }, { 47, 39 }, { 54, 40 },
+ { 48, 41 }, { 101, 42 }, { 49, 43 }, { 51, 44 }, { 86, 45 }, { 90, 46 }, { 91, 47 },
+ { 52, 48 }, { 55, 50 }, { 6, 51 }, { 65, 53 }, { 67, 55 }, { 83, 56 }, { 84, 57 },
+ { 85, 58 }, { 87, 59 }, { 21, 63 }, { 78, 64 }, { 113, 65 }, { 60, 66 }, { 98, 67 },
+ { 30, 68 }, { 31, 70 }, { 29, 71 }, { 107, 76 }, { 109, 83 }, { 103, 84 }, { 105, 85 },
+};
+
static const struct msm_pinctrl_soc_data sdm660_pinctrl = {
.pins = sdm660_pins,
.npins = ARRAY_SIZE(sdm660_pins),
@@ -1409,6 +1421,8 @@ static const struct msm_pinctrl_soc_data sdm660_pinctrl = {
.ngpios = 114,
.tiles = sdm660_tiles,
.ntiles = ARRAY_SIZE(sdm660_tiles),
+ .wakeirq_map = sdm660_mpm_map,
+ .nwakeirq_map = ARRAY_SIZE(sdm660_mpm_map),
};
static int sdm660_pinctrl_probe(struct platform_device *pdev)
@@ -1428,7 +1442,7 @@ static struct platform_driver sdm660_pinctrl_driver = {
.of_match_table = sdm660_pinctrl_of_match,
},
.probe = sdm660_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init sdm660_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm670.c b/drivers/pinctrl/qcom/pinctrl-sdm670.c
index cc3cce077de4..1e694a966953 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm670.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm670.c
@@ -1318,7 +1318,7 @@ static struct platform_driver sdm670_pinctrl_driver = {
.of_match_table = sdm670_pinctrl_of_match,
},
.probe = sdm670_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init sdm670_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
index cc05c415ed15..3f3265e0018d 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm845.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
@@ -1351,7 +1351,7 @@ static struct platform_driver sdm845_pinctrl_driver = {
.acpi_match_table = ACPI_PTR(sdm845_pinctrl_acpi_match),
},
.probe = sdm845_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init sdm845_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx55.c b/drivers/pinctrl/qcom/pinctrl-sdx55.c
index 8826db9d21d0..c88b8bfcacb6 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdx55.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdx55.c
@@ -990,7 +990,7 @@ static struct platform_driver sdx55_pinctrl_driver = {
.of_match_table = sdx55_pinctrl_of_match,
},
.probe = sdx55_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init sdx55_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx65.c b/drivers/pinctrl/qcom/pinctrl-sdx65.c
index f6f319c997fc..bd44ec0fcab4 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdx65.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdx65.c
@@ -939,7 +939,7 @@ static struct platform_driver sdx65_pinctrl_driver = {
.of_match_table = sdx65_pinctrl_of_match,
},
.probe = sdx65_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init sdx65_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx75.c b/drivers/pinctrl/qcom/pinctrl-sdx75.c
index 3cfe8c7f04df..396f6fc779a2 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdx75.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdx75.c
@@ -1124,7 +1124,7 @@ static struct platform_driver sdx75_pinctrl_driver = {
.of_match_table = sdx75_pinctrl_of_match,
},
.probe = sdx75_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init sdx75_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6115-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm6115-lpass-lpi.c
index 2b09bf171a2c..e8a6f6f6af54 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6115-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6115-lpass-lpi.c
@@ -167,7 +167,7 @@ static struct platform_driver lpi_pinctrl_driver = {
.of_match_table = lpi_pinctrl_of_match,
},
.probe = lpi_pinctrl_probe,
- .remove = lpi_pinctrl_remove,
+ .remove_new = lpi_pinctrl_remove,
};
module_platform_driver(lpi_pinctrl_driver);
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6115.c b/drivers/pinctrl/qcom/pinctrl-sm6115.c
index 2a06025f4885..7ce04144b6ed 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6115.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6115.c
@@ -867,6 +867,16 @@ static const struct msm_pingroup sm6115_groups[] = {
[120] = SDC_QDSD_PINGROUP(sdc2_data, SOUTH, 0x73000, 9, 0),
};
+static const struct msm_gpio_wakeirq_map sm6115_mpm_map[] = {
+ { 0, 84 }, { 3, 75 }, { 4, 16 }, { 6, 59 }, { 8, 63 }, { 11, 17 }, { 13, 18 },
+ { 14, 51 }, { 17, 20 }, { 18, 52 }, { 19, 53 }, { 24, 6 }, { 25, 71 }, { 27, 73 },
+ { 28, 41 }, { 31, 27 }, { 32, 54 }, { 33, 55 }, { 34, 56 }, { 35, 57 }, { 36, 58 },
+ { 39, 28 }, { 46, 29 }, { 62, 60 }, { 63, 61 }, { 64, 62 }, { 65, 30 }, { 66, 31 },
+ { 67, 32 }, { 69, 33 }, { 70, 34 }, { 72, 72 }, { 75, 35 }, { 79, 36 }, { 80, 21 },
+ { 81, 38 }, { 83, 9 }, { 84, 39 }, { 85, 40 }, { 86, 19 }, { 87, 42 }, { 88, 43 },
+ { 89, 45 }, { 91, 74 }, { 93, 46 }, { 94, 47 }, { 95, 48 }, { 96, 49 }, { 97, 50 },
+};
+
static const struct msm_pinctrl_soc_data sm6115_tlmm = {
.pins = sm6115_pins,
.npins = ARRAY_SIZE(sm6115_pins),
@@ -877,6 +887,8 @@ static const struct msm_pinctrl_soc_data sm6115_tlmm = {
.ngpios = 114,
.tiles = sm6115_tiles,
.ntiles = ARRAY_SIZE(sm6115_tiles),
+ .wakeirq_map = sm6115_mpm_map,
+ .nwakeirq_map = ARRAY_SIZE(sm6115_mpm_map),
};
static int sm6115_tlmm_probe(struct platform_device *pdev)
@@ -895,7 +907,7 @@ static struct platform_driver sm6115_tlmm_driver = {
.of_match_table = sm6115_tlmm_of_match,
},
.probe = sm6115_tlmm_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init sm6115_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6125.c b/drivers/pinctrl/qcom/pinctrl-sm6125.c
index d5e2b896954c..65de34c30759 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6125.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6125.c
@@ -1221,6 +1221,21 @@ static const struct msm_pingroup sm6125_groups[] = {
[140] = SDC_QDSD_PINGROUP(sdc2_data, SOUTH, 0x58b000, 9, 0),
};
+static const struct msm_gpio_wakeirq_map sm6125_mpm_map[] = {
+ { 1, 14 }, { 3, 15 }, { 4, 16 }, { 9, 17 }, { 13, 18 }, { 14, 23 },
+ { 15, 19 }, { 17, 20 }, { 19, 21 }, { 21, 22 }, { 22, 84 }, { 25, 24 },
+ { 26, 25 }, { 27, 26 }, { 29, 27 }, { 33, 28 }, { 36, 29 }, { 42, 30 },
+ { 43, 5 }, { 44, 31 }, { 45, 6 }, { 47, 32 }, { 50, 33 }, { 59, 7 },
+ { 70, 34 }, { 72, 8 }, { 75, 35 }, { 79, 36 }, { 80, 37 }, { 81, 38 },
+ { 82, 39 }, { 83, 9 }, { 85, 40 }, { 86, 41 }, { 88, 42 }, { 89, 43 },
+ { 91, 44 }, { 92, 45 }, { 93, 46 }, { 94, 47 }, { 95, 48 }, { 96, 49 },
+ { 97, 70 }, { 98, 50 }, { 99, 51 }, { 100, 64 }, { 101, 52 },
+ { 102, 53 }, { 105, 54 }, { 107, 55 }, { 110, 56 }, { 111, 57 },
+ { 112, 58 }, { 118, 59 }, { 120, 71 }, { 122, 60 }, { 123, 61 },
+ { 124, 13 }, { 126, 62 }, { 128, 63 }, { 130, 65 }, { 131, 66 },
+ { 132, 67 },
+};
+
static const struct msm_pinctrl_soc_data sm6125_tlmm = {
.pins = sm6125_pins,
.npins = ARRAY_SIZE(sm6125_pins),
@@ -1231,6 +1246,8 @@ static const struct msm_pinctrl_soc_data sm6125_tlmm = {
.ngpios = 134,
.tiles = sm6125_tiles,
.ntiles = ARRAY_SIZE(sm6125_tiles),
+ .wakeirq_map = sm6125_mpm_map,
+ .nwakeirq_map = ARRAY_SIZE(sm6125_mpm_map),
};
static int sm6125_tlmm_probe(struct platform_device *pdev)
@@ -1249,7 +1266,7 @@ static struct platform_driver sm6125_tlmm_driver = {
.of_match_table = sm6125_tlmm_of_match,
},
.probe = sm6125_tlmm_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init sm6125_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6350.c b/drivers/pinctrl/qcom/pinctrl-sm6350.c
index f3828c07b134..4aeb1ba43ee3 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6350.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6350.c
@@ -1373,7 +1373,7 @@ static struct platform_driver sm6350_tlmm_driver = {
.of_match_table = sm6350_tlmm_of_match,
},
.probe = sm6350_tlmm_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init sm6350_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6375.c b/drivers/pinctrl/qcom/pinctrl-sm6375.c
index c82c8516932e..d86630d7125c 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6375.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6375.c
@@ -1516,7 +1516,7 @@ static struct platform_driver sm6375_tlmm_driver = {
.of_match_table = sm6375_tlmm_of_match,
},
.probe = sm6375_tlmm_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init sm6375_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm7150.c b/drivers/pinctrl/qcom/pinctrl-sm7150.c
index 33657cf98fb9..c25357ca1963 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm7150.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm7150.c
@@ -1254,7 +1254,7 @@ static struct platform_driver sm7150_tlmm_driver = {
.of_match_table = sm7150_tlmm_of_match,
},
.probe = sm7150_tlmm_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init sm7150_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8150.c b/drivers/pinctrl/qcom/pinctrl-sm8150.c
index 01aea9c70b7a..f8f5bee74f1d 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8150.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8150.c
@@ -1542,7 +1542,7 @@ static struct platform_driver sm8150_pinctrl_driver = {
.of_match_table = sm8150_pinctrl_of_match,
},
.probe = sm8150_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init sm8150_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c
index ddbc6317f2a7..cb10ce8d5d28 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c
@@ -155,7 +155,7 @@ static struct platform_driver lpi_pinctrl_driver = {
.of_match_table = lpi_pinctrl_of_match,
},
.probe = lpi_pinctrl_probe,
- .remove = lpi_pinctrl_remove,
+ .remove_new = lpi_pinctrl_remove,
};
module_platform_driver(lpi_pinctrl_driver);
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250.c b/drivers/pinctrl/qcom/pinctrl-sm8250.c
index e9961a49ff98..54fda77bf296 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8250.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8250.c
@@ -1351,7 +1351,7 @@ static struct platform_driver sm8250_pinctrl_driver = {
.of_match_table = sm8250_pinctrl_of_match,
},
.probe = sm8250_pinctrl_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init sm8250_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8350-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8350-lpass-lpi.c
index f889c779bccd..297cc95ac3c0 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8350-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8350-lpass-lpi.c
@@ -158,7 +158,7 @@ static struct platform_driver lpi_pinctrl_driver = {
.of_match_table = lpi_pinctrl_of_match,
},
.probe = lpi_pinctrl_probe,
- .remove = lpi_pinctrl_remove,
+ .remove_new = lpi_pinctrl_remove,
};
module_platform_driver(lpi_pinctrl_driver);
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8350.c b/drivers/pinctrl/qcom/pinctrl-sm8350.c
index 9c69458bd910..ac7f2820f2cb 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8350.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8350.c
@@ -1642,7 +1642,7 @@ static struct platform_driver sm8350_tlmm_driver = {
.of_match_table = sm8350_tlmm_of_match,
},
.probe = sm8350_tlmm_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init sm8350_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
index e22d03ce292e..2e7896791fc0 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
@@ -232,7 +232,7 @@ static struct platform_driver lpi_pinctrl_driver = {
.of_match_table = lpi_pinctrl_of_match,
},
.probe = lpi_pinctrl_probe,
- .remove = lpi_pinctrl_remove,
+ .remove_new = lpi_pinctrl_remove,
};
module_platform_driver(lpi_pinctrl_driver);
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8450.c b/drivers/pinctrl/qcom/pinctrl-sm8450.c
index d11bb1ee9e3d..617286711695 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8450.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8450.c
@@ -1677,7 +1677,7 @@ static struct platform_driver sm8450_tlmm_driver = {
.of_match_table = sm8450_tlmm_of_match,
},
.probe = sm8450_tlmm_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init sm8450_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8550-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8550-lpass-lpi.c
index db1a46fee9c6..64458c3fbe5e 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8550-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8550-lpass-lpi.c
@@ -240,7 +240,7 @@ static struct platform_driver lpi_pinctrl_driver = {
.of_match_table = lpi_pinctrl_of_match,
},
.probe = lpi_pinctrl_probe,
- .remove = lpi_pinctrl_remove,
+ .remove_new = lpi_pinctrl_remove,
};
module_platform_driver(lpi_pinctrl_driver);
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8550.c b/drivers/pinctrl/qcom/pinctrl-sm8550.c
index 3c847d9cb5d9..9184e0183755 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8550.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8550.c
@@ -1762,7 +1762,7 @@ static struct platform_driver sm8550_tlmm_driver = {
.of_match_table = sm8550_tlmm_of_match,
},
.probe = sm8550_tlmm_probe,
- .remove = msm_pinctrl_remove,
+ .remove_new = msm_pinctrl_remove,
};
static int __init sm8550_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index deded9c6fd7d..f4e2c88a7c82 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1185,12 +1185,11 @@ err_range:
return ret;
}
-static int pmic_gpio_remove(struct platform_device *pdev)
+static void pmic_gpio_remove(struct platform_device *pdev)
{
struct pmic_gpio_state *state = platform_get_drvdata(pdev);
gpiochip_remove(&state->chip);
- return 0;
}
static const struct of_device_id pmic_gpio_of_match[] = {
@@ -1265,7 +1264,7 @@ static struct platform_driver pmic_gpio_driver = {
.of_match_table = pmic_gpio_of_match,
},
.probe = pmic_gpio_probe,
- .remove = pmic_gpio_remove,
+ .remove_new = pmic_gpio_remove,
};
module_platform_driver(pmic_gpio_driver);
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index fe0393829c20..d16ece90d926 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -971,12 +971,11 @@ err_range:
return ret;
}
-static int pmic_mpp_remove(struct platform_device *pdev)
+static void pmic_mpp_remove(struct platform_device *pdev)
{
struct pmic_mpp_state *state = platform_get_drvdata(pdev);
gpiochip_remove(&state->chip);
- return 0;
}
static const struct of_device_id pmic_mpp_of_match[] = {
@@ -1001,7 +1000,7 @@ static struct platform_driver pmic_mpp_driver = {
.of_match_table = pmic_mpp_of_match,
},
.probe = pmic_mpp_probe,
- .remove = pmic_mpp_remove,
+ .remove_new = pmic_mpp_remove,
};
module_platform_driver(pmic_mpp_driver);
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index e0d43d076c01..9cd5247ea574 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -853,13 +853,11 @@ unregister_gpiochip:
return ret;
}
-static int pm8xxx_gpio_remove(struct platform_device *pdev)
+static void pm8xxx_gpio_remove(struct platform_device *pdev)
{
struct pm8xxx_gpio *pctrl = platform_get_drvdata(pdev);
gpiochip_remove(&pctrl->chip);
-
- return 0;
}
static struct platform_driver pm8xxx_gpio_driver = {
@@ -868,7 +866,7 @@ static struct platform_driver pm8xxx_gpio_driver = {
.of_match_table = pm8xxx_gpio_of_match,
},
.probe = pm8xxx_gpio_probe,
- .remove = pm8xxx_gpio_remove,
+ .remove_new = pm8xxx_gpio_remove,
};
module_platform_driver(pm8xxx_gpio_driver);
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 985d1a0ee8f8..3aee6835a2de 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -936,13 +936,11 @@ unregister_gpiochip:
return ret;
}
-static int pm8xxx_mpp_remove(struct platform_device *pdev)
+static void pm8xxx_mpp_remove(struct platform_device *pdev)
{
struct pm8xxx_mpp *pctrl = platform_get_drvdata(pdev);
gpiochip_remove(&pctrl->chip);
-
- return 0;
}
static struct platform_driver pm8xxx_mpp_driver = {
@@ -951,7 +949,7 @@ static struct platform_driver pm8xxx_mpp_driver = {
.of_match_table = pm8xxx_mpp_of_match,
},
.probe = pm8xxx_mpp_probe,
- .remove = pm8xxx_mpp_remove,
+ .remove_new = pm8xxx_mpp_remove,
};
module_platform_driver(pm8xxx_mpp_driver);
diff --git a/drivers/pinctrl/realtek/Kconfig b/drivers/pinctrl/realtek/Kconfig
new file mode 100644
index 000000000000..0fc6bd4fcb7e
--- /dev/null
+++ b/drivers/pinctrl/realtek/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+config PINCTRL_RTD
+ tristate "Realtek DHC core pin controller driver"
+ depends on ARCH_REALTEK
+ default y
+ select PINMUX
+ select GENERIC_PINCONF
+
+config PINCTRL_RTD1619B
+ tristate "Realtek DHC 1619B pin controller driver"
+ depends on PINCTRL_RTD
+ default y
+
+config PINCTRL_RTD1319D
+ tristate "Realtek DHC 1319D pin controller driver"
+ depends on PINCTRL_RTD
+ default y
+
+config PINCTRL_RTD1315E
+ tristate "Realtek DHC 1315E pin controller driver"
+ depends on PINCTRL_RTD
+ default y
diff --git a/drivers/pinctrl/realtek/Makefile b/drivers/pinctrl/realtek/Makefile
new file mode 100644
index 000000000000..c7bace0001e9
--- /dev/null
+++ b/drivers/pinctrl/realtek/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Realtek DHC pin control drivers
+obj-$(CONFIG_PINCTRL_RTD) += pinctrl-rtd.o
+obj-$(CONFIG_PINCTRL_RTD1619B) += pinctrl-rtd1619b.o
+obj-$(CONFIG_PINCTRL_RTD1319D) += pinctrl-rtd1319d.o
+obj-$(CONFIG_PINCTRL_RTD1315E) += pinctrl-rtd1315e.o
diff --git a/drivers/pinctrl/realtek/pinctrl-rtd.c b/drivers/pinctrl/realtek/pinctrl-rtd.c
new file mode 100644
index 000000000000..9c7a1af4ba69
--- /dev/null
+++ b/drivers/pinctrl/realtek/pinctrl-rtd.c
@@ -0,0 +1,596 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Realtek DHC pin controller driver
+ *
+ * Copyright (c) 2023 Realtek Semiconductor Corp.
+ */
+
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include "../core.h"
+#include "../pinctrl-utils.h"
+#include "pinctrl-rtd.h"
+
+struct rtd_pinctrl {
+ struct device *dev;
+ struct pinctrl_dev *pcdev;
+ void __iomem *base;
+ struct pinctrl_desc desc;
+ const struct rtd_pinctrl_desc *info;
+ struct regmap *regmap_pinctrl;
+};
+
+/* custom pinconf parameters */
+#define RTD_DRIVE_STRENGH_P (PIN_CONFIG_END + 1)
+#define RTD_DRIVE_STRENGH_N (PIN_CONFIG_END + 2)
+#define RTD_DUTY_CYCLE (PIN_CONFIG_END + 3)
+
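+/* Map the vendor-specific realtek,* DT properties to the custom parameters above. */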
+static const struct pinconf_generic_params rtd_custom_bindings[] = {
+ {"realtek,drive-strength-p", RTD_DRIVE_STRENGH_P, 0},
+ {"realtek,drive-strength-n", RTD_DRIVE_STRENGH_N, 0},
+ {"realtek,duty-cycle", RTD_DUTY_CYCLE, 0},
+};
+
+static int rtd_pinctrl_get_groups_count(struct pinctrl_dev *pcdev)
+{
+ struct rtd_pinctrl *data = pinctrl_dev_get_drvdata(pcdev);
+
+ return data->info->num_groups;
+}
+
+static const char *rtd_pinctrl_get_group_name(struct pinctrl_dev *pcdev,
+ unsigned int selector)
+{
+ struct rtd_pinctrl *data = pinctrl_dev_get_drvdata(pcdev);
+
+ return data->info->groups[selector].name;
+}
+
+static int rtd_pinctrl_get_group_pins(struct pinctrl_dev *pcdev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct rtd_pinctrl *data = pinctrl_dev_get_drvdata(pcdev);
+
+ *pins = data->info->groups[selector].pins;
+ *num_pins = data->info->groups[selector].num_pins;
+
+ return 0;
+}
+
+static void rtd_pinctrl_dbg_show(struct pinctrl_dev *pcdev,
+ struct seq_file *s,
+ unsigned int offset)
+{
+ struct rtd_pinctrl *data = pinctrl_dev_get_drvdata(pcdev);
+ const struct rtd_pin_desc *mux = &data->info->muxes[offset];
+ const struct rtd_pin_mux_desc *func;
+ u32 val;
+ u32 mask;
+ u32 pin_val;
+ int is_map;
+
+ if (!mux->name) {
+ seq_puts(s, "[not defined]");
+ return;
+ }
+ val = readl_relaxed(data->base + mux->mux_offset);
+ mask = mux->mux_mask;
+ pin_val = val & mask;
+
+ is_map = 0;
+ func = &mux->functions[0];
+ seq_puts(s, "function: ");
+ while (func->name) {
+ if (func->mux_value == pin_val) {
+ is_map = 1;
+ seq_printf(s, "[%s] ", func->name);
+ } else {
+ seq_printf(s, "%s ", func->name);
+ }
+ func++;
+ }
+ if (!is_map)
+ seq_puts(s, "[not defined]");
+}
+
+static const struct pinctrl_ops rtd_pinctrl_ops = {
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinctrl_utils_free_map,
+ .get_groups_count = rtd_pinctrl_get_groups_count,
+ .get_group_name = rtd_pinctrl_get_group_name,
+ .get_group_pins = rtd_pinctrl_get_group_pins,
+ .pin_dbg_show = rtd_pinctrl_dbg_show,
+};
+
+static int rtd_pinctrl_get_functions_count(struct pinctrl_dev *pcdev)
+{
+ struct rtd_pinctrl *data = pinctrl_dev_get_drvdata(pcdev);
+
+ return data->info->num_functions;
+}
+
+static const char *rtd_pinctrl_get_function_name(struct pinctrl_dev *pcdev,
+ unsigned int selector)
+{
+ struct rtd_pinctrl *data = pinctrl_dev_get_drvdata(pcdev);
+
+ return data->info->functions[selector].name;
+}
+
+static int rtd_pinctrl_get_function_groups(struct pinctrl_dev *pcdev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ struct rtd_pinctrl *data = pinctrl_dev_get_drvdata(pcdev);
+
+ *groups = data->info->functions[selector].groups;
+ *num_groups = data->info->functions[selector].num_groups;
+
+ return 0;
+}
+
+static const struct rtd_pin_desc *rtd_pinctrl_find_mux(struct rtd_pinctrl *data, unsigned int pin)
+{
+ if (data->info->muxes[pin].name)
+ return &data->info->muxes[pin];
+
+ return NULL;
+}
+
+static int rtd_pinctrl_set_one_mux(struct pinctrl_dev *pcdev,
+ unsigned int pin, const char *func_name)
+{
+ struct rtd_pinctrl *data = pinctrl_dev_get_drvdata(pcdev);
+ const struct rtd_pin_desc *mux;
+ int ret = 0;
+ int i;
+
+ mux = rtd_pinctrl_find_mux(data, pin);
+ if (!mux)
+ return 0;
+
+ if (!mux->functions) {
+ if (!mux->name)
+ dev_err(pcdev->dev, "NULL pin has no functions\n");
+ else
+ dev_err(pcdev->dev, "No functions available for pin %s\n", mux->name);
+ return -ENOTSUPP;
+ }
+
+ for (i = 0; mux->functions[i].name; i++) {
+ if (strcmp(mux->functions[i].name, func_name) != 0)
+ continue;
+ ret = regmap_update_bits(data->regmap_pinctrl, mux->mux_offset, mux->mux_mask,
+ mux->functions[i].mux_value);
+ return ret;
+ }
+
+ if (!mux->name) {
+ dev_err(pcdev->dev, "NULL pin provided for function %s\n", func_name);
+ return -EINVAL;
+ }
+
+ dev_err(pcdev->dev, "No function %s available for pin %s\n", func_name, mux->name);
+
+ return -EINVAL;
+}
+
+static int rtd_pinctrl_set_mux(struct pinctrl_dev *pcdev,
+ unsigned int function, unsigned int group)
+{
+ struct rtd_pinctrl *data = pinctrl_dev_get_drvdata(pcdev);
+ const unsigned int *pins;
+ unsigned int num_pins;
+ const char *func_name;
+ const char *group_name;
+ int i, ret;
+
+ func_name = data->info->functions[function].name;
+ group_name = data->info->groups[group].name;
+
+ ret = rtd_pinctrl_get_group_pins(pcdev, group, &pins, &num_pins);
+ if (ret) {
+ dev_err(pcdev->dev, "Getting pins for group %s failed\n", group_name);
+ return ret;
+ }
+
+ for (i = 0; i < num_pins; i++) {
+ ret = rtd_pinctrl_set_one_mux(pcdev, pins[i], func_name);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtd_pinctrl_gpio_request_enable(struct pinctrl_dev *pcdev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ return rtd_pinctrl_set_one_mux(pcdev, offset, "gpio");
+}
+
+static const struct pinmux_ops rtd_pinmux_ops = {
+ .get_functions_count = rtd_pinctrl_get_functions_count,
+ .get_function_name = rtd_pinctrl_get_function_name,
+ .get_function_groups = rtd_pinctrl_get_function_groups,
+ .set_mux = rtd_pinctrl_set_mux,
+ .gpio_request_enable = rtd_pinctrl_gpio_request_enable,
+};
+
+static const struct pinctrl_pin_desc
+ *rtd_pinctrl_get_pin_by_number(struct rtd_pinctrl *data, int number)
+{
+ int i;
+
+ for (i = 0; i < data->info->num_pins; i++) {
+ if (data->info->pins[i].number == number)
+ return &data->info->pins[i];
+ }
+
+ return NULL;
+}
+
+static const struct rtd_pin_config_desc
+ *rtd_pinctrl_find_config(struct rtd_pinctrl *data, unsigned int pin)
+{
+ if (data->info->configs[pin].name)
+ return &data->info->configs[pin];
+
+ return NULL;
+}
+
+static const struct rtd_pin_sconfig_desc *rtd_pinctrl_find_sconfig(struct rtd_pinctrl *data,
+ unsigned int pin)
+{
+ int i;
+ const struct pinctrl_pin_desc *pin_desc;
+ const char *pin_name;
+
+ pin_desc = rtd_pinctrl_get_pin_by_number(data, pin);
+ if (!pin_desc)
+ return NULL;
+
+ pin_name = pin_desc->name;
+
+ for (i = 0; i < data->info->num_sconfigs; i++) {
+ if (strcmp(data->info->sconfigs[i].name, pin_name) == 0)
+ return &data->info->sconfigs[i];
+ }
+
+ return NULL;
+}
+
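+/*
+ * Translate one generic or custom pinconf parameter into the register
+ * field (mask/value) for the given pin and write it through the regmap.
+ */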
+static int rtd_pconf_parse_conf(struct rtd_pinctrl *data,
+ unsigned int pinnr,
+ enum pin_config_param param,
+ enum pin_config_param arg)
+{
+ const struct rtd_pin_config_desc *config_desc;
+ const struct rtd_pin_sconfig_desc *sconfig_desc;
+ u8 set_val = 0;
+ u16 strength;
+ u32 val;
+ u32 mask;
+ u32 pulsel_off, pulen_off, smt_off, curr_off, pow_off, reg_off, p_off, n_off;
+ const char *name = data->info->pins[pinnr].name;
+ int ret = 0;
+
+ config_desc = rtd_pinctrl_find_config(data, pinnr);
+ if (!config_desc) {
+ dev_err(data->dev, "Not support pin config for pin: %s\n", name);
+ return -ENOTSUPP;
+ }
+ switch ((u32)param) {
+ case PIN_CONFIG_INPUT_SCHMITT:
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ if (config_desc->smt_offset == NA) {
+ dev_err(data->dev, "Not support input schmitt for pin: %s\n", name);
+ return -ENOTSUPP;
+ }
+ smt_off = config_desc->base_bit + config_desc->smt_offset;
+ reg_off = config_desc->reg_offset;
+ set_val = arg;
+
+ mask = BIT(smt_off);
+ val = set_val ? BIT(smt_off) : 0;
+ break;
+
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ if (config_desc->pud_en_offset == NA) {
+ dev_err(data->dev, "Not support push pull for pin: %s\n", name);
+ return -ENOTSUPP;
+ }
+ pulen_off = config_desc->base_bit + config_desc->pud_en_offset;
+ reg_off = config_desc->reg_offset;
+
+ mask = BIT(pulen_off);
+ val = 0;
+ break;
+
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (config_desc->pud_en_offset == NA) {
+ dev_err(data->dev, "Not support bias disable for pin: %s\n", name);
+ return -ENOTSUPP;
+ }
+ pulen_off = config_desc->base_bit + config_desc->pud_en_offset;
+ reg_off = config_desc->reg_offset;
+
+ mask = BIT(pulen_off);
+ val = 0;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (config_desc->pud_en_offset == NA) {
+ dev_err(data->dev, "Not support bias pull up for pin:%s\n", name);
+ return -ENOTSUPP;
+ }
+ pulen_off = config_desc->base_bit + config_desc->pud_en_offset;
+ pulsel_off = config_desc->base_bit + config_desc->pud_sel_offset;
+ reg_off = config_desc->reg_offset;
+
+ mask = BIT(pulen_off) | BIT(pulsel_off);
+ val = mask;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (config_desc->pud_en_offset == NA) {
+ dev_err(data->dev, "Not support bias pull down for pin: %s\n", name);
+ return -ENOTSUPP;
+ }
+ pulen_off = config_desc->base_bit + config_desc->pud_en_offset;
+ pulsel_off = config_desc->base_bit + config_desc->pud_sel_offset;
+ reg_off = config_desc->reg_offset;
+
+ mask = BIT(pulen_off) | BIT(pulsel_off);
+ val = BIT(pulen_off);
+ break;
+
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ curr_off = config_desc->base_bit + config_desc->curr_offset;
+ reg_off = config_desc->reg_offset;
+ strength = arg;
+ val = 0;
+ switch (config_desc->curr_type) {
+ case PADDRI_4_8:
+ if (strength == 4)
+ val = 0;
+ else if (strength == 8)
+ val = BIT(curr_off);
+ else
+ return -EINVAL;
+ break;
+ case PADDRI_2_4:
+ if (strength == 2)
+ val = 0;
+ else if (strength == 4)
+ val = BIT(curr_off);
+ else
+ return -EINVAL;
+ break;
+ case NA:
+ dev_err(data->dev, "Not support drive strength for pin: %s\n", name);
+ return -ENOTSUPP;
+ default:
+ return -EINVAL;
+ }
+ mask = BIT(curr_off);
+ break;
+
+ case PIN_CONFIG_POWER_SOURCE:
+ if (config_desc->power_offset == NA) {
+ dev_err(data->dev, "Not support power source for pin: %s\n", name);
+ return -ENOTSUPP;
+ }
+ reg_off = config_desc->reg_offset;
+ pow_off = config_desc->base_bit + config_desc->power_offset;
+ if (pow_off >= 32) {
+ reg_off += 0x4;
+ pow_off -= 32;
+ }
+ set_val = arg;
+ mask = BIT(pow_off);
+ val = set_val ? mask : 0;
+ break;
+
+ case RTD_DRIVE_STRENGH_P:
+ sconfig_desc = rtd_pinctrl_find_sconfig(data, pinnr);
+ if (!sconfig_desc) {
+ dev_err(data->dev, "Not support P driving for pin: %s\n", name);
+ return -ENOTSUPP;
+ }
+ set_val = arg;
+ reg_off = sconfig_desc->reg_offset;
+ p_off = sconfig_desc->pdrive_offset;
+ if (p_off >= 32) {
+ reg_off += 0x4;
+ p_off -= 32;
+ }
+ mask = GENMASK(p_off + sconfig_desc->pdrive_maskbits - 1, p_off);
+ val = set_val << p_off;
+ break;
+
+ case RTD_DRIVE_STRENGH_N:
+ sconfig_desc = rtd_pinctrl_find_sconfig(data, pinnr);
+ if (!sconfig_desc) {
+ dev_err(data->dev, "Not support N driving for pin: %s\n", name);
+ return -ENOTSUPP;
+ }
+ set_val = arg;
+ reg_off = sconfig_desc->reg_offset;
+ n_off = sconfig_desc->ndrive_offset;
+ if (n_off >= 32) {
+ reg_off += 0x4;
+ n_off -= 32;
+ }
+ mask = GENMASK(n_off + sconfig_desc->ndrive_maskbits - 1, n_off);
+ val = set_val << n_off;
+ break;
+
+ case RTD_DUTY_CYCLE:
+ sconfig_desc = rtd_pinctrl_find_sconfig(data, pinnr);
+ if (!sconfig_desc || sconfig_desc->dcycle_offset == NA) {
+ dev_err(data->dev, "Not support duty cycle for pin: %s\n", name);
+ return -ENOTSUPP;
+ }
+ set_val = arg;
+ reg_off = config_desc->reg_offset;
+ mask = GENMASK(sconfig_desc->dcycle_offset +
+ sconfig_desc->dcycle_maskbits - 1, sconfig_desc->dcycle_offset);
+ val = set_val << sconfig_desc->dcycle_offset;
+ break;
+
+ default:
+ dev_err(data->dev, "unsupported pinconf: %d\n", (u32)param);
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(data->regmap_pinctrl, reg_off, mask, val);
+ if (ret)
+ dev_err(data->dev, "could not update pinconf(%d) for pin(%s)\n", (u32)param, name);
+
+ return ret;
+}
+
+static int rtd_pin_config_get(struct pinctrl_dev *pcdev, unsigned int pinnr,
+ unsigned long *config)
+{
+ unsigned int param = pinconf_to_config_param(*config);
+ unsigned int arg = 0;
+
+ switch (param) {
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+ return 0;
+}
+
+static int rtd_pin_config_set(struct pinctrl_dev *pcdev, unsigned int pinnr,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct rtd_pinctrl *data = pinctrl_dev_get_drvdata(pcdev);
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < num_configs; i++) {
+ ret = rtd_pconf_parse_conf(data, pinnr,
+ pinconf_to_config_param(configs[i]),
+ pinconf_to_config_argument(configs[i]));
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtd_pin_config_group_set(struct pinctrl_dev *pcdev, unsigned int group,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct rtd_pinctrl *data = pinctrl_dev_get_drvdata(pcdev);
+ const unsigned int *pins;
+ unsigned int num_pins;
+ const char *group_name;
+ int i, ret;
+
+ group_name = data->info->groups[group].name;
+
+ ret = rtd_pinctrl_get_group_pins(pcdev, group, &pins, &num_pins);
+ if (ret) {
+ dev_err(pcdev->dev, "Getting pins for group %s failed\n", group_name);
+ return ret;
+ }
+
+ for (i = 0; i < num_pins; i++) {
+ ret = rtd_pin_config_set(pcdev, pins[i], configs, num_configs);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops rtd_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = rtd_pin_config_get,
+ .pin_config_set = rtd_pin_config_set,
+ .pin_config_group_set = rtd_pin_config_group_set,
+};
+
+static struct regmap_config rtd_pinctrl_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .use_relaxed_mmio = true,
+};
+
+int rtd_pinctrl_probe(struct platform_device *pdev, const struct rtd_pinctrl_desc *desc)
+{
+ struct rtd_pinctrl *data;
+ int ret;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->base = of_iomap(pdev->dev.of_node, 0);
+ if (!data->base)
+ return -ENOMEM;
+
+ data->dev = &pdev->dev;
+ data->info = desc;
+ data->desc.name = dev_name(&pdev->dev);
+ data->desc.pins = data->info->pins;
+ data->desc.npins = data->info->num_pins;
+ data->desc.pctlops = &rtd_pinctrl_ops;
+ data->desc.pmxops = &rtd_pinmux_ops;
+ data->desc.confops = &rtd_pinconf_ops;
+ data->desc.custom_params = rtd_custom_bindings;
+ data->desc.num_custom_params = ARRAY_SIZE(rtd_custom_bindings);
+ data->desc.owner = THIS_MODULE;
+ data->regmap_pinctrl = devm_regmap_init_mmio(data->dev, data->base,
+ &rtd_pinctrl_regmap_config);
+
+ if (IS_ERR(data->regmap_pinctrl)) {
+ dev_err(data->dev, "failed to init regmap: %ld\n",
+ PTR_ERR(data->regmap_pinctrl));
+ ret = PTR_ERR(data->regmap_pinctrl);
+ goto unmap;
+ }
+
+ data->pcdev = pinctrl_register(&data->desc, &pdev->dev, data);
+ if (IS_ERR(data->pcdev)) {
+ ret = PTR_ERR(data->pcdev);
+ goto unmap;
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ dev_dbg(&pdev->dev, "probed\n");
+
+ return 0;
+
+unmap:
+ iounmap(data->base);
+ return ret;
+}
+EXPORT_SYMBOL(rtd_pinctrl_probe);
+
+MODULE_DESCRIPTION("Realtek DHC SoC pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/realtek/pinctrl-rtd.h b/drivers/pinctrl/realtek/pinctrl-rtd.h
new file mode 100644
index 000000000000..e15130896abc
--- /dev/null
+++ b/drivers/pinctrl/realtek/pinctrl-rtd.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2023 Realtek Semiconductor Corp.
+ */
+
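+/* Marks a pin-config field that is not available on a given pin. */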
+#define NA 0xffffffff
+#define PADDRI_4_8 1
+#define PADDRI_2_4 0
+
+struct rtd_pin_group_desc {
+ const char *name;
+ const unsigned int *pins;
+ unsigned int num_pins;
+};
+
+struct rtd_pin_func_desc {
+ const char *name;
+ const char * const *groups;
+ unsigned int num_groups;
+};
+
+struct rtd_pin_mux_desc {
+ const char *name;
+ u32 mux_value;
+};
+
+struct rtd_pin_config_desc {
+ const char *name;
+ unsigned int reg_offset;
+ unsigned int base_bit;
+ unsigned int pud_en_offset;
+ unsigned int pud_sel_offset;
+ unsigned int curr_offset;
+ unsigned int smt_offset;
+ unsigned int power_offset;
+ unsigned int curr_type;
+};
+
+struct rtd_pin_sconfig_desc {
+ const char *name;
+ unsigned int reg_offset;
+ unsigned int dcycle_offset;
+ unsigned int dcycle_maskbits;
+ unsigned int ndrive_offset;
+ unsigned int ndrive_maskbits;
+ unsigned int pdrive_offset;
+ unsigned int pdrive_maskbits;
+};
+
+struct rtd_pin_desc {
+ const char *name;
+ unsigned int mux_offset;
+ u32 mux_mask;
+ const struct rtd_pin_mux_desc *functions;
+};
+
+struct rtd_pin_reg_list {
+ unsigned int reg_offset;
+ unsigned int val;
+};
+
+#define SHIFT_LEFT(_val, _shift) ((_val) << (_shift))
+
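+/*
+ * The variadic arguments are RTK_PIN_FUNC entries; an empty sentinel entry
+ * is appended so lookups can terminate on a NULL function name.
+ */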
+#define RTK_PIN_MUX(_name, _mux_off, _mux_mask, ...) \
+ { \
+ .name = # _name, \
+ .mux_offset = _mux_off, \
+ .mux_mask = _mux_mask, \
+ .functions = (const struct rtd_pin_mux_desc []) { \
+ __VA_ARGS__, { } \
+ }, \
+ }
+
+#define RTK_PIN_CONFIG(_name, _reg_off, _base_bit, _pud_en_off, \
+ _pud_sel_off, _curr_off, _smt_off, _pow_off, _curr_type) \
+ { \
+ .name = # _name, \
+ .reg_offset = _reg_off, \
+ .base_bit = _base_bit, \
+ .pud_en_offset = _pud_en_off, \
+ .pud_sel_offset = _pud_sel_off, \
+ .curr_offset = _curr_off, \
+ .smt_offset = _smt_off, \
+ .power_offset = _pow_off, \
+ .curr_type = _curr_type, \
+ }
+
+#define RTK_PIN_SCONFIG(_name, _reg_off, _d_offset, _d_mask, \
+ _n_offset, _n_mask, _p_offset, _p_mask) \
+ { \
+ .name = # _name, \
+ .reg_offset = _reg_off, \
+ .dcycle_offset = _d_offset, \
+ .dcycle_maskbits = _d_mask, \
+ .ndrive_offset = _n_offset, \
+ .ndrive_maskbits = _n_mask, \
+ .pdrive_offset = _p_offset, \
+ .pdrive_maskbits = _p_mask, \
+ }
+
+#define RTK_PIN_FUNC(_mux_val, _name) \
+ { \
+ .name = _name, \
+ .mux_value = _mux_val, \
+ }
+
+struct rtd_pinctrl_desc {
+ const struct pinctrl_pin_desc *pins;
+ unsigned int num_pins;
+ const struct rtd_pin_group_desc *groups;
+ unsigned int num_groups;
+ const struct rtd_pin_func_desc *functions;
+ unsigned int num_functions;
+ const struct rtd_pin_desc *muxes;
+ unsigned int num_muxes;
+ const struct rtd_pin_config_desc *configs;
+ unsigned int num_configs;
+ const struct rtd_pin_sconfig_desc *sconfigs;
+ unsigned int num_sconfigs;
+ struct rtd_pin_reg_list *lists;
+ unsigned int num_regs;
+};
+
+int rtd_pinctrl_probe(struct platform_device *pdev, const struct rtd_pinctrl_desc *desc);
diff --git a/drivers/pinctrl/realtek/pinctrl-rtd1315e.c b/drivers/pinctrl/realtek/pinctrl-rtd1315e.c
new file mode 100644
index 000000000000..10afc736a52b
--- /dev/null
+++ b/drivers/pinctrl/realtek/pinctrl-rtd1315e.c
@@ -0,0 +1,1440 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Realtek DHC 1315E pin controller driver
+ *
+ * Copyright (c) 2023 Realtek Semiconductor Corp.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-rtd.h"
+
+enum rtd13xxe_iso_pins {
+ RTD1315E_ISO_GPIO_0 = 0,
+ RTD1315E_ISO_GPIO_1,
+ RTD1315E_ISO_EMMC_RST_N,
+ RTD1315E_ISO_EMMC_DD_SB,
+ RTD1315E_ISO_EMMC_CLK,
+ RTD1315E_ISO_EMMC_CMD,
+ RTD1315E_ISO_GPIO_6,
+ RTD1315E_ISO_GPIO_7,
+ RTD1315E_ISO_GPIO_8,
+ RTD1315E_ISO_GPIO_9,
+ RTD1315E_ISO_GPIO_10,
+ RTD1315E_ISO_GPIO_11,
+ RTD1315E_ISO_GPIO_12,
+ RTD1315E_ISO_GPIO_13,
+ RTD1315E_ISO_GPIO_14,
+ RTD1315E_ISO_GPIO_15,
+ RTD1315E_ISO_GPIO_16,
+ RTD1315E_ISO_GPIO_17,
+ RTD1315E_ISO_GPIO_18,
+ RTD1315E_ISO_GPIO_19,
+ RTD1315E_ISO_GPIO_20,
+ RTD1315E_ISO_EMMC_DATA_0,
+ RTD1315E_ISO_EMMC_DATA_1,
+ RTD1315E_ISO_EMMC_DATA_2,
+ RTD1315E_ISO_USB_CC2,
+ RTD1315E_ISO_GPIO_25,
+ RTD1315E_ISO_GPIO_26,
+ RTD1315E_ISO_GPIO_27,
+ RTD1315E_ISO_GPIO_28,
+ RTD1315E_ISO_GPIO_29,
+ RTD1315E_ISO_GPIO_30,
+ RTD1315E_ISO_GPIO_31,
+ RTD1315E_ISO_GPIO_32,
+ RTD1315E_ISO_GPIO_33,
+ RTD1315E_ISO_GPIO_34,
+ RTD1315E_ISO_GPIO_35,
+ RTD1315E_ISO_HIF_DATA,
+ RTD1315E_ISO_HIF_EN,
+ RTD1315E_ISO_HIF_RDY,
+ RTD1315E_ISO_HIF_CLK,
+ RTD1315E_ISO_GPIO_DUMMY_40,
+ RTD1315E_ISO_GPIO_DUMMY_41,
+ RTD1315E_ISO_GPIO_DUMMY_42,
+ RTD1315E_ISO_GPIO_DUMMY_43,
+ RTD1315E_ISO_GPIO_DUMMY_44,
+ RTD1315E_ISO_GPIO_DUMMY_45,
+ RTD1315E_ISO_GPIO_46,
+ RTD1315E_ISO_GPIO_47,
+ RTD1315E_ISO_GPIO_48,
+ RTD1315E_ISO_GPIO_49,
+ RTD1315E_ISO_GPIO_50,
+ RTD1315E_ISO_USB_CC1,
+ RTD1315E_ISO_EMMC_DATA_3,
+ RTD1315E_ISO_EMMC_DATA_4,
+ RTD1315E_ISO_IR_RX,
+ RTD1315E_ISO_UR0_RX,
+ RTD1315E_ISO_UR0_TX,
+ RTD1315E_ISO_GPIO_57,
+ RTD1315E_ISO_GPIO_58,
+ RTD1315E_ISO_GPIO_59,
+ RTD1315E_ISO_GPIO_60,
+ RTD1315E_ISO_GPIO_61,
+ RTD1315E_ISO_GPIO_62,
+ RTD1315E_ISO_GPIO_DUMMY_63,
+ RTD1315E_ISO_GPIO_DUMMY_64,
+ RTD1315E_ISO_GPIO_DUMMY_65,
+ RTD1315E_ISO_GPIO_66,
+ RTD1315E_ISO_GPIO_67,
+ RTD1315E_ISO_GPIO_68,
+ RTD1315E_ISO_GPIO_69,
+ RTD1315E_ISO_GPIO_70,
+ RTD1315E_ISO_GPIO_71,
+ RTD1315E_ISO_GPIO_72,
+ RTD1315E_ISO_GPIO_DUMMY_73,
+ RTD1315E_ISO_EMMC_DATA_5,
+ RTD1315E_ISO_EMMC_DATA_6,
+ RTD1315E_ISO_EMMC_DATA_7,
+ RTD1315E_ISO_GPIO_DUMMY_77,
+ RTD1315E_ISO_GPIO_78,
+ RTD1315E_ISO_GPIO_79,
+ RTD1315E_ISO_GPIO_80,
+ RTD1315E_ISO_GPIO_81,
+ RTD1315E_ISO_UR2_LOC,
+ RTD1315E_ISO_GSPI_LOC,
+ RTD1315E_ISO_HI_WIDTH,
+ RTD1315E_ISO_SF_EN,
+ RTD1315E_ISO_ARM_TRACE_DBG_EN,
+ RTD1315E_ISO_EJTAG_AUCPU_LOC,
+ RTD1315E_ISO_EJTAG_ACPU_LOC,
+ RTD1315E_ISO_EJTAG_VCPU_LOC,
+ RTD1315E_ISO_EJTAG_SCPU_LOC,
+ RTD1315E_ISO_DMIC_LOC,
+ RTD1315E_ISO_VTC_DMIC_LOC,
+ RTD1315E_ISO_VTC_TDM_LOC,
+ RTD1315E_ISO_VTC_I2SI_LOC,
+ RTD1315E_ISO_TDM_AI_LOC,
+ RTD1315E_ISO_AI_LOC,
+ RTD1315E_ISO_SPDIF_LOC,
+ RTD1315E_ISO_HIF_EN_LOC,
+ RTD1315E_ISO_SCAN_SWITCH,
+ RTD1315E_ISO_WD_RSET,
+ RTD1315E_ISO_BOOT_SEL,
+ RTD1315E_ISO_RESET_N,
+ RTD1315E_ISO_TESTMODE,
+};
+
+static const struct pinctrl_pin_desc rtd1315e_iso_pins[] = {
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_0, "gpio_0"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_1, "gpio_1"),
+ PINCTRL_PIN(RTD1315E_ISO_EMMC_RST_N, "emmc_rst_n"),
+ PINCTRL_PIN(RTD1315E_ISO_EMMC_DD_SB, "emmc_dd_sb"),
+ PINCTRL_PIN(RTD1315E_ISO_EMMC_CLK, "emmc_clk"),
+ PINCTRL_PIN(RTD1315E_ISO_EMMC_CMD, "emmc_cmd"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_6, "gpio_6"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_7, "gpio_7"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_8, "gpio_8"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_9, "gpio_9"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_10, "gpio_10"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_11, "gpio_11"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_12, "gpio_12"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_13, "gpio_13"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_14, "gpio_14"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_15, "gpio_15"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_16, "gpio_16"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_17, "gpio_17"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_18, "gpio_18"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_19, "gpio_19"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_20, "gpio_20"),
+ PINCTRL_PIN(RTD1315E_ISO_EMMC_DATA_0, "emmc_data_0"),
+ PINCTRL_PIN(RTD1315E_ISO_EMMC_DATA_1, "emmc_data_1"),
+ PINCTRL_PIN(RTD1315E_ISO_EMMC_DATA_2, "emmc_data_2"),
+ PINCTRL_PIN(RTD1315E_ISO_USB_CC2, "usb_cc2"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_25, "gpio_25"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_26, "gpio_26"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_27, "gpio_27"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_28, "gpio_28"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_29, "gpio_29"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_30, "gpio_30"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_31, "gpio_31"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_32, "gpio_32"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_33, "gpio_33"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_34, "gpio_34"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_35, "gpio_35"),
+ PINCTRL_PIN(RTD1315E_ISO_HIF_DATA, "hif_data"),
+ PINCTRL_PIN(RTD1315E_ISO_HIF_EN, "hif_en"),
+ PINCTRL_PIN(RTD1315E_ISO_HIF_RDY, "hif_rdy"),
+ PINCTRL_PIN(RTD1315E_ISO_HIF_CLK, "hif_clk"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_DUMMY_40, "gpio_dummy_40"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_DUMMY_41, "gpio_dummy_41"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_DUMMY_42, "gpio_dummy_42"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_DUMMY_43, "gpio_dummy_43"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_DUMMY_44, "gpio_dummy_44"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_DUMMY_45, "gpio_dummy_45"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_46, "gpio_46"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_47, "gpio_47"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_48, "gpio_48"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_49, "gpio_49"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_50, "gpio_50"),
+ PINCTRL_PIN(RTD1315E_ISO_USB_CC1, "usb_cc1"),
+ PINCTRL_PIN(RTD1315E_ISO_EMMC_DATA_3, "emmc_data_3"),
+ PINCTRL_PIN(RTD1315E_ISO_EMMC_DATA_4, "emmc_data_4"),
+ PINCTRL_PIN(RTD1315E_ISO_IR_RX, "ir_rx"),
+ PINCTRL_PIN(RTD1315E_ISO_UR0_RX, "ur0_rx"),
+ PINCTRL_PIN(RTD1315E_ISO_UR0_TX, "ur0_tx"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_57, "gpio_57"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_58, "gpio_58"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_59, "gpio_59"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_60, "gpio_60"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_61, "gpio_61"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_62, "gpio_62"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_DUMMY_63, "gpio_dummy_63"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_DUMMY_64, "gpio_dummy_64"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_DUMMY_65, "gpio_dummy_65"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_66, "gpio_66"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_67, "gpio_67"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_68, "gpio_68"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_69, "gpio_69"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_70, "gpio_70"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_71, "gpio_71"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_72, "gpio_72"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_DUMMY_73, "gpio_dummy_73"),
+ PINCTRL_PIN(RTD1315E_ISO_EMMC_DATA_5, "emmc_data_5"),
+ PINCTRL_PIN(RTD1315E_ISO_EMMC_DATA_6, "emmc_data_6"),
+ PINCTRL_PIN(RTD1315E_ISO_EMMC_DATA_7, "emmc_data_7"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_DUMMY_77, "gpio_dummy_77"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_78, "gpio_78"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_79, "gpio_79"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_80, "gpio_80"),
+ PINCTRL_PIN(RTD1315E_ISO_GPIO_81, "gpio_81"),
+ PINCTRL_PIN(RTD1315E_ISO_UR2_LOC, "ur2_loc"),
+ PINCTRL_PIN(RTD1315E_ISO_GSPI_LOC, "gspi_loc"),
+ PINCTRL_PIN(RTD1315E_ISO_HI_WIDTH, "hi_width"),
+ PINCTRL_PIN(RTD1315E_ISO_SF_EN, "sf_en"),
+ PINCTRL_PIN(RTD1315E_ISO_ARM_TRACE_DBG_EN, "arm_trace_dbg_en"),
+ PINCTRL_PIN(RTD1315E_ISO_EJTAG_AUCPU_LOC, "ejtag_aucpu_loc"),
+ PINCTRL_PIN(RTD1315E_ISO_EJTAG_ACPU_LOC, "ejtag_acpu_loc"),
+ PINCTRL_PIN(RTD1315E_ISO_EJTAG_VCPU_LOC, "ejtag_vcpu_loc"),
+ PINCTRL_PIN(RTD1315E_ISO_EJTAG_SCPU_LOC, "ejtag_scpu_loc"),
+ PINCTRL_PIN(RTD1315E_ISO_DMIC_LOC, "dmic_loc"),
+ PINCTRL_PIN(RTD1315E_ISO_VTC_DMIC_LOC, "vtc_dmic_loc"),
+ PINCTRL_PIN(RTD1315E_ISO_VTC_TDM_LOC, "vtc_tdm_loc"),
+ PINCTRL_PIN(RTD1315E_ISO_VTC_I2SI_LOC, "vtc_i2si_loc"),
+ PINCTRL_PIN(RTD1315E_ISO_TDM_AI_LOC, "tdm_ai_loc"),
+ PINCTRL_PIN(RTD1315E_ISO_AI_LOC, "ai_loc"),
+ PINCTRL_PIN(RTD1315E_ISO_SPDIF_LOC, "spdif_loc"),
+ PINCTRL_PIN(RTD1315E_ISO_HIF_EN_LOC, "hif_en_loc"),
+ PINCTRL_PIN(RTD1315E_ISO_SCAN_SWITCH, "scan_switch"),
+ PINCTRL_PIN(RTD1315E_ISO_WD_RSET, "wd_rset"),
+ PINCTRL_PIN(RTD1315E_ISO_BOOT_SEL, "boot_sel"),
+ PINCTRL_PIN(RTD1315E_ISO_RESET_N, "reset_n"),
+ PINCTRL_PIN(RTD1315E_ISO_TESTMODE, "testmode"),
+};
+
+/* Tagged as __maybe_unused since there are pins we may use in the future */
+#define DECLARE_RTD1315E_PIN(_pin, _name) \
+ static const unsigned int rtd1315e_## _name ##_pins[] __maybe_unused = { _pin }
+
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_0, gpio_0);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_1, gpio_1);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_EMMC_RST_N, emmc_rst_n);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_EMMC_DD_SB, emmc_dd_sb);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_EMMC_CLK, emmc_clk);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_EMMC_CMD, emmc_cmd);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_6, gpio_6);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_7, gpio_7);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_8, gpio_8);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_9, gpio_9);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_10, gpio_10);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_11, gpio_11);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_12, gpio_12);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_13, gpio_13);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_14, gpio_14);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_15, gpio_15);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_16, gpio_16);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_17, gpio_17);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_18, gpio_18);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_19, gpio_19);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_20, gpio_20);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_EMMC_DATA_0, emmc_data_0);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_EMMC_DATA_1, emmc_data_1);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_EMMC_DATA_2, emmc_data_2);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_USB_CC2, usb_cc2);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_25, gpio_25);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_26, gpio_26);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_27, gpio_27);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_28, gpio_28);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_29, gpio_29);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_30, gpio_30);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_31, gpio_31);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_32, gpio_32);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_33, gpio_33);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_34, gpio_34);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_35, gpio_35);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_HIF_DATA, hif_data);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_HIF_EN, hif_en);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_HIF_RDY, hif_rdy);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_HIF_CLK, hif_clk);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_DUMMY_40, gpio_dummy_40);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_DUMMY_41, gpio_dummy_41);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_DUMMY_42, gpio_dummy_42);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_DUMMY_43, gpio_dummy_43);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_DUMMY_44, gpio_dummy_44);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_DUMMY_45, gpio_dummy_45);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_46, gpio_46);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_47, gpio_47);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_48, gpio_48);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_49, gpio_49);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_50, gpio_50);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_USB_CC1, usb_cc1);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_EMMC_DATA_3, emmc_data_3);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_EMMC_DATA_4, emmc_data_4);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_IR_RX, ir_rx);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_UR0_RX, ur0_rx);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_UR0_TX, ur0_tx);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_57, gpio_57);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_58, gpio_58);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_59, gpio_59);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_60, gpio_60);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_61, gpio_61);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_62, gpio_62);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_DUMMY_63, gpio_dummy_63);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_DUMMY_64, gpio_dummy_64);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_DUMMY_65, gpio_dummy_65);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_66, gpio_66);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_67, gpio_67);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_68, gpio_68);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_69, gpio_69);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_70, gpio_70);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_71, gpio_71);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_72, gpio_72);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_DUMMY_73, gpio_dummy_73);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_EMMC_DATA_5, emmc_data_5);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_EMMC_DATA_6, emmc_data_6);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_EMMC_DATA_7, emmc_data_7);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_DUMMY_77, gpio_dummy_77);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_78, gpio_78);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_79, gpio_79);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_80, gpio_80);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GPIO_81, gpio_81);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_UR2_LOC, ur2_loc);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_GSPI_LOC, gspi_loc);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_HI_WIDTH, hi_width);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_SF_EN, sf_en);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_ARM_TRACE_DBG_EN, arm_trace_dbg_en);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_EJTAG_AUCPU_LOC, ejtag_aucpu_loc);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_EJTAG_ACPU_LOC, ejtag_acpu_loc);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_EJTAG_VCPU_LOC, ejtag_vcpu_loc);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_EJTAG_SCPU_LOC, ejtag_scpu_loc);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_DMIC_LOC, dmic_loc);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_VTC_DMIC_LOC, vtc_dmic_loc);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_VTC_TDM_LOC, vtc_tdm_loc);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_VTC_I2SI_LOC, vtc_i2si_loc);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_TDM_AI_LOC, tdm_ai_loc);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_AI_LOC, ai_loc);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_SPDIF_LOC, spdif_loc);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_HIF_EN_LOC, hif_en_loc);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_SCAN_SWITCH, scan_switch);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_WD_RSET, wd_rset);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_BOOT_SEL, boot_sel);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_RESET_N, reset_n);
+DECLARE_RTD1315E_PIN(RTD1315E_ISO_TESTMODE, testmode);
+
+#define RTD1315E_GROUP(_name) \
+ { \
+ .name = # _name, \
+ .pins = rtd1315e_ ## _name ## _pins, \
+ .num_pins = ARRAY_SIZE(rtd1315e_ ## _name ## _pins), \
+ }
+
+static const struct rtd_pin_group_desc rtd1315e_pin_groups[] = {
+ RTD1315E_GROUP(gpio_0),
+ RTD1315E_GROUP(gpio_1),
+ RTD1315E_GROUP(emmc_rst_n),
+ RTD1315E_GROUP(emmc_dd_sb),
+ RTD1315E_GROUP(emmc_clk),
+ RTD1315E_GROUP(emmc_cmd),
+ RTD1315E_GROUP(gpio_6),
+ RTD1315E_GROUP(gpio_7),
+ RTD1315E_GROUP(gpio_8),
+ RTD1315E_GROUP(gpio_9),
+ RTD1315E_GROUP(gpio_10),
+ RTD1315E_GROUP(gpio_11),
+ RTD1315E_GROUP(gpio_12),
+ RTD1315E_GROUP(gpio_13),
+ RTD1315E_GROUP(gpio_14),
+ RTD1315E_GROUP(gpio_15),
+ RTD1315E_GROUP(gpio_16),
+ RTD1315E_GROUP(gpio_17),
+ RTD1315E_GROUP(gpio_18),
+ RTD1315E_GROUP(gpio_19),
+ RTD1315E_GROUP(gpio_20),
+ RTD1315E_GROUP(emmc_data_0),
+ RTD1315E_GROUP(emmc_data_1),
+ RTD1315E_GROUP(emmc_data_2),
+ RTD1315E_GROUP(usb_cc2),
+ RTD1315E_GROUP(gpio_25),
+ RTD1315E_GROUP(gpio_26),
+ RTD1315E_GROUP(gpio_27),
+ RTD1315E_GROUP(gpio_28),
+ RTD1315E_GROUP(gpio_29),
+ RTD1315E_GROUP(gpio_30),
+ RTD1315E_GROUP(gpio_31),
+ RTD1315E_GROUP(gpio_32),
+ RTD1315E_GROUP(gpio_33),
+ RTD1315E_GROUP(gpio_34),
+ RTD1315E_GROUP(gpio_35),
+ RTD1315E_GROUP(hif_data),
+ RTD1315E_GROUP(hif_en),
+ RTD1315E_GROUP(hif_rdy),
+ RTD1315E_GROUP(hif_clk),
+ RTD1315E_GROUP(gpio_dummy_40),
+ RTD1315E_GROUP(gpio_dummy_41),
+ RTD1315E_GROUP(gpio_dummy_42),
+ RTD1315E_GROUP(gpio_dummy_43),
+ RTD1315E_GROUP(gpio_dummy_44),
+ RTD1315E_GROUP(gpio_dummy_45),
+ RTD1315E_GROUP(gpio_46),
+ RTD1315E_GROUP(gpio_47),
+ RTD1315E_GROUP(gpio_48),
+ RTD1315E_GROUP(gpio_49),
+ RTD1315E_GROUP(gpio_50),
+ RTD1315E_GROUP(usb_cc1),
+ RTD1315E_GROUP(emmc_data_3),
+ RTD1315E_GROUP(emmc_data_4),
+ RTD1315E_GROUP(ir_rx),
+ RTD1315E_GROUP(ur0_rx),
+ RTD1315E_GROUP(ur0_tx),
+ RTD1315E_GROUP(gpio_57),
+ RTD1315E_GROUP(gpio_58),
+ RTD1315E_GROUP(gpio_59),
+ RTD1315E_GROUP(gpio_60),
+ RTD1315E_GROUP(gpio_61),
+ RTD1315E_GROUP(gpio_62),
+ RTD1315E_GROUP(gpio_dummy_63),
+ RTD1315E_GROUP(gpio_dummy_64),
+ RTD1315E_GROUP(gpio_dummy_65),
+ RTD1315E_GROUP(gpio_66),
+ RTD1315E_GROUP(gpio_67),
+ RTD1315E_GROUP(gpio_68),
+ RTD1315E_GROUP(gpio_69),
+ RTD1315E_GROUP(gpio_70),
+ RTD1315E_GROUP(gpio_71),
+ RTD1315E_GROUP(gpio_72),
+ RTD1315E_GROUP(gpio_dummy_73),
+ RTD1315E_GROUP(emmc_data_5),
+ RTD1315E_GROUP(emmc_data_6),
+ RTD1315E_GROUP(emmc_data_7),
+ RTD1315E_GROUP(gpio_dummy_77),
+ RTD1315E_GROUP(gpio_78),
+ RTD1315E_GROUP(gpio_79),
+ RTD1315E_GROUP(gpio_80),
+ RTD1315E_GROUP(gpio_81),
+ RTD1315E_GROUP(ur2_loc),
+ RTD1315E_GROUP(gspi_loc),
+ RTD1315E_GROUP(hi_width),
+ RTD1315E_GROUP(sf_en),
+ RTD1315E_GROUP(arm_trace_dbg_en),
+ RTD1315E_GROUP(ejtag_aucpu_loc),
+ RTD1315E_GROUP(ejtag_acpu_loc),
+ RTD1315E_GROUP(ejtag_vcpu_loc),
+ RTD1315E_GROUP(ejtag_scpu_loc),
+ RTD1315E_GROUP(dmic_loc),
+ RTD1315E_GROUP(vtc_dmic_loc),
+ RTD1315E_GROUP(vtc_tdm_loc),
+ RTD1315E_GROUP(vtc_i2si_loc),
+ RTD1315E_GROUP(tdm_ai_loc),
+ RTD1315E_GROUP(ai_loc),
+ RTD1315E_GROUP(spdif_loc),
+ RTD1315E_GROUP(hif_en_loc),
+};
+
+static const char * const rtd1315e_gpio_groups[] = {
+ "gpio_0", "gpio_1", "emmc_rst_n", "emmc_dd_sb", "emmc_clk",
+ "emmc_cmd", "gpio_6", "gpio_7", "gpio_8", "gpio_9",
+ "gpio_10", "gpio_11", "gpio_12", "gpio_13", "gpio_14",
+ "gpio_15", "gpio_16", "gpio_17", "gpio_18", "gpio_19",
+ "gpio_20", "emmc_data_0", "emmc_data_1", "emmc_data_2", "usb_cc2",
+ "gpio_25", "gpio_26", "gpio_27", "gpio_28", "gpio_29",
+ "gpio_30", "gpio_31", "gpio_32", "gpio_33", "gpio_34",
+ "gpio_35", "hif_data", "hif_en", "hif_rdy", "hif_clk",
+ "gpio_46", "gpio_47", "gpio_48", "gpio_49",
+ "gpio_50", "usb_cc1", "emmc_data_3", "emmc_data_4", "ir_rx",
+ "ur0_rx", "ur0_tx", "gpio_57", "gpio_58", "gpio_59",
+ "gpio_60", "gpio_61", "gpio_62", "gpio_66", "gpio_67",
+ "gpio_68", "gpio_69", "gpio_70", "gpio_71", "gpio_72",
+ "emmc_data_5", "emmc_data_6", "emmc_data_7",
+ "gpio_78", "gpio_79", "gpio_80", "gpio_81" };
+static const char * const rtd1315e_nf_groups[] = {
+ "emmc_rst_n", "emmc_clk", "emmc_cmd", "emmc_data_0",
+ "emmc_data_1", "emmc_data_2", "emmc_data_3", "emmc_data_4",
+ "emmc_data_5", "emmc_data_6", "emmc_data_7",
+ "gpio_78", "gpio_79", "gpio_80", "gpio_81" };
+static const char * const rtd1315e_emmc_groups[] = {
+ "emmc_rst_n", "emmc_dd_sb", "emmc_clk", "emmc_cmd",
+ "emmc_data_0", "emmc_data_1", "emmc_data_2", "emmc_data_3",
+ "emmc_data_4", "emmc_data_5", "emmc_data_6", "emmc_data_7" };
+
+static const char * const rtd1315e_ao_groups[] = {
+ "gpio_66", "gpio_67", "gpio_68", "gpio_69", "gpio_70",
+ "gpio_71", "gpio_72" };
+static const char * const rtd1315e_gspi_loc0_groups[] = {
+ "gpio_18", "gpio_19", "gpio_20", "gpio_31", "gspi_loc" };
+static const char * const rtd1315e_gspi_loc1_groups[] = {
+ "gpio_8", "gpio_9", "gpio_10", "gpio_11", "gspi_loc" };
+static const char * const rtd1315e_uart0_groups[] = { "ur0_rx", "ur0_tx"};
+static const char * const rtd1315e_uart1_groups[] = {
+ "gpio_8", "gpio_9", "gpio_10", "gpio_11" };
+static const char * const rtd1315e_uart2_loc0_groups[] = {
+ "gpio_18", "gpio_19", "gpio_20", "gpio_31", "ur2_loc" };
+static const char * const rtd1315e_uart2_loc1_groups[] = {
+ "gpio_25", "gpio_26", "gpio_27", "gpio_28", "ur2_loc" };
+static const char * const rtd1315e_i2c0_groups[] = { "gpio_12", "gpio_13" };
+static const char * const rtd1315e_i2c1_groups[] = { "gpio_16", "gpio_17" };
+static const char * const rtd1315e_i2c4_groups[] = { "gpio_34", "gpio_35" };
+static const char * const rtd1315e_i2c5_groups[] = { "gpio_29", "gpio_46" };
+static const char * const rtd1315e_pcie1_groups[] = { "gpio_25" };
+static const char * const rtd1315e_etn_led_groups[] = { "gpio_14", "gpio_15" };
+static const char * const rtd1315e_etn_phy_groups[] = { "gpio_14", "gpio_15" };
+static const char * const rtd1315e_spi_groups[] = {
+ "gpio_78", "gpio_79", "gpio_80", "gpio_81" };
+static const char * const rtd1315e_pwm0_loc0_groups[] = { "gpio_26" };
+static const char * const rtd1315e_pwm0_loc1_groups[] = { "gpio_20" };
+static const char * const rtd1315e_pwm1_loc0_groups[] = { "gpio_27" };
+static const char * const rtd1315e_pwm1_loc1_groups[] = { "gpio_29" };
+
+static const char * const rtd1315e_pwm2_loc0_groups[] = { "gpio_28" };
+static const char * const rtd1315e_pwm2_loc1_groups[] = { "gpio_30" };
+static const char * const rtd1315e_pwm3_loc0_groups[] = { "gpio_47" };
+static const char * const rtd1315e_pwm3_loc1_groups[] = { "gpio_31" };
+static const char * const rtd1315e_spdif_optical_loc0_groups[] = { "gpio_20", "spdif_loc" };
+static const char * const rtd1315e_spdif_optical_loc1_groups[] = { "gpio_6", "spdif_loc" };
+static const char * const rtd1315e_usb_cc1_groups[] = { "usb_cc1" };
+static const char * const rtd1315e_usb_cc2_groups[] = { "usb_cc2" };
+
+static const char * const rtd1315e_sd_groups[] = {
+ "gpio_32", "gpio_33", "gpio_34", "gpio_35",
+ "hif_data", "hif_en", "hif_rdy", "hif_clk" };
+static const char * const rtd1315e_dmic_loc0_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60", "gpio_61",
+ "gpio_62", "gpio_1", "gpio_6", "dmic_loc" };
+static const char * const rtd1315e_dmic_loc1_groups[] = {
+ "gpio_32", "gpio_33", "gpio_34", "gpio_35",
+ "hif_data", "hif_en", "hif_rdy", "hif_clk",
+ "dmic_loc" };
+static const char * const rtd1315e_ai_loc0_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60", "gpio_61",
+ "gpio_62", "gpio_1", "ai_loc" };
+static const char * const rtd1315e_ai_loc1_groups[] = {
+ "gpio_32", "gpio_33", "gpio_34", "hif_data",
+ "hif_en", "hif_rdy", "hif_clk", "ai_loc" };
+static const char * const rtd1315e_tdm_ai_loc0_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59",
+ "gpio_60", "tdm_ai_loc" };
+static const char * const rtd1315e_tdm_ai_loc1_groups[] = {
+ "hif_data", "hif_en", "hif_rdy", "hif_clk", "tdm_ai_loc" };
+static const char * const rtd1315e_hi_loc0_groups[] = {
+ "hif_data", "hif_en", "hif_rdy", "hif_clk" };
+static const char * const rtd1315e_hi_m_groups[] = {
+ "hif_data", "hif_en", "hif_rdy", "hif_clk" };
+static const char * const rtd1315e_vtc_i2so_groups[] = {
+ "gpio_67", "gpio_68", "gpio_69", "gpio_70"};
+static const char * const rtd1315e_vtc_i2si_loc0_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60", "gpio_61",
+ "vtc_i2si_loc" };
+static const char * const rtd1315e_vtc_i2si_loc1_groups[] = {
+ "gpio_32", "hif_data", "hif_en", "hif_rdy", "hif_clk",
+ "vtc_i2si_loc" };
+static const char * const rtd1315e_vtc_dmic_loc0_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60",
+ "vtc_dmic_loc" };
+static const char * const rtd1315e_vtc_dmic_loc1_groups[] = {
+ "hif_data", "hif_en", "hif_rdy", "hif_clk",
+ "vtc_dmic_loc" };
+static const char * const rtd1315e_vtc_tdm_loc0_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60",
+ "vtc_tdm_loc" };
+static const char * const rtd1315e_vtc_tdm_loc1_groups[] = {
+ "hif_data", "hif_en", "hif_rdy", "hif_clk",
+ "vtc_tdm_loc" };
+static const char * const rtd1315e_dc_fan_groups[] = { "gpio_47" };
+static const char * const rtd1315e_pll_test_loc0_groups[] = { "gpio_0", "gpio_1" };
+static const char * const rtd1315e_pll_test_loc1_groups[] = { "gpio_48", "gpio_49" };
+static const char * const rtd1315e_spdif_groups[] = { "gpio_50" };
+static const char * const rtd1315e_ir_rx_groups[] = { "ir_rx" };
+static const char * const rtd1315e_uart2_disable_groups[] = { "ur2_loc" };
+static const char * const rtd1315e_gspi_disable_groups[] = { "gspi_loc" };
+static const char * const rtd1315e_hi_width_disable_groups[] = { "hi_width" };
+static const char * const rtd1315e_hi_width_1bit_groups[] = { "hi_width" };
+static const char * const rtd1315e_sf_disable_groups[] = { "sf_en" };
+static const char * const rtd1315e_sf_enable_groups[] = { "sf_en" };
+static const char * const rtd1315e_scpu_ejtag_loc0_groups[] = {
+ "gpio_68", "gpio_69", "gpio_70", "gpio_71", "gpio_72",
+ "ejtag_scpu_loc" };
+static const char * const rtd1315e_scpu_ejtag_loc1_groups[] = {
+ "gpio_32", "gpio_33", "hif_data", "hif_en", "hif_clk",
+ "ejtag_scpu_loc" };
+static const char * const rtd1315e_scpu_ejtag_loc2_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60", "gpio_61",
+ "ejtag_scpu_loc" };
+static const char * const rtd1315e_scpu_ejtag_loc3_groups[] = {
+ "hif_data" };
+static const char * const rtd1315e_acpu_ejtag_loc0_groups[] = {
+ "gpio_68", "gpio_69", "gpio_70", "gpio_71", "gpio_72",
+ "ejtag_acpu_loc" };
+static const char * const rtd1315e_acpu_ejtag_loc1_groups[] = {
+ "gpio_32", "gpio_33", "hif_data", "hif_en", "hif_clk",
+ "ejtag_acpu_loc" };
+static const char * const rtd1315e_acpu_ejtag_loc2_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60", "gpio_61",
+ "ejtag_acpu_loc" };
+static const char * const rtd1315e_vcpu_ejtag_loc0_groups[] = {
+ "gpio_68", "gpio_69", "gpio_70", "gpio_71", "gpio_72",
+ "ejtag_vcpu_loc" };
+static const char * const rtd1315e_vcpu_ejtag_loc1_groups[] = {
+ "gpio_32", "gpio_33", "hif_data", "hif_en", "hif_clk",
+ "ejtag_vcpu_loc" };
+static const char * const rtd1315e_vcpu_ejtag_loc2_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60", "gpio_61",
+ "ejtag_vcpu_loc" };
+static const char * const rtd1315e_aucpu_ejtag_loc0_groups[] = {
+ "gpio_68", "gpio_69", "gpio_70", "gpio_71", "gpio_72",
+ "ejtag_aucpu_loc" };
+static const char * const rtd1315e_aucpu_ejtag_loc1_groups[] = {
+ "gpio_32", "gpio_33", "hif_data", "hif_en", "hif_clk",
+ "ejtag_aucpu_loc" };
+static const char * const rtd1315e_aucpu_ejtag_loc2_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60", "gpio_61",
+ "ejtag_aucpu_loc" };
+static const char * const rtd1315e_gpu_ejtag_groups[] = {
+ "gpio_68", "gpio_69", "gpio_70", "gpio_71", "gpio_72" };
+
+static const char * const rtd1315e_iso_tristate_groups[] = {
+ "emmc_rst_n", "emmc_dd_sb", "emmc_clk", "emmc_cmd",
+ "emmc_data_0", "emmc_data_1", "emmc_data_2", "emmc_data_3",
+ "emmc_data_4", "emmc_data_5", "emmc_data_6", "emmc_data_7",
+ "gpio_1", "gpio_7", "gpio_8", "gpio_9", "gpio_10",
+ "gpio_11", "usb_cc2", "gpio_32", "gpio_33", "hif_data",
+ "hif_en", "hif_rdy", "hif_clk", "ir_rx", "ur0_rx",
+ "ur0_tx", "gpio_66", "gpio_67", "gpio_68", "gpio_69", "gpio_70",
+ "gpio_71", "gpio_72", "gpio_78", "gpio_79", "gpio_80", "gpio_81" };
+static const char * const rtd1315e_dbg_out0_groups[] = {
+ "gpio_0", "gpio_12", "gpio_13", "gpio_16", "gpio_17", "gpio_26",
+ "gpio_27", "gpio_28", "gpio_29", "gpio_30", "gpio_34", "gpio_35",
+ "gpio_46", "gpio_48", "gpio_49", "usb_cc1", "gpio_57", "gpio_58", "gpio_59", "gpio_60" };
+static const char * const rtd1315e_dbg_out1_groups[] = {
+ "gpio_6", "gpio_14", "gpio_15", "gpio_18", "gpio_19", "gpio_20",
+ "gpio_25", "gpio_31", "gpio_47", "gpio_50", "gpio_59", "gpio_61",
+ "gpio_62" };
+static const char * const rtd1315e_standby_dbg_groups[] = {
+ "gpio_1", "gpio_6", "ir_rx" };
+static const char * const rtd1315e_arm_trace_debug_disable_groups[] = { "arm_trace_dbg_en" };
+static const char * const rtd1315e_arm_trace_debug_enable_groups[] = { "arm_trace_dbg_en" };
+static const char * const rtd1315e_aucpu_ejtag_disable_groups[] = { "ejtag_aucpu_loc" };
+static const char * const rtd1315e_acpu_ejtag_disable_groups[] = { "ejtag_acpu_loc" };
+static const char * const rtd1315e_vcpu_ejtag_disable_groups[] = { "ejtag_vcpu_loc" };
+static const char * const rtd1315e_scpu_ejtag_disable_groups[] = { "ejtag_scpu_loc" };
+static const char * const rtd1315e_vtc_dmic_loc_disable_groups[] = { "vtc_dmic_loc" };
+static const char * const rtd1315e_vtc_tdm_disable_groups[] = { "vtc_tdm_loc" };
+static const char * const rtd1315e_vtc_i2si_disable_groups[] = { "vtc_i2si_loc" };
+static const char * const rtd1315e_tdm_ai_disable_groups[] = { "tdm_ai_loc" };
+static const char * const rtd1315e_ai_disable_groups[] = { "ai_loc" };
+static const char * const rtd1315e_spdif_disable_groups[] = { "spdif_loc" };
+static const char * const rtd1315e_hif_disable_groups[] = { "hif_en_loc" };
+static const char * const rtd1315e_hif_enable_groups[] = { "hif_en_loc" };
+static const char * const rtd1315e_test_loop_groups[] = { "gpio_50" };
+static const char * const rtd1315e_pmic_pwrup_groups[] = { "gpio_78" };
+
+#define RTD1315E_FUNC(_name) \
+ { \
+ .name = # _name, \
+ .groups = rtd1315e_ ## _name ## _groups, \
+ .num_groups = ARRAY_SIZE(rtd1315e_ ## _name ## _groups), \
+ }
+
+static const struct rtd_pin_func_desc rtd1315e_pin_functions[] = {
+ RTD1315E_FUNC(gpio),
+ RTD1315E_FUNC(nf),
+ RTD1315E_FUNC(emmc),
+ RTD1315E_FUNC(ao),
+ RTD1315E_FUNC(gspi_loc0),
+ RTD1315E_FUNC(gspi_loc1),
+ RTD1315E_FUNC(uart0),
+ RTD1315E_FUNC(uart1),
+ RTD1315E_FUNC(uart2_loc0),
+ RTD1315E_FUNC(uart2_loc1),
+ RTD1315E_FUNC(i2c0),
+ RTD1315E_FUNC(i2c1),
+ RTD1315E_FUNC(i2c4),
+ RTD1315E_FUNC(i2c5),
+ RTD1315E_FUNC(pcie1),
+ RTD1315E_FUNC(etn_led),
+ RTD1315E_FUNC(etn_phy),
+ RTD1315E_FUNC(spi),
+ RTD1315E_FUNC(pwm0_loc0),
+ RTD1315E_FUNC(pwm0_loc1),
+ RTD1315E_FUNC(pwm1_loc0),
+ RTD1315E_FUNC(pwm1_loc1),
+ RTD1315E_FUNC(pwm2_loc0),
+ RTD1315E_FUNC(pwm2_loc1),
+ RTD1315E_FUNC(pwm3_loc0),
+ RTD1315E_FUNC(pwm3_loc1),
+ RTD1315E_FUNC(spdif_optical_loc0),
+ RTD1315E_FUNC(spdif_optical_loc1),
+ RTD1315E_FUNC(usb_cc1),
+ RTD1315E_FUNC(usb_cc2),
+ RTD1315E_FUNC(sd),
+ RTD1315E_FUNC(dmic_loc0),
+ RTD1315E_FUNC(dmic_loc1),
+ RTD1315E_FUNC(ai_loc0),
+ RTD1315E_FUNC(ai_loc1),
+ RTD1315E_FUNC(tdm_ai_loc0),
+ RTD1315E_FUNC(tdm_ai_loc1),
+ RTD1315E_FUNC(hi_loc0),
+ RTD1315E_FUNC(hi_m),
+ RTD1315E_FUNC(vtc_i2so),
+ RTD1315E_FUNC(vtc_i2si_loc0),
+ RTD1315E_FUNC(vtc_i2si_loc1),
+ RTD1315E_FUNC(vtc_dmic_loc0),
+ RTD1315E_FUNC(vtc_dmic_loc1),
+ RTD1315E_FUNC(vtc_tdm_loc0),
+ RTD1315E_FUNC(vtc_tdm_loc1),
+ RTD1315E_FUNC(dc_fan),
+ RTD1315E_FUNC(pll_test_loc0),
+ RTD1315E_FUNC(pll_test_loc1),
+ RTD1315E_FUNC(ir_rx),
+ RTD1315E_FUNC(uart2_disable),
+ RTD1315E_FUNC(gspi_disable),
+ RTD1315E_FUNC(hi_width_disable),
+ RTD1315E_FUNC(hi_width_1bit),
+ RTD1315E_FUNC(sf_disable),
+ RTD1315E_FUNC(sf_enable),
+ RTD1315E_FUNC(scpu_ejtag_loc0),
+ RTD1315E_FUNC(scpu_ejtag_loc1),
+ RTD1315E_FUNC(scpu_ejtag_loc2),
+ RTD1315E_FUNC(scpu_ejtag_loc3),
+ RTD1315E_FUNC(acpu_ejtag_loc0),
+ RTD1315E_FUNC(acpu_ejtag_loc1),
+ RTD1315E_FUNC(acpu_ejtag_loc2),
+ RTD1315E_FUNC(vcpu_ejtag_loc0),
+ RTD1315E_FUNC(vcpu_ejtag_loc1),
+ RTD1315E_FUNC(vcpu_ejtag_loc2),
+ RTD1315E_FUNC(aucpu_ejtag_loc0),
+ RTD1315E_FUNC(aucpu_ejtag_loc1),
+ RTD1315E_FUNC(aucpu_ejtag_loc2),
+ RTD1315E_FUNC(gpu_ejtag),
+ RTD1315E_FUNC(iso_tristate),
+ RTD1315E_FUNC(dbg_out0),
+ RTD1315E_FUNC(dbg_out1),
+ RTD1315E_FUNC(standby_dbg),
+ RTD1315E_FUNC(spdif),
+ RTD1315E_FUNC(arm_trace_debug_disable),
+ RTD1315E_FUNC(arm_trace_debug_enable),
+ RTD1315E_FUNC(aucpu_ejtag_disable),
+ RTD1315E_FUNC(acpu_ejtag_disable),
+ RTD1315E_FUNC(vcpu_ejtag_disable),
+ RTD1315E_FUNC(scpu_ejtag_disable),
+ RTD1315E_FUNC(vtc_dmic_loc_disable),
+ RTD1315E_FUNC(vtc_tdm_disable),
+ RTD1315E_FUNC(vtc_i2si_disable),
+ RTD1315E_FUNC(tdm_ai_disable),
+ RTD1315E_FUNC(ai_disable),
+ RTD1315E_FUNC(spdif_disable),
+ RTD1315E_FUNC(hif_disable),
+ RTD1315E_FUNC(hif_enable),
+ RTD1315E_FUNC(test_loop),
+ RTD1315E_FUNC(pmic_pwrup),
+};
+
+#undef RTD1315E_FUNC
+
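+/*
+ * Informal sketch of the mux table layout: each RTK_PIN_MUX() entry names
+ * a pin, the register offset and the bit field that selects the pin's
+ * function, and each RTK_PIN_FUNC() pairs a field value (pre-shifted to
+ * the field position; SHIFT_LEFT() is assumed here to be a plain left
+ * shift) with a function name. For example, the emmc_rst_n entry below
+ * uses bits [3:0] of register 0x0: 0x1 selects "nf", 0x2 selects "emmc"
+ * and 0xf tristates the pin.
+ */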
+static const struct rtd_pin_desc rtd1315e_iso_muxes[ARRAY_SIZE(rtd1315e_iso_pins)] = {
+ [RTD1315E_ISO_EMMC_RST_N] = RTK_PIN_MUX(emmc_rst_n, 0x0, GENMASK(3, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 0), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 0), "iso_tristate")),
+ [RTD1315E_ISO_EMMC_DD_SB] = RTK_PIN_MUX(emmc_dd_sb, 0x0, GENMASK(7, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 4), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 4), "iso_tristate")),
+ [RTD1315E_ISO_EMMC_CLK] = RTK_PIN_MUX(emmc_clk, 0x0, GENMASK(11, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 8), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 8), "iso_tristate")),
+ [RTD1315E_ISO_EMMC_CMD] = RTK_PIN_MUX(emmc_cmd, 0x0, GENMASK(15, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 12), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 12), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 12), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 12), "iso_tristate")),
+ [RTD1315E_ISO_EMMC_DATA_0] = RTK_PIN_MUX(emmc_data_0, 0x0, GENMASK(19, 16),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 16), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 16), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 16), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 16), "iso_tristate")),
+ [RTD1315E_ISO_EMMC_DATA_1] = RTK_PIN_MUX(emmc_data_1, 0x0, GENMASK(23, 20),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 20), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 20), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 20), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 20), "iso_tristate")),
+ [RTD1315E_ISO_EMMC_DATA_2] = RTK_PIN_MUX(emmc_data_2, 0x0, GENMASK(27, 24),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 24), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 24), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 24), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 24), "iso_tristate")),
+ [RTD1315E_ISO_EMMC_DATA_3] = RTK_PIN_MUX(emmc_data_3, 0x0, GENMASK(31, 28),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 28), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 28), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 28), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 28), "iso_tristate")),
+
+ [RTD1315E_ISO_EMMC_DATA_4] = RTK_PIN_MUX(emmc_data_4, 0x4, GENMASK(3, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 0), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 0), "iso_tristate")),
+ [RTD1315E_ISO_EMMC_DATA_5] = RTK_PIN_MUX(emmc_data_5, 0x4, GENMASK(7, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 4), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 4), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 4), "iso_tristate")),
+ [RTD1315E_ISO_EMMC_DATA_6] = RTK_PIN_MUX(emmc_data_6, 0x4, GENMASK(11, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 8), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 8), "iso_tristate")),
+ [RTD1315E_ISO_EMMC_DATA_7] = RTK_PIN_MUX(emmc_data_7, 0x4, GENMASK(15, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 12), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 12), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 12), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 12), "iso_tristate")),
+ [RTD1315E_ISO_GPIO_0] = RTK_PIN_MUX(gpio_0, 0x4, GENMASK(19, 16),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 16), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 16), "pll_test_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 16), "dbg_out0")),
+ [RTD1315E_ISO_GPIO_1] = RTK_PIN_MUX(gpio_1, 0x4, GENMASK(23, 20),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 20), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 20), "standby_dbg"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 20), "pll_test_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 20), "dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 20), "ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 20), "iso_tristate")),
+ [RTD1315E_ISO_GPIO_6] = RTK_PIN_MUX(gpio_6, 0x4, GENMASK(27, 24),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 24), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 24), "standby_dbg"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 24), "dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 24), "spdif_optical_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 24), "dbg_out1")),
+ [RTD1315E_ISO_GPIO_7] = RTK_PIN_MUX(gpio_7, 0x4, GENMASK(31, 28),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 28), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 28), "iso_tristate")),
+
+ [RTD1315E_ISO_GPIO_8] = RTK_PIN_MUX(gpio_8, 0x8, GENMASK(3, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "uart1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 0), "gspi_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 0), "iso_tristate")),
+ [RTD1315E_ISO_GPIO_9] = RTK_PIN_MUX(gpio_9, 0x8, GENMASK(7, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 4), "uart1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 4), "gspi_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 4), "iso_tristate")),
+ [RTD1315E_ISO_GPIO_10] = RTK_PIN_MUX(gpio_10, 0x8, GENMASK(11, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "uart1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 8), "gspi_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 8), "iso_tristate")),
+ [RTD1315E_ISO_GPIO_11] = RTK_PIN_MUX(gpio_11, 0x8, GENMASK(15, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 12), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 12), "uart1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 12), "gspi_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 12), "iso_tristate")),
+ [RTD1315E_ISO_GPIO_12] = RTK_PIN_MUX(gpio_12, 0x8, GENMASK(19, 16),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 16), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 16), "i2c0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 16), "dbg_out0")),
+ [RTD1315E_ISO_GPIO_13] = RTK_PIN_MUX(gpio_13, 0x8, GENMASK(23, 20),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 20), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 20), "i2c0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 20), "dbg_out0")),
+ [RTD1315E_ISO_GPIO_14] = RTK_PIN_MUX(gpio_14, 0x8, GENMASK(27, 24),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 24), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 24), "etn_led"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 24), "etn_phy"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 24), "dbg_out1")),
+ [RTD1315E_ISO_GPIO_15] = RTK_PIN_MUX(gpio_15, 0x8, GENMASK(31, 28),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 28), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 28), "etn_led"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 28), "etn_phy"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 28), "dbg_out1")),
+
+ [RTD1315E_ISO_GPIO_16] = RTK_PIN_MUX(gpio_16, 0xc, GENMASK(3, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "i2c1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 0), "dbg_out0")),
+ [RTD1315E_ISO_GPIO_17] = RTK_PIN_MUX(gpio_17, 0xc, GENMASK(7, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 4), "i2c1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 4), "dbg_out0")),
+ [RTD1315E_ISO_GPIO_18] = RTK_PIN_MUX(gpio_18, 0xc, GENMASK(11, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "uart2_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 8), "gspi_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 8), "dbg_out1")),
+ [RTD1315E_ISO_GPIO_19] = RTK_PIN_MUX(gpio_19, 0xc, GENMASK(15, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 12), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 12), "uart2_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 12), "gspi_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 12), "dbg_out1")),
+ [RTD1315E_ISO_GPIO_20] = RTK_PIN_MUX(gpio_20, 0xc, GENMASK(19, 16),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 16), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 16), "uart2_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 16), "pwm0_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 16), "gspi_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 16), "spdif_optical_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 16), "dbg_out1")),
+ [RTD1315E_ISO_USB_CC2] = RTK_PIN_MUX(usb_cc2, 0xc, GENMASK(23, 20),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 20), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 20), "usb_cc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 20), "iso_tristate")),
+ [RTD1315E_ISO_GPIO_25] = RTK_PIN_MUX(gpio_25, 0xc, GENMASK(27, 24),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 24), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 24), "uart2_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 24), "pcie1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 24), "dbg_out1")),
+ [RTD1315E_ISO_GPIO_26] = RTK_PIN_MUX(gpio_26, 0xc, GENMASK(31, 28),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 28), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 28), "uart2_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 28), "pwm0_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 28), "dbg_out0")),
+
+ [RTD1315E_ISO_GPIO_27] = RTK_PIN_MUX(gpio_27, 0x10, GENMASK(3, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "uart2_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 0), "pwm1_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 0), "dbg_out0")),
+ [RTD1315E_ISO_GPIO_28] = RTK_PIN_MUX(gpio_28, 0x10, GENMASK(7, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 4), "uart2_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 4), "pwm2_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 4), "dbg_out0")),
+ [RTD1315E_ISO_GPIO_29] = RTK_PIN_MUX(gpio_29, 0x10, GENMASK(11, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "i2c5"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 8), "pwm1_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 8), "dbg_out0")),
+ [RTD1315E_ISO_GPIO_30] = RTK_PIN_MUX(gpio_30, 0x10, GENMASK(15, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 12), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 12), "pwm2_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 12), "dbg_out0")),
+ [RTD1315E_ISO_GPIO_31] = RTK_PIN_MUX(gpio_31, 0x10, GENMASK(19, 16),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 16), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 16), "uart2_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 16), "pwm3_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 16), "gspi_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 16), "dbg_out1")),
+ [RTD1315E_ISO_GPIO_32] = RTK_PIN_MUX(gpio_32, 0x10, GENMASK(23, 20),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 20), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 20), "sd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 20), "aucpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 20), "dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 20), "scpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 20), "acpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 20), "vcpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 20), "ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 20), "vtc_i2si_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 20), "iso_tristate")),
+ [RTD1315E_ISO_GPIO_33] = RTK_PIN_MUX(gpio_33, 0x10, GENMASK(27, 24),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 24), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 24), "sd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 24), "aucpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 24), "dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 24), "scpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 24), "acpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 24), "vcpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 24), "ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 24), "iso_tristate")),
+ [RTD1315E_ISO_GPIO_34] = RTK_PIN_MUX(gpio_34, 0x10, GENMASK(31, 28),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 28), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 28), "sd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 28), "dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 28), "i2c4"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 28), "ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 28), "dbg_out0")),
+
+ [RTD1315E_ISO_GPIO_35] = RTK_PIN_MUX(gpio_35, 0x14, GENMASK(3, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "sd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 0), "dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 0), "i2c4"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 0), "dbg_out0")),
+ [RTD1315E_ISO_HIF_DATA] = RTK_PIN_MUX(hif_data, 0x14, GENMASK(7, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 4), "sd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 4), "aucpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 4), "dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 4), "tdm_ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 4), "scpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 4), "acpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 4), "vcpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 4), "ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 4), "hi_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xa, 4), "hi_m"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 4), "vtc_i2si_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 4), "vtc_tdm_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xd, 4), "vtc_dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xe, 4), "scpu_ejtag_loc3"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 4), "iso_tristate")),
+ [RTD1315E_ISO_HIF_EN] = RTK_PIN_MUX(hif_en, 0x14, GENMASK(11, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "sd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 8), "aucpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 8), "dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 8), "tdm_ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 8), "scpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 8), "acpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 8), "vcpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 8), "ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 8), "hi_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xa, 8), "hi_m"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 8), "vtc_i2si_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 8), "vtc_tdm_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xd, 8), "vtc_dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 8), "iso_tristate")),
+ [RTD1315E_ISO_HIF_RDY] = RTK_PIN_MUX(hif_rdy, 0x14, GENMASK(15, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 12), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 12), "sd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 12), "dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 12), "tdm_ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 12), "ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 12), "hi_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xa, 12), "hi_m"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 12), "vtc_i2si_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 12), "vtc_tdm_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xd, 12), "vtc_dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 12), "iso_tristate")),
+ [RTD1315E_ISO_HIF_CLK] = RTK_PIN_MUX(hif_clk, 0x14, GENMASK(19, 16),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 16), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 16), "sd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 16), "aucpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 16), "dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 16), "tdm_ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 16), "scpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 16), "acpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 16), "vcpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 16), "ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 16), "hi_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xa, 16), "hi_m"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 16), "vtc_i2si_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 16), "vtc_tdm_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xd, 16), "vtc_dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 16), "iso_tristate")),
+ [RTD1315E_ISO_GPIO_46] = RTK_PIN_MUX(gpio_46, 0x14, GENMASK(23, 20),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 20), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 20), "i2c5"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 20), "dbg_out0")),
+ [RTD1315E_ISO_GPIO_47] = RTK_PIN_MUX(gpio_47, 0x14, GENMASK(27, 24),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 24), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 24), "dc_fan"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 24), "pwm3_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 24), "dbg_out1")),
+ [RTD1315E_ISO_GPIO_48] = RTK_PIN_MUX(gpio_48, 0x14, GENMASK(31, 28),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 28), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 28), "pll_test_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 28), "dbg_out0")),
+
+ [RTD1315E_ISO_GPIO_49] = RTK_PIN_MUX(gpio_49, 0x18, GENMASK(3, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "pll_test_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 0), "dbg_out0")),
+ [RTD1315E_ISO_GPIO_50] = RTK_PIN_MUX(gpio_50, 0x18, GENMASK(7, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 4), "spdif"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 4), "test_loop"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 4), "dbg_out1")),
+ [RTD1315E_ISO_USB_CC1] = RTK_PIN_MUX(usb_cc1, 0x18, GENMASK(11, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "usb_cc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 8), "dbg_out0")),
+ [RTD1315E_ISO_IR_RX] = RTK_PIN_MUX(ir_rx, 0x18, GENMASK(15, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 12), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 12), "ir_rx"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 12), "standby_dbg"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 12), "iso_tristate")),
+ [RTD1315E_ISO_UR0_RX] = RTK_PIN_MUX(ur0_rx, 0x18, GENMASK(19, 16),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 16), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 16), "uart0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 16), "iso_tristate")),
+ [RTD1315E_ISO_UR0_TX] = RTK_PIN_MUX(ur0_tx, 0x18, GENMASK(23, 20),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 20), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 20), "uart0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 20), "iso_tristate")),
+ [RTD1315E_ISO_GPIO_57] = RTK_PIN_MUX(gpio_57, 0x18, GENMASK(27, 24),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 24), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 24), "tdm_ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 24), "ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 24), "dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 24), "acpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 24), "vcpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 24), "aucpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 24), "vtc_i2si_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 24), "vtc_tdm_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xd, 24), "vtc_dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xe, 24), "scpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 24), "dbg_out0")),
+ [RTD1315E_ISO_GPIO_58] = RTK_PIN_MUX(gpio_58, 0x18, GENMASK(31, 28),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 28), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 28), "tdm_ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 28), "ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 28), "dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 28), "acpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 28), "vcpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 28), "aucpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 28), "vtc_i2si_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 28), "vtc_tdm_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xd, 28), "vtc_dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xe, 28), "scpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 28), "dbg_out0")),
+
+ [RTD1315E_ISO_GPIO_59] = RTK_PIN_MUX(gpio_59, 0x1c, GENMASK(3, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "tdm_ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 0), "ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 0), "dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 0), "acpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 0), "vcpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 0), "aucpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 0), "vtc_i2si_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 0), "vtc_tdm_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xd, 0), "vtc_dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xe, 0), "scpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 0), "dbg_out1")),
+ [RTD1315E_ISO_GPIO_60] = RTK_PIN_MUX(gpio_60, 0x1c, GENMASK(7, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 4), "tdm_ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 4), "ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 4), "dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 4), "acpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 4), "vcpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 4), "aucpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 4), "vtc_i2si_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 4), "vtc_tdm_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xd, 4), "vtc_dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xe, 4), "scpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 4), "dbg_out0")),
+ [RTD1315E_ISO_GPIO_61] = RTK_PIN_MUX(gpio_61, 0x1c, GENMASK(11, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 8), "ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 8), "dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 8), "acpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 8), "vcpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 8), "aucpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 8), "vtc_i2si_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xe, 8), "scpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 8), "dbg_out1")),
+ [RTD1315E_ISO_GPIO_62] = RTK_PIN_MUX(gpio_62, 0x1c, GENMASK(15, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 12), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 12), "ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 12), "dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 12), "dbg_out1")),
+ [RTD1315E_ISO_GPIO_66] = RTK_PIN_MUX(gpio_66, 0x1c, GENMASK(19, 16),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 16), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 16), "ao"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 16), "iso_tristate")),
+ [RTD1315E_ISO_GPIO_67] = RTK_PIN_MUX(gpio_67, 0x1c, GENMASK(23, 20),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 20), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 20), "vtc_i2so"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 20), "ao"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 20), "iso_tristate")),
+ [RTD1315E_ISO_GPIO_68] = RTK_PIN_MUX(gpio_68, 0x1c, GENMASK(27, 24),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 24), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 24), "aucpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 24), "gpu_ejtag"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 24), "scpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 24), "acpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 24), "vcpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 24), "vtc_i2so"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 24), "ao"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 24), "iso_tristate")),
+ [RTD1315E_ISO_GPIO_69] = RTK_PIN_MUX(gpio_69, 0x1c, GENMASK(31, 28),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 28), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 28), "aucpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 28), "gpu_ejtag"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 28), "scpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 28), "acpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 28), "vcpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 28), "vtc_i2so"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 28), "ao"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 28), "iso_tristate")),
+
+ [RTD1315E_ISO_GPIO_70] = RTK_PIN_MUX(gpio_70, 0x20, GENMASK(3, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 0), "aucpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 0), "gpu_ejtag"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 0), "scpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 0), "acpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 0), "vcpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 0), "vtc_i2so"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 0), "ao"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 0), "iso_tristate")),
+ [RTD1315E_ISO_GPIO_71] = RTK_PIN_MUX(gpio_71, 0x20, GENMASK(7, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 4), "aucpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 4), "gpu_ejtag"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 4), "scpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 4), "acpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 4), "vcpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 4), "ao"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 4), "iso_tristate")),
+ [RTD1315E_ISO_GPIO_72] = RTK_PIN_MUX(gpio_72, 0x20, GENMASK(11, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 8), "aucpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 8), "gpu_ejtag"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 8), "scpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 8), "acpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 8), "vcpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 8), "ao"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 8), "iso_tristate")),
+ [RTD1315E_ISO_GPIO_78] = RTK_PIN_MUX(gpio_78, 0x20, GENMASK(15, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 12), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 12), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 12), "pmic_pwrup"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 12), "spi"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 12), "iso_tristate")),
+ [RTD1315E_ISO_GPIO_79] = RTK_PIN_MUX(gpio_79, 0x20, GENMASK(19, 16),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 16), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 16), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 16), "spi"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 16), "iso_tristate")),
+ [RTD1315E_ISO_GPIO_80] = RTK_PIN_MUX(gpio_80, 0x20, GENMASK(23, 20),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 20), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 20), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 20), "spi"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 20), "iso_tristate")),
+ [RTD1315E_ISO_GPIO_81] = RTK_PIN_MUX(gpio_81, 0x20, GENMASK(27, 24),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 24), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 24), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 24), "spi"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 24), "iso_tristate")),
+
+ [RTD1315E_ISO_UR2_LOC] = RTK_PIN_MUX(ur2_loc, 0x120, GENMASK(1, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "uart2_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "uart2_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 0), "uart2_loc1")),
+ [RTD1315E_ISO_GSPI_LOC] = RTK_PIN_MUX(gspi_loc, 0x120, GENMASK(3, 2),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 2), "gspi_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 2), "gspi_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 2), "gspi_loc1")),
+ [RTD1315E_ISO_HI_WIDTH] = RTK_PIN_MUX(hi_width, 0x120, GENMASK(9, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "hi_width_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "hi_width_1bit")),
+ [RTD1315E_ISO_SF_EN] = RTK_PIN_MUX(sf_en, 0x120, GENMASK(11, 11),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 11), "sf_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 11), "sf_enable")),
+ [RTD1315E_ISO_ARM_TRACE_DBG_EN] = RTK_PIN_MUX(arm_trace_dbg_en, 0x120, GENMASK(12, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 12), "arm_trace_debug_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 12), "arm_trace_debug_enable")),
+ [RTD1315E_ISO_EJTAG_AUCPU_LOC] = RTK_PIN_MUX(ejtag_aucpu_loc, 0x120, GENMASK(16, 14),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 14), "aucpu_ejtag_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 14), "aucpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 14), "aucpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 14), "aucpu_ejtag_loc2")),
+ [RTD1315E_ISO_EJTAG_ACPU_LOC] = RTK_PIN_MUX(ejtag_acpu_loc, 0x120, GENMASK(19, 17),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 17), "acpu_ejtag_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 17), "acpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 17), "acpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 17), "acpu_ejtag_loc2")),
+ [RTD1315E_ISO_EJTAG_VCPU_LOC] = RTK_PIN_MUX(ejtag_vcpu_loc, 0x120, GENMASK(22, 20),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 20), "vcpu_ejtag_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 20), "vcpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 20), "vcpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 20), "vcpu_ejtag_loc2")),
+ [RTD1315E_ISO_EJTAG_SCPU_LOC] = RTK_PIN_MUX(ejtag_scpu_loc, 0x120, GENMASK(25, 23),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 23), "scpu_ejtag_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 23), "scpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 23), "scpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 23), "scpu_ejtag_loc2")),
+ [RTD1315E_ISO_DMIC_LOC] = RTK_PIN_MUX(dmic_loc, 0x120, GENMASK(27, 26),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 26), "dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 26), "dmic_loc1")),
+
+ [RTD1315E_ISO_VTC_DMIC_LOC] = RTK_PIN_MUX(vtc_dmic_loc, 0x128, GENMASK(1, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "vtc_dmic_loc_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "vtc_dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 0), "vtc_dmic_loc1")),
+ [RTD1315E_ISO_VTC_TDM_LOC] = RTK_PIN_MUX(vtc_tdm_loc, 0x128, GENMASK(3, 2),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 2), "vtc_tdm_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 2), "vtc_tdm_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 2), "vtc_tdm_loc1")),
+ [RTD1315E_ISO_VTC_I2SI_LOC] = RTK_PIN_MUX(vtc_i2si_loc, 0x128, GENMASK(5, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "vtc_i2si_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 4), "vtc_i2si_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 4), "vtc_i2si_loc1")),
+ [RTD1315E_ISO_TDM_AI_LOC] = RTK_PIN_MUX(tdm_ai_loc, 0x128, GENMASK(7, 6),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 6), "tdm_ai_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 6), "tdm_ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 6), "tdm_ai_loc1")),
+ [RTD1315E_ISO_AI_LOC] = RTK_PIN_MUX(ai_loc, 0x128, GENMASK(9, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "ai_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 8), "ai_loc1")),
+ [RTD1315E_ISO_SPDIF_LOC] = RTK_PIN_MUX(spdif_loc, 0x128, GENMASK(11, 10),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 10), "spdif_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 10), "spdif_optical_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 10), "spdif_optical_loc1")),
+
+ [RTD1315E_ISO_HIF_EN_LOC] = RTK_PIN_MUX(hif_en_loc, 0x12c, GENMASK(2, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 0), "hif_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 0), "hif_enable")),
+};
+
+static const struct rtd_pin_config_desc rtd1315e_iso_configs[ARRAY_SIZE(rtd1315e_iso_pins)] = {
+ [RTD1315E_ISO_BOOT_SEL] = RTK_PIN_CONFIG(boot_sel, 0x24, 0, 0, 1, NA, 2, 3, NA),
+ [RTD1315E_ISO_EMMC_CLK] = RTK_PIN_CONFIG(emmc_clk, 0x24, 4, 0, 1, NA, 2, 12, NA),
+ [RTD1315E_ISO_EMMC_CMD] = RTK_PIN_CONFIG(emmc_cmd, 0x24, 17, 0, 1, NA, 2, 13, NA),
+ [RTD1315E_ISO_EMMC_DATA_0] = RTK_PIN_CONFIG(emmc_data_0, 0x28, 0, 0, 1, NA, 2, 12, NA),
+ [RTD1315E_ISO_EMMC_DATA_1] = RTK_PIN_CONFIG(emmc_data_1, 0x28, 13, 0, 1, NA, 2, 12, NA),
+ [RTD1315E_ISO_EMMC_DATA_2] = RTK_PIN_CONFIG(emmc_data_2, 0x2c, 0, 0, 1, NA, 2, 12, NA),
+ [RTD1315E_ISO_EMMC_DATA_3] = RTK_PIN_CONFIG(emmc_data_3, 0x2c, 13, 0, 1, NA, 2, 12, NA),
+ [RTD1315E_ISO_EMMC_DATA_4] = RTK_PIN_CONFIG(emmc_data_4, 0x30, 0, 0, 1, NA, 2, 12, NA),
+ [RTD1315E_ISO_EMMC_DATA_5] = RTK_PIN_CONFIG(emmc_data_5, 0x30, 13, 0, 1, NA, 2, 12, NA),
+ [RTD1315E_ISO_EMMC_DATA_6] = RTK_PIN_CONFIG(emmc_data_6, 0x34, 0, 0, 1, NA, 2, 12, NA),
+ [RTD1315E_ISO_EMMC_DATA_7] = RTK_PIN_CONFIG(emmc_data_7, 0x34, 13, 0, 1, NA, 2, 12, NA),
+ [RTD1315E_ISO_EMMC_DD_SB] = RTK_PIN_CONFIG(emmc_dd_sb, 0x38, 0, 0, 1, NA, 2, 12, NA),
+ [RTD1315E_ISO_EMMC_RST_N] = RTK_PIN_CONFIG(emmc_rst_n, 0x38, 13, 0, 1, NA, 2, 12, NA),
+ [RTD1315E_ISO_GPIO_1] = RTK_PIN_CONFIG(gpio_1, 0x3c, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_6] = RTK_PIN_CONFIG(gpio_6, 0x3c, 5, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_7] = RTK_PIN_CONFIG(gpio_7, 0x3c, 10, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_8] = RTK_PIN_CONFIG(gpio_8, 0x3c, 15, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_9] = RTK_PIN_CONFIG(gpio_9, 0x3c, 20, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_10] = RTK_PIN_CONFIG(gpio_10, 0x3c, 25, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_11] = RTK_PIN_CONFIG(gpio_11, 0x40, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_12] = RTK_PIN_CONFIG(gpio_12, 0x40, 5, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_13] = RTK_PIN_CONFIG(gpio_13, 0x40, 10, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_14] = RTK_PIN_CONFIG(gpio_14, 0x40, 15, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_15] = RTK_PIN_CONFIG(gpio_15, 0x40, 20, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_16] = RTK_PIN_CONFIG(gpio_16, 0x40, 25, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_17] = RTK_PIN_CONFIG(gpio_17, 0x44, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_18] = RTK_PIN_CONFIG(gpio_18, 0x44, 5, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_19] = RTK_PIN_CONFIG(gpio_19, 0x44, 10, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_20] = RTK_PIN_CONFIG(gpio_20, 0x44, 15, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_25] = RTK_PIN_CONFIG(gpio_25, 0x44, 20, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_26] = RTK_PIN_CONFIG(gpio_26, 0x44, 25, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_27] = RTK_PIN_CONFIG(gpio_27, 0x48, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_28] = RTK_PIN_CONFIG(gpio_28, 0x48, 6, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_29] = RTK_PIN_CONFIG(gpio_29, 0x48, 12, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_30] = RTK_PIN_CONFIG(gpio_30, 0x48, 17, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_31] = RTK_PIN_CONFIG(gpio_31, 0x4c, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_32] = RTK_PIN_CONFIG(gpio_32, 0x4c, 5, 0, 1, NA, 2, 12, NA),
+ [RTD1315E_ISO_GPIO_33] = RTK_PIN_CONFIG(gpio_33, 0x4c, 18, 0, 1, NA, 2, 12, NA),
+ [RTD1315E_ISO_GPIO_34] = RTK_PIN_CONFIG(gpio_34, 0x50, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_35] = RTK_PIN_CONFIG(gpio_35, 0x50, 5, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_46] = RTK_PIN_CONFIG(gpio_46, 0x50, 10, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_47] = RTK_PIN_CONFIG(gpio_47, 0x50, 15, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_48] = RTK_PIN_CONFIG(gpio_48, 0x50, 20, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_49] = RTK_PIN_CONFIG(gpio_49, 0x50, 25, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_50] = RTK_PIN_CONFIG(gpio_50, 0x54, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_57] = RTK_PIN_CONFIG(gpio_57, 0x54, 5, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_58] = RTK_PIN_CONFIG(gpio_58, 0x54, 10, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_59] = RTK_PIN_CONFIG(gpio_59, 0x54, 15, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_60] = RTK_PIN_CONFIG(gpio_60, 0x54, 20, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_61] = RTK_PIN_CONFIG(gpio_61, 0x54, 25, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_62] = RTK_PIN_CONFIG(gpio_62, 0x58, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_66] = RTK_PIN_CONFIG(gpio_66, 0x58, 5, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_67] = RTK_PIN_CONFIG(gpio_67, 0x58, 10, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_68] = RTK_PIN_CONFIG(gpio_68, 0x58, 15, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_69] = RTK_PIN_CONFIG(gpio_69, 0x58, 20, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_70] = RTK_PIN_CONFIG(gpio_70, 0x58, 25, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_71] = RTK_PIN_CONFIG(gpio_71, 0x5c, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_72] = RTK_PIN_CONFIG(gpio_72, 0x5c, 5, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_GPIO_78] = RTK_PIN_CONFIG(gpio_78, 0x5c, 10, 0, 1, NA, 2, 12, NA),
+ [RTD1315E_ISO_GPIO_79] = RTK_PIN_CONFIG(gpio_79, 0x60, 0, 0, 1, NA, 2, 12, NA),
+ [RTD1315E_ISO_GPIO_80] = RTK_PIN_CONFIG(gpio_80, 0x60, 13, 0, 1, NA, 2, 12, NA),
+ [RTD1315E_ISO_GPIO_81] = RTK_PIN_CONFIG(gpio_81, 0x64, 0, 0, 1, NA, 2, 12, NA),
+ [RTD1315E_ISO_HIF_CLK] = RTK_PIN_CONFIG(hif_clk, 0x64, 13, 0, 1, NA, 2, 12, NA),
+ [RTD1315E_ISO_HIF_DATA] = RTK_PIN_CONFIG(hif_data, 0x68, 0, 0, 1, NA, 2, 12, NA),
+ [RTD1315E_ISO_HIF_EN] = RTK_PIN_CONFIG(hif_en, 0x68, 13, 0, 1, NA, 2, 12, NA),
+ [RTD1315E_ISO_HIF_RDY] = RTK_PIN_CONFIG(hif_rdy, 0x68, 26, 0, 1, NA, 2, 12, NA),
+ [RTD1315E_ISO_IR_RX] = RTK_PIN_CONFIG(ir_rx, 0x6c, 7, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_RESET_N] = RTK_PIN_CONFIG(reset_n, 0x6c, 12, 0, 1, NA, 2, 3, PADDRI_4_8),
+ [RTD1315E_ISO_SCAN_SWITCH] = RTK_PIN_CONFIG(scan_switch, 0x6c, 16, NA, NA, 0, 1, 2, PADDRI_4_8),
+ [RTD1315E_ISO_TESTMODE] = RTK_PIN_CONFIG(testmode, 0x6c, 19, 0, 1, NA, 2, 3, PADDRI_4_8),
+ [RTD1315E_ISO_UR0_RX] = RTK_PIN_CONFIG(ur0_rx, 0x6c, 23, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_UR0_TX] = RTK_PIN_CONFIG(ur0_tx, 0x6c, 28, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1315E_ISO_USB_CC1] = RTK_PIN_CONFIG(usb_cc1, 0x70, 1, NA, NA, 0, 1, 2, PADDRI_4_8),
+ [RTD1315E_ISO_USB_CC2] = RTK_PIN_CONFIG(usb_cc2, 0x70, 4, NA, NA, 0, 1, 2, PADDRI_4_8),
+ [RTD1315E_ISO_WD_RSET] = RTK_PIN_CONFIG(wd_rset, 0x70, 7, 1, 2, 0, 3, 4, PADDRI_4_8),
+};
+
+static const struct rtd_pin_sconfig_desc rtd1315e_iso_sconfigs[] = {
+ RTK_PIN_SCONFIG(emmc_clk, 0x24, 7, 3, 10, 3, 13, 3),
+ RTK_PIN_SCONFIG(emmc_cmd, 0x24, 20, 3, 23, 3, 26, 3),
+ RTK_PIN_SCONFIG(emmc_data_0, 0x28, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(emmc_data_1, 0x28, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(emmc_data_2, 0x2c, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(emmc_data_3, 0x2c, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(emmc_data_4, 0x30, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(emmc_data_5, 0x30, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(emmc_data_6, 0x34, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(emmc_data_7, 0x34, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(emmc_dd_sb, 0x38, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(emmc_rst_n, 0x38, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(gpio_32, 0x4c, 8, 3, 11, 3, 14, 3),
+ RTK_PIN_SCONFIG(gpio_33, 0x4c, 21, 3, 24, 3, 27, 3),
+ RTK_PIN_SCONFIG(gpio_78, 0x5c, 13, 3, 16, 3, 19, 3),
+ RTK_PIN_SCONFIG(gpio_79, 0x60, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(gpio_80, 0x60, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(gpio_81, 0x64, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(hif_clk, 0x64, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(hif_data, 0x68, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(hif_en, 0x68, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(hif_rdy, 0x68, 29, 3, 32, 3, 35, 3),
+};
+
+static const struct rtd_pinctrl_desc rtd1315e_iso_pinctrl_desc = {
+ .pins = rtd1315e_iso_pins,
+ .num_pins = ARRAY_SIZE(rtd1315e_iso_pins),
+ .groups = rtd1315e_pin_groups,
+ .num_groups = ARRAY_SIZE(rtd1315e_pin_groups),
+ .functions = rtd1315e_pin_functions,
+ .num_functions = ARRAY_SIZE(rtd1315e_pin_functions),
+ .muxes = rtd1315e_iso_muxes,
+ .num_muxes = ARRAY_SIZE(rtd1315e_iso_muxes),
+ .configs = rtd1315e_iso_configs,
+ .num_configs = ARRAY_SIZE(rtd1315e_iso_configs),
+ .sconfigs = rtd1315e_iso_sconfigs,
+ .num_sconfigs = ARRAY_SIZE(rtd1315e_iso_sconfigs),
+};
+
+static int rtd1315e_pinctrl_probe(struct platform_device *pdev)
+{
+ return rtd_pinctrl_probe(pdev, &rtd1315e_iso_pinctrl_desc);
+}
+
+static const struct of_device_id rtd1315e_pinctrl_of_match[] = {
+ { .compatible = "realtek,rtd1315e-pinctrl", },
+ {},
+};
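+
+/*
+ * A hypothetical devicetree node that would bind against the match table
+ * above (the unit address and register range are illustrative only):
+ *
+ *	pinctrl@... {
+ *		compatible = "realtek,rtd1315e-pinctrl";
+ *		reg = <...>;
+ *	};
+ */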
+
+static struct platform_driver rtd1315e_pinctrl_driver = {
+ .driver = {
+ .name = "rtd1315e-pinctrl",
+ .of_match_table = rtd1315e_pinctrl_of_match,
+ },
+ .probe = rtd1315e_pinctrl_probe,
+};
+
+static int __init rtd1315e_pinctrl_init(void)
+{
+ return platform_driver_register(&rtd1315e_pinctrl_driver);
+}
+arch_initcall(rtd1315e_pinctrl_init);
+
+static void __exit rtd1315e_pinctrl_exit(void)
+{
+ platform_driver_unregister(&rtd1315e_pinctrl_driver);
+}
+module_exit(rtd1315e_pinctrl_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Realtek Semiconductor Corporation");
+MODULE_DESCRIPTION("Realtek DHC SoC RTD1315E pinctrl driver");
diff --git a/drivers/pinctrl/realtek/pinctrl-rtd1319d.c b/drivers/pinctrl/realtek/pinctrl-rtd1319d.c
new file mode 100644
index 000000000000..b1a654ac30dc
--- /dev/null
+++ b/drivers/pinctrl/realtek/pinctrl-rtd1319d.c
@@ -0,0 +1,1610 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Realtek DHC 1319D pin controller driver
+ *
+ * Copyright (c) 2023 Realtek Semiconductor Corp.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-rtd.h"
+
+enum rtd13xxd_iso_pins {
+ RTD1319D_ISO_GPIO_0 = 0,
+ RTD1319D_ISO_GPIO_1,
+ RTD1319D_ISO_GPIO_2,
+ RTD1319D_ISO_GPIO_3,
+ RTD1319D_ISO_GPIO_4,
+ RTD1319D_ISO_GPIO_5,
+ RTD1319D_ISO_GPIO_6,
+ RTD1319D_ISO_GPIO_7,
+ RTD1319D_ISO_GPIO_8,
+ RTD1319D_ISO_GPIO_9,
+ RTD1319D_ISO_GPIO_10,
+ RTD1319D_ISO_GPIO_11,
+ RTD1319D_ISO_GPIO_12,
+ RTD1319D_ISO_GPIO_13,
+ RTD1319D_ISO_GPIO_14,
+ RTD1319D_ISO_GPIO_15,
+ RTD1319D_ISO_GPIO_16,
+ RTD1319D_ISO_GPIO_17,
+ RTD1319D_ISO_GPIO_18,
+ RTD1319D_ISO_GPIO_19,
+ RTD1319D_ISO_GPIO_20,
+ RTD1319D_ISO_GPIO_21,
+ RTD1319D_ISO_GPIO_22,
+ RTD1319D_ISO_GPIO_23,
+ RTD1319D_ISO_USB_CC2,
+ RTD1319D_ISO_GPIO_25,
+ RTD1319D_ISO_GPIO_26,
+ RTD1319D_ISO_GPIO_27,
+ RTD1319D_ISO_GPIO_28,
+ RTD1319D_ISO_GPIO_29,
+ RTD1319D_ISO_GPIO_30,
+ RTD1319D_ISO_GPIO_31,
+ RTD1319D_ISO_GPIO_32,
+ RTD1319D_ISO_GPIO_33,
+ RTD1319D_ISO_GPIO_34,
+ RTD1319D_ISO_GPIO_35,
+ RTD1319D_ISO_HIF_DATA,
+ RTD1319D_ISO_HIF_EN,
+ RTD1319D_ISO_HIF_RDY,
+ RTD1319D_ISO_HIF_CLK,
+ RTD1319D_ISO_GPIO_40,
+ RTD1319D_ISO_GPIO_41,
+ RTD1319D_ISO_GPIO_42,
+ RTD1319D_ISO_GPIO_43,
+ RTD1319D_ISO_GPIO_44,
+ RTD1319D_ISO_GPIO_45,
+ RTD1319D_ISO_GPIO_46,
+ RTD1319D_ISO_GPIO_47,
+ RTD1319D_ISO_GPIO_48,
+ RTD1319D_ISO_GPIO_49,
+ RTD1319D_ISO_GPIO_50,
+ RTD1319D_ISO_USB_CC1,
+ RTD1319D_ISO_GPIO_52,
+ RTD1319D_ISO_GPIO_53,
+ RTD1319D_ISO_IR_RX,
+ RTD1319D_ISO_UR0_RX,
+ RTD1319D_ISO_UR0_TX,
+ RTD1319D_ISO_GPIO_57,
+ RTD1319D_ISO_GPIO_58,
+ RTD1319D_ISO_GPIO_59,
+ RTD1319D_ISO_GPIO_60,
+ RTD1319D_ISO_GPIO_61,
+ RTD1319D_ISO_GPIO_62,
+ RTD1319D_ISO_GPIO_63,
+ RTD1319D_ISO_GPIO_64,
+ RTD1319D_ISO_EMMC_RST_N,
+ RTD1319D_ISO_EMMC_DD_SB,
+ RTD1319D_ISO_EMMC_CLK,
+ RTD1319D_ISO_EMMC_CMD,
+ RTD1319D_ISO_EMMC_DATA_0,
+ RTD1319D_ISO_EMMC_DATA_1,
+ RTD1319D_ISO_EMMC_DATA_2,
+ RTD1319D_ISO_EMMC_DATA_3,
+ RTD1319D_ISO_EMMC_DATA_4,
+ RTD1319D_ISO_EMMC_DATA_5,
+ RTD1319D_ISO_EMMC_DATA_6,
+ RTD1319D_ISO_EMMC_DATA_7,
+ RTD1319D_ISO_GPIO_DUMMY_77,
+ RTD1319D_ISO_GPIO_78,
+ RTD1319D_ISO_GPIO_79,
+ RTD1319D_ISO_GPIO_80,
+ RTD1319D_ISO_GPIO_81,
+ RTD1319D_ISO_UR2_LOC,
+ RTD1319D_ISO_GSPI_LOC,
+ RTD1319D_ISO_HI_WIDTH,
+ RTD1319D_ISO_SF_EN,
+ RTD1319D_ISO_ARM_TRACE_DBG_EN,
+ RTD1319D_ISO_EJTAG_AUCPU_LOC,
+ RTD1319D_ISO_EJTAG_ACPU_LOC,
+ RTD1319D_ISO_EJTAG_VCPU_LOC,
+ RTD1319D_ISO_EJTAG_SCPU_LOC,
+ RTD1319D_ISO_DMIC_LOC,
+ RTD1319D_ISO_EJTAG_SECPU_LOC,
+ RTD1319D_ISO_VTC_DMIC_LOC,
+ RTD1319D_ISO_VTC_TDM_LOC,
+ RTD1319D_ISO_VTC_I2SI_LOC,
+ RTD1319D_ISO_TDM_AI_LOC,
+ RTD1319D_ISO_AI_LOC,
+ RTD1319D_ISO_SPDIF_LOC,
+ RTD1319D_ISO_HIF_EN_LOC,
+ RTD1319D_ISO_SC0_LOC,
+ RTD1319D_ISO_SC1_LOC,
+ RTD1319D_ISO_SCAN_SWITCH,
+ RTD1319D_ISO_WD_RSET,
+ RTD1319D_ISO_BOOT_SEL,
+ RTD1319D_ISO_RESET_N,
+ RTD1319D_ISO_TESTMODE,
+};
+
+static const struct pinctrl_pin_desc rtd1319d_iso_pins[] = {
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_0, "gpio_0"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_1, "gpio_1"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_2, "gpio_2"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_3, "gpio_3"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_4, "gpio_4"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_5, "gpio_5"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_6, "gpio_6"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_7, "gpio_7"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_8, "gpio_8"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_9, "gpio_9"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_10, "gpio_10"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_11, "gpio_11"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_12, "gpio_12"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_13, "gpio_13"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_14, "gpio_14"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_15, "gpio_15"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_16, "gpio_16"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_17, "gpio_17"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_18, "gpio_18"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_19, "gpio_19"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_20, "gpio_20"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_21, "gpio_21"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_22, "gpio_22"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_23, "gpio_23"),
+ PINCTRL_PIN(RTD1319D_ISO_USB_CC2, "usb_cc2"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_25, "gpio_25"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_26, "gpio_26"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_27, "gpio_27"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_28, "gpio_28"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_29, "gpio_29"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_30, "gpio_30"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_31, "gpio_31"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_32, "gpio_32"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_33, "gpio_33"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_34, "gpio_34"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_35, "gpio_35"),
+ PINCTRL_PIN(RTD1319D_ISO_HIF_DATA, "hif_data"),
+ PINCTRL_PIN(RTD1319D_ISO_HIF_EN, "hif_en"),
+ PINCTRL_PIN(RTD1319D_ISO_HIF_RDY, "hif_rdy"),
+ PINCTRL_PIN(RTD1319D_ISO_HIF_CLK, "hif_clk"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_40, "gpio_40"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_41, "gpio_41"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_42, "gpio_42"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_43, "gpio_43"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_44, "gpio_44"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_45, "gpio_45"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_46, "gpio_46"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_47, "gpio_47"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_48, "gpio_48"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_49, "gpio_49"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_50, "gpio_50"),
+ PINCTRL_PIN(RTD1319D_ISO_USB_CC1, "usb_cc1"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_52, "gpio_52"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_53, "gpio_53"),
+ PINCTRL_PIN(RTD1319D_ISO_IR_RX, "ir_rx"),
+ PINCTRL_PIN(RTD1319D_ISO_UR0_RX, "ur0_rx"),
+ PINCTRL_PIN(RTD1319D_ISO_UR0_TX, "ur0_tx"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_57, "gpio_57"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_58, "gpio_58"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_59, "gpio_59"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_60, "gpio_60"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_61, "gpio_61"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_62, "gpio_62"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_63, "gpio_63"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_64, "gpio_64"),
+ PINCTRL_PIN(RTD1319D_ISO_EMMC_RST_N, "emmc_rst_n"),
+ PINCTRL_PIN(RTD1319D_ISO_EMMC_DD_SB, "emmc_dd_sb"),
+ PINCTRL_PIN(RTD1319D_ISO_EMMC_CLK, "emmc_clk"),
+ PINCTRL_PIN(RTD1319D_ISO_EMMC_CMD, "emmc_cmd"),
+ PINCTRL_PIN(RTD1319D_ISO_EMMC_DATA_0, "emmc_data_0"),
+ PINCTRL_PIN(RTD1319D_ISO_EMMC_DATA_1, "emmc_data_1"),
+ PINCTRL_PIN(RTD1319D_ISO_EMMC_DATA_2, "emmc_data_2"),
+ PINCTRL_PIN(RTD1319D_ISO_EMMC_DATA_3, "emmc_data_3"),
+ PINCTRL_PIN(RTD1319D_ISO_EMMC_DATA_4, "emmc_data_4"),
+ PINCTRL_PIN(RTD1319D_ISO_EMMC_DATA_5, "emmc_data_5"),
+ PINCTRL_PIN(RTD1319D_ISO_EMMC_DATA_6, "emmc_data_6"),
+ PINCTRL_PIN(RTD1319D_ISO_EMMC_DATA_7, "emmc_data_7"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_DUMMY_77, "dummy"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_78, "gpio_78"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_79, "gpio_79"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_80, "gpio_80"),
+ PINCTRL_PIN(RTD1319D_ISO_GPIO_81, "gpio_81"),
+ PINCTRL_PIN(RTD1319D_ISO_UR2_LOC, "ur2_loc"),
+ PINCTRL_PIN(RTD1319D_ISO_GSPI_LOC, "gspi_loc"),
+ PINCTRL_PIN(RTD1319D_ISO_HI_WIDTH, "hi_width"),
+ PINCTRL_PIN(RTD1319D_ISO_SF_EN, "sf_en"),
+ PINCTRL_PIN(RTD1319D_ISO_ARM_TRACE_DBG_EN, "arm_trace_dbg_en"),
+ PINCTRL_PIN(RTD1319D_ISO_EJTAG_AUCPU_LOC, "ejtag_aucpu_loc"),
+ PINCTRL_PIN(RTD1319D_ISO_EJTAG_ACPU_LOC, "ejtag_acpu_loc"),
+ PINCTRL_PIN(RTD1319D_ISO_EJTAG_VCPU_LOC, "ejtag_vcpu_loc"),
+ PINCTRL_PIN(RTD1319D_ISO_EJTAG_SCPU_LOC, "ejtag_scpu_loc"),
+ PINCTRL_PIN(RTD1319D_ISO_DMIC_LOC, "dmic_loc"),
+ PINCTRL_PIN(RTD1319D_ISO_EJTAG_SECPU_LOC, "ejtag_secpu_loc"),
+ PINCTRL_PIN(RTD1319D_ISO_VTC_DMIC_LOC, "vtc_dmic_loc"),
+ PINCTRL_PIN(RTD1319D_ISO_VTC_TDM_LOC, "vtc_tdm_loc"),
+ PINCTRL_PIN(RTD1319D_ISO_VTC_I2SI_LOC, "vtc_i2si_loc"),
+ PINCTRL_PIN(RTD1319D_ISO_TDM_AI_LOC, "tdm_ai_loc"),
+ PINCTRL_PIN(RTD1319D_ISO_AI_LOC, "ai_loc"),
+ PINCTRL_PIN(RTD1319D_ISO_SPDIF_LOC, "spdif_loc"),
+ PINCTRL_PIN(RTD1319D_ISO_HIF_EN_LOC, "hif_en_loc"),
+ PINCTRL_PIN(RTD1319D_ISO_SC0_LOC, "sc0_loc"),
+ PINCTRL_PIN(RTD1319D_ISO_SC1_LOC, "sc1_loc"),
+ PINCTRL_PIN(RTD1319D_ISO_SCAN_SWITCH, "scan_switch"),
+ PINCTRL_PIN(RTD1319D_ISO_WD_RSET, "wd_rset"),
+ PINCTRL_PIN(RTD1319D_ISO_BOOT_SEL, "boot_sel"),
+ PINCTRL_PIN(RTD1319D_ISO_RESET_N, "reset_n"),
+ PINCTRL_PIN(RTD1319D_ISO_TESTMODE, "testmode"),
+};
+
+/* Tagged as __maybe_unused since some pins are not referenced yet and may be used in the future */
+#define DECLARE_RTD1319D_PIN(_pin, _name) \
+ static const unsigned int rtd1319d_## _name ##_pins[] __maybe_unused = { _pin }
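+
+/*
+ * For reference, DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_0, gpio_0) expands
+ * to (a sketch derived from the macro above):
+ *
+ *	static const unsigned int rtd1319d_gpio_0_pins[] __maybe_unused =
+ *		{ RTD1319D_ISO_GPIO_0 };
+ */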
+
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_0, gpio_0);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_1, gpio_1);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_2, gpio_2);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_3, gpio_3);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_4, gpio_4);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_5, gpio_5);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_6, gpio_6);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_7, gpio_7);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_8, gpio_8);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_9, gpio_9);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_10, gpio_10);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_11, gpio_11);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_12, gpio_12);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_13, gpio_13);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_14, gpio_14);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_15, gpio_15);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_16, gpio_16);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_17, gpio_17);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_18, gpio_18);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_19, gpio_19);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_20, gpio_20);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_21, gpio_21);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_22, gpio_22);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_23, gpio_23);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_USB_CC2, usb_cc2);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_25, gpio_25);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_26, gpio_26);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_27, gpio_27);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_28, gpio_28);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_29, gpio_29);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_30, gpio_30);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_31, gpio_31);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_32, gpio_32);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_33, gpio_33);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_34, gpio_34);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_35, gpio_35);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_HIF_DATA, hif_data);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_HIF_EN, hif_en);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_HIF_RDY, hif_rdy);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_HIF_CLK, hif_clk);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_40, gpio_40);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_41, gpio_41);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_42, gpio_42);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_43, gpio_43);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_44, gpio_44);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_45, gpio_45);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_46, gpio_46);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_47, gpio_47);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_48, gpio_48);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_49, gpio_49);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_50, gpio_50);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_USB_CC1, usb_cc1);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_52, gpio_52);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_53, gpio_53);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_IR_RX, ir_rx);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_UR0_RX, ur0_rx);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_UR0_TX, ur0_tx);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_57, gpio_57);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_58, gpio_58);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_59, gpio_59);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_60, gpio_60);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_61, gpio_61);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_62, gpio_62);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_63, gpio_63);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_64, gpio_64);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_EMMC_RST_N, emmc_rst_n);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_EMMC_DD_SB, emmc_dd_sb);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_EMMC_CLK, emmc_clk);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_EMMC_CMD, emmc_cmd);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_EMMC_DATA_0, emmc_data_0);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_EMMC_DATA_1, emmc_data_1);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_EMMC_DATA_2, emmc_data_2);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_EMMC_DATA_3, emmc_data_3);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_EMMC_DATA_4, emmc_data_4);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_EMMC_DATA_5, emmc_data_5);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_EMMC_DATA_6, emmc_data_6);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_EMMC_DATA_7, emmc_data_7);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_78, gpio_78);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_79, gpio_79);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_80, gpio_80);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GPIO_81, gpio_81);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_UR2_LOC, ur2_loc);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_GSPI_LOC, gspi_loc);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_HI_WIDTH, hi_width);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_SF_EN, sf_en);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_ARM_TRACE_DBG_EN, arm_trace_dbg_en);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_EJTAG_AUCPU_LOC, ejtag_aucpu_loc);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_EJTAG_ACPU_LOC, ejtag_acpu_loc);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_EJTAG_VCPU_LOC, ejtag_vcpu_loc);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_EJTAG_SCPU_LOC, ejtag_scpu_loc);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_DMIC_LOC, dmic_loc);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_EJTAG_SECPU_LOC, ejtag_secpu_loc);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_VTC_DMIC_LOC, vtc_dmic_loc);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_VTC_TDM_LOC, vtc_tdm_loc);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_VTC_I2SI_LOC, vtc_i2si_loc);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_TDM_AI_LOC, tdm_ai_loc);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_AI_LOC, ai_loc);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_SPDIF_LOC, spdif_loc);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_HIF_EN_LOC, hif_en_loc);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_SC0_LOC, sc0_loc);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_SC1_LOC, sc1_loc);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_SCAN_SWITCH, scan_switch);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_WD_RSET, wd_rset);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_BOOT_SEL, boot_sel);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_RESET_N, reset_n);
+DECLARE_RTD1319D_PIN(RTD1319D_ISO_TESTMODE, testmode);
+
+#define RTD1319D_GROUP(_name) \
+ { \
+ .name = # _name, \
+ .pins = rtd1319d_ ## _name ## _pins, \
+ .num_pins = ARRAY_SIZE(rtd1319d_ ## _name ## _pins), \
+ }
+
+static const struct rtd_pin_group_desc rtd1319d_pin_groups[] = {
+ RTD1319D_GROUP(gpio_0),
+ RTD1319D_GROUP(gpio_1),
+ RTD1319D_GROUP(gpio_2),
+ RTD1319D_GROUP(gpio_3),
+ RTD1319D_GROUP(gpio_4),
+ RTD1319D_GROUP(gpio_5),
+ RTD1319D_GROUP(gpio_6),
+ RTD1319D_GROUP(gpio_7),
+ RTD1319D_GROUP(gpio_8),
+ RTD1319D_GROUP(gpio_9),
+ RTD1319D_GROUP(gpio_10),
+ RTD1319D_GROUP(gpio_11),
+ RTD1319D_GROUP(gpio_12),
+ RTD1319D_GROUP(gpio_13),
+ RTD1319D_GROUP(gpio_14),
+ RTD1319D_GROUP(gpio_15),
+ RTD1319D_GROUP(gpio_16),
+ RTD1319D_GROUP(gpio_17),
+ RTD1319D_GROUP(gpio_18),
+ RTD1319D_GROUP(gpio_19),
+ RTD1319D_GROUP(gpio_20),
+ RTD1319D_GROUP(gpio_21),
+ RTD1319D_GROUP(gpio_22),
+ RTD1319D_GROUP(gpio_23),
+ RTD1319D_GROUP(usb_cc2),
+ RTD1319D_GROUP(gpio_25),
+ RTD1319D_GROUP(gpio_26),
+ RTD1319D_GROUP(gpio_27),
+ RTD1319D_GROUP(gpio_28),
+ RTD1319D_GROUP(gpio_29),
+ RTD1319D_GROUP(gpio_30),
+ RTD1319D_GROUP(gpio_31),
+ RTD1319D_GROUP(gpio_32),
+ RTD1319D_GROUP(gpio_33),
+ RTD1319D_GROUP(gpio_34),
+ RTD1319D_GROUP(gpio_35),
+ RTD1319D_GROUP(hif_data),
+ RTD1319D_GROUP(hif_en),
+ RTD1319D_GROUP(hif_rdy),
+ RTD1319D_GROUP(hif_clk),
+ RTD1319D_GROUP(gpio_40),
+ RTD1319D_GROUP(gpio_41),
+ RTD1319D_GROUP(gpio_42),
+ RTD1319D_GROUP(gpio_43),
+ RTD1319D_GROUP(gpio_44),
+ RTD1319D_GROUP(gpio_45),
+ RTD1319D_GROUP(gpio_46),
+ RTD1319D_GROUP(gpio_47),
+ RTD1319D_GROUP(gpio_48),
+ RTD1319D_GROUP(gpio_49),
+ RTD1319D_GROUP(gpio_50),
+ RTD1319D_GROUP(usb_cc1),
+ RTD1319D_GROUP(gpio_52),
+ RTD1319D_GROUP(gpio_53),
+ RTD1319D_GROUP(ir_rx),
+ RTD1319D_GROUP(ur0_rx),
+ RTD1319D_GROUP(ur0_tx),
+ RTD1319D_GROUP(gpio_57),
+ RTD1319D_GROUP(gpio_58),
+ RTD1319D_GROUP(gpio_59),
+ RTD1319D_GROUP(gpio_60),
+ RTD1319D_GROUP(gpio_61),
+ RTD1319D_GROUP(gpio_62),
+ RTD1319D_GROUP(gpio_63),
+ RTD1319D_GROUP(gpio_64),
+ RTD1319D_GROUP(emmc_rst_n),
+ RTD1319D_GROUP(emmc_dd_sb),
+ RTD1319D_GROUP(emmc_clk),
+ RTD1319D_GROUP(emmc_cmd),
+ RTD1319D_GROUP(emmc_data_0),
+ RTD1319D_GROUP(emmc_data_1),
+ RTD1319D_GROUP(emmc_data_2),
+ RTD1319D_GROUP(emmc_data_3),
+ RTD1319D_GROUP(emmc_data_4),
+ RTD1319D_GROUP(emmc_data_5),
+ RTD1319D_GROUP(emmc_data_6),
+ RTD1319D_GROUP(emmc_data_7),
+ RTD1319D_GROUP(gpio_78),
+ RTD1319D_GROUP(gpio_79),
+ RTD1319D_GROUP(gpio_80),
+ RTD1319D_GROUP(gpio_81),
+ RTD1319D_GROUP(ur2_loc),
+ RTD1319D_GROUP(gspi_loc),
+ RTD1319D_GROUP(hi_width),
+ RTD1319D_GROUP(sf_en),
+ RTD1319D_GROUP(arm_trace_dbg_en),
+ RTD1319D_GROUP(ejtag_aucpu_loc),
+ RTD1319D_GROUP(ejtag_acpu_loc),
+ RTD1319D_GROUP(ejtag_vcpu_loc),
+ RTD1319D_GROUP(ejtag_scpu_loc),
+ RTD1319D_GROUP(dmic_loc),
+ RTD1319D_GROUP(ejtag_secpu_loc),
+ RTD1319D_GROUP(vtc_dmic_loc),
+ RTD1319D_GROUP(vtc_tdm_loc),
+ RTD1319D_GROUP(vtc_i2si_loc),
+ RTD1319D_GROUP(tdm_ai_loc),
+ RTD1319D_GROUP(ai_loc),
+ RTD1319D_GROUP(spdif_loc),
+ RTD1319D_GROUP(hif_en_loc),
+ RTD1319D_GROUP(sc0_loc),
+ RTD1319D_GROUP(sc1_loc),
+};
+
+static const char * const rtd1319d_gpio_groups[] = {
+ "gpio_0", "gpio_1", "gpio_2", "gpio_3", "gpio_4",
+ "gpio_5", "gpio_6", "gpio_7", "gpio_8", "gpio_9",
+ "gpio_10", "gpio_11", "gpio_12", "gpio_13", "gpio_14",
+ "gpio_15", "gpio_16", "gpio_17", "gpio_18", "gpio_19",
+ "gpio_20", "gpio_21", "gpio_22", "gpio_23", "usb_cc2",
+ "gpio_25", "gpio_26", "gpio_27", "gpio_28", "gpio_29",
+ "gpio_30", "gpio_31", "gpio_32", "gpio_33", "gpio_34",
+ "gpio_35", "hif_data", "hif_en", "hif_rdy", "hif_clk",
+ "gpio_40", "gpio_41", "gpio_42", "gpio_43", "gpio_44",
+ "gpio_45", "gpio_46", "gpio_47", "gpio_48", "gpio_49",
+ "gpio_50", "usb_cc1", "gpio_52", "gpio_53", "ir_rx",
+ "ur0_rx", "ur0_tx", "gpio_57", "gpio_58", "gpio_59",
+ "gpio_60", "gpio_61", "gpio_62", "gpio_63", "gpio_64",
+ "emmc_rst_n", "emmc_dd_sb", "emmc_clk", "emmc_cmd",
+ "emmc_data_0", "emmc_data_1", "emmc_data_2", "emmc_data_3",
+ "emmc_data_4", "emmc_data_5", "emmc_data_6", "emmc_data_7",
+ "gpio_78", "gpio_79", "gpio_80", "gpio_81" };
+static const char * const rtd1319d_nf_groups[] = {
+ "emmc_rst_n", "emmc_clk", "emmc_cmd", "emmc_data_0",
+ "emmc_data_1", "emmc_data_2", "emmc_data_3", "emmc_data_4",
+ "emmc_data_5", "emmc_data_6", "emmc_data_7",
+ "gpio_78", "gpio_79", "gpio_80", "gpio_81" };
+static const char * const rtd1319d_emmc_groups[] = {
+ "emmc_rst_n", "emmc_dd_sb", "emmc_clk", "emmc_cmd",
+ "emmc_data_0", "emmc_data_1", "emmc_data_2", "emmc_data_3",
+ "emmc_data_4", "emmc_data_5", "emmc_data_6", "emmc_data_7" };
+static const char * const rtd1319d_tp0_groups[] = {
+ "gpio_2", "gpio_3", "gpio_4", "gpio_57", "gpio_58",
+ "gpio_59", "gpio_60", "gpio_61", "gpio_62", "gpio_63",
+ "gpio_64" };
+static const char * const rtd1319d_tp1_groups[] = {
+ "gpio_61", "gpio_62", "gpio_63", "gpio_64" };
+static const char * const rtd1319d_sc0_groups[] = {
+ "gpio_18", "gpio_19", "gpio_31" };
+static const char * const rtd1319d_sc0_data0_groups[] = { "gpio_20", "sc0_loc" };
+static const char * const rtd1319d_sc0_data1_groups[] = { "gpio_30", "sc0_loc" };
+static const char * const rtd1319d_sc0_data2_groups[] = { "gpio_47", "sc0_loc" };
+static const char * const rtd1319d_sc1_groups[] = {
+ "gpio_2", "gpio_3", "gpio_5" };
+static const char * const rtd1319d_sc1_data0_groups[] = { "gpio_52", "sc1_loc" };
+static const char * const rtd1319d_sc1_data1_groups[] = { "gpio_34", "sc1_loc" };
+static const char * const rtd1319d_sc1_data2_groups[] = { "gpio_35", "sc1_loc" };
+static const char * const rtd1319d_ao_groups[] = {
+ "gpio_2", "gpio_3", "gpio_4", "gpio_61", "gpio_62",
+ "gpio_63", "gpio_64" };
+static const char * const rtd1319d_gspi_loc0_groups[] = {
+ "gpio_18", "gpio_19", "gpio_20", "gpio_31", "gspi_loc" };
+static const char * const rtd1319d_gspi_loc1_groups[] = {
+ "gpio_8", "gpio_9", "gpio_10", "gpio_11", "gspi_loc" };
+static const char * const rtd1319d_uart0_groups[] = { "ur0_rx", "ur0_tx"};
+static const char * const rtd1319d_uart1_groups[] = {
+ "gpio_8", "gpio_9", "gpio_10", "gpio_11" };
+static const char * const rtd1319d_uart2_loc0_groups[] = {
+ "gpio_18", "gpio_19", "gpio_20", "gpio_31", "ur2_loc" };
+static const char * const rtd1319d_uart2_loc1_groups[] = {
+ "gpio_25", "gpio_26", "gpio_27", "gpio_28", "ur2_loc" };
+static const char * const rtd1319d_i2c0_groups[] = { "gpio_12", "gpio_13" };
+static const char * const rtd1319d_i2c1_groups[] = { "gpio_16", "gpio_17" };
+static const char * const rtd1319d_i2c3_groups[] = { "gpio_26", "gpio_27" };
+static const char * const rtd1319d_i2c4_groups[] = { "gpio_34", "gpio_35" };
+static const char * const rtd1319d_i2c5_groups[] = { "gpio_29", "gpio_46" };
+static const char * const rtd1319d_pcie1_groups[] = { "gpio_22" };
+static const char * const rtd1319d_sdio_groups[] = {
+ "gpio_40", "gpio_41", "gpio_42", "gpio_43", "gpio_44",
+ "gpio_45" };
+static const char * const rtd1319d_etn_led_groups[] = { "gpio_14", "gpio_15" };
+static const char * const rtd1319d_etn_phy_groups[] = { "gpio_14", "gpio_15" };
+static const char * const rtd1319d_spi_groups[] = {
+ "gpio_18", "gpio_19", "gpio_20", "gpio_31" };
+static const char * const rtd1319d_pwm0_loc0_groups[] = { "gpio_26" };
+static const char * const rtd1319d_pwm0_loc1_groups[] = { "gpio_20" };
+static const char * const rtd1319d_pwm1_loc0_groups[] = { "gpio_27" };
+static const char * const rtd1319d_pwm1_loc1_groups[] = { "gpio_21" };
+static const char * const rtd1319d_pwm2_loc0_groups[] = { "gpio_28" };
+static const char * const rtd1319d_pwm2_loc1_groups[] = { "gpio_22" };
+static const char * const rtd1319d_pwm3_loc0_groups[] = { "gpio_47" };
+static const char * const rtd1319d_pwm3_loc1_groups[] = { "gpio_23" };
+static const char * const rtd1319d_qam_agc_if0_groups[] = { "gpio_21" };
+static const char * const rtd1319d_qam_agc_if1_groups[] = { "gpio_23" };
+static const char * const rtd1319d_spdif_optical_loc0_groups[] = { "gpio_21", "spdif_loc" };
+static const char * const rtd1319d_spdif_optical_loc1_groups[] = { "gpio_6", "spdif_loc" };
+static const char * const rtd1319d_usb_cc1_groups[] = { "usb_cc1" };
+static const char * const rtd1319d_usb_cc2_groups[] = { "usb_cc2" };
+static const char * const rtd1319d_vfd_groups[] = {
+ "gpio_26", "gpio_27", "gpio_28" };
+static const char * const rtd1319d_sd_groups[] = {
+ "gpio_32", "gpio_33", "gpio_34", "gpio_35",
+ "hif_data", "hif_en", "hif_rdy", "hif_clk" };
+static const char * const rtd1319d_dmic_loc0_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60", "gpio_61",
+ "gpio_62", "gpio_63", "gpio_64", "dmic_loc" };
+static const char * const rtd1319d_dmic_loc1_groups[] = {
+ "gpio_32", "gpio_33", "gpio_34", "gpio_35",
+ "hif_data", "hif_en", "hif_rdy", "hif_clk",
+ "dmic_loc" };
+static const char * const rtd1319d_ai_loc0_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60", "gpio_61",
+ "gpio_62", "gpio_63", "ai_loc" };
+static const char * const rtd1319d_ai_loc1_groups[] = {
+ "gpio_32", "gpio_33", "gpio_34", "hif_data",
+ "hif_en", "hif_rdy", "hif_clk", "ai_loc" };
+static const char * const rtd1319d_tdm_ai_loc0_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59",
+ "gpio_60", "tdm_ai_loc" };
+static const char * const rtd1319d_tdm_ai_loc1_groups[] = {
+ "hif_data", "hif_en", "hif_rdy", "hif_clk", "tdm_ai_loc" };
+static const char * const rtd1319d_hi_loc0_groups[] = {
+ "hif_data", "hif_en", "hif_rdy", "hif_clk" };
+static const char * const rtd1319d_hi_m_groups[] = {
+ "hif_data", "hif_en", "hif_rdy", "hif_clk" };
+static const char * const rtd1319d_vtc_i2so_groups[] = {
+ "gpio_2", "gpio_3", "gpio_4", "gpio_64"};
+static const char * const rtd1319d_vtc_i2si_loc0_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60", "gpio_61",
+ "vtc_i2si_loc" };
+static const char * const rtd1319d_vtc_i2si_loc1_groups[] = {
+ "gpio_32", "hif_data", "hif_en", "hif_rdy", "hif_clk",
+ "vtc_i2si_loc" };
+static const char * const rtd1319d_vtc_dmic_loc0_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60",
+ "vtc_dmic_loc" };
+static const char * const rtd1319d_vtc_dmic_loc1_groups[] = {
+ "hif_data", "hif_en", "hif_rdy", "hif_clk",
+ "vtc_dmic_loc" };
+static const char * const rtd1319d_vtc_tdm_loc0_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60",
+ "vtc_tdm_loc" };
+static const char * const rtd1319d_vtc_tdm_loc1_groups[] = {
+ "hif_data", "hif_en", "hif_rdy", "hif_clk",
+ "vtc_tdm_loc" };
+static const char * const rtd1319d_dc_fan_groups[] = { "gpio_47" };
+static const char * const rtd1319d_pll_test_loc0_groups[] = { "gpio_52", "gpio_53" };
+static const char * const rtd1319d_pll_test_loc1_groups[] = { "gpio_48", "gpio_49" };
+static const char * const rtd1319d_spdif_groups[] = { "gpio_50" };
+static const char * const rtd1319d_ir_rx_groups[] = { "ir_rx" };
+static const char * const rtd1319d_uart2_disable_groups[] = { "ur2_loc" };
+static const char * const rtd1319d_gspi_disable_groups[] = { "gspi_loc" };
+static const char * const rtd1319d_hi_width_disable_groups[] = { "hi_width" };
+static const char * const rtd1319d_hi_width_1bit_groups[] = { "hi_width" };
+static const char * const rtd1319d_sf_disable_groups[] = { "sf_en" };
+static const char * const rtd1319d_sf_enable_groups[] = { "sf_en" };
+static const char * const rtd1319d_scpu_ejtag_loc0_groups[] = {
+ "gpio_2", "gpio_3", "gpio_4", "gpio_5", "gpio_6",
+ "ejtag_scpu_loc" };
+static const char * const rtd1319d_scpu_ejtag_loc1_groups[] = {
+ "gpio_32", "gpio_33", "hif_data", "hif_en", "hif_clk",
+ "ejtag_scpu_loc" };
+static const char * const rtd1319d_scpu_ejtag_loc2_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60", "gpio_61",
+ "ejtag_scpu_loc" };
+static const char * const rtd1319d_acpu_ejtag_loc0_groups[] = {
+ "gpio_2", "gpio_3", "gpio_4", "gpio_5", "gpio_6",
+ "ejtag_acpu_loc" };
+static const char * const rtd1319d_acpu_ejtag_loc1_groups[] = {
+ "gpio_32", "gpio_33", "hif_data", "hif_en", "hif_clk",
+ "ejtag_acpu_loc" };
+static const char * const rtd1319d_acpu_ejtag_loc2_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60", "gpio_61",
+ "ejtag_acpu_loc" };
+static const char * const rtd1319d_vcpu_ejtag_loc0_groups[] = {
+ "gpio_2", "gpio_3", "gpio_4", "gpio_5", "gpio_6",
+ "ejtag_vcpu_loc" };
+static const char * const rtd1319d_vcpu_ejtag_loc1_groups[] = {
+ "gpio_32", "gpio_33", "hif_data", "hif_en", "hif_clk",
+ "ejtag_vcpu_loc" };
+static const char * const rtd1319d_vcpu_ejtag_loc2_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60", "gpio_61",
+ "ejtag_vcpu_loc" };
+static const char * const rtd1319d_secpu_ejtag_loc0_groups[] = {
+ "gpio_2", "gpio_3", "gpio_4", "gpio_5", "gpio_6",
+ "ejtag_secpu_loc" };
+static const char * const rtd1319d_secpu_ejtag_loc1_groups[] = {
+ "gpio_32", "gpio_33", "hif_data", "hif_en", "hif_clk",
+ "ejtag_secpu_loc" };
+static const char * const rtd1319d_secpu_ejtag_loc2_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60", "gpio_61",
+ "ejtag_secpu_loc" };
+static const char * const rtd1319d_aucpu_ejtag_loc0_groups[] = {
+ "gpio_2", "gpio_3", "gpio_4", "gpio_5", "gpio_6",
+ "ejtag_aucpu_loc" };
+static const char * const rtd1319d_aucpu_ejtag_loc1_groups[] = {
+ "gpio_32", "gpio_33", "hif_data", "hif_en", "hif_clk",
+ "ejtag_aucpu_loc" };
+static const char * const rtd1319d_aucpu_ejtag_loc2_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60", "gpio_61",
+ "ejtag_aucpu_loc" };
+static const char * const rtd1319d_iso_tristate_groups[] = {
+ "emmc_rst_n", "emmc_dd_sb", "emmc_clk", "emmc_cmd",
+ "emmc_data_0", "emmc_data_1", "emmc_data_2", "emmc_data_3",
+ "emmc_data_4", "emmc_data_5", "emmc_data_6", "emmc_data_7",
+ "gpio_78", "gpio_79", "gpio_80", "gpio_81", "gpio_1",
+ "gpio_8", "gpio_9", "gpio_10", "gpio_11", "gpio_22",
+ "gpio_23", "usb_cc2", "gpio_25", "gpio_28", "gpio_29",
+ "gpio_30", "gpio_32", "gpio_33", "hif_data", "hif_en",
+ "hif_rdy", "hif_clk", "gpio_40", "gpio_41", "gpio_42",
+ "gpio_43", "gpio_44", "gpio_45", "gpio_46", "usb_cc1",
+ "ir_rx", "ur0_rx", "ur0_tx", "gpio_62", "gpio_63", "gpio_64" };
+static const char * const rtd1319d_dbg_out0_groups[] = {
+ "gpio_12", "gpio_13", "gpio_16", "gpio_17", "gpio_26", "gpio_27",
+ "gpio_34", "gpio_35", "gpio_48", "gpio_49", "gpio_57", "gpio_58",
+ "gpio_59", "gpio_60", "gpio_61" };
+static const char * const rtd1319d_dbg_out1_groups[] = {
+ "gpio_0", "gpio_2", "gpio_3", "gpio_4", "gpio_5", "gpio_6",
+ "gpio_7", "gpio_14", "gpio_15", "gpio_18", "gpio_19", "gpio_20",
+ "gpio_21", "gpio_31", "gpio_47", "gpio_50", "gpio_52", "gpio_53" };
+static const char * const rtd1319d_standby_dbg_groups[] = {
+ "gpio_2", "gpio_3", "ir_rx" };
+static const char * const rtd1319d_arm_trace_debug_disable_groups[] = { "arm_trace_dbg_en" };
+static const char * const rtd1319d_arm_trace_debug_enable_groups[] = { "arm_trace_dbg_en" };
+static const char * const rtd1319d_aucpu_ejtag_disable_groups[] = { "ejtag_aucpu_loc" };
+static const char * const rtd1319d_acpu_ejtag_disable_groups[] = { "ejtag_acpu_loc" };
+static const char * const rtd1319d_vcpu_ejtag_disable_groups[] = { "ejtag_vcpu_loc" };
+static const char * const rtd1319d_scpu_ejtag_disable_groups[] = { "ejtag_scpu_loc" };
+static const char * const rtd1319d_secpu_ejtag_disable_groups[] = { "ejtag_secpu_loc" };
+static const char * const rtd1319d_vtc_dmic_loc_disable_groups[] = { "vtc_dmic_loc" };
+static const char * const rtd1319d_vtc_tdm_disable_groups[] = { "vtc_tdm_loc" };
+static const char * const rtd1319d_vtc_i2si_disable_groups[] = { "vtc_i2si_loc" };
+static const char * const rtd1319d_tdm_ai_disable_groups[] = { "tdm_ai_loc" };
+static const char * const rtd1319d_ai_disable_groups[] = { "ai_loc" };
+static const char * const rtd1319d_spdif_disable_groups[] = { "spdif_loc" };
+static const char * const rtd1319d_hif_disable_groups[] = { "hif_en_loc" };
+static const char * const rtd1319d_hif_enable_groups[] = { "hif_en_loc" };
+static const char * const rtd1319d_test_loop_groups[] = { "gpio_27" };
+static const char * const rtd1319d_pmic_pwrup_groups[] = { "gpio_78" };
+
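+/*
+ * RTD1319D_FUNC(x) builds a function descriptor from the matching
+ * rtd1319d_x_groups[] list of pin groups that can mux to that function.
+ */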
+#define RTD1319D_FUNC(_name) \
+ { \
+ .name = # _name, \
+ .groups = rtd1319d_ ## _name ## _groups, \
+ .num_groups = ARRAY_SIZE(rtd1319d_ ## _name ## _groups), \
+ }
+
+static const struct rtd_pin_func_desc rtd1319d_pin_functions[] = {
+ RTD1319D_FUNC(gpio),
+ RTD1319D_FUNC(nf),
+ RTD1319D_FUNC(emmc),
+ RTD1319D_FUNC(tp0),
+ RTD1319D_FUNC(tp1),
+ RTD1319D_FUNC(sc0),
+ RTD1319D_FUNC(sc0_data0),
+ RTD1319D_FUNC(sc0_data1),
+ RTD1319D_FUNC(sc0_data2),
+ RTD1319D_FUNC(sc1),
+ RTD1319D_FUNC(sc1_data0),
+ RTD1319D_FUNC(sc1_data1),
+ RTD1319D_FUNC(sc1_data2),
+ RTD1319D_FUNC(ao),
+ RTD1319D_FUNC(gspi_loc0),
+ RTD1319D_FUNC(gspi_loc1),
+ RTD1319D_FUNC(uart0),
+ RTD1319D_FUNC(uart1),
+ RTD1319D_FUNC(uart2_loc0),
+ RTD1319D_FUNC(uart2_loc1),
+ RTD1319D_FUNC(i2c0),
+ RTD1319D_FUNC(i2c1),
+ RTD1319D_FUNC(i2c3),
+ RTD1319D_FUNC(i2c4),
+ RTD1319D_FUNC(i2c5),
+ RTD1319D_FUNC(pcie1),
+ RTD1319D_FUNC(sdio),
+ RTD1319D_FUNC(etn_led),
+ RTD1319D_FUNC(etn_phy),
+ RTD1319D_FUNC(spi),
+ RTD1319D_FUNC(pwm0_loc0),
+ RTD1319D_FUNC(pwm0_loc1),
+ RTD1319D_FUNC(pwm1_loc0),
+ RTD1319D_FUNC(pwm1_loc1),
+ RTD1319D_FUNC(pwm2_loc0),
+ RTD1319D_FUNC(pwm2_loc1),
+ RTD1319D_FUNC(pwm3_loc0),
+ RTD1319D_FUNC(pwm3_loc1),
+ RTD1319D_FUNC(qam_agc_if0),
+ RTD1319D_FUNC(qam_agc_if1),
+ RTD1319D_FUNC(spdif_optical_loc0),
+ RTD1319D_FUNC(spdif_optical_loc1),
+ RTD1319D_FUNC(usb_cc1),
+ RTD1319D_FUNC(usb_cc2),
+ RTD1319D_FUNC(vfd),
+ RTD1319D_FUNC(sd),
+ RTD1319D_FUNC(dmic_loc0),
+ RTD1319D_FUNC(dmic_loc1),
+ RTD1319D_FUNC(ai_loc0),
+ RTD1319D_FUNC(ai_loc1),
+ RTD1319D_FUNC(tdm_ai_loc0),
+ RTD1319D_FUNC(tdm_ai_loc1),
+ RTD1319D_FUNC(hi_loc0),
+ RTD1319D_FUNC(hi_m),
+ RTD1319D_FUNC(vtc_i2so),
+ RTD1319D_FUNC(vtc_i2si_loc0),
+ RTD1319D_FUNC(vtc_i2si_loc1),
+ RTD1319D_FUNC(vtc_dmic_loc0),
+ RTD1319D_FUNC(vtc_dmic_loc1),
+ RTD1319D_FUNC(vtc_tdm_loc0),
+ RTD1319D_FUNC(vtc_tdm_loc1),
+ RTD1319D_FUNC(dc_fan),
+ RTD1319D_FUNC(pll_test_loc0),
+ RTD1319D_FUNC(pll_test_loc1),
+ RTD1319D_FUNC(ir_rx),
+ RTD1319D_FUNC(uart2_disable),
+ RTD1319D_FUNC(gspi_disable),
+ RTD1319D_FUNC(hi_width_disable),
+ RTD1319D_FUNC(hi_width_1bit),
+ RTD1319D_FUNC(sf_disable),
+ RTD1319D_FUNC(sf_enable),
+ RTD1319D_FUNC(scpu_ejtag_loc0),
+ RTD1319D_FUNC(scpu_ejtag_loc1),
+ RTD1319D_FUNC(scpu_ejtag_loc2),
+ RTD1319D_FUNC(acpu_ejtag_loc0),
+ RTD1319D_FUNC(acpu_ejtag_loc1),
+ RTD1319D_FUNC(acpu_ejtag_loc2),
+ RTD1319D_FUNC(vcpu_ejtag_loc0),
+ RTD1319D_FUNC(vcpu_ejtag_loc1),
+ RTD1319D_FUNC(vcpu_ejtag_loc2),
+ RTD1319D_FUNC(secpu_ejtag_loc0),
+ RTD1319D_FUNC(secpu_ejtag_loc1),
+ RTD1319D_FUNC(secpu_ejtag_loc2),
+ RTD1319D_FUNC(aucpu_ejtag_loc0),
+ RTD1319D_FUNC(aucpu_ejtag_loc1),
+ RTD1319D_FUNC(aucpu_ejtag_loc2),
+ RTD1319D_FUNC(iso_tristate),
+ RTD1319D_FUNC(dbg_out0),
+ RTD1319D_FUNC(dbg_out1),
+ RTD1319D_FUNC(standby_dbg),
+ RTD1319D_FUNC(spdif),
+ RTD1319D_FUNC(arm_trace_debug_disable),
+ RTD1319D_FUNC(arm_trace_debug_enable),
+ RTD1319D_FUNC(aucpu_ejtag_disable),
+ RTD1319D_FUNC(acpu_ejtag_disable),
+ RTD1319D_FUNC(vcpu_ejtag_disable),
+ RTD1319D_FUNC(scpu_ejtag_disable),
+ RTD1319D_FUNC(secpu_ejtag_disable),
+ RTD1319D_FUNC(vtc_dmic_loc_disable),
+ RTD1319D_FUNC(vtc_tdm_disable),
+ RTD1319D_FUNC(vtc_i2si_disable),
+ RTD1319D_FUNC(tdm_ai_disable),
+ RTD1319D_FUNC(ai_disable),
+ RTD1319D_FUNC(spdif_disable),
+ RTD1319D_FUNC(hif_disable),
+ RTD1319D_FUNC(hif_enable),
+ RTD1319D_FUNC(test_loop),
+ RTD1319D_FUNC(pmic_pwrup),
+};
+
+#undef RTD1319D_FUNC
+
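+/*
+ * Per-pin mux settings: each RTK_PIN_MUX() entry gives the mux register
+ * offset, the bit field used for that pin, and one RTK_PIN_FUNC() value
+ * per selectable function.
+ */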
+static const struct rtd_pin_desc rtd1319d_iso_muxes[] = {
+ [RTD1319D_ISO_EMMC_RST_N] = RTK_PIN_MUX(emmc_rst_n, 0x0, GENMASK(3, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 0), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 0), "iso_tristate")),
+ [RTD1319D_ISO_EMMC_DD_SB] = RTK_PIN_MUX(emmc_dd_sb, 0x0, GENMASK(7, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 4), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 4), "iso_tristate")),
+ [RTD1319D_ISO_EMMC_CLK] = RTK_PIN_MUX(emmc_clk, 0x0, GENMASK(11, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 8), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 8), "iso_tristate")),
+ [RTD1319D_ISO_EMMC_CMD] = RTK_PIN_MUX(emmc_cmd, 0x0, GENMASK(15, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 12), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 12), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 12), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 12), "iso_tristate")),
+ [RTD1319D_ISO_EMMC_DATA_0] = RTK_PIN_MUX(emmc_data_0, 0x0, GENMASK(19, 16),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 16), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 16), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 16), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 16), "iso_tristate")),
+ [RTD1319D_ISO_EMMC_DATA_1] = RTK_PIN_MUX(emmc_data_1, 0x0, GENMASK(23, 20),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 20), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 20), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 20), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 20), "iso_tristate")),
+ [RTD1319D_ISO_EMMC_DATA_2] = RTK_PIN_MUX(emmc_data_2, 0x0, GENMASK(27, 24),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 24), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 24), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 24), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 24), "iso_tristate")),
+ [RTD1319D_ISO_EMMC_DATA_3] = RTK_PIN_MUX(emmc_data_3, 0x0, GENMASK(31, 28),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 28), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 28), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 28), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 28), "iso_tristate")),
+
+ [RTD1319D_ISO_EMMC_DATA_4] = RTK_PIN_MUX(emmc_data_4, 0x4, GENMASK(3, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 0), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 0), "iso_tristate")),
+ [RTD1319D_ISO_EMMC_DATA_5] = RTK_PIN_MUX(emmc_data_5, 0x4, GENMASK(7, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 4), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 4), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 4), "iso_tristate")),
+ [RTD1319D_ISO_EMMC_DATA_6] = RTK_PIN_MUX(emmc_data_6, 0x4, GENMASK(11, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 8), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 8), "iso_tristate")),
+ [RTD1319D_ISO_EMMC_DATA_7] = RTK_PIN_MUX(emmc_data_7, 0x4, GENMASK(15, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 12), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 12), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 12), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 12), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_78] = RTK_PIN_MUX(gpio_78, 0x4, GENMASK(19, 16),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 16), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 16), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 16), "pmic_pwrup"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 16), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_79] = RTK_PIN_MUX(gpio_79, 0x4, GENMASK(23, 20),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 20), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 20), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 20), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_80] = RTK_PIN_MUX(gpio_80, 0x4, GENMASK(27, 24),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 24), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 24), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 24), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_81] = RTK_PIN_MUX(gpio_81, 0x4, GENMASK(31, 28),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 28), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 28), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 28), "iso_tristate")),
+
+ [RTD1319D_ISO_GPIO_0] = RTK_PIN_MUX(gpio_0, 0x8, GENMASK(3, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 0), "dbg_out1")),
+ [RTD1319D_ISO_GPIO_1] = RTK_PIN_MUX(gpio_1, 0x8, GENMASK(7, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 4), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_2] = RTK_PIN_MUX(gpio_2, 0x8, GENMASK(11, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "standby_dbg"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 8), "tp0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 8), "aucpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 8), "sc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 8), "scpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 8), "acpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 8), "vcpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 8), "secpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 8), "vtc_i2so"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 8), "ao"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 8), "dbg_out1")),
+ [RTD1319D_ISO_GPIO_3] = RTK_PIN_MUX(gpio_3, 0x8, GENMASK(15, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 12), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 12), "standby_dbg"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 12), "tp0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 12), "aucpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 12), "sc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 12), "scpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 12), "acpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 12), "vcpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 12), "secpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 12), "vtc_i2so"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 12), "ao"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 12), "dbg_out1")),
+ [RTD1319D_ISO_GPIO_4] = RTK_PIN_MUX(gpio_4, 0x8, GENMASK(19, 16),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 16), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 16), "tp0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 16), "aucpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 16), "scpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 16), "acpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 16), "vcpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 16), "secpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 16), "vtc_i2so"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 16), "ao"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 16), "dbg_out1")),
+ [RTD1319D_ISO_GPIO_5] = RTK_PIN_MUX(gpio_5, 0x8, GENMASK(23, 20),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 20), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 20), "aucpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 20), "sc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 20), "scpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 20), "acpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 20), "vcpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 20), "secpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 20), "dbg_out1")),
+ [RTD1319D_ISO_GPIO_6] = RTK_PIN_MUX(gpio_6, 0x8, GENMASK(27, 24),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 24), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 24), "aucpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 24), "spdif_optical_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 24), "scpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 24), "acpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 24), "vcpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 24), "secpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 24), "dbg_out1")),
+ [RTD1319D_ISO_GPIO_7] = RTK_PIN_MUX(gpio_7, 0x8, GENMASK(31, 28),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 28), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 28), "dbg_out1")),
+
+ [RTD1319D_ISO_GPIO_8] = RTK_PIN_MUX(gpio_8, 0xc, GENMASK(3, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "uart1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 0), "gspi_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 0), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_9] = RTK_PIN_MUX(gpio_9, 0xc, GENMASK(7, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 4), "uart1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 4), "gspi_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 4), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_10] = RTK_PIN_MUX(gpio_10, 0xc, GENMASK(11, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "uart1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 8), "gspi_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 8), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_11] = RTK_PIN_MUX(gpio_11, 0xc, GENMASK(15, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 12), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 12), "uart1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 12), "gspi_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 12), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_12] = RTK_PIN_MUX(gpio_12, 0xc, GENMASK(19, 16),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 16), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 16), "i2c0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 16), "dbg_out0")),
+ [RTD1319D_ISO_GPIO_13] = RTK_PIN_MUX(gpio_13, 0xc, GENMASK(23, 20),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 20), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 20), "i2c0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 20), "dbg_out0")),
+ [RTD1319D_ISO_GPIO_14] = RTK_PIN_MUX(gpio_14, 0xc, GENMASK(27, 24),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 24), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 24), "etn_led"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 24), "etn_phy"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 24), "dbg_out1")),
+ [RTD1319D_ISO_GPIO_15] = RTK_PIN_MUX(gpio_15, 0xc, GENMASK(31, 28),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 28), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 28), "etn_led"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 28), "etn_phy"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 28), "dbg_out1")),
+
+ [RTD1319D_ISO_GPIO_16] = RTK_PIN_MUX(gpio_16, 0x10, GENMASK(3, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "i2c1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 0), "dbg_out0")),
+ [RTD1319D_ISO_GPIO_17] = RTK_PIN_MUX(gpio_17, 0x10, GENMASK(7, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 4), "i2c1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 4), "dbg_out0")),
+ [RTD1319D_ISO_GPIO_18] = RTK_PIN_MUX(gpio_18, 0x10, GENMASK(11, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "uart2_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 8), "sc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 8), "gspi_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 8), "spi"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 8), "dbg_out1")),
+ [RTD1319D_ISO_GPIO_19] = RTK_PIN_MUX(gpio_19, 0x10, GENMASK(15, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 12), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 12), "uart2_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 12), "sc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 12), "gspi_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 12), "spi"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 12), "dbg_out1")),
+ [RTD1319D_ISO_GPIO_20] = RTK_PIN_MUX(gpio_20, 0x10, GENMASK(19, 16),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 16), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 16), "uart2_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 16), "pwm0_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 16), "gspi_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 16), "sc0_data0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 16), "spi"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 16), "dbg_out1")),
+ [RTD1319D_ISO_GPIO_21] = RTK_PIN_MUX(gpio_21, 0x10, GENMASK(23, 20),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 20), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 20), "pwm1_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 20), "qam_agc_if0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 20), "spdif_optical_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 20), "dbg_out1")),
+ [RTD1319D_ISO_GPIO_22] = RTK_PIN_MUX(gpio_22, 0x10, GENMASK(27, 24),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 24), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 24), "pwm2_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 24), "pcie1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 24), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_23] = RTK_PIN_MUX(gpio_23, 0x10, GENMASK(31, 28),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 28), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 28), "pwm3_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 28), "qam_agc_if1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 28), "iso_tristate")),
+
+ [RTD1319D_ISO_USB_CC2] = RTK_PIN_MUX(usb_cc2, 0x14, GENMASK(3, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "usb_cc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 0), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_25] = RTK_PIN_MUX(gpio_25, 0x14, GENMASK(7, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 4), "uart2_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 4), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_26] = RTK_PIN_MUX(gpio_26, 0x14, GENMASK(11, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "uart2_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 8), "vfd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 8), "pwm0_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 8), "i2c3"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 8), "dbg_out0")),
+ [RTD1319D_ISO_GPIO_27] = RTK_PIN_MUX(gpio_27, 0x14, GENMASK(15, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 12), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 12), "uart2_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 12), "vfd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 12), "pwm1_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 12), "i2c3"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 12), "test_loop"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 12), "dbg_out0")),
+ [RTD1319D_ISO_GPIO_28] = RTK_PIN_MUX(gpio_28, 0x14, GENMASK(19, 16),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 16), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 16), "uart2_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 16), "vfd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 16), "pwm2_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 16), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_29] = RTK_PIN_MUX(gpio_29, 0x14, GENMASK(23, 20),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 20), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 20), "i2c5"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 20), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_30] = RTK_PIN_MUX(gpio_30, 0x14, GENMASK(27, 24),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 24), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 24), "sc0_data1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 24), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_31] = RTK_PIN_MUX(gpio_31, 0x14, GENMASK(31, 28),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 28), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 28), "uart2_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 28), "sc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 28), "gspi_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 28), "spi"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 28), "dbg_out1")),
+
+ [RTD1319D_ISO_GPIO_32] = RTK_PIN_MUX(gpio_32, 0x18, GENMASK(3, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "sd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 0), "aucpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 0), "dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 0), "ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 0), "scpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 0), "acpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 0), "vcpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 0), "vtc_i2si_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xe, 0), "secpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 0), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_33] = RTK_PIN_MUX(gpio_33, 0x18, GENMASK(7, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 4), "sd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 4), "aucpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 4), "dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 4), "ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 4), "scpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 4), "acpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 4), "vcpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xe, 4), "secpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 4), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_34] = RTK_PIN_MUX(gpio_34, 0x18, GENMASK(11, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "sd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 8), "dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 8), "ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 8), "i2c4"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 8), "sc1_data1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 8), "dbg_out0")),
+ [RTD1319D_ISO_GPIO_35] = RTK_PIN_MUX(gpio_35, 0x18, GENMASK(15, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 12), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 12), "sd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 12), "dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 12), "i2c4"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 12), "sc1_data2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 12), "dbg_out0")),
+ [RTD1319D_ISO_HIF_DATA] = RTK_PIN_MUX(hif_data, 0x18, GENMASK(19, 16),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 16), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 16), "sd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 16), "aucpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 16), "dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 16), "tdm_ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 16), "scpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 16), "acpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 16), "vcpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 16), "ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 16), "hi_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xa, 16), "hi_m"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 16), "vtc_i2si_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 16), "vtc_tdm_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xd, 16), "vtc_dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xe, 16), "secpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 16), "iso_tristate")),
+ [RTD1319D_ISO_HIF_EN] = RTK_PIN_MUX(hif_en, 0x18, GENMASK(23, 20),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 20), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 20), "sd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 20), "aucpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 20), "dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 20), "tdm_ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 20), "scpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 20), "acpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 20), "vcpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 20), "ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 20), "hi_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xa, 20), "hi_m"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 20), "vtc_i2si_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 20), "vtc_tdm_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xd, 20), "vtc_dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xe, 20), "secpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 20), "iso_tristate")),
+ [RTD1319D_ISO_HIF_RDY] = RTK_PIN_MUX(hif_rdy, 0x18, GENMASK(27, 24),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 24), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 24), "sd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 24), "dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 24), "tdm_ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 24), "ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 24), "hi_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xa, 24), "hi_m"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 24), "vtc_i2si_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 24), "vtc_tdm_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xd, 24), "vtc_dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 24), "iso_tristate")),
+ [RTD1319D_ISO_HIF_CLK] = RTK_PIN_MUX(hif_clk, 0x18, GENMASK(31, 28),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 28), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 28), "sd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 28), "aucpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 28), "dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 28), "tdm_ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 28), "scpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 28), "acpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 28), "vcpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 28), "ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 28), "hi_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xa, 28), "hi_m"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 28), "vtc_i2si_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 28), "vtc_tdm_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xd, 28), "vtc_dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xe, 28), "secpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 28), "iso_tristate")),
+
+ [RTD1319D_ISO_GPIO_40] = RTK_PIN_MUX(gpio_40, 0x1c, GENMASK(3, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 0), "sdio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 0), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_41] = RTK_PIN_MUX(gpio_41, 0x1c, GENMASK(7, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 4), "sdio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 4), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_42] = RTK_PIN_MUX(gpio_42, 0x1c, GENMASK(11, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 8), "sdio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 8), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_43] = RTK_PIN_MUX(gpio_43, 0x1c, GENMASK(15, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 12), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 12), "sdio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 12), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_44] = RTK_PIN_MUX(gpio_44, 0x1c, GENMASK(19, 16),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 16), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 16), "sdio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 16), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_45] = RTK_PIN_MUX(gpio_45, 0x1c, GENMASK(23, 20),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 20), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 20), "sdio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 20), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_46] = RTK_PIN_MUX(gpio_46, 0x1c, GENMASK(27, 24),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 24), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 24), "i2c5"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 24), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_47] = RTK_PIN_MUX(gpio_47, 0x1c, GENMASK(31, 28),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 28), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 28), "dc_fan"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 28), "pwm3_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 28), "sc0_data2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 28), "dbg_out1")),
+
+ [RTD1319D_ISO_GPIO_48] = RTK_PIN_MUX(gpio_48, 0x20, GENMASK(3, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "pll_test_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 0), "dbg_out0")),
+ [RTD1319D_ISO_GPIO_49] = RTK_PIN_MUX(gpio_49, 0x20, GENMASK(7, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 4), "pll_test_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 4), "dbg_out0")),
+ [RTD1319D_ISO_GPIO_50] = RTK_PIN_MUX(gpio_50, 0x20, GENMASK(11, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "spdif"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 8), "dbg_out1")),
+ [RTD1319D_ISO_USB_CC1] = RTK_PIN_MUX(usb_cc1, 0x20, GENMASK(15, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 12), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 12), "usb_cc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 12), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_52] = RTK_PIN_MUX(gpio_52, 0x20, GENMASK(19, 16),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 16), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 16), "pll_test_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 16), "sc1_data0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 16), "dbg_out1")),
+ [RTD1319D_ISO_GPIO_53] = RTK_PIN_MUX(gpio_53, 0x20, GENMASK(23, 20),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 20), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 20), "pll_test_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 20), "dbg_out1")),
+ [RTD1319D_ISO_IR_RX] = RTK_PIN_MUX(ir_rx, 0x20, GENMASK(27, 24),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 24), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 24), "ir_rx"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 24), "standby_dbg"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 24), "iso_tristate")),
+ [RTD1319D_ISO_UR0_RX] = RTK_PIN_MUX(ur0_rx, 0x20, GENMASK(31, 28),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 28), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 28), "uart0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 28), "iso_tristate")),
+
+ [RTD1319D_ISO_UR0_TX] = RTK_PIN_MUX(ur0_tx, 0x24, GENMASK(3, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "uart0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 0), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_57] = RTK_PIN_MUX(gpio_57, 0x24, GENMASK(7, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 4), "tdm_ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 4), "ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 4), "dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 4), "tp0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 4), "acpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 4), "vcpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 4), "secpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 4), "aucpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 4), "vtc_i2si_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 4), "vtc_tdm_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xd, 4), "vtc_dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xe, 4), "scpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 4), "dbg_out0")),
+ [RTD1319D_ISO_GPIO_58] = RTK_PIN_MUX(gpio_58, 0x24, GENMASK(11, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "tdm_ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 8), "ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 8), "dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 8), "tp0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 8), "acpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 8), "vcpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 8), "secpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 8), "aucpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 8), "vtc_i2si_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 8), "vtc_tdm_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xd, 8), "vtc_dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xe, 8), "scpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 8), "dbg_out0")),
+ [RTD1319D_ISO_GPIO_59] = RTK_PIN_MUX(gpio_59, 0x24, GENMASK(15, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 12), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 12), "tdm_ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 12), "ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 12), "dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 12), "tp0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 12), "acpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 12), "vcpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 12), "secpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 12), "aucpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 12), "vtc_i2si_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 12), "vtc_tdm_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xd, 12), "vtc_dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xe, 12), "scpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 12), "dbg_out0")),
+ [RTD1319D_ISO_GPIO_60] = RTK_PIN_MUX(gpio_60, 0x24, GENMASK(19, 16),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 16), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 16), "tdm_ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 16), "ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 16), "dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 16), "tp0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 16), "acpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 16), "vcpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 16), "secpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 16), "aucpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 16), "vtc_i2si_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xc, 16), "vtc_tdm_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xd, 16), "vtc_dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xe, 16), "scpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 16), "dbg_out0")),
+ [RTD1319D_ISO_GPIO_61] = RTK_PIN_MUX(gpio_61, 0x24, GENMASK(23, 20),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 20), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 20), "ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 20), "ao"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 20), "dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 20), "tp0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 20), "tp1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 20), "acpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 20), "vcpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 20), "secpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 20), "aucpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 20), "vtc_i2si_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xe, 20), "scpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 20), "dbg_out0")),
+ [RTD1319D_ISO_GPIO_62] = RTK_PIN_MUX(gpio_62, 0x24, GENMASK(27, 24),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 24), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 24), "ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 24), "ao"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 24), "dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 24), "tp0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 24), "tp1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 24), "iso_tristate")),
+ [RTD1319D_ISO_GPIO_63] = RTK_PIN_MUX(gpio_63, 0x24, GENMASK(31, 28),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 28), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 28), "ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 28), "ao"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 28), "dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 28), "tp0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 28), "tp1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 28), "iso_tristate")),
+
+ [RTD1319D_ISO_GPIO_64] = RTK_PIN_MUX(gpio_64, 0x28, GENMASK(3, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 0), "ao"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 0), "dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 0), "tp0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 0), "tp1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xb, 0), "vtc_i2so"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0xf, 0), "iso_tristate")),
+
+ [RTD1319D_ISO_UR2_LOC] = RTK_PIN_MUX(ur2_loc, 0x120, GENMASK(1, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "uart2_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "uart2_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 0), "uart2_loc1")),
+ [RTD1319D_ISO_GSPI_LOC] = RTK_PIN_MUX(gspi_loc, 0x120, GENMASK(3, 2),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 2), "gspi_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 2), "gspi_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 2), "gspi_loc1")),
+ [RTD1319D_ISO_HI_WIDTH] = RTK_PIN_MUX(hi_width, 0x120, GENMASK(9, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "hi_width_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "hi_width_1bit")),
+ [RTD1319D_ISO_SF_EN] = RTK_PIN_MUX(sf_en, 0x120, GENMASK(11, 11),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 11), "sf_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 11), "sf_enable")),
+ [RTD1319D_ISO_ARM_TRACE_DBG_EN] = RTK_PIN_MUX(arm_trace_dbg_en, 0x120, GENMASK(12, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 12), "arm_trace_debug_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 12), "arm_trace_debug_enable")),
+ [RTD1319D_ISO_EJTAG_AUCPU_LOC] = RTK_PIN_MUX(ejtag_aucpu_loc, 0x120, GENMASK(16, 14),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 14), "aucpu_ejtag_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 14), "aucpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 14), "aucpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 14), "aucpu_ejtag_loc2")),
+ [RTD1319D_ISO_EJTAG_ACPU_LOC] = RTK_PIN_MUX(ejtag_acpu_loc, 0x120, GENMASK(19, 17),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 17), "acpu_ejtag_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 17), "acpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 17), "acpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 17), "acpu_ejtag_loc2")),
+ [RTD1319D_ISO_EJTAG_VCPU_LOC] = RTK_PIN_MUX(ejtag_vcpu_loc, 0x120, GENMASK(22, 20),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 20), "vcpu_ejtag_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 20), "vcpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 20), "vcpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 20), "vcpu_ejtag_loc2")),
+ [RTD1319D_ISO_EJTAG_SCPU_LOC] = RTK_PIN_MUX(ejtag_scpu_loc, 0x120, GENMASK(25, 23),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 23), "scpu_ejtag_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 23), "scpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 23), "scpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 23), "scpu_ejtag_loc2")),
+ [RTD1319D_ISO_DMIC_LOC] = RTK_PIN_MUX(dmic_loc, 0x120, GENMASK(27, 26),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 26), "dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 26), "dmic_loc1")),
+
+ [RTD1319D_ISO_EJTAG_SECPU_LOC] = RTK_PIN_MUX(ejtag_secpu_loc, 0x124, GENMASK(20, 18),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 18), "secpu_ejtag_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 18), "secpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 18), "secpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 18), "secpu_ejtag_loc2")),
+
+ [RTD1319D_ISO_VTC_DMIC_LOC] = RTK_PIN_MUX(vtc_dmic_loc, 0x128, GENMASK(1, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "vtc_dmic_loc_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "vtc_dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 0), "vtc_dmic_loc1")),
+ [RTD1319D_ISO_VTC_TDM_LOC] = RTK_PIN_MUX(vtc_tdm_loc, 0x128, GENMASK(3, 2),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 2), "vtc_tdm_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 2), "vtc_tdm_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 2), "vtc_tdm_loc1")),
+ [RTD1319D_ISO_VTC_I2SI_LOC] = RTK_PIN_MUX(vtc_i2si_loc, 0x128, GENMASK(5, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "vtc_i2si_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 4), "vtc_i2si_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 4), "vtc_i2si_loc1")),
+ [RTD1319D_ISO_TDM_AI_LOC] = RTK_PIN_MUX(tdm_ai_loc, 0x128, GENMASK(7, 6),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 6), "tdm_ai_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 6), "tdm_ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 6), "tdm_ai_loc1")),
+ [RTD1319D_ISO_AI_LOC] = RTK_PIN_MUX(ai_loc, 0x128, GENMASK(9, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "ai_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 8), "ai_loc1")),
+ [RTD1319D_ISO_SPDIF_LOC] = RTK_PIN_MUX(spdif_loc, 0x128, GENMASK(11, 10),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 10), "spdif_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 10), "spdif_optical_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 10), "spdif_optical_loc1")),
+
+ [RTD1319D_ISO_HIF_EN_LOC] = RTK_PIN_MUX(hif_en_loc, 0x12c, GENMASK(2, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 0), "hif_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 0), "hif_enable")),
+ [RTD1319D_ISO_SC0_LOC] = RTK_PIN_MUX(sc0_loc, 0x188, GENMASK(9, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "sc0_data0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "sc0_data1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 8), "sc0_data2")),
+ [RTD1319D_ISO_SC1_LOC] = RTK_PIN_MUX(sc1_loc, 0x188, GENMASK(11, 10),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 10), "sc1_data0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 10), "sc1_data1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 10), "sc1_data2")),
+
+ [RTD1319D_ISO_TESTMODE] = {0},
+};
+
+static const struct rtd_pin_config_desc rtd1319d_iso_configs[] = {
+ [RTD1319D_ISO_SCAN_SWITCH] = RTK_PIN_CONFIG(scan_switch, 0x2c, 0, NA, NA, 0, 1, 2, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_18] = RTK_PIN_CONFIG(gpio_18, 0x2c, 3, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_19] = RTK_PIN_CONFIG(gpio_19, 0x2c, 8, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_20] = RTK_PIN_CONFIG(gpio_20, 0x2c, 13, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_31] = RTK_PIN_CONFIG(gpio_31, 0x2c, 18, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_8] = RTK_PIN_CONFIG(gpio_8, 0x2c, 23, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_9] = RTK_PIN_CONFIG(gpio_9, 0x30, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_10] = RTK_PIN_CONFIG(gpio_10, 0x30, 5, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_11] = RTK_PIN_CONFIG(gpio_11, 0x30, 10, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_0] = RTK_PIN_CONFIG(gpio_0, 0x30, 15, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_1] = RTK_PIN_CONFIG(gpio_1, 0x30, 20, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_5] = RTK_PIN_CONFIG(gpio_5, 0x30, 25, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_6] = RTK_PIN_CONFIG(gpio_6, 0x34, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_12] = RTK_PIN_CONFIG(gpio_12, 0x34, 5, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_13] = RTK_PIN_CONFIG(gpio_13, 0x34, 10, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_22] = RTK_PIN_CONFIG(gpio_22, 0x34, 15, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_USB_CC2] = RTK_PIN_CONFIG(usb_cc2, 0x34, 20, NA, NA, 0, 1, 2, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_29] = RTK_PIN_CONFIG(gpio_29, 0x34, 23, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_46] = RTK_PIN_CONFIG(gpio_46, 0x38, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_47] = RTK_PIN_CONFIG(gpio_47, 0x38, 5, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_USB_CC1] = RTK_PIN_CONFIG(usb_cc1, 0x38, 10, NA, NA, 0, 1, 2, PADDRI_4_8),
+ [RTD1319D_ISO_WD_RSET] = RTK_PIN_CONFIG(wd_rset, 0x38, 13, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_IR_RX] = RTK_PIN_CONFIG(ir_rx, 0x38, 18, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_BOOT_SEL] = RTK_PIN_CONFIG(boot_sel, 0x38, 23, 0, 1, NA, 2, 3, PADDRI_4_8),
+ [RTD1319D_ISO_RESET_N] = RTK_PIN_CONFIG(reset_n, 0x38, 27, 0, 1, NA, 2, 3, PADDRI_4_8),
+ [RTD1319D_ISO_TESTMODE] = RTK_PIN_CONFIG(testmode, 0x3c, 0, 0, 1, NA, 2, 3, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_40] = RTK_PIN_CONFIG(gpio_40, 0x3c, 4, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_GPIO_41] = RTK_PIN_CONFIG(gpio_41, 0x3c, 17, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_GPIO_42] = RTK_PIN_CONFIG(gpio_42, 0x40, 0, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_GPIO_43] = RTK_PIN_CONFIG(gpio_43, 0x40, 13, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_GPIO_44] = RTK_PIN_CONFIG(gpio_44, 0x44, 0, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_GPIO_45] = RTK_PIN_CONFIG(gpio_45, 0x44, 13, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_EMMC_DATA_0] = RTK_PIN_CONFIG(emmc_data_0, 0x48, 0, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_EMMC_DATA_1] = RTK_PIN_CONFIG(emmc_data_1, 0x48, 13, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_EMMC_DATA_2] = RTK_PIN_CONFIG(emmc_data_2, 0x4c, 0, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_EMMC_DATA_3] = RTK_PIN_CONFIG(emmc_data_3, 0x4c, 13, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_EMMC_DATA_4] = RTK_PIN_CONFIG(emmc_data_4, 0x50, 0, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_EMMC_DATA_5] = RTK_PIN_CONFIG(emmc_data_5, 0x50, 13, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_EMMC_DATA_6] = RTK_PIN_CONFIG(emmc_data_6, 0x54, 0, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_EMMC_DATA_7] = RTK_PIN_CONFIG(emmc_data_7, 0x54, 13, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_EMMC_DD_SB] = RTK_PIN_CONFIG(emmc_dd_sb, 0x58, 0, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_EMMC_RST_N] = RTK_PIN_CONFIG(emmc_rst_n, 0x58, 13, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_EMMC_CMD] = RTK_PIN_CONFIG(emmc_cmd, 0x5c, 0, 0, 1, NA, 2, 13, NA),
+ [RTD1319D_ISO_EMMC_CLK] = RTK_PIN_CONFIG(emmc_clk, 0x5c, 14, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_GPIO_80] = RTK_PIN_CONFIG(gpio_80, 0x60, 0, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_GPIO_78] = RTK_PIN_CONFIG(gpio_78, 0x60, 13, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_GPIO_79] = RTK_PIN_CONFIG(gpio_79, 0x64, 0, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_GPIO_81] = RTK_PIN_CONFIG(gpio_81, 0x64, 13, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_GPIO_2] = RTK_PIN_CONFIG(gpio_2, 0x64, 26, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_3] = RTK_PIN_CONFIG(gpio_3, 0x68, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_4] = RTK_PIN_CONFIG(gpio_4, 0x68, 5, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_57] = RTK_PIN_CONFIG(gpio_57, 0x68, 10, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_58] = RTK_PIN_CONFIG(gpio_58, 0x68, 15, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_59] = RTK_PIN_CONFIG(gpio_59, 0x68, 20, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_60] = RTK_PIN_CONFIG(gpio_60, 0x68, 25, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_61] = RTK_PIN_CONFIG(gpio_61, 0x6c, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_62] = RTK_PIN_CONFIG(gpio_62, 0x6c, 5, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_63] = RTK_PIN_CONFIG(gpio_63, 0x6c, 10, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_64] = RTK_PIN_CONFIG(gpio_64, 0x6c, 15, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_7] = RTK_PIN_CONFIG(gpio_7, 0x6c, 20, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_16] = RTK_PIN_CONFIG(gpio_16, 0x6c, 25, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_17] = RTK_PIN_CONFIG(gpio_17, 0x70, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_21] = RTK_PIN_CONFIG(gpio_21, 0x70, 5, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_23] = RTK_PIN_CONFIG(gpio_23, 0x70, 10, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_50] = RTK_PIN_CONFIG(gpio_50, 0x70, 15, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_HIF_EN] = RTK_PIN_CONFIG(hif_en, 0x74, 0, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_HIF_DATA] = RTK_PIN_CONFIG(hif_data, 0x74, 13, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_GPIO_33] = RTK_PIN_CONFIG(gpio_33, 0x78, 0, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_GPIO_32] = RTK_PIN_CONFIG(gpio_32, 0x78, 13, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_HIF_CLK] = RTK_PIN_CONFIG(hif_clk, 0x7c, 0, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_HIF_RDY] = RTK_PIN_CONFIG(hif_rdy, 0x7c, 13, 0, 1, NA, 2, 12, NA),
+ [RTD1319D_ISO_GPIO_14] = RTK_PIN_CONFIG(gpio_14, 0x7c, 26, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_15] = RTK_PIN_CONFIG(gpio_15, 0x80, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_25] = RTK_PIN_CONFIG(gpio_25, 0x80, 5, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_26] = RTK_PIN_CONFIG(gpio_26, 0x80, 10, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_27] = RTK_PIN_CONFIG(gpio_27, 0x80, 16, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_28] = RTK_PIN_CONFIG(gpio_28, 0x80, 22, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_30] = RTK_PIN_CONFIG(gpio_30, 0x84, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_34] = RTK_PIN_CONFIG(gpio_34, 0x84, 5, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_35] = RTK_PIN_CONFIG(gpio_35, 0x84, 10, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_UR0_TX] = RTK_PIN_CONFIG(ur0_tx, 0x84, 15, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_UR0_RX] = RTK_PIN_CONFIG(ur0_rx, 0x84, 20, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_48] = RTK_PIN_CONFIG(gpio_48, 0x84, 25, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_49] = RTK_PIN_CONFIG(gpio_49, 0x88, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_52] = RTK_PIN_CONFIG(gpio_52, 0x88, 5, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1319D_ISO_GPIO_53] = RTK_PIN_CONFIG(gpio_53, 0x88, 10, 1, 2, 0, 3, 4, PADDRI_4_8),
+};
+
+static const struct rtd_pin_sconfig_desc rtd1319d_iso_sconfigs[] = {
+ RTK_PIN_SCONFIG(gpio_40, 0x3c, 7, 3, 10, 3, 13, 3),
+ RTK_PIN_SCONFIG(gpio_41, 0x3c, 20, 3, 23, 3, 26, 3),
+ RTK_PIN_SCONFIG(gpio_42, 0x40, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(gpio_43, 0x40, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(gpio_44, 0x44, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(gpio_45, 0x44, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(emmc_data_0, 0x48, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(emmc_data_1, 0x48, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(emmc_data_2, 0x4c, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(emmc_data_3, 0x4c, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(emmc_data_4, 0x50, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(emmc_data_5, 0x50, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(emmc_data_6, 0x54, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(emmc_data_7, 0x54, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(emmc_dd_sb, 0x58, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(emmc_rst_n, 0x58, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(emmc_cmd, 0x5c, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(emmc_clk, 0x5c, 17, 3, 20, 3, 23, 3),
+ RTK_PIN_SCONFIG(gpio_80, 0x60, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(gpio_78, 0x60, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(gpio_79, 0x64, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(gpio_81, 0x64, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(hif_en, 0x74, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(hif_data, 0x74, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(gpio_33, 0x78, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(gpio_32, 0x78, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(hif_clk, 0x7c, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(hif_rdy, 0x7c, 16, 3, 19, 3, 22, 3),
+};
+
+static const struct rtd_pinctrl_desc rtd1319d_iso_pinctrl_desc = {
+ .pins = rtd1319d_iso_pins,
+ .num_pins = ARRAY_SIZE(rtd1319d_iso_pins),
+ .groups = rtd1319d_pin_groups,
+ .num_groups = ARRAY_SIZE(rtd1319d_pin_groups),
+ .functions = rtd1319d_pin_functions,
+ .num_functions = ARRAY_SIZE(rtd1319d_pin_functions),
+ .muxes = rtd1319d_iso_muxes,
+ .num_muxes = ARRAY_SIZE(rtd1319d_iso_muxes),
+ .configs = rtd1319d_iso_configs,
+ .num_configs = ARRAY_SIZE(rtd1319d_iso_configs),
+ .sconfigs = rtd1319d_iso_sconfigs,
+ .num_sconfigs = ARRAY_SIZE(rtd1319d_iso_sconfigs),
+};
+
+static int rtd1319d_pinctrl_probe(struct platform_device *pdev)
+{
+ return rtd_pinctrl_probe(pdev, &rtd1319d_iso_pinctrl_desc);
+}
+
+static const struct of_device_id rtd1319d_pinctrl_of_match[] = {
+ { .compatible = "realtek,rtd1319d-pinctrl", },
+ {},
+};
+
+static struct platform_driver rtd1319d_pinctrl_driver = {
+ .driver = {
+ .name = "rtd1319d-pinctrl",
+ .of_match_table = rtd1319d_pinctrl_of_match,
+ },
+ .probe = rtd1319d_pinctrl_probe,
+};
+
+static int __init rtd1319d_pinctrl_init(void)
+{
+ return platform_driver_register(&rtd1319d_pinctrl_driver);
+}
+arch_initcall(rtd1319d_pinctrl_init);
+
+static void __exit rtd1319d_pinctrl_exit(void)
+{
+ platform_driver_unregister(&rtd1319d_pinctrl_driver);
+}
+module_exit(rtd1319d_pinctrl_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Realtek Semiconductor Corporation");
+MODULE_DESCRIPTION("Realtek DHC SoC RTD1319D pinctrl driver");
diff --git a/drivers/pinctrl/realtek/pinctrl-rtd1619b.c b/drivers/pinctrl/realtek/pinctrl-rtd1619b.c
new file mode 100644
index 000000000000..b07e50d6356d
--- /dev/null
+++ b/drivers/pinctrl/realtek/pinctrl-rtd1619b.c
@@ -0,0 +1,1601 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Realtek DHC 1619B pin controller driver
+ *
+ * Copyright (c) 2023 Realtek Semiconductor Corp.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-rtd.h"
+
+enum rtd16xxb_iso_pins {
+ RTD1619B_ISO_GPIO_0 = 0,
+ RTD1619B_ISO_GPIO_1,
+ RTD1619B_ISO_GPIO_2,
+ RTD1619B_ISO_GPIO_3,
+ RTD1619B_ISO_GPIO_4,
+ RTD1619B_ISO_GPIO_5,
+ RTD1619B_ISO_GPIO_6,
+ RTD1619B_ISO_GPIO_7,
+ RTD1619B_ISO_GPIO_8,
+ RTD1619B_ISO_GPIO_9,
+ RTD1619B_ISO_GPIO_10,
+ RTD1619B_ISO_GPIO_11,
+ RTD1619B_ISO_GPIO_12,
+ RTD1619B_ISO_GPIO_13,
+ RTD1619B_ISO_GPIO_14,
+ RTD1619B_ISO_GPIO_15,
+ RTD1619B_ISO_GPIO_16,
+ RTD1619B_ISO_GPIO_17,
+ RTD1619B_ISO_GPIO_18,
+ RTD1619B_ISO_GPIO_19,
+ RTD1619B_ISO_GPIO_20,
+ RTD1619B_ISO_GPIO_21,
+ RTD1619B_ISO_GPIO_22,
+ RTD1619B_ISO_GPIO_23,
+ RTD1619B_ISO_USB_CC2,
+ RTD1619B_ISO_GPIO_25,
+ RTD1619B_ISO_GPIO_26,
+ RTD1619B_ISO_GPIO_27,
+ RTD1619B_ISO_GPIO_28,
+ RTD1619B_ISO_GPIO_29,
+ RTD1619B_ISO_GPIO_30,
+ RTD1619B_ISO_GPIO_31,
+ RTD1619B_ISO_GPIO_32,
+ RTD1619B_ISO_GPIO_33,
+ RTD1619B_ISO_GPIO_34,
+ RTD1619B_ISO_GPIO_35,
+ RTD1619B_ISO_HIF_DATA,
+ RTD1619B_ISO_HIF_EN,
+ RTD1619B_ISO_HIF_RDY,
+ RTD1619B_ISO_HIF_CLK,
+ RTD1619B_ISO_GPIO_40,
+ RTD1619B_ISO_GPIO_41,
+ RTD1619B_ISO_GPIO_42,
+ RTD1619B_ISO_GPIO_43,
+ RTD1619B_ISO_GPIO_44,
+ RTD1619B_ISO_GPIO_45,
+ RTD1619B_ISO_GPIO_46,
+ RTD1619B_ISO_GPIO_47,
+ RTD1619B_ISO_GPIO_48,
+ RTD1619B_ISO_GPIO_49,
+ RTD1619B_ISO_GPIO_50,
+ RTD1619B_ISO_USB_CC1,
+ RTD1619B_ISO_GPIO_52,
+ RTD1619B_ISO_GPIO_53,
+ RTD1619B_ISO_IR_RX,
+ RTD1619B_ISO_UR0_RX,
+ RTD1619B_ISO_UR0_TX,
+ RTD1619B_ISO_GPIO_57,
+ RTD1619B_ISO_GPIO_58,
+ RTD1619B_ISO_GPIO_59,
+ RTD1619B_ISO_GPIO_60,
+ RTD1619B_ISO_GPIO_61,
+ RTD1619B_ISO_GPIO_62,
+ RTD1619B_ISO_GPIO_63,
+ RTD1619B_ISO_GPIO_64,
+ RTD1619B_ISO_GPIO_65,
+ RTD1619B_ISO_GPIO_66,
+ RTD1619B_ISO_GPIO_67,
+ RTD1619B_ISO_GPIO_68,
+ RTD1619B_ISO_GPIO_69,
+ RTD1619B_ISO_GPIO_70,
+ RTD1619B_ISO_GPIO_71,
+ RTD1619B_ISO_GPIO_72,
+ RTD1619B_ISO_GPIO_73,
+ RTD1619B_ISO_GPIO_74,
+ RTD1619B_ISO_GPIO_75,
+ RTD1619B_ISO_GPIO_76,
+ RTD1619B_ISO_EMMC_CMD,
+ RTD1619B_ISO_SPI_CE_N,
+ RTD1619B_ISO_SPI_SCK,
+ RTD1619B_ISO_SPI_SO,
+ RTD1619B_ISO_SPI_SI,
+ RTD1619B_ISO_EMMC_RST_N,
+ RTD1619B_ISO_EMMC_DD_SB,
+ RTD1619B_ISO_EMMC_CLK,
+ RTD1619B_ISO_EMMC_DATA_0,
+ RTD1619B_ISO_EMMC_DATA_1,
+ RTD1619B_ISO_EMMC_DATA_2,
+ RTD1619B_ISO_EMMC_DATA_3,
+ RTD1619B_ISO_EMMC_DATA_4,
+ RTD1619B_ISO_EMMC_DATA_5,
+ RTD1619B_ISO_EMMC_DATA_6,
+ RTD1619B_ISO_EMMC_DATA_7,
+ RTD1619B_ISO_UR2_LOC,
+ RTD1619B_ISO_GSPI_LOC,
+ RTD1619B_ISO_SDIO_LOC,
+ RTD1619B_ISO_HI_LOC,
+ RTD1619B_ISO_HI_WIDTH,
+ RTD1619B_ISO_SF_EN,
+ RTD1619B_ISO_ARM_TRACE_DBG_EN,
+ RTD1619B_ISO_PWM_01_OPEN_DRAIN_EN_LOC0,
+ RTD1619B_ISO_PWM_23_OPEN_DRAIN_EN_LOC0,
+ RTD1619B_ISO_PWM_01_OPEN_DRAIN_EN_LOC1,
+ RTD1619B_ISO_PWM_23_OPEN_DRAIN_EN_LOC1,
+ RTD1619B_ISO_EJTAG_ACPU_LOC,
+ RTD1619B_ISO_EJTAG_VCPU_LOC,
+ RTD1619B_ISO_EJTAG_SCPU_LOC,
+ RTD1619B_ISO_DMIC_LOC,
+ RTD1619B_ISO_ISO_GSPI_LOC,
+ RTD1619B_ISO_EJTAG_VE3_LOC,
+ RTD1619B_ISO_EJTAG_AUCPU0_LOC,
+ RTD1619B_ISO_EJTAG_AUCPU1_LOC,
+};
+
+static const struct pinctrl_pin_desc rtd1619b_iso_pins[] = {
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_0, "gpio_0"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_1, "gpio_1"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_2, "gpio_2"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_3, "gpio_3"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_4, "gpio_4"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_5, "gpio_5"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_6, "gpio_6"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_7, "gpio_7"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_8, "gpio_8"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_9, "gpio_9"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_10, "gpio_10"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_11, "gpio_11"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_12, "gpio_12"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_13, "gpio_13"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_14, "gpio_14"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_15, "gpio_15"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_16, "gpio_16"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_17, "gpio_17"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_18, "gpio_18"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_19, "gpio_19"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_20, "gpio_20"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_21, "gpio_21"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_22, "gpio_22"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_23, "gpio_23"),
+ PINCTRL_PIN(RTD1619B_ISO_USB_CC2, "usb_cc2"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_25, "gpio_25"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_26, "gpio_26"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_27, "gpio_27"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_28, "gpio_28"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_29, "gpio_29"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_30, "gpio_30"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_31, "gpio_31"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_32, "gpio_32"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_33, "gpio_33"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_34, "gpio_34"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_35, "gpio_35"),
+ PINCTRL_PIN(RTD1619B_ISO_HIF_DATA, "hif_data"),
+ PINCTRL_PIN(RTD1619B_ISO_HIF_EN, "hif_en"),
+ PINCTRL_PIN(RTD1619B_ISO_HIF_RDY, "hif_rdy"),
+ PINCTRL_PIN(RTD1619B_ISO_HIF_CLK, "hif_clk"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_40, "gpio_40"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_41, "gpio_41"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_42, "gpio_42"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_43, "gpio_43"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_44, "gpio_44"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_45, "gpio_45"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_46, "gpio_46"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_47, "gpio_47"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_48, "gpio_48"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_49, "gpio_49"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_50, "gpio_50"),
+ PINCTRL_PIN(RTD1619B_ISO_USB_CC1, "usb_cc1"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_52, "gpio_52"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_53, "gpio_53"),
+ PINCTRL_PIN(RTD1619B_ISO_IR_RX, "ir_rx"),
+ PINCTRL_PIN(RTD1619B_ISO_UR0_RX, "ur0_rx"),
+ PINCTRL_PIN(RTD1619B_ISO_UR0_TX, "ur0_tx"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_57, "gpio_57"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_58, "gpio_58"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_59, "gpio_59"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_60, "gpio_60"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_61, "gpio_61"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_62, "gpio_62"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_63, "gpio_63"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_64, "gpio_64"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_65, "gpio_65"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_66, "gpio_66"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_67, "gpio_67"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_68, "gpio_68"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_69, "gpio_69"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_70, "gpio_70"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_71, "gpio_71"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_72, "gpio_72"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_73, "gpio_73"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_74, "gpio_74"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_75, "gpio_75"),
+ PINCTRL_PIN(RTD1619B_ISO_GPIO_76, "gpio_76"),
+ PINCTRL_PIN(RTD1619B_ISO_EMMC_CMD, "emmc_cmd"),
+ PINCTRL_PIN(RTD1619B_ISO_SPI_CE_N, "spi_ce_n"),
+ PINCTRL_PIN(RTD1619B_ISO_SPI_SCK, "spi_sck"),
+ PINCTRL_PIN(RTD1619B_ISO_SPI_SO, "spi_so"),
+ PINCTRL_PIN(RTD1619B_ISO_SPI_SI, "spi_si"),
+ PINCTRL_PIN(RTD1619B_ISO_EMMC_RST_N, "emmc_rst_n"),
+ PINCTRL_PIN(RTD1619B_ISO_EMMC_DD_SB, "emmc_dd_sb"),
+ PINCTRL_PIN(RTD1619B_ISO_EMMC_CLK, "emmc_clk"),
+ PINCTRL_PIN(RTD1619B_ISO_EMMC_DATA_0, "emmc_data_0"),
+ PINCTRL_PIN(RTD1619B_ISO_EMMC_DATA_1, "emmc_data_1"),
+ PINCTRL_PIN(RTD1619B_ISO_EMMC_DATA_2, "emmc_data_2"),
+ PINCTRL_PIN(RTD1619B_ISO_EMMC_DATA_3, "emmc_data_3"),
+ PINCTRL_PIN(RTD1619B_ISO_EMMC_DATA_4, "emmc_data_4"),
+ PINCTRL_PIN(RTD1619B_ISO_EMMC_DATA_5, "emmc_data_5"),
+ PINCTRL_PIN(RTD1619B_ISO_EMMC_DATA_6, "emmc_data_6"),
+ PINCTRL_PIN(RTD1619B_ISO_EMMC_DATA_7, "emmc_data_7"),
+ PINCTRL_PIN(RTD1619B_ISO_UR2_LOC, "ur2_loc"),
+ PINCTRL_PIN(RTD1619B_ISO_GSPI_LOC, "gspi_loc"),
+ PINCTRL_PIN(RTD1619B_ISO_SDIO_LOC, "sdio_loc"),
+ PINCTRL_PIN(RTD1619B_ISO_HI_LOC, "hi_loc"),
+ PINCTRL_PIN(RTD1619B_ISO_HI_WIDTH, "hi_width"),
+ PINCTRL_PIN(RTD1619B_ISO_SF_EN, "sf_en"),
+ PINCTRL_PIN(RTD1619B_ISO_ARM_TRACE_DBG_EN, "arm_trace_dbg_en"),
+ PINCTRL_PIN(RTD1619B_ISO_PWM_01_OPEN_DRAIN_EN_LOC0, "pwm_01_open_drain_en_loc0"),
+ PINCTRL_PIN(RTD1619B_ISO_PWM_23_OPEN_DRAIN_EN_LOC0, "pwm_23_open_drain_en_loc0"),
+ PINCTRL_PIN(RTD1619B_ISO_PWM_01_OPEN_DRAIN_EN_LOC1, "pwm_01_open_drain_en_loc1"),
+ PINCTRL_PIN(RTD1619B_ISO_PWM_23_OPEN_DRAIN_EN_LOC1, "pwm_23_open_drain_en_loc1"),
+ PINCTRL_PIN(RTD1619B_ISO_EJTAG_ACPU_LOC, "ejtag_acpu_loc"),
+ PINCTRL_PIN(RTD1619B_ISO_EJTAG_VCPU_LOC, "ejtag_vcpu_loc"),
+ PINCTRL_PIN(RTD1619B_ISO_EJTAG_SCPU_LOC, "ejtag_scpu_loc"),
+ PINCTRL_PIN(RTD1619B_ISO_DMIC_LOC, "dmic_loc"),
+ PINCTRL_PIN(RTD1619B_ISO_ISO_GSPI_LOC, "iso_gspi_loc"),
+ PINCTRL_PIN(RTD1619B_ISO_EJTAG_VE3_LOC, "ejtag_ve3_loc"),
+ PINCTRL_PIN(RTD1619B_ISO_EJTAG_AUCPU0_LOC, "ejtag_aucpu0_loc"),
+ PINCTRL_PIN(RTD1619B_ISO_EJTAG_AUCPU1_LOC, "ejtag_aucpu1_loc"),
+};
+
+#define DECLARE_RTD1619B_PIN(_pin, _name) \
+ static const unsigned int rtd1619b_## _name ##_pins[] = { _pin }
+
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_0, gpio_0);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_1, gpio_1);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_2, gpio_2);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_3, gpio_3);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_4, gpio_4);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_5, gpio_5);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_6, gpio_6);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_7, gpio_7);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_8, gpio_8);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_9, gpio_9);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_10, gpio_10);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_11, gpio_11);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_12, gpio_12);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_13, gpio_13);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_14, gpio_14);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_15, gpio_15);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_16, gpio_16);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_17, gpio_17);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_18, gpio_18);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_19, gpio_19);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_20, gpio_20);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_21, gpio_21);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_22, gpio_22);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_23, gpio_23);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_USB_CC2, usb_cc2);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_25, gpio_25);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_26, gpio_26);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_27, gpio_27);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_28, gpio_28);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_29, gpio_29);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_30, gpio_30);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_31, gpio_31);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_32, gpio_32);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_33, gpio_33);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_34, gpio_34);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_35, gpio_35);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_HIF_DATA, hif_data);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_HIF_EN, hif_en);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_HIF_RDY, hif_rdy);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_HIF_CLK, hif_clk);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_40, gpio_40);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_41, gpio_41);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_42, gpio_42);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_43, gpio_43);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_44, gpio_44);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_45, gpio_45);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_46, gpio_46);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_47, gpio_47);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_48, gpio_48);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_49, gpio_49);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_50, gpio_50);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_USB_CC1, usb_cc1);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_52, gpio_52);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_53, gpio_53);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_IR_RX, ir_rx);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_UR0_RX, ur0_rx);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_UR0_TX, ur0_tx);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_57, gpio_57);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_58, gpio_58);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_59, gpio_59);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_60, gpio_60);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_61, gpio_61);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_62, gpio_62);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_63, gpio_63);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_64, gpio_64);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_65, gpio_65);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_66, gpio_66);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_67, gpio_67);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_68, gpio_68);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_69, gpio_69);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_70, gpio_70);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_71, gpio_71);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_72, gpio_72);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_73, gpio_73);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_74, gpio_74);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_75, gpio_75);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GPIO_76, gpio_76);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_EMMC_CMD, emmc_cmd);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_SPI_CE_N, spi_ce_n);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_SPI_SCK, spi_sck);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_SPI_SO, spi_so);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_SPI_SI, spi_si);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_EMMC_RST_N, emmc_rst_n);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_EMMC_DD_SB, emmc_dd_sb);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_EMMC_CLK, emmc_clk);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_EMMC_DATA_0, emmc_data_0);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_EMMC_DATA_1, emmc_data_1);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_EMMC_DATA_2, emmc_data_2);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_EMMC_DATA_3, emmc_data_3);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_EMMC_DATA_4, emmc_data_4);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_EMMC_DATA_5, emmc_data_5);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_EMMC_DATA_6, emmc_data_6);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_EMMC_DATA_7, emmc_data_7);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_UR2_LOC, ur2_loc);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_GSPI_LOC, gspi_loc);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_SDIO_LOC, sdio_loc);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_HI_LOC, hi_loc);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_HI_WIDTH, hi_width);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_SF_EN, sf_en);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_ARM_TRACE_DBG_EN, arm_trace_dbg_en);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_PWM_01_OPEN_DRAIN_EN_LOC0, pwm_01_open_drain_en_loc0);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_PWM_23_OPEN_DRAIN_EN_LOC0, pwm_23_open_drain_en_loc0);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_PWM_01_OPEN_DRAIN_EN_LOC1, pwm_01_open_drain_en_loc1);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_PWM_23_OPEN_DRAIN_EN_LOC1, pwm_23_open_drain_en_loc1);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_EJTAG_ACPU_LOC, ejtag_acpu_loc);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_EJTAG_VCPU_LOC, ejtag_vcpu_loc);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_EJTAG_SCPU_LOC, ejtag_scpu_loc);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_DMIC_LOC, dmic_loc);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_ISO_GSPI_LOC, iso_gspi_loc);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_EJTAG_VE3_LOC, ejtag_ve3_loc);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_EJTAG_AUCPU0_LOC, ejtag_aucpu0_loc);
+DECLARE_RTD1619B_PIN(RTD1619B_ISO_EJTAG_AUCPU1_LOC, ejtag_aucpu1_loc);
+
+#define RTD1619B_GROUP(_name) \
+ { \
+ .name = # _name, \
+ .pins = rtd1619b_ ## _name ## _pins, \
+ .num_pins = ARRAY_SIZE(rtd1619b_ ## _name ## _pins), \
+ }
+
+static const struct rtd_pin_group_desc rtd1619b_pin_groups[] = {
+ RTD1619B_GROUP(gpio_0),
+ RTD1619B_GROUP(gpio_1),
+ RTD1619B_GROUP(gpio_2),
+ RTD1619B_GROUP(gpio_3),
+ RTD1619B_GROUP(gpio_4),
+ RTD1619B_GROUP(gpio_5),
+ RTD1619B_GROUP(gpio_6),
+ RTD1619B_GROUP(gpio_7),
+ RTD1619B_GROUP(gpio_8),
+ RTD1619B_GROUP(gpio_9),
+ RTD1619B_GROUP(gpio_10),
+ RTD1619B_GROUP(gpio_11),
+ RTD1619B_GROUP(gpio_12),
+ RTD1619B_GROUP(gpio_13),
+ RTD1619B_GROUP(gpio_14),
+ RTD1619B_GROUP(gpio_15),
+ RTD1619B_GROUP(gpio_16),
+ RTD1619B_GROUP(gpio_17),
+ RTD1619B_GROUP(gpio_18),
+ RTD1619B_GROUP(gpio_19),
+ RTD1619B_GROUP(gpio_20),
+ RTD1619B_GROUP(gpio_21),
+ RTD1619B_GROUP(gpio_22),
+ RTD1619B_GROUP(gpio_23),
+ RTD1619B_GROUP(usb_cc2),
+ RTD1619B_GROUP(gpio_25),
+ RTD1619B_GROUP(gpio_26),
+ RTD1619B_GROUP(gpio_27),
+ RTD1619B_GROUP(gpio_28),
+ RTD1619B_GROUP(gpio_29),
+ RTD1619B_GROUP(gpio_30),
+ RTD1619B_GROUP(gpio_31),
+ RTD1619B_GROUP(gpio_32),
+ RTD1619B_GROUP(gpio_33),
+ RTD1619B_GROUP(gpio_34),
+ RTD1619B_GROUP(gpio_35),
+ RTD1619B_GROUP(hif_data),
+ RTD1619B_GROUP(hif_en),
+ RTD1619B_GROUP(hif_rdy),
+ RTD1619B_GROUP(hif_clk),
+ RTD1619B_GROUP(gpio_40),
+ RTD1619B_GROUP(gpio_41),
+ RTD1619B_GROUP(gpio_42),
+ RTD1619B_GROUP(gpio_43),
+ RTD1619B_GROUP(gpio_44),
+ RTD1619B_GROUP(gpio_45),
+ RTD1619B_GROUP(gpio_46),
+ RTD1619B_GROUP(gpio_47),
+ RTD1619B_GROUP(gpio_48),
+ RTD1619B_GROUP(gpio_49),
+ RTD1619B_GROUP(gpio_50),
+ RTD1619B_GROUP(usb_cc1),
+ RTD1619B_GROUP(gpio_52),
+ RTD1619B_GROUP(gpio_53),
+ RTD1619B_GROUP(ir_rx),
+ RTD1619B_GROUP(ur0_rx),
+ RTD1619B_GROUP(ur0_tx),
+ RTD1619B_GROUP(gpio_57),
+ RTD1619B_GROUP(gpio_58),
+ RTD1619B_GROUP(gpio_59),
+ RTD1619B_GROUP(gpio_60),
+ RTD1619B_GROUP(gpio_61),
+ RTD1619B_GROUP(gpio_62),
+ RTD1619B_GROUP(gpio_63),
+ RTD1619B_GROUP(gpio_64),
+ RTD1619B_GROUP(gpio_65),
+ RTD1619B_GROUP(gpio_66),
+ RTD1619B_GROUP(gpio_67),
+ RTD1619B_GROUP(gpio_68),
+ RTD1619B_GROUP(gpio_69),
+ RTD1619B_GROUP(gpio_70),
+ RTD1619B_GROUP(gpio_71),
+ RTD1619B_GROUP(gpio_72),
+ RTD1619B_GROUP(gpio_73),
+ RTD1619B_GROUP(gpio_74),
+ RTD1619B_GROUP(gpio_75),
+ RTD1619B_GROUP(gpio_76),
+ RTD1619B_GROUP(emmc_cmd),
+ RTD1619B_GROUP(spi_ce_n),
+ RTD1619B_GROUP(spi_sck),
+ RTD1619B_GROUP(spi_so),
+ RTD1619B_GROUP(spi_si),
+ RTD1619B_GROUP(emmc_rst_n),
+ RTD1619B_GROUP(emmc_dd_sb),
+ RTD1619B_GROUP(emmc_clk),
+ RTD1619B_GROUP(emmc_data_0),
+ RTD1619B_GROUP(emmc_data_1),
+ RTD1619B_GROUP(emmc_data_2),
+ RTD1619B_GROUP(emmc_data_3),
+ RTD1619B_GROUP(emmc_data_4),
+ RTD1619B_GROUP(emmc_data_5),
+ RTD1619B_GROUP(emmc_data_6),
+ RTD1619B_GROUP(emmc_data_7),
+ RTD1619B_GROUP(ur2_loc),
+ RTD1619B_GROUP(gspi_loc),
+ RTD1619B_GROUP(sdio_loc),
+ RTD1619B_GROUP(hi_loc),
+ RTD1619B_GROUP(hi_width),
+ RTD1619B_GROUP(sf_en),
+ RTD1619B_GROUP(arm_trace_dbg_en),
+ RTD1619B_GROUP(pwm_01_open_drain_en_loc0),
+ RTD1619B_GROUP(pwm_23_open_drain_en_loc0),
+ RTD1619B_GROUP(pwm_01_open_drain_en_loc1),
+ RTD1619B_GROUP(pwm_23_open_drain_en_loc1),
+ RTD1619B_GROUP(ejtag_acpu_loc),
+ RTD1619B_GROUP(ejtag_vcpu_loc),
+ RTD1619B_GROUP(ejtag_scpu_loc),
+ RTD1619B_GROUP(dmic_loc),
+ RTD1619B_GROUP(iso_gspi_loc),
+ RTD1619B_GROUP(ejtag_ve3_loc),
+ RTD1619B_GROUP(ejtag_aucpu0_loc),
+ RTD1619B_GROUP(ejtag_aucpu1_loc),
+};
+
+static const char * const rtd1619b_gpio_groups[] = {
+ "gpio_0", "gpio_1", "gpio_2", "gpio_3", "gpio_4",
+ "gpio_5", "gpio_6", "gpio_7", "gpio_8", "gpio_9",
+ "gpio_10", "gpio_11", "gpio_12", "gpio_13", "gpio_14",
+ "gpio_15", "gpio_16", "gpio_17", "gpio_18", "gpio_19",
+ "gpio_20", "gpio_21", "gpio_22", "gpio_23", "usb_cc2",
+ "gpio_25", "gpio_26", "gpio_27", "gpio_28", "gpio_29",
+ "gpio_30", "gpio_31", "gpio_32", "gpio_33", "gpio_34",
+ "gpio_35", "hif_data", "hif_en", "hif_rdy", "hif_clk",
+ "gpio_40", "gpio_41", "gpio_42", "gpio_43", "gpio_44",
+ "gpio_45", "gpio_46", "gpio_47", "gpio_48", "gpio_49",
+ "gpio_50", "usb_cc1", "gpio_52", "gpio_53", "ir_rx",
+ "ur0_rx", "ur0_tx", "gpio_57", "gpio_58", "gpio_59",
+ "gpio_60", "gpio_61", "gpio_62", "gpio_63", "gpio_64",
+ "gpio_65", "gpio_66", "gpio_67", "gpio_68", "gpio_69",
+ "gpio_70", "gpio_71", "gpio_72", "gpio_73", "gpio_74",
+ "gpio_75", "gpio_76", "emmc_cmd", "spi_ce_n", "spi_sck",
+ "spi_so", "spi_si"};
+static const char * const rtd1619b_nf_groups[] = {
+ "emmc_rst_n", "emmc_clk", "emmc_cmd", "emmc_data_0", "emmc_data_1",
+ "emmc_data_2", "emmc_data_3", "emmc_data_4",
+ "emmc_data_5", "emmc_data_6", "emmc_data_7", "spi_ce_n",
+ "spi_sck", "spi_so", "spi_si" };
+static const char * const rtd1619b_nf_spi_groups[] = {
+ "emmc_data_0", "emmc_data_1",
+ "emmc_data_2", "emmc_data_3", "emmc_data_4",
+ "emmc_data_5" };
+static const char * const rtd1619b_spi_groups[] = {
+ "spi_ce_n", "spi_sck", "spi_so", "spi_si" };
+static const char * const rtd1619b_emmc_groups[] = {
+ "emmc_rst_n", "emmc_clk", "emmc_data_0", "emmc_data_1",
+ "emmc_data_2", "emmc_data_3", "emmc_data_4",
+ "emmc_data_5", "emmc_data_6", "emmc_data_7",
+ "emmc_dd_sb", "emmc_cmd"};
+
+static const char * const rtd1619b_pmic_groups[] = { "spi_ce_n" };
+static const char * const rtd1619b_spdif_groups[] = { "gpio_50" };
+static const char * const rtd1619b_spdif_coaxial_groups[] = { "gpio_1" };
+static const char * const rtd1619b_spdif_optical_loc0_groups[] = { "gpio_6" };
+static const char * const rtd1619b_spdif_optical_loc1_groups[] = { "gpio_21" };
+
+static const char * const rtd1619b_emmc_spi_groups[] = {
+ "gpio_1", "gpio_2", "gpio_3", "gpio_4", "gpio_5", "gpio_6" };
+static const char * const rtd1619b_sc1_groups[] = {
+ "gpio_2", "gpio_3", "gpio_4", "gpio_5" };
+static const char * const rtd1619b_uart0_groups[] = { "ur0_rx", "ur0_tx" };
+static const char * const rtd1619b_uart1_groups[] = {
+ "gpio_8", "gpio_9", "gpio_10", "gpio_11" };
+static const char * const rtd1619b_uart2_loc0_groups[] = {
+ "gpio_18", "gpio_19", "gpio_20", "gpio_31", "ur2_loc" };
+static const char * const rtd1619b_uart2_loc1_groups[] = {
+ "gpio_25", "gpio_26", "gpio_27", "gpio_28", "ur2_loc" };
+static const char * const rtd1619b_gspi_loc1_groups[] = {
+ "gpio_8", "gpio_9", "gpio_10", "gpio_11", "gspi_loc" };
+static const char * const rtd1619b_iso_gspi_loc1_groups[] = {
+ "gpio_8", "gpio_9", "gpio_10", "gpio_11", "iso_gspi_loc" };
+static const char * const rtd1619b_i2c0_groups[] = { "gpio_12", "gpio_13" };
+static const char * const rtd1619b_i2c1_groups[] = { "gpio_16", "gpio_17" };
+static const char * const rtd1619b_i2c3_groups[] = { "gpio_63", "gpio_64" };
+static const char * const rtd1619b_i2c4_groups[] = { "gpio_34", "gpio_35" };
+static const char * const rtd1619b_i2c5_groups[] = { "gpio_29", "gpio_46" };
+static const char * const rtd1619b_pwm0_groups[] = { "gpio_20", "gpio_26" };
+static const char * const rtd1619b_pwm1_groups[] = { "gpio_21", "gpio_27" };
+static const char * const rtd1619b_pwm2_groups[] = { "gpio_22", "gpio_28" };
+static const char * const rtd1619b_pwm3_groups[] = { "gpio_23", "gpio_47" };
+static const char * const rtd1619b_etn_led_groups[] = { "gpio_14", "gpio_15", "gpio_23" };
+static const char * const rtd1619b_etn_phy_groups[] = { "gpio_14", "gpio_15" };
+static const char * const rtd1619b_etn_clk_groups[] = { "gpio_14" };
+static const char * const rtd1619b_sc0_groups[] = {
+ "gpio_18", "gpio_19", "gpio_20", "gpio_31" };
+static const char * const rtd1619b_vfd_groups[] = {
+ "gpio_26", "gpio_27", "gpio_28" };
+
+static const char * const rtd1619b_gspi_loc0_groups[] = {
+ "gpio_18", "gpio_19", "gpio_20", "gpio_31", "gspi_loc" };
+static const char * const rtd1619b_iso_gspi_loc0_groups[] = {
+ "gpio_18", "gpio_19", "gpio_20", "gpio_31", "iso_gspi_loc" };
+static const char * const rtd1619b_pcie1_groups[] = { "gpio_25" };
+static const char * const rtd1619b_pcie2_groups[] = { "gpio_52" };
+static const char * const rtd1619b_sd_groups[] = {
+ "gpio_32", "gpio_33", "gpio_34", "gpio_35", "hif_data",
+ "hif_en", "hif_rdy", "hif_clk" };
+static const char * const rtd1619b_sdio_loc0_groups[] = {
+ "gpio_32", "gpio_33", "hif_data", "hif_en",
+ "hif_rdy", "hif_clk", "sdio_loc" };
+static const char * const rtd1619b_sdio_loc1_groups[] = {
+ "gpio_40", "gpio_41", "gpio_42", "gpio_43", "gpio_44",
+ "gpio_45", "sdio_loc" };
+static const char * const rtd1619b_hi_groups[] = {
+ "hif_data", "hif_en", "hif_rdy", "hif_clk" };
+static const char * const rtd1619b_hi_m_groups[] = {
+ "hif_data", "hif_en", "hif_rdy", "hif_clk" };
+static const char * const rtd1619b_dc_fan_groups[] = { "gpio_47" };
+static const char * const rtd1619b_pll_test_loc0_groups[] = { "gpio_52", "gpio_53" };
+static const char * const rtd1619b_pll_test_loc1_groups[] = { "gpio_48", "gpio_49" };
+static const char * const rtd1619b_tdm_ai_loc0_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60" };
+static const char * const rtd1619b_tdm_ai_loc1_groups[] = {
+ "hif_data", "hif_en", "hif_rdy", "hif_clk" };
+static const char * const rtd1619b_dmic_loc0_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60", "gpio_61",
+ "gpio_62", "gpio_63", "gpio_64", "dmic_loc"};
+static const char * const rtd1619b_dmic_loc1_groups[] = {
+ "gpio_32", "gpio_33", "gpio_34", "gpio_35", "hif_data",
+ "hif_en", "hif_rdy", "hif_clk", "dmic_loc" };
+static const char * const rtd1619b_ai_loc0_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60", "gpio_61",
+ "gpio_62", "gpio_63" };
+static const char * const rtd1619b_ai_loc1_groups[] = {
+ "gpio_32", "gpio_33", "gpio_34", "hif_data",
+ "hif_en", "hif_rdy", "hif_clk"};
+static const char * const rtd1619b_tp0_groups[] = {
+ "gpio_66", "gpio_67", "gpio_68", "gpio_69",
+ "gpio_70", "gpio_71", "gpio_72", "gpio_73", "gpio_74",
+ "gpio_75", "gpio_76" };
+static const char * const rtd1619b_tp1_groups[] = {
+ "gpio_69", "gpio_70", "gpio_71", "gpio_72" };
+static const char * const rtd1619b_ao_groups[] = {
+ "gpio_66", "gpio_67", "gpio_68", "gpio_69",
+ "gpio_70", "gpio_71", "gpio_72" };
+static const char * const rtd1619b_uart2_disable_groups[] = { "ur2_loc" };
+static const char * const rtd1619b_gspi_disable_groups[] = { "gspi_loc" };
+static const char * const rtd1619b_sdio_disable_groups[] = { "sdio_loc" };
+static const char * const rtd1619b_hi_loc_disable_groups[] = { "hi_loc" };
+static const char * const rtd1619b_hi_loc0_groups[] = { "hi_loc" };
+static const char * const rtd1619b_hi_width_disable_groups[] = { "hi_width" };
+static const char * const rtd1619b_hi_width_1bit_groups[] = { "hi_width" };
+
+static const char * const rtd1619b_vtc_i2si_loc0_groups[] = {
+ "gpio_32", "hif_data", "hif_en", "hif_rdy", "hif_clk" };
+static const char * const rtd1619b_vtc_tdm_loc0_groups[] = {
+ "hif_data", "hif_en", "hif_rdy", "hif_clk" };
+static const char * const rtd1619b_vtc_dmic_loc0_groups[] = {
+ "hif_data", "hif_en", "hif_rdy", "hif_clk" };
+static const char * const rtd1619b_vtc_i2si_loc1_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60", "gpio_61" };
+static const char * const rtd1619b_vtc_tdm_loc1_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60" };
+static const char * const rtd1619b_vtc_dmic_loc1_groups[] = {
+ "gpio_57", "gpio_58", "gpio_59", "gpio_60" };
+static const char * const rtd1619b_vtc_i2so_groups[] = {
+ "gpio_66", "gpio_67", "gpio_68", "gpio_69" };
+static const char * const rtd1619b_ve3_ejtag_loc0_groups[] = {
+ "gpio_2", "gpio_3", "gpio_4",
+ "gpio_5", "gpio_6", "ejtag_ve3_loc" };
+static const char * const rtd1619b_aucpu0_ejtag_loc0_groups[] = {
+ "gpio_2", "gpio_3", "gpio_4",
+ "gpio_5", "gpio_6", "ejtag_aucpu0_loc" };
+static const char * const rtd1619b_aucpu1_ejtag_loc0_groups[] = {
+ "gpio_2", "gpio_3", "gpio_4",
+ "gpio_5", "gpio_6", "ejtag_aucpu1_loc" };
+static const char * const rtd1619b_ve3_ejtag_loc1_groups[] = {
+ "gpio_32", "gpio_33", "hif_data", "hif_en",
+ "hif_clk", "ejtag_ve3_loc" };
+static const char * const rtd1619b_aucpu0_ejtag_loc1_groups[] = {
+ "gpio_32", "gpio_33", "hif_data", "hif_en",
+ "hif_clk", "ejtag_aucpu0_loc" };
+static const char * const rtd1619b_aucpu1_ejtag_loc1_groups[] = {
+ "gpio_32", "gpio_33", "hif_data", "hif_en",
+ "hif_clk", "ejtag_aucpu1_loc" };
+
+static const char * const rtd1619b_ve3_ejtag_loc2_groups[] = {
+ "gpio_71", "gpio_72", "gpio_73", "gpio_74",
+ "gpio_75", "gpio_76", "ejtag_ve3_loc" };
+static const char * const rtd1619b_aucpu0_ejtag_loc2_groups[] = {
+ "gpio_71", "gpio_72", "gpio_73", "gpio_74",
+ "gpio_75", "gpio_76", "ejtag_aucpu0_loc" };
+static const char * const rtd1619b_aucpu1_ejtag_loc2_groups[] = {
+ "gpio_71", "gpio_72", "gpio_73", "gpio_74",
+ "gpio_75", "gpio_76", "ejtag_aucpu1_loc" };
+
+static const char * const rtd1619b_scpu_ejtag_loc0_groups[] = {
+ "gpio_2", "gpio_3", "gpio_4",
+ "gpio_5", "gpio_6", "ejtag_scpu_loc" };
+static const char * const rtd1619b_acpu_ejtag_loc0_groups[] = {
+ "gpio_2", "gpio_3", "gpio_4",
+ "gpio_5", "gpio_6", "ejtag_acpu_loc" };
+static const char * const rtd1619b_vcpu_ejtag_loc0_groups[] = {
+ "gpio_2", "gpio_3", "gpio_4",
+ "gpio_5", "gpio_6", "ejtag_vcpu_loc" };
+static const char * const rtd1619b_scpu_ejtag_loc1_groups[] = {
+ "gpio_32", "gpio_33", "hif_data", "hif_en",
+ "hif_clk", "ejtag_scpu_loc" };
+static const char * const rtd1619b_acpu_ejtag_loc1_groups[] = {
+ "gpio_32", "gpio_33", "hif_data", "hif_en",
+ "hif_clk", "ejtag_acpu_loc" };
+static const char * const rtd1619b_vcpu_ejtag_loc1_groups[] = {
+ "gpio_32", "gpio_33", "hif_data", "hif_en",
+ "hif_clk", "ejtag_vcpu_loc" };
+
+static const char * const rtd1619b_scpu_ejtag_loc2_groups[] = {
+ "gpio_71", "gpio_72", "gpio_73", "gpio_74",
+ "gpio_75", "gpio_76", "ejtag_scpu_loc" };
+static const char * const rtd1619b_acpu_ejtag_loc2_groups[] = {
+ "gpio_71", "gpio_72", "gpio_73", "gpio_74",
+ "gpio_75", "gpio_76", "ejtag_acpu_loc" };
+static const char * const rtd1619b_vcpu_ejtag_loc2_groups[] = {
+ "gpio_71", "gpio_72", "gpio_73", "gpio_74",
+ "gpio_75", "gpio_76", "ejtag_vcpu_loc"};
+static const char * const rtd1619b_ve3_ejtag_disable_groups[] = { "ejtag_ve3_loc" };
+static const char * const rtd1619b_aucpu0_ejtag_disable_groups[] = { "ejtag_aucpu0_loc" };
+static const char * const rtd1619b_aucpu1_ejtag_disable_groups[] = { "ejtag_aucpu1_loc" };
+
+static const char * const rtd1619b_acpu_ejtag_disable_groups[] = { "ejtag_acpu_loc" };
+static const char * const rtd1619b_vcpu_ejtag_disable_groups[] = { "ejtag_vcpu_loc" };
+static const char * const rtd1619b_scpu_ejtag_disable_groups[] = { "ejtag_scpu_loc" };
+static const char * const rtd1619b_sf_disable_groups[] = { "sf_en" };
+static const char * const rtd1619b_sf_enable_groups[] = { "sf_en" };
+static const char * const rtd1619b_iso_gspi_disable_groups[] = { "iso_gspi_loc" };
+static const char * const rtd1619b_arm_trace_debug_disable_groups[] = { "arm_trace_dbg_en" };
+static const char * const rtd1619b_arm_trace_debug_enable_groups[] = { "arm_trace_dbg_en" };
+static const char * const rtd1619b_pwm_normal_groups[] = {
+ "pwm_01_open_drain_en_loc0", "pwm_23_open_drain_en_loc0",
+ "pwm_01_open_drain_en_loc1", "pwm_23_open_drain_en_loc1" };
+static const char * const rtd1619b_pwm_open_drain_groups[] = {
+ "pwm_01_open_drain_en_loc0", "pwm_23_open_drain_en_loc0",
+ "pwm_01_open_drain_en_loc1", "pwm_23_open_drain_en_loc1" };
+static const char * const rtd1619b_standby_dbg_groups[] = {
+ "gpio_2", "gpio_3", "ir_rx" };
+
+static const char * const rtd1619b_usb_cc1_groups[] = { "usb_cc1" };
+static const char * const rtd1619b_usb_cc2_groups[] = { "usb_cc2" };
+static const char * const rtd1619b_ir_rx_groups[] = { "ir_rx" };
+static const char * const rtd1619b_test_loop_dis_groups[] = { "gpio_50" };
+
+#define RTD1619B_FUNC(_name) \
+ { \
+ .name = # _name, \
+ .groups = rtd1619b_ ## _name ## _groups, \
+ .num_groups = ARRAY_SIZE(rtd1619b_ ## _name ## _groups), \
+ }
+
+static const struct rtd_pin_func_desc rtd1619b_pin_functions[] = {
+ RTD1619B_FUNC(gpio),
+ RTD1619B_FUNC(nf),
+ RTD1619B_FUNC(nf_spi),
+ RTD1619B_FUNC(spi),
+ RTD1619B_FUNC(pmic),
+ RTD1619B_FUNC(spdif),
+ RTD1619B_FUNC(spdif_coaxial),
+ RTD1619B_FUNC(spdif_optical_loc0),
+ RTD1619B_FUNC(spdif_optical_loc1),
+ RTD1619B_FUNC(emmc_spi),
+ RTD1619B_FUNC(emmc),
+ RTD1619B_FUNC(sc1),
+ RTD1619B_FUNC(uart0),
+ RTD1619B_FUNC(uart1),
+ RTD1619B_FUNC(uart2_loc0),
+ RTD1619B_FUNC(uart2_loc1),
+ RTD1619B_FUNC(gspi_loc1),
+ RTD1619B_FUNC(iso_gspi_loc1),
+ RTD1619B_FUNC(i2c0),
+ RTD1619B_FUNC(i2c1),
+ RTD1619B_FUNC(i2c3),
+ RTD1619B_FUNC(i2c4),
+ RTD1619B_FUNC(i2c5),
+ RTD1619B_FUNC(pwm0),
+ RTD1619B_FUNC(pwm1),
+ RTD1619B_FUNC(pwm2),
+ RTD1619B_FUNC(pwm3),
+ RTD1619B_FUNC(etn_led),
+ RTD1619B_FUNC(etn_phy),
+ RTD1619B_FUNC(etn_clk),
+ RTD1619B_FUNC(sc0),
+ RTD1619B_FUNC(vfd),
+ RTD1619B_FUNC(gspi_loc0),
+ RTD1619B_FUNC(iso_gspi_loc0),
+ RTD1619B_FUNC(pcie1),
+ RTD1619B_FUNC(pcie2),
+ RTD1619B_FUNC(sd),
+ RTD1619B_FUNC(sdio_loc0),
+ RTD1619B_FUNC(sdio_loc1),
+ RTD1619B_FUNC(hi),
+ RTD1619B_FUNC(hi_m),
+ RTD1619B_FUNC(dc_fan),
+ RTD1619B_FUNC(pll_test_loc0),
+ RTD1619B_FUNC(pll_test_loc1),
+ RTD1619B_FUNC(usb_cc1),
+ RTD1619B_FUNC(usb_cc2),
+ RTD1619B_FUNC(ir_rx),
+ RTD1619B_FUNC(tdm_ai_loc0),
+ RTD1619B_FUNC(tdm_ai_loc1),
+ RTD1619B_FUNC(dmic_loc0),
+ RTD1619B_FUNC(dmic_loc1),
+ RTD1619B_FUNC(ai_loc0),
+ RTD1619B_FUNC(ai_loc1),
+ RTD1619B_FUNC(tp0),
+ RTD1619B_FUNC(tp1),
+ RTD1619B_FUNC(ao),
+ RTD1619B_FUNC(uart2_disable),
+ RTD1619B_FUNC(gspi_disable),
+ RTD1619B_FUNC(sdio_disable),
+ RTD1619B_FUNC(hi_loc_disable),
+ RTD1619B_FUNC(hi_loc0),
+ RTD1619B_FUNC(hi_width_disable),
+ RTD1619B_FUNC(hi_width_1bit),
+ RTD1619B_FUNC(vtc_i2si_loc0),
+ RTD1619B_FUNC(vtc_tdm_loc0),
+ RTD1619B_FUNC(vtc_dmic_loc0),
+ RTD1619B_FUNC(vtc_i2si_loc1),
+ RTD1619B_FUNC(vtc_tdm_loc1),
+ RTD1619B_FUNC(vtc_dmic_loc1),
+ RTD1619B_FUNC(vtc_i2so),
+ RTD1619B_FUNC(ve3_ejtag_loc0),
+ RTD1619B_FUNC(aucpu0_ejtag_loc0),
+ RTD1619B_FUNC(aucpu1_ejtag_loc0),
+ RTD1619B_FUNC(ve3_ejtag_loc1),
+ RTD1619B_FUNC(aucpu0_ejtag_loc1),
+ RTD1619B_FUNC(aucpu1_ejtag_loc1),
+ RTD1619B_FUNC(ve3_ejtag_loc2),
+ RTD1619B_FUNC(aucpu0_ejtag_loc2),
+ RTD1619B_FUNC(aucpu1_ejtag_loc2),
+ RTD1619B_FUNC(scpu_ejtag_loc0),
+ RTD1619B_FUNC(acpu_ejtag_loc0),
+ RTD1619B_FUNC(vcpu_ejtag_loc0),
+ RTD1619B_FUNC(scpu_ejtag_loc1),
+ RTD1619B_FUNC(acpu_ejtag_loc1),
+ RTD1619B_FUNC(vcpu_ejtag_loc1),
+ RTD1619B_FUNC(scpu_ejtag_loc2),
+ RTD1619B_FUNC(acpu_ejtag_loc2),
+ RTD1619B_FUNC(vcpu_ejtag_loc2),
+ RTD1619B_FUNC(ve3_ejtag_disable),
+ RTD1619B_FUNC(aucpu0_ejtag_disable),
+ RTD1619B_FUNC(aucpu1_ejtag_disable),
+ RTD1619B_FUNC(acpu_ejtag_disable),
+ RTD1619B_FUNC(vcpu_ejtag_disable),
+ RTD1619B_FUNC(scpu_ejtag_disable),
+ RTD1619B_FUNC(iso_gspi_disable),
+ RTD1619B_FUNC(sf_disable),
+ RTD1619B_FUNC(sf_enable),
+ RTD1619B_FUNC(arm_trace_debug_disable),
+ RTD1619B_FUNC(arm_trace_debug_enable),
+ RTD1619B_FUNC(pwm_normal),
+ RTD1619B_FUNC(pwm_open_drain),
+ RTD1619B_FUNC(standby_dbg),
+ RTD1619B_FUNC(test_loop_dis),
+};
+
+#undef RTD1619B_FUNC
+
+static const struct rtd_pin_desc rtd1619b_iso_muxes[] = {
+ [RTD1619B_ISO_EMMC_RST_N] = RTK_PIN_MUX(emmc_rst_n, 0x0, GENMASK(1, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 0), "emmc")),
+ [RTD1619B_ISO_EMMC_DD_SB] = RTK_PIN_MUX(emmc_dd_sb, 0x0, GENMASK(3, 2),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 2), "emmc")),
+ [RTD1619B_ISO_EMMC_CLK] = RTK_PIN_MUX(emmc_clk, 0x0, GENMASK(5, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 4), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 4), "emmc")),
+ [RTD1619B_ISO_EMMC_CMD] = RTK_PIN_MUX(emmc_cmd, 0x0, GENMASK(7, 6),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 6), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 6), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 6), "emmc")),
+ [RTD1619B_ISO_EMMC_DATA_0] = RTK_PIN_MUX(emmc_data_0, 0x0, GENMASK(9, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 8), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 8), "nf_spi")),
+ [RTD1619B_ISO_EMMC_DATA_1] = RTK_PIN_MUX(emmc_data_1, 0x0, GENMASK(11, 10),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 10), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 10), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 10), "nf_spi")),
+ [RTD1619B_ISO_EMMC_DATA_2] = RTK_PIN_MUX(emmc_data_2, 0x0, GENMASK(13, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 12), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 12), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 12), "nf_spi")),
+ [RTD1619B_ISO_EMMC_DATA_3] = RTK_PIN_MUX(emmc_data_3, 0x0, GENMASK(15, 14),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 14), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 14), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 14), "nf_spi")),
+ [RTD1619B_ISO_EMMC_DATA_4] = RTK_PIN_MUX(emmc_data_4, 0x0, GENMASK(17, 16),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 16), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 16), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 16), "nf_spi")),
+ [RTD1619B_ISO_EMMC_DATA_5] = RTK_PIN_MUX(emmc_data_5, 0x0, GENMASK(19, 18),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 18), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 18), "emmc"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 18), "nf_spi")),
+ [RTD1619B_ISO_EMMC_DATA_6] = RTK_PIN_MUX(emmc_data_6, 0x0, GENMASK(21, 20),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 20), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 20), "emmc")),
+ [RTD1619B_ISO_EMMC_DATA_7] = RTK_PIN_MUX(emmc_data_7, 0x0, GENMASK(23, 22),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 22), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 22), "emmc")),
+ [RTD1619B_ISO_SPI_CE_N] = RTK_PIN_MUX(spi_ce_n, 0x0, GENMASK(25, 24),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 24), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 24), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 24), "spi"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 24), "pmic")),
+ [RTD1619B_ISO_SPI_SCK] = RTK_PIN_MUX(spi_sck, 0x0, GENMASK(27, 26),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 26), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 26), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 26), "spi")),
+ [RTD1619B_ISO_SPI_SO] = RTK_PIN_MUX(spi_so, 0x0, GENMASK(29, 28),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 28), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 28), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 28), "spi")),
+ [RTD1619B_ISO_SPI_SI] = RTK_PIN_MUX(spi_si, 0x0, GENMASK(31, 30),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 30), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 30), "nf"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 30), "spi")),
+
+ [RTD1619B_ISO_GPIO_0] = RTK_PIN_MUX(gpio_0, 0x4, GENMASK(0, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio")),
+ [RTD1619B_ISO_GPIO_1] = RTK_PIN_MUX(gpio_1, 0x4, GENMASK(3, 1),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 1), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 1), "emmc_spi"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 1), "spdif_coaxial")),
+ [RTD1619B_ISO_GPIO_2] = RTK_PIN_MUX(gpio_2, 0x4, GENMASK(7, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 4), "standby_dbg"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 4), "emmc_spi"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 4), "sc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 4), "scpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 4), "acpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 4), "vcpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 4), "ve3_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 4), "aucpu0_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x10, 4), "aucpu1_ejtag_loc0")),
+ [RTD1619B_ISO_GPIO_3] = RTK_PIN_MUX(gpio_3, 0x4, GENMASK(11, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "standby_dbg"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 8), "emmc_spi"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 8), "sc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 8), "scpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 8), "acpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 8), "vcpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 8), "ve3_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 8), "aucpu0_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x10, 8), "aucpu1_ejtag_loc0")),
+ [RTD1619B_ISO_GPIO_4] = RTK_PIN_MUX(gpio_4, 0x4, GENMASK(15, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 12), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 12), "emmc_spi"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 12), "sc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 12), "scpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 12), "acpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 12), "vcpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 12), "ve3_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 12), "aucpu0_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x10, 12), "aucpu1_ejtag_loc0")),
+ [RTD1619B_ISO_GPIO_5] = RTK_PIN_MUX(gpio_5, 0x4, GENMASK(19, 16),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 16), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 16), "emmc_spi"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 16), "sc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 16), "scpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 16), "acpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 16), "vcpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 16), "ve3_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 16), "aucpu0_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x10, 16), "aucpu1_ejtag_loc0")),
+ [RTD1619B_ISO_GPIO_6] = RTK_PIN_MUX(gpio_6, 0x4, GENMASK(23, 20),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 20), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 20), "emmc_spi"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 20), "spdif_optical_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 20), "scpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 20), "acpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 20), "vcpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 20), "ve3_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 20), "aucpu0_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x10, 20), "aucpu1_ejtag_loc0")),
+ [RTD1619B_ISO_GPIO_7] = RTK_PIN_MUX(gpio_7, 0x4, GENMASK(24, 24),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 24), "gpio")),
+ [RTD1619B_ISO_GPIO_8] = RTK_PIN_MUX(gpio_8, 0x4, GENMASK(27, 25),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 25), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 25), "uart1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 25), "gspi_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 25), "iso_gspi_loc1")),
+ [RTD1619B_ISO_GPIO_9] = RTK_PIN_MUX(gpio_9, 0x4, GENMASK(30, 28),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 28), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 28), "uart1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 28), "gspi_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 28), "iso_gspi_loc1")),
+ [RTD1619B_ISO_GPIO_10] = RTK_PIN_MUX(gpio_10, 0x8, GENMASK(2, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "uart1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 0), "gspi_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 0), "iso_gspi_loc1")),
+ [RTD1619B_ISO_GPIO_11] = RTK_PIN_MUX(gpio_11, 0x8, GENMASK(5, 3),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 3), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 3), "uart1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 3), "gspi_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 3), "iso_gspi_loc1")),
+
+ [RTD1619B_ISO_GPIO_12] = RTK_PIN_MUX(gpio_12, 0x8, GENMASK(6, 6),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 6), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 6), "i2c0")),
+ [RTD1619B_ISO_GPIO_13] = RTK_PIN_MUX(gpio_13, 0x8, GENMASK(7, 7),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 7), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 7), "i2c0")),
+ [RTD1619B_ISO_GPIO_14] = RTK_PIN_MUX(gpio_14, 0x8, GENMASK(10, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "etn_led"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 8), "etn_phy"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 8), "etn_clk")),
+ [RTD1619B_ISO_GPIO_15] = RTK_PIN_MUX(gpio_15, 0x8, GENMASK(12, 11),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 11), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 11), "etn_led"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 11), "etn_phy")),
+ [RTD1619B_ISO_GPIO_16] = RTK_PIN_MUX(gpio_16, 0x8, GENMASK(13, 13),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 13), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 13), "i2c1")),
+ [RTD1619B_ISO_GPIO_17] = RTK_PIN_MUX(gpio_17, 0x8, GENMASK(14, 14),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 14), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 14), "i2c1")),
+ [RTD1619B_ISO_GPIO_18] = RTK_PIN_MUX(gpio_18, 0x8, GENMASK(17, 15),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 15), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 15), "uart2_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 15), "sc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 15), "gspi_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 15), "iso_gspi_loc0")),
+ [RTD1619B_ISO_GPIO_19] = RTK_PIN_MUX(gpio_19, 0x8, GENMASK(20, 18),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 18), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 18), "uart2_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 18), "sc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 18), "gspi_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 18), "iso_gspi_loc0")),
+ [RTD1619B_ISO_GPIO_20] = RTK_PIN_MUX(gpio_20, 0x8, GENMASK(23, 21),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 21), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 21), "uart2_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 21), "pwm0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 21), "gspi_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 21), "sc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 21), "iso_gspi_loc0")),
+ [RTD1619B_ISO_GPIO_21] = RTK_PIN_MUX(gpio_21, 0x8, GENMASK(26, 24),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 24), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 24), "pwm1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 24), "spdif_optical_loc1")),
+ [RTD1619B_ISO_GPIO_22] = RTK_PIN_MUX(gpio_22, 0x8, GENMASK(28, 27),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 27), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 27), "pwm2")),
+ [RTD1619B_ISO_GPIO_23] = RTK_PIN_MUX(gpio_23, 0x8, GENMASK(30, 29),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 29), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 29), "etn_led"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 29), "pwm3")),
+ [RTD1619B_ISO_USB_CC2] = RTK_PIN_MUX(usb_cc2, 0x8, GENMASK(31, 31),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 31), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 31), "usb_cc2")),
+ [RTD1619B_ISO_GPIO_25] = RTK_PIN_MUX(gpio_25, 0xc, GENMASK(1, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "uart2_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 0), "pcie1")),
+ [RTD1619B_ISO_GPIO_26] = RTK_PIN_MUX(gpio_26, 0xc, GENMASK(3, 2),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 2), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 2), "uart2_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 2), "vfd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 2), "pwm0")),
+ [RTD1619B_ISO_GPIO_27] = RTK_PIN_MUX(gpio_27, 0xc, GENMASK(5, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 4), "uart2_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 4), "vfd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 4), "pwm1")),
+
+ [RTD1619B_ISO_GPIO_28] = RTK_PIN_MUX(gpio_28, 0xc, GENMASK(7, 6),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 6), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 6), "uart2_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 6), "vfd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 6), "pwm2")),
+ [RTD1619B_ISO_GPIO_29] = RTK_PIN_MUX(gpio_29, 0xc, GENMASK(8, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "i2c5")),
+ [RTD1619B_ISO_GPIO_30] = RTK_PIN_MUX(gpio_30, 0xc, GENMASK(9, 9),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 9), "gpio")),
+ [RTD1619B_ISO_GPIO_31] = RTK_PIN_MUX(gpio_31, 0xc, GENMASK(12, 10),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 10), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 10), "uart2_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 10), "sc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 10), "gspi_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 10), "iso_gspi_loc0")),
+ [RTD1619B_ISO_GPIO_32] = RTK_PIN_MUX(gpio_32, 0xc, GENMASK(17, 13),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 13), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 13), "sd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 13), "sdio_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 13), "dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 13), "ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 13), "scpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 13), "acpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 13), "vcpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x11, 13), "vtc_i2si_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x14, 13), "ve3_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x15, 13), "aucpu0_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x16, 13), "aucpu1_ejtag_loc1")),
+ [RTD1619B_ISO_GPIO_33] = RTK_PIN_MUX(gpio_33, 0xc, GENMASK(22, 18),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 18), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 18), "sd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 18), "sdio_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 18), "dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 18), "ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 18), "scpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 18), "acpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 18), "vcpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x14, 18), "ve3_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x15, 18), "aucpu0_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x16, 18), "aucpu1_ejtag_loc1")),
+ [RTD1619B_ISO_GPIO_34] = RTK_PIN_MUX(gpio_34, 0xc, GENMASK(25, 23),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 23), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 23), "sd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 23), "dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 23), "ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 23), "i2c4")),
+ [RTD1619B_ISO_GPIO_35] = RTK_PIN_MUX(gpio_35, 0xc, GENMASK(28, 26),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 26), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 26), "sd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 26), "dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 26), "i2c4")),
+ [RTD1619B_ISO_HIF_DATA] = RTK_PIN_MUX(hif_data, 0x10, GENMASK(4, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "sd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 0), "sdio_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 0), "dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 0), "tdm_ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 0), "scpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 0), "acpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 0), "vcpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 0), "ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 0), "hi"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x10, 0), "hi_m"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x11, 0), "vtc_i2si_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x12, 0), "vtc_tdm_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x13, 0), "vtc_dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x14, 0), "ve3_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x15, 0), "aucpu0_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x16, 0), "aucpu1_ejtag_loc1")),
+ [RTD1619B_ISO_HIF_EN] = RTK_PIN_MUX(hif_en, 0x10, GENMASK(9, 5),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 5), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 5), "sd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 5), "sdio_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 5), "dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 5), "tdm_ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 5), "scpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 5), "acpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 5), "vcpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 5), "ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 5), "hi"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x10, 5), "hi_m"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x11, 5), "vtc_i2si_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x12, 5), "vtc_tdm_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x13, 5), "vtc_dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x14, 5), "ve3_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x15, 5), "aucpu0_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x16, 5), "aucpu1_ejtag_loc1")),
+ [RTD1619B_ISO_HIF_RDY] = RTK_PIN_MUX(hif_rdy, 0x10, GENMASK(13, 10),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 10), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 10), "sd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 10), "sdio_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 10), "dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 10), "tdm_ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 10), "ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 10), "hi"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x10, 10), "hi_m"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x11, 10), "vtc_i2si_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x12, 10), "vtc_tdm_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x13, 10), "vtc_dmic_loc0")),
+
+ [RTD1619B_ISO_HIF_CLK] = RTK_PIN_MUX(hif_clk, 0x10, GENMASK(18, 14),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 14), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 14), "sd"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 14), "sdio_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 14), "dmic_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 14), "tdm_ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 14), "scpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 14), "acpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 14), "vcpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 14), "ai_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 14), "hi"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x10, 14), "hi_m"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x11, 14), "vtc_i2si_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x12, 14), "vtc_tdm_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x13, 14), "vtc_dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x14, 14), "ve3_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x15, 14), "aucpu0_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x16, 14), "aucpu1_ejtag_loc1")),
+ [RTD1619B_ISO_GPIO_40] = RTK_PIN_MUX(gpio_40, 0x10, GENMASK(20, 19),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 19), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 19), "sdio_loc1")),
+ [RTD1619B_ISO_GPIO_41] = RTK_PIN_MUX(gpio_41, 0x10, GENMASK(22, 21),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 21), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 21), "sdio_loc1")),
+ [RTD1619B_ISO_GPIO_42] = RTK_PIN_MUX(gpio_42, 0x10, GENMASK(24, 23),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 23), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 23), "sdio_loc1")),
+ [RTD1619B_ISO_GPIO_43] = RTK_PIN_MUX(gpio_43, 0x10, GENMASK(26, 25),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 25), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 25), "sdio_loc1")),
+ [RTD1619B_ISO_GPIO_44] = RTK_PIN_MUX(gpio_44, 0x10, GENMASK(28, 27),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 27), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 27), "sdio_loc1")),
+ [RTD1619B_ISO_GPIO_45] = RTK_PIN_MUX(gpio_45, 0x10, GENMASK(30, 29),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 29), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 29), "sdio_loc1")),
+ [RTD1619B_ISO_GPIO_46] = RTK_PIN_MUX(gpio_46, 0x10, GENMASK(31, 31),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 31), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 31), "i2c5")),
+ [RTD1619B_ISO_GPIO_47] = RTK_PIN_MUX(gpio_47, 0x14, GENMASK(1, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 0), "dc_fan"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 0), "pwm3")),
+ [RTD1619B_ISO_GPIO_48] = RTK_PIN_MUX(gpio_48, 0x14, GENMASK(2, 2),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 2), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 2), "pll_test_loc1")),
+ [RTD1619B_ISO_GPIO_49] = RTK_PIN_MUX(gpio_49, 0x14, GENMASK(3, 3),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 3), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 3), "pll_test_loc1")),
+ [RTD1619B_ISO_GPIO_50] = RTK_PIN_MUX(gpio_50, 0x14, GENMASK(5, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 4), "spdif"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 4), "test_loop_dis")),
+ [RTD1619B_ISO_USB_CC1] = RTK_PIN_MUX(usb_cc1, 0x14, GENMASK(6, 6),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 6), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 6), "usb_cc1")),
+ [RTD1619B_ISO_GPIO_52] = RTK_PIN_MUX(gpio_52, 0x14, GENMASK(8, 7),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 7), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 7), "pll_test_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 7), "pcie2")),
+ [RTD1619B_ISO_GPIO_53] = RTK_PIN_MUX(gpio_53, 0x14, GENMASK(9, 9),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 9), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 9), "pll_test_loc0")),
+ [RTD1619B_ISO_IR_RX] = RTK_PIN_MUX(ir_rx, 0x14, GENMASK(11, 10),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 10), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 10), "ir_rx"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 10), "standby_dbg")),
+ [RTD1619B_ISO_UR0_RX] = RTK_PIN_MUX(ur0_rx, 0x14, GENMASK(12, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 12), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 12), "uart0")),
+
+ [RTD1619B_ISO_UR0_TX] = RTK_PIN_MUX(ur0_tx, 0x14, GENMASK(13, 13),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 13), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 13), "uart0")),
+ [RTD1619B_ISO_GPIO_57] = RTK_PIN_MUX(gpio_57, 0x14, GENMASK(17, 14),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 14), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 14), "tdm_ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 14), "ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 14), "dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x11, 14), "vtc_i2si_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x12, 14), "vtc_tdm_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x13, 14), "vtc_dmic_loc1")),
+ [RTD1619B_ISO_GPIO_58] = RTK_PIN_MUX(gpio_58, 0x14, GENMASK(21, 18),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 18), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 18), "tdm_ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 18), "ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 18), "dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x11, 18), "vtc_i2si_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x12, 18), "vtc_tdm_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x13, 18), "vtc_dmic_loc1")),
+ [RTD1619B_ISO_GPIO_59] = RTK_PIN_MUX(gpio_59, 0x14, GENMASK(25, 22),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 22), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 22), "tdm_ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 22), "ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 22), "dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x11, 22), "vtc_i2si_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x12, 22), "vtc_tdm_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x13, 22), "vtc_dmic_loc1")),
+ [RTD1619B_ISO_GPIO_60] = RTK_PIN_MUX(gpio_60, 0x14, GENMASK(29, 26),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 26), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 26), "tdm_ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 26), "ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 26), "dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x11, 26), "vtc_i2si_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x12, 26), "vtc_tdm_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x13, 26), "vtc_dmic_loc1")),
+ [RTD1619B_ISO_GPIO_61] = RTK_PIN_MUX(gpio_61, 0x18, GENMASK(3, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 0), "dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x11, 0), "vtc_i2si_loc1")),
+ [RTD1619B_ISO_GPIO_62] = RTK_PIN_MUX(gpio_62, 0x18, GENMASK(5, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 4), "ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 4), "dmic_loc0")),
+ [RTD1619B_ISO_GPIO_63] = RTK_PIN_MUX(gpio_63, 0x18, GENMASK(7, 6),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 6), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 6), "ai_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 6), "i2c3"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 6), "dmic_loc0")),
+ [RTD1619B_ISO_GPIO_64] = RTK_PIN_MUX(gpio_64, 0x18, GENMASK(9, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 8), "i2c3"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 8), "dmic_loc0")),
+ [RTD1619B_ISO_GPIO_65] = RTK_PIN_MUX(gpio_65, 0x18, GENMASK(10, 10),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 10), "gpio")),
+ [RTD1619B_ISO_GPIO_66] = RTK_PIN_MUX(gpio_66, 0x18, GENMASK(14, 11),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 11), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 11), "tp0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 11), "ao"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x11, 11), "vtc_i2so")),
+ [RTD1619B_ISO_GPIO_67] = RTK_PIN_MUX(gpio_67, 0x18, GENMASK(18, 15),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 15), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 15), "tp0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 15), "ao"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x11, 15), "vtc_i2so")),
+ [RTD1619B_ISO_GPIO_68] = RTK_PIN_MUX(gpio_68, 0x18, GENMASK(22, 19),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 19), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 19), "tp0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 19), "ao"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x11, 19), "vtc_i2so")),
+ [RTD1619B_ISO_GPIO_69] = RTK_PIN_MUX(gpio_69, 0x18, GENMASK(26, 23),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 23), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 23), "tp0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 23), "tp1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 23), "ao"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x11, 23), "vtc_i2so")),
+
+ [RTD1619B_ISO_GPIO_70] = RTK_PIN_MUX(gpio_70, 0x18, GENMASK(29, 27),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 27), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 27), "tp0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 27), "tp1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 27), "ao")),
+ [RTD1619B_ISO_GPIO_71] = RTK_PIN_MUX(gpio_71, 0x1c, GENMASK(2, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 0), "tp0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 0), "tp1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 0), "ao"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 0), "scpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 0), "acpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 0), "vcpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 0), "ve3_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 0), "aucpu0_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x10, 0), "aucpu1_ejtag_loc2")),
+ [RTD1619B_ISO_GPIO_72] = RTK_PIN_MUX(gpio_72, 0x1c, GENMASK(6, 3),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 3), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 3), "tp0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x3, 3), "tp1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 3), "ao"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 3), "scpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 3), "acpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 3), "vcpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 3), "ve3_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 3), "aucpu0_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x10, 3), "aucpu1_ejtag_loc2")),
+ [RTD1619B_ISO_GPIO_73] = RTK_PIN_MUX(gpio_73, 0x1c, GENMASK(10, 7),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 7), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 7), "tp0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 7), "scpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 7), "acpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 7), "vcpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 7), "ve3_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 7), "aucpu0_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x10, 7), "aucpu1_ejtag_loc2")),
+ [RTD1619B_ISO_GPIO_74] = RTK_PIN_MUX(gpio_74, 0x1c, GENMASK(14, 11),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 11), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 11), "tp0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 11), "scpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 11), "acpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 11), "vcpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 11), "ve3_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 11), "aucpu0_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x10, 11), "aucpu1_ejtag_loc2")),
+ [RTD1619B_ISO_GPIO_75] = RTK_PIN_MUX(gpio_75, 0x1c, GENMASK(18, 15),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 15), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 15), "tp0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 15), "scpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 15), "acpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 15), "vcpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 15), "ve3_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 15), "aucpu0_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x10, 15), "aucpu1_ejtag_loc2")),
+ [RTD1619B_ISO_GPIO_76] = RTK_PIN_MUX(gpio_76, 0x1c, GENMASK(22, 19),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 19), "gpio"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 19), "tp0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x5, 19), "scpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x6, 19), "acpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x7, 19), "vcpu_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x8, 19), "ve3_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x9, 19), "aucpu0_ejtag_loc2"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x10, 19), "aucpu1_ejtag_loc2")),
+
+ [RTD1619B_ISO_UR2_LOC] = RTK_PIN_MUX(ur2_loc, 0x120, GENMASK(1, 0),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 0), "uart2_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 0), "uart2_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 0), "uart2_loc1")),
+ [RTD1619B_ISO_GSPI_LOC] = RTK_PIN_MUX(gspi_loc, 0x120, GENMASK(3, 2),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 2), "gspi_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 2), "gspi_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 2), "gspi_loc1")),
+ [RTD1619B_ISO_SDIO_LOC] = RTK_PIN_MUX(sdio_loc, 0x120, GENMASK(5, 4),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 4), "sdio_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 4), "sdio_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 4), "sdio_loc1")),
+ [RTD1619B_ISO_HI_LOC] = RTK_PIN_MUX(hi_loc, 0x120, GENMASK(7, 6),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 6), "hi_loc_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 6), "hi_loc0")),
+ [RTD1619B_ISO_HI_WIDTH] = RTK_PIN_MUX(hi_width, 0x120, GENMASK(9, 8),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 8), "hi_width_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 8), "hi_width_1bit")),
+ [RTD1619B_ISO_SF_EN] = RTK_PIN_MUX(sf_en, 0x120, GENMASK(11, 11),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 11), "sf_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 11), "sf_enable")),
+ [RTD1619B_ISO_ARM_TRACE_DBG_EN] = RTK_PIN_MUX(arm_trace_dbg_en, 0x120, GENMASK(12, 12),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 12), "arm_trace_debug_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 12), "arm_trace_debug_enable")),
+ [RTD1619B_ISO_PWM_01_OPEN_DRAIN_EN_LOC0] = RTK_PIN_MUX(pwm_01_open_drain_en_loc0, 0x120,
+ GENMASK(13, 13),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 13), "pwm_normal"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 13), "pwm_open_drain")),
+ [RTD1619B_ISO_PWM_23_OPEN_DRAIN_EN_LOC0] = RTK_PIN_MUX(pwm_23_open_drain_en_loc0, 0x120,
+ GENMASK(14, 14),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 14), "pwm_normal"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 14), "pwm_open_drain")),
+ [RTD1619B_ISO_PWM_01_OPEN_DRAIN_EN_LOC1] = RTK_PIN_MUX(pwm_01_open_drain_en_loc1, 0x120,
+ GENMASK(15, 15),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 15), "pwm_normal"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 15), "pwm_open_drain")),
+ [RTD1619B_ISO_PWM_23_OPEN_DRAIN_EN_LOC1] = RTK_PIN_MUX(pwm_23_open_drain_en_loc1, 0x120,
+ GENMASK(16, 16),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 16), "pwm_normal"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 16), "pwm_open_drain")),
+ [RTD1619B_ISO_EJTAG_ACPU_LOC] = RTK_PIN_MUX(ejtag_acpu_loc, 0x120, GENMASK(19, 17),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 17), "acpu_ejtag_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 17), "acpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 17), "acpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 17), "acpu_ejtag_loc2")),
+ [RTD1619B_ISO_EJTAG_VCPU_LOC] = RTK_PIN_MUX(ejtag_vcpu_loc, 0x120, GENMASK(22, 20),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 20), "vcpu_ejtag_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 20), "vcpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 20), "vcpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 20), "vcpu_ejtag_loc2")),
+ [RTD1619B_ISO_EJTAG_SCPU_LOC] = RTK_PIN_MUX(ejtag_scpu_loc, 0x120, GENMASK(25, 23),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 23), "scpu_ejtag_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 23), "scpu_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 23), "scpu_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 23), "scpu_ejtag_loc2")),
+ [RTD1619B_ISO_DMIC_LOC] = RTK_PIN_MUX(dmic_loc, 0x120, GENMASK(27, 26),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 26), "dmic_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 26), "dmic_loc1")),
+ [RTD1619B_ISO_ISO_GSPI_LOC] = RTK_PIN_MUX(iso_gspi_loc, 0x120, GENMASK(29, 28),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 28), "iso_gspi_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 28), "iso_gspi_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 28), "iso_gspi_loc1")),
+ [RTD1619B_ISO_EJTAG_VE3_LOC] = RTK_PIN_MUX(ejtag_ve3_loc, 0x124, GENMASK(20, 18),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 18), "ve3_ejtag_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 18), "ve3_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 18), "ve3_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 18), "ve3_ejtag_loc2")),
+ [RTD1619B_ISO_EJTAG_AUCPU1_LOC] = RTK_PIN_MUX(ejtag_aucpu1_loc, 0x124, GENMASK(23, 21),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 21), "aucpu1_ejtag_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 21), "aucpu1_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 21), "aucpu1_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 21), "aucpu1_ejtag_loc2")),
+ [RTD1619B_ISO_EJTAG_AUCPU0_LOC] = RTK_PIN_MUX(ejtag_aucpu0_loc, 0x124, GENMASK(26, 24),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x0, 24), "aucpu0_ejtag_disable"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x1, 24), "aucpu0_ejtag_loc0"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x2, 24), "aucpu0_ejtag_loc1"),
+ RTK_PIN_FUNC(SHIFT_LEFT(0x4, 24), "aucpu0_ejtag_loc2")),
+};
+
+static const struct rtd_pin_config_desc rtd1619b_iso_configs[] = {
+ [RTD1619B_ISO_GPIO_17] = RTK_PIN_CONFIG(gpio_17, 0x20, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_19] = RTK_PIN_CONFIG(gpio_19, 0x20, 5, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_50] = RTK_PIN_CONFIG(gpio_50, 0x20, 10, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_21] = RTK_PIN_CONFIG(gpio_21, 0x20, 15, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_66] = RTK_PIN_CONFIG(gpio_66, 0x20, 20, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_67] = RTK_PIN_CONFIG(gpio_67, 0x20, 25, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_68] = RTK_PIN_CONFIG(gpio_68, 0x24, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_69] = RTK_PIN_CONFIG(gpio_69, 0x24, 5, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_70] = RTK_PIN_CONFIG(gpio_70, 0x24, 10, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_71] = RTK_PIN_CONFIG(gpio_71, 0x24, 15, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_72] = RTK_PIN_CONFIG(gpio_72, 0x24, 20, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_73] = RTK_PIN_CONFIG(gpio_73, 0x24, 25, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_74] = RTK_PIN_CONFIG(gpio_74, 0x28, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_75] = RTK_PIN_CONFIG(gpio_75, 0x28, 5, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_76] = RTK_PIN_CONFIG(gpio_76, 0x28, 10, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_SPI_SI] = RTK_PIN_CONFIG(spi_si, 0x28, 15, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_SPI_SCK] = RTK_PIN_CONFIG(spi_sck, 0x2c, 0, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_SPI_CE_N] = RTK_PIN_CONFIG(spi_ce_n, 0x2c, 13, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_SPI_SO] = RTK_PIN_CONFIG(spi_so, 0x2c, 26, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_EMMC_CLK] = RTK_PIN_CONFIG(emmc_clk, 0x30, 7, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_EMMC_CMD] = RTK_PIN_CONFIG(emmc_cmd, 0x34, 0, 0, 1, 3, 2, 13, NA),
+ [RTD1619B_ISO_EMMC_RST_N] = RTK_PIN_CONFIG(emmc_rst_n, 0x34, 14, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_EMMC_DD_SB] = RTK_PIN_CONFIG(emmc_dd_sb, 0x34, 27, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_EMMC_DATA_5] = RTK_PIN_CONFIG(emmc_data_5, 0x38, 8, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_EMMC_DATA_3] = RTK_PIN_CONFIG(emmc_data_3, 0x3c, 0, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_EMMC_DATA_4] = RTK_PIN_CONFIG(emmc_data_4, 0x3c, 13, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_EMMC_DATA_0] = RTK_PIN_CONFIG(emmc_data_0, 0x3c, 26, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_EMMC_DATA_1] = RTK_PIN_CONFIG(emmc_data_1, 0x40, 7, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_EMMC_DATA_2] = RTK_PIN_CONFIG(emmc_data_2, 0x44, 0, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_EMMC_DATA_7] = RTK_PIN_CONFIG(emmc_data_7, 0x44, 13, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_EMMC_DATA_6] = RTK_PIN_CONFIG(emmc_data_6, 0x44, 26, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_USB_CC1] = RTK_PIN_CONFIG(usb_cc1, 0x48, 7, NA, NA, 0, 1, 9, PADDRI_4_8),
+ [RTD1619B_ISO_USB_CC2] = RTK_PIN_CONFIG(usb_cc2, 0x48, 10, NA, NA, 0, 1, 9, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_26] = RTK_PIN_CONFIG(gpio_26, 0x48, 13, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_27] = RTK_PIN_CONFIG(gpio_27, 0x48, 18, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_28] = RTK_PIN_CONFIG(gpio_28, 0x48, 23, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_29] = RTK_PIN_CONFIG(gpio_29, 0x4c, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_46] = RTK_PIN_CONFIG(gpio_46, 0x4c, 5, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_48] = RTK_PIN_CONFIG(gpio_48, 0x4c, 10, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_49] = RTK_PIN_CONFIG(gpio_49, 0x4c, 15, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_53] = RTK_PIN_CONFIG(gpio_53, 0x4c, 20, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_65] = RTK_PIN_CONFIG(gpio_65, 0x4c, 25, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_14] = RTK_PIN_CONFIG(gpio_14, 0x50, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_15] = RTK_PIN_CONFIG(gpio_15, 0x50, 5, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_23] = RTK_PIN_CONFIG(gpio_23, 0x50, 10, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_IR_RX] = RTK_PIN_CONFIG(ir_rx, 0x50, 20, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_57] = RTK_PIN_CONFIG(gpio_57, 0x50, 25, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_58] = RTK_PIN_CONFIG(gpio_58, 0x50, 30, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_59] = RTK_PIN_CONFIG(gpio_59, 0x54, 3, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_60] = RTK_PIN_CONFIG(gpio_60, 0x54, 8, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_61] = RTK_PIN_CONFIG(gpio_61, 0x54, 13, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_62] = RTK_PIN_CONFIG(gpio_62, 0x54, 18, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_63] = RTK_PIN_CONFIG(gpio_63, 0x54, 23, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_64] = RTK_PIN_CONFIG(gpio_64, 0x58, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_UR0_TX] = RTK_PIN_CONFIG(ur0_tx, 0x58, 5, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_UR0_RX] = RTK_PIN_CONFIG(ur0_rx, 0x58, 13, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_12] = RTK_PIN_CONFIG(gpio_12, 0x58, 18, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_13] = RTK_PIN_CONFIG(gpio_13, 0x58, 23, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_30] = RTK_PIN_CONFIG(gpio_30, 0x58, 28, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_7] = RTK_PIN_CONFIG(gpio_7, 0x5c, 1, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_16] = RTK_PIN_CONFIG(gpio_16, 0x5c, 6, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_18] = RTK_PIN_CONFIG(gpio_18, 0x5c, 11, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_20] = RTK_PIN_CONFIG(gpio_20, 0x5c, 16, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_22] = RTK_PIN_CONFIG(gpio_22, 0x5c, 21, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_31] = RTK_PIN_CONFIG(gpio_31, 0x5c, 26, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_47] = RTK_PIN_CONFIG(gpio_47, 0x60, 12, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_25] = RTK_PIN_CONFIG(gpio_25, 0x60, 17, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_6] = RTK_PIN_CONFIG(gpio_6, 0x60, 22, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_5] = RTK_PIN_CONFIG(gpio_5, 0x60, 27, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_4] = RTK_PIN_CONFIG(gpio_4, 0x64, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_3] = RTK_PIN_CONFIG(gpio_3, 0x64, 5, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_2] = RTK_PIN_CONFIG(gpio_2, 0x64, 10, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_1] = RTK_PIN_CONFIG(gpio_1, 0x64, 15, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_0] = RTK_PIN_CONFIG(gpio_0, 0x64, 20, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_52] = RTK_PIN_CONFIG(gpio_52, 0x64, 25, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_35] = RTK_PIN_CONFIG(gpio_35, 0x68, 0, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_34] = RTK_PIN_CONFIG(gpio_34, 0x68, 5, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_40] = RTK_PIN_CONFIG(gpio_40, 0x68, 10, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_GPIO_41] = RTK_PIN_CONFIG(gpio_41, 0x6c, 0, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_GPIO_42] = RTK_PIN_CONFIG(gpio_42, 0x6c, 13, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_GPIO_43] = RTK_PIN_CONFIG(gpio_43, 0x70, 0, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_GPIO_44] = RTK_PIN_CONFIG(gpio_44, 0x70, 13, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_GPIO_45] = RTK_PIN_CONFIG(gpio_45, 0x70, 26, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_GPIO_8] = RTK_PIN_CONFIG(gpio_8, 0x74, 7, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_9] = RTK_PIN_CONFIG(gpio_9, 0x74, 12, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_10] = RTK_PIN_CONFIG(gpio_10, 0x74, 17, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_GPIO_11] = RTK_PIN_CONFIG(gpio_11, 0x74, 22, 1, 2, 0, 3, 4, PADDRI_4_8),
+ [RTD1619B_ISO_HIF_RDY] = RTK_PIN_CONFIG(hif_rdy, 0x78, 0, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_HIF_CLK] = RTK_PIN_CONFIG(hif_clk, 0x78, 13, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_GPIO_32] = RTK_PIN_CONFIG(gpio_32, 0x78, 26, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_GPIO_33] = RTK_PIN_CONFIG(gpio_33, 0x7c, 7, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_HIF_DATA] = RTK_PIN_CONFIG(hif_data, 0x80, 0, 0, 1, 3, 2, 12, NA),
+ [RTD1619B_ISO_HIF_EN] = RTK_PIN_CONFIG(hif_en, 0x80, 13, 0, 1, 3, 2, 12, NA),
+};
+
+static const struct rtd_pin_sconfig_desc rtd1619b_iso_sconfigs[] = {
+ RTK_PIN_SCONFIG(spi_si, 0x28, 18, 3, 21, 3, 24, 3),
+ RTK_PIN_SCONFIG(spi_sck, 0x2c, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(spi_ce_n, 0x2c, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(spi_so, 0x2c, 29, 3, 32, 3, 35, 3),
+ RTK_PIN_SCONFIG(emmc_clk, 0x30, 10, 3, 13, 3, 16, 3),
+ RTK_PIN_SCONFIG(emmc_cmd, 0x34, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(emmc_rst_n, 0x34, 17, 3, 20, 3, 23, 3),
+ RTK_PIN_SCONFIG(emmc_dd_sb, 0x34, 30, 3, 33, 3, 36, 3),
+ RTK_PIN_SCONFIG(emmc_data_5, 0x38, 11, 3, 14, 3, 17, 3),
+ RTK_PIN_SCONFIG(emmc_data_3, 0x3c, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(emmc_data_4, 0x3c, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(emmc_data_0, 0x3c, 29, 3, 32, 3, 35, 3),
+ RTK_PIN_SCONFIG(emmc_data_1, 0x40, 10, 3, 13, 3, 16, 3),
+ RTK_PIN_SCONFIG(emmc_data_2, 0x44, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(emmc_data_7, 0x44, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(emmc_data_6, 0x44, 29, 3, 32, 3, 35, 3),
+ RTK_PIN_SCONFIG(gpio_40, 0x68, 13, 3, 16, 3, 19, 3),
+ RTK_PIN_SCONFIG(gpio_41, 0x6c, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(gpio_42, 0x6c, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(gpio_43, 0x70, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(gpio_44, 0x70, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(gpio_45, 0x70, 29, 3, 32, 3, 35, 3),
+ RTK_PIN_SCONFIG(hif_rdy, 0x78, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(hif_clk, 0x78, 16, 3, 19, 3, 22, 3),
+ RTK_PIN_SCONFIG(gpio_32, 0x78, 29, 3, 32, 3, 35, 3),
+ RTK_PIN_SCONFIG(gpio_33, 0x7c, 10, 3, 13, 3, 16, 3),
+ RTK_PIN_SCONFIG(hif_data, 0x80, 3, 3, 6, 3, 9, 3),
+ RTK_PIN_SCONFIG(hif_en, 0x80, 16, 3, 19, 3, 22, 3),
+};
+
+static const struct rtd_pinctrl_desc rtd1619b_iso_pinctrl_desc = {
+ .pins = rtd1619b_iso_pins,
+ .num_pins = ARRAY_SIZE(rtd1619b_iso_pins),
+ .groups = rtd1619b_pin_groups,
+ .num_groups = ARRAY_SIZE(rtd1619b_pin_groups),
+ .functions = rtd1619b_pin_functions,
+ .num_functions = ARRAY_SIZE(rtd1619b_pin_functions),
+ .muxes = rtd1619b_iso_muxes,
+ .num_muxes = ARRAY_SIZE(rtd1619b_iso_muxes),
+ .configs = rtd1619b_iso_configs,
+ .num_configs = ARRAY_SIZE(rtd1619b_iso_configs),
+ .sconfigs = rtd1619b_iso_sconfigs,
+ .num_sconfigs = ARRAY_SIZE(rtd1619b_iso_sconfigs),
+};
+
+static int rtd1619b_pinctrl_probe(struct platform_device *pdev)
+{
+ return rtd_pinctrl_probe(pdev, &rtd1619b_iso_pinctrl_desc);
+}
+
+static const struct of_device_id rtd1619b_pinctrl_of_match[] = {
+ { .compatible = "realtek,rtd1619b-pinctrl", },
+ {},
+};
+
+static struct platform_driver rtd1619b_pinctrl_driver = {
+ .driver = {
+ .name = "rtd1619b-pinctrl",
+ .of_match_table = rtd1619b_pinctrl_of_match,
+ },
+ .probe = rtd1619b_pinctrl_probe,
+};
+
+static int __init rtd1619b_pinctrl_init(void)
+{
+ return platform_driver_register(&rtd1619b_pinctrl_driver);
+}
+arch_initcall(rtd1619b_pinctrl_init);
+
+static void __exit rtd1619b_pinctrl_exit(void)
+{
+ platform_driver_unregister(&rtd1619b_pinctrl_driver);
+}
+module_exit(rtd1619b_pinctrl_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Realtek Semiconductor Corporation");
+MODULE_DESCRIPTION("Realtek DHC SoC RTD1619B pinctrl driver");
diff --git a/drivers/pinctrl/renesas/gpio.c b/drivers/pinctrl/renesas/gpio.c
index 5758daf94fe2..a5136dacaaf2 100644
--- a/drivers/pinctrl/renesas/gpio.c
+++ b/drivers/pinctrl/renesas/gpio.c
@@ -135,12 +135,12 @@ static int gpio_pin_request(struct gpio_chip *gc, unsigned offset)
if (idx < 0 || pfc->info->pins[idx].enum_id == 0)
return -EINVAL;
- return pinctrl_gpio_request(gc->base + offset);
+ return pinctrl_gpio_request(gc, offset);
}
static void gpio_pin_free(struct gpio_chip *gc, unsigned offset)
{
- return pinctrl_gpio_free(gc->base + offset);
+ return pinctrl_gpio_free(gc, offset);
}
static void gpio_pin_set_value(struct sh_pfc_chip *chip, unsigned offset,
@@ -164,7 +164,7 @@ static void gpio_pin_set_value(struct sh_pfc_chip *chip, unsigned offset,
static int gpio_pin_direction_input(struct gpio_chip *gc, unsigned offset)
{
- return pinctrl_gpio_direction_input(gc->base + offset);
+ return pinctrl_gpio_direction_input(gc, offset);
}
static int gpio_pin_direction_output(struct gpio_chip *gc, unsigned offset,
@@ -172,7 +172,7 @@ static int gpio_pin_direction_output(struct gpio_chip *gc, unsigned offset,
{
gpio_pin_set_value(gpiochip_get_data(gc), offset, value);
- return pinctrl_gpio_direction_output(gc->base + offset);
+ return pinctrl_gpio_direction_output(gc, offset);
}
static int gpio_pin_get(struct gpio_chip *gc, unsigned offset)
diff --git a/drivers/pinctrl/renesas/pfc-r8a7778.c b/drivers/pinctrl/renesas/pfc-r8a7778.c
index c52761d80f7b..db92d6d91d8e 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7778.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7778.c
@@ -1424,6 +1424,24 @@ I2C_PFC_MUX(i2c3_b, SDA3_B, SCL3_B);
I2C_PFC_PIN(i2c3_c, RCAR_GP_PIN(1, 22), RCAR_GP_PIN(1, 23));
I2C_PFC_MUX(i2c3_c, SDA3_C, SCL3_C);
+/* - LBSC ------------------------------------------------------------------- */
+SH_PFC_PINS(lbsc_cs0, PIN_CS0);
+SH_PFC_MUX1(lbsc_cs0, CS0);
+SH_PFC_PINS(lbsc_cs1, PIN_CS1_A26);
+SH_PFC_MUX1(lbsc_cs1, CS1_A26);
+SH_PFC_PINS(lbsc_ex_cs0, RCAR_GP_PIN(1, 3));
+SH_PFC_MUX1(lbsc_ex_cs0, EX_CS0);
+SH_PFC_PINS(lbsc_ex_cs1, RCAR_GP_PIN(1, 4));
+SH_PFC_MUX1(lbsc_ex_cs1, EX_CS1);
+SH_PFC_PINS(lbsc_ex_cs2, RCAR_GP_PIN(1, 5));
+SH_PFC_MUX1(lbsc_ex_cs2, EX_CS2);
+SH_PFC_PINS(lbsc_ex_cs3, RCAR_GP_PIN(1, 6));
+SH_PFC_MUX1(lbsc_ex_cs3, EX_CS3);
+SH_PFC_PINS(lbsc_ex_cs4, RCAR_GP_PIN(1, 7));
+SH_PFC_MUX1(lbsc_ex_cs4, EX_CS4);
+SH_PFC_PINS(lbsc_ex_cs5, RCAR_GP_PIN(1, 8));
+SH_PFC_MUX1(lbsc_ex_cs5, EX_CS5);
+
/* - MMC macro -------------------------------------------------------------- */
#define MMC_PFC_PINS(name, args...) SH_PFC_PINS(name, args)
#define MMC_PFC_CTRL(name, clk, cmd) SH_PFC_MUX2(name, clk, cmd)
@@ -1724,6 +1742,14 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(i2c3_a),
SH_PFC_PIN_GROUP(i2c3_b),
SH_PFC_PIN_GROUP(i2c3_c),
+ SH_PFC_PIN_GROUP(lbsc_cs0),
+ SH_PFC_PIN_GROUP(lbsc_cs1),
+ SH_PFC_PIN_GROUP(lbsc_ex_cs0),
+ SH_PFC_PIN_GROUP(lbsc_ex_cs1),
+ SH_PFC_PIN_GROUP(lbsc_ex_cs2),
+ SH_PFC_PIN_GROUP(lbsc_ex_cs3),
+ SH_PFC_PIN_GROUP(lbsc_ex_cs4),
+ SH_PFC_PIN_GROUP(lbsc_ex_cs5),
SH_PFC_PIN_GROUP(mmc_ctrl),
BUS_DATA_PIN_GROUP(mmc_data, 1),
BUS_DATA_PIN_GROUP(mmc_data, 4),
@@ -1897,6 +1923,17 @@ static const char * const i2c3_groups[] = {
"i2c3_c",
};
+static const char * const lbsc_groups[] = {
+ "lbsc_cs0",
+ "lbsc_cs1",
+ "lbsc_ex_cs0",
+ "lbsc_ex_cs1",
+ "lbsc_ex_cs2",
+ "lbsc_ex_cs3",
+ "lbsc_ex_cs4",
+ "lbsc_ex_cs5",
+};
+
static const char * const mmc_groups[] = {
"mmc_ctrl",
"mmc_data1",
@@ -2049,6 +2086,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(i2c1),
SH_PFC_FUNCTION(i2c2),
SH_PFC_FUNCTION(i2c3),
+ SH_PFC_FUNCTION(lbsc),
SH_PFC_FUNCTION(mmc),
SH_PFC_FUNCTION(scif_clk),
SH_PFC_FUNCTION(scif0),
diff --git a/drivers/pinctrl/renesas/pfc-r8a7779.c b/drivers/pinctrl/renesas/pfc-r8a7779.c
index 1172a359384d..e1a8cb669120 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7779.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7779.c
@@ -1942,7 +1942,7 @@ static const unsigned int intc_irq3_b_pins[] = {
static const unsigned int intc_irq3_b_mux[] = {
IRQ3_B_MARK,
};
-/* - LSBC ------------------------------------------------------------------- */
+/* - LBSC ------------------------------------------------------------------- */
static const unsigned int lbsc_cs0_pins[] = {
/* CS */
RCAR_GP_PIN(0, 13),
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index 37cdfe4b04f9..9de350ad7e7d 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -55,14 +55,22 @@
#define PIN_CFG_FILONOFF BIT(10)
#define PIN_CFG_FILNUM BIT(11)
#define PIN_CFG_FILCLKSEL BIT(12)
+#define PIN_CFG_IOLH_C BIT(13)
+#define PIN_CFG_SOFT_PS BIT(14)
-#define RZG2L_MPXED_PIN_FUNCS (PIN_CFG_IOLH_A | \
- PIN_CFG_SR | \
+#define RZG2L_MPXED_COMMON_PIN_FUNCS(group) \
+ (PIN_CFG_IOLH_##group | \
PIN_CFG_PUPD | \
PIN_CFG_FILONOFF | \
PIN_CFG_FILNUM | \
PIN_CFG_FILCLKSEL)
+#define RZG2L_MPXED_PIN_FUNCS (RZG2L_MPXED_COMMON_PIN_FUNCS(A) | \
+ PIN_CFG_SR)
+
+#define RZG3S_MPXED_PIN_FUNCS(group) (RZG2L_MPXED_COMMON_PIN_FUNCS(group) | \
+ PIN_CFG_SOFT_PS)
+
#define RZG2L_MPXED_ETH_PIN_FUNCS(x) ((x) | \
PIN_CFG_FILONOFF | \
PIN_CFG_FILNUM | \
@@ -74,8 +82,6 @@
*/
#define RZG2L_GPIO_PORT_PACK(n, a, f) (((n) << 28) | ((a) << 20) | (f))
#define RZG2L_GPIO_PORT_GET_PINCNT(x) (((x) & GENMASK(30, 28)) >> 28)
-#define RZG2L_GPIO_PORT_GET_INDEX(x) (((x) & GENMASK(26, 20)) >> 20)
-#define RZG2L_GPIO_PORT_GET_CFGS(x) ((x) & GENMASK(19, 0))
/*
* BIT(31) indicates dedicated pin, p is the register index while
@@ -85,20 +91,22 @@
#define RZG2L_SINGLE_PIN BIT(31)
#define RZG2L_SINGLE_PIN_PACK(p, b, f) (RZG2L_SINGLE_PIN | \
((p) << 24) | ((b) << 20) | (f))
-#define RZG2L_SINGLE_PIN_GET_PORT_OFFSET(x) (((x) & GENMASK(30, 24)) >> 24)
#define RZG2L_SINGLE_PIN_GET_BIT(x) (((x) & GENMASK(22, 20)) >> 20)
-#define RZG2L_SINGLE_PIN_GET_CFGS(x) ((x) & GENMASK(19, 0))
-
-#define P(n) (0x0000 + 0x10 + (n))
-#define PM(n) (0x0100 + 0x20 + (n) * 2)
-#define PMC(n) (0x0200 + 0x10 + (n))
-#define PFC(n) (0x0400 + 0x40 + (n) * 4)
-#define PIN(n) (0x0800 + 0x10 + (n))
-#define IOLH(n) (0x1000 + (n) * 8)
-#define IEN(n) (0x1800 + (n) * 8)
-#define ISEL(n) (0x2c80 + (n) * 8)
-#define PWPR (0x3014)
-#define SD_CH(n) (0x3000 + (n) * 4)
+
+#define RZG2L_PIN_CFG_TO_CAPS(cfg) ((cfg) & GENMASK(19, 0))
+#define RZG2L_PIN_CFG_TO_PORT_OFFSET(cfg) ((cfg) & RZG2L_SINGLE_PIN ? \
+ (((cfg) & GENMASK(30, 24)) >> 24) : \
+ (((cfg) & GENMASK(26, 20)) >> 20))
+
+#define P(off) (0x0000 + (off))
+#define PM(off) (0x0100 + (off) * 2)
+#define PMC(off) (0x0200 + (off))
+#define PFC(off) (0x0400 + (off) * 4)
+#define PIN(off) (0x0800 + (off))
+#define IOLH(off) (0x1000 + (off) * 8)
+#define IEN(off) (0x1800 + (off) * 8)
+#define ISEL(off) (0x2C00 + (off) * 8)
+#define SD_CH(off, ch) ((off) + (ch) * 4)
#define QSPI (0x3008)
#define PVDD_1800 1 /* I/O domain voltage <= 1.8V */
@@ -117,13 +125,59 @@
#define PM_OUTPUT 0x2
#define RZG2L_PIN_ID_TO_PORT(id) ((id) / RZG2L_PINS_PER_PORT)
-#define RZG2L_PIN_ID_TO_PORT_OFFSET(id) (RZG2L_PIN_ID_TO_PORT(id) + 0x10)
#define RZG2L_PIN_ID_TO_PIN(id) ((id) % RZG2L_PINS_PER_PORT)
#define RZG2L_TINT_MAX_INTERRUPT 32
#define RZG2L_TINT_IRQ_START_INDEX 9
#define RZG2L_PACK_HWIRQ(t, i) (((t) << 16) | (i))
+/**
+ * struct rzg2l_register_offsets - specific register offsets
+ * @pwpr: PWPR register offset
+ * @sd_ch: SD_CH register offset
+ */
+struct rzg2l_register_offsets {
+ u16 pwpr;
+ u16 sd_ch;
+};
+
+/**
+ * enum rzg2l_iolh_index - starting indices in IOLH specific arrays
+ * @RZG2L_IOLH_IDX_1V8: starting index for 1V8 power source
+ * @RZG2L_IOLH_IDX_2V5: starting index for 2V5 power source
+ * @RZG2L_IOLH_IDX_3V3: starting index for 3V3 power source
+ * @RZG2L_IOLH_IDX_MAX: maximum index
+ */
+enum rzg2l_iolh_index {
+ RZG2L_IOLH_IDX_1V8 = 0,
+ RZG2L_IOLH_IDX_2V5 = 4,
+ RZG2L_IOLH_IDX_3V3 = 8,
+ RZG2L_IOLH_IDX_MAX = 12,
+};
+
+/* Maximum number of driver strength entries per power source. */
+#define RZG2L_IOLH_MAX_DS_ENTRIES (4)
+
+/**
+ * struct rzg2l_hwcfg - hardware configuration data structure
+ * @regs: hardware specific register offsets
+ * @iolh_groupa_ua: IOLH group A uA specific values
+ * @iolh_groupb_ua: IOLH group B uA specific values
+ * @iolh_groupc_ua: IOLH group C uA specific values
+ * @iolh_groupb_oi: IOLH group B output impedance specific values
+ * @drive_strength_ua: drive strength in uA is supported (otherwise mA is supported)
+ * @func_base: base number for port function (see register PFC)
+ */
+struct rzg2l_hwcfg {
+ const struct rzg2l_register_offsets regs;
+ u16 iolh_groupa_ua[RZG2L_IOLH_IDX_MAX];
+ u16 iolh_groupb_ua[RZG2L_IOLH_IDX_MAX];
+ u16 iolh_groupc_ua[RZG2L_IOLH_IDX_MAX];
+ u16 iolh_groupb_oi[4];
+ bool drive_strength_ua;
+ u8 func_base;
+};
+
struct rzg2l_dedicated_configs {
const char *name;
u32 config;
@@ -133,9 +187,20 @@ struct rzg2l_pinctrl_data {
const char * const *port_pins;
const u32 *port_pin_configs;
unsigned int n_ports;
- struct rzg2l_dedicated_configs *dedicated_pins;
+ const struct rzg2l_dedicated_configs *dedicated_pins;
unsigned int n_port_pins;
unsigned int n_dedicated_pins;
+ const struct rzg2l_hwcfg *hwcfg;
+};
+
+/**
+ * struct rzg2l_pinctrl_pin_settings - pin data
+ * @power_source: power source
+ * @drive_strength_ua: drive strength (in micro amps)
+ */
+struct rzg2l_pinctrl_pin_settings {
+ u16 power_source;
+ u16 drive_strength_ua;
};
struct rzg2l_pinctrl {
@@ -155,44 +220,46 @@ struct rzg2l_pinctrl {
spinlock_t lock; /* lock read/write registers */
struct mutex mutex; /* serialize adding groups and functions */
+
+ struct rzg2l_pinctrl_pin_settings *settings;
};
-static const unsigned int iolh_groupa_mA[] = { 2, 4, 8, 12 };
-static const unsigned int iolh_groupb_oi[] = { 100, 66, 50, 33 };
+static const u16 available_ps[] = { 1800, 2500, 3300 };
static void rzg2l_pinctrl_set_pfc_mode(struct rzg2l_pinctrl *pctrl,
- u8 port, u8 pin, u8 func)
+ u8 pin, u8 off, u8 func)
{
+ const struct rzg2l_register_offsets *regs = &pctrl->data->hwcfg->regs;
unsigned long flags;
u32 reg;
spin_lock_irqsave(&pctrl->lock, flags);
/* Set pin to 'Non-use (Hi-Z input protection)' */
- reg = readw(pctrl->base + PM(port));
+ reg = readw(pctrl->base + PM(off));
reg &= ~(PM_MASK << (pin * 2));
- writew(reg, pctrl->base + PM(port));
+ writew(reg, pctrl->base + PM(off));
/* Temporarily switch to GPIO mode with PMC register */
- reg = readb(pctrl->base + PMC(port));
- writeb(reg & ~BIT(pin), pctrl->base + PMC(port));
+ reg = readb(pctrl->base + PMC(off));
+ writeb(reg & ~BIT(pin), pctrl->base + PMC(off));
/* Set the PWPR register to allow PFC register to write */
- writel(0x0, pctrl->base + PWPR); /* B0WI=0, PFCWE=0 */
- writel(PWPR_PFCWE, pctrl->base + PWPR); /* B0WI=0, PFCWE=1 */
+ writel(0x0, pctrl->base + regs->pwpr); /* B0WI=0, PFCWE=0 */
+ writel(PWPR_PFCWE, pctrl->base + regs->pwpr); /* B0WI=0, PFCWE=1 */
/* Select Pin function mode with PFC register */
- reg = readl(pctrl->base + PFC(port));
+ reg = readl(pctrl->base + PFC(off));
reg &= ~(PFC_MASK << (pin * 4));
- writel(reg | (func << (pin * 4)), pctrl->base + PFC(port));
+ writel(reg | (func << (pin * 4)), pctrl->base + PFC(off));
/* Set the PWPR register to be write-protected */
- writel(0x0, pctrl->base + PWPR); /* B0WI=0, PFCWE=0 */
- writel(PWPR_B0WI, pctrl->base + PWPR); /* B0WI=1, PFCWE=0 */
+ writel(0x0, pctrl->base + regs->pwpr); /* B0WI=0, PFCWE=0 */
+ writel(PWPR_B0WI, pctrl->base + regs->pwpr); /* B0WI=1, PFCWE=0 */
/* Switch to Peripheral pin function with PMC register */
- reg = readb(pctrl->base + PMC(port));
- writeb(reg | BIT(pin), pctrl->base + PMC(port));
+ reg = readb(pctrl->base + PMC(off));
+ writeb(reg | BIT(pin), pctrl->base + PMC(off));
spin_unlock_irqrestore(&pctrl->lock, flags);
};
@@ -202,6 +269,7 @@ static int rzg2l_pinctrl_set_mux(struct pinctrl_dev *pctldev,
unsigned int group_selector)
{
struct rzg2l_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
struct function_desc *func;
unsigned int i, *psel_val;
struct group_desc *group;
@@ -218,11 +286,14 @@ static int rzg2l_pinctrl_set_mux(struct pinctrl_dev *pctldev,
pins = group->pins;
for (i = 0; i < group->num_pins; i++) {
- dev_dbg(pctrl->dev, "port:%u pin: %u PSEL:%u\n",
- RZG2L_PIN_ID_TO_PORT(pins[i]), RZG2L_PIN_ID_TO_PIN(pins[i]),
- psel_val[i]);
- rzg2l_pinctrl_set_pfc_mode(pctrl, RZG2L_PIN_ID_TO_PORT(pins[i]),
- RZG2L_PIN_ID_TO_PIN(pins[i]), psel_val[i]);
+ unsigned int *pin_data = pctrl->desc.pins[pins[i]].drv_data;
+ u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
+ u32 pin = RZG2L_PIN_ID_TO_PIN(pins[i]);
+
+ dev_dbg(pctrl->dev, "port:%u pin: %u off:%x PSEL:%u\n",
+ RZG2L_PIN_ID_TO_PORT(pins[i]), pin, off, psel_val[i] - hwcfg->func_base);
+
+ rzg2l_pinctrl_set_pfc_mode(pctrl, pin, off, psel_val[i] - hwcfg->func_base);
}
return 0;
@@ -468,14 +539,14 @@ static int rzg2l_validate_gpio_pin(struct rzg2l_pinctrl *pctrl,
u32 cfg, u32 port, u8 bit)
{
u8 pincount = RZG2L_GPIO_PORT_GET_PINCNT(cfg);
- u32 port_index = RZG2L_GPIO_PORT_GET_INDEX(cfg);
+ u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(cfg);
u32 data;
if (bit >= pincount || port >= pctrl->data->n_port_pins)
return -EINVAL;
data = pctrl->data->port_pin_configs[port];
- if (port_index != RZG2L_GPIO_PORT_GET_INDEX(data))
+ if (off != RZG2L_PIN_CFG_TO_PORT_OFFSET(data))
return -EINVAL;
return 0;
@@ -514,31 +585,178 @@ static void rzg2l_rmw_pin_config(struct rzg2l_pinctrl *pctrl, u32 offset,
spin_unlock_irqrestore(&pctrl->lock, flags);
}
+static int rzg2l_caps_to_pwr_reg(const struct rzg2l_register_offsets *regs, u32 caps)
+{
+ if (caps & PIN_CFG_IO_VMC_SD0)
+ return SD_CH(regs->sd_ch, 0);
+ if (caps & PIN_CFG_IO_VMC_SD1)
+ return SD_CH(regs->sd_ch, 1);
+ if (caps & PIN_CFG_IO_VMC_QSPI)
+ return QSPI;
+
+ return -EINVAL;
+}
+
+static int rzg2l_get_power_source(struct rzg2l_pinctrl *pctrl, u32 pin, u32 caps)
+{
+ const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
+ const struct rzg2l_register_offsets *regs = &hwcfg->regs;
+ int pwr_reg;
+
+ if (caps & PIN_CFG_SOFT_PS)
+ return pctrl->settings[pin].power_source;
+
+ pwr_reg = rzg2l_caps_to_pwr_reg(regs, caps);
+ if (pwr_reg < 0)
+ return pwr_reg;
+
+ return (readl(pctrl->base + pwr_reg) & PVDD_MASK) ? 1800 : 3300;
+}
+
+static int rzg2l_set_power_source(struct rzg2l_pinctrl *pctrl, u32 pin, u32 caps, u32 ps)
+{
+ const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
+ const struct rzg2l_register_offsets *regs = &hwcfg->regs;
+ int pwr_reg;
+
+ if (caps & PIN_CFG_SOFT_PS) {
+ pctrl->settings[pin].power_source = ps;
+ return 0;
+ }
+
+ pwr_reg = rzg2l_caps_to_pwr_reg(regs, caps);
+ if (pwr_reg < 0)
+ return pwr_reg;
+
+ writel((ps == 1800) ? PVDD_1800 : PVDD_3300, pctrl->base + pwr_reg);
+ pctrl->settings[pin].power_source = ps;
+
+ return 0;
+}
+
+static bool rzg2l_ps_is_supported(u16 ps)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(available_ps); i++) {
+ if (available_ps[i] == ps)
+ return true;
+ }
+
+ return false;
+}
+
+static enum rzg2l_iolh_index rzg2l_ps_to_iolh_idx(u16 ps)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(available_ps); i++) {
+ if (available_ps[i] == ps)
+ break;
+ }
+
+ /*
+ * We multiply by RZG2L_IOLH_MAX_DS_ENTRIES because there are
+ * RZG2L_IOLH_MAX_DS_ENTRIES DS values per power source.
+ */
+ return i * RZG2L_IOLH_MAX_DS_ENTRIES;
+}
+
+static u16 rzg2l_iolh_val_to_ua(const struct rzg2l_hwcfg *hwcfg, u32 caps, u8 val)
+{
+ if (caps & PIN_CFG_IOLH_A)
+ return hwcfg->iolh_groupa_ua[val];
+
+ if (caps & PIN_CFG_IOLH_B)
+ return hwcfg->iolh_groupb_ua[val];
+
+ if (caps & PIN_CFG_IOLH_C)
+ return hwcfg->iolh_groupc_ua[val];
+
+ /* Should not happen. */
+ return 0;
+}
+
+static int rzg2l_iolh_ua_to_val(const struct rzg2l_hwcfg *hwcfg, u32 caps,
+ enum rzg2l_iolh_index ps_index, u16 ua)
+{
+ const u16 *array = NULL;
+ unsigned int i;
+
+ if (caps & PIN_CFG_IOLH_A)
+ array = &hwcfg->iolh_groupa_ua[ps_index];
+
+ if (caps & PIN_CFG_IOLH_B)
+ array = &hwcfg->iolh_groupb_ua[ps_index];
+
+ if (caps & PIN_CFG_IOLH_C)
+ array = &hwcfg->iolh_groupc_ua[ps_index];
+
+ if (!array)
+ return -EINVAL;
+
+ for (i = 0; i < RZG2L_IOLH_MAX_DS_ENTRIES; i++) {
+ if (array[i] == ua)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static bool rzg2l_ds_is_supported(struct rzg2l_pinctrl *pctrl, u32 caps,
+ enum rzg2l_iolh_index iolh_idx,
+ u16 ds)
+{
+ const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
+ const u16 *array = NULL;
+ unsigned int i;
+
+ if (caps & PIN_CFG_IOLH_A)
+ array = hwcfg->iolh_groupa_ua;
+
+ if (caps & PIN_CFG_IOLH_B)
+ array = hwcfg->iolh_groupb_ua;
+
+ if (caps & PIN_CFG_IOLH_C)
+ array = hwcfg->iolh_groupc_ua;
+
+ /* Should not happen. */
+ if (!array)
+ return false;
+
+ if (!array[iolh_idx])
+ return false;
+
+ for (i = 0; i < RZG2L_IOLH_MAX_DS_ENTRIES; i++) {
+ if (array[iolh_idx + i] == ds)
+ return true;
+ }
+
+ return false;
+}
+
static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
unsigned int _pin,
unsigned long *config)
{
struct rzg2l_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param = pinconf_to_config_param(*config);
+ const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
const struct pinctrl_pin_desc *pin = &pctrl->desc.pins[_pin];
unsigned int *pin_data = pin->drv_data;
unsigned int arg = 0;
- unsigned long flags;
- void __iomem *addr;
- u32 port_offset;
- u32 cfg = 0;
- u8 bit = 0;
+ u32 off, cfg;
+ int ret;
+ u8 bit;
if (!pin_data)
return -EINVAL;
+ off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
+ cfg = RZG2L_PIN_CFG_TO_CAPS(*pin_data);
if (*pin_data & RZG2L_SINGLE_PIN) {
- port_offset = RZG2L_SINGLE_PIN_GET_PORT_OFFSET(*pin_data);
- cfg = RZG2L_SINGLE_PIN_GET_CFGS(*pin_data);
bit = RZG2L_SINGLE_PIN_GET_BIT(*pin_data);
} else {
- cfg = RZG2L_GPIO_PORT_GET_CFGS(*pin_data);
- port_offset = RZG2L_PIN_ID_TO_PORT_OFFSET(_pin);
bit = RZG2L_PIN_ID_TO_PIN(_pin);
if (rzg2l_validate_gpio_pin(pctrl, *pin_data, RZG2L_PIN_ID_TO_PORT(_pin), bit))
@@ -549,49 +767,58 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
case PIN_CONFIG_INPUT_ENABLE:
if (!(cfg & PIN_CFG_IEN))
return -EINVAL;
- arg = rzg2l_read_pin_config(pctrl, IEN(port_offset), bit, IEN_MASK);
+ arg = rzg2l_read_pin_config(pctrl, IEN(off), bit, IEN_MASK);
if (!arg)
return -EINVAL;
break;
- case PIN_CONFIG_POWER_SOURCE: {
- u32 pwr_reg = 0x0;
+ case PIN_CONFIG_POWER_SOURCE:
+ ret = rzg2l_get_power_source(pctrl, _pin, cfg);
+ if (ret < 0)
+ return ret;
+ arg = ret;
+ break;
+
+ case PIN_CONFIG_DRIVE_STRENGTH: {
+ unsigned int index;
- if (cfg & PIN_CFG_IO_VMC_SD0)
- pwr_reg = SD_CH(0);
- else if (cfg & PIN_CFG_IO_VMC_SD1)
- pwr_reg = SD_CH(1);
- else if (cfg & PIN_CFG_IO_VMC_QSPI)
- pwr_reg = QSPI;
- else
+ if (!(cfg & PIN_CFG_IOLH_A) || hwcfg->drive_strength_ua)
return -EINVAL;
- spin_lock_irqsave(&pctrl->lock, flags);
- addr = pctrl->base + pwr_reg;
- arg = (readl(addr) & PVDD_MASK) ? 1800 : 3300;
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ index = rzg2l_read_pin_config(pctrl, IOLH(off), bit, IOLH_MASK);
+ /*
+ * Drive strength in mA is supported only by group A and only
+ * for the 3V3 power source.
+ */
+ arg = hwcfg->iolh_groupa_ua[index + RZG2L_IOLH_IDX_3V3] / 1000;
break;
}
- case PIN_CONFIG_DRIVE_STRENGTH: {
- unsigned int index;
+ case PIN_CONFIG_DRIVE_STRENGTH_UA: {
+ enum rzg2l_iolh_index iolh_idx;
+ u8 val;
- if (!(cfg & PIN_CFG_IOLH_A))
+ if (!(cfg & (PIN_CFG_IOLH_A | PIN_CFG_IOLH_B | PIN_CFG_IOLH_C)) ||
+ !hwcfg->drive_strength_ua)
return -EINVAL;
- index = rzg2l_read_pin_config(pctrl, IOLH(port_offset), bit, IOLH_MASK);
- arg = iolh_groupa_mA[index];
+ ret = rzg2l_get_power_source(pctrl, _pin, cfg);
+ if (ret < 0)
+ return ret;
+ iolh_idx = rzg2l_ps_to_iolh_idx(ret);
+ val = rzg2l_read_pin_config(pctrl, IOLH(off), bit, IOLH_MASK);
+ arg = rzg2l_iolh_val_to_ua(hwcfg, cfg, iolh_idx + val);
break;
}
case PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS: {
unsigned int index;
- if (!(cfg & PIN_CFG_IOLH_B))
+ if (!(cfg & PIN_CFG_IOLH_B) || !hwcfg->iolh_groupb_oi[0])
return -EINVAL;
- index = rzg2l_read_pin_config(pctrl, IOLH(port_offset), bit, IOLH_MASK);
- arg = iolh_groupb_oi[index];
+ index = rzg2l_read_pin_config(pctrl, IOLH(off), bit, IOLH_MASK);
+ arg = hwcfg->iolh_groupb_oi[index];
break;
}
@@ -611,25 +838,23 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
{
struct rzg2l_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
const struct pinctrl_pin_desc *pin = &pctrl->desc.pins[_pin];
+ const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
+ struct rzg2l_pinctrl_pin_settings settings = pctrl->settings[_pin];
unsigned int *pin_data = pin->drv_data;
enum pin_config_param param;
- unsigned long flags;
- void __iomem *addr;
- u32 port_offset;
unsigned int i;
- u32 cfg = 0;
- u8 bit = 0;
+ u32 cfg, off;
+ int ret;
+ u8 bit;
if (!pin_data)
return -EINVAL;
+ off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
+ cfg = RZG2L_PIN_CFG_TO_CAPS(*pin_data);
if (*pin_data & RZG2L_SINGLE_PIN) {
- port_offset = RZG2L_SINGLE_PIN_GET_PORT_OFFSET(*pin_data);
- cfg = RZG2L_SINGLE_PIN_GET_CFGS(*pin_data);
bit = RZG2L_SINGLE_PIN_GET_BIT(*pin_data);
} else {
- cfg = RZG2L_GPIO_PORT_GET_CFGS(*pin_data);
- port_offset = RZG2L_PIN_ID_TO_PORT_OFFSET(_pin);
bit = RZG2L_PIN_ID_TO_PIN(_pin);
if (rzg2l_validate_gpio_pin(pctrl, *pin_data, RZG2L_PIN_ID_TO_PORT(_pin), bit))
@@ -646,66 +871,56 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
if (!(cfg & PIN_CFG_IEN))
return -EINVAL;
- rzg2l_rmw_pin_config(pctrl, IEN(port_offset), bit, IEN_MASK, !!arg);
+ rzg2l_rmw_pin_config(pctrl, IEN(off), bit, IEN_MASK, !!arg);
break;
}
- case PIN_CONFIG_POWER_SOURCE: {
- unsigned int mV = pinconf_to_config_argument(_configs[i]);
- u32 pwr_reg = 0x0;
-
- if (mV != 1800 && mV != 3300)
- return -EINVAL;
-
- if (cfg & PIN_CFG_IO_VMC_SD0)
- pwr_reg = SD_CH(0);
- else if (cfg & PIN_CFG_IO_VMC_SD1)
- pwr_reg = SD_CH(1);
- else if (cfg & PIN_CFG_IO_VMC_QSPI)
- pwr_reg = QSPI;
- else
- return -EINVAL;
-
- addr = pctrl->base + pwr_reg;
- spin_lock_irqsave(&pctrl->lock, flags);
- writel((mV == 1800) ? PVDD_1800 : PVDD_3300, addr);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ case PIN_CONFIG_POWER_SOURCE:
+ settings.power_source = pinconf_to_config_argument(_configs[i]);
break;
- }
case PIN_CONFIG_DRIVE_STRENGTH: {
unsigned int arg = pinconf_to_config_argument(_configs[i]);
unsigned int index;
- if (!(cfg & PIN_CFG_IOLH_A))
+ if (!(cfg & PIN_CFG_IOLH_A) || hwcfg->drive_strength_ua)
return -EINVAL;
- for (index = 0; index < ARRAY_SIZE(iolh_groupa_mA); index++) {
- if (arg == iolh_groupa_mA[index])
+ for (index = RZG2L_IOLH_IDX_3V3;
+ index < RZG2L_IOLH_IDX_3V3 + RZG2L_IOLH_MAX_DS_ENTRIES; index++) {
+ if (arg == (hwcfg->iolh_groupa_ua[index] / 1000))
break;
}
- if (index >= ARRAY_SIZE(iolh_groupa_mA))
+ if (index == (RZG2L_IOLH_IDX_3V3 + RZG2L_IOLH_MAX_DS_ENTRIES))
return -EINVAL;
- rzg2l_rmw_pin_config(pctrl, IOLH(port_offset), bit, IOLH_MASK, index);
+ rzg2l_rmw_pin_config(pctrl, IOLH(off), bit, IOLH_MASK, index);
break;
}
+ case PIN_CONFIG_DRIVE_STRENGTH_UA:
+ if (!(cfg & (PIN_CFG_IOLH_A | PIN_CFG_IOLH_B | PIN_CFG_IOLH_C)) ||
+ !hwcfg->drive_strength_ua)
+ return -EINVAL;
+
+ settings.drive_strength_ua = pinconf_to_config_argument(_configs[i]);
+ break;
+
case PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS: {
unsigned int arg = pinconf_to_config_argument(_configs[i]);
unsigned int index;
- if (!(cfg & PIN_CFG_IOLH_B))
+ if (!(cfg & PIN_CFG_IOLH_B) || !hwcfg->iolh_groupb_oi[0])
return -EINVAL;
- for (index = 0; index < ARRAY_SIZE(iolh_groupb_oi); index++) {
- if (arg == iolh_groupb_oi[index])
+ for (index = 0; index < ARRAY_SIZE(hwcfg->iolh_groupb_oi); index++) {
+ if (arg == hwcfg->iolh_groupb_oi[index])
break;
}
- if (index >= ARRAY_SIZE(iolh_groupb_oi))
+ if (index == ARRAY_SIZE(hwcfg->iolh_groupb_oi))
return -EINVAL;
- rzg2l_rmw_pin_config(pctrl, IOLH(port_offset), bit, IOLH_MASK, index);
+ rzg2l_rmw_pin_config(pctrl, IOLH(off), bit, IOLH_MASK, index);
break;
}
@@ -714,6 +929,39 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
}
}
+ /* Validate and apply the new power source. */
+ if (settings.power_source != pctrl->settings[_pin].power_source) {
+ ret = rzg2l_ps_is_supported(settings.power_source);
+ if (!ret)
+ return -EINVAL;
+
+ /* Apply power source. */
+ ret = rzg2l_set_power_source(pctrl, _pin, cfg, settings.power_source);
+ if (ret)
+ return ret;
+ }
+
+ /* Apply drive strength. */
+ if (settings.drive_strength_ua != pctrl->settings[_pin].drive_strength_ua) {
+ enum rzg2l_iolh_index iolh_idx;
+ int val;
+
+ iolh_idx = rzg2l_ps_to_iolh_idx(settings.power_source);
+ ret = rzg2l_ds_is_supported(pctrl, cfg, iolh_idx,
+ settings.drive_strength_ua);
+ if (!ret)
+ return -EINVAL;
+
+ /* Get register value for this PS/DS tuple. */
+ val = rzg2l_iolh_ua_to_val(hwcfg, cfg, iolh_idx, settings.drive_strength_ua);
+ if (val < 0)
+ return val;
+
+ /* Apply drive strength. */
+ rzg2l_rmw_pin_config(pctrl, IOLH(off), bit, IOLH_MASK, val);
+ pctrl->settings[_pin].drive_strength_ua = settings.drive_strength_ua;
+ }
+
return 0;
}
@@ -795,41 +1043,52 @@ static const struct pinconf_ops rzg2l_pinctrl_confops = {
static int rzg2l_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
struct rzg2l_pinctrl *pctrl = gpiochip_get_data(chip);
+ const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[offset];
+ u32 *pin_data = pin_desc->drv_data;
+ u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
u32 port = RZG2L_PIN_ID_TO_PORT(offset);
u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
unsigned long flags;
u8 reg8;
int ret;
- ret = pinctrl_gpio_request(chip->base + offset);
+ ret = rzg2l_validate_gpio_pin(pctrl, *pin_data, port, bit);
+ if (ret)
+ return ret;
+
+ ret = pinctrl_gpio_request(chip, offset);
if (ret)
return ret;
spin_lock_irqsave(&pctrl->lock, flags);
/* Select GPIO mode in PMC Register */
- reg8 = readb(pctrl->base + PMC(port));
+ reg8 = readb(pctrl->base + PMC(off));
reg8 &= ~BIT(bit);
- writeb(reg8, pctrl->base + PMC(port));
+ writeb(reg8, pctrl->base + PMC(off));
spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
-static void rzg2l_gpio_set_direction(struct rzg2l_pinctrl *pctrl, u32 port,
- u8 bit, bool output)
+static void rzg2l_gpio_set_direction(struct rzg2l_pinctrl *pctrl, u32 offset,
+ bool output)
{
+ const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[offset];
+ unsigned int *pin_data = pin_desc->drv_data;
+ u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
+ u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
unsigned long flags;
u16 reg16;
spin_lock_irqsave(&pctrl->lock, flags);
- reg16 = readw(pctrl->base + PM(port));
+ reg16 = readw(pctrl->base + PM(off));
reg16 &= ~(PM_MASK << (bit * 2));
reg16 |= (output ? PM_OUTPUT : PM_INPUT) << (bit * 2);
- writew(reg16, pctrl->base + PM(port));
+ writew(reg16, pctrl->base + PM(off));
spin_unlock_irqrestore(&pctrl->lock, flags);
}
@@ -837,13 +1096,15 @@ static void rzg2l_gpio_set_direction(struct rzg2l_pinctrl *pctrl, u32 port,
static int rzg2l_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct rzg2l_pinctrl *pctrl = gpiochip_get_data(chip);
- u32 port = RZG2L_PIN_ID_TO_PORT(offset);
+ const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[offset];
+ unsigned int *pin_data = pin_desc->drv_data;
+ u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
- if (!(readb(pctrl->base + PMC(port)) & BIT(bit))) {
+ if (!(readb(pctrl->base + PMC(off)) & BIT(bit))) {
u16 reg16;
- reg16 = readw(pctrl->base + PM(port));
+ reg16 = readw(pctrl->base + PM(off));
reg16 = (reg16 >> (bit * 2)) & PM_MASK;
if (reg16 == PM_OUTPUT)
return GPIO_LINE_DIRECTION_OUT;
@@ -856,10 +1117,8 @@ static int rzg2l_gpio_direction_input(struct gpio_chip *chip,
unsigned int offset)
{
struct rzg2l_pinctrl *pctrl = gpiochip_get_data(chip);
- u32 port = RZG2L_PIN_ID_TO_PORT(offset);
- u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
- rzg2l_gpio_set_direction(pctrl, port, bit, false);
+ rzg2l_gpio_set_direction(pctrl, offset, false);
return 0;
}
@@ -868,19 +1127,21 @@ static void rzg2l_gpio_set(struct gpio_chip *chip, unsigned int offset,
int value)
{
struct rzg2l_pinctrl *pctrl = gpiochip_get_data(chip);
- u32 port = RZG2L_PIN_ID_TO_PORT(offset);
+ const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[offset];
+ unsigned int *pin_data = pin_desc->drv_data;
+ u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
unsigned long flags;
u8 reg8;
spin_lock_irqsave(&pctrl->lock, flags);
- reg8 = readb(pctrl->base + P(port));
+ reg8 = readb(pctrl->base + P(off));
if (value)
- writeb(reg8 | BIT(bit), pctrl->base + P(port));
+ writeb(reg8 | BIT(bit), pctrl->base + P(off));
else
- writeb(reg8 & ~BIT(bit), pctrl->base + P(port));
+ writeb(reg8 & ~BIT(bit), pctrl->base + P(off));
spin_unlock_irqrestore(&pctrl->lock, flags);
}
@@ -889,11 +1150,9 @@ static int rzg2l_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
struct rzg2l_pinctrl *pctrl = gpiochip_get_data(chip);
- u32 port = RZG2L_PIN_ID_TO_PORT(offset);
- u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
rzg2l_gpio_set(chip, offset, value);
- rzg2l_gpio_set_direction(pctrl, port, bit, true);
+ rzg2l_gpio_set_direction(pctrl, offset, true);
return 0;
}
@@ -901,17 +1160,19 @@ static int rzg2l_gpio_direction_output(struct gpio_chip *chip,
static int rzg2l_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct rzg2l_pinctrl *pctrl = gpiochip_get_data(chip);
- u32 port = RZG2L_PIN_ID_TO_PORT(offset);
+ const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[offset];
+ unsigned int *pin_data = pin_desc->drv_data;
+ u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
u16 reg16;
- reg16 = readw(pctrl->base + PM(port));
+ reg16 = readw(pctrl->base + PM(off));
reg16 = (reg16 >> (bit * 2)) & PM_MASK;
if (reg16 == PM_INPUT)
- return !!(readb(pctrl->base + PIN(port)) & BIT(bit));
+ return !!(readb(pctrl->base + PIN(off)) & BIT(bit));
else if (reg16 == PM_OUTPUT)
- return !!(readb(pctrl->base + P(port)) & BIT(bit));
+ return !!(readb(pctrl->base + P(off)) & BIT(bit));
else
return -EINVAL;
}
@@ -920,7 +1181,7 @@ static void rzg2l_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
unsigned int virq;
- pinctrl_gpio_free(chip->base + offset);
+ pinctrl_gpio_free(chip, offset);
virq = irq_find_mapping(chip->irq.domain, offset);
if (virq)
@@ -985,7 +1246,7 @@ static const char * const rzg2l_gpio_names[] = {
"P48_0", "P48_1", "P48_2", "P48_3", "P48_4", "P48_5", "P48_6", "P48_7",
};
-static const u32 rzg2l_gpio_configs[] = {
+static const u32 r9a07g044_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(2, 0x10, RZG2L_MPXED_PIN_FUNCS),
RZG2L_GPIO_PORT_PACK(2, 0x11, RZG2L_MPXED_PIN_FUNCS),
RZG2L_GPIO_PORT_PACK(2, 0x12, RZG2L_MPXED_PIN_FUNCS),
@@ -1059,7 +1320,37 @@ static const u32 r9a07g043_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(6, 0x22, RZG2L_MPXED_PIN_FUNCS),
};
-static struct {
+static const u32 r9a08g045_gpio_configs[] = {
+ RZG2L_GPIO_PORT_PACK(4, 0x20, RZG3S_MPXED_PIN_FUNCS(A)), /* P0 */
+ RZG2L_GPIO_PORT_PACK(5, 0x30, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_C |
+ PIN_CFG_IO_VMC_ETH0)), /* P1 */
+ RZG2L_GPIO_PORT_PACK(4, 0x31, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_C |
+ PIN_CFG_IO_VMC_ETH0)), /* P2 */
+ RZG2L_GPIO_PORT_PACK(4, 0x32, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_C |
+ PIN_CFG_IO_VMC_ETH0)), /* P3 */
+ RZG2L_GPIO_PORT_PACK(6, 0x33, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_C |
+ PIN_CFG_IO_VMC_ETH0)), /* P4 */
+ RZG2L_GPIO_PORT_PACK(5, 0x21, RZG3S_MPXED_PIN_FUNCS(A)), /* P5 */
+ RZG2L_GPIO_PORT_PACK(5, 0x22, RZG3S_MPXED_PIN_FUNCS(A)), /* P6 */
+ RZG2L_GPIO_PORT_PACK(5, 0x34, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_C |
+ PIN_CFG_IO_VMC_ETH1)), /* P7 */
+ RZG2L_GPIO_PORT_PACK(5, 0x35, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_C |
+ PIN_CFG_IO_VMC_ETH1)), /* P8 */
+ RZG2L_GPIO_PORT_PACK(4, 0x36, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_C |
+ PIN_CFG_IO_VMC_ETH1)), /* P9 */
+ RZG2L_GPIO_PORT_PACK(5, 0x37, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_C |
+ PIN_CFG_IO_VMC_ETH1)), /* P10 */
+ RZG2L_GPIO_PORT_PACK(4, 0x23, RZG3S_MPXED_PIN_FUNCS(B) | PIN_CFG_IEN), /* P11 */
+ RZG2L_GPIO_PORT_PACK(2, 0x24, RZG3S_MPXED_PIN_FUNCS(B) | PIN_CFG_IEN), /* P12 */
+ RZG2L_GPIO_PORT_PACK(5, 0x25, RZG3S_MPXED_PIN_FUNCS(A)), /* P13 */
+ RZG2L_GPIO_PORT_PACK(3, 0x26, RZG3S_MPXED_PIN_FUNCS(A)), /* P14 */
+ RZG2L_GPIO_PORT_PACK(4, 0x27, RZG3S_MPXED_PIN_FUNCS(A)), /* P15 */
+ RZG2L_GPIO_PORT_PACK(2, 0x28, RZG3S_MPXED_PIN_FUNCS(A)), /* P16 */
+ RZG2L_GPIO_PORT_PACK(4, 0x29, RZG3S_MPXED_PIN_FUNCS(A)), /* P17 */
+ RZG2L_GPIO_PORT_PACK(6, 0x2a, RZG3S_MPXED_PIN_FUNCS(A)), /* P18 */
+};
+
+static const struct {
struct rzg2l_dedicated_configs common[35];
struct rzg2l_dedicated_configs rzg2l_pins[7];
} rzg2l_dedicated_pins = {
@@ -1145,6 +1436,46 @@ static struct {
}
};
+static const struct rzg2l_dedicated_configs rzg3s_dedicated_pins[] = {
+ { "NMI", RZG2L_SINGLE_PIN_PACK(0x0, 0, (PIN_CFG_FILONOFF | PIN_CFG_FILNUM |
+ PIN_CFG_FILCLKSEL)) },
+ { "TMS/SWDIO", RZG2L_SINGLE_PIN_PACK(0x1, 0, (PIN_CFG_IOLH_A | PIN_CFG_IEN |
+ PIN_CFG_SOFT_PS)) },
+ { "TDO", RZG2L_SINGLE_PIN_PACK(0x1, 1, (PIN_CFG_IOLH_A | PIN_CFG_SOFT_PS)) },
+ { "WDTOVF_PERROUT#", RZG2L_SINGLE_PIN_PACK(0x6, 0, PIN_CFG_IOLH_A | PIN_CFG_SOFT_PS) },
+ { "SD0_CLK", RZG2L_SINGLE_PIN_PACK(0x10, 0, (PIN_CFG_IOLH_B | PIN_CFG_IO_VMC_SD0)) },
+ { "SD0_CMD", RZG2L_SINGLE_PIN_PACK(0x10, 1, (PIN_CFG_IOLH_B | PIN_CFG_IEN |
+ PIN_CFG_IO_VMC_SD0)) },
+ { "SD0_RST#", RZG2L_SINGLE_PIN_PACK(0x10, 2, (PIN_CFG_IOLH_B | PIN_CFG_IO_VMC_SD0)) },
+ { "SD0_DATA0", RZG2L_SINGLE_PIN_PACK(0x11, 0, (PIN_CFG_IOLH_B | PIN_CFG_IEN |
+ PIN_CFG_IO_VMC_SD0)) },
+ { "SD0_DATA1", RZG2L_SINGLE_PIN_PACK(0x11, 1, (PIN_CFG_IOLH_B | PIN_CFG_IEN |
+ PIN_CFG_IO_VMC_SD0)) },
+ { "SD0_DATA2", RZG2L_SINGLE_PIN_PACK(0x11, 2, (PIN_CFG_IOLH_B | PIN_CFG_IEN |
+ PIN_CFG_IO_VMC_SD0)) },
+ { "SD0_DATA3", RZG2L_SINGLE_PIN_PACK(0x11, 3, (PIN_CFG_IOLH_B | PIN_CFG_IEN |
+ PIN_CFG_IO_VMC_SD0)) },
+ { "SD0_DATA4", RZG2L_SINGLE_PIN_PACK(0x11, 4, (PIN_CFG_IOLH_B | PIN_CFG_IEN |
+ PIN_CFG_IO_VMC_SD0)) },
+ { "SD0_DATA5", RZG2L_SINGLE_PIN_PACK(0x11, 5, (PIN_CFG_IOLH_B | PIN_CFG_IEN |
+ PIN_CFG_IO_VMC_SD0)) },
+ { "SD0_DATA6", RZG2L_SINGLE_PIN_PACK(0x11, 6, (PIN_CFG_IOLH_B | PIN_CFG_IEN |
+ PIN_CFG_IO_VMC_SD0)) },
+ { "SD0_DATA7", RZG2L_SINGLE_PIN_PACK(0x11, 7, (PIN_CFG_IOLH_B | PIN_CFG_IEN |
+ PIN_CFG_IO_VMC_SD0)) },
+ { "SD1_CLK", RZG2L_SINGLE_PIN_PACK(0x12, 0, (PIN_CFG_IOLH_B | PIN_CFG_IO_VMC_SD1)) },
+ { "SD1_CMD", RZG2L_SINGLE_PIN_PACK(0x12, 1, (PIN_CFG_IOLH_B | PIN_CFG_IEN |
+ PIN_CFG_IO_VMC_SD1)) },
+ { "SD1_DATA0", RZG2L_SINGLE_PIN_PACK(0x13, 0, (PIN_CFG_IOLH_B | PIN_CFG_IEN |
+ PIN_CFG_IO_VMC_SD1)) },
+ { "SD1_DATA1", RZG2L_SINGLE_PIN_PACK(0x13, 1, (PIN_CFG_IOLH_B | PIN_CFG_IEN |
+ PIN_CFG_IO_VMC_SD1)) },
+ { "SD1_DATA2", RZG2L_SINGLE_PIN_PACK(0x13, 2, (PIN_CFG_IOLH_B | PIN_CFG_IEN |
+ PIN_CFG_IO_VMC_SD1)) },
+ { "SD1_DATA3", RZG2L_SINGLE_PIN_PACK(0x13, 3, (PIN_CFG_IOLH_B | PIN_CFG_IEN |
+ PIN_CFG_IO_VMC_SD1)) },
+};
+
static int rzg2l_gpio_get_gpioint(unsigned int virq, const struct rzg2l_pinctrl_data *data)
{
unsigned int gpioint;
@@ -1170,15 +1501,16 @@ static void rzg2l_gpio_irq_disable(struct irq_data *d)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct rzg2l_pinctrl *pctrl = container_of(gc, struct rzg2l_pinctrl, gpio_chip);
unsigned int hwirq = irqd_to_hwirq(d);
+ const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[hwirq];
+ unsigned int *pin_data = pin_desc->drv_data;
+ u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
+ u8 bit = RZG2L_PIN_ID_TO_PIN(hwirq);
unsigned long flags;
void __iomem *addr;
- u32 port;
- u8 bit;
- port = RZG2L_PIN_ID_TO_PORT(hwirq);
- bit = RZG2L_PIN_ID_TO_PIN(hwirq);
+ irq_chip_disable_parent(d);
- addr = pctrl->base + ISEL(port);
+ addr = pctrl->base + ISEL(off);
if (bit >= 4) {
bit -= 4;
addr += 4;
@@ -1189,7 +1521,6 @@ static void rzg2l_gpio_irq_disable(struct irq_data *d)
spin_unlock_irqrestore(&pctrl->lock, flags);
gpiochip_disable_irq(gc, hwirq);
- irq_chip_disable_parent(d);
}
static void rzg2l_gpio_irq_enable(struct irq_data *d)
@@ -1197,17 +1528,16 @@ static void rzg2l_gpio_irq_enable(struct irq_data *d)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct rzg2l_pinctrl *pctrl = container_of(gc, struct rzg2l_pinctrl, gpio_chip);
unsigned int hwirq = irqd_to_hwirq(d);
+ const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[hwirq];
+ unsigned int *pin_data = pin_desc->drv_data;
+ u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
+ u8 bit = RZG2L_PIN_ID_TO_PIN(hwirq);
unsigned long flags;
void __iomem *addr;
- u32 port;
- u8 bit;
gpiochip_enable_irq(gc, hwirq);
- port = RZG2L_PIN_ID_TO_PORT(hwirq);
- bit = RZG2L_PIN_ID_TO_PIN(hwirq);
-
- addr = pctrl->base + ISEL(port);
+ addr = pctrl->base + ISEL(off);
if (bit >= 4) {
bit -= 4;
addr += 4;
@@ -1414,6 +1744,7 @@ static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl)
static int rzg2l_pinctrl_register(struct rzg2l_pinctrl *pctrl)
{
+ const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
struct pinctrl_pin_desc *pins;
unsigned int i, j;
u32 *pin_data;
@@ -1456,6 +1787,22 @@ static int rzg2l_pinctrl_register(struct rzg2l_pinctrl *pctrl)
pins[index].drv_data = &pin_data[index];
}
+ pctrl->settings = devm_kcalloc(pctrl->dev, pctrl->desc.npins, sizeof(*pctrl->settings),
+ GFP_KERNEL);
+ if (!pctrl->settings)
+ return -ENOMEM;
+
+ for (i = 0; hwcfg->drive_strength_ua && i < pctrl->desc.npins; i++) {
+ if (pin_data[i] & PIN_CFG_SOFT_PS) {
+ pctrl->settings[i].power_source = 3300;
+ } else {
+ ret = rzg2l_get_power_source(pctrl, i, pin_data[i]);
+ if (ret < 0)
+ continue;
+ pctrl->settings[i].power_source = ret;
+ }
+ }
+
ret = devm_pinctrl_register_and_init(pctrl->dev, &pctrl->desc, pctrl,
&pctrl->pctl);
if (ret) {
@@ -1484,12 +1831,15 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev)
struct clk *clk;
int ret;
- BUILD_BUG_ON(ARRAY_SIZE(rzg2l_gpio_configs) * RZG2L_PINS_PER_PORT >
+ BUILD_BUG_ON(ARRAY_SIZE(r9a07g044_gpio_configs) * RZG2L_PINS_PER_PORT >
ARRAY_SIZE(rzg2l_gpio_names));
BUILD_BUG_ON(ARRAY_SIZE(r9a07g043_gpio_configs) * RZG2L_PINS_PER_PORT >
ARRAY_SIZE(rzg2l_gpio_names));
+ BUILD_BUG_ON(ARRAY_SIZE(r9a08g045_gpio_configs) * RZG2L_PINS_PER_PORT >
+ ARRAY_SIZE(rzg2l_gpio_names));
+
pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
if (!pctrl)
return -ENOMEM;
@@ -1523,6 +1873,47 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev)
return 0;
}
+static const struct rzg2l_hwcfg rzg2l_hwcfg = {
+ .regs = {
+ .pwpr = 0x3014,
+ .sd_ch = 0x3000,
+ },
+ .iolh_groupa_ua = {
+ /* 3v3 power source */
+ [RZG2L_IOLH_IDX_3V3] = 2000, 4000, 8000, 12000,
+ },
+ .iolh_groupb_oi = { 100, 66, 50, 33, },
+};
+
+static const struct rzg2l_hwcfg rzg3s_hwcfg = {
+ .regs = {
+ .pwpr = 0x3000,
+ .sd_ch = 0x3004,
+ },
+ .iolh_groupa_ua = {
+ /* 1v8 power source */
+ [RZG2L_IOLH_IDX_1V8] = 2200, 4400, 9000, 10000,
+ /* 3v3 power source */
+ [RZG2L_IOLH_IDX_3V3] = 1900, 4000, 8000, 9000,
+ },
+ .iolh_groupb_ua = {
+ /* 1v8 power source */
+ [RZG2L_IOLH_IDX_1V8] = 7000, 8000, 9000, 10000,
+ /* 3v3 power source */
+ [RZG2L_IOLH_IDX_3V3] = 4000, 6000, 8000, 9000,
+ },
+ .iolh_groupc_ua = {
+ /* 1v8 power source */
+ [RZG2L_IOLH_IDX_1V8] = 5200, 6000, 6550, 6800,
+ /* 2v5 power source */
+ [RZG2L_IOLH_IDX_2V5] = 4700, 5300, 5800, 6100,
+ /* 3v3 power source */
+ [RZG2L_IOLH_IDX_3V3] = 4500, 5200, 5700, 6050,
+ },
+ .drive_strength_ua = true,
+ .func_base = 1,
+};
+
static struct rzg2l_pinctrl_data r9a07g043_data = {
.port_pins = rzg2l_gpio_names,
.port_pin_configs = r9a07g043_gpio_configs,
@@ -1530,16 +1921,28 @@ static struct rzg2l_pinctrl_data r9a07g043_data = {
.dedicated_pins = rzg2l_dedicated_pins.common,
.n_port_pins = ARRAY_SIZE(r9a07g043_gpio_configs) * RZG2L_PINS_PER_PORT,
.n_dedicated_pins = ARRAY_SIZE(rzg2l_dedicated_pins.common),
+ .hwcfg = &rzg2l_hwcfg,
};
static struct rzg2l_pinctrl_data r9a07g044_data = {
.port_pins = rzg2l_gpio_names,
- .port_pin_configs = rzg2l_gpio_configs,
- .n_ports = ARRAY_SIZE(rzg2l_gpio_configs),
+ .port_pin_configs = r9a07g044_gpio_configs,
+ .n_ports = ARRAY_SIZE(r9a07g044_gpio_configs),
.dedicated_pins = rzg2l_dedicated_pins.common,
- .n_port_pins = ARRAY_SIZE(rzg2l_gpio_configs) * RZG2L_PINS_PER_PORT,
+ .n_port_pins = ARRAY_SIZE(r9a07g044_gpio_configs) * RZG2L_PINS_PER_PORT,
.n_dedicated_pins = ARRAY_SIZE(rzg2l_dedicated_pins.common) +
ARRAY_SIZE(rzg2l_dedicated_pins.rzg2l_pins),
+ .hwcfg = &rzg2l_hwcfg,
+};
+
+static struct rzg2l_pinctrl_data r9a08g045_data = {
+ .port_pins = rzg2l_gpio_names,
+ .port_pin_configs = r9a08g045_gpio_configs,
+ .n_ports = ARRAY_SIZE(r9a08g045_gpio_configs),
+ .dedicated_pins = rzg3s_dedicated_pins,
+ .n_port_pins = ARRAY_SIZE(r9a08g045_gpio_configs) * RZG2L_PINS_PER_PORT,
+ .n_dedicated_pins = ARRAY_SIZE(rzg3s_dedicated_pins),
+ .hwcfg = &rzg3s_hwcfg,
};
static const struct of_device_id rzg2l_pinctrl_of_table[] = {
@@ -1551,6 +1954,10 @@ static const struct of_device_id rzg2l_pinctrl_of_table[] = {
.compatible = "renesas,r9a07g044-pinctrl",
.data = &r9a07g044_data,
},
+ {
+ .compatible = "renesas,r9a08g045-pinctrl",
+ .data = &r9a08g045_data,
+ },
{ /* sentinel */ }
};
diff --git a/drivers/pinctrl/renesas/pinctrl-rzn1.c b/drivers/pinctrl/renesas/pinctrl-rzn1.c
index 374b9f281324..4b2f107824fe 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzn1.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzn1.c
@@ -920,13 +920,11 @@ err_clk:
return ret;
}
-static int rzn1_pinctrl_remove(struct platform_device *pdev)
+static void rzn1_pinctrl_remove(struct platform_device *pdev)
{
struct rzn1_pinctrl *ipctl = platform_get_drvdata(pdev);
clk_disable_unprepare(ipctl->clk);
-
- return 0;
}
static const struct of_device_id rzn1_pinctrl_match[] = {
@@ -937,7 +935,7 @@ MODULE_DEVICE_TABLE(of, rzn1_pinctrl_match);
static struct platform_driver rzn1_pinctrl_driver = {
.probe = rzn1_pinctrl_probe,
- .remove = rzn1_pinctrl_remove,
+ .remove_new = rzn1_pinctrl_remove,
.driver = {
.name = "rzn1-pinctrl",
.of_match_table = rzn1_pinctrl_match,
diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
index 52aeafaba4b6..21d7d5ac8c4a 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzv2m.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
@@ -754,7 +754,7 @@ static int rzv2m_gpio_request(struct gpio_chip *chip, unsigned int offset)
u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
int ret;
- ret = pinctrl_gpio_request(chip->base + offset);
+ ret = pinctrl_gpio_request(chip, offset);
if (ret)
return ret;
@@ -832,7 +832,7 @@ static int rzv2m_gpio_get(struct gpio_chip *chip, unsigned int offset)
static void rzv2m_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
- pinctrl_gpio_free(chip->base + offset);
+ pinctrl_gpio_free(chip, offset);
/*
* Set the GPIO as an input to ensure that the next GPIO request won't
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index a8212fc126bf..6b58ec84e34b 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -616,6 +616,7 @@ __init int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
+ muxed_banks*sizeof(struct samsung_pin_bank *), GFP_KERNEL);
if (!muxed_data)
return -ENOMEM;
+ muxed_data->nr_banks = muxed_banks;
irq_set_chained_handler_and_data(irq, exynos_irq_demux_eint16_31,
muxed_data);
@@ -628,7 +629,6 @@ __init int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
muxed_data->banks[idx++] = bank;
}
- muxed_data->nr_banks = muxed_banks;
return 0;
}
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index 7bd6d82c9f36..3ac52c2cf998 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -159,7 +159,7 @@ struct exynos_weint_data {
*/
struct exynos_muxed_weint_data {
unsigned int nr_banks;
- struct samsung_pin_bank *banks[];
+ struct samsung_pin_bank *banks[] __counted_by(nr_banks);
};
int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d);
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index e54847040b4a..79babbb39ced 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -45,8 +45,6 @@ static struct pin_config {
{ "samsung,pin-val", PINCFG_TYPE_DAT },
};
-static unsigned int pin_base;
-
static int samsung_get_group_count(struct pinctrl_dev *pctldev)
{
struct samsung_pinctrl_drv_data *pmx = pinctrl_dev_get_drvdata(pctldev);
@@ -389,8 +387,7 @@ static void samsung_pinmux_setup(struct pinctrl_dev *pctldev, unsigned selector,
func = &drvdata->pmx_functions[selector];
grp = &drvdata->pin_groups[group];
- pin_to_reg_bank(drvdata, grp->pins[0] - drvdata->pin_base,
- &reg, &pin_offset, &bank);
+ pin_to_reg_bank(drvdata, grp->pins[0], &reg, &pin_offset, &bank);
type = bank->type;
mask = (1 << type->fld_width[PINCFG_TYPE_FUNC]) - 1;
shift = pin_offset * type->fld_width[PINCFG_TYPE_FUNC];
@@ -441,8 +438,7 @@ static int samsung_pinconf_rw(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long flags;
drvdata = pinctrl_dev_get_drvdata(pctldev);
- pin_to_reg_bank(drvdata, pin - drvdata->pin_base, &reg_base,
- &pin_offset, &bank);
+ pin_to_reg_bank(drvdata, pin, &reg_base, &pin_offset, &bank);
type = bank->type;
if (cfg_type >= PINCFG_TYPE_NUM || !type->fld_width[cfg_type])
@@ -665,6 +661,21 @@ static int samsung_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
return (virq) ? : -ENXIO;
}
+static int samsung_add_pin_ranges(struct gpio_chip *gc)
+{
+ struct samsung_pin_bank *bank = gpiochip_get_data(gc);
+
+ bank->grange.name = bank->name;
+ bank->grange.id = bank->id;
+ bank->grange.pin_base = bank->pin_base;
+ bank->grange.base = gc->base;
+ bank->grange.npins = bank->nr_pins;
+ bank->grange.gc = &bank->gpio_chip;
+ pinctrl_add_gpio_range(bank->drvdata->pctl_dev, &bank->grange);
+
+ return 0;
+}
+
static struct samsung_pin_group *samsung_pinctrl_create_groups(
struct device *dev,
struct samsung_pinctrl_drv_data *drvdata,
@@ -876,7 +887,7 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
/* dynamically populate the pin number and pin name for pindesc */
for (pin = 0, pdesc = pindesc; pin < ctrldesc->npins; pin++, pdesc++)
- pdesc->number = pin + drvdata->pin_base;
+ pdesc->number = pin;
/*
* allocate space for storing the dynamically generated names for all
@@ -892,6 +903,7 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
/* for each pin, the name of the pin is pin-bank name + pin number */
for (bank = 0; bank < drvdata->nr_banks; bank++) {
pin_bank = &drvdata->pin_banks[bank];
+ pin_bank->id = bank;
for (pin = 0; pin < pin_bank->nr_pins; pin++) {
sprintf(pin_names, "%s-%d", pin_bank->name, pin);
pdesc = pindesc + pin_bank->pin_base + pin;
@@ -904,23 +916,11 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
if (ret)
return ret;
- drvdata->pctl_dev = devm_pinctrl_register(&pdev->dev, ctrldesc,
- drvdata);
- if (IS_ERR(drvdata->pctl_dev)) {
+ ret = devm_pinctrl_register_and_init(&pdev->dev, ctrldesc, drvdata,
+ &drvdata->pctl_dev);
+ if (ret) {
dev_err(&pdev->dev, "could not register pinctrl driver\n");
- return PTR_ERR(drvdata->pctl_dev);
- }
-
- for (bank = 0; bank < drvdata->nr_banks; ++bank) {
- pin_bank = &drvdata->pin_banks[bank];
- pin_bank->grange.name = pin_bank->name;
- pin_bank->grange.id = bank;
- pin_bank->grange.pin_base = drvdata->pin_base
- + pin_bank->pin_base;
- pin_bank->grange.base = pin_bank->grange.pin_base;
- pin_bank->grange.npins = pin_bank->nr_pins;
- pin_bank->grange.gc = &pin_bank->gpio_chip;
- pinctrl_add_gpio_range(drvdata->pctl_dev, &pin_bank->grange);
+ return ret;
}
return 0;
@@ -947,6 +947,7 @@ static const struct gpio_chip samsung_gpiolib_chip = {
.direction_input = samsung_gpio_direction_input,
.direction_output = samsung_gpio_direction_output,
.to_irq = samsung_gpio_to_irq,
+ .add_pin_ranges = samsung_add_pin_ranges,
.owner = THIS_MODULE,
};
@@ -963,7 +964,7 @@ static int samsung_gpiolib_register(struct platform_device *pdev,
bank->gpio_chip = samsung_gpiolib_chip;
gc = &bank->gpio_chip;
- gc->base = bank->grange.base;
+ gc->base = -1; /* Dynamic allocation */
gc->ngpio = bank->nr_pins;
gc->parent = &pdev->dev;
gc->fwnode = bank->fwnode;
@@ -1124,9 +1125,6 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
samsung_banks_node_get(&pdev->dev, d);
- d->pin_base = pin_base;
- pin_base += d->nr_pins;
-
return ctrl;
}
@@ -1176,6 +1174,10 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
if (ret)
goto err_unregister;
+ ret = pinctrl_enable(drvdata->pctl_dev);
+ if (ret)
+ goto err_unregister;
+
platform_set_drvdata(pdev, drvdata);
return 0;
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index 9af93e3d8d9f..9b3db50adef3 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -148,6 +148,7 @@ struct samsung_pin_bank_data {
* @eint_mask: bit mask of pins which support EINT function.
* @eint_offset: SoC-specific EINT register or interrupt offset of bank.
* @name: name to be prefixed for each pin in this pin bank.
+ * @id: id of the bank, propagated to the pin range.
* @pin_base: starting pin number of the bank.
* @soc_priv: per-bank private data for SoC-specific code.
* @of_node: OF node of the bank.
@@ -170,6 +171,7 @@ struct samsung_pin_bank {
u32 eint_mask;
u32 eint_offset;
const char *name;
+ u32 id;
u32 pin_base;
void *soc_priv;
@@ -267,7 +269,6 @@ struct samsung_pin_ctrl {
* @nr_groups: number of such pin groups.
* @pmx_functions: list of pin functions available to the driver.
* @nr_function: number of such pin functions.
- * @pin_base: starting system wide pin number.
* @nr_pins: number of pins supported by the controller.
* @retention_ctrl: retention control runtime data.
* @suspend: platform specific suspend callback, executed during pin controller
@@ -291,7 +292,6 @@ struct samsung_pinctrl_drv_data {
struct samsung_pin_bank *pin_banks;
unsigned int nr_banks;
- unsigned int pin_base;
unsigned int nr_pins;
struct samsung_retention_ctrl *retention_ctrl;
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c
index 722681e0b89b..a05570c7d833 100644
--- a/drivers/pinctrl/spear/pinctrl-plgpio.c
+++ b/drivers/pinctrl/spear/pinctrl-plgpio.c
@@ -204,14 +204,13 @@ static void plgpio_set_value(struct gpio_chip *chip, unsigned offset, int value)
static int plgpio_request(struct gpio_chip *chip, unsigned offset)
{
struct plgpio *plgpio = gpiochip_get_data(chip);
- int gpio = chip->base + offset;
unsigned long flags;
int ret = 0;
if (offset >= chip->ngpio)
return -EINVAL;
- ret = pinctrl_gpio_request(gpio);
+ ret = pinctrl_gpio_request(chip, offset);
if (ret)
return ret;
@@ -249,14 +248,13 @@ err1:
if (!IS_ERR(plgpio->clk))
clk_disable(plgpio->clk);
err0:
- pinctrl_gpio_free(gpio);
+ pinctrl_gpio_free(chip, offset);
return ret;
}
static void plgpio_free(struct gpio_chip *chip, unsigned offset)
{
struct plgpio *plgpio = gpiochip_get_data(chip);
- int gpio = chip->base + offset;
unsigned long flags;
if (offset >= chip->ngpio)
@@ -280,7 +278,7 @@ disable_clk:
if (!IS_ERR(plgpio->clk))
clk_disable(plgpio->clk);
- pinctrl_gpio_free(gpio);
+ pinctrl_gpio_free(chip, offset);
}
/* PLGPIO IRQ */
diff --git a/drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c b/drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c
index d14f382f2392..9c3c39dc6550 100644
--- a/drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c
+++ b/drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c
@@ -943,7 +943,7 @@ static struct platform_driver sprd_pinctrl_driver = {
.of_match_table = sprd_pinctrl_of_match,
},
.probe = sprd_pinctrl_probe,
- .remove = sprd_pinctrl_remove,
+ .remove_new = sprd_pinctrl_remove,
.shutdown = sprd_pinctrl_shutdown,
};
module_platform_driver(sprd_pinctrl_driver);
diff --git a/drivers/pinctrl/sprd/pinctrl-sprd.c b/drivers/pinctrl/sprd/pinctrl-sprd.c
index ccdcc91c7fa5..d0b6d3e655a2 100644
--- a/drivers/pinctrl/sprd/pinctrl-sprd.c
+++ b/drivers/pinctrl/sprd/pinctrl-sprd.c
@@ -1110,12 +1110,11 @@ int sprd_pinctrl_core_probe(struct platform_device *pdev,
}
EXPORT_SYMBOL_GPL(sprd_pinctrl_core_probe);
-int sprd_pinctrl_remove(struct platform_device *pdev)
+void sprd_pinctrl_remove(struct platform_device *pdev)
{
struct sprd_pinctrl *sprd_pctl = platform_get_drvdata(pdev);
pinctrl_unregister(sprd_pctl->pctl);
- return 0;
}
EXPORT_SYMBOL_GPL(sprd_pinctrl_remove);
diff --git a/drivers/pinctrl/sprd/pinctrl-sprd.h b/drivers/pinctrl/sprd/pinctrl-sprd.h
index 69544a3cd635..190bcdb3bb6a 100644
--- a/drivers/pinctrl/sprd/pinctrl-sprd.h
+++ b/drivers/pinctrl/sprd/pinctrl-sprd.h
@@ -53,7 +53,7 @@ struct sprd_pins_info {
int sprd_pinctrl_core_probe(struct platform_device *pdev,
struct sprd_pins_info *sprd_soc_pin_info,
int pins_cnt);
-int sprd_pinctrl_remove(struct platform_device *pdev);
+void sprd_pinctrl_remove(struct platform_device *pdev);
void sprd_pinctrl_shutdown(struct platform_device *pdev);
#endif /* __PINCTRL_SPRD_H__ */
diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
index 530fe340a9a1..ea70b8c61679 100644
--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
+++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
@@ -916,16 +916,6 @@ static struct pinctrl_desc starfive_desc = {
.custom_conf_items = starfive_pinconf_custom_conf_items,
};
-static int starfive_gpio_request(struct gpio_chip *gc, unsigned int gpio)
-{
- return pinctrl_gpio_request(gc->base + gpio);
-}
-
-static void starfive_gpio_free(struct gpio_chip *gc, unsigned int gpio)
-{
- pinctrl_gpio_free(gc->base + gpio);
-}
-
static int starfive_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
{
struct starfive_pinctrl *sfp = container_of(gc, struct starfive_pinctrl, gc);
@@ -1309,8 +1299,8 @@ static int starfive_probe(struct platform_device *pdev)
sfp->gc.label = dev_name(dev);
sfp->gc.owner = THIS_MODULE;
- sfp->gc.request = starfive_gpio_request;
- sfp->gc.free = starfive_gpio_free;
+ sfp->gc.request = pinctrl_gpio_request;
+ sfp->gc.free = pinctrl_gpio_free;
sfp->gc.get_direction = starfive_gpio_get_direction;
sfp->gc.direction_input = starfive_gpio_direction_input;
sfp->gc.direction_output = starfive_gpio_direction_output;
diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
index 640f827a9b2c..9d71e8c13310 100644
--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
+++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
@@ -545,16 +545,6 @@ static const struct pinconf_ops jh7110_pinconf_ops = {
.is_generic = true,
};
-static int jh7110_gpio_request(struct gpio_chip *gc, unsigned int gpio)
-{
- return pinctrl_gpio_request(gc->base + gpio);
-}
-
-static void jh7110_gpio_free(struct gpio_chip *gc, unsigned int gpio)
-{
- pinctrl_gpio_free(gc->base + gpio);
-}
-
static int jh7110_gpio_get_direction(struct gpio_chip *gc,
unsigned int gpio)
{
@@ -940,8 +930,8 @@ int jh7110_pinctrl_probe(struct platform_device *pdev)
sfp->gc.label = dev_name(dev);
sfp->gc.owner = THIS_MODULE;
- sfp->gc.request = jh7110_gpio_request;
- sfp->gc.free = jh7110_gpio_free;
+ sfp->gc.request = pinctrl_gpio_request;
+ sfp->gc.free = pinctrl_gpio_free;
sfp->gc.get_direction = jh7110_gpio_get_direction;
sfp->gc.direction_input = jh7110_gpio_direction_input;
sfp->gc.direction_output = jh7110_gpio_direction_output;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index a73385a431de..64e8201c7eac 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -217,12 +217,7 @@ static int stm32_gpio_request(struct gpio_chip *chip, unsigned offset)
return -EINVAL;
}
- return pinctrl_gpio_request(chip->base + offset);
-}
-
-static void stm32_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
- pinctrl_gpio_free(chip->base + offset);
+ return pinctrl_gpio_request(chip, offset);
}
static int stm32_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -239,18 +234,13 @@ static void stm32_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
__stm32_gpio_set(bank, offset, value);
}
-static int stm32_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- return pinctrl_gpio_direction_input(chip->base + offset);
-}
-
static int stm32_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
__stm32_gpio_set(bank, offset, value);
- pinctrl_gpio_direction_output(chip->base + offset);
+ pinctrl_gpio_direction_output(chip, offset);
return 0;
}
@@ -316,10 +306,10 @@ static int stm32_gpio_init_valid_mask(struct gpio_chip *chip,
static const struct gpio_chip stm32_gpio_template = {
.request = stm32_gpio_request,
- .free = stm32_gpio_free,
+ .free = pinctrl_gpio_free,
.get = stm32_gpio_get,
.set = stm32_gpio_set,
- .direction_input = stm32_gpio_direction_input,
+ .direction_input = pinctrl_gpio_direction_input,
.direction_output = stm32_gpio_direction_output,
.to_irq = stm32_gpio_to_irq,
.get_direction = stm32_gpio_get_direction,
@@ -381,7 +371,7 @@ static int stm32_gpio_irq_request_resources(struct irq_data *irq_data)
struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
int ret;
- ret = stm32_gpio_direction_input(&bank->gpio_chip, irq_data->hwirq);
+ ret = pinctrl_gpio_direction_input(&bank->gpio_chip, irq_data->hwirq);
if (ret)
return ret;
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
index 73f012823a98..ecf6d2438e21 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
@@ -15,33 +15,81 @@
#include "pinctrl-sunxi.h"
static const struct sunxi_desc_pin h616_pins[] = {
- /* Internal connection to the AC200 part */
+ /* Internally connected to the AC200 part in the H616 SoC */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 0),
- SUNXI_FUNCTION(0x2, "emac1")), /* ERXD1 */
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac1"), /* ERXD1 */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)), /* PA_EINT0 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 1),
- SUNXI_FUNCTION(0x2, "emac1")), /* ERXD0 */
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac1"), /* ERXD0 */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)), /* PA_EINT1 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 2),
- SUNXI_FUNCTION(0x2, "emac1")), /* ECRS_DV */
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac1"), /* ECRS_DV */
+ SUNXI_FUNCTION(0x4, "i2c1"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 2)), /* PA_EINT2 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 3),
- SUNXI_FUNCTION(0x2, "emac1")), /* ERXERR */
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac1"), /* ERXERR */
+ SUNXI_FUNCTION(0x4, "i2c1"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)), /* PA_EINT3 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 4),
- SUNXI_FUNCTION(0x2, "emac1")), /* ETXD1 */
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac1"), /* ETXD1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 4)), /* PA_EINT4 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 5),
- SUNXI_FUNCTION(0x2, "emac1")), /* ETXD0 */
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac1"), /* ETXD0 */
+ SUNXI_FUNCTION(0x3, "i2s0"), /* DOUT0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 5)), /* PA_EINT5 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 6),
- SUNXI_FUNCTION(0x2, "emac1")), /* ETXCK */
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac1"), /* ETXCK */
+ SUNXI_FUNCTION(0x3, "i2s0"), /* MCLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)), /* PA_EINT6 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 7),
- SUNXI_FUNCTION(0x2, "emac1")), /* ETXEN */
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac1"), /* ETXEN */
+ SUNXI_FUNCTION(0x3, "i2s0"), /* BCLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 7)), /* PA_EINT7 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 8),
- SUNXI_FUNCTION(0x2, "emac1")), /* EMDC */
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac1"), /* EMDC */
+ SUNXI_FUNCTION(0x3, "i2s0"), /* LRCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 8)), /* PA_EINT8 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 9),
- SUNXI_FUNCTION(0x2, "emac1")), /* EMDIO */
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac1"), /* EMDIO */
+ SUNXI_FUNCTION(0x3, "i2s0"), /* DIN0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 9)), /* PA_EINT9 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 10),
- SUNXI_FUNCTION(0x2, "i2c3")), /* SCK */
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c3"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 10)), /* PA_EINT10 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 11),
- SUNXI_FUNCTION(0x2, "i2c3")), /* SDA */
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c3"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 11)), /* PA_EINT11 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 12),
- SUNXI_FUNCTION(0x2, "pwm5")),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm5"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 12)), /* PA_EINT12 */
/* Hole */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
SUNXI_FUNCTION(0x0, "gpio_in"),
@@ -148,6 +196,310 @@ static const struct sunxi_desc_pin h616_pins[] = {
SUNXI_FUNCTION(0x4, "spi0"), /* HOLD */
SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 16)), /* PC_EINT16 */
/* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* VP0 */
+ SUNXI_FUNCTION(0x4, "ts0"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 0)), /* PD_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* VN0 */
+ SUNXI_FUNCTION(0x4, "ts0"), /* ERR */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)), /* PD_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* VP1 */
+ SUNXI_FUNCTION(0x4, "ts0"), /* SYNC */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 2)), /* PD_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* VN1 */
+ SUNXI_FUNCTION(0x4, "ts0"), /* DVLD */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 3)), /* PD_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D4 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* VP2 */
+ SUNXI_FUNCTION(0x4, "ts0"), /* D0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 4)), /* PD_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D5 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* VN2 */
+ SUNXI_FUNCTION(0x4, "ts0"), /* D1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 5)), /* PD_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D6 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* VPC */
+ SUNXI_FUNCTION(0x4, "ts0"), /* D2 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 6)), /* PD_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D7 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* VNC */
+ SUNXI_FUNCTION(0x4, "ts0"), /* D3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 7)), /* PD_EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D8 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* VP3 */
+ SUNXI_FUNCTION(0x4, "ts0"), /* D4 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 8)), /* PD_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D9 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* VN3 */
+ SUNXI_FUNCTION(0x4, "ts0"), /* D5 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 9)), /* PD_EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D10 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* VP0 */
+ SUNXI_FUNCTION(0x4, "ts0"), /* D6 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 10)), /* PD_EINT10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D11 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* VN0 */
+ SUNXI_FUNCTION(0x4, "ts0"), /* D7 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)), /* PD_EINT11 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D12 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* VP1 */
+ SUNXI_FUNCTION(0x4, "sim"), /* VPPEN */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 12)), /* PD_EINT12 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D13 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* VN1 */
+ SUNXI_FUNCTION(0x4, "sim"), /* VPPPP */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 13)), /* PD_EINT13 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D14 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* VP2 */
+ SUNXI_FUNCTION(0x4, "sim"), /* PWREN */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 14)), /* PD_EINT14 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* VN2 */
+ SUNXI_FUNCTION(0x4, "sim"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 15)), /* PD_EINT15 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D16 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* VPC */
+ SUNXI_FUNCTION(0x4, "sim"), /* DATA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 16)), /* PD_EINT16 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D17 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* VNC */
+ SUNXI_FUNCTION(0x4, "sim"), /* RST */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 17)), /* PD_EINT17 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D18 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* VP3 */
+ SUNXI_FUNCTION(0x4, "sim"), /* DET */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 18)), /* PD_EINT18 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D19 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* VN3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 19)), /* PD_EINT19 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D20 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 20)), /* PD_EINT20 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 21),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D21 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 21)), /* PD_EINT21 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 22),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D22 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 22)), /* PD_EINT22 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 23),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D23 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 23)), /* PD_EINT23 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 24),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 24)), /* PD_EINT24 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 25),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* DE */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 25)), /* PD_EINT25 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 26),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* HSYNC */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 26)), /* PD_EINT26 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 27),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* VSYNC */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 27)), /* PD_EINT27 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 28),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm0"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 28)), /* PD_EINT28 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* PCLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 0)), /* PE_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* MCLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 1)), /* PE_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* HSYNC */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 2)), /* PE_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* VSYNC */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 3)), /* PE_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 4)), /* PE_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 5)), /* PE_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D2 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 6)), /* PE_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 7)), /* PE_EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D4 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 8)), /* PE_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D5 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 9)), /* PE_EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D6 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 10)), /* PE_EINT10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D7 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 11)), /* PE_EINT11 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D8 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 12)), /* PE_EINT12 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D9 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 13)), /* PE_EINT13 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D10 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 14)), /* PE_EINT14 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D11 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 15)), /* PE_EINT15 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D12 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 16)), /* PE_EINT16 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D13 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 17)), /* PE_EINT17 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D14 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 18)), /* PE_EINT18 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 19),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D15 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 19)), /* PE_EINT19 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 20),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* SCK */
+ SUNXI_FUNCTION(0x5, "i2c2"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 20)), /* PE_EINT20 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 21),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* SDA */
+ SUNXI_FUNCTION(0x5, "i2c2"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 21)), /* PE_EINT21 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 22),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* FSIN0 */
+ SUNXI_FUNCTION(0x4, "tcon0"), /* TRIG0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 22)), /* PE_EINT22 */
+ /* Hole */
SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index 734c71ef005b..6bf8db424bec 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -120,7 +120,7 @@ static int tegra_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
/* EINVAL=missing, which is fine since it's optional */
if (ret != -EINVAL)
dev_err(dev,
- "could not parse property nvidia,function\n");
+ "%pOF: could not parse property nvidia,function\n", np);
function = NULL;
}
@@ -134,8 +134,8 @@ static int tegra_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
goto exit;
/* EINVAL=missing, which is fine since it's optional */
} else if (ret != -EINVAL) {
- dev_err(dev, "could not parse property %s\n",
- cfg_params[i].property);
+ dev_err(dev, "%pOF: could not parse property %s\n",
+ np, cfg_params[i].property);
}
}
@@ -146,7 +146,7 @@ static int tegra_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
reserve++;
ret = of_property_count_strings(np, "nvidia,pins");
if (ret < 0) {
- dev_err(dev, "could not parse property nvidia,pins\n");
+ dev_err(dev, "%pOF: could not parse property nvidia,pins\n", np);
goto exit;
}
reserve *= ret;
diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
index c1477f657839..040f2c46a868 100644
--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
@@ -14,7 +14,8 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -822,7 +823,6 @@ static int ti_iodelay_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = of_node_get(dev->of_node);
- const struct of_device_id *match;
struct resource *res;
struct ti_iodelay_device *iod;
int ret = 0;
@@ -833,20 +833,18 @@ static int ti_iodelay_probe(struct platform_device *pdev)
goto exit_out;
}
- match = of_match_device(ti_iodelay_of_match, dev);
- if (!match) {
- ret = -EINVAL;
- dev_err(dev, "No DATA match\n");
- goto exit_out;
- }
-
iod = devm_kzalloc(dev, sizeof(*iod), GFP_KERNEL);
if (!iod) {
ret = -ENOMEM;
goto exit_out;
}
iod->dev = dev;
- iod->reg_data = match->data;
+ iod->reg_data = device_get_match_data(dev);
+ if (!iod->reg_data) {
+ ret = -EINVAL;
+ dev_err(dev, "No DATA match\n");
+ goto exit_out;
+ }
/* So far We can assume there is only 1 bank of registers */
iod->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
@@ -896,29 +894,22 @@ exit_out:
/**
* ti_iodelay_remove() - standard remove
* @pdev: platform device
- *
- * Return: 0 if all went fine, else appropriate error value.
*/
-static int ti_iodelay_remove(struct platform_device *pdev)
+static void ti_iodelay_remove(struct platform_device *pdev)
{
struct ti_iodelay_device *iod = platform_get_drvdata(pdev);
- if (!iod)
- return 0;
-
if (iod->pctl)
pinctrl_unregister(iod->pctl);
ti_iodelay_pinconf_deinit_dev(iod);
/* Expect other allocations to be freed by devm */
-
- return 0;
}
static struct platform_driver ti_iodelay_driver = {
.probe = ti_iodelay_probe,
- .remove = ti_iodelay_remove,
+ .remove_new = ti_iodelay_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = ti_iodelay_of_match,
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
index 18d3a4f69e63..fbd0fcdb42c8 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
@@ -31,7 +31,7 @@ struct uniphier_pinctrl_reg_region {
struct list_head node;
unsigned int base;
unsigned int nregs;
- u32 vals[];
+ u32 vals[] __counted_by(nregs);
};
struct uniphier_pinctrl_priv {
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index 6fac30de1c6a..fce92111a32e 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -526,16 +526,11 @@ static void wmt_gpio_set_value(struct gpio_chip *chip, unsigned offset,
wmt_clearbits(data, reg_data_out, BIT(bit));
}
-static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- return pinctrl_gpio_direction_input(chip->base + offset);
-}
-
static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
int value)
{
wmt_gpio_set_value(chip, offset, value);
- return pinctrl_gpio_direction_output(chip->base + offset);
+ return pinctrl_gpio_direction_output(chip, offset);
}
static const struct gpio_chip wmt_gpio_chip = {
@@ -544,7 +539,7 @@ static const struct gpio_chip wmt_gpio_chip = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.get_direction = wmt_gpio_get_direction,
- .direction_input = wmt_gpio_direction_input,
+ .direction_input = pinctrl_gpio_direction_input,
.direction_output = wmt_gpio_direction_output,
.get = wmt_gpio_get_value,
.set = wmt_gpio_set_value,
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 67000e4a8082..2b2f14a1b711 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -492,6 +492,8 @@ static int cros_typec_enable_dp(struct cros_typec_data *typec,
{
struct cros_typec_port *port = typec->ports[port_num];
struct typec_displayport_data dp_data;
+ u32 cable_tbt_vdo;
+ u32 cable_dp_vdo;
int ret;
if (typec->pd_ctrl_ver < 2) {
@@ -524,6 +526,32 @@ static int cros_typec_enable_dp(struct cros_typec_data *typec,
port->state.data = &dp_data;
port->state.mode = TYPEC_MODAL_STATE(ffs(pd_ctrl->dp_mode));
+ /* Get cable VDO for cables with DPSID to check whether DPAM2.1 is supported */
+ cable_dp_vdo = cros_typec_get_cable_vdo(port, USB_TYPEC_DP_SID);
+
+ /*
+  * Get cable VDO for Thunderbolt cables and for cables with DPSID that
+  * do not support DPAM2.1.
+  */
+ cable_tbt_vdo = cros_typec_get_cable_vdo(port, USB_TYPEC_TBT_SID);
+
+ if (cable_dp_vdo & DP_CAP_DPAM_VERSION) {
+ dp_data.conf |= cable_dp_vdo;
+ } else if (cable_tbt_vdo) {
+ dp_data.conf |= TBT_CABLE_SPEED(cable_tbt_vdo) << DP_CONF_SIGNALLING_SHIFT;
+
+ /* Cable Type */
+ if (cable_tbt_vdo & TBT_CABLE_OPTICAL)
+ dp_data.conf |= DP_CONF_CABLE_TYPE_OPTICAL << DP_CONF_CABLE_TYPE_SHIFT;
+ else if (cable_tbt_vdo & TBT_CABLE_RETIMER)
+ dp_data.conf |= DP_CONF_CABLE_TYPE_RE_TIMER << DP_CONF_CABLE_TYPE_SHIFT;
+ else if (cable_tbt_vdo & TBT_CABLE_ACTIVE_PASSIVE)
+ dp_data.conf |= DP_CONF_CABLE_TYPE_RE_DRIVER << DP_CONF_CABLE_TYPE_SHIFT;
+ } else if (PD_IDH_PTYPE(port->c_identity.id_header) == IDH_PTYPE_PCABLE) {
+ dp_data.conf |= VDO_TYPEC_CABLE_SPEED(port->c_identity.vdo[0]) <<
+ DP_CONF_SIGNALLING_SHIFT;
+ }
+
ret = cros_typec_retimer_set(port->retimer, port->state);
if (!ret)
ret = typec_mux_set(port->mux, &port->state);
diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c
index 959b1878cae6..a1ee1a74fc3c 100644
--- a/drivers/platform/x86/intel/ifs/load.c
+++ b/drivers/platform/x86/intel/ifs/load.c
@@ -349,7 +349,7 @@ out:
static int image_sanity_check(struct device *dev, const struct microcode_header_intel *data)
{
- struct ucode_cpu_info uci;
+ struct cpu_signature sig;
/* Provide a specific error message when loading an older/unsupported image */
if (data->hdrver != MC_HEADER_TYPE_IFS) {
@@ -362,11 +362,9 @@ static int image_sanity_check(struct device *dev, const struct microcode_header_
return -EINVAL;
}
- intel_cpu_collect_info(&uci);
+ intel_collect_cpu_info(&sig);
- if (!intel_find_matching_signature((void *)data,
- uci.cpu_sig.sig,
- uci.cpu_sig.pf)) {
+ if (!intel_find_matching_signature((void *)data, &sig)) {
dev_err(dev, "cpu signature, processor flags not matching\n");
return -EINVAL;
}
diff --git a/drivers/power/supply/tps65217_charger.c b/drivers/power/supply/tps65217_charger.c
index b3a1ba326a3e..2382749a2f53 100644
--- a/drivers/power/supply/tps65217_charger.c
+++ b/drivers/power/supply/tps65217_charger.c
@@ -237,7 +237,7 @@ static int tps65217_charger_probe(struct platform_device *pdev)
for (i = 0; i < NUM_CHARGER_IRQS; i++) {
ret = devm_request_threaded_irq(&pdev->dev, irq[i], NULL,
tps65217_charger_irq,
- IRQF_ONESHOT, "tps65217-charger",
+ IRQF_SHARED, "tps65217-charger",
charger);
if (ret) {
dev_err(charger->dev,
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 282cd7d24077..3f7a74788802 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -108,6 +108,7 @@ int ptp_open(struct posix_clock_context *pccontext, fmode_t fmode)
container_of(pccontext->clk, struct ptp_clock, clock);
struct timestamp_event_queue *queue;
char debugfsname[32];
+ unsigned long flags;
queue = kzalloc(sizeof(*queue), GFP_KERNEL);
if (!queue)
@@ -119,7 +120,9 @@ int ptp_open(struct posix_clock_context *pccontext, fmode_t fmode)
}
bitmap_set(queue->mask, 0, PTP_MAX_CHANNELS);
spin_lock_init(&queue->lock);
+ spin_lock_irqsave(&ptp->tsevqs_lock, flags);
list_add_tail(&queue->qlist, &ptp->tsevqs);
+ spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
pccontext->private_clkdata = queue;
/* Debugfs contents */
@@ -139,16 +142,16 @@ int ptp_release(struct posix_clock_context *pccontext)
{
struct timestamp_event_queue *queue = pccontext->private_clkdata;
unsigned long flags;
+ struct ptp_clock *ptp =
+ container_of(pccontext->clk, struct ptp_clock, clock);
- if (queue) {
- debugfs_remove(queue->debugfs_instance);
- pccontext->private_clkdata = NULL;
- spin_lock_irqsave(&queue->lock, flags);
- list_del(&queue->qlist);
- spin_unlock_irqrestore(&queue->lock, flags);
- bitmap_free(queue->mask);
- kfree(queue);
- }
+ debugfs_remove(queue->debugfs_instance);
+ pccontext->private_clkdata = NULL;
+ spin_lock_irqsave(&ptp->tsevqs_lock, flags);
+ list_del(&queue->qlist);
+ spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
+ bitmap_free(queue->mask);
+ kfree(queue);
return 0;
}
@@ -585,7 +588,5 @@ ssize_t ptp_read(struct posix_clock_context *pccontext, uint rdflags,
free_event:
kfree(event);
exit:
- if (result < 0)
- ptp_release(pccontext);
return result;
}
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 3d1b0a97301c..3134568af622 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -179,11 +179,11 @@ static void ptp_clock_release(struct device *dev)
mutex_destroy(&ptp->pincfg_mux);
mutex_destroy(&ptp->n_vclocks_mux);
/* Delete first entry */
+ spin_lock_irqsave(&ptp->tsevqs_lock, flags);
tsevq = list_first_entry(&ptp->tsevqs, struct timestamp_event_queue,
qlist);
- spin_lock_irqsave(&tsevq->lock, flags);
list_del(&tsevq->qlist);
- spin_unlock_irqrestore(&tsevq->lock, flags);
+ spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
bitmap_free(tsevq->mask);
kfree(tsevq);
debugfs_remove(ptp->debugfs_root);
@@ -247,6 +247,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
if (!queue)
goto no_memory_queue;
list_add_tail(&queue->qlist, &ptp->tsevqs);
+ spin_lock_init(&ptp->tsevqs_lock);
queue->mask = bitmap_alloc(PTP_MAX_CHANNELS, GFP_KERNEL);
if (!queue->mask)
goto no_memory_bitmap;
@@ -407,6 +408,7 @@ void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
{
struct timestamp_event_queue *tsevq;
struct pps_event_time evt;
+ unsigned long flags;
switch (event->type) {
@@ -415,10 +417,12 @@ void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
case PTP_CLOCK_EXTTS:
/* Enqueue timestamp on selected queues */
+ spin_lock_irqsave(&ptp->tsevqs_lock, flags);
list_for_each_entry(tsevq, &ptp->tsevqs, qlist) {
if (test_bit((unsigned int)event->index, tsevq->mask))
enqueue_external_timestamp(tsevq, event);
}
+ spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
wake_up_interruptible(&ptp->tsev_wq);
break;
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index 52f87e394aa6..35fde0a05746 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -44,6 +44,7 @@ struct ptp_clock {
struct pps_device *pps_source;
long dialed_frequency; /* remembers the frequency adjustment */
struct list_head tsevqs; /* timestamp fifo list */
+ spinlock_t tsevqs_lock; /* protects tsevqs from concurrent access */
struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
wait_queue_head_t tsev_wq;
int defunct; /* tells readers to go away when clock is being removed */
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 8ebcddf91f7b..4b956d661755 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -173,8 +173,8 @@ config PWM_CLPS711X
will be called pwm-clps711x.
config PWM_CRC
- bool "Intel Crystalcove (CRC) PWM support"
- depends on X86 && INTEL_SOC_PMIC
+ tristate "Intel Crystalcove (CRC) PWM support"
+ depends on INTEL_SOC_PMIC
help
Generic PWM framework driver for Crystalcove (CRC) PMIC based PWM
control.
@@ -186,9 +186,19 @@ config PWM_CROS_EC
PWM driver for exposing a PWM attached to the ChromeOS Embedded
Controller.
+config PWM_DWC_CORE
+ tristate
+ depends on HAS_IOMEM
+ help
+ PWM driver for Synopsys DWC PWM Controller.
+
+ To compile this driver as a module, build the dependencies as
+ modules; it will be called pwm-dwc-core.
+
config PWM_DWC
- tristate "DesignWare PWM Controller"
- depends on PCI
+ tristate "DesignWare PWM Controller (PCI bus)"
+ depends on HAS_IOMEM && PCI
+ select PWM_DWC_CORE
help
PWM driver for Synopsys DWC PWM Controller attached to a PCI bus.
@@ -407,7 +417,7 @@ config PWM_MEDIATEK
config PWM_MICROCHIP_CORE
tristate "Microchip corePWM PWM support"
- depends on SOC_MICROCHIP_POLARFIRE || COMPILE_TEST
+ depends on ARCH_MICROCHIP_POLARFIRE || COMPILE_TEST
depends on HAS_IOMEM && OF
help
PWM driver for Microchip FPGA soft IP core.
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index c822389c2a24..c5ec9e168ee7 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_PWM_CLK) += pwm-clk.o
obj-$(CONFIG_PWM_CLPS711X) += pwm-clps711x.o
obj-$(CONFIG_PWM_CRC) += pwm-crc.o
obj-$(CONFIG_PWM_CROS_EC) += pwm-cros-ec.o
+obj-$(CONFIG_PWM_DWC_CORE) += pwm-dwc-core.o
obj-$(CONFIG_PWM_DWC) += pwm-dwc.o
obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o
obj-$(CONFIG_PWM_FSL_FTM) += pwm-fsl-ftm.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index dc66e3405bf5..29078486534d 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -89,13 +89,13 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label)
if (test_bit(PWMF_REQUESTED, &pwm->flags))
return -EBUSY;
- if (!try_module_get(pwm->chip->ops->owner))
+ if (!try_module_get(pwm->chip->owner))
return -ENODEV;
if (pwm->chip->ops->request) {
err = pwm->chip->ops->request(pwm->chip, pwm);
if (err) {
- module_put(pwm->chip->ops->owner);
+ module_put(pwm->chip->owner);
return err;
}
}
@@ -208,36 +208,6 @@ static void of_pwmchip_remove(struct pwm_chip *chip)
of_node_put(chip->dev->of_node);
}
-/**
- * pwm_set_chip_data() - set private chip data for a PWM
- * @pwm: PWM device
- * @data: pointer to chip-specific data
- *
- * Returns: 0 on success or a negative error code on failure.
- */
-int pwm_set_chip_data(struct pwm_device *pwm, void *data)
-{
- if (!pwm)
- return -EINVAL;
-
- pwm->chip_data = data;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pwm_set_chip_data);
-
-/**
- * pwm_get_chip_data() - get private chip data for a PWM
- * @pwm: PWM device
- *
- * Returns: A pointer to the chip-private data for the PWM device.
- */
-void *pwm_get_chip_data(struct pwm_device *pwm)
-{
- return pwm ? pwm->chip_data : NULL;
-}
-EXPORT_SYMBOL_GPL(pwm_get_chip_data);
-
static bool pwm_ops_check(const struct pwm_chip *chip)
{
const struct pwm_ops *ops = chip->ops;
@@ -253,14 +223,16 @@ static bool pwm_ops_check(const struct pwm_chip *chip)
}
/**
- * pwmchip_add() - register a new PWM chip
+ * __pwmchip_add() - register a new PWM chip
* @chip: the PWM chip to add
+ * @owner: reference to the module providing the chip.
*
- * Register a new PWM chip.
+ * Register a new PWM chip. @owner is supposed to be THIS_MODULE, use the
+ * pwmchip_add wrapper to do this right.
*
* Returns: 0 on success or a negative error code on failure.
*/
-int pwmchip_add(struct pwm_chip *chip)
+int __pwmchip_add(struct pwm_chip *chip, struct module *owner)
{
struct pwm_device *pwm;
unsigned int i;
@@ -272,6 +244,8 @@ int pwmchip_add(struct pwm_chip *chip)
if (!pwm_ops_check(chip))
return -EINVAL;
+ chip->owner = owner;
+
chip->pwms = kcalloc(chip->npwm, sizeof(*pwm), GFP_KERNEL);
if (!chip->pwms)
return -ENOMEM;
@@ -306,7 +280,7 @@ int pwmchip_add(struct pwm_chip *chip)
return 0;
}
-EXPORT_SYMBOL_GPL(pwmchip_add);
+EXPORT_SYMBOL_GPL(__pwmchip_add);
/**
* pwmchip_remove() - remove a PWM chip
@@ -338,17 +312,17 @@ static void devm_pwmchip_remove(void *data)
pwmchip_remove(chip);
}
-int devm_pwmchip_add(struct device *dev, struct pwm_chip *chip)
+int __devm_pwmchip_add(struct device *dev, struct pwm_chip *chip, struct module *owner)
{
int ret;
- ret = pwmchip_add(chip);
+ ret = __pwmchip_add(chip, owner);
if (ret)
return ret;
return devm_add_action_or_reset(dev, devm_pwmchip_remove, chip);
}
-EXPORT_SYMBOL_GPL(devm_pwmchip_add);
+EXPORT_SYMBOL_GPL(__devm_pwmchip_add);
/**
* pwm_request_from_chip() - request a PWM device relative to a PWM chip
@@ -976,10 +950,9 @@ void pwm_put(struct pwm_device *pwm)
if (pwm->chip->ops->free)
pwm->chip->ops->free(pwm->chip, pwm);
- pwm_set_chip_data(pwm, NULL);
pwm->label = NULL;
- module_put(pwm->chip->ops->owner);
+ module_put(pwm->chip->owner);
out:
mutex_unlock(&pwm_lock);
}
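
The counterpart change in include/linux/pwm.h is not part of the hunks shown here; presumably the pwmchip_add()/devm_pwmchip_add() names survive as wrapper macros that pass the caller's THIS_MODULE, roughly along these lines (a sketch, not a quote of the header):

    int __pwmchip_add(struct pwm_chip *chip, struct module *owner);
    #define pwmchip_add(chip) __pwmchip_add(chip, THIS_MODULE)

    int __devm_pwmchip_add(struct device *dev, struct pwm_chip *chip,
                           struct module *owner);
    #define devm_pwmchip_add(dev, chip) __devm_pwmchip_add(dev, chip, THIS_MODULE)

This keeps every caller source-compatible while moving the owner from struct pwm_ops into struct pwm_chip, which is why the drivers below simply drop their .owner = THIS_MODULE lines.
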
diff --git a/drivers/pwm/pwm-ab8500.c b/drivers/pwm/pwm-ab8500.c
index 583a7d69c741..670d33daea84 100644
--- a/drivers/pwm/pwm-ab8500.c
+++ b/drivers/pwm/pwm-ab8500.c
@@ -181,7 +181,6 @@ static int ab8500_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops ab8500_pwm_ops = {
.apply = ab8500_pwm_apply,
.get_state = ab8500_pwm_get_state,
- .owner = THIS_MODULE,
};
static int ab8500_pwm_probe(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-apple.c b/drivers/pwm/pwm-apple.c
index 8e7d67fb5fbe..4d755b628d9e 100644
--- a/drivers/pwm/pwm-apple.c
+++ b/drivers/pwm/pwm-apple.c
@@ -99,7 +99,6 @@ static int apple_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops apple_pwm_ops = {
.apply = apple_pwm_apply,
.get_state = apple_pwm_get_state,
- .owner = THIS_MODULE,
};
static int apple_pwm_probe(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index e271d920151e..07920e034757 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -170,7 +170,6 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops atmel_hlcdc_pwm_ops = {
.apply = atmel_hlcdc_pwm_apply,
- .owner = THIS_MODULE,
};
static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_at91sam9x5_errata = {
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index c00dd37c5fbd..98b33c016c3c 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -364,7 +364,6 @@ static const struct pwm_ops atmel_tcb_pwm_ops = {
.request = atmel_tcb_pwm_request,
.free = atmel_tcb_pwm_free,
.apply = atmel_tcb_pwm_apply,
- .owner = THIS_MODULE,
};
static struct atmel_tcb_config tcb_rm9200_config = {
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index 1f73325d1bea..47bcc8a3bf9d 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -402,7 +402,6 @@ static int atmel_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops atmel_pwm_ops = {
.apply = atmel_pwm_apply,
.get_state = atmel_pwm_get_state,
- .owner = THIS_MODULE,
};
static const struct atmel_pwm_data atmel_sam9rl_pwm_data = {
@@ -547,7 +546,7 @@ disable_clk:
static struct platform_driver atmel_pwm_driver = {
.driver = {
.name = "atmel-pwm",
- .of_match_table = of_match_ptr(atmel_pwm_dt_ids),
+ .of_match_table = atmel_pwm_dt_ids,
},
.probe = atmel_pwm_probe,
};
diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c
index 7d70b6f186a6..758254025683 100644
--- a/drivers/pwm/pwm-bcm-iproc.c
+++ b/drivers/pwm/pwm-bcm-iproc.c
@@ -183,7 +183,6 @@ static int iproc_pwmc_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops iproc_pwm_ops = {
.apply = iproc_pwmc_apply,
.get_state = iproc_pwmc_get_state,
- .owner = THIS_MODULE,
};
static int iproc_pwmc_probe(struct platform_device *pdev)
@@ -207,18 +206,10 @@ static int iproc_pwmc_probe(struct platform_device *pdev)
if (IS_ERR(ip->base))
return PTR_ERR(ip->base);
- ip->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(ip->clk)) {
- dev_err(&pdev->dev, "failed to get clock: %ld\n",
- PTR_ERR(ip->clk));
- return PTR_ERR(ip->clk);
- }
-
- ret = clk_prepare_enable(ip->clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to enable clock: %d\n", ret);
- return ret;
- }
+ ip->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(ip->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ip->clk),
+ "failed to get clock\n");
/* Set full drive and normal polarity for all channels */
value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
@@ -230,22 +221,12 @@ static int iproc_pwmc_probe(struct platform_device *pdev)
writel(value, ip->base + IPROC_PWM_CTRL_OFFSET);
- ret = pwmchip_add(&ip->chip);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
- clk_disable_unprepare(ip->clk);
- }
-
- return ret;
-}
-
-static void iproc_pwmc_remove(struct platform_device *pdev)
-{
- struct iproc_pwmc *ip = platform_get_drvdata(pdev);
+ ret = devm_pwmchip_add(&pdev->dev, &ip->chip);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add PWM chip\n");
- pwmchip_remove(&ip->chip);
-
- clk_disable_unprepare(ip->clk);
+ return 0;
}
static const struct of_device_id bcm_iproc_pwmc_dt[] = {
@@ -260,7 +241,6 @@ static struct platform_driver iproc_pwmc_driver = {
.of_match_table = bcm_iproc_pwmc_dt,
},
.probe = iproc_pwmc_probe,
- .remove_new = iproc_pwmc_remove,
};
module_platform_driver(iproc_pwmc_driver);
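
The same clock-handling conversion repeats in several of the drivers that follow (bcm2835, brcmstb, imx-tpm, berlin). Stripped of driver specifics, the before/after shape is (clk and dev are generic placeholders):

    /* Before: get and enable the clock by hand, and undo it on every error
     * path and in .remove().
     */
    clk = devm_clk_get(dev, NULL);
    if (IS_ERR(clk))
        return PTR_ERR(clk);
    ret = clk_prepare_enable(clk);
    if (ret)
        return ret;

    /* After: devm_clk_get_enabled() gets, prepares and enables the clock and
     * registers a devres action that disables and unprepares it when the
     * device is unbound, so no explicit cleanup is needed.
     */
    clk = devm_clk_get_enabled(dev, NULL);
    if (IS_ERR(clk))
        return dev_err_probe(dev, PTR_ERR(clk), "failed to get clock\n");
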
diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c
index e5b00cc9f7a7..15d6ed03c3ce 100644
--- a/drivers/pwm/pwm-bcm-kona.c
+++ b/drivers/pwm/pwm-bcm-kona.c
@@ -269,7 +269,6 @@ static int kona_pwmc_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops kona_pwm_ops = {
.apply = kona_pwmc_apply,
- .owner = THIS_MODULE,
};
static int kona_pwmc_probe(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c
index bdfc2a5ec0d6..9777babd5b95 100644
--- a/drivers/pwm/pwm-bcm2835.c
+++ b/drivers/pwm/pwm-bcm2835.c
@@ -129,7 +129,6 @@ static const struct pwm_ops bcm2835_pwm_ops = {
.request = bcm2835_pwm_request,
.free = bcm2835_pwm_free,
.apply = bcm2835_pwm_apply,
- .owner = THIS_MODULE,
};
static int bcm2835_pwm_probe(struct platform_device *pdev)
@@ -147,41 +146,42 @@ static int bcm2835_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pc->base))
return PTR_ERR(pc->base);
- pc->clk = devm_clk_get(&pdev->dev, NULL);
+ pc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(pc->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(pc->clk),
"clock not found\n");
- ret = clk_prepare_enable(pc->clk);
- if (ret)
- return ret;
-
pc->chip.dev = &pdev->dev;
pc->chip.ops = &bcm2835_pwm_ops;
pc->chip.npwm = 2;
- platform_set_drvdata(pdev, pc);
-
- ret = pwmchip_add(&pc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
if (ret < 0)
- goto add_fail;
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add pwmchip\n");
return 0;
+}
+
+static int bcm2835_pwm_suspend(struct device *dev)
+{
+ struct bcm2835_pwm *pc = dev_get_drvdata(dev);
-add_fail:
clk_disable_unprepare(pc->clk);
- return ret;
+
+ return 0;
}
-static void bcm2835_pwm_remove(struct platform_device *pdev)
+static int bcm2835_pwm_resume(struct device *dev)
{
- struct bcm2835_pwm *pc = platform_get_drvdata(pdev);
-
- pwmchip_remove(&pc->chip);
+ struct bcm2835_pwm *pc = dev_get_drvdata(dev);
- clk_disable_unprepare(pc->clk);
+ return clk_prepare_enable(pc->clk);
}
+static DEFINE_SIMPLE_DEV_PM_OPS(bcm2835_pwm_pm_ops, bcm2835_pwm_suspend,
+ bcm2835_pwm_resume);
+
static const struct of_device_id bcm2835_pwm_of_match[] = {
{ .compatible = "brcm,bcm2835-pwm", },
{ /* sentinel */ }
@@ -192,9 +192,9 @@ static struct platform_driver bcm2835_pwm_driver = {
.driver = {
.name = "bcm2835-pwm",
.of_match_table = bcm2835_pwm_of_match,
+ .pm = pm_ptr(&bcm2835_pwm_pm_ops),
},
.probe = bcm2835_pwm_probe,
- .remove_new = bcm2835_pwm_remove,
};
module_platform_driver(bcm2835_pwm_driver);
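
Since devm_clk_get_enabled() keeps the clock running for as long as the device is bound, the new suspend/resume callbacks stop and restart it by hand across system sleep. Wiring them up through DEFINE_SIMPLE_DEV_PM_OPS() and pm_ptr() lets the compiler discard the whole set when CONFIG_PM is disabled; the generic shape (placeholder names) is:

    static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

    static struct platform_driver foo_driver = {
        .driver = {
            .name = "foo",
            .pm = pm_ptr(&foo_pm_ops),   /* NULL when CONFIG_PM=n */
        },
        .probe = foo_probe,
    };
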
diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c
index 0971c666afd1..ba2d79991769 100644
--- a/drivers/pwm/pwm-berlin.c
+++ b/drivers/pwm/pwm-berlin.c
@@ -39,6 +39,8 @@
#define BERLIN_PWM_TCNT 0xc
#define BERLIN_PWM_MAX_TCNT 65535
+#define BERLIN_PWM_NUMPWMS 4
+
struct berlin_pwm_channel {
u32 enable;
u32 ctrl;
@@ -50,6 +52,7 @@ struct berlin_pwm_chip {
struct pwm_chip chip;
struct clk *clk;
void __iomem *base;
+ struct berlin_pwm_channel channel[BERLIN_PWM_NUMPWMS];
};
static inline struct berlin_pwm_chip *to_berlin_pwm_chip(struct pwm_chip *chip)
@@ -70,24 +73,6 @@ static inline void berlin_pwm_writel(struct berlin_pwm_chip *bpc,
writel_relaxed(value, bpc->base + channel * 0x10 + offset);
}
-static int berlin_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct berlin_pwm_channel *channel;
-
- channel = kzalloc(sizeof(*channel), GFP_KERNEL);
- if (!channel)
- return -ENOMEM;
-
- return pwm_set_chip_data(pwm, channel);
-}
-
-static void berlin_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct berlin_pwm_channel *channel = pwm_get_chip_data(pwm);
-
- kfree(channel);
-}
-
static int berlin_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
u64 duty_ns, u64 period_ns)
{
@@ -202,10 +187,7 @@ static int berlin_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
}
static const struct pwm_ops berlin_pwm_ops = {
- .request = berlin_pwm_request,
- .free = berlin_pwm_free,
.apply = berlin_pwm_apply,
- .owner = THIS_MODULE,
};
static const struct of_device_id berlin_pwm_match[] = {
@@ -227,39 +209,23 @@ static int berlin_pwm_probe(struct platform_device *pdev)
if (IS_ERR(bpc->base))
return PTR_ERR(bpc->base);
- bpc->clk = devm_clk_get(&pdev->dev, NULL);
+ bpc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(bpc->clk))
return PTR_ERR(bpc->clk);
- ret = clk_prepare_enable(bpc->clk);
- if (ret)
- return ret;
-
bpc->chip.dev = &pdev->dev;
bpc->chip.ops = &berlin_pwm_ops;
- bpc->chip.npwm = 4;
+ bpc->chip.npwm = BERLIN_PWM_NUMPWMS;
- ret = pwmchip_add(&bpc->chip);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
- clk_disable_unprepare(bpc->clk);
- return ret;
- }
+ ret = devm_pwmchip_add(&pdev->dev, &bpc->chip);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");
platform_set_drvdata(pdev, bpc);
return 0;
}
-static void berlin_pwm_remove(struct platform_device *pdev)
-{
- struct berlin_pwm_chip *bpc = platform_get_drvdata(pdev);
-
- pwmchip_remove(&bpc->chip);
-
- clk_disable_unprepare(bpc->clk);
-}
-
#ifdef CONFIG_PM_SLEEP
static int berlin_pwm_suspend(struct device *dev)
{
@@ -267,11 +233,7 @@ static int berlin_pwm_suspend(struct device *dev)
unsigned int i;
for (i = 0; i < bpc->chip.npwm; i++) {
- struct berlin_pwm_channel *channel;
-
- channel = pwm_get_chip_data(&bpc->chip.pwms[i]);
- if (!channel)
- continue;
+ struct berlin_pwm_channel *channel = &bpc->channel[i];
channel->enable = berlin_pwm_readl(bpc, i, BERLIN_PWM_ENABLE);
channel->ctrl = berlin_pwm_readl(bpc, i, BERLIN_PWM_CONTROL);
@@ -295,11 +257,7 @@ static int berlin_pwm_resume(struct device *dev)
return ret;
for (i = 0; i < bpc->chip.npwm; i++) {
- struct berlin_pwm_channel *channel;
-
- channel = pwm_get_chip_data(&bpc->chip.pwms[i]);
- if (!channel)
- continue;
+ struct berlin_pwm_channel *channel = &bpc->channel[i];
berlin_pwm_writel(bpc, i, channel->ctrl, BERLIN_PWM_CONTROL);
berlin_pwm_writel(bpc, i, channel->duty, BERLIN_PWM_DUTY);
@@ -316,7 +274,6 @@ static SIMPLE_DEV_PM_OPS(berlin_pwm_pm_ops, berlin_pwm_suspend,
static struct platform_driver berlin_pwm_driver = {
.probe = berlin_pwm_probe,
- .remove_new = berlin_pwm_remove,
.driver = {
.name = "berlin-pwm",
.of_match_table = berlin_pwm_match,
diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
index a3faa9a3de7c..b723c2d4f485 100644
--- a/drivers/pwm/pwm-brcmstb.c
+++ b/drivers/pwm/pwm-brcmstb.c
@@ -220,7 +220,6 @@ static int brcmstb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops brcmstb_pwm_ops = {
.apply = brcmstb_pwm_apply,
- .owner = THIS_MODULE,
};
static const struct of_device_id brcmstb_pwm_of_match[] = {
@@ -238,17 +237,10 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
if (!p)
return -ENOMEM;
- p->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(p->clk)) {
- dev_err(&pdev->dev, "failed to obtain clock\n");
- return PTR_ERR(p->clk);
- }
-
- ret = clk_prepare_enable(p->clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to enable clock: %d\n", ret);
- return ret;
- }
+ p->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(p->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(p->clk),
+ "failed to obtain clock\n");
platform_set_drvdata(pdev, p);
@@ -257,30 +249,14 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
p->chip.npwm = 2;
p->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(p->base)) {
- ret = PTR_ERR(p->base);
- goto out_clk;
- }
+ if (IS_ERR(p->base))
+ return PTR_ERR(p->base);
- ret = pwmchip_add(&p->chip);
- if (ret) {
- dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
- goto out_clk;
- }
+ ret = devm_pwmchip_add(&pdev->dev, &p->chip);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");
return 0;
-
-out_clk:
- clk_disable_unprepare(p->clk);
- return ret;
-}
-
-static void brcmstb_pwm_remove(struct platform_device *pdev)
-{
- struct brcmstb_pwm *p = platform_get_drvdata(pdev);
-
- pwmchip_remove(&p->chip);
- clk_disable_unprepare(p->clk);
}
#ifdef CONFIG_PM_SLEEP
@@ -288,7 +264,7 @@ static int brcmstb_pwm_suspend(struct device *dev)
{
struct brcmstb_pwm *p = dev_get_drvdata(dev);
- clk_disable(p->clk);
+ clk_disable_unprepare(p->clk);
return 0;
}
@@ -297,9 +273,7 @@ static int brcmstb_pwm_resume(struct device *dev)
{
struct brcmstb_pwm *p = dev_get_drvdata(dev);
- clk_enable(p->clk);
-
- return 0;
+ return clk_prepare_enable(p->clk);
}
#endif
@@ -308,7 +282,6 @@ static SIMPLE_DEV_PM_OPS(brcmstb_pwm_pm_ops, brcmstb_pwm_suspend,
static struct platform_driver brcmstb_pwm_driver = {
.probe = brcmstb_pwm_probe,
- .remove_new = brcmstb_pwm_remove,
.driver = {
.name = "pwm-brcmstb",
.of_match_table = brcmstb_pwm_of_match,
diff --git a/drivers/pwm/pwm-clk.c b/drivers/pwm/pwm-clk.c
index 0ee4d2aee4df..9dd88b386907 100644
--- a/drivers/pwm/pwm-clk.c
+++ b/drivers/pwm/pwm-clk.c
@@ -77,7 +77,6 @@ static int pwm_clk_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops pwm_clk_ops = {
.apply = pwm_clk_apply,
- .owner = THIS_MODULE,
};
static int pwm_clk_probe(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c
index b0d91142da8d..42179b3f7ec3 100644
--- a/drivers/pwm/pwm-clps711x.c
+++ b/drivers/pwm/pwm-clps711x.c
@@ -72,7 +72,6 @@ static int clps711x_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops clps711x_pwm_ops = {
.request = clps711x_pwm_request,
.apply = clps711x_pwm_apply,
- .owner = THIS_MODULE,
};
static struct pwm_device *clps711x_pwm_xlate(struct pwm_chip *chip,
diff --git a/drivers/pwm/pwm-crc.c b/drivers/pwm/pwm-crc.c
index b9f063dc6b5f..2b0b659eee97 100644
--- a/drivers/pwm/pwm-crc.c
+++ b/drivers/pwm/pwm-crc.c
@@ -184,5 +184,8 @@ static struct platform_driver crystalcove_pwm_driver = {
.name = "crystal_cove_pwm",
},
};
+module_platform_driver(crystalcove_pwm_driver);
-builtin_platform_driver(crystalcove_pwm_driver);
+MODULE_ALIAS("platform:crystal_cove_pwm");
+MODULE_DESCRIPTION("Intel Crystalcove (CRC) PWM support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
index baaac0c33aa0..4fbd23e4ef69 100644
--- a/drivers/pwm/pwm-cros-ec.c
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -22,12 +22,14 @@
* @ec: Pointer to EC device
* @chip: PWM controller chip
* @use_pwm_type: Use PWM types instead of generic channels
+ * @channel: array with per-channel data
*/
struct cros_ec_pwm_device {
struct device *dev;
struct cros_ec_device *ec;
struct pwm_chip chip;
bool use_pwm_type;
+ struct cros_ec_pwm *channel;
};
/**
@@ -43,26 +45,6 @@ static inline struct cros_ec_pwm_device *pwm_to_cros_ec_pwm(struct pwm_chip *chi
return container_of(chip, struct cros_ec_pwm_device, chip);
}
-static int cros_ec_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct cros_ec_pwm *channel;
-
- channel = kzalloc(sizeof(*channel), GFP_KERNEL);
- if (!channel)
- return -ENOMEM;
-
- pwm_set_chip_data(pwm, channel);
-
- return 0;
-}
-
-static void cros_ec_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct cros_ec_pwm *channel = pwm_get_chip_data(pwm);
-
- kfree(channel);
-}
-
static int cros_ec_dt_type_to_pwm_type(u8 dt_index, u8 *pwm_type)
{
switch (dt_index) {
@@ -158,7 +140,7 @@ static int cros_ec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct cros_ec_pwm_device *ec_pwm = pwm_to_cros_ec_pwm(chip);
- struct cros_ec_pwm *channel = pwm_get_chip_data(pwm);
+ struct cros_ec_pwm *channel = &ec_pwm->channel[pwm->hwpwm];
u16 duty_cycle;
int ret;
@@ -188,7 +170,7 @@ static int cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
struct cros_ec_pwm_device *ec_pwm = pwm_to_cros_ec_pwm(chip);
- struct cros_ec_pwm *channel = pwm_get_chip_data(pwm);
+ struct cros_ec_pwm *channel = &ec_pwm->channel[pwm->hwpwm];
int ret;
ret = cros_ec_pwm_get_duty(ec_pwm, pwm->hwpwm);
@@ -237,11 +219,8 @@ cros_ec_pwm_xlate(struct pwm_chip *chip, const struct of_phandle_args *args)
}
static const struct pwm_ops cros_ec_pwm_ops = {
- .request = cros_ec_pwm_request,
- .free = cros_ec_pwm_free,
.get_state = cros_ec_pwm_get_state,
.apply = cros_ec_pwm_apply,
- .owner = THIS_MODULE,
};
/*
@@ -286,10 +265,8 @@ static int cros_ec_pwm_probe(struct platform_device *pdev)
struct pwm_chip *chip;
int ret;
- if (!ec) {
- dev_err(dev, "no parent EC device\n");
- return -EINVAL;
- }
+ if (!ec)
+ return dev_err_probe(dev, -EINVAL, "no parent EC device\n");
ec_pwm = devm_kzalloc(dev, sizeof(*ec_pwm), GFP_KERNEL);
if (!ec_pwm)
@@ -310,32 +287,23 @@ static int cros_ec_pwm_probe(struct platform_device *pdev)
chip->npwm = CROS_EC_PWM_DT_COUNT;
} else {
ret = cros_ec_num_pwms(ec_pwm);
- if (ret < 0) {
- dev_err(dev, "Couldn't find PWMs: %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Couldn't find PWMs\n");
chip->npwm = ret;
}
- dev_dbg(dev, "Probed %u PWMs\n", chip->npwm);
-
- ret = pwmchip_add(chip);
- if (ret < 0) {
- dev_err(dev, "cannot register PWM: %d\n", ret);
- return ret;
- }
-
- platform_set_drvdata(pdev, ec_pwm);
+ ec_pwm->channel = devm_kcalloc(dev, chip->npwm, sizeof(*ec_pwm->channel),
+ GFP_KERNEL);
+ if (!ec_pwm->channel)
+ return -ENOMEM;
- return ret;
-}
+ dev_dbg(dev, "Probed %u PWMs\n", chip->npwm);
-static void cros_ec_pwm_remove(struct platform_device *dev)
-{
- struct cros_ec_pwm_device *ec_pwm = platform_get_drvdata(dev);
- struct pwm_chip *chip = &ec_pwm->chip;
+ ret = devm_pwmchip_add(dev, chip);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "cannot register PWM\n");
- pwmchip_remove(chip);
+ return 0;
}
#ifdef CONFIG_OF
@@ -349,7 +317,6 @@ MODULE_DEVICE_TABLE(of, cros_ec_pwm_of_match);
static struct platform_driver cros_ec_pwm_driver = {
.probe = cros_ec_pwm_probe,
- .remove_new = cros_ec_pwm_remove,
.driver = {
.name = "cros-ec-pwm",
.of_match_table = of_match_ptr(cros_ec_pwm_of_match),
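
With pwm_set_chip_data()/pwm_get_chip_data() removed from the core (see the drivers/pwm/core.c hunk above), per-channel state now lives in the driver's own chip structure and is indexed by pwm->hwpwm. The cros-ec conversion above, and the berlin, lp3943, jz4740 and renesas-tpu ones below, all follow the same shape; a generic sketch with placeholder names:

    #include <linux/container_of.h>
    #include <linux/pwm.h>
    #include <linux/types.h>

    #define FOO_NUM_PWMS	4

    struct foo_channel {
        u32 duty;   /* whatever per-channel state the driver needs */
    };

    struct foo_pwm_chip {
        struct pwm_chip chip;
        struct foo_channel channel[FOO_NUM_PWMS];
    };

    static inline struct foo_pwm_chip *to_foo(struct pwm_chip *chip)
    {
        return container_of(chip, struct foo_pwm_chip, chip);
    }

    static int foo_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                             const struct pwm_state *state)
    {
        struct foo_channel *ch = &to_foo(chip)->channel[pwm->hwpwm];

        /* program the hardware using ch and state */
        return 0;
    }
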
diff --git a/drivers/pwm/pwm-dwc-core.c b/drivers/pwm/pwm-dwc-core.c
new file mode 100644
index 000000000000..ea63dd741f5c
--- /dev/null
+++ b/drivers/pwm/pwm-dwc-core.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DesignWare PWM Controller driver core
+ *
+ * Copyright (C) 2018-2020 Intel Corporation
+ *
+ * Author: Felipe Balbi (Intel)
+ * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+ * Author: Raymond Tan <raymond.tan@intel.com>
+ */
+
+#define DEFAULT_SYMBOL_NAMESPACE dwc_pwm
+
+#include <linux/bitops.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/pwm.h>
+
+#include "pwm-dwc.h"
+
+static void __dwc_pwm_set_enable(struct dwc_pwm *dwc, int pwm, int enabled)
+{
+ u32 reg;
+
+ reg = dwc_pwm_readl(dwc, DWC_TIM_CTRL(pwm));
+
+ if (enabled)
+ reg |= DWC_TIM_CTRL_EN;
+ else
+ reg &= ~DWC_TIM_CTRL_EN;
+
+ dwc_pwm_writel(dwc, reg, DWC_TIM_CTRL(pwm));
+}
+
+static int __dwc_pwm_configure_timer(struct dwc_pwm *dwc,
+ struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ u64 tmp;
+ u32 ctrl;
+ u32 high;
+ u32 low;
+
+ /*
+ * Calculate the width of the low and high periods in terms of input
+ * clock periods and check that the results are within the HW limit of
+ * 1 to 2^32 periods.
+ */
+ tmp = DIV_ROUND_CLOSEST_ULL(state->duty_cycle, dwc->clk_ns);
+ if (tmp < 1 || tmp > (1ULL << 32))
+ return -ERANGE;
+ low = tmp - 1;
+
+ tmp = DIV_ROUND_CLOSEST_ULL(state->period - state->duty_cycle,
+ dwc->clk_ns);
+ if (tmp < 1 || tmp > (1ULL << 32))
+ return -ERANGE;
+ high = tmp - 1;
+
+ /*
+ * The specification says the timer usage flow is to disable the timer,
+ * then program it and finally enable it. It also says Load Count is
+ * loaded into the timer after it is enabled - either after a disable or
+ * a reset. Based on measurements this also happens without a disable
+ * whenever Load Count is updated, but follow the specification anyway.
+ */
+ __dwc_pwm_set_enable(dwc, pwm->hwpwm, false);
+
+ /*
+ * Write the Load Count and Load Count 2 registers. The former defines
+ * the width of the low period and the latter the width of the high
+ * period, each as a multiple of input clock periods:
+ * Width = ((Count + 1) * input clock period).
+ */
+ dwc_pwm_writel(dwc, low, DWC_TIM_LD_CNT(pwm->hwpwm));
+ dwc_pwm_writel(dwc, high, DWC_TIM_LD_CNT2(pwm->hwpwm));
+
+ /*
+ * Set user-defined mode, timer reloads from Load Count registers
+ * when it counts down to 0.
+ * Set PWM mode: it makes the output toggle, with the widths of the low
+ * and high periods set by the Load Count registers.
+ */
+ ctrl = DWC_TIM_CTRL_MODE_USER | DWC_TIM_CTRL_PWM;
+ dwc_pwm_writel(dwc, ctrl, DWC_TIM_CTRL(pwm->hwpwm));
+
+ /*
+ * Enable timer. Output starts from low period.
+ */
+ __dwc_pwm_set_enable(dwc, pwm->hwpwm, state->enabled);
+
+ return 0;
+}
+
+static int dwc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct dwc_pwm *dwc = to_dwc_pwm(chip);
+
+ if (state->polarity != PWM_POLARITY_INVERSED)
+ return -EINVAL;
+
+ if (state->enabled) {
+ if (!pwm->state.enabled)
+ pm_runtime_get_sync(chip->dev);
+ return __dwc_pwm_configure_timer(dwc, pwm, state);
+ } else {
+ if (pwm->state.enabled) {
+ __dwc_pwm_set_enable(dwc, pwm->hwpwm, false);
+ pm_runtime_put_sync(chip->dev);
+ }
+ }
+
+ return 0;
+}
+
+static int dwc_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct dwc_pwm *dwc = to_dwc_pwm(chip);
+ u64 duty, period;
+ u32 ctrl, ld, ld2;
+
+ pm_runtime_get_sync(chip->dev);
+
+ ctrl = dwc_pwm_readl(dwc, DWC_TIM_CTRL(pwm->hwpwm));
+ ld = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT(pwm->hwpwm));
+ ld2 = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT2(pwm->hwpwm));
+
+ state->enabled = !!(ctrl & DWC_TIM_CTRL_EN);
+
+ /*
+ * If the timer is not in PWM mode, the output is effectively a 50/50
+ * square wave based on the timer load count alone.
+ */
+ if (ctrl & DWC_TIM_CTRL_PWM) {
+ duty = (ld + 1) * dwc->clk_ns;
+ period = (ld2 + 1) * dwc->clk_ns;
+ period += duty;
+ } else {
+ duty = (ld + 1) * dwc->clk_ns;
+ period = duty * 2;
+ }
+
+ state->polarity = PWM_POLARITY_INVERSED;
+ state->period = period;
+ state->duty_cycle = duty;
+
+ pm_runtime_put_sync(chip->dev);
+
+ return 0;
+}
+
+static const struct pwm_ops dwc_pwm_ops = {
+ .apply = dwc_pwm_apply,
+ .get_state = dwc_pwm_get_state,
+};
+
+struct dwc_pwm *dwc_pwm_alloc(struct device *dev)
+{
+ struct dwc_pwm *dwc;
+
+ dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
+ if (!dwc)
+ return NULL;
+
+ dwc->clk_ns = 10;
+ dwc->chip.dev = dev;
+ dwc->chip.ops = &dwc_pwm_ops;
+ dwc->chip.npwm = DWC_TIMERS_TOTAL;
+
+ dev_set_drvdata(dev, dwc);
+ return dwc;
+}
+EXPORT_SYMBOL_GPL(dwc_pwm_alloc);
+
+MODULE_AUTHOR("Felipe Balbi (Intel)");
+MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
+MODULE_AUTHOR("Raymond Tan <raymond.tan@intel.com>");
+MODULE_DESCRIPTION("DesignWare PWM Controller");
+MODULE_LICENSE("GPL");
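
To make the Load Count arithmetic above concrete: with the default 10 ns input clock period (clk_ns = 10), a request of period = 1000000 ns and duty_cycle = 250000 ns programs counts of 24999 and 74999, since each phase lasts (count + 1) * clk_ns. A plain user-space check of the rounding (not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
        uint64_t clk_ns = 10, period = 1000000, duty = 250000;
        uint64_t low  = DIV_ROUND_CLOSEST(duty, clk_ns) - 1;          /* 24999 */
        uint64_t high = DIV_ROUND_CLOSEST(period - duty, clk_ns) - 1; /* 74999 */

        /* DWC_TIM_LD_CNT gets 24999 (250000 ns low phase) and
         * DWC_TIM_LD_CNT2 gets 74999 (750000 ns high phase).
         */
        printf("low=%llu high=%llu\n",
               (unsigned long long)low, (unsigned long long)high);
        return 0;
    }
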
diff --git a/drivers/pwm/pwm-dwc.c b/drivers/pwm/pwm-dwc.c
index 3bbb26c862c3..bd9cadb497d7 100644
--- a/drivers/pwm/pwm-dwc.c
+++ b/drivers/pwm/pwm-dwc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * DesignWare PWM Controller driver
+ * DesignWare PWM Controller driver (PCI part)
*
* Copyright (C) 2018-2020 Intel Corporation
*
@@ -13,6 +13,8 @@
* periods are one or more input clock periods long.
*/
+#define DEFAULT_SYMBOL_NAMESPACE dwc_pwm
+
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/kernel.h>
@@ -21,198 +23,7 @@
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
-#define DWC_TIM_LD_CNT(n) ((n) * 0x14)
-#define DWC_TIM_LD_CNT2(n) (((n) * 4) + 0xb0)
-#define DWC_TIM_CUR_VAL(n) (((n) * 0x14) + 0x04)
-#define DWC_TIM_CTRL(n) (((n) * 0x14) + 0x08)
-#define DWC_TIM_EOI(n) (((n) * 0x14) + 0x0c)
-#define DWC_TIM_INT_STS(n) (((n) * 0x14) + 0x10)
-
-#define DWC_TIMERS_INT_STS 0xa0
-#define DWC_TIMERS_EOI 0xa4
-#define DWC_TIMERS_RAW_INT_STS 0xa8
-#define DWC_TIMERS_COMP_VERSION 0xac
-
-#define DWC_TIMERS_TOTAL 8
-#define DWC_CLK_PERIOD_NS 10
-
-/* Timer Control Register */
-#define DWC_TIM_CTRL_EN BIT(0)
-#define DWC_TIM_CTRL_MODE BIT(1)
-#define DWC_TIM_CTRL_MODE_FREE (0 << 1)
-#define DWC_TIM_CTRL_MODE_USER (1 << 1)
-#define DWC_TIM_CTRL_INT_MASK BIT(2)
-#define DWC_TIM_CTRL_PWM BIT(3)
-
-struct dwc_pwm_ctx {
- u32 cnt;
- u32 cnt2;
- u32 ctrl;
-};
-
-struct dwc_pwm {
- struct pwm_chip chip;
- void __iomem *base;
- struct dwc_pwm_ctx ctx[DWC_TIMERS_TOTAL];
-};
-#define to_dwc_pwm(p) (container_of((p), struct dwc_pwm, chip))
-
-static inline u32 dwc_pwm_readl(struct dwc_pwm *dwc, u32 offset)
-{
- return readl(dwc->base + offset);
-}
-
-static inline void dwc_pwm_writel(struct dwc_pwm *dwc, u32 value, u32 offset)
-{
- writel(value, dwc->base + offset);
-}
-
-static void __dwc_pwm_set_enable(struct dwc_pwm *dwc, int pwm, int enabled)
-{
- u32 reg;
-
- reg = dwc_pwm_readl(dwc, DWC_TIM_CTRL(pwm));
-
- if (enabled)
- reg |= DWC_TIM_CTRL_EN;
- else
- reg &= ~DWC_TIM_CTRL_EN;
-
- dwc_pwm_writel(dwc, reg, DWC_TIM_CTRL(pwm));
-}
-
-static int __dwc_pwm_configure_timer(struct dwc_pwm *dwc,
- struct pwm_device *pwm,
- const struct pwm_state *state)
-{
- u64 tmp;
- u32 ctrl;
- u32 high;
- u32 low;
-
- /*
- * Calculate width of low and high period in terms of input clock
- * periods and check are the result within HW limits between 1 and
- * 2^32 periods.
- */
- tmp = DIV_ROUND_CLOSEST_ULL(state->duty_cycle, DWC_CLK_PERIOD_NS);
- if (tmp < 1 || tmp > (1ULL << 32))
- return -ERANGE;
- low = tmp - 1;
-
- tmp = DIV_ROUND_CLOSEST_ULL(state->period - state->duty_cycle,
- DWC_CLK_PERIOD_NS);
- if (tmp < 1 || tmp > (1ULL << 32))
- return -ERANGE;
- high = tmp - 1;
-
- /*
- * Specification says timer usage flow is to disable timer, then
- * program it followed by enable. It also says Load Count is loaded
- * into timer after it is enabled - either after a disable or
- * a reset. Based on measurements it happens also without disable
- * whenever Load Count is updated. But follow the specification.
- */
- __dwc_pwm_set_enable(dwc, pwm->hwpwm, false);
-
- /*
- * Write Load Count and Load Count 2 registers. Former defines the
- * width of low period and latter the width of high period in terms
- * multiple of input clock periods:
- * Width = ((Count + 1) * input clock period).
- */
- dwc_pwm_writel(dwc, low, DWC_TIM_LD_CNT(pwm->hwpwm));
- dwc_pwm_writel(dwc, high, DWC_TIM_LD_CNT2(pwm->hwpwm));
-
- /*
- * Set user-defined mode, timer reloads from Load Count registers
- * when it counts down to 0.
- * Set PWM mode, it makes output to toggle and width of low and high
- * periods are set by Load Count registers.
- */
- ctrl = DWC_TIM_CTRL_MODE_USER | DWC_TIM_CTRL_PWM;
- dwc_pwm_writel(dwc, ctrl, DWC_TIM_CTRL(pwm->hwpwm));
-
- /*
- * Enable timer. Output starts from low period.
- */
- __dwc_pwm_set_enable(dwc, pwm->hwpwm, state->enabled);
-
- return 0;
-}
-
-static int dwc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
- const struct pwm_state *state)
-{
- struct dwc_pwm *dwc = to_dwc_pwm(chip);
-
- if (state->polarity != PWM_POLARITY_INVERSED)
- return -EINVAL;
-
- if (state->enabled) {
- if (!pwm->state.enabled)
- pm_runtime_get_sync(chip->dev);
- return __dwc_pwm_configure_timer(dwc, pwm, state);
- } else {
- if (pwm->state.enabled) {
- __dwc_pwm_set_enable(dwc, pwm->hwpwm, false);
- pm_runtime_put_sync(chip->dev);
- }
- }
-
- return 0;
-}
-
-static int dwc_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state)
-{
- struct dwc_pwm *dwc = to_dwc_pwm(chip);
- u64 duty, period;
-
- pm_runtime_get_sync(chip->dev);
-
- state->enabled = !!(dwc_pwm_readl(dwc,
- DWC_TIM_CTRL(pwm->hwpwm)) & DWC_TIM_CTRL_EN);
-
- duty = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT(pwm->hwpwm));
- duty += 1;
- duty *= DWC_CLK_PERIOD_NS;
- state->duty_cycle = duty;
-
- period = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT2(pwm->hwpwm));
- period += 1;
- period *= DWC_CLK_PERIOD_NS;
- period += duty;
- state->period = period;
-
- state->polarity = PWM_POLARITY_INVERSED;
-
- pm_runtime_put_sync(chip->dev);
-
- return 0;
-}
-
-static const struct pwm_ops dwc_pwm_ops = {
- .apply = dwc_pwm_apply,
- .get_state = dwc_pwm_get_state,
- .owner = THIS_MODULE,
-};
-
-static struct dwc_pwm *dwc_pwm_alloc(struct device *dev)
-{
- struct dwc_pwm *dwc;
-
- dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
- if (!dwc)
- return NULL;
-
- dwc->chip.dev = dev;
- dwc->chip.ops = &dwc_pwm_ops;
- dwc->chip.npwm = DWC_TIMERS_TOTAL;
-
- dev_set_drvdata(dev, dwc);
- return dwc;
-}
+#include "pwm-dwc.h"
static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
diff --git a/drivers/pwm/pwm-dwc.h b/drivers/pwm/pwm-dwc.h
new file mode 100644
index 000000000000..64795247c54c
--- /dev/null
+++ b/drivers/pwm/pwm-dwc.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DesignWare PWM Controller driver
+ *
+ * Copyright (C) 2018-2020 Intel Corporation
+ *
+ * Author: Felipe Balbi (Intel)
+ * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+ * Author: Raymond Tan <raymond.tan@intel.com>
+ */
+
+MODULE_IMPORT_NS(dwc_pwm);
+
+#define DWC_TIM_LD_CNT(n) ((n) * 0x14)
+#define DWC_TIM_LD_CNT2(n) (((n) * 4) + 0xb0)
+#define DWC_TIM_CUR_VAL(n) (((n) * 0x14) + 0x04)
+#define DWC_TIM_CTRL(n) (((n) * 0x14) + 0x08)
+#define DWC_TIM_EOI(n) (((n) * 0x14) + 0x0c)
+#define DWC_TIM_INT_STS(n) (((n) * 0x14) + 0x10)
+
+#define DWC_TIMERS_INT_STS 0xa0
+#define DWC_TIMERS_EOI 0xa4
+#define DWC_TIMERS_RAW_INT_STS 0xa8
+#define DWC_TIMERS_COMP_VERSION 0xac
+
+#define DWC_TIMERS_TOTAL 8
+
+/* Timer Control Register */
+#define DWC_TIM_CTRL_EN BIT(0)
+#define DWC_TIM_CTRL_MODE BIT(1)
+#define DWC_TIM_CTRL_MODE_FREE (0 << 1)
+#define DWC_TIM_CTRL_MODE_USER (1 << 1)
+#define DWC_TIM_CTRL_INT_MASK BIT(2)
+#define DWC_TIM_CTRL_PWM BIT(3)
+
+struct dwc_pwm_ctx {
+ u32 cnt;
+ u32 cnt2;
+ u32 ctrl;
+};
+
+struct dwc_pwm {
+ struct pwm_chip chip;
+ void __iomem *base;
+ unsigned int clk_ns;
+ struct dwc_pwm_ctx ctx[DWC_TIMERS_TOTAL];
+};
+#define to_dwc_pwm(p) (container_of((p), struct dwc_pwm, chip))
+
+static inline u32 dwc_pwm_readl(struct dwc_pwm *dwc, u32 offset)
+{
+ return readl(dwc->base + offset);
+}
+
+static inline void dwc_pwm_writel(struct dwc_pwm *dwc, u32 value, u32 offset)
+{
+ writel(value, dwc->base + offset);
+}
+
+extern struct dwc_pwm *dwc_pwm_alloc(struct device *dev);
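
With the accessors and dwc_pwm_alloc() now shared through this header and the dwc_pwm symbol namespace, adding another bus binding only needs thin glue. A hypothetical (non-existent) platform-bus consumer could look roughly like this; the real in-tree user is the PCI driver in pwm-dwc.c, whose probe is truncated in this hunk:

    /* Hedged sketch of hypothetical glue code, not an actual in-tree driver.
     * Registration boilerplate and MODULE_* macros are omitted for brevity.
     */
    #include <linux/err.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/pwm.h>

    #include "pwm-dwc.h"   /* pulls in MODULE_IMPORT_NS(dwc_pwm) */

    static int dwc_pwm_plat_probe(struct platform_device *pdev)
    {
        struct dwc_pwm *dwc;

        dwc = dwc_pwm_alloc(&pdev->dev);
        if (!dwc)
            return -ENOMEM;

        dwc->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(dwc->base))
            return PTR_ERR(dwc->base);

        return devm_pwmchip_add(&pdev->dev, &dwc->chip);
    }
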
diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c
index c45a75e65c86..51e072572a87 100644
--- a/drivers/pwm/pwm-ep93xx.c
+++ b/drivers/pwm/pwm-ep93xx.c
@@ -159,7 +159,6 @@ static const struct pwm_ops ep93xx_pwm_ops = {
.request = ep93xx_pwm_request,
.free = ep93xx_pwm_free,
.apply = ep93xx_pwm_apply,
- .owner = THIS_MODULE,
};
static int ep93xx_pwm_probe(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index b7c6045c5d08..d1b6d1aa4773 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -350,7 +350,6 @@ static const struct pwm_ops fsl_pwm_ops = {
.request = fsl_pwm_request,
.free = fsl_pwm_free,
.apply = fsl_pwm_apply,
- .owner = THIS_MODULE,
};
static int fsl_pwm_init(struct fsl_pwm_chip *fpc)
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
index f7ba6fe9a349..c435776e2f78 100644
--- a/drivers/pwm/pwm-hibvt.c
+++ b/drivers/pwm/pwm-hibvt.c
@@ -185,7 +185,6 @@ static const struct pwm_ops hibvt_pwm_ops = {
.get_state = hibvt_pwm_get_state,
.apply = hibvt_pwm_apply,
- .owner = THIS_MODULE,
};
static int hibvt_pwm_probe(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index 326af85888e7..116fa060e302 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -208,7 +208,6 @@ static int img_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops img_pwm_ops = {
.apply = img_pwm_apply,
- .owner = THIS_MODULE,
};
static const struct img_pwm_soc_data pistachio_pwm = {
diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
index 98ab65c89685..dc6aafeb9f7b 100644
--- a/drivers/pwm/pwm-imx-tpm.c
+++ b/drivers/pwm/pwm-imx-tpm.c
@@ -332,7 +332,6 @@ static const struct pwm_ops imx_tpm_pwm_ops = {
.free = pwm_imx_tpm_free,
.get_state = pwm_imx_tpm_get_state,
.apply = pwm_imx_tpm_apply,
- .owner = THIS_MODULE,
};
static int pwm_imx_tpm_probe(struct platform_device *pdev)
@@ -351,18 +350,11 @@ static int pwm_imx_tpm_probe(struct platform_device *pdev)
if (IS_ERR(tpm->base))
return PTR_ERR(tpm->base);
- tpm->clk = devm_clk_get(&pdev->dev, NULL);
+ tpm->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(tpm->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(tpm->clk),
"failed to get PWM clock\n");
- ret = clk_prepare_enable(tpm->clk);
- if (ret) {
- dev_err(&pdev->dev,
- "failed to prepare or enable clock: %d\n", ret);
- return ret;
- }
-
tpm->chip.dev = &pdev->dev;
tpm->chip.ops = &imx_tpm_pwm_ops;
@@ -372,22 +364,11 @@ static int pwm_imx_tpm_probe(struct platform_device *pdev)
mutex_init(&tpm->lock);
- ret = pwmchip_add(&tpm->chip);
- if (ret) {
- dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
- clk_disable_unprepare(tpm->clk);
- }
-
- return ret;
-}
-
-static void pwm_imx_tpm_remove(struct platform_device *pdev)
-{
- struct imx_tpm_pwm_chip *tpm = platform_get_drvdata(pdev);
-
- pwmchip_remove(&tpm->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &tpm->chip);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");
- clk_disable_unprepare(tpm->clk);
+ return 0;
}
static int __maybe_unused pwm_imx_tpm_suspend(struct device *dev)
@@ -437,7 +418,6 @@ static struct platform_driver imx_tpm_pwm_driver = {
.pm = &imx_tpm_pwm_pm,
},
.probe = pwm_imx_tpm_probe,
- .remove_new = pwm_imx_tpm_remove,
};
module_platform_driver(imx_tpm_pwm_driver);
diff --git a/drivers/pwm/pwm-imx1.c b/drivers/pwm/pwm-imx1.c
index 0651983bed19..d175d895f22a 100644
--- a/drivers/pwm/pwm-imx1.c
+++ b/drivers/pwm/pwm-imx1.c
@@ -146,7 +146,6 @@ static int pwm_imx1_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops pwm_imx1_ops = {
.apply = pwm_imx1_apply,
- .owner = THIS_MODULE,
};
static const struct of_device_id pwm_imx1_dt_ids[] = {
diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c
index 29a3089c534c..7d9bc43f12b0 100644
--- a/drivers/pwm/pwm-imx27.c
+++ b/drivers/pwm/pwm-imx27.c
@@ -296,7 +296,6 @@ static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops pwm_imx27_ops = {
.apply = pwm_imx27_apply,
.get_state = pwm_imx27_get_state,
- .owner = THIS_MODULE,
};
static const struct of_device_id pwm_imx27_dt_ids[] = {
diff --git a/drivers/pwm/pwm-intel-lgm.c b/drivers/pwm/pwm-intel-lgm.c
index 0cd7dd548e82..54ecae7f937e 100644
--- a/drivers/pwm/pwm-intel-lgm.c
+++ b/drivers/pwm/pwm-intel-lgm.c
@@ -107,7 +107,6 @@ static int lgm_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops lgm_pwm_ops = {
.get_state = lgm_pwm_get_state,
.apply = lgm_pwm_apply,
- .owner = THIS_MODULE,
};
static void lgm_pwm_init(struct lgm_pwm_chip *pc)
diff --git a/drivers/pwm/pwm-iqs620a.c b/drivers/pwm/pwm-iqs620a.c
index 47b3141135f3..378ab036edfe 100644
--- a/drivers/pwm/pwm-iqs620a.c
+++ b/drivers/pwm/pwm-iqs620a.c
@@ -166,7 +166,6 @@ static int iqs620_pwm_notifier(struct notifier_block *notifier,
static const struct pwm_ops iqs620_pwm_ops = {
.apply = iqs620_pwm_apply,
.get_state = iqs620_pwm_get_state,
- .owner = THIS_MODULE,
};
static void iqs620_pwm_notifier_unregister(void *context)
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index ef1293f2a897..e9375de60ad6 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -27,6 +27,7 @@ struct soc_info {
struct jz4740_pwm_chip {
struct pwm_chip chip;
struct regmap *map;
+ struct clk *clk[];
};
static inline struct jz4740_pwm_chip *to_jz4740(struct pwm_chip *chip)
@@ -70,14 +71,15 @@ static int jz4740_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
return err;
}
- pwm_set_chip_data(pwm, clk);
+ jz->clk[pwm->hwpwm] = clk;
return 0;
}
static void jz4740_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct clk *clk = pwm_get_chip_data(pwm);
+ struct jz4740_pwm_chip *jz = to_jz4740(chip);
+ struct clk *clk = jz->clk[pwm->hwpwm];
clk_disable_unprepare(clk);
clk_put(clk);
@@ -121,9 +123,9 @@ static void jz4740_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
- struct jz4740_pwm_chip *jz4740 = to_jz4740(pwm->chip);
+ struct jz4740_pwm_chip *jz = to_jz4740(pwm->chip);
unsigned long long tmp = 0xffffull * NSEC_PER_SEC;
- struct clk *clk = pwm_get_chip_data(pwm);
+ struct clk *clk = jz->clk[pwm->hwpwm];
unsigned long period, duty;
long rate;
int err;
@@ -173,16 +175,16 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
}
/* Reset counter to 0 */
- regmap_write(jz4740->map, TCU_REG_TCNTc(pwm->hwpwm), 0);
+ regmap_write(jz->map, TCU_REG_TCNTc(pwm->hwpwm), 0);
/* Set duty */
- regmap_write(jz4740->map, TCU_REG_TDHRc(pwm->hwpwm), duty);
+ regmap_write(jz->map, TCU_REG_TDHRc(pwm->hwpwm), duty);
/* Set period */
- regmap_write(jz4740->map, TCU_REG_TDFRc(pwm->hwpwm), period);
+ regmap_write(jz->map, TCU_REG_TDFRc(pwm->hwpwm), period);
/* Set abrupt shutdown */
- regmap_set_bits(jz4740->map, TCU_REG_TCSRc(pwm->hwpwm),
+ regmap_set_bits(jz->map, TCU_REG_TCSRc(pwm->hwpwm),
TCU_TCSR_PWM_SD);
/*
@@ -199,10 +201,10 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
* state instead of its inactive state.
*/
if ((state->polarity == PWM_POLARITY_NORMAL) ^ state->enabled)
- regmap_update_bits(jz4740->map, TCU_REG_TCSRc(pwm->hwpwm),
+ regmap_update_bits(jz->map, TCU_REG_TCSRc(pwm->hwpwm),
TCU_TCSR_PWM_INITL_HIGH, 0);
else
- regmap_update_bits(jz4740->map, TCU_REG_TCSRc(pwm->hwpwm),
+ regmap_update_bits(jz->map, TCU_REG_TCSRc(pwm->hwpwm),
TCU_TCSR_PWM_INITL_HIGH,
TCU_TCSR_PWM_INITL_HIGH);
@@ -216,34 +218,34 @@ static const struct pwm_ops jz4740_pwm_ops = {
.request = jz4740_pwm_request,
.free = jz4740_pwm_free,
.apply = jz4740_pwm_apply,
- .owner = THIS_MODULE,
};
static int jz4740_pwm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct jz4740_pwm_chip *jz4740;
+ struct jz4740_pwm_chip *jz;
const struct soc_info *info;
info = device_get_match_data(dev);
if (!info)
return -EINVAL;
- jz4740 = devm_kzalloc(dev, sizeof(*jz4740), GFP_KERNEL);
- if (!jz4740)
+ jz = devm_kzalloc(dev, struct_size(jz, clk, info->num_pwms),
+ GFP_KERNEL);
+ if (!jz)
return -ENOMEM;
- jz4740->map = device_node_to_regmap(dev->parent->of_node);
- if (IS_ERR(jz4740->map)) {
- dev_err(dev, "regmap not found: %ld\n", PTR_ERR(jz4740->map));
- return PTR_ERR(jz4740->map);
+ jz->map = device_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(jz->map)) {
+ dev_err(dev, "regmap not found: %ld\n", PTR_ERR(jz->map));
+ return PTR_ERR(jz->map);
}
- jz4740->chip.dev = dev;
- jz4740->chip.ops = &jz4740_pwm_ops;
- jz4740->chip.npwm = info->num_pwms;
+ jz->chip.dev = dev;
+ jz->chip.ops = &jz4740_pwm_ops;
+ jz->chip.npwm = info->num_pwms;
- return devm_pwmchip_add(dev, &jz4740->chip);
+ return devm_pwmchip_add(dev, &jz->chip);
}
static const struct soc_info jz4740_soc_info = {
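
The new trailing struct clk *clk[] member sized with struct_size() replaces the old per-PWM chip_data pointer for this driver. In isolation, the allocation idiom looks like this (a generic sketch with placeholder names):

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/overflow.h>
    #include <linux/pwm.h>
    #include <linux/slab.h>

    struct jz_like_chip {
        struct pwm_chip chip;
        struct clk *clk[];   /* flexible array member, must come last */
    };

    static int jz_like_alloc(struct device *dev, unsigned int num_pwms)
    {
        struct jz_like_chip *jz;

        /*
         * struct_size(jz, clk, num_pwms) evaluates to
         * sizeof(*jz) + num_pwms * sizeof(jz->clk[0]), with overflow checking.
         */
        jz = devm_kzalloc(dev, struct_size(jz, clk, num_pwms), GFP_KERNEL);
        if (!jz)
            return -ENOMEM;

        jz->chip.dev = dev;
        jz->chip.npwm = num_pwms;

        return 0;
    }
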
diff --git a/drivers/pwm/pwm-keembay.c b/drivers/pwm/pwm-keembay.c
index ac02d8bb4a0b..ac824ecc3f64 100644
--- a/drivers/pwm/pwm-keembay.c
+++ b/drivers/pwm/pwm-keembay.c
@@ -178,7 +178,6 @@ static int keembay_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
}
static const struct pwm_ops keembay_pwm_ops = {
- .owner = THIS_MODULE,
.apply = keembay_pwm_apply,
.get_state = keembay_pwm_get_state,
};
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index 4b133a17f4be..32350a357278 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -23,6 +23,7 @@ struct lp3943_pwm {
struct pwm_chip chip;
struct lp3943 *lp3943;
struct lp3943_platform_data *pdata;
+ struct lp3943_pwm_map pwm_map[LP3943_NUM_PWMS];
};
static inline struct lp3943_pwm *to_lp3943_pwm(struct pwm_chip *chip)
@@ -35,13 +36,9 @@ lp3943_pwm_request_map(struct lp3943_pwm *lp3943_pwm, int hwpwm)
{
struct lp3943_platform_data *pdata = lp3943_pwm->pdata;
struct lp3943 *lp3943 = lp3943_pwm->lp3943;
- struct lp3943_pwm_map *pwm_map;
+ struct lp3943_pwm_map *pwm_map = &lp3943_pwm->pwm_map[hwpwm];
int i, offset;
- pwm_map = kzalloc(sizeof(*pwm_map), GFP_KERNEL);
- if (!pwm_map)
- return ERR_PTR(-ENOMEM);
-
pwm_map->output = pdata->pwms[hwpwm]->output;
pwm_map->num_outputs = pdata->pwms[hwpwm]->num_outputs;
@@ -49,10 +46,8 @@ lp3943_pwm_request_map(struct lp3943_pwm *lp3943_pwm, int hwpwm)
offset = pwm_map->output[i];
/* Return an error if the pin is already assigned */
- if (test_and_set_bit(offset, &lp3943->pin_used)) {
- kfree(pwm_map);
+ if (test_and_set_bit(offset, &lp3943->pin_used))
return ERR_PTR(-EBUSY);
- }
}
return pwm_map;
@@ -67,7 +62,7 @@ static int lp3943_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
if (IS_ERR(pwm_map))
return PTR_ERR(pwm_map);
- return pwm_set_chip_data(pwm, pwm_map);
+ return 0;
}
static void lp3943_pwm_free_map(struct lp3943_pwm *lp3943_pwm,
@@ -80,14 +75,12 @@ static void lp3943_pwm_free_map(struct lp3943_pwm *lp3943_pwm,
offset = pwm_map->output[i];
clear_bit(offset, &lp3943->pin_used);
}
-
- kfree(pwm_map);
}
static void lp3943_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct lp3943_pwm *lp3943_pwm = to_lp3943_pwm(chip);
- struct lp3943_pwm_map *pwm_map = pwm_get_chip_data(pwm);
+ struct lp3943_pwm_map *pwm_map = &lp3943_pwm->pwm_map[pwm->hwpwm];
lp3943_pwm_free_map(lp3943_pwm, pwm_map);
}
@@ -159,7 +152,7 @@ static int lp3943_pwm_set_mode(struct lp3943_pwm *lp3943_pwm,
static int lp3943_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct lp3943_pwm *lp3943_pwm = to_lp3943_pwm(chip);
- struct lp3943_pwm_map *pwm_map = pwm_get_chip_data(pwm);
+ struct lp3943_pwm_map *pwm_map = &lp3943_pwm->pwm_map[pwm->hwpwm];
u8 val;
if (pwm->hwpwm == 0)
@@ -178,7 +171,7 @@ static int lp3943_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
static void lp3943_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct lp3943_pwm *lp3943_pwm = to_lp3943_pwm(chip);
- struct lp3943_pwm_map *pwm_map = pwm_get_chip_data(pwm);
+ struct lp3943_pwm_map *pwm_map = &lp3943_pwm->pwm_map[pwm->hwpwm];
/*
* LP3943 outputs are open-drain, so the pin should be configured
@@ -216,7 +209,6 @@ static const struct pwm_ops lp3943_pwm_ops = {
.request = lp3943_pwm_request,
.free = lp3943_pwm_free,
.apply = lp3943_pwm_apply,
- .owner = THIS_MODULE,
};
static int lp3943_pwm_parse_dt(struct device *dev,
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
index 7a19a840bca5..ef7d0da137ed 100644
--- a/drivers/pwm/pwm-lpc18xx-sct.c
+++ b/drivers/pwm/pwm-lpc18xx-sct.c
@@ -341,7 +341,6 @@ static const struct pwm_ops lpc18xx_pwm_ops = {
.apply = lpc18xx_pwm_apply,
.request = lpc18xx_pwm_request,
.free = lpc18xx_pwm_free,
- .owner = THIS_MODULE,
};
static const struct of_device_id lpc18xx_pwm_of_match[] = {
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index 806f0bb3ad6d..78f664e41e6e 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -115,7 +115,6 @@ static int lpc32xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops lpc32xx_pwm_ops = {
.apply = lpc32xx_pwm_apply,
- .owner = THIS_MODULE,
};
static int lpc32xx_pwm_probe(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 23fe332b2394..a6ea3ce7e019 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -243,7 +243,6 @@ static int pwm_lpss_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops pwm_lpss_ops = {
.apply = pwm_lpss_apply,
.get_state = pwm_lpss_get_state,
- .owner = THIS_MODULE,
};
struct pwm_lpss_chip *devm_pwm_lpss_probe(struct device *dev, void __iomem *base,
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index 6adb0ed01906..373abfd25acb 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -229,7 +229,6 @@ static int pwm_mediatek_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops pwm_mediatek_ops = {
.apply = pwm_mediatek_apply,
- .owner = THIS_MODULE,
};
static int pwm_mediatek_probe(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 25519cddc2a9..5bea53243ed2 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -335,7 +335,6 @@ static const struct pwm_ops meson_pwm_ops = {
.free = meson_pwm_free,
.apply = meson_pwm_apply,
.get_state = meson_pwm_get_state,
- .owner = THIS_MODULE,
};
static const char * const pwm_meson8b_parent_names[] = {
diff --git a/drivers/pwm/pwm-microchip-core.c b/drivers/pwm/pwm-microchip-core.c
index e7525c98105e..c0c53968f3e9 100644
--- a/drivers/pwm/pwm-microchip-core.c
+++ b/drivers/pwm/pwm-microchip-core.c
@@ -435,7 +435,6 @@ static int mchp_core_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm
static const struct pwm_ops mchp_core_pwm_ops = {
.apply = mchp_core_pwm_apply,
.get_state = mchp_core_pwm_get_state,
- .owner = THIS_MODULE,
};
static const struct of_device_id mchp_core_of_match[] = {
diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
index a83bd6e18b07..a72f7be36996 100644
--- a/drivers/pwm/pwm-mtk-disp.c
+++ b/drivers/pwm/pwm-mtk-disp.c
@@ -227,7 +227,6 @@ static int mtk_disp_pwm_get_state(struct pwm_chip *chip,
static const struct pwm_ops mtk_disp_pwm_ops = {
.apply = mtk_disp_pwm_apply,
.get_state = mtk_disp_pwm_get_state,
- .owner = THIS_MODULE,
};
static int mtk_disp_pwm_probe(struct platform_device *pdev)
@@ -247,34 +246,25 @@ static int mtk_disp_pwm_probe(struct platform_device *pdev)
mdp->clk_main = devm_clk_get(&pdev->dev, "main");
if (IS_ERR(mdp->clk_main))
- return PTR_ERR(mdp->clk_main);
+ return dev_err_probe(&pdev->dev, PTR_ERR(mdp->clk_main),
+ "Failed to get main clock\n");
mdp->clk_mm = devm_clk_get(&pdev->dev, "mm");
if (IS_ERR(mdp->clk_mm))
- return PTR_ERR(mdp->clk_mm);
+ return dev_err_probe(&pdev->dev, PTR_ERR(mdp->clk_mm),
+ "Failed to get mm clock\n");
mdp->chip.dev = &pdev->dev;
mdp->chip.ops = &mtk_disp_pwm_ops;
mdp->chip.npwm = 1;
- ret = pwmchip_add(&mdp->chip);
- if (ret < 0) {
- dev_err(&pdev->dev, "pwmchip_add() failed: %pe\n", ERR_PTR(ret));
- return ret;
- }
-
- platform_set_drvdata(pdev, mdp);
+ ret = devm_pwmchip_add(&pdev->dev, &mdp->chip);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "pwmchip_add() failed\n");
return 0;
}
-static void mtk_disp_pwm_remove(struct platform_device *pdev)
-{
- struct mtk_disp_pwm *mdp = platform_get_drvdata(pdev);
-
- pwmchip_remove(&mdp->chip);
-}
-
static const struct mtk_pwm_data mt2701_pwm_data = {
.enable_mask = BIT(16),
.con0 = 0xa8,
@@ -320,7 +310,6 @@ static struct platform_driver mtk_disp_pwm_driver = {
.of_match_table = mtk_disp_pwm_of_match,
},
.probe = mtk_disp_pwm_probe,
- .remove_new = mtk_disp_pwm_remove,
};
module_platform_driver(mtk_disp_pwm_driver);
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index 766dbc58dad8..1b5e787d78f1 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -115,7 +115,6 @@ static int mxs_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops mxs_pwm_ops = {
.apply = mxs_pwm_apply,
- .owner = THIS_MODULE,
};
static int mxs_pwm_probe(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-ntxec.c b/drivers/pwm/pwm-ntxec.c
index 7514ea384ec5..78606039eda2 100644
--- a/drivers/pwm/pwm-ntxec.c
+++ b/drivers/pwm/pwm-ntxec.c
@@ -126,7 +126,6 @@ static int ntxec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm_dev,
}
static const struct pwm_ops ntxec_pwm_ops = {
- .owner = THIS_MODULE,
.apply = ntxec_pwm_apply,
/*
* No .get_state callback, because the current state cannot be read
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index 4889fbd8a431..13161e08dd6e 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -311,7 +311,6 @@ unlock_mutex:
static const struct pwm_ops pwm_omap_dmtimer_ops = {
.apply = pwm_omap_dmtimer_apply,
- .owner = THIS_MODULE,
};
static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
@@ -466,7 +465,7 @@ MODULE_DEVICE_TABLE(of, pwm_omap_dmtimer_of_match);
static struct platform_driver pwm_omap_dmtimer_driver = {
.driver = {
.name = "omap-dmtimer-pwm",
- .of_match_table = of_match_ptr(pwm_omap_dmtimer_of_match),
+ .of_match_table = pwm_omap_dmtimer_of_match,
},
.probe = pwm_omap_dmtimer_probe,
.remove_new = pwm_omap_dmtimer_remove,
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index 3038a68412a7..e79b1de8c4d8 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -505,7 +505,6 @@ static const struct pwm_ops pca9685_pwm_ops = {
.get_state = pca9685_pwm_get_state,
.request = pca9685_pwm_request,
.free = pca9685_pwm_free,
- .owner = THIS_MODULE,
};
static const struct regmap_config pca9685_regmap_i2c_config = {
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index 1e475ed10180..76685f926c75 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -24,7 +24,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/pwm.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <asm/div64.h>
@@ -135,7 +135,6 @@ static int pxa_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops pxa_pwm_ops = {
.apply = pxa_pwm_apply,
- .owner = THIS_MODULE,
};
#ifdef CONFIG_OF
diff --git a/drivers/pwm/pwm-raspberrypi-poe.c b/drivers/pwm/pwm-raspberrypi-poe.c
index 2939b71a7ba7..1ad814fdec6b 100644
--- a/drivers/pwm/pwm-raspberrypi-poe.c
+++ b/drivers/pwm/pwm-raspberrypi-poe.c
@@ -135,7 +135,6 @@ static int raspberrypi_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops raspberrypi_pwm_ops = {
.get_state = raspberrypi_pwm_get_state,
.apply = raspberrypi_pwm_apply,
- .owner = THIS_MODULE,
};
static int raspberrypi_pwm_probe(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 5b5f357c44de..13269f55fccf 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -198,7 +198,6 @@ static const struct pwm_ops rcar_pwm_ops = {
.request = rcar_pwm_request,
.free = rcar_pwm_free,
.apply = rcar_pwm_apply,
- .owner = THIS_MODULE,
};
static int rcar_pwm_probe(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c
index d7311614c846..4239f2c3e8b2 100644
--- a/drivers/pwm/pwm-renesas-tpu.c
+++ b/drivers/pwm/pwm-renesas-tpu.c
@@ -85,6 +85,7 @@ struct tpu_device {
void __iomem *base;
struct clk *clk;
+ struct tpu_pwm_device tpd[TPU_CHANNEL_MAX];
};
#define to_tpu_device(c) container_of(c, struct tpu_device, chip)
@@ -215,9 +216,7 @@ static int tpu_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
if (pwm->hwpwm >= TPU_CHANNEL_MAX)
return -EINVAL;
- tpd = kzalloc(sizeof(*tpd), GFP_KERNEL);
- if (tpd == NULL)
- return -ENOMEM;
+ tpd = &tpu->tpd[pwm->hwpwm];
tpd->tpu = tpu;
tpd->channel = pwm->hwpwm;
@@ -228,24 +227,22 @@ static int tpu_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
tpd->timer_on = false;
- pwm_set_chip_data(pwm, tpd);
-
return 0;
}
static void tpu_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
+ struct tpu_device *tpu = to_tpu_device(chip);
+ struct tpu_pwm_device *tpd = &tpu->tpd[pwm->hwpwm];
tpu_pwm_timer_stop(tpd);
- kfree(tpd);
}
static int tpu_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
u64 duty_ns, u64 period_ns, bool enabled)
{
- struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
struct tpu_device *tpu = to_tpu_device(chip);
+ struct tpu_pwm_device *tpd = &tpu->tpd[pwm->hwpwm];
unsigned int prescaler;
bool duty_only = false;
u32 clk_rate;
@@ -353,7 +350,8 @@ static int tpu_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
static int tpu_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
enum pwm_polarity polarity)
{
- struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
+ struct tpu_device *tpu = to_tpu_device(chip);
+ struct tpu_pwm_device *tpd = &tpu->tpd[pwm->hwpwm];
tpd->polarity = polarity;
@@ -362,7 +360,8 @@ static int tpu_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
static int tpu_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
+ struct tpu_device *tpu = to_tpu_device(chip);
+ struct tpu_pwm_device *tpd = &tpu->tpd[pwm->hwpwm];
int ret;
ret = tpu_pwm_timer_start(tpd);
@@ -384,7 +383,8 @@ static int tpu_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
static void tpu_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
+ struct tpu_device *tpu = to_tpu_device(chip);
+ struct tpu_pwm_device *tpd = &tpu->tpd[pwm->hwpwm];
/* The timer must be running to modify the pin output configuration. */
tpu_pwm_timer_start(tpd);
@@ -431,7 +431,6 @@ static const struct pwm_ops tpu_pwm_ops = {
.request = tpu_pwm_request,
.free = tpu_pwm_free,
.apply = tpu_pwm_apply,
- .owner = THIS_MODULE,
};
/* -----------------------------------------------------------------------------
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index 03ee18fb82d5..cce4381e188a 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -228,7 +228,6 @@ out:
static const struct pwm_ops rockchip_pwm_ops = {
.get_state = rockchip_pwm_get_state,
.apply = rockchip_pwm_apply,
- .owner = THIS_MODULE,
};
static const struct rockchip_pwm_data pwm_data_v1 = {
diff --git a/drivers/pwm/pwm-rz-mtu3.c b/drivers/pwm/pwm-rz-mtu3.c
index a56cecb0e46e..bdda315b3bd3 100644
--- a/drivers/pwm/pwm-rz-mtu3.c
+++ b/drivers/pwm/pwm-rz-mtu3.c
@@ -438,7 +438,6 @@ static const struct pwm_ops rz_mtu3_pwm_ops = {
.free = rz_mtu3_pwm_free,
.get_state = rz_mtu3_pwm_get_state,
.apply = rz_mtu3_pwm_apply,
- .owner = THIS_MODULE,
};
static int rz_mtu3_pwm_pm_runtime_suspend(struct device *dev)
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index e8828f57ab15..69d9f4577b34 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -77,6 +77,7 @@ struct samsung_pwm_channel {
* @base_clk: base clock used to drive the timers
* @tclk0: external clock 0 (can be ERR_PTR if not present)
* @tclk1: external clock 1 (can be ERR_PTR if not present)
+ * @channel: per channel driver data
*/
struct samsung_pwm_chip {
struct pwm_chip chip;
@@ -88,6 +89,7 @@ struct samsung_pwm_chip {
struct clk *base_clk;
struct clk *tclk0;
struct clk *tclk1;
+ struct samsung_pwm_channel channel[SAMSUNG_PWM_NUM];
};
#ifndef CONFIG_CLKSRC_SAMSUNG_PWM
@@ -117,21 +119,21 @@ static inline unsigned int to_tcon_channel(unsigned int channel)
return (channel == 0) ? 0 : (channel + 1);
}
-static void __pwm_samsung_manual_update(struct samsung_pwm_chip *chip,
+static void __pwm_samsung_manual_update(struct samsung_pwm_chip *our_chip,
struct pwm_device *pwm)
{
unsigned int tcon_chan = to_tcon_channel(pwm->hwpwm);
u32 tcon;
- tcon = readl(chip->base + REG_TCON);
+ tcon = readl(our_chip->base + REG_TCON);
tcon |= TCON_MANUALUPDATE(tcon_chan);
- writel(tcon, chip->base + REG_TCON);
+ writel(tcon, our_chip->base + REG_TCON);
tcon &= ~TCON_MANUALUPDATE(tcon_chan);
- writel(tcon, chip->base + REG_TCON);
+ writel(tcon, our_chip->base + REG_TCON);
}
-static void pwm_samsung_set_divisor(struct samsung_pwm_chip *pwm,
+static void pwm_samsung_set_divisor(struct samsung_pwm_chip *our_chip,
unsigned int channel, u8 divisor)
{
u8 shift = TCFG1_SHIFT(channel);
@@ -139,39 +141,39 @@ static void pwm_samsung_set_divisor(struct samsung_pwm_chip *pwm,
u32 reg;
u8 bits;
- bits = (fls(divisor) - 1) - pwm->variant.div_base;
+ bits = (fls(divisor) - 1) - our_chip->variant.div_base;
spin_lock_irqsave(&samsung_pwm_lock, flags);
- reg = readl(pwm->base + REG_TCFG1);
+ reg = readl(our_chip->base + REG_TCFG1);
reg &= ~(TCFG1_MUX_MASK << shift);
reg |= bits << shift;
- writel(reg, pwm->base + REG_TCFG1);
+ writel(reg, our_chip->base + REG_TCFG1);
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}
-static int pwm_samsung_is_tdiv(struct samsung_pwm_chip *chip, unsigned int chan)
+static int pwm_samsung_is_tdiv(struct samsung_pwm_chip *our_chip, unsigned int chan)
{
- struct samsung_pwm_variant *variant = &chip->variant;
+ struct samsung_pwm_variant *variant = &our_chip->variant;
u32 reg;
- reg = readl(chip->base + REG_TCFG1);
+ reg = readl(our_chip->base + REG_TCFG1);
reg >>= TCFG1_SHIFT(chan);
reg &= TCFG1_MUX_MASK;
return (BIT(reg) & variant->tclk_mask) == 0;
}
-static unsigned long pwm_samsung_get_tin_rate(struct samsung_pwm_chip *chip,
+static unsigned long pwm_samsung_get_tin_rate(struct samsung_pwm_chip *our_chip,
unsigned int chan)
{
unsigned long rate;
u32 reg;
- rate = clk_get_rate(chip->base_clk);
+ rate = clk_get_rate(our_chip->base_clk);
- reg = readl(chip->base + REG_TCFG0);
+ reg = readl(our_chip->base + REG_TCFG0);
if (chan >= 2)
reg >>= TCFG0_PRESCALER1_SHIFT;
reg &= TCFG0_PRESCALER_MASK;
@@ -179,28 +181,28 @@ static unsigned long pwm_samsung_get_tin_rate(struct samsung_pwm_chip *chip,
return rate / (reg + 1);
}
-static unsigned long pwm_samsung_calc_tin(struct samsung_pwm_chip *chip,
+static unsigned long pwm_samsung_calc_tin(struct samsung_pwm_chip *our_chip,
unsigned int chan, unsigned long freq)
{
- struct samsung_pwm_variant *variant = &chip->variant;
+ struct samsung_pwm_variant *variant = &our_chip->variant;
unsigned long rate;
struct clk *clk;
u8 div;
- if (!pwm_samsung_is_tdiv(chip, chan)) {
- clk = (chan < 2) ? chip->tclk0 : chip->tclk1;
+ if (!pwm_samsung_is_tdiv(our_chip, chan)) {
+ clk = (chan < 2) ? our_chip->tclk0 : our_chip->tclk1;
if (!IS_ERR(clk)) {
rate = clk_get_rate(clk);
if (rate)
return rate;
}
- dev_warn(chip->chip.dev,
+ dev_warn(our_chip->chip.dev,
"tclk of PWM %d is inoperational, using tdiv\n", chan);
}
- rate = pwm_samsung_get_tin_rate(chip, chan);
- dev_dbg(chip->chip.dev, "tin parent at %lu\n", rate);
+ rate = pwm_samsung_get_tin_rate(our_chip, chan);
+ dev_dbg(our_chip->chip.dev, "tin parent at %lu\n", rate);
/*
* Compare minimum PWM frequency that can be achieved with possible
@@ -220,7 +222,7 @@ static unsigned long pwm_samsung_calc_tin(struct samsung_pwm_chip *chip,
div = variant->div_base;
}
- pwm_samsung_set_divisor(chip, chan, BIT(div));
+ pwm_samsung_set_divisor(our_chip, chan, BIT(div));
return rate >> div;
}
@@ -228,7 +230,6 @@ static unsigned long pwm_samsung_calc_tin(struct samsung_pwm_chip *chip,
static int pwm_samsung_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct samsung_pwm_chip *our_chip = to_samsung_pwm_chip(chip);
- struct samsung_pwm_channel *our_chan;
if (!(our_chip->variant.output_mask & BIT(pwm->hwpwm))) {
dev_warn(chip->dev,
@@ -237,20 +238,11 @@ static int pwm_samsung_request(struct pwm_chip *chip, struct pwm_device *pwm)
return -EINVAL;
}
- our_chan = kzalloc(sizeof(*our_chan), GFP_KERNEL);
- if (!our_chan)
- return -ENOMEM;
-
- pwm_set_chip_data(pwm, our_chan);
+ memset(&our_chip->channel[pwm->hwpwm], 0, sizeof(our_chip->channel[pwm->hwpwm]));
return 0;
}
-static void pwm_samsung_free(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- kfree(pwm_get_chip_data(pwm));
-}
-
static int pwm_samsung_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct samsung_pwm_chip *our_chip = to_samsung_pwm_chip(chip);
@@ -302,14 +294,14 @@ static void pwm_samsung_disable(struct pwm_chip *chip, struct pwm_device *pwm)
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}
-static void pwm_samsung_manual_update(struct samsung_pwm_chip *chip,
+static void pwm_samsung_manual_update(struct samsung_pwm_chip *our_chip,
struct pwm_device *pwm)
{
unsigned long flags;
spin_lock_irqsave(&samsung_pwm_lock, flags);
- __pwm_samsung_manual_update(chip, pwm);
+ __pwm_samsung_manual_update(our_chip, pwm);
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}
@@ -318,7 +310,7 @@ static int __pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns, bool force_period)
{
struct samsung_pwm_chip *our_chip = to_samsung_pwm_chip(chip);
- struct samsung_pwm_channel *chan = pwm_get_chip_data(pwm);
+ struct samsung_pwm_channel *chan = &our_chip->channel[pwm->hwpwm];
u32 tin_ns = chan->tin_ns, tcnt, tcmp, oldtcmp;
tcnt = readl(our_chip->base + REG_TCNTB(pwm->hwpwm));
@@ -393,7 +385,7 @@ static int pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm,
return __pwm_samsung_config(chip, pwm, duty_ns, period_ns, false);
}
-static void pwm_samsung_set_invert(struct samsung_pwm_chip *chip,
+static void pwm_samsung_set_invert(struct samsung_pwm_chip *our_chip,
unsigned int channel, bool invert)
{
unsigned int tcon_chan = to_tcon_channel(channel);
@@ -402,17 +394,17 @@ static void pwm_samsung_set_invert(struct samsung_pwm_chip *chip,
spin_lock_irqsave(&samsung_pwm_lock, flags);
- tcon = readl(chip->base + REG_TCON);
+ tcon = readl(our_chip->base + REG_TCON);
if (invert) {
- chip->inverter_mask |= BIT(channel);
+ our_chip->inverter_mask |= BIT(channel);
tcon |= TCON_INVERT(tcon_chan);
} else {
- chip->inverter_mask &= ~BIT(channel);
+ our_chip->inverter_mask &= ~BIT(channel);
tcon &= ~TCON_INVERT(tcon_chan);
}
- writel(tcon, chip->base + REG_TCON);
+ writel(tcon, our_chip->base + REG_TCON);
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}
@@ -473,9 +465,7 @@ static int pwm_samsung_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops pwm_samsung_ops = {
.request = pwm_samsung_request,
- .free = pwm_samsung_free,
.apply = pwm_samsung_apply,
- .owner = THIS_MODULE,
};
#ifdef CONFIG_OF
@@ -517,9 +507,9 @@ static const struct of_device_id samsung_pwm_matches[] = {
};
MODULE_DEVICE_TABLE(of, samsung_pwm_matches);
-static int pwm_samsung_parse_dt(struct samsung_pwm_chip *chip)
+static int pwm_samsung_parse_dt(struct samsung_pwm_chip *our_chip)
{
- struct device_node *np = chip->chip.dev->of_node;
+ struct device_node *np = our_chip->chip.dev->of_node;
const struct of_device_id *match;
struct property *prop;
const __be32 *cur;
@@ -529,22 +519,22 @@ static int pwm_samsung_parse_dt(struct samsung_pwm_chip *chip)
if (!match)
return -ENODEV;
- memcpy(&chip->variant, match->data, sizeof(chip->variant));
+ memcpy(&our_chip->variant, match->data, sizeof(our_chip->variant));
of_property_for_each_u32(np, "samsung,pwm-outputs", prop, cur, val) {
if (val >= SAMSUNG_PWM_NUM) {
- dev_err(chip->chip.dev,
+ dev_err(our_chip->chip.dev,
"%s: invalid channel index in samsung,pwm-outputs property\n",
__func__);
continue;
}
- chip->variant.output_mask |= BIT(val);
+ our_chip->variant.output_mask |= BIT(val);
}
return 0;
}
#else
-static int pwm_samsung_parse_dt(struct samsung_pwm_chip *chip)
+static int pwm_samsung_parse_dt(struct samsung_pwm_chip *our_chip)
{
return -ENODEV;
}
@@ -553,21 +543,21 @@ static int pwm_samsung_parse_dt(struct samsung_pwm_chip *chip)
static int pwm_samsung_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct samsung_pwm_chip *chip;
+ struct samsung_pwm_chip *our_chip;
unsigned int chan;
int ret;
- chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
- if (chip == NULL)
+ our_chip = devm_kzalloc(&pdev->dev, sizeof(*our_chip), GFP_KERNEL);
+ if (our_chip == NULL)
return -ENOMEM;
- chip->chip.dev = &pdev->dev;
- chip->chip.ops = &pwm_samsung_ops;
- chip->chip.npwm = SAMSUNG_PWM_NUM;
- chip->inverter_mask = BIT(SAMSUNG_PWM_NUM) - 1;
+ our_chip->chip.dev = &pdev->dev;
+ our_chip->chip.ops = &pwm_samsung_ops;
+ our_chip->chip.npwm = SAMSUNG_PWM_NUM;
+ our_chip->inverter_mask = BIT(SAMSUNG_PWM_NUM) - 1;
if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
- ret = pwm_samsung_parse_dt(chip);
+ ret = pwm_samsung_parse_dt(our_chip);
if (ret)
return ret;
} else {
@@ -576,58 +566,58 @@ static int pwm_samsung_probe(struct platform_device *pdev)
return -EINVAL;
}
- memcpy(&chip->variant, pdev->dev.platform_data,
- sizeof(chip->variant));
+ memcpy(&our_chip->variant, pdev->dev.platform_data,
+ sizeof(our_chip->variant));
}
- chip->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(chip->base))
- return PTR_ERR(chip->base);
+ our_chip->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(our_chip->base))
+ return PTR_ERR(our_chip->base);
- chip->base_clk = devm_clk_get(&pdev->dev, "timers");
- if (IS_ERR(chip->base_clk)) {
+ our_chip->base_clk = devm_clk_get(&pdev->dev, "timers");
+ if (IS_ERR(our_chip->base_clk)) {
dev_err(dev, "failed to get timer base clk\n");
- return PTR_ERR(chip->base_clk);
+ return PTR_ERR(our_chip->base_clk);
}
- ret = clk_prepare_enable(chip->base_clk);
+ ret = clk_prepare_enable(our_chip->base_clk);
if (ret < 0) {
dev_err(dev, "failed to enable base clock\n");
return ret;
}
for (chan = 0; chan < SAMSUNG_PWM_NUM; ++chan)
- if (chip->variant.output_mask & BIT(chan))
- pwm_samsung_set_invert(chip, chan, true);
+ if (our_chip->variant.output_mask & BIT(chan))
+ pwm_samsung_set_invert(our_chip, chan, true);
/* Following clocks are optional. */
- chip->tclk0 = devm_clk_get(&pdev->dev, "pwm-tclk0");
- chip->tclk1 = devm_clk_get(&pdev->dev, "pwm-tclk1");
+ our_chip->tclk0 = devm_clk_get(&pdev->dev, "pwm-tclk0");
+ our_chip->tclk1 = devm_clk_get(&pdev->dev, "pwm-tclk1");
- platform_set_drvdata(pdev, chip);
+ platform_set_drvdata(pdev, our_chip);
- ret = pwmchip_add(&chip->chip);
+ ret = pwmchip_add(&our_chip->chip);
if (ret < 0) {
dev_err(dev, "failed to register PWM chip\n");
- clk_disable_unprepare(chip->base_clk);
+ clk_disable_unprepare(our_chip->base_clk);
return ret;
}
dev_dbg(dev, "base_clk at %lu, tclk0 at %lu, tclk1 at %lu\n",
- clk_get_rate(chip->base_clk),
- !IS_ERR(chip->tclk0) ? clk_get_rate(chip->tclk0) : 0,
- !IS_ERR(chip->tclk1) ? clk_get_rate(chip->tclk1) : 0);
+ clk_get_rate(our_chip->base_clk),
+ !IS_ERR(our_chip->tclk0) ? clk_get_rate(our_chip->tclk0) : 0,
+ !IS_ERR(our_chip->tclk1) ? clk_get_rate(our_chip->tclk1) : 0);
return 0;
}
static void pwm_samsung_remove(struct platform_device *pdev)
{
- struct samsung_pwm_chip *chip = platform_get_drvdata(pdev);
+ struct samsung_pwm_chip *our_chip = platform_get_drvdata(pdev);
- pwmchip_remove(&chip->chip);
+ pwmchip_remove(&our_chip->chip);
- clk_disable_unprepare(chip->base_clk);
+ clk_disable_unprepare(our_chip->base_clk);
}
#ifdef CONFIG_PM_SLEEP
@@ -639,9 +629,9 @@ static int pwm_samsung_resume(struct device *dev)
for (i = 0; i < SAMSUNG_PWM_NUM; i++) {
struct pwm_device *pwm = &chip->pwms[i];
- struct samsung_pwm_channel *chan = pwm_get_chip_data(pwm);
+ struct samsung_pwm_channel *chan = &our_chip->channel[i];
- if (!chan)
+ if (!test_bit(PWMF_REQUESTED, &pwm->flags))
continue;
if (our_chip->variant.output_mask & BIT(i))
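/*
 * Sketch, not part of the patch: once the per-channel array is always
 * allocated, a non-NULL chip_data pointer can no longer signal "this channel
 * was requested", so the resume path above switches to the PWMF_REQUESTED
 * flag.  Assumed shape of such a loop, with hypothetical bar_* names:
 */
static void bar_restore_channels(struct pwm_chip *chip)
{
	unsigned int i;

	for (i = 0; i < chip->npwm; i++) {
		struct pwm_device *pwm = &chip->pwms[i];

		/* Skip channels that were never requested by a consumer. */
		if (!test_bit(PWMF_REQUESTED, &pwm->flags))
			continue;

		/* ... reprogram period/duty/polarity for this channel ... */
	}
}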
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
index eabddb7c7820..089e50bdbbf0 100644
--- a/drivers/pwm/pwm-sifive.c
+++ b/drivers/pwm/pwm-sifive.c
@@ -203,7 +203,6 @@ static const struct pwm_ops pwm_sifive_ops = {
.free = pwm_sifive_free,
.get_state = pwm_sifive_get_state,
.apply = pwm_sifive_apply,
- .owner = THIS_MODULE,
};
static int pwm_sifive_clock_notifier(struct notifier_block *nb,
diff --git a/drivers/pwm/pwm-sl28cpld.c b/drivers/pwm/pwm-sl28cpld.c
index 9e42e3a74ad6..88b01ff9e460 100644
--- a/drivers/pwm/pwm-sl28cpld.c
+++ b/drivers/pwm/pwm-sl28cpld.c
@@ -200,7 +200,6 @@ static int sl28cpld_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops sl28cpld_pwm_ops = {
.apply = sl28cpld_pwm_apply,
.get_state = sl28cpld_pwm_get_state,
- .owner = THIS_MODULE,
};
static int sl28cpld_pwm_probe(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c
index 4e1cfd8d7c03..ff991319feef 100644
--- a/drivers/pwm/pwm-spear.c
+++ b/drivers/pwm/pwm-spear.c
@@ -189,7 +189,6 @@ static int spear_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops spear_pwm_ops = {
.apply = spear_pwm_apply,
- .owner = THIS_MODULE,
};
static int spear_pwm_probe(struct platform_device *pdev)
@@ -207,26 +206,21 @@ static int spear_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pc->mmio_base))
return PTR_ERR(pc->mmio_base);
- pc->clk = devm_clk_get(&pdev->dev, NULL);
+ pc->clk = devm_clk_get_prepared(&pdev->dev, NULL);
if (IS_ERR(pc->clk))
- return PTR_ERR(pc->clk);
-
- platform_set_drvdata(pdev, pc);
+ return dev_err_probe(&pdev->dev, PTR_ERR(pc->clk),
+ "Failed to get clock\n");
pc->chip.dev = &pdev->dev;
pc->chip.ops = &spear_pwm_ops;
pc->chip.npwm = NUM_PWM;
- ret = clk_prepare(pc->clk);
- if (ret)
- return ret;
-
if (of_device_is_compatible(np, "st,spear1340-pwm")) {
ret = clk_enable(pc->clk);
- if (ret) {
- clk_unprepare(pc->clk);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to enable clk\n");
+
/*
* Following enables PWM chip, channels would still be
* enabled individually through their control register
@@ -238,23 +232,11 @@ static int spear_pwm_probe(struct platform_device *pdev)
clk_disable(pc->clk);
}
- ret = pwmchip_add(&pc->chip);
- if (ret < 0) {
- clk_unprepare(pc->clk);
- dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
- }
+ ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "pwmchip_add() failed\n");
- return ret;
-}
-
-static void spear_pwm_remove(struct platform_device *pdev)
-{
- struct spear_pwm_chip *pc = platform_get_drvdata(pdev);
-
- pwmchip_remove(&pc->chip);
-
- /* clk was prepared in probe, hence unprepare it here */
- clk_unprepare(pc->clk);
+ return 0;
}
static const struct of_device_id spear_pwm_of_match[] = {
@@ -271,7 +253,6 @@ static struct platform_driver spear_pwm_driver = {
.of_match_table = spear_pwm_of_match,
},
.probe = spear_pwm_probe,
- .remove_new = spear_pwm_remove,
};
module_platform_driver(spear_pwm_driver);
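/*
 * Sketch, not part of the patch: the spear probe (and the vt8500 one further
 * below) is converted to fully device-managed resources, which is what allows
 * dropping the remove callback.  Assumed skeleton of the resulting probe,
 * with hypothetical baz_* names:
 */
struct baz_pwm_chip {
	struct pwm_chip chip;
	struct clk *clk;
};

static const struct pwm_ops baz_pwm_ops = {
	/* .apply = ..., */
};

static int baz_pwm_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct baz_pwm_chip *pc;

	pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return -ENOMEM;

	/* Prepared by devm, unprepared automatically on unbind. */
	pc->clk = devm_clk_get_prepared(dev, NULL);
	if (IS_ERR(pc->clk))
		return dev_err_probe(dev, PTR_ERR(pc->clk), "Failed to get clock\n");

	pc->chip.dev = dev;
	pc->chip.ops = &baz_pwm_ops;
	pc->chip.npwm = 1;

	/* Chip is removed automatically, so no .remove_new is needed. */
	return devm_pwmchip_add(dev, &pc->chip);
}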
diff --git a/drivers/pwm/pwm-sprd.c b/drivers/pwm/pwm-sprd.c
index 1499c8c1fe37..77939e161006 100644
--- a/drivers/pwm/pwm-sprd.c
+++ b/drivers/pwm/pwm-sprd.c
@@ -40,6 +40,11 @@ struct sprd_pwm_chip {
struct sprd_pwm_chn chn[SPRD_PWM_CHN_NUM];
};
+static inline struct sprd_pwm_chip *sprd_pwm_from_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct sprd_pwm_chip, chip);
+}
+
/*
* The list of clocks required by PWM channels, and each channel has 2 clocks:
* enable clock and pwm clock.
@@ -69,8 +74,7 @@ static void sprd_pwm_write(struct sprd_pwm_chip *spc, u32 hwid,
static int sprd_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
- struct sprd_pwm_chip *spc =
- container_of(chip, struct sprd_pwm_chip, chip);
+ struct sprd_pwm_chip *spc = sprd_pwm_from_chip(chip);
struct sprd_pwm_chn *chn = &spc->chn[pwm->hwpwm];
u32 val, duty, prescale;
u64 tmp;
@@ -162,8 +166,7 @@ static int sprd_pwm_config(struct sprd_pwm_chip *spc, struct pwm_device *pwm,
static int sprd_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
- struct sprd_pwm_chip *spc =
- container_of(chip, struct sprd_pwm_chip, chip);
+ struct sprd_pwm_chip *spc = sprd_pwm_from_chip(chip);
struct sprd_pwm_chn *chn = &spc->chn[pwm->hwpwm];
struct pwm_state *cstate = &pwm->state;
int ret;
@@ -210,7 +213,6 @@ static int sprd_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops sprd_pwm_ops = {
.apply = sprd_pwm_apply,
.get_state = sprd_pwm_get_state,
- .owner = THIS_MODULE,
};
static int sprd_pwm_clk_init(struct sprd_pwm_chip *spc)
@@ -240,10 +242,8 @@ static int sprd_pwm_clk_init(struct sprd_pwm_chip *spc)
chn->clk_rate = clk_get_rate(clk_pwm);
}
- if (!i) {
- dev_err(spc->dev, "no available PWM channels\n");
- return -ENODEV;
- }
+ if (!i)
+ return dev_err_probe(spc->dev, -ENODEV, "no available PWM channels\n");
spc->num_pwms = i;
@@ -264,7 +264,6 @@ static int sprd_pwm_probe(struct platform_device *pdev)
return PTR_ERR(spc->base);
spc->dev = &pdev->dev;
- platform_set_drvdata(pdev, spc);
ret = sprd_pwm_clk_init(spc);
if (ret)
@@ -274,20 +273,13 @@ static int sprd_pwm_probe(struct platform_device *pdev)
spc->chip.ops = &sprd_pwm_ops;
spc->chip.npwm = spc->num_pwms;
- ret = pwmchip_add(&spc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &spc->chip);
if (ret)
dev_err(&pdev->dev, "failed to add PWM chip\n");
return ret;
}
-static void sprd_pwm_remove(struct platform_device *pdev)
-{
- struct sprd_pwm_chip *spc = platform_get_drvdata(pdev);
-
- pwmchip_remove(&spc->chip);
-}
-
static const struct of_device_id sprd_pwm_of_match[] = {
{ .compatible = "sprd,ums512-pwm", },
{ },
@@ -300,7 +292,6 @@ static struct platform_driver sprd_pwm_driver = {
.of_match_table = sprd_pwm_of_match,
},
.probe = sprd_pwm_probe,
- .remove_new = sprd_pwm_remove,
};
module_platform_driver(sprd_pwm_driver);
diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
index b1d1373648a3..dc92cea31cd0 100644
--- a/drivers/pwm/pwm-sti.c
+++ b/drivers/pwm/pwm-sti.c
@@ -79,6 +79,7 @@ struct sti_pwm_compat_data {
unsigned int cpt_num_devs;
unsigned int max_pwm_cnt;
unsigned int max_prescale;
+ struct sti_cpt_ddata *ddata;
};
struct sti_pwm_chip {
@@ -314,7 +315,7 @@ static int sti_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
{
struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
struct sti_pwm_compat_data *cdata = pc->cdata;
- struct sti_cpt_ddata *ddata = pwm_get_chip_data(pwm);
+ struct sti_cpt_ddata *ddata = &cdata->ddata[pwm->hwpwm];
struct device *dev = pc->dev;
unsigned int effective_ticks;
unsigned long long high, low;
@@ -420,7 +421,6 @@ static const struct pwm_ops sti_pwm_ops = {
.capture = sti_pwm_capture,
.apply = sti_pwm_apply,
.free = sti_pwm_free,
- .owner = THIS_MODULE,
};
static irqreturn_t sti_pwm_interrupt(int irq, void *data)
@@ -440,7 +440,7 @@ static irqreturn_t sti_pwm_interrupt(int irq, void *data)
while (cpt_int_stat) {
devicenum = ffs(cpt_int_stat) - 1;
- ddata = pwm_get_chip_data(&pc->chip.pwms[devicenum]);
+ ddata = &pc->cdata->ddata[devicenum];
/*
* Capture input:
@@ -638,30 +638,28 @@ static int sti_pwm_probe(struct platform_device *pdev)
dev_err(dev, "failed to prepare clock\n");
return ret;
}
+
+ cdata->ddata = devm_kzalloc(dev, cdata->cpt_num_devs * sizeof(*cdata->ddata), GFP_KERNEL);
+ if (!cdata->ddata)
+ return -ENOMEM;
}
pc->chip.dev = dev;
pc->chip.ops = &sti_pwm_ops;
pc->chip.npwm = pc->cdata->pwm_num_devs;
- ret = pwmchip_add(&pc->chip);
- if (ret < 0) {
- clk_unprepare(pc->pwm_clk);
- clk_unprepare(pc->cpt_clk);
- return ret;
- }
-
for (i = 0; i < cdata->cpt_num_devs; i++) {
- struct sti_cpt_ddata *ddata;
-
- ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
+ struct sti_cpt_ddata *ddata = &cdata->ddata[i];
init_waitqueue_head(&ddata->wait);
mutex_init(&ddata->lock);
+ }
- pwm_set_chip_data(&pc->chip.pwms[i], ddata);
+ ret = pwmchip_add(&pc->chip);
+ if (ret < 0) {
+ clk_unprepare(pc->pwm_clk);
+ clk_unprepare(pc->cpt_clk);
+ return ret;
}
platform_set_drvdata(pdev, pc);
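/*
 * Note, not part of the patch: the sti rework allocates the capture ddata
 * array and initializes its waitqueues and mutexes before pwmchip_add(), so
 * the data the interrupt handler indexes by device number exists before the
 * chip can be used.  For the sized allocation itself, the array-allocation
 * helper would express the same thing (editorial suggestion only):
 *
 *	cdata->ddata = devm_kcalloc(dev, cdata->cpt_num_devs,
 *				    sizeof(*cdata->ddata), GFP_KERNEL);
 */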
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index bb3a045a7334..b67974cc1872 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -189,7 +189,6 @@ static int stm32_pwm_lp_get_state(struct pwm_chip *chip,
}
static const struct pwm_ops stm32_pwm_lp_ops = {
- .owner = THIS_MODULE,
.apply = stm32_pwm_lp_apply,
.get_state = stm32_pwm_lp_get_state,
};
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index 3d6be7749e23..3303a754ea02 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -487,7 +487,6 @@ static int stm32_pwm_apply_locked(struct pwm_chip *chip, struct pwm_device *pwm,
}
static const struct pwm_ops stm32pwm_ops = {
- .owner = THIS_MODULE,
.apply = stm32_pwm_apply_locked,
.capture = IS_ENABLED(CONFIG_DMA_ENGINE) ? stm32_pwm_capture : NULL,
};
diff --git a/drivers/pwm/pwm-stmpe.c b/drivers/pwm/pwm-stmpe.c
index e205405c4828..a46f5b4dd816 100644
--- a/drivers/pwm/pwm-stmpe.c
+++ b/drivers/pwm/pwm-stmpe.c
@@ -287,7 +287,6 @@ static int stmpe_24xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops stmpe_24xx_pwm_ops = {
.apply = stmpe_24xx_pwm_apply,
- .owner = THIS_MODULE,
};
static int __init stmpe_pwm_probe(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index c84fcf1a13dc..1a439025540d 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -325,7 +325,6 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops sun4i_pwm_ops = {
.apply = sun4i_pwm_apply,
.get_state = sun4i_pwm_get_state,
- .owner = THIS_MODULE,
};
static const struct sun4i_pwm_data sun4i_pwm_dual_nobypass = {
diff --git a/drivers/pwm/pwm-sunplus.c b/drivers/pwm/pwm-sunplus.c
index 7705c7b86c3a..773e2f80526e 100644
--- a/drivers/pwm/pwm-sunplus.c
+++ b/drivers/pwm/pwm-sunplus.c
@@ -163,7 +163,6 @@ static int sunplus_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops sunplus_pwm_ops = {
.apply = sunplus_pwm_apply,
.get_state = sunplus_pwm_get_state,
- .owner = THIS_MODULE,
};
static void sunplus_pwm_clk_release(void *data)
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index a169a34e0778..39ea51e08c94 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -268,7 +268,6 @@ static int tegra_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops tegra_pwm_ops = {
.apply = tegra_pwm_apply,
- .owner = THIS_MODULE,
};
static int tegra_pwm_probe(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 8c94b266c1b2..11e3549cf103 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -205,7 +205,6 @@ static int ecap_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops ecap_pwm_ops = {
.apply = ecap_pwm_apply,
- .owner = THIS_MODULE,
};
static const struct of_device_id ecap_of_match[] = {
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index ecbfd7e954ec..66ac2655845f 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -437,7 +437,6 @@ static int ehrpwm_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops ehrpwm_pwm_ops = {
.free = ehrpwm_pwm_free,
.apply = ehrpwm_pwm_apply,
- .owner = THIS_MODULE,
};
static const struct of_device_id ehrpwm_of_match[] = {
diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c
index 8fb84b441853..625233f4703a 100644
--- a/drivers/pwm/pwm-twl-led.c
+++ b/drivers/pwm/pwm-twl-led.c
@@ -189,7 +189,6 @@ static int twl4030_pwmled_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops twl4030_pwmled_ops = {
.apply = twl4030_pwmled_apply,
- .owner = THIS_MODULE,
};
static int twl6030_pwmled_config(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -342,7 +341,6 @@ static const struct pwm_ops twl6030_pwmled_ops = {
.apply = twl6030_pwmled_apply,
.request = twl6030_pwmled_request,
.free = twl6030_pwmled_free,
- .owner = THIS_MODULE,
};
static int twl_pwmled_probe(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c
index 86567add79db..603d31f27470 100644
--- a/drivers/pwm/pwm-twl.c
+++ b/drivers/pwm/pwm-twl.c
@@ -333,12 +333,10 @@ static const struct pwm_ops twl4030_pwm_ops = {
.apply = twl4030_pwm_apply,
.request = twl4030_pwm_request,
.free = twl4030_pwm_free,
- .owner = THIS_MODULE,
};
static const struct pwm_ops twl6030_pwm_ops = {
.apply = twl6030_pwm_apply,
- .owner = THIS_MODULE,
};
static int twl_pwm_probe(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-visconti.c b/drivers/pwm/pwm-visconti.c
index 7f7591a2384c..8d736d558122 100644
--- a/drivers/pwm/pwm-visconti.c
+++ b/drivers/pwm/pwm-visconti.c
@@ -129,7 +129,6 @@ static int visconti_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops visconti_pwm_ops = {
.apply = visconti_pwm_apply,
.get_state = visconti_pwm_get_state,
- .owner = THIS_MODULE,
};
static int visconti_pwm_probe(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
index 6d46db51daac..5568d5312d3c 100644
--- a/drivers/pwm/pwm-vt8500.c
+++ b/drivers/pwm/pwm-vt8500.c
@@ -221,7 +221,6 @@ static int vt8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops vt8500_pwm_ops = {
.apply = vt8500_pwm_apply,
- .owner = THIS_MODULE,
};
static const struct of_device_id vt8500_pwm_dt_ids[] = {
@@ -236,10 +235,8 @@ static int vt8500_pwm_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
int ret;
- if (!np) {
- dev_err(&pdev->dev, "invalid devicetree node\n");
- return -EINVAL;
- }
+ if (!np)
+ return dev_err_probe(&pdev->dev, -EINVAL, "invalid devicetree node\n");
vt8500 = devm_kzalloc(&pdev->dev, sizeof(*vt8500), GFP_KERNEL);
if (vt8500 == NULL)
@@ -249,45 +246,23 @@ static int vt8500_pwm_probe(struct platform_device *pdev)
vt8500->chip.ops = &vt8500_pwm_ops;
vt8500->chip.npwm = VT8500_NR_PWMS;
- vt8500->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(vt8500->clk)) {
- dev_err(&pdev->dev, "clock source not specified\n");
- return PTR_ERR(vt8500->clk);
- }
+ vt8500->clk = devm_clk_get_prepared(&pdev->dev, NULL);
+ if (IS_ERR(vt8500->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(vt8500->clk), "clock source not specified\n");
vt8500->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vt8500->base))
return PTR_ERR(vt8500->base);
- ret = clk_prepare(vt8500->clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to prepare clock\n");
- return ret;
- }
-
- ret = pwmchip_add(&vt8500->chip);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to add PWM chip\n");
- clk_unprepare(vt8500->clk);
- return ret;
- }
-
- platform_set_drvdata(pdev, vt8500);
- return ret;
-}
-
-static void vt8500_pwm_remove(struct platform_device *pdev)
-{
- struct vt8500_chip *vt8500 = platform_get_drvdata(pdev);
-
- pwmchip_remove(&vt8500->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &vt8500->chip);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");
- clk_unprepare(vt8500->clk);
+ return 0;
}
static struct platform_driver vt8500_pwm_driver = {
.probe = vt8500_pwm_probe,
- .remove_new = vt8500_pwm_remove,
.driver = {
.name = "vt8500-pwm",
.of_match_table = vt8500_pwm_dt_ids,
diff --git a/drivers/pwm/pwm-xilinx.c b/drivers/pwm/pwm-xilinx.c
index 85153ee90809..5f3c2a6fed11 100644
--- a/drivers/pwm/pwm-xilinx.c
+++ b/drivers/pwm/pwm-xilinx.c
@@ -198,7 +198,6 @@ static int xilinx_pwm_get_state(struct pwm_chip *chip,
static const struct pwm_ops xilinx_pwm_ops = {
.apply = xilinx_pwm_apply,
.get_state = xilinx_pwm_get_state,
- .owner = THIS_MODULE,
};
static const struct regmap_config xilinx_pwm_regmap_config = {
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index a115730ebf14..27afbb9d544b 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -250,7 +250,9 @@ static DEFINE_MUTEX(mport_devs_lock);
static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
#endif
-static struct class *dev_class;
+static const struct class dev_class = {
+ .name = DRV_NAME,
+};
static dev_t dev_number;
static void mport_release_mapping(struct kref *ref);
@@ -2379,7 +2381,7 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
device_initialize(&md->dev);
md->dev.devt = MKDEV(MAJOR(dev_number), mport->id);
- md->dev.class = dev_class;
+ md->dev.class = &dev_class;
md->dev.parent = &mport->dev;
md->dev.release = mport_device_release;
dev_set_name(&md->dev, DEV_NAME "%d", mport->id);
@@ -2600,10 +2602,10 @@ static int __init mport_init(void)
int ret;
/* Create device class needed by udev */
- dev_class = class_create(DRV_NAME);
- if (IS_ERR(dev_class)) {
+ ret = class_register(&dev_class);
+ if (ret) {
rmcd_error("Unable to create " DRV_NAME " class");
- return PTR_ERR(dev_class);
+ return ret;
}
ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
@@ -2624,7 +2626,7 @@ static int __init mport_init(void)
err_cli:
unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
err_chr:
- class_destroy(dev_class);
+ class_unregister(&dev_class);
return ret;
}
@@ -2634,7 +2636,7 @@ err_chr:
static void __exit mport_exit(void)
{
class_interface_unregister(&rio_mport_interface);
- class_destroy(dev_class);
+ class_unregister(&dev_class);
unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
}
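/*
 * Sketch, not part of the patch: this driver and rio_cm below move from a
 * dynamically allocated "struct class *" (class_create()/class_destroy()) to
 * a statically defined class registered with class_register().  Assumed
 * skeleton of that pattern, with a hypothetical "example" name:
 */
static const struct class example_class = {
	.name = "example",
};

static int __init example_init(void)
{
	int ret;

	ret = class_register(&example_class);
	if (ret)
		return ret;

	/* device_create(&example_class, ...) can be used from here on. */
	return 0;
}

static void __exit example_exit(void)
{
	class_unregister(&example_class);
}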
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
index 49f8d111e546..9135227301c8 100644
--- a/drivers/rapidio/rio_cm.c
+++ b/drivers/rapidio/rio_cm.c
@@ -233,7 +233,9 @@ static DEFINE_IDR(ch_idr);
static LIST_HEAD(cm_dev_list);
static DECLARE_RWSEM(rdev_sem);
-static struct class *dev_class;
+static const struct class dev_class = {
+ .name = DRV_NAME,
+};
static unsigned int dev_major;
static unsigned int dev_minor_base;
static dev_t dev_number;
@@ -2072,7 +2074,7 @@ static int riocm_cdev_add(dev_t devno)
return ret;
}
- riocm_cdev.dev = device_create(dev_class, NULL, devno, NULL, DEV_NAME);
+ riocm_cdev.dev = device_create(&dev_class, NULL, devno, NULL, DEV_NAME);
if (IS_ERR(riocm_cdev.dev)) {
cdev_del(&riocm_cdev.cdev);
return PTR_ERR(riocm_cdev.dev);
@@ -2293,15 +2295,15 @@ static int __init riocm_init(void)
int ret;
/* Create device class needed by udev */
- dev_class = class_create(DRV_NAME);
- if (IS_ERR(dev_class)) {
+ ret = class_register(&dev_class);
+ if (ret) {
riocm_error("Cannot create " DRV_NAME " class");
- return PTR_ERR(dev_class);
+ return ret;
}
ret = alloc_chrdev_region(&dev_number, 0, 1, DRV_NAME);
if (ret) {
- class_destroy(dev_class);
+ class_unregister(&dev_class);
return ret;
}
@@ -2349,7 +2351,7 @@ err_cl:
class_interface_unregister(&rio_mport_interface);
err_reg:
unregister_chrdev_region(dev_number, 1);
- class_destroy(dev_class);
+ class_unregister(&dev_class);
return ret;
}
@@ -2364,7 +2366,7 @@ static void __exit riocm_exit(void)
device_unregister(riocm_cdev.dev);
cdev_del(&(riocm_cdev.cdev));
- class_destroy(dev_class);
+ class_unregister(&dev_class);
unregister_chrdev_region(dev_number, 1);
}
diff --git a/drivers/remoteproc/mtk_common.h b/drivers/remoteproc/mtk_common.h
index ea6fa1100a00..6d7736a031f7 100644
--- a/drivers/remoteproc/mtk_common.h
+++ b/drivers/remoteproc/mtk_common.h
@@ -47,6 +47,7 @@
#define MT8192_SCP2SPM_IPC_CLR 0x4094
#define MT8192_GIPC_IN_SET 0x4098
#define MT8192_HOST_IPC_INT_BIT BIT(0)
+#define MT8195_CORE1_HOST_IPC_INT_BIT BIT(4)
#define MT8192_CORE0_SW_RSTN_CLR 0x10000
#define MT8192_CORE0_SW_RSTN_SET 0x10004
@@ -54,8 +55,28 @@
#define MT8192_CORE0_WDT_IRQ 0x10030
#define MT8192_CORE0_WDT_CFG 0x10034
+#define MT8195_SYS_STATUS 0x4004
+#define MT8195_CORE0_WDT BIT(16)
+#define MT8195_CORE1_WDT BIT(17)
+
#define MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS GENMASK(7, 4)
+#define MT8195_CPU1_SRAM_PD 0x1084
+#define MT8195_SSHUB2APMCU_IPC_SET 0x4088
+#define MT8195_SSHUB2APMCU_IPC_CLR 0x408C
+#define MT8195_CORE1_SW_RSTN_CLR 0x20000
+#define MT8195_CORE1_SW_RSTN_SET 0x20004
+#define MT8195_CORE1_MEM_ATT_PREDEF 0x20008
+#define MT8195_CORE1_WDT_IRQ 0x20030
+#define MT8195_CORE1_WDT_CFG 0x20034
+
+#define MT8195_SEC_CTRL 0x85000
+#define MT8195_CORE_OFFSET_ENABLE_D BIT(13)
+#define MT8195_CORE_OFFSET_ENABLE_I BIT(12)
+#define MT8195_L2TCM_OFFSET_RANGE_0_LOW 0x850b0
+#define MT8195_L2TCM_OFFSET_RANGE_0_HIGH 0x850b4
+#define MT8195_L2TCM_OFFSET 0x850d0
+
#define SCP_FW_VER_LEN 32
#define SCP_SHARE_BUFFER_SIZE 288
@@ -91,17 +112,24 @@ struct mtk_scp_of_data {
size_t ipi_buf_offset;
};
+struct mtk_scp_of_cluster {
+ void __iomem *reg_base;
+ void __iomem *l1tcm_base;
+ size_t l1tcm_size;
+ phys_addr_t l1tcm_phys;
+ struct list_head mtk_scp_list;
+ /* Prevent concurrent operations of this structure and L2TCM power control. */
+ struct mutex cluster_lock;
+ u32 l2tcm_refcnt;
+};
+
struct mtk_scp {
struct device *dev;
struct rproc *rproc;
struct clk *clk;
- void __iomem *reg_base;
void __iomem *sram_base;
size_t sram_size;
phys_addr_t sram_phys;
- void __iomem *l1tcm_base;
- size_t l1tcm_size;
- phys_addr_t l1tcm_phys;
const struct mtk_scp_of_data *data;
@@ -119,6 +147,9 @@ struct mtk_scp {
size_t dram_size;
struct rproc_subdev *rpmsg_subdev;
+
+ struct list_head elem;
+ struct mtk_scp_of_cluster *cluster;
};
/**
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
index dcc94ee2458d..a35409eda0cf 100644
--- a/drivers/remoteproc/mtk_scp.c
+++ b/drivers/remoteproc/mtk_scp.c
@@ -68,8 +68,14 @@ EXPORT_SYMBOL_GPL(scp_put);
static void scp_wdt_handler(struct mtk_scp *scp, u32 scp_to_host)
{
+ struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
+ struct mtk_scp *scp_node;
+
dev_err(scp->dev, "SCP watchdog timeout! 0x%x", scp_to_host);
- rproc_report_crash(scp->rproc, RPROC_WATCHDOG);
+
+ /* report watchdog timeout to all cores */
+ list_for_each_entry(scp_node, &scp_cluster->mtk_scp_list, elem)
+ rproc_report_crash(scp_node->rproc, RPROC_WATCHDOG);
}
static void scp_init_ipi_handler(void *data, unsigned int len, void *priv)
@@ -106,7 +112,7 @@ static void scp_ipi_handler(struct mtk_scp *scp)
scp_ipi_lock(scp, id);
handler = ipi_desc[id].handler;
if (!handler) {
- dev_err(scp->dev, "No such ipi id = %d\n", id);
+ dev_err(scp->dev, "No handler for ipi id = %d\n", id);
scp_ipi_unlock(scp, id);
return;
}
@@ -152,35 +158,45 @@ static void mt8183_scp_reset_assert(struct mtk_scp *scp)
{
u32 val;
- val = readl(scp->reg_base + MT8183_SW_RSTN);
+ val = readl(scp->cluster->reg_base + MT8183_SW_RSTN);
val &= ~MT8183_SW_RSTN_BIT;
- writel(val, scp->reg_base + MT8183_SW_RSTN);
+ writel(val, scp->cluster->reg_base + MT8183_SW_RSTN);
}
static void mt8183_scp_reset_deassert(struct mtk_scp *scp)
{
u32 val;
- val = readl(scp->reg_base + MT8183_SW_RSTN);
+ val = readl(scp->cluster->reg_base + MT8183_SW_RSTN);
val |= MT8183_SW_RSTN_BIT;
- writel(val, scp->reg_base + MT8183_SW_RSTN);
+ writel(val, scp->cluster->reg_base + MT8183_SW_RSTN);
}
static void mt8192_scp_reset_assert(struct mtk_scp *scp)
{
- writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);
+ writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);
}
static void mt8192_scp_reset_deassert(struct mtk_scp *scp)
{
- writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_CLR);
+ writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_CLR);
+}
+
+static void mt8195_scp_c1_reset_assert(struct mtk_scp *scp)
+{
+ writel(1, scp->cluster->reg_base + MT8195_CORE1_SW_RSTN_SET);
+}
+
+static void mt8195_scp_c1_reset_deassert(struct mtk_scp *scp)
+{
+ writel(1, scp->cluster->reg_base + MT8195_CORE1_SW_RSTN_CLR);
}
static void mt8183_scp_irq_handler(struct mtk_scp *scp)
{
u32 scp_to_host;
- scp_to_host = readl(scp->reg_base + MT8183_SCP_TO_HOST);
+ scp_to_host = readl(scp->cluster->reg_base + MT8183_SCP_TO_HOST);
if (scp_to_host & MT8183_SCP_IPC_INT_BIT)
scp_ipi_handler(scp);
else
@@ -188,14 +204,14 @@ static void mt8183_scp_irq_handler(struct mtk_scp *scp)
/* SCP won't send another interrupt until we set SCP_TO_HOST to 0. */
writel(MT8183_SCP_IPC_INT_BIT | MT8183_SCP_WDT_INT_BIT,
- scp->reg_base + MT8183_SCP_TO_HOST);
+ scp->cluster->reg_base + MT8183_SCP_TO_HOST);
}
static void mt8192_scp_irq_handler(struct mtk_scp *scp)
{
u32 scp_to_host;
- scp_to_host = readl(scp->reg_base + MT8192_SCP2APMCU_IPC_SET);
+ scp_to_host = readl(scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_SET);
if (scp_to_host & MT8192_SCP_IPC_INT_BIT) {
scp_ipi_handler(scp);
@@ -205,13 +221,48 @@ static void mt8192_scp_irq_handler(struct mtk_scp *scp)
* MT8192_SCP2APMCU_IPC.
*/
writel(MT8192_SCP_IPC_INT_BIT,
- scp->reg_base + MT8192_SCP2APMCU_IPC_CLR);
+ scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_CLR);
} else {
scp_wdt_handler(scp, scp_to_host);
- writel(1, scp->reg_base + MT8192_CORE0_WDT_IRQ);
+ writel(1, scp->cluster->reg_base + MT8192_CORE0_WDT_IRQ);
}
}
+static void mt8195_scp_irq_handler(struct mtk_scp *scp)
+{
+ u32 scp_to_host;
+
+ scp_to_host = readl(scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_SET);
+
+ if (scp_to_host & MT8192_SCP_IPC_INT_BIT) {
+ scp_ipi_handler(scp);
+ } else {
+ u32 reason = readl(scp->cluster->reg_base + MT8195_SYS_STATUS);
+
+ if (reason & MT8195_CORE0_WDT)
+ writel(1, scp->cluster->reg_base + MT8192_CORE0_WDT_IRQ);
+
+ if (reason & MT8195_CORE1_WDT)
+ writel(1, scp->cluster->reg_base + MT8195_CORE1_WDT_IRQ);
+
+ scp_wdt_handler(scp, reason);
+ }
+
+ writel(scp_to_host, scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_CLR);
+}
+
+static void mt8195_scp_c1_irq_handler(struct mtk_scp *scp)
+{
+ u32 scp_to_host;
+
+ scp_to_host = readl(scp->cluster->reg_base + MT8195_SSHUB2APMCU_IPC_SET);
+
+ if (scp_to_host & MT8192_SCP_IPC_INT_BIT)
+ scp_ipi_handler(scp);
+
+ writel(scp_to_host, scp->cluster->reg_base + MT8195_SSHUB2APMCU_IPC_CLR);
+}
+
static irqreturn_t scp_irq_handler(int irq, void *priv)
{
struct mtk_scp *scp = priv;
@@ -341,26 +392,26 @@ static int mt8195_scp_clk_get(struct mtk_scp *scp)
static int mt8183_scp_before_load(struct mtk_scp *scp)
{
/* Clear SCP to host interrupt */
- writel(MT8183_SCP_IPC_INT_BIT, scp->reg_base + MT8183_SCP_TO_HOST);
+ writel(MT8183_SCP_IPC_INT_BIT, scp->cluster->reg_base + MT8183_SCP_TO_HOST);
/* Reset clocks before loading FW */
- writel(0x0, scp->reg_base + MT8183_SCP_CLK_SW_SEL);
- writel(0x0, scp->reg_base + MT8183_SCP_CLK_DIV_SEL);
+ writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_SW_SEL);
+ writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_DIV_SEL);
/* Initialize TCM before loading FW. */
- writel(0x0, scp->reg_base + MT8183_SCP_L1_SRAM_PD);
- writel(0x0, scp->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
+ writel(0x0, scp->cluster->reg_base + MT8183_SCP_L1_SRAM_PD);
+ writel(0x0, scp->cluster->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
/* Turn on the power of SCP's SRAM before using it. */
- writel(0x0, scp->reg_base + MT8183_SCP_SRAM_PDN);
+ writel(0x0, scp->cluster->reg_base + MT8183_SCP_SRAM_PDN);
/*
* Set I-cache and D-cache size before loading SCP FW.
* SCP SRAM logical address may change when cache size setting differs.
*/
writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
- scp->reg_base + MT8183_SCP_CACHE_CON);
- writel(MT8183_SCP_CACHESIZE_8KB, scp->reg_base + MT8183_SCP_DCACHE_CON);
+ scp->cluster->reg_base + MT8183_SCP_CACHE_CON);
+ writel(MT8183_SCP_CACHESIZE_8KB, scp->cluster->reg_base + MT8183_SCP_DCACHE_CON);
return 0;
}
@@ -386,28 +437,28 @@ static void scp_sram_power_off(void __iomem *addr, u32 reserved_mask)
static int mt8186_scp_before_load(struct mtk_scp *scp)
{
/* Clear SCP to host interrupt */
- writel(MT8183_SCP_IPC_INT_BIT, scp->reg_base + MT8183_SCP_TO_HOST);
+ writel(MT8183_SCP_IPC_INT_BIT, scp->cluster->reg_base + MT8183_SCP_TO_HOST);
/* Reset clocks before loading FW */
- writel(0x0, scp->reg_base + MT8183_SCP_CLK_SW_SEL);
- writel(0x0, scp->reg_base + MT8183_SCP_CLK_DIV_SEL);
+ writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_SW_SEL);
+ writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_DIV_SEL);
/* Turn on the power of SCP's SRAM before using it. Enable 1 block per time*/
- scp_sram_power_on(scp->reg_base + MT8183_SCP_SRAM_PDN, 0);
+ scp_sram_power_on(scp->cluster->reg_base + MT8183_SCP_SRAM_PDN, 0);
/* Initialize TCM before loading FW. */
- writel(0x0, scp->reg_base + MT8183_SCP_L1_SRAM_PD);
- writel(0x0, scp->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
- writel(0x0, scp->reg_base + MT8186_SCP_L1_SRAM_PD_P1);
- writel(0x0, scp->reg_base + MT8186_SCP_L1_SRAM_PD_p2);
+ writel(0x0, scp->cluster->reg_base + MT8183_SCP_L1_SRAM_PD);
+ writel(0x0, scp->cluster->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
+ writel(0x0, scp->cluster->reg_base + MT8186_SCP_L1_SRAM_PD_P1);
+ writel(0x0, scp->cluster->reg_base + MT8186_SCP_L1_SRAM_PD_p2);
/*
* Set I-cache and D-cache size before loading SCP FW.
* SCP SRAM logical address may change when cache size setting differs.
*/
writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
- scp->reg_base + MT8183_SCP_CACHE_CON);
- writel(MT8183_SCP_CACHESIZE_8KB, scp->reg_base + MT8183_SCP_DCACHE_CON);
+ scp->cluster->reg_base + MT8183_SCP_CACHE_CON);
+ writel(MT8183_SCP_CACHESIZE_8KB, scp->cluster->reg_base + MT8183_SCP_DCACHE_CON);
return 0;
}
@@ -415,40 +466,100 @@ static int mt8186_scp_before_load(struct mtk_scp *scp)
static int mt8192_scp_before_load(struct mtk_scp *scp)
{
/* clear SPM interrupt, SCP2SPM_IPC_CLR */
- writel(0xff, scp->reg_base + MT8192_SCP2SPM_IPC_CLR);
+ writel(0xff, scp->cluster->reg_base + MT8192_SCP2SPM_IPC_CLR);
- writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);
+ writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);
/* enable SRAM clock */
- scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
- scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
- scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
- scp_sram_power_on(scp->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
- scp_sram_power_on(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
+ scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+ scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+ scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+ scp_sram_power_on(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
+ scp_sram_power_on(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
/* enable MPU for all memory regions */
- writel(0xff, scp->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
+ writel(0xff, scp->cluster->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
+
+ return 0;
+}
+
+static int mt8195_scp_l2tcm_on(struct mtk_scp *scp)
+{
+ struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
+
+ mutex_lock(&scp_cluster->cluster_lock);
+
+ if (scp_cluster->l2tcm_refcnt == 0) {
+ /* clear SPM interrupt, SCP2SPM_IPC_CLR */
+ writel(0xff, scp->cluster->reg_base + MT8192_SCP2SPM_IPC_CLR);
+
+ /* Power on L2TCM */
+ scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+ scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+ scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+ scp_sram_power_on(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN,
+ MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
+ }
+
+ scp_cluster->l2tcm_refcnt += 1;
+
+ mutex_unlock(&scp_cluster->cluster_lock);
return 0;
}
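/*
 * Sketch, not part of the patch: on MT8195 both SCP cores share the L2TCM
 * power controls, so power-up/power-down is wrapped in a mutex-protected
 * reference count -- only the first core powers the SRAM on and only the
 * last one powers it off.  Reduced form of that pairing (register writes
 * elided, names assumed from the hunks above):
 */
static void example_l2tcm_get(struct mtk_scp_of_cluster *c)
{
	mutex_lock(&c->cluster_lock);
	if (c->l2tcm_refcnt++ == 0) {
		/* power on the shared L2TCM banks */
	}
	mutex_unlock(&c->cluster_lock);
}

static void example_l2tcm_put(struct mtk_scp_of_cluster *c)
{
	mutex_lock(&c->cluster_lock);
	if (c->l2tcm_refcnt > 0 && --c->l2tcm_refcnt == 0) {
		/* power off the shared L2TCM banks */
	}
	mutex_unlock(&c->cluster_lock);
}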
static int mt8195_scp_before_load(struct mtk_scp *scp)
{
- /* clear SPM interrupt, SCP2SPM_IPC_CLR */
- writel(0xff, scp->reg_base + MT8192_SCP2SPM_IPC_CLR);
+ writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);
- writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);
+ mt8195_scp_l2tcm_on(scp);
- /* enable SRAM clock */
- scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
- scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
- scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
- scp_sram_power_on(scp->reg_base + MT8192_L1TCM_SRAM_PDN,
- MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
- scp_sram_power_on(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
+ scp_sram_power_on(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
/* enable MPU for all memory regions */
- writel(0xff, scp->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
+ writel(0xff, scp->cluster->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
+
+ return 0;
+}
+
+static int mt8195_scp_c1_before_load(struct mtk_scp *scp)
+{
+ u32 sec_ctrl;
+ struct mtk_scp *scp_c0;
+ struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
+
+ scp->data->scp_reset_assert(scp);
+
+ mt8195_scp_l2tcm_on(scp);
+
+ scp_sram_power_on(scp->cluster->reg_base + MT8195_CPU1_SRAM_PD, 0);
+
+ /* enable MPU for all memory regions */
+ writel(0xff, scp->cluster->reg_base + MT8195_CORE1_MEM_ATT_PREDEF);
+
+ /*
+ * The L2TCM_OFFSET_RANGE and L2TCM_OFFSET shift the destination address
+ * on SRAM when SCP core 1 accesses SRAM.
+ *
+ * This configuration allows SCP core 0 and core 1 to boot from
+ * different SRAM addresses, because both cores boot from the head of
+ * SRAM by default. It must be set up before SCP core 1 is booted.
+ *
+ * The value of L2TCM_OFFSET_RANGE is from the viewpoint of SCP core 1.
+ * When SCP core 1 issues an address within that range, a fixed offset
+ * (L2TCM_OFFSET) is added to it on the bus. The shift is transparent to
+ * software.
+ */
+ writel(0, scp->cluster->reg_base + MT8195_L2TCM_OFFSET_RANGE_0_LOW);
+ writel(scp->sram_size, scp->cluster->reg_base + MT8195_L2TCM_OFFSET_RANGE_0_HIGH);
+
+ scp_c0 = list_first_entry(&scp_cluster->mtk_scp_list, struct mtk_scp, elem);
+ writel(scp->sram_phys - scp_c0->sram_phys, scp->cluster->reg_base + MT8195_L2TCM_OFFSET);
+
+ /* enable SRAM offset when fetching instruction and data */
+ sec_ctrl = readl(scp->cluster->reg_base + MT8195_SEC_CTRL);
+ sec_ctrl |= MT8195_CORE_OFFSET_ENABLE_I | MT8195_CORE_OFFSET_ENABLE_D;
+ writel(sec_ctrl, scp->cluster->reg_base + MT8195_SEC_CTRL);
return 0;
}
@@ -567,11 +678,11 @@ static void *mt8192_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
}
/* optional memory region */
- if (scp->l1tcm_size &&
- da >= scp->l1tcm_phys &&
- (da + len) <= scp->l1tcm_phys + scp->l1tcm_size) {
- offset = da - scp->l1tcm_phys;
- return (void __force *)scp->l1tcm_base + offset;
+ if (scp->cluster->l1tcm_size &&
+ da >= scp->cluster->l1tcm_phys &&
+ (da + len) <= scp->cluster->l1tcm_phys + scp->cluster->l1tcm_size) {
+ offset = da - scp->cluster->l1tcm_phys;
+ return (void __force *)scp->cluster->l1tcm_base + offset;
}
/* optional memory region */
@@ -595,34 +706,62 @@ static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iome
static void mt8183_scp_stop(struct mtk_scp *scp)
{
/* Disable SCP watchdog */
- writel(0, scp->reg_base + MT8183_WDT_CFG);
+ writel(0, scp->cluster->reg_base + MT8183_WDT_CFG);
}
static void mt8192_scp_stop(struct mtk_scp *scp)
{
/* Disable SRAM clock */
- scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
- scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
- scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
- scp_sram_power_off(scp->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
- scp_sram_power_off(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
+ scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+ scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+ scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+ scp_sram_power_off(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
+ scp_sram_power_off(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
/* Disable SCP watchdog */
- writel(0, scp->reg_base + MT8192_CORE0_WDT_CFG);
+ writel(0, scp->cluster->reg_base + MT8192_CORE0_WDT_CFG);
+}
+
+static void mt8195_scp_l2tcm_off(struct mtk_scp *scp)
+{
+ struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
+
+ mutex_lock(&scp_cluster->cluster_lock);
+
+ if (scp_cluster->l2tcm_refcnt > 0)
+ scp_cluster->l2tcm_refcnt -= 1;
+
+ if (scp_cluster->l2tcm_refcnt == 0) {
+ /* Power off L2TCM */
+ scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+ scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+ scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+ scp_sram_power_off(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN,
+ MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
+ }
+
+ mutex_unlock(&scp_cluster->cluster_lock);
}
static void mt8195_scp_stop(struct mtk_scp *scp)
{
- /* Disable SRAM clock */
- scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
- scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
- scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
- scp_sram_power_off(scp->reg_base + MT8192_L1TCM_SRAM_PDN,
- MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
- scp_sram_power_off(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
+ mt8195_scp_l2tcm_off(scp);
+
+ scp_sram_power_off(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
+
+ /* Disable SCP watchdog */
+ writel(0, scp->cluster->reg_base + MT8192_CORE0_WDT_CFG);
+}
+
+static void mt8195_scp_c1_stop(struct mtk_scp *scp)
+{
+ mt8195_scp_l2tcm_off(scp);
+
+ /* Power off CPU SRAM */
+ scp_sram_power_off(scp->cluster->reg_base + MT8195_CPU1_SRAM_PD, 0);
/* Disable SCP watchdog */
- writel(0, scp->reg_base + MT8192_CORE0_WDT_CFG);
+ writel(0, scp->cluster->reg_base + MT8195_CORE1_WDT_CFG);
}
static int scp_stop(struct rproc *rproc)
@@ -811,7 +950,9 @@ static void scp_remove_rpmsg_subdev(struct mtk_scp *scp)
}
}
-static int scp_probe(struct platform_device *pdev)
+static struct mtk_scp *scp_rproc_init(struct platform_device *pdev,
+ struct mtk_scp_of_cluster *scp_cluster,
+ const struct mtk_scp_of_data *of_data)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
@@ -823,52 +964,38 @@ static int scp_probe(struct platform_device *pdev)
ret = rproc_of_parse_firmware(dev, 0, &fw_name);
if (ret < 0 && ret != -EINVAL)
- return ret;
+ return ERR_PTR(ret);
rproc = devm_rproc_alloc(dev, np->name, &scp_ops, fw_name, sizeof(*scp));
- if (!rproc)
- return dev_err_probe(dev, -ENOMEM, "unable to allocate remoteproc\n");
+ if (!rproc) {
+ dev_err(dev, "unable to allocate remoteproc\n");
+ return ERR_PTR(-ENOMEM);
+ }
scp = rproc->priv;
scp->rproc = rproc;
scp->dev = dev;
- scp->data = of_device_get_match_data(dev);
+ scp->data = of_data;
+ scp->cluster = scp_cluster;
platform_set_drvdata(pdev, scp);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
scp->sram_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(scp->sram_base))
- return dev_err_probe(dev, PTR_ERR(scp->sram_base),
- "Failed to parse and map sram memory\n");
+ if (IS_ERR(scp->sram_base)) {
+ dev_err(dev, "Failed to parse and map sram memory\n");
+ return ERR_CAST(scp->sram_base);
+ }
scp->sram_size = resource_size(res);
scp->sram_phys = res->start;
- /* l1tcm is an optional memory region */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l1tcm");
- scp->l1tcm_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(scp->l1tcm_base)) {
- ret = PTR_ERR(scp->l1tcm_base);
- if (ret != -EINVAL) {
- return dev_err_probe(dev, ret, "Failed to map l1tcm memory\n");
- }
- } else {
- scp->l1tcm_size = resource_size(res);
- scp->l1tcm_phys = res->start;
- }
-
- scp->reg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
- if (IS_ERR(scp->reg_base))
- return dev_err_probe(dev, PTR_ERR(scp->reg_base),
- "Failed to parse and map cfg memory\n");
-
ret = scp->data->scp_clk_get(scp);
if (ret)
- return ret;
+ return ERR_PTR(ret);
ret = scp_map_memory_region(scp);
if (ret)
- return ret;
+ return ERR_PTR(ret);
mutex_init(&scp->send_lock);
for (i = 0; i < SCP_IPI_MAX; i++)
@@ -895,11 +1022,7 @@ static int scp_probe(struct platform_device *pdev)
goto remove_subdev;
}
- ret = rproc_add(rproc);
- if (ret)
- goto remove_subdev;
-
- return 0;
+ return scp;
remove_subdev:
scp_remove_rpmsg_subdev(scp);
@@ -910,15 +1033,13 @@ release_dev_mem:
mutex_destroy(&scp->ipi_desc[i].lock);
mutex_destroy(&scp->send_lock);
- return ret;
+ return ERR_PTR(ret);
}
-static void scp_remove(struct platform_device *pdev)
+static void scp_free(struct mtk_scp *scp)
{
- struct mtk_scp *scp = platform_get_drvdata(pdev);
int i;
- rproc_del(scp->rproc);
scp_remove_rpmsg_subdev(scp);
scp_ipi_unregister(scp, SCP_IPI_INIT);
scp_unmap_memory_region(scp);
@@ -927,6 +1048,186 @@ static void scp_remove(struct platform_device *pdev)
mutex_destroy(&scp->send_lock);
}
+static int scp_add_single_core(struct platform_device *pdev,
+ struct mtk_scp_of_cluster *scp_cluster)
+{
+ struct device *dev = &pdev->dev;
+ struct list_head *scp_list = &scp_cluster->mtk_scp_list;
+ struct mtk_scp *scp;
+ int ret;
+
+ scp = scp_rproc_init(pdev, scp_cluster, of_device_get_match_data(dev));
+ if (IS_ERR(scp))
+ return PTR_ERR(scp);
+
+ ret = rproc_add(scp->rproc);
+ if (ret) {
+ dev_err(dev, "Failed to add rproc\n");
+ scp_free(scp);
+ return ret;
+ }
+
+ list_add_tail(&scp->elem, scp_list);
+
+ return 0;
+}
+
+static int scp_add_multi_core(struct platform_device *pdev,
+ struct mtk_scp_of_cluster *scp_cluster)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev_of_node(dev);
+ struct platform_device *cpdev;
+ struct device_node *child;
+ struct list_head *scp_list = &scp_cluster->mtk_scp_list;
+ const struct mtk_scp_of_data **cluster_of_data;
+ struct mtk_scp *scp, *temp;
+ int core_id = 0;
+ int ret;
+
+ cluster_of_data = (const struct mtk_scp_of_data **)of_device_get_match_data(dev);
+
+ for_each_available_child_of_node(np, child) {
+ if (!cluster_of_data[core_id]) {
+ ret = -EINVAL;
+ dev_err(dev, "Not support core %d\n", core_id);
+ of_node_put(child);
+ goto init_fail;
+ }
+
+ cpdev = of_find_device_by_node(child);
+ if (!cpdev) {
+ ret = -ENODEV;
+ dev_err(dev, "Not found platform device for core %d\n", core_id);
+ of_node_put(child);
+ goto init_fail;
+ }
+
+ scp = scp_rproc_init(cpdev, scp_cluster, cluster_of_data[core_id]);
+ put_device(&cpdev->dev);
+ if (IS_ERR(scp)) {
+ ret = PTR_ERR(scp);
+ dev_err(dev, "Failed to initialize core %d rproc\n", core_id);
+ of_node_put(child);
+ goto init_fail;
+ }
+
+ ret = rproc_add(scp->rproc);
+ if (ret) {
+ dev_err(dev, "Failed to add rproc of core %d\n", core_id);
+ of_node_put(child);
+ scp_free(scp);
+ goto init_fail;
+ }
+
+ list_add_tail(&scp->elem, scp_list);
+ core_id++;
+ }
+
+ /*
+ * Set the drvdata of @pdev to the last @scp that was created. This is
+ * needed because (1) scp_rproc_init() sets drvdata only on the child
+ * platform devices and (2) scp_remove() needs a handle to the cluster
+ * list.
+ */
+ platform_set_drvdata(pdev, scp);
+
+ return 0;
+
+init_fail:
+ list_for_each_entry_safe_reverse(scp, temp, scp_list, elem) {
+ list_del(&scp->elem);
+ rproc_del(scp->rproc);
+ scp_free(scp);
+ }
+
+ return ret;
+}
+
+static bool scp_is_single_core(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev_of_node(dev);
+ struct device_node *child;
+ int num_cores = 0;
+
+ for_each_child_of_node(np, child)
+ if (of_device_is_compatible(child, "mediatek,scp-core"))
+ num_cores++;
+
+ return num_cores < 2;
+}
+
+static int scp_cluster_init(struct platform_device *pdev, struct mtk_scp_of_cluster *scp_cluster)
+{
+ int ret;
+
+ if (scp_is_single_core(pdev))
+ ret = scp_add_single_core(pdev, scp_cluster);
+ else
+ ret = scp_add_multi_core(pdev, scp_cluster);
+
+ return ret;
+}
+
+static int scp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_scp_of_cluster *scp_cluster;
+ struct resource *res;
+ int ret;
+
+ scp_cluster = devm_kzalloc(dev, sizeof(*scp_cluster), GFP_KERNEL);
+ if (!scp_cluster)
+ return -ENOMEM;
+
+ scp_cluster->reg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(scp_cluster->reg_base))
+ return dev_err_probe(dev, PTR_ERR(scp_cluster->reg_base),
+ "Failed to parse and map cfg memory\n");
+
+ /* l1tcm is an optional memory region */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l1tcm");
+ scp_cluster->l1tcm_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(scp_cluster->l1tcm_base)) {
+ ret = PTR_ERR(scp_cluster->l1tcm_base);
+ if (ret != -EINVAL)
+ return dev_err_probe(dev, ret, "Failed to map l1tcm memory\n");
+
+ scp_cluster->l1tcm_base = NULL;
+ } else {
+ scp_cluster->l1tcm_size = resource_size(res);
+ scp_cluster->l1tcm_phys = res->start;
+ }
+
+ INIT_LIST_HEAD(&scp_cluster->mtk_scp_list);
+ mutex_init(&scp_cluster->cluster_lock);
+
+ ret = devm_of_platform_populate(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to populate platform devices\n");
+
+ ret = scp_cluster_init(pdev, scp_cluster);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void scp_remove(struct platform_device *pdev)
+{
+ struct mtk_scp *scp = platform_get_drvdata(pdev);
+ struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
+ struct mtk_scp *temp;
+
+ list_for_each_entry_safe_reverse(scp, temp, &scp_cluster->mtk_scp_list, elem) {
+ list_del(&scp->elem);
+ rproc_del(scp->rproc);
+ scp_free(scp);
+ }
+ mutex_destroy(&scp_cluster->cluster_lock);
+}
+
static const struct mtk_scp_of_data mt8183_of_data = {
.scp_clk_get = mt8183_scp_clk_get,
.scp_before_load = mt8183_scp_before_load,
@@ -980,7 +1281,7 @@ static const struct mtk_scp_of_data mt8192_of_data = {
static const struct mtk_scp_of_data mt8195_of_data = {
.scp_clk_get = mt8195_scp_clk_get,
.scp_before_load = mt8195_scp_before_load,
- .scp_irq_handler = mt8192_scp_irq_handler,
+ .scp_irq_handler = mt8195_scp_irq_handler,
.scp_reset_assert = mt8192_scp_reset_assert,
.scp_reset_deassert = mt8192_scp_reset_deassert,
.scp_stop = mt8195_scp_stop,
@@ -989,12 +1290,31 @@ static const struct mtk_scp_of_data mt8195_of_data = {
.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
};
+static const struct mtk_scp_of_data mt8195_of_data_c1 = {
+ .scp_clk_get = mt8195_scp_clk_get,
+ .scp_before_load = mt8195_scp_c1_before_load,
+ .scp_irq_handler = mt8195_scp_c1_irq_handler,
+ .scp_reset_assert = mt8195_scp_c1_reset_assert,
+ .scp_reset_deassert = mt8195_scp_c1_reset_deassert,
+ .scp_stop = mt8195_scp_c1_stop,
+ .scp_da_to_va = mt8192_scp_da_to_va,
+ .host_to_scp_reg = MT8192_GIPC_IN_SET,
+ .host_to_scp_int_bit = MT8195_CORE1_HOST_IPC_INT_BIT,
+};
+
+static const struct mtk_scp_of_data *mt8195_of_data_cores[] = {
+ &mt8195_of_data,
+ &mt8195_of_data_c1,
+ NULL
+};
+
static const struct of_device_id mtk_scp_of_match[] = {
{ .compatible = "mediatek,mt8183-scp", .data = &mt8183_of_data },
{ .compatible = "mediatek,mt8186-scp", .data = &mt8186_of_data },
{ .compatible = "mediatek,mt8188-scp", .data = &mt8188_of_data },
{ .compatible = "mediatek,mt8192-scp", .data = &mt8192_of_data },
{ .compatible = "mediatek,mt8195-scp", .data = &mt8195_of_data },
+ { .compatible = "mediatek,mt8195-scp-dual", .data = &mt8195_of_data_cores },
{},
};
MODULE_DEVICE_TABLE(of, mtk_scp_of_match);
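
The new "mediatek,mt8195-scp-dual" entry points at a NULL-terminated array of per-core match data rather than a single mtk_scp_of_data. A minimal sketch of how such an array is consumed (illustrative names only; the real walk lives in scp_add_multi_core() above):

/*
 * Illustrative sketch only: pair the Nth available child node with the Nth
 * entry of the NULL-terminated per-core table returned as match data.
 */
static int example_walk_cores(struct platform_device *pdev)
{
	const struct mtk_scp_of_data **cores;
	struct device_node *child;
	int i = 0;

	cores = (const struct mtk_scp_of_data **)of_device_get_match_data(&pdev->dev);

	for_each_available_child_of_node(dev_of_node(&pdev->dev), child) {
		if (!cores[i]) {	/* more child nodes than table entries */
			of_node_put(child);
			return -EINVAL;
		}
		/* ... set up core i from cores[i] ... */
		i++;
	}

	return 0;
}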
diff --git a/drivers/remoteproc/mtk_scp_ipi.c b/drivers/remoteproc/mtk_scp_ipi.c
index 9c7c17b9d181..cd0b60106ec2 100644
--- a/drivers/remoteproc/mtk_scp_ipi.c
+++ b/drivers/remoteproc/mtk_scp_ipi.c
@@ -177,7 +177,7 @@ int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
mutex_lock(&scp->send_lock);
/* Wait until SCP receives the last command */
- ret = readl_poll_timeout_atomic(scp->reg_base + scp->data->host_to_scp_reg,
+ ret = readl_poll_timeout_atomic(scp->cluster->reg_base + scp->data->host_to_scp_reg,
val, !val, 0, SCP_TIMEOUT_US);
if (ret) {
dev_err(scp->dev, "%s: IPI timeout!\n", __func__);
@@ -192,7 +192,7 @@ int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
scp->ipi_id_ack[id] = false;
/* send the command to SCP */
writel(scp->data->host_to_scp_int_bit,
- scp->reg_base + scp->data->host_to_scp_reg);
+ scp->cluster->reg_base + scp->data->host_to_scp_reg);
if (wait) {
/* wait for SCP's ACK */
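
For reference, readl_poll_timeout_atomic() busy-waits with udelay() rather than sleeping, repeatedly reading the register until the condition on the value holds or the timeout expires. A minimal sketch of the idiom with a placeholder register and timeout:

#include <linux/iopoll.h>

/* Sketch: spin until the doorbell register reads back as zero. */
static int example_wait_doorbell_clear(void __iomem *reg)
{
	u32 val;

	/* no delay between reads, give up after 1000 us */
	return readl_poll_timeout_atomic(reg, val, !val, 0, 1000);
}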
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 22fe7b5f5236..394b2c1cb5e2 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -2322,7 +2322,6 @@ static const struct rproc_hexagon_res msm8996_mss = {
},
.proxy_clk_names = (char*[]){
"xo",
- "pnoc",
"qdss",
NULL
},
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index b5447dd2dd35..913a5d2068e8 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -813,6 +813,21 @@ static const struct adsp_data sm6350_adsp_resource = {
.ssctl_id = 0x14,
};
+static const struct adsp_data sm6375_mpss_resource = {
+ .crash_reason_smem = 421,
+ .firmware_name = "modem.mdt",
+ .pas_id = 4,
+ .minidump_id = 3,
+ .auto_boot = false,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
+ .ssr_name = "mpss",
+ .sysmon_name = "modem",
+ .ssctl_id = 0x12,
+};
+
static const struct adsp_data sm8150_adsp_resource = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
@@ -1161,6 +1176,7 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,qcs404-adsp-pas", .data = &adsp_resource_init },
{ .compatible = "qcom,qcs404-cdsp-pas", .data = &cdsp_resource_init },
{ .compatible = "qcom,qcs404-wcss-pas", .data = &wcss_resource_init },
+ { .compatible = "qcom,sc7180-adsp-pas", .data = &sm8250_adsp_resource},
{ .compatible = "qcom,sc7180-mpss-pas", .data = &mpss_resource_init},
{ .compatible = "qcom,sc7280-mpss-pas", .data = &mpss_resource_init},
{ .compatible = "qcom,sc8180x-adsp-pas", .data = &sm8150_adsp_resource},
@@ -1180,6 +1196,9 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sm6350-adsp-pas", .data = &sm6350_adsp_resource},
{ .compatible = "qcom,sm6350-cdsp-pas", .data = &sm6350_cdsp_resource},
{ .compatible = "qcom,sm6350-mpss-pas", .data = &mpss_resource_init},
+ { .compatible = "qcom,sm6375-adsp-pas", .data = &sm6350_adsp_resource},
+ { .compatible = "qcom,sm6375-cdsp-pas", .data = &sm8150_cdsp_resource},
+ { .compatible = "qcom,sm6375-mpss-pas", .data = &sm6375_mpss_resource},
{ .compatible = "qcom,sm8150-adsp-pas", .data = &sm8150_adsp_resource},
{ .compatible = "qcom,sm8150-cdsp-pas", .data = &sm8150_cdsp_resource},
{ .compatible = "qcom,sm8150-mpss-pas", .data = &mpss_resource_init},
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
index e3ce01d98b4c..cb163766c56d 100644
--- a/drivers/remoteproc/st_remoteproc.c
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -16,10 +16,9 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
@@ -341,7 +340,6 @@ static int st_rproc_parse_dt(struct platform_device *pdev)
static int st_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- const struct of_device_id *match;
struct st_rproc *ddata;
struct device_node *np = dev->of_node;
struct rproc *rproc;
@@ -349,19 +347,17 @@ static int st_rproc_probe(struct platform_device *pdev)
int enabled;
int ret, i;
- match = of_match_device(st_rproc_match, dev);
- if (!match || !match->data) {
- dev_err(dev, "No device match found\n");
- return -ENODEV;
- }
-
rproc = rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata));
if (!rproc)
return -ENOMEM;
rproc->has_iommu = false;
ddata = rproc->priv;
- ddata->config = (struct st_rproc_config *)match->data;
+ ddata->config = (struct st_rproc_config *)device_get_match_data(dev);
+ if (!ddata->config) {
+ ret = -ENODEV;
+ goto free_rproc;
+ }
platform_set_drvdata(pdev, rproc);
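
device_get_match_data() returns the .data pointer of the matching table entry directly, for both OF and ACPI matching, which is what allows the explicit of_match_device() lookup above to be dropped. A small sketch of the idiom; st_rproc_config is the driver's own type:

#include <linux/property.h>

/* Sketch: fetch the per-compatible configuration, or fail if none matched. */
static int example_get_config(struct device *dev,
			      const struct st_rproc_config **cfg)
{
	*cfg = device_get_match_data(dev);

	return *cfg ? 0 : -ENODEV;
}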
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index 9d9b13530f78..4f469f0bcf8b 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -712,9 +712,9 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev,
unsigned int tzen;
int err, irq;
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq == -EPROBE_DEFER)
- return dev_err_probe(dev, irq, "failed to get interrupt\n");
+ return irq;
if (irq > 0) {
err = devm_request_irq(dev, irq, stm32_rproc_wdg, 0,
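
platform_get_irq_optional() behaves like platform_get_irq() but does not log an error when no interrupt is described, which is why the watchdog IRQ above can be treated as optional while still propagating probe deferral. A sketch of the pattern with placeholder names:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

/* Sketch: request an IRQ only if one is described, defer if not ready yet. */
static int example_request_optional_irq(struct platform_device *pdev,
					irq_handler_t handler, void *data)
{
	int irq = platform_get_irq_optional(pdev, 0);

	if (irq == -EPROBE_DEFER)
		return irq;	/* provider not ready, try again later */
	if (irq < 0)
		return 0;	/* no IRQ described: run without the feature */

	return devm_request_irq(&pdev->dev, irq, handler, 0,
				dev_name(&pdev->dev), data);
}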
diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
index feca6de68da2..4395edea9a64 100644
--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
+++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
@@ -39,12 +39,14 @@ enum zynqmp_r5_cluster_mode {
* struct mem_bank_data - Memory Bank description
*
* @addr: Start address of memory bank
+ * @da: device address
* @size: Size of Memory bank
* @pm_domain_id: Power-domains id of memory bank for firmware to turn on/off
* @bank_name: name of the bank for remoteproc framework
*/
struct mem_bank_data {
phys_addr_t addr;
+ u32 da;
size_t size;
u32 pm_domain_id;
char *bank_name;
@@ -75,11 +77,19 @@ struct mbox_info {
* Hardcoded TCM bank values. This will be removed once TCM bindings are
* accepted for system-dt specifications and upstreamed in linux kernel
*/
-static const struct mem_bank_data zynqmp_tcm_banks[] = {
- {0xffe00000UL, 0x10000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 64KB each */
- {0xffe20000UL, 0x10000UL, PD_R5_0_BTCM, "btcm0"},
- {0xffe90000UL, 0x10000UL, PD_R5_1_ATCM, "atcm1"},
- {0xffeb0000UL, 0x10000UL, PD_R5_1_BTCM, "btcm1"},
+static const struct mem_bank_data zynqmp_tcm_banks_split[] = {
+ {0xffe00000UL, 0x0, 0x10000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 64KB each */
+ {0xffe20000UL, 0x20000, 0x10000UL, PD_R5_0_BTCM, "btcm0"},
+ {0xffe90000UL, 0x0, 0x10000UL, PD_R5_1_ATCM, "atcm1"},
+ {0xffeb0000UL, 0x20000, 0x10000UL, PD_R5_1_BTCM, "btcm1"},
+};
+
+/* In lockstep mode the cluster combines each pair of 64KB TCMs into one 128KB TCM */
+static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
+ {0xffe00000UL, 0x0, 0x20000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 128KB each */
+ {0xffe20000UL, 0x20000, 0x20000UL, PD_R5_0_BTCM, "btcm0"},
+ {0, 0, 0, PD_R5_1_ATCM, ""},
+ {0, 0, 0, PD_R5_1_BTCM, ""},
};
/**
@@ -526,30 +536,6 @@ static int tcm_mem_map(struct rproc *rproc,
/* clear TCMs */
memset_io(va, 0, mem->len);
- /*
- * The R5s expect their TCM banks to be at address 0x0 and 0x2000,
- * while on the Linux side they are at 0xffexxxxx.
- *
- * Zero out the high 12 bits of the address. This will give
- * expected values for TCM Banks 0A and 0B (0x0 and 0x20000).
- */
- mem->da &= 0x000fffff;
-
- /*
- * TCM Banks 1A and 1B still have to be translated.
- *
- * Below handle these two banks' absolute addresses (0xffe90000 and
- * 0xffeb0000) and convert to the expected relative addresses
- * (0x0 and 0x20000).
- */
- if (mem->da == 0x90000 || mem->da == 0xB0000)
- mem->da -= 0x90000;
-
- /* if translated TCM bank address is not valid report error */
- if (mem->da != 0x0 && mem->da != 0x20000) {
- dev_err(&rproc->dev, "invalid TCM address: %x\n", mem->da);
- return -EINVAL;
- }
return 0;
}
@@ -571,6 +557,7 @@ static int add_tcm_carveout_split_mode(struct rproc *rproc)
u32 pm_domain_id;
size_t bank_size;
char *bank_name;
+ u32 da;
r5_core = rproc->priv;
dev = r5_core->dev;
@@ -583,6 +570,7 @@ static int add_tcm_carveout_split_mode(struct rproc *rproc)
*/
for (i = 0; i < num_banks; i++) {
bank_addr = r5_core->tcm_banks[i]->addr;
+ da = r5_core->tcm_banks[i]->da;
bank_name = r5_core->tcm_banks[i]->bank_name;
bank_size = r5_core->tcm_banks[i]->size;
pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
@@ -595,11 +583,11 @@ static int add_tcm_carveout_split_mode(struct rproc *rproc)
goto release_tcm_split;
}
- dev_dbg(dev, "TCM carveout split mode %s addr=%llx, size=0x%lx",
- bank_name, bank_addr, bank_size);
+ dev_dbg(dev, "TCM carveout split mode %s addr=%llx, da=0x%x, size=0x%lx",
+ bank_name, bank_addr, da, bank_size);
rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr,
- bank_size, bank_addr,
+ bank_size, da,
tcm_mem_map, tcm_mem_unmap,
bank_name);
if (!rproc_mem) {
@@ -640,6 +628,7 @@ static int add_tcm_carveout_lockstep_mode(struct rproc *rproc)
struct device *dev;
u32 pm_domain_id;
char *bank_name;
+ u32 da;
r5_core = rproc->priv;
dev = r5_core->dev;
@@ -650,14 +639,11 @@ static int add_tcm_carveout_lockstep_mode(struct rproc *rproc)
/*
* In lockstep mode, TCM is contiguous memory block
* However, each TCM block still needs to be enabled individually.
- * So, Enable each TCM block individually, but add their size
- * to create contiguous memory region.
+ * So, enable each TCM block individually.
+ * Although ATCM and BTCM are contiguous memory blocks, add two separate
+ * carveouts for them.
*/
- bank_addr = r5_core->tcm_banks[0]->addr;
- bank_name = r5_core->tcm_banks[0]->bank_name;
-
for (i = 0; i < num_banks; i++) {
- bank_size += r5_core->tcm_banks[i]->size;
pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
/* Turn on each TCM bank individually */
@@ -668,23 +654,32 @@ static int add_tcm_carveout_lockstep_mode(struct rproc *rproc)
dev_err(dev, "failed to turn on TCM 0x%x", pm_domain_id);
goto release_tcm_lockstep;
}
- }
- dev_dbg(dev, "TCM add carveout lockstep mode %s addr=0x%llx, size=0x%lx",
- bank_name, bank_addr, bank_size);
-
- /* Register TCM address range, TCM map and unmap functions */
- rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr,
- bank_size, bank_addr,
- tcm_mem_map, tcm_mem_unmap,
- bank_name);
- if (!rproc_mem) {
- ret = -ENOMEM;
- goto release_tcm_lockstep;
- }
+ bank_size = r5_core->tcm_banks[i]->size;
+ if (bank_size == 0)
+ continue;
- /* If registration is success, add carveouts */
- rproc_add_carveout(rproc, rproc_mem);
+ bank_addr = r5_core->tcm_banks[i]->addr;
+ da = r5_core->tcm_banks[i]->da;
+ bank_name = r5_core->tcm_banks[i]->bank_name;
+
+ /* Register TCM address range, TCM map and unmap functions */
+ rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr,
+ bank_size, da,
+ tcm_mem_map, tcm_mem_unmap,
+ bank_name);
+ if (!rproc_mem) {
+ ret = -ENOMEM;
+ zynqmp_pm_release_node(pm_domain_id);
+ goto release_tcm_lockstep;
+ }
+
+ /* If registration is success, add carveouts */
+ rproc_add_carveout(rproc, rproc_mem);
+
+ dev_dbg(dev, "TCM carveout lockstep mode %s addr=0x%llx, da=0x%x, size=0x%lx",
+ bank_name, bank_addr, da, bank_size);
+ }
return 0;
@@ -895,12 +890,19 @@ free_rproc:
*/
static int zynqmp_r5_get_tcm_node(struct zynqmp_r5_cluster *cluster)
{
+ const struct mem_bank_data *zynqmp_tcm_banks;
struct device *dev = cluster->dev;
struct zynqmp_r5_core *r5_core;
int tcm_bank_count, tcm_node;
int i, j;
- tcm_bank_count = ARRAY_SIZE(zynqmp_tcm_banks);
+ if (cluster->mode == SPLIT_MODE) {
+ zynqmp_tcm_banks = zynqmp_tcm_banks_split;
+ tcm_bank_count = ARRAY_SIZE(zynqmp_tcm_banks_split);
+ } else {
+ zynqmp_tcm_banks = zynqmp_tcm_banks_lockstep;
+ tcm_bank_count = ARRAY_SIZE(zynqmp_tcm_banks_lockstep);
+ }
/* count per core tcm banks */
tcm_bank_count = tcm_bank_count / cluster->core_count;
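
With the device address now stored per bank, rproc_mem_entry_init() is given the R5-visible address ('da') separately from the CPU-visible physical address, so tcm_mem_map() no longer has to mask and translate addresses. A sketch of registering one such carveout; the addresses are the split-mode atcm0 values from the table above:

/*
 * Sketch only: register a TCM bank whose physical address differs from the
 * address the R5 core sees (da), and hand it to the remoteproc core.
 */
static int example_add_tcm_carveout(struct rproc *rproc, struct device *dev)
{
	struct rproc_mem_entry *mem;

	mem = rproc_mem_entry_init(dev, NULL,
				   (dma_addr_t)0xffe00000UL, 0x10000, /* pa, size */
				   0x0,                               /* da for the R5 */
				   tcm_mem_map, tcm_mem_unmap, "atcm0");
	if (!mem)
		return -ENOMEM;

	rproc_add_carveout(rproc, mem);

	return 0;
}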
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index 32b550c91d9f..8abc7d022ff7 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -545,7 +545,7 @@ static int rpmsg_dev_probe(struct device *dev)
goto out;
if (rpdrv->callback) {
- strncpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
+ strscpy(chinfo.name, rpdev->id.name, sizeof(chinfo.name));
chinfo.src = rpdev->src;
chinfo.dst = RPMSG_ADDR_ANY;
diff --git a/drivers/rpmsg/rpmsg_ns.c b/drivers/rpmsg/rpmsg_ns.c
index c70ad03ff2e9..bde8c8d433e0 100644
--- a/drivers/rpmsg/rpmsg_ns.c
+++ b/drivers/rpmsg/rpmsg_ns.c
@@ -50,7 +50,7 @@ static int rpmsg_ns_cb(struct rpmsg_device *rpdev, void *data, int len,
/* don't trust the remote processor for null terminating the name */
msg->name[RPMSG_NAME_SIZE - 1] = '\0';
- strncpy(chinfo.name, msg->name, sizeof(chinfo.name));
+ strscpy_pad(chinfo.name, msg->name, sizeof(chinfo.name));
chinfo.src = RPMSG_ADDR_ANY;
chinfo.dst = rpmsg32_to_cpu(rpdev, msg->addr);
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 905ac7910c98..dc87965f8164 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -329,7 +329,7 @@ static int virtio_rpmsg_announce_create(struct rpmsg_device *rpdev)
virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
struct rpmsg_ns_msg nsm;
- strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
+ strscpy_pad(nsm.name, rpdev->id.name, sizeof(nsm.name));
nsm.addr = cpu_to_rpmsg32(rpdev, rpdev->ept->addr);
nsm.flags = cpu_to_rpmsg32(rpdev, RPMSG_NS_CREATE);
@@ -353,7 +353,7 @@ static int virtio_rpmsg_announce_destroy(struct rpmsg_device *rpdev)
virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
struct rpmsg_ns_msg nsm;
- strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
+ strscpy_pad(nsm.name, rpdev->id.name, sizeof(nsm.name));
nsm.addr = cpu_to_rpmsg32(rpdev, rpdev->ept->addr);
nsm.flags = cpu_to_rpmsg32(rpdev, RPMSG_NS_DESTROY);
@@ -424,7 +424,7 @@ static struct rpmsg_device *__rpmsg_create_channel(struct virtproc_info *vrp,
*/
rpdev->announce = rpdev->src != RPMSG_ADDR_ANY;
- strncpy(rpdev->id.name, chinfo->name, RPMSG_NAME_SIZE);
+ strscpy(rpdev->id.name, chinfo->name, sizeof(rpdev->id.name));
rpdev->dev.parent = &vrp->vdev->dev;
rpdev->dev.release = virtio_rpmsg_release_device;
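
The strncpy() conversions above rely on two properties of the replacements: strscpy() always NUL-terminates and reports truncation as -E2BIG, and strscpy_pad() additionally zero-fills the rest of the destination, which matters when the buffer is handed verbatim to the remote side. A small sketch:

#include <linux/printk.h>
#include <linux/string.h>

/* Sketch: copy a channel name into a fixed buffer destined for the remote. */
static void example_fill_name(char dst[32], const char *src)
{
	/* NUL-terminated, truncation reported, trailing bytes zeroed */
	if (strscpy_pad(dst, src, 32) < 0)
		pr_warn("name \"%s\" truncated\n", src);
}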
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index d7502433c78a..3814e0845e77 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1351,7 +1351,7 @@ config RTC_DRV_DIGICOLOR
config RTC_DRV_IMXDI
tristate "Freescale IMX DryIce Real Time Clock"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
depends on OF
help
Support for Freescale IMX DryIce RTC
@@ -1984,4 +1984,15 @@ config RTC_DRV_POLARFIRE_SOC
This driver can also be built as a module, if so, the module
will be called "rtc-mpfs".
+config RTC_DRV_SSD202D
+ tristate "SigmaStar SSD202D RTC"
+ depends on ARCH_MSTARV7 || COMPILE_TEST
+ default ARCH_MSTARV7
+ help
+ If you say yes here you get support for the SigmaStar SSD202D On-Chip
+ Real Time Clock.
+
+ This driver can also be built as a module; if so, the module
+ will be called "rtc-ssd202d".
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index fd209883ee2e..7b03c3abfd78 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -103,6 +103,7 @@ obj-$(CONFIG_RTC_DRV_MESON) += rtc-meson.o
obj-$(CONFIG_RTC_DRV_MOXART) += rtc-moxart.o
obj-$(CONFIG_RTC_DRV_MPC5121) += rtc-mpc5121.o
obj-$(CONFIG_RTC_DRV_MSC313) += rtc-msc313.o
+obj-$(CONFIG_RTC_DRV_SSD202D) += rtc-ssd202d.o
obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o
obj-$(CONFIG_RTC_DRV_MT2712) += rtc-mt2712.o
obj-$(CONFIG_RTC_DRV_MT6397) += rtc-mt6397.o
diff --git a/drivers/rtc/nvmem.c b/drivers/rtc/nvmem.c
index 07ede21cee34..37df7e80525b 100644
--- a/drivers/rtc/nvmem.c
+++ b/drivers/rtc/nvmem.c
@@ -21,6 +21,7 @@ int devm_rtc_nvmem_register(struct rtc_device *rtc,
nvmem_config->dev = dev;
nvmem_config->owner = rtc->owner;
+ nvmem_config->add_legacy_fixed_of_cells = true;
nvmem = devm_nvmem_register(dev, nvmem_config);
if (IS_ERR(nvmem))
dev_err(dev, "failed to register nvmem device for RTC\n");
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index add4f71d7b3b..c16fe711a0d9 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -558,7 +558,7 @@ err_clk:
/*
* Disable and remove the RTC driver
*/
-static int __exit at91_rtc_remove(struct platform_device *pdev)
+static void __exit at91_rtc_remove(struct platform_device *pdev)
{
/* Disable all interrupts */
at91_rtc_write_idr(AT91_RTC_ACKUPD | AT91_RTC_ALARM |
@@ -566,8 +566,6 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
AT91_RTC_CALEV);
clk_disable_unprepare(sclk);
-
- return 0;
}
static void at91_rtc_shutdown(struct platform_device *pdev)
@@ -635,8 +633,14 @@ static int at91_rtc_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume);
-static struct platform_driver at91_rtc_driver = {
- .remove = __exit_p(at91_rtc_remove),
+/*
+ * at91_rtc_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver at91_rtc_driver __refdata = {
+ .remove_new = __exit_p(at91_rtc_remove),
.shutdown = at91_rtc_shutdown,
.driver = {
.name = "at91_rtc",
diff --git a/drivers/rtc/rtc-brcmstb-waketimer.c b/drivers/rtc/rtc-brcmstb-waketimer.c
index 3cdc015692ca..1a65a4e0dc00 100644
--- a/drivers/rtc/rtc-brcmstb-waketimer.c
+++ b/drivers/rtc/rtc-brcmstb-waketimer.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright © 2014-2017 Broadcom
+ * Copyright © 2014-2023 Broadcom
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -34,6 +34,7 @@ struct brcmstb_waketmr {
u32 rate;
unsigned long rtc_alarm;
bool alarm_en;
+ bool alarm_expired;
};
#define BRCMSTB_WKTMR_EVENT 0x00
@@ -64,6 +65,11 @@ static inline void brcmstb_waketmr_clear_alarm(struct brcmstb_waketmr *timer)
writel_relaxed(reg - 1, timer->base + BRCMSTB_WKTMR_ALARM);
writel_relaxed(WKTMR_ALARM_EVENT, timer->base + BRCMSTB_WKTMR_EVENT);
(void)readl_relaxed(timer->base + BRCMSTB_WKTMR_EVENT);
+ if (timer->alarm_expired) {
+ timer->alarm_expired = false;
+ /* maintain call balance */
+ enable_irq(timer->alarm_irq);
+ }
}
static void brcmstb_waketmr_set_alarm(struct brcmstb_waketmr *timer,
@@ -105,10 +111,17 @@ static irqreturn_t brcmstb_alarm_irq(int irq, void *data)
return IRQ_HANDLED;
if (timer->alarm_en) {
- if (!device_may_wakeup(timer->dev))
+ if (device_may_wakeup(timer->dev)) {
+ disable_irq_nosync(irq);
+ timer->alarm_expired = true;
+ } else {
writel_relaxed(WKTMR_ALARM_EVENT,
timer->base + BRCMSTB_WKTMR_EVENT);
+ }
rtc_update_irq(timer->rtc, 1, RTC_IRQF | RTC_AF);
+ } else {
+ writel_relaxed(WKTMR_ALARM_EVENT,
+ timer->base + BRCMSTB_WKTMR_EVENT);
}
return IRQ_HANDLED;
@@ -221,8 +234,14 @@ static int brcmstb_waketmr_alarm_enable(struct device *dev,
!brcmstb_waketmr_is_pending(timer))
return -EINVAL;
timer->alarm_en = true;
- if (timer->alarm_irq)
+ if (timer->alarm_irq) {
+ if (timer->alarm_expired) {
+ timer->alarm_expired = false;
+ /* maintain call balance */
+ enable_irq(timer->alarm_irq);
+ }
enable_irq(timer->alarm_irq);
+ }
} else if (!enabled && timer->alarm_en) {
if (timer->alarm_irq)
disable_irq(timer->alarm_irq);
@@ -352,6 +371,17 @@ static int brcmstb_waketmr_suspend(struct device *dev)
return brcmstb_waketmr_prepare_suspend(timer);
}
+static int brcmstb_waketmr_suspend_noirq(struct device *dev)
+{
+ struct brcmstb_waketmr *timer = dev_get_drvdata(dev);
+
+ /* Catch any alarms occurring prior to noirq */
+ if (timer->alarm_expired && device_may_wakeup(dev))
+ return -EBUSY;
+
+ return 0;
+}
+
static int brcmstb_waketmr_resume(struct device *dev)
{
struct brcmstb_waketmr *timer = dev_get_drvdata(dev);
@@ -368,10 +398,17 @@ static int brcmstb_waketmr_resume(struct device *dev)
return ret;
}
+#else
+#define brcmstb_waketmr_suspend NULL
+#define brcmstb_waketmr_suspend_noirq NULL
+#define brcmstb_waketmr_resume NULL
#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(brcmstb_waketmr_pm_ops,
- brcmstb_waketmr_suspend, brcmstb_waketmr_resume);
+static const struct dev_pm_ops brcmstb_waketmr_pm_ops = {
+ .suspend = brcmstb_waketmr_suspend,
+ .suspend_noirq = brcmstb_waketmr_suspend_noirq,
+ .resume = brcmstb_waketmr_resume,
+};
static const __maybe_unused struct of_device_id brcmstb_waketmr_of_match[] = {
{ .compatible = "brcm,brcmstb-waketimer" },
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c
index dc6b0f4a54e2..fa8bf82df948 100644
--- a/drivers/rtc/rtc-efi.c
+++ b/drivers/rtc/rtc-efi.c
@@ -227,7 +227,7 @@ static int efi_procfs(struct device *dev, struct seq_file *seq)
enabled == 1 ? "yes" : "no",
pending == 1 ? "yes" : "no");
- if (eft.timezone == EFI_UNSPECIFIED_TIMEZONE)
+ if (alm.timezone == EFI_UNSPECIFIED_TIMEZONE)
seq_puts(seq, "Timezone\t: unspecified\n");
else
/* XXX fixme: convert to string? */
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index acae7f16808f..1fdd20d01560 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -7,6 +7,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <linux/io.h>
@@ -148,9 +149,16 @@ static int ep93xx_rtc_probe(struct platform_device *pdev)
return devm_rtc_register_device(ep93xx_rtc->rtc);
}
+static const struct of_device_id ep93xx_rtc_of_ids[] = {
+ { .compatible = "cirrus,ep9301-rtc" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ep93xx_rtc_of_ids);
+
static struct platform_driver ep93xx_rtc_driver = {
.driver = {
.name = "ep93xx-rtc",
+ .of_match_table = ep93xx_rtc_of_ids,
},
.probe = ep93xx_rtc_probe,
};
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index 4b712e5ab08a..284011c419db 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -830,7 +830,7 @@ err:
return rc;
}
-static int __exit dryice_rtc_remove(struct platform_device *pdev)
+static void __exit dryice_rtc_remove(struct platform_device *pdev)
{
struct imxdi_dev *imxdi = platform_get_drvdata(pdev);
@@ -840,8 +840,6 @@ static int __exit dryice_rtc_remove(struct platform_device *pdev)
writel(0, imxdi->ioaddr + DIER);
clk_disable_unprepare(imxdi->clk);
-
- return 0;
}
static const struct of_device_id dryice_dt_ids[] = {
@@ -851,12 +849,18 @@ static const struct of_device_id dryice_dt_ids[] = {
MODULE_DEVICE_TABLE(of, dryice_dt_ids);
-static struct platform_driver dryice_rtc_driver = {
+/*
+ * dryice_rtc_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver dryice_rtc_driver __refdata = {
.driver = {
.name = "imxdi_rtc",
.of_match_table = dryice_dt_ids,
},
- .remove = __exit_p(dryice_rtc_remove),
+ .remove_new = __exit_p(dryice_rtc_remove),
};
module_platform_driver_probe(dryice_rtc_driver, dryice_rtc_probe);
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index 6c526e2ec56d..db31da56bfa7 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -282,7 +282,7 @@ out:
return ret;
}
-static int __exit mv_rtc_remove(struct platform_device *pdev)
+static void __exit mv_rtc_remove(struct platform_device *pdev)
{
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
@@ -291,8 +291,6 @@ static int __exit mv_rtc_remove(struct platform_device *pdev)
if (!IS_ERR(pdata->clk))
clk_disable_unprepare(pdata->clk);
-
- return 0;
}
#ifdef CONFIG_OF
@@ -303,8 +301,14 @@ static const struct of_device_id rtc_mv_of_match_table[] = {
MODULE_DEVICE_TABLE(of, rtc_mv_of_match_table);
#endif
-static struct platform_driver mv_rtc_driver = {
- .remove = __exit_p(mv_rtc_remove),
+/*
+ * mv_rtc_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver mv_rtc_driver __refdata = {
+ .remove_new = __exit_p(mv_rtc_remove),
.driver = {
.name = "rtc-mv",
.of_match_table = of_match_ptr(rtc_mv_of_match_table),
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 5b10ab06cd2e..c6155c48a4ac 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -18,12 +18,12 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/rtc.h>
#include <linux/rtc/rtc-omap.h>
@@ -729,16 +729,14 @@ static int omap_rtc_probe(struct platform_device *pdev)
struct omap_rtc *rtc;
u8 reg, mask, new_ctrl;
const struct platform_device_id *id_entry;
- const struct of_device_id *of_id;
int ret;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
- of_id = of_match_device(omap_rtc_of_match, &pdev->dev);
- if (of_id) {
- rtc->type = of_id->data;
+ rtc->type = device_get_match_data(&pdev->dev);
+ if (rtc->type) {
rtc->is_pmic_controller = rtc->type->has_pmic_mode &&
of_device_is_system_power_controller(pdev->dev.of_node);
} else {
diff --git a/drivers/rtc/rtc-pcap.c b/drivers/rtc/rtc-pcap.c
index 8c7a98a5452c..d6651611a0c6 100644
--- a/drivers/rtc/rtc-pcap.c
+++ b/drivers/rtc/rtc-pcap.c
@@ -166,13 +166,7 @@ static int __init pcap_rtc_probe(struct platform_device *pdev)
return devm_rtc_register_device(pcap_rtc->rtc);
}
-static int __exit pcap_rtc_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
static struct platform_driver pcap_rtc_driver = {
- .remove = __exit_p(pcap_rtc_remove),
.driver = {
.name = "pcap-rtc",
},
diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
index 06194674d71c..540042b9eec8 100644
--- a/drivers/rtc/rtc-pcf85363.c
+++ b/drivers/rtc/rtc-pcf85363.c
@@ -438,7 +438,7 @@ static int pcf85363_probe(struct i2c_client *client)
if (client->irq > 0 || wakeup_source) {
regmap_write(pcf85363->regmap, CTRL_FLAGS, 0);
regmap_update_bits(pcf85363->regmap, CTRL_PIN_IO,
- PIN_IO_INTA_OUT, PIN_IO_INTAPM);
+ PIN_IO_INTAPM, PIN_IO_INTA_OUT);
}
if (client->irq > 0) {
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index e400c78252e8..cdb39fc4cab5 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -365,12 +365,11 @@ static int __init pxa_rtc_probe(struct platform_device *pdev)
return 0;
}
-static int __exit pxa_rtc_remove(struct platform_device *pdev)
+static void __exit pxa_rtc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
pxa_rtc_release(dev);
- return 0;
}
#ifdef CONFIG_OF
@@ -403,8 +402,14 @@ static int pxa_rtc_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pxa_rtc_pm_ops, pxa_rtc_suspend, pxa_rtc_resume);
-static struct platform_driver pxa_rtc_driver = {
- .remove = __exit_p(pxa_rtc_remove),
+/*
+ * pxa_rtc_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver pxa_rtc_driver __refdata = {
+ .remove_new = __exit_p(pxa_rtc_remove),
.driver = {
.name = "pxa-rtc",
.of_match_table = of_match_ptr(pxa_rtc_dt_ids),
diff --git a/drivers/rtc/rtc-r7301.c b/drivers/rtc/rtc-r7301.c
index 5dbaeb7af648..ef913cf8593f 100644
--- a/drivers/rtc/rtc-r7301.c
+++ b/drivers/rtc/rtc-r7301.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/delay.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
@@ -55,12 +56,23 @@ struct rtc7301_priv {
u8 bank;
};
-static const struct regmap_config rtc7301_regmap_config = {
+/*
+ * When the device is memory-mapped, some platforms expose each 8-bit
+ * register in the low byte of a 32-bit word (i.e. at a 4-byte stride),
+ * while others expose the registers as simply consecutive bytes.
+ */
+static const struct regmap_config rtc7301_regmap_32_config = {
.reg_bits = 32,
.val_bits = 8,
.reg_stride = 4,
};
+static const struct regmap_config rtc7301_regmap_8_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .reg_stride = 1,
+};
+
static u8 rtc7301_read(struct rtc7301_priv *priv, unsigned int reg)
{
int reg_stride = regmap_get_reg_stride(priv->regmap);
@@ -356,7 +368,9 @@ static int __init rtc7301_rtc_probe(struct platform_device *dev)
void __iomem *regs;
struct rtc7301_priv *priv;
struct rtc_device *rtc;
+ static const struct regmap_config *mapconf;
int ret;
+ u32 val;
priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -366,8 +380,25 @@ static int __init rtc7301_rtc_probe(struct platform_device *dev)
if (IS_ERR(regs))
return PTR_ERR(regs);
+ ret = device_property_read_u32(&dev->dev, "reg-io-width", &val);
+ if (ret)
+ /* Default to 32bit accesses */
+ val = 4;
+
+ switch (val) {
+ case 1:
+ mapconf = &rtc7301_regmap_8_config;
+ break;
+ case 4:
+ mapconf = &rtc7301_regmap_32_config;
+ break;
+ default:
+ dev_err(&dev->dev, "invalid reg-io-width %d\n", val);
+ return -EINVAL;
+ }
+
priv->regmap = devm_regmap_init_mmio(&dev->dev, regs,
- &rtc7301_regmap_config);
+ mapconf);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
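
Both regmap configurations can share the same accessors because the driver scales the logical register index by the regmap's stride: 4-byte steps when reg-io-width is 4, single bytes when it is 1. A sketch of that access pattern:

/* Sketch: address register 'index' the same way for either layout. */
static int example_read_reg(struct regmap *map, unsigned int index,
			    unsigned int *val)
{
	return regmap_read(map, index * regmap_get_reg_stride(map), val);
}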
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index cd146b574143..27a191fa3704 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -469,7 +469,7 @@ static int __init sh_rtc_probe(struct platform_device *pdev)
{
struct sh_rtc *rtc;
struct resource *res;
- char clk_name[6];
+ char clk_name[14];
int clk_id, ret;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
@@ -620,7 +620,7 @@ err_unmap:
return ret;
}
-static int __exit sh_rtc_remove(struct platform_device *pdev)
+static void __exit sh_rtc_remove(struct platform_device *pdev)
{
struct sh_rtc *rtc = platform_get_drvdata(pdev);
@@ -628,8 +628,6 @@ static int __exit sh_rtc_remove(struct platform_device *pdev)
sh_rtc_setcie(&pdev->dev, 0);
clk_disable(rtc->clk);
-
- return 0;
}
static void sh_rtc_set_irq_wake(struct device *dev, int enabled)
@@ -668,13 +666,19 @@ static const struct of_device_id sh_rtc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, sh_rtc_of_match);
-static struct platform_driver sh_rtc_platform_driver = {
+/*
+ * sh_rtc_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver sh_rtc_platform_driver __refdata = {
.driver = {
.name = DRV_NAME,
.pm = &sh_rtc_pm_ops,
.of_match_table = sh_rtc_of_match,
},
- .remove = __exit_p(sh_rtc_remove),
+ .remove_new = __exit_p(sh_rtc_remove),
};
module_platform_driver_probe(sh_rtc_platform_driver, sh_rtc_probe);
diff --git a/drivers/rtc/rtc-ssd202d.c b/drivers/rtc/rtc-ssd202d.c
new file mode 100644
index 000000000000..ed6493260096
--- /dev/null
+++ b/drivers/rtc/rtc-ssd202d.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Real time clocks driver for MStar/SigmaStar SSD202D SoCs.
+ *
+ * (C) 2021 Daniel Palmer
+ * (C) 2023 Romain Perier
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/regmap.h>
+#include <linux/pm.h>
+
+#define REG_CTRL 0x0
+#define REG_CTRL1 0x4
+#define REG_ISO_CTRL 0xc
+#define REG_WRDATA_L 0x10
+#define REG_WRDATA_H 0x14
+#define REG_ISOACK 0x20
+#define REG_RDDATA_L 0x24
+#define REG_RDDATA_H 0x28
+#define REG_RDCNT_L 0x30
+#define REG_RDCNT_H 0x34
+#define REG_CNT_TRIG 0x38
+#define REG_PWRCTRL 0x3c
+#define REG_RTC_TEST 0x54
+
+#define CNT_RD_TRIG_BIT BIT(0)
+#define CNT_RD_BIT BIT(0)
+#define BASE_WR_BIT BIT(1)
+#define BASE_RD_BIT BIT(2)
+#define CNT_RST_BIT BIT(3)
+#define ISO_CTRL_ACK_MASK BIT(3)
+#define ISO_CTRL_ACK_SHIFT 3
+#define SW0_WR_BIT BIT(5)
+#define SW1_WR_BIT BIT(6)
+#define SW0_RD_BIT BIT(7)
+#define SW1_RD_BIT BIT(8)
+
+#define ISO_CTRL_MASK GENMASK(2, 0)
+
+struct ssd202d_rtc {
+ struct rtc_device *rtc_dev;
+ void __iomem *base;
+};
+
+static u8 read_iso_en(void __iomem *base)
+{
+ return readb(base + REG_RTC_TEST) & 0x1;
+}
+
+static u8 read_iso_ctrl_ack(void __iomem *base)
+{
+ return (readb(base + REG_ISOACK) & ISO_CTRL_ACK_MASK) >> ISO_CTRL_ACK_SHIFT;
+}
+
+static int ssd202d_rtc_isoctrl(struct ssd202d_rtc *priv)
+{
+ static const unsigned int sequence[] = { 0x0, 0x1, 0x3, 0x7, 0x5, 0x1, 0x0 };
+ unsigned int val;
+ struct device *dev = &priv->rtc_dev->dev;
+ int i, ret;
+
+ /*
+ * This gates iso_en by writing a special sequence of bytes to iso_ctrl
+ * and ensuring that it has been correctly applied by reading iso_ctrl_ack
+ */
+ for (i = 0; i < ARRAY_SIZE(sequence); i++) {
+ writeb(sequence[i] & ISO_CTRL_MASK, priv->base + REG_ISO_CTRL);
+
+ ret = read_poll_timeout(read_iso_ctrl_ack, val, val == (i % 2), 100,
+ 20 * 100, true, priv->base);
+ if (ret) {
+ dev_dbg(dev, "Timeout waiting for ack byte %i (%x) of sequence\n", i,
+ sequence[i]);
+ return ret;
+ }
+ }
+
+ /*
+ * At this point iso_en should be raised for 1ms
+ */
+ ret = read_poll_timeout(read_iso_en, val, val, 100, 22 * 100, true, priv->base);
+ if (ret)
+ dev_dbg(dev, "Timeout waiting for iso_en\n");
+ mdelay(2);
+ return 0;
+}
+
+static void ssd202d_rtc_read_reg(struct ssd202d_rtc *priv, unsigned int reg,
+ unsigned int field, unsigned int *base)
+{
+ unsigned int l, h;
+ u16 val;
+
+ /* Ask for the content of an RTC register to be latched into RDDATA by
+ * gating iso_en; once iso_en has been gated, RDDATA can be read back
+ */
+ val = readw(priv->base + reg);
+ writew(val | field, priv->base + reg);
+ ssd202d_rtc_isoctrl(priv);
+ writew(val & ~field, priv->base + reg);
+
+ l = readw(priv->base + REG_RDDATA_L);
+ h = readw(priv->base + REG_RDDATA_H);
+
+ *base = (h << 16) | l;
+}
+
+static void ssd202d_rtc_write_reg(struct ssd202d_rtc *priv, unsigned int reg,
+ unsigned int field, u32 base)
+{
+ u16 val;
+
+ /* Set the content of an RTC value from WRDATA by gating iso_en */
+ val = readw(priv->base + reg);
+ writew(val | field, priv->base + reg);
+ writew(base, priv->base + REG_WRDATA_L);
+ writew(base >> 16, priv->base + REG_WRDATA_H);
+ ssd202d_rtc_isoctrl(priv);
+ writew(val & ~field, priv->base + reg);
+}
+
+static int ssd202d_rtc_read_counter(struct ssd202d_rtc *priv, unsigned int *counter)
+{
+ unsigned int l, h;
+ u16 val;
+
+ val = readw(priv->base + REG_CTRL1);
+ writew(val | CNT_RD_BIT, priv->base + REG_CTRL1);
+ ssd202d_rtc_isoctrl(priv);
+ writew(val & ~CNT_RD_BIT, priv->base + REG_CTRL1);
+
+ val = readw(priv->base + REG_CTRL1);
+ writew(val | CNT_RD_TRIG_BIT, priv->base + REG_CNT_TRIG);
+ writew(val & ~CNT_RD_TRIG_BIT, priv->base + REG_CNT_TRIG);
+
+ l = readw(priv->base + REG_RDCNT_L);
+ h = readw(priv->base + REG_RDCNT_H);
+
+ *counter = (h << 16) | l;
+
+ return 0;
+}
+
+static int ssd202d_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct ssd202d_rtc *priv = dev_get_drvdata(dev);
+ unsigned int sw0, base, counter;
+ u32 seconds;
+ int ret;
+
+ /* Check that RTC is enabled by SW */
+ ssd202d_rtc_read_reg(priv, REG_CTRL, SW0_RD_BIT, &sw0);
+ if (sw0 != 1)
+ return -EINVAL;
+
+ /* Get RTC base value from RDDATA */
+ ssd202d_rtc_read_reg(priv, REG_CTRL, BASE_RD_BIT, &base);
+ /* Get RTC counter value from RDDATA */
+ ret = ssd202d_rtc_read_counter(priv, &counter);
+ if (ret)
+ return ret;
+
+ seconds = base + counter;
+
+ rtc_time64_to_tm(seconds, tm);
+
+ return 0;
+}
+
+static int ssd202d_rtc_reset_counter(struct ssd202d_rtc *priv)
+{
+ u16 val;
+
+ val = readw(priv->base + REG_CTRL);
+ writew(val | CNT_RST_BIT, priv->base + REG_CTRL);
+ ssd202d_rtc_isoctrl(priv);
+ writew(val & ~CNT_RST_BIT, priv->base + REG_CTRL);
+ ssd202d_rtc_isoctrl(priv);
+
+ return 0;
+}
+
+static int ssd202d_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct ssd202d_rtc *priv = dev_get_drvdata(dev);
+ unsigned long seconds = rtc_tm_to_time64(tm);
+
+ ssd202d_rtc_write_reg(priv, REG_CTRL, BASE_WR_BIT, seconds);
+ ssd202d_rtc_reset_counter(priv);
+ ssd202d_rtc_write_reg(priv, REG_CTRL, SW0_WR_BIT, 1);
+
+ return 0;
+}
+
+static const struct rtc_class_ops ssd202d_rtc_ops = {
+ .read_time = ssd202d_rtc_read_time,
+ .set_time = ssd202d_rtc_set_time,
+};
+
+static int ssd202d_rtc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ssd202d_rtc *priv;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct ssd202d_rtc), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->rtc_dev = devm_rtc_allocate_device(dev);
+ if (IS_ERR(priv->rtc_dev))
+ return PTR_ERR(priv->rtc_dev);
+
+ priv->rtc_dev->ops = &ssd202d_rtc_ops;
+ priv->rtc_dev->range_max = U32_MAX;
+
+ platform_set_drvdata(pdev, priv);
+
+ return devm_rtc_register_device(priv->rtc_dev);
+}
+
+static const struct of_device_id ssd202d_rtc_of_match_table[] = {
+ { .compatible = "mstar,ssd202d-rtc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ssd202d_rtc_of_match_table);
+
+static struct platform_driver ssd202d_rtc_driver = {
+ .probe = ssd202d_rtc_probe,
+ .driver = {
+ .name = "ssd202d-rtc",
+ .of_match_table = ssd202d_rtc_of_match_table,
+ },
+};
+module_platform_driver(ssd202d_rtc_driver);
+
+MODULE_AUTHOR("Daniel Palmer <daniel@thingy.jp>");
+MODULE_AUTHOR("Romain Perier <romain.perier@gmail.com>");
+MODULE_DESCRIPTION("MStar SSD202D RTC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/s390/char/diag_ftp.c b/drivers/s390/char/diag_ftp.c
index 65c7f2d565d8..9418a9270d03 100644
--- a/drivers/s390/char/diag_ftp.c
+++ b/drivers/s390/char/diag_ftp.c
@@ -16,7 +16,7 @@
#include <linux/wait.h>
#include <linux/string.h>
#include <asm/asm-extable.h>
-#include <asm/ctl_reg.h>
+#include <asm/ctlreg.h>
#include <asm/diag.h>
#include "hmcdrv_ftp.h"
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 8f74db689a0c..d53ee34d398f 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -81,7 +81,7 @@ static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err)
struct sclp_trace_entry e;
memset(&e, 0, sizeof(e));
- strncpy(e.id, id, sizeof(e.id));
+ strtomem(e.id, id);
e.a = a;
e.b = b;
debug_event(&sclp_debug, prio, &e, sizeof(e));
@@ -706,8 +706,8 @@ void
sclp_sync_wait(void)
{
unsigned long long old_tick;
+ struct ctlreg cr0, cr0_sync;
unsigned long flags;
- unsigned long cr0, cr0_sync;
static u64 sync_count;
u64 timeout;
int irq_context;
@@ -732,10 +732,10 @@ sclp_sync_wait(void)
/* Enable service-signal interruption, disable timer interrupts */
old_tick = local_tick_disable();
trace_hardirqs_on();
- __ctl_store(cr0, 0, 0);
- cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
- cr0_sync |= 1UL << (63 - 54);
- __ctl_load(cr0_sync, 0, 0);
+ local_ctl_store(0, &cr0);
+ cr0_sync.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK;
+ cr0_sync.val |= 1UL << (63 - 54);
+ local_ctl_load(0, &cr0_sync);
__arch_local_irq_stosm(0x01);
/* Loop until driver state indicates finished request */
while (sclp_running_state != sclp_running_state_idle) {
@@ -745,7 +745,7 @@ sclp_sync_wait(void)
cpu_relax();
}
local_irq_disable();
- __ctl_load(cr0, 0, 0);
+ local_ctl_load(0, &cr0);
if (!irq_context)
_local_bh_enable();
local_tick_enable(old_tick);
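
The s390 hunks in this and the following files convert from the old __ctl_store()/__ctl_load() macros on bare unsigned longs to the <asm/ctlreg.h> interface, where control registers are carried in struct ctlreg and system-wide bits go through system_ctl_set_bit()/system_ctl_clear_bit(). A condensed sketch of the local-CPU usage, mirroring the sclp_sync_wait() change above:

#include <asm/ctlreg.h>

/* Sketch: temporarily mask the CR0 interrupt subclasses on this CPU. */
static void example_mask_cr0_irq_subclasses(void)
{
	struct ctlreg cr0, cr0_new;

	local_ctl_store(0, &cr0);		/* save current CR0 */
	cr0_new.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK;
	local_ctl_load(0, &cr0_new);		/* activate the masked value */

	/* ... work with the subclasses disabled ... */

	local_ctl_load(0, &cr0);		/* restore the saved CR0 */
}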
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 8b4575a0db9f..11c428f4c7cf 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -19,7 +19,7 @@
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/module.h>
-#include <asm/ctl_reg.h>
+#include <asm/ctlreg.h>
#include <asm/chpid.h>
#include <asm/setup.h>
#include <asm/page.h>
@@ -353,7 +353,6 @@ static int sclp_mem_notifier(struct notifier_block *nb,
sclp_mem_change_state(start, size, 0);
break;
default:
- rc = -EINVAL;
break;
}
mutex_unlock(&sclp_mem_mutex);
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index fdc8668f3fba..60a247fdb2a7 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -10,7 +10,7 @@
#include <linux/errno.h>
#include <linux/memblock.h>
-#include <asm/ctl_reg.h>
+#include <asm/ctlreg.h>
#include <asm/sclp.h>
#include <asm/ipl.h>
#include <asm/setup.h>
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
index dbd5c53d8edf..9f6165cafdc3 100644
--- a/drivers/s390/char/sclp_early_core.c
+++ b/drivers/s390/char/sclp_early_core.c
@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <asm/processor.h>
#include <asm/lowcore.h>
+#include <asm/ctlreg.h>
#include <asm/ebcdic.h>
#include <asm/irq.h>
#include <asm/sections.h>
@@ -31,11 +32,11 @@ void sclp_early_wait_irq(void)
psw_t psw_ext_save, psw_wait;
union ctlreg0 cr0, cr0_new;
- __ctl_store(cr0.val, 0, 0);
+ local_ctl_store(0, &cr0.reg);
cr0_new.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK;
cr0_new.lap = 0;
cr0_new.sssm = 1;
- __ctl_load(cr0_new.val, 0, 0);
+ local_ctl_load(0, &cr0_new.reg);
psw_ext_save = S390_lowcore.external_new_psw;
psw_mask = __extract_psw();
@@ -58,7 +59,7 @@ void sclp_early_wait_irq(void)
} while (S390_lowcore.ext_int_code != EXT_IRQ_SERVICE_SIG);
S390_lowcore.external_new_psw = psw_ext_save;
- __ctl_load(cr0.val, 0, 0);
+ local_ctl_load(0, &cr0.reg);
}
int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb)
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 5440f285f349..675d7ed82356 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -334,7 +334,7 @@ static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr,
return 0;
if (chp->cmg == -1) /* channel measurements not available */
return sprintf(buf, "unknown\n");
- return sprintf(buf, "%x\n", chp->cmg);
+ return sprintf(buf, "%d\n", chp->cmg);
}
static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 0abd77f4b664..f8b04ce61556 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -393,8 +393,8 @@ static void format_node_data(char *params, char *id, struct node_descriptor *nd)
memset(id, 0, NODEID_LEN);
if (nd->validity != ND_VALIDITY_VALID) {
- strncpy(params, "n/a", PARAMS_LEN - 1);
- strncpy(id, "n/a", NODEID_LEN - 1);
+ strscpy(params, "n/a", PARAMS_LEN);
+ strscpy(id, "n/a", NODEID_LEN);
return;
}
@@ -881,8 +881,8 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable)
secm_area->request.code = 0x0016;
secm_area->key = PAGE_DEFAULT_KEY >> 4;
- secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
- secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
+ secm_area->cub_addr1 = virt_to_phys(css->cub_addr1);
+ secm_area->cub_addr2 = virt_to_phys(css->cub_addr2);
secm_area->operation_code = enable ? 0 : 1;
diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c
index 7b02a6349c4d..4916dd0a7eb1 100644
--- a/drivers/s390/cio/crw.c
+++ b/drivers/s390/cio/crw.c
@@ -12,8 +12,8 @@
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/wait.h>
+#include <asm/ctlreg.h>
#include <asm/crw.h>
-#include <asm/ctl_reg.h>
#include "ioasm.h"
static DEFINE_MUTEX(crw_handler_mutex);
@@ -156,7 +156,7 @@ static int __init crw_machine_check_init(void)
task = kthread_run(crw_collect_info, NULL, "kmcheck");
if (IS_ERR(task))
return PTR_ERR(task);
- ctl_set_bit(14, 28); /* enable channel report MCH */
+ system_ctl_set_bit(14, CR14_CHANNEL_REPORT_SUBMASK_BIT);
return 0;
}
device_initcall(crw_machine_check_init);
diff --git a/drivers/s390/cio/isc.c b/drivers/s390/cio/isc.c
index 77fde9f5ea8b..dbc2ac7711e5 100644
--- a/drivers/s390/cio/isc.c
+++ b/drivers/s390/cio/isc.c
@@ -33,7 +33,7 @@ void isc_register(unsigned int isc)
spin_lock(&isc_ref_lock);
if (isc_refs[isc] == 0)
- ctl_set_bit(6, 31 - isc);
+ system_ctl_set_bit(6, 31 - isc);
isc_refs[isc]++;
spin_unlock(&isc_ref_lock);
}
@@ -61,7 +61,7 @@ void isc_unregister(unsigned int isc)
goto out_unlock;
}
if (isc_refs[isc] == 1)
- ctl_clear_bit(6, 31 - isc);
+ system_ctl_clear_bit(6, 31 - isc);
isc_refs[isc]--;
out_unlock:
spin_unlock(&isc_ref_lock);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 339812efe822..5dd33155d5d5 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -352,7 +352,7 @@ EXPORT_SYMBOL(ap_test_config_ctrl_domain);
/*
* ap_queue_info(): Check and get AP queue info.
* Returns: 1 if APQN exists and info is filled,
- * 0 if APQN seems to exit but there is no info
+ * 0 if APQN seems to exist but there is no info
* available (eg. caused by an asynch pending error)
* -1 invalid APQN, TAPQ error or AP queue status which
* indicates there is no APQN.
@@ -373,36 +373,33 @@ static int ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac,
/* call TAPQ on this APQN */
status = ap_test_queue(qid, ap_apft_available(), &tapq_info);
- /* handle pending async error with return 'no info available' */
- if (status.async)
- return 0;
-
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
case AP_RESPONSE_BUSY:
- /*
- * According to the architecture in all these cases the
- * info should be filled. All bits 0 is not possible as
- * there is at least one of the mode bits set.
- */
- if (WARN_ON_ONCE(!tapq_info.value))
- return 0;
- *q_type = tapq_info.at;
- *q_fac = tapq_info.fac;
- *q_depth = tapq_info.qd;
- *q_ml = tapq_info.ml;
- *q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED;
- *q_cstop = status.response_code == AP_RESPONSE_CHECKSTOPPED;
- return 1;
+ /* For all these RCs the tapq info should be available */
+ break;
default:
- /*
- * A response code which indicates, there is no info available.
- */
- return -1;
+ /* On a pending async error the info should be available */
+ if (!status.async)
+ return -1;
+ break;
}
+
+ /* There should be at least one of the mode bits set */
+ if (WARN_ON_ONCE(!tapq_info.value))
+ return 0;
+
+ *q_type = tapq_info.at;
+ *q_fac = tapq_info.fac;
+ *q_depth = tapq_info.qd;
+ *q_ml = tapq_info.ml;
+ *q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED;
+ *q_cstop = status.response_code == AP_RESPONSE_CHECKSTOPPED;
+
+ return 1;
}
void ap_wait(enum ap_sm_wait wait)
@@ -1022,6 +1019,10 @@ EXPORT_SYMBOL(ap_driver_unregister);
void ap_bus_force_rescan(void)
{
+ /* Only trigger AP bus scans after the initial scan is done */
+ if (atomic64_read(&ap_scan_bus_count) <= 0)
+ return;
+
/* processing a asynchronous bus rescan */
del_timer(&ap_config_timer);
queue_work(system_long_wq, &ap_scan_work);
@@ -1865,15 +1866,18 @@ static inline void ap_scan_domains(struct ap_card *ac)
}
/* get it and thus adjust reference counter */
get_device(dev);
- if (decfg)
+ if (decfg) {
AP_DBF_INFO("%s(%d,%d) new (decfg) queue dev created\n",
__func__, ac->id, dom);
- else if (chkstop)
+ } else if (chkstop) {
AP_DBF_INFO("%s(%d,%d) new (chkstop) queue dev created\n",
__func__, ac->id, dom);
- else
+ } else {
+ /* nudge the queue's state machine */
+ ap_queue_init_state(aq);
AP_DBF_INFO("%s(%d,%d) new queue dev created\n",
__func__, ac->id, dom);
+ }
goto put_dev_and_continue;
}
/* handle state changes on already existing queue device */
@@ -1895,10 +1899,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
} else if (!chkstop && aq->chkstop) {
/* checkstop off */
aq->chkstop = false;
- if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
- aq->dev_state = AP_DEV_STATE_OPERATING;
- aq->sm_state = AP_SM_STATE_RESET_START;
- }
+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
+ _ap_queue_init_state(aq);
spin_unlock_bh(&aq->lock);
AP_DBF_DBG("%s(%d,%d) queue dev checkstop off\n",
__func__, ac->id, dom);
@@ -1922,10 +1924,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
} else if (!decfg && !aq->config) {
/* config on this queue device */
aq->config = true;
- if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
- aq->dev_state = AP_DEV_STATE_OPERATING;
- aq->sm_state = AP_SM_STATE_RESET_START;
- }
+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
+ _ap_queue_init_state(aq);
spin_unlock_bh(&aq->lock);
AP_DBF_DBG("%s(%d,%d) queue dev config on\n",
__func__, ac->id, dom);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index be54b070c031..b0771ca0849b 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -206,7 +206,7 @@ struct ap_queue {
bool config; /* configured state */
bool chkstop; /* checkstop state */
ap_qid_t qid; /* AP queue id. */
- bool interrupt; /* indicate if interrupts are enabled */
+ bool se_bound; /* SE bound state */
unsigned int assoc_idx; /* SE association index */
int queue_count; /* # messages currently on AP queue. */
int pendingq_count; /* # requests on pendingq list. */
@@ -271,6 +271,7 @@ enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event);
int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg);
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg);
void ap_flush_queue(struct ap_queue *aq);
+bool ap_queue_usable(struct ap_queue *aq);
void *ap_airq_ptr(void);
int ap_sb_available(void);
@@ -287,6 +288,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
void ap_queue_prepare_remove(struct ap_queue *aq);
void ap_queue_remove(struct ap_queue *aq);
void ap_queue_init_state(struct ap_queue *aq);
+void _ap_queue_init_state(struct ap_queue *aq);
struct ap_card *ap_card_create(int id, int queue_depth, int raw_type,
int comp_type, unsigned int functions, int ml);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 1336e632adc4..3934a0cc13e7 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -33,6 +33,11 @@ static inline bool ap_q_supports_assoc(struct ap_queue *aq)
return ap_test_bit(&aq->card->functions, AP_FUNC_EP11);
}
+static inline bool ap_q_needs_bind(struct ap_queue *aq)
+{
+ return ap_q_supports_bind(aq) && ap_sb_available();
+}
+
/**
* ap_queue_enable_irq(): Enable interrupt support on this AP queue.
* @aq: The AP queue
@@ -195,13 +200,13 @@ static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
return AP_SM_WAIT_AGAIN;
}
aq->sm_state = AP_SM_STATE_IDLE;
- return AP_SM_WAIT_NONE;
+ break;
case AP_RESPONSE_NO_PENDING_REPLY:
if (aq->queue_count > 0)
- return aq->interrupt ?
+ return status.irq_enabled ?
AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
aq->sm_state = AP_SM_STATE_IDLE;
- return AP_SM_WAIT_NONE;
+ break;
default:
aq->dev_state = AP_DEV_STATE_ERROR;
aq->last_err_rc = status.response_code;
@@ -210,6 +215,16 @@ static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return AP_SM_WAIT_NONE;
}
+ /* Check and maybe enable irq support (again) on this queue */
+ if (!status.irq_enabled && status.queue_empty) {
+ void *lsi_ptr = ap_airq_ptr();
+
+ if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0) {
+ aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
+ return AP_SM_WAIT_AGAIN;
+ }
+ }
+ return AP_SM_WAIT_NONE;
}
/**
@@ -249,7 +264,7 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
fallthrough;
case AP_RESPONSE_Q_FULL:
aq->sm_state = AP_SM_STATE_QUEUE_FULL;
- return aq->interrupt ?
+ return status.irq_enabled ?
AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
case AP_RESPONSE_RESET_IN_PROGRESS:
aq->sm_state = AP_SM_STATE_RESET_WAIT;
@@ -302,8 +317,8 @@ static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_RESET_IN_PROGRESS:
aq->sm_state = AP_SM_STATE_RESET_WAIT;
- aq->interrupt = false;
aq->rapq_fbit = 0;
+ aq->se_bound = false;
return AP_SM_WAIT_LOW_TIMEOUT;
default:
aq->dev_state = AP_DEV_STATE_ERROR;
@@ -377,7 +392,6 @@ static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
if (status.irq_enabled == 1) {
/* Irqs are now enabled */
- aq->interrupt = true;
aq->sm_state = (aq->queue_count > 0) ?
AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
}
@@ -620,16 +634,21 @@ static ssize_t interrupt_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
+ struct ap_queue_status status;
int rc = 0;
spin_lock_bh(&aq->lock);
- if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
+ if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT) {
rc = sysfs_emit(buf, "Enable Interrupt pending.\n");
- else if (aq->interrupt)
- rc = sysfs_emit(buf, "Interrupts enabled.\n");
- else
- rc = sysfs_emit(buf, "Interrupts disabled.\n");
+ } else {
+ status = ap_tapq(aq->qid, NULL);
+ if (status.irq_enabled)
+ rc = sysfs_emit(buf, "Interrupts enabled.\n");
+ else
+ rc = sysfs_emit(buf, "Interrupts disabled.\n");
+ }
spin_unlock_bh(&aq->lock);
+
return rc;
}
@@ -868,7 +887,12 @@ static ssize_t se_bind_store(struct device *dev,
}
status = ap_bapq(aq->qid);
spin_unlock_bh(&aq->lock);
- if (status.response_code) {
+ if (!status.response_code) {
+ aq->se_bound = true;
+ AP_DBF_INFO("%s bapq(0x%02x.%04x) success\n", __func__,
+ AP_QID_CARD(aq->qid),
+ AP_QID_QUEUE(aq->qid));
+ } else {
AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
__func__, status.response_code,
AP_QID_CARD(aq->qid),
@@ -1021,7 +1045,6 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
if (ap_sb_available() && is_prot_virt_guest())
aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;
aq->qid = qid;
- aq->interrupt = false;
spin_lock_init(&aq->lock);
INIT_LIST_HEAD(&aq->pendingq);
INIT_LIST_HEAD(&aq->requestq);
@@ -1074,6 +1097,42 @@ int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
EXPORT_SYMBOL(ap_queue_message);
/**
+ * ap_queue_usable(): Check if queue is usable just now.
+ * @aq: The AP queue device to test for usability.
+ * This function is intended for the scheduler to query if it makes
+ * sense to enqueue a message into this AP queue device by calling
+ * ap_queue_message(). The perspective is very short-term as the
+ * state machine and device state(s) may change at any time.
+ */
+bool ap_queue_usable(struct ap_queue *aq)
+{
+ bool rc = true;
+
+ spin_lock_bh(&aq->lock);
+
+ /* check for not configured or checkstopped */
+ if (!aq->config || aq->chkstop) {
+ rc = false;
+ goto unlock_and_out;
+ }
+
+ /* device state needs to be ok */
+ if (aq->dev_state != AP_DEV_STATE_OPERATING) {
+ rc = false;
+ goto unlock_and_out;
+ }
+
+ /* SE guest's queues additionally need to be bound */
+ if (ap_q_needs_bind(aq) && !aq->se_bound)
+ rc = false;
+
+unlock_and_out:
+ spin_unlock_bh(&aq->lock);
+ return rc;
+}
+EXPORT_SYMBOL(ap_queue_usable);
+
+/**
* ap_cancel_message(): Cancel a crypto request.
* @aq: The AP device that has the message queued
* @ap_msg: The message that is to be removed
@@ -1160,14 +1219,19 @@ void ap_queue_remove(struct ap_queue *aq)
spin_unlock_bh(&aq->lock);
}
-void ap_queue_init_state(struct ap_queue *aq)
+void _ap_queue_init_state(struct ap_queue *aq)
{
- spin_lock_bh(&aq->lock);
aq->dev_state = AP_DEV_STATE_OPERATING;
aq->sm_state = AP_SM_STATE_RESET_START;
aq->last_err_rc = 0;
aq->assoc_idx = ASSOC_IDX_INVALID;
ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
+}
+
+void ap_queue_init_state(struct ap_queue *aq)
+{
+ spin_lock_bh(&aq->lock);
+ _ap_queue_init_state(aq);
spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_state);
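
A minimal sketch (hypothetical helper, not part of the patch) of how a dispatcher is expected to combine the new ap_queue_usable() with ap_queue_message(): the check is advisory only, so the enqueue can still fail and callers keep handling that error. The zcrypt_api.c hunks below use the same check inside their queue-selection loops.

	/* Hypothetical caller, assuming the driver-internal "ap_bus.h" header. */
	#include "ap_bus.h"

	static int try_enqueue(struct ap_queue *aq, struct ap_message *ap_msg)
	{
		/* Advisory check: not configured, checkstopped, wrong device
		 * state, or an SE guest queue that is not yet bound makes the
		 * queue unusable right now. */
		if (!ap_queue_usable(aq))
			return -ENODEV;

		/* State may change between the check and the enqueue, so the
		 * return value of ap_queue_message() must still be honoured. */
		return ap_queue_message(aq, ap_msg);
	}
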
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index ce04caa7913f..dcd6c7299fa9 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -693,7 +693,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
for_each_zcrypt_queue(zq, zc) {
/* check if device is usable and eligible */
if (!zq->online || !zq->ops->rsa_modexpo ||
- !zq->queue->config || zq->queue->chkstop)
+ !ap_queue_usable(zq->queue))
continue;
/* check if device node has admission for this queue */
if (!zcrypt_check_queue(perms,
@@ -798,7 +798,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
for_each_zcrypt_queue(zq, zc) {
/* check if device is usable and eligible */
if (!zq->online || !zq->ops->rsa_modexpo_crt ||
- !zq->queue->config || zq->queue->chkstop)
+ !ap_queue_usable(zq->queue))
continue;
/* check if device node has admission for this queue */
if (!zcrypt_check_queue(perms,
@@ -916,7 +916,7 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
for_each_zcrypt_queue(zq, zc) {
/* check for device usable and eligible */
if (!zq->online || !zq->ops->send_cprb ||
- !zq->queue->config || zq->queue->chkstop ||
+ !ap_queue_usable(zq->queue) ||
(tdom != AUTOSEL_DOM &&
tdom != AP_QID_QUEUE(zq->queue->qid)))
continue;
@@ -1087,7 +1087,7 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
for_each_zcrypt_queue(zq, zc) {
/* check if device is usable and eligible */
if (!zq->online || !zq->ops->send_ep11_cprb ||
- !zq->queue->config || zq->queue->chkstop ||
+ !ap_queue_usable(zq->queue) ||
(targets &&
!is_desired_ep11_queue(zq->queue->qid,
target_num, targets)))
@@ -1186,7 +1186,7 @@ static long zcrypt_rng(char *buffer)
for_each_zcrypt_queue(zq, zc) {
/* check if device is usable and eligible */
if (!zq->online || !zq->ops->rng ||
- !zq->queue->config || zq->queue->chkstop)
+ !ap_queue_usable(zq->queue))
continue;
if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt))
continue;
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
index c815722d0ac8..050462d95222 100644
--- a/drivers/s390/crypto/zcrypt_card.c
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -52,7 +52,7 @@ static ssize_t online_show(struct device *dev,
{
struct zcrypt_card *zc = dev_get_drvdata(dev);
struct ap_card *ac = to_ap_card(dev);
- int online = ac->config && zc->online ? 1 : 0;
+ int online = ac->config && !ac->chkstop && zc->online ? 1 : 0;
return sysfs_emit(buf, "%d\n", online);
}
@@ -70,7 +70,7 @@ static ssize_t online_store(struct device *dev,
if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
return -EINVAL;
- if (online && !ac->config)
+ if (online && (!ac->config || ac->chkstop))
return -ENODEV;
zc->online = online;
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 9cfce9ff2e65..5c4532ab0040 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -279,7 +279,11 @@ static const struct {
{ 1, "BSI2009" },
{ 2, "FIPS2011" },
{ 3, "BSI2011" },
+ { 4, "SIGG-IMPORT" },
+ { 5, "SIGG" },
{ 6, "BSICC2017" },
+ { 7, "FIPS2021" },
+ { 8, "FIPS2024" },
{ 0, NULL }
};
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index d36177e65a3d..a44fcfcec938 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -98,8 +98,22 @@ static inline int convert_error(struct zcrypt_queue *zq,
case REP88_ERROR_MESSAGE_MALFORMD: /* 0x22 */
case REP88_ERROR_KEY_TYPE: /* 0x34 */
/* RY indicates malformed request */
- ZCRYPT_DBF_WARN("%s dev=%02x.%04x RY=0x%02x => rc=EINVAL\n",
- __func__, card, queue, ehdr->reply_code);
+ if (ehdr->reply_code == REP82_ERROR_FILTERED_BY_HYPERVISOR &&
+ ehdr->type == TYPE86_RSP_CODE) {
+ struct {
+ struct type86_hdr hdr;
+ struct type86_fmt2_ext fmt2;
+ } __packed * head = reply->msg;
+ unsigned int apfs = *((u32 *)head->fmt2.apfs);
+
+ ZCRYPT_DBF_WARN("%s dev=%02x.%04x RY=0x%02x apfs=0x%x => rc=EINVAL\n",
+ __func__, card, queue,
+ ehdr->reply_code, apfs);
+ } else {
+ ZCRYPT_DBF_WARN("%s dev=%02x.%04x RY=0x%02x => rc=EINVAL\n",
+ __func__, card, queue,
+ ehdr->reply_code);
+ }
return -EINVAL;
case REP82_ERROR_MACHINE_FAILURE: /* 0x10 */
case REP82_ERROR_MESSAGE_TYPE: /* 0x20 */
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
index 112a80e8e6c2..67d8e0ae0eec 100644
--- a/drivers/s390/crypto/zcrypt_queue.c
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -42,7 +42,7 @@ static ssize_t online_show(struct device *dev,
{
struct zcrypt_queue *zq = dev_get_drvdata(dev);
struct ap_queue *aq = to_ap_queue(dev);
- int online = aq->config && zq->online ? 1 : 0;
+ int online = aq->config && !aq->chkstop && zq->online ? 1 : 0;
return sysfs_emit(buf, "%d\n", online);
}
@@ -59,7 +59,8 @@ static ssize_t online_store(struct device *dev,
if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
return -EINVAL;
- if (online && (!aq->config || !aq->card->config))
+ if (online && (!aq->config || !aq->card->config ||
+ aq->chkstop || aq->card->chkstop))
return -ENODEV;
if (online && !zc->online)
return -EINVAL;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 6af2511e070c..cf8506d0f185 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3675,7 +3675,7 @@ static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
/*
- * check if weed have to switch to non-packing mode or if
+ * check if we have to switch to non-packing mode or if
* we have to get a pci flag out on the queue
*/
if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
index 9729ce86db59..a877347d37d3 100644
--- a/drivers/soc/fsl/qe/qe_common.c
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -141,7 +141,7 @@ static s32 cpm_muram_alloc_common(unsigned long size,
*
* This function returns a non-negative offset into the muram area, or
* a negative errno on failure.
- * Use cpm_dpram_addr() to get the virtual address of the area.
+ * Use cpm_muram_addr() to get the virtual address of the area.
* Use cpm_muram_free() to free the allocation.
*/
s32 cpm_muram_alloc(unsigned long size, unsigned long align)
@@ -193,7 +193,7 @@ EXPORT_SYMBOL(cpm_muram_free);
* @size: number of bytes to allocate
* This function returns @offset if the area was available, a negative
* errno otherwise.
- * Use cpm_dpram_addr() to get the virtual address of the area.
+ * Use cpm_muram_addr() to get the virtual address of the area.
* Use cpm_muram_free() to free the allocation.
*/
s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 0e7bc3c40f9d..41b0d9adf68e 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -1001,7 +1001,7 @@ static int sdw_slave_clk_stop_prepare(struct sdw_slave *slave,
return ret;
}
-static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num)
+static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num, bool prepare)
{
int retry = bus->clk_stop_timeout;
int val;
@@ -1015,7 +1015,8 @@ static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num)
}
val &= SDW_SCP_STAT_CLK_STP_NF;
if (!val) {
- dev_dbg(bus->dev, "clock stop prep/de-prep done slave:%d\n",
+ dev_dbg(bus->dev, "clock stop %s done slave:%d\n",
+ prepare ? "prepare" : "deprepare",
dev_num);
return 0;
}
@@ -1024,7 +1025,8 @@ static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num)
retry--;
} while (retry);
- dev_err(bus->dev, "clock stop prep/de-prep failed slave:%d\n",
+ dev_dbg(bus->dev, "clock stop %s did not complete for slave:%d\n",
+ prepare ? "prepare" : "deprepare",
dev_num);
return -ETIMEDOUT;
@@ -1095,7 +1097,7 @@ int sdw_bus_prep_clk_stop(struct sdw_bus *bus)
*/
if (!simple_clk_stop) {
ret = sdw_bus_wait_for_clk_prep_deprep(bus,
- SDW_BROADCAST_DEV_NUM);
+ SDW_BROADCAST_DEV_NUM, true);
/*
* if there are no Slave devices present and the reply is
* Command_Ignored/-ENODATA, we don't need to continue with the
@@ -1215,7 +1217,7 @@ int sdw_bus_exit_clk_stop(struct sdw_bus *bus)
* state machine
*/
if (!simple_clk_stop) {
- ret = sdw_bus_wait_for_clk_prep_deprep(bus, SDW_BROADCAST_DEV_NUM);
+ ret = sdw_bus_wait_for_clk_prep_deprep(bus, SDW_BROADCAST_DEV_NUM, false);
if (ret < 0)
dev_warn(bus->dev, "clock stop deprepare wait failed:%d\n", ret);
}
diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c
index 2a1096dab63d..9ebdd0cd0b1c 100644
--- a/drivers/soundwire/dmi-quirks.c
+++ b/drivers/soundwire/dmi-quirks.c
@@ -141,7 +141,7 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16-k0xxx"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16"),
},
.driver_data = (void *)hp_omen_16,
},
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index 55be9f4b8d59..a1e2d6c98186 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -60,6 +60,7 @@
#define SWRM_INTERRUPT_STATUS_BUS_RESET_FINISHED_V2 BIT(13)
#define SWRM_INTERRUPT_STATUS_CLK_STOP_FINISHED_V2 BIT(14)
#define SWRM_INTERRUPT_STATUS_EXT_CLK_STOP_WAKEUP BIT(16)
+#define SWRM_INTERRUPT_STATUS_CMD_IGNORED_AND_EXEC_CONTINUED BIT(19)
#define SWRM_INTERRUPT_MAX 17
#define SWRM_V1_3_INTERRUPT_MASK_ADDR 0x204
#define SWRM_V1_3_INTERRUPT_CLEAR 0x208
@@ -776,6 +777,17 @@ static irqreturn_t qcom_swrm_irq_handler(int irq, void *dev_id)
break;
case SWRM_INTERRUPT_STATUS_EXT_CLK_STOP_WAKEUP:
break;
+ case SWRM_INTERRUPT_STATUS_CMD_IGNORED_AND_EXEC_CONTINUED:
+ ctrl->reg_read(ctrl,
+ ctrl->reg_layout[SWRM_REG_CMD_FIFO_STATUS],
+ &value);
+ dev_err(ctrl->dev,
+ "%s: SWR CMD ignored, fifo status %x\n",
+ __func__, value);
+
+ /* Wait 3.5ms to clear */
+ usleep_range(3500, 3505);
+ break;
default:
dev_err_ratelimited(ctrl->dev,
"%s: SWR unknown interrupt value: %d\n",
@@ -801,8 +813,8 @@ static bool swrm_wait_for_frame_gen_enabled(struct qcom_swrm_ctrl *ctrl)
int comp_sts;
do {
- ctrl->reg_read(ctrl, SWRM_COMP_STATUS, &comp_sts);
-
+ ctrl->reg_read(ctrl, ctrl->reg_layout[SWRM_REG_FRAME_GEN_ENABLED],
+ &comp_sts);
if (comp_sts & SWRM_FRM_GEN_ENABLED)
return true;
@@ -1550,7 +1562,7 @@ static int qcom_swrm_probe(struct platform_device *pdev)
ctrl->hclk = devm_clk_get(dev, "iface");
if (IS_ERR(ctrl->hclk)) {
- ret = PTR_ERR(ctrl->hclk);
+ ret = dev_err_probe(dev, PTR_ERR(ctrl->hclk), "unable to get iface clock\n");
goto err_init;
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index d4ac184bce95..35dbfacecf1c 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -616,6 +616,17 @@ config SPI_FSL_ESPI
From MPC8536, 85xx platform uses the controller, and all P10xx,
P20xx, P30xx,P40xx, P50xx uses this controller.
+config SPI_LJCA
+ tristate "Intel La Jolla Cove Adapter SPI support"
+ depends on USB_LJCA
+ default USB_LJCA
+ help
+ Select this option to enable SPI driver for the Intel
+ La Jolla Cove Adapter (LJCA) board.
+
+ This driver can also be built as a module. If so, the module
+ will be called spi-ljca.
+
config SPI_MESON_SPICC
tristate "Amlogic Meson SPICC controller"
depends on COMMON_CLK
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 6af54842b9fa..4ff8d725ba5e 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -71,6 +71,7 @@ obj-$(CONFIG_SPI_INTEL_PCI) += spi-intel-pci.o
obj-$(CONFIG_SPI_INTEL_PLATFORM) += spi-intel-platform.o
obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o
obj-$(CONFIG_SPI_JCORE) += spi-jcore.o
+obj-$(CONFIG_SPI_LJCA) += spi-ljca.o
obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
obj-$(CONFIG_SPI_LOONGSON_CORE) += spi-loongson-core.o
obj-$(CONFIG_SPI_LOONGSON_PCI) += spi-loongson-pci.o
diff --git a/drivers/spi/spi-ljca.c b/drivers/spi/spi-ljca.c
new file mode 100644
index 000000000000..c5a066c73817
--- /dev/null
+++ b/drivers/spi/spi-ljca.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel La Jolla Cove Adapter USB-SPI driver
+ *
+ * Copyright (c) 2023, Intel Corporation.
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/usb/ljca.h>
+
+#define LJCA_SPI_BUS_MAX_HZ 48000000
+
+#define LJCA_SPI_BUF_SIZE 60u
+#define LJCA_SPI_MAX_XFER_SIZE \
+ (LJCA_SPI_BUF_SIZE - sizeof(struct ljca_spi_xfer_packet))
+
+#define LJCA_SPI_CLK_MODE_POLARITY BIT(0)
+#define LJCA_SPI_CLK_MODE_PHASE BIT(1)
+
+#define LJCA_SPI_XFER_INDICATOR_ID GENMASK(5, 0)
+#define LJCA_SPI_XFER_INDICATOR_CMPL BIT(6)
+#define LJCA_SPI_XFER_INDICATOR_INDEX BIT(7)
+
+/* SPI commands */
+enum ljca_spi_cmd {
+ LJCA_SPI_INIT = 1,
+ LJCA_SPI_READ,
+ LJCA_SPI_WRITE,
+ LJCA_SPI_WRITEREAD,
+ LJCA_SPI_DEINIT,
+};
+
+enum {
+ LJCA_SPI_BUS_SPEED_24M,
+ LJCA_SPI_BUS_SPEED_12M,
+ LJCA_SPI_BUS_SPEED_8M,
+ LJCA_SPI_BUS_SPEED_6M,
+ LJCA_SPI_BUS_SPEED_4_8M, /*4.8MHz*/
+ LJCA_SPI_BUS_SPEED_MIN = LJCA_SPI_BUS_SPEED_4_8M,
+};
+
+enum {
+ LJCA_SPI_CLOCK_LOW_POLARITY,
+ LJCA_SPI_CLOCK_HIGH_POLARITY,
+};
+
+enum {
+ LJCA_SPI_CLOCK_FIRST_PHASE,
+ LJCA_SPI_CLOCK_SECOND_PHASE,
+};
+
+struct ljca_spi_init_packet {
+ u8 index;
+ u8 speed;
+ u8 mode;
+} __packed;
+
+struct ljca_spi_xfer_packet {
+ u8 indicator;
+ u8 len;
+ u8 data[] __counted_by(len);
+} __packed;
+
+struct ljca_spi_dev {
+ struct ljca_client *ljca;
+ struct spi_controller *controller;
+ struct ljca_spi_info *spi_info;
+ u8 speed;
+ u8 mode;
+
+ u8 obuf[LJCA_SPI_BUF_SIZE];
+ u8 ibuf[LJCA_SPI_BUF_SIZE];
+};
+
+static int ljca_spi_read_write(struct ljca_spi_dev *ljca_spi, const u8 *w_data,
+ u8 *r_data, int len, int id, int complete,
+ int cmd)
+{
+ struct ljca_spi_xfer_packet *w_packet =
+ (struct ljca_spi_xfer_packet *)ljca_spi->obuf;
+ struct ljca_spi_xfer_packet *r_packet =
+ (struct ljca_spi_xfer_packet *)ljca_spi->ibuf;
+ int ret;
+
+ w_packet->indicator = FIELD_PREP(LJCA_SPI_XFER_INDICATOR_ID, id) |
+ FIELD_PREP(LJCA_SPI_XFER_INDICATOR_CMPL, complete) |
+ FIELD_PREP(LJCA_SPI_XFER_INDICATOR_INDEX,
+ ljca_spi->spi_info->id);
+
+ if (cmd == LJCA_SPI_READ) {
+ w_packet->len = sizeof(u16);
+ *(__le16 *)&w_packet->data[0] = cpu_to_le16(len);
+ } else {
+ w_packet->len = len;
+ memcpy(w_packet->data, w_data, len);
+ }
+
+ ret = ljca_transfer(ljca_spi->ljca, cmd, (u8 *)w_packet,
+ struct_size(w_packet, data, w_packet->len),
+ (u8 *)r_packet, LJCA_SPI_BUF_SIZE);
+ if (ret < 0)
+ return ret;
+ else if (ret < sizeof(*r_packet) || r_packet->len <= 0)
+ return -EIO;
+
+ if (r_data)
+ memcpy(r_data, r_packet->data, r_packet->len);
+
+ return 0;
+}
+
+static int ljca_spi_init(struct ljca_spi_dev *ljca_spi, u8 div, u8 mode)
+{
+ struct ljca_spi_init_packet w_packet = {};
+ int ret;
+
+ if (ljca_spi->mode == mode && ljca_spi->speed == div)
+ return 0;
+
+ w_packet.index = ljca_spi->spi_info->id;
+ w_packet.speed = div;
+ w_packet.mode = FIELD_PREP(LJCA_SPI_CLK_MODE_POLARITY,
+ (mode & SPI_CPOL) ? LJCA_SPI_CLOCK_HIGH_POLARITY :
+ LJCA_SPI_CLOCK_LOW_POLARITY) |
+ FIELD_PREP(LJCA_SPI_CLK_MODE_PHASE,
+ (mode & SPI_CPHA) ? LJCA_SPI_CLOCK_SECOND_PHASE :
+ LJCA_SPI_CLOCK_FIRST_PHASE);
+
+ ret = ljca_transfer(ljca_spi->ljca, LJCA_SPI_INIT, (u8 *)&w_packet,
+ sizeof(w_packet), NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ ljca_spi->mode = mode;
+ ljca_spi->speed = div;
+
+ return 0;
+}
+
+static int ljca_spi_deinit(struct ljca_spi_dev *ljca_spi)
+{
+ struct ljca_spi_init_packet w_packet = {};
+ int ret;
+
+ w_packet.index = ljca_spi->spi_info->id;
+
+ ret = ljca_transfer(ljca_spi->ljca, LJCA_SPI_DEINIT, (u8 *)&w_packet,
+ sizeof(w_packet), NULL, 0);
+
+ return ret < 0 ? ret : 0;
+}
+
+static inline int ljca_spi_transfer(struct ljca_spi_dev *ljca_spi,
+ const u8 *tx_data, u8 *rx_data, u16 len)
+{
+ int complete, cur_len;
+ int remaining = len;
+ int cmd, ret, i;
+ int offset = 0;
+
+ if (tx_data && rx_data)
+ cmd = LJCA_SPI_WRITEREAD;
+ else if (tx_data)
+ cmd = LJCA_SPI_WRITE;
+ else if (rx_data)
+ cmd = LJCA_SPI_READ;
+ else
+ return -EINVAL;
+
+ for (i = 0; remaining > 0; i++) {
+ cur_len = min_t(unsigned int, remaining, LJCA_SPI_MAX_XFER_SIZE);
+ complete = (cur_len == remaining);
+
+ ret = ljca_spi_read_write(ljca_spi,
+ tx_data ? tx_data + offset : NULL,
+ rx_data ? rx_data + offset : NULL,
+ cur_len, i, complete, cmd);
+ if (ret)
+ return ret;
+
+ offset += cur_len;
+ remaining -= cur_len;
+ }
+
+ return 0;
+}
+
+static int ljca_spi_transfer_one(struct spi_controller *controller,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ u8 div = DIV_ROUND_UP(controller->max_speed_hz, xfer->speed_hz) / 2 - 1;
+ struct ljca_spi_dev *ljca_spi = spi_controller_get_devdata(controller);
+ int ret;
+
+ div = min_t(u8, LJCA_SPI_BUS_SPEED_MIN, div);
+
+ ret = ljca_spi_init(ljca_spi, div, spi->mode);
+ if (ret) {
+ dev_err(&ljca_spi->ljca->auxdev.dev,
+ "cannot initialize transfer ret %d\n", ret);
+ return ret;
+ }
+
+ ret = ljca_spi_transfer(ljca_spi, xfer->tx_buf, xfer->rx_buf, xfer->len);
+ if (ret)
+ dev_err(&ljca_spi->ljca->auxdev.dev,
+ "transfer failed len: %d\n", xfer->len);
+
+ return ret;
+}
+
+static int ljca_spi_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *aux_dev_id)
+{
+ struct ljca_client *ljca = auxiliary_dev_to_ljca_client(auxdev);
+ struct spi_controller *controller;
+ struct ljca_spi_dev *ljca_spi;
+ int ret;
+
+ controller = devm_spi_alloc_master(&auxdev->dev, sizeof(*ljca_spi));
+ if (!controller)
+ return -ENOMEM;
+
+ ljca_spi = spi_controller_get_devdata(controller);
+ ljca_spi->ljca = ljca;
+ ljca_spi->spi_info = dev_get_platdata(&auxdev->dev);
+ ljca_spi->controller = controller;
+
+ controller->bus_num = -1;
+ controller->mode_bits = SPI_CPHA | SPI_CPOL;
+ controller->transfer_one = ljca_spi_transfer_one;
+ controller->auto_runtime_pm = false;
+ controller->max_speed_hz = LJCA_SPI_BUS_MAX_HZ;
+
+ device_set_node(&ljca_spi->controller->dev, dev_fwnode(&auxdev->dev));
+ auxiliary_set_drvdata(auxdev, controller);
+
+ ret = spi_register_controller(controller);
+ if (ret)
+ dev_err(&auxdev->dev, "Failed to register controller\n");
+
+ return ret;
+}
+
+static void ljca_spi_dev_remove(struct auxiliary_device *auxdev)
+{
+ struct spi_controller *controller = auxiliary_get_drvdata(auxdev);
+ struct ljca_spi_dev *ljca_spi = spi_controller_get_devdata(controller);
+
+ spi_unregister_controller(controller);
+ ljca_spi_deinit(ljca_spi);
+}
+
+static int ljca_spi_dev_suspend(struct device *dev)
+{
+ struct spi_controller *controller = dev_get_drvdata(dev);
+
+ return spi_controller_suspend(controller);
+}
+
+static int ljca_spi_dev_resume(struct device *dev)
+{
+ struct spi_controller *controller = dev_get_drvdata(dev);
+
+ return spi_controller_resume(controller);
+}
+
+static const struct dev_pm_ops ljca_spi_pm = {
+ SYSTEM_SLEEP_PM_OPS(ljca_spi_dev_suspend, ljca_spi_dev_resume)
+};
+
+static const struct auxiliary_device_id ljca_spi_id_table[] = {
+ { "usb_ljca.ljca-spi", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(auxiliary, ljca_spi_id_table);
+
+static struct auxiliary_driver ljca_spi_driver = {
+ .driver.pm = &ljca_spi_pm,
+ .probe = ljca_spi_probe,
+ .remove = ljca_spi_dev_remove,
+ .id_table = ljca_spi_id_table,
+};
+module_auxiliary_driver(ljca_spi_driver);
+
+MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
+MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
+MODULE_AUTHOR("Lixu Zhang <lixu.zhang@intel.com>");
+MODULE_DESCRIPTION("Intel La Jolla Cove Adapter USB-SPI driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(LJCA);
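
To make the clock-divider maths in ljca_spi_transfer_one() concrete, here is a small stand-alone sketch (userspace, hypothetical helper names) of the computation div = DIV_ROUND_UP(48 MHz, speed_hz) / 2 - 1, clamped to LJCA_SPI_BUS_SPEED_MIN; the results map onto the driver's speed enum (0 = 24 MHz, 1 = 12 MHz, 2 = 8 MHz, 3 = 6 MHz, 4 = 4.8 MHz):

	#include <stdio.h>

	#define LJCA_SPI_BUS_MAX_HZ	48000000u
	#define LJCA_SPI_BUS_SPEED_MIN	4	/* LJCA_SPI_BUS_SPEED_4_8M */
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	/* Hypothetical stand-in for the divider selection in the driver. */
	static unsigned int ljca_div(unsigned int speed_hz)
	{
		unsigned int div = DIV_ROUND_UP(LJCA_SPI_BUS_MAX_HZ, speed_hz) / 2 - 1;

		return div > LJCA_SPI_BUS_SPEED_MIN ? LJCA_SPI_BUS_SPEED_MIN : div;
	}

	int main(void)
	{
		/* prints: 0 1 2 3 4 (24 MHz, 12 MHz, 8 MHz, 6 MHz, <= 4.8 MHz) */
		printf("%u %u %u %u %u\n", ljca_div(24000000), ljca_div(12000000),
		       ljca_div(8000000), ljca_div(6000000), ljca_div(1000000));
		return 0;
	}
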
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index f9aef39cac2e..784b9f673ead 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -28,8 +28,6 @@ source "drivers/staging/wlan-ng/Kconfig"
source "drivers/staging/olpc_dcon/Kconfig"
-source "drivers/staging/rtl8192u/Kconfig"
-
source "drivers/staging/rtl8192e/Kconfig"
source "drivers/staging/rtl8723bs/Kconfig"
@@ -74,8 +72,6 @@ source "drivers/staging/axis-fifo/Kconfig"
source "drivers/staging/fieldbus/Kconfig"
-source "drivers/staging/qlge/Kconfig"
-
source "drivers/staging/vme_user/Kconfig"
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index ffa70dda481d..2ea99c7b05d9 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -4,7 +4,6 @@
obj-y += media/
obj-$(CONFIG_PRISM2_USB) += wlan-ng/
obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/
-obj-$(CONFIG_RTL8192U) += rtl8192u/
obj-$(CONFIG_RTL8192E) += rtl8192e/
obj-$(CONFIG_RTL8723BS) += rtl8723bs/
obj-$(CONFIG_R8712U) += rtl8712/
@@ -27,4 +26,3 @@ obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/
obj-$(CONFIG_PI433) += pi433/
obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/
-obj-$(CONFIG_QLGE) += qlge/
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 2c2b5f1c1df3..f86ed9d470b8 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -310,12 +310,11 @@ static int fbtft_driver_probe_pdev(struct platform_device *pdev) \
return fbtft_probe_common(_display, NULL, pdev); \
} \
\
-static int fbtft_driver_remove_pdev(struct platform_device *pdev) \
+static void fbtft_driver_remove_pdev(struct platform_device *pdev) \
{ \
struct fb_info *info = platform_get_drvdata(pdev); \
\
fbtft_remove_common(&pdev->dev, info); \
- return 0; \
} \
\
FBTFT_DT_TABLE(_compatible) \
@@ -329,7 +328,7 @@ static struct platform_driver fbtft_driver_platform_driver = { \
.of_match_table = dt_ids, \
}, \
.probe = fbtft_driver_probe_pdev, \
- .remove = fbtft_driver_remove_pdev, \
+ .remove_new = fbtft_driver_remove_pdev, \
}; \
\
static int __init fbtft_driver_module_init(void) \
diff --git a/drivers/staging/fieldbus/anybuss/arcx-anybus.c b/drivers/staging/fieldbus/anybuss/arcx-anybus.c
index 6f69758a8b27..34d18b09bedd 100644
--- a/drivers/staging/fieldbus/anybuss/arcx-anybus.c
+++ b/drivers/staging/fieldbus/anybuss/arcx-anybus.c
@@ -218,7 +218,10 @@ static const struct regulator_desc can_power_desc = {
.ops = &can_power_ops,
};
-static struct class *controller_class;
+static const struct class controller_class = {
+ .name = "arcx_anybus_controller",
+};
+
static DEFINE_IDA(controller_index_ida);
static int controller_probe(struct platform_device *pdev)
@@ -301,7 +304,7 @@ static int controller_probe(struct platform_device *pdev)
err = -ENOMEM;
goto out_ida;
}
- cd->class_dev->class = controller_class;
+ cd->class_dev->class = &controller_class;
cd->class_dev->groups = controller_attribute_groups;
cd->class_dev->parent = dev;
cd->class_dev->id = id;
@@ -351,12 +354,12 @@ static int __init controller_init(void)
{
int err;
- controller_class = class_create("arcx_anybus_controller");
- if (IS_ERR(controller_class))
- return PTR_ERR(controller_class);
+ err = class_register(&controller_class);
+ if (err)
+ return err;
err = platform_driver_register(&controller_driver);
if (err)
- class_destroy(controller_class);
+ class_unregister(&controller_class);
return err;
}
@@ -364,7 +367,7 @@ static int __init controller_init(void)
static void __exit controller_exit(void)
{
platform_driver_unregister(&controller_driver);
- class_destroy(controller_class);
+ class_unregister(&controller_class);
ida_destroy(&controller_index_ida);
}
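
This conversion, like the analogous greybus authentication, fw-management and raw hunks further down, replaces a heap-allocated class from class_create() with a statically defined struct class registered via class_register(). A minimal sketch of the pattern, with hypothetical names:

	#include <linux/device.h>
	#include <linux/module.h>

	/* Hypothetical module following the same conversion: the class is a
	 * static const object, so class_register()/class_unregister() replace
	 * class_create()/class_destroy(). */
	static const struct class example_class = {
		.name = "example_class",
	};

	static int __init example_init(void)
	{
		return class_register(&example_class);
	}

	static void __exit example_exit(void)
	{
		class_unregister(&example_class);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");
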
diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
index 32b2e817ff04..15c246d3b1a3 100644
--- a/drivers/staging/gdm724x/gdm_tty.c
+++ b/drivers/staging/gdm724x/gdm_tty.c
@@ -160,6 +160,7 @@ static ssize_t gdm_tty_write(struct tty_struct *tty, const u8 *buf, size_t len)
while (remain) {
size_t sending_len = min_t(size_t, MUX_TX_MAX_SIZE, remain);
+
gdm->tty_dev->send_func(gdm->tty_dev->priv_dev,
(void *)(buf + sent_len),
sending_len,
diff --git a/drivers/staging/greybus/audio_manager_private.h b/drivers/staging/greybus/audio_manager_private.h
index 2b3a766c7de7..daca5b48b986 100644
--- a/drivers/staging/greybus/audio_manager_private.h
+++ b/drivers/staging/greybus/audio_manager_private.h
@@ -12,10 +12,9 @@
#include "audio_manager.h"
-int gb_audio_manager_module_create(
- struct gb_audio_manager_module **module,
- struct kset *manager_kset,
- int id, struct gb_audio_manager_module_descriptor *desc);
+int gb_audio_manager_module_create(struct gb_audio_manager_module **module,
+ struct kset *manager_kset, int id,
+ struct gb_audio_manager_module_descriptor *desc);
/* module destroyed via kobject_put */
diff --git a/drivers/staging/greybus/authentication.c b/drivers/staging/greybus/authentication.c
index 7e01790a4659..b67315641d18 100644
--- a/drivers/staging/greybus/authentication.c
+++ b/drivers/staging/greybus/authentication.c
@@ -36,7 +36,10 @@ struct gb_cap {
dev_t dev_num;
};
-static struct class *cap_class;
+static const struct class cap_class = {
+ .name = "gb_authenticate",
+};
+
static dev_t cap_dev_num;
static DEFINE_IDA(cap_minors_map);
static LIST_HEAD(cap_list);
@@ -336,7 +339,7 @@ int gb_cap_connection_init(struct gb_connection *connection)
goto err_remove_ida;
/* Add a soft link to the previously added char-dev within the bundle */
- cap->class_device = device_create(cap_class, cap->parent, cap->dev_num,
+ cap->class_device = device_create(&cap_class, cap->parent, cap->dev_num,
NULL, "gb-authenticate-%d", minor);
if (IS_ERR(cap->class_device)) {
ret = PTR_ERR(cap->class_device);
@@ -370,7 +373,7 @@ void gb_cap_connection_exit(struct gb_connection *connection)
cap = gb_connection_get_data(connection);
- device_destroy(cap_class, cap->dev_num);
+ device_destroy(&cap_class, cap->dev_num);
cdev_del(&cap->cdev);
ida_simple_remove(&cap_minors_map, MINOR(cap->dev_num));
@@ -402,9 +405,9 @@ int cap_init(void)
{
int ret;
- cap_class = class_create("gb_authenticate");
- if (IS_ERR(cap_class))
- return PTR_ERR(cap_class);
+ ret = class_register(&cap_class);
+ if (ret)
+ return ret;
ret = alloc_chrdev_region(&cap_dev_num, 0, NUM_MINORS,
"gb_authenticate");
@@ -414,13 +417,13 @@ int cap_init(void)
return 0;
err_remove_class:
- class_destroy(cap_class);
+ class_unregister(&cap_class);
return ret;
}
void cap_exit(void)
{
unregister_chrdev_region(cap_dev_num, NUM_MINORS);
- class_destroy(cap_class);
+ class_unregister(&cap_class);
ida_destroy(&cap_minors_map);
}
diff --git a/drivers/staging/greybus/bootrom.c b/drivers/staging/greybus/bootrom.c
index a8efb86de140..79581457c4af 100644
--- a/drivers/staging/greybus/bootrom.c
+++ b/drivers/staging/greybus/bootrom.c
@@ -491,8 +491,6 @@ static void gb_bootrom_disconnect(struct gb_bundle *bundle)
{
struct gb_bootrom *bootrom = greybus_get_drvdata(bundle);
- dev_dbg(&bundle->dev, "%s\n", __func__);
-
gb_connection_disable(bootrom->connection);
/* Disable timeouts */
diff --git a/drivers/staging/greybus/camera.c b/drivers/staging/greybus/camera.c
index cdbb42cd413b..a8173aa3a995 100644
--- a/drivers/staging/greybus/camera.c
+++ b/drivers/staging/greybus/camera.c
@@ -220,7 +220,7 @@ static int gb_camera_operation_sync_flags(struct gb_connection *connection,
}
static int gb_camera_get_max_pkt_size(struct gb_camera *gcam,
- struct gb_camera_configure_streams_response *resp)
+ struct gb_camera_configure_streams_response *resp)
{
unsigned int max_pkt_size = 0;
unsigned int i;
@@ -267,8 +267,7 @@ static int gb_camera_get_max_pkt_size(struct gb_camera *gcam,
* Validate the stream configuration response verifying padding is correctly
* set and the returned number of streams is supported
*/
-static const int gb_camera_configure_streams_validate_response(
- struct gb_camera *gcam,
+static const int gb_camera_configure_streams_validate_response(struct gb_camera *gcam,
struct gb_camera_configure_streams_response *resp,
unsigned int nstreams)
{
@@ -378,8 +377,8 @@ struct ap_csi_config_request {
#define GB_CAMERA_CSI_CLK_FREQ_MARGIN 150000000U
static int gb_camera_setup_data_connection(struct gb_camera *gcam,
- struct gb_camera_configure_streams_response *resp,
- struct gb_camera_csi_params *csi_params)
+ struct gb_camera_configure_streams_response *resp,
+ struct gb_camera_csi_params *csi_params)
{
struct ap_csi_config_request csi_cfg;
struct gb_connection *conn;
@@ -783,8 +782,8 @@ static ssize_t gb_camera_op_capabilities(void *priv, char *data, size_t len)
}
static int gb_camera_op_configure_streams(void *priv, unsigned int *nstreams,
- unsigned int *flags, struct gb_camera_stream *streams,
- struct gb_camera_csi_params *csi_params)
+ unsigned int *flags, struct gb_camera_stream *streams,
+ struct gb_camera_csi_params *csi_params)
{
struct gb_camera *gcam = priv;
struct gb_camera_stream_config *gb_streams;
diff --git a/drivers/staging/greybus/fw-management.c b/drivers/staging/greybus/fw-management.c
index cd9141e4b794..93137a3c4907 100644
--- a/drivers/staging/greybus/fw-management.c
+++ b/drivers/staging/greybus/fw-management.c
@@ -55,7 +55,10 @@ struct fw_mgmt {
*/
#define NUM_MINORS U8_MAX
-static struct class *fw_mgmt_class;
+static const struct class fw_mgmt_class = {
+ .name = "gb_fw_mgmt",
+};
+
static dev_t fw_mgmt_dev_num;
static DEFINE_IDA(fw_mgmt_minors_map);
static LIST_HEAD(fw_mgmt_list);
@@ -629,7 +632,7 @@ int gb_fw_mgmt_connection_init(struct gb_connection *connection)
goto err_remove_ida;
/* Add a soft link to the previously added char-dev within the bundle */
- fw_mgmt->class_device = device_create(fw_mgmt_class, fw_mgmt->parent,
+ fw_mgmt->class_device = device_create(&fw_mgmt_class, fw_mgmt->parent,
fw_mgmt->dev_num, NULL,
"gb-fw-mgmt-%d", minor);
if (IS_ERR(fw_mgmt->class_device)) {
@@ -664,7 +667,7 @@ void gb_fw_mgmt_connection_exit(struct gb_connection *connection)
fw_mgmt = gb_connection_get_data(connection);
- device_destroy(fw_mgmt_class, fw_mgmt->dev_num);
+ device_destroy(&fw_mgmt_class, fw_mgmt->dev_num);
cdev_del(&fw_mgmt->cdev);
ida_simple_remove(&fw_mgmt_minors_map, MINOR(fw_mgmt->dev_num));
@@ -696,9 +699,9 @@ int fw_mgmt_init(void)
{
int ret;
- fw_mgmt_class = class_create("gb_fw_mgmt");
- if (IS_ERR(fw_mgmt_class))
- return PTR_ERR(fw_mgmt_class);
+ ret = class_register(&fw_mgmt_class);
+ if (ret)
+ return ret;
ret = alloc_chrdev_region(&fw_mgmt_dev_num, 0, NUM_MINORS,
"gb_fw_mgmt");
@@ -708,13 +711,13 @@ int fw_mgmt_init(void)
return 0;
err_remove_class:
- class_destroy(fw_mgmt_class);
+ class_unregister(&fw_mgmt_class);
return ret;
}
void fw_mgmt_exit(void)
{
unregister_chrdev_region(fw_mgmt_dev_num, NUM_MINORS);
- class_destroy(fw_mgmt_class);
+ class_unregister(&fw_mgmt_class);
ida_destroy(&fw_mgmt_minors_map);
}
diff --git a/drivers/staging/greybus/pwm.c b/drivers/staging/greybus/pwm.c
index 57cc1960d059..a3cb68cfa0f9 100644
--- a/drivers/staging/greybus/pwm.c
+++ b/drivers/staging/greybus/pwm.c
@@ -258,7 +258,6 @@ static const struct pwm_ops gb_pwm_ops = {
.request = gb_pwm_request,
.free = gb_pwm_free,
.apply = gb_pwm_apply,
- .owner = THIS_MODULE,
};
static int gb_pwm_probe(struct gbphy_device *gbphy_dev,
diff --git a/drivers/staging/greybus/raw.c b/drivers/staging/greybus/raw.c
index 8bca8cb12cc6..b9c6eff7cdc1 100644
--- a/drivers/staging/greybus/raw.c
+++ b/drivers/staging/greybus/raw.c
@@ -29,10 +29,13 @@ struct gb_raw {
struct raw_data {
struct list_head entry;
u32 len;
- u8 data[];
+ u8 data[] __counted_by(len);
+};
+
+static const struct class raw_class = {
+ .name = "gb_raw",
};
-static struct class *raw_class;
static int raw_major;
static const struct file_operations raw_fops;
static DEFINE_IDA(minors);
@@ -70,7 +73,7 @@ static int receive_data(struct gb_raw *raw, u32 len, u8 *data)
goto exit;
}
- raw_data = kmalloc(sizeof(*raw_data) + len, GFP_KERNEL);
+ raw_data = kmalloc(struct_size(raw_data, data, len), GFP_KERNEL);
if (!raw_data) {
retval = -ENOMEM;
goto exit;
@@ -195,7 +198,7 @@ static int gb_raw_probe(struct gb_bundle *bundle,
if (retval)
goto error_connection_disable;
- raw->device = device_create(raw_class, &connection->bundle->dev,
+ raw->device = device_create(&raw_class, &connection->bundle->dev,
raw->dev, raw, "gb!raw%d", minor);
if (IS_ERR(raw->device)) {
retval = PTR_ERR(raw->device);
@@ -229,7 +232,7 @@ static void gb_raw_disconnect(struct gb_bundle *bundle)
struct raw_data *temp;
// FIXME - handle removing a connection when the char device node is open.
- device_destroy(raw_class, raw->dev);
+ device_destroy(&raw_class, raw->dev);
cdev_del(&raw->cdev);
gb_connection_disable(connection);
ida_simple_remove(&minors, MINOR(raw->dev));
@@ -340,11 +343,9 @@ static int raw_init(void)
dev_t dev;
int retval;
- raw_class = class_create("gb_raw");
- if (IS_ERR(raw_class)) {
- retval = PTR_ERR(raw_class);
+ retval = class_register(&raw_class);
+ if (retval)
goto error_class;
- }
retval = alloc_chrdev_region(&dev, 0, NUM_MINORS, "gb_raw");
if (retval < 0)
@@ -361,7 +362,7 @@ static int raw_init(void)
error_gb:
unregister_chrdev_region(dev, NUM_MINORS);
error_chrdev:
- class_destroy(raw_class);
+ class_unregister(&raw_class);
error_class:
return retval;
}
@@ -371,7 +372,7 @@ static void __exit raw_exit(void)
{
greybus_deregister(&gb_raw_driver);
unregister_chrdev_region(MKDEV(raw_major, 0), NUM_MINORS);
- class_destroy(raw_class);
+ class_unregister(&raw_class);
ida_destroy(&minors);
}
module_exit(raw_exit);
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index d3968fe2ebb8..a60631c1f449 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -10,6 +10,5 @@ source "drivers/staging/iio/adc/Kconfig"
source "drivers/staging/iio/addac/Kconfig"
source "drivers/staging/iio/frequency/Kconfig"
source "drivers/staging/iio/impedance-analyzer/Kconfig"
-source "drivers/staging/iio/resolver/Kconfig"
endmenu
diff --git a/drivers/staging/iio/Makefile b/drivers/staging/iio/Makefile
index c50f1019f829..628583535393 100644
--- a/drivers/staging/iio/Makefile
+++ b/drivers/staging/iio/Makefile
@@ -8,4 +8,3 @@ obj-y += adc/
obj-y += addac/
obj-y += frequency/
obj-y += impedance-analyzer/
-obj-y += resolver/
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index 6f9eebd6c7ee..6c390c4eb26d 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -299,11 +299,6 @@ static void ad9832_reg_disable(void *reg)
regulator_disable(reg);
}
-static void ad9832_clk_disable(void *clk)
-{
- clk_disable_unprepare(clk);
-}
-
static int ad9832_probe(struct spi_device *spi)
{
struct ad9832_platform_data *pdata = dev_get_platdata(&spi->dev);
@@ -350,18 +345,10 @@ static int ad9832_probe(struct spi_device *spi)
if (ret)
return ret;
- st->mclk = devm_clk_get(&spi->dev, "mclk");
+ st->mclk = devm_clk_get_enabled(&spi->dev, "mclk");
if (IS_ERR(st->mclk))
return PTR_ERR(st->mclk);
- ret = clk_prepare_enable(st->mclk);
- if (ret < 0)
- return ret;
-
- ret = devm_add_action_or_reset(&spi->dev, ad9832_clk_disable, st->mclk);
- if (ret)
- return ret;
-
st->spi = spi;
mutex_init(&st->lock);
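
The ad9832 change above, like the ad9834 and ad5933 hunks that follow, folds devm_clk_get() plus clk_prepare_enable() plus a devm cleanup action into a single devm_clk_get_enabled() call, which disables and unprepares the clock automatically on unbind. A hedged sketch of the resulting probe pattern (hypothetical function, not from the patch):

	#include <linux/clk.h>
	#include <linux/spi/spi.h>

	static int example_probe(struct spi_device *spi)
	{
		struct clk *mclk;

		/* Acquired, prepared and enabled in one call; cleaned up by devm. */
		mclk = devm_clk_get_enabled(&spi->dev, "mclk");
		if (IS_ERR(mclk))
			return PTR_ERR(mclk);

		/* ... use clk_get_rate(mclk); no clk_disable_unprepare() needed */
		return 0;
	}
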
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 285df0e489a6..a7a5cdcc6590 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -394,13 +394,6 @@ static void ad9834_disable_reg(void *data)
regulator_disable(reg);
}
-static void ad9834_disable_clk(void *data)
-{
- struct clk *clk = data;
-
- clk_disable_unprepare(clk);
-}
-
static int ad9834_probe(struct spi_device *spi)
{
struct ad9834_state *st;
@@ -429,22 +422,12 @@ static int ad9834_probe(struct spi_device *spi)
}
st = iio_priv(indio_dev);
mutex_init(&st->lock);
- st->mclk = devm_clk_get(&spi->dev, NULL);
+ st->mclk = devm_clk_get_enabled(&spi->dev, NULL);
if (IS_ERR(st->mclk)) {
- ret = PTR_ERR(st->mclk);
- return ret;
- }
-
- ret = clk_prepare_enable(st->mclk);
- if (ret) {
dev_err(&spi->dev, "Failed to enable master clock\n");
- return ret;
+ return PTR_ERR(st->mclk);
}
- ret = devm_add_action_or_reset(&spi->dev, ad9834_disable_clk, st->mclk);
- if (ret)
- return ret;
-
st->spi = spi;
st->devid = spi_get_device_id(spi)->driver_data;
indio_dev->name = spi_get_device_id(spi)->name;
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 46db6d91542a..e748a5d04e97 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -667,13 +667,6 @@ static void ad5933_reg_disable(void *data)
regulator_disable(st->reg);
}
-static void ad5933_clk_disable(void *data)
-{
- struct ad5933_state *st = data;
-
- clk_disable_unprepare(st->mclk);
-}
-
static int ad5933_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
@@ -712,23 +705,12 @@ static int ad5933_probe(struct i2c_client *client)
st->vref_mv = ret / 1000;
- st->mclk = devm_clk_get(&client->dev, "mclk");
+ st->mclk = devm_clk_get_enabled(&client->dev, "mclk");
if (IS_ERR(st->mclk) && PTR_ERR(st->mclk) != -ENOENT)
return PTR_ERR(st->mclk);
- if (!IS_ERR(st->mclk)) {
- ret = clk_prepare_enable(st->mclk);
- if (ret < 0)
- return ret;
-
- ret = devm_add_action_or_reset(&client->dev,
- ad5933_clk_disable,
- st);
- if (ret)
- return ret;
-
+ if (!IS_ERR(st->mclk))
ext_clk_hz = clk_get_rate(st->mclk);
- }
if (ext_clk_hz) {
st->mclk_hz = ext_clk_hz;
diff --git a/drivers/staging/iio/resolver/Kconfig b/drivers/staging/iio/resolver/Kconfig
deleted file mode 100644
index 6d1e2622e0b0..000000000000
--- a/drivers/staging/iio/resolver/Kconfig
+++ /dev/null
@@ -1,18 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Resolver/Synchro drivers
-#
-menu "Resolver to digital converters"
-
-config AD2S1210
- tristate "Analog Devices ad2s1210 driver"
- depends on SPI
- depends on GPIOLIB || COMPILE_TEST
- help
- Say yes here to build support for Analog Devices spi resolver
- to digital converters, ad2s1210, provides direct access via sysfs.
-
- To compile this driver as a module, choose M here: the
- module will be called ad2s1210.
-
-endmenu
diff --git a/drivers/staging/iio/resolver/Makefile b/drivers/staging/iio/resolver/Makefile
deleted file mode 100644
index 398631f7e79b..000000000000
--- a/drivers/staging/iio/resolver/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Resolver/Synchro drivers
-#
-
-obj-$(CONFIG_AD2S1210) += ad2s1210.o
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
deleted file mode 100644
index 06de5823eb8e..000000000000
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ /dev/null
@@ -1,716 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * ad2s1210.c support for the ADI Resolver to Digital Converters: AD2S1210
- *
- * Copyright (c) 2010-2010 Analog Devices Inc.
- */
-#include <linux/types.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/of.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-
-#define DRV_NAME "ad2s1210"
-
-#define AD2S1210_DEF_CONTROL 0x7E
-
-#define AD2S1210_MSB_IS_HIGH 0x80
-#define AD2S1210_MSB_IS_LOW 0x7F
-#define AD2S1210_PHASE_LOCK_RANGE_44 0x20
-#define AD2S1210_ENABLE_HYSTERESIS 0x10
-#define AD2S1210_SET_ENRES1 0x08
-#define AD2S1210_SET_ENRES0 0x04
-#define AD2S1210_SET_RES1 0x02
-#define AD2S1210_SET_RES0 0x01
-
-#define AD2S1210_SET_RESOLUTION (AD2S1210_SET_RES1 | AD2S1210_SET_RES0)
-
-#define AD2S1210_REG_POSITION 0x80
-#define AD2S1210_REG_VELOCITY 0x82
-#define AD2S1210_REG_LOS_THRD 0x88
-#define AD2S1210_REG_DOS_OVR_THRD 0x89
-#define AD2S1210_REG_DOS_MIS_THRD 0x8A
-#define AD2S1210_REG_DOS_RST_MAX_THRD 0x8B
-#define AD2S1210_REG_DOS_RST_MIN_THRD 0x8C
-#define AD2S1210_REG_LOT_HIGH_THRD 0x8D
-#define AD2S1210_REG_LOT_LOW_THRD 0x8E
-#define AD2S1210_REG_EXCIT_FREQ 0x91
-#define AD2S1210_REG_CONTROL 0x92
-#define AD2S1210_REG_SOFT_RESET 0xF0
-#define AD2S1210_REG_FAULT 0xFF
-
-#define AD2S1210_MIN_CLKIN 6144000
-#define AD2S1210_MAX_CLKIN 10240000
-#define AD2S1210_MIN_EXCIT 2000
-#define AD2S1210_MAX_EXCIT 20000
-#define AD2S1210_MIN_FCW 0x4
-#define AD2S1210_MAX_FCW 0x50
-
-#define AD2S1210_DEF_EXCIT 10000
-
-enum ad2s1210_mode {
- MOD_POS = 0,
- MOD_VEL,
- MOD_CONFIG,
- MOD_RESERVED,
-};
-
-enum ad2s1210_gpios {
- AD2S1210_SAMPLE,
- AD2S1210_A0,
- AD2S1210_A1,
- AD2S1210_RES0,
- AD2S1210_RES1,
-};
-
-struct ad2s1210_gpio {
- const char *name;
- unsigned long flags;
-};
-
-static const struct ad2s1210_gpio gpios[] = {
- [AD2S1210_SAMPLE] = { .name = "adi,sample", .flags = GPIOD_OUT_LOW },
- [AD2S1210_A0] = { .name = "adi,a0", .flags = GPIOD_OUT_LOW },
- [AD2S1210_A1] = { .name = "adi,a1", .flags = GPIOD_OUT_LOW },
- [AD2S1210_RES0] = { .name = "adi,res0", .flags = GPIOD_OUT_LOW },
- [AD2S1210_RES1] = { .name = "adi,res1", .flags = GPIOD_OUT_LOW },
-};
-
-static const unsigned int ad2s1210_resolution_value[] = { 10, 12, 14, 16 };
-
-struct ad2s1210_state {
- struct mutex lock;
- struct spi_device *sdev;
- struct gpio_desc *gpios[5];
- unsigned int fclkin;
- unsigned int fexcit;
- bool hysteresis;
- u8 resolution;
- enum ad2s1210_mode mode;
- u8 rx[2] __aligned(IIO_DMA_MINALIGN);
- u8 tx[2];
-};
-
-static const int ad2s1210_mode_vals[4][2] = {
- [MOD_POS] = { 0, 0 },
- [MOD_VEL] = { 0, 1 },
- [MOD_CONFIG] = { 1, 1 },
-};
-
-static inline void ad2s1210_set_mode(enum ad2s1210_mode mode,
- struct ad2s1210_state *st)
-{
- gpiod_set_value(st->gpios[AD2S1210_A0], ad2s1210_mode_vals[mode][0]);
- gpiod_set_value(st->gpios[AD2S1210_A1], ad2s1210_mode_vals[mode][1]);
- st->mode = mode;
-}
-
-/* write 1 bytes (address or data) to the chip */
-static int ad2s1210_config_write(struct ad2s1210_state *st, u8 data)
-{
- int ret;
-
- ad2s1210_set_mode(MOD_CONFIG, st);
- st->tx[0] = data;
- ret = spi_write(st->sdev, st->tx, 1);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-/* read value from one of the registers */
-static int ad2s1210_config_read(struct ad2s1210_state *st,
- unsigned char address)
-{
- struct spi_transfer xfers[] = {
- {
- .len = 1,
- .rx_buf = &st->rx[0],
- .tx_buf = &st->tx[0],
- .cs_change = 1,
- }, {
- .len = 1,
- .rx_buf = &st->rx[1],
- .tx_buf = &st->tx[1],
- },
- };
- int ret = 0;
-
- ad2s1210_set_mode(MOD_CONFIG, st);
- st->tx[0] = address | AD2S1210_MSB_IS_HIGH;
- st->tx[1] = AD2S1210_REG_FAULT;
- ret = spi_sync_transfer(st->sdev, xfers, 2);
- if (ret < 0)
- return ret;
-
- return st->rx[1];
-}
-
-static inline
-int ad2s1210_update_frequency_control_word(struct ad2s1210_state *st)
-{
- int ret;
- unsigned char fcw;
-
- fcw = (unsigned char)(st->fexcit * (1 << 15) / st->fclkin);
- if (fcw < AD2S1210_MIN_FCW || fcw > AD2S1210_MAX_FCW) {
- dev_err(&st->sdev->dev, "ad2s1210: FCW out of range\n");
- return -ERANGE;
- }
-
- ret = ad2s1210_config_write(st, AD2S1210_REG_EXCIT_FREQ);
- if (ret < 0)
- return ret;
-
- return ad2s1210_config_write(st, fcw);
-}
-
-static const int ad2s1210_res_pins[4][2] = {
- { 0, 0 }, {0, 1}, {1, 0}, {1, 1}
-};
-
-static inline void ad2s1210_set_resolution_pin(struct ad2s1210_state *st)
-{
- gpiod_set_value(st->gpios[AD2S1210_RES0],
- ad2s1210_res_pins[(st->resolution - 10) / 2][0]);
- gpiod_set_value(st->gpios[AD2S1210_RES1],
- ad2s1210_res_pins[(st->resolution - 10) / 2][1]);
-}
-
-static inline int ad2s1210_soft_reset(struct ad2s1210_state *st)
-{
- int ret;
-
- ret = ad2s1210_config_write(st, AD2S1210_REG_SOFT_RESET);
- if (ret < 0)
- return ret;
-
- return ad2s1210_config_write(st, 0x0);
-}
-
-static ssize_t ad2s1210_show_fclkin(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
-
- return sprintf(buf, "%u\n", st->fclkin);
-}
-
-static ssize_t ad2s1210_store_fclkin(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
- unsigned int fclkin;
- int ret;
-
- ret = kstrtouint(buf, 10, &fclkin);
- if (ret)
- return ret;
- if (fclkin < AD2S1210_MIN_CLKIN || fclkin > AD2S1210_MAX_CLKIN) {
- dev_err(dev, "ad2s1210: fclkin out of range\n");
- return -EINVAL;
- }
-
- mutex_lock(&st->lock);
- st->fclkin = fclkin;
-
- ret = ad2s1210_update_frequency_control_word(st);
- if (ret < 0)
- goto error_ret;
- ret = ad2s1210_soft_reset(st);
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret < 0 ? ret : len;
-}
-
-static ssize_t ad2s1210_show_fexcit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
-
- return sprintf(buf, "%u\n", st->fexcit);
-}
-
-static ssize_t ad2s1210_store_fexcit(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
- unsigned int fexcit;
- int ret;
-
- ret = kstrtouint(buf, 10, &fexcit);
- if (ret < 0)
- return ret;
- if (fexcit < AD2S1210_MIN_EXCIT || fexcit > AD2S1210_MAX_EXCIT) {
- dev_err(dev,
- "ad2s1210: excitation frequency out of range\n");
- return -EINVAL;
- }
- mutex_lock(&st->lock);
- st->fexcit = fexcit;
- ret = ad2s1210_update_frequency_control_word(st);
- if (ret < 0)
- goto error_ret;
- ret = ad2s1210_soft_reset(st);
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret < 0 ? ret : len;
-}
-
-static ssize_t ad2s1210_show_control(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
- int ret;
-
- mutex_lock(&st->lock);
- ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL);
- mutex_unlock(&st->lock);
- return ret < 0 ? ret : sprintf(buf, "0x%x\n", ret);
-}
-
-static ssize_t ad2s1210_store_control(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
- unsigned char udata;
- unsigned char data;
- int ret;
-
- ret = kstrtou8(buf, 16, &udata);
- if (ret)
- return -EINVAL;
-
- mutex_lock(&st->lock);
- ret = ad2s1210_config_write(st, AD2S1210_REG_CONTROL);
- if (ret < 0)
- goto error_ret;
- data = udata & AD2S1210_MSB_IS_LOW;
- ret = ad2s1210_config_write(st, data);
- if (ret < 0)
- goto error_ret;
-
- ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL);
- if (ret < 0)
- goto error_ret;
- if (ret & AD2S1210_MSB_IS_HIGH) {
- ret = -EIO;
- dev_err(dev,
- "ad2s1210: write control register fail\n");
- goto error_ret;
- }
- st->resolution =
- ad2s1210_resolution_value[data & AD2S1210_SET_RESOLUTION];
- ad2s1210_set_resolution_pin(st);
- ret = len;
- st->hysteresis = !!(data & AD2S1210_ENABLE_HYSTERESIS);
-
-error_ret:
- mutex_unlock(&st->lock);
- return ret;
-}
-
-static ssize_t ad2s1210_show_resolution(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
-
- return sprintf(buf, "%d\n", st->resolution);
-}
-
-static ssize_t ad2s1210_store_resolution(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
- unsigned char data;
- unsigned char udata;
- int ret;
-
- ret = kstrtou8(buf, 10, &udata);
- if (ret || udata < 10 || udata > 16) {
- dev_err(dev, "ad2s1210: resolution out of range\n");
- return -EINVAL;
- }
- mutex_lock(&st->lock);
- ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL);
- if (ret < 0)
- goto error_ret;
- data = ret;
- data &= ~AD2S1210_SET_RESOLUTION;
- data |= (udata - 10) >> 1;
- ret = ad2s1210_config_write(st, AD2S1210_REG_CONTROL);
- if (ret < 0)
- goto error_ret;
- ret = ad2s1210_config_write(st, data & AD2S1210_MSB_IS_LOW);
- if (ret < 0)
- goto error_ret;
- ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL);
- if (ret < 0)
- goto error_ret;
- data = ret;
- if (data & AD2S1210_MSB_IS_HIGH) {
- ret = -EIO;
- dev_err(dev, "ad2s1210: setting resolution fail\n");
- goto error_ret;
- }
- st->resolution =
- ad2s1210_resolution_value[data & AD2S1210_SET_RESOLUTION];
- ad2s1210_set_resolution_pin(st);
- ret = len;
-error_ret:
- mutex_unlock(&st->lock);
- return ret;
-}
-
-/* read the fault register since last sample */
-static ssize_t ad2s1210_show_fault(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
- int ret;
-
- mutex_lock(&st->lock);
- ret = ad2s1210_config_read(st, AD2S1210_REG_FAULT);
- mutex_unlock(&st->lock);
-
- return ret ? ret : sprintf(buf, "0x%x\n", ret);
-}
-
-static ssize_t ad2s1210_clear_fault(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
- int ret;
-
- mutex_lock(&st->lock);
- gpiod_set_value(st->gpios[AD2S1210_SAMPLE], 0);
- /* delay (2 * tck + 20) nano seconds */
- udelay(1);
- gpiod_set_value(st->gpios[AD2S1210_SAMPLE], 1);
- ret = ad2s1210_config_read(st, AD2S1210_REG_FAULT);
- if (ret < 0)
- goto error_ret;
- gpiod_set_value(st->gpios[AD2S1210_SAMPLE], 0);
- gpiod_set_value(st->gpios[AD2S1210_SAMPLE], 1);
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret < 0 ? ret : len;
-}
-
-static ssize_t ad2s1210_show_reg(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
- struct iio_dev_attr *iattr = to_iio_dev_attr(attr);
- int ret;
-
- mutex_lock(&st->lock);
- ret = ad2s1210_config_read(st, iattr->address);
- mutex_unlock(&st->lock);
-
- return ret < 0 ? ret : sprintf(buf, "%d\n", ret);
-}
-
-static ssize_t ad2s1210_store_reg(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
- unsigned char data;
- int ret;
- struct iio_dev_attr *iattr = to_iio_dev_attr(attr);
-
- ret = kstrtou8(buf, 10, &data);
- if (ret)
- return -EINVAL;
- mutex_lock(&st->lock);
- ret = ad2s1210_config_write(st, iattr->address);
- if (ret < 0)
- goto error_ret;
- ret = ad2s1210_config_write(st, data & AD2S1210_MSB_IS_LOW);
-error_ret:
- mutex_unlock(&st->lock);
- return ret < 0 ? ret : len;
-}
-
-static int ad2s1210_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long m)
-{
- struct ad2s1210_state *st = iio_priv(indio_dev);
- u16 negative;
- int ret = 0;
- u16 pos;
- s16 vel;
-
- mutex_lock(&st->lock);
- gpiod_set_value(st->gpios[AD2S1210_SAMPLE], 0);
- /* delay (6 * tck + 20) nano seconds */
- udelay(1);
-
- switch (chan->type) {
- case IIO_ANGL:
- ad2s1210_set_mode(MOD_POS, st);
- break;
- case IIO_ANGL_VEL:
- ad2s1210_set_mode(MOD_VEL, st);
- break;
- default:
- ret = -EINVAL;
- break;
- }
- if (ret < 0)
- goto error_ret;
- ret = spi_read(st->sdev, st->rx, 2);
- if (ret < 0)
- goto error_ret;
-
- switch (chan->type) {
- case IIO_ANGL:
- pos = be16_to_cpup((__be16 *)st->rx);
- if (st->hysteresis)
- pos >>= 16 - st->resolution;
- *val = pos;
- ret = IIO_VAL_INT;
- break;
- case IIO_ANGL_VEL:
- vel = be16_to_cpup((__be16 *)st->rx);
- vel >>= 16 - st->resolution;
- if (vel & 0x8000) {
- negative = (0xffff >> st->resolution) << st->resolution;
- vel |= negative;
- }
- *val = vel;
- ret = IIO_VAL_INT;
- break;
- default:
- mutex_unlock(&st->lock);
- return -EINVAL;
- }
-
-error_ret:
- gpiod_set_value(st->gpios[AD2S1210_SAMPLE], 1);
- /* delay (2 * tck + 20) nano seconds */
- udelay(1);
- mutex_unlock(&st->lock);
- return ret;
-}
-
-static IIO_DEVICE_ATTR(fclkin, 0644,
- ad2s1210_show_fclkin, ad2s1210_store_fclkin, 0);
-static IIO_DEVICE_ATTR(fexcit, 0644,
- ad2s1210_show_fexcit, ad2s1210_store_fexcit, 0);
-static IIO_DEVICE_ATTR(control, 0644,
- ad2s1210_show_control, ad2s1210_store_control, 0);
-static IIO_DEVICE_ATTR(bits, 0644,
- ad2s1210_show_resolution, ad2s1210_store_resolution, 0);
-static IIO_DEVICE_ATTR(fault, 0644,
- ad2s1210_show_fault, ad2s1210_clear_fault, 0);
-
-static IIO_DEVICE_ATTR(los_thrd, 0644,
- ad2s1210_show_reg, ad2s1210_store_reg,
- AD2S1210_REG_LOS_THRD);
-static IIO_DEVICE_ATTR(dos_ovr_thrd, 0644,
- ad2s1210_show_reg, ad2s1210_store_reg,
- AD2S1210_REG_DOS_OVR_THRD);
-static IIO_DEVICE_ATTR(dos_mis_thrd, 0644,
- ad2s1210_show_reg, ad2s1210_store_reg,
- AD2S1210_REG_DOS_MIS_THRD);
-static IIO_DEVICE_ATTR(dos_rst_max_thrd, 0644,
- ad2s1210_show_reg, ad2s1210_store_reg,
- AD2S1210_REG_DOS_RST_MAX_THRD);
-static IIO_DEVICE_ATTR(dos_rst_min_thrd, 0644,
- ad2s1210_show_reg, ad2s1210_store_reg,
- AD2S1210_REG_DOS_RST_MIN_THRD);
-static IIO_DEVICE_ATTR(lot_high_thrd, 0644,
- ad2s1210_show_reg, ad2s1210_store_reg,
- AD2S1210_REG_LOT_HIGH_THRD);
-static IIO_DEVICE_ATTR(lot_low_thrd, 0644,
- ad2s1210_show_reg, ad2s1210_store_reg,
- AD2S1210_REG_LOT_LOW_THRD);
-
-static const struct iio_chan_spec ad2s1210_channels[] = {
- {
- .type = IIO_ANGL,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- }, {
- .type = IIO_ANGL_VEL,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- }
-};
-
-static struct attribute *ad2s1210_attributes[] = {
- &iio_dev_attr_fclkin.dev_attr.attr,
- &iio_dev_attr_fexcit.dev_attr.attr,
- &iio_dev_attr_control.dev_attr.attr,
- &iio_dev_attr_bits.dev_attr.attr,
- &iio_dev_attr_fault.dev_attr.attr,
- &iio_dev_attr_los_thrd.dev_attr.attr,
- &iio_dev_attr_dos_ovr_thrd.dev_attr.attr,
- &iio_dev_attr_dos_mis_thrd.dev_attr.attr,
- &iio_dev_attr_dos_rst_max_thrd.dev_attr.attr,
- &iio_dev_attr_dos_rst_min_thrd.dev_attr.attr,
- &iio_dev_attr_lot_high_thrd.dev_attr.attr,
- &iio_dev_attr_lot_low_thrd.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ad2s1210_attribute_group = {
- .attrs = ad2s1210_attributes,
-};
-
-static int ad2s1210_initial(struct ad2s1210_state *st)
-{
- unsigned char data;
- int ret;
-
- mutex_lock(&st->lock);
- ad2s1210_set_resolution_pin(st);
-
- ret = ad2s1210_config_write(st, AD2S1210_REG_CONTROL);
- if (ret < 0)
- goto error_ret;
- data = AD2S1210_DEF_CONTROL & ~(AD2S1210_SET_RESOLUTION);
- data |= (st->resolution - 10) >> 1;
- ret = ad2s1210_config_write(st, data);
- if (ret < 0)
- goto error_ret;
- ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL);
- if (ret < 0)
- goto error_ret;
-
- if (ret & AD2S1210_MSB_IS_HIGH) {
- ret = -EIO;
- goto error_ret;
- }
-
- ret = ad2s1210_update_frequency_control_word(st);
- if (ret < 0)
- goto error_ret;
- ret = ad2s1210_soft_reset(st);
-error_ret:
- mutex_unlock(&st->lock);
- return ret;
-}
-
-static const struct iio_info ad2s1210_info = {
- .read_raw = ad2s1210_read_raw,
- .attrs = &ad2s1210_attribute_group,
-};
-
-static int ad2s1210_setup_gpios(struct ad2s1210_state *st)
-{
- struct spi_device *spi = st->sdev;
- int i, ret;
-
- for (i = 0; i < ARRAY_SIZE(gpios); i++) {
- st->gpios[i] = devm_gpiod_get(&spi->dev, gpios[i].name,
- gpios[i].flags);
- if (IS_ERR(st->gpios[i])) {
- ret = PTR_ERR(st->gpios[i]);
- dev_err(&spi->dev,
- "ad2s1210: failed to request %s GPIO: %d\n",
- gpios[i].name, ret);
- return ret;
- }
- }
-
- return 0;
-}
-
-static int ad2s1210_probe(struct spi_device *spi)
-{
- struct iio_dev *indio_dev;
- struct ad2s1210_state *st;
- int ret;
-
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- st = iio_priv(indio_dev);
- ret = ad2s1210_setup_gpios(st);
- if (ret < 0)
- return ret;
-
- spi_set_drvdata(spi, indio_dev);
-
- mutex_init(&st->lock);
- st->sdev = spi;
- st->hysteresis = true;
- st->mode = MOD_CONFIG;
- st->resolution = 12;
- st->fexcit = AD2S1210_DEF_EXCIT;
-
- indio_dev->info = &ad2s1210_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = ad2s1210_channels;
- indio_dev->num_channels = ARRAY_SIZE(ad2s1210_channels);
- indio_dev->name = spi_get_device_id(spi)->name;
-
- ret = devm_iio_device_register(&spi->dev, indio_dev);
- if (ret)
- return ret;
-
- st->fclkin = spi->max_speed_hz;
- spi->mode = SPI_MODE_3;
- spi_setup(spi);
- ad2s1210_initial(st);
-
- return 0;
-}
-
-static const struct of_device_id ad2s1210_of_match[] = {
- { .compatible = "adi,ad2s1210", },
- { }
-};
-MODULE_DEVICE_TABLE(of, ad2s1210_of_match);
-
-static const struct spi_device_id ad2s1210_id[] = {
- { "ad2s1210" },
- {}
-};
-MODULE_DEVICE_TABLE(spi, ad2s1210_id);
-
-static struct spi_driver ad2s1210_driver = {
- .driver = {
- .name = DRV_NAME,
- .of_match_table = of_match_ptr(ad2s1210_of_match),
- },
- .probe = ad2s1210_probe,
- .id_table = ad2s1210_id,
-};
-module_spi_driver(ad2s1210_driver);
-
-MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices AD2S1210 Resolver to Digital SPI driver");
-MODULE_LICENSE("GPL v2");
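The threshold attributes in the removed driver all funnel through a single show/store pair: the last argument of IIO_DEVICE_ATTR() is stored in iio_dev_attr.address, which tells ad2s1210_show_reg()/ad2s1210_store_reg() which configuration register to access. A minimal sketch of that mechanism (ad2s1210_read_cfg_reg() is a hypothetical stand-in for the driver's own register-read path):

static ssize_t thrd_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct iio_dev_attr *iattr = to_iio_dev_attr(attr);
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct ad2s1210_state *st = iio_priv(indio_dev);
	int ret;

	/* iattr->address carries the register passed to IIO_DEVICE_ATTR(),
	 * e.g. AD2S1210_REG_LOS_THRD; ad2s1210_read_cfg_reg() is hypothetical.
	 */
	ret = ad2s1210_read_cfg_reg(st, iattr->address);
	if (ret < 0)
		return ret;

	return sysfs_emit(buf, "%d\n", ret);
}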
diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
index 9fb118e77a1f..f1d44e4955fc 100644
--- a/drivers/staging/ks7010/ks7010_sdio.c
+++ b/drivers/staging/ks7010/ks7010_sdio.c
@@ -395,9 +395,9 @@ int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
priv->hostt.buff[priv->hostt.qtail] = le16_to_cpu(hdr->event);
priv->hostt.qtail = (priv->hostt.qtail + 1) % SME_EVENT_BUFF_SIZE;
- spin_lock(&priv->tx_dev.tx_dev_lock);
+ spin_lock_bh(&priv->tx_dev.tx_dev_lock);
result = enqueue_txdev(priv, p, size, complete_handler, skb);
- spin_unlock(&priv->tx_dev.tx_dev_lock);
+ spin_unlock_bh(&priv->tx_dev.tx_dev_lock);
if (txq_has_space(priv))
queue_delayed_work(priv->wq, &priv->rw_dwork, 0);
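The switch to the _bh variants matters when the same lock can also be taken from bottom-half (softirq) context: with plain spin_lock(), a softirq running on the same CPU could interrupt the critical section and spin forever on a lock that the interrupted code already holds. A minimal, self-contained sketch of the pattern (names are illustrative, not ks7010 code):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(queue_lock);

static void enqueue_from_process_context(void)
{
	spin_lock_bh(&queue_lock);	/* softirqs disabled while held */
	/* ... add an entry to the shared queue ... */
	spin_unlock_bh(&queue_lock);
}

static void consume_from_softirq(void)	/* e.g. tasklet or NAPI callback */
{
	spin_lock(&queue_lock);		/* already running in BH context */
	/* ... remove an entry from the shared queue ... */
	spin_unlock(&queue_lock);
}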
diff --git a/drivers/staging/media/atomisp/Kconfig b/drivers/staging/media/atomisp/Kconfig
index 75c985da75b5..1265fc58a232 100644
--- a/drivers/staging/media/atomisp/Kconfig
+++ b/drivers/staging/media/atomisp/Kconfig
@@ -26,18 +26,6 @@ config VIDEO_ATOMISP
To compile this driver as a module, choose M here: the
module will be called atomisp
-config VIDEO_ATOMISP_ISP2401
- bool "Use Intel Atom ISP on Cherrytail/Anniedale (ISP2401)"
- depends on VIDEO_ATOMISP
- help
- Enable support for Atom ISP2401-based boards.
-
- Select this option for Anniedale (Merrifield+ / Moorefield)
- and Cherrytrail SoCs.
-
- Disabling it enables support for Atom ISP2400-based boards
- (Merrifield and Baytrail SoCs).
-
if VIDEO_ATOMISP
source "drivers/staging/media/atomisp/i2c/Kconfig"
endif
diff --git a/drivers/staging/media/atomisp/Makefile b/drivers/staging/media/atomisp/Makefile
index 38b370124109..fcd3e51ae9ce 100644
--- a/drivers/staging/media/atomisp/Makefile
+++ b/drivers/staging/media/atomisp/Makefile
@@ -150,13 +150,8 @@ atomisp-objs += \
pci/hive_isp_css_common/host/timed_ctrl.o \
pci/hive_isp_css_common/host/vmem.o \
pci/hive_isp_css_shared/host/tag.o \
- pci/system_local.o
-
-# These will be needed when clean merge CHT support nicely into the driver
-# Keep them here handy for when we get to that point
-#
-
-obj-cht = \
+ pci/system_local.o \
+ pci/runtime/isys/src/ibuf_ctrl_rmgr.o \
pci/css_2401_system/host/csi_rx.o \
pci/css_2401_system/host/ibuf_ctrl.o \
pci/css_2401_system/host/isys_dma.o \
@@ -306,11 +301,4 @@ DEFINES := -DHRT_HW -DHRT_ISP_CSS_CUSTOM_HOST -DHRT_USE_VIR_ADDRS -D__HOST__
#DEFINES += -DPUNIT_CAMERA_BUSY
#DEFINES += -DUSE_KMEM_CACHE
-ifeq ($(CONFIG_VIDEO_ATOMISP_ISP2401),y)
-atomisp-objs += \
- $(obj-cht) \
- pci/runtime/isys/src/ibuf_ctrl_rmgr.o
-DEFINES += -DISP2401
-endif
-
ccflags-y += $(INCLUDES) $(DEFINES) -fno-common
diff --git a/drivers/staging/media/atomisp/TODO b/drivers/staging/media/atomisp/TODO
index ecf8ba67b7af..d99cc898cd99 100644
--- a/drivers/staging/media/atomisp/TODO
+++ b/drivers/staging/media/atomisp/TODO
@@ -60,9 +60,6 @@ TODO
2. Items which SHOULD also be fixed eventually:
-* Remove VIDEO_ATOMISP_ISP2401, making the driver to auto-detect the
- register address differences between ISP2400 and ISP2401
-
* The driver is intended to drive the PCI exposed versions of the device.
It will not detect those devices enumerated via ACPI as a field of the
i915 GPU driver (only a problem on BYT).
diff --git a/drivers/staging/media/atomisp/i2c/Kconfig b/drivers/staging/media/atomisp/i2c/Kconfig
index 2d4165cda2f1..f62d183b787f 100644
--- a/drivers/staging/media/atomisp/i2c/Kconfig
+++ b/drivers/staging/media/atomisp/i2c/Kconfig
@@ -57,18 +57,6 @@ config VIDEO_ATOMISP_GC0310
This is a Video4Linux2 sensor-level driver for the Galaxycore
GC0310 0.3MP sensor.
-config VIDEO_ATOMISP_OV5693
- tristate "Omnivision ov5693 sensor support"
- depends on ACPI
- depends on I2C && VIDEO_DEV
- help
- This is a Video4Linux2 sensor-level driver for the Micron
- ov5693 5 Mpixel camera.
-
-	  The ov5693 is a video camera sensor.
-
- It currently only works with the atomisp driver.
-
#
# Kconfig for flash drivers
#
diff --git a/drivers/staging/media/atomisp/i2c/Makefile b/drivers/staging/media/atomisp/i2c/Makefile
index fc55af5f3422..e946cc91e5ff 100644
--- a/drivers/staging/media/atomisp/i2c/Makefile
+++ b/drivers/staging/media/atomisp/i2c/Makefile
@@ -3,7 +3,6 @@
# Makefile for sensor drivers
#
-obj-$(CONFIG_VIDEO_ATOMISP_OV5693) += ov5693/
obj-$(CONFIG_VIDEO_ATOMISP_MT9M114) += atomisp-mt9m114.o
obj-$(CONFIG_VIDEO_ATOMISP_GC2235) += atomisp-gc2235.o
obj-$(CONFIG_VIDEO_ATOMISP_OV2722) += atomisp-ov2722.o
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
index 9a11793f34f7..58cddf11c9ab 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
@@ -83,7 +83,6 @@ struct gc0310_device {
struct mutex input_lock;
bool is_streaming;
- struct fwnode_handle *ep_fwnode;
struct gpio_desc *reset;
struct gpio_desc *powerdown;
@@ -442,11 +441,6 @@ static int gc0310_s_stream(struct v4l2_subdev *sd, int enable)
dev_dbg(&client->dev, "%s S enable=%d\n", __func__, enable);
mutex_lock(&dev->input_lock);
- if (dev->is_streaming == enable) {
- dev_warn(&client->dev, "stream already %s\n", enable ? "started" : "stopped");
- goto error_unlock;
- }
-
if (enable) {
ret = pm_runtime_get_sync(&client->dev);
if (ret < 0)
@@ -498,7 +492,6 @@ static int gc0310_s_stream(struct v4l2_subdev *sd, int enable)
error_power_down:
pm_runtime_put(&client->dev);
dev->is_streaming = false;
-error_unlock:
mutex_unlock(&dev->input_lock);
return ret;
}
@@ -599,37 +592,37 @@ static void gc0310_remove(struct i2c_client *client)
media_entity_cleanup(&dev->sd.entity);
v4l2_ctrl_handler_free(&dev->ctrls.handler);
mutex_destroy(&dev->input_lock);
- fwnode_handle_put(dev->ep_fwnode);
pm_runtime_disable(&client->dev);
}
static int gc0310_probe(struct i2c_client *client)
{
+ struct fwnode_handle *ep_fwnode;
struct gc0310_device *dev;
int ret;
- dev = devm_kzalloc(&client->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
/*
* Sometimes the fwnode graph is initialized by the bridge driver.
* Bridge drivers doing this may also add GPIO mappings, wait for this.
*/
- dev->ep_fwnode = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev), NULL);
- if (!dev->ep_fwnode)
+ ep_fwnode = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev), NULL);
+ if (!ep_fwnode)
return dev_err_probe(&client->dev, -EPROBE_DEFER, "waiting for fwnode graph endpoint\n");
+ fwnode_handle_put(ep_fwnode);
+
+ dev = devm_kzalloc(&client->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
dev->reset = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(dev->reset)) {
- fwnode_handle_put(dev->ep_fwnode);
return dev_err_probe(&client->dev, PTR_ERR(dev->reset),
"getting reset GPIO\n");
}
dev->powerdown = devm_gpiod_get(&client->dev, "powerdown", GPIOD_OUT_HIGH);
if (IS_ERR(dev->powerdown)) {
- fwnode_handle_put(dev->ep_fwnode);
return dev_err_probe(&client->dev, PTR_ERR(dev->powerdown),
"getting powerdown GPIO\n");
}
@@ -652,7 +645,6 @@ static int gc0310_probe(struct i2c_client *client)
dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
dev->pad.flags = MEDIA_PAD_FL_SOURCE;
dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
- dev->sd.fwnode = dev->ep_fwnode;
ret = gc0310_init_controls(dev);
if (ret) {
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/Makefile b/drivers/staging/media/atomisp/i2c/ov5693/Makefile
deleted file mode 100644
index 3275f2be229e..000000000000
--- a/drivers/staging/media/atomisp/i2c/ov5693/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_VIDEO_ATOMISP_OV5693) += atomisp-ov5693.o
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h b/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h
deleted file mode 100644
index f1362cd69f6e..000000000000
--- a/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Support for AD5823 VCM.
- *
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
- */
-
-#ifndef __AD5823_H__
-#define __AD5823_H__
-
-#include <linux/types.h>
-
-#define AD5823_VCM_ADDR 0x0c
-
-#define AD5823_REG_RESET 0x01
-#define AD5823_REG_MODE 0x02
-#define AD5823_REG_VCM_MOVE_TIME 0x03
-#define AD5823_REG_VCM_CODE_MSB 0x04
-#define AD5823_REG_VCM_CODE_LSB 0x05
-#define AD5823_REG_VCM_THRESHOLD_MSB 0x06
-#define AD5823_REG_VCM_THRESHOLD_LSB 0x07
-
-#define AD5823_REG_LENGTH 0x1
-
-#define AD5823_RING_CTRL_ENABLE 0x04
-#define AD5823_RING_CTRL_DISABLE 0x00
-
-#define AD5823_RESONANCE_PERIOD 100000
-#define AD5823_RESONANCE_COEF 512
-#define AD5823_HIGH_FREQ_RANGE 0x80
-
-#define VCM_CODE_MSB_MASK 0xfc
-#define AD5823_INIT_FOCUS_POS 350
-
-enum ad5823_tok_type {
- AD5823_8BIT = 0x1,
- AD5823_16BIT = 0x2,
-};
-
-enum ad5823_vcm_mode {
- AD5823_ARC_RES0 = 0x0, /* Actuator response control RES1 */
- AD5823_ARC_RES1 = 0x1, /* Actuator response control RES0.5 */
- AD5823_ARC_RES2 = 0x2, /* Actuator response control RES2 */
- AD5823_ESRC = 0x3, /* Enhanced slew rate control */
- AD5823_DIRECT = 0x4, /* Direct control */
-};
-
-#define AD5823_INVALID_CONFIG 0xffffffff
-#define AD5823_MAX_FOCUS_POS 1023
-#define DELAY_PER_STEP_NS 1000000
-#define DELAY_MAX_PER_STEP_NS (1000000 * 1023)
-#endif
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
deleted file mode 100644
index 460a4e34c55b..000000000000
--- a/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
+++ /dev/null
@@ -1,1763 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support for OmniVision OV5693 1080p HD camera sensor.
- *
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/kmod.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/moduleparam.h>
-#include <media/v4l2-device.h>
-#include <linux/io.h>
-#include <linux/acpi.h>
-#include "../../include/linux/atomisp_gmin_platform.h"
-
-#include "ov5693.h"
-#include "ad5823.h"
-
-#define __cci_delay(t) \
- do { \
- if ((t) < 10) { \
- usleep_range((t) * 1000, ((t) + 1) * 1000); \
- } else { \
- msleep((t)); \
- } \
- } while (0)
-
-/* Value 30ms reached through experimentation on byt ecs.
- * The DS specifies a much lower value but when using a smaller value
- * the I2C bus sometimes locks up permanently when starting the camera.
- * This issue could not be reproduced on cht, so a lower delay value can
- * be passed via the module parameter at insmod time there.
- */
-static uint up_delay = 30;
-module_param(up_delay, uint, 0644);
-MODULE_PARM_DESC(up_delay,
- "Delay prior to the first CCI transaction for ov5693");
-
-static int vcm_ad_i2c_wr8(struct i2c_client *client, u8 reg, u8 val)
-{
- int err;
- struct i2c_msg msg;
- u8 buf[2];
-
- buf[0] = reg;
- buf[1] = val;
-
- msg.addr = VCM_ADDR;
- msg.flags = 0;
- msg.len = 2;
- msg.buf = &buf[0];
-
- err = i2c_transfer(client->adapter, &msg, 1);
- if (err != 1) {
- dev_err(&client->dev, "%s: vcm i2c fail, err code = %d\n",
- __func__, err);
- return -EIO;
- }
- return 0;
-}
-
-static int ad5823_i2c_write(struct i2c_client *client, u8 reg, u8 val)
-{
- struct i2c_msg msg;
- u8 buf[2];
-
- buf[0] = reg;
- buf[1] = val;
- msg.addr = AD5823_VCM_ADDR;
- msg.flags = 0;
- msg.len = 0x02;
- msg.buf = &buf[0];
-
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
- return 0;
-}
-
-static int ad5823_i2c_read(struct i2c_client *client, u8 reg, u8 *val)
-{
- struct i2c_msg msg[2];
- u8 buf[2];
-
- buf[0] = reg;
- buf[1] = 0;
-
- msg[0].addr = AD5823_VCM_ADDR;
- msg[0].flags = 0;
- msg[0].len = 0x01;
- msg[0].buf = &buf[0];
-
- msg[1].addr = 0x0c;
- msg[1].flags = I2C_M_RD;
- msg[1].len = 0x01;
- msg[1].buf = &buf[1];
- *val = 0;
- if (i2c_transfer(client->adapter, msg, 2) != 2)
- return -EIO;
- *val = buf[1];
- return 0;
-}
-
-static const u32 ov5693_embedded_effective_size = 28;
-
-/* i2c read/write stuff */
-static int ov5693_read_reg(struct i2c_client *client,
- u16 data_length, u16 reg, u16 *val)
-{
- int err;
- struct i2c_msg msg[2];
- unsigned char data[6];
-
- if (!client->adapter) {
- dev_err(&client->dev, "%s error, no client->adapter\n",
- __func__);
- return -ENODEV;
- }
-
- if (data_length != OV5693_8BIT && data_length != OV5693_16BIT
- && data_length != OV5693_32BIT) {
- dev_err(&client->dev, "%s error, invalid data length\n",
- __func__);
- return -EINVAL;
- }
-
- memset(msg, 0, sizeof(msg));
-
- msg[0].addr = client->addr;
- msg[0].flags = 0;
- msg[0].len = I2C_MSG_LENGTH;
- msg[0].buf = data;
-
- /* high byte goes out first */
- data[0] = (u8)(reg >> 8);
- data[1] = (u8)(reg & 0xff);
-
- msg[1].addr = client->addr;
- msg[1].len = data_length;
- msg[1].flags = I2C_M_RD;
- msg[1].buf = data;
-
- err = i2c_transfer(client->adapter, msg, 2);
- if (err != 2) {
- if (err >= 0)
- err = -EIO;
- dev_err(&client->dev,
- "read from offset 0x%x error %d", reg, err);
- return err;
- }
-
- *val = 0;
- /* high byte comes first */
- if (data_length == OV5693_8BIT)
- *val = (u8)data[0];
- else if (data_length == OV5693_16BIT)
- *val = be16_to_cpu(*(__be16 *)&data[0]);
- else
- *val = be32_to_cpu(*(__be32 *)&data[0]);
-
- return 0;
-}
-
-static int ov5693_i2c_write(struct i2c_client *client, u16 len, u8 *data)
-{
- struct i2c_msg msg;
- const int num_msg = 1;
- int ret;
-
- msg.addr = client->addr;
- msg.flags = 0;
- msg.len = len;
- msg.buf = data;
- ret = i2c_transfer(client->adapter, &msg, 1);
-
- return ret == num_msg ? 0 : -EIO;
-}
-
-static int vcm_dw_i2c_write(struct i2c_client *client, u16 data)
-{
- struct i2c_msg msg;
- const int num_msg = 1;
- int ret;
- __be16 val;
-
- val = cpu_to_be16(data);
- msg.addr = VCM_ADDR;
- msg.flags = 0;
- msg.len = OV5693_16BIT;
- msg.buf = (void *)&val;
-
- ret = i2c_transfer(client->adapter, &msg, 1);
-
- return ret == num_msg ? 0 : -EIO;
-}
-
-/*
- * Theory: per datasheet, the two VCMs both allow for a 2-byte read.
- * The DW9714 doesn't actually specify what this does (it has a
- * two-byte write-only protocol, but specifies the read sequence as
- * legal), but it returns the same data (zeroes) always, after an
- * undocumented initial NAK. The AD5823 has a one-byte address
- * register to which all writes go, and subsequent reads will cycle
- * through the 8 bytes of registers. Notably, the default values (the
- * device is always power-cycled affirmatively, so we can rely on
- * these) in AD5823 are not pairwise repetitions of the same 16 bit
- * word. So all we have to do is sequentially read two bytes at a
- * time and see if we detect a difference in any of the first four
- * pairs.
- */
-static int vcm_detect(struct i2c_client *client)
-{
- int i, ret;
- struct i2c_msg msg;
- u16 data0 = 0, data;
-
- for (i = 0; i < 4; i++) {
- msg.addr = VCM_ADDR;
- msg.flags = I2C_M_RD;
- msg.len = sizeof(data);
- msg.buf = (u8 *)&data;
- ret = i2c_transfer(client->adapter, &msg, 1);
-
- /*
- * DW9714 always fails the first read and returns
- * zeroes for subsequent ones
- */
- if (i == 0 && ret == -EREMOTEIO) {
- data0 = 0;
- continue;
- }
-
- if (i == 0)
- data0 = data;
-
- if (data != data0)
- return VCM_AD5823;
- }
- return ret == 1 ? VCM_DW9714 : ret;
-}
-
-static int ov5693_write_reg(struct i2c_client *client, u16 data_length,
- u16 reg, u16 val)
-{
- int ret;
- unsigned char data[4] = {0};
- __be16 *wreg = (void *)data;
- const u16 len = data_length + sizeof(u16); /* 16-bit address + data */
-
- if (data_length != OV5693_8BIT && data_length != OV5693_16BIT) {
- dev_err(&client->dev,
- "%s error, invalid data_length\n", __func__);
- return -EINVAL;
- }
-
- /* high byte goes out first */
- *wreg = cpu_to_be16(reg);
-
- if (data_length == OV5693_8BIT) {
- data[2] = (u8)(val);
- } else {
- /* OV5693_16BIT */
- __be16 *wdata = (void *)&data[2];
-
- *wdata = cpu_to_be16(val);
- }
-
- ret = ov5693_i2c_write(client, len, data);
- if (ret)
- dev_err(&client->dev,
- "write error: wrote 0x%x to offset 0x%x error %d",
- val, reg, ret);
-
- return ret;
-}
-
-/*
- * ov5693_write_reg_array - Initializes a list of OV5693 registers
- * @client: i2c driver client structure
- * @reglist: list of registers to be written
- *
- * This function initializes a list of registers. When consecutive addresses
- * are found in a row on the list, this function creates a buffer and sends
- * consecutive data in a single i2c_transfer().
- *
- * __ov5693_flush_reg_array, __ov5693_buf_reg_array() and
- * __ov5693_write_reg_is_consecutive() are internal functions to
- * ov5693_write_reg_array() and should not be used anywhere else.
- *
- */
-
-static int __ov5693_flush_reg_array(struct i2c_client *client,
- struct ov5693_write_ctrl *ctrl)
-{
- u16 size;
- __be16 *reg = (void *)&ctrl->buffer.addr;
-
- if (ctrl->index == 0)
- return 0;
-
- size = sizeof(u16) + ctrl->index; /* 16-bit address + data */
-
- *reg = cpu_to_be16(ctrl->buffer.addr);
- ctrl->index = 0;
-
- return ov5693_i2c_write(client, size, (u8 *)reg);
-}
-
-static int __ov5693_buf_reg_array(struct i2c_client *client,
- struct ov5693_write_ctrl *ctrl,
- const struct ov5693_reg *next)
-{
- int size;
- __be16 *data16;
-
- switch (next->type) {
- case OV5693_8BIT:
- size = 1;
- ctrl->buffer.data[ctrl->index] = (u8)next->val;
- break;
- case OV5693_16BIT:
- size = 2;
-
- data16 = (void *)&ctrl->buffer.data[ctrl->index];
- *data16 = cpu_to_be16((u16)next->val);
- break;
- default:
- return -EINVAL;
- }
-
- /* When first item is added, we need to store its starting address */
- if (ctrl->index == 0)
- ctrl->buffer.addr = next->reg;
-
- ctrl->index += size;
-
- /*
- * If the buffer may not have room for the next entry, flush it now to
- * avoid running out of space for that item.
- */
- if (ctrl->index + sizeof(u16) >= OV5693_MAX_WRITE_BUF_SIZE)
- return __ov5693_flush_reg_array(client, ctrl);
-
- return 0;
-}
-
-static int __ov5693_write_reg_is_consecutive(struct i2c_client *client,
- struct ov5693_write_ctrl *ctrl,
- const struct ov5693_reg *next)
-{
- if (ctrl->index == 0)
- return 1;
-
- return ctrl->buffer.addr + ctrl->index == next->reg;
-}
-
-static int ov5693_write_reg_array(struct i2c_client *client,
- const struct ov5693_reg *reglist)
-{
- const struct ov5693_reg *next = reglist;
- struct ov5693_write_ctrl ctrl;
- int err;
-
- ctrl.index = 0;
- for (; next->type != OV5693_TOK_TERM; next++) {
- switch (next->type & OV5693_TOK_MASK) {
- case OV5693_TOK_DELAY:
- err = __ov5693_flush_reg_array(client, &ctrl);
- if (err)
- return err;
- msleep(next->val);
- break;
- default:
- /*
- * If the next address is not consecutive, the buffered data needs
- * to be flushed before proceeding.
- */
- if (!__ov5693_write_reg_is_consecutive(client, &ctrl,
- next)) {
- err = __ov5693_flush_reg_array(client, &ctrl);
- if (err)
- return err;
- }
- err = __ov5693_buf_reg_array(client, &ctrl, next);
- if (err) {
- dev_err(&client->dev,
- "%s: write error, aborted\n",
- __func__);
- return err;
- }
- break;
- }
- }
-
- return __ov5693_flush_reg_array(client, &ctrl);
-}
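A hypothetical register table for ov5693_write_reg_array(), showing how the coalescing above is driven (the particular registers and values are chosen only for illustration, and the { type, reg, val } field layout of struct ov5693_reg is assumed):

static const struct ov5693_reg example_settings[] = {
	{ OV5693_8BIT, 0x3800, 0x00 },	/* consecutive addresses ...        */
	{ OV5693_8BIT, 0x3801, 0x10 },	/* ... are buffered and sent        */
	{ OV5693_8BIT, 0x3802, 0x20 },	/* ... as a single i2c_transfer()   */
	{ OV5693_TOK_DELAY, 0, 5 },	/* flush the buffer, then msleep(5) */
	{ OV5693_8BIT, 0x4202, 0x0f },	/* non-consecutive: new burst       */
	{ OV5693_TOK_TERM, 0, 0 },	/* end of list                      */
};

/* err = ov5693_write_reg_array(client, example_settings); */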
-
-static long __ov5693_set_exposure(struct v4l2_subdev *sd, int coarse_itg,
- int gain, int digitgain)
-
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov5693_device *dev = to_ov5693_sensor(sd);
- u16 vts, hts;
- int ret, exp_val;
-
- hts = ov5693_res[dev->fmt_idx].pixels_per_line;
- vts = ov5693_res[dev->fmt_idx].lines_per_frame;
- /*
- * If coarse_itg is larger than 1<<15 it cannot be written to the register
- * directly. Instead, write coarse_itg/2 to the register and write 2*hts
- * to the HTS registers at the same time.
- */
- if (coarse_itg > (1 << 15)) {
- hts = hts * 2;
- coarse_itg = (int)coarse_itg / 2;
- }
- /* group hold */
- ret = ov5693_write_reg(client, OV5693_8BIT,
- OV5693_GROUP_ACCESS, 0x00);
- if (ret) {
- dev_err(&client->dev, "%s: write %x error, aborted\n",
- __func__, OV5693_GROUP_ACCESS);
- return ret;
- }
-
- ret = ov5693_write_reg(client, OV5693_8BIT,
- OV5693_TIMING_HTS_H, (hts >> 8) & 0xFF);
- if (ret) {
- dev_err(&client->dev, "%s: write %x error, aborted\n",
- __func__, OV5693_TIMING_HTS_H);
- return ret;
- }
-
- ret = ov5693_write_reg(client, OV5693_8BIT,
- OV5693_TIMING_HTS_L, hts & 0xFF);
- if (ret) {
- dev_err(&client->dev, "%s: write %x error, aborted\n",
- __func__, OV5693_TIMING_HTS_L);
- return ret;
- }
- /* Increase the VTS to match exposure + MARGIN */
- if (coarse_itg > vts - OV5693_INTEGRATION_TIME_MARGIN)
- vts = (u16)coarse_itg + OV5693_INTEGRATION_TIME_MARGIN;
-
- ret = ov5693_write_reg(client, OV5693_8BIT,
- OV5693_TIMING_VTS_H, (vts >> 8) & 0xFF);
- if (ret) {
- dev_err(&client->dev, "%s: write %x error, aborted\n",
- __func__, OV5693_TIMING_VTS_H);
- return ret;
- }
-
- ret = ov5693_write_reg(client, OV5693_8BIT,
- OV5693_TIMING_VTS_L, vts & 0xFF);
- if (ret) {
- dev_err(&client->dev, "%s: write %x error, aborted\n",
- __func__, OV5693_TIMING_VTS_L);
- return ret;
- }
-
- /* set exposure */
-
-	/* The lower four bits should be 0 */
- exp_val = coarse_itg << 4;
- ret = ov5693_write_reg(client, OV5693_8BIT,
- OV5693_EXPOSURE_L, exp_val & 0xFF);
- if (ret) {
- dev_err(&client->dev, "%s: write %x error, aborted\n",
- __func__, OV5693_EXPOSURE_L);
- return ret;
- }
-
- ret = ov5693_write_reg(client, OV5693_8BIT,
- OV5693_EXPOSURE_M, (exp_val >> 8) & 0xFF);
- if (ret) {
- dev_err(&client->dev, "%s: write %x error, aborted\n",
- __func__, OV5693_EXPOSURE_M);
- return ret;
- }
-
- ret = ov5693_write_reg(client, OV5693_8BIT,
- OV5693_EXPOSURE_H, (exp_val >> 16) & 0x0F);
- if (ret) {
- dev_err(&client->dev, "%s: write %x error, aborted\n",
- __func__, OV5693_EXPOSURE_H);
- return ret;
- }
-
- /* Analog gain */
- ret = ov5693_write_reg(client, OV5693_8BIT,
- OV5693_AGC_L, gain & 0xff);
- if (ret) {
- dev_err(&client->dev, "%s: write %x error, aborted\n",
- __func__, OV5693_AGC_L);
- return ret;
- }
-
- ret = ov5693_write_reg(client, OV5693_8BIT,
- OV5693_AGC_H, (gain >> 8) & 0xff);
- if (ret) {
- dev_err(&client->dev, "%s: write %x error, aborted\n",
- __func__, OV5693_AGC_H);
- return ret;
- }
-
- /* Digital gain */
- if (digitgain) {
- ret = ov5693_write_reg(client, OV5693_16BIT,
- OV5693_MWB_RED_GAIN_H, digitgain);
- if (ret) {
- dev_err(&client->dev, "%s: write %x error, aborted\n",
- __func__, OV5693_MWB_RED_GAIN_H);
- return ret;
- }
-
- ret = ov5693_write_reg(client, OV5693_16BIT,
- OV5693_MWB_GREEN_GAIN_H, digitgain);
- if (ret) {
- dev_err(&client->dev, "%s: write %x error, aborted\n",
- __func__, OV5693_MWB_RED_GAIN_H);
- return ret;
- }
-
- ret = ov5693_write_reg(client, OV5693_16BIT,
- OV5693_MWB_BLUE_GAIN_H, digitgain);
- if (ret) {
- dev_err(&client->dev, "%s: write %x error, aborted\n",
- __func__, OV5693_MWB_RED_GAIN_H);
- return ret;
- }
- }
-
- /* End group */
- ret = ov5693_write_reg(client, OV5693_8BIT,
- OV5693_GROUP_ACCESS, 0x10);
- if (ret)
- return ret;
-
- /* Delay launch group */
- ret = ov5693_write_reg(client, OV5693_8BIT,
- OV5693_GROUP_ACCESS, 0xa0);
- if (ret)
- return ret;
- return ret;
-}
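A condensed sketch of the group-hold sequence used above, with error handling and most registers left out: everything written between the 0x00 and 0x10 accesses is staged in group 0, and the 0xa0 delayed-launch access makes the whole group take effect together at the next frame boundary, so exposure, gain and VTS are not applied mid-frame. (example_group_hold_update() is an illustrative name, not a function from the driver.)

static int example_group_hold_update(struct i2c_client *client, u16 exp, u16 gain)
{
	/* 0x00: start hold for group 0 */
	ov5693_write_reg(client, OV5693_8BIT, OV5693_GROUP_ACCESS, 0x00);

	/* staged writes: exposure is left-shifted by 4, as in the code above */
	ov5693_write_reg(client, OV5693_8BIT, OV5693_EXPOSURE_L, (exp << 4) & 0xff);
	ov5693_write_reg(client, OV5693_8BIT, OV5693_AGC_L, gain & 0xff);
	/* ... remaining exposure / gain / VTS registers ... */

	/* 0x10: end hold for group 0 */
	ov5693_write_reg(client, OV5693_8BIT, OV5693_GROUP_ACCESS, 0x10);
	/* 0xa0: delayed launch, applied at the next frame boundary */
	return ov5693_write_reg(client, OV5693_8BIT, OV5693_GROUP_ACCESS, 0xa0);
}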
-
-static int ov5693_set_exposure(struct v4l2_subdev *sd, int exposure,
- int gain, int digitgain)
-{
- struct ov5693_device *dev = to_ov5693_sensor(sd);
- int ret;
-
- mutex_lock(&dev->input_lock);
- ret = __ov5693_set_exposure(sd, exposure, gain, digitgain);
- mutex_unlock(&dev->input_lock);
-
- return ret;
-}
-
-static long ov5693_s_exposure(struct v4l2_subdev *sd,
- struct atomisp_exposure *exposure)
-{
- u16 coarse_itg = exposure->integration_time[0];
- u16 analog_gain = exposure->gain[0];
- u16 digital_gain = exposure->gain[1];
-
- /* we should not accept the invalid value below */
- if (analog_gain == 0) {
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- v4l2_err(client, "%s: invalid value\n", __func__);
- return -EINVAL;
- }
- return ov5693_set_exposure(sd, coarse_itg, analog_gain, digital_gain);
-}
-
-static int ov5693_read_otp_reg_array(struct i2c_client *client, u16 size,
- u16 addr, u8 *buf)
-{
- u16 index;
- int ret;
- u16 *pVal = NULL;
-
- for (index = 0; index <= size; index++) {
- pVal = (u16 *)(buf + index);
- ret =
- ov5693_read_reg(client, OV5693_8BIT, addr + index,
- pVal);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int __ov5693_otp_read(struct v4l2_subdev *sd, u8 *buf)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov5693_device *dev = to_ov5693_sensor(sd);
- int ret;
- int i;
- u8 *b = buf;
-
- dev->otp_size = 0;
- for (i = 1; i < OV5693_OTP_BANK_MAX; i++) {
-		/* set bank number and OTP read mode */
- ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_OTP_BANK_REG,
- (i | 0xc0)); //[7:6] 2'b11 [5:0] bank no
- if (ret) {
- dev_err(&client->dev, "failed to prepare OTP page\n");
- return ret;
- }
- //pr_debug("write 0x%x->0x%x\n",OV5693_OTP_BANK_REG,(i|0xc0));
-
- /*enable read */
- ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_OTP_READ_REG,
- OV5693_OTP_MODE_READ); // enable :1
- if (ret) {
- dev_err(&client->dev,
- "failed to set OTP reading mode page");
- return ret;
- }
- //pr_debug("write 0x%x->0x%x\n",OV5693_OTP_READ_REG,OV5693_OTP_MODE_READ);
-
- /* Reading the OTP data array */
- ret = ov5693_read_otp_reg_array(client, OV5693_OTP_BANK_SIZE,
- OV5693_OTP_START_ADDR,
- b);
- if (ret) {
- dev_err(&client->dev, "failed to read OTP data\n");
- return ret;
- }
-
- //pr_debug("BANK[%2d] %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", i, *b, *(b+1), *(b+2), *(b+3), *(b+4), *(b+5), *(b+6), *(b+7), *(b+8), *(b+9), *(b+10), *(b+11), *(b+12), *(b+13), *(b+14), *(b+15));
-
-		// Intel OTP map, try to read 320 bytes first.
- if (i == 21) {
- if ((*b) == 0) {
- dev->otp_size = 320;
- break;
- } else {
- b = buf;
- continue;
- }
- } else if (i ==
-			   24) { // if the first 320 bytes of data do not exist, try to read the next 32 bytes of data.
- if ((*b) == 0) {
- dev->otp_size = 32;
- break;
- } else {
- b = buf;
- continue;
- }
- } else if (i ==
-			   27) { // if the previous 32 bytes of data do not exist, try to read the next 32 bytes again.
- if ((*b) == 0) {
- dev->otp_size = 32;
- break;
- } else {
- dev->otp_size = 0; // no OTP data.
- break;
- }
- }
-
- b = b + OV5693_OTP_BANK_SIZE;
- }
- return 0;
-}
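A compact restatement of the bank-walking logic above:

/*
 * Each of the up to 28 OTP banks is 16 bytes, read from 0x3d00..0x3d0f after
 * selecting it through OV5693_OTP_BANK_REG ([7:6] set to 2'b11, [5:0] = bank
 * number).  The data accumulates in the buffer and is validated by the first
 * byte of a guard bank:
 *   bank 21, byte 0 == 0  -> banks 1-20 hold a 320-byte Intel OTP map
 *   bank 24, byte 0 == 0  -> banks 22-23 hold a 32-byte map instead
 *   bank 27, byte 0 == 0  -> banks 25-26 hold a 32-byte map, else no OTP data
 * A non-zero guard byte at bank 21 or 24 rewinds the buffer pointer and the
 * probing continues.
 */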
-
-/*
- * Read OTP data and store it into a devm-allocated buffer, which is freed
- * automatically when the device is unbound.  The size of the data that was
- * found is stored in dev->otp_size.
- */
-static void *ov5693_otp_read(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u8 *buf;
- int ret;
-
- buf = devm_kzalloc(&client->dev, (OV5693_OTP_DATA_SIZE + 16), GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- //otp valid after mipi on and sw stream on
- ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_FRAME_OFF_NUM, 0x00);
-
- ret = ov5693_write_reg(client, OV5693_8BIT,
- OV5693_SW_STREAM, OV5693_START_STREAMING);
-
- ret = __ov5693_otp_read(sd, buf);
-
- //mipi off and sw stream off after otp read
- ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_FRAME_OFF_NUM, 0x0f);
-
- ret = ov5693_write_reg(client, OV5693_8BIT,
- OV5693_SW_STREAM, OV5693_STOP_STREAMING);
-
- /* Driver has failed to find valid data */
- if (ret) {
- dev_err(&client->dev, "sensor found no valid OTP data\n");
- return ERR_PTR(ret);
- }
-
- return buf;
-}
-
-static long ov5693_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
-{
- switch (cmd) {
- case ATOMISP_IOC_S_EXPOSURE:
- return ov5693_s_exposure(sd, arg);
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-/*
- * This returns the exposure time being used. This should only be used
- * for filling in EXIF data, not for actual image processing.
- */
-static int ov5693_q_exposure(struct v4l2_subdev *sd, s32 *value)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u16 reg_v, reg_v2;
- int ret;
-
- /* get exposure */
- ret = ov5693_read_reg(client, OV5693_8BIT,
- OV5693_EXPOSURE_L,
- &reg_v);
- if (ret)
- goto err;
-
- ret = ov5693_read_reg(client, OV5693_8BIT,
- OV5693_EXPOSURE_M,
- &reg_v2);
- if (ret)
- goto err;
-
- reg_v += reg_v2 << 8;
- ret = ov5693_read_reg(client, OV5693_8BIT,
- OV5693_EXPOSURE_H,
- &reg_v2);
- if (ret)
- goto err;
-
- *value = reg_v + (((u32)reg_v2 << 16));
-err:
- return ret;
-}
-
-static int ad5823_t_focus_vcm(struct v4l2_subdev *sd, u16 val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- u8 vcm_code;
-
- ret = ad5823_i2c_read(client, AD5823_REG_VCM_CODE_MSB, &vcm_code);
- if (ret)
- return ret;
-
- /* set reg VCM_CODE_MSB Bit[1:0] */
- vcm_code = (vcm_code & VCM_CODE_MSB_MASK) |
- ((val >> 8) & ~VCM_CODE_MSB_MASK);
- ret = ad5823_i2c_write(client, AD5823_REG_VCM_CODE_MSB, vcm_code);
- if (ret)
- return ret;
-
- /* set reg VCM_CODE_LSB Bit[7:0] */
- ret = ad5823_i2c_write(client, AD5823_REG_VCM_CODE_LSB, (val & 0xff));
- if (ret)
- return ret;
-
- /* set required vcm move time */
- vcm_code = AD5823_RESONANCE_PERIOD / AD5823_RESONANCE_COEF
- - AD5823_HIGH_FREQ_RANGE;
- ret = ad5823_i2c_write(client, AD5823_REG_VCM_MOVE_TIME, vcm_code);
-
- return ret;
-}
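A worked example of the split performed above (the focus value is chosen only for illustration):

/*
 * val = 0x1a3 (419, a 10-bit focus position)
 *   val >> 8   = 0x01 -> merged into VCM_CODE_MSB bits [1:0]
 *   val & 0xff = 0xa3 -> written to VCM_CODE_LSB
 * The upper bits of VCM_CODE_MSB (VCM_CODE_MSB_MASK = 0xfc) are read back
 * first and preserved, since that register also carries control bits such
 * as ring control (see AD5823_RING_CTRL_ENABLE in ov5693_init()).
 */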
-
-static int ad5823_t_focus_abs(struct v4l2_subdev *sd, s32 value)
-{
- value = min(value, AD5823_MAX_FOCUS_POS);
- return ad5823_t_focus_vcm(sd, value);
-}
-
-static int ov5693_t_focus_abs(struct v4l2_subdev *sd, s32 value)
-{
- struct ov5693_device *dev = to_ov5693_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret = 0;
-
- dev_dbg(&client->dev, "%s: FOCUS_POS: 0x%x\n", __func__, value);
- value = clamp(value, 0, OV5693_VCM_MAX_FOCUS_POS);
- if (dev->vcm == VCM_DW9714) {
- if (dev->vcm_update) {
- ret = vcm_dw_i2c_write(client, VCM_PROTECTION_OFF);
- if (ret)
- return ret;
- ret = vcm_dw_i2c_write(client, DIRECT_VCM);
- if (ret)
- return ret;
- ret = vcm_dw_i2c_write(client, VCM_PROTECTION_ON);
- if (ret)
- return ret;
- dev->vcm_update = false;
- }
- ret = vcm_dw_i2c_write(client,
- vcm_val(value, VCM_DEFAULT_S));
- } else if (dev->vcm == VCM_AD5823) {
- ad5823_t_focus_abs(sd, value);
- }
- if (ret == 0) {
- dev->number_of_steps = value - dev->focus;
- dev->focus = value;
- dev->timestamp_t_focus_abs = ktime_get();
- } else
- dev_err(&client->dev,
- "%s: i2c failed. ret %d\n", __func__, ret);
-
- return ret;
-}
-
-static int ov5693_t_focus_rel(struct v4l2_subdev *sd, s32 value)
-{
- struct ov5693_device *dev = to_ov5693_sensor(sd);
-
- return ov5693_t_focus_abs(sd, dev->focus + value);
-}
-
-#define DELAY_PER_STEP_NS 1000000
-#define DELAY_MAX_PER_STEP_NS (1000000 * 1023)
-static int ov5693_q_focus_status(struct v4l2_subdev *sd, s32 *value)
-{
- u32 status = 0;
- struct ov5693_device *dev = to_ov5693_sensor(sd);
- ktime_t temptime;
- ktime_t timedelay = ns_to_ktime(min_t(u32,
- abs(dev->number_of_steps) * DELAY_PER_STEP_NS,
- DELAY_MAX_PER_STEP_NS));
-
- temptime = ktime_sub(ktime_get(), (dev->timestamp_t_focus_abs));
- if (ktime_compare(temptime, timedelay) <= 0) {
- status |= ATOMISP_FOCUS_STATUS_MOVING;
- status |= ATOMISP_FOCUS_HP_IN_PROGRESS;
- } else {
- status |= ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
- status |= ATOMISP_FOCUS_HP_COMPLETE;
- }
-
- *value = status;
-
- return 0;
-}
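A worked example of the timing heuristic above:

/*
 * number_of_steps = 200  ->  window = 200 * DELAY_PER_STEP_NS = 200 ms
 * (the window is capped at DELAY_MAX_PER_STEP_NS = 1023 ms).  Until 200 ms
 * have elapsed since the last ov5693_t_focus_abs() call the control reports
 * MOVING / HP_IN_PROGRESS, afterwards ACCEPTS_NEW_MOVE / HP_COMPLETE.
 */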
-
-static int ov5693_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
-{
- struct ov5693_device *dev = to_ov5693_sensor(sd);
- s32 val;
-
- ov5693_q_focus_status(sd, &val);
-
- if (val & ATOMISP_FOCUS_STATUS_MOVING)
- *value = dev->focus - dev->number_of_steps;
- else
- *value = dev->focus;
-
- return 0;
-}
-
-static int ov5693_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
-{
- struct ov5693_device *dev = to_ov5693_sensor(sd);
-
- dev->number_of_steps = value;
- dev->vcm_update = true;
- return 0;
-}
-
-static int ov5693_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
-{
- struct ov5693_device *dev = to_ov5693_sensor(sd);
-
- dev->number_of_steps = value;
- dev->vcm_update = true;
- return 0;
-}
-
-static int ov5693_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct ov5693_device *dev =
- container_of(ctrl->handler, struct ov5693_device, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&dev->sd);
- int ret = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_FOCUS_ABSOLUTE:
- dev_dbg(&client->dev, "%s: CID_FOCUS_ABSOLUTE:%d.\n",
- __func__, ctrl->val);
- ret = ov5693_t_focus_abs(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_FOCUS_RELATIVE:
- dev_dbg(&client->dev, "%s: CID_FOCUS_RELATIVE:%d.\n",
- __func__, ctrl->val);
- ret = ov5693_t_focus_rel(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_VCM_SLEW:
- ret = ov5693_t_vcm_slew(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_VCM_TIMING:
- ret = ov5693_t_vcm_timing(&dev->sd, ctrl->val);
- break;
- default:
- ret = -EINVAL;
- }
- return ret;
-}
-
-static int ov5693_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct ov5693_device *dev =
- container_of(ctrl->handler, struct ov5693_device, ctrl_handler);
- int ret = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_EXPOSURE_ABSOLUTE:
- ret = ov5693_q_exposure(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_FOCUS_ABSOLUTE:
- ret = ov5693_q_focus_abs(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_FOCUS_STATUS:
- ret = ov5693_q_focus_status(&dev->sd, &ctrl->val);
- break;
- default:
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static const struct v4l2_ctrl_ops ctrl_ops = {
- .s_ctrl = ov5693_s_ctrl,
- .g_volatile_ctrl = ov5693_g_volatile_ctrl
-};
-
-static const struct v4l2_ctrl_config ov5693_controls[] = {
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_EXPOSURE_ABSOLUTE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "exposure",
- .min = 0x0,
- .max = 0xffff,
- .step = 0x01,
- .def = 0x00,
- .flags = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_FOCUS_ABSOLUTE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "focus move absolute",
- .min = 0,
- .max = OV5693_VCM_MAX_FOCUS_POS,
- .step = 1,
- .def = 0,
- .flags = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_FOCUS_RELATIVE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "focus move relative",
- .min = OV5693_VCM_MAX_FOCUS_NEG,
- .max = OV5693_VCM_MAX_FOCUS_POS,
- .step = 1,
- .def = 0,
- .flags = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_FOCUS_STATUS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "focus status",
- .min = 0,
- .max = 100, /* allow enum to grow in the future */
- .step = 1,
- .def = 0,
- .flags = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_VCM_SLEW,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "vcm slew",
- .min = 0,
- .max = OV5693_VCM_SLEW_STEP_MAX,
- .step = 1,
- .def = 0,
- .flags = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_VCM_TIMING,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "vcm step time",
- .min = 0,
- .max = OV5693_VCM_SLEW_TIME_MAX,
- .step = 1,
- .def = 0,
- .flags = 0,
- },
-};
-
-static int ov5693_init(struct v4l2_subdev *sd)
-{
- struct ov5693_device *dev = to_ov5693_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- pr_info("%s\n", __func__);
- mutex_lock(&dev->input_lock);
- dev->vcm_update = false;
-
- if (dev->vcm == VCM_AD5823) {
- ret = vcm_ad_i2c_wr8(client, 0x01, 0x01); /* vcm init test */
- if (ret)
- dev_err(&client->dev,
- "vcm reset failed\n");
- /*change the mode*/
- ret = ad5823_i2c_write(client, AD5823_REG_VCM_CODE_MSB,
- AD5823_RING_CTRL_ENABLE);
- if (ret)
- dev_err(&client->dev,
- "vcm enable ringing failed\n");
- ret = ad5823_i2c_write(client, AD5823_REG_MODE,
- AD5823_ARC_RES1);
- if (ret)
- dev_err(&client->dev,
- "vcm change mode failed\n");
- }
-
- /*change initial focus value for ad5823*/
- if (dev->vcm == VCM_AD5823) {
- dev->focus = AD5823_INIT_FOCUS_POS;
- ov5693_t_focus_abs(sd, AD5823_INIT_FOCUS_POS);
- } else {
- dev->focus = 0;
- ov5693_t_focus_abs(sd, 0);
- }
-
- mutex_unlock(&dev->input_lock);
-
- return 0;
-}
-
-static int power_ctrl(struct v4l2_subdev *sd, bool flag)
-{
- int ret;
- struct ov5693_device *dev = to_ov5693_sensor(sd);
-
- if (!dev || !dev->platform_data)
- return -ENODEV;
-
- /*
- * This driver assumes "internal DVDD, PWDNB tied to DOVDD".
- * In this set up only gpio0 (XSHUTDN) should be available
- * but in some products (for example ECS) gpio1 (PWDNB) is
- * also available. If gpio1 is available we emulate it being
- * tied to DOVDD here.
- */
- if (flag) {
- ret = dev->platform_data->v2p8_ctrl(sd, 1);
- dev->platform_data->gpio1_ctrl(sd, 1);
- if (ret == 0) {
- ret = dev->platform_data->v1p8_ctrl(sd, 1);
- if (ret) {
- dev->platform_data->gpio1_ctrl(sd, 0);
- ret = dev->platform_data->v2p8_ctrl(sd, 0);
- }
- }
- } else {
- dev->platform_data->gpio1_ctrl(sd, 0);
- ret = dev->platform_data->v1p8_ctrl(sd, 0);
- ret |= dev->platform_data->v2p8_ctrl(sd, 0);
- }
-
- return ret;
-}
-
-static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
-{
- struct ov5693_device *dev = to_ov5693_sensor(sd);
-
- if (!dev || !dev->platform_data)
- return -ENODEV;
-
- return dev->platform_data->gpio0_ctrl(sd, flag);
-}
-
-static int __power_up(struct v4l2_subdev *sd)
-{
- struct ov5693_device *dev = to_ov5693_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- if (!dev->platform_data) {
- dev_err(&client->dev,
- "no camera_sensor_platform_data");
- return -ENODEV;
- }
-
- /* power control */
- ret = power_ctrl(sd, 1);
- if (ret)
- goto fail_power;
-
- /* according to DS, at least 5ms is needed between DOVDD and PWDN */
-	/* increase this delay to 10~11 ms */
- usleep_range(10000, 11000);
-
- /* gpio ctrl */
- ret = gpio_ctrl(sd, 1);
- if (ret) {
- ret = gpio_ctrl(sd, 1);
- if (ret)
- goto fail_power;
- }
-
- /* flis clock control */
- ret = dev->platform_data->flisclk_ctrl(sd, 1);
- if (ret)
- goto fail_clk;
-
- __cci_delay(up_delay);
-
- return 0;
-
-fail_clk:
- gpio_ctrl(sd, 0);
-fail_power:
- power_ctrl(sd, 0);
- dev_err(&client->dev, "sensor power-up failed\n");
-
- return ret;
-}
-
-static int power_down(struct v4l2_subdev *sd)
-{
- struct ov5693_device *dev = to_ov5693_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret = 0;
-
- dev->focus = OV5693_INVALID_CONFIG;
- if (!dev->platform_data) {
- dev_err(&client->dev,
- "no camera_sensor_platform_data");
- return -ENODEV;
- }
-
- ret = dev->platform_data->flisclk_ctrl(sd, 0);
- if (ret)
- dev_err(&client->dev, "flisclk failed\n");
-
- /* gpio ctrl */
- ret = gpio_ctrl(sd, 0);
- if (ret) {
- ret = gpio_ctrl(sd, 0);
- if (ret)
- dev_err(&client->dev, "gpio failed 2\n");
- }
-
- /* power control */
- ret = power_ctrl(sd, 0);
- if (ret)
- dev_err(&client->dev, "vprog failed.\n");
-
- return ret;
-}
-
-static int power_up(struct v4l2_subdev *sd)
-{
- static const int retry_count = 4;
- int i, ret;
-
- for (i = 0; i < retry_count; i++) {
- ret = __power_up(sd);
- if (!ret)
- return 0;
-
- power_down(sd);
- }
- return ret;
-}
-
-static int ov5693_s_power(struct v4l2_subdev *sd, int on)
-{
- int ret;
-
- pr_info("%s: on %d\n", __func__, on);
- if (on == 0)
- return power_down(sd);
- else {
- ret = power_up(sd);
- if (!ret) {
- ret = ov5693_init(sd);
- /* restore settings */
- ov5693_res = ov5693_res_preview;
- N_RES = N_RES_PREVIEW;
- }
- }
- return ret;
-}
-
-/*
- * distance - calculate the distance
- * @res: resolution
- * @w: width
- * @h: height
- *
- * Get the gap between res_w/res_h and w/h.
- * distance = (res_w/res_h - w/h) / (w/h) * 8192
- * Resolutions with width/height smaller than the requested w/h are not considered.
- * An aspect-ratio gap larger than 1/8 is not considered either.
- * Returns the gap value, or -1 on failure.
- */
-#define LARGEST_ALLOWED_RATIO_MISMATCH 1024
-static int distance(struct ov5693_resolution *res, u32 w, u32 h)
-{
- int ratio;
- int distance;
-
- if (w == 0 || h == 0 ||
- res->width < w || res->height < h)
- return -1;
-
- ratio = res->width << 13;
- ratio /= w;
- ratio *= h;
- ratio /= res->height;
-
- distance = abs(ratio - 8192);
-
- if (distance > LARGEST_ALLOWED_RATIO_MISMATCH)
- return -1;
-
- return distance;
-}
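A worked example of the fixed-point ratio test above (8192 represents a ratio of 1.0; values chosen for illustration):

/*
 * res 2592x1944 vs. requested 1600x1200 (both 4:3):
 *   ratio    = (2592 << 13) / 1600 * 1200 / 1944 = 8191
 *   distance = |8191 - 8192| = 1                  -> accepted
 *
 * res 2592x1944 (4:3) vs. requested 1920x1080 (16:9):
 *   ratio    = (2592 << 13) / 1920 * 1080 / 1944 = 6143
 *   distance = |6143 - 8192| = 2049 > 1024        -> rejected (> 1/8 mismatch)
 */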
-
-/* Return the index of the nearest higher resolution.
- * First try to find a resolution with an approximately matching aspect
- * ratio; if several resolutions share the same aspect ratio, choose the
- * smallest one.
- */
-static int nearest_resolution_index(int w, int h)
-{
- int i;
- int idx = -1;
- int dist;
- int min_dist = INT_MAX;
- int min_res_w = INT_MAX;
- struct ov5693_resolution *tmp_res = NULL;
-
- for (i = 0; i < N_RES; i++) {
- tmp_res = &ov5693_res[i];
- dist = distance(tmp_res, w, h);
- if (dist == -1)
- continue;
- if (dist < min_dist) {
- min_dist = dist;
- idx = i;
- min_res_w = ov5693_res[i].width;
- continue;
- }
- if (dist == min_dist && ov5693_res[i].width < min_res_w)
- idx = i;
- }
-
- return idx;
-}
-
-static int get_resolution_index(int w, int h)
-{
- int i;
-
- for (i = 0; i < N_RES; i++) {
- if (w != ov5693_res[i].width)
- continue;
- if (h != ov5693_res[i].height)
- continue;
-
- return i;
- }
-
- return -1;
-}
-
-/* TODO: remove it. */
-static int startup(struct v4l2_subdev *sd)
-{
- struct ov5693_device *dev = to_ov5693_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret = 0;
-
- ret = ov5693_write_reg(client, OV5693_8BIT,
- OV5693_SW_RESET, 0x01);
- if (ret) {
- dev_err(&client->dev, "ov5693 reset err.\n");
- return ret;
- }
-
- ret = ov5693_write_reg_array(client, ov5693_global_setting);
- if (ret) {
- dev_err(&client->dev, "ov5693 write register err.\n");
- return ret;
- }
-
- ret = ov5693_write_reg_array(client, ov5693_res[dev->fmt_idx].regs);
- if (ret) {
- dev_err(&client->dev, "ov5693 write register err.\n");
- return ret;
- }
-
- return ret;
-}
-
-static int ov5693_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *fmt = &format->format;
- struct ov5693_device *dev = to_ov5693_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct camera_mipi_info *ov5693_info = NULL;
- int ret = 0;
- int idx;
-
- if (format->pad)
- return -EINVAL;
- if (!fmt)
- return -EINVAL;
- ov5693_info = v4l2_get_subdev_hostdata(sd);
- if (!ov5693_info)
- return -EINVAL;
-
- mutex_lock(&dev->input_lock);
- idx = nearest_resolution_index(fmt->width, fmt->height);
- if (idx == -1) {
- /* return the largest resolution */
- fmt->width = ov5693_res[N_RES - 1].width;
- fmt->height = ov5693_res[N_RES - 1].height;
- } else {
- fmt->width = ov5693_res[idx].width;
- fmt->height = ov5693_res[idx].height;
- }
-
- fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- sd_state->pads->try_fmt = *fmt;
- mutex_unlock(&dev->input_lock);
- return 0;
- }
-
- dev->fmt_idx = get_resolution_index(fmt->width, fmt->height);
- if (dev->fmt_idx == -1) {
- dev_err(&client->dev, "get resolution fail\n");
- mutex_unlock(&dev->input_lock);
- return -EINVAL;
- }
-
- ret = startup(sd);
- if (ret) {
- int i = 0;
-
- dev_err(&client->dev, "ov5693 startup err, retry to power up\n");
- for (i = 0; i < OV5693_POWER_UP_RETRY_NUM; i++) {
- dev_err(&client->dev,
- "ov5693 retry to power up %d/%d times, result: ",
- i + 1, OV5693_POWER_UP_RETRY_NUM);
- power_down(sd);
- ret = power_up(sd);
- if (!ret) {
- mutex_unlock(&dev->input_lock);
- ov5693_init(sd);
- mutex_lock(&dev->input_lock);
- } else {
- dev_err(&client->dev, "power up failed, continue\n");
- continue;
- }
- ret = startup(sd);
- if (ret) {
- dev_err(&client->dev, " startup FAILED!\n");
- } else {
- dev_err(&client->dev, " startup SUCCESS!\n");
- break;
- }
- }
- }
-
- /*
- * After the sensor settings have been written to the hardware, streaming
- * sometimes starts on its own.  This would cause an ISP timeout because
- * the ISP is not yet ready to receive data, so explicitly stop streaming here.
- */
- ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_SW_STREAM,
- OV5693_STOP_STREAMING);
- if (ret)
- dev_warn(&client->dev, "ov5693 stream off err\n");
-
- ov5693_info->metadata_width = fmt->width * 10 / 8;
- ov5693_info->metadata_height = 1;
- ov5693_info->metadata_effective_width = &ov5693_embedded_effective_size;
-
- mutex_unlock(&dev->input_lock);
- return ret;
-}
-
-static int ov5693_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *fmt = &format->format;
- struct ov5693_device *dev = to_ov5693_sensor(sd);
-
- if (format->pad)
- return -EINVAL;
-
- if (!fmt)
- return -EINVAL;
-
- fmt->width = ov5693_res[dev->fmt_idx].width;
- fmt->height = ov5693_res[dev->fmt_idx].height;
- fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
-
- return 0;
-}
-
-static int ov5693_detect(struct i2c_client *client)
-{
- struct i2c_adapter *adapter = client->adapter;
- u16 high, low;
- int ret;
- u16 id;
- u8 revision;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
- return -ENODEV;
-
- ret = ov5693_read_reg(client, OV5693_8BIT,
- OV5693_SC_CMMN_CHIP_ID_H, &high);
- if (ret) {
- dev_err(&client->dev, "sensor_id_high = 0x%x\n", high);
- return -ENODEV;
- }
- ret = ov5693_read_reg(client, OV5693_8BIT,
- OV5693_SC_CMMN_CHIP_ID_L, &low);
- if (ret)
- return ret;
- id = ((((u16)high) << 8) | (u16)low);
-
- if (id != OV5693_ID) {
- dev_err(&client->dev, "sensor ID error 0x%x\n", id);
- return -ENODEV;
- }
-
- ret = ov5693_read_reg(client, OV5693_8BIT,
- OV5693_SC_CMMN_SUB_ID, &high);
- revision = (u8)high & 0x0f;
-
- dev_dbg(&client->dev, "sensor_revision = 0x%x\n", revision);
- dev_dbg(&client->dev, "detect ov5693 success\n");
- return 0;
-}
-
-static int ov5693_s_stream(struct v4l2_subdev *sd, int enable)
-{
- struct ov5693_device *dev = to_ov5693_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- mutex_lock(&dev->input_lock);
-
- ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_SW_STREAM,
- enable ? OV5693_START_STREAMING :
- OV5693_STOP_STREAMING);
-
- mutex_unlock(&dev->input_lock);
-
- return ret;
-}
-
-static int ov5693_s_config(struct v4l2_subdev *sd,
- int irq, void *platform_data)
-{
- struct ov5693_device *dev = to_ov5693_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret = 0;
-
- if (!platform_data)
- return -ENODEV;
-
- dev->platform_data =
- (struct camera_sensor_platform_data *)platform_data;
-
- mutex_lock(&dev->input_lock);
-	/* power off the module, then power it on again later, as the
-	 * initial power-on done by the board may not follow the power-on
-	 * sequence the module needs
- */
- ret = power_down(sd);
- if (ret) {
- dev_err(&client->dev, "ov5693 power-off err.\n");
- goto fail_power_off;
- }
-
- ret = power_up(sd);
- if (ret) {
- dev_err(&client->dev, "ov5693 power-up err.\n");
- goto fail_power_on;
- }
-
- if (!dev->vcm)
- dev->vcm = vcm_detect(client);
-
- ret = dev->platform_data->csi_cfg(sd, 1);
- if (ret)
- goto fail_csi_cfg;
-
- /* config & detect sensor */
- ret = ov5693_detect(client);
- if (ret) {
- dev_err(&client->dev, "ov5693_detect err s_config.\n");
- goto fail_csi_cfg;
- }
-
- dev->otp_data = ov5693_otp_read(sd);
-
- /* turn off sensor, after probed */
- ret = power_down(sd);
- if (ret) {
- dev_err(&client->dev, "ov5693 power-off err.\n");
- goto fail_csi_cfg;
- }
- mutex_unlock(&dev->input_lock);
-
- return ret;
-
-fail_csi_cfg:
- dev->platform_data->csi_cfg(sd, 0);
-fail_power_on:
- power_down(sd);
- dev_err(&client->dev, "sensor power-gating failed\n");
-fail_power_off:
- mutex_unlock(&dev->input_lock);
- return ret;
-}
-
-static int ov5693_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval)
-{
- struct ov5693_device *dev = to_ov5693_sensor(sd);
-
- interval->interval.numerator = 1;
- interval->interval.denominator = ov5693_res[dev->fmt_idx].fps;
-
- return 0;
-}
-
-static int ov5693_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- if (code->index >= MAX_FMTS)
- return -EINVAL;
-
- code->code = MEDIA_BUS_FMT_SBGGR10_1X10;
- return 0;
-}
-
-static int ov5693_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- int index = fse->index;
-
- if (index >= N_RES)
- return -EINVAL;
-
- fse->min_width = ov5693_res[index].width;
- fse->min_height = ov5693_res[index].height;
- fse->max_width = ov5693_res[index].width;
- fse->max_height = ov5693_res[index].height;
-
- return 0;
-}
-
-static const struct v4l2_subdev_video_ops ov5693_video_ops = {
- .s_stream = ov5693_s_stream,
- .g_frame_interval = ov5693_g_frame_interval,
-};
-
-static const struct v4l2_subdev_core_ops ov5693_core_ops = {
- .s_power = ov5693_s_power,
- .ioctl = ov5693_ioctl,
-};
-
-static const struct v4l2_subdev_pad_ops ov5693_pad_ops = {
- .enum_mbus_code = ov5693_enum_mbus_code,
- .enum_frame_size = ov5693_enum_frame_size,
- .get_fmt = ov5693_get_fmt,
- .set_fmt = ov5693_set_fmt,
-};
-
-static const struct v4l2_subdev_ops ov5693_ops = {
- .core = &ov5693_core_ops,
- .video = &ov5693_video_ops,
- .pad = &ov5693_pad_ops,
-};
-
-static void ov5693_remove(struct i2c_client *client)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct ov5693_device *dev = to_ov5693_sensor(sd);
-
- dev_dbg(&client->dev, "ov5693_remove...\n");
-
- dev->platform_data->csi_cfg(sd, 0);
-
- v4l2_device_unregister_subdev(sd);
-
- atomisp_gmin_remove_subdev(sd);
-
- media_entity_cleanup(&dev->sd.entity);
- v4l2_ctrl_handler_free(&dev->ctrl_handler);
- kfree(dev);
-}
-
-static int ov5693_probe(struct i2c_client *client)
-{
- struct ov5693_device *dev;
- int i2c;
- int ret;
- void *pdata;
- unsigned int i;
-
- /*
- * Firmware workaround: Some modules use a "secondary default"
- * address of 0x10 which doesn't appear on schematics, and
- * some BIOS versions haven't gotten the memo. Work around
- * via config.
- */
- i2c = gmin_get_var_int(&client->dev, false, "I2CAddr", -1);
- if (i2c != -1) {
- dev_info(&client->dev,
- "Overriding firmware-provided I2C address (0x%x) with 0x%x\n",
- client->addr, i2c);
- client->addr = i2c;
- }
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- mutex_init(&dev->input_lock);
-
- dev->fmt_idx = 0;
- v4l2_i2c_subdev_init(&dev->sd, client, &ov5693_ops);
-
- pdata = gmin_camera_platform_data(&dev->sd,
- ATOMISP_INPUT_FORMAT_RAW_10,
- atomisp_bayer_order_bggr);
- if (!pdata) {
- ret = -EINVAL;
- goto out_free;
- }
-
- ret = ov5693_s_config(&dev->sd, client->irq, pdata);
- if (ret)
- goto out_free;
-
- ret = atomisp_register_i2c_module(&dev->sd, pdata, RAW_CAMERA);
- if (ret)
- goto out_free;
-
- dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- dev->pad.flags = MEDIA_PAD_FL_SOURCE;
- dev->format.code = MEDIA_BUS_FMT_SBGGR10_1X10;
- dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
- ret =
- v4l2_ctrl_handler_init(&dev->ctrl_handler,
- ARRAY_SIZE(ov5693_controls));
- if (ret) {
- ov5693_remove(client);
- return ret;
- }
-
- for (i = 0; i < ARRAY_SIZE(ov5693_controls); i++)
- v4l2_ctrl_new_custom(&dev->ctrl_handler, &ov5693_controls[i],
- NULL);
-
- if (dev->ctrl_handler.error) {
- ov5693_remove(client);
- return dev->ctrl_handler.error;
- }
-
- /* Use same lock for controls as for everything else. */
- dev->ctrl_handler.lock = &dev->input_lock;
- dev->sd.ctrl_handler = &dev->ctrl_handler;
-
- ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
- if (ret)
- ov5693_remove(client);
-
- return ret;
-out_free:
- v4l2_device_unregister_subdev(&dev->sd);
- kfree(dev);
- return ret;
-}
-
-static const struct acpi_device_id ov5693_acpi_match[] = {
- {"INT33BE"},
- {},
-};
-MODULE_DEVICE_TABLE(acpi, ov5693_acpi_match);
-
-static struct i2c_driver ov5693_driver = {
- .driver = {
- .name = "ov5693",
- .acpi_match_table = ov5693_acpi_match,
- },
- .probe = ov5693_probe,
- .remove = ov5693_remove,
-};
-module_i2c_driver(ov5693_driver);
-
-MODULE_DESCRIPTION("A low-level driver for OmniVision 5693 sensors");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
deleted file mode 100644
index 5e17eaf8fd6e..000000000000
--- a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
+++ /dev/null
@@ -1,1331 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Support for OmniVision OV5693 5M camera sensor.
- *
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
- */
-
-#ifndef __OV5693_H__
-#define __OV5693_H__
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/i2c.h>
-#include <linux/delay.h>
-#include <linux/videodev2.h>
-#include <linux/spinlock.h>
-#include <media/v4l2-subdev.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ctrls.h>
-#include <linux/v4l2-mediabus.h>
-#include <media/media-entity.h>
-
-#include "../../include/linux/atomisp_platform.h"
-
-/*
- * FIXME: non-preview resolutions are currently broken
- */
-#define ENABLE_NON_PREVIEW 0
-
-#define OV5693_POWER_UP_RETRY_NUM 5
-
-/* Defines for register writes and register array processing */
-#define I2C_MSG_LENGTH 0x2
-#define I2C_RETRY_COUNT 5
-
-#define OV5693_FOCAL_LENGTH_NUM 334 /*3.34mm*/
-#define OV5693_FOCAL_LENGTH_DEM 100
-#define OV5693_F_NUMBER_DEFAULT_NUM 24
-#define OV5693_F_NUMBER_DEM 10
-
-#define MAX_FMTS 1
-
-/* sensor_mode_data read_mode adaptation */
-#define OV5693_READ_MODE_BINNING_ON 0x0400
-#define OV5693_READ_MODE_BINNING_OFF 0x00
-#define OV5693_INTEGRATION_TIME_MARGIN 8
-
-#define OV5693_MAX_EXPOSURE_VALUE 0xFFF1
-#define OV5693_MAX_GAIN_VALUE 0xFF
-
-/*
- * focal length bits definition:
- * bits 31-16: numerator, bits 15-0: denominator
- */
-#define OV5693_FOCAL_LENGTH_DEFAULT 0x1B70064
-
-/*
- * current f-number bits definition:
- * bits 31-16: numerator, bits 15-0: denominator
- */
-#define OV5693_F_NUMBER_DEFAULT 0x18000a
-
-/*
- * f-number range bits definition:
- * bits 31-24: max f-number numerator
- * bits 23-16: max f-number denominator
- * bits 15-8: min f-number numerator
- * bits 7-0: min f-number denominator
- */
-#define OV5693_F_NUMBER_RANGE 0x180a180a
-#define OV5693_ID 0x5690
-
-#define OV5693_FINE_INTG_TIME_MIN 0
-#define OV5693_FINE_INTG_TIME_MAX_MARGIN 0
-#define OV5693_COARSE_INTG_TIME_MIN 1
-#define OV5693_COARSE_INTG_TIME_MAX_MARGIN 6
-
-#define OV5693_BIN_FACTOR_MAX 4
-/*
- * OV5693 System control registers
- */
-#define OV5693_SW_SLEEP 0x0100
-#define OV5693_SW_RESET 0x0103
-#define OV5693_SW_STREAM 0x0100
-
-#define OV5693_SC_CMMN_CHIP_ID_H 0x300A
-#define OV5693_SC_CMMN_CHIP_ID_L 0x300B
-#define OV5693_SC_CMMN_SCCB_ID 0x300C
-#define OV5693_SC_CMMN_SUB_ID 0x302A /* process, version*/
-/*Bit[7:4] Group control, Bit[3:0] Group ID*/
-#define OV5693_GROUP_ACCESS 0x3208
-/*
-*Bit[3:0] Bit[19:16] of exposure,
-*remaining 16 bits lies in Reg0x3501&Reg0x3502
-*/
-#define OV5693_EXPOSURE_H 0x3500
-#define OV5693_EXPOSURE_M 0x3501
-#define OV5693_EXPOSURE_L 0x3502
-/*Bit[1:0] means Bit[9:8] of gain*/
-#define OV5693_AGC_H 0x350A
-#define OV5693_AGC_L 0x350B /*Bit[7:0] of gain*/
-
-#define OV5693_HORIZONTAL_START_H 0x3800 /*Bit[11:8]*/
-#define OV5693_HORIZONTAL_START_L 0x3801 /*Bit[7:0]*/
-#define OV5693_VERTICAL_START_H 0x3802 /*Bit[11:8]*/
-#define OV5693_VERTICAL_START_L 0x3803 /*Bit[7:0]*/
-#define OV5693_HORIZONTAL_END_H 0x3804 /*Bit[11:8]*/
-#define OV5693_HORIZONTAL_END_L 0x3805 /*Bit[7:0]*/
-#define OV5693_VERTICAL_END_H 0x3806 /*Bit[11:8]*/
-#define OV5693_VERTICAL_END_L 0x3807 /*Bit[7:0]*/
-#define OV5693_HORIZONTAL_OUTPUT_SIZE_H 0x3808 /*Bit[3:0]*/
-#define OV5693_HORIZONTAL_OUTPUT_SIZE_L 0x3809 /*Bit[7:0]*/
-#define OV5693_VERTICAL_OUTPUT_SIZE_H 0x380a /*Bit[3:0]*/
-#define OV5693_VERTICAL_OUTPUT_SIZE_L 0x380b /*Bit[7:0]*/
-/*High 8-bit, and low 8-bit HTS address is 0x380d*/
-#define OV5693_TIMING_HTS_H 0x380C
-/*High 8-bit, and low 8-bit HTS address is 0x380d*/
-#define OV5693_TIMING_HTS_L 0x380D
-/*High 8-bit, and low 8-bit HTS address is 0x380f*/
-#define OV5693_TIMING_VTS_H 0x380e
-/*High 8-bit, and low 8-bit HTS address is 0x380f*/
-#define OV5693_TIMING_VTS_L 0x380f
-
-#define OV5693_MWB_RED_GAIN_H 0x3400
-#define OV5693_MWB_GREEN_GAIN_H 0x3402
-#define OV5693_MWB_BLUE_GAIN_H 0x3404
-#define OV5693_MWB_GAIN_MAX 0x0fff
-
-#define OV5693_START_STREAMING 0x01
-#define OV5693_STOP_STREAMING 0x00
-
-#define VCM_ADDR 0x0c
-#define VCM_CODE_MSB 0x04
-
-#define OV5693_INVALID_CONFIG 0xffffffff
-
-#define OV5693_VCM_SLEW_STEP 0x30F0
-#define OV5693_VCM_SLEW_STEP_MAX 0x7
-#define OV5693_VCM_SLEW_STEP_MASK 0x7
-#define OV5693_VCM_CODE 0x30F2
-#define OV5693_VCM_SLEW_TIME 0x30F4
-#define OV5693_VCM_SLEW_TIME_MAX 0xffff
-#define OV5693_VCM_ENABLE 0x8000
-
-#define OV5693_VCM_MAX_FOCUS_NEG -1023
-#define OV5693_VCM_MAX_FOCUS_POS 1023
-
-#define DLC_ENABLE 1
-#define DLC_DISABLE 0
-#define VCM_PROTECTION_OFF 0xeca3
-#define VCM_PROTECTION_ON 0xdc51
-#define VCM_DEFAULT_S 0x0
-#define vcm_step_s(a) (u8)(a & 0xf)
-#define vcm_step_mclk(a) (u8)((a >> 4) & 0x3)
-#define vcm_dlc_mclk(dlc, mclk) (u16)((dlc << 3) | mclk | 0xa104)
-#define vcm_tsrc(tsrc) (u16)(tsrc << 3 | 0xf200)
-#define vcm_val(data, s) (u16)(data << 4 | s)
-#define DIRECT_VCM vcm_dlc_mclk(0, 0)
-
-/* Defines for OTP Data Registers */
-#define OV5693_FRAME_OFF_NUM 0x4202
-#define OV5693_OTP_BYTE_MAX 32 //change to 32 as needed by otpdata
-#define OV5693_OTP_SHORT_MAX 16
-#define OV5693_OTP_START_ADDR 0x3D00
-#define OV5693_OTP_END_ADDR 0x3D0F
-#define OV5693_OTP_DATA_SIZE 320
-#define OV5693_OTP_PROGRAM_REG 0x3D80
-#define OV5693_OTP_READ_REG 0x3D81 // 1:Enable 0:disable
-#define OV5693_OTP_BANK_REG 0x3D84 //otp bank and mode
-#define OV5693_OTP_READY_REG_DONE 1
-#define OV5693_OTP_BANK_MAX 28
-#define OV5693_OTP_BANK_SIZE 16 //16 bytes per bank
-#define OV5693_OTP_READ_ONETIME 16
-#define OV5693_OTP_MODE_READ 1
-
-struct regval_list {
- u16 reg_num;
- u8 value;
-};
-
-struct ov5693_resolution {
- u8 *desc;
- const struct ov5693_reg *regs;
- int res;
- int width;
- int height;
- int fps;
- int pix_clk_freq;
- u16 pixels_per_line;
- u16 lines_per_frame;
- bool used;
-};
-
-struct ov5693_format {
- u8 *desc;
- u32 pixelformat;
- struct ov5693_reg *regs;
-};
-
-enum vcm_type {
- VCM_UNKNOWN,
- VCM_AD5823,
- VCM_DW9714,
-};
-
-/*
- * ov5693 device structure.
- */
-struct ov5693_device {
- struct v4l2_subdev sd;
- struct media_pad pad;
- struct v4l2_mbus_framefmt format;
- struct mutex input_lock;
- struct v4l2_ctrl_handler ctrl_handler;
-
- struct camera_sensor_platform_data *platform_data;
- ktime_t timestamp_t_focus_abs;
- int fmt_idx;
- int run_mode;
- int otp_size;
- u8 *otp_data;
- u32 focus;
- s16 number_of_steps;
- u8 res;
- u8 type;
- bool vcm_update;
- enum vcm_type vcm;
-};
-
-enum ov5693_tok_type {
- OV5693_8BIT = 0x0001,
- OV5693_16BIT = 0x0002,
- OV5693_32BIT = 0x0004,
- OV5693_TOK_TERM = 0xf000, /* terminating token for reg list */
- OV5693_TOK_DELAY = 0xfe00, /* delay token for reg list */
- OV5693_TOK_MASK = 0xfff0
-};
-
-/**
- * struct ov5693_reg - MI sensor register format
- * @type: type of the register
- * @reg: 16-bit offset to register
- * @val: 8/16/32-bit register value
- *
- * Define a structure for sensor register initialization values
- */
-struct ov5693_reg {
- enum ov5693_tok_type type;
- u16 reg;
- u32 val; /* @set value for read/mod/write, @mask */
-};
-
-#define to_ov5693_sensor(x) container_of(x, struct ov5693_device, sd)
-
-#define OV5693_MAX_WRITE_BUF_SIZE 30
-
-struct ov5693_write_buffer {
- u16 addr;
- u8 data[OV5693_MAX_WRITE_BUF_SIZE];
-};
-
-struct ov5693_write_ctrl {
- int index;
- struct ov5693_write_buffer buffer;
-};
-
-static struct ov5693_reg const ov5693_global_setting[] = {
- {OV5693_8BIT, 0x0103, 0x01},
- {OV5693_8BIT, 0x3001, 0x0a},
- {OV5693_8BIT, 0x3002, 0x80},
- {OV5693_8BIT, 0x3006, 0x00},
- {OV5693_8BIT, 0x3011, 0x21},
- {OV5693_8BIT, 0x3012, 0x09},
- {OV5693_8BIT, 0x3013, 0x10},
- {OV5693_8BIT, 0x3014, 0x00},
- {OV5693_8BIT, 0x3015, 0x08},
- {OV5693_8BIT, 0x3016, 0xf0},
- {OV5693_8BIT, 0x3017, 0xf0},
- {OV5693_8BIT, 0x3018, 0xf0},
- {OV5693_8BIT, 0x301b, 0xb4},
- {OV5693_8BIT, 0x301d, 0x02},
- {OV5693_8BIT, 0x3021, 0x00},
- {OV5693_8BIT, 0x3022, 0x01},
- {OV5693_8BIT, 0x3028, 0x44},
- {OV5693_8BIT, 0x3098, 0x02},
- {OV5693_8BIT, 0x3099, 0x19},
- {OV5693_8BIT, 0x309a, 0x02},
- {OV5693_8BIT, 0x309b, 0x01},
- {OV5693_8BIT, 0x309c, 0x00},
- {OV5693_8BIT, 0x30a0, 0xd2},
- {OV5693_8BIT, 0x30a2, 0x01},
- {OV5693_8BIT, 0x30b2, 0x00},
- {OV5693_8BIT, 0x30b3, 0x7d},
- {OV5693_8BIT, 0x30b4, 0x03},
- {OV5693_8BIT, 0x30b5, 0x04},
- {OV5693_8BIT, 0x30b6, 0x01},
- {OV5693_8BIT, 0x3104, 0x21},
- {OV5693_8BIT, 0x3106, 0x00},
- {OV5693_8BIT, 0x3400, 0x04},
- {OV5693_8BIT, 0x3401, 0x00},
- {OV5693_8BIT, 0x3402, 0x04},
- {OV5693_8BIT, 0x3403, 0x00},
- {OV5693_8BIT, 0x3404, 0x04},
- {OV5693_8BIT, 0x3405, 0x00},
- {OV5693_8BIT, 0x3406, 0x01},
- {OV5693_8BIT, 0x3500, 0x00},
- {OV5693_8BIT, 0x3503, 0x07},
- {OV5693_8BIT, 0x3504, 0x00},
- {OV5693_8BIT, 0x3505, 0x00},
- {OV5693_8BIT, 0x3506, 0x00},
- {OV5693_8BIT, 0x3507, 0x02},
- {OV5693_8BIT, 0x3508, 0x00},
- {OV5693_8BIT, 0x3509, 0x10},
- {OV5693_8BIT, 0x350a, 0x00},
- {OV5693_8BIT, 0x350b, 0x40},
- {OV5693_8BIT, 0x3601, 0x0a},
- {OV5693_8BIT, 0x3602, 0x38},
- {OV5693_8BIT, 0x3612, 0x80},
- {OV5693_8BIT, 0x3620, 0x54},
- {OV5693_8BIT, 0x3621, 0xc7},
- {OV5693_8BIT, 0x3622, 0x0f},
- {OV5693_8BIT, 0x3625, 0x10},
- {OV5693_8BIT, 0x3630, 0x55},
- {OV5693_8BIT, 0x3631, 0xf4},
- {OV5693_8BIT, 0x3632, 0x00},
- {OV5693_8BIT, 0x3633, 0x34},
- {OV5693_8BIT, 0x3634, 0x02},
- {OV5693_8BIT, 0x364d, 0x0d},
- {OV5693_8BIT, 0x364f, 0xdd},
- {OV5693_8BIT, 0x3660, 0x04},
- {OV5693_8BIT, 0x3662, 0x10},
- {OV5693_8BIT, 0x3663, 0xf1},
- {OV5693_8BIT, 0x3665, 0x00},
- {OV5693_8BIT, 0x3666, 0x20},
- {OV5693_8BIT, 0x3667, 0x00},
- {OV5693_8BIT, 0x366a, 0x80},
- {OV5693_8BIT, 0x3680, 0xe0},
- {OV5693_8BIT, 0x3681, 0x00},
- {OV5693_8BIT, 0x3700, 0x42},
- {OV5693_8BIT, 0x3701, 0x14},
- {OV5693_8BIT, 0x3702, 0xa0},
- {OV5693_8BIT, 0x3703, 0xd8},
- {OV5693_8BIT, 0x3704, 0x78},
- {OV5693_8BIT, 0x3705, 0x02},
- {OV5693_8BIT, 0x370a, 0x00},
- {OV5693_8BIT, 0x370b, 0x20},
- {OV5693_8BIT, 0x370c, 0x0c},
- {OV5693_8BIT, 0x370d, 0x11},
- {OV5693_8BIT, 0x370e, 0x00},
- {OV5693_8BIT, 0x370f, 0x40},
- {OV5693_8BIT, 0x3710, 0x00},
- {OV5693_8BIT, 0x371a, 0x1c},
- {OV5693_8BIT, 0x371b, 0x05},
- {OV5693_8BIT, 0x371c, 0x01},
- {OV5693_8BIT, 0x371e, 0xa1},
- {OV5693_8BIT, 0x371f, 0x0c},
- {OV5693_8BIT, 0x3721, 0x00},
- {OV5693_8BIT, 0x3724, 0x10},
- {OV5693_8BIT, 0x3726, 0x00},
- {OV5693_8BIT, 0x372a, 0x01},
- {OV5693_8BIT, 0x3730, 0x10},
- {OV5693_8BIT, 0x3738, 0x22},
- {OV5693_8BIT, 0x3739, 0xe5},
- {OV5693_8BIT, 0x373a, 0x50},
- {OV5693_8BIT, 0x373b, 0x02},
- {OV5693_8BIT, 0x373c, 0x41},
- {OV5693_8BIT, 0x373f, 0x02},
- {OV5693_8BIT, 0x3740, 0x42},
- {OV5693_8BIT, 0x3741, 0x02},
- {OV5693_8BIT, 0x3742, 0x18},
- {OV5693_8BIT, 0x3743, 0x01},
- {OV5693_8BIT, 0x3744, 0x02},
- {OV5693_8BIT, 0x3747, 0x10},
- {OV5693_8BIT, 0x374c, 0x04},
- {OV5693_8BIT, 0x3751, 0xf0},
- {OV5693_8BIT, 0x3752, 0x00},
- {OV5693_8BIT, 0x3753, 0x00},
- {OV5693_8BIT, 0x3754, 0xc0},
- {OV5693_8BIT, 0x3755, 0x00},
- {OV5693_8BIT, 0x3756, 0x1a},
- {OV5693_8BIT, 0x3758, 0x00},
- {OV5693_8BIT, 0x3759, 0x0f},
- {OV5693_8BIT, 0x376b, 0x44},
- {OV5693_8BIT, 0x375c, 0x04},
- {OV5693_8BIT, 0x3774, 0x10},
- {OV5693_8BIT, 0x3776, 0x00},
- {OV5693_8BIT, 0x377f, 0x08},
- {OV5693_8BIT, 0x3780, 0x22},
- {OV5693_8BIT, 0x3781, 0x0c},
- {OV5693_8BIT, 0x3784, 0x2c},
- {OV5693_8BIT, 0x3785, 0x1e},
- {OV5693_8BIT, 0x378f, 0xf5},
- {OV5693_8BIT, 0x3791, 0xb0},
- {OV5693_8BIT, 0x3795, 0x00},
- {OV5693_8BIT, 0x3796, 0x64},
- {OV5693_8BIT, 0x3797, 0x11},
- {OV5693_8BIT, 0x3798, 0x30},
- {OV5693_8BIT, 0x3799, 0x41},
- {OV5693_8BIT, 0x379a, 0x07},
- {OV5693_8BIT, 0x379b, 0xb0},
- {OV5693_8BIT, 0x379c, 0x0c},
- {OV5693_8BIT, 0x37c5, 0x00},
- {OV5693_8BIT, 0x37c6, 0x00},
- {OV5693_8BIT, 0x37c7, 0x00},
- {OV5693_8BIT, 0x37c9, 0x00},
- {OV5693_8BIT, 0x37ca, 0x00},
- {OV5693_8BIT, 0x37cb, 0x00},
- {OV5693_8BIT, 0x37de, 0x00},
- {OV5693_8BIT, 0x37df, 0x00},
- {OV5693_8BIT, 0x3800, 0x00},
- {OV5693_8BIT, 0x3801, 0x00},
- {OV5693_8BIT, 0x3802, 0x00},
- {OV5693_8BIT, 0x3804, 0x0a},
- {OV5693_8BIT, 0x3805, 0x3f},
- {OV5693_8BIT, 0x3810, 0x00},
- {OV5693_8BIT, 0x3812, 0x00},
- {OV5693_8BIT, 0x3823, 0x00},
- {OV5693_8BIT, 0x3824, 0x00},
- {OV5693_8BIT, 0x3825, 0x00},
- {OV5693_8BIT, 0x3826, 0x00},
- {OV5693_8BIT, 0x3827, 0x00},
- {OV5693_8BIT, 0x382a, 0x04},
- {OV5693_8BIT, 0x3a04, 0x06},
- {OV5693_8BIT, 0x3a05, 0x14},
- {OV5693_8BIT, 0x3a06, 0x00},
- {OV5693_8BIT, 0x3a07, 0xfe},
- {OV5693_8BIT, 0x3b00, 0x00},
- {OV5693_8BIT, 0x3b02, 0x00},
- {OV5693_8BIT, 0x3b03, 0x00},
- {OV5693_8BIT, 0x3b04, 0x00},
- {OV5693_8BIT, 0x3b05, 0x00},
- {OV5693_8BIT, 0x3e07, 0x20},
- {OV5693_8BIT, 0x4000, 0x08},
- {OV5693_8BIT, 0x4001, 0x04},
- {OV5693_8BIT, 0x4002, 0x45},
- {OV5693_8BIT, 0x4004, 0x08},
- {OV5693_8BIT, 0x4005, 0x18},
- {OV5693_8BIT, 0x4006, 0x20},
- {OV5693_8BIT, 0x4008, 0x24},
- {OV5693_8BIT, 0x4009, 0x10},
- {OV5693_8BIT, 0x400c, 0x00},
- {OV5693_8BIT, 0x400d, 0x00},
- {OV5693_8BIT, 0x4058, 0x00},
- {OV5693_8BIT, 0x404e, 0x37},
- {OV5693_8BIT, 0x404f, 0x8f},
- {OV5693_8BIT, 0x4058, 0x00},
- {OV5693_8BIT, 0x4101, 0xb2},
- {OV5693_8BIT, 0x4303, 0x00},
- {OV5693_8BIT, 0x4304, 0x08},
- {OV5693_8BIT, 0x4307, 0x31},
- {OV5693_8BIT, 0x4311, 0x04},
- {OV5693_8BIT, 0x4315, 0x01},
- {OV5693_8BIT, 0x4511, 0x05},
- {OV5693_8BIT, 0x4512, 0x01},
- {OV5693_8BIT, 0x4806, 0x00},
- {OV5693_8BIT, 0x4816, 0x52},
- {OV5693_8BIT, 0x481f, 0x30},
- {OV5693_8BIT, 0x4826, 0x2c},
- {OV5693_8BIT, 0x4831, 0x64},
- {OV5693_8BIT, 0x4d00, 0x04},
- {OV5693_8BIT, 0x4d01, 0x71},
- {OV5693_8BIT, 0x4d02, 0xfd},
- {OV5693_8BIT, 0x4d03, 0xf5},
- {OV5693_8BIT, 0x4d04, 0x0c},
- {OV5693_8BIT, 0x4d05, 0xcc},
- {OV5693_8BIT, 0x4837, 0x0a},
- {OV5693_8BIT, 0x5000, 0x06},
- {OV5693_8BIT, 0x5001, 0x01},
- {OV5693_8BIT, 0x5003, 0x20},
- {OV5693_8BIT, 0x5046, 0x0a},
- {OV5693_8BIT, 0x5013, 0x00},
- {OV5693_8BIT, 0x5046, 0x0a},
- {OV5693_8BIT, 0x5780, 0x1c},
- {OV5693_8BIT, 0x5786, 0x20},
- {OV5693_8BIT, 0x5787, 0x10},
- {OV5693_8BIT, 0x5788, 0x18},
- {OV5693_8BIT, 0x578a, 0x04},
- {OV5693_8BIT, 0x578b, 0x02},
- {OV5693_8BIT, 0x578c, 0x02},
- {OV5693_8BIT, 0x578e, 0x06},
- {OV5693_8BIT, 0x578f, 0x02},
- {OV5693_8BIT, 0x5790, 0x02},
- {OV5693_8BIT, 0x5791, 0xff},
- {OV5693_8BIT, 0x5842, 0x01},
- {OV5693_8BIT, 0x5843, 0x2b},
- {OV5693_8BIT, 0x5844, 0x01},
- {OV5693_8BIT, 0x5845, 0x92},
- {OV5693_8BIT, 0x5846, 0x01},
- {OV5693_8BIT, 0x5847, 0x8f},
- {OV5693_8BIT, 0x5848, 0x01},
- {OV5693_8BIT, 0x5849, 0x0c},
- {OV5693_8BIT, 0x5e00, 0x00},
- {OV5693_8BIT, 0x5e10, 0x0c},
- {OV5693_8BIT, 0x0100, 0x00},
- {OV5693_TOK_TERM, 0, 0}
-};
-
-#if ENABLE_NON_PREVIEW
-/*
- * 654x496 30fps 17ms VBlanking 2lane 10Bit (Scaling)
- */
-static struct ov5693_reg const ov5693_654x496[] = {
- {OV5693_8BIT, 0x3501, 0x3d},
- {OV5693_8BIT, 0x3502, 0x00},
- {OV5693_8BIT, 0x3708, 0xe6},
- {OV5693_8BIT, 0x3709, 0xc7},
- {OV5693_8BIT, 0x3803, 0x00},
- {OV5693_8BIT, 0x3806, 0x07},
- {OV5693_8BIT, 0x3807, 0xa3},
- {OV5693_8BIT, 0x3808, 0x02},
- {OV5693_8BIT, 0x3809, 0x90},
- {OV5693_8BIT, 0x380a, 0x01},
- {OV5693_8BIT, 0x380b, 0xf0},
- {OV5693_8BIT, 0x380c, 0x0a},
- {OV5693_8BIT, 0x380d, 0x80},
- {OV5693_8BIT, 0x380e, 0x07},
- {OV5693_8BIT, 0x380f, 0xc0},
- {OV5693_8BIT, 0x3811, 0x08},
- {OV5693_8BIT, 0x3813, 0x02},
- {OV5693_8BIT, 0x3814, 0x31},
- {OV5693_8BIT, 0x3815, 0x31},
- {OV5693_8BIT, 0x3820, 0x04},
- {OV5693_8BIT, 0x3821, 0x1f},
- {OV5693_8BIT, 0x5002, 0x80},
- {OV5693_8BIT, 0x0100, 0x01},
- {OV5693_TOK_TERM, 0, 0}
-};
-
-/*
- * 1296x976 30fps 17ms VBlanking 2lane 10Bit (Scaling)
-*DS from 2592x1952
-*/
-static struct ov5693_reg const ov5693_1296x976[] = {
- {OV5693_8BIT, 0x3501, 0x7b},
- {OV5693_8BIT, 0x3502, 0x00},
- {OV5693_8BIT, 0x3708, 0xe2},
- {OV5693_8BIT, 0x3709, 0xc3},
-
- {OV5693_8BIT, 0x3800, 0x00},
- {OV5693_8BIT, 0x3801, 0x00},
- {OV5693_8BIT, 0x3802, 0x00},
- {OV5693_8BIT, 0x3803, 0x00},
-
- {OV5693_8BIT, 0x3804, 0x0a},
- {OV5693_8BIT, 0x3805, 0x3f},
- {OV5693_8BIT, 0x3806, 0x07},
- {OV5693_8BIT, 0x3807, 0xA3},
-
- {OV5693_8BIT, 0x3808, 0x05},
- {OV5693_8BIT, 0x3809, 0x10},
- {OV5693_8BIT, 0x380a, 0x03},
- {OV5693_8BIT, 0x380b, 0xD0},
-
- {OV5693_8BIT, 0x380c, 0x0a},
- {OV5693_8BIT, 0x380d, 0x80},
- {OV5693_8BIT, 0x380e, 0x07},
- {OV5693_8BIT, 0x380f, 0xc0},
-
- {OV5693_8BIT, 0x3810, 0x00},
- {OV5693_8BIT, 0x3811, 0x10},
- {OV5693_8BIT, 0x3812, 0x00},
- {OV5693_8BIT, 0x3813, 0x02},
-
- {OV5693_8BIT, 0x3814, 0x11}, /*X subsample control*/
- {OV5693_8BIT, 0x3815, 0x11}, /*Y subsample control*/
- {OV5693_8BIT, 0x3820, 0x00},
- {OV5693_8BIT, 0x3821, 0x1e},
- {OV5693_8BIT, 0x5002, 0x00},
- {OV5693_8BIT, 0x5041, 0x84}, /* scale is auto enabled */
- {OV5693_8BIT, 0x0100, 0x01},
- {OV5693_TOK_TERM, 0, 0}
-
-};
-
-/*
- * 336x256 30fps 17ms VBlanking 2lane 10Bit (Scaling)
- DS from 2564x1956
- */
-static struct ov5693_reg const ov5693_336x256[] = {
- {OV5693_8BIT, 0x3501, 0x3d},
- {OV5693_8BIT, 0x3502, 0x00},
- {OV5693_8BIT, 0x3708, 0xe6},
- {OV5693_8BIT, 0x3709, 0xc7},
- {OV5693_8BIT, 0x3806, 0x07},
- {OV5693_8BIT, 0x3807, 0xa3},
- {OV5693_8BIT, 0x3808, 0x01},
- {OV5693_8BIT, 0x3809, 0x50},
- {OV5693_8BIT, 0x380a, 0x01},
- {OV5693_8BIT, 0x380b, 0x00},
- {OV5693_8BIT, 0x380c, 0x0a},
- {OV5693_8BIT, 0x380d, 0x80},
- {OV5693_8BIT, 0x380e, 0x07},
- {OV5693_8BIT, 0x380f, 0xc0},
- {OV5693_8BIT, 0x3811, 0x1E},
- {OV5693_8BIT, 0x3814, 0x31},
- {OV5693_8BIT, 0x3815, 0x31},
- {OV5693_8BIT, 0x3820, 0x04},
- {OV5693_8BIT, 0x3821, 0x1f},
- {OV5693_8BIT, 0x5002, 0x80},
- {OV5693_8BIT, 0x0100, 0x01},
- {OV5693_TOK_TERM, 0, 0}
-};
-
-/*
- * 336x256 30fps 17ms VBlanking 2lane 10Bit (Scaling)
- DS from 2368x1956
- */
-static struct ov5693_reg const ov5693_368x304[] = {
- {OV5693_8BIT, 0x3501, 0x3d},
- {OV5693_8BIT, 0x3502, 0x00},
- {OV5693_8BIT, 0x3708, 0xe6},
- {OV5693_8BIT, 0x3709, 0xc7},
- {OV5693_8BIT, 0x3808, 0x01},
- {OV5693_8BIT, 0x3809, 0x70},
- {OV5693_8BIT, 0x380a, 0x01},
- {OV5693_8BIT, 0x380b, 0x30},
- {OV5693_8BIT, 0x380c, 0x0a},
- {OV5693_8BIT, 0x380d, 0x80},
- {OV5693_8BIT, 0x380e, 0x07},
- {OV5693_8BIT, 0x380f, 0xc0},
- {OV5693_8BIT, 0x3811, 0x80},
- {OV5693_8BIT, 0x3814, 0x31},
- {OV5693_8BIT, 0x3815, 0x31},
- {OV5693_8BIT, 0x3820, 0x04},
- {OV5693_8BIT, 0x3821, 0x1f},
- {OV5693_8BIT, 0x5002, 0x80},
- {OV5693_8BIT, 0x0100, 0x01},
- {OV5693_TOK_TERM, 0, 0}
-};
-
-/*
- * ov5693_192x160 30fps 17ms VBlanking 2lane 10Bit (Scaling)
- DS from 2460x1956
- */
-static struct ov5693_reg const ov5693_192x160[] = {
- {OV5693_8BIT, 0x3501, 0x7b},
- {OV5693_8BIT, 0x3502, 0x80},
- {OV5693_8BIT, 0x3708, 0xe2},
- {OV5693_8BIT, 0x3709, 0xc3},
- {OV5693_8BIT, 0x3804, 0x0a},
- {OV5693_8BIT, 0x3805, 0x3f},
- {OV5693_8BIT, 0x3806, 0x07},
- {OV5693_8BIT, 0x3807, 0xA3},
- {OV5693_8BIT, 0x3808, 0x00},
- {OV5693_8BIT, 0x3809, 0xC0},
- {OV5693_8BIT, 0x380a, 0x00},
- {OV5693_8BIT, 0x380b, 0xA0},
- {OV5693_8BIT, 0x380c, 0x0a},
- {OV5693_8BIT, 0x380d, 0x80},
- {OV5693_8BIT, 0x380e, 0x07},
- {OV5693_8BIT, 0x380f, 0xc0},
- {OV5693_8BIT, 0x3811, 0x40},
- {OV5693_8BIT, 0x3813, 0x00},
- {OV5693_8BIT, 0x3814, 0x31},
- {OV5693_8BIT, 0x3815, 0x31},
- {OV5693_8BIT, 0x3820, 0x04},
- {OV5693_8BIT, 0x3821, 0x1f},
- {OV5693_8BIT, 0x5002, 0x80},
- {OV5693_8BIT, 0x0100, 0x01},
- {OV5693_TOK_TERM, 0, 0}
-};
-
-static struct ov5693_reg const ov5693_736x496[] = {
- {OV5693_8BIT, 0x3501, 0x3d},
- {OV5693_8BIT, 0x3502, 0x00},
- {OV5693_8BIT, 0x3708, 0xe6},
- {OV5693_8BIT, 0x3709, 0xc7},
- {OV5693_8BIT, 0x3803, 0x68},
- {OV5693_8BIT, 0x3806, 0x07},
- {OV5693_8BIT, 0x3807, 0x3b},
- {OV5693_8BIT, 0x3808, 0x02},
- {OV5693_8BIT, 0x3809, 0xe0},
- {OV5693_8BIT, 0x380a, 0x01},
- {OV5693_8BIT, 0x380b, 0xf0},
- {OV5693_8BIT, 0x380c, 0x0a}, /*hts*/
- {OV5693_8BIT, 0x380d, 0x80},
- {OV5693_8BIT, 0x380e, 0x07}, /*vts*/
- {OV5693_8BIT, 0x380f, 0xc0},
- {OV5693_8BIT, 0x3811, 0x08},
- {OV5693_8BIT, 0x3813, 0x02},
- {OV5693_8BIT, 0x3814, 0x31},
- {OV5693_8BIT, 0x3815, 0x31},
- {OV5693_8BIT, 0x3820, 0x04},
- {OV5693_8BIT, 0x3821, 0x1f},
- {OV5693_8BIT, 0x5002, 0x80},
- {OV5693_8BIT, 0x0100, 0x01},
- {OV5693_TOK_TERM, 0, 0}
-};
-#endif
-
-/*
-static struct ov5693_reg const ov5693_736x496[] = {
- {OV5693_8BIT, 0x3501, 0x7b},
- {OV5693_8BIT, 0x3502, 0x00},
- {OV5693_8BIT, 0x3708, 0xe6},
- {OV5693_8BIT, 0x3709, 0xc3},
- {OV5693_8BIT, 0x3803, 0x00},
- {OV5693_8BIT, 0x3806, 0x07},
- {OV5693_8BIT, 0x3807, 0xa3},
- {OV5693_8BIT, 0x3808, 0x02},
- {OV5693_8BIT, 0x3809, 0xe0},
- {OV5693_8BIT, 0x380a, 0x01},
- {OV5693_8BIT, 0x380b, 0xf0},
- {OV5693_8BIT, 0x380c, 0x0d},
- {OV5693_8BIT, 0x380d, 0xb0},
- {OV5693_8BIT, 0x380e, 0x05},
- {OV5693_8BIT, 0x380f, 0xf2},
- {OV5693_8BIT, 0x3811, 0x08},
- {OV5693_8BIT, 0x3813, 0x02},
- {OV5693_8BIT, 0x3814, 0x31},
- {OV5693_8BIT, 0x3815, 0x31},
- {OV5693_8BIT, 0x3820, 0x01},
- {OV5693_8BIT, 0x3821, 0x1f},
- {OV5693_8BIT, 0x5002, 0x00},
- {OV5693_8BIT, 0x0100, 0x01},
- {OV5693_TOK_TERM, 0, 0}
-};
-*/
-/*
- * 976x556 30fps 8.8ms VBlanking 2lane 10Bit (Scaling)
- */
-#if ENABLE_NON_PREVIEW
-static struct ov5693_reg const ov5693_976x556[] = {
- {OV5693_8BIT, 0x3501, 0x7b},
- {OV5693_8BIT, 0x3502, 0x00},
- {OV5693_8BIT, 0x3708, 0xe2},
- {OV5693_8BIT, 0x3709, 0xc3},
- {OV5693_8BIT, 0x3803, 0xf0},
- {OV5693_8BIT, 0x3806, 0x06},
- {OV5693_8BIT, 0x3807, 0xa7},
- {OV5693_8BIT, 0x3808, 0x03},
- {OV5693_8BIT, 0x3809, 0xd0},
- {OV5693_8BIT, 0x380a, 0x02},
- {OV5693_8BIT, 0x380b, 0x2C},
- {OV5693_8BIT, 0x380c, 0x0a},
- {OV5693_8BIT, 0x380d, 0x80},
- {OV5693_8BIT, 0x380e, 0x07},
- {OV5693_8BIT, 0x380f, 0xc0},
- {OV5693_8BIT, 0x3811, 0x10},
- {OV5693_8BIT, 0x3813, 0x02},
- {OV5693_8BIT, 0x3814, 0x11},
- {OV5693_8BIT, 0x3815, 0x11},
- {OV5693_8BIT, 0x3820, 0x00},
- {OV5693_8BIT, 0x3821, 0x1e},
- {OV5693_8BIT, 0x5002, 0x80},
- {OV5693_8BIT, 0x0100, 0x01},
- {OV5693_TOK_TERM, 0, 0}
-};
-
-/*DS from 2624x1492*/
-static struct ov5693_reg const ov5693_1296x736[] = {
- {OV5693_8BIT, 0x3501, 0x7b},
- {OV5693_8BIT, 0x3502, 0x00},
- {OV5693_8BIT, 0x3708, 0xe2},
- {OV5693_8BIT, 0x3709, 0xc3},
-
- {OV5693_8BIT, 0x3800, 0x00},
- {OV5693_8BIT, 0x3801, 0x00},
- {OV5693_8BIT, 0x3802, 0x00},
- {OV5693_8BIT, 0x3803, 0x00},
-
- {OV5693_8BIT, 0x3804, 0x0a},
- {OV5693_8BIT, 0x3805, 0x3f},
- {OV5693_8BIT, 0x3806, 0x07},
- {OV5693_8BIT, 0x3807, 0xA3},
-
- {OV5693_8BIT, 0x3808, 0x05},
- {OV5693_8BIT, 0x3809, 0x10},
- {OV5693_8BIT, 0x380a, 0x02},
- {OV5693_8BIT, 0x380b, 0xe0},
-
- {OV5693_8BIT, 0x380c, 0x0a},
- {OV5693_8BIT, 0x380d, 0x80},
- {OV5693_8BIT, 0x380e, 0x07},
- {OV5693_8BIT, 0x380f, 0xc0},
-
- {OV5693_8BIT, 0x3813, 0xE8},
-
- {OV5693_8BIT, 0x3814, 0x11}, /*X subsample control*/
- {OV5693_8BIT, 0x3815, 0x11}, /*Y subsample control*/
- {OV5693_8BIT, 0x3820, 0x00},
- {OV5693_8BIT, 0x3821, 0x1e},
- {OV5693_8BIT, 0x5002, 0x00},
- {OV5693_8BIT, 0x5041, 0x84}, /* scale is auto enabled */
- {OV5693_8BIT, 0x0100, 0x01},
- {OV5693_TOK_TERM, 0, 0}
-};
-
-static struct ov5693_reg const ov5693_1636p_30fps[] = {
- {OV5693_8BIT, 0x3501, 0x7b},
- {OV5693_8BIT, 0x3502, 0x00},
- {OV5693_8BIT, 0x3708, 0xe2},
- {OV5693_8BIT, 0x3709, 0xc3},
- {OV5693_8BIT, 0x3803, 0xf0},
- {OV5693_8BIT, 0x3806, 0x06},
- {OV5693_8BIT, 0x3807, 0xa7},
- {OV5693_8BIT, 0x3808, 0x06},
- {OV5693_8BIT, 0x3809, 0x64},
- {OV5693_8BIT, 0x380a, 0x04},
- {OV5693_8BIT, 0x380b, 0x48},
- {OV5693_8BIT, 0x380c, 0x0a}, /*hts*/
- {OV5693_8BIT, 0x380d, 0x80},
- {OV5693_8BIT, 0x380e, 0x07}, /*vts*/
- {OV5693_8BIT, 0x380f, 0xc0},
- {OV5693_8BIT, 0x3811, 0x02},
- {OV5693_8BIT, 0x3813, 0x02},
- {OV5693_8BIT, 0x3814, 0x11},
- {OV5693_8BIT, 0x3815, 0x11},
- {OV5693_8BIT, 0x3820, 0x00},
- {OV5693_8BIT, 0x3821, 0x1e},
- {OV5693_8BIT, 0x5002, 0x80},
- {OV5693_8BIT, 0x0100, 0x01},
- {OV5693_TOK_TERM, 0, 0}
-};
-#endif
-
-static struct ov5693_reg const ov5693_1616x1216_30fps[] = {
- {OV5693_8BIT, 0x3501, 0x7b},
- {OV5693_8BIT, 0x3502, 0x80},
- {OV5693_8BIT, 0x3708, 0xe2},
- {OV5693_8BIT, 0x3709, 0xc3},
- {OV5693_8BIT, 0x3800, 0x00}, /*{3800,3801} Array X start*/
- {OV5693_8BIT, 0x3801, 0x08}, /* 04 //{3800,3801} Array X start*/
- {OV5693_8BIT, 0x3802, 0x00}, /*{3802,3803} Array Y start*/
- {OV5693_8BIT, 0x3803, 0x04}, /* 00 //{3802,3803} Array Y start*/
- {OV5693_8BIT, 0x3804, 0x0a}, /*{3804,3805} Array X end*/
- {OV5693_8BIT, 0x3805, 0x37}, /* 3b //{3804,3805} Array X end*/
- {OV5693_8BIT, 0x3806, 0x07}, /*{3806,3807} Array Y end*/
- {OV5693_8BIT, 0x3807, 0x9f}, /* a3 //{3806,3807} Array Y end*/
- {OV5693_8BIT, 0x3808, 0x06}, /*{3808,3809} Final output H size*/
- {OV5693_8BIT, 0x3809, 0x50}, /*{3808,3809} Final output H size*/
- {OV5693_8BIT, 0x380a, 0x04}, /*{380a,380b} Final output V size*/
- {OV5693_8BIT, 0x380b, 0xc0}, /*{380a,380b} Final output V size*/
- {OV5693_8BIT, 0x380c, 0x0a}, /*{380c,380d} HTS*/
- {OV5693_8BIT, 0x380d, 0x80}, /*{380c,380d} HTS*/
- {OV5693_8BIT, 0x380e, 0x07}, /*{380e,380f} VTS*/
- {OV5693_8BIT, 0x380f, 0xc0}, /* bc //{380e,380f} VTS*/
- {OV5693_8BIT, 0x3810, 0x00}, /*{3810,3811} windowing X offset*/
- {OV5693_8BIT, 0x3811, 0x10}, /*{3810,3811} windowing X offset*/
- {OV5693_8BIT, 0x3812, 0x00}, /*{3812,3813} windowing Y offset*/
- {OV5693_8BIT, 0x3813, 0x06}, /*{3812,3813} windowing Y offset*/
- {OV5693_8BIT, 0x3814, 0x11}, /*X subsample control*/
- {OV5693_8BIT, 0x3815, 0x11}, /*Y subsample control*/
- {OV5693_8BIT, 0x3820, 0x00}, /*FLIP/Binning control*/
- {OV5693_8BIT, 0x3821, 0x1e}, /*MIRROR control*/
- {OV5693_8BIT, 0x5002, 0x00},
- {OV5693_8BIT, 0x5041, 0x84},
- {OV5693_8BIT, 0x0100, 0x01},
- {OV5693_TOK_TERM, 0, 0}
-};
-
-/*
- * 1940x1096 30fps 8.8ms VBlanking 2lane 10bit (Scaling)
- */
-#if ENABLE_NON_PREVIEW
-static struct ov5693_reg const ov5693_1940x1096[] = {
- {OV5693_8BIT, 0x3501, 0x7b},
- {OV5693_8BIT, 0x3502, 0x00},
- {OV5693_8BIT, 0x3708, 0xe2},
- {OV5693_8BIT, 0x3709, 0xc3},
- {OV5693_8BIT, 0x3803, 0xf0},
- {OV5693_8BIT, 0x3806, 0x06},
- {OV5693_8BIT, 0x3807, 0xa7},
- {OV5693_8BIT, 0x3808, 0x07},
- {OV5693_8BIT, 0x3809, 0x94},
- {OV5693_8BIT, 0x380a, 0x04},
- {OV5693_8BIT, 0x380b, 0x48},
- {OV5693_8BIT, 0x380c, 0x0a},
- {OV5693_8BIT, 0x380d, 0x80},
- {OV5693_8BIT, 0x380e, 0x07},
- {OV5693_8BIT, 0x380f, 0xc0},
- {OV5693_8BIT, 0x3811, 0x02},
- {OV5693_8BIT, 0x3813, 0x02},
- {OV5693_8BIT, 0x3814, 0x11},
- {OV5693_8BIT, 0x3815, 0x11},
- {OV5693_8BIT, 0x3820, 0x00},
- {OV5693_8BIT, 0x3821, 0x1e},
- {OV5693_8BIT, 0x5002, 0x80},
- {OV5693_8BIT, 0x0100, 0x01},
- {OV5693_TOK_TERM, 0, 0}
-};
-
-static struct ov5693_reg const ov5693_2592x1456_30fps[] = {
- {OV5693_8BIT, 0x3501, 0x7b},
- {OV5693_8BIT, 0x3502, 0x00},
- {OV5693_8BIT, 0x3708, 0xe2},
- {OV5693_8BIT, 0x3709, 0xc3},
- {OV5693_8BIT, 0x3800, 0x00},
- {OV5693_8BIT, 0x3801, 0x00},
- {OV5693_8BIT, 0x3802, 0x00},
- {OV5693_8BIT, 0x3803, 0xf0},
- {OV5693_8BIT, 0x3804, 0x0a},
- {OV5693_8BIT, 0x3805, 0x3f},
- {OV5693_8BIT, 0x3806, 0x06},
- {OV5693_8BIT, 0x3807, 0xa4},
- {OV5693_8BIT, 0x3808, 0x0a},
- {OV5693_8BIT, 0x3809, 0x20},
- {OV5693_8BIT, 0x380a, 0x05},
- {OV5693_8BIT, 0x380b, 0xb0},
- {OV5693_8BIT, 0x380c, 0x0a},
- {OV5693_8BIT, 0x380d, 0x80},
- {OV5693_8BIT, 0x380e, 0x07},
- {OV5693_8BIT, 0x380f, 0xc0},
- {OV5693_8BIT, 0x3811, 0x10},
- {OV5693_8BIT, 0x3813, 0x00},
- {OV5693_8BIT, 0x3814, 0x11},
- {OV5693_8BIT, 0x3815, 0x11},
- {OV5693_8BIT, 0x3820, 0x00},
- {OV5693_8BIT, 0x3821, 0x1e},
- {OV5693_8BIT, 0x5002, 0x00},
- {OV5693_TOK_TERM, 0, 0}
-};
-#endif
-
-static struct ov5693_reg const ov5693_2576x1456_30fps[] = {
- {OV5693_8BIT, 0x3501, 0x7b},
- {OV5693_8BIT, 0x3502, 0x00},
- {OV5693_8BIT, 0x3708, 0xe2},
- {OV5693_8BIT, 0x3709, 0xc3},
- {OV5693_8BIT, 0x3800, 0x00},
- {OV5693_8BIT, 0x3801, 0x00},
- {OV5693_8BIT, 0x3802, 0x00},
- {OV5693_8BIT, 0x3803, 0xf0},
- {OV5693_8BIT, 0x3804, 0x0a},
- {OV5693_8BIT, 0x3805, 0x3f},
- {OV5693_8BIT, 0x3806, 0x06},
- {OV5693_8BIT, 0x3807, 0xa4},
- {OV5693_8BIT, 0x3808, 0x0a},
- {OV5693_8BIT, 0x3809, 0x10},
- {OV5693_8BIT, 0x380a, 0x05},
- {OV5693_8BIT, 0x380b, 0xb0},
- {OV5693_8BIT, 0x380c, 0x0a},
- {OV5693_8BIT, 0x380d, 0x80},
- {OV5693_8BIT, 0x380e, 0x07},
- {OV5693_8BIT, 0x380f, 0xc0},
- {OV5693_8BIT, 0x3811, 0x18},
- {OV5693_8BIT, 0x3813, 0x00},
- {OV5693_8BIT, 0x3814, 0x11},
- {OV5693_8BIT, 0x3815, 0x11},
- {OV5693_8BIT, 0x3820, 0x00},
- {OV5693_8BIT, 0x3821, 0x1e},
- {OV5693_8BIT, 0x5002, 0x00},
- {OV5693_TOK_TERM, 0, 0}
-};
-
-/*
- * 2592x1944 30fps 0.6ms VBlanking 2lane 10Bit
- */
-#if ENABLE_NON_PREVIEW
-static struct ov5693_reg const ov5693_2592x1944_30fps[] = {
- {OV5693_8BIT, 0x3501, 0x7b},
- {OV5693_8BIT, 0x3502, 0x00},
- {OV5693_8BIT, 0x3708, 0xe2},
- {OV5693_8BIT, 0x3709, 0xc3},
- {OV5693_8BIT, 0x3803, 0x00},
- {OV5693_8BIT, 0x3806, 0x07},
- {OV5693_8BIT, 0x3807, 0xa3},
- {OV5693_8BIT, 0x3808, 0x0a},
- {OV5693_8BIT, 0x3809, 0x20},
- {OV5693_8BIT, 0x380a, 0x07},
- {OV5693_8BIT, 0x380b, 0x98},
- {OV5693_8BIT, 0x380c, 0x0a},
- {OV5693_8BIT, 0x380d, 0x80},
- {OV5693_8BIT, 0x380e, 0x07},
- {OV5693_8BIT, 0x380f, 0xc0},
- {OV5693_8BIT, 0x3811, 0x10},
- {OV5693_8BIT, 0x3813, 0x00},
- {OV5693_8BIT, 0x3814, 0x11},
- {OV5693_8BIT, 0x3815, 0x11},
- {OV5693_8BIT, 0x3820, 0x00},
- {OV5693_8BIT, 0x3821, 0x1e},
- {OV5693_8BIT, 0x5002, 0x00},
- {OV5693_8BIT, 0x0100, 0x01},
- {OV5693_TOK_TERM, 0, 0}
-};
-#endif
-
-/*
- * 11:9 Full FOV Output, expected FOV Res: 2346x1920
- * ISP Effect Res: 1408x1152
- * Sensor out: 1424x1168, DS From: 2380x1952
- *
- * WA: Left Offset: 8, Hor scal: 64
- */
-#if ENABLE_NON_PREVIEW
-static struct ov5693_reg const ov5693_1424x1168_30fps[] = {
- {OV5693_8BIT, 0x3501, 0x3b}, /* long exposure[15:8] */
- {OV5693_8BIT, 0x3502, 0x80}, /* long exposure[7:0] */
- {OV5693_8BIT, 0x3708, 0xe2},
- {OV5693_8BIT, 0x3709, 0xc3},
- {OV5693_8BIT, 0x3800, 0x00}, /* TIMING_X_ADDR_START */
- {OV5693_8BIT, 0x3801, 0x50}, /* 80 */
- {OV5693_8BIT, 0x3802, 0x00}, /* TIMING_Y_ADDR_START */
- {OV5693_8BIT, 0x3803, 0x02}, /* 2 */
- {OV5693_8BIT, 0x3804, 0x09}, /* TIMING_X_ADDR_END */
- {OV5693_8BIT, 0x3805, 0xdd}, /* 2525 */
- {OV5693_8BIT, 0x3806, 0x07}, /* TIMING_Y_ADDR_END */
- {OV5693_8BIT, 0x3807, 0xa1}, /* 1953 */
- {OV5693_8BIT, 0x3808, 0x05}, /* TIMING_X_OUTPUT_SIZE */
- {OV5693_8BIT, 0x3809, 0x90}, /* 1424 */
- {OV5693_8BIT, 0x380a, 0x04}, /* TIMING_Y_OUTPUT_SIZE */
- {OV5693_8BIT, 0x380b, 0x90}, /* 1168 */
- {OV5693_8BIT, 0x380c, 0x0a}, /* TIMING_HTS */
- {OV5693_8BIT, 0x380d, 0x80},
- {OV5693_8BIT, 0x380e, 0x07}, /* TIMING_VTS */
- {OV5693_8BIT, 0x380f, 0xc0},
- {OV5693_8BIT, 0x3810, 0x00}, /* TIMING_ISP_X_WIN */
- {OV5693_8BIT, 0x3811, 0x02}, /* 2 */
- {OV5693_8BIT, 0x3812, 0x00}, /* TIMING_ISP_Y_WIN */
- {OV5693_8BIT, 0x3813, 0x00}, /* 0 */
- {OV5693_8BIT, 0x3814, 0x11}, /* TIME_X_INC */
- {OV5693_8BIT, 0x3815, 0x11}, /* TIME_Y_INC */
- {OV5693_8BIT, 0x3820, 0x00},
- {OV5693_8BIT, 0x3821, 0x1e},
- {OV5693_8BIT, 0x5002, 0x00},
- {OV5693_8BIT, 0x5041, 0x84}, /* scale is auto enabled */
- {OV5693_8BIT, 0x0100, 0x01},
- {OV5693_TOK_TERM, 0, 0}
-};
-#endif
-
-/*
- * 3:2 Full FOV Output, expected FOV Res: 2560x1706
- * ISP Effect Res: 720x480
- * Sensor out: 736x496, DS From 2616x1764
- */
-static struct ov5693_reg const ov5693_736x496_30fps[] = {
- {OV5693_8BIT, 0x3501, 0x3b}, /* long exposure[15:8] */
- {OV5693_8BIT, 0x3502, 0x80}, /* long exposure[7:0] */
- {OV5693_8BIT, 0x3708, 0xe2},
- {OV5693_8BIT, 0x3709, 0xc3},
- {OV5693_8BIT, 0x3800, 0x00}, /* TIMING_X_ADDR_START */
- {OV5693_8BIT, 0x3801, 0x02}, /* 2 */
- {OV5693_8BIT, 0x3802, 0x00}, /* TIMING_Y_ADDR_START */
- {OV5693_8BIT, 0x3803, 0x62}, /* 98 */
- {OV5693_8BIT, 0x3804, 0x0a}, /* TIMING_X_ADDR_END */
- {OV5693_8BIT, 0x3805, 0x3b}, /* 2619 */
- {OV5693_8BIT, 0x3806, 0x07}, /* TIMING_Y_ADDR_END */
- {OV5693_8BIT, 0x3807, 0x43}, /* 1859 */
- {OV5693_8BIT, 0x3808, 0x02}, /* TIMING_X_OUTPUT_SIZE */
- {OV5693_8BIT, 0x3809, 0xe0}, /* 736 */
- {OV5693_8BIT, 0x380a, 0x01}, /* TIMING_Y_OUTPUT_SIZE */
- {OV5693_8BIT, 0x380b, 0xf0}, /* 496 */
- {OV5693_8BIT, 0x380c, 0x0a}, /* TIMING_HTS */
- {OV5693_8BIT, 0x380d, 0x80},
- {OV5693_8BIT, 0x380e, 0x07}, /* TIMING_VTS */
- {OV5693_8BIT, 0x380f, 0xc0},
- {OV5693_8BIT, 0x3810, 0x00}, /* TIMING_ISP_X_WIN */
- {OV5693_8BIT, 0x3811, 0x02}, /* 2 */
- {OV5693_8BIT, 0x3812, 0x00}, /* TIMING_ISP_Y_WIN */
- {OV5693_8BIT, 0x3813, 0x00}, /* 0 */
- {OV5693_8BIT, 0x3814, 0x11}, /* TIME_X_INC */
- {OV5693_8BIT, 0x3815, 0x11}, /* TIME_Y_INC */
- {OV5693_8BIT, 0x3820, 0x00},
- {OV5693_8BIT, 0x3821, 0x1e},
- {OV5693_8BIT, 0x5002, 0x00},
- {OV5693_8BIT, 0x5041, 0x84}, /* scale is auto enabled */
- {OV5693_8BIT, 0x0100, 0x01},
- {OV5693_TOK_TERM, 0, 0}
-};
-
-static struct ov5693_reg const ov5693_2576x1936_30fps[] = {
- {OV5693_8BIT, 0x3501, 0x7b},
- {OV5693_8BIT, 0x3502, 0x00},
- {OV5693_8BIT, 0x3708, 0xe2},
- {OV5693_8BIT, 0x3709, 0xc3},
- {OV5693_8BIT, 0x3803, 0x00},
- {OV5693_8BIT, 0x3806, 0x07},
- {OV5693_8BIT, 0x3807, 0xa3},
- {OV5693_8BIT, 0x3808, 0x0a},
- {OV5693_8BIT, 0x3809, 0x10},
- {OV5693_8BIT, 0x380a, 0x07},
- {OV5693_8BIT, 0x380b, 0x90},
- {OV5693_8BIT, 0x380c, 0x0a},
- {OV5693_8BIT, 0x380d, 0x80},
- {OV5693_8BIT, 0x380e, 0x07},
- {OV5693_8BIT, 0x380f, 0xc0},
- {OV5693_8BIT, 0x3811, 0x18},
- {OV5693_8BIT, 0x3813, 0x00},
- {OV5693_8BIT, 0x3814, 0x11},
- {OV5693_8BIT, 0x3815, 0x11},
- {OV5693_8BIT, 0x3820, 0x00},
- {OV5693_8BIT, 0x3821, 0x1e},
- {OV5693_8BIT, 0x5002, 0x00},
- {OV5693_8BIT, 0x0100, 0x01},
- {OV5693_TOK_TERM, 0, 0}
-};
-
-static struct ov5693_resolution ov5693_res_preview[] = {
- {
- .desc = "ov5693_736x496_30fps",
- .width = 736,
- .height = 496,
- .pix_clk_freq = 160,
- .fps = 30,
- .used = 0,
- .pixels_per_line = 2688,
- .lines_per_frame = 1984,
- .regs = ov5693_736x496_30fps,
- },
- {
- .desc = "ov5693_1616x1216_30fps",
- .width = 1616,
- .height = 1216,
- .pix_clk_freq = 160,
- .fps = 30,
- .used = 0,
- .pixels_per_line = 2688,
- .lines_per_frame = 1984,
- .regs = ov5693_1616x1216_30fps,
- },
- {
- .desc = "ov5693_5M_30fps",
- .width = 2576,
- .height = 1456,
- .pix_clk_freq = 160,
- .fps = 30,
- .used = 0,
- .pixels_per_line = 2688,
- .lines_per_frame = 1984,
- .regs = ov5693_2576x1456_30fps,
- },
- {
- .desc = "ov5693_5M_30fps",
- .width = 2576,
- .height = 1936,
- .pix_clk_freq = 160,
- .fps = 30,
- .used = 0,
- .pixels_per_line = 2688,
- .lines_per_frame = 1984,
- .regs = ov5693_2576x1936_30fps,
- },
-};
-
-#define N_RES_PREVIEW (ARRAY_SIZE(ov5693_res_preview))
-
-/*
- * Disable non-preview configurations until the configuration selection is
- * improved.
- */
-#if ENABLE_NON_PREVIEW
-struct ov5693_resolution ov5693_res_still[] = {
- {
- .desc = "ov5693_736x496_30fps",
- .width = 736,
- .height = 496,
- .pix_clk_freq = 160,
- .fps = 30,
- .used = 0,
- .pixels_per_line = 2688,
- .lines_per_frame = 1984,
- .regs = ov5693_736x496_30fps,
- },
- {
- .desc = "ov5693_1424x1168_30fps",
- .width = 1424,
- .height = 1168,
- .pix_clk_freq = 160,
- .fps = 30,
- .used = 0,
- .pixels_per_line = 2688,
- .lines_per_frame = 1984,
- .regs = ov5693_1424x1168_30fps,
- },
- {
- .desc = "ov5693_1616x1216_30fps",
- .width = 1616,
- .height = 1216,
- .pix_clk_freq = 160,
- .fps = 30,
- .used = 0,
- .pixels_per_line = 2688,
- .lines_per_frame = 1984,
- .regs = ov5693_1616x1216_30fps,
- },
- {
- .desc = "ov5693_5M_30fps",
- .width = 2592,
- .height = 1456,
- .pix_clk_freq = 160,
- .fps = 30,
- .used = 0,
- .pixels_per_line = 2688,
- .lines_per_frame = 1984,
- .regs = ov5693_2592x1456_30fps,
- },
- {
- .desc = "ov5693_5M_30fps",
- .width = 2592,
- .height = 1944,
- .pix_clk_freq = 160,
- .fps = 30,
- .used = 0,
- .pixels_per_line = 2688,
- .lines_per_frame = 1984,
- .regs = ov5693_2592x1944_30fps,
- },
-};
-
-#define N_RES_STILL (ARRAY_SIZE(ov5693_res_still))
-
-struct ov5693_resolution ov5693_res_video[] = {
- {
- .desc = "ov5693_736x496_30fps",
- .width = 736,
- .height = 496,
- .fps = 30,
- .pix_clk_freq = 160,
- .used = 0,
- .pixels_per_line = 2688,
- .lines_per_frame = 1984,
- .regs = ov5693_736x496,
- },
- {
- .desc = "ov5693_336x256_30fps",
- .width = 336,
- .height = 256,
- .fps = 30,
- .pix_clk_freq = 160,
- .used = 0,
- .pixels_per_line = 2688,
- .lines_per_frame = 1984,
- .regs = ov5693_336x256,
- },
- {
- .desc = "ov5693_368x304_30fps",
- .width = 368,
- .height = 304,
- .fps = 30,
- .pix_clk_freq = 160,
- .used = 0,
- .pixels_per_line = 2688,
- .lines_per_frame = 1984,
- .regs = ov5693_368x304,
- },
- {
- .desc = "ov5693_192x160_30fps",
- .width = 192,
- .height = 160,
- .fps = 30,
- .pix_clk_freq = 160,
- .used = 0,
- .pixels_per_line = 2688,
- .lines_per_frame = 1984,
- .regs = ov5693_192x160,
- },
- {
- .desc = "ov5693_1296x736_30fps",
- .width = 1296,
- .height = 736,
- .fps = 30,
- .pix_clk_freq = 160,
- .used = 0,
- .pixels_per_line = 2688,
- .lines_per_frame = 1984,
- .regs = ov5693_1296x736,
- },
- {
- .desc = "ov5693_1296x976_30fps",
- .width = 1296,
- .height = 976,
- .fps = 30,
- .pix_clk_freq = 160,
- .used = 0,
- .pixels_per_line = 2688,
- .lines_per_frame = 1984,
- .regs = ov5693_1296x976,
- },
- {
- .desc = "ov5693_1636P_30fps",
- .width = 1636,
- .height = 1096,
- .fps = 30,
- .pix_clk_freq = 160,
- .used = 0,
- .pixels_per_line = 2688,
- .lines_per_frame = 1984,
- .regs = ov5693_1636p_30fps,
- },
- {
- .desc = "ov5693_1080P_30fps",
- .width = 1940,
- .height = 1096,
- .fps = 30,
- .pix_clk_freq = 160,
- .used = 0,
- .pixels_per_line = 2688,
- .lines_per_frame = 1984,
- .regs = ov5693_1940x1096,
- },
- {
- .desc = "ov5693_5M_30fps",
- .width = 2592,
- .height = 1456,
- .pix_clk_freq = 160,
- .fps = 30,
- .used = 0,
- .pixels_per_line = 2688,
- .lines_per_frame = 1984,
- .regs = ov5693_2592x1456_30fps,
- },
- {
- .desc = "ov5693_5M_30fps",
- .width = 2592,
- .height = 1944,
- .pix_clk_freq = 160,
- .fps = 30,
- .used = 0,
- .pixels_per_line = 2688,
- .lines_per_frame = 1984,
- .regs = ov5693_2592x1944_30fps,
- },
-};
-
-#define N_RES_VIDEO (ARRAY_SIZE(ov5693_res_video))
-#endif
-
-static struct ov5693_resolution *ov5693_res = ov5693_res_preview;
-static unsigned long N_RES = N_RES_PREVIEW;
-#endif
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp.h b/drivers/staging/media/atomisp/include/linux/atomisp.h
index 14b1757e6674..bbbd904b696a 100644
--- a/drivers/staging/media/atomisp/include/linux/atomisp.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp.h
@@ -713,13 +713,6 @@ enum atomisp_burst_capture_options {
#define EXT_ISP_SHOT_MODE_ANIMATED_PHOTO 10
#define EXT_ISP_SHOT_MODE_SPORTS 11
-/*
- * Set Senor run mode
- */
-struct atomisp_s_runmode {
- __u32 mode;
-};
-
/*Private IOCTLs for ISP */
#define ATOMISP_IOC_G_XNR \
_IOR('v', BASE_VIDIOC_PRIVATE + 0, int)
@@ -875,9 +868,6 @@ struct atomisp_s_runmode {
#define ATOMISP_IOC_S_SENSOR_EE_CONFIG \
_IOW('v', BASE_VIDIOC_PRIVATE + 47, unsigned int)
-#define ATOMISP_IOC_S_SENSOR_RUNMODE \
- _IOW('v', BASE_VIDIOC_PRIVATE + 48, struct atomisp_s_runmode)
-
/*
* Reserved ioctls. We have customer implementing it internally.
* We can't use both numbers to not cause ABI conflict.
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
index 0803b296e9ac..759233a7ba50 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
@@ -1248,28 +1248,6 @@ static void atomisp_update_capture_mode(struct atomisp_sub_device *asd)
atomisp_css_capture_set_mode(asd, IA_CSS_CAPTURE_MODE_PRIMARY);
}
-/* ISP2401 */
-int atomisp_set_sensor_runmode(struct atomisp_sub_device *asd,
- struct atomisp_s_runmode *runmode)
-{
- struct atomisp_device *isp = asd->isp;
- struct v4l2_ctrl *c;
- int ret = 0;
-
- if (!(runmode && (runmode->mode & RUNMODE_MASK)))
- return -EINVAL;
-
- mutex_lock(asd->ctrl_handler.lock);
- c = v4l2_ctrl_find(isp->inputs[asd->input_curr].camera->ctrl_handler,
- V4L2_CID_RUN_MODE);
-
- if (c)
- ret = v4l2_ctrl_s_ctrl(c, runmode->mode);
-
- mutex_unlock(asd->ctrl_handler.lock);
- return ret;
-}
-
/*
* Function to enable/disable lens geometry distortion correction (GDC) and
* chromatic aberration correction (CAC)
@@ -2793,12 +2771,16 @@ int atomisp_cp_dvs_6axis_config(struct atomisp_sub_device *asd,
css_param->dvs_6axis = NULL;
dvs_6axis_config = ia_css_dvs2_6axis_config_allocate(stream);
- if (!dvs_6axis_config)
- return -ENOMEM;
+ if (!dvs_6axis_config) {
+ ret = -ENOMEM;
+ goto error;
+ }
} else if (!dvs_6axis_config) {
dvs_6axis_config = ia_css_dvs2_6axis_config_allocate(stream);
- if (!dvs_6axis_config)
- return -ENOMEM;
+ if (!dvs_6axis_config) {
+ ret = -ENOMEM;
+ goto error;
+ }
}
dvs_6axis_config->exp_id = source_6axis_config->exp_id;
@@ -2896,8 +2878,10 @@ int atomisp_cp_morph_table(struct atomisp_sub_device *asd,
morph_table = atomisp_css_morph_table_allocate(
source_morph_table->width,
source_morph_table->height);
- if (!morph_table)
- return -ENOMEM;
+ if (!morph_table) {
+ ret = -ENOMEM;
+ goto error;
+ }
for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
if (copy_from_compatible(morph_table->coordinates_x[i],
@@ -3808,6 +3792,10 @@ int atomisp_try_fmt(struct atomisp_device *isp, struct v4l2_pix_format *f,
return -EINVAL;
}
+ /* The preview pipeline does not support width > 1920 */
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW)
+ f->width = min_t(u32, f->width, 1920);
+
/*
* atomisp_set_fmt() will set the sensor resolution to the requested
* resolution + padding. Add padding here and remove it again after
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.h b/drivers/staging/media/atomisp/pci/atomisp_cmd.h
index 8305161d2062..b8cd957eebdc 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_cmd.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.h
@@ -42,13 +42,6 @@ struct ia_css_frame;
#define INTR_IER 24
#define INTR_IIR 16
-/* ISP2401 */
-#define RUNMODE_MASK (ATOMISP_RUN_MODE_VIDEO | ATOMISP_RUN_MODE_STILL_CAPTURE \
- | ATOMISP_RUN_MODE_PREVIEW)
-
-/* FIXME: check if can go */
-extern int atomisp_punit_hpll_freq;
-
/* Helper function */
void dump_sp_dmem(struct atomisp_device *isp, unsigned int addr,
unsigned int size);
@@ -78,12 +71,6 @@ bool atomisp_is_viewfinder_support(struct atomisp_device *isp);
/* ISP features control function */
/*
- * Function to set sensor runmode by user when
- * ATOMISP_IOC_S_SENSOR_RUNMODE ioctl was called
- */
-int atomisp_set_sensor_runmode(struct atomisp_sub_device *asd,
- struct atomisp_s_runmode *runmode);
-/*
* Function to enable/disable lens geometry distortion correction (GDC) and
* chromatic aberration correction (CAC)
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
index b97ec85aa0ba..02f06294bbfe 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
@@ -16,7 +16,6 @@
*
*/
-#include <media/videobuf-vmalloc.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-event.h>
@@ -850,19 +849,17 @@ int atomisp_css_irq_translate(struct atomisp_device *isp,
void atomisp_css_rx_get_irq_info(enum mipi_port_id port,
unsigned int *infos)
{
-#ifndef ISP2401
- ia_css_isys_rx_get_irq_info(port, infos);
-#else
- *infos = 0;
-#endif
+ if (IS_ISP2401)
+ *infos = 0;
+ else
+ ia_css_isys_rx_get_irq_info(port, infos);
}
void atomisp_css_rx_clear_irq_info(enum mipi_port_id port,
unsigned int infos)
{
-#ifndef ISP2401
- ia_css_isys_rx_clear_irq_info(port, infos);
-#endif
+ if (!IS_ISP2401)
+ ia_css_isys_rx_clear_irq_info(port, infos);
}
int atomisp_css_irq_enable(struct atomisp_device *isp,
diff --git a/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c b/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c
index 03940c11505f..2483eaeeac73 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c
@@ -521,7 +521,12 @@ static char *atomisp_csi2_get_vcm_type(struct acpi_device *adev)
}
static const struct acpi_device_id atomisp_sensor_configs[] = {
- ATOMISP_SENSOR_CONFIG("INT33BE", 2, true), /* OV5693 */
+ /*
+ * FIXME ov5693 modules have a VCM, but for unknown reasons
+ * the sensor fails to start streaming when instantiating
+ * an i2c-client for the VCM, so it is disabled for now.
+ */
+ ATOMISP_SENSOR_CONFIG("INT33BE", 2, false), /* OV5693 */
{}
};
diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c
index 54466d2f323a..4dba6120af39 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_fops.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c
@@ -460,7 +460,6 @@ static void atomisp_dev_init_struct(struct atomisp_device *isp)
static void atomisp_subdev_init_struct(struct atomisp_sub_device *asd)
{
- v4l2_ctrl_s_ctrl(asd->run_mode, ATOMISP_RUN_MODE_STILL_CAPTURE);
memset(&asd->params.css_param, 0, sizeof(asd->params.css_param));
asd->params.color_effect = V4L2_COLORFX_NONE;
asd->params.bad_pixel_en = true;
@@ -513,8 +512,8 @@ static int atomisp_open(struct file *file)
*/
if (pipe->users) {
dev_dbg(isp->dev, "video node already opened\n");
- mutex_unlock(&isp->mutex);
- return -EBUSY;
+ ret = -EBUSY;
+ goto error;
}
/* runtime power management, turn on ISP */
@@ -533,8 +532,6 @@ static int atomisp_open(struct file *file)
}
atomisp_subdev_init_struct(asd);
- /* Ensure that a mode is set */
- v4l2_ctrl_s_ctrl(asd->run_mode, ATOMISP_RUN_MODE_PREVIEW);
pipe->users++;
mutex_unlock(&isp->mutex);
diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
index d2174156573a..a8e4779d007f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
@@ -665,11 +665,6 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input)
dev_err(isp->dev, "Failed to power-on sensor\n");
return ret;
}
- /*
- * Some sensor driver resets the run mode during power-on, thus force
- * update the run mode to sensor after power-on.
- */
- atomisp_update_run_mode(asd);
/* select operating sensor */
ret = v4l2_subdev_call(isp->inputs[input].camera, video, s_routing,
@@ -708,6 +703,9 @@ static int atomisp_enum_framesizes_crop_inner(struct atomisp_device *isp,
int *valid_sizes)
{
static const struct v4l2_frmsize_discrete frame_sizes[] = {
+ { 1920, 1440 },
+ { 1920, 1200 },
+ { 1920, 1080 },
{ 1600, 1200 },
{ 1600, 1080 },
{ 1600, 900 },
@@ -729,11 +727,11 @@ static int atomisp_enum_framesizes_crop_inner(struct atomisp_device *isp,
continue;
/*
- * Skip sizes where width and height are less then 2/3th of the
+ * Skip sizes where width and height are less then 5/8th of the
* sensor size to avoid sizes with a too small field of view.
*/
- if (frame_sizes[i].width < (active->width * 2 / 3) &&
- frame_sizes[i].height < (active->height * 2 / 3))
+ if (frame_sizes[i].width < (active->width * 5 / 8) &&
+ frame_sizes[i].height < (active->height * 5 / 8))
continue;
if (*valid_sizes == fsize->index) {
@@ -1781,13 +1779,6 @@ static long atomisp_vidioc_default(struct file *file, void *fh,
int err;
switch (cmd) {
- case ATOMISP_IOC_S_SENSOR_RUNMODE:
- if (IS_ISP2401)
- err = atomisp_set_sensor_runmode(asd, arg);
- else
- err = -EINVAL;
- break;
-
case ATOMISP_IOC_G_XNR:
err = atomisp_xnr(asd, 0, arg);
break;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
index 45073e401bac..471912dea5cd 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
@@ -663,52 +663,6 @@ static const struct media_entity_operations isp_subdev_media_ops = {
/* .set_power = v4l2_subdev_set_power, */
};
-static int __atomisp_update_run_mode(struct atomisp_sub_device *asd)
-{
- struct atomisp_device *isp = asd->isp;
- struct v4l2_ctrl *ctrl = asd->run_mode;
- struct v4l2_ctrl *c;
- s32 mode;
-
- mode = ctrl->val;
-
- c = v4l2_ctrl_find(
- isp->inputs[asd->input_curr].camera->ctrl_handler,
- V4L2_CID_RUN_MODE);
-
- if (c)
- return v4l2_ctrl_s_ctrl(c, mode);
-
- return 0;
-}
-
-int atomisp_update_run_mode(struct atomisp_sub_device *asd)
-{
- int rval;
-
- mutex_lock(asd->ctrl_handler.lock);
- rval = __atomisp_update_run_mode(asd);
- mutex_unlock(asd->ctrl_handler.lock);
-
- return rval;
-}
-
-static int s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct atomisp_sub_device *asd = container_of(
- ctrl->handler, struct atomisp_sub_device, ctrl_handler);
- switch (ctrl->id) {
- case V4L2_CID_RUN_MODE:
- return __atomisp_update_run_mode(asd);
- }
-
- return 0;
-}
-
-static const struct v4l2_ctrl_ops ctrl_ops = {
- .s_ctrl = &s_ctrl,
-};
-
static const char *const ctrl_run_mode_menu[] = {
[ATOMISP_RUN_MODE_VIDEO] = "Video",
[ATOMISP_RUN_MODE_STILL_CAPTURE] = "Still capture",
@@ -716,7 +670,6 @@ static const char *const ctrl_run_mode_menu[] = {
};
static const struct v4l2_ctrl_config ctrl_run_mode = {
- .ops = &ctrl_ops,
.id = V4L2_CID_RUN_MODE,
.name = "Atomisp run mode",
.type = V4L2_CTRL_TYPE_MENU,
@@ -754,7 +707,6 @@ static const struct v4l2_ctrl_config ctrl_vfpp = {
* the CSS subsystem.
*/
static const struct v4l2_ctrl_config ctrl_continuous_raw_buffer_size = {
- .ops = &ctrl_ops,
.id = V4L2_CID_ATOMISP_CONTINUOUS_RAW_BUFFER_SIZE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Continuous raw ringbuffer size",
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.h b/drivers/staging/media/atomisp/pci/atomisp_subdev.h
index 9a04511b9efd..9c1703bf439c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.h
@@ -360,8 +360,6 @@ void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd,
uint32_t which,
u32 pad, struct v4l2_mbus_framefmt *ffmt);
-int atomisp_update_run_mode(struct atomisp_sub_device *asd);
-
void atomisp_subdev_cleanup_pending_events(struct atomisp_sub_device *asd);
void atomisp_subdev_unregister_entities(struct atomisp_sub_device *asd);
diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
index 0d0329f5e4ad..c1c8501ec61f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
@@ -1206,25 +1206,6 @@ static bool is_valid_device(struct pci_dev *pdev, const struct pci_device_id *id
return false;
}
- /*
- * FIXME:
- * remove the if once the driver become generic
- */
-
-#ifndef ISP2401
- if (IS_ISP2401) {
- dev_err(&pdev->dev, "Support for %s (ISP2401) was disabled at compile time\n",
- name);
- return false;
- }
-#else
- if (!IS_ISP2401) {
- dev_err(&pdev->dev, "Support for %s (ISP2400) was disabled at compile time\n",
- name);
- return false;
- }
-#endif
-
dev_info(&pdev->dev, "Detected %s version %d (ISP240%c) on %s\n",
name, pdev->revision, IS_ISP2401 ? '1' : '0', product);
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_local.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_local.h
index a76987190292..0f585a7e0fa4 100644
--- a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_local.h
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_local.h
@@ -18,8 +18,6 @@
#include <type_support.h>
-#if defined(ISP2401)
-
typedef struct isys_irqc_state_s isys_irqc_state_t;
struct isys_irqc_state_s {
@@ -31,6 +29,5 @@ struct isys_irqc_state_s {
/*hrt_data clear; */ /* write-only register */
};
-#endif /* defined(ISP2401) */
#endif /* __ISYS_IRQ_LOCAL_H__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_private.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_private.h
index fb168c25bdfc..d94c8e6add72 100644
--- a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_private.h
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_private.h
@@ -19,7 +19,6 @@
#include "isys_irq_global.h"
#include "isys_irq_local.h"
-#if defined(ISP2401)
/* -------------------------------------------------------+
| Native command interface (NCI) |
@@ -102,6 +101,5 @@ hrt_data isys_irqc_reg_load(
/* end of DLI */
-#endif /* defined(ISP2401) */
#endif /* __ISYS_IRQ_PRIVATE_H__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/isys_irq_global.h b/drivers/staging/media/atomisp/pci/css_2401_system/isys_irq_global.h
index a81e4d13ac9f..16336ed73036 100644
--- a/drivers/staging/media/atomisp/pci/css_2401_system/isys_irq_global.h
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/isys_irq_global.h
@@ -16,7 +16,6 @@
#ifndef __ISYS_IRQ_GLOBAL_H__
#define __ISYS_IRQ_GLOBAL_H__
-#if defined(ISP2401)
/* Register offset/index from base location */
#define ISYS_IRQ_EDGE_REG_IDX (0)
@@ -31,6 +30,5 @@
#define ISYS_IRQ_CLEAR_REG_VALUE (0xFFFF)
#define ISYS_IRQ_ENABLE_REG_VALUE (0xFFFF)
-#endif /* defined(ISP2401) */
#endif /* __ISYS_IRQ_GLOBAL_H__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/debug_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/debug_global.h
index b6538beca18a..f2e17945fd45 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_common/debug_global.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/debug_global.h
@@ -36,13 +36,6 @@
#define DEBUG_BUFFER_ISP_DMEM_ADDR 0x0
/*
- * Enable HAS_WATCHDOG_SP_THREAD_DEBUG for additional SP thread and
- * pipe information on watchdog output
- * #undef HAS_WATCHDOG_SP_THREAD_DEBUG
- * #define HAS_WATCHDOG_SP_THREAD_DEBUG
- */
-
-/*
* The linear buffer mode will accept data until the first
* overflow and then stop accepting new data
* The circular buffer mode will accept if there is place
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma.c
index f85950c471c7..0b6647b2eb76 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma.c
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma.c
@@ -23,272 +23,6 @@
#include "dma_private.h"
#endif /* __INLINE_DMA__ */
-void dma_get_state(const dma_ID_t ID, dma_state_t *state)
-{
- int i;
- hrt_data tmp;
-
- assert(ID < N_DMA_ID);
- assert(state);
-
- tmp = dma_reg_load(ID, DMA_COMMAND_FSM_REG_IDX);
- //reg [3:0] : flags error [3], stall, run, idle [0]
- //reg [9:4] : command
- //reg[14:10] : channel
- //reg [23:15] : param
- state->fsm_command_idle = tmp & 0x1;
- state->fsm_command_run = tmp & 0x2;
- state->fsm_command_stalling = tmp & 0x4;
- state->fsm_command_error = tmp & 0x8;
- state->last_command_channel = (tmp >> 10 & 0x1F);
- state->last_command_param = (tmp >> 15 & 0x0F);
- tmp = (tmp >> 4) & 0x3F;
- /* state->last_command = (dma_commands_t)tmp; */
- /* if the enumerator is made non-linear */
- /* AM: the list below does not cover all the cases*/
- /* and these are not correct */
- /* therefore for just dumpinmg this command*/
- state->last_command = tmp;
-
- /*
- if (tmp == 0)
- state->last_command = DMA_COMMAND_READ;
- if (tmp == 1)
- state->last_command = DMA_COMMAND_WRITE;
- if (tmp == 2)
- state->last_command = DMA_COMMAND_SET_CHANNEL;
- if (tmp == 3)
- state->last_command = DMA_COMMAND_SET_PARAM;
- if (tmp == 4)
- state->last_command = DMA_COMMAND_READ_SPECIFIC;
- if (tmp == 5)
- state->last_command = DMA_COMMAND_WRITE_SPECIFIC;
- if (tmp == 8)
- state->last_command = DMA_COMMAND_INIT;
- if (tmp == 12)
- state->last_command = DMA_COMMAND_INIT_SPECIFIC;
- if (tmp == 15)
- state->last_command = DMA_COMMAND_RST;
- */
-
- /* No sub-fields, idx = 0 */
- state->current_command = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(0, _DMA_FSM_GROUP_CMD_IDX));
- state->current_addr_a = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(0, _DMA_FSM_GROUP_ADDR_A_IDX));
- state->current_addr_b = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(0, _DMA_FSM_GROUP_ADDR_B_IDX));
-
- tmp = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_CTRL_STATE_IDX,
- _DMA_FSM_GROUP_FSM_CTRL_IDX));
- state->fsm_ctrl_idle = tmp & 0x1;
- state->fsm_ctrl_run = tmp & 0x2;
- state->fsm_ctrl_stalling = tmp & 0x4;
- state->fsm_ctrl_error = tmp & 0x8;
- tmp = tmp >> 4;
- /* state->fsm_ctrl_state = (dma_ctrl_states_t)tmp; */
- if (tmp == 0)
- state->fsm_ctrl_state = DMA_CTRL_STATE_IDLE;
- if (tmp == 1)
- state->fsm_ctrl_state = DMA_CTRL_STATE_REQ_RCV;
- if (tmp == 2)
- state->fsm_ctrl_state = DMA_CTRL_STATE_RCV;
- if (tmp == 3)
- state->fsm_ctrl_state = DMA_CTRL_STATE_RCV_REQ;
- if (tmp == 4)
- state->fsm_ctrl_state = DMA_CTRL_STATE_INIT;
- state->fsm_ctrl_source_dev = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_CTRL_REQ_DEV_IDX,
- _DMA_FSM_GROUP_FSM_CTRL_IDX));
- state->fsm_ctrl_source_addr = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_CTRL_REQ_ADDR_IDX,
- _DMA_FSM_GROUP_FSM_CTRL_IDX));
- state->fsm_ctrl_source_stride = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_CTRL_REQ_STRIDE_IDX,
- _DMA_FSM_GROUP_FSM_CTRL_IDX));
- state->fsm_ctrl_source_width = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_CTRL_REQ_XB_IDX,
- _DMA_FSM_GROUP_FSM_CTRL_IDX));
- state->fsm_ctrl_source_height = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_CTRL_REQ_YB_IDX,
- _DMA_FSM_GROUP_FSM_CTRL_IDX));
- state->fsm_ctrl_pack_source_dev = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_CTRL_PACK_REQ_DEV_IDX,
- _DMA_FSM_GROUP_FSM_CTRL_IDX));
- state->fsm_ctrl_pack_dest_dev = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_DEV_IDX,
- _DMA_FSM_GROUP_FSM_CTRL_IDX));
- state->fsm_ctrl_dest_addr = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_CTRL_WR_ADDR_IDX,
- _DMA_FSM_GROUP_FSM_CTRL_IDX));
- state->fsm_ctrl_dest_stride = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_CTRL_WR_STRIDE_IDX,
- _DMA_FSM_GROUP_FSM_CTRL_IDX));
- state->fsm_ctrl_pack_source_width = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_CTRL_PACK_REQ_XB_IDX,
- _DMA_FSM_GROUP_FSM_CTRL_IDX));
- state->fsm_ctrl_pack_dest_height = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_YB_IDX,
- _DMA_FSM_GROUP_FSM_CTRL_IDX));
- state->fsm_ctrl_pack_dest_width = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_XB_IDX,
- _DMA_FSM_GROUP_FSM_CTRL_IDX));
- state->fsm_ctrl_pack_source_elems = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_CTRL_PACK_ELEM_REQ_IDX,
- _DMA_FSM_GROUP_FSM_CTRL_IDX));
- state->fsm_ctrl_pack_dest_elems = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_CTRL_PACK_ELEM_WR_IDX,
- _DMA_FSM_GROUP_FSM_CTRL_IDX));
- state->fsm_ctrl_pack_extension = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_CTRL_PACK_S_Z_IDX,
- _DMA_FSM_GROUP_FSM_CTRL_IDX));
-
- tmp = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_PACK_STATE_IDX,
- _DMA_FSM_GROUP_FSM_PACK_IDX));
- state->pack_idle = tmp & 0x1;
- state->pack_run = tmp & 0x2;
- state->pack_stalling = tmp & 0x4;
- state->pack_error = tmp & 0x8;
- state->pack_cnt_height = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_PACK_CNT_YB_IDX,
- _DMA_FSM_GROUP_FSM_PACK_IDX));
- state->pack_src_cnt_width = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_PACK_CNT_XB_REQ_IDX,
- _DMA_FSM_GROUP_FSM_PACK_IDX));
- state->pack_dest_cnt_width = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_PACK_CNT_XB_WR_IDX,
- _DMA_FSM_GROUP_FSM_PACK_IDX));
-
- tmp = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_REQ_STATE_IDX,
- _DMA_FSM_GROUP_FSM_REQ_IDX));
- /* state->read_state = (dma_rw_states_t)tmp; */
- if (tmp == 0)
- state->read_state = DMA_RW_STATE_IDLE;
- if (tmp == 1)
- state->read_state = DMA_RW_STATE_REQ;
- if (tmp == 2)
- state->read_state = DMA_RW_STATE_NEXT_LINE;
- if (tmp == 3)
- state->read_state = DMA_RW_STATE_UNLOCK_CHANNEL;
- state->read_cnt_height = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_REQ_CNT_YB_IDX,
- _DMA_FSM_GROUP_FSM_REQ_IDX));
- state->read_cnt_width = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_REQ_CNT_XB_IDX,
- _DMA_FSM_GROUP_FSM_REQ_IDX));
-
- tmp = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_WR_STATE_IDX,
- _DMA_FSM_GROUP_FSM_WR_IDX));
- /* state->write_state = (dma_rw_states_t)tmp; */
- if (tmp == 0)
- state->write_state = DMA_RW_STATE_IDLE;
- if (tmp == 1)
- state->write_state = DMA_RW_STATE_REQ;
- if (tmp == 2)
- state->write_state = DMA_RW_STATE_NEXT_LINE;
- if (tmp == 3)
- state->write_state = DMA_RW_STATE_UNLOCK_CHANNEL;
- state->write_height = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_WR_CNT_YB_IDX,
- _DMA_FSM_GROUP_FSM_WR_IDX));
- state->write_width = dma_reg_load(ID,
- DMA_CG_INFO_REG_IDX(
- _DMA_FSM_GROUP_FSM_WR_CNT_XB_IDX,
- _DMA_FSM_GROUP_FSM_WR_IDX));
-
- for (i = 0; i < HIVE_ISP_NUM_DMA_CONNS; i++) {
- dma_port_state_t *port = &state->port_states[i];
-
- tmp = dma_reg_load(ID, DMA_DEV_INFO_REG_IDX(0, i));
- port->req_cs = ((tmp & 0x1) != 0);
- port->req_we_n = ((tmp & 0x2) != 0);
- port->req_run = ((tmp & 0x4) != 0);
- port->req_ack = ((tmp & 0x8) != 0);
-
- tmp = dma_reg_load(ID, DMA_DEV_INFO_REG_IDX(1, i));
- port->send_cs = ((tmp & 0x1) != 0);
- port->send_we_n = ((tmp & 0x2) != 0);
- port->send_run = ((tmp & 0x4) != 0);
- port->send_ack = ((tmp & 0x8) != 0);
-
- tmp = dma_reg_load(ID, DMA_DEV_INFO_REG_IDX(2, i));
- if (tmp & 0x1)
- port->fifo_state = DMA_FIFO_STATE_WILL_BE_FULL;
- if (tmp & 0x2)
- port->fifo_state = DMA_FIFO_STATE_FULL;
- if (tmp & 0x4)
- port->fifo_state = DMA_FIFO_STATE_EMPTY;
- port->fifo_counter = tmp >> 3;
- }
-
- for (i = 0; i < HIVE_DMA_NUM_CHANNELS; i++) {
- dma_channel_state_t *ch = &state->channel_states[i];
-
- ch->connection = DMA_GET_CONNECTION(dma_reg_load(ID,
- DMA_CHANNEL_PARAM_REG_IDX(i,
- _DMA_PACKING_SETUP_PARAM)));
- ch->sign_extend = DMA_GET_EXTENSION(dma_reg_load(ID,
- DMA_CHANNEL_PARAM_REG_IDX(i,
- _DMA_PACKING_SETUP_PARAM)));
- ch->height = dma_reg_load(ID,
- DMA_CHANNEL_PARAM_REG_IDX(i,
- _DMA_HEIGHT_PARAM));
- ch->stride_a = dma_reg_load(ID,
- DMA_CHANNEL_PARAM_REG_IDX(i,
- _DMA_STRIDE_A_PARAM));
- ch->elems_a = DMA_GET_ELEMENTS(dma_reg_load(ID,
- DMA_CHANNEL_PARAM_REG_IDX(i,
- _DMA_ELEM_CROPPING_A_PARAM)));
- ch->cropping_a = DMA_GET_CROPPING(dma_reg_load(ID,
- DMA_CHANNEL_PARAM_REG_IDX(i,
- _DMA_ELEM_CROPPING_A_PARAM)));
- ch->width_a = dma_reg_load(ID,
- DMA_CHANNEL_PARAM_REG_IDX(i,
- _DMA_WIDTH_A_PARAM));
- ch->stride_b = dma_reg_load(ID,
- DMA_CHANNEL_PARAM_REG_IDX(i,
- _DMA_STRIDE_B_PARAM));
- ch->elems_b = DMA_GET_ELEMENTS(dma_reg_load(ID,
- DMA_CHANNEL_PARAM_REG_IDX(i,
- _DMA_ELEM_CROPPING_B_PARAM)));
- ch->cropping_b = DMA_GET_CROPPING(dma_reg_load(ID,
- DMA_CHANNEL_PARAM_REG_IDX(i,
- _DMA_ELEM_CROPPING_B_PARAM)));
- ch->width_b = dma_reg_load(ID,
- DMA_CHANNEL_PARAM_REG_IDX(i,
- _DMA_WIDTH_B_PARAM));
- }
-}
-
void
dma_set_max_burst_size(const dma_ID_t ID, dma_connection conn,
uint32_t max_burst_size)
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_local.h
index 7e4cc75733cd..48a1ace79897 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_local.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_local.h
@@ -89,120 +89,4 @@
#define DMA_GET_ELEMENTS(val) _hrt_get_bits(val, _DMA_V2_ELEMENTS_IDX, _DMA_V2_ELEMENTS_BITS)
#define DMA_GET_CROPPING(val) _hrt_get_bits(val, _DMA_V2_LEFT_CROPPING_IDX, _DMA_V2_LEFT_CROPPING_BITS)
-typedef enum {
- DMA_CTRL_STATE_IDLE,
- DMA_CTRL_STATE_REQ_RCV,
- DMA_CTRL_STATE_RCV,
- DMA_CTRL_STATE_RCV_REQ,
- DMA_CTRL_STATE_INIT,
- N_DMA_CTRL_STATES
-} dma_ctrl_states_t;
-
-typedef enum {
- DMA_COMMAND_READ,
- DMA_COMMAND_WRITE,
- DMA_COMMAND_SET_CHANNEL,
- DMA_COMMAND_SET_PARAM,
- DMA_COMMAND_READ_SPECIFIC,
- DMA_COMMAND_WRITE_SPECIFIC,
- DMA_COMMAND_INIT,
- DMA_COMMAND_INIT_SPECIFIC,
- DMA_COMMAND_RST,
- N_DMA_COMMANDS
-} dma_commands_t;
-
-typedef enum {
- DMA_RW_STATE_IDLE,
- DMA_RW_STATE_REQ,
- DMA_RW_STATE_NEXT_LINE,
- DMA_RW_STATE_UNLOCK_CHANNEL,
- N_DMA_RW_STATES
-} dma_rw_states_t;
-
-typedef enum {
- DMA_FIFO_STATE_WILL_BE_FULL,
- DMA_FIFO_STATE_FULL,
- DMA_FIFO_STATE_EMPTY,
- N_DMA_FIFO_STATES
-} dma_fifo_states_t;
-
-/* typedef struct dma_state_s dma_state_t; */
-typedef struct dma_channel_state_s dma_channel_state_t;
-typedef struct dma_port_state_s dma_port_state_t;
-
-struct dma_port_state_s {
- bool req_cs;
- bool req_we_n;
- bool req_run;
- bool req_ack;
- bool send_cs;
- bool send_we_n;
- bool send_run;
- bool send_ack;
- dma_fifo_states_t fifo_state;
- int fifo_counter;
-};
-
-struct dma_channel_state_s {
- int connection;
- bool sign_extend;
- int height;
- int stride_a;
- int elems_a;
- int cropping_a;
- int width_a;
- int stride_b;
- int elems_b;
- int cropping_b;
- int width_b;
-};
-
-struct dma_state_s {
- bool fsm_command_idle;
- bool fsm_command_run;
- bool fsm_command_stalling;
- bool fsm_command_error;
- dma_commands_t last_command;
- int last_command_channel;
- int last_command_param;
- dma_commands_t current_command;
- int current_addr_a;
- int current_addr_b;
- bool fsm_ctrl_idle;
- bool fsm_ctrl_run;
- bool fsm_ctrl_stalling;
- bool fsm_ctrl_error;
- dma_ctrl_states_t fsm_ctrl_state;
- int fsm_ctrl_source_dev;
- int fsm_ctrl_source_addr;
- int fsm_ctrl_source_stride;
- int fsm_ctrl_source_width;
- int fsm_ctrl_source_height;
- int fsm_ctrl_pack_source_dev;
- int fsm_ctrl_pack_dest_dev;
- int fsm_ctrl_dest_addr;
- int fsm_ctrl_dest_stride;
- int fsm_ctrl_pack_source_width;
- int fsm_ctrl_pack_dest_height;
- int fsm_ctrl_pack_dest_width;
- int fsm_ctrl_pack_source_elems;
- int fsm_ctrl_pack_dest_elems;
- int fsm_ctrl_pack_extension;
- int pack_idle;
- int pack_run;
- int pack_stalling;
- int pack_error;
- int pack_cnt_height;
- int pack_src_cnt_width;
- int pack_dest_cnt_width;
- dma_rw_states_t read_state;
- int read_cnt_height;
- int read_cnt_width;
- dma_rw_states_t write_state;
- int write_height;
- int write_width;
- dma_port_state_t port_states[HIVE_ISP_NUM_DMA_CONNS];
- dma_channel_state_t channel_states[HIVE_DMA_NUM_CHANNELS];
-};
-
#endif /* __DMA_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter.c
index 5cd6136f21a2..e01f30f137a0 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter.c
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter.c
@@ -15,7 +15,6 @@
#include "system_global.h"
-#ifndef ISP2401
#include "input_formatter.h"
#include <type_support.h>
@@ -243,4 +242,3 @@ void input_formatter_bin_get_state(
HIVE_STR2MEM_EN_STAT_UPDATE_ADDRESS);
return;
}
-#endif
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c
index 712e01c37870..ca1ce6689034 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c
@@ -15,7 +15,6 @@
#include "system_global.h"
-#ifndef ISP2401
#include "input_system.h"
#include <type_support.h>
@@ -80,31 +79,6 @@ static input_system_err_t input_system_multiplexer_cfg(
const input_system_multiplex_t rhs,
input_system_config_flags_t *const flags);
-static inline void capture_unit_get_state(
- const input_system_ID_t ID,
- const sub_system_ID_t sub_id,
- capture_unit_state_t *state);
-
-static inline void acquisition_unit_get_state(
- const input_system_ID_t ID,
- const sub_system_ID_t sub_id,
- acquisition_unit_state_t *state);
-
-static inline void ctrl_unit_get_state(
- const input_system_ID_t ID,
- const sub_system_ID_t sub_id,
- ctrl_unit_state_t *state);
-
-static inline void mipi_port_get_state(
- const rx_ID_t ID,
- const enum mipi_port_id port_ID,
- mipi_port_state_t *state);
-
-static inline void rx_channel_get_state(
- const rx_ID_t ID,
- const unsigned int ch_id,
- rx_channel_state_t *state);
-
static void gp_device_rst(const gp_device_ID_t ID);
static void input_selector_cfg_for_sensor(const gp_device_ID_t ID);
@@ -116,149 +90,6 @@ static void input_switch_cfg(
const input_switch_cfg_t *const cfg
);
-void input_system_get_state(
- const input_system_ID_t ID,
- input_system_state_t *state)
-{
- sub_system_ID_t sub_id;
-
- assert(ID < N_INPUT_SYSTEM_ID);
- assert(state);
-
- state->str_multicastA_sel = input_system_sub_system_reg_load(ID,
- GPREGS_UNIT0_ID,
- HIVE_ISYS_GPREG_MULTICAST_A_IDX);
- state->str_multicastB_sel = input_system_sub_system_reg_load(ID,
- GPREGS_UNIT0_ID,
- HIVE_ISYS_GPREG_MULTICAST_B_IDX);
- state->str_multicastC_sel = input_system_sub_system_reg_load(ID,
- GPREGS_UNIT0_ID,
- HIVE_ISYS_GPREG_MULTICAST_C_IDX);
- state->str_mux_sel = input_system_sub_system_reg_load(ID,
- GPREGS_UNIT0_ID,
- HIVE_ISYS_GPREG_MUX_IDX);
- state->str_mon_status = input_system_sub_system_reg_load(ID,
- GPREGS_UNIT0_ID,
- HIVE_ISYS_GPREG_STRMON_STAT_IDX);
- state->str_mon_irq_cond = input_system_sub_system_reg_load(ID,
- GPREGS_UNIT0_ID,
- HIVE_ISYS_GPREG_STRMON_COND_IDX);
- state->str_mon_irq_en = input_system_sub_system_reg_load(ID,
- GPREGS_UNIT0_ID,
- HIVE_ISYS_GPREG_STRMON_IRQ_EN_IDX);
- state->isys_srst = input_system_sub_system_reg_load(ID,
- GPREGS_UNIT0_ID,
- HIVE_ISYS_GPREG_SRST_IDX);
- state->isys_slv_reg_srst = input_system_sub_system_reg_load(ID,
- GPREGS_UNIT0_ID,
- HIVE_ISYS_GPREG_SLV_REG_SRST_IDX);
- state->str_deint_portA_cnt = input_system_sub_system_reg_load(ID,
- GPREGS_UNIT0_ID,
- HIVE_ISYS_GPREG_REG_PORT_A_IDX);
- state->str_deint_portB_cnt = input_system_sub_system_reg_load(ID,
- GPREGS_UNIT0_ID,
- HIVE_ISYS_GPREG_REG_PORT_B_IDX);
-
- for (sub_id = CAPTURE_UNIT0_ID; sub_id < CAPTURE_UNIT0_ID + N_CAPTURE_UNIT_ID;
- sub_id++) {
- capture_unit_get_state(ID, sub_id,
- &state->capture_unit[sub_id - CAPTURE_UNIT0_ID]);
- }
- for (sub_id = ACQUISITION_UNIT0_ID;
- sub_id < ACQUISITION_UNIT0_ID + N_ACQUISITION_UNIT_ID; sub_id++) {
- acquisition_unit_get_state(ID, sub_id,
- &state->acquisition_unit[sub_id - ACQUISITION_UNIT0_ID]);
- }
- for (sub_id = CTRL_UNIT0_ID; sub_id < CTRL_UNIT0_ID + N_CTRL_UNIT_ID;
- sub_id++) {
- ctrl_unit_get_state(ID, sub_id,
- &state->ctrl_unit_state[sub_id - CTRL_UNIT0_ID]);
- }
-}
-
-void receiver_get_state(
- const rx_ID_t ID,
- receiver_state_t *state)
-{
- enum mipi_port_id port_id;
- unsigned int ch_id;
-
- assert(ID < N_RX_ID);
- assert(state);
-
- state->fs_to_ls_delay = (uint8_t)receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_FS_TO_LS_DELAY_REG_IDX);
- state->ls_to_data_delay = (uint8_t)receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_LS_TO_DATA_DELAY_REG_IDX);
- state->data_to_le_delay = (uint8_t)receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_DATA_TO_LE_DELAY_REG_IDX);
- state->le_to_fe_delay = (uint8_t)receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_LE_TO_FE_DELAY_REG_IDX);
- state->fe_to_fs_delay = (uint8_t)receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_FE_TO_FS_DELAY_REG_IDX);
- state->le_to_fs_delay = (uint8_t)receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_LE_TO_LS_DELAY_REG_IDX);
- state->is_two_ppc = (bool)receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_TWO_PIXEL_EN_REG_IDX);
- state->backend_rst = receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_BACKEND_RST_REG_IDX);
- state->raw18 = (uint16_t)receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_RAW18_REG_IDX);
- state->force_raw8 = (bool)receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_FORCE_RAW8_REG_IDX);
- state->raw16 = (uint16_t)receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_RAW16_REG_IDX);
-
- for (port_id = (enum mipi_port_id)0; port_id < N_MIPI_PORT_ID; port_id++) {
- mipi_port_get_state(ID, port_id,
- &state->mipi_port_state[port_id]);
- }
- for (ch_id = 0U; ch_id < N_RX_CHANNEL_ID; ch_id++) {
- rx_channel_get_state(ID, ch_id,
- &state->rx_channel_state[ch_id]);
- }
-
- state->be_gsp_acc_ovl = receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_BE_GSP_ACC_OVL_REG_IDX);
- state->be_srst = receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_BE_SRST_REG_IDX);
- state->be_is_two_ppc = receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_BE_TWO_PPC_REG_IDX);
- state->be_comp_format0 = receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG0_IDX);
- state->be_comp_format1 = receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG1_IDX);
- state->be_comp_format2 = receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG2_IDX);
- state->be_comp_format3 = receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG3_IDX);
- state->be_sel = receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_BE_SEL_REG_IDX);
- state->be_raw16_config = receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_BE_RAW16_CONFIG_REG_IDX);
- state->be_raw18_config = receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_BE_RAW18_CONFIG_REG_IDX);
- state->be_force_raw8 = receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_BE_FORCE_RAW8_REG_IDX);
- state->be_irq_status = receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_BE_IRQ_STATUS_REG_IDX);
- state->be_irq_clear = receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_BE_IRQ_CLEAR_REG_IDX);
-}
-
-bool is_mipi_format_yuv420(
- const mipi_format_t mipi_format)
-{
- bool is_yuv420 = (
- (mipi_format == MIPI_FORMAT_YUV420_8) ||
- (mipi_format == MIPI_FORMAT_YUV420_10) ||
- (mipi_format == MIPI_FORMAT_YUV420_8_SHIFT) ||
- (mipi_format == MIPI_FORMAT_YUV420_10_SHIFT));
- /* MIPI_FORMAT_YUV420_8_LEGACY is not YUV420 */
-
- return is_yuv420;
-}
-
void receiver_set_compression(
const rx_ID_t ID,
const unsigned int cfg_ID,
@@ -361,282 +192,6 @@ void receiver_irq_clear(
port_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX, irq_info);
}
-static inline void capture_unit_get_state(
- const input_system_ID_t ID,
- const sub_system_ID_t sub_id,
- capture_unit_state_t *state)
-{
- assert(/*(sub_id >= CAPTURE_UNIT0_ID) &&*/ (sub_id <= CAPTURE_UNIT2_ID));
- assert(state);
-
- state->StartMode = input_system_sub_system_reg_load(ID,
- sub_id,
- CAPT_START_MODE_REG_ID);
- state->Start_Addr = input_system_sub_system_reg_load(ID,
- sub_id,
- CAPT_START_ADDR_REG_ID);
- state->Mem_Region_Size = input_system_sub_system_reg_load(ID,
- sub_id,
- CAPT_MEM_REGION_SIZE_REG_ID);
- state->Num_Mem_Regions = input_system_sub_system_reg_load(ID,
- sub_id,
- CAPT_NUM_MEM_REGIONS_REG_ID);
-// AM: Illegal read from following registers.
- /* state->Init = input_system_sub_system_reg_load(ID,
- sub_id,
- CAPT_INIT_REG_ID);
- state->Start = input_system_sub_system_reg_load(ID,
- sub_id,
- CAPT_START_REG_ID);
- state->Stop = input_system_sub_system_reg_load(ID,
- sub_id,
- CAPT_STOP_REG_ID);
- */
- state->Packet_Length = input_system_sub_system_reg_load(ID,
- sub_id,
- CAPT_PACKET_LENGTH_REG_ID);
- state->Received_Length = input_system_sub_system_reg_load(ID,
- sub_id,
- CAPT_RECEIVED_LENGTH_REG_ID);
- state->Received_Short_Packets = input_system_sub_system_reg_load(ID,
- sub_id,
- CAPT_RECEIVED_SHORT_PACKETS_REG_ID);
- state->Received_Long_Packets = input_system_sub_system_reg_load(ID,
- sub_id,
- CAPT_RECEIVED_LONG_PACKETS_REG_ID);
- state->Last_Command = input_system_sub_system_reg_load(ID,
- sub_id,
- CAPT_LAST_COMMAND_REG_ID);
- state->Next_Command = input_system_sub_system_reg_load(ID,
- sub_id,
- CAPT_NEXT_COMMAND_REG_ID);
- state->Last_Acknowledge = input_system_sub_system_reg_load(ID,
- sub_id,
- CAPT_LAST_ACKNOWLEDGE_REG_ID);
- state->Next_Acknowledge = input_system_sub_system_reg_load(ID,
- sub_id,
- CAPT_NEXT_ACKNOWLEDGE_REG_ID);
- state->FSM_State_Info = input_system_sub_system_reg_load(ID,
- sub_id,
- CAPT_FSM_STATE_INFO_REG_ID);
-}
-
-static inline void acquisition_unit_get_state(
- const input_system_ID_t ID,
- const sub_system_ID_t sub_id,
- acquisition_unit_state_t *state)
-{
- assert(sub_id == ACQUISITION_UNIT0_ID);
- assert(state);
-
- state->Start_Addr = input_system_sub_system_reg_load(ID,
- sub_id,
- ACQ_START_ADDR_REG_ID);
- state->Mem_Region_Size = input_system_sub_system_reg_load(ID,
- sub_id,
- ACQ_MEM_REGION_SIZE_REG_ID);
- state->Num_Mem_Regions = input_system_sub_system_reg_load(ID,
- sub_id,
- ACQ_NUM_MEM_REGIONS_REG_ID);
-// AM: Illegal read from following registers.
- /* state->Init = input_system_sub_system_reg_load(ID,
- sub_id,
- ACQ_INIT_REG_ID);
- */
- state->Received_Short_Packets = input_system_sub_system_reg_load(ID,
- sub_id,
- ACQ_RECEIVED_SHORT_PACKETS_REG_ID);
- state->Received_Long_Packets = input_system_sub_system_reg_load(ID,
- sub_id,
- ACQ_RECEIVED_LONG_PACKETS_REG_ID);
- state->Last_Command = input_system_sub_system_reg_load(ID,
- sub_id,
- ACQ_LAST_COMMAND_REG_ID);
- state->Next_Command = input_system_sub_system_reg_load(ID,
- sub_id,
- ACQ_NEXT_COMMAND_REG_ID);
- state->Last_Acknowledge = input_system_sub_system_reg_load(ID,
- sub_id,
- ACQ_LAST_ACKNOWLEDGE_REG_ID);
- state->Next_Acknowledge = input_system_sub_system_reg_load(ID,
- sub_id,
- ACQ_NEXT_ACKNOWLEDGE_REG_ID);
- state->FSM_State_Info = input_system_sub_system_reg_load(ID,
- sub_id,
- ACQ_FSM_STATE_INFO_REG_ID);
- state->Int_Cntr_Info = input_system_sub_system_reg_load(ID,
- sub_id,
- ACQ_INT_CNTR_INFO_REG_ID);
-}
-
-static inline void ctrl_unit_get_state(
- const input_system_ID_t ID,
- const sub_system_ID_t sub_id,
- ctrl_unit_state_t *state)
-{
- assert(sub_id == CTRL_UNIT0_ID);
- assert(state);
-
- state->captA_start_addr = input_system_sub_system_reg_load(ID,
- sub_id,
- ISYS_CTRL_CAPT_START_ADDR_A_REG_ID);
- state->captB_start_addr = input_system_sub_system_reg_load(ID,
- sub_id,
- ISYS_CTRL_CAPT_START_ADDR_B_REG_ID);
- state->captC_start_addr = input_system_sub_system_reg_load(ID,
- sub_id,
- ISYS_CTRL_CAPT_START_ADDR_C_REG_ID);
- state->captA_mem_region_size = input_system_sub_system_reg_load(ID,
- sub_id,
- ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_ID);
- state->captB_mem_region_size = input_system_sub_system_reg_load(ID,
- sub_id,
- ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_ID);
- state->captC_mem_region_size = input_system_sub_system_reg_load(ID,
- sub_id,
- ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_ID);
- state->captA_num_mem_regions = input_system_sub_system_reg_load(ID,
- sub_id,
- ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_ID);
- state->captB_num_mem_regions = input_system_sub_system_reg_load(ID,
- sub_id,
- ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_ID);
- state->captC_num_mem_regions = input_system_sub_system_reg_load(ID,
- sub_id,
- ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_ID);
- state->acq_start_addr = input_system_sub_system_reg_load(ID,
- sub_id,
- ISYS_CTRL_ACQ_START_ADDR_REG_ID);
- state->acq_mem_region_size = input_system_sub_system_reg_load(ID,
- sub_id,
- ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_ID);
- state->acq_num_mem_regions = input_system_sub_system_reg_load(ID,
- sub_id,
- ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_ID);
-// AM: Illegal read from following registers.
- /* state->ctrl_init = input_system_sub_system_reg_load(ID,
- sub_id,
- ISYS_CTRL_INIT_REG_ID);
- */
- state->last_cmd = input_system_sub_system_reg_load(ID,
- sub_id,
- ISYS_CTRL_LAST_COMMAND_REG_ID);
- state->next_cmd = input_system_sub_system_reg_load(ID,
- sub_id,
- ISYS_CTRL_NEXT_COMMAND_REG_ID);
- state->last_ack = input_system_sub_system_reg_load(ID,
- sub_id,
- ISYS_CTRL_LAST_ACKNOWLEDGE_REG_ID);
- state->next_ack = input_system_sub_system_reg_load(ID,
- sub_id,
- ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_ID);
- state->top_fsm_state = input_system_sub_system_reg_load(ID,
- sub_id,
- ISYS_CTRL_FSM_STATE_INFO_REG_ID);
- state->captA_fsm_state = input_system_sub_system_reg_load(ID,
- sub_id,
- ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_ID);
- state->captB_fsm_state = input_system_sub_system_reg_load(ID,
- sub_id,
- ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_ID);
- state->captC_fsm_state = input_system_sub_system_reg_load(ID,
- sub_id,
- ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_ID);
- state->acq_fsm_state = input_system_sub_system_reg_load(ID,
- sub_id,
- ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_ID);
- state->capt_reserve_one_mem_region = input_system_sub_system_reg_load(ID,
- sub_id,
- ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_ID);
-}
-
-static inline void mipi_port_get_state(
- const rx_ID_t ID,
- const enum mipi_port_id port_ID,
- mipi_port_state_t *state)
-{
- int i;
-
- assert(ID < N_RX_ID);
- assert(port_ID < N_MIPI_PORT_ID);
- assert(state);
-
- state->device_ready = receiver_port_reg_load(ID,
- port_ID, _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX);
- state->irq_status = receiver_port_reg_load(ID,
- port_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX);
- state->irq_enable = receiver_port_reg_load(ID,
- port_ID, _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX);
- state->timeout_count = receiver_port_reg_load(ID,
- port_ID, _HRT_CSS_RECEIVER_TIMEOUT_COUNT_REG_IDX);
- state->init_count = (uint16_t)receiver_port_reg_load(ID,
- port_ID, _HRT_CSS_RECEIVER_INIT_COUNT_REG_IDX);
- state->raw16_18 = (uint16_t)receiver_port_reg_load(ID,
- port_ID, _HRT_CSS_RECEIVER_RAW16_18_DATAID_REG_IDX);
- state->sync_count = receiver_port_reg_load(ID,
- port_ID, _HRT_CSS_RECEIVER_SYNC_COUNT_REG_IDX);
- state->rx_count = receiver_port_reg_load(ID,
- port_ID, _HRT_CSS_RECEIVER_RX_COUNT_REG_IDX);
-
- for (i = 0; i < MIPI_4LANE_CFG ; i++) {
- state->lane_sync_count[i] = (uint8_t)((state->sync_count) >> (i * 8));
- state->lane_rx_count[i] = (uint8_t)((state->rx_count) >> (i * 8));
- }
-}
-
-static inline void rx_channel_get_state(
- const rx_ID_t ID,
- const unsigned int ch_id,
- rx_channel_state_t *state)
-{
- int i;
-
- assert(ID < N_RX_ID);
- assert(ch_id < N_RX_CHANNEL_ID);
- assert(state);
-
- switch (ch_id) {
- case 0:
- state->comp_scheme0 = receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX);
- state->comp_scheme1 = receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX);
- break;
- case 1:
- state->comp_scheme0 = receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX);
- state->comp_scheme1 = receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX);
- break;
- case 2:
- state->comp_scheme0 = receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX);
- state->comp_scheme1 = receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX);
- break;
- case 3:
- state->comp_scheme0 = receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX);
- state->comp_scheme1 = receiver_reg_load(ID,
- _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX);
- break;
- }
-
- /* See Table 7.1.17,..., 7.1.24 */
- for (i = 0; i < 6; i++) {
- u8 val = (uint8_t)((state->comp_scheme0) >> (i * 5)) & 0x1f;
-
- state->comp[i] = (mipi_compressor_t)(val & 0x07);
- state->pred[i] = (mipi_predictor_t)((val & 0x18) >> 3);
- }
- for (i = 6; i < N_MIPI_FORMAT_CUSTOM; i++) {
- u8 val = (uint8_t)((state->comp_scheme0) >> ((i - 6) * 5)) & 0x1f;
-
- state->comp[i] = (mipi_compressor_t)(val & 0x07);
- state->pred[i] = (mipi_predictor_t)((val & 0x18) >> 3);
- }
-}
-
// MW: "2400" in the name is not good, but this is to avoid a naming conflict
static input_system_cfg2400_t config;
@@ -1787,4 +1342,3 @@ static input_system_err_t input_system_multiplexer_cfg(
*flags |= INPUT_SYSTEM_CFG_FLAG_SET;
return INPUT_SYSTEM_ERR_NO_ERROR;
}
-#endif
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq.c
index 80b5fd0dc9f6..4697d8d7b915 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq.c
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq.c
@@ -225,25 +225,6 @@ void irq_raise(
return;
}
-void irq_controller_get_state(const irq_ID_t ID,
- struct irq_controller_state *state)
-{
- assert(ID < N_IRQ_ID);
- assert(state);
-
- state->irq_edge = irq_reg_load(ID,
- _HRT_IRQ_CONTROLLER_EDGE_REG_IDX);
- state->irq_mask = irq_reg_load(ID,
- _HRT_IRQ_CONTROLLER_MASK_REG_IDX);
- state->irq_status = irq_reg_load(ID,
- _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
- state->irq_enable = irq_reg_load(ID,
- _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX);
- state->irq_level_not_pulse = irq_reg_load(ID,
- _HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX);
- return;
-}
-
bool any_virq_signal(void)
{
unsigned int irq_status = irq_reg_load(IRQ0_ID,
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_local.h
index 6a25345ae88e..8fd1bce85214 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_local.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_local.h
@@ -115,12 +115,4 @@ struct virq_info {
hrt_data irq_status_reg[N_IRQ_ID];
};
-struct irq_controller_state {
- unsigned int irq_edge;
- unsigned int irq_mask;
- unsigned int irq_status;
- unsigned int irq_enable;
- unsigned int irq_level_not_pulse;
-};
-
#endif /* __IRQ_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp.c
index 4ad5e2db8a89..b78cc324da6a 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp.c
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp.c
@@ -39,66 +39,6 @@ void cnd_isp_irq_enable(
return;
}
-void isp_get_state(
- const isp_ID_t ID,
- isp_state_t *state,
- isp_stall_t *stall)
-{
- hrt_data sc = isp_ctrl_load(ID, ISP_SC_REG);
-
- assert(state);
- assert(stall);
-
-#if defined(_hrt_sysmem_ident_address)
- /* Patch to avoid compiler unused symbol warning in C_RUN build */
- (void)__hrt_sysmem_ident_address;
- (void)_hrt_sysmem_map_var;
-#endif
-
- state->pc = isp_ctrl_load(ID, ISP_PC_REG);
- state->status_register = sc;
- state->is_broken = isp_ctrl_getbit(ID, ISP_SC_REG, ISP_BROKEN_BIT);
- state->is_idle = isp_ctrl_getbit(ID, ISP_SC_REG, ISP_IDLE_BIT);
- state->is_sleeping = isp_ctrl_getbit(ID, ISP_SC_REG, ISP_SLEEPING_BIT);
- state->is_stalling = isp_ctrl_getbit(ID, ISP_SC_REG, ISP_STALLING_BIT);
- stall->stat_ctrl =
- !isp_ctrl_getbit(ID, ISP_CTRL_SINK_REG, ISP_CTRL_SINK_BIT);
- stall->pmem =
- !isp_ctrl_getbit(ID, ISP_PMEM_SINK_REG, ISP_PMEM_SINK_BIT);
- stall->dmem =
- !isp_ctrl_getbit(ID, ISP_DMEM_SINK_REG, ISP_DMEM_SINK_BIT);
- stall->vmem =
- !isp_ctrl_getbit(ID, ISP_VMEM_SINK_REG, ISP_VMEM_SINK_BIT);
- stall->fifo0 =
- !isp_ctrl_getbit(ID, ISP_FIFO0_SINK_REG, ISP_FIFO0_SINK_BIT);
- stall->fifo1 =
- !isp_ctrl_getbit(ID, ISP_FIFO1_SINK_REG, ISP_FIFO1_SINK_BIT);
- stall->fifo2 =
- !isp_ctrl_getbit(ID, ISP_FIFO2_SINK_REG, ISP_FIFO2_SINK_BIT);
- stall->fifo3 =
- !isp_ctrl_getbit(ID, ISP_FIFO3_SINK_REG, ISP_FIFO3_SINK_BIT);
- stall->fifo4 =
- !isp_ctrl_getbit(ID, ISP_FIFO4_SINK_REG, ISP_FIFO4_SINK_BIT);
- stall->fifo5 =
- !isp_ctrl_getbit(ID, ISP_FIFO5_SINK_REG, ISP_FIFO5_SINK_BIT);
- stall->fifo6 =
- !isp_ctrl_getbit(ID, ISP_FIFO6_SINK_REG, ISP_FIFO6_SINK_BIT);
- stall->vamem1 =
- !isp_ctrl_getbit(ID, ISP_VAMEM1_SINK_REG, ISP_VAMEM1_SINK_BIT);
- stall->vamem2 =
- !isp_ctrl_getbit(ID, ISP_VAMEM2_SINK_REG, ISP_VAMEM2_SINK_BIT);
- stall->vamem3 =
- !isp_ctrl_getbit(ID, ISP_VAMEM3_SINK_REG, ISP_VAMEM3_SINK_BIT);
- stall->hmem =
- !isp_ctrl_getbit(ID, ISP_HMEM_SINK_REG, ISP_HMEM_SINK_BIT);
- /*
- stall->icache_master =
- !isp_ctrl_getbit(ID, ISP_ICACHE_MT_SINK_REG,
- ISP_ICACHE_MT_SINK_BIT);
- */
- return;
-}
-
/* ISP functions to control the ISP state from the host, even in crun. */
/* Inspect readiness of an ISP indexed by ID */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h
index 4dbec4063b3d..fb98696cc44d 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h
@@ -22,35 +22,4 @@
#define HIVE_ISP_VMEM_MASK ((1U << ISP_VMEM_ELEMBITS) - 1)
-typedef struct isp_state_s isp_state_t;
-typedef struct isp_stall_s isp_stall_t;
-
-struct isp_state_s {
- int pc;
- int status_register;
- bool is_broken;
- bool is_idle;
- bool is_sleeping;
- bool is_stalling;
-};
-
-struct isp_stall_s {
- bool fifo0;
- bool fifo1;
- bool fifo2;
- bool fifo3;
- bool fifo4;
- bool fifo5;
- bool fifo6;
- bool stat_ctrl;
- bool dmem;
- bool vmem;
- bool vamem1;
- bool vamem2;
- bool vamem3;
- bool hmem;
- bool pmem;
- bool icache_master;
-};
-
#endif /* __ISP_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp.c
index aae18465b6ae..3dc4d1289ea1 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp.c
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp.c
@@ -33,50 +33,3 @@ void cnd_sp_irq_enable(
sp_ctrl_clearbit(ID, SP_IRQ_READY_REG, SP_IRQ_READY_BIT);
}
}
-
-void sp_get_state(
- const sp_ID_t ID,
- sp_state_t *state,
- sp_stall_t *stall)
-{
- hrt_data sc = sp_ctrl_load(ID, SP_SC_REG);
-
- assert(state);
- assert(stall);
-
- state->pc = sp_ctrl_load(ID, SP_PC_REG);
- state->status_register = sc;
- state->is_broken = (sc & (1U << SP_BROKEN_BIT)) != 0;
- state->is_idle = (sc & (1U << SP_IDLE_BIT)) != 0;
- state->is_sleeping = (sc & (1U << SP_SLEEPING_BIT)) != 0;
- state->is_stalling = (sc & (1U << SP_STALLING_BIT)) != 0;
- stall->fifo0 =
- !sp_ctrl_getbit(ID, SP_FIFO0_SINK_REG, SP_FIFO0_SINK_BIT);
- stall->fifo1 =
- !sp_ctrl_getbit(ID, SP_FIFO1_SINK_REG, SP_FIFO1_SINK_BIT);
- stall->fifo2 =
- !sp_ctrl_getbit(ID, SP_FIFO2_SINK_REG, SP_FIFO2_SINK_BIT);
- stall->fifo3 =
- !sp_ctrl_getbit(ID, SP_FIFO3_SINK_REG, SP_FIFO3_SINK_BIT);
- stall->fifo4 =
- !sp_ctrl_getbit(ID, SP_FIFO4_SINK_REG, SP_FIFO4_SINK_BIT);
- stall->fifo5 =
- !sp_ctrl_getbit(ID, SP_FIFO5_SINK_REG, SP_FIFO5_SINK_BIT);
- stall->fifo6 =
- !sp_ctrl_getbit(ID, SP_FIFO6_SINK_REG, SP_FIFO6_SINK_BIT);
- stall->fifo7 =
- !sp_ctrl_getbit(ID, SP_FIFO7_SINK_REG, SP_FIFO7_SINK_BIT);
- stall->fifo8 =
- !sp_ctrl_getbit(ID, SP_FIFO8_SINK_REG, SP_FIFO8_SINK_BIT);
- stall->fifo9 =
- !sp_ctrl_getbit(ID, SP_FIFO9_SINK_REG, SP_FIFO9_SINK_BIT);
- stall->fifoa =
- !sp_ctrl_getbit(ID, SP_FIFOA_SINK_REG, SP_FIFOA_SINK_BIT);
- stall->dmem =
- !sp_ctrl_getbit(ID, SP_DMEM_SINK_REG, SP_DMEM_SINK_BIT);
- stall->control_master =
- !sp_ctrl_getbit(ID, SP_CTRL_MT_SINK_REG, SP_CTRL_MT_SINK_BIT);
- stall->icache_master =
- !sp_ctrl_getbit(ID, SP_ICACHE_MT_SINK_REG,
- SP_ICACHE_MT_SINK_BIT);
-}
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_local.h
index 2956c7023b33..e22d25a902f4 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_local.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_local.h
@@ -19,32 +19,6 @@
#include <type_support.h>
#include "sp_global.h"
-struct sp_state_s {
- int pc;
- int status_register;
- bool is_broken;
- bool is_idle;
- bool is_sleeping;
- bool is_stalling;
-};
-
-struct sp_stall_s {
- bool fifo0;
- bool fifo1;
- bool fifo2;
- bool fifo3;
- bool fifo4;
- bool fifo5;
- bool fifo6;
- bool fifo7;
- bool fifo8;
- bool fifo9;
- bool fifoa;
- bool dmem;
- bool control_master;
- bool icache_master;
-};
-
#define sp_address_of(var) (HIVE_ADDR_ ## var)
/*
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h
index 3d6621f2fa96..693154e8ec2f 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h
@@ -16,7 +16,6 @@
#ifndef __CSI_RX_PUBLIC_H_INCLUDED__
#define __CSI_RX_PUBLIC_H_INCLUDED__
-#ifdef ISP2401
/*****************************************************
*
* Native command interface (NCI).
@@ -132,5 +131,4 @@ void csi_rx_be_ctrl_reg_store(
const hrt_address reg,
const hrt_data value);
/* end of DLI */
-#endif /* ISP2401 */
#endif /* __CSI_RX_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/dma_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/dma_public.h
index a23cbc9a2129..6fed47f04594 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/dma_public.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/dma_public.h
@@ -18,19 +18,6 @@
#include "system_local.h"
-typedef struct dma_state_s dma_state_t;
-
-/*! Read the control registers of DMA[ID]
-
- \param ID[in] DMA identifier
- \param state[out] input formatter state structure
-
- \return none, state = DMA[ID].state
- */
-void dma_get_state(
- const dma_ID_t ID,
- dma_state_t *state);
-
/*! Write to a control register of DMA[ID]
\param ID[in] DMA identifier
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/irq_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/irq_public.h
index d335e7b0a76e..43787ab64078 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/irq_public.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/irq_public.h
@@ -19,16 +19,6 @@
#include <type_support.h>
#include "system_local.h"
-/*! Read the control registers of IRQ[ID]
-
- \param ID[in] IRQ identifier
- \param state[out] irq controller state structure
-
- \return none, state = IRQ[ID].state
- */
-void irq_controller_get_state(const irq_ID_t ID,
- struct irq_controller_state *state);
-
/*! Write to a control register of IRQ[ID]
\param ID[in] IRQ identifier
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isp_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isp_public.h
index a8ff75c639e5..34dd7f912df6 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isp_public.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isp_public.h
@@ -30,19 +30,6 @@ void cnd_isp_irq_enable(
const isp_ID_t ID,
const bool cnd);
-/*! Read the state of cell ISP[ID]
-
- \param ID[in] ISP identifier
- \param state[out] isp state structure
- \param stall[out] isp stall conditions
-
- \return none, state = ISP[ID].state, stall = ISP[ID].stall
- */
-void isp_get_state(
- const isp_ID_t ID,
- isp_state_t *state,
- isp_stall_t *stall);
-
/*! Write to the status and control register of ISP[ID]
\param ID[in] ISP identifier
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_dma_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_dma_public.h
index d9b6af898c06..f18a057adb0f 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_dma_public.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_dma_public.h
@@ -16,7 +16,6 @@
#ifndef __ISYS_DMA_PUBLIC_H_INCLUDED__
#define __ISYS_DMA_PUBLIC_H_INCLUDED__
-#ifdef ISP2401
#include "system_local.h"
#include "type_support.h"
@@ -34,6 +33,5 @@ void isys2401_dma_set_max_burst_size(
const isys2401_dma_ID_t dma_id,
uint32_t max_burst_size);
-#endif /* ISP2401 */
#endif /* __ISYS_DMA_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_irq_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_irq_public.h
index 736cbc4e3705..2b13688256cf 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_irq_public.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_irq_public.h
@@ -19,7 +19,6 @@
#include "isys_irq_global.h"
#include "isys_irq_local.h"
-#if defined(ISP2401)
void isys_irqc_state_get(const isys_irq_ID_t isys_irqc_id,
isys_irqc_state_t *state);
@@ -36,6 +35,5 @@ hrt_data isys_irqc_reg_load(const isys_irq_ID_t isys_irqc_id,
void isys_irqc_status_enable(const isys_irq_ID_t isys_irqc_id);
-#endif /* defined(ISP2401) */
#endif /* __ISYS_IRQ_PUBLIC_H__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_public.h
deleted file mode 100644
index dac53e324118..000000000000
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_public.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __ISYS_PUBLIC_H_INCLUDED__
-#define __ISYS_PUBLIC_H_INCLUDED__
-
-#ifdef ISP2401
-/*! Read the state of INPUT_SYSTEM[ID]
- \param ID[in] INPUT_SYSTEM identifier
- \param state[out] pointer to input system state structure
- \return none, state = INPUT_SYSTEM[ID].state
- */
-STORAGE_CLASS_INPUT_SYSTEM_H input_system_err_t input_system_get_state(
- const input_system_ID_t ID,
- input_system_state_t *state);
-/*! Dump the state of INPUT_SYSTEM[ID]
- \param ID[in] INPUT_SYSTEM identifier
- \param state[in] pointer to input system state structure
- \return none
- \depends on host supplied print function as part of ia_css_init()
- */
-STORAGE_CLASS_INPUT_SYSTEM_H void input_system_dump_state(
- const input_system_ID_t ID,
- input_system_state_t *state);
-#endif /* ISP2401 */
-#endif /* __ISYS_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/pixelgen_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/pixelgen_public.h
index 40a9fb6d7761..da10e6b98c63 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/pixelgen_public.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/pixelgen_public.h
@@ -16,7 +16,6 @@
#ifndef __PIXELGEN_PUBLIC_H_INCLUDED__
#define __PIXELGEN_PUBLIC_H_INCLUDED__
-#ifdef ISP2401
/*****************************************************
*
* Native command interface (NCI).
@@ -76,5 +75,4 @@ STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_reg_store(
const hrt_data value);
/* end of DLI */
-#endif /* ISP2401 */
#endif /* __PIXELGEN_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/sp_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/sp_public.h
index b0b7f2e27854..e9166535ce09 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/sp_public.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/sp_public.h
@@ -19,9 +19,6 @@
#include <type_support.h>
#include "system_local.h"
-typedef struct sp_state_s sp_state_t;
-typedef struct sp_stall_s sp_stall_t;
-
/*! Enable or disable the program complete irq signal of SP[ID]
\param ID[in] SP identifier
@@ -33,19 +30,6 @@ void cnd_sp_irq_enable(
const sp_ID_t ID,
const bool cnd);
-/*! Read the state of cell SP[ID]
-
- \param ID[in] SP identifier
- \param state[out] sp state structure
- \param stall[out] isp stall conditions
-
- \return none, state = SP[ID].state, stall = SP[ID].stall
- */
-void sp_get_state(
- const sp_ID_t ID,
- sp_state_t *state,
- sp_stall_t *stall);
-
/*! Write to the status and control register of SP[ID]
\param ID[in] SP identifier
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_irq.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_irq.h
index 001c55ea970b..952b633fdca7 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_irq.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_irq.h
@@ -19,10 +19,8 @@
#include <type_support.h>
#include <system_local.h>
-#if defined(ISP2401)
#include "isys_irq_public.h"
-#endif /* defined(ISP2401) */
#endif /* __IA_CSS_ISYS_IRQ_H__ */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_acc_types.h b/drivers/staging/media/atomisp/pci/ia_css_acc_types.h
index a20879aedef6..d6e52b4971d6 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_acc_types.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_acc_types.h
@@ -331,11 +331,7 @@ struct ia_css_sp_info {
of DDR debug queue */
u32 perf_counter_input_system_error; /** input system perf
counter array */
-#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG
- u32 debug_wait; /** thread/pipe post mortem debug */
- u32 debug_stage; /** thread/pipe post mortem debug */
- u32 debug_stripe; /** thread/pipe post mortem debug */
-#endif
+
u32 threads_stack; /** sp thread's stack pointers */
u32 threads_stack_size; /** sp thread's stack sizes */
u32 curr_binary_id; /** current binary id */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_mipi.h b/drivers/staging/media/atomisp/pci/ia_css_mipi.h
index 9e50e1c619be..cd6e0111d9f4 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_mipi.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_mipi.h
@@ -25,22 +25,6 @@
#include "ia_css_stream_format.h"
#include "ia_css_input_port.h"
-/* @brief Register size of a CSS MIPI frame for check during capturing.
- *
- * @param[in] port CSI-2 port this check is registered.
- * @param[in] size_mem_words The frame size in memory words (32B).
- * @return Return the error in case of failure. E.g. MAX_NOF_ENTRIES REACHED
- *
- * Register size of a CSS MIPI frame to check during capturing. Up to
- * IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES entries per port allowed. Entries are reset
- * when stream is stopped.
- *
- *
- */
-int
-ia_css_mipi_frame_enable_check_on_size(const enum mipi_port_id port,
- const unsigned int size_mem_words);
-
/* @brief Calculate the size of a mipi frame.
*
* @param[in] width The width (in pixels) of the frame.
diff --git a/drivers/staging/media/atomisp/pci/input_system_local.h b/drivers/staging/media/atomisp/pci/input_system_local.h
index 357987d629cd..12f7acfeb79c 100644
--- a/drivers/staging/media/atomisp/pci/input_system_local.h
+++ b/drivers/staging/media/atomisp/pci/input_system_local.h
@@ -138,8 +138,5 @@ struct rx_cfg_s {
bool is_two_ppc;
};
-#ifdef ISP2401
-# include "isp2401_input_system_local.h"
-#else
-# include "isp2400_input_system_local.h"
-#endif
+#include "isp2401_input_system_local.h"
+#include "isp2400_input_system_local.h"
diff --git a/drivers/staging/media/atomisp/pci/input_system_private.h b/drivers/staging/media/atomisp/pci/input_system_private.h
index 889f204e77d5..148ba2ca2290 100644
--- a/drivers/staging/media/atomisp/pci/input_system_private.h
+++ b/drivers/staging/media/atomisp/pci/input_system_private.h
@@ -4,8 +4,5 @@
* (c) 2020 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
*/
-#ifdef ISP2401
-# include "isp2401_input_system_private.h"
-#else
-# include "isp2400_input_system_private.h"
-#endif
+#include "isp2401_input_system_private.h"
+#include "isp2400_input_system_private.h"
diff --git a/drivers/staging/media/atomisp/pci/input_system_public.h b/drivers/staging/media/atomisp/pci/input_system_public.h
index 3f5167fd6643..06b19434b6c0 100644
--- a/drivers/staging/media/atomisp/pci/input_system_public.h
+++ b/drivers/staging/media/atomisp/pci/input_system_public.h
@@ -4,6 +4,4 @@
* (c) 2020 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
*/
-#ifndef ISP2401
-# include "isp2400_input_system_public.h"
-#endif
+#include "isp2400_input_system_public.h"
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c
index 646d6e39c1e5..40d4a0521562 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c
@@ -73,17 +73,9 @@ int ia_css_raw_config(struct sh_css_isp_raw_isp_config *to,
const struct ia_css_frame_info *internal_info = from->internal_info;
int ret;
-#if !defined(ISP2401)
- /* 2401 input system uses input width width */
- in_info = internal_info;
-#else
- /*in some cases, in_info is NULL*/
- if (in_info)
- (void)internal_info;
- else
+ if (!IS_ISP2401 || !in_info)
in_info = internal_info;
-#endif
ret = ia_css_dma_configure_from_info(&to->port_b, in_info);
if (ret)
return ret;
@@ -99,11 +91,12 @@ int ia_css_raw_config(struct sh_css_isp_raw_isp_config *to,
to->two_ppc = from->two_ppc;
to->stream_format = css2isp_stream_format(from->stream_format);
to->deinterleaved = from->deinterleaved;
-#if defined(ISP2401)
- to->start_column = in_info->crop_info.start_column;
- to->start_line = in_info->crop_info.start_line;
- to->enable_left_padding = from->enable_left_padding;
-#endif
+
+ if (IS_ISP2401) {
+ to->start_column = in_info->crop_info.start_column;
+ to->start_line = in_info->crop_info.start_line;
+ to->enable_left_padding = from->enable_left_padding;
+ }
return 0;
}
diff --git a/drivers/staging/media/atomisp/pci/isp2400_input_system_local.h b/drivers/staging/media/atomisp/pci/isp2400_input_system_local.h
index c3ae5014a039..f9b9c3ae50aa 100644
--- a/drivers/staging/media/atomisp/pci/isp2400_input_system_local.h
+++ b/drivers/staging/media/atomisp/pci/isp2400_input_system_local.h
@@ -13,8 +13,8 @@
* more details.
*/
-#ifndef __INPUT_SYSTEM_LOCAL_H_INCLUDED__
-#define __INPUT_SYSTEM_LOCAL_H_INCLUDED__
+#ifndef __INPUT_SYSTEM_2400_LOCAL_H_INCLUDED__
+#define __INPUT_SYSTEM_2400_LOCAL_H_INCLUDED__
#include "input_system_defs.h" /* HIVE_ISYS_GPREG_MULTICAST_A_IDX,... */
@@ -163,45 +163,39 @@ struct input_system_cfg2400_s {
#define _HRT_CSS_RECEIVER_DATA_TIMEOUT_IDX _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_IDX
#define _HRT_CSS_RECEIVER_DATA_TIMEOUT_BITS _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_BITS
-typedef struct capture_unit_state_s capture_unit_state_t;
-typedef struct acquisition_unit_state_s acquisition_unit_state_t;
-typedef struct ctrl_unit_state_s ctrl_unit_state_t;
-
typedef enum {
- MIPI_FORMAT_RGB888 = 0,
- MIPI_FORMAT_RGB555,
- MIPI_FORMAT_RGB444,
- MIPI_FORMAT_RGB565,
- MIPI_FORMAT_RGB666,
- MIPI_FORMAT_RAW8, /* 5 */
- MIPI_FORMAT_RAW10,
- MIPI_FORMAT_RAW6,
- MIPI_FORMAT_RAW7,
- MIPI_FORMAT_RAW12,
- MIPI_FORMAT_RAW14, /* 10 */
- MIPI_FORMAT_YUV420_8,
- MIPI_FORMAT_YUV420_10,
- MIPI_FORMAT_YUV422_8,
- MIPI_FORMAT_YUV422_10,
- MIPI_FORMAT_CUSTOM0, /* 15 */
- MIPI_FORMAT_YUV420_8_LEGACY,
- MIPI_FORMAT_EMBEDDED,
- MIPI_FORMAT_CUSTOM1,
- MIPI_FORMAT_CUSTOM2,
- MIPI_FORMAT_CUSTOM3, /* 20 */
- MIPI_FORMAT_CUSTOM4,
- MIPI_FORMAT_CUSTOM5,
- MIPI_FORMAT_CUSTOM6,
- MIPI_FORMAT_CUSTOM7,
- MIPI_FORMAT_YUV420_8_SHIFT, /* 25 */
- MIPI_FORMAT_YUV420_10_SHIFT,
- MIPI_FORMAT_RAW16,
- MIPI_FORMAT_RAW18,
- N_MIPI_FORMAT,
-} mipi_format_t;
+ MIPI_FORMAT_2400_RGB888 = 0,
+ MIPI_FORMAT_2400_RGB555,
+ MIPI_FORMAT_2400_RGB444,
+ MIPI_FORMAT_2400_RGB565,
+ MIPI_FORMAT_2400_RGB666,
+ MIPI_FORMAT_2400_RAW8, /* 5 */
+ MIPI_FORMAT_2400_RAW10,
+ MIPI_FORMAT_2400_RAW6,
+ MIPI_FORMAT_2400_RAW7,
+ MIPI_FORMAT_2400_RAW12,
+ MIPI_FORMAT_2400_RAW14, /* 10 */
+ MIPI_FORMAT_2400_YUV420_8,
+ MIPI_FORMAT_2400_YUV420_10,
+ MIPI_FORMAT_2400_YUV422_8,
+ MIPI_FORMAT_2400_YUV422_10,
+ MIPI_FORMAT_2400_CUSTOM0, /* 15 */
+ MIPI_FORMAT_2400_YUV420_8_LEGACY,
+ MIPI_FORMAT_2400_EMBEDDED,
+ MIPI_FORMAT_2400_CUSTOM1,
+ MIPI_FORMAT_2400_CUSTOM2,
+ MIPI_FORMAT_2400_CUSTOM3, /* 20 */
+ MIPI_FORMAT_2400_CUSTOM4,
+ MIPI_FORMAT_2400_CUSTOM5,
+ MIPI_FORMAT_2400_CUSTOM6,
+ MIPI_FORMAT_2400_CUSTOM7,
+ MIPI_FORMAT_2400_YUV420_8_SHIFT, /* 25 */
+ MIPI_FORMAT_2400_YUV420_10_SHIFT,
+ MIPI_FORMAT_2400_RAW16,
+ MIPI_FORMAT_2400_RAW18,
+ N_MIPI_FORMAT_2400,
+} mipi_format_2400_t;
-#define MIPI_FORMAT_JPEG MIPI_FORMAT_CUSTOM0
-#define MIPI_FORMAT_BINARY_8 MIPI_FORMAT_CUSTOM0
#define N_MIPI_FORMAT_CUSTOM 8
/* The number of stores for compressed format types */
@@ -246,130 +240,4 @@ static const hrt_address __maybe_unused SUB_SYSTEM_OFFSET[N_SUB_SYSTEM_ID] = {
0x0000C000UL
};
-struct capture_unit_state_s {
- int Packet_Length;
- int Received_Length;
- int Received_Short_Packets;
- int Received_Long_Packets;
- int Last_Command;
- int Next_Command;
- int Last_Acknowledge;
- int Next_Acknowledge;
- int FSM_State_Info;
- int StartMode;
- int Start_Addr;
- int Mem_Region_Size;
- int Num_Mem_Regions;
- /* int Init; write-only registers
- int Start;
- int Stop; */
-};
-
-struct acquisition_unit_state_s {
- /* int Init; write-only register */
- int Received_Short_Packets;
- int Received_Long_Packets;
- int Last_Command;
- int Next_Command;
- int Last_Acknowledge;
- int Next_Acknowledge;
- int FSM_State_Info;
- int Int_Cntr_Info;
- int Start_Addr;
- int Mem_Region_Size;
- int Num_Mem_Regions;
-};
-
-struct ctrl_unit_state_s {
- int last_cmd;
- int next_cmd;
- int last_ack;
- int next_ack;
- int top_fsm_state;
- int captA_fsm_state;
- int captB_fsm_state;
- int captC_fsm_state;
- int acq_fsm_state;
- int captA_start_addr;
- int captB_start_addr;
- int captC_start_addr;
- int captA_mem_region_size;
- int captB_mem_region_size;
- int captC_mem_region_size;
- int captA_num_mem_regions;
- int captB_num_mem_regions;
- int captC_num_mem_regions;
- int acq_start_addr;
- int acq_mem_region_size;
- int acq_num_mem_regions;
- /* int ctrl_init; write only register */
- int capt_reserve_one_mem_region;
-};
-
-struct input_system_state_s {
- int str_multicastA_sel;
- int str_multicastB_sel;
- int str_multicastC_sel;
- int str_mux_sel;
- int str_mon_status;
- int str_mon_irq_cond;
- int str_mon_irq_en;
- int isys_srst;
- int isys_slv_reg_srst;
- int str_deint_portA_cnt;
- int str_deint_portB_cnt;
- struct capture_unit_state_s capture_unit[N_CAPTURE_UNIT_ID];
- struct acquisition_unit_state_s acquisition_unit[N_ACQUISITION_UNIT_ID];
- struct ctrl_unit_state_s ctrl_unit_state[N_CTRL_UNIT_ID];
-};
-
-struct mipi_port_state_s {
- int device_ready;
- int irq_status;
- int irq_enable;
- u32 timeout_count;
- u16 init_count;
- u16 raw16_18;
- u32 sync_count; /*4 x uint8_t */
- u32 rx_count; /*4 x uint8_t */
- u8 lane_sync_count[MIPI_4LANE_CFG];
- u8 lane_rx_count[MIPI_4LANE_CFG];
-};
-
-struct rx_channel_state_s {
- u32 comp_scheme0;
- u32 comp_scheme1;
- mipi_predictor_t pred[N_MIPI_FORMAT_CUSTOM];
- mipi_compressor_t comp[N_MIPI_FORMAT_CUSTOM];
-};
-
-struct receiver_state_s {
- u8 fs_to_ls_delay;
- u8 ls_to_data_delay;
- u8 data_to_le_delay;
- u8 le_to_fe_delay;
- u8 fe_to_fs_delay;
- u8 le_to_fs_delay;
- bool is_two_ppc;
- int backend_rst;
- u16 raw18;
- bool force_raw8;
- u16 raw16;
- struct mipi_port_state_s mipi_port_state[N_MIPI_PORT_ID];
- struct rx_channel_state_s rx_channel_state[N_RX_CHANNEL_ID];
- int be_gsp_acc_ovl;
- int be_srst;
- int be_is_two_ppc;
- int be_comp_format0;
- int be_comp_format1;
- int be_comp_format2;
- int be_comp_format3;
- int be_sel;
- int be_raw16_config;
- int be_raw18_config;
- int be_force_raw8;
- int be_irq_status;
- int be_irq_clear;
-};
-
#endif /* __INPUT_SYSTEM_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/isp2400_input_system_private.h b/drivers/staging/media/atomisp/pci/isp2400_input_system_private.h
index 9c39ca2da923..e011920f00e1 100644
--- a/drivers/staging/media/atomisp/pci/isp2400_input_system_private.h
+++ b/drivers/staging/media/atomisp/pci/isp2400_input_system_private.h
@@ -13,8 +13,8 @@
* more details.
*/
-#ifndef __INPUT_SYSTEM_PRIVATE_H_INCLUDED__
-#define __INPUT_SYSTEM_PRIVATE_H_INCLUDED__
+#ifndef __INPUT_SYSTEM_2400_PRIVATE_H_INCLUDED__
+#define __INPUT_SYSTEM_2400_PRIVATE_H_INCLUDED__
#include "input_system_public.h"
diff --git a/drivers/staging/media/atomisp/pci/isp2400_input_system_public.h b/drivers/staging/media/atomisp/pci/isp2400_input_system_public.h
index 85cb61e34192..447c7c5c55a1 100644
--- a/drivers/staging/media/atomisp/pci/isp2400_input_system_public.h
+++ b/drivers/staging/media/atomisp/pci/isp2400_input_system_public.h
@@ -13,47 +13,10 @@
* more details.
*/
-#ifndef __INPUT_SYSTEM_PUBLIC_H_INCLUDED__
-#define __INPUT_SYSTEM_PUBLIC_H_INCLUDED__
+#ifndef __INPUT_SYSTEM_2400_PUBLIC_H_INCLUDED__
+#define __INPUT_SYSTEM_2400_PUBLIC_H_INCLUDED__
#include <type_support.h>
-#ifdef ISP2401
-#include "isys_public.h"
-#else
-
-typedef struct input_system_state_s input_system_state_t;
-typedef struct receiver_state_s receiver_state_t;
-
-/*! Read the state of INPUT_SYSTEM[ID]
-
- \param ID[in] INPUT_SYSTEM identifier
- \param state[out] input system state structure
-
- \return none, state = INPUT_SYSTEM[ID].state
- */
-void input_system_get_state(
- const input_system_ID_t ID,
- input_system_state_t *state);
-
-/*! Read the state of RECEIVER[ID]
-
- \param ID[in] RECEIVER identifier
- \param state[out] receiver state structure
-
- \return none, state = RECEIVER[ID].state
- */
-void receiver_get_state(
- const rx_ID_t ID,
- receiver_state_t *state);
-
-/*! Flag whether a MIPI format is YUV420
-
- \param mipi_format[in] MIPI format
-
- \return mipi_format == YUV420
- */
-bool is_mipi_format_yuv420(
- const mipi_format_t mipi_format);
/*! Set compression parameters for cfg[cfg_ID] of RECEIVER[ID]
@@ -365,6 +328,5 @@ input_system_err_t input_system_gpfifo_channel_cfg(
u32 nof_frames,
target_cfg2400_t target
);
-#endif /* #ifdef ISP2401 */
#endif /* __INPUT_SYSTEM_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/isp2401_input_system_local.h b/drivers/staging/media/atomisp/pci/isp2401_input_system_local.h
index 74bfa10e670e..d9a8d575c58e 100644
--- a/drivers/staging/media/atomisp/pci/isp2401_input_system_local.h
+++ b/drivers/staging/media/atomisp/pci/isp2401_input_system_local.h
@@ -13,8 +13,8 @@
* more details.
*/
-#ifndef __INPUT_SYSTEM_LOCAL_H_INCLUDED__
-#define __INPUT_SYSTEM_LOCAL_H_INCLUDED__
+#ifndef __INPUT_SYSTEM_2401_LOCAL_H_INCLUDED__
+#define __INPUT_SYSTEM_2401_LOCAL_H_INCLUDED__
#include "csi_rx.h"
#include "pixelgen.h"
@@ -22,57 +22,49 @@
#include "isys_irq.h"
typedef enum {
- MIPI_FORMAT_SHORT1 = 0x08,
- MIPI_FORMAT_SHORT2,
- MIPI_FORMAT_SHORT3,
- MIPI_FORMAT_SHORT4,
- MIPI_FORMAT_SHORT5,
- MIPI_FORMAT_SHORT6,
- MIPI_FORMAT_SHORT7,
- MIPI_FORMAT_SHORT8,
- MIPI_FORMAT_EMBEDDED = 0x12,
- MIPI_FORMAT_YUV420_8 = 0x18,
- MIPI_FORMAT_YUV420_10,
- MIPI_FORMAT_YUV420_8_LEGACY,
- MIPI_FORMAT_YUV420_8_SHIFT = 0x1C,
- MIPI_FORMAT_YUV420_10_SHIFT,
- MIPI_FORMAT_YUV422_8 = 0x1E,
- MIPI_FORMAT_YUV422_10,
- MIPI_FORMAT_RGB444 = 0x20,
- MIPI_FORMAT_RGB555,
- MIPI_FORMAT_RGB565,
- MIPI_FORMAT_RGB666,
- MIPI_FORMAT_RGB888,
- MIPI_FORMAT_RAW6 = 0x28,
- MIPI_FORMAT_RAW7,
- MIPI_FORMAT_RAW8,
- MIPI_FORMAT_RAW10,
- MIPI_FORMAT_RAW12,
- MIPI_FORMAT_RAW14,
- MIPI_FORMAT_CUSTOM0 = 0x30,
- MIPI_FORMAT_CUSTOM1,
- MIPI_FORMAT_CUSTOM2,
- MIPI_FORMAT_CUSTOM3,
- MIPI_FORMAT_CUSTOM4,
- MIPI_FORMAT_CUSTOM5,
- MIPI_FORMAT_CUSTOM6,
- MIPI_FORMAT_CUSTOM7,
+ MIPI_FORMAT_2401_SHORT1 = 0x08,
+ MIPI_FORMAT_2401_SHORT2,
+ MIPI_FORMAT_2401_SHORT3,
+ MIPI_FORMAT_2401_SHORT4,
+ MIPI_FORMAT_2401_SHORT5,
+ MIPI_FORMAT_2401_SHORT6,
+ MIPI_FORMAT_2401_SHORT7,
+ MIPI_FORMAT_2401_SHORT8,
+ MIPI_FORMAT_2401_EMBEDDED = 0x12,
+ MIPI_FORMAT_2401_YUV420_8 = 0x18,
+ MIPI_FORMAT_2401_YUV420_10,
+ MIPI_FORMAT_2401_YUV420_8_LEGACY,
+ MIPI_FORMAT_2401_YUV420_8_SHIFT = 0x1C,
+ MIPI_FORMAT_2401_YUV420_10_SHIFT,
+ MIPI_FORMAT_2401_YUV422_8 = 0x1E,
+ MIPI_FORMAT_2401_YUV422_10,
+ MIPI_FORMAT_2401_RGB444 = 0x20,
+ MIPI_FORMAT_2401_RGB555,
+ MIPI_FORMAT_2401_RGB565,
+ MIPI_FORMAT_2401_RGB666,
+ MIPI_FORMAT_2401_RGB888,
+ MIPI_FORMAT_2401_RAW6 = 0x28,
+ MIPI_FORMAT_2401_RAW7,
+ MIPI_FORMAT_2401_RAW8,
+ MIPI_FORMAT_2401_RAW10,
+ MIPI_FORMAT_2401_RAW12,
+ MIPI_FORMAT_2401_RAW14,
+ MIPI_FORMAT_2401_CUSTOM0 = 0x30,
+ MIPI_FORMAT_2401_CUSTOM1,
+ MIPI_FORMAT_2401_CUSTOM2,
+ MIPI_FORMAT_2401_CUSTOM3,
+ MIPI_FORMAT_2401_CUSTOM4,
+ MIPI_FORMAT_2401_CUSTOM5,
+ MIPI_FORMAT_2401_CUSTOM6,
+ MIPI_FORMAT_2401_CUSTOM7,
//MIPI_FORMAT_RAW16, /*not supported by 2401*/
//MIPI_FORMAT_RAW18,
- N_MIPI_FORMAT
-} mipi_format_t;
+ N_MIPI_FORMAT_2401
+} mipi_format_2401_t;
#define N_MIPI_FORMAT_CUSTOM 8
/* The number of stores for compressed format types */
#define N_MIPI_COMPRESSOR_CONTEXT (N_RX_CHANNEL_ID * N_MIPI_FORMAT_CUSTOM)
-typedef struct input_system_state_s input_system_state_t;
-struct input_system_state_s {
- ibuf_ctrl_state_t ibuf_ctrl_state[N_IBUF_CTRL_ID];
- csi_rx_fe_ctrl_state_t csi_rx_fe_ctrl_state[N_CSI_RX_FRONTEND_ID];
- csi_rx_be_ctrl_state_t csi_rx_be_ctrl_state[N_CSI_RX_BACKEND_ID];
- pixelgen_ctrl_state_t pixelgen_ctrl_state[N_PIXELGEN_ID];
- stream2mmio_state_t stream2mmio_state[N_STREAM2MMIO_ID];
- isys_irqc_state_t isys_irqc_state[N_ISYS_IRQ_ID];
-};
+
#endif /* __INPUT_SYSTEM_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/isp2401_input_system_private.h b/drivers/staging/media/atomisp/pci/isp2401_input_system_private.h
index e4c76428f6dd..845ed0add027 100644
--- a/drivers/staging/media/atomisp/pci/isp2401_input_system_private.h
+++ b/drivers/staging/media/atomisp/pci/isp2401_input_system_private.h
@@ -13,8 +13,8 @@
* more details.
*/
-#ifndef __INPUT_SYSTEM_PRIVATE_H_INCLUDED__
-#define __INPUT_SYSTEM_PRIVATE_H_INCLUDED__
+#ifndef __INPUT_SYSTEM_2401_PRIVATE_H_INCLUDED__
+#define __INPUT_SYSTEM_2401_PRIVATE_H_INCLUDED__
#include "input_system_public.h"
@@ -231,112 +231,4 @@ static inline void ibuf_ctrl_dump_state(const ibuf_ctrl_ID_t ID,
}
}
-static inline input_system_err_t
-input_system_get_state(const input_system_ID_t ID,
- input_system_state_t *state)
-{
- u32 i;
-
- (void)(ID);
-
- /* get the states of all CSI RX frontend devices */
- for (i = 0; i < N_CSI_RX_FRONTEND_ID; i++) {
- csi_rx_fe_ctrl_get_state(
- (csi_rx_frontend_ID_t)i,
- &state->csi_rx_fe_ctrl_state[i]);
- }
-
- /* get the states of all CIS RX backend devices */
- for (i = 0; i < N_CSI_RX_BACKEND_ID; i++) {
- csi_rx_be_ctrl_get_state(
- (csi_rx_backend_ID_t)i,
- &state->csi_rx_be_ctrl_state[i]);
- }
-
- /* get the states of all pixelgen devices */
- for (i = 0; i < N_PIXELGEN_ID; i++) {
- pixelgen_ctrl_get_state(
- (pixelgen_ID_t)i,
- &state->pixelgen_ctrl_state[i]);
- }
-
- /* get the states of all stream2mmio devices */
- for (i = 0; i < N_STREAM2MMIO_ID; i++) {
- stream2mmio_get_state(
- (stream2mmio_ID_t)i,
- &state->stream2mmio_state[i]);
- }
-
- /* get the states of all ibuf-controller devices */
- for (i = 0; i < N_IBUF_CTRL_ID; i++) {
- ibuf_ctrl_get_state(
- (ibuf_ctrl_ID_t)i,
- &state->ibuf_ctrl_state[i]);
- }
-
- /* get the states of all isys irq controllers */
- for (i = 0; i < N_ISYS_IRQ_ID; i++) {
- isys_irqc_state_get((isys_irq_ID_t)i, &state->isys_irqc_state[i]);
- }
-
- /* TODO: get the states of all ISYS2401 DMA devices */
- for (i = 0; i < N_ISYS2401_DMA_ID; i++) {
- }
-
- return INPUT_SYSTEM_ERR_NO_ERROR;
-}
-
-static inline void input_system_dump_state(const input_system_ID_t ID,
- input_system_state_t *state)
-{
- u32 i;
-
- (void)(ID);
-
- /* dump the states of all CSI RX frontend devices */
- for (i = 0; i < N_CSI_RX_FRONTEND_ID; i++) {
- csi_rx_fe_ctrl_dump_state(
- (csi_rx_frontend_ID_t)i,
- &state->csi_rx_fe_ctrl_state[i]);
- }
-
- /* dump the states of all CIS RX backend devices */
- for (i = 0; i < N_CSI_RX_BACKEND_ID; i++) {
- csi_rx_be_ctrl_dump_state(
- (csi_rx_backend_ID_t)i,
- &state->csi_rx_be_ctrl_state[i]);
- }
-
- /* dump the states of all pixelgen devices */
- for (i = 0; i < N_PIXELGEN_ID; i++) {
- pixelgen_ctrl_dump_state(
- (pixelgen_ID_t)i,
- &state->pixelgen_ctrl_state[i]);
- }
-
- /* dump the states of all st2mmio devices */
- for (i = 0; i < N_STREAM2MMIO_ID; i++) {
- stream2mmio_dump_state(
- (stream2mmio_ID_t)i,
- &state->stream2mmio_state[i]);
- }
-
- /* dump the states of all ibuf-controller devices */
- for (i = 0; i < N_IBUF_CTRL_ID; i++) {
- ibuf_ctrl_dump_state(
- (ibuf_ctrl_ID_t)i,
- &state->ibuf_ctrl_state[i]);
- }
-
- /* dump the states of all isys irq controllers */
- for (i = 0; i < N_ISYS_IRQ_ID; i++) {
- isys_irqc_state_dump((isys_irq_ID_t)i, &state->isys_irqc_state[i]);
- }
-
- /* TODO: dump the states of all ISYS2401 DMA devices */
- for (i = 0; i < N_ISYS2401_DMA_ID; i++) {
- }
-
- return;
-}
#endif /* __INPUT_SYSTEM_2401_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c b/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c
index 768da86b8c2c..0f3729e55e14 100644
--- a/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c
+++ b/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c
@@ -604,13 +604,14 @@ binary_in_frame_padded_width(int in_frame_width,
int rval;
int nr_of_left_paddings; /* number of padding pixels on the left of an image line */
-#if defined(ISP2401)
- /* the output image line of Input System 2401 does not have the left paddings */
- nr_of_left_paddings = 0;
-#else
- /* in other cases, the left padding pixels are always 128 */
- nr_of_left_paddings = 2 * ISP_VEC_NELEMS;
-#endif
+ if (IS_ISP2401) {
+ /* the output image line of Input System 2401 does not have left padding */
+ nr_of_left_paddings = 0;
+ } else {
+ /* in other cases, the left padding pixels are always 128 */
+ nr_of_left_paddings = 2 * ISP_VEC_NELEMS;
+ }
+
if (need_scaling) {
/* In SDV use-case, we need to match left-padding of
* primary and the video binary. */
diff --git a/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h b/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h
index fff89e9b4b01..e9a09117e5e5 100644
--- a/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h
+++ b/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h
@@ -141,12 +141,6 @@ static inline void __printf(2, 0) ia_css_debug_vdtrace(unsigned int level,
__printf(2, 3) void ia_css_debug_dtrace(unsigned int level,
const char *fmt, ...);
-/*! @brief Dump sp thread's stack contents
- * SP thread's stack contents are set to 0xcafecafe. This function dumps the
- * stack to inspect if the stack's boundaries are compromised.
- * @return None
- */
-void ia_css_debug_dump_sp_stack_info(void);
/*! @brief Function to set the global dtrace verbosity level.
* @param[in] trace_level Maximum level of the messages to be traced.
@@ -160,18 +154,6 @@ void ia_css_debug_set_dtrace_level(
*/
unsigned int ia_css_debug_get_dtrace_level(void);
-/*! @brief Dump isp hardware state.
- * Dumps the isp hardware state to tracing output.
- * @return None
- */
-void ia_css_debug_dump_isp_state(void);
-
-/*! @brief Dump sp hardware state.
- * Dumps the sp hardware state to tracing output.
- * @return None
- */
-void ia_css_debug_dump_sp_state(void);
-
/* ISP2401 */
/*! @brief Dump GAC hardware state.
 * Dumps the GAC ACB hardware registers. May be useful for
@@ -180,25 +162,12 @@ void ia_css_debug_dump_sp_state(void);
*/
void ia_css_debug_dump_gac_state(void);
-/*! @brief Dump dma controller state.
- * Dumps the dma controller state to tracing output.
- * @return None
- */
-void ia_css_debug_dump_dma_state(void);
-
/*! @brief Dump internal sp software state.
* Dumps the sp software state to tracing output.
* @return None
*/
void ia_css_debug_dump_sp_sw_debug_info(void);
-/*! @brief Dump all related hardware state to the trace output
- * @param[in] context String to identify context in output.
- * @return None
- */
-void ia_css_debug_dump_debug_info(
- const char *context);
-
#if SP_DEBUG != SP_DEBUG_NONE
void ia_css_debug_print_sp_debug_state(
const struct sh_css_sp_debug_state *state);
@@ -213,24 +182,6 @@ void ia_css_debug_binary_print(
void ia_css_debug_sp_dump_mipi_fifo_high_water(void);
-/*! @brief Dump isp gdc fifo state to the trace output
- * Dumps the isp gdc fifo state to tracing output.
- * @return None
- */
-void ia_css_debug_dump_isp_gdc_fifo_state(void);
-
-/*! @brief Dump dma isp fifo state
- * Dumps the dma isp fifo state to tracing output.
- * @return None
- */
-void ia_css_debug_dump_dma_isp_fifo_state(void);
-
-/*! @brief Dump dma sp fifo state
- * Dumps the dma sp fifo state to tracing output.
- * @return None
- */
-void ia_css_debug_dump_dma_sp_fifo_state(void);
-
/*! @brief Dump pif A isp fifo state
* Dumps the primary input formatter state to tracing output.
* @return None
@@ -249,30 +200,12 @@ void ia_css_debug_dump_pif_b_isp_fifo_state(void);
*/
void ia_css_debug_dump_str2mem_sp_fifo_state(void);
-/*! @brief Dump isp sp fifo state
- * Dumps the isp sp fifo state to tracing output.
- * @return None
- */
-void ia_css_debug_dump_isp_sp_fifo_state(void);
-
/*! @brief Dump all fifo state info to the output
* Dumps all fifo state to tracing output.
* @return None
*/
void ia_css_debug_dump_all_fifo_state(void);
-/*! @brief Dump the rx state to the output
- * Dumps the rx state to tracing output.
- * @return None
- */
-void ia_css_debug_dump_rx_state(void);
-
-/*! @brief Dump the input system state to the output
- * Dumps the input system state to tracing output.
- * @return None
- */
-void ia_css_debug_dump_isys_state(void);
-
/*! @brief Dump the frame info to the trace output
* Dumps the frame info to tracing output.
* @param[in] frame pointer to struct ia_css_frame
@@ -306,18 +239,6 @@ void ia_css_debug_wake_up_sp(void);
void ia_css_debug_dump_isp_params(struct ia_css_stream *stream,
unsigned int enable);
-/*! @brief Function to dump some sp performance counters.
- * Dump sp performance counters, currently input system errors.
- * @return None
- */
-void ia_css_debug_dump_perf_counters(void);
-
-#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG
-void sh_css_dump_thread_wait_info(void);
-void sh_css_dump_pipe_stage_info(void);
-void sh_css_dump_pipe_stripe_info(void);
-#endif
-
void ia_css_debug_dump_isp_binary(void);
void sh_css_dump_sp_raw_copy_linecount(bool reduced);
@@ -400,12 +321,6 @@ void ia_css_debug_dump_stream_config(
const struct ia_css_stream_config *config,
int num_pipes);
-/*! @brief Dump the state of the SP tagger
- * Dumps the internal state of the SP tagger
- * @return None
- */
-void ia_css_debug_tagger_state(void);
-
/**
* @brief Initialize the debug mode.
*
diff --git a/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c b/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c
index bb6204cb42c5..3e92794555ec 100644
--- a/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c
+++ b/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c
@@ -101,12 +101,6 @@
#define ENABLE_LINE_MAX_LENGTH (25)
-/*
- * TODO:SH_CSS_MAX_SP_THREADS is not the max number of sp threads
- * future rework should fix this and remove the define MAX_THREAD_NUM
- */
-#define MAX_THREAD_NUM (SH_CSS_MAX_SP_THREADS + SH_CSS_MAX_SP_INTERNAL_THREADS)
-
static struct pipe_graph_class {
bool do_init;
int height;
@@ -147,79 +141,6 @@ void ia_css_debug_dtrace(unsigned int level, const char *fmt, ...)
va_end(ap);
}
-static void debug_dump_long_array_formatted(
- const sp_ID_t sp_id,
- hrt_address stack_sp_addr,
- unsigned int stack_size)
-{
- unsigned int i;
- u32 val;
- u32 addr = (uint32_t)stack_sp_addr;
- u32 stack_size_words = CEIL_DIV(stack_size, sizeof(uint32_t));
-
- /* When size is not multiple of four, last word is only relevant for
- * remaining bytes */
- for (i = 0; i < stack_size_words; i++) {
- val = sp_dmem_load_uint32(sp_id, (hrt_address)addr);
- if ((i % 8) == 0)
- ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "\n");
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "0x%08x ", val);
- addr += sizeof(uint32_t);
- }
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "\n");
-}
-
-static void debug_dump_sp_stack_info(
- const sp_ID_t sp_id)
-{
- const struct ia_css_fw_info *fw;
- unsigned int HIVE_ADDR_sp_threads_stack;
- unsigned int HIVE_ADDR_sp_threads_stack_size;
- u32 stack_sizes[MAX_THREAD_NUM];
- u32 stack_sp_addr[MAX_THREAD_NUM];
- unsigned int i;
-
- fw = &sh_css_sp_fw;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "sp_id(%u) stack info\n", sp_id);
- ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
- "from objects stack_addr_offset:0x%x stack_size_offset:0x%x\n",
- fw->info.sp.threads_stack,
- fw->info.sp.threads_stack_size);
-
- HIVE_ADDR_sp_threads_stack = fw->info.sp.threads_stack;
- HIVE_ADDR_sp_threads_stack_size = fw->info.sp.threads_stack_size;
-
- if (fw->info.sp.threads_stack == 0 ||
- fw->info.sp.threads_stack_size == 0)
- return;
-
- (void)HIVE_ADDR_sp_threads_stack;
- (void)HIVE_ADDR_sp_threads_stack_size;
-
- sp_dmem_load(sp_id,
- (unsigned int)sp_address_of(sp_threads_stack),
- &stack_sp_addr, sizeof(stack_sp_addr));
- sp_dmem_load(sp_id,
- (unsigned int)sp_address_of(sp_threads_stack_size),
- &stack_sizes, sizeof(stack_sizes));
-
- for (i = 0 ; i < MAX_THREAD_NUM; i++) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
- "thread: %u stack_addr: 0x%08x stack_size: %u\n",
- i, stack_sp_addr[i], stack_sizes[i]);
- debug_dump_long_array_formatted(sp_id, (hrt_address)stack_sp_addr[i],
- stack_sizes[i]);
- }
-}
-
-void ia_css_debug_dump_sp_stack_info(void)
-{
- debug_dump_sp_stack_info(SP0_ID);
-}
-
void ia_css_debug_set_dtrace_level(const unsigned int trace_level)
{
dbg_level = trace_level;
@@ -387,136 +308,6 @@ static const char *debug_frame_format2str(const enum ia_css_frame_format
}
}
-static void debug_print_sp_state(const sp_state_t *state, const char *cell)
-{
- assert(cell);
- assert(state);
-
- ia_css_debug_dtrace(2, "%s state:\n", cell);
- ia_css_debug_dtrace(2, "\t%-32s: 0x%X\n", "PC", state->pc);
- ia_css_debug_dtrace(2, "\t%-32s: 0x%X\n", "Status register",
- state->status_register);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is broken", state->is_broken);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is idle", state->is_idle);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is sleeping",
- state->is_sleeping);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is stalling",
- state->is_stalling);
- return;
-}
-
-static void debug_print_isp_state(const isp_state_t *state, const char *cell)
-{
- assert(state);
- assert(cell);
-
- ia_css_debug_dtrace(2, "%s state:\n", cell);
- ia_css_debug_dtrace(2, "\t%-32s: 0x%X\n", "PC", state->pc);
- ia_css_debug_dtrace(2, "\t%-32s: 0x%X\n", "Status register",
- state->status_register);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is broken", state->is_broken);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is idle", state->is_idle);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is sleeping",
- state->is_sleeping);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is stalling",
- state->is_stalling);
- return;
-}
-
-void ia_css_debug_dump_isp_state(void)
-{
- isp_state_t state;
- isp_stall_t stall;
-
- isp_get_state(ISP0_ID, &state, &stall);
-
- debug_print_isp_state(&state, "ISP");
-
- if (state.is_stalling) {
- if (!IS_ISP2401) {
- ia_css_debug_dtrace(2, "\t%-32s: %d\n",
- "[0] if_prim_a_FIFO stalled", stall.fifo0);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n",
- "[1] if_prim_b_FIFO stalled", stall.fifo1);
- }
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[2] dma_FIFO stalled",
- stall.fifo2);
-
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[3] gdc0_FIFO stalled",
- stall.fifo3);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[4] gdc1_FIFO stalled",
- stall.fifo4);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[5] gpio_FIFO stalled",
- stall.fifo5);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[6] sp_FIFO stalled",
- stall.fifo6);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n",
- "status & control stalled",
- stall.stat_ctrl);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "dmem stalled",
- stall.dmem);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "vmem stalled",
- stall.vmem);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "vamem1 stalled",
- stall.vamem1);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "vamem2 stalled",
- stall.vamem2);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "vamem3 stalled",
- stall.vamem3);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "hmem stalled",
- stall.hmem);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "pmem stalled",
- stall.pmem);
- }
- return;
-}
-
-void ia_css_debug_dump_sp_state(void)
-{
- sp_state_t state;
- sp_stall_t stall;
-
- sp_get_state(SP0_ID, &state, &stall);
- debug_print_sp_state(&state, "SP");
- if (state.is_stalling) {
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "isys_FIFO stalled",
- stall.fifo0);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "if_sec_FIFO stalled",
- stall.fifo1);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n",
- "str_to_mem_FIFO stalled", stall.fifo2);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "dma_FIFO stalled",
- stall.fifo3);
- if (!IS_ISP2401)
- ia_css_debug_dtrace(2, "\t%-32s: %d\n",
- "if_prim_a_FIFO stalled", stall.fifo4);
-
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "isp_FIFO stalled",
- stall.fifo5);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "gp_FIFO stalled",
- stall.fifo6);
- if (!IS_ISP2401)
- ia_css_debug_dtrace(2, "\t%-32s: %d\n",
- "if_prim_b_FIFO stalled", stall.fifo7);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "gdc0_FIFO stalled",
- stall.fifo8);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "gdc1_FIFO stalled",
- stall.fifo9);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "irq FIFO stalled",
- stall.fifoa);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "dmem stalled",
- stall.dmem);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n",
- "control master stalled",
- stall.control_master);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n",
- "i-cache master stalled",
- stall.icache_master);
- }
- ia_css_debug_dump_trace();
- return;
-}
-
static void debug_print_fifo_channel_state(const fifo_channel_state_t *state,
const char *descr)
{
@@ -571,634 +362,6 @@ void ia_css_debug_dump_str2mem_sp_fifo_state(void)
debug_print_fifo_channel_state(&sp_to_s2m, "SP to stream-to-memory");
}
-#ifndef ISP2401
-static void debug_print_if_state(input_formatter_state_t *state, const char *id)
-{
- unsigned int val;
-
- const char *st_vsync_active_low =
- (state->vsync_active_low ? "low" : "high");
- const char *st_hsync_active_low =
- (state->hsync_active_low ? "low" : "high");
-
- const char *fsm_sync_status_str = "unknown";
- const char *fsm_crop_status_str = "unknown";
- const char *fsm_padding_status_str = "unknown";
-
- int st_stline = state->start_line;
- int st_stcol = state->start_column;
- int st_crpht = state->cropped_height;
- int st_crpwd = state->cropped_width;
- int st_verdcm = state->ver_decimation;
- int st_hordcm = state->hor_decimation;
- int st_ver_deinterleaving = state->ver_deinterleaving;
- int st_hor_deinterleaving = state->hor_deinterleaving;
- int st_leftpd = state->left_padding;
- int st_eoloff = state->eol_offset;
- int st_vmstartaddr = state->vmem_start_address;
- int st_vmendaddr = state->vmem_end_address;
- int st_vmincr = state->vmem_increment;
- int st_yuv420 = state->is_yuv420;
- int st_allow_fifo_overflow = state->allow_fifo_overflow;
- int st_block_fifo_when_no_req = state->block_fifo_when_no_req;
-
- assert(state);
- ia_css_debug_dtrace(2, "InputFormatter State (%s):\n", id);
-
- ia_css_debug_dtrace(2, "\tConfiguration:\n");
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Start line", st_stline);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Start column", st_stcol);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropped height", st_crpht);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropped width", st_crpwd);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Ver decimation", st_verdcm);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Hor decimation", st_hordcm);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Ver deinterleaving", st_ver_deinterleaving);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Hor deinterleaving", st_hor_deinterleaving);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Left padding", st_leftpd);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "EOL offset (bytes)", st_eoloff);
- ia_css_debug_dtrace(2, "\t\t%-32s: 0x%06X\n",
- "VMEM start address", st_vmstartaddr);
- ia_css_debug_dtrace(2, "\t\t%-32s: 0x%06X\n",
- "VMEM end address", st_vmendaddr);
- ia_css_debug_dtrace(2, "\t\t%-32s: 0x%06X\n",
- "VMEM increment", st_vmincr);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "YUV 420 format", st_yuv420);
- ia_css_debug_dtrace(2, "\t\t%-32s: Active %s\n",
- "Vsync", st_vsync_active_low);
- ia_css_debug_dtrace(2, "\t\t%-32s: Active %s\n",
- "Hsync", st_hsync_active_low);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Allow FIFO overflow", st_allow_fifo_overflow);
- /* Flag that tells whether the IF gives backpressure on frames */
- /*
- * FYI, this is only on the frame request (indicate), when the IF has
- * synch'd on a frame it will always give back pressure
- */
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Block when no request", st_block_fifo_when_no_req);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "IF_BLOCKED_FIFO_NO_REQ_ADDRESS",
- input_formatter_reg_load(INPUT_FORMATTER0_ID,
- HIVE_IF_BLOCK_FIFO_NO_REQ_ADDRESS)
- );
-
- ia_css_debug_dtrace(2, "\t%-32s:\n", "InputSwitch State");
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "_REG_GP_IFMT_input_switch_lut_reg0",
- gp_device_reg_load(GP_DEVICE0_ID,
- _REG_GP_IFMT_input_switch_lut_reg0));
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "_REG_GP_IFMT_input_switch_lut_reg1",
- gp_device_reg_load(GP_DEVICE0_ID,
- _REG_GP_IFMT_input_switch_lut_reg1));
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "_REG_GP_IFMT_input_switch_lut_reg2",
- gp_device_reg_load(GP_DEVICE0_ID,
- _REG_GP_IFMT_input_switch_lut_reg2));
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "_REG_GP_IFMT_input_switch_lut_reg3",
- gp_device_reg_load(GP_DEVICE0_ID,
- _REG_GP_IFMT_input_switch_lut_reg3));
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "_REG_GP_IFMT_input_switch_lut_reg4",
- gp_device_reg_load(GP_DEVICE0_ID,
- _REG_GP_IFMT_input_switch_lut_reg4));
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "_REG_GP_IFMT_input_switch_lut_reg5",
- gp_device_reg_load(GP_DEVICE0_ID,
- _REG_GP_IFMT_input_switch_lut_reg5));
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "_REG_GP_IFMT_input_switch_lut_reg6",
- gp_device_reg_load(GP_DEVICE0_ID,
- _REG_GP_IFMT_input_switch_lut_reg6));
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "_REG_GP_IFMT_input_switch_lut_reg7",
- gp_device_reg_load(GP_DEVICE0_ID,
- _REG_GP_IFMT_input_switch_lut_reg7));
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "_REG_GP_IFMT_input_switch_fsync_lut",
- gp_device_reg_load(GP_DEVICE0_ID,
- _REG_GP_IFMT_input_switch_fsync_lut));
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "_REG_GP_IFMT_srst",
- gp_device_reg_load(GP_DEVICE0_ID,
- _REG_GP_IFMT_srst));
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "_REG_GP_IFMT_slv_reg_srst",
- gp_device_reg_load(GP_DEVICE0_ID,
- _REG_GP_IFMT_slv_reg_srst));
-
- ia_css_debug_dtrace(2, "\tFSM Status:\n");
-
- val = state->fsm_sync_status;
-
- if (val > 7)
- fsm_sync_status_str = "ERROR";
-
- switch (val & 0x7) {
- case 0:
- fsm_sync_status_str = "idle";
- break;
- case 1:
- fsm_sync_status_str = "request frame";
- break;
- case 2:
- fsm_sync_status_str = "request lines";
- break;
- case 3:
- fsm_sync_status_str = "request vectors";
- break;
- case 4:
- fsm_sync_status_str = "send acknowledge";
- break;
- default:
- fsm_sync_status_str = "unknown";
- break;
- }
-
- ia_css_debug_dtrace(2, "\t\t%-32s: (0x%X: %s)\n",
- "FSM Synchronization Status", val,
- fsm_sync_status_str);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "FSM Synchronization Counter",
- state->fsm_sync_counter);
-
- val = state->fsm_crop_status;
-
- if (val > 7)
- fsm_crop_status_str = "ERROR";
-
- switch (val & 0x7) {
- case 0:
- fsm_crop_status_str = "idle";
- break;
- case 1:
- fsm_crop_status_str = "wait line";
- break;
- case 2:
- fsm_crop_status_str = "crop line";
- break;
- case 3:
- fsm_crop_status_str = "crop pixel";
- break;
- case 4:
- fsm_crop_status_str = "pass pixel";
- break;
- case 5:
- fsm_crop_status_str = "pass line";
- break;
- case 6:
- fsm_crop_status_str = "lost line";
- break;
- default:
- fsm_crop_status_str = "unknown";
- break;
- }
- ia_css_debug_dtrace(2, "\t\t%-32s: (0x%X: %s)\n",
- "FSM Crop Status", val, fsm_crop_status_str);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "FSM Crop Line Counter",
- state->fsm_crop_line_counter);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "FSM Crop Pixel Counter",
- state->fsm_crop_pixel_counter);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "FSM Deinterleaving idx buffer",
- state->fsm_deinterleaving_index);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "FSM H decimation counter",
- state->fsm_dec_h_counter);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "FSM V decimation counter",
- state->fsm_dec_v_counter);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "FSM block V decimation counter",
- state->fsm_dec_block_v_counter);
-
- val = state->fsm_padding_status;
-
- if (val > 7)
- fsm_padding_status_str = "ERROR";
-
- switch (val & 0x7) {
- case 0:
- fsm_padding_status_str = "idle";
- break;
- case 1:
- fsm_padding_status_str = "left pad";
- break;
- case 2:
- fsm_padding_status_str = "write";
- break;
- case 3:
- fsm_padding_status_str = "right pad";
- break;
- case 4:
- fsm_padding_status_str = "send end of line";
- break;
- default:
- fsm_padding_status_str = "unknown";
- break;
- }
-
- ia_css_debug_dtrace(2, "\t\t%-32s: (0x%X: %s)\n", "FSM Padding Status",
- val, fsm_padding_status_str);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "FSM Padding element idx counter",
- state->fsm_padding_elem_counter);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Vector support error",
- state->fsm_vector_support_error);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Vector support buf full",
- state->fsm_vector_buffer_full);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Vector support",
- state->vector_support);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Fifo sensor data lost",
- state->sensor_data_lost);
-}
-
-static void debug_print_if_bin_state(input_formatter_bin_state_t *state)
-{
- ia_css_debug_dtrace(2, "Stream-to-memory state:\n");
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "reset", state->reset);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "input endianness",
- state->input_endianness);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "output endianness",
- state->output_endianness);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "bitswap", state->bitswap);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "block_synch",
- state->block_synch);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "packet_synch",
- state->packet_synch);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "readpostwrite_sync",
- state->readpostwrite_synch);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "is_2ppc", state->is_2ppc);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "en_status_update",
- state->en_status_update);
-}
-
-static void ia_css_debug_dump_if_state(void)
-{
- input_formatter_state_t if_state;
- input_formatter_bin_state_t if_bin_state;
-
- input_formatter_get_state(INPUT_FORMATTER0_ID, &if_state);
- debug_print_if_state(&if_state, "Primary IF A");
- ia_css_debug_dump_pif_a_isp_fifo_state();
-
- input_formatter_get_state(INPUT_FORMATTER1_ID, &if_state);
- debug_print_if_state(&if_state, "Primary IF B");
- ia_css_debug_dump_pif_b_isp_fifo_state();
-
- input_formatter_bin_get_state(INPUT_FORMATTER3_ID, &if_bin_state);
- debug_print_if_bin_state(&if_bin_state);
- ia_css_debug_dump_str2mem_sp_fifo_state();
-}
-#endif
-
-void ia_css_debug_dump_dma_state(void)
-{
- /* note: the var below is made static as it is quite large;
- if it is not static it ends up on the stack which could
- cause issues for drivers
- */
- static dma_state_t state;
- int i, ch_id;
-
- const char *fsm_cmd_st_lbl = "FSM Command flag state";
- const char *fsm_ctl_st_lbl = "FSM Control flag state";
- const char *fsm_ctl_state = NULL;
- const char *fsm_ctl_flag = NULL;
- const char *fsm_pack_st = NULL;
- const char *fsm_read_st = NULL;
- const char *fsm_write_st = NULL;
- char last_cmd_str[64];
-
- dma_get_state(DMA0_ID, &state);
- /* Print header for DMA dump status */
- ia_css_debug_dtrace(2, "DMA dump status:\n");
-
- /* Print FSM command flag state */
- if (state.fsm_command_idle)
- ia_css_debug_dtrace(2, "\t%-32s: %s\n", fsm_cmd_st_lbl, "IDLE");
- if (state.fsm_command_run)
- ia_css_debug_dtrace(2, "\t%-32s: %s\n", fsm_cmd_st_lbl, "RUN");
- if (state.fsm_command_stalling)
- ia_css_debug_dtrace(2, "\t%-32s: %s\n", fsm_cmd_st_lbl,
- "STALL");
- if (state.fsm_command_error)
- ia_css_debug_dtrace(2, "\t%-32s: %s\n", fsm_cmd_st_lbl,
- "ERROR");
-
- /* Print last command along with the channel */
- ch_id = state.last_command_channel;
-
- switch (state.last_command) {
- case DMA_COMMAND_READ:
- snprintf(last_cmd_str, 64,
- "Read 2D Block [Channel: %d]", ch_id);
- break;
- case DMA_COMMAND_WRITE:
- snprintf(last_cmd_str, 64,
- "Write 2D Block [Channel: %d]", ch_id);
- break;
- case DMA_COMMAND_SET_CHANNEL:
- snprintf(last_cmd_str, 64, "Set Channel [Channel: %d]", ch_id);
- break;
- case DMA_COMMAND_SET_PARAM:
- snprintf(last_cmd_str, 64,
- "Set Param: %d [Channel: %d]",
- state.last_command_param, ch_id);
- break;
- case DMA_COMMAND_READ_SPECIFIC:
- snprintf(last_cmd_str, 64,
- "Read Specific 2D Block [Channel: %d]", ch_id);
- break;
- case DMA_COMMAND_WRITE_SPECIFIC:
- snprintf(last_cmd_str, 64,
- "Write Specific 2D Block [Channel: %d]", ch_id);
- break;
- case DMA_COMMAND_INIT:
- snprintf(last_cmd_str, 64,
- "Init 2D Block on Device A [Channel: %d]", ch_id);
- break;
- case DMA_COMMAND_INIT_SPECIFIC:
- snprintf(last_cmd_str, 64,
- "Init Specific 2D Block [Channel: %d]", ch_id);
- break;
- case DMA_COMMAND_RST:
- snprintf(last_cmd_str, 64, "DMA SW Reset");
- break;
- case N_DMA_COMMANDS:
- snprintf(last_cmd_str, 64, "UNKNOWN");
- break;
- default:
- snprintf(last_cmd_str, 64,
- "unknown [Channel: %d]", ch_id);
- break;
- }
- ia_css_debug_dtrace(2, "\t%-32s: (0x%X : %s)\n",
- "last command received", state.last_command,
- last_cmd_str);
-
- /* Print DMA registers */
- ia_css_debug_dtrace(2, "\t%-32s\n",
- "DMA registers, connection group 0");
- ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Cmd Fifo Command",
- state.current_command);
- ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Cmd Fifo Address A",
- state.current_addr_a);
- ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Cmd Fifo Address B",
- state.current_addr_b);
-
- if (state.fsm_ctrl_idle)
- fsm_ctl_flag = "IDLE";
- else if (state.fsm_ctrl_run)
- fsm_ctl_flag = "RUN";
- else if (state.fsm_ctrl_stalling)
- fsm_ctl_flag = "STAL";
- else if (state.fsm_ctrl_error)
- fsm_ctl_flag = "ERROR";
- else
- fsm_ctl_flag = "UNKNOWN";
-
- switch (state.fsm_ctrl_state) {
- case DMA_CTRL_STATE_IDLE:
- fsm_ctl_state = "Idle state";
- break;
- case DMA_CTRL_STATE_REQ_RCV:
- fsm_ctl_state = "Req Rcv state";
- break;
- case DMA_CTRL_STATE_RCV:
- fsm_ctl_state = "Rcv state";
- break;
- case DMA_CTRL_STATE_RCV_REQ:
- fsm_ctl_state = "Rcv Req state";
- break;
- case DMA_CTRL_STATE_INIT:
- fsm_ctl_state = "Init state";
- break;
- case N_DMA_CTRL_STATES:
- fsm_ctl_state = "Unknown";
- break;
- }
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %s -> %s\n", fsm_ctl_st_lbl,
- fsm_ctl_flag, fsm_ctl_state);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl source dev",
- state.fsm_ctrl_source_dev);
- ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "FSM Ctrl source addr",
- state.fsm_ctrl_source_addr);
- ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "FSM Ctrl source stride",
- state.fsm_ctrl_source_stride);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl source width",
- state.fsm_ctrl_source_width);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl source height",
- state.fsm_ctrl_source_height);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack source dev",
- state.fsm_ctrl_pack_source_dev);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack dest dev",
- state.fsm_ctrl_pack_dest_dev);
- ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "FSM Ctrl dest addr",
- state.fsm_ctrl_dest_addr);
- ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "FSM Ctrl dest stride",
- state.fsm_ctrl_dest_stride);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack source width",
- state.fsm_ctrl_pack_source_width);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack dest height",
- state.fsm_ctrl_pack_dest_height);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack dest width",
- state.fsm_ctrl_pack_dest_width);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack source elems",
- state.fsm_ctrl_pack_source_elems);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack dest elems",
- state.fsm_ctrl_pack_dest_elems);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack extension",
- state.fsm_ctrl_pack_extension);
-
- if (state.pack_idle)
- fsm_pack_st = "IDLE";
- if (state.pack_run)
- fsm_pack_st = "RUN";
- if (state.pack_stalling)
- fsm_pack_st = "STALL";
- if (state.pack_error)
- fsm_pack_st = "ERROR";
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %s\n", "FSM Pack flag state",
- fsm_pack_st);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Pack cnt height",
- state.pack_cnt_height);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Pack src cnt width",
- state.pack_src_cnt_width);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Pack dest cnt width",
- state.pack_dest_cnt_width);
-
- if (state.read_state == DMA_RW_STATE_IDLE)
- fsm_read_st = "Idle state";
- if (state.read_state == DMA_RW_STATE_REQ)
- fsm_read_st = "Req state";
- if (state.read_state == DMA_RW_STATE_NEXT_LINE)
- fsm_read_st = "Next line";
- if (state.read_state == DMA_RW_STATE_UNLOCK_CHANNEL)
- fsm_read_st = "Unlock channel";
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %s\n", "FSM Read state",
- fsm_read_st);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Read cnt height",
- state.read_cnt_height);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Read cnt width",
- state.read_cnt_width);
-
- if (state.write_state == DMA_RW_STATE_IDLE)
- fsm_write_st = "Idle state";
- if (state.write_state == DMA_RW_STATE_REQ)
- fsm_write_st = "Req state";
- if (state.write_state == DMA_RW_STATE_NEXT_LINE)
- fsm_write_st = "Next line";
- if (state.write_state == DMA_RW_STATE_UNLOCK_CHANNEL)
- fsm_write_st = "Unlock channel";
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %s\n", "FSM Write state",
- fsm_write_st);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Write height",
- state.write_height);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Write width",
- state.write_width);
-
- for (i = 0; i < HIVE_ISP_NUM_DMA_CONNS; i++) {
- dma_port_state_t *port = &state.port_states[i];
-
- ia_css_debug_dtrace(2, "\tDMA device interface %d\n", i);
- ia_css_debug_dtrace(2, "\t\tDMA internal side state\n");
- ia_css_debug_dtrace(2,
- "\t\t\tCS:%d - We_n:%d - Run:%d - Ack:%d\n",
- port->req_cs, port->req_we_n, port->req_run,
- port->req_ack);
- ia_css_debug_dtrace(2, "\t\tMaster Output side state\n");
- ia_css_debug_dtrace(2,
- "\t\t\tCS:%d - We_n:%d - Run:%d - Ack:%d\n",
- port->send_cs, port->send_we_n,
- port->send_run, port->send_ack);
- ia_css_debug_dtrace(2, "\t\tFifo state\n");
- if (port->fifo_state == DMA_FIFO_STATE_WILL_BE_FULL)
- ia_css_debug_dtrace(2, "\t\t\tFiFo will be full\n");
- else if (port->fifo_state == DMA_FIFO_STATE_FULL)
- ia_css_debug_dtrace(2, "\t\t\tFifo Full\n");
- else if (port->fifo_state == DMA_FIFO_STATE_EMPTY)
- ia_css_debug_dtrace(2, "\t\t\tFifo Empty\n");
- else
- ia_css_debug_dtrace(2, "\t\t\tFifo state unknown\n");
-
- ia_css_debug_dtrace(2, "\t\tFifo counter %d\n\n",
- port->fifo_counter);
- }
-
- for (i = 0; i < HIVE_DMA_NUM_CHANNELS; i++) {
- dma_channel_state_t *ch = &state.channel_states[i];
-
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "DMA channel register",
- i);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Connection",
- ch->connection);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Sign extend",
- ch->sign_extend);
- ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Stride Dev A",
- ch->stride_a);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Elems Dev A",
- ch->elems_a);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropping Dev A",
- ch->cropping_a);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Width Dev A",
- ch->width_a);
- ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Stride Dev B",
- ch->stride_b);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Elems Dev B",
- ch->elems_b);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropping Dev B",
- ch->cropping_b);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Width Dev B",
- ch->width_b);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Height", ch->height);
- }
- ia_css_debug_dtrace(2, "\n");
- return;
-}
-
-void ia_css_debug_dump_dma_sp_fifo_state(void)
-{
- fifo_channel_state_t dma_to_sp, sp_to_dma;
-
- fifo_channel_get_state(FIFO_MONITOR0_ID,
- FIFO_CHANNEL_DMA0_TO_SP0, &dma_to_sp);
- fifo_channel_get_state(FIFO_MONITOR0_ID,
- FIFO_CHANNEL_SP0_TO_DMA0, &sp_to_dma);
- debug_print_fifo_channel_state(&dma_to_sp, "DMA to SP");
- debug_print_fifo_channel_state(&sp_to_dma, "SP to DMA");
- return;
-}
-
-void ia_css_debug_dump_dma_isp_fifo_state(void)
-{
- fifo_channel_state_t dma_to_isp, isp_to_dma;
-
- fifo_channel_get_state(FIFO_MONITOR0_ID,
- FIFO_CHANNEL_DMA0_TO_ISP0, &dma_to_isp);
- fifo_channel_get_state(FIFO_MONITOR0_ID,
- FIFO_CHANNEL_ISP0_TO_DMA0, &isp_to_dma);
- debug_print_fifo_channel_state(&dma_to_isp, "DMA to ISP");
- debug_print_fifo_channel_state(&isp_to_dma, "ISP to DMA");
- return;
-}
-
-void ia_css_debug_dump_isp_sp_fifo_state(void)
-{
- fifo_channel_state_t sp_to_isp, isp_to_sp;
-
- fifo_channel_get_state(FIFO_MONITOR0_ID,
- FIFO_CHANNEL_SP0_TO_ISP0, &sp_to_isp);
- fifo_channel_get_state(FIFO_MONITOR0_ID,
- FIFO_CHANNEL_ISP0_TO_SP0, &isp_to_sp);
- debug_print_fifo_channel_state(&sp_to_isp, "SP to ISP");
- debug_print_fifo_channel_state(&isp_to_sp, "ISP to SP");
- return;
-}
-
-void ia_css_debug_dump_isp_gdc_fifo_state(void)
-{
- fifo_channel_state_t gdc_to_isp, isp_to_gdc;
-
- fifo_channel_get_state(FIFO_MONITOR0_ID,
- FIFO_CHANNEL_GDC0_TO_ISP0, &gdc_to_isp);
- fifo_channel_get_state(FIFO_MONITOR0_ID,
- FIFO_CHANNEL_ISP0_TO_GDC0, &isp_to_gdc);
- debug_print_fifo_channel_state(&gdc_to_isp, "GDC to ISP");
- debug_print_fifo_channel_state(&isp_to_gdc, "ISP to GDC");
- return;
-}
-
void ia_css_debug_dump_all_fifo_state(void)
{
int i;
@@ -1658,213 +821,6 @@ void ia_css_debug_print_sp_debug_state(const struct sh_css_sp_debug_state
}
#endif
-#if !defined(ISP2401)
-static void debug_print_rx_mipi_port_state(mipi_port_state_t *state)
-{
- int i;
- unsigned int bits, infos;
-
- assert(state);
-
- bits = state->irq_status;
- infos = ia_css_isys_rx_translate_irq_infos(bits);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: (irq reg = 0x%X)\n",
- "receiver errors", bits);
-
- if (infos & IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN)
- ia_css_debug_dtrace(2, "\t\t\tbuffer overrun\n");
- if (infos & IA_CSS_RX_IRQ_INFO_ERR_SOT)
- ia_css_debug_dtrace(2, "\t\t\tstart-of-transmission error\n");
- if (infos & IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC)
- ia_css_debug_dtrace(2, "\t\t\tstart-of-transmission sync error\n");
- if (infos & IA_CSS_RX_IRQ_INFO_ERR_CONTROL)
- ia_css_debug_dtrace(2, "\t\t\tcontrol error\n");
- if (infos & IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE)
- ia_css_debug_dtrace(2, "\t\t\t2 or more ECC errors\n");
- if (infos & IA_CSS_RX_IRQ_INFO_ERR_CRC)
- ia_css_debug_dtrace(2, "\t\t\tCRC mismatch\n");
- if (infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID)
- ia_css_debug_dtrace(2, "\t\t\tunknown error\n");
- if (infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC)
- ia_css_debug_dtrace(2, "\t\t\tframe sync error\n");
- if (infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA)
- ia_css_debug_dtrace(2, "\t\t\tframe data error\n");
- if (infos & IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT)
- ia_css_debug_dtrace(2, "\t\t\tdata timeout\n");
- if (infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC)
- ia_css_debug_dtrace(2, "\t\t\tunknown escape command entry\n");
- if (infos & IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC)
- ia_css_debug_dtrace(2, "\t\t\tline sync error\n");
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "device_ready", state->device_ready);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "irq_status", state->irq_status);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "irq_enable", state->irq_enable);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "timeout_count", state->timeout_count);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "init_count", state->init_count);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "raw16_18", state->raw16_18);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "sync_count", state->sync_count);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "rx_count", state->rx_count);
-
- for (i = 0; i < MIPI_4LANE_CFG; i++) {
- ia_css_debug_dtrace(2, "\t\t%-32s%d%-32s: %d\n",
- "lane_sync_count[", i, "]",
- state->lane_sync_count[i]);
- }
-
- for (i = 0; i < MIPI_4LANE_CFG; i++) {
- ia_css_debug_dtrace(2, "\t\t%-32s%d%-32s: %d\n",
- "lane_rx_count[", i, "]",
- state->lane_rx_count[i]);
- }
-
- return;
-}
-
-static void debug_print_rx_channel_state(rx_channel_state_t *state)
-{
- int i;
-
- assert(state);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "compression_scheme0", state->comp_scheme0);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "compression_scheme1", state->comp_scheme1);
-
- for (i = 0; i < N_MIPI_FORMAT_CUSTOM; i++) {
- ia_css_debug_dtrace(2, "\t\t%-32s%d: %d\n",
- "MIPI Predictor ", i, state->pred[i]);
- }
-
- for (i = 0; i < N_MIPI_FORMAT_CUSTOM; i++) {
- ia_css_debug_dtrace(2, "\t\t%-32s%d: %d\n",
- "MIPI Compressor ", i, state->comp[i]);
- }
-
- return;
-}
-
-static void debug_print_rx_state(receiver_state_t *state)
-{
- int i;
-
- assert(state);
- ia_css_debug_dtrace(2, "CSI Receiver State:\n");
-
- ia_css_debug_dtrace(2, "\tConfiguration:\n");
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "fs_to_ls_delay", state->fs_to_ls_delay);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "ls_to_data_delay", state->ls_to_data_delay);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "data_to_le_delay", state->data_to_le_delay);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "le_to_fe_delay", state->le_to_fe_delay);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "fe_to_fs_delay", state->fe_to_fs_delay);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "le_to_fs_delay", state->le_to_fs_delay);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "is_two_ppc", state->is_two_ppc);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "backend_rst", state->backend_rst);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "raw18", state->raw18);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "force_raw8", state->force_raw8);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "raw16", state->raw16);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "be_gsp_acc_ovl", state->be_gsp_acc_ovl);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "be_srst", state->be_srst);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "be_is_two_ppc", state->be_is_two_ppc);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "be_comp_format0", state->be_comp_format0);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "be_comp_format1", state->be_comp_format1);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "be_comp_format2", state->be_comp_format2);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "be_comp_format3", state->be_comp_format3);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "be_sel", state->be_sel);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "be_raw16_config", state->be_raw16_config);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "be_raw18_config", state->be_raw18_config);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "be_force_raw8", state->be_force_raw8);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "be_irq_status", state->be_irq_status);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "be_irq_clear", state->be_irq_clear);
-
- /* mipi port state */
- for (i = 0; i < N_MIPI_PORT_ID; i++) {
- ia_css_debug_dtrace(2, "\tMIPI Port %d State:\n", i);
-
- debug_print_rx_mipi_port_state(&state->mipi_port_state[i]);
- }
- /* end of mipi port state */
-
- /* rx channel state */
- for (i = 0; i < N_RX_CHANNEL_ID; i++) {
- ia_css_debug_dtrace(2, "\tRX Channel %d State:\n", i);
-
- debug_print_rx_channel_state(&state->rx_channel_state[i]);
- }
- /* end of rx channel state */
-
- return;
-}
-#endif
-
-void ia_css_debug_dump_rx_state(void)
-{
-#if !defined(ISP2401)
- receiver_state_t state;
-
- receiver_get_state(RX0_ID, &state);
- debug_print_rx_state(&state);
-#endif
-}
-
void ia_css_debug_dump_sp_sw_debug_info(void)
{
#if SP_DEBUG != SP_DEBUG_NONE
@@ -1878,319 +834,6 @@ void ia_css_debug_dump_sp_sw_debug_info(void)
return;
}
-#if !defined(ISP2401)
-static void debug_print_isys_capture_unit_state(capture_unit_state_t *state)
-{
- assert(state);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Packet_Length", state->Packet_Length);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Received_Length", state->Received_Length);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Received_Short_Packets",
- state->Received_Short_Packets);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Received_Long_Packets",
- state->Received_Long_Packets);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Last_Command", state->Last_Command);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Next_Command", state->Next_Command);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Last_Acknowledge", state->Last_Acknowledge);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Next_Acknowledge", state->Next_Acknowledge);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "FSM_State_Info", state->FSM_State_Info);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "StartMode", state->StartMode);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Start_Addr", state->Start_Addr);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Mem_Region_Size", state->Mem_Region_Size);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Num_Mem_Regions", state->Num_Mem_Regions);
- return;
-}
-
-static void debug_print_isys_acquisition_unit_state(
- acquisition_unit_state_t *state)
-{
- assert(state);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Received_Short_Packets",
- state->Received_Short_Packets);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Received_Long_Packets",
- state->Received_Long_Packets);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Last_Command", state->Last_Command);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Next_Command", state->Next_Command);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Last_Acknowledge", state->Last_Acknowledge);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Next_Acknowledge", state->Next_Acknowledge);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "FSM_State_Info", state->FSM_State_Info);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Int_Cntr_Info", state->Int_Cntr_Info);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Start_Addr", state->Start_Addr);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Mem_Region_Size", state->Mem_Region_Size);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "Num_Mem_Regions", state->Num_Mem_Regions);
-}
-
-static void debug_print_isys_ctrl_unit_state(ctrl_unit_state_t *state)
-{
- assert(state);
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "last_cmd", state->last_cmd);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "next_cmd", state->next_cmd);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "last_ack", state->last_ack);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "next_ack", state->next_ack);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "top_fsm_state", state->top_fsm_state);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "captA_fsm_state", state->captA_fsm_state);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "captB_fsm_state", state->captB_fsm_state);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "captC_fsm_state", state->captC_fsm_state);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "acq_fsm_state", state->acq_fsm_state);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "captA_start_addr", state->captA_start_addr);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "captB_start_addr", state->captB_start_addr);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "captC_start_addr", state->captC_start_addr);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "captA_mem_region_size",
- state->captA_mem_region_size);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "captB_mem_region_size",
- state->captB_mem_region_size);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "captC_mem_region_size",
- state->captC_mem_region_size);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "captA_num_mem_regions",
- state->captA_num_mem_regions);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "captB_num_mem_regions",
- state->captB_num_mem_regions);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "captC_num_mem_regions",
- state->captC_num_mem_regions);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "acq_start_addr", state->acq_start_addr);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "acq_mem_region_size", state->acq_mem_region_size);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "acq_num_mem_regions", state->acq_num_mem_regions);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "capt_reserve_one_mem_region",
- state->capt_reserve_one_mem_region);
-
- return;
-}
-
-static void debug_print_isys_state(input_system_state_t *state)
-{
- int i;
-
- assert(state);
- ia_css_debug_dtrace(2, "InputSystem State:\n");
-
- /* configuration */
- ia_css_debug_dtrace(2, "\tConfiguration:\n");
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "str_multiCastA_sel", state->str_multicastA_sel);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "str_multicastB_sel", state->str_multicastB_sel);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "str_multicastC_sel", state->str_multicastC_sel);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "str_mux_sel", state->str_mux_sel);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "str_mon_status", state->str_mon_status);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "str_mon_irq_cond", state->str_mon_irq_cond);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "str_mon_irq_en", state->str_mon_irq_en);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "isys_srst", state->isys_srst);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "isys_slv_reg_srst", state->isys_slv_reg_srst);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "str_deint_portA_cnt", state->str_deint_portA_cnt);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "str_deint_portB_cnd", state->str_deint_portB_cnt);
- /* end of configuration */
-
- /* capture unit state */
- for (i = 0; i < N_CAPTURE_UNIT_ID; i++) {
- capture_unit_state_t *capture_unit_state;
-
- ia_css_debug_dtrace(2, "\tCaptureUnit %d State:\n", i);
-
- capture_unit_state = &state->capture_unit[i];
- debug_print_isys_capture_unit_state(capture_unit_state);
- }
- /* end of capture unit state */
-
- /* acquisition unit state */
- for (i = 0; i < N_ACQUISITION_UNIT_ID; i++) {
- acquisition_unit_state_t *acquisition_unit_state;
-
- ia_css_debug_dtrace(2, "\tAcquisitionUnit %d State:\n", i);
-
- acquisition_unit_state = &state->acquisition_unit[i];
- debug_print_isys_acquisition_unit_state(acquisition_unit_state);
- }
- /* end of acquisition unit state */
-
- /* control unit state */
- for (i = 0; i < N_CTRL_UNIT_ID; i++) {
- ia_css_debug_dtrace(2, "\tControlUnit %d State:\n", i);
-
- debug_print_isys_ctrl_unit_state(&state->ctrl_unit_state[i]);
- }
- /* end of control unit state */
-}
-#endif
-
-void ia_css_debug_dump_isys_state(void)
-{
- static input_system_state_t state;
-
- input_system_get_state(INPUT_SYSTEM0_ID, &state);
-
-#ifndef ISP2401
- debug_print_isys_state(&state);
-#else
- input_system_dump_state(INPUT_SYSTEM0_ID, &state);
-#endif
-}
-
-void ia_css_debug_dump_debug_info(const char *context)
-{
- if (!context)
- context = "No Context provided";
-
- ia_css_debug_dtrace(2, "CSS Debug Info dump [Context = %s]\n", context);
- if (!IS_ISP2401)
- ia_css_debug_dump_rx_state();
-
-#ifndef ISP2401
- ia_css_debug_dump_if_state();
-#endif
- ia_css_debug_dump_isp_state();
- ia_css_debug_dump_isp_sp_fifo_state();
- ia_css_debug_dump_isp_gdc_fifo_state();
- ia_css_debug_dump_sp_state();
- ia_css_debug_dump_perf_counters();
-
-#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG
- sh_css_dump_thread_wait_info();
- sh_css_dump_pipe_stage_info();
- sh_css_dump_pipe_stripe_info();
-#endif
- ia_css_debug_dump_dma_isp_fifo_state();
- ia_css_debug_dump_dma_sp_fifo_state();
- ia_css_debug_dump_dma_state();
-
- if (!IS_ISP2401) {
- struct irq_controller_state state;
-
- ia_css_debug_dump_isys_state();
-
- irq_controller_get_state(IRQ2_ID, &state);
-
- ia_css_debug_dtrace(2, "\t%-32s:\n",
- "Input System IRQ Controller State");
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "irq_edge", state.irq_edge);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "irq_mask", state.irq_mask);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "irq_status", state.irq_status);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "irq_enable", state.irq_enable);
-
- ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
- "irq_level_not_pulse",
- state.irq_level_not_pulse);
- } else {
- ia_css_debug_dump_isys_state();
- }
-
- ia_css_debug_tagger_state();
-
- return;
-}
-
/* this function is for debug use, it can make the SP go to a sleep
 state after each frame, then the user can dump the stable SP dmem.
 this function can be called after ia_css_start_sp()
@@ -2379,36 +1022,6 @@ void ia_css_debug_dump_isp_binary(void)
}
}
-void ia_css_debug_dump_perf_counters(void)
-{
- const struct ia_css_fw_info *fw;
- int i;
- unsigned int HIVE_ADDR_ia_css_isys_sp_error_cnt;
- /* N_MIPI_PORT_ID + 1: 3 Capture Units and 1 Acquire Unit. */
- s32 ia_css_sp_input_system_error_cnt[N_MIPI_PORT_ID + 1];
-
- if (IS_ISP2401)
- return;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "Input System Error Counters:\n");
-
- fw = &sh_css_sp_fw;
- HIVE_ADDR_ia_css_isys_sp_error_cnt =
- fw->info.sp.perf_counter_input_system_error;
-
- (void)HIVE_ADDR_ia_css_isys_sp_error_cnt;
-
- sp_dmem_load(SP0_ID,
- (unsigned int)sp_address_of(ia_css_isys_sp_error_cnt),
- &ia_css_sp_input_system_error_cnt,
- sizeof(ia_css_sp_input_system_error_cnt));
-
- for (i = 0; i < N_MIPI_PORT_ID + 1; i++) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "\tport[%d] = %d\n",
- i, ia_css_sp_input_system_error_cnt[i]);
- }
-}
-
/*
* @brief Initialize the debug mode.
* Refer to "ia_css_debug.h" for more details.
@@ -2464,86 +1077,6 @@ static void __printf(1, 2) dtrace_dot(const char *fmt, ...)
va_end(ap);
}
-#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG
-void sh_css_dump_thread_wait_info(void)
-{
- const struct ia_css_fw_info *fw;
- int i;
- unsigned int HIVE_ADDR_sp_thread_wait;
- s32 sp_thread_wait[MAX_THREAD_NUM];
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "SEM WAITS:\n");
-
- fw = &sh_css_sp_fw;
- HIVE_ADDR_sp_thread_wait =
- fw->info.sp.debug_wait;
-
- (void)HIVE_ADDR_sp_thread_wait;
-
- sp_dmem_load(SP0_ID,
- (unsigned int)sp_address_of(sp_thread_wait),
- &sp_thread_wait,
- sizeof(sp_thread_wait));
- for (i = 0; i < MAX_THREAD_NUM; i++) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
- "\twait[%d] = 0x%X\n",
- i, sp_thread_wait[i]);
- }
-}
-
-void sh_css_dump_pipe_stage_info(void)
-{
- const struct ia_css_fw_info *fw;
- int i;
- unsigned int HIVE_ADDR_sp_pipe_stage;
- s32 sp_pipe_stage[MAX_THREAD_NUM];
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "PIPE STAGE:\n");
-
- fw = &sh_css_sp_fw;
- HIVE_ADDR_sp_pipe_stage =
- fw->info.sp.debug_stage;
-
- (void)HIVE_ADDR_sp_pipe_stage;
-
- sp_dmem_load(SP0_ID,
- (unsigned int)sp_address_of(sp_pipe_stage),
- &sp_pipe_stage,
- sizeof(sp_pipe_stage));
- for (i = 0; i < MAX_THREAD_NUM; i++) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
- "\tstage[%d] = %d\n",
- i, sp_pipe_stage[i]);
- }
-}
-
-void sh_css_dump_pipe_stripe_info(void)
-{
- const struct ia_css_fw_info *fw;
- int i;
- unsigned int HIVE_ADDR_sp_pipe_stripe;
- s32 sp_pipe_stripe[MAX_THREAD_NUM];
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "PIPE STRIPE:\n");
-
- fw = &sh_css_sp_fw;
- HIVE_ADDR_sp_pipe_stripe =
- fw->info.sp.debug_stripe;
-
- (void)HIVE_ADDR_sp_pipe_stripe;
-
- sp_dmem_load(SP0_ID,
- (unsigned int)sp_address_of(sp_pipe_stripe),
- &sp_pipe_stripe,
- sizeof(sp_pipe_stripe));
- for (i = 0; i < MAX_THREAD_NUM; i++) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
- "\tstripe[%d] = %d\n",
- i, sp_pipe_stripe[i]);
- }
-}
-#endif
-
static void
ia_css_debug_pipe_graph_dump_frame(
const struct ia_css_frame *frame,
@@ -2673,7 +1206,7 @@ ia_css_debug_pipe_graph_dump_stage(
char enable_info1[100];
char enable_info2[100];
char enable_info3[100];
- char enable_info[200];
+ char enable_info[302];
struct ia_css_binary_info *bi = stage->binary_info;
/* Split it in 2 function-calls to keep the amount of
@@ -3360,32 +1893,6 @@ void ia_css_debug_dump_trace(void)
#endif
}
-/* Tagger state dump function. The tagger is only available when the CSS
- * contains an input system (2400 or 2401). */
-void ia_css_debug_tagger_state(void)
-{
- unsigned int i;
- unsigned int HIVE_ADDR_tagger_frames;
- ia_css_tagger_buf_sp_elem_t tbuf_frames[MAX_CB_ELEMS_FOR_TAGGER];
-
- HIVE_ADDR_tagger_frames = sh_css_sp_fw.info.sp.tagger_frames_addr;
-
- /* This variable is not used in crun */
- (void)HIVE_ADDR_tagger_frames;
-
- /* 2400 and 2401 only have 1 SP, so the tagger lives on SP0 */
- sp_dmem_load(SP0_ID,
- (unsigned int)sp_address_of(tagger_frames),
- tbuf_frames,
- sizeof(tbuf_frames));
-
- ia_css_debug_dtrace(2, "Tagger Info:\n");
- for (i = 0; i < MAX_CB_ELEMS_FOR_TAGGER; i++) {
- ia_css_debug_dtrace(2, "\t tagger frame[%d]: exp_id=%d, marked=%d, locked=%d\n",
- i, tbuf_frames[i].exp_id, tbuf_frames[i].mark, tbuf_frames[i].lock);
- }
-}
-
/* ISP2401 */
void ia_css_debug_pc_dump(sp_ID_t id, unsigned int num_of_dumps)
{
diff --git a/drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c b/drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c
index 6d9f47629fbc..86254888f676 100644
--- a/drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c
+++ b/drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c
@@ -16,7 +16,6 @@
#include "system_global.h"
#include <linux/kernel.h>
-#ifndef ISP2401
#include "ia_css_ifmtr.h"
#include <math_support.h>
@@ -550,4 +549,3 @@ static int ifmtr_input_start_line(
return 0;
}
-#endif
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys.h b/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys.h
index 711a321e9a3f..d067b9fc43c7 100644
--- a/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys.h
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys.h
@@ -24,20 +24,18 @@
#include <system_global.h>
#include "ia_css_isys_comm.h"
-#ifdef ISP2401
/**
* Virtual Input System. (Input System 2401)
*/
typedef isp2401_input_system_cfg_t ia_css_isys_descr_t;
/* end of Virtual Input System */
-#endif
+
input_system_err_t ia_css_isys_init(void);
void ia_css_isys_uninit(void);
enum mipi_port_id ia_css_isys_port_to_mipi_port(
enum mipi_port_id api_port);
-#if defined(ISP2401)
/**
* @brief Register one (virtual) stream. This is used to track when all
@@ -74,9 +72,7 @@ int ia_css_isys_convert_compressed_format(
struct isp2401_input_system_cfg_s *cfg);
unsigned int ia_css_csi2_calculate_input_system_alignment(
enum atomisp_input_format fmt_type);
-#endif
-#if !defined(ISP2401)
/* CSS Receiver */
void ia_css_isys_rx_configure(
const rx_cfg_t *config,
@@ -93,7 +89,6 @@ void ia_css_isys_rx_clear_irq_info(enum mipi_port_id port,
unsigned int irq_infos);
unsigned int ia_css_isys_rx_translate_irq_infos(unsigned int bits);
-#endif /* #if !defined(ISP2401) */
/* @brief Translate format and compression to format type.
*
@@ -111,7 +106,6 @@ int ia_css_isys_convert_stream_format_to_mipi_format(
mipi_predictor_t compression,
unsigned int *fmt_type);
-#ifdef ISP2401
/**
* Virtual Input System. (Input System 2401)
*/
@@ -178,6 +172,5 @@ void ia_css_isys_stream2mmio_sid_rmgr_release(
stream2mmio_sid_ID_t *sid);
/* end of Virtual Input System */
-#endif
#endif /* __IA_CSS_ISYS_H__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys_comm.h b/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys_comm.h
index d80ef42c7a64..784afc82c8d2 100644
--- a/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys_comm.h
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys_comm.h
@@ -19,7 +19,6 @@
#include <type_support.h>
#include <input_system.h>
-#ifdef ISP2401
#include <platform_support.h> /* inline */
#include <input_system_global.h>
#include <ia_css_stream_public.h> /* IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH */
@@ -50,5 +49,4 @@ static inline uint32_t ia_css_isys_generate_stream_id(
return sp_thread_id * IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH + stream_id;
}
-#endif /* ISP2401*/
#endif /*_IA_CSS_ISYS_COMM_H */
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.c
index 3fc9fed1e516..881036c67baf 100644
--- a/drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.c
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.c
@@ -15,7 +15,6 @@
#include "system_global.h"
-#ifdef ISP2401
#include "assert_support.h"
#include "platform_support.h"
@@ -165,4 +164,3 @@ int ia_css_isys_csi_rx_unregister_stream(
}
return retval;
}
-#endif
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.c
index 261c6460e970..4df0a9188ee6 100644
--- a/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.c
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.c
@@ -15,7 +15,6 @@
#include "system_global.h"
-#ifdef ISP2401
#include "assert_support.h"
#include "platform_support.h"
@@ -85,4 +84,3 @@ void ia_css_isys_dma_channel_rmgr_release(
}
}
}
-#endif
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_init.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_init.c
index d0a43c44963c..18bfe1010989 100644
--- a/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_init.c
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_init.c
@@ -18,13 +18,10 @@
#include "ia_css_isys.h"
#include "platform_support.h"
-#ifdef ISP2401
#include "isys_dma_public.h" /* isys2401_dma_set_max_burst_size() */
#include "isys_irq.h"
-#endif
-#if !defined(ISP2401)
-input_system_err_t ia_css_isys_init(void)
+static input_system_err_t ia_css_isys_2400_init(void)
{
backend_channel_cfg_t backend_ch0;
backend_channel_cfg_t backend_ch1;
@@ -86,8 +83,8 @@ input_system_err_t ia_css_isys_init(void)
return error;
}
-#elif defined(ISP2401)
-input_system_err_t ia_css_isys_init(void)
+
+static input_system_err_t ia_css_isys_2401_init(void)
{
ia_css_isys_csi_rx_lut_rmgr_init();
ia_css_isys_ibuf_rmgr_init();
@@ -104,19 +101,21 @@ input_system_err_t ia_css_isys_init(void)
return INPUT_SYSTEM_ERR_NO_ERROR;
}
-#endif
-#if !defined(ISP2401)
-void ia_css_isys_uninit(void)
+input_system_err_t ia_css_isys_init(void)
{
+ if (IS_ISP2401)
+ return ia_css_isys_2401_init();
+
+ return ia_css_isys_2400_init();
}
-#elif defined(ISP2401)
+
void ia_css_isys_uninit(void)
{
- ia_css_isys_csi_rx_lut_rmgr_uninit();
- ia_css_isys_ibuf_rmgr_uninit();
- ia_css_isys_dma_channel_rmgr_uninit();
- ia_css_isys_stream2mmio_sid_rmgr_uninit();
+ if (IS_ISP2401) {
+ ia_css_isys_csi_rx_lut_rmgr_uninit();
+ ia_css_isys_ibuf_rmgr_uninit();
+ ia_css_isys_dma_channel_rmgr_uninit();
+ ia_css_isys_stream2mmio_sid_rmgr_uninit();
+ }
}
-#endif
-
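
The isys_init.c hunk above shows the pattern applied throughout this series: mutually exclusive "#if !defined(ISP2401)" / "#elif defined(ISP2401)" definitions are collapsed into per-variant static helpers plus a single exported entry point that dispatches at runtime on IS_ISP2401, so one binary can drive both the 2400 and 2401 input systems. A minimal sketch of the resulting shape, using hypothetical names (feature_init, is_isp2401) rather than the driver's own, and not part of the patch itself:

	#include <stdbool.h>

	/* Both variants are always compiled in; nothing is hidden behind #ifdef. */
	static bool is_isp2401;	/* stand-in for the driver's IS_ISP2401 runtime check */

	static int feature_init_2400(void)
	{
		/* 2400-only setup goes here */
		return 0;
	}

	static int feature_init_2401(void)
	{
		/* 2401-only setup goes here */
		return 0;
	}

	int feature_init(void)
	{
		if (is_isp2401)
			return feature_init_2401();

		return feature_init_2400();
	}
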
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.c
index fb0cb183f701..b6be63746c3e 100644
--- a/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.c
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.c
@@ -15,7 +15,6 @@
#include "system_global.h"
-#ifdef ISP2401
#include "assert_support.h"
#include "platform_support.h"
@@ -87,4 +86,3 @@ void ia_css_isys_stream2mmio_sid_rmgr_release(
}
}
}
-#endif
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c
index af153c3fb86d..deb4130f710c 100644
--- a/drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c
@@ -20,7 +20,6 @@
#include "ia_css_irq.h"
#include "sh_css_internal.h"
-#if !defined(ISP2401)
void ia_css_isys_rx_enable_all_interrupts(enum mipi_port_id port)
{
hrt_data bits = receiver_port_reg_load(RX0_ID,
@@ -209,144 +208,158 @@ void ia_css_isys_rx_clear_irq_info(enum mipi_port_id port,
return;
}
-#endif /* #if !defined(ISP2401) */
-int ia_css_isys_convert_stream_format_to_mipi_format(
- enum atomisp_input_format input_format,
- mipi_predictor_t compression,
- unsigned int *fmt_type)
+static int ia_css_isys_2400_set_fmt_type(enum atomisp_input_format input_format,
+ unsigned int *fmt_type)
{
- assert(fmt_type);
- /*
- * Custom (user defined) modes. Used for compressed
- * MIPI transfers
- *
- * Checkpatch thinks the indent before "if" is suspect
- * I think the only suspect part is the missing "else"
- * because of the return.
- */
- if (compression != MIPI_PREDICTOR_NONE) {
- switch (input_format) {
- case ATOMISP_INPUT_FORMAT_RAW_6:
- *fmt_type = 6;
- break;
- case ATOMISP_INPUT_FORMAT_RAW_7:
- *fmt_type = 7;
- break;
- case ATOMISP_INPUT_FORMAT_RAW_8:
- *fmt_type = 8;
- break;
- case ATOMISP_INPUT_FORMAT_RAW_10:
- *fmt_type = 10;
- break;
- case ATOMISP_INPUT_FORMAT_RAW_12:
- *fmt_type = 12;
- break;
- case ATOMISP_INPUT_FORMAT_RAW_14:
- *fmt_type = 14;
- break;
- case ATOMISP_INPUT_FORMAT_RAW_16:
- *fmt_type = 16;
- break;
- default:
- return -EINVAL;
- }
- return 0;
- }
- /*
- * This mapping comes from the Arasan CSS function spec
- * (CSS_func_spec1.08_ahb_sep29_08.pdf).
- *
- * MW: For some reason the mapping is not 1-to-1
- */
switch (input_format) {
case ATOMISP_INPUT_FORMAT_RGB_888:
- *fmt_type = MIPI_FORMAT_RGB888;
+ *fmt_type = MIPI_FORMAT_2400_RGB888;
break;
case ATOMISP_INPUT_FORMAT_RGB_555:
- *fmt_type = MIPI_FORMAT_RGB555;
+ *fmt_type = MIPI_FORMAT_2400_RGB555;
break;
case ATOMISP_INPUT_FORMAT_RGB_444:
- *fmt_type = MIPI_FORMAT_RGB444;
+ *fmt_type = MIPI_FORMAT_2400_RGB444;
break;
case ATOMISP_INPUT_FORMAT_RGB_565:
- *fmt_type = MIPI_FORMAT_RGB565;
+ *fmt_type = MIPI_FORMAT_2400_RGB565;
break;
case ATOMISP_INPUT_FORMAT_RGB_666:
- *fmt_type = MIPI_FORMAT_RGB666;
+ *fmt_type = MIPI_FORMAT_2400_RGB666;
break;
case ATOMISP_INPUT_FORMAT_RAW_8:
- *fmt_type = MIPI_FORMAT_RAW8;
+ *fmt_type = MIPI_FORMAT_2400_RAW8;
break;
case ATOMISP_INPUT_FORMAT_RAW_10:
- *fmt_type = MIPI_FORMAT_RAW10;
+ *fmt_type = MIPI_FORMAT_2400_RAW10;
break;
case ATOMISP_INPUT_FORMAT_RAW_6:
- *fmt_type = MIPI_FORMAT_RAW6;
+ *fmt_type = MIPI_FORMAT_2400_RAW6;
break;
case ATOMISP_INPUT_FORMAT_RAW_7:
- *fmt_type = MIPI_FORMAT_RAW7;
+ *fmt_type = MIPI_FORMAT_2400_RAW7;
break;
case ATOMISP_INPUT_FORMAT_RAW_12:
- *fmt_type = MIPI_FORMAT_RAW12;
+ *fmt_type = MIPI_FORMAT_2400_RAW12;
break;
case ATOMISP_INPUT_FORMAT_RAW_14:
- *fmt_type = MIPI_FORMAT_RAW14;
+ *fmt_type = MIPI_FORMAT_2400_RAW14;
break;
case ATOMISP_INPUT_FORMAT_YUV420_8:
- *fmt_type = MIPI_FORMAT_YUV420_8;
+ *fmt_type = MIPI_FORMAT_2400_YUV420_8;
break;
case ATOMISP_INPUT_FORMAT_YUV420_10:
- *fmt_type = MIPI_FORMAT_YUV420_10;
+ *fmt_type = MIPI_FORMAT_2400_YUV420_10;
break;
case ATOMISP_INPUT_FORMAT_YUV422_8:
- *fmt_type = MIPI_FORMAT_YUV422_8;
+ *fmt_type = MIPI_FORMAT_2400_YUV422_8;
break;
case ATOMISP_INPUT_FORMAT_YUV422_10:
- *fmt_type = MIPI_FORMAT_YUV422_10;
+ *fmt_type = MIPI_FORMAT_2400_YUV422_10;
break;
case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
- *fmt_type = MIPI_FORMAT_YUV420_8_LEGACY;
+ *fmt_type = MIPI_FORMAT_2400_YUV420_8_LEGACY;
break;
case ATOMISP_INPUT_FORMAT_EMBEDDED:
- *fmt_type = MIPI_FORMAT_EMBEDDED;
+ *fmt_type = MIPI_FORMAT_2400_EMBEDDED;
break;
-#ifndef ISP2401
case ATOMISP_INPUT_FORMAT_RAW_16:
/* This is not specified by Arasan, so we use
* 17 for now.
*/
- *fmt_type = MIPI_FORMAT_RAW16;
+ *fmt_type = MIPI_FORMAT_2400_RAW16;
break;
case ATOMISP_INPUT_FORMAT_BINARY_8:
- *fmt_type = MIPI_FORMAT_BINARY_8;
+ *fmt_type = MIPI_FORMAT_2400_CUSTOM0;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_16:
+ case ATOMISP_INPUT_FORMAT_YUV422_16:
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ia_css_isys_2401_set_fmt_type(enum atomisp_input_format input_format,
+ unsigned int *fmt_type)
+{
+ switch (input_format) {
+ case ATOMISP_INPUT_FORMAT_RGB_888:
+ *fmt_type = MIPI_FORMAT_2401_RGB888;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_555:
+ *fmt_type = MIPI_FORMAT_2401_RGB555;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_444:
+ *fmt_type = MIPI_FORMAT_2401_RGB444;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_565:
+ *fmt_type = MIPI_FORMAT_2401_RGB565;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_666:
+ *fmt_type = MIPI_FORMAT_2401_RGB666;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_8:
+ *fmt_type = MIPI_FORMAT_2401_RAW8;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_10:
+ *fmt_type = MIPI_FORMAT_2401_RAW10;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_6:
+ *fmt_type = MIPI_FORMAT_2401_RAW6;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_7:
+ *fmt_type = MIPI_FORMAT_2401_RAW7;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_12:
+ *fmt_type = MIPI_FORMAT_2401_RAW12;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_14:
+ *fmt_type = MIPI_FORMAT_2401_RAW14;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_8:
+ *fmt_type = MIPI_FORMAT_2401_YUV420_8;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_10:
+ *fmt_type = MIPI_FORMAT_2401_YUV420_10;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV422_8:
+ *fmt_type = MIPI_FORMAT_2401_YUV422_8;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV422_10:
+ *fmt_type = MIPI_FORMAT_2401_YUV422_10;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
+ *fmt_type = MIPI_FORMAT_2401_YUV420_8_LEGACY;
+ break;
+ case ATOMISP_INPUT_FORMAT_EMBEDDED:
+ *fmt_type = MIPI_FORMAT_2401_EMBEDDED;
break;
-#else
case ATOMISP_INPUT_FORMAT_USER_DEF1:
- *fmt_type = MIPI_FORMAT_CUSTOM0;
+ *fmt_type = MIPI_FORMAT_2401_CUSTOM0;
break;
case ATOMISP_INPUT_FORMAT_USER_DEF2:
- *fmt_type = MIPI_FORMAT_CUSTOM1;
+ *fmt_type = MIPI_FORMAT_2401_CUSTOM1;
break;
case ATOMISP_INPUT_FORMAT_USER_DEF3:
- *fmt_type = MIPI_FORMAT_CUSTOM2;
+ *fmt_type = MIPI_FORMAT_2401_CUSTOM2;
break;
case ATOMISP_INPUT_FORMAT_USER_DEF4:
- *fmt_type = MIPI_FORMAT_CUSTOM3;
+ *fmt_type = MIPI_FORMAT_2401_CUSTOM3;
break;
case ATOMISP_INPUT_FORMAT_USER_DEF5:
- *fmt_type = MIPI_FORMAT_CUSTOM4;
+ *fmt_type = MIPI_FORMAT_2401_CUSTOM4;
break;
case ATOMISP_INPUT_FORMAT_USER_DEF6:
- *fmt_type = MIPI_FORMAT_CUSTOM5;
+ *fmt_type = MIPI_FORMAT_2401_CUSTOM5;
break;
case ATOMISP_INPUT_FORMAT_USER_DEF7:
- *fmt_type = MIPI_FORMAT_CUSTOM6;
+ *fmt_type = MIPI_FORMAT_2401_CUSTOM6;
break;
case ATOMISP_INPUT_FORMAT_USER_DEF8:
- *fmt_type = MIPI_FORMAT_CUSTOM7;
+ *fmt_type = MIPI_FORMAT_2401_CUSTOM7;
break;
-#endif
case ATOMISP_INPUT_FORMAT_YUV420_16:
case ATOMISP_INPUT_FORMAT_YUV422_16:
@@ -356,7 +369,60 @@ int ia_css_isys_convert_stream_format_to_mipi_format(
return 0;
}
-#if defined(ISP2401)
+int ia_css_isys_convert_stream_format_to_mipi_format(
+ enum atomisp_input_format input_format,
+ mipi_predictor_t compression,
+ unsigned int *fmt_type)
+{
+ assert(fmt_type);
+ /*
+ * Custom (user defined) modes. Used for compressed
+ * MIPI transfers
+ *
+ * Checkpatch thinks the indent before "if" is suspect
+ * I think the only suspect part is the missing "else"
+ * because of the return.
+ */
+ if (compression != MIPI_PREDICTOR_NONE) {
+ switch (input_format) {
+ case ATOMISP_INPUT_FORMAT_RAW_6:
+ *fmt_type = 6;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_7:
+ *fmt_type = 7;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_8:
+ *fmt_type = 8;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_10:
+ *fmt_type = 10;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_12:
+ *fmt_type = 12;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_14:
+ *fmt_type = 14;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_16:
+ *fmt_type = 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+ }
+ /*
+ * This mapping comes from the Arasan CSS function spec
+ * (CSS_func_spec1.08_ahb_sep29_08.pdf).
+ *
+ * MW: For some reason the mapping is not 1-to-1
+ */
+ if (IS_ISP2401)
+ return ia_css_isys_2401_set_fmt_type(input_format, fmt_type);
+ else
+ return ia_css_isys_2400_set_fmt_type(input_format, fmt_type);
+}
+
static mipi_predictor_t sh_css_csi2_compression_type_2_mipi_predictor(
enum ia_css_csi2_compression_type type)
{
@@ -473,9 +539,7 @@ unsigned int ia_css_csi2_calculate_input_system_alignment(
return memory_alignment_in_bytes;
}
-#endif
-#if !defined(ISP2401)
static const mipi_lane_cfg_t MIPI_PORT_LANES[N_RX_MODE][N_MIPI_PORT_ID] = {
{MIPI_4LANE_CFG, MIPI_1LANE_CFG, MIPI_0LANE_CFG},
{MIPI_3LANE_CFG, MIPI_1LANE_CFG, MIPI_0LANE_CFG},
@@ -597,4 +661,3 @@ void ia_css_isys_rx_disable(void)
}
return;
}
-#endif /* if !defined(ISP2401) */
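
For reference, the rx.c refactor above keeps the external contract of ia_css_isys_convert_stream_format_to_mipi_format() intact: compressed transfers still resolve to the raw bit depth, while uncompressed formats now land on the variant-specific MIPI_FORMAT_2400_* or MIPI_FORMAT_2401_* values via the new per-variant helpers. A brief caller-side sketch, where the wrapper name is hypothetical and error handling is abbreviated:

	static int pick_fmt_type(enum atomisp_input_format fmt,
				 mipi_predictor_t compression,
				 unsigned int *fmt_type)
	{
		int err;

		err = ia_css_isys_convert_stream_format_to_mipi_format(fmt, compression,
								       fmt_type);
		if (err)
			return err;	/* -EINVAL for unsupported formats */

		/*
		 * With compression != MIPI_PREDICTOR_NONE, *fmt_type is the raw
		 * bit depth (6/7/8/10/12/14/16); otherwise it is one of the
		 * variant enums, e.g. MIPI_FORMAT_2400_RAW10 on ISP2400 and
		 * MIPI_FORMAT_2401_RAW10 on ISP2401.
		 */
		return 0;
	}
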
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c
index 8fc7746f8639..269a81190577 100644
--- a/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c
@@ -17,7 +17,6 @@
#include "system_global.h"
-#ifdef ISP2401
#include "ia_css_isys.h"
#include "ia_css_debug.h"
@@ -689,7 +688,7 @@ static bool calculate_be_cfg(
cfg->csi_mipi_cfg.comp_scheme = isys_cfg->csi_port_attr.comp_scheme;
cfg->csi_mipi_cfg.comp_predictor = isys_cfg->csi_port_attr.comp_predictor;
cfg->csi_mipi_cfg.comp_bit_idx = cfg->csi_mipi_cfg.data_type -
- MIPI_FORMAT_CUSTOM0;
+ MIPI_FORMAT_2401_CUSTOM0;
}
return true;
@@ -856,14 +855,13 @@ static csi_mipi_packet_type_t get_csi_mipi_packet_type(
packet_type = CSI_MIPI_PACKET_TYPE_RESERVED;
- if (data_type >= 0 && data_type <= MIPI_FORMAT_SHORT8)
+ if (data_type >= 0 && data_type <= MIPI_FORMAT_2401_SHORT8)
packet_type = CSI_MIPI_PACKET_TYPE_SHORT;
- if (data_type > MIPI_FORMAT_SHORT8 && data_type <= N_MIPI_FORMAT)
+ if (data_type > MIPI_FORMAT_2401_SHORT8 && data_type <= N_MIPI_FORMAT_2401)
packet_type = CSI_MIPI_PACKET_TYPE_LONG;
return packet_type;
}
/* end of Private Methods */
-#endif
diff --git a/drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h b/drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h
index 222c381ff3b9..5f5dab7252aa 100644
--- a/drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h
+++ b/drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h
@@ -241,7 +241,6 @@ bool ia_css_pipeline_uses_params(struct ia_css_pipeline *pipeline);
*/
bool ia_css_pipeline_get_sp_thread_id(unsigned int key, unsigned int *val);
-#if defined(ISP2401)
/**
* @brief Get the pipeline io status
*
@@ -250,7 +249,6 @@ bool ia_css_pipeline_get_sp_thread_id(unsigned int key, unsigned int *val);
* Pointer to pipe_io_status
*/
struct sh_css_sp_pipeline_io_status *ia_css_pipeline_get_pipe_io_status(void);
-#endif
/**
* @brief Map an SP thread to this pipeline
diff --git a/drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c b/drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c
index e9e187649a65..3d8741e7d5ca 100644
--- a/drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c
+++ b/drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c
@@ -454,12 +454,10 @@ bool ia_css_pipeline_has_stopped(struct ia_css_pipeline *pipeline)
return sp_group.pipe[thread_id].num_stages == 0;
}
-#if defined(ISP2401)
struct sh_css_sp_pipeline_io_status *ia_css_pipeline_get_pipe_io_status(void)
{
return(&sh_css_sp_group.pipe_io_status);
}
-#endif
bool ia_css_pipeline_is_mapped(unsigned int key)
{
diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c
index 4b3fa6d93fe0..f35c90809414 100644
--- a/drivers/staging/media/atomisp/pci/sh_css.c
+++ b/drivers/staging/media/atomisp/pci/sh_css.c
@@ -56,9 +56,7 @@
#include "assert_support.h"
#include "math_support.h"
#include "sw_event_global.h" /* Event IDs.*/
-#if !defined(ISP2401)
#include "ia_css_ifmtr.h"
-#endif
#include "input_system.h"
#include "mmu_device.h" /* mmu_set_page_table_base_index(), ... */
#include "ia_css_mmu_private.h" /* sh_css_mmu_set_page_table_base_index() */
@@ -345,7 +343,6 @@ static struct sh_css_hmm_buffer_record
*sh_css_hmm_buffer_record_validate(ia_css_ptr ddr_buffer_addr,
enum ia_css_buffer_type type);
-#ifdef ISP2401
static unsigned int get_crop_lines_for_bayer_order(const struct
ia_css_stream_config *config);
static unsigned int get_crop_columns_for_bayer_order(const struct
@@ -353,8 +350,6 @@ static unsigned int get_crop_columns_for_bayer_order(const struct
static void get_pipe_extra_pixel(struct ia_css_pipe *pipe,
unsigned int *extra_row, unsigned int *extra_column);
-#endif
-
static void
sh_css_pipe_free_shading_table(struct ia_css_pipe *pipe)
{
@@ -472,9 +467,8 @@ ia_css_stream_input_format_bits_per_pixel(struct ia_css_stream *stream)
/* TODO: move define to proper file in tools */
#define GP_ISEL_TPG_MODE 0x90058
-#if !defined(ISP2401)
static int
-sh_css_config_input_network(struct ia_css_stream *stream)
+sh_css_config_input_network_2400(struct ia_css_stream *stream)
{
unsigned int fmt_type;
struct ia_css_pipe *pipe = stream->last_pipe;
@@ -528,7 +522,7 @@ sh_css_config_input_network(struct ia_css_stream *stream)
"sh_css_config_input_network() leave:\n");
return 0;
}
-#elif defined(ISP2401)
+
static unsigned int csi2_protocol_calculate_max_subpixels_per_line(
enum atomisp_input_format format,
unsigned int pixels_per_line)
@@ -824,9 +818,10 @@ static bool sh_css_translate_stream_cfg_to_input_system_input_port_attr(
stream_cfg->source.port.num_lanes;
isys_stream_descr->csi_port_attr.fmt_type = fmt_type;
isys_stream_descr->csi_port_attr.ch_id = stream_cfg->channel_id;
-#ifdef ISP2401
- isys_stream_descr->online = stream_cfg->online;
-#endif
+
+ if (IS_ISP2401)
+ isys_stream_descr->online = stream_cfg->online;
+
err |= ia_css_isys_convert_compressed_format(
&stream_cfg->source.port.compression,
isys_stream_descr);
@@ -849,15 +844,15 @@ static bool sh_css_translate_stream_cfg_to_input_system_input_port_attr(
stream_cfg->metadata_config.resolution.width;
isys_stream_descr->metadata.lines_per_frame =
stream_cfg->metadata_config.resolution.height;
-#ifdef ISP2401
+
/*
* For new input system, number of str2mmio requests must be even.
* So we round up number of metadata lines to be even.
*/
- if (isys_stream_descr->metadata.lines_per_frame > 0)
+ if (IS_ISP2401 && isys_stream_descr->metadata.lines_per_frame > 0)
isys_stream_descr->metadata.lines_per_frame +=
(isys_stream_descr->metadata.lines_per_frame & 1);
-#endif
+
isys_stream_descr->metadata.align_req_in_bytes =
ia_css_csi2_calculate_input_system_alignment(
stream_cfg->metadata_config.data_type);
@@ -972,7 +967,7 @@ static bool sh_css_translate_binary_info_to_input_system_output_port_attr(
}
static int
-sh_css_config_input_network(struct ia_css_stream *stream)
+sh_css_config_input_network_2401(struct ia_css_stream *stream)
{
bool rc;
ia_css_isys_descr_t isys_stream_descr;
@@ -1181,7 +1176,6 @@ static inline int stream_unregister_with_csi_rx(
{
return stream_csi_rx_helper(stream, ia_css_isys_csi_rx_unregister_stream);
}
-#endif
static void
@@ -1194,14 +1188,11 @@ start_binary(struct ia_css_pipe *pipe,
if (binary)
sh_css_metrics_start_binary(&binary->metrics);
-
-#if !defined(ISP2401)
- if (pipe->stream->reconfigure_css_rx) {
+ if (!IS_ISP2401 && pipe->stream->reconfigure_css_rx) {
ia_css_isys_rx_configure(&pipe->stream->csi_rx_config,
pipe->stream->config.mode);
pipe->stream->reconfigure_css_rx = false;
}
-#endif
}
/* start the copy function on the SP */
@@ -1214,22 +1205,18 @@ start_copy_on_sp(struct ia_css_pipe *pipe,
if ((!pipe) || (!pipe->stream))
return -EINVAL;
-#if !defined(ISP2401)
- if (pipe->stream->reconfigure_css_rx)
+ if (!IS_ISP2401 && pipe->stream->reconfigure_css_rx)
ia_css_isys_rx_disable();
-#endif
if (pipe->stream->config.input_config.format != ATOMISP_INPUT_FORMAT_BINARY_8)
return -EINVAL;
sh_css_sp_start_binary_copy(ia_css_pipe_get_pipe_num(pipe), out_frame, pipe->stream->config.pixels_per_clock == 2);
-#if !defined(ISP2401)
- if (pipe->stream->reconfigure_css_rx) {
+ if (!IS_ISP2401 && pipe->stream->reconfigure_css_rx) {
ia_css_isys_rx_configure(&pipe->stream->csi_rx_config,
pipe->stream->config.mode);
pipe->stream->reconfigure_css_rx = false;
}
-#endif
return 0;
}
@@ -1311,9 +1298,7 @@ sh_css_invalidate_shading_tables(struct ia_css_stream *stream)
static void
enable_interrupts(enum ia_css_irq_type irq_type)
{
-#ifndef ISP2401
enum mipi_port_id port;
-#endif
bool enable_pulse = irq_type != IA_CSS_IRQ_TYPE_EDGE;
IA_CSS_ENTER_PRIVATE("");
@@ -1334,10 +1319,10 @@ enable_interrupts(enum ia_css_irq_type irq_type)
(enum virq_id)(IRQ_SW_CHANNEL1_ID + IRQ_SW_CHANNEL_OFFSET),
true);
-#ifndef ISP2401
- for (port = 0; port < N_MIPI_PORT_ID; port++)
- ia_css_isys_rx_enable_all_interrupts(port);
-#endif
+ if (!IS_ISP2401) {
+ for (port = 0; port < N_MIPI_PORT_ID; port++)
+ ia_css_isys_rx_enable_all_interrupts(port);
+ }
IA_CSS_LEAVE_PRIVATE("");
}
@@ -2173,10 +2158,10 @@ ia_css_uninit(void)
ia_css_rmgr_uninit();
-#if !defined(ISP2401)
- /* needed for reprogramming the inputformatter after power cycle of css */
- ifmtr_set_if_blocking_mode_reset = true;
-#endif
+ if (!IS_ISP2401) {
+ /* needed for reprogramming the inputformatter after power cycle of css */
+ ifmtr_set_if_blocking_mode_reset = true;
+ }
if (!fw_explicitly_loaded)
ia_css_unload_firmware();
@@ -2957,7 +2942,6 @@ init_vf_frameinfo_defaults(struct ia_css_pipe *pipe,
return err;
}
-#ifdef ISP2401
static unsigned int
get_crop_lines_for_bayer_order(const struct ia_css_stream_config *config)
{
@@ -3059,11 +3043,11 @@ ia_css_get_crop_offsets(
pipe->config.input_effective_res.height);
input_res = &pipe->stream->config.input_config.input_res;
-#ifndef ISP2401
- effective_res = &pipe->stream->config.input_config.effective_res;
-#else
- effective_res = &pipe->config.input_effective_res;
-#endif
+
+ if (IS_ISP2401)
+ effective_res = &pipe->config.input_effective_res;
+ else
+ effective_res = &pipe->stream->config.input_config.effective_res;
get_pipe_extra_pixel(pipe, &extra_row, &extra_col);
@@ -3101,7 +3085,6 @@ ia_css_get_crop_offsets(
return;
}
-#endif
static int
init_in_frameinfo_memory_defaults(struct ia_css_pipe *pipe,
@@ -3132,9 +3115,10 @@ init_in_frameinfo_memory_defaults(struct ia_css_pipe *pipe,
ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_INPUT_FRAME, thread_id, &queue_id);
in_frame->dynamic_queue_id = queue_id;
in_frame->buf_type = IA_CSS_BUFFER_TYPE_INPUT_FRAME;
-#ifdef ISP2401
- ia_css_get_crop_offsets(pipe, &in_frame->frame_info);
-#endif
+
+ if (IS_ISP2401)
+ ia_css_get_crop_offsets(pipe, &in_frame->frame_info);
+
err = ia_css_frame_init_planes(in_frame);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s() bayer_order = %d\n",
@@ -4473,7 +4457,6 @@ ia_css_stream_get_buffer_depth(struct ia_css_stream *stream,
return 0;
}
-#if !defined(ISP2401)
unsigned int
sh_css_get_mipi_sizes_for_check(const unsigned int port, const unsigned int idx)
{
@@ -4484,7 +4467,6 @@ sh_css_get_mipi_sizes_for_check(const unsigned int port, const unsigned int idx)
port, idx, my_css.mipi_sizes_for_check[port][idx]);
return my_css.mipi_sizes_for_check[port][idx];
}
-#endif
static int sh_css_pipe_configure_output(
struct ia_css_pipe *pipe,
@@ -7369,7 +7351,7 @@ static int capture_start(struct ia_css_pipe *pipe)
}
}
/* old isys: need to send_mipi_frames() in all pipe modes */
- if (!IS_ISP2401 || (IS_ISP2401 && pipe->config.mode != IA_CSS_PIPE_MODE_COPY)) {
+ if (!IS_ISP2401 || pipe->config.mode != IA_CSS_PIPE_MODE_COPY) {
err = send_mipi_frames(pipe);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
@@ -7382,19 +7364,18 @@ static int capture_start(struct ia_css_pipe *pipe)
start_pipe(pipe, copy_ovrd, pipe->stream->config.mode);
-#if !defined(ISP2401)
/*
* old isys: for IA_CSS_PIPE_MODE_COPY pipe, isys rx has to be configured,
* which is currently done in start_binary(); but COPY pipe contains no binary,
* and does not call start_binary(); so we need to configure the rx here.
*/
- if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY &&
+ if (!IS_ISP2401 &&
+ pipe->config.mode == IA_CSS_PIPE_MODE_COPY &&
pipe->stream->reconfigure_css_rx) {
ia_css_isys_rx_configure(&pipe->stream->csi_rx_config,
pipe->stream->config.mode);
pipe->stream->reconfigure_css_rx = false;
}
-#endif
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
@@ -7616,20 +7597,15 @@ void ia_css_stream_request_flash(struct ia_css_stream *stream)
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_stream_request_flash() enter: void\n");
-#ifndef ISP2401
- sh_css_write_host2sp_command(host2sp_cmd_start_flash);
-#else
- if (sh_css_sp_is_running()) {
- if (!sh_css_write_host2sp_command(host2sp_cmd_start_flash)) {
+ if (!IS_ISP2401 || sh_css_sp_is_running()) {
+ if (!sh_css_write_host2sp_command(host2sp_cmd_start_flash) && IS_ISP2401) {
IA_CSS_ERROR("Call to 'sh-css_write_host2sp_command()' failed");
ia_css_debug_dump_sp_sw_debug_info();
- ia_css_debug_dump_debug_info(NULL);
}
} else {
IA_CSS_LOG("SP is not running!");
}
-#endif
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_stream_request_flash() leave: return_void\n");
}
@@ -7974,7 +7950,6 @@ ia_css_pipe_override_frame_format(struct ia_css_pipe *pipe,
return err;
}
-#if !defined(ISP2401)
/* Configuration of INPUT_SYSTEM_VERSION_2401 is done on SP */
static int
ia_css_stream_configure_rx(struct ia_css_stream *stream)
@@ -8017,7 +7992,6 @@ ia_css_stream_configure_rx(struct ia_css_stream *stream)
stream->reconfigure_css_rx = true;
return 0;
}
-#endif
static struct ia_css_pipe *
find_pipe(struct ia_css_pipe *pipes[], unsigned int num_pipes,
@@ -8103,9 +8077,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
/* check if mipi size specified */
if (stream_config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
-#ifdef ISP2401
- if (!stream_config->online)
-#endif
+ if (!IS_ISP2401 || !stream_config->online)
{
unsigned int port = (unsigned int)stream_config->source.port.port;
@@ -8206,32 +8178,31 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
switch (curr_stream->config.mode) {
case IA_CSS_INPUT_MODE_SENSOR:
case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
-#if !defined(ISP2401)
- ia_css_stream_configure_rx(curr_stream);
-#endif
+ if (!IS_ISP2401)
+ ia_css_stream_configure_rx(curr_stream);
break;
case IA_CSS_INPUT_MODE_TPG:
-#if !defined(ISP2401)
- IA_CSS_LOG("tpg_configuration: x_mask=%d, y_mask=%d, x_delta=%d, y_delta=%d, xy_mask=%d",
- curr_stream->config.source.tpg.x_mask,
- curr_stream->config.source.tpg.y_mask,
- curr_stream->config.source.tpg.x_delta,
- curr_stream->config.source.tpg.y_delta,
- curr_stream->config.source.tpg.xy_mask);
-
- sh_css_sp_configure_tpg(
- curr_stream->config.source.tpg.x_mask,
- curr_stream->config.source.tpg.y_mask,
- curr_stream->config.source.tpg.x_delta,
- curr_stream->config.source.tpg.y_delta,
- curr_stream->config.source.tpg.xy_mask);
-#endif
+ if (!IS_ISP2401) {
+ IA_CSS_LOG("tpg_configuration: x_mask=%d, y_mask=%d, x_delta=%d, y_delta=%d, xy_mask=%d",
+ curr_stream->config.source.tpg.x_mask,
+ curr_stream->config.source.tpg.y_mask,
+ curr_stream->config.source.tpg.x_delta,
+ curr_stream->config.source.tpg.y_delta,
+ curr_stream->config.source.tpg.xy_mask);
+
+ sh_css_sp_configure_tpg(
+ curr_stream->config.source.tpg.x_mask,
+ curr_stream->config.source.tpg.y_mask,
+ curr_stream->config.source.tpg.x_delta,
+ curr_stream->config.source.tpg.y_delta,
+ curr_stream->config.source.tpg.xy_mask);
+ }
break;
case IA_CSS_INPUT_MODE_PRBS:
-#if !defined(ISP2401)
- IA_CSS_LOG("mode prbs");
- sh_css_sp_configure_prbs(curr_stream->config.source.prbs.seed);
-#endif
+ if (!IS_ISP2401) {
+ IA_CSS_LOG("mode prbs");
+ sh_css_sp_configure_prbs(curr_stream->config.source.prbs.seed);
+ }
break;
case IA_CSS_INPUT_MODE_MEMORY:
IA_CSS_LOG("mode memory");
@@ -8473,46 +8444,48 @@ ia_css_stream_destroy(struct ia_css_stream *stream)
if ((stream->last_pipe) &&
ia_css_pipeline_is_mapped(stream->last_pipe->pipe_num)) {
-#if defined(ISP2401)
- for (i = 0; i < stream->num_pipes; i++) {
- struct ia_css_pipe *entry = stream->pipes[i];
- unsigned int sp_thread_id;
- struct sh_css_sp_pipeline_terminal *sp_pipeline_input_terminal;
-
- assert(entry);
- if (entry) {
- /* get the SP thread id */
- if (!ia_css_pipeline_get_sp_thread_id(
- ia_css_pipe_get_pipe_num(entry), &sp_thread_id))
- return -EINVAL;
- /* get the target input terminal */
- sp_pipeline_input_terminal =
- &sh_css_sp_group.pipe_io[sp_thread_id].input;
-
- for (i = 0; i < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; i++) {
- ia_css_isys_stream_h isys_stream =
- &sp_pipeline_input_terminal->context.virtual_input_system_stream[i];
- if (stream->config.isys_config[i].valid && isys_stream->valid)
- ia_css_isys_stream_destroy(isys_stream);
- }
- }
- }
- if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+ if (IS_ISP2401) {
for (i = 0; i < stream->num_pipes; i++) {
struct ia_css_pipe *entry = stream->pipes[i];
- /*
- * free any mipi frames that are remaining:
- * some test stream create-destroy cycles do
- * not generate output frames
- * and the mipi buffer is not freed in the
- * deque function
- */
- if (entry)
- free_mipi_frames(entry);
+ unsigned int sp_thread_id;
+ struct sh_css_sp_pipeline_terminal *sp_pipeline_input_terminal;
+
+ assert(entry);
+ if (entry) {
+ /* get the SP thread id */
+ if (!ia_css_pipeline_get_sp_thread_id(
+ ia_css_pipe_get_pipe_num(entry), &sp_thread_id))
+ return -EINVAL;
+
+ /* get the target input terminal */
+ sp_pipeline_input_terminal =
+ &sh_css_sp_group.pipe_io[sp_thread_id].input;
+
+ for (i = 0; i < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; i++) {
+ ia_css_isys_stream_h isys_stream =
+ &sp_pipeline_input_terminal->context.virtual_input_system_stream[i];
+ if (stream->config.isys_config[i].valid && isys_stream->valid)
+ ia_css_isys_stream_destroy(isys_stream);
+ }
+ }
+ }
+
+ if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+ for (i = 0; i < stream->num_pipes; i++) {
+ struct ia_css_pipe *entry = stream->pipes[i];
+ /*
+ * free any mipi frames that are remaining:
+ * some test stream create-destroy cycles do
+ * not generate output frames
+ * and the mipi buffer is not freed in the
+ * deque function
+ */
+ if (entry)
+ free_mipi_frames(entry);
+ }
}
+ stream_unregister_with_csi_rx(stream);
}
- stream_unregister_with_csi_rx(stream);
-#endif
for (i = 0; i < stream->num_pipes; i++) {
struct ia_css_pipe *curr_pipe = stream->pipes[i];
@@ -8605,15 +8578,13 @@ ia_css_stream_start(struct ia_css_stream *stream)
return err;
}
-#if defined(ISP2401)
- if ((stream->config.mode == IA_CSS_INPUT_MODE_SENSOR) ||
- (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR))
+ if (IS_ISP2401 &&
+ ((stream->config.mode == IA_CSS_INPUT_MODE_SENSOR) ||
+ (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)))
stream_register_with_csi_rx(stream);
-#endif
-#if !defined(ISP2401)
/* Initialize mipi size checks */
- if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+ if (!IS_ISP2401 && stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
unsigned int idx;
unsigned int port = (unsigned int)(stream->config.source.port.port);
@@ -8622,10 +8593,12 @@ ia_css_stream_start(struct ia_css_stream *stream)
sh_css_get_mipi_sizes_for_check(port, idx);
}
}
-#endif
if (stream->config.mode != IA_CSS_INPUT_MODE_MEMORY) {
- err = sh_css_config_input_network(stream);
+ if (IS_ISP2401)
+ err = sh_css_config_input_network_2401(stream);
+ else
+ err = sh_css_config_input_network_2400(stream);
if (err)
return err;
}
@@ -8646,16 +8619,14 @@ ia_css_stream_stop(struct ia_css_stream *stream)
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_stop: stopping %d\n",
stream->last_pipe->mode);
-#if !defined(ISP2401)
/* De-initialize mipi size checks */
- if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+ if (!IS_ISP2401 && stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
unsigned int idx;
unsigned int port = (unsigned int)(stream->config.source.port.port);
for (idx = 0; idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT; idx++)
sh_css_sp_group.config.mipi_sizes_for_check[port][idx] = 0;
}
-#endif
err = ia_css_pipeline_request_stop(&stream->last_pipe->pipeline);
if (err)
@@ -9035,7 +9006,6 @@ ia_css_stop_sp(void)
if (!sh_css_write_host2sp_command(host2sp_cmd_terminate)) {
IA_CSS_ERROR("Call to 'sh-css_write_host2sp_command()' failed");
ia_css_debug_dump_sp_sw_debug_info();
- ia_css_debug_dump_debug_info(NULL);
}
sh_css_sp_set_sp_running(false);
diff --git a/drivers/staging/media/atomisp/pci/sh_css_internal.h b/drivers/staging/media/atomisp/pci/sh_css_internal.h
index d98f1323441e..2349eb4d3767 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_internal.h
+++ b/drivers/staging/media/atomisp/pci/sh_css_internal.h
@@ -22,9 +22,7 @@
#include <platform_support.h>
#include <linux/stdarg.h>
-#if !defined(ISP2401)
#include "input_formatter.h"
-#endif
#include "input_system.h"
#include "ia_css_types.h"
@@ -86,31 +84,8 @@
#define SH_CSS_MAX_IF_CONFIGS 3 /* Must match with IA_CSS_NR_OF_CONFIGS (not defined yet).*/
#define SH_CSS_IF_CONFIG_NOT_NEEDED 0xFF
-/*
- * SH_CSS_MAX_SP_THREADS:
- * sp threads visible to host with connected communication queues
- * these threads are capable of running an image pipe
- * SH_CSS_MAX_SP_INTERNAL_THREADS:
- * internal sp service threads, no communication queues to host
- * these threads can't be used as image pipe
- */
-
-#if !defined(ISP2401)
-#define SH_CSS_SP_INTERNAL_METADATA_THREAD 1
-#else
-#define SH_CSS_SP_INTERNAL_METADATA_THREAD 0
-#endif
-
-#define SH_CSS_SP_INTERNAL_SERVICE_THREAD 1
-
#define SH_CSS_MAX_SP_THREADS 5
-#define SH_CSS_MAX_SP_INTERNAL_THREADS (\
- SH_CSS_SP_INTERNAL_SERVICE_THREAD +\
- SH_CSS_SP_INTERNAL_METADATA_THREAD)
-
-#define SH_CSS_MAX_PIPELINES SH_CSS_MAX_SP_THREADS
-
/**
* The C99 standard does not specify the exact object representation of structs;
* the representation is compiler dependent.
@@ -357,14 +332,12 @@ struct sh_css_sp_debug_command {
u32 dma_sw_reg;
};
-#if !defined(ISP2401)
/* SP input formatter configuration.*/
struct sh_css_sp_input_formatter_set {
u32 stream_format;
input_formatter_cfg_t config_a;
input_formatter_cfg_t config_b;
};
-#endif
#define IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT (3)
@@ -377,7 +350,7 @@ struct sh_css_sp_config {
frames are locked when their EOF event is successfully sent to the
host (true) or when they are passed to the preview/video pipe
(false). */
-#if !defined(ISP2401)
+
struct {
u8 a_changed;
u8 b_changed;
@@ -385,15 +358,13 @@ struct sh_css_sp_config {
struct sh_css_sp_input_formatter_set
set[SH_CSS_MAX_IF_CONFIGS]; /* CSI-2 port is used as index. */
} input_formatter;
-#endif
-#if !defined(ISP2401)
+
sync_generator_cfg_t sync_gen;
tpg_cfg_t tpg;
prbs_cfg_t prbs;
input_system_cfg_t input_circuit;
u8 input_circuit_cfg_changed;
u32 mipi_sizes_for_check[N_CSI_PORTS][IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT];
-#endif
u8 enable_isys_event_queue;
u8 disable_cont_vf;
};
@@ -409,7 +380,6 @@ enum sh_css_stage_type {
#define SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS_MASK \
((SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS << SH_CSS_MAX_SP_THREADS) - 1)
-#if defined(ISP2401)
struct sh_css_sp_pipeline_terminal {
union {
/* Input System 2401 */
@@ -442,7 +412,6 @@ struct sh_css_sp_pipeline_io_status {
u32 running[N_INPUT_SYSTEM_CSI_PORT]; /** configured streams */
};
-#endif
enum sh_css_port_dir {
SH_CSS_PORT_INPUT = 0,
SH_CSS_PORT_OUTPUT = 1
@@ -641,10 +610,8 @@ struct sh_css_sp_stage {
struct sh_css_sp_group {
struct sh_css_sp_config config;
struct sh_css_sp_pipeline pipe[SH_CSS_MAX_SP_THREADS];
-#if defined(ISP2401)
struct sh_css_sp_pipeline_io pipe_io[SH_CSS_MAX_SP_THREADS];
struct sh_css_sp_pipeline_io_status pipe_io_status;
-#endif
struct sh_css_sp_debug_command debug;
};
@@ -922,13 +889,11 @@ sh_css_frame_info_set_width(struct ia_css_frame_info *info,
unsigned int width,
unsigned int aligned);
-#if !defined(ISP2401)
unsigned int
sh_css_get_mipi_sizes_for_check(const unsigned int port,
const unsigned int idx);
-#endif
ia_css_ptr
sh_css_store_sp_group_to_ddr(void);
@@ -971,11 +936,9 @@ sh_css_continuous_is_enabled(uint8_t pipe_num);
struct ia_css_pipe *
find_pipe_by_num(uint32_t pipe_num);
-#ifdef ISP2401
void
ia_css_get_crop_offsets(
struct ia_css_pipe *pipe,
struct ia_css_frame_info *in_frame);
-#endif
#endif /* _SH_CSS_INTERNAL_H_ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_mipi.c b/drivers/staging/media/atomisp/pci/sh_css_mipi.c
index ced21dedf7ac..b7c1e164ee24 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_mipi.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_mipi.c
@@ -185,35 +185,6 @@ ia_css_mipi_frame_calculate_size(const unsigned int width,
return err;
}
-/*
- * Check if a source port or TPG/PRBS ID is valid
- */
-
-#if !defined(ISP2401)
-int
-ia_css_mipi_frame_enable_check_on_size(const enum mipi_port_id port,
- const unsigned int size_mem_words)
-{
- u32 idx;
-
- int err = -EBUSY;
-
- OP___assert(port < N_CSI_PORTS);
- OP___assert(size_mem_words != 0);
-
- for (idx = 0; idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT &&
- my_css.mipi_sizes_for_check[port][idx] != 0;
- idx++) { /* do nothing */
- }
- if (idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT) {
- my_css.mipi_sizes_for_check[port][idx] = size_mem_words;
- err = 0;
- }
-
- return err;
-}
-#endif
-
void
mipi_init(void)
{
diff --git a/drivers/staging/media/atomisp/pci/sh_css_params.c b/drivers/staging/media/atomisp/pci/sh_css_params.c
index 588f2adab058..232744973ab8 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_params.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_params.c
@@ -3720,10 +3720,47 @@ struct ia_css_shading_table *ia_css_get_shading_table(struct ia_css_stream
ia_css_ptr sh_css_store_sp_group_to_ddr(void)
{
+ u8 *write_buf;
+ u8 *buf_ptr;
+
IA_CSS_ENTER_LEAVE_PRIVATE("void");
+
+ write_buf = kzalloc(sizeof(u8) * 8192, GFP_KERNEL);
+ if (!write_buf)
+ return 0;
+
+ buf_ptr = write_buf;
+ if (IS_ISP2401) {
+ memcpy(buf_ptr, &sh_css_sp_group.config, 3);
+ buf_ptr += 3;
+ *buf_ptr++ = sh_css_sp_group.config.enable_isys_event_queue;
+ *buf_ptr++ = sh_css_sp_group.config.disable_cont_vf;
+ memset(buf_ptr, 0, 3);
+ buf_ptr += 3; /* Padding 3 bytes for struct sh_css_sp_config*/
+ } else {
+ memcpy(buf_ptr, &sh_css_sp_group.config, sizeof(sh_css_sp_group.config));
+ buf_ptr += sizeof(sh_css_sp_group.config);
+ }
+
+ memcpy(buf_ptr, &sh_css_sp_group.pipe, sizeof(sh_css_sp_group.pipe));
+ buf_ptr += sizeof(sh_css_sp_group.pipe);
+
+ if (IS_ISP2401) {
+ memcpy(buf_ptr, &sh_css_sp_group.pipe_io, sizeof(sh_css_sp_group.pipe_io));
+ buf_ptr += sizeof(sh_css_sp_group.pipe_io);
+ memcpy(buf_ptr, &sh_css_sp_group.pipe_io_status,
+ sizeof(sh_css_sp_group.pipe_io_status));
+ buf_ptr += sizeof(sh_css_sp_group.pipe_io_status);
+ }
+
+ memcpy(buf_ptr, &sh_css_sp_group.debug, sizeof(sh_css_sp_group.debug));
+ buf_ptr += sizeof(sh_css_sp_group.debug);
+
hmm_store(xmem_sp_group_ptrs,
- &sh_css_sp_group,
- sizeof(struct sh_css_sp_group));
+ write_buf,
+ buf_ptr - write_buf);
+
+ kfree(write_buf);
return xmem_sp_group_ptrs;
}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_sp.c b/drivers/staging/media/atomisp/pci/sh_css_sp.c
index f35c745c22c0..cd7f5a3fecaa 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_sp.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_sp.c
@@ -17,9 +17,7 @@
#include "sh_css_sp.h"
-#if !defined(ISP2401)
#include "input_formatter.h"
-#endif
#include "dma.h" /* N_DMA_CHANNEL_ID */
@@ -228,11 +226,8 @@ sh_css_sp_start_binary_copy(unsigned int pipe_num,
IA_CSS_LOG("pipe_id %d port_config %08x",
pipe->pipe_id, pipe->inout_port_config);
-#if !defined(ISP2401)
- sh_css_sp_group.config.input_formatter.isp_2ppc = (uint8_t)two_ppc;
-#else
- (void)two_ppc;
-#endif
+ if (!IS_ISP2401)
+ sh_css_sp_group.config.input_formatter.isp_2ppc = (uint8_t)two_ppc;
sh_css_sp_stage.num = stage_num;
sh_css_sp_stage.stage_type = SH_CSS_SP_STAGE_TYPE;
@@ -306,11 +301,8 @@ sh_css_sp_start_raw_copy(struct ia_css_frame *out_frame,
IA_CSS_LOG("pipe_id %d port_config %08x",
pipe->pipe_id, pipe->inout_port_config);
-#if !defined(ISP2401)
- sh_css_sp_group.config.input_formatter.isp_2ppc = (uint8_t)two_ppc;
-#else
- (void)two_ppc;
-#endif
+ if (!IS_ISP2401)
+ sh_css_sp_group.config.input_formatter.isp_2ppc = (uint8_t)two_ppc;
sh_css_sp_stage.num = stage_num;
sh_css_sp_stage.xmem_bin_addr = 0x0;
@@ -633,7 +625,6 @@ set_view_finder_buffer(const struct ia_css_frame *frame)
return 0;
}
-#if !defined(ISP2401)
void sh_css_sp_set_if_configs(
const input_formatter_cfg_t *config_a,
const input_formatter_cfg_t *config_b,
@@ -655,9 +646,7 @@ void sh_css_sp_set_if_configs(
return;
}
-#endif
-#if !defined(ISP2401)
void
sh_css_sp_program_input_circuit(int fmt_type,
int ch_id,
@@ -674,9 +663,7 @@ sh_css_sp_program_input_circuit(int fmt_type,
sh_css_sp_group.config.input_circuit_cfg_changed = true;
sh_css_sp_stage.program_input_circuit = true;
}
-#endif
-#if !defined(ISP2401)
void
sh_css_sp_configure_sync_gen(int width, int height,
int hblank_cycles,
@@ -707,7 +694,6 @@ sh_css_sp_configure_prbs(int seed)
{
sh_css_sp_group.config.prbs.seed = seed;
}
-#endif
void
sh_css_sp_configure_enable_raw_pool_locking(bool lock_all)
@@ -757,22 +743,18 @@ sh_css_sp_init_group(bool two_ppc,
bool no_isp_sync,
uint8_t if_config_index)
{
-#if !defined(ISP2401)
- sh_css_sp_group.config.input_formatter.isp_2ppc = two_ppc;
-#else
- (void)two_ppc;
-#endif
+ if (!IS_ISP2401)
+ sh_css_sp_group.config.input_formatter.isp_2ppc = two_ppc;
sh_css_sp_group.config.no_isp_sync = (uint8_t)no_isp_sync;
/* decide whether the frame is processed online or offline */
if (if_config_index == SH_CSS_IF_CONFIG_NOT_NEEDED) return;
-#if !defined(ISP2401)
- assert(if_config_index < SH_CSS_MAX_IF_CONFIGS);
- sh_css_sp_group.config.input_formatter.set[if_config_index].stream_format =
- input_format;
-#else
- (void)input_format;
-#endif
+
+ if (!IS_ISP2401) {
+ assert(if_config_index < SH_CSS_MAX_IF_CONFIGS);
+ sh_css_sp_group.config.input_formatter.set[if_config_index].stream_format =
+ input_format;
+ }
}
void
@@ -1031,18 +1013,16 @@ sh_css_sp_init_stage(struct ia_css_binary *binary,
if (err)
return err;
-#ifdef ISP2401
- pipe = find_pipe_by_num(sh_css_sp_group.pipe[thread_id].pipe_num);
- if (!pipe)
- return -EINVAL;
+ if (IS_ISP2401) {
+ pipe = find_pipe_by_num(sh_css_sp_group.pipe[thread_id].pipe_num);
+ if (!pipe)
+ return -EINVAL;
- if (args->in_frame)
- ia_css_get_crop_offsets(pipe, &args->in_frame->frame_info);
- else
- ia_css_get_crop_offsets(pipe, &binary->in_frame_info);
-#else
- (void)pipe; /*avoid build warning*/
-#endif
+ if (args->in_frame)
+ ia_css_get_crop_offsets(pipe, &args->in_frame->frame_info);
+ else
+ ia_css_get_crop_offsets(pipe, &binary->in_frame_info);
+ }
err = configure_isp_from_args(&sh_css_sp_group.pipe[thread_id],
binary, args, two_ppc, sh_css_sp_stage.deinterleaved);
diff --git a/drivers/staging/media/atomisp/pci/sh_css_sp.h b/drivers/staging/media/atomisp/pci/sh_css_sp.h
index f69a79b0b0da..36b693bd916a 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_sp.h
+++ b/drivers/staging/media/atomisp/pci/sh_css_sp.h
@@ -18,9 +18,7 @@
#include <system_global.h>
#include <type_support.h>
-#if !defined(ISP2401)
#include "input_formatter.h"
-#endif
#include "ia_css_binary.h"
#include "ia_css_types.h"
@@ -149,13 +147,11 @@ sh_css_sp_get_debug_state(struct sh_css_sp_debug_state *state);
#endif
-#if !defined(ISP2401)
void
sh_css_sp_set_if_configs(
const input_formatter_cfg_t *config_a,
const input_formatter_cfg_t *config_b,
const uint8_t if_config_index);
-#endif
void
sh_css_sp_program_input_circuit(int fmt_type,
diff --git a/drivers/staging/media/deprecated/atmel/atmel-isc-base.c b/drivers/staging/media/deprecated/atmel/atmel-isc-base.c
index f5d963904201..8e26663cecb6 100644
--- a/drivers/staging/media/deprecated/atmel/atmel-isc-base.c
+++ b/drivers/staging/media/deprecated/atmel/atmel-isc-base.c
@@ -488,12 +488,8 @@ static const struct vb2_ops isc_vb2_ops = {
static int isc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct isc_device *isc = video_drvdata(file);
-
strscpy(cap->driver, "microchip-isc", sizeof(cap->driver));
strscpy(cap->card, "Atmel Image Sensor Controller", sizeof(cap->card));
- snprintf(cap->bus_info, sizeof(cap->bus_info),
- "platform:%s", isc->v4l2_dev.name);
return 0;
}
diff --git a/drivers/staging/media/ipu3/ipu3-css-params.c b/drivers/staging/media/ipu3/ipu3-css-params.c
index 76ad802d694e..34f574b0b521 100644
--- a/drivers/staging/media/ipu3/ipu3-css-params.c
+++ b/drivers/staging/media/ipu3/ipu3-css-params.c
@@ -2425,16 +2425,16 @@ int imgu_css_cfg_acc(struct imgu_css *css, unsigned int pipe,
acc->awb_fr.stripes[1].grid_cfg.width,
b_w_log2);
acc->awb_fr.stripes[1].grid_cfg.x_end = end;
-
- /*
- * To reduce complexity of debubbling and loading
- * statistics fix grid_height_per_slice to 1 for both
- * stripes.
- */
- for (i = 0; i < stripes; i++)
- acc->awb_fr.stripes[i].grid_cfg.height_per_slice = 1;
}
+ /*
+ * To reduce complexity of debubbling and loading
+ * statistics fix grid_height_per_slice to 1 for both
+ * stripes.
+ */
+ for (i = 0; i < stripes; i++)
+ acc->awb_fr.stripes[i].grid_cfg.height_per_slice = 1;
+
if (imgu_css_awb_fr_ops_calc(css, pipe, &acc->awb_fr))
return -EINVAL;
@@ -2597,15 +2597,15 @@ int imgu_css_cfg_acc(struct imgu_css *css, unsigned int pipe,
imgu_css_grid_end(acc->af.stripes[1].grid_cfg.x_start,
acc->af.stripes[1].grid_cfg.width,
b_w_log2);
-
- /*
- * To reduce complexity of debubbling and loading statistics
- * fix grid_height_per_slice to 1 for both stripes
- */
- for (i = 0; i < stripes; i++)
- acc->af.stripes[i].grid_cfg.height_per_slice = 1;
}
+ /*
+ * To reduce complexity of debubbling and loading statistics
+ * fix grid_height_per_slice to 1 for both stripes
+ */
+ for (i = 0; i < stripes; i++)
+ acc->af.stripes[i].grid_cfg.height_per_slice = 1;
+
if (imgu_css_af_ops_calc(css, pipe, &acc->af))
return -EINVAL;
@@ -2677,15 +2677,15 @@ int imgu_css_cfg_acc(struct imgu_css *css, unsigned int pipe,
imgu_css_grid_end(acc->awb.stripes[1].grid.x_start,
acc->awb.stripes[1].grid.width,
b_w_log2);
-
- /*
- * To reduce complexity of debubbling and loading statistics
- * fix grid_height_per_slice to 1 for both stripes
- */
- for (i = 0; i < stripes; i++)
- acc->awb.stripes[i].grid.height_per_slice = 1;
}
+ /*
+ * To reduce complexity of debubbling and loading statistics
+ * fix grid_height_per_slice to 1 for both stripes
+ */
+ for (i = 0; i < stripes; i++)
+ acc->awb.stripes[i].grid.height_per_slice = 1;
+
if (imgu_css_awb_ops_calc(css, pipe, &acc->awb))
return -EINVAL;
diff --git a/drivers/staging/media/ipu3/ipu3.c b/drivers/staging/media/ipu3/ipu3.c
index 0c453b37f8c4..18ca22c3018a 100644
--- a/drivers/staging/media/ipu3/ipu3.c
+++ b/drivers/staging/media/ipu3/ipu3.c
@@ -762,7 +762,6 @@ static int __maybe_unused imgu_suspend(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
struct imgu_device *imgu = pci_get_drvdata(pci_dev);
- dev_dbg(dev, "enter %s\n", __func__);
imgu->suspend_in_stream = imgu_css_is_streaming(&imgu->css);
if (!imgu->suspend_in_stream)
goto out;
@@ -783,7 +782,6 @@ static int __maybe_unused imgu_suspend(struct device *dev)
imgu_powerdown(imgu);
pm_runtime_force_suspend(dev);
out:
- dev_dbg(dev, "leave %s\n", __func__);
return 0;
}
@@ -793,8 +791,6 @@ static int __maybe_unused imgu_resume(struct device *dev)
int r = 0;
unsigned int pipe;
- dev_dbg(dev, "enter %s\n", __func__);
-
if (!imgu->suspend_in_stream)
goto out;
@@ -821,8 +817,6 @@ static int __maybe_unused imgu_resume(struct device *dev)
}
out:
- dev_dbg(dev, "leave %s\n", __func__);
-
return r;
}
diff --git a/drivers/staging/media/omap4iss/iss_csi2.c b/drivers/staging/media/omap4iss/iss_csi2.c
index 04ce0e7eb557..d2844414de4f 100644
--- a/drivers/staging/media/omap4iss/iss_csi2.c
+++ b/drivers/staging/media/omap4iss/iss_csi2.c
@@ -1260,7 +1260,7 @@ static int csi2_init_entities(struct iss_csi2_device *csi2, const char *subname)
struct media_pad *pads = csi2->pads;
struct media_entity *me = &sd->entity;
int ret;
- char name[V4L2_SUBDEV_NAME_SIZE];
+ char name[32];
v4l2_subdev_init(sd, &csi2_ops);
sd->internal_ops = &csi2_internal_ops;
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index 8e248d4a0aec..f52df6836045 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -708,7 +708,7 @@ static struct platform_driver cedrus_driver = {
.remove_new = cedrus_remove,
.driver = {
.name = CEDRUS_NAME,
- .of_match_table = of_match_ptr(cedrus_dt_match),
+ .of_match_table = cedrus_dt_match,
.pm = &cedrus_dev_pm_ops,
},
};
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
index b696bf884cbd..32af0e96e762 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
@@ -172,12 +172,12 @@ int cedrus_hw_suspend(struct device *device)
{
struct cedrus_dev *dev = dev_get_drvdata(device);
- reset_control_assert(dev->rstc);
-
clk_disable_unprepare(dev->ram_clk);
clk_disable_unprepare(dev->mod_clk);
clk_disable_unprepare(dev->ahb_clk);
+ reset_control_assert(dev->rstc);
+
return 0;
}
@@ -186,11 +186,18 @@ int cedrus_hw_resume(struct device *device)
struct cedrus_dev *dev = dev_get_drvdata(device);
int ret;
+ ret = reset_control_reset(dev->rstc);
+ if (ret) {
+ dev_err(dev->dev, "Failed to apply reset\n");
+
+ return ret;
+ }
+
ret = clk_prepare_enable(dev->ahb_clk);
if (ret) {
dev_err(dev->dev, "Failed to enable AHB clock\n");
- return ret;
+ goto err_rst;
}
ret = clk_prepare_enable(dev->mod_clk);
@@ -207,21 +214,14 @@ int cedrus_hw_resume(struct device *device)
goto err_mod_clk;
}
- ret = reset_control_reset(dev->rstc);
- if (ret) {
- dev_err(dev->dev, "Failed to apply reset\n");
-
- goto err_ram_clk;
- }
-
return 0;
-err_ram_clk:
- clk_disable_unprepare(dev->ram_clk);
err_mod_clk:
clk_disable_unprepare(dev->mod_clk);
err_ahb_clk:
clk_disable_unprepare(dev->ahb_clk);
+err_rst:
+ reset_control_assert(dev->rstc);
return ret;
}
diff --git a/drivers/staging/media/tegra-video/csi.c b/drivers/staging/media/tegra-video/csi.c
index e79657920dc8..9aa72863c213 100644
--- a/drivers/staging/media/tegra-video/csi.c
+++ b/drivers/staging/media/tegra-video/csi.c
@@ -607,10 +607,10 @@ static int tegra_csi_channel_init(struct tegra_csi_channel *chan)
v4l2_subdev_init(subdev, &tegra_csi_ops);
subdev->dev = csi->dev;
if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
- snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "%s-%d", "tpg",
+ snprintf(subdev->name, sizeof(subdev->name), "%s-%d", "tpg",
chan->csi_port_nums[0]);
else
- snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "%s",
+ snprintf(subdev->name, sizeof(subdev->name), "%s",
kbasename(chan->of_node->full_name));
v4l2_set_subdevdata(subdev, chan);
diff --git a/drivers/staging/media/tegra-video/vip.c b/drivers/staging/media/tegra-video/vip.c
index 191ecd19a6a7..e95cc7bb190e 100644
--- a/drivers/staging/media/tegra-video/vip.c
+++ b/drivers/staging/media/tegra-video/vip.c
@@ -163,7 +163,7 @@ static int tegra_vip_channel_init(struct tegra_vip *vip)
subdev = &vip->chan.subdev;
v4l2_subdev_init(subdev, &tegra_vip_ops);
subdev->dev = vip->dev;
- snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "%s",
+ snprintf(subdev->name, sizeof(subdev->name), "%s",
kbasename(vip->chan.of_node->full_name));
v4l2_set_subdevdata(subdev, &vip->chan);
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index d5271eac14f6..08ec3aae90ea 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -777,7 +777,7 @@ static struct i2c_driver dcon_driver = {
.name = "olpc_dcon",
.pm = &dcon_pm_ops,
},
- .class = I2C_CLASS_DDC | I2C_CLASS_HWMON,
+ .class = I2C_CLASS_HWMON,
.id_table = dcon_idtable,
.probe = dcon_probe,
.remove = dcon_remove,
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index 58887619b83f..0ec3130225db 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -56,7 +56,10 @@ static DEFINE_IDR(pi433_idr);
static DEFINE_MUTEX(minor_lock); /* Protect idr accesses */
static struct dentry *root_dir; /* debugfs root directory for the driver */
-static struct class *pi433_class; /* mainly for udev to create /dev/pi433 */
+/* mainly for udev to create /dev/pi433 */
+static const struct class pi433_class = {
+ .name = "pi433",
+};
/*
* tx config is instance specific
@@ -1259,7 +1262,7 @@ static int pi433_probe(struct spi_device *spi)
/* create device */
device->devt = MKDEV(MAJOR(pi433_dev), device->minor);
- device->dev = device_create(pi433_class,
+ device->dev = device_create(&pi433_class,
&spi->dev,
device->devt,
device,
@@ -1315,7 +1318,7 @@ del_cdev:
cdev_failed:
kthread_stop(device->tx_task_struct);
send_thread_failed:
- device_destroy(pi433_class, device->devt);
+ device_destroy(&pi433_class, device->devt);
device_create_failed:
pi433_free_minor(device);
minor_failed:
@@ -1342,7 +1345,7 @@ static void pi433_remove(struct spi_device *spi)
kthread_stop(device->tx_task_struct);
- device_destroy(pi433_class, device->devt);
+ device_destroy(&pi433_class, device->devt);
cdev_del(device->cdev);
@@ -1398,18 +1401,18 @@ static int __init pi433_init(void)
if (status < 0)
return status;
- pi433_class = class_create("pi433");
- if (IS_ERR(pi433_class)) {
+ status = class_register(&pi433_class);
+ if (status) {
unregister_chrdev(MAJOR(pi433_dev),
pi433_spi_driver.driver.name);
- return PTR_ERR(pi433_class);
+ return status;
}
root_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
status = spi_register_driver(&pi433_spi_driver);
if (status < 0) {
- class_destroy(pi433_class);
+ class_unregister(&pi433_class);
unregister_chrdev(MAJOR(pi433_dev),
pi433_spi_driver.driver.name);
}
@@ -1422,7 +1425,7 @@ module_init(pi433_init);
static void __exit pi433_exit(void)
{
spi_unregister_driver(&pi433_spi_driver);
- class_destroy(pi433_class);
+ class_unregister(&pi433_class);
unregister_chrdev(MAJOR(pi433_dev), pi433_spi_driver.driver.name);
debugfs_remove(root_dir);
}
diff --git a/drivers/staging/qlge/Kconfig b/drivers/staging/qlge/Kconfig
deleted file mode 100644
index 6d831ed67965..000000000000
--- a/drivers/staging/qlge/Kconfig
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-config QLGE
- tristate "QLogic QLGE 10Gb Ethernet Driver Support"
- depends on ETHERNET && PCI
- select NET_DEVLINK
- help
- This driver supports QLogic ISP8XXX 10Gb Ethernet cards.
-
- To compile this driver as a module, choose M here. The module will be
- called qlge.
diff --git a/drivers/staging/qlge/Makefile b/drivers/staging/qlge/Makefile
deleted file mode 100644
index 07c1898a512e..000000000000
--- a/drivers/staging/qlge/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the Qlogic 10GbE PCI Express ethernet driver
-#
-
-obj-$(CONFIG_QLGE) += qlge.o
-
-qlge-objs := qlge_main.o qlge_dbg.o qlge_mpi.o qlge_ethtool.o qlge_devlink.o
diff --git a/drivers/staging/qlge/TODO b/drivers/staging/qlge/TODO
deleted file mode 100644
index c76394b9451b..000000000000
--- a/drivers/staging/qlge/TODO
+++ /dev/null
@@ -1,33 +0,0 @@
-* commit 7c734359d350 ("qlge: Size RX buffers based on MTU.", v2.6.33-rc1)
- introduced dead code in the receive routines, which should be rewritten
- anyway by the admission of the author himself, see the comment above
- ql_build_rx_skb(). That function is now used exclusively to handle packets
- that underwent header splitting but it still contains code to handle non
- split cases.
-* truesize accounting is incorrect (ex: a 9000B frame has skb->truesize 10280
- while containing two frags of order-1 allocations, ie. >16K)
-* while in that area, using two 8k buffers to store one 9k frame is a poor
- choice of buffer size.
-* in the "chain of large buffers" case, the driver uses an skb allocated with
- head room but only puts data in the frags.
-* rename "rx" queues to "completion" queues. Calling tx completion queues "rx
- queues" is confusing.
-* struct rx_ring is used for rx and tx completions, with some members relevant
- to one case only
-* the flow control implementation in firmware is buggy (sends a flood of pause
- frames, resets the link, device and driver buffer queues become
- desynchronized), disable it by default
-* some structures are initialized redundantly (ex. memset 0 after
- alloc_etherdev())
-* the driver has a habit of using runtime checks where compile time checks are
- possible (ex. ql_free_rx_buffers(), ql_alloc_rx_buffers())
-* reorder struct members to avoid holes if it doesn't impact performance
-* avoid legacy/deprecated apis (ex. replace pci_dma_*, replace pci_enable_msi,
- use pci_iomap)
-* some "while" loops could be rewritten with simple "for", ex.
- ql_wait_reg_rdy(), ql_start_rx_ring()
-* remove duplicate and useless comments
-* fix weird line wrapping (all over, ex. the ql_set_routing_reg() calls in
- qlge_set_multicast_list()).
-* fix weird indentation (all over, ex. the for loops in qlge_get_stats())
-* fix checkpatch issues
diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h
deleted file mode 100644
index d0dd659834ee..000000000000
--- a/drivers/staging/qlge/qlge.h
+++ /dev/null
@@ -1,2293 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * QLogic QLA41xx NIC HBA Driver
- * Copyright (c) 2003-2006 QLogic Corporation
- */
-#ifndef _QLGE_H_
-#define _QLGE_H_
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/rtnetlink.h>
-#include <linux/if_vlan.h>
-
-/*
- * General definitions...
- */
-#define DRV_NAME "qlge"
-#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "1.00.00.35"
-
-#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
-
-#define QLGE_VENDOR_ID 0x1077
-#define QLGE_DEVICE_ID_8012 0x8012
-#define QLGE_DEVICE_ID_8000 0x8000
-#define QLGE_MEZZ_SSYS_ID_068 0x0068
-#define QLGE_MEZZ_SSYS_ID_180 0x0180
-#define MAX_CPUS 8
-#define MAX_TX_RINGS MAX_CPUS
-#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)
-
-#define NUM_TX_RING_ENTRIES 256
-#define NUM_RX_RING_ENTRIES 256
-
-/* Use the same len for sbq and lbq. Note that it seems like the device might
- * support different sizes.
- */
-#define QLGE_BQ_SHIFT 9
-#define QLGE_BQ_LEN BIT(QLGE_BQ_SHIFT)
-#define QLGE_BQ_SIZE (QLGE_BQ_LEN * sizeof(__le64))
-
-#define DB_PAGE_SIZE 4096
-
-/* Calculate the number of (4k) pages required to
- * contain a buffer queue of the given length.
- */
-#define MAX_DB_PAGES_PER_BQ(x) \
- (((x * sizeof(u64)) / DB_PAGE_SIZE) + \
- (((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0))
-
-#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
- MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN) * sizeof(u64) + \
- MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN) * sizeof(u64))
-#define LARGE_BUFFER_MAX_SIZE 8192
-#define LARGE_BUFFER_MIN_SIZE 2048
-
-#define MAX_CQ 128
-#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
-#define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */
-#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT / 2)
-#define UDELAY_COUNT 3
-#define UDELAY_DELAY 100
-
-#define TX_DESC_PER_IOCB 8
-
-#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0
-#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
-#else /* all other page sizes */
-#define TX_DESC_PER_OAL 0
-#endif
-
-/* Word shifting for converting 64-bit
- * address to a series of 16-bit words.
- * This is used for some MPI firmware
- * mailbox commands.
- */
-#define LSW(x) ((u16)(x))
-#define MSW(x) ((u16)((u32)(x) >> 16))
-#define LSD(x) ((u32)((u64)(x)))
-#define MSD(x) ((u32)((((u64)(x)) >> 32)))
-
-/* In some cases, the device interprets a value of 0x0000 as 65536. These
- * cases are marked using the following macro.
- */
-#define QLGE_FIT16(value) ((u16)(value))
-
-/* MPI test register definitions. This register
- * is used for determining alternate NIC function's
- * PCI->func number.
- */
-enum {
- MPI_TEST_FUNC_PORT_CFG = 0x1002,
- MPI_TEST_FUNC_PRB_CTL = 0x100e,
- MPI_TEST_FUNC_PRB_EN = 0x18a20000,
- MPI_TEST_FUNC_RST_STS = 0x100a,
- MPI_TEST_FUNC_RST_FRC = 0x00000003,
- MPI_TEST_NIC_FUNC_MASK = 0x00000007,
- MPI_TEST_NIC1_FUNCTION_ENABLE = (1 << 0),
- MPI_TEST_NIC1_FUNCTION_MASK = 0x0000000e,
- MPI_TEST_NIC1_FUNC_SHIFT = 1,
- MPI_TEST_NIC2_FUNCTION_ENABLE = (1 << 4),
- MPI_TEST_NIC2_FUNCTION_MASK = 0x000000e0,
- MPI_TEST_NIC2_FUNC_SHIFT = 5,
- MPI_TEST_FC1_FUNCTION_ENABLE = (1 << 8),
- MPI_TEST_FC1_FUNCTION_MASK = 0x00000e00,
- MPI_TEST_FC1_FUNCTION_SHIFT = 9,
- MPI_TEST_FC2_FUNCTION_ENABLE = (1 << 12),
- MPI_TEST_FC2_FUNCTION_MASK = 0x0000e000,
- MPI_TEST_FC2_FUNCTION_SHIFT = 13,
-
- MPI_NIC_READ = 0x00000000,
- MPI_NIC_REG_BLOCK = 0x00020000,
- MPI_NIC_FUNCTION_SHIFT = 6,
-};
-
-/*
- * Processor Address Register (PROC_ADDR) bit definitions.
- */
-enum {
- /* Misc. stuff */
- MAILBOX_COUNT = 16,
- MAILBOX_TIMEOUT = 5,
-
- PROC_ADDR_RDY = (1 << 31),
- PROC_ADDR_R = (1 << 30),
- PROC_ADDR_ERR = (1 << 29),
- PROC_ADDR_DA = (1 << 28),
- PROC_ADDR_FUNC0_MBI = 0x00001180,
- PROC_ADDR_FUNC0_MBO = (PROC_ADDR_FUNC0_MBI + MAILBOX_COUNT),
- PROC_ADDR_FUNC0_CTL = 0x000011a1,
- PROC_ADDR_FUNC2_MBI = 0x00001280,
- PROC_ADDR_FUNC2_MBO = (PROC_ADDR_FUNC2_MBI + MAILBOX_COUNT),
- PROC_ADDR_FUNC2_CTL = 0x000012a1,
- PROC_ADDR_MPI_RISC = 0x00000000,
- PROC_ADDR_MDE = 0x00010000,
- PROC_ADDR_REGBLOCK = 0x00020000,
- PROC_ADDR_RISC_REG = 0x00030000,
-};
-
-/*
- * System Register (SYS) bit definitions.
- */
-enum {
- SYS_EFE = (1 << 0),
- SYS_FAE = (1 << 1),
- SYS_MDC = (1 << 2),
- SYS_DST = (1 << 3),
- SYS_DWC = (1 << 4),
- SYS_EVW = (1 << 5),
- SYS_OMP_DLY_MASK = 0x3f000000,
- /*
- * There are no values defined as of edit #15.
- */
- SYS_ODI = (1 << 14),
-};
-
-/*
- * Reset/Failover Register (RST_FO) bit definitions.
- */
-enum {
- RST_FO_TFO = (1 << 0),
- RST_FO_RR_MASK = 0x00060000,
- RST_FO_RR_CQ_CAM = 0x00000000,
- RST_FO_RR_DROP = 0x00000002,
- RST_FO_RR_DQ = 0x00000004,
- RST_FO_RR_RCV_FUNC_CQ = 0x00000006,
- RST_FO_FRB = (1 << 12),
- RST_FO_MOP = (1 << 13),
- RST_FO_REG = (1 << 14),
- RST_FO_FR = (1 << 15),
-};
-
-/*
- * Function Specific Control Register (FSC) bit definitions.
- */
-enum {
- FSC_DBRST_MASK = 0x00070000,
- FSC_DBRST_256 = 0x00000000,
- FSC_DBRST_512 = 0x00000001,
- FSC_DBRST_768 = 0x00000002,
- FSC_DBRST_1024 = 0x00000003,
- FSC_DBL_MASK = 0x00180000,
- FSC_DBL_DBRST = 0x00000000,
- FSC_DBL_MAX_PLD = 0x00000008,
- FSC_DBL_MAX_BRST = 0x00000010,
- FSC_DBL_128_BYTES = 0x00000018,
- FSC_EC = (1 << 5),
- FSC_EPC_MASK = 0x00c00000,
- FSC_EPC_INBOUND = (1 << 6),
- FSC_EPC_OUTBOUND = (1 << 7),
- FSC_VM_PAGESIZE_MASK = 0x07000000,
- FSC_VM_PAGE_2K = 0x00000100,
- FSC_VM_PAGE_4K = 0x00000200,
- FSC_VM_PAGE_8K = 0x00000300,
- FSC_VM_PAGE_64K = 0x00000600,
- FSC_SH = (1 << 11),
- FSC_DSB = (1 << 12),
- FSC_STE = (1 << 13),
- FSC_FE = (1 << 15),
-};
-
-/*
- * Host Command Status Register (CSR) bit definitions.
- */
-enum {
- CSR_ERR_STS_MASK = 0x0000003f,
- /*
- * There are no values defined as of edit #15.
- */
- CSR_RR = (1 << 8),
- CSR_HRI = (1 << 9),
- CSR_RP = (1 << 10),
- CSR_CMD_PARM_SHIFT = 22,
- CSR_CMD_NOP = 0x00000000,
- CSR_CMD_SET_RST = 0x10000000,
- CSR_CMD_CLR_RST = 0x20000000,
- CSR_CMD_SET_PAUSE = 0x30000000,
- CSR_CMD_CLR_PAUSE = 0x40000000,
- CSR_CMD_SET_H2R_INT = 0x50000000,
- CSR_CMD_CLR_H2R_INT = 0x60000000,
- CSR_CMD_PAR_EN = 0x70000000,
- CSR_CMD_SET_BAD_PAR = 0x80000000,
- CSR_CMD_CLR_BAD_PAR = 0x90000000,
- CSR_CMD_CLR_R2PCI_INT = 0xa0000000,
-};
-
-/*
- * Configuration Register (CFG) bit definitions.
- */
-enum {
- CFG_LRQ = (1 << 0),
- CFG_DRQ = (1 << 1),
- CFG_LR = (1 << 2),
- CFG_DR = (1 << 3),
- CFG_LE = (1 << 5),
- CFG_LCQ = (1 << 6),
- CFG_DCQ = (1 << 7),
- CFG_Q_SHIFT = 8,
- CFG_Q_MASK = 0x7f000000,
-};
-
-/*
- * Status Register (STS) bit definitions.
- */
-enum {
- STS_FE = (1 << 0),
- STS_PI = (1 << 1),
- STS_PL0 = (1 << 2),
- STS_PL1 = (1 << 3),
- STS_PI0 = (1 << 4),
- STS_PI1 = (1 << 5),
- STS_FUNC_ID_MASK = 0x000000c0,
- STS_FUNC_ID_SHIFT = 6,
- STS_F0E = (1 << 8),
- STS_F1E = (1 << 9),
- STS_F2E = (1 << 10),
- STS_F3E = (1 << 11),
- STS_NFE = (1 << 12),
-};
-
-/*
- * Interrupt Enable Register (INTR_EN) bit definitions.
- */
-enum {
- INTR_EN_INTR_MASK = 0x007f0000,
- INTR_EN_TYPE_MASK = 0x03000000,
- INTR_EN_TYPE_ENABLE = 0x00000100,
- INTR_EN_TYPE_DISABLE = 0x00000200,
- INTR_EN_TYPE_READ = 0x00000300,
- INTR_EN_IHD = (1 << 13),
- INTR_EN_IHD_MASK = (INTR_EN_IHD << 16),
- INTR_EN_EI = (1 << 14),
- INTR_EN_EN = (1 << 15),
-};
-
-/*
- * Interrupt Mask Register (INTR_MASK) bit definitions.
- */
-enum {
- INTR_MASK_PI = (1 << 0),
- INTR_MASK_HL0 = (1 << 1),
- INTR_MASK_LH0 = (1 << 2),
- INTR_MASK_HL1 = (1 << 3),
- INTR_MASK_LH1 = (1 << 4),
- INTR_MASK_SE = (1 << 5),
- INTR_MASK_LSC = (1 << 6),
- INTR_MASK_MC = (1 << 7),
- INTR_MASK_LINK_IRQS = INTR_MASK_LSC | INTR_MASK_SE | INTR_MASK_MC,
-};
-
-/*
- * Register (REV_ID) bit definitions.
- */
-enum {
- REV_ID_MASK = 0x0000000f,
- REV_ID_NICROLL_SHIFT = 0,
- REV_ID_NICREV_SHIFT = 4,
- REV_ID_XGROLL_SHIFT = 8,
- REV_ID_XGREV_SHIFT = 12,
- REV_ID_CHIPREV_SHIFT = 28,
-};
-
-/*
- * Force ECC Error Register (FRC_ECC_ERR) bit definitions.
- */
-enum {
- FRC_ECC_ERR_VW = (1 << 12),
- FRC_ECC_ERR_VB = (1 << 13),
- FRC_ECC_ERR_NI = (1 << 14),
- FRC_ECC_ERR_NO = (1 << 15),
- FRC_ECC_PFE_SHIFT = 16,
- FRC_ECC_ERR_DO = (1 << 18),
- FRC_ECC_P14 = (1 << 19),
-};
-
-/*
- * Error Status Register (ERR_STS) bit definitions.
- */
-enum {
- ERR_STS_NOF = (1 << 0),
- ERR_STS_NIF = (1 << 1),
- ERR_STS_DRP = (1 << 2),
- ERR_STS_XGP = (1 << 3),
- ERR_STS_FOU = (1 << 4),
- ERR_STS_FOC = (1 << 5),
- ERR_STS_FOF = (1 << 6),
- ERR_STS_FIU = (1 << 7),
- ERR_STS_FIC = (1 << 8),
- ERR_STS_FIF = (1 << 9),
- ERR_STS_MOF = (1 << 10),
- ERR_STS_TA = (1 << 11),
- ERR_STS_MA = (1 << 12),
- ERR_STS_MPE = (1 << 13),
- ERR_STS_SCE = (1 << 14),
- ERR_STS_STE = (1 << 15),
- ERR_STS_FOW = (1 << 16),
- ERR_STS_UE = (1 << 17),
- ERR_STS_MCH = (1 << 26),
- ERR_STS_LOC_SHIFT = 27,
-};
-
-/*
- * RAM Debug Address Register (RAM_DBG_ADDR) bit definitions.
- */
-enum {
- RAM_DBG_ADDR_FW = (1 << 30),
- RAM_DBG_ADDR_FR = (1 << 31),
-};
-
-/*
- * Semaphore Register (SEM) bit definitions.
- */
-enum {
- /*
- * Example:
- * reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT)
- */
- SEM_CLEAR = 0,
- SEM_SET = 1,
- SEM_FORCE = 3,
- SEM_XGMAC0_SHIFT = 0,
- SEM_XGMAC1_SHIFT = 2,
- SEM_ICB_SHIFT = 4,
- SEM_MAC_ADDR_SHIFT = 6,
- SEM_FLASH_SHIFT = 8,
- SEM_PROBE_SHIFT = 10,
- SEM_RT_IDX_SHIFT = 12,
- SEM_PROC_REG_SHIFT = 14,
- SEM_XGMAC0_MASK = 0x00030000,
- SEM_XGMAC1_MASK = 0x000c0000,
- SEM_ICB_MASK = 0x00300000,
- SEM_MAC_ADDR_MASK = 0x00c00000,
- SEM_FLASH_MASK = 0x03000000,
- SEM_PROBE_MASK = 0x0c000000,
- SEM_RT_IDX_MASK = 0x30000000,
- SEM_PROC_REG_MASK = 0xc0000000,
-};
-
-/*
- * 10G MAC Address Register (XGMAC_ADDR) bit definitions.
- */
-enum {
- XGMAC_ADDR_RDY = (1 << 31),
- XGMAC_ADDR_R = (1 << 30),
- XGMAC_ADDR_XME = (1 << 29),
-
- /* XGMAC control registers */
- PAUSE_SRC_LO = 0x00000100,
- PAUSE_SRC_HI = 0x00000104,
- GLOBAL_CFG = 0x00000108,
- GLOBAL_CFG_RESET = (1 << 0),
- GLOBAL_CFG_JUMBO = (1 << 6),
- GLOBAL_CFG_TX_STAT_EN = (1 << 10),
- GLOBAL_CFG_RX_STAT_EN = (1 << 11),
- TX_CFG = 0x0000010c,
- TX_CFG_RESET = (1 << 0),
- TX_CFG_EN = (1 << 1),
- TX_CFG_PREAM = (1 << 2),
- RX_CFG = 0x00000110,
- RX_CFG_RESET = (1 << 0),
- RX_CFG_EN = (1 << 1),
- RX_CFG_PREAM = (1 << 2),
- FLOW_CTL = 0x0000011c,
- PAUSE_OPCODE = 0x00000120,
- PAUSE_TIMER = 0x00000124,
- PAUSE_FRM_DEST_LO = 0x00000128,
- PAUSE_FRM_DEST_HI = 0x0000012c,
- MAC_TX_PARAMS = 0x00000134,
- MAC_TX_PARAMS_JUMBO = (1 << 31),
- MAC_TX_PARAMS_SIZE_SHIFT = 16,
- MAC_RX_PARAMS = 0x00000138,
- MAC_SYS_INT = 0x00000144,
- MAC_SYS_INT_MASK = 0x00000148,
- MAC_MGMT_INT = 0x0000014c,
- MAC_MGMT_IN_MASK = 0x00000150,
- EXT_ARB_MODE = 0x000001fc,
-
- /* XGMAC TX statistics registers */
- TX_PKTS = 0x00000200,
- TX_BYTES = 0x00000208,
- TX_MCAST_PKTS = 0x00000210,
- TX_BCAST_PKTS = 0x00000218,
- TX_UCAST_PKTS = 0x00000220,
- TX_CTL_PKTS = 0x00000228,
- TX_PAUSE_PKTS = 0x00000230,
- TX_64_PKT = 0x00000238,
- TX_65_TO_127_PKT = 0x00000240,
- TX_128_TO_255_PKT = 0x00000248,
- TX_256_511_PKT = 0x00000250,
- TX_512_TO_1023_PKT = 0x00000258,
- TX_1024_TO_1518_PKT = 0x00000260,
- TX_1519_TO_MAX_PKT = 0x00000268,
- TX_UNDERSIZE_PKT = 0x00000270,
- TX_OVERSIZE_PKT = 0x00000278,
-
- /* XGMAC statistics control registers */
- RX_HALF_FULL_DET = 0x000002a0,
- TX_HALF_FULL_DET = 0x000002a4,
- RX_OVERFLOW_DET = 0x000002a8,
- TX_OVERFLOW_DET = 0x000002ac,
- RX_HALF_FULL_MASK = 0x000002b0,
- TX_HALF_FULL_MASK = 0x000002b4,
- RX_OVERFLOW_MASK = 0x000002b8,
- TX_OVERFLOW_MASK = 0x000002bc,
- STAT_CNT_CTL = 0x000002c0,
- STAT_CNT_CTL_CLEAR_TX = (1 << 0),
- STAT_CNT_CTL_CLEAR_RX = (1 << 1),
- AUX_RX_HALF_FULL_DET = 0x000002d0,
- AUX_TX_HALF_FULL_DET = 0x000002d4,
- AUX_RX_OVERFLOW_DET = 0x000002d8,
- AUX_TX_OVERFLOW_DET = 0x000002dc,
- AUX_RX_HALF_FULL_MASK = 0x000002f0,
- AUX_TX_HALF_FULL_MASK = 0x000002f4,
- AUX_RX_OVERFLOW_MASK = 0x000002f8,
- AUX_TX_OVERFLOW_MASK = 0x000002fc,
-
- /* XGMAC RX statistics registers */
- RX_BYTES = 0x00000300,
- RX_BYTES_OK = 0x00000308,
- RX_PKTS = 0x00000310,
- RX_PKTS_OK = 0x00000318,
- RX_BCAST_PKTS = 0x00000320,
- RX_MCAST_PKTS = 0x00000328,
- RX_UCAST_PKTS = 0x00000330,
- RX_UNDERSIZE_PKTS = 0x00000338,
- RX_OVERSIZE_PKTS = 0x00000340,
- RX_JABBER_PKTS = 0x00000348,
- RX_UNDERSIZE_FCERR_PKTS = 0x00000350,
- RX_DROP_EVENTS = 0x00000358,
- RX_FCERR_PKTS = 0x00000360,
- RX_ALIGN_ERR = 0x00000368,
- RX_SYMBOL_ERR = 0x00000370,
- RX_MAC_ERR = 0x00000378,
- RX_CTL_PKTS = 0x00000380,
- RX_PAUSE_PKTS = 0x00000388,
- RX_64_PKTS = 0x00000390,
- RX_65_TO_127_PKTS = 0x00000398,
- RX_128_255_PKTS = 0x000003a0,
- RX_256_511_PKTS = 0x000003a8,
- RX_512_TO_1023_PKTS = 0x000003b0,
- RX_1024_TO_1518_PKTS = 0x000003b8,
- RX_1519_TO_MAX_PKTS = 0x000003c0,
- RX_LEN_ERR_PKTS = 0x000003c8,
-
- /* XGMAC MDIO control registers */
- MDIO_TX_DATA = 0x00000400,
- MDIO_RX_DATA = 0x00000410,
- MDIO_CMD = 0x00000420,
- MDIO_PHY_ADDR = 0x00000430,
- MDIO_PORT = 0x00000440,
- MDIO_STATUS = 0x00000450,
-
- XGMAC_REGISTER_END = 0x00000740,
-};
-
-/*
- * Enhanced Transmission Schedule Registers (NIC_ETS,CNA_ETS) bit definitions.
- */
-enum {
- ETS_QUEUE_SHIFT = 29,
- ETS_REF = (1 << 26),
- ETS_RS = (1 << 27),
- ETS_P = (1 << 28),
- ETS_FC_COS_SHIFT = 23,
-};
-
-/*
- * Flash Address Register (FLASH_ADDR) bit definitions.
- */
-enum {
- FLASH_ADDR_RDY = (1 << 31),
- FLASH_ADDR_R = (1 << 30),
- FLASH_ADDR_ERR = (1 << 29),
-};
-
-/*
- * Stop CQ Processing Register (CQ_STOP) bit definitions.
- */
-enum {
- CQ_STOP_QUEUE_MASK = (0x007f0000),
- CQ_STOP_TYPE_MASK = (0x03000000),
- CQ_STOP_TYPE_START = 0x00000100,
- CQ_STOP_TYPE_STOP = 0x00000200,
- CQ_STOP_TYPE_READ = 0x00000300,
- CQ_STOP_EN = (1 << 15),
-};
-
-/*
- * MAC Protocol Address Index Register (MAC_ADDR_IDX) bit definitions.
- */
-enum {
- MAC_ADDR_IDX_SHIFT = 4,
- MAC_ADDR_TYPE_SHIFT = 16,
- MAC_ADDR_TYPE_COUNT = 10,
- MAC_ADDR_TYPE_MASK = 0x000f0000,
- MAC_ADDR_TYPE_CAM_MAC = 0x00000000,
- MAC_ADDR_TYPE_MULTI_MAC = 0x00010000,
- MAC_ADDR_TYPE_VLAN = 0x00020000,
- MAC_ADDR_TYPE_MULTI_FLTR = 0x00030000,
- MAC_ADDR_TYPE_FC_MAC = 0x00040000,
- MAC_ADDR_TYPE_MGMT_MAC = 0x00050000,
- MAC_ADDR_TYPE_MGMT_VLAN = 0x00060000,
- MAC_ADDR_TYPE_MGMT_V4 = 0x00070000,
- MAC_ADDR_TYPE_MGMT_V6 = 0x00080000,
- MAC_ADDR_TYPE_MGMT_TU_DP = 0x00090000,
- MAC_ADDR_ADR = (1 << 25),
- MAC_ADDR_RS = (1 << 26),
- MAC_ADDR_E = (1 << 27),
- MAC_ADDR_MR = (1 << 30),
- MAC_ADDR_MW = (1 << 31),
- MAX_MULTICAST_ENTRIES = 32,
-
- /* Entry count and words per entry
- * for each address type in the filter.
- */
- MAC_ADDR_MAX_CAM_ENTRIES = 512,
- MAC_ADDR_MAX_CAM_WCOUNT = 3,
- MAC_ADDR_MAX_MULTICAST_ENTRIES = 32,
- MAC_ADDR_MAX_MULTICAST_WCOUNT = 2,
- MAC_ADDR_MAX_VLAN_ENTRIES = 4096,
- MAC_ADDR_MAX_VLAN_WCOUNT = 1,
- MAC_ADDR_MAX_MCAST_FLTR_ENTRIES = 4096,
- MAC_ADDR_MAX_MCAST_FLTR_WCOUNT = 1,
- MAC_ADDR_MAX_FC_MAC_ENTRIES = 4,
- MAC_ADDR_MAX_FC_MAC_WCOUNT = 2,
- MAC_ADDR_MAX_MGMT_MAC_ENTRIES = 8,
- MAC_ADDR_MAX_MGMT_MAC_WCOUNT = 2,
- MAC_ADDR_MAX_MGMT_VLAN_ENTRIES = 16,
- MAC_ADDR_MAX_MGMT_VLAN_WCOUNT = 1,
- MAC_ADDR_MAX_MGMT_V4_ENTRIES = 4,
- MAC_ADDR_MAX_MGMT_V4_WCOUNT = 1,
- MAC_ADDR_MAX_MGMT_V6_ENTRIES = 4,
- MAC_ADDR_MAX_MGMT_V6_WCOUNT = 4,
- MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES = 4,
- MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT = 1,
-};
-
-/*
- * MAC Protocol Address Index Register (SPLT_HDR) bit definitions.
- */
-enum {
- SPLT_HDR_EP = (1 << 31),
-};
-
-/*
- * FCoE Receive Configuration Register (FC_RCV_CFG) bit definitions.
- */
-enum {
- FC_RCV_CFG_ECT = (1 << 15),
- FC_RCV_CFG_DFH = (1 << 20),
- FC_RCV_CFG_DVF = (1 << 21),
- FC_RCV_CFG_RCE = (1 << 27),
- FC_RCV_CFG_RFE = (1 << 28),
- FC_RCV_CFG_TEE = (1 << 29),
- FC_RCV_CFG_TCE = (1 << 30),
- FC_RCV_CFG_TFE = (1 << 31),
-};
-
-/*
- * NIC Receive Configuration Register (NIC_RCV_CFG) bit definitions.
- */
-enum {
- NIC_RCV_CFG_PPE = (1 << 0),
- NIC_RCV_CFG_VLAN_MASK = 0x00060000,
- NIC_RCV_CFG_VLAN_ALL = 0x00000000,
- NIC_RCV_CFG_VLAN_MATCH_ONLY = 0x00000002,
- NIC_RCV_CFG_VLAN_MATCH_AND_NON = 0x00000004,
- NIC_RCV_CFG_VLAN_NONE_AND_NON = 0x00000006,
- NIC_RCV_CFG_RV = (1 << 3),
- NIC_RCV_CFG_DFQ_MASK = (0x7f000000),
- NIC_RCV_CFG_DFQ_SHIFT = 8,
- NIC_RCV_CFG_DFQ = 0, /* HARDCODE default queue to 0. */
-};
-
-/*
- * Mgmt Receive Configuration Register (MGMT_RCV_CFG) bit definitions.
- */
-enum {
- MGMT_RCV_CFG_ARP = (1 << 0),
- MGMT_RCV_CFG_DHC = (1 << 1),
- MGMT_RCV_CFG_DHS = (1 << 2),
- MGMT_RCV_CFG_NP = (1 << 3),
- MGMT_RCV_CFG_I6N = (1 << 4),
- MGMT_RCV_CFG_I6R = (1 << 5),
- MGMT_RCV_CFG_DH6 = (1 << 6),
- MGMT_RCV_CFG_UD1 = (1 << 7),
- MGMT_RCV_CFG_UD0 = (1 << 8),
- MGMT_RCV_CFG_BCT = (1 << 9),
- MGMT_RCV_CFG_MCT = (1 << 10),
- MGMT_RCV_CFG_DM = (1 << 11),
- MGMT_RCV_CFG_RM = (1 << 12),
- MGMT_RCV_CFG_STL = (1 << 13),
- MGMT_RCV_CFG_VLAN_MASK = 0xc0000000,
- MGMT_RCV_CFG_VLAN_ALL = 0x00000000,
- MGMT_RCV_CFG_VLAN_MATCH_ONLY = 0x00004000,
- MGMT_RCV_CFG_VLAN_MATCH_AND_NON = 0x00008000,
- MGMT_RCV_CFG_VLAN_NONE_AND_NON = 0x0000c000,
-};
-
-/*
- * Routing Index Register (RT_IDX) bit definitions.
- */
-enum {
- RT_IDX_IDX_SHIFT = 8,
- RT_IDX_TYPE_MASK = 0x000f0000,
- RT_IDX_TYPE_SHIFT = 16,
- RT_IDX_TYPE_RT = 0x00000000,
- RT_IDX_TYPE_RT_INV = 0x00010000,
- RT_IDX_TYPE_NICQ = 0x00020000,
- RT_IDX_TYPE_NICQ_INV = 0x00030000,
- RT_IDX_DST_MASK = 0x00700000,
- RT_IDX_DST_RSS = 0x00000000,
- RT_IDX_DST_CAM_Q = 0x00100000,
- RT_IDX_DST_COS_Q = 0x00200000,
- RT_IDX_DST_DFLT_Q = 0x00300000,
- RT_IDX_DST_DEST_Q = 0x00400000,
- RT_IDX_RS = (1 << 26),
- RT_IDX_E = (1 << 27),
- RT_IDX_MR = (1 << 30),
- RT_IDX_MW = (1 << 31),
-
- /* Nic Queue format - type 2 bits */
- RT_IDX_BCAST = (1 << 0),
- RT_IDX_MCAST = (1 << 1),
- RT_IDX_MCAST_MATCH = (1 << 2),
- RT_IDX_MCAST_REG_MATCH = (1 << 3),
- RT_IDX_MCAST_HASH_MATCH = (1 << 4),
- RT_IDX_FC_MACH = (1 << 5),
- RT_IDX_ETH_FCOE = (1 << 6),
- RT_IDX_CAM_HIT = (1 << 7),
- RT_IDX_CAM_BIT0 = (1 << 8),
- RT_IDX_CAM_BIT1 = (1 << 9),
- RT_IDX_VLAN_TAG = (1 << 10),
- RT_IDX_VLAN_MATCH = (1 << 11),
- RT_IDX_VLAN_FILTER = (1 << 12),
- RT_IDX_ETH_SKIP1 = (1 << 13),
- RT_IDX_ETH_SKIP2 = (1 << 14),
- RT_IDX_BCAST_MCAST_MATCH = (1 << 15),
- RT_IDX_802_3 = (1 << 16),
- RT_IDX_LLDP = (1 << 17),
- RT_IDX_UNUSED018 = (1 << 18),
- RT_IDX_UNUSED019 = (1 << 19),
- RT_IDX_UNUSED20 = (1 << 20),
- RT_IDX_UNUSED21 = (1 << 21),
- RT_IDX_ERR = (1 << 22),
- RT_IDX_VALID = (1 << 23),
- RT_IDX_TU_CSUM_ERR = (1 << 24),
- RT_IDX_IP_CSUM_ERR = (1 << 25),
- RT_IDX_MAC_ERR = (1 << 26),
- RT_IDX_RSS_TCP6 = (1 << 27),
- RT_IDX_RSS_TCP4 = (1 << 28),
- RT_IDX_RSS_IPV6 = (1 << 29),
- RT_IDX_RSS_IPV4 = (1 << 30),
- RT_IDX_RSS_MATCH = (1 << 31),
-
- /* Hierarchy for the NIC Queue Mask */
- RT_IDX_ALL_ERR_SLOT = 0,
- RT_IDX_MAC_ERR_SLOT = 0,
- RT_IDX_IP_CSUM_ERR_SLOT = 1,
- RT_IDX_TCP_UDP_CSUM_ERR_SLOT = 2,
- RT_IDX_BCAST_SLOT = 3,
- RT_IDX_MCAST_MATCH_SLOT = 4,
- RT_IDX_ALLMULTI_SLOT = 5,
- RT_IDX_UNUSED6_SLOT = 6,
- RT_IDX_UNUSED7_SLOT = 7,
- RT_IDX_RSS_MATCH_SLOT = 8,
- RT_IDX_RSS_IPV4_SLOT = 8,
- RT_IDX_RSS_IPV6_SLOT = 9,
- RT_IDX_RSS_TCP4_SLOT = 10,
- RT_IDX_RSS_TCP6_SLOT = 11,
- RT_IDX_CAM_HIT_SLOT = 12,
- RT_IDX_UNUSED013 = 13,
- RT_IDX_UNUSED014 = 14,
- RT_IDX_PROMISCUOUS_SLOT = 15,
- RT_IDX_MAX_RT_SLOTS = 8,
- RT_IDX_MAX_NIC_SLOTS = 16,
-};
-
-/*
- * Serdes Address Register (XG_SERDES_ADDR) bit definitions.
- */
-enum {
- XG_SERDES_ADDR_RDY = (1 << 31),
- XG_SERDES_ADDR_R = (1 << 30),
-
- XG_SERDES_ADDR_STS = 0x00001E06,
- XG_SERDES_ADDR_XFI1_PWR_UP = 0x00000005,
- XG_SERDES_ADDR_XFI2_PWR_UP = 0x0000000a,
- XG_SERDES_ADDR_XAUI_PWR_DOWN = 0x00000001,
-
- /* Serdes coredump definitions. */
- XG_SERDES_XAUI_AN_START = 0x00000000,
- XG_SERDES_XAUI_AN_END = 0x00000034,
- XG_SERDES_XAUI_HSS_PCS_START = 0x00000800,
- XG_SERDES_XAUI_HSS_PCS_END = 0x0000880,
- XG_SERDES_XFI_AN_START = 0x00001000,
- XG_SERDES_XFI_AN_END = 0x00001034,
- XG_SERDES_XFI_TRAIN_START = 0x10001050,
- XG_SERDES_XFI_TRAIN_END = 0x1000107C,
- XG_SERDES_XFI_HSS_PCS_START = 0x00001800,
- XG_SERDES_XFI_HSS_PCS_END = 0x00001838,
- XG_SERDES_XFI_HSS_TX_START = 0x00001c00,
- XG_SERDES_XFI_HSS_TX_END = 0x00001c1f,
- XG_SERDES_XFI_HSS_RX_START = 0x00001c40,
- XG_SERDES_XFI_HSS_RX_END = 0x00001c5f,
- XG_SERDES_XFI_HSS_PLL_START = 0x00001e00,
- XG_SERDES_XFI_HSS_PLL_END = 0x00001e1f,
-};
-
-/*
- * NIC Probe Mux Address Register (PRB_MX_ADDR) bit definitions.
- */
-enum {
- PRB_MX_ADDR_ARE = (1 << 16),
- PRB_MX_ADDR_UP = (1 << 15),
- PRB_MX_ADDR_SWP = (1 << 14),
-
- /* Module select values. */
- PRB_MX_ADDR_MAX_MODS = 21,
- PRB_MX_ADDR_MOD_SEL_SHIFT = 9,
- PRB_MX_ADDR_MOD_SEL_TBD = 0,
- PRB_MX_ADDR_MOD_SEL_IDE1 = 1,
- PRB_MX_ADDR_MOD_SEL_IDE2 = 2,
- PRB_MX_ADDR_MOD_SEL_FRB = 3,
- PRB_MX_ADDR_MOD_SEL_ODE1 = 4,
- PRB_MX_ADDR_MOD_SEL_ODE2 = 5,
- PRB_MX_ADDR_MOD_SEL_DA1 = 6,
- PRB_MX_ADDR_MOD_SEL_DA2 = 7,
- PRB_MX_ADDR_MOD_SEL_IMP1 = 8,
- PRB_MX_ADDR_MOD_SEL_IMP2 = 9,
- PRB_MX_ADDR_MOD_SEL_OMP1 = 10,
- PRB_MX_ADDR_MOD_SEL_OMP2 = 11,
- PRB_MX_ADDR_MOD_SEL_ORS1 = 12,
- PRB_MX_ADDR_MOD_SEL_ORS2 = 13,
- PRB_MX_ADDR_MOD_SEL_REG = 14,
- PRB_MX_ADDR_MOD_SEL_MAC1 = 16,
- PRB_MX_ADDR_MOD_SEL_MAC2 = 17,
- PRB_MX_ADDR_MOD_SEL_VQM1 = 18,
- PRB_MX_ADDR_MOD_SEL_VQM2 = 19,
- PRB_MX_ADDR_MOD_SEL_MOP = 20,
- /* Bit fields indicating which modules
- * are valid for each clock domain.
- */
- PRB_MX_ADDR_VALID_SYS_MOD = 0x000f7ff7,
- PRB_MX_ADDR_VALID_PCI_MOD = 0x000040c1,
- PRB_MX_ADDR_VALID_XGM_MOD = 0x00037309,
- PRB_MX_ADDR_VALID_FC_MOD = 0x00003001,
- PRB_MX_ADDR_VALID_TOTAL = 34,
-
- /* Clock domain values. */
- PRB_MX_ADDR_CLOCK_SHIFT = 6,
- PRB_MX_ADDR_SYS_CLOCK = 0,
- PRB_MX_ADDR_PCI_CLOCK = 2,
- PRB_MX_ADDR_FC_CLOCK = 5,
- PRB_MX_ADDR_XGM_CLOCK = 6,
-
- PRB_MX_ADDR_MAX_MUX = 64,
-};
-
-/*
- * Control Register Set Map
- */
-enum {
- PROC_ADDR = 0, /* Use semaphore */
- PROC_DATA = 0x04, /* Use semaphore */
- SYS = 0x08,
- RST_FO = 0x0c,
- FSC = 0x10,
- CSR = 0x14,
- LED = 0x18,
- ICB_RID = 0x1c, /* Use semaphore */
- ICB_L = 0x20, /* Use semaphore */
- ICB_H = 0x24, /* Use semaphore */
- CFG = 0x28,
- BIOS_ADDR = 0x2c,
- STS = 0x30,
- INTR_EN = 0x34,
- INTR_MASK = 0x38,
- ISR1 = 0x3c,
- ISR2 = 0x40,
- ISR3 = 0x44,
- ISR4 = 0x48,
- REV_ID = 0x4c,
- FRC_ECC_ERR = 0x50,
- ERR_STS = 0x54,
- RAM_DBG_ADDR = 0x58,
- RAM_DBG_DATA = 0x5c,
- ECC_ERR_CNT = 0x60,
- SEM = 0x64,
- GPIO_1 = 0x68, /* Use semaphore */
- GPIO_2 = 0x6c, /* Use semaphore */
- GPIO_3 = 0x70, /* Use semaphore */
- RSVD2 = 0x74,
- XGMAC_ADDR = 0x78, /* Use semaphore */
- XGMAC_DATA = 0x7c, /* Use semaphore */
- NIC_ETS = 0x80,
- CNA_ETS = 0x84,
- FLASH_ADDR = 0x88, /* Use semaphore */
- FLASH_DATA = 0x8c, /* Use semaphore */
- CQ_STOP = 0x90,
- PAGE_TBL_RID = 0x94,
- WQ_PAGE_TBL_LO = 0x98,
- WQ_PAGE_TBL_HI = 0x9c,
- CQ_PAGE_TBL_LO = 0xa0,
- CQ_PAGE_TBL_HI = 0xa4,
- MAC_ADDR_IDX = 0xa8, /* Use semaphore */
- MAC_ADDR_DATA = 0xac, /* Use semaphore */
- COS_DFLT_CQ1 = 0xb0,
- COS_DFLT_CQ2 = 0xb4,
- ETYPE_SKIP1 = 0xb8,
- ETYPE_SKIP2 = 0xbc,
- SPLT_HDR = 0xc0,
- FC_PAUSE_THRES = 0xc4,
- NIC_PAUSE_THRES = 0xc8,
- FC_ETHERTYPE = 0xcc,
- FC_RCV_CFG = 0xd0,
- NIC_RCV_CFG = 0xd4,
- FC_COS_TAGS = 0xd8,
- NIC_COS_TAGS = 0xdc,
- MGMT_RCV_CFG = 0xe0,
- RT_IDX = 0xe4,
- RT_DATA = 0xe8,
- RSVD7 = 0xec,
- XG_SERDES_ADDR = 0xf0,
- XG_SERDES_DATA = 0xf4,
- PRB_MX_ADDR = 0xf8, /* Use semaphore */
- PRB_MX_DATA = 0xfc, /* Use semaphore */
-};
-
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#define SMALL_BUFFER_SIZE 256
-#define SMALL_BUF_MAP_SIZE SMALL_BUFFER_SIZE
-#define SPLT_SETTING FSC_DBRST_1024
-#define SPLT_LEN 0
-#define QLGE_SB_PAD 0
-#else
-#define SMALL_BUFFER_SIZE 512
-#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
-#define SPLT_SETTING FSC_SH
-#define SPLT_LEN (SPLT_HDR_EP | \
- min(SMALL_BUF_MAP_SIZE, 1023))
-#define QLGE_SB_PAD 32
-#endif
-
-/*
- * CAM output format.
- */
-enum {
- CAM_OUT_ROUTE_FC = 0,
- CAM_OUT_ROUTE_NIC = 1,
- CAM_OUT_FUNC_SHIFT = 2,
- CAM_OUT_RV = (1 << 4),
- CAM_OUT_SH = (1 << 15),
- CAM_OUT_CQ_ID_SHIFT = 5,
-};
-
-/*
- * Mailbox definitions
- */
-enum {
- /* Asynchronous Event Notifications */
- AEN_SYS_ERR = 0x00008002,
- AEN_LINK_UP = 0x00008011,
- AEN_LINK_DOWN = 0x00008012,
- AEN_IDC_CMPLT = 0x00008100,
- AEN_IDC_REQ = 0x00008101,
- AEN_IDC_EXT = 0x00008102,
- AEN_DCBX_CHG = 0x00008110,
- AEN_AEN_LOST = 0x00008120,
- AEN_AEN_SFP_IN = 0x00008130,
- AEN_AEN_SFP_OUT = 0x00008131,
- AEN_FW_INIT_DONE = 0x00008400,
- AEN_FW_INIT_FAIL = 0x00008401,
-
- /* Mailbox Command Opcodes. */
- MB_CMD_NOP = 0x00000000,
- MB_CMD_EX_FW = 0x00000002,
- MB_CMD_MB_TEST = 0x00000006,
- MB_CMD_CSUM_TEST = 0x00000007, /* Verify Checksum */
- MB_CMD_ABOUT_FW = 0x00000008,
- MB_CMD_COPY_RISC_RAM = 0x0000000a,
- MB_CMD_LOAD_RISC_RAM = 0x0000000b,
- MB_CMD_DUMP_RISC_RAM = 0x0000000c,
- MB_CMD_WRITE_RAM = 0x0000000d,
- MB_CMD_INIT_RISC_RAM = 0x0000000e,
- MB_CMD_READ_RAM = 0x0000000f,
- MB_CMD_STOP_FW = 0x00000014,
- MB_CMD_MAKE_SYS_ERR = 0x0000002a,
- MB_CMD_WRITE_SFP = 0x00000030,
- MB_CMD_READ_SFP = 0x00000031,
- MB_CMD_INIT_FW = 0x00000060,
- MB_CMD_GET_IFCB = 0x00000061,
- MB_CMD_GET_FW_STATE = 0x00000069,
- MB_CMD_IDC_REQ = 0x00000100, /* Inter-Driver Communication */
- MB_CMD_IDC_ACK = 0x00000101, /* Inter-Driver Communication */
- MB_CMD_SET_WOL_MODE = 0x00000110, /* Wake On Lan */
- MB_WOL_DISABLE = 0,
- MB_WOL_MAGIC_PKT = (1 << 1),
- MB_WOL_FLTR = (1 << 2),
- MB_WOL_UCAST = (1 << 3),
- MB_WOL_MCAST = (1 << 4),
- MB_WOL_BCAST = (1 << 5),
- MB_WOL_LINK_UP = (1 << 6),
- MB_WOL_LINK_DOWN = (1 << 7),
- MB_WOL_MODE_ON = (1 << 16), /* Wake on Lan Mode on */
- MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */
- MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */
- MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */
- MB_CMD_CLEAR_WOL_MAGIC = 0x00000114,/* Wake On Lan Magic Packet */
- MB_CMD_SET_WOL_IMMED = 0x00000115,
- MB_CMD_PORT_RESET = 0x00000120,
- MB_CMD_SET_PORT_CFG = 0x00000122,
- MB_CMD_GET_PORT_CFG = 0x00000123,
- MB_CMD_GET_LINK_STS = 0x00000124,
- MB_CMD_SET_LED_CFG = 0x00000125, /* Set LED Configuration Register */
- QL_LED_BLINK = 0x03e803e8,
- MB_CMD_GET_LED_CFG = 0x00000126, /* Get LED Configuration Register */
- MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */
- MB_SET_MPI_TFK_STOP = (1 << 0),
- MB_SET_MPI_TFK_RESUME = (1 << 1),
- MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */
- MB_GET_MPI_TFK_STOPPED = (1 << 0),
- MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1),
- /* Sub-commands for IDC request.
- * This describes the reason for the
- * IDC request.
- */
- MB_CMD_IOP_NONE = 0x0000,
- MB_CMD_IOP_PREP_UPDATE_MPI = 0x0001,
- MB_CMD_IOP_COMP_UPDATE_MPI = 0x0002,
- MB_CMD_IOP_PREP_LINK_DOWN = 0x0010,
- MB_CMD_IOP_DVR_START = 0x0100,
- MB_CMD_IOP_FLASH_ACC = 0x0101,
- MB_CMD_IOP_RESTART_MPI = 0x0102,
- MB_CMD_IOP_CORE_DUMP_MPI = 0x0103,
-
- /* Mailbox Command Status. */
- MB_CMD_STS_GOOD = 0x00004000, /* Success. */
- MB_CMD_STS_INTRMDT = 0x00001000, /* Intermediate Complete. */
- MB_CMD_STS_INVLD_CMD = 0x00004001, /* Invalid. */
- MB_CMD_STS_XFC_ERR = 0x00004002, /* Interface Error. */
- MB_CMD_STS_CSUM_ERR = 0x00004003, /* Csum Error. */
- MB_CMD_STS_ERR = 0x00004005, /* System Error. */
- MB_CMD_STS_PARAM_ERR = 0x00004006, /* Parameter Error. */
-};
-
-struct mbox_params {
- u32 mbox_in[MAILBOX_COUNT];
- u32 mbox_out[MAILBOX_COUNT];
- int in_count;
- int out_count;
-};
-
-struct flash_params_8012 {
- u8 dev_id_str[4];
- __le16 size;
- __le16 csum;
- __le16 ver;
- __le16 sub_dev_id;
- u8 mac_addr[6];
- __le16 res;
-};
-
-/* 8000 device's flash is a different structure
- * at a different offset in flash.
- */
-#define FUNC0_FLASH_OFFSET 0x140200
-#define FUNC1_FLASH_OFFSET 0x140600
-
-/* Flash related data structures. */
-struct flash_params_8000 {
- u8 dev_id_str[4]; /* "8000" */
- __le16 ver;
- __le16 size;
- __le16 csum;
- __le16 reserved0;
- __le16 total_size;
- __le16 entry_count;
- u8 data_type0;
- u8 data_size0;
- u8 mac_addr[6];
- u8 data_type1;
- u8 data_size1;
- u8 mac_addr1[6];
- u8 data_type2;
- u8 data_size2;
- __le16 vlan_id;
- u8 data_type3;
- u8 data_size3;
- __le16 last;
- u8 reserved1[464];
- __le16 subsys_ven_id;
- __le16 subsys_dev_id;
- u8 reserved2[4];
-};
-
-union flash_params {
- struct flash_params_8012 flash_params_8012;
- struct flash_params_8000 flash_params_8000;
-};
-
-/*
- * doorbell space for the rx ring context
- */
-struct rx_doorbell_context {
- u32 cnsmr_idx; /* 0x00 */
- u32 valid; /* 0x04 */
- u32 reserved[4]; /* 0x08-0x14 */
- u32 lbq_prod_idx; /* 0x18 */
- u32 sbq_prod_idx; /* 0x1c */
-};
-
-/*
- * doorbell space for the tx ring context
- */
-struct tx_doorbell_context {
- u32 prod_idx; /* 0x00 */
- u32 valid; /* 0x04 */
- u32 reserved[4]; /* 0x08-0x14 */
- u32 lbq_prod_idx; /* 0x18 */
- u32 sbq_prod_idx; /* 0x1c */
-};
-
-/* DATA STRUCTURES SHARED WITH HARDWARE. */
-struct tx_buf_desc {
- __le64 addr;
- __le32 len;
-#define TX_DESC_LEN_MASK 0x000fffff
-#define TX_DESC_C 0x40000000
-#define TX_DESC_E 0x80000000
-} __packed;
-
-/*
- * IOCB Definitions...
- */
-
-#define OPCODE_OB_MAC_IOCB 0x01
-#define OPCODE_OB_MAC_TSO_IOCB 0x02
-#define OPCODE_IB_MAC_IOCB 0x20
-#define OPCODE_IB_MPI_IOCB 0x21
-#define OPCODE_IB_AE_IOCB 0x3f
-
-struct qlge_ob_mac_iocb_req {
- u8 opcode;
- u8 flags1;
-#define OB_MAC_IOCB_REQ_OI 0x01
-#define OB_MAC_IOCB_REQ_I 0x02
-#define OB_MAC_IOCB_REQ_D 0x08
-#define OB_MAC_IOCB_REQ_F 0x10
- u8 flags2;
- u8 flags3;
-#define OB_MAC_IOCB_DFP 0x02
-#define OB_MAC_IOCB_V 0x04
- __le32 reserved1[2];
- __le16 frame_len;
-#define OB_MAC_IOCB_LEN_MASK 0x3ffff
- __le16 reserved2;
- u32 tid;
- u32 txq_idx;
- __le32 reserved3;
- __le16 vlan_tci;
- __le16 reserved4;
- struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
-} __packed;
-
-struct qlge_ob_mac_iocb_rsp {
- u8 opcode; /* */
- u8 flags1; /* */
-#define OB_MAC_IOCB_RSP_OI 0x01 /* */
-#define OB_MAC_IOCB_RSP_I 0x02 /* */
-#define OB_MAC_IOCB_RSP_E 0x08 /* */
-#define OB_MAC_IOCB_RSP_S 0x10 /* too Short */
-#define OB_MAC_IOCB_RSP_L 0x20 /* too Large */
-#define OB_MAC_IOCB_RSP_P 0x40 /* Padded */
- u8 flags2; /* */
- u8 flags3; /* */
-#define OB_MAC_IOCB_RSP_B 0x80 /* */
- u32 tid;
- u32 txq_idx;
- __le32 reserved[13];
-} __packed;
-
-struct qlge_ob_mac_tso_iocb_req {
- u8 opcode;
- u8 flags1;
-#define OB_MAC_TSO_IOCB_OI 0x01
-#define OB_MAC_TSO_IOCB_I 0x02
-#define OB_MAC_TSO_IOCB_D 0x08
-#define OB_MAC_TSO_IOCB_IP4 0x40
-#define OB_MAC_TSO_IOCB_IP6 0x80
- u8 flags2;
-#define OB_MAC_TSO_IOCB_LSO 0x20
-#define OB_MAC_TSO_IOCB_UC 0x40
-#define OB_MAC_TSO_IOCB_TC 0x80
- u8 flags3;
-#define OB_MAC_TSO_IOCB_IC 0x01
-#define OB_MAC_TSO_IOCB_DFP 0x02
-#define OB_MAC_TSO_IOCB_V 0x04
- __le32 reserved1[2];
- __le32 frame_len;
- u32 tid;
- u32 txq_idx;
- __le16 total_hdrs_len;
- __le16 net_trans_offset;
-#define OB_MAC_TRANSPORT_HDR_SHIFT 6
- __le16 vlan_tci;
- __le16 mss;
- struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
-} __packed;
-
-struct qlge_ob_mac_tso_iocb_rsp {
- u8 opcode;
- u8 flags1;
-#define OB_MAC_TSO_IOCB_RSP_OI 0x01
-#define OB_MAC_TSO_IOCB_RSP_I 0x02
-#define OB_MAC_TSO_IOCB_RSP_E 0x08
-#define OB_MAC_TSO_IOCB_RSP_S 0x10
-#define OB_MAC_TSO_IOCB_RSP_L 0x20
-#define OB_MAC_TSO_IOCB_RSP_P 0x40
- u8 flags2; /* */
- u8 flags3; /* */
-#define OB_MAC_TSO_IOCB_RSP_B 0x8000
- u32 tid;
- u32 txq_idx;
- __le32 reserved2[13];
-} __packed;
-
-struct qlge_ib_mac_iocb_rsp {
- u8 opcode; /* 0x20 */
- u8 flags1;
-#define IB_MAC_IOCB_RSP_OI 0x01 /* Override intr delay */
-#define IB_MAC_IOCB_RSP_I 0x02 /* Disable Intr Generation */
-#define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */
-#define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */
-#define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */
-#define IB_MAC_IOCB_RSP_IE 0x10 /* IPv4 checksum error */
-#define IB_MAC_IOCB_RSP_M_MASK 0x60 /* Multicast info */
-#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* Not mcast frame */
-#define IB_MAC_IOCB_RSP_M_HASH 0x20 /* HASH mcast frame */
-#define IB_MAC_IOCB_RSP_M_REG 0x40 /* Registered mcast frame */
-#define IB_MAC_IOCB_RSP_M_PROM 0x60 /* Promiscuous mcast frame */
-#define IB_MAC_IOCB_RSP_B 0x80 /* Broadcast frame */
- u8 flags2;
-#define IB_MAC_IOCB_RSP_P 0x01 /* Promiscuous frame */
-#define IB_MAC_IOCB_RSP_V 0x02 /* Vlan tag present */
-#define IB_MAC_IOCB_RSP_ERR_MASK 0x1c /* */
-#define IB_MAC_IOCB_RSP_ERR_CODE_ERR 0x04
-#define IB_MAC_IOCB_RSP_ERR_OVERSIZE 0x08
-#define IB_MAC_IOCB_RSP_ERR_UNDERSIZE 0x10
-#define IB_MAC_IOCB_RSP_ERR_PREAMBLE 0x14
-#define IB_MAC_IOCB_RSP_ERR_FRAME_LEN 0x18
-#define IB_MAC_IOCB_RSP_ERR_CRC 0x1c
-#define IB_MAC_IOCB_RSP_U 0x20 /* UDP packet */
-#define IB_MAC_IOCB_RSP_T 0x40 /* TCP packet */
-#define IB_MAC_IOCB_RSP_FO 0x80 /* Failover port */
- u8 flags3;
-#define IB_MAC_IOCB_RSP_RSS_MASK 0x07 /* RSS mask */
-#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* No RSS match */
-#define IB_MAC_IOCB_RSP_M_IPV4 0x04 /* IPv4 RSS match */
-#define IB_MAC_IOCB_RSP_M_IPV6 0x02 /* IPv6 RSS match */
-#define IB_MAC_IOCB_RSP_M_TCP_V4 0x05 /* TCP with IPv4 */
-#define IB_MAC_IOCB_RSP_M_TCP_V6 0x03 /* TCP with IPv6 */
-#define IB_MAC_IOCB_RSP_V4 0x08 /* IPV4 */
-#define IB_MAC_IOCB_RSP_V6 0x10 /* IPV6 */
-#define IB_MAC_IOCB_RSP_IH 0x20 /* Split after IP header */
-#define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */
-#define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */
- __le32 data_len; /* */
- __le64 data_addr; /* */
- __le32 rss; /* */
- __le16 vlan_id; /* 12 bits */
-#define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */
-#define IB_MAC_IOCB_RSP_COS_SHIFT 12 /* class of service value */
-#define IB_MAC_IOCB_RSP_VLAN_MASK 0x0ffff
-
- __le16 reserved1;
- __le32 reserved2[6];
- u8 reserved3[3];
- u8 flags4;
-#define IB_MAC_IOCB_RSP_HV 0x20
-#define IB_MAC_IOCB_RSP_HS 0x40
-#define IB_MAC_IOCB_RSP_HL 0x80
- __le32 hdr_len; /* */
- __le64 hdr_addr; /* */
-} __packed;
-
-struct qlge_ib_ae_iocb_rsp {
- u8 opcode;
- u8 flags1;
-#define IB_AE_IOCB_RSP_OI 0x01
-#define IB_AE_IOCB_RSP_I 0x02
- u8 event;
-#define LINK_UP_EVENT 0x00
-#define LINK_DOWN_EVENT 0x01
-#define CAM_LOOKUP_ERR_EVENT 0x06
-#define SOFT_ECC_ERROR_EVENT 0x07
-#define MGMT_ERR_EVENT 0x08
-#define TEN_GIG_MAC_EVENT 0x09
-#define GPI0_H2L_EVENT 0x10
-#define GPI0_L2H_EVENT 0x20
-#define GPI1_H2L_EVENT 0x11
-#define GPI1_L2H_EVENT 0x21
-#define PCI_ERR_ANON_BUF_RD 0x40
- u8 q_id;
- __le32 reserved[15];
-} __packed;
-
-/*
- * These three structures are for generic
- * handling of ib and ob iocbs.
- */
-struct qlge_net_rsp_iocb {
- u8 opcode;
- u8 flags0;
- __le16 length;
- __le32 tid;
- __le32 reserved[14];
-} __packed;
-
-struct qlge_net_req_iocb {
- u8 opcode;
- u8 flags0;
- __le16 flags1;
- __le32 tid;
- __le32 reserved1[30];
-} __packed;
-
-/*
- * tx ring initialization control block for chip.
- * It is defined as:
- * "Work Queue Initialization Control Block"
- */
-struct wqicb {
- __le16 len;
-#define Q_LEN_V (1 << 4)
-#define Q_LEN_CPP_CONT 0x0000
-#define Q_LEN_CPP_16 0x0001
-#define Q_LEN_CPP_32 0x0002
-#define Q_LEN_CPP_64 0x0003
-#define Q_LEN_CPP_512 0x0006
- __le16 flags;
-#define Q_PRI_SHIFT 1
-#define Q_FLAGS_LC 0x1000
-#define Q_FLAGS_LB 0x2000
-#define Q_FLAGS_LI 0x4000
-#define Q_FLAGS_LO 0x8000
- __le16 cq_id_rss;
-#define Q_CQ_ID_RSS_RV 0x8000
- __le16 rid;
- __le64 addr;
- __le64 cnsmr_idx_addr;
-} __packed;
-
-/*
- * rx ring initialization control block for chip.
- * It is defined as:
- * "Completion Queue Initialization Control Block"
- */
-struct cqicb {
- u8 msix_vect;
- u8 reserved1;
- u8 reserved2;
- u8 flags;
-#define FLAGS_LV 0x08
-#define FLAGS_LS 0x10
-#define FLAGS_LL 0x20
-#define FLAGS_LI 0x40
-#define FLAGS_LC 0x80
- __le16 len;
-#define LEN_V (1 << 4)
-#define LEN_CPP_CONT 0x0000
-#define LEN_CPP_32 0x0001
-#define LEN_CPP_64 0x0002
-#define LEN_CPP_128 0x0003
- __le16 rid;
- __le64 addr;
- __le64 prod_idx_addr;
- __le16 pkt_delay;
- __le16 irq_delay;
- __le64 lbq_addr;
- __le16 lbq_buf_size;
- __le16 lbq_len; /* entry count */
- __le64 sbq_addr;
- __le16 sbq_buf_size;
- __le16 sbq_len; /* entry count */
-} __packed;
-
-struct ricb {
- u8 base_cq;
-#define RSS_L4K 0x80
- u8 flags;
-#define RSS_L6K 0x01
-#define RSS_LI 0x02
-#define RSS_LB 0x04
-#define RSS_LM 0x08
-#define RSS_RI4 0x10
-#define RSS_RT4 0x20
-#define RSS_RI6 0x40
-#define RSS_RT6 0x80
- __le16 mask;
- u8 hash_cq_id[1024];
- __le32 ipv6_hash_key[10];
- __le32 ipv4_hash_key[4];
-} __packed;
-
-/* SOFTWARE/DRIVER DATA STRUCTURES. */
-
-struct qlge_oal {
- struct tx_buf_desc oal[TX_DESC_PER_OAL];
-};
-
-struct map_list {
- DEFINE_DMA_UNMAP_ADDR(mapaddr);
- DEFINE_DMA_UNMAP_LEN(maplen);
-};
-
-struct tx_ring_desc {
- struct sk_buff *skb;
- struct qlge_ob_mac_iocb_req *queue_entry;
- u32 index;
- struct qlge_oal oal;
- struct map_list map[MAX_SKB_FRAGS + 2];
- int map_cnt;
- struct tx_ring_desc *next;
-};
-
-#define QL_TXQ_IDX(qdev, skb) (smp_processor_id() % (qdev->tx_ring_count))
-
-struct tx_ring {
- /*
- * queue info.
- */
- struct wqicb wqicb; /* structure used to inform chip of new queue */
- void *wq_base; /* pci_alloc:virtual addr for tx */
- dma_addr_t wq_base_dma; /* pci_alloc:dma addr for tx */
- __le32 *cnsmr_idx_sh_reg; /* shadow copy of consumer idx */
- dma_addr_t cnsmr_idx_sh_reg_dma; /* dma-shadow copy of consumer */
- u32 wq_size; /* size in bytes of queue area */
- u32 wq_len; /* number of entries in queue */
- void __iomem *prod_idx_db_reg; /* doorbell area index reg at offset 0x00 */
- void __iomem *valid_db_reg; /* doorbell area valid reg at offset 0x04 */
- u16 prod_idx; /* current value for prod idx */
- u16 cq_id; /* completion (rx) queue for tx completions */
- u8 wq_id; /* queue id for this entry */
- u8 reserved1[3];
- struct tx_ring_desc *q; /* descriptor list for the queue */
- spinlock_t lock;
- atomic_t tx_count; /* counts down for every outstanding IO */
- struct delayed_work tx_work;
- struct qlge_adapter *qdev;
- u64 tx_packets;
- u64 tx_bytes;
- u64 tx_errors;
-};
-
-struct qlge_page_chunk {
- struct page *page;
- void *va; /* virt addr including offset */
- unsigned int offset;
-};
-
-struct qlge_bq_desc {
- union {
- /* for large buffers */
- struct qlge_page_chunk pg_chunk;
- /* for small buffers */
- struct sk_buff *skb;
- } p;
- dma_addr_t dma_addr;
- /* address in ring where the buffer address is written for the device */
- __le64 *buf_ptr;
- u32 index;
-};
-
-/* buffer queue */
-struct qlge_bq {
- __le64 *base;
- dma_addr_t base_dma;
- __le64 *base_indirect;
- dma_addr_t base_indirect_dma;
- struct qlge_bq_desc *queue;
- /* prod_idx is the index of the first buffer that may NOT be used by
- * hw, ie. one after the last. Advanced by sw.
- */
- void __iomem *prod_idx_db_reg;
- /* next index where sw should refill a buffer for hw */
- u16 next_to_use;
- /* next index where sw expects to find a buffer filled by hw */
- u16 next_to_clean;
- enum {
- QLGE_SB, /* small buffer */
- QLGE_LB, /* large buffer */
- } type;
-};
-
-#define QLGE_BQ_CONTAINER(bq) \
-({ \
- typeof(bq) _bq = bq; \
- (struct rx_ring *)((char *)_bq - (_bq->type == QLGE_SB ? \
- offsetof(struct rx_ring, sbq) : \
- offsetof(struct rx_ring, lbq))); \
-})
-
-/* Experience shows that the device ignores the low 4 bits of the tail index.
- * Refill up to a x16 multiple.
- */
-#define QLGE_BQ_ALIGN(index) ALIGN_DOWN(index, 16)
-
-#define QLGE_BQ_WRAP(index) ((index) & (QLGE_BQ_LEN - 1))
-
-#define QLGE_BQ_HW_OWNED(bq) \
-({ \
- typeof(bq) _bq = bq; \
- QLGE_BQ_WRAP(QLGE_BQ_ALIGN((_bq)->next_to_use) - \
- (_bq)->next_to_clean); \
-})
-
-struct rx_ring {
- struct cqicb cqicb; /* The chip's completion queue init control block. */
-
- /* Completion queue elements. */
- void *cq_base;
- dma_addr_t cq_base_dma;
- u32 cq_size;
- u32 cq_len;
- u16 cq_id;
- __le32 *prod_idx_sh_reg; /* Shadowed producer register. */
- dma_addr_t prod_idx_sh_reg_dma;
- void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */
- u32 cnsmr_idx; /* current sw idx */
- struct qlge_net_rsp_iocb *curr_entry; /* next entry on queue */
- void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */
-
- /* Large buffer queue elements. */
- struct qlge_bq lbq;
- struct qlge_page_chunk master_chunk;
- dma_addr_t chunk_dma_addr;
-
- /* Small buffer queue elements. */
- struct qlge_bq sbq;
-
- /* Misc. handler elements. */
- u32 irq; /* Which vector this ring is assigned. */
- u32 cpu; /* Which CPU this should run on. */
- struct delayed_work refill_work;
- char name[IFNAMSIZ + 5];
- struct napi_struct napi;
- u8 reserved;
- struct qlge_adapter *qdev;
- u64 rx_packets;
- u64 rx_multicast;
- u64 rx_bytes;
- u64 rx_dropped;
- u64 rx_errors;
-};
-
-/*
- * RSS Initialization Control Block
- */
-struct hash_id {
- u8 value[4];
-};
-
-struct nic_stats {
- /*
- * These stats come from offset 200h to 278h
- * in the XGMAC register.
- */
- u64 tx_pkts;
- u64 tx_bytes;
- u64 tx_mcast_pkts;
- u64 tx_bcast_pkts;
- u64 tx_ucast_pkts;
- u64 tx_ctl_pkts;
- u64 tx_pause_pkts;
- u64 tx_64_pkt;
- u64 tx_65_to_127_pkt;
- u64 tx_128_to_255_pkt;
- u64 tx_256_511_pkt;
- u64 tx_512_to_1023_pkt;
- u64 tx_1024_to_1518_pkt;
- u64 tx_1519_to_max_pkt;
- u64 tx_undersize_pkt;
- u64 tx_oversize_pkt;
-
- /*
- * These stats come from offset 300h to 3C8h
- * in the XGMAC register.
- */
- u64 rx_bytes;
- u64 rx_bytes_ok;
- u64 rx_pkts;
- u64 rx_pkts_ok;
- u64 rx_bcast_pkts;
- u64 rx_mcast_pkts;
- u64 rx_ucast_pkts;
- u64 rx_undersize_pkts;
- u64 rx_oversize_pkts;
- u64 rx_jabber_pkts;
- u64 rx_undersize_fcerr_pkts;
- u64 rx_drop_events;
- u64 rx_fcerr_pkts;
- u64 rx_align_err;
- u64 rx_symbol_err;
- u64 rx_mac_err;
- u64 rx_ctl_pkts;
- u64 rx_pause_pkts;
- u64 rx_64_pkts;
- u64 rx_65_to_127_pkts;
- u64 rx_128_255_pkts;
- u64 rx_256_511_pkts;
- u64 rx_512_to_1023_pkts;
- u64 rx_1024_to_1518_pkts;
- u64 rx_1519_to_max_pkts;
- u64 rx_len_err_pkts;
- /* Receive Mac Err stats */
- u64 rx_code_err;
- u64 rx_oversize_err;
- u64 rx_undersize_err;
- u64 rx_preamble_err;
- u64 rx_frame_len_err;
- u64 rx_crc_err;
- u64 rx_err_count;
- /*
- * These stats come from offset 500h to 5C8h
- * in the XGMAC register.
- */
- u64 tx_cbfc_pause_frames0;
- u64 tx_cbfc_pause_frames1;
- u64 tx_cbfc_pause_frames2;
- u64 tx_cbfc_pause_frames3;
- u64 tx_cbfc_pause_frames4;
- u64 tx_cbfc_pause_frames5;
- u64 tx_cbfc_pause_frames6;
- u64 tx_cbfc_pause_frames7;
- u64 rx_cbfc_pause_frames0;
- u64 rx_cbfc_pause_frames1;
- u64 rx_cbfc_pause_frames2;
- u64 rx_cbfc_pause_frames3;
- u64 rx_cbfc_pause_frames4;
- u64 rx_cbfc_pause_frames5;
- u64 rx_cbfc_pause_frames6;
- u64 rx_cbfc_pause_frames7;
- u64 rx_nic_fifo_drop;
-};
-
-/* Firmware coredump internal register address/length pairs. */
-enum {
- MPI_CORE_REGS_ADDR = 0x00030000,
- MPI_CORE_REGS_CNT = 127,
- MPI_CORE_SH_REGS_CNT = 16,
- TEST_REGS_ADDR = 0x00001000,
- TEST_REGS_CNT = 23,
- RMII_REGS_ADDR = 0x00001040,
- RMII_REGS_CNT = 64,
- FCMAC1_REGS_ADDR = 0x00001080,
- FCMAC2_REGS_ADDR = 0x000010c0,
- FCMAC_REGS_CNT = 64,
- FC1_MBX_REGS_ADDR = 0x00001100,
- FC2_MBX_REGS_ADDR = 0x00001240,
- FC_MBX_REGS_CNT = 64,
- IDE_REGS_ADDR = 0x00001140,
- IDE_REGS_CNT = 64,
- NIC1_MBX_REGS_ADDR = 0x00001180,
- NIC2_MBX_REGS_ADDR = 0x00001280,
- NIC_MBX_REGS_CNT = 64,
- SMBUS_REGS_ADDR = 0x00001200,
- SMBUS_REGS_CNT = 64,
- I2C_REGS_ADDR = 0x00001fc0,
- I2C_REGS_CNT = 64,
- MEMC_REGS_ADDR = 0x00003000,
- MEMC_REGS_CNT = 256,
- PBUS_REGS_ADDR = 0x00007c00,
- PBUS_REGS_CNT = 256,
- MDE_REGS_ADDR = 0x00010000,
- MDE_REGS_CNT = 6,
- CODE_RAM_ADDR = 0x00020000,
- CODE_RAM_CNT = 0x2000,
- MEMC_RAM_ADDR = 0x00100000,
- MEMC_RAM_CNT = 0x2000,
-};
-
-#define MPI_COREDUMP_COOKIE 0x5555aaaa
-struct mpi_coredump_global_header {
- u32 cookie;
- u8 id_string[16];
- u32 time_lo;
- u32 time_hi;
- u32 image_size;
- u32 header_size;
- u8 info[220];
-};
-
-struct mpi_coredump_segment_header {
- u32 cookie;
- u32 seg_num;
- u32 seg_size;
- u32 extra;
- u8 description[16];
-};
-
-/* Firmware coredump header segment numbers. */
-enum {
- CORE_SEG_NUM = 1,
- TEST_LOGIC_SEG_NUM = 2,
- RMII_SEG_NUM = 3,
- FCMAC1_SEG_NUM = 4,
- FCMAC2_SEG_NUM = 5,
- FC1_MBOX_SEG_NUM = 6,
- IDE_SEG_NUM = 7,
- NIC1_MBOX_SEG_NUM = 8,
- SMBUS_SEG_NUM = 9,
- FC2_MBOX_SEG_NUM = 10,
- NIC2_MBOX_SEG_NUM = 11,
- I2C_SEG_NUM = 12,
- MEMC_SEG_NUM = 13,
- PBUS_SEG_NUM = 14,
- MDE_SEG_NUM = 15,
- NIC1_CONTROL_SEG_NUM = 16,
- NIC2_CONTROL_SEG_NUM = 17,
- NIC1_XGMAC_SEG_NUM = 18,
- NIC2_XGMAC_SEG_NUM = 19,
- WCS_RAM_SEG_NUM = 20,
- MEMC_RAM_SEG_NUM = 21,
- XAUI_AN_SEG_NUM = 22,
- XAUI_HSS_PCS_SEG_NUM = 23,
- XFI_AN_SEG_NUM = 24,
- XFI_TRAIN_SEG_NUM = 25,
- XFI_HSS_PCS_SEG_NUM = 26,
- XFI_HSS_TX_SEG_NUM = 27,
- XFI_HSS_RX_SEG_NUM = 28,
- XFI_HSS_PLL_SEG_NUM = 29,
- MISC_NIC_INFO_SEG_NUM = 30,
- INTR_STATES_SEG_NUM = 31,
- CAM_ENTRIES_SEG_NUM = 32,
- ROUTING_WORDS_SEG_NUM = 33,
- ETS_SEG_NUM = 34,
- PROBE_DUMP_SEG_NUM = 35,
- ROUTING_INDEX_SEG_NUM = 36,
- MAC_PROTOCOL_SEG_NUM = 37,
- XAUI2_AN_SEG_NUM = 38,
- XAUI2_HSS_PCS_SEG_NUM = 39,
- XFI2_AN_SEG_NUM = 40,
- XFI2_TRAIN_SEG_NUM = 41,
- XFI2_HSS_PCS_SEG_NUM = 42,
- XFI2_HSS_TX_SEG_NUM = 43,
- XFI2_HSS_RX_SEG_NUM = 44,
- XFI2_HSS_PLL_SEG_NUM = 45,
- SEM_REGS_SEG_NUM = 50
-
-};
-
-/* There are 64 generic NIC registers. */
-#define NIC_REGS_DUMP_WORD_COUNT 64
-/* XGMAC word count. */
-#define XGMAC_DUMP_WORD_COUNT (XGMAC_REGISTER_END / 4)
-/* Word counts for the SERDES blocks. */
-#define XG_SERDES_XAUI_AN_COUNT 14
-#define XG_SERDES_XAUI_HSS_PCS_COUNT 33
-#define XG_SERDES_XFI_AN_COUNT 14
-#define XG_SERDES_XFI_TRAIN_COUNT 12
-#define XG_SERDES_XFI_HSS_PCS_COUNT 15
-#define XG_SERDES_XFI_HSS_TX_COUNT 32
-#define XG_SERDES_XFI_HSS_RX_COUNT 32
-#define XG_SERDES_XFI_HSS_PLL_COUNT 32
-
-/* There are 2 CNA ETS and 8 NIC ETS registers. */
-#define ETS_REGS_DUMP_WORD_COUNT 10
-
-/* Each probe mux entry stores the probe type plus 64 entries
- * that are each 64-bits in length. There are a total of
- * 34 (PRB_MX_ADDR_VALID_TOTAL) valid probes.
- */
-#define PRB_MX_ADDR_PRB_WORD_COUNT (1 + (PRB_MX_ADDR_MAX_MUX * 2))
-#define PRB_MX_DUMP_TOT_COUNT (PRB_MX_ADDR_PRB_WORD_COUNT * \
- PRB_MX_ADDR_VALID_TOTAL)
-/* Each routing entry consists of 4 32-bit words.
- * They are route type, index, index word, and result.
- * There are 2 route blocks with 8 entries each and
- * 2 NIC blocks with 16 entries each.
- * The total is 48 entries with 4 words each.
- */
-#define RT_IDX_DUMP_ENTRIES 48
-#define RT_IDX_DUMP_WORDS_PER_ENTRY 4
-#define RT_IDX_DUMP_TOT_WORDS (RT_IDX_DUMP_ENTRIES * \
- RT_IDX_DUMP_WORDS_PER_ENTRY)
-/* There are 10 address blocks in filter, each with
- * different entry counts and different word-count-per-entry.
- */
-#define MAC_ADDR_DUMP_ENTRIES \
- ((MAC_ADDR_MAX_CAM_ENTRIES * MAC_ADDR_MAX_CAM_WCOUNT) + \
- (MAC_ADDR_MAX_MULTICAST_ENTRIES * MAC_ADDR_MAX_MULTICAST_WCOUNT) + \
- (MAC_ADDR_MAX_VLAN_ENTRIES * MAC_ADDR_MAX_VLAN_WCOUNT) + \
- (MAC_ADDR_MAX_MCAST_FLTR_ENTRIES * MAC_ADDR_MAX_MCAST_FLTR_WCOUNT) + \
- (MAC_ADDR_MAX_FC_MAC_ENTRIES * MAC_ADDR_MAX_FC_MAC_WCOUNT) + \
- (MAC_ADDR_MAX_MGMT_MAC_ENTRIES * MAC_ADDR_MAX_MGMT_MAC_WCOUNT) + \
- (MAC_ADDR_MAX_MGMT_VLAN_ENTRIES * MAC_ADDR_MAX_MGMT_VLAN_WCOUNT) + \
- (MAC_ADDR_MAX_MGMT_V4_ENTRIES * MAC_ADDR_MAX_MGMT_V4_WCOUNT) + \
- (MAC_ADDR_MAX_MGMT_V6_ENTRIES * MAC_ADDR_MAX_MGMT_V6_WCOUNT) + \
- (MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES * MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT))
-#define MAC_ADDR_DUMP_WORDS_PER_ENTRY 2
-#define MAC_ADDR_DUMP_TOT_WORDS (MAC_ADDR_DUMP_ENTRIES * \
- MAC_ADDR_DUMP_WORDS_PER_ENTRY)
-/* Maximum of 4 functions whose semaphore registers are
- * in the coredump.
- */
-#define MAX_SEMAPHORE_FUNCTIONS 4
-/* Defines for access the MPI shadow registers. */
-#define RISC_124 0x0003007c
-#define RISC_127 0x0003007f
-#define SHADOW_OFFSET 0xb0000000
-#define SHADOW_REG_SHIFT 20
-
-struct qlge_nic_misc {
- u32 rx_ring_count;
- u32 tx_ring_count;
- u32 intr_count;
- u32 function;
-};
-
-struct qlge_reg_dump {
- /* segment 0 */
- struct mpi_coredump_global_header mpi_global_header;
-
- /* segment 16 */
- struct mpi_coredump_segment_header nic_regs_seg_hdr;
- u32 nic_regs[64];
-
- /* segment 30 */
- struct mpi_coredump_segment_header misc_nic_seg_hdr;
- struct qlge_nic_misc misc_nic_info;
-
- /* segment 31 */
- /* one interrupt state for each CQ */
- struct mpi_coredump_segment_header intr_states_seg_hdr;
- u32 intr_states[MAX_CPUS];
-
- /* segment 32 */
- /* 3 cam words each for 16 unicast,
- * 2 cam words for each of 32 multicast.
- */
- struct mpi_coredump_segment_header cam_entries_seg_hdr;
- u32 cam_entries[(16 * 3) + (32 * 3)];
-
- /* segment 33 */
- struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
- u32 nic_routing_words[16];
-
- /* segment 34 */
- struct mpi_coredump_segment_header ets_seg_hdr;
- u32 ets[8 + 2];
-};
-
-struct qlge_mpi_coredump {
- /* segment 0 */
- struct mpi_coredump_global_header mpi_global_header;
-
- /* segment 1 */
- struct mpi_coredump_segment_header core_regs_seg_hdr;
- u32 mpi_core_regs[MPI_CORE_REGS_CNT];
- u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT];
-
- /* segment 2 */
- struct mpi_coredump_segment_header test_logic_regs_seg_hdr;
- u32 test_logic_regs[TEST_REGS_CNT];
-
- /* segment 3 */
- struct mpi_coredump_segment_header rmii_regs_seg_hdr;
- u32 rmii_regs[RMII_REGS_CNT];
-
- /* segment 4 */
- struct mpi_coredump_segment_header fcmac1_regs_seg_hdr;
- u32 fcmac1_regs[FCMAC_REGS_CNT];
-
- /* segment 5 */
- struct mpi_coredump_segment_header fcmac2_regs_seg_hdr;
- u32 fcmac2_regs[FCMAC_REGS_CNT];
-
- /* segment 6 */
- struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr;
- u32 fc1_mbx_regs[FC_MBX_REGS_CNT];
-
- /* segment 7 */
- struct mpi_coredump_segment_header ide_regs_seg_hdr;
- u32 ide_regs[IDE_REGS_CNT];
-
- /* segment 8 */
- struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr;
- u32 nic1_mbx_regs[NIC_MBX_REGS_CNT];
-
- /* segment 9 */
- struct mpi_coredump_segment_header smbus_regs_seg_hdr;
- u32 smbus_regs[SMBUS_REGS_CNT];
-
- /* segment 10 */
- struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr;
- u32 fc2_mbx_regs[FC_MBX_REGS_CNT];
-
- /* segment 11 */
- struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr;
- u32 nic2_mbx_regs[NIC_MBX_REGS_CNT];
-
- /* segment 12 */
- struct mpi_coredump_segment_header i2c_regs_seg_hdr;
- u32 i2c_regs[I2C_REGS_CNT];
- /* segment 13 */
- struct mpi_coredump_segment_header memc_regs_seg_hdr;
- u32 memc_regs[MEMC_REGS_CNT];
-
- /* segment 14 */
- struct mpi_coredump_segment_header pbus_regs_seg_hdr;
- u32 pbus_regs[PBUS_REGS_CNT];
-
- /* segment 15 */
- struct mpi_coredump_segment_header mde_regs_seg_hdr;
- u32 mde_regs[MDE_REGS_CNT];
-
- /* segment 16 */
- struct mpi_coredump_segment_header nic_regs_seg_hdr;
- u32 nic_regs[NIC_REGS_DUMP_WORD_COUNT];
-
- /* segment 17 */
- struct mpi_coredump_segment_header nic2_regs_seg_hdr;
- u32 nic2_regs[NIC_REGS_DUMP_WORD_COUNT];
-
- /* segment 18 */
- struct mpi_coredump_segment_header xgmac1_seg_hdr;
- u32 xgmac1[XGMAC_DUMP_WORD_COUNT];
-
- /* segment 19 */
- struct mpi_coredump_segment_header xgmac2_seg_hdr;
- u32 xgmac2[XGMAC_DUMP_WORD_COUNT];
-
- /* segment 20 */
- struct mpi_coredump_segment_header code_ram_seg_hdr;
- u32 code_ram[CODE_RAM_CNT];
-
- /* segment 21 */
- struct mpi_coredump_segment_header memc_ram_seg_hdr;
- u32 memc_ram[MEMC_RAM_CNT];
-
- /* segment 22 */
- struct mpi_coredump_segment_header xaui_an_hdr;
- u32 serdes_xaui_an[XG_SERDES_XAUI_AN_COUNT];
-
- /* segment 23 */
- struct mpi_coredump_segment_header xaui_hss_pcs_hdr;
- u32 serdes_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
-
- /* segment 24 */
- struct mpi_coredump_segment_header xfi_an_hdr;
- u32 serdes_xfi_an[XG_SERDES_XFI_AN_COUNT];
-
- /* segment 25 */
- struct mpi_coredump_segment_header xfi_train_hdr;
- u32 serdes_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
-
- /* segment 26 */
- struct mpi_coredump_segment_header xfi_hss_pcs_hdr;
- u32 serdes_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
-
- /* segment 27 */
- struct mpi_coredump_segment_header xfi_hss_tx_hdr;
- u32 serdes_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
-
- /* segment 28 */
- struct mpi_coredump_segment_header xfi_hss_rx_hdr;
- u32 serdes_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
-
- /* segment 29 */
- struct mpi_coredump_segment_header xfi_hss_pll_hdr;
- u32 serdes_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
-
- /* segment 30 */
- struct mpi_coredump_segment_header misc_nic_seg_hdr;
- struct qlge_nic_misc misc_nic_info;
-
- /* segment 31 */
- /* one interrupt state for each CQ */
- struct mpi_coredump_segment_header intr_states_seg_hdr;
- u32 intr_states[MAX_RX_RINGS];
-
- /* segment 32 */
- /* 3 cam words each for 16 unicast,
- * 2 cam words for each of 32 multicast.
- */
- struct mpi_coredump_segment_header cam_entries_seg_hdr;
- u32 cam_entries[(16 * 3) + (32 * 3)];
-
- /* segment 33 */
- struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
- u32 nic_routing_words[16];
- /* segment 34 */
- struct mpi_coredump_segment_header ets_seg_hdr;
- u32 ets[ETS_REGS_DUMP_WORD_COUNT];
-
- /* segment 35 */
- struct mpi_coredump_segment_header probe_dump_seg_hdr;
- u32 probe_dump[PRB_MX_DUMP_TOT_COUNT];
-
- /* segment 36 */
- struct mpi_coredump_segment_header routing_reg_seg_hdr;
- u32 routing_regs[RT_IDX_DUMP_TOT_WORDS];
-
- /* segment 37 */
- struct mpi_coredump_segment_header mac_prot_reg_seg_hdr;
- u32 mac_prot_regs[MAC_ADDR_DUMP_TOT_WORDS];
-
- /* segment 38 */
- struct mpi_coredump_segment_header xaui2_an_hdr;
- u32 serdes2_xaui_an[XG_SERDES_XAUI_AN_COUNT];
-
- /* segment 39 */
- struct mpi_coredump_segment_header xaui2_hss_pcs_hdr;
- u32 serdes2_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
-
- /* segment 40 */
- struct mpi_coredump_segment_header xfi2_an_hdr;
- u32 serdes2_xfi_an[XG_SERDES_XFI_AN_COUNT];
-
- /* segment 41 */
- struct mpi_coredump_segment_header xfi2_train_hdr;
- u32 serdes2_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
-
- /* segment 42 */
- struct mpi_coredump_segment_header xfi2_hss_pcs_hdr;
- u32 serdes2_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
-
- /* segment 43 */
- struct mpi_coredump_segment_header xfi2_hss_tx_hdr;
- u32 serdes2_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
-
- /* segment 44 */
- struct mpi_coredump_segment_header xfi2_hss_rx_hdr;
- u32 serdes2_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
-
- /* segment 45 */
- struct mpi_coredump_segment_header xfi2_hss_pll_hdr;
- u32 serdes2_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
-
- /* segment 50 */
- /* semaphore register for all 5 functions */
- struct mpi_coredump_segment_header sem_regs_seg_hdr;
- u32 sem_regs[MAX_SEMAPHORE_FUNCTIONS];
-};
-
-/*
- * intr_context structure is used during initialization
- * to hook the interrupts. It is also used in a single
- * irq environment as a context to the ISR.
- */
-struct intr_context {
- struct qlge_adapter *qdev;
- u32 intr;
- u32 irq_mask; /* Mask of which rings the vector services. */
- u32 hooked;
- u32 intr_en_mask; /* value/mask used to enable this intr */
- u32 intr_dis_mask; /* value/mask used to disable this intr */
- u32 intr_read_mask; /* value/mask used to read this intr */
- char name[IFNAMSIZ * 2];
- irq_handler_t handler;
-};
-
-/* adapter flags definitions. */
-enum {
- QL_ADAPTER_UP = 0, /* Adapter has been brought up. */
- QL_LEGACY_ENABLED = 1,
- QL_MSI_ENABLED = 2,
- QL_MSIX_ENABLED = 3,
- QL_DMA64 = 4,
- QL_PROMISCUOUS = 5,
- QL_ALLMULTI = 6,
- QL_PORT_CFG = 7,
- QL_CAM_RT_SET = 8,
- QL_SELFTEST = 9,
- QL_LB_LINK_UP = 10,
- QL_FRC_COREDUMP = 11,
- QL_EEH_FATAL = 12,
- QL_ASIC_RECOVERY = 14, /* We are in asic recovery. */
-};
-
-/* link_status bit definitions */
-enum {
- STS_LOOPBACK_MASK = 0x00000700,
- STS_LOOPBACK_PCS = 0x00000100,
- STS_LOOPBACK_HSS = 0x00000200,
- STS_LOOPBACK_EXT = 0x00000300,
- STS_PAUSE_MASK = 0x000000c0,
- STS_PAUSE_STD = 0x00000040,
- STS_PAUSE_PRI = 0x00000080,
- STS_SPEED_MASK = 0x00000038,
- STS_SPEED_100Mb = 0x00000000,
- STS_SPEED_1Gb = 0x00000008,
- STS_SPEED_10Gb = 0x00000010,
- STS_LINK_TYPE_MASK = 0x00000007,
- STS_LINK_TYPE_XFI = 0x00000001,
- STS_LINK_TYPE_XAUI = 0x00000002,
- STS_LINK_TYPE_XFI_BP = 0x00000003,
- STS_LINK_TYPE_XAUI_BP = 0x00000004,
- STS_LINK_TYPE_10GBASET = 0x00000005,
-};
-
-/* link_config bit definitions */
-enum {
- CFG_JUMBO_FRAME_SIZE = 0x00010000,
- CFG_PAUSE_MASK = 0x00000060,
- CFG_PAUSE_STD = 0x00000020,
- CFG_PAUSE_PRI = 0x00000040,
- CFG_DCBX = 0x00000010,
- CFG_LOOPBACK_MASK = 0x00000007,
- CFG_LOOPBACK_PCS = 0x00000002,
- CFG_LOOPBACK_HSS = 0x00000004,
- CFG_LOOPBACK_EXT = 0x00000006,
- CFG_DEFAULT_MAX_FRAME_SIZE = 0x00002580,
-};
-
-struct nic_operations {
- int (*get_flash)(struct qlge_adapter *qdev);
- int (*port_initialize)(struct qlge_adapter *qdev);
-};
-
-struct qlge_netdev_priv {
- struct qlge_adapter *qdev;
- struct net_device *ndev;
-};
-
-static inline
-struct qlge_adapter *netdev_to_qdev(struct net_device *ndev)
-{
- struct qlge_netdev_priv *ndev_priv = netdev_priv(ndev);
-
- return ndev_priv->qdev;
-}
-
-/*
- * The main Adapter structure definition.
- * This structure has all fields relevant to the hardware.
- */
-struct qlge_adapter {
- struct ricb ricb;
- unsigned long flags;
- u32 wol;
-
- struct nic_stats nic_stats;
-
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-
- /* PCI Configuration information for this device */
- struct pci_dev *pdev;
- struct net_device *ndev; /* Parent NET device */
-
- struct devlink_health_reporter *reporter;
- /* Hardware information */
- u32 chip_rev_id;
- u32 fw_rev_id;
- u32 func; /* PCI function for this adapter */
- u32 alt_func; /* PCI function for alternate adapter */
- u32 port; /* Port number for this adapter */
-
- spinlock_t adapter_lock;
- spinlock_t stats_lock;
-
- /* PCI Bus Relative Register Addresses */
- void __iomem *reg_base;
- void __iomem *doorbell_area;
- u32 doorbell_area_size;
-
- u32 msg_enable;
-
- /* Page for Shadow Registers */
- void *rx_ring_shadow_reg_area;
- dma_addr_t rx_ring_shadow_reg_dma;
- void *tx_ring_shadow_reg_area;
- dma_addr_t tx_ring_shadow_reg_dma;
-
- u32 mailbox_in;
- u32 mailbox_out;
- struct mbox_params idc_mbc;
- struct mutex mpi_mutex;
-
- int tx_ring_size;
- int rx_ring_size;
- u32 intr_count;
- struct msix_entry *msi_x_entry;
- struct intr_context intr_context[MAX_RX_RINGS];
-
- int tx_ring_count; /* One per online CPU. */
- u32 rss_ring_count; /* One per irq vector. */
- /*
- * rx_ring_count =
- * (CPU count * outbound completion rx_ring) +
- * (irq_vector_cnt * inbound (RSS) completion rx_ring)
- */
- int rx_ring_count;
- int ring_mem_size;
- void *ring_mem;
-
- struct rx_ring rx_ring[MAX_RX_RINGS];
- struct tx_ring tx_ring[MAX_TX_RINGS];
- unsigned int lbq_buf_order;
- u32 lbq_buf_size;
-
- int rx_csum;
- u32 default_rx_queue;
-
- u16 rx_coalesce_usecs; /* cqicb->int_delay */
- u16 rx_max_coalesced_frames; /* cqicb->pkt_int_delay */
- u16 tx_coalesce_usecs; /* cqicb->int_delay */
- u16 tx_max_coalesced_frames; /* cqicb->pkt_int_delay */
-
- u32 xg_sem_mask;
- u32 port_link_up;
- u32 port_init;
- u32 link_status;
- struct qlge_mpi_coredump *mpi_coredump;
- u32 link_config;
- u32 led_config;
- u32 max_frame_size;
-
- union flash_params flash;
-
- struct workqueue_struct *workqueue;
- struct delayed_work asic_reset_work;
- struct delayed_work mpi_reset_work;
- struct delayed_work mpi_work;
- struct delayed_work mpi_port_cfg_work;
- struct delayed_work mpi_idc_work;
- struct completion ide_completion;
- const struct nic_operations *nic_ops;
- u16 device_id;
- struct timer_list timer;
- atomic_t lb_count;
- /* Keep local copy of current mac address. */
- char current_mac_addr[ETH_ALEN];
-};
-
-/*
- * Typical Register accessor for memory mapped device.
- */
-static inline u32 qlge_read32(const struct qlge_adapter *qdev, int reg)
-{
- return readl(qdev->reg_base + reg);
-}
-
-/*
- * Typical Register accessor for memory mapped device.
- */
-static inline void qlge_write32(const struct qlge_adapter *qdev, int reg, u32 val)
-{
- writel(val, qdev->reg_base + reg);
-}
-
-/*
- * Doorbell Registers:
- * Doorbell registers are virtual registers in the PCI memory space.
- * The space is allocated by the chip during PCI initialization. The
- * device driver finds the doorbell address in BAR 3 in PCI config space.
- * The registers are used to control outbound and inbound queues. For
- * example, the producer index for an outbound queue. Each queue uses
- * 1 4k chunk of memory. The lower half of the space is for outbound
- * queues. The upper half is for inbound queues.
- */
-static inline void qlge_write_db_reg(u32 val, void __iomem *addr)
-{
- writel(val, addr);
-}
-
-/*
- * Doorbell Registers:
- * Doorbell registers are virtual registers in the PCI memory space.
- * The space is allocated by the chip during PCI initialization. The
- * device driver finds the doorbell address in BAR 3 in PCI config space.
- * The registers are used to control outbound and inbound queues. For
- * example, the producer index for an outbound queue. Each queue uses
- * 1 4k chunk of memory. The lower half of the space is for outbound
- * queues. The upper half is for inbound queues.
- * Caller has to guarantee ordering.
- */
-static inline void qlge_write_db_reg_relaxed(u32 val, void __iomem *addr)
-{
- writel_relaxed(val, addr);
-}
-
-/*
- * Shadow Registers:
- * Outbound queues have a consumer index that is maintained by the chip.
- * Inbound queues have a producer index that is maintained by the chip.
- * For lower overhead, these registers are "shadowed" to host memory
- * which allows the device driver to track the queue progress without
- * PCI reads. When an entry is placed on an inbound queue, the chip will
- * update the relevant index register and then copy the value to the
- * shadow register in host memory.
- */
-static inline u32 qlge_read_sh_reg(__le32 *addr)
-{
- u32 reg;
-
- reg = le32_to_cpu(*addr);
- rmb();
- return reg;
-}
-
-extern char qlge_driver_name[];
-extern const char qlge_driver_version[];
-extern const struct ethtool_ops qlge_ethtool_ops;
-
-int qlge_sem_spinlock(struct qlge_adapter *qdev, u32 sem_mask);
-void qlge_sem_unlock(struct qlge_adapter *qdev, u32 sem_mask);
-int qlge_read_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 *data);
-int qlge_get_mac_addr_reg(struct qlge_adapter *qdev, u32 type, u16 index,
- u32 *value);
-int qlge_get_routing_reg(struct qlge_adapter *qdev, u32 index, u32 *value);
-int qlge_write_cfg(struct qlge_adapter *qdev, void *ptr, int size, u32 bit,
- u16 q_id);
-void qlge_queue_fw_error(struct qlge_adapter *qdev);
-void qlge_mpi_work(struct work_struct *work);
-void qlge_mpi_reset_work(struct work_struct *work);
-int qlge_wait_reg_rdy(struct qlge_adapter *qdev, u32 reg, u32 bit, u32 ebit);
-void qlge_queue_asic_error(struct qlge_adapter *qdev);
-void qlge_set_ethtool_ops(struct net_device *ndev);
-int qlge_read_xgmac_reg64(struct qlge_adapter *qdev, u32 reg, u64 *data);
-void qlge_mpi_idc_work(struct work_struct *work);
-void qlge_mpi_port_cfg_work(struct work_struct *work);
-int qlge_mb_get_fw_state(struct qlge_adapter *qdev);
-int qlge_cam_route_initialize(struct qlge_adapter *qdev);
-int qlge_read_mpi_reg(struct qlge_adapter *qdev, u32 reg, u32 *data);
-int qlge_write_mpi_reg(struct qlge_adapter *qdev, u32 reg, u32 data);
-int qlge_unpause_mpi_risc(struct qlge_adapter *qdev);
-int qlge_pause_mpi_risc(struct qlge_adapter *qdev);
-int qlge_hard_reset_mpi_risc(struct qlge_adapter *qdev);
-int qlge_soft_reset_mpi_risc(struct qlge_adapter *qdev);
-int qlge_dump_risc_ram_area(struct qlge_adapter *qdev, void *buf, u32 ram_addr,
- int word_count);
-int qlge_core_dump(struct qlge_adapter *qdev, struct qlge_mpi_coredump *mpi_coredump);
-int qlge_mb_about_fw(struct qlge_adapter *qdev);
-int qlge_mb_wol_set_magic(struct qlge_adapter *qdev, u32 enable_wol);
-int qlge_mb_wol_mode(struct qlge_adapter *qdev, u32 wol);
-int qlge_mb_set_led_cfg(struct qlge_adapter *qdev, u32 led_config);
-int qlge_mb_get_led_cfg(struct qlge_adapter *qdev);
-void qlge_link_on(struct qlge_adapter *qdev);
-void qlge_link_off(struct qlge_adapter *qdev);
-int qlge_mb_set_mgmnt_traffic_ctl(struct qlge_adapter *qdev, u32 control);
-int qlge_mb_get_port_cfg(struct qlge_adapter *qdev);
-int qlge_mb_set_port_cfg(struct qlge_adapter *qdev);
-int qlge_wait_fifo_empty(struct qlge_adapter *qdev);
-void qlge_get_dump(struct qlge_adapter *qdev, void *buff);
-netdev_tx_t qlge_lb_send(struct sk_buff *skb, struct net_device *ndev);
-void qlge_check_lb_frame(struct qlge_adapter *qdev, struct sk_buff *skb);
-int qlge_own_firmware(struct qlge_adapter *qdev);
-int qlge_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
-
-#endif /* _QLGE_H_ */
diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c
deleted file mode 100644
index c7e865f515cf..000000000000
--- a/drivers/staging/qlge/qlge_dbg.c
+++ /dev/null
@@ -1,1311 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/slab.h>
-
-#include "qlge.h"
-
-/* Read a NIC register from the alternate function. */
-static u32 qlge_read_other_func_reg(struct qlge_adapter *qdev,
- u32 reg)
-{
- u32 register_to_read;
- u32 reg_val;
- unsigned int status = 0;
-
- register_to_read = MPI_NIC_REG_BLOCK
- | MPI_NIC_READ
- | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
- | reg;
- status = qlge_read_mpi_reg(qdev, register_to_read, &reg_val);
- if (status != 0)
- return 0xffffffff;
-
- return reg_val;
-}
-
-/* Write a NIC register from the alternate function. */
-static int qlge_write_other_func_reg(struct qlge_adapter *qdev,
- u32 reg, u32 reg_val)
-{
- u32 register_to_read;
-
- register_to_read = MPI_NIC_REG_BLOCK
- | MPI_NIC_READ
- | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
- | reg;
-
- return qlge_write_mpi_reg(qdev, register_to_read, reg_val);
-}
-
-static int qlge_wait_other_func_reg_rdy(struct qlge_adapter *qdev, u32 reg,
- u32 bit, u32 err_bit)
-{
- u32 temp;
- int count;
-
- for (count = 10; count; count--) {
- temp = qlge_read_other_func_reg(qdev, reg);
-
- /* check for errors */
- if (temp & err_bit)
- return -1;
- else if (temp & bit)
- return 0;
- mdelay(10);
- }
- return -1;
-}
-
-static int qlge_read_other_func_serdes_reg(struct qlge_adapter *qdev, u32 reg,
- u32 *data)
-{
- int status;
-
- /* wait for reg to come ready */
- status = qlge_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
- XG_SERDES_ADDR_RDY, 0);
- if (status)
- goto exit;
-
- /* set up for reg read */
- qlge_write_other_func_reg(qdev, XG_SERDES_ADDR / 4, reg | PROC_ADDR_R);
-
- /* wait for reg to come ready */
- status = qlge_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
- XG_SERDES_ADDR_RDY, 0);
- if (status)
- goto exit;
-
- /* get the data */
- *data = qlge_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
-exit:
- return status;
-}
-
-/* Read out the SERDES registers */
-static int qlge_read_serdes_reg(struct qlge_adapter *qdev, u32 reg, u32 *data)
-{
- int status;
-
- /* wait for reg to come ready */
- status = qlge_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
- if (status)
- goto exit;
-
- /* set up for reg read */
- qlge_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
-
- /* wait for reg to come ready */
- status = qlge_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
- if (status)
- goto exit;
-
- /* get the data */
- *data = qlge_read32(qdev, XG_SERDES_DATA);
-exit:
- return status;
-}
-
-static void qlge_get_both_serdes(struct qlge_adapter *qdev, u32 addr,
- u32 *direct_ptr, u32 *indirect_ptr,
- bool direct_valid, bool indirect_valid)
-{
- unsigned int status;
-
- status = 1;
- if (direct_valid)
- status = qlge_read_serdes_reg(qdev, addr, direct_ptr);
- /* Dead fill any failures or invalids. */
- if (status)
- *direct_ptr = 0xDEADBEEF;
-
- status = 1;
- if (indirect_valid)
- status = qlge_read_other_func_serdes_reg(qdev, addr,
- indirect_ptr);
- /* Dead fill any failures or invalids. */
- if (status)
- *indirect_ptr = 0xDEADBEEF;
-}
-
-static int qlge_get_serdes_regs(struct qlge_adapter *qdev,
- struct qlge_mpi_coredump *mpi_coredump)
-{
- int status;
- bool xfi_direct_valid = false, xfi_indirect_valid = false;
- bool xaui_direct_valid = true, xaui_indirect_valid = true;
- unsigned int i;
- u32 *direct_ptr, temp;
- u32 *indirect_ptr;
-
- /* The XAUI needs to be read out per port */
- status = qlge_read_other_func_serdes_reg(qdev,
- XG_SERDES_XAUI_HSS_PCS_START,
- &temp);
- if (status)
- temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
-
- if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
- XG_SERDES_ADDR_XAUI_PWR_DOWN)
- xaui_indirect_valid = false;
-
- status = qlge_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp);
-
- if (status)
- temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
-
- if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
- XG_SERDES_ADDR_XAUI_PWR_DOWN)
- xaui_direct_valid = false;
-
- /*
- * XFI register is shared, so we only need to read one
- * function and then check the bits.
- */
- status = qlge_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
- if (status)
- temp = 0;
-
- if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
- XG_SERDES_ADDR_XFI1_PWR_UP) {
- /* now see if I'm NIC 1 or NIC 2 */
- if (qdev->func & 1)
- /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
- xfi_indirect_valid = true;
- else
- xfi_direct_valid = true;
- }
- if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
- XG_SERDES_ADDR_XFI2_PWR_UP) {
- /* now see if I'm NIC 1 or NIC 2 */
- if (qdev->func & 1)
- /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
- xfi_direct_valid = true;
- else
- xfi_indirect_valid = true;
- }
-
- /* Get XAUI_AN register block. */
- if (qdev->func & 1) {
- /* Function 2 is direct */
- direct_ptr = mpi_coredump->serdes2_xaui_an;
- indirect_ptr = mpi_coredump->serdes_xaui_an;
- } else {
- /* Function 1 is direct */
- direct_ptr = mpi_coredump->serdes_xaui_an;
- indirect_ptr = mpi_coredump->serdes2_xaui_an;
- }
-
- for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
- qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xaui_direct_valid, xaui_indirect_valid);
-
- /* Get XAUI_HSS_PCS register block. */
- if (qdev->func & 1) {
- direct_ptr =
- mpi_coredump->serdes2_xaui_hss_pcs;
- indirect_ptr =
- mpi_coredump->serdes_xaui_hss_pcs;
- } else {
- direct_ptr =
- mpi_coredump->serdes_xaui_hss_pcs;
- indirect_ptr =
- mpi_coredump->serdes2_xaui_hss_pcs;
- }
-
- for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
- qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xaui_direct_valid, xaui_indirect_valid);
-
- /* Get XAUI_XFI_AN register block. */
- if (qdev->func & 1) {
- direct_ptr = mpi_coredump->serdes2_xfi_an;
- indirect_ptr = mpi_coredump->serdes_xfi_an;
- } else {
- direct_ptr = mpi_coredump->serdes_xfi_an;
- indirect_ptr = mpi_coredump->serdes2_xfi_an;
- }
-
- for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
- qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
-
- /* Get XAUI_XFI_TRAIN register block. */
- if (qdev->func & 1) {
- direct_ptr = mpi_coredump->serdes2_xfi_train;
- indirect_ptr =
- mpi_coredump->serdes_xfi_train;
- } else {
- direct_ptr = mpi_coredump->serdes_xfi_train;
- indirect_ptr =
- mpi_coredump->serdes2_xfi_train;
- }
-
- for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
- qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
-
- /* Get XAUI_XFI_HSS_PCS register block. */
- if (qdev->func & 1) {
- direct_ptr =
- mpi_coredump->serdes2_xfi_hss_pcs;
- indirect_ptr =
- mpi_coredump->serdes_xfi_hss_pcs;
- } else {
- direct_ptr =
- mpi_coredump->serdes_xfi_hss_pcs;
- indirect_ptr =
- mpi_coredump->serdes2_xfi_hss_pcs;
- }
-
- for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
- qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
-
- /* Get XAUI_XFI_HSS_TX register block. */
- if (qdev->func & 1) {
- direct_ptr =
- mpi_coredump->serdes2_xfi_hss_tx;
- indirect_ptr =
- mpi_coredump->serdes_xfi_hss_tx;
- } else {
- direct_ptr = mpi_coredump->serdes_xfi_hss_tx;
- indirect_ptr =
- mpi_coredump->serdes2_xfi_hss_tx;
- }
- for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
- qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
-
- /* Get XAUI_XFI_HSS_RX register block. */
- if (qdev->func & 1) {
- direct_ptr =
- mpi_coredump->serdes2_xfi_hss_rx;
- indirect_ptr =
- mpi_coredump->serdes_xfi_hss_rx;
- } else {
- direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
- indirect_ptr =
- mpi_coredump->serdes2_xfi_hss_rx;
- }
-
- for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
- qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
-
- /* Get XAUI_XFI_HSS_PLL register block. */
- if (qdev->func & 1) {
- direct_ptr =
- mpi_coredump->serdes2_xfi_hss_pll;
- indirect_ptr =
- mpi_coredump->serdes_xfi_hss_pll;
- } else {
- direct_ptr =
- mpi_coredump->serdes_xfi_hss_pll;
- indirect_ptr =
- mpi_coredump->serdes2_xfi_hss_pll;
- }
- for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
- qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
- return 0;
-}
-
-static int qlge_read_other_func_xgmac_reg(struct qlge_adapter *qdev, u32 reg,
- u32 *data)
-{
- int status = 0;
-
- /* wait for reg to come ready */
- status = qlge_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
- XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
- if (status)
- goto exit;
-
- /* set up for reg read */
- qlge_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);
-
- /* wait for reg to come ready */
- status = qlge_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
- XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
- if (status)
- goto exit;
-
- /* get the data */
- *data = qlge_read_other_func_reg(qdev, XGMAC_DATA / 4);
-exit:
- return status;
-}
-
-/* Read the 400 xgmac control/statistics registers
- * skipping unused locations.
- */
-static int qlge_get_xgmac_regs(struct qlge_adapter *qdev, u32 *buf,
- unsigned int other_function)
-{
- int status = 0;
- int i;
-
- for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
- /* We're reading 400 xgmac registers, but we filter out
- * several locations that are non-responsive to reads.
- */
- if ((i == 0x00000114) || (i == 0x00000118) ||
- (i == 0x0000013c) || (i == 0x00000140) ||
- (i > 0x00000150 && i < 0x000001fc) ||
- (i > 0x00000278 && i < 0x000002a0) ||
- (i > 0x000002c0 && i < 0x000002cf) ||
- (i > 0x000002dc && i < 0x000002f0) ||
- (i > 0x000003c8 && i < 0x00000400) ||
- (i > 0x00000400 && i < 0x00000410) ||
- (i > 0x00000410 && i < 0x00000420) ||
- (i > 0x00000420 && i < 0x00000430) ||
- (i > 0x00000430 && i < 0x00000440) ||
- (i > 0x00000440 && i < 0x00000450) ||
- (i > 0x00000450 && i < 0x00000500) ||
- (i > 0x0000054c && i < 0x00000568) ||
- (i > 0x000005c8 && i < 0x00000600)) {
- if (other_function)
- status = qlge_read_other_func_xgmac_reg(qdev, i, buf);
- else
- status = qlge_read_xgmac_reg(qdev, i, buf);
-
- if (status)
- *buf = 0xdeadbeef;
- break;
- }
- }
- return status;
-}
-
-static int qlge_get_ets_regs(struct qlge_adapter *qdev, u32 *buf)
-{
- int i;
-
- for (i = 0; i < 8; i++, buf++) {
- qlge_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
- *buf = qlge_read32(qdev, NIC_ETS);
- }
-
- for (i = 0; i < 2; i++, buf++) {
- qlge_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
- *buf = qlge_read32(qdev, CNA_ETS);
- }
-
- return 0;
-}
-
-static void qlge_get_intr_states(struct qlge_adapter *qdev, u32 *buf)
-{
- int i;
-
- for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
- qlge_write32(qdev, INTR_EN,
- qdev->intr_context[i].intr_read_mask);
- *buf = qlge_read32(qdev, INTR_EN);
- }
-}
-
-static int qlge_get_cam_entries(struct qlge_adapter *qdev, u32 *buf)
-{
- int i, status;
- u32 value[3];
-
- status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- return status;
-
- for (i = 0; i < 16; i++) {
- status = qlge_get_mac_addr_reg(qdev,
- MAC_ADDR_TYPE_CAM_MAC, i, value);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed read of mac index register\n");
- goto err;
- }
- *buf++ = value[0]; /* lower MAC address */
- *buf++ = value[1]; /* upper MAC address */
- *buf++ = value[2]; /* output */
- }
- for (i = 0; i < 32; i++) {
- status = qlge_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_MULTI_MAC,
- i, value);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed read of mac index register\n");
- goto err;
- }
- *buf++ = value[0]; /* lower Mcast address */
- *buf++ = value[1]; /* upper Mcast address */
- }
-err:
- qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
- return status;
-}
-
-static int qlge_get_routing_entries(struct qlge_adapter *qdev, u32 *buf)
-{
- int status;
- u32 value, i;
-
- status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
- if (status)
- return status;
-
- for (i = 0; i < 16; i++) {
- status = qlge_get_routing_reg(qdev, i, &value);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed read of routing index register\n");
- goto err;
- } else {
- *buf++ = value;
- }
- }
-err:
- qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
- return status;
-}
-
-/* Read the MPI Processor shadow registers */
-static int qlge_get_mpi_shadow_regs(struct qlge_adapter *qdev, u32 *buf)
-{
- u32 i;
- int status;
-
- for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
- status = qlge_write_mpi_reg(qdev,
- RISC_124,
- (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
- if (status)
- goto end;
- status = qlge_read_mpi_reg(qdev, RISC_127, buf);
- if (status)
- goto end;
- }
-end:
- return status;
-}
-
-/* Read the MPI Processor core registers */
-static int qlge_get_mpi_regs(struct qlge_adapter *qdev, u32 *buf,
- u32 offset, u32 count)
-{
- int i, status = 0;
-
- for (i = 0; i < count; i++, buf++) {
- status = qlge_read_mpi_reg(qdev, offset + i, buf);
- if (status)
- return status;
- }
- return status;
-}
-
-/* Read the ASIC probe dump */
-static unsigned int *qlge_get_probe(struct qlge_adapter *qdev, u32 clock,
- u32 valid, u32 *buf)
-{
- u32 module, mux_sel, probe, lo_val, hi_val;
-
- for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) {
- if (!((valid >> module) & 1))
- continue;
- for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) {
- probe = clock
- | PRB_MX_ADDR_ARE
- | mux_sel
- | (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
- qlge_write32(qdev, PRB_MX_ADDR, probe);
- lo_val = qlge_read32(qdev, PRB_MX_DATA);
- if (mux_sel == 0) {
- *buf = probe;
- buf++;
- }
- probe |= PRB_MX_ADDR_UP;
- qlge_write32(qdev, PRB_MX_ADDR, probe);
- hi_val = qlge_read32(qdev, PRB_MX_DATA);
- *buf = lo_val;
- buf++;
- *buf = hi_val;
- buf++;
- }
- }
- return buf;
-}
-
-static int qlge_get_probe_dump(struct qlge_adapter *qdev, unsigned int *buf)
-{
- /* First we have to enable the probe mux */
- qlge_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
- buf = qlge_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
- PRB_MX_ADDR_VALID_SYS_MOD, buf);
- buf = qlge_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
- PRB_MX_ADDR_VALID_PCI_MOD, buf);
- buf = qlge_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
- PRB_MX_ADDR_VALID_XGM_MOD, buf);
- buf = qlge_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
- PRB_MX_ADDR_VALID_FC_MOD, buf);
- return 0;
-}
-
-/* Read out the routing index registers */
-static int qlge_get_routing_index_registers(struct qlge_adapter *qdev, u32 *buf)
-{
- int status;
- u32 type, index, index_max;
- u32 result_index;
- u32 result_data;
- u32 val;
-
- status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
- if (status)
- return status;
-
- for (type = 0; type < 4; type++) {
- if (type < 2)
- index_max = 8;
- else
- index_max = 16;
- for (index = 0; index < index_max; index++) {
- val = RT_IDX_RS
- | (type << RT_IDX_TYPE_SHIFT)
- | (index << RT_IDX_IDX_SHIFT);
- qlge_write32(qdev, RT_IDX, val);
- result_index = 0;
- while ((result_index & RT_IDX_MR) == 0)
- result_index = qlge_read32(qdev, RT_IDX);
- result_data = qlge_read32(qdev, RT_DATA);
- *buf = type;
- buf++;
- *buf = index;
- buf++;
- *buf = result_index;
- buf++;
- *buf = result_data;
- buf++;
- }
- }
- qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
- return status;
-}
-
-/* Read out the MAC protocol registers */
-static void qlge_get_mac_protocol_registers(struct qlge_adapter *qdev, u32 *buf)
-{
- u32 result_index, result_data;
- u32 type;
- u32 index;
- u32 offset;
- u32 val;
- u32 initial_val = MAC_ADDR_RS;
- u32 max_index;
- u32 max_offset;
-
- for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
- switch (type) {
- case 0: /* CAM */
- initial_val |= MAC_ADDR_ADR;
- max_index = MAC_ADDR_MAX_CAM_ENTRIES;
- max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
- break;
- case 1: /* Multicast MAC Address */
- max_index = MAC_ADDR_MAX_CAM_WCOUNT;
- max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
- break;
- case 2: /* VLAN filter mask */
- case 3: /* MC filter mask */
- max_index = MAC_ADDR_MAX_CAM_WCOUNT;
- max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
- break;
- case 4: /* FC MAC addresses */
- max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
- max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
- break;
- case 5: /* Mgmt MAC addresses */
- max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
- max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
- break;
- case 6: /* Mgmt VLAN addresses */
- max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
- max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
- break;
- case 7: /* Mgmt IPv4 address */
- max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
- max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
- break;
- case 8: /* Mgmt IPv6 address */
- max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
- max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
- break;
- case 9: /* Mgmt TCP/UDP Dest port */
- max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
- max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
- break;
- default:
- netdev_err(qdev->ndev, "Bad type!!! 0x%08x\n", type);
- max_index = 0;
- max_offset = 0;
- break;
- }
- for (index = 0; index < max_index; index++) {
- for (offset = 0; offset < max_offset; offset++) {
- val = initial_val
- | (type << MAC_ADDR_TYPE_SHIFT)
- | (index << MAC_ADDR_IDX_SHIFT)
- | (offset);
- qlge_write32(qdev, MAC_ADDR_IDX, val);
- result_index = 0;
- while ((result_index & MAC_ADDR_MR) == 0) {
- result_index = qlge_read32(qdev,
- MAC_ADDR_IDX);
- }
- result_data = qlge_read32(qdev, MAC_ADDR_DATA);
- *buf = result_index;
- buf++;
- *buf = result_data;
- buf++;
- }
- }
- }
-}
-
-static void qlge_get_sem_registers(struct qlge_adapter *qdev, u32 *buf)
-{
- u32 func_num, reg, reg_val;
- int status;
-
- for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) {
- reg = MPI_NIC_REG_BLOCK
- | (func_num << MPI_NIC_FUNCTION_SHIFT)
- | (SEM / 4);
- status = qlge_read_mpi_reg(qdev, reg, &reg_val);
- *buf = reg_val;
- /* if the read failed then dead fill the element. */
- if (status)
- *buf = 0xdeadbeef;
- buf++;
- }
-}
-
-/* Create a coredump segment header */
-static void qlge_build_coredump_seg_header(struct mpi_coredump_segment_header *seg_hdr,
- u32 seg_number, u32 seg_size, u8 *desc)
-{
- memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
- seg_hdr->cookie = MPI_COREDUMP_COOKIE;
- seg_hdr->seg_num = seg_number;
- seg_hdr->seg_size = seg_size;
- strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
-}
-
-/*
- * This function should be called when a coredump / probedump
- * is to be extracted from the HBA. It is assumed there is a
- * qdev structure that contains the base address of the register
- * space for this function as well as a coredump structure that
- * will contain the dump.
- */
-int qlge_core_dump(struct qlge_adapter *qdev, struct qlge_mpi_coredump *mpi_coredump)
-{
- int status;
- int i;
-
- if (!mpi_coredump) {
- netif_err(qdev, drv, qdev->ndev, "No memory allocated\n");
- return -EINVAL;
- }
-
- /* Try to get the spinlock, but don't worry if
- * it isn't available. If the firmware died it
- * might be holding the sem.
- */
- qlge_sem_spinlock(qdev, SEM_PROC_REG_MASK);
-
- status = qlge_pause_mpi_risc(qdev);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed RISC pause. Status = 0x%.08x\n", status);
- goto err;
- }
-
- /* Insert the global header */
- memset(&mpi_coredump->mpi_global_header, 0,
- sizeof(struct mpi_coredump_global_header));
- mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
- mpi_coredump->mpi_global_header.header_size =
- sizeof(struct mpi_coredump_global_header);
- mpi_coredump->mpi_global_header.image_size =
- sizeof(struct qlge_mpi_coredump);
- strncpy(mpi_coredump->mpi_global_header.id_string, "MPI Coredump",
- sizeof(mpi_coredump->mpi_global_header.id_string));
-
- /* Get generic NIC reg dump */
- qlge_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
- NIC1_CONTROL_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
-
- qlge_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
- NIC2_CONTROL_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
-
- /* Get XGMac registers. (Segment 18, Rev C. step 21) */
- qlge_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
- NIC1_XGMAC_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
-
- qlge_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
- NIC2_XGMAC_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
-
- if (qdev->func & 1) {
- /* Odd means our function is NIC 2 */
- for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
- mpi_coredump->nic2_regs[i] =
- qlge_read32(qdev, i * sizeof(u32));
-
- for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
- mpi_coredump->nic_regs[i] =
- qlge_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
-
- qlge_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
- qlge_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
- } else {
- /* Even means our function is NIC 1 */
- for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
- mpi_coredump->nic_regs[i] =
- qlge_read32(qdev, i * sizeof(u32));
- for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
- mpi_coredump->nic2_regs[i] =
- qlge_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
-
- qlge_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
- qlge_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
- }
-
- /* Rev C. Step 20a */
- qlge_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
- XAUI_AN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xaui_an),
- "XAUI AN Registers");
-
- /* Rev C. Step 20b */
- qlge_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
- XAUI_HSS_PCS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xaui_hss_pcs),
- "XAUI HSS PCS Registers");
-
- qlge_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xfi_an),
- "XFI AN Registers");
-
- qlge_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
- XFI_TRAIN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xfi_train),
- "XFI TRAIN Registers");
-
- qlge_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
- XFI_HSS_PCS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xfi_hss_pcs),
- "XFI HSS PCS Registers");
-
- qlge_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
- XFI_HSS_TX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xfi_hss_tx),
- "XFI HSS TX Registers");
-
- qlge_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
- XFI_HSS_RX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xfi_hss_rx),
- "XFI HSS RX Registers");
-
- qlge_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
- XFI_HSS_PLL_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xfi_hss_pll),
- "XFI HSS PLL Registers");
-
- qlge_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
- XAUI2_AN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xaui_an),
- "XAUI2 AN Registers");
-
- qlge_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
- XAUI2_HSS_PCS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
- "XAUI2 HSS PCS Registers");
-
- qlge_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
- XFI2_AN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xfi_an),
- "XFI2 AN Registers");
-
- qlge_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
- XFI2_TRAIN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xfi_train),
- "XFI2 TRAIN Registers");
-
- qlge_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
- XFI2_HSS_PCS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
- "XFI2 HSS PCS Registers");
-
- qlge_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
- XFI2_HSS_TX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xfi_hss_tx),
- "XFI2 HSS TX Registers");
-
- qlge_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
- XFI2_HSS_RX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xfi_hss_rx),
- "XFI2 HSS RX Registers");
-
- qlge_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
- XFI2_HSS_PLL_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xfi_hss_pll),
- "XFI2 HSS PLL Registers");
-
- status = qlge_get_serdes_regs(qdev, mpi_coredump);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
- status);
- goto err;
- }
-
- qlge_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
- CORE_SEG_NUM,
- sizeof(mpi_coredump->core_regs_seg_hdr) +
- sizeof(mpi_coredump->mpi_core_regs) +
- sizeof(mpi_coredump->mpi_core_sh_regs),
- "Core Registers");
-
- /* Get the MPI Core Registers */
- status = qlge_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
- MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
- if (status)
- goto err;
- /* Get the 16 MPI shadow registers */
- status = qlge_get_mpi_shadow_regs(qdev,
- &mpi_coredump->mpi_core_sh_regs[0]);
- if (status)
- goto err;
-
- /* Get the Test Logic Registers */
- qlge_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
- TEST_LOGIC_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->test_logic_regs),
- "Test Logic Regs");
- status = qlge_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
- TEST_REGS_ADDR, TEST_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the RMII Registers */
- qlge_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
- RMII_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->rmii_regs),
- "RMII Registers");
- status = qlge_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
- RMII_REGS_ADDR, RMII_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the FCMAC1 Registers */
- qlge_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
- FCMAC1_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->fcmac1_regs),
- "FCMAC1 Registers");
- status = qlge_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
- FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the FCMAC2 Registers */
-
- qlge_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
- FCMAC2_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->fcmac2_regs),
- "FCMAC2 Registers");
-
- status = qlge_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
- FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the FC1 MBX Registers */
- qlge_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
- FC1_MBOX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->fc1_mbx_regs),
- "FC1 MBox Regs");
- status = qlge_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
- FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the IDE Registers */
- qlge_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
- IDE_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->ide_regs),
- "IDE Registers");
- status = qlge_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
- IDE_REGS_ADDR, IDE_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the NIC1 MBX Registers */
- qlge_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
- NIC1_MBOX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->nic1_mbx_regs),
- "NIC1 MBox Regs");
- status = qlge_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
- NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the SMBus Registers */
- qlge_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
- SMBUS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->smbus_regs),
- "SMBus Registers");
- status = qlge_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
- SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the FC2 MBX Registers */
- qlge_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
- FC2_MBOX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->fc2_mbx_regs),
- "FC2 MBox Regs");
- status = qlge_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
- FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the NIC2 MBX Registers */
- qlge_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
- NIC2_MBOX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->nic2_mbx_regs),
- "NIC2 MBox Regs");
- status = qlge_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
- NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the I2C Registers */
- qlge_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
- I2C_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->i2c_regs),
- "I2C Registers");
- status = qlge_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
- I2C_REGS_ADDR, I2C_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the MEMC Registers */
- qlge_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
- MEMC_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->memc_regs),
- "MEMC Registers");
- status = qlge_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
- MEMC_REGS_ADDR, MEMC_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the PBus Registers */
- qlge_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
- PBUS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->pbus_regs),
- "PBUS Registers");
- status = qlge_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
- PBUS_REGS_ADDR, PBUS_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the MDE Registers */
- qlge_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
- MDE_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->mde_regs),
- "MDE Registers");
- status = qlge_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
- MDE_REGS_ADDR, MDE_REGS_CNT);
- if (status)
- goto err;
-
- qlge_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
- MISC_NIC_INFO_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->misc_nic_info),
- "MISC NIC INFO");
- mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
- mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
- mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
- mpi_coredump->misc_nic_info.function = qdev->func;
-
- /* Segment 31 */
- /* Get indexed register values. */
- qlge_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
- INTR_STATES_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->intr_states),
- "INTR States");
- qlge_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
-
- qlge_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
- CAM_ENTRIES_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->cam_entries),
- "CAM Entries");
- status = qlge_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
- if (status)
- goto err;
-
- qlge_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
- ROUTING_WORDS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->nic_routing_words),
- "Routing Words");
- status = qlge_get_routing_entries(qdev,
- &mpi_coredump->nic_routing_words[0]);
- if (status)
- goto err;
-
- /* Segment 34 (Rev C. step 23) */
- qlge_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
- ETS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->ets),
- "ETS Registers");
- status = qlge_get_ets_regs(qdev, &mpi_coredump->ets[0]);
- if (status)
- goto err;
-
- qlge_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
- PROBE_DUMP_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->probe_dump),
- "Probe Dump");
- qlge_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
-
- qlge_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
- ROUTING_INDEX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->routing_regs),
- "Routing Regs");
- status = qlge_get_routing_index_registers(qdev,
- &mpi_coredump->routing_regs[0]);
- if (status)
- goto err;
-
- qlge_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
- MAC_PROTOCOL_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->mac_prot_regs),
- "MAC Prot Regs");
- qlge_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
-
- /* Get the semaphore registers for all 5 functions */
- qlge_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
- SEM_REGS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->sem_regs), "Sem Registers");
-
- qlge_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
-
- /* Prevent the mpi restarting while we dump the memory. */
- qlge_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
-
- /* clear the pause */
- status = qlge_unpause_mpi_risc(qdev);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed RISC unpause. Status = 0x%.08x\n", status);
- goto err;
- }
-
- /* Reset the RISC so we can dump RAM */
- status = qlge_hard_reset_mpi_risc(qdev);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed RISC reset. Status = 0x%.08x\n", status);
- goto err;
- }
-
- qlge_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
- WCS_RAM_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->code_ram),
- "WCS RAM");
- status = qlge_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
- CODE_RAM_ADDR, CODE_RAM_CNT);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed Dump of CODE RAM. Status = 0x%.08x\n",
- status);
- goto err;
- }
-
- /* Insert the segment header */
- qlge_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
- MEMC_RAM_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->memc_ram),
- "MEMC RAM");
- status = qlge_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
- MEMC_RAM_ADDR, MEMC_RAM_CNT);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed Dump of MEMC RAM. Status = 0x%.08x\n",
- status);
- goto err;
- }
-err:
- qlge_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
- return status;
-}
-
-static void qlge_get_core_dump(struct qlge_adapter *qdev)
-{
- if (!qlge_own_firmware(qdev)) {
- netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
- return;
- }
-
- if (!netif_running(qdev->ndev)) {
- netif_err(qdev, ifup, qdev->ndev,
- "Force Coredump can only be done from interface that is up\n");
- return;
- }
- qlge_queue_fw_error(qdev);
-}
-
-static void qlge_gen_reg_dump(struct qlge_adapter *qdev,
- struct qlge_reg_dump *mpi_coredump)
-{
- int i, status;
-
- memset(&mpi_coredump->mpi_global_header, 0,
- sizeof(struct mpi_coredump_global_header));
- mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
- mpi_coredump->mpi_global_header.header_size =
- sizeof(struct mpi_coredump_global_header);
- mpi_coredump->mpi_global_header.image_size =
- sizeof(struct qlge_reg_dump);
- strncpy(mpi_coredump->mpi_global_header.id_string, "MPI Coredump",
- sizeof(mpi_coredump->mpi_global_header.id_string));
-
- /* segment 16 */
- qlge_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
- MISC_NIC_INFO_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->misc_nic_info),
- "MISC NIC INFO");
- mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
- mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
- mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
- mpi_coredump->misc_nic_info.function = qdev->func;
-
- /* Segment 16, Rev C. Step 18 */
- qlge_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
- NIC1_CONTROL_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->nic_regs),
- "NIC Registers");
- /* Get generic reg dump */
- for (i = 0; i < 64; i++)
- mpi_coredump->nic_regs[i] = qlge_read32(qdev, i * sizeof(u32));
-
- /* Segment 31 */
- /* Get indexed register values. */
- qlge_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
- INTR_STATES_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->intr_states),
- "INTR States");
- qlge_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
-
- qlge_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
- CAM_ENTRIES_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->cam_entries),
- "CAM Entries");
- status = qlge_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
- if (status)
- return;
-
- qlge_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
- ROUTING_WORDS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->nic_routing_words),
- "Routing Words");
- status = qlge_get_routing_entries(qdev,
- &mpi_coredump->nic_routing_words[0]);
- if (status)
- return;
-
- /* Segment 34 (Rev C. step 23) */
- qlge_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
- ETS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->ets),
- "ETS Registers");
- status = qlge_get_ets_regs(qdev, &mpi_coredump->ets[0]);
- if (status)
- return;
-}
-
-void qlge_get_dump(struct qlge_adapter *qdev, void *buff)
-{
- /*
- * If force coredump (QL_FRC_COREDUMP) is not set, take a complete
- * coredump straight into the caller's buffer and then soft reset
- * the RISC. If force coredump is set, only snapshot the general
- * registers into the caller's buffer and then trigger the forced
- * firmware coredump, which is spooled to the log by the MPI error
- * handler.
- */
-
- if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
- if (!qlge_core_dump(qdev, buff))
- qlge_soft_reset_mpi_risc(qdev);
- else
- netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
- } else {
- qlge_gen_reg_dump(qdev, buff);
- qlge_get_core_dump(qdev);
- }
-}
diff --git a/drivers/staging/qlge/qlge_devlink.c b/drivers/staging/qlge/qlge_devlink.c
deleted file mode 100644
index 0b19363ca2e9..000000000000
--- a/drivers/staging/qlge/qlge_devlink.c
+++ /dev/null
@@ -1,139 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-#include "qlge.h"
-#include "qlge_devlink.h"
-
-static void qlge_fill_seg_(struct devlink_fmsg *fmsg,
- struct mpi_coredump_segment_header *seg_header,
- u32 *reg_data)
-{
- int regs_num = (seg_header->seg_size
- - sizeof(struct mpi_coredump_segment_header)) / sizeof(u32);
- int i;
-
- devlink_fmsg_pair_nest_start(fmsg, seg_header->description);
- devlink_fmsg_obj_nest_start(fmsg);
- devlink_fmsg_u32_pair_put(fmsg, "segment", seg_header->seg_num);
- devlink_fmsg_arr_pair_nest_start(fmsg, "values");
- for (i = 0; i < regs_num; i++) {
- devlink_fmsg_u32_put(fmsg, *reg_data);
- reg_data++;
- }
- devlink_fmsg_obj_nest_end(fmsg);
- devlink_fmsg_arr_pair_nest_end(fmsg);
- devlink_fmsg_pair_nest_end(fmsg);
-}
-
-#define FILL_SEG(seg_hdr, seg_regs) \
- qlge_fill_seg_(fmsg, &dump->seg_hdr, dump->seg_regs)
-
-static int qlge_reporter_coredump(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg, void *priv_ctx,
- struct netlink_ext_ack *extack)
-{
- int err = 0;
-
- struct qlge_adapter *qdev = devlink_health_reporter_priv(reporter);
- struct qlge_mpi_coredump *dump;
- wait_queue_head_t wait;
-
- if (!netif_running(qdev->ndev))
- return 0;
-
- if (test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
- if (qlge_own_firmware(qdev)) {
- qlge_queue_fw_error(qdev);
- init_waitqueue_head(&wait);
- wait_event_timeout(wait, 0, 5 * HZ);
- } else {
- netif_err(qdev, ifup, qdev->ndev,
- "Force Coredump failed because this NIC function doesn't own the firmware\n");
- return -EPERM;
- }
- }
-
- dump = kvmalloc(sizeof(*dump), GFP_KERNEL);
- if (!dump)
- return -ENOMEM;
-
- err = qlge_core_dump(qdev, dump);
- if (err) {
- kvfree(dump);
- return err;
- }
-
- qlge_soft_reset_mpi_risc(qdev);
-
- FILL_SEG(core_regs_seg_hdr, mpi_core_regs);
- FILL_SEG(test_logic_regs_seg_hdr, test_logic_regs);
- FILL_SEG(rmii_regs_seg_hdr, rmii_regs);
- FILL_SEG(fcmac1_regs_seg_hdr, fcmac1_regs);
- FILL_SEG(fcmac2_regs_seg_hdr, fcmac2_regs);
- FILL_SEG(fc1_mbx_regs_seg_hdr, fc1_mbx_regs);
- FILL_SEG(ide_regs_seg_hdr, ide_regs);
- FILL_SEG(nic1_mbx_regs_seg_hdr, nic1_mbx_regs);
- FILL_SEG(smbus_regs_seg_hdr, smbus_regs);
- FILL_SEG(fc2_mbx_regs_seg_hdr, fc2_mbx_regs);
- FILL_SEG(nic2_mbx_regs_seg_hdr, nic2_mbx_regs);
- FILL_SEG(i2c_regs_seg_hdr, i2c_regs);
- FILL_SEG(memc_regs_seg_hdr, memc_regs);
- FILL_SEG(pbus_regs_seg_hdr, pbus_regs);
- FILL_SEG(mde_regs_seg_hdr, mde_regs);
- FILL_SEG(nic_regs_seg_hdr, nic_regs);
- FILL_SEG(nic2_regs_seg_hdr, nic2_regs);
- FILL_SEG(xgmac1_seg_hdr, xgmac1);
- FILL_SEG(xgmac2_seg_hdr, xgmac2);
- FILL_SEG(code_ram_seg_hdr, code_ram);
- FILL_SEG(memc_ram_seg_hdr, memc_ram);
- FILL_SEG(xaui_an_hdr, serdes_xaui_an);
- FILL_SEG(xaui_hss_pcs_hdr, serdes_xaui_hss_pcs);
- FILL_SEG(xfi_an_hdr, serdes_xfi_an);
- FILL_SEG(xfi_train_hdr, serdes_xfi_train);
- FILL_SEG(xfi_hss_pcs_hdr, serdes_xfi_hss_pcs);
- FILL_SEG(xfi_hss_tx_hdr, serdes_xfi_hss_tx);
- FILL_SEG(xfi_hss_rx_hdr, serdes_xfi_hss_rx);
- FILL_SEG(xfi_hss_pll_hdr, serdes_xfi_hss_pll);
- qlge_fill_seg_(fmsg, &dump->misc_nic_seg_hdr,
- (u32 *)&dump->misc_nic_info);
- FILL_SEG(intr_states_seg_hdr, intr_states);
- FILL_SEG(cam_entries_seg_hdr, cam_entries);
- FILL_SEG(nic_routing_words_seg_hdr, nic_routing_words);
- FILL_SEG(ets_seg_hdr, ets);
- FILL_SEG(probe_dump_seg_hdr, probe_dump);
- FILL_SEG(routing_reg_seg_hdr, routing_regs);
- FILL_SEG(mac_prot_reg_seg_hdr, mac_prot_regs);
- FILL_SEG(xaui2_an_hdr, serdes2_xaui_an);
- FILL_SEG(xaui2_hss_pcs_hdr, serdes2_xaui_hss_pcs);
- FILL_SEG(xfi2_an_hdr, serdes2_xfi_an);
- FILL_SEG(xfi2_train_hdr, serdes2_xfi_train);
- FILL_SEG(xfi2_hss_pcs_hdr, serdes2_xfi_hss_pcs);
- FILL_SEG(xfi2_hss_tx_hdr, serdes2_xfi_hss_tx);
- FILL_SEG(xfi2_hss_rx_hdr, serdes2_xfi_hss_rx);
- FILL_SEG(xfi2_hss_pll_hdr, serdes2_xfi_hss_pll);
- FILL_SEG(sem_regs_seg_hdr, sem_regs);
-
- kvfree(dump);
- return 0;
-}
-
-static const struct devlink_health_reporter_ops qlge_reporter_ops = {
- .name = "coredump",
- .dump = qlge_reporter_coredump,
-};
-
-long qlge_health_create_reporters(struct qlge_adapter *priv)
-{
- struct devlink *devlink;
- long err = 0;
-
- devlink = priv_to_devlink(priv);
- priv->reporter =
- devlink_health_reporter_create(devlink, &qlge_reporter_ops,
- 0, priv);
- if (IS_ERR(priv->reporter)) {
- err = PTR_ERR(priv->reporter);
- netdev_warn(priv->ndev,
- "Failed to create reporter, err = %ld\n",
- err);
- }
- return err;
-}
diff --git a/drivers/staging/qlge/qlge_devlink.h b/drivers/staging/qlge/qlge_devlink.h
deleted file mode 100644
index 94538e923f2f..000000000000
--- a/drivers/staging/qlge/qlge_devlink.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef QLGE_DEVLINK_H
-#define QLGE_DEVLINK_H
-
-#include <net/devlink.h>
-
-long qlge_health_create_reporters(struct qlge_adapter *priv);
-
-#endif /* QLGE_DEVLINK_H */
diff --git a/drivers/staging/qlge/qlge_ethtool.c b/drivers/staging/qlge/qlge_ethtool.c
deleted file mode 100644
index 12efcd1057ba..000000000000
--- a/drivers/staging/qlge/qlge_ethtool.c
+++ /dev/null
@@ -1,746 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/list.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/pagemap.h>
-#include <linux/sched.h>
-#include <linux/dmapool.h>
-#include <linux/mempool.h>
-#include <linux/spinlock.h>
-#include <linux/kthread.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <net/ipv6.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
-#include <linux/if_arp.h>
-#include <linux/if_ether.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/skbuff.h>
-#include <linux/rtnetlink.h>
-#include <linux/if_vlan.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-
-#include "qlge.h"
-
-struct qlge_stats {
- char stat_string[ETH_GSTRING_LEN];
- int sizeof_stat;
- int stat_offset;
-};
-
-#define QL_SIZEOF(m) sizeof_field(struct qlge_adapter, m)
-#define QL_OFF(m) offsetof(struct qlge_adapter, m)
-
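
Each entry in the table below records a statistic's name, its size, and its byte offset inside struct qlge_adapter, so the ethtool handlers can copy counters generically with pointer arithmetic instead of one hand-written line per counter. The following self-contained sketch shows that offsetof-driven readout with a mock adapter struct and a user-space substitute for sizeof_field(); none of these names are the driver's own:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    #define SIZEOF_FIELD(t, m) sizeof(((t *)0)->m)  /* sizeof_field() stand-in */

    struct nic_stats_mock { uint64_t tx_pkts; uint64_t rx_pkts; };
    struct adapter_mock   { int flags; struct nic_stats_mock nic_stats; };

    struct stat_desc {
        const char *name;
        size_t size;
        size_t offset;
    };

    static const struct stat_desc stats[] = {
        { "tx_pkts", SIZEOF_FIELD(struct adapter_mock, nic_stats.tx_pkts),
          offsetof(struct adapter_mock, nic_stats.tx_pkts) },
        { "rx_pkts", SIZEOF_FIELD(struct adapter_mock, nic_stats.rx_pkts),
          offsetof(struct adapter_mock, nic_stats.rx_pkts) },
    };

    int main(void)
    {
        struct adapter_mock a = { .nic_stats = { .tx_pkts = 7, .rx_pkts = 9 } };
        size_t i;

        for (i = 0; i < sizeof(stats) / sizeof(stats[0]); i++) {
            /* Generic fetch: adapter base address plus recorded offset. */
            uint64_t v = *(const uint64_t *)((const char *)&a + stats[i].offset);

            printf("%s = %llu (size %zu)\n", stats[i].name,
                   (unsigned long long)v, stats[i].size);
        }
        return 0;
    }
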
-static const struct qlge_stats qlge_gstrings_stats[] = {
- {"tx_pkts", QL_SIZEOF(nic_stats.tx_pkts), QL_OFF(nic_stats.tx_pkts)},
- {"tx_bytes", QL_SIZEOF(nic_stats.tx_bytes), QL_OFF(nic_stats.tx_bytes)},
- {"tx_mcast_pkts", QL_SIZEOF(nic_stats.tx_mcast_pkts),
- QL_OFF(nic_stats.tx_mcast_pkts)},
- {"tx_bcast_pkts", QL_SIZEOF(nic_stats.tx_bcast_pkts),
- QL_OFF(nic_stats.tx_bcast_pkts)},
- {"tx_ucast_pkts", QL_SIZEOF(nic_stats.tx_ucast_pkts),
- QL_OFF(nic_stats.tx_ucast_pkts)},
- {"tx_ctl_pkts", QL_SIZEOF(nic_stats.tx_ctl_pkts),
- QL_OFF(nic_stats.tx_ctl_pkts)},
- {"tx_pause_pkts", QL_SIZEOF(nic_stats.tx_pause_pkts),
- QL_OFF(nic_stats.tx_pause_pkts)},
- {"tx_64_pkts", QL_SIZEOF(nic_stats.tx_64_pkt),
- QL_OFF(nic_stats.tx_64_pkt)},
- {"tx_65_to_127_pkts", QL_SIZEOF(nic_stats.tx_65_to_127_pkt),
- QL_OFF(nic_stats.tx_65_to_127_pkt)},
- {"tx_128_to_255_pkts", QL_SIZEOF(nic_stats.tx_128_to_255_pkt),
- QL_OFF(nic_stats.tx_128_to_255_pkt)},
- {"tx_256_511_pkts", QL_SIZEOF(nic_stats.tx_256_511_pkt),
- QL_OFF(nic_stats.tx_256_511_pkt)},
- {"tx_512_to_1023_pkts", QL_SIZEOF(nic_stats.tx_512_to_1023_pkt),
- QL_OFF(nic_stats.tx_512_to_1023_pkt)},
- {"tx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.tx_1024_to_1518_pkt),
- QL_OFF(nic_stats.tx_1024_to_1518_pkt)},
- {"tx_1519_to_max_pkts", QL_SIZEOF(nic_stats.tx_1519_to_max_pkt),
- QL_OFF(nic_stats.tx_1519_to_max_pkt)},
- {"tx_undersize_pkts", QL_SIZEOF(nic_stats.tx_undersize_pkt),
- QL_OFF(nic_stats.tx_undersize_pkt)},
- {"tx_oversize_pkts", QL_SIZEOF(nic_stats.tx_oversize_pkt),
- QL_OFF(nic_stats.tx_oversize_pkt)},
- {"rx_bytes", QL_SIZEOF(nic_stats.rx_bytes), QL_OFF(nic_stats.rx_bytes)},
- {"rx_bytes_ok", QL_SIZEOF(nic_stats.rx_bytes_ok),
- QL_OFF(nic_stats.rx_bytes_ok)},
- {"rx_pkts", QL_SIZEOF(nic_stats.rx_pkts), QL_OFF(nic_stats.rx_pkts)},
- {"rx_pkts_ok", QL_SIZEOF(nic_stats.rx_pkts_ok),
- QL_OFF(nic_stats.rx_pkts_ok)},
- {"rx_bcast_pkts", QL_SIZEOF(nic_stats.rx_bcast_pkts),
- QL_OFF(nic_stats.rx_bcast_pkts)},
- {"rx_mcast_pkts", QL_SIZEOF(nic_stats.rx_mcast_pkts),
- QL_OFF(nic_stats.rx_mcast_pkts)},
- {"rx_ucast_pkts", QL_SIZEOF(nic_stats.rx_ucast_pkts),
- QL_OFF(nic_stats.rx_ucast_pkts)},
- {"rx_undersize_pkts", QL_SIZEOF(nic_stats.rx_undersize_pkts),
- QL_OFF(nic_stats.rx_undersize_pkts)},
- {"rx_oversize_pkts", QL_SIZEOF(nic_stats.rx_oversize_pkts),
- QL_OFF(nic_stats.rx_oversize_pkts)},
- {"rx_jabber_pkts", QL_SIZEOF(nic_stats.rx_jabber_pkts),
- QL_OFF(nic_stats.rx_jabber_pkts)},
- {"rx_undersize_fcerr_pkts",
- QL_SIZEOF(nic_stats.rx_undersize_fcerr_pkts),
- QL_OFF(nic_stats.rx_undersize_fcerr_pkts)},
- {"rx_drop_events", QL_SIZEOF(nic_stats.rx_drop_events),
- QL_OFF(nic_stats.rx_drop_events)},
- {"rx_fcerr_pkts", QL_SIZEOF(nic_stats.rx_fcerr_pkts),
- QL_OFF(nic_stats.rx_fcerr_pkts)},
- {"rx_align_err", QL_SIZEOF(nic_stats.rx_align_err),
- QL_OFF(nic_stats.rx_align_err)},
- {"rx_symbol_err", QL_SIZEOF(nic_stats.rx_symbol_err),
- QL_OFF(nic_stats.rx_symbol_err)},
- {"rx_mac_err", QL_SIZEOF(nic_stats.rx_mac_err),
- QL_OFF(nic_stats.rx_mac_err)},
- {"rx_ctl_pkts", QL_SIZEOF(nic_stats.rx_ctl_pkts),
- QL_OFF(nic_stats.rx_ctl_pkts)},
- {"rx_pause_pkts", QL_SIZEOF(nic_stats.rx_pause_pkts),
- QL_OFF(nic_stats.rx_pause_pkts)},
- {"rx_64_pkts", QL_SIZEOF(nic_stats.rx_64_pkts),
- QL_OFF(nic_stats.rx_64_pkts)},
- {"rx_65_to_127_pkts", QL_SIZEOF(nic_stats.rx_65_to_127_pkts),
- QL_OFF(nic_stats.rx_65_to_127_pkts)},
- {"rx_128_255_pkts", QL_SIZEOF(nic_stats.rx_128_255_pkts),
- QL_OFF(nic_stats.rx_128_255_pkts)},
- {"rx_256_511_pkts", QL_SIZEOF(nic_stats.rx_256_511_pkts),
- QL_OFF(nic_stats.rx_256_511_pkts)},
- {"rx_512_to_1023_pkts", QL_SIZEOF(nic_stats.rx_512_to_1023_pkts),
- QL_OFF(nic_stats.rx_512_to_1023_pkts)},
- {"rx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.rx_1024_to_1518_pkts),
- QL_OFF(nic_stats.rx_1024_to_1518_pkts)},
- {"rx_1519_to_max_pkts", QL_SIZEOF(nic_stats.rx_1519_to_max_pkts),
- QL_OFF(nic_stats.rx_1519_to_max_pkts)},
- {"rx_len_err_pkts", QL_SIZEOF(nic_stats.rx_len_err_pkts),
- QL_OFF(nic_stats.rx_len_err_pkts)},
- {"rx_code_err", QL_SIZEOF(nic_stats.rx_code_err),
- QL_OFF(nic_stats.rx_code_err)},
- {"rx_oversize_err", QL_SIZEOF(nic_stats.rx_oversize_err),
- QL_OFF(nic_stats.rx_oversize_err)},
- {"rx_undersize_err", QL_SIZEOF(nic_stats.rx_undersize_err),
- QL_OFF(nic_stats.rx_undersize_err)},
- {"rx_preamble_err", QL_SIZEOF(nic_stats.rx_preamble_err),
- QL_OFF(nic_stats.rx_preamble_err)},
- {"rx_frame_len_err", QL_SIZEOF(nic_stats.rx_frame_len_err),
- QL_OFF(nic_stats.rx_frame_len_err)},
- {"rx_crc_err", QL_SIZEOF(nic_stats.rx_crc_err),
- QL_OFF(nic_stats.rx_crc_err)},
- {"rx_err_count", QL_SIZEOF(nic_stats.rx_err_count),
- QL_OFF(nic_stats.rx_err_count)},
- {"tx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames0),
- QL_OFF(nic_stats.tx_cbfc_pause_frames0)},
- {"tx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames1),
- QL_OFF(nic_stats.tx_cbfc_pause_frames1)},
- {"tx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames2),
- QL_OFF(nic_stats.tx_cbfc_pause_frames2)},
- {"tx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames3),
- QL_OFF(nic_stats.tx_cbfc_pause_frames3)},
- {"tx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames4),
- QL_OFF(nic_stats.tx_cbfc_pause_frames4)},
- {"tx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames5),
- QL_OFF(nic_stats.tx_cbfc_pause_frames5)},
- {"tx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames6),
- QL_OFF(nic_stats.tx_cbfc_pause_frames6)},
- {"tx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames7),
- QL_OFF(nic_stats.tx_cbfc_pause_frames7)},
- {"rx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames0),
- QL_OFF(nic_stats.rx_cbfc_pause_frames0)},
- {"rx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames1),
- QL_OFF(nic_stats.rx_cbfc_pause_frames1)},
- {"rx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames2),
- QL_OFF(nic_stats.rx_cbfc_pause_frames2)},
- {"rx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames3),
- QL_OFF(nic_stats.rx_cbfc_pause_frames3)},
- {"rx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames4),
- QL_OFF(nic_stats.rx_cbfc_pause_frames4)},
- {"rx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames5),
- QL_OFF(nic_stats.rx_cbfc_pause_frames5)},
- {"rx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames6),
- QL_OFF(nic_stats.rx_cbfc_pause_frames6)},
- {"rx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames7),
- QL_OFF(nic_stats.rx_cbfc_pause_frames7)},
- {"rx_nic_fifo_drop", QL_SIZEOF(nic_stats.rx_nic_fifo_drop),
- QL_OFF(nic_stats.rx_nic_fifo_drop)},
-};
-
-static const char qlge_gstrings_test[][ETH_GSTRING_LEN] = {
- "Loopback test (offline)"
-};
-
-#define QLGE_TEST_LEN (sizeof(qlge_gstrings_test) / ETH_GSTRING_LEN)
-#define QLGE_STATS_LEN ARRAY_SIZE(qlge_gstrings_stats)
-#define QLGE_RCV_MAC_ERR_STATS 7
-
-static int qlge_update_ring_coalescing(struct qlge_adapter *qdev)
-{
- int i, status = 0;
- struct rx_ring *rx_ring;
- struct cqicb *cqicb;
-
- if (!netif_running(qdev->ndev))
- return status;
-
- /* Skip the default queue, and update the outbound handler
- * queues if they changed.
- */
- cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count];
- if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
- le16_to_cpu(cqicb->pkt_delay) != qdev->tx_max_coalesced_frames) {
- for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
- rx_ring = &qdev->rx_ring[i];
- cqicb = (struct cqicb *)rx_ring;
- cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
- cqicb->pkt_delay =
- cpu_to_le16(qdev->tx_max_coalesced_frames);
- cqicb->flags = FLAGS_LI;
- status = qlge_write_cfg(qdev, cqicb, sizeof(*cqicb),
- CFG_LCQ, rx_ring->cq_id);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to load CQICB.\n");
- goto exit;
- }
- }
- }
-
- /* Update the inbound (RSS) handler queues if they changed. */
- cqicb = (struct cqicb *)&qdev->rx_ring[0];
- if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
- le16_to_cpu(cqicb->pkt_delay) != qdev->rx_max_coalesced_frames) {
- for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
- rx_ring = &qdev->rx_ring[i];
- cqicb = (struct cqicb *)rx_ring;
- cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
- cqicb->pkt_delay =
- cpu_to_le16(qdev->rx_max_coalesced_frames);
- cqicb->flags = FLAGS_LI;
- status = qlge_write_cfg(qdev, cqicb, sizeof(*cqicb),
- CFG_LCQ, rx_ring->cq_id);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to load CQICB.\n");
- goto exit;
- }
- }
- }
-exit:
- return status;
-}
-
-static void qlge_update_stats(struct qlge_adapter *qdev)
-{
- u32 i;
- u64 data;
- u64 *iter = &qdev->nic_stats.tx_pkts;
-
- spin_lock(&qdev->stats_lock);
- if (qlge_sem_spinlock(qdev, qdev->xg_sem_mask)) {
- netif_err(qdev, drv, qdev->ndev,
- "Couldn't get xgmac sem.\n");
- goto quit;
- }
- /*
- * Get TX statistics.
- */
- for (i = 0x200; i < 0x280; i += 8) {
- if (qlge_read_xgmac_reg64(qdev, i, &data)) {
- netif_err(qdev, drv, qdev->ndev,
- "Error reading status register 0x%.04x.\n",
- i);
- goto end;
- } else {
- *iter = data;
- }
- iter++;
- }
-
- /*
- * Get RX statistics.
- */
- for (i = 0x300; i < 0x3d0; i += 8) {
- if (qlge_read_xgmac_reg64(qdev, i, &data)) {
- netif_err(qdev, drv, qdev->ndev,
- "Error reading status register 0x%.04x.\n",
- i);
- goto end;
- } else {
- *iter = data;
- }
- iter++;
- }
-
-	/* Skip the receive MAC error stats (updated in the rx error path). */
- iter += QLGE_RCV_MAC_ERR_STATS;
-
- /*
- * Get Per-priority TX pause frame counter statistics.
- */
- for (i = 0x500; i < 0x540; i += 8) {
- if (qlge_read_xgmac_reg64(qdev, i, &data)) {
- netif_err(qdev, drv, qdev->ndev,
- "Error reading status register 0x%.04x.\n",
- i);
- goto end;
- } else {
- *iter = data;
- }
- iter++;
- }
-
- /*
- * Get Per-priority RX pause frame counter statistics.
- */
- for (i = 0x568; i < 0x5a8; i += 8) {
- if (qlge_read_xgmac_reg64(qdev, i, &data)) {
- netif_err(qdev, drv, qdev->ndev,
- "Error reading status register 0x%.04x.\n",
- i);
- goto end;
- } else {
- *iter = data;
- }
- iter++;
- }
-
- /*
- * Get RX NIC FIFO DROP statistics.
- */
- if (qlge_read_xgmac_reg64(qdev, 0x5b8, &data)) {
- netif_err(qdev, drv, qdev->ndev,
-			  "Error reading status register 0x%.04x.\n", 0x5b8);
- goto end;
- } else {
- *iter = data;
- }
-end:
- qlge_sem_unlock(qdev, qdev->xg_sem_mask);
-quit:
- spin_unlock(&qdev->stats_lock);
-}
-
-static void qlge_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
-{
- int index;
-
- switch (stringset) {
- case ETH_SS_TEST:
- memcpy(buf, *qlge_gstrings_test, QLGE_TEST_LEN * ETH_GSTRING_LEN);
- break;
- case ETH_SS_STATS:
- for (index = 0; index < QLGE_STATS_LEN; index++) {
- memcpy(buf + index * ETH_GSTRING_LEN,
- qlge_gstrings_stats[index].stat_string,
- ETH_GSTRING_LEN);
- }
- break;
- }
-}
-
-static int qlge_get_sset_count(struct net_device *dev, int sset)
-{
- switch (sset) {
- case ETH_SS_TEST:
- return QLGE_TEST_LEN;
- case ETH_SS_STATS:
- return QLGE_STATS_LEN;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void
-qlge_get_ethtool_stats(struct net_device *ndev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
- int index, length;
-
- length = QLGE_STATS_LEN;
- qlge_update_stats(qdev);
-
- for (index = 0; index < length; index++) {
- char *p = (char *)qdev +
- qlge_gstrings_stats[index].stat_offset;
- *data++ = (qlge_gstrings_stats[index].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : (*(u32 *)p);
- }
-}
-
-static int qlge_get_link_ksettings(struct net_device *ndev,
- struct ethtool_link_ksettings *ecmd)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
- u32 supported, advertising;
-
- supported = SUPPORTED_10000baseT_Full;
- advertising = ADVERTISED_10000baseT_Full;
-
- if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
- STS_LINK_TYPE_10GBASET) {
- supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
- advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
- ecmd->base.port = PORT_TP;
- ecmd->base.autoneg = AUTONEG_ENABLE;
- } else {
- supported |= SUPPORTED_FIBRE;
- advertising |= ADVERTISED_FIBRE;
- ecmd->base.port = PORT_FIBRE;
- }
-
- ecmd->base.speed = SPEED_10000;
- ecmd->base.duplex = DUPLEX_FULL;
-
- ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising,
- advertising);
-
- return 0;
-}
-
-static void qlge_get_drvinfo(struct net_device *ndev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
-
- strscpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver));
- strscpy(drvinfo->version, qlge_driver_version,
- sizeof(drvinfo->version));
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "v%d.%d.%d",
- (qdev->fw_rev_id & 0x00ff0000) >> 16,
- (qdev->fw_rev_id & 0x0000ff00) >> 8,
- (qdev->fw_rev_id & 0x000000ff));
- strscpy(drvinfo->bus_info, pci_name(qdev->pdev),
- sizeof(drvinfo->bus_info));
-}
-
-static void qlge_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
- unsigned short ssys_dev = qdev->pdev->subsystem_device;
-
-	/* WOL is only supported for mezz cards. */
- if (ssys_dev == QLGE_MEZZ_SSYS_ID_068 ||
- ssys_dev == QLGE_MEZZ_SSYS_ID_180) {
- wol->supported = WAKE_MAGIC;
- wol->wolopts = qdev->wol;
- }
-}
-
-static int qlge_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
- unsigned short ssys_dev = qdev->pdev->subsystem_device;
-
-	/* WOL is only supported for mezz cards. */
- if (ssys_dev != QLGE_MEZZ_SSYS_ID_068 &&
- ssys_dev != QLGE_MEZZ_SSYS_ID_180) {
- netif_info(qdev, drv, qdev->ndev,
- "WOL is only supported for mezz card\n");
- return -EOPNOTSUPP;
- }
- if (wol->wolopts & ~WAKE_MAGIC)
- return -EINVAL;
- qdev->wol = wol->wolopts;
-
- netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
- return 0;
-}
-
-static int qlge_set_phys_id(struct net_device *ndev,
- enum ethtool_phys_id_state state)
-
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
-
- switch (state) {
- case ETHTOOL_ID_ACTIVE:
- /* Save the current LED settings */
- if (qlge_mb_get_led_cfg(qdev))
- return -EIO;
-
- /* Start blinking */
- qlge_mb_set_led_cfg(qdev, QL_LED_BLINK);
- return 0;
-
- case ETHTOOL_ID_INACTIVE:
- /* Restore LED settings */
- if (qlge_mb_set_led_cfg(qdev, qdev->led_config))
- return -EIO;
- return 0;
-
- default:
- return -EINVAL;
- }
-}
-
-static int qlge_start_loopback(struct qlge_adapter *qdev)
-{
- if (netif_carrier_ok(qdev->ndev)) {
- set_bit(QL_LB_LINK_UP, &qdev->flags);
- netif_carrier_off(qdev->ndev);
- } else {
- clear_bit(QL_LB_LINK_UP, &qdev->flags);
- }
- qdev->link_config |= CFG_LOOPBACK_PCS;
- return qlge_mb_set_port_cfg(qdev);
-}
-
-static void qlge_stop_loopback(struct qlge_adapter *qdev)
-{
- qdev->link_config &= ~CFG_LOOPBACK_PCS;
- qlge_mb_set_port_cfg(qdev);
- if (test_bit(QL_LB_LINK_UP, &qdev->flags)) {
- netif_carrier_on(qdev->ndev);
- clear_bit(QL_LB_LINK_UP, &qdev->flags);
- }
-}
-
-static void qlge_create_lb_frame(struct sk_buff *skb,
- unsigned int frame_size)
-{
- memset(skb->data, 0xFF, frame_size);
- frame_size &= ~1;
- memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
- skb->data[frame_size / 2 + 10] = (unsigned char)0xBE;
- skb->data[frame_size / 2 + 12] = (unsigned char)0xAF;
-}
-
-void qlge_check_lb_frame(struct qlge_adapter *qdev,
- struct sk_buff *skb)
-{
- unsigned int frame_size = skb->len;
-
- if ((*(skb->data + 3) == 0xFF) &&
- (*(skb->data + frame_size / 2 + 10) == 0xBE) &&
- (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
- atomic_dec(&qdev->lb_count);
- return;
- }
-}
-
-static int qlge_run_loopback_test(struct qlge_adapter *qdev)
-{
- int i;
- netdev_tx_t rc;
- struct sk_buff *skb;
- unsigned int size = SMALL_BUF_MAP_SIZE;
-
- for (i = 0; i < 64; i++) {
- skb = netdev_alloc_skb(qdev->ndev, size);
- if (!skb)
- return -ENOMEM;
-
- skb->queue_mapping = 0;
- skb_put(skb, size);
- qlge_create_lb_frame(skb, size);
- rc = qlge_lb_send(skb, qdev->ndev);
- if (rc != NETDEV_TX_OK)
- return -EPIPE;
- atomic_inc(&qdev->lb_count);
- }
- /* Give queue time to settle before testing results. */
- usleep_range(2000, 2100);
- qlge_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
- return atomic_read(&qdev->lb_count) ? -EIO : 0;
-}
-
-static int qlge_loopback_test(struct qlge_adapter *qdev, u64 *data)
-{
- *data = qlge_start_loopback(qdev);
- if (*data)
- goto out;
- *data = qlge_run_loopback_test(qdev);
-out:
- qlge_stop_loopback(qdev);
- return *data;
-}
-
-static void qlge_self_test(struct net_device *ndev,
- struct ethtool_test *eth_test, u64 *data)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
-
- memset(data, 0, sizeof(u64) * QLGE_TEST_LEN);
-
- if (netif_running(ndev)) {
- set_bit(QL_SELFTEST, &qdev->flags);
- if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
- /* Offline tests */
- if (qlge_loopback_test(qdev, &data[0]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- } else {
- /* Online tests */
- data[0] = 0;
- }
- clear_bit(QL_SELFTEST, &qdev->flags);
- /* Give link time to come up after
- * port configuration changes.
- */
- msleep_interruptible(4 * 1000);
- } else {
- netif_err(qdev, drv, qdev->ndev,
- "is down, Loopback test will fail.\n");
- eth_test->flags |= ETH_TEST_FL_FAILED;
- }
-}
-
-static int qlge_get_regs_len(struct net_device *ndev)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
-
- if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
- return sizeof(struct qlge_mpi_coredump);
- else
- return sizeof(struct qlge_reg_dump);
-}
-
-static void qlge_get_regs(struct net_device *ndev,
- struct ethtool_regs *regs, void *p)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
-
- qlge_get_dump(qdev, p);
- if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
- regs->len = sizeof(struct qlge_mpi_coredump);
- else
- regs->len = sizeof(struct qlge_reg_dump);
-}
-
-static int qlge_get_coalesce(struct net_device *ndev,
- struct ethtool_coalesce *c,
- struct kernel_ethtool_coalesce *kernel_coal,
- struct netlink_ext_ack *extack)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
-
- c->rx_coalesce_usecs = qdev->rx_coalesce_usecs;
- c->tx_coalesce_usecs = qdev->tx_coalesce_usecs;
-
- /* This chip coalesces as follows:
- * If a packet arrives, hold off interrupts until
-	 * cqicb->irq_delay expires, but if no other packets arrive don't
-	 * wait longer than cqicb->pkt_delay. But ethtool doesn't use a
- * timer to coalesce on a frame basis. So, we have to take ethtool's
- * max_coalesced_frames value and convert it to a delay in microseconds.
-	 * We do this by using a basic throughput of 1,000,000 frames per
- * second @ (1024 bytes). This means one frame per usec. So it's a
- * simple one to one ratio.
- */
- c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames;
- c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames;
-
- return 0;
-}
-
-static int qlge_set_coalesce(struct net_device *ndev,
- struct ethtool_coalesce *c,
- struct kernel_ethtool_coalesce *kernel_coal,
- struct netlink_ext_ack *extack)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
-
- /* Validate user parameters. */
- if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
- return -EINVAL;
- /* Don't wait more than 10 usec. */
- if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
- return -EINVAL;
- if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
- return -EINVAL;
- if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
- return -EINVAL;
-
- /* Verify a change took place before updating the hardware. */
- if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs &&
- qdev->tx_coalesce_usecs == c->tx_coalesce_usecs &&
- qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames &&
- qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames)
- return 0;
-
- qdev->rx_coalesce_usecs = c->rx_coalesce_usecs;
- qdev->tx_coalesce_usecs = c->tx_coalesce_usecs;
- qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames;
- qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames;
-
- return qlge_update_ring_coalescing(qdev);
-}
-
-static void qlge_get_pauseparam(struct net_device *ndev,
- struct ethtool_pauseparam *pause)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
-
- qlge_mb_get_port_cfg(qdev);
- if (qdev->link_config & CFG_PAUSE_STD) {
- pause->rx_pause = 1;
- pause->tx_pause = 1;
- }
-}
-
-static int qlge_set_pauseparam(struct net_device *ndev,
- struct ethtool_pauseparam *pause)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
-
- if ((pause->rx_pause) && (pause->tx_pause))
- qdev->link_config |= CFG_PAUSE_STD;
- else if (!pause->rx_pause && !pause->tx_pause)
- qdev->link_config &= ~CFG_PAUSE_STD;
- else
- return -EINVAL;
-
- return qlge_mb_set_port_cfg(qdev);
-}
-
-static u32 qlge_get_msglevel(struct net_device *ndev)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
-
- return qdev->msg_enable;
-}
-
-static void qlge_set_msglevel(struct net_device *ndev, u32 value)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
-
- qdev->msg_enable = value;
-}
-
-const struct ethtool_ops qlge_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_MAX_FRAMES,
- .get_drvinfo = qlge_get_drvinfo,
- .get_wol = qlge_get_wol,
- .set_wol = qlge_set_wol,
- .get_regs_len = qlge_get_regs_len,
- .get_regs = qlge_get_regs,
- .get_msglevel = qlge_get_msglevel,
- .set_msglevel = qlge_set_msglevel,
- .get_link = ethtool_op_get_link,
- .set_phys_id = qlge_set_phys_id,
- .self_test = qlge_self_test,
- .get_pauseparam = qlge_get_pauseparam,
- .set_pauseparam = qlge_set_pauseparam,
- .get_coalesce = qlge_get_coalesce,
- .set_coalesce = qlge_set_coalesce,
- .get_sset_count = qlge_get_sset_count,
- .get_strings = qlge_get_strings,
- .get_ethtool_stats = qlge_get_ethtool_stats,
- .get_link_ksettings = qlge_get_link_ksettings,
-};
-
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
deleted file mode 100644
index 1ead7793062a..000000000000
--- a/drivers/staging/qlge/qlge_main.c
+++ /dev/null
@@ -1,4845 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * QLogic qlge NIC HBA Driver
- * Copyright (c) 2003-2008 QLogic Corporation
- * Author: Linux qlge network device driver by
- * Ron Mercer <ron.mercer@qlogic.com>
- */
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/list.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/pagemap.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/dmapool.h>
-#include <linux/mempool.h>
-#include <linux/spinlock.h>
-#include <linux/kthread.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <net/ipv6.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
-#include <linux/if_arp.h>
-#include <linux/if_ether.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/if_vlan.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/prefetch.h>
-#include <net/ip6_checksum.h>
-
-#include "qlge.h"
-#include "qlge_devlink.h"
-
-char qlge_driver_name[] = DRV_NAME;
-const char qlge_driver_version[] = DRV_VERSION;
-
-MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
-MODULE_DESCRIPTION(DRV_STRING " ");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
-
-static const u32 default_msg =
- NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
- NETIF_MSG_IFDOWN |
- NETIF_MSG_IFUP |
- NETIF_MSG_RX_ERR |
- NETIF_MSG_TX_ERR |
- NETIF_MSG_HW | NETIF_MSG_WOL | 0;
-
-static int debug = -1; /* defaults above */
-module_param(debug, int, 0664);
-MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-
-#define MSIX_IRQ 0
-#define MSI_IRQ 1
-#define LEG_IRQ 2
-static int qlge_irq_type = MSIX_IRQ;
-module_param(qlge_irq_type, int, 0664);
-MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
-
-static int qlge_mpi_coredump;
-module_param(qlge_mpi_coredump, int, 0);
-MODULE_PARM_DESC(qlge_mpi_coredump,
- "Option to enable MPI firmware dump. Default is OFF - Do Not allocate memory. ");
-
-static int qlge_force_coredump;
-module_param(qlge_force_coredump, int, 0);
-MODULE_PARM_DESC(qlge_force_coredump,
- "Option to allow force of firmware core dump. Default is OFF - Do not allow.");
-
-static const struct pci_device_id qlge_pci_tbl[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
- {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
- /* required last entry */
- {0,}
-};
-
-MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
-
-static int qlge_wol(struct qlge_adapter *);
-static void qlge_set_multicast_list(struct net_device *);
-static int qlge_adapter_down(struct qlge_adapter *);
-static int qlge_adapter_up(struct qlge_adapter *);
-
-/* This hardware semaphore provides exclusive access to
- * resources shared between the NIC driver, MPI firmware,
- * FCOE firmware and the FC driver.
- */
-static int qlge_sem_trylock(struct qlge_adapter *qdev, u32 sem_mask)
-{
- u32 sem_bits = 0;
-
- switch (sem_mask) {
- case SEM_XGMAC0_MASK:
- sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
- break;
- case SEM_XGMAC1_MASK:
- sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
- break;
- case SEM_ICB_MASK:
- sem_bits = SEM_SET << SEM_ICB_SHIFT;
- break;
- case SEM_MAC_ADDR_MASK:
- sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
- break;
- case SEM_FLASH_MASK:
- sem_bits = SEM_SET << SEM_FLASH_SHIFT;
- break;
- case SEM_PROBE_MASK:
- sem_bits = SEM_SET << SEM_PROBE_SHIFT;
- break;
- case SEM_RT_IDX_MASK:
- sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
- break;
- case SEM_PROC_REG_MASK:
- sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
- break;
- default:
- netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
- return -EINVAL;
- }
-
- qlge_write32(qdev, SEM, sem_bits | sem_mask);
- return !(qlge_read32(qdev, SEM) & sem_bits);
-}
-
-int qlge_sem_spinlock(struct qlge_adapter *qdev, u32 sem_mask)
-{
- unsigned int wait_count = 30;
-
- do {
- if (!qlge_sem_trylock(qdev, sem_mask))
- return 0;
- udelay(100);
- } while (--wait_count);
- return -ETIMEDOUT;
-}
-
-void qlge_sem_unlock(struct qlge_adapter *qdev, u32 sem_mask)
-{
- qlge_write32(qdev, SEM, sem_mask);
- qlge_read32(qdev, SEM); /* flush */
-}
-
-/* This function waits for a specific bit to come ready
- * in a given register. It is used mostly by the initialization
- * process, but is also used from kernel thread APIs such as
- * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
- */
-int qlge_wait_reg_rdy(struct qlge_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
-{
- u32 temp;
- int count;
-
- for (count = 0; count < UDELAY_COUNT; count++) {
- temp = qlge_read32(qdev, reg);
-
- /* check for errors */
- if (temp & err_bit) {
- netif_alert(qdev, probe, qdev->ndev,
- "register 0x%.08x access error, value = 0x%.08x!.\n",
- reg, temp);
- return -EIO;
- } else if (temp & bit) {
- return 0;
- }
- udelay(UDELAY_DELAY);
- }
- netif_alert(qdev, probe, qdev->ndev,
- "Timed out waiting for reg %x to come ready.\n", reg);
- return -ETIMEDOUT;
-}
-
-/* The CFG register is used to download TX and RX control blocks
- * to the chip. This function waits for an operation to complete.
- */
-static int qlge_wait_cfg(struct qlge_adapter *qdev, u32 bit)
-{
- int count;
- u32 temp;
-
- for (count = 0; count < UDELAY_COUNT; count++) {
- temp = qlge_read32(qdev, CFG);
- if (temp & CFG_LE)
- return -EIO;
- if (!(temp & bit))
- return 0;
- udelay(UDELAY_DELAY);
- }
- return -ETIMEDOUT;
-}
-
-/* Used to issue init control blocks to hw. Maps control block,
- * sets address, triggers download, waits for completion.
- */
-int qlge_write_cfg(struct qlge_adapter *qdev, void *ptr, int size, u32 bit,
- u16 q_id)
-{
- u64 map;
- int status = 0;
- int direction;
- u32 mask;
- u32 value;
-
- if (bit & (CFG_LRQ | CFG_LR | CFG_LCQ))
- direction = DMA_TO_DEVICE;
- else
- direction = DMA_FROM_DEVICE;
-
- map = dma_map_single(&qdev->pdev->dev, ptr, size, direction);
- if (dma_mapping_error(&qdev->pdev->dev, map)) {
- netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
- return -ENOMEM;
- }
-
- status = qlge_sem_spinlock(qdev, SEM_ICB_MASK);
- if (status)
- goto lock_failed;
-
- status = qlge_wait_cfg(qdev, bit);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Timed out waiting for CFG to come ready.\n");
- goto exit;
- }
-
- qlge_write32(qdev, ICB_L, (u32)map);
- qlge_write32(qdev, ICB_H, (u32)(map >> 32));
-
- mask = CFG_Q_MASK | (bit << 16);
- value = bit | (q_id << CFG_Q_SHIFT);
- qlge_write32(qdev, CFG, (mask | value));
-
- /*
- * Wait for the bit to clear after signaling hw.
- */
- status = qlge_wait_cfg(qdev, bit);
-exit:
- qlge_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
-lock_failed:
- dma_unmap_single(&qdev->pdev->dev, map, size, direction);
- return status;
-}
-
-/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
-int qlge_get_mac_addr_reg(struct qlge_adapter *qdev, u32 type, u16 index,
- u32 *value)
-{
- u32 offset = 0;
- int status;
-
- switch (type) {
- case MAC_ADDR_TYPE_MULTI_MAC:
- case MAC_ADDR_TYPE_CAM_MAC: {
- status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- break;
- qlge_write32(qdev, MAC_ADDR_IDX,
- (offset++) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- MAC_ADDR_ADR | MAC_ADDR_RS |
- type); /* type */
- status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
- if (status)
- break;
- *value++ = qlge_read32(qdev, MAC_ADDR_DATA);
- status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- break;
- qlge_write32(qdev, MAC_ADDR_IDX,
- (offset++) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- MAC_ADDR_ADR | MAC_ADDR_RS |
- type); /* type */
- status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
- if (status)
- break;
- *value++ = qlge_read32(qdev, MAC_ADDR_DATA);
- if (type == MAC_ADDR_TYPE_CAM_MAC) {
- status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX,
- MAC_ADDR_MW, 0);
- if (status)
- break;
- qlge_write32(qdev, MAC_ADDR_IDX,
- (offset++) | /* offset */
- (index
- << MAC_ADDR_IDX_SHIFT) | /* index */
- MAC_ADDR_ADR |
- MAC_ADDR_RS | type); /* type */
- status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX,
- MAC_ADDR_MR, 0);
- if (status)
- break;
- *value++ = qlge_read32(qdev, MAC_ADDR_DATA);
- }
- break;
- }
- case MAC_ADDR_TYPE_VLAN:
- case MAC_ADDR_TYPE_MULTI_FLTR:
- default:
- netif_crit(qdev, ifup, qdev->ndev,
- "Address type %d not yet supported.\n", type);
- status = -EPERM;
- }
- return status;
-}
-
-/* Set up a MAC, multicast or VLAN address for the
- * inbound frame matching.
- */
-static int qlge_set_mac_addr_reg(struct qlge_adapter *qdev, const u8 *addr,
- u32 type, u16 index)
-{
- u32 offset = 0;
- int status = 0;
-
- switch (type) {
- case MAC_ADDR_TYPE_MULTI_MAC: {
- u32 upper = (addr[0] << 8) | addr[1];
- u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
- (addr[5]);
-
- status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- break;
- qlge_write32(qdev, MAC_ADDR_IDX,
- (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
- MAC_ADDR_E);
- qlge_write32(qdev, MAC_ADDR_DATA, lower);
- status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- break;
- qlge_write32(qdev, MAC_ADDR_IDX,
- (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
- MAC_ADDR_E);
-
- qlge_write32(qdev, MAC_ADDR_DATA, upper);
- status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- break;
- }
- case MAC_ADDR_TYPE_CAM_MAC: {
- u32 cam_output;
- u32 upper = (addr[0] << 8) | addr[1];
- u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
- (addr[5]);
- status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- break;
- qlge_write32(qdev, MAC_ADDR_IDX,
- (offset++) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- type); /* type */
- qlge_write32(qdev, MAC_ADDR_DATA, lower);
- status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- break;
- qlge_write32(qdev, MAC_ADDR_IDX,
- (offset++) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- type); /* type */
- qlge_write32(qdev, MAC_ADDR_DATA, upper);
- status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- break;
- qlge_write32(qdev, MAC_ADDR_IDX,
- (offset) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- type); /* type */
- /* This field should also include the queue id
- * and possibly the function id. Right now we hardcode
- * the route field to NIC core.
- */
- cam_output = (CAM_OUT_ROUTE_NIC |
- (qdev->func << CAM_OUT_FUNC_SHIFT) |
- (0 << CAM_OUT_CQ_ID_SHIFT));
- if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
- cam_output |= CAM_OUT_RV;
- /* route to NIC core */
- qlge_write32(qdev, MAC_ADDR_DATA, cam_output);
- break;
- }
- case MAC_ADDR_TYPE_VLAN: {
- u32 enable_bit = *((u32 *)&addr[0]);
- /* For VLAN, the addr actually holds a bit that
- * either enables or disables the vlan id we are
- * addressing. It's either MAC_ADDR_E on or off.
- * That's bit-27 we're talking about.
- */
- status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- break;
- qlge_write32(qdev, MAC_ADDR_IDX,
- offset | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- type | /* type */
- enable_bit); /* enable/disable */
- break;
- }
- case MAC_ADDR_TYPE_MULTI_FLTR:
- default:
- netif_crit(qdev, ifup, qdev->ndev,
- "Address type %d not yet supported.\n", type);
- status = -EPERM;
- }
- return status;
-}
-
-/* Set or clear MAC address in hardware. We sometimes
- * have to clear it to prevent wrong frame routing
- * especially in a bonding environment.
- */
-static int qlge_set_mac_addr(struct qlge_adapter *qdev, int set)
-{
- int status;
- char zero_mac_addr[ETH_ALEN];
- char *addr;
-
- if (set) {
- addr = &qdev->current_mac_addr[0];
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "Set Mac addr %pM\n", addr);
- } else {
- eth_zero_addr(zero_mac_addr);
- addr = &zero_mac_addr[0];
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "Clearing MAC address\n");
- }
- status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- return status;
- status = qlge_set_mac_addr_reg(qdev, (const u8 *)addr,
- MAC_ADDR_TYPE_CAM_MAC,
- qdev->func * MAX_CQ);
- qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init mac address.\n");
- return status;
-}
-
-void qlge_link_on(struct qlge_adapter *qdev)
-{
- netif_err(qdev, link, qdev->ndev, "Link is up.\n");
- netif_carrier_on(qdev->ndev);
- qlge_set_mac_addr(qdev, 1);
-}
-
-void qlge_link_off(struct qlge_adapter *qdev)
-{
- netif_err(qdev, link, qdev->ndev, "Link is down.\n");
- netif_carrier_off(qdev->ndev);
- qlge_set_mac_addr(qdev, 0);
-}
-
-/* Get a specific frame routing value from the CAM.
- * Used for debug and reg dump.
- */
-int qlge_get_routing_reg(struct qlge_adapter *qdev, u32 index, u32 *value)
-{
- int status = 0;
-
- status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
- if (status)
- goto exit;
-
- qlge_write32(qdev, RT_IDX,
- RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
- status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
- if (status)
- goto exit;
- *value = qlge_read32(qdev, RT_DATA);
-exit:
- return status;
-}
-
-/* The NIC function for this chip has 16 routing indexes. Each one can be used
- * to route different frame types to various inbound queues. We send broadcast/
- * multicast/error frames to the default queue for slow handling,
- * and CAM hit/RSS frames to the fast handling queues.
- */
-static int qlge_set_routing_reg(struct qlge_adapter *qdev, u32 index, u32 mask,
- int enable)
-{
- int status = -EINVAL; /* Return error if no mask match. */
- u32 value = 0;
-
- switch (mask) {
- case RT_IDX_CAM_HIT:
- {
- value = RT_IDX_DST_CAM_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case RT_IDX_VALID: /* Promiscuous Mode frames. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_IP_CSUM_ERR_SLOT <<
- RT_IDX_IDX_SHIFT); /* index */
- break;
- }
- case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
- RT_IDX_IDX_SHIFT); /* index */
- break;
- }
- case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case RT_IDX_MCAST: /* Pass up All Multicast frames. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
- {
- value = RT_IDX_DST_RSS | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case 0: /* Clear the E-bit on an entry. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (index << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- default:
- netif_err(qdev, ifup, qdev->ndev,
- "Mask type %d not yet supported.\n", mask);
- status = -EPERM;
- goto exit;
- }
-
- if (value) {
- status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
- if (status)
- goto exit;
- value |= (enable ? RT_IDX_E : 0);
- qlge_write32(qdev, RT_IDX, value);
- qlge_write32(qdev, RT_DATA, enable ? mask : 0);
- }
-exit:
- return status;
-}
-
-static void qlge_enable_interrupts(struct qlge_adapter *qdev)
-{
- qlge_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
-}
-
-static void qlge_disable_interrupts(struct qlge_adapter *qdev)
-{
- qlge_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
-}
-
-static void qlge_enable_completion_interrupt(struct qlge_adapter *qdev, u32 intr)
-{
- struct intr_context *ctx = &qdev->intr_context[intr];
-
- qlge_write32(qdev, INTR_EN, ctx->intr_en_mask);
-}
-
-static void qlge_disable_completion_interrupt(struct qlge_adapter *qdev, u32 intr)
-{
- struct intr_context *ctx = &qdev->intr_context[intr];
-
- qlge_write32(qdev, INTR_EN, ctx->intr_dis_mask);
-}
-
-static void qlge_enable_all_completion_interrupts(struct qlge_adapter *qdev)
-{
- int i;
-
- for (i = 0; i < qdev->intr_count; i++)
- qlge_enable_completion_interrupt(qdev, i);
-}
-
-static int qlge_validate_flash(struct qlge_adapter *qdev, u32 size, const char *str)
-{
- int status, i;
- u16 csum = 0;
- __le16 *flash = (__le16 *)&qdev->flash;
-
- status = strncmp((char *)&qdev->flash, str, 4);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
- return status;
- }
-
- for (i = 0; i < size; i++)
- csum += le16_to_cpu(*flash++);
-
- if (csum)
- netif_err(qdev, ifup, qdev->ndev,
- "Invalid flash checksum, csum = 0x%.04x.\n", csum);
-
- return csum;
-}
-
-static int qlge_read_flash_word(struct qlge_adapter *qdev, int offset, __le32 *data)
-{
- int status = 0;
- /* wait for reg to come ready */
- status = qlge_wait_reg_rdy(qdev,
- FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
- if (status)
- goto exit;
- /* set up for reg read */
- qlge_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
- /* wait for reg to come ready */
- status = qlge_wait_reg_rdy(qdev,
- FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
- if (status)
- goto exit;
- /* This data is stored on flash as an array of
- * __le32. Since qlge_read32() returns cpu endian
- * we need to swap it back.
- */
- *data = cpu_to_le32(qlge_read32(qdev, FLASH_DATA));
-exit:
- return status;
-}
-
-static int qlge_get_8000_flash_params(struct qlge_adapter *qdev)
-{
- u32 i, size;
- int status;
- __le32 *p = (__le32 *)&qdev->flash;
- u32 offset;
- u8 mac_addr[6];
-
- /* Get flash offset for function and adjust
- * for dword access.
- */
- if (!qdev->port)
- offset = FUNC0_FLASH_OFFSET / sizeof(u32);
- else
- offset = FUNC1_FLASH_OFFSET / sizeof(u32);
-
- if (qlge_sem_spinlock(qdev, SEM_FLASH_MASK))
- return -ETIMEDOUT;
-
- size = sizeof(struct flash_params_8000) / sizeof(u32);
- for (i = 0; i < size; i++, p++) {
- status = qlge_read_flash_word(qdev, i + offset, p);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Error reading flash.\n");
- goto exit;
- }
- }
-
- status = qlge_validate_flash(qdev,
- sizeof(struct flash_params_8000) /
- sizeof(u16),
- "8000");
- if (status) {
- netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
- status = -EINVAL;
- goto exit;
- }
-
- /* Extract either manufacturer or BOFM modified
- * MAC address.
- */
- if (qdev->flash.flash_params_8000.data_type1 == 2)
- memcpy(mac_addr,
- qdev->flash.flash_params_8000.mac_addr1,
- qdev->ndev->addr_len);
- else
- memcpy(mac_addr,
- qdev->flash.flash_params_8000.mac_addr,
- qdev->ndev->addr_len);
-
- if (!is_valid_ether_addr(mac_addr)) {
- netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
- status = -EINVAL;
- goto exit;
- }
-
- eth_hw_addr_set(qdev->ndev, mac_addr);
-
-exit:
- qlge_sem_unlock(qdev, SEM_FLASH_MASK);
- return status;
-}
-
-static int qlge_get_8012_flash_params(struct qlge_adapter *qdev)
-{
- int i;
- int status;
- __le32 *p = (__le32 *)&qdev->flash;
- u32 offset = 0;
- u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
-
- /* Second function's parameters follow the first
- * function's.
- */
- if (qdev->port)
- offset = size;
-
- if (qlge_sem_spinlock(qdev, SEM_FLASH_MASK))
- return -ETIMEDOUT;
-
- for (i = 0; i < size; i++, p++) {
- status = qlge_read_flash_word(qdev, i + offset, p);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Error reading flash.\n");
- goto exit;
- }
- }
-
- status = qlge_validate_flash(qdev,
- sizeof(struct flash_params_8012) /
- sizeof(u16),
- "8012");
- if (status) {
- netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
- status = -EINVAL;
- goto exit;
- }
-
- if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
- status = -EINVAL;
- goto exit;
- }
-
- eth_hw_addr_set(qdev->ndev, qdev->flash.flash_params_8012.mac_addr);
-
-exit:
- qlge_sem_unlock(qdev, SEM_FLASH_MASK);
- return status;
-}
-
-/* xgmac registers are located behind the xgmac_addr and xgmac_data
- * register pair. Each read/write requires us to wait for the ready
- * bit before reading/writing the data.
- */
-static int qlge_write_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 data)
-{
- int status;
- /* wait for reg to come ready */
- status = qlge_wait_reg_rdy(qdev,
- XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
- if (status)
- return status;
- /* write the data to the data reg */
- qlge_write32(qdev, XGMAC_DATA, data);
- /* trigger the write */
- qlge_write32(qdev, XGMAC_ADDR, reg);
- return status;
-}
-
-/* xgmac registers are located behind the xgmac_addr and xgmac_data
- * register pair. Each read/write requires us to wait for the ready
- * bit before reading/writing the data.
- */
-int qlge_read_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 *data)
-{
- int status = 0;
- /* wait for reg to come ready */
- status = qlge_wait_reg_rdy(qdev,
- XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
- if (status)
- goto exit;
- /* set up for reg read */
- qlge_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
- /* wait for reg to come ready */
- status = qlge_wait_reg_rdy(qdev,
- XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
- if (status)
- goto exit;
- /* get the data */
- *data = qlge_read32(qdev, XGMAC_DATA);
-exit:
- return status;
-}
-
-/* This is used for reading the 64-bit statistics regs. */
-int qlge_read_xgmac_reg64(struct qlge_adapter *qdev, u32 reg, u64 *data)
-{
- int status = 0;
- u32 hi = 0;
- u32 lo = 0;
-
- status = qlge_read_xgmac_reg(qdev, reg, &lo);
- if (status)
- goto exit;
-
- status = qlge_read_xgmac_reg(qdev, reg + 4, &hi);
- if (status)
- goto exit;
-
- *data = (u64)lo | ((u64)hi << 32);
-
-exit:
- return status;
-}
-
-static int qlge_8000_port_initialize(struct qlge_adapter *qdev)
-{
- int status;
- /*
- * Get MPI firmware version for driver banner
-	 * and ethtool info.
- */
- status = qlge_mb_about_fw(qdev);
- if (status)
- goto exit;
- status = qlge_mb_get_fw_state(qdev);
- if (status)
- goto exit;
- /* Wake up a worker to get/set the TX/RX frame sizes. */
- queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
-exit:
- return status;
-}
-
-/* Take the MAC Core out of reset.
- * Enable statistics counting.
- * Take the transmitter/receiver out of reset.
- * This functionality may be done in the MPI firmware at a
- * later date.
- */
-static int qlge_8012_port_initialize(struct qlge_adapter *qdev)
-{
- int status = 0;
- u32 data;
-
- if (qlge_sem_trylock(qdev, qdev->xg_sem_mask)) {
- /* Another function has the semaphore, so
- * wait for the port init bit to come ready.
- */
- netif_info(qdev, link, qdev->ndev,
- "Another function has the semaphore, so wait for the port init bit to come ready.\n");
- status = qlge_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
- if (status) {
- netif_crit(qdev, link, qdev->ndev,
- "Port initialize timed out.\n");
- }
- return status;
- }
-
- netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
- /* Set the core reset. */
- status = qlge_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
- if (status)
- goto end;
- data |= GLOBAL_CFG_RESET;
- status = qlge_write_xgmac_reg(qdev, GLOBAL_CFG, data);
- if (status)
- goto end;
-
- /* Clear the core reset and turn on jumbo for receiver. */
- data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
- data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
- data |= GLOBAL_CFG_TX_STAT_EN;
- data |= GLOBAL_CFG_RX_STAT_EN;
- status = qlge_write_xgmac_reg(qdev, GLOBAL_CFG, data);
- if (status)
- goto end;
-
-	/* Enable transmitter, and clear its reset. */
- status = qlge_read_xgmac_reg(qdev, TX_CFG, &data);
- if (status)
- goto end;
- data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
- data |= TX_CFG_EN; /* Enable the transmitter. */
- status = qlge_write_xgmac_reg(qdev, TX_CFG, data);
- if (status)
- goto end;
-
-	/* Enable receiver and clear its reset. */
- status = qlge_read_xgmac_reg(qdev, RX_CFG, &data);
- if (status)
- goto end;
- data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
- data |= RX_CFG_EN; /* Enable the receiver. */
- status = qlge_write_xgmac_reg(qdev, RX_CFG, data);
- if (status)
- goto end;
-
- /* Turn on jumbo. */
- status =
- qlge_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
- if (status)
- goto end;
- status =
- qlge_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
- if (status)
- goto end;
-
- /* Signal to the world that the port is enabled. */
- qlge_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
-end:
- qlge_sem_unlock(qdev, qdev->xg_sem_mask);
- return status;
-}
-
-static inline unsigned int qlge_lbq_block_size(struct qlge_adapter *qdev)
-{
- return PAGE_SIZE << qdev->lbq_buf_order;
-}
-
-static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq)
-{
- struct qlge_bq_desc *bq_desc;
-
- bq_desc = &bq->queue[bq->next_to_clean];
- bq->next_to_clean = QLGE_BQ_WRAP(bq->next_to_clean + 1);
-
- return bq_desc;
-}
-
-static struct qlge_bq_desc *qlge_get_curr_lchunk(struct qlge_adapter *qdev,
- struct rx_ring *rx_ring)
-{
- struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);
-
- dma_sync_single_for_cpu(&qdev->pdev->dev, lbq_desc->dma_addr,
- qdev->lbq_buf_size, DMA_FROM_DEVICE);
-
- if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
- qlge_lbq_block_size(qdev)) {
- /* last chunk of the master page */
- dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
- qlge_lbq_block_size(qdev), DMA_FROM_DEVICE);
- }
-
- return lbq_desc;
-}
-
-/* Update an rx ring index. */
-static void qlge_update_cq(struct rx_ring *rx_ring)
-{
- rx_ring->cnsmr_idx++;
- rx_ring->curr_entry++;
- if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
- rx_ring->cnsmr_idx = 0;
- rx_ring->curr_entry = rx_ring->cq_base;
- }
-}
-
-static void qlge_write_cq_idx(struct rx_ring *rx_ring)
-{
- qlge_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
-}
-
-static const char * const bq_type_name[] = {
- [QLGE_SB] = "sbq",
- [QLGE_LB] = "lbq",
-};
-
-/* return 0 or negative error */
-static int qlge_refill_sb(struct rx_ring *rx_ring,
- struct qlge_bq_desc *sbq_desc, gfp_t gfp)
-{
- struct qlge_adapter *qdev = rx_ring->qdev;
- struct sk_buff *skb;
-
- if (sbq_desc->p.skb)
- return 0;
-
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "ring %u sbq: getting new skb for index %d.\n",
- rx_ring->cq_id, sbq_desc->index);
-
- skb = __netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE, gfp);
- if (!skb)
- return -ENOMEM;
- skb_reserve(skb, QLGE_SB_PAD);
-
- sbq_desc->dma_addr = dma_map_single(&qdev->pdev->dev, skb->data,
- SMALL_BUF_MAP_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&qdev->pdev->dev, sbq_desc->dma_addr)) {
- netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n");
- dev_kfree_skb_any(skb);
- return -EIO;
- }
- *sbq_desc->buf_ptr = cpu_to_le64(sbq_desc->dma_addr);
-
- sbq_desc->p.skb = skb;
- return 0;
-}
-
-/* return 0 or negative error */
-static int qlge_refill_lb(struct rx_ring *rx_ring,
- struct qlge_bq_desc *lbq_desc, gfp_t gfp)
-{
- struct qlge_adapter *qdev = rx_ring->qdev;
- struct qlge_page_chunk *master_chunk = &rx_ring->master_chunk;
-
- if (!master_chunk->page) {
- struct page *page;
- dma_addr_t dma_addr;
-
- page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order);
- if (unlikely(!page))
- return -ENOMEM;
- dma_addr = dma_map_page(&qdev->pdev->dev, page, 0,
- qlge_lbq_block_size(qdev),
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&qdev->pdev->dev, dma_addr)) {
- __free_pages(page, qdev->lbq_buf_order);
- netif_err(qdev, drv, qdev->ndev,
- "PCI mapping failed.\n");
- return -EIO;
- }
- master_chunk->page = page;
- master_chunk->va = page_address(page);
- master_chunk->offset = 0;
- rx_ring->chunk_dma_addr = dma_addr;
- }
-
- lbq_desc->p.pg_chunk = *master_chunk;
- lbq_desc->dma_addr = rx_ring->chunk_dma_addr;
- *lbq_desc->buf_ptr = cpu_to_le64(lbq_desc->dma_addr +
- lbq_desc->p.pg_chunk.offset);
-
- /* Adjust the master page chunk for next
- * buffer get.
- */
- master_chunk->offset += qdev->lbq_buf_size;
- if (master_chunk->offset == qlge_lbq_block_size(qdev)) {
- master_chunk->page = NULL;
- } else {
- master_chunk->va += qdev->lbq_buf_size;
- get_page(master_chunk->page);
- }
-
- return 0;
-}
-
-/* return 0 or negative error */
-static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp)
-{
- struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
- struct qlge_adapter *qdev = rx_ring->qdev;
- struct qlge_bq_desc *bq_desc;
- int refill_count;
- int retval;
- int i;
-
- refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) -
- bq->next_to_use);
- if (!refill_count)
- return 0;
-
- i = bq->next_to_use;
- bq_desc = &bq->queue[i];
- i -= QLGE_BQ_LEN;
- do {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "ring %u %s: try cleaning idx %d\n",
- rx_ring->cq_id, bq_type_name[bq->type], i);
-
- if (bq->type == QLGE_SB)
- retval = qlge_refill_sb(rx_ring, bq_desc, gfp);
- else
- retval = qlge_refill_lb(rx_ring, bq_desc, gfp);
- if (retval < 0) {
- netif_err(qdev, ifup, qdev->ndev,
- "ring %u %s: Could not get a page chunk, idx %d\n",
- rx_ring->cq_id, bq_type_name[bq->type], i);
- break;
- }
-
- bq_desc++;
- i++;
- if (unlikely(!i)) {
- bq_desc = &bq->queue[0];
- i -= QLGE_BQ_LEN;
- }
- refill_count--;
- } while (refill_count);
- i += QLGE_BQ_LEN;
-
- if (bq->next_to_use != i) {
- if (QLGE_BQ_ALIGN(bq->next_to_use) != QLGE_BQ_ALIGN(i)) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "ring %u %s: updating prod idx = %d.\n",
- rx_ring->cq_id, bq_type_name[bq->type],
- i);
- qlge_write_db_reg(i, bq->prod_idx_db_reg);
- }
- bq->next_to_use = i;
- }
-
- return retval;
-}
-
-static void qlge_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp,
- unsigned long delay)
-{
- bool sbq_fail, lbq_fail;
-
- sbq_fail = !!qlge_refill_bq(&rx_ring->sbq, gfp);
- lbq_fail = !!qlge_refill_bq(&rx_ring->lbq, gfp);
-
- /* Minimum number of buffers needed to be able to receive at least one
- * frame of any format:
- * sbq: 1 for header + 1 for data
- * lbq: mtu 9000 / lb size
- * Below this, the queue might stall.
- */
- if ((sbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->sbq) < 2) ||
- (lbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->lbq) <
- DIV_ROUND_UP(9000, LARGE_BUFFER_MAX_SIZE)))
- /* Allocations can take a long time in certain cases (ex.
- * reclaim). Therefore, use a workqueue for long-running
- * work items.
- */
- queue_delayed_work_on(smp_processor_id(), system_long_wq,
- &rx_ring->refill_work, delay);
-}
-
-static void qlge_slow_refill(struct work_struct *work)
-{
- struct rx_ring *rx_ring = container_of(work, struct rx_ring,
- refill_work.work);
- struct napi_struct *napi = &rx_ring->napi;
-
- napi_disable(napi);
- qlge_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2);
- napi_enable(napi);
-
- local_bh_disable();
- /* napi_disable() might have prevented incomplete napi work from being
- * rescheduled.
- */
- napi_schedule(napi);
- /* trigger softirq processing */
- local_bh_enable();
-}
-
-/* Unmaps tx buffers. Can be called from send() if a pci mapping
- * fails at some stage, or from the interrupt when a tx completes.
- */
-static void qlge_unmap_send(struct qlge_adapter *qdev,
- struct tx_ring_desc *tx_ring_desc, int mapped)
-{
- int i;
-
- for (i = 0; i < mapped; i++) {
- if (i == 0 || (i == 7 && mapped > 7)) {
- /*
- * Unmap the skb->data area, or the
- * external sglist (AKA the Outbound
- * Address List (OAL)).
-			 * If it's the zeroth element, then it's
-			 * the skb->data area. If it's the 7th
-			 * element and there are more than 6 frags,
-			 * then it's an OAL.
- */
- if (i == 7) {
- netif_printk(qdev, tx_done, KERN_DEBUG,
- qdev->ndev,
- "unmapping OAL area.\n");
- }
- dma_unmap_single(&qdev->pdev->dev,
- dma_unmap_addr(&tx_ring_desc->map[i],
- mapaddr),
- dma_unmap_len(&tx_ring_desc->map[i],
- maplen),
- DMA_TO_DEVICE);
- } else {
- netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
- "unmapping frag %d.\n", i);
- dma_unmap_page(&qdev->pdev->dev,
- dma_unmap_addr(&tx_ring_desc->map[i],
- mapaddr),
- dma_unmap_len(&tx_ring_desc->map[i],
- maplen), DMA_TO_DEVICE);
- }
- }
-}
-
-/* Map the buffers for this transmit. This will return
- * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
- */
-static int qlge_map_send(struct qlge_adapter *qdev,
- struct qlge_ob_mac_iocb_req *mac_iocb_ptr,
- struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
-{
- int len = skb_headlen(skb);
- dma_addr_t map;
- int frag_idx, err, map_idx = 0;
- struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
- int frag_cnt = skb_shinfo(skb)->nr_frags;
-
- if (frag_cnt) {
- netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
- "frag_cnt = %d.\n", frag_cnt);
- }
- /*
- * Map the skb buffer first.
- */
- map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
-
- err = dma_mapping_error(&qdev->pdev->dev, map);
- if (err) {
- netif_err(qdev, tx_queued, qdev->ndev,
- "PCI mapping failed with error: %d\n", err);
-
- return NETDEV_TX_BUSY;
- }
-
- tbd->len = cpu_to_le32(len);
- tbd->addr = cpu_to_le64(map);
- dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
- dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
- map_idx++;
-
- /*
- * This loop fills the remainder of the 8 address descriptors
- * in the IOCB. If there are more than 7 fragments, then the
- * eighth address desc will point to an external list (OAL).
- * When this happens, the remainder of the frags will be stored
- * in this list.
- */
- for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
-
- tbd++;
- if (frag_idx == 6 && frag_cnt > 7) {
- /* Let's tack on an sglist.
- * Our control block will now
- * look like this:
- * iocb->seg[0] = skb->data
- * iocb->seg[1] = frag[0]
- * iocb->seg[2] = frag[1]
- * iocb->seg[3] = frag[2]
- * iocb->seg[4] = frag[3]
- * iocb->seg[5] = frag[4]
- * iocb->seg[6] = frag[5]
- * iocb->seg[7] = ptr to OAL (external sglist)
- * oal->seg[0] = frag[6]
- * oal->seg[1] = frag[7]
- * oal->seg[2] = frag[8]
- * oal->seg[3] = frag[9]
- * oal->seg[4] = frag[10]
- * etc...
- */
- /* Tack on the OAL in the eighth segment of IOCB. */
- map = dma_map_single(&qdev->pdev->dev, &tx_ring_desc->oal,
- sizeof(struct qlge_oal),
- DMA_TO_DEVICE);
- err = dma_mapping_error(&qdev->pdev->dev, map);
- if (err) {
- netif_err(qdev, tx_queued, qdev->ndev,
- "PCI mapping outbound address list with error: %d\n",
- err);
- goto map_error;
- }
-
- tbd->addr = cpu_to_le64(map);
- /*
- * The length is the number of fragments
-			 * that remain to be mapped times the size
-			 * of a single OAL entry (tx_buf_desc).
- */
- tbd->len =
- cpu_to_le32((sizeof(struct tx_buf_desc) *
- (frag_cnt - frag_idx)) | TX_DESC_C);
- dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
- map);
- dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
- sizeof(struct qlge_oal));
- tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
- map_idx++;
- }
-
- map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
- DMA_TO_DEVICE);
-
- err = dma_mapping_error(&qdev->pdev->dev, map);
- if (err) {
- netif_err(qdev, tx_queued, qdev->ndev,
- "PCI mapping frags failed with error: %d.\n",
- err);
- goto map_error;
- }
-
- tbd->addr = cpu_to_le64(map);
- tbd->len = cpu_to_le32(skb_frag_size(frag));
- dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
- dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
- skb_frag_size(frag));
- }
- /* Save the number of segments we've mapped. */
- tx_ring_desc->map_cnt = map_idx;
- /* Terminate the last segment. */
- tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
- return NETDEV_TX_OK;
-
-map_error:
- /*
-	 * If a mapping fails, map_idx holds the number of entries that
-	 * mapped successfully (the skb->data area plus any earlier
-	 * frags), so qlge_unmap_send() can unmap exactly those entries.
- */
- qlge_unmap_send(qdev, tx_ring_desc, map_idx);
- return NETDEV_TX_BUSY;
-}
-
-/* Categorizing receive firmware frame errors */
-static void qlge_categorize_rx_err(struct qlge_adapter *qdev, u8 rx_err,
- struct rx_ring *rx_ring)
-{
- struct nic_stats *stats = &qdev->nic_stats;
-
- stats->rx_err_count++;
- rx_ring->rx_errors++;
-
- switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
- case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
- stats->rx_code_err++;
- break;
- case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
- stats->rx_oversize_err++;
- break;
- case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
- stats->rx_undersize_err++;
- break;
- case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
- stats->rx_preamble_err++;
- break;
- case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
- stats->rx_frame_len_err++;
- break;
- case IB_MAC_IOCB_RSP_ERR_CRC:
- stats->rx_crc_err++;
- break;
- default:
- break;
- }
-}
-
-/*
- * qlge_update_mac_hdr_len - helper routine to update the mac header length
- * based on vlan tags if present
- */
-static void qlge_update_mac_hdr_len(struct qlge_adapter *qdev,
- struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
- void *page, size_t *len)
-{
- u16 *tags;
-
- if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
- return;
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
- tags = (u16 *)page;
- /* Look for stacked vlan tags in ethertype field */
- if (tags[6] == ETH_P_8021Q &&
- tags[8] == ETH_P_8021Q)
- *len += 2 * VLAN_HLEN;
- else
- *len += VLAN_HLEN;
- }
-}
-
-/* Process an inbound completion from an rx ring. */
-static void qlge_process_mac_rx_gro_page(struct qlge_adapter *qdev,
- struct rx_ring *rx_ring,
- struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
- u32 length, u16 vlan_id)
-{
- struct sk_buff *skb;
- struct qlge_bq_desc *lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
- struct napi_struct *napi = &rx_ring->napi;
-
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
- put_page(lbq_desc->p.pg_chunk.page);
- return;
- }
- napi->dev = qdev->ndev;
-
- skb = napi_get_frags(napi);
- if (!skb) {
- netif_err(qdev, drv, qdev->ndev,
- "Couldn't get an skb, exiting.\n");
- rx_ring->rx_dropped++;
- put_page(lbq_desc->p.pg_chunk.page);
- return;
- }
- prefetch(lbq_desc->p.pg_chunk.va);
- __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset,
- length);
-
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
- skb_shinfo(skb)->nr_frags++;
-
- rx_ring->rx_packets++;
- rx_ring->rx_bytes += length;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb_record_rx_queue(skb, rx_ring->cq_id);
- if (vlan_id != 0xffff)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
- napi_gro_frags(napi);
-}
-
-/* Process an inbound completion from an rx ring. */
-static void qlge_process_mac_rx_page(struct qlge_adapter *qdev,
- struct rx_ring *rx_ring,
- struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
- u32 length, u16 vlan_id)
-{
- struct net_device *ndev = qdev->ndev;
- struct sk_buff *skb = NULL;
- void *addr;
- struct qlge_bq_desc *lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
- struct napi_struct *napi = &rx_ring->napi;
- size_t hlen = ETH_HLEN;
-
- skb = netdev_alloc_skb(ndev, length);
- if (!skb) {
- rx_ring->rx_dropped++;
- put_page(lbq_desc->p.pg_chunk.page);
- return;
- }
-
- addr = lbq_desc->p.pg_chunk.va;
- prefetch(addr);
-
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
- goto err_out;
- }
-
- /* Update the MAC header length*/
- qlge_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
-
- /* The max framesize filter on this chip is set higher than
- * MTU since FCoE uses 2k frames.
- */
- if (skb->len > ndev->mtu + hlen) {
- netif_err(qdev, drv, qdev->ndev,
- "Segment too large, dropping.\n");
- rx_ring->rx_dropped++;
- goto err_out;
- }
- skb_put_data(skb, addr, hlen);
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
- length);
- skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset + hlen, length - hlen);
- skb->len += length - hlen;
- skb->data_len += length - hlen;
- skb->truesize += length - hlen;
-
- rx_ring->rx_packets++;
- rx_ring->rx_bytes += skb->len;
- skb->protocol = eth_type_trans(skb, ndev);
- skb_checksum_none_assert(skb);
-
- if ((ndev->features & NETIF_F_RXCSUM) &&
- !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
- /* TCP frame. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "TCP checksum done!\n");
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
- (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
- /* Unfragmented ipv4 UDP frame. */
- struct iphdr *iph =
- (struct iphdr *)((u8 *)addr + hlen);
- if (!(iph->frag_off &
- htons(IP_MF | IP_OFFSET))) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_printk(qdev, rx_status, KERN_DEBUG,
- qdev->ndev,
- "UDP checksum done!\n");
- }
- }
- }
-
- skb_record_rx_queue(skb, rx_ring->cq_id);
- if (vlan_id != 0xffff)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
- if (skb->ip_summed == CHECKSUM_UNNECESSARY)
- napi_gro_receive(napi, skb);
- else
- netif_receive_skb(skb);
- return;
-err_out:
- dev_kfree_skb_any(skb);
- put_page(lbq_desc->p.pg_chunk.page);
-}
-
-/* Process an inbound completion from an rx ring. */
-static void qlge_process_mac_rx_skb(struct qlge_adapter *qdev,
- struct rx_ring *rx_ring,
- struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
- u32 length, u16 vlan_id)
-{
- struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
- struct net_device *ndev = qdev->ndev;
- struct sk_buff *skb, *new_skb;
-
- skb = sbq_desc->p.skb;
- /* Allocate new_skb and copy */
- new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
- if (!new_skb) {
- rx_ring->rx_dropped++;
- return;
- }
- skb_reserve(new_skb, NET_IP_ALIGN);
-
- dma_sync_single_for_cpu(&qdev->pdev->dev, sbq_desc->dma_addr,
- SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
-
- skb_put_data(new_skb, skb->data, length);
-
- skb = new_skb;
-
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
- dev_kfree_skb_any(skb);
- return;
- }
-
- /* loopback self test for ethtool */
- if (test_bit(QL_SELFTEST, &qdev->flags)) {
- qlge_check_lb_frame(qdev, skb);
- dev_kfree_skb_any(skb);
- return;
- }
-
- /* The max framesize filter on this chip is set higher than
- * MTU since FCoE uses 2k frames.
- */
- if (skb->len > ndev->mtu + ETH_HLEN) {
- dev_kfree_skb_any(skb);
- rx_ring->rx_dropped++;
- return;
- }
-
- prefetch(skb->data);
- if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "%s Multicast.\n",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_REG ? "Registered" :
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
- }
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Promiscuous Packet.\n");
-
- rx_ring->rx_packets++;
- rx_ring->rx_bytes += skb->len;
- skb->protocol = eth_type_trans(skb, ndev);
- skb_checksum_none_assert(skb);
-
- /* If rx checksum is on, and there are no
- * csum or frame errors.
- */
- if ((ndev->features & NETIF_F_RXCSUM) &&
- !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
- /* TCP frame. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "TCP checksum done!\n");
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
- (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
- /* Unfragmented ipv4 UDP frame. */
- struct iphdr *iph = (struct iphdr *)skb->data;
-
- if (!(iph->frag_off &
- htons(IP_MF | IP_OFFSET))) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_printk(qdev, rx_status, KERN_DEBUG,
- qdev->ndev,
- "UDP checksum done!\n");
- }
- }
- }
-
- skb_record_rx_queue(skb, rx_ring->cq_id);
- if (vlan_id != 0xffff)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
- if (skb->ip_summed == CHECKSUM_UNNECESSARY)
- napi_gro_receive(&rx_ring->napi, skb);
- else
- netif_receive_skb(skb);
-}
-
-static void qlge_realign_skb(struct sk_buff *skb, int len)
-{
- void *temp_addr = skb->data;
-
- /* Undo the skb_reserve(skb,32) we did before
- * giving it to the hardware, and realign data on
- * a 2-byte boundary.
- */
- skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
- skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
- memmove(skb->data, temp_addr, len);
-}
-
-/*
- * This function builds an skb for the given inbound
- * completion. It will be rewritten for readability in the near
- * future, but for now it works well.
- */
-static struct sk_buff *qlge_build_rx_skb(struct qlge_adapter *qdev,
- struct rx_ring *rx_ring,
- struct qlge_ib_mac_iocb_rsp *ib_mac_rsp)
-{
- u32 length = le32_to_cpu(ib_mac_rsp->data_len);
- u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
- struct qlge_bq_desc *lbq_desc, *sbq_desc;
- struct sk_buff *skb = NULL;
- size_t hlen = ETH_HLEN;
-
- /*
- * Handle the header buffer if present.
- */
- if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
- ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Header of %d bytes in small buffer.\n", hdr_len);
- /*
- * Headers fit nicely into a small buffer.
- */
- sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
- dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
- SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
- skb = sbq_desc->p.skb;
- qlge_realign_skb(skb, hdr_len);
- skb_put(skb, hdr_len);
- sbq_desc->p.skb = NULL;
- }
-
- /*
- * Handle the data buffer(s).
- */
- if (unlikely(!length)) { /* Is there data too? */
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "No Data buffer in this packet.\n");
- return skb;
- }
-
- if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
- if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Headers in small, data of %d bytes in small, combine them.\n",
- length);
- /*
- * Data is less than small buffer size so it's
- * stuffed in a small buffer.
- * For this case we append the data
- * from the "data" small buffer to the "header" small
- * buffer.
- */
- sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
- dma_sync_single_for_cpu(&qdev->pdev->dev,
- sbq_desc->dma_addr,
- SMALL_BUF_MAP_SIZE,
- DMA_FROM_DEVICE);
- skb_put_data(skb, sbq_desc->p.skb->data, length);
- } else {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "%d bytes in a single small buffer.\n",
- length);
- sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
- skb = sbq_desc->p.skb;
- qlge_realign_skb(skb, length);
- skb_put(skb, length);
- dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
- SMALL_BUF_MAP_SIZE,
- DMA_FROM_DEVICE);
- sbq_desc->p.skb = NULL;
- }
- } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
- if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Header in small, %d bytes in large. Chain large to small!\n",
- length);
- /*
- * The data is in a single large buffer. We
- * chain it to the header buffer's skb and let
- * it rip.
- */
- lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Chaining page at offset = %d, for %d bytes to skb.\n",
- lbq_desc->p.pg_chunk.offset, length);
- skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset, length);
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
- } else {
- /*
- * The headers and data are in a single large buffer. We
- * chain the page to a new skb and pull the headers into
- * its linear area. This can happen with jumbo mtu on a
- * non-TCP/UDP frame.
- */
- lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
- skb = netdev_alloc_skb(qdev->ndev, length);
- if (!skb) {
- netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
- "No skb available, drop the packet.\n");
- return NULL;
- }
- dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
- qdev->lbq_buf_size,
- DMA_FROM_DEVICE);
- skb_reserve(skb, NET_IP_ALIGN);
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
- length);
- skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset,
- length);
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
- qlge_update_mac_hdr_len(qdev, ib_mac_rsp,
- lbq_desc->p.pg_chunk.va,
- &hlen);
- __pskb_pull_tail(skb, hlen);
- }
- } else {
- /*
- * The data is in a chain of large buffers
- * pointed to by a small buffer. We loop
- * through and chain them to our small header
- * buffer's skb.
- * frags: There are at most 18 frags and our small
- * buffer will hold 32 of them. The thing is,
- * we'll use at most 3 for our 9000 byte jumbo
- * frames. If the MTU goes up we could
- * eventually be in trouble.
- */
- int size, i = 0;
-
- sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
- dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
- SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
- if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
- /*
- * This is a non-TCP/UDP IP frame, so
- * the headers aren't split into a small
- * buffer. We have to use the small buffer
- * that contains our sg list as our skb to
- * send upstairs. Copy the sg list here to
- * a local buffer and use it to find the
- * pages to chain.
- */
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "%d bytes of headers & data in chain of large.\n",
- length);
- skb = sbq_desc->p.skb;
- sbq_desc->p.skb = NULL;
- skb_reserve(skb, NET_IP_ALIGN);
- }
- do {
- lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
- size = min(length, qdev->lbq_buf_size);
-
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Adding page %d to skb for %d bytes.\n",
- i, size);
- skb_fill_page_desc(skb, i,
- lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset, size);
- skb->len += size;
- skb->data_len += size;
- skb->truesize += size;
- length -= size;
- i++;
- } while (length > 0);
- qlge_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
- &hlen);
- __pskb_pull_tail(skb, hlen);
- }
- return skb;
-}
-
-/* Process an inbound completion from an rx ring. */
-static void qlge_process_mac_split_rx_intr(struct qlge_adapter *qdev,
- struct rx_ring *rx_ring,
- struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
- u16 vlan_id)
-{
- struct net_device *ndev = qdev->ndev;
- struct sk_buff *skb = NULL;
-
- skb = qlge_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
- if (unlikely(!skb)) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "No skb available, drop packet.\n");
- rx_ring->rx_dropped++;
- return;
- }
-
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
- dev_kfree_skb_any(skb);
- return;
- }
-
- /* The max framesize filter on this chip is set higher than
- * MTU since FCoE uses 2k frames.
- */
- if (skb->len > ndev->mtu + ETH_HLEN) {
- dev_kfree_skb_any(skb);
- rx_ring->rx_dropped++;
- return;
- }
-
- /* loopback self test for ethtool */
- if (test_bit(QL_SELFTEST, &qdev->flags)) {
- qlge_check_lb_frame(qdev, skb);
- dev_kfree_skb_any(skb);
- return;
- }
-
- prefetch(skb->data);
- if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_REG ? "Registered" :
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
- rx_ring->rx_multicast++;
- }
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Promiscuous Packet.\n");
- }
-
- skb->protocol = eth_type_trans(skb, ndev);
- skb_checksum_none_assert(skb);
-
- /* If rx checksum is on, and there are no
- * csum or frame errors.
- */
- if ((ndev->features & NETIF_F_RXCSUM) &&
- !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
- /* TCP frame. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "TCP checksum done!\n");
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
- (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
- /* Unfragmented ipv4 UDP frame. */
- struct iphdr *iph = (struct iphdr *)skb->data;
-
- if (!(iph->frag_off &
- htons(IP_MF | IP_OFFSET))) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "UDP checksum done!\n");
- }
- }
- }
-
- rx_ring->rx_packets++;
- rx_ring->rx_bytes += skb->len;
- skb_record_rx_queue(skb, rx_ring->cq_id);
- if (vlan_id != 0xffff)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
- if (skb->ip_summed == CHECKSUM_UNNECESSARY)
- napi_gro_receive(&rx_ring->napi, skb);
- else
- netif_receive_skb(skb);
-}
-
-/* Process an inbound completion from an rx ring. */
-static unsigned long qlge_process_mac_rx_intr(struct qlge_adapter *qdev,
- struct rx_ring *rx_ring,
- struct qlge_ib_mac_iocb_rsp *ib_mac_rsp)
-{
- u32 length = le32_to_cpu(ib_mac_rsp->data_len);
- u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
- (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
- ((le16_to_cpu(ib_mac_rsp->vlan_id) &
- IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
-
- if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
- /* The data and headers are split into
- * separate buffers.
- */
- qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
- vlan_id);
- } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
- /* The data fit in a single small buffer.
- * Allocate a new skb, copy the data and
- * return the buffer to the free pool.
- */
- qlge_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, length,
- vlan_id);
- } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
- !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
- /* TCP packet in a page chunk that's been checksummed.
- * Tack it on to our GRO skb and let it go.
- */
- qlge_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, length,
- vlan_id);
- } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
- /* Non-TCP packet in a page chunk. Allocate an
- * skb, tack it on frags, and send it up.
- */
- qlge_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, length,
- vlan_id);
- } else {
- /* Non-TCP/UDP large frames that span multiple buffers
- * can be processed correctly by the split frame logic.
- */
- qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
- vlan_id);
- }
-
- return (unsigned long)length;
-}
-
-/* Process an outbound completion from an rx ring. */
-static void qlge_process_mac_tx_intr(struct qlge_adapter *qdev,
- struct qlge_ob_mac_iocb_rsp *mac_rsp)
-{
- struct tx_ring *tx_ring;
- struct tx_ring_desc *tx_ring_desc;
-
- tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
- tx_ring_desc = &tx_ring->q[mac_rsp->tid];
- qlge_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
- tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
- tx_ring->tx_packets++;
- dev_kfree_skb(tx_ring_desc->skb);
- tx_ring_desc->skb = NULL;
-
- if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
- OB_MAC_IOCB_RSP_S |
- OB_MAC_IOCB_RSP_L |
- OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
- if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
- netif_warn(qdev, tx_done, qdev->ndev,
- "Total descriptor length did not match transfer length.\n");
- }
- if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
- netif_warn(qdev, tx_done, qdev->ndev,
- "Frame too short to be valid, not sent.\n");
- }
- if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
- netif_warn(qdev, tx_done, qdev->ndev,
- "Frame too long, but sent anyway.\n");
- }
- if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
- netif_warn(qdev, tx_done, qdev->ndev,
- "PCI backplane error. Frame not sent.\n");
- }
- }
- atomic_inc(&tx_ring->tx_count);
-}
-
-/* Fire up a handler to reset the MPI processor. */
-void qlge_queue_fw_error(struct qlge_adapter *qdev)
-{
- qlge_link_off(qdev);
- queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
-}
-
-void qlge_queue_asic_error(struct qlge_adapter *qdev)
-{
- qlge_link_off(qdev);
- qlge_disable_interrupts(qdev);
- /* Clear adapter up bit to signal the recovery
- * process that it shouldn't kill the reset worker
- * thread
- */
- clear_bit(QL_ADAPTER_UP, &qdev->flags);
- /* Set the asic recovery bit to indicate to the reset process
- * that we are in fatal error recovery rather than a normal close
- */
- set_bit(QL_ASIC_RECOVERY, &qdev->flags);
- queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
-}
-
-static void qlge_process_chip_ae_intr(struct qlge_adapter *qdev,
- struct qlge_ib_ae_iocb_rsp *ib_ae_rsp)
-{
- switch (ib_ae_rsp->event) {
- case MGMT_ERR_EVENT:
- netif_err(qdev, rx_err, qdev->ndev,
- "Management Processor Fatal Error.\n");
- qlge_queue_fw_error(qdev);
- return;
-
- case CAM_LOOKUP_ERR_EVENT:
- netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
- netdev_err(qdev->ndev, "This event shouldn't occur.\n");
- qlge_queue_asic_error(qdev);
- return;
-
- case SOFT_ECC_ERROR_EVENT:
- netdev_err(qdev->ndev, "Soft ECC error detected.\n");
- qlge_queue_asic_error(qdev);
- break;
-
- case PCI_ERR_ANON_BUF_RD:
- netdev_err(qdev->ndev,
- "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
- ib_ae_rsp->q_id);
- qlge_queue_asic_error(qdev);
- break;
-
- default:
- netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
- ib_ae_rsp->event);
- qlge_queue_asic_error(qdev);
- break;
- }
-}
-
-static int qlge_clean_outbound_rx_ring(struct rx_ring *rx_ring)
-{
- struct qlge_adapter *qdev = rx_ring->qdev;
- u32 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
- struct qlge_ob_mac_iocb_rsp *net_rsp = NULL;
- int count = 0;
-
- struct tx_ring *tx_ring;
- /* While there are entries in the completion queue. */
- while (prod != rx_ring->cnsmr_idx) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "cq_id = %d, prod = %d, cnsmr = %d\n",
- rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
-
- net_rsp = (struct qlge_ob_mac_iocb_rsp *)rx_ring->curr_entry;
- rmb();
- switch (net_rsp->opcode) {
- case OPCODE_OB_MAC_TSO_IOCB:
- case OPCODE_OB_MAC_IOCB:
- qlge_process_mac_tx_intr(qdev, net_rsp);
- break;
- default:
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Hit default case, not handled! dropping the packet, opcode = %x.\n",
- net_rsp->opcode);
- }
- count++;
- qlge_update_cq(rx_ring);
- prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
- }
- if (!net_rsp)
- return 0;
- qlge_write_cq_idx(rx_ring);
- tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
- if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
- if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
- /*
- * The queue got stopped because the tx_ring was full.
- * Wake it up, because it's now at least 25% empty.
- */
- netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
- }
-
- return count;
-}
-
-static int qlge_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
-{
- struct qlge_adapter *qdev = rx_ring->qdev;
- u32 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
- struct qlge_net_rsp_iocb *net_rsp;
- int count = 0;
-
- /* While there are entries in the completion queue. */
- while (prod != rx_ring->cnsmr_idx) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "cq_id = %d, prod = %d, cnsmr = %d\n",
- rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
-
- net_rsp = rx_ring->curr_entry;
- rmb();
- switch (net_rsp->opcode) {
- case OPCODE_IB_MAC_IOCB:
- qlge_process_mac_rx_intr(qdev, rx_ring,
- (struct qlge_ib_mac_iocb_rsp *)
- net_rsp);
- break;
-
- case OPCODE_IB_AE_IOCB:
- qlge_process_chip_ae_intr(qdev, (struct qlge_ib_ae_iocb_rsp *)
- net_rsp);
- break;
- default:
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Hit default case, not handled! dropping the packet, opcode = %x.\n",
- net_rsp->opcode);
- break;
- }
- count++;
- qlge_update_cq(rx_ring);
- prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
- if (count == budget)
- break;
- }
- qlge_update_buffer_queues(rx_ring, GFP_ATOMIC, 0);
- qlge_write_cq_idx(rx_ring);
- return count;
-}
-
-static int qlge_napi_poll_msix(struct napi_struct *napi, int budget)
-{
- struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
- struct qlge_adapter *qdev = rx_ring->qdev;
- struct rx_ring *trx_ring;
- int i, work_done = 0;
- struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
-
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
-
- /* Service the TX rings first. They start
- * right after the RSS rings.
- */
- for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
- trx_ring = &qdev->rx_ring[i];
- /* If this TX completion ring belongs to this vector and
- * it's not empty then service it.
- */
- if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
- (qlge_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
- trx_ring->cnsmr_idx)) {
- netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
- "%s: Servicing TX completion ring %d.\n",
- __func__, trx_ring->cq_id);
- qlge_clean_outbound_rx_ring(trx_ring);
- }
- }
-
- /*
- * Now service the RSS ring if it's active.
- */
- if (qlge_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
- rx_ring->cnsmr_idx) {
- netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
- "%s: Servicing RX completion ring %d.\n",
- __func__, rx_ring->cq_id);
- work_done = qlge_clean_inbound_rx_ring(rx_ring, budget);
- }
-
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
- qlge_enable_completion_interrupt(qdev, rx_ring->irq);
- }
- return work_done;
-}
-
-static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
-
- if (features & NETIF_F_HW_VLAN_CTAG_RX) {
- qlge_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
- NIC_RCV_CFG_VLAN_MATCH_AND_NON);
- } else {
- qlge_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
- }
-}
-
-/*
- * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
- * based on the features to enable/disable hardware vlan accel
- */
-static int qlge_update_hw_vlan_features(struct net_device *ndev,
- netdev_features_t features)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
- bool need_restart = netif_running(ndev);
- int status = 0;
-
- if (need_restart) {
- status = qlge_adapter_down(qdev);
- if (status) {
- netif_err(qdev, link, qdev->ndev,
- "Failed to bring down the adapter\n");
- return status;
- }
- }
-
- /* Update the features with the recent change */
- ndev->features = features;
-
- if (need_restart) {
- status = qlge_adapter_up(qdev);
- if (status) {
- netif_err(qdev, link, qdev->ndev,
- "Failed to bring up the adapter\n");
- return status;
- }
- }
-
- return status;
-}
-
-static int qlge_set_features(struct net_device *ndev,
- netdev_features_t features)
-{
- netdev_features_t changed = ndev->features ^ features;
- int err;
-
- if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
- /* Update the behavior of vlan accel in the adapter */
- err = qlge_update_hw_vlan_features(ndev, features);
- if (err)
- return err;
-
- qlge_vlan_mode(ndev, features);
- }
-
- return 0;
-}
-
-static int __qlge_vlan_rx_add_vid(struct qlge_adapter *qdev, u16 vid)
-{
- u32 enable_bit = MAC_ADDR_E;
- int err;
-
- err = qlge_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
- MAC_ADDR_TYPE_VLAN, vid);
- if (err)
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init vlan address.\n");
- return err;
-}
-
-static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
- int status;
- int err;
-
- status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- return status;
-
- err = __qlge_vlan_rx_add_vid(qdev, vid);
- set_bit(vid, qdev->active_vlans);
-
- qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
-
- return err;
-}
-
-static int __qlge_vlan_rx_kill_vid(struct qlge_adapter *qdev, u16 vid)
-{
- u32 enable_bit = 0;
- int err;
-
- err = qlge_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
- MAC_ADDR_TYPE_VLAN, vid);
- if (err)
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to clear vlan address.\n");
- return err;
-}
-
-static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
- int status;
- int err;
-
- status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- return status;
-
- err = __qlge_vlan_rx_kill_vid(qdev, vid);
- clear_bit(vid, qdev->active_vlans);
-
- qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
-
- return err;
-}
-
-static void qlge_restore_vlan(struct qlge_adapter *qdev)
-{
- int status;
- u16 vid;
-
- status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- return;
-
- for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
- __qlge_vlan_rx_add_vid(qdev, vid);
-
- qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
-}
-
-/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
-static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
-{
- struct rx_ring *rx_ring = dev_id;
-
- napi_schedule(&rx_ring->napi);
- return IRQ_HANDLED;
-}
-
-/* This handles a fatal error, MPI activity, and the default
- * rx_ring in an MSI-X multiple vector environment.
- * In an MSI/Legacy environment it also processes the rest of
- * the rx_rings.
- */
-static irqreturn_t qlge_isr(int irq, void *dev_id)
-{
- struct rx_ring *rx_ring = dev_id;
- struct qlge_adapter *qdev = rx_ring->qdev;
- struct intr_context *intr_context = &qdev->intr_context[0];
- u32 var;
- int work_done = 0;
-
- /* Experience shows that when using INTx interrupts, interrupts must
- * be masked manually.
- * When using MSI mode, INTR_EN_EN must be explicitly disabled
- * (even though it is auto-masked), otherwise a later command to
- * enable it is not effective.
- */
- if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
- qlge_disable_completion_interrupt(qdev, 0);
-
- var = qlge_read32(qdev, STS);
-
- /*
- * Check for fatal error.
- */
- if (var & STS_FE) {
- qlge_disable_completion_interrupt(qdev, 0);
- qlge_queue_asic_error(qdev);
- netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
- var = qlge_read32(qdev, ERR_STS);
- netdev_err(qdev->ndev, "Resetting chip. Error Status Register = 0x%x\n", var);
- return IRQ_HANDLED;
- }
-
- /*
- * Check MPI processor activity.
- */
- if ((var & STS_PI) &&
- (qlge_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
- /*
- * We've got an async event or mailbox completion.
- * Handle it and clear the source of the interrupt.
- */
- netif_err(qdev, intr, qdev->ndev,
- "Got MPI processor interrupt.\n");
- qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
- queue_delayed_work_on(smp_processor_id(),
- qdev->workqueue, &qdev->mpi_work, 0);
- work_done++;
- }
-
- /*
- * Get the bit-mask that shows the active queues for this
- * pass. Compare it to the queues that this irq services
- * and call napi if there's a match.
- */
- var = qlge_read32(qdev, ISR1);
- if (var & intr_context->irq_mask) {
- netif_info(qdev, intr, qdev->ndev,
- "Waking handler for rx_ring[0].\n");
- napi_schedule(&rx_ring->napi);
- work_done++;
- } else {
- /* Experience shows that the device sometimes signals an
- * interrupt but no work is scheduled from this function.
- * Nevertheless, the interrupt is auto-masked. Therefore, we
- * systematically re-enable the interrupt if we didn't
- * schedule napi.
- */
- qlge_enable_completion_interrupt(qdev, 0);
- }
-
- return work_done ? IRQ_HANDLED : IRQ_NONE;
-}
-
-static int qlge_tso(struct sk_buff *skb, struct qlge_ob_mac_tso_iocb_req *mac_iocb_ptr)
-{
- if (skb_is_gso(skb)) {
- int err;
- __be16 l3_proto = vlan_get_protocol(skb);
-
- err = skb_cow_head(skb, 0);
- if (err < 0)
- return err;
-
- mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
- mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
- mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
- mac_iocb_ptr->total_hdrs_len =
- cpu_to_le16(skb_tcp_all_headers(skb));
- mac_iocb_ptr->net_trans_offset =
- cpu_to_le16(skb_network_offset(skb) |
- skb_transport_offset(skb)
- << OB_MAC_TRANSPORT_HDR_SHIFT);
- mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
- mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
- if (likely(l3_proto == htons(ETH_P_IP))) {
- struct iphdr *iph = ip_hdr(skb);
-
- iph->check = 0;
- mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
- } else if (l3_proto == htons(ETH_P_IPV6)) {
- mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
- }
- return 1;
- }
- return 0;
-}
-
-static void qlge_hw_csum_setup(struct sk_buff *skb,
- struct qlge_ob_mac_tso_iocb_req *mac_iocb_ptr)
-{
- int len;
- struct iphdr *iph = ip_hdr(skb);
- __sum16 *check;
-
- mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
- mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
- mac_iocb_ptr->net_trans_offset =
- cpu_to_le16(skb_network_offset(skb) |
- skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
-
- mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
- len = (ntohs(iph->tot_len) - (iph->ihl << 2));
- if (likely(iph->protocol == IPPROTO_TCP)) {
- check = &(tcp_hdr(skb)->check);
- mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
- mac_iocb_ptr->total_hdrs_len =
- cpu_to_le16(skb_transport_offset(skb) +
- (tcp_hdr(skb)->doff << 2));
- } else {
- check = &(udp_hdr(skb)->check);
- mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
- mac_iocb_ptr->total_hdrs_len =
- cpu_to_le16(skb_transport_offset(skb) +
- sizeof(struct udphdr));
- }
- *check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, len, iph->protocol, 0);
-}
-
-static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
- struct qlge_ob_mac_iocb_req *mac_iocb_ptr;
- struct tx_ring_desc *tx_ring_desc;
- int tso;
- struct tx_ring *tx_ring;
- u32 tx_ring_idx = (u32)skb->queue_mapping;
-
- tx_ring = &qdev->tx_ring[tx_ring_idx];
-
- if (skb_padto(skb, ETH_ZLEN))
- return NETDEV_TX_OK;
-
- if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
- netif_info(qdev, tx_queued, qdev->ndev,
- "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
- __func__, tx_ring_idx);
- netif_stop_subqueue(ndev, tx_ring->wq_id);
- tx_ring->tx_errors++;
- return NETDEV_TX_BUSY;
- }
- tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
- mac_iocb_ptr = tx_ring_desc->queue_entry;
- memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
-
- mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
- mac_iocb_ptr->tid = tx_ring_desc->index;
- /* Store the tx queue index for this IO in the IOCB so that
- * the completion handler can find the corresponding tx_ring.
- */
- mac_iocb_ptr->txq_idx = tx_ring_idx;
- tx_ring_desc->skb = skb;
-
- mac_iocb_ptr->frame_len = cpu_to_le16((u16)skb->len);
-
- if (skb_vlan_tag_present(skb)) {
- netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
- "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
- mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
- mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
- }
- tso = qlge_tso(skb, (struct qlge_ob_mac_tso_iocb_req *)mac_iocb_ptr);
- if (tso < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
- qlge_hw_csum_setup(skb,
- (struct qlge_ob_mac_tso_iocb_req *)mac_iocb_ptr);
- }
- if (qlge_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
- NETDEV_TX_OK) {
- netif_err(qdev, tx_queued, qdev->ndev,
- "Could not map the segments.\n");
- tx_ring->tx_errors++;
- return NETDEV_TX_BUSY;
- }
-
- tx_ring->prod_idx++;
- if (tx_ring->prod_idx == tx_ring->wq_len)
- tx_ring->prod_idx = 0;
- wmb();
-
- qlge_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
- netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
- "tx queued, slot %d, len %d\n",
- tx_ring->prod_idx, skb->len);
-
- atomic_dec(&tx_ring->tx_count);
-
- if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
- netif_stop_subqueue(ndev, tx_ring->wq_id);
- if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
- /*
- * The queue got stopped because the tx_ring was full.
- * Wake it up, because it's now at least 25% empty.
- */
- netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
- }
- return NETDEV_TX_OK;
-}
-
-static void qlge_free_shadow_space(struct qlge_adapter *qdev)
-{
- if (qdev->rx_ring_shadow_reg_area) {
- dma_free_coherent(&qdev->pdev->dev,
- PAGE_SIZE,
- qdev->rx_ring_shadow_reg_area,
- qdev->rx_ring_shadow_reg_dma);
- qdev->rx_ring_shadow_reg_area = NULL;
- }
- if (qdev->tx_ring_shadow_reg_area) {
- dma_free_coherent(&qdev->pdev->dev,
- PAGE_SIZE,
- qdev->tx_ring_shadow_reg_area,
- qdev->tx_ring_shadow_reg_dma);
- qdev->tx_ring_shadow_reg_area = NULL;
- }
-}
-
-static int qlge_alloc_shadow_space(struct qlge_adapter *qdev)
-{
- qdev->rx_ring_shadow_reg_area =
- dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
- &qdev->rx_ring_shadow_reg_dma, GFP_ATOMIC);
- if (!qdev->rx_ring_shadow_reg_area) {
- netif_err(qdev, ifup, qdev->ndev,
- "Allocation of RX shadow space failed.\n");
- return -ENOMEM;
- }
-
- qdev->tx_ring_shadow_reg_area =
- dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
- &qdev->tx_ring_shadow_reg_dma, GFP_ATOMIC);
- if (!qdev->tx_ring_shadow_reg_area) {
- netif_err(qdev, ifup, qdev->ndev,
- "Allocation of TX shadow space failed.\n");
- goto err_wqp_sh_area;
- }
- return 0;
-
-err_wqp_sh_area:
- dma_free_coherent(&qdev->pdev->dev,
- PAGE_SIZE,
- qdev->rx_ring_shadow_reg_area,
- qdev->rx_ring_shadow_reg_dma);
- return -ENOMEM;
-}
-
-static void qlge_init_tx_ring(struct qlge_adapter *qdev, struct tx_ring *tx_ring)
-{
- struct tx_ring_desc *tx_ring_desc;
- int i;
- struct qlge_ob_mac_iocb_req *mac_iocb_ptr;
-
- mac_iocb_ptr = tx_ring->wq_base;
- tx_ring_desc = tx_ring->q;
- for (i = 0; i < tx_ring->wq_len; i++) {
- tx_ring_desc->index = i;
- tx_ring_desc->skb = NULL;
- tx_ring_desc->queue_entry = mac_iocb_ptr;
- mac_iocb_ptr++;
- tx_ring_desc++;
- }
- atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
-}
-
-static void qlge_free_tx_resources(struct qlge_adapter *qdev,
- struct tx_ring *tx_ring)
-{
- if (tx_ring->wq_base) {
- dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
- tx_ring->wq_base, tx_ring->wq_base_dma);
- tx_ring->wq_base = NULL;
- }
- kfree(tx_ring->q);
- tx_ring->q = NULL;
-}
-
-static int qlge_alloc_tx_resources(struct qlge_adapter *qdev,
- struct tx_ring *tx_ring)
-{
- tx_ring->wq_base =
- dma_alloc_coherent(&qdev->pdev->dev, tx_ring->wq_size,
- &tx_ring->wq_base_dma, GFP_ATOMIC);
-
- if (!tx_ring->wq_base ||
- tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
- goto pci_alloc_err;
-
- tx_ring->q =
- kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
- GFP_KERNEL);
- if (!tx_ring->q)
- goto err;
-
- return 0;
-err:
- dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
- tx_ring->wq_base, tx_ring->wq_base_dma);
- tx_ring->wq_base = NULL;
-pci_alloc_err:
- netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
- return -ENOMEM;
-}
-
-static void qlge_free_lbq_buffers(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
-{
- struct qlge_bq *lbq = &rx_ring->lbq;
- unsigned int last_offset;
-
- last_offset = qlge_lbq_block_size(qdev) - qdev->lbq_buf_size;
- while (lbq->next_to_clean != lbq->next_to_use) {
- struct qlge_bq_desc *lbq_desc =
- &lbq->queue[lbq->next_to_clean];
-
- if (lbq_desc->p.pg_chunk.offset == last_offset)
- dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
- qlge_lbq_block_size(qdev),
- DMA_FROM_DEVICE);
- put_page(lbq_desc->p.pg_chunk.page);
-
- lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
- }
-
- if (rx_ring->master_chunk.page) {
- dma_unmap_page(&qdev->pdev->dev, rx_ring->chunk_dma_addr,
- qlge_lbq_block_size(qdev), DMA_FROM_DEVICE);
- put_page(rx_ring->master_chunk.page);
- rx_ring->master_chunk.page = NULL;
- }
-}
-
-static void qlge_free_sbq_buffers(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
-{
- int i;
-
- for (i = 0; i < QLGE_BQ_LEN; i++) {
- struct qlge_bq_desc *sbq_desc = &rx_ring->sbq.queue[i];
-
- if (!sbq_desc) {
- netif_err(qdev, ifup, qdev->ndev,
- "sbq_desc %d is NULL.\n", i);
- return;
- }
- if (sbq_desc->p.skb) {
- dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
- SMALL_BUF_MAP_SIZE,
- DMA_FROM_DEVICE);
- dev_kfree_skb(sbq_desc->p.skb);
- sbq_desc->p.skb = NULL;
- }
- }
-}
-
-/* Free all large and small rx buffers associated
- * with the completion queues for this device.
- */
-static void qlge_free_rx_buffers(struct qlge_adapter *qdev)
-{
- int i;
-
- for (i = 0; i < qdev->rx_ring_count; i++) {
- struct rx_ring *rx_ring = &qdev->rx_ring[i];
-
- if (rx_ring->lbq.queue)
- qlge_free_lbq_buffers(qdev, rx_ring);
- if (rx_ring->sbq.queue)
- qlge_free_sbq_buffers(qdev, rx_ring);
- }
-}
-
-static void qlge_alloc_rx_buffers(struct qlge_adapter *qdev)
-{
- int i;
-
- for (i = 0; i < qdev->rss_ring_count; i++)
- qlge_update_buffer_queues(&qdev->rx_ring[i], GFP_KERNEL,
- HZ / 2);
-}
-
-static int qlge_init_bq(struct qlge_bq *bq)
-{
- struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
- struct qlge_adapter *qdev = rx_ring->qdev;
- struct qlge_bq_desc *bq_desc;
- __le64 *buf_ptr;
- int i;
-
- bq->base = dma_alloc_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
- &bq->base_dma, GFP_ATOMIC);
- if (!bq->base)
- return -ENOMEM;
-
- bq->queue = kmalloc_array(QLGE_BQ_LEN, sizeof(struct qlge_bq_desc),
- GFP_KERNEL);
- if (!bq->queue)
- return -ENOMEM;
-
- buf_ptr = bq->base;
- bq_desc = &bq->queue[0];
- for (i = 0; i < QLGE_BQ_LEN; i++, buf_ptr++, bq_desc++) {
- bq_desc->p.skb = NULL;
- bq_desc->index = i;
- bq_desc->buf_ptr = buf_ptr;
- }
-
- return 0;
-}
-
-static void qlge_free_rx_resources(struct qlge_adapter *qdev,
- struct rx_ring *rx_ring)
-{
- /* Free the small buffer queue. */
- if (rx_ring->sbq.base) {
- dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
- rx_ring->sbq.base, rx_ring->sbq.base_dma);
- rx_ring->sbq.base = NULL;
- }
-
- /* Free the small buffer queue control blocks. */
- kfree(rx_ring->sbq.queue);
- rx_ring->sbq.queue = NULL;
-
- /* Free the large buffer queue. */
- if (rx_ring->lbq.base) {
- dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
- rx_ring->lbq.base, rx_ring->lbq.base_dma);
- rx_ring->lbq.base = NULL;
- }
-
- /* Free the large buffer queue control blocks. */
- kfree(rx_ring->lbq.queue);
- rx_ring->lbq.queue = NULL;
-
- /* Free the rx queue. */
- if (rx_ring->cq_base) {
- dma_free_coherent(&qdev->pdev->dev,
- rx_ring->cq_size,
- rx_ring->cq_base, rx_ring->cq_base_dma);
- rx_ring->cq_base = NULL;
- }
-}
-
- /* Allocate queues and buffers for this completion queue based
- * on the values in the parameter structure.
- */
-static int qlge_alloc_rx_resources(struct qlge_adapter *qdev,
- struct rx_ring *rx_ring)
-{
- /*
- * Allocate the completion queue for this rx_ring.
- */
- rx_ring->cq_base =
- dma_alloc_coherent(&qdev->pdev->dev, rx_ring->cq_size,
- &rx_ring->cq_base_dma, GFP_ATOMIC);
-
- if (!rx_ring->cq_base) {
- netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
- return -ENOMEM;
- }
-
- if (rx_ring->cq_id < qdev->rss_ring_count &&
- (qlge_init_bq(&rx_ring->sbq) || qlge_init_bq(&rx_ring->lbq))) {
- qlge_free_rx_resources(qdev, rx_ring);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void qlge_tx_ring_clean(struct qlge_adapter *qdev)
-{
- struct tx_ring *tx_ring;
- struct tx_ring_desc *tx_ring_desc;
- int i, j;
-
- /*
- * Loop through all queues and free
- * any resources.
- */
- for (j = 0; j < qdev->tx_ring_count; j++) {
- tx_ring = &qdev->tx_ring[j];
- for (i = 0; i < tx_ring->wq_len; i++) {
- tx_ring_desc = &tx_ring->q[i];
- if (tx_ring_desc && tx_ring_desc->skb) {
- netif_err(qdev, ifdown, qdev->ndev,
- "Freeing lost SKB %p, from queue %d, index %d.\n",
- tx_ring_desc->skb, j,
- tx_ring_desc->index);
- qlge_unmap_send(qdev, tx_ring_desc,
- tx_ring_desc->map_cnt);
- dev_kfree_skb(tx_ring_desc->skb);
- tx_ring_desc->skb = NULL;
- }
- }
- }
-}
-
-static void qlge_free_mem_resources(struct qlge_adapter *qdev)
-{
- int i;
-
- for (i = 0; i < qdev->tx_ring_count; i++)
- qlge_free_tx_resources(qdev, &qdev->tx_ring[i]);
- for (i = 0; i < qdev->rx_ring_count; i++)
- qlge_free_rx_resources(qdev, &qdev->rx_ring[i]);
- qlge_free_shadow_space(qdev);
-}
-
-static int qlge_alloc_mem_resources(struct qlge_adapter *qdev)
-{
- int i;
-
- /* Allocate space for our shadow registers and such. */
- if (qlge_alloc_shadow_space(qdev))
- return -ENOMEM;
-
- for (i = 0; i < qdev->rx_ring_count; i++) {
- if (qlge_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
- netif_err(qdev, ifup, qdev->ndev,
- "RX resource allocation failed.\n");
- goto err_mem;
- }
- }
- /* Allocate tx queue resources */
- for (i = 0; i < qdev->tx_ring_count; i++) {
- if (qlge_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
- netif_err(qdev, ifup, qdev->ndev,
- "TX resource allocation failed.\n");
- goto err_mem;
- }
- }
- return 0;
-
-err_mem:
- qlge_free_mem_resources(qdev);
- return -ENOMEM;
-}
-
-/* Set up the rx ring control block and pass it to the chip.
- * The control block is defined as
- * "Completion Queue Initialization Control Block", or cqicb.
- */
-static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
-{
- struct cqicb *cqicb = &rx_ring->cqicb;
- void *shadow_reg = qdev->rx_ring_shadow_reg_area +
- (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
- u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
- (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
- void __iomem *doorbell_area =
- qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
- int err = 0;
- u64 dma;
- __le64 *base_indirect_ptr;
- int page_entries;
-
- /* Set up the shadow registers for this ring. */
- rx_ring->prod_idx_sh_reg = shadow_reg;
- rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
- *rx_ring->prod_idx_sh_reg = 0;
- shadow_reg += sizeof(u64);
- shadow_reg_dma += sizeof(u64);
- rx_ring->lbq.base_indirect = shadow_reg;
- rx_ring->lbq.base_indirect_dma = shadow_reg_dma;
- shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
- shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
- rx_ring->sbq.base_indirect = shadow_reg;
- rx_ring->sbq.base_indirect_dma = shadow_reg_dma;
-
- /* PCI doorbell mem area + 0x00 for consumer index register */
- rx_ring->cnsmr_idx_db_reg = (u32 __iomem *)doorbell_area;
- rx_ring->cnsmr_idx = 0;
- rx_ring->curr_entry = rx_ring->cq_base;
-
- /* PCI doorbell mem area + 0x04 for valid register */
- rx_ring->valid_db_reg = doorbell_area + 0x04;
-
- /* PCI doorbell mem area + 0x18 for large buffer consumer */
- rx_ring->lbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x18);
-
- /* PCI doorbell mem area + 0x1c */
- rx_ring->sbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x1c);
-
- memset((void *)cqicb, 0, sizeof(struct cqicb));
- cqicb->msix_vect = rx_ring->irq;
-
- cqicb->len = cpu_to_le16(QLGE_FIT16(rx_ring->cq_len) | LEN_V |
- LEN_CPP_CONT);
-
- cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
-
- cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
-
- /*
- * Set up the control block load flags.
- */
- cqicb->flags = FLAGS_LC | /* Load queue base address */
- FLAGS_LV | /* Load MSI-X vector */
- FLAGS_LI; /* Load irq delay values */
- if (rx_ring->cq_id < qdev->rss_ring_count) {
- cqicb->flags |= FLAGS_LL; /* Load lbq values */
- dma = (u64)rx_ring->lbq.base_dma;
- base_indirect_ptr = rx_ring->lbq.base_indirect;
-
- for (page_entries = 0;
- page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN);
- page_entries++) {
- base_indirect_ptr[page_entries] = cpu_to_le64(dma);
- dma += DB_PAGE_SIZE;
- }
- cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma);
- cqicb->lbq_buf_size =
- cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
- cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
- rx_ring->lbq.next_to_use = 0;
- rx_ring->lbq.next_to_clean = 0;
-
- cqicb->flags |= FLAGS_LS; /* Load sbq values */
- dma = (u64)rx_ring->sbq.base_dma;
- base_indirect_ptr = rx_ring->sbq.base_indirect;
-
- for (page_entries = 0;
- page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN);
- page_entries++) {
- base_indirect_ptr[page_entries] = cpu_to_le64(dma);
- dma += DB_PAGE_SIZE;
- }
- cqicb->sbq_addr =
- cpu_to_le64(rx_ring->sbq.base_indirect_dma);
- cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
- cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
- rx_ring->sbq.next_to_use = 0;
- rx_ring->sbq.next_to_clean = 0;
- }
- if (rx_ring->cq_id < qdev->rss_ring_count) {
- /* Inbound completion handling rx_rings run in
- * separate NAPI contexts.
- */
- netif_napi_add(qdev->ndev, &rx_ring->napi,
- qlge_napi_poll_msix);
- cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
- cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
- } else {
- cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
- cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
- }
- err = qlge_write_cfg(qdev, cqicb, sizeof(struct cqicb),
- CFG_LCQ, rx_ring->cq_id);
- if (err) {
- netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
- return err;
- }
- return err;
-}
-
-static int qlge_start_tx_ring(struct qlge_adapter *qdev, struct tx_ring *tx_ring)
-{
- struct wqicb *wqicb = (struct wqicb *)tx_ring;
- void __iomem *doorbell_area =
- qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
- void *shadow_reg = qdev->tx_ring_shadow_reg_area +
- (tx_ring->wq_id * sizeof(u64));
- u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
- (tx_ring->wq_id * sizeof(u64));
- int err = 0;
-
- /*
- * Assign doorbell registers for this tx_ring.
- */
- /* TX PCI doorbell mem area for tx producer index */
- tx_ring->prod_idx_db_reg = (u32 __iomem *)doorbell_area;
- tx_ring->prod_idx = 0;
- /* TX PCI doorbell mem area + 0x04 */
- tx_ring->valid_db_reg = doorbell_area + 0x04;
-
- /*
- * Assign shadow registers for this tx_ring.
- */
- tx_ring->cnsmr_idx_sh_reg = shadow_reg;
- tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
-
- wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
- wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
- Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
- wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
- wqicb->rid = 0;
- wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
-
- wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
-
- qlge_init_tx_ring(qdev, tx_ring);
-
- err = qlge_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
- (u16)tx_ring->wq_id);
- if (err) {
- netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
- return err;
- }
- return err;
-}
-
-static void qlge_disable_msix(struct qlge_adapter *qdev)
-{
- if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
- pci_disable_msix(qdev->pdev);
- clear_bit(QL_MSIX_ENABLED, &qdev->flags);
- kfree(qdev->msi_x_entry);
- qdev->msi_x_entry = NULL;
- } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
- pci_disable_msi(qdev->pdev);
- clear_bit(QL_MSI_ENABLED, &qdev->flags);
- }
-}
-
-/* We start by trying to get the number of vectors
- * stored in qdev->intr_count. If we can't get that
- * many, we accept however many the device gives us
- * (at least one), or fall back to MSI/legacy interrupts.
- */
-static void qlge_enable_msix(struct qlge_adapter *qdev)
-{
- int i, err;
-
- /* Get the MSIX vectors. */
- if (qlge_irq_type == MSIX_IRQ) {
- /* Try to alloc space for the msix struct,
- * if it fails then go to MSI/legacy.
- */
- qdev->msi_x_entry = kcalloc(qdev->intr_count,
- sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!qdev->msi_x_entry) {
- qlge_irq_type = MSI_IRQ;
- goto msi;
- }
-
- for (i = 0; i < qdev->intr_count; i++)
- qdev->msi_x_entry[i].entry = i;
-
- err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
- 1, qdev->intr_count);
- if (err < 0) {
- kfree(qdev->msi_x_entry);
- qdev->msi_x_entry = NULL;
- netif_warn(qdev, ifup, qdev->ndev,
- "MSI-X Enable failed, trying MSI.\n");
- qlge_irq_type = MSI_IRQ;
- } else {
- qdev->intr_count = err;
- set_bit(QL_MSIX_ENABLED, &qdev->flags);
- netif_info(qdev, ifup, qdev->ndev,
- "MSI-X Enabled, got %d vectors.\n",
- qdev->intr_count);
- return;
- }
- }
-msi:
- qdev->intr_count = 1;
- if (qlge_irq_type == MSI_IRQ) {
- if (pci_alloc_irq_vectors(qdev->pdev, 1, 1, PCI_IRQ_MSI) >= 0) {
- set_bit(QL_MSI_ENABLED, &qdev->flags);
- netif_info(qdev, ifup, qdev->ndev,
- "Running with MSI interrupts.\n");
- return;
- }
- }
- qlge_irq_type = LEG_IRQ;
- set_bit(QL_LEGACY_ENABLED, &qdev->flags);
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "Running with legacy interrupts.\n");
-}
-
-/* Each vector services 1 RSS ring and 1 or more
- * TX completion rings. This function loops through
- * the TX completion rings and assigns the vector that
- * will service it. An example would be if there are
- * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
- * This would mean that vector 0 would service RSS ring 0
- * and TX completion rings 0,1,2 and 3. Vector 1 would
- * service RSS ring 1 and TX completion rings 4,5,6 and 7.
- */
-static void qlge_set_tx_vect(struct qlge_adapter *qdev)
-{
- int i, j, vect;
- u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
-
- if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
- /* Assign irq vectors to TX rx_rings.*/
- for (vect = 0, j = 0, i = qdev->rss_ring_count;
- i < qdev->rx_ring_count; i++) {
- if (j == tx_rings_per_vector) {
- vect++;
- j = 0;
- }
- qdev->rx_ring[i].irq = vect;
- j++;
- }
- } else {
- /* For single vector all rings have an irq
- * of zero.
- */
- for (i = 0; i < qdev->rx_ring_count; i++)
- qdev->rx_ring[i].irq = 0;
- }
-}
-
-/* Set the interrupt mask for this vector. Each vector
- * will service 1 RSS ring and 1 or more TX completion
- * rings. This function sets up a bit mask per vector
- * that indicates which rings it services.
- */
-static void qlge_set_irq_mask(struct qlge_adapter *qdev, struct intr_context *ctx)
-{
- int j, vect = ctx->intr;
- u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
-
- if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
- /* Add the RSS ring serviced by this vector
- * to the mask.
- */
- ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
- /* Add the TX ring(s) serviced by this vector
- * to the mask.
- */
- for (j = 0; j < tx_rings_per_vector; j++) {
- ctx->irq_mask |=
- (1 << qdev->rx_ring[qdev->rss_ring_count +
- (vect * tx_rings_per_vector) + j].cq_id);
- }
- } else {
- /* For single vector we just shift each queue's
- * ID into the mask.
- */
- for (j = 0; j < qdev->rx_ring_count; j++)
- ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
- }
-}
-
-/*
- * Here we build the intr_context structures based on
- * our rx_ring count and intr vector count.
- * The intr_context structure is used to hook each vector
- * to possibly different handlers.
- */
-static void qlge_resolve_queues_to_irqs(struct qlge_adapter *qdev)
-{
- int i = 0;
- struct intr_context *intr_context = &qdev->intr_context[0];
-
- if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
- /* Each rx_ring has its
- * own intr_context since we have separate
- * vectors for each queue.
- */
- for (i = 0; i < qdev->intr_count; i++, intr_context++) {
- qdev->rx_ring[i].irq = i;
- intr_context->intr = i;
- intr_context->qdev = qdev;
- /* Set up this vector's bit-mask that indicates
- * which queues it services.
- */
- qlge_set_irq_mask(qdev, intr_context);
- /*
- * We set up each vector's enable/disable/read bits so
- * there are no bit/mask calculations in the critical path.
- */
- intr_context->intr_en_mask =
- INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
- INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
- | i;
- intr_context->intr_dis_mask =
- INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
- INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
- INTR_EN_IHD | i;
- intr_context->intr_read_mask =
- INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
- INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
- i;
- if (i == 0) {
- /* The first vector/queue handles
- * broadcast/multicast, fatal errors,
- * and firmware events. This is in addition
- * to normal inbound NAPI processing.
- */
- intr_context->handler = qlge_isr;
- sprintf(intr_context->name, "%s-rx-%d",
- qdev->ndev->name, i);
- } else {
- /*
- * Inbound queues handle unicast frames only.
- */
- intr_context->handler = qlge_msix_rx_isr;
- sprintf(intr_context->name, "%s-rx-%d",
- qdev->ndev->name, i);
- }
- }
- } else {
- /*
- * All rx_rings use the same intr_context since
- * there is only one vector.
- */
- intr_context->intr = 0;
- intr_context->qdev = qdev;
- /*
- * We set up each vector's enable/disable/read bits so
- * there are no bit/mask calculations in the critical path.
- */
- intr_context->intr_en_mask =
- INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
- intr_context->intr_dis_mask =
- INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
- INTR_EN_TYPE_DISABLE;
- if (test_bit(QL_LEGACY_ENABLED, &qdev->flags)) {
- /* Experience shows that when using INTx interrupts,
- * the device does not always auto-mask INTR_EN_EN.
- * Moreover, masking INTR_EN_EN manually does not
- * immediately prevent interrupt generation.
- */
- intr_context->intr_en_mask |= INTR_EN_EI << 16 |
- INTR_EN_EI;
- intr_context->intr_dis_mask |= INTR_EN_EI << 16;
- }
- intr_context->intr_read_mask =
- INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
- /*
- * Single interrupt means one handler for all rings.
- */
- intr_context->handler = qlge_isr;
- sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
- /* Set up this vector's bit-mask that indicates
- * which queues it services. In this case there is
- * a single vector so it will service all RSS and
- * TX completion rings.
- */
- qlge_set_irq_mask(qdev, intr_context);
- }
- /* Tell the TX completion rings which MSIx vector
- * they will be using.
- */
- qlge_set_tx_vect(qdev);
-}
-
-static void qlge_free_irq(struct qlge_adapter *qdev)
-{
- int i;
- struct intr_context *intr_context = &qdev->intr_context[0];
-
- for (i = 0; i < qdev->intr_count; i++, intr_context++) {
- if (intr_context->hooked) {
- if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
- free_irq(qdev->msi_x_entry[i].vector,
- &qdev->rx_ring[i]);
- } else {
- free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
- }
- }
- }
- qlge_disable_msix(qdev);
-}
-
-static int qlge_request_irq(struct qlge_adapter *qdev)
-{
- int i;
- int status = 0;
- struct pci_dev *pdev = qdev->pdev;
- struct intr_context *intr_context = &qdev->intr_context[0];
-
- qlge_resolve_queues_to_irqs(qdev);
-
- for (i = 0; i < qdev->intr_count; i++, intr_context++) {
- if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
- status = request_irq(qdev->msi_x_entry[i].vector,
- intr_context->handler,
- 0,
- intr_context->name,
- &qdev->rx_ring[i]);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed request for MSIX interrupt %d.\n",
- i);
- goto err_irq;
- }
- } else {
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "trying msi or legacy interrupts.\n");
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "%s: irq = %d.\n", __func__, pdev->irq);
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "%s: context->name = %s.\n", __func__,
- intr_context->name);
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "%s: dev_id = 0x%p.\n", __func__,
- &qdev->rx_ring[0]);
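-			/* MSI interrupts are not shared, so only the INTx
-			 * fallback below requests IRQF_SHARED.
-			 */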
- status =
- request_irq(pdev->irq, qlge_isr,
- test_bit(QL_MSI_ENABLED, &qdev->flags)
- ? 0
- : IRQF_SHARED,
- intr_context->name, &qdev->rx_ring[0]);
- if (status)
- goto err_irq;
-
- netif_err(qdev, ifup, qdev->ndev,
- "Hooked intr 0, queue type RX_Q, with name %s.\n",
- intr_context->name);
- }
- intr_context->hooked = 1;
- }
- return status;
-err_irq:
- netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
- qlge_free_irq(qdev);
- return status;
-}
-
-static int qlge_start_rss(struct qlge_adapter *qdev)
-{
- static const u8 init_hash_seed[] = {
- 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
- 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
- 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
- 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
- 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
- };
- struct ricb *ricb = &qdev->ricb;
- int status = 0;
- int i;
- u8 *hash_id = (u8 *)ricb->hash_cq_id;
-
- memset((void *)ricb, 0, sizeof(*ricb));
-
- ricb->base_cq = RSS_L4K;
- ricb->flags =
- (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
- ricb->mask = cpu_to_le16((u16)(0x3ff));
-
- /*
- * Fill out the Indirection Table.
- */
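-	/* Each of the 1024 entries selects the RSS completion queue for
-	 * one hash bucket.  The (rss_ring_count - 1) mask spreads the
-	 * buckets evenly only when rss_ring_count is a power of two.
-	 */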
- for (i = 0; i < 1024; i++)
- hash_id[i] = (i & (qdev->rss_ring_count - 1));
-
- memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
- memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
-
- status = qlge_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
- return status;
- }
- return status;
-}
-
-static int qlge_clear_routing_entries(struct qlge_adapter *qdev)
-{
- int i, status = 0;
-
- status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
- if (status)
- return status;
- /* Clear all the entries in the routing table. */
- for (i = 0; i < 16; i++) {
- status = qlge_set_routing_reg(qdev, i, 0, 0);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init routing register for CAM packets.\n");
- break;
- }
- }
- qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
- return status;
-}
-
-/* Initialize the frame-to-queue routing. */
-static int qlge_route_initialize(struct qlge_adapter *qdev)
-{
- int status = 0;
-
- /* Clear all the entries in the routing table. */
- status = qlge_clear_routing_entries(qdev);
- if (status)
- return status;
-
- status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
- if (status)
- return status;
-
- status = qlge_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
- RT_IDX_IP_CSUM_ERR, 1);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init routing register for IP CSUM error packets.\n");
- goto exit;
- }
- status = qlge_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
- RT_IDX_TU_CSUM_ERR, 1);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init routing register for TCP/UDP CSUM error packets.\n");
- goto exit;
- }
- status = qlge_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init routing register for broadcast packets.\n");
- goto exit;
- }
- /* If we have more than one inbound queue, then turn on RSS in the
- * routing block.
- */
- if (qdev->rss_ring_count > 1) {
- status = qlge_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
- RT_IDX_RSS_MATCH, 1);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init routing register for MATCH RSS packets.\n");
- goto exit;
- }
- }
-
- status = qlge_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
- RT_IDX_CAM_HIT, 1);
- if (status)
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init routing register for CAM packets.\n");
-exit:
- qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
- return status;
-}
-
-int qlge_cam_route_initialize(struct qlge_adapter *qdev)
-{
- int status, set;
-
-	/* Check if the link is up and use that to determine
-	 * whether we are setting or clearing the MAC address
-	 * in the CAM.
-	 */
- set = qlge_read32(qdev, STS);
- set &= qdev->port_link_up;
- status = qlge_set_mac_addr(qdev, set);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
- return status;
- }
-
- status = qlge_route_initialize(qdev);
- if (status)
- netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
-
- return status;
-}
-
-static int qlge_adapter_initialize(struct qlge_adapter *qdev)
-{
- u32 value, mask;
- int i;
- int status = 0;
-
- /*
- * Set up the System register to halt on errors.
- */
- value = SYS_EFE | SYS_FAE;
- mask = value << 16;
- qlge_write32(qdev, SYS, mask | value);
-
- /* Set the default queue, and VLAN behavior. */
- value = NIC_RCV_CFG_DFQ;
- mask = NIC_RCV_CFG_DFQ_MASK;
- if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
- value |= NIC_RCV_CFG_RV;
- mask |= (NIC_RCV_CFG_RV << 16);
- }
- qlge_write32(qdev, NIC_RCV_CFG, (mask | value));
-
- /* Set the MPI interrupt to enabled. */
- qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
-
- /* Enable the function, set pagesize, enable error checking. */
- value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
- FSC_EC | FSC_VM_PAGE_4K;
- value |= SPLT_SETTING;
-
- /* Set/clear header splitting. */
- mask = FSC_VM_PAGESIZE_MASK |
- FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
- qlge_write32(qdev, FSC, mask | value);
-
- qlge_write32(qdev, SPLT_HDR, SPLT_LEN);
-
-	/* Set RX packet routing to use the port/PCI function on which
-	 * the packet arrived, in addition to the usual frame routing.
- * This is helpful on bonding where both interfaces can have
- * the same MAC address.
- */
- qlge_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
- /* Reroute all packets to our Interface.
- * They may have been routed to MPI firmware
- * due to WOL.
- */
- value = qlge_read32(qdev, MGMT_RCV_CFG);
- value &= ~MGMT_RCV_CFG_RM;
- mask = 0xffff0000;
-
- /* Sticky reg needs clearing due to WOL. */
- qlge_write32(qdev, MGMT_RCV_CFG, mask);
- qlge_write32(qdev, MGMT_RCV_CFG, mask | value);
-
-	/* Default WOL is enabled on Mezz cards */
- if (qdev->pdev->subsystem_device == 0x0068 ||
- qdev->pdev->subsystem_device == 0x0180)
- qdev->wol = WAKE_MAGIC;
-
- /* Start up the rx queues. */
- for (i = 0; i < qdev->rx_ring_count; i++) {
- status = qlge_start_rx_ring(qdev, &qdev->rx_ring[i]);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to start rx ring[%d].\n", i);
- return status;
- }
- }
-
- /* If there is more than one inbound completion queue
- * then download a RICB to configure RSS.
- */
- if (qdev->rss_ring_count > 1) {
- status = qlge_start_rss(qdev);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
- return status;
- }
- }
-
- /* Start up the tx queues. */
- for (i = 0; i < qdev->tx_ring_count; i++) {
- status = qlge_start_tx_ring(qdev, &qdev->tx_ring[i]);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to start tx ring[%d].\n", i);
- return status;
- }
- }
-
- /* Initialize the port and set the max framesize. */
- status = qdev->nic_ops->port_initialize(qdev);
- if (status)
- netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
-
- /* Set up the MAC address and frame routing filter. */
- status = qlge_cam_route_initialize(qdev);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init CAM/Routing tables.\n");
- return status;
- }
-
- /* Start NAPI for the RSS queues. */
- for (i = 0; i < qdev->rss_ring_count; i++)
- napi_enable(&qdev->rx_ring[i].napi);
-
- return status;
-}
-
-/* Issue soft reset to chip. */
-static int qlge_adapter_reset(struct qlge_adapter *qdev)
-{
- u32 value;
- int status = 0;
- unsigned long end_jiffies;
-
- /* Clear all the entries in the routing table. */
- status = qlge_clear_routing_entries(qdev);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
- return status;
- }
-
-	/* If the recovery bit is set, skip the mailbox command and
-	 * clear the bit; otherwise we are in the normal reset process.
-	 */
- if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
- /* Stop management traffic. */
- qlge_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
-
- /* Wait for the NIC and MGMNT FIFOs to empty. */
- qlge_wait_fifo_empty(qdev);
- } else {
- clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
- }
-
- qlge_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
-
- end_jiffies = jiffies + usecs_to_jiffies(30);
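-	/* usecs_to_jiffies(30) rounds up to at least one jiffy, so this
-	 * busy-waits for up to roughly one tick for the reset to clear.
-	 */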
- do {
- value = qlge_read32(qdev, RST_FO);
- if ((value & RST_FO_FR) == 0)
- break;
- cpu_relax();
- } while (time_before(jiffies, end_jiffies));
-
- if (value & RST_FO_FR) {
- netif_err(qdev, ifdown, qdev->ndev,
- "ETIMEDOUT!!! errored out of resetting the chip!\n");
- status = -ETIMEDOUT;
- }
-
- /* Resume management traffic. */
- qlge_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
- return status;
-}
-
-static void qlge_display_dev_info(struct net_device *ndev)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
-
- netif_info(qdev, probe, qdev->ndev,
- "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, XG Roll = %d, XG Rev = %d.\n",
- qdev->func,
- qdev->port,
- qdev->chip_rev_id & 0x0000000f,
- qdev->chip_rev_id >> 4 & 0x0000000f,
- qdev->chip_rev_id >> 8 & 0x0000000f,
- qdev->chip_rev_id >> 12 & 0x0000000f);
- netif_info(qdev, probe, qdev->ndev,
- "MAC address %pM\n", ndev->dev_addr);
-}
-
-static int qlge_wol(struct qlge_adapter *qdev)
-{
- int status = 0;
- u32 wol = MB_WOL_DISABLE;
-
- /* The CAM is still intact after a reset, but if we
- * are doing WOL, then we may need to program the
- * routing regs. We would also need to issue the mailbox
- * commands to instruct the MPI what to do per the ethtool
- * settings.
- */
-
- if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
- WAKE_MCAST | WAKE_BCAST)) {
- netif_err(qdev, ifdown, qdev->ndev,
- "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
- qdev->wol);
- return -EINVAL;
- }
-
- if (qdev->wol & WAKE_MAGIC) {
- status = qlge_mb_wol_set_magic(qdev, 1);
- if (status) {
- netif_err(qdev, ifdown, qdev->ndev,
- "Failed to set magic packet on %s.\n",
- qdev->ndev->name);
- return status;
- }
- netif_info(qdev, drv, qdev->ndev,
- "Enabled magic packet successfully on %s.\n",
- qdev->ndev->name);
-
- wol |= MB_WOL_MAGIC_PKT;
- }
-
- if (qdev->wol) {
- wol |= MB_WOL_MODE_ON;
- status = qlge_mb_wol_mode(qdev, wol);
- netif_err(qdev, drv, qdev->ndev,
- "WOL %s (wol code 0x%x) on %s\n",
- (status == 0) ? "Successfully set" : "Failed",
- wol, qdev->ndev->name);
- }
-
- return status;
-}
-
-static void qlge_cancel_all_work_sync(struct qlge_adapter *qdev)
-{
- /* Don't kill the reset worker thread if we
- * are in the process of recovery.
- */
- if (test_bit(QL_ADAPTER_UP, &qdev->flags))
- cancel_delayed_work_sync(&qdev->asic_reset_work);
- cancel_delayed_work_sync(&qdev->mpi_reset_work);
- cancel_delayed_work_sync(&qdev->mpi_work);
- cancel_delayed_work_sync(&qdev->mpi_idc_work);
- cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
-}
-
-static int qlge_adapter_down(struct qlge_adapter *qdev)
-{
- int i, status = 0;
-
- qlge_link_off(qdev);
-
- qlge_cancel_all_work_sync(qdev);
-
- for (i = 0; i < qdev->rss_ring_count; i++)
- napi_disable(&qdev->rx_ring[i].napi);
-
- clear_bit(QL_ADAPTER_UP, &qdev->flags);
-
- qlge_disable_interrupts(qdev);
-
- qlge_tx_ring_clean(qdev);
-
-	/* Call netif_napi_del() from a common point. */
- for (i = 0; i < qdev->rss_ring_count; i++)
- netif_napi_del(&qdev->rx_ring[i].napi);
-
- status = qlge_adapter_reset(qdev);
- if (status)
- netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
- qdev->func);
- qlge_free_rx_buffers(qdev);
-
- return status;
-}
-
-static int qlge_adapter_up(struct qlge_adapter *qdev)
-{
- int err = 0;
-
- err = qlge_adapter_initialize(qdev);
- if (err) {
- netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
- goto err_init;
- }
- set_bit(QL_ADAPTER_UP, &qdev->flags);
- qlge_alloc_rx_buffers(qdev);
-	/* If the port is initialized and the link is up
-	 * then turn on the carrier.
-	 */
- if ((qlge_read32(qdev, STS) & qdev->port_init) &&
- (qlge_read32(qdev, STS) & qdev->port_link_up))
- qlge_link_on(qdev);
- /* Restore rx mode. */
- clear_bit(QL_ALLMULTI, &qdev->flags);
- clear_bit(QL_PROMISCUOUS, &qdev->flags);
- qlge_set_multicast_list(qdev->ndev);
-
- /* Restore vlan setting. */
- qlge_restore_vlan(qdev);
-
- qlge_enable_interrupts(qdev);
- qlge_enable_all_completion_interrupts(qdev);
- netif_tx_start_all_queues(qdev->ndev);
-
- return 0;
-err_init:
- qlge_adapter_reset(qdev);
- return err;
-}
-
-static void qlge_release_adapter_resources(struct qlge_adapter *qdev)
-{
- qlge_free_mem_resources(qdev);
- qlge_free_irq(qdev);
-}
-
-static int qlge_get_adapter_resources(struct qlge_adapter *qdev)
-{
- if (qlge_alloc_mem_resources(qdev)) {
- netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
- return -ENOMEM;
- }
- return qlge_request_irq(qdev);
-}
-
-static int qlge_close(struct net_device *ndev)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
- int i;
-
-	/* If we hit the pci_channel_io_perm_failure
-	 * condition, then we already
- * brought the adapter down.
- */
- if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
- netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
- clear_bit(QL_EEH_FATAL, &qdev->flags);
- return 0;
- }
-
- /*
- * Wait for device to recover from a reset.
- * (Rarely happens, but possible.)
- */
- while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
- msleep(1);
-
- /* Make sure refill_work doesn't re-enable napi */
- for (i = 0; i < qdev->rss_ring_count; i++)
- cancel_delayed_work_sync(&qdev->rx_ring[i].refill_work);
-
- qlge_adapter_down(qdev);
- qlge_release_adapter_resources(qdev);
- return 0;
-}
-
-static void qlge_set_lb_size(struct qlge_adapter *qdev)
-{
- if (qdev->ndev->mtu <= 1500)
- qdev->lbq_buf_size = LARGE_BUFFER_MIN_SIZE;
- else
- qdev->lbq_buf_size = LARGE_BUFFER_MAX_SIZE;
- qdev->lbq_buf_order = get_order(qdev->lbq_buf_size);
-}
-
-static int qlge_configure_rings(struct qlge_adapter *qdev)
-{
- int i;
- struct rx_ring *rx_ring;
- struct tx_ring *tx_ring;
- int cpu_cnt = min_t(int, MAX_CPUS, num_online_cpus());
-
-	/* In a perfect world we have one RSS ring for each CPU
-	 * and each has its own vector. To do that we ask for
- * cpu_cnt vectors. qlge_enable_msix() will adjust the
- * vector count to what we actually get. We then
- * allocate an RSS ring for each.
- * Essentially, we are doing min(cpu_count, msix_vector_count).
- */
- qdev->intr_count = cpu_cnt;
- qlge_enable_msix(qdev);
- /* Adjust the RSS ring count to the actual vector count. */
- qdev->rss_ring_count = qdev->intr_count;
- qdev->tx_ring_count = cpu_cnt;
- qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
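-	/* rx_ring[0..rss_ring_count - 1] are the inbound (RSS) completion
-	 * queues; the remaining entries are the outbound completion queues
-	 * paired one-to-one with the tx rings set up below.
-	 */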
-
- for (i = 0; i < qdev->tx_ring_count; i++) {
- tx_ring = &qdev->tx_ring[i];
- memset((void *)tx_ring, 0, sizeof(*tx_ring));
- tx_ring->qdev = qdev;
- tx_ring->wq_id = i;
- tx_ring->wq_len = qdev->tx_ring_size;
- tx_ring->wq_size =
- tx_ring->wq_len * sizeof(struct qlge_ob_mac_iocb_req);
-
- /*
-		 * The completion queue IDs for the tx rings start
- * immediately after the rss rings.
- */
- tx_ring->cq_id = qdev->rss_ring_count + i;
- }
-
- for (i = 0; i < qdev->rx_ring_count; i++) {
- rx_ring = &qdev->rx_ring[i];
- memset((void *)rx_ring, 0, sizeof(*rx_ring));
- rx_ring->qdev = qdev;
- rx_ring->cq_id = i;
- rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
- if (i < qdev->rss_ring_count) {
- /*
- * Inbound (RSS) queues.
- */
- rx_ring->cq_len = qdev->rx_ring_size;
- rx_ring->cq_size =
- rx_ring->cq_len * sizeof(struct qlge_net_rsp_iocb);
- rx_ring->lbq.type = QLGE_LB;
- rx_ring->sbq.type = QLGE_SB;
- INIT_DELAYED_WORK(&rx_ring->refill_work,
- &qlge_slow_refill);
- } else {
- /*
- * Outbound queue handles outbound completions only.
- */
- /* outbound cq is same size as tx_ring it services. */
- rx_ring->cq_len = qdev->tx_ring_size;
- rx_ring->cq_size =
- rx_ring->cq_len * sizeof(struct qlge_net_rsp_iocb);
- }
- }
- return 0;
-}
-
-static int qlge_open(struct net_device *ndev)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
- int err = 0;
-
- err = qlge_adapter_reset(qdev);
- if (err)
- return err;
-
- qlge_set_lb_size(qdev);
- err = qlge_configure_rings(qdev);
- if (err)
- return err;
-
- err = qlge_get_adapter_resources(qdev);
- if (err)
- goto error_up;
-
- err = qlge_adapter_up(qdev);
- if (err)
- goto error_up;
-
- return err;
-
-error_up:
- qlge_release_adapter_resources(qdev);
- return err;
-}
-
-static int qlge_change_rx_buffers(struct qlge_adapter *qdev)
-{
- int status;
-
- /* Wait for an outstanding reset to complete. */
- if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
- int i = 4;
-
- while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
- netif_err(qdev, ifup, qdev->ndev,
- "Waiting for adapter UP...\n");
- ssleep(1);
- }
-
- if (!i) {
- netif_err(qdev, ifup, qdev->ndev,
- "Timed out waiting for adapter UP\n");
- return -ETIMEDOUT;
- }
- }
-
- status = qlge_adapter_down(qdev);
- if (status)
- goto error;
-
- qlge_set_lb_size(qdev);
-
- status = qlge_adapter_up(qdev);
- if (status)
- goto error;
-
- return status;
-error:
- netif_alert(qdev, ifup, qdev->ndev,
- "Driver up/down cycle failed, closing device.\n");
- set_bit(QL_ADAPTER_UP, &qdev->flags);
- dev_close(qdev->ndev);
- return status;
-}
-
-static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
- int status;
-
- if (ndev->mtu == 1500 && new_mtu == 9000)
- netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
- else if (ndev->mtu == 9000 && new_mtu == 1500)
- netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
- else
- return -EINVAL;
-
- queue_delayed_work(qdev->workqueue,
- &qdev->mpi_port_cfg_work, 3 * HZ);
-
- ndev->mtu = new_mtu;
-
- if (!netif_running(qdev->ndev))
- return 0;
-
- status = qlge_change_rx_buffers(qdev);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Changing MTU failed.\n");
- }
-
- return status;
-}
-
-static struct net_device_stats *qlge_get_stats(struct net_device
- *ndev)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
- struct rx_ring *rx_ring = &qdev->rx_ring[0];
- struct tx_ring *tx_ring = &qdev->tx_ring[0];
- unsigned long pkts, mcast, dropped, errors, bytes;
- int i;
-
- /* Get RX stats. */
- pkts = mcast = dropped = errors = bytes = 0;
- for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
- pkts += rx_ring->rx_packets;
- bytes += rx_ring->rx_bytes;
- dropped += rx_ring->rx_dropped;
- errors += rx_ring->rx_errors;
- mcast += rx_ring->rx_multicast;
- }
- ndev->stats.rx_packets = pkts;
- ndev->stats.rx_bytes = bytes;
- ndev->stats.rx_dropped = dropped;
- ndev->stats.rx_errors = errors;
- ndev->stats.multicast = mcast;
-
- /* Get TX stats. */
- pkts = errors = bytes = 0;
- for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
- pkts += tx_ring->tx_packets;
- bytes += tx_ring->tx_bytes;
- errors += tx_ring->tx_errors;
- }
- ndev->stats.tx_packets = pkts;
- ndev->stats.tx_bytes = bytes;
- ndev->stats.tx_errors = errors;
- return &ndev->stats;
-}
-
-static void qlge_set_multicast_list(struct net_device *ndev)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
- struct netdev_hw_addr *ha;
- int i, status;
-
- status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
- if (status)
- return;
- /*
- * Set or clear promiscuous mode if a
- * transition is taking place.
- */
- if (ndev->flags & IFF_PROMISC) {
- if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
- if (qlge_set_routing_reg
- (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
- netif_err(qdev, hw, qdev->ndev,
- "Failed to set promiscuous mode.\n");
- } else {
- set_bit(QL_PROMISCUOUS, &qdev->flags);
- }
- }
- } else {
- if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
- if (qlge_set_routing_reg
- (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
- netif_err(qdev, hw, qdev->ndev,
- "Failed to clear promiscuous mode.\n");
- } else {
- clear_bit(QL_PROMISCUOUS, &qdev->flags);
- }
- }
- }
-
- /*
- * Set or clear all multicast mode if a
- * transition is taking place.
- */
- if ((ndev->flags & IFF_ALLMULTI) ||
- (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
- if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
- if (qlge_set_routing_reg
- (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
- netif_err(qdev, hw, qdev->ndev,
- "Failed to set all-multi mode.\n");
- } else {
- set_bit(QL_ALLMULTI, &qdev->flags);
- }
- }
- } else {
- if (test_bit(QL_ALLMULTI, &qdev->flags)) {
- if (qlge_set_routing_reg
- (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
- netif_err(qdev, hw, qdev->ndev,
- "Failed to clear all-multi mode.\n");
- } else {
- clear_bit(QL_ALLMULTI, &qdev->flags);
- }
- }
- }
-
- if (!netdev_mc_empty(ndev)) {
- status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- goto exit;
- i = 0;
- netdev_for_each_mc_addr(ha, ndev) {
- if (qlge_set_mac_addr_reg(qdev, (u8 *)ha->addr,
- MAC_ADDR_TYPE_MULTI_MAC, i)) {
- netif_err(qdev, hw, qdev->ndev,
-					  "Failed to load multicast address.\n");
- qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
- goto exit;
- }
- i++;
- }
- qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
- if (qlge_set_routing_reg
- (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
- netif_err(qdev, hw, qdev->ndev,
- "Failed to set multicast match mode.\n");
- } else {
- set_bit(QL_ALLMULTI, &qdev->flags);
- }
- }
-exit:
- qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
-}
-
-static int qlge_set_mac_address(struct net_device *ndev, void *p)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
- struct sockaddr *addr = p;
- int status;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
- eth_hw_addr_set(ndev, addr->sa_data);
- /* Update local copy of current mac address. */
- memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
-
- status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- return status;
- status = qlge_set_mac_addr_reg(qdev, (const u8 *)ndev->dev_addr,
- MAC_ADDR_TYPE_CAM_MAC,
- qdev->func * MAX_CQ);
- if (status)
- netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
- qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
- return status;
-}
-
-static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
-
- qlge_queue_asic_error(qdev);
-}
-
-static void qlge_asic_reset_work(struct work_struct *work)
-{
- struct qlge_adapter *qdev =
- container_of(work, struct qlge_adapter, asic_reset_work.work);
- int status;
-
- rtnl_lock();
- status = qlge_adapter_down(qdev);
- if (status)
- goto error;
-
- status = qlge_adapter_up(qdev);
- if (status)
- goto error;
-
- /* Restore rx mode. */
- clear_bit(QL_ALLMULTI, &qdev->flags);
- clear_bit(QL_PROMISCUOUS, &qdev->flags);
- qlge_set_multicast_list(qdev->ndev);
-
- rtnl_unlock();
- return;
-error:
- netif_alert(qdev, ifup, qdev->ndev,
- "Driver up/down cycle failed, closing device\n");
-
- set_bit(QL_ADAPTER_UP, &qdev->flags);
- dev_close(qdev->ndev);
- rtnl_unlock();
-}
-
-static const struct nic_operations qla8012_nic_ops = {
- .get_flash = qlge_get_8012_flash_params,
- .port_initialize = qlge_8012_port_initialize,
-};
-
-static const struct nic_operations qla8000_nic_ops = {
- .get_flash = qlge_get_8000_flash_params,
- .port_initialize = qlge_8000_port_initialize,
-};
-
-/* Find the pcie function number for the other NIC
- * on this chip. Since both NIC functions share a
- * common firmware we have the lowest enabled function
- * do any common work. Examples would be resetting
- * after a fatal firmware error, or doing a firmware
- * coredump.
- */
-static int qlge_get_alt_pcie_func(struct qlge_adapter *qdev)
-{
- int status = 0;
- u32 temp;
- u32 nic_func1, nic_func2;
-
- status = qlge_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
- &temp);
- if (status)
- return status;
-
- nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
- MPI_TEST_NIC_FUNC_MASK);
- nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
- MPI_TEST_NIC_FUNC_MASK);
-
- if (qdev->func == nic_func1)
- qdev->alt_func = nic_func2;
- else if (qdev->func == nic_func2)
- qdev->alt_func = nic_func1;
- else
- status = -EIO;
-
- return status;
-}
-
-static int qlge_get_board_info(struct qlge_adapter *qdev)
-{
- int status;
-
- qdev->func =
- (qlge_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
- if (qdev->func > 3)
- return -EIO;
-
- status = qlge_get_alt_pcie_func(qdev);
- if (status)
- return status;
-
- qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
- if (qdev->port) {
- qdev->xg_sem_mask = SEM_XGMAC1_MASK;
- qdev->port_link_up = STS_PL1;
- qdev->port_init = STS_PI1;
- qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
- qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
- } else {
- qdev->xg_sem_mask = SEM_XGMAC0_MASK;
- qdev->port_link_up = STS_PL0;
- qdev->port_init = STS_PI0;
- qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
- qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
- }
- qdev->chip_rev_id = qlge_read32(qdev, REV_ID);
- qdev->device_id = qdev->pdev->device;
- if (qdev->device_id == QLGE_DEVICE_ID_8012)
- qdev->nic_ops = &qla8012_nic_ops;
- else if (qdev->device_id == QLGE_DEVICE_ID_8000)
- qdev->nic_ops = &qla8000_nic_ops;
- return status;
-}
-
-static void qlge_release_all(struct pci_dev *pdev)
-{
- struct qlge_adapter *qdev = pci_get_drvdata(pdev);
-
- if (qdev->workqueue) {
- destroy_workqueue(qdev->workqueue);
- qdev->workqueue = NULL;
- }
-
- if (qdev->reg_base)
- iounmap(qdev->reg_base);
- if (qdev->doorbell_area)
- iounmap(qdev->doorbell_area);
- vfree(qdev->mpi_coredump);
- pci_release_regions(pdev);
-}
-
-static int qlge_init_device(struct pci_dev *pdev, struct qlge_adapter *qdev,
- int cards_found)
-{
- struct net_device *ndev = qdev->ndev;
- int err = 0;
-
- err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "PCI device enable failed.\n");
- return err;
- }
-
- qdev->pdev = pdev;
- pci_set_drvdata(pdev, qdev);
-
- /* Set PCIe read request size */
- err = pcie_set_readrq(pdev, 4096);
- if (err) {
- dev_err(&pdev->dev, "Set readrq failed.\n");
- goto err_disable_pci;
- }
-
- err = pci_request_regions(pdev, DRV_NAME);
- if (err) {
- dev_err(&pdev->dev, "PCI region request failed.\n");
- goto err_disable_pci;
- }
-
- pci_set_master(pdev);
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- set_bit(QL_DMA64, &qdev->flags);
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- } else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (!err)
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- }
-
- if (err) {
- dev_err(&pdev->dev, "No usable DMA configuration.\n");
- goto err_release_pci;
- }
-
- /* Set PCIe reset type for EEH to fundamental. */
- pdev->needs_freset = 1;
- pci_save_state(pdev);
- qdev->reg_base =
- ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
- if (!qdev->reg_base) {
- dev_err(&pdev->dev, "Register mapping failed.\n");
- err = -ENOMEM;
- goto err_release_pci;
- }
-
- qdev->doorbell_area_size = pci_resource_len(pdev, 3);
- qdev->doorbell_area =
- ioremap(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3));
- if (!qdev->doorbell_area) {
- dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
- err = -ENOMEM;
- goto err_iounmap_base;
- }
-
- err = qlge_get_board_info(qdev);
- if (err) {
- dev_err(&pdev->dev, "Register access failed.\n");
- err = -EIO;
- goto err_iounmap_doorbell;
- }
- qdev->msg_enable = netif_msg_init(debug, default_msg);
- spin_lock_init(&qdev->stats_lock);
-
- if (qlge_mpi_coredump) {
- qdev->mpi_coredump =
- vmalloc(sizeof(struct qlge_mpi_coredump));
- if (!qdev->mpi_coredump) {
- err = -ENOMEM;
- goto err_iounmap_doorbell;
- }
- if (qlge_force_coredump)
- set_bit(QL_FRC_COREDUMP, &qdev->flags);
- }
- /* make sure the EEPROM is good */
- err = qdev->nic_ops->get_flash(qdev);
- if (err) {
- dev_err(&pdev->dev, "Invalid FLASH.\n");
- goto err_free_mpi_coredump;
- }
-
- /* Keep local copy of current mac address. */
- memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
-
- /* Set up the default ring sizes. */
- qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
- qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
-
- /* Set up the coalescing parameters. */
- qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
- qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
- qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
- qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
-
- /*
- * Set up the operating parameters.
- */
- qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
- ndev->name);
- if (!qdev->workqueue) {
- err = -ENOMEM;
- goto err_free_mpi_coredump;
- }
-
- INIT_DELAYED_WORK(&qdev->asic_reset_work, qlge_asic_reset_work);
- INIT_DELAYED_WORK(&qdev->mpi_reset_work, qlge_mpi_reset_work);
- INIT_DELAYED_WORK(&qdev->mpi_work, qlge_mpi_work);
- INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, qlge_mpi_port_cfg_work);
- INIT_DELAYED_WORK(&qdev->mpi_idc_work, qlge_mpi_idc_work);
- init_completion(&qdev->ide_completion);
- mutex_init(&qdev->mpi_mutex);
-
- if (!cards_found) {
- dev_info(&pdev->dev, "%s\n", DRV_STRING);
- dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
- DRV_NAME, DRV_VERSION);
- }
- return 0;
-
-err_free_mpi_coredump:
- vfree(qdev->mpi_coredump);
-err_iounmap_doorbell:
- iounmap(qdev->doorbell_area);
-err_iounmap_base:
- iounmap(qdev->reg_base);
-err_release_pci:
- pci_release_regions(pdev);
-err_disable_pci:
- pci_disable_device(pdev);
-
- return err;
-}
-
-static const struct net_device_ops qlge_netdev_ops = {
- .ndo_open = qlge_open,
- .ndo_stop = qlge_close,
- .ndo_start_xmit = qlge_send,
- .ndo_change_mtu = qlge_change_mtu,
- .ndo_get_stats = qlge_get_stats,
- .ndo_set_rx_mode = qlge_set_multicast_list,
- .ndo_set_mac_address = qlge_set_mac_address,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_tx_timeout = qlge_tx_timeout,
- .ndo_set_features = qlge_set_features,
- .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
-};
-
-static void qlge_timer(struct timer_list *t)
-{
- struct qlge_adapter *qdev = from_timer(qdev, t, timer);
- u32 var = 0;
-
- var = qlge_read32(qdev, STS);
- if (pci_channel_offline(qdev->pdev)) {
- netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
- return;
- }
-
- mod_timer(&qdev->timer, jiffies + (5 * HZ));
-}
-
-static const struct devlink_ops qlge_devlink_ops;
-
-static int qlge_probe(struct pci_dev *pdev,
- const struct pci_device_id *pci_entry)
-{
- struct qlge_netdev_priv *ndev_priv;
- struct qlge_adapter *qdev = NULL;
- struct net_device *ndev = NULL;
- struct devlink *devlink;
- static int cards_found;
- int err;
-
- devlink = devlink_alloc(&qlge_devlink_ops, sizeof(struct qlge_adapter),
- &pdev->dev);
- if (!devlink)
- return -ENOMEM;
-
- qdev = devlink_priv(devlink);
-
- ndev = alloc_etherdev_mq(sizeof(struct qlge_netdev_priv),
- min(MAX_CPUS,
- netif_get_num_default_rss_queues()));
- if (!ndev) {
- err = -ENOMEM;
- goto devlink_free;
- }
-
- ndev_priv = netdev_priv(ndev);
- ndev_priv->qdev = qdev;
- ndev_priv->ndev = ndev;
- qdev->ndev = ndev;
- err = qlge_init_device(pdev, qdev, cards_found);
- if (err < 0)
- goto netdev_free;
-
- SET_NETDEV_DEV(ndev, &pdev->dev);
- ndev->hw_features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO_ECN |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_RXCSUM;
- ndev->features = ndev->hw_features;
- ndev->vlan_features = ndev->hw_features;
- /* vlan gets same features (except vlan filter) */
- ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX);
-
- if (test_bit(QL_DMA64, &qdev->flags))
- ndev->features |= NETIF_F_HIGHDMA;
-
- /*
- * Set up net_device structure.
- */
- ndev->tx_queue_len = qdev->tx_ring_size;
- ndev->irq = pdev->irq;
-
- ndev->netdev_ops = &qlge_netdev_ops;
- ndev->ethtool_ops = &qlge_ethtool_ops;
- ndev->watchdog_timeo = 10 * HZ;
-
- /* MTU range: this driver only supports 1500 or 9000, so this only
- * filters out values above or below, and we'll rely on
- * qlge_change_mtu to make sure only 1500 or 9000 are allowed
- */
- ndev->min_mtu = ETH_DATA_LEN;
- ndev->max_mtu = 9000;
-
- err = register_netdev(ndev);
- if (err) {
- dev_err(&pdev->dev, "net device registration failed.\n");
- goto cleanup_pdev;
- }
-
- err = qlge_health_create_reporters(qdev);
- if (err)
- goto unregister_netdev;
-
- /* Start up the timer to trigger EEH if
- * the bus goes dead
- */
- timer_setup(&qdev->timer, qlge_timer, TIMER_DEFERRABLE);
- mod_timer(&qdev->timer, jiffies + (5 * HZ));
- qlge_link_off(qdev);
- qlge_display_dev_info(ndev);
- atomic_set(&qdev->lb_count, 0);
- cards_found++;
- devlink_register(devlink);
- return 0;
-
-unregister_netdev:
- unregister_netdev(ndev);
-cleanup_pdev:
- qlge_release_all(pdev);
- pci_disable_device(pdev);
-netdev_free:
- free_netdev(ndev);
-devlink_free:
- devlink_free(devlink);
-
- return err;
-}
-
-netdev_tx_t qlge_lb_send(struct sk_buff *skb, struct net_device *ndev)
-{
- return qlge_send(skb, ndev);
-}
-
-int qlge_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
-{
- return qlge_clean_inbound_rx_ring(rx_ring, budget);
-}
-
-static void qlge_remove(struct pci_dev *pdev)
-{
- struct qlge_adapter *qdev = pci_get_drvdata(pdev);
- struct net_device *ndev = qdev->ndev;
- struct devlink *devlink = priv_to_devlink(qdev);
-
- devlink_unregister(devlink);
- del_timer_sync(&qdev->timer);
- qlge_cancel_all_work_sync(qdev);
- unregister_netdev(ndev);
- qlge_release_all(pdev);
- pci_disable_device(pdev);
- devlink_health_reporter_destroy(qdev->reporter);
- devlink_free(devlink);
- free_netdev(ndev);
-}
-
-/* Clean up resources without touching hardware. */
-static void qlge_eeh_close(struct net_device *ndev)
-{
- struct qlge_adapter *qdev = netdev_to_qdev(ndev);
- int i;
-
- if (netif_carrier_ok(ndev)) {
- netif_carrier_off(ndev);
- netif_stop_queue(ndev);
- }
-
- /* Disabling the timer */
- qlge_cancel_all_work_sync(qdev);
-
- for (i = 0; i < qdev->rss_ring_count; i++)
- netif_napi_del(&qdev->rx_ring[i].napi);
-
- clear_bit(QL_ADAPTER_UP, &qdev->flags);
- qlge_tx_ring_clean(qdev);
- qlge_free_rx_buffers(qdev);
- qlge_release_adapter_resources(qdev);
-}
-
-/*
- * This callback is called by the PCI subsystem whenever
- * a PCI bus error is detected.
- */
-static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- struct qlge_adapter *qdev = pci_get_drvdata(pdev);
- struct net_device *ndev = qdev->ndev;
-
- switch (state) {
- case pci_channel_io_normal:
- return PCI_ERS_RESULT_CAN_RECOVER;
- case pci_channel_io_frozen:
- netif_device_detach(ndev);
- del_timer_sync(&qdev->timer);
- if (netif_running(ndev))
- qlge_eeh_close(ndev);
- pci_disable_device(pdev);
- return PCI_ERS_RESULT_NEED_RESET;
- case pci_channel_io_perm_failure:
- dev_err(&pdev->dev,
- "%s: pci_channel_io_perm_failure.\n", __func__);
- del_timer_sync(&qdev->timer);
- qlge_eeh_close(ndev);
- set_bit(QL_EEH_FATAL, &qdev->flags);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- /* Request a slot reset. */
- return PCI_ERS_RESULT_NEED_RESET;
-}
-
-/*
- * This callback is called after the PCI bus has been reset.
- * Basically, this tries to restart the card from scratch.
- * This is a shortened version of the device probe/discovery code;
- * it resembles the first half of the () routine.
- */
-static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
-{
- struct qlge_adapter *qdev = pci_get_drvdata(pdev);
-
- pdev->error_state = pci_channel_io_normal;
-
- pci_restore_state(pdev);
- if (pci_enable_device(pdev)) {
- netif_err(qdev, ifup, qdev->ndev,
- "Cannot re-enable PCI device after reset.\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
- pci_set_master(pdev);
-
- if (qlge_adapter_reset(qdev)) {
- netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
- set_bit(QL_EEH_FATAL, &qdev->flags);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-static void qlge_io_resume(struct pci_dev *pdev)
-{
- struct qlge_adapter *qdev = pci_get_drvdata(pdev);
- struct net_device *ndev = qdev->ndev;
- int err = 0;
-
- if (netif_running(ndev)) {
- err = qlge_open(ndev);
- if (err) {
- netif_err(qdev, ifup, qdev->ndev,
- "Device initialization failed after reset.\n");
- return;
- }
- } else {
- netif_err(qdev, ifup, qdev->ndev,
- "Device was not running prior to EEH.\n");
- }
- mod_timer(&qdev->timer, jiffies + (5 * HZ));
- netif_device_attach(ndev);
-}
-
-static const struct pci_error_handlers qlge_err_handler = {
- .error_detected = qlge_io_error_detected,
- .slot_reset = qlge_io_slot_reset,
- .resume = qlge_io_resume,
-};
-
-static int __maybe_unused qlge_suspend(struct device *dev_d)
-{
- struct pci_dev *pdev = to_pci_dev(dev_d);
- struct qlge_adapter *qdev;
- struct net_device *ndev;
- int err;
-
- qdev = pci_get_drvdata(pdev);
- ndev = qdev->ndev;
- netif_device_detach(ndev);
- del_timer_sync(&qdev->timer);
-
- if (netif_running(ndev)) {
- err = qlge_adapter_down(qdev);
-		if (err)
- return err;
- }
-
- qlge_wol(qdev);
-
- return 0;
-}
-
-static int __maybe_unused qlge_resume(struct device *dev_d)
-{
- struct pci_dev *pdev = to_pci_dev(dev_d);
- struct qlge_adapter *qdev;
- struct net_device *ndev;
- int err;
-
- qdev = pci_get_drvdata(pdev);
- ndev = qdev->ndev;
-
- pci_set_master(pdev);
-
- device_wakeup_disable(dev_d);
-
- if (netif_running(ndev)) {
- err = qlge_adapter_up(qdev);
- if (err)
- return err;
- }
-
- mod_timer(&qdev->timer, jiffies + (5 * HZ));
- netif_device_attach(ndev);
-
- return 0;
-}
-
-static void qlge_shutdown(struct pci_dev *pdev)
-{
- qlge_suspend(&pdev->dev);
-}
-
-static SIMPLE_DEV_PM_OPS(qlge_pm_ops, qlge_suspend, qlge_resume);
-
-static struct pci_driver qlge_driver = {
- .name = DRV_NAME,
- .id_table = qlge_pci_tbl,
- .probe = qlge_probe,
- .remove = qlge_remove,
- .driver.pm = &qlge_pm_ops,
- .shutdown = qlge_shutdown,
- .err_handler = &qlge_err_handler
-};
-
-module_pci_driver(qlge_driver);
diff --git a/drivers/staging/qlge/qlge_mpi.c b/drivers/staging/qlge/qlge_mpi.c
deleted file mode 100644
index 96a4de6d2b34..000000000000
--- a/drivers/staging/qlge/qlge_mpi.c
+++ /dev/null
@@ -1,1273 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "qlge.h"
-
-int qlge_unpause_mpi_risc(struct qlge_adapter *qdev)
-{
- u32 tmp;
-
- /* Un-pause the RISC */
- tmp = qlge_read32(qdev, CSR);
- if (!(tmp & CSR_RP))
- return -EIO;
-
- qlge_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
- return 0;
-}
-
-int qlge_pause_mpi_risc(struct qlge_adapter *qdev)
-{
- u32 tmp;
- int count;
-
- /* Pause the RISC */
- qlge_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
- for (count = UDELAY_COUNT; count; count--) {
- tmp = qlge_read32(qdev, CSR);
- if (tmp & CSR_RP)
- break;
- mdelay(UDELAY_DELAY);
- }
- return (count == 0) ? -ETIMEDOUT : 0;
-}
-
-int qlge_hard_reset_mpi_risc(struct qlge_adapter *qdev)
-{
- u32 tmp;
- int count;
-
- /* Reset the RISC */
- qlge_write32(qdev, CSR, CSR_CMD_SET_RST);
- for (count = UDELAY_COUNT; count; count--) {
- tmp = qlge_read32(qdev, CSR);
- if (tmp & CSR_RR) {
- qlge_write32(qdev, CSR, CSR_CMD_CLR_RST);
- break;
- }
- mdelay(UDELAY_DELAY);
- }
- return (count == 0) ? -ETIMEDOUT : 0;
-}
-
-int qlge_read_mpi_reg(struct qlge_adapter *qdev, u32 reg, u32 *data)
-{
- int status;
- /* wait for reg to come ready */
- status = qlge_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
- if (status)
- goto exit;
- /* set up for reg read */
- qlge_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
- /* wait for reg to come ready */
- status = qlge_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
- if (status)
- goto exit;
- /* get the data */
- *data = qlge_read32(qdev, PROC_DATA);
-exit:
- return status;
-}
-
-int qlge_write_mpi_reg(struct qlge_adapter *qdev, u32 reg, u32 data)
-{
- int status = 0;
- /* wait for reg to come ready */
- status = qlge_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
- if (status)
- goto exit;
- /* write the data to the data reg */
- qlge_write32(qdev, PROC_DATA, data);
- /* trigger the write */
- qlge_write32(qdev, PROC_ADDR, reg);
- /* wait for reg to come ready */
- status = qlge_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
- if (status)
- goto exit;
-exit:
- return status;
-}
-
-int qlge_soft_reset_mpi_risc(struct qlge_adapter *qdev)
-{
- return qlge_write_mpi_reg(qdev, 0x00001010, 1);
-}
-
-/* Determine if we are in charge of the firmware. We are
- * if we are the lower of the 2 NIC PCIe functions, or if
- * we are the higher function and the lower function
- * is not enabled.
- */
-int qlge_own_firmware(struct qlge_adapter *qdev)
-{
- u32 temp;
-
- /* If we are the lower of the 2 NIC functions
-	 * on the chip then we are responsible for
- * core dump and firmware reset after an error.
- */
- if (qdev->func < qdev->alt_func)
- return 1;
-
- /* If we are the higher of the 2 NIC functions
- * on the chip and the lower function is not
- * enabled, then we are responsible for
- * core dump and firmware reset after an error.
- */
- temp = qlge_read32(qdev, STS);
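-	/* Per the check below, STS reports per-function enable bits
-	 * starting at bit 8.
-	 */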
- if (!(temp & (1 << (8 + qdev->alt_func))))
- return 1;
-
- return 0;
-}
-
-static int qlge_get_mb_sts(struct qlge_adapter *qdev, struct mbox_params *mbcp)
-{
- int i, status;
-
- status = qlge_sem_spinlock(qdev, SEM_PROC_REG_MASK);
- if (status)
- return -EBUSY;
- for (i = 0; i < mbcp->out_count; i++) {
- status =
- qlge_read_mpi_reg(qdev, qdev->mailbox_out + i,
- &mbcp->mbox_out[i]);
- if (status) {
- netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n");
- break;
- }
- }
- qlge_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
- return status;
-}
-
-/* Wait for a single mailbox command to complete.
- * Returns zero on success.
- */
-static int qlge_wait_mbx_cmd_cmplt(struct qlge_adapter *qdev)
-{
- int count;
- u32 value;
-
- for (count = 100; count; count--) {
- value = qlge_read32(qdev, STS);
- if (value & STS_PI)
- return 0;
- mdelay(UDELAY_DELAY); /* 100ms */
- }
- return -ETIMEDOUT;
-}
-
-/* Execute a single mailbox command.
- * Caller must hold PROC_ADDR semaphore.
- */
-static int qlge_exec_mb_cmd(struct qlge_adapter *qdev, struct mbox_params *mbcp)
-{
- int i, status;
-
- /*
- * Make sure there's nothing pending.
- * This shouldn't happen.
- */
- if (qlge_read32(qdev, CSR) & CSR_HRI)
- return -EIO;
-
- status = qlge_sem_spinlock(qdev, SEM_PROC_REG_MASK);
- if (status)
- return status;
-
- /*
- * Fill the outbound mailboxes.
- */
- for (i = 0; i < mbcp->in_count; i++) {
- status = qlge_write_mpi_reg(qdev, qdev->mailbox_in + i,
- mbcp->mbox_in[i]);
- if (status)
- goto end;
- }
- /*
- * Wake up the MPI firmware.
- */
- qlge_write32(qdev, CSR, CSR_CMD_SET_H2R_INT);
-end:
- qlge_sem_unlock(qdev, SEM_PROC_REG_MASK);
- return status;
-}
-
-/* We are being asked by firmware to accept
- * a change to the port. This is only
- * a change to max frame sizes (Tx/Rx), pause
- * parameters, or loopback mode. We wake up a worker
- * to handle processing this since a mailbox command
- * will need to be sent to ACK the request.
- */
-static int qlge_idc_req_aen(struct qlge_adapter *qdev)
-{
- int status;
- struct mbox_params *mbcp = &qdev->idc_mbc;
-
- netif_err(qdev, drv, qdev->ndev, "Enter!\n");
- /* Get the status data and start up a thread to
- * handle the request.
- */
- mbcp->out_count = 4;
- status = qlge_get_mb_sts(qdev, mbcp);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Could not read MPI, resetting ASIC!\n");
- qlge_queue_asic_error(qdev);
- } else {
- /* Begin polled mode early so
- * we don't get another interrupt
- * when we leave mpi_worker.
- */
- qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
- queue_delayed_work(qdev->workqueue, &qdev->mpi_idc_work, 0);
- }
- return status;
-}
-
-/* Process an inter-device event completion.
- * If good, signal the caller's completion.
- */
-static int qlge_idc_cmplt_aen(struct qlge_adapter *qdev)
-{
- int status;
- struct mbox_params *mbcp = &qdev->idc_mbc;
-
- mbcp->out_count = 4;
- status = qlge_get_mb_sts(qdev, mbcp);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Could not read MPI, resetting RISC!\n");
- qlge_queue_fw_error(qdev);
- } else {
- /* Wake up the sleeping mpi_idc_work thread that is
- * waiting for this event.
- */
- complete(&qdev->ide_completion);
- }
- return status;
-}
-
-static void qlge_link_up(struct qlge_adapter *qdev, struct mbox_params *mbcp)
-{
- int status;
-
- mbcp->out_count = 2;
-
- status = qlge_get_mb_sts(qdev, mbcp);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "%s: Could not get mailbox status.\n", __func__);
- return;
- }
-
- qdev->link_status = mbcp->mbox_out[1];
- netif_err(qdev, drv, qdev->ndev, "Link Up.\n");
-
- /* If we're coming back from an IDC event
- * then set up the CAM and frame routing.
- */
- if (test_bit(QL_CAM_RT_SET, &qdev->flags)) {
- status = qlge_cam_route_initialize(qdev);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init CAM/Routing tables.\n");
- return;
- }
- clear_bit(QL_CAM_RT_SET, &qdev->flags);
- }
-
- /* Queue up a worker to check the frame
- * size information, and fix it if it's not
- * to our liking.
- */
- if (!test_bit(QL_PORT_CFG, &qdev->flags)) {
- netif_err(qdev, drv, qdev->ndev, "Queue Port Config Worker!\n");
- set_bit(QL_PORT_CFG, &qdev->flags);
- /* Begin polled mode early so
- * we don't get another interrupt
- * when we leave mpi_worker dpc.
- */
- qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
- queue_delayed_work(qdev->workqueue,
- &qdev->mpi_port_cfg_work, 0);
- }
-
- qlge_link_on(qdev);
-}
-
-static void qlge_link_down(struct qlge_adapter *qdev, struct mbox_params *mbcp)
-{
- int status;
-
- mbcp->out_count = 3;
-
- status = qlge_get_mb_sts(qdev, mbcp);
- if (status)
- netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n");
-
- qlge_link_off(qdev);
-}
-
-static int qlge_sfp_in(struct qlge_adapter *qdev, struct mbox_params *mbcp)
-{
- int status;
-
- mbcp->out_count = 5;
-
- status = qlge_get_mb_sts(qdev, mbcp);
- if (status)
- netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n");
- else
- netif_err(qdev, drv, qdev->ndev, "SFP insertion detected.\n");
-
- return status;
-}
-
-static int qlge_sfp_out(struct qlge_adapter *qdev, struct mbox_params *mbcp)
-{
- int status;
-
- mbcp->out_count = 1;
-
- status = qlge_get_mb_sts(qdev, mbcp);
- if (status)
- netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n");
- else
- netif_err(qdev, drv, qdev->ndev, "SFP removal detected.\n");
-
- return status;
-}
-
-static int qlge_aen_lost(struct qlge_adapter *qdev, struct mbox_params *mbcp)
-{
- int status;
-
- mbcp->out_count = 6;
-
- status = qlge_get_mb_sts(qdev, mbcp);
- if (status) {
- netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n");
- } else {
- int i;
-
- netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n");
- for (i = 0; i < mbcp->out_count; i++)
- netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n",
- i, mbcp->mbox_out[i]);
- }
-
- return status;
-}
-
-static void qlge_init_fw_done(struct qlge_adapter *qdev, struct mbox_params *mbcp)
-{
- int status;
-
- mbcp->out_count = 2;
-
- status = qlge_get_mb_sts(qdev, mbcp);
- if (status) {
- netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n");
- } else {
- netif_err(qdev, drv, qdev->ndev, "Firmware Revision = 0x%.08x.\n",
- mbcp->mbox_out[1]);
- qdev->fw_rev_id = mbcp->mbox_out[1];
- status = qlge_cam_route_initialize(qdev);
- if (status)
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init CAM/Routing tables.\n");
- }
-}
-
-/* Process an async event and clear it unless it's an
- * error condition.
- * This can get called iteratively from the mpi_work thread
- * when events arrive via an interrupt.
- * It also gets called when a mailbox command is polling for
- * its completion.
- */
-static int qlge_mpi_handler(struct qlge_adapter *qdev, struct mbox_params *mbcp)
-{
- int status;
- int orig_count = mbcp->out_count;
-
- /* Just get mailbox zero for now. */
- mbcp->out_count = 1;
- status = qlge_get_mb_sts(qdev, mbcp);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Could not read MPI, resetting ASIC!\n");
- qlge_queue_asic_error(qdev);
- goto end;
- }
-
- switch (mbcp->mbox_out[0]) {
- /* This case is only active when we arrive here
- * as a result of issuing a mailbox command to
- * the firmware.
- */
- case MB_CMD_STS_INTRMDT:
- case MB_CMD_STS_GOOD:
- case MB_CMD_STS_INVLD_CMD:
- case MB_CMD_STS_XFC_ERR:
- case MB_CMD_STS_CSUM_ERR:
- case MB_CMD_STS_ERR:
- case MB_CMD_STS_PARAM_ERR:
- /* We can only get mailbox status if we're polling from an
- * unfinished command. Get the rest of the status data and
- * return back to the caller.
- * We only end up here when we're polling for a mailbox
- * command completion.
- */
- mbcp->out_count = orig_count;
- status = qlge_get_mb_sts(qdev, mbcp);
- return status;
-
- /* We are being asked by firmware to accept
- * a change to the port. This is only
- * a change to max frame sizes (Tx/Rx), pause
- * parameters, or loopback mode.
- */
- case AEN_IDC_REQ:
- status = qlge_idc_req_aen(qdev);
- break;
-
-	/* Process an inbound IDC event.
- * This will happen when we're trying to
- * change tx/rx max frame size, change pause
- * parameters or loopback mode.
- */
- case AEN_IDC_CMPLT:
- case AEN_IDC_EXT:
- status = qlge_idc_cmplt_aen(qdev);
- break;
-
- case AEN_LINK_UP:
- qlge_link_up(qdev, mbcp);
- break;
-
- case AEN_LINK_DOWN:
- qlge_link_down(qdev, mbcp);
- break;
-
- case AEN_FW_INIT_DONE:
-		/* If we're in the process of executing the firmware,
- * then convert the status to normal mailbox status.
- */
- if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
- mbcp->out_count = orig_count;
- status = qlge_get_mb_sts(qdev, mbcp);
- mbcp->mbox_out[0] = MB_CMD_STS_GOOD;
- return status;
- }
- qlge_init_fw_done(qdev, mbcp);
- break;
-
- case AEN_AEN_SFP_IN:
- qlge_sfp_in(qdev, mbcp);
- break;
-
- case AEN_AEN_SFP_OUT:
- qlge_sfp_out(qdev, mbcp);
- break;
-
- /* This event can arrive at boot time or after an
- * MPI reset if the firmware failed to initialize.
- */
- case AEN_FW_INIT_FAIL:
-		/* If we're in the process of executing the firmware,
- * then convert the status to normal mailbox status.
- */
- if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
- mbcp->out_count = orig_count;
- status = qlge_get_mb_sts(qdev, mbcp);
- mbcp->mbox_out[0] = MB_CMD_STS_ERR;
- return status;
- }
- netif_err(qdev, drv, qdev->ndev,
- "Firmware initialization failed.\n");
- status = -EIO;
- qlge_queue_fw_error(qdev);
- break;
-
- case AEN_SYS_ERR:
- netif_err(qdev, drv, qdev->ndev, "System Error.\n");
- qlge_queue_fw_error(qdev);
- status = -EIO;
- break;
-
- case AEN_AEN_LOST:
- qlge_aen_lost(qdev, mbcp);
- break;
-
- case AEN_DCBX_CHG:
- /* Need to support AEN 8110 */
- break;
- default:
- netif_err(qdev, drv, qdev->ndev,
- "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
- /* Clear the MPI firmware status. */
- }
-end:
- qlge_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
- /* Restore the original mailbox count to
- * what the caller asked for. This can get
- * changed when a mailbox command is waiting
- * for a response and an AEN arrives and
- * is handled.
- */
- mbcp->out_count = orig_count;
- return status;
-}
-
-/* Execute a single mailbox command.
- * mbcp->mbox_in is an array of u32; each
- * element in the array contains the value for its
- * respective mailbox register.
- */
-static int qlge_mailbox_command(struct qlge_adapter *qdev, struct mbox_params *mbcp)
-{
- int status;
- unsigned long count;
-
- mutex_lock(&qdev->mpi_mutex);
-
- /* Begin polled mode for MPI */
- qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
-
- /* Load the mailbox registers and wake up MPI RISC. */
- status = qlge_exec_mb_cmd(qdev, mbcp);
- if (status)
- goto end;
-
- /* If we're generating a system error, then there's nothing
- * to wait for.
- */
- if (mbcp->mbox_in[0] == MB_CMD_MAKE_SYS_ERR)
- goto end;
-
- /* Wait for the command to complete. We loop
- * here because some AEN might arrive while
- * we're waiting for the mailbox command to
- * complete. If more than 5 seconds expire we can
- * assume something is wrong.
- */
- count = jiffies + HZ * MAILBOX_TIMEOUT;
- do {
- /* Wait for the interrupt to come in. */
- status = qlge_wait_mbx_cmd_cmplt(qdev);
- if (status)
- continue;
-
- /* Process the event. If it's an AEN, it
- * will be handled in-line or a worker
- * will be spawned. If it's our completion
- * we will catch it below.
- */
- status = qlge_mpi_handler(qdev, mbcp);
- if (status)
- goto end;
-
-		/* It's either the completion of our mailbox
-		 * command or an AEN. If it's our
- * completion then get out.
- */
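-		/* Completion status codes live in bits 15:12 of mailbox 0,
-		 * hence the 0x0000f000 mask.
-		 */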
- if (((mbcp->mbox_out[0] & 0x0000f000) ==
- MB_CMD_STS_GOOD) ||
- ((mbcp->mbox_out[0] & 0x0000f000) ==
- MB_CMD_STS_INTRMDT))
- goto done;
- } while (time_before(jiffies, count));
-
- netif_err(qdev, drv, qdev->ndev,
- "Timed out waiting for mailbox complete.\n");
- status = -ETIMEDOUT;
- goto end;
-
-done:
-
- /* Now we can clear the interrupt condition
- * and look at our status.
- */
- qlge_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
-
- if (((mbcp->mbox_out[0] & 0x0000f000) !=
- MB_CMD_STS_GOOD) &&
- ((mbcp->mbox_out[0] & 0x0000f000) !=
- MB_CMD_STS_INTRMDT)) {
- status = -EIO;
- }
-end:
- /* End polled mode for MPI */
- qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
- mutex_unlock(&qdev->mpi_mutex);
- return status;
-}
-
-/* Get MPI firmware version. This will be used for
- * driver banner and for ethtool info.
- * Returns zero on success.
- */
-int qlge_mb_about_fw(struct qlge_adapter *qdev)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status = 0;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 1;
- mbcp->out_count = 3;
-
- mbcp->mbox_in[0] = MB_CMD_ABOUT_FW;
-
- status = qlge_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed about firmware command\n");
- status = -EIO;
- }
-
- /* Store the firmware version */
- qdev->fw_rev_id = mbcp->mbox_out[1];
-
- return status;
-}
-
-/* Get functional state for MPI firmware.
- * Returns zero on success.
- */
-int qlge_mb_get_fw_state(struct qlge_adapter *qdev)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status = 0;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 1;
- mbcp->out_count = 2;
-
- mbcp->mbox_in[0] = MB_CMD_GET_FW_STATE;
-
- status = qlge_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed Get Firmware State.\n");
- status = -EIO;
- }
-
- /* If bit zero is set in mbx 1 then the firmware is
- * running, but not initialized. This should never
- * happen.
- */
- if (mbcp->mbox_out[1] & 1) {
- netif_err(qdev, drv, qdev->ndev,
- "Firmware waiting for initialization.\n");
- status = -EIO;
- }
-
- return status;
-}
-
-/* Send an ACK mailbox command to the firmware to
- * let it continue with the change.
- */
-static int qlge_mb_idc_ack(struct qlge_adapter *qdev)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status = 0;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 5;
- mbcp->out_count = 1;
-
- mbcp->mbox_in[0] = MB_CMD_IDC_ACK;
- mbcp->mbox_in[1] = qdev->idc_mbc.mbox_out[1];
- mbcp->mbox_in[2] = qdev->idc_mbc.mbox_out[2];
- mbcp->mbox_in[3] = qdev->idc_mbc.mbox_out[3];
- mbcp->mbox_in[4] = qdev->idc_mbc.mbox_out[4];
-
- status = qlge_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev, "Failed IDC ACK send.\n");
- status = -EIO;
- }
- return status;
-}
-
-/* Set link settings and maximum frame size settings
- * for the current port.
- * Most likely will block.
- */
-int qlge_mb_set_port_cfg(struct qlge_adapter *qdev)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status = 0;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 3;
- mbcp->out_count = 1;
-
- mbcp->mbox_in[0] = MB_CMD_SET_PORT_CFG;
- mbcp->mbox_in[1] = qdev->link_config;
- mbcp->mbox_in[2] = qdev->max_frame_size;
-
- status = qlge_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) {
- netif_err(qdev, drv, qdev->ndev,
- "Port Config sent, wait for IDC.\n");
- } else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed Set Port Configuration.\n");
- status = -EIO;
- }
- return status;
-}
-
-static int qlge_mb_dump_ram(struct qlge_adapter *qdev, u64 req_dma, u32 addr,
- u32 size)
-{
- int status = 0;
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 9;
- mbcp->out_count = 1;
-
- mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM;
- mbcp->mbox_in[1] = LSW(addr);
- mbcp->mbox_in[2] = MSW(req_dma);
- mbcp->mbox_in[3] = LSW(req_dma);
- mbcp->mbox_in[4] = MSW(size);
- mbcp->mbox_in[5] = LSW(size);
- mbcp->mbox_in[6] = MSW(MSD(req_dma));
- mbcp->mbox_in[7] = LSW(MSD(req_dma));
- mbcp->mbox_in[8] = MSW(addr);
-
- status = qlge_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev, "Failed to dump risc RAM.\n");
- status = -EIO;
- }
- return status;
-}
-
-/* Issue a mailbox command to dump RISC RAM. */
-int qlge_dump_risc_ram_area(struct qlge_adapter *qdev, void *buf,
- u32 ram_addr, int word_count)
-{
- int status;
- char *my_buf;
- dma_addr_t buf_dma;
-
- my_buf = dma_alloc_coherent(&qdev->pdev->dev,
- word_count * sizeof(u32), &buf_dma,
- GFP_ATOMIC);
- if (!my_buf)
- return -EIO;
-
- status = qlge_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
- if (!status)
- memcpy(buf, my_buf, word_count * sizeof(u32));
-
- dma_free_coherent(&qdev->pdev->dev, word_count * sizeof(u32), my_buf,
- buf_dma);
- return status;
-}
-
-/* Get link settings and maximum frame size settings
- * for the current port.
- * Most likely will block.
- */
-int qlge_mb_get_port_cfg(struct qlge_adapter *qdev)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status = 0;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 1;
- mbcp->out_count = 3;
-
- mbcp->mbox_in[0] = MB_CMD_GET_PORT_CFG;
-
- status = qlge_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed Get Port Configuration.\n");
- status = -EIO;
- } else {
- netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
- "Passed Get Port Configuration.\n");
- qdev->link_config = mbcp->mbox_out[1];
- qdev->max_frame_size = mbcp->mbox_out[2];
- }
- return status;
-}
-
-int qlge_mb_wol_mode(struct qlge_adapter *qdev, u32 wol)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 2;
- mbcp->out_count = 1;
-
- mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE;
- mbcp->mbox_in[1] = wol;
-
- status = qlge_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
- status = -EIO;
- }
- return status;
-}
-
-int qlge_mb_wol_set_magic(struct qlge_adapter *qdev, u32 enable_wol)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status;
- const u8 *addr = qdev->ndev->dev_addr;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 8;
- mbcp->out_count = 1;
-
- mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC;
- if (enable_wol) {
- mbcp->mbox_in[1] = (u32)addr[0];
- mbcp->mbox_in[2] = (u32)addr[1];
- mbcp->mbox_in[3] = (u32)addr[2];
- mbcp->mbox_in[4] = (u32)addr[3];
- mbcp->mbox_in[5] = (u32)addr[4];
- mbcp->mbox_in[6] = (u32)addr[5];
- mbcp->mbox_in[7] = 0;
- } else {
- mbcp->mbox_in[1] = 0;
- mbcp->mbox_in[2] = 1;
- mbcp->mbox_in[3] = 1;
- mbcp->mbox_in[4] = 1;
- mbcp->mbox_in[5] = 1;
- mbcp->mbox_in[6] = 1;
- mbcp->mbox_in[7] = 0;
- }
-
- status = qlge_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
- status = -EIO;
- }
- return status;
-}
-
-/* IDC - Inter Device Communication...
- * Some firmware commands require consent of the adjacent FCoE
- * function. This function waits for the OK, or a
- * counter-request for a little more time.
- * The firmware will complete the request if the other
- * function doesn't respond.
- */
-static int qlge_idc_wait(struct qlge_adapter *qdev)
-{
- int status = -ETIMEDOUT;
- struct mbox_params *mbcp = &qdev->idc_mbc;
- long wait_time;
-
- for (wait_time = 1 * HZ; wait_time;) {
- /* Wait here for the command to complete
- * via the IDC process.
- */
- wait_time =
- wait_for_completion_timeout(&qdev->ide_completion,
- wait_time);
- if (!wait_time) {
- netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n");
- break;
- }
- /* Now examine the response from the IDC process.
- * We might have a good completion or a request for
- * more wait time.
- */
- if (mbcp->mbox_out[0] == AEN_IDC_EXT) {
- netif_err(qdev, drv, qdev->ndev,
- "IDC Time Extension from function.\n");
- wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f;
- } else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) {
- netif_err(qdev, drv, qdev->ndev, "IDC Success.\n");
- status = 0;
- break;
- } else {
- netif_err(qdev, drv, qdev->ndev,
- "IDC: Invalid State 0x%.04x.\n",
- mbcp->mbox_out[0]);
- status = -EIO;
- break;
- }
- }
-
- return status;
-}
-
-int qlge_mb_set_led_cfg(struct qlge_adapter *qdev, u32 led_config)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 2;
- mbcp->out_count = 1;
-
- mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG;
- mbcp->mbox_in[1] = led_config;
-
- status = qlge_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed to set LED Configuration.\n");
- status = -EIO;
- }
-
- return status;
-}
-
-int qlge_mb_get_led_cfg(struct qlge_adapter *qdev)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 1;
- mbcp->out_count = 2;
-
- mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG;
-
- status = qlge_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed to get LED Configuration.\n");
- status = -EIO;
- } else {
- qdev->led_config = mbcp->mbox_out[1];
- }
- return status;
-}
-
-int qlge_mb_set_mgmnt_traffic_ctl(struct qlge_adapter *qdev, u32 control)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 1;
- mbcp->out_count = 2;
-
- mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL;
- mbcp->mbox_in[1] = control;
-
- status = qlge_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD)
- return status;
-
- if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
- netif_err(qdev, drv, qdev->ndev,
- "Command not supported by firmware.\n");
- status = -EINVAL;
- } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
- /* This indicates that the firmware is
- * already in the state we are trying to
- * change it to.
- */
- netif_err(qdev, drv, qdev->ndev,
- "Command parameters make no change.\n");
- }
- return status;
-}
-
-/* Returns a negative error code or the mailbox command status. */
-static int qlge_mb_get_mgmnt_traffic_ctl(struct qlge_adapter *qdev, u32 *control)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
- *control = 0;
-
- mbcp->in_count = 1;
- mbcp->out_count = 1;
-
- mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL;
-
- status = qlge_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) {
- *control = mbcp->mbox_in[1];
- return status;
- }
-
- if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
- netif_err(qdev, drv, qdev->ndev,
- "Command not supported by firmware.\n");
- status = -EINVAL;
- } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed to get MPI traffic control.\n");
- status = -EIO;
- }
- return status;
-}
-
-int qlge_wait_fifo_empty(struct qlge_adapter *qdev)
-{
- int count;
- u32 mgmnt_fifo_empty;
- u32 nic_fifo_empty;
-
- for (count = 6; count; count--) {
- nic_fifo_empty = qlge_read32(qdev, STS) & STS_NFE;
- qlge_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty);
- mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY;
- if (nic_fifo_empty && mgmnt_fifo_empty)
- return 0;
- msleep(100);
- }
- return -ETIMEDOUT;
-}
-
-/* API called in work thread context to set new TX/RX
- * maximum frame size values to match MTU.
- */
-static int qlge_set_port_cfg(struct qlge_adapter *qdev)
-{
- int status;
-
- status = qlge_mb_set_port_cfg(qdev);
- if (status)
- return status;
- status = qlge_idc_wait(qdev);
- return status;
-}
-
-/* The following routines are worker threads that process
- * events that may sleep waiting for completion.
- */
-
-/* This thread gets the maximum TX and RX frame size values
- * from the firmware and, if necessary, changes them to match
- * the MTU setting.
- */
-void qlge_mpi_port_cfg_work(struct work_struct *work)
-{
- struct qlge_adapter *qdev =
- container_of(work, struct qlge_adapter, mpi_port_cfg_work.work);
- int status;
-
- status = qlge_mb_get_port_cfg(qdev);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Bug: Failed to get port config data.\n");
- goto err;
- }
-
- if (qdev->link_config & CFG_JUMBO_FRAME_SIZE &&
- qdev->max_frame_size == CFG_DEFAULT_MAX_FRAME_SIZE)
- goto end;
-
- qdev->link_config |= CFG_JUMBO_FRAME_SIZE;
- qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE;
- status = qlge_set_port_cfg(qdev);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Bug: Failed to set port config data.\n");
- goto err;
- }
-end:
- clear_bit(QL_PORT_CFG, &qdev->flags);
- return;
-err:
- qlge_queue_fw_error(qdev);
- goto end;
-}
-
-/* Process an inter-device request. This is issued by
- * the firmware in response to another function requesting
- * a change to the port. We set a flag to indicate a change
- * has been made and then send a mailbox command ACKing
- * the change request.
- */
-void qlge_mpi_idc_work(struct work_struct *work)
-{
- struct qlge_adapter *qdev =
- container_of(work, struct qlge_adapter, mpi_idc_work.work);
- int status;
- struct mbox_params *mbcp = &qdev->idc_mbc;
- u32 aen;
- int timeout;
-
- aen = mbcp->mbox_out[1] >> 16;
- timeout = (mbcp->mbox_out[1] >> 8) & 0xf;
-
- switch (aen) {
- default:
- netif_err(qdev, drv, qdev->ndev,
- "Bug: Unhandled IDC action.\n");
- break;
- case MB_CMD_PORT_RESET:
- case MB_CMD_STOP_FW:
- qlge_link_off(qdev);
- fallthrough;
- case MB_CMD_SET_PORT_CFG:
- /* Signal the resulting link up AEN
- * that the frame routing and mac addr
- * needs to be set.
- */
- set_bit(QL_CAM_RT_SET, &qdev->flags);
- /* Do ACK if required */
- if (timeout) {
- status = qlge_mb_idc_ack(qdev);
- if (status)
- netif_err(qdev, drv, qdev->ndev,
- "Bug: No pending IDC!\n");
- } else {
- netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
- "IDC ACK not required\n");
- status = 0; /* success */
- }
- break;
-
- /* These sub-commands issued by another (FCoE)
- * function are requesting to do an operation
- * on the shared resource (MPI environment).
- * We currently don't issue these so we just
- * ACK the request.
- */
- case MB_CMD_IOP_RESTART_MPI:
- case MB_CMD_IOP_PREP_LINK_DOWN:
- /* Drop the link, reload the routing
- * table when link comes up.
- */
- qlge_link_off(qdev);
- set_bit(QL_CAM_RT_SET, &qdev->flags);
- fallthrough;
- case MB_CMD_IOP_DVR_START:
- case MB_CMD_IOP_FLASH_ACC:
- case MB_CMD_IOP_CORE_DUMP_MPI:
- case MB_CMD_IOP_PREP_UPDATE_MPI:
- case MB_CMD_IOP_COMP_UPDATE_MPI:
- case MB_CMD_IOP_NONE: /* an IDC without params */
- /* Do ACK if required */
- if (timeout) {
- status = qlge_mb_idc_ack(qdev);
- if (status)
- netif_err(qdev, drv, qdev->ndev,
- "Bug: No pending IDC!\n");
- } else {
- netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
- "IDC ACK not required\n");
- status = 0; /* success */
- }
- break;
- }
-}
-
-void qlge_mpi_work(struct work_struct *work)
-{
- struct qlge_adapter *qdev =
- container_of(work, struct qlge_adapter, mpi_work.work);
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int err = 0;
-
- mutex_lock(&qdev->mpi_mutex);
- /* Begin polled mode for MPI */
- qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
-
- while (qlge_read32(qdev, STS) & STS_PI) {
- memset(mbcp, 0, sizeof(struct mbox_params));
- mbcp->out_count = 1;
- /* Don't continue if an async event
- * did not complete properly.
- */
- err = qlge_mpi_handler(qdev, mbcp);
- if (err)
- break;
- }
-
- /* End polled mode for MPI */
- qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
- mutex_unlock(&qdev->mpi_mutex);
-}
-
-void qlge_mpi_reset_work(struct work_struct *work)
-{
- struct qlge_adapter *qdev =
- container_of(work, struct qlge_adapter, mpi_reset_work.work);
- cancel_delayed_work_sync(&qdev->mpi_work);
- cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
- cancel_delayed_work_sync(&qdev->mpi_idc_work);
- /* If we're not the dominant NIC function,
- * then there is nothing to do.
- */
- if (!qlge_own_firmware(qdev)) {
- netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
- return;
- }
-
- qlge_soft_reset_mpi_risc(qdev);
-}
diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c
index 82c11caeee7a..d0b7332645be 100644
--- a/drivers/staging/rtl8192e/dot11d.c
+++ b/drivers/staging/rtl8192e/dot11d.c
@@ -73,19 +73,16 @@ void dot11d_channel_map(u8 channel_plan, struct rtllib_device *ieee)
for (i = 12; i <= 14; i++)
GET_DOT11D_INFO(ieee)->channel_map[i] = 2;
ieee->bss_start_channel = 10;
- ieee->ibss_maxjoin_chal = 11;
break;
case COUNTRY_CODE_WORLD_WIDE_13:
for (i = 12; i <= 13; i++)
GET_DOT11D_INFO(ieee)->channel_map[i] = 2;
ieee->bss_start_channel = 10;
- ieee->ibss_maxjoin_chal = 11;
break;
default:
ieee->bss_start_channel = 1;
- ieee->ibss_maxjoin_chal = 14;
break;
}
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
index c229fd244a48..8c85f1c866d3 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
@@ -11,8 +11,6 @@
#define MAX_SILENT_RESET_RX_SLOT_NUM 10
-#define RX_MPDU_QUEUE 0
-
enum rtl819x_loopback {
RTL819X_NO_LOOPBACK = 0,
RTL819X_MAC_LOOPBACK = 1,
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 58e90b7772ef..e93394c51264 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -18,30 +18,6 @@
static int WDCAPARA_ADD[] = {EDCAPARA_BE, EDCAPARA_BK, EDCAPARA_VI,
EDCAPARA_VO};
-void rtl92e_start_beacon(struct net_device *dev)
-{
- struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
- struct rtllib_network *net = &priv->rtllib->current_network;
- u16 BcnTimeCfg = 0;
- u16 BcnCW = 6;
- u16 BcnIFS = 0xf;
-
- rtl92e_irq_disable(dev);
-
- rtl92e_writew(dev, ATIMWND, 2);
-
- rtl92e_writew(dev, BCN_INTERVAL, net->beacon_interval);
- rtl92e_writew(dev, BCN_DRV_EARLY_INT, 10);
- rtl92e_writew(dev, BCN_DMATIME, 256);
-
- rtl92e_writeb(dev, BCN_ERR_THRESH, 100);
-
- BcnTimeCfg |= BcnCW << BCN_TCFG_CW_SHIFT;
- BcnTimeCfg |= BcnIFS << BCN_TCFG_IFS;
- rtl92e_writew(dev, BCN_TCFG, BcnTimeCfg);
- rtl92e_irq_enable(dev);
-}
-
static void _rtl92e_update_msr(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -55,10 +31,6 @@ static void _rtl92e_update_msr(struct net_device *dev)
if (priv->rtllib->link_state == MAC80211_LINKED)
msr |= MSR_LINK_MANAGED;
break;
- case IW_MODE_ADHOC:
- if (priv->rtllib->link_state == MAC80211_LINKED)
- msr |= MSR_LINK_ADHOC;
- break;
default:
break;
}
@@ -513,10 +485,10 @@ bool rtl92e_start_adapter(struct net_device *dev)
start:
rtl92e_reset_desc_ring(dev);
priv->rf_mode = RF_OP_By_SW_3wire;
- if (priv->rst_progress == RESET_TYPE_NORESET) {
- rtl92e_writeb(dev, ANAPAR, 0x37);
- mdelay(500);
- }
+
+ rtl92e_writeb(dev, ANAPAR, 0x37);
+ mdelay(500);
+
priv->fw_info->status = FW_STATUS_0_INIT;
ulRegRead = rtl92e_readl(dev, CPU_GEN);
@@ -546,21 +518,20 @@ start:
}
priv->loopback_mode = RTL819X_NO_LOOPBACK;
- if (priv->rst_progress == RESET_TYPE_NORESET) {
- ulRegRead = rtl92e_readl(dev, CPU_GEN);
- if (priv->loopback_mode == RTL819X_NO_LOOPBACK)
- ulRegRead = (ulRegRead & CPU_GEN_NO_LOOPBACK_MSK) |
- CPU_GEN_NO_LOOPBACK_SET;
- else if (priv->loopback_mode == RTL819X_MAC_LOOPBACK)
- ulRegRead |= CPU_CCK_LOOPBACK;
- else
- netdev_err(dev, "%s: Invalid loopback mode setting.\n",
- __func__);
+ ulRegRead = rtl92e_readl(dev, CPU_GEN);
+ if (priv->loopback_mode == RTL819X_NO_LOOPBACK)
+ ulRegRead = (ulRegRead & CPU_GEN_NO_LOOPBACK_MSK) |
+ CPU_GEN_NO_LOOPBACK_SET;
+ else if (priv->loopback_mode == RTL819X_MAC_LOOPBACK)
+ ulRegRead |= CPU_CCK_LOOPBACK;
+ else
+ netdev_err(dev, "%s: Invalid loopback mode setting.\n",
+ __func__);
- rtl92e_writel(dev, CPU_GEN, ulRegRead);
+ rtl92e_writel(dev, CPU_GEN, ulRegRead);
+
+ udelay(500);
- udelay(500);
- }
_rtl92e_hwconfig(dev);
rtl92e_writeb(dev, CMDR, CR_RE | CR_TE);
@@ -595,8 +566,7 @@ start:
rtl92e_writeb(dev, ACK_TIMEOUT, 0x30);
- if (priv->rst_progress == RESET_TYPE_NORESET)
- rtl92e_set_wireless_mode(dev, priv->rtllib->mode);
+ rtl92e_set_wireless_mode(dev, priv->rtllib->mode);
rtl92e_cam_reset(dev);
{
u8 SECR_value = 0x0;
@@ -635,12 +605,10 @@ start:
}
}
- if (priv->rst_progress == RESET_TYPE_NORESET) {
- rtStatus = rtl92e_config_rf(dev);
- if (!rtStatus) {
- netdev_info(dev, "RF Config failed\n");
- return rtStatus;
- }
+ rtStatus = rtl92e_config_rf(dev);
+ if (!rtStatus) {
+ netdev_info(dev, "RF Config failed\n");
+ return rtStatus;
}
rtl92e_set_bb_reg(dev, rFPGA0_RFMOD, bCCKEn, 0x1);
@@ -662,39 +630,37 @@ start:
else
priv->rf_mode = RF_OP_By_SW_3wire;
- if (priv->rst_progress == RESET_TYPE_NORESET) {
- rtl92e_dm_init_txpower_tracking(dev);
-
- if (priv->ic_cut >= IC_VersionCut_D) {
- tmpRegA = rtl92e_get_bb_reg(dev, rOFDM0_XATxIQImbalance,
- bMaskDWord);
- rtl92e_get_bb_reg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord);
-
- for (i = 0; i < TX_BB_GAIN_TABLE_LEN; i++) {
- if (tmpRegA == dm_tx_bb_gain[i]) {
- priv->rfa_txpowertrackingindex = i;
- priv->rfa_txpowertrackingindex_real = i;
- priv->rfa_txpowertracking_default =
- priv->rfa_txpowertrackingindex;
- break;
- }
+ rtl92e_dm_init_txpower_tracking(dev);
+
+ if (priv->ic_cut >= IC_VersionCut_D) {
+ tmpRegA = rtl92e_get_bb_reg(dev, rOFDM0_XATxIQImbalance,
+ bMaskDWord);
+ rtl92e_get_bb_reg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord);
+
+ for (i = 0; i < TX_BB_GAIN_TABLE_LEN; i++) {
+ if (tmpRegA == dm_tx_bb_gain[i]) {
+ priv->rfa_txpowertrackingindex = i;
+ priv->rfa_txpowertrackingindex_real = i;
+ priv->rfa_txpowertracking_default =
+ priv->rfa_txpowertrackingindex;
+ break;
}
+ }
- TempCCk = rtl92e_get_bb_reg(dev, rCCK0_TxFilter1,
- bMaskByte2);
+ TempCCk = rtl92e_get_bb_reg(dev, rCCK0_TxFilter1,
+ bMaskByte2);
- for (i = 0; i < CCK_TX_BB_GAIN_TABLE_LEN; i++) {
- if (TempCCk == dm_cck_tx_bb_gain[i][0]) {
- priv->cck_present_attn_20m_def = i;
- break;
- }
+ for (i = 0; i < CCK_TX_BB_GAIN_TABLE_LEN; i++) {
+ if (TempCCk == dm_cck_tx_bb_gain[i][0]) {
+ priv->cck_present_attn_20m_def = i;
+ break;
}
- priv->cck_present_attn_40m_def = 0;
- priv->cck_present_attn_diff = 0;
- priv->cck_present_attn =
- priv->cck_present_attn_20m_def;
- priv->btxpower_tracking = false;
}
+ priv->cck_present_attn_40m_def = 0;
+ priv->cck_present_attn_diff = 0;
+ priv->cck_present_attn =
+ priv->cck_present_attn_20m_def;
+ priv->btxpower_tracking = false;
}
rtl92e_irq_enable(dev);
end:
@@ -706,7 +672,6 @@ static void _rtl92e_net_update(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_network *net;
- u16 BcnTimeCfg = 0, BcnCW = 6, BcnIFS = 0xf;
u16 rate_config = 0;
net = &priv->rtllib->current_network;
@@ -715,19 +680,6 @@ static void _rtl92e_net_update(struct net_device *dev)
priv->basic_rate = rate_config &= 0x15f;
rtl92e_writew(dev, BSSIDR, *(u16 *)net->bssid);
rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(net->bssid + 2));
-
- if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
- rtl92e_writew(dev, ATIMWND, 2);
- rtl92e_writew(dev, BCN_DMATIME, 256);
- rtl92e_writew(dev, BCN_INTERVAL, net->beacon_interval);
- rtl92e_writew(dev, BCN_DRV_EARLY_INT, 10);
- rtl92e_writeb(dev, BCN_ERR_THRESH, 100);
-
- BcnTimeCfg |= (BcnCW << BCN_TCFG_CW_SHIFT);
- BcnTimeCfg |= BcnIFS << BCN_TCFG_IFS;
-
- rtl92e_writew(dev, BCN_TCFG, BcnTimeCfg);
- }
}
void rtl92e_link_change(struct net_device *dev)
@@ -749,15 +701,12 @@ void rtl92e_link_change(struct net_device *dev)
}
_rtl92e_update_msr(dev);
- if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC) {
+ if (ieee->iw_mode == IW_MODE_INFRA) {
u32 reg;
reg = rtl92e_readl(dev, RCR);
if (priv->rtllib->link_state == MAC80211_LINKED) {
- if (ieee->intel_promiscuous_md_info.promiscuous_on)
- ;
- else
- priv->receive_config = reg |= RCR_CBSSID;
+ priv->receive_config = reg |= RCR_CBSSID;
} else {
priv->receive_config = reg &= ~RCR_CBSSID;
}
@@ -1411,12 +1360,12 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
static u32 slide_beacon_adc_pwdb_index;
static u32 slide_beacon_adc_pwdb_statistics;
static u32 last_beacon_adc_pwdb;
- struct rtllib_hdr_3addr *hdr;
+ struct ieee80211_hdr_3addr *hdr;
u16 sc;
unsigned int seq;
- hdr = (struct rtllib_hdr_3addr *)buffer;
- sc = le16_to_cpu(hdr->seq_ctl);
+ hdr = (struct ieee80211_hdr_3addr *)buffer;
+ sc = le16_to_cpu(hdr->seq_ctrl);
seq = WLAN_GET_SEQ_SEQ(sc);
curr_st->Seq_Num = seq;
if (!prev_st->bIsAMPDU)
@@ -1561,7 +1510,7 @@ static void _rtl92e_translate_rx_signal_stats(struct net_device *dev,
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
bool bpacket_match_bssid, bpacket_toself;
bool bPacketBeacon = false;
- struct rtllib_hdr_3addr *hdr;
+ struct ieee80211_hdr_3addr *hdr;
bool bToSelfBA = false;
static struct rtllib_rx_stats previous_stats;
u16 fc, type;
@@ -1570,21 +1519,21 @@ static void _rtl92e_translate_rx_signal_stats(struct net_device *dev,
tmp_buf = skb->data + pstats->RxDrvInfoSize + pstats->RxBufShift;
- hdr = (struct rtllib_hdr_3addr *)tmp_buf;
- fc = le16_to_cpu(hdr->frame_ctl);
+ hdr = (struct ieee80211_hdr_3addr *)tmp_buf;
+ fc = le16_to_cpu(hdr->frame_control);
type = WLAN_FC_GET_TYPE(fc);
praddr = hdr->addr1;
bpacket_match_bssid =
((type != RTLLIB_FTYPE_CTL) &&
ether_addr_equal(priv->rtllib->current_network.bssid,
- (fc & RTLLIB_FCTL_TODS) ? hdr->addr1 :
- (fc & RTLLIB_FCTL_FROMDS) ? hdr->addr2 :
+ (fc & IEEE80211_FCTL_TODS) ? hdr->addr1 :
+ (fc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 :
hdr->addr3) &&
(!pstats->bHwError) && (!pstats->bCRC) && (!pstats->bICV));
bpacket_toself = bpacket_match_bssid && /* check this */
ether_addr_equal(praddr, priv->rtllib->dev->dev_addr);
- if (WLAN_FC_GET_FRAMETYPE(fc) == RTLLIB_STYPE_BEACON)
+ if (ieee80211_is_beacon(hdr->frame_control))
bPacketBeacon = true;
_rtl92e_process_phyinfo(priv, tmp_buf, &previous_stats, pstats);
_rtl92e_query_rxphystatus(priv, pstats, pdesc, pdrvinfo,
@@ -1870,7 +1819,7 @@ rtl92e_init_variables(struct net_device *dev)
priv->bfirst_after_down = false;
}
-void rtl92e_enable_irq(struct net_device *dev)
+void rtl92e_irq_enable(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
@@ -1879,7 +1828,7 @@ void rtl92e_enable_irq(struct net_device *dev)
rtl92e_writel(dev, INTA_MASK, priv->irq_mask[0]);
}
-void rtl92e_disable_irq(struct net_device *dev)
+void rtl92e_irq_disable(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
@@ -1892,7 +1841,7 @@ void rtl92e_enable_rx(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
- rtl92e_writel(dev, RDQDA, priv->rx_ring_dma[RX_MPDU_QUEUE]);
+ rtl92e_writel(dev, RDQDA, priv->rx_ring_dma);
}
static const u32 TX_DESC_BASE[] = {
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
index 11366fda4ec3..878c96236824 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
@@ -16,10 +16,7 @@ bool rtl92e_is_rx_stuck(struct net_device *dev);
void rtl92e_ack_irq(struct net_device *dev, u32 *p_inta);
void rtl92e_enable_rx(struct net_device *dev);
void rtl92e_enable_tx(struct net_device *dev);
-void rtl92e_enable_irq(struct net_device *dev);
-void rtl92e_disable_irq(struct net_device *dev);
void rtl92e_init_variables(struct net_device *dev);
-void rtl92e_start_beacon(struct net_device *dev);
void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val);
void rtl92e_get_eeprom_size(struct net_device *dev);
bool rtl92e_start_adapter(struct net_device *dev);
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
index 09f8c76b7e65..1b444529b59c 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
@@ -43,7 +43,7 @@ enum _RTL8192PCI_HW {
PMR = 0x00c,
EPROM_CMD = 0x00e,
-#define EPROM_CMD_9356SEL BIT4
+#define EPROM_CMD_9356SEL BIT(4)
#define EPROM_CMD_OPERATING_MODE_SHIFT 6
#define EPROM_CMD_NORMAL 0
#define EPROM_CMD_PROGRAM 2
@@ -61,16 +61,16 @@ enum _RTL8192PCI_HW {
#define CR_TE 0x04
SIFS = 0x03E,
RCR = 0x044,
-#define RCR_ONLYERLPKT BIT31
-#define RCR_CBSSID BIT23
-#define RCR_ADD3 BIT21
-#define RCR_AMF BIT20
-#define RCR_ADF BIT18
-#define RCR_AICV BIT12
-#define RCR_AB BIT3
-#define RCR_AM BIT2
-#define RCR_APM BIT1
-#define RCR_AAP BIT0
+#define RCR_ONLYERLPKT BIT(31)
+#define RCR_CBSSID BIT(23)
+#define RCR_ADD3 BIT(21)
+#define RCR_AMF BIT(20)
+#define RCR_ADF BIT(18)
+#define RCR_AICV BIT(12)
+#define RCR_AB BIT(3)
+#define RCR_AM BIT(2)
+#define RCR_APM BIT(1)
+#define RCR_AAP BIT(0)
#define RCR_MXDMA_OFFSET 8
#define RCR_FIFO_OFFSET 13
SLOT_TIME = 0x049,
@@ -95,34 +95,34 @@ enum _RTL8192PCI_HW {
#define TOTAL_CAM_ENTRY 32
WCAMI = 0x0A4,
SECR = 0x0B0,
-#define SCR_TxUseDK BIT0
-#define SCR_RxUseDK BIT1
-#define SCR_TxEncEnable BIT2
-#define SCR_RxDecEnable BIT3
-#define SCR_NoSKMC BIT5
+#define SCR_TxUseDK BIT(0)
+#define SCR_RxUseDK BIT(1)
+#define SCR_TxEncEnable BIT(2)
+#define SCR_RxDecEnable BIT(3)
+#define SCR_NoSKMC BIT(5)
SWREGULATOR = 0x0BD,
INTA_MASK = 0x0f4,
-#define IMR_TBDOK BIT27
-#define IMR_TBDER BIT26
-#define IMR_TXFOVW BIT15
-#define IMR_TIMEOUT0 BIT14
-#define IMR_BcnInt BIT13
-#define IMR_RXFOVW BIT12
-#define IMR_RDU BIT11
-#define IMR_RXCMDOK BIT10
-#define IMR_BDOK BIT9
-#define IMR_HIGHDOK BIT8
-#define IMR_COMDOK BIT7
-#define IMR_MGNTDOK BIT6
-#define IMR_HCCADOK BIT5
-#define IMR_BKDOK BIT4
-#define IMR_BEDOK BIT3
-#define IMR_VIDOK BIT2
-#define IMR_VODOK BIT1
-#define IMR_ROK BIT0
+#define IMR_TBDOK BIT(27)
+#define IMR_TBDER BIT(26)
+#define IMR_TXFOVW BIT(15)
+#define IMR_TIMEOUT0 BIT(14)
+#define IMR_BcnInt BIT(13)
+#define IMR_RXFOVW BIT(12)
+#define IMR_RDU BIT(11)
+#define IMR_RXCMDOK BIT(10)
+#define IMR_BDOK BIT(9)
+#define IMR_HIGHDOK BIT(8)
+#define IMR_COMDOK BIT(7)
+#define IMR_MGNTDOK BIT(6)
+#define IMR_HCCADOK BIT(5)
+#define IMR_BKDOK BIT(4)
+#define IMR_BEDOK BIT(3)
+#define IMR_VIDOK BIT(2)
+#define IMR_VODOK BIT(1)
+#define IMR_ROK BIT(0)
ISR = 0x0f8,
TP_POLL = 0x0fd,
-#define TP_POLL_CQ BIT5
+#define TP_POLL_CQ BIT(5)
PSR = 0x0ff,
CPU_GEN = 0x100,
#define CPU_CCK_LOOPBACK 0x00030000
@@ -136,9 +136,9 @@ enum _RTL8192PCI_HW {
#define CPU_GEN_NO_LOOPBACK_MSK 0xFFF8FFFF
#define CPU_GEN_NO_LOOPBACK_SET 0x00080000
ACM_HW_CTRL = 0x171,
-#define ACM_HW_BEQ_EN BIT1
-#define ACM_HW_VIQ_EN BIT2
-#define ACM_HW_VOQ_EN BIT3
+#define ACM_HW_BEQ_EN BIT(1)
+#define ACM_HW_VIQ_EN BIT(2)
+#define ACM_HW_VOQ_EN BIT(3)
RQPN1 = 0x180,
RQPN2 = 0x184,
RQPN3 = 0x188,
@@ -159,7 +159,7 @@ enum _RTL8192PCI_HW {
WFCRC2 = 0x2f8,
BW_OPMODE = 0x300,
-#define BW_OPMODE_20MHZ BIT2
+#define BW_OPMODE_20MHZ BIT(2)
IC_VERRSION = 0x301,
MSR = 0x303,
#define MSR_LINK_MASK (BIT(1) | BIT(0))
@@ -178,19 +178,19 @@ enum _RTL8192PCI_HW {
TSFR = 0x308,
RRSR = 0x310,
#define RRSR_SHORT_OFFSET 23
-#define RRSR_1M BIT0
-#define RRSR_2M BIT1
-#define RRSR_5_5M BIT2
-#define RRSR_11M BIT3
-#define RRSR_6M BIT4
-#define RRSR_9M BIT5
-#define RRSR_12M BIT6
-#define RRSR_18M BIT7
-#define RRSR_24M BIT8
-#define RRSR_36M BIT9
-#define RRSR_48M BIT10
-#define RRSR_54M BIT11
-#define BRSR_AckShortPmb BIT23
+#define RRSR_1M BIT(0)
+#define RRSR_2M BIT(1)
+#define RRSR_5_5M BIT(2)
+#define RRSR_11M BIT(3)
+#define RRSR_6M BIT(4)
+#define RRSR_9M BIT(5)
+#define RRSR_12M BIT(6)
+#define RRSR_18M BIT(7)
+#define RRSR_24M BIT(8)
+#define RRSR_36M BIT(9)
+#define RRSR_48M BIT(10)
+#define RRSR_54M BIT(11)
+#define BRSR_AckShortPmb BIT(23)
UFWP = 0x318,
RATR0 = 0x320,
#define RATR_1M 0x00000001
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
index 875540a2079d..4d12d7385041 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
@@ -513,7 +513,7 @@ static u8 _rtl92e_phy_set_sw_chnl_cmd_array(struct net_device *dev,
{
struct sw_chnl_cmd *pCmd;
- if (CmdTable == NULL) {
+ if (!CmdTable) {
netdev_err(dev, "%s(): CmdTable cannot be NULL.\n", __func__);
return false;
}
@@ -920,9 +920,6 @@ void rtl92e_init_gain(struct net_device *dev, u8 Operation)
case IG_Backup:
initial_gain = SCAN_RX_INITIAL_GAIN;
BitMask = bMaskByte0;
- if (dm_digtable.dig_algorithm ==
- DIG_ALGO_BY_FALSE_ALARM)
- rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
priv->initgain_backup.xaagccore1 =
rtl92e_get_bb_reg(dev, rOFDM0_XAAGCCore1,
BitMask);
@@ -947,10 +944,6 @@ void rtl92e_init_gain(struct net_device *dev, u8 Operation)
break;
case IG_Restore:
BitMask = 0x7f;
- if (dm_digtable.dig_algorithm ==
- DIG_ALGO_BY_FALSE_ALARM)
- rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
-
rtl92e_set_bb_reg(dev, rOFDM0_XAAGCCore1, BitMask,
(u32)priv->initgain_backup.xaagccore1);
rtl92e_set_bb_reg(dev, rOFDM0_XBAGCCore1, BitMask,
@@ -965,10 +958,6 @@ void rtl92e_init_gain(struct net_device *dev, u8 Operation)
rtl92e_set_tx_power(dev,
priv->rtllib->current_network.channel);
-
- if (dm_digtable.dig_algorithm ==
- DIG_ALGO_BY_FALSE_ALARM)
- rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x1);
break;
}
}
@@ -976,7 +965,7 @@ void rtl92e_init_gain(struct net_device *dev, u8 Operation)
void rtl92e_set_rf_off(struct net_device *dev)
{
- rtl92e_set_bb_reg(dev, rFPGA0_XA_RFInterfaceOE, BIT4, 0x0);
+ rtl92e_set_bb_reg(dev, rFPGA0_XA_RFInterfaceOE, BIT(4), 0x0);
rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4, 0x300, 0x0);
rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, 0x18, 0x0);
rtl92e_set_bb_reg(dev, rOFDM0_TRxPathEnable, 0xf, 0x0);
@@ -1027,7 +1016,7 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev,
0x4, 0x1);
priv->hw_rf_off_action = 0;
rtl92e_set_bb_reg(dev, rFPGA0_XA_RFInterfaceOE,
- BIT4, 0x1);
+ BIT(4), 0x1);
rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4,
0x300, 0x3);
rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1,
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h
index 24fb0ca539ea..c48c56869c19 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h
@@ -248,75 +248,15 @@
#define bPAEnd 0xf
#define bTREnd 0x0f000000
#define bRFEnd 0x000f0000
-/* T2R */
-#define bCCAMask 0x000000f0
-#define bR2RCCAMask 0x00000f00
-#define bHSSI_R2TDelay 0xf8000000
-#define bHSSI_T2RDelay 0xf80000
/* Channel gain at continue TX. */
-#define bContTxHSSI 0x400
-#define bIGFromCCK 0x200
-#define bAGCAddress 0x3f
-#define bRxHPTx 0x7000
-#define bRxHPT2R 0x38000
-#define bRxHPCCKIni 0xc0000
-#define bAGCTxCode 0xc00000
-#define bAGCRxCode 0x300000
#define b3WireDataLength 0x800
#define b3WireAddressLength 0x400
-#define b3WireRFPowerDown 0x1
-/*#define bHWSISelect 0x8 */
-#define b2GPAPEPolarity 0x80000000
-#define bRFSW_TxDefaultAnt 0x3
-#define bRFSW_TxOptionAnt 0x30
-#define bRFSW_RxDefaultAnt 0x300
-#define bRFSW_RxOptionAnt 0x3000
-#define bRFSI_3WireData 0x1
-#define bRFSI_3WireClock 0x2
-#define bRFSI_3WireLoad 0x4
-#define bRFSI_3WireRW 0x8
/* 3-wire total control */
-#define bRFSI_3Wire 0xf
#define bRFSI_RFENV 0x10
-#define bRFSI_TRSW 0x20
-#define bRFSI_TRSWB 0x40
-#define bRFSI_ANTSW 0x100
-#define bRFSI_ANTSWB 0x200
-#define bRFSI_PAPE 0x400
-#define bBandSelect 0x1
-#define bHTSIG2_GI 0x80
-#define bHTSIG2_Smoothing 0x01
-#define bHTSIG2_Sounding 0x02
-#define bHTSIG2_Aggreaton 0x08
-#define bHTSIG2_STBC 0x30
-#define bHTSIG2_AdvCoding 0x40
-#define bHTSIG2_NumOfHTLTF 0x300
-#define bHTSIG2_CRC8 0x3fc
-#define bHTSIG1_MCS 0x7f
-#define bHTSIG1_BandWidth 0x80
-#define bHTSIG1_HTLength 0xffff
-#define bLSIG_Rate 0xf
-#define bLSIG_Reserved 0x10
-#define bLSIG_Length 0x1fffe
-#define bLSIG_Parity 0x20
-#define bCCKRxPhase 0x4
#define bLSSIReadAddress 0x3f000000 /* LSSI "read" address */
#define bLSSIReadEdge 0x80000000 /* LSSI "read" edge signal */
#define bLSSIReadBackData 0xfff
-#define bLSSIReadOKFlag 0x1000
-#define bCCKSampleRate 0x8 /* 0: 44 MHz, 1: 88MHz */
-
-#define bRegulator0Standby 0x1
-#define bRegulatorPLLStandby 0x2
-#define bRegulator1Standby 0x4
-#define bPLLPowerUp 0x8
-#define bDPLLPowerUp 0x10
-#define bDA10PowerUp 0x20
-#define bAD7PowerUp 0x200
-#define bDA6PowerUp 0x2000
-#define bXtalPowerUp 0x4000
-#define b40MDClkPowerUP 0x8000
-#define bDA6DebugMode 0x20000
+
#define bDA6Swing 0x380000
#define bADClkPhase 0x4000000
#define b80MClkDelay 0x18000000
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
index 944cc73fb2b6..69298c7c129a 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
@@ -17,7 +17,7 @@ void rtl92e_cam_reset(struct net_device *dev)
{
u32 ulcommand = 0;
- ulcommand |= BIT31 | BIT30;
+ ulcommand |= BIT(31) | BIT(30);
rtl92e_writel(dev, RWCAM, ulcommand);
}
@@ -33,11 +33,6 @@ void rtl92e_enable_hw_security_config(struct net_device *dev)
(priv->rtllib->auth_mode != 2)) {
SECR_value |= SCR_RxUseDK;
SECR_value |= SCR_TxUseDK;
- } else if ((ieee->iw_mode == IW_MODE_ADHOC) &&
- (ieee->pairwise_key_type & (KEY_TYPE_CCMP |
- KEY_TYPE_TKIP))) {
- SECR_value |= SCR_RxUseDK;
- SECR_value |= SCR_TxUseDK;
}
ieee->hwsec_active = 1;
@@ -94,13 +89,13 @@ void rtl92e_set_key(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
}
if (DefaultKey)
- usConfig |= BIT15 | (KeyType << 2);
+ usConfig |= BIT(15) | (KeyType << 2);
else
- usConfig |= BIT15 | (KeyType << 2) | KeyIndex;
+ usConfig |= BIT(15) | (KeyType << 2) | KeyIndex;
for (i = 0; i < CAM_CONTENT_COUNT; i++) {
TargetCommand = i + CAM_CONTENT_COUNT * EntryNo;
- TargetCommand |= BIT31 | BIT16;
+ TargetCommand |= BIT(31) | BIT(16);
if (i == 0) {
TargetContent = (u32)(*(MacAddr + 0)) << 16 |
@@ -117,7 +112,7 @@ void rtl92e_set_key(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
rtl92e_writel(dev, WCAMI, TargetContent);
rtl92e_writel(dev, RWCAM, TargetCommand);
} else {
- if (KeyContent != NULL) {
+ if (KeyContent) {
rtl92e_writel(dev, WCAMI,
(u32)(*(KeyContent + i - 2)));
rtl92e_writel(dev, RWCAM, TargetCommand);
@@ -126,108 +121,3 @@ void rtl92e_set_key(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
}
}
}
-
-void rtl92e_cam_restore(struct net_device *dev)
-{
- u8 EntryId = 0;
- struct r8192_priv *priv = rtllib_priv(dev);
- u8 *MacAddr = priv->rtllib->current_network.bssid;
-
- static u8 CAM_CONST_ADDR[4][6] = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
- };
- static u8 CAM_CONST_BROAD[] = {
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
- };
-
- if ((priv->rtllib->pairwise_key_type == KEY_TYPE_WEP40) ||
- (priv->rtllib->pairwise_key_type == KEY_TYPE_WEP104)) {
- for (EntryId = 0; EntryId < 4; EntryId++) {
- MacAddr = CAM_CONST_ADDR[EntryId];
- if (priv->rtllib->swcamtable[EntryId].bused) {
- rtl92e_set_key(dev, EntryId, EntryId,
- priv->rtllib->pairwise_key_type,
- MacAddr, 0,
- (u32 *)(&priv->rtllib->swcamtable
- [EntryId].key_buf[0]));
- }
- }
-
- } else if (priv->rtllib->pairwise_key_type == KEY_TYPE_TKIP) {
- if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
- rtl92e_set_key(dev, 4, 0,
- priv->rtllib->pairwise_key_type,
- (const u8 *)dev->dev_addr, 0,
- (u32 *)(&priv->rtllib->swcamtable[4].key_buf[0]));
- } else {
- rtl92e_set_key(dev, 4, 0,
- priv->rtllib->pairwise_key_type,
- MacAddr, 0,
- (u32 *)(&priv->rtllib->swcamtable[4].key_buf[0]));
- }
-
- } else if (priv->rtllib->pairwise_key_type == KEY_TYPE_CCMP) {
- if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
- rtl92e_set_key(dev, 4, 0,
- priv->rtllib->pairwise_key_type,
- (const u8 *)dev->dev_addr, 0,
- (u32 *)(&priv->rtllib->swcamtable[4].key_buf[0]));
- } else {
- rtl92e_set_key(dev, 4, 0,
- priv->rtllib->pairwise_key_type, MacAddr,
- 0, (u32 *)(&priv->rtllib->swcamtable[4].key_buf[0]));
- }
- }
-
- if (priv->rtllib->group_key_type == KEY_TYPE_TKIP) {
- MacAddr = CAM_CONST_BROAD;
- for (EntryId = 1; EntryId < 4; EntryId++) {
- if (priv->rtllib->swcamtable[EntryId].bused) {
- rtl92e_set_key(dev, EntryId, EntryId,
- priv->rtllib->group_key_type,
- MacAddr, 0,
- (u32 *)(&priv->rtllib->swcamtable[EntryId].key_buf[0]));
- }
- }
- if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
- if (priv->rtllib->swcamtable[0].bused) {
- rtl92e_set_key(dev, 0, 0,
- priv->rtllib->group_key_type,
- CAM_CONST_ADDR[0], 0,
- (u32 *)(&priv->rtllib->swcamtable[0].key_buf[0]));
- } else {
- netdev_warn(dev,
- "%s(): ADHOC TKIP: missing key entry.\n",
- __func__);
- return;
- }
- }
- } else if (priv->rtllib->group_key_type == KEY_TYPE_CCMP) {
- MacAddr = CAM_CONST_BROAD;
- for (EntryId = 1; EntryId < 4; EntryId++) {
- if (priv->rtllib->swcamtable[EntryId].bused) {
- rtl92e_set_key(dev, EntryId, EntryId,
- priv->rtllib->group_key_type,
- MacAddr, 0,
- (u32 *)(&priv->rtllib->swcamtable[EntryId].key_buf[0]));
- }
- }
-
- if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
- if (priv->rtllib->swcamtable[0].bused) {
- rtl92e_set_key(dev, 0, 0,
- priv->rtllib->group_key_type,
- CAM_CONST_ADDR[0], 0,
- (u32 *)(&priv->rtllib->swcamtable[0].key_buf[0]));
- } else {
- netdev_warn(dev,
- "%s(): ADHOC CCMP: missing key entry.\n",
- __func__);
- return;
- }
- }
- }
-}
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
index bd33ef105107..615b84bca9b8 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
@@ -20,6 +20,5 @@ void rtl92e_set_key(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
u32 *KeyContent);
void rtl92e_set_swcam(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
u16 KeyType, const u8 *MacAddr, u32 *KeyContent);
-void rtl92e_cam_restore(struct net_device *dev);
#endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 50eb8f3494ec..995daab906c9 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -61,7 +61,7 @@ static short _rtl92e_pci_initdescring(struct net_device *dev);
static void _rtl92e_irq_tx_tasklet(struct tasklet_struct *t);
static void _rtl92e_irq_rx_tasklet(struct tasklet_struct *t);
static void _rtl92e_cancel_deferred_work(struct r8192_priv *priv);
-static int _rtl92e_up(struct net_device *dev, bool is_silent_reset);
+static int _rtl92e_up(struct net_device *dev);
static int _rtl92e_try_up(struct net_device *dev);
static int _rtl92e_down(struct net_device *dev, bool shutdownrf);
static void _rtl92e_restart(void *data);
@@ -165,8 +165,7 @@ bool rtl92e_set_rf_state(struct net_device *dev,
case rf_off:
- if ((priv->rtllib->iw_mode == IW_MODE_INFRA) ||
- (priv->rtllib->iw_mode == IW_MODE_ADHOC)) {
+ if (priv->rtllib->iw_mode == IW_MODE_INFRA) {
if ((priv->rtllib->rf_off_reason > RF_CHANGE_BY_IPS) ||
(change_source > RF_CHANGE_BY_IPS)) {
if (ieee->link_state == MAC80211_LINKED)
@@ -227,24 +226,6 @@ static void _rtl92e_tx_timeout(struct net_device *dev, unsigned int txqueue)
netdev_info(dev, "TXTIMEOUT");
}
-void rtl92e_irq_enable(struct net_device *dev)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
-
- priv->irq_enabled = 1;
-
- rtl92e_enable_irq(dev);
-}
-
-void rtl92e_irq_disable(struct net_device *dev)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
-
- rtl92e_disable_irq(dev);
-
- priv->irq_enabled = 0;
-}
-
static void _rtl92e_set_chan(struct net_device *dev, short ch)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -313,7 +294,7 @@ static void _rtl92e_update_beacon(void *data)
struct rtllib_device *ieee = priv->rtllib;
struct rtllib_network *net = &ieee->current_network;
- if (ieee->ht_info->bCurrentHTSupport)
+ if (ieee->ht_info->current_ht_support)
HT_update_self_and_peer_setting(ieee, net);
ieee->ht_info->current_rt2rt_long_slot_time = net->bssht.bd_rt2rt_long_slot_time;
ieee->ht_info->RT2RT_HT_Mode = net->bssht.rt2rt_ht_mode;
@@ -477,10 +458,6 @@ static void _rtl92e_prepare_beacon(struct tasklet_struct *t)
pdesc->OWN = 1;
}
-static void _rtl92e_stop_beacon(struct net_device *dev)
-{
-}
-
void rtl92e_config_rate(struct net_device *dev, u16 *rate_config)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -611,7 +588,7 @@ void rtl92e_set_wireless_mode(struct net_device *dev, u8 wireless_mode)
_rtl92e_refresh_support_rate(priv);
}
-static int _rtl92e_sta_up(struct net_device *dev, bool is_silent_reset)
+static int _rtl92e_sta_up(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *)
@@ -634,7 +611,7 @@ static int _rtl92e_sta_up(struct net_device *dev, bool is_silent_reset)
rtl92e_check_rfctrl_gpio_timer(&priv->gpio_polling_timer);
if (priv->rtllib->link_state != MAC80211_LINKED)
- rtllib_softmac_start_protocol(priv->rtllib, 0);
+ rtllib_softmac_start_protocol(priv->rtllib);
rtllib_reset_queue(priv->rtllib);
_rtl92e_watchdog_timer_cb(&priv->watch_dog_timer);
@@ -678,7 +655,7 @@ static int _rtl92e_sta_down(struct net_device *dev, bool shutdownrf)
_rtl92e_cancel_deferred_work(priv);
cancel_delayed_work(&priv->rtllib->hw_wakeup_wq);
- rtllib_softmac_stop_protocol(priv->rtllib, 0, true);
+ rtllib_softmac_stop_protocol(priv->rtllib);
spin_lock_irqsave(&priv->rf_ps_lock, flags);
while (priv->rf_change_in_progress) {
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
@@ -719,9 +696,6 @@ static void _rtl92e_init_priv_handler(struct net_device *dev)
priv->rtllib->set_bw_mode_handler = rtl92e_set_bw_mode;
priv->rf_set_chan = rtl92e_set_channel;
- priv->rtllib->start_send_beacons = rtl92e_start_beacon;
- priv->rtllib->stop_send_beacons = _rtl92e_stop_beacon;
-
priv->rtllib->sta_wake_up = rtl92e_hw_wakeup;
priv->rtllib->enter_sleep_state = rtl92e_enter_sleep;
priv->rtllib->ps_is_queue_empty = _rtl92e_is_tx_queue_empty;
@@ -738,15 +712,6 @@ static void _rtl92e_init_priv_handler(struct net_device *dev)
priv->rtllib->ScanOperationBackupHandler = rtl92e_scan_op_backup;
}
-static void _rtl92e_init_priv_constant(struct net_device *dev)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
- struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *)
- &priv->rtllib->pwr_save_ctrl;
-
- psc->reg_max_lps_awake_intvl = 5;
-}
-
static void _rtl92e_init_priv_variable(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -765,10 +730,6 @@ static void _rtl92e_init_priv_variable(struct net_device *dev)
priv->chan = 1;
priv->rtllib->mode = WIRELESS_MODE_AUTO;
priv->rtllib->iw_mode = IW_MODE_INFRA;
- priv->rtllib->net_promiscuous_md = false;
- priv->rtllib->intel_promiscuous_md_info.promiscuous_on = false;
- priv->rtllib->intel_promiscuous_md_info.fltr_src_sta_frame =
- false;
priv->rtllib->ieee_up = 0;
priv->retry_rts = DEFAULT_RETRY_RTS;
priv->retry_data = DEFAULT_RETRY_DATA;
@@ -778,10 +739,7 @@ static void _rtl92e_init_priv_variable(struct net_device *dev)
priv->bcck_in_ch14 = false;
priv->cck_present_attn = 0;
priv->rfa_txpowertrackingindex = 0;
- priv->rfc_txpowertrackingindex = 0;
priv->cck_pwr_enl = 6;
- priv->rst_progress = RESET_TYPE_NORESET;
- priv->force_reset = false;
memset(priv->rtllib->swcamtable, 0, sizeof(struct sw_cam_table) * 32);
priv->rx_ctr = 0;
priv->rtllib->wx_set_enc = 0;
@@ -797,7 +755,6 @@ static void _rtl92e_init_priv_variable(struct net_device *dev)
priv->rtllib->current_network.beacon_interval = DEFAULT_BEACONINTERVAL;
priv->rtllib->iw_mode = IW_MODE_INFRA;
- priv->rtllib->active_scan = 1;
priv->rtllib->be_scan_inprogress = false;
priv->rtllib->fts = DEFAULT_FRAG_THRESHOLD;
@@ -871,7 +828,6 @@ static short _rtl92e_init(struct net_device *dev)
memset(&priv->stats, 0, sizeof(struct rt_stats));
_rtl92e_init_priv_handler(dev);
- _rtl92e_init_priv_constant(dev);
_rtl92e_init_priv_variable(dev);
_rtl92e_init_priv_lock(priv);
_rtl92e_init_priv_task(dev);
@@ -984,7 +940,7 @@ static enum reset_type _rtl92e_rx_check_stuck(struct net_device *dev)
return RESET_TYPE_NORESET;
}
-static enum reset_type _rtl92e_if_check_reset(struct net_device *dev)
+static void _rtl92e_if_check_reset(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
enum reset_type TxResetType = RESET_TYPE_NORESET;
@@ -1001,133 +957,12 @@ static enum reset_type _rtl92e_if_check_reset(struct net_device *dev)
(priv->rtllib->link_state == MAC80211_LINKED))
RxResetType = _rtl92e_rx_check_stuck(dev);
- if (TxResetType == RESET_TYPE_NORMAL ||
- RxResetType == RESET_TYPE_NORMAL) {
- netdev_info(dev, "%s(): TxResetType is %d, RxResetType is %d\n",
- __func__, TxResetType, RxResetType);
- return RESET_TYPE_NORMAL;
- } else if (TxResetType == RESET_TYPE_SILENT ||
+ if (TxResetType == RESET_TYPE_SILENT ||
RxResetType == RESET_TYPE_SILENT) {
netdev_info(dev, "%s(): TxResetType is %d, RxResetType is %d\n",
__func__, TxResetType, RxResetType);
- return RESET_TYPE_SILENT;
- } else {
- return RESET_TYPE_NORESET;
- }
-}
-
-static void _rtl92e_if_silent_reset(struct net_device *dev)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
- u8 reset_times = 0;
- int reset_status = 0;
- struct rtllib_device *ieee = priv->rtllib;
- unsigned long flag;
-
- if (priv->rst_progress == RESET_TYPE_NORESET) {
- priv->rst_progress = RESET_TYPE_SILENT;
-
- spin_lock_irqsave(&priv->rf_ps_lock, flag);
- if (priv->rf_change_in_progress) {
- spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
- goto END;
- }
- priv->rf_change_in_progress = true;
- priv->reset_in_progress = true;
- spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
-
-RESET_START:
-
- mutex_lock(&priv->wx_mutex);
-
- if (priv->rtllib->link_state == MAC80211_LINKED)
- rtl92e_leisure_ps_leave(dev);
-
- if (priv->up) {
- netdev_info(dev, "%s():the driver is not up.\n",
- __func__);
- mutex_unlock(&priv->wx_mutex);
- return;
- }
- priv->up = 0;
-
- mdelay(1000);
-
- if (!netif_queue_stopped(dev))
- netif_stop_queue(dev);
-
- rtl92e_irq_disable(dev);
- del_timer_sync(&priv->watch_dog_timer);
- _rtl92e_cancel_deferred_work(priv);
- rtl92e_dm_deinit(dev);
- rtllib_stop_scan_syncro(ieee);
-
- if (ieee->link_state == MAC80211_LINKED) {
- mutex_lock(&ieee->wx_mutex);
- netdev_info(dev, "ieee->link_state is MAC80211_LINKED\n");
- rtllib_stop_send_beacons(priv->rtllib);
- del_timer_sync(&ieee->associate_timer);
- cancel_delayed_work(&ieee->associate_retry_wq);
- rtllib_stop_scan(ieee);
- netif_carrier_off(dev);
- mutex_unlock(&ieee->wx_mutex);
- } else {
- netdev_info(dev, "ieee->link_state is NOT LINKED\n");
- rtllib_softmac_stop_protocol(priv->rtllib, 0, true);
- }
-
- rtl92e_dm_backup_state(dev);
-
- mutex_unlock(&priv->wx_mutex);
- reset_status = _rtl92e_up(dev, true);
-
- if (reset_status == -1) {
- if (reset_times < 3) {
- reset_times++;
- goto RESET_START;
- } else {
- netdev_warn(dev, "%s(): Reset Failed\n",
- __func__);
- }
- }
-
- ieee->is_silent_reset = 1;
-
- spin_lock_irqsave(&priv->rf_ps_lock, flag);
- priv->rf_change_in_progress = false;
- spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
-
- rtl92e_enable_hw_security_config(dev);
-
- if (ieee->link_state == MAC80211_LINKED && ieee->iw_mode ==
- IW_MODE_INFRA) {
- ieee->set_chan(ieee->dev,
- ieee->current_network.channel);
-
- schedule_work(&ieee->associate_complete_wq);
-
- } else if (ieee->link_state == MAC80211_LINKED && ieee->iw_mode ==
- IW_MODE_ADHOC) {
- ieee->set_chan(ieee->dev,
- ieee->current_network.channel);
- ieee->link_change(ieee->dev);
-
- notify_wx_assoc_event(ieee);
-
- rtllib_start_send_beacons(ieee);
-
- netif_carrier_on(ieee->dev);
- }
-
- rtl92e_cam_restore(dev);
- rtl92e_dm_restore_state(dev);
-END:
- priv->rst_progress = RESET_TYPE_NORESET;
- priv->reset_count++;
- priv->reset_in_progress = false;
-
- rtl92e_writeb(dev, UFWP, 1);
}
+ return;
}
static void _rtl92e_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
@@ -1157,7 +992,6 @@ static void _rtl92e_watchdog_wq_cb(void *data)
struct r8192_priv, watch_dog_wq);
struct net_device *dev = priv->rtllib->dev;
struct rtllib_device *ieee = priv->rtllib;
- enum reset_type ResetType = RESET_TYPE_NORESET;
static u8 check_reset_cnt;
unsigned long flags;
struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *)
@@ -1184,15 +1018,12 @@ static void _rtl92e_watchdog_wq_cb(void *data)
MAC80211_NOLINK) &&
(ieee->rf_power_state == rf_on) && !ieee->is_set_key &&
(!ieee->proto_stoppping) && !ieee->wx_set_enc) {
- if ((ieee->pwr_save_ctrl.ReturnPoint ==
- IPS_CALLBACK_NONE) &&
- (!ieee->net_promiscuous_md)) {
+ if (ieee->pwr_save_ctrl.ReturnPoint == IPS_CALLBACK_NONE) {
rtl92e_ips_enter(dev);
}
}
}
- if ((ieee->link_state == MAC80211_LINKED) && (ieee->iw_mode ==
- IW_MODE_INFRA) && (!ieee->net_promiscuous_md)) {
+ if ((ieee->link_state == MAC80211_LINKED) && (ieee->iw_mode == IW_MODE_INFRA)) {
if (ieee->link_detect_info.NumRxOkInPeriod > 100 ||
ieee->link_detect_info.NumTxOkInPeriod > 100)
bBusyTraffic = true;
@@ -1275,20 +1106,10 @@ static void _rtl92e_watchdog_wq_cb(void *data)
spin_lock_irqsave(&priv->tx_lock, flags);
if ((check_reset_cnt++ >= 3) && (!ieee->is_roaming) &&
(!priv->rf_change_in_progress) && (!psc->bSwRfProcessing)) {
- ResetType = _rtl92e_if_check_reset(dev);
+ _rtl92e_if_check_reset(dev);
check_reset_cnt = 3;
}
spin_unlock_irqrestore(&priv->tx_lock, flags);
-
- if (ResetType == RESET_TYPE_NORMAL) {
- priv->rst_progress = RESET_TYPE_NORMAL;
- return;
- }
-
- if ((priv->force_reset || ResetType == RESET_TYPE_SILENT))
- _rtl92e_if_silent_reset(dev);
- priv->force_reset = false;
- priv->reset_in_progress = false;
}
static void _rtl92e_watchdog_timer_cb(struct timer_list *t)
@@ -1320,28 +1141,25 @@ void rtl92e_tx_enable(struct net_device *dev)
static void _rtl92e_free_rx_ring(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- int i, rx_queue_idx;
-
- for (rx_queue_idx = 0; rx_queue_idx < MAX_RX_QUEUE;
- rx_queue_idx++) {
- for (i = 0; i < priv->rxringcount; i++) {
- struct sk_buff *skb = priv->rx_buf[rx_queue_idx][i];
+ int i;
- if (!skb)
- continue;
+ for (i = 0; i < priv->rxringcount; i++) {
+ struct sk_buff *skb = priv->rx_buf[i];
- dma_unmap_single(&priv->pdev->dev,
- *((dma_addr_t *)skb->cb),
- priv->rxbuffersize, DMA_FROM_DEVICE);
- kfree_skb(skb);
- }
+ if (!skb)
+ continue;
- dma_free_coherent(&priv->pdev->dev,
- sizeof(*priv->rx_ring[rx_queue_idx]) * priv->rxringcount,
- priv->rx_ring[rx_queue_idx],
- priv->rx_ring_dma[rx_queue_idx]);
- priv->rx_ring[rx_queue_idx] = NULL;
+ dma_unmap_single(&priv->pdev->dev,
+ *((dma_addr_t *)skb->cb),
+ priv->rxbuffersize, DMA_FROM_DEVICE);
+ kfree_skb(skb);
}
+
+ dma_free_coherent(&priv->pdev->dev,
+ sizeof(*priv->rx_ring) * priv->rxringcount,
+ priv->rx_ring,
+ priv->rx_ring_dma);
+ priv->rx_ring = NULL;
}
static void _rtl92e_free_tx_ring(struct net_device *dev, unsigned int prio)
@@ -1374,8 +1192,7 @@ static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
MAX_DEV_ADDR_SIZE);
u8 queue_index = tcb_desc->queue_index;
- if ((priv->rtllib->rf_power_state == rf_off) || !priv->up ||
- priv->reset_in_progress) {
+ if ((priv->rtllib->rf_power_state == rf_off) || !priv->up) {
kfree_skb(skb);
return;
}
@@ -1408,7 +1225,7 @@ static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (queue_index != TXCMD_QUEUE) {
if ((priv->rtllib->rf_power_state == rf_off) ||
- !priv->up || priv->reset_in_progress) {
+ !priv->up) {
kfree_skb(skb);
return 0;
}
@@ -1488,7 +1305,7 @@ static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb)
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
MAX_DEV_ADDR_SIZE);
struct tx_desc *pdesc = NULL;
- struct rtllib_hdr_1addr *header = NULL;
+ struct ieee80211_hdr *header = NULL;
u8 *pda_addr = NULL;
int idx;
u32 fwinfo_size = 0;
@@ -1497,7 +1314,7 @@ static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb)
fwinfo_size = sizeof(struct tx_fwinfo_8190pci);
- header = (struct rtllib_hdr_1addr *)(((u8 *)skb->data) + fwinfo_size);
+ header = (struct ieee80211_hdr *)(((u8 *)skb->data) + fwinfo_size);
pda_addr = header->addr1;
if (!is_broadcast_ether_addr(pda_addr) && !is_multicast_ether_addr(pda_addr))
@@ -1533,47 +1350,44 @@ static short _rtl92e_alloc_rx_ring(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rx_desc *entry = NULL;
- int i, rx_queue_idx;
-
- for (rx_queue_idx = 0; rx_queue_idx < MAX_RX_QUEUE; rx_queue_idx++) {
- priv->rx_ring[rx_queue_idx] = dma_alloc_coherent(&priv->pdev->dev,
- sizeof(*priv->rx_ring[rx_queue_idx]) * priv->rxringcount,
- &priv->rx_ring_dma[rx_queue_idx],
- GFP_ATOMIC);
- if (!priv->rx_ring[rx_queue_idx] ||
- (unsigned long)priv->rx_ring[rx_queue_idx] & 0xFF) {
- netdev_warn(dev, "Cannot allocate RX ring\n");
- return -ENOMEM;
- }
+ int i;
- priv->rx_idx[rx_queue_idx] = 0;
+ priv->rx_ring = dma_alloc_coherent(&priv->pdev->dev,
+ sizeof(*priv->rx_ring) * priv->rxringcount,
+ &priv->rx_ring_dma,
+ GFP_ATOMIC);
+ if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) {
+ netdev_warn(dev, "Cannot allocate RX ring\n");
+ return -ENOMEM;
+ }
- for (i = 0; i < priv->rxringcount; i++) {
- struct sk_buff *skb = dev_alloc_skb(priv->rxbuffersize);
- dma_addr_t *mapping;
-
- entry = &priv->rx_ring[rx_queue_idx][i];
- if (!skb)
- return 0;
- skb->dev = dev;
- priv->rx_buf[rx_queue_idx][i] = skb;
- mapping = (dma_addr_t *)skb->cb;
- *mapping = dma_map_single(&priv->pdev->dev,
- skb_tail_pointer(skb),
- priv->rxbuffersize, DMA_FROM_DEVICE);
- if (dma_mapping_error(&priv->pdev->dev, *mapping)) {
- dev_kfree_skb_any(skb);
- return -1;
- }
- entry->BufferAddress = *mapping;
+ priv->rx_idx = 0;
- entry->Length = priv->rxbuffersize;
- entry->OWN = 1;
+ for (i = 0; i < priv->rxringcount; i++) {
+ struct sk_buff *skb = dev_alloc_skb(priv->rxbuffersize);
+ dma_addr_t *mapping;
+
+ entry = &priv->rx_ring[i];
+ if (!skb)
+ return 0;
+ skb->dev = dev;
+ priv->rx_buf[i] = skb;
+ mapping = (dma_addr_t *)skb->cb;
+ *mapping = dma_map_single(&priv->pdev->dev,
+ skb_tail_pointer(skb),
+ priv->rxbuffersize, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&priv->pdev->dev, *mapping)) {
+ dev_kfree_skb_any(skb);
+ return -1;
}
+ entry->BufferAddress = *mapping;
- if (entry)
- entry->EOR = 1;
+ entry->Length = priv->rxbuffersize;
+ entry->OWN = 1;
}
+
+ if (entry)
+ entry->EOR = 1;
return 0;
}
@@ -1635,19 +1449,17 @@ err_free_rings:
void rtl92e_reset_desc_ring(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- int i, rx_queue_idx;
+ int i;
unsigned long flags = 0;
- for (rx_queue_idx = 0; rx_queue_idx < MAX_RX_QUEUE; rx_queue_idx++) {
- if (priv->rx_ring[rx_queue_idx]) {
- struct rx_desc *entry = NULL;
+ if (priv->rx_ring) {
+ struct rx_desc *entry = NULL;
- for (i = 0; i < priv->rxringcount; i++) {
- entry = &priv->rx_ring[rx_queue_idx][i];
- entry->OWN = 1;
- }
- priv->rx_idx[rx_queue_idx] = 0;
+ for (i = 0; i < priv->rxringcount; i++) {
+ entry = &priv->rx_ring[i];
+ entry->OWN = 1;
}
+ priv->rx_idx = 0;
}
spin_lock_irqsave(&priv->irq_th_lock, flags);
@@ -1743,10 +1555,9 @@ void rtl92e_copy_mpdu_stats(struct rtllib_rx_stats *psrc_stats,
static void _rtl92e_rx_normal(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- struct rtllib_hdr_1addr *rtllib_hdr = NULL;
+ struct ieee80211_hdr *rtllib_hdr = NULL;
bool unicast_packet = false;
u32 skb_len = 0;
- int rx_queue_idx = RX_MPDU_QUEUE;
struct rtllib_rx_stats stats = {
.signal = 0,
@@ -1758,10 +1569,10 @@ static void _rtl92e_rx_normal(struct net_device *dev)
stats.nic_type = NIC_8192E;
while (count--) {
- struct rx_desc *pdesc = &priv->rx_ring[rx_queue_idx]
- [priv->rx_idx[rx_queue_idx]];
- struct sk_buff *skb = priv->rx_buf[rx_queue_idx]
- [priv->rx_idx[rx_queue_idx]];
+ struct rx_desc *pdesc = &priv->rx_ring
+ [priv->rx_idx];
+ struct sk_buff *skb = priv->rx_buf
+ [priv->rx_idx];
struct sk_buff *new_skb;
if (pdesc->OWN)
@@ -1782,7 +1593,7 @@ static void _rtl92e_rx_normal(struct net_device *dev)
skb_reserve(skb, stats.RxDrvInfoSize +
stats.RxBufShift);
skb_trim(skb, skb->len - S_CRC_LEN);
- rtllib_hdr = (struct rtllib_hdr_1addr *)skb->data;
+ rtllib_hdr = (struct ieee80211_hdr *)skb->data;
if (!is_multicast_ether_addr(rtllib_hdr->addr1)) {
/* unicast packet */
unicast_packet = true;
@@ -1799,8 +1610,7 @@ static void _rtl92e_rx_normal(struct net_device *dev)
skb = new_skb;
skb->dev = dev;
- priv->rx_buf[rx_queue_idx][priv->rx_idx[rx_queue_idx]] =
- skb;
+ priv->rx_buf[priv->rx_idx] = skb;
*((dma_addr_t *)skb->cb) = dma_map_single(&priv->pdev->dev,
skb_tail_pointer(skb),
priv->rxbuffersize, DMA_FROM_DEVICE);
@@ -1812,9 +1622,9 @@ done:
pdesc->BufferAddress = *((dma_addr_t *)skb->cb);
pdesc->OWN = 1;
pdesc->Length = priv->rxbuffersize;
- if (priv->rx_idx[rx_queue_idx] == priv->rxringcount - 1)
+ if (priv->rx_idx == priv->rxringcount - 1)
pdesc->EOR = 1;
- priv->rx_idx[rx_queue_idx] = (priv->rx_idx[rx_queue_idx] + 1) %
+ priv->rx_idx = (priv->rx_idx + 1) %
priv->rxringcount;
}
}
@@ -1865,9 +1675,9 @@ static void _rtl92e_cancel_deferred_work(struct r8192_priv *priv)
cancel_work_sync(&priv->qos_activate);
}
-static int _rtl92e_up(struct net_device *dev, bool is_silent_reset)
+static int _rtl92e_up(struct net_device *dev)
{
- if (_rtl92e_sta_up(dev, is_silent_reset) == -1)
+ if (_rtl92e_sta_up(dev) == -1)
return -1;
return 0;
}
@@ -1889,7 +1699,7 @@ static int _rtl92e_try_up(struct net_device *dev)
if (priv->up == 1)
return -1;
- return _rtl92e_up(dev, false);
+ return _rtl92e_up(dev);
}
static int _rtl92e_close(struct net_device *dev)
@@ -1925,10 +1735,10 @@ void rtl92e_commit(struct net_device *dev)
if (priv->up == 0)
return;
- rtllib_softmac_stop_protocol(priv->rtllib, 0, true);
+ rtllib_softmac_stop_protocol(priv->rtllib);
rtl92e_irq_disable(dev);
rtl92e_stop_adapter(dev, true);
- _rtl92e_up(dev, false);
+ _rtl92e_up(dev);
}
static void _rtl92e_restart(void *data)
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index fa82a0667813..a4afbf3e934d 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -91,8 +91,6 @@
#define MAX_TX_QUEUE 9
-#define MAX_RX_QUEUE 1
-
#define MAX_RX_COUNT 64
#define MAX_TX_QUEUE_COUNT 9
@@ -132,7 +130,6 @@ enum rt_customer_id {
enum reset_type {
RESET_TYPE_NORESET = 0x00,
- RESET_TYPE_NORMAL = 0x01,
RESET_TYPE_SILENT = 0x02
};
@@ -233,10 +230,10 @@ struct r8192_priv {
u8 (*rf_set_chan)(struct net_device *dev, u8 ch);
- struct rx_desc *rx_ring[MAX_RX_QUEUE];
- struct sk_buff *rx_buf[MAX_RX_QUEUE][MAX_RX_COUNT];
- dma_addr_t rx_ring_dma[MAX_RX_QUEUE];
- unsigned int rx_idx[MAX_RX_QUEUE];
+ struct rx_desc *rx_ring;
+ struct sk_buff *rx_buf[MAX_RX_COUNT];
+ dma_addr_t rx_ring_dma;
+ unsigned int rx_idx;
int rxringcount;
u16 rxbuffersize;
@@ -264,7 +261,6 @@ struct r8192_priv {
short promisc;
short chan;
- bool ps_force;
u32 irq_mask[2];
@@ -345,7 +341,6 @@ struct r8192_priv {
u8 rfa_txpowertrackingindex;
u8 rfa_txpowertrackingindex_real;
u8 rfa_txpowertracking_default;
- u8 rfc_txpowertrackingindex;
bool btxpower_tracking;
bool bcck_in_ch14;
@@ -369,14 +364,9 @@ struct r8192_priv {
u32 continue_diff_count;
bool bswitch_fsync;
u8 framesync;
- u32 reset_count;
- enum reset_type rst_progress;
u16 tx_counter;
u16 rx_ctr;
- bool reset_in_progress;
- bool force_reset;
- bool force_lps;
};
extern const struct ethtool_ops rtl819x_ethtool_ops;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index dbf765d601b3..330dafd62656 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -161,12 +161,9 @@ static void _rtl92e_dm_bandwidth_autoswitch(struct net_device *dev);
static void _rtl92e_dm_check_tx_power_tracking(struct net_device *dev);
-static void _rtl92e_dm_bb_initialgain_restore(struct net_device *dev);
static void _rtl92e_dm_dig_init(struct net_device *dev);
static void _rtl92e_dm_ctrl_initgain_byrssi(struct net_device *dev);
-static void _rtl92e_dm_ctrl_initgain_byrssi_highpwr(struct net_device *dev);
static void _rtl92e_dm_ctrl_initgain_byrssi_driver(struct net_device *dev);
-static void _rtl92e_dm_ctrl_initgain_byrssi_false_alarm(struct net_device *dev);
static void _rtl92e_dm_initial_gain(struct net_device *dev);
static void _rtl92e_dm_pd_th(struct net_device *dev);
static void _rtl92e_dm_cs_ratio(struct net_device *dev);
@@ -285,9 +282,6 @@ static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev)
if (!priv->up)
return;
- if (pra->rate_adaptive_disabled)
- return;
-
if (priv->rtllib->mode != WIRELESS_MODE_N_24G)
return;
@@ -298,25 +292,25 @@ static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev)
ht_info->bCurShortGI20MHz);
pra->upper_rssi_threshold_ratr =
- (pra->upper_rssi_threshold_ratr & (~BIT31)) |
- ((bshort_gi_enabled) ? BIT31 : 0);
+ (pra->upper_rssi_threshold_ratr & (~BIT(31))) |
+ ((bshort_gi_enabled) ? BIT(31) : 0);
pra->middle_rssi_threshold_ratr =
- (pra->middle_rssi_threshold_ratr & (~BIT31)) |
- ((bshort_gi_enabled) ? BIT31 : 0);
+ (pra->middle_rssi_threshold_ratr & (~BIT(31))) |
+ ((bshort_gi_enabled) ? BIT(31) : 0);
if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) {
pra->low_rssi_threshold_ratr =
- (pra->low_rssi_threshold_ratr_40M & (~BIT31)) |
- ((bshort_gi_enabled) ? BIT31 : 0);
+ (pra->low_rssi_threshold_ratr_40M & (~BIT(31))) |
+ ((bshort_gi_enabled) ? BIT(31) : 0);
} else {
pra->low_rssi_threshold_ratr =
- (pra->low_rssi_threshold_ratr_20M & (~BIT31)) |
- ((bshort_gi_enabled) ? BIT31 : 0);
+ (pra->low_rssi_threshold_ratr_20M & (~BIT(31))) |
+ ((bshort_gi_enabled) ? BIT(31) : 0);
}
pra->ping_rssi_ratr =
- (pra->ping_rssi_ratr & (~BIT31)) |
- ((bshort_gi_enabled) ? BIT31 : 0);
+ (pra->ping_rssi_ratr & (~BIT(31))) |
+ ((bshort_gi_enabled) ? BIT(31) : 0);
if (pra->ratr_state == DM_RATR_STA_HIGH) {
high_rssi_thresh_for_ra = pra->high2low_rssi_thresh_for_ra;
@@ -371,8 +365,6 @@ static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev)
ratr_value &= ~(RATE_ALL_OFDM_2SS);
rtl92e_writel(dev, RATR0, ratr_value);
rtl92e_writeb(dev, UFWP, 1);
-
- pra->last_ratr = target_ratr;
}
} else {
@@ -530,11 +522,6 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
if (Pwr_Flag == 0) {
mdelay(1);
- if (priv->reset_in_progress) {
- rtl92e_writeb(dev, Pw_Track_Flag, 0);
- rtl92e_writeb(dev, FW_Busy_Flag, 0);
- return;
- }
if (priv->rtllib->rf_power_state != rf_on) {
rtl92e_writeb(dev, Pw_Track_Flag, 0);
rtl92e_writeb(dev, FW_Busy_Flag, 0);
@@ -920,95 +907,10 @@ void rtl92e_dm_cck_txpower_adjust(struct net_device *dev, bool binch14)
_rtl92e_dm_cck_tx_power_adjust_thermal_meter(dev, binch14);
}
-static void _rtl92e_dm_tx_power_reset_recovery(struct net_device *dev)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
-
- rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance, bMaskDWord,
- dm_tx_bb_gain[priv->rfa_txpowertrackingindex]);
- rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
-
- rtl92e_set_bb_reg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord,
- dm_tx_bb_gain[priv->rfc_txpowertrackingindex]);
-}
-
-void rtl92e_dm_restore_state(struct net_device *dev)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
- u32 reg_ratr = priv->rate_adaptive.last_ratr;
- u32 ratr_value;
-
- if (!priv->up)
- return;
-
- if (priv->rate_adaptive.rate_adaptive_disabled)
- return;
- if (priv->rtllib->mode != WIRELESS_MODE_N_24G)
- return;
- ratr_value = reg_ratr;
- ratr_value &= ~(RATE_ALL_OFDM_2SS);
- rtl92e_writel(dev, RATR0, ratr_value);
- rtl92e_writeb(dev, UFWP, 1);
- if (priv->tx_pwr_tracking_init && priv->btxpower_tracking)
- _rtl92e_dm_tx_power_reset_recovery(dev);
-
- _rtl92e_dm_bb_initialgain_restore(dev);
-}
-
-static void _rtl92e_dm_bb_initialgain_restore(struct net_device *dev)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
- u32 bit_mask = 0x7f;
-
- if (dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
- return;
-
- rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
- rtl92e_set_bb_reg(dev, rOFDM0_XAAGCCore1, bit_mask,
- (u32)priv->initgain_backup.xaagccore1);
- rtl92e_set_bb_reg(dev, rOFDM0_XBAGCCore1, bit_mask,
- (u32)priv->initgain_backup.xbagccore1);
- rtl92e_set_bb_reg(dev, rOFDM0_XCAGCCore1, bit_mask,
- (u32)priv->initgain_backup.xcagccore1);
- rtl92e_set_bb_reg(dev, rOFDM0_XDAGCCore1, bit_mask,
- (u32)priv->initgain_backup.xdagccore1);
- bit_mask = bMaskByte2;
- rtl92e_set_bb_reg(dev, rCCK0_CCA, bit_mask,
- (u32)priv->initgain_backup.cca);
- rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x1);
-}
-
-void rtl92e_dm_backup_state(struct net_device *dev)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
- u32 bit_mask = bMaskByte0;
-
- priv->bswitch_fsync = false;
-
- if (dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
- return;
-
- rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
- priv->initgain_backup.xaagccore1 = rtl92e_get_bb_reg(dev, rOFDM0_XAAGCCore1, bit_mask);
- priv->initgain_backup.xbagccore1 = rtl92e_get_bb_reg(dev, rOFDM0_XBAGCCore1, bit_mask);
- priv->initgain_backup.xcagccore1 = rtl92e_get_bb_reg(dev, rOFDM0_XCAGCCore1, bit_mask);
- priv->initgain_backup.xdagccore1 = rtl92e_get_bb_reg(dev, rOFDM0_XDAGCCore1, bit_mask);
- bit_mask = bMaskByte2;
- priv->initgain_backup.cca = (u8)rtl92e_get_bb_reg(dev, rCCK0_CCA, bit_mask);
-}
-
static void _rtl92e_dm_dig_init(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- dm_digtable.dig_enable_flag = true;
-
- dm_digtable.dig_algorithm = DIG_ALGO_BY_RSSI;
-
- dm_digtable.dig_algorithm_switch = 0;
-
- dm_digtable.dig_state = DM_STA_DIG_MAX;
- dm_digtable.dig_highpwr_state = DM_STA_DIG_MAX;
dm_digtable.cur_sta_connect_state = DIG_STA_DISCONNECT;
dm_digtable.pre_sta_connect_state = DIG_STA_DISCONNECT;
@@ -1029,15 +931,7 @@ static void _rtl92e_dm_dig_init(struct net_device *dev)
static void _rtl92e_dm_ctrl_initgain_byrssi(struct net_device *dev)
{
- if (!dm_digtable.dig_enable_flag)
- return;
-
- if (dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
- _rtl92e_dm_ctrl_initgain_byrssi_false_alarm(dev);
- else if (dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
- _rtl92e_dm_ctrl_initgain_byrssi_driver(dev);
- else
- return;
+ _rtl92e_dm_ctrl_initgain_byrssi_driver(dev);
}
/*-----------------------------------------------------------------------------
@@ -1064,16 +958,10 @@ static void _rtl92e_dm_ctrl_initgain_byrssi_driver(struct net_device *dev)
u8 i;
static u8 fw_dig;
- if (!dm_digtable.dig_enable_flag)
- return;
-
- if (dm_digtable.dig_algorithm_switch)
- fw_dig = 0;
if (fw_dig <= 3) {
for (i = 0; i < 3; i++)
rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
fw_dig++;
- dm_digtable.dig_state = DM_STA_DIG_OFF;
}
if (priv->rtllib->link_state == MAC80211_LINKED)
@@ -1085,150 +973,14 @@ static void _rtl92e_dm_ctrl_initgain_byrssi_driver(struct net_device *dev)
_rtl92e_dm_initial_gain(dev);
_rtl92e_dm_pd_th(dev);
_rtl92e_dm_cs_ratio(dev);
- if (dm_digtable.dig_algorithm_switch)
- dm_digtable.dig_algorithm_switch = 0;
dm_digtable.pre_sta_connect_state = dm_digtable.cur_sta_connect_state;
}
-static void _rtl92e_dm_ctrl_initgain_byrssi_false_alarm(struct net_device *dev)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
- static u32 reset_cnt;
- u8 i;
-
- if (!dm_digtable.dig_enable_flag)
- return;
-
- if (dm_digtable.dig_algorithm_switch) {
- dm_digtable.dig_state = DM_STA_DIG_MAX;
- for (i = 0; i < 3; i++)
- rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x1);
- dm_digtable.dig_algorithm_switch = 0;
- }
-
- if (priv->rtllib->link_state != MAC80211_LINKED)
- return;
-
- if ((priv->undecorated_smoothed_pwdb > dm_digtable.rssi_low_thresh) &&
- (priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_thresh))
- return;
- if (priv->undecorated_smoothed_pwdb <= dm_digtable.rssi_low_thresh) {
- if (dm_digtable.dig_state == DM_STA_DIG_OFF &&
- (priv->reset_count == reset_cnt))
- return;
- reset_cnt = priv->reset_count;
-
- dm_digtable.dig_highpwr_state = DM_STA_DIG_MAX;
- dm_digtable.dig_state = DM_STA_DIG_OFF;
-
- rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
-
- rtl92e_writeb(dev, rOFDM0_XAAGCCore1, 0x17);
- rtl92e_writeb(dev, rOFDM0_XBAGCCore1, 0x17);
- rtl92e_writeb(dev, rOFDM0_XCAGCCore1, 0x17);
- rtl92e_writeb(dev, rOFDM0_XDAGCCore1, 0x17);
-
- if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
- rtl92e_writeb(dev, (rOFDM0_XATxAFE + 3), 0x00);
- else
- rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x42);
-
- rtl92e_writeb(dev, 0xa0a, 0x08);
-
- return;
- }
-
- if (priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_thresh) {
- u8 reset_flag = 0;
-
- if (dm_digtable.dig_state == DM_STA_DIG_ON &&
- (priv->reset_count == reset_cnt)) {
- _rtl92e_dm_ctrl_initgain_byrssi_highpwr(dev);
- return;
- }
- if (priv->reset_count != reset_cnt)
- reset_flag = 1;
-
- reset_cnt = priv->reset_count;
-
- dm_digtable.dig_state = DM_STA_DIG_ON;
-
- if (reset_flag == 1) {
- rtl92e_writeb(dev, rOFDM0_XAAGCCore1, 0x2c);
- rtl92e_writeb(dev, rOFDM0_XBAGCCore1, 0x2c);
- rtl92e_writeb(dev, rOFDM0_XCAGCCore1, 0x2c);
- rtl92e_writeb(dev, rOFDM0_XDAGCCore1, 0x2c);
- } else {
- rtl92e_writeb(dev, rOFDM0_XAAGCCore1, 0x20);
- rtl92e_writeb(dev, rOFDM0_XBAGCCore1, 0x20);
- rtl92e_writeb(dev, rOFDM0_XCAGCCore1, 0x20);
- rtl92e_writeb(dev, rOFDM0_XDAGCCore1, 0x20);
- }
-
- if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
- rtl92e_writeb(dev, (rOFDM0_XATxAFE + 3), 0x20);
- else
- rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x44);
-
- rtl92e_writeb(dev, 0xa0a, 0xcd);
-
- rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x1);
- }
- _rtl92e_dm_ctrl_initgain_byrssi_highpwr(dev);
-}
-
-static void _rtl92e_dm_ctrl_initgain_byrssi_highpwr(struct net_device *dev)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
- static u32 reset_cnt_highpwr;
-
- if ((priv->undecorated_smoothed_pwdb >
- dm_digtable.rssi_high_power_lowthresh) &&
- (priv->undecorated_smoothed_pwdb <
- dm_digtable.rssi_high_power_highthresh))
- return;
-
- if (priv->undecorated_smoothed_pwdb >=
- dm_digtable.rssi_high_power_highthresh) {
- if (dm_digtable.dig_highpwr_state == DM_STA_DIG_ON &&
- (priv->reset_count == reset_cnt_highpwr))
- return;
- dm_digtable.dig_highpwr_state = DM_STA_DIG_ON;
-
- if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
- rtl92e_writeb(dev, (rOFDM0_XATxAFE + 3), 0x10);
- else
- rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x43);
- } else {
- if (dm_digtable.dig_highpwr_state == DM_STA_DIG_OFF &&
- (priv->reset_count == reset_cnt_highpwr))
- return;
- dm_digtable.dig_highpwr_state = DM_STA_DIG_OFF;
-
- if ((priv->undecorated_smoothed_pwdb <
- dm_digtable.rssi_high_power_lowthresh) &&
- (priv->undecorated_smoothed_pwdb >=
- dm_digtable.rssi_high_thresh)) {
- if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
- rtl92e_writeb(dev, (rOFDM0_XATxAFE + 3), 0x20);
- else
- rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x44);
- }
- }
- reset_cnt_highpwr = priv->reset_count;
-}
-
static void _rtl92e_dm_initial_gain(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
u8 initial_gain = 0;
static u8 initialized, force_write;
- static u32 reset_cnt;
-
- if (dm_digtable.dig_algorithm_switch) {
- initialized = 0;
- reset_cnt = 0;
- }
if (rtllib_act_scanning(priv->rtllib, true)) {
force_write = 1;
@@ -1254,11 +1006,6 @@ static void _rtl92e_dm_initial_gain(struct net_device *dev)
dm_digtable.pre_ig_value = 0;
}
- if (priv->reset_count != reset_cnt) {
- force_write = 1;
- reset_cnt = priv->reset_count;
- }
-
if (dm_digtable.pre_ig_value != rtl92e_readb(dev, rOFDM0_XAAGCCore1))
force_write = 1;
@@ -1279,12 +1026,6 @@ static void _rtl92e_dm_pd_th(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
static u8 initialized, force_write;
- static u32 reset_cnt;
-
- if (dm_digtable.dig_algorithm_switch) {
- initialized = 0;
- reset_cnt = 0;
- }
if (dm_digtable.pre_sta_connect_state == dm_digtable.cur_sta_connect_state) {
if (dm_digtable.cur_sta_connect_state == DIG_STA_CONNECT) {
@@ -1312,11 +1053,6 @@ static void _rtl92e_dm_pd_th(struct net_device *dev)
dm_digtable.curpd_thstate = DIG_PD_AT_LOW_POWER;
}
- if (priv->reset_count != reset_cnt) {
- force_write = 1;
- reset_cnt = priv->reset_count;
- }
-
if ((dm_digtable.prepd_thstate != dm_digtable.curpd_thstate) ||
(initialized <= 3) || force_write) {
if (dm_digtable.curpd_thstate == DIG_PD_AT_LOW_POWER) {
@@ -1345,14 +1081,7 @@ static void _rtl92e_dm_pd_th(struct net_device *dev)
static void _rtl92e_dm_cs_ratio(struct net_device *dev)
{
- struct r8192_priv *priv = rtllib_priv(dev);
static u8 initialized, force_write;
- static u32 reset_cnt;
-
- if (dm_digtable.dig_algorithm_switch) {
- initialized = 0;
- reset_cnt = 0;
- }
if (dm_digtable.pre_sta_connect_state == dm_digtable.cur_sta_connect_state) {
if (dm_digtable.cur_sta_connect_state == DIG_STA_CONNECT) {
@@ -1369,11 +1098,6 @@ static void _rtl92e_dm_cs_ratio(struct net_device *dev)
dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
}
- if (priv->reset_count != reset_cnt) {
- force_write = 1;
- reset_cnt = priv->reset_count;
- }
-
if ((dm_digtable.precs_ratio_state != dm_digtable.curcs_ratio_state) ||
!initialized || force_write) {
if (dm_digtable.curcs_ratio_state == DIG_CS_RATIO_LOWER)
@@ -1405,8 +1129,6 @@ static void _rtl92e_dm_check_edca_turbo(struct net_device *dev)
unsigned long curTxOkCnt = 0;
unsigned long curRxOkCnt = 0;
- if (priv->rtllib->iw_mode == IW_MODE_ADHOC)
- goto dm_CheckEdcaTurbo_EXIT;
if (priv->rtllib->link_state != MAC80211_LINKED)
goto dm_CheckEdcaTurbo_EXIT;
if (priv->rtllib->ht_info->iot_action & HT_IOT_ACT_DISABLE_EDCA_TURBO)
@@ -1525,7 +1247,7 @@ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data)
tmp1byte = rtl92e_readb(dev, GPI);
- rf_power_state_to_set = (tmp1byte & BIT1) ? rf_on : rf_off;
+ rf_power_state_to_set = (tmp1byte & BIT(1)) ? rf_on : rf_off;
if (priv->hw_radio_off && (rf_power_state_to_set == rf_on)) {
netdev_info(dev, "gpiochangeRF - HW Radio ON\n");
@@ -1989,7 +1711,6 @@ static void _rtl92e_dm_check_fsync(struct net_device *dev)
#define RegC38_Fsync_AP_BCM 2
struct r8192_priv *priv = rtllib_priv(dev);
static u8 reg_c38_State = RegC38_Default;
- static u32 reset_cnt;
if (priv->rtllib->link_state == MAC80211_LINKED &&
priv->rtllib->ht_info->IOTPeer == HT_IOT_PEER_BROADCOM) {
@@ -2073,12 +1794,6 @@ static void _rtl92e_dm_check_fsync(struct net_device *dev)
}
}
}
- if (priv->reset_count != reset_cnt) {
- rtl92e_writeb(dev, rOFDM0_RxDetector3,
- priv->framesync);
- reg_c38_State = RegC38_Default;
- reset_cnt = priv->reset_count;
- }
}
/*---------------------------Define function prototype------------------------*/
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
index 01587e2fec65..84e673452be4 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
@@ -50,18 +50,12 @@
/*------------------------------Define structure----------------------------*/
struct dig_t {
- u8 dig_enable_flag;
- u8 dig_algorithm;
- u8 dig_algorithm_switch;
-
long rssi_low_thresh;
long rssi_high_thresh;
long rssi_high_power_lowthresh;
long rssi_high_power_highthresh;
- u8 dig_state;
- u8 dig_highpwr_state;
u8 cur_sta_connect_state;
u8 pre_sta_connect_state;
@@ -80,12 +74,6 @@ struct dig_t {
long rssi_val;
};
-enum dm_dig_sta {
- DM_STA_DIG_OFF = 0,
- DM_STA_DIG_ON,
- DM_STA_DIG_MAX
-};
-
enum dm_ratr_sta {
DM_RATR_STA_HIGH = 0,
DM_RATR_STA_MIDDLE = 1,
@@ -93,22 +81,9 @@ enum dm_ratr_sta {
DM_RATR_STA_MAX
};
-enum dm_dig_alg {
- DIG_ALGO_BY_FALSE_ALARM = 0,
- DIG_ALGO_BY_RSSI = 1,
- DIG_ALGO_BEFORE_CONNECT_BY_RSSI_AND_ALARM = 2,
- DIG_ALGO_BY_TOW_PORT = 3,
- DIG_ALGO_MAX
-};
-
enum dm_dig_connect {
DIG_STA_DISCONNECT = 0,
DIG_STA_CONNECT = 1,
- DIG_STA_BEFORE_CONNECT = 2,
- DIG_AP_DISCONNECT = 3,
- DIG_AP_CONNECT = 4,
- DIG_AP_ADD_STATION = 5,
- DIG_CONNECT_MAX
};
enum dm_dig_pd_th {
@@ -178,8 +153,6 @@ void rtl92e_dm_txpower_tracking_wq(void *data);
void rtl92e_dm_cck_txpower_adjust(struct net_device *dev, bool binch14);
-void rtl92e_dm_restore_state(struct net_device *dev);
-void rtl92e_dm_backup_state(struct net_device *dev);
void rtl92e_dm_init_edca_turbo(struct net_device *dev);
void rtl92e_dm_rf_pathcheck_wq(void *data);
void rtl92e_dm_init_txpower_tracking(struct net_device *dev);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
index 0bc3e013001e..1aa73561581b 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
@@ -21,7 +21,7 @@ static void _rtl92e_parse_pci_configuration(struct pci_dev *pdev,
pcie_capability_read_word(priv->pdev, PCI_EXP_LNKCTL, &link_ctrl_reg);
pci_read_config_byte(pdev, 0x98, &tmp);
- tmp |= BIT4;
+ tmp |= BIT(4);
pci_write_config_byte(pdev, 0x98, tmp);
tmp = 0x17;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
index 598bfc0ff3d1..44a9fe831849 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
@@ -181,11 +181,7 @@ static bool _rtl92e_ps_set_mode(struct net_device *dev, u8 rtPsMode)
{
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->rtllib->iw_mode == IW_MODE_ADHOC)
- return false;
-
- if (!priv->ps_force)
- priv->rtllib->ps = rtPsMode;
+ priv->rtllib->ps = rtPsMode;
if (priv->rtllib->sta_sleep != LPS_IS_WAKE &&
rtPsMode == RTLLIB_PS_DISABLED) {
unsigned long flags;
@@ -208,8 +204,7 @@ void rtl92e_leisure_ps_enter(struct net_device *dev)
&priv->rtllib->pwr_save_ctrl;
if (!((priv->rtllib->iw_mode == IW_MODE_INFRA) &&
- (priv->rtllib->link_state == MAC80211_LINKED))
- || (priv->rtllib->iw_mode == IW_MODE_ADHOC))
+ (priv->rtllib->link_state == MAC80211_LINKED)))
return;
if (psc->bLeisurePs) {
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
index 189798852568..4371ab1239ee 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
@@ -122,126 +122,11 @@ static int _rtl92e_wx_get_power(struct net_device *dev,
return rtllib_wx_get_power(priv->rtllib, info, wrqu, extra);
}
-static int _rtl92e_wx_set_rawtx(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
- int ret;
-
- if (priv->hw_radio_off)
- return 0;
-
- mutex_lock(&priv->wx_mutex);
-
- ret = rtllib_wx_set_rawtx(priv->rtllib, info, wrqu, extra);
-
- mutex_unlock(&priv->wx_mutex);
-
- return ret;
-}
-
-static int _rtl92e_wx_force_reset(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
-
- mutex_lock(&priv->wx_mutex);
-
- priv->force_reset = *extra;
- mutex_unlock(&priv->wx_mutex);
- return 0;
-}
-
-static int _rtl92e_wx_adapter_power_status(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
- struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *)
- (&priv->rtllib->pwr_save_ctrl);
- struct rtllib_device *ieee = priv->rtllib;
-
- mutex_lock(&priv->wx_mutex);
-
- if (*extra || priv->force_lps) {
- priv->ps_force = false;
- psc->bLeisurePs = true;
- } else {
- if (priv->rtllib->link_state == MAC80211_LINKED)
- rtl92e_leisure_ps_leave(dev);
-
- priv->ps_force = true;
- psc->bLeisurePs = false;
- ieee->ps = *extra;
- }
-
- mutex_unlock(&priv->wx_mutex);
-
- return 0;
-}
-
-static int _rtl92e_wx_set_lps_awake_interval(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
- struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *)
- (&priv->rtllib->pwr_save_ctrl);
-
- mutex_lock(&priv->wx_mutex);
-
- netdev_info(dev, "%s(): set lps awake interval ! extra is %d\n",
- __func__, *extra);
-
- psc->reg_max_lps_awake_intvl = *extra;
- mutex_unlock(&priv->wx_mutex);
- return 0;
-}
-
-static int _rtl92e_wx_set_force_lps(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
-
- mutex_lock(&priv->wx_mutex);
-
- netdev_info(dev,
- "%s(): force LPS ! extra is %d (1 is open 0 is close)\n",
- __func__, *extra);
- priv->force_lps = *extra;
- mutex_unlock(&priv->wx_mutex);
- return 0;
-}
-
-static int _rtl92e_wx_set_debug(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
- u8 c = *extra;
-
- if (priv->hw_radio_off)
- return 0;
-
- netdev_info(dev, "=====>%s(), *extra:%x, debugflag:%x\n", __func__,
- *extra, rt_global_debug_component);
- if (c > 0)
- rt_global_debug_component |= (1 << c);
- else
- rt_global_debug_component &= BIT31;
- return 0;
-}
-
static int _rtl92e_wx_set_mode(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
struct r8192_priv *priv = rtllib_priv(dev);
- struct rtllib_device *ieee = netdev_priv_rsl(dev);
enum rt_rf_power_state rt_state;
int ret;
@@ -250,8 +135,7 @@ static int _rtl92e_wx_set_mode(struct net_device *dev,
return 0;
rt_state = priv->rtllib->rf_power_state;
mutex_lock(&priv->wx_mutex);
- if (wrqu->mode == IW_MODE_ADHOC || wrqu->mode == IW_MODE_MONITOR ||
- ieee->net_promiscuous_md) {
+ if (wrqu->mode == IW_MODE_MONITOR) {
if (rt_state == rf_off) {
if (priv->rtllib->rf_off_reason >
RF_CHANGE_BY_IPS) {
@@ -724,22 +608,6 @@ end_hw_sec:
return ret;
}
-static int _rtl92e_wx_set_scan_type(struct net_device *dev,
- struct iw_request_info *aa,
- union iwreq_data *wrqu, char *p)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
- int *parms = (int *)p;
- int mode = parms[0];
-
- if (priv->hw_radio_off)
- return 0;
-
- priv->rtllib->active_scan = mode;
-
- return 1;
-}
-
#define R8192_MAX_RETRY 255
static int _rtl92e_wx_set_retry(struct net_device *dev,
struct iw_request_info *info,
@@ -844,8 +712,7 @@ static int _rtl92e_wx_set_encode_ext(struct net_device *dev,
idx--;
group = ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY;
- if ((!group) || (ieee->iw_mode == IW_MODE_ADHOC) ||
- (alg == KEY_TYPE_WEP40)) {
+ if ((!group) || (alg == KEY_TYPE_WEP40)) {
if ((ext->key_len == 13) && (alg == KEY_TYPE_WEP40))
alg = KEY_TYPE_WEP104;
ieee->pairwise_key_type = alg;
@@ -865,7 +732,7 @@ static int _rtl92e_wx_set_encode_ext(struct net_device *dev,
rtl92e_set_swcam(dev, idx, idx, alg, broadcast_addr, key);
} else {
if ((ieee->pairwise_key_type == KEY_TYPE_CCMP) &&
- ieee->ht_info->bCurrentHTSupport)
+ ieee->ht_info->current_ht_support)
rtl92e_writeb(dev, 0x173, 1);
rtl92e_set_key(dev, 4, idx, alg,
(u8 *)ieee->ap_mac_addr, 0, key);
@@ -938,7 +805,7 @@ static int _rtl92e_wx_get_gen_ie(struct net_device *dev,
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
- if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
+ if (ieee->wpa_ie_len == 0 || !ieee->wpa_ie) {
data->data.length = 0;
return 0;
}
@@ -951,67 +818,6 @@ static int _rtl92e_wx_get_gen_ie(struct net_device *dev,
return ret;
}
-#define OID_RT_INTEL_PROMISCUOUS_MODE 0xFF0101F6
-
-static int _rtl92e_wx_set_promisc_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
- struct rtllib_device *ieee = priv->rtllib;
-
- u32 info_buf[3];
-
- u32 oid;
- u32 promiscuous_on;
- u32 fltr_src_sta_frame;
-
- if (copy_from_user(info_buf, wrqu->data.pointer, sizeof(info_buf)))
- return -EFAULT;
-
- oid = info_buf[0];
- promiscuous_on = info_buf[1];
- fltr_src_sta_frame = info_buf[2];
-
- if (oid == OID_RT_INTEL_PROMISCUOUS_MODE) {
- ieee->intel_promiscuous_md_info.promiscuous_on =
- (promiscuous_on) ? (true) : (false);
- ieee->intel_promiscuous_md_info.fltr_src_sta_frame =
- (fltr_src_sta_frame) ? (true) : (false);
- (promiscuous_on) ?
- (rtllib_EnableIntelPromiscuousMode(dev, false)) :
- (rtllib_DisableIntelPromiscuousMode(dev, false));
-
- netdev_info(dev,
- "=======>%s(), on = %d, filter src sta = %d\n",
- __func__, promiscuous_on,
- fltr_src_sta_frame);
- } else {
- return -1;
- }
-
- return 0;
-}
-
-static int _rtl92e_wx_get_promisc_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
- struct rtllib_device *ieee = priv->rtllib;
-
- mutex_lock(&priv->wx_mutex);
-
- snprintf(extra, 45, "PromiscuousMode:%d, FilterSrcSTAFrame:%d",
- ieee->intel_promiscuous_md_info.promiscuous_on,
- ieee->intel_promiscuous_md_info.fltr_src_sta_frame);
- wrqu->data.length = strlen(extra) + 1;
-
- mutex_unlock(&priv->wx_mutex);
-
- return 0;
-}
-
#define IW_IOCTL(x) ((x) - SIOCSIWCOMMIT)
static iw_handler r8192_wx_handlers[] = {
[IW_IOCTL(SIOCGIWNAME)] = _rtl92e_wx_get_name,
@@ -1047,72 +853,6 @@ static iw_handler r8192_wx_handlers[] = {
[IW_IOCTL(SIOCSIWENCODEEXT)] = _rtl92e_wx_set_encode_ext,
};
-/* the following rule need to be following,
- * Odd : get (world access),
- * even : set (root access)
- */
-static const struct iw_priv_args r8192_private_args[] = {
- {
- SIOCIWFIRSTPRIV + 0x0,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_debugflag"
- }, {
- SIOCIWFIRSTPRIV + 0x1,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "activescan"
- }, {
- SIOCIWFIRSTPRIV + 0x2,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rawtx"
- }, {
- SIOCIWFIRSTPRIV + 0x3,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "forcereset"
- }, {
- SIOCIWFIRSTPRIV + 0x6,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, IW_PRIV_TYPE_NONE,
- "set_power"
- }, {
- SIOCIWFIRSTPRIV + 0xa,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, IW_PRIV_TYPE_NONE,
- "lps_interv"
- }, {
- SIOCIWFIRSTPRIV + 0xb,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, IW_PRIV_TYPE_NONE,
- "lps_force"
- }, {
- SIOCIWFIRSTPRIV + 0x16,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3, 0, "setpromisc"
- }, {
- SIOCIWFIRSTPRIV + 0x17,
- 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 45, "getpromisc"
- }
-
-};
-
-static iw_handler r8192_private_handler[] = {
- (iw_handler)_rtl92e_wx_set_debug, /*SIOCIWSECONDPRIV*/
- (iw_handler)_rtl92e_wx_set_scan_type,
- (iw_handler)_rtl92e_wx_set_rawtx,
- (iw_handler)_rtl92e_wx_force_reset,
- (iw_handler)NULL,
- (iw_handler)NULL,
- (iw_handler)_rtl92e_wx_adapter_power_status,
- (iw_handler)NULL,
- (iw_handler)NULL,
- (iw_handler)NULL,
- (iw_handler)_rtl92e_wx_set_lps_awake_interval,
- (iw_handler)_rtl92e_wx_set_force_lps,
- (iw_handler)NULL,
- (iw_handler)NULL,
- (iw_handler)NULL,
- (iw_handler)NULL,
- (iw_handler)NULL,
- (iw_handler)NULL,
- (iw_handler)NULL,
- (iw_handler)NULL,
- (iw_handler)NULL,
- (iw_handler)NULL,
- (iw_handler)_rtl92e_wx_set_promisc_mode,
- (iw_handler)_rtl92e_wx_get_promisc_mode,
-};
-
static struct iw_statistics *_rtl92e_get_wireless_stats(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -1144,10 +884,5 @@ static struct iw_statistics *_rtl92e_get_wireless_stats(struct net_device *dev)
const struct iw_handler_def r8192_wx_handlers_def = {
.standard = r8192_wx_handlers,
.num_standard = ARRAY_SIZE(r8192_wx_handlers),
- .private = r8192_private_handler,
- .num_private = ARRAY_SIZE(r8192_private_handler),
- .num_private_args = sizeof(r8192_private_args) /
- sizeof(struct iw_priv_args),
.get_wireless_stats = _rtl92e_get_wireless_stats,
- .private_args = (struct iw_priv_args *)r8192_private_args,
};
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index 0e3372868f97..4af8055d2489 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -10,109 +10,109 @@
#include "rtllib.h"
#include "rtl819x_BA.h"
-static void activate_ba_entry(struct ba_record *pBA, u16 Time)
+static void activate_ba_entry(struct ba_record *ba, u16 time)
{
- pBA->b_valid = true;
- if (Time != 0)
- mod_timer(&pBA->timer, jiffies + msecs_to_jiffies(Time));
+ ba->b_valid = true;
+ if (time != 0)
+ mod_timer(&ba->timer, jiffies + msecs_to_jiffies(time));
}
-static void deactivate_ba_entry(struct rtllib_device *ieee, struct ba_record *pBA)
+static void deactivate_ba_entry(struct rtllib_device *ieee, struct ba_record *ba)
{
- pBA->b_valid = false;
- del_timer_sync(&pBA->timer);
+ ba->b_valid = false;
+ del_timer_sync(&ba->timer);
}
static u8 tx_ts_delete_ba(struct rtllib_device *ieee, struct tx_ts_record *pTxTs)
{
- struct ba_record *pAdmittedBa = &pTxTs->TxAdmittedBARecord;
- struct ba_record *pPendingBa = &pTxTs->TxPendingBARecord;
+ struct ba_record *admitted_ba = &pTxTs->TxAdmittedBARecord;
+ struct ba_record *pending_ba = &pTxTs->TxPendingBARecord;
u8 bSendDELBA = false;
- if (pPendingBa->b_valid) {
- deactivate_ba_entry(ieee, pPendingBa);
+ if (pending_ba->b_valid) {
+ deactivate_ba_entry(ieee, pending_ba);
bSendDELBA = true;
}
- if (pAdmittedBa->b_valid) {
- deactivate_ba_entry(ieee, pAdmittedBa);
+ if (admitted_ba->b_valid) {
+ deactivate_ba_entry(ieee, admitted_ba);
bSendDELBA = true;
}
return bSendDELBA;
}
-static u8 rx_ts_delete_ba(struct rtllib_device *ieee, struct rx_ts_record *pRxTs)
+static u8 rx_ts_delete_ba(struct rtllib_device *ieee, struct rx_ts_record *ts)
{
- struct ba_record *pBa = &pRxTs->rx_admitted_ba_record;
+ struct ba_record *ba = &ts->rx_admitted_ba_record;
u8 bSendDELBA = false;
- if (pBa->b_valid) {
- deactivate_ba_entry(ieee, pBa);
+ if (ba->b_valid) {
+ deactivate_ba_entry(ieee, ba);
bSendDELBA = true;
}
return bSendDELBA;
}
-void rtllib_reset_ba_entry(struct ba_record *pBA)
+void rtllib_reset_ba_entry(struct ba_record *ba)
{
- pBA->b_valid = false;
- pBA->ba_param_set.short_data = 0;
- pBA->ba_timeout_value = 0;
- pBA->dialog_token = 0;
- pBA->ba_start_seq_ctrl.short_data = 0;
+ ba->b_valid = false;
+ ba->ba_param_set.short_data = 0;
+ ba->ba_timeout_value = 0;
+ ba->dialog_token = 0;
+ ba->ba_start_seq_ctrl.short_data = 0;
}
-static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *Dst,
- struct ba_record *pBA,
- u16 StatusCode, u8 type)
+static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *dst,
+ struct ba_record *ba,
+ u16 status_code, u8 type)
{
struct sk_buff *skb = NULL;
- struct rtllib_hdr_3addr *BAReq = NULL;
+ struct ieee80211_hdr_3addr *BAReq = NULL;
u8 *tag = NULL;
u16 len = ieee->tx_headroom + 9;
netdev_dbg(ieee->dev, "%s(): frame(%d) sentd to: %pM, ieee->dev:%p\n",
- __func__, type, Dst, ieee->dev);
+ __func__, type, dst, ieee->dev);
- if (!pBA) {
- netdev_warn(ieee->dev, "pBA is NULL\n");
+ if (!ba) {
+ netdev_warn(ieee->dev, "ba is NULL\n");
return NULL;
}
- skb = dev_alloc_skb(len + sizeof(struct rtllib_hdr_3addr));
+ skb = dev_alloc_skb(len + sizeof(struct ieee80211_hdr_3addr));
if (!skb)
return NULL;
- memset(skb->data, 0, sizeof(struct rtllib_hdr_3addr));
+ memset(skb->data, 0, sizeof(struct ieee80211_hdr_3addr));
skb_reserve(skb, ieee->tx_headroom);
- BAReq = skb_put(skb, sizeof(struct rtllib_hdr_3addr));
+ BAReq = skb_put(skb, sizeof(struct ieee80211_hdr_3addr));
- ether_addr_copy(BAReq->addr1, Dst);
+ ether_addr_copy(BAReq->addr1, dst);
ether_addr_copy(BAReq->addr2, ieee->dev->dev_addr);
ether_addr_copy(BAReq->addr3, ieee->current_network.bssid);
- BAReq->frame_ctl = cpu_to_le16(RTLLIB_STYPE_MANAGE_ACT);
+ BAReq->frame_control = cpu_to_le16(IEEE80211_STYPE_ACTION);
tag = skb_put(skb, 9);
*tag++ = ACT_CAT_BA;
*tag++ = type;
- *tag++ = pBA->dialog_token;
+ *tag++ = ba->dialog_token;
if (type == ACT_ADDBARSP) {
- put_unaligned_le16(StatusCode, tag);
+ put_unaligned_le16(status_code, tag);
tag += 2;
}
- put_unaligned_le16(pBA->ba_param_set.short_data, tag);
+ put_unaligned_le16(ba->ba_param_set.short_data, tag);
tag += 2;
- put_unaligned_le16(pBA->ba_timeout_value, tag);
+ put_unaligned_le16(ba->ba_timeout_value, tag);
tag += 2;
if (type == ACT_ADDBAREQ) {
- memcpy(tag, (u8 *)&pBA->ba_start_seq_ctrl, 2);
+ memcpy(tag, (u8 *)&ba->ba_start_seq_ctrl, 2);
tag += 2;
}
@@ -124,36 +124,36 @@ static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *Dst,
}
static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
- struct ba_record *pBA,
- enum tr_select TxRxSelect, u16 ReasonCode)
+ struct ba_record *ba,
+ enum tr_select TxRxSelect, u16 reason_code)
{
union delba_param_set DelbaParamSet;
struct sk_buff *skb = NULL;
- struct rtllib_hdr_3addr *Delba = NULL;
+ struct ieee80211_hdr_3addr *Delba = NULL;
u8 *tag = NULL;
u16 len = 6 + ieee->tx_headroom;
if (net_ratelimit())
- netdev_dbg(ieee->dev, "%s(): ReasonCode(%d) sentd to: %pM\n",
- __func__, ReasonCode, dst);
+ netdev_dbg(ieee->dev, "%s(): reason_code(%d) sentd to: %pM\n",
+ __func__, reason_code, dst);
memset(&DelbaParamSet, 0, 2);
DelbaParamSet.field.initiator = (TxRxSelect == TX_DIR) ? 1 : 0;
- DelbaParamSet.field.tid = pBA->ba_param_set.field.tid;
+ DelbaParamSet.field.tid = ba->ba_param_set.field.tid;
- skb = dev_alloc_skb(len + sizeof(struct rtllib_hdr_3addr));
+ skb = dev_alloc_skb(len + sizeof(struct ieee80211_hdr_3addr));
if (!skb)
return NULL;
skb_reserve(skb, ieee->tx_headroom);
- Delba = skb_put(skb, sizeof(struct rtllib_hdr_3addr));
+ Delba = skb_put(skb, sizeof(struct ieee80211_hdr_3addr));
ether_addr_copy(Delba->addr1, dst);
ether_addr_copy(Delba->addr2, ieee->dev->dev_addr);
ether_addr_copy(Delba->addr3, ieee->current_network.bssid);
- Delba->frame_ctl = cpu_to_le16(RTLLIB_STYPE_MANAGE_ACT);
+ Delba->frame_control = cpu_to_le16(IEEE80211_STYPE_ACTION);
tag = skb_put(skb, 6);
@@ -163,7 +163,7 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
put_unaligned_le16(DelbaParamSet.short_data, tag);
tag += 2;
- put_unaligned_le16(ReasonCode, tag);
+ put_unaligned_le16(reason_code, tag);
tag += 2;
#ifdef VERBOSE_DEBUG
@@ -174,11 +174,11 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
}
static void rtllib_send_ADDBAReq(struct rtllib_device *ieee, u8 *dst,
- struct ba_record *pBA)
+ struct ba_record *ba)
{
struct sk_buff *skb;
- skb = rtllib_ADDBA(ieee, dst, pBA, 0, ACT_ADDBAREQ);
+ skb = rtllib_ADDBA(ieee, dst, ba, 0, ACT_ADDBAREQ);
if (skb)
softmac_mgmt_xmit(skb, ieee);
@@ -187,11 +187,11 @@ static void rtllib_send_ADDBAReq(struct rtllib_device *ieee, u8 *dst,
}
static void rtllib_send_ADDBARsp(struct rtllib_device *ieee, u8 *dst,
- struct ba_record *pBA, u16 StatusCode)
+ struct ba_record *ba, u16 status_code)
{
struct sk_buff *skb;
- skb = rtllib_ADDBA(ieee, dst, pBA, StatusCode, ACT_ADDBARSP);
+ skb = rtllib_ADDBA(ieee, dst, ba, status_code, ACT_ADDBARSP);
if (skb)
softmac_mgmt_xmit(skb, ieee);
else
@@ -199,12 +199,12 @@ static void rtllib_send_ADDBARsp(struct rtllib_device *ieee, u8 *dst,
}
static void rtllib_send_DELBA(struct rtllib_device *ieee, u8 *dst,
- struct ba_record *pBA, enum tr_select TxRxSelect,
- u16 ReasonCode)
+ struct ba_record *ba, enum tr_select TxRxSelect,
+ u16 reason_code)
{
struct sk_buff *skb;
- skb = rtllib_DELBA(ieee, dst, pBA, TxRxSelect, ReasonCode);
+ skb = rtllib_DELBA(ieee, dst, ba, TxRxSelect, reason_code);
if (skb)
softmac_mgmt_xmit(skb, ieee);
else
@@ -213,19 +213,19 @@ static void rtllib_send_DELBA(struct rtllib_device *ieee, u8 *dst,
int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
{
- struct rtllib_hdr_3addr *req = NULL;
+ struct ieee80211_hdr_3addr *req = NULL;
u16 rc = 0;
u8 *dst = NULL, *pDialogToken = NULL, *tag = NULL;
- struct ba_record *pBA = NULL;
+ struct ba_record *ba = NULL;
union ba_param_set *pBaParamSet = NULL;
u16 *pBaTimeoutVal = NULL;
union sequence_control *pBaStartSeqCtrl = NULL;
- struct rx_ts_record *pTS = NULL;
+ struct rx_ts_record *ts = NULL;
- if (skb->len < sizeof(struct rtllib_hdr_3addr) + 9) {
+ if (skb->len < sizeof(struct ieee80211_hdr_3addr) + 9) {
netdev_warn(ieee->dev, "Invalid skb len in BAREQ(%d / %d)\n",
(int)skb->len,
- (int)(sizeof(struct rtllib_hdr_3addr) + 9));
+ (int)(sizeof(struct ieee80211_hdr_3addr) + 9));
return -1;
}
@@ -234,32 +234,32 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
skb->data, skb->len);
#endif
- req = (struct rtllib_hdr_3addr *)skb->data;
+ req = (struct ieee80211_hdr_3addr *)skb->data;
tag = (u8 *)req;
dst = (u8 *)(&req->addr2[0]);
- tag += sizeof(struct rtllib_hdr_3addr);
+ tag += sizeof(struct ieee80211_hdr_3addr);
pDialogToken = tag + 2;
pBaParamSet = (union ba_param_set *)(tag + 3);
pBaTimeoutVal = (u16 *)(tag + 5);
pBaStartSeqCtrl = (union sequence_control *)(req + 7);
if (!ieee->current_network.qos_data.active ||
- !ieee->ht_info->bCurrentHTSupport ||
+ !ieee->ht_info->current_ht_support ||
(ieee->ht_info->iot_action & HT_IOT_ACT_REJECT_ADDBA_REQ)) {
rc = ADDBA_STATUS_REFUSED;
netdev_warn(ieee->dev,
"Failed to reply on ADDBA_REQ as some capability is not ready(%d, %d)\n",
ieee->current_network.qos_data.active,
- ieee->ht_info->bCurrentHTSupport);
+ ieee->ht_info->current_ht_support);
goto OnADDBAReq_Fail;
}
- if (!GetTs(ieee, (struct ts_common_info **)&pTS, dst,
+ if (!rtllib_get_ts(ieee, (struct ts_common_info **)&ts, dst,
(u8)(pBaParamSet->field.tid), RX_DIR, true)) {
rc = ADDBA_STATUS_REFUSED;
netdev_warn(ieee->dev, "%s(): can't get TS\n", __func__);
goto OnADDBAReq_Fail;
}
- pBA = &pTS->rx_admitted_ba_record;
+ ba = &ts->rx_admitted_ba_record;
if (pBaParamSet->field.ba_policy == BA_POLICY_DELAYED) {
rc = ADDBA_STATUS_INVALID_PARAM;
@@ -268,22 +268,22 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
goto OnADDBAReq_Fail;
}
- rtllib_FlushRxTsPendingPkts(ieee, pTS);
+ rtllib_FlushRxTsPendingPkts(ieee, ts);
- deactivate_ba_entry(ieee, pBA);
- pBA->dialog_token = *pDialogToken;
- pBA->ba_param_set = *pBaParamSet;
- pBA->ba_timeout_value = *pBaTimeoutVal;
- pBA->ba_start_seq_ctrl = *pBaStartSeqCtrl;
+ deactivate_ba_entry(ieee, ba);
+ ba->dialog_token = *pDialogToken;
+ ba->ba_param_set = *pBaParamSet;
+ ba->ba_timeout_value = *pBaTimeoutVal;
+ ba->ba_start_seq_ctrl = *pBaStartSeqCtrl;
if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev) ||
(ieee->ht_info->iot_action & HT_IOT_ACT_ALLOW_PEER_AGG_ONE_PKT))
- pBA->ba_param_set.field.buffer_size = 1;
+ ba->ba_param_set.field.buffer_size = 1;
else
- pBA->ba_param_set.field.buffer_size = 32;
+ ba->ba_param_set.field.buffer_size = 32;
- activate_ba_entry(pBA, 0);
- rtllib_send_ADDBARsp(ieee, dst, pBA, ADDBA_STATUS_SUCCESS);
+ activate_ba_entry(ba, 0);
+ rtllib_send_ADDBARsp(ieee, dst, ba, ADDBA_STATUS_SUCCESS);
return 0;
@@ -302,88 +302,88 @@ OnADDBAReq_Fail:
int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
{
- struct rtllib_hdr_3addr *rsp = NULL;
- struct ba_record *pPendingBA, *pAdmittedBA;
- struct tx_ts_record *pTS = NULL;
+ struct ieee80211_hdr_3addr *rsp = NULL;
+ struct ba_record *pending_ba, *pAdmittedBA;
+ struct tx_ts_record *ts = NULL;
u8 *dst = NULL, *pDialogToken = NULL, *tag = NULL;
- u16 *pStatusCode = NULL, *pBaTimeoutVal = NULL;
+ u16 *status_code = NULL, *pBaTimeoutVal = NULL;
union ba_param_set *pBaParamSet = NULL;
- u16 ReasonCode;
+ u16 reason_code;
- if (skb->len < sizeof(struct rtllib_hdr_3addr) + 9) {
+ if (skb->len < sizeof(struct ieee80211_hdr_3addr) + 9) {
netdev_warn(ieee->dev, "Invalid skb len in BARSP(%d / %d)\n",
(int)skb->len,
- (int)(sizeof(struct rtllib_hdr_3addr) + 9));
+ (int)(sizeof(struct ieee80211_hdr_3addr) + 9));
return -1;
}
- rsp = (struct rtllib_hdr_3addr *)skb->data;
+ rsp = (struct ieee80211_hdr_3addr *)skb->data;
tag = (u8 *)rsp;
dst = (u8 *)(&rsp->addr2[0]);
- tag += sizeof(struct rtllib_hdr_3addr);
+ tag += sizeof(struct ieee80211_hdr_3addr);
pDialogToken = tag + 2;
- pStatusCode = (u16 *)(tag + 3);
+ status_code = (u16 *)(tag + 3);
pBaParamSet = (union ba_param_set *)(tag + 5);
pBaTimeoutVal = (u16 *)(tag + 7);
if (!ieee->current_network.qos_data.active ||
- !ieee->ht_info->bCurrentHTSupport ||
+ !ieee->ht_info->current_ht_support ||
!ieee->ht_info->bCurrentAMPDUEnable) {
netdev_warn(ieee->dev,
"reject to ADDBA_RSP as some capability is not ready(%d, %d, %d)\n",
ieee->current_network.qos_data.active,
- ieee->ht_info->bCurrentHTSupport,
+ ieee->ht_info->current_ht_support,
ieee->ht_info->bCurrentAMPDUEnable);
- ReasonCode = DELBA_REASON_UNKNOWN_BA;
+ reason_code = DELBA_REASON_UNKNOWN_BA;
goto OnADDBARsp_Reject;
}
- if (!GetTs(ieee, (struct ts_common_info **)&pTS, dst,
+ if (!rtllib_get_ts(ieee, (struct ts_common_info **)&ts, dst,
(u8)(pBaParamSet->field.tid), TX_DIR, false)) {
netdev_warn(ieee->dev, "%s(): can't get TS\n", __func__);
- ReasonCode = DELBA_REASON_UNKNOWN_BA;
+ reason_code = DELBA_REASON_UNKNOWN_BA;
goto OnADDBARsp_Reject;
}
- pTS->bAddBaReqInProgress = false;
- pPendingBA = &pTS->TxPendingBARecord;
- pAdmittedBA = &pTS->TxAdmittedBARecord;
+ ts->bAddBaReqInProgress = false;
+ pending_ba = &ts->TxPendingBARecord;
+ pAdmittedBA = &ts->TxAdmittedBARecord;
if (pAdmittedBA->b_valid) {
netdev_dbg(ieee->dev, "%s(): ADDBA response already admitted\n",
__func__);
return -1;
- } else if (!pPendingBA->b_valid ||
- (*pDialogToken != pPendingBA->dialog_token)) {
+ } else if (!pending_ba->b_valid ||
+ (*pDialogToken != pending_ba->dialog_token)) {
netdev_warn(ieee->dev,
"%s(): ADDBA Rsp. BA invalid, DELBA!\n",
__func__);
- ReasonCode = DELBA_REASON_UNKNOWN_BA;
+ reason_code = DELBA_REASON_UNKNOWN_BA;
goto OnADDBARsp_Reject;
} else {
netdev_dbg(ieee->dev,
"%s(): Recv ADDBA Rsp. BA is admitted! Status code:%X\n",
- __func__, *pStatusCode);
- deactivate_ba_entry(ieee, pPendingBA);
+ __func__, *status_code);
+ deactivate_ba_entry(ieee, pending_ba);
}
- if (*pStatusCode == ADDBA_STATUS_SUCCESS) {
+ if (*status_code == ADDBA_STATUS_SUCCESS) {
if (pBaParamSet->field.ba_policy == BA_POLICY_DELAYED) {
- pTS->bAddBaReqDelayed = true;
+ ts->bAddBaReqDelayed = true;
deactivate_ba_entry(ieee, pAdmittedBA);
- ReasonCode = DELBA_REASON_END_BA;
+ reason_code = DELBA_REASON_END_BA;
goto OnADDBARsp_Reject;
}
pAdmittedBA->dialog_token = *pDialogToken;
pAdmittedBA->ba_timeout_value = *pBaTimeoutVal;
- pAdmittedBA->ba_start_seq_ctrl = pPendingBA->ba_start_seq_ctrl;
+ pAdmittedBA->ba_start_seq_ctrl = pending_ba->ba_start_seq_ctrl;
pAdmittedBA->ba_param_set = *pBaParamSet;
deactivate_ba_entry(ieee, pAdmittedBA);
activate_ba_entry(pAdmittedBA, *pBaTimeoutVal);
} else {
- pTS->bAddBaReqDelayed = true;
- pTS->bDisable_AddBa = true;
- ReasonCode = DELBA_REASON_END_BA;
+ ts->bAddBaReqDelayed = true;
+ ts->bDisable_AddBa = true;
+ reason_code = DELBA_REASON_END_BA;
goto OnADDBARsp_Reject;
}
@@ -394,30 +394,30 @@ OnADDBARsp_Reject:
struct ba_record BA;
BA.ba_param_set = *pBaParamSet;
- rtllib_send_DELBA(ieee, dst, &BA, TX_DIR, ReasonCode);
+ rtllib_send_DELBA(ieee, dst, &BA, TX_DIR, reason_code);
return 0;
}
}
int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb)
{
- struct rtllib_hdr_3addr *delba = NULL;
+ struct ieee80211_hdr_3addr *delba = NULL;
union delba_param_set *pDelBaParamSet = NULL;
u8 *dst = NULL;
- if (skb->len < sizeof(struct rtllib_hdr_3addr) + 6) {
+ if (skb->len < sizeof(struct ieee80211_hdr_3addr) + 6) {
netdev_warn(ieee->dev, "Invalid skb len in DELBA(%d / %d)\n",
(int)skb->len,
- (int)(sizeof(struct rtllib_hdr_3addr) + 6));
+ (int)(sizeof(struct ieee80211_hdr_3addr) + 6));
return -1;
}
if (!ieee->current_network.qos_data.active ||
- !ieee->ht_info->bCurrentHTSupport) {
+ !ieee->ht_info->current_ht_support) {
netdev_warn(ieee->dev,
"received DELBA while QOS or HT is not supported(%d, %d)\n",
ieee->current_network. qos_data.active,
- ieee->ht_info->bCurrentHTSupport);
+ ieee->ht_info->current_ht_support);
return -1;
}
@@ -425,14 +425,14 @@ int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb)
print_hex_dump_bytes("%s: ", DUMP_PREFIX_NONE, skb->data,
__func__, skb->len);
#endif
- delba = (struct rtllib_hdr_3addr *)skb->data;
+ delba = (struct ieee80211_hdr_3addr *)skb->data;
dst = (u8 *)(&delba->addr2[0]);
- pDelBaParamSet = (union delba_param_set *)&delba->payload[2];
+ pDelBaParamSet = (union delba_param_set *)&delba->seq_ctrl + 2;
if (pDelBaParamSet->field.initiator == 1) {
- struct rx_ts_record *pRxTs;
+ struct rx_ts_record *ts;
- if (!GetTs(ieee, (struct ts_common_info **)&pRxTs, dst,
+ if (!rtllib_get_ts(ieee, (struct ts_common_info **)&ts, dst,
(u8)pDelBaParamSet->field.tid, RX_DIR, false)) {
netdev_warn(ieee->dev,
"%s(): can't get TS for RXTS. dst:%pM TID:%d\n",
@@ -441,11 +441,11 @@ int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb)
return -1;
}
- rx_ts_delete_ba(ieee, pRxTs);
+ rx_ts_delete_ba(ieee, ts);
} else {
struct tx_ts_record *pTxTs;
- if (!GetTs(ieee, (struct ts_common_info **)&pTxTs, dst,
+ if (!rtllib_get_ts(ieee, (struct ts_common_info **)&pTxTs, dst,
(u8)pDelBaParamSet->field.tid, TX_DIR, false)) {
netdev_warn(ieee->dev, "%s(): can't get TS for TXTS\n",
__func__);
@@ -461,27 +461,27 @@ int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb)
return 0;
}
-void rtllib_ts_init_add_ba(struct rtllib_device *ieee, struct tx_ts_record *pTS,
- u8 Policy, u8 bOverwritePending)
+void rtllib_ts_init_add_ba(struct rtllib_device *ieee, struct tx_ts_record *ts,
+ u8 policy, u8 bOverwritePending)
{
- struct ba_record *pBA = &pTS->TxPendingBARecord;
+ struct ba_record *ba = &ts->TxPendingBARecord;
- if (pBA->b_valid && !bOverwritePending)
+ if (ba->b_valid && !bOverwritePending)
return;
- deactivate_ba_entry(ieee, pBA);
+ deactivate_ba_entry(ieee, ba);
- pBA->dialog_token++;
- pBA->ba_param_set.field.amsdu_support = 0;
- pBA->ba_param_set.field.ba_policy = Policy;
- pBA->ba_param_set.field.tid = pTS->TsCommonInfo.TSpec.f.TSInfo.field.ucTSID;
- pBA->ba_param_set.field.buffer_size = 32;
- pBA->ba_timeout_value = 0;
- pBA->ba_start_seq_ctrl.field.seq_num = (pTS->TxCurSeq + 3) % 4096;
+ ba->dialog_token++;
+ ba->ba_param_set.field.amsdu_support = 0;
+ ba->ba_param_set.field.ba_policy = policy;
+ ba->ba_param_set.field.tid = ts->TsCommonInfo.TSpec.ucTSID;
+ ba->ba_param_set.field.buffer_size = 32;
+ ba->ba_timeout_value = 0;
+ ba->ba_start_seq_ctrl.field.seq_num = (ts->TxCurSeq + 3) % 4096;
- activate_ba_entry(pBA, BA_SETUP_TIMEOUT);
+ activate_ba_entry(ba, BA_SETUP_TIMEOUT);
- rtllib_send_ADDBAReq(ieee, pTS->TsCommonInfo.Addr, pBA);
+ rtllib_send_ADDBAReq(ieee, ts->TsCommonInfo.addr, ba);
}
void rtllib_ts_init_del_ba(struct rtllib_device *ieee,
@@ -493,17 +493,17 @@ void rtllib_ts_init_del_ba(struct rtllib_device *ieee,
(struct tx_ts_record *)pTsCommonInfo;
if (tx_ts_delete_ba(ieee, pTxTs))
- rtllib_send_DELBA(ieee, pTsCommonInfo->Addr,
+ rtllib_send_DELBA(ieee, pTsCommonInfo->addr,
(pTxTs->TxAdmittedBARecord.b_valid) ?
(&pTxTs->TxAdmittedBARecord) :
(&pTxTs->TxPendingBARecord),
TxRxSelect, DELBA_REASON_END_BA);
} else if (TxRxSelect == RX_DIR) {
- struct rx_ts_record *pRxTs =
+ struct rx_ts_record *ts =
(struct rx_ts_record *)pTsCommonInfo;
- if (rx_ts_delete_ba(ieee, pRxTs))
- rtllib_send_DELBA(ieee, pTsCommonInfo->Addr,
- &pRxTs->rx_admitted_ba_record,
+ if (rx_ts_delete_ba(ieee, ts))
+ rtllib_send_DELBA(ieee, pTsCommonInfo->addr,
+ &ts->rx_admitted_ba_record,
TxRxSelect, DELBA_REASON_END_BA);
}
}
@@ -525,20 +525,20 @@ void rtllib_tx_ba_inact_timeout(struct timer_list *t)
struct rtllib_device *ieee = container_of(pTxTs, struct rtllib_device,
TxTsRecord[pTxTs->num]);
tx_ts_delete_ba(ieee, pTxTs);
- rtllib_send_DELBA(ieee, pTxTs->TsCommonInfo.Addr,
+ rtllib_send_DELBA(ieee, pTxTs->TsCommonInfo.addr,
&pTxTs->TxAdmittedBARecord, TX_DIR,
DELBA_REASON_TIMEOUT);
}
void rtllib_rx_ba_inact_timeout(struct timer_list *t)
{
- struct rx_ts_record *pRxTs = from_timer(pRxTs, t,
+ struct rx_ts_record *ts = from_timer(ts, t,
rx_admitted_ba_record.timer);
- struct rtllib_device *ieee = container_of(pRxTs, struct rtllib_device,
- RxTsRecord[pRxTs->num]);
+ struct rtllib_device *ieee = container_of(ts, struct rtllib_device,
+ RxTsRecord[ts->num]);
- rx_ts_delete_ba(ieee, pRxTs);
- rtllib_send_DELBA(ieee, pRxTs->ts_common_info.Addr,
- &pRxTs->rx_admitted_ba_record, RX_DIR,
+ rx_ts_delete_ba(ieee, ts);
+ rtllib_send_DELBA(ieee, ts->ts_common_info.addr,
+ &ts->rx_admitted_ba_record, RX_DIR,
DELBA_REASON_TIMEOUT);
}
diff --git a/drivers/staging/rtl8192e/rtl819x_HT.h b/drivers/staging/rtl8192e/rtl819x_HT.h
index 2bbd01048561..f8eb4d553fe0 100644
--- a/drivers/staging/rtl8192e/rtl819x_HT.h
+++ b/drivers/staging/rtl8192e/rtl819x_HT.h
@@ -93,7 +93,7 @@ enum ht_aggre_mode {
struct rt_hi_throughput {
u8 enable_ht;
- u8 bCurrentHTSupport;
+ u8 current_ht_support;
u8 bRegBW40MHz;
u8 bCurBW40MHz;
u8 bRegShortGI40MHz;
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index f19feea46158..e607bccc079a 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -67,7 +67,7 @@ static u8 CISCO_BROADCOM[3] = {0x00, 0x17, 0x94};
static u8 LINKSYS_MARVELL_4400N[3] = {0x00, 0x14, 0xa4};
-void HTUpdateDefaultSetting(struct rtllib_device *ieee)
+void ht_update_default_setting(struct rtllib_device *ieee)
{
struct rt_hi_throughput *ht_info = ieee->ht_info;
@@ -207,27 +207,6 @@ static void HTIOTPeerDetermine(struct rtllib_device *ieee)
netdev_dbg(ieee->dev, "IOTPEER: %x\n", ht_info->IOTPeer);
}
-static u8 HTIOTActIsDisableMCS14(struct rtllib_device *ieee, u8 *PeerMacAddr)
-{
- return 0;
-}
-
-static bool HTIOTActIsDisableMCS15(struct rtllib_device *ieee)
-{
- return false;
-}
-
-static bool HTIOTActIsDisableMCSTwoSpatialStream(struct rtllib_device *ieee)
-{
- return false;
-}
-
-static u8 HTIOTActIsDisableEDCATurbo(struct rtllib_device *ieee,
- u8 *PeerMacAddr)
-{
- return false;
-}
-
static u8 HTIOTActIsMgntUseCCK6M(struct rtllib_device *ieee,
struct rtllib_network *network)
{
@@ -352,7 +331,6 @@ void HTConstructCapabilityElement(struct rtllib_device *ieee, u8 *posHTCap,
void HTConstructInfoElement(struct rtllib_device *ieee, u8 *posHTInfo,
u8 *len, u8 IsEncrypt)
{
- struct rt_hi_throughput *pHT = ieee->ht_info;
struct ht_info_ele *pHTInfoEle = (struct ht_info_ele *)posHTInfo;
if (!posHTInfo || !pHTInfoEle) {
@@ -363,32 +341,7 @@ void HTConstructInfoElement(struct rtllib_device *ieee, u8 *posHTInfo,
}
memset(posHTInfo, 0, *len);
- if (ieee->iw_mode == IW_MODE_ADHOC) {
- pHTInfoEle->ControlChl = ieee->current_network.channel;
- pHTInfoEle->ExtChlOffset = ((!pHT->bRegBW40MHz) ?
- HT_EXTCHNL_OFFSET_NO_EXT :
- (ieee->current_network.channel <= 6)
- ? HT_EXTCHNL_OFFSET_UPPER :
- HT_EXTCHNL_OFFSET_LOWER);
- pHTInfoEle->RecommemdedTxWidth = pHT->bRegBW40MHz;
- pHTInfoEle->RIFS = 0;
- pHTInfoEle->PSMPAccessOnly = 0;
- pHTInfoEle->SrvIntGranularity = 0;
- pHTInfoEle->OptMode = pHT->current_op_mode;
- pHTInfoEle->NonGFDevPresent = 0;
- pHTInfoEle->DualBeacon = 0;
- pHTInfoEle->SecondaryBeacon = 0;
- pHTInfoEle->LSigTxopProtectFull = 0;
- pHTInfoEle->PcoActive = 0;
- pHTInfoEle->PcoPhase = 0;
-
- memset(pHTInfoEle->BasicMSC, 0, 16);
-
- *len = 22 + 2;
-
- } else {
- *len = 0;
- }
+ *len = 0;
}
void HTConstructRT2RTAggElement(struct rtllib_device *ieee, u8 *posRT2RTAgg,
@@ -515,7 +468,7 @@ void HTOnAssocRsp(struct rtllib_device *ieee)
static const u8 EWC11NHTCap[] = { 0x00, 0x90, 0x4c, 0x33 };
static const u8 EWC11NHTInfo[] = { 0x00, 0x90, 0x4c, 0x34 };
- if (!ht_info->bCurrentHTSupport) {
+ if (!ht_info->current_ht_support) {
netdev_warn(ieee->dev, "%s(): HT_DISABLE\n", __func__);
return;
}
@@ -620,7 +573,7 @@ void HTInitializeHTInfo(struct rtllib_device *ieee)
{
struct rt_hi_throughput *ht_info = ieee->ht_info;
- ht_info->bCurrentHTSupport = false;
+ ht_info->current_ht_support = false;
ht_info->bCurBW40MHz = false;
ht_info->cur_tx_bw40mhz = false;
@@ -691,7 +644,7 @@ void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
* function rtllib_softmac_new_net. WB 2008.09.10
*/
if (pNetwork->bssht.bd_support_ht) {
- ht_info->bCurrentHTSupport = true;
+ ht_info->current_ht_support = true;
ht_info->ePeerHTSpecVer = pNetwork->bssht.bd_ht_spec_ver;
if (pNetwork->bssht.bd_ht_cap_len > 0 &&
@@ -722,22 +675,6 @@ void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
HTIOTPeerDetermine(ieee);
ht_info->iot_action = 0;
- bIOTAction = HTIOTActIsDisableMCS14(ieee, pNetwork->bssid);
- if (bIOTAction)
- ht_info->iot_action |= HT_IOT_ACT_DISABLE_MCS14;
-
- bIOTAction = HTIOTActIsDisableMCS15(ieee);
- if (bIOTAction)
- ht_info->iot_action |= HT_IOT_ACT_DISABLE_MCS15;
-
- bIOTAction = HTIOTActIsDisableMCSTwoSpatialStream(ieee);
- if (bIOTAction)
- ht_info->iot_action |= HT_IOT_ACT_DISABLE_ALL_2SS;
-
- bIOTAction = HTIOTActIsDisableEDCATurbo(ieee, pNetwork->bssid);
- if (bIOTAction)
- ht_info->iot_action |= HT_IOT_ACT_DISABLE_EDCA_TURBO;
-
bIOTAction = HTIOTActIsMgntUseCCK6M(ieee, pNetwork);
if (bIOTAction)
ht_info->iot_action |= HT_IOT_ACT_MGNT_USE_CCK_6M;
@@ -745,7 +682,7 @@ void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
if (bIOTAction)
ht_info->iot_action |= HT_IOT_ACT_CDD_FSYNC;
} else {
- ht_info->bCurrentHTSupport = false;
+ ht_info->current_ht_support = false;
ht_info->current_rt2rt_aggregation = false;
ht_info->current_rt2rt_long_slot_time = false;
ht_info->RT2RT_HT_Mode = (enum rt_ht_capability)0;
@@ -762,52 +699,16 @@ void HT_update_self_and_peer_setting(struct rtllib_device *ieee,
struct ht_info_ele *pPeerHTInfo =
(struct ht_info_ele *)pNetwork->bssht.bd_ht_info_buf;
- if (ht_info->bCurrentHTSupport) {
+ if (ht_info->current_ht_support) {
if (pNetwork->bssht.bd_ht_info_len != 0)
ht_info->current_op_mode = pPeerHTInfo->OptMode;
}
}
EXPORT_SYMBOL(HT_update_self_and_peer_setting);
-void HTUseDefaultSetting(struct rtllib_device *ieee)
-{
- struct rt_hi_throughput *ht_info = ieee->ht_info;
-
- if (ht_info->enable_ht) {
- ht_info->bCurrentHTSupport = true;
- ht_info->bCurSuppCCK = ht_info->bRegSuppCCK;
-
- ht_info->bCurBW40MHz = ht_info->bRegBW40MHz;
- ht_info->bCurShortGI20MHz = ht_info->bRegShortGI20MHz;
-
- ht_info->bCurShortGI40MHz = ht_info->bRegShortGI40MHz;
-
- if (ieee->iw_mode == IW_MODE_ADHOC)
- ieee->current_network.qos_data.active =
- ieee->current_network.qos_data.supported;
- ht_info->bCurrent_AMSDU_Support = ht_info->bAMSDU_Support;
- ht_info->nCurrent_AMSDU_MaxSize = ht_info->nAMSDU_MaxSize;
-
- ht_info->bCurrentAMPDUEnable = ht_info->bAMPDUEnable;
- ht_info->CurrentAMPDUFactor = ht_info->AMPDU_Factor;
-
- ht_info->current_mpdu_density = ht_info->current_mpdu_density;
-
- HTFilterMCSRate(ieee, ieee->reg_dot11tx_ht_oper_rate_set,
- ieee->dot11ht_oper_rate_set);
- ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee,
- ieee->dot11ht_oper_rate_set,
- MCS_FILTER_ALL);
- ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate;
-
- } else {
- ht_info->bCurrentHTSupport = false;
- }
-}
-
u8 HTCCheck(struct rtllib_device *ieee, u8 *pFrame)
{
- if (ieee->ht_info->bCurrentHTSupport) {
+ if (ieee->ht_info->current_ht_support) {
if ((IsQoSDataFrame(pFrame) && Frame_Order(pFrame)) == 1) {
netdev_dbg(ieee->dev, "HT CONTROL FILED EXIST!!\n");
return true;
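
[Note: the HTCCheck() path above keys off the Order bit of QoS data frames to decide whether an HT Control field is present. A minimal sketch of that test using the mainline frame-control constants this series converts to; the helper name and exact form are illustrative, not part of the patch:

	static bool frame_has_ht_control(const u8 *frame)
	{
		/* first two octets of any 802.11 frame are the frame-control field */
		u16 fc = le16_to_cpu(*(const __le16 *)frame);

		/* a QoS data frame (type Data, subtype QoS data) with the Order bit
		 * set carries a 4-byte HT Control field after the QoS Control field
		 */
		return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
			       (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA) &&
		       (fc & IEEE80211_FCTL_ORDER);
	}
]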
diff --git a/drivers/staging/rtl8192e/rtl819x_Qos.h b/drivers/staging/rtl8192e/rtl819x_Qos.h
index 0499711ea2bf..1c00092ea3a5 100644
--- a/drivers/staging/rtl8192e/rtl819x_Qos.h
+++ b/drivers/staging/rtl8192e/rtl819x_Qos.h
@@ -7,76 +7,9 @@
#ifndef __INC_QOS_TYPE_H
#define __INC_QOS_TYPE_H
-#define BIT0 0x00000001
-#define BIT1 0x00000002
-#define BIT2 0x00000004
-#define BIT3 0x00000008
-#define BIT4 0x00000010
-#define BIT5 0x00000020
-#define BIT6 0x00000040
-#define BIT7 0x00000080
-#define BIT8 0x00000100
-#define BIT9 0x00000200
-#define BIT10 0x00000400
-#define BIT11 0x00000800
-#define BIT12 0x00001000
-#define BIT13 0x00002000
-#define BIT14 0x00004000
-#define BIT15 0x00008000
-#define BIT16 0x00010000
-#define BIT17 0x00020000
-#define BIT18 0x00040000
-#define BIT19 0x00080000
-#define BIT20 0x00100000
-#define BIT21 0x00200000
-#define BIT22 0x00400000
-#define BIT23 0x00800000
-#define BIT24 0x01000000
-#define BIT25 0x02000000
-#define BIT26 0x04000000
-#define BIT27 0x08000000
-#define BIT28 0x10000000
-#define BIT29 0x20000000
-#define BIT30 0x40000000
-#define BIT31 0x80000000
-
-union qos_tsinfo {
- u8 charData[3];
- struct {
- u8 ucTrafficType:1;
- u8 ucTSID:4;
- u8 ucDirection:2;
- u8 ucAccessPolicy:2;
- u8 ucAggregation:1;
- u8 ucPSB:1;
- u8 ucUP:3;
- u8 ucTSInfoAckPolicy:2;
- u8 ucSchedule:1;
- u8 ucReserved:7;
- } field;
-};
-
-union tspec_body {
- u8 charData[55];
-
- struct {
- union qos_tsinfo TSInfo;
- u16 NominalMSDUsize;
- u16 MaxMSDUsize;
- u32 MinServiceItv;
- u32 MaxServiceItv;
- u32 InactivityItv;
- u32 SuspenItv;
- u32 ServiceStartTime;
- u32 MinDataRate;
- u32 MeanDataRate;
- u32 PeakDataRate;
- u32 MaxBurstSize;
- u32 DelayBound;
- u32 MinPhyRate;
- u16 SurplusBandwidthAllowance;
- u16 MediumTime;
- } f;
+struct qos_tsinfo {
+ u8 ucTSID:4;
+ u8 ucDirection:2;
};
struct octet_string {
@@ -88,7 +21,6 @@ struct octet_string {
#define AC1_BK 1
#define AC2_VI 2
#define AC3_VO 3
-#define AC_MAX 4
enum direction_value {
DIR_UP = 0,
@@ -97,62 +29,6 @@ enum direction_value {
DIR_BI_DIR = 3,
};
-struct acm {
- u64 UsedTime;
- u64 MediumTime;
- u8 HwAcmCtl;
-};
-
-union qos_tclas {
- struct _TYPE_GENERAL {
- u8 Priority;
- u8 ClassifierType;
- u8 Mask;
- } TYPE_GENERAL;
-
- struct _TYPE0_ETH {
- u8 Priority;
- u8 ClassifierType;
- u8 Mask;
- u8 SrcAddr[ETH_ALEN];
- u8 DstAddr[ETH_ALEN];
- u16 Type;
- } TYPE0_ETH;
-
- struct _TYPE1_IPV4 {
- u8 Priority;
- u8 ClassifierType;
- u8 Mask;
- u8 Version;
- u8 SrcIP[4];
- u8 DstIP[4];
- u16 SrcPort;
- u16 DstPort;
- u8 DSCP;
- u8 Protocol;
- u8 Reserved;
- } TYPE1_IPV4;
-
- struct _TYPE1_IPV6 {
- u8 Priority;
- u8 ClassifierType;
- u8 Mask;
- u8 Version;
- u8 SrcIP[16];
- u8 DstIP[16];
- u16 SrcPort;
- u16 DstPort;
- u8 FlowLabel[3];
- } TYPE1_IPV6;
-
- struct _TYPE2_8021Q {
- u8 Priority;
- u8 ClassifierType;
- u8 Mask;
- u16 TagType;
- } TYPE2_8021Q;
-};
-
union aci_aifsn {
u8 charData;
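
[Note: the BIT0..BIT31 constants dropped from this header duplicate the kernel's BIT() macro, which the rtllib.h hunks below switch to. A rough sketch of the equivalence, assuming the usual <linux/bits.h> definition of BIT(n) as roughly (1UL << (n)):

	#include <linux/bits.h>

	#define RT_RF_OFF_LEVL_HALT_NIC	BIT(3)	/* == 0x00000008, the old BIT3  */
	#define RF_CHANGE_BY_SW		BIT(31)	/* == 0x80000000, the old BIT31 */
]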
diff --git a/drivers/staging/rtl8192e/rtl819x_TS.h b/drivers/staging/rtl8192e/rtl819x_TS.h
index 0e851d4221a7..fff36315f174 100644
--- a/drivers/staging/rtl8192e/rtl819x_TS.h
+++ b/drivers/staging/rtl8192e/rtl819x_TS.h
@@ -10,7 +10,6 @@
#define TS_ADDBA_DELAY 60
#define TOTAL_TS_NUM 16
-#define TCLAS_NUM 4
enum tr_select {
TX_DIR = 0,
@@ -19,11 +18,8 @@ enum tr_select {
struct ts_common_info {
struct list_head List;
- u8 Addr[ETH_ALEN];
- union tspec_body TSpec;
- union qos_tclas TClass[TCLAS_NUM];
- u8 TClasProc;
- u8 TClasNum;
+ u8 addr[ETH_ALEN];
+ struct qos_tsinfo TSpec;
};
struct tx_ts_record {
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index 419ff72f2ba7..3206fdb3e65a 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -10,10 +10,9 @@
static void RxPktPendingTimeout(struct timer_list *t)
{
- struct rx_ts_record *pRxTs = from_timer(pRxTs, t,
- rx_pkt_pending_timer);
- struct rtllib_device *ieee = container_of(pRxTs, struct rtllib_device,
- RxTsRecord[pRxTs->num]);
+ struct rx_ts_record *ts = from_timer(ts, t, rx_pkt_pending_timer);
+ struct rtllib_device *ieee = container_of(ts, struct rtllib_device,
+ RxTsRecord[ts->num]);
struct rx_reorder_entry *pReorderEntry = NULL;
@@ -22,24 +21,24 @@ static void RxPktPendingTimeout(struct timer_list *t)
bool bPktInBuf = false;
spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
- if (pRxTs->rx_timeout_indicate_seq != 0xffff) {
- while (!list_empty(&pRxTs->rx_pending_pkt_list)) {
+ if (ts->rx_timeout_indicate_seq != 0xffff) {
+ while (!list_empty(&ts->rx_pending_pkt_list)) {
pReorderEntry = (struct rx_reorder_entry *)
- list_entry(pRxTs->rx_pending_pkt_list.prev,
+ list_entry(ts->rx_pending_pkt_list.prev,
struct rx_reorder_entry, List);
if (index == 0)
- pRxTs->rx_indicate_seq = pReorderEntry->SeqNum;
+ ts->rx_indicate_seq = pReorderEntry->SeqNum;
if (SN_LESS(pReorderEntry->SeqNum,
- pRxTs->rx_indicate_seq) ||
+ ts->rx_indicate_seq) ||
SN_EQUAL(pReorderEntry->SeqNum,
- pRxTs->rx_indicate_seq)) {
+ ts->rx_indicate_seq)) {
list_del_init(&pReorderEntry->List);
if (SN_EQUAL(pReorderEntry->SeqNum,
- pRxTs->rx_indicate_seq))
- pRxTs->rx_indicate_seq =
- (pRxTs->rx_indicate_seq + 1) % 4096;
+ ts->rx_indicate_seq))
+ ts->rx_indicate_seq =
+ (ts->rx_indicate_seq + 1) % 4096;
netdev_dbg(ieee->dev,
"%s(): Indicate SeqNum: %d\n",
@@ -58,7 +57,7 @@ static void RxPktPendingTimeout(struct timer_list *t)
}
if (index > 0) {
- pRxTs->rx_timeout_indicate_seq = 0xffff;
+ ts->rx_timeout_indicate_seq = 0xffff;
if (index > REORDER_WIN_SIZE) {
netdev_warn(ieee->dev,
@@ -72,9 +71,9 @@ static void RxPktPendingTimeout(struct timer_list *t)
bPktInBuf = false;
}
- if (bPktInBuf && (pRxTs->rx_timeout_indicate_seq == 0xffff)) {
- pRxTs->rx_timeout_indicate_seq = pRxTs->rx_indicate_seq;
- mod_timer(&pRxTs->rx_pkt_pending_timer, jiffies +
+ if (bPktInBuf && (ts->rx_timeout_indicate_seq == 0xffff)) {
+ ts->rx_timeout_indicate_seq = ts->rx_indicate_seq;
+ mod_timer(&ts->rx_pkt_pending_timer, jiffies +
msecs_to_jiffies(ieee->ht_info->rx_reorder_pending_time)
);
}
@@ -94,37 +93,34 @@ static void TsAddBaProcess(struct timer_list *t)
static void ResetTsCommonInfo(struct ts_common_info *pTsCommonInfo)
{
- eth_zero_addr(pTsCommonInfo->Addr);
- memset(&pTsCommonInfo->TSpec, 0, sizeof(union tspec_body));
- memset(&pTsCommonInfo->TClass, 0, sizeof(union qos_tclas) * TCLAS_NUM);
- pTsCommonInfo->TClasProc = 0;
- pTsCommonInfo->TClasNum = 0;
+ eth_zero_addr(pTsCommonInfo->addr);
+ memset(&pTsCommonInfo->TSpec, 0, sizeof(struct qos_tsinfo));
}
-static void ResetTxTsEntry(struct tx_ts_record *pTS)
+static void ResetTxTsEntry(struct tx_ts_record *ts)
{
- ResetTsCommonInfo(&pTS->TsCommonInfo);
- pTS->TxCurSeq = 0;
- pTS->bAddBaReqInProgress = false;
- pTS->bAddBaReqDelayed = false;
- pTS->bUsingBa = false;
- pTS->bDisable_AddBa = false;
- rtllib_reset_ba_entry(&pTS->TxAdmittedBARecord);
- rtllib_reset_ba_entry(&pTS->TxPendingBARecord);
+ ResetTsCommonInfo(&ts->TsCommonInfo);
+ ts->TxCurSeq = 0;
+ ts->bAddBaReqInProgress = false;
+ ts->bAddBaReqDelayed = false;
+ ts->bUsingBa = false;
+ ts->bDisable_AddBa = false;
+ rtllib_reset_ba_entry(&ts->TxAdmittedBARecord);
+ rtllib_reset_ba_entry(&ts->TxPendingBARecord);
}
-static void ResetRxTsEntry(struct rx_ts_record *pTS)
+static void ResetRxTsEntry(struct rx_ts_record *ts)
{
- ResetTsCommonInfo(&pTS->ts_common_info);
- pTS->rx_indicate_seq = 0xffff;
- pTS->rx_timeout_indicate_seq = 0xffff;
- rtllib_reset_ba_entry(&pTS->rx_admitted_ba_record);
+ ResetTsCommonInfo(&ts->ts_common_info);
+ ts->rx_indicate_seq = 0xffff;
+ ts->rx_timeout_indicate_seq = 0xffff;
+ rtllib_reset_ba_entry(&ts->rx_admitted_ba_record);
}
-void TSInitialize(struct rtllib_device *ieee)
+void rtllib_ts_init(struct rtllib_device *ieee)
{
struct tx_ts_record *pTxTS = ieee->TxTsRecord;
- struct rx_ts_record *pRxTS = ieee->RxTsRecord;
+ struct rx_ts_record *rxts = ieee->RxTsRecord;
struct rx_reorder_entry *pRxReorderEntry = ieee->RxReorderEntry;
u8 count = 0;
@@ -151,17 +147,17 @@ void TSInitialize(struct rtllib_device *ieee)
INIT_LIST_HEAD(&ieee->Rx_TS_Pending_List);
INIT_LIST_HEAD(&ieee->Rx_TS_Unused_List);
for (count = 0; count < TOTAL_TS_NUM; count++) {
- pRxTS->num = count;
- INIT_LIST_HEAD(&pRxTS->rx_pending_pkt_list);
- timer_setup(&pRxTS->rx_admitted_ba_record.timer,
+ rxts->num = count;
+ INIT_LIST_HEAD(&rxts->rx_pending_pkt_list);
+ timer_setup(&rxts->rx_admitted_ba_record.timer,
rtllib_rx_ba_inact_timeout, 0);
- timer_setup(&pRxTS->rx_pkt_pending_timer, RxPktPendingTimeout, 0);
+ timer_setup(&rxts->rx_pkt_pending_timer, RxPktPendingTimeout, 0);
- ResetRxTsEntry(pRxTS);
- list_add_tail(&pRxTS->ts_common_info.List,
+ ResetRxTsEntry(rxts);
+ list_add_tail(&rxts->ts_common_info.List,
&ieee->Rx_TS_Unused_List);
- pRxTS++;
+ rxts++;
}
INIT_LIST_HEAD(&ieee->RxReorder_Unused_List);
for (count = 0; count < REORDER_ENTRY_NUM; count++) {
@@ -174,7 +170,7 @@ void TSInitialize(struct rtllib_device *ieee)
}
static struct ts_common_info *SearchAdmitTRStream(struct rtllib_device *ieee,
- u8 *Addr, u8 TID,
+ u8 *addr, u8 TID,
enum tr_select TxRxSelect)
{
u8 dir;
@@ -182,21 +178,14 @@ static struct ts_common_info *SearchAdmitTRStream(struct rtllib_device *ieee,
struct list_head *psearch_list;
struct ts_common_info *pRet = NULL;
- if (ieee->iw_mode == IW_MODE_ADHOC) {
- if (TxRxSelect == TX_DIR)
- search_dir[DIR_UP] = true;
- else
- search_dir[DIR_DOWN] = true;
+ if (TxRxSelect == TX_DIR) {
+ search_dir[DIR_UP] = true;
+ search_dir[DIR_BI_DIR] = true;
+ search_dir[DIR_DIRECT] = true;
} else {
- if (TxRxSelect == TX_DIR) {
- search_dir[DIR_UP] = true;
- search_dir[DIR_BI_DIR] = true;
- search_dir[DIR_DIRECT] = true;
- } else {
- search_dir[DIR_DOWN] = true;
- search_dir[DIR_BI_DIR] = true;
- search_dir[DIR_DIRECT] = true;
- }
+ search_dir[DIR_DOWN] = true;
+ search_dir[DIR_BI_DIR] = true;
+ search_dir[DIR_DIRECT] = true;
}
if (TxRxSelect == TX_DIR)
@@ -208,9 +197,9 @@ static struct ts_common_info *SearchAdmitTRStream(struct rtllib_device *ieee,
if (!search_dir[dir])
continue;
list_for_each_entry(pRet, psearch_list, List) {
- if (memcmp(pRet->Addr, Addr, 6) == 0 &&
- pRet->TSpec.f.TSInfo.field.ucTSID == TID &&
- pRet->TSpec.f.TSInfo.field.ucDirection == dir)
+ if (memcmp(pRet->addr, addr, 6) == 0 &&
+ pRet->TSpec.ucTSID == TID &&
+ pRet->TSpec.ucDirection == dir)
break;
}
if (&pRet->List != psearch_list)
@@ -222,40 +211,30 @@ static struct ts_common_info *SearchAdmitTRStream(struct rtllib_device *ieee,
return NULL;
}
-static void MakeTSEntry(struct ts_common_info *pTsCommonInfo, u8 *Addr,
- union tspec_body *pTSPEC, union qos_tclas *pTCLAS,
- u8 TCLAS_Num, u8 TCLAS_Proc)
+static void MakeTSEntry(struct ts_common_info *pTsCommonInfo, u8 *addr,
+ struct qos_tsinfo *pTSPEC)
{
- u8 count;
-
if (!pTsCommonInfo)
return;
- memcpy(pTsCommonInfo->Addr, Addr, 6);
+ memcpy(pTsCommonInfo->addr, addr, 6);
if (pTSPEC)
memcpy((u8 *)(&(pTsCommonInfo->TSpec)), (u8 *)pTSPEC,
- sizeof(union tspec_body));
-
- for (count = 0; count < TCLAS_Num; count++)
- memcpy((u8 *)(&(pTsCommonInfo->TClass[count])),
- (u8 *)pTCLAS, sizeof(union qos_tclas));
-
- pTsCommonInfo->TClasProc = TCLAS_Proc;
- pTsCommonInfo->TClasNum = TCLAS_Num;
+ sizeof(struct qos_tsinfo));
}
-bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
- u8 *Addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs)
+bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS,
+ u8 *addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs)
{
u8 UP = 0;
- union tspec_body TSpec;
- union qos_tsinfo *pTSInfo = &TSpec.f.TSInfo;
+ struct qos_tsinfo TSpec;
+ struct qos_tsinfo *ts_info = &TSpec;
struct list_head *pUnusedList;
struct list_head *pAddmitList;
enum direction_value Dir;
- if (is_multicast_ether_addr(Addr)) {
+ if (is_multicast_ether_addr(addr)) {
netdev_warn(ieee->dev, "Get TS for Broadcast or Multicast\n");
return false;
}
@@ -286,7 +265,7 @@ bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
}
}
- *ppTS = SearchAdmitTRStream(ieee, Addr, UP, TxRxSelect);
+ *ppTS = SearchAdmitTRStream(ieee, addr, UP, TxRxSelect);
if (*ppTS)
return true;
@@ -316,27 +295,20 @@ bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
TsCommonInfo);
ResetTxTsEntry(tmp);
} else {
- struct rx_ts_record *tmp =
+ struct rx_ts_record *ts =
container_of(*ppTS,
struct rx_ts_record,
ts_common_info);
- ResetRxTsEntry(tmp);
+ ResetRxTsEntry(ts);
}
netdev_dbg(ieee->dev,
"to init current TS, UP:%d, Dir:%d, addr: %pM ppTs=%p\n",
- UP, Dir, Addr, *ppTS);
- pTSInfo->field.ucTrafficType = 0;
- pTSInfo->field.ucTSID = UP;
- pTSInfo->field.ucDirection = Dir;
- pTSInfo->field.ucAccessPolicy = 1;
- pTSInfo->field.ucAggregation = 0;
- pTSInfo->field.ucPSB = 0;
- pTSInfo->field.ucUP = UP;
- pTSInfo->field.ucTSInfoAckPolicy = 0;
- pTSInfo->field.ucSchedule = 0;
-
- MakeTSEntry(*ppTS, Addr, &TSpec, NULL, 0, 0);
+ UP, Dir, addr, *ppTS);
+ ts_info->ucTSID = UP;
+ ts_info->ucDirection = Dir;
+
+ MakeTSEntry(*ppTS, addr, &TSpec);
list_add_tail(&((*ppTS)->List), pAddmitList);
return true;
@@ -355,14 +327,14 @@ static void RemoveTsEntry(struct rtllib_device *ieee,
if (TxRxSelect == RX_DIR) {
struct rx_reorder_entry *pRxReorderEntry;
- struct rx_ts_record *pRxTS = (struct rx_ts_record *)pTs;
+ struct rx_ts_record *ts = (struct rx_ts_record *)pTs;
- if (timer_pending(&pRxTS->rx_pkt_pending_timer))
- del_timer_sync(&pRxTS->rx_pkt_pending_timer);
+ if (timer_pending(&ts->rx_pkt_pending_timer))
+ del_timer_sync(&ts->rx_pkt_pending_timer);
- while (!list_empty(&pRxTS->rx_pending_pkt_list)) {
+ while (!list_empty(&ts->rx_pending_pkt_list)) {
pRxReorderEntry = (struct rx_reorder_entry *)
- list_entry(pRxTS->rx_pending_pkt_list.prev,
+ list_entry(ts->rx_pending_pkt_list.prev,
struct rx_reorder_entry, List);
netdev_dbg(ieee->dev, "%s(): Delete SeqNum %d!\n",
__func__, pRxReorderEntry->SeqNum);
@@ -388,43 +360,43 @@ static void RemoveTsEntry(struct rtllib_device *ieee,
}
}
-void RemovePeerTS(struct rtllib_device *ieee, u8 *Addr)
+void RemovePeerTS(struct rtllib_device *ieee, u8 *addr)
{
- struct ts_common_info *pTS, *pTmpTS;
+ struct ts_common_info *ts, *pTmpTS;
- netdev_info(ieee->dev, "===========>%s, %pM\n", __func__, Addr);
+ netdev_info(ieee->dev, "===========>%s, %pM\n", __func__, addr);
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List) {
- if (memcmp(pTS->Addr, Addr, 6) == 0) {
- RemoveTsEntry(ieee, pTS, TX_DIR);
- list_del_init(&pTS->List);
- list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
+ list_for_each_entry_safe(ts, pTmpTS, &ieee->Tx_TS_Pending_List, List) {
+ if (memcmp(ts->addr, addr, 6) == 0) {
+ RemoveTsEntry(ieee, ts, TX_DIR);
+ list_del_init(&ts->List);
+ list_add_tail(&ts->List, &ieee->Tx_TS_Unused_List);
}
}
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, List) {
- if (memcmp(pTS->Addr, Addr, 6) == 0) {
+ list_for_each_entry_safe(ts, pTmpTS, &ieee->Tx_TS_Admit_List, List) {
+ if (memcmp(ts->addr, addr, 6) == 0) {
netdev_info(ieee->dev,
"====>remove Tx_TS_admin_list\n");
- RemoveTsEntry(ieee, pTS, TX_DIR);
- list_del_init(&pTS->List);
- list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
+ RemoveTsEntry(ieee, ts, TX_DIR);
+ list_del_init(&ts->List);
+ list_add_tail(&ts->List, &ieee->Tx_TS_Unused_List);
}
}
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, List) {
- if (memcmp(pTS->Addr, Addr, 6) == 0) {
- RemoveTsEntry(ieee, pTS, RX_DIR);
- list_del_init(&pTS->List);
- list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
+ list_for_each_entry_safe(ts, pTmpTS, &ieee->Rx_TS_Pending_List, List) {
+ if (memcmp(ts->addr, addr, 6) == 0) {
+ RemoveTsEntry(ieee, ts, RX_DIR);
+ list_del_init(&ts->List);
+ list_add_tail(&ts->List, &ieee->Rx_TS_Unused_List);
}
}
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, List) {
- if (memcmp(pTS->Addr, Addr, 6) == 0) {
- RemoveTsEntry(ieee, pTS, RX_DIR);
- list_del_init(&pTS->List);
- list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
+ list_for_each_entry_safe(ts, pTmpTS, &ieee->Rx_TS_Admit_List, List) {
+ if (memcmp(ts->addr, addr, 6) == 0) {
+ RemoveTsEntry(ieee, ts, RX_DIR);
+ list_del_init(&ts->List);
+ list_add_tail(&ts->List, &ieee->Rx_TS_Unused_List);
}
}
}
@@ -432,30 +404,30 @@ EXPORT_SYMBOL(RemovePeerTS);
void RemoveAllTS(struct rtllib_device *ieee)
{
- struct ts_common_info *pTS, *pTmpTS;
+ struct ts_common_info *ts, *pTmpTS;
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List) {
- RemoveTsEntry(ieee, pTS, TX_DIR);
- list_del_init(&pTS->List);
- list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
+ list_for_each_entry_safe(ts, pTmpTS, &ieee->Tx_TS_Pending_List, List) {
+ RemoveTsEntry(ieee, ts, TX_DIR);
+ list_del_init(&ts->List);
+ list_add_tail(&ts->List, &ieee->Tx_TS_Unused_List);
}
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, List) {
- RemoveTsEntry(ieee, pTS, TX_DIR);
- list_del_init(&pTS->List);
- list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
+ list_for_each_entry_safe(ts, pTmpTS, &ieee->Tx_TS_Admit_List, List) {
+ RemoveTsEntry(ieee, ts, TX_DIR);
+ list_del_init(&ts->List);
+ list_add_tail(&ts->List, &ieee->Tx_TS_Unused_List);
}
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, List) {
- RemoveTsEntry(ieee, pTS, RX_DIR);
- list_del_init(&pTS->List);
- list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
+ list_for_each_entry_safe(ts, pTmpTS, &ieee->Rx_TS_Pending_List, List) {
+ RemoveTsEntry(ieee, ts, RX_DIR);
+ list_del_init(&ts->List);
+ list_add_tail(&ts->List, &ieee->Rx_TS_Unused_List);
}
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, List) {
- RemoveTsEntry(ieee, pTS, RX_DIR);
- list_del_init(&pTS->List);
- list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
+ list_for_each_entry_safe(ts, pTmpTS, &ieee->Rx_TS_Admit_List, List) {
+ RemoveTsEntry(ieee, ts, RX_DIR);
+ list_del_init(&ts->List);
+ list_add_tail(&ts->List, &ieee->Rx_TS_Unused_List);
}
}
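
[Note: the reorder logic above relies on SN_LESS()/SN_EQUAL() from rtllib.h to compare 12-bit 802.11 sequence numbers with wraparound, and advances rx_indicate_seq modulo 4096. A small standalone sketch of how that comparison behaves, for illustration only:

	#define SN_LESS(a, b)	(((a - b) & 0x800) != 0)
	#define SN_EQUAL(a, b)	((a) == (b))

	/* Sequence numbers live in 0..4095; SN_LESS treats a as "older" than b
	 * when a lies in the 2048-value window behind b, so the test survives
	 * wraparound:
	 *
	 *   SN_LESS(10, 20)   -> true   (10 is older than 20)
	 *   SN_LESS(4095, 3)  -> true   (4095 wrapped, still older than 3)
	 *   SN_LESS(20, 10)   -> false
	 *
	 * and the indicated sequence is advanced with
	 *   ts->rx_indicate_seq = (ts->rx_indicate_seq + 1) % 4096;
	 */
]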
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index bfa4dbf94d60..d2cf3cfaaaba 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -31,7 +31,6 @@
#include <linux/delay.h>
#include <linux/wireless.h>
-#include "rtllib_debug.h"
#include "rtl819x_HT.h"
#include "rtl819x_BA.h"
#include "rtl819x_TS.h"
@@ -92,7 +91,7 @@ static inline void *netdev_priv_rsl(struct net_device *dev)
#define IE_CISCO_FLAG_POSITION 0x08
#define SUPPORT_CKIP_MIC 0x08
#define SUPPORT_CKIP_PK 0x10
-#define RT_RF_OFF_LEVL_HALT_NIC BIT3
+#define RT_RF_OFF_LEVL_HALT_NIC BIT(3)
#define RT_IN_PS_LEVEL(psc, _PS_FLAG) \
((psc->CurPsLevel & _PS_FLAG) ? true : false)
#define RT_CLEAR_PS_LEVEL(psc, _PS_FLAG) \
@@ -172,7 +171,7 @@ struct sw_chnl_cmd {
u32 Para1;
u32 Para2;
u32 msDelay;
-} __packed;
+};
/*--------------------------Define -------------------------------------------*/
#define MGN_1M 0x02
@@ -332,66 +331,23 @@ enum rt_op_mode {
#define MIN_FRAG_THRESHOLD 256U
#define MAX_FRAG_THRESHOLD 2346U
-/* Frame control field constants */
-#define RTLLIB_FCTL_FTYPE 0x000c
-#define RTLLIB_FCTL_STYPE 0x00f0
-#define RTLLIB_FCTL_FRAMETYPE 0x00fc
-#define RTLLIB_FCTL_TODS 0x0100
-#define RTLLIB_FCTL_FROMDS 0x0200
-#define RTLLIB_FCTL_DSTODS 0x0300
-#define RTLLIB_FCTL_MOREFRAGS 0x0400
-#define RTLLIB_FCTL_RETRY 0x0800
-#define RTLLIB_FCTL_PM 0x1000
-#define RTLLIB_FCTL_MOREDATA 0x2000
-#define RTLLIB_FCTL_WEP 0x4000
-#define RTLLIB_FCTL_ORDER 0x8000
-
#define RTLLIB_FTYPE_MGMT 0x0000
#define RTLLIB_FTYPE_CTL 0x0004
#define RTLLIB_FTYPE_DATA 0x0008
-/* management */
-#define RTLLIB_STYPE_ASSOC_REQ 0x0000
-#define RTLLIB_STYPE_ASSOC_RESP 0x0010
-#define RTLLIB_STYPE_REASSOC_REQ 0x0020
-#define RTLLIB_STYPE_REASSOC_RESP 0x0030
-#define RTLLIB_STYPE_PROBE_REQ 0x0040
-#define RTLLIB_STYPE_PROBE_RESP 0x0050
-#define RTLLIB_STYPE_BEACON 0x0080
-#define RTLLIB_STYPE_ATIM 0x0090
-#define RTLLIB_STYPE_DISASSOC 0x00A0
-#define RTLLIB_STYPE_AUTH 0x00B0
-#define RTLLIB_STYPE_DEAUTH 0x00C0
-#define RTLLIB_STYPE_MANAGE_ACT 0x00D0
-
-/* control */
-#define RTLLIB_STYPE_PSPOLL 0x00A0
-#define RTLLIB_STYPE_RTS 0x00B0
-#define RTLLIB_STYPE_CTS 0x00C0
-#define RTLLIB_STYPE_ACK 0x00D0
-
-/* data */
-#define RTLLIB_STYPE_DATA 0x0000
-#define RTLLIB_STYPE_DATA_CFACK 0x0010
-#define RTLLIB_STYPE_DATA_CFPOLL 0x0020
-#define RTLLIB_STYPE_DATA_CFACKPOLL 0x0030
-#define RTLLIB_STYPE_NULLFUNC 0x0040
-#define RTLLIB_STYPE_QOS_DATA 0x0080
-#define RTLLIB_STYPE_QOS_NULL 0x00C0
-
#define RTLLIB_SCTL_FRAG 0x000F
#define RTLLIB_SCTL_SEQ 0xFFF0
/* QOS control */
#define RTLLIB_QCTL_TID 0x000F
-#define FC_QOS_BIT BIT7
+#define FC_QOS_BIT BIT(7)
#define IsDataFrame(pdu) (((pdu[0] & 0x0C) == 0x08) ? true : false)
#define IsLegacyDataFrame(pdu) (IsDataFrame(pdu) && (!(pdu[0]&FC_QOS_BIT)))
#define IsQoSDataFrame(pframe) \
- ((*(u16 *)pframe&(RTLLIB_STYPE_QOS_DATA|RTLLIB_FTYPE_DATA)) == \
- (RTLLIB_STYPE_QOS_DATA|RTLLIB_FTYPE_DATA))
-#define Frame_Order(pframe) (*(u16 *)pframe&RTLLIB_FCTL_ORDER)
+ ((*(u16 *)pframe&(IEEE80211_STYPE_QOS_DATA|RTLLIB_FTYPE_DATA)) == \
+ (IEEE80211_STYPE_QOS_DATA|RTLLIB_FTYPE_DATA))
+#define Frame_Order(pframe) (*(u16 *)pframe&IEEE80211_FCTL_ORDER)
#define SN_LESS(a, b) (((a-b)&0x800) != 0)
#define SN_EQUAL(a, b) (a == b)
#define MAX_DEV_ADDR_SIZE 8
@@ -455,11 +411,10 @@ enum _REG_PREAMBLE_MODE {
#define SNAP_SIZE sizeof(struct rtllib_snap_hdr)
-#define WLAN_FC_GET_TYPE(fc) ((fc) & RTLLIB_FCTL_FTYPE)
-#define WLAN_FC_GET_STYPE(fc) ((fc) & RTLLIB_FCTL_STYPE)
-#define WLAN_FC_MORE_DATA(fc) ((fc) & RTLLIB_FCTL_MOREDATA)
+#define WLAN_FC_GET_TYPE(fc) ((fc) & IEEE80211_FCTL_FTYPE)
+#define WLAN_FC_GET_STYPE(fc) ((fc) & IEEE80211_FCTL_STYPE)
+#define WLAN_FC_MORE_DATA(fc) ((fc) & IEEE80211_FCTL_MOREDATA)
-#define WLAN_FC_GET_FRAMETYPE(fc) ((fc) & RTLLIB_FCTL_FRAMETYPE)
#define WLAN_GET_SEQ_FRAG(seq) ((seq) & RTLLIB_SCTL_FRAG)
#define WLAN_GET_SEQ_SEQ(seq) (((seq) & RTLLIB_SCTL_SEQ) >> 4)
@@ -510,13 +465,6 @@ enum _REG_PREAMBLE_MODE {
/* this is stolen from ipw2200 driver */
#define IEEE_IBSS_MAC_HASH_SIZE 31
-struct ieee_ibss_seq {
- u8 mac[ETH_ALEN];
- u16 seq_num[17];
- u16 frag_num[17];
- unsigned long packet_time[17];
- struct list_head list;
-};
/* NOTE: This data is for statistical purposes; not all hardware provides this
* information for frames received. Not setting these will not cause
@@ -673,78 +621,6 @@ enum rtllib_mfie {
* information to determine what type of underlying data type is actually
* stored in the data.
*/
-struct rtllib_pspoll_hdr {
- __le16 frame_ctl;
- __le16 aid;
- u8 bssid[ETH_ALEN];
- u8 ta[ETH_ALEN];
-} __packed;
-
-struct rtllib_hdr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 payload[];
-} __packed;
-
-struct rtllib_hdr_1addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 payload[];
-} __packed;
-
-struct rtllib_hdr_2addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 payload[];
-} __packed;
-
-struct rtllib_hdr_3addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
- u8 payload[];
-} __packed;
-
-struct rtllib_hdr_4addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
- u8 addr4[ETH_ALEN];
- u8 payload[];
-} __packed;
-
-struct rtllib_hdr_3addrqos {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
- __le16 qos_ctl;
- u8 payload[];
-} __packed;
-
-struct rtllib_hdr_4addrqos {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
- u8 addr4[ETH_ALEN];
- __le16 qos_ctl;
- u8 payload[];
-} __packed;
-
struct rtllib_info_element {
u8 id;
u8 len;
@@ -752,32 +628,32 @@ struct rtllib_info_element {
} __packed;
struct rtllib_authentication {
- struct rtllib_hdr_3addr header;
+ struct ieee80211_hdr_3addr header;
__le16 algorithm;
__le16 transaction;
__le16 status;
/*challenge*/
struct rtllib_info_element info_element[];
-} __packed;
+} __packed __aligned(2);
struct rtllib_disauth {
- struct rtllib_hdr_3addr header;
+ struct ieee80211_hdr_3addr header;
__le16 reason;
-} __packed;
+} __packed __aligned(2);
struct rtllib_disassoc {
- struct rtllib_hdr_3addr header;
+ struct ieee80211_hdr_3addr header;
__le16 reason;
-} __packed;
+} __packed __aligned(2);
struct rtllib_probe_request {
- struct rtllib_hdr_3addr header;
+ struct ieee80211_hdr_3addr header;
/* SSID, supported rates */
struct rtllib_info_element info_element[];
-} __packed;
+} __packed __aligned(2);
struct rtllib_probe_response {
- struct rtllib_hdr_3addr header;
+ struct ieee80211_hdr_3addr header;
u32 time_stamp[2];
__le16 beacon_interval;
__le16 capability;
@@ -785,26 +661,26 @@ struct rtllib_probe_response {
* CF params, IBSS params, TIM (if beacon), RSN
*/
struct rtllib_info_element info_element[];
-} __packed;
+} __packed __aligned(2);
/* Alias beacon for probe_response */
#define rtllib_beacon rtllib_probe_response
struct rtllib_assoc_request_frame {
- struct rtllib_hdr_3addr header;
+ struct ieee80211_hdr_3addr header;
__le16 capability;
__le16 listen_interval;
/* SSID, supported rates, RSN */
struct rtllib_info_element info_element[];
-} __packed;
+} __packed __aligned(2);
struct rtllib_assoc_response_frame {
- struct rtllib_hdr_3addr header;
+ struct ieee80211_hdr_3addr header;
__le16 capability;
__le16 status;
__le16 aid;
struct rtllib_info_element info_element[]; /* supported rates */
-} __packed;
+} __packed __aligned(2);
struct rtllib_txb {
u8 nr_frags;
@@ -823,7 +699,7 @@ struct rtllib_rxb {
struct sk_buff *subframes[MAX_SUBFRAME_COUNT];
u8 dst[ETH_ALEN];
u8 src[ETH_ALEN];
-} __packed;
+};
union frameqos {
u16 shortdata;
@@ -947,13 +823,13 @@ static inline const char *eap_get_type(int type)
static inline u8 Frame_QoSTID(u8 *buf)
{
- struct rtllib_hdr_3addr *hdr;
+ struct ieee80211_hdr_3addr *hdr;
u16 fc;
- hdr = (struct rtllib_hdr_3addr *)buf;
- fc = le16_to_cpu(hdr->frame_ctl);
- return (u8)((union frameqos *)(buf + (((fc & RTLLIB_FCTL_TODS) &&
- (fc & RTLLIB_FCTL_FROMDS)) ? 30 : 24)))->field.tid;
+ hdr = (struct ieee80211_hdr_3addr *)buf;
+ fc = le16_to_cpu(hdr->frame_control);
+ return (u8)((union frameqos *)(buf + (((fc & IEEE80211_FCTL_TODS) &&
+ (fc & IEEE80211_FCTL_FROMDS)) ? 30 : 24)))->field.tid;
}
struct eapol {
@@ -1035,7 +911,6 @@ enum {WMM_all_frame, WMM_two_frame, WMM_four_frame, WMM_six_frame};
((up) < 6) ? WME_AC_VI : \
WME_AC_VO)
-#define ETHER_ADDR_LEN 6 /* length of an Ethernet address */
#define ETHERNET_HEADER_SIZE 14 /* length of two Ethernet address
* plus ether type
*/
@@ -1183,13 +1058,6 @@ enum fsync_state {
SW_Fsync
};
-enum rt_ps_mode {
- eActive,
- eMaxPs,
- eFastPs,
- eAutoPs,
-};
-
enum ips_callback_function {
IPS_CALLBACK_NONE = 0,
IPS_CALLBACK_MGNT_LINK_REQUEST = 1,
@@ -1209,7 +1077,6 @@ struct rt_pwr_save_ctrl {
bool bLeisurePs;
u8 LpsIdleCount;
- u8 reg_max_lps_awake_intvl;
u8 LPSAwakeIntvl;
u32 CurPsLevel;
@@ -1217,10 +1084,10 @@ struct rt_pwr_save_ctrl {
#define RT_RF_CHANGE_SOURCE u32
-#define RF_CHANGE_BY_SW BIT31
-#define RF_CHANGE_BY_HW BIT30
-#define RF_CHANGE_BY_PS BIT29
-#define RF_CHANGE_BY_IPS BIT28
+#define RF_CHANGE_BY_SW BIT(31)
+#define RF_CHANGE_BY_HW BIT(30)
+#define RF_CHANGE_BY_PS BIT(29)
+#define RF_CHANGE_BY_IPS BIT(28)
#define RF_CHANGE_BY_INIT 0
enum country_code_type {
@@ -1246,33 +1113,6 @@ enum scan_op_backup_opt {
SCAN_OPT_MAX
};
-enum fw_cmd_io_type {
- FW_CMD_DIG_ENABLE = 0,
- FW_CMD_DIG_DISABLE = 1,
- FW_CMD_DIG_HALT = 2,
- FW_CMD_DIG_RESUME = 3,
- FW_CMD_HIGH_PWR_ENABLE = 4,
- FW_CMD_HIGH_PWR_DISABLE = 5,
- FW_CMD_RA_RESET = 6,
- FW_CMD_RA_ACTIVE = 7,
- FW_CMD_RA_REFRESH_N = 8,
- FW_CMD_RA_REFRESH_BG = 9,
- FW_CMD_RA_INIT = 10,
- FW_CMD_IQK_ENABLE = 11,
- FW_CMD_TXPWR_TRACK_ENABLE = 12,
- FW_CMD_TXPWR_TRACK_DISABLE = 13,
- FW_CMD_TXPWR_TRACK_THERMAL = 14,
- FW_CMD_PAUSE_DM_BY_SCAN = 15,
- FW_CMD_RESUME_DM_BY_SCAN = 16,
- FW_CMD_RA_REFRESH_N_COMB = 17,
- FW_CMD_RA_REFRESH_BG_COMB = 18,
- FW_CMD_ANTENNA_SW_ENABLE = 19,
- FW_CMD_ANTENNA_SW_DISABLE = 20,
- FW_CMD_TX_FEEDBACK_CCX_ENABLE = 21,
- FW_CMD_LPS_ENTER = 22,
- FW_CMD_LPS_LEAVE = 23,
-};
-
#define RT_MAX_LD_SLOT_NUM 10
struct rt_link_detect {
u32 NumRecvBcnInPeriod;
@@ -1303,7 +1143,6 @@ struct sw_cam_table {
#define TOTAL_CAM_ENTRY 32
struct rate_adaptive {
- u8 rate_adaptive_disabled;
u8 ratr_state;
u16 reserve;
@@ -1321,7 +1160,6 @@ struct rate_adaptive {
u8 ping_rssi_enable;
u32 ping_rssi_ratr;
u32 ping_rssi_thresh_for_ra;
- u32 last_ratr;
u8 PreRATRState;
};
@@ -1334,11 +1172,6 @@ struct rt_pmkid_list {
u8 bUsed;
};
-struct rt_intel_promisc_mode {
- bool promiscuous_on;
- bool fltr_src_sta_frame;
-};
-
/*************** DRIVER STATUS *****/
#define STATUS_SCANNING 0
/*************** DRIVER STATUS *****/
@@ -1368,7 +1201,6 @@ struct rtllib_device {
bool bForcedBgMode;
u8 hwsec_active;
- bool is_silent_reset;
bool is_roaming;
bool ieee_up;
bool cannot_notify;
@@ -1418,8 +1250,6 @@ struct rtllib_device {
int scan_age;
int iw_mode; /* operating mode (IW_MODE_*) */
- bool net_promiscuous_md;
- struct rt_intel_promisc_mode intel_promiscuous_md_info;
spinlock_t lock;
spinlock_t wpax_suitlist_lock;
@@ -1496,13 +1326,10 @@ struct rtllib_device {
u8 active_channel_map[MAX_CHANNEL_NUMBER+1];
u8 bss_start_channel;
- u8 ibss_maxjoin_chal;
int rate; /* current rate */
int basic_rate;
- short active_scan;
-
/* this contains flags for selectively enable softmac support */
u16 softmac_features;
@@ -1525,7 +1352,6 @@ struct rtllib_device {
u64 ps_time;
bool polling;
- short raw_tx;
/* used if IEEE_SOFTMAC_TX_QUEUE is set */
short queue_stop;
short scanning_continue;
@@ -1602,7 +1428,6 @@ struct rtllib_device {
struct delayed_work associate_procedure_wq;
struct delayed_work softmac_scan_wq;
struct delayed_work associate_retry_wq;
- struct delayed_work start_ibss_wq;
struct delayed_work hw_wakeup_wq;
struct delayed_work hw_sleep_wq;
struct delayed_work link_change_wq;
@@ -1653,15 +1478,6 @@ struct rtllib_device {
*/
void (*link_change)(struct net_device *dev);
- /* these two function indicates to the HW when to start
- * and stop to send beacons. This is used when the
- * IEEE_SOFTMAC_BEACONS is not set. For now the
- * stop_send_bacons is NOT guaranteed to be called only
- * after start_send_beacons.
- */
- void (*start_send_beacons)(struct net_device *dev);
- void (*stop_send_beacons)(struct net_device *dev);
-
/* power save mode related */
void (*sta_wake_up)(struct net_device *dev);
void (*enter_sleep_state)(struct net_device *dev, u64 time);
@@ -1761,15 +1577,15 @@ static inline int rtllib_get_hdrlen(u16 fc)
switch (WLAN_FC_GET_TYPE(fc)) {
case RTLLIB_FTYPE_DATA:
- if ((fc & RTLLIB_FCTL_FROMDS) && (fc & RTLLIB_FCTL_TODS))
+ if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS))
hdrlen = RTLLIB_4ADDR_LEN; /* Addr4 */
if (RTLLIB_QOS_HAS_SEQ(fc))
hdrlen += 2; /* QOS ctrl*/
break;
case RTLLIB_FTYPE_CTL:
switch (WLAN_FC_GET_STYPE(fc)) {
- case RTLLIB_STYPE_CTS:
- case RTLLIB_STYPE_ACK:
+ case IEEE80211_STYPE_CTS:
+ case IEEE80211_STYPE_ACK:
hdrlen = RTLLIB_1ADDR_LEN;
break;
default:
@@ -1782,21 +1598,6 @@ static inline int rtllib_get_hdrlen(u16 fc)
return hdrlen;
}
-static inline u8 *rtllib_get_payload(struct rtllib_hdr *hdr)
-{
- switch (rtllib_get_hdrlen(le16_to_cpu(hdr->frame_ctl))) {
- case RTLLIB_1ADDR_LEN:
- return ((struct rtllib_hdr_1addr *)hdr)->payload;
- case RTLLIB_2ADDR_LEN:
- return ((struct rtllib_hdr_2addr *)hdr)->payload;
- case RTLLIB_3ADDR_LEN:
- return ((struct rtllib_hdr_3addr *)hdr)->payload;
- case RTLLIB_4ADDR_LEN:
- return ((struct rtllib_hdr_4addr *)hdr)->payload;
- }
- return NULL;
-}
-
static inline int rtllib_is_ofdm_rate(u8 rate)
{
switch (rate & ~RTLLIB_BASIC_RATE_MASK) {
@@ -1841,8 +1642,6 @@ void rtllib_txb_free(struct rtllib_txb *txb);
/* rtllib_rx.c */
int rtllib_rx(struct rtllib_device *ieee, struct sk_buff *skb,
struct rtllib_rx_stats *rx_stats);
-void rtllib_rx_probe_rq(struct rtllib_device *ieee,
- struct sk_buff *skb);
int rtllib_legal_channel(struct rtllib_device *rtllib, u8 channel);
/* rtllib_wx.c */
@@ -1876,7 +1675,6 @@ void rtllib_softmac_new_net(struct rtllib_device *ieee,
void SendDisassociation(struct rtllib_device *ieee, bool deauth, u16 asRsn);
void rtllib_softmac_xmit(struct rtllib_txb *txb, struct rtllib_device *ieee);
-void rtllib_start_ibss(struct rtllib_device *ieee);
int rtllib_softmac_init(struct rtllib_device *ieee);
void rtllib_softmac_free(struct rtllib_device *ieee);
void rtllib_disassociate(struct rtllib_device *ieee);
@@ -1887,23 +1685,18 @@ void rtllib_start_scan_syncro(struct rtllib_device *ieee);
void rtllib_sta_ps_send_null_frame(struct rtllib_device *ieee, short pwr);
void rtllib_sta_ps_send_pspoll_frame(struct rtllib_device *ieee);
void rtllib_start_protocol(struct rtllib_device *ieee);
-void rtllib_stop_protocol(struct rtllib_device *ieee, u8 shutdown);
+void rtllib_stop_protocol(struct rtllib_device *ieee);
void rtllib_EnableNetMonitorMode(struct net_device *dev, bool bInitState);
void rtllib_DisableNetMonitorMode(struct net_device *dev, bool bInitState);
-void rtllib_EnableIntelPromiscuousMode(struct net_device *dev, bool bInitState);
-void rtllib_DisableIntelPromiscuousMode(struct net_device *dev,
- bool bInitState);
-void rtllib_softmac_stop_protocol(struct rtllib_device *ieee,
- u8 mesh_flag, u8 shutdown);
-void rtllib_softmac_start_protocol(struct rtllib_device *ieee, u8 mesh_flag);
+
+void rtllib_softmac_stop_protocol(struct rtllib_device *ieee);
+void rtllib_softmac_start_protocol(struct rtllib_device *ieee);
void rtllib_reset_queue(struct rtllib_device *ieee);
void rtllib_wake_all_queues(struct rtllib_device *ieee);
void rtllib_stop_all_queues(struct rtllib_device *ieee);
struct sk_buff *rtllib_get_beacon(struct rtllib_device *ieee);
-void rtllib_start_send_beacons(struct rtllib_device *ieee);
-void rtllib_stop_send_beacons(struct rtllib_device *ieee);
void notify_wx_assoc_event(struct rtllib_device *ieee);
void rtllib_ps_tx_ack(struct rtllib_device *ieee, short success);
@@ -1947,10 +1740,6 @@ int rtllib_wx_get_freq(struct rtllib_device *ieee, struct iw_request_info *a,
union iwreq_data *wrqu, char *b);
void rtllib_wx_sync_scan_wq(void *data);
-int rtllib_wx_set_rawtx(struct rtllib_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
int rtllib_wx_get_name(struct rtllib_device *ieee, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra);
@@ -1972,7 +1761,7 @@ int rtllib_wx_get_rts(struct rtllib_device *ieee, struct iw_request_info *info,
void HTSetConnectBwMode(struct rtllib_device *ieee,
enum ht_channel_width bandwidth,
enum ht_extchnl_offset Offset);
-void HTUpdateDefaultSetting(struct rtllib_device *ieee);
+void ht_update_default_setting(struct rtllib_device *ieee);
void HTConstructCapabilityElement(struct rtllib_device *ieee,
u8 *posHTCap, u8 *len,
u8 isEncrypt, bool bAssoc);
@@ -1998,21 +1787,21 @@ u16 TxCountToDataRate(struct rtllib_device *ieee, u8 nDataRate);
int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb);
int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb);
int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb);
-void rtllib_ts_init_add_ba(struct rtllib_device *ieee, struct tx_ts_record *pTS,
- u8 Policy, u8 bOverwritePending);
+void rtllib_ts_init_add_ba(struct rtllib_device *ieee, struct tx_ts_record *ts,
+ u8 policy, u8 bOverwritePending);
void rtllib_ts_init_del_ba(struct rtllib_device *ieee,
struct ts_common_info *pTsCommonInfo,
enum tr_select TxRxSelect);
void rtllib_ba_setup_timeout(struct timer_list *t);
void rtllib_tx_ba_inact_timeout(struct timer_list *t);
void rtllib_rx_ba_inact_timeout(struct timer_list *t);
-void rtllib_reset_ba_entry(struct ba_record *pBA);
-bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS, u8 *Addr,
+void rtllib_reset_ba_entry(struct ba_record *ba);
+bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS, u8 *addr,
u8 TID, enum tr_select TxRxSelect, bool bAddNewTs);
-void TSInitialize(struct rtllib_device *ieee);
+void rtllib_ts_init(struct rtllib_device *ieee);
void TsStartAddBaProcess(struct rtllib_device *ieee,
struct tx_ts_record *pTxTS);
-void RemovePeerTS(struct rtllib_device *ieee, u8 *Addr);
+void RemovePeerTS(struct rtllib_device *ieee, u8 *addr);
void RemoveAllTS(struct rtllib_device *ieee);
static inline const char *escape_essid(const char *essid, u8 essid_len)
@@ -2035,7 +1824,7 @@ bool rtllib_MgntDisconnect(struct rtllib_device *rtllib, u8 asRsn);
* ieee handler to refer to it.
*/
void rtllib_FlushRxTsPendingPkts(struct rtllib_device *ieee,
- struct rx_ts_record *pTS);
+ struct rx_ts_record *ts);
int rtllib_parse_info_param(struct rtllib_device *ieee,
struct rtllib_info_element *info_element,
u16 length,
@@ -2044,7 +1833,6 @@ int rtllib_parse_info_param(struct rtllib_device *ieee,
void rtllib_indicate_packets(struct rtllib_device *ieee,
struct rtllib_rxb **prxbIndicateArray, u8 index);
-void HTUseDefaultSetting(struct rtllib_device *ieee);
#define RT_ASOC_RETRY_LIMIT 5
u8 MgntQuery_TxRateExcludeCCKRates(struct rtllib_device *ieee);
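
[Note: several of the conversions below (rtllib_crypt_ccmp.c, rtllib_crypt_tkip.c, rtllib_rx.c) replace the open-coded RTLLIB_FCTL_DSTODS test with ieee80211_has_a4() from <linux/ieee80211.h>. That helper simply checks that both ToDS and FromDS are set in the frame-control field, i.e. the frame uses the four-address header format; roughly:

	static inline bool ieee80211_has_a4(__le16 fc)
	{
		__le16 tmp = cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS);

		/* both ToDS and FromDS set => four-address (WDS-style) header */
		return (fc & tmp) == tmp;
	}
]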
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
index f88096bcb181..cbb8c8dbe9b0 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
@@ -52,7 +52,7 @@ static void *rtllib_ccmp_init(int key_idx)
struct rtllib_ccmp_data *priv;
priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
- if (priv == NULL)
+ if (!priv)
goto fail;
priv->key_idx = key_idx;
@@ -83,7 +83,7 @@ static void rtllib_ccmp_deinit(void *priv)
kfree(priv);
}
-static int ccmp_init_iv_and_aad(struct rtllib_hdr_4addr *hdr,
+static int ccmp_init_iv_and_aad(struct ieee80211_hdr *hdr,
u8 *pn, u8 *iv, u8 *aad)
{
u8 *pos, qc = 0;
@@ -91,9 +91,8 @@ static int ccmp_init_iv_and_aad(struct rtllib_hdr_4addr *hdr,
u16 fc;
int a4_included, qc_included;
- fc = le16_to_cpu(hdr->frame_ctl);
- a4_included = ((fc & (RTLLIB_FCTL_TODS | RTLLIB_FCTL_FROMDS)) ==
- (RTLLIB_FCTL_TODS | RTLLIB_FCTL_FROMDS));
+ fc = le16_to_cpu(hdr->frame_control);
+ a4_included = ieee80211_has_a4(hdr->frame_control);
qc_included = ((WLAN_FC_GET_TYPE(fc) == RTLLIB_FTYPE_DATA) &&
(WLAN_FC_GET_STYPE(fc) & 0x80));
@@ -134,7 +133,7 @@ static int ccmp_init_iv_and_aad(struct rtllib_hdr_4addr *hdr,
memcpy(&aad[2], &hdr->addr1, ETH_ALEN);
memcpy(&aad[8], &hdr->addr2, ETH_ALEN);
memcpy(&aad[14], &hdr->addr3, ETH_ALEN);
- pos = (u8 *)&hdr->seq_ctl;
+ pos = (u8 *)&hdr->seq_ctrl;
aad[20] = pos[0] & 0x0f;
aad[21] = 0; /* all bits masked */
memset(aad + 22, 0, 8);
@@ -153,7 +152,7 @@ static int rtllib_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
struct rtllib_ccmp_data *key = priv;
int i;
u8 *pos;
- struct rtllib_hdr_4addr *hdr;
+ struct ieee80211_hdr *hdr;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
MAX_DEV_ADDR_SIZE);
if (skb_headroom(skb) < CCMP_HDR_LEN ||
@@ -182,7 +181,7 @@ static int rtllib_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
*pos++ = key->tx_pn[1];
*pos++ = key->tx_pn[0];
- hdr = (struct rtllib_hdr_4addr *)skb->data;
+ hdr = (struct ieee80211_hdr *)skb->data;
if (!tcb_desc->bHwSec) {
struct aead_request *req;
struct scatterlist sg[2];
@@ -220,7 +219,7 @@ static int rtllib_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct rtllib_ccmp_data *key = priv;
u8 keyidx, *pos;
- struct rtllib_hdr_4addr *hdr;
+ struct ieee80211_hdr *hdr;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
MAX_DEV_ADDR_SIZE);
u8 pn[6];
@@ -230,7 +229,7 @@ static int rtllib_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
return -1;
}
- hdr = (struct rtllib_hdr_4addr *)skb->data;
+ hdr = (struct ieee80211_hdr *)skb->data;
pos = skb->data + hdr_len;
keyidx = pos[3];
if (!(keyidx & (1 << 5))) {
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index 9fdfcc017ee6..0244b524a7d4 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -255,7 +255,7 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
struct rtllib_tkip_data *tkey = priv;
int len;
u8 *pos;
- struct rtllib_hdr_4addr *hdr;
+ struct ieee80211_hdr *hdr;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
MAX_DEV_ADDR_SIZE);
int ret = 0;
@@ -266,7 +266,7 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb->len < hdr_len)
return -1;
- hdr = (struct rtllib_hdr_4addr *)skb->data;
+ hdr = (struct ieee80211_hdr *)skb->data;
if (!tcb_desc->bHwSec) {
if (!tkey->tx_phase1_done) {
@@ -330,7 +330,7 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
u8 keyidx, *pos;
u32 iv32;
u16 iv16;
- struct rtllib_hdr_4addr *hdr;
+ struct ieee80211_hdr *hdr;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
MAX_DEV_ADDR_SIZE);
u8 rc4key[16];
@@ -341,7 +341,7 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
if (skb->len < hdr_len + 8 + 4)
return -1;
- hdr = (struct rtllib_hdr_4addr *)skb->data;
+ hdr = (struct ieee80211_hdr *)skb->data;
pos = skb->data + hdr_len;
keyidx = pos[3];
if (!(keyidx & (1 << 5))) {
@@ -465,20 +465,20 @@ out:
static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
{
- struct rtllib_hdr_4addr *hdr11;
+ struct ieee80211_hdr *hdr11;
- hdr11 = (struct rtllib_hdr_4addr *)skb->data;
- switch (le16_to_cpu(hdr11->frame_ctl) &
- (RTLLIB_FCTL_FROMDS | RTLLIB_FCTL_TODS)) {
- case RTLLIB_FCTL_TODS:
+ hdr11 = (struct ieee80211_hdr *)skb->data;
+ switch (le16_to_cpu(hdr11->frame_control) &
+ (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
+ case IEEE80211_FCTL_TODS:
ether_addr_copy(hdr, hdr11->addr3); /* DA */
ether_addr_copy(hdr + ETH_ALEN, hdr11->addr2); /* SA */
break;
- case RTLLIB_FCTL_FROMDS:
+ case IEEE80211_FCTL_FROMDS:
ether_addr_copy(hdr, hdr11->addr1); /* DA */
ether_addr_copy(hdr + ETH_ALEN, hdr11->addr3); /* SA */
break;
- case RTLLIB_FCTL_FROMDS | RTLLIB_FCTL_TODS:
+ case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
ether_addr_copy(hdr, hdr11->addr3); /* DA */
ether_addr_copy(hdr + ETH_ALEN, hdr11->addr4); /* SA */
break;
@@ -501,9 +501,9 @@ static int rtllib_michael_mic_add(struct sk_buff *skb, int hdr_len, void *priv)
{
struct rtllib_tkip_data *tkey = priv;
u8 *pos;
- struct rtllib_hdr_4addr *hdr;
+ struct ieee80211_hdr *hdr;
- hdr = (struct rtllib_hdr_4addr *)skb->data;
+ hdr = (struct ieee80211_hdr *)skb->data;
if (skb_tailroom(skb) < 8 || skb->len < hdr_len) {
netdev_dbg(skb->dev,
@@ -514,7 +514,7 @@ static int rtllib_michael_mic_add(struct sk_buff *skb, int hdr_len, void *priv)
michael_mic_hdr(skb, tkey->tx_hdr);
- if (RTLLIB_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl)))
+ if (RTLLIB_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_control)))
tkey->tx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
pos = skb_put(skb, 8);
if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr,
@@ -525,7 +525,7 @@ static int rtllib_michael_mic_add(struct sk_buff *skb, int hdr_len, void *priv)
}
static void rtllib_michael_mic_failure(struct net_device *dev,
- struct rtllib_hdr_4addr *hdr,
+ struct ieee80211_hdr *hdr,
int keyidx)
{
union iwreq_data wrqu;
@@ -550,15 +550,15 @@ static int rtllib_michael_mic_verify(struct sk_buff *skb, int keyidx,
{
struct rtllib_tkip_data *tkey = priv;
u8 mic[8];
- struct rtllib_hdr_4addr *hdr;
+ struct ieee80211_hdr *hdr;
- hdr = (struct rtllib_hdr_4addr *)skb->data;
+ hdr = (struct ieee80211_hdr *)skb->data;
if (!tkey->key_set)
return -1;
michael_mic_hdr(skb, tkey->rx_hdr);
- if (RTLLIB_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl)))
+ if (RTLLIB_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_control)))
tkey->rx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
if (michael_mic(tkey->rx_tfm_michael, &tkey->key[24], tkey->rx_hdr,
@@ -566,9 +566,9 @@ static int rtllib_michael_mic_verify(struct sk_buff *skb, int keyidx,
return -1;
if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
- struct rtllib_hdr_4addr *hdr;
+ struct ieee80211_hdr *hdr;
- hdr = (struct rtllib_hdr_4addr *)skb->data;
+ hdr = (struct ieee80211_hdr *)skb->data;
netdev_dbg(skb->dev,
"Michael MIC verification failed for MSDU from %pM keyidx=%d\n",
hdr->addr2, keyidx);
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_wep.c b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
index 062285e4d939..21c2b7666d6f 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_wep.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
@@ -35,7 +35,7 @@ static void *prism2_wep_init(int keyidx)
return NULL;
priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
- if (priv == NULL)
+ if (!priv)
return NULL;
priv->key_idx = keyidx;
diff --git a/drivers/staging/rtl8192e/rtllib_debug.h b/drivers/staging/rtl8192e/rtllib_debug.h
deleted file mode 100644
index f6b23defe225..000000000000
--- a/drivers/staging/rtl8192e/rtllib_debug.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
- *
- * Contact Information: wlanfae <wlanfae@realtek.com>
- */
-#ifndef _RTL_DEBUG_H
-#define _RTL_DEBUG_H
-
-#include <linux/bits.h>
-
-/* Allow files to override DRV_NAME */
-#ifndef DRV_NAME
-#define DRV_NAME "rtllib_92e"
-#endif
-
-extern u32 rt_global_debug_component;
-
-/* These are the defines for rt_global_debug_component */
-enum RTL_DEBUG {
- COMP_TRACE = BIT(0),
- COMP_DBG = BIT(1),
- COMP_INIT = BIT(2),
- COMP_RECV = BIT(3),
- COMP_POWER = BIT(6),
- COMP_SWBW = BIT(8),
- COMP_SEC = BIT(9),
- COMP_LPS = BIT(10),
- COMP_QOS = BIT(11),
- COMP_RATE = BIT(12),
- COMP_RXDESC = BIT(13),
- COMP_PHY = BIT(14),
- COMP_DIG = BIT(15),
- COMP_TXAGC = BIT(16),
- COMP_HALDM = BIT(17),
- COMP_POWER_TRACKING = BIT(18),
- COMP_CH = BIT(19),
- COMP_RF = BIT(20),
- COMP_FIRMWARE = BIT(21),
- COMP_RESET = BIT(23),
- COMP_CMDPKT = BIT(24),
- COMP_SCAN = BIT(25),
- COMP_PS = BIT(26),
- COMP_DOWN = BIT(27),
- COMP_INTR = BIT(28),
- COMP_ERR = BIT(31)
-};
-
-#endif
diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
index 2416e0c60255..95b6d6b9429c 100644
--- a/drivers/staging/rtl8192e/rtllib_module.c
+++ b/drivers/staging/rtl8192e/rtllib_module.c
@@ -34,9 +34,6 @@
#include <net/arp.h>
#include "rtllib.h"
-u32 rt_global_debug_component = COMP_ERR;
-EXPORT_SYMBOL(rt_global_debug_component);
-
static inline int rtllib_networks_allocate(struct rtllib_device *ieee)
{
if (ieee->networks)
@@ -114,7 +111,6 @@ struct net_device *alloc_rtllib(int sizeof_priv)
ieee->drop_unencrypted = 0;
ieee->privacy_invoked = 0;
ieee->ieee802_1x = 1;
- ieee->raw_tx = 0;
ieee->hwsec_active = 0;
memset(ieee->swcamtable, 0, sizeof(struct sw_cam_table) * 32);
@@ -126,9 +122,9 @@ struct net_device *alloc_rtllib(int sizeof_priv)
if (!ieee->ht_info)
goto free_softmac;
- HTUpdateDefaultSetting(ieee);
+ ht_update_default_setting(ieee);
HTInitializeHTInfo(ieee);
- TSInitialize(ieee);
+ rtllib_ts_init(ieee);
for (i = 0; i < IEEE_IBSS_MAC_HASH_SIZE; i++)
INIT_LIST_HEAD(&ieee->ibss_mac_hash[i]);
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 40e7bbb17c0d..ecaa4dec3f94 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -63,7 +63,7 @@ rtllib_frag_cache_find(struct rtllib_device *ieee, unsigned int seq,
for (i = 0; i < RTLLIB_FRAG_CACHE_LEN; i++) {
entry = &ieee->frag_cache[tid][i];
- if (entry->skb != NULL &&
+ if (entry->skb &&
time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
netdev_dbg(ieee->dev,
"expiring fragment cache entry seq=%u last_frag=%u\n",
@@ -72,7 +72,7 @@ rtllib_frag_cache_find(struct rtllib_device *ieee, unsigned int seq,
entry->skb = NULL;
}
- if (entry->skb != NULL && entry->seq == seq &&
+ if (entry->skb && entry->seq == seq &&
(entry->last_frag + 1 == frag || frag == -1) &&
memcmp(entry->src_addr, src, ETH_ALEN) == 0 &&
memcmp(entry->dst_addr, dst, ETH_ALEN) == 0)
@@ -85,27 +85,27 @@ rtllib_frag_cache_find(struct rtllib_device *ieee, unsigned int seq,
/* Called only as a tasklet (software IRQ) */
static struct sk_buff *
rtllib_frag_cache_get(struct rtllib_device *ieee,
- struct rtllib_hdr_4addr *hdr)
+ struct ieee80211_hdr *hdr)
{
struct sk_buff *skb = NULL;
- u16 fc = le16_to_cpu(hdr->frame_ctl);
- u16 sc = le16_to_cpu(hdr->seq_ctl);
+ u16 fc = le16_to_cpu(hdr->frame_control);
+ u16 sc = le16_to_cpu(hdr->seq_ctrl);
unsigned int frag = WLAN_GET_SEQ_FRAG(sc);
unsigned int seq = WLAN_GET_SEQ_SEQ(sc);
struct rtllib_frag_entry *entry;
- struct rtllib_hdr_3addrqos *hdr_3addrqos;
- struct rtllib_hdr_4addrqos *hdr_4addrqos;
+ struct ieee80211_qos_hdr *hdr_3addrqos;
+ struct ieee80211_qos_hdr_4addr *hdr_4addrqos;
u8 tid;
- if (((fc & RTLLIB_FCTL_DSTODS) == RTLLIB_FCTL_DSTODS) &&
+ if (ieee80211_has_a4(hdr->frame_control) &&
RTLLIB_QOS_HAS_SEQ(fc)) {
- hdr_4addrqos = (struct rtllib_hdr_4addrqos *)hdr;
- tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & RTLLIB_QCTL_TID;
+ hdr_4addrqos = (struct ieee80211_qos_hdr_4addr *)hdr;
+ tid = le16_to_cpu(hdr_4addrqos->qos_ctrl) & RTLLIB_QCTL_TID;
tid = UP2AC(tid);
tid++;
} else if (RTLLIB_QOS_HAS_SEQ(fc)) {
- hdr_3addrqos = (struct rtllib_hdr_3addrqos *)hdr;
- tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & RTLLIB_QCTL_TID;
+ hdr_3addrqos = (struct ieee80211_qos_hdr *)hdr;
+ tid = le16_to_cpu(hdr_3addrqos->qos_ctrl) & RTLLIB_QCTL_TID;
tid = UP2AC(tid);
tid++;
} else {
@@ -115,7 +115,7 @@ rtllib_frag_cache_get(struct rtllib_device *ieee,
if (frag == 0) {
/* Reserve enough space to fit maximum frame length */
skb = dev_alloc_skb(ieee->dev->mtu +
- sizeof(struct rtllib_hdr_4addr) +
+ sizeof(struct ieee80211_hdr) +
8 /* LLC */ +
2 /* alignment */ +
8 /* WEP */ +
@@ -130,7 +130,7 @@ rtllib_frag_cache_get(struct rtllib_device *ieee,
if (ieee->frag_next_idx[tid] >= RTLLIB_FRAG_CACHE_LEN)
ieee->frag_next_idx[tid] = 0;
- if (entry->skb != NULL)
+ if (entry->skb)
dev_kfree_skb_any(entry->skb);
entry->first_frag_time = jiffies;
@@ -145,7 +145,7 @@ rtllib_frag_cache_get(struct rtllib_device *ieee,
*/
entry = rtllib_frag_cache_find(ieee, seq, frag, tid, hdr->addr2,
hdr->addr1);
- if (entry != NULL) {
+ if (entry) {
entry->last_frag = frag;
skb = entry->skb;
}
@@ -156,25 +156,25 @@ rtllib_frag_cache_get(struct rtllib_device *ieee,
/* Called only as a tasklet (software IRQ) */
static int rtllib_frag_cache_invalidate(struct rtllib_device *ieee,
- struct rtllib_hdr_4addr *hdr)
+ struct ieee80211_hdr *hdr)
{
- u16 fc = le16_to_cpu(hdr->frame_ctl);
- u16 sc = le16_to_cpu(hdr->seq_ctl);
+ u16 fc = le16_to_cpu(hdr->frame_control);
+ u16 sc = le16_to_cpu(hdr->seq_ctrl);
unsigned int seq = WLAN_GET_SEQ_SEQ(sc);
struct rtllib_frag_entry *entry;
- struct rtllib_hdr_3addrqos *hdr_3addrqos;
- struct rtllib_hdr_4addrqos *hdr_4addrqos;
+ struct ieee80211_qos_hdr *hdr_3addrqos;
+ struct ieee80211_qos_hdr_4addr *hdr_4addrqos;
u8 tid;
- if (((fc & RTLLIB_FCTL_DSTODS) == RTLLIB_FCTL_DSTODS) &&
+ if (ieee80211_has_a4(hdr->frame_control) &&
RTLLIB_QOS_HAS_SEQ(fc)) {
- hdr_4addrqos = (struct rtllib_hdr_4addrqos *)hdr;
- tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & RTLLIB_QCTL_TID;
+ hdr_4addrqos = (struct ieee80211_qos_hdr_4addr *)hdr;
+ tid = le16_to_cpu(hdr_4addrqos->qos_ctrl) & RTLLIB_QCTL_TID;
tid = UP2AC(tid);
tid++;
} else if (RTLLIB_QOS_HAS_SEQ(fc)) {
- hdr_3addrqos = (struct rtllib_hdr_3addrqos *)hdr;
- tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & RTLLIB_QCTL_TID;
+ hdr_3addrqos = (struct ieee80211_qos_hdr *)hdr;
+ tid = le16_to_cpu(hdr_3addrqos->qos_ctrl) & RTLLIB_QCTL_TID;
tid = UP2AC(tid);
tid++;
} else {
@@ -184,7 +184,7 @@ static int rtllib_frag_cache_invalidate(struct rtllib_device *ieee,
entry = rtllib_frag_cache_find(ieee, seq, -1, tid, hdr->addr2,
hdr->addr1);
- if (entry == NULL) {
+ if (!entry) {
netdev_dbg(ieee->dev,
"Couldn't invalidate fragment cache entry (seq=%u)\n",
seq);
@@ -210,7 +210,7 @@ rtllib_rx_frame_mgmt(struct rtllib_device *ieee, struct sk_buff *skb,
* this is not mandatory.... but seems that the probe
* response parser uses it
*/
- struct rtllib_hdr_3addr *hdr = (struct rtllib_hdr_3addr *)skb->data;
+ struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)skb->data;
rx_stats->len = skb->len;
rtllib_rx_mgt(ieee, skb, rx_stats);
@@ -233,23 +233,23 @@ static int rtllib_is_eapol_frame(struct rtllib_device *ieee,
{
struct net_device *dev = ieee->dev;
u16 fc, ethertype;
- struct rtllib_hdr_4addr *hdr;
+ struct ieee80211_hdr *hdr;
u8 *pos;
if (skb->len < 24)
return 0;
- hdr = (struct rtllib_hdr_4addr *)skb->data;
- fc = le16_to_cpu(hdr->frame_ctl);
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = le16_to_cpu(hdr->frame_control);
/* check that the frame is unicast frame to us */
- if ((fc & (RTLLIB_FCTL_TODS | RTLLIB_FCTL_FROMDS)) ==
- RTLLIB_FCTL_TODS &&
+ if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
+ IEEE80211_FCTL_TODS &&
memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0 &&
memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) {
/* ToDS frame with own addr BSSID and DA */
- } else if ((fc & (RTLLIB_FCTL_TODS | RTLLIB_FCTL_FROMDS)) ==
- RTLLIB_FCTL_FROMDS &&
+ } else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
+ IEEE80211_FCTL_FROMDS &&
memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
/* FromDS frame with own addr as DA */
} else {
@@ -273,10 +273,10 @@ static inline int
rtllib_rx_frame_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
struct lib80211_crypt_data *crypt)
{
- struct rtllib_hdr_4addr *hdr;
+ struct ieee80211_hdr *hdr;
int res, hdrlen;
- if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL)
+ if (!crypt || !crypt->ops->decrypt_mpdu)
return 0;
if (ieee->hwsec_active) {
@@ -289,8 +289,8 @@ rtllib_rx_frame_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
tcb_desc->bHwSec = 0;
}
- hdr = (struct rtllib_hdr_4addr *)skb->data;
- hdrlen = rtllib_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
+ hdr = (struct ieee80211_hdr *)skb->data;
+ hdrlen = rtllib_get_hdrlen(le16_to_cpu(hdr->frame_control));
atomic_inc(&crypt->refcnt);
res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv);
@@ -313,10 +313,10 @@ static inline int
rtllib_rx_frame_decrypt_msdu(struct rtllib_device *ieee, struct sk_buff *skb,
int keyidx, struct lib80211_crypt_data *crypt)
{
- struct rtllib_hdr_4addr *hdr;
+ struct ieee80211_hdr *hdr;
int res, hdrlen;
- if (crypt == NULL || crypt->ops->decrypt_msdu == NULL)
+ if (!crypt || !crypt->ops->decrypt_msdu)
return 0;
if (ieee->hwsec_active) {
struct cb_desc *tcb_desc = (struct cb_desc *)
@@ -328,8 +328,8 @@ rtllib_rx_frame_decrypt_msdu(struct rtllib_device *ieee, struct sk_buff *skb,
tcb_desc->bHwSec = 0;
}
- hdr = (struct rtllib_hdr_4addr *)skb->data;
- hdrlen = rtllib_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
+ hdr = (struct ieee80211_hdr *)skb->data;
+ hdrlen = rtllib_get_hdrlen(le16_to_cpu(hdr->frame_control));
atomic_inc(&crypt->refcnt);
res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
@@ -347,27 +347,27 @@ rtllib_rx_frame_decrypt_msdu(struct rtllib_device *ieee, struct sk_buff *skb,
/* this function is stolen from ipw2200 driver*/
#define IEEE_PACKET_RETRY_TIME (5 * HZ)
static int is_duplicate_packet(struct rtllib_device *ieee,
- struct rtllib_hdr_4addr *header)
+ struct ieee80211_hdr *header)
{
- u16 fc = le16_to_cpu(header->frame_ctl);
- u16 sc = le16_to_cpu(header->seq_ctl);
+ u16 fc = le16_to_cpu(header->frame_control);
+ u16 sc = le16_to_cpu(header->seq_ctrl);
u16 seq = WLAN_GET_SEQ_SEQ(sc);
u16 frag = WLAN_GET_SEQ_FRAG(sc);
u16 *last_seq, *last_frag;
unsigned long *last_time;
- struct rtllib_hdr_3addrqos *hdr_3addrqos;
- struct rtllib_hdr_4addrqos *hdr_4addrqos;
+ struct ieee80211_qos_hdr *hdr_3addrqos;
+ struct ieee80211_qos_hdr_4addr *hdr_4addrqos;
u8 tid;
- if (((fc & RTLLIB_FCTL_DSTODS) == RTLLIB_FCTL_DSTODS) &&
+ if (ieee80211_has_a4(header->frame_control) &&
RTLLIB_QOS_HAS_SEQ(fc)) {
- hdr_4addrqos = (struct rtllib_hdr_4addrqos *)header;
- tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & RTLLIB_QCTL_TID;
+ hdr_4addrqos = (struct ieee80211_qos_hdr_4addr *)header;
+ tid = le16_to_cpu(hdr_4addrqos->qos_ctrl) & RTLLIB_QCTL_TID;
tid = UP2AC(tid);
tid++;
} else if (RTLLIB_QOS_HAS_SEQ(fc)) {
- hdr_3addrqos = (struct rtllib_hdr_3addrqos *)header;
- tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & RTLLIB_QCTL_TID;
+ hdr_3addrqos = (struct ieee80211_qos_hdr *)header;
+ tid = le16_to_cpu(hdr_3addrqos->qos_ctrl) & RTLLIB_QCTL_TID;
tid = UP2AC(tid);
tid++;
} else {
@@ -375,37 +375,6 @@ static int is_duplicate_packet(struct rtllib_device *ieee,
}
switch (ieee->iw_mode) {
- case IW_MODE_ADHOC:
- {
- struct list_head *p;
- struct ieee_ibss_seq *entry = NULL;
- u8 *mac = header->addr2;
- int index = mac[5] % IEEE_IBSS_MAC_HASH_SIZE;
-
- list_for_each(p, &ieee->ibss_mac_hash[index]) {
- entry = list_entry(p, struct ieee_ibss_seq, list);
- if (!memcmp(entry->mac, mac, ETH_ALEN))
- break;
- }
- if (p == &ieee->ibss_mac_hash[index]) {
- entry = kmalloc(sizeof(struct ieee_ibss_seq),
- GFP_ATOMIC);
- if (!entry)
- return 0;
-
- ether_addr_copy(entry->mac, mac);
- entry->seq_num[tid] = seq;
- entry->frag_num[tid] = frag;
- entry->packet_time[tid] = jiffies;
- list_add(&entry->list, &ieee->ibss_mac_hash[index]);
- return 0;
- }
- last_seq = &entry->seq_num[tid];
- last_frag = &entry->frag_num[tid];
- last_time = &entry->packet_time[tid];
- break;
- }
-
case IW_MODE_INFRA:
last_seq = &ieee->last_rxseq_num[tid];
last_frag = &ieee->last_rxfrag_num[tid];
@@ -435,12 +404,12 @@ drop:
return 1;
}
-static bool AddReorderEntry(struct rx_ts_record *pTS,
+static bool AddReorderEntry(struct rx_ts_record *ts,
struct rx_reorder_entry *pReorderEntry)
{
- struct list_head *pList = &pTS->rx_pending_pkt_list;
+ struct list_head *pList = &ts->rx_pending_pkt_list;
- while (pList->next != &pTS->rx_pending_pkt_list) {
+ while (pList->next != &ts->rx_pending_pkt_list) {
if (SN_LESS(pReorderEntry->SeqNum, ((struct rx_reorder_entry *)
list_entry(pList->next, struct rx_reorder_entry,
List))->SeqNum))
@@ -520,13 +489,13 @@ void rtllib_indicate_packets(struct rtllib_device *ieee,
}
void rtllib_FlushRxTsPendingPkts(struct rtllib_device *ieee,
- struct rx_ts_record *pTS)
+ struct rx_ts_record *ts)
{
struct rx_reorder_entry *pRxReorderEntry;
u8 RfdCnt = 0;
- del_timer_sync(&pTS->rx_pkt_pending_timer);
- while (!list_empty(&pTS->rx_pending_pkt_list)) {
+ del_timer_sync(&ts->rx_pkt_pending_timer);
+ while (!list_empty(&ts->rx_pending_pkt_list)) {
if (RfdCnt >= REORDER_WIN_SIZE) {
netdev_info(ieee->dev,
"-------------->%s() error! RfdCnt >= REORDER_WIN_SIZE\n",
@@ -535,7 +504,7 @@ void rtllib_FlushRxTsPendingPkts(struct rtllib_device *ieee,
}
pRxReorderEntry = (struct rx_reorder_entry *)
- list_entry(pTS->rx_pending_pkt_list.prev,
+ list_entry(ts->rx_pending_pkt_list.prev,
struct rx_reorder_entry, List);
netdev_dbg(ieee->dev, "%s(): Indicate SeqNum %d!\n", __func__,
pRxReorderEntry->SeqNum);
@@ -549,12 +518,12 @@ void rtllib_FlushRxTsPendingPkts(struct rtllib_device *ieee,
}
rtllib_indicate_packets(ieee, ieee->RfdArray, RfdCnt);
- pTS->rx_indicate_seq = 0xffff;
+ ts->rx_indicate_seq = 0xffff;
}
static void RxReorderIndicatePacket(struct rtllib_device *ieee,
struct rtllib_rxb *prxb,
- struct rx_ts_record *pTS, u16 SeqNum)
+ struct rx_ts_record *ts, u16 SeqNum)
{
struct rt_hi_throughput *ht_info = ieee->ht_info;
struct rx_reorder_entry *pReorderEntry = NULL;
@@ -565,21 +534,21 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
unsigned long flags;
netdev_dbg(ieee->dev,
- "%s(): Seq is %d, pTS->rx_indicate_seq is %d, WinSize is %d\n",
- __func__, SeqNum, pTS->rx_indicate_seq, WinSize);
+ "%s(): Seq is %d, ts->rx_indicate_seq is %d, WinSize is %d\n",
+ __func__, SeqNum, ts->rx_indicate_seq, WinSize);
spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
- WinEnd = (pTS->rx_indicate_seq + WinSize - 1) % 4096;
+ WinEnd = (ts->rx_indicate_seq + WinSize - 1) % 4096;
/* Rx Reorder initialize condition.*/
- if (pTS->rx_indicate_seq == 0xffff)
- pTS->rx_indicate_seq = SeqNum;
+ if (ts->rx_indicate_seq == 0xffff)
+ ts->rx_indicate_seq = SeqNum;
/* Drop out the packet which SeqNum is smaller than WinStart */
- if (SN_LESS(SeqNum, pTS->rx_indicate_seq)) {
+ if (SN_LESS(SeqNum, ts->rx_indicate_seq)) {
netdev_dbg(ieee->dev,
"Packet Drop! IndicateSeq: %d, NewSeq: %d\n",
- pTS->rx_indicate_seq, SeqNum);
+ ts->rx_indicate_seq, SeqNum);
ht_info->rx_reorder_drop_counter++;
{
int i;
@@ -597,18 +566,18 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
* 1. Incoming SeqNum is equal to WinStart => Window shift 1
* 2. Incoming SeqNum is larger than the WinEnd => Window shift N
*/
- if (SN_EQUAL(SeqNum, pTS->rx_indicate_seq)) {
- pTS->rx_indicate_seq = (pTS->rx_indicate_seq + 1) % 4096;
+ if (SN_EQUAL(SeqNum, ts->rx_indicate_seq)) {
+ ts->rx_indicate_seq = (ts->rx_indicate_seq + 1) % 4096;
bMatchWinStart = true;
} else if (SN_LESS(WinEnd, SeqNum)) {
if (SeqNum >= (WinSize - 1))
- pTS->rx_indicate_seq = SeqNum + 1 - WinSize;
+ ts->rx_indicate_seq = SeqNum + 1 - WinSize;
else
- pTS->rx_indicate_seq = 4095 -
+ ts->rx_indicate_seq = 4095 -
(WinSize - (SeqNum + 1)) + 1;
netdev_dbg(ieee->dev,
"Window Shift! IndicateSeq: %d, NewSeq: %d\n",
- pTS->rx_indicate_seq, SeqNum);
+ ts->rx_indicate_seq, SeqNum);
}
/* Indication process.
@@ -625,7 +594,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
/* Current packet is going to be indicated.*/
netdev_dbg(ieee->dev,
"Packets indication! IndicateSeq: %d, NewSeq: %d\n",
- pTS->rx_indicate_seq, SeqNum);
+ ts->rx_indicate_seq, SeqNum);
ieee->prxbIndicateArray[0] = prxb;
index = 1;
} else {
@@ -642,12 +611,12 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
pReorderEntry->SeqNum = SeqNum;
pReorderEntry->prxb = prxb;
- if (!AddReorderEntry(pTS, pReorderEntry)) {
+ if (!AddReorderEntry(ts, pReorderEntry)) {
int i;
netdev_dbg(ieee->dev,
"%s(): Duplicate packet is dropped. IndicateSeq: %d, NewSeq: %d\n",
- __func__, pTS->rx_indicate_seq,
+ __func__, ts->rx_indicate_seq,
SeqNum);
list_add_tail(&pReorderEntry->List,
&ieee->RxReorder_Unused_List);
@@ -659,7 +628,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
} else {
netdev_dbg(ieee->dev,
"Pkt insert into struct buffer. IndicateSeq: %d, NewSeq: %d\n",
- pTS->rx_indicate_seq, SeqNum);
+ ts->rx_indicate_seq, SeqNum);
}
} else {
/* Packets are dropped if there are not enough reorder
@@ -682,16 +651,16 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
}
/* Check if there is any packet need indicate.*/
- while (!list_empty(&pTS->rx_pending_pkt_list)) {
+ while (!list_empty(&ts->rx_pending_pkt_list)) {
netdev_dbg(ieee->dev, "%s(): start RREORDER indicate\n",
__func__);
pReorderEntry = (struct rx_reorder_entry *)
- list_entry(pTS->rx_pending_pkt_list.prev,
+ list_entry(ts->rx_pending_pkt_list.prev,
struct rx_reorder_entry,
List);
- if (SN_LESS(pReorderEntry->SeqNum, pTS->rx_indicate_seq) ||
- SN_EQUAL(pReorderEntry->SeqNum, pTS->rx_indicate_seq)) {
+ if (SN_LESS(pReorderEntry->SeqNum, ts->rx_indicate_seq) ||
+ SN_EQUAL(pReorderEntry->SeqNum, ts->rx_indicate_seq)) {
/* This protects the struct buffer from overflow. */
if (index >= REORDER_WIN_SIZE) {
netdev_err(ieee->dev,
@@ -703,8 +672,8 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
list_del_init(&pReorderEntry->List);
- if (SN_EQUAL(pReorderEntry->SeqNum, pTS->rx_indicate_seq))
- pTS->rx_indicate_seq = (pTS->rx_indicate_seq + 1) %
+ if (SN_EQUAL(pReorderEntry->SeqNum, ts->rx_indicate_seq))
+ ts->rx_indicate_seq = (ts->rx_indicate_seq + 1) %
4096;
ieee->prxbIndicateArray[index] = pReorderEntry->prxb;
@@ -724,9 +693,11 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
* Rx buffering.
*/
if (index > 0) {
- if (timer_pending(&pTS->rx_pkt_pending_timer))
- del_timer_sync(&pTS->rx_pkt_pending_timer);
- pTS->rx_timeout_indicate_seq = 0xffff;
+ spin_unlock_irqrestore(&ieee->reorder_spinlock, flags);
+ if (timer_pending(&ts->rx_pkt_pending_timer))
+ del_timer_sync(&ts->rx_pkt_pending_timer);
+ spin_lock_irqsave(&ieee->reorder_spinlock, flags);
+ ts->rx_timeout_indicate_seq = 0xffff;
if (index > REORDER_WIN_SIZE) {
netdev_err(ieee->dev,
@@ -740,11 +711,13 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
bPktInBuf = false;
}
- if (bPktInBuf && pTS->rx_timeout_indicate_seq == 0xffff) {
+ if (bPktInBuf && ts->rx_timeout_indicate_seq == 0xffff) {
netdev_dbg(ieee->dev, "%s(): SET rx timeout timer\n", __func__);
- pTS->rx_timeout_indicate_seq = pTS->rx_indicate_seq;
- mod_timer(&pTS->rx_pkt_pending_timer, jiffies +
+ ts->rx_timeout_indicate_seq = ts->rx_indicate_seq;
+ spin_unlock_irqrestore(&ieee->reorder_spinlock, flags);
+ mod_timer(&ts->rx_pkt_pending_timer, jiffies +
msecs_to_jiffies(ht_info->rx_reorder_pending_time));
+ spin_lock_irqsave(&ieee->reorder_spinlock, flags);
}
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
}
@@ -753,10 +726,10 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
struct rtllib_rx_stats *rx_stats,
struct rtllib_rxb *rxb, u8 *src, u8 *dst)
{
- struct rtllib_hdr_3addr *hdr = (struct rtllib_hdr_3addr *)skb->data;
- u16 fc = le16_to_cpu(hdr->frame_ctl);
+ struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)skb->data;
+ u16 fc = le16_to_cpu(hdr->frame_control);
- u16 LLCOffset = sizeof(struct rtllib_hdr_3addr);
+ u16 LLCOffset = sizeof(struct ieee80211_hdr_3addr);
u16 ChkLength;
bool bIsAggregateFrame = false;
u16 nSubframe_Length;
@@ -764,7 +737,7 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
u16 SeqNum = 0;
struct sk_buff *sub_skb;
/* just for debug purpose */
- SeqNum = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctl));
+ SeqNum = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctrl));
if ((RTLLIB_QOS_HAS_SEQ(fc)) &&
(((union frameqos *)(skb->data + RTLLIB_3ADDR_LEN))->field.reserved))
bIsAggregateFrame = true;
@@ -880,8 +853,8 @@ static size_t rtllib_rx_get_hdrlen(struct rtllib_device *ieee,
struct sk_buff *skb,
struct rtllib_rx_stats *rx_stats)
{
- struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
- u16 fc = le16_to_cpu(hdr->frame_ctl);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ u16 fc = le16_to_cpu(hdr->frame_control);
size_t hdrlen;
hdrlen = rtllib_get_hdrlen(fc);
@@ -902,35 +875,32 @@ static size_t rtllib_rx_get_hdrlen(struct rtllib_device *ieee,
static int rtllib_rx_check_duplicate(struct rtllib_device *ieee,
struct sk_buff *skb, u8 multicast)
{
- struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
u16 fc, sc;
- u8 frag, type, stype;
+ u8 frag;
- fc = le16_to_cpu(hdr->frame_ctl);
- type = WLAN_FC_GET_TYPE(fc);
- stype = WLAN_FC_GET_STYPE(fc);
- sc = le16_to_cpu(hdr->seq_ctl);
+ fc = le16_to_cpu(hdr->frame_control);
+ sc = le16_to_cpu(hdr->seq_ctrl);
frag = WLAN_GET_SEQ_FRAG(sc);
if (!ieee->ht_info->cur_rx_reorder_enable ||
!ieee->current_network.qos_data.active ||
!IsDataFrame(skb->data) ||
IsLegacyDataFrame(skb->data)) {
- if (!((type == RTLLIB_FTYPE_MGMT) &&
- (stype == RTLLIB_STYPE_BEACON))) {
+ if (!ieee80211_is_beacon(hdr->frame_control)) {
if (is_duplicate_packet(ieee, hdr))
return -1;
}
} else {
- struct rx_ts_record *pRxTS = NULL;
+ struct rx_ts_record *ts = NULL;
- if (GetTs(ieee, (struct ts_common_info **)&pRxTS, hdr->addr2,
+ if (rtllib_get_ts(ieee, (struct ts_common_info **)&ts, hdr->addr2,
(u8)Frame_QoSTID((u8 *)(skb->data)), RX_DIR, true)) {
- if ((fc & (1 << 11)) && (frag == pRxTS->rx_last_frag_num) &&
- (WLAN_GET_SEQ_SEQ(sc) == pRxTS->rx_last_seq_num))
+ if ((fc & (1 << 11)) && (frag == ts->rx_last_frag_num) &&
+ (WLAN_GET_SEQ_SEQ(sc) == ts->rx_last_seq_num))
return -1;
- pRxTS->rx_last_frag_num = frag;
- pRxTS->rx_last_seq_num = WLAN_GET_SEQ_SEQ(sc);
+ ts->rx_last_frag_num = frag;
+ ts->rx_last_seq_num = WLAN_GET_SEQ_SEQ(sc);
} else {
netdev_warn(ieee->dev, "%s(): No TS! Skip the check!\n",
__func__);
@@ -942,23 +912,23 @@ static int rtllib_rx_check_duplicate(struct rtllib_device *ieee,
}
static void rtllib_rx_extract_addr(struct rtllib_device *ieee,
- struct rtllib_hdr_4addr *hdr, u8 *dst,
+ struct ieee80211_hdr *hdr, u8 *dst,
u8 *src, u8 *bssid)
{
- u16 fc = le16_to_cpu(hdr->frame_ctl);
+ u16 fc = le16_to_cpu(hdr->frame_control);
- switch (fc & (RTLLIB_FCTL_FROMDS | RTLLIB_FCTL_TODS)) {
- case RTLLIB_FCTL_FROMDS:
+ switch (fc & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
+ case IEEE80211_FCTL_FROMDS:
ether_addr_copy(dst, hdr->addr1);
ether_addr_copy(src, hdr->addr3);
ether_addr_copy(bssid, hdr->addr2);
break;
- case RTLLIB_FCTL_TODS:
+ case IEEE80211_FCTL_TODS:
ether_addr_copy(dst, hdr->addr3);
ether_addr_copy(src, hdr->addr2);
ether_addr_copy(bssid, hdr->addr1);
break;
- case RTLLIB_FCTL_FROMDS | RTLLIB_FCTL_TODS:
+ case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
ether_addr_copy(dst, hdr->addr3);
ether_addr_copy(src, hdr->addr4);
ether_addr_copy(bssid, ieee->current_network.bssid);
@@ -971,46 +941,34 @@ static void rtllib_rx_extract_addr(struct rtllib_device *ieee,
}
}
-static int rtllib_rx_data_filter(struct rtllib_device *ieee, u16 fc,
+static int rtllib_rx_data_filter(struct rtllib_device *ieee, struct ieee80211_hdr *hdr,
u8 *dst, u8 *src, u8 *bssid, u8 *addr2)
{
u8 type, stype;
-
+ u16 fc = le16_to_cpu(hdr->frame_control);
type = WLAN_FC_GET_TYPE(fc);
stype = WLAN_FC_GET_STYPE(fc);
/* Filter frames from different BSS */
- if (((fc & RTLLIB_FCTL_DSTODS) != RTLLIB_FCTL_DSTODS) &&
+ if (ieee80211_has_a4(hdr->frame_control) &&
!ether_addr_equal(ieee->current_network.bssid, bssid) &&
!is_zero_ether_addr(ieee->current_network.bssid)) {
return -1;
}
- /* Filter packets sent by an STA that will be forwarded by AP */
- if (ieee->intel_promiscuous_md_info.promiscuous_on &&
- ieee->intel_promiscuous_md_info.fltr_src_sta_frame) {
- if ((fc & RTLLIB_FCTL_TODS) && !(fc & RTLLIB_FCTL_FROMDS) &&
- !ether_addr_equal(dst, ieee->current_network.bssid) &&
- ether_addr_equal(bssid, ieee->current_network.bssid)) {
- return -1;
- }
- }
-
/* Nullfunc frames may have PS-bit set, so they must be passed to
* hostap_handle_sta_rx() before being dropped here.
*/
- if (!ieee->intel_promiscuous_md_info.promiscuous_on) {
- if (stype != RTLLIB_STYPE_DATA &&
- stype != RTLLIB_STYPE_DATA_CFACK &&
- stype != RTLLIB_STYPE_DATA_CFPOLL &&
- stype != RTLLIB_STYPE_DATA_CFACKPOLL &&
- stype != RTLLIB_STYPE_QOS_DATA) {
- if (stype != RTLLIB_STYPE_NULLFUNC)
- netdev_dbg(ieee->dev,
- "RX: dropped data frame with no data (type=0x%02x, subtype=0x%02x)\n",
- type, stype);
- return -1;
- }
+ if (stype != IEEE80211_STYPE_DATA &&
+ stype != IEEE80211_STYPE_DATA_CFACK &&
+ stype != IEEE80211_STYPE_DATA_CFPOLL &&
+ stype != IEEE80211_STYPE_DATA_CFACKPOLL &&
+ stype != IEEE80211_STYPE_QOS_DATA) {
+ if (stype != IEEE80211_STYPE_NULLFUNC)
+ netdev_dbg(ieee->dev,
+ "RX: dropped data frame with no data (type=0x%02x, subtype=0x%02x)\n",
+ type, stype);
+ return -1;
}
/* packets from our adapter are dropped (echo) */
@@ -1029,8 +987,8 @@ static int rtllib_rx_data_filter(struct rtllib_device *ieee, u16 fc,
static int rtllib_rx_get_crypt(struct rtllib_device *ieee, struct sk_buff *skb,
struct lib80211_crypt_data **crypt, size_t hdrlen)
{
- struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
- u16 fc = le16_to_cpu(hdr->frame_ctl);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ u16 fc = le16_to_cpu(hdr->frame_control);
int idx = 0;
if (skb->len >= hdrlen + 3)
@@ -1040,11 +998,10 @@ static int rtllib_rx_get_crypt(struct rtllib_device *ieee, struct sk_buff *skb,
/* allow NULL decrypt to indicate a station-specific override
* for default encryption
*/
- if (*crypt && ((*crypt)->ops == NULL ||
- (*crypt)->ops->decrypt_mpdu == NULL))
+ if (*crypt && (!(*crypt)->ops || !(*crypt)->ops->decrypt_mpdu))
*crypt = NULL;
- if (!*crypt && (fc & RTLLIB_FCTL_WEP)) {
+ if (!*crypt && (fc & IEEE80211_FCTL_PROTECTED)) {
/* This seems to be triggered by some (multicast?)
* frames from other than current BSS, so just drop the
* frames silently instead of filling system log with
@@ -1063,14 +1020,14 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
struct rtllib_rx_stats *rx_stats,
struct lib80211_crypt_data *crypt, size_t hdrlen)
{
- struct rtllib_hdr_4addr *hdr;
+ struct ieee80211_hdr *hdr;
int keyidx = 0;
u16 fc, sc;
u8 frag;
- hdr = (struct rtllib_hdr_4addr *)skb->data;
- fc = le16_to_cpu(hdr->frame_ctl);
- sc = le16_to_cpu(hdr->seq_ctl);
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = le16_to_cpu(hdr->frame_control);
+ sc = le16_to_cpu(hdr->seq_ctrl);
frag = WLAN_GET_SEQ_FRAG(sc);
if ((!rx_stats->Decrypted))
@@ -1079,13 +1036,13 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
ieee->need_sw_enc = 0;
keyidx = rtllib_rx_frame_decrypt(ieee, skb, crypt);
- if ((fc & RTLLIB_FCTL_WEP) && (keyidx < 0)) {
+ if ((fc & IEEE80211_FCTL_PROTECTED) && (keyidx < 0)) {
netdev_info(ieee->dev, "%s: decrypt frame error\n", __func__);
return -1;
}
- hdr = (struct rtllib_hdr_4addr *)skb->data;
- if ((frag != 0 || (fc & RTLLIB_FCTL_MOREFRAGS))) {
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if ((frag != 0 || (fc & IEEE80211_FCTL_MOREFRAGS))) {
int flen;
struct sk_buff *frag_skb = rtllib_frag_cache_get(ieee, hdr);
@@ -1094,7 +1051,7 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
if (!frag_skb) {
netdev_dbg(ieee->dev,
"Rx cannot get skb from fragment cache (morefrag=%d seq=%u frag=%u)\n",
- (fc & RTLLIB_FCTL_MOREFRAGS) != 0,
+ (fc & IEEE80211_FCTL_MOREFRAGS) != 0,
WLAN_GET_SEQ_SEQ(sc), frag);
return -1;
}
@@ -1124,7 +1081,7 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
dev_kfree_skb_any(skb);
skb = NULL;
- if (fc & RTLLIB_FCTL_MOREFRAGS) {
+ if (fc & IEEE80211_FCTL_MOREFRAGS) {
/* more fragments expected - leave the skb in fragment
* cache for now; it will be delivered to upper layers
* after all fragments have been received
@@ -1136,21 +1093,21 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
* delivered, so remove skb from fragment cache
*/
skb = frag_skb;
- hdr = (struct rtllib_hdr_4addr *)skb->data;
+ hdr = (struct ieee80211_hdr *)skb->data;
rtllib_frag_cache_invalidate(ieee, hdr);
}
/* skb: hdr + (possible reassembled) full MSDU payload; possibly still
* encrypted/authenticated
*/
- if ((fc & RTLLIB_FCTL_WEP) &&
+ if ((fc & IEEE80211_FCTL_PROTECTED) &&
rtllib_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) {
netdev_info(ieee->dev, "%s: ==>decrypt msdu error\n", __func__);
return -1;
}
- hdr = (struct rtllib_hdr_4addr *)skb->data;
- if (crypt && !(fc & RTLLIB_FCTL_WEP) && !ieee->open_wep) {
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep) {
if (/*ieee->ieee802_1x &&*/
rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
/* pass unencrypted EAPOL frames even if encryption is
@@ -1169,7 +1126,7 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
}
}
- if (crypt && !(fc & RTLLIB_FCTL_WEP) &&
+ if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) &&
rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
struct eapol *eap = (struct eapol *)(skb->data + 24);
@@ -1177,7 +1134,7 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
eap_get_type(eap->type));
}
- if (crypt && !(fc & RTLLIB_FCTL_WEP) && !ieee->open_wep &&
+ if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep &&
!rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
netdev_dbg(ieee->dev,
"dropped unencrypted RX data frame from %pM (drop_unencrypted=1)\n",
@@ -1213,7 +1170,7 @@ static void rtllib_rx_indicate_pkt_legacy(struct rtllib_device *ieee,
u16 ethertype;
int i = 0;
- if (rxb == NULL) {
+ if (!rxb) {
netdev_info(dev, "%s: rxb is NULL!!\n", __func__);
return;
}
@@ -1275,10 +1232,10 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
struct rtllib_rx_stats *rx_stats)
{
struct net_device *dev = ieee->dev;
- struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct lib80211_crypt_data *crypt = NULL;
struct rtllib_rxb *rxb = NULL;
- struct rx_ts_record *pTS = NULL;
+ struct rx_ts_record *ts = NULL;
u16 fc, sc, SeqNum = 0;
u8 type, stype, multicast = 0, unicast = 0, nr_subframes = 0, TID = 0;
u8 dst[ETH_ALEN];
@@ -1286,23 +1243,18 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
u8 bssid[ETH_ALEN] = {0};
size_t hdrlen = 0;
- bool bToOtherSTA = false;
int ret = 0, i = 0;
- fc = le16_to_cpu(hdr->frame_ctl);
+ fc = le16_to_cpu(hdr->frame_control);
type = WLAN_FC_GET_TYPE(fc);
stype = WLAN_FC_GET_STYPE(fc);
- sc = le16_to_cpu(hdr->seq_ctl);
+ sc = le16_to_cpu(hdr->seq_ctrl);
/*Filter pkt not to me*/
multicast = is_multicast_ether_addr(hdr->addr1);
unicast = !multicast;
- if (unicast && !ether_addr_equal(dev->dev_addr, hdr->addr1)) {
- if (ieee->net_promiscuous_md)
- bToOtherSTA = true;
- else
- goto rx_dropped;
- }
+ if (unicast && !ether_addr_equal(dev->dev_addr, hdr->addr1))
+ goto rx_dropped;
/*Filter pkt has too small length */
hdrlen = rtllib_rx_get_hdrlen(ieee, skb, rx_stats);
@@ -1324,8 +1276,6 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
/* Filter MGNT Frame */
if (type == RTLLIB_FTYPE_MGMT) {
- if (bToOtherSTA)
- goto rx_dropped;
if (rtllib_rx_frame_mgmt(ieee, skb, rx_stats, type, stype))
goto rx_dropped;
else
@@ -1335,16 +1285,14 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
/* Filter WAPI DATA Frame */
/* Update statistics for AP roaming */
- if (!bToOtherSTA) {
- ieee->link_detect_info.NumRecvDataInPeriod++;
- ieee->link_detect_info.NumRxOkInPeriod++;
- }
+ ieee->link_detect_info.NumRecvDataInPeriod++;
+ ieee->link_detect_info.NumRxOkInPeriod++;
/* Data frame - extract src/dst addresses */
rtllib_rx_extract_addr(ieee, hdr, dst, src, bssid);
/* Filter Data frames */
- ret = rtllib_rx_data_filter(ieee, fc, dst, src, bssid, hdr->addr2);
+ ret = rtllib_rx_data_filter(ieee, hdr, dst, src, bssid, hdr->addr2);
if (ret < 0)
goto rx_dropped;
@@ -1354,7 +1302,7 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
/* Send pspoll based on moredata */
if ((ieee->iw_mode == IW_MODE_INFRA) &&
(ieee->sta_sleep == LPS_IS_SLEEP) &&
- (ieee->polling) && (!bToOtherSTA)) {
+ (ieee->polling)) {
if (WLAN_FC_MORE_DATA(fc)) {
/* more data bit is set, let's request a new frame
* from the AP
@@ -1378,13 +1326,12 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
goto rx_exit;
/* Get TS for Rx Reorder */
- hdr = (struct rtllib_hdr_4addr *)skb->data;
+ hdr = (struct ieee80211_hdr *)skb->data;
if (ieee->current_network.qos_data.active && IsQoSDataFrame(skb->data)
- && !is_multicast_ether_addr(hdr->addr1)
- && (!bToOtherSTA)) {
+ && !is_multicast_ether_addr(hdr->addr1)) {
TID = Frame_QoSTID(skb->data);
SeqNum = WLAN_GET_SEQ_SEQ(sc);
- GetTs(ieee, (struct ts_common_info **)&pTS, hdr->addr2, TID,
+ rtllib_get_ts(ieee, (struct ts_common_info **)&ts, hdr->addr2, TID,
RX_DIR, true);
if (TID != 0 && TID != 3)
ieee->bis_any_nonbepkts = true;
@@ -1412,21 +1359,19 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
/* Update WAPI PN */
/* Check if leave LPS */
- if (!bToOtherSTA) {
- if (ieee->bIsAggregateFrame)
- nr_subframes = rxb->nr_subframes;
- else
- nr_subframes = 1;
- if (unicast)
- ieee->link_detect_info.NumRxUnicastOkInPeriod += nr_subframes;
- rtllib_rx_check_leave_lps(ieee, unicast, nr_subframes);
- }
+ if (ieee->bIsAggregateFrame)
+ nr_subframes = rxb->nr_subframes;
+ else
+ nr_subframes = 1;
+ if (unicast)
+ ieee->link_detect_info.NumRxUnicastOkInPeriod += nr_subframes;
+ rtllib_rx_check_leave_lps(ieee, unicast, nr_subframes);
/* Indicate packets to upper layer or Rx Reorder */
- if (!ieee->ht_info->cur_rx_reorder_enable || pTS == NULL || bToOtherSTA)
+ if (!ieee->ht_info->cur_rx_reorder_enable || !ts)
rtllib_rx_indicate_pkt_legacy(ieee, rx_stats, rxb, dst, src);
else
- RxReorderIndicatePacket(ieee, rxb, pTS, SeqNum);
+ RxReorderIndicatePacket(ieee, rxb, ts, SeqNum);
dev_kfree_skb(skb);
@@ -1446,8 +1391,8 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
static int rtllib_rx_Monitor(struct rtllib_device *ieee, struct sk_buff *skb,
struct rtllib_rx_stats *rx_stats)
{
- struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
- u16 fc = le16_to_cpu(hdr->frame_ctl);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ u16 fc = le16_to_cpu(hdr->frame_control);
size_t hdrlen = rtllib_get_hdrlen(fc);
if (skb->len < hdrlen) {
@@ -1490,7 +1435,6 @@ int rtllib_rx(struct rtllib_device *ieee, struct sk_buff *skb,
}
switch (ieee->iw_mode) {
- case IW_MODE_ADHOC:
case IW_MODE_INFRA:
ret = rtllib_rx_InfraAdhoc(ieee, skb, rx_stats);
break;
@@ -2448,11 +2392,6 @@ static inline void update_network(struct rtllib_device *ieee,
dst->BssCcxVerNumber = src->BssCcxVerNumber;
}
-static inline int is_beacon(u16 fc)
-{
- return (WLAN_FC_GET_STYPE(fc) == RTLLIB_STYPE_BEACON);
-}
-
static int IsPassiveChannel(struct rtllib_device *rtllib, u8 channel)
{
if (channel > MAX_CHANNEL_NUMBER) {
@@ -2491,7 +2430,7 @@ static inline void rtllib_process_probe_response(
short renew;
struct rtllib_network *network = kzalloc(sizeof(struct rtllib_network),
GFP_ATOMIC);
- u16 frame_ctl = le16_to_cpu(beacon->header.frame_ctl);
+ __le16 frame_ctl = beacon->header.frame_control;
if (!network)
return;
@@ -2521,14 +2460,14 @@ static inline void rtllib_process_probe_response(
netdev_dbg(ieee->dev, "Dropped '%s' ( %pM) via %s.\n",
escape_essid(info_element->data, info_element->len),
beacon->header.addr3,
- is_beacon(frame_ctl) ? "BEACON" : "PROBE RESPONSE");
+ ieee80211_is_beacon(frame_ctl) ? "BEACON" : "PROBE RESPONSE");
goto free_network;
}
if (!rtllib_legal_channel(ieee, network->channel))
goto free_network;
- if (WLAN_FC_GET_STYPE(frame_ctl) == RTLLIB_STYPE_PROBE_RESP) {
+ if (ieee80211_is_probe_resp(frame_ctl)) {
if (IsPassiveChannel(ieee, network->channel)) {
netdev_info(ieee->dev,
"GetScanInfo(): For Global Domain, filter probe response at channel(%d).\n",
@@ -2561,7 +2500,7 @@ static inline void rtllib_process_probe_response(
else
ieee->current_network.buseprotection = false;
}
- if (is_beacon(frame_ctl)) {
+ if (ieee80211_is_beacon(frame_ctl)) {
if (ieee->link_state >= MAC80211_LINKED)
ieee->link_detect_info.NumRecvBcnInPeriod++;
}
@@ -2570,8 +2509,7 @@ static inline void rtllib_process_probe_response(
if (is_same_network(target, network,
(target->ssid_len ? 1 : 0)))
break;
- if ((oldest == NULL) ||
- (target->last_scanned < oldest->last_scanned))
+ if (!oldest || (target->last_scanned < oldest->last_scanned))
oldest = target;
}
@@ -2597,7 +2535,7 @@ static inline void rtllib_process_probe_response(
netdev_dbg(ieee->dev, "Adding '%s' ( %pM) via %s.\n",
escape_essid(network->ssid, network->ssid_len),
network->bssid,
- is_beacon(frame_ctl) ? "BEACON" : "PROBE RESPONSE");
+ ieee80211_is_beacon(frame_ctl) ? "BEACON" : "PROBE RESPONSE");
memcpy(target, network, sizeof(*target));
list_add_tail(&target->list, &ieee->network_list);
@@ -2607,7 +2545,7 @@ static inline void rtllib_process_probe_response(
netdev_dbg(ieee->dev, "Updating '%s' ( %pM) via %s.\n",
escape_essid(target->ssid, target->ssid_len),
target->bssid,
- is_beacon(frame_ctl) ? "BEACON" : "PROBE RESPONSE");
+ ieee80211_is_beacon(frame_ctl) ? "BEACON" : "PROBE RESPONSE");
/* we have an entry and we are going to update it. But this
* entry may be already expired. In this case we do the same
@@ -2628,7 +2566,7 @@ static inline void rtllib_process_probe_response(
}
spin_unlock_irqrestore(&ieee->lock, flags);
- if (is_beacon(frame_ctl) &&
+ if (ieee80211_is_beacon(frame_ctl) &&
is_same_network(&ieee->current_network, network,
(network->ssid_len ? 1 : 0)) &&
(ieee->link_state == MAC80211_LINKED)) {
@@ -2642,18 +2580,14 @@ static void rtllib_rx_mgt(struct rtllib_device *ieee,
struct sk_buff *skb,
struct rtllib_rx_stats *stats)
{
- struct rtllib_hdr_4addr *header = (struct rtllib_hdr_4addr *)skb->data;
+ struct ieee80211_hdr *header = (struct ieee80211_hdr *)skb->data;
- if ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) !=
- RTLLIB_STYPE_PROBE_RESP) &&
- (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) !=
- RTLLIB_STYPE_BEACON))
+ if (!ieee80211_is_probe_resp(header->frame_control) &&
+ (!ieee80211_is_beacon(header->frame_control)))
ieee->last_rx_ps_time = jiffies;
- switch (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl))) {
- case RTLLIB_STYPE_BEACON:
- netdev_dbg(ieee->dev, "received BEACON (%d)\n",
- WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)));
+ if (ieee80211_is_beacon(header->frame_control)) {
+ netdev_dbg(ieee->dev, "received BEACON\n");
rtllib_process_probe_response(
ieee, (struct rtllib_probe_response *)header,
stats);
@@ -2662,22 +2596,9 @@ static void rtllib_rx_mgt(struct rtllib_device *ieee,
ieee->iw_mode == IW_MODE_INFRA &&
ieee->link_state == MAC80211_LINKED))
schedule_work(&ieee->ps_task);
-
- break;
-
- case RTLLIB_STYPE_PROBE_RESP:
- netdev_dbg(ieee->dev, "received PROBE RESPONSE (%d)\n",
- WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)));
+ } else if (ieee80211_is_probe_resp(header->frame_control)) {
+ netdev_dbg(ieee->dev, "received PROBE RESPONSE\n");
rtllib_process_probe_response(ieee,
(struct rtllib_probe_response *)header, stats);
- break;
- case RTLLIB_STYPE_PROBE_REQ:
- netdev_dbg(ieee->dev, "received PROBE REQUEST (%d)\n",
- WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)));
- if ((ieee->softmac_features & IEEE_SOFTMAC_PROBERS) &&
- (ieee->iw_mode == IW_MODE_ADHOC &&
- ieee->link_state == MAC80211_LINKED))
- rtllib_rx_probe_rq(ieee, skb);
- break;
}
}
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index de1702491191..42d652fe8601 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -187,8 +187,8 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct rtllib_device *ieee)
{
unsigned long flags;
short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
- struct rtllib_hdr_3addr *header =
- (struct rtllib_hdr_3addr *)skb->data;
+ struct ieee80211_hdr_3addr *header =
+ (struct ieee80211_hdr_3addr *)skb->data;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + 8);
@@ -197,7 +197,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct rtllib_device *ieee)
/* called with 2nd param 0, no mgmt lock required */
rtllib_sta_wakeup(ieee, 0);
- if (le16_to_cpu(header->frame_ctl) == RTLLIB_STYPE_BEACON)
+ if (ieee80211_is_beacon(header->frame_control))
tcb_desc->queue_index = BEACON_QUEUE;
else
tcb_desc->queue_index = MGNT_QUEUE;
@@ -213,7 +213,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct rtllib_device *ieee)
if (ieee->queue_stop) {
enqueue_mgmt(ieee, skb);
} else {
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
+ header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
@@ -230,7 +230,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct rtllib_device *ieee)
spin_unlock_irqrestore(&ieee->lock, flags);
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags);
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
+ header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
@@ -264,16 +264,16 @@ softmac_ps_mgmt_xmit(struct sk_buff *skb,
struct rtllib_device *ieee)
{
short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
- struct rtllib_hdr_3addr *header =
- (struct rtllib_hdr_3addr *)skb->data;
+ struct ieee80211_hdr_3addr *header =
+ (struct ieee80211_hdr_3addr *)skb->data;
u16 fc, type, stype;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + 8);
- fc = le16_to_cpu(header->frame_ctl);
+ fc = le16_to_cpu(header->frame_control);
type = WLAN_FC_GET_TYPE(fc);
stype = WLAN_FC_GET_STYPE(fc);
- if (stype != RTLLIB_STYPE_PSPOLL)
+ if (stype != IEEE80211_STYPE_PSPOLL)
tcb_desc->queue_index = MGNT_QUEUE;
else
tcb_desc->queue_index = HIGH_QUEUE;
@@ -287,7 +287,7 @@ softmac_ps_mgmt_xmit(struct sk_buff *skb,
tcb_desc->tx_use_drv_assinged_rate = 1;
if (single) {
if (type != RTLLIB_FTYPE_CTL) {
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
+ header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
@@ -300,7 +300,7 @@ softmac_ps_mgmt_xmit(struct sk_buff *skb,
} else {
if (type != RTLLIB_FTYPE_CTL) {
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
+ header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
@@ -331,7 +331,7 @@ static inline struct sk_buff *rtllib_probe_req(struct rtllib_device *ieee)
skb_reserve(skb, ieee->tx_headroom);
req = skb_put(skb, sizeof(struct rtllib_probe_request));
- req->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_PROBE_REQ);
+ req->header.frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
req->header.duration_id = 0;
eth_broadcast_addr(req->header.addr1);
@@ -406,49 +406,6 @@ void rtllib_DisableNetMonitorMode(struct net_device *dev,
ieee->AllowAllDestAddrHandler(dev, false, !bInitState);
}
-/* Enables the specialized promiscuous mode required by Intel.
- * In this mode, Intel intends to hear traffics from/to other STAs in the
- * same BSS. Therefore we don't have to disable checking BSSID and we only need
- * to allow all dest. BUT: if we enable checking BSSID then we can't recv
- * packets from other STA.
- */
-void rtllib_EnableIntelPromiscuousMode(struct net_device *dev,
- bool bInitState)
-{
- bool bFilterOutNonAssociatedBSSID = false;
-
- struct rtllib_device *ieee = netdev_priv_rsl(dev);
-
- netdev_info(dev, "========>Enter Intel Promiscuous Mode\n");
-
- ieee->AllowAllDestAddrHandler(dev, true, !bInitState);
- ieee->SetHwRegHandler(dev, HW_VAR_CECHK_BSSID,
- (u8 *)&bFilterOutNonAssociatedBSSID);
-
- ieee->net_promiscuous_md = true;
-}
-EXPORT_SYMBOL(rtllib_EnableIntelPromiscuousMode);
-
-/* Disables the specialized promiscuous mode required by Intel.
- * See MgntEnableIntelPromiscuousMode for detail.
- */
-void rtllib_DisableIntelPromiscuousMode(struct net_device *dev,
- bool bInitState)
-{
- bool bFilterOutNonAssociatedBSSID = true;
-
- struct rtllib_device *ieee = netdev_priv_rsl(dev);
-
- netdev_info(dev, "========>Exit Intel Promiscuous Mode\n");
-
- ieee->AllowAllDestAddrHandler(dev, false, !bInitState);
- ieee->SetHwRegHandler(dev, HW_VAR_CECHK_BSSID,
- (u8 *)&bFilterOutNonAssociatedBSSID);
-
- ieee->net_promiscuous_md = false;
-}
-EXPORT_SYMBOL(rtllib_DisableIntelPromiscuousMode);
-
static void rtllib_send_probe(struct rtllib_device *ieee)
{
struct sk_buff *skb;
@@ -462,8 +419,7 @@ static void rtllib_send_probe(struct rtllib_device *ieee)
static void rtllib_send_probe_requests(struct rtllib_device *ieee)
{
- if (ieee->active_scan && (ieee->softmac_features &
- IEEE_SOFTMAC_PROBERQ)) {
+ if (ieee->softmac_features & IEEE_SOFTMAC_PROBERQ) {
rtllib_send_probe(ieee);
rtllib_send_probe(ieee);
}
@@ -607,46 +563,6 @@ out1:
mutex_unlock(&ieee->scan_mutex);
}
-static void rtllib_beacons_start(struct rtllib_device *ieee)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ieee->beacon_lock, flags);
-
- ieee->beacon_txing = 1;
- rtllib_send_beacon(ieee);
-
- spin_unlock_irqrestore(&ieee->beacon_lock, flags);
-}
-
-static void rtllib_beacons_stop(struct rtllib_device *ieee)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ieee->beacon_lock, flags);
-
- ieee->beacon_txing = 0;
-
- spin_unlock_irqrestore(&ieee->beacon_lock, flags);
- del_timer_sync(&ieee->beacon_timer);
-}
-
-void rtllib_stop_send_beacons(struct rtllib_device *ieee)
-{
- ieee->stop_send_beacons(ieee->dev);
- if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
- rtllib_beacons_stop(ieee);
-}
-EXPORT_SYMBOL(rtllib_stop_send_beacons);
-
-void rtllib_start_send_beacons(struct rtllib_device *ieee)
-{
- ieee->start_send_beacons(ieee->dev);
- if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
- rtllib_beacons_start(ieee);
-}
-EXPORT_SYMBOL(rtllib_start_send_beacons);
-
static void rtllib_softmac_stop_scan(struct rtllib_device *ieee)
{
mutex_lock(&ieee->scan_mutex);
@@ -739,9 +655,9 @@ rtllib_authentication_req(struct rtllib_network *beacon,
auth = skb_put(skb, sizeof(struct rtllib_authentication));
- auth->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_AUTH);
+ auth->header.frame_control = cpu_to_le16(IEEE80211_STYPE_AUTH);
if (challengelen)
- auth->header.frame_ctl |= cpu_to_le16(RTLLIB_FCTL_WEP);
+ auth->header.frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
auth->header.duration_id = cpu_to_le16(0x013a);
ether_addr_copy(auth->header.addr1, beacon->bssid);
@@ -809,7 +725,7 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee,
crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
encrypt = crypt && crypt->ops &&
((strcmp(crypt->ops->name, "R-WEP") == 0 || wpa_ie_len));
- if (ieee->ht_info->bCurrentHTSupport) {
+ if (ieee->ht_info->current_ht_support) {
tmp_ht_cap_buf = (u8 *)&(ieee->ht_info->SelfHTCap);
tmp_ht_cap_len = sizeof(ieee->ht_info->SelfHTCap);
tmp_ht_info_buf = (u8 *)&(ieee->ht_info->SelfHTInfo);
@@ -860,7 +776,7 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee,
if (encrypt)
beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
- beacon_buf->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_PROBE_RESP);
+ beacon_buf->header.frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_RESP);
beacon_buf->info_element[0].id = MFIE_TYPE_SSID;
beacon_buf->info_element[0].len = ssid_len;
@@ -901,8 +817,6 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee,
}
if (wpa_ie_len) {
- if (ieee->iw_mode == IW_MODE_ADHOC)
- memcpy(&ieee->wpa_ie[14], &ieee->wpa_ie[8], 4);
memcpy(tag, ieee->wpa_ie, ieee->wpa_ie_len);
tag += ieee->wpa_ie_len;
}
@@ -912,23 +826,23 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee,
static struct sk_buff *rtllib_null_func(struct rtllib_device *ieee, short pwr)
{
struct sk_buff *skb;
- struct rtllib_hdr_3addr *hdr;
+ struct ieee80211_hdr_3addr *hdr;
- skb = dev_alloc_skb(sizeof(struct rtllib_hdr_3addr) + ieee->tx_headroom);
+ skb = dev_alloc_skb(sizeof(struct ieee80211_hdr_3addr) + ieee->tx_headroom);
if (!skb)
return NULL;
skb_reserve(skb, ieee->tx_headroom);
- hdr = skb_put(skb, sizeof(struct rtllib_hdr_3addr));
+ hdr = skb_put(skb, sizeof(struct ieee80211_hdr_3addr));
ether_addr_copy(hdr->addr1, ieee->current_network.bssid);
ether_addr_copy(hdr->addr2, ieee->dev->dev_addr);
ether_addr_copy(hdr->addr3, ieee->current_network.bssid);
- hdr->frame_ctl = cpu_to_le16(RTLLIB_FTYPE_DATA |
- RTLLIB_STYPE_NULLFUNC | RTLLIB_FCTL_TODS |
- (pwr ? RTLLIB_FCTL_PM : 0));
+ hdr->frame_control = cpu_to_le16(RTLLIB_FTYPE_DATA |
+ IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_TODS |
+ (pwr ? IEEE80211_FCTL_PM : 0));
return skb;
}
@@ -936,34 +850,26 @@ static struct sk_buff *rtllib_null_func(struct rtllib_device *ieee, short pwr)
static struct sk_buff *rtllib_pspoll_func(struct rtllib_device *ieee)
{
struct sk_buff *skb;
- struct rtllib_pspoll_hdr *hdr;
+ struct ieee80211_pspoll *hdr;
- skb = dev_alloc_skb(sizeof(struct rtllib_pspoll_hdr) + ieee->tx_headroom);
+ skb = dev_alloc_skb(sizeof(struct ieee80211_pspoll) + ieee->tx_headroom);
if (!skb)
return NULL;
skb_reserve(skb, ieee->tx_headroom);
- hdr = skb_put(skb, sizeof(struct rtllib_pspoll_hdr));
+ hdr = skb_put(skb, sizeof(struct ieee80211_pspoll));
ether_addr_copy(hdr->bssid, ieee->current_network.bssid);
ether_addr_copy(hdr->ta, ieee->dev->dev_addr);
hdr->aid = cpu_to_le16(ieee->assoc_id | 0xc000);
- hdr->frame_ctl = cpu_to_le16(RTLLIB_FTYPE_CTL | RTLLIB_STYPE_PSPOLL |
- RTLLIB_FCTL_PM);
+ hdr->frame_control = cpu_to_le16(RTLLIB_FTYPE_CTL | IEEE80211_STYPE_PSPOLL |
+ IEEE80211_FCTL_PM);
return skb;
}
-static void rtllib_resp_to_probe(struct rtllib_device *ieee, u8 *dest)
-{
- struct sk_buff *buf = rtllib_probe_resp(ieee, dest);
-
- if (buf)
- softmac_mgmt_xmit(buf, ieee);
-}
-
static inline int SecIsInPMKIDList(struct rtllib_device *ieee, u8 *bssid)
{
int i = 0;
@@ -1012,7 +918,7 @@ rtllib_association_req(struct rtllib_network *beacon,
int len = 0;
crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
- if (crypt != NULL)
+ if (crypt)
encrypt = crypt && crypt->ops &&
((strcmp(crypt->ops->name, "R-WEP") == 0 ||
wpa_ie_len));
@@ -1026,7 +932,7 @@ rtllib_association_req(struct rtllib_network *beacon,
ieee->mode = WIRELESS_MODE_G;
}
- if (ieee->ht_info->bCurrentHTSupport && ieee->ht_info->enable_ht) {
+ if (ieee->ht_info->current_ht_support && ieee->ht_info->enable_ht) {
ht_cap_buf = (u8 *)&(ieee->ht_info->SelfHTCap);
ht_cap_len = sizeof(ieee->ht_info->SelfHTCap);
HTConstructCapabilityElement(ieee, ht_cap_buf, &ht_cap_len,
@@ -1076,7 +982,7 @@ rtllib_association_req(struct rtllib_network *beacon,
hdr = skb_put(skb, sizeof(struct rtllib_assoc_request_frame) + 2);
- hdr->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_ASSOC_REQ);
+ hdr->header.frame_control = cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ);
hdr->header.duration_id = cpu_to_le16(37);
ether_addr_copy(hdr->header.addr1, beacon->bssid);
ether_addr_copy(hdr->header.addr2, ieee->dev->dev_addr);
@@ -1164,7 +1070,7 @@ rtllib_association_req(struct rtllib_network *beacon,
memcpy(tag, osCcxVerNum.Octet, osCcxVerNum.Length);
tag += osCcxVerNum.Length;
}
- if (ieee->ht_info->bCurrentHTSupport && ieee->ht_info->enable_ht) {
+ if (ieee->ht_info->current_ht_support && ieee->ht_info->enable_ht) {
if (ieee->ht_info->ePeerHTSpecVer != HT_SPEC_VER_EWC) {
tag = skb_put(skb, ht_cap_len);
*tag++ = MFIE_TYPE_HT_CAP;
@@ -1198,7 +1104,7 @@ rtllib_association_req(struct rtllib_network *beacon,
rtllib_TURBO_Info(ieee, &tag);
}
- if (ieee->ht_info->bCurrentHTSupport && ieee->ht_info->enable_ht) {
+ if (ieee->ht_info->current_ht_support && ieee->ht_info->enable_ht) {
if (ieee->ht_info->ePeerHTSpecVer == HT_SPEC_VER_EWC) {
tag = skb_put(skb, ht_cap_len);
*tag++ = MFIE_TYPE_GENERIC;
@@ -1311,7 +1217,7 @@ static void rtllib_auth_challenge(struct rtllib_device *ieee, u8 *challenge,
"Sending authentication challenge response\n");
rtllib_encrypt_fragment(ieee, skb,
- sizeof(struct rtllib_hdr_3addr));
+ sizeof(struct ieee80211_hdr_3addr));
softmac_mgmt_xmit(skb, ieee);
mod_timer(&ieee->associate_timer, jiffies + (HZ / 2));
@@ -1348,10 +1254,8 @@ static void rtllib_associate_complete_wq(void *data)
netdev_info(ieee->dev, "Associated successfully with %pM\n",
ieee->current_network.bssid);
- if (!ieee->is_silent_reset) {
- netdev_info(ieee->dev, "normal associate\n");
- notify_wx_assoc_event(ieee);
- }
+ netdev_info(ieee->dev, "normal associate\n");
+ notify_wx_assoc_event(ieee);
netif_carrier_on(ieee->dev);
ieee->is_roaming = false;
@@ -1363,13 +1267,13 @@ static void rtllib_associate_complete_wq(void *data)
ieee->set_wireless_mode(ieee->dev, WIRELESS_MODE_B);
netdev_info(ieee->dev, "Using B rates:%d\n", ieee->rate);
}
- if (ieee->ht_info->bCurrentHTSupport && ieee->ht_info->enable_ht) {
+ if (ieee->ht_info->current_ht_support && ieee->ht_info->enable_ht) {
netdev_info(ieee->dev, "Successfully associated, ht enabled\n");
HTOnAssocRsp(ieee);
} else {
netdev_info(ieee->dev,
"Successfully associated, ht not enabled(%d, %d)\n",
- ieee->ht_info->bCurrentHTSupport,
+ ieee->ht_info->current_ht_support,
ieee->ht_info->enable_ht);
memset(ieee->dot11ht_oper_rate_set, 0, 16);
}
@@ -1384,10 +1288,6 @@ static void rtllib_associate_complete_wq(void *data)
psc->LpsIdleCount = 0;
ieee->link_change(ieee->dev);
- if (ieee->is_silent_reset) {
- netdev_info(ieee->dev, "silent reset associate\n");
- ieee->is_silent_reset = false;
- }
}
static void rtllib_sta_send_associnfo(struct rtllib_device *ieee)
@@ -1445,14 +1345,7 @@ inline void rtllib_softmac_new_net(struct rtllib_device *ieee,
WLAN_CAPABILITY_ESS))
return;
- if ((ieee->iw_mode == IW_MODE_ADHOC) && !(net->capability &
- WLAN_CAPABILITY_IBSS))
- return;
-
- if ((ieee->iw_mode == IW_MODE_ADHOC) &&
- (net->channel > ieee->ibss_maxjoin_chal))
- return;
- if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC) {
+ if (ieee->iw_mode == IW_MODE_INFRA) {
/* if the user specified the AP MAC, we need also the essid
* This could be obtained by beacons or, if the network does not
* broadcast it, it can be put manually.
@@ -1534,8 +1427,7 @@ inline void rtllib_softmac_new_net(struct rtllib_device *ieee,
HTResetSelfAndSavePeerSetting(ieee,
&(ieee->current_network));
else
- ieee->ht_info->bCurrentHTSupport =
- false;
+ ieee->ht_info->current_ht_support = false;
ieee->link_state = RTLLIB_ASSOCIATING;
schedule_delayed_work(
@@ -1613,52 +1505,6 @@ static inline int auth_parse(struct net_device *dev, struct sk_buff *skb,
return 0;
}
-static short probe_rq_parse(struct rtllib_device *ieee, struct sk_buff *skb,
- u8 *src)
-{
- u8 *tag;
- u8 *skbend;
- u8 *ssid = NULL;
- u8 ssidlen = 0;
- struct rtllib_hdr_3addr *header =
- (struct rtllib_hdr_3addr *)skb->data;
- bool bssid_match;
-
- if (skb->len < sizeof(struct rtllib_hdr_3addr))
- return -1; /* corrupted */
-
- bssid_match =
- (!ether_addr_equal(header->addr3, ieee->current_network.bssid)) &&
- (!is_broadcast_ether_addr(header->addr3));
- if (bssid_match)
- return -1;
-
- ether_addr_copy(src, header->addr2);
-
- skbend = (u8 *)skb->data + skb->len;
-
- tag = skb->data + sizeof(struct rtllib_hdr_3addr);
-
- while (tag + 1 < skbend) {
- if (*tag == 0) {
- ssid = tag + 2;
- ssidlen = *(tag + 1);
- break;
- }
- tag++; /* point to the len field */
- tag = tag + *(tag); /* point to the last data byte of the tag */
- tag++; /* point to the next tag */
- }
-
- if (ssidlen == 0)
- return 1;
-
- if (!ssid)
- return 1; /* ssid not found in tagged param */
-
- return !strncmp(ssid, ieee->current_network.ssid, ssidlen);
-}
-
static inline u16 assoc_parse(struct rtllib_device *ieee, struct sk_buff *skb,
int *aid)
{
@@ -1688,17 +1534,6 @@ static inline u16 assoc_parse(struct rtllib_device *ieee, struct sk_buff *skb,
return le16_to_cpu(response_head->status);
}
-void rtllib_rx_probe_rq(struct rtllib_device *ieee, struct sk_buff *skb)
-{
- u8 dest[ETH_ALEN];
-
- ieee->softmac_stats.rx_probe_rq++;
- if (probe_rq_parse(ieee, skb, dest) > 0) {
- ieee->softmac_stats.tx_probe_rs++;
- rtllib_resp_to_probe(ieee, dest);
- }
-}
-
void rtllib_sta_ps_send_null_frame(struct rtllib_device *ieee, short pwr)
{
struct sk_buff *buf = rtllib_null_func(ieee, pwr);
@@ -1752,16 +1587,10 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time)
if (ieee->bAwakePktSent) {
psc->LPSAwakeIntvl = 1;
} else {
- u8 MaxPeriod = 1;
+ u8 MaxPeriod = 5;
if (psc->LPSAwakeIntvl == 0)
psc->LPSAwakeIntvl = 1;
- if (psc->reg_max_lps_awake_intvl == 0)
- MaxPeriod = 1;
- else if (psc->reg_max_lps_awake_intvl == 0xFF)
- MaxPeriod = ieee->current_network.dtim_period;
- else
- MaxPeriod = psc->reg_max_lps_awake_intvl;
psc->LPSAwakeIntvl = (psc->LPSAwakeIntvl >=
MaxPeriod) ? MaxPeriod :
(psc->LPSAwakeIntvl + 1);
@@ -1921,16 +1750,9 @@ EXPORT_SYMBOL(rtllib_ps_tx_ack);
static void rtllib_process_action(struct rtllib_device *ieee,
struct sk_buff *skb)
{
- struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *)skb->data;
- u8 *act = rtllib_get_payload((struct rtllib_hdr *)header);
+ u8 *act = skb->data + RTLLIB_3ADDR_LEN;
u8 category = 0;
- if (act == NULL) {
- netdev_warn(ieee->dev,
- "Error getting payload of action frame\n");
- return;
- }
-
category = *act;
act++;
switch (category) {
@@ -1960,8 +1782,8 @@ rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
int aid;
u8 *ies;
struct rtllib_assoc_response_frame *assoc_resp;
- struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *)skb->data;
- u16 frame_ctl = le16_to_cpu(header->frame_ctl);
+ struct ieee80211_hdr_3addr *header = (struct ieee80211_hdr_3addr *)skb->data;
+ u16 frame_ctl = le16_to_cpu(header->frame_control);
netdev_dbg(ieee->dev, "received [RE]ASSOCIATION RESPONSE (%d)\n",
WLAN_FC_GET_STYPE(frame_ctl));
@@ -2099,7 +1921,7 @@ rtllib_rx_auth(struct rtllib_device *ieee, struct sk_buff *skb,
static inline int
rtllib_rx_deauth(struct rtllib_device *ieee, struct sk_buff *skb)
{
- struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *)skb->data;
+ struct ieee80211_hdr_3addr *header = (struct ieee80211_hdr_3addr *)skb->data;
u16 frame_ctl;
if (memcmp(header->addr3, ieee->current_network.bssid, ETH_ALEN) != 0)
@@ -2111,7 +1933,7 @@ rtllib_rx_deauth(struct rtllib_device *ieee, struct sk_buff *skb)
if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
ieee->link_state == MAC80211_LINKED &&
(ieee->iw_mode == IW_MODE_INFRA)) {
- frame_ctl = le16_to_cpu(header->frame_ctl);
+ frame_ctl = le16_to_cpu(header->frame_control);
netdev_info(ieee->dev,
"==========>received disassoc/deauth(%x) frame, reason code:%x\n",
WLAN_FC_GET_STYPE(frame_ctl),
@@ -2135,30 +1957,30 @@ inline int rtllib_rx_frame_softmac(struct rtllib_device *ieee,
struct rtllib_rx_stats *rx_stats, u16 type,
u16 stype)
{
- struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *)skb->data;
+ struct ieee80211_hdr_3addr *header = (struct ieee80211_hdr_3addr *)skb->data;
u16 frame_ctl;
if (!ieee->proto_started)
return 0;
- frame_ctl = le16_to_cpu(header->frame_ctl);
+ frame_ctl = le16_to_cpu(header->frame_control);
switch (WLAN_FC_GET_STYPE(frame_ctl)) {
- case RTLLIB_STYPE_ASSOC_RESP:
- case RTLLIB_STYPE_REASSOC_RESP:
+ case IEEE80211_STYPE_ASSOC_RESP:
+ case IEEE80211_STYPE_REASSOC_RESP:
if (rtllib_rx_assoc_resp(ieee, skb, rx_stats) == 1)
return 1;
break;
- case RTLLIB_STYPE_ASSOC_REQ:
- case RTLLIB_STYPE_REASSOC_REQ:
+ case IEEE80211_STYPE_ASSOC_REQ:
+ case IEEE80211_STYPE_REASSOC_REQ:
break;
- case RTLLIB_STYPE_AUTH:
+ case IEEE80211_STYPE_AUTH:
rtllib_rx_auth(ieee, skb, rx_stats);
break;
- case RTLLIB_STYPE_DISASSOC:
- case RTLLIB_STYPE_DEAUTH:
+ case IEEE80211_STYPE_DISASSOC:
+ case IEEE80211_STYPE_DEAUTH:
rtllib_rx_deauth(ieee, skb);
break;
- case RTLLIB_STYPE_MANAGE_ACT:
+ case IEEE80211_STYPE_ACTION:
rtllib_process_action(ieee, skb);
break;
default:
@@ -2263,128 +2085,6 @@ void rtllib_wake_all_queues(struct rtllib_device *ieee)
netif_tx_wake_all_queues(ieee->dev);
}
-static void rtllib_start_monitor_mode(struct rtllib_device *ieee)
-{
- /* reset hardware status */
- if (ieee->raw_tx)
- netif_carrier_on(ieee->dev);
-}
-
-static void rtllib_start_ibss_wq(void *data)
-{
- struct rtllib_device *ieee = container_of_dwork_rsl(data,
- struct rtllib_device, start_ibss_wq);
- /* iwconfig mode ad-hoc will schedule this and return
- * on the other hand this will block further iwconfig SET
- * operations because of the wx_mutex hold.
- * Anyway some most set operations set a flag to speed-up
- * (abort) this wq (when syncro scanning) before sleeping
- * on the mutex
- */
- if (!ieee->proto_started) {
- netdev_info(ieee->dev, "==========oh driver down return\n");
- return;
- }
- mutex_lock(&ieee->wx_mutex);
-
- if (ieee->current_network.ssid_len == 0) {
- strscpy(ieee->current_network.ssid, RTLLIB_DEFAULT_TX_ESSID,
- sizeof(ieee->current_network.ssid));
- ieee->current_network.ssid_len = strlen(RTLLIB_DEFAULT_TX_ESSID);
- ieee->ssid_set = 1;
- }
-
- ieee->link_state = MAC80211_NOLINK;
- ieee->mode = WIRELESS_MODE_G;
- /* check if we have this cell in our network list */
- rtllib_softmac_check_all_nets(ieee);
-
- /* if not then the state is not linked. Maybe the user switched to
- * ad-hoc mode just after being in monitor mode, or just after
- * being very few time in managed mode (so the card have had no
- * time to scan all the chans..) or we have just run up the iface
- * after setting ad-hoc mode. So we have to give another try..
- * Here, in ibss mode, should be safe to do this without extra care
- * (in bss mode we had to make sure no-one tried to associate when
- * we had just checked the ieee->link_state and we was going to start the
- * scan) because in ibss mode the rtllib_new_net function, when
- * finds a good net, just set the ieee->link_state to MAC80211_LINKED,
- * so, at worst, we waste a bit of time to initiate an unneeded syncro
- * scan, that will stop at the first round because it sees the state
- * associated.
- */
- if (ieee->link_state == MAC80211_NOLINK)
- rtllib_start_scan_syncro(ieee);
-
- /* the network definitively is not here.. create a new cell */
- if (ieee->link_state == MAC80211_NOLINK) {
- netdev_info(ieee->dev, "creating new IBSS cell\n");
- ieee->current_network.channel = ieee->bss_start_channel;
- if (!ieee->wap_set)
- eth_random_addr(ieee->current_network.bssid);
-
- ieee->current_network.rates_len = 4;
- ieee->current_network.rates[0] =
- RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_1MB;
- ieee->current_network.rates[1] =
- RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_2MB;
- ieee->current_network.rates[2] =
- RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_5MB;
- ieee->current_network.rates[3] =
- RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_11MB;
-
- ieee->current_network.rates_ex_len = 8;
- ieee->current_network.rates_ex[0] =
- RTLLIB_OFDM_RATE_6MB;
- ieee->current_network.rates_ex[1] =
- RTLLIB_OFDM_RATE_9MB;
- ieee->current_network.rates_ex[2] =
- RTLLIB_OFDM_RATE_12MB;
- ieee->current_network.rates_ex[3] =
- RTLLIB_OFDM_RATE_18MB;
- ieee->current_network.rates_ex[4] =
- RTLLIB_OFDM_RATE_24MB;
- ieee->current_network.rates_ex[5] =
- RTLLIB_OFDM_RATE_36MB;
- ieee->current_network.rates_ex[6] =
- RTLLIB_OFDM_RATE_48MB;
- ieee->current_network.rates_ex[7] =
- RTLLIB_OFDM_RATE_54MB;
- ieee->rate = 108;
-
- ieee->current_network.qos_data.supported = 0;
- ieee->set_wireless_mode(ieee->dev, WIRELESS_MODE_G);
- ieee->current_network.mode = ieee->mode;
- ieee->current_network.atim_window = 0;
- ieee->current_network.capability = WLAN_CAPABILITY_IBSS;
- }
-
- netdev_info(ieee->dev, "%s(): ieee->mode = %d\n", __func__, ieee->mode);
- if (ieee->mode == WIRELESS_MODE_N_24G)
- HTUseDefaultSetting(ieee);
- else
- ieee->ht_info->bCurrentHTSupport = false;
-
- ieee->SetHwRegHandler(ieee->dev, HW_VAR_MEDIA_STATUS,
- (u8 *)(&ieee->link_state));
-
- ieee->link_state = MAC80211_LINKED;
- ieee->link_change(ieee->dev);
-
- HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
- rtllib_start_send_beacons(ieee);
-
- notify_wx_assoc_event(ieee);
- netif_carrier_on(ieee->dev);
-
- mutex_unlock(&ieee->wx_mutex);
-}
-
-inline void rtllib_start_ibss(struct rtllib_device *ieee)
-{
- schedule_delayed_work(&ieee->start_ibss_wq, msecs_to_jiffies(150));
-}
-
/* this is called only in user context, with wx_mutex held */
static void rtllib_start_bss(struct rtllib_device *ieee)
{
@@ -2495,7 +2195,7 @@ static struct sk_buff *rtllib_get_beacon_(struct rtllib_device *ieee)
return NULL;
b = (struct rtllib_probe_response *)skb->data;
- b->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_BEACON);
+ b->header.frame_control = cpu_to_le16(IEEE80211_STYPE_BEACON);
return skb;
}
@@ -2510,7 +2210,7 @@ struct sk_buff *rtllib_get_beacon(struct rtllib_device *ieee)
return NULL;
b = (struct rtllib_probe_response *)skb->data;
- b->header.seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
+ b->header.seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
@@ -2521,31 +2221,28 @@ struct sk_buff *rtllib_get_beacon(struct rtllib_device *ieee)
}
EXPORT_SYMBOL(rtllib_get_beacon);
-void rtllib_softmac_stop_protocol(struct rtllib_device *ieee, u8 mesh_flag,
- u8 shutdown)
+void rtllib_softmac_stop_protocol(struct rtllib_device *ieee)
{
rtllib_stop_scan_syncro(ieee);
mutex_lock(&ieee->wx_mutex);
- rtllib_stop_protocol(ieee, shutdown);
+ rtllib_stop_protocol(ieee);
mutex_unlock(&ieee->wx_mutex);
}
EXPORT_SYMBOL(rtllib_softmac_stop_protocol);
-void rtllib_stop_protocol(struct rtllib_device *ieee, u8 shutdown)
+void rtllib_stop_protocol(struct rtllib_device *ieee)
{
if (!ieee->proto_started)
return;
- if (shutdown) {
- ieee->proto_started = 0;
- ieee->proto_stoppping = 1;
- ieee->rtllib_ips_leave(ieee->dev);
- }
+ ieee->proto_started = 0;
+ ieee->proto_stoppping = 1;
+ ieee->rtllib_ips_leave(ieee->dev);
- rtllib_stop_send_beacons(ieee);
del_timer_sync(&ieee->associate_timer);
+ mutex_unlock(&ieee->wx_mutex);
cancel_delayed_work_sync(&ieee->associate_retry_wq);
- cancel_delayed_work_sync(&ieee->start_ibss_wq);
+ mutex_lock(&ieee->wx_mutex);
cancel_delayed_work_sync(&ieee->link_change_wq);
rtllib_stop_scan(ieee);
@@ -2558,10 +2255,9 @@ void rtllib_stop_protocol(struct rtllib_device *ieee, u8 shutdown)
rtllib_disassociate(ieee);
}
- if (shutdown) {
- RemoveAllTS(ieee);
- ieee->proto_stoppping = 0;
- }
+ RemoveAllTS(ieee);
+ ieee->proto_stoppping = 0;
+
kfree(ieee->assocreq_ies);
ieee->assocreq_ies = NULL;
ieee->assocreq_ies_len = 0;
@@ -2570,7 +2266,7 @@ void rtllib_stop_protocol(struct rtllib_device *ieee, u8 shutdown)
ieee->assocresp_ies_len = 0;
}
-void rtllib_softmac_start_protocol(struct rtllib_device *ieee, u8 mesh_flag)
+void rtllib_softmac_start_protocol(struct rtllib_device *ieee)
{
mutex_lock(&ieee->wx_mutex);
rtllib_start_protocol(ieee);
@@ -2618,12 +2314,6 @@ void rtllib_start_protocol(struct rtllib_device *ieee)
case IW_MODE_INFRA:
rtllib_start_bss(ieee);
break;
- case IW_MODE_ADHOC:
- rtllib_start_ibss(ieee);
- break;
- case IW_MODE_MONITOR:
- rtllib_start_monitor_mode(ieee);
- break;
}
}
@@ -2682,7 +2372,6 @@ int rtllib_softmac_init(struct rtllib_device *ieee)
timer_setup(&ieee->beacon_timer, rtllib_send_beacon_cb, 0);
INIT_DELAYED_WORK(&ieee->link_change_wq, (void *)rtllib_link_change_wq);
- INIT_DELAYED_WORK(&ieee->start_ibss_wq, (void *)rtllib_start_ibss_wq);
INIT_WORK(&ieee->associate_complete_wq, (void *)rtllib_associate_complete_wq);
INIT_DELAYED_WORK(&ieee->associate_procedure_wq, (void *)rtllib_associate_procedure_wq);
INIT_DELAYED_WORK(&ieee->softmac_scan_wq, (void *)rtllib_softmac_scan_wq);
@@ -2703,15 +2392,11 @@ int rtllib_softmac_init(struct rtllib_device *ieee)
void rtllib_softmac_free(struct rtllib_device *ieee)
{
- mutex_lock(&ieee->wx_mutex);
- kfree(ieee->dot11d_info);
- ieee->dot11d_info = NULL;
del_timer_sync(&ieee->associate_timer);
cancel_delayed_work_sync(&ieee->associate_retry_wq);
cancel_delayed_work_sync(&ieee->associate_procedure_wq);
cancel_delayed_work_sync(&ieee->softmac_scan_wq);
- cancel_delayed_work_sync(&ieee->start_ibss_wq);
cancel_delayed_work_sync(&ieee->hw_wakeup_wq);
cancel_delayed_work_sync(&ieee->hw_sleep_wq);
cancel_delayed_work_sync(&ieee->link_change_wq);
@@ -2719,7 +2404,9 @@ void rtllib_softmac_free(struct rtllib_device *ieee)
cancel_work_sync(&ieee->ips_leave_wq);
cancel_work_sync(&ieee->wx_sync_scan_wq);
cancel_work_sync(&ieee->ps_task);
- mutex_unlock(&ieee->wx_mutex);
+
+ kfree(ieee->dot11d_info);
+ ieee->dot11d_info = NULL;
}
static inline struct sk_buff *
@@ -2737,7 +2424,7 @@ rtllib_disauth_skb(struct rtllib_network *beacon,
skb_reserve(skb, ieee->tx_headroom);
disauth = skb_put(skb, sizeof(struct rtllib_disauth));
- disauth->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_DEAUTH);
+ disauth->header.frame_control = cpu_to_le16(IEEE80211_STYPE_DEAUTH);
disauth->header.duration_id = 0;
ether_addr_copy(disauth->header.addr1, beacon->bssid);
@@ -2764,7 +2451,7 @@ rtllib_disassociate_skb(struct rtllib_network *beacon,
skb_reserve(skb, ieee->tx_headroom);
disass = skb_put(skb, sizeof(struct rtllib_disassoc));
- disass->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_DISASSOC);
+ disass->header.frame_control = cpu_to_le16(IEEE80211_STYPE_DISASSOC);
disass->header.duration_id = 0;
ether_addr_copy(disass->header.addr1, beacon->bssid);
@@ -2817,30 +2504,6 @@ u8 rtllib_ap_sec_type(struct rtllib_device *ieee)
}
}
-static void rtllib_MgntDisconnectIBSS(struct rtllib_device *rtllib)
-{
- u8 OpMode;
- u8 i;
- bool bFilterOutNonAssociatedBSSID = false;
-
- rtllib->link_state = MAC80211_NOLINK;
-
- for (i = 0; i < 6; i++)
- rtllib->current_network.bssid[i] = 0x55;
-
- rtllib->OpMode = RT_OP_MODE_NO_LINK;
- rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_BSSID,
- rtllib->current_network.bssid);
- OpMode = RT_OP_MODE_NO_LINK;
- rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_MEDIA_STATUS, &OpMode);
- rtllib_stop_send_beacons(rtllib);
-
- bFilterOutNonAssociatedBSSID = false;
- rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_CECHK_BSSID,
- (u8 *)(&bFilterOutNonAssociatedBSSID));
- notify_wx_assoc_event(rtllib);
-}
-
static void rtllib_MlmeDisassociateRequest(struct rtllib_device *rtllib,
u8 *asSta, u8 asRsn)
{
@@ -2888,8 +2551,6 @@ bool rtllib_MgntDisconnect(struct rtllib_device *rtllib, u8 asRsn)
rtllib->sta_wake_up(rtllib->dev);
if (rtllib->link_state == MAC80211_LINKED) {
- if (rtllib->iw_mode == IW_MODE_ADHOC)
- rtllib_MgntDisconnectIBSS(rtllib);
if (rtllib->iw_mode == IW_MODE_INFRA)
rtllib_MgntDisconnectAP(rtllib, asRsn);
}
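
Besides dropping IBSS support, the rtllib_softmac.c hunks above change the locking in rtllib_stop_protocol(): wx_mutex is released around cancel_delayed_work_sync(&ieee->associate_retry_wq) and re-taken afterwards. A minimal, hypothetical sketch of why that drop/re-lock is needed, assuming the retry work handler itself takes the same mutex (the handler is not shown in this diff; all names below are illustrative, not the driver's):

#include <linux/mutex.h>
#include <linux/workqueue.h>

/* Illustrative stand-ins only; this is not the driver's code. */
struct my_dev {
	struct mutex cfg_mutex;
	struct delayed_work retry_wq;
};

static void retry_work_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(to_delayed_work(work),
					  struct my_dev, retry_wq);

	mutex_lock(&dev->cfg_mutex);	/* same lock the stopper holds */
	/* ... retry the association ... */
	mutex_unlock(&dev->cfg_mutex);
}

/* Called with dev->cfg_mutex held, like rtllib_stop_protocol(). */
static void stop_protocol(struct my_dev *dev)
{
	/*
	 * cancel_delayed_work_sync() waits for a running handler to finish.
	 * If that handler is blocked on cfg_mutex, waiting while still
	 * holding cfg_mutex would deadlock, so drop it around the cancel.
	 */
	mutex_unlock(&dev->cfg_mutex);
	cancel_delayed_work_sync(&dev->retry_wq);
	mutex_lock(&dev->cfg_mutex);
}
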
diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
index 0b690f0ffeef..f32584291704 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
@@ -50,12 +50,6 @@ int rtllib_wx_set_freq(struct rtllib_device *ieee, struct iw_request_info *a,
}
ieee->current_network.channel = fwrq->m;
ieee->set_chan(ieee->dev, ieee->current_network.channel);
-
- if (ieee->iw_mode == IW_MODE_ADHOC)
- if (ieee->link_state == MAC80211_LINKED) {
- rtllib_stop_send_beacons(ieee);
- rtllib_start_send_beacons(ieee);
- }
}
ret = 0;
@@ -140,7 +134,7 @@ int rtllib_wx_set_wap(struct rtllib_device *ieee,
}
if (ifup)
- rtllib_stop_protocol(ieee, true);
+ rtllib_stop_protocol(ieee);
/* just to avoid to give inconsistent infos in the
* get wx method. not really needed otherwise
@@ -258,7 +252,6 @@ int rtllib_wx_set_mode(struct rtllib_device *ieee, struct iw_request_info *a,
mutex_lock(&ieee->wx_mutex);
switch (wrqu->mode) {
case IW_MODE_MONITOR:
- case IW_MODE_ADHOC:
case IW_MODE_INFRA:
break;
case IW_MODE_AUTO:
@@ -284,7 +277,7 @@ int rtllib_wx_set_mode(struct rtllib_device *ieee, struct iw_request_info *a,
if (!ieee->proto_started) {
ieee->iw_mode = wrqu->mode;
} else {
- rtllib_stop_protocol(ieee, true);
+ rtllib_stop_protocol(ieee);
ieee->iw_mode = wrqu->mode;
rtllib_start_protocol(ieee);
}
@@ -317,7 +310,6 @@ void rtllib_wx_sync_scan_wq(void *data)
rtllib_sta_ps_send_null_frame(ieee, 1);
rtllib_stop_all_queues(ieee);
- rtllib_stop_send_beacons(ieee);
ieee->link_state = MAC80211_LINKED_SCANNING;
ieee->link_change(ieee->dev);
/* wait for ps packet to be kicked out successfully */
@@ -325,7 +317,7 @@ void rtllib_wx_sync_scan_wq(void *data)
ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_BACKUP);
- if (ieee->ht_info->bCurrentHTSupport && ieee->ht_info->enable_ht &&
+ if (ieee->ht_info->current_ht_support && ieee->ht_info->enable_ht &&
ieee->ht_info->bCurBW40MHz) {
b40M = 1;
chan_offset = ieee->ht_info->CurSTAExtChnlOffset;
@@ -361,9 +353,6 @@ void rtllib_wx_sync_scan_wq(void *data)
ieee->link_detect_info.NumRecvBcnInPeriod = 1;
ieee->link_detect_info.NumRecvDataInPeriod = 1;
}
- if (ieee->iw_mode == IW_MODE_ADHOC)
- rtllib_start_send_beacons(ieee);
-
rtllib_wake_all_queues(ieee);
out:
@@ -412,7 +401,7 @@ int rtllib_wx_set_essid(struct rtllib_device *ieee,
}
if (proto_started)
- rtllib_stop_protocol(ieee, true);
+ rtllib_stop_protocol(ieee);
/* this is just to be sure that the GET wx callback
* has consistent infos. not needed otherwise
@@ -447,38 +436,6 @@ int rtllib_wx_get_mode(struct rtllib_device *ieee, struct iw_request_info *a,
}
EXPORT_SYMBOL(rtllib_wx_get_mode);
-int rtllib_wx_set_rawtx(struct rtllib_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int *parms = (int *)extra;
- int enable = (parms[0] > 0);
- short prev = ieee->raw_tx;
-
- mutex_lock(&ieee->wx_mutex);
-
- if (enable)
- ieee->raw_tx = 1;
- else
- ieee->raw_tx = 0;
-
- netdev_info(ieee->dev, "raw TX is %s\n",
- ieee->raw_tx ? "enabled" : "disabled");
-
- if (ieee->iw_mode == IW_MODE_MONITOR) {
- if (prev == 0 && ieee->raw_tx)
- netif_carrier_on(ieee->dev);
-
- if (prev && ieee->raw_tx == 1)
- netif_carrier_off(ieee->dev);
- }
-
- mutex_unlock(&ieee->wx_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL(rtllib_wx_set_rawtx);
-
int rtllib_wx_get_name(struct rtllib_device *ieee, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
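
With the shutdown flag gone, the wext handlers above (set_wap, set_mode, set_essid) all follow the same shape: if the soft-MAC protocol is already running, tear it down, apply the new setting, and restart it, all under wx_mutex. A compact sketch of that pattern with hypothetical names (stand-ins for rtllib_stop_protocol()/rtllib_start_protocol(), not the driver's code):

#include <linux/mutex.h>
#include <linux/types.h>

struct my_softmac {
	struct mutex wx_mutex;
	int iw_mode;
	bool proto_started;
};

/* assumed to exist elsewhere in this sketch */
void my_stop_protocol(struct my_softmac *sm);
void my_start_protocol(struct my_softmac *sm);

static int my_set_mode(struct my_softmac *sm, int mode)
{
	mutex_lock(&sm->wx_mutex);
	if (!sm->proto_started) {
		sm->iw_mode = mode;	/* nothing running: just record it */
	} else {
		my_stop_protocol(sm);	/* drop the current association */
		sm->iw_mode = mode;
		my_start_protocol(sm);	/* come back up in the new mode */
	}
	mutex_unlock(&sm->wx_mutex);
	return 0;
}
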
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 4199aee930f0..9bf679438ad1 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -268,12 +268,12 @@ static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
{
struct rt_hi_throughput *ht_info = ieee->ht_info;
struct tx_ts_record *pTxTs = NULL;
- struct rtllib_hdr_1addr *hdr = (struct rtllib_hdr_1addr *)skb->data;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
if (rtllib_act_scanning(ieee, false))
return;
- if (!ht_info->bCurrentHTSupport || !ht_info->enable_ht)
+ if (!ht_info->current_ht_support || !ht_info->enable_ht)
return;
if (!IsQoSDataFrame(skb->data))
return;
@@ -289,7 +289,7 @@ static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
return;
if (ht_info->bCurrentAMPDUEnable) {
- if (!GetTs(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1,
+ if (!rtllib_get_ts(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1,
skb->priority, TX_DIR, true)) {
netdev_info(ieee->dev, "%s: can't get TS\n", __func__);
return;
@@ -354,7 +354,7 @@ static void rtllib_query_HTCapShortGI(struct rtllib_device *ieee,
tcb_desc->bUseShortGI = false;
- if (!ht_info->bCurrentHTSupport || !ht_info->enable_ht)
+ if (!ht_info->current_ht_support || !ht_info->enable_ht)
return;
if (ht_info->forced_short_gi) {
@@ -375,7 +375,7 @@ static void rtllib_query_BandwidthMode(struct rtllib_device *ieee,
tcb_desc->bPacketBW = false;
- if (!ht_info->bCurrentHTSupport || !ht_info->enable_ht)
+ if (!ht_info->current_ht_support || !ht_info->enable_ht)
return;
if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
@@ -438,7 +438,7 @@ static void rtllib_query_protectionmode(struct rtllib_device *ieee,
tcb_desc->rts_rate = MGN_24M;
break;
}
- if (ht_info->bCurrentHTSupport && ht_info->enable_ht) {
+ if (ht_info->current_ht_support && ht_info->enable_ht) {
u8 HTOpMode = ht_info->current_op_mode;
if ((ht_info->bCurBW40MHz && (HTOpMode == 2 ||
@@ -482,8 +482,7 @@ static void rtllib_txrate_selectmode(struct rtllib_device *ieee,
tcb_desc->tx_use_drv_assinged_rate = true;
if (!tcb_desc->tx_dis_rate_fallback ||
!tcb_desc->tx_use_drv_assinged_rate) {
- if (ieee->iw_mode == IW_MODE_INFRA ||
- ieee->iw_mode == IW_MODE_ADHOC)
+ if (ieee->iw_mode == IW_MODE_INFRA)
tcb_desc->ratr_index = 0;
}
}
@@ -496,13 +495,13 @@ static u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb,
if (is_multicast_ether_addr(dst))
return 0;
if (IsQoSDataFrame(skb->data)) {
- struct tx_ts_record *pTS = NULL;
+ struct tx_ts_record *ts = NULL;
- if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst,
+ if (!rtllib_get_ts(ieee, (struct ts_common_info **)(&ts), dst,
skb->priority, TX_DIR, true))
return 0;
- seqnum = pTS->TxCurSeq;
- pTS->TxCurSeq = (pTS->TxCurSeq + 1) % 4096;
+ seqnum = ts->TxCurSeq;
+ ts->TxCurSeq = (ts->TxCurSeq + 1) % 4096;
return seqnum;
}
return 0;
@@ -544,17 +543,17 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
struct rtllib_device *ieee = (struct rtllib_device *)
netdev_priv_rsl(dev);
struct rtllib_txb *txb = NULL;
- struct rtllib_hdr_3addrqos *frag_hdr;
+ struct ieee80211_qos_hdr *frag_hdr;
int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
unsigned long flags;
struct net_device_stats *stats = &ieee->stats;
int ether_type = 0, encrypt;
int bytes, fc, qos_ctl = 0, hdr_len;
struct sk_buff *skb_frag;
- struct rtllib_hdr_3addrqos header = { /* Ensure zero initialized */
+ struct ieee80211_qos_hdr header = { /* Ensure zero initialized */
.duration_id = 0,
- .seq_ctl = 0,
- .qos_ctl = 0
+ .seq_ctrl = 0,
+ .qos_ctrl = 0
};
int qos_activated = ieee->current_network.qos_data.active;
u8 dest[ETH_ALEN];
@@ -577,297 +576,271 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
goto success;
}
- if (likely(ieee->raw_tx == 0)) {
- if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
- netdev_warn(ieee->dev, "skb too small (%d).\n",
- skb->len);
- goto success;
- }
- /* Save source and destination addresses */
- ether_addr_copy(dest, skb->data);
- ether_addr_copy(src, skb->data + ETH_ALEN);
-
- memset(skb->cb, 0, sizeof(skb->cb));
- ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);
-
- if (ieee->iw_mode == IW_MODE_MONITOR) {
- txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
- if (unlikely(!txb)) {
- netdev_warn(ieee->dev,
- "Could not allocate TXB\n");
- goto failed;
- }
+ if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
+ netdev_warn(ieee->dev, "skb too small (%d).\n",
+ skb->len);
+ goto success;
+ }
+ /* Save source and destination addresses */
+ ether_addr_copy(dest, skb->data);
+ ether_addr_copy(src, skb->data + ETH_ALEN);
- txb->encrypted = 0;
- txb->payload_size = cpu_to_le16(skb->len);
- skb_put_data(txb->fragments[0], skb->data, skb->len);
+ memset(skb->cb, 0, sizeof(skb->cb));
+ ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);
- goto success;
+ if (ieee->iw_mode == IW_MODE_MONITOR) {
+ txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
+ if (unlikely(!txb)) {
+ netdev_warn(ieee->dev,
+ "Could not allocate TXB\n");
+ goto failed;
}
- if (skb->len > 282) {
- if (ether_type == ETH_P_IP) {
- const struct iphdr *ip = (struct iphdr *)
- ((u8 *)skb->data + 14);
- if (ip->protocol == IPPROTO_UDP) {
- struct udphdr *udp;
-
- udp = (struct udphdr *)((u8 *)ip +
- (ip->ihl << 2));
- if (((((u8 *)udp)[1] == 68) &&
- (((u8 *)udp)[3] == 67)) ||
- ((((u8 *)udp)[1] == 67) &&
- (((u8 *)udp)[3] == 68))) {
- bdhcp = true;
- ieee->LPSDelayCnt = 200;
- }
+ txb->encrypted = 0;
+ txb->payload_size = cpu_to_le16(skb->len);
+ skb_put_data(txb->fragments[0], skb->data, skb->len);
+
+ goto success;
+ }
+
+ if (skb->len > 282) {
+ if (ether_type == ETH_P_IP) {
+ const struct iphdr *ip = (struct iphdr *)
+ ((u8 *)skb->data + 14);
+ if (ip->protocol == IPPROTO_UDP) {
+ struct udphdr *udp;
+
+ udp = (struct udphdr *)((u8 *)ip +
+ (ip->ihl << 2));
+ if (((((u8 *)udp)[1] == 68) &&
+ (((u8 *)udp)[3] == 67)) ||
+ ((((u8 *)udp)[1] == 67) &&
+ (((u8 *)udp)[3] == 68))) {
+ bdhcp = true;
+ ieee->LPSDelayCnt = 200;
}
- } else if (ether_type == ETH_P_ARP) {
- netdev_info(ieee->dev,
- "=================>DHCP Protocol start tx ARP pkt!!\n");
- bdhcp = true;
- ieee->LPSDelayCnt =
- ieee->current_network.tim.tim_count;
}
+ } else if (ether_type == ETH_P_ARP) {
+ netdev_info(ieee->dev,
+ "=================>DHCP Protocol start tx ARP pkt!!\n");
+ bdhcp = true;
+ ieee->LPSDelayCnt =
+ ieee->current_network.tim.tim_count;
}
+ }
- skb->priority = rtllib_classify(skb, IsAmsdu);
- crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
- encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) && crypt && crypt->ops;
- if (!encrypt && ieee->ieee802_1x &&
- ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
- stats->tx_dropped++;
- goto success;
- }
- if (crypt && !encrypt && ether_type == ETH_P_PAE) {
- struct eapol *eap = (struct eapol *)(skb->data +
- sizeof(struct ethhdr) - SNAP_SIZE -
- sizeof(u16));
- netdev_dbg(ieee->dev,
- "TX: IEEE 802.11 EAPOL frame: %s\n",
- eap_get_type(eap->type));
- }
+ skb->priority = rtllib_classify(skb, IsAmsdu);
+ crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
+ encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) && crypt && crypt->ops;
+ if (!encrypt && ieee->ieee802_1x &&
+ ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
+ stats->tx_dropped++;
+ goto success;
+ }
+ if (crypt && !encrypt && ether_type == ETH_P_PAE) {
+ struct eapol *eap = (struct eapol *)(skb->data +
+ sizeof(struct ethhdr) - SNAP_SIZE -
+ sizeof(u16));
+ netdev_dbg(ieee->dev,
+ "TX: IEEE 802.11 EAPOL frame: %s\n",
+ eap_get_type(eap->type));
+ }
- /* Advance the SKB to the start of the payload */
- skb_pull(skb, sizeof(struct ethhdr));
+ /* Advance the SKB to the start of the payload */
+ skb_pull(skb, sizeof(struct ethhdr));
- /* Determine total amount of storage required for TXB packets */
- bytes = skb->len + SNAP_SIZE + sizeof(u16);
+ /* Determine total amount of storage required for TXB packets */
+ bytes = skb->len + SNAP_SIZE + sizeof(u16);
- if (encrypt)
- fc = RTLLIB_FTYPE_DATA | RTLLIB_FCTL_WEP;
- else
- fc = RTLLIB_FTYPE_DATA;
+ if (encrypt)
+ fc = RTLLIB_FTYPE_DATA | IEEE80211_FCTL_PROTECTED;
+ else
+ fc = RTLLIB_FTYPE_DATA;
- if (qos_activated)
- fc |= RTLLIB_STYPE_QOS_DATA;
- else
- fc |= RTLLIB_STYPE_DATA;
+ if (qos_activated)
+ fc |= IEEE80211_STYPE_QOS_DATA;
+ else
+ fc |= IEEE80211_STYPE_DATA;
- if (ieee->iw_mode == IW_MODE_INFRA) {
- fc |= RTLLIB_FCTL_TODS;
- /* To DS: Addr1 = BSSID, Addr2 = SA,
- * Addr3 = DA
- */
- ether_addr_copy(header.addr1,
- ieee->current_network.bssid);
- ether_addr_copy(header.addr2, src);
- if (IsAmsdu)
- ether_addr_copy(header.addr3,
- ieee->current_network.bssid);
- else
- ether_addr_copy(header.addr3, dest);
- } else if (ieee->iw_mode == IW_MODE_ADHOC) {
- /* not From/To DS: Addr1 = DA, Addr2 = SA,
- * Addr3 = BSSID
- */
- ether_addr_copy(header.addr1, dest);
- ether_addr_copy(header.addr2, src);
+ if (ieee->iw_mode == IW_MODE_INFRA) {
+ fc |= IEEE80211_FCTL_TODS;
+ /* To DS: Addr1 = BSSID, Addr2 = SA,
+ * Addr3 = DA
+ */
+ ether_addr_copy(header.addr1,
+ ieee->current_network.bssid);
+ ether_addr_copy(header.addr2, src);
+ if (IsAmsdu)
ether_addr_copy(header.addr3,
ieee->current_network.bssid);
- }
+ else
+ ether_addr_copy(header.addr3, dest);
+ }
- bIsMulticast = is_multicast_ether_addr(header.addr1);
+ bIsMulticast = is_multicast_ether_addr(header.addr1);
- header.frame_ctl = cpu_to_le16(fc);
+ header.frame_control = cpu_to_le16(fc);
- /* Determine fragmentation size based on destination (multicast
- * and broadcast are not fragmented)
- */
- if (bIsMulticast) {
- frag_size = MAX_FRAG_THRESHOLD;
- qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
- } else {
- frag_size = ieee->fts;
- qos_ctl = 0;
+ /* Determine fragmentation size based on destination (multicast
+ * and broadcast are not fragmented)
+ */
+ if (bIsMulticast) {
+ frag_size = MAX_FRAG_THRESHOLD;
+ qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
+ } else {
+ frag_size = ieee->fts;
+ qos_ctl = 0;
+ }
+
+ if (qos_activated) {
+ hdr_len = RTLLIB_3ADDR_LEN + 2;
+
+ /* in case we are a client verify acm is not set for this ac */
+ while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) {
+ netdev_info(ieee->dev, "skb->priority = %x\n",
+ skb->priority);
+ if (wme_downgrade_ac(skb))
+ break;
+ netdev_info(ieee->dev, "converted skb->priority = %x\n",
+ skb->priority);
}
- if (qos_activated) {
- hdr_len = RTLLIB_3ADDR_LEN + 2;
-
- /* in case we are a client verify acm is not set for this ac */
- while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) {
- netdev_info(ieee->dev, "skb->priority = %x\n",
- skb->priority);
- if (wme_downgrade_ac(skb))
- break;
- netdev_info(ieee->dev, "converted skb->priority = %x\n",
- skb->priority);
- }
+ qos_ctl |= skb->priority;
+ header.qos_ctrl = cpu_to_le16(qos_ctl & RTLLIB_QOS_TID);
+
+ } else {
+ hdr_len = RTLLIB_3ADDR_LEN;
+ }
+ /* Determine amount of payload per fragment. Regardless of if
+ * this stack is providing the full 802.11 header, one will
+ * eventually be affixed to this fragment -- so we must account
+ * for it when determining the amount of payload space.
+ */
+ bytes_per_frag = frag_size - hdr_len;
+ if (ieee->config &
+ (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
+ bytes_per_frag -= RTLLIB_FCS_LEN;
+
+ /* Each fragment may need to have room for encrypting
+ * pre/postfix
+ */
+ if (encrypt) {
+ bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
+ crypt->ops->extra_mpdu_postfix_len +
+ crypt->ops->extra_msdu_prefix_len +
+ crypt->ops->extra_msdu_postfix_len;
+ }
+ /* Number of fragments is the total bytes_per_frag /
+ * payload_per_fragment
+ */
+ nr_frags = bytes / bytes_per_frag;
+ bytes_last_frag = bytes % bytes_per_frag;
+ if (bytes_last_frag)
+ nr_frags++;
+ else
+ bytes_last_frag = bytes_per_frag;
- qos_ctl |= skb->priority;
- header.qos_ctl = cpu_to_le16(qos_ctl & RTLLIB_QOS_TID);
+ /* When we allocate the TXB we allocate enough space for the
+ * reserve and full fragment bytes (bytes_per_frag doesn't
+ * include prefix, postfix, header, FCS, etc.)
+ */
+ txb = rtllib_alloc_txb(nr_frags, frag_size +
+ ieee->tx_headroom, GFP_ATOMIC);
+ if (unlikely(!txb)) {
+ netdev_warn(ieee->dev, "Could not allocate TXB\n");
+ goto failed;
+ }
+ txb->encrypted = encrypt;
+ txb->payload_size = cpu_to_le16(bytes);
+
+ if (qos_activated)
+ txb->queue_index = UP2AC(skb->priority);
+ else
+ txb->queue_index = WME_AC_BE;
+ for (i = 0; i < nr_frags; i++) {
+ skb_frag = txb->fragments[i];
+ tcb_desc = (struct cb_desc *)(skb_frag->cb +
+ MAX_DEV_ADDR_SIZE);
+ if (qos_activated) {
+ skb_frag->priority = skb->priority;
+ tcb_desc->queue_index = UP2AC(skb->priority);
} else {
- hdr_len = RTLLIB_3ADDR_LEN;
+ skb_frag->priority = WME_AC_BE;
+ tcb_desc->queue_index = WME_AC_BE;
}
- /* Determine amount of payload per fragment. Regardless of if
- * this stack is providing the full 802.11 header, one will
- * eventually be affixed to this fragment -- so we must account
- * for it when determining the amount of payload space.
- */
- bytes_per_frag = frag_size - hdr_len;
- if (ieee->config &
- (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
- bytes_per_frag -= RTLLIB_FCS_LEN;
+ skb_reserve(skb_frag, ieee->tx_headroom);
- /* Each fragment may need to have room for encrypting
- * pre/postfix
- */
if (encrypt) {
- bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
- crypt->ops->extra_mpdu_postfix_len +
- crypt->ops->extra_msdu_prefix_len +
- crypt->ops->extra_msdu_postfix_len;
+ if (ieee->hwsec_active)
+ tcb_desc->bHwSec = 1;
+ else
+ tcb_desc->bHwSec = 0;
+ skb_reserve(skb_frag,
+ crypt->ops->extra_mpdu_prefix_len +
+ crypt->ops->extra_msdu_prefix_len);
+ } else {
+ tcb_desc->bHwSec = 0;
}
- /* Number of fragments is the total bytes_per_frag /
- * payload_per_fragment
- */
- nr_frags = bytes / bytes_per_frag;
- bytes_last_frag = bytes % bytes_per_frag;
- if (bytes_last_frag)
- nr_frags++;
- else
- bytes_last_frag = bytes_per_frag;
+ frag_hdr = skb_put_data(skb_frag, &header, hdr_len);
- /* When we allocate the TXB we allocate enough space for the
- * reserve and full fragment bytes (bytes_per_frag doesn't
- * include prefix, postfix, header, FCS, etc.)
+ /* If this is not the last fragment, then add the
+ * MOREFRAGS bit to the frame control
*/
- txb = rtllib_alloc_txb(nr_frags, frag_size +
- ieee->tx_headroom, GFP_ATOMIC);
- if (unlikely(!txb)) {
- netdev_warn(ieee->dev, "Could not allocate TXB\n");
- goto failed;
- }
- txb->encrypted = encrypt;
- txb->payload_size = cpu_to_le16(bytes);
+ if (i != nr_frags - 1) {
+ frag_hdr->frame_control = cpu_to_le16(fc |
+ IEEE80211_FCTL_MOREFRAGS);
+ bytes = bytes_per_frag;
- if (qos_activated)
- txb->queue_index = UP2AC(skb->priority);
- else
- txb->queue_index = WME_AC_BE;
-
- for (i = 0; i < nr_frags; i++) {
- skb_frag = txb->fragments[i];
- tcb_desc = (struct cb_desc *)(skb_frag->cb +
- MAX_DEV_ADDR_SIZE);
- if (qos_activated) {
- skb_frag->priority = skb->priority;
- tcb_desc->queue_index = UP2AC(skb->priority);
- } else {
- skb_frag->priority = WME_AC_BE;
- tcb_desc->queue_index = WME_AC_BE;
- }
- skb_reserve(skb_frag, ieee->tx_headroom);
-
- if (encrypt) {
- if (ieee->hwsec_active)
- tcb_desc->bHwSec = 1;
- else
- tcb_desc->bHwSec = 0;
- skb_reserve(skb_frag,
- crypt->ops->extra_mpdu_prefix_len +
- crypt->ops->extra_msdu_prefix_len);
- } else {
- tcb_desc->bHwSec = 0;
- }
- frag_hdr = skb_put_data(skb_frag, &header, hdr_len);
-
- /* If this is not the last fragment, then add the
- * MOREFRAGS bit to the frame control
- */
- if (i != nr_frags - 1) {
- frag_hdr->frame_ctl = cpu_to_le16(fc |
- RTLLIB_FCTL_MOREFRAGS);
- bytes = bytes_per_frag;
-
- } else {
- /* The last fragment has the remaining length */
- bytes = bytes_last_frag;
- }
- if ((qos_activated) && (!bIsMulticast)) {
- frag_hdr->seq_ctl =
- cpu_to_le16(rtllib_query_seqnum(ieee, skb_frag,
- header.addr1));
- frag_hdr->seq_ctl =
- cpu_to_le16(le16_to_cpu(frag_hdr->seq_ctl) << 4 | i);
- } else {
- frag_hdr->seq_ctl =
- cpu_to_le16(ieee->seq_ctrl[0] << 4 | i);
- }
- /* Put a SNAP header on the first fragment */
- if (i == 0) {
- rtllib_put_snap(skb_put(skb_frag,
- SNAP_SIZE +
- sizeof(u16)), ether_type);
- bytes -= SNAP_SIZE + sizeof(u16);
- }
-
- skb_put_data(skb_frag, skb->data, bytes);
-
- /* Advance the SKB... */
- skb_pull(skb, bytes);
-
- /* Encryption routine will move the header forward in
- * order to insert the IV between the header and the
- * payload
- */
- if (encrypt)
- rtllib_encrypt_fragment(ieee, skb_frag,
- hdr_len);
- if (ieee->config &
- (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
- skb_put(skb_frag, 4);
+ } else {
+ /* The last fragment has the remaining length */
+ bytes = bytes_last_frag;
}
-
if ((qos_activated) && (!bIsMulticast)) {
- if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
- ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
- else
- ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
+ frag_hdr->seq_ctrl =
+ cpu_to_le16(rtllib_query_seqnum(ieee, skb_frag,
+ header.addr1));
+ frag_hdr->seq_ctrl =
+ cpu_to_le16(le16_to_cpu(frag_hdr->seq_ctrl) << 4 | i);
} else {
- if (ieee->seq_ctrl[0] == 0xFFF)
- ieee->seq_ctrl[0] = 0;
- else
- ieee->seq_ctrl[0]++;
+ frag_hdr->seq_ctrl =
+ cpu_to_le16(ieee->seq_ctrl[0] << 4 | i);
}
- } else {
- if (unlikely(skb->len < sizeof(struct rtllib_hdr_3addr))) {
- netdev_warn(ieee->dev, "skb too small (%d).\n",
- skb->len);
- goto success;
+ /* Put a SNAP header on the first fragment */
+ if (i == 0) {
+ rtllib_put_snap(skb_put(skb_frag,
+ SNAP_SIZE +
+ sizeof(u16)), ether_type);
+ bytes -= SNAP_SIZE + sizeof(u16);
}
- txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
- if (!txb) {
- netdev_warn(ieee->dev, "Could not allocate TXB\n");
- goto failed;
- }
+ skb_put_data(skb_frag, skb->data, bytes);
- txb->encrypted = 0;
- txb->payload_size = cpu_to_le16(skb->len);
- skb_put_data(txb->fragments[0], skb->data, skb->len);
+ /* Advance the SKB... */
+ skb_pull(skb, bytes);
+
+ /* Encryption routine will move the header forward in
+ * order to insert the IV between the header and the
+ * payload
+ */
+ if (encrypt)
+ rtllib_encrypt_fragment(ieee, skb_frag,
+ hdr_len);
+ if (ieee->config &
+ (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
+ skb_put(skb_frag, 4);
+ }
+
+ if ((qos_activated) && (!bIsMulticast)) {
+ if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
+ ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
+ else
+ ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
+ } else {
+ if (ieee->seq_ctrl[0] == 0xFFF)
+ ieee->seq_ctrl[0] = 0;
+ else
+ ieee->seq_ctrl[0]++;
}
success:
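
The rtllib_tx.c rewrite above is mostly a re-indentation after the raw-TX branch was removed; the fragment accounting itself is unchanged. The stand-alone sketch below replays that arithmetic with made-up numbers so the rounding behaviour (the remainder is carried by a shorter final fragment) is easy to check; the real code additionally subtracts FCS and crypto prefix/postfix lengths from bytes_per_frag.

#include <stdio.h>

int main(void)
{
	int bytes = 2500;	/* payload incl. SNAP header (example value) */
	int frag_size = 1500;	/* stands in for ieee->fts                   */
	int hdr_len = 24 + 2;	/* 3-address header + QoS control field      */
	int bytes_per_frag = frag_size - hdr_len;
	int nr_frags = bytes / bytes_per_frag;
	int bytes_last_frag = bytes % bytes_per_frag;

	if (bytes_last_frag)
		nr_frags++;	/* remainder goes in a final, shorter fragment */
	else
		bytes_last_frag = bytes_per_frag;

	printf("%d fragments of up to %d bytes, %d in the last\n",
	       nr_frags, bytes_per_frag, bytes_last_frag);
	return 0;
}
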
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index e9469bfef3dd..a37250de7ba3 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -29,7 +29,7 @@ static inline char *rtl819x_translate_scan(struct rtllib_device *ieee,
struct iw_request_info *info)
{
char custom[MAX_CUSTOM_LEN];
- char proto_name[IFNAMSIZ];
+ char proto_name[6];
char *pname = proto_name;
char *p;
struct iw_event iwe;
diff --git a/drivers/staging/rtl8192u/Kconfig b/drivers/staging/rtl8192u/Kconfig
deleted file mode 100644
index f3b112a058ca..000000000000
--- a/drivers/staging/rtl8192u/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config RTL8192U
- tristate "RealTek RTL8192U Wireless LAN NIC driver"
- depends on PCI && WLAN && USB
- depends on m
- select WIRELESS_EXT
- select WEXT_PRIV
- select CRC32
- select CRYPTO
- select CRYPTO_AES
- select CRYPTO_CCM
- select CRYPTO_LIB_ARC4
diff --git a/drivers/staging/rtl8192u/Makefile b/drivers/staging/rtl8192u/Makefile
deleted file mode 100644
index d32dfd89a606..000000000000
--- a/drivers/staging/rtl8192u/Makefile
+++ /dev/null
@@ -1,27 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-NIC_SELECT = RTL8192U
-
-ccflags-y += -DCONFIG_FORCE_HARD_FLOAT=y
-ccflags-y += -DJACKSON_NEW_8187 -DJACKSON_NEW_RX
-ccflags-y += -DTHOMAS_BEACON -DTHOMAS_TASKLET -DTHOMAS_SKB -DTHOMAS_TURBO
-
-r8192u_usb-y := r8192U_core.o r8180_93cx6.o r8192U_wx.o \
- r8190_rtl8256.o r819xU_phy.o r819xU_firmware.o \
- r819xU_cmdpkt.o r8192U_dm.o r819xU_firmware_img.o \
- r8192U_debugfs.o \
- ieee80211/ieee80211_crypt.o \
- ieee80211/ieee80211_crypt_tkip.o \
- ieee80211/ieee80211_crypt_ccmp.o \
- ieee80211/ieee80211_crypt_wep.o \
- ieee80211/ieee80211_rx.o \
- ieee80211/ieee80211_softmac.o \
- ieee80211/ieee80211_tx.o \
- ieee80211/ieee80211_wx.o \
- ieee80211/ieee80211_module.o \
- ieee80211/ieee80211_softmac_wx.o \
- ieee80211/rtl819x_HTProc.o \
- ieee80211/rtl819x_TSProc.o \
- ieee80211/rtl819x_BAProc.o \
- ieee80211/dot11d.o
-
-obj-$(CONFIG_RTL8192U) += r8192u_usb.o
diff --git a/drivers/staging/rtl8192u/TODO b/drivers/staging/rtl8192u/TODO
deleted file mode 100644
index ab9d5d145b3b..000000000000
--- a/drivers/staging/rtl8192u/TODO
+++ /dev/null
@@ -1,16 +0,0 @@
-To-do list:
-
-* Correct the coding style according to Linux guidelines; please read the document
- at https://www.kernel.org/doc/html/latest/process/coding-style.html.
-* Remove unnecessary debugging/printing macros; for those that are still needed
- use the proper kernel API (pr_debug(), dev_dbg(), netdev_dbg()).
-* Remove dead code such as unusued functions, variables, fields, etc..
-* Use in-kernel API and remove unnecessary wrappers where possible.
-* Fix bugs due to code that sleeps in atomic context.
-* Remove the HAL layer and migrate its functionality into the relevant parts of
- the driver.
-* Switch to use LIB80211.
-* Switch to use MAC80211.
-* Switch to use CFG80211.
-* Improve the error handling of various functions, particularly those that use
- existing kernel APIs.
diff --git a/drivers/staging/rtl8192u/authors b/drivers/staging/rtl8192u/authors
deleted file mode 100644
index 0fab11228b48..000000000000
--- a/drivers/staging/rtl8192u/authors
+++ /dev/null
@@ -1 +0,0 @@
-Andrea Merello <andrea.merello@gmail.com>
diff --git a/drivers/staging/rtl8192u/changes b/drivers/staging/rtl8192u/changes
deleted file mode 100644
index 0485d6eec7b5..000000000000
--- a/drivers/staging/rtl8192u/changes
+++ /dev/null
@@ -1,4 +0,0 @@
-v 0.1
-
-First version.
-This is based on the rtl8180-sa2400 pre-0.22-CVS code..
diff --git a/drivers/staging/rtl8192u/ieee80211/dot11d.c b/drivers/staging/rtl8192u/ieee80211/dot11d.c
deleted file mode 100644
index ddaf66fa0f93..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/dot11d.c
+++ /dev/null
@@ -1,174 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Implement 802.11d. */
-
-#include "dot11d.h"
-
-void rtl8192u_dot11d_init(struct ieee80211_device *ieee)
-{
- struct rt_dot11d_info *dot11d_info = GET_DOT11D_INFO(ieee);
-
- dot11d_info->dot11d_enabled = false;
-
- dot11d_info->state = DOT11D_STATE_NONE;
- dot11d_info->country_ie_len = 0;
- memset(dot11d_info->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
- memset(dot11d_info->max_tx_pwr_dbm_list, 0xFF, MAX_CHANNEL_NUMBER + 1);
- RESET_CIE_WATCHDOG(ieee);
-}
-EXPORT_SYMBOL(rtl8192u_dot11d_init);
-
-/* Reset to the state as we are just entering a regulatory domain. */
-void dot11d_reset(struct ieee80211_device *ieee)
-{
- u32 i;
- struct rt_dot11d_info *dot11d_info = GET_DOT11D_INFO(ieee);
- /* Clear old channel map */
- memset(dot11d_info->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
- memset(dot11d_info->max_tx_pwr_dbm_list, 0xFF, MAX_CHANNEL_NUMBER + 1);
- /* Set new channel map */
- for (i = 1; i <= 11; i++)
- (dot11d_info->channel_map)[i] = 1;
-
- for (i = 12; i <= 14; i++)
- (dot11d_info->channel_map)[i] = 2;
-
- dot11d_info->state = DOT11D_STATE_NONE;
- dot11d_info->country_ie_len = 0;
- RESET_CIE_WATCHDOG(ieee);
-}
-EXPORT_SYMBOL(dot11d_reset);
-
-/*
- * Update country IE from Beacon or Probe Resopnse and configure PHY for
- * operation in the regulatory domain.
- *
- * TODO: Configure Tx power.
- * Assumption:
- * 1. IS_DOT11D_ENABLE() is TRUE.
- * 2. Input IE is an valid one.
- */
-void dot11d_update_country_ie(struct ieee80211_device *dev, u8 *pTaddr,
- u16 CoutryIeLen, u8 *pCoutryIe)
-{
- struct rt_dot11d_info *dot11d_info = GET_DOT11D_INFO(dev);
- u8 i, j, NumTriples, MaxChnlNum;
- struct chnl_txpower_triple *pTriple;
-
- memset(dot11d_info->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
- memset(dot11d_info->max_tx_pwr_dbm_list, 0xFF, MAX_CHANNEL_NUMBER + 1);
- MaxChnlNum = 0;
- NumTriples = (CoutryIeLen - 3) / 3; /* skip 3-byte country string. */
- pTriple = (struct chnl_txpower_triple *)(pCoutryIe + 3);
- for (i = 0; i < NumTriples; i++) {
- if (MaxChnlNum >= pTriple->first_channel) {
- /* It is not in a monotonically increasing order, so
- * stop processing.
- */
- netdev_err(dev->dev, "%s: Invalid country IE, skip it 1\n", __func__);
- return;
- }
- if (MAX_CHANNEL_NUMBER < (pTriple->first_channel + pTriple->num_channels)) {
- /* It is not a valid set of channel id, so stop
- * processing.
- */
- netdev_err(dev->dev, "%s: Invalid country IE, skip it 2\n", __func__);
- return;
- }
-
- for (j = 0; j < pTriple->num_channels; j++) {
- dot11d_info->channel_map[pTriple->first_channel + j] = 1;
- dot11d_info->max_tx_pwr_dbm_list[pTriple->first_channel + j] = pTriple->max_tx_pwr_dbm;
- MaxChnlNum = pTriple->first_channel + j;
- }
-
- pTriple = (struct chnl_txpower_triple *)((u8 *)pTriple + 3);
- }
- netdev_info(dev->dev, "Channel List:");
- for (i = 1; i <= MAX_CHANNEL_NUMBER; i++)
- if (dot11d_info->channel_map[i] > 0)
- netdev_info(dev->dev, " %d", i);
- netdev_info(dev->dev, "\n");
-
- UPDATE_CIE_SRC(dev, pTaddr);
-
- dot11d_info->country_ie_len = CoutryIeLen;
- memcpy(dot11d_info->country_ie_buf, pCoutryIe, CoutryIeLen);
- dot11d_info->state = DOT11D_STATE_LEARNED;
-}
-EXPORT_SYMBOL(dot11d_update_country_ie);
-
-u8 dot11d_get_max_tx_pwr_in_dbm(struct ieee80211_device *dev, u8 Channel)
-{
- struct rt_dot11d_info *dot11d_info = GET_DOT11D_INFO(dev);
- u8 MaxTxPwrInDbm = 255;
-
- if (Channel > MAX_CHANNEL_NUMBER) {
- netdev_err(dev->dev, "%s: Invalid Channel\n", __func__);
- return MaxTxPwrInDbm;
- }
- if (dot11d_info->channel_map[Channel])
- MaxTxPwrInDbm = dot11d_info->max_tx_pwr_dbm_list[Channel];
-
- return MaxTxPwrInDbm;
-}
-EXPORT_SYMBOL(dot11d_get_max_tx_pwr_in_dbm);
-
-void dot11d_scan_complete(struct ieee80211_device *dev)
-{
- struct rt_dot11d_info *dot11d_info = GET_DOT11D_INFO(dev);
-
- switch (dot11d_info->state) {
- case DOT11D_STATE_LEARNED:
- dot11d_info->state = DOT11D_STATE_DONE;
- break;
-
- case DOT11D_STATE_DONE:
- if (GET_CIE_WATCHDOG(dev) == 0) {
- /* Reset country IE if previous one is gone. */
- dot11d_reset(dev);
- }
- break;
- case DOT11D_STATE_NONE:
- break;
- }
-}
-EXPORT_SYMBOL(dot11d_scan_complete);
-
-int is_legal_channel(struct ieee80211_device *dev, u8 channel)
-{
- struct rt_dot11d_info *dot11d_info = GET_DOT11D_INFO(dev);
-
- if (channel > MAX_CHANNEL_NUMBER) {
- netdev_err(dev->dev, "%s: Invalid Channel\n", __func__);
- return 0;
- }
- if (dot11d_info->channel_map[channel] > 0)
- return 1;
- return 0;
-}
-EXPORT_SYMBOL(is_legal_channel);
-
-int to_legal_channel(struct ieee80211_device *dev, u8 channel)
-{
- struct rt_dot11d_info *dot11d_info = GET_DOT11D_INFO(dev);
- u8 default_chn = 0;
- u32 i = 0;
-
- for (i = 1; i <= MAX_CHANNEL_NUMBER; i++) {
- if (dot11d_info->channel_map[i] > 0) {
- default_chn = i;
- break;
- }
- }
-
- if (channel > MAX_CHANNEL_NUMBER) {
- netdev_err(dev->dev, "%s: Invalid Channel\n", __func__);
- return default_chn;
- }
-
- if (dot11d_info->channel_map[channel] > 0)
- return channel;
-
- return default_chn;
-}
-EXPORT_SYMBOL(to_legal_channel);
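
For reference while reviewing the deletion: dot11d_update_country_ie() above walks the (first channel, number of channels, max TX power) triples that follow the 3-byte country string and marks each covered channel as usable. The stand-alone sketch below replays that walk on a made-up IE; the validation the real function performs (monotonically increasing channels, channel-number bounds) is omitted.

#include <stdio.h>
#include <string.h>

#define MAX_CHANNEL_NUMBER 14

struct chnl_txpower_triple {
	unsigned char first_channel;
	unsigned char num_channels;
	unsigned char max_tx_pwr_dbm;
};

int main(void)
{
	/* hypothetical country IE body: "US " then one triple, channels 1-11 at 20 dBm */
	unsigned char ie[] = { 'U', 'S', ' ', 1, 11, 20 };
	unsigned char channel_map[MAX_CHANNEL_NUMBER + 1] = { 0 };
	unsigned char max_pwr[MAX_CHANNEL_NUMBER + 1];
	int ntriples = (sizeof(ie) - 3) / 3;	/* skip the 3-byte country string */
	struct chnl_txpower_triple *t = (struct chnl_txpower_triple *)(ie + 3);
	int i, j;

	memset(max_pwr, 0xFF, sizeof(max_pwr));
	for (i = 0; i < ntriples; i++, t++) {
		for (j = 0; j < t->num_channels; j++) {
			channel_map[t->first_channel + j] = 1;
			max_pwr[t->first_channel + j] = t->max_tx_pwr_dbm;
		}
	}

	for (i = 1; i <= MAX_CHANNEL_NUMBER; i++)
		if (channel_map[i])
			printf("channel %2d allowed, max %d dBm\n", i, max_pwr[i]);
	return 0;
}
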
diff --git a/drivers/staging/rtl8192u/ieee80211/dot11d.h b/drivers/staging/rtl8192u/ieee80211/dot11d.h
deleted file mode 100644
index 8b485fa18089..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/dot11d.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __INC_DOT11D_H
-#define __INC_DOT11D_H
-
-#include "ieee80211.h"
-
-struct chnl_txpower_triple {
- u8 first_channel;
- u8 num_channels;
- u8 max_tx_pwr_dbm;
-};
-
-enum dot11d_state {
- DOT11D_STATE_NONE = 0,
- DOT11D_STATE_LEARNED,
- DOT11D_STATE_DONE,
-};
-
-struct rt_dot11d_info {
- u16 country_ie_len; /* > 0 if country_ie_buf[] contains valid country information element. */
-
- /* country_ie_src_addr u16 aligned for comparison and copy */
- u8 country_ie_src_addr[ETH_ALEN]; /* Source AP of the country IE. */
- u8 country_ie_buf[MAX_IE_LEN];
- u8 country_ie_watchdog;
-
- u8 channel_map[MAX_CHANNEL_NUMBER + 1]; /* !Value 0: Invalid, 1: Valid (active scan), 2: Valid (passive scan) */
- u8 max_tx_pwr_dbm_list[MAX_CHANNEL_NUMBER + 1];
-
- enum dot11d_state state;
- u8 dot11d_enabled; /* dot11MultiDomainCapabilityEnabled */
-};
-
-#define GET_DOT11D_INFO(ieee_dev) ((struct rt_dot11d_info *)((ieee_dev)->dot11d_info))
-
-#define IS_DOT11D_ENABLE(ieee_dev) (GET_DOT11D_INFO(ieee_dev)->dot11d_enabled)
-#define IS_COUNTRY_IE_VALID(ieee_dev) (GET_DOT11D_INFO(ieee_dev)->country_ie_len > 0)
-
-#define IS_EQUAL_CIE_SRC(ieee_dev, addr) ether_addr_equal(GET_DOT11D_INFO(ieee_dev)->country_ie_src_addr, addr)
-#define UPDATE_CIE_SRC(ieee_dev, addr) ether_addr_copy(GET_DOT11D_INFO(ieee_dev)->country_ie_src_addr, addr)
-
-#define GET_CIE_WATCHDOG(ieee_dev) (GET_DOT11D_INFO(ieee_dev)->country_ie_watchdog)
-#define RESET_CIE_WATCHDOG(ieee_dev) (GET_CIE_WATCHDOG(ieee_dev) = 0)
-#define UPDATE_CIE_WATCHDOG(ieee_dev) (++GET_CIE_WATCHDOG(ieee_dev))
-
-void rtl8192u_dot11d_init(struct ieee80211_device *dev);
-void dot11d_reset(struct ieee80211_device *dev);
-void dot11d_update_country_ie(struct ieee80211_device *dev,
- u8 *addr,
- u16 coutry_ie_len,
- u8 *coutry_ie);
-u8 dot11d_get_max_tx_pwr_in_dbm(struct ieee80211_device *dev, u8 channel);
-void dot11d_scan_complete(struct ieee80211_device *dev);
-int is_legal_channel(struct ieee80211_device *dev, u8 channel);
-int to_legal_channel(struct ieee80211_device *dev, u8 channel);
-
-#endif /* #ifndef __INC_DOT11D_H */
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
deleted file mode 100644
index 694d1b18f81c..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ /dev/null
@@ -1,2385 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Merged with mainline ieee80211.h in Aug 2004. Original ieee802_11
- * remains copyright by the original authors
- *
- * Portions of the merged code are based on Host AP (software wireless
- * LAN access point) driver for Intersil Prism2/2.5/3.
- *
- * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
- * <jkmaline@cc.hut.fi>
- * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
- *
- * Adaption to a generic IEEE 802.11 stack by James Ketrenos
- * <jketreno@linux.intel.com>
- * Copyright (c) 2004, Intel Corporation
- *
- * Modified for Realtek's wi-fi cards by Andrea Merello
- * <andrea.merello@gmail.com>
- */
-#ifndef IEEE80211_H
-#define IEEE80211_H
-#include <linux/if_ether.h> /* ETH_ALEN */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/jiffies.h>
-#include <linux/timer.h>
-#include <linux/sched.h>
-#include <linux/semaphore.h>
-#include <linux/interrupt.h>
-
-#include <linux/delay.h>
-#include <linux/wireless.h>
-#include <linux/ieee80211.h>
-
-#include "rtl819x_HT.h"
-#include "rtl819x_BA.h"
-#include "rtl819x_TS.h"
-
-
-#ifndef IW_MODE_MONITOR
-#define IW_MODE_MONITOR 6
-#endif
-
-#ifndef IWEVCUSTOM
-#define IWEVCUSTOM 0x8c02
-#endif
-
-#define KEY_TYPE_NA 0x0
-#define KEY_TYPE_WEP40 0x1
-#define KEY_TYPE_TKIP 0x2
-#define KEY_TYPE_CCMP 0x4
-#define KEY_TYPE_WEP104 0x5
-
-/* added for rtl819x tx procedure */
-#define MAX_QUEUE_SIZE 0x10
-
-//
-// 8190 queue mapping
-//
-#define BK_QUEUE 0
-#define BE_QUEUE 1
-#define VI_QUEUE 2
-#define VO_QUEUE 3
-#define HCCA_QUEUE 4
-#define TXCMD_QUEUE 5
-#define MGNT_QUEUE 6
-#define HIGH_QUEUE 7
-#define BEACON_QUEUE 8
-
-#define LOW_QUEUE BE_QUEUE
-#define NORMAL_QUEUE MGNT_QUEUE
-
-//added by amy for ps
-#define SWRF_TIMEOUT 50
-
-//added by amy for LEAP related
-#define IE_CISCO_FLAG_POSITION 0x08 // Flag byte: byte 8, numbered from 0.
-#define SUPPORT_CKIP_MIC 0x08 // bit3
-#define SUPPORT_CKIP_PK 0x10 // bit4
-/* defined for skb cb field */
-/* At most 28 byte */
-struct cb_desc {
- /* Tx Desc Related flags (8-9) */
- u8 bLastIniPkt:1;
- u8 bCmdOrInit:1;
- u8 bFirstSeg:1;
- u8 bLastSeg:1;
- u8 bEncrypt:1;
- u8 bTxDisableRateFallBack:1;
- u8 bTxUseDriverAssingedRate:1;
- u8 bHwSec:1; //indicate whether use Hw security. WB
-
- u8 reserved1;
-
- /* Tx Firmware Relaged flags (10-11)*/
- u8 bCTSEnable:1;
- u8 bRTSEnable:1;
- u8 bUseShortGI:1;
- u8 bUseShortPreamble:1;
- u8 bTxEnableFwCalcDur:1;
- u8 bAMPDUEnable:1;
- u8 bRTSSTBC:1;
- u8 RTSSC:1;
-
- u8 bRTSBW:1;
- u8 bPacketBW:1;
- u8 bRTSUseShortPreamble:1;
- u8 bRTSUseShortGI:1;
- u8 bMulticast:1;
- u8 bBroadcast:1;
- //u8 reserved2:2;
- u8 drv_agg_enable:1;
- u8 reserved2:1;
-
- /* Tx Desc related element(12-19) */
- u8 rata_index;
- u8 queue_index;
- //u8 reserved3;
- //u8 reserved4;
- u16 txbuf_size;
- //u8 reserved5;
- u8 RATRIndex;
- u8 reserved6;
- u8 reserved7;
- u8 reserved8;
-
- /* Tx firmware related element(20-27) */
- u8 data_rate;
- u8 rts_rate;
- u8 ampdu_factor;
- u8 ampdu_density;
- //u8 reserved9;
- //u8 reserved10;
- //u8 reserved11;
- u8 DrvAggrNum;
- u16 pkt_size;
- u8 reserved12;
-};
-
-/*--------------------------Define -------------------------------------------*/
-#define MGN_1M 0x02
-#define MGN_2M 0x04
-#define MGN_5_5M 0x0b
-#define MGN_11M 0x16
-
-#define MGN_6M 0x0c
-#define MGN_9M 0x12
-#define MGN_12M 0x18
-#define MGN_18M 0x24
-#define MGN_24M 0x30
-#define MGN_36M 0x48
-#define MGN_48M 0x60
-#define MGN_54M 0x6c
-
-#define MGN_MCS0 0x80
-#define MGN_MCS1 0x81
-#define MGN_MCS2 0x82
-#define MGN_MCS3 0x83
-#define MGN_MCS4 0x84
-#define MGN_MCS5 0x85
-#define MGN_MCS6 0x86
-#define MGN_MCS7 0x87
-#define MGN_MCS8 0x88
-#define MGN_MCS9 0x89
-#define MGN_MCS10 0x8a
-#define MGN_MCS11 0x8b
-#define MGN_MCS12 0x8c
-#define MGN_MCS13 0x8d
-#define MGN_MCS14 0x8e
-#define MGN_MCS15 0x8f
-
-#define aSifsTime ((priv->ieee80211->current_network.mode == IEEE_A || \
- priv->ieee80211->current_network.mode == IEEE_N_24G || \
- priv->ieee80211->current_network.mode == IEEE_N_5G) ? \
- 16 : 10)
-
-#define MGMT_QUEUE_NUM 5
-
-#define IEEE_CMD_SET_WPA_PARAM 1
-#define IEEE_CMD_SET_WPA_IE 2
-#define IEEE_CMD_SET_ENCRYPTION 3
-#define IEEE_CMD_MLME 4
-
-#define IEEE_PARAM_WPA_ENABLED 1
-#define IEEE_PARAM_TKIP_COUNTERMEASURES 2
-#define IEEE_PARAM_DROP_UNENCRYPTED 3
-#define IEEE_PARAM_PRIVACY_INVOKED 4
-#define IEEE_PARAM_AUTH_ALGS 5
-#define IEEE_PARAM_IEEE_802_1X 6
-//It should consistent with the driver_XXX.c
-// David, 2006.9.26
-#define IEEE_PARAM_WPAX_SELECT 7
-//Added for notify the encryption type selection
-// David, 2006.9.26
-#define IEEE_PROTO_WPA 1
-#define IEEE_PROTO_RSN 2
-//Added for notify the encryption type selection
-// David, 2006.9.26
-#define IEEE_WPAX_USEGROUP 0
-#define IEEE_WPAX_WEP40 1
-#define IEEE_WPAX_TKIP 2
-#define IEEE_WPAX_WRAP 3
-#define IEEE_WPAX_CCMP 4
-#define IEEE_WPAX_WEP104 5
-
-#define IEEE_KEY_MGMT_IEEE8021X 1
-#define IEEE_KEY_MGMT_PSK 2
-
-#define IEEE_MLME_STA_DEAUTH 1
-#define IEEE_MLME_STA_DISASSOC 2
-
-
-#define IEEE_CRYPT_ERR_UNKNOWN_ALG 2
-#define IEEE_CRYPT_ERR_UNKNOWN_ADDR 3
-#define IEEE_CRYPT_ERR_CRYPT_INIT_FAILED 4
-#define IEEE_CRYPT_ERR_KEY_SET_FAILED 5
-#define IEEE_CRYPT_ERR_TX_KEY_SET_FAILED 6
-#define IEEE_CRYPT_ERR_CARD_CONF_FAILED 7
-
-
-#define IEEE_CRYPT_ALG_NAME_LEN 16
-
-#define MAX_IE_LEN 0xff
-
-// added for kernel conflict
-#define ieee80211_wake_queue ieee80211_wake_queue_rsl
-#define ieee80211_stop_queue ieee80211_stop_queue_rsl
-#define notify_wx_assoc_event notify_wx_assoc_event_rsl
-#define SendDisassociation SendDisassociation_rsl
-
-
-struct ieee_param {
- u32 cmd;
- u8 sta_addr[ETH_ALEN];
- union {
- struct {
- u8 name;
- u32 value;
- } wpa_param;
- struct {
- u32 len;
- u8 reserved[32];
- u8 data[];
- } wpa_ie;
- struct{
- int command;
- int reason_code;
- } mlme;
- struct {
- u8 alg[IEEE_CRYPT_ALG_NAME_LEN];
- u8 set_tx;
- u32 err;
- u8 idx;
- u8 seq[8]; /* sequence counter (set: RX, get: TX) */
- u16 key_len;
- u8 key[];
- } crypt;
- } u;
-};
-
-
-// linux under 2.6.9 release may not support it, so modify it for common use
-#define IEEE80211_DATA_LEN 2304
-/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
- * 6.2.1.1.2.
- *
- * The figure in section 7.1.2 suggests a body size of up to 2312
- * bytes is allowed, which is a bit confusing, I suspect this
- * represents the 2304 bytes of real data, plus a possible 8 bytes of
- * WEP IV and ICV. (this interpretation suggested by Ramiro Barreiro)
- */
-#define IEEE80211_1ADDR_LEN 10
-#define IEEE80211_2ADDR_LEN 16
-#define IEEE80211_3ADDR_LEN 24
-#define IEEE80211_4ADDR_LEN 30
-#define IEEE80211_FCS_LEN 4
-#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
-#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
-#define IEEE80211_MGMT_HDR_LEN 24
-#define IEEE80211_DATA_HDR3_LEN 24
-#define IEEE80211_DATA_HDR4_LEN 30
-
-#define MIN_FRAG_THRESHOLD 256U
-#define MAX_FRAG_THRESHOLD 2346U
-
-
-/* Frame control field constants */
-#define IEEE80211_FCTL_VERS 0x0003
-#define IEEE80211_FCTL_FTYPE 0x000c
-#define IEEE80211_FCTL_STYPE 0x00f0
-#define IEEE80211_FCTL_FRAMETYPE 0x00fc
-#define IEEE80211_FCTL_TODS 0x0100
-#define IEEE80211_FCTL_FROMDS 0x0200
-#define IEEE80211_FCTL_DSTODS 0x0300 //added by david
-#define IEEE80211_FCTL_MOREFRAGS 0x0400
-#define IEEE80211_FCTL_RETRY 0x0800
-#define IEEE80211_FCTL_PM 0x1000
-#define IEEE80211_FCTL_MOREDATA 0x2000
-#define IEEE80211_FCTL_WEP 0x4000
-#define IEEE80211_FCTL_ORDER 0x8000
-
-#define IEEE80211_FTYPE_MGMT 0x0000
-#define IEEE80211_FTYPE_CTL 0x0004
-#define IEEE80211_FTYPE_DATA 0x0008
-
-/* management */
-#define IEEE80211_STYPE_ASSOC_REQ 0x0000
-#define IEEE80211_STYPE_ASSOC_RESP 0x0010
-#define IEEE80211_STYPE_REASSOC_REQ 0x0020
-#define IEEE80211_STYPE_REASSOC_RESP 0x0030
-#define IEEE80211_STYPE_PROBE_REQ 0x0040
-#define IEEE80211_STYPE_PROBE_RESP 0x0050
-#define IEEE80211_STYPE_BEACON 0x0080
-#define IEEE80211_STYPE_ATIM 0x0090
-#define IEEE80211_STYPE_DISASSOC 0x00A0
-#define IEEE80211_STYPE_AUTH 0x00B0
-#define IEEE80211_STYPE_DEAUTH 0x00C0
-#define IEEE80211_STYPE_MANAGE_ACT 0x00D0
-
-/* control */
-#define IEEE80211_STYPE_PSPOLL 0x00A0
-#define IEEE80211_STYPE_RTS 0x00B0
-#define IEEE80211_STYPE_CTS 0x00C0
-#define IEEE80211_STYPE_ACK 0x00D0
-#define IEEE80211_STYPE_CFEND 0x00E0
-#define IEEE80211_STYPE_CFENDACK 0x00F0
-#define IEEE80211_STYPE_BLOCKACK 0x0094
-
-/* data */
-#define IEEE80211_STYPE_DATA 0x0000
-#define IEEE80211_STYPE_DATA_CFACK 0x0010
-#define IEEE80211_STYPE_DATA_CFPOLL 0x0020
-#define IEEE80211_STYPE_DATA_CFACKPOLL 0x0030
-#define IEEE80211_STYPE_NULLFUNC 0x0040
-#define IEEE80211_STYPE_CFACK 0x0050
-#define IEEE80211_STYPE_CFPOLL 0x0060
-#define IEEE80211_STYPE_CFACKPOLL 0x0070
-#define IEEE80211_STYPE_QOS_DATA 0x0080 //added for WMM 2006/8/2
-#define IEEE80211_STYPE_QOS_NULL 0x00C0
-
-#define IEEE80211_SCTL_FRAG 0x000F
-#define IEEE80211_SCTL_SEQ 0xFFF0
-
-/* QOS control */
-#define IEEE80211_QCTL_TID 0x000F
-
-#define FC_QOS_BIT BIT(7)
-#define IsDataFrame(pdu) (((pdu[0] & 0x0C) == 0x08) ? true : false)
-#define IsLegacyDataFrame(pdu) (IsDataFrame(pdu) && (!(pdu[0] & FC_QOS_BIT)))
-//added by wb. Is this right?
-#define IsQoSDataFrame(pframe) ((*(u16 *)pframe & (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA)) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA))
-#define Frame_Order(pframe) (*(u16 *)pframe & IEEE80211_FCTL_ORDER)
-#define SN_LESS(a, b) (((a - b) & 0x800) != 0)
-#define SN_EQUAL(a, b) (a == b)
-#define MAX_DEV_ADDR_SIZE 8
-typedef enum _ACT_CATEGORY {
- ACT_CAT_QOS = 1,
- ACT_CAT_DLS = 2,
- ACT_CAT_BA = 3,
- ACT_CAT_HT = 7,
- ACT_CAT_WMM = 17,
-} ACT_CATEGORY, *PACT_CATEGORY;
-
-typedef enum _TS_ACTION {
- ACT_ADDTSREQ = 0,
- ACT_ADDTSRSP = 1,
- ACT_DELTS = 2,
- ACT_SCHEDULE = 3,
-} TS_ACTION, *PTS_ACTION;
-
-typedef enum _BA_ACTION {
- ACT_ADDBAREQ = 0,
- ACT_ADDBARSP = 1,
- ACT_DELBA = 2,
-} BA_ACTION, *PBA_ACTION;
-
-typedef enum _InitialGainOpType {
- IG_Backup = 0,
- IG_Restore,
- IG_Max
-} InitialGainOpType;
-
-/* debug macros */
-#define CONFIG_IEEE80211_DEBUG
-#ifdef CONFIG_IEEE80211_DEBUG
-extern u32 ieee80211_debug_level;
-#define IEEE80211_DEBUG(level, fmt, args...) \
-do { if (ieee80211_debug_level & (level)) \
- printk(KERN_DEBUG "ieee80211: " fmt, ## args); } while (0)
-//wb added to debug out data buf
-//if you want print DATA buffer related BA, please set ieee80211_debug_level to DATA|BA
-#define IEEE80211_DEBUG_DATA(level, data, datalen) \
- do { if ((ieee80211_debug_level & (level)) == (level)) \
- { \
- int i; \
- u8 *pdata = (u8 *)data; \
- printk(KERN_DEBUG "ieee80211: %s()\n", __func__); \
- for (i = 0; i < (int)(datalen); i++) { \
- printk("%2x ", pdata[i]); \
- if ((i + 1) % 16 == 0) \
- printk("\n"); \
- } \
- printk("\n"); \
- } \
- } while (0)
-#else
-#define IEEE80211_DEBUG(level, fmt, args...)
-#define IEEE80211_DEBUG_DATA(level, data, datalen)
-#endif /* CONFIG_IEEE80211_DEBUG */
-
-/* debug macros not dependent on CONFIG_IEEE80211_DEBUG */
-
-/*
- * To use the debug system;
- *
- * If you are defining a new debug classification, simply add it to the #define
- * list here in the form of:
- *
- * #define IEEE80211_DL_xxxx VALUE
- *
- * shifting value to the left one bit from the previous entry. xxxx should be
- * the name of the classification (for example, WEP)
- *
- * You then need to either add a IEEE80211_xxxx_DEBUG() macro definition for your
- * classification, or use IEEE80211_DEBUG(IEEE80211_DL_xxxx, ...) whenever you want
- * to send output to that classification.
- *
- * To add your debug level to the list of levels seen when you perform
- *
- * % cat /proc/net/ipw/debug_level
- *
- * you simply need to add your entry to the ipw_debug_levels array.
- *
- * If you do not see debug_level in /proc/net/ipw then you do not have
- * CONFIG_IEEE80211_DEBUG defined in your kernel configuration
- *
- */
-
-#define IEEE80211_DL_INFO (1<<0)
-#define IEEE80211_DL_WX (1<<1)
-#define IEEE80211_DL_SCAN (1<<2)
-#define IEEE80211_DL_STATE (1<<3)
-#define IEEE80211_DL_MGMT (1<<4)
-#define IEEE80211_DL_FRAG (1<<5)
-#define IEEE80211_DL_EAP (1<<6)
-#define IEEE80211_DL_DROP (1<<7)
-
-#define IEEE80211_DL_TX (1<<8)
-#define IEEE80211_DL_RX (1<<9)
-
-#define IEEE80211_DL_HT (1<<10) //HT
-#define IEEE80211_DL_BA (1<<11) //ba
-#define IEEE80211_DL_TS (1<<12) //TS
-#define IEEE80211_DL_QOS (1<<13)
-#define IEEE80211_DL_REORDER (1<<14)
-#define IEEE80211_DL_IOT (1<<15)
-#define IEEE80211_DL_IPS (1<<16)
-#define IEEE80211_DL_TRACE (1<<29) //trace function, need to user net_ratelimit() together in order not to print too much to the screen
-#define IEEE80211_DL_DATA (1<<30) //use this flag to control whether print data buf out.
-#define IEEE80211_DL_ERR (1<<31) //always open
-#define IEEE80211_ERROR(f, a...) printk(KERN_ERR "ieee80211: " f, ## a)
-#define IEEE80211_WARNING(f, a...) printk(KERN_WARNING "ieee80211: " f, ## a)
-#define IEEE80211_DEBUG_INFO(f, a...) IEEE80211_DEBUG(IEEE80211_DL_INFO, f, ## a)
-
-#define IEEE80211_DEBUG_WX(f, a...) IEEE80211_DEBUG(IEEE80211_DL_WX, f, ## a)
-#define IEEE80211_DEBUG_SCAN(f, a...) IEEE80211_DEBUG(IEEE80211_DL_SCAN, f, ## a)
-#define IEEE80211_DEBUG_STATE(f, a...) IEEE80211_DEBUG(IEEE80211_DL_STATE, f, ## a)
-#define IEEE80211_DEBUG_MGMT(f, a...) IEEE80211_DEBUG(IEEE80211_DL_MGMT, f, ## a)
-#define IEEE80211_DEBUG_FRAG(f, a...) IEEE80211_DEBUG(IEEE80211_DL_FRAG, f, ## a)
-#define IEEE80211_DEBUG_EAP(f, a...) IEEE80211_DEBUG(IEEE80211_DL_EAP, f, ## a)
-#define IEEE80211_DEBUG_DROP(f, a...) IEEE80211_DEBUG(IEEE80211_DL_DROP, f, ## a)
-#define IEEE80211_DEBUG_TX(f, a...) IEEE80211_DEBUG(IEEE80211_DL_TX, f, ## a)
-#define IEEE80211_DEBUG_RX(f, a...) IEEE80211_DEBUG(IEEE80211_DL_RX, f, ## a)
-#define IEEE80211_DEBUG_QOS(f, a...) IEEE80211_DEBUG(IEEE80211_DL_QOS, f, ## a)
-
-#include <linux/if_arp.h> /* ARPHRD_ETHER */
-
-#ifndef WIRELESS_SPY
-#define WIRELESS_SPY // enable iwspy support
-#endif
-#include <net/iw_handler.h> // new driver API
-
-#ifndef ETH_P_PAE
-#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
-#endif /* ETH_P_PAE */
-
-#define ETH_P_PREAUTH 0x88C7 /* IEEE 802.11i pre-authentication */
-
-#ifndef ETH_P_80211_RAW
-#define ETH_P_80211_RAW (ETH_P_ECONET + 1)
-#endif
-
-/* IEEE 802.11 defines */
-
-#define P80211_OUI_LEN 3
-
-struct ieee80211_snap_hdr {
-
- u8 dsap; /* always 0xAA */
- u8 ssap; /* always 0xAA */
- u8 ctrl; /* always 0x03 */
- u8 oui[P80211_OUI_LEN]; /* organizational universal id */
-
-} __packed;
-
-#define SNAP_SIZE sizeof(struct ieee80211_snap_hdr)
-
-#define WLAN_FC_GET_VERS(fc) ((fc) & IEEE80211_FCTL_VERS)
-#define WLAN_FC_GET_TYPE(fc) ((fc) & IEEE80211_FCTL_FTYPE)
-#define WLAN_FC_GET_STYPE(fc) ((fc) & IEEE80211_FCTL_STYPE)
-
-#define WLAN_FC_GET_FRAMETYPE(fc) ((fc) & IEEE80211_FCTL_FRAMETYPE)
-#define WLAN_GET_SEQ_FRAG(seq) ((seq) & IEEE80211_SCTL_FRAG)
-#define WLAN_GET_SEQ_SEQ(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
-
-#define WLAN_CAPABILITY_BSS (1<<0)
-#define WLAN_CAPABILITY_IBSS (1<<1)
-#define WLAN_CAPABILITY_CF_POLLABLE (1<<2)
-#define WLAN_CAPABILITY_CF_POLL_REQUEST (1<<3)
-#define WLAN_CAPABILITY_PRIVACY (1<<4)
-#define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5)
-#define WLAN_CAPABILITY_PBCC (1<<6)
-#define WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7)
-#define WLAN_CAPABILITY_SPECTRUM_MGMT (1<<8)
-#define WLAN_CAPABILITY_QOS (1<<9)
-#define WLAN_CAPABILITY_SHORT_SLOT (1<<10)
-#define WLAN_CAPABILITY_DSSS_OFDM (1<<13)
-
-/* 802.11g ERP information element */
-#define WLAN_ERP_NON_ERP_PRESENT (1<<0)
-#define WLAN_ERP_USE_PROTECTION (1<<1)
-#define WLAN_ERP_BARKER_PREAMBLE (1<<2)
-
-#define IEEE80211_STATMASK_SIGNAL (1<<0)
-#define IEEE80211_STATMASK_RSSI (1<<1)
-#define IEEE80211_STATMASK_NOISE (1<<2)
-#define IEEE80211_STATMASK_RATE (1<<3)
-#define IEEE80211_STATMASK_WEMASK 0x7
-
-#define IEEE80211_CCK_MODULATION (1<<0)
-#define IEEE80211_OFDM_MODULATION (1<<1)
-
-#define IEEE80211_24GHZ_BAND (1<<0)
-#define IEEE80211_52GHZ_BAND (1<<1)
-
-#define IEEE80211_CCK_RATE_LEN 4
-#define IEEE80211_CCK_RATE_1MB 0x02
-#define IEEE80211_CCK_RATE_2MB 0x04
-#define IEEE80211_CCK_RATE_5MB 0x0B
-#define IEEE80211_CCK_RATE_11MB 0x16
-#define IEEE80211_OFDM_RATE_LEN 8
-#define IEEE80211_OFDM_RATE_6MB 0x0C
-#define IEEE80211_OFDM_RATE_9MB 0x12
-#define IEEE80211_OFDM_RATE_12MB 0x18
-#define IEEE80211_OFDM_RATE_18MB 0x24
-#define IEEE80211_OFDM_RATE_24MB 0x30
-#define IEEE80211_OFDM_RATE_36MB 0x48
-#define IEEE80211_OFDM_RATE_48MB 0x60
-#define IEEE80211_OFDM_RATE_54MB 0x6C
-#define IEEE80211_BASIC_RATE_MASK 0x80
-
-#define IEEE80211_CCK_RATE_1MB_MASK (1<<0)
-#define IEEE80211_CCK_RATE_2MB_MASK (1<<1)
-#define IEEE80211_CCK_RATE_5MB_MASK (1<<2)
-#define IEEE80211_CCK_RATE_11MB_MASK (1<<3)
-#define IEEE80211_OFDM_RATE_6MB_MASK (1<<4)
-#define IEEE80211_OFDM_RATE_9MB_MASK (1<<5)
-#define IEEE80211_OFDM_RATE_12MB_MASK (1<<6)
-#define IEEE80211_OFDM_RATE_18MB_MASK (1<<7)
-#define IEEE80211_OFDM_RATE_24MB_MASK (1<<8)
-#define IEEE80211_OFDM_RATE_36MB_MASK (1<<9)
-#define IEEE80211_OFDM_RATE_48MB_MASK (1<<10)
-#define IEEE80211_OFDM_RATE_54MB_MASK (1<<11)
-
-#define IEEE80211_CCK_RATES_MASK 0x0000000F
-#define IEEE80211_CCK_BASIC_RATES_MASK (IEEE80211_CCK_RATE_1MB_MASK | \
- IEEE80211_CCK_RATE_2MB_MASK)
-#define IEEE80211_CCK_DEFAULT_RATES_MASK (IEEE80211_CCK_BASIC_RATES_MASK | \
- IEEE80211_CCK_RATE_5MB_MASK | \
- IEEE80211_CCK_RATE_11MB_MASK)
-
-#define IEEE80211_OFDM_RATES_MASK 0x00000FF0
-#define IEEE80211_OFDM_BASIC_RATES_MASK (IEEE80211_OFDM_RATE_6MB_MASK | \
- IEEE80211_OFDM_RATE_12MB_MASK | \
- IEEE80211_OFDM_RATE_24MB_MASK)
-#define IEEE80211_OFDM_DEFAULT_RATES_MASK (IEEE80211_OFDM_BASIC_RATES_MASK | \
- IEEE80211_OFDM_RATE_9MB_MASK | \
- IEEE80211_OFDM_RATE_18MB_MASK | \
- IEEE80211_OFDM_RATE_36MB_MASK | \
- IEEE80211_OFDM_RATE_48MB_MASK | \
- IEEE80211_OFDM_RATE_54MB_MASK)
-#define IEEE80211_DEFAULT_RATES_MASK (IEEE80211_OFDM_DEFAULT_RATES_MASK | \
- IEEE80211_CCK_DEFAULT_RATES_MASK)
-
-#define IEEE80211_NUM_OFDM_RATES 8
-#define IEEE80211_NUM_CCK_RATES 4
-#define IEEE80211_OFDM_SHIFT_MASK_A 4
-
-
-/* this is stolen and modified from the madwifi driver*/
-#define IEEE80211_FC0_TYPE_MASK 0x0c
-#define IEEE80211_FC0_TYPE_DATA 0x08
-#define IEEE80211_FC0_SUBTYPE_MASK 0xB0
-#define IEEE80211_FC0_SUBTYPE_QOS 0x80
-
-#define IEEE80211_QOS_HAS_SEQ(fc) \
- (((fc) & (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == \
- (IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS))
-
-/* this is stolen from ipw2200 driver */
-#define IEEE_IBSS_MAC_HASH_SIZE 31
-struct ieee_ibss_seq {
- u8 mac[ETH_ALEN];
- u16 seq_num[17];
- u16 frag_num[17];
- unsigned long packet_time[17];
- struct list_head list;
-};
-
-/* NOTE: This data is for statistical purposes; not all hardware provides this
- * information for frames received. Not setting these will not cause
- * any adverse effects.
- */
-struct ieee80211_rx_stats {
- u32 mac_time[2];
- s8 rssi;
- u8 signal;
- u8 noise;
- u16 rate; /* in 100 kbps */
- u8 received_channel;
- u8 control;
- u8 mask;
- u8 freq;
- u16 len;
- u64 tsf;
- u32 beacon_time;
- u8 nic_type;
- u16 Length;
- // u8 DataRate; // In 0.5 Mbps
- u8 SignalQuality; // in 0-100 index.
- s32 RecvSignalPower; // Real power in dBm for this packet, no beautification and aggregation.
- s8 RxPower; // in dBm Translate from PWdB
- u8 SignalStrength; // in 0-100 index.
- u16 bHwError:1;
- u16 bCRC:1;
- u16 bICV:1;
- u16 bShortPreamble:1;
- u16 Antenna:1; //for rtl8185
- u16 Decrypted:1; //for rtl8185, rtl8187
- u16 Wakeup:1; //for rtl8185
- u16 Reserved0:1; //for rtl8185
- u8 AGC;
- u32 TimeStampLow;
- u32 TimeStampHigh;
- bool bShift;
- bool bIsQosData; // Added by Annie, 2005-12-22.
- u8 UserPriority;
-
-	// Attention: 11n or 8190 specific code should be put below this line
-
- u8 RxDrvInfoSize;
- u8 RxBufShift;
- bool bIsAMPDU;
- bool bFirstMPDU;
- bool bContainHTC;
- bool RxIs40MHzPacket;
- u32 RxPWDBAll;
- u8 RxMIMOSignalStrength[4]; // in 0~100 index
- s8 RxMIMOSignalQuality[2];
- bool bPacketMatchBSSID;
- bool bIsCCK;
- bool bPacketToSelf;
- //added by amy
- u8 *virtual_address;
-	u16 packetlength;	// Total packet length: must equal the sum of all FragLength values
-	u16 fraglength;		// FragLength should equal PacketLength in the non-fragment case
- u16 fragoffset; // Data offset for this fragment
- u16 ntotalfrag;
- bool bisrxaggrsubframe;
- bool bPacketBeacon; //cosa add for rssi
- bool bToSelfBA; //cosa add for rssi
- s8 cck_adc_pwdb[4]; //cosa add for rx path selection
- u16 Seq_Num;
-
-};
-
-/* IEEE 802.11 requires that STA supports concurrent reception of at least
- * three fragmented frames. This define can be increased to support more
- * concurrent frames, but it should be noted that each entry can consume about
- * 2 kB of RAM and increasing cache size will slow down frame reassembly.
- */
-#define IEEE80211_FRAG_CACHE_LEN 4
-
-struct ieee80211_frag_entry {
- unsigned long first_frag_time;
- unsigned int seq;
- unsigned int last_frag;
- struct sk_buff *skb;
- u8 src_addr[ETH_ALEN];
- u8 dst_addr[ETH_ALEN];
-};
-
-struct ieee80211_stats {
- unsigned int tx_unicast_frames;
- unsigned int tx_multicast_frames;
- unsigned int tx_fragments;
- unsigned int tx_unicast_octets;
- unsigned int tx_multicast_octets;
- unsigned int tx_deferred_transmissions;
- unsigned int tx_single_retry_frames;
- unsigned int tx_multiple_retry_frames;
- unsigned int tx_retry_limit_exceeded;
- unsigned int tx_discards;
- unsigned int rx_unicast_frames;
- unsigned int rx_multicast_frames;
- unsigned int rx_fragments;
- unsigned int rx_unicast_octets;
- unsigned int rx_multicast_octets;
- unsigned int rx_fcs_errors;
- unsigned int rx_discards_no_buffer;
- unsigned int tx_discards_wrong_sa;
- unsigned int rx_discards_undecryptable;
- unsigned int rx_message_in_msg_fragments;
- unsigned int rx_message_in_bad_msg_fragments;
-};
-
-struct ieee80211_device;
-
-#include "ieee80211_crypt.h"
-
-#define SEC_KEY_1 (1<<0)
-#define SEC_KEY_2 (1<<1)
-#define SEC_KEY_3 (1<<2)
-#define SEC_KEY_4 (1<<3)
-#define SEC_ACTIVE_KEY (1<<4)
-#define SEC_AUTH_MODE (1<<5)
-#define SEC_UNICAST_GROUP (1<<6)
-#define SEC_LEVEL (1<<7)
-#define SEC_ENABLED (1<<8)
-#define SEC_ENCRYPT (1<<9)
-
-#define SEC_LEVEL_0 0 /* None */
-#define SEC_LEVEL_1 1 /* WEP 40 and 104 bit */
-#define SEC_LEVEL_2 2 /* Level 1 + TKIP */
-#define SEC_LEVEL_2_CKIP 3 /* Level 1 + CKIP */
-#define SEC_LEVEL_3 4 /* Level 2 + CCMP */
-
-#define SEC_ALG_NONE 0
-#define SEC_ALG_WEP 1
-#define SEC_ALG_TKIP 2
-#define SEC_ALG_CCMP 3
-
-#define WEP_KEYS 4
-#define WEP_KEY_LEN 13
-#define SCM_KEY_LEN 32
-#define SCM_TEMPORAL_KEY_LENGTH 16
-
-struct ieee80211_security {
- u16 active_key:2,
- enabled:1,
- auth_algo:4,
- unicast_uses_group:1,
- encrypt:1;
- u8 auth_mode;
- u8 key_sizes[WEP_KEYS];
- u8 keys[WEP_KEYS][SCM_KEY_LEN];
- u8 level;
- u16 flags;
-} __packed;
-
-
-/*
- * 802.11 data frame from AP
- * ,-------------------------------------------------------------------.
- * Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 |
- * |------|------|---------|---------|---------|------|---------|------|
- * Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | frame | fcs |
- * | | tion | (BSSID) | | | ence | data | |
- * `-------------------------------------------------------------------'
- * Total: 28-2340 bytes
- */
-
-/* Management Frame Information Element Types */
-enum ieee80211_mfie {
- MFIE_TYPE_SSID = 0,
- MFIE_TYPE_RATES = 1,
- MFIE_TYPE_FH_SET = 2,
- MFIE_TYPE_DS_SET = 3,
- MFIE_TYPE_CF_SET = 4,
- MFIE_TYPE_TIM = 5,
- MFIE_TYPE_IBSS_SET = 6,
- MFIE_TYPE_COUNTRY = 7,
- MFIE_TYPE_HOP_PARAMS = 8,
- MFIE_TYPE_HOP_TABLE = 9,
- MFIE_TYPE_REQUEST = 10,
- MFIE_TYPE_CHALLENGE = 16,
- MFIE_TYPE_POWER_CONSTRAINT = 32,
- MFIE_TYPE_POWER_CAPABILITY = 33,
- MFIE_TYPE_TPC_REQUEST = 34,
- MFIE_TYPE_TPC_REPORT = 35,
- MFIE_TYPE_SUPP_CHANNELS = 36,
- MFIE_TYPE_CSA = 37,
- MFIE_TYPE_MEASURE_REQUEST = 38,
- MFIE_TYPE_MEASURE_REPORT = 39,
- MFIE_TYPE_QUIET = 40,
- MFIE_TYPE_IBSS_DFS = 41,
- MFIE_TYPE_ERP = 42,
- MFIE_TYPE_RSN = 48,
- MFIE_TYPE_RATES_EX = 50,
- MFIE_TYPE_HT_CAP = 45,
- MFIE_TYPE_HT_INFO = 61,
- MFIE_TYPE_AIRONET = 133,
- MFIE_TYPE_GENERIC = 221,
- MFIE_TYPE_QOS_PARAMETER = 222,
-};
-
-/* Minimal header; can be used for passing 802.11 frames with sufficient
- * information to determine what type of underlying data type is actually
- * stored in the data.
- */
-struct rtl_80211_hdr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 payload[];
-} __packed;
-
-struct rtl_80211_hdr_1addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 payload[];
-} __packed;
-
-struct rtl_80211_hdr_2addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 payload[];
-} __packed;
-
-struct rtl_80211_hdr_3addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
- u8 payload[];
-} __packed;
-
-struct rtl_80211_hdr_4addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
- u8 addr4[ETH_ALEN];
- u8 payload[];
-} __packed;
-
-struct rtl_80211_hdr_3addrqos {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
- u8 payload[0];
- __le16 qos_ctl;
-} __packed;
-
-struct rtl_80211_hdr_4addrqos {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
- u8 addr4[ETH_ALEN];
- u8 payload[0];
- __le16 qos_ctl;
-} __packed;
-
-struct ieee80211_info_element {
- u8 id;
- u8 len;
- u8 data[];
-} __packed;
-
-struct ieee80211_authentication {
- struct rtl_80211_hdr_3addr header;
- __le16 algorithm;
- __le16 transaction;
- __le16 status;
- /*challenge*/
- struct ieee80211_info_element info_element[];
-} __packed;
-
-struct ieee80211_disassoc {
- struct rtl_80211_hdr_3addr header;
- __le16 reason;
-} __packed;
-
-struct ieee80211_probe_request {
- struct rtl_80211_hdr_3addr header;
- /* SSID, supported rates */
- struct ieee80211_info_element info_element[];
-} __packed;
-
-struct ieee80211_probe_response {
- struct rtl_80211_hdr_3addr header;
- __le32 time_stamp[2];
- __le16 beacon_interval;
- __le16 capability;
- /* SSID, supported rates, FH params, DS params,
- * CF params, IBSS params, TIM (if beacon), RSN
- */
- struct ieee80211_info_element info_element[];
-} __packed;
-
-/* Alias beacon for probe_response */
-#define ieee80211_beacon ieee80211_probe_response
-
-struct ieee80211_assoc_request_frame {
- struct rtl_80211_hdr_3addr header;
- __le16 capability;
- __le16 listen_interval;
- /* SSID, supported rates, RSN */
- struct ieee80211_info_element info_element[];
-} __packed;
-
-struct ieee80211_reassoc_request_frame {
- struct rtl_80211_hdr_3addr header;
- __le16 capability;
- __le16 listen_interval;
- u8 current_ap[ETH_ALEN];
- /* SSID, supported rates, RSN */
- struct ieee80211_info_element info_element[];
-} __packed;
-
-struct ieee80211_assoc_response_frame {
- struct rtl_80211_hdr_3addr header;
- __le16 capability;
- __le16 status;
- __le16 aid;
- struct ieee80211_info_element info_element[]; /* supported rates */
-} __packed;
-
-struct ieee80211_txb {
- u8 nr_frags;
- u8 encrypted;
- u8 queue_index;
- u8 rts_included;
- u16 reserved;
- __le16 frag_size;
- __le16 payload_size;
- struct sk_buff *fragments[];
-};
-
-#define MAX_TX_AGG_COUNT 16
-struct ieee80211_drv_agg_txb {
- u8 nr_drv_agg_frames;
- struct sk_buff *tx_agg_frames[MAX_TX_AGG_COUNT];
-} __packed;
-
-#define MAX_SUBFRAME_COUNT 64
-struct ieee80211_rxb {
- u8 nr_subframes;
- struct sk_buff *subframes[MAX_SUBFRAME_COUNT];
- u8 dst[ETH_ALEN];
- u8 src[ETH_ALEN];
-} __packed;
-
-typedef union _frameqos {
- u16 shortdata;
- u8 chardata[2];
- struct {
- u16 tid:4;
- u16 eosp:1;
- u16 ack_policy:2;
- u16 reserved:1;
- u16 txop:8;
- } field;
-} frameqos, *pframeqos;
-
-/* SWEEP TABLE ENTRIES NUMBER*/
-#define MAX_SWEEP_TAB_ENTRIES 42
-#define MAX_SWEEP_TAB_ENTRIES_PER_PACKET 7
-/* MAX_RATES_LENGTH needs to be 12. The spec says 8, and many APs
- * only use 8, and then use extended rates for the remaining supported
- * rates. Other APs, however, stick all of their supported rates on the
- * main rates information element...
- */
-#define MAX_RATES_LENGTH ((u8)12)
-#define MAX_RATES_EX_LENGTH ((u8)16)
-#define MAX_NETWORK_COUNT 128
-
-#define MAX_CHANNEL_NUMBER 161
-#define IEEE80211_SOFTMAC_SCAN_TIME 100
-//(HZ / 2)
-#define IEEE80211_SOFTMAC_ASSOC_RETRY_TIME (HZ * 2)
-
-#define CRC_LENGTH 4U
-
-#define MAX_WPA_IE_LEN 64
-
-#define NETWORK_EMPTY_ESSID (1<<0)
-#define NETWORK_HAS_OFDM (1<<1)
-#define NETWORK_HAS_CCK (1<<2)
-
-/* QoS structure */
-#define NETWORK_HAS_QOS_PARAMETERS (1<<3)
-#define NETWORK_HAS_QOS_INFORMATION (1<<4)
-#define NETWORK_HAS_QOS_MASK (NETWORK_HAS_QOS_PARAMETERS | \
- NETWORK_HAS_QOS_INFORMATION)
-/* 802.11h */
-#define NETWORK_HAS_POWER_CONSTRAINT (1<<5)
-#define NETWORK_HAS_CSA (1<<6)
-#define NETWORK_HAS_QUIET (1<<7)
-#define NETWORK_HAS_IBSS_DFS (1<<8)
-#define NETWORK_HAS_TPC_REPORT (1<<9)
-
-#define NETWORK_HAS_ERP_VALUE (1<<10)
-
-#define QOS_QUEUE_NUM 4
-#define QOS_OUI_LEN 3
-#define QOS_OUI_TYPE 2
-#define QOS_ELEMENT_ID 221
-#define QOS_OUI_INFO_SUB_TYPE 0
-#define QOS_OUI_PARAM_SUB_TYPE 1
-#define QOS_VERSION_1 1
-#define QOS_AIFSN_MIN_VALUE 2
-struct ieee80211_qos_information_element {
- u8 elementID;
- u8 length;
- u8 qui[QOS_OUI_LEN];
- u8 qui_type;
- u8 qui_subtype;
- u8 version;
- u8 ac_info;
-} __packed;
-
-struct ieee80211_qos_ac_parameter {
- u8 aci_aifsn;
- u8 ecw_min_max;
- __le16 tx_op_limit;
-} __packed;
-
-struct ieee80211_qos_parameter_info {
- struct ieee80211_qos_information_element info_element;
- u8 reserved;
- struct ieee80211_qos_ac_parameter ac_params_record[QOS_QUEUE_NUM];
-} __packed;
-
-struct ieee80211_qos_parameters {
- __le16 cw_min[QOS_QUEUE_NUM];
- __le16 cw_max[QOS_QUEUE_NUM];
- u8 aifs[QOS_QUEUE_NUM];
- u8 flag[QOS_QUEUE_NUM];
- __le16 tx_op_limit[QOS_QUEUE_NUM];
-} __packed;
-
-struct ieee80211_qos_data {
- struct ieee80211_qos_parameters parameters;
- int active;
- int supported;
- u8 param_count;
- u8 old_param_count;
-};
-
-struct ieee80211_tim_parameters {
- u8 tim_count;
- u8 tim_period;
-} __packed;
-
-//#else
-struct ieee80211_wmm_ts_info {
- u8 ac_dir_tid;
- u8 ac_up_psb;
- u8 reserved;
-} __packed;
-
-struct ieee80211_wmm_tspec_elem {
- struct ieee80211_wmm_ts_info ts_info;
- u16 norm_msdu_size;
- u16 max_msdu_size;
- u32 min_serv_inter;
- u32 max_serv_inter;
- u32 inact_inter;
- u32 suspen_inter;
- u32 serv_start_time;
- u32 min_data_rate;
- u32 mean_data_rate;
- u32 peak_data_rate;
- u32 max_burst_size;
- u32 delay_bound;
- u32 min_phy_rate;
- u16 surp_band_allow;
- u16 medium_time;
-} __packed;
-enum eap_type {
- EAP_PACKET = 0,
- EAPOL_START,
- EAPOL_LOGOFF,
- EAPOL_KEY,
- EAPOL_ENCAP_ASF_ALERT
-};
-
-static const char *eap_types[] = {
- [EAP_PACKET] = "EAP-Packet",
- [EAPOL_START] = "EAPOL-Start",
- [EAPOL_LOGOFF] = "EAPOL-Logoff",
- [EAPOL_KEY] = "EAPOL-Key",
- [EAPOL_ENCAP_ASF_ALERT] = "EAPOL-Encap-ASF-Alert"
-};
-
-static inline const char *eap_get_type(int type)
-{
- return ((u32)type >= ARRAY_SIZE(eap_types)) ? "Unknown" : eap_types[type];
-}
-//added by amy for reorder
-static inline u8 Frame_QoSTID(u8 *buf)
-{
- struct rtl_80211_hdr_3addr *hdr;
- u16 fc;
- hdr = (struct rtl_80211_hdr_3addr *)buf;
- fc = le16_to_cpu(hdr->frame_ctl);
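-	/* The QoS Control field follows the MAC header: offset 30 for
-	 * 4-address (ToDS and FromDS both set) frames, 24 otherwise.
-	 */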
- return (u8)((frameqos *)(buf + (((fc & IEEE80211_FCTL_TODS) && (fc & IEEE80211_FCTL_FROMDS)) ? 30 : 24)))->field.tid;
-}
-
-//added by amy for reorder
-
-struct eapol {
- u8 snap[6];
- u16 ethertype;
- u8 version;
- u8 type;
- u16 length;
-} __packed;
-
-struct ieee80211_softmac_stats {
- unsigned int rx_ass_ok;
- unsigned int rx_ass_err;
- unsigned int rx_probe_rq;
- unsigned int tx_probe_rs;
- unsigned int tx_beacons;
- unsigned int rx_auth_rq;
- unsigned int rx_auth_rs_ok;
- unsigned int rx_auth_rs_err;
- unsigned int tx_auth_rq;
- unsigned int no_auth_rs;
- unsigned int no_ass_rs;
- unsigned int tx_ass_rq;
- unsigned int rx_ass_rq;
- unsigned int tx_probe_rq;
- unsigned int reassoc;
- unsigned int swtxstop;
- unsigned int swtxawake;
- unsigned char CurrentShowTxate;
- unsigned char last_packet_rate;
- unsigned int txretrycount;
-};
-
-#define BEACON_PROBE_SSID_ID_POSITION 12
-
-struct ieee80211_info_element_hdr {
- u8 id;
- u8 len;
-} __packed;
-
-/*
- * These are the data types that can make up management packets
- *
- u16 auth_algorithm;
- u16 auth_sequence;
- u16 beacon_interval;
- u16 capability;
- u8 current_ap[ETH_ALEN];
- u16 listen_interval;
- struct {
- u16 association_id:14, reserved:2;
- } __packed;
- u32 time_stamp[2];
- u16 reason;
- u16 status;
-*/
-
-#define IEEE80211_DEFAULT_TX_ESSID "Penguin"
-#define IEEE80211_DEFAULT_BASIC_RATE 2 //1Mbps
-
-enum {WMM_all_frame, WMM_two_frame, WMM_four_frame, WMM_six_frame};
-#define MAX_SP_Len (WMM_all_frame << 4)
-#define IEEE80211_QOS_TID 0x0f
-#define QOS_CTL_NOTCONTAIN_ACK (0x01 << 5)
-
-#define IEEE80211_DTIM_MBCAST 4
-#define IEEE80211_DTIM_UCAST 2
-#define IEEE80211_DTIM_VALID 1
-#define IEEE80211_DTIM_INVALID 0
-
-#define IEEE80211_PS_DISABLED 0
-#define IEEE80211_PS_UNICAST IEEE80211_DTIM_UCAST
-#define IEEE80211_PS_MBCAST IEEE80211_DTIM_MBCAST
-
-//added by David for QoS 2006/6/30
-//#define WMM_Hang_8187
-#ifdef WMM_Hang_8187
-#undef WMM_Hang_8187
-#endif
-
-#define WME_AC_BK 0x00
-#define WME_AC_BE 0x01
-#define WME_AC_VI 0x02
-#define WME_AC_VO 0x03
-#define WME_ACI_MASK 0x03
-#define WME_AIFSN_MASK 0x03
-#define WME_AC_PRAM_LEN 16
-
-#define MAX_RECEIVE_BUFFER_SIZE 9100
-
-//UP Mapping to AC, used in MgntQuery_SequenceNumber() and maybe for DSCP
-//#define UP2AC(up) ((up<3) ? ((up==0)?1:0) : (up>>1))
-#define UP2AC(up) ( \
- ((up) < 1) ? WME_AC_BE : \
- ((up) < 3) ? WME_AC_BK : \
- ((up) < 4) ? WME_AC_BE : \
- ((up) < 6) ? WME_AC_VI : \
- WME_AC_VO)
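-/* i.e. UP 0 and 3 map to BE, UP 1-2 to BK, UP 4-5 to VI, UP 6-7 to VO */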
-//AC Mapping to UP, used in the Tx path for selecting the corresponding TX queue
-#define AC2UP(_ac) ( \
- ((_ac) == WME_AC_VO) ? 6 : \
- ((_ac) == WME_AC_VI) ? 5 : \
- ((_ac) == WME_AC_BK) ? 1 : \
- 0)
-
-#define ETHER_ADDR_LEN 6 /* length of an Ethernet address */
-#define ETHERNET_HEADER_SIZE	14	/* length of two Ethernet addresses plus ether type */
-
-struct ether_header {
- u8 ether_dhost[ETHER_ADDR_LEN];
- u8 ether_shost[ETHER_ADDR_LEN];
- u16 ether_type;
-} __packed;
-
-#ifndef ETHERTYPE_PAE
-#define ETHERTYPE_PAE 0x888e /* EAPOL PAE/802.1x */
-#endif
-#ifndef ETHERTYPE_IP
-#define ETHERTYPE_IP 0x0800 /* IP protocol */
-#endif
-
-typedef enum _erp_t {
- ERP_NonERPpresent = 0x01,
- ERP_UseProtection = 0x02,
- ERP_BarkerPreambleMode = 0x04,
-} erp_t;
-
-
-struct ieee80211_network {
- /* These entries are used to identify a unique network */
- u8 bssid[ETH_ALEN]; /* u16 aligned! */
- u8 channel;
-
- // CCXv4 S59, MBSSID.
- bool bMBssidValid;
- u8 MBssid[ETH_ALEN]; /* u16 aligned! */
- u8 MBssidMask;
- /* Ensure null-terminated for any debug msgs */
- u8 ssid[IW_ESSID_MAX_SIZE + 1];
- u8 ssid_len;
- struct ieee80211_qos_data qos_data;
-
- //added by amy for LEAP
- bool bWithAironetIE;
- bool bCkipSupported;
- bool bCcxRmEnable;
- u16 CcxRmState[2];
- // CCX 2 S38, WLAN Device Version Number element. Annie, 2006-08-20.
- bool bWithCcxVerNum;
- u8 BssCcxVerNumber;
- /* These are network statistics */
- struct ieee80211_rx_stats stats;
- u16 capability;
- u8 rates[MAX_RATES_LENGTH];
- u8 rates_len;
- u8 rates_ex[MAX_RATES_EX_LENGTH];
- u8 rates_ex_len;
- unsigned long last_scanned;
- u8 mode;
- u32 flags;
- u32 last_associate;
- u32 time_stamp[2];
- u16 beacon_interval;
- u16 listen_interval;
- u16 atim_window;
- u8 erp_value;
- u8 wpa_ie[MAX_WPA_IE_LEN];
- size_t wpa_ie_len;
- u8 rsn_ie[MAX_WPA_IE_LEN];
- size_t rsn_ie_len;
-
- struct ieee80211_tim_parameters tim;
- u8 dtim_period;
- u8 dtim_data;
- u32 last_dtim_sta_time[2];
-
-	//appended for QoS
- u8 wmm_info;
- struct ieee80211_wmm_ac_param wmm_param[4];
- u8 QoS_Enable;
-#ifdef THOMAS_TURBO
- u8 Turbo_Enable;//enable turbo mode, added by thomas
-#endif
- u16 CountryIeLen;
- u8 CountryIeBuf[MAX_IE_LEN];
- // HT Related, by amy, 2008.04.29
- BSS_HT bssht;
- // Add to handle broadcom AP management frame CCK rate.
- bool broadcom_cap_exist;
- bool ralink_cap_exist;
- bool atheros_cap_exist;
- bool cisco_cap_exist;
- bool unknown_cap_exist;
-// u8 berp_info;
- bool berp_info_valid;
- bool buseprotection;
- //put at the end of the structure.
- struct list_head list;
-};
-
-enum ieee80211_state {
-
- /* the card is not linked at all */
- IEEE80211_NOLINK = 0,
-
-	/* IEEE80211_ASSOCIATING* are for BSS client mode;
-	 * the driver shall not perform RX filtering unless
-	 * the state is LINKED.
-	 * The driver shall just check for the state LINKED and
-	 * default to NOLINK for ALL the other states (including
- * LINKED_SCANNING)
- */
-
- /* the association procedure will start (wq scheduling)*/
- IEEE80211_ASSOCIATING,
- IEEE80211_ASSOCIATING_RETRY,
-
- /* the association procedure is sending AUTH request*/
- IEEE80211_ASSOCIATING_AUTHENTICATING,
-
-	/* the association procedure has successfully authenticated
- * and is sending association request
- */
- IEEE80211_ASSOCIATING_AUTHENTICATED,
-
-	/* the link is ok. The card is associated to a BSS or linked
-	 * to an IBSS cell or acting as an AP and creating the bss
- */
- IEEE80211_LINKED,
-
-	/* same as LINKED, but the driver shall apply RX filter
-	 * rules as if we were in NOLINK mode. The card is still
-	 * logically linked, but it is doing a synchronous site survey;
-	 * it will be back to the LINKED state afterwards.
- */
- IEEE80211_LINKED_SCANNING,
-
-};
-
-#define DEFAULT_MAX_SCAN_AGE (15 * HZ)
-#define DEFAULT_FTS 2346
-
-#define CFG_IEEE80211_RESERVE_FCS (1<<0)
-#define CFG_IEEE80211_COMPUTE_FCS (1<<1)
-#define CFG_IEEE80211_RTS (1<<2)
-
-#define IEEE80211_24GHZ_MIN_CHANNEL 1
-#define IEEE80211_24GHZ_MAX_CHANNEL 14
-#define IEEE80211_24GHZ_CHANNELS (IEEE80211_24GHZ_MAX_CHANNEL - \
- IEEE80211_24GHZ_MIN_CHANNEL + 1)
-
-#define IEEE80211_52GHZ_MIN_CHANNEL 34
-#define IEEE80211_52GHZ_MAX_CHANNEL 165
-#define IEEE80211_52GHZ_CHANNELS (IEEE80211_52GHZ_MAX_CHANNEL - \
- IEEE80211_52GHZ_MIN_CHANNEL + 1)
-
-
-
-struct tx_pending {
- int frag;
- struct ieee80211_txb *txb;
-};
-
-struct bandwidth_autoswitch {
- long threshold_20Mhzto40Mhz;
- long threshold_40Mhzto20Mhz;
- bool bforced_tx20Mhz;
- bool bautoswitch_enable;
-};
-
-
-//added by amy for order
-
-#define REORDER_WIN_SIZE 128
-#define REORDER_ENTRY_NUM 128
-struct rx_reorder_entry {
- struct list_head List;
- u16 SeqNum;
- struct ieee80211_rxb *prxb;
-};
-//added by amy for order
-typedef enum _Fsync_State {
- Default_Fsync,
- HW_Fsync,
- SW_Fsync
-} Fsync_State;
-
-// Power save mode configured.
-typedef enum _RT_PS_MODE {
- eActive, // Active/Continuous access.
- eMaxPs, // Max power save mode.
- eFastPs // Fast power save mode.
-} RT_PS_MODE;
-
-typedef enum _IPS_CALLBACK_FUNCION {
- IPS_CALLBACK_NONE = 0,
- IPS_CALLBACK_MGNT_LINK_REQUEST = 1,
- IPS_CALLBACK_JOIN_REQUEST = 2,
-} IPS_CALLBACK_FUNCION;
-
-typedef enum _RT_JOIN_ACTION {
- RT_JOIN_INFRA = 1,
- RT_JOIN_IBSS = 2,
- RT_START_IBSS = 3,
- RT_NO_ACTION = 4,
-} RT_JOIN_ACTION;
-
-struct ibss_parms {
- u16 atimWin;
-};
-#define MAX_NUM_RATES 264 // Max num of support rates element: 8, Max num of ext. support rate: 255. 061122, by rcnjko.
-
-// RF state.
-typedef enum _RT_RF_POWER_STATE {
- eRfOn,
- eRfSleep,
- eRfOff
-} RT_RF_POWER_STATE;
-
-struct rt_power_save_control {
-
- //
-	// Inactive Power Save (IPS): Disable RF when disconnected
- //
- bool bInactivePs;
- bool bIPSModeBackup;
- bool bSwRfProcessing;
- RT_RF_POWER_STATE eInactivePowerState;
- struct work_struct InactivePsWorkItem;
- struct timer_list InactivePsTimer;
-
- // Return point for join action
- IPS_CALLBACK_FUNCION ReturnPoint;
-
-	// Recorded Parameters for rescheduled JoinRequest
- bool bTmpBssDesc;
- RT_JOIN_ACTION tmpJoinAction;
- struct ieee80211_network tmpBssDesc;
-
-	// Recorded Parameters for rescheduled MgntLinkRequest
- bool bTmpScanOnly;
- bool bTmpActiveScan;
- bool bTmpFilterHiddenAP;
- bool bTmpUpdateParms;
- u8 tmpSsidBuf[33];
- struct octet_string tmpSsid2Scan;
- bool bTmpSsid2Scan;
- u8 tmpNetworkType;
- u8 tmpChannelNumber;
- u16 tmpBcnPeriod;
- u8 tmpDtimPeriod;
- u16 tmpmCap;
- struct octet_string tmpSuppRateSet;
- u8 tmpSuppRateBuf[MAX_NUM_RATES];
- bool bTmpSuppRate;
- struct ibss_parms tmpIbpm;
- bool bTmpIbpm;
-
- //
-	// Leisure Power Save: Disable RF if connected but traffic is not busy
- //
- bool bLeisurePs;
-
-};
-
-typedef u32 RT_RF_CHANGE_SOURCE;
-#define RF_CHANGE_BY_SW BIT(31)
-#define RF_CHANGE_BY_HW BIT(30)
-#define RF_CHANGE_BY_PS BIT(29)
-#define RF_CHANGE_BY_IPS BIT(28)
-#define RF_CHANGE_BY_INIT 0 // Do not change the RFOff reason. Defined by Bruce, 2008-01-17.
-
-typedef enum {
- COUNTRY_CODE_FCC = 0,
- COUNTRY_CODE_IC = 1,
- COUNTRY_CODE_ETSI = 2,
- COUNTRY_CODE_SPAIN = 3,
- COUNTRY_CODE_FRANCE = 4,
- COUNTRY_CODE_MKK = 5,
- COUNTRY_CODE_MKK1 = 6,
- COUNTRY_CODE_ISRAEL = 7,
- COUNTRY_CODE_TELEC,
- COUNTRY_CODE_MIC,
- COUNTRY_CODE_GLOBAL_DOMAIN
-} country_code_type_t;
-
-#define RT_MAX_LD_SLOT_NUM 10
-struct rt_link_detect {
-
- u32 NumRecvBcnInPeriod;
- u32 NumRecvDataInPeriod;
-
- u32 RxBcnNum[RT_MAX_LD_SLOT_NUM]; // number of Rx beacon / CheckForHang_period to determine link status
- u32 RxDataNum[RT_MAX_LD_SLOT_NUM]; // number of Rx data / CheckForHang_period to determine link status
- u16 SlotNum; // number of CheckForHang period to determine link status
- u16 SlotIndex;
-
- u32 NumTxOkInPeriod;
- u32 NumRxOkInPeriod;
- bool bBusyTraffic;
-};
-
-
-struct ieee80211_device {
- struct net_device *dev;
- struct ieee80211_security sec;
-
- //hw security related
-// u8 hwsec_support; //support?
- u8 hwsec_active; //hw security active.
- bool is_silent_reset;
- bool ieee_up;
- //added by amy
- bool bSupportRemoteWakeUp;
- RT_PS_MODE dot11PowerSaveMode; // Power save mode configured.
- bool actscanning;
- bool beinretry;
- RT_RF_POWER_STATE eRFPowerState;
- RT_RF_CHANGE_SOURCE RfOffReason;
- bool is_set_key;
-	//11n spec related; I wonder if these info structures need to be moved out of ieee80211_device
-
- //11n HT below
- PRT_HIGH_THROUGHPUT pHTInfo;
- //struct timer_list SwBwTimer;
-// spinlock_t chnlop_spinlock;
- spinlock_t bw_spinlock;
-
- spinlock_t reorder_spinlock;
-	// for HT operation rate set. We use this one for the HT data rate to separate different descriptors
-	//the way to fill this is the same as in the IE
- u8 Regdot11HTOperationalRateSet[16]; //use RATR format
- u8 dot11HTOperationalRateSet[16]; //use RATR format
- u8 RegHTSuppRateSet[16];
- u8 HTCurrentOperaRate;
- u8 HTHighestOperaRate;
- //wb added for rate operation mode to firmware
- u8 bTxDisableRateFallBack;
- u8 bTxUseDriverAssingedRate;
- atomic_t atm_chnlop;
- atomic_t atm_swbw;
-// u8 HTHighestOperaRate;
-// u8 HTCurrentOperaRate;
-
- // 802.11e and WMM Traffic Stream Info (TX)
- struct list_head Tx_TS_Admit_List;
- struct list_head Tx_TS_Pending_List;
- struct list_head Tx_TS_Unused_List;
- struct tx_ts_record TxTsRecord[TOTAL_TS_NUM];
- // 802.11e and WMM Traffic Stream Info (RX)
- struct list_head Rx_TS_Admit_List;
- struct list_head Rx_TS_Pending_List;
- struct list_head Rx_TS_Unused_List;
- struct rx_ts_record RxTsRecord[TOTAL_TS_NUM];
- struct rx_reorder_entry RxReorderEntry[128];
- struct list_head RxReorder_Unused_List;
- // Qos related. Added by Annie, 2005-11-01.
-// PSTA_QOS pStaQos;
- u8 ForcedPriority; // Force per-packet priority 1~7. (default: 0, not to force it.)
-
-
- /* Bookkeeping structures */
- struct net_device_stats stats;
- struct ieee80211_stats ieee_stats;
- struct ieee80211_softmac_stats softmac_stats;
-
- /* Probe / Beacon management */
- struct list_head network_free_list;
- struct list_head network_list;
- struct ieee80211_network *networks;
- int scans;
- int scan_age;
-
- int iw_mode; /* operating mode (IW_MODE_*) */
- struct iw_spy_data spy_data;
-
- spinlock_t lock;
- spinlock_t wpax_suitlist_lock;
-
- int tx_headroom; /* Set to size of any additional room needed at front
- * of allocated Tx SKBs
- */
- u32 config;
-
- /* WEP and other encryption related settings at the device level */
- int open_wep; /* Set to 1 to allow unencrypted frames */
- int auth_mode;
- int reset_on_keychange; /* Set to 1 if the HW needs to be reset on
- * WEP key changes
- */
-
- /* If the host performs {en,de}cryption, then set to 1 */
- int host_encrypt;
- int host_encrypt_msdu;
- int host_decrypt;
- /* host performs multicast decryption */
- int host_mc_decrypt;
-
- /* host should strip IV and ICV from protected frames */
- /* meaningful only when hardware decryption is being used */
- int host_strip_iv_icv;
-
- int host_open_frag;
- int host_build_iv;
- int ieee802_1x; /* is IEEE 802.1X used */
-
- /* WPA data */
- bool bHalfWirelessN24GMode;
- int wpa_enabled;
- int drop_unencrypted;
- int tkip_countermeasures;
- int privacy_invoked;
- size_t wpa_ie_len;
- u8 *wpa_ie;
- u8 ap_mac_addr[6];
- u16 pairwise_key_type;
- u16 group_key_type;
- struct list_head crypt_deinit_list;
- struct ieee80211_crypt_data *crypt[WEP_KEYS];
- int tx_keyidx; /* default TX key index (crypt[tx_keyidx]) */
- struct timer_list crypt_deinit_timer;
- int crypt_quiesced;
-
- int bcrx_sta_key; /* use individual keys to override default keys even
- * with RX of broad/multicast frames
- */
-
- /* Fragmentation structures */
-	// each stream contains an entry
- struct ieee80211_frag_entry frag_cache[17][IEEE80211_FRAG_CACHE_LEN];
- unsigned int frag_next_idx[17];
- u16 fts; /* Fragmentation Threshold */
-#define DEFAULT_RTS_THRESHOLD 2346U
-#define MIN_RTS_THRESHOLD 1
-#define MAX_RTS_THRESHOLD 2346U
- u16 rts; /* RTS threshold */
-
- /* Association info */
- u8 bssid[ETH_ALEN];
-
-	/* This stores info for the current network.
-	 * Either the network we are associated with in INFRASTRUCTURE mode
-	 * or the network that we are creating in MASTER mode.
-	 * ad-hoc is a mixture ;-).
-	 * Note that in infrastructure mode, even when not associated,
-	 * fields bssid and essid may be valid (if wpa_set and essid_set
-	 * are true) as they carry the value set by the user via iwconfig
- */
- struct ieee80211_network current_network;
-
- enum ieee80211_state state;
-
- int short_slot;
- int reg_mode;
- int mode; /* A, B, G */
- int modulation; /* CCK, OFDM */
- int freq_band; /* 2.4Ghz, 5.2Ghz, Mixed */
- int abg_true; /* ABG flag */
-
-	/* used for forcing the ibss workqueue to terminate
-	 * without waiting for the synchronous scan to terminate
- */
- short sync_scan_hurryup;
-
- int perfect_rssi;
- int worst_rssi;
-
- u16 prev_seq_ctl; /* used to drop duplicate frames */
-
- /* map of allowed channels. 0 is dummy */
-	// FIXME: remember to default to a basic channel plan depending on the PHY type
- void *dot11d_info;
- bool bGlobalDomain;
- int rate; /* current rate */
- int basic_rate;
-	//FIXME: place callback, see if redundant with softmac_features
- short active_scan;
-
- /* this contains flags for selectively enable softmac support */
- u16 softmac_features;
-
- /* if the sequence control field is not filled by HW */
- u16 seq_ctrl[5];
-
- /* association procedure transaction sequence number */
- u16 associate_seq;
-
- /* AID for RTXed association responses */
- u16 assoc_id;
-
- /* power save mode related*/
- short ps;
- short sta_sleep;
- int ps_timeout;
- int ps_period;
- struct work_struct ps_task;
- u32 ps_th;
- u32 ps_tl;
-
- short raw_tx;
- /* used if IEEE_SOFTMAC_TX_QUEUE is set */
- short queue_stop;
- short scanning;
- short proto_started;
-
- struct mutex wx_mutex;
- struct mutex scan_mutex;
-
- spinlock_t mgmt_tx_lock;
- spinlock_t beacon_lock;
-
- short beacon_txing;
-
- short wap_set;
- short ssid_set;
-
- u8 wpax_type_set; //{added by David, 2006.9.28}
- u32 wpax_type_notify; //{added by David, 2006.9.26}
-
- /* QoS related flag */
- s8 init_wmmparam_flag;
- /* set on initialization */
- u8 qos_support;
-
- /* for discarding duplicated packets in IBSS */
- struct list_head ibss_mac_hash[IEEE_IBSS_MAC_HASH_SIZE];
-
- /* for discarding duplicated packets in BSS */
- u16 last_rxseq_num[17]; /* rx seq previous per-tid */
-	u16 last_rxfrag_num[17];/* rx frag previous per-tid */
- unsigned long last_packet_time[17];
-
- /* for PS mode */
- unsigned long last_rx_ps_time;
-
- /* used if IEEE_SOFTMAC_SINGLE_QUEUE is set */
- struct sk_buff *mgmt_queue_ring[MGMT_QUEUE_NUM];
- int mgmt_queue_head;
- int mgmt_queue_tail;
-//{ added for rtl819x
-#define IEEE80211_QUEUE_LIMIT 128
- u8 AsocRetryCount;
- unsigned int hw_header;
- struct sk_buff_head skb_waitQ[MAX_QUEUE_SIZE];
- struct sk_buff_head skb_aggQ[MAX_QUEUE_SIZE];
- struct sk_buff_head skb_drv_aggQ[MAX_QUEUE_SIZE];
- u32 sta_edca_param[4];
- bool aggregation;
- // Enable/Disable Rx immediate BA capability.
- bool enable_rx_imm_BA;
- bool bibsscoordinator;
-
- //+by amy for DM ,080515
- //Dynamic Tx power for near/far range enable/Disable , by amy , 2008-05-15
- bool bdynamic_txpower_enable;
-
- bool bCTSToSelfEnable;
- u8 CTSToSelfTH;
-
- u32 fsync_time_interval;
- u32 fsync_rate_bitmap;
- u8 fsync_rssi_threshold;
- bool bfsync_enable;
-
- u8 fsync_multiple_timeinterval; // FsyncMultipleTimeInterval * FsyncTimeInterval
- u32 fsync_firstdiff_ratethreshold; // low threshold
- u32 fsync_seconddiff_ratethreshold; // decrease threshold
- Fsync_State fsync_state;
- bool bis_any_nonbepkts;
- //20Mhz 40Mhz AutoSwitch Threshold
- struct bandwidth_autoswitch bandwidth_auto_switch;
- //for txpower tracking
- bool FwRWRF;
-
- //added by amy for AP roaming
- struct rt_link_detect LinkDetectInfo;
- //added by amy for ps
- struct rt_power_save_control PowerSaveControl;
-//}
- /* used if IEEE_SOFTMAC_TX_QUEUE is set */
- struct tx_pending tx_pending;
-
- /* used if IEEE_SOFTMAC_ASSOCIATE is set */
- struct timer_list associate_timer;
-
- /* used if IEEE_SOFTMAC_BEACONS is set */
- struct timer_list beacon_timer;
- struct work_struct associate_complete_wq;
- struct work_struct associate_procedure_wq;
- struct delayed_work softmac_scan_wq;
- struct delayed_work associate_retry_wq;
- struct delayed_work start_ibss_wq;
- struct work_struct wx_sync_scan_wq;
- struct workqueue_struct *wq;
- // Qos related. Added by Annie, 2005-11-01.
- //STA_QOS StaQos;
-
- //u32 STA_EDCA_PARAM[4];
- //CHANNEL_ACCESS_SETTING ChannelAccessSetting;
-
- struct ieee80211_rxb *stats_IndicateArray[REORDER_WIN_SIZE];
-
- /* Callback functions */
- void (*set_security)(struct net_device *dev,
- struct ieee80211_security *sec);
-
-	/* Used to TX data frames using txb structs.
-	 * This is not used if the flag IEEE_SOFTMAC_TX_QUEUE
-	 * is set in softmac_features.
- */
- int (*hard_start_xmit)(struct ieee80211_txb *txb,
- struct net_device *dev);
-
- int (*reset_port)(struct net_device *dev);
- int (*is_queue_full)(struct net_device *dev, int pri);
-
- int (*handle_management)(struct net_device *dev,
- struct ieee80211_network *network, u16 type);
- int (*is_qos_active)(struct net_device *dev, struct sk_buff *skb);
-
- /* Softmac-generated frames (management) are TXed via this
- * callback if the flag IEEE_SOFTMAC_SINGLE_QUEUE is
- * not set. As some cards may have different HW queues that
-	 * one might want to use for data and management frames,
- * the option to have two callbacks might be useful.
- * This function can't sleep.
- */
- int (*softmac_hard_start_xmit)(struct sk_buff *skb,
- struct net_device *dev);
-
- /* used instead of hard_start_xmit (not softmac_hard_start_xmit)
- * if the IEEE_SOFTMAC_TX_QUEUE feature is used to TX data
-	 * frames. If the option IEEE_SOFTMAC_SINGLE_QUEUE is also set,
-	 * then management frames are also sent via this callback.
- * This function can't sleep.
- */
- void (*softmac_data_hard_start_xmit)(struct sk_buff *skb,
- struct net_device *dev, int rate);
-
- /* stops the HW queue for DATA frames. Useful to avoid
-	 * wasting time TXing data frames when we are reassociating.
- * This function can sleep.
- */
- void (*data_hard_stop)(struct net_device *dev);
-
-	/* OK, this is complementary to data_hard_stop */
- void (*data_hard_resume)(struct net_device *dev);
-
-	/* ask the driver to retune the radio.
-	 * This function can sleep. The driver should ensure
-	 * the radio has been switched before returning.
- */
- void (*set_chan)(struct net_device *dev, short ch);
-
- /* These are not used if the ieee stack takes care of
- * scanning (IEEE_SOFTMAC_SCAN feature set).
- * In this case only the set_chan is used.
- *
-	 * The synchronous version is similar to start_scan but
-	 * does not return until all channels have been scanned.
-	 * It is called in user context and should sleep;
-	 * it is called in a work_queue when switching to ad-hoc mode
-	 * or on behalf of an iwlist scan when the card is associated
-	 * and the root user asks for a scan.
-	 * The function stop_scan should stop both the synchronous and
-	 * background scanning and can sleep.
-	 * The function start_scan should initiate the background
-	 * scanning and can't sleep.
- */
- void (*scan_syncro)(struct net_device *dev);
- void (*start_scan)(struct net_device *dev);
- void (*stop_scan)(struct net_device *dev);
-
-	/* indicate to the driver that the link state has changed,
-	 * for example that the card is now associated.
-	 * The driver might be interested in this to apply RX filter
-	 * rules or simply light the LINK LED
- */
- void (*link_change)(struct net_device *dev);
-
-	/* these two functions indicate to the HW when to start
-	 * and stop sending beacons. This is used when the
-	 * IEEE_SOFTMAC_BEACONS feature is not set. For now,
-	 * stop_send_beacons is NOT guaranteed to be called only
- * after start_send_beacons.
- */
- void (*start_send_beacons)(struct net_device *dev, u16 tx_rate);
- void (*stop_send_beacons)(struct net_device *dev);
-
- /* power save mode related */
- void (*sta_wake_up)(struct net_device *dev);
- void (*ps_request_tx_ack)(struct net_device *dev);
- void (*enter_sleep_state)(struct net_device *dev, u32 th, u32 tl);
- short (*ps_is_queue_empty)(struct net_device *dev);
- int (*handle_beacon)(struct net_device *dev, struct ieee80211_beacon *beacon, struct ieee80211_network *network);
- int (*handle_assoc_response)(struct net_device *dev, struct ieee80211_assoc_response_frame *resp, struct ieee80211_network *network);
-
-
- /* check whether Tx hw resource available */
- short (*check_nic_enough_desc)(struct net_device *dev, int queue_index);
- //added by wb for HT related
-// void (*SwChnlByTimerHandler)(struct net_device *dev, int channel);
- void (*SetBWModeHandler)(struct net_device *dev, enum ht_channel_width Bandwidth, enum ht_extension_chan_offset Offset);
-// void (*UpdateHalRATRTableHandler)(struct net_device* dev, u8* pMcsRate);
- bool (*GetNmodeSupportBySecCfg)(struct net_device *dev);
- void (*SetWirelessMode)(struct net_device *dev, u8 wireless_mode);
- bool (*GetHalfNmodeSupportByAPsHandler)(struct net_device *dev);
- void (*InitialGainHandler)(struct net_device *dev, u8 Operation);
-
- /* This must be the last item so that it points to the data
- * allocated beyond this structure by alloc_ieee80211
- */
- u8 priv[];
-};
-
-#define IEEE_A (1<<0)
-#define IEEE_B (1<<1)
-#define IEEE_G (1<<2)
-#define IEEE_N_24G (1<<4)
-#define IEEE_N_5G (1<<5)
-#define IEEE_MODE_MASK (IEEE_A | IEEE_B | IEEE_G)
-
-/* Generate an 802.11 header */
-
-/* Uses the channel change callback directly
- * instead of [start/stop] scan callbacks
- */
-#define IEEE_SOFTMAC_SCAN (1<<2)
-
-/* Perform authentication and association handshake */
-#define IEEE_SOFTMAC_ASSOCIATE (1<<3)
-
-/* Generate probe requests */
-#define IEEE_SOFTMAC_PROBERQ (1<<4)
-
-/* Generate responses to probe requests */
-#define IEEE_SOFTMAC_PROBERS (1<<5)
-
-/* The ieee802.11 stack will manage the netif queue
- * wake/stop for the driver, taking care of 802.11
- * fragmentation. See softmac.c for details.
- */
-#define IEEE_SOFTMAC_TX_QUEUE (1<<7)
-
-/* Uses only the softmac_data_hard_start_xmit
- * even for TX management frames.
- */
-#define IEEE_SOFTMAC_SINGLE_QUEUE (1<<8)
-
-/* Generate beacons. The stack will enqueue beacons
- * to the card
- */
-#define IEEE_SOFTMAC_BEACONS (1<<6)
-
-static inline void *ieee80211_priv(struct net_device *dev)
-{
- return ((struct ieee80211_device *)netdev_priv(dev))->priv;
-}
-
-static inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
-{
- /* Single white space is for Linksys APs */
- if (essid_len == 1 && essid[0] == ' ')
- return 1;
-
- /* Otherwise, if the entire essid is 0, we assume it is hidden */
- while (essid_len) {
- essid_len--;
- if (essid[essid_len] != '\0')
- return 0;
- }
-
- return 1;
-}
-
-static inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee, int mode)
-{
- /*
- * It is possible for both access points and our device to support
- * combinations of modes, so as long as there is one valid combination
-	 * of AP/device supported modes, we return success
- *
- */
- if ((mode & IEEE_A) &&
- (ieee->modulation & IEEE80211_OFDM_MODULATION) &&
- (ieee->freq_band & IEEE80211_52GHZ_BAND))
- return 1;
-
- if ((mode & IEEE_G) &&
- (ieee->modulation & IEEE80211_OFDM_MODULATION) &&
- (ieee->freq_band & IEEE80211_24GHZ_BAND))
- return 1;
-
- if ((mode & IEEE_B) &&
- (ieee->modulation & IEEE80211_CCK_MODULATION) &&
- (ieee->freq_band & IEEE80211_24GHZ_BAND))
- return 1;
-
- return 0;
-}
-
-static inline int ieee80211_get_hdrlen(u16 fc)
-{
- int hdrlen = IEEE80211_3ADDR_LEN;
-
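-	/* Start from the common 3-address (24 byte) header, then adjust for
-	 * 4-address data frames, the QoS Control field and short control frames.
-	 */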
- switch (WLAN_FC_GET_TYPE(fc)) {
- case IEEE80211_FTYPE_DATA:
- if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS))
- hdrlen = IEEE80211_4ADDR_LEN; /* Addr4 */
- if (IEEE80211_QOS_HAS_SEQ(fc))
- hdrlen += 2; /* QOS ctrl*/
- break;
- case IEEE80211_FTYPE_CTL:
- switch (WLAN_FC_GET_STYPE(fc)) {
- case IEEE80211_STYPE_CTS:
- case IEEE80211_STYPE_ACK:
- hdrlen = IEEE80211_1ADDR_LEN;
- break;
- default:
- hdrlen = IEEE80211_2ADDR_LEN;
- break;
- }
- break;
- }
-
- return hdrlen;
-}
-
-static inline u8 *ieee80211_get_payload(struct rtl_80211_hdr *hdr)
-{
- switch (ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl))) {
- case IEEE80211_1ADDR_LEN:
- return ((struct rtl_80211_hdr_1addr *)hdr)->payload;
- case IEEE80211_2ADDR_LEN:
- return ((struct rtl_80211_hdr_2addr *)hdr)->payload;
- case IEEE80211_3ADDR_LEN:
- return ((struct rtl_80211_hdr_3addr *)hdr)->payload;
- case IEEE80211_4ADDR_LEN:
- return ((struct rtl_80211_hdr_4addr *)hdr)->payload;
- }
- return NULL;
-}
-
-static inline int ieee80211_is_ofdm_rate(u8 rate)
-{
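-	/* Bit 7 flags a basic rate; mask it off before matching the rate value. */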
- switch (rate & ~IEEE80211_BASIC_RATE_MASK) {
- case IEEE80211_OFDM_RATE_6MB:
- case IEEE80211_OFDM_RATE_9MB:
- case IEEE80211_OFDM_RATE_12MB:
- case IEEE80211_OFDM_RATE_18MB:
- case IEEE80211_OFDM_RATE_24MB:
- case IEEE80211_OFDM_RATE_36MB:
- case IEEE80211_OFDM_RATE_48MB:
- case IEEE80211_OFDM_RATE_54MB:
- return 1;
- }
- return 0;
-}
-
-static inline int ieee80211_is_cck_rate(u8 rate)
-{
- switch (rate & ~IEEE80211_BASIC_RATE_MASK) {
- case IEEE80211_CCK_RATE_1MB:
- case IEEE80211_CCK_RATE_2MB:
- case IEEE80211_CCK_RATE_5MB:
- case IEEE80211_CCK_RATE_11MB:
- return 1;
- }
- return 0;
-}
-
-
-/* ieee80211.c */
-void free_ieee80211(struct net_device *dev);
-struct net_device *alloc_ieee80211(int sizeof_priv);
-
-int ieee80211_set_encryption(struct ieee80211_device *ieee);
-
-/* ieee80211_tx.c */
-
-int ieee80211_encrypt_fragment(struct ieee80211_device *ieee,
- struct sk_buff *frag, int hdr_len);
-
-netdev_tx_t ieee80211_xmit(struct sk_buff *skb, struct net_device *dev);
-void ieee80211_txb_free(struct ieee80211_txb *txb);
-
-
-/* ieee80211_rx.c */
-int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats);
-void ieee80211_rx_mgt(struct ieee80211_device *ieee,
- struct rtl_80211_hdr_4addr *header,
- struct ieee80211_rx_stats *stats);
-
-/* ieee80211_wx.c */
-int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key);
-int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key);
-int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key);
-int ieee80211_wx_get_encode_ext(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- struct iw_param *data, char *extra);
-int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len);
-
-/* ieee80211_softmac.c */
-short ieee80211_is_54g(const struct ieee80211_network *net);
-short ieee80211_is_shortslot(const struct ieee80211_network *net);
-int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee,
- struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats,
- u16 type, u16 stype);
-void ieee80211_softmac_new_net(struct ieee80211_device *ieee,
- struct ieee80211_network *net);
-
-void SendDisassociation(struct ieee80211_device *ieee, u8 *asSta, u8 asRsn);
-void ieee80211_softmac_xmit(struct ieee80211_txb *txb,
- struct ieee80211_device *ieee);
-
-void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
-void notify_wx_assoc_event(struct ieee80211_device *ieee);
-void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee);
-void ieee80211_start_bss(struct ieee80211_device *ieee);
-void ieee80211_start_master_bss(struct ieee80211_device *ieee);
-void ieee80211_start_ibss(struct ieee80211_device *ieee);
-void ieee80211_softmac_init(struct ieee80211_device *ieee);
-void ieee80211_softmac_free(struct ieee80211_device *ieee);
-void ieee80211_associate_abort(struct ieee80211_device *ieee);
-void ieee80211_disassociate(struct ieee80211_device *ieee);
-void ieee80211_stop_scan(struct ieee80211_device *ieee);
-void ieee80211_start_scan_syncro(struct ieee80211_device *ieee);
-void ieee80211_check_all_nets(struct ieee80211_device *ieee);
-void ieee80211_start_protocol(struct ieee80211_device *ieee);
-void ieee80211_stop_protocol(struct ieee80211_device *ieee);
-void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee);
-void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee);
-void ieee80211_reset_queue(struct ieee80211_device *ieee);
-void ieee80211_wake_queue(struct ieee80211_device *ieee);
-void ieee80211_stop_queue(struct ieee80211_device *ieee);
-struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee);
-void ieee80211_start_send_beacons(struct ieee80211_device *ieee);
-int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee,
- struct iw_point *p);
-void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success);
-
-void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee);
-
-/* ieee80211_crypt_ccmp&tkip&wep.c */
-
-int ieee80211_crypto_init(void);
-void ieee80211_crypto_deinit(void);
-int ieee80211_crypto_tkip_init(void);
-void ieee80211_crypto_tkip_exit(void);
-int ieee80211_crypto_ccmp_init(void);
-void ieee80211_crypto_ccmp_exit(void);
-int ieee80211_crypto_wep_init(void);
-void ieee80211_crypto_wep_exit(void);
-
-/* ieee80211_softmac_wx.c */
-
-int ieee80211_wx_get_wap(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *ext);
-
-int ieee80211_wx_set_wap(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *awrq,
- char *extra);
-
-int ieee80211_wx_get_essid(struct ieee80211_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
-
-int ieee80211_wx_set_rate(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-int ieee80211_wx_get_rate(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-int ieee80211_wx_set_mode(struct ieee80211_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
-
-int ieee80211_wx_set_scan(struct ieee80211_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
-
-int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra);
-
-int ieee80211_wx_get_mode(struct ieee80211_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
-
-int ieee80211_wx_set_freq(struct ieee80211_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
-
-int ieee80211_wx_get_freq(struct ieee80211_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
-
-/* ieee80211_module.c */
-#ifdef CONFIG_IEEE80211_DEBUG
-int ieee80211_debug_init(void);
-void ieee80211_debug_exit(void);
-#else
-static inline int ieee80211_debug_init(void) { return 0; }
-static inline void ieee80211_debug_exit(void) { }
-#endif
-
-//extern void ieee80211_wx_sync_scan_wq(struct ieee80211_device *ieee);
-void ieee80211_wx_sync_scan_wq(struct work_struct *work);
-
-
-int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-int ieee80211_wx_get_name(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-int ieee80211_wx_set_power(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-int ieee80211_wx_get_power(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-int ieee80211_wx_set_rts(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-int ieee80211_wx_get_rts(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-//HT
-#define MAX_RECEIVE_BUFFER_SIZE 9100
-void HTDebugHTCapability(u8 *CapIE, u8 *TitleString);
-void HTDebugHTInfo(u8 *InfoIE, u8 *TitleString);
-
-void HTSetConnectBwMode(struct ieee80211_device *ieee,
- enum ht_channel_width Bandwidth, enum ht_extension_chan_offset Offset);
-void HTUpdateDefaultSetting(struct ieee80211_device *ieee);
-void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap,
- u8 *len, u8 isEncrypt);
-void HTConstructInfoElement(struct ieee80211_device *ieee, u8 *posHTInfo,
- u8 *len, u8 isEncrypt);
-void HTConstructRT2RTAggElement(struct ieee80211_device *ieee, u8 *posRT2RTAgg,
- u8 *len);
-void HTOnAssocRsp(struct ieee80211_device *ieee);
-void HTInitializeHTInfo(struct ieee80211_device *ieee);
-void HTInitializeBssDesc(PBSS_HT pBssHT);
-void HTResetSelfAndSavePeerSetting(struct ieee80211_device *ieee,
- struct ieee80211_network *pNetwork);
-void HTUpdateSelfAndPeerSetting(struct ieee80211_device *ieee,
- struct ieee80211_network *pNetwork);
-u8 HTGetHighestMCSRate(struct ieee80211_device *ieee,
- u8 *pMCSRateSet, u8 *pMCSFilter);
-extern u8 MCS_FILTER_ALL[];
-extern u16 MCS_DATA_RATE[2][2][77];
-u8 HTCCheck(struct ieee80211_device *ieee, u8 *pFrame);
-void HTResetIOTSetting(PRT_HIGH_THROUGHPUT pHTInfo);
-bool IsHTHalfNmodeAPs(struct ieee80211_device *ieee);
-u16 TxCountToDataRate(struct ieee80211_device *ieee, u8 nDataRate);
-//function in BAPROC.c
-int ieee80211_rx_ADDBAReq(struct ieee80211_device *ieee, struct sk_buff *skb);
-int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee, struct sk_buff *skb);
-int ieee80211_rx_DELBA(struct ieee80211_device *ieee, struct sk_buff *skb);
-void TsInitAddBA(struct ieee80211_device *ieee, struct tx_ts_record *pTS,
- u8 Policy, u8 bOverwritePending);
-void TsInitDelBA(struct ieee80211_device *ieee,
- struct ts_common_info *pTsCommonInfo, enum tr_select TxRxSelect);
-void BaSetupTimeOut(struct timer_list *t);
-void TxBaInactTimeout(struct timer_list *t);
-void RxBaInactTimeout(struct timer_list *t);
-void ResetBaEntry(struct ba_record *pBA);
-//function in TS.c
-bool GetTs(
- struct ieee80211_device *ieee,
- struct ts_common_info **ppTS,
- u8 *Addr,
- u8 TID,
- enum tr_select TxRxSelect, //Rx:1, Tx:0
- bool bAddNewTs
- );
-void TSInitialize(struct ieee80211_device *ieee);
-void TsStartAddBaProcess(struct ieee80211_device *ieee, struct tx_ts_record *pTxTS);
-void RemovePeerTS(struct ieee80211_device *ieee, u8 *Addr);
-void RemoveAllTS(struct ieee80211_device *ieee);
-void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee);
-
-extern const long ieee80211_wlan_frequencies[];
-
-static inline void ieee80211_increment_scans(struct ieee80211_device *ieee)
-{
- ieee->scans++;
-}
-
-static inline int ieee80211_get_scans(struct ieee80211_device *ieee)
-{
- return ieee->scans;
-}
-
-static inline const char *escape_essid(const char *essid, u8 essid_len)
-{
- static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
-
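-	/* Note: not re-entrant, returns a pointer to a static buffer. */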
- if (ieee80211_is_empty_essid(essid, essid_len)) {
- memcpy(escaped, "<hidden>", sizeof("<hidden>"));
- return escaped;
- }
-
- snprintf(escaped, sizeof(escaped), "%*pE", essid_len, essid);
- return escaped;
-}
-
-/* As this function is more related to hardware settings, it is better to
- * refer to it via the ieee handler.
- */
-short check_nic_enough_desc(struct net_device *dev, int queue_index);
-int ieee80211_data_xmit(struct sk_buff *skb, struct net_device *dev);
-int ieee80211_parse_info_param(struct ieee80211_device *ieee,
- struct ieee80211_info_element *info_element,
- u16 length,
- struct ieee80211_network *network,
- struct ieee80211_rx_stats *stats);
-
-void ieee80211_indicate_packets(struct ieee80211_device *ieee,
- struct ieee80211_rxb **prxbIndicateArray,
- u8 index);
-#define RT_ASOC_RETRY_LIMIT 5
-#endif /* IEEE80211_H */
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
deleted file mode 100644
index 840db6250b87..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
+++ /dev/null
@@ -1,235 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Host AP crypto routines
- *
- * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
- * Portions Copyright (C) 2004, Intel Corporation <jketreno@linux.intel.com>
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-
-#include "ieee80211.h"
-
-MODULE_AUTHOR("Jouni Malinen");
-MODULE_DESCRIPTION("HostAP crypto");
-MODULE_LICENSE("GPL");
-
-struct ieee80211_crypto_alg {
- struct list_head list;
- struct ieee80211_crypto_ops *ops;
-};
-
-
-struct ieee80211_crypto {
- struct list_head algs;
- spinlock_t lock;
-};
-
-static struct ieee80211_crypto *hcrypt;
-
-void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee,
- int force)
-{
- struct list_head *ptr, *n;
- struct ieee80211_crypt_data *entry;
-
- for (ptr = ieee->crypt_deinit_list.next, n = ptr->next;
- ptr != &ieee->crypt_deinit_list; ptr = n, n = ptr->next) {
- entry = list_entry(ptr, struct ieee80211_crypt_data, list);
-
- if (atomic_read(&entry->refcnt) != 0 && !force)
- continue;
-
- list_del(ptr);
-
- if (entry->ops)
- entry->ops->deinit(entry->priv);
- kfree(entry);
- }
-}
-
-void ieee80211_crypt_deinit_handler(struct timer_list *t)
-{
- struct ieee80211_device *ieee = from_timer(ieee, t, crypt_deinit_timer);
- unsigned long flags;
-
- spin_lock_irqsave(&ieee->lock, flags);
- ieee80211_crypt_deinit_entries(ieee, 0);
- if (!list_empty(&ieee->crypt_deinit_list)) {
- netdev_dbg(ieee->dev, "%s: entries remaining in delayed crypt deletion list\n",
- ieee->dev->name);
- ieee->crypt_deinit_timer.expires = jiffies + HZ;
- add_timer(&ieee->crypt_deinit_timer);
- }
- spin_unlock_irqrestore(&ieee->lock, flags);
-
-}
-
-void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
- struct ieee80211_crypt_data **crypt)
-{
- struct ieee80211_crypt_data *tmp;
- unsigned long flags;
-
- if (!(*crypt))
- return;
-
- tmp = *crypt;
- *crypt = NULL;
-
- /* must not run ops->deinit() while there may be pending encrypt or
- * decrypt operations. Use a list of delayed deinits to avoid needing
- * locking.
- */
-
- spin_lock_irqsave(&ieee->lock, flags);
- list_add(&tmp->list, &ieee->crypt_deinit_list);
- if (!timer_pending(&ieee->crypt_deinit_timer)) {
- ieee->crypt_deinit_timer.expires = jiffies + HZ;
- add_timer(&ieee->crypt_deinit_timer);
- }
- spin_unlock_irqrestore(&ieee->lock, flags);
-}
-
-int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops)
-{
- unsigned long flags;
- struct ieee80211_crypto_alg *alg;
-
- if (!hcrypt)
- return -1;
-
- alg = kzalloc(sizeof(*alg), GFP_KERNEL);
- if (!alg)
- return -ENOMEM;
-
- alg->ops = ops;
-
- spin_lock_irqsave(&hcrypt->lock, flags);
- list_add(&alg->list, &hcrypt->algs);
- spin_unlock_irqrestore(&hcrypt->lock, flags);
-
- pr_debug("ieee80211_crypt: registered algorithm '%s'\n",
- ops->name);
-
- return 0;
-}
-
-int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops)
-{
- unsigned long flags;
- struct list_head *ptr;
- struct ieee80211_crypto_alg *del_alg = NULL;
-
- if (!hcrypt)
- return -1;
-
- spin_lock_irqsave(&hcrypt->lock, flags);
- for (ptr = hcrypt->algs.next; ptr != &hcrypt->algs; ptr = ptr->next) {
- struct ieee80211_crypto_alg *alg =
- (struct ieee80211_crypto_alg *)ptr;
- if (alg->ops == ops) {
- list_del(&alg->list);
- del_alg = alg;
- break;
- }
- }
- spin_unlock_irqrestore(&hcrypt->lock, flags);
-
- if (del_alg) {
- pr_debug("ieee80211_crypt: unregistered algorithm '%s'\n",
- ops->name);
- kfree(del_alg);
- }
-
- return del_alg ? 0 : -1;
-}
-
-
-struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name)
-{
- unsigned long flags;
- struct list_head *ptr;
- struct ieee80211_crypto_alg *found_alg = NULL;
-
- if (!hcrypt)
- return NULL;
-
- spin_lock_irqsave(&hcrypt->lock, flags);
- for (ptr = hcrypt->algs.next; ptr != &hcrypt->algs; ptr = ptr->next) {
- struct ieee80211_crypto_alg *alg =
- (struct ieee80211_crypto_alg *)ptr;
- if (strcmp(alg->ops->name, name) == 0) {
- found_alg = alg;
- break;
- }
- }
- spin_unlock_irqrestore(&hcrypt->lock, flags);
-
- if (found_alg)
- return found_alg->ops;
- return NULL;
-}
-
-
-static void *ieee80211_crypt_null_init(int keyidx) { return (void *)1; }
-static void ieee80211_crypt_null_deinit(void *priv) {}
-
-static struct ieee80211_crypto_ops ieee80211_crypt_null = {
- .name = "NULL",
- .init = ieee80211_crypt_null_init,
- .deinit = ieee80211_crypt_null_deinit,
- .encrypt_mpdu = NULL,
- .decrypt_mpdu = NULL,
- .encrypt_msdu = NULL,
- .decrypt_msdu = NULL,
- .set_key = NULL,
- .get_key = NULL,
- .extra_prefix_len = 0,
- .extra_postfix_len = 0,
- .owner = THIS_MODULE,
-};
-
-int __init ieee80211_crypto_init(void)
-{
- int ret = -ENOMEM;
-
- hcrypt = kzalloc(sizeof(*hcrypt), GFP_KERNEL);
- if (!hcrypt)
- goto out;
-
- INIT_LIST_HEAD(&hcrypt->algs);
- spin_lock_init(&hcrypt->lock);
-
- ret = ieee80211_register_crypto_ops(&ieee80211_crypt_null);
- if (ret < 0) {
- kfree(hcrypt);
- hcrypt = NULL;
- }
-out:
- return ret;
-}
-
-void ieee80211_crypto_deinit(void)
-{
- struct list_head *ptr, *n;
-
- if (!hcrypt)
- return;
-
- for (ptr = hcrypt->algs.next, n = ptr->next; ptr != &hcrypt->algs;
- ptr = n, n = ptr->next) {
- struct ieee80211_crypto_alg *alg =
- (struct ieee80211_crypto_alg *)ptr;
- list_del(ptr);
- pr_debug("ieee80211_crypt: unregistered algorithm '%s' (deinit)\n",
- alg->ops->name);
- kfree(alg);
- }
-
- kfree(hcrypt);
-}
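The removed ieee80211_crypt.c above keeps registered algorithms on a simple lock-protected list keyed by name (register, unregister, lookup). A minimal userspace sketch of that registry pattern, with illustrative names only and no locking, might look like this:

/* Sketch of the name-keyed ops registry used by the removed file.
 * All identifiers here are hypothetical; the kernel code uses
 * list_head plus a spinlock instead of this singly linked list.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_crypto_ops {
    const char *name;
};

struct demo_alg {
    struct demo_alg *next;
    const struct demo_crypto_ops *ops;
};

static struct demo_alg *demo_algs;  /* head of the registered-ops list */

static int demo_register(const struct demo_crypto_ops *ops)
{
    struct demo_alg *alg = calloc(1, sizeof(*alg));

    if (!alg)
        return -1;
    alg->ops = ops;
    alg->next = demo_algs;          /* push on the front, like list_add() */
    demo_algs = alg;
    return 0;
}

static const struct demo_crypto_ops *demo_lookup(const char *name)
{
    const struct demo_alg *alg;

    for (alg = demo_algs; alg; alg = alg->next)
        if (strcmp(alg->ops->name, name) == 0)
            return alg->ops;
    return NULL;
}

int main(void)
{
    static const struct demo_crypto_ops ccmp = { .name = "CCMP" };

    if (demo_register(&ccmp))
        return 1;
    printf("lookup CCMP -> %s\n", demo_lookup("CCMP") ? "found" : "missing");
    return 0;
}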
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h
deleted file mode 100644
index d3bd5598b25b..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Original code based on Host AP (software wireless LAN access point) driver
- * for Intersil Prism2/2.5/3.
- *
- * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
- * <jkmaline@cc.hut.fi>
- * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
- *
- * Adaption to a generic IEEE 802.11 stack by James Ketrenos
- * <jketreno@linux.intel.com>
- *
- * Copyright (c) 2004, Intel Corporation
- */
-
-/*
- * This file defines the interface to the ieee80211 crypto module.
- */
-#ifndef IEEE80211_CRYPT_H
-#define IEEE80211_CRYPT_H
-
-#include <linux/skbuff.h>
-
-struct ieee80211_crypto_ops {
- const char *name;
-
- /* init new crypto context (e.g., allocate private data space,
- * select IV, etc.); returns NULL on failure or pointer to allocated
- * private data on success
- */
- void * (*init)(int keyidx);
-
- /* deinitialize crypto context and free allocated private data */
- void (*deinit)(void *priv);
-
- /* encrypt/decrypt return < 0 on error or >= 0 on success. The return
- * value from decrypt_mpdu is passed as the keyidx value for
- * decrypt_msdu. skb must have enough head and tail room for the
- * encryption; if not, error will be returned; these functions are
- * called for all MPDUs (i.e., fragments).
- */
- int (*encrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
- int (*decrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
-
- /* These functions are called for full MSDUs, i.e. full frames.
- * These can be NULL if full MSDU operations are not needed.
- */
- int (*encrypt_msdu)(struct sk_buff *skb, int hdr_len, void *priv);
- int (*decrypt_msdu)(struct sk_buff *skb, int keyidx, int hdr_len,
- void *priv);
-
- int (*set_key)(void *key, int len, u8 *seq, void *priv);
- int (*get_key)(void *key, int len, u8 *seq, void *priv);
-
- /* procfs handler for printing out key information and possible
- * statistics
- */
- char * (*print_stats)(char *p, void *priv);
-
- /* maximum number of bytes added by encryption; encrypt buf is
- * allocated with extra_prefix_len bytes, copy of in_buf, and
- * extra_postfix_len; encrypt need not use all this space, but
- * the result must start at the beginning of the buffer and correct
- * length must be returned
- */
- int extra_prefix_len, extra_postfix_len;
-
- struct module *owner;
-};
-
-struct ieee80211_crypt_data {
- struct list_head list; /* delayed deletion list */
- struct ieee80211_crypto_ops *ops;
- void *priv;
- atomic_t refcnt;
-};
-
-int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops);
-int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops);
-struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name);
-void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force);
-void ieee80211_crypt_deinit_handler(struct timer_list *t);
-void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
- struct ieee80211_crypt_data **crypt);
-
-#endif

diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
deleted file mode 100644
index f17d07dad56d..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
+++ /dev/null
@@ -1,421 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Host AP crypt: host-based CCMP encryption implementation for Host AP driver
- *
- * Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/if_ether.h>
-#include <linux/if_arp.h>
-#include <linux/string.h>
-#include <linux/wireless.h>
-
-#include "ieee80211.h"
-
-#include <linux/crypto.h>
-#include <crypto/aead.h>
-#include <linux/scatterlist.h>
-
-MODULE_AUTHOR("Jouni Malinen");
-MODULE_DESCRIPTION("Host AP crypt: CCMP");
-MODULE_LICENSE("GPL");
-
-#define AES_BLOCK_LEN 16
-#define CCMP_HDR_LEN 8
-#define CCMP_MIC_LEN 8
-#define CCMP_TK_LEN 16
-#define CCMP_PN_LEN 6
-
-struct ieee80211_ccmp_data {
- u8 key[CCMP_TK_LEN];
- int key_set;
-
- u8 tx_pn[CCMP_PN_LEN];
- u8 rx_pn[CCMP_PN_LEN];
-
- u32 dot11RSNAStatsCCMPFormatErrors;
- u32 dot11RSNAStatsCCMPReplays;
- u32 dot11RSNAStatsCCMPDecryptErrors;
-
- int key_idx;
-
- struct crypto_aead *tfm;
-
- /* scratch buffers for virt_to_page() (crypto API) */
- u8 tx_aad[2 * AES_BLOCK_LEN];
- u8 rx_aad[2 * AES_BLOCK_LEN];
-};
-
-static void *ieee80211_ccmp_init(int key_idx)
-{
- struct ieee80211_ccmp_data *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- goto fail;
- priv->key_idx = key_idx;
-
- priv->tfm = crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(priv->tfm)) {
- pr_debug("ieee80211_crypt_ccmp: could not allocate crypto API aes\n");
- priv->tfm = NULL;
- goto fail;
- }
-
- return priv;
-
-fail:
- if (priv) {
- if (priv->tfm)
- crypto_free_aead(priv->tfm);
- kfree(priv);
- }
-
- return NULL;
-}
-
-static void ieee80211_ccmp_deinit(void *priv)
-{
- struct ieee80211_ccmp_data *_priv = priv;
-
- if (_priv && _priv->tfm)
- crypto_free_aead(_priv->tfm);
- kfree(priv);
-}
-
-static int ccmp_init_iv_and_aad(struct rtl_80211_hdr_4addr *hdr,
- u8 *pn, u8 *iv, u8 *aad)
-{
- u8 *pos, qc = 0;
- size_t aad_len;
- u16 fc;
- int a4_included, qc_included;
-
- fc = le16_to_cpu(hdr->frame_ctl);
- a4_included = ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
- (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS));
- /* qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) &&
- * (WLAN_FC_GET_STYPE(fc) & 0x08));
- */
- /* fixed by David :2006.9.6 */
- qc_included = (WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) &&
- (WLAN_FC_GET_STYPE(fc) & 0x80);
- aad_len = 22;
- if (a4_included)
- aad_len += 6;
- if (qc_included) {
- pos = (u8 *)&hdr->addr4;
- if (a4_included)
- pos += 6;
- qc = *pos & 0x0f;
- aad_len += 2;
- }
-
- /* In CCM, the initial vectors (IV) used for CTR mode encryption and CBC
- * mode authentication are not allowed to collide, yet both are derived
- * from the same vector. We only set L := 1 here to indicate that the
- * data size can be represented in (L+1) bytes. The CCM layer will take
- * care of storing the data length in the top (L+1) bytes and setting
- * and clearing the other bits as is required to derive the two IVs.
- */
- iv[0] = 0x1;
-
- /* Nonce: QC | A2 | PN */
- iv[1] = qc;
- memcpy(iv + 2, hdr->addr2, ETH_ALEN);
- memcpy(iv + 8, pn, CCMP_PN_LEN);
-
- /* AAD:
- * FC with bits 4..6 and 11..13 masked to zero; 14 is always one
- * A1 | A2 | A3
- * SC with bits 4..15 (seq#) masked to zero
- * A4 (if present)
- * QC (if present)
- */
- pos = (u8 *)hdr;
- aad[0] = pos[0] & 0x8f;
- aad[1] = pos[1] & 0xc7;
- memcpy(&aad[2], &hdr->addr1, ETH_ALEN);
- memcpy(&aad[8], &hdr->addr2, ETH_ALEN);
- memcpy(&aad[14], &hdr->addr3, ETH_ALEN);
- pos = (u8 *)&hdr->seq_ctl;
- aad[20] = pos[0] & 0x0f;
- aad[21] = 0; /* all bits masked */
- memset(aad + 22, 0, 8);
- if (a4_included)
- memcpy(aad + 22, hdr->addr4, ETH_ALEN);
- if (qc_included) {
- aad[a4_included ? 28 : 22] = qc;
- /* rest of QC masked */
- }
-
- return aad_len;
-}
-
-static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
-{
- struct ieee80211_ccmp_data *key = priv;
- int i;
- u8 *pos;
- struct rtl_80211_hdr_4addr *hdr;
- struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
-
- if (skb_headroom(skb) < CCMP_HDR_LEN ||
- skb_tailroom(skb) < CCMP_MIC_LEN ||
- skb->len < hdr_len)
- return -1;
-
- pos = skb_push(skb, CCMP_HDR_LEN);
- memmove(pos, pos + CCMP_HDR_LEN, hdr_len);
- pos += hdr_len;
- /* mic = skb_put(skb, CCMP_MIC_LEN); */
-
- i = CCMP_PN_LEN - 1;
- while (i >= 0) {
- key->tx_pn[i]++;
- if (key->tx_pn[i] != 0)
- break;
- i--;
- }
-
- *pos++ = key->tx_pn[5];
- *pos++ = key->tx_pn[4];
- *pos++ = 0;
- *pos++ = (key->key_idx << 6) | BIT(5) /* Ext IV included */;
- *pos++ = key->tx_pn[3];
- *pos++ = key->tx_pn[2];
- *pos++ = key->tx_pn[1];
- *pos++ = key->tx_pn[0];
-
- hdr = (struct rtl_80211_hdr_4addr *)skb->data;
- if (!tcb_desc->bHwSec) {
- struct aead_request *req;
- struct scatterlist sg[2];
- u8 *aad = key->tx_aad;
- u8 iv[AES_BLOCK_LEN];
- int aad_len, ret;
- size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN;
-
- req = aead_request_alloc(key->tfm, GFP_ATOMIC);
- if (!req)
- return -ENOMEM;
-
- aad_len = ccmp_init_iv_and_aad(hdr, key->tx_pn, iv, aad);
-
- skb_put(skb, CCMP_MIC_LEN);
-
- sg_init_table(sg, 2);
- sg_set_buf(&sg[0], aad, aad_len);
- sg_set_buf(&sg[1], skb->data + hdr_len + CCMP_HDR_LEN,
- data_len + CCMP_MIC_LEN);
-
- aead_request_set_callback(req, 0, NULL, NULL);
- aead_request_set_ad(req, aad_len);
- aead_request_set_crypt(req, sg, sg, data_len, iv);
-
- ret = crypto_aead_encrypt(req);
- aead_request_free(req);
-
- return ret;
- }
- return 0;
-}
-
-static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
-{
- struct ieee80211_ccmp_data *key = priv;
- u8 keyidx, *pos;
- struct rtl_80211_hdr_4addr *hdr;
- struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- u8 pn[6];
-
- if (skb->len < hdr_len + CCMP_HDR_LEN + CCMP_MIC_LEN) {
- key->dot11RSNAStatsCCMPFormatErrors++;
- return -1;
- }
-
- hdr = (struct rtl_80211_hdr_4addr *)skb->data;
- pos = skb->data + hdr_len;
- keyidx = pos[3];
- if (!(keyidx & BIT(5))) {
- if (net_ratelimit()) {
- netdev_dbg(skb->dev, "CCMP: received packet without ExtIV flag from %pM\n",
- hdr->addr2);
- }
- key->dot11RSNAStatsCCMPFormatErrors++;
- return -2;
- }
- keyidx >>= 6;
- if (key->key_idx != keyidx) {
- netdev_dbg(skb->dev, "CCMP: RX tkey->key_idx=%d frame keyidx=%d priv=%p\n",
- key->key_idx, keyidx, priv);
- return -6;
- }
- if (!key->key_set) {
- if (net_ratelimit()) {
- netdev_dbg(skb->dev, "CCMP: received packet from %pM with keyid=%d that does not have a configured key\n",
- hdr->addr2, keyidx);
- }
- return -3;
- }
-
- pn[0] = pos[7];
- pn[1] = pos[6];
- pn[2] = pos[5];
- pn[3] = pos[4];
- pn[4] = pos[1];
- pn[5] = pos[0];
- pos += 8;
-
- if (memcmp(pn, key->rx_pn, CCMP_PN_LEN) <= 0) {
- if (net_ratelimit()) {
- netdev_dbg(skb->dev, "CCMP: replay detected: STA=%pM previous PN %pm received PN %pm\n",
- hdr->addr2, key->rx_pn, pn);
- }
- key->dot11RSNAStatsCCMPReplays++;
- return -4;
- }
- if (!tcb_desc->bHwSec) {
- struct aead_request *req;
- struct scatterlist sg[2];
- u8 *aad = key->rx_aad;
- u8 iv[AES_BLOCK_LEN];
- int aad_len, ret;
- size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN;
-
- req = aead_request_alloc(key->tfm, GFP_ATOMIC);
- if (!req)
- return -ENOMEM;
-
- aad_len = ccmp_init_iv_and_aad(hdr, pn, iv, aad);
-
- sg_init_table(sg, 2);
- sg_set_buf(&sg[0], aad, aad_len);
- sg_set_buf(&sg[1], pos, data_len);
-
- aead_request_set_callback(req, 0, NULL, NULL);
- aead_request_set_ad(req, aad_len);
- aead_request_set_crypt(req, sg, sg, data_len, iv);
-
- ret = crypto_aead_decrypt(req);
- aead_request_free(req);
-
- if (ret) {
- if (net_ratelimit()) {
- netdev_dbg(skb->dev, "CCMP: decrypt failed: STA=%pM\n",
- hdr->addr2);
- }
- key->dot11RSNAStatsCCMPDecryptErrors++;
- return -5;
- }
-
- memcpy(key->rx_pn, pn, CCMP_PN_LEN);
- }
- /* Remove hdr and MIC */
- memmove(skb->data + CCMP_HDR_LEN, skb->data, hdr_len);
- skb_pull(skb, CCMP_HDR_LEN);
- skb_trim(skb, skb->len - CCMP_MIC_LEN);
-
- return keyidx;
-}
-
-static int ieee80211_ccmp_set_key(void *key, int len, u8 *seq, void *priv)
-{
- struct ieee80211_ccmp_data *data = priv;
- int keyidx;
- struct crypto_aead *tfm = data->tfm;
-
- keyidx = data->key_idx;
- memset(data, 0, sizeof(*data));
- data->key_idx = keyidx;
- if (len == CCMP_TK_LEN) {
- memcpy(data->key, key, CCMP_TK_LEN);
- data->key_set = 1;
- if (seq) {
- data->rx_pn[0] = seq[5];
- data->rx_pn[1] = seq[4];
- data->rx_pn[2] = seq[3];
- data->rx_pn[3] = seq[2];
- data->rx_pn[4] = seq[1];
- data->rx_pn[5] = seq[0];
- }
- if (crypto_aead_setauthsize(tfm, CCMP_MIC_LEN) ||
- crypto_aead_setkey(tfm, data->key, CCMP_TK_LEN))
- return -1;
- } else if (len == 0) {
- data->key_set = 0;
- } else {
- return -1;
- }
-
- return 0;
-}
-
-static int ieee80211_ccmp_get_key(void *key, int len, u8 *seq, void *priv)
-{
- struct ieee80211_ccmp_data *data = priv;
-
- if (len < CCMP_TK_LEN)
- return 0;
-
- if (!data->key_set)
- return 0;
- memcpy(key, data->key, CCMP_TK_LEN);
-
- if (seq) {
- seq[0] = data->tx_pn[5];
- seq[1] = data->tx_pn[4];
- seq[2] = data->tx_pn[3];
- seq[3] = data->tx_pn[2];
- seq[4] = data->tx_pn[1];
- seq[5] = data->tx_pn[0];
- }
-
- return CCMP_TK_LEN;
-}
-
-static char *ieee80211_ccmp_print_stats(char *p, void *priv)
-{
- struct ieee80211_ccmp_data *ccmp = priv;
-
- p += sprintf(p, "key[%d] alg=CCMP key_set=%d tx_pn=%pm rx_pn=%pm format_errors=%d replays=%d decrypt_errors=%d\n",
- ccmp->key_idx, ccmp->key_set,
- ccmp->tx_pn, ccmp->rx_pn,
- ccmp->dot11RSNAStatsCCMPFormatErrors,
- ccmp->dot11RSNAStatsCCMPReplays,
- ccmp->dot11RSNAStatsCCMPDecryptErrors);
-
- return p;
-}
-
-static struct ieee80211_crypto_ops ieee80211_crypt_ccmp = {
- .name = "CCMP",
- .init = ieee80211_ccmp_init,
- .deinit = ieee80211_ccmp_deinit,
- .encrypt_mpdu = ieee80211_ccmp_encrypt,
- .decrypt_mpdu = ieee80211_ccmp_decrypt,
- .encrypt_msdu = NULL,
- .decrypt_msdu = NULL,
- .set_key = ieee80211_ccmp_set_key,
- .get_key = ieee80211_ccmp_get_key,
- .print_stats = ieee80211_ccmp_print_stats,
- .extra_prefix_len = CCMP_HDR_LEN,
- .extra_postfix_len = CCMP_MIC_LEN,
- .owner = THIS_MODULE,
-};
-
-int __init ieee80211_crypto_ccmp_init(void)
-{
- return ieee80211_register_crypto_ops(&ieee80211_crypt_ccmp);
-}
-
-void ieee80211_crypto_ccmp_exit(void)
-{
- ieee80211_unregister_crypto_ops(&ieee80211_crypt_ccmp);
-}
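The ccmp_init_iv_and_aad() helper removed above builds the CCM nonce (flag byte, QoS TID, transmitter address A2, 48-bit PN) and computes the variable AAD length (22 bytes, plus 6 for A4, plus 2 for QoS Control). A small userspace sketch of just that layout, with hypothetical helper names, is:

/* Sketch of the CCMP nonce layout and AAD-length rule mirrored from the
 * removed ccmp_init_iv_and_aad(). Names and the simplified parameters
 * are illustrative only.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define CCMP_PN_LEN 6

static size_t ccmp_aad_len(int a4_included, int qc_included)
{
    size_t len = 22;                 /* masked FC + A1..A3 + masked SC */

    if (a4_included)
        len += 6;                    /* A4 present in 4-address frames */
    if (qc_included)
        len += 2;                    /* QoS Control field */
    return len;
}

static void ccmp_build_nonce(uint8_t iv[16], uint8_t qc_tid,
                             const uint8_t a2[6],
                             const uint8_t pn[CCMP_PN_LEN])
{
    memset(iv, 0, 16);
    iv[0] = 0x1;                     /* CCM flag byte, as in the driver   */
    iv[1] = qc_tid;                  /* QC priority (0 for non-QoS data)  */
    memcpy(iv + 2, a2, 6);           /* transmitter address (A2)          */
    memcpy(iv + 8, pn, CCMP_PN_LEN); /* packet number                     */
}

int main(void)
{
    uint8_t iv[16];
    const uint8_t a2[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
    const uint8_t pn[CCMP_PN_LEN] = { 0, 0, 0, 0, 0, 1 };

    ccmp_build_nonce(iv, 0, a2, pn);
    printf("aad_len(no A4, QoS) = %zu, nonce flag = 0x%02x\n",
           ccmp_aad_len(0, 1), iv[0]);   /* prints 24, 0x01 */
    return 0;
}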
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
deleted file mode 100644
index 9bfd24ad46b6..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ /dev/null
@@ -1,718 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Host AP crypt: host-based TKIP encryption implementation for Host AP driver
- *
- * Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
- */
-
-#include <linux/fips.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/if_ether.h>
-#include <linux/if_arp.h>
-#include <linux/string.h>
-
-#include "ieee80211.h"
-
-#include <crypto/arc4.h>
-#include <crypto/hash.h>
-#include <linux/crc32.h>
-
-MODULE_AUTHOR("Jouni Malinen");
-MODULE_DESCRIPTION("Host AP crypt: TKIP");
-MODULE_LICENSE("GPL");
-
-struct ieee80211_tkip_data {
-#define TKIP_KEY_LEN 32
- u8 key[TKIP_KEY_LEN];
- int key_set;
-
- u32 tx_iv32;
- u16 tx_iv16;
- u16 tx_ttak[5];
- int tx_phase1_done;
-
- u32 rx_iv32;
- u16 rx_iv16;
- u16 rx_ttak[5];
- int rx_phase1_done;
- u32 rx_iv32_new;
- u16 rx_iv16_new;
-
- u32 dot11RSNAStatsTKIPReplays;
- u32 dot11RSNAStatsTKIPICVErrors;
- u32 dot11RSNAStatsTKIPLocalMICFailures;
-
- int key_idx;
-
- struct arc4_ctx rx_ctx_arc4;
- struct arc4_ctx tx_ctx_arc4;
- struct crypto_shash *rx_tfm_michael;
- struct crypto_shash *tx_tfm_michael;
-
- /* scratch buffers for virt_to_page() (crypto API) */
- u8 rx_hdr[16], tx_hdr[16];
-};
-
-static void *ieee80211_tkip_init(int key_idx)
-{
- struct ieee80211_tkip_data *priv;
-
- if (fips_enabled)
- return NULL;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- goto fail;
- priv->key_idx = key_idx;
-
- priv->tx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0);
- if (IS_ERR(priv->tx_tfm_michael)) {
- printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
- "crypto API michael_mic\n");
- priv->tx_tfm_michael = NULL;
- goto fail;
- }
-
- priv->rx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0);
- if (IS_ERR(priv->rx_tfm_michael)) {
- printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
- "crypto API michael_mic\n");
- priv->rx_tfm_michael = NULL;
- goto fail;
- }
-
- return priv;
-
-fail:
- if (priv) {
- crypto_free_shash(priv->tx_tfm_michael);
- crypto_free_shash(priv->rx_tfm_michael);
- kfree(priv);
- }
-
- return NULL;
-}
-
-
-static void ieee80211_tkip_deinit(void *priv)
-{
- struct ieee80211_tkip_data *_priv = priv;
-
- if (_priv) {
- crypto_free_shash(_priv->tx_tfm_michael);
- crypto_free_shash(_priv->rx_tfm_michael);
- }
- kfree_sensitive(priv);
-}
-
-
-static inline u16 RotR1(u16 val)
-{
- return (val >> 1) | (val << 15);
-}
-
-
-static inline u8 Lo8(u16 val)
-{
- return val & 0xff;
-}
-
-
-static inline u8 Hi8(u16 val)
-{
- return val >> 8;
-}
-
-
-static inline u16 Lo16(u32 val)
-{
- return val & 0xffff;
-}
-
-
-static inline u16 Hi16(u32 val)
-{
- return val >> 16;
-}
-
-
-static inline u16 Mk16(u8 hi, u8 lo)
-{
- return lo | (((u16)hi) << 8);
-}
-
-static const u16 Sbox[256] = {
- 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
- 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
- 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
- 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B,
- 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F,
- 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F,
- 0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5,
- 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F,
- 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB,
- 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397,
- 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED,
- 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A,
- 0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194,
- 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3,
- 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104,
- 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D,
- 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39,
- 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695,
- 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83,
- 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76,
- 0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4,
- 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B,
- 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0,
- 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018,
- 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751,
- 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85,
- 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12,
- 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9,
- 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7,
- 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A,
- 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8,
- 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
-};
-
-
-static inline u16 _S_(u16 v)
-{
- u16 t = Sbox[Hi8(v)];
- return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8));
-}
-
-
-#define PHASE1_LOOP_COUNT 8
-
-
-static void tkip_mixing_phase1(u16 *TTAK, const u8 *TK, const u8 *TA, u32 IV32)
-{
- int i, j;
-
- /* Initialize the 80-bit TTAK from TSC (IV32) and TA[0..5] */
- TTAK[0] = Lo16(IV32);
- TTAK[1] = Hi16(IV32);
- TTAK[2] = Mk16(TA[1], TA[0]);
- TTAK[3] = Mk16(TA[3], TA[2]);
- TTAK[4] = Mk16(TA[5], TA[4]);
-
- for (i = 0; i < PHASE1_LOOP_COUNT; i++) {
- j = 2 * (i & 1);
- TTAK[0] += _S_(TTAK[4] ^ Mk16(TK[1 + j], TK[0 + j]));
- TTAK[1] += _S_(TTAK[0] ^ Mk16(TK[5 + j], TK[4 + j]));
- TTAK[2] += _S_(TTAK[1] ^ Mk16(TK[9 + j], TK[8 + j]));
- TTAK[3] += _S_(TTAK[2] ^ Mk16(TK[13 + j], TK[12 + j]));
- TTAK[4] += _S_(TTAK[3] ^ Mk16(TK[1 + j], TK[0 + j])) + i;
- }
-}
-
-
-static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
- u16 IV16)
-{
- /*
- * Make temporary area overlap WEP seed so that the final copy can be
- * avoided on little endian hosts.
- */
- u16 *PPK = (u16 *)&WEPSeed[4];
-
- /* Step 1 - make copy of TTAK and bring in TSC */
- PPK[0] = TTAK[0];
- PPK[1] = TTAK[1];
- PPK[2] = TTAK[2];
- PPK[3] = TTAK[3];
- PPK[4] = TTAK[4];
- PPK[5] = TTAK[4] + IV16;
-
- /* Step 2 - 96-bit bijective mixing using S-box */
- PPK[0] += _S_(PPK[5] ^ le16_to_cpu(*(__le16 *)(&TK[0])));
- PPK[1] += _S_(PPK[0] ^ le16_to_cpu(*(__le16 *)(&TK[2])));
- PPK[2] += _S_(PPK[1] ^ le16_to_cpu(*(__le16 *)(&TK[4])));
- PPK[3] += _S_(PPK[2] ^ le16_to_cpu(*(__le16 *)(&TK[6])));
- PPK[4] += _S_(PPK[3] ^ le16_to_cpu(*(__le16 *)(&TK[8])));
- PPK[5] += _S_(PPK[4] ^ le16_to_cpu(*(__le16 *)(&TK[10])));
-
- PPK[0] += RotR1(PPK[5] ^ le16_to_cpu(*(__le16 *)(&TK[12])));
- PPK[1] += RotR1(PPK[0] ^ le16_to_cpu(*(__le16 *)(&TK[14])));
- PPK[2] += RotR1(PPK[1]);
- PPK[3] += RotR1(PPK[2]);
- PPK[4] += RotR1(PPK[3]);
- PPK[5] += RotR1(PPK[4]);
-
- /*
- * Step 3 - bring in last of TK bits, assign 24-bit WEP IV value
- * WEPSeed[0..2] is transmitted as WEP IV
- */
- WEPSeed[0] = Hi8(IV16);
- WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F;
- WEPSeed[2] = Lo8(IV16);
- WEPSeed[3] = Lo8((PPK[5] ^ le16_to_cpu(*(__le16 *)(&TK[0]))) >> 1);
-
-#ifdef __BIG_ENDIAN
- {
- int i;
-
- for (i = 0; i < 6; i++)
- PPK[i] = (PPK[i] << 8) | (PPK[i] >> 8);
- }
-#endif
-}
-
-
-static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
-{
- struct ieee80211_tkip_data *tkey = priv;
- int len;
- u8 *pos;
- struct rtl_80211_hdr_4addr *hdr;
- struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- u8 rc4key[16], *icv;
- u32 crc;
-
- if (skb_headroom(skb) < 8 || skb_tailroom(skb) < 4 ||
- skb->len < hdr_len)
- return -1;
-
- hdr = (struct rtl_80211_hdr_4addr *)skb->data;
-
- if (!tcb_desc->bHwSec) {
- if (!tkey->tx_phase1_done) {
- tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2,
- tkey->tx_iv32);
- tkey->tx_phase1_done = 1;
- }
- tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16);
- } else
- tkey->tx_phase1_done = 1;
-
-
- len = skb->len - hdr_len;
- pos = skb_push(skb, 8);
- memmove(pos, pos + 8, hdr_len);
- pos += hdr_len;
-
- if (tcb_desc->bHwSec) {
- *pos++ = Hi8(tkey->tx_iv16);
- *pos++ = (Hi8(tkey->tx_iv16) | 0x20) & 0x7F;
- *pos++ = Lo8(tkey->tx_iv16);
- } else {
- *pos++ = rc4key[0];
- *pos++ = rc4key[1];
- *pos++ = rc4key[2];
- }
-
- *pos++ = (tkey->key_idx << 6) | BIT(5) /* Ext IV included */;
- *pos++ = tkey->tx_iv32 & 0xff;
- *pos++ = (tkey->tx_iv32 >> 8) & 0xff;
- *pos++ = (tkey->tx_iv32 >> 16) & 0xff;
- *pos++ = (tkey->tx_iv32 >> 24) & 0xff;
-
- if (!tcb_desc->bHwSec) {
- icv = skb_put(skb, 4);
- crc = ~crc32_le(~0, pos, len);
- icv[0] = crc;
- icv[1] = crc >> 8;
- icv[2] = crc >> 16;
- icv[3] = crc >> 24;
-
- arc4_setkey(&tkey->tx_ctx_arc4, rc4key, 16);
- arc4_crypt(&tkey->tx_ctx_arc4, pos, pos, len + 4);
- }
-
- tkey->tx_iv16++;
- if (tkey->tx_iv16 == 0) {
- tkey->tx_phase1_done = 0;
- tkey->tx_iv32++;
- }
-
- return 0;
-}
-
-static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
-{
- struct ieee80211_tkip_data *tkey = priv;
- u8 keyidx, *pos;
- u32 iv32;
- u16 iv16;
- struct rtl_80211_hdr_4addr *hdr;
- struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- u8 rc4key[16];
- u8 icv[4];
- u32 crc;
- int plen;
-
- if (skb->len < hdr_len + 8 + 4)
- return -1;
-
- hdr = (struct rtl_80211_hdr_4addr *)skb->data;
- pos = skb->data + hdr_len;
- keyidx = pos[3];
- if (!(keyidx & BIT(5))) {
- if (net_ratelimit()) {
- netdev_dbg(skb->dev, "TKIP: received packet without ExtIV"
- " flag from %pM\n", hdr->addr2);
- }
- return -2;
- }
- keyidx >>= 6;
- if (tkey->key_idx != keyidx) {
- netdev_dbg(skb->dev, "TKIP: RX tkey->key_idx=%d frame "
- "keyidx=%d priv=%p\n", tkey->key_idx, keyidx, priv);
- return -6;
- }
- if (!tkey->key_set) {
- if (net_ratelimit()) {
- netdev_dbg(skb->dev, "TKIP: received packet from %pM"
- " with keyid=%d that does not have a configured"
- " key\n", hdr->addr2, keyidx);
- }
- return -3;
- }
- iv16 = (pos[0] << 8) | pos[2];
- iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24);
- pos += 8;
-
- if (!tcb_desc->bHwSec) {
- if (iv32 < tkey->rx_iv32 ||
- (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) {
- if (net_ratelimit()) {
- netdev_dbg(skb->dev, "TKIP: replay detected: STA=%pM"
- " previous TSC %08x%04x received TSC "
- "%08x%04x\n", hdr->addr2,
- tkey->rx_iv32, tkey->rx_iv16, iv32, iv16);
- }
- tkey->dot11RSNAStatsTKIPReplays++;
- return -4;
- }
-
- if (iv32 != tkey->rx_iv32 || !tkey->rx_phase1_done) {
- tkip_mixing_phase1(tkey->rx_ttak, tkey->key, hdr->addr2, iv32);
- tkey->rx_phase1_done = 1;
- }
- tkip_mixing_phase2(rc4key, tkey->key, tkey->rx_ttak, iv16);
-
- plen = skb->len - hdr_len - 12;
-
- arc4_setkey(&tkey->rx_ctx_arc4, rc4key, 16);
- arc4_crypt(&tkey->rx_ctx_arc4, pos, pos, plen + 4);
-
- crc = ~crc32_le(~0, pos, plen);
- icv[0] = crc;
- icv[1] = crc >> 8;
- icv[2] = crc >> 16;
- icv[3] = crc >> 24;
-
- if (memcmp(icv, pos + plen, 4) != 0) {
- if (iv32 != tkey->rx_iv32) {
- /*
- * Previously cached Phase1 result was already
- * lost, so it needs to be recalculated for the
- * next packet.
- */
- tkey->rx_phase1_done = 0;
- }
- if (net_ratelimit()) {
- netdev_dbg(skb->dev, "TKIP: ICV error detected: STA="
- "%pM\n", hdr->addr2);
- }
- tkey->dot11RSNAStatsTKIPICVErrors++;
- return -5;
- }
-
- }
-
- /*
- * Update real counters only after Michael MIC verification has
- * completed.
- */
- tkey->rx_iv32_new = iv32;
- tkey->rx_iv16_new = iv16;
-
- /* Remove IV and ICV */
- memmove(skb->data + 8, skb->data, hdr_len);
- skb_pull(skb, 8);
- skb_trim(skb, skb->len - 4);
-
- return keyidx;
-}
-
-static int michael_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *hdr,
- u8 *data, size_t data_len, u8 *mic)
-{
- SHASH_DESC_ON_STACK(desc, tfm_michael);
- int err;
-
- desc->tfm = tfm_michael;
-
- if (crypto_shash_setkey(tfm_michael, key, 8))
- return -1;
-
- err = crypto_shash_init(desc);
- if (err)
- goto out;
- err = crypto_shash_update(desc, hdr, 16);
- if (err)
- goto out;
- err = crypto_shash_update(desc, data, data_len);
- if (err)
- goto out;
- err = crypto_shash_final(desc, mic);
-
-out:
- shash_desc_zero(desc);
- return err;
-}
-
-static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
-{
- struct rtl_80211_hdr_4addr *hdr11;
-
- hdr11 = (struct rtl_80211_hdr_4addr *)skb->data;
- switch (le16_to_cpu(hdr11->frame_ctl) &
- (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
- case IEEE80211_FCTL_TODS:
- memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */
- memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */
- break;
- case IEEE80211_FCTL_FROMDS:
- memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */
- memcpy(hdr + ETH_ALEN, hdr11->addr3, ETH_ALEN); /* SA */
- break;
- case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
- memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */
- memcpy(hdr + ETH_ALEN, hdr11->addr4, ETH_ALEN); /* SA */
- break;
- default:
- memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */
- memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */
- break;
- }
-
- hdr[12] = 0; /* priority */
-
- hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */
-}
-
-
-static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, void *priv)
-{
- struct ieee80211_tkip_data *tkey = priv;
- u8 *pos;
- struct rtl_80211_hdr_4addr *hdr;
-
- hdr = (struct rtl_80211_hdr_4addr *)skb->data;
-
- if (skb_tailroom(skb) < 8 || skb->len < hdr_len) {
- netdev_dbg(skb->dev, "Invalid packet for Michael MIC add "
- "(tailroom=%d hdr_len=%d skb->len=%d)\n",
- skb_tailroom(skb), hdr_len, skb->len);
- return -1;
- }
-
- michael_mic_hdr(skb, tkey->tx_hdr);
-
- // { david, 2006.9.1
- // fix the wpa process with wmm enabled.
- if (IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl)))
- tkey->tx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
- // }
- pos = skb_put(skb, 8);
-
- if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr,
- skb->data + hdr_len, skb->len - 8 - hdr_len, pos))
- return -1;
-
- return 0;
-}
-
-static void ieee80211_michael_mic_failure(struct net_device *dev,
- struct rtl_80211_hdr_4addr *hdr,
- int keyidx)
-{
- union iwreq_data wrqu;
- struct iw_michaelmicfailure ev;
-
- /* TODO: needed parameters: count, keyid, key type, TSC */
- memset(&ev, 0, sizeof(ev));
- ev.flags = keyidx & IW_MICFAILURE_KEY_ID;
- if (hdr->addr1[0] & 0x01)
- ev.flags |= IW_MICFAILURE_GROUP;
- else
- ev.flags |= IW_MICFAILURE_PAIRWISE;
- ev.src_addr.sa_family = ARPHRD_ETHER;
- memcpy(ev.src_addr.sa_data, hdr->addr2, ETH_ALEN);
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = sizeof(ev);
- wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *)&ev);
-}
-
-static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
- int hdr_len, void *priv)
-{
- struct ieee80211_tkip_data *tkey = priv;
- u8 mic[8];
- struct rtl_80211_hdr_4addr *hdr;
-
- hdr = (struct rtl_80211_hdr_4addr *)skb->data;
-
- if (!tkey->key_set)
- return -1;
-
- michael_mic_hdr(skb, tkey->rx_hdr);
- // { david, 2006.9.1
- // fix the wpa process with wmm enabled.
- if (IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl)))
- tkey->rx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
- // }
-
- if (michael_mic(tkey->rx_tfm_michael, &tkey->key[24], tkey->rx_hdr,
- skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
- return -1;
- if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
- struct rtl_80211_hdr_4addr *hdr;
- hdr = (struct rtl_80211_hdr_4addr *)skb->data;
-
- netdev_dbg(skb->dev, "Michael MIC verification failed for "
- "MSDU from %pM keyidx=%d\n",
- hdr->addr2, keyidx);
- if (skb->dev)
- ieee80211_michael_mic_failure(skb->dev, hdr, keyidx);
- tkey->dot11RSNAStatsTKIPLocalMICFailures++;
- return -1;
- }
-
- /*
- * Update TSC counters for RX now that the packet verification has
- * completed.
- */
- tkey->rx_iv32 = tkey->rx_iv32_new;
- tkey->rx_iv16 = tkey->rx_iv16_new;
-
- skb_trim(skb, skb->len - 8);
-
- return 0;
-}
-
-
-static int ieee80211_tkip_set_key(void *key, int len, u8 *seq, void *priv)
-{
- struct ieee80211_tkip_data *tkey = priv;
- int keyidx;
- struct crypto_shash *tfm = tkey->tx_tfm_michael;
- struct crypto_shash *tfm3 = tkey->rx_tfm_michael;
-
- keyidx = tkey->key_idx;
- memset(tkey, 0, sizeof(*tkey));
- tkey->key_idx = keyidx;
- tkey->tx_tfm_michael = tfm;
- tkey->rx_tfm_michael = tfm3;
-
- if (len == TKIP_KEY_LEN) {
- memcpy(tkey->key, key, TKIP_KEY_LEN);
- tkey->key_set = 1;
- tkey->tx_iv16 = 1; /* TSC is initialized to 1 */
- if (seq) {
- tkey->rx_iv32 = (seq[5] << 24) | (seq[4] << 16) |
- (seq[3] << 8) | seq[2];
- tkey->rx_iv16 = (seq[1] << 8) | seq[0];
- }
- } else if (len == 0)
- tkey->key_set = 0;
- else
- return -1;
-
- return 0;
-}
-
-
-static int ieee80211_tkip_get_key(void *key, int len, u8 *seq, void *priv)
-{
- struct ieee80211_tkip_data *tkey = priv;
-
- if (len < TKIP_KEY_LEN)
- return 0;
-
- if (!tkey->key_set)
- return 0;
- memcpy(key, tkey->key, TKIP_KEY_LEN);
-
- if (seq) {
- /* Return the sequence number of the last transmitted frame. */
- u16 iv16 = tkey->tx_iv16;
- u32 iv32 = tkey->tx_iv32;
-
- if (iv16 == 0)
- iv32--;
- iv16--;
- seq[0] = iv16;
- seq[1] = iv16 >> 8;
- seq[2] = iv32;
- seq[3] = iv32 >> 8;
- seq[4] = iv32 >> 16;
- seq[5] = iv32 >> 24;
- }
-
- return TKIP_KEY_LEN;
-}
-
-
-static char *ieee80211_tkip_print_stats(char *p, void *priv)
-{
- struct ieee80211_tkip_data *tkip = priv;
-
- p += sprintf(p, "key[%d] alg=TKIP key_set=%d "
- "tx_pn=%02x%02x%02x%02x%02x%02x "
- "rx_pn=%02x%02x%02x%02x%02x%02x "
- "replays=%d icv_errors=%d local_mic_failures=%d\n",
- tkip->key_idx, tkip->key_set,
- (tkip->tx_iv32 >> 24) & 0xff,
- (tkip->tx_iv32 >> 16) & 0xff,
- (tkip->tx_iv32 >> 8) & 0xff,
- tkip->tx_iv32 & 0xff,
- (tkip->tx_iv16 >> 8) & 0xff,
- tkip->tx_iv16 & 0xff,
- (tkip->rx_iv32 >> 24) & 0xff,
- (tkip->rx_iv32 >> 16) & 0xff,
- (tkip->rx_iv32 >> 8) & 0xff,
- tkip->rx_iv32 & 0xff,
- (tkip->rx_iv16 >> 8) & 0xff,
- tkip->rx_iv16 & 0xff,
- tkip->dot11RSNAStatsTKIPReplays,
- tkip->dot11RSNAStatsTKIPICVErrors,
- tkip->dot11RSNAStatsTKIPLocalMICFailures);
- return p;
-}
-
-
-static struct ieee80211_crypto_ops ieee80211_crypt_tkip = {
- .name = "TKIP",
- .init = ieee80211_tkip_init,
- .deinit = ieee80211_tkip_deinit,
- .encrypt_mpdu = ieee80211_tkip_encrypt,
- .decrypt_mpdu = ieee80211_tkip_decrypt,
- .encrypt_msdu = ieee80211_michael_mic_add,
- .decrypt_msdu = ieee80211_michael_mic_verify,
- .set_key = ieee80211_tkip_set_key,
- .get_key = ieee80211_tkip_get_key,
- .print_stats = ieee80211_tkip_print_stats,
- .extra_prefix_len = 4 + 4, /* IV + ExtIV */
- .extra_postfix_len = 8 + 4, /* MIC + ICV */
- .owner = THIS_MODULE,
-};
-
-int __init ieee80211_crypto_tkip_init(void)
-{
- return ieee80211_register_crypto_ops(&ieee80211_crypt_tkip);
-}
-
-void ieee80211_crypto_tkip_exit(void)
-{
- ieee80211_unregister_crypto_ops(&ieee80211_crypt_tkip);
-}
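The transmit path removed above (ieee80211_tkip_encrypt()) writes an 8-byte extended IV: three bytes derived from the 16-bit TSC, a key-index/ExtIV byte, then the 32-bit TSC little-endian, and re-runs the phase-1 key mix whenever the 16-bit counter wraps. A userspace sketch of that byte layout and rollover, with illustrative names, is:

/* Sketch of the TKIP extended-IV layout and TSC rollover mirrored from
 * the removed driver; not kernel code.
 */
#include <stdio.h>
#include <stdint.h>

static void tkip_write_ext_iv(uint8_t out[8], uint16_t iv16, uint32_t iv32,
                              int key_idx)
{
    out[0] = iv16 >> 8;                   /* Hi8(iv16)                 */
    out[1] = ((iv16 >> 8) | 0x20) & 0x7f; /* WEP-seed dummy byte       */
    out[2] = iv16 & 0xff;                 /* Lo8(iv16)                 */
    out[3] = (key_idx << 6) | 0x20;       /* key index + ExtIV bit     */
    out[4] = iv32 & 0xff;                 /* iv32, little-endian       */
    out[5] = (iv32 >> 8) & 0xff;
    out[6] = (iv32 >> 16) & 0xff;
    out[7] = (iv32 >> 24) & 0xff;
}

int main(void)
{
    uint8_t iv[8];
    uint16_t iv16 = 0xffff;
    uint32_t iv32 = 0;

    tkip_write_ext_iv(iv, iv16, iv32, 0);

    /* Per-packet counter update, as in the removed code: when the
     * 16-bit part wraps, bump iv32 (and a new phase-1 mix is needed).
     */
    if (++iv16 == 0)
        iv32++;

    printf("next TSC: iv32=%u iv16=%u, first IV byte=0x%02x\n",
           (unsigned)iv32, (unsigned)iv16, iv[0]);
    return 0;
}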
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
deleted file mode 100644
index a2cdf3bfd1a4..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
+++ /dev/null
@@ -1,247 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Host AP crypt: host-based WEP encryption implementation for Host AP driver
- *
- * Copyright (c) 2002-2004, Jouni Malinen <jkmaline@cc.hut.fi>
- */
-
-#include <linux/fips.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/skbuff.h>
-#include <linux/string.h>
-
-#include "ieee80211.h"
-
-#include <crypto/arc4.h>
-#include <linux/crc32.h>
-
-MODULE_AUTHOR("Jouni Malinen");
-MODULE_DESCRIPTION("Host AP crypt: WEP");
-MODULE_LICENSE("GPL");
-
-struct prism2_wep_data {
- u32 iv;
-#define WEP_KEY_LEN 13
- u8 key[WEP_KEY_LEN + 1];
- u8 key_len;
- u8 key_idx;
- struct arc4_ctx rx_ctx_arc4;
- struct arc4_ctx tx_ctx_arc4;
-};
-
-
-static void *prism2_wep_init(int keyidx)
-{
- struct prism2_wep_data *priv;
-
- if (fips_enabled)
- return NULL;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return NULL;
- priv->key_idx = keyidx;
-
- /* start WEP IV from a random value */
- get_random_bytes(&priv->iv, 4);
-
- return priv;
-}
-
-
-static void prism2_wep_deinit(void *priv)
-{
- kfree_sensitive(priv);
-}
-
-/* Perform WEP encryption on given skb that has at least 4 bytes of headroom
- * for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted,
- * so the payload length increases with 8 bytes.
- *
- * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data))
- */
-static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
-{
- struct prism2_wep_data *wep = priv;
- u32 klen, len;
- u8 key[WEP_KEY_LEN + 3];
- u8 *pos;
- struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- u32 crc;
- u8 *icv;
-
- if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
- skb->len < hdr_len)
- return -1;
-
- len = skb->len - hdr_len;
- pos = skb_push(skb, 4);
- memmove(pos, pos + 4, hdr_len);
- pos += hdr_len;
-
- klen = 3 + wep->key_len;
-
- wep->iv++;
-
- /* Fluhrer, Mantin, and Shamir have reported weaknesses in the key
- * scheduling algorithm of RC4. At least IVs (KeyByte + 3, 0xff, N)
- * can be used to speedup attacks, so avoid using them.
- */
- if ((wep->iv & 0xff00) == 0xff00) {
- u8 B = (wep->iv >> 16) & 0xff;
-
- if (B >= 3 && B < klen)
- wep->iv += 0x0100;
- }
-
- /* Prepend 24-bit IV to RC4 key and TX frame */
- *pos++ = key[0] = (wep->iv >> 16) & 0xff;
- *pos++ = key[1] = (wep->iv >> 8) & 0xff;
- *pos++ = key[2] = wep->iv & 0xff;
- *pos++ = wep->key_idx << 6;
-
- /* Copy rest of the WEP key (the secret part) */
- memcpy(key + 3, wep->key, wep->key_len);
-
- if (!tcb_desc->bHwSec) {
- /* Append little-endian CRC32 and encrypt it to produce ICV */
- crc = ~crc32_le(~0, pos, len);
- icv = skb_put(skb, 4);
- icv[0] = crc;
- icv[1] = crc >> 8;
- icv[2] = crc >> 16;
- icv[3] = crc >> 24;
-
- arc4_setkey(&wep->tx_ctx_arc4, key, klen);
- arc4_crypt(&wep->tx_ctx_arc4, pos, pos, len + 4);
- }
-
- return 0;
-}
-
-
-/* Perform WEP decryption on given buffer. Buffer includes whole WEP part of
- * the frame: IV (4 bytes), encrypted payload (including SNAP header),
- * ICV (4 bytes). len includes both IV and ICV.
- *
- * Returns 0 if frame was decrypted successfully and ICV was correct and -1 on
- * failure. If frame is OK, IV and ICV will be removed.
- */
-static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
-{
- struct prism2_wep_data *wep = priv;
- u32 klen, plen;
- u8 key[WEP_KEY_LEN + 3];
- u8 keyidx, *pos;
- struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- u32 crc;
- u8 icv[4];
-
- if (skb->len < hdr_len + 8)
- return -1;
-
- pos = skb->data + hdr_len;
- key[0] = *pos++;
- key[1] = *pos++;
- key[2] = *pos++;
- keyidx = *pos++ >> 6;
- if (keyidx != wep->key_idx)
- return -1;
-
- klen = 3 + wep->key_len;
-
- /* Copy rest of the WEP key (the secret part) */
- memcpy(key + 3, wep->key, wep->key_len);
-
- /* Apply RC4 to data and compute CRC32 over decrypted data */
- plen = skb->len - hdr_len - 8;
-
- if (!tcb_desc->bHwSec) {
- arc4_setkey(&wep->rx_ctx_arc4, key, klen);
- arc4_crypt(&wep->rx_ctx_arc4, pos, pos, plen + 4);
-
- crc = ~crc32_le(~0, pos, plen);
- icv[0] = crc;
- icv[1] = crc >> 8;
- icv[2] = crc >> 16;
- icv[3] = crc >> 24;
- if (memcmp(icv, pos + plen, 4) != 0) {
- /* ICV mismatch - drop frame */
- return -2;
- }
- }
- /* Remove IV and ICV */
- memmove(skb->data + 4, skb->data, hdr_len);
- skb_pull(skb, 4);
- skb_trim(skb, skb->len - 4);
-
- return 0;
-}
-
-
-static int prism2_wep_set_key(void *key, int len, u8 *seq, void *priv)
-{
- struct prism2_wep_data *wep = priv;
-
- if (len < 0 || len > WEP_KEY_LEN)
- return -1;
-
- memcpy(wep->key, key, len);
- wep->key_len = len;
-
- return 0;
-}
-
-
-static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
-{
- struct prism2_wep_data *wep = priv;
-
- if (len < wep->key_len)
- return 0;
-
- memcpy(key, wep->key, wep->key_len);
-
- return wep->key_len;
-}
-
-
-static char *prism2_wep_print_stats(char *p, void *priv)
-{
- struct prism2_wep_data *wep = priv;
-
- p += sprintf(p, "key[%d] alg=WEP len=%d\n",
- wep->key_idx, wep->key_len);
- return p;
-}
-
-
-static struct ieee80211_crypto_ops ieee80211_crypt_wep = {
- .name = "WEP",
- .init = prism2_wep_init,
- .deinit = prism2_wep_deinit,
- .encrypt_mpdu = prism2_wep_encrypt,
- .decrypt_mpdu = prism2_wep_decrypt,
- .encrypt_msdu = NULL,
- .decrypt_msdu = NULL,
- .set_key = prism2_wep_set_key,
- .get_key = prism2_wep_get_key,
- .print_stats = prism2_wep_print_stats,
- .extra_prefix_len = 4, /* IV */
- .extra_postfix_len = 4, /* ICV */
- .owner = THIS_MODULE,
-};
-
-int __init ieee80211_crypto_wep_init(void)
-{
- return ieee80211_register_crypto_ops(&ieee80211_crypt_wep);
-}
-
-void ieee80211_crypto_wep_exit(void)
-{
- ieee80211_unregister_crypto_ops(&ieee80211_crypt_wep);
-}
-
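The removed prism2_wep_encrypt() steps over the Fluhrer-Mantin-Shamir weak IV class (B + 3, 0xff, N) when incrementing the per-packet IV. A userspace sketch of just that skip rule, with a hypothetical helper name, is:

/* Sketch of the weak-IV skip mirrored from the removed WEP code. */
#include <stdio.h>
#include <stdint.h>

#define WEP_KEY_LEN 13

static uint32_t wep_next_iv(uint32_t iv, unsigned int key_len)
{
    unsigned int klen = 3 + key_len;  /* IV bytes + secret key bytes */

    iv++;
    if ((iv & 0xff00) == 0xff00) {    /* middle IV byte is 0xff */
        uint8_t b = (iv >> 16) & 0xff;

        if (b >= 3 && b < klen)
            iv += 0x0100;             /* skip the whole weak class */
    }
    return iv;
}

int main(void)
{
    /* 0x03feff + 1 = 0x03ff00 is weak (B = 3), so it is skipped. */
    printf("next IV after 0x03feff: 0x%06x\n",
           (unsigned)wep_next_iv(0x03feff, WEP_KEY_LEN));
    return 0;
}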
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
deleted file mode 100644
index 3f93939bc4ee..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
+++ /dev/null
@@ -1,287 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Copyright(c) 2004 Intel Corporation. All rights reserved.
- *
- * Portions of this file are based on the WEP enablement code provided by the
- * Host AP project hostap-drivers v0.1.3
- * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
- * <jkmaline@cc.hut.fi>
- * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
- *
- * Contact Information:
- * James P. Ketrenos <ipw2100-admin@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
-
-#include <linux/compiler.h>
-#include <linux/errno.h>
-#include <linux/if_arp.h>
-#include <linux/in6.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/pci.h>
-#include <linux/proc_fs.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <linux/tcp.h>
-#include <linux/types.h>
-#include <linux/wireless.h>
-#include <linux/etherdevice.h>
-#include <linux/uaccess.h>
-#include <net/arp.h>
-
-#include "ieee80211.h"
-
-MODULE_DESCRIPTION("802.11 data/management/control stack");
-MODULE_AUTHOR("Copyright (C) 2004 Intel Corporation <jketreno@linux.intel.com>");
-MODULE_LICENSE("GPL");
-
-#define DRV_NAME "ieee80211"
-
-static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee)
-{
- if (ieee->networks)
- return 0;
-
- ieee->networks = kcalloc(MAX_NETWORK_COUNT,
- sizeof(struct ieee80211_network),
- GFP_KERNEL);
- if (!ieee->networks) {
- netdev_warn(ieee->dev, "Out of memory allocating beacons\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static inline void ieee80211_networks_free(struct ieee80211_device *ieee)
-{
- if (!ieee->networks)
- return;
- kfree(ieee->networks);
- ieee->networks = NULL;
-}
-
-static inline void ieee80211_networks_initialize(struct ieee80211_device *ieee)
-{
- int i;
-
- INIT_LIST_HEAD(&ieee->network_free_list);
- INIT_LIST_HEAD(&ieee->network_list);
- for (i = 0; i < MAX_NETWORK_COUNT; i++)
- list_add_tail(&ieee->networks[i].list, &ieee->network_free_list);
-}
-
-struct net_device *alloc_ieee80211(int sizeof_priv)
-{
- struct ieee80211_device *ieee;
- struct net_device *dev;
- int i, err;
-
- IEEE80211_DEBUG_INFO("Initializing...\n");
-
- dev = alloc_etherdev(sizeof(struct ieee80211_device) + sizeof_priv);
- if (!dev) {
- IEEE80211_ERROR("Unable to network device.\n");
- goto failed;
- }
-
- ieee = netdev_priv(dev);
- ieee->dev = dev;
-
- err = ieee80211_networks_allocate(ieee);
- if (err) {
- IEEE80211_ERROR("Unable to allocate beacon storage: %d\n",
- err);
- goto failed;
- }
- ieee80211_networks_initialize(ieee);
-
- /* Default fragmentation threshold is maximum payload size */
- ieee->fts = DEFAULT_FTS;
- ieee->scan_age = DEFAULT_MAX_SCAN_AGE;
- ieee->open_wep = 1;
-
- /* Default to enabling full open WEP with host based encrypt/decrypt */
- ieee->host_encrypt = 1;
- ieee->host_decrypt = 1;
- ieee->ieee802_1x = 1; /* Default to supporting 802.1x */
-
- INIT_LIST_HEAD(&ieee->crypt_deinit_list);
- timer_setup(&ieee->crypt_deinit_timer, ieee80211_crypt_deinit_handler,
- 0);
-
- spin_lock_init(&ieee->lock);
- spin_lock_init(&ieee->wpax_suitlist_lock);
- spin_lock_init(&ieee->bw_spinlock);
- spin_lock_init(&ieee->reorder_spinlock);
- /* added by WB */
- atomic_set(&ieee->atm_chnlop, 0);
- atomic_set(&ieee->atm_swbw, 0);
-
- ieee->wpax_type_set = 0;
- ieee->wpa_enabled = 0;
- ieee->tkip_countermeasures = 0;
- ieee->drop_unencrypted = 0;
- ieee->privacy_invoked = 0;
- ieee->ieee802_1x = 1;
- ieee->raw_tx = 0;
- //ieee->hwsec_support = 1; //default to supporting hw security. //use module_param instead.
- ieee->hwsec_active = 0; /* disable hwsec, switch it on when necessary. */
-
- ieee80211_softmac_init(ieee);
-
- ieee->pHTInfo = kzalloc(sizeof(RT_HIGH_THROUGHPUT), GFP_KERNEL);
- if (!ieee->pHTInfo) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc memory for HTInfo\n");
-
- /* By this point in code ieee80211_networks_allocate() has been
- * successfully called so the memory allocated should be freed
- */
- ieee80211_networks_free(ieee);
- goto failed;
- }
- HTUpdateDefaultSetting(ieee);
- HTInitializeHTInfo(ieee); /* may move to other place. */
- TSInitialize(ieee);
-
- for (i = 0; i < IEEE_IBSS_MAC_HASH_SIZE; i++)
- INIT_LIST_HEAD(&ieee->ibss_mac_hash[i]);
-
- for (i = 0; i < 17; i++) {
- ieee->last_rxseq_num[i] = -1;
- ieee->last_rxfrag_num[i] = -1;
- ieee->last_packet_time[i] = 0;
- }
-
- return dev;
-
- failed:
- if (dev)
- free_netdev(dev);
-
- return NULL;
-}
-
-void free_ieee80211(struct net_device *dev)
-{
- struct ieee80211_device *ieee = netdev_priv(dev);
- int i;
- /* struct list_head *p, *q; */
-// del_timer_sync(&ieee->SwBwTimer);
- kfree(ieee->pHTInfo);
- ieee->pHTInfo = NULL;
- RemoveAllTS(ieee);
- ieee80211_softmac_free(ieee);
- del_timer_sync(&ieee->crypt_deinit_timer);
- ieee80211_crypt_deinit_entries(ieee, 1);
-
- for (i = 0; i < WEP_KEYS; i++) {
- struct ieee80211_crypt_data *crypt = ieee->crypt[i];
-
- if (crypt) {
- if (crypt->ops)
- crypt->ops->deinit(crypt->priv);
- kfree(crypt);
- ieee->crypt[i] = NULL;
- }
- }
-
- ieee80211_networks_free(ieee);
- free_netdev(dev);
-}
-
-#ifdef CONFIG_IEEE80211_DEBUG
-
-u32 ieee80211_debug_level;
-static int debug = // IEEE80211_DL_INFO |
- // IEEE80211_DL_WX |
- // IEEE80211_DL_SCAN |
- // IEEE80211_DL_STATE |
- // IEEE80211_DL_MGMT |
- // IEEE80211_DL_FRAG |
- // IEEE80211_DL_EAP |
- // IEEE80211_DL_DROP |
- // IEEE80211_DL_TX |
- // IEEE80211_DL_RX |
- //IEEE80211_DL_QOS |
- // IEEE80211_DL_HT |
- // IEEE80211_DL_TS |
-// IEEE80211_DL_BA |
- // IEEE80211_DL_REORDER|
-// IEEE80211_DL_TRACE |
- //IEEE80211_DL_DATA |
- IEEE80211_DL_ERR /* always keep this flag set so errors are shown */
- ;
-static struct proc_dir_entry *ieee80211_proc;
-
-static int show_debug_level(struct seq_file *m, void *v)
-{
- seq_printf(m, "0x%08X\n", ieee80211_debug_level);
-
- return 0;
-}
-
-static ssize_t write_debug_level(struct file *file, const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- unsigned long val;
- int err = kstrtoul_from_user(buffer, count, 0, &val);
-
- if (err)
- return err;
- ieee80211_debug_level = val;
- return count;
-}
-
-static int open_debug_level(struct inode *inode, struct file *file)
-{
- return single_open(file, show_debug_level, NULL);
-}
-
-static const struct proc_ops debug_level_proc_ops = {
- .proc_open = open_debug_level,
- .proc_read = seq_read,
- .proc_lseek = seq_lseek,
- .proc_write = write_debug_level,
- .proc_release = single_release,
-};
-
-int __init ieee80211_debug_init(void)
-{
- struct proc_dir_entry *e;
-
- ieee80211_debug_level = debug;
-
- ieee80211_proc = proc_mkdir(DRV_NAME, init_net.proc_net);
- if (!ieee80211_proc) {
- IEEE80211_ERROR("Unable to create " DRV_NAME
- " proc directory\n");
- return -EIO;
- }
- e = proc_create("debug_level", 0644, ieee80211_proc, &debug_level_proc_ops);
- if (!e) {
- remove_proc_entry(DRV_NAME, init_net.proc_net);
- ieee80211_proc = NULL;
- return -EIO;
- }
- return 0;
-}
-
-void ieee80211_debug_exit(void)
-{
- if (ieee80211_proc) {
- remove_proc_entry("debug_level", ieee80211_proc);
- remove_proc_entry(DRV_NAME, init_net.proc_net);
- ieee80211_proc = NULL;
- }
-}
-
-module_param(debug, int, 0444);
-MODULE_PARM_DESC(debug, "debug output mask");
-#endif
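The removed ieee80211_module.c allocates the scan-result table once (ieee80211_networks_allocate()) and threads every entry onto a free list at init (ieee80211_networks_initialize()), so new scan entries never allocate at runtime. A userspace sketch of that pre-allocated free-list pattern, with illustrative names and a plain pointer list instead of list_head, is:

/* Sketch of the fixed-table free-list init; not kernel code. */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_NETWORK_COUNT 8

struct demo_network {
    struct demo_network *next;
};

struct demo_stack {
    struct demo_network *table;     /* backing array, freed as one block */
    struct demo_network *free_list; /* entries available for new scans   */
};

static int demo_networks_init(struct demo_stack *s)
{
    int i;

    s->table = calloc(DEMO_NETWORK_COUNT, sizeof(*s->table));
    if (!s->table)
        return -1;

    s->free_list = NULL;
    for (i = 0; i < DEMO_NETWORK_COUNT; i++) {
        s->table[i].next = s->free_list;   /* push onto the free list */
        s->free_list = &s->table[i];
    }
    return 0;
}

int main(void)
{
    struct demo_stack s;
    const struct demo_network *p;
    int n = 0;

    if (demo_networks_init(&s))
        return 1;
    for (p = s.free_list; p; p = p->next)
        n++;
    printf("%d free network slots\n", n);
    free(s.table);
    return 0;
}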
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
deleted file mode 100644
index 5da8ac401df0..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ /dev/null
@@ -1,2430 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Original code based on Host AP (software wireless LAN access point) driver
- * for Intersil Prism2/2.5/3 - hostap.o module, common routines
- *
- * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
- * <jkmaline@cc.hut.fi>
- * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
- * Copyright (c) 2004, Intel Corporation
- ******************************************************************************
-
- A few modifications for Realtek's Wi-Fi drivers by
- Andrea Merello <andrea.merello@gmail.com>
-
- A special thanks goes to Realtek for their support!
-
-******************************************************************************/
-
-
-#include <linux/compiler.h>
-#include <linux/errno.h>
-#include <linux/if_arp.h>
-#include <linux/in6.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/pci.h>
-#include <linux/proc_fs.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <linux/tcp.h>
-#include <linux/types.h>
-#include <linux/wireless.h>
-#include <linux/etherdevice.h>
-#include <linux/uaccess.h>
-#include <linux/ctype.h>
-
-#include "ieee80211.h"
-#include "dot11d.h"
-static inline void ieee80211_monitor_rx(struct ieee80211_device *ieee,
- struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats)
-{
- struct rtl_80211_hdr_4addr *hdr = (struct rtl_80211_hdr_4addr *)skb->data;
- u16 fc = le16_to_cpu(hdr->frame_ctl);
-
- skb->dev = ieee->dev;
- skb_reset_mac_header(skb);
-
- skb_pull(skb, ieee80211_get_hdrlen(fc));
- skb->pkt_type = PACKET_OTHERHOST;
- skb->protocol = htons(ETH_P_80211_RAW);
- memset(skb->cb, 0, sizeof(skb->cb));
- netif_rx(skb);
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static struct ieee80211_frag_entry *
-ieee80211_frag_cache_find(struct ieee80211_device *ieee, unsigned int seq,
- unsigned int frag, u8 tid, u8 *src, u8 *dst)
-{
- struct ieee80211_frag_entry *entry;
- int i;
-
- for (i = 0; i < IEEE80211_FRAG_CACHE_LEN; i++) {
- entry = &ieee->frag_cache[tid][i];
- if (entry->skb &&
- time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
- IEEE80211_DEBUG_FRAG(
- "expiring fragment cache entry "
- "seq=%u last_frag=%u\n",
- entry->seq, entry->last_frag);
- dev_kfree_skb_any(entry->skb);
- entry->skb = NULL;
- }
-
- if (entry->skb && entry->seq == seq &&
- (entry->last_frag + 1 == frag || frag == -1) &&
- memcmp(entry->src_addr, src, ETH_ALEN) == 0 &&
- memcmp(entry->dst_addr, dst, ETH_ALEN) == 0)
- return entry;
- }
-
- return NULL;
-}
-
-/* Called only as a tasklet (software IRQ) */
-static struct sk_buff *
-ieee80211_frag_cache_get(struct ieee80211_device *ieee,
- struct rtl_80211_hdr_4addr *hdr)
-{
- struct sk_buff *skb = NULL;
- u16 fc = le16_to_cpu(hdr->frame_ctl);
- u16 sc = le16_to_cpu(hdr->seq_ctl);
- unsigned int frag = WLAN_GET_SEQ_FRAG(sc);
- unsigned int seq = WLAN_GET_SEQ_SEQ(sc);
- struct ieee80211_frag_entry *entry;
- struct rtl_80211_hdr_3addrqos *hdr_3addrqos;
- struct rtl_80211_hdr_4addrqos *hdr_4addrqos;
- u8 tid;
-
- if (((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS) && IEEE80211_QOS_HAS_SEQ(fc)) {
- hdr_4addrqos = (struct rtl_80211_hdr_4addrqos *)hdr;
- tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
- tid = UP2AC(tid);
- tid++;
- } else if (IEEE80211_QOS_HAS_SEQ(fc)) {
- hdr_3addrqos = (struct rtl_80211_hdr_3addrqos *)hdr;
- tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
- tid = UP2AC(tid);
- tid++;
- } else {
- tid = 0;
- }
-
- if (frag == 0) {
- /* Reserve enough space to fit maximum frame length */
- skb = dev_alloc_skb(ieee->dev->mtu +
- sizeof(struct rtl_80211_hdr_4addr) +
- 8 /* LLC */ +
- 2 /* alignment */ +
- 8 /* WEP */ +
- ETH_ALEN /* WDS */ +
- (IEEE80211_QOS_HAS_SEQ(fc) ? 2 : 0) /* QOS Control */);
- if (!skb)
- return NULL;
-
- entry = &ieee->frag_cache[tid][ieee->frag_next_idx[tid]];
- ieee->frag_next_idx[tid]++;
- if (ieee->frag_next_idx[tid] >= IEEE80211_FRAG_CACHE_LEN)
- ieee->frag_next_idx[tid] = 0;
-
- if (entry->skb)
- dev_kfree_skb_any(entry->skb);
-
- entry->first_frag_time = jiffies;
- entry->seq = seq;
- entry->last_frag = frag;
- entry->skb = skb;
- memcpy(entry->src_addr, hdr->addr2, ETH_ALEN);
- memcpy(entry->dst_addr, hdr->addr1, ETH_ALEN);
- } else {
- /* received a fragment of a frame for which the head fragment
- * should have already been received */
- entry = ieee80211_frag_cache_find(ieee, seq, frag, tid, hdr->addr2,
- hdr->addr1);
- if (entry) {
- entry->last_frag = frag;
- skb = entry->skb;
- }
- }
-
- return skb;
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee,
- struct rtl_80211_hdr_4addr *hdr)
-{
- u16 fc = le16_to_cpu(hdr->frame_ctl);
- u16 sc = le16_to_cpu(hdr->seq_ctl);
- unsigned int seq = WLAN_GET_SEQ_SEQ(sc);
- struct ieee80211_frag_entry *entry;
- struct rtl_80211_hdr_3addrqos *hdr_3addrqos;
- struct rtl_80211_hdr_4addrqos *hdr_4addrqos;
- u8 tid;
-
- if (((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS) && IEEE80211_QOS_HAS_SEQ(fc)) {
- hdr_4addrqos = (struct rtl_80211_hdr_4addrqos *)hdr;
- tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
- tid = UP2AC(tid);
- tid++;
- } else if (IEEE80211_QOS_HAS_SEQ(fc)) {
- hdr_3addrqos = (struct rtl_80211_hdr_3addrqos *)hdr;
- tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
- tid = UP2AC(tid);
- tid++;
- } else {
- tid = 0;
- }
-
- entry = ieee80211_frag_cache_find(ieee, seq, -1, tid, hdr->addr2,
- hdr->addr1);
-
- if (!entry) {
- IEEE80211_DEBUG_FRAG(
- "could not invalidate fragment cache "
- "entry (seq=%u)\n", seq);
- return -1;
- }
-
- entry->skb = NULL;
- return 0;
-}
-
-
-
-/* ieee80211_rx_frame_mgmt
- *
- * Responsible for handling management control frames
- *
- * Called by ieee80211_rx */
-static inline int
-ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats, u16 type,
- u16 stype)
-{
- /* The struct stats definition says this is not mandatory,
- * but the probe response parser seems to rely on it.
- */
- struct rtl_80211_hdr_3addr *hdr = (struct rtl_80211_hdr_3addr *)skb->data;
-
- rx_stats->len = skb->len;
- ieee80211_rx_mgt(ieee, (struct rtl_80211_hdr_4addr *)skb->data, rx_stats);
- /* if ((ieee->state == IEEE80211_LINKED) && (memcmp(hdr->addr3, ieee->current_network.bssid, ETH_ALEN))) */
- if ((memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN))) {
- /* use ADDR1 to perform address matching for Management frames */
- dev_kfree_skb_any(skb);
- return 0;
- }
-
- ieee80211_rx_frame_softmac(ieee, skb, rx_stats, type, stype);
-
- dev_kfree_skb_any(skb);
-
- return 0;
-
- #ifdef NOT_YET
- if (ieee->iw_mode == IW_MODE_MASTER) {
- netdev_dbg(ieee->dev, "Master mode not yet supported.\n");
- return 0;
-/*
- hostap_update_sta_ps(ieee, (struct hostap_ieee80211_hdr_4addr *)
- skb->data);*/
- }
-
- if (ieee->hostapd && type == IEEE80211_TYPE_MGMT) {
- if (stype == WLAN_FC_STYPE_BEACON &&
- ieee->iw_mode == IW_MODE_MASTER) {
- struct sk_buff *skb2;
- /* Process beacon frames also in kernel driver to
- * update STA(AP) table statistics */
- skb2 = skb_clone(skb, GFP_ATOMIC);
- if (skb2)
- hostap_rx(skb2->dev, skb2, rx_stats);
- }
-
- /* send management frames to the user space daemon for
- * processing */
- ieee->apdevstats.rx_packets++;
- ieee->apdevstats.rx_bytes += skb->len;
- prism2_rx_80211(ieee->apdev, skb, rx_stats, PRISM2_RX_MGMT);
- return 0;
- }
-
- if (ieee->iw_mode == IW_MODE_MASTER) {
- if (type != WLAN_FC_TYPE_MGMT && type != WLAN_FC_TYPE_CTRL) {
- netdev_dbg(skb->dev, "unknown management frame "
- "(type=0x%02x, stype=0x%02x) dropped\n",
- type, stype);
- return -1;
- }
-
- hostap_rx(skb->dev, skb, rx_stats);
- return 0;
- }
-
- netdev_dbg(skb->dev, "hostap_rx_frame_mgmt: management frame "
- "received in non-Host AP mode\n");
- return -1;
- #endif
-}
-
-
-
-/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
-/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
-static unsigned char rfc1042_header[] = {
- 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
-/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
-static unsigned char bridge_tunnel_header[] = {
- 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
-/* No encapsulation header if EtherType < 0x600 (=length) */
-
-/* Called by ieee80211_rx_frame_decrypt */
-static int ieee80211_is_eapol_frame(struct ieee80211_device *ieee,
- struct sk_buff *skb, size_t hdrlen)
-{
- struct net_device *dev = ieee->dev;
- u16 fc, ethertype;
- struct rtl_80211_hdr_4addr *hdr;
- u8 *pos;
-
- if (skb->len < 24)
- return 0;
-
- hdr = (struct rtl_80211_hdr_4addr *)skb->data;
- fc = le16_to_cpu(hdr->frame_ctl);
-
-	/* check that the frame is a unicast frame addressed to us */
- if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
- IEEE80211_FCTL_TODS &&
- memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0 &&
- memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) {
- /* ToDS frame with own addr BSSID and DA */
- } else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
- IEEE80211_FCTL_FROMDS &&
- memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
- /* FromDS frame with own addr as DA */
- } else
- return 0;
-
- if (skb->len < 24 + 8)
- return 0;
-
- /* check for port access entity Ethernet type */
-// pos = skb->data + 24;
- pos = skb->data + hdrlen;
- ethertype = (pos[6] << 8) | pos[7];
- if (ethertype == ETH_P_PAE)
- return 1;
-
- return 0;
-}
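
The EAPOL test above reduces to reading the EtherType at bytes 6-7 of the LLC/SNAP block that follows the 802.11 header. A minimal user-space sketch of the same check (hypothetical stub frame; ETH_P_PAE written out as its well-known value 0x888E):

#include <stdint.h>
#include <stdio.h>

#define ETH_P_PAE 0x888E	/* EAPOL (port access entity) EtherType */

/* Return 1 when the LLC/SNAP block starting at hdrlen carries EAPOL. */
static int payload_is_eapol(const uint8_t *frame, size_t len, size_t hdrlen)
{
	uint16_t ethertype;

	if (len < hdrlen + 8)		/* need SNAP (6) + EtherType (2) */
		return 0;
	ethertype = (frame[hdrlen + 6] << 8) | frame[hdrlen + 7];
	return ethertype == ETH_P_PAE;
}

int main(void)
{
	/* 24-byte header stub followed by an RFC1042 SNAP header and 0x888e. */
	uint8_t frame[32] = { 0 };

	frame[24] = 0xaa; frame[25] = 0xaa; frame[26] = 0x03;
	frame[30] = 0x88; frame[31] = 0x8e;
	printf("eapol=%d\n", payload_is_eapol(frame, sizeof(frame), 24));
	return 0;
}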
-
-/* Called only as a tasklet (software IRQ), by ieee80211_rx */
-static inline int
-ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb,
- struct ieee80211_crypt_data *crypt)
-{
- struct rtl_80211_hdr_4addr *hdr;
- int res, hdrlen;
-
- if (!crypt || !crypt->ops->decrypt_mpdu)
- return 0;
- if (ieee->hwsec_active) {
- struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- tcb_desc->bHwSec = 1;
- }
- hdr = (struct rtl_80211_hdr_4addr *)skb->data;
- hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
-
- if (ieee->tkip_countermeasures &&
- strcmp(crypt->ops->name, "TKIP") == 0) {
- if (net_ratelimit()) {
- netdev_dbg(ieee->dev, "TKIP countermeasures: dropped "
- "received packet from %pM\n",
- hdr->addr2);
- }
- return -1;
- }
-
- atomic_inc(&crypt->refcnt);
- res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv);
- atomic_dec(&crypt->refcnt);
- if (res < 0) {
- IEEE80211_DEBUG_DROP(
- "decryption failed (SA=%pM"
- ") res=%d\n", hdr->addr2, res);
- if (res == -2)
- IEEE80211_DEBUG_DROP("Decryption failed ICV "
- "mismatch (key %d)\n",
- skb->data[hdrlen + 3] >> 6);
- ieee->ieee_stats.rx_discards_undecryptable++;
- return -1;
- }
-
- return res;
-}
-
-
-/* Called only as a tasklet (software IRQ), by ieee80211_rx */
-static inline int
-ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee, struct sk_buff *skb,
- int keyidx, struct ieee80211_crypt_data *crypt)
-{
- struct rtl_80211_hdr_4addr *hdr;
- int res, hdrlen;
-
- if (!crypt || !crypt->ops->decrypt_msdu)
- return 0;
- if (ieee->hwsec_active) {
- struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- tcb_desc->bHwSec = 1;
- }
-
- hdr = (struct rtl_80211_hdr_4addr *)skb->data;
- hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
-
- atomic_inc(&crypt->refcnt);
- res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
- atomic_dec(&crypt->refcnt);
- if (res < 0) {
- netdev_dbg(ieee->dev, "MSDU decryption/MIC verification failed"
- " (SA=%pM keyidx=%d)\n",
- hdr->addr2, keyidx);
- return -1;
- }
-
- return 0;
-}
-
-
-/* this function is stolen from ipw2200 driver*/
-#define IEEE_PACKET_RETRY_TIME (5 * HZ)
-static int is_duplicate_packet(struct ieee80211_device *ieee,
- struct rtl_80211_hdr_4addr *header)
-{
- u16 fc = le16_to_cpu(header->frame_ctl);
- u16 sc = le16_to_cpu(header->seq_ctl);
- u16 seq = WLAN_GET_SEQ_SEQ(sc);
- u16 frag = WLAN_GET_SEQ_FRAG(sc);
- u16 *last_seq, *last_frag;
- unsigned long *last_time;
- struct rtl_80211_hdr_3addrqos *hdr_3addrqos;
- struct rtl_80211_hdr_4addrqos *hdr_4addrqos;
- u8 tid;
-
-
-	// 4-address (ToDS and FromDS) QoS frame
- if (((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS) && IEEE80211_QOS_HAS_SEQ(fc)) {
- hdr_4addrqos = (struct rtl_80211_hdr_4addrqos *)header;
- tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
- tid = UP2AC(tid);
- tid++;
- } else if (IEEE80211_QOS_HAS_SEQ(fc)) { //QoS
- hdr_3addrqos = (struct rtl_80211_hdr_3addrqos *)header;
- tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
- tid = UP2AC(tid);
- tid++;
- } else { // no QoS
- tid = 0;
- }
-
- switch (ieee->iw_mode) {
- case IW_MODE_ADHOC:
- {
- struct list_head *p;
- struct ieee_ibss_seq *entry = NULL;
- u8 *mac = header->addr2;
- int index = mac[5] % IEEE_IBSS_MAC_HASH_SIZE;
-
- list_for_each(p, &ieee->ibss_mac_hash[index]) {
- entry = list_entry(p, struct ieee_ibss_seq, list);
- if (!memcmp(entry->mac, mac, ETH_ALEN))
- break;
- }
- // if (memcmp(entry->mac, mac, ETH_ALEN)){
- if (p == &ieee->ibss_mac_hash[index]) {
- entry = kmalloc(sizeof(struct ieee_ibss_seq), GFP_ATOMIC);
- if (!entry)
- return 0;
- memcpy(entry->mac, mac, ETH_ALEN);
- entry->seq_num[tid] = seq;
- entry->frag_num[tid] = frag;
- entry->packet_time[tid] = jiffies;
- list_add(&entry->list, &ieee->ibss_mac_hash[index]);
- return 0;
- }
- last_seq = &entry->seq_num[tid];
- last_frag = &entry->frag_num[tid];
- last_time = &entry->packet_time[tid];
- break;
- }
-
- case IW_MODE_INFRA:
- last_seq = &ieee->last_rxseq_num[tid];
- last_frag = &ieee->last_rxfrag_num[tid];
- last_time = &ieee->last_packet_time[tid];
-
- break;
- default:
- return 0;
- }
-
-// if(tid != 0) {
-// printk(KERN_WARNING ":)))))))))))%x %x %x, fc(%x)\n", tid, *last_seq, seq, header->frame_ctl);
-// }
- if ((*last_seq == seq) &&
- time_after(*last_time + IEEE_PACKET_RETRY_TIME, jiffies)) {
- if (*last_frag == frag)
- goto drop;
- if (*last_frag + 1 != frag)
- /* out-of-order fragment */
- goto drop;
- } else
- *last_seq = seq;
-
- *last_frag = frag;
- *last_time = jiffies;
- return 0;
-
-drop:
-// BUG_ON(!(fc & IEEE80211_FCTL_RETRY));
-
- return 1;
-}
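
The WLAN_GET_SEQ_SEQ()/WLAN_GET_SEQ_FRAG() macros used above are defined elsewhere in this driver; per the 802.11 sequence-control layout, bits 0-3 carry the fragment number and bits 4-15 the sequence number. A small standalone sketch of that unpacking:

#include <stdint.h>
#include <stdio.h>

/* 802.11 Sequence Control: bits 0-3 = fragment number, bits 4-15 = sequence number. */
static unsigned seq_of(uint16_t sc)  { return (sc >> 4) & 0x0fff; }
static unsigned frag_of(uint16_t sc) { return sc & 0x000f; }

int main(void)
{
	uint16_t sc = 0x1234;	/* hypothetical host-order seq_ctl value */

	printf("seq=%u frag=%u\n", seq_of(sc), frag_of(sc));	/* seq=291 frag=4 */
	return 0;
}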
-
-static bool AddReorderEntry(struct rx_ts_record *pTS, struct rx_reorder_entry *pReorderEntry)
-{
- struct list_head *pList = &pTS->rx_pending_pkt_list;
- while (pList->next != &pTS->rx_pending_pkt_list) {
- if (SN_LESS(pReorderEntry->SeqNum, list_entry(pList->next, struct rx_reorder_entry, List)->SeqNum))
- pList = pList->next;
- else if (SN_EQUAL(pReorderEntry->SeqNum, list_entry(pList->next, struct rx_reorder_entry, List)->SeqNum))
- return false;
- else
- break;
- }
- pReorderEntry->List.next = pList->next;
- pReorderEntry->List.next->prev = &pReorderEntry->List;
- pReorderEntry->List.prev = pList;
- pList->next = &pReorderEntry->List;
-
- return true;
-}
-
-static void indicate_packets(struct ieee80211_device *ieee,
- struct ieee80211_rxb *rxb)
-{
- struct net_device_stats *stats = &ieee->stats;
- struct net_device *dev = ieee->dev;
- u16 ethertype;
- u8 i;
-
- for (i = 0; i < rxb->nr_subframes; i++) {
- struct sk_buff *sub_skb = rxb->subframes[i];
-
- if (!sub_skb)
- continue;
-
- /* convert hdr + possible LLC headers into Ethernet header */
- ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7];
- if (sub_skb->len >= 8 &&
- ((!memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) &&
- ethertype != ETH_P_AARP &&
- ethertype != ETH_P_IPX) ||
- !memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE))) {
- /* remove RFC1042 or Bridge-Tunnel encapsulation and
- * replace EtherType */
- skb_pull(sub_skb, SNAP_SIZE);
- } else {
- /* Leave Ethernet header part of hdr and full payload */
- put_unaligned_be16(sub_skb->len, skb_push(sub_skb, 2));
- }
- memcpy(skb_push(sub_skb, ETH_ALEN), rxb->src, ETH_ALEN);
- memcpy(skb_push(sub_skb, ETH_ALEN), rxb->dst, ETH_ALEN);
-
- stats->rx_packets++;
- stats->rx_bytes += sub_skb->len;
- if (is_multicast_ether_addr(rxb->dst))
- stats->multicast++;
-
- /* Indicate the packets to upper layer */
- sub_skb->protocol = eth_type_trans(sub_skb, dev);
- memset(sub_skb->cb, 0, sizeof(sub_skb->cb));
- sub_skb->dev = dev;
- /* 802.11 crc not sufficient */
- sub_skb->ip_summed = CHECKSUM_NONE;
- ieee->last_rx_ps_time = jiffies;
- netif_rx(sub_skb);
- }
-}
-
-void ieee80211_indicate_packets(struct ieee80211_device *ieee,
- struct ieee80211_rxb **prxbIndicateArray,
- u8 index)
-{
- u8 i;
-
- for (i = 0; i < index; i++) {
- struct ieee80211_rxb *prxb = prxbIndicateArray[i];
-
- indicate_packets(ieee, prxb);
- kfree(prxb);
- prxb = NULL;
- }
-}
-
-static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
- struct ieee80211_rxb *prxb,
- struct rx_ts_record *pTS, u16 SeqNum)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- struct rx_reorder_entry *pReorderEntry = NULL;
- struct ieee80211_rxb **prxbIndicateArray;
- u8 WinSize = pHTInfo->RxReorderWinSize;
- u16 WinEnd = (pTS->rx_indicate_seq + WinSize - 1) % 4096;
- u8 index = 0;
- bool bMatchWinStart = false, bPktInBuf = false;
- IEEE80211_DEBUG(IEEE80211_DL_REORDER, "%s(): Seq is %d,pTS->rx_indicate_seq is %d, WinSize is %d\n", __func__, SeqNum, pTS->rx_indicate_seq, WinSize);
-
- prxbIndicateArray = kmalloc_array(REORDER_WIN_SIZE,
- sizeof(struct ieee80211_rxb *),
- GFP_ATOMIC);
- if (!prxbIndicateArray)
- return;
-
- /* Rx Reorder initialize condition.*/
- if (pTS->rx_indicate_seq == 0xffff)
- pTS->rx_indicate_seq = SeqNum;
-
- /* Drop out the packet which SeqNum is smaller than WinStart */
- if (SN_LESS(SeqNum, pTS->rx_indicate_seq)) {
- IEEE80211_DEBUG(IEEE80211_DL_REORDER, "Packet Drop! IndicateSeq: %d, NewSeq: %d\n",
- pTS->rx_indicate_seq, SeqNum);
- pHTInfo->RxReorderDropCounter++;
- {
- int i;
- for (i = 0; i < prxb->nr_subframes; i++)
- dev_kfree_skb(prxb->subframes[i]);
-
- kfree(prxb);
- prxb = NULL;
- }
-
- kfree(prxbIndicateArray);
- return;
- }
-
- /*
-	 * Sliding window manipulation. Conditions include:
-	 * 1. Incoming SeqNum is equal to WinStart => window shifts by 1
-	 * 2. Incoming SeqNum is larger than WinEnd => window shifts by N
- */
- if (SN_EQUAL(SeqNum, pTS->rx_indicate_seq)) {
- pTS->rx_indicate_seq = (pTS->rx_indicate_seq + 1) % 4096;
- bMatchWinStart = true;
- } else if (SN_LESS(WinEnd, SeqNum)) {
- if (SeqNum >= (WinSize - 1))
- pTS->rx_indicate_seq = SeqNum + 1 - WinSize;
- else
- pTS->rx_indicate_seq = 4095 - (WinSize - (SeqNum + 1)) + 1;
-
- IEEE80211_DEBUG(IEEE80211_DL_REORDER, "Window Shift! IndicateSeq: %d, NewSeq: %d\n", pTS->rx_indicate_seq, SeqNum);
- }
-
- /*
- * Indication process.
- * After Packet dropping and Sliding Window shifting as above, we can now just indicate the packets
- * with the SeqNum smaller than latest WinStart and buffer other packets.
- */
- /* For Rx Reorder condition:
- * 1. All packets with SeqNum smaller than WinStart => Indicate
- * 2. All packets with SeqNum larger than or equal to WinStart => Buffer it.
- */
- if (bMatchWinStart) {
- /* Current packet is going to be indicated.*/
- IEEE80211_DEBUG(IEEE80211_DL_REORDER, "Packets indication!! IndicateSeq: %d, NewSeq: %d\n",\
- pTS->rx_indicate_seq, SeqNum);
- prxbIndicateArray[0] = prxb;
-// printk("========================>%s(): SeqNum is %d\n",__func__,SeqNum);
- index = 1;
- } else {
- /* Current packet is going to be inserted into pending list.*/
- //IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): We RX no ordered packed, insert to ordered list\n",__func__);
- if (!list_empty(&ieee->RxReorder_Unused_List)) {
- pReorderEntry = list_entry(ieee->RxReorder_Unused_List.next, struct rx_reorder_entry, List);
- list_del_init(&pReorderEntry->List);
-
-			/* Make a reorder entry and insert it into the packet list. */
- pReorderEntry->SeqNum = SeqNum;
- pReorderEntry->prxb = prxb;
- // IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): pREorderEntry->SeqNum is %d\n",__func__,pReorderEntry->SeqNum);
-
- if (!AddReorderEntry(pTS, pReorderEntry)) {
- IEEE80211_DEBUG(IEEE80211_DL_REORDER, "%s(): Duplicate packet is dropped!! IndicateSeq: %d, NewSeq: %d\n",
- __func__, pTS->rx_indicate_seq, SeqNum);
- list_add_tail(&pReorderEntry->List, &ieee->RxReorder_Unused_List);
- {
- int i;
- for (i = 0; i < prxb->nr_subframes; i++)
- dev_kfree_skb(prxb->subframes[i]);
-
- kfree(prxb);
- prxb = NULL;
- }
- } else {
- IEEE80211_DEBUG(IEEE80211_DL_REORDER,
- "Pkt insert into buffer!! IndicateSeq: %d, NewSeq: %d\n", pTS->rx_indicate_seq, SeqNum);
- }
- } else {
- /*
-			 * Packets are dropped if there are not enough reorder entries.
-			 * This part should be reworked: we could just indicate all the
-			 * buffered packets to free up reorder entries.
- */
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): There is no reorder entry!! Packet is dropped!!\n");
- {
- int i;
- for (i = 0; i < prxb->nr_subframes; i++)
- dev_kfree_skb(prxb->subframes[i]);
-
- kfree(prxb);
- prxb = NULL;
- }
- }
- }
-
- /* Check if there is any packet need indicate.*/
- while (!list_empty(&pTS->rx_pending_pkt_list)) {
- IEEE80211_DEBUG(IEEE80211_DL_REORDER, "%s(): start RREORDER indicate\n", __func__);
- pReorderEntry = list_entry(pTS->rx_pending_pkt_list.prev, struct rx_reorder_entry, List);
- if (SN_LESS(pReorderEntry->SeqNum, pTS->rx_indicate_seq) ||
- SN_EQUAL(pReorderEntry->SeqNum, pTS->rx_indicate_seq)) {
-			/* This protects the buffer from overflow. */
- if (index >= REORDER_WIN_SIZE) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Buffer overflow!! \n");
- bPktInBuf = true;
- break;
- }
-
- list_del_init(&pReorderEntry->List);
-
- if (SN_EQUAL(pReorderEntry->SeqNum, pTS->rx_indicate_seq))
- pTS->rx_indicate_seq = (pTS->rx_indicate_seq + 1) % 4096;
-
- IEEE80211_DEBUG(IEEE80211_DL_REORDER, "Packets indication!! IndicateSeq: %d, NewSeq: %d\n", pTS->rx_indicate_seq, SeqNum);
- prxbIndicateArray[index] = pReorderEntry->prxb;
- // printk("========================>%s(): pReorderEntry->SeqNum is %d\n",__func__,pReorderEntry->SeqNum);
- index++;
-
- list_add_tail(&pReorderEntry->List, &ieee->RxReorder_Unused_List);
- } else {
- bPktInBuf = true;
- break;
- }
- }
-
-	/* Handle the pending timer. Set it to prevent long Rx buffering. */
- if (index > 0) {
- // Cancel previous pending timer.
- // del_timer_sync(&pTS->rx_pkt_pending_timer);
- pTS->rx_timeout_indicate_seq = 0xffff;
-
- // Indicate packets
- if (index > REORDER_WIN_SIZE) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorder buffer full!! \n");
- kfree(prxbIndicateArray);
- return;
- }
- ieee80211_indicate_packets(ieee, prxbIndicateArray, index);
- }
-
- if (bPktInBuf && pTS->rx_timeout_indicate_seq == 0xffff) {
- // Set new pending timer.
- IEEE80211_DEBUG(IEEE80211_DL_REORDER, "%s(): SET rx timeout timer\n", __func__);
- pTS->rx_timeout_indicate_seq = pTS->rx_indicate_seq;
- if (timer_pending(&pTS->rx_pkt_pending_timer))
- del_timer_sync(&pTS->rx_pkt_pending_timer);
- pTS->rx_pkt_pending_timer.expires = jiffies +
- msecs_to_jiffies(pHTInfo->RxReorderPendingTime);
- add_timer(&pTS->rx_pkt_pending_timer);
- }
-
- kfree(prxbIndicateArray);
-}
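
SN_LESS()/SN_EQUAL() are defined elsewhere in the driver and order 12-bit sequence numbers that wrap at 4096, which is why the window arithmetic above is done modulo 4096. One common way to express such a wrap-aware comparison, sketched standalone:

#include <stdint.h>
#include <stdio.h>

/*
 * 12-bit sequence numbers wrap at 4096: "a precedes b" when the modular
 * difference (a - b) falls in the upper half of the space, i.e. bit 11
 * of (a - b) is set.
 */
static int sn_less(uint16_t a, uint16_t b)
{
	return ((a - b) & 0x800) != 0;
}

int main(void)
{
	/* 4090 precedes 5 across the wrap-around; 5 does not precede 4090. */
	printf("%d %d\n", sn_less(4090, 5), sn_less(5, 4090));	/* 1 0 */
	return 0;
}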
-
-static u8 parse_subframe(struct ieee80211_device *ieee,
- struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats,
- struct ieee80211_rxb *rxb, u8 *src, u8 *dst)
-{
- struct rtl_80211_hdr_3addr *hdr = (struct rtl_80211_hdr_3addr *)skb->data;
- u16 fc = le16_to_cpu(hdr->frame_ctl);
-
- u16 LLCOffset = sizeof(struct rtl_80211_hdr_3addr);
- u16 ChkLength;
- bool bIsAggregateFrame = false;
- u16 nSubframe_Length;
- u8 nPadding_Length = 0;
- u16 SeqNum = 0;
-
- struct sk_buff *sub_skb;
- /* just for debug purpose */
- SeqNum = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctl));
-
- if ((IEEE80211_QOS_HAS_SEQ(fc)) && \
- (((frameqos *)(skb->data + IEEE80211_3ADDR_LEN))->field.reserved)) {
- bIsAggregateFrame = true;
- }
-
- if (IEEE80211_QOS_HAS_SEQ(fc))
- LLCOffset += 2;
-
- if (rx_stats->bContainHTC)
- LLCOffset += HTCLNG;
-
- // Null packet, don't indicate it to upper layer
- ChkLength = LLCOffset;/* + (Frame_WEP(frame)!=0 ?Adapter->MgntInfo.SecurityInfo.EncryptionHeadOverhead:0);*/
-
- if (skb->len <= ChkLength)
- return 0;
-
- skb_pull(skb, LLCOffset);
-
- if (!bIsAggregateFrame) {
- rxb->nr_subframes = 1;
-#ifdef JOHN_NOCPY
- rxb->subframes[0] = skb;
-#else
- rxb->subframes[0] = skb_copy(skb, GFP_ATOMIC);
-#endif
-
- memcpy(rxb->src, src, ETH_ALEN);
- memcpy(rxb->dst, dst, ETH_ALEN);
- //IEEE80211_DEBUG_DATA(IEEE80211_DL_RX,skb->data,skb->len);
- return 1;
- } else {
- rxb->nr_subframes = 0;
- memcpy(rxb->src, src, ETH_ALEN);
- memcpy(rxb->dst, dst, ETH_ALEN);
- while (skb->len > ETHERNET_HEADER_SIZE) {
-			/* Offset 12 skips the two 6-byte MAC addresses */
-			nSubframe_Length = *((u16 *)(skb->data + 12));
-			/* the subframe length is big-endian on the wire; swap the byte order */
-			nSubframe_Length = (nSubframe_Length >> 8) + (nSubframe_Length << 8);
-
- if (skb->len < (ETHERNET_HEADER_SIZE + nSubframe_Length)) {
- netdev_dbg(ieee->dev, "A-MSDU parse error!! pRfd->nTotalSubframe : %d\n",
- rxb->nr_subframes);
- netdev_dbg(ieee->dev, "A-MSDU parse error!! Subframe Length: %d\n", nSubframe_Length);
- netdev_dbg(ieee->dev, "nRemain_Length is %d and nSubframe_Length is : %d\n", skb->len, nSubframe_Length);
- netdev_dbg(ieee->dev, "The Packet SeqNum is %d\n", SeqNum);
- return 0;
- }
-
- /* move the data point to data content */
- skb_pull(skb, ETHERNET_HEADER_SIZE);
-
-#ifdef JOHN_NOCPY
- sub_skb = skb_clone(skb, GFP_ATOMIC);
- sub_skb->len = nSubframe_Length;
- sub_skb->tail = sub_skb->data + nSubframe_Length;
-#else
- /* Allocate new skb for releasing to upper layer */
- sub_skb = dev_alloc_skb(nSubframe_Length + 12);
- if (!sub_skb)
- return 0;
- skb_reserve(sub_skb, 12);
- skb_put_data(sub_skb, skb->data, nSubframe_Length);
-#endif
- rxb->subframes[rxb->nr_subframes++] = sub_skb;
- if (rxb->nr_subframes >= MAX_SUBFRAME_COUNT) {
- IEEE80211_DEBUG_RX("ParseSubframe(): Too many Subframes! Packets dropped!\n");
- break;
- }
- skb_pull(skb, nSubframe_Length);
-
- if (skb->len != 0) {
- nPadding_Length = 4 - ((nSubframe_Length + ETHERNET_HEADER_SIZE) % 4);
- if (nPadding_Length == 4)
- nPadding_Length = 0;
-
- if (skb->len < nPadding_Length)
- return 0;
-
- skb_pull(skb, nPadding_Length);
- }
- }
-#ifdef JOHN_NOCPY
- dev_kfree_skb(skb);
-#endif
- //{just for debug added by david
- //printk("AMSDU::rxb->nr_subframes = %d\n",rxb->nr_subframes);
- //}
- return rxb->nr_subframes;
- }
-}
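
The padding arithmetic in the A-MSDU loop above follows the usual rule that each subframe (14-byte DA/SA/length header plus payload) is padded to a 4-byte boundary before the next one starts. A standalone sketch of that calculation, with hypothetical lengths:

#include <stdio.h>

/* Pad each A-MSDU subframe (14-byte header + payload) out to a 4-byte boundary. */
static unsigned amsdu_padding(unsigned payload_len)
{
	unsigned rem = (payload_len + 14) % 4;

	return rem ? 4 - rem : 0;
}

int main(void)
{
	/* (1+14)%4=3 -> pad 1; (2+14)%4=0 -> pad 0; (7+14)%4=1 -> pad 3 */
	printf("%u %u %u\n", amsdu_padding(1), amsdu_padding(2), amsdu_padding(7));
	return 0;
}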
-
-/* All received frames are sent to this function. @skb contains the frame in
- * IEEE 802.11 format, i.e., in the format it was sent over air.
- * This function is called only as a tasklet (software IRQ). */
-int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats)
-{
- struct net_device *dev = ieee->dev;
- struct rtl_80211_hdr_4addr *hdr;
- //struct rtl_80211_hdr_3addrqos *hdr;
-
- size_t hdrlen;
- u16 fc, type, stype, sc;
- struct net_device_stats *stats;
- unsigned int frag;
- //added by amy for reorder
- u8 TID = 0;
- u16 SeqNum = 0;
- struct rx_ts_record *pTS = NULL;
- //bool bIsAggregateFrame = false;
- //added by amy for reorder
-#ifdef NOT_YET
- struct net_device *wds = NULL;
- int from_assoc_ap = 0;
- void *sta = NULL;
-#endif
-// u16 qos_ctl = 0;
- u8 dst[ETH_ALEN];
- u8 src[ETH_ALEN];
- u8 bssid[ETH_ALEN];
- struct ieee80211_crypt_data *crypt = NULL;
- int keyidx = 0;
-
- int i;
- struct ieee80211_rxb *rxb = NULL;
- // cheat the hdr type
- hdr = (struct rtl_80211_hdr_4addr *)skb->data;
- stats = &ieee->stats;
-
- if (skb->len < 10) {
- netdev_info(dev, "SKB length < 10\n");
- goto rx_dropped;
- }
-
- fc = le16_to_cpu(hdr->frame_ctl);
- type = WLAN_FC_GET_TYPE(fc);
- stype = WLAN_FC_GET_STYPE(fc);
- sc = le16_to_cpu(hdr->seq_ctl);
-
- frag = WLAN_GET_SEQ_FRAG(sc);
- hdrlen = ieee80211_get_hdrlen(fc);
-
- if (HTCCheck(ieee, skb->data)) {
- if (net_ratelimit())
- netdev_warn(dev, "find HTCControl\n");
- hdrlen += 4;
- rx_stats->bContainHTC = true;
- }
-
- //IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, skb->data, skb->len);
-#ifdef NOT_YET
- /* Put this code here so that we avoid duplicating it in all
- * Rx paths. - Jean II */
-#ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */
- /* If spy monitoring on */
- if (iface->spy_data.spy_number > 0) {
- struct iw_quality wstats;
- wstats.level = rx_stats->rssi;
- wstats.noise = rx_stats->noise;
- wstats.updated = 6; /* No qual value */
- /* Update spy records */
- wireless_spy_update(dev, hdr->addr2, &wstats);
- }
-#endif /* IW_WIRELESS_SPY */
- hostap_update_rx_stats(local->ap, hdr, rx_stats);
-#endif
-
- if (ieee->iw_mode == IW_MODE_MONITOR) {
- unsigned int len = skb->len;
-
- ieee80211_monitor_rx(ieee, skb, rx_stats);
- stats->rx_packets++;
- stats->rx_bytes += len;
- return 1;
- }
-
- if (ieee->host_decrypt) {
- int idx = 0;
- if (skb->len >= hdrlen + 3)
- idx = skb->data[hdrlen + 3] >> 6;
- crypt = ieee->crypt[idx];
-#ifdef NOT_YET
- sta = NULL;
-
- /* Use station specific key to override default keys if the
- * receiver address is a unicast address ("individual RA"). If
- * bcrx_sta_key parameter is set, station specific key is used
- * even with broad/multicast targets (this is against IEEE
- * 802.11, but makes it easier to use different keys with
- * stations that do not support WEP key mapping). */
-
- if (!(hdr->addr1[0] & 0x01) || local->bcrx_sta_key)
- (void)hostap_handle_sta_crypto(local, hdr, &crypt,
- &sta);
-#endif
-
-		/* allow NULL decrypt to indicate a station-specific override
-		 * for default encryption */
- if (crypt && (!crypt->ops || !crypt->ops->decrypt_mpdu))
- crypt = NULL;
-
- if (!crypt && (fc & IEEE80211_FCTL_WEP)) {
- /* This seems to be triggered by some (multicast?)
- * frames from other than current BSS, so just drop the
- * frames silently instead of filling system log with
- * these reports. */
- IEEE80211_DEBUG_DROP("Decryption failed (not set)"
- " (SA=%pM)\n",
- hdr->addr2);
- ieee->ieee_stats.rx_discards_undecryptable++;
- goto rx_dropped;
- }
- }
-
- if (skb->len < IEEE80211_DATA_HDR3_LEN)
- goto rx_dropped;
-
- // if QoS enabled, should check the sequence for each of the AC
- if ((!ieee->pHTInfo->bCurRxReorderEnable) || !ieee->current_network.qos_data.active || !IsDataFrame(skb->data) || IsLegacyDataFrame(skb->data)) {
- if (is_duplicate_packet(ieee, hdr))
- goto rx_dropped;
-
- } else {
- struct rx_ts_record *pRxTS = NULL;
- //IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): QOS ENABLE AND RECEIVE QOS DATA , we will get Ts, tid:%d\n",__func__, tid);
- if (GetTs(
- ieee,
- (struct ts_common_info **)&pRxTS,
- hdr->addr2,
- Frame_QoSTID((u8 *)(skb->data)),
- RX_DIR,
- true)) {
-
- // IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): pRxTS->rx_last_frag_num is %d,frag is %d,pRxTS->rx_last_seq_num is %d,seq is %d\n",__func__,pRxTS->rx_last_frag_num,frag,pRxTS->rx_last_seq_num,WLAN_GET_SEQ_SEQ(sc));
- if ((fc & (1 << 11)) &&
- (frag == pRxTS->rx_last_frag_num) &&
- (WLAN_GET_SEQ_SEQ(sc) == pRxTS->rx_last_seq_num)) {
- goto rx_dropped;
- } else {
- pRxTS->rx_last_frag_num = frag;
- pRxTS->rx_last_seq_num = WLAN_GET_SEQ_SEQ(sc);
- }
- } else {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "%s(): No TS!! Skip the check!!\n", __func__);
- goto rx_dropped;
- }
- }
- if (type == IEEE80211_FTYPE_MGMT) {
-
-
- //IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, skb->data, skb->len);
- if (ieee80211_rx_frame_mgmt(ieee, skb, rx_stats, type, stype))
- goto rx_dropped;
- else
- goto rx_exit;
- }
-
- /* Data frame - extract src/dst addresses */
- switch (fc & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
- case IEEE80211_FCTL_FROMDS:
- memcpy(dst, hdr->addr1, ETH_ALEN);
- memcpy(src, hdr->addr3, ETH_ALEN);
- memcpy(bssid, hdr->addr2, ETH_ALEN);
- break;
- case IEEE80211_FCTL_TODS:
- memcpy(dst, hdr->addr3, ETH_ALEN);
- memcpy(src, hdr->addr2, ETH_ALEN);
- memcpy(bssid, hdr->addr1, ETH_ALEN);
- break;
- case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
- if (skb->len < IEEE80211_DATA_HDR4_LEN)
- goto rx_dropped;
- memcpy(dst, hdr->addr3, ETH_ALEN);
- memcpy(src, hdr->addr4, ETH_ALEN);
- memcpy(bssid, ieee->current_network.bssid, ETH_ALEN);
- break;
- default:
- memcpy(dst, hdr->addr1, ETH_ALEN);
- memcpy(src, hdr->addr2, ETH_ALEN);
- memcpy(bssid, hdr->addr3, ETH_ALEN);
- break;
- }
-
-#ifdef NOT_YET
- if (hostap_rx_frame_wds(ieee, hdr, fc, &wds))
- goto rx_dropped;
- if (wds) {
- skb->dev = dev = wds;
- stats = hostap_get_stats(dev);
- }
-
- if (ieee->iw_mode == IW_MODE_MASTER && !wds &&
- (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS &&
- ieee->stadev &&
- memcmp(hdr->addr2, ieee->assoc_ap_addr, ETH_ALEN) == 0) {
- /* Frame from BSSID of the AP for which we are a client */
- skb->dev = dev = ieee->stadev;
- stats = hostap_get_stats(dev);
- from_assoc_ap = 1;
- }
-
- if ((ieee->iw_mode == IW_MODE_MASTER ||
- ieee->iw_mode == IW_MODE_REPEAT) &&
- !from_assoc_ap) {
- switch (hostap_handle_sta_rx(ieee, dev, skb, rx_stats,
- wds)) {
- case AP_RX_CONTINUE_NOT_AUTHORIZED:
- case AP_RX_CONTINUE:
- break;
- case AP_RX_DROP:
- goto rx_dropped;
- case AP_RX_EXIT:
- goto rx_exit;
- }
- }
-#endif
- //IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, skb->data, skb->len);
- /* Nullfunc frames may have PS-bit set, so they must be passed to
- * hostap_handle_sta_rx() before being dropped here. */
- if (stype != IEEE80211_STYPE_DATA &&
- stype != IEEE80211_STYPE_DATA_CFACK &&
- stype != IEEE80211_STYPE_DATA_CFPOLL &&
- stype != IEEE80211_STYPE_DATA_CFACKPOLL &&
- stype != IEEE80211_STYPE_QOS_DATA//add by David,2006.8.4
- ) {
- if (stype != IEEE80211_STYPE_NULLFUNC)
- IEEE80211_DEBUG_DROP(
- "RX: dropped data frame "
- "with no data (type=0x%02x, "
- "subtype=0x%02x, len=%d)\n",
- type, stype, skb->len);
- goto rx_dropped;
- }
- if (memcmp(bssid, ieee->current_network.bssid, ETH_ALEN))
- goto rx_dropped;
-
- /* skb: hdr + (possibly fragmented, possibly encrypted) payload */
-
- if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP)) {
- keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt);
- if (keyidx < 0) {
- netdev_dbg(ieee->dev, "decrypt frame error\n");
- goto rx_dropped;
- }
- }
-
-
- hdr = (struct rtl_80211_hdr_4addr *)skb->data;
-
- /* skb: hdr + (possibly fragmented) plaintext payload */
- // PR: FIXME: hostap has additional conditions in the "if" below:
- // ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
- if ((frag != 0 || (fc & IEEE80211_FCTL_MOREFRAGS))) {
- int flen;
- struct sk_buff *frag_skb = ieee80211_frag_cache_get(ieee, hdr);
- IEEE80211_DEBUG_FRAG("Rx Fragment received (%u)\n", frag);
-
- if (!frag_skb) {
- IEEE80211_DEBUG(IEEE80211_DL_RX | IEEE80211_DL_FRAG,
- "Rx cannot get skb from fragment "
- "cache (morefrag=%d seq=%u frag=%u)\n",
- (fc & IEEE80211_FCTL_MOREFRAGS) != 0,
- WLAN_GET_SEQ_SEQ(sc), frag);
- goto rx_dropped;
- }
- flen = skb->len;
- if (frag != 0)
- flen -= hdrlen;
-
- if (frag_skb->tail + flen > frag_skb->end) {
- netdev_warn(dev, "host decrypted and "
- "reassembled frame did not fit skb\n");
- ieee80211_frag_cache_invalidate(ieee, hdr);
- goto rx_dropped;
- }
-
- if (frag == 0) {
- /* copy first fragment (including full headers) into
- * beginning of the fragment cache skb */
- skb_put_data(frag_skb, skb->data, flen);
- } else {
- /* append frame payload to the end of the fragment
- * cache skb */
- skb_put_data(frag_skb, skb->data + hdrlen, flen);
- }
- dev_kfree_skb_any(skb);
- skb = NULL;
-
- if (fc & IEEE80211_FCTL_MOREFRAGS) {
- /* more fragments expected - leave the skb in fragment
- * cache for now; it will be delivered to upper layers
- * after all fragments have been received */
- goto rx_exit;
- }
-
- /* this was the last fragment and the frame will be
- * delivered, so remove skb from fragment cache */
- skb = frag_skb;
- hdr = (struct rtl_80211_hdr_4addr *)skb->data;
- ieee80211_frag_cache_invalidate(ieee, hdr);
- }
-
-	/* skb: hdr + (possibly reassembled) full MSDU payload; possibly still
- * encrypted/authenticated */
- if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
- ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) {
- netdev_dbg(ieee->dev, "==>decrypt msdu error\n");
- goto rx_dropped;
- }
-
- //added by amy for AP roaming
- ieee->LinkDetectInfo.NumRecvDataInPeriod++;
- ieee->LinkDetectInfo.NumRxOkInPeriod++;
-
- hdr = (struct rtl_80211_hdr_4addr *)skb->data;
- if (crypt && !(fc & IEEE80211_FCTL_WEP) && !ieee->open_wep) {
- if (/*ieee->ieee802_1x &&*/
- ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
-
-#ifdef CONFIG_IEEE80211_DEBUG
- /* pass unencrypted EAPOL frames even if encryption is
- * configured */
- struct eapol *eap = (struct eapol *)(skb->data +
- 24);
- IEEE80211_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
- eap_get_type(eap->type));
-#endif
- } else {
- IEEE80211_DEBUG_DROP(
- "encryption configured, but RX "
- "frame not encrypted (SA=%pM)\n",
- hdr->addr2);
- goto rx_dropped;
- }
- }
-
-#ifdef CONFIG_IEEE80211_DEBUG
- if (crypt && !(fc & IEEE80211_FCTL_WEP) &&
- ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
- struct eapol *eap = (struct eapol *)(skb->data +
- 24);
- IEEE80211_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
- eap_get_type(eap->type));
- }
-#endif
-
- if (crypt && !(fc & IEEE80211_FCTL_WEP) && !ieee->open_wep &&
- !ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
- IEEE80211_DEBUG_DROP(
- "dropped unencrypted RX data "
- "frame from %pM"
- " (drop_unencrypted=1)\n",
- hdr->addr2);
- goto rx_dropped;
- }
-/*
- if(ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
-		printk(KERN_WARNING "RX: IEEE802.1X EAPOL frame!\n");
- }
-*/
-//added by amy for reorder
- if (ieee->current_network.qos_data.active && IsQoSDataFrame(skb->data)
- && !is_multicast_ether_addr(hdr->addr1)) {
- TID = Frame_QoSTID(skb->data);
- SeqNum = WLAN_GET_SEQ_SEQ(sc);
- GetTs(ieee, (struct ts_common_info **)&pTS, hdr->addr2, TID, RX_DIR, true);
- if (TID != 0 && TID != 3)
- ieee->bis_any_nonbepkts = true;
- }
-//added by amy for reorder
-	/* skb: hdr + (possibly reassembled) full plaintext payload */
- //ethertype = (payload[6] << 8) | payload[7];
- rxb = kmalloc(sizeof(struct ieee80211_rxb), GFP_ATOMIC);
- if (!rxb)
- goto rx_dropped;
- /* to parse amsdu packets */
- /* qos data packets & reserved bit is 1 */
- if (parse_subframe(ieee, skb, rx_stats, rxb, src, dst) == 0) {
- /* only to free rxb, and not submit the packets to upper layer */
- for (i = 0; i < rxb->nr_subframes; i++)
- dev_kfree_skb(rxb->subframes[i]);
-
- kfree(rxb);
- rxb = NULL;
- goto rx_dropped;
- }
-
-//added by amy for reorder
- if (!ieee->pHTInfo->bCurRxReorderEnable || !pTS) {
- indicate_packets(ieee, rxb);
- kfree(rxb);
- rxb = NULL;
-
- } else {
- IEEE80211_DEBUG(IEEE80211_DL_REORDER, "%s(): REORDER ENABLE AND PTS not NULL, and we will enter RxReorderIndicatePacket()\n", __func__);
- RxReorderIndicatePacket(ieee, rxb, pTS, SeqNum);
- }
-#ifndef JOHN_NOCPY
- dev_kfree_skb(skb);
-#endif
-
- rx_exit:
-#ifdef NOT_YET
- if (sta)
- hostap_handle_sta_release(sta);
-#endif
- return 1;
-
- rx_dropped:
- kfree(rxb);
- rxb = NULL;
- stats->rx_dropped++;
-
- /* Returning 0 indicates to caller that we have not handled the SKB--
- * so it is still allocated and can be used again by underlying
- * hardware as a DMA target */
- return 0;
-}
-EXPORT_SYMBOL(ieee80211_rx);
-
-#define MGMT_FRAME_FIXED_PART_LENGTH 0x24
-
-static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
-
-/*
-* Verify that the QoS information element read from the beacon
-* packet carries the expected ID, OUI, subtype and version.
-*/
-static int ieee80211_verify_qos_info(struct ieee80211_qos_information_element
- *info_element, int sub_type)
-{
- if (info_element->elementID != QOS_ELEMENT_ID)
- return -1;
- if (info_element->qui_subtype != sub_type)
- return -1;
- if (memcmp(info_element->qui, qos_oui, QOS_OUI_LEN))
- return -1;
- if (info_element->qui_type != QOS_OUI_TYPE)
- return -1;
- if (info_element->version != QOS_VERSION_1)
- return -1;
-
- return 0;
-}
-
-
-/*
- * Parse a QoS parameter element
- */
-static int ieee80211_read_qos_param_element(
- struct ieee80211_qos_parameter_info *element_param,
- struct ieee80211_info_element *info_element)
-{
- size_t size = sizeof(*element_param);
-
- if (!element_param || !info_element || info_element->len != size - 2)
- return -1;
-
- memcpy(element_param, info_element, size);
- return ieee80211_verify_qos_info(&element_param->info_element,
- QOS_OUI_PARAM_SUB_TYPE);
-}
-
-/*
- * Parse a QoS information element
- */
-static int ieee80211_read_qos_info_element(
- struct ieee80211_qos_information_element *element_info,
- struct ieee80211_info_element *info_element)
-{
- size_t size = sizeof(*element_info);
-
- if (!element_info || !info_element || info_element->len != size - 2)
- return -1;
-
- memcpy(element_info, info_element, size);
- return ieee80211_verify_qos_info(element_info, QOS_OUI_INFO_SUB_TYPE);
-}
-
-
-/*
- * Write QoS parameters from the ac parameters.
- */
-static int ieee80211_qos_convert_ac_to_parameters(
- struct ieee80211_qos_parameter_info *param_elm,
- struct ieee80211_qos_parameters *qos_param)
-{
- int i;
- struct ieee80211_qos_ac_parameter *ac_params;
- u8 aci;
- //u8 cw_min;
- //u8 cw_max;
-
- for (i = 0; i < QOS_QUEUE_NUM; i++) {
- ac_params = &(param_elm->ac_params_record[i]);
-
- aci = (ac_params->aci_aifsn & 0x60) >> 5;
-
- if (aci >= QOS_QUEUE_NUM)
- continue;
- qos_param->aifs[aci] = (ac_params->aci_aifsn) & 0x0f;
-
- /* WMM spec P.11: The minimum value for AIFSN shall be 2 */
- qos_param->aifs[aci] = (qos_param->aifs[aci] < 2) ? 2 : qos_param->aifs[aci];
-
- qos_param->cw_min[aci] =
- cpu_to_le16(ac_params->ecw_min_max & 0x0F);
-
- qos_param->cw_max[aci] =
- cpu_to_le16((ac_params->ecw_min_max & 0xF0) >> 4);
-
- qos_param->flag[aci] =
- (ac_params->aci_aifsn & 0x10) ? 0x01 : 0x00;
- qos_param->tx_op_limit[aci] = ac_params->tx_op_limit;
- }
- return 0;
-}
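
The masks above unpack the WMM AC parameter record: ACI in bits 5-6, the ACM flag in bit 4 and AIFSN in bits 0-3 of the first byte, with ECWmin/ECWmax in the low/high nibble of the second. Per the WMM specification the actual contention windows follow as 2^ECW - 1; a standalone sketch of the decoding (hypothetical record values):

#include <stdio.h>

/* Unpack one WMM AC parameter record: the ACI/AIFSN byte and the ECW byte. */
static void decode_ac(unsigned char aci_aifsn, unsigned char ecw)
{
	unsigned aci     = (aci_aifsn & 0x60) >> 5;	/* access category index */
	unsigned acm     = (aci_aifsn & 0x10) >> 4;	/* admission control mandatory */
	unsigned aifsn   =  aci_aifsn & 0x0f;		/* minimum of 2 per WMM spec */
	unsigned ecw_min =  ecw & 0x0f;
	unsigned ecw_max = (ecw & 0xf0) >> 4;

	if (aifsn < 2)
		aifsn = 2;
	printf("aci=%u acm=%u aifsn=%u cw_min=%u cw_max=%u\n",
	       aci, acm, aifsn, (1u << ecw_min) - 1, (1u << ecw_max) - 1);
}

int main(void)
{
	decode_ac(0x42, 0xa4);	/* hypothetical AC_VI-style record */
	return 0;
}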
-
-/*
- * A generic data element may contain either a QoS information element or
- * a QoS parameter element; check the information element length to decide
- * which type to read.
- */
-static int ieee80211_parse_qos_info_param_IE(struct ieee80211_info_element
- *info_element,
- struct ieee80211_network *network)
-{
- int rc = 0;
- struct ieee80211_qos_parameters *qos_param = NULL;
- struct ieee80211_qos_information_element qos_info_element;
-
- rc = ieee80211_read_qos_info_element(&qos_info_element, info_element);
-
- if (rc == 0) {
- network->qos_data.param_count = qos_info_element.ac_info & 0x0F;
- network->flags |= NETWORK_HAS_QOS_INFORMATION;
- } else {
- struct ieee80211_qos_parameter_info param_element;
-
- rc = ieee80211_read_qos_param_element(&param_element,
- info_element);
- if (rc == 0) {
- qos_param = &(network->qos_data.parameters);
- ieee80211_qos_convert_ac_to_parameters(&param_element,
- qos_param);
- network->flags |= NETWORK_HAS_QOS_PARAMETERS;
- network->qos_data.param_count =
- param_element.info_element.ac_info & 0x0F;
- }
- }
-
- if (rc == 0) {
- IEEE80211_DEBUG_QOS("QoS is supported\n");
- network->qos_data.supported = 1;
- }
- return rc;
-}
-
-#ifdef CONFIG_IEEE80211_DEBUG
-#define MFIE_STRING(x) case MFIE_TYPE_ ##x: return #x
-
-static const char *get_info_element_string(u16 id)
-{
- switch (id) {
- MFIE_STRING(SSID);
- MFIE_STRING(RATES);
- MFIE_STRING(FH_SET);
- MFIE_STRING(DS_SET);
- MFIE_STRING(CF_SET);
- MFIE_STRING(TIM);
- MFIE_STRING(IBSS_SET);
- MFIE_STRING(COUNTRY);
- MFIE_STRING(HOP_PARAMS);
- MFIE_STRING(HOP_TABLE);
- MFIE_STRING(REQUEST);
- MFIE_STRING(CHALLENGE);
- MFIE_STRING(POWER_CONSTRAINT);
- MFIE_STRING(POWER_CAPABILITY);
- MFIE_STRING(TPC_REQUEST);
- MFIE_STRING(TPC_REPORT);
- MFIE_STRING(SUPP_CHANNELS);
- MFIE_STRING(CSA);
- MFIE_STRING(MEASURE_REQUEST);
- MFIE_STRING(MEASURE_REPORT);
- MFIE_STRING(QUIET);
- MFIE_STRING(IBSS_DFS);
- // MFIE_STRING(ERP_INFO);
- MFIE_STRING(RSN);
- MFIE_STRING(RATES_EX);
- MFIE_STRING(GENERIC);
- MFIE_STRING(QOS_PARAMETER);
- default:
- return "UNKNOWN";
- }
-}
-#endif
-
-static inline void ieee80211_extract_country_ie(
- struct ieee80211_device *ieee,
- struct ieee80211_info_element *info_element,
- struct ieee80211_network *network,
- u8 *addr2
-)
-{
- if (IS_DOT11D_ENABLE(ieee)) {
- if (info_element->len != 0) {
- memcpy(network->CountryIeBuf, info_element->data, info_element->len);
- network->CountryIeLen = info_element->len;
-
- if (!IS_COUNTRY_IE_VALID(ieee)) {
- dot11d_update_country_ie(ieee, addr2, info_element->len, info_element->data);
- }
- }
-
- //
- // 070305, rcnjko: I update country IE watch dog here because
- // some AP (e.g. Cisco 1242) don't include country IE in their
-	// some APs (e.g. Cisco 1242) don't include a country IE in their
- //
- if (IS_EQUAL_CIE_SRC(ieee, addr2))
- UPDATE_CIE_WATCHDOG(ieee);
- }
-}
-
-int ieee80211_parse_info_param(struct ieee80211_device *ieee,
- struct ieee80211_info_element *info_element,
- u16 length,
- struct ieee80211_network *network,
- struct ieee80211_rx_stats *stats)
-{
- u8 i;
- short offset;
- u16 tmp_htcap_len = 0;
- u16 tmp_htinfo_len = 0;
- u16 ht_realtek_agg_len = 0;
- u8 ht_realtek_agg_buf[MAX_IE_LEN];
-// u16 broadcom_len = 0;
-#ifdef CONFIG_IEEE80211_DEBUG
- char rates_str[64];
- char *p;
-#endif
-
- while (length >= sizeof(*info_element)) {
- if (sizeof(*info_element) + info_element->len > length) {
- IEEE80211_DEBUG_MGMT("Info elem: parse failed: "
- "info_element->len + 2 > left : "
- "info_element->len+2=%zd left=%d, id=%d.\n",
- info_element->len +
- sizeof(*info_element),
- length, info_element->id);
- /* We stop processing but don't return an error here
-			 * because some misbehaving APs break this rule, e.g.
-			 * the Orinoco AP1000. */
- break;
- }
-
- switch (info_element->id) {
- case MFIE_TYPE_SSID:
- if (ieee80211_is_empty_essid(info_element->data,
- info_element->len)) {
- network->flags |= NETWORK_EMPTY_ESSID;
- break;
- }
-
- network->ssid_len = min(info_element->len,
- (u8)IW_ESSID_MAX_SIZE);
- memcpy(network->ssid, info_element->data, network->ssid_len);
- if (network->ssid_len < IW_ESSID_MAX_SIZE)
- memset(network->ssid + network->ssid_len, 0,
- IW_ESSID_MAX_SIZE - network->ssid_len);
-
- IEEE80211_DEBUG_MGMT("MFIE_TYPE_SSID: '%s' len=%d.\n",
- network->ssid, network->ssid_len);
- break;
-
- case MFIE_TYPE_RATES:
-#ifdef CONFIG_IEEE80211_DEBUG
- p = rates_str;
-#endif
- network->rates_len = min(info_element->len,
- MAX_RATES_LENGTH);
- for (i = 0; i < network->rates_len; i++) {
- network->rates[i] = info_element->data[i];
-#ifdef CONFIG_IEEE80211_DEBUG
- p += scnprintf(p, sizeof(rates_str) -
- (p - rates_str), "%02X ",
- network->rates[i]);
-#endif
- if (ieee80211_is_ofdm_rate
- (info_element->data[i])) {
- network->flags |= NETWORK_HAS_OFDM;
- if (info_element->data[i] &
- IEEE80211_BASIC_RATE_MASK)
- network->flags &=
- ~NETWORK_HAS_CCK;
- }
- }
-
- IEEE80211_DEBUG_MGMT("MFIE_TYPE_RATES: '%s' (%d)\n",
- rates_str, network->rates_len);
- break;
-
- case MFIE_TYPE_RATES_EX:
-#ifdef CONFIG_IEEE80211_DEBUG
- p = rates_str;
-#endif
- network->rates_ex_len = min(info_element->len,
- MAX_RATES_EX_LENGTH);
- for (i = 0; i < network->rates_ex_len; i++) {
- network->rates_ex[i] = info_element->data[i];
-#ifdef CONFIG_IEEE80211_DEBUG
- p += scnprintf(p, sizeof(rates_str) -
- (p - rates_str), "%02X ",
- network->rates_ex[i]);
-#endif
- if (ieee80211_is_ofdm_rate
- (info_element->data[i])) {
- network->flags |= NETWORK_HAS_OFDM;
- if (info_element->data[i] &
- IEEE80211_BASIC_RATE_MASK)
- network->flags &=
- ~NETWORK_HAS_CCK;
- }
- }
-
- IEEE80211_DEBUG_MGMT("MFIE_TYPE_RATES_EX: '%s' (%d)\n",
- rates_str, network->rates_ex_len);
- break;
-
- case MFIE_TYPE_DS_SET:
- IEEE80211_DEBUG_MGMT("MFIE_TYPE_DS_SET: %d\n",
- info_element->data[0]);
- network->channel = info_element->data[0];
- break;
-
- case MFIE_TYPE_FH_SET:
- IEEE80211_DEBUG_MGMT("MFIE_TYPE_FH_SET: ignored\n");
- break;
-
- case MFIE_TYPE_CF_SET:
- IEEE80211_DEBUG_MGMT("MFIE_TYPE_CF_SET: ignored\n");
- break;
-
- case MFIE_TYPE_TIM:
- if (info_element->len < 4)
- break;
-
- network->tim.tim_count = info_element->data[0];
- network->tim.tim_period = info_element->data[1];
-
- network->dtim_period = info_element->data[1];
- if (ieee->state != IEEE80211_LINKED)
- break;
-
- network->last_dtim_sta_time[0] = stats->mac_time[0];
- network->last_dtim_sta_time[1] = stats->mac_time[1];
-
- network->dtim_data = IEEE80211_DTIM_VALID;
-
- if (info_element->data[0] != 0)
- break;
-
- if (info_element->data[2] & 1)
- network->dtim_data |= IEEE80211_DTIM_MBCAST;
-
- offset = (info_element->data[2] >> 1) * 2;
-
- if (ieee->assoc_id < 8 * offset ||
- ieee->assoc_id > 8 * (offset + info_element->len - 3))
-
- break;
-
- offset = (ieee->assoc_id / 8) - offset;// + ((aid % 8)? 0 : 1) ;
-
- if (info_element->data[3 + offset] & (1 << (ieee->assoc_id % 8)))
- network->dtim_data |= IEEE80211_DTIM_UCAST;
-
- //IEEE80211_DEBUG_MGMT("MFIE_TYPE_TIM: partially ignored\n");
- break;
-
- case MFIE_TYPE_ERP:
- network->erp_value = info_element->data[0];
- network->flags |= NETWORK_HAS_ERP_VALUE;
- IEEE80211_DEBUG_MGMT("MFIE_TYPE_ERP_SET: %d\n",
- network->erp_value);
- break;
- case MFIE_TYPE_IBSS_SET:
- network->atim_window = info_element->data[0];
- IEEE80211_DEBUG_MGMT("MFIE_TYPE_IBSS_SET: %d\n",
- network->atim_window);
- break;
-
- case MFIE_TYPE_CHALLENGE:
- IEEE80211_DEBUG_MGMT("MFIE_TYPE_CHALLENGE: ignored\n");
- break;
-
- case MFIE_TYPE_GENERIC:
- IEEE80211_DEBUG_MGMT("MFIE_TYPE_GENERIC: %d bytes\n",
- info_element->len);
- if (!ieee80211_parse_qos_info_param_IE(info_element,
- network))
- break;
-
- if (info_element->len >= 4 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x50 &&
- info_element->data[2] == 0xf2 &&
- info_element->data[3] == 0x01) {
- network->wpa_ie_len = min(info_element->len + 2,
- MAX_WPA_IE_LEN);
- memcpy(network->wpa_ie, info_element,
- network->wpa_ie_len);
- break;
- }
-
-#ifdef THOMAS_TURBO
- if (info_element->len == 7 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0xe0 &&
- info_element->data[2] == 0x4c &&
- info_element->data[3] == 0x01 &&
- info_element->data[4] == 0x02) {
- network->Turbo_Enable = 1;
- }
-#endif
-
- //for HTcap and HTinfo parameters
- if (tmp_htcap_len == 0) {
- if (info_element->len >= 4 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x90 &&
- info_element->data[2] == 0x4c &&
- info_element->data[3] == 0x033){
-
- tmp_htcap_len = min(info_element->len, (u8)MAX_IE_LEN);
- if (tmp_htcap_len != 0) {
- network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
- network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf) ? \
- sizeof(network->bssht.bdHTCapBuf) : tmp_htcap_len;
- memcpy(network->bssht.bdHTCapBuf, info_element->data, network->bssht.bdHTCapLen);
- }
- }
- if (tmp_htcap_len != 0)
- network->bssht.bdSupportHT = true;
- else
- network->bssht.bdSupportHT = false;
- }
-
-
- if (tmp_htinfo_len == 0) {
- if (info_element->len >= 4 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x90 &&
- info_element->data[2] == 0x4c &&
- info_element->data[3] == 0x034){
-
- tmp_htinfo_len = min(info_element->len, (u8)MAX_IE_LEN);
- if (tmp_htinfo_len != 0) {
- network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
- if (tmp_htinfo_len) {
- network->bssht.bdHTInfoLen = tmp_htinfo_len > sizeof(network->bssht.bdHTInfoBuf) ? \
- sizeof(network->bssht.bdHTInfoBuf) : tmp_htinfo_len;
- memcpy(network->bssht.bdHTInfoBuf, info_element->data, network->bssht.bdHTInfoLen);
- }
-
- }
-
- }
- }
-
- if (ieee->aggregation) {
- if (network->bssht.bdSupportHT) {
- if (info_element->len >= 4 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0xe0 &&
- info_element->data[2] == 0x4c &&
- info_element->data[3] == 0x02){
-
- ht_realtek_agg_len = min(info_element->len, (u8)MAX_IE_LEN);
- memcpy(ht_realtek_agg_buf, info_element->data, info_element->len);
-
- }
- if (ht_realtek_agg_len >= 5) {
- network->bssht.bdRT2RTAggregation = true;
-
- if ((ht_realtek_agg_buf[4] == 1) && (ht_realtek_agg_buf[5] & 0x02))
- network->bssht.bdRT2RTLongSlotTime = true;
- }
- }
-
- }
-
- //if(tmp_htcap_len !=0 || tmp_htinfo_len != 0)
- {
- if ((info_element->len >= 3 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x05 &&
- info_element->data[2] == 0xb5) ||
- (info_element->len >= 3 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x0a &&
- info_element->data[2] == 0xf7) ||
- (info_element->len >= 3 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x10 &&
- info_element->data[2] == 0x18)){
-
- network->broadcom_cap_exist = true;
-
- }
- }
- if (info_element->len >= 3 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x0c &&
- info_element->data[2] == 0x43) {
- network->ralink_cap_exist = true;
- } else
- network->ralink_cap_exist = false;
- //added by amy for atheros AP
- if ((info_element->len >= 3 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x03 &&
- info_element->data[2] == 0x7f) ||
- (info_element->len >= 3 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x13 &&
- info_element->data[2] == 0x74)) {
- netdev_dbg(ieee->dev, "========> Atheros AP exists\n");
- network->atheros_cap_exist = true;
- } else
- network->atheros_cap_exist = false;
-
- if (info_element->len >= 3 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x40 &&
- info_element->data[2] == 0x96) {
- network->cisco_cap_exist = true;
- } else
- network->cisco_cap_exist = false;
- //added by amy for LEAP of cisco
- if (info_element->len > 4 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x40 &&
- info_element->data[2] == 0x96 &&
- info_element->data[3] == 0x01) {
- if (info_element->len == 6) {
- memcpy(network->CcxRmState, &info_element[4], 2);
- if (network->CcxRmState[0] != 0)
- network->bCcxRmEnable = true;
- else
- network->bCcxRmEnable = false;
- //
- // CCXv4 Table 59-1 MBSSID Masks.
- //
- network->MBssidMask = network->CcxRmState[1] & 0x07;
- if (network->MBssidMask != 0) {
- network->bMBssidValid = true;
- network->MBssidMask = 0xff << (network->MBssidMask);
- ether_addr_copy(network->MBssid, network->bssid);
- network->MBssid[5] &= network->MBssidMask;
- } else {
- network->bMBssidValid = false;
- }
- } else {
- network->bCcxRmEnable = false;
- }
- }
- if (info_element->len > 4 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x40 &&
- info_element->data[2] == 0x96 &&
- info_element->data[3] == 0x03) {
- if (info_element->len == 5) {
- network->bWithCcxVerNum = true;
- network->BssCcxVerNumber = info_element->data[4];
- } else {
- network->bWithCcxVerNum = false;
- network->BssCcxVerNumber = 0;
- }
- }
- break;
-
- case MFIE_TYPE_RSN:
- IEEE80211_DEBUG_MGMT("MFIE_TYPE_RSN: %d bytes\n",
- info_element->len);
- network->rsn_ie_len = min(info_element->len + 2,
- MAX_WPA_IE_LEN);
- memcpy(network->rsn_ie, info_element,
- network->rsn_ie_len);
- break;
-
- //HT related element.
- case MFIE_TYPE_HT_CAP:
- IEEE80211_DEBUG_SCAN("MFIE_TYPE_HT_CAP: %d bytes\n",
- info_element->len);
- tmp_htcap_len = min(info_element->len, (u8)MAX_IE_LEN);
- if (tmp_htcap_len != 0) {
- network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
- network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf) ? \
- sizeof(network->bssht.bdHTCapBuf) : tmp_htcap_len;
- memcpy(network->bssht.bdHTCapBuf, info_element->data, network->bssht.bdHTCapLen);
-
-				// If the peer is HT but not WMM, call QosSetLegacyWMMParamWithHT().
-				// The Windows driver updates the WMM parameters on every received
-				// beacon once connected; the Linux driver differs slightly here.
- network->bssht.bdSupportHT = true;
- } else
- network->bssht.bdSupportHT = false;
- break;
-
-
- case MFIE_TYPE_HT_INFO:
- IEEE80211_DEBUG_SCAN("MFIE_TYPE_HT_INFO: %d bytes\n",
- info_element->len);
- tmp_htinfo_len = min(info_element->len, (u8)MAX_IE_LEN);
- if (tmp_htinfo_len) {
- network->bssht.bdHTSpecVer = HT_SPEC_VER_IEEE;
- network->bssht.bdHTInfoLen = tmp_htinfo_len > sizeof(network->bssht.bdHTInfoBuf) ? \
- sizeof(network->bssht.bdHTInfoBuf) : tmp_htinfo_len;
- memcpy(network->bssht.bdHTInfoBuf, info_element->data, network->bssht.bdHTInfoLen);
- }
- break;
-
- case MFIE_TYPE_AIRONET:
- IEEE80211_DEBUG_SCAN("MFIE_TYPE_AIRONET: %d bytes\n",
- info_element->len);
- if (info_element->len > IE_CISCO_FLAG_POSITION) {
- network->bWithAironetIE = true;
-
- // CCX 1 spec v1.13, A01.1 CKIP Negotiation (page23):
- // "A Cisco access point advertises support for CKIP in beacon and probe response packets,
- // by adding an Aironet element and setting one or both of the CKIP negotiation bits."
- if ((info_element->data[IE_CISCO_FLAG_POSITION] & SUPPORT_CKIP_MIC) ||
- (info_element->data[IE_CISCO_FLAG_POSITION] & SUPPORT_CKIP_PK)) {
- network->bCkipSupported = true;
- } else {
- network->bCkipSupported = false;
- }
- } else {
- network->bWithAironetIE = false;
- network->bCkipSupported = false;
- }
- break;
- case MFIE_TYPE_QOS_PARAMETER:
- netdev_err(ieee->dev,
- "QoS Error need to parse QOS_PARAMETER IE\n");
- break;
-
- case MFIE_TYPE_COUNTRY:
- IEEE80211_DEBUG_SCAN("MFIE_TYPE_COUNTRY: %d bytes\n",
- info_element->len);
- ieee80211_extract_country_ie(ieee, info_element, network, network->bssid);//addr2 is same as addr3 when from an AP
- break;
-/* TODO */
- default:
- IEEE80211_DEBUG_MGMT
- ("Unsupported info element: %s (%d)\n",
- get_info_element_string(info_element->id),
- info_element->id);
- break;
- }
-
- length -= sizeof(*info_element) + info_element->len;
- info_element =
- (struct ieee80211_info_element *)&info_element->
- data[info_element->len];
- }
-
- if (!network->atheros_cap_exist && !network->broadcom_cap_exist &&
- !network->cisco_cap_exist && !network->ralink_cap_exist && !network->bssht.bdRT2RTAggregation) {
- network->unknown_cap_exist = true;
- } else {
- network->unknown_cap_exist = false;
- }
- return 0;
-}
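
The loop above walks a chain of {id, len, data[len]} information elements and stops, rather than erroring out, when an element would overrun the remaining length. A standalone sketch of that walk over a hypothetical buffer:

#include <stdint.h>
#include <stdio.h>

/* Walk {id, len, data[len]} information elements; stop on a truncated element. */
static void walk_ies(const uint8_t *buf, size_t length)
{
	while (length >= 2) {
		uint8_t id  = buf[0];
		uint8_t len = buf[1];

		if ((size_t)len + 2 > length)
			break;		/* truncated element: stop parsing */
		printf("IE id=%u len=%u\n", id, len);
		buf    += 2 + len;
		length -= 2 + len;
	}
}

int main(void)
{
	/* SSID "ab" (id 0) followed by a DS Parameter Set (id 3, channel 6). */
	const uint8_t ies[] = { 0, 2, 'a', 'b', 3, 1, 6 };

	walk_ies(ies, sizeof(ies));
	return 0;
}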
-
-/* 0-100 index */
-static long ieee80211_translate_todbm(u8 signal_strength_index)
-{
- long signal_power; // in dBm.
-
- // Translate to dBm (x=0.5y-95).
- signal_power = (long)((signal_strength_index + 1) >> 1);
- signal_power -= 95;
-
- return signal_power;
-}
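
Worked through, the translation above halves the 0-100 index and subtracts 95, so index 0 maps to -95 dBm and index 60 to -65 dBm; a trivial standalone version:

#include <stdio.h>

/* Map a 0-100 signal strength index to dBm: x = y/2 - 95. */
static long index_to_dbm(unsigned char idx)
{
	return (long)((idx + 1) >> 1) - 95;
}

int main(void)
{
	printf("%ld %ld\n", index_to_dbm(0), index_to_dbm(60));	/* -95 -65 */
	return 0;
}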
-
-static inline int ieee80211_network_init(
- struct ieee80211_device *ieee,
- struct ieee80211_probe_response *beacon,
- struct ieee80211_network *network,
- struct ieee80211_rx_stats *stats)
-{
-#ifdef CONFIG_IEEE80211_DEBUG
- //char rates_str[64];
- //char *p;
-#endif
-
- network->qos_data.active = 0;
- network->qos_data.supported = 0;
- network->qos_data.param_count = 0;
- network->qos_data.old_param_count = 0;
-
- /* Pull out fixed field data */
- memcpy(network->bssid, beacon->header.addr3, ETH_ALEN);
- network->capability = le16_to_cpu(beacon->capability);
- network->last_scanned = jiffies;
- network->time_stamp[0] = le32_to_cpu(beacon->time_stamp[0]);
- network->time_stamp[1] = le32_to_cpu(beacon->time_stamp[1]);
- network->beacon_interval = le16_to_cpu(beacon->beacon_interval);
- /* Where to pull this? beacon->listen_interval;*/
- network->listen_interval = 0x0A;
- network->rates_len = network->rates_ex_len = 0;
- network->last_associate = 0;
- network->ssid_len = 0;
- network->flags = 0;
- network->atim_window = 0;
- network->erp_value = (network->capability & WLAN_CAPABILITY_IBSS) ?
- 0x3 : 0x0;
- network->berp_info_valid = false;
- network->broadcom_cap_exist = false;
- network->ralink_cap_exist = false;
- network->atheros_cap_exist = false;
- network->cisco_cap_exist = false;
- network->unknown_cap_exist = false;
-#ifdef THOMAS_TURBO
- network->Turbo_Enable = 0;
-#endif
- network->CountryIeLen = 0;
- memset(network->CountryIeBuf, 0, MAX_IE_LEN);
-//Initialize HT parameters
- //ieee80211_ht_initialize(&network->bssht);
- HTInitializeBssDesc(&network->bssht);
- if (stats->freq == IEEE80211_52GHZ_BAND) {
- /* for A band (No DS info) */
- network->channel = stats->received_channel;
- } else
- network->flags |= NETWORK_HAS_CCK;
-
- network->wpa_ie_len = 0;
- network->rsn_ie_len = 0;
-
- if (ieee80211_parse_info_param
- (ieee, beacon->info_element, stats->len - sizeof(*beacon), network, stats))
- return 1;
-
- network->mode = 0;
- if (stats->freq == IEEE80211_52GHZ_BAND)
- network->mode = IEEE_A;
- else {
- if (network->flags & NETWORK_HAS_OFDM)
- network->mode |= IEEE_G;
- if (network->flags & NETWORK_HAS_CCK)
- network->mode |= IEEE_B;
- }
-
- if (network->mode == 0) {
- IEEE80211_DEBUG_SCAN("Filtered out '%s (%pM)' "
- "network.\n",
- escape_essid(network->ssid,
- network->ssid_len),
- network->bssid);
- return 1;
- }
-
- if (network->bssht.bdSupportHT) {
- if (network->mode == IEEE_A)
- network->mode = IEEE_N_5G;
- else if (network->mode & (IEEE_G | IEEE_B))
- network->mode = IEEE_N_24G;
- }
- if (ieee80211_is_empty_essid(network->ssid, network->ssid_len))
- network->flags |= NETWORK_EMPTY_ESSID;
-
- stats->signal = 30 + (stats->SignalStrength * 70) / 100;
- stats->noise = ieee80211_translate_todbm((u8)(100 - stats->signal)) - 25;
-
- memcpy(&network->stats, stats, sizeof(network->stats));
-
- return 0;
-}
-
-static inline int is_same_network(struct ieee80211_network *src,
- struct ieee80211_network *dst, struct ieee80211_device *ieee)
-{
- /* A network is only a duplicate if the channel, BSSID, ESSID
- * and the capability field (in particular IBSS and BSS) all match.
-	 * We treat all <hidden> SSIDs with the same BSSID and channel
- * as one network */
- return //((src->ssid_len == dst->ssid_len) &&
- (((src->ssid_len == dst->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) &&
- (src->channel == dst->channel) &&
- !memcmp(src->bssid, dst->bssid, ETH_ALEN) &&
- //!memcmp(src->ssid, dst->ssid, src->ssid_len) &&
- (!memcmp(src->ssid, dst->ssid, src->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) &&
- ((src->capability & WLAN_CAPABILITY_IBSS) ==
- (dst->capability & WLAN_CAPABILITY_IBSS)) &&
- ((src->capability & WLAN_CAPABILITY_BSS) ==
- (dst->capability & WLAN_CAPABILITY_BSS)));
-}
-
-static inline void update_network(struct ieee80211_network *dst,
- struct ieee80211_network *src)
-{
- int qos_active;
- u8 old_param;
-
- memcpy(&dst->stats, &src->stats, sizeof(struct ieee80211_rx_stats));
- dst->capability = src->capability;
- memcpy(dst->rates, src->rates, src->rates_len);
- dst->rates_len = src->rates_len;
- memcpy(dst->rates_ex, src->rates_ex, src->rates_ex_len);
- dst->rates_ex_len = src->rates_ex_len;
- if (src->ssid_len > 0) {
- memset(dst->ssid, 0, dst->ssid_len);
- dst->ssid_len = src->ssid_len;
- memcpy(dst->ssid, src->ssid, src->ssid_len);
- }
- dst->mode = src->mode;
- dst->flags = src->flags;
- dst->time_stamp[0] = src->time_stamp[0];
- dst->time_stamp[1] = src->time_stamp[1];
- if (src->flags & NETWORK_HAS_ERP_VALUE) {
- dst->erp_value = src->erp_value;
- dst->berp_info_valid = src->berp_info_valid = true;
- }
- dst->beacon_interval = src->beacon_interval;
- dst->listen_interval = src->listen_interval;
- dst->atim_window = src->atim_window;
- dst->dtim_period = src->dtim_period;
- dst->dtim_data = src->dtim_data;
- dst->last_dtim_sta_time[0] = src->last_dtim_sta_time[0];
- dst->last_dtim_sta_time[1] = src->last_dtim_sta_time[1];
- memcpy(&dst->tim, &src->tim, sizeof(struct ieee80211_tim_parameters));
-
- dst->bssht.bdSupportHT = src->bssht.bdSupportHT;
- dst->bssht.bdRT2RTAggregation = src->bssht.bdRT2RTAggregation;
- dst->bssht.bdHTCapLen = src->bssht.bdHTCapLen;
- memcpy(dst->bssht.bdHTCapBuf, src->bssht.bdHTCapBuf, src->bssht.bdHTCapLen);
- dst->bssht.bdHTInfoLen = src->bssht.bdHTInfoLen;
- memcpy(dst->bssht.bdHTInfoBuf, src->bssht.bdHTInfoBuf, src->bssht.bdHTInfoLen);
- dst->bssht.bdHTSpecVer = src->bssht.bdHTSpecVer;
- dst->bssht.bdRT2RTLongSlotTime = src->bssht.bdRT2RTLongSlotTime;
- dst->broadcom_cap_exist = src->broadcom_cap_exist;
- dst->ralink_cap_exist = src->ralink_cap_exist;
- dst->atheros_cap_exist = src->atheros_cap_exist;
- dst->cisco_cap_exist = src->cisco_cap_exist;
- dst->unknown_cap_exist = src->unknown_cap_exist;
- memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len);
- dst->wpa_ie_len = src->wpa_ie_len;
- memcpy(dst->rsn_ie, src->rsn_ie, src->rsn_ie_len);
- dst->rsn_ie_len = src->rsn_ie_len;
-
- dst->last_scanned = jiffies;
- /* qos related parameters */
- //qos_active = src->qos_data.active;
- qos_active = dst->qos_data.active;
- //old_param = dst->qos_data.old_param_count;
- old_param = dst->qos_data.param_count;
- if (dst->flags & NETWORK_HAS_QOS_MASK)
- memcpy(&dst->qos_data, &src->qos_data,
- sizeof(struct ieee80211_qos_data));
- else {
- dst->qos_data.supported = src->qos_data.supported;
- dst->qos_data.param_count = src->qos_data.param_count;
- }
-
- if (dst->qos_data.supported == 1) {
- dst->QoS_Enable = 1;
- if (dst->ssid_len)
- IEEE80211_DEBUG_QOS
- ("QoS the network %s is QoS supported\n",
- dst->ssid);
- else
- IEEE80211_DEBUG_QOS
- ("QoS the network is QoS supported\n");
- }
- dst->qos_data.active = qos_active;
- dst->qos_data.old_param_count = old_param;
-
- /* dst->last_associate is not overwritten */
- dst->wmm_info = src->wmm_info; //sure to exist in beacon or probe response frame.
- if (src->wmm_param[0].aci_aifsn || \
- src->wmm_param[1].aci_aifsn || \
- src->wmm_param[2].aci_aifsn || \
- src->wmm_param[3].aci_aifsn) {
- memcpy(dst->wmm_param, src->wmm_param, WME_AC_PRAM_LEN);
- }
- //dst->QoS_Enable = src->QoS_Enable;
-#ifdef THOMAS_TURBO
- dst->Turbo_Enable = src->Turbo_Enable;
-#endif
-
- dst->CountryIeLen = src->CountryIeLen;
- memcpy(dst->CountryIeBuf, src->CountryIeBuf, src->CountryIeLen);
-
- //added by amy for LEAP
- dst->bWithAironetIE = src->bWithAironetIE;
- dst->bCkipSupported = src->bCkipSupported;
- memcpy(dst->CcxRmState, src->CcxRmState, 2);
- dst->bCcxRmEnable = src->bCcxRmEnable;
- dst->MBssidMask = src->MBssidMask;
- dst->bMBssidValid = src->bMBssidValid;
- memcpy(dst->MBssid, src->MBssid, 6);
- dst->bWithCcxVerNum = src->bWithCcxVerNum;
- dst->BssCcxVerNumber = src->BssCcxVerNumber;
-
-}
-
-static inline int is_beacon(__le16 fc)
-{
- return (WLAN_FC_GET_STYPE(le16_to_cpu(fc)) == IEEE80211_STYPE_BEACON);
-}
-
-static inline void ieee80211_process_probe_response(
- struct ieee80211_device *ieee,
- struct ieee80211_probe_response *beacon,
- struct ieee80211_rx_stats *stats)
-{
- struct ieee80211_network *network;
- struct ieee80211_network *target;
- struct ieee80211_network *oldest = NULL;
-#ifdef CONFIG_IEEE80211_DEBUG
- struct ieee80211_info_element *info_element = &beacon->info_element[0];
-#endif
- int fc = WLAN_FC_GET_STYPE(le16_to_cpu(beacon->header.frame_ctl));
- unsigned long flags;
- short renew;
- u16 capability;
- //u8 wmm_info;
-
- network = kzalloc(sizeof(*network), GFP_ATOMIC);
- if (!network)
- goto out;
-
- capability = le16_to_cpu(beacon->capability);
- IEEE80211_DEBUG_SCAN(
- "'%s' (%pM): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
- escape_essid(info_element->data, info_element->len),
- beacon->header.addr3,
- (capability & BIT(0xf)) ? '1' : '0',
- (capability & BIT(0xe)) ? '1' : '0',
- (capability & BIT(0xd)) ? '1' : '0',
- (capability & BIT(0xc)) ? '1' : '0',
- (capability & BIT(0xb)) ? '1' : '0',
- (capability & BIT(0xa)) ? '1' : '0',
- (capability & BIT(0x9)) ? '1' : '0',
- (capability & BIT(0x8)) ? '1' : '0',
- (capability & BIT(0x7)) ? '1' : '0',
- (capability & BIT(0x6)) ? '1' : '0',
- (capability & BIT(0x5)) ? '1' : '0',
- (capability & BIT(0x4)) ? '1' : '0',
- (capability & BIT(0x3)) ? '1' : '0',
- (capability & BIT(0x2)) ? '1' : '0',
- (capability & BIT(0x1)) ? '1' : '0',
- (capability & BIT(0x0)) ? '1' : '0');
-
- if (ieee80211_network_init(ieee, beacon, network, stats)) {
- IEEE80211_DEBUG_SCAN("Dropped '%s' (%pM) via %s.\n",
- escape_essid(info_element->data,
- info_element->len),
- beacon->header.addr3,
- fc == IEEE80211_STYPE_PROBE_RESP ?
- "PROBE RESPONSE" : "BEACON");
- goto out;
- }
-
-	// For Asus EeePc request:
-	// (1) If the wireless adapter receives any 802.11d country code in the AP
-	//     beacon, it should follow that country code.
-	// (2) If there is no country code in the beacon, the wireless adapter
-	//     should do an active scan on ch1~11 and a passive scan on ch12~14.
-
- if (!is_legal_channel(ieee, network->channel))
- goto out;
- if (ieee->bGlobalDomain) {
- if (fc == IEEE80211_STYPE_PROBE_RESP) {
- if (IS_COUNTRY_IE_VALID(ieee)) {
- // Case 1: Country code
- if (!is_legal_channel(ieee, network->channel)) {
- netdev_warn(ieee->dev, "GetScanInfo(): For Country code, filter probe response at channel(%d).\n", network->channel);
- goto out;
- }
- } else {
-				// Case 2: No country code at all.
-				// Filter out channels ch12~14.
- if (network->channel > 11) {
- netdev_warn(ieee->dev, "GetScanInfo(): For Global Domain, filter probe response at channel(%d).\n", network->channel);
- goto out;
- }
- }
- } else {
- if (IS_COUNTRY_IE_VALID(ieee)) {
- // Case 1: Country code
- if (!is_legal_channel(ieee, network->channel)) {
- netdev_warn(ieee->dev, "GetScanInfo(): For Country code, filter beacon at channel(%d).\n", network->channel);
- goto out;
- }
- } else {
-				// Case 2: No country code at all.
- // Filter over channel ch12~14
- if (network->channel > 14) {
- netdev_warn(ieee->dev, "GetScanInfo(): For Global Domain, filter beacon at channel(%d).\n", network->channel);
- goto out;
- }
- }
- }
- }
-
- /* The network parsed correctly -- so now we scan our known networks
- * to see if we can find it in our list.
- *
-	 * NOTE: This search is definitely not optimized. Once it's doing
- * the "right thing" we'll optimize it for efficiency if
- * necessary */
-
- /* Search for this entry in the list and update it if it is
- * already there. */
-
- spin_lock_irqsave(&ieee->lock, flags);
-
- if (is_same_network(&ieee->current_network, network, ieee)) {
- update_network(&ieee->current_network, network);
- if ((ieee->current_network.mode == IEEE_N_24G || ieee->current_network.mode == IEEE_G)
- && ieee->current_network.berp_info_valid){
- if (ieee->current_network.erp_value & ERP_UseProtection)
- ieee->current_network.buseprotection = true;
- else
- ieee->current_network.buseprotection = false;
- }
- if (is_beacon(beacon->header.frame_ctl)) {
- if (ieee->state == IEEE80211_LINKED)
- ieee->LinkDetectInfo.NumRecvBcnInPeriod++;
- } else //hidden AP
- network->flags = (~NETWORK_EMPTY_ESSID & network->flags) | (NETWORK_EMPTY_ESSID & ieee->current_network.flags);
- }
-
- list_for_each_entry(target, &ieee->network_list, list) {
- if (is_same_network(target, network, ieee))
- break;
- if (!oldest ||
- (target->last_scanned < oldest->last_scanned))
- oldest = target;
- }
-
- /* If we didn't find a match, then get a new network slot to initialize
- * with this beacon's information */
- if (&target->list == &ieee->network_list) {
- if (list_empty(&ieee->network_free_list)) {
- /* If there are no more slots, expire the oldest */
- list_del(&oldest->list);
- target = oldest;
- IEEE80211_DEBUG_SCAN("Expired '%s' (%pM) from "
- "network list.\n",
- escape_essid(target->ssid,
- target->ssid_len),
- target->bssid);
- } else {
- /* Otherwise just pull from the free list */
- target = list_entry(ieee->network_free_list.next,
- struct ieee80211_network, list);
- list_del(ieee->network_free_list.next);
- }
-
-
-#ifdef CONFIG_IEEE80211_DEBUG
- IEEE80211_DEBUG_SCAN("Adding '%s' (%pM) via %s.\n",
- escape_essid(network->ssid,
- network->ssid_len),
- network->bssid,
- fc == IEEE80211_STYPE_PROBE_RESP ?
- "PROBE RESPONSE" : "BEACON");
-#endif
- memcpy(target, network, sizeof(*target));
- list_add_tail(&target->list, &ieee->network_list);
- if (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE)
- ieee80211_softmac_new_net(ieee, network);
- } else {
- IEEE80211_DEBUG_SCAN("Updating '%s' (%pM) via %s.\n",
- escape_essid(target->ssid,
- target->ssid_len),
- target->bssid,
- fc == IEEE80211_STYPE_PROBE_RESP ?
- "PROBE RESPONSE" : "BEACON");
-
-		/* we have an entry and we are going to update it. But this entry may
-		 * already have expired. In that case we do the same as if we had found
-		 * a new net and call the new_net handler
-		 */
- renew = !time_after(target->last_scanned + ieee->scan_age, jiffies);
- //YJ,add,080819,for hidden ap
- if (is_beacon(beacon->header.frame_ctl) == 0)
- network->flags = (~NETWORK_EMPTY_ESSID & network->flags) | (NETWORK_EMPTY_ESSID & target->flags);
- //if(strncmp(network->ssid, "linksys-c",9) == 0)
- // printk("====>2 network->ssid=%s FLAG=%d target.ssid=%s FLAG=%d\n", network->ssid, network->flags, target->ssid, target->flags);
- if (((network->flags & NETWORK_EMPTY_ESSID) == NETWORK_EMPTY_ESSID) \
- && (((network->ssid_len > 0) && (strncmp(target->ssid, network->ssid, network->ssid_len)))\
- || ((ieee->current_network.ssid_len == network->ssid_len) && (strncmp(ieee->current_network.ssid, network->ssid, network->ssid_len) == 0) && (ieee->state == IEEE80211_NOLINK))))
- renew = 1;
- //YJ,add,080819,for hidden ap,end
-
- update_network(target, network);
- if (renew && (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE))
- ieee80211_softmac_new_net(ieee, network);
- }
-
- spin_unlock_irqrestore(&ieee->lock, flags);
- if (is_beacon(beacon->header.frame_ctl) && is_same_network(&ieee->current_network, network, ieee) && \
- (ieee->state == IEEE80211_LINKED)) {
- if (ieee->handle_beacon)
- ieee->handle_beacon(ieee->dev, beacon, &ieee->current_network);
- }
-
-out:
- kfree(network);
-}
-
-void ieee80211_rx_mgt(struct ieee80211_device *ieee,
- struct rtl_80211_hdr_4addr *header,
- struct ieee80211_rx_stats *stats)
-{
- switch (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl))) {
-
- case IEEE80211_STYPE_BEACON:
- IEEE80211_DEBUG_MGMT("received BEACON (%d)\n",
- WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)));
- IEEE80211_DEBUG_SCAN("Beacon\n");
- ieee80211_process_probe_response(
- ieee, (struct ieee80211_probe_response *)header, stats);
- break;
-
- case IEEE80211_STYPE_PROBE_RESP:
- IEEE80211_DEBUG_MGMT("received PROBE RESPONSE (%d)\n",
- WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)));
- IEEE80211_DEBUG_SCAN("Probe response\n");
- ieee80211_process_probe_response(
- ieee, (struct ieee80211_probe_response *)header, stats);
- break;
-
- }
-}
-EXPORT_SYMBOL(ieee80211_rx_mgt);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
deleted file mode 100644
index 92001cb36730..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ /dev/null
@@ -1,3056 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* IEEE 802.11 SoftMAC layer
- * Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
- *
- * Mostly extracted from the rtl8180-sa2400 driver for the
- * in-kernel generic ieee802.11 stack.
- *
- * Few lines might be stolen from other part of the ieee80211
- * stack. Copyright who own it's copyright
- *
- * WPA code stolen from the ipw2200 driver.
- * Copyright who own it's copyright.
- */
-#include "ieee80211.h"
-
-#include <linux/random.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/etherdevice.h>
-
-#include "dot11d.h"
-
-short ieee80211_is_54g(const struct ieee80211_network *net)
-{
- return (net->rates_ex_len > 0) || (net->rates_len > 4);
-}
-EXPORT_SYMBOL(ieee80211_is_54g);
-
-short ieee80211_is_shortslot(const struct ieee80211_network *net)
-{
- return net->capability & WLAN_CAPABILITY_SHORT_SLOT;
-}
-EXPORT_SYMBOL(ieee80211_is_shortslot);
-
-/* returns the total length needed for placing the RATE MFIE
- * tag and the EXTENDED RATE MFIE tag if needed.
- * It includes two bytes per tag for the tag itself and its len
- */
-static unsigned int ieee80211_MFIE_rate_len(struct ieee80211_device *ieee)
-{
- unsigned int rate_len = 0;
-
- if (ieee->modulation & IEEE80211_CCK_MODULATION)
- rate_len = IEEE80211_CCK_RATE_LEN + 2;
-
- if (ieee->modulation & IEEE80211_OFDM_MODULATION)
- rate_len += IEEE80211_OFDM_RATE_LEN + 2;
-
- return rate_len;
-}
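
The length returned above can be checked by hand against ieee80211_MFIE_Brate()/ieee80211_MFIE_Grate() below: 4 CCK rates plus a 2-byte tag header, and 8 OFDM rates plus another 2-byte header, i.e. 6 + 10 = 16 bytes when both modulations are enabled. A minimal standalone sketch of that arithmetic (the two *_RATE_LEN values are assumptions read off the rate tables below, not taken from the driver header):

#include <stdio.h>

/* Assumed from the Brate/Grate helpers below: 4 CCK and 8 OFDM rates. */
#define CCK_RATE_LEN	4
#define OFDM_RATE_LEN	8

int main(void)
{
	/* each MFIE tag costs 2 extra bytes: element ID + length */
	unsigned int rate_len = (CCK_RATE_LEN + 2) + (OFDM_RATE_LEN + 2);

	printf("rate IEs need %u bytes\n", rate_len);	/* prints 16 */
	return 0;
}
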
-
-/* place the MFIE rate, tag to the memory (double) pointer.
- * Then it updates the pointer so that
- * it points after the new MFIE tag added.
- */
-static void ieee80211_MFIE_Brate(struct ieee80211_device *ieee, u8 **tag_p)
-{
- u8 *tag = *tag_p;
-
- if (ieee->modulation & IEEE80211_CCK_MODULATION) {
- *tag++ = MFIE_TYPE_RATES;
- *tag++ = 4;
- *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
- *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
- *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
- *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
- }
-
- /* We may add an option for custom rates that specific HW might support */
- *tag_p = tag;
-}
-
-static void ieee80211_MFIE_Grate(struct ieee80211_device *ieee, u8 **tag_p)
-{
- u8 *tag = *tag_p;
-
- if (ieee->modulation & IEEE80211_OFDM_MODULATION) {
- *tag++ = MFIE_TYPE_RATES_EX;
- *tag++ = 8;
- *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB;
- *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_9MB;
- *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_12MB;
- *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_18MB;
- *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
- *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_36MB;
- *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_48MB;
- *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB;
- }
-
- /* We may add an option for custom rates that specific HW might support */
- *tag_p = tag;
-}
-
-static void ieee80211_WMM_Info(struct ieee80211_device *ieee, u8 **tag_p)
-{
- u8 *tag = *tag_p;
-
- *tag++ = MFIE_TYPE_GENERIC; /* 0 */
- *tag++ = 7;
- *tag++ = 0x00;
- *tag++ = 0x50;
- *tag++ = 0xf2;
- *tag++ = 0x02; /* 5 */
- *tag++ = 0x00;
- *tag++ = 0x01;
-#ifdef SUPPORT_USPD
- if (ieee->current_network.wmm_info & 0x80)
- *tag++ = 0x0f | MAX_SP_Len;
- else
- *tag++ = MAX_SP_Len;
-#else
- *tag++ = MAX_SP_Len;
-#endif
- *tag_p = tag;
-}
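
The nine bytes written above form a WMM Information Element: a vendor-specific tag, length 7, the 00:50:F2 OUI, OUI type 2 (WMM), subtype 0 (information element), version 1, and a final QoS-info byte carrying the U-APSD/max-SP bits. A hedged standalone sketch of the same layout (the 0xdd value for MFIE_TYPE_GENERIC and the QoS-info byte are assumptions for illustration):

#include <stdio.h>

int main(void)
{
	/* Illustrative WMM Information Element mirroring ieee80211_WMM_Info() */
	const unsigned char wmm_ie[] = {
		0xdd,			/* vendor-specific element ID (assumed MFIE_TYPE_GENERIC) */
		7,			/* payload length */
		0x00, 0x50, 0xf2,	/* Microsoft OUI */
		0x02,			/* OUI type: WMM */
		0x00,			/* subtype: information element */
		0x01,			/* WMM version */
		0x00,			/* QoS info (U-APSD / max SP length); placeholder */
	};
	unsigned int i;

	for (i = 0; i < sizeof(wmm_ie); i++)
		printf("%02x ", wmm_ie[i]);
	printf("\n");
	return 0;
}
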
-
-#ifdef THOMAS_TURBO
-static void ieee80211_TURBO_Info(struct ieee80211_device *ieee, u8 **tag_p)
-{
- u8 *tag = *tag_p;
-
- *tag++ = MFIE_TYPE_GENERIC; /* 0 */
- *tag++ = 7;
- *tag++ = 0x00;
- *tag++ = 0xe0;
- *tag++ = 0x4c;
- *tag++ = 0x01; /* 5 */
- *tag++ = 0x02;
- *tag++ = 0x11;
- *tag++ = 0x00;
-
- *tag_p = tag;
- netdev_alert(ieee->dev, "This is enable turbo mode IE process\n");
-}
-#endif
-
-static void enqueue_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb)
-{
- int nh;
-
- nh = (ieee->mgmt_queue_head + 1) % MGMT_QUEUE_NUM;
-
-/*
- * if the queue is full but we have newer frames, then
- * just overwrite the oldest.
- *
- * if (nh == ieee->mgmt_queue_tail)
- * return -1;
- */
- ieee->mgmt_queue_head = nh;
- ieee->mgmt_queue_ring[nh] = skb;
-
- //return 0;
-}
-
-static struct sk_buff *dequeue_mgmt(struct ieee80211_device *ieee)
-{
- struct sk_buff *ret;
-
- if (ieee->mgmt_queue_tail == ieee->mgmt_queue_head)
- return NULL;
-
- ret = ieee->mgmt_queue_ring[ieee->mgmt_queue_tail];
-
- ieee->mgmt_queue_tail =
- (ieee->mgmt_queue_tail + 1) % MGMT_QUEUE_NUM;
-
- return ret;
-}
-
-static void init_mgmt_queue(struct ieee80211_device *ieee)
-{
- ieee->mgmt_queue_tail = ieee->mgmt_queue_head = 0;
-}
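
The three helpers above form a small ring buffer: head and tail advance modulo MGMT_QUEUE_NUM, equal indices mean "empty", and (as the comment in enqueue_mgmt() notes) a full ring silently overwrites the oldest frame instead of failing. A minimal userspace sketch of the same index arithmetic, with void * standing in for struct sk_buff * and an assumed ring size:

#include <stdio.h>

#define MGMT_QUEUE_NUM	5	/* assumed size for illustration */

static void *ring[MGMT_QUEUE_NUM];
static int head, tail;		/* head == tail means the ring is empty */

static void enqueue(void *item)
{
	head = (head + 1) % MGMT_QUEUE_NUM;
	ring[head] = item;	/* a full ring overwrites the oldest slot */
}

static void *dequeue(void)
{
	if (tail == head)
		return NULL;
	tail = (tail + 1) % MGMT_QUEUE_NUM;	/* advance, then read, so the
						 * indices stay paired with enqueue() */
	return ring[tail];
}

int main(void)
{
	int a = 1, b = 2;
	int *x, *y;

	enqueue(&a);
	enqueue(&b);
	x = dequeue();
	y = dequeue();
	printf("%d %d\n", *x, *y);	/* 1 2 */
	return 0;
}
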
-
-static u8 MgntQuery_MgntFrameTxRate(struct ieee80211_device *ieee)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- u8 rate;
-
- /* 2008/01/25 MH For broadcom, MGNT frame set as OFDM 6M. */
- if (pHTInfo->IOTAction & HT_IOT_ACT_MGNT_USE_CCK_6M)
- rate = 0x0c;
- else
- rate = ieee->basic_rate & 0x7f;
-
- if (rate == 0) {
- /* 2005.01.26, by rcnjko. */
- if (ieee->mode == IEEE_A ||
- ieee->mode == IEEE_N_5G ||
- (ieee->mode == IEEE_N_24G && !pHTInfo->bCurSuppCCK))
- rate = 0x0c;
- else
- rate = 0x02;
- }
-
- /*
- // Data rate of ProbeReq is already decided. Annie, 2005-03-31
- if( pMgntInfo->bScanInProgress || (pMgntInfo->bDualModeScanStep!=0) ) {
- if(pMgntInfo->dot11CurrentWirelessMode==WIRELESS_MODE_A)
- rate = 0x0c;
- else
- rate = 0x02;
- }
- */
- return rate;
-}
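
The rate values used here follow the 802.11 supported-rates encoding: the low seven bits give the rate in 500 kbit/s units (0x02 is 1 Mbit/s, 0x0c is 6 Mbit/s), and the top bit flags a basic rate, which is why basic_rate is masked with 0x7f. A small standalone helper showing the conversion (illustration only; the driver keeps the raw codes):

#include <stdio.h>

/* 802.11 rate code (500 kbit/s units, bit 7 = "basic rate") to kbit/s */
static unsigned int rate_code_to_kbps(unsigned char code)
{
	return (unsigned int)(code & 0x7f) * 500;
}

int main(void)
{
	printf("0x02 -> %u kbit/s\n", rate_code_to_kbps(0x02));	/* 1000  */
	printf("0x0c -> %u kbit/s\n", rate_code_to_kbps(0x0c));	/* 6000  */
	printf("0x96 -> %u kbit/s\n", rate_code_to_kbps(0x96));	/* 11000 */
	return 0;
}
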
-
-void ieee80211_sta_wakeup(struct ieee80211_device *ieee, short nl);
-
-inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee)
-{
- unsigned long flags;
- short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
- struct rtl_80211_hdr_3addr *header =
- (struct rtl_80211_hdr_3addr *)skb->data;
-
- struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
-
- spin_lock_irqsave(&ieee->lock, flags);
-
- /* called with 2nd param 0, no mgmt lock required */
- ieee80211_sta_wakeup(ieee, 0);
-
- tcb_desc->queue_index = MGNT_QUEUE;
- tcb_desc->data_rate = MgntQuery_MgntFrameTxRate(ieee);
- tcb_desc->RATRIndex = 7;
- tcb_desc->bTxDisableRateFallBack = 1;
- tcb_desc->bTxUseDriverAssingedRate = 1;
-
- if (single) {
- if (ieee->queue_stop) {
- enqueue_mgmt(ieee, skb);
- } else {
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
-
- if (ieee->seq_ctrl[0] == 0xFFF)
- ieee->seq_ctrl[0] = 0;
- else
- ieee->seq_ctrl[0]++;
-
- /* avoid watchdog triggers */
- netif_trans_update(ieee->dev);
- ieee->softmac_data_hard_start_xmit(skb, ieee->dev, ieee->basic_rate);
- //dev_kfree_skb_any(skb);//edit by thomas
- }
-
- spin_unlock_irqrestore(&ieee->lock, flags);
- } else {
- spin_unlock_irqrestore(&ieee->lock, flags);
- spin_lock_irqsave(&ieee->mgmt_tx_lock, flags);
-
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
-
- if (ieee->seq_ctrl[0] == 0xFFF)
- ieee->seq_ctrl[0] = 0;
- else
- ieee->seq_ctrl[0]++;
-
-		/* check whether the number of queued management packets is greater than 5 */
- if (!ieee->check_nic_enough_desc(ieee->dev, tcb_desc->queue_index) || \
- (skb_queue_len(&ieee->skb_waitQ[tcb_desc->queue_index]) != 0) || \
- (ieee->queue_stop)) {
-			/* insert the skb packet into the management queue;
-			 * the completion function does not need to check it
-			 * any more.
-			 */
- printk("%s():insert to waitqueue!\n", __func__);
- skb_queue_tail(&ieee->skb_waitQ[tcb_desc->queue_index], skb);
- } else {
- ieee->softmac_hard_start_xmit(skb, ieee->dev);
- //dev_kfree_skb_any(skb);//edit by thomas
- }
- spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags);
- }
-}
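
Both branches above fill the 802.11 Sequence Control field the same way: the low four bits hold the fragment number, the upper twelve hold the sequence number, so the counter is shifted left by four and wrapped at 0xFFF (4095). A hedged standalone illustration of the packing and the wrap rule:

#include <stdio.h>

/* Pack a 12-bit sequence number and 4-bit fragment number into the
 * Sequence Control field (host order; the driver then applies cpu_to_le16()). */
static unsigned short pack_seq_ctl(unsigned short seq, unsigned char frag)
{
	return (unsigned short)(((seq & 0x0fff) << 4) | (frag & 0x0f));
}

int main(void)
{
	unsigned short seq = 0x0fff;	/* about to wrap */

	printf("seq_ctl = 0x%04x\n", pack_seq_ctl(seq, 0));	/* 0xfff0 */
	seq = (seq == 0x0fff) ? 0 : seq + 1;			/* same wrap rule */
	printf("next seq = %u\n", seq);				/* 0 */
	return 0;
}
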
-
-static inline void
-softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee)
-{
- short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
- struct rtl_80211_hdr_3addr *header =
- (struct rtl_80211_hdr_3addr *)skb->data;
-
- if (single) {
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
-
- if (ieee->seq_ctrl[0] == 0xFFF)
- ieee->seq_ctrl[0] = 0;
- else
- ieee->seq_ctrl[0]++;
-
- /* avoid watchdog triggers */
- netif_trans_update(ieee->dev);
- ieee->softmac_data_hard_start_xmit(skb, ieee->dev, ieee->basic_rate);
- } else {
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
-
- if (ieee->seq_ctrl[0] == 0xFFF)
- ieee->seq_ctrl[0] = 0;
- else
- ieee->seq_ctrl[0]++;
-
- ieee->softmac_hard_start_xmit(skb, ieee->dev);
- }
- //dev_kfree_skb_any(skb);//edit by thomas
-}
-
-static inline struct sk_buff *ieee80211_probe_req(struct ieee80211_device *ieee)
-{
- unsigned int len, rate_len;
- u8 *tag;
- struct sk_buff *skb;
- struct ieee80211_probe_request *req;
-
- len = ieee->current_network.ssid_len;
-
- rate_len = ieee80211_MFIE_rate_len(ieee);
-
- skb = dev_alloc_skb(sizeof(struct ieee80211_probe_request) +
- 2 + len + rate_len + ieee->tx_headroom);
- if (!skb)
- return NULL;
-
- skb_reserve(skb, ieee->tx_headroom);
-
- req = skb_put(skb, sizeof(struct ieee80211_probe_request));
- req->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
- req->header.duration_id = 0; /* FIXME: is this OK? */
-
- eth_broadcast_addr(req->header.addr1);
- memcpy(req->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
- eth_broadcast_addr(req->header.addr3);
-
- tag = skb_put(skb, len + 2 + rate_len);
-
- *tag++ = MFIE_TYPE_SSID;
- *tag++ = len;
- memcpy(tag, ieee->current_network.ssid, len);
- tag += len;
-
- ieee80211_MFIE_Brate(ieee, &tag);
- ieee80211_MFIE_Grate(ieee, &tag);
- return skb;
-}
-
-struct sk_buff *ieee80211_get_beacon_(struct ieee80211_device *ieee);
-
-static void ieee80211_send_beacon(struct ieee80211_device *ieee)
-{
- struct sk_buff *skb;
-
- if (!ieee->ieee_up)
- return;
- //unsigned long flags;
- skb = ieee80211_get_beacon_(ieee);
-
- if (skb) {
- softmac_mgmt_xmit(skb, ieee);
- ieee->softmac_stats.tx_beacons++;
- //dev_kfree_skb_any(skb);//edit by thomas
- }
-// ieee->beacon_timer.expires = jiffies +
-// (MSECS( ieee->current_network.beacon_interval -5));
-
- //spin_lock_irqsave(&ieee->beacon_lock,flags);
- if (ieee->beacon_txing && ieee->ieee_up) {
-// if(!timer_pending(&ieee->beacon_timer))
-// add_timer(&ieee->beacon_timer);
- mod_timer(&ieee->beacon_timer,
- jiffies + msecs_to_jiffies(ieee->current_network.beacon_interval - 5));
- }
- //spin_unlock_irqrestore(&ieee->beacon_lock,flags);
-}
-
-static void ieee80211_send_beacon_cb(struct timer_list *t)
-{
- struct ieee80211_device *ieee =
- from_timer(ieee, t, beacon_timer);
- unsigned long flags;
-
- spin_lock_irqsave(&ieee->beacon_lock, flags);
- ieee80211_send_beacon(ieee);
- spin_unlock_irqrestore(&ieee->beacon_lock, flags);
-}
-
-static void ieee80211_send_probe(struct ieee80211_device *ieee)
-{
- struct sk_buff *skb;
-
- skb = ieee80211_probe_req(ieee);
- if (skb) {
- softmac_mgmt_xmit(skb, ieee);
- ieee->softmac_stats.tx_probe_rq++;
- //dev_kfree_skb_any(skb);//edit by thomas
- }
-}
-
-static void ieee80211_send_probe_requests(struct ieee80211_device *ieee)
-{
- if (ieee->active_scan && (ieee->softmac_features & IEEE_SOFTMAC_PROBERQ)) {
- ieee80211_send_probe(ieee);
- ieee80211_send_probe(ieee);
- }
-}
-
-/* this performs syncro scan blocking the caller until all channels
- * in the allowed channel map have been checked.
- */
-void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
-{
- short ch = 0;
- u8 channel_map[MAX_CHANNEL_NUMBER + 1];
-
- memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER + 1);
- mutex_lock(&ieee->scan_mutex);
-
- while (1) {
- do {
- ch++;
- if (ch > MAX_CHANNEL_NUMBER)
- goto out; /* scan completed */
- } while (!channel_map[ch]);
-
-		/* this function can be called in two situations
-		 * 1- We have switched to ad-hoc mode and we are
-		 *    performing a complete syncro scan before concluding
-		 *    that there are no interesting cells and creating a
-		 *    new one. In this case the link state is
-		 *    IEEE80211_NOLINK until we find an interesting cell.
-		 *    If so, ieee80211_softmac_new_net, called by the RX path,
-		 *    will set the state to IEEE80211_LINKED, so we stop
-		 *    scanning.
-		 * 2- We are linked and root runs iwlist scan.
-		 *    So we switch to IEEE80211_LINKED_SCANNING to remember
-		 *    that we are still logically linked (not interested in
-		 *    new network events, apart from updating the net list),
-		 *    but we are temporarily 'unlinked' as the driver shall
-		 *    not filter RX frames and the channel is changing.
-		 *    So the only situation we are interested in is checking
-		 *    whether the state becomes LINKED because of situation #1.
-		 */
-
- if (ieee->state == IEEE80211_LINKED)
- goto out;
- ieee->set_chan(ieee->dev, ch);
- if (channel_map[ch] == 1)
- ieee80211_send_probe_requests(ieee);
-
-		/* this prevents an excessive wait when we
-		 * need a syncro scan to end.
-		 */
- if (ieee->state >= IEEE80211_LINKED && ieee->sync_scan_hurryup)
- goto out;
-
- msleep_interruptible(IEEE80211_SOFTMAC_SCAN_TIME);
- }
-out:
- if (ieee->state < IEEE80211_LINKED) {
- ieee->actscanning = false;
- mutex_unlock(&ieee->scan_mutex);
- } else {
- ieee->sync_scan_hurryup = 0;
- if (IS_DOT11D_ENABLE(ieee))
- dot11d_scan_complete(ieee);
- mutex_unlock(&ieee->scan_mutex);
- }
-}
-EXPORT_SYMBOL(ieee80211_softmac_scan_syncro);
-
-static void ieee80211_softmac_scan_wq(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, softmac_scan_wq);
- static short watchdog;
- u8 channel_map[MAX_CHANNEL_NUMBER + 1];
-
- memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER + 1);
- if (!ieee->ieee_up)
- return;
- mutex_lock(&ieee->scan_mutex);
- do {
- ieee->current_network.channel =
- (ieee->current_network.channel + 1) % MAX_CHANNEL_NUMBER;
- if (watchdog++ > MAX_CHANNEL_NUMBER) {
- //if current channel is not in channel map, set to default channel.
- if (!channel_map[ieee->current_network.channel]) {
- ieee->current_network.channel = 6;
- goto out; /* no good chans */
- }
- }
- } while (!channel_map[ieee->current_network.channel]);
- if (ieee->scanning == 0)
- goto out;
- ieee->set_chan(ieee->dev, ieee->current_network.channel);
- if (channel_map[ieee->current_network.channel] == 1)
- ieee80211_send_probe_requests(ieee);
-
- schedule_delayed_work(&ieee->softmac_scan_wq, IEEE80211_SOFTMAC_SCAN_TIME);
-
- mutex_unlock(&ieee->scan_mutex);
- return;
-out:
- if (IS_DOT11D_ENABLE(ieee))
- dot11d_scan_complete(ieee);
- ieee->actscanning = false;
- watchdog = 0;
- ieee->scanning = 0;
- mutex_unlock(&ieee->scan_mutex);
-}
-
-static void ieee80211_beacons_start(struct ieee80211_device *ieee)
-{
- unsigned long flags;
- spin_lock_irqsave(&ieee->beacon_lock, flags);
-
- ieee->beacon_txing = 1;
- ieee80211_send_beacon(ieee);
-
- spin_unlock_irqrestore(&ieee->beacon_lock, flags);
-}
-
-static void ieee80211_beacons_stop(struct ieee80211_device *ieee)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ieee->beacon_lock, flags);
-
- ieee->beacon_txing = 0;
-
- spin_unlock_irqrestore(&ieee->beacon_lock, flags);
- del_timer_sync(&ieee->beacon_timer);
-}
-
-void ieee80211_stop_send_beacons(struct ieee80211_device *ieee)
-{
- if (ieee->stop_send_beacons)
- ieee->stop_send_beacons(ieee->dev);
- if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
- ieee80211_beacons_stop(ieee);
-}
-EXPORT_SYMBOL(ieee80211_stop_send_beacons);
-
-void ieee80211_start_send_beacons(struct ieee80211_device *ieee)
-{
- if (ieee->start_send_beacons)
- ieee->start_send_beacons(ieee->dev, ieee->basic_rate);
- if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
- ieee80211_beacons_start(ieee);
-}
-EXPORT_SYMBOL(ieee80211_start_send_beacons);
-
-static void ieee80211_softmac_stop_scan(struct ieee80211_device *ieee)
-{
-// unsigned long flags;
-
- //ieee->sync_scan_hurryup = 1;
-
- mutex_lock(&ieee->scan_mutex);
-// spin_lock_irqsave(&ieee->lock, flags);
-
- if (ieee->scanning == 1) {
- ieee->scanning = 0;
-
- cancel_delayed_work(&ieee->softmac_scan_wq);
- }
-
-// spin_unlock_irqrestore(&ieee->lock, flags);
- mutex_unlock(&ieee->scan_mutex);
-}
-
-void ieee80211_stop_scan(struct ieee80211_device *ieee)
-{
- if (ieee->softmac_features & IEEE_SOFTMAC_SCAN)
- ieee80211_softmac_stop_scan(ieee);
- else
- ieee->stop_scan(ieee->dev);
-}
-EXPORT_SYMBOL(ieee80211_stop_scan);
-
-/* called with ieee->lock held */
-static void ieee80211_start_scan(struct ieee80211_device *ieee)
-{
- if (IS_DOT11D_ENABLE(ieee)) {
- if (IS_COUNTRY_IE_VALID(ieee))
- RESET_CIE_WATCHDOG(ieee);
- }
- if (ieee->softmac_features & IEEE_SOFTMAC_SCAN) {
- if (ieee->scanning == 0) {
- ieee->scanning = 1;
- schedule_delayed_work(&ieee->softmac_scan_wq, 0);
- }
- } else {
- ieee->start_scan(ieee->dev);
- }
-}
-
-/* called with wx_mutex held */
-void ieee80211_start_scan_syncro(struct ieee80211_device *ieee)
-{
- if (IS_DOT11D_ENABLE(ieee)) {
- if (IS_COUNTRY_IE_VALID(ieee))
- RESET_CIE_WATCHDOG(ieee);
- }
- ieee->sync_scan_hurryup = 0;
- if (ieee->softmac_features & IEEE_SOFTMAC_SCAN)
- ieee80211_softmac_scan_syncro(ieee);
- else
- ieee->scan_syncro(ieee->dev);
-}
-EXPORT_SYMBOL(ieee80211_start_scan_syncro);
-
-static inline struct sk_buff *
-ieee80211_authentication_req(struct ieee80211_network *beacon,
- struct ieee80211_device *ieee, int challengelen)
-{
- struct sk_buff *skb;
- struct ieee80211_authentication *auth;
- int len = sizeof(struct ieee80211_authentication) + challengelen + ieee->tx_headroom;
-
- skb = dev_alloc_skb(len);
- if (!skb)
- return NULL;
-
- skb_reserve(skb, ieee->tx_headroom);
- auth = skb_put(skb, sizeof(struct ieee80211_authentication));
-
- if (challengelen)
- auth->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_AUTH
- | IEEE80211_FCTL_WEP);
- else
- auth->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_AUTH);
-
- auth->header.duration_id = cpu_to_le16(0x013a);
-
- memcpy(auth->header.addr1, beacon->bssid, ETH_ALEN);
- memcpy(auth->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
- memcpy(auth->header.addr3, beacon->bssid, ETH_ALEN);
-
- //auth->algorithm = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY;
- if (ieee->auth_mode == 0)
- auth->algorithm = WLAN_AUTH_OPEN;
- else if (ieee->auth_mode == 1)
- auth->algorithm = cpu_to_le16(WLAN_AUTH_SHARED_KEY);
- else if (ieee->auth_mode == 2)
- auth->algorithm = WLAN_AUTH_OPEN; /* 0x80; */
- printk("=================>%s():auth->algorithm is %d\n", __func__, auth->algorithm);
- auth->transaction = cpu_to_le16(ieee->associate_seq);
- ieee->associate_seq++;
-
- auth->status = cpu_to_le16(WLAN_STATUS_SUCCESS);
-
- return skb;
-}
-
-static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *dest)
-{
- u8 *tag;
- int beacon_size;
- struct ieee80211_probe_response *beacon_buf;
- struct sk_buff *skb = NULL;
- int encrypt;
- int atim_len, erp_len;
- struct ieee80211_crypt_data *crypt;
-
- char *ssid = ieee->current_network.ssid;
- int ssid_len = ieee->current_network.ssid_len;
- int rate_len = ieee->current_network.rates_len + 2;
- int rate_ex_len = ieee->current_network.rates_ex_len;
- int wpa_ie_len = ieee->wpa_ie_len;
- u8 erpinfo_content = 0;
-
- u8 *tmp_ht_cap_buf;
- u8 tmp_ht_cap_len = 0;
- u8 *tmp_ht_info_buf;
- u8 tmp_ht_info_len = 0;
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- u8 *tmp_generic_ie_buf = NULL;
- u8 tmp_generic_ie_len = 0;
-
- if (rate_ex_len > 0)
- rate_ex_len += 2;
-
- if (ieee->current_network.capability & WLAN_CAPABILITY_IBSS)
- atim_len = 4;
- else
- atim_len = 0;
-
- if (ieee80211_is_54g(&ieee->current_network))
- erp_len = 3;
- else
- erp_len = 0;
-
- crypt = ieee->crypt[ieee->tx_keyidx];
-
- encrypt = ieee->host_encrypt && crypt && crypt->ops &&
- ((0 == strcmp(crypt->ops->name, "WEP") || wpa_ie_len));
-	/* HT related element */
- tmp_ht_cap_buf = (u8 *)&ieee->pHTInfo->SelfHTCap;
- tmp_ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap);
- tmp_ht_info_buf = (u8 *)&ieee->pHTInfo->SelfHTInfo;
- tmp_ht_info_len = sizeof(ieee->pHTInfo->SelfHTInfo);
- HTConstructCapabilityElement(ieee, tmp_ht_cap_buf, &tmp_ht_cap_len, encrypt);
- HTConstructInfoElement(ieee, tmp_ht_info_buf, &tmp_ht_info_len, encrypt);
-
- if (pHTInfo->bRegRT2RTAggregation) {
- tmp_generic_ie_buf = ieee->pHTInfo->szRT2RTAggBuffer;
- tmp_generic_ie_len = sizeof(ieee->pHTInfo->szRT2RTAggBuffer);
- HTConstructRT2RTAggElement(ieee, tmp_generic_ie_buf, &tmp_generic_ie_len);
- }
-// printk("===============>tmp_ht_cap_len is %d,tmp_ht_info_len is %d, tmp_generic_ie_len is %d\n",tmp_ht_cap_len,tmp_ht_info_len,tmp_generic_ie_len);
- beacon_size = sizeof(struct ieee80211_probe_response) + 2
- + ssid_len
- + 3 //channel
- + rate_len
- + rate_ex_len
- + atim_len
- + erp_len
- + wpa_ie_len
- // + tmp_ht_cap_len
- // + tmp_ht_info_len
- // + tmp_generic_ie_len
-// + wmm_len+2
- + ieee->tx_headroom;
- skb = dev_alloc_skb(beacon_size);
- if (!skb)
- return NULL;
- skb_reserve(skb, ieee->tx_headroom);
- beacon_buf = skb_put(skb, (beacon_size - ieee->tx_headroom));
- memcpy(beacon_buf->header.addr1, dest, ETH_ALEN);
- memcpy(beacon_buf->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
- memcpy(beacon_buf->header.addr3, ieee->current_network.bssid, ETH_ALEN);
-
- beacon_buf->header.duration_id = 0; /* FIXME */
- beacon_buf->beacon_interval =
- cpu_to_le16(ieee->current_network.beacon_interval);
- beacon_buf->capability =
- cpu_to_le16(ieee->current_network.capability & WLAN_CAPABILITY_IBSS);
- beacon_buf->capability |=
- cpu_to_le16(ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE); /* add short preamble here */
-
- if (ieee->short_slot && (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_SLOT))
- beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
-
- if (encrypt)
- beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
-
- beacon_buf->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_RESP);
- beacon_buf->info_element[0].id = MFIE_TYPE_SSID;
- beacon_buf->info_element[0].len = ssid_len;
-
- tag = (u8 *)beacon_buf->info_element[0].data;
-
- memcpy(tag, ssid, ssid_len);
-
- tag += ssid_len;
-
- *(tag++) = MFIE_TYPE_RATES;
- *(tag++) = rate_len - 2;
- memcpy(tag, ieee->current_network.rates, rate_len - 2);
- tag += rate_len - 2;
-
- *(tag++) = MFIE_TYPE_DS_SET;
- *(tag++) = 1;
- *(tag++) = ieee->current_network.channel;
-
- if (atim_len) {
- *(tag++) = MFIE_TYPE_IBSS_SET;
- *(tag++) = 2;
-
- put_unaligned_le16(ieee->current_network.atim_window,
- tag);
- tag += 2;
- }
-
- if (erp_len) {
- *(tag++) = MFIE_TYPE_ERP;
- *(tag++) = 1;
- *(tag++) = erpinfo_content;
- }
- if (rate_ex_len) {
- *(tag++) = MFIE_TYPE_RATES_EX;
- *(tag++) = rate_ex_len - 2;
- memcpy(tag, ieee->current_network.rates_ex, rate_ex_len - 2);
- tag += rate_ex_len - 2;
- }
-
- if (wpa_ie_len) {
- if (ieee->iw_mode == IW_MODE_ADHOC) {
-			// Windows sets the pairwise key the same as the group key, which is not allowed in Linux, so set this for the IOT issue. WB 2008.07.07
- memcpy(&ieee->wpa_ie[14], &ieee->wpa_ie[8], 4);
- }
- memcpy(tag, ieee->wpa_ie, ieee->wpa_ie_len);
- tag += wpa_ie_len;
- }
-
- //skb->dev = ieee->dev;
- return skb;
-}
-
-static struct sk_buff *ieee80211_assoc_resp(struct ieee80211_device *ieee,
- u8 *dest)
-{
- struct sk_buff *skb;
- u8 *tag;
-
- struct ieee80211_crypt_data *crypt;
- struct ieee80211_assoc_response_frame *assoc;
- short encrypt;
-
- unsigned int rate_len = ieee80211_MFIE_rate_len(ieee);
- int len = sizeof(struct ieee80211_assoc_response_frame) + rate_len + ieee->tx_headroom;
-
- skb = dev_alloc_skb(len);
-
- if (!skb)
- return NULL;
-
- skb_reserve(skb, ieee->tx_headroom);
-
- assoc = skb_put(skb, sizeof(struct ieee80211_assoc_response_frame));
-
- assoc->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP);
- memcpy(assoc->header.addr1, dest, ETH_ALEN);
- memcpy(assoc->header.addr3, ieee->dev->dev_addr, ETH_ALEN);
- memcpy(assoc->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
- assoc->capability = cpu_to_le16(ieee->iw_mode == IW_MODE_MASTER ?
- WLAN_CAPABILITY_BSS : WLAN_CAPABILITY_IBSS);
-
- if (ieee->short_slot)
- assoc->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
-
- if (ieee->host_encrypt)
- crypt = ieee->crypt[ieee->tx_keyidx];
- else
- crypt = NULL;
-
- encrypt = crypt && crypt->ops;
-
- if (encrypt)
- assoc->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
-
- assoc->status = 0;
- assoc->aid = cpu_to_le16(ieee->assoc_id);
- if (ieee->assoc_id == 0x2007)
- ieee->assoc_id = 0;
- else
- ieee->assoc_id++;
-
- tag = skb_put(skb, rate_len);
-
- ieee80211_MFIE_Brate(ieee, &tag);
- ieee80211_MFIE_Grate(ieee, &tag);
-
- return skb;
-}
-
-static struct sk_buff *ieee80211_auth_resp(struct ieee80211_device *ieee,
- int status, u8 *dest)
-{
- struct sk_buff *skb;
- struct ieee80211_authentication *auth;
- int len = ieee->tx_headroom + sizeof(struct ieee80211_authentication) + 1;
-
- skb = dev_alloc_skb(len);
-
- if (!skb)
- return NULL;
-
- skb->len = sizeof(struct ieee80211_authentication);
-
- auth = (struct ieee80211_authentication *)skb->data;
-
- auth->status = cpu_to_le16(status);
- auth->transaction = cpu_to_le16(2);
- auth->algorithm = cpu_to_le16(WLAN_AUTH_OPEN);
-
- memcpy(auth->header.addr3, ieee->dev->dev_addr, ETH_ALEN);
- memcpy(auth->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
- memcpy(auth->header.addr1, dest, ETH_ALEN);
- auth->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_AUTH);
- return skb;
-}
-
-static struct sk_buff *ieee80211_null_func(struct ieee80211_device *ieee,
- short pwr)
-{
- struct sk_buff *skb;
- struct rtl_80211_hdr_3addr *hdr;
-
- skb = dev_alloc_skb(sizeof(struct rtl_80211_hdr_3addr));
-
- if (!skb)
- return NULL;
-
- hdr = skb_put(skb, sizeof(struct rtl_80211_hdr_3addr));
-
- memcpy(hdr->addr1, ieee->current_network.bssid, ETH_ALEN);
- memcpy(hdr->addr2, ieee->dev->dev_addr, ETH_ALEN);
- memcpy(hdr->addr3, ieee->current_network.bssid, ETH_ALEN);
-
- hdr->frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
- IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_TODS |
- (pwr ? IEEE80211_FCTL_PM : 0));
-
- return skb;
-}
-
-static void ieee80211_resp_to_assoc_rq(struct ieee80211_device *ieee, u8 *dest)
-{
- struct sk_buff *buf = ieee80211_assoc_resp(ieee, dest);
-
- if (buf)
- softmac_mgmt_xmit(buf, ieee);
-}
-
-static void ieee80211_resp_to_auth(struct ieee80211_device *ieee, int s,
- u8 *dest)
-{
- struct sk_buff *buf = ieee80211_auth_resp(ieee, s, dest);
-
- if (buf)
- softmac_mgmt_xmit(buf, ieee);
-}
-
-static void ieee80211_resp_to_probe(struct ieee80211_device *ieee, u8 *dest)
-{
- struct sk_buff *buf = ieee80211_probe_resp(ieee, dest);
- if (buf)
- softmac_mgmt_xmit(buf, ieee);
-}
-
-static inline struct sk_buff *
-ieee80211_association_req(struct ieee80211_network *beacon,
- struct ieee80211_device *ieee)
-{
- struct sk_buff *skb;
- //unsigned long flags;
-
- struct ieee80211_assoc_request_frame *hdr;
- u8 *tag;//,*rsn_ie;
- //short info_addr = 0;
- //int i;
- //u16 suite_count = 0;
- //u8 suit_select = 0;
- //unsigned int wpa_len = beacon->wpa_ie_len;
- //for HT
- u8 *ht_cap_buf = NULL;
- u8 ht_cap_len = 0;
- u8 *realtek_ie_buf = NULL;
- u8 realtek_ie_len = 0;
- int wpa_ie_len = ieee->wpa_ie_len;
- unsigned int ckip_ie_len = 0;
- unsigned int ccxrm_ie_len = 0;
- unsigned int cxvernum_ie_len = 0;
- struct ieee80211_crypt_data *crypt;
- int encrypt;
-
- unsigned int rate_len = ieee80211_MFIE_rate_len(ieee);
- unsigned int wmm_info_len = beacon->qos_data.supported ? 9 : 0;
-#ifdef THOMAS_TURBO
- unsigned int turbo_info_len = beacon->Turbo_Enable ? 9 : 0;
-#endif
-
- int len = 0;
-
- crypt = ieee->crypt[ieee->tx_keyidx];
- encrypt = ieee->host_encrypt && crypt && crypt->ops && ((0 == strcmp(crypt->ops->name, "WEP") || wpa_ie_len));
-
-	/* Include High Throughput capability && Realtek proprietary */
- if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT) {
- ht_cap_buf = (u8 *)&ieee->pHTInfo->SelfHTCap;
- ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap);
- HTConstructCapabilityElement(ieee, ht_cap_buf, &ht_cap_len, encrypt);
- if (ieee->pHTInfo->bCurrentRT2RTAggregation) {
- realtek_ie_buf = ieee->pHTInfo->szRT2RTAggBuffer;
- realtek_ie_len = sizeof(ieee->pHTInfo->szRT2RTAggBuffer);
- HTConstructRT2RTAggElement(ieee, realtek_ie_buf, &realtek_ie_len);
- }
- }
- if (ieee->qos_support)
- wmm_info_len = beacon->qos_data.supported ? 9 : 0;
-
- if (beacon->bCkipSupported)
- ckip_ie_len = 30 + 2;
-
- if (beacon->bCcxRmEnable)
- ccxrm_ie_len = 6 + 2;
-
- if (beacon->BssCcxVerNumber >= 2)
- cxvernum_ie_len = 5 + 2;
-
-#ifdef THOMAS_TURBO
- len = sizeof(struct ieee80211_assoc_request_frame) + 2
- + beacon->ssid_len /* essid tagged val */
- + rate_len /* rates tagged val */
- + wpa_ie_len
- + wmm_info_len
- + turbo_info_len
- + ht_cap_len
- + realtek_ie_len
- + ckip_ie_len
- + ccxrm_ie_len
- + cxvernum_ie_len
- + ieee->tx_headroom;
-#else
- len = sizeof(struct ieee80211_assoc_request_frame) + 2
- + beacon->ssid_len /* essid tagged val */
- + rate_len /* rates tagged val */
- + wpa_ie_len
- + wmm_info_len
- + ht_cap_len
- + realtek_ie_len
- + ckip_ie_len
- + ccxrm_ie_len
- + cxvernum_ie_len
- + ieee->tx_headroom;
-#endif
- skb = dev_alloc_skb(len);
-
- if (!skb)
- return NULL;
-
- skb_reserve(skb, ieee->tx_headroom);
-
- hdr = skb_put(skb, sizeof(struct ieee80211_assoc_request_frame) + 2);
-
- hdr->header.frame_ctl = IEEE80211_STYPE_ASSOC_REQ;
- hdr->header.duration_id = cpu_to_le16(37);
- memcpy(hdr->header.addr1, beacon->bssid, ETH_ALEN);
- memcpy(hdr->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
- memcpy(hdr->header.addr3, beacon->bssid, ETH_ALEN);
-
- memcpy(ieee->ap_mac_addr, beacon->bssid, ETH_ALEN);//for HW security, John
-
- hdr->capability = cpu_to_le16(WLAN_CAPABILITY_BSS);
- if (beacon->capability & WLAN_CAPABILITY_PRIVACY)
- hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
-
- if (beacon->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
- hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE); //add short_preamble here
-
- if (ieee->short_slot)
- hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
- if (wmm_info_len) //QOS
- hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_QOS);
-
- hdr->listen_interval = cpu_to_le16(0xa);
-
- hdr->info_element[0].id = MFIE_TYPE_SSID;
-
- hdr->info_element[0].len = beacon->ssid_len;
- skb_put_data(skb, beacon->ssid, beacon->ssid_len);
-
- tag = skb_put(skb, rate_len);
-
- ieee80211_MFIE_Brate(ieee, &tag);
- ieee80211_MFIE_Grate(ieee, &tag);
- // For CCX 1 S13, CKIP. Added by Annie, 2006-08-14.
- if (beacon->bCkipSupported) {
- static u8 AironetIeOui[] = {0x00, 0x01, 0x66}; // "4500-client"
- u8 CcxAironetBuf[30];
- struct octet_string osCcxAironetIE;
-
- memset(CcxAironetBuf, 0, 30);
- osCcxAironetIE.octet = CcxAironetBuf;
- osCcxAironetIE.length = sizeof(CcxAironetBuf);
- //
- // Ref. CCX test plan v3.61, 3.2.3.1 step 13.
-		// We want to set the device type to "4500-client". 060926, by CCW.
- //
- memcpy(osCcxAironetIE.octet, AironetIeOui, sizeof(AironetIeOui));
-
- // CCX1 spec V1.13, A01.1 CKIP Negotiation (page23):
- // "The CKIP negotiation is started with the associate request from the client to the access point,
- // containing an Aironet element with both the MIC and KP bits set."
- osCcxAironetIE.octet[IE_CISCO_FLAG_POSITION] |= (SUPPORT_CKIP_PK | SUPPORT_CKIP_MIC);
- tag = skb_put(skb, ckip_ie_len);
- *tag++ = MFIE_TYPE_AIRONET;
- *tag++ = osCcxAironetIE.length;
- memcpy(tag, osCcxAironetIE.octet, osCcxAironetIE.length);
- tag += osCcxAironetIE.length;
- }
-
- if (beacon->bCcxRmEnable) {
- static u8 CcxRmCapBuf[] = {0x00, 0x40, 0x96, 0x01, 0x01, 0x00};
- struct octet_string osCcxRmCap;
-
- osCcxRmCap.octet = CcxRmCapBuf;
- osCcxRmCap.length = sizeof(CcxRmCapBuf);
- tag = skb_put(skb, ccxrm_ie_len);
- *tag++ = MFIE_TYPE_GENERIC;
- *tag++ = osCcxRmCap.length;
- memcpy(tag, osCcxRmCap.octet, osCcxRmCap.length);
- tag += osCcxRmCap.length;
- }
-
- if (beacon->BssCcxVerNumber >= 2) {
- u8 CcxVerNumBuf[] = {0x00, 0x40, 0x96, 0x03, 0x00};
- struct octet_string osCcxVerNum;
- CcxVerNumBuf[4] = beacon->BssCcxVerNumber;
- osCcxVerNum.octet = CcxVerNumBuf;
- osCcxVerNum.length = sizeof(CcxVerNumBuf);
- tag = skb_put(skb, cxvernum_ie_len);
- *tag++ = MFIE_TYPE_GENERIC;
- *tag++ = osCcxVerNum.length;
- memcpy(tag, osCcxVerNum.octet, osCcxVerNum.length);
- tag += osCcxVerNum.length;
- }
- //HT cap element
- if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT) {
- if (ieee->pHTInfo->ePeerHTSpecVer != HT_SPEC_VER_EWC) {
- tag = skb_put(skb, ht_cap_len);
- *tag++ = MFIE_TYPE_HT_CAP;
- *tag++ = ht_cap_len - 2;
- memcpy(tag, ht_cap_buf, ht_cap_len - 2);
- tag += ht_cap_len - 2;
- }
- }
-
- //choose what wpa_supplicant gives to associate.
- if (wpa_ie_len)
- skb_put_data(skb, ieee->wpa_ie, wpa_ie_len);
-
- if (wmm_info_len) {
- tag = skb_put(skb, wmm_info_len);
- ieee80211_WMM_Info(ieee, &tag);
- }
-#ifdef THOMAS_TURBO
- if (turbo_info_len) {
- tag = skb_put(skb, turbo_info_len);
- ieee80211_TURBO_Info(ieee, &tag);
- }
-#endif
-
- if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT) {
- if (ieee->pHTInfo->ePeerHTSpecVer == HT_SPEC_VER_EWC) {
- tag = skb_put(skb, ht_cap_len);
- *tag++ = MFIE_TYPE_GENERIC;
- *tag++ = ht_cap_len - 2;
- memcpy(tag, ht_cap_buf, ht_cap_len - 2);
- tag += ht_cap_len - 2;
- }
-
- if (ieee->pHTInfo->bCurrentRT2RTAggregation) {
- tag = skb_put(skb, realtek_ie_len);
- *tag++ = MFIE_TYPE_GENERIC;
- *tag++ = realtek_ie_len - 2;
- memcpy(tag, realtek_ie_buf, realtek_ie_len - 2);
- }
- }
-// printk("<=====%s(), %p, %p\n", __func__, ieee->dev, ieee->dev->dev_addr);
-// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, skb->data, skb->len);
- return skb;
-}
-
-void ieee80211_associate_abort(struct ieee80211_device *ieee)
-{
- unsigned long flags;
- spin_lock_irqsave(&ieee->lock, flags);
-
- ieee->associate_seq++;
-
-	/* don't scan, and avoid having the RX path possibly
-	 * try to associate again. Do not even react to AUTH or
-	 * ASSOC responses. Just wait for the retry wq to be scheduled.
-	 * There we will check whether there are good nets to associate
-	 * with, so we either retry or just go back to NO_LINK and scanning
-	 */
- if (ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATING) {
- IEEE80211_DEBUG_MGMT("Authentication failed\n");
- ieee->softmac_stats.no_auth_rs++;
- } else {
- IEEE80211_DEBUG_MGMT("Association failed\n");
- ieee->softmac_stats.no_ass_rs++;
- }
-
- ieee->state = IEEE80211_ASSOCIATING_RETRY;
-
- schedule_delayed_work(&ieee->associate_retry_wq, \
- IEEE80211_SOFTMAC_ASSOC_RETRY_TIME);
-
- spin_unlock_irqrestore(&ieee->lock, flags);
-}
-
-static void ieee80211_associate_abort_cb(struct timer_list *t)
-{
- struct ieee80211_device *dev = from_timer(dev, t, associate_timer);
-
- ieee80211_associate_abort(dev);
-}
-
-static void ieee80211_associate_step1(struct ieee80211_device *ieee)
-{
- struct ieee80211_network *beacon = &ieee->current_network;
- struct sk_buff *skb;
-
- IEEE80211_DEBUG_MGMT("Stopping scan\n");
-
- ieee->softmac_stats.tx_auth_rq++;
- skb = ieee80211_authentication_req(beacon, ieee, 0);
-
- if (!skb) {
- ieee80211_associate_abort(ieee);
- } else {
- ieee->state = IEEE80211_ASSOCIATING_AUTHENTICATING;
- IEEE80211_DEBUG_MGMT("Sending authentication request\n");
- softmac_mgmt_xmit(skb, ieee);
-		// calling add_timer twice triggers a BUG_ON; using mod_timer may be better, john0709
- if (!timer_pending(&ieee->associate_timer)) {
- ieee->associate_timer.expires = jiffies + (HZ / 2);
- add_timer(&ieee->associate_timer);
- }
- //dev_kfree_skb_any(skb);//edit by thomas
- }
-}
-
-static void ieee80211_auth_challenge(struct ieee80211_device *ieee,
- u8 *challenge,
- int chlen)
-{
- u8 *c;
- struct sk_buff *skb;
- struct ieee80211_network *beacon = &ieee->current_network;
-// int hlen = sizeof(struct ieee80211_authentication);
-
- ieee->associate_seq++;
- ieee->softmac_stats.tx_auth_rq++;
-
- skb = ieee80211_authentication_req(beacon, ieee, chlen + 2);
- if (!skb) {
- ieee80211_associate_abort(ieee);
- } else {
- c = skb_put(skb, chlen + 2);
- *(c++) = MFIE_TYPE_CHALLENGE;
- *(c++) = chlen;
- memcpy(c, challenge, chlen);
-
- IEEE80211_DEBUG_MGMT("Sending authentication challenge response\n");
-
- ieee80211_encrypt_fragment(ieee, skb, sizeof(struct rtl_80211_hdr_3addr));
-
- softmac_mgmt_xmit(skb, ieee);
- mod_timer(&ieee->associate_timer, jiffies + (HZ / 2));
- //dev_kfree_skb_any(skb);//edit by thomas
- }
- kfree(challenge);
-}
-
-static void ieee80211_associate_step2(struct ieee80211_device *ieee)
-{
- struct sk_buff *skb;
- struct ieee80211_network *beacon = &ieee->current_network;
-
- del_timer_sync(&ieee->associate_timer);
-
- IEEE80211_DEBUG_MGMT("Sending association request\n");
-
- ieee->softmac_stats.tx_ass_rq++;
- skb = ieee80211_association_req(beacon, ieee);
- if (!skb) {
- ieee80211_associate_abort(ieee);
- } else {
- softmac_mgmt_xmit(skb, ieee);
- mod_timer(&ieee->associate_timer, jiffies + (HZ / 2));
- //dev_kfree_skb_any(skb);//edit by thomas
- }
-}
-static void ieee80211_associate_complete_wq(struct work_struct *work)
-{
- struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_complete_wq);
-
- netdev_info(ieee->dev, "Associated successfully\n");
- if (ieee80211_is_54g(&ieee->current_network) &&
- (ieee->modulation & IEEE80211_OFDM_MODULATION)) {
- ieee->rate = 108;
- netdev_info(ieee->dev, "Using G rates:%d\n", ieee->rate);
- } else {
- ieee->rate = 22;
- netdev_info(ieee->dev, "Using B rates:%d\n", ieee->rate);
- }
- if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT) {
- printk("Successfully associated, ht enabled\n");
- HTOnAssocRsp(ieee);
- } else {
- printk("Successfully associated, ht not enabled(%d, %d)\n", ieee->pHTInfo->bCurrentHTSupport, ieee->pHTInfo->bEnableHT);
- memset(ieee->dot11HTOperationalRateSet, 0, 16);
- //HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
- }
- ieee->LinkDetectInfo.SlotNum = 2 * (1 + ieee->current_network.beacon_interval / 500);
-	// To prevent calling watch_dog immediately after association.
- if (ieee->LinkDetectInfo.NumRecvBcnInPeriod == 0 || ieee->LinkDetectInfo.NumRecvDataInPeriod == 0) {
- ieee->LinkDetectInfo.NumRecvBcnInPeriod = 1;
- ieee->LinkDetectInfo.NumRecvDataInPeriod = 1;
- }
- ieee->link_change(ieee->dev);
- if (!ieee->is_silent_reset) {
- printk("============>normal associate\n");
- notify_wx_assoc_event(ieee);
- } else {
- printk("==================>silent reset associate\n");
- ieee->is_silent_reset = false;
- }
-
- if (ieee->data_hard_resume)
- ieee->data_hard_resume(ieee->dev);
- netif_carrier_on(ieee->dev);
-}
-
-static void ieee80211_associate_complete(struct ieee80211_device *ieee)
-{
-// int i;
-// struct net_device* dev = ieee->dev;
- del_timer_sync(&ieee->associate_timer);
-
- ieee->state = IEEE80211_LINKED;
- //ieee->UpdateHalRATRTableHandler(dev, ieee->dot11HTOperationalRateSet);
- schedule_work(&ieee->associate_complete_wq);
-}
-
-static void ieee80211_associate_procedure_wq(struct work_struct *work)
-{
- struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_procedure_wq);
- ieee->sync_scan_hurryup = 1;
- mutex_lock(&ieee->wx_mutex);
-
- if (ieee->data_hard_stop)
- ieee->data_hard_stop(ieee->dev);
-
- ieee80211_stop_scan(ieee);
- printk("===>%s(), chan:%d\n", __func__, ieee->current_network.channel);
- //ieee->set_chan(ieee->dev, ieee->current_network.channel);
- HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
-
- ieee->associate_seq = 1;
- ieee80211_associate_step1(ieee);
-
- mutex_unlock(&ieee->wx_mutex);
-}
-
-inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee80211_network *net)
-{
- u8 tmp_ssid[IW_ESSID_MAX_SIZE + 1];
- int tmp_ssid_len = 0;
-
- short apset, ssidset, ssidbroad, apmatch, ssidmatch;
-
-	/* we are interested in a new net only if we are not associated
- * and we are not associating / authenticating
- */
- if (ieee->state != IEEE80211_NOLINK)
- return;
-
- if ((ieee->iw_mode == IW_MODE_INFRA) && !(net->capability & WLAN_CAPABILITY_BSS))
- return;
-
- if ((ieee->iw_mode == IW_MODE_ADHOC) && !(net->capability & WLAN_CAPABILITY_IBSS))
- return;
-
- if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC) {
-		/* if the user specified the AP MAC, we also need the essid.
-		 * This can be obtained from beacons or, if the network does not
-		 * broadcast it, it can be set manually.
-		 */
- apset = ieee->wap_set;//(memcmp(ieee->current_network.bssid, zero,ETH_ALEN)!=0 );
- ssidset = ieee->ssid_set;//ieee->current_network.ssid[0] != '\0';
- ssidbroad = !(net->ssid_len == 0 || net->ssid[0] == '\0');
- apmatch = (memcmp(ieee->current_network.bssid, net->bssid, ETH_ALEN) == 0);
- ssidmatch = (ieee->current_network.ssid_len == net->ssid_len) &&
- (!strncmp(ieee->current_network.ssid, net->ssid, net->ssid_len));
-
-		/* if the user set the AP, check whether it matches.
-		 * if the network does not broadcast the essid, we check the user-supplied ANY essid
-		 * if the network does broadcast and the user did not set the essid it is OK
-		 * if the network does broadcast and the user did set the essid, check whether it matches
-		 */
- if ((apset && apmatch &&
- ((ssidset && ssidbroad && ssidmatch) || (ssidbroad && !ssidset) || (!ssidbroad && ssidset))) ||
-		    /* if the AP is not set, check that the user set the essid,
-		     * that the network broadcasts one and that the two match
-		     */
- (!apset && ssidset && ssidbroad && ssidmatch)) {
- /* if the essid is hidden replace it with the
- * essid provided by the user.
- */
- if (!ssidbroad) {
- strncpy(tmp_ssid, ieee->current_network.ssid, IW_ESSID_MAX_SIZE);
- tmp_ssid_len = ieee->current_network.ssid_len;
- }
- memcpy(&ieee->current_network, net, sizeof(struct ieee80211_network));
-
- strncpy(ieee->current_network.ssid, tmp_ssid, IW_ESSID_MAX_SIZE);
- ieee->current_network.ssid_len = tmp_ssid_len;
- netdev_info(ieee->dev,
- "Linking with %s,channel:%d, qos:%d, myHT:%d, networkHT:%d\n",
- ieee->current_network.ssid,
- ieee->current_network.channel,
- ieee->current_network.qos_data.supported,
- ieee->pHTInfo->bEnableHT,
- ieee->current_network.bssht.bdSupportHT);
-
- //ieee->pHTInfo->IOTAction = 0;
- HTResetIOTSetting(ieee->pHTInfo);
- if (ieee->iw_mode == IW_MODE_INFRA) {
- /* Join the network for the first time */
- ieee->AsocRetryCount = 0;
- //for HT by amy 080514
- if ((ieee->current_network.qos_data.supported == 1) &&
- // (ieee->pHTInfo->bEnableHT && ieee->current_network.bssht.bdSupportHT))
- ieee->current_network.bssht.bdSupportHT) {
-/* WB, 2008.09.09: the bCurrentHTSupport and bEnableHT flags are checked together to decide whether we are in HT now, so there is no need to check bEnableHT here. That is to say, we enable HT support whenever the joined AP is able to support HT. To know whether we are in HT or not, check bCurrentHTSupport && bEnableHT. */
- // ieee->pHTInfo->bCurrentHTSupport = true;
- HTResetSelfAndSavePeerSetting(ieee, &ieee->current_network);
- } else {
- ieee->pHTInfo->bCurrentHTSupport = false;
- }
-
- ieee->state = IEEE80211_ASSOCIATING;
- schedule_work(&ieee->associate_procedure_wq);
- } else {
- if (ieee80211_is_54g(&ieee->current_network) &&
- (ieee->modulation & IEEE80211_OFDM_MODULATION)) {
- ieee->rate = 108;
- ieee->SetWirelessMode(ieee->dev, IEEE_G);
- netdev_info(ieee->dev,
- "Using G rates\n");
- } else {
- ieee->rate = 22;
- ieee->SetWirelessMode(ieee->dev, IEEE_B);
- netdev_info(ieee->dev,
- "Using B rates\n");
- }
- memset(ieee->dot11HTOperationalRateSet, 0, 16);
- //HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
- ieee->state = IEEE80211_LINKED;
- }
- }
- }
-}
-
-void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee)
-{
- unsigned long flags;
- struct ieee80211_network *target;
-
- spin_lock_irqsave(&ieee->lock, flags);
-
- list_for_each_entry(target, &ieee->network_list, list) {
-		/* if the state becomes different from NOLINK it means
-		 * we have found what we are searching for
-		 */
-
- if (ieee->state != IEEE80211_NOLINK)
- break;
-
- if (ieee->scan_age == 0 || time_after(target->last_scanned + ieee->scan_age, jiffies))
- ieee80211_softmac_new_net(ieee, target);
- }
-
- spin_unlock_irqrestore(&ieee->lock, flags);
-}
-
-static inline int auth_parse(struct sk_buff *skb, u8 **challenge, int *chlen)
-{
- struct ieee80211_authentication *a;
- u8 *t;
- if (skb->len < (sizeof(struct ieee80211_authentication) - sizeof(struct ieee80211_info_element))) {
- IEEE80211_DEBUG_MGMT("invalid len in auth resp: %d\n", skb->len);
- return -EINVAL;
- }
- *challenge = NULL;
- a = (struct ieee80211_authentication *)skb->data;
- if (skb->len > (sizeof(struct ieee80211_authentication) + 3)) {
- t = skb->data + sizeof(struct ieee80211_authentication);
-
- if (*(t++) == MFIE_TYPE_CHALLENGE) {
- *chlen = *(t++);
- *challenge = kmemdup(t, *chlen, GFP_ATOMIC);
- if (!*challenge)
- return -ENOMEM;
- }
- }
-
- if (a->status) {
- IEEE80211_DEBUG_MGMT("auth_parse() failed\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int auth_rq_parse(struct sk_buff *skb, u8 *dest)
-{
- struct ieee80211_authentication *a;
-
- if (skb->len < (sizeof(struct ieee80211_authentication) - sizeof(struct ieee80211_info_element))) {
- IEEE80211_DEBUG_MGMT("invalid len in auth request: %d\n", skb->len);
- return -1;
- }
- a = (struct ieee80211_authentication *)skb->data;
-
- memcpy(dest, a->header.addr2, ETH_ALEN);
-
- if (le16_to_cpu(a->algorithm) != WLAN_AUTH_OPEN)
- return WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG;
-
- return WLAN_STATUS_SUCCESS;
-}
-
-static short probe_rq_parse(struct ieee80211_device *ieee, struct sk_buff *skb, u8 *src)
-{
- u8 *tag;
- u8 *skbend;
- u8 *ssid = NULL;
- u8 ssidlen = 0;
-
- struct rtl_80211_hdr_3addr *header =
- (struct rtl_80211_hdr_3addr *)skb->data;
-
- if (skb->len < sizeof(struct rtl_80211_hdr_3addr))
- return -1; /* corrupted */
-
- memcpy(src, header->addr2, ETH_ALEN);
-
- skbend = (u8 *)skb->data + skb->len;
-
- tag = skb->data + sizeof(struct rtl_80211_hdr_3addr);
-
- while (tag + 1 < skbend) {
- if (*tag == 0) {
- ssid = tag + 2;
- ssidlen = *(tag + 1);
- break;
- }
- tag++; /* point to the len field */
- tag = tag + *(tag); /* point to the last data byte of the tag */
- tag++; /* point to the next tag */
- }
-
- //IEEE80211DMESG("Card MAC address is "MACSTR, MAC2STR(src));
- if (ssidlen == 0)
- return 1;
-
- if (!ssid)
- return 1; /* ssid not found in tagged param */
-
- return (!strncmp(ssid, ieee->current_network.ssid, ssidlen));
-}
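
probe_rq_parse() above walks the tagged parameters by hand: each information element is one ID byte, one length byte, then 'length' bytes of payload, with element ID 0 (the SSID element) being the one it looks for. A standalone sketch of the same walk over a made-up buffer (the driver works on skb data instead):

#include <stdio.h>

int main(void)
{
	/* two illustrative IEs: SSID "ap" (ID 0) and DS params (ID 3, channel 6) */
	const unsigned char ies[] = { 0x00, 0x02, 'a', 'p', 0x03, 0x01, 0x06 };
	const unsigned char *tag = ies;
	const unsigned char *end = ies + sizeof(ies);

	while (tag + 2 <= end) {
		unsigned char id = tag[0];
		unsigned char len = tag[1];

		if (tag + 2 + len > end)
			break;				/* truncated element */
		if (id == 0)				/* SSID element */
			printf("SSID: %.*s\n", len, (const char *)(tag + 2));
		tag += 2 + len;				/* next element */
	}
	return 0;
}
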
-
-static int assoc_rq_parse(struct sk_buff *skb, u8 *dest)
-{
- struct ieee80211_assoc_request_frame *a;
-
- if (skb->len < (sizeof(struct ieee80211_assoc_request_frame) -
- sizeof(struct ieee80211_info_element))) {
-		IEEE80211_DEBUG_MGMT("invalid len in assoc request: %d\n", skb->len);
- return -1;
- }
-
- a = (struct ieee80211_assoc_request_frame *)skb->data;
-
- memcpy(dest, a->header.addr2, ETH_ALEN);
-
- return 0;
-}
-
-static inline u16 assoc_parse(struct ieee80211_device *ieee, struct sk_buff *skb, int *aid)
-{
- struct ieee80211_assoc_response_frame *response_head;
- u16 status_code;
-
- if (skb->len < sizeof(struct ieee80211_assoc_response_frame)) {
-		IEEE80211_DEBUG_MGMT("invalid len in assoc resp: %d\n", skb->len);
- return 0xcafe;
- }
-
- response_head = (struct ieee80211_assoc_response_frame *)skb->data;
- *aid = le16_to_cpu(response_head->aid) & 0x3fff;
-
- status_code = le16_to_cpu(response_head->status);
- if ((status_code == WLAN_STATUS_ASSOC_DENIED_RATES ||
- status_code == WLAN_STATUS_CAPS_UNSUPPORTED) &&
- ((ieee->mode == IEEE_G) &&
- (ieee->current_network.mode == IEEE_N_24G) &&
- (ieee->AsocRetryCount++ < (RT_ASOC_RETRY_LIMIT - 1)))) {
- ieee->pHTInfo->IOTAction |= HT_IOT_ACT_PURE_N_MODE;
- } else {
- ieee->AsocRetryCount = 0;
- }
-
- return le16_to_cpu(response_head->status);
-}
-
-static inline void
-ieee80211_rx_probe_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
-{
- u8 dest[ETH_ALEN];
-
- //IEEE80211DMESG("Rx probe");
- ieee->softmac_stats.rx_probe_rq++;
- //DMESG("Dest is "MACSTR, MAC2STR(dest));
- if (probe_rq_parse(ieee, skb, dest)) {
- //IEEE80211DMESG("Was for me!");
- ieee->softmac_stats.tx_probe_rs++;
- ieee80211_resp_to_probe(ieee, dest);
- }
-}
-
-static inline void
-ieee80211_rx_auth_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
-{
- u8 dest[ETH_ALEN];
- int status;
- //IEEE80211DMESG("Rx probe");
- ieee->softmac_stats.rx_auth_rq++;
-
- status = auth_rq_parse(skb, dest);
- if (status != -1)
- ieee80211_resp_to_auth(ieee, status, dest);
- //DMESG("Dest is "MACSTR, MAC2STR(dest));
-}
-
-static inline void
-ieee80211_rx_assoc_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
-{
- u8 dest[ETH_ALEN];
- //unsigned long flags;
-
- ieee->softmac_stats.rx_ass_rq++;
- if (assoc_rq_parse(skb, dest) != -1)
- ieee80211_resp_to_assoc_rq(ieee, dest);
-
- netdev_info(ieee->dev, "New client associated: %pM\n", dest);
- //FIXME
-}
-
-static void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee,
- short pwr)
-{
- struct sk_buff *buf = ieee80211_null_func(ieee, pwr);
-
- if (buf)
- softmac_ps_mgmt_xmit(buf, ieee);
-}
-/* EXPORT_SYMBOL(ieee80211_sta_ps_send_null_frame); */
-
-static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h,
- u32 *time_l)
-{
- int timeout;
- u8 dtim;
- /*if(ieee->ps == IEEE80211_PS_DISABLED ||
- ieee->iw_mode != IW_MODE_INFRA ||
- ieee->state != IEEE80211_LINKED)
-
- return 0;
- */
- dtim = ieee->current_network.dtim_data;
- if (!(dtim & IEEE80211_DTIM_VALID))
- return 0;
- timeout = ieee->current_network.beacon_interval; //should we use ps_timeout value or beacon_interval
- ieee->current_network.dtim_data = IEEE80211_DTIM_INVALID;
-
- if (dtim & ((IEEE80211_DTIM_UCAST | IEEE80211_DTIM_MBCAST) & ieee->ps))
- return 2;
-
- if (!time_after(jiffies,
- dev_trans_start(ieee->dev) + msecs_to_jiffies(timeout)))
- return 0;
-
- if (!time_after(jiffies,
- ieee->last_rx_ps_time + msecs_to_jiffies(timeout)))
- return 0;
-
- if ((ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE) &&
- (ieee->mgmt_queue_tail != ieee->mgmt_queue_head))
- return 0;
-
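- /* hand back the low/high words of the timestamp at which the next DTIM
- * is expected (one DTIM period after the last one), carrying any
- * overflow of the low word into the high word
- */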
- if (time_l) {
- *time_l = ieee->current_network.last_dtim_sta_time[0]
- + (ieee->current_network.beacon_interval
- * ieee->current_network.dtim_period) * 1000;
- }
-
- if (time_h) {
- *time_h = ieee->current_network.last_dtim_sta_time[1];
- if (time_l && *time_l < ieee->current_network.last_dtim_sta_time[0])
- *time_h += 1;
- }
-
- return 1;
-}
-
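- /* Power-save state machine driven by the ps_task work below:
- * sta_sleep == 0: fully awake
- * sta_sleep == 2: a null frame with the PS bit set has been queued and we
- * are waiting for the TX ack (see ieee80211_ps_tx_ack())
- * sta_sleep == 1: the ack arrived and the device has entered the sleep state
- * ieee80211_sta_ps_sleep() returns 2 = wake, 1 = sleep, 0 = do nothing.
- */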
-static inline void ieee80211_sta_ps(struct work_struct *work)
-{
- struct ieee80211_device *ieee;
- u32 th, tl;
- short sleep;
- unsigned long flags, flags2;
-
- ieee = container_of(work, struct ieee80211_device, ps_task);
-
- spin_lock_irqsave(&ieee->lock, flags);
-
- if ((ieee->ps == IEEE80211_PS_DISABLED ||
- ieee->iw_mode != IW_MODE_INFRA ||
- ieee->state != IEEE80211_LINKED)) {
- // #warning CHECK_LOCK_HERE
- spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
-
- ieee80211_sta_wakeup(ieee, 1);
-
- spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
- }
-
- sleep = ieee80211_sta_ps_sleep(ieee, &th, &tl);
- /* 2 wake, 1 sleep, 0 do nothing */
- if (sleep == 0)
- goto out;
-
- if (sleep == 1) {
- if (ieee->sta_sleep == 1) {
- ieee->enter_sleep_state(ieee->dev, th, tl);
- } else if (ieee->sta_sleep == 0) {
- // printk("send null 1\n");
- spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
-
- if (ieee->ps_is_queue_empty(ieee->dev)) {
- ieee->sta_sleep = 2;
-
- ieee->ps_request_tx_ack(ieee->dev);
-
- ieee80211_sta_ps_send_null_frame(ieee, 1);
-
- ieee->ps_th = th;
- ieee->ps_tl = tl;
- }
- spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
- }
- } else if (sleep == 2) {
-//#warning CHECK_LOCK_HERE
- spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
-
- ieee80211_sta_wakeup(ieee, 1);
-
- spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
- }
-out:
- spin_unlock_irqrestore(&ieee->lock, flags);
-}
-
-void ieee80211_sta_wakeup(struct ieee80211_device *ieee, short nl)
-{
- if (ieee->sta_sleep == 0) {
- if (nl) {
- printk("Warning: driver is probably failing to report TX ps error\n");
- ieee->ps_request_tx_ack(ieee->dev);
- ieee80211_sta_ps_send_null_frame(ieee, 0);
- }
- return;
- }
-
- if (ieee->sta_sleep == 1)
- ieee->sta_wake_up(ieee->dev);
-
- ieee->sta_sleep = 0;
-
- if (nl) {
- ieee->ps_request_tx_ack(ieee->dev);
- ieee80211_sta_ps_send_null_frame(ieee, 0);
- }
-}
-
-void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success)
-{
- unsigned long flags, flags2;
-
- spin_lock_irqsave(&ieee->lock, flags);
-
- if (ieee->sta_sleep == 2) {
- /* Null frame with PS bit set */
- if (success) {
- ieee->sta_sleep = 1;
- ieee->enter_sleep_state(ieee->dev, ieee->ps_th, ieee->ps_tl);
- }
- /* if the card reports failure we can't be sure the AP did not
- * RX the frame, so we can't assume the AP still believes we are awake
- */
- } else {
- /* 21112005 - tx again null without PS bit if lost */
- if ((ieee->sta_sleep == 0) && !success) {
- spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
- ieee80211_sta_ps_send_null_frame(ieee, 0);
- spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
- }
- }
- spin_unlock_irqrestore(&ieee->lock, flags);
-}
-EXPORT_SYMBOL(ieee80211_ps_tx_ack);
-
-static void ieee80211_process_action(struct ieee80211_device *ieee,
- struct sk_buff *skb)
-{
- struct rtl_80211_hdr *header = (struct rtl_80211_hdr *)skb->data;
- u8 *act = ieee80211_get_payload(header);
- u8 tmp = 0;
-// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len);
- if (!act) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "failed to get payload of action frame\n");
- return;
- }
- tmp = *act;
- act++;
- switch (tmp) {
- case ACT_CAT_BA:
- if (*act == ACT_ADDBAREQ)
- ieee80211_rx_ADDBAReq(ieee, skb);
- else if (*act == ACT_ADDBARSP)
- ieee80211_rx_ADDBARsp(ieee, skb);
- else if (*act == ACT_DELBA)
- ieee80211_rx_DELBA(ieee, skb);
- break;
- default:
- break;
- }
- return;
-}
-
-static void ieee80211_check_auth_response(struct ieee80211_device *ieee,
- struct sk_buff *skb)
-{
- /* default support N mode, disable halfNmode */
- bool bSupportNmode = true, bHalfSupportNmode = false;
- int errcode;
- u8 *challenge;
- int chlen = 0;
- u32 iotAction;
-
- errcode = auth_parse(skb, &challenge, &chlen);
- if (!errcode) {
- if (ieee->open_wep || !challenge) {
- ieee->state = IEEE80211_ASSOCIATING_AUTHENTICATED;
- ieee->softmac_stats.rx_auth_rs_ok++;
- iotAction = ieee->pHTInfo->IOTAction;
- if (!(iotAction & HT_IOT_ACT_PURE_N_MODE)) {
- if (!ieee->GetNmodeSupportBySecCfg(ieee->dev)) {
- /* WEP or TKIP encryption */
- if (IsHTHalfNmodeAPs(ieee)) {
- bSupportNmode = true;
- bHalfSupportNmode = true;
- } else {
- bSupportNmode = false;
- bHalfSupportNmode = false;
- }
- netdev_dbg(ieee->dev, "SEC(%d, %d)\n",
- bSupportNmode,
- bHalfSupportNmode);
- }
- }
- /* Dummy wireless mode setting - avoid encryption issue */
- if (bSupportNmode) {
- /* N mode setting */
- ieee->SetWirelessMode(ieee->dev,
- ieee->current_network.mode);
- } else {
- /* b/g mode setting - TODO */
- ieee->SetWirelessMode(ieee->dev, IEEE_G);
- }
-
- if (ieee->current_network.mode == IEEE_N_24G &&
- bHalfSupportNmode) {
- netdev_dbg(ieee->dev, "enter half N mode\n");
- ieee->bHalfWirelessN24GMode = true;
- } else {
- ieee->bHalfWirelessN24GMode = false;
- }
- ieee80211_associate_step2(ieee);
- } else {
- ieee80211_auth_challenge(ieee, challenge, chlen);
- }
- } else {
- ieee->softmac_stats.rx_auth_rs_err++;
- IEEE80211_DEBUG_MGMT("Auth response status code %d\n", errcode);
- ieee80211_associate_abort(ieee);
- }
-}
-
-inline int
-ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats, u16 type,
- u16 stype)
-{
- struct rtl_80211_hdr_3addr *header = (struct rtl_80211_hdr_3addr *)skb->data;
- u16 errcode;
- int aid;
- struct ieee80211_assoc_response_frame *assoc_resp;
-// struct ieee80211_info_element *info_element;
-
- if (!ieee->proto_started)
- return 0;
-
- if (ieee->sta_sleep || (ieee->ps != IEEE80211_PS_DISABLED &&
- ieee->iw_mode == IW_MODE_INFRA &&
- ieee->state == IEEE80211_LINKED))
- schedule_work(&ieee->ps_task);
-
- if (WLAN_FC_GET_STYPE(header->frame_ctl) != IEEE80211_STYPE_PROBE_RESP &&
- WLAN_FC_GET_STYPE(header->frame_ctl) != IEEE80211_STYPE_BEACON)
- ieee->last_rx_ps_time = jiffies;
-
- switch (WLAN_FC_GET_STYPE(header->frame_ctl)) {
- case IEEE80211_STYPE_ASSOC_RESP:
- case IEEE80211_STYPE_REASSOC_RESP:
- IEEE80211_DEBUG_MGMT("received [RE]ASSOCIATION RESPONSE (%d)\n",
- WLAN_FC_GET_STYPE(header->frame_ctl));
- if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
- ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATED &&
- ieee->iw_mode == IW_MODE_INFRA) {
- struct ieee80211_network *network;
-
- network = kzalloc(sizeof(*network), GFP_KERNEL);
- if (!network)
- return -ENOMEM;
-
- errcode = assoc_parse(ieee, skb, &aid);
- if (!errcode) {
- ieee->state = IEEE80211_LINKED;
- ieee->assoc_id = aid;
- ieee->softmac_stats.rx_ass_ok++;
- /* station supports QoS */
- /* by default, leave the register settings as for a legacy station */
- if (ieee->qos_support) {
- assoc_resp = (struct ieee80211_assoc_response_frame *)skb->data;
- if (ieee80211_parse_info_param(ieee, assoc_resp->info_element,\
- rx_stats->len - sizeof(*assoc_resp), \
- network, rx_stats)) {
- return 1;
- } else {
- // fill in the peer HT capabilities; maybe not necessary as we can get this info from current_network
- memcpy(ieee->pHTInfo->PeerHTCapBuf, network->bssht.bdHTCapBuf, network->bssht.bdHTCapLen);
- memcpy(ieee->pHTInfo->PeerHTInfoBuf, network->bssht.bdHTInfoBuf, network->bssht.bdHTInfoLen);
- }
- if (ieee->handle_assoc_response)
- ieee->handle_assoc_response(ieee->dev, (struct ieee80211_assoc_response_frame *)header, network);
- }
- ieee80211_associate_complete(ieee);
- } else {
- /* the aid could not be allocated */
- ieee->softmac_stats.rx_ass_err++;
- printk("Association response status code 0x%x\n",
- errcode);
- IEEE80211_DEBUG_MGMT("Association response status code 0x%x\n",
- errcode);
- if (ieee->AsocRetryCount < RT_ASOC_RETRY_LIMIT)
- schedule_work(&ieee->associate_procedure_wq);
- else
- ieee80211_associate_abort(ieee);
- }
- kfree(network);
- }
- break;
-
- case IEEE80211_STYPE_ASSOC_REQ:
- case IEEE80211_STYPE_REASSOC_REQ:
- if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
- ieee->iw_mode == IW_MODE_MASTER)
- ieee80211_rx_assoc_rq(ieee, skb);
- break;
-
- case IEEE80211_STYPE_AUTH:
- if (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) {
- if (ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATING
- && ieee->iw_mode == IW_MODE_INFRA) {
- IEEE80211_DEBUG_MGMT("Received auth response");
- ieee80211_check_auth_response(ieee, skb);
- } else if (ieee->iw_mode == IW_MODE_MASTER) {
- ieee80211_rx_auth_rq(ieee, skb);
- }
- }
- break;
-
- case IEEE80211_STYPE_PROBE_REQ:
- if ((ieee->softmac_features & IEEE_SOFTMAC_PROBERS) &&
- ((ieee->iw_mode == IW_MODE_ADHOC ||
- ieee->iw_mode == IW_MODE_MASTER) &&
- ieee->state == IEEE80211_LINKED)) {
- ieee80211_rx_probe_rq(ieee, skb);
- }
- break;
-
- case IEEE80211_STYPE_DISASSOC:
- case IEEE80211_STYPE_DEAUTH:
- /* FIXME for now repeat all the association procedure
- * both for disassociation and deauthentication
- */
- if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
- ieee->state == IEEE80211_LINKED &&
- ieee->iw_mode == IW_MODE_INFRA) {
- ieee->state = IEEE80211_ASSOCIATING;
- ieee->softmac_stats.reassoc++;
-
- notify_wx_assoc_event(ieee);
- //HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
- RemovePeerTS(ieee, header->addr2);
- schedule_work(&ieee->associate_procedure_wq);
- }
- break;
- case IEEE80211_STYPE_MANAGE_ACT:
- ieee80211_process_action(ieee, skb);
- break;
- default:
- return -1;
- }
-
- //dev_kfree_skb_any(skb);
- return 0;
-}
-
-/* The following are for a simpler TX queue management.
- * Instead of using netif_[stop/wake]_queue, the driver
- * will use these two functions (plus a reset one) that
- * will internally call the kernel netif_* and take care
- * of the ieee802.11 fragmentation.
- * So, the driver receives a fragment at a time and might
- * call the stop function when it wants, without taking
- * care to have enough room to TX an entire packet.
- * This might be useful if each fragment needs its own
- * descriptor. Thus, just keeping total free memory greater than
- * the max fragmentation threshold is not enough. If the
- * ieee802.11 stack passed a TXB struct, then you would need
- * to keep N free descriptors where
- * N = MAX_PACKET_SIZE / MIN_FRAG_THRESHOLD.
- * In this way you need just one and the 802.11 stack
- * will take care of buffering fragments and pass them to
- * the driver later, when it wakes the queue.
- */
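- /* For example, assuming a ~2346-byte maximum packet size and a 256-byte
- * minimum fragmentation threshold, N would be around 10 descriptors per
- * queue; with this scheme a single free descriptor is enough.
- */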
-void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *ieee)
-{
- unsigned int queue_index = txb->queue_index;
- unsigned long flags;
- int i;
- struct cb_desc *tcb_desc = NULL;
-
- spin_lock_irqsave(&ieee->lock, flags);
-
- /* called with 2nd parm 0, no tx mgmt lock required */
- ieee80211_sta_wakeup(ieee, 0);
-
- /* update the tx status */
- ieee->stats.tx_bytes += le16_to_cpu(txb->payload_size);
- ieee->stats.tx_packets++;
- tcb_desc = (struct cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
- if (tcb_desc->bMulticast)
- ieee->stats.multicast++;
-
- /* if xmit available, just xmit it immediately, else just insert it to the wait queue */
- for (i = 0; i < txb->nr_frags; i++) {
-#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
- if ((skb_queue_len(&ieee->skb_drv_aggQ[queue_index]) != 0) ||
-#else
- if ((skb_queue_len(&ieee->skb_waitQ[queue_index]) != 0) ||
-#endif
- (!ieee->check_nic_enough_desc(ieee->dev, queue_index)) ||
- (ieee->queue_stop)) {
- /* insert the skb packet to the wait queue */
- /* as for the completion function, it does not need
- * to check it any more
- */
- //printk("error:no descriptor left@queue_index %d\n", queue_index);
- //ieee80211_stop_queue(ieee);
-#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
- skb_queue_tail(&ieee->skb_drv_aggQ[queue_index], txb->fragments[i]);
-#else
- skb_queue_tail(&ieee->skb_waitQ[queue_index], txb->fragments[i]);
-#endif
- } else {
- ieee->softmac_data_hard_start_xmit(txb->fragments[i],
- ieee->dev, ieee->rate);
- //ieee->stats.tx_packets++;
- //ieee->stats.tx_bytes += txb->fragments[i]->len;
- //ieee->dev->trans_start = jiffies;
- }
- }
- ieee80211_txb_free(txb);
-
-//exit:
- spin_unlock_irqrestore(&ieee->lock, flags);
-}
-EXPORT_SYMBOL(ieee80211_softmac_xmit);
-
-/* called with ieee->lock acquired */
-static void ieee80211_resume_tx(struct ieee80211_device *ieee)
-{
- int i;
- for (i = ieee->tx_pending.frag; i < ieee->tx_pending.txb->nr_frags; i++) {
- if (ieee->queue_stop) {
- ieee->tx_pending.frag = i;
- return;
- } else {
- ieee->softmac_data_hard_start_xmit(ieee->tx_pending.txb->fragments[i],
- ieee->dev, ieee->rate);
- //(i+1)<ieee->tx_pending.txb->nr_frags);
- ieee->stats.tx_packets++;
- netif_trans_update(ieee->dev);
- }
- }
-
- ieee80211_txb_free(ieee->tx_pending.txb);
- ieee->tx_pending.txb = NULL;
-}
-
-void ieee80211_reset_queue(struct ieee80211_device *ieee)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ieee->lock, flags);
- init_mgmt_queue(ieee);
- if (ieee->tx_pending.txb) {
- ieee80211_txb_free(ieee->tx_pending.txb);
- ieee->tx_pending.txb = NULL;
- }
- ieee->queue_stop = 0;
- spin_unlock_irqrestore(&ieee->lock, flags);
-}
-EXPORT_SYMBOL(ieee80211_reset_queue);
-
-void ieee80211_wake_queue(struct ieee80211_device *ieee)
-{
- unsigned long flags;
- struct sk_buff *skb;
- struct rtl_80211_hdr_3addr *header;
-
- spin_lock_irqsave(&ieee->lock, flags);
- if (!ieee->queue_stop)
- goto exit;
-
- ieee->queue_stop = 0;
-
- if (ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE) {
- while (!ieee->queue_stop && (skb = dequeue_mgmt(ieee))) {
- header = (struct rtl_80211_hdr_3addr *)skb->data;
-
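- /* the sequence number occupies the upper 12 bits of seq_ctl; the low 4 bits are the fragment number */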
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
-
- if (ieee->seq_ctrl[0] == 0xFFF)
- ieee->seq_ctrl[0] = 0;
- else
- ieee->seq_ctrl[0]++;
-
- ieee->softmac_data_hard_start_xmit(skb, ieee->dev, ieee->basic_rate);
- //dev_kfree_skb_any(skb);//edit by thomas
- }
- }
- if (!ieee->queue_stop && ieee->tx_pending.txb)
- ieee80211_resume_tx(ieee);
-
- if (!ieee->queue_stop && netif_queue_stopped(ieee->dev)) {
- ieee->softmac_stats.swtxawake++;
- netif_wake_queue(ieee->dev);
- }
-exit:
- spin_unlock_irqrestore(&ieee->lock, flags);
-}
-EXPORT_SYMBOL(ieee80211_wake_queue);
-
-void ieee80211_stop_queue(struct ieee80211_device *ieee)
-{
- //unsigned long flags;
- //spin_lock_irqsave(&ieee->lock,flags);
-
- if (!netif_queue_stopped(ieee->dev)) {
- netif_stop_queue(ieee->dev);
- ieee->softmac_stats.swtxstop++;
- }
- ieee->queue_stop = 1;
- //spin_unlock_irqrestore(&ieee->lock,flags);
-}
-EXPORT_SYMBOL(ieee80211_stop_queue);
-
-/* called in user context only */
-void ieee80211_start_master_bss(struct ieee80211_device *ieee)
-{
- ieee->assoc_id = 1;
-
- if (ieee->current_network.ssid_len == 0) {
- strncpy(ieee->current_network.ssid,
- IEEE80211_DEFAULT_TX_ESSID,
- IW_ESSID_MAX_SIZE);
-
- ieee->current_network.ssid_len = strlen(IEEE80211_DEFAULT_TX_ESSID);
- ieee->ssid_set = 1;
- }
-
- memcpy(ieee->current_network.bssid, ieee->dev->dev_addr, ETH_ALEN);
-
- ieee->set_chan(ieee->dev, ieee->current_network.channel);
- ieee->state = IEEE80211_LINKED;
- ieee->link_change(ieee->dev);
- notify_wx_assoc_event(ieee);
-
- if (ieee->data_hard_resume)
- ieee->data_hard_resume(ieee->dev);
-
- netif_carrier_on(ieee->dev);
-}
-
-static void ieee80211_start_monitor_mode(struct ieee80211_device *ieee)
-{
- if (ieee->raw_tx) {
- if (ieee->data_hard_resume)
- ieee->data_hard_resume(ieee->dev);
-
- netif_carrier_on(ieee->dev);
- }
-}
-static void ieee80211_start_ibss_wq(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, start_ibss_wq);
- /* iwconfig mode ad-hoc will schedule this and return;
- * on the other hand this will block further iwconfig SET
- * operations because the wx_mutex is held.
- * Anyway, most set operations set a flag to speed up
- * (abort) this wq (when syncro scanning) before sleeping
- * on the mutex
- */
- if (!ieee->proto_started) {
- printk("==========oh driver down return\n");
- return;
- }
- mutex_lock(&ieee->wx_mutex);
-
- if (ieee->current_network.ssid_len == 0) {
- strscpy(ieee->current_network.ssid, IEEE80211_DEFAULT_TX_ESSID,
- sizeof(ieee->current_network.ssid));
- ieee->current_network.ssid_len = strlen(IEEE80211_DEFAULT_TX_ESSID);
- ieee->ssid_set = 1;
- }
-
- /* check if we have this cell in our network list */
- ieee80211_softmac_check_all_nets(ieee);
-
-// if((IS_DOT11D_ENABLE(ieee)) && (ieee->state == IEEE80211_NOLINK))
- if (ieee->state == IEEE80211_NOLINK)
- ieee->current_network.channel = 6;
- /* if not, then the state is not linked. Maybe the user switched to
- * ad-hoc mode just after being in monitor mode, or spent only a
- * short time in managed mode (so the card has had no time to scan
- * all the channels), or we have just brought the iface up after
- * setting ad-hoc mode. So we have to give it another try.
- * Here, in ibss mode, it should be safe to do this without extra
- * care (in bss mode we had to make sure no-one tried to associate
- * when we had just checked ieee->state and were about to start the
- * scan) because in ibss mode the ieee80211_new_net function, when it
- * finds a good net, just sets ieee->state to IEEE80211_LINKED,
- * so, at worst, we waste a bit of time initiating an unneeded syncro
- * scan that will stop at the first round because it sees the state
- * associated.
- */
- if (ieee->state == IEEE80211_NOLINK)
- ieee80211_start_scan_syncro(ieee);
-
- /* the network definitively is not here.. create a new cell */
- if (ieee->state == IEEE80211_NOLINK) {
- printk("creating new IBSS cell\n");
- if (!ieee->wap_set)
- eth_random_addr(ieee->current_network.bssid);
-
- if (ieee->modulation & IEEE80211_CCK_MODULATION) {
- ieee->current_network.rates_len = 4;
-
- ieee->current_network.rates[0] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
- ieee->current_network.rates[1] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
- ieee->current_network.rates[2] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
- ieee->current_network.rates[3] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
- } else {
- ieee->current_network.rates_len = 0;
- }
- if (ieee->modulation & IEEE80211_OFDM_MODULATION) {
- ieee->current_network.rates_ex_len = 8;
-
- ieee->current_network.rates_ex[0] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB;
- ieee->current_network.rates_ex[1] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_9MB;
- ieee->current_network.rates_ex[2] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_12MB;
- ieee->current_network.rates_ex[3] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_18MB;
- ieee->current_network.rates_ex[4] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
- ieee->current_network.rates_ex[5] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_36MB;
- ieee->current_network.rates_ex[6] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_48MB;
- ieee->current_network.rates_ex[7] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB;
-
- ieee->rate = 108;
- } else {
- ieee->current_network.rates_ex_len = 0;
- ieee->rate = 22;
- }
-
- // By default, WMM function will be disabled in IBSS mode
- ieee->current_network.QoS_Enable = 0;
- ieee->SetWirelessMode(ieee->dev, IEEE_G);
- ieee->current_network.atim_window = 0;
- ieee->current_network.capability = WLAN_CAPABILITY_IBSS;
- if (ieee->short_slot)
- ieee->current_network.capability |= WLAN_CAPABILITY_SHORT_SLOT;
- }
-
- ieee->state = IEEE80211_LINKED;
-
- ieee->set_chan(ieee->dev, ieee->current_network.channel);
- ieee->link_change(ieee->dev);
-
- notify_wx_assoc_event(ieee);
-
- ieee80211_start_send_beacons(ieee);
-
- if (ieee->data_hard_resume)
- ieee->data_hard_resume(ieee->dev);
- netif_carrier_on(ieee->dev);
-
- mutex_unlock(&ieee->wx_mutex);
-}
-
-inline void ieee80211_start_ibss(struct ieee80211_device *ieee)
-{
- schedule_delayed_work(&ieee->start_ibss_wq, 150);
-}
-
-/* this is called only in user context, with wx_mutex held */
-void ieee80211_start_bss(struct ieee80211_device *ieee)
-{
- unsigned long flags;
- //
- // Ref: 802.11d 11.1.3.3
- // A STA shall not start a BSS unless it can send a properly formed Beacon frame including a Country IE.
- //
- if (IS_DOT11D_ENABLE(ieee) && !IS_COUNTRY_IE_VALID(ieee)) {
- if (!ieee->bGlobalDomain)
- return;
- }
- /* check if we have already found the net we
- * are interested in (if any).
- * if not (we are disassociated and we are not
- * in associating / authenticating phase) start the background scanning.
- */
- ieee80211_softmac_check_all_nets(ieee);
-
- /* ensure no-one starts an association process (thus setting
- * ieee->state to IEEE80211_ASSOCIATING) while we
- * have just checked it and are about to enable the scan.
- * The ieee80211_new_net function is always called with the
- * lock held (from both ieee80211_softmac_check_all_nets and
- * the rx path), so we cannot be in the middle of that function
- */
- spin_lock_irqsave(&ieee->lock, flags);
-
- if (ieee->state == IEEE80211_NOLINK) {
- ieee->actscanning = true;
- ieee80211_start_scan(ieee);
- }
- spin_unlock_irqrestore(&ieee->lock, flags);
-}
-
-/* called only in userspace context */
-void ieee80211_disassociate(struct ieee80211_device *ieee)
-{
- netif_carrier_off(ieee->dev);
- if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)
- ieee80211_reset_queue(ieee);
-
- if (ieee->data_hard_stop)
- ieee->data_hard_stop(ieee->dev);
- if (IS_DOT11D_ENABLE(ieee))
- dot11d_reset(ieee);
- ieee->state = IEEE80211_NOLINK;
- ieee->is_set_key = false;
- ieee->link_change(ieee->dev);
- //HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
- notify_wx_assoc_event(ieee);
-}
-EXPORT_SYMBOL(ieee80211_disassociate);
-
-static void ieee80211_associate_retry_wq(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, associate_retry_wq);
- unsigned long flags;
-
- mutex_lock(&ieee->wx_mutex);
- if (!ieee->proto_started)
- goto exit;
-
- if (ieee->state != IEEE80211_ASSOCIATING_RETRY)
- goto exit;
-
- /* until we set the state to IEEE80211_NOLINK
- * there is no possibility of someone else trying
- * to start an association procedure (we get here with
- * ieee->state = IEEE80211_ASSOCIATING).
- * When we set the state to IEEE80211_NOLINK it is possible
- * that the RX path runs an attempt to associate, but
- * both ieee80211_softmac_check_all_nets and the
- * RX path work with ieee->lock held, so there are no
- * problems. If we are still disassociated then start a scan.
- * The lock here is necessary to ensure no one tries to start
- * an association procedure while we have just checked the
- * state and are about to start the scan.
- */
- ieee->state = IEEE80211_NOLINK;
-
- ieee80211_softmac_check_all_nets(ieee);
-
- spin_lock_irqsave(&ieee->lock, flags);
-
- if (ieee->state == IEEE80211_NOLINK)
- ieee80211_start_scan(ieee);
-
- spin_unlock_irqrestore(&ieee->lock, flags);
-
-exit:
- mutex_unlock(&ieee->wx_mutex);
-}
-
-struct sk_buff *ieee80211_get_beacon_(struct ieee80211_device *ieee)
-{
- u8 broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
- struct sk_buff *skb;
- struct ieee80211_probe_response *b;
-
- skb = ieee80211_probe_resp(ieee, broadcast_addr);
-
- if (!skb)
- return NULL;
-
- b = (struct ieee80211_probe_response *)skb->data;
- b->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_BEACON);
-
- return skb;
-}
-
-struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee)
-{
- struct sk_buff *skb;
- struct ieee80211_probe_response *b;
-
- skb = ieee80211_get_beacon_(ieee);
- if (!skb)
- return NULL;
-
- b = (struct ieee80211_probe_response *)skb->data;
- b->header.seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
-
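- /* the 802.11 sequence number is 12 bits wide, hence the wrap at 0xFFF */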
- if (ieee->seq_ctrl[0] == 0xFFF)
- ieee->seq_ctrl[0] = 0;
- else
- ieee->seq_ctrl[0]++;
-
- return skb;
-}
-EXPORT_SYMBOL(ieee80211_get_beacon);
-
-void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee)
-{
- ieee->sync_scan_hurryup = 1;
- mutex_lock(&ieee->wx_mutex);
- ieee80211_stop_protocol(ieee);
- mutex_unlock(&ieee->wx_mutex);
-}
-EXPORT_SYMBOL(ieee80211_softmac_stop_protocol);
-
-void ieee80211_stop_protocol(struct ieee80211_device *ieee)
-{
- if (!ieee->proto_started)
- return;
-
- ieee->proto_started = 0;
-
- ieee80211_stop_send_beacons(ieee);
- del_timer_sync(&ieee->associate_timer);
- cancel_delayed_work(&ieee->associate_retry_wq);
- cancel_delayed_work(&ieee->start_ibss_wq);
- ieee80211_stop_scan(ieee);
-
- ieee80211_disassociate(ieee);
- RemoveAllTS(ieee); //added as we disconnect from the previous BSS, Remove all TS
-}
-
-void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee)
-{
- ieee->sync_scan_hurryup = 0;
- mutex_lock(&ieee->wx_mutex);
- ieee80211_start_protocol(ieee);
- mutex_unlock(&ieee->wx_mutex);
-}
-EXPORT_SYMBOL(ieee80211_softmac_start_protocol);
-
-void ieee80211_start_protocol(struct ieee80211_device *ieee)
-{
- short ch = 0;
- int i = 0;
-
- if (ieee->proto_started)
- return;
-
- ieee->proto_started = 1;
-
- if (ieee->current_network.channel == 0) {
- do {
- ch++;
- if (ch > MAX_CHANNEL_NUMBER)
- return; /* no channel found */
- } while (!GET_DOT11D_INFO(ieee)->channel_map[ch]);
- ieee->current_network.channel = ch;
- }
-
- if (ieee->current_network.beacon_interval == 0)
- ieee->current_network.beacon_interval = 100;
-// printk("===>%s(), chan:%d\n", __func__, ieee->current_network.channel);
-// ieee->set_chan(ieee->dev,ieee->current_network.channel);
-
- for (i = 0; i < 17; i++) {
- ieee->last_rxseq_num[i] = -1;
- ieee->last_rxfrag_num[i] = -1;
- ieee->last_packet_time[i] = 0;
- }
-
- ieee->init_wmmparam_flag = 0;//reinitialize AC_xx_PARAM registers.
-
- /* if the user set the MAC of the ad-hoc cell and then
- * switches to managed mode, shall we make sure that association
- * attempts do not fail just because the user provided the essid
- * while the nic is still checking for the AP MAC ??
- */
- if (ieee->iw_mode == IW_MODE_INFRA)
- ieee80211_start_bss(ieee);
-
- else if (ieee->iw_mode == IW_MODE_ADHOC)
- ieee80211_start_ibss(ieee);
-
- else if (ieee->iw_mode == IW_MODE_MASTER)
- ieee80211_start_master_bss(ieee);
-
- else if (ieee->iw_mode == IW_MODE_MONITOR)
- ieee80211_start_monitor_mode(ieee);
-}
-
-#define DRV_NAME "Ieee80211"
-void ieee80211_softmac_init(struct ieee80211_device *ieee)
-{
- int i;
- memset(&ieee->current_network, 0, sizeof(struct ieee80211_network));
-
- ieee->state = IEEE80211_NOLINK;
- ieee->sync_scan_hurryup = 0;
- for (i = 0; i < 5; i++)
- ieee->seq_ctrl[i] = 0;
-
- ieee->dot11d_info = kzalloc(sizeof(struct rt_dot11d_info), GFP_KERNEL);
- if (!ieee->dot11d_info)
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc memory for DOT11D\n");
- //added for AP roaming
- ieee->LinkDetectInfo.SlotNum = 2;
- ieee->LinkDetectInfo.NumRecvBcnInPeriod = 0;
- ieee->LinkDetectInfo.NumRecvDataInPeriod = 0;
-
- ieee->assoc_id = 0;
- ieee->queue_stop = 0;
- ieee->scanning = 0;
- ieee->softmac_features = 0; // so IEEE2100-like drivers are happy
- ieee->wap_set = 0;
- ieee->ssid_set = 0;
- ieee->proto_started = 0;
- ieee->basic_rate = IEEE80211_DEFAULT_BASIC_RATE;
- ieee->rate = 22;
- ieee->ps = IEEE80211_PS_DISABLED;
- ieee->sta_sleep = 0;
- ieee->Regdot11HTOperationalRateSet[0] = 0xff;//support MCS 0~7
- ieee->Regdot11HTOperationalRateSet[1] = 0xff;//support MCS 8~15
- ieee->Regdot11HTOperationalRateSet[4] = 0x01;
- //added by amy
- ieee->actscanning = false;
- ieee->beinretry = false;
- ieee->is_set_key = false;
- init_mgmt_queue(ieee);
-
- ieee->sta_edca_param[0] = 0x0000A403;
- ieee->sta_edca_param[1] = 0x0000A427;
- ieee->sta_edca_param[2] = 0x005E4342;
- ieee->sta_edca_param[3] = 0x002F3262;
- ieee->aggregation = true;
- ieee->enable_rx_imm_BA = true;
- ieee->tx_pending.txb = NULL;
-
- timer_setup(&ieee->associate_timer, ieee80211_associate_abort_cb, 0);
-
- timer_setup(&ieee->beacon_timer, ieee80211_send_beacon_cb, 0);
-
- INIT_DELAYED_WORK(&ieee->start_ibss_wq, ieee80211_start_ibss_wq);
- INIT_WORK(&ieee->associate_complete_wq, ieee80211_associate_complete_wq);
- INIT_WORK(&ieee->associate_procedure_wq, ieee80211_associate_procedure_wq);
- INIT_DELAYED_WORK(&ieee->softmac_scan_wq, ieee80211_softmac_scan_wq);
- INIT_DELAYED_WORK(&ieee->associate_retry_wq, ieee80211_associate_retry_wq);
- INIT_WORK(&ieee->wx_sync_scan_wq, ieee80211_wx_sync_scan_wq);
-
- mutex_init(&ieee->wx_mutex);
- mutex_init(&ieee->scan_mutex);
-
- spin_lock_init(&ieee->mgmt_tx_lock);
- spin_lock_init(&ieee->beacon_lock);
-
- INIT_WORK(&ieee->ps_task, ieee80211_sta_ps);
-}
-
-void ieee80211_softmac_free(struct ieee80211_device *ieee)
-{
- mutex_lock(&ieee->wx_mutex);
- kfree(ieee->dot11d_info);
- ieee->dot11d_info = NULL;
- del_timer_sync(&ieee->associate_timer);
-
- cancel_delayed_work(&ieee->associate_retry_wq);
- cancel_work_sync(&ieee->ps_task);
- mutex_unlock(&ieee->wx_mutex);
-}
-
-/********************************************************
- * Start of WPA code. *
- * this is stolen from the ipw2200 driver *
- ********************************************************/
-static int ieee80211_wpa_enable(struct ieee80211_device *ieee, int value)
-{
- /* This is called when wpa_supplicant loads and closes the driver
- * interface. */
- printk("%s WPA\n", value ? "enabling" : "disabling");
- ieee->wpa_enabled = value;
- return 0;
-}
-
-static void ieee80211_wpa_assoc_frame(struct ieee80211_device *ieee,
- char *wpa_ie, int wpa_ie_len)
-{
- /* make sure WPA is enabled */
- ieee80211_wpa_enable(ieee, 1);
-
- ieee80211_disassociate(ieee);
-}
-
-static int ieee80211_wpa_mlme(struct ieee80211_device *ieee, int command, int reason)
-{
- int ret = 0;
-
- switch (command) {
- case IEEE_MLME_STA_DEAUTH:
- // silently ignore
- break;
-
- case IEEE_MLME_STA_DISASSOC:
- ieee80211_disassociate(ieee);
- break;
-
- default:
- printk("Unknown MLME request: %d\n", command);
- ret = -EOPNOTSUPP;
- }
-
- return ret;
-}
-
-static int ieee80211_wpa_set_wpa_ie(struct ieee80211_device *ieee,
- struct ieee_param *param, int plen)
-{
- u8 *buf;
-
- if (param->u.wpa_ie.len > MAX_WPA_IE_LEN)
- return -EINVAL;
-
- if (param->u.wpa_ie.len) {
- buf = kmemdup(param->u.wpa_ie.data, param->u.wpa_ie.len,
- GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- kfree(ieee->wpa_ie);
- ieee->wpa_ie = buf;
- ieee->wpa_ie_len = param->u.wpa_ie.len;
- } else {
- kfree(ieee->wpa_ie);
- ieee->wpa_ie = NULL;
- ieee->wpa_ie_len = 0;
- }
-
- ieee80211_wpa_assoc_frame(ieee, ieee->wpa_ie, ieee->wpa_ie_len);
- return 0;
-}
-
-#define AUTH_ALG_OPEN_SYSTEM 0x1
-#define AUTH_ALG_SHARED_KEY 0x2
-
-static int ieee80211_wpa_set_auth_algs(struct ieee80211_device *ieee, int value)
-{
- struct ieee80211_security sec = {
- .flags = SEC_AUTH_MODE,
- };
-
- if (value & AUTH_ALG_SHARED_KEY) {
- sec.auth_mode = WLAN_AUTH_SHARED_KEY;
- ieee->open_wep = 0;
- ieee->auth_mode = 1;
- } else if (value & AUTH_ALG_OPEN_SYSTEM) {
- sec.auth_mode = WLAN_AUTH_OPEN;
- ieee->open_wep = 1;
- ieee->auth_mode = 0;
- } else if (value & IW_AUTH_ALG_LEAP) {
- sec.auth_mode = WLAN_AUTH_LEAP;
- ieee->open_wep = 1;
- ieee->auth_mode = 2;
- }
-
- if (ieee->set_security)
- ieee->set_security(ieee->dev, &sec);
- //else
- // ret = -EOPNOTSUPP;
-
- return 0;
-}
-
-static int ieee80211_wpa_set_param(struct ieee80211_device *ieee, u8 name, u32 value)
-{
- int ret = 0;
- unsigned long flags;
-
- switch (name) {
- case IEEE_PARAM_WPA_ENABLED:
- ret = ieee80211_wpa_enable(ieee, value);
- break;
-
- case IEEE_PARAM_TKIP_COUNTERMEASURES:
- ieee->tkip_countermeasures = value;
- break;
-
- case IEEE_PARAM_DROP_UNENCRYPTED: {
- /* HACK:
- *
- * wpa_supplicant calls set_wpa_enabled when the driver
- * is loaded and unloaded, regardless of if WPA is being
- * used. No other calls are made which can be used to
- * determine if encryption will be used or not prior to
- * association being expected. If encryption is not being
- * used, drop_unencrypted is set to false, else true -- we
- * can use this to determine if the CAP_PRIVACY_ON bit should
- * be set.
- */
- struct ieee80211_security sec = {
- .flags = SEC_ENABLED,
- .enabled = value,
- };
- ieee->drop_unencrypted = value;
- /* We only change SEC_LEVEL for open mode. Others
- * are set by ipw_wpa_set_encryption.
- */
- if (!value) {
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_0;
- } else {
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_1;
- }
- if (ieee->set_security)
- ieee->set_security(ieee->dev, &sec);
- break;
- }
-
- case IEEE_PARAM_PRIVACY_INVOKED:
- ieee->privacy_invoked = value;
- break;
-
- case IEEE_PARAM_AUTH_ALGS:
- ret = ieee80211_wpa_set_auth_algs(ieee, value);
- break;
-
- case IEEE_PARAM_IEEE_802_1X:
- ieee->ieee802_1x = value;
- break;
- case IEEE_PARAM_WPAX_SELECT:
- // added for WPA2 mixed mode
- spin_lock_irqsave(&ieee->wpax_suitlist_lock, flags);
- ieee->wpax_type_set = 1;
- ieee->wpax_type_notify = value;
- spin_unlock_irqrestore(&ieee->wpax_suitlist_lock, flags);
- break;
-
- default:
- printk("Unknown WPA param: %d\n", name);
- ret = -EOPNOTSUPP;
- }
-
- return ret;
-}
-
-/* implementation borrowed from hostap driver */
-static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
- struct ieee_param *param, int param_len)
-{
- int ret = 0;
- const char *module = NULL;
-
- struct ieee80211_crypto_ops *ops = NULL;
- struct ieee80211_crypt_data **crypt;
-
- struct ieee80211_security sec = {
- .flags = 0,
- };
-
- param->u.crypt.err = 0;
- param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
-
- if (param_len !=
- (int)((char *)param->u.crypt.key - (char *)param) +
- param->u.crypt.key_len) {
- printk("Len mismatch %d, %d\n", param_len,
- param->u.crypt.key_len);
- return -EINVAL;
- }
- if (is_broadcast_ether_addr(param->sta_addr)) {
- if (param->u.crypt.idx >= WEP_KEYS)
- return -EINVAL;
- crypt = &ieee->crypt[param->u.crypt.idx];
- } else {
- return -EINVAL;
- }
-
- if (strcmp(param->u.crypt.alg, "none") == 0) {
- if (crypt) {
- sec.enabled = 0;
- // FIXME FIXME
- //sec.encrypt = 0;
- sec.level = SEC_LEVEL_0;
- sec.flags |= SEC_ENABLED | SEC_LEVEL;
- ieee80211_crypt_delayed_deinit(ieee, crypt);
- }
- goto done;
- }
- sec.enabled = 1;
-// FIXME FIXME
-// sec.encrypt = 1;
- sec.flags |= SEC_ENABLED;
-
- /* IPW HW cannot build TKIP MIC, host decryption still needed. */
- if (!(ieee->host_encrypt || ieee->host_decrypt) &&
- strcmp(param->u.crypt.alg, "TKIP"))
- goto skip_host_crypt;
-
- // set WEP40 first; it will be adjusted to WEP104 or WEP40 elsewhere
- if (!strcmp(param->u.crypt.alg, "WEP"))
- module = "ieee80211_crypt_wep";
- else if (!strcmp(param->u.crypt.alg, "TKIP"))
- module = "ieee80211_crypt_tkip";
- else if (!strcmp(param->u.crypt.alg, "CCMP"))
- module = "ieee80211_crypt_ccmp";
- if (module)
- ops = try_then_request_module(ieee80211_get_crypto_ops(param->u.crypt.alg),
- module);
- if (!ops) {
- printk("unknown crypto alg '%s'\n", param->u.crypt.alg);
- param->u.crypt.err = IEEE_CRYPT_ERR_UNKNOWN_ALG;
- ret = -EINVAL;
- goto done;
- }
-
- if (!*crypt || (*crypt)->ops != ops) {
- struct ieee80211_crypt_data *new_crypt;
-
- ieee80211_crypt_delayed_deinit(ieee, crypt);
-
- new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
- if (!new_crypt) {
- ret = -ENOMEM;
- goto done;
- }
- new_crypt->ops = ops;
- if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
- new_crypt->priv =
- new_crypt->ops->init(param->u.crypt.idx);
-
- if (!new_crypt->priv) {
- kfree(new_crypt);
- param->u.crypt.err = IEEE_CRYPT_ERR_CRYPT_INIT_FAILED;
- ret = -EINVAL;
- goto done;
- }
-
- *crypt = new_crypt;
- }
-
- if (param->u.crypt.key_len > 0 && (*crypt)->ops->set_key &&
- (*crypt)->ops->set_key(param->u.crypt.key,
- param->u.crypt.key_len, param->u.crypt.seq,
- (*crypt)->priv) < 0) {
- printk("key setting failed\n");
- param->u.crypt.err = IEEE_CRYPT_ERR_KEY_SET_FAILED;
- ret = -EINVAL;
- goto done;
- }
-
- skip_host_crypt:
- if (param->u.crypt.set_tx) {
- ieee->tx_keyidx = param->u.crypt.idx;
- sec.active_key = param->u.crypt.idx;
- sec.flags |= SEC_ACTIVE_KEY;
- } else {
- sec.flags &= ~SEC_ACTIVE_KEY;
- }
- memcpy(sec.keys[param->u.crypt.idx],
- param->u.crypt.key,
- param->u.crypt.key_len);
- sec.key_sizes[param->u.crypt.idx] = param->u.crypt.key_len;
- sec.flags |= (1 << param->u.crypt.idx);
-
- if (strcmp(param->u.crypt.alg, "WEP") == 0) {
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_1;
- } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_2;
- } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_3;
- }
- done:
- if (ieee->set_security)
- ieee->set_security(ieee->dev, &sec);
-
- /* Do not reset port if card is in Managed mode since resetting will
- * generate new IEEE 802.11 authentication which may end up in looping
- * with IEEE 802.1X. If your hardware requires a reset after WEP
- * configuration (for example... Prism2), implement the reset_port in
- * the callbacks structures used to initialize the 802.11 stack. */
- if (ieee->reset_on_keychange &&
- ieee->iw_mode != IW_MODE_INFRA &&
- ieee->reset_port &&
- ieee->reset_port(ieee->dev)) {
- printk("reset_port failed\n");
- param->u.crypt.err = IEEE_CRYPT_ERR_CARD_CONF_FAILED;
- return -EINVAL;
- }
-
- return ret;
-}
-
-static inline struct sk_buff *ieee80211_disassociate_skb(struct ieee80211_network *beacon,
- struct ieee80211_device *ieee,
- u8 asRsn)
-{
- struct sk_buff *skb;
- struct ieee80211_disassoc *disass;
-
- skb = dev_alloc_skb(sizeof(struct ieee80211_disassoc));
- if (!skb)
- return NULL;
-
- disass = skb_put(skb, sizeof(struct ieee80211_disassoc));
- disass->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_DISASSOC);
- disass->header.duration_id = 0;
-
- memcpy(disass->header.addr1, beacon->bssid, ETH_ALEN);
- memcpy(disass->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
- memcpy(disass->header.addr3, beacon->bssid, ETH_ALEN);
-
- disass->reason = cpu_to_le16(asRsn);
- return skb;
-}
-
-void
-SendDisassociation(struct ieee80211_device *ieee,
- u8 *asSta,
- u8 asRsn
-)
-{
- struct ieee80211_network *beacon = &ieee->current_network;
- struct sk_buff *skb;
-
- skb = ieee80211_disassociate_skb(beacon, ieee, asRsn);
- if (skb) {
- softmac_mgmt_xmit(skb, ieee);
- //dev_kfree_skb_any(skb);//edit by thomas
- }
-}
-EXPORT_SYMBOL(SendDisassociation);
-
-int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_point *p)
-{
- struct ieee_param *param;
- int ret = 0;
-
- mutex_lock(&ieee->wx_mutex);
- //IEEE_DEBUG_INFO("wpa_supplicant: len=%d\n", p->length);
-
- if (p->length < sizeof(struct ieee_param) || !p->pointer) {
- ret = -EINVAL;
- goto out;
- }
-
- param = memdup_user(p->pointer, p->length);
- if (IS_ERR(param)) {
- ret = PTR_ERR(param);
- goto out;
- }
-
- switch (param->cmd) {
- case IEEE_CMD_SET_WPA_PARAM:
- ret = ieee80211_wpa_set_param(ieee, param->u.wpa_param.name,
- param->u.wpa_param.value);
- break;
-
- case IEEE_CMD_SET_WPA_IE:
- ret = ieee80211_wpa_set_wpa_ie(ieee, param, p->length);
- break;
-
- case IEEE_CMD_SET_ENCRYPTION:
- ret = ieee80211_wpa_set_encryption(ieee, param, p->length);
- break;
-
- case IEEE_CMD_MLME:
- ret = ieee80211_wpa_mlme(ieee, param->u.mlme.command,
- param->u.mlme.reason_code);
- break;
-
- default:
- printk("Unknown WPA supplicant request: %d\n", param->cmd);
- ret = -EOPNOTSUPP;
- break;
- }
-
- if (ret == 0 && copy_to_user(p->pointer, param, p->length))
- ret = -EFAULT;
-
- kfree(param);
-out:
- mutex_unlock(&ieee->wx_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(ieee80211_wpa_supplicant_ioctl);
-
-void notify_wx_assoc_event(struct ieee80211_device *ieee)
-{
- union iwreq_data wrqu;
-
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- if (ieee->state == IEEE80211_LINKED)
- memcpy(wrqu.ap_addr.sa_data, ieee->current_network.bssid, ETH_ALEN);
- else
- eth_zero_addr(wrqu.ap_addr.sa_data);
- wireless_send_event(ieee->dev, SIOCGIWAP, &wrqu, NULL);
-}
-EXPORT_SYMBOL(notify_wx_assoc_event);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
deleted file mode 100644
index afa92ddfa005..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
+++ /dev/null
@@ -1,598 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* IEEE 802.11 SoftMAC layer
- * Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
- *
- * Mostly extracted from the rtl8180-sa2400 driver for the
- * in-kernel generic ieee802.11 stack.
- *
- * Some pieces of code might be stolen from ipw2100 driver
- * copyright of who own it's copyright ;-)
- *
- * PS wx handler mostly stolen from hostap, copyright who
- * own it's copyright ;-)
- */
-
-
-#include <linux/etherdevice.h>
-
-#include "ieee80211.h"
-#include "dot11d.h"
-/* FIXME: add A freqs */
-
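- /* center frequencies in MHz of the 14 2.4 GHz channels (index = channel - 1) */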
-const long ieee80211_wlan_frequencies[] = {
- 2412, 2417, 2422, 2427,
- 2432, 2437, 2442, 2447,
- 2452, 2457, 2462, 2467,
- 2472, 2484
-};
-EXPORT_SYMBOL(ieee80211_wlan_frequencies);
-
-int ieee80211_wx_set_freq(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- int ret;
- struct iw_freq *fwrq = &wrqu->freq;
-
- mutex_lock(&ieee->wx_mutex);
-
- if (ieee->iw_mode == IW_MODE_INFRA) {
- ret = -EOPNOTSUPP;
- goto out;
- }
-
- /* if setting by freq convert to channel */
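- /* e.g. 2.437 GHz arrives as m = 243700000 with e = 1; m / 100000 = 2437,
- * which matches ieee80211_wlan_frequencies[5], so the request becomes channel 6
- */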
- if (fwrq->e == 1) {
- if ((fwrq->m >= (int)2.412e8 &&
- fwrq->m <= (int)2.487e8)) {
- int f = fwrq->m / 100000;
- int c = 0;
-
- while ((c < 14) && (f != ieee80211_wlan_frequencies[c]))
- c++;
-
- /* hack to fall through */
- fwrq->e = 0;
- fwrq->m = c + 1;
- }
- }
-
- if (fwrq->e > 0 || fwrq->m > 14 || fwrq->m < 1) {
- ret = -EOPNOTSUPP;
- goto out;
-
- } else { /* Set the channel */
-
- if (!(GET_DOT11D_INFO(ieee)->channel_map)[fwrq->m]) {
- ret = -EINVAL;
- goto out;
- }
- ieee->current_network.channel = fwrq->m;
- ieee->set_chan(ieee->dev, ieee->current_network.channel);
-
- if (ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER)
- if (ieee->state == IEEE80211_LINKED) {
- ieee80211_stop_send_beacons(ieee);
- ieee80211_start_send_beacons(ieee);
- }
- }
-
- ret = 0;
-out:
- mutex_unlock(&ieee->wx_mutex);
- return ret;
-}
-EXPORT_SYMBOL(ieee80211_wx_set_freq);
-
-int ieee80211_wx_get_freq(struct ieee80211_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- struct iw_freq *fwrq = &wrqu->freq;
-
- if (ieee->current_network.channel == 0)
- return -1;
- /* NM 0.7.0 will not accept channel any more. */
- fwrq->m = ieee80211_wlan_frequencies[ieee->current_network.channel - 1] * 100000;
- fwrq->e = 1;
- /* fwrq->m = ieee->current_network.channel; */
- /* fwrq->e = 0; */
-
- return 0;
-}
-EXPORT_SYMBOL(ieee80211_wx_get_freq);
-
-int ieee80211_wx_get_wap(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- unsigned long flags;
-
- wrqu->ap_addr.sa_family = ARPHRD_ETHER;
-
- if (ieee->iw_mode == IW_MODE_MONITOR)
- return -1;
-
- /* We want to avoid giving the user inconsistent info */
- spin_lock_irqsave(&ieee->lock, flags);
-
- if (ieee->state != IEEE80211_LINKED &&
- ieee->state != IEEE80211_LINKED_SCANNING &&
- ieee->wap_set == 0)
-
- eth_zero_addr(wrqu->ap_addr.sa_data);
- else
- memcpy(wrqu->ap_addr.sa_data,
- ieee->current_network.bssid, ETH_ALEN);
-
- spin_unlock_irqrestore(&ieee->lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL(ieee80211_wx_get_wap);
-
-int ieee80211_wx_set_wap(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *awrq,
- char *extra)
-{
-
- int ret = 0;
- unsigned long flags;
-
- short ifup = ieee->proto_started; /* dev->flags & IFF_UP; */
- struct sockaddr *temp = (struct sockaddr *)awrq;
-
- ieee->sync_scan_hurryup = 1;
-
- mutex_lock(&ieee->wx_mutex);
- /* use ifconfig hw ether */
- if (ieee->iw_mode == IW_MODE_MASTER) {
- ret = -1;
- goto out;
- }
-
- if (temp->sa_family != ARPHRD_ETHER) {
- ret = -EINVAL;
- goto out;
- }
-
- if (ifup)
- ieee80211_stop_protocol(ieee);
-
- /* just to avoid giving inconsistent info in the
- * get wx method; not really needed otherwise
- */
- spin_lock_irqsave(&ieee->lock, flags);
-
- memcpy(ieee->current_network.bssid, temp->sa_data, ETH_ALEN);
- ieee->wap_set = !is_zero_ether_addr(temp->sa_data);
-
- spin_unlock_irqrestore(&ieee->lock, flags);
-
- if (ifup)
- ieee80211_start_protocol(ieee);
-out:
- mutex_unlock(&ieee->wx_mutex);
- return ret;
-}
-EXPORT_SYMBOL(ieee80211_wx_set_wap);
-
-int ieee80211_wx_get_essid(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b)
-{
- int len, ret = 0;
- unsigned long flags;
-
- if (ieee->iw_mode == IW_MODE_MONITOR)
- return -1;
-
- /* We want to avoid giving the user inconsistent info */
- spin_lock_irqsave(&ieee->lock, flags);
-
- if (ieee->current_network.ssid[0] == '\0' ||
- ieee->current_network.ssid_len == 0) {
- ret = -1;
- goto out;
- }
-
- if (ieee->state != IEEE80211_LINKED &&
- ieee->state != IEEE80211_LINKED_SCANNING &&
- ieee->ssid_set == 0) {
- ret = -1;
- goto out;
- }
- len = ieee->current_network.ssid_len;
- wrqu->essid.length = len;
- strncpy(b, ieee->current_network.ssid, len);
- wrqu->essid.flags = 1;
-
-out:
- spin_unlock_irqrestore(&ieee->lock, flags);
-
- return ret;
-
-}
-EXPORT_SYMBOL(ieee80211_wx_get_essid);
-
-int ieee80211_wx_set_rate(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
-
- u32 target_rate = wrqu->bitrate.value;
-
- ieee->rate = target_rate / 100000;
- /* FIXME: we might want to limit rate also in management protocols. */
- return 0;
-}
-EXPORT_SYMBOL(ieee80211_wx_set_rate);
-
-int ieee80211_wx_get_rate(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- u32 tmp_rate;
-
- tmp_rate = TxCountToDataRate(ieee, ieee->softmac_stats.CurrentShowTxate);
-
- wrqu->bitrate.value = tmp_rate * 500000;
-
- return 0;
-}
-EXPORT_SYMBOL(ieee80211_wx_get_rate);
-
-int ieee80211_wx_set_rts(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- if (wrqu->rts.disabled || !wrqu->rts.fixed) {
- ieee->rts = DEFAULT_RTS_THRESHOLD;
- } else {
- if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
- wrqu->rts.value > MAX_RTS_THRESHOLD)
- return -EINVAL;
- ieee->rts = wrqu->rts.value;
- }
- return 0;
-}
-EXPORT_SYMBOL(ieee80211_wx_set_rts);
-
-int ieee80211_wx_get_rts(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- wrqu->rts.value = ieee->rts;
- wrqu->rts.fixed = 0; /* no auto select */
- wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
- return 0;
-}
-EXPORT_SYMBOL(ieee80211_wx_get_rts);
-
-int ieee80211_wx_set_mode(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
-
- ieee->sync_scan_hurryup = 1;
-
- mutex_lock(&ieee->wx_mutex);
-
- if (wrqu->mode == ieee->iw_mode)
- goto out;
-
- if (wrqu->mode == IW_MODE_MONITOR)
- ieee->dev->type = ARPHRD_IEEE80211;
- else
- ieee->dev->type = ARPHRD_ETHER;
-
- if (!ieee->proto_started) {
- ieee->iw_mode = wrqu->mode;
- } else {
- ieee80211_stop_protocol(ieee);
- ieee->iw_mode = wrqu->mode;
- ieee80211_start_protocol(ieee);
- }
-
-out:
- mutex_unlock(&ieee->wx_mutex);
- return 0;
-}
-EXPORT_SYMBOL(ieee80211_wx_set_mode);
-
-void ieee80211_wx_sync_scan_wq(struct work_struct *work)
-{
- struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, wx_sync_scan_wq);
- short chan;
- enum ht_extension_chan_offset chan_offset = 0;
- enum ht_channel_width bandwidth = 0;
- int b40M = 0;
-
- chan = ieee->current_network.channel;
- netif_carrier_off(ieee->dev);
-
- if (ieee->data_hard_stop)
- ieee->data_hard_stop(ieee->dev);
-
- ieee80211_stop_send_beacons(ieee);
-
- ieee->state = IEEE80211_LINKED_SCANNING;
- ieee->link_change(ieee->dev);
- ieee->InitialGainHandler(ieee->dev, IG_Backup);
- if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT && ieee->pHTInfo->bCurBW40MHz) {
- b40M = 1;
- chan_offset = ieee->pHTInfo->CurSTAExtChnlOffset;
- bandwidth = (enum ht_channel_width)ieee->pHTInfo->bCurBW40MHz;
- printk("Scan in 40M, force to 20M first:%d, %d\n", chan_offset, bandwidth);
- ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
- }
- ieee80211_start_scan_syncro(ieee);
- if (b40M) {
- printk("Scan in 20M, back to 40M\n");
- if (chan_offset == HT_EXTCHNL_OFFSET_UPPER)
- ieee->set_chan(ieee->dev, chan + 2);
- else if (chan_offset == HT_EXTCHNL_OFFSET_LOWER)
- ieee->set_chan(ieee->dev, chan - 2);
- else
- ieee->set_chan(ieee->dev, chan);
- ieee->SetBWModeHandler(ieee->dev, bandwidth, chan_offset);
- } else {
- ieee->set_chan(ieee->dev, chan);
- }
-
- ieee->InitialGainHandler(ieee->dev, IG_Restore);
- ieee->state = IEEE80211_LINKED;
- ieee->link_change(ieee->dev);
- /* To prevent calling watch_dog immediately after the scan. */
- if (ieee->LinkDetectInfo.NumRecvBcnInPeriod == 0 || ieee->LinkDetectInfo.NumRecvDataInPeriod == 0) {
- ieee->LinkDetectInfo.NumRecvBcnInPeriod = 1;
- ieee->LinkDetectInfo.NumRecvDataInPeriod = 1;
- }
- if (ieee->data_hard_resume)
- ieee->data_hard_resume(ieee->dev);
-
- if (ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER)
- ieee80211_start_send_beacons(ieee);
-
- netif_carrier_on(ieee->dev);
- mutex_unlock(&ieee->wx_mutex);
-
-}
-
-int ieee80211_wx_set_scan(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- int ret = 0;
-
- mutex_lock(&ieee->wx_mutex);
-
- if (ieee->iw_mode == IW_MODE_MONITOR || !(ieee->proto_started)) {
- ret = -1;
- goto out;
- }
-
- if (ieee->state == IEEE80211_LINKED) {
- queue_work(ieee->wq, &ieee->wx_sync_scan_wq);
- /* intentionally leave the wx_mutex held; ieee80211_wx_sync_scan_wq() will release it */
- return 0;
- }
-
-out:
- mutex_unlock(&ieee->wx_mutex);
- return ret;
-}
-EXPORT_SYMBOL(ieee80211_wx_set_scan);
-
-int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra)
-{
-
- int ret = 0, len;
- short proto_started;
- unsigned long flags;
-
- ieee->sync_scan_hurryup = 1;
- mutex_lock(&ieee->wx_mutex);
-
- proto_started = ieee->proto_started;
-
- if (wrqu->essid.length > IW_ESSID_MAX_SIZE) {
- ret = -E2BIG;
- goto out;
- }
-
- if (ieee->iw_mode == IW_MODE_MONITOR) {
- ret = -1;
- goto out;
- }
-
- if (proto_started)
- ieee80211_stop_protocol(ieee);
-
-
- /* this is just to be sure that the GET wx callback
- * has consistent info; not needed otherwise
- */
- spin_lock_irqsave(&ieee->lock, flags);
-
- if (wrqu->essid.flags && wrqu->essid.length) {
- /* first flush current network.ssid */
- len = ((wrqu->essid.length - 1) < IW_ESSID_MAX_SIZE) ? (wrqu->essid.length - 1) : IW_ESSID_MAX_SIZE;
- strncpy(ieee->current_network.ssid, extra, len + 1);
- ieee->current_network.ssid_len = len + 1;
- ieee->ssid_set = 1;
- } else {
- ieee->ssid_set = 0;
- ieee->current_network.ssid[0] = '\0';
- ieee->current_network.ssid_len = 0;
- }
- spin_unlock_irqrestore(&ieee->lock, flags);
-
- if (proto_started)
- ieee80211_start_protocol(ieee);
-out:
- mutex_unlock(&ieee->wx_mutex);
- return ret;
-}
-EXPORT_SYMBOL(ieee80211_wx_set_essid);
-
-int ieee80211_wx_get_mode(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
-
- wrqu->mode = ieee->iw_mode;
- return 0;
-}
-EXPORT_SYMBOL(ieee80211_wx_get_mode);
-
-int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
-
- int *parms = (int *)extra;
- int enable = (parms[0] > 0);
- short prev = ieee->raw_tx;
-
- mutex_lock(&ieee->wx_mutex);
-
- if (enable)
- ieee->raw_tx = 1;
- else
- ieee->raw_tx = 0;
-
- netdev_info(ieee->dev, "raw TX is %s\n",
- ieee->raw_tx ? "enabled" : "disabled");
-
- if (ieee->iw_mode == IW_MODE_MONITOR) {
- if (prev == 0 && ieee->raw_tx) {
- if (ieee->data_hard_resume)
- ieee->data_hard_resume(ieee->dev);
-
- netif_carrier_on(ieee->dev);
- }
-
- if (prev && ieee->raw_tx == 0)
- netif_carrier_off(ieee->dev);
- }
-
- mutex_unlock(&ieee->wx_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL(ieee80211_wx_set_rawtx);
-
-int ieee80211_wx_get_name(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- strscpy(wrqu->name, "802.11", IFNAMSIZ);
- if (ieee->modulation & IEEE80211_CCK_MODULATION) {
- strlcat(wrqu->name, "b", IFNAMSIZ);
- if (ieee->modulation & IEEE80211_OFDM_MODULATION)
- strlcat(wrqu->name, "/g", IFNAMSIZ);
- } else if (ieee->modulation & IEEE80211_OFDM_MODULATION) {
- strlcat(wrqu->name, "g", IFNAMSIZ);
- }
-
- if (ieee->mode & (IEEE_N_24G | IEEE_N_5G))
- strlcat(wrqu->name, "/n", IFNAMSIZ);
-
- if ((ieee->state == IEEE80211_LINKED) ||
- (ieee->state == IEEE80211_LINKED_SCANNING))
- strlcat(wrqu->name, " linked", IFNAMSIZ);
- else if (ieee->state != IEEE80211_NOLINK)
- strlcat(wrqu->name, " link..", IFNAMSIZ);
-
- return 0;
-}
-EXPORT_SYMBOL(ieee80211_wx_get_name);
-
-/* this is mostly stolen from hostap */
-int ieee80211_wx_set_power(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
-
- mutex_lock(&ieee->wx_mutex);
-
- if (wrqu->power.disabled) {
- ieee->ps = IEEE80211_PS_DISABLED;
- goto exit;
- }
- if (wrqu->power.flags & IW_POWER_TIMEOUT) {
- /* ieee->ps_period = wrqu->power.value / 1000; */
- ieee->ps_timeout = wrqu->power.value / 1000;
- }
-
- if (wrqu->power.flags & IW_POWER_PERIOD) {
-
- /* ieee->ps_timeout = wrqu->power.value / 1000; */
- ieee->ps_period = wrqu->power.value / 1000;
- /* wrq->value / 1024; */
-
- }
- switch (wrqu->power.flags & IW_POWER_MODE) {
- case IW_POWER_UNICAST_R:
- ieee->ps = IEEE80211_PS_UNICAST;
- break;
- case IW_POWER_MULTICAST_R:
- ieee->ps = IEEE80211_PS_MBCAST;
- break;
- case IW_POWER_ALL_R:
- ieee->ps = IEEE80211_PS_UNICAST | IEEE80211_PS_MBCAST;
- break;
-
- case IW_POWER_ON:
- /* ieee->ps = IEEE80211_PS_DISABLED; */
- break;
-
- default:
- ret = -EINVAL;
- goto exit;
-
- }
-exit:
- mutex_unlock(&ieee->wx_mutex);
- return ret;
-
-}
-EXPORT_SYMBOL(ieee80211_wx_set_power);
-
-/* this is stolen from hostap */
-int ieee80211_wx_get_power(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- mutex_lock(&ieee->wx_mutex);
-
- if (ieee->ps == IEEE80211_PS_DISABLED) {
- wrqu->power.disabled = 1;
- goto exit;
- }
-
- wrqu->power.disabled = 0;
-
- if ((wrqu->power.flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
- wrqu->power.flags = IW_POWER_TIMEOUT;
- wrqu->power.value = ieee->ps_timeout * 1000;
- } else {
- /* ret = -EOPNOTSUPP; */
- /* goto exit; */
- wrqu->power.flags = IW_POWER_PERIOD;
- wrqu->power.value = ieee->ps_period * 1000;
- /* ieee->current_network.dtim_period * ieee->current_network.beacon_interval * 1024; */
- }
-
- if ((ieee->ps & (IEEE80211_PS_MBCAST | IEEE80211_PS_UNICAST)) == (IEEE80211_PS_MBCAST | IEEE80211_PS_UNICAST))
- wrqu->power.flags |= IW_POWER_ALL_R;
- else if (ieee->ps & IEEE80211_PS_MBCAST)
- wrqu->power.flags |= IW_POWER_MULTICAST_R;
- else
- wrqu->power.flags |= IW_POWER_UNICAST_R;
-
-exit:
- mutex_unlock(&ieee->wx_mutex);
- return 0;
-
-}
-EXPORT_SYMBOL(ieee80211_wx_get_power);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
deleted file mode 100644
index e4b6454809a0..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ /dev/null
@@ -1,839 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
- *
- * Contact Information:
- * James P. Ketrenos <ipw2100-admin@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *
- * Few modifications for Realtek's Wi-Fi drivers by
- * Andrea Merello <andrea.merello@gmail.com>
- *
- * A special thanks goes to Realtek for their support !
- *
- ******************************************************************************/
-
-#include <linux/compiler.h>
-#include <linux/errno.h>
-#include <linux/if_arp.h>
-#include <linux/in6.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/pci.h>
-#include <linux/proc_fs.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <linux/tcp.h>
-#include <linux/types.h>
-#include <linux/wireless.h>
-#include <linux/etherdevice.h>
-#include <linux/uaccess.h>
-#include <linux/if_vlan.h>
-
-#include "ieee80211.h"
-
-
-/*
- *
- *
- * 802.11 Data Frame
- *
- *
- * 802.11 frame_control for data frames - 2 bytes
- * ,-----------------------------------------------------------------------------------------.
- * bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | a | b | c | d | e |
- * |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
- * val | 0 | 0 | 0 | 1 | x | 0 | 0 | 0 | 1 | 0 | x | x | x | x | x |
- * |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
- * desc | ^-ver-^ | ^type-^ | ^-----subtype-----^ | to |from |more |retry| pwr |more |wep |
- * | | | x=0 data,x=1 data+ack | DS | DS |frag | | mgm |data | |
- * '-----------------------------------------------------------------------------------------'
- * /\
- * |
- * 802.11 Data Frame |
- * ,--------- 'ctrl' expands to >-----------'
- * |
- * ,--'---,-------------------------------------------------------------.
- * Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 |
- * |------|------|---------|---------|---------|------|---------|------|
- * Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | Frame | fcs |
- * | | tion | (BSSID) | | | ence | data | |
- * `--------------------------------------------------| |------'
- * Total: 28 non-data bytes `----.----'
- * |
- * .- 'Frame data' expands to <---------------------------'
- * |
- * V
- * ,---------------------------------------------------.
- * Bytes | 1 | 1 | 1 | 3 | 2 | 0-2304 |
- * |------|------|---------|----------|------|---------|
- * Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP |
- * | DSAP | SSAP | | | | Packet |
- * | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8| | |
- * `-----------------------------------------| |
- * Total: 8 non-data bytes `----.----'
- * |
- * .- 'IP Packet' expands, if WEP enabled, to <--'
- * |
- * V
- * ,-----------------------.
- * Bytes | 4 | 0-2296 | 4 |
- * |-----|-----------|-----|
- * Desc. | IV | Encrypted | ICV |
- * | | IP Packet | |
- * `-----------------------'
- * Total: 8 non-data bytes
- *
- *
- * 802.3 Ethernet Data Frame
- *
- * ,-----------------------------------------.
- * Bytes | 6 | 6 | 2 | Variable | 4 |
- * |-------|-------|------|-----------|------|
- * Desc. | Dest. | Source| Type | IP Packet | fcs |
- * | MAC | MAC | | | |
- * `-----------------------------------------'
- * Total: 18 non-data bytes
- *
- * In the event that fragmentation is required, the incoming payload is split into
- * N parts of size ieee->fts. The first fragment contains the SNAP header and the
- * remaining packets are just data.
- *
- * If encryption is enabled, each fragment payload size is reduced by enough space
- * to add the prefix and postfix (IV and ICV totalling 8 bytes in the case of WEP).
- * So if you have 1500 bytes of payload with ieee->fts set to 500 without
- * encryption it will take 3 frames. With WEP it will take 4 frames as the
- * payload of each frame is reduced to 492 bytes.
- *
- * SKB visualization
- *
- * ,- skb->data
- * |
- * | ETHERNET HEADER ,-<-- PAYLOAD
- * | | 14 bytes from skb->data
- * | 2 bytes for Type --> ,T. | (sizeof ethhdr)
- * | | | |
- * |,-Dest.--. ,--Src.---. | | |
- * | 6 bytes| | 6 bytes | | | |
- * v | | | | | |
- * 0 | v 1 | v | v 2
- * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
- * ^ | ^ | ^ |
- * | | | | | |
- * | | | | `T' <---- 2 bytes for Type
- * | | | |
- * | | '---SNAP--' <-------- 6 bytes for SNAP
- * | |
- * `-IV--' <-------------------- 4 bytes for IV (WEP)
- *
- * SNAP HEADER
- *
- */
-
-static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
-static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
-
-static inline int ieee80211_put_snap(u8 *data, u16 h_proto)
-{
- struct ieee80211_snap_hdr *snap;
- u8 *oui;
-
- snap = (struct ieee80211_snap_hdr *)data;
- snap->dsap = 0xaa;
- snap->ssap = 0xaa;
- snap->ctrl = 0x03;
-
- if (h_proto == 0x8137 || h_proto == 0x80f3)
- oui = P802_1H_OUI;
- else
- oui = RFC1042_OUI;
- snap->oui[0] = oui[0];
- snap->oui[1] = oui[1];
- snap->oui[2] = oui[2];
-
- *(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
-
- return SNAP_SIZE + sizeof(u16);
-}
-
-int ieee80211_encrypt_fragment(
- struct ieee80211_device *ieee,
- struct sk_buff *frag,
- int hdr_len)
-{
- struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];
- int res;
-
- if (!(crypt && crypt->ops)) {
- printk("=========>%s(), crypt is null\n", __func__);
- return -1;
- }
-
- if (ieee->tkip_countermeasures &&
- crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) {
- if (net_ratelimit()) {
- struct rtl_80211_hdr_3addrqos *header;
-
- header = (struct rtl_80211_hdr_3addrqos *)frag->data;
- netdev_dbg(ieee->dev, "TKIP countermeasures: dropped "
- "TX packet to %pM\n", header->addr1);
- }
- return -1;
- }
-
- /* To encrypt, frame format is:
- * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes)
- */
-
- // PR: FIXME: Copied from hostap. Check fragmentation/MSDU/MPDU encryption.
- /* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
- * call both MSDU and MPDU encryption functions from here.
- */
- atomic_inc(&crypt->refcnt);
- res = 0;
- if (crypt->ops->encrypt_msdu)
- res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
- if (res == 0 && crypt->ops->encrypt_mpdu)
- res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
-
- atomic_dec(&crypt->refcnt);
- if (res < 0) {
- netdev_info(ieee->dev, "Encryption failed: len=%d.\n",
- frag->len);
- ieee->ieee_stats.tx_discards++;
- return -1;
- }
-
- return 0;
-}
-
-
-void ieee80211_txb_free(struct ieee80211_txb *txb)
-{
- //int i;
- if (unlikely(!txb))
- return;
- kfree(txb);
-}
-EXPORT_SYMBOL(ieee80211_txb_free);
-
-static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
- gfp_t gfp_mask)
-{
- struct ieee80211_txb *txb;
- int i;
- txb = kmalloc(
- sizeof(struct ieee80211_txb) + (sizeof(u8 *) * nr_frags),
- gfp_mask);
- if (!txb)
- return NULL;
-
- memset(txb, 0, sizeof(struct ieee80211_txb));
- txb->nr_frags = nr_frags;
- txb->frag_size = __cpu_to_le16(txb_size);
-
- for (i = 0; i < nr_frags; i++) {
- txb->fragments[i] = dev_alloc_skb(txb_size);
- if (unlikely(!txb->fragments[i])) {
- i--;
- break;
- }
- memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
- }
- if (unlikely(i != nr_frags)) {
- while (i >= 0)
- dev_kfree_skb_any(txb->fragments[i--]);
- kfree(txb);
- return NULL;
- }
- return txb;
-}
-
-// Classify the to-be-sent data packet
-// Need to acquire the send queue index.
-static int
-ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
-{
- struct ethhdr *eth;
- struct iphdr *ip;
- eth = (struct ethhdr *)skb->data;
- if (eth->h_proto != htons(ETH_P_IP))
- return 0;
-
- ip = ip_hdr(skb);
- switch (ip->tos & 0xfc) {
- case 0x20:
- return 2;
- case 0x40:
- return 1;
- case 0x60:
- return 3;
- case 0x80:
- return 4;
- case 0xa0:
- return 5;
- case 0xc0:
- return 6;
- case 0xe0:
- return 7;
- default:
- return 0;
- }
-}
-
-static void ieee80211_tx_query_agg_cap(struct ieee80211_device *ieee,
- struct sk_buff *skb, struct cb_desc *tcb_desc)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- struct tx_ts_record *pTxTs = NULL;
- struct rtl_80211_hdr_1addr *hdr = (struct rtl_80211_hdr_1addr *)skb->data;
-
- if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
- return;
- if (!IsQoSDataFrame(skb->data))
- return;
-
- if (is_multicast_ether_addr(hdr->addr1))
- return;
- //check packet and mode later
- if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
- return;
-
- if (pHTInfo->bCurrentAMPDUEnable) {
- if (!GetTs(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1, skb->priority, TX_DIR, true)) {
- printk("===>can't get TS\n");
- return;
- }
- if (!pTxTs->tx_admitted_ba_record.valid) {
- TsStartAddBaProcess(ieee, pTxTs);
- goto FORCED_AGG_SETTING;
- } else if (!pTxTs->using_ba) {
- if (SN_LESS(pTxTs->tx_admitted_ba_record.start_seq_ctrl.field.seq_num, (pTxTs->tx_cur_seq + 1) % 4096))
- pTxTs->using_ba = true;
- else
- goto FORCED_AGG_SETTING;
- }
-
- if (ieee->iw_mode == IW_MODE_INFRA) {
- tcb_desc->bAMPDUEnable = true;
- tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor;
- tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity;
- }
- }
-FORCED_AGG_SETTING:
- switch (pHTInfo->ForcedAMPDUMode) {
- case HT_AGG_AUTO:
- break;
-
- case HT_AGG_FORCE_ENABLE:
- tcb_desc->bAMPDUEnable = true;
- tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
- tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
- break;
-
- case HT_AGG_FORCE_DISABLE:
- tcb_desc->bAMPDUEnable = false;
- tcb_desc->ampdu_density = 0;
- tcb_desc->ampdu_factor = 0;
- break;
-
- }
- return;
-}
-
-static void ieee80211_qurey_ShortPreambleMode(struct ieee80211_device *ieee,
- struct cb_desc *tcb_desc)
-{
- tcb_desc->bUseShortPreamble = false;
- if (tcb_desc->data_rate == 2) {//// 1M can only use Long Preamble. 11B spec
- return;
- } else if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE) {
- tcb_desc->bUseShortPreamble = true;
- }
- return;
-}
-static void
-ieee80211_query_HTCapShortGI(struct ieee80211_device *ieee, struct cb_desc *tcb_desc)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
-
- tcb_desc->bUseShortGI = false;
-
- if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
- return;
-
- if (pHTInfo->bForcedShortGI) {
- tcb_desc->bUseShortGI = true;
- return;
- }
-
- if (pHTInfo->bCurBW40MHz && pHTInfo->bCurShortGI40MHz)
- tcb_desc->bUseShortGI = true;
- else if (!pHTInfo->bCurBW40MHz && pHTInfo->bCurShortGI20MHz)
- tcb_desc->bUseShortGI = true;
-}
-
-static void ieee80211_query_BandwidthMode(struct ieee80211_device *ieee,
- struct cb_desc *tcb_desc)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
-
- tcb_desc->bPacketBW = false;
-
- if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
- return;
-
- if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
- return;
-
- if ((tcb_desc->data_rate & 0x80) == 0) // If using legacy rate, it shall use 20MHz channel.
- return;
- //BandWidthAutoSwitch is for auto switch to 20 or 40 in long distance
- if (pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz && !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
- tcb_desc->bPacketBW = true;
- return;
-}
-
-static void ieee80211_query_protectionmode(struct ieee80211_device *ieee,
- struct cb_desc *tcb_desc,
- struct sk_buff *skb)
-{
- // Common Settings
- tcb_desc->bRTSSTBC = false;
- tcb_desc->bRTSUseShortGI = false; // Since protection frames are always sent by legacy rate, ShortGI will never be used.
- tcb_desc->bCTSEnable = false; // Most of protection using RTS/CTS
- tcb_desc->RTSSC = 0; // 20MHz: Don't care; 40MHz: Duplicate.
- tcb_desc->bRTSBW = false; // RTS frame bandwidth is always 20MHz
-
- if (tcb_desc->bBroadcast || tcb_desc->bMulticast) //only unicast frame will use rts/cts
- return;
-
- if (is_broadcast_ether_addr(skb->data + 16)) //check addr3 as infrastructure add3 is DA.
- return;
-
- if (ieee->mode < IEEE_N_24G) /* b, g mode */ {
- // (1) RTS_Threshold is compared to the MPDU, not MSDU.
- // (2) If there are more than one frag in this MSDU, only the first frag uses protection frame.
- // Other fragments are protected by previous fragment.
- // So we only need to check the length of first fragment.
- if (skb->len > ieee->rts) {
- tcb_desc->bRTSEnable = true;
- tcb_desc->rts_rate = MGN_24M;
- } else if (ieee->current_network.buseprotection) {
- // Use CTS-to-SELF in protection mode.
- tcb_desc->bRTSEnable = true;
- tcb_desc->bCTSEnable = true;
- tcb_desc->rts_rate = MGN_24M;
- }
- //otherwise return;
- return;
- } else { // 11n High throughput case.
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- while (true) {
- //check ERP protection
- if (ieee->current_network.buseprotection) {// CTS-to-SELF
- tcb_desc->bRTSEnable = true;
- tcb_desc->bCTSEnable = true;
- tcb_desc->rts_rate = MGN_24M;
- break;
- }
- //check HT op mode
- if (pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT) {
- u8 HTOpMode = pHTInfo->CurrentOpMode;
- if ((pHTInfo->bCurBW40MHz && (HTOpMode == 2 || HTOpMode == 3)) ||
- (!pHTInfo->bCurBW40MHz && HTOpMode == 3)) {
- tcb_desc->rts_rate = MGN_24M; // Rate is 24Mbps.
- tcb_desc->bRTSEnable = true;
- break;
- }
- }
- //check rts
- if (skb->len > ieee->rts) {
- tcb_desc->rts_rate = MGN_24M; // Rate is 24Mbps.
- tcb_desc->bRTSEnable = true;
- break;
- }
- //to do list: check MIMO power save condition.
- //check AMPDU aggregation for TXOP
- if (tcb_desc->bAMPDUEnable) {
- tcb_desc->rts_rate = MGN_24M; // Rate is 24Mbps.
- // According to 8190 design, firmware sends CF-End only if RTS/CTS is enabled. However, it degrades
- // throughput by around 10M, so we disable this mechanism. 2007.08.03 by Emily
- tcb_desc->bRTSEnable = false;
- break;
- }
- //check IOT action
- if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) {
- tcb_desc->bCTSEnable = true;
- tcb_desc->rts_rate = MGN_24M;
- tcb_desc->bRTSEnable = true;
- break;
- }
- // Totally no protection case!!
- goto NO_PROTECTION;
- }
- }
- // For test , CTS replace with RTS
- if (0) {
- tcb_desc->bCTSEnable = true;
- tcb_desc->rts_rate = MGN_24M;
- tcb_desc->bRTSEnable = true;
- }
- if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
- tcb_desc->bUseShortPreamble = true;
- if (ieee->mode == IW_MODE_MASTER)
- goto NO_PROTECTION;
- return;
-NO_PROTECTION:
- tcb_desc->bRTSEnable = false;
- tcb_desc->bCTSEnable = false;
- tcb_desc->rts_rate = 0;
- tcb_desc->RTSSC = 0;
- tcb_desc->bRTSBW = false;
-}
-
-
-static void ieee80211_txrate_selectmode(struct ieee80211_device *ieee,
- struct cb_desc *tcb_desc)
-{
- if (ieee->bTxDisableRateFallBack)
- tcb_desc->bTxDisableRateFallBack = true;
-
- if (ieee->bTxUseDriverAssingedRate)
- tcb_desc->bTxUseDriverAssingedRate = true;
- if (!tcb_desc->bTxDisableRateFallBack || !tcb_desc->bTxUseDriverAssingedRate) {
- if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC)
- tcb_desc->RATRIndex = 0;
- }
-}
-
-static void ieee80211_query_seqnum(struct ieee80211_device *ieee,
- struct sk_buff *skb, u8 *dst)
-{
- if (is_multicast_ether_addr(dst))
- return;
- if (IsQoSDataFrame(skb->data)) /* we deal qos data only */ {
- struct tx_ts_record *pTS = NULL;
- if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst, skb->priority, TX_DIR, true)) {
- return;
- }
- pTS->tx_cur_seq = (pTS->tx_cur_seq + 1) % 4096;
- }
-}
-
-netdev_tx_t ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct ieee80211_device *ieee = netdev_priv(dev);
- struct ieee80211_txb *txb = NULL;
- struct rtl_80211_hdr_3addrqos *frag_hdr;
- int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
- unsigned long flags;
- struct net_device_stats *stats = &ieee->stats;
- int ether_type = 0, encrypt;
- int bytes, fc, qos_ctl = 0, hdr_len;
- struct sk_buff *skb_frag;
- struct rtl_80211_hdr_3addrqos header = { /* Ensure zero initialized */
- .duration_id = 0,
- .seq_ctl = 0,
- .qos_ctl = 0
- };
- u8 dest[ETH_ALEN], src[ETH_ALEN];
- int qos_actived = ieee->current_network.qos_data.active;
-
- struct ieee80211_crypt_data *crypt;
-
- struct cb_desc *tcb_desc;
-
- spin_lock_irqsave(&ieee->lock, flags);
-
- /* If there is no driver handler to take the TXB, don't bother
- * creating it...
- */
- if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)) ||
- ((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
- netdev_warn(ieee->dev, "No xmit handler.\n");
- goto success;
- }
-
-
- if (likely(ieee->raw_tx == 0)) {
- if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
- netdev_warn(ieee->dev, "skb too small (%d).\n",
- skb->len);
- goto success;
- }
-
- memset(skb->cb, 0, sizeof(skb->cb));
- ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);
-
- crypt = ieee->crypt[ieee->tx_keyidx];
-
- encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
- ieee->host_encrypt && crypt && crypt->ops;
-
- if (!encrypt && ieee->ieee802_1x &&
- ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
- stats->tx_dropped++;
- goto success;
- }
- #ifdef CONFIG_IEEE80211_DEBUG
- if (crypt && !encrypt && ether_type == ETH_P_PAE) {
- struct eapol *eap = (struct eapol *)(skb->data +
- sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16));
- IEEE80211_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
- eap_get_type(eap->type));
- }
- #endif
-
- /* Save source and destination addresses */
- memcpy(&dest, skb->data, ETH_ALEN);
- memcpy(&src, skb->data + ETH_ALEN, ETH_ALEN);
-
- /* Advance the SKB to the start of the payload */
- skb_pull(skb, sizeof(struct ethhdr));
-
- /* Determine total amount of storage required for TXB packets */
- bytes = skb->len + SNAP_SIZE + sizeof(u16);
-
- if (encrypt)
- fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_WEP;
- else
-
- fc = IEEE80211_FTYPE_DATA;
-
- //if(ieee->current_network.QoS_Enable)
- if (qos_actived)
- fc |= IEEE80211_STYPE_QOS_DATA;
- else
- fc |= IEEE80211_STYPE_DATA;
-
- if (ieee->iw_mode == IW_MODE_INFRA) {
- fc |= IEEE80211_FCTL_TODS;
- /* To DS: Addr1 = BSSID, Addr2 = SA,
- * Addr3 = DA
- */
- memcpy(&header.addr1, ieee->current_network.bssid, ETH_ALEN);
- memcpy(&header.addr2, &src, ETH_ALEN);
- memcpy(&header.addr3, &dest, ETH_ALEN);
- } else if (ieee->iw_mode == IW_MODE_ADHOC) {
- /* not From/To DS: Addr1 = DA, Addr2 = SA,
- * Addr3 = BSSID
- */
- memcpy(&header.addr1, dest, ETH_ALEN);
- memcpy(&header.addr2, src, ETH_ALEN);
- memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN);
- }
-
- header.frame_ctl = cpu_to_le16(fc);
-
- /* Determine fragmentation size based on destination (multicast
- * and broadcast are not fragmented)
- */
- if (is_multicast_ether_addr(header.addr1)) {
- frag_size = MAX_FRAG_THRESHOLD;
- qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
- } else {
- frag_size = ieee->fts;//default:392
- qos_ctl = 0;
- }
-
- //if (ieee->current_network.QoS_Enable)
- if (qos_actived) {
- hdr_len = IEEE80211_3ADDR_LEN + 2;
-
- skb->priority = ieee80211_classify(skb, &ieee->current_network);
- qos_ctl |= skb->priority; //set in the ieee80211_classify
- header.qos_ctl = cpu_to_le16(qos_ctl & IEEE80211_QOS_TID);
- } else {
- hdr_len = IEEE80211_3ADDR_LEN;
- }
- /* Determine amount of payload per fragment. Regardless of if
- * this stack is providing the full 802.11 header, one will
- * eventually be affixed to this fragment -- so we must account for
- * it when determining the amount of payload space.
- */
- bytes_per_frag = frag_size - hdr_len;
- if (ieee->config &
- (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
- bytes_per_frag -= IEEE80211_FCS_LEN;
-
- /* Each fragment may need to have room for encryption pre/postfix */
- if (encrypt)
- bytes_per_frag -= crypt->ops->extra_prefix_len +
- crypt->ops->extra_postfix_len;
-
- /* Number of fragments is the total bytes_per_frag /
- * payload_per_fragment
- */
- nr_frags = bytes / bytes_per_frag;
- bytes_last_frag = bytes % bytes_per_frag;
- if (bytes_last_frag)
- nr_frags++;
- else
- bytes_last_frag = bytes_per_frag;
-
- /* When we allocate the TXB we allocate enough space for the reserve
- * and full fragment bytes (bytes_per_frag doesn't include prefix,
- * postfix, header, FCS, etc.)
- */
- txb = ieee80211_alloc_txb(nr_frags, frag_size + ieee->tx_headroom, GFP_ATOMIC);
- if (unlikely(!txb)) {
- netdev_warn(ieee->dev, "Could not allocate TXB\n");
- goto failed;
- }
- txb->encrypted = encrypt;
- txb->payload_size = __cpu_to_le16(bytes);
-
- //if (ieee->current_network.QoS_Enable)
- if (qos_actived)
- txb->queue_index = UP2AC(skb->priority);
- else
- txb->queue_index = WME_AC_BK;
-
-
-
- for (i = 0; i < nr_frags; i++) {
- skb_frag = txb->fragments[i];
- tcb_desc = (struct cb_desc *)(skb_frag->cb + MAX_DEV_ADDR_SIZE);
- if (qos_actived) {
- skb_frag->priority = skb->priority;//UP2AC(skb->priority);
- tcb_desc->queue_index = UP2AC(skb->priority);
- } else {
- skb_frag->priority = WME_AC_BK;
- tcb_desc->queue_index = WME_AC_BK;
- }
- skb_reserve(skb_frag, ieee->tx_headroom);
-
- if (encrypt) {
- if (ieee->hwsec_active)
- tcb_desc->bHwSec = 1;
- else
- tcb_desc->bHwSec = 0;
- skb_reserve(skb_frag, crypt->ops->extra_prefix_len);
- } else {
- tcb_desc->bHwSec = 0;
- }
- frag_hdr = skb_put_data(skb_frag, &header, hdr_len);
-
- /* If this is not the last fragment, then add the MOREFRAGS
- * bit to the frame control
- */
- if (i != nr_frags - 1) {
- frag_hdr->frame_ctl = cpu_to_le16(
- fc | IEEE80211_FCTL_MOREFRAGS);
- bytes = bytes_per_frag;
-
- } else {
- /* The last fragment takes the remaining length */
- bytes = bytes_last_frag;
- }
- //if(ieee->current_network.QoS_Enable)
- if (qos_actived) {
- // add 1 only indicate to corresponding seq number control 2006/7/12
- frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[UP2AC(skb->priority) + 1] << 4 | i);
- } else {
- frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4 | i);
- }
-
- /* Put a SNAP header on the first fragment */
- if (i == 0) {
- ieee80211_put_snap(
- skb_put(skb_frag, SNAP_SIZE + sizeof(u16)),
- ether_type);
- bytes -= SNAP_SIZE + sizeof(u16);
- }
-
- skb_put_data(skb_frag, skb->data, bytes);
-
- /* Advance the SKB... */
- skb_pull(skb, bytes);
-
- /* Encryption routine will move the header forward in order
- * to insert the IV between the header and the payload
- */
- if (encrypt)
- ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
- if (ieee->config &
- (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
- skb_put(skb_frag, 4);
- }
-
- if (qos_actived) {
- if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
- ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
- else
- ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
- } else {
- if (ieee->seq_ctrl[0] == 0xFFF)
- ieee->seq_ctrl[0] = 0;
- else
- ieee->seq_ctrl[0]++;
- }
- } else {
- if (unlikely(skb->len < sizeof(struct rtl_80211_hdr_3addr))) {
- netdev_warn(ieee->dev, "skb too small (%d).\n",
- skb->len);
- goto success;
- }
-
- txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC);
- if (!txb) {
- netdev_warn(ieee->dev, "Could not allocate TXB\n");
- goto failed;
- }
-
- txb->encrypted = 0;
- txb->payload_size = __cpu_to_le16(skb->len);
- skb_put_data(txb->fragments[0], skb->data, skb->len);
- }
-
- success:
-//WB add to fill data tcb_desc here. only first fragment is considered, need to change, and you may remove to other place.
- if (txb) {
- tcb_desc = (struct cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
- tcb_desc->bTxEnableFwCalcDur = 1;
- if (is_multicast_ether_addr(header.addr1))
- tcb_desc->bMulticast = 1;
- if (is_broadcast_ether_addr(header.addr1))
- tcb_desc->bBroadcast = 1;
- ieee80211_txrate_selectmode(ieee, tcb_desc);
- if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
- tcb_desc->data_rate = ieee->basic_rate;
- else
- tcb_desc->data_rate = CURRENT_RATE(ieee->mode, ieee->rate, ieee->HTCurrentOperaRate);
- ieee80211_qurey_ShortPreambleMode(ieee, tcb_desc);
- ieee80211_tx_query_agg_cap(ieee, txb->fragments[0], tcb_desc);
- ieee80211_query_HTCapShortGI(ieee, tcb_desc);
- ieee80211_query_BandwidthMode(ieee, tcb_desc);
- ieee80211_query_protectionmode(ieee, tcb_desc, txb->fragments[0]);
- ieee80211_query_seqnum(ieee, txb->fragments[0], header.addr1);
- }
- spin_unlock_irqrestore(&ieee->lock, flags);
- dev_kfree_skb_any(skb);
- if (txb) {
- if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
- ieee80211_softmac_xmit(txb, ieee);
- } else {
- if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
- stats->tx_packets++;
- stats->tx_bytes += __le16_to_cpu(txb->payload_size);
- return NETDEV_TX_OK;
- }
- ieee80211_txb_free(txb);
- }
- }
-
- return NETDEV_TX_OK;
-
- failed:
- spin_unlock_irqrestore(&ieee->lock, flags);
- netif_stop_queue(dev);
- stats->tx_errors++;
- return 1;
-
-}
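The fragment accounting in the deleted ieee80211_xmit() above reduces to a short calculation; the standalone sketch below reproduces it for the example from the file's header comment (1500-byte payload, fts = 500). The 24-byte header and 4+4-byte WEP IV/ICV sizes are assumptions for illustration only.

#include <stdio.h>

/* Illustrative only: mirrors the nr_frags / bytes_last_frag logic of the
 * deleted ieee80211_xmit(), with assumed header and WEP overhead sizes. */
int main(void)
{
	int bytes = 1500;         /* payload incl. SNAP, as in the header comment */
	int frag_size = 500;      /* ieee->fts */
	int hdr_len = 24;         /* assumed 3-address 802.11 header length */
	int wep_overhead = 4 + 4; /* assumed IV prefix + ICV postfix */

	int bytes_per_frag = frag_size - hdr_len - wep_overhead;
	int nr_frags = bytes / bytes_per_frag;
	int bytes_last_frag = bytes % bytes_per_frag;

	if (bytes_last_frag)
		nr_frags++;
	else
		bytes_last_frag = bytes_per_frag;

	printf("%d fragments, last fragment carries %d bytes\n",
	       nr_frags, bytes_last_frag);
	return 0;
}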
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
deleted file mode 100644
index d6829cf6f7e3..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
+++ /dev/null
@@ -1,810 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
-
- Copyright(c) 2004 Intel Corporation. All rights reserved.
-
- Portions of this file are based on the WEP enablement code provided by the
- Host AP project hostap-drivers v0.1.3
- Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
- <jkmaline@cc.hut.fi>
- Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
-
- Contact Information:
- James P. Ketrenos <ipw2100-admin@linux.intel.com>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-******************************************************************************/
-#include <linux/wireless.h>
-#include <linux/kmod.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-
-#include "ieee80211.h"
-struct modes_unit {
- char *mode_string;
- int mode_size;
-};
-static struct modes_unit ieee80211_modes[] = {
- {"a", 1},
- {"b", 1},
- {"g", 1},
- {"?", 1},
- {"N-24G", 5},
- {"N-5G", 4},
-};
-
-#define iwe_stream_add_event_rsl iwe_stream_add_event
-
-#define MAX_CUSTOM_LEN 64
-static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
- char *start, char *stop,
- struct ieee80211_network *network,
- struct iw_request_info *info)
-{
- char custom[MAX_CUSTOM_LEN];
- char proto_name[IFNAMSIZ];
- char *pname = proto_name;
- char *p;
- struct iw_event iwe;
- int i, j;
- u16 max_rate, rate;
- static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33};
-
- /* First entry *MUST* be the AP MAC address */
- iwe.cmd = SIOCGIWAP;
- iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, network->bssid, ETH_ALEN);
- start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_ADDR_LEN);
- /* Remaining entries will be displayed in the order we provide them */
-
- /* Add the ESSID */
- iwe.cmd = SIOCGIWESSID;
- iwe.u.data.flags = 1;
-// if (network->flags & NETWORK_EMPTY_ESSID) {
- if (network->ssid_len == 0) {
- iwe.u.data.length = sizeof("<hidden>");
- start = iwe_stream_add_point(info, start, stop, &iwe, "<hidden>");
- } else {
- iwe.u.data.length = min(network->ssid_len, (u8)32);
- start = iwe_stream_add_point(info, start, stop, &iwe, network->ssid);
- }
- /* Add the protocol name */
- iwe.cmd = SIOCGIWNAME;
- for (i = 0; i < ARRAY_SIZE(ieee80211_modes); i++) {
- if (network->mode & BIT(i)) {
- sprintf(pname, ieee80211_modes[i].mode_string, ieee80211_modes[i].mode_size);
- pname += ieee80211_modes[i].mode_size;
- }
- }
- *pname = '\0';
- snprintf(iwe.u.name, IFNAMSIZ, "IEEE802.11%s", proto_name);
- start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_CHAR_LEN);
- /* Add mode */
- iwe.cmd = SIOCGIWMODE;
- if (network->capability &
- (WLAN_CAPABILITY_BSS | WLAN_CAPABILITY_IBSS)) {
- if (network->capability & WLAN_CAPABILITY_BSS)
- iwe.u.mode = IW_MODE_MASTER;
- else
- iwe.u.mode = IW_MODE_ADHOC;
- start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_UINT_LEN);
- }
-
- /* Add frequency/channel */
- iwe.cmd = SIOCGIWFREQ;
-/* iwe.u.freq.m = ieee80211_frequency(network->channel, network->mode);
- iwe.u.freq.e = 3; */
- iwe.u.freq.m = network->channel;
- iwe.u.freq.e = 0;
- iwe.u.freq.i = 0;
- start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_FREQ_LEN);
- /* Add encryption capability */
- iwe.cmd = SIOCGIWENCODE;
- if (network->capability & WLAN_CAPABILITY_PRIVACY)
- iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
- else
- iwe.u.data.flags = IW_ENCODE_DISABLED;
- iwe.u.data.length = 0;
- start = iwe_stream_add_point(info, start, stop, &iwe, network->ssid);
- /* Add basic and extended rates */
- max_rate = 0;
- p = custom;
- p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
- for (i = 0, j = 0; i < network->rates_len; ) {
- if (j < network->rates_ex_len &&
- ((network->rates_ex[j] & 0x7F) <
- (network->rates[i] & 0x7F)))
- rate = network->rates_ex[j++] & 0x7F;
- else
- rate = network->rates[i++] & 0x7F;
- if (rate > max_rate)
- max_rate = rate;
- p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom),
- "%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
- }
- for (; j < network->rates_ex_len; j++) {
- rate = network->rates_ex[j] & 0x7F;
- p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom),
- "%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
- if (rate > max_rate)
- max_rate = rate;
- }
-
- if (network->mode >= IEEE_N_24G) /* add N rate here */ {
- struct ht_capability_ele *ht_cap = NULL;
- bool is40M = false, isShortGI = false;
- u8 max_mcs = 0;
- if (!memcmp(network->bssht.bdHTCapBuf, EWC11NHTCap, 4))
- ht_cap = (struct ht_capability_ele *)&network->bssht.bdHTCapBuf[4];
- else
- ht_cap = (struct ht_capability_ele *)&network->bssht.bdHTCapBuf[0];
- is40M = (ht_cap->ChlWidth) ? 1 : 0;
- isShortGI = (ht_cap->ChlWidth) ?
- ((ht_cap->ShortGI40Mhz) ? 1 : 0) :
- ((ht_cap->ShortGI20Mhz) ? 1 : 0);
-
- max_mcs = HTGetHighestMCSRate(ieee, ht_cap->MCS, MCS_FILTER_ALL);
- rate = MCS_DATA_RATE[is40M][isShortGI][max_mcs & 0x7f];
- if (rate > max_rate)
- max_rate = rate;
- }
- iwe.cmd = SIOCGIWRATE;
- iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
- iwe.u.bitrate.value = max_rate * 500000;
- start = iwe_stream_add_event_rsl(info, start, stop, &iwe,
- IW_EV_PARAM_LEN);
- iwe.cmd = IWEVCUSTOM;
- iwe.u.data.length = p - custom;
- if (iwe.u.data.length)
- start = iwe_stream_add_point(info, start, stop, &iwe, custom);
- /* Add quality statistics */
- /* TODO: Fix these values... */
- iwe.cmd = IWEVQUAL;
- iwe.u.qual.qual = network->stats.signal;
- iwe.u.qual.level = network->stats.rssi;
- iwe.u.qual.noise = network->stats.noise;
- iwe.u.qual.updated = network->stats.mask & IEEE80211_STATMASK_WEMASK;
- if (!(network->stats.mask & IEEE80211_STATMASK_RSSI))
- iwe.u.qual.updated |= IW_QUAL_LEVEL_INVALID;
- if (!(network->stats.mask & IEEE80211_STATMASK_NOISE))
- iwe.u.qual.updated |= IW_QUAL_NOISE_INVALID;
- if (!(network->stats.mask & IEEE80211_STATMASK_SIGNAL))
- iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID;
- iwe.u.qual.updated = 7;
- start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_QUAL_LEN);
- iwe.cmd = IWEVCUSTOM;
- p = custom;
-
- iwe.u.data.length = p - custom;
- if (iwe.u.data.length)
- start = iwe_stream_add_point(info, start, stop, &iwe, custom);
-
- if (ieee->wpa_enabled && network->wpa_ie_len) {
- char buf[MAX_WPA_IE_LEN * 2 + 30];
- // printk("WPA IE\n");
- u8 *p = buf;
- p += sprintf(p, "wpa_ie=");
- for (i = 0; i < network->wpa_ie_len; i++)
- p += sprintf(p, "%02x", network->wpa_ie[i]);
-
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVCUSTOM;
- iwe.u.data.length = strlen(buf);
- start = iwe_stream_add_point(info, start, stop, &iwe, buf);
- }
-
- if (ieee->wpa_enabled && network->rsn_ie_len) {
- char buf[MAX_WPA_IE_LEN * 2 + 30];
-
- u8 *p = buf;
- p += sprintf(p, "rsn_ie=");
- for (i = 0; i < network->rsn_ie_len; i++)
- p += sprintf(p, "%02x", network->rsn_ie[i]);
-
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVCUSTOM;
- iwe.u.data.length = strlen(buf);
- start = iwe_stream_add_point(info, start, stop, &iwe, buf);
- }
-
-
- /* Add EXTRA: Age to display seconds since last beacon/probe response
- * for given network. */
- iwe.cmd = IWEVCUSTOM;
- p = custom;
- p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom),
- " Last beacon: %lums ago", (jiffies - network->last_scanned) / (HZ / 100));
- iwe.u.data.length = p - custom;
- if (iwe.u.data.length)
- start = iwe_stream_add_point(info, start, stop, &iwe, custom);
-
- return start;
-}
-
-int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct ieee80211_network *network;
- unsigned long flags;
-
- char *ev = extra;
-// char *stop = ev + IW_SCAN_MAX_DATA;
- char *stop = ev + wrqu->data.length;//IW_SCAN_MAX_DATA;
- //char *stop = ev + IW_SCAN_MAX_DATA;
- int i = 0;
- int err = 0;
- IEEE80211_DEBUG_WX("Getting scan\n");
- mutex_lock(&ieee->wx_mutex);
- spin_lock_irqsave(&ieee->lock, flags);
-
- list_for_each_entry(network, &ieee->network_list, list) {
- i++;
- if ((stop - ev) < 200) {
- err = -E2BIG;
- break;
- }
- if (ieee->scan_age == 0 ||
- time_after(network->last_scanned + ieee->scan_age, jiffies))
- ev = rtl819x_translate_scan(ieee, ev, stop, network, info);
- else
- IEEE80211_DEBUG_SCAN(
- "Not showing network '%s ("
- "%pM)' due to age (%lums).\n",
- escape_essid(network->ssid,
- network->ssid_len),
- network->bssid,
- (jiffies - network->last_scanned) / (HZ / 100));
- }
-
- spin_unlock_irqrestore(&ieee->lock, flags);
- mutex_unlock(&ieee->wx_mutex);
- wrqu->data.length = ev - extra;
- wrqu->data.flags = 0;
-
- IEEE80211_DEBUG_WX("exit: %d networks returned.\n", i);
-
- return err;
-}
-EXPORT_SYMBOL(ieee80211_wx_get_scan);
-
-int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *keybuf)
-{
- struct iw_point *erq = &(wrqu->encoding);
- struct net_device *dev = ieee->dev;
- struct ieee80211_security sec = {
- .flags = 0
- };
- int i, key, key_provided, len;
- struct ieee80211_crypt_data **crypt;
-
- IEEE80211_DEBUG_WX("SET_ENCODE\n");
-
- key = erq->flags & IW_ENCODE_INDEX;
- if (key) {
- if (key > WEP_KEYS)
- return -EINVAL;
- key--;
- key_provided = 1;
- } else {
- key_provided = 0;
- key = ieee->tx_keyidx;
- }
-
- IEEE80211_DEBUG_WX("Key: %d [%s]\n", key, key_provided ?
- "provided" : "default");
- crypt = &ieee->crypt[key];
-
- if (erq->flags & IW_ENCODE_DISABLED) {
- if (key_provided && *crypt) {
- IEEE80211_DEBUG_WX("Disabling encryption on key %d.\n",
- key);
- ieee80211_crypt_delayed_deinit(ieee, crypt);
- } else
- IEEE80211_DEBUG_WX("Disabling encryption.\n");
-
- /* Check all the keys to see if any are still configured,
- * and if no key index was provided, de-init them all */
- for (i = 0; i < WEP_KEYS; i++) {
- if (ieee->crypt[i]) {
- if (key_provided)
- break;
- ieee80211_crypt_delayed_deinit(
- ieee, &ieee->crypt[i]);
- }
- }
-
- if (i == WEP_KEYS) {
- sec.enabled = 0;
- sec.level = SEC_LEVEL_0;
- sec.flags |= SEC_ENABLED | SEC_LEVEL;
- }
-
- goto done;
- }
-
-
-
- sec.enabled = 1;
- sec.flags |= SEC_ENABLED;
-
- if (*crypt && (*crypt)->ops &&
- strcmp((*crypt)->ops->name, "WEP") != 0) {
- /* changing to use WEP; deinit previously used algorithm
- * on this key */
- ieee80211_crypt_delayed_deinit(ieee, crypt);
- }
-
- if (!*crypt) {
- struct ieee80211_crypt_data *new_crypt;
-
- /* take WEP into use */
- new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data),
- GFP_KERNEL);
- if (!new_crypt)
- return -ENOMEM;
- new_crypt->ops = try_then_request_module(ieee80211_get_crypto_ops("WEP"),
- "ieee80211_crypt_wep");
- if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
- new_crypt->priv = new_crypt->ops->init(key);
-
- if (!new_crypt->ops || !new_crypt->priv) {
- kfree(new_crypt);
- new_crypt = NULL;
-
- netdev_warn(dev, "could not initialize WEP: "
- "load module ieee80211_crypt_wep\n");
- return -EOPNOTSUPP;
- }
- *crypt = new_crypt;
- }
-
- /* If a new key was provided, set it up */
- if (erq->length > 0) {
- len = erq->length <= 5 ? 5 : 13;
- memcpy(sec.keys[key], keybuf, erq->length);
- if (len > erq->length)
- memset(sec.keys[key] + erq->length, 0,
- len - erq->length);
- IEEE80211_DEBUG_WX("Setting key %d to '%s' (%d:%d bytes)\n",
- key, escape_essid(sec.keys[key], len),
- erq->length, len);
- sec.key_sizes[key] = len;
- (*crypt)->ops->set_key(sec.keys[key], len, NULL,
- (*crypt)->priv);
- sec.flags |= BIT(key);
- /* This ensures a key will be activated if no key is
- * explicitly set
- */
- if (key == sec.active_key)
- sec.flags |= SEC_ACTIVE_KEY;
- ieee->tx_keyidx = key;
-
- } else {
- len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN,
- NULL, (*crypt)->priv);
- if (len == 0) {
- /* Set a default key of all 0 */
- printk("Setting key %d to all zero.\n",
- key);
-
- IEEE80211_DEBUG_WX("Setting key %d to all zero.\n",
- key);
- memset(sec.keys[key], 0, 13);
- (*crypt)->ops->set_key(sec.keys[key], 13, NULL,
- (*crypt)->priv);
- sec.key_sizes[key] = 13;
- sec.flags |= BIT(key);
- }
-
- /* No key data - just set the default TX key index */
- if (key_provided) {
- IEEE80211_DEBUG_WX(
- "Setting key %d to default Tx key.\n", key);
- ieee->tx_keyidx = key;
- sec.active_key = key;
- sec.flags |= SEC_ACTIVE_KEY;
- }
- }
-
- done:
- ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED);
- ieee->auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY;
- sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY;
- sec.flags |= SEC_AUTH_MODE;
- IEEE80211_DEBUG_WX("Auth: %s\n", sec.auth_mode == WLAN_AUTH_OPEN ?
- "OPEN" : "SHARED KEY");
-
- /* For now we just support WEP, so only set that security level...
- * TODO: When WPA is added this is one place that needs to change */
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_1; /* 40 and 104 bit WEP */
-
- if (ieee->set_security)
- ieee->set_security(dev, &sec);
-
- /* Do not reset port if card is in Managed mode since resetting will
- * generate new IEEE 802.11 authentication which may end up in looping
- * with IEEE 802.1X. If your hardware requires a reset after WEP
- * configuration (for example... Prism2), implement the reset_port in
- * the callbacks structures used to initialize the 802.11 stack. */
- if (ieee->reset_on_keychange &&
- ieee->iw_mode != IW_MODE_INFRA &&
- ieee->reset_port && ieee->reset_port(dev)) {
- netdev_dbg(ieee->dev, "reset_port failed\n");
- return -EINVAL;
- }
- return 0;
-}
-EXPORT_SYMBOL(ieee80211_wx_set_encode);
-
-int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *keybuf)
-{
- struct iw_point *erq = &(wrqu->encoding);
- int len, key;
- struct ieee80211_crypt_data *crypt;
-
- IEEE80211_DEBUG_WX("GET_ENCODE\n");
-
- if (ieee->iw_mode == IW_MODE_MONITOR)
- return -1;
-
- key = erq->flags & IW_ENCODE_INDEX;
- if (key) {
- if (key > WEP_KEYS)
- return -EINVAL;
- key--;
- } else
- key = ieee->tx_keyidx;
-
- crypt = ieee->crypt[key];
- erq->flags = key + 1;
-
- if (!crypt || !crypt->ops) {
- erq->length = 0;
- erq->flags |= IW_ENCODE_DISABLED;
- return 0;
- }
- len = crypt->ops->get_key(keybuf, SCM_KEY_LEN, NULL, crypt->priv);
- if (len < 0)
- len = 0;
- erq->length = len;
-
- erq->flags |= IW_ENCODE_ENABLED;
-
- if (ieee->open_wep)
- erq->flags |= IW_ENCODE_OPEN;
- else
- erq->flags |= IW_ENCODE_RESTRICTED;
-
- return 0;
-}
-EXPORT_SYMBOL(ieee80211_wx_get_encode);
-
-int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- struct net_device *dev = ieee->dev;
- struct iw_point *encoding = &wrqu->encoding;
- struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- int i, idx;
- int group_key = 0;
- const char *alg, *module;
- struct ieee80211_crypto_ops *ops;
- struct ieee80211_crypt_data **crypt;
-
- struct ieee80211_security sec = {
- .flags = 0,
- };
- idx = encoding->flags & IW_ENCODE_INDEX;
- if (idx) {
- if (idx < 1 || idx > WEP_KEYS)
- return -EINVAL;
- idx--;
- } else
- idx = ieee->tx_keyidx;
-
- if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
-
- crypt = &ieee->crypt[idx];
-
- group_key = 1;
- } else {
- /* some Cisco APs use idx>0 for unicast in dynamic WEP */
- if (idx != 0 && ext->alg != IW_ENCODE_ALG_WEP)
- return -EINVAL;
- if (ieee->iw_mode == IW_MODE_INFRA)
-
- crypt = &ieee->crypt[idx];
-
- else
- return -EINVAL;
- }
-
- sec.flags |= SEC_ENABLED;// | SEC_ENCRYPT;
- if ((encoding->flags & IW_ENCODE_DISABLED) ||
- ext->alg == IW_ENCODE_ALG_NONE) {
- if (*crypt)
- ieee80211_crypt_delayed_deinit(ieee, crypt);
-
- for (i = 0; i < WEP_KEYS; i++)
-
- if (ieee->crypt[i])
-
- break;
-
- if (i == WEP_KEYS) {
- sec.enabled = 0;
- // sec.encrypt = 0;
- sec.level = SEC_LEVEL_0;
- sec.flags |= SEC_LEVEL;
- }
- goto done;
- }
-
- sec.enabled = 1;
- // sec.encrypt = 1;
- switch (ext->alg) {
- case IW_ENCODE_ALG_WEP:
- alg = "WEP";
- module = "ieee80211_crypt_wep";
- break;
- case IW_ENCODE_ALG_TKIP:
- alg = "TKIP";
- module = "ieee80211_crypt_tkip";
- break;
- case IW_ENCODE_ALG_CCMP:
- alg = "CCMP";
- module = "ieee80211_crypt_ccmp";
- break;
- default:
- IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
- dev->name, ext->alg);
- ret = -EINVAL;
- goto done;
- }
- printk("alg name:%s\n", alg);
-
- ops = try_then_request_module(ieee80211_get_crypto_ops(alg), module);
- if (!ops) {
- IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
- dev->name, ext->alg);
- printk("========>unknown crypto alg %d\n", ext->alg);
- ret = -EINVAL;
- goto done;
- }
-
- if (!*crypt || (*crypt)->ops != ops) {
- struct ieee80211_crypt_data *new_crypt;
-
- ieee80211_crypt_delayed_deinit(ieee, crypt);
-
- new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
- if (!new_crypt) {
- ret = -ENOMEM;
- goto done;
- }
- new_crypt->ops = ops;
- if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
- new_crypt->priv = new_crypt->ops->init(idx);
- if (!new_crypt->priv) {
- kfree(new_crypt);
- ret = -EINVAL;
- goto done;
- }
- *crypt = new_crypt;
- }
-
- if (ext->key_len > 0 && (*crypt)->ops->set_key &&
- (*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq,
- (*crypt)->priv) < 0) {
- IEEE80211_DEBUG_WX("%s: key setting failed\n", dev->name);
- printk("key setting failed\n");
- ret = -EINVAL;
- goto done;
- }
- //skip_host_crypt:
- if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
- ieee->tx_keyidx = idx;
- sec.active_key = idx;
- sec.flags |= SEC_ACTIVE_KEY;
- }
-
- if (ext->alg != IW_ENCODE_ALG_NONE) {
- //memcpy(sec.keys[idx], ext->key, ext->key_len);
- sec.key_sizes[idx] = ext->key_len;
- sec.flags |= BIT(idx);
- if (ext->alg == IW_ENCODE_ALG_WEP) {
- // sec.encode_alg[idx] = SEC_ALG_WEP;
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_1;
- } else if (ext->alg == IW_ENCODE_ALG_TKIP) {
- // sec.encode_alg[idx] = SEC_ALG_TKIP;
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_2;
- } else if (ext->alg == IW_ENCODE_ALG_CCMP) {
- // sec.encode_alg[idx] = SEC_ALG_CCMP;
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_3;
- }
- /* Don't set sec level for group keys. */
- if (group_key)
- sec.flags &= ~SEC_LEVEL;
- }
-done:
- if (ieee->set_security)
- ieee->set_security(ieee->dev, &sec);
-
- if (ieee->reset_on_keychange &&
- ieee->iw_mode != IW_MODE_INFRA &&
- ieee->reset_port && ieee->reset_port(dev)) {
- IEEE80211_DEBUG_WX("%s: reset_port failed\n", dev->name);
- return -EINVAL;
- }
- return ret;
-}
-EXPORT_SYMBOL(ieee80211_wx_set_encode_ext);
-
-int ieee80211_wx_get_encode_ext(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_point *encoding = &wrqu->encoding;
- struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- struct ieee80211_crypt_data *crypt;
- int idx, max_key_len;
-
- max_key_len = encoding->length - sizeof(*ext);
- if (max_key_len < 0)
- return -EINVAL;
-
- idx = encoding->flags & IW_ENCODE_INDEX;
- if (idx) {
- if (idx < 1 || idx > WEP_KEYS)
- return -EINVAL;
- idx--;
- } else
- idx = ieee->tx_keyidx;
-
- if (!(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) &&
- ext->alg != IW_ENCODE_ALG_WEP)
- if (idx != 0 || ieee->iw_mode != IW_MODE_INFRA)
- return -EINVAL;
-
- crypt = ieee->crypt[idx];
- encoding->flags = idx + 1;
- memset(ext, 0, sizeof(*ext));
-
- if (!crypt || !crypt->ops) {
- ext->alg = IW_ENCODE_ALG_NONE;
- ext->key_len = 0;
- encoding->flags |= IW_ENCODE_DISABLED;
- } else {
- if (strcmp(crypt->ops->name, "WEP") == 0)
- ext->alg = IW_ENCODE_ALG_WEP;
- else if (strcmp(crypt->ops->name, "TKIP") == 0)
- ext->alg = IW_ENCODE_ALG_TKIP;
- else if (strcmp(crypt->ops->name, "CCMP") == 0)
- ext->alg = IW_ENCODE_ALG_CCMP;
- else
- return -EINVAL;
- ext->key_len = crypt->ops->get_key(ext->key, SCM_KEY_LEN, NULL, crypt->priv);
- encoding->flags |= IW_ENCODE_ENABLED;
- if (ext->key_len &&
- (ext->alg == IW_ENCODE_ALG_TKIP ||
- ext->alg == IW_ENCODE_ALG_CCMP))
- ext->ext_flags |= IW_ENCODE_EXT_TX_SEQ_VALID;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(ieee80211_wx_get_encode_ext);
-
-int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_mlme *mlme = (struct iw_mlme *)extra;
- switch (mlme->cmd) {
- case IW_MLME_DEAUTH:
- case IW_MLME_DISASSOC:
- ieee80211_disassociate(ieee);
- break;
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-EXPORT_SYMBOL(ieee80211_wx_set_mlme);
-
-int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- struct iw_param *data, char *extra)
-{
- switch (data->flags & IW_AUTH_INDEX) {
- case IW_AUTH_WPA_VERSION:
- /*need to support wpa2 here*/
- break;
- case IW_AUTH_CIPHER_PAIRWISE:
- case IW_AUTH_CIPHER_GROUP:
- case IW_AUTH_KEY_MGMT:
- /*
- * * Host AP driver does not use these parameters and allows
- * * wpa_supplicant to control them internally.
- * */
- break;
- case IW_AUTH_TKIP_COUNTERMEASURES:
- ieee->tkip_countermeasures = data->value;
- break;
- case IW_AUTH_DROP_UNENCRYPTED:
- ieee->drop_unencrypted = data->value;
- break;
-
- case IW_AUTH_80211_AUTH_ALG:
- //printk("======>%s():data->value is %d\n",__func__,data->value);
- // ieee->open_wep = (data->value&IW_AUTH_ALG_OPEN_SYSTEM)?1:0;
- if (data->value & IW_AUTH_ALG_SHARED_KEY) {
- ieee->open_wep = 0;
- ieee->auth_mode = 1;
- } else if (data->value & IW_AUTH_ALG_OPEN_SYSTEM) {
- ieee->open_wep = 1;
- ieee->auth_mode = 0;
- } else if (data->value & IW_AUTH_ALG_LEAP) {
- ieee->open_wep = 1;
- ieee->auth_mode = 2;
- } else
- return -EINVAL;
- break;
-
- case IW_AUTH_WPA_ENABLED:
- ieee->wpa_enabled = (data->value) ? 1 : 0;
- break;
-
- case IW_AUTH_RX_UNENCRYPTED_EAPOL:
- ieee->ieee802_1x = data->value;
- break;
- case IW_AUTH_PRIVACY_INVOKED:
- ieee->privacy_invoked = data->value;
- break;
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-EXPORT_SYMBOL(ieee80211_wx_set_auth);
-
-int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
-{
- u8 *buf;
-
- if (len > MAX_WPA_IE_LEN || (len && !ie)) {
- //printk("return error out, len:%d\n", len);
- return -EINVAL;
- }
-
-
- if (len) {
- if (len != ie[1] + 2) {
- printk("len:%zu, ie:%d\n", len, ie[1]);
- return -EINVAL;
- }
- buf = kmemdup(ie, len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- kfree(ieee->wpa_ie);
- ieee->wpa_ie = buf;
- ieee->wpa_ie_len = len;
- } else {
- kfree(ieee->wpa_ie);
- ieee->wpa_ie = NULL;
- ieee->wpa_ie_len = 0;
- }
- return 0;
-}
-EXPORT_SYMBOL(ieee80211_wx_set_gen_ie);
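As a side note on the scan translation removed above: supported-rate octets are carried in 500 kb/s units with the top bit marking a basic rate, which is why rtl819x_translate_scan() printed (rate >> 1) plus an optional ".5" and reported max_rate * 500000 through SIOCGIWRATE. A minimal sketch of that decoding follows; the rate set used is an arbitrary example.

#include <stdio.h>

/* Illustrative only: decodes 802.11 supported-rate octets the same way the
 * deleted rtl819x_translate_scan() did (values in 500 kb/s units, high bit
 * flags a basic rate). */
int main(void)
{
	unsigned char rates[] = { 0x82, 0x84, 0x8b, 0x96, 0x24, 0x30, 0x48, 0x6c };
	unsigned int max_rate = 0;
	unsigned int i;

	for (i = 0; i < sizeof(rates); i++) {
		unsigned int r = rates[i] & 0x7F;  /* strip the basic-rate bit */

		if (r > max_rate)
			max_rate = r;
		printf("%u%s ", r >> 1, (r & 1) ? ".5" : "");
	}
	printf("Mb/s; bitrate.value = %u bit/s\n", max_rate * 500000);
	return 0;
}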
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BA.h b/drivers/staging/rtl8192u/ieee80211/rtl819x_BA.h
deleted file mode 100644
index 1a727856ba53..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BA.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BATYPE_H_
-#define _BATYPE_H_
-
-#define BA_SETUP_TIMEOUT 200
-
-#define BA_POLICY_DELAYED 0
-#define BA_POLICY_IMMEDIATE 1
-
-#define ADDBA_STATUS_SUCCESS 0
-#define ADDBA_STATUS_REFUSED 37
-#define ADDBA_STATUS_INVALID_PARAM 38
-
-#define DELBA_REASON_END_BA 37
-#define DELBA_REASON_UNKNOWN_BA 38
-#define DELBA_REASON_TIMEOUT 39
-
-union sequence_control {
- u16 short_data;
- struct {
- u16 frag_num:4;
- u16 seq_num:12;
- } field;
-};
-
-union ba_param_set {
- u16 short_data;
- struct {
- u16 amsdu_support:1;
- u16 ba_policy:1;
- u16 tid:4;
- u16 buffer_size:10;
- } field;
-};
-
-union delba_param_set {
- u16 short_data;
- struct {
- u16 reserved:11;
- u16 initiator:1;
- u16 tid:4;
- } field;
-};
-
-struct ba_record {
- struct timer_list timer;
- u8 valid;
- u8 dialog_token;
- union ba_param_set param_set;
- u16 timeout_value;
- union sequence_control start_seq_ctrl;
-};
-
-#endif //end _BATYPE_H_
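For reference, the BA Parameter Set carried by the unions above follows the 802.11 bit layout (B0 A-MSDU supported, B1 BA policy, B2-B5 TID, B6-B15 buffer size). Because C bitfield ordering is implementation-defined (which is why the driver serializes the value with put_unaligned_le16()), the sketch below decodes an example value with explicit shifts and masks; the 0x1002 input is an arbitrary example.

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: decode a BA Parameter Set value with explicit masks,
 * matching the field widths of the unions above. */
int main(void)
{
	uint16_t param = 0x1002; /* example: TID 0, immediate BA, 64-MPDU buffer */

	unsigned amsdu   = param & 0x1;
	unsigned policy  = (param >> 1) & 0x1;
	unsigned tid     = (param >> 2) & 0xf;
	unsigned bufsize = (param >> 6) & 0x3ff;

	printf("amsdu=%u policy=%u tid=%u buffer_size=%u\n",
	       amsdu, policy, tid, bufsize);
	return 0;
}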
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
deleted file mode 100644
index 6823e405eeb8..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
+++ /dev/null
@@ -1,700 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/********************************************************************************************************************************
- * This file is created to process BA Action Frames. According to the 802.11 spec, there are 3 BA action types in total. And as BA is
- * related to TS, this part needs some structures defined in the QOS side code. Also TX/RX is going to be restructured, so how to send
- * ADDBAREQ, ADDBARSP and DELBA packets is still under consideration. Temporarily use the MANAGE QUEUE instead of the Normal Queue.
- * WB 2008-05-27
- * *****************************************************************************************************************************/
-#include <asm/byteorder.h>
-#include <asm/unaligned.h>
-#include "ieee80211.h"
-#include "rtl819x_BA.h"
-
-/********************************************************************************************************************
- *function: Activate BA entry. And if Time is nonzero, start timer.
- * input: struct ba_record *pBA //BA entry to be enabled
- * u16 Time //indicate time delay.
- * output: none
- ********************************************************************************************************************/
-static void ActivateBAEntry(struct ieee80211_device *ieee, struct ba_record *pBA, u16 Time)
-{
- pBA->valid = true;
- if (Time != 0)
- mod_timer(&pBA->timer, jiffies + msecs_to_jiffies(Time));
-}
-
-/********************************************************************************************************************
- *function: deactivate BA entry, including its timer.
- * input: struct ba_record *pBA //BA entry to be disabled
- * output: none
- ********************************************************************************************************************/
-static void DeActivateBAEntry(struct ieee80211_device *ieee, struct ba_record *pBA)
-{
- pBA->valid = false;
- del_timer_sync(&pBA->timer);
-}
-/********************************************************************************************************************
- *function: deactivate BA entry in Tx Ts, and send DELBA.
- * input:
- * struct tx_ts_record *pTxTs //Tx Ts which is to deactivate BA entry.
- * output: none
- * notice: As struct tx_ts_record * structure will be defined in QOS, so wait to be merged. //FIXME
- ********************************************************************************************************************/
-static u8 TxTsDeleteBA(struct ieee80211_device *ieee, struct tx_ts_record *pTxTs)
-{
- struct ba_record *pAdmittedBa = &pTxTs->tx_admitted_ba_record; //These two BA entries must exist in TS structure
- struct ba_record *pPendingBa = &pTxTs->tx_pending_ba_record;
- u8 bSendDELBA = false;
-
- // Delete pending BA
- if (pPendingBa->valid) {
- DeActivateBAEntry(ieee, pPendingBa);
- bSendDELBA = true;
- }
-
- // Delete admitted BA
- if (pAdmittedBa->valid) {
- DeActivateBAEntry(ieee, pAdmittedBa);
- bSendDELBA = true;
- }
-
- return bSendDELBA;
-}
-
-/********************************************************************************************************************
- *function: deactivate BA entry in Rx Ts, and send DELBA.
- * input:
- * struct rx_ts_record *pRxTs //Rx Ts which is to deactivate BA entry.
- * output: none
- * notice: As struct rx_ts_record * structure will be defined in QOS, so wait to be merged. //FIXME, same with above
- ********************************************************************************************************************/
-static u8 RxTsDeleteBA(struct ieee80211_device *ieee, struct rx_ts_record *pRxTs)
-{
- struct ba_record *pBa = &pRxTs->rx_admitted_ba_record;
- u8 bSendDELBA = false;
-
- if (pBa->valid) {
- DeActivateBAEntry(ieee, pBa);
- bSendDELBA = true;
- }
-
- return bSendDELBA;
-}
-
-/********************************************************************************************************************
- *function: reset BA entry
- * input:
- * struct ba_record *pBA //entry to be reset
- * output: none
- ********************************************************************************************************************/
-void ResetBaEntry(struct ba_record *pBA)
-{
- pBA->valid = false;
- pBA->param_set.short_data = 0;
- pBA->timeout_value = 0;
- pBA->dialog_token = 0;
- pBA->start_seq_ctrl.short_data = 0;
-}
-//These functions need porting here or not?
-/*******************************************************************************************************************************
- *function: construct ADDBAREQ and ADDBARSP frame here together.
- * input: u8* Dst //ADDBA frame's destination
- * struct ba_record *pBA //BA_RECORD entry which stores the necessary information for BA.
- * u16 StatusCode //status code in RSP and I will use it to indicate whether it's RSP or REQ(will I?)
- * u8 type //indicate whether it's RSP(ACT_ADDBARSP) or REQ(ACT_ADDBAREQ)
- * output: none
- * return: sk_buff* skb //return constructed skb to xmit
- *******************************************************************************************************************************/
-static struct sk_buff *ieee80211_ADDBA(struct ieee80211_device *ieee, u8 *Dst, struct ba_record *pBA, u16 StatusCode, u8 type)
-{
- struct sk_buff *skb = NULL;
- struct rtl_80211_hdr_3addr *BAReq = NULL;
- u8 *tag = NULL;
- u16 len = ieee->tx_headroom + 9;
- //category(1) + action field(1) + Dialog Token(1) + BA Parameter Set(2) + BA Timeout Value(2) + BA Start SeqCtrl(2)(or StatusCode(2))
- IEEE80211_DEBUG(IEEE80211_DL_TRACE | IEEE80211_DL_BA, "========>%s(), frame(%d) sentd to:%pM, ieee->dev:%p\n", __func__, type, Dst, ieee->dev);
- if (pBA == NULL) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "pBA is NULL\n");
- return NULL;
- }
- skb = dev_alloc_skb(len + sizeof(struct rtl_80211_hdr_3addr)); //need to add something others? FIXME
- if (!skb)
- return NULL;
-
- memset(skb->data, 0, sizeof(struct rtl_80211_hdr_3addr)); //I wonder whether it's necessary. Apparently kernel will not do it when alloc a skb.
- skb_reserve(skb, ieee->tx_headroom);
-
- BAReq = skb_put(skb, sizeof(struct rtl_80211_hdr_3addr));
-
- memcpy(BAReq->addr1, Dst, ETH_ALEN);
- memcpy(BAReq->addr2, ieee->dev->dev_addr, ETH_ALEN);
-
- memcpy(BAReq->addr3, ieee->current_network.bssid, ETH_ALEN);
-
- BAReq->frame_ctl = cpu_to_le16(IEEE80211_STYPE_MANAGE_ACT); //action frame
-
- //tag += sizeof( struct rtl_80211_hdr_3addr); //move to action field
- tag = skb_put(skb, 9);
- *tag++ = ACT_CAT_BA;
- *tag++ = type;
- // Dialog Token
- *tag++ = pBA->dialog_token;
-
- if (type == ACT_ADDBARSP) {
- // Status Code
- netdev_info(ieee->dev, "=====>to send ADDBARSP\n");
-
- put_unaligned_le16(StatusCode, tag);
- tag += 2;
- }
- // BA Parameter Set
-
- put_unaligned_le16(pBA->param_set.short_data, tag);
- tag += 2;
- // BA Timeout Value
-
- put_unaligned_le16(pBA->timeout_value, tag);
- tag += 2;
-
- if (type == ACT_ADDBAREQ) {
- // BA Start SeqCtrl
- memcpy(tag, (u8 *)&(pBA->start_seq_ctrl), 2);
- tag += 2;
- }
-
- IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_BA, skb->data, skb->len);
- return skb;
- //return NULL;
-}
-
-
-/********************************************************************************************************************
- *function: construct DELBA frame
- * input: u8* dst //DELBA frame's destination
- * struct ba_record *pBA //BA_RECORD entry which stores the necessary information for BA
- * enum tr_select TxRxSelect //TX RX direction
- * u16 ReasonCode //status code.
- * output: none
- * return: sk_buff* skb //return constructed skb to xmit
- ********************************************************************************************************************/
-static struct sk_buff *ieee80211_DELBA(
- struct ieee80211_device *ieee,
- u8 *dst,
- struct ba_record *pBA,
- enum tr_select TxRxSelect,
- u16 ReasonCode
- )
-{
- union delba_param_set DelbaParamSet;
- struct sk_buff *skb = NULL;
- struct rtl_80211_hdr_3addr *Delba = NULL;
- u8 *tag = NULL;
- //len = head len + DELBA Parameter Set(2) + Reason Code(2)
- u16 len = 6 + ieee->tx_headroom;
-
- if (net_ratelimit())
- IEEE80211_DEBUG(IEEE80211_DL_TRACE | IEEE80211_DL_BA,
- "========>%s(), ReasonCode(%d) sentd to:%pM\n",
- __func__, ReasonCode, dst);
-
- memset(&DelbaParamSet, 0, 2);
-
- DelbaParamSet.field.initiator = (TxRxSelect == TX_DIR) ? 1 : 0;
- DelbaParamSet.field.tid = pBA->param_set.field.tid;
-
- skb = dev_alloc_skb(len + sizeof(struct rtl_80211_hdr_3addr)); //need to add something others? FIXME
- if (!skb)
- return NULL;
-// memset(skb->data, 0, len+sizeof( struct rtl_80211_hdr_3addr));
- skb_reserve(skb, ieee->tx_headroom);
-
- Delba = skb_put(skb, sizeof(struct rtl_80211_hdr_3addr));
-
- memcpy(Delba->addr1, dst, ETH_ALEN);
- memcpy(Delba->addr2, ieee->dev->dev_addr, ETH_ALEN);
- memcpy(Delba->addr3, ieee->current_network.bssid, ETH_ALEN);
- Delba->frame_ctl = cpu_to_le16(IEEE80211_STYPE_MANAGE_ACT); //action frame
-
- tag = skb_put(skb, 6);
-
- *tag++ = ACT_CAT_BA;
- *tag++ = ACT_DELBA;
-
- // DELBA Parameter Set
-
- put_unaligned_le16(DelbaParamSet.short_data, tag);
- tag += 2;
- // Reason Code
-
- put_unaligned_le16(ReasonCode, tag);
- tag += 2;
-
- IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_BA, skb->data, skb->len);
- if (net_ratelimit())
- IEEE80211_DEBUG(IEEE80211_DL_TRACE | IEEE80211_DL_BA,
- "<=====%s()\n", __func__);
- return skb;
-}
-
-/********************************************************************************************************************
- *function: send ADDBAReq frame out
- * input: u8* dst //ADDBAReq frame's destination
- * struct ba_record *pBA //BA_RECORD entry which stores the necessary information for BA
- * output: none
- * notice: If at all possible, please hide pBA inside ieee. For now, use the Manage Queue, as softmac_mgmt_xmit() usually does
- ********************************************************************************************************************/
-static void ieee80211_send_ADDBAReq(struct ieee80211_device *ieee,
- u8 *dst, struct ba_record *pBA)
-{
- struct sk_buff *skb;
-	skb = ieee80211_ADDBA(ieee, dst, pBA, 0, ACT_ADDBAREQ);	// construct an ACT_ADDBAREQ frame, so the status code is set to zero
-
- if (skb) {
- softmac_mgmt_xmit(skb, ieee);
- //add statistic needed here.
- //and skb will be freed in softmac_mgmt_xmit(), so omit all dev_kfree_skb_any() outside softmac_mgmt_xmit()
- //WB
- } else {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "alloc skb error in function %s()\n", __func__);
- }
-}
-
-/********************************************************************************************************************
- *function: send ADDBARSP frame out
- * input: u8* dst //ADDBARSP frame's destination
- * struct ba_record *pBA //BA_RECORD entry which stores the necessary information for BA
- * u16 StatusCode //RSP StatusCode
- * output: none
- * notice: If at all possible, please hide pBA in ieee. For now, use the Manage Queue, as softmac_mgmt_xmit() usually does
- ********************************************************************************************************************/
-static void ieee80211_send_ADDBARsp(struct ieee80211_device *ieee, u8 *dst,
- struct ba_record *pBA, u16 StatusCode)
-{
- struct sk_buff *skb;
- skb = ieee80211_ADDBA(ieee, dst, pBA, StatusCode, ACT_ADDBARSP); //construct ACT_ADDBARSP frames
- if (skb) {
- softmac_mgmt_xmit(skb, ieee);
- //same above
- } else {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "alloc skb error in function %s()\n", __func__);
- }
-
- return;
-
-}
-/********************************************************************************************************************
- *function: send DELBA frame out
- * input: u8* dst //DELBA frame's destination
- * struct ba_record *pBA //BA_RECORD entry which stores the necessary information for BA
- * enum tr_select TxRxSelect //TX or RX
- * u16 ReasonCode //DEL ReasonCode
- * output: none
- * notice: If at all possible, please hide pBA in ieee. For now, use the Manage Queue, as softmac_mgmt_xmit() usually does
- ********************************************************************************************************************/
-
-static void ieee80211_send_DELBA(struct ieee80211_device *ieee, u8 *dst,
- struct ba_record *pBA, enum tr_select TxRxSelect,
- u16 ReasonCode)
-{
- struct sk_buff *skb;
-	skb = ieee80211_DELBA(ieee, dst, pBA, TxRxSelect, ReasonCode);	// construct an ACT_DELBA frame
- if (skb) {
- softmac_mgmt_xmit(skb, ieee);
- //same above
- } else {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "alloc skb error in function %s()\n", __func__);
- }
-}
-
-/********************************************************************************************************************
- *function: RX ADDBAReq
- * input: struct sk_buff * skb //incoming ADDBAReq skb.
- * return: 0(pass), other(fail)
- * notice: As this function needs QoS support, some code is commented out. When QoS is ready, that code will need to be supported.
- ********************************************************************************************************************/
-int ieee80211_rx_ADDBAReq(struct ieee80211_device *ieee, struct sk_buff *skb)
-{
- struct rtl_80211_hdr_3addr *req = NULL;
- u16 rc = 0;
- u8 *dst = NULL, *pDialogToken = NULL, *tag = NULL;
- struct ba_record *pBA = NULL;
- union ba_param_set *pBaParamSet = NULL;
- u16 *pBaTimeoutVal = NULL;
- union sequence_control *pBaStartSeqCtrl = NULL;
- struct rx_ts_record *pTS = NULL;
-
- if (skb->len < sizeof(struct rtl_80211_hdr_3addr) + 9) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR,
- " Invalid skb len in BAREQ(%d / %zu)\n",
- skb->len,
- (sizeof(struct rtl_80211_hdr_3addr) + 9));
- return -1;
- }
-
- IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_BA, skb->data, skb->len);
-
- req = (struct rtl_80211_hdr_3addr *)skb->data;
- tag = (u8 *)req;
- dst = &req->addr2[0];
- tag += sizeof(struct rtl_80211_hdr_3addr);
- pDialogToken = tag + 2; //category+action
- pBaParamSet = (union ba_param_set *)(tag + 3); //+DialogToken
- pBaTimeoutVal = (u16 *)(tag + 5);
-	pBaStartSeqCtrl = (union sequence_control *)(tag + 7);
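-	/*
-	 * Field offsets within the action frame body (relative to tag):
-	 * 0 Category, 1 Action, 2 Dialog Token, 3-4 BA Parameter Set,
-	 * 5-6 BA Timeout Value, 7-8 BA Start SeqCtrl.
-	 */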
-
- netdev_info(ieee->dev, "====================>rx ADDBAREQ from :%pM\n", dst);
-// some other capabilities are not ready yet.
- if ((ieee->current_network.qos_data.active == 0) ||
- (!ieee->pHTInfo->bCurrentHTSupport)) //||
- // (!ieee->pStaQos->bEnableRxImmBA) )
- {
- rc = ADDBA_STATUS_REFUSED;
-		IEEE80211_DEBUG(IEEE80211_DL_ERR, "Failed to reply to ADDBA_REQ as some capabilities are not ready(%d, %d)\n", ieee->current_network.qos_data.active, ieee->pHTInfo->bCurrentHTSupport);
- goto OnADDBAReq_Fail;
- }
- // Search for related traffic stream.
- // If there is no matched TS, reject the ADDBA request.
- if (!GetTs(
- ieee,
- (struct ts_common_info **)(&pTS),
- dst,
- (u8)(pBaParamSet->field.tid),
- RX_DIR,
- true)) {
- rc = ADDBA_STATUS_REFUSED;
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't get TS in %s()\n", __func__);
- goto OnADDBAReq_Fail;
- }
- pBA = &pTS->rx_admitted_ba_record;
- // To Determine the ADDBA Req content
- // We can do much more check here, including buffer_size, AMSDU_Support, Policy, StartSeqCtrl...
- // I want to check StartSeqCtrl to make sure when we start aggregation!!!
- //
- if (pBaParamSet->field.ba_policy == BA_POLICY_DELAYED) {
- rc = ADDBA_STATUS_INVALID_PARAM;
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "BA Policy is not correct in %s()\n", __func__);
- goto OnADDBAReq_Fail;
- }
- // Admit the ADDBA Request
- //
- DeActivateBAEntry(ieee, pBA);
- pBA->dialog_token = *pDialogToken;
- pBA->param_set = *pBaParamSet;
- pBA->timeout_value = *pBaTimeoutVal;
- pBA->start_seq_ctrl = *pBaStartSeqCtrl;
- //for half N mode we only aggregate 1 frame
- if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
- pBA->param_set.field.buffer_size = 1;
- else
- pBA->param_set.field.buffer_size = 32;
- ActivateBAEntry(ieee, pBA, pBA->timeout_value);
- ieee80211_send_ADDBARsp(ieee, dst, pBA, ADDBA_STATUS_SUCCESS);
-
- // End of procedure.
- return 0;
-
-OnADDBAReq_Fail:
- {
- struct ba_record BA;
- BA.param_set = *pBaParamSet;
- BA.timeout_value = *pBaTimeoutVal;
- BA.dialog_token = *pDialogToken;
- BA.param_set.field.ba_policy = BA_POLICY_IMMEDIATE;
- ieee80211_send_ADDBARsp(ieee, dst, &BA, rc);
- return 0; //we send RSP out.
- }
-
-}
-
-/********************************************************************************************************************
- *function: RX ADDBARSP
- * input: struct sk_buff * skb //incoming ADDBARsp skb.
- * return: 0(pass), other(fail)
- * notice: As this function needs QoS support, some code is commented out. When QoS is ready, that code will need to be supported.
- ********************************************************************************************************************/
-int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee, struct sk_buff *skb)
-{
- struct rtl_80211_hdr_3addr *rsp = NULL;
- struct ba_record *pPendingBA, *pAdmittedBA;
- struct tx_ts_record *pTS = NULL;
- u8 *dst = NULL, *pDialogToken = NULL, *tag = NULL;
- u16 *pStatusCode = NULL, *pBaTimeoutVal = NULL;
- union ba_param_set *pBaParamSet = NULL;
- u16 ReasonCode;
-
- if (skb->len < sizeof(struct rtl_80211_hdr_3addr) + 9) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR,
- " Invalid skb len in BARSP(%d / %zu)\n",
- skb->len,
- (sizeof(struct rtl_80211_hdr_3addr) + 9));
- return -1;
- }
- rsp = (struct rtl_80211_hdr_3addr *)skb->data;
- tag = (u8 *)rsp;
- dst = &rsp->addr2[0];
- tag += sizeof(struct rtl_80211_hdr_3addr);
- pDialogToken = tag + 2;
- pStatusCode = (u16 *)(tag + 3);
- pBaParamSet = (union ba_param_set *)(tag + 5);
- pBaTimeoutVal = (u16 *)(tag + 7);
-
- // Check the capability
- // Since we can always receive A-MPDU, we just check if it is under HT mode.
- if (ieee->current_network.qos_data.active == 0 ||
- !ieee->pHTInfo->bCurrentHTSupport ||
- !ieee->pHTInfo->bCurrentAMPDUEnable) {
-		IEEE80211_DEBUG(IEEE80211_DL_ERR, "rejecting ADDBA_RSP as some capabilities are not ready(%d, %d, %d)\n", ieee->current_network.qos_data.active, ieee->pHTInfo->bCurrentHTSupport, ieee->pHTInfo->bCurrentAMPDUEnable);
- ReasonCode = DELBA_REASON_UNKNOWN_BA;
- goto OnADDBARsp_Reject;
- }
-
-
- //
- // Search for related TS.
-	// If there is no TS found, we will reject the ADDBA Rsp by sending a DELBA frame.
- //
- if (!GetTs(
- ieee,
- (struct ts_common_info **)(&pTS),
- dst,
- (u8)(pBaParamSet->field.tid),
- TX_DIR,
- false)) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't get TS in %s()\n", __func__);
- ReasonCode = DELBA_REASON_UNKNOWN_BA;
- goto OnADDBARsp_Reject;
- }
-
- pTS->add_ba_req_in_progress = false;
- pPendingBA = &pTS->tx_pending_ba_record;
- pAdmittedBA = &pTS->tx_admitted_ba_record;
-
-
- //
- // Check if related BA is waiting for setup.
- // If not, reject by sending DELBA frame.
- //
- if (pAdmittedBA->valid) {
- // Since BA is already setup, we ignore all other ADDBA Response.
-		IEEE80211_DEBUG(IEEE80211_DL_BA, "OnADDBARsp(): Recv ADDBA Rsp. Drop because it is already admitted!\n");
- return -1;
- } else if ((!pPendingBA->valid) || (*pDialogToken != pPendingBA->dialog_token)) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "OnADDBARsp(): Recv ADDBA Rsp. BA invalid, DELBA! \n");
- ReasonCode = DELBA_REASON_UNKNOWN_BA;
- goto OnADDBARsp_Reject;
- } else {
- IEEE80211_DEBUG(IEEE80211_DL_BA, "OnADDBARsp(): Recv ADDBA Rsp. BA is admitted! Status code:%X\n", *pStatusCode);
- DeActivateBAEntry(ieee, pPendingBA);
- }
-
-
- if (*pStatusCode == ADDBA_STATUS_SUCCESS) {
- //
- // Determine ADDBA Rsp content here.
- // We can compare the value of BA parameter set that Peer returned and Self sent.
- // If it is OK, then admitted. Or we can send DELBA to cancel BA mechanism.
- //
- if (pBaParamSet->field.ba_policy == BA_POLICY_DELAYED) {
-			// Since this is a kind of ADDBA failure, we delay the next ADDBA process.
- pTS->add_ba_req_delayed = true;
- DeActivateBAEntry(ieee, pAdmittedBA);
- ReasonCode = DELBA_REASON_END_BA;
- goto OnADDBARsp_Reject;
- }
-
-
- //
- // Admitted condition
- //
- pAdmittedBA->dialog_token = *pDialogToken;
- pAdmittedBA->timeout_value = *pBaTimeoutVal;
- pAdmittedBA->start_seq_ctrl = pPendingBA->start_seq_ctrl;
- pAdmittedBA->param_set = *pBaParamSet;
- DeActivateBAEntry(ieee, pAdmittedBA);
- ActivateBAEntry(ieee, pAdmittedBA, *pBaTimeoutVal);
- } else {
- // Delay next ADDBA process.
- pTS->add_ba_req_delayed = true;
- }
-
- // End of procedure
- return 0;
-
-OnADDBARsp_Reject:
- {
- struct ba_record BA;
- BA.param_set = *pBaParamSet;
- ieee80211_send_DELBA(ieee, dst, &BA, TX_DIR, ReasonCode);
- return 0;
- }
-
-}
-
-/********************************************************************************************************************
- *function: RX DELBA
- * input: struct sk_buff * skb //incoming DELBA skb.
- * return: 0(pass), other(fail)
- * notice: As this function needs QoS support, some code is commented out. When QoS is ready, that code will need to be supported.
- ********************************************************************************************************************/
-int ieee80211_rx_DELBA(struct ieee80211_device *ieee, struct sk_buff *skb)
-{
- struct rtl_80211_hdr_3addr *delba = NULL;
- union delba_param_set *pDelBaParamSet = NULL;
- u8 *dst = NULL;
-
- if (skb->len < sizeof(struct rtl_80211_hdr_3addr) + 6) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR,
- " Invalid skb len in DELBA(%d / %zu)\n",
- skb->len,
- (sizeof(struct rtl_80211_hdr_3addr) + 6));
- return -1;
- }
-
- if (ieee->current_network.qos_data.active == 0 ||
- !ieee->pHTInfo->bCurrentHTSupport) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "received DELBA while QOS or HT is not supported(%d, %d)\n", ieee->current_network.qos_data.active, ieee->pHTInfo->bCurrentHTSupport);
- return -1;
- }
-
- IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_BA, skb->data, skb->len);
- delba = (struct rtl_80211_hdr_3addr *)skb->data;
- dst = &delba->addr2[0];
- pDelBaParamSet = (union delba_param_set *)&delba->payload[2];
-
- if (pDelBaParamSet->field.initiator == 1) {
- struct rx_ts_record *pRxTs;
-
- if (!GetTs(
- ieee,
- (struct ts_common_info **)&pRxTs,
- dst,
- (u8)pDelBaParamSet->field.tid,
- RX_DIR,
- false)) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't get TS for RXTS in %s()\n", __func__);
- return -1;
- }
-
- RxTsDeleteBA(ieee, pRxTs);
- } else {
- struct tx_ts_record *pTxTs;
-
- if (!GetTs(
- ieee,
- (struct ts_common_info **)&pTxTs,
- dst,
- (u8)pDelBaParamSet->field.tid,
- TX_DIR,
- false)) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't get TS for TXTS in %s()\n", __func__);
- return -1;
- }
-
- pTxTs->using_ba = false;
- pTxTs->add_ba_req_in_progress = false;
- pTxTs->add_ba_req_delayed = false;
- del_timer_sync(&pTxTs->ts_add_ba_timer);
- //PlatformCancelTimer(Adapter, &pTxTs->ts_add_ba_timer);
- TxTsDeleteBA(ieee, pTxTs);
- }
- return 0;
-}
-
-//
-// ADDBA initiate. This can only be called by TX side.
-//
-void
-TsInitAddBA(
- struct ieee80211_device *ieee,
- struct tx_ts_record *pTS,
- u8 Policy,
- u8 bOverwritePending
- )
-{
- struct ba_record *pBA = &pTS->tx_pending_ba_record;
-
- if (pBA->valid && !bOverwritePending)
- return;
-
- // Set parameters to "Pending" variable set
- DeActivateBAEntry(ieee, pBA);
-
- pBA->dialog_token++; // DialogToken: Only keep the latest dialog token
- pBA->param_set.field.amsdu_support = 0; // Do not support A-MSDU with A-MPDU now!!
- pBA->param_set.field.ba_policy = Policy; // Policy: Delayed or Immediate
- pBA->param_set.field.tid = pTS->ts_common_info.t_spec.ts_info.uc_tsid; // TID
-	// buffer_size: this needs to be set according to the A-MPDU vector
-	pBA->param_set.field.buffer_size = 32;
- pBA->timeout_value = 0; // Timeout value: Set 0 to disable Timer
-	pBA->start_seq_ctrl.field.seq_num = (pTS->tx_cur_seq + 3) % 4096;	// Block Ack will start 3 packets later
-
- ActivateBAEntry(ieee, pBA, BA_SETUP_TIMEOUT);
-
- ieee80211_send_ADDBAReq(ieee, pTS->ts_common_info.addr, pBA);
-}
-
-void
-TsInitDelBA(struct ieee80211_device *ieee, struct ts_common_info *pTsCommonInfo, enum tr_select TxRxSelect)
-{
- if (TxRxSelect == TX_DIR) {
- struct tx_ts_record *pTxTs = (struct tx_ts_record *)pTsCommonInfo;
-
- if (TxTsDeleteBA(ieee, pTxTs))
- ieee80211_send_DELBA(
- ieee,
- pTsCommonInfo->addr,
- (pTxTs->tx_admitted_ba_record.valid) ? (&pTxTs->tx_admitted_ba_record) : (&pTxTs->tx_pending_ba_record),
- TxRxSelect,
- DELBA_REASON_END_BA);
- } else if (TxRxSelect == RX_DIR) {
- struct rx_ts_record *pRxTs = (struct rx_ts_record *)pTsCommonInfo;
- if (RxTsDeleteBA(ieee, pRxTs))
- ieee80211_send_DELBA(
- ieee,
- pTsCommonInfo->addr,
- &pRxTs->rx_admitted_ba_record,
- TxRxSelect,
- DELBA_REASON_END_BA);
- }
-}
-/********************************************************************************************************************
- *function: BA setup timer
- * input: struct timer_list *t //actually we hand a struct tx_ts_record or struct rx_ts_record to these timers
- * return: none
- * notice:
- ********************************************************************************************************************/
-void BaSetupTimeOut(struct timer_list *t)
-{
- struct tx_ts_record *pTxTs = from_timer(pTxTs, t, tx_pending_ba_record.timer);
-
- pTxTs->add_ba_req_in_progress = false;
- pTxTs->add_ba_req_delayed = true;
- pTxTs->tx_pending_ba_record.valid = false;
-}
-
-void TxBaInactTimeout(struct timer_list *t)
-{
- struct tx_ts_record *pTxTs = from_timer(pTxTs, t, tx_admitted_ba_record.timer);
- struct ieee80211_device *ieee = container_of(pTxTs, struct ieee80211_device, TxTsRecord[pTxTs->num]);
- TxTsDeleteBA(ieee, pTxTs);
- ieee80211_send_DELBA(
- ieee,
- pTxTs->ts_common_info.addr,
- &pTxTs->tx_admitted_ba_record,
- TX_DIR,
- DELBA_REASON_TIMEOUT);
-}
-
-void RxBaInactTimeout(struct timer_list *t)
-{
- struct rx_ts_record *pRxTs = from_timer(pRxTs, t, rx_admitted_ba_record.timer);
- struct ieee80211_device *ieee = container_of(pRxTs, struct ieee80211_device, RxTsRecord[pRxTs->num]);
-
- RxTsDeleteBA(ieee, pRxTs);
- ieee80211_send_DELBA(
- ieee,
- pRxTs->ts_common_info.addr,
- &pRxTs->rx_admitted_ba_record,
- RX_DIR,
- DELBA_REASON_TIMEOUT);
-}
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h b/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h
deleted file mode 100644
index 79346a00af09..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h
+++ /dev/null
@@ -1,302 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _RTL819XU_HTTYPE_H_
-#define _RTL819XU_HTTYPE_H_
-
-/*
- * The HT Capability element is present in beacons, association request,
- * reassociation request and probe response frames
- */
-
-/*
- * MIMO Power Save Settings
- */
-#define MIMO_PS_STATIC 0
-
-/*
- * There should be 128 bits to cover all of the MCS rates. However, since
- * the 8190 does not support that many rates, one integer is quite enough.
- */
-#define HTCLNG 4
-
-/*
- * Represent Channel Width in HT Capabilities
- */
-enum ht_channel_width {
- HT_CHANNEL_WIDTH_20 = 0,
- HT_CHANNEL_WIDTH_20_40 = 1,
-};
-
-/*
- * Represent Extension Channel Offset in HT Capabilities
- * This is available only in 40Mhz mode.
- */
-enum ht_extension_chan_offset {
- HT_EXTCHNL_OFFSET_NO_EXT = 0,
- HT_EXTCHNL_OFFSET_UPPER = 1,
- HT_EXTCHNL_OFFSET_NO_DEF = 2,
- HT_EXTCHNL_OFFSET_LOWER = 3,
-};
-
-struct ht_capability_ele {
- //HT capability info
- u8 AdvCoding:1;
- u8 ChlWidth:1;
- u8 MimoPwrSave:2;
- u8 GreenField:1;
- u8 ShortGI20Mhz:1;
- u8 ShortGI40Mhz:1;
- u8 TxSTBC:1;
- u8 RxSTBC:2;
- u8 DelayBA:1;
- u8 MaxAMSDUSize:1;
- u8 DssCCk:1;
- u8 PSMP:1;
- u8 Rsvd1:1;
- u8 LSigTxopProtect:1;
-
- //MAC HT parameters info
- u8 MaxRxAMPDUFactor:2;
- u8 MPDUDensity:3;
- u8 Rsvd2:3;
-
- //Supported MCS set
- u8 MCS[16];
-
- //Extended HT Capability Info
- u16 ExtHTCapInfo;
-
- //TXBF Capabilities
- u8 TxBFCap[4];
-
- //Antenna Selection Capabilities
- u8 ASCap;
-
-} __packed;
-
-/*
- * The HT Information element is present in beacons
- * Only AP is required to include this element
- */
-typedef struct _HT_INFORMATION_ELE {
- u8 ControlChl;
-
- u8 ExtChlOffset:2;
- u8 RecommemdedTxWidth:1;
- u8 RIFS:1;
- u8 PSMPAccessOnly:1;
- u8 SrvIntGranularity:3;
-
- u8 OptMode:2;
- u8 NonGFDevPresent:1;
- u8 Revd1:5;
- u8 Revd2:8;
-
- u8 Rsvd3:6;
- u8 DualBeacon:1;
- u8 DualCTSProtect:1;
-
- u8 SecondaryBeacon:1;
- u8 LSigTxopProtectFull:1;
- u8 PcoActive:1;
- u8 PcoPhase:1;
- u8 Rsvd4:4;
-
- u8 BasicMSC[16];
-} __attribute__ ((packed)) HT_INFORMATION_ELE, *PHT_INFORMATION_ELE;
-
-typedef enum _HT_SPEC_VER {
- HT_SPEC_VER_IEEE = 0,
- HT_SPEC_VER_EWC = 1,
-} HT_SPEC_VER, *PHT_SPEC_VER;
-
-typedef enum _HT_AGGRE_MODE_E {
- HT_AGG_AUTO = 0,
- HT_AGG_FORCE_ENABLE = 1,
- HT_AGG_FORCE_DISABLE = 2,
-} HT_AGGRE_MODE_E, *PHT_AGGRE_MODE_E;
-
-/*
- * The Data structure is used to keep HT related variables when card is
- * configured as non-AP STA mode. **Note** Current_xxx should be set
- * to default value in HTInitializeHTInfo()
- */
-typedef struct _RT_HIGH_THROUGHPUT {
- u8 bEnableHT;
- u8 bCurrentHTSupport;
-
- u8 bRegBW40MHz; // Tx 40MHz channel capability
- u8 bCurBW40MHz; // Tx 40MHz channel capability
-
- u8 bRegShortGI40MHz; // Tx Short GI for 40Mhz
- u8 bCurShortGI40MHz; // Tx Short GI for 40MHz
-
- u8 bRegShortGI20MHz; // Tx Short GI for 20MHz
- u8 bCurShortGI20MHz; // Tx Short GI for 20MHz
-
- u8 bRegSuppCCK; // Tx CCK rate capability
- u8 bCurSuppCCK; // Tx CCK rate capability
-
- // 802.11n spec version for "peer"
- HT_SPEC_VER ePeerHTSpecVer;
-
- // HT related information for "Self"
- struct ht_capability_ele SelfHTCap; // This is HT cap element sent to peer STA, which also indicate HT Rx capabilities.
- HT_INFORMATION_ELE SelfHTInfo; // This is HT info element sent to peer STA, which also indicate HT Rx capabilities.
-
- // HT related information for "Peer"
- u8 PeerHTCapBuf[32];
- u8 PeerHTInfoBuf[32];
-
- // A-MSDU related
- u8 bAMSDU_Support; // This indicates Tx A-MSDU capability
- u16 nAMSDU_MaxSize; // This indicates Tx A-MSDU capability
- u8 bCurrent_AMSDU_Support; // This indicates Tx A-MSDU capability
- u16 nCurrent_AMSDU_MaxSize; // This indicates Tx A-MSDU capability
-
- // AMPDU related <2006.08.10 Emily>
- u8 bAMPDUEnable; // This indicate Tx A-MPDU capability
- u8 bCurrentAMPDUEnable; // This indicate Tx A-MPDU capability
- u8 AMPDU_Factor; // This indicate Tx A-MPDU capability
- u8 CurrentAMPDUFactor; // This indicate Tx A-MPDU capability
- u8 MPDU_Density; // This indicate Tx A-MPDU capability
- u8 CurrentMPDUDensity; // This indicate Tx A-MPDU capability
-
- // Forced A-MPDU enable
- HT_AGGRE_MODE_E ForcedAMPDUMode;
- u8 ForcedAMPDUFactor;
- u8 ForcedMPDUDensity;
-
- // Forced A-MSDU enable
- HT_AGGRE_MODE_E ForcedAMSDUMode;
- u16 ForcedAMSDUMaxSize;
-
- u8 bForcedShortGI;
-
- u8 CurrentOpMode;
-
- // MIMO PS related
- u8 SelfMimoPs;
- u8 PeerMimoPs;
-
- // 40MHz Channel Offset settings.
- enum ht_extension_chan_offset CurSTAExtChnlOffset;
- u8 bCurTxBW40MHz; // If we use 40 MHz to Tx
- u8 PeerBandwidth;
-
- // For Bandwidth Switching
- u8 bSwBwInProgress;
- u8 SwBwStep;
- //struct timer_list SwBwTimer; //moved to ieee80211_device. as timer_list need include some header file here.
-
- // For Realtek proprietary A-MPDU factor for aggregation
- u8 bRegRT2RTAggregation;
- u8 bCurrentRT2RTAggregation;
- u8 bCurrentRT2RTLongSlotTime;
- u8 szRT2RTAggBuffer[10];
-
- // Rx Reorder control
- u8 bRegRxReorderEnable;
- u8 bCurRxReorderEnable;
- u8 RxReorderWinSize;
- u8 RxReorderPendingTime;
- u16 RxReorderDropCounter;
-
-#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
- u8 UsbTxAggrNum;
-#endif
-#ifdef USB_RX_AGGREGATION_SUPPORT
- u8 UsbRxFwAggrEn;
- u8 UsbRxFwAggrPageNum;
- u8 UsbRxFwAggrPacketNum;
- u8 UsbRxFwAggrTimeout;
-#endif
-
- // Add for Broadcom(Linksys) IOT. Joseph
- u8 bIsPeerBcm;
-
- // For IOT issue.
- u8 IOTPeer;
- u32 IOTAction;
-} __attribute__ ((packed)) RT_HIGH_THROUGHPUT, *PRT_HIGH_THROUGHPUT;
-
-/*
- * The Data structure is used to keep HT related variable for "each AP"
- * when card is configured as "STA mode"
- */
-typedef struct _BSS_HT {
- u8 bdSupportHT;
-
- // HT related elements
- u8 bdHTCapBuf[32];
- u16 bdHTCapLen;
- u8 bdHTInfoBuf[32];
- u16 bdHTInfoLen;
-
- HT_SPEC_VER bdHTSpecVer;
- //struct ht_capability_ele bdHTCapEle;
- //HT_INFORMATION_ELE bdHTInfoEle;
-
- u8 bdRT2RTAggregation;
- u8 bdRT2RTLongSlotTime;
-} __attribute__ ((packed)) BSS_HT, *PBSS_HT;
-
-extern u8 MCS_FILTER_ALL[16];
-extern u8 MCS_FILTER_1SS[16];
-
-/*
- * 2007/07/11 MH Modify the macro, because the STA may link with an N-AP: if we
- * set the STA to A/B/G mode while the AP is still in N mode, the macro would be
- * wrong. We have to add a macro to judge the wireless mode.
- */
-#define PICK_RATE(_nLegacyRate, _nMcsRate) \
-	(((_nMcsRate) == 0) ? ((_nLegacyRate) & 0x7f) : (_nMcsRate))
-/* 2007/07/12 MH We only define legacy and HT wireless mode now. */
-#define LEGACY_WIRELESS_MODE IEEE_MODE_MASK
-
-#define CURRENT_RATE(WirelessMode, LegacyRate, HTRate) \
-	((((WirelessMode) & (LEGACY_WIRELESS_MODE)) != 0) ? \
-	 (LegacyRate) : \
-	 PICK_RATE(LegacyRate, HTRate))
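-/*
- * Usage sketch (hypothetical values, assuming IEEE_G falls under
- * LEGACY_WIRELESS_MODE): CURRENT_RATE(IEEE_G, 0x16, 0x85) yields the legacy
- * rate 0x16 (11 Mbps in 500 kbps units), while CURRENT_RATE(IEEE_N_24G, 0x16,
- * 0x85) falls through to PICK_RATE() and, since the MCS rate is non-zero,
- * yields 0x85 (MCS5 with the 0x80 HT flag set).
- */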
-
-// MCS Bw 40 {1~7, 12~15,32}
-#define RATE_ADPT_1SS_MASK 0xFF
-#define RATE_ADPT_2SS_MASK 0xF0 //Skip MCS8~11 because mcs7 > mcs6, 9, 10, 11. 2007.01.16 by Emily
-#define RATE_ADPT_MCS32_MASK 0x01
-
-#define IS_11N_MCS_RATE(rate) (rate & 0x80)
-
-typedef enum _HT_AGGRE_SIZE {
- HT_AGG_SIZE_8K = 0,
- HT_AGG_SIZE_16K = 1,
- HT_AGG_SIZE_32K = 2,
- HT_AGG_SIZE_64K = 3,
-} HT_AGGRE_SIZE_E, *PHT_AGGRE_SIZE_E;
-
-/* Indicate different AP vendor for IOT issue */
-typedef enum _HT_IOT_PEER {
- HT_IOT_PEER_UNKNOWN = 0,
- HT_IOT_PEER_REALTEK = 1,
- HT_IOT_PEER_BROADCOM = 2,
- HT_IOT_PEER_RALINK = 3,
- HT_IOT_PEER_ATHEROS = 4,
- HT_IOT_PEER_CISCO = 5,
- HT_IOT_PEER_MAX = 6
-} HT_IOT_PEER_E, *PHTIOT_PEER_E;
-
-/*
- * IOT Action for different AP
- */
-typedef enum _HT_IOT_ACTION {
- HT_IOT_ACT_TX_USE_AMSDU_4K = 0x00000001,
- HT_IOT_ACT_TX_USE_AMSDU_8K = 0x00000002,
- HT_IOT_ACT_DISABLE_MCS14 = 0x00000004,
- HT_IOT_ACT_DISABLE_MCS15 = 0x00000008,
- HT_IOT_ACT_DISABLE_ALL_2SS = 0x00000010,
- HT_IOT_ACT_DISABLE_EDCA_TURBO = 0x00000020,
- HT_IOT_ACT_MGNT_USE_CCK_6M = 0x00000040,
- HT_IOT_ACT_CDD_FSYNC = 0x00000080,
- HT_IOT_ACT_PURE_N_MODE = 0x00000100,
- HT_IOT_ACT_FORCED_CTS2SELF = 0x00000200,
-} HT_IOT_ACTION_E, *PHT_IOT_ACTION_E;
-
-#endif //_RTL819XU_HTTYPE_H_
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
deleted file mode 100644
index a93f09033d9d..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
+++ /dev/null
@@ -1,1295 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-/*
- * As this file is mainly ported from the Windows driver, the names are left
- * largely unchanged. If this causes any confusion, tell me. Created by WB. 2008.05.08
- */
-#include "ieee80211.h"
-
-u8 MCS_FILTER_ALL[16] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
-
-u8 MCS_FILTER_1SS[16] = {0xff, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
-
-u16 MCS_DATA_RATE[2][2][77] = {
- { {13, 26, 39, 52, 78, 104, 117, 130, 26, 52, 78, 104, 156, 208, 234, 260,
- 39, 78, 117, 234, 312, 351, 390, 52, 104, 156, 208, 312, 416, 468, 520,
- 0, 78, 104, 130, 117, 156, 195, 104, 130, 130, 156, 182, 182, 208, 156, 195,
- 195, 234, 273, 273, 312, 130, 156, 181, 156, 181, 208, 234, 208, 234, 260, 260,
- 286, 195, 234, 273, 234, 273, 312, 351, 312, 351, 390, 390, 429}, // Long GI, 20MHz
- {14, 29, 43, 58, 87, 116, 130, 144, 29, 58, 87, 116, 173, 231, 260, 289,
- 43, 87, 130, 173, 260, 347, 390, 433, 58, 116, 173, 231, 347, 462, 520, 578,
- 0, 87, 116, 144, 130, 173, 217, 116, 144, 144, 173, 202, 202, 231, 173, 217,
- 217, 260, 303, 303, 347, 144, 173, 202, 173, 202, 231, 260, 231, 260, 289, 289,
- 318, 217, 260, 303, 260, 303, 347, 390, 347, 390, 433, 433, 477} }, // Short GI, 20MHz
- { {27, 54, 81, 108, 162, 216, 243, 270, 54, 108, 162, 216, 324, 432, 486, 540,
- 81, 162, 243, 324, 486, 648, 729, 810, 108, 216, 324, 432, 648, 864, 972, 1080,
- 12, 162, 216, 270, 243, 324, 405, 216, 270, 270, 324, 378, 378, 432, 324, 405,
- 405, 486, 567, 567, 648, 270, 324, 378, 324, 378, 432, 486, 432, 486, 540, 540,
- 594, 405, 486, 567, 486, 567, 648, 729, 648, 729, 810, 810, 891}, // Long GI, 40MHz
- {30, 60, 90, 120, 180, 240, 270, 300, 60, 120, 180, 240, 360, 480, 540, 600,
- 90, 180, 270, 360, 540, 720, 810, 900, 120, 240, 360, 480, 720, 960, 1080, 1200,
- 13, 180, 240, 300, 270, 360, 450, 240, 300, 300, 360, 420, 420, 480, 360, 450,
- 450, 540, 630, 630, 720, 300, 360, 420, 360, 420, 480, 540, 480, 540, 600, 600,
- 660, 450, 540, 630, 540, 630, 720, 810, 720, 810, 900, 900, 990} } // Short GI, 40MHz
-};
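-/*
- * The table is indexed as MCS_DATA_RATE[is40MHz][isShortGI][MCS index]; the
- * values appear to be in 500 kbps units, e.g. the long-GI/20MHz entry for
- * MCS7 is 130, i.e. 65 Mbps.
- */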
-
-static u8 UNKNOWN_BORADCOM[3] = {0x00, 0x14, 0xbf};
-static u8 LINKSYSWRT330_LINKSYSWRT300_BROADCOM[3] = {0x00, 0x1a, 0x70};
-static u8 LINKSYSWRT350_LINKSYSWRT150_BROADCOM[3] = {0x00, 0x1d, 0x7e};
-static u8 NETGEAR834Bv2_BROADCOM[3] = {0x00, 0x1b, 0x2f};
-static u8 BELKINF5D8233V1_RALINK[3] = {0x00, 0x17, 0x3f}; //cosa 03202008
-static u8 BELKINF5D82334V3_RALINK[3] = {0x00, 0x1c, 0xdf};
-static u8 PCI_RALINK[3] = {0x00, 0x90, 0xcc};
-static u8 EDIMAX_RALINK[3] = {0x00, 0x0e, 0x2e};
-static u8 AIRLINK_RALINK[3] = {0x00, 0x18, 0x02};
-//static u8 DLINK_ATHEROS[3] = {0x00, 0x1c, 0xf0};
-static u8 CISCO_BROADCOM[3] = {0x00, 0x17, 0x94};
-/*
- * 2008/04/01 MH For Cisco G mode RX TP We need to change FW duration. Should we
- * put the code in other place??
- * static u8 WIFI_CISCO_G_AP[3] = {0x00, 0x40, 0x96};
- */
-/*
- *function: This function updates default settings in the pHTInfo structure
- * input: PRT_HIGH_THROUGHPUT pHTInfo
- * output: none
- * return: none
- * notice: These values need to be modified if anything changes.
- */
-void HTUpdateDefaultSetting(struct ieee80211_device *ieee)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- //const typeof( ((struct ieee80211_device *)0)->pHTInfo ) *__mptr = &pHTInfo;
-
- //printk("pHTinfo:%p, &pHTinfo:%p, mptr:%p, offsetof:%x\n", pHTInfo, &pHTInfo, __mptr, offsetof(struct ieee80211_device, pHTInfo));
- //printk("===>ieee:%p,\n", ieee);
- // ShortGI support
- pHTInfo->bRegShortGI20MHz = 1;
- pHTInfo->bRegShortGI40MHz = 1;
-
- // 40MHz channel support
- pHTInfo->bRegBW40MHz = 1;
-
- // CCK rate support in 40MHz channel
- if (pHTInfo->bRegBW40MHz)
- pHTInfo->bRegSuppCCK = 1;
- else
- pHTInfo->bRegSuppCCK = true;
-
- // AMSDU related
- pHTInfo->nAMSDU_MaxSize = 7935UL;
- pHTInfo->bAMSDU_Support = 0;
-
- // AMPDU related
- pHTInfo->bAMPDUEnable = 1;
-	pHTInfo->AMPDU_Factor = 2;	// 0: 2^13 (8K), 1: 2^14 (16K), 2: 2^15 (32K), 3: 2^16 (64K)
-	pHTInfo->MPDU_Density = 0;	// 0: No restriction, 1: 1/8usec, 2: 1/4usec, 3: 1/2usec, 4: 1usec, 5: 2usec, 6: 4usec, 7: 8usec
-
- // MIMO Power Save
- pHTInfo->SelfMimoPs = 3;// 0: Static Mimo Ps, 1: Dynamic Mimo Ps, 3: No Limitation, 2: Reserved(Set to 3 automatically.)
- if (pHTInfo->SelfMimoPs == 2)
- pHTInfo->SelfMimoPs = 3;
- // 8190 only. Assign rate operation mode to firmware
- ieee->bTxDisableRateFallBack = 0;
- ieee->bTxUseDriverAssingedRate = 0;
-
- /*
- * 8190 only, Realtek proprietary aggregation mode
-	 * 0: Set MPDUDensity=2; 1: Set MPDUDensity=2 (32k) for Realtek APs and MPDUDensity=0 (8k) for others
-	 */
-	pHTInfo->bRegRT2RTAggregation = 1;
-
- // For Rx Reorder Control
- pHTInfo->bRegRxReorderEnable = 1;
- pHTInfo->RxReorderWinSize = 64;
- pHTInfo->RxReorderPendingTime = 30;
-
-#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
- pHTInfo->UsbTxAggrNum = 4;
-#endif
-#ifdef USB_RX_AGGREGATION_SUPPORT
- pHTInfo->UsbRxFwAggrEn = 1;
- pHTInfo->UsbRxFwAggrPageNum = 24;
- pHTInfo->UsbRxFwAggrPacketNum = 8;
-	pHTInfo->UsbRxFwAggrTimeout = 16;	// USB RX FW aggregation timeout threshold, in units of 64us
-#endif
-}
-
-/*
- *function: This function print out each field on HT capability
- * IE mainly from (Beacon/ProbeRsp/AssocReq)
- * input: u8* CapIE //Capability IE to be printed out
- * u8* TitleString //mainly print out caller function
- * output: none
- * return: none
- * notice: Driver should not print out this message by default.
- */
-void HTDebugHTCapability(u8 *CapIE, u8 *TitleString)
-{
- static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; // For 11n EWC definition, 2007.07.17, by Emily
- struct ht_capability_ele *pCapELE;
-
- if (!memcmp(CapIE, EWC11NHTCap, sizeof(EWC11NHTCap))) {
- //EWC IE
- IEEE80211_DEBUG(IEEE80211_DL_HT, "EWC IE in %s()\n", __func__);
- pCapELE = (struct ht_capability_ele *)(&CapIE[4]);
- } else {
- pCapELE = (struct ht_capability_ele *)(&CapIE[0]);
- }
- IEEE80211_DEBUG(IEEE80211_DL_HT, "<Log HT Capability>. Called by %s\n", TitleString);
-
-	IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupported Channel Width = %s\n", (pCapELE->ChlWidth) ? "20/40MHz" : "20MHz");
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport Short GI for 20M = %s\n", (pCapELE->ShortGI20Mhz) ? "YES" : "NO");
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport Short GI for 40M = %s\n", (pCapELE->ShortGI40Mhz) ? "YES" : "NO");
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport TX STBC = %s\n", (pCapELE->TxSTBC) ? "YES" : "NO");
-	IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMax AMSDU Size = %s\n", (pCapELE->MaxAMSDUSize) ? "7935" : "3839");
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport CCK in 20/40 mode = %s\n", (pCapELE->DssCCk) ? "YES" : "NO");
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMax AMPDU Factor = %d\n", pCapELE->MaxRxAMPDUFactor);
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMPDU Density = %d\n", pCapELE->MPDUDensity);
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMCS Rate Set = [%x][%x][%x][%x][%x]\n", pCapELE->MCS[0],\
- pCapELE->MCS[1], pCapELE->MCS[2], pCapELE->MCS[3], pCapELE->MCS[4]);
-}
-
-/*
- *function: This function print out each field on HT Information
- * IE mainly from (Beacon/ProbeRsp)
- * input: u8* InfoIE //Capability IE to be printed out
- * u8* TitleString //mainly print out caller function
- * output: none
- * return: none
- * notice: Driver should not print out this message by default.
- */
-void HTDebugHTInfo(u8 *InfoIE, u8 *TitleString)
-{
- static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34}; // For 11n EWC definition, 2007.07.17, by Emily
- PHT_INFORMATION_ELE pHTInfoEle;
-
- if (!memcmp(InfoIE, EWC11NHTInfo, sizeof(EWC11NHTInfo))) {
-		// EWC IE
- IEEE80211_DEBUG(IEEE80211_DL_HT, "EWC IE in %s()\n", __func__);
- pHTInfoEle = (PHT_INFORMATION_ELE)(&InfoIE[4]);
- } else {
- pHTInfoEle = (PHT_INFORMATION_ELE)(&InfoIE[0]);
- }
-
- IEEE80211_DEBUG(IEEE80211_DL_HT, "<Log HT Information Element>. Called by %s\n", TitleString);
-
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tPrimary channel = %d\n", pHTInfoEle->ControlChl);
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSecondary channel =");
- switch (pHTInfoEle->ExtChlOffset) {
- case 0:
- IEEE80211_DEBUG(IEEE80211_DL_HT, "Not Present\n");
- break;
- case 1:
- IEEE80211_DEBUG(IEEE80211_DL_HT, "Upper channel\n");
- break;
- case 2:
-		IEEE80211_DEBUG(IEEE80211_DL_HT, "Reserved. Error!!!\n");
- break;
- case 3:
- IEEE80211_DEBUG(IEEE80211_DL_HT, "Lower Channel\n");
- break;
- }
-	IEEE80211_DEBUG(IEEE80211_DL_HT, "\tRecommended channel width = %s\n", (pHTInfoEle->RecommemdedTxWidth) ? "40MHz" : "20MHz");
-
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tOperation mode for protection = ");
- switch (pHTInfoEle->OptMode) {
- case 0:
- IEEE80211_DEBUG(IEEE80211_DL_HT, "No Protection\n");
- break;
- case 1:
- IEEE80211_DEBUG(IEEE80211_DL_HT, "HT non-member protection mode\n");
- break;
- case 2:
- IEEE80211_DEBUG(IEEE80211_DL_HT, "Suggest to open protection\n");
- break;
- case 3:
- IEEE80211_DEBUG(IEEE80211_DL_HT, "HT mixed mode\n");
- break;
- }
-
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tBasic MCS Rate Set = [%x][%x][%x][%x][%x]\n", pHTInfoEle->BasicMSC[0],\
- pHTInfoEle->BasicMSC[1], pHTInfoEle->BasicMSC[2], pHTInfoEle->BasicMSC[3], pHTInfoEle->BasicMSC[4]);
-}
-
-static u16 HTMcsToDataRate(struct ieee80211_device *ieee, u8 nMcsRate)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
-
- u8 is40MHz = (pHTInfo->bCurBW40MHz) ? 1 : 0;
- u8 isShortGI = (pHTInfo->bCurBW40MHz) ?
- ((pHTInfo->bCurShortGI40MHz) ? 1 : 0) :
- ((pHTInfo->bCurShortGI20MHz) ? 1 : 0);
- return MCS_DATA_RATE[is40MHz][isShortGI][(nMcsRate & 0x7f)];
-}
-
-/*
- *function: This function returns current datarate.
- * input: struct ieee80211_device* ieee
- * u8 nDataRate
- * output: none
- * return: tx rate
- * notice: quite unsure about how to use this function //wb
- */
-u16 TxCountToDataRate(struct ieee80211_device *ieee, u8 nDataRate)
-{
- //PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- u16 CCKOFDMRate[12] = {0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c};
- u8 is40MHz = 0;
- u8 isShortGI = 0;
-
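-	/*
-	 * Hypothetical examples of the mapping: nDataRate 4 indexes
-	 * CCKOFDMRate[4] = 0x0c, i.e. 6 Mbps OFDM; nDataRate 0x17 falls in the
-	 * 0x10..0x1f range (20 MHz, long GI), so it looks up
-	 * MCS_DATA_RATE[0][0][7] = 130, i.e. 65 Mbps; both values are in
-	 * 500 kbps units.
-	 */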
- if (nDataRate < 12) {
- return CCKOFDMRate[nDataRate];
- } else {
- if (nDataRate >= 0x10 && nDataRate <= 0x1f) { //if(nDataRate > 11 && nDataRate < 28 )
- is40MHz = 0;
- isShortGI = 0;
-
- // nDataRate = nDataRate - 12;
- } else if (nDataRate >= 0x20 && nDataRate <= 0x2f) { //(27, 44)
- is40MHz = 1;
- isShortGI = 0;
-
- //nDataRate = nDataRate - 28;
- } else if (nDataRate >= 0x30 && nDataRate <= 0x3f) { //(43, 60)
- is40MHz = 0;
- isShortGI = 1;
-
- //nDataRate = nDataRate - 44;
- } else if (nDataRate >= 0x40 && nDataRate <= 0x4f) { //(59, 76)
- is40MHz = 1;
- isShortGI = 1;
-
- //nDataRate = nDataRate - 60;
- }
- return MCS_DATA_RATE[is40MHz][isShortGI][nDataRate & 0xf];
- }
-}
-
-bool IsHTHalfNmodeAPs(struct ieee80211_device *ieee)
-{
- bool retValue = false;
- struct ieee80211_network *net = &ieee->current_network;
-
- if ((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3) == 0) ||
- (memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3) == 0) ||
- (memcmp(net->bssid, PCI_RALINK, 3) == 0) ||
- (memcmp(net->bssid, EDIMAX_RALINK, 3) == 0) ||
- (memcmp(net->bssid, AIRLINK_RALINK, 3) == 0) ||
- (net->ralink_cap_exist))
- retValue = true;
- else if ((memcmp(net->bssid, UNKNOWN_BORADCOM, 3) == 0) ||
- (memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3) == 0) ||
- (memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3) == 0) ||
- (memcmp(net->bssid, NETGEAR834Bv2_BROADCOM, 3) == 0) ||
- (net->broadcom_cap_exist))
- retValue = true;
- else if (net->bssht.bdRT2RTAggregation)
- retValue = true;
- else
- retValue = false;
-
- return retValue;
-}
-
-/*
- *function: This function returns peer IOT.
- *function: This function determines the peer's IOT (interoperability) vendor.
- * output: none
- * return:
- * notice:
- */
-static void HTIOTPeerDetermine(struct ieee80211_device *ieee)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- struct ieee80211_network *net = &ieee->current_network;
-
- if (net->bssht.bdRT2RTAggregation)
- pHTInfo->IOTPeer = HT_IOT_PEER_REALTEK;
- else if (net->broadcom_cap_exist)
- pHTInfo->IOTPeer = HT_IOT_PEER_BROADCOM;
- else if ((memcmp(net->bssid, UNKNOWN_BORADCOM, 3) == 0) ||
- (memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3) == 0) ||
- (memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3) == 0) ||
- (memcmp(net->bssid, NETGEAR834Bv2_BROADCOM, 3) == 0))
- pHTInfo->IOTPeer = HT_IOT_PEER_BROADCOM;
- else if ((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3) == 0) ||
- (memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3) == 0) ||
- (memcmp(net->bssid, PCI_RALINK, 3) == 0) ||
- (memcmp(net->bssid, EDIMAX_RALINK, 3) == 0) ||
- (memcmp(net->bssid, AIRLINK_RALINK, 3) == 0) ||
- net->ralink_cap_exist)
- pHTInfo->IOTPeer = HT_IOT_PEER_RALINK;
- else if (net->atheros_cap_exist)
- pHTInfo->IOTPeer = HT_IOT_PEER_ATHEROS;
- else if (memcmp(net->bssid, CISCO_BROADCOM, 3) == 0)
- pHTInfo->IOTPeer = HT_IOT_PEER_CISCO;
- else
- pHTInfo->IOTPeer = HT_IOT_PEER_UNKNOWN;
-
- IEEE80211_DEBUG(IEEE80211_DL_IOT, "Joseph debug!! IOTPEER: %x\n", pHTInfo->IOTPeer);
-}
-
-/*
- *function: Check whether driver should declare received rate up to MCS13
- * only, since some chipsets are not good at receiving MCS14~15 frames
- * from some APs.
- * input: struct ieee80211_device* ieee
- * u8 * PeerMacAddr
- * output: none
- * return: return 1 if driver should declare MCS13 only(otherwise return 0)
- */
-static u8 HTIOTActIsDisableMCS14(struct ieee80211_device *ieee, u8 *PeerMacAddr)
-{
- return 0;
-}
-
-/*
- * Function: HTIOTActIsDisableMCS15
- *
- * Overview: Check whether driver should declare capability of receiving
- * MCS15
- *
- * Input:
- * PADAPTER Adapter,
- *
- * Output: None
- * Return: true if driver should disable MCS15
- * 2008.04.15 Emily
- */
-static bool HTIOTActIsDisableMCS15(struct ieee80211_device *ieee)
-{
- bool retValue = false;
-
-#ifdef TODO
- // Apply for 819u only
-#if (HAL_CODE_BASE == RTL8192)
-
-#if (DEV_BUS_TYPE == USB_INTERFACE)
-	// Always disable MCS15 at Jerry Chang's request. by Emily, 2008.04.15
- retValue = true;
-#elif (DEV_BUS_TYPE == PCI_INTERFACE)
- // Enable MCS15 if the peer is Cisco AP. by Emily, 2008.05.12
-// if(pBssDesc->bCiscoCapExist)
-// retValue = false;
-// else
- retValue = false;
-#endif
-#endif
-#endif
-	// Jerry Chang suggests that the 8190 1x2 does not need to disable MCS15
-
- return retValue;
-}
-
-/*
- * Function: HTIOTActIsDisableMCSTwoSpatialStream
- *
- * Overview: Check whether driver should declare capability of receiving
- * All 2 ss packets
- *
- * Input:
- * PADAPTER Adapter,
- *
- * Output: None
- * Return: true if driver should disable all two spatial stream packet
- * 2008.04.21 Emily
- */
-static bool HTIOTActIsDisableMCSTwoSpatialStream(struct ieee80211_device *ieee,
- u8 *PeerMacAddr)
-{
-#ifdef TODO
- // Apply for 819u only
-#endif
- return false;
-}
-
-/*
- *function: Check whether driver should disable EDCA turbo mode
- * input: struct ieee80211_device* ieee
- * u8* PeerMacAddr
- * output: none
- * return: return 1 if driver should disable EDCA turbo mode
- * (otherwise return 0)
- */
-static u8 HTIOTActIsDisableEDCATurbo(struct ieee80211_device *ieee,
- u8 *PeerMacAddr)
-{ /* default enable EDCA Turbo mode. */
- return false;
-}
-
-/*
- *function: Check whether we need to use OFDM to send MGNT frames for
- * broadcom AP
- * input: struct ieee80211_network *network //current network we live
- * output: none
- * return: return 1 if true
- */
-static u8 HTIOTActIsMgntUseCCK6M(struct ieee80211_network *network)
-{
- u8 retValue = 0;
-
-	// 2008/01/25 MH Judge if we need to use OFDM to send MGNT frames for broadcom APs.
- // 2008/01/28 MH We must prevent that we select null bssid to link.
-
- if (network->broadcom_cap_exist)
- retValue = 1;
-
- return retValue;
-}
-
-static u8 HTIOTActIsCCDFsync(u8 *PeerMacAddr)
-{
- u8 retValue = 0;
-
- if ((memcmp(PeerMacAddr, UNKNOWN_BORADCOM, 3) == 0) ||
- (memcmp(PeerMacAddr, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3) == 0) ||
- (memcmp(PeerMacAddr, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3) == 0))
- retValue = 1;
-
- return retValue;
-}
-
-void HTResetIOTSetting(PRT_HIGH_THROUGHPUT pHTInfo)
-{
- pHTInfo->IOTAction = 0;
- pHTInfo->IOTPeer = HT_IOT_PEER_UNKNOWN;
-}
-
-/*
- *function: Construct Capability Element in Beacon... if HTEnable is turned on
- * input: struct ieee80211_device* ieee
- * u8* posHTCap //pointer to store Capability Ele
- * u8* len //store length of CE
- * u8 IsEncrypt //whether encrypt, needed further
- * output: none
- * return: none
- * notice: posHTCap can't be null and should be initialized before.
- */
-void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap, u8 *len, u8 IsEncrypt)
-{
- PRT_HIGH_THROUGHPUT pHT = ieee->pHTInfo;
- struct ht_capability_ele *pCapELE = NULL;
- //u8 bIsDeclareMCS13;
-
- if (!posHTCap || !pHT) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR,
- "posHTCap or pHTInfo can't be null in %s\n",
- __func__);
- return;
- }
- memset(posHTCap, 0, *len);
- if (pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC) {
- static const u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33};
-
- memcpy(posHTCap, EWC11NHTCap, sizeof(EWC11NHTCap));
- pCapELE = (struct ht_capability_ele *)&posHTCap[4];
- } else {
- pCapELE = (struct ht_capability_ele *)posHTCap;
- }
-
- //HT capability info
- pCapELE->AdvCoding = 0; // This feature is not supported now!!
- if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
- pCapELE->ChlWidth = 0;
- else
- pCapELE->ChlWidth = (pHT->bRegBW40MHz ? 1 : 0);
-
-// pCapELE->ChlWidth = (pHT->bRegBW40MHz?1:0);
- pCapELE->MimoPwrSave = pHT->SelfMimoPs;
- pCapELE->GreenField = 0; // This feature is not supported now!!
- pCapELE->ShortGI20Mhz = 1; // We can receive Short GI!!
- pCapELE->ShortGI40Mhz = 1; // We can receive Short GI!!
- //DbgPrint("TX HT cap/info ele BW=%d SG20=%d SG40=%d\n\r",
- //pCapELE->ChlWidth, pCapELE->ShortGI20Mhz, pCapELE->ShortGI40Mhz);
- pCapELE->TxSTBC = 1;
- pCapELE->RxSTBC = 0;
- pCapELE->DelayBA = 0; // Do not support now!!
- pCapELE->MaxAMSDUSize = (MAX_RECEIVE_BUFFER_SIZE >= 7935) ? 1 : 0;
- pCapELE->DssCCk = ((pHT->bRegBW40MHz) ? (pHT->bRegSuppCCK ? 1 : 0) : 0);
- pCapELE->PSMP = 0; // Do not support now!!
- pCapELE->LSigTxopProtect = 0; // Do not support now!!
-
- /*
- * MAC HT parameters info
-	 * TODO: Need to take care of this part
- */
- IEEE80211_DEBUG(IEEE80211_DL_HT, "TX HT cap/info ele BW=%d MaxAMSDUSize:%d DssCCk:%d\n", pCapELE->ChlWidth, pCapELE->MaxAMSDUSize, pCapELE->DssCCk);
-
- if (IsEncrypt) {
- pCapELE->MPDUDensity = 7; // 8us
- pCapELE->MaxRxAMPDUFactor = 2; // 2 is for 32 K and 3 is 64K
- } else {
- pCapELE->MaxRxAMPDUFactor = 3; // 2 is for 32 K and 3 is 64K
- pCapELE->MPDUDensity = 0; // no density
- }
-
- //Supported MCS set
- memcpy(pCapELE->MCS, ieee->Regdot11HTOperationalRateSet, 16);
- if (pHT->IOTAction & HT_IOT_ACT_DISABLE_MCS15)
- pCapELE->MCS[1] &= 0x7f;
-
- if (pHT->IOTAction & HT_IOT_ACT_DISABLE_MCS14)
- pCapELE->MCS[1] &= 0xbf;
-
- if (pHT->IOTAction & HT_IOT_ACT_DISABLE_ALL_2SS)
- pCapELE->MCS[1] &= 0x00;
-
- /*
- * 2008.06.12
- * For RTL819X, if pairwisekey = wep/tkip, ap is ralink, we support only MCS0~7.
- */
- if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) {
- int i;
-
- for (i = 1; i < 16; i++)
- pCapELE->MCS[i] = 0;
- }
-
- //Extended HT Capability Info
- memset(&pCapELE->ExtHTCapInfo, 0, 2);
-
- //TXBF Capabilities
- memset(pCapELE->TxBFCap, 0, 4);
-
- //Antenna Selection Capabilities
- pCapELE->ASCap = 0;
-//add 2 to give space for element ID and len when construct frames
- if (pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC)
- *len = 30 + 2;
- else
- *len = 26 + 2;
-
-// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_HT, posHTCap, *len -2);
-
- /*
- * Print each field in detail. Driver should not print out this message
- * by default
- */
-// HTDebugHTCapability(posHTCap, (u8*)"HTConstructCapability()");
-}
-
-/*
- *function: Construct Information Element in Beacon... if HTEnable is turned on
- * input: struct ieee80211_device* ieee
- * u8* posHTInfo //pointer to store Information Ele
- * u8* len //store length of the IE
- * u8 IsEncrypt //whether encrypt, needed further
- * output: none
- * return: none
- * notice: posHTInfo can't be null and must be initialized beforehand.
- * Only AP and IBSS sta should do this
- */
-void HTConstructInfoElement(struct ieee80211_device *ieee, u8 *posHTInfo, u8 *len, u8 IsEncrypt)
-{
- PRT_HIGH_THROUGHPUT pHT = ieee->pHTInfo;
- PHT_INFORMATION_ELE pHTInfoEle = (PHT_INFORMATION_ELE)posHTInfo;
-
- if (!posHTInfo || !pHTInfoEle) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR,
- "posHTInfo or pHTInfoEle can't be null in %s\n",
- __func__);
- return;
- }
-
- memset(posHTInfo, 0, *len);
- if ((ieee->iw_mode == IW_MODE_ADHOC) || (ieee->iw_mode == IW_MODE_MASTER)) { //ap mode is not currently supported
- pHTInfoEle->ControlChl = ieee->current_network.channel;
- pHTInfoEle->ExtChlOffset = ((!pHT->bRegBW40MHz) ? HT_EXTCHNL_OFFSET_NO_EXT :
- (ieee->current_network.channel <= 6) ?
- HT_EXTCHNL_OFFSET_UPPER : HT_EXTCHNL_OFFSET_LOWER);
- pHTInfoEle->RecommemdedTxWidth = pHT->bRegBW40MHz;
- pHTInfoEle->RIFS = 0;
- pHTInfoEle->PSMPAccessOnly = 0;
- pHTInfoEle->SrvIntGranularity = 0;
- pHTInfoEle->OptMode = pHT->CurrentOpMode;
- pHTInfoEle->NonGFDevPresent = 0;
- pHTInfoEle->DualBeacon = 0;
- pHTInfoEle->SecondaryBeacon = 0;
- pHTInfoEle->LSigTxopProtectFull = 0;
- pHTInfoEle->PcoActive = 0;
- pHTInfoEle->PcoPhase = 0;
-
- memset(pHTInfoEle->BasicMSC, 0, 16);
-
- *len = 22 + 2; //same above
- } else {
- //STA should not generate High Throughput Information Element
- *len = 0;
- }
- //IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_HT, posHTInfo, *len - 2);
- //HTDebugHTInfo(posHTInfo, "HTConstructInforElement");
-}
-
-/*
- * According to experiment, Realtek AP to STA (based on rtl8190) may achieve
- * best performance if both STA and AP set limitation of aggregation size to
- * 32K, that is, set AMPDU density to 2 (Ref: IEEE 11n specification).
- * However, if Realtek STA associates to other AP, STA should set limitation of
- * aggregation size to 8K, otherwise, performance of traffic stream from STA to
- * AP will be much less than the traffic stream from AP to STA if both of the
- * stream runs concurrently at the same time.
- *
- * Frame Format
- * Element ID Length OUI Type1 Reserved
- * 1 byte 1 byte 3 bytes 1 byte 1 byte
- *
- * OUI = 0x00, 0xe0, 0x4c,
- * Type = 0x02
- * Reserved = 0x00
- *
- * 2007.8.21 by Emily
- */
-/*
- *function: Construct Information Element in Beacon... in RT2RT condition
- * input: struct ieee80211_device* ieee
- * u8* posRT2RTAgg //pointer to store Information Ele
- * u8* len //store len
- * output: none
- * return: none
- * notice:
- */
-void HTConstructRT2RTAggElement(struct ieee80211_device *ieee, u8 *posRT2RTAgg, u8 *len)
-{
- if (!posRT2RTAgg) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR,
- "posRT2RTAgg can't be null in %s\n",
- __func__);
- return;
- }
- memset(posRT2RTAgg, 0, *len);
- *posRT2RTAgg++ = 0x00;
- *posRT2RTAgg++ = 0xe0;
- *posRT2RTAgg++ = 0x4c;
- *posRT2RTAgg++ = 0x02;
- *posRT2RTAgg++ = 0x01;
- *posRT2RTAgg = 0x10;//*posRT2RTAgg = 0x02;
-
- if (ieee->bSupportRemoteWakeUp)
- *posRT2RTAgg |= 0x08;//RT_HT_CAP_USE_WOW;
-
- *len = 6 + 2;
- return;
-#ifdef TODO
-#if (HAL_CODE_BASE == RTL8192 && DEV_BUS_TYPE == USB_INTERFACE)
- /*
- //Emily. If it is required to Ask Realtek AP to send AMPDU during AES mode, enable this
- section of code.
- if(IS_UNDER_11N_AES_MODE(Adapter))
- {
- posRT2RTAgg->octet[5] |= RT_HT_CAP_USE_AMPDU;
- }else
- {
- posRT2RTAgg->octet[5] &= 0xfb;
- }
- */
-#else
- // Do Nothing
-#endif
-
- posRT2RTAgg->Length = 6;
-#endif
-}
-
-/*
- *function: Pick the right Rate Adaptive table to use
- * input: struct ieee80211_device* ieee
- * u8* pOperateMCS //A pointer to MCS rate bitmap
- * return: always we return true
- * notice:
- */
-static u8 HT_PickMCSRate(struct ieee80211_device *ieee, u8 *pOperateMCS)
-{
- if (!pOperateMCS) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR,
- "pOperateMCS can't be null in %s\n",
- __func__);
- return false;
- }
-
- switch (ieee->mode) {
- case IEEE_A:
- case IEEE_B:
- case IEEE_G:
- //legacy rate routine handled at selectedrate
-
- //no MCS rate
- memset(pOperateMCS, 0, 16);
- break;
-
- case IEEE_N_24G: //assume CCK rate ok
- case IEEE_N_5G:
-		// Legacy part: we only use 6, 5.5, 2, 1 for N_24G and 6 for N_5G.
- // Legacy part shall be handled at SelectRateSet().
-
- //HT part
- // TODO: may be different if we have different number of antenna
- pOperateMCS[0] &= RATE_ADPT_1SS_MASK; //support MCS 0~7
- pOperateMCS[1] &= RATE_ADPT_2SS_MASK;
- pOperateMCS[3] &= RATE_ADPT_MCS32_MASK;
- break;
-
- //should never reach here
- default:
- break;
- }
-
- return true;
-}
-
-/*
- * Description:
- * This function will get the highest speed rate in input MCS set.
- *
- * /param Adapter Pointer to Adapter entity
- * pMCSRateSet Pointer to MCS rate bitmap
- * pMCSFilter Pointer to MCS rate filter
- *
- * /return Highest MCS rate included in pMCSRateSet and filtered by pMCSFilter.
- *
- */
-/*
- *function: This function will get the highest speed rate in input MCS set.
- * input: struct ieee80211_device* ieee
- * u8* pMCSRateSet //Pointer to MCS rate bitmap
- * u8* pMCSFilter //Pointer to MCS rate filter
- * return: Highest MCS rate included in pMCSRateSet and filtered by pMCSFilter
- * notice:
- */
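-/*
- * Worked example (hypothetical input): with pMCSRateSet = {0xff, 0x1f, 0, ...}
- * and pMCSFilter = MCS_FILTER_ALL, the available rates are MCS0-12; MCS12 has
- * the highest data rate in MCS_DATA_RATE, so the function returns 0x8c
- * (MCS12 with bit 7 set).
- */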
-u8 HTGetHighestMCSRate(struct ieee80211_device *ieee, u8 *pMCSRateSet, u8 *pMCSFilter)
-{
- u8 i, j;
- u8 bitMap;
- u8 mcsRate = 0;
- u8 availableMcsRate[16];
-
- if (!pMCSRateSet || !pMCSFilter) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR,
- "pMCSRateSet or pMCSFilter can't be null in %s\n",
- __func__);
- return false;
- }
- for (i = 0; i < 16; i++)
- availableMcsRate[i] = pMCSRateSet[i] & pMCSFilter[i];
-
- for (i = 0; i < 16; i++) {
- if (availableMcsRate[i] != 0)
- break;
- }
- if (i == 16)
- return false;
-
- for (i = 0; i < 16; i++) {
- if (availableMcsRate[i] != 0) {
- bitMap = availableMcsRate[i];
- for (j = 0; j < 8; j++) {
- if ((bitMap % 2) != 0) {
- if (HTMcsToDataRate(ieee, (8 * i + j)) > HTMcsToDataRate(ieee, mcsRate))
- mcsRate = (8 * i + j);
- }
- bitMap >>= 1;
- }
- }
- }
- return (mcsRate | 0x80);
-}
-
-/*
- * 1.Filter our operation rate set with AP's rate set
- * 2.shall reference channel bandwidth, STBC, Antenna number
- * 3.generate rate adaptive table for firmware
- * David 20060906
- *
- * \pHTSupportedCap: the connected STA's supported rate Capability element
- */
-static u8 HTFilterMCSRate(struct ieee80211_device *ieee, u8 *pSupportMCS,
- u8 *pOperateMCS)
-{
- u8 i = 0;
-
- // filter out operational rate set not supported by AP, the length of it is 16
- for (i = 0; i <= 15; i++)
- pOperateMCS[i] = ieee->Regdot11HTOperationalRateSet[i] & pSupportMCS[i];
-
- // TODO: adjust our operational rate set according to our channel bandwidth, STBC and Antenna number
- /*
- * TODO: fill suggested rate adaptive rate index and give firmware info
- * using Tx command packet we also shall suggested the first start rate
- * set according to our signal strength
- */
- HT_PickMCSRate(ieee, pOperateMCS);
-
- // For RTL819X, if pairwisekey = wep/tkip, we support only MCS0~7.
- if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
- pOperateMCS[1] = 0;
-
- /*
- * For RTL819X, we support only MCS0~15.
- * And also, we do not know how to use MCS32 now.
- */
- for (i = 2; i <= 15; i++)
- pOperateMCS[i] = 0;
-
- return true;
-}
-
-void HTOnAssocRsp(struct ieee80211_device *ieee)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- struct ht_capability_ele *pPeerHTCap = NULL;
- PHT_INFORMATION_ELE pPeerHTInfo = NULL;
- u16 nMaxAMSDUSize = 0;
- u8 *pMcsFilter = NULL;
-
- static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; // For 11n EWC definition, 2007.07.17, by Emily
- static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34}; // For 11n EWC definition, 2007.07.17, by Emily
-
- if (!pHTInfo->bCurrentHTSupport) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR,
- "<=== %s: HT_DISABLE\n",
- __func__);
- return;
- }
- IEEE80211_DEBUG(IEEE80211_DL_HT, "===> HTOnAssocRsp_wq(): HT_ENABLE\n");
-// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, pHTInfo->PeerHTCapBuf, sizeof(struct ht_capability_ele));
-// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, pHTInfo->PeerHTInfoBuf, sizeof(HT_INFORMATION_ELE));
-
-// HTDebugHTCapability(pHTInfo->PeerHTCapBuf,"HTOnAssocRsp_wq");
-// HTDebugHTInfo(pHTInfo->PeerHTInfoBuf,"HTOnAssocRsp_wq");
- //
- if (!memcmp(pHTInfo->PeerHTCapBuf, EWC11NHTCap, sizeof(EWC11NHTCap)))
- pPeerHTCap = (struct ht_capability_ele *)(&pHTInfo->PeerHTCapBuf[4]);
- else
- pPeerHTCap = (struct ht_capability_ele *)(pHTInfo->PeerHTCapBuf);
-
- if (!memcmp(pHTInfo->PeerHTInfoBuf, EWC11NHTInfo, sizeof(EWC11NHTInfo)))
- pPeerHTInfo = (PHT_INFORMATION_ELE)(&pHTInfo->PeerHTInfoBuf[4]);
- else
- pPeerHTInfo = (PHT_INFORMATION_ELE)(pHTInfo->PeerHTInfoBuf);
-
- ////////////////////////////////////////////////////////
- // Configurations:
- ////////////////////////////////////////////////////////
- IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_HT, pPeerHTCap, sizeof(struct ht_capability_ele));
-// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_HT, pPeerHTInfo, sizeof(HT_INFORMATION_ELE));
- // Config Supported Channel Width setting
- //
- HTSetConnectBwMode(ieee, (enum ht_channel_width)(pPeerHTCap->ChlWidth), (enum ht_extension_chan_offset)(pPeerHTInfo->ExtChlOffset));
-
- pHTInfo->bCurTxBW40MHz = (pPeerHTInfo->RecommemdedTxWidth == 1);
-
- /*
- * Update short GI/ long GI setting
- *
- * TODO:
- */
- pHTInfo->bCurShortGI20MHz = pHTInfo->bRegShortGI20MHz &&
- (pPeerHTCap->ShortGI20Mhz == 1);
- pHTInfo->bCurShortGI40MHz = pHTInfo->bRegShortGI40MHz &&
- (pPeerHTCap->ShortGI40Mhz == 1);
-
- /*
- * Config TX STBC setting
- *
- * TODO:
- */
-
- /*
- * Config DSSS/CCK mode in 40MHz mode
- *
- * TODO:
- */
- pHTInfo->bCurSuppCCK = pHTInfo->bRegSuppCCK &&
- (pPeerHTCap->DssCCk == 1);
-
- /*
- * Config and configure A-MSDU setting
- */
- pHTInfo->bCurrent_AMSDU_Support = pHTInfo->bAMSDU_Support;
-
- nMaxAMSDUSize = (pPeerHTCap->MaxAMSDUSize == 0) ? 3839 : 7935;
-
- if (pHTInfo->nAMSDU_MaxSize > nMaxAMSDUSize)
- pHTInfo->nCurrent_AMSDU_MaxSize = nMaxAMSDUSize;
- else
- pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize;
- /*
- * Config A-MPDU setting
- */
- pHTInfo->bCurrentAMPDUEnable = pHTInfo->bAMPDUEnable;
-
- /*
- * <1> Decide AMPDU Factor
- * By Emily
- */
- if (!pHTInfo->bRegRT2RTAggregation) {
- // Decide AMPDU Factor according to protocol handshake
- if (pHTInfo->AMPDU_Factor > pPeerHTCap->MaxRxAMPDUFactor)
- pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor;
- else
- pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor;
- } else {
- /*
- * Set MPDU density to 2 to Realtek AP, and set it to 0 for others
- * Replace MPDU factor declared in original association response frame format. 2007.08.20 by Emily
- */
- if (ieee->current_network.bssht.bdRT2RTAggregation) {
- if (ieee->pairwise_key_type != KEY_TYPE_NA)
- // Realtek may set 32k in security mode and 64k for others
- pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor;
- else
- pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_64K;
- } else {
- pHTInfo->CurrentAMPDUFactor = min_t(u32, pPeerHTCap->MaxRxAMPDUFactor,
- HT_AGG_SIZE_32K);
- }
- }
-
- /*
- * <2> Set AMPDU Minimum MPDU Start Spacing
- * 802.11n 3.0 section 9.7d.3
- */
- pHTInfo->CurrentMPDUDensity = max_t(u32, pHTInfo->MPDU_Density,
- pPeerHTCap->MPDUDensity);
-
- if (ieee->pairwise_key_type != KEY_TYPE_NA)
- pHTInfo->CurrentMPDUDensity = 7; // 8us
- // Force TX AMSDU
-
- // Lanhsin: mark for tmp to avoid deauth by ap from s3
- //if(memcmp(pMgntInfo->Bssid, NETGEAR834Bv2_BROADCOM, 3)==0)
- if (0) {
- pHTInfo->bCurrentAMPDUEnable = false;
- pHTInfo->ForcedAMSDUMode = HT_AGG_FORCE_ENABLE;
- pHTInfo->ForcedAMSDUMaxSize = 7935;
-
- pHTInfo->IOTAction |= HT_IOT_ACT_TX_USE_AMSDU_8K;
- }
-
- // Rx Reorder Setting
- pHTInfo->bCurRxReorderEnable = pHTInfo->bRegRxReorderEnable;
-
- /*
- * Filter out unsupported HT rate for this AP
- * Update RATR table
-	 * This is only for the 8190, 8192 or later products which use firmware to
-	 * handle the rate adaptive mechanism.
- */
-
- /*
-	 * Handle the Ralink AP bad MCS rate set condition. Joseph.
-	 * This fixes a bug in Ralink APs. It may be removed in the future.
- */
- if (pPeerHTCap->MCS[0] == 0)
- pPeerHTCap->MCS[0] = 0xff;
-
- HTFilterMCSRate(ieee, pPeerHTCap->MCS, ieee->dot11HTOperationalRateSet);
-
- /*
- * Config MIMO Power Save setting
- */
- pHTInfo->PeerMimoPs = pPeerHTCap->MimoPwrSave;
- if (pHTInfo->PeerMimoPs == MIMO_PS_STATIC)
- pMcsFilter = MCS_FILTER_1SS;
- else
- pMcsFilter = MCS_FILTER_ALL;
- //WB add for MCS8 bug
-// pMcsFilter = MCS_FILTER_1SS;
- ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee, ieee->dot11HTOperationalRateSet, pMcsFilter);
- ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate;
-
- /*
- * Config current operation mode.
- */
- pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode;
-}
-
-/*
- *function: initialize HT info(struct PRT_HIGH_THROUGHPUT)
- * input: struct ieee80211_device* ieee
- * output: none
- * return: none
- * notice: This function is called when
- * * (1) MPInitialization Phase
- * * (2) Receiving of Deauthentication from AP
- */
-// TODO: Should this function be called when receiving a Disassociation?
-void HTInitializeHTInfo(struct ieee80211_device *ieee)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
-
- /*
- * These parameters will be reset when receiving deauthentication packet
- */
- IEEE80211_DEBUG(IEEE80211_DL_HT, "===========>%s()\n", __func__);
- pHTInfo->bCurrentHTSupport = false;
-
- // 40MHz channel support
- pHTInfo->bCurBW40MHz = false;
- pHTInfo->bCurTxBW40MHz = false;
-
- // Short GI support
- pHTInfo->bCurShortGI20MHz = false;
- pHTInfo->bCurShortGI40MHz = false;
- pHTInfo->bForcedShortGI = false;
-
- /*
- * CCK rate support
- * This flag is set to true to support CCK rate by default.
- * It will be affected by "pHTInfo->bRegSuppCCK" and AP capabilities
-	 * only when associated to an 11N BSS.
- */
- pHTInfo->bCurSuppCCK = true;
-
- // AMSDU related
- pHTInfo->bCurrent_AMSDU_Support = false;
- pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize;
-
-	// AMPDU related
- pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
- pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor;
-
- // Initialize all of the parameters related to 11n
- memset(&pHTInfo->SelfHTCap, 0, sizeof(pHTInfo->SelfHTCap));
- memset(&pHTInfo->SelfHTInfo, 0, sizeof(pHTInfo->SelfHTInfo));
- memset(&pHTInfo->PeerHTCapBuf, 0, sizeof(pHTInfo->PeerHTCapBuf));
- memset(&pHTInfo->PeerHTInfoBuf, 0, sizeof(pHTInfo->PeerHTInfoBuf));
-
- pHTInfo->bSwBwInProgress = false;
-
- // Set default IEEE spec for Draft N
- pHTInfo->ePeerHTSpecVer = HT_SPEC_VER_IEEE;
-
- // Realtek proprietary aggregation mode
- pHTInfo->bCurrentRT2RTAggregation = false;
- pHTInfo->bCurrentRT2RTLongSlotTime = false;
- pHTInfo->IOTPeer = 0;
- pHTInfo->IOTAction = 0;
-
- //MCS rate initialized here
- {
- u8 *RegHTSuppRateSets = &ieee->RegHTSuppRateSet[0];
-
- RegHTSuppRateSets[0] = 0xFF; //support MCS 0~7
- RegHTSuppRateSets[1] = 0xFF; //support MCS 8~15
- RegHTSuppRateSets[4] = 0x01; //support MCS 32
- }
-}
-
-/*
- *function: initialize Bss HT structure(struct PBSS_HT)
- * input: PBSS_HT pBssHT //to be initialized
- * output: none
- * return: none
- * notice: This function is called when initializing the network structure
- */
-void HTInitializeBssDesc(PBSS_HT pBssHT)
-{
- pBssHT->bdSupportHT = false;
- memset(pBssHT->bdHTCapBuf, 0, sizeof(pBssHT->bdHTCapBuf));
- pBssHT->bdHTCapLen = 0;
- memset(pBssHT->bdHTInfoBuf, 0, sizeof(pBssHT->bdHTInfoBuf));
- pBssHT->bdHTInfoLen = 0;
-
- pBssHT->bdHTSpecVer = HT_SPEC_VER_IEEE;
-
- pBssHT->bdRT2RTAggregation = false;
- pBssHT->bdRT2RTLongSlotTime = false;
-}
-
-/*
- *function: reset self HT settings and save peer HT setting
- * input: struct ieee80211_device *ieee
- * struct ieee80211_network *pNetwork //usually the current network
- * we live in
- * output: none
- * return: none
- * notice: This function should ONLY be called before association
- */
-void HTResetSelfAndSavePeerSetting(struct ieee80211_device *ieee, struct ieee80211_network *pNetwork)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
-// u16 nMaxAMSDUSize;
-// struct ht_capability_ele *pPeerHTCap = (struct ht_capability_ele *)pNetwork->bssht.bdHTCapBuf;
-// PHT_INFORMATION_ELE pPeerHTInfo = (PHT_INFORMATION_ELE)pNetwork->bssht.bdHTInfoBuf;
-// u8* pMcsFilter;
- u8 bIOTAction = 0;
-
- //
- // Save Peer Setting before Association
- //
- IEEE80211_DEBUG(IEEE80211_DL_HT, "==============>%s()\n", __func__);
- /*unmark bEnableHT flag here is the same reason why unmarked in function ieee80211_softmac_new_net. WB 2008.09.10*/
-// if( pHTInfo->bEnableHT && pNetwork->bssht.bdSupportHT)
- if (pNetwork->bssht.bdSupportHT) {
- pHTInfo->bCurrentHTSupport = true;
- pHTInfo->ePeerHTSpecVer = pNetwork->bssht.bdHTSpecVer;
-
- // Save HTCap and HTInfo information Element
- if (pNetwork->bssht.bdHTCapLen > 0 && pNetwork->bssht.bdHTCapLen <= sizeof(pHTInfo->PeerHTCapBuf))
- memcpy(pHTInfo->PeerHTCapBuf, pNetwork->bssht.bdHTCapBuf, pNetwork->bssht.bdHTCapLen);
-
- if (pNetwork->bssht.bdHTInfoLen > 0 && pNetwork->bssht.bdHTInfoLen <= sizeof(pHTInfo->PeerHTInfoBuf))
- memcpy(pHTInfo->PeerHTInfoBuf, pNetwork->bssht.bdHTInfoBuf, pNetwork->bssht.bdHTInfoLen);
-
- // Check whether RT to RT aggregation mode is enabled
- if (pHTInfo->bRegRT2RTAggregation) {
- pHTInfo->bCurrentRT2RTAggregation = pNetwork->bssht.bdRT2RTAggregation;
- pHTInfo->bCurrentRT2RTLongSlotTime = pNetwork->bssht.bdRT2RTLongSlotTime;
- } else {
- pHTInfo->bCurrentRT2RTAggregation = false;
- pHTInfo->bCurrentRT2RTLongSlotTime = false;
- }
-
- // Determine the IOT Peer Vendor.
- HTIOTPeerDetermine(ieee);
-
- /*
- * Decide IOT Action
- * Must be called after the parameter of pHTInfo->bCurrentRT2RTAggregation is decided
- */
- pHTInfo->IOTAction = 0;
- bIOTAction = HTIOTActIsDisableMCS14(ieee, pNetwork->bssid);
- if (bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_MCS14;
-
- bIOTAction = HTIOTActIsDisableMCS15(ieee);
- if (bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_MCS15;
-
- bIOTAction = HTIOTActIsDisableMCSTwoSpatialStream(ieee, pNetwork->bssid);
- if (bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_ALL_2SS;
-
- bIOTAction = HTIOTActIsDisableEDCATurbo(ieee, pNetwork->bssid);
- if (bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_EDCA_TURBO;
-
- bIOTAction = HTIOTActIsMgntUseCCK6M(pNetwork);
- if (bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_MGNT_USE_CCK_6M;
-
- bIOTAction = HTIOTActIsCCDFsync(pNetwork->bssid);
- if (bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_CDD_FSYNC;
- } else {
- pHTInfo->bCurrentHTSupport = false;
- pHTInfo->bCurrentRT2RTAggregation = false;
- pHTInfo->bCurrentRT2RTLongSlotTime = false;
-
- pHTInfo->IOTAction = 0;
- }
-}
-
-void HTUpdateSelfAndPeerSetting(struct ieee80211_device *ieee, struct ieee80211_network *pNetwork)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
-// struct ht_capability_ele *pPeerHTCap = (struct ht_capability_ele *)pNetwork->bssht.bdHTCapBuf;
- PHT_INFORMATION_ELE pPeerHTInfo = (PHT_INFORMATION_ELE)pNetwork->bssht.bdHTInfoBuf;
-
- if (pHTInfo->bCurrentHTSupport) {
- /*
- * Config current operation mode.
- */
- if (pNetwork->bssht.bdHTInfoLen != 0)
- pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode;
-
- /*
- * <TODO: Config according to OBSS non-HT STA present!!>
- */
- }
-}
-EXPORT_SYMBOL(HTUpdateSelfAndPeerSetting);
-
-/*
- *function: check whether HT control field exists
- * input: struct ieee80211_device *ieee
- * u8* pFrame //incoming skb->data
- * output: none
- * return: return true if the HT control field exists (false otherwise)
- * notice:
- */
-u8 HTCCheck(struct ieee80211_device *ieee, u8 *pFrame)
-{
- if (ieee->pHTInfo->bCurrentHTSupport) {
- if ((IsQoSDataFrame(pFrame) && Frame_Order(pFrame)) == 1) {
- IEEE80211_DEBUG(IEEE80211_DL_HT, "HT CONTROL FILED EXIST!!\n");
- return true;
- }
- }
- return false;
-}
-
-static void HTSetConnectBwModeCallback(struct ieee80211_device *ieee)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
-
- IEEE80211_DEBUG(IEEE80211_DL_HT, "======>%s()\n", __func__);
-
- if (pHTInfo->bCurBW40MHz) {
- if (pHTInfo->CurSTAExtChnlOffset == HT_EXTCHNL_OFFSET_UPPER)
- ieee->set_chan(ieee->dev, ieee->current_network.channel + 2);
- else if (pHTInfo->CurSTAExtChnlOffset == HT_EXTCHNL_OFFSET_LOWER)
- ieee->set_chan(ieee->dev, ieee->current_network.channel - 2);
- else
- ieee->set_chan(ieee->dev, ieee->current_network.channel);
-
- ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20_40, pHTInfo->CurSTAExtChnlOffset);
- } else {
- ieee->set_chan(ieee->dev, ieee->current_network.channel);
- ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
- }
-
- pHTInfo->bSwBwInProgress = false;
-}
-
-/*
- * This function set bandwidth mode in protocol layer.
- */
-void HTSetConnectBwMode(struct ieee80211_device *ieee, enum ht_channel_width Bandwidth, enum ht_extension_chan_offset Offset)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
-// u32 flags = 0;
-
- if (!pHTInfo->bRegBW40MHz)
- return;
-
- // To reduce dummy operation
-// if((pHTInfo->bCurBW40MHz==false && Bandwidth==HT_CHANNEL_WIDTH_20) ||
-// (pHTInfo->bCurBW40MHz==true && Bandwidth==HT_CHANNEL_WIDTH_20_40 && Offset==pHTInfo->CurSTAExtChnlOffset))
-// return;
-
-// spin_lock_irqsave(&(ieee->bw_spinlock), flags);
- if (pHTInfo->bSwBwInProgress) {
-// spin_unlock_irqrestore(&(ieee->bw_spinlock), flags);
- return;
- }
-	//If in half N mode, set to 20MHz bandwidth. 09.08.2008 WB.
- if (Bandwidth == HT_CHANNEL_WIDTH_20_40 && (!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))) {
- // Handle Illegal extension channel offset!!
- if (ieee->current_network.channel < 2 && Offset == HT_EXTCHNL_OFFSET_LOWER)
- Offset = HT_EXTCHNL_OFFSET_NO_EXT;
- if (Offset == HT_EXTCHNL_OFFSET_UPPER || Offset == HT_EXTCHNL_OFFSET_LOWER) {
- pHTInfo->bCurBW40MHz = true;
- pHTInfo->CurSTAExtChnlOffset = Offset;
- } else {
- pHTInfo->bCurBW40MHz = false;
- pHTInfo->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT;
- }
- } else {
- pHTInfo->bCurBW40MHz = false;
- pHTInfo->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT;
- }
-
- pHTInfo->bSwBwInProgress = true;
-
- /*
-	 * TODO: 2007.7.13 by Emily: wait 2000ms in order to guarantee that
-	 * switching bandwidth is executed after the scan is finished. It is a
-	 * temporary solution because software should guarantee that the last
-	 * operation of switching bandwidth is executed properly.
- */
- HTSetConnectBwModeCallback(ieee);
-
-// spin_unlock_irqrestore(&(ieee->bw_spinlock), flags);
-}
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h b/drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h
deleted file mode 100644
index 3052f53d2e7e..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __INC_QOS_TYPE_H
-#define __INC_QOS_TYPE_H
-
-/*
- * ACI/AIFSN Field.
- * Ref: WMM spec 2.2.2: WME Parameter Element, p.12.
- * Note: 1 Byte Length
- */
-struct aci_aifsn {
- u8 aifsn:4;
- u8 acm:1;
- u8 aci:2;
- u8:1;
-};
-
-/*
- * Direction Field Values.
- * Ref: WMM spec 2.2.11: WME TSPEC Element, p.18.
- */
-enum direction_value {
- DIR_UP = 0, // 0x00 // UpLink
- DIR_DOWN = 1, // 0x01 // DownLink
- DIR_DIRECT = 2, // 0x10 // DirectLink
- DIR_BI_DIR = 3, // 0x11 // Bi-Direction
-};
-
-/*
- * TS Info field in WMM TSPEC Element.
- * Ref:
- * 1. WMM spec 2.2.11: WME TSPEC Element, p.18.
- * 2. 8185 QoS code: QOS_TSINFO [def. in QoS_mp.h]
- * Note: sizeof 3 Bytes
- */
-struct qos_tsinfo {
- u16 uc_traffic_type:1; //WMM is reserved
- u16 uc_tsid:4;
- u16 uc_direction:2;
- u16 uc_access_policy:2; //WMM: bit8=0, bit7=1
- u16 uc_aggregation:1; //WMM is reserved
- u16 uc_psb:1; //WMMSA is APSD
- u16 uc_up:3;
- u16 uc_ts_info_ack_policy:2; //WMM is reserved
- u8 uc_schedule:1; //WMM is reserved
- u8:7;
-};
-
-/*
- * WMM TSPEC Body.
- * Ref: WMM spec 2.2.11: WME TSPEC Element, p.16.
- * Note: sizeof 55 bytes
- */
-struct tspec_body {
- struct qos_tsinfo ts_info; //u8 TSInfo[3];
- u16 nominal_msd_usize;
- u16 max_msd_usize;
- u32 min_service_itv;
- u32 max_service_itv;
- u32 inactivity_itv;
- u32 suspen_itv;
- u32 service_start_time;
- u32 min_data_rate;
- u32 mean_data_rate;
- u32 peak_data_rate;
- u32 max_burst_size;
- u32 delay_bound;
- u32 min_phy_rate;
- u16 surplus_bandwidth_allowance;
- u16 medium_time;
-};
-
-/*
- * 802.11 Management frame Status Code field
- */
-struct octet_string {
- u8 *octet;
- u16 length;
-};
-
-#define is_ac_valid(ac) (((ac) <= 7) ? true : false)
-
-#endif // #ifndef __INC_QOS_TYPE_H
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TS.h b/drivers/staging/rtl8192u/ieee80211/rtl819x_TS.h
deleted file mode 100644
index 7ed140009760..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TS.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _TSTYPE_H_
-#define _TSTYPE_H_
-#include "rtl819x_Qos.h"
-
-#define TS_ADDBA_DELAY 60
-
-#define TOTAL_TS_NUM 16
-#define TCLAS_NUM 4
-
-/* This define the Tx/Rx directions */
-enum tr_select {
- TX_DIR = 0,
- RX_DIR = 1,
-};
-
-union qos_tclas {
- struct type_general {
- u8 priority;
- u8 classifier_type;
- u8 mask;
- } type_general;
-
- struct type0_eth {
- u8 priority;
- u8 classifier_type;
- u8 mask;
- u8 src_addr[6];
- u8 dst_addr[6];
- u16 type;
- } type0_eth;
-
- struct type1_ipv4 {
- u8 priority;
- u8 classifier_type;
- u8 mask;
- u8 version;
- u8 src_ip[4];
- u8 dst_ip[4];
- u16 src_port;
- u16 dst_port;
- u8 dscp;
- u8 protocol;
- u8 reserved;
- } type1_ipv4;
-
- struct type1_ipv6 {
- u8 priority;
- u8 classifier_type;
- u8 mask;
- u8 version;
- u8 src_ip[16];
- u8 dst_ip[16];
- u16 src_port;
- u16 dst_port;
- u8 flow_label[3];
- } type1_ipv6;
-
- struct type2_8021q {
- u8 priority;
- u8 classifier_type;
- u8 mask;
- u16 tag_type;
- } type2_8021q;
-};
-
-struct ts_common_info {
- struct list_head list;
- struct timer_list setup_timer;
- struct timer_list inact_timer;
- u8 addr[6];
- struct tspec_body t_spec;
- union qos_tclas t_class[TCLAS_NUM];
- u8 t_clas_proc;
- u8 t_clas_num;
-};
-
-struct tx_ts_record {
- struct ts_common_info ts_common_info;
- u16 tx_cur_seq;
- struct ba_record tx_pending_ba_record;
- struct ba_record tx_admitted_ba_record;
- u8 add_ba_req_in_progress;
- u8 add_ba_req_delayed;
- u8 using_ba;
- struct timer_list ts_add_ba_timer;
- u8 num;
-};
-
-struct rx_ts_record {
- struct ts_common_info ts_common_info;
- u16 rx_indicate_seq;
- u16 rx_timeout_indicate_seq;
- struct list_head rx_pending_pkt_list;
- struct timer_list rx_pkt_pending_timer;
- struct ba_record rx_admitted_ba_record;
- u16 rx_last_seq_num;
- u8 rx_last_frag_num;
- u8 num;
-};
-
-#endif
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
deleted file mode 100644
index 3aabb401b15a..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
+++ /dev/null
@@ -1,534 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "ieee80211.h"
-#include <linux/etherdevice.h>
-#include <linux/slab.h>
-#include "rtl819x_TS.h"
-
-static void TsSetupTimeOut(struct timer_list *unused)
-{
-	// Not implemented yet
-	// This is used for WMMSA and ACM, which would send an ADDTS Req frame.
-}
-
-static void TsInactTimeout(struct timer_list *unused)
-{
-	// Not implemented yet
-	// This is used for WMMSA and ACM.
-	// This function would be called when a TS has had no Tx/Rx for some period of time.
-}
-
-/********************************************************************************************************************
- *function: I still do not understand this function, so wait for further implementation
- * input: unsigned long data //actually we send struct tx_ts_record or struct rx_ts_record to these timers
- * return: NULL
- * notice:
- ********************************************************************************************************************/
-static void RxPktPendingTimeout(struct timer_list *t)
-{
- struct rx_ts_record *pRxTs = from_timer(pRxTs, t, rx_pkt_pending_timer);
- struct ieee80211_device *ieee = container_of(pRxTs, struct ieee80211_device, RxTsRecord[pRxTs->num]);
-
- struct rx_reorder_entry *pReorderEntry = NULL;
-
- //u32 flags = 0;
- unsigned long flags = 0;
- u8 index = 0;
- bool bPktInBuf = false;
-
- spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
- IEEE80211_DEBUG(IEEE80211_DL_REORDER, "==================>%s()\n", __func__);
- if (pRxTs->rx_timeout_indicate_seq != 0xffff) {
-		// Indicate the pending packets sequentially according to SeqNum until a gap is met.
- while (!list_empty(&pRxTs->rx_pending_pkt_list)) {
- pReorderEntry = list_entry(pRxTs->rx_pending_pkt_list.prev, struct rx_reorder_entry, List);
- if (index == 0)
- pRxTs->rx_indicate_seq = pReorderEntry->SeqNum;
-
- if (SN_LESS(pReorderEntry->SeqNum, pRxTs->rx_indicate_seq) ||
- SN_EQUAL(pReorderEntry->SeqNum, pRxTs->rx_indicate_seq)) {
- list_del_init(&pReorderEntry->List);
-
- if (SN_EQUAL(pReorderEntry->SeqNum, pRxTs->rx_indicate_seq))
- pRxTs->rx_indicate_seq = (pRxTs->rx_indicate_seq + 1) % 4096;
-
- IEEE80211_DEBUG(IEEE80211_DL_REORDER, "%s: IndicateSeq: %d\n", __func__, pReorderEntry->SeqNum);
- ieee->stats_IndicateArray[index] = pReorderEntry->prxb;
- index++;
-
- list_add_tail(&pReorderEntry->List, &ieee->RxReorder_Unused_List);
- } else {
- bPktInBuf = true;
- break;
- }
- }
- }
-
- if (index > 0) {
- // Set rx_timeout_indicate_seq to 0xffff to indicate no pending packets in buffer now.
- pRxTs->rx_timeout_indicate_seq = 0xffff;
-
- // Indicate packets
- if (index > REORDER_WIN_SIZE) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorder buffer full!! \n");
- spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
- return;
- }
- ieee80211_indicate_packets(ieee, ieee->stats_IndicateArray, index);
- }
-
- if (bPktInBuf && (pRxTs->rx_timeout_indicate_seq == 0xffff)) {
- pRxTs->rx_timeout_indicate_seq = pRxTs->rx_indicate_seq;
- mod_timer(&pRxTs->rx_pkt_pending_timer,
- jiffies + msecs_to_jiffies(ieee->pHTInfo->RxReorderPendingTime));
- }
- spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
-}
-
-/********************************************************************************************************************
- *function: Add BA timer function
- * input: unsigned long data //actually we send struct tx_ts_record or struct rx_ts_record to these timers
- * return: NULL
- * notice:
- ********************************************************************************************************************/
-static void TsAddBaProcess(struct timer_list *t)
-{
- struct tx_ts_record *pTxTs = from_timer(pTxTs, t, ts_add_ba_timer);
- u8 num = pTxTs->num;
- struct ieee80211_device *ieee = container_of(pTxTs, struct ieee80211_device, TxTsRecord[num]);
-
- TsInitAddBA(ieee, pTxTs, BA_POLICY_IMMEDIATE, false);
- IEEE80211_DEBUG(IEEE80211_DL_BA, "%s: ADDBA Req is started!! \n", __func__);
-}
-
-
-static void ResetTsCommonInfo(struct ts_common_info *pTsCommonInfo)
-{
- eth_zero_addr(pTsCommonInfo->addr);
- memset(&pTsCommonInfo->t_spec, 0, sizeof(struct tspec_body));
- memset(&pTsCommonInfo->t_class, 0, sizeof(union qos_tclas) * TCLAS_NUM);
- pTsCommonInfo->t_clas_proc = 0;
- pTsCommonInfo->t_clas_num = 0;
-}
-
-static void ResetTxTsEntry(struct tx_ts_record *pTS)
-{
- ResetTsCommonInfo(&pTS->ts_common_info);
- pTS->tx_cur_seq = 0;
- pTS->add_ba_req_in_progress = false;
- pTS->add_ba_req_delayed = false;
- pTS->using_ba = false;
- ResetBaEntry(&pTS->tx_admitted_ba_record); //For BA Originator
- ResetBaEntry(&pTS->tx_pending_ba_record);
-}
-
-static void ResetRxTsEntry(struct rx_ts_record *pTS)
-{
- ResetTsCommonInfo(&pTS->ts_common_info);
-	pTS->rx_indicate_seq = 0xffff; // This indicates that rx_indicate_seq is not used now!!
-	pTS->rx_timeout_indicate_seq = 0xffff; // This indicates that rx_timeout_indicate_seq is not used now!!
- ResetBaEntry(&pTS->rx_admitted_ba_record); // For BA Recipient
-}
-
-void TSInitialize(struct ieee80211_device *ieee)
-{
- struct tx_ts_record *pTxTS = ieee->TxTsRecord;
- struct rx_ts_record *pRxTS = ieee->RxTsRecord;
- struct rx_reorder_entry *pRxReorderEntry = ieee->RxReorderEntry;
- u8 count = 0;
- IEEE80211_DEBUG(IEEE80211_DL_TS, "==========>%s()\n", __func__);
- // Initialize Tx TS related info.
- INIT_LIST_HEAD(&ieee->Tx_TS_Admit_List);
- INIT_LIST_HEAD(&ieee->Tx_TS_Pending_List);
- INIT_LIST_HEAD(&ieee->Tx_TS_Unused_List);
-
- for (count = 0; count < TOTAL_TS_NUM; count++) {
- //
- pTxTS->num = count;
- // The timers for the operation of Traffic Stream and Block Ack.
-		// DLS related timers will be added here in the future!!
- timer_setup(&pTxTS->ts_common_info.setup_timer, TsSetupTimeOut,
- 0);
- timer_setup(&pTxTS->ts_common_info.inact_timer, TsInactTimeout,
- 0);
- timer_setup(&pTxTS->ts_add_ba_timer, TsAddBaProcess, 0);
- timer_setup(&pTxTS->tx_pending_ba_record.timer, BaSetupTimeOut,
- 0);
- timer_setup(&pTxTS->tx_admitted_ba_record.timer,
- TxBaInactTimeout, 0);
- ResetTxTsEntry(pTxTS);
- list_add_tail(&pTxTS->ts_common_info.list, &ieee->Tx_TS_Unused_List);
- pTxTS++;
- }
-
- // Initialize Rx TS related info.
- INIT_LIST_HEAD(&ieee->Rx_TS_Admit_List);
- INIT_LIST_HEAD(&ieee->Rx_TS_Pending_List);
- INIT_LIST_HEAD(&ieee->Rx_TS_Unused_List);
- for (count = 0; count < TOTAL_TS_NUM; count++) {
- pRxTS->num = count;
- INIT_LIST_HEAD(&pRxTS->rx_pending_pkt_list);
- timer_setup(&pRxTS->ts_common_info.setup_timer, TsSetupTimeOut,
- 0);
- timer_setup(&pRxTS->ts_common_info.inact_timer, TsInactTimeout,
- 0);
- timer_setup(&pRxTS->rx_admitted_ba_record.timer,
- RxBaInactTimeout, 0);
- timer_setup(&pRxTS->rx_pkt_pending_timer, RxPktPendingTimeout, 0);
- ResetRxTsEntry(pRxTS);
- list_add_tail(&pRxTS->ts_common_info.list, &ieee->Rx_TS_Unused_List);
- pRxTS++;
- }
- // Initialize unused Rx Reorder List.
- INIT_LIST_HEAD(&ieee->RxReorder_Unused_List);
- for (count = 0; count < REORDER_ENTRY_NUM; count++) {
- list_add_tail(&pRxReorderEntry->List, &ieee->RxReorder_Unused_List);
- if (count == (REORDER_ENTRY_NUM - 1))
- break;
- pRxReorderEntry = &ieee->RxReorderEntry[count + 1];
- }
-}
-
-static void AdmitTS(struct ieee80211_device *ieee,
- struct ts_common_info *pTsCommonInfo, u32 InactTime)
-{
- del_timer_sync(&pTsCommonInfo->setup_timer);
- del_timer_sync(&pTsCommonInfo->inact_timer);
-
- if (InactTime != 0)
- mod_timer(&pTsCommonInfo->inact_timer,
- jiffies + msecs_to_jiffies(InactTime));
-}
-
-
-static struct ts_common_info *SearchAdmitTRStream(struct ieee80211_device *ieee,
- u8 *Addr, u8 TID,
- enum tr_select TxRxSelect)
-{
- //DIRECTION_VALUE dir;
- u8 dir;
- bool search_dir[4] = {0};
- struct list_head *psearch_list; //FIXME
- struct ts_common_info *pRet = NULL;
- if (ieee->iw_mode == IW_MODE_MASTER) { //ap mode
- if (TxRxSelect == TX_DIR) {
- search_dir[DIR_DOWN] = true;
- search_dir[DIR_BI_DIR] = true;
- } else {
- search_dir[DIR_UP] = true;
- search_dir[DIR_BI_DIR] = true;
- }
- } else if (ieee->iw_mode == IW_MODE_ADHOC) {
- if (TxRxSelect == TX_DIR)
- search_dir[DIR_UP] = true;
- else
- search_dir[DIR_DOWN] = true;
- } else {
- if (TxRxSelect == TX_DIR) {
- search_dir[DIR_UP] = true;
- search_dir[DIR_BI_DIR] = true;
- search_dir[DIR_DIRECT] = true;
- } else {
- search_dir[DIR_DOWN] = true;
- search_dir[DIR_BI_DIR] = true;
- search_dir[DIR_DIRECT] = true;
- }
- }
-
- if (TxRxSelect == TX_DIR)
- psearch_list = &ieee->Tx_TS_Admit_List;
- else
- psearch_list = &ieee->Rx_TS_Admit_List;
-
- //for(dir = DIR_UP; dir <= DIR_BI_DIR; dir++)
- for (dir = 0; dir <= DIR_BI_DIR; dir++) {
- if (!search_dir[dir])
- continue;
- list_for_each_entry(pRet, psearch_list, list) {
- // IEEE80211_DEBUG(IEEE80211_DL_TS, "ADD:%pM, TID:%d, dir:%d\n", pRet->Addr, pRet->TSpec.ts_info.ucTSID, pRet->TSpec.ts_info.ucDirection);
- if (memcmp(pRet->addr, Addr, 6) == 0)
- if (pRet->t_spec.ts_info.uc_tsid == TID)
- if (pRet->t_spec.ts_info.uc_direction == dir) {
- // printk("Bingo! got it\n");
- break;
- }
- }
- if (&pRet->list != psearch_list)
- break;
- }
-
- if (&pRet->list != psearch_list)
- return pRet;
- else
- return NULL;
-}
-
-static void MakeTSEntry(struct ts_common_info *pTsCommonInfo, u8 *Addr,
- struct tspec_body *pTSPEC, union qos_tclas *pTCLAS, u8 TCLAS_Num,
- u8 TCLAS_Proc)
-{
- u8 count;
-
- if (pTsCommonInfo == NULL)
- return;
-
- memcpy(pTsCommonInfo->addr, Addr, 6);
-
- if (pTSPEC != NULL)
- memcpy((u8 *)(&(pTsCommonInfo->t_spec)), (u8 *)pTSPEC, sizeof(struct tspec_body));
-
- for (count = 0; count < TCLAS_Num; count++)
- memcpy((u8 *)(&(pTsCommonInfo->t_class[count])), (u8 *)pTCLAS, sizeof(union qos_tclas));
-
- pTsCommonInfo->t_clas_proc = TCLAS_Proc;
- pTsCommonInfo->t_clas_num = TCLAS_Num;
-}
-
-
-bool GetTs(
- struct ieee80211_device *ieee,
- struct ts_common_info **ppTS,
- u8 *Addr,
- u8 TID,
- enum tr_select TxRxSelect, //Rx:1, Tx:0
- bool bAddNewTs
- )
-{
- u8 UP = 0;
- //
- // We do not build any TS for Broadcast or Multicast stream.
- // So reject these kinds of search here.
- //
- if (is_multicast_ether_addr(Addr)) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "get TS for Broadcast or Multicast\n");
- return false;
- }
-
- if (ieee->current_network.qos_data.supported == 0) {
- UP = 0;
- } else {
- // In WMM case: we use 4 TID only
- if (!is_ac_valid(TID)) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, " in %s(), TID(%d) is not valid\n", __func__, TID);
- return false;
- }
-
- switch (TID) {
- case 0:
- case 3:
- UP = 0;
- break;
-
- case 1:
- case 2:
- UP = 2;
- break;
-
- case 4:
- case 5:
- UP = 5;
- break;
-
- case 6:
- case 7:
- UP = 7;
- break;
- }
- }
-
- *ppTS = SearchAdmitTRStream(
- ieee,
- Addr,
- UP,
- TxRxSelect);
- if (*ppTS != NULL) {
- return true;
- } else {
- if (!bAddNewTs) {
- IEEE80211_DEBUG(IEEE80211_DL_TS, "add new TS failed(tid:%d)\n", UP);
- return false;
- } else {
- //
- // Create a new Traffic stream for current Tx/Rx
- // This is for EDCA and WMM to add a new TS.
-			// For HCCA or WMMSA, a TS cannot be admitted without negotiation.
- //
- struct tspec_body TSpec;
- struct qos_tsinfo *pTSInfo = &TSpec.ts_info;
- struct list_head *pUnusedList =
- (TxRxSelect == TX_DIR) ?
- (&ieee->Tx_TS_Unused_List) :
- (&ieee->Rx_TS_Unused_List);
-
- struct list_head *pAddmitList =
- (TxRxSelect == TX_DIR) ?
- (&ieee->Tx_TS_Admit_List) :
- (&ieee->Rx_TS_Admit_List);
-
- enum direction_value Dir = (ieee->iw_mode == IW_MODE_MASTER) ?
- ((TxRxSelect == TX_DIR) ? DIR_DOWN : DIR_UP) :
- ((TxRxSelect == TX_DIR) ? DIR_UP : DIR_DOWN);
- IEEE80211_DEBUG(IEEE80211_DL_TS, "to add Ts\n");
- if (!list_empty(pUnusedList)) {
- (*ppTS) = list_entry(pUnusedList->next, struct ts_common_info, list);
- list_del_init(&(*ppTS)->list);
- if (TxRxSelect == TX_DIR) {
- struct tx_ts_record *tmp = container_of(*ppTS, struct tx_ts_record, ts_common_info);
- ResetTxTsEntry(tmp);
- } else {
- struct rx_ts_record *tmp = container_of(*ppTS, struct rx_ts_record, ts_common_info);
- ResetRxTsEntry(tmp);
- }
-
- IEEE80211_DEBUG(IEEE80211_DL_TS, "to init current TS, UP:%d, Dir:%d, addr:%pM\n", UP, Dir, Addr);
- // Prepare TS Info related field
- pTSInfo->uc_traffic_type = 0; // Traffic type: WMM is reserved in this field
- pTSInfo->uc_tsid = UP; // TSID
- pTSInfo->uc_direction = Dir; // Direction: if there is DirectLink, this need additional consideration.
- pTSInfo->uc_access_policy = 1; // Access policy
- pTSInfo->uc_aggregation = 0; // Aggregation
-				pTSInfo->uc_psb = 0;				// PSB
- pTSInfo->uc_up = UP; // User priority
- pTSInfo->uc_ts_info_ack_policy = 0; // Ack policy
- pTSInfo->uc_schedule = 0; // Schedule
-
- MakeTSEntry(*ppTS, Addr, &TSpec, NULL, 0, 0);
- AdmitTS(ieee, *ppTS, 0);
- list_add_tail(&((*ppTS)->list), pAddmitList);
- // if there is DirectLink, we need to do additional operation here!!
-
- return true;
- } else {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "in function %s() There is not enough TS record to be used!!", __func__);
- return false;
- }
- }
- }
-}
-
-static void RemoveTsEntry(struct ieee80211_device *ieee, struct ts_common_info *pTs,
- enum tr_select TxRxSelect)
-{
- //u32 flags = 0;
- unsigned long flags = 0;
- del_timer_sync(&pTs->setup_timer);
- del_timer_sync(&pTs->inact_timer);
- TsInitDelBA(ieee, pTs, TxRxSelect);
-
- if (TxRxSelect == RX_DIR) {
- struct rx_reorder_entry *pRxReorderEntry;
- struct rx_ts_record *pRxTS = (struct rx_ts_record *)pTs;
- if (timer_pending(&pRxTS->rx_pkt_pending_timer))
- del_timer_sync(&pRxTS->rx_pkt_pending_timer);
-
- while (!list_empty(&pRxTS->rx_pending_pkt_list)) {
- spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
- //pRxReorderEntry = list_entry(&pRxTS->rx_pending_pkt_list.prev,RX_REORDER_ENTRY,List);
- pRxReorderEntry = list_entry(pRxTS->rx_pending_pkt_list.prev, struct rx_reorder_entry, List);
- list_del_init(&pRxReorderEntry->List);
- {
- int i = 0;
- struct ieee80211_rxb *prxb = pRxReorderEntry->prxb;
- if (unlikely(!prxb)) {
- spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
- return;
- }
- for (i = 0; i < prxb->nr_subframes; i++)
- dev_kfree_skb(prxb->subframes[i]);
-
- kfree(prxb);
- prxb = NULL;
- }
- list_add_tail(&pRxReorderEntry->List, &ieee->RxReorder_Unused_List);
- spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
- }
-
- } else {
- struct tx_ts_record *pTxTS = (struct tx_ts_record *)pTs;
- del_timer_sync(&pTxTS->ts_add_ba_timer);
- }
-}
-
-void RemovePeerTS(struct ieee80211_device *ieee, u8 *Addr)
-{
- struct ts_common_info *pTS, *pTmpTS;
-
- printk("===========>%s,%pM\n", __func__, Addr);
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, list) {
- if (memcmp(pTS->addr, Addr, 6) == 0) {
- RemoveTsEntry(ieee, pTS, TX_DIR);
- list_del_init(&pTS->list);
- list_add_tail(&pTS->list, &ieee->Tx_TS_Unused_List);
- }
- }
-
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, list) {
- if (memcmp(pTS->addr, Addr, 6) == 0) {
- printk("====>remove Tx_TS_admin_list\n");
- RemoveTsEntry(ieee, pTS, TX_DIR);
- list_del_init(&pTS->list);
- list_add_tail(&pTS->list, &ieee->Tx_TS_Unused_List);
- }
- }
-
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, list) {
- if (memcmp(pTS->addr, Addr, 6) == 0) {
- RemoveTsEntry(ieee, pTS, RX_DIR);
- list_del_init(&pTS->list);
- list_add_tail(&pTS->list, &ieee->Rx_TS_Unused_List);
- }
- }
-
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, list) {
- if (memcmp(pTS->addr, Addr, 6) == 0) {
- RemoveTsEntry(ieee, pTS, RX_DIR);
- list_del_init(&pTS->list);
- list_add_tail(&pTS->list, &ieee->Rx_TS_Unused_List);
- }
- }
-}
-
-void RemoveAllTS(struct ieee80211_device *ieee)
-{
- struct ts_common_info *pTS, *pTmpTS;
-
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, list) {
- RemoveTsEntry(ieee, pTS, TX_DIR);
- list_del_init(&pTS->list);
- list_add_tail(&pTS->list, &ieee->Tx_TS_Unused_List);
- }
-
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, list) {
- RemoveTsEntry(ieee, pTS, TX_DIR);
- list_del_init(&pTS->list);
- list_add_tail(&pTS->list, &ieee->Tx_TS_Unused_List);
- }
-
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, list) {
- RemoveTsEntry(ieee, pTS, RX_DIR);
- list_del_init(&pTS->list);
- list_add_tail(&pTS->list, &ieee->Rx_TS_Unused_List);
- }
-
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, list) {
- RemoveTsEntry(ieee, pTS, RX_DIR);
- list_del_init(&pTS->list);
- list_add_tail(&pTS->list, &ieee->Rx_TS_Unused_List);
- }
-}
-
-void TsStartAddBaProcess(struct ieee80211_device *ieee, struct tx_ts_record *pTxTS)
-{
- if (!pTxTS->add_ba_req_in_progress) {
- pTxTS->add_ba_req_in_progress = true;
- if (pTxTS->add_ba_req_delayed) {
- IEEE80211_DEBUG(IEEE80211_DL_BA, "%s: Delayed Start ADDBA after 60 sec!!\n", __func__);
- mod_timer(&pTxTS->ts_add_ba_timer,
- jiffies + msecs_to_jiffies(TS_ADDBA_DELAY));
- } else {
- IEEE80211_DEBUG(IEEE80211_DL_BA, "%s: Immediately Start ADDBA now!!\n", __func__);
- mod_timer(&pTxTS->ts_add_ba_timer, jiffies + 10); //set 10 ticks
- }
- } else {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "%s()==>BA timer is already added\n", __func__);
- }
-}
diff --git a/drivers/staging/rtl8192u/r8180_93cx6.c b/drivers/staging/rtl8192u/r8180_93cx6.c
deleted file mode 100644
index 2527cea60e3e..000000000000
--- a/drivers/staging/rtl8192u/r8180_93cx6.c
+++ /dev/null
@@ -1,170 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file contains the card EEPROM (93c46 or 93c56) programming routines;
- * memory is addressed in 16-bit words.
- *
- * This is part of rtl8180 OpenSource driver.
- * Copyright (C) Andrea Merello 2004 <andrea.merello@gmail.com>
- *
- * Parts of this driver are based on the GPL part of the
- * official realtek driver.
- *
- * Parts of this driver are based on the rtl8180 driver skeleton
- * from Patric Schenke & Andres Salomon.
- *
- * Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver.
- *
- * We want to thank the Authors of those projects and the Ndiswrapper
- * project Authors.
- */
-
-#include "r8180_93cx6.h"
-
-static void eprom_cs(struct net_device *dev, short bit)
-{
- u8 cmdreg;
- int err;
-
- err = read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
- if (err)
- return;
- if (bit)
- /* enable EPROM */
- write_nic_byte_E(dev, EPROM_CMD, cmdreg | EPROM_CS_BIT);
- else
- /* disable EPROM */
- write_nic_byte_E(dev, EPROM_CMD, cmdreg & ~EPROM_CS_BIT);
-
- force_pci_posting(dev);
- udelay(EPROM_DELAY);
-}
-
-static void eprom_ck_cycle(struct net_device *dev)
-{
- u8 cmdreg;
- int err;
-
- err = read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
- if (err)
- return;
- write_nic_byte_E(dev, EPROM_CMD, cmdreg | EPROM_CK_BIT);
- force_pci_posting(dev);
- udelay(EPROM_DELAY);
-
- read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
- write_nic_byte_E(dev, EPROM_CMD, cmdreg & ~EPROM_CK_BIT);
- force_pci_posting(dev);
- udelay(EPROM_DELAY);
-}
-
-static void eprom_w(struct net_device *dev, short bit)
-{
- u8 cmdreg;
- int err;
-
- err = read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
- if (err)
- return;
- if (bit)
- write_nic_byte_E(dev, EPROM_CMD, cmdreg | EPROM_W_BIT);
- else
- write_nic_byte_E(dev, EPROM_CMD, cmdreg & ~EPROM_W_BIT);
-
- force_pci_posting(dev);
- udelay(EPROM_DELAY);
-}
-
-static short eprom_r(struct net_device *dev)
-{
- u8 bit;
- int err;
-
- err = read_nic_byte_E(dev, EPROM_CMD, &bit);
- if (err)
- return err;
-
- udelay(EPROM_DELAY);
-
- if (bit & EPROM_R_BIT)
- return 1;
-
- return 0;
-}
-
-static void eprom_send_bits_string(struct net_device *dev, short b[], int len)
-{
- int i;
-
- for (i = 0; i < len; i++) {
- eprom_w(dev, b[i]);
- eprom_ck_cycle(dev);
- }
-}
-
-int eprom_read(struct net_device *dev, u32 addr)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- short read_cmd[] = {1, 1, 0};
- short addr_str[8];
- int i;
- int addr_len;
- u32 ret;
- int err;
-
- ret = 0;
- /* enable EPROM programming */
- write_nic_byte_E(dev, EPROM_CMD,
- (EPROM_CMD_PROGRAM << EPROM_CMD_OPERATING_MODE_SHIFT));
- force_pci_posting(dev);
- udelay(EPROM_DELAY);
-
- if (priv->epromtype == EPROM_93c56) {
- addr_str[7] = addr & 1;
- addr_str[6] = addr & BIT(1);
- addr_str[5] = addr & BIT(2);
- addr_str[4] = addr & BIT(3);
- addr_str[3] = addr & BIT(4);
- addr_str[2] = addr & BIT(5);
- addr_str[1] = addr & BIT(6);
- addr_str[0] = addr & BIT(7);
- addr_len = 8;
- } else {
- addr_str[5] = addr & 1;
- addr_str[4] = addr & BIT(1);
- addr_str[3] = addr & BIT(2);
- addr_str[2] = addr & BIT(3);
- addr_str[1] = addr & BIT(4);
- addr_str[0] = addr & BIT(5);
- addr_len = 6;
- }
- eprom_cs(dev, 1);
- eprom_ck_cycle(dev);
- eprom_send_bits_string(dev, read_cmd, 3);
- eprom_send_bits_string(dev, addr_str, addr_len);
-
- /*
-	 * keep chip pin D in a low state while reading.
-	 * I'm unsure if it is necessary, but it shouldn't hurt anyway
- */
- eprom_w(dev, 0);
-
- for (i = 0; i < 16; i++) {
-		/* The EEPROM needs a clock cycle between writing the opcode/address
-		 * and reading data (the EEPROM outputs a dummy 0).
- */
- eprom_ck_cycle(dev);
- err = eprom_r(dev);
- if (err < 0)
- return err;
-
- ret |= err << (15 - i);
- }
-
- eprom_cs(dev, 0);
- eprom_ck_cycle(dev);
-
- /* disable EPROM programming */
- write_nic_byte_E(dev, EPROM_CMD,
- (EPROM_CMD_NORMAL << EPROM_CMD_OPERATING_MODE_SHIFT));
- return ret;
-}
diff --git a/drivers/staging/rtl8192u/r8180_93cx6.h b/drivers/staging/rtl8192u/r8180_93cx6.h
deleted file mode 100644
index 0cdd00a4f7b8..000000000000
--- a/drivers/staging/rtl8192u/r8180_93cx6.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This is part of rtl8187 OpenSource driver
- * Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
- * Released under the terms of GPL (General Public Licence)
- *
- * Parts of this driver are based on the GPL part of the
- * official realtek driver
- * Parts of this driver are based on the rtl8180 driver skeleton
- * from Patric Schenke & Andres Salomon
- * Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver
- *
- * We want to thank the Authors of such projects and the Ndiswrapper
- * project Authors.
- */
-
-/* This file contains the card EEPROM (93c46 or 93c56) programming routines. */
-/* Memory is addressed in 16-bit words. */
-
-#include "r8192U.h"
-#include "r8192U_hw.h"
-
-#define EPROM_DELAY 10
-
-int eprom_read(struct net_device *dev, u32 addr); /* reads a 16 bits word */
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.c b/drivers/staging/rtl8192u/r8190_rtl8256.c
deleted file mode 100644
index 54747fda552f..000000000000
--- a/drivers/staging/rtl8192u/r8190_rtl8256.c
+++ /dev/null
@@ -1,294 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This is part of the rtl8192 driver
- *
- * This file contains programming code for the rtl8256
- * radio frontend.
- *
- * *Many* thanks to Realtek Corp. for their great support!
- */
-
-#include "r8192U.h"
-#include "r8192U_hw.h"
-#include "r819xU_phyreg.h"
-#include "r819xU_phy.h"
-#include "r8190_rtl8256.h"
-
-/*
- * Forward declaration of local functions
- */
-static void phy_rf8256_config_para_file(struct net_device *dev);
-
-/*--------------------------------------------------------------------------
- * Overview: set RF band width (20M or 40M)
- * Input: struct net_device* dev
- * WIRELESS_BANDWIDTH_E Bandwidth //20M or 40M
- * Output: NONE
- * Return: NONE
- * Note: the 8256 supports both 20 MHz and 40 MHz
- *--------------------------------------------------------------------------
- */
-void phy_set_rf8256_bandwidth(struct net_device *dev, enum ht_channel_width Bandwidth)
-{
- u8 eRFPath;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- /* for(eRFPath = RF90_PATH_A; eRFPath <pHalData->NumTotalRFPath;
- * eRFPath++)
- */
- for (eRFPath = 0; eRFPath < RF90_PATH_MAX; eRFPath++) {
- if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
- continue;
-
- switch (Bandwidth) {
- case HT_CHANNEL_WIDTH_20:
- if (priv->card_8192_version == VERSION_819XU_A ||
- priv->card_8192_version == VERSION_819XU_B) {
- /* 8256 D-cut, E-cut, xiong: consider it later! */
- rtl8192_phy_SetRFReg(dev,
- (enum rf90_radio_path_e)eRFPath,
- 0x0b, bMask12Bits, 0x100); /* phy para:1ba */
- rtl8192_phy_SetRFReg(dev,
- (enum rf90_radio_path_e)eRFPath,
- 0x2c, bMask12Bits, 0x3d7);
- rtl8192_phy_SetRFReg(dev,
- (enum rf90_radio_path_e)eRFPath,
- 0x0e, bMask12Bits, 0x021);
- rtl8192_phy_SetRFReg(dev,
- (enum rf90_radio_path_e)eRFPath,
- 0x14, bMask12Bits, 0x5ab);
- } else {
- RT_TRACE(COMP_ERR, "%s(): unknown hardware version\n", __func__);
- }
- break;
- case HT_CHANNEL_WIDTH_20_40:
- if (priv->card_8192_version == VERSION_819XU_A || priv->card_8192_version == VERSION_819XU_B) { /* 8256 D-cut, E-cut, xiong: consider it later! */
- rtl8192_phy_SetRFReg(dev, (enum rf90_radio_path_e)eRFPath, 0x0b, bMask12Bits, 0x300); /* phy para:3ba */
- rtl8192_phy_SetRFReg(dev, (enum rf90_radio_path_e)eRFPath, 0x2c, bMask12Bits, 0x3df);
- rtl8192_phy_SetRFReg(dev, (enum rf90_radio_path_e)eRFPath, 0x0e, bMask12Bits, 0x0a1);
-
- if (priv->chan == 3 || priv->chan == 9)
- /* I need to set priv->chan whenever current channel changes */
- rtl8192_phy_SetRFReg(dev, (enum rf90_radio_path_e)eRFPath, 0x14, bMask12Bits, 0x59b);
- else
- rtl8192_phy_SetRFReg(dev, (enum rf90_radio_path_e)eRFPath, 0x14, bMask12Bits, 0x5ab);
- } else {
- RT_TRACE(COMP_ERR, "%s(): unknown hardware version\n", __func__);
- }
- break;
- default:
- RT_TRACE(COMP_ERR, "%s(): unknown Bandwidth: %#X\n", __func__, Bandwidth);
- break;
- }
- }
-}
-
-/*--------------------------------------------------------------------------
- * Overview: Interface to config 8256
- * Input: struct net_device* dev
- * Output: NONE
- * Return: NONE
- *--------------------------------------------------------------------------
- */
-void phy_rf8256_config(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- /* Initialize general global value
- *
- * TODO: Extend RF_PATH_C and RF_PATH_D in the future
- */
- priv->NumTotalRFPath = RTL819X_TOTAL_RF_PATH;
- /* Config BB and RF */
- phy_rf8256_config_para_file(dev);
-}
-
-/*--------------------------------------------------------------------------
- * Overview: Interface to config 8256
- * Input: struct net_device* dev
- * Output: NONE
- * Return: NONE
- *--------------------------------------------------------------------------
- */
-static void phy_rf8256_config_para_file(struct net_device *dev)
-{
- u32 u4RegValue = 0;
- u8 eRFPath;
- BB_REGISTER_DEFINITION_T *pPhyReg;
- struct r8192_priv *priv = ieee80211_priv(dev);
- u32 RegOffSetToBeCheck = 0x3;
- u32 RegValueToBeCheck = 0x7f1;
- u32 RF3_Final_Value = 0;
- u8 ConstRetryTimes = 5, RetryTimes = 5;
- u8 ret = 0;
- /* Initialize RF */
- for (eRFPath = (enum rf90_radio_path_e)RF90_PATH_A; eRFPath < priv->NumTotalRFPath; eRFPath++) {
- if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
- continue;
-
- pPhyReg = &priv->PHYRegDef[eRFPath];
-
- /* Joseph test for shorten RF config
- * pHalData->RfReg0Value[eRFPath] = rtl8192_phy_QueryRFReg(dev, (enum rf90_radio_path_e)eRFPath, rGlobalCtrl, bMaskDWord);
- * ----Store original RFENV control type
- */
- switch (eRFPath) {
- case RF90_PATH_A:
- case RF90_PATH_C:
- u4RegValue = rtl8192_QueryBBReg(dev, pPhyReg->rfintfs, bRFSI_RFENV);
- break;
- case RF90_PATH_B:
- case RF90_PATH_D:
- u4RegValue = rtl8192_QueryBBReg(dev, pPhyReg->rfintfs, bRFSI_RFENV << 16);
- break;
- }
-
- /*----Set RF_ENV enable----*/
- rtl8192_setBBreg(dev, pPhyReg->rfintfe, bRFSI_RFENV << 16, 0x1);
-
- /*----Set RF_ENV output high----*/
- rtl8192_setBBreg(dev, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
-
- /* Set bit number of Address and Data for RF register */
- rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); /* Set 0 to 4 bits for Z-serial and set 1 to 6 bits for 8258 */
- rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); /* Set 0 to 12 bits for Z-serial and 8258, and set 1 to 14 bits for ??? */
-
- rtl8192_phy_SetRFReg(dev, (enum rf90_radio_path_e)eRFPath, 0x0, bMask12Bits, 0xbf);
-
- /* Check RF block (for FPGA platform only)----
- * TODO: this function should be removed on ASIC , Emily 2007.2.2
- */
- if (rtl8192_phy_checkBBAndRF(dev, HW90_BLOCK_RF, (enum rf90_radio_path_e)eRFPath)) {
- RT_TRACE(COMP_ERR, "phy_rf8256_config():Check Radio[%d] Fail!!\n", eRFPath);
- goto phy_RF8256_Config_ParaFile_Fail;
- }
-
- RetryTimes = ConstRetryTimes;
- RF3_Final_Value = 0;
-		/*----Initialize RF from configuration file----*/
- switch (eRFPath) {
- case RF90_PATH_A:
- while (RF3_Final_Value != RegValueToBeCheck && RetryTimes != 0) {
- ret = rtl8192_phy_ConfigRFWithHeaderFile(dev, (enum rf90_radio_path_e)eRFPath);
- RF3_Final_Value = rtl8192_phy_QueryRFReg(dev, (enum rf90_radio_path_e)eRFPath, RegOffSetToBeCheck, bMask12Bits);
- RT_TRACE(COMP_RF, "RF %d %d register final value: %x\n", eRFPath, RegOffSetToBeCheck, RF3_Final_Value);
- RetryTimes--;
- }
- break;
- case RF90_PATH_B:
- while (RF3_Final_Value != RegValueToBeCheck && RetryTimes != 0) {
- ret = rtl8192_phy_ConfigRFWithHeaderFile(dev, (enum rf90_radio_path_e)eRFPath);
- RF3_Final_Value = rtl8192_phy_QueryRFReg(dev, (enum rf90_radio_path_e)eRFPath, RegOffSetToBeCheck, bMask12Bits);
- RT_TRACE(COMP_RF, "RF %d %d register final value: %x\n", eRFPath, RegOffSetToBeCheck, RF3_Final_Value);
- RetryTimes--;
- }
- break;
- case RF90_PATH_C:
- while (RF3_Final_Value != RegValueToBeCheck && RetryTimes != 0) {
- ret = rtl8192_phy_ConfigRFWithHeaderFile(dev, (enum rf90_radio_path_e)eRFPath);
- RF3_Final_Value = rtl8192_phy_QueryRFReg(dev, (enum rf90_radio_path_e)eRFPath, RegOffSetToBeCheck, bMask12Bits);
- RT_TRACE(COMP_RF, "RF %d %d register final value: %x\n", eRFPath, RegOffSetToBeCheck, RF3_Final_Value);
- RetryTimes--;
- }
- break;
- case RF90_PATH_D:
- while (RF3_Final_Value != RegValueToBeCheck && RetryTimes != 0) {
- ret = rtl8192_phy_ConfigRFWithHeaderFile(dev, (enum rf90_radio_path_e)eRFPath);
- RF3_Final_Value = rtl8192_phy_QueryRFReg(dev, (enum rf90_radio_path_e)eRFPath, RegOffSetToBeCheck, bMask12Bits);
- RT_TRACE(COMP_RF, "RF %d %d register final value: %x\n", eRFPath, RegOffSetToBeCheck, RF3_Final_Value);
- RetryTimes--;
- }
- break;
- }
-
- /*----Restore RFENV control type----*/
- switch (eRFPath) {
- case RF90_PATH_A:
- case RF90_PATH_C:
- rtl8192_setBBreg(dev, pPhyReg->rfintfs, bRFSI_RFENV, u4RegValue);
- break;
- case RF90_PATH_B:
- case RF90_PATH_D:
- rtl8192_setBBreg(dev, pPhyReg->rfintfs, bRFSI_RFENV << 16, u4RegValue);
- break;
- }
-
- if (ret) {
- RT_TRACE(COMP_ERR, "%s():Radio[%d] Fail!!", __func__, eRFPath);
- goto phy_RF8256_Config_ParaFile_Fail;
- }
- }
-
- RT_TRACE(COMP_PHY, "PHY Initialization Success\n");
- return;
-
-phy_RF8256_Config_ParaFile_Fail:
- RT_TRACE(COMP_ERR, "PHY Initialization failed\n");
-}
-
-void phy_set_rf8256_cck_tx_power(struct net_device *dev, u8 powerlevel)
-{
- u32 TxAGC = 0;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- TxAGC = powerlevel;
-
- if (priv->bDynamicTxLowPower) {
- if (priv->CustomerID == RT_CID_819x_Netcore)
- TxAGC = 0x22;
- else
- TxAGC += priv->CckPwEnl;
- }
-
- if (TxAGC > 0x24)
- TxAGC = 0x24;
- rtl8192_setBBreg(dev, rTxAGC_CCK_Mcs32, bTxAGCRateCCK, TxAGC);
-}
-
-void phy_set_rf8256_ofdm_tx_power(struct net_device *dev, u8 powerlevel)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- /* Joseph TxPower for 8192 testing */
- u32 writeVal, powerBase0, powerBase1, writeVal_tmp;
- u8 index = 0;
- u16 RegOffset[6] = {0xe00, 0xe04, 0xe10, 0xe14, 0xe18, 0xe1c};
- u8 byte0, byte1, byte2, byte3;
-
- powerBase0 = powerlevel + priv->TxPowerDiff; /* OFDM rates */
- powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) | (powerBase0 << 8) | powerBase0;
- powerBase1 = powerlevel; /* MCS rates */
- powerBase1 = (powerBase1 << 24) | (powerBase1 << 16) | (powerBase1 << 8) | powerBase1;
-
- for (index = 0; index < 6; index++) {
- writeVal = priv->MCSTxPowerLevelOriginalOffset[index] + ((index < 2) ? powerBase0 : powerBase1);
- byte0 = (u8)(writeVal & 0x7f);
- byte1 = (u8)((writeVal & 0x7f00) >> 8);
- byte2 = (u8)((writeVal & 0x7f0000) >> 16);
- byte3 = (u8)((writeVal & 0x7f000000) >> 24);
-
- if (byte0 > 0x24)
- /* Max power index = 0x24 */
- byte0 = 0x24;
- if (byte1 > 0x24)
- byte1 = 0x24;
- if (byte2 > 0x24)
- byte2 = 0x24;
- if (byte3 > 0x24)
- byte3 = 0x24;
-
- /* for tx power track */
- if (index == 3) {
- writeVal_tmp = (byte3 << 24) | (byte2 << 16) | (byte1 << 8) | byte0;
- priv->Pwr_Track = writeVal_tmp;
- }
-
- if (priv->bDynamicTxHighPower) {
-			/* Added by Jacken 2008/03/06.
-			 * Emily, 20080613. Set low tx power for both MCS and legacy OFDM
- */
- writeVal = 0x03030303;
- } else {
- writeVal = (byte3 << 24) | (byte2 << 16) | (byte1 << 8) | byte0;
- }
- rtl8192_setBBreg(dev, RegOffset[index], 0x7f7f7f7f, writeVal);
- }
-}
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.h b/drivers/staging/rtl8192u/r8190_rtl8256.h
deleted file mode 100644
index 9ea67f86f911..000000000000
--- a/drivers/staging/rtl8192u/r8190_rtl8256.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This is part of the rtl8180-sa2400 driver
- * released under the GPL (See file COPYING for details).
- * Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
- *
- *
- * This files contains programming code for the rtl8256
- * radio frontend.
- *
- * *Many* thanks to Realtek Corp. for their great support!
- */
-
-#ifndef RTL8225H
-#define RTL8225H
-
-#define RTL819X_TOTAL_RF_PATH 2 /* for 8192U */
-void phy_set_rf8256_bandwidth(struct net_device *dev,
- enum ht_channel_width bandwidth);
-void phy_rf8256_config(struct net_device *dev);
-void phy_set_rf8256_cck_tx_power(struct net_device *dev, u8 powerlevel);
-void phy_set_rf8256_ofdm_tx_power(struct net_device *dev, u8 powerlevel);
-
-#endif
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
deleted file mode 100644
index ff0ada00bf41..000000000000
--- a/drivers/staging/rtl8192u/r8192U.h
+++ /dev/null
@@ -1,1129 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This is part of rtl8187 OpenSource driver.
- * Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
- * Released under the terms of GPL (General Public Licence)
- *
- * Parts of this driver are based on the GPL part of the
- * official realtek driver
- *
- * Parts of this driver are based on the rtl8192 driver skeleton
- * from Patric Schenke & Andres Salomon
- *
- * Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver
- *
- * We want to thank the Authors of those projects and the Ndiswrapper
- * project Authors.
- */
-
-#ifndef R8192U_H
-#define R8192U_H
-
-#include <linux/compiler.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/netdevice.h>
-#include <linux/usb.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-#include <linux/rtnetlink.h>
-#include <linux/wireless.h>
-#include <linux/timer.h>
-#include <linux/proc_fs.h>
-#include <linux/if_arp.h>
-#include <linux/random.h>
-#include <linux/io.h>
-#include "ieee80211/ieee80211.h"
-
-#define RTL8192U
-#define RTL819XU_MODULE_NAME "rtl819xU"
-/* HW security */
-#define MAX_KEY_LEN 61
-#define KEY_BUF_SIZE 5
-
-#define RX_SMOOTH_FACTOR 20
-#define DMESG(x, a...) no_printk(x, ##a)
-#define DMESGW(x, a...) no_printk(x, ##a)
-#define DMESGE(x, a...) no_printk(x, ##a)
-extern u32 rt_global_debug_component;
-#define RT_TRACE(component, x, args...) \
- do { \
- if (rt_global_debug_component & (component)) \
- pr_debug("RTL8192U: " x "\n", ##args); \
- } while (0)
-
-#define COMP_TRACE BIT(0) /* Function call tracing. */
-#define COMP_DBG BIT(1)
-#define COMP_INIT BIT(2) /* Driver initialization/halt/reset. */
-
-#define COMP_RECV BIT(3) /* Receive data path. */
-#define COMP_SEND BIT(4) /* Send data path. */
-#define COMP_IO BIT(5)
-/* 802.11 Power Save mode or System/Device Power state. */
-#define COMP_POWER BIT(6)
-/* 802.11 link related: join/start BSS, leave BSS. */
-#define COMP_EPROM BIT(7)
-#define COMP_SWBW BIT(8) /* Bandwidth switch. */
-#define COMP_POWER_TRACKING BIT(9) /* 8190 TX Power Tracking */
-#define COMP_TURBO BIT(10) /* Turbo Mode */
-#define COMP_QOS BIT(11)
-#define COMP_RATE BIT(12) /* Rate Adaptive mechanism */
-#define COMP_RM BIT(13) /* Radio Measurement */
-#define COMP_DIG BIT(14)
-#define COMP_PHY BIT(15)
-#define COMP_CH BIT(16) /* Channel setting debug */
-#define COMP_TXAGC BIT(17) /* Tx power */
-#define COMP_HIPWR BIT(18) /* High Power Mechanism */
-#define COMP_HALDM BIT(19) /* HW Dynamic Mechanism */
-#define COMP_SEC		BIT(20)	/* Security. */
-#define COMP_LED BIT(21)
-#define COMP_RF BIT(22)
-#define COMP_RXDESC BIT(23) /* Rx desc information for SD3 debug */
-
-/* 11n or 8190 specific code */
-
-#define COMP_FIRMWARE BIT(24) /* Firmware downloading */
-#define COMP_HT BIT(25) /* 802.11n HT related information */
-#define COMP_AMSDU BIT(26) /* A-MSDU Debugging */
-#define COMP_SCAN BIT(27)
-#define COMP_DOWN BIT(29) /* rm driver module */
-#define COMP_RESET BIT(30) /* Silent reset */
-#define COMP_ERR BIT(31) /* Error out, always on */
-
-#define RTL819x_DEBUG
-#ifdef RTL819x_DEBUG
-#define RTL8192U_ASSERT(expr) \
- do { \
- if (!(expr)) { \
- pr_debug("Assertion failed! %s, %s, %s, line = %d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- } \
- } while (0)
-/*
- * Dump a data buffer for debugging.
- * If you want to print DATA buffers related to BA,
- * please set ieee80211_debug_level to DATA|BA
- */
-#define RT_DEBUG_DATA(level, data, datalen) \
- do { \
- if ((rt_global_debug_component & (level)) == (level)) { \
- int i; \
- u8 *pdata = (u8 *)data; \
- pr_debug("RTL8192U: %s()\n", __func__); \
- for (i = 0; i < (int)(datalen); i++) { \
- printk("%2x ", pdata[i]); \
- if ((i+1)%16 == 0) \
- printk("\n"); \
- } \
- printk("\n"); \
- } \
- } while (0)
-#else
-#define RTL8192U_ASSERT(expr) do {} while (0)
-#define RT_DEBUG_DATA(level, data, datalen) do {} while (0)
-#endif /* RTL819x_DEBUG */
-
-/* Queue Select Value in TxDesc */
-#define QSLT_BK 0x1
-#define QSLT_BE 0x0
-#define QSLT_VI 0x4
-#define QSLT_VO 0x6
-#define QSLT_BEACON 0x10
-#define QSLT_HIGH 0x11
-#define QSLT_MGNT 0x12
-#define QSLT_CMD 0x13
-
-#define DESC90_RATE1M 0x00
-#define DESC90_RATE2M 0x01
-#define DESC90_RATE5_5M 0x02
-#define DESC90_RATE11M 0x03
-#define DESC90_RATE6M 0x04
-#define DESC90_RATE9M 0x05
-#define DESC90_RATE12M 0x06
-#define DESC90_RATE18M 0x07
-#define DESC90_RATE24M 0x08
-#define DESC90_RATE36M 0x09
-#define DESC90_RATE48M 0x0a
-#define DESC90_RATE54M 0x0b
-#define DESC90_RATEMCS0 0x00
-#define DESC90_RATEMCS1 0x01
-#define DESC90_RATEMCS2 0x02
-#define DESC90_RATEMCS3 0x03
-#define DESC90_RATEMCS4 0x04
-#define DESC90_RATEMCS5 0x05
-#define DESC90_RATEMCS6 0x06
-#define DESC90_RATEMCS7 0x07
-#define DESC90_RATEMCS8 0x08
-#define DESC90_RATEMCS9 0x09
-#define DESC90_RATEMCS10 0x0a
-#define DESC90_RATEMCS11 0x0b
-#define DESC90_RATEMCS12 0x0c
-#define DESC90_RATEMCS13 0x0d
-#define DESC90_RATEMCS14 0x0e
-#define DESC90_RATEMCS15 0x0f
-#define DESC90_RATEMCS32 0x20
-
-#define RTL819X_DEFAULT_RF_TYPE RF_1T2R
-
-#define IEEE80211_WATCH_DOG_TIME 2000
-#define PHY_Beacon_RSSI_SLID_WIN_MAX 10
-/* For Tx Power Tracking */
-#define OFDM_Table_Length 19
-#define CCK_Table_length 12
-
-/* For rtl819x */
-struct tx_desc_819x_usb {
- /* DWORD 0 */
- u16 PktSize;
- u8 Offset;
- u8 Reserved0:3;
- u8 CmdInit:1;
- u8 LastSeg:1;
- u8 FirstSeg:1;
- u8 LINIP:1;
- u8 OWN:1;
-
- /* DWORD 1 */
- u8 TxFWInfoSize;
- u8 RATid:3;
- u8 DISFB:1;
- u8 USERATE:1;
- u8 MOREFRAG:1;
- u8 NoEnc:1;
- u8 PIFS:1;
- u8 QueueSelect:5;
- u8 NoACM:1;
- u8 Reserved1:2;
- u8 SecCAMID:5;
- u8 SecDescAssign:1;
- u8 SecType:2;
-
- /* DWORD 2 */
- u16 TxBufferSize;
- u8 ResvForPaddingLen:7;
- u8 Reserved3:1;
- u8 Reserved4;
-
- /* DWORD 3, 4, 5 */
- u32 Reserved5;
- u32 Reserved6;
- u32 Reserved7;
-};
-
-struct tx_desc_cmd_819x_usb {
- /* DWORD 0 */
- u16 Reserved0;
- u8 Reserved1;
- u8 Reserved2:3;
- u8 CmdInit:1;
- u8 LastSeg:1;
- u8 FirstSeg:1;
- u8 LINIP:1;
- u8 OWN:1;
-
-	/* DWORD 1 */
- u8 TxFWInfoSize;
- u8 Reserved3;
- u8 QueueSelect;
- u8 Reserved4;
-
-	/* DWORD 2 */
- u16 TxBufferSize;
- u16 Reserved5;
-
- /* DWORD 3, 4, 5 */
- u32 Reserved6;
- u32 Reserved7;
- u32 Reserved8;
-};
-
-struct tx_fwinfo_819x_usb {
-	/* DWORD 0 */
- u8 TxRate:7;
- u8 CtsEnable:1;
- u8 RtsRate:7;
- u8 RtsEnable:1;
- u8 TxHT:1;
-	u8 Short:1;		/* Short PLCP for CCK or short GI for 11n MCS */
- u8 TxBandwidth:1; /* Used for HT MCS rate only */
- u8 TxSubCarrier:2; /* Used for legacy OFDM rate only */
- u8 STBC:2;
- u8 AllowAggregation:1;
- /* Interpret RtsRate field as high throughput data rate */
- u8 RtsHT:1;
- u8 RtsShort:1; /* Short PLCP for CCK or short GI for 11n MCS */
- u8 RtsBandwidth:1; /* Used for HT MCS rate only */
- u8 RtsSubcarrier:2;/* Used for legacy OFDM rate only */
- u8 RtsSTBC:2;
- /* Enable firmware to recalculate and assign packet duration */
- u8 EnableCPUDur:1;
-
- /* DWORD 1 */
- u32 RxMF:2;
- u32 RxAMD:3;
- /* 1 indicates Tx info is gathered by firmware and returned by Rx Cmd */
- u32 TxPerPktInfoFeedback:1;
- u32 Reserved1:2;
- u32 TxAGCOffSet:4;
- u32 TxAGCSign:1;
- u32 Tx_INFO_RSVD:6;
- u32 PacketID:13;
-};
-
-struct rtl8192_rx_info {
- struct urb *urb;
- struct net_device *dev;
- u8 out_pipe;
-};
-
-struct rx_desc_819x_usb {
- /* DWORD 0 */
- u16 Length:14;
- u16 CRC32:1;
- u16 ICV:1;
- u8 RxDrvInfoSize;
- u8 Shift:2;
- u8 PHYStatus:1;
- u8 SWDec:1;
- u8 Reserved1:4;
-
- /* DWORD 1 */
- u32 Reserved2;
-};
-
-struct rx_drvinfo_819x_usb {
- /* DWORD 0 */
- u16 Reserved1:12;
- u16 PartAggr:1;
- u16 FirstAGGR:1;
- u16 Reserved2:2;
-
- u8 RxRate:7;
- u8 RxHT:1;
-
- u8 BW:1;
- u8 SPLCP:1;
- u8 Reserved3:2;
- u8 PAM:1;
- u8 Mcast:1;
- u8 Bcast:1;
- u8 Reserved4:1;
-
- /* DWORD 1 */
- u32 TSFL;
-
-};
-
-/* Support OSes with up to 64-bit bus width */
-#define MAX_DEV_ADDR_SIZE 8
-/* For RTL8190 */
-#define MAX_FIRMWARE_INFORMATION_SIZE 32
-#define MAX_802_11_HEADER_LENGTH (40 + MAX_FIRMWARE_INFORMATION_SIZE)
-#define ENCRYPTION_MAX_OVERHEAD 128
-#define USB_HWDESC_HEADER_LEN sizeof(struct tx_desc_819x_usb)
-#define TX_PACKET_SHIFT_BYTES (USB_HWDESC_HEADER_LEN + sizeof(struct tx_fwinfo_819x_usb))
-#define MAX_FRAGMENT_COUNT 8
-#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
-#define MAX_TRANSMIT_BUFFER_SIZE 32000
-#else
-#define MAX_TRANSMIT_BUFFER_SIZE 8000
-#endif
-/* Octets for crc32 (FCS, ICV) */
-#define scrclng 4
-
-enum rf_op_type {
- RF_OP_By_SW_3wire = 0,
- RF_OP_By_FW,
- RF_OP_MAX
-};
-
-/* 8190 Loopback Mode definition */
-typedef enum _rtl819xUsb_loopback {
- RTL819xU_NO_LOOPBACK = 0,
- RTL819xU_MAC_LOOPBACK = 1,
- RTL819xU_DMA_LOOPBACK = 2,
- RTL819xU_CCK_LOOPBACK = 3,
-} rtl819xUsb_loopback_e;
-
-/* due to rtl8192 firmware */
-typedef enum _desc_packet_type_e {
- DESC_PACKET_TYPE_INIT = 0,
- DESC_PACKET_TYPE_NORMAL = 1,
-} desc_packet_type_e;
-
-typedef enum _firmware_status {
- FW_STATUS_0_INIT = 0,
- FW_STATUS_1_MOVE_BOOT_CODE = 1,
- FW_STATUS_2_MOVE_MAIN_CODE = 2,
- FW_STATUS_3_TURNON_CPU = 3,
- FW_STATUS_4_MOVE_DATA_CODE = 4,
- FW_STATUS_5_READY = 5,
-} firmware_status_e;
-
-typedef struct _fw_seg_container {
- u16 seg_size;
- u8 *seg_ptr;
-} fw_seg_container, *pfw_seg_container;
-typedef struct _rt_firmware {
- firmware_status_e firmware_status;
- u16 cmdpacket_frag_threshold;
-#define RTL8190_MAX_FIRMWARE_CODE_SIZE 64000
- u8 firmware_buf[RTL8190_MAX_FIRMWARE_CODE_SIZE];
- u16 firmware_buf_size;
-} rt_firmware, *prt_firmware;
-
-/* Add this to 9100 bytes to receive A-MSDU from RT-AP */
-#define MAX_RECEIVE_BUFFER_SIZE 9100
-
-typedef struct _rt_firmware_info_819xUsb {
- u8 sz_info[16];
-} rt_firmware_info_819xUsb, *prt_firmware_info_819xUsb;
-
-/* Firmware Queue Layout */
-#define NUM_OF_FIRMWARE_QUEUE 10
-#define NUM_OF_PAGES_IN_FW 0x100
-
-#ifdef USE_ONE_PIPE
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x000
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x000
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x0ff
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x000
-#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0
-#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x00
-#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0
-#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0x00
-#else
-
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x020
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x020
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x040
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x040
-#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0
-#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x4
-#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x20
-#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0
-#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x4
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0x18
-
-#endif
-
-#define APPLIED_RESERVED_QUEUE_IN_FW 0x80000000
-#define RSVD_FW_QUEUE_PAGE_BK_SHIFT 0x00
-#define RSVD_FW_QUEUE_PAGE_BE_SHIFT 0x08
-#define RSVD_FW_QUEUE_PAGE_VI_SHIFT 0x10
-#define RSVD_FW_QUEUE_PAGE_VO_SHIFT 0x18
-#define RSVD_FW_QUEUE_PAGE_MGNT_SHIFT 0x10
-#define RSVD_FW_QUEUE_PAGE_CMD_SHIFT 0x08
-#define RSVD_FW_QUEUE_PAGE_BCN_SHIFT 0x00
-#define RSVD_FW_QUEUE_PAGE_PUB_SHIFT 0x08
-
-/*
- * =================================================================
- * =================================================================
- */
-
-#define EPROM_93c46 0
-#define EPROM_93c56 1
-
-#define DEFAULT_FRAG_THRESHOLD 2342U
-#define MIN_FRAG_THRESHOLD 256U
-#define DEFAULT_BEACONINTERVAL 0x64U
-#define DEFAULT_BEACON_ESSID "Rtl819xU"
-
-#define DEFAULT_SSID ""
-#define DEFAULT_RETRY_RTS 7
-#define DEFAULT_RETRY_DATA 7
-#define PRISM_HDR_SIZE 64
-
-#define PHY_RSSI_SLID_WIN_MAX 100
-
-typedef enum _WIRELESS_MODE {
- WIRELESS_MODE_UNKNOWN = 0x00,
- WIRELESS_MODE_A = 0x01,
- WIRELESS_MODE_B = 0x02,
- WIRELESS_MODE_G = 0x04,
- WIRELESS_MODE_AUTO = 0x08,
- WIRELESS_MODE_N_24G = 0x10,
- WIRELESS_MODE_N_5G = 0x20
-} WIRELESS_MODE;
-
-#define RTL_IOCTL_WPA_SUPPLICANT (SIOCIWFIRSTPRIV + 30)
-
-typedef struct buffer {
- struct buffer *next;
- u32 *buf;
-
-} buffer;
-
-typedef struct rtl_reg_debug {
- unsigned int cmd;
- struct {
- unsigned char type;
- unsigned char addr;
- unsigned char page;
- unsigned char length;
- } head;
- unsigned char buf[0xff];
-} rtl_reg_debug;
-
-typedef struct _rt_9x_tx_rate_history {
- u32 cck[4];
- u32 ofdm[8];
- u32 ht_mcs[4][16];
-} rt_tx_rahis_t, *prt_tx_rahis_t;
-typedef struct _RT_SMOOTH_DATA_4RF {
- s8 elements[4][100]; /* array to store values */
- u32 index; /* index to current array to store */
- u32 TotalNum; /* num of valid elements */
- u32 TotalVal[4]; /* sum of valid elements */
-} RT_SMOOTH_DATA_4RF, *PRT_SMOOTH_DATA_4RF;
-
-/* This may be changed for the D-cut's larger aggregation size */
-#define MAX_8192U_RX_SIZE 8192
-/* Stats seem messed up, clean them up ASAP */
-typedef struct Stats {
- unsigned long txrdu;
- unsigned long rxok;
- unsigned long rxframgment;
- unsigned long rxurberr;
- unsigned long rxstaterr;
- /* 0: Total, 1: OK, 2: CRC, 3: ICV */
- unsigned long received_rate_histogram[4][32];
- /* 0: Long preamble/GI, 1: Short preamble/GI */
- unsigned long received_preamble_GI[2][32];
- /* level: (<4K), (4K~8K), (8K~16K), (16K~32K), (32K~64K) */
- unsigned long rx_AMPDUsize_histogram[5];
- /* level: (<5), (5~10), (10~20), (20~40), (>40) */
- unsigned long rx_AMPDUnum_histogram[5];
- unsigned long numpacket_matchbssid;
- unsigned long numpacket_toself;
- unsigned long num_process_phyinfo;
- unsigned long numqry_phystatus;
- unsigned long numqry_phystatusCCK;
- unsigned long numqry_phystatusHT;
- /* 0: 20M, 1: full40M, 2: upper20M, 3: lower20M, 4: duplicate */
- unsigned long received_bwtype[5];
- unsigned long txnperr;
- unsigned long txnpdrop;
- unsigned long txresumed;
- unsigned long txnpokint;
- unsigned long txoverflow;
- unsigned long txlpokint;
- unsigned long txlpdrop;
- unsigned long txlperr;
- unsigned long txbeokint;
- unsigned long txbedrop;
- unsigned long txbeerr;
- unsigned long txbkokint;
- unsigned long txbkdrop;
- unsigned long txbkerr;
- unsigned long txviokint;
- unsigned long txvidrop;
- unsigned long txvierr;
- unsigned long txvookint;
- unsigned long txvodrop;
- unsigned long txvoerr;
- unsigned long txbeaconokint;
- unsigned long txbeacondrop;
- unsigned long txbeaconerr;
- unsigned long txmanageokint;
- unsigned long txmanagedrop;
- unsigned long txmanageerr;
- unsigned long txdatapkt;
- unsigned long txfeedback;
- unsigned long txfeedbackok;
-
- unsigned long txoktotal;
- unsigned long txokbytestotal;
- unsigned long txokinperiod;
- unsigned long txmulticast;
- unsigned long txbytesmulticast;
- unsigned long txbroadcast;
- unsigned long txbytesbroadcast;
- unsigned long txunicast;
- unsigned long txbytesunicast;
-
- unsigned long rxoktotal;
- unsigned long rxbytesunicast;
- unsigned long txfeedbackfail;
- unsigned long txerrtotal;
- unsigned long txerrbytestotal;
- unsigned long txerrmulticast;
- unsigned long txerrbroadcast;
- unsigned long txerrunicast;
- unsigned long txretrycount;
- unsigned long txfeedbackretry;
- u8 last_packet_rate;
- unsigned long slide_signal_strength[100];
- unsigned long slide_evm[100];
- /* For recording sliding window's RSSI value */
- unsigned long slide_rssi_total;
- /* For recording sliding window's EVM value */
- unsigned long slide_evm_total;
- /* Transformed to dBm. Beautified signal strength for UI, not exact */
- long signal_strength;
- long signal_quality;
- long last_signal_strength_inpercent;
- /* Correct smoothed ss in dBm, only used in the driver
- * to report real power now
- */
- long recv_signal_power;
- u8 rx_rssi_percentage[4];
- u8 rx_evm_percentage[2];
- long rxSNRdB[4];
- rt_tx_rahis_t txrate;
- /* For beacon RSSI */
- u32 Slide_Beacon_pwdb[100];
- u32 Slide_Beacon_Total;
- RT_SMOOTH_DATA_4RF cck_adc_pwdb;
-
- u32 CurrentShowTxate;
-} Stats;
-
-/* Bandwidth Offset */
-#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
-#define HAL_PRIME_CHNL_OFFSET_LOWER 1
-#define HAL_PRIME_CHNL_OFFSET_UPPER 2
-
-typedef struct ChnlAccessSetting {
- u16 SIFS_Timer;
- u16 DIFS_Timer;
- u16 SlotTimeTimer;
- u16 EIFS_Timer;
- u16 CWminIndex;
- u16 CWmaxIndex;
-} *PCHANNEL_ACCESS_SETTING, CHANNEL_ACCESS_SETTING;
-
-typedef struct _BB_REGISTER_DEFINITION {
- /* set software control: 0x870~0x877 [8 bytes] */
- u32 rfintfs;
- /* readback data: 0x8e0~0x8e7 [8 bytes] */
- u32 rfintfi;
- /* output data: 0x860~0x86f [16 bytes] */
- u32 rfintfo;
- /* output enable: 0x860~0x86f [16 bytes] */
- u32 rfintfe;
- /* LSSI data: 0x840~0x84f [16 bytes] */
- u32 rf3wireOffset;
- /* BB Band Select: 0x878~0x87f [8 bytes] */
- u32 rfLSSI_Select;
- /* Tx gain stage: 0x80c~0x80f [4 bytes] */
- u32 rfTxGainStage;
- /* wire parameter control1: 0x820~0x823, 0x828~0x82b,
- * 0x830~0x833, 0x838~0x83b [16 bytes]
- */
- u32 rfHSSIPara1;
- /* wire parameter control2: 0x824~0x827, 0x82c~0x82f,
- * 0x834~0x837, 0x83c~0x83f [16 bytes]
- */
- u32 rfHSSIPara2;
- /* Tx Rx antenna control: 0x858~0x85f [16 bytes] */
- u32 rfSwitchControl;
- /* AGC parameter control1: 0xc50~0xc53, 0xc58~0xc5b,
- * 0xc60~0xc63, 0xc68~0xc6b [16 bytes]
- */
- u32 rfAGCControl1;
- /* AGC parameter control2: 0xc54~0xc57, 0xc5c~0xc5f,
- * 0xc64~0xc67, 0xc6c~0xc6f [16 bytes]
- */
- u32 rfAGCControl2;
- /* OFDM Rx IQ imbalance matrix: 0xc14~0xc17, 0xc1c~0xc1f,
- * 0xc24~0xc27, 0xc2c~0xc2f [16 bytes]
- */
- u32 rfRxIQImbalance;
- /* Rx IQ DC offset and Rx digital filter, Rx DC notch filter:
- * 0xc10~0xc13, 0xc18~0xc1b,
- * 0xc20~0xc23, 0xc28~0xc2b [16 bytes]
- */
- u32 rfRxAFE;
- /* OFDM Tx IQ imbalance matrix: 0xc80~0xc83, 0xc88~0xc8b,
- * 0xc90~0xc93, 0xc98~0xc9b [16 bytes]
- */
- u32 rfTxIQImbalance;
- /* Tx IQ DC Offset and Tx DFIR type:
- * 0xc84~0xc87, 0xc8c~0xc8f,
- * 0xc94~0xc97, 0xc9c~0xc9f [16 bytes]
- */
- u32 rfTxAFE;
- /* LSSI RF readback data: 0x8a0~0x8af [16 bytes] */
- u32 rfLSSIReadBack;
-} BB_REGISTER_DEFINITION_T, *PBB_REGISTER_DEFINITION_T;
-
-typedef enum _RT_RF_TYPE_819xU {
- RF_TYPE_MIN = 0,
- RF_8225,
- RF_8256,
- RF_8258,
- RF_PSEUDO_11N = 4,
-} RT_RF_TYPE_819xU, *PRT_RF_TYPE_819xU;
-
-/* 2007/10/08 MH Define RATR state. */
-enum dynamic_ratr_state {
- DM_RATR_STA_HIGH = 0,
- DM_RATR_STA_MIDDLE = 1,
- DM_RATR_STA_LOW = 2,
- DM_RATR_STA_MAX
-};
-
-typedef struct _rate_adaptive {
- u8 rate_adaptive_disabled;
- enum dynamic_ratr_state ratr_state;
- u16 reserve;
-
- u32 high_rssi_thresh_for_ra;
- u32 high2low_rssi_thresh_for_ra;
- u8 low2high_rssi_thresh_for_ra40M;
- u32 low_rssi_thresh_for_ra40M;
- u8 low2high_rssi_thresh_for_ra20M;
- u32 low_rssi_thresh_for_ra20M;
- u32 upper_rssi_threshold_ratr;
- u32 middle_rssi_threshold_ratr;
- u32 low_rssi_threshold_ratr;
- u32 low_rssi_threshold_ratr_40M;
- u32 low_rssi_threshold_ratr_20M;
- u8 ping_rssi_enable;
- u32 ping_rssi_ratr;
- u32 ping_rssi_thresh_for_ra;
- u32 last_ratr;
-
-} rate_adaptive, *prate_adaptive;
-
-#define TxBBGainTableLength 37
-#define CCKTxBBGainTableLength 23
-
-typedef struct _txbbgain_struct {
- long txbb_iq_amplifygain;
- u32 txbbgain_value;
-} txbbgain_struct, *ptxbbgain_struct;
-
-typedef struct _ccktxbbgain_struct {
- /* The value is from a22 to a29; one byte at a time is much safer */
- u8 ccktxbb_valuearray[8];
-} ccktxbbgain_struct, *pccktxbbgain_struct;
-
-typedef struct _init_gain {
- u8 xaagccore1;
- u8 xbagccore1;
- u8 xcagccore1;
- u8 xdagccore1;
- u8 cca;
-
-} init_gain, *pinit_gain;
-
-typedef struct _phy_ofdm_rx_status_report_819xusb {
- u8 trsw_gain_X[4];
- u8 pwdb_all;
- u8 cfosho_X[4];
- u8 cfotail_X[4];
- u8 rxevm_X[2];
- u8 rxsnr_X[4];
- u8 pdsnr_X[2];
- u8 csi_current_X[2];
- u8 csi_target_X[2];
- u8 sigevm;
- u8 max_ex_pwr;
- u8 sgi_en;
- u8 rxsc_sgien_exflg;
-} phy_sts_ofdm_819xusb_t;
-
-typedef struct _phy_cck_rx_status_report_819xusb {
- /* For the CCK rate descriptor. This is an unsigned 8:1 fixed-point
- * value: the LSB represents 0.5 and the upper 7 bits represent a
- * signed value. Range is -64 to +63.5.
- */
- u8 adc_pwdb_X[4];
- u8 sq_rpt;
- u8 cck_agc_rpt;
-} phy_sts_cck_819xusb_t;
-
-struct phy_ofdm_rx_status_rxsc_sgien_exintfflag {
- u8 reserved:4;
- u8 rxsc:2;
- u8 sgi_en:1;
- u8 ex_intf_flag:1;
-};
-
-typedef enum _RT_CUSTOMER_ID {
- RT_CID_DEFAULT = 0,
- RT_CID_8187_ALPHA0 = 1,
- RT_CID_8187_SERCOMM_PS = 2,
- RT_CID_8187_HW_LED = 3,
- RT_CID_8187_NETGEAR = 4,
- RT_CID_WHQL = 5,
- RT_CID_819x_CAMEO = 6,
- RT_CID_819x_RUNTOP = 7,
- RT_CID_819x_Senao = 8,
- RT_CID_TOSHIBA = 9,
- RT_CID_819x_Netcore = 10,
- RT_CID_Nettronix = 11,
- RT_CID_DLINK = 12,
- RT_CID_PRONET = 13,
-} RT_CUSTOMER_ID, *PRT_CUSTOMER_ID;
-
-/*
- * ==========================================================================
- * LED customization.
- * ==========================================================================
- */
-
-typedef enum _LED_STRATEGY_8190 {
- SW_LED_MODE0, /* SW control 1 LED via GPIO0. It is the default option. */
- SW_LED_MODE1, /* SW control for PCI Express */
- SW_LED_MODE2, /* SW control for Cameo. */
- SW_LED_MODE3, /* SW control for RunTop. */
- SW_LED_MODE4, /* SW control for Netcore. */
- /* HW control 2 LEDs, LED0 and LED1 (4 different control modes) */
- HW_LED,
-} LED_STRATEGY_8190, *PLED_STRATEGY_8190;
-
-typedef enum _RESET_TYPE {
- RESET_TYPE_NORESET = 0x00,
- RESET_TYPE_NORMAL = 0x01,
- RESET_TYPE_SILENT = 0x02
-} RESET_TYPE;
-
-/* The simple tx command OP code. */
-typedef enum _tag_TxCmd_Config_Index {
- TXCMD_TXRA_HISTORY_CTRL = 0xFF900000,
- TXCMD_RESET_TX_PKT_BUFF = 0xFF900001,
- TXCMD_RESET_RX_PKT_BUFF = 0xFF900002,
- TXCMD_SET_TX_DURATION = 0xFF900003,
- TXCMD_SET_RX_RSSI = 0xFF900004,
- TXCMD_SET_TX_PWR_TRACKING = 0xFF900005,
- TXCMD_XXXX_CTRL,
-} DCMD_TXCMD_OP;
-
-enum version_819xu {
- VERSION_819XU_A, // A-cut
- VERSION_819XU_B, // B-cut
- VERSION_819XU_C, // C-cut
-};
-
-// Added for different RF type
-enum rt_rf_type {
- RF_1T2R = 0,
- RF_2T4R,
-};
-
-typedef struct r8192_priv {
- struct usb_device *udev;
- /* For maintain info from eeprom */
- short epromtype;
- u16 eeprom_vid;
- u16 eeprom_pid;
- u8 eeprom_CustomerID;
- u8 eeprom_ChannelPlan;
- RT_CUSTOMER_ID CustomerID;
- LED_STRATEGY_8190 LedStrategy;
- u8 txqueue_to_outpipemap[9];
- int irq;
- struct ieee80211_device *ieee80211;
-
- /* 0: rtl8192, 1: rtl8185 V B/C, 2: rtl8185 V D */
- short card_8192;
- /* If TCR reports card V B/C, this discriminates */
- enum version_819xu card_8192_version;
- short enable_gpio0;
- enum card_type {
- PCI, MINIPCI, CARDBUS, USB
- } card_type;
- short hw_plcp_len;
- short plcp_preamble_mode;
-
- spinlock_t irq_lock;
- spinlock_t tx_lock;
- struct mutex mutex;
-
- u16 irq_mask;
- short chan;
- short sens;
- short max_sens;
-
- short up;
- /* If 1, allow frames with bad CRC to be received in monitor mode */
- short crcmon;
-
- struct mutex wx_mutex;
-
- enum rt_rf_type rf_type; /* 0: 1T2R, 1: 2T4R */
- RT_RF_TYPE_819xU rf_chip;
-
- short (*rf_set_sens)(struct net_device *dev, short sens);
- u8 (*rf_set_chan)(struct net_device *dev, u8 ch);
- void (*rf_close)(struct net_device *dev);
- void (*rf_init)(struct net_device *dev);
- short promisc;
- /* Stats */
- struct Stats stats;
- struct iw_statistics wstats;
-
- /* RX stuff */
- struct urb **rx_urb;
- struct urb **rx_cmd_urb;
-#ifdef THOMAS_BEACON
- u32 *oldaddr;
-#endif
-#ifdef THOMAS_TASKLET
- atomic_t irt_counter; /* count for irq_rx_tasklet */
-#endif
-#ifdef JACKSON_NEW_RX
- struct sk_buff **pp_rxskb;
- int rx_inx;
-#endif
-
- struct sk_buff_head rx_queue;
- struct sk_buff_head skb_queue;
- struct work_struct qos_activate;
- short tx_urb_index;
- atomic_t tx_pending[0x10]; /* UART_PRIORITY + 1 */
-
- struct tasklet_struct irq_rx_tasklet;
- struct urb *rxurb_task;
-
- /* Tx Related variables */
- u16 ShortRetryLimit;
- u16 LongRetryLimit;
- u32 TransmitConfig;
- u8 RegCWinMin; /* For turbo mode CW adaptive */
-
- u32 LastRxDescTSFHigh;
- u32 LastRxDescTSFLow;
-
- /* Rx Related variables */
- u16 EarlyRxThreshold;
- u32 ReceiveConfig;
- u8 AcmControl;
-
- u8 RFProgType;
-
- u8 retry_data;
- u8 retry_rts;
- u16 rts;
-
- struct ChnlAccessSetting ChannelAccessSetting;
- struct work_struct reset_wq;
-
-/**********************************************************/
- /* For rtl819xUsb */
- u16 basic_rate;
- u8 short_preamble;
- u8 slot_time;
- bool bDcut;
- bool bCurrentRxAggrEnable;
- enum rf_op_type Rf_Mode; /* For Firmware RF -R/W switch */
- prt_firmware pFirmware;
- rtl819xUsb_loopback_e LoopbackMode;
- u16 EEPROMTxPowerDiff;
- u8 EEPROMThermalMeter;
- u8 EEPROMPwDiff;
- u8 EEPROMCrystalCap;
- u8 EEPROM_Def_Ver;
- u8 EEPROMTxPowerLevelCCK; /* CCK channel 1~14 */
- u8 EEPROMTxPowerLevelCCK_V1[3];
- u8 EEPROMTxPowerLevelOFDM24G[3]; /* OFDM 2.4G channel 1~14 */
- u8 EEPROMTxPowerLevelOFDM5G[24]; /* OFDM 5G */
-
- /* PHY related */
- BB_REGISTER_DEFINITION_T PHYRegDef[4]; /* Radio A/B/C/D */
- /* Read/write is allowed for the following hardware information variables */
- u32 MCSTxPowerLevelOriginalOffset[6];
- u32 CCKTxPowerLevelOriginalOffset;
- u8 TxPowerLevelCCK[14]; /* CCK channel 1~14 */
- u8 TxPowerLevelOFDM24G[14]; /* OFDM 2.4G channel 1~14 */
- u8 TxPowerLevelOFDM5G[14]; /* OFDM 5G */
- u32 Pwr_Track;
- u8 TxPowerDiff;
- u8 AntennaTxPwDiff[2]; /* Antenna gain offset, 0: B, 1: C, 2: D */
- u8 CrystalCap;
- u8 ThermalMeter[2]; /* index 0: RFIC0, index 1: RFIC1 */
-
- u8 CckPwEnl;
- /* Used to calculate PWDB */
- u8 bCckHighPower;
- long undecorated_smoothed_pwdb;
-
- /* For set channel */
- u8 SwChnlInProgress;
- u8 SwChnlStage;
- u8 SwChnlStep;
- u8 SetBWModeInProgress;
- enum ht_channel_width CurrentChannelBW;
- u8 ChannelPlan;
- /* 8190 40MHz mode */
- /* Control channel sub-carrier */
- u8 nCur40MhzPrimeSC;
- /* Used to shorten RF configuration time.
- * We save RF reg0 in this variable to reduce RF reading.
- */
- u32 RfReg0Value[4];
- u8 NumTotalRFPath;
- bool brfpath_rxenable[4];
- /* RF set related */
- bool SetRFPowerStateInProgress;
- struct timer_list watch_dog_timer;
-
- /* For dynamic mechanism */
- /* Tx Power Control for Near/Far Range */
- bool bdynamic_txpower;
- bool bDynamicTxHighPower;
- bool bDynamicTxLowPower;
- bool bLastDTPFlag_High;
- bool bLastDTPFlag_Low;
-
- bool bstore_last_dtpflag;
- /* Used to discriminate between high power state and
- * site survey when changing the Tx gain index
- */
- bool bstart_txctrl_bydtp;
- rate_adaptive rate_adaptive;
- /* TX power tracking
- * OPEN/CLOSE TX POWER TRACKING
- */
- txbbgain_struct txbbgain_table[TxBBGainTableLength];
- u8 txpower_count; /* Do tracking again every 6 seconds */
- bool btxpower_trackingInit;
- u8 OFDM_index;
- u8 CCK_index;
- /* CCK TX Power Tracking */
- ccktxbbgain_struct cck_txbbgain_table[CCKTxBBGainTableLength];
- ccktxbbgain_struct cck_txbbgain_ch14_table[CCKTxBBGainTableLength];
- u8 rfa_txpowertrackingindex;
- u8 rfa_txpowertrackingindex_real;
- u8 rfa_txpowertracking_default;
- u8 rfc_txpowertrackingindex;
- u8 rfc_txpowertrackingindex_real;
-
- s8 cck_present_attenuation;
- u8 cck_present_attenuation_20Mdefault;
- u8 cck_present_attenuation_40Mdefault;
- s8 cck_present_attenuation_difference;
- bool btxpower_tracking;
- bool bcck_in_ch14;
- bool btxpowerdata_readfromEEPORM;
- u16 TSSI_13dBm;
- init_gain initgain_backup;
- u8 DefaultInitialGain[4];
- /* For EDCA Turbo mode */
- bool bis_any_nonbepkts;
- bool bcurrent_turbo_EDCA;
- bool bis_cur_rdlstate;
- struct delayed_work fsync_work;
- bool bfsync_processing; /* 500ms Fsync timer is active or not */
- u32 rate_record;
- u32 rateCountDiffRecord;
- u32 ContinueDiffCount;
- bool bswitch_fsync;
-
- u8 framesync;
- u32 framesyncC34;
- u8 framesyncMonitor;
- u16 nrxAMPDU_size;
- u8 nrxAMPDU_aggr_num;
-
- /* For gpio */
- bool bHwRadioOff;
-
- u32 reset_count;
- bool bpbc_pressed;
- u32 txpower_checkcnt;
- u32 txpower_tracking_callback_cnt;
- u8 thermal_read_val[40];
- u8 thermal_readback_index;
- u32 ccktxpower_adjustcnt_not_ch14;
- u32 ccktxpower_adjustcnt_ch14;
- u8 tx_fwinfo_force_subcarriermode;
- u8 tx_fwinfo_force_subcarrierval;
- /* For silent reset */
- RESET_TYPE ResetProgress;
- bool bForcedSilentReset;
- bool bDisableNormalResetCheck;
- u16 TxCounter;
- u16 RxCounter;
- int IrpPendingCount;
- bool bResetInProgress;
- bool force_reset;
- u8 InitialGainOperateType;
-
- u16 SifsTime;
-
- /* Define work item */
-
- struct delayed_work update_beacon_wq;
- struct delayed_work watch_dog_wq;
- struct delayed_work txpower_tracking_wq;
- struct delayed_work rfpath_check_wq;
- struct delayed_work gpio_change_rf_wq;
- struct delayed_work initialgain_operate_wq;
- struct workqueue_struct *priv_wq;
-
- /* debugfs */
- struct dentry *debugfs_dir;
-} r8192_priv;
-
-/* For rtl8187B */
-typedef enum{
- BULK_PRIORITY = 0x01,
- LOW_PRIORITY,
- NORM_PRIORITY,
- VO_PRIORITY,
- VI_PRIORITY,
- BE_PRIORITY,
- BK_PRIORITY,
- RSVD2,
- RSVD3,
- BEACON_PRIORITY,
- HIGH_PRIORITY,
- MANAGE_PRIORITY,
- RSVD4,
- RSVD5,
- UART_PRIORITY
-} priority_t;
-
-typedef enum {
- NIC_8192U = 1,
- NIC_8190P = 2,
- NIC_8192E = 3,
-} nic_t;
-
-bool init_firmware(struct net_device *dev);
-short rtl819xU_tx_cmd(struct net_device *dev, struct sk_buff *skb);
-short rtl8192_tx(struct net_device *dev, struct sk_buff *skb);
-
-int read_nic_byte(struct net_device *dev, int x, u8 *data);
-int read_nic_byte_E(struct net_device *dev, int x, u8 *data);
-int read_nic_dword(struct net_device *dev, int x, u32 *data);
-int read_nic_word(struct net_device *dev, int x, u16 *data);
-int write_nic_byte(struct net_device *dev, int x, u8 y);
-int write_nic_byte_E(struct net_device *dev, int x, u8 y);
-int write_nic_word(struct net_device *dev, int x, u16 y);
-int write_nic_dword(struct net_device *dev, int x, u32 y);
-void force_pci_posting(struct net_device *dev);
-
-void rtl8192_rtx_disable(struct net_device *dev);
-void rtl8192_rx_enable(struct net_device *dev);
-
-void rtl8192_update_msr(struct net_device *dev);
-int rtl8192_down(struct net_device *dev);
-int rtl8192_up(struct net_device *dev);
-void rtl8192_commit(struct net_device *dev);
-void rtl8192_set_chan(struct net_device *dev, short ch);
-void rtl8192_set_rxconf(struct net_device *dev);
-void rtl819xusb_beacon_tx(struct net_device *dev, u16 tx_rate);
-
-void EnableHWSecurityConfig8192(struct net_device *dev);
-void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
- const u8 *MacAddr, u8 DefaultKey, u32 *KeyContent);
-
-void rtl8192_debugfs_init_one(struct net_device *dev);
-void rtl8192_debugfs_exit_one(struct net_device *dev);
-void rtl8192_debugfs_rename_one(struct net_device *dev);
-void rtl8192_debugfs_init(void);
-void rtl8192_debugfs_exit(void);
-
-#endif
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
deleted file mode 100644
index 0a60ef20107c..000000000000
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ /dev/null
@@ -1,4800 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
- * Linux device driver for RTL8192U
- *
- * Based on the r8187 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
- *
- * Contact Information:
- * Jerry chuang <wlanfae@realtek.com>
- */
-
-#ifndef CONFIG_FORCE_HARD_FLOAT
-double __floatsidf(int i)
-{
- return i;
-}
-
-unsigned int __fixunsdfsi(double d)
-{
- return d;
-}
-
-double __adddf3(double a, double b)
-{
- return a + b;
-}
-
-double __addsf3(float a, float b)
-{
- return a + b;
-}
-
-double __subdf3(double a, double b)
-{
- return a - b;
-}
-
-double __extendsfdf2(float a)
-{
- return a;
-}
-#endif
-
-#define CONFIG_RTL8192_IO_MAP
-
-#include <linux/uaccess.h>
-#include "r8192U_hw.h"
-#include "r8192U.h"
-#include "r8190_rtl8256.h" /* RTL8225 Radio frontend */
-#include "r8180_93cx6.h" /* Card EEPROM */
-#include "r8192U_wx.h"
-#include "r819xU_phy.h"
-#include "r819xU_phyreg.h"
-#include "r819xU_cmdpkt.h"
-#include "r8192U_dm.h"
-#include <linux/usb.h>
-#include <linux/slab.h>
-#include <linux/seq_file.h>
-/* FIXME: check if 2.6.7 is ok */
-
-#include "ieee80211/dot11d.h"
-/* Set here to enable your trace code. */
-u32 rt_global_debug_component = COMP_DOWN |
- COMP_SEC |
- COMP_ERR; /* always keep the err flag on */
-
-#define TOTAL_CAM_ENTRY 32
-#define CAM_CONTENT_COUNT 8
-
-static const struct usb_device_id rtl8192_usb_id_tbl[] = {
- /* Realtek */
- {USB_DEVICE(0x0bda, 0x8709)},
- /* Corega */
- {USB_DEVICE(0x07aa, 0x0043)},
- /* Belkin */
- {USB_DEVICE(0x050d, 0x805E)},
- /* Sitecom */
- {USB_DEVICE(0x0df6, 0x0031)},
- /* EnGenius */
- {USB_DEVICE(0x1740, 0x9201)},
- /* Dlink */
- {USB_DEVICE(0x2001, 0x3301)},
- /* Zinwell */
- {USB_DEVICE(0x5a57, 0x0290)},
- /* LG */
- {USB_DEVICE(0x043e, 0x7a01)},
- {}
-};
-
-MODULE_LICENSE("GPL");
-MODULE_VERSION("V 1.1");
-MODULE_DEVICE_TABLE(usb, rtl8192_usb_id_tbl);
-MODULE_DESCRIPTION("Linux driver for Realtek RTL8192 USB WiFi cards");
-
-static char *ifname = "wlan%d";
-static int hwwep = 1; /* default use hw. set 0 to use software security */
-
-module_param(ifname, charp, 0644);
-module_param(hwwep, int, 0644);
-
-MODULE_PARM_DESC(ifname, " Net interface name, wlan%d=default");
-MODULE_PARM_DESC(hwwep, " Try to use hardware security support. ");
-
-static int rtl8192_usb_probe(struct usb_interface *intf,
- const struct usb_device_id *id);
-static void rtl8192_usb_disconnect(struct usb_interface *intf);
-
-static struct usb_driver rtl8192_usb_driver = {
- .name = RTL819XU_MODULE_NAME, /* Driver name */
- .id_table = rtl8192_usb_id_tbl, /* USB ID table */
- .probe = rtl8192_usb_probe, /* probe fn */
- .disconnect = rtl8192_usb_disconnect, /* remove fn */
- .suspend = NULL, /* PM suspend fn */
- .resume = NULL, /* PM resume fn */
-};
-
-struct CHANNEL_LIST {
- u8 Channel[32];
- u8 Len;
-};
-
-static struct CHANNEL_LIST ChannelPlan[] = {
- /* FCC */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 36, 40, 44, 48, 52, 56, 60, 64, 149, 153, 157, 161, 165}, 24},
- /* IC */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11},
- /* ETSI */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21},
- /* Spain. Change to ETSI. */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13},
- /* France. Change to ETSI. */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13},
- /* MKK */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52, 56, 60, 64}, 22},
- /* MKK1 */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52, 56, 60, 64}, 22},
- /* Israel. */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13},
- /* For 11a , TELEC */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52, 56, 60, 64}, 22},
- /* MIC */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52, 56, 60, 64}, 22},
- /* For Global Domain. 1-11:active scan, 12-14 passive scan. */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 14}
-};
-
-static void rtl819x_set_channel_map(u8 channel_plan, struct r8192_priv *priv)
-{
- int i, max_chan = -1, min_chan = -1;
- struct ieee80211_device *ieee = priv->ieee80211;
-
- switch (channel_plan) {
- case COUNTRY_CODE_FCC:
- case COUNTRY_CODE_IC:
- case COUNTRY_CODE_ETSI:
- case COUNTRY_CODE_SPAIN:
- case COUNTRY_CODE_FRANCE:
- case COUNTRY_CODE_MKK:
- case COUNTRY_CODE_MKK1:
- case COUNTRY_CODE_ISRAEL:
- case COUNTRY_CODE_TELEC:
- case COUNTRY_CODE_MIC:
- rtl8192u_dot11d_init(ieee);
- ieee->bGlobalDomain = false;
- /* actually 8225 & 8256 rf chips only support B,G,24N mode */
- if ((priv->rf_chip == RF_8225) || (priv->rf_chip == RF_8256)) {
- min_chan = 1;
- max_chan = 14;
- } else {
- RT_TRACE(COMP_ERR,
- "unknown rf chip, can't set channel map in function:%s()\n",
- __func__);
- }
- if (ChannelPlan[channel_plan].Len != 0) {
- /* Clear old channel map */
- memset(GET_DOT11D_INFO(ieee)->channel_map, 0,
- sizeof(GET_DOT11D_INFO(ieee)->channel_map));
- /* Set new channel map */
- for (i = 0; i < ChannelPlan[channel_plan].Len; i++) {
- if (ChannelPlan[channel_plan].Channel[i] < min_chan || ChannelPlan[channel_plan].Channel[i] > max_chan)
- break;
- GET_DOT11D_INFO(ieee)->channel_map[ChannelPlan[channel_plan].Channel[i]] = 1;
- }
- }
- break;
-
- case COUNTRY_CODE_GLOBAL_DOMAIN:
- /* If this flag is enabled, follow the 11d country IE setting;
- * otherwise, follow the global domain settings.
- */
- GET_DOT11D_INFO(ieee)->dot11d_enabled = 0;
- dot11d_reset(ieee);
- ieee->bGlobalDomain = true;
- break;
-
- default:
- break;
- }
-}
-
-static void CamResetAllEntry(struct net_device *dev)
-{
- u32 ulcommand = 0;
- /* In static WEP, OID_ADD_KEY or OID_ADD_WEP are set before the STA
- * associates to the AP. However, ResetKey is called on
- * OID_802_11_INFRASTRUCTURE_MODE and MlmeAssociateRequest. In this
- * condition, the CAM cannot be reset because the upper layer will not
- * set this static key again.
- */
- ulcommand |= BIT(31) | BIT(30);
- write_nic_dword(dev, RWCAM, ulcommand);
-}
-
-int write_nic_byte_E(struct net_device *dev, int indx, u8 data)
-{
- int status;
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- struct usb_device *udev = priv->udev;
- u8 *usbdata = kzalloc(sizeof(data), GFP_KERNEL);
-
- if (!usbdata)
- return -ENOMEM;
- *usbdata = data;
-
- status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- RTL8187_REQ_SET_REGS, RTL8187_REQT_WRITE,
- indx | 0xfe00, 0, usbdata, 1, 500);
- kfree(usbdata);
-
- if (status < 0) {
- netdev_err(dev, "%s TimeOut! status: %d\n", __func__, status);
- return status;
- }
- return 0;
-}
-
-int read_nic_byte_E(struct net_device *dev, int indx, u8 *data)
-{
- int status;
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- struct usb_device *udev = priv->udev;
- u8 *usbdata = kzalloc(sizeof(u8), GFP_KERNEL);
-
- if (!usbdata)
- return -ENOMEM;
-
- status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
- indx | 0xfe00, 0, usbdata, 1, 500);
- *data = *usbdata;
- kfree(usbdata);
-
- if (status < 0) {
- netdev_err(dev, "%s failure status: %d\n", __func__, status);
- return status;
- }
-
- return 0;
-}
-
-/* As the 92U has extended pages from 4 to 16, the functions below are modified accordingly. */
-int write_nic_byte(struct net_device *dev, int indx, u8 data)
-{
- int status;
-
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- struct usb_device *udev = priv->udev;
- u8 *usbdata = kzalloc(sizeof(data), GFP_KERNEL);
-
- if (!usbdata)
- return -ENOMEM;
- *usbdata = data;
-
- status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- RTL8187_REQ_SET_REGS, RTL8187_REQT_WRITE,
- (indx & 0xff) | 0xff00, (indx >> 8) & 0x0f,
- usbdata, 1, 500);
- kfree(usbdata);
-
- if (status < 0) {
- netdev_err(dev, "%s TimeOut! status: %d\n", __func__, status);
- return status;
- }
-
- return 0;
-}
-
-int write_nic_word(struct net_device *dev, int indx, u16 data)
-{
- int status;
-
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- struct usb_device *udev = priv->udev;
- u16 *usbdata = kzalloc(sizeof(data), GFP_KERNEL);
-
- if (!usbdata)
- return -ENOMEM;
- *usbdata = data;
-
- status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- RTL8187_REQ_SET_REGS, RTL8187_REQT_WRITE,
- (indx & 0xff) | 0xff00, (indx >> 8) & 0x0f,
- usbdata, 2, 500);
- kfree(usbdata);
-
- if (status < 0) {
- netdev_err(dev, "%s TimeOut! status: %d\n", __func__, status);
- return status;
- }
-
- return 0;
-}
-
-int write_nic_dword(struct net_device *dev, int indx, u32 data)
-{
- int status;
-
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- struct usb_device *udev = priv->udev;
- u32 *usbdata = kzalloc(sizeof(data), GFP_KERNEL);
-
- if (!usbdata)
- return -ENOMEM;
- *usbdata = data;
-
- status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- RTL8187_REQ_SET_REGS, RTL8187_REQT_WRITE,
- (indx & 0xff) | 0xff00, (indx >> 8) & 0x0f,
- usbdata, 4, 500);
- kfree(usbdata);
-
- if (status < 0) {
- netdev_err(dev, "%s TimeOut! status: %d\n", __func__, status);
- return status;
- }
-
- return 0;
-}
-
-int read_nic_byte(struct net_device *dev, int indx, u8 *data)
-{
- int status;
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- struct usb_device *udev = priv->udev;
- u8 *usbdata = kzalloc(sizeof(u8), GFP_KERNEL);
-
- if (!usbdata)
- return -ENOMEM;
-
- status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
- (indx & 0xff) | 0xff00, (indx >> 8) & 0x0f,
- usbdata, 1, 500);
- *data = *usbdata;
- kfree(usbdata);
-
- if (status < 0) {
- netdev_err(dev, "%s failure status: %d\n", __func__, status);
- return status;
- }
-
- return 0;
-}
-
-int read_nic_word(struct net_device *dev, int indx, u16 *data)
-{
- int status;
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- struct usb_device *udev = priv->udev;
- u16 *usbdata = kzalloc(sizeof(u16), GFP_KERNEL);
-
- if (!usbdata)
- return -ENOMEM;
-
- status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
- (indx & 0xff) | 0xff00, (indx >> 8) & 0x0f,
- usbdata, 2, 500);
- *data = *usbdata;
- kfree(usbdata);
-
- if (status < 0) {
- netdev_err(dev, "%s failure status: %d\n", __func__, status);
- return status;
- }
-
- return 0;
-}
-
-static int read_nic_word_E(struct net_device *dev, int indx, u16 *data)
-{
- int status;
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- struct usb_device *udev = priv->udev;
- u16 *usbdata = kzalloc(sizeof(u16), GFP_KERNEL);
-
- if (!usbdata)
- return -ENOMEM;
-
- status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
- indx | 0xfe00, 0, usbdata, 2, 500);
- *data = *usbdata;
- kfree(usbdata);
-
- if (status < 0) {
- netdev_err(dev, "%s failure status: %d\n", __func__, status);
- return status;
- }
-
- return 0;
-}
-
-int read_nic_dword(struct net_device *dev, int indx, u32 *data)
-{
- int status;
-
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- struct usb_device *udev = priv->udev;
- u32 *usbdata = kzalloc(sizeof(u32), GFP_KERNEL);
-
- if (!usbdata)
- return -ENOMEM;
-
- status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
- (indx & 0xff) | 0xff00, (indx >> 8) & 0x0f,
- usbdata, 4, 500);
- *data = *usbdata;
- kfree(usbdata);
-
- if (status < 0) {
- netdev_err(dev, "%s failure status: %d\n", __func__, status);
- return status;
- }
-
- return 0;
-}
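Each of the *_nic_* accessors above follows the same pattern: allocate a heap bounce buffer (usb_control_msg() must not be handed stack memory), issue a vendor control transfer, copy the result out, and free the buffer. Newer kernels provide usb_control_msg_recv()/usb_control_msg_send(), which manage the bounce buffer internally; a hedged sketch of the byte read written that way (read_nic_byte_sketch is a hypothetical name, not part of this driver):

/* Sketch: the same register read without an explicit bounce buffer. */
static int read_nic_byte_sketch(struct net_device *dev, int indx, u8 *data)
{
	struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
	struct usb_device *udev = priv->udev;

	/* usb_control_msg_recv() allocates and frees the DMA buffer itself
	 * and returns 0 on success or a negative error code.
	 */
	return usb_control_msg_recv(udev, 0,
				    RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
				    (indx & 0xff) | 0xff00, (indx >> 8) & 0x0f,
				    data, 1, 500, GFP_KERNEL);
}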
-
-/* u8 read_phy_cck(struct net_device *dev, u8 adr); */
-/* u8 read_phy_ofdm(struct net_device *dev, u8 adr); */
-/* This might still be called from what was the rtl8185/rtl8192 common PHY
- * code; the plan is to possibly turn it into common code again.
- */
-inline void force_pci_posting(struct net_device *dev)
-{
-}
-
-static struct net_device_stats *rtl8192_stats(struct net_device *dev);
-static void rtl8192_restart(struct work_struct *work);
-static void watch_dog_timer_callback(struct timer_list *t);
-
-/****************************************************************************
- * -----------------------------MISC STUFF-------------------------
- *****************************************************************************/
-
-short check_nic_enough_desc(struct net_device *dev, int queue_index)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- int used = atomic_read(&priv->tx_pending[queue_index]);
-
- return (used < MAX_TX_URB);
-}
-
-static void tx_timeout(struct net_device *dev, unsigned int txqueue)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- schedule_work(&priv->reset_wq);
-}
-
-void rtl8192_update_msr(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 msr;
-
- read_nic_byte(dev, MSR, &msr);
- msr &= ~MSR_LINK_MASK;
-
- /* do not change in link_state != WLAN_LINK_ASSOCIATED.
- * msr must be updated if the state is ASSOCIATING.
- * this is intentional and makes sense for ad-hoc and
- * master (see the create BSS/IBSS func)
- */
- if (priv->ieee80211->state == IEEE80211_LINKED) {
- if (priv->ieee80211->iw_mode == IW_MODE_INFRA)
- msr |= (MSR_LINK_MANAGED << MSR_LINK_SHIFT);
- else if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
- msr |= (MSR_LINK_ADHOC << MSR_LINK_SHIFT);
- else if (priv->ieee80211->iw_mode == IW_MODE_MASTER)
- msr |= (MSR_LINK_MASTER << MSR_LINK_SHIFT);
-
- } else {
- msr |= (MSR_LINK_NONE << MSR_LINK_SHIFT);
- }
-
- write_nic_byte(dev, MSR, msr);
-}
-
-void rtl8192_set_chan(struct net_device *dev, short ch)
-{
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
-
- RT_TRACE(COMP_CH, "=====>%s()====ch:%d\n", __func__, ch);
- priv->chan = ch;
-
- /* this hack should avoid frame TX during channel setting */
-
- /* need to implement rf set channel here */
-
- if (priv->rf_set_chan)
- priv->rf_set_chan(dev, priv->chan);
- mdelay(10);
-}
-
-static void rtl8192_rx_isr(struct urb *urb);
-
-static u32 get_rxpacket_shiftbytes_819xusb(struct ieee80211_rx_stats *pstats)
-{
- return (sizeof(struct rx_desc_819x_usb) + pstats->RxDrvInfoSize
- + pstats->RxBufShift);
-}
-
-void rtl8192_rx_enable(struct net_device *dev)
-{
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- struct urb *entry;
- struct sk_buff *skb;
- struct rtl8192_rx_info *info;
-
- /* normal packet rx procedure */
- while (skb_queue_len(&priv->rx_queue) < MAX_RX_URB) {
- skb = __dev_alloc_skb(RX_URB_SIZE, GFP_KERNEL);
- if (!skb)
- break;
- entry = usb_alloc_urb(0, GFP_KERNEL);
- if (!entry) {
- kfree_skb(skb);
- break;
- }
- usb_fill_bulk_urb(entry, priv->udev,
- usb_rcvbulkpipe(priv->udev, 3),
- skb_tail_pointer(skb),
- RX_URB_SIZE, rtl8192_rx_isr, skb);
- info = (struct rtl8192_rx_info *)skb->cb;
- info->urb = entry;
- info->dev = dev;
- info->out_pipe = 3; /* denote rx normal packet queue */
- skb_queue_tail(&priv->rx_queue, skb);
- usb_submit_urb(entry, GFP_KERNEL);
- }
-
- /* command packet rx procedure */
- while (skb_queue_len(&priv->rx_queue) < MAX_RX_URB + 3) {
- skb = __dev_alloc_skb(RX_URB_SIZE, GFP_KERNEL);
- if (!skb)
- break;
- entry = usb_alloc_urb(0, GFP_KERNEL);
- if (!entry) {
- kfree_skb(skb);
- break;
- }
- usb_fill_bulk_urb(entry, priv->udev,
- usb_rcvbulkpipe(priv->udev, 9),
- skb_tail_pointer(skb),
- RX_URB_SIZE, rtl8192_rx_isr, skb);
- info = (struct rtl8192_rx_info *)skb->cb;
- info->urb = entry;
- info->dev = dev;
- info->out_pipe = 9; /* denote rx cmd packet queue */
- skb_queue_tail(&priv->rx_queue, skb);
- usb_submit_urb(entry, GFP_KERNEL);
- }
-}
-
-void rtl8192_set_rxconf(struct net_device *dev)
-{
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- u32 rxconf;
-
- read_nic_dword(dev, RCR, &rxconf);
- rxconf = rxconf & ~MAC_FILTER_MASK;
- rxconf = rxconf | RCR_AMF;
- rxconf = rxconf | RCR_ADF;
- rxconf = rxconf | RCR_AB;
- rxconf = rxconf | RCR_AM;
-
- if (dev->flags & IFF_PROMISC)
- DMESG("NIC in promisc mode");
-
- if (priv->ieee80211->iw_mode == IW_MODE_MONITOR ||
- dev->flags & IFF_PROMISC) {
- rxconf = rxconf | RCR_AAP;
- } else {
- rxconf = rxconf | RCR_APM;
- rxconf = rxconf | RCR_CBSSID;
- }
-
- if (priv->ieee80211->iw_mode == IW_MODE_MONITOR) {
- rxconf = rxconf | RCR_AICV;
- rxconf = rxconf | RCR_APWRMGT;
- }
-
- if (priv->crcmon == 1 && priv->ieee80211->iw_mode == IW_MODE_MONITOR)
- rxconf = rxconf | RCR_ACRC32;
-
- rxconf = rxconf & ~RX_FIFO_THRESHOLD_MASK;
- rxconf = rxconf | (RX_FIFO_THRESHOLD_NONE << RX_FIFO_THRESHOLD_SHIFT);
- rxconf = rxconf & ~MAX_RX_DMA_MASK;
- rxconf = rxconf | ((u32)7 << RCR_MXDMA_OFFSET);
-
- rxconf = rxconf | RCR_ONLYERLPKT;
-
- write_nic_dword(dev, RCR, rxconf);
-}
-
-void rtl8192_rtx_disable(struct net_device *dev)
-{
- u8 cmd;
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct sk_buff *skb;
- struct rtl8192_rx_info *info;
-
- read_nic_byte(dev, CMDR, &cmd);
- write_nic_byte(dev, CMDR, cmd & ~(CR_TE | CR_RE));
- force_pci_posting(dev);
- mdelay(10);
-
- while ((skb = __skb_dequeue(&priv->rx_queue))) {
- info = (struct rtl8192_rx_info *)skb->cb;
- if (!info->urb)
- continue;
-
- usb_kill_urb(info->urb);
- kfree_skb(skb);
- }
-
- if (skb_queue_len(&priv->skb_queue))
- netdev_warn(dev, "skb_queue not empty\n");
-
- skb_queue_purge(&priv->skb_queue);
-}
-
-/* The prototype of rx_isr has changed across Linux kernel versions */
-static void rtl8192_rx_isr(struct urb *urb)
-{
- struct sk_buff *skb = (struct sk_buff *)urb->context;
- struct rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
- struct net_device *dev = info->dev;
- struct r8192_priv *priv = ieee80211_priv(dev);
- int out_pipe = info->out_pipe;
- int err;
-
- if (!priv->up)
- return;
-
- if (unlikely(urb->status)) {
- info->urb = NULL;
- priv->stats.rxstaterr++;
- priv->ieee80211->stats.rx_errors++;
- usb_free_urb(urb);
- return;
- }
- skb_unlink(skb, &priv->rx_queue);
- skb_put(skb, urb->actual_length);
-
- skb_queue_tail(&priv->skb_queue, skb);
- tasklet_schedule(&priv->irq_rx_tasklet);
-
- skb = dev_alloc_skb(RX_URB_SIZE);
- if (unlikely(!skb)) {
- usb_free_urb(urb);
- netdev_err(dev, "%s(): can't alloc skb\n", __func__);
- /* TODO check rx queue length and refill *somewhere* */
- return;
- }
-
- usb_fill_bulk_urb(urb, priv->udev,
- usb_rcvbulkpipe(priv->udev, out_pipe),
- skb_tail_pointer(skb),
- RX_URB_SIZE, rtl8192_rx_isr, skb);
-
- info = (struct rtl8192_rx_info *)skb->cb;
- info->urb = urb;
- info->dev = dev;
- info->out_pipe = out_pipe;
-
- urb->transfer_buffer = skb_tail_pointer(skb);
- urb->context = skb;
- skb_queue_tail(&priv->rx_queue, skb);
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err && err != -EPERM)
- netdev_err(dev,
- "can not submit rxurb, err is %x, URB status is %x\n",
- err, urb->status);
-}
-
-static u32 rtl819xusb_rx_command_packet(struct net_device *dev,
- struct ieee80211_rx_stats *pstats)
-{
- u32 status;
-
- status = cmpk_message_handle_rx(dev, pstats);
- if (status)
- DMESG("rxcommandpackethandle819xusb: It is a command packet\n");
-
- return status;
-}
-
-static void rtl8192_data_hard_stop(struct net_device *dev)
-{
- /* FIXME !! */
-}
-
-static void rtl8192_data_hard_resume(struct net_device *dev)
-{
- /* FIXME !! */
-}
-
-/* This function transmits data frames when the ieee80211 stack requires it.
- * It also checks whether the ieee tx queue needs to be stopped and, if so,
- * stops it.
- */
-static void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
- int rate)
-{
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- unsigned long flags;
- struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- u8 queue_index = tcb_desc->queue_index;
-
- /* shall not be referred by command packet */
- RTL8192U_ASSERT(queue_index != TXCMD_QUEUE);
-
- spin_lock_irqsave(&priv->tx_lock, flags);
-
- *(struct net_device **)(skb->cb) = dev;
- tcb_desc->bTxEnableFwCalcDur = 1;
- skb_push(skb, priv->ieee80211->tx_headroom);
- rtl8192_tx(dev, skb);
-
- spin_unlock_irqrestore(&priv->tx_lock, flags);
-}
-
-/* This is a rough attempt to TX a frame.
- * It is called by the ieee80211 stack to TX management frames.
- * If the ring is full, packets are dropped (for data frames the queue
- * is stopped before this can happen).
- */
-static int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- int ret;
- unsigned long flags;
- struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- u8 queue_index = tcb_desc->queue_index;
-
- spin_lock_irqsave(&priv->tx_lock, flags);
-
- memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
- if (queue_index == TXCMD_QUEUE) {
- skb_push(skb, USB_HWDESC_HEADER_LEN);
- rtl819xU_tx_cmd(dev, skb);
- ret = 1;
- } else {
- skb_push(skb, priv->ieee80211->tx_headroom);
- ret = rtl8192_tx(dev, skb);
- }
-
- spin_unlock_irqrestore(&priv->tx_lock, flags);
-
- return ret;
-}
-
-static void rtl8192_tx_isr(struct urb *tx_urb)
-{
- struct sk_buff *skb = (struct sk_buff *)tx_urb->context;
- struct net_device *dev;
- struct r8192_priv *priv = NULL;
- struct cb_desc *tcb_desc;
- u8 queue_index;
-
- if (!skb)
- return;
-
- dev = *(struct net_device **)(skb->cb);
- tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- queue_index = tcb_desc->queue_index;
-
- priv = ieee80211_priv(dev);
-
- if (tcb_desc->queue_index != TXCMD_QUEUE) {
- if (tx_urb->status == 0) {
- netif_trans_update(dev);
- priv->stats.txoktotal++;
- priv->ieee80211->LinkDetectInfo.NumTxOkInPeriod++;
- priv->stats.txbytesunicast +=
- (skb->len - priv->ieee80211->tx_headroom);
- } else {
- priv->ieee80211->stats.tx_errors++;
- /* TODO */
- }
- }
-
- /* free skb and tx_urb */
- dev_kfree_skb_any(skb);
- usb_free_urb(tx_urb);
- atomic_dec(&priv->tx_pending[queue_index]);
-
- /*
- * Handle HW Beacon:
- * Our beacon frame has been transferred to the host controller at
- * this point.
- *
- * Caution:
- * Handling the wait queue of command packets.
- * For Tx command packets, we must not do TCB fragmentation because it
- * is not handled right now. We must cut the packets to match the size
- * of TX_CMD_PKT before we send them.
- */
-
- /* Handle MPDU in wait queue. */
- if (queue_index != BEACON_QUEUE) {
- /* Don't send data frames during scanning. */
- if ((skb_queue_len(&priv->ieee80211->skb_waitQ[queue_index]) != 0) &&
- (!(priv->ieee80211->queue_stop))) {
- skb = skb_dequeue(&(priv->ieee80211->skb_waitQ[queue_index]));
- if (skb)
- priv->ieee80211->softmac_hard_start_xmit(skb,
- dev);
-
- return; /* avoid further processing AMSDU */
- }
- }
-}
-
-static void rtl8192_config_rate(struct net_device *dev, u16 *rate_config)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_network *net;
- u8 i = 0, basic_rate = 0;
-
- net = &priv->ieee80211->current_network;
-
- for (i = 0; i < net->rates_len; i++) {
- basic_rate = net->rates[i] & 0x7f;
- switch (basic_rate) {
- case MGN_1M:
- *rate_config |= RRSR_1M;
- break;
- case MGN_2M:
- *rate_config |= RRSR_2M;
- break;
- case MGN_5_5M:
- *rate_config |= RRSR_5_5M;
- break;
- case MGN_11M:
- *rate_config |= RRSR_11M;
- break;
- case MGN_6M:
- *rate_config |= RRSR_6M;
- break;
- case MGN_9M:
- *rate_config |= RRSR_9M;
- break;
- case MGN_12M:
- *rate_config |= RRSR_12M;
- break;
- case MGN_18M:
- *rate_config |= RRSR_18M;
- break;
- case MGN_24M:
- *rate_config |= RRSR_24M;
- break;
- case MGN_36M:
- *rate_config |= RRSR_36M;
- break;
- case MGN_48M:
- *rate_config |= RRSR_48M;
- break;
- case MGN_54M:
- *rate_config |= RRSR_54M;
- break;
- }
- }
- for (i = 0; i < net->rates_ex_len; i++) {
- basic_rate = net->rates_ex[i] & 0x7f;
- switch (basic_rate) {
- case MGN_1M:
- *rate_config |= RRSR_1M;
- break;
- case MGN_2M:
- *rate_config |= RRSR_2M;
- break;
- case MGN_5_5M:
- *rate_config |= RRSR_5_5M;
- break;
- case MGN_11M:
- *rate_config |= RRSR_11M;
- break;
- case MGN_6M:
- *rate_config |= RRSR_6M;
- break;
- case MGN_9M:
- *rate_config |= RRSR_9M;
- break;
- case MGN_12M:
- *rate_config |= RRSR_12M;
- break;
- case MGN_18M:
- *rate_config |= RRSR_18M;
- break;
- case MGN_24M:
- *rate_config |= RRSR_24M;
- break;
- case MGN_36M:
- *rate_config |= RRSR_36M;
- break;
- case MGN_48M:
- *rate_config |= RRSR_48M;
- break;
- case MGN_54M:
- *rate_config |= RRSR_54M;
- break;
- }
- }
-}
-
-#define SHORT_SLOT_TIME 9
-#define NON_SHORT_SLOT_TIME 20
-
-static void rtl8192_update_cap(struct net_device *dev, u16 cap)
-{
- u32 tmp = 0;
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_network *net = &priv->ieee80211->current_network;
-
- priv->short_preamble = cap & WLAN_CAPABILITY_SHORT_PREAMBLE;
- tmp = priv->basic_rate;
- if (priv->short_preamble)
- tmp |= BRSR_AckShortPmb;
- write_nic_dword(dev, RRSR, tmp);
-
- if (net->mode & (IEEE_G | IEEE_N_24G)) {
- u8 slot_time = 0;
-
- if ((cap & WLAN_CAPABILITY_SHORT_SLOT) &&
- (!priv->ieee80211->pHTInfo->bCurrentRT2RTLongSlotTime))
- /* short slot time */
- slot_time = SHORT_SLOT_TIME;
- else /* long slot time */
- slot_time = NON_SHORT_SLOT_TIME;
- priv->slot_time = slot_time;
- write_nic_byte(dev, SLOT_TIME, slot_time);
- }
-}
-
-static void rtl8192_net_update(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_network *net;
- u16 BcnTimeCfg = 0, BcnCW = 6, BcnIFS = 0xf;
- u16 rate_config = 0;
-
- net = &priv->ieee80211->current_network;
-
- rtl8192_config_rate(dev, &rate_config);
- priv->basic_rate = rate_config & 0x15f;
-
- write_nic_dword(dev, BSSIDR, ((u32 *)net->bssid)[0]);
- write_nic_word(dev, BSSIDR + 4, ((u16 *)net->bssid)[2]);
-
- rtl8192_update_msr(dev);
- if (priv->ieee80211->iw_mode == IW_MODE_ADHOC) {
- write_nic_word(dev, ATIMWND, 2);
- write_nic_word(dev, BCN_DMATIME, 1023);
- write_nic_word(dev, BCN_INTERVAL, net->beacon_interval);
- write_nic_word(dev, BCN_DRV_EARLY_INT, 1);
- write_nic_byte(dev, BCN_ERR_THRESH, 100);
- BcnTimeCfg |= (BcnCW << BCN_TCFG_CW_SHIFT);
- /* TODO: BcnIFS may required to be changed on ASIC */
- BcnTimeCfg |= BcnIFS << BCN_TCFG_IFS;
-
- write_nic_word(dev, BCN_TCFG, BcnTimeCfg);
- }
-}
-
-/* Temporarily the hw beacon is not used any more.
- * Re-enable it when necessary.
- */
-void rtl819xusb_beacon_tx(struct net_device *dev, u16 tx_rate)
-{
-}
-
-short rtl819xU_tx_cmd(struct net_device *dev, struct sk_buff *skb)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- int status;
- struct urb *tx_urb;
- unsigned int idx_pipe;
- struct tx_desc_cmd_819x_usb *pdesc = (struct tx_desc_cmd_819x_usb *)skb->data;
- struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- u8 queue_index = tcb_desc->queue_index;
-
- atomic_inc(&priv->tx_pending[queue_index]);
- tx_urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!tx_urb) {
- dev_kfree_skb(skb);
- return -ENOMEM;
- }
-
- memset(pdesc, 0, USB_HWDESC_HEADER_LEN);
- /* Tx descriptor ought to be set according to the skb->cb */
- pdesc->FirstSeg = 1;
- pdesc->LastSeg = 1;
- pdesc->CmdInit = tcb_desc->bCmdOrInit;
- pdesc->TxBufferSize = tcb_desc->txbuf_size;
- pdesc->OWN = 1;
- pdesc->LINIP = tcb_desc->bLastIniPkt;
-
- /*---------------------------------------------------------------------
- * Fill up USB_OUT_CONTEXT.
- *---------------------------------------------------------------------
- */
- idx_pipe = 0x04;
- usb_fill_bulk_urb(tx_urb, priv->udev,
- usb_sndbulkpipe(priv->udev, idx_pipe),
- skb->data, skb->len, rtl8192_tx_isr, skb);
-
- status = usb_submit_urb(tx_urb, GFP_ATOMIC);
-
- if (!status)
- return 0;
-
- DMESGE("Error TX CMD URB, error %d", status);
- dev_kfree_skb(skb);
- usb_free_urb(tx_urb);
- return -1;
-}
-
-/*
- * Mapping Software/Hardware descriptor queue id to "Queue Select Field"
- * in TxFwInfo data structure
- * 2006.10.30 by Emily
- *
- * \param QUEUEID Software Queue
- */
-static u8 MapHwQueueToFirmwareQueue(u8 QueueID)
-{
- u8 QueueSelect = 0x0; /* defaults to QSLT_BE (0x0) */
-
- switch (QueueID) {
- case BE_QUEUE:
- QueueSelect = QSLT_BE;
- break;
-
- case BK_QUEUE:
- QueueSelect = QSLT_BK;
- break;
-
- case VO_QUEUE:
- QueueSelect = QSLT_VO;
- break;
-
- case VI_QUEUE:
- QueueSelect = QSLT_VI;
- break;
- case MGNT_QUEUE:
- QueueSelect = QSLT_MGNT;
- break;
-
- case BEACON_QUEUE:
- QueueSelect = QSLT_BEACON;
- break;
-
- /* TODO: mark other queue selection until we verify it is OK */
- /* TODO: Remove Assertions */
- case TXCMD_QUEUE:
- QueueSelect = QSLT_CMD;
- break;
- case HIGH_QUEUE:
- QueueSelect = QSLT_HIGH;
- break;
-
- default:
- RT_TRACE(COMP_ERR,
- "TransmitTCB(): Impossible Queue Selection: %d\n",
- QueueID);
- break;
- }
- return QueueSelect;
-}
-
-static u8 MRateToHwRate8190Pci(u8 rate)
-{
- u8 ret = DESC90_RATE1M;
-
- switch (rate) {
- case MGN_1M:
- ret = DESC90_RATE1M;
- break;
- case MGN_2M:
- ret = DESC90_RATE2M;
- break;
- case MGN_5_5M:
- ret = DESC90_RATE5_5M;
- break;
- case MGN_11M:
- ret = DESC90_RATE11M;
- break;
- case MGN_6M:
- ret = DESC90_RATE6M;
- break;
- case MGN_9M:
- ret = DESC90_RATE9M;
- break;
- case MGN_12M:
- ret = DESC90_RATE12M;
- break;
- case MGN_18M:
- ret = DESC90_RATE18M;
- break;
- case MGN_24M:
- ret = DESC90_RATE24M;
- break;
- case MGN_36M:
- ret = DESC90_RATE36M;
- break;
- case MGN_48M:
- ret = DESC90_RATE48M;
- break;
- case MGN_54M:
- ret = DESC90_RATE54M;
- break;
-
- /* HT rate since here */
- case MGN_MCS0:
- ret = DESC90_RATEMCS0;
- break;
- case MGN_MCS1:
- ret = DESC90_RATEMCS1;
- break;
- case MGN_MCS2:
- ret = DESC90_RATEMCS2;
- break;
- case MGN_MCS3:
- ret = DESC90_RATEMCS3;
- break;
- case MGN_MCS4:
- ret = DESC90_RATEMCS4;
- break;
- case MGN_MCS5:
- ret = DESC90_RATEMCS5;
- break;
- case MGN_MCS6:
- ret = DESC90_RATEMCS6;
- break;
- case MGN_MCS7:
- ret = DESC90_RATEMCS7;
- break;
- case MGN_MCS8:
- ret = DESC90_RATEMCS8;
- break;
- case MGN_MCS9:
- ret = DESC90_RATEMCS9;
- break;
- case MGN_MCS10:
- ret = DESC90_RATEMCS10;
- break;
- case MGN_MCS11:
- ret = DESC90_RATEMCS11;
- break;
- case MGN_MCS12:
- ret = DESC90_RATEMCS12;
- break;
- case MGN_MCS13:
- ret = DESC90_RATEMCS13;
- break;
- case MGN_MCS14:
- ret = DESC90_RATEMCS14;
- break;
- case MGN_MCS15:
- ret = DESC90_RATEMCS15;
- break;
- case (0x80 | 0x20):
- ret = DESC90_RATEMCS32;
- break;
-
- default:
- break;
- }
- return ret;
-}
-
-static u8 QueryIsShort(u8 TxHT, u8 TxRate, struct cb_desc *tcb_desc)
-{
- u8 tmp_Short;
-
- tmp_Short = (TxHT == 1) ?
- ((tcb_desc->bUseShortGI) ? 1 : 0) :
- ((tcb_desc->bUseShortPreamble) ? 1 : 0);
-
- if (TxHT == 1 && TxRate != DESC90_RATEMCS15)
- tmp_Short = 0;
-
- return tmp_Short;
-}
-
-static void tx_zero_isr(struct urb *tx_urb)
-{
-}
-
-/*
- * The tx procedure is as follows:
- * skb->cb contains all of the following information:
- * priority, morefrag, rate, and &dev.
- */
-short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- struct tx_desc_819x_usb *tx_desc = (struct tx_desc_819x_usb *)skb->data;
- struct tx_fwinfo_819x_usb *tx_fwinfo =
- (struct tx_fwinfo_819x_usb *)(skb->data + USB_HWDESC_HEADER_LEN);
- struct usb_device *udev = priv->udev;
- int pend;
- int status, rt = -1;
- struct urb *tx_urb = NULL, *tx_urb_zero = NULL;
- unsigned int idx_pipe;
-
- pend = atomic_read(&priv->tx_pending[tcb_desc->queue_index]);
- /* We are locked here, so the two atomic_read and inc are executed
- * without interleaving.
- * !!! For debug purposes
- */
- if (pend > MAX_TX_URB) {
- netdev_dbg(dev, "To discard skb packet!\n");
- dev_kfree_skb_any(skb);
- return -1;
- }
-
- tx_urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!tx_urb) {
- dev_kfree_skb_any(skb);
- return -ENOMEM;
- }
-
- /* Fill Tx firmware info */
- memset(tx_fwinfo, 0, sizeof(struct tx_fwinfo_819x_usb));
- /* DWORD 0 */
- tx_fwinfo->TxHT = (tcb_desc->data_rate & 0x80) ? 1 : 0;
- tx_fwinfo->TxRate = MRateToHwRate8190Pci(tcb_desc->data_rate);
- tx_fwinfo->EnableCPUDur = tcb_desc->bTxEnableFwCalcDur;
- tx_fwinfo->Short = QueryIsShort(tx_fwinfo->TxHT, tx_fwinfo->TxRate,
- tcb_desc);
- if (tcb_desc->bAMPDUEnable) { /* AMPDU enabled */
- tx_fwinfo->AllowAggregation = 1;
- /* DWORD 1 */
- tx_fwinfo->RxMF = tcb_desc->ampdu_factor;
- tx_fwinfo->RxAMD = tcb_desc->ampdu_density & 0x07;
- } else {
- tx_fwinfo->AllowAggregation = 0;
- /* DWORD 1 */
- tx_fwinfo->RxMF = 0;
- tx_fwinfo->RxAMD = 0;
- }
-
- /* Protection mode related */
- tx_fwinfo->RtsEnable = (tcb_desc->bRTSEnable) ? 1 : 0;
- tx_fwinfo->CtsEnable = (tcb_desc->bCTSEnable) ? 1 : 0;
- tx_fwinfo->RtsSTBC = (tcb_desc->bRTSSTBC) ? 1 : 0;
- tx_fwinfo->RtsHT = (tcb_desc->rts_rate & 0x80) ? 1 : 0;
- tx_fwinfo->RtsRate = MRateToHwRate8190Pci((u8)tcb_desc->rts_rate);
- tx_fwinfo->RtsSubcarrier = (tx_fwinfo->RtsHT == 0) ? (tcb_desc->RTSSC) : 0;
- tx_fwinfo->RtsBandwidth = (tx_fwinfo->RtsHT == 1) ? ((tcb_desc->bRTSBW) ? 1 : 0) : 0;
- tx_fwinfo->RtsShort = (tx_fwinfo->RtsHT == 0) ? (tcb_desc->bRTSUseShortPreamble ? 1 : 0) :
- (tcb_desc->bRTSUseShortGI ? 1 : 0);
-
- /* Set Bandwidth and sub-channel settings. */
- if (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20_40) {
- if (tcb_desc->bPacketBW) {
- tx_fwinfo->TxBandwidth = 1;
- /* use duplicated mode */
- tx_fwinfo->TxSubCarrier = 0;
- } else {
- tx_fwinfo->TxBandwidth = 0;
- tx_fwinfo->TxSubCarrier = priv->nCur40MhzPrimeSC;
- }
- } else {
- tx_fwinfo->TxBandwidth = 0;
- tx_fwinfo->TxSubCarrier = 0;
- }
-
- /* Fill Tx descriptor */
- memset(tx_desc, 0, sizeof(struct tx_desc_819x_usb));
- /* DWORD 0 */
- tx_desc->LINIP = 0;
- tx_desc->CmdInit = 1;
- tx_desc->Offset = sizeof(struct tx_fwinfo_819x_usb) + 8;
- tx_desc->PktSize = (skb->len - TX_PACKET_SHIFT_BYTES) & 0xffff;
-
- /*DWORD 1*/
- tx_desc->SecCAMID = 0;
- tx_desc->RATid = tcb_desc->RATRIndex;
- tx_desc->NoEnc = 1;
- tx_desc->SecType = 0x0;
- if (tcb_desc->bHwSec) {
- switch (priv->ieee80211->pairwise_key_type) {
- case KEY_TYPE_WEP40:
- case KEY_TYPE_WEP104:
- tx_desc->SecType = 0x1;
- tx_desc->NoEnc = 0;
- break;
- case KEY_TYPE_TKIP:
- tx_desc->SecType = 0x2;
- tx_desc->NoEnc = 0;
- break;
- case KEY_TYPE_CCMP:
- tx_desc->SecType = 0x3;
- tx_desc->NoEnc = 0;
- break;
- case KEY_TYPE_NA:
- tx_desc->SecType = 0x0;
- tx_desc->NoEnc = 1;
- break;
- }
- }
-
- tx_desc->QueueSelect = MapHwQueueToFirmwareQueue(tcb_desc->queue_index);
- tx_desc->TxFWInfoSize = sizeof(struct tx_fwinfo_819x_usb);
-
- tx_desc->DISFB = tcb_desc->bTxDisableRateFallBack;
- tx_desc->USERATE = tcb_desc->bTxUseDriverAssingedRate;
-
- /* Fill fields that are required to be initialized in
- * all of the descriptors
- */
- /* DWORD 0 */
- tx_desc->FirstSeg = 1;
- tx_desc->LastSeg = 1;
- tx_desc->OWN = 1;
-
- /* DWORD 2 */
- tx_desc->TxBufferSize = (u32)(skb->len - USB_HWDESC_HEADER_LEN);
- idx_pipe = 0x5;
-
- /* To submit bulk urb */
- usb_fill_bulk_urb(tx_urb, udev,
- usb_sndbulkpipe(udev, idx_pipe), skb->data,
- skb->len, rtl8192_tx_isr, skb);
-
- status = usb_submit_urb(tx_urb, GFP_ATOMIC);
- if (!status) {
- /* We need to send a 0-byte packet whenever a
- * 512N-byte/64N-byte (HIGH SPEED/NORMAL SPEED) packet has
- * been transmitted. Otherwise, the device will halt and wait
- * for another packet.
- */
- bool bSend0Byte = false;
- u8 zero = 0;
-
- if (udev->speed == USB_SPEED_HIGH) {
- if (skb->len > 0 && skb->len % 512 == 0)
- bSend0Byte = true;
- } else {
- if (skb->len > 0 && skb->len % 64 == 0)
- bSend0Byte = true;
- }
- if (bSend0Byte) {
- tx_urb_zero = usb_alloc_urb(0, GFP_ATOMIC);
- if (!tx_urb_zero) {
- rt = -ENOMEM;
- goto error;
- }
- usb_fill_bulk_urb(tx_urb_zero, udev,
- usb_sndbulkpipe(udev, idx_pipe),
- &zero, 0, tx_zero_isr, dev);
- status = usb_submit_urb(tx_urb_zero, GFP_ATOMIC);
- if (status) {
- RT_TRACE(COMP_ERR,
- "Error TX URB for zero byte %d, error %d",
- atomic_read(&priv->tx_pending[tcb_desc->queue_index]),
- status);
- goto error;
- }
- }
- netif_trans_update(dev);
- atomic_inc(&priv->tx_pending[tcb_desc->queue_index]);
- return 0;
- }
-
- RT_TRACE(COMP_ERR, "Error TX URB %d, error %d",
- atomic_read(&priv->tx_pending[tcb_desc->queue_index]),
- status);
-
-error:
- dev_kfree_skb_any(skb);
- usb_free_urb(tx_urb);
- usb_free_urb(tx_urb_zero);
- return rt;
-}
-
-static short rtl8192_usb_initendpoints(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- priv->rx_urb = kmalloc_array(MAX_RX_URB + 1, sizeof(struct urb *),
- GFP_KERNEL);
- if (!priv->rx_urb)
- return -ENOMEM;
-
-#ifndef JACKSON_NEW_RX
- for (i = 0; i < (MAX_RX_URB + 1); i++) {
- priv->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
- if (!priv->rx_urb[i])
- return -ENOMEM;
-
- priv->rx_urb[i]->transfer_buffer =
- kmalloc(RX_URB_SIZE, GFP_KERNEL);
- if (!priv->rx_urb[i]->transfer_buffer)
- return -ENOMEM;
-
- priv->rx_urb[i]->transfer_buffer_length = RX_URB_SIZE;
- }
-#endif
-
-#ifdef THOMAS_BEACON
- {
- long align = 0;
- void *oldaddr, *newaddr;
-
- priv->rx_urb[16] = usb_alloc_urb(0, GFP_KERNEL);
- if (!priv->rx_urb[16])
- return -ENOMEM;
- priv->oldaddr = kmalloc(16, GFP_KERNEL);
- if (!priv->oldaddr)
- return -ENOMEM;
- oldaddr = priv->oldaddr;
- align = ((long)oldaddr) & 3;
- if (align) {
- newaddr = oldaddr + 4 - align;
- priv->rx_urb[16]->transfer_buffer_length = 16 - 4 + align;
- } else {
- newaddr = oldaddr;
- priv->rx_urb[16]->transfer_buffer_length = 16;
- }
- priv->rx_urb[16]->transfer_buffer = newaddr;
- }
-#endif
-
- memset(priv->rx_urb, 0, sizeof(struct urb *) * MAX_RX_URB);
- priv->pp_rxskb = kcalloc(MAX_RX_URB, sizeof(struct sk_buff *),
- GFP_KERNEL);
- if (!priv->pp_rxskb) {
- kfree(priv->rx_urb);
-
- priv->pp_rxskb = NULL;
- priv->rx_urb = NULL;
-
- DMESGE("Endpoint Alloc Failure");
- return -ENOMEM;
- }
-
- netdev_dbg(dev, "End of initendpoints\n");
- return 0;
-}
-
-#ifdef THOMAS_BEACON
-static void rtl8192_usb_deleteendpoints(struct net_device *dev)
-{
- int i;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (priv->rx_urb) {
- for (i = 0; i < (MAX_RX_URB + 1); i++) {
- usb_kill_urb(priv->rx_urb[i]);
- usb_free_urb(priv->rx_urb[i]);
- }
- kfree(priv->rx_urb);
- priv->rx_urb = NULL;
- }
- kfree(priv->oldaddr);
- priv->oldaddr = NULL;
-
- kfree(priv->pp_rxskb);
- priv->pp_rxskb = NULL;
-}
-#else
-void rtl8192_usb_deleteendpoints(struct net_device *dev)
-{
- int i;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
-#ifndef JACKSON_NEW_RX
-
- if (priv->rx_urb) {
- for (i = 0; i < (MAX_RX_URB + 1); i++) {
- usb_kill_urb(priv->rx_urb[i]);
- kfree(priv->rx_urb[i]->transfer_buffer);
- usb_free_urb(priv->rx_urb[i]);
- }
- kfree(priv->rx_urb);
- priv->rx_urb = NULL;
- }
-#else
- kfree(priv->rx_urb);
- priv->rx_urb = NULL;
- kfree(priv->oldaddr);
- priv->oldaddr = NULL;
-
- kfree(priv->pp_rxskb);
- priv->pp_rxskb = 0;
-
-#endif
-}
-#endif
-
-static void rtl8192_update_ratr_table(struct net_device *dev);
-static void rtl8192_link_change(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device *ieee = priv->ieee80211;
-
- if (ieee->state == IEEE80211_LINKED) {
- rtl8192_net_update(dev);
- rtl8192_update_ratr_table(dev);
- /* Add this because, in pure N mode, WEP encryption is done in
- * software, but there is otherwise no chance to set this, as WEP
- * will not set a group key in wext.
- */
- if (ieee->pairwise_key_type == KEY_TYPE_WEP40 ||
- ieee->pairwise_key_type == KEY_TYPE_WEP104)
- EnableHWSecurityConfig8192(dev);
- }
- /*update timing params*/
- if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC) {
- u32 reg = 0;
-
- read_nic_dword(dev, RCR, &reg);
- if (priv->ieee80211->state == IEEE80211_LINKED)
- priv->ReceiveConfig = reg |= RCR_CBSSID;
- else
- priv->ReceiveConfig = reg &= ~RCR_CBSSID;
- write_nic_dword(dev, RCR, reg);
- }
-}
-
-static const struct ieee80211_qos_parameters def_qos_parameters = {
- {cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3)},
- {cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7)},
- {2, 2, 2, 2},/* aifs */
- {0, 0, 0, 0},/* flags */
- {0, 0, 0, 0} /* tx_op_limit */
-};
-
-static void rtl8192_update_beacon(struct work_struct *work)
-{
- struct r8192_priv *priv = container_of(work, struct r8192_priv,
- update_beacon_wq.work);
- struct net_device *dev = priv->ieee80211->dev;
- struct ieee80211_device *ieee = priv->ieee80211;
- struct ieee80211_network *net = &ieee->current_network;
-
- if (ieee->pHTInfo->bCurrentHTSupport)
- HTUpdateSelfAndPeerSetting(ieee, net);
- ieee->pHTInfo->bCurrentRT2RTLongSlotTime =
- net->bssht.bdRT2RTLongSlotTime;
- rtl8192_update_cap(dev, net->capability);
-}
-
-/*
- * background support to run QoS activate functionality
- */
-static int WDCAPARA_ADD[] = {EDCAPARA_BE, EDCAPARA_BK,
- EDCAPARA_VI, EDCAPARA_VO};
-static void rtl8192_qos_activate(struct work_struct *work)
-{
- struct r8192_priv *priv = container_of(work, struct r8192_priv,
- qos_activate);
- struct net_device *dev = priv->ieee80211->dev;
- struct ieee80211_qos_parameters *qos_parameters =
- &priv->ieee80211->current_network.qos_data.parameters;
- u8 mode = priv->ieee80211->current_network.mode;
- u32 u1bAIFS;
- u32 u4bAcParam;
- u32 op_limit;
- u32 cw_max;
- u32 cw_min;
- int i;
-
- mutex_lock(&priv->mutex);
- if (priv->ieee80211->state != IEEE80211_LINKED)
- goto success;
- RT_TRACE(COMP_QOS,
- "qos active process with associate response received\n");
- /* It is better to set the slot time first.
- *
- * Since we only support b/g mode at present, the slot time is
- * selected from 9/20.
- *
- * Then update the AC parameters in the related registers.
- */
- for (i = 0; i < QOS_QUEUE_NUM; i++) {
- /* Mode G/A: slot time = 9; Mode B: slot time = 20 */
- u1bAIFS = qos_parameters->aifs[i] * ((mode & (IEEE_G | IEEE_N_24G)) ? 9 : 20) + aSifsTime;
- u1bAIFS <<= AC_PARAM_AIFS_OFFSET;
- op_limit = (u32)le16_to_cpu(qos_parameters->tx_op_limit[i]);
- op_limit <<= AC_PARAM_TXOP_LIMIT_OFFSET;
- cw_max = (u32)le16_to_cpu(qos_parameters->cw_max[i]);
- cw_max <<= AC_PARAM_ECW_MAX_OFFSET;
- cw_min = (u32)le16_to_cpu(qos_parameters->cw_min[i]);
- cw_min <<= AC_PARAM_ECW_MIN_OFFSET;
- u4bAcParam = op_limit | cw_max | cw_min | u1bAIFS;
- write_nic_dword(dev, WDCAPARA_ADD[i], u4bAcParam);
- }
-
-success:
- mutex_unlock(&priv->mutex);
-}
-
-static int rtl8192_qos_handle_probe_response(struct r8192_priv *priv,
- int active_network,
- struct ieee80211_network *network)
-{
- int ret = 0;
- u32 size = sizeof(struct ieee80211_qos_parameters);
-
- if (priv->ieee80211->state != IEEE80211_LINKED)
- return ret;
-
- if (priv->ieee80211->iw_mode != IW_MODE_INFRA)
- return ret;
-
- if (network->flags & NETWORK_HAS_QOS_MASK) {
- if (active_network &&
- (network->flags & NETWORK_HAS_QOS_PARAMETERS))
- network->qos_data.active = network->qos_data.supported;
-
- if ((network->qos_data.active == 1) && (active_network == 1) &&
- (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
- (network->qos_data.old_param_count !=
- network->qos_data.param_count)) {
- network->qos_data.old_param_count =
- network->qos_data.param_count;
- schedule_work(&priv->qos_activate);
- RT_TRACE(COMP_QOS,
- "QoS parameters change call qos_activate\n");
- }
- } else {
- memcpy(&priv->ieee80211->current_network.qos_data.parameters,
- &def_qos_parameters, size);
-
- if ((network->qos_data.active == 1) && (active_network == 1)) {
- schedule_work(&priv->qos_activate);
- RT_TRACE(COMP_QOS,
- "QoS was disabled call qos_activate\n");
- }
- network->qos_data.active = 0;
- network->qos_data.supported = 0;
- }
-
- return 0;
-}
-
-/* handle and manage frames from beacons and probe responses */
-static int rtl8192_handle_beacon(struct net_device *dev,
- struct ieee80211_beacon *beacon,
- struct ieee80211_network *network)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- rtl8192_qos_handle_probe_response(priv, 1, network);
- schedule_delayed_work(&priv->update_beacon_wq, 0);
- return 0;
-}
-
-/*
- * Handle the association response: if the QoS setting we get
- * from the network differs from the currently associated setting,
- * adjust the QoS setting.
- */
-static int rtl8192_qos_association_resp(struct r8192_priv *priv,
- struct ieee80211_network *network)
-{
- unsigned long flags;
- u32 size = sizeof(struct ieee80211_qos_parameters);
- int set_qos_param = 0;
-
- if (!priv || !network)
- return 0;
-
- if (priv->ieee80211->state != IEEE80211_LINKED)
- return 0;
-
- if (priv->ieee80211->iw_mode != IW_MODE_INFRA)
- return 0;
-
- spin_lock_irqsave(&priv->ieee80211->lock, flags);
- if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
- memcpy(&priv->ieee80211->current_network.qos_data.parameters,
- &network->qos_data.parameters,
- sizeof(struct ieee80211_qos_parameters));
- priv->ieee80211->current_network.qos_data.active = 1;
- set_qos_param = 1;
- /* update qos parameter for current network */
- priv->ieee80211->current_network.qos_data.old_param_count =
- priv->ieee80211->current_network.qos_data.param_count;
- priv->ieee80211->current_network.qos_data.param_count =
- network->qos_data.param_count;
- } else {
- memcpy(&priv->ieee80211->current_network.qos_data.parameters,
- &def_qos_parameters, size);
- priv->ieee80211->current_network.qos_data.active = 0;
- priv->ieee80211->current_network.qos_data.supported = 0;
- set_qos_param = 1;
- }
-
- spin_unlock_irqrestore(&priv->ieee80211->lock, flags);
-
- RT_TRACE(COMP_QOS, "%s: network->flags = %d,%d\n", __func__,
- network->flags,
- priv->ieee80211->current_network.qos_data.active);
- if (set_qos_param == 1)
- schedule_work(&priv->qos_activate);
-
- return 0;
-}
-
-static int rtl8192_handle_assoc_response(struct net_device *dev,
- struct ieee80211_assoc_response_frame *resp,
- struct ieee80211_network *network)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- rtl8192_qos_association_resp(priv, network);
- return 0;
-}
-
-static void rtl8192_update_ratr_table(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device *ieee = priv->ieee80211;
- u8 *pMcsRate = ieee->dot11HTOperationalRateSet;
- u32 ratr_value = 0;
- u8 rate_index = 0;
-
- rtl8192_config_rate(dev, (u16 *)(&ratr_value));
- ratr_value |= (*(u16 *)(pMcsRate)) << 12;
- switch (ieee->mode) {
- case IEEE_A:
- ratr_value &= 0x00000FF0;
- break;
- case IEEE_B:
- ratr_value &= 0x0000000F;
- break;
- case IEEE_G:
- ratr_value &= 0x00000FF7;
- break;
- case IEEE_N_24G:
- case IEEE_N_5G:
- if (ieee->pHTInfo->PeerMimoPs == MIMO_PS_STATIC) {
- ratr_value &= 0x0007F007;
- } else {
- if (priv->rf_type == RF_1T2R)
- ratr_value &= 0x000FF007;
- else
- ratr_value &= 0x0F81F007;
- }
- break;
- default:
- break;
- }
- ratr_value &= 0x0FFFFFFF;
- if (ieee->pHTInfo->bCurTxBW40MHz && ieee->pHTInfo->bCurShortGI40MHz)
- ratr_value |= 0x80000000;
- else if (!ieee->pHTInfo->bCurTxBW40MHz &&
- ieee->pHTInfo->bCurShortGI20MHz)
- ratr_value |= 0x80000000;
- write_nic_dword(dev, RATR0 + rate_index * 4, ratr_value);
- write_nic_byte(dev, UFWP, 1);
-}
-
-static u8 ccmp_ie[4] = {0x00, 0x50, 0xf2, 0x04};
-static u8 ccmp_rsn_ie[4] = {0x00, 0x0f, 0xac, 0x04};
-static bool GetNmodeSupportBySecCfg8192(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device *ieee = priv->ieee80211;
- struct ieee80211_network *network = &ieee->current_network;
- int wpa_ie_len = ieee->wpa_ie_len;
- struct ieee80211_crypt_data *crypt;
- int encrypt;
-
- crypt = ieee->crypt[ieee->tx_keyidx];
- /* We use the connecting AP's capability, instead of only our
- * driver's security config, to decide whether to use N mode or
- * G mode.
- */
- encrypt = (network->capability & WLAN_CAPABILITY_PRIVACY) ||
- (ieee->host_encrypt && crypt && crypt->ops &&
- (strcmp(crypt->ops->name, "WEP") == 0));
-
- /* simply judge */
- if (encrypt && (wpa_ie_len == 0)) {
- /* wep encryption, no N mode setting */
- return false;
- } else if ((wpa_ie_len != 0)) {
- /* parse pairwise key type */
- if (((ieee->wpa_ie[0] == 0xdd) && (!memcmp(&(ieee->wpa_ie[14]), ccmp_ie, 4))) || ((ieee->wpa_ie[0] == 0x30) && (!memcmp(&ieee->wpa_ie[10], ccmp_rsn_ie, 4))))
- return true;
- else
- return false;
- } else {
- return true;
- }
-
- return true;
-}
-
-static bool GetHalfNmodeSupportByAPs819xUsb(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- return priv->ieee80211->bHalfWirelessN24GMode;
-}
-
-static void rtl8192_refresh_supportrate(struct r8192_priv *priv)
-{
- struct ieee80211_device *ieee = priv->ieee80211;
- /* We do not set the supported rates for A/B/G mode here; only
- * the HT MCS rate set is configured.
- */
- if (ieee->mode == WIRELESS_MODE_N_24G ||
- ieee->mode == WIRELESS_MODE_N_5G)
- memcpy(ieee->Regdot11HTOperationalRateSet,
- ieee->RegHTSuppRateSet, 16);
- else
- memset(ieee->Regdot11HTOperationalRateSet, 0, 16);
-}
-
-static u8 rtl8192_getSupportedWireleeMode(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 ret = 0;
-
- switch (priv->rf_chip) {
- case RF_8225:
- case RF_8256:
- case RF_PSEUDO_11N:
- ret = WIRELESS_MODE_N_24G | WIRELESS_MODE_G | WIRELESS_MODE_B;
- break;
- case RF_8258:
- ret = WIRELESS_MODE_A | WIRELESS_MODE_N_5G;
- break;
- default:
- ret = WIRELESS_MODE_B;
- break;
- }
- return ret;
-}
-
-static void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 bSupportMode = rtl8192_getSupportedWireleeMode(dev);
-
- if (wireless_mode == WIRELESS_MODE_AUTO ||
- (wireless_mode & bSupportMode) == 0) {
- if (bSupportMode & WIRELESS_MODE_N_24G) {
- wireless_mode = WIRELESS_MODE_N_24G;
- } else if (bSupportMode & WIRELESS_MODE_N_5G) {
- wireless_mode = WIRELESS_MODE_N_5G;
- } else if ((bSupportMode & WIRELESS_MODE_A)) {
- wireless_mode = WIRELESS_MODE_A;
- } else if ((bSupportMode & WIRELESS_MODE_G)) {
- wireless_mode = WIRELESS_MODE_G;
- } else if ((bSupportMode & WIRELESS_MODE_B)) {
- wireless_mode = WIRELESS_MODE_B;
- } else {
- RT_TRACE(COMP_ERR,
- "%s(), No valid wireless mode supported, SupportedWirelessMode(%x)!!!\n",
- __func__, bSupportMode);
- wireless_mode = WIRELESS_MODE_B;
- }
- }
- priv->ieee80211->mode = wireless_mode;
-
- if (wireless_mode == WIRELESS_MODE_N_24G ||
- wireless_mode == WIRELESS_MODE_N_5G)
- priv->ieee80211->pHTInfo->bEnableHT = 1;
- else
- priv->ieee80211->pHTInfo->bEnableHT = 0;
- RT_TRACE(COMP_INIT, "Current Wireless Mode is %x\n", wireless_mode);
- rtl8192_refresh_supportrate(priv);
-}
-
-/* Init priv variables here. Only non-zero values should be initialized here. */
-static int rtl8192_init_priv_variable(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 i;
-
- priv->card_8192 = NIC_8192U;
- priv->chan = 1; /* set to channel 1 */
- priv->ieee80211->mode = WIRELESS_MODE_AUTO; /* SET AUTO */
- priv->ieee80211->iw_mode = IW_MODE_INFRA;
- priv->ieee80211->ieee_up = 0;
- priv->retry_rts = DEFAULT_RETRY_RTS;
- priv->retry_data = DEFAULT_RETRY_DATA;
- priv->ieee80211->rts = DEFAULT_RTS_THRESHOLD;
- priv->ieee80211->rate = 110; /* 11 mbps */
- priv->ieee80211->short_slot = 1;
- priv->promisc = (dev->flags & IFF_PROMISC) ? 1 : 0;
- priv->CckPwEnl = 6;
- /* for silent reset */
- priv->IrpPendingCount = 1;
- priv->ResetProgress = RESET_TYPE_NORESET;
- priv->bForcedSilentReset = false;
- priv->bDisableNormalResetCheck = false;
- priv->force_reset = false;
-
- /* we don't use FW read/write RF until stable firmware is available. */
- priv->ieee80211->FwRWRF = 0;
- priv->ieee80211->current_network.beacon_interval =
- DEFAULT_BEACONINTERVAL;
- priv->ieee80211->softmac_features = IEEE_SOFTMAC_SCAN |
- IEEE_SOFTMAC_ASSOCIATE | IEEE_SOFTMAC_PROBERQ |
- IEEE_SOFTMAC_PROBERS | IEEE_SOFTMAC_TX_QUEUE |
- IEEE_SOFTMAC_BEACONS;
-
- priv->ieee80211->active_scan = 1;
- priv->ieee80211->modulation =
- IEEE80211_CCK_MODULATION | IEEE80211_OFDM_MODULATION;
- priv->ieee80211->host_encrypt = 1;
- priv->ieee80211->host_decrypt = 1;
- priv->ieee80211->start_send_beacons = NULL;
- priv->ieee80211->stop_send_beacons = NULL;
- priv->ieee80211->softmac_hard_start_xmit = rtl8192_hard_start_xmit;
- priv->ieee80211->set_chan = rtl8192_set_chan;
- priv->ieee80211->link_change = rtl8192_link_change;
- priv->ieee80211->softmac_data_hard_start_xmit = rtl8192_hard_data_xmit;
- priv->ieee80211->data_hard_stop = rtl8192_data_hard_stop;
- priv->ieee80211->data_hard_resume = rtl8192_data_hard_resume;
- priv->ieee80211->init_wmmparam_flag = 0;
- priv->ieee80211->fts = DEFAULT_FRAG_THRESHOLD;
- priv->ieee80211->check_nic_enough_desc = check_nic_enough_desc;
- priv->ieee80211->tx_headroom = TX_PACKET_SHIFT_BYTES;
- priv->ieee80211->qos_support = 1;
-
- priv->ieee80211->SetBWModeHandler = rtl8192_SetBWMode;
- priv->ieee80211->handle_assoc_response = rtl8192_handle_assoc_response;
- priv->ieee80211->handle_beacon = rtl8192_handle_beacon;
-
- priv->ieee80211->GetNmodeSupportBySecCfg = GetNmodeSupportBySecCfg8192;
- priv->ieee80211->GetHalfNmodeSupportByAPsHandler =
- GetHalfNmodeSupportByAPs819xUsb;
- priv->ieee80211->SetWirelessMode = rtl8192_SetWirelessMode;
-
- priv->ieee80211->InitialGainHandler = InitialGain819xUsb;
- priv->card_type = USB;
- priv->ShortRetryLimit = 0x30;
- priv->LongRetryLimit = 0x30;
- priv->EarlyRxThreshold = 7;
- priv->enable_gpio0 = 0;
- priv->TransmitConfig =
- /* Max DMA Burst Size per Tx DMA Burst, 7: reserved. */
- (TCR_MXDMA_2048 << TCR_MXDMA_OFFSET) |
- /* Short retry limit */
- (priv->ShortRetryLimit << TCR_SRL_OFFSET) |
- /* Long retry limit */
- (priv->LongRetryLimit << TCR_LRL_OFFSET) |
- /* FALSE: HW provides PLCP length and LENGEXT
- * TRUE: SW provides them
- */
- (false ? TCR_SAT : 0);
- priv->ReceiveConfig =
- /* accept management/data */
- RCR_AMF | RCR_ADF |
- /* accept control frame for SW AP needs PS-poll */
- RCR_ACF |
- /* accept BC/MC/UC */
- RCR_AB | RCR_AM | RCR_APM |
- /* Max DMA Burst Size per Rx DMA Burst, 7: unlimited. */
- ((u32)7 << RCR_MXDMA_OFFSET) |
- /* Rx FIFO Threshold, 7: No Rx threshold. */
- (priv->EarlyRxThreshold << RX_FIFO_THRESHOLD_SHIFT) |
- (priv->EarlyRxThreshold == 7 ? RCR_ONLYERLPKT : 0);
-
- priv->AcmControl = 0;
- priv->pFirmware = kzalloc(sizeof(rt_firmware), GFP_KERNEL);
- if (!priv->pFirmware)
- return -ENOMEM;
-
- /* rx related queue */
- skb_queue_head_init(&priv->rx_queue);
- skb_queue_head_init(&priv->skb_queue);
-
- /* Tx related queue */
- for (i = 0; i < MAX_QUEUE_SIZE; i++)
- skb_queue_head_init(&priv->ieee80211->skb_waitQ[i]);
- for (i = 0; i < MAX_QUEUE_SIZE; i++)
- skb_queue_head_init(&priv->ieee80211->skb_aggQ[i]);
- for (i = 0; i < MAX_QUEUE_SIZE; i++)
- skb_queue_head_init(&priv->ieee80211->skb_drv_aggQ[i]);
- priv->rf_set_chan = rtl8192_phy_SwChnl;
-
- return 0;
-}
-
-/* init lock here */
-static void rtl8192_init_priv_lock(struct r8192_priv *priv)
-{
- spin_lock_init(&priv->tx_lock);
- spin_lock_init(&priv->irq_lock);
- mutex_init(&priv->wx_mutex);
- mutex_init(&priv->mutex);
-}
-
-static void rtl819x_watchdog_wqcallback(struct work_struct *work);
-
-static void rtl8192_irq_rx_tasklet(struct tasklet_struct *t);
-/* init tasklet and wait_queue here. only 2.6 above kernel is considered */
-#define DRV_NAME "wlan0"
-static void rtl8192_init_priv_task(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- INIT_WORK(&priv->reset_wq, rtl8192_restart);
-
- INIT_DELAYED_WORK(&priv->watch_dog_wq,
- rtl819x_watchdog_wqcallback);
- INIT_DELAYED_WORK(&priv->txpower_tracking_wq,
- dm_txpower_trackingcallback);
- INIT_DELAYED_WORK(&priv->rfpath_check_wq,
- dm_rf_pathcheck_workitemcallback);
- INIT_DELAYED_WORK(&priv->update_beacon_wq,
- rtl8192_update_beacon);
- INIT_DELAYED_WORK(&priv->initialgain_operate_wq,
- InitialGainOperateWorkItemCallBack);
- INIT_WORK(&priv->qos_activate, rtl8192_qos_activate);
-
- tasklet_setup(&priv->irq_rx_tasklet, rtl8192_irq_rx_tasklet);
-}
-
-static void rtl8192_get_eeprom_size(struct net_device *dev)
-{
- u16 curCR = 0;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- RT_TRACE(COMP_EPROM, "===========>%s()\n", __func__);
- read_nic_word_E(dev, EPROM_CMD, &curCR);
- RT_TRACE(COMP_EPROM,
- "read from Reg EPROM_CMD(%x):%x\n", EPROM_CMD, curCR);
- /* do we need to consider BIT(5)? */
- priv->epromtype =
- (curCR & Cmd9346CR_9356SEL) ? EPROM_93c56 : EPROM_93c46;
- RT_TRACE(COMP_EPROM,
- "<===========%s(), epromtype:%d\n", __func__, priv->epromtype);
-}
-
-/* Used to swap byte order. ntohl & htonl are not suitable for this
- * swap, so use this helper instead.
- */
-static inline u16 endian_swap(u16 *data)
-{
- u16 tmp = *data;
- *data = (tmp >> 8) | (tmp << 8);
- return *data;
-}
-
-static int rtl8192_read_eeprom_info(struct net_device *dev)
-{
- u16 wEPROM_ID = 0;
- u8 bMac_Tmp_Addr[6] = {0x00, 0xe0, 0x4c, 0x00, 0x00, 0x02};
- u8 bLoad_From_EEPOM = false;
- struct r8192_priv *priv = ieee80211_priv(dev);
- u16 tmpValue = 0;
- int i;
- int ret;
-
- RT_TRACE(COMP_EPROM, "===========>%s()\n", __func__);
- ret = eprom_read(dev, 0); /* first read EEPROM ID out; */
- if (ret < 0)
- return ret;
- wEPROM_ID = (u16)ret;
- RT_TRACE(COMP_EPROM, "EEPROM ID is 0x%x\n", wEPROM_ID);
-
- if (wEPROM_ID != RTL8190_EEPROM_ID)
- RT_TRACE(COMP_ERR,
- "EEPROM ID is invalid(is 0x%x(should be 0x%x)\n",
- wEPROM_ID, RTL8190_EEPROM_ID);
- else
- bLoad_From_EEPOM = true;
-
- if (bLoad_From_EEPOM) {
- ret = eprom_read(dev, EEPROM_VID >> 1);
- if (ret < 0)
- return ret;
- tmpValue = (u16)ret;
- priv->eeprom_vid = endian_swap(&tmpValue);
- ret = eprom_read(dev, EEPROM_PID >> 1);
- if (ret < 0)
- return ret;
- priv->eeprom_pid = (u16)ret;
- ret = eprom_read(dev, EEPROM_CHANNEL_PLAN >> 1);
- if (ret < 0)
- return ret;
- tmpValue = (u16)ret;
- priv->eeprom_ChannelPlan = (tmpValue & 0xff00) >> 8;
- priv->btxpowerdata_readfromEEPORM = true;
- ret = eprom_read(dev, (EEPROM_CUSTOMER_ID >> 1)) >> 8;
- if (ret < 0)
- return ret;
- priv->eeprom_CustomerID = (u16)ret;
- } else {
- priv->eeprom_vid = 0;
- priv->eeprom_pid = 0;
- priv->card_8192_version = VERSION_819XU_B;
- priv->eeprom_ChannelPlan = 0;
- priv->eeprom_CustomerID = 0;
- }
- RT_TRACE(COMP_EPROM,
- "vid:0x%4x, pid:0x%4x, CustomID:0x%2x, ChanPlan:0x%x\n",
- priv->eeprom_vid, priv->eeprom_pid, priv->eeprom_CustomerID,
- priv->eeprom_ChannelPlan);
- /* set channelplan from eeprom */
- priv->ChannelPlan = priv->eeprom_ChannelPlan;
- if (bLoad_From_EEPOM) {
- u8 addr[ETH_ALEN];
-
- for (i = 0; i < 6; i += 2) {
- ret = eprom_read(dev, (u16)((EEPROM_NODE_ADDRESS_BYTE_0 + i) >> 1));
- if (ret < 0)
- return ret;
- *(u16 *)(&addr[i]) = (u16)ret;
- }
- eth_hw_addr_set(dev, addr);
- } else {
- eth_hw_addr_set(dev, bMac_Tmp_Addr);
- /* should I set IDR0 here? */
- }
- RT_TRACE(COMP_EPROM, "MAC addr:%pM\n", dev->dev_addr);
- priv->rf_type = RTL819X_DEFAULT_RF_TYPE; /* default 1T2R */
- priv->rf_chip = RF_8256;
-
- if (priv->card_8192_version == VERSION_819XU_A) {
- /* read Tx power gain offset of legacy OFDM to HT rate */
- if (bLoad_From_EEPOM) {
- ret = eprom_read(dev, (EEPROM_TX_POWER_DIFF >> 1));
- if (ret < 0)
- return ret;
- priv->EEPROMTxPowerDiff = ((u16)ret & 0xff00) >> 8;
- } else
- priv->EEPROMTxPowerDiff = EEPROM_DEFAULT_TX_POWER;
- RT_TRACE(COMP_EPROM, "TxPowerDiff:%d\n", priv->EEPROMTxPowerDiff);
- /* read ThermalMeter from EEPROM */
- if (bLoad_From_EEPOM) {
- ret = eprom_read(dev, (EEPROM_THERMAL_METER >> 1));
- if (ret < 0)
- return ret;
- priv->EEPROMThermalMeter = (u8)((u16)ret & 0x00ff);
- } else
- priv->EEPROMThermalMeter = EEPROM_DEFAULT_THERNAL_METER;
- RT_TRACE(COMP_EPROM, "ThermalMeter:%d\n", priv->EEPROMThermalMeter);
- /* for tx power track */
- priv->TSSI_13dBm = priv->EEPROMThermalMeter * 100;
- /* read antenna tx power offset of B/C/D to A from EEPROM */
- if (bLoad_From_EEPOM) {
- ret = eprom_read(dev, (EEPROM_PW_DIFF >> 1));
- if (ret < 0)
- return ret;
- priv->EEPROMPwDiff = ((u16)ret & 0x0f00) >> 8;
- } else
- priv->EEPROMPwDiff = EEPROM_DEFAULT_PW_DIFF;
- RT_TRACE(COMP_EPROM, "TxPwDiff:%d\n", priv->EEPROMPwDiff);
- /* Read CrystalCap from EEPROM */
- if (bLoad_From_EEPOM) {
- ret = eprom_read(dev, (EEPROM_CRYSTAL_CAP >> 1));
- if (ret < 0)
- return ret;
- priv->EEPROMCrystalCap = (u16)ret & 0x0f;
- } else
- priv->EEPROMCrystalCap = EEPROM_DEFAULT_CRYSTAL_CAP;
- RT_TRACE(COMP_EPROM, "CrystalCap = %d\n", priv->EEPROMCrystalCap);
- /* get per-channel Tx power level */
- if (bLoad_From_EEPOM) {
- ret = eprom_read(dev, (EEPROM_TX_PW_INDEX_VER >> 1));
- if (ret < 0)
- return ret;
- priv->EEPROM_Def_Ver = ((u16)ret & 0xff00) >> 8;
- } else
- priv->EEPROM_Def_Ver = 1;
- RT_TRACE(COMP_EPROM, "EEPROM_DEF_VER:%d\n", priv->EEPROM_Def_Ver);
- if (priv->EEPROM_Def_Ver == 0) { /* old eeprom definition */
- if (bLoad_From_EEPOM) {
- ret = eprom_read(dev, (EEPROM_TX_PW_INDEX_CCK >> 1));
- if (ret < 0)
- return ret;
- priv->EEPROMTxPowerLevelCCK = ((u16)ret & 0xff00) >> 8;
- } else
- priv->EEPROMTxPowerLevelCCK = 0x10;
- RT_TRACE(COMP_EPROM, "CCK Tx Power Levl: 0x%02x\n", priv->EEPROMTxPowerLevelCCK);
- for (i = 0; i < 3; i++) {
- if (bLoad_From_EEPOM) {
- ret = eprom_read(dev, (EEPROM_TX_PW_INDEX_OFDM_24G + i) >> 1);
- if (ret < 0)
- return ret;
- if (((EEPROM_TX_PW_INDEX_OFDM_24G + i) % 2) == 0)
- tmpValue = (u16)ret & 0x00ff;
- else
- tmpValue = ((u16)ret & 0xff00) >> 8;
- } else {
- tmpValue = 0x10;
- }
- priv->EEPROMTxPowerLevelOFDM24G[i] = (u8)tmpValue;
- RT_TRACE(COMP_EPROM, "OFDM 2.4G Tx Power Level, Index %d = 0x%02x\n", i, priv->EEPROMTxPowerLevelCCK);
- }
- } else if (priv->EEPROM_Def_Ver == 1) {
- if (bLoad_From_EEPOM) {
- ret = eprom_read(dev, EEPROM_TX_PW_INDEX_CCK_V1 >> 1);
- if (ret < 0)
- return ret;
- tmpValue = ((u16)ret & 0xff00) >> 8;
- } else {
- tmpValue = 0x10;
- }
- priv->EEPROMTxPowerLevelCCK_V1[0] = (u8)tmpValue;
-
- if (bLoad_From_EEPOM) {
- ret = eprom_read(dev, (EEPROM_TX_PW_INDEX_CCK_V1 + 2) >> 1);
- if (ret < 0)
- return ret;
- tmpValue = (u16)ret;
- } else
- tmpValue = 0x1010;
- *((u16 *)(&priv->EEPROMTxPowerLevelCCK_V1[1])) = tmpValue;
- if (bLoad_From_EEPOM)
- tmpValue = eprom_read(dev,
- EEPROM_TX_PW_INDEX_OFDM_24G_V1 >> 1);
- else
- tmpValue = 0x1010;
- *((u16 *)(&priv->EEPROMTxPowerLevelOFDM24G[0])) = tmpValue;
- if (bLoad_From_EEPOM)
- tmpValue = eprom_read(dev, (EEPROM_TX_PW_INDEX_OFDM_24G_V1 + 2) >> 1);
- else
- tmpValue = 0x10;
- priv->EEPROMTxPowerLevelOFDM24G[2] = (u8)tmpValue;
- } /* endif EEPROM_Def_Ver == 1 */
-
- /* update HAL variables */
- for (i = 0; i < 14; i++) {
- if (i <= 3)
- priv->TxPowerLevelOFDM24G[i] = priv->EEPROMTxPowerLevelOFDM24G[0];
- else if (i >= 4 && i <= 9)
- priv->TxPowerLevelOFDM24G[i] = priv->EEPROMTxPowerLevelOFDM24G[1];
- else
- priv->TxPowerLevelOFDM24G[i] = priv->EEPROMTxPowerLevelOFDM24G[2];
- }
-
- for (i = 0; i < 14; i++) {
- if (priv->EEPROM_Def_Ver == 0) {
- if (i <= 3)
- priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelOFDM24G[0] + (priv->EEPROMTxPowerLevelCCK - priv->EEPROMTxPowerLevelOFDM24G[1]);
- else if (i >= 4 && i <= 9)
- priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelCCK;
- else
- priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelOFDM24G[2] + (priv->EEPROMTxPowerLevelCCK - priv->EEPROMTxPowerLevelOFDM24G[1]);
- } else if (priv->EEPROM_Def_Ver == 1) {
- if (i <= 3)
- priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelCCK_V1[0];
- else if (i >= 4 && i <= 9)
- priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelCCK_V1[1];
- else
- priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelCCK_V1[2];
- }
- }
- priv->TxPowerDiff = priv->EEPROMPwDiff;
- /* Antenna B gain offset to antenna A, bit0~3 */
- priv->AntennaTxPwDiff[0] = (priv->EEPROMTxPowerDiff & 0xf);
- /* Antenna C gain offset to antenna A, bit4~7 */
- priv->AntennaTxPwDiff[1] =
- (priv->EEPROMTxPowerDiff & 0xf0) >> 4;
- /* CrystalCap, bit12~15 */
- priv->CrystalCap = priv->EEPROMCrystalCap;
- /* ThermalMeter, bit0~3 for RFIC1, bit4~7 for RFIC2
- * 92U does not enable TX power tracking.
- */
- priv->ThermalMeter[0] = priv->EEPROMThermalMeter;
- } /* end if VersionID == VERSION_819XU_A */
-
- /* for dlink led */
- switch (priv->eeprom_CustomerID) {
- case EEPROM_CID_RUNTOP:
- priv->CustomerID = RT_CID_819x_RUNTOP;
- break;
-
- case EEPROM_CID_DLINK:
- priv->CustomerID = RT_CID_DLINK;
- break;
-
- default:
- priv->CustomerID = RT_CID_DEFAULT;
- break;
- }
-
- switch (priv->CustomerID) {
- case RT_CID_819x_RUNTOP:
- priv->LedStrategy = SW_LED_MODE2;
- break;
-
- case RT_CID_DLINK:
- priv->LedStrategy = SW_LED_MODE4;
- break;
-
- default:
- priv->LedStrategy = SW_LED_MODE0;
- break;
- }
-
- if (priv->rf_type == RF_1T2R)
- RT_TRACE(COMP_EPROM, "\n1T2R config\n");
- else
- RT_TRACE(COMP_EPROM, "\n2T4R config\n");
-
- /* We can only know the RF type in this function, so we have to
- * init the DIG RATR table again.
- */
- init_rate_adaptive(dev);
-
- RT_TRACE(COMP_EPROM, "<===========%s()\n", __func__);
-
- return 0;
-}
-
-static short rtl8192_get_channel_map(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (priv->ChannelPlan > COUNTRY_CODE_GLOBAL_DOMAIN) {
- netdev_err(dev,
- "rtl8180_init: Error channel plan! Set to default.\n");
- priv->ChannelPlan = 0;
- }
- RT_TRACE(COMP_INIT, "Channel plan is %d\n", priv->ChannelPlan);
-
- rtl819x_set_channel_map(priv->ChannelPlan, priv);
- return 0;
-}
-
-static short rtl8192_init(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- int err;
-
- memset(&(priv->stats), 0, sizeof(struct Stats));
- memset(priv->txqueue_to_outpipemap, 0, 9);
-#ifdef PIPE12
- {
- int i = 0;
- static const u8 queuetopipe[] = {3, 2, 1, 0, 4, 8, 7, 6, 5};
-
- memcpy(priv->txqueue_to_outpipemap, queuetopipe, 9);
- }
-#else
- {
- static const u8 queuetopipe[] = {3, 2, 1, 0, 4, 4, 0, 4, 4};
-
- memcpy(priv->txqueue_to_outpipemap, queuetopipe, 9);
- }
-#endif
- err = rtl8192_init_priv_variable(dev);
- if (err)
- return err;
-
- rtl8192_init_priv_lock(priv);
- rtl8192_init_priv_task(dev);
- rtl8192_get_eeprom_size(dev);
- err = rtl8192_read_eeprom_info(dev);
- if (err) {
- DMESG("Reading EEPROM info failed");
- return err;
- }
- rtl8192_get_channel_map(dev);
- init_hal_dm(dev);
- timer_setup(&priv->watch_dog_timer, watch_dog_timer_callback, 0);
- if (rtl8192_usb_initendpoints(dev) != 0) {
- DMESG("Endopoints initialization failed");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/******************************************************************************
- *function: This function actually only sets the RRSR, RATR and BW_OPMODE
- * registers; it does not do all the hw config its name suggests.
- * input: net_device dev
- * output: none
- * return: none
- * notice: This part needs to be modified according to the rate set we filtered.
- * ****************************************************************************/
-static void rtl8192_hwconfig(struct net_device *dev)
-{
- u32 regRATR = 0, regRRSR = 0;
- u8 regBwOpMode = 0, regTmp = 0;
- struct r8192_priv *priv = ieee80211_priv(dev);
- u32 ratr_value = 0;
-
- /* Set RRSR, RATR, and BW_OPMODE registers */
- switch (priv->ieee80211->mode) {
- case WIRELESS_MODE_B:
- regBwOpMode = BW_OPMODE_20MHZ;
- regRATR = RATE_ALL_CCK;
- regRRSR = RATE_ALL_CCK;
- break;
- case WIRELESS_MODE_A:
- regBwOpMode = BW_OPMODE_5G | BW_OPMODE_20MHZ;
- regRATR = RATE_ALL_OFDM_AG;
- regRRSR = RATE_ALL_OFDM_AG;
- break;
- case WIRELESS_MODE_G:
- regBwOpMode = BW_OPMODE_20MHZ;
- regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
- regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
- break;
- case WIRELESS_MODE_AUTO:
- regBwOpMode = BW_OPMODE_20MHZ;
- regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
- RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
- regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
- break;
- case WIRELESS_MODE_N_24G:
- /* It supports CCK rates by default. CCK rates will be filtered
- * out only when the associated AP does not support them.
- */
- regBwOpMode = BW_OPMODE_20MHZ;
- regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
- RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
- regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
- break;
- case WIRELESS_MODE_N_5G:
- regBwOpMode = BW_OPMODE_5G;
- regRATR = RATE_ALL_OFDM_AG | RATE_ALL_OFDM_1SS |
- RATE_ALL_OFDM_2SS;
- regRRSR = RATE_ALL_OFDM_AG;
- break;
- }
-
- write_nic_byte(dev, BW_OPMODE, regBwOpMode);
- ratr_value = regRATR;
- if (priv->rf_type == RF_1T2R)
- ratr_value &= ~(RATE_ALL_OFDM_2SS);
- write_nic_dword(dev, RATR0, ratr_value);
- write_nic_byte(dev, UFWP, 1);
- read_nic_byte(dev, 0x313, &regTmp);
- regRRSR = ((regTmp) << 24) | (regRRSR & 0x00ffffff);
- write_nic_dword(dev, RRSR, regRRSR);
-
- /* Set Retry Limit here */
- write_nic_word(dev, RETRY_LIMIT,
- priv->ShortRetryLimit << RETRY_LIMIT_SHORT_SHIFT |
- priv->LongRetryLimit << RETRY_LIMIT_LONG_SHIFT);
- /* Set Contention Window here */
-
- /* Set Tx AGC */
-
- /* Set Tx Antenna including Feedback control */
-
- /* Set Auto Rate fallback control */
-}
-
-/* InitializeAdapter and PhyCfg */
-static bool rtl8192_adapter_start(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u32 dwRegRead = 0;
- bool init_status = true;
- u8 SECR_value = 0x0;
- u8 tmp;
-
- RT_TRACE(COMP_INIT, "====>%s()\n", __func__);
- priv->Rf_Mode = RF_OP_By_SW_3wire;
- /* for ASIC power on sequence */
- write_nic_byte_E(dev, 0x5f, 0x80);
- mdelay(50);
- write_nic_byte_E(dev, 0x5f, 0xf0);
- write_nic_byte_E(dev, 0x5d, 0x00);
- write_nic_byte_E(dev, 0x5e, 0x80);
- write_nic_byte(dev, 0x17, 0x37);
- mdelay(10);
- priv->pFirmware->firmware_status = FW_STATUS_0_INIT;
- /* config CPUReset Register */
- /* Firmware Reset or not? */
- read_nic_dword(dev, CPU_GEN, &dwRegRead);
- dwRegRead |= CPU_GEN_SYSTEM_RESET; /* do nothing here? */
-
- write_nic_dword(dev, CPU_GEN, dwRegRead);
- /* config BB. */
- rtl8192_BBConfig(dev);
-
- /* Loopback mode or not */
- priv->LoopbackMode = RTL819xU_NO_LOOPBACK;
-
- read_nic_dword(dev, CPU_GEN, &dwRegRead);
- if (priv->LoopbackMode == RTL819xU_NO_LOOPBACK)
- dwRegRead = (dwRegRead & CPU_GEN_NO_LOOPBACK_MSK) |
- CPU_GEN_NO_LOOPBACK_SET;
- else if (priv->LoopbackMode == RTL819xU_MAC_LOOPBACK)
- dwRegRead |= CPU_CCK_LOOPBACK;
- else
- RT_TRACE(COMP_ERR,
- "Serious error in %s(): wrong loopback mode setting(%d)\n",
- __func__, priv->LoopbackMode);
-
- write_nic_dword(dev, CPU_GEN, dwRegRead);
-
- /* After resetting the CPU, we need to wait a while before writing to registers. */
- udelay(500);
-
- /* Added for the new bitfile: set the USB suspend reset pin to 1. Do we need this? */
- read_nic_byte_E(dev, 0x5f, &tmp);
- write_nic_byte_E(dev, 0x5f, tmp | 0x20);
-
- /* Set Hardware */
- rtl8192_hwconfig(dev);
-
- /* turn on Tx/Rx */
- write_nic_byte(dev, CMDR, CR_RE | CR_TE);
-
- /* set IDR0 here */
- write_nic_dword(dev, MAC0, ((u32 *)dev->dev_addr)[0]);
- write_nic_word(dev, MAC4, ((u16 *)(dev->dev_addr + 4))[0]);
-
- /* set RCR */
- write_nic_dword(dev, RCR, priv->ReceiveConfig);
-
- /* Initialize Number of Reserved Pages in Firmware Queue */
- write_nic_dword(dev, RQPN1,
- NUM_OF_PAGE_IN_FW_QUEUE_BK << RSVD_FW_QUEUE_PAGE_BK_SHIFT |
- NUM_OF_PAGE_IN_FW_QUEUE_BE << RSVD_FW_QUEUE_PAGE_BE_SHIFT |
- NUM_OF_PAGE_IN_FW_QUEUE_VI << RSVD_FW_QUEUE_PAGE_VI_SHIFT |
- NUM_OF_PAGE_IN_FW_QUEUE_VO << RSVD_FW_QUEUE_PAGE_VO_SHIFT);
- write_nic_dword(dev, RQPN2,
- NUM_OF_PAGE_IN_FW_QUEUE_MGNT << RSVD_FW_QUEUE_PAGE_MGNT_SHIFT |
- NUM_OF_PAGE_IN_FW_QUEUE_CMD << RSVD_FW_QUEUE_PAGE_CMD_SHIFT);
- write_nic_dword(dev, RQPN3,
- APPLIED_RESERVED_QUEUE_IN_FW |
- NUM_OF_PAGE_IN_FW_QUEUE_BCN << RSVD_FW_QUEUE_PAGE_BCN_SHIFT);
- write_nic_dword(dev, RATR0 + 4 * 7, (RATE_ALL_OFDM_AG | RATE_ALL_CCK));
-
- /* Set AckTimeout */
- /* TODO: this value is only for the FPGA version; it needs to be changed!! */
- write_nic_byte(dev, ACK_TIMEOUT, 0x30);
-
- if (priv->ResetProgress == RESET_TYPE_NORESET)
- rtl8192_SetWirelessMode(dev, priv->ieee80211->mode);
- if (priv->ResetProgress == RESET_TYPE_NORESET) {
- CamResetAllEntry(dev);
- SECR_value |= SCR_TxEncEnable;
- SECR_value |= SCR_RxDecEnable;
- SECR_value |= SCR_NoSKMC;
- write_nic_byte(dev, SECR, SECR_value);
- }
-
- /* Beacon related */
- write_nic_word(dev, ATIMWND, 2);
- write_nic_word(dev, BCN_INTERVAL, 100);
-
-#define DEFAULT_EDCA 0x005e4332
- {
- int i;
-
- for (i = 0; i < QOS_QUEUE_NUM; i++)
- write_nic_dword(dev, WDCAPARA_ADD[i], DEFAULT_EDCA);
- }
-
- rtl8192_phy_configmac(dev);
-
- if (priv->card_8192_version == VERSION_819XU_A) {
- rtl8192_phy_getTxPower(dev);
- rtl8192_phy_setTxPower(dev, priv->chan);
- }
-
- /* Firmware download */
- init_status = init_firmware(dev);
- if (!init_status) {
- RT_TRACE(COMP_ERR, "ERR!!! %s(): Firmware download is failed\n",
- __func__);
- return init_status;
- }
- RT_TRACE(COMP_INIT, "%s():after firmware download\n", __func__);
-
- /* config RF. */
- if (priv->ResetProgress == RESET_TYPE_NORESET) {
- rtl8192_phy_RFConfig(dev);
- RT_TRACE(COMP_INIT, "%s():after phy RF config\n", __func__);
- }
-
- if (priv->ieee80211->FwRWRF)
- /* We can force firmware to do RF-R/W */
- priv->Rf_Mode = RF_OP_By_FW;
- else
- priv->Rf_Mode = RF_OP_By_SW_3wire;
-
- rtl8192_phy_updateInitGain(dev);
- /*--set CCK and OFDM Block "ON"--*/
- rtl8192_setBBreg(dev, rFPGA0_RFMOD, bCCKEn, 0x1);
- rtl8192_setBBreg(dev, rFPGA0_RFMOD, bOFDMEn, 0x1);
-
- if (priv->ResetProgress == RESET_TYPE_NORESET) {
- /* if D or C cut */
- u8 tmpvalue;
-
- read_nic_byte(dev, 0x301, &tmpvalue);
- if (tmpvalue == 0x03) {
- priv->bDcut = true;
- RT_TRACE(COMP_POWER_TRACKING, "D-cut\n");
- } else {
- priv->bDcut = false;
- RT_TRACE(COMP_POWER_TRACKING, "C-cut\n");
- }
- dm_initialize_txpower_tracking(dev);
-
- if (priv->bDcut) {
- u32 i, TempCCk;
- u32 tmpRegA = rtl8192_QueryBBReg(dev,
- rOFDM0_XATxIQImbalance,
- bMaskDWord);
-
- for (i = 0; i < TxBBGainTableLength; i++) {
- if (tmpRegA == priv->txbbgain_table[i].txbbgain_value) {
- priv->rfa_txpowertrackingindex = (u8)i;
- priv->rfa_txpowertrackingindex_real =
- (u8)i;
- priv->rfa_txpowertracking_default =
- priv->rfa_txpowertrackingindex;
- break;
- }
- }
-
- TempCCk = rtl8192_QueryBBReg(dev,
- rCCK0_TxFilter1,
- bMaskByte2);
-
- for (i = 0; i < CCKTxBBGainTableLength; i++) {
- if (TempCCk == priv->cck_txbbgain_table[i].ccktxbb_valuearray[0]) {
- priv->cck_present_attenuation_20Mdefault = (u8)i;
- break;
- }
- }
- priv->cck_present_attenuation_40Mdefault = 0;
- priv->cck_present_attenuation_difference = 0;
- priv->cck_present_attenuation =
- priv->cck_present_attenuation_20Mdefault;
- }
- }
- write_nic_byte(dev, 0x87, 0x0);
-
- return init_status;
-}
-
-/* this configures registers for beacon tx and enables it via
- * rtl8192_beacon_tx_enable(). rtl8192_beacon_tx_disable() might
- * be used to stop beacon transmission
- */
-/***************************************************************************
- * -------------------------------NET STUFF---------------------------
- ***************************************************************************/
-
-static struct net_device_stats *rtl8192_stats(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- return &priv->ieee80211->stats;
-}
-
-static bool HalTxCheckStuck819xUsb(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u16 RegTxCounter;
- bool bStuck = false;
-
- read_nic_word(dev, 0x128, &RegTxCounter);
- RT_TRACE(COMP_RESET,
- "%s():RegTxCounter is %d,TxCounter is %d\n", __func__,
- RegTxCounter, priv->TxCounter);
- if (priv->TxCounter == RegTxCounter)
- bStuck = true;
-
- priv->TxCounter = RegTxCounter;
-
- return bStuck;
-}
-
-/*
- * <Assumption: RT_TX_SPINLOCK is acquired.>
- * First added: 2006.11.19 by emily
- */
-static RESET_TYPE TxCheckStuck(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 QueueID;
- bool bCheckFwTxCnt = false;
-
- /* Decide the threshold according to the current power save mode */
-
- for (QueueID = 0; QueueID <= BEACON_QUEUE; QueueID++) {
- if (QueueID == TXCMD_QUEUE)
- continue;
- if ((skb_queue_len(&priv->ieee80211->skb_waitQ[QueueID]) == 0) && (skb_queue_len(&priv->ieee80211->skb_aggQ[QueueID]) == 0))
- continue;
-
- bCheckFwTxCnt = true;
- }
- if (bCheckFwTxCnt) {
- if (HalTxCheckStuck819xUsb(dev)) {
- RT_TRACE(COMP_RESET,
- "%s: Fw indicates no Tx condition!\n",
- __func__);
- return RESET_TYPE_SILENT;
- }
- }
- return RESET_TYPE_NORESET;
-}
-
-static bool HalRxCheckStuck819xUsb(struct net_device *dev)
-{
- u16 RegRxCounter;
- struct r8192_priv *priv = ieee80211_priv(dev);
- bool bStuck = false;
- static u8 rx_chk_cnt;
-
- read_nic_word(dev, 0x130, &RegRxCounter);
- RT_TRACE(COMP_RESET,
- "%s(): RegRxCounter is %d,RxCounter is %d\n", __func__,
- RegRxCounter, priv->RxCounter);
- /* If the RSSI is small, we should check rx stuck over a longer time
- * because of bad rx; otherwise it may continuously silent-reset every
- * 2 seconds.
- */
- rx_chk_cnt++;
- if (priv->undecorated_smoothed_pwdb >= (RATE_ADAPTIVE_TH_HIGH + 5)) {
- rx_chk_cnt = 0; /* high rssi, check rx stuck right now. */
- } else if (priv->undecorated_smoothed_pwdb < (RATE_ADAPTIVE_TH_HIGH + 5) &&
- ((priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20 && priv->undecorated_smoothed_pwdb >= RATE_ADAPTIVE_TH_LOW_40M) ||
- (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 && priv->undecorated_smoothed_pwdb >= RATE_ADAPTIVE_TH_LOW_20M))) {
- if (rx_chk_cnt < 2)
- return bStuck;
-
- rx_chk_cnt = 0;
- } else if (((priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20 && priv->undecorated_smoothed_pwdb < RATE_ADAPTIVE_TH_LOW_40M) ||
- (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 && priv->undecorated_smoothed_pwdb < RATE_ADAPTIVE_TH_LOW_20M)) &&
- priv->undecorated_smoothed_pwdb >= VERY_LOW_RSSI) {
- if (rx_chk_cnt < 4)
- return bStuck;
-
- rx_chk_cnt = 0;
- } else {
- if (rx_chk_cnt < 8)
- return bStuck;
-
- rx_chk_cnt = 0;
- }
-
- if (priv->RxCounter == RegRxCounter)
- bStuck = true;
-
- priv->RxCounter = RegRxCounter;
-
- return bStuck;
-}
-
-static RESET_TYPE RxCheckStuck(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- bool bRxCheck = false;
-
- if (priv->IrpPendingCount > 1)
- bRxCheck = true;
-
- if (bRxCheck) {
- if (HalRxCheckStuck819xUsb(dev)) {
- RT_TRACE(COMP_RESET, "RxStuck Condition\n");
- return RESET_TYPE_SILENT;
- }
- }
- return RESET_TYPE_NORESET;
-}
-
-/*
- * This function is called by Checkforhang to check whether we should
- * ask the OS to reset the driver.
- *
- * Note: a NIC with a USB interface should not call this function because we
- * cannot scan the descriptors to judge whether there is a tx stuck.
- * Note: This function may need to be rewritten for Vista OS.
- * <<<Assumption: Tx spinlock has been acquired >>>
- *
- * 8185 and 8185b do not implement this function.
- */
-static RESET_TYPE rtl819x_ifcheck_resetornot(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- RESET_TYPE TxResetType = RESET_TYPE_NORESET;
- RESET_TYPE RxResetType = RESET_TYPE_NORESET;
- RT_RF_POWER_STATE rfState;
-
- rfState = priv->ieee80211->eRFPowerState;
-
- TxResetType = TxCheckStuck(dev);
- if (rfState != eRfOff ||
- (priv->ieee80211->iw_mode != IW_MODE_ADHOC)) {
- /* If driver is in the status of firmware download failure,
- * driver skips RF initialization and RF is in turned off
- * state. Driver should check whether Rx stuck and do silent
- * reset. And if driver is in firmware download failure status,
- * driver should initialize RF in the following silent reset
- * procedure
- *
- * Driver should not check RX stuck in IBSS mode because it is
- * required to set Check BSSID in order to send beacon,
- * however, if check BSSID is set, STA cannot hear any packet
- * at all.
- */
- RxResetType = RxCheckStuck(dev);
- }
- if (TxResetType == RESET_TYPE_NORMAL ||
- RxResetType == RESET_TYPE_NORMAL) {
- return RESET_TYPE_NORMAL;
- } else if (TxResetType == RESET_TYPE_SILENT ||
- RxResetType == RESET_TYPE_SILENT) {
- RT_TRACE(COMP_RESET, "%s():silent reset\n", __func__);
- return RESET_TYPE_SILENT;
- } else {
- return RESET_TYPE_NORESET;
- }
-}
-
-static void rtl8192_cancel_deferred_work(struct r8192_priv *priv);
-static int _rtl8192_up(struct net_device *dev);
-static int rtl8192_close(struct net_device *dev);
-
-static void CamRestoreAllEntry(struct net_device *dev)
-{
- u8 EntryId = 0;
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 *MacAddr = priv->ieee80211->current_network.bssid;
-
- static u8 CAM_CONST_ADDR[4][6] = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x03} };
- static u8 CAM_CONST_BROAD[] = {
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
- RT_TRACE(COMP_SEC, "%s:\n", __func__);
-
- if ((priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP40) ||
- (priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP104)) {
- for (EntryId = 0; EntryId < 4; EntryId++) {
- MacAddr = CAM_CONST_ADDR[EntryId];
- setKey(dev, EntryId, EntryId,
- priv->ieee80211->pairwise_key_type,
- MacAddr, 0, NULL);
- }
-
- } else if (priv->ieee80211->pairwise_key_type == KEY_TYPE_TKIP) {
- if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
- setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type,
- (const u8 *)dev->dev_addr, 0, NULL);
- else
- setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type,
- MacAddr, 0, NULL);
- } else if (priv->ieee80211->pairwise_key_type == KEY_TYPE_CCMP) {
- if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
- setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type,
- (const u8 *)dev->dev_addr, 0, NULL);
- else
- setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type,
- MacAddr, 0, NULL);
- }
-
- if (priv->ieee80211->group_key_type == KEY_TYPE_TKIP) {
- MacAddr = CAM_CONST_BROAD;
- for (EntryId = 1; EntryId < 4; EntryId++) {
- setKey(dev, EntryId, EntryId,
- priv->ieee80211->group_key_type,
- MacAddr, 0, NULL);
- }
- if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
- setKey(dev, 0, 0, priv->ieee80211->group_key_type,
- CAM_CONST_ADDR[0], 0, NULL);
- } else if (priv->ieee80211->group_key_type == KEY_TYPE_CCMP) {
- MacAddr = CAM_CONST_BROAD;
- for (EntryId = 1; EntryId < 4; EntryId++) {
- setKey(dev, EntryId, EntryId,
- priv->ieee80211->group_key_type,
- MacAddr, 0, NULL);
- }
-
- if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
- setKey(dev, 0, 0, priv->ieee80211->group_key_type,
- CAM_CONST_ADDR[0], 0, NULL);
- }
-}
-
-/* This function is used to work around the Tx/Rx stop bug temporarily.
- * It performs a "system reset" of the NIC when Tx or Rx is stuck.
- * The Tx/Rx stuck detection used by this function is supported by the FW,
- * which reports the Tx and Rx counters to registers 0x128 and 0x130.
- */
-static void rtl819x_ifsilentreset(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 reset_times = 0;
- int reset_status = 0;
- struct ieee80211_device *ieee = priv->ieee80211;
-
- /* If we need to check CCK stop, please uncomment this line. */
- /* bStuck = Adapter->HalFunc.CheckHWStopHandler(Adapter); */
-
- if (priv->ResetProgress == RESET_TYPE_NORESET) {
-RESET_START:
-
- RT_TRACE(COMP_RESET, "=========>Reset progress!!\n");
-
- /* Set the variable for reset. */
- priv->ResetProgress = RESET_TYPE_SILENT;
- mutex_lock(&priv->wx_mutex);
- if (priv->up == 0) {
- RT_TRACE(COMP_ERR,
- "%s():the driver is not up! return\n",
- __func__);
- mutex_unlock(&priv->wx_mutex);
- return;
- }
- priv->up = 0;
- RT_TRACE(COMP_RESET,
- "%s():======>start to down the driver\n",
- __func__);
-
- rtl8192_rtx_disable(dev);
- rtl8192_cancel_deferred_work(priv);
- deinit_hal_dm(dev);
- del_timer_sync(&priv->watch_dog_timer);
-
- ieee->sync_scan_hurryup = 1;
- if (ieee->state == IEEE80211_LINKED) {
- mutex_lock(&ieee->wx_mutex);
- netdev_dbg(dev, "ieee->state is IEEE80211_LINKED\n");
- ieee80211_stop_send_beacons(priv->ieee80211);
- del_timer_sync(&ieee->associate_timer);
- cancel_delayed_work(&ieee->associate_retry_wq);
- ieee80211_stop_scan(ieee);
- netif_carrier_off(dev);
- mutex_unlock(&ieee->wx_mutex);
- } else {
- netdev_dbg(dev, "ieee->state is NOT LINKED\n");
- ieee80211_softmac_stop_protocol(priv->ieee80211);
- }
- mutex_unlock(&priv->wx_mutex);
- RT_TRACE(COMP_RESET,
- "%s():<==========down process is finished\n",
- __func__);
- RT_TRACE(COMP_RESET,
- "%s():===========>start up the driver\n",
- __func__);
- reset_status = _rtl8192_up(dev);
-
- RT_TRACE(COMP_RESET,
- "%s():<===========up process is finished\n",
- __func__);
- if (reset_status == -EAGAIN) {
- if (reset_times < 3) {
- reset_times++;
- goto RESET_START;
- } else {
- RT_TRACE(COMP_ERR,
- " ERR!!! %s(): Reset Failed!!\n",
- __func__);
- }
- }
- ieee->is_silent_reset = 1;
- EnableHWSecurityConfig8192(dev);
- if (ieee->state == IEEE80211_LINKED &&
- ieee->iw_mode == IW_MODE_INFRA) {
- ieee->set_chan(ieee->dev,
- ieee->current_network.channel);
-
- queue_work(ieee->wq, &ieee->associate_complete_wq);
-
- } else if (ieee->state == IEEE80211_LINKED &&
- ieee->iw_mode == IW_MODE_ADHOC) {
- ieee->set_chan(ieee->dev,
- ieee->current_network.channel);
- ieee->link_change(ieee->dev);
-
- ieee80211_start_send_beacons(ieee);
-
- if (ieee->data_hard_resume)
- ieee->data_hard_resume(ieee->dev);
- netif_carrier_on(ieee->dev);
- }
-
- CamRestoreAllEntry(dev);
-
- priv->ResetProgress = RESET_TYPE_NORESET;
- priv->reset_count++;
-
- priv->bForcedSilentReset = false;
- priv->bResetInProgress = false;
-
- /* For test --> force write UFWP. */
- write_nic_byte(dev, UFWP, 1);
- RT_TRACE(COMP_RESET,
- "Reset finished!! ====>[%d]\n",
- priv->reset_count);
- }
-}
-
-static void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
- u32 *TotalRxDataNum)
-{
- u16 SlotIndex;
- u16 i;
-
- *TotalRxBcnNum = 0;
- *TotalRxDataNum = 0;
-
- SlotIndex = (priv->ieee80211->LinkDetectInfo.SlotIndex++) %
- (priv->ieee80211->LinkDetectInfo.SlotNum);
- priv->ieee80211->LinkDetectInfo.RxBcnNum[SlotIndex] =
- priv->ieee80211->LinkDetectInfo.NumRecvBcnInPeriod;
- priv->ieee80211->LinkDetectInfo.RxDataNum[SlotIndex] =
- priv->ieee80211->LinkDetectInfo.NumRecvDataInPeriod;
- for (i = 0; i < priv->ieee80211->LinkDetectInfo.SlotNum; i++) {
- *TotalRxBcnNum += priv->ieee80211->LinkDetectInfo.RxBcnNum[i];
- *TotalRxDataNum += priv->ieee80211->LinkDetectInfo.RxDataNum[i];
- }
-}
-
-static void rtl819x_watchdog_wqcallback(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct r8192_priv *priv = container_of(dwork,
- struct r8192_priv, watch_dog_wq);
- struct net_device *dev = priv->ieee80211->dev;
- struct ieee80211_device *ieee = priv->ieee80211;
- RESET_TYPE ResetType = RESET_TYPE_NORESET;
- static u8 check_reset_cnt;
- bool bBusyTraffic = false;
- u32 TotalRxBcnNum = 0;
- u32 TotalRxDataNum = 0;
-
- if (!priv->up)
- return;
- hal_dm_watchdog(dev);
-
- /* to get busy traffic condition */
- if (ieee->state == IEEE80211_LINKED) {
- if (ieee->LinkDetectInfo.NumRxOkInPeriod > 666 ||
- ieee->LinkDetectInfo.NumTxOkInPeriod > 666) {
- bBusyTraffic = true;
- }
- ieee->LinkDetectInfo.NumRxOkInPeriod = 0;
- ieee->LinkDetectInfo.NumTxOkInPeriod = 0;
- ieee->LinkDetectInfo.bBusyTraffic = bBusyTraffic;
- }
- /* for AP roaming */
- if (priv->ieee80211->state == IEEE80211_LINKED &&
- priv->ieee80211->iw_mode == IW_MODE_INFRA) {
- rtl819x_update_rxcounts(priv, &TotalRxBcnNum, &TotalRxDataNum);
- if ((TotalRxBcnNum + TotalRxDataNum) == 0) {
-#ifdef TODO
- if (rfState == eRfOff)
- RT_TRACE(COMP_ERR, "========>%s()\n", __func__);
-#endif
- netdev_dbg(dev,
- "===>%s(): AP is power off, connect another one\n",
- __func__);
- priv->ieee80211->state = IEEE80211_ASSOCIATING;
- notify_wx_assoc_event(priv->ieee80211);
- RemovePeerTS(priv->ieee80211,
- priv->ieee80211->current_network.bssid);
- priv->ieee80211->link_change(dev);
- queue_work(priv->ieee80211->wq,
- &priv->ieee80211->associate_procedure_wq);
- }
- }
- priv->ieee80211->LinkDetectInfo.NumRecvBcnInPeriod = 0;
- priv->ieee80211->LinkDetectInfo.NumRecvDataInPeriod = 0;
- /* check if reset the driver */
- if (check_reset_cnt++ >= 3) {
- ResetType = rtl819x_ifcheck_resetornot(dev);
- check_reset_cnt = 3;
- }
- /* This is controlled by an OID set in Pomelo */
- if ((priv->force_reset) || (priv->ResetProgress == RESET_TYPE_NORESET &&
- (priv->bForcedSilentReset ||
- (!priv->bDisableNormalResetCheck && ResetType == RESET_TYPE_SILENT)))) {
- RT_TRACE(COMP_RESET,
- "%s():priv->force_reset is %d,priv->ResetProgress is %d, priv->bForcedSilentReset is %d,priv->bDisableNormalResetCheck is %d,ResetType is %d\n",
- __func__, priv->force_reset, priv->ResetProgress,
- priv->bForcedSilentReset,
- priv->bDisableNormalResetCheck, ResetType);
- rtl819x_ifsilentreset(dev);
- }
- priv->force_reset = false;
- priv->bForcedSilentReset = false;
- priv->bResetInProgress = false;
- RT_TRACE(COMP_TRACE, " <==RtUsbCheckForHangWorkItemCallback()\n");
-}
-
-static void watch_dog_timer_callback(struct timer_list *t)
-{
- struct r8192_priv *priv = from_timer(priv, t, watch_dog_timer);
-
- schedule_delayed_work(&priv->watch_dog_wq, 0);
- mod_timer(&priv->watch_dog_timer,
- jiffies + msecs_to_jiffies(IEEE80211_WATCH_DOG_TIME));
-}
-
-static int _rtl8192_up(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- int init_status = 0;
-
- priv->up = 1;
- priv->ieee80211->ieee_up = 1;
- RT_TRACE(COMP_INIT, "Bringing up iface");
- init_status = rtl8192_adapter_start(dev);
- if (!init_status) {
- RT_TRACE(COMP_ERR, "ERR!!! %s(): initialization failed!\n",
- __func__);
- priv->up = priv->ieee80211->ieee_up = 0;
- return -EAGAIN;
- }
- RT_TRACE(COMP_INIT, "start adapter finished\n");
- rtl8192_rx_enable(dev);
- if (priv->ieee80211->state != IEEE80211_LINKED)
- ieee80211_softmac_start_protocol(priv->ieee80211);
- ieee80211_reset_queue(priv->ieee80211);
- watch_dog_timer_callback(&priv->watch_dog_timer);
- if (!netif_queue_stopped(dev))
- netif_start_queue(dev);
- else
- netif_wake_queue(dev);
-
- return 0;
-}
-
-static int rtl8192_open(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- int ret;
-
- mutex_lock(&priv->wx_mutex);
- ret = rtl8192_up(dev);
- mutex_unlock(&priv->wx_mutex);
- return ret;
-}
-
-int rtl8192_up(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (priv->up == 1)
- return -1;
-
- return _rtl8192_up(dev);
-}
-
-static int rtl8192_close(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- int ret;
-
- mutex_lock(&priv->wx_mutex);
-
- ret = rtl8192_down(dev);
-
- mutex_unlock(&priv->wx_mutex);
-
- return ret;
-}
-
-int rtl8192_down(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- int i;
-
- if (priv->up == 0)
- return -1;
-
- priv->up = 0;
- priv->ieee80211->ieee_up = 0;
- RT_TRACE(COMP_DOWN, "==========>%s()\n", __func__);
- /* FIXME */
- if (!netif_queue_stopped(dev))
- netif_stop_queue(dev);
-
- rtl8192_rtx_disable(dev);
-
- /* Tx related queue release */
- for (i = 0; i < MAX_QUEUE_SIZE; i++)
- skb_queue_purge(&priv->ieee80211->skb_waitQ[i]);
- for (i = 0; i < MAX_QUEUE_SIZE; i++)
- skb_queue_purge(&priv->ieee80211->skb_aggQ[i]);
-
- for (i = 0; i < MAX_QUEUE_SIZE; i++)
- skb_queue_purge(&priv->ieee80211->skb_drv_aggQ[i]);
-
-	/* cancel_delayed_work() will delete work->timer, so if the work item
-	 * is not defined as a struct delayed_work, memory will be corrupted.
-	 */
- rtl8192_cancel_deferred_work(priv);
- deinit_hal_dm(dev);
- del_timer_sync(&priv->watch_dog_timer);
-
- ieee80211_softmac_stop_protocol(priv->ieee80211);
- memset(&priv->ieee80211->current_network, 0,
- offsetof(struct ieee80211_network, list));
- RT_TRACE(COMP_DOWN, "<==========%s()\n", __func__);
-
- return 0;
-}
-
-void rtl8192_commit(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (priv->up == 0)
- return;
- priv->up = 0;
-
- rtl8192_cancel_deferred_work(priv);
- del_timer_sync(&priv->watch_dog_timer);
-
- ieee80211_softmac_stop_protocol(priv->ieee80211);
-
- rtl8192_rtx_disable(dev);
- _rtl8192_up(dev);
-}
-
-static void rtl8192_restart(struct work_struct *work)
-{
- struct r8192_priv *priv = container_of(work, struct r8192_priv,
- reset_wq);
- struct net_device *dev = priv->ieee80211->dev;
-
- mutex_lock(&priv->wx_mutex);
-
- rtl8192_commit(dev);
-
- mutex_unlock(&priv->wx_mutex);
-}
-
-static void r8192_set_multicast(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- short promisc;
-
- /* FIXME FIXME */
-
- promisc = (dev->flags & IFF_PROMISC) ? 1 : 0;
-
-	if (promisc != priv->promisc)
-		priv->promisc = promisc;
-}
-
-static int r8192_set_mac_adr(struct net_device *dev, void *mac)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct sockaddr *addr = mac;
-
- mutex_lock(&priv->wx_mutex);
-
- eth_hw_addr_set(dev, addr->sa_data);
-
- schedule_work(&priv->reset_wq);
- mutex_unlock(&priv->wx_mutex);
-
- return 0;
-}
-
-/* based on ipw2200 driver */
-static int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- struct iwreq *wrq = (struct iwreq *)rq;
- int ret = -1;
- struct ieee80211_device *ieee = priv->ieee80211;
- u32 key[4];
- u8 broadcast_addr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- struct iw_point *p = &wrq->u.data;
- struct ieee_param *ipw = NULL;
-
- mutex_lock(&priv->wx_mutex);
-
- if (p->length < sizeof(struct ieee_param) || !p->pointer) {
- ret = -EINVAL;
- goto out;
- }
-
- ipw = memdup_user(p->pointer, p->length);
- if (IS_ERR(ipw)) {
- ret = PTR_ERR(ipw);
- goto out;
- }
-
- switch (cmd) {
- case RTL_IOCTL_WPA_SUPPLICANT:
- /* parse here for HW security */
- if (ipw->cmd == IEEE_CMD_SET_ENCRYPTION) {
- if (ipw->u.crypt.set_tx) {
- if (strcmp(ipw->u.crypt.alg, "CCMP") == 0) {
- ieee->pairwise_key_type = KEY_TYPE_CCMP;
- } else if (strcmp(ipw->u.crypt.alg, "TKIP") == 0) {
- ieee->pairwise_key_type = KEY_TYPE_TKIP;
- } else if (strcmp(ipw->u.crypt.alg, "WEP") == 0) {
- if (ipw->u.crypt.key_len == 13)
- ieee->pairwise_key_type = KEY_TYPE_WEP104;
- else if (ipw->u.crypt.key_len == 5)
- ieee->pairwise_key_type = KEY_TYPE_WEP40;
- } else {
- ieee->pairwise_key_type = KEY_TYPE_NA;
- }
-
- if (ieee->pairwise_key_type) {
- memcpy((u8 *)key, ipw->u.crypt.key, 16);
- EnableHWSecurityConfig8192(dev);
-				/* We fill both the index entry and the 4th
-				 * entry for the pairwise key, as in the IPW
-				 * interface; adhoc will only get here, so we
-				 * need the index entry for its default key
-				 * searching!
-				 */
- setKey(dev, 4, ipw->u.crypt.idx,
- ieee->pairwise_key_type,
- (u8 *)ieee->ap_mac_addr,
- 0, key);
- if (ieee->auth_mode != 2)
- setKey(dev, ipw->u.crypt.idx,
- ipw->u.crypt.idx,
- ieee->pairwise_key_type,
- (u8 *)ieee->ap_mac_addr,
- 0, key);
- }
- } else {
- memcpy((u8 *)key, ipw->u.crypt.key, 16);
- if (strcmp(ipw->u.crypt.alg, "CCMP") == 0) {
- ieee->group_key_type = KEY_TYPE_CCMP;
- } else if (strcmp(ipw->u.crypt.alg, "TKIP") == 0) {
- ieee->group_key_type = KEY_TYPE_TKIP;
- } else if (strcmp(ipw->u.crypt.alg, "WEP") == 0) {
- if (ipw->u.crypt.key_len == 13)
- ieee->group_key_type = KEY_TYPE_WEP104;
- else if (ipw->u.crypt.key_len == 5)
- ieee->group_key_type = KEY_TYPE_WEP40;
- } else {
- ieee->group_key_type = KEY_TYPE_NA;
- }
-
- if (ieee->group_key_type) {
- setKey(dev, ipw->u.crypt.idx,
- /* KeyIndex */
- ipw->u.crypt.idx,
- /* KeyType */
- ieee->group_key_type,
- /* MacAddr */
- broadcast_addr,
- /* DefaultKey */
- 0,
- /* KeyContent */
- key);
- }
- }
- }
- ret = ieee80211_wpa_supplicant_ioctl(priv->ieee80211,
- &wrq->u.data);
- break;
-
- default:
- ret = -EOPNOTSUPP;
- break;
- }
- kfree(ipw);
- ipw = NULL;
-out:
- mutex_unlock(&priv->wx_mutex);
- return ret;
-}
-
-static u8 HwRateToMRate90(bool bIsHT, u8 rate)
-{
- u8 ret_rate = 0xff;
-
- if (!bIsHT) {
- switch (rate) {
- case DESC90_RATE1M:
- ret_rate = MGN_1M;
- break;
- case DESC90_RATE2M:
- ret_rate = MGN_2M;
- break;
- case DESC90_RATE5_5M:
- ret_rate = MGN_5_5M;
- break;
- case DESC90_RATE11M:
- ret_rate = MGN_11M;
- break;
- case DESC90_RATE6M:
- ret_rate = MGN_6M;
- break;
- case DESC90_RATE9M:
- ret_rate = MGN_9M;
- break;
- case DESC90_RATE12M:
- ret_rate = MGN_12M;
- break;
- case DESC90_RATE18M:
- ret_rate = MGN_18M;
- break;
- case DESC90_RATE24M:
- ret_rate = MGN_24M;
- break;
- case DESC90_RATE36M:
- ret_rate = MGN_36M;
- break;
- case DESC90_RATE48M:
- ret_rate = MGN_48M;
- break;
- case DESC90_RATE54M:
- ret_rate = MGN_54M;
- break;
-
- default:
- ret_rate = 0xff;
- RT_TRACE(COMP_RECV,
- "%s: Non supported Rate [%x], bIsHT = %d!!!\n",
- __func__, rate, bIsHT);
- break;
- }
-
- } else {
- switch (rate) {
- case DESC90_RATEMCS0:
- ret_rate = MGN_MCS0;
- break;
- case DESC90_RATEMCS1:
- ret_rate = MGN_MCS1;
- break;
- case DESC90_RATEMCS2:
- ret_rate = MGN_MCS2;
- break;
- case DESC90_RATEMCS3:
- ret_rate = MGN_MCS3;
- break;
- case DESC90_RATEMCS4:
- ret_rate = MGN_MCS4;
- break;
- case DESC90_RATEMCS5:
- ret_rate = MGN_MCS5;
- break;
- case DESC90_RATEMCS6:
- ret_rate = MGN_MCS6;
- break;
- case DESC90_RATEMCS7:
- ret_rate = MGN_MCS7;
- break;
- case DESC90_RATEMCS8:
- ret_rate = MGN_MCS8;
- break;
- case DESC90_RATEMCS9:
- ret_rate = MGN_MCS9;
- break;
- case DESC90_RATEMCS10:
- ret_rate = MGN_MCS10;
- break;
- case DESC90_RATEMCS11:
- ret_rate = MGN_MCS11;
- break;
- case DESC90_RATEMCS12:
- ret_rate = MGN_MCS12;
- break;
- case DESC90_RATEMCS13:
- ret_rate = MGN_MCS13;
- break;
- case DESC90_RATEMCS14:
- ret_rate = MGN_MCS14;
- break;
- case DESC90_RATEMCS15:
- ret_rate = MGN_MCS15;
- break;
- case DESC90_RATEMCS32:
- ret_rate = 0x80 | 0x20;
- break;
-
- default:
- ret_rate = 0xff;
- RT_TRACE(COMP_RECV,
- "%s: Non supported Rate [%x], bIsHT = %d!!!\n",
- __func__, rate, bIsHT);
- break;
- }
- }
-
- return ret_rate;
-}
-
-/*
- * Function: UpdateRxPktTimeStamp
- * Overview: Record the TSF time stamp when receiving a packet
- *
- * Input:
- *	struct net_device *dev
- *	struct ieee80211_rx_stats *stats
- *
- * Output:
- *	struct ieee80211_rx_stats *stats
- *	(stats->mac_time[1], the TSF high word, is updated)
- *	(stats->mac_time[0], the TSF low word, is updated)
- * Return:
- * None
- */
-static void UpdateRxPktTimeStamp8190(struct net_device *dev,
- struct ieee80211_rx_stats *stats)
-{
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
-
- if (stats->bIsAMPDU && !stats->bFirstMPDU) {
- stats->mac_time[0] = priv->LastRxDescTSFLow;
- stats->mac_time[1] = priv->LastRxDescTSFHigh;
- } else {
- priv->LastRxDescTSFLow = stats->mac_time[0];
- priv->LastRxDescTSFHigh = stats->mac_time[1];
- }
-}
-
-/* 0-100 index. */
-static long rtl819x_translate_todbm(u8 signal_strength_index)
-{
- long signal_power; /* in dBm. */
-
- /* Translate to dBm (x=0.5y-95). */
- signal_power = (long)((signal_strength_index + 1) >> 1);
- signal_power -= 95;
-
- return signal_power;
-}
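A worked example of the x = 0.5y - 95 translation above (illustrative only, not part of the driver): an index of 60 gives ((60 + 1) >> 1) - 95 = -65 dBm, an index of 0 gives -95 dBm, and an index of 100 gives -45 dBm.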
-
-/* We cannot declare the RSSI/EVM total value of the sliding window as a
- * local static. Otherwise, it may keep growing after we return from S3/S4,
- * since the value is preserved in memory or on disk. Declare the value in
- * the adapter instead, so it is reinitialized when returning from S3/S4.
- */
-static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
- struct ieee80211_rx_stats *pprevious_stats,
- struct ieee80211_rx_stats *pcurrent_stats)
-{
- bool bcheck = false;
- u8 rfpath;
- u32 nspatial_stream, tmp_val;
- static u32 slide_rssi_index, slide_rssi_statistics;
- static u32 slide_evm_index, slide_evm_statistics;
- static u32 last_rssi, last_evm;
-
- static u32 slide_beacon_adc_pwdb_index;
- static u32 slide_beacon_adc_pwdb_statistics;
- static u32 last_beacon_adc_pwdb;
-
- struct rtl_80211_hdr_3addr *hdr;
- u16 sc;
- unsigned int seq;
-
- hdr = (struct rtl_80211_hdr_3addr *)buffer;
- sc = le16_to_cpu(hdr->seq_ctl);
- seq = WLAN_GET_SEQ_SEQ(sc);
- /* to record the sequence number */
- pcurrent_stats->Seq_Num = seq;
-
-	/* Check whether we should take the previous packet into account */
- if (!pprevious_stats->bIsAMPDU) {
- /* if previous packet is not aggregated packet */
- bcheck = true;
- }
-
- if (slide_rssi_statistics++ >= PHY_RSSI_SLID_WIN_MAX) {
- slide_rssi_statistics = PHY_RSSI_SLID_WIN_MAX;
- last_rssi = priv->stats.slide_signal_strength[slide_rssi_index];
- priv->stats.slide_rssi_total -= last_rssi;
- }
- priv->stats.slide_rssi_total += pprevious_stats->SignalStrength;
-
- priv->stats.slide_signal_strength[slide_rssi_index++] =
- pprevious_stats->SignalStrength;
- if (slide_rssi_index >= PHY_RSSI_SLID_WIN_MAX)
- slide_rssi_index = 0;
-
-	/* <1> Shown on UI for the user, in dBm */
- tmp_val = priv->stats.slide_rssi_total / slide_rssi_statistics;
- priv->stats.signal_strength = rtl819x_translate_todbm((u8)tmp_val);
- pcurrent_stats->rssi = priv->stats.signal_strength;
-
- /* If the previous packet does not match the criteria, neglect it */
- if (!pprevious_stats->bPacketMatchBSSID) {
- if (!pprevious_stats->bToSelfBA)
- return;
- }
-
- if (!bcheck)
- return;
-
- /* only rtl8190 supported
- * rtl8190_process_cck_rxpathsel(priv,pprevious_stats);
- */
-
- /* Check RSSI */
- priv->stats.num_process_phyinfo++;
-
- /* record the general signal strength to the sliding window. */
-
-	/* <2> Shown on UI for engineering.
-	 * Hardware does not provide RSSI information for each RF path in CCK.
-	 */
- if (!pprevious_stats->bIsCCK &&
- (pprevious_stats->bPacketToSelf || pprevious_stats->bToSelfBA)) {
- for (rfpath = RF90_PATH_A; rfpath < priv->NumTotalRFPath; rfpath++) {
- if (!rtl8192_phy_CheckIsLegalRFPath(priv->ieee80211->dev,
- rfpath))
- continue;
- if (priv->stats.rx_rssi_percentage[rfpath] == 0)
- priv->stats.rx_rssi_percentage[rfpath] =
- pprevious_stats->RxMIMOSignalStrength[rfpath];
- if (pprevious_stats->RxMIMOSignalStrength[rfpath] > priv->stats.rx_rssi_percentage[rfpath]) {
- priv->stats.rx_rssi_percentage[rfpath] =
- ((priv->stats.rx_rssi_percentage[rfpath] * (RX_SMOOTH_FACTOR - 1)) +
- (pprevious_stats->RxMIMOSignalStrength[rfpath])) / (RX_SMOOTH_FACTOR);
- priv->stats.rx_rssi_percentage[rfpath] = priv->stats.rx_rssi_percentage[rfpath] + 1;
- } else {
- priv->stats.rx_rssi_percentage[rfpath] =
- ((priv->stats.rx_rssi_percentage[rfpath] * (RX_SMOOTH_FACTOR - 1)) +
- (pprevious_stats->RxMIMOSignalStrength[rfpath])) / (RX_SMOOTH_FACTOR);
- }
- RT_TRACE(COMP_DBG,
- "priv->stats.rx_rssi_percentage[rfPath] = %d\n",
- priv->stats.rx_rssi_percentage[rfpath]);
- }
- }
-
- /* Check PWDB. */
- RT_TRACE(COMP_RXDESC, "Smooth %s PWDB = %d\n",
- pprevious_stats->bIsCCK ? "CCK" : "OFDM",
- pprevious_stats->RxPWDBAll);
-
- if (pprevious_stats->bPacketBeacon) {
- /* record the beacon pwdb to the sliding window. */
- if (slide_beacon_adc_pwdb_statistics++ >= PHY_Beacon_RSSI_SLID_WIN_MAX) {
- slide_beacon_adc_pwdb_statistics = PHY_Beacon_RSSI_SLID_WIN_MAX;
- last_beacon_adc_pwdb = priv->stats.Slide_Beacon_pwdb[slide_beacon_adc_pwdb_index];
- priv->stats.Slide_Beacon_Total -= last_beacon_adc_pwdb;
- }
- priv->stats.Slide_Beacon_Total += pprevious_stats->RxPWDBAll;
- priv->stats.Slide_Beacon_pwdb[slide_beacon_adc_pwdb_index] = pprevious_stats->RxPWDBAll;
- slide_beacon_adc_pwdb_index++;
- if (slide_beacon_adc_pwdb_index >= PHY_Beacon_RSSI_SLID_WIN_MAX)
- slide_beacon_adc_pwdb_index = 0;
- pprevious_stats->RxPWDBAll = priv->stats.Slide_Beacon_Total / slide_beacon_adc_pwdb_statistics;
- if (pprevious_stats->RxPWDBAll >= 3)
- pprevious_stats->RxPWDBAll -= 3;
- }
-
- RT_TRACE(COMP_RXDESC, "Smooth %s PWDB = %d\n",
- pprevious_stats->bIsCCK ? "CCK" : "OFDM",
- pprevious_stats->RxPWDBAll);
-
- if (pprevious_stats->bPacketToSelf ||
- pprevious_stats->bPacketBeacon ||
- pprevious_stats->bToSelfBA) {
- if (priv->undecorated_smoothed_pwdb < 0)
- /* initialize */
- priv->undecorated_smoothed_pwdb =
- pprevious_stats->RxPWDBAll;
- if (pprevious_stats->RxPWDBAll > (u32)priv->undecorated_smoothed_pwdb) {
- priv->undecorated_smoothed_pwdb =
- (((priv->undecorated_smoothed_pwdb) * (RX_SMOOTH_FACTOR - 1)) +
- (pprevious_stats->RxPWDBAll)) / (RX_SMOOTH_FACTOR);
- priv->undecorated_smoothed_pwdb = priv->undecorated_smoothed_pwdb + 1;
- } else {
- priv->undecorated_smoothed_pwdb =
- (((priv->undecorated_smoothed_pwdb) * (RX_SMOOTH_FACTOR - 1)) +
- (pprevious_stats->RxPWDBAll)) / (RX_SMOOTH_FACTOR);
- }
- }
-
- /* Check EVM */
- /* record the general EVM to the sliding window. */
- if (pprevious_stats->SignalQuality) {
- if (pprevious_stats->bPacketToSelf ||
- pprevious_stats->bPacketBeacon ||
- pprevious_stats->bToSelfBA) {
- if (slide_evm_statistics++ >= PHY_RSSI_SLID_WIN_MAX) {
- slide_evm_statistics = PHY_RSSI_SLID_WIN_MAX;
- last_evm = priv->stats.slide_evm[slide_evm_index];
- priv->stats.slide_evm_total -= last_evm;
- }
-
- priv->stats.slide_evm_total +=
- pprevious_stats->SignalQuality;
-
- priv->stats.slide_evm[slide_evm_index++] =
- pprevious_stats->SignalQuality;
- if (slide_evm_index >= PHY_RSSI_SLID_WIN_MAX)
- slide_evm_index = 0;
-
-			/* <1> Shown on UI for the user, in percentage. */
- tmp_val = priv->stats.slide_evm_total /
- slide_evm_statistics;
- priv->stats.signal_quality = tmp_val;
-			/* Shown on UI for the user in Windows Vista,
-			 * for link quality.
-			 */
- priv->stats.last_signal_strength_inpercent = tmp_val;
- }
-
-		/* <2> Shown on UI for engineering */
- if (pprevious_stats->bPacketToSelf ||
- pprevious_stats->bPacketBeacon ||
- pprevious_stats->bToSelfBA) {
- for (nspatial_stream = 0; nspatial_stream < 2; nspatial_stream++) { /* 2 spatial stream */
- if (pprevious_stats->RxMIMOSignalQuality[nspatial_stream] != -1) {
- if (priv->stats.rx_evm_percentage[nspatial_stream] == 0) /* initialize */
- priv->stats.rx_evm_percentage[nspatial_stream] = pprevious_stats->RxMIMOSignalQuality[nspatial_stream];
- priv->stats.rx_evm_percentage[nspatial_stream] =
- ((priv->stats.rx_evm_percentage[nspatial_stream] * (RX_SMOOTH_FACTOR - 1)) +
- (pprevious_stats->RxMIMOSignalQuality[nspatial_stream] * 1)) / (RX_SMOOTH_FACTOR);
- }
- }
- }
- }
-}
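A minimal sketch of the smoothing used for rx_rssi_percentage, undecorated_smoothed_pwdb and rx_evm_percentage above (not part of the driver; the helper name and the N = 20 value are only for illustration):

/* new = (old * (N - 1) + sample) / N, with N = RX_SMOOTH_FACTOR.
 * E.g. with N = 20, old = 60 and sample = 80: (60 * 19 + 80) / 20 = 61,
 * so each sample moves the average by roughly 1/N of the difference.
 * When the sample is larger than the old value the driver adds 1 on top,
 * compensating for the truncation of integer division.
 */
static long rx_smooth(long old, long sample, long n)
{
	return (old * (n - 1) + sample) / n;
}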
-
-/*-----------------------------------------------------------------------------
- * Function: rtl819x_query_rxpwrpercentage()
- *
- * Overview:
- *
- * Input: s8 antpower
- *
- * Output: NONE
- *
- * Return: 0-100 percentage
- *---------------------------------------------------------------------------
- */
-static u8 rtl819x_query_rxpwrpercentage(s8 antpower)
-{
- if ((antpower <= -100) || (antpower >= 20))
- return 0;
- else if (antpower >= 0)
- return 100;
- else
- return 100 + antpower;
-
-} /* QueryRxPwrPercentage */
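Worked examples of the mapping above (illustrative only, not part of the driver):

/* rtl819x_query_rxpwrpercentage() examples:
 *   antpower <= -100 dBm    -> 0   (treated as no signal)
 *   antpower  =  -65 dBm    -> 35  (100 + antpower)
 *   antpower  =   -1 dBm    -> 99
 *   0 <= antpower < 20 dBm  -> 100 (saturates)
 *   antpower >=  20 dBm     -> 0   (treated as invalid)
 */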
-
-static u8 rtl819x_evm_dbtopercentage(s8 value)
-{
- s8 ret_val = clamp(-value, 0, 33) * 3;
-
- if (ret_val == 99)
- ret_val = 100;
-
- return ret_val;
-}
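For example (illustrative only): an EVM of -25 dB maps to clamp(25, 0, 33) * 3 = 75 percent, while anything at or below -33 dB clamps to 33 * 3 = 99, which the special case above then rounds up to 100.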
-
-/* We want a good-looking value for signal strength/quality */
-static long rtl819x_signal_scale_mapping(long currsig)
-{
- long retsig;
-
- /* Step 1. Scale mapping. */
- if (currsig >= 61 && currsig <= 100)
- retsig = 90 + ((currsig - 60) / 4);
- else if (currsig >= 41 && currsig <= 60)
- retsig = 78 + ((currsig - 40) / 2);
- else if (currsig >= 31 && currsig <= 40)
- retsig = 66 + (currsig - 30);
- else if (currsig >= 21 && currsig <= 30)
- retsig = 54 + (currsig - 20);
- else if (currsig >= 5 && currsig <= 20)
- retsig = 42 + (((currsig - 5) * 2) / 3);
- else if (currsig == 4)
- retsig = 36;
- else if (currsig == 3)
- retsig = 27;
- else if (currsig == 2)
- retsig = 18;
- else if (currsig == 1)
- retsig = 9;
- else
- retsig = currsig;
-
- return retsig;
-}
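Two worked examples of the scale mapping above (illustrative only): an input of 70 lies in the 61..100 band and maps to 90 + (70 - 60) / 4 = 92; an input of 35 lies in the 31..40 band and maps to 66 + (35 - 30) = 71 (integer division throughout).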
-
-static inline bool rx_hal_is_cck_rate(struct rx_drvinfo_819x_usb *pdrvinfo)
-{
- if (pdrvinfo->RxHT)
- return false;
-
- switch (pdrvinfo->RxRate) {
- case DESC90_RATE1M:
- case DESC90_RATE2M:
- case DESC90_RATE5_5M:
- case DESC90_RATE11M:
- return true;
- default:
- return false;
- }
-}
-
-static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
- struct ieee80211_rx_stats *pstats,
- struct rx_drvinfo_819x_usb *pdrvinfo,
- struct ieee80211_rx_stats *precord_stats,
- bool bpacket_match_bssid,
- bool bpacket_toself,
- bool bPacketBeacon,
- bool bToSelfBA)
-{
- phy_sts_ofdm_819xusb_t *pofdm_buf;
- phy_sts_cck_819xusb_t *pcck_buf;
- struct phy_ofdm_rx_status_rxsc_sgien_exintfflag *prxsc;
- u8 *prxpkt;
- u8 i, max_spatial_stream, tmp_rxsnr, tmp_rxevm, rxsc_sgien_exflg;
- s8 rx_pwr[4], rx_pwr_all = 0;
- s8 rx_snrX, rx_evmX;
- u8 evm, pwdb_all;
- u32 RSSI, total_rssi = 0;
- u8 is_cck_rate = 0;
- u8 rf_rx_num = 0;
- u8 sq;
-
- priv->stats.numqry_phystatus++;
-
- is_cck_rate = rx_hal_is_cck_rate(pdrvinfo);
-
- /* Record it for next packet processing */
- memset(precord_stats, 0, sizeof(struct ieee80211_rx_stats));
- pstats->bPacketMatchBSSID =
- precord_stats->bPacketMatchBSSID = bpacket_match_bssid;
- pstats->bPacketToSelf = precord_stats->bPacketToSelf = bpacket_toself;
- pstats->bIsCCK = precord_stats->bIsCCK = is_cck_rate;
- pstats->bPacketBeacon = precord_stats->bPacketBeacon = bPacketBeacon;
- pstats->bToSelfBA = precord_stats->bToSelfBA = bToSelfBA;
-
- prxpkt = (u8 *)pdrvinfo;
-
-	/* Move the pointer to the 16th byte, where the PHY status starts. */
- prxpkt += sizeof(struct rx_drvinfo_819x_usb);
-
-	/* Initialize the CCK and OFDM buffer pointers */
- pcck_buf = (phy_sts_cck_819xusb_t *)prxpkt;
- pofdm_buf = (phy_sts_ofdm_819xusb_t *)prxpkt;
-
- pstats->RxMIMOSignalQuality[0] = -1;
- pstats->RxMIMOSignalQuality[1] = -1;
- precord_stats->RxMIMOSignalQuality[0] = -1;
- precord_stats->RxMIMOSignalQuality[1] = -1;
-
- if (is_cck_rate) {
- /* (1)Hardware does not provide RSSI for CCK */
-
- /* (2)PWDB, Average PWDB calculated by hardware
- * (for rate adaptive)
- */
- u8 report;
-
- priv->stats.numqry_phystatusCCK++;
-
- if (!priv->bCckHighPower) {
- report = pcck_buf->cck_agc_rpt & 0xc0;
- report >>= 6;
- switch (report) {
- case 0x3:
- rx_pwr_all = -35 - (pcck_buf->cck_agc_rpt & 0x3e);
- break;
- case 0x2:
- rx_pwr_all = -23 - (pcck_buf->cck_agc_rpt & 0x3e);
- break;
- case 0x1:
- rx_pwr_all = -11 - (pcck_buf->cck_agc_rpt & 0x3e);
- break;
- case 0x0:
- rx_pwr_all = 6 - (pcck_buf->cck_agc_rpt & 0x3e);
- break;
- }
- } else {
- report = pcck_buf->cck_agc_rpt & 0x60;
- report >>= 5;
- switch (report) {
- case 0x3:
- rx_pwr_all = -35 - ((pcck_buf->cck_agc_rpt & 0x1f) << 1);
- break;
- case 0x2:
- rx_pwr_all = -23 - ((pcck_buf->cck_agc_rpt & 0x1f) << 1);
- break;
- case 0x1:
- rx_pwr_all = -11 - ((pcck_buf->cck_agc_rpt & 0x1f) << 1);
- break;
- case 0x0:
- rx_pwr_all = 6 - ((pcck_buf->cck_agc_rpt & 0x1f) << 1);
- break;
- }
- }
-
- pwdb_all = rtl819x_query_rxpwrpercentage(rx_pwr_all);
- pstats->RxPWDBAll = precord_stats->RxPWDBAll = pwdb_all;
- pstats->RecvSignalPower = pwdb_all;
-
- /* (3) Get Signal Quality (EVM) */
-
- if (pstats->RxPWDBAll > 40) {
- sq = 100;
- } else {
- sq = pcck_buf->sq_rpt;
-
- if (pcck_buf->sq_rpt > 64)
- sq = 0;
- else if (pcck_buf->sq_rpt < 20)
- sq = 100;
- else
- sq = ((64 - sq) * 100) / 44;
- }
- pstats->SignalQuality = precord_stats->SignalQuality = sq;
- pstats->RxMIMOSignalQuality[0] =
- precord_stats->RxMIMOSignalQuality[0] = sq;
- pstats->RxMIMOSignalQuality[1] =
- precord_stats->RxMIMOSignalQuality[1] = -1;
-
- } else {
- priv->stats.numqry_phystatusHT++;
-
- /* (1)Get RSSI for HT rate */
- for (i = RF90_PATH_A; i < priv->NumTotalRFPath; i++) {
- /* We will judge RF RX path now. */
- if (priv->brfpath_rxenable[i])
- rf_rx_num++;
- else
- continue;
-
- if (!rtl8192_phy_CheckIsLegalRFPath(priv->ieee80211->dev, i))
- continue;
-
- rx_pwr[i] =
- ((pofdm_buf->trsw_gain_X[i] & 0x3F) * 2) - 106;
-
- /* Get Rx snr value in DB */
- tmp_rxsnr = pofdm_buf->rxsnr_X[i];
- rx_snrX = (s8)(tmp_rxsnr);
- rx_snrX /= 2;
- priv->stats.rxSNRdB[i] = (long)rx_snrX;
-
-			/* Translate dBm to percentage. */
- RSSI = rtl819x_query_rxpwrpercentage(rx_pwr[i]);
- total_rssi += RSSI;
-
- /* Record Signal Strength for next packet */
- pstats->RxMIMOSignalStrength[i] = (u8)RSSI;
- precord_stats->RxMIMOSignalStrength[i] = (u8)RSSI;
- }
-
- /* (2)PWDB, Average PWDB calculated by hardware
- * (for rate adaptive)
- */
- rx_pwr_all = (((pofdm_buf->pwdb_all) >> 1) & 0x7f) - 106;
- pwdb_all = rtl819x_query_rxpwrpercentage(rx_pwr_all);
-
- pstats->RxPWDBAll = precord_stats->RxPWDBAll = pwdb_all;
- pstats->RxPower = precord_stats->RxPower = rx_pwr_all;
-
- /* (3)EVM of HT rate */
- if (pdrvinfo->RxHT && pdrvinfo->RxRate >= DESC90_RATEMCS8 &&
- pdrvinfo->RxRate <= DESC90_RATEMCS15)
-			/* both spatial streams make sense */
- max_spatial_stream = 2;
- else
- /* only spatial stream 1 makes sense */
- max_spatial_stream = 1;
-
- for (i = 0; i < max_spatial_stream; i++) {
- tmp_rxevm = pofdm_buf->rxevm_X[i];
- rx_evmX = (s8)(tmp_rxevm);
-
- /* Do not use shift operation like "rx_evmX >>= 1"
- * because the compiler of free build environment will
- * set the most significant bit to "zero" when doing
- * shifting operation which may change a negative value
- * to positive one, then the dbm value (which is
- * supposed to be negative) is not correct anymore.
- */
- rx_evmX /= 2; /* dbm */
-
- evm = rtl819x_evm_dbtopercentage(rx_evmX);
- if (i == 0)
- /* Fill value in RFD, Get the first spatial
- * stream only
- */
- pstats->SignalQuality =
- precord_stats->SignalQuality =
- evm & 0xff;
- pstats->RxMIMOSignalQuality[i] =
- precord_stats->RxMIMOSignalQuality[i] =
- evm & 0xff;
- }
-
- /* record rx statistics for debug */
- rxsc_sgien_exflg = pofdm_buf->rxsc_sgien_exflg;
- prxsc = (struct phy_ofdm_rx_status_rxsc_sgien_exintfflag *)
- &rxsc_sgien_exflg;
- if (pdrvinfo->BW) /* 40M channel */
- priv->stats.received_bwtype[1 + prxsc->rxsc]++;
- else /* 20M channel */
- priv->stats.received_bwtype[0]++;
- }
-
-	/* UI BSS list signal strength (in percentage): make it good-looking,
-	 * in the range 0~100. It is assigned to the BSS list in
-	 * GetValueFromBeaconOrProbeRsp().
-	 */
- if (is_cck_rate) {
- pstats->SignalStrength =
- precord_stats->SignalStrength =
- (u8)(rtl819x_signal_scale_mapping((long)pwdb_all));
- } else {
- /* We can judge RX path number now. */
- if (rf_rx_num != 0) {
- pstats->SignalStrength =
- precord_stats->SignalStrength =
- (u8)(rtl819x_signal_scale_mapping((long)(total_rssi /= rf_rx_num)));
- }
- }
-} /* QueryRxPhyStatus8190Pci */
-
-static void rtl8192_record_rxdesc_forlateruse(struct ieee80211_rx_stats *psrc_stats,
- struct ieee80211_rx_stats *ptarget_stats)
-{
- ptarget_stats->bIsAMPDU = psrc_stats->bIsAMPDU;
- ptarget_stats->bFirstMPDU = psrc_stats->bFirstMPDU;
- ptarget_stats->Seq_Num = psrc_stats->Seq_Num;
-}
-
-static void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
- struct ieee80211_rx_stats *pstats,
- struct rx_drvinfo_819x_usb *pdrvinfo)
-{
-	/* TODO: We must only check packets for the current MAC address.
-	 * Not finished.
-	 */
- struct rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
- struct net_device *dev = info->dev;
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- bool bpacket_match_bssid, bpacket_toself;
- bool bPacketBeacon = false, bToSelfBA = false;
- static struct ieee80211_rx_stats previous_stats;
- struct rtl_80211_hdr_3addr *hdr;
- u16 fc, type;
-
- /* Get Signal Quality for only RX data queue (but not command queue) */
-
- u8 *tmp_buf;
- u8 *praddr;
-
- /* Get MAC frame start address. */
- tmp_buf = (u8 *)skb->data;
-
- hdr = (struct rtl_80211_hdr_3addr *)tmp_buf;
- fc = le16_to_cpu(hdr->frame_ctl);
- type = WLAN_FC_GET_TYPE(fc);
- praddr = hdr->addr1;
-
- /* Check if the received packet is acceptable. */
- bpacket_match_bssid = (type != IEEE80211_FTYPE_CTL) &&
- (ether_addr_equal(priv->ieee80211->current_network.bssid, (fc & IEEE80211_FCTL_TODS) ? hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 : hdr->addr3))
- && (!pstats->bHwError) && (!pstats->bCRC) && (!pstats->bICV);
- bpacket_toself = bpacket_match_bssid &&
- (ether_addr_equal(praddr, priv->ieee80211->dev->dev_addr));
-
- if (WLAN_FC_GET_FRAMETYPE(fc) == IEEE80211_STYPE_BEACON)
- bPacketBeacon = true;
- if (WLAN_FC_GET_FRAMETYPE(fc) == IEEE80211_STYPE_BLOCKACK) {
- if ((ether_addr_equal(praddr, dev->dev_addr)))
- bToSelfBA = true;
- }
-
- if (bpacket_match_bssid)
- priv->stats.numpacket_matchbssid++;
- if (bpacket_toself)
- priv->stats.numpacket_toself++;
-	/* Process PHY information for the previous packet (RSSI/PWDB/EVM).
-	 * Because PHY information is contained only in the last packet of an
-	 * AMPDU, the driver should process the PHY information of the
-	 * previous packet.
-	 */
- rtl8192_process_phyinfo(priv, tmp_buf, &previous_stats, pstats);
- rtl8192_query_rxphystatus(priv, pstats, pdrvinfo, &previous_stats,
- bpacket_match_bssid, bpacket_toself,
- bPacketBeacon, bToSelfBA);
- rtl8192_record_rxdesc_forlateruse(pstats, &previous_stats);
-}
-
-/*
- * Function: UpdateReceivedRateHistogramStatistics
- * Overview: Record the received data rate
- *
- * Input:
- * struct net_device *dev
- * struct ieee80211_rx_stats *stats
- *
- * Output:
- *
- * (priv->stats.ReceivedRateHistogram[] is updated)
- * Return:
- * None
- */
-static void
-UpdateReceivedRateHistogramStatistics8190(struct net_device *dev,
- struct ieee80211_rx_stats *stats)
-{
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- /* 0: Total, 1:OK, 2:CRC, 3:ICV */
- u32 rcvType = 1;
- u32 rateIndex;
- /* 1: short preamble/GI, 0: long preamble/GI */
- u32 preamble_guardinterval;
-
- if (stats->bCRC)
- rcvType = 2;
- else if (stats->bICV)
- rcvType = 3;
-
- if (stats->bShortPreamble)
- preamble_guardinterval = 1; /* short */
- else
- preamble_guardinterval = 0; /* long */
-
- switch (stats->rate) {
- /* CCK rate */
- case MGN_1M:
- rateIndex = 0;
- break;
- case MGN_2M:
- rateIndex = 1;
- break;
- case MGN_5_5M:
- rateIndex = 2;
- break;
- case MGN_11M:
- rateIndex = 3;
- break;
- /* Legacy OFDM rate */
- case MGN_6M:
- rateIndex = 4;
- break;
- case MGN_9M:
- rateIndex = 5;
- break;
- case MGN_12M:
- rateIndex = 6;
- break;
- case MGN_18M:
- rateIndex = 7;
- break;
- case MGN_24M:
- rateIndex = 8;
- break;
- case MGN_36M:
- rateIndex = 9;
- break;
- case MGN_48M:
- rateIndex = 10;
- break;
- case MGN_54M:
- rateIndex = 11;
- break;
- /* 11n High throughput rate */
- case MGN_MCS0:
- rateIndex = 12;
- break;
- case MGN_MCS1:
- rateIndex = 13;
- break;
- case MGN_MCS2:
- rateIndex = 14;
- break;
- case MGN_MCS3:
- rateIndex = 15;
- break;
- case MGN_MCS4:
- rateIndex = 16;
- break;
- case MGN_MCS5:
- rateIndex = 17;
- break;
- case MGN_MCS6:
- rateIndex = 18;
- break;
- case MGN_MCS7:
- rateIndex = 19;
- break;
- case MGN_MCS8:
- rateIndex = 20;
- break;
- case MGN_MCS9:
- rateIndex = 21;
- break;
- case MGN_MCS10:
- rateIndex = 22;
- break;
- case MGN_MCS11:
- rateIndex = 23;
- break;
- case MGN_MCS12:
- rateIndex = 24;
- break;
- case MGN_MCS13:
- rateIndex = 25;
- break;
- case MGN_MCS14:
- rateIndex = 26;
- break;
- case MGN_MCS15:
- rateIndex = 27;
- break;
- default:
- rateIndex = 28;
- break;
- }
- priv->stats.received_preamble_GI[preamble_guardinterval][rateIndex]++;
- priv->stats.received_rate_histogram[0][rateIndex]++; /* total */
- priv->stats.received_rate_histogram[rcvType][rateIndex]++;
-}
-
-static void query_rxdesc_status(struct sk_buff *skb,
- struct ieee80211_rx_stats *stats,
- bool bIsRxAggrSubframe)
-{
- struct rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
- struct net_device *dev = info->dev;
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- struct rx_drvinfo_819x_usb *driver_info = NULL;
-
- /* Get Rx Descriptor Information */
- struct rx_desc_819x_usb *desc = (struct rx_desc_819x_usb *)skb->data;
-
- stats->Length = desc->Length;
- stats->RxDrvInfoSize = desc->RxDrvInfoSize;
- stats->RxBufShift = 0;
- stats->bICV = desc->ICV;
- stats->bCRC = desc->CRC32;
- stats->bHwError = stats->bCRC | stats->bICV;
- /* RTL8190 set this bit to indicate that Hw does not decrypt packet */
- stats->Decrypted = !desc->SWDec;
-
- if ((priv->ieee80211->pHTInfo->bCurrentHTSupport) &&
- (priv->ieee80211->pairwise_key_type == KEY_TYPE_CCMP))
- stats->bHwError = false;
- else
- stats->bHwError = stats->bCRC | stats->bICV;
-
- if (stats->Length < 24 || stats->Length > MAX_8192U_RX_SIZE)
- stats->bHwError |= 1;
- /* Get Driver Info */
-	/* TODO: Need to verify it on the FPGA platform.
-	 * Driver info is written to the RxBuffer following the rx desc.
-	 */
- if (stats->RxDrvInfoSize != 0) {
- driver_info = (struct rx_drvinfo_819x_usb *)(skb->data
- + sizeof(struct rx_desc_819x_usb)
- + stats->RxBufShift
- );
- /* unit: 0.5M */
- /* TODO */
- if (!stats->bHwError) {
- u8 ret_rate;
-
- ret_rate = HwRateToMRate90(driver_info->RxHT,
- driver_info->RxRate);
- if (ret_rate == 0xff) {
-				/* Abnormal case: received a CRC-OK packet with an
-				 * Rx descriptor indicating an unsupported rate.
-				 * Special error handling here.
-				 */
-
- stats->bHwError = 1;
- /* Set 1M rate by default */
- stats->rate = MGN_1M;
- } else {
- stats->rate = ret_rate;
- }
- } else {
- stats->rate = 0x02;
- }
-
- stats->bShortPreamble = driver_info->SPLCP;
-
- UpdateReceivedRateHistogramStatistics8190(dev, stats);
-
- stats->bIsAMPDU = (driver_info->PartAggr == 1);
- stats->bFirstMPDU = (driver_info->PartAggr == 1) &&
- (driver_info->FirstAGGR == 1);
- stats->TimeStampLow = driver_info->TSFL;
-
- UpdateRxPktTimeStamp8190(dev, stats);
-
- /* Rx A-MPDU */
- if (driver_info->FirstAGGR == 1 || driver_info->PartAggr == 1)
- RT_TRACE(COMP_RXDESC,
- "driver_info->FirstAGGR = %d, driver_info->PartAggr = %d\n",
- driver_info->FirstAGGR, driver_info->PartAggr);
- }
-
- skb_pull(skb, sizeof(struct rx_desc_819x_usb));
- /* Get Total offset of MPDU Frame Body */
- if ((stats->RxBufShift + stats->RxDrvInfoSize) > 0) {
- stats->bShift = 1;
- skb_pull(skb, stats->RxBufShift + stats->RxDrvInfoSize);
- }
-
- if (driver_info) {
- stats->RxIs40MHzPacket = driver_info->BW;
- TranslateRxSignalStuff819xUsb(skb, stats, driver_info);
- }
-}
-
-static void rtl8192_rx_nomal(struct sk_buff *skb)
-{
- struct rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
- struct net_device *dev = info->dev;
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- struct ieee80211_rx_stats stats = {
- .signal = 0,
- .noise = 0x100 - 98,
- .rate = 0,
- .freq = IEEE80211_24GHZ_BAND,
- };
- u32 rx_pkt_len = 0;
- struct rtl_80211_hdr_1addr *ieee80211_hdr = NULL;
- bool unicast_packet = false;
-
- /* 20 is for ps-poll */
- if ((skb->len >= (20 + sizeof(struct rx_desc_819x_usb))) && (skb->len < RX_URB_SIZE)) {
- /* first packet should not contain Rx aggregation header */
- query_rxdesc_status(skb, &stats, false);
- /* TODO */
- /* hardware related info */
- /* Process the MPDU received */
- skb_trim(skb, skb->len - 4/*sCrcLng*/);
-
- rx_pkt_len = skb->len;
- ieee80211_hdr = (struct rtl_80211_hdr_1addr *)skb->data;
- unicast_packet = false;
- if (is_broadcast_ether_addr(ieee80211_hdr->addr1)) {
- /* TODO */
- } else if (is_multicast_ether_addr(ieee80211_hdr->addr1)) {
- /* TODO */
- } else {
- /* unicast packet */
- unicast_packet = true;
- }
-
- if (!ieee80211_rx(priv->ieee80211, skb, &stats)) {
- dev_kfree_skb_any(skb);
- } else {
- priv->stats.rxoktotal++;
- if (unicast_packet)
- priv->stats.rxbytesunicast += rx_pkt_len;
- }
- } else {
- priv->stats.rxurberr++;
- netdev_dbg(dev, "actual_length: %d\n", skb->len);
- dev_kfree_skb_any(skb);
- }
-}
-
-static void rtl819xusb_process_received_packet(struct net_device *dev,
- struct ieee80211_rx_stats *pstats)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
-	/* Advance virtual_address by the shift bytes to the start of the 802.11 header. */
- pstats->virtual_address += get_rxpacket_shiftbytes_819xusb(pstats);
-#ifdef TODO /* about HCT */
- if (!Adapter->bInHctTest)
- CountRxErrStatistics(Adapter, pRfd);
-#endif
-#ifdef ENABLE_PS /* for adding ps function in future */
- RT_RF_POWER_STATE rtState;
-	/* When RF is off, we should not count the packet, for hw/sw
-	 * synchronization reasons: there may be a period during which the sw
-	 * switch has been changed while the hw switch is still being changed.
-	 */
- Adapter->HalFunc.GetHwRegHandler(Adapter, HW_VAR_RF_STATE,
- (u8 *)(&rtState));
- if (rtState == eRfOff)
- return;
-#endif
- priv->stats.rxframgment++;
-
-#ifdef TODO
- RmMonitorSignalStrength(Adapter, pRfd);
-#endif
- /* We have to release RFD and return if rx pkt is cmd pkt. */
- if (rtl819xusb_rx_command_packet(dev, pstats))
- return;
-
-#ifdef SW_CRC_CHECK
- SwCrcCheck();
-#endif
-}
-
-static void query_rx_cmdpkt_desc_status(struct sk_buff *skb,
- struct ieee80211_rx_stats *stats)
-{
- struct rx_desc_819x_usb *desc = (struct rx_desc_819x_usb *)skb->data;
-
- /* Get Rx Descriptor Information */
- stats->virtual_address = (u8 *)skb->data;
- stats->Length = desc->Length;
- stats->RxDrvInfoSize = 0;
- stats->RxBufShift = 0;
- stats->packetlength = stats->Length - scrclng;
- stats->fraglength = stats->packetlength;
- stats->fragoffset = 0;
- stats->ntotalfrag = 1;
-}
-
-static void rtl8192_rx_cmd(struct sk_buff *skb)
-{
- struct rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
- struct net_device *dev = info->dev;
- /* TODO */
- struct ieee80211_rx_stats stats = {
- .signal = 0,
- .noise = 0x100 - 98,
- .rate = 0,
- .freq = IEEE80211_24GHZ_BAND,
- };
-
- if ((skb->len >= (20 + sizeof(struct rx_desc_819x_usb))) && (skb->len < RX_URB_SIZE)) {
- query_rx_cmdpkt_desc_status(skb, &stats);
- /* prfd->queue_id = 1; */
-
- /* Process the command packet received. */
-
- rtl819xusb_process_received_packet(dev, &stats);
-
- dev_kfree_skb_any(skb);
- }
-}
-
-static void rtl8192_irq_rx_tasklet(struct tasklet_struct *t)
-{
- struct r8192_priv *priv = from_tasklet(priv, t, irq_rx_tasklet);
- struct sk_buff *skb;
- struct rtl8192_rx_info *info;
-
- while (NULL != (skb = skb_dequeue(&priv->skb_queue))) {
- info = (struct rtl8192_rx_info *)skb->cb;
- switch (info->out_pipe) {
-		/* Normal packet pipe */
- case 3:
- priv->IrpPendingCount--;
- rtl8192_rx_nomal(skb);
- break;
-
- /* Command packet pipe */
- case 9:
- RT_TRACE(COMP_RECV, "command in-pipe index(%d)\n",
- info->out_pipe);
-
- rtl8192_rx_cmd(skb);
- break;
-
- default: /* should never get here! */
- RT_TRACE(COMP_ERR, "Unknown in-pipe index(%d)\n",
- info->out_pipe);
- dev_kfree_skb(skb);
- break;
- }
- }
-}
-
-static const struct net_device_ops rtl8192_netdev_ops = {
- .ndo_open = rtl8192_open,
- .ndo_stop = rtl8192_close,
- .ndo_get_stats = rtl8192_stats,
- .ndo_tx_timeout = tx_timeout,
- .ndo_do_ioctl = rtl8192_ioctl,
- .ndo_set_rx_mode = r8192_set_multicast,
- .ndo_set_mac_address = r8192_set_mac_adr,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_start_xmit = ieee80211_xmit,
-};
-
-/****************************************************************************
- * ---------------------------- USB_STUFF---------------------------
- *****************************************************************************/
-
-static int rtl8192_usb_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct net_device *dev = NULL;
- struct r8192_priv *priv = NULL;
- struct usb_device *udev = interface_to_usbdev(intf);
- int ret;
-
- RT_TRACE(COMP_INIT, "Oops: i'm coming\n");
-
- dev = alloc_ieee80211(sizeof(struct r8192_priv));
- if (!dev)
- return -ENOMEM;
-
- usb_set_intfdata(intf, dev);
- SET_NETDEV_DEV(dev, &intf->dev);
- priv = ieee80211_priv(dev);
- priv->ieee80211 = netdev_priv(dev);
- priv->udev = udev;
-
- dev->netdev_ops = &rtl8192_netdev_ops;
-
- dev->wireless_handlers = &r8192_wx_handlers_def;
-
- dev->type = ARPHRD_ETHER;
-
- dev->watchdog_timeo = HZ * 3;
-
- if (dev_alloc_name(dev, ifname) < 0) {
- RT_TRACE(COMP_INIT,
- "Oops: devname already taken! Trying wlan%%d...\n");
- ifname = "wlan%d";
- dev_alloc_name(dev, ifname);
- }
-
- RT_TRACE(COMP_INIT, "Driver probe completed1\n");
- if (rtl8192_init(dev) != 0) {
- RT_TRACE(COMP_ERR, "Initialization failed");
- ret = -ENODEV;
- goto fail;
- }
- netif_carrier_off(dev);
- netif_stop_queue(dev);
-
- ret = register_netdev(dev);
- if (ret)
- goto fail2;
-
- RT_TRACE(COMP_INIT, "dev name=======> %s\n", dev->name);
- rtl8192_debugfs_init_one(dev);
-
- RT_TRACE(COMP_INIT, "Driver probe completed\n");
- return 0;
-
-fail2:
- rtl8192_down(dev);
-fail:
- kfree(priv->pFirmware);
- priv->pFirmware = NULL;
- rtl8192_usb_deleteendpoints(dev);
- msleep(10);
- free_ieee80211(dev);
-
- RT_TRACE(COMP_ERR, "wlan driver load failed\n");
- return ret;
-}
-
-/* detach all the work and timer structures declared or initialized
- * in the r8192U_init function.
- */
-static void rtl8192_cancel_deferred_work(struct r8192_priv *priv)
-{
- cancel_work_sync(&priv->reset_wq);
- cancel_delayed_work(&priv->watch_dog_wq);
- cancel_delayed_work(&priv->update_beacon_wq);
- cancel_work_sync(&priv->qos_activate);
-}
-
-static void rtl8192_usb_disconnect(struct usb_interface *intf)
-{
- struct net_device *dev = usb_get_intfdata(intf);
- struct r8192_priv *priv = ieee80211_priv(dev);
-
-
- RT_TRACE(COMP_DOWN, "=============>wlan driver to be removed\n");
- rtl8192_debugfs_exit_one(dev);
-
- unregister_netdev(dev);
-
- rtl8192_down(dev);
- kfree(priv->pFirmware);
- priv->pFirmware = NULL;
- rtl8192_usb_deleteendpoints(dev);
- usleep_range(10000, 11000);
- free_ieee80211(dev);
-
- RT_TRACE(COMP_DOWN, "wlan driver removed\n");
-}
-
-static int rtl8192_usb_netdev_event(struct notifier_block *nb, unsigned long event,
- void *data)
-{
- struct net_device *netdev = netdev_notifier_info_to_dev(data);
-
- if (netdev->netdev_ops != &rtl8192_netdev_ops)
- goto out;
-
- switch (event) {
- case NETDEV_CHANGENAME:
- rtl8192_debugfs_rename_one(netdev);
- break;
- default:
- break;
- }
-
-out:
- return NOTIFY_DONE;
-}
-
-static struct notifier_block rtl8192_usb_netdev_notifier = {
- .notifier_call = rtl8192_usb_netdev_event,
-};
-
-static int __init rtl8192_usb_module_init(void)
-{
- int ret;
-
- pr_info("\nLinux kernel driver for RTL8192 based WLAN cards\n");
- pr_info("Copyright (c) 2007-2008, Realsil Wlan\n");
- RT_TRACE(COMP_INIT, "Initializing module");
- RT_TRACE(COMP_INIT, "Wireless extensions version %d", WIRELESS_EXT);
-
- ret = register_netdevice_notifier(&rtl8192_usb_netdev_notifier);
- if (ret) {
- pr_err("register_netdevice_notifier failed %d\n", ret);
- return ret;
- }
-
- rtl8192_debugfs_init();
- ret = ieee80211_debug_init();
- if (ret) {
- pr_err("ieee80211_debug_init() failed %d\n", ret);
- goto debugfs_exit;
- }
-
- ret = ieee80211_crypto_init();
- if (ret) {
- pr_err("ieee80211_crypto_init() failed %d\n", ret);
- goto debug_exit;
- }
-
- ret = ieee80211_crypto_tkip_init();
- if (ret) {
- pr_err("ieee80211_crypto_tkip_init() failed %d\n", ret);
- goto crypto_exit;
- }
-
- ret = ieee80211_crypto_ccmp_init();
- if (ret) {
- pr_err("ieee80211_crypto_ccmp_init() failed %d\n", ret);
- goto crypto_tkip_exit;
- }
-
- ret = ieee80211_crypto_wep_init();
- if (ret) {
- pr_err("ieee80211_crypto_wep_init() failed %d\n", ret);
- goto crypto_ccmp_exit;
- }
-
- ret = usb_register(&rtl8192_usb_driver);
- if (ret)
- goto crypto_wep_exit;
- return ret;
-
-crypto_wep_exit:
- ieee80211_crypto_wep_exit();
-crypto_ccmp_exit:
- ieee80211_crypto_ccmp_exit();
-crypto_tkip_exit:
- ieee80211_crypto_tkip_exit();
-crypto_exit:
- ieee80211_crypto_deinit();
-debug_exit:
- ieee80211_debug_exit();
-debugfs_exit:
- rtl8192_debugfs_exit();
- unregister_netdevice_notifier(&rtl8192_usb_netdev_notifier);
- return ret;
-}
-
-static void __exit rtl8192_usb_module_exit(void)
-{
- usb_deregister(&rtl8192_usb_driver);
- ieee80211_crypto_wep_exit();
- ieee80211_crypto_ccmp_exit();
- ieee80211_crypto_tkip_exit();
- ieee80211_crypto_deinit();
- ieee80211_debug_exit();
- rtl8192_debugfs_exit();
- unregister_netdevice_notifier(&rtl8192_usb_netdev_notifier);
- RT_TRACE(COMP_DOWN, "Exiting");
-}
-
-void EnableHWSecurityConfig8192(struct net_device *dev)
-{
- u8 SECR_value = 0x0;
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- struct ieee80211_device *ieee = priv->ieee80211;
-
- SECR_value = SCR_TxEncEnable | SCR_RxDecEnable;
- if (((ieee->pairwise_key_type == KEY_TYPE_WEP40) || (ieee->pairwise_key_type == KEY_TYPE_WEP104)) && (priv->ieee80211->auth_mode != 2)) {
- SECR_value |= SCR_RxUseDK;
- SECR_value |= SCR_TxUseDK;
- } else if ((ieee->iw_mode == IW_MODE_ADHOC) && (ieee->pairwise_key_type & (KEY_TYPE_CCMP | KEY_TYPE_TKIP))) {
- SECR_value |= SCR_RxUseDK;
- SECR_value |= SCR_TxUseDK;
- }
-	/* Add HWSec active enable here.
-	 * Default is to use hwsec. When the peer AP is in N-only mode and
-	 * pairwise_key_type is non-AES (which HT_IOT_ACT_PURE_N_MODE
-	 * indicates), use software security. When the peer AP is in mixed
-	 * b/g/n mode and pairwise_key_type is non-AES, use g-mode hw security.
-	 */
-
- ieee->hwsec_active = 1;
-
-	/* add hwsec_support flag to totally control hw_sec on/off */
- if ((ieee->pHTInfo->IOTAction & HT_IOT_ACT_PURE_N_MODE) || !hwwep) {
- ieee->hwsec_active = 0;
- SECR_value &= ~SCR_RxDecEnable;
- }
- RT_TRACE(COMP_SEC, "%s:, hwsec:%d, pairwise_key:%d, SECR_value:%x\n",
- __func__, ieee->hwsec_active, ieee->pairwise_key_type,
- SECR_value);
- write_nic_byte(dev, SECR, SECR_value);
-}
-
-void setKey(struct net_device *dev, u8 entryno, u8 keyindex, u16 keytype,
- const u8 *macaddr, u8 defaultkey, u32 *keycontent)
-{
- u32 target_command = 0;
- u32 target_content = 0;
- u16 us_config = 0;
- u8 i;
-
- if (entryno >= TOTAL_CAM_ENTRY)
- RT_TRACE(COMP_ERR, "cam entry exceeds in %s\n", __func__);
-
- RT_TRACE(COMP_SEC,
- "====>to %s, dev:%p, EntryNo:%d, KeyIndex:%d, KeyType:%d, MacAddr%pM\n",
- __func__, dev, entryno, keyindex, keytype, macaddr);
-
- if (defaultkey)
- us_config |= BIT(15) | (keytype << 2);
- else
- us_config |= BIT(15) | (keytype << 2) | keyindex;
-
- for (i = 0; i < CAM_CONTENT_COUNT; i++) {
- target_command = i + CAM_CONTENT_COUNT * entryno;
- target_command |= BIT(31) | BIT(16);
-
- if (i == 0) { /* MAC|Config */
- target_content = (u32)(*(macaddr + 0)) << 16 |
- (u32)(*(macaddr + 1)) << 24 |
- (u32)us_config;
-
- write_nic_dword(dev, WCAMI, target_content);
- write_nic_dword(dev, RWCAM, target_command);
- } else if (i == 1) { /* MAC */
- target_content = (u32)(*(macaddr + 2)) |
- (u32)(*(macaddr + 3)) << 8 |
- (u32)(*(macaddr + 4)) << 16 |
- (u32)(*(macaddr + 5)) << 24;
- write_nic_dword(dev, WCAMI, target_content);
- write_nic_dword(dev, RWCAM, target_command);
- } else {
- /* Key Material */
- if (keycontent) {
- write_nic_dword(dev, WCAMI,
- *(keycontent + i - 2));
- write_nic_dword(dev, RWCAM, target_command);
- }
- }
- }
-}
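A sketch of how the first CAM word is composed by the i == 0 branch above, using a made-up MAC address purely for illustration (not part of the driver):

/* With macaddr = 00:e0:4c:11:22:33 and defaultkey = 0:
 *   us_config      = BIT(15) | (keytype << 2) | keyindex
 *   target_content = (0x00 << 16) | (0xe0 << 24) | us_config
 * i.e. MAC byte 0 in bits 16..23, MAC byte 1 in bits 24..31 and the
 * config halfword in bits 0..15; MAC bytes 2..5 follow in the i == 1 word,
 * and the remaining iterations write the key material itself.
 */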
-
-/***************************************************************************
- * ------------------- module init / exit stubs ----------------
- ****************************************************************************/
-module_init(rtl8192_usb_module_init);
-module_exit(rtl8192_usb_module_exit);
diff --git a/drivers/staging/rtl8192u/r8192U_debugfs.c b/drivers/staging/rtl8192u/r8192U_debugfs.c
deleted file mode 100644
index fe8ef72506ee..000000000000
--- a/drivers/staging/rtl8192u/r8192U_debugfs.c
+++ /dev/null
@@ -1,188 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/****************************************************************************
- * -----------------------------DEBUGFS STUFF-------------------------
- ****************************************************************************/
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include "r8192U.h"
-
-#define KBUILD_MODNAME "r8192u_usb"
-
-static int rtl8192_usb_stats_ap_show(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device *ieee = priv->ieee80211;
- struct ieee80211_network *target;
-
- list_for_each_entry(target, &ieee->network_list, list) {
- const char *wpa = "non_WPA";
-
- if (target->wpa_ie_len > 0 || target->rsn_ie_len > 0)
- wpa = "WPA";
-
- seq_printf(m, "%s %s\n", target->ssid, wpa);
- }
-
- return 0;
-}
-
-static int rtl8192_usb_registers_show(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- int i, n, max = 0xff;
- u8 byte_rd;
-
- seq_puts(m, "\n####################page 0##################\n ");
-
- for (n = 0; n <= max;) {
- seq_printf(m, "\nD: %2x > ", n);
-
- for (i = 0; i < 16 && n <= max; i++, n++) {
- read_nic_byte(dev, 0x000 | n, &byte_rd);
- seq_printf(m, "%2x ", byte_rd);
- }
- }
-
- seq_puts(m, "\n####################page 1##################\n ");
- for (n = 0; n <= max;) {
- seq_printf(m, "\nD: %2x > ", n);
-
- for (i = 0; i < 16 && n <= max; i++, n++) {
- read_nic_byte(dev, 0x100 | n, &byte_rd);
- seq_printf(m, "%2x ", byte_rd);
- }
- }
-
- seq_puts(m, "\n####################page 3##################\n ");
- for (n = 0; n <= max;) {
- seq_printf(m, "\nD: %2x > ", n);
-
- for (i = 0; i < 16 && n <= max; i++, n++) {
- read_nic_byte(dev, 0x300 | n, &byte_rd);
- seq_printf(m, "%2x ", byte_rd);
- }
- }
-
- seq_putc(m, '\n');
- return 0;
-}
-
-static int rtl8192_usb_stats_tx_show(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- seq_printf(m,
- "TX VI priority ok int: %lu\n"
- "TX VI priority error int: %lu\n"
- "TX VO priority ok int: %lu\n"
- "TX VO priority error int: %lu\n"
- "TX BE priority ok int: %lu\n"
- "TX BE priority error int: %lu\n"
- "TX BK priority ok int: %lu\n"
- "TX BK priority error int: %lu\n"
- "TX MANAGE priority ok int: %lu\n"
- "TX MANAGE priority error int: %lu\n"
- "TX BEACON priority ok int: %lu\n"
- "TX BEACON priority error int: %lu\n"
- "TX queue resume: %lu\n"
- "TX queue stopped?: %d\n"
- "TX fifo overflow: %lu\n"
- "TX VI queue: %d\n"
- "TX VO queue: %d\n"
- "TX BE queue: %d\n"
- "TX BK queue: %d\n"
- "TX VI dropped: %lu\n"
- "TX VO dropped: %lu\n"
- "TX BE dropped: %lu\n"
- "TX BK dropped: %lu\n"
- "TX total data packets %lu\n",
- priv->stats.txviokint,
- priv->stats.txvierr,
- priv->stats.txvookint,
- priv->stats.txvoerr,
- priv->stats.txbeokint,
- priv->stats.txbeerr,
- priv->stats.txbkokint,
- priv->stats.txbkerr,
- priv->stats.txmanageokint,
- priv->stats.txmanageerr,
- priv->stats.txbeaconokint,
- priv->stats.txbeaconerr,
- priv->stats.txresumed,
- netif_queue_stopped(dev),
- priv->stats.txoverflow,
- atomic_read(&(priv->tx_pending[VI_PRIORITY])),
- atomic_read(&(priv->tx_pending[VO_PRIORITY])),
- atomic_read(&(priv->tx_pending[BE_PRIORITY])),
- atomic_read(&(priv->tx_pending[BK_PRIORITY])),
- priv->stats.txvidrop,
- priv->stats.txvodrop,
- priv->stats.txbedrop,
- priv->stats.txbkdrop,
- priv->stats.txdatapkt
- );
-
- return 0;
-}
-
-static int rtl8192_usb_stats_rx_show(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- seq_printf(m,
- "RX packets: %lu\n"
- "RX urb status error: %lu\n"
- "RX invalid urb error: %lu\n",
- priv->stats.rxoktotal,
- priv->stats.rxstaterr,
- priv->stats.rxurberr);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(rtl8192_usb_stats_rx);
-DEFINE_SHOW_ATTRIBUTE(rtl8192_usb_stats_tx);
-DEFINE_SHOW_ATTRIBUTE(rtl8192_usb_stats_ap);
-DEFINE_SHOW_ATTRIBUTE(rtl8192_usb_registers);
-
-void rtl8192_debugfs_init_one(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct dentry *parent_dir = debugfs_lookup(KBUILD_MODNAME, NULL);
- struct dentry *dir = debugfs_create_dir(dev->name, parent_dir);
-
- debugfs_create_file("stats-rx", 0444, dir, dev, &rtl8192_usb_stats_rx_fops);
- debugfs_create_file("stats-tx", 0444, dir, dev, &rtl8192_usb_stats_tx_fops);
- debugfs_create_file("stats-ap", 0444, dir, dev, &rtl8192_usb_stats_ap_fops);
- debugfs_create_file("registers", 0444, dir, dev, &rtl8192_usb_registers_fops);
-
- priv->debugfs_dir = dir;
-}
-
-void rtl8192_debugfs_exit_one(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- debugfs_remove_recursive(priv->debugfs_dir);
-}
-
-void rtl8192_debugfs_rename_one(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct dentry *parent_dir = debugfs_lookup(KBUILD_MODNAME, NULL);
-
- debugfs_rename(parent_dir, priv->debugfs_dir, parent_dir, dev->name);
-}
-
-void rtl8192_debugfs_init(void)
-{
- debugfs_create_dir(KBUILD_MODNAME, NULL);
-}
-
-void rtl8192_debugfs_exit(void)
-{
- debugfs_remove_recursive(debugfs_lookup(KBUILD_MODNAME, NULL));
-}
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
deleted file mode 100644
index cbae852478ea..000000000000
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ /dev/null
@@ -1,2821 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*++
- * Copyright-c Realtek Semiconductor Corp. All rights reserved.
- *
- * Module Name:
- * r8192U_dm.c
- *
- * Abstract:
- * HW dynamic mechanism.
- *--
- */
-#include "r8192U.h"
-#include "r8192U_dm.h"
-#include "r8192U_hw.h"
-#include "r819xU_phy.h"
-#include "r819xU_phyreg.h"
-#include "r8190_rtl8256.h"
-#include "r819xU_cmdpkt.h"
-/*---------------------------Define Local Constant---------------------------*/
-/* Indicate different AP vendor for IOT issue. */
-static u32 edca_setting_DL[HT_IOT_PEER_MAX] = {
- 0x5e4322, 0x5e4322, 0x5e4322, 0x604322, 0x00a44f, 0x5ea44f
-};
-
-static u32 edca_setting_UL[HT_IOT_PEER_MAX] = {
- 0x5e4322, 0x00a44f, 0x5e4322, 0x604322, 0x5ea44f, 0x5ea44f
-};
-
-#define RTK_UL_EDCA 0xa44f
-#define RTK_DL_EDCA 0x5e4322
-/*---------------------------Define Local Constant---------------------------*/
-
-
-/*------------------------Define global variable-----------------------------*/
-/* Debug variable ? */
-struct dig dm_digtable;
-/* Store current software write register content for MAC PHY. */
-u8 dm_shadow[16][256] = { {0} };
-/* For Dynamic Rx Path Selection by Signal Strength */
-static struct dynamic_rx_path_sel DM_RxPathSelTable;
-
-extern void dm_check_fsync(struct net_device *dev);
-
-/* DM --> Rate Adaptive */
-static void dm_check_rate_adaptive(struct net_device *dev);
-
-/* DM --> Bandwidth switch */
-static void dm_init_bandwidth_autoswitch(struct net_device *dev);
-static void dm_bandwidth_autoswitch(struct net_device *dev);
-
-/* DM --> TX power control */
-static void dm_check_txpower_tracking(struct net_device *dev);
-
-/* DM --> Dynamic Init Gain by RSSI */
-static void dm_dig_init(struct net_device *dev);
-static void dm_ctrl_initgain_byrssi(struct net_device *dev);
-static void dm_ctrl_initgain_byrssi_highpwr(struct net_device *dev);
-static void dm_ctrl_initgain_byrssi_by_driverrssi(struct net_device *dev);
-static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(struct net_device *dev);
-static void dm_initial_gain(struct net_device *dev);
-static void dm_pd_th(struct net_device *dev);
-static void dm_cs_ratio(struct net_device *dev);
-
-static void dm_init_ctstoself(struct net_device *dev);
-/* DM --> EDCA turbo mode control */
-static void dm_check_edca_turbo(struct net_device *dev);
-
-/* DM --> Check PBC */
-static void dm_check_pbc_gpio(struct net_device *dev);
-
-/* DM --> Check current RX RF path state */
-static void dm_check_rx_path_selection(struct net_device *dev);
-static void dm_init_rxpath_selection(struct net_device *dev);
-static void dm_rxpath_sel_byrssi(struct net_device *dev);
-
-/* DM --> Fsync for broadcom ap */
-static void dm_init_fsync(struct net_device *dev);
-static void dm_deInit_fsync(struct net_device *dev);
-
-/* Added by vivi, 20080522 */
-static void dm_check_txrateandretrycount(struct net_device *dev);
-
-/*---------------------Define local function prototype-----------------------*/
-
-/*---------------------Define of Tx Power Control For Near/Far Range --------*/ /*Add by Jacken 2008/02/18 */
-static void dm_init_dynamic_txpower(struct net_device *dev);
-static void dm_dynamic_txpower(struct net_device *dev);
-
-/* DM --> For rate adaptive and DIG, we must send RSSI to firmware */
-static void dm_send_rssi_tofw(struct net_device *dev);
-static void dm_ctstoself(struct net_device *dev);
-/*---------------------------Define function prototype------------------------*/
-/* ================================================================================
- * HW Dynamic mechanism interface.
- * ================================================================================
- *
- *
- * Description:
- * Prepare SW resource for HW dynamic mechanism.
- *
- * Assumption:
- * This function is only invoked at driver initialization once.
- */
-void init_hal_dm(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
-	/* Undecorated smoothed signal strength; it can be utilized by the dynamic mechanism. */
- priv->undecorated_smoothed_pwdb = -1;
-
- /* Initial TX Power Control for near/far range , add by amy 2008/05/15, porting from windows code. */
- dm_init_dynamic_txpower(dev);
- init_rate_adaptive(dev);
- dm_dig_init(dev);
- dm_init_edca_turbo(dev);
- dm_init_bandwidth_autoswitch(dev);
- dm_init_fsync(dev);
- dm_init_rxpath_selection(dev);
- dm_init_ctstoself(dev);
-
-} /* InitHalDm */
-
-void deinit_hal_dm(struct net_device *dev)
-{
- dm_deInit_fsync(dev);
-}
-
-#ifdef USB_RX_AGGREGATION_SUPPORT
-void dm_CheckRxAggregation(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- PRT_HIGH_THROUGHPUT pHTInfo = priv->ieee80211->pHTInfo;
- static unsigned long lastTxOkCnt;
- static unsigned long lastRxOkCnt;
- unsigned long curTxOkCnt = 0;
- unsigned long curRxOkCnt = 0;
-
- curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
- curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
-
- if ((curTxOkCnt + curRxOkCnt) < 15000000)
- return;
-
- if (curTxOkCnt > 4*curRxOkCnt) {
- if (priv->bCurrentRxAggrEnable) {
- write_nic_dword(dev, 0x1a8, 0);
- priv->bCurrentRxAggrEnable = false;
- }
- } else {
- if (!priv->bCurrentRxAggrEnable && !pHTInfo->bCurrentRT2RTAggregation) {
- u32 ulValue;
-
- ulValue = (pHTInfo->UsbRxFwAggrEn<<24) | (pHTInfo->UsbRxFwAggrPageNum<<16) |
- (pHTInfo->UsbRxFwAggrPacketNum<<8) | (pHTInfo->UsbRxFwAggrTimeout);
-			/* If USB rx firmware aggregation is enabled,
-			 * when any one of the three threshold conditions above is reached,
-			 * the firmware will send an aggregated packet to the driver.
-			 */
- write_nic_dword(dev, 0x1a8, ulValue);
- priv->bCurrentRxAggrEnable = true;
- }
- }
-
- lastTxOkCnt = priv->stats.txbytesunicast;
- lastRxOkCnt = priv->stats.rxbytesunicast;
-} /* dm_CheckEdcaTurbo */
-#endif
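For reference, the value written to register 0x1a8 above packs the four firmware RX-aggregation parameters into a single dword; the example numbers below are made up, only the bit layout comes from the code:

/* ulValue layout in dm_CheckRxAggregation():
 *   bits 24..31  UsbRxFwAggrEn
 *   bits 16..23  UsbRxFwAggrPageNum
 *   bits  8..15  UsbRxFwAggrPacketNum
 *   bits  0..7   UsbRxFwAggrTimeout
 * e.g. (1 << 24) | (24 << 16) | (8 << 8) | 16 = 0x01180810.
 */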
-
-void hal_dm_watchdog(struct net_device *dev)
-{
- /*Add by amy 2008/05/15 ,porting from windows code.*/
- dm_check_rate_adaptive(dev);
- dm_dynamic_txpower(dev);
- dm_check_txrateandretrycount(dev);
- dm_check_txpower_tracking(dev);
- dm_ctrl_initgain_byrssi(dev);
- dm_check_edca_turbo(dev);
- dm_bandwidth_autoswitch(dev);
- dm_check_rx_path_selection(dev);
- dm_check_fsync(dev);
-
- /* Add by amy 2008-05-15 porting from windows code. */
- dm_check_pbc_gpio(dev);
- dm_send_rssi_tofw(dev);
- dm_ctstoself(dev);
-#ifdef USB_RX_AGGREGATION_SUPPORT
- dm_CheckRxAggregation(dev);
-#endif
-} /* HalDmWatchDog */
-
-/* Decide Rate Adaptive Set according to distance (signal strength)
- * 01/11/2008 MHC Modify input arguments and RATR table level.
- * 01/16/2008 MHC RF_Type is assigned in ReadAdapterInfo(). We must call
- * the function after making sure RF_Type.
- */
-void init_rate_adaptive(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- prate_adaptive pra = (prate_adaptive)&priv->rate_adaptive;
-
- pra->ratr_state = DM_RATR_STA_MAX;
- pra->high2low_rssi_thresh_for_ra = RATE_ADAPTIVE_TH_HIGH;
- pra->low2high_rssi_thresh_for_ra20M = RATE_ADAPTIVE_TH_LOW_20M + 5;
- pra->low2high_rssi_thresh_for_ra40M = RATE_ADAPTIVE_TH_LOW_40M + 5;
-
- pra->high_rssi_thresh_for_ra = RATE_ADAPTIVE_TH_HIGH + 5;
- pra->low_rssi_thresh_for_ra20M = RATE_ADAPTIVE_TH_LOW_20M;
- pra->low_rssi_thresh_for_ra40M = RATE_ADAPTIVE_TH_LOW_40M;
-
- if (priv->CustomerID == RT_CID_819x_Netcore)
- pra->ping_rssi_enable = 1;
- else
- pra->ping_rssi_enable = 0;
- pra->ping_rssi_thresh_for_ra = 15;
-
- if (priv->rf_type == RF_2T4R) {
- /* 07/10/08 MH Modify for RA smooth scheme.
- * 2008/01/11 MH Modify 2T RATR table for different RSSI. 080515 porting by amy from windows code.
- */
- pra->upper_rssi_threshold_ratr = 0x8f0f0000;
- pra->middle_rssi_threshold_ratr = 0x8f0ff000;
- pra->low_rssi_threshold_ratr = 0x8f0ff001;
- pra->low_rssi_threshold_ratr_40M = 0x8f0ff005;
- pra->low_rssi_threshold_ratr_20M = 0x8f0ff001;
- pra->ping_rssi_ratr = 0x0000000d;/* cosa add for test */
- } else if (priv->rf_type == RF_1T2R) {
- pra->upper_rssi_threshold_ratr = 0x000f0000;
- pra->middle_rssi_threshold_ratr = 0x000ff000;
- pra->low_rssi_threshold_ratr = 0x000ff001;
- pra->low_rssi_threshold_ratr_40M = 0x000ff005;
- pra->low_rssi_threshold_ratr_20M = 0x000ff001;
- pra->ping_rssi_ratr = 0x0000000d;/* cosa add for test */
- }
-
-} /* InitRateAdaptive */
-
-/*-----------------------------------------------------------------------------
- * Function: dm_check_rate_adaptive()
- *
- * Overview:
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *---------------------------------------------------------------------------
- */
-static void dm_check_rate_adaptive(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- PRT_HIGH_THROUGHPUT pHTInfo = priv->ieee80211->pHTInfo;
- prate_adaptive pra = (prate_adaptive)&priv->rate_adaptive;
- u32 currentRATR, targetRATR = 0;
- u32 LowRSSIThreshForRA = 0, HighRSSIThreshForRA = 0;
- bool bshort_gi_enabled = false;
- static u8 ping_rssi_state;
-
- if (!priv->up) {
- RT_TRACE(COMP_RATE, "<---- dm_check_rate_adaptive(): driver is going to unload\n");
- return;
- }
-
- if (pra->rate_adaptive_disabled) /* this variable is set by ioctl. */
- return;
-
- /* TODO: Only 11n mode is implemented currently. */
- if (!(priv->ieee80211->mode == WIRELESS_MODE_N_24G ||
- priv->ieee80211->mode == WIRELESS_MODE_N_5G))
- return;
-
- if (priv->ieee80211->state == IEEE80211_LINKED) {
-
- /* Check whether Short GI is enabled */
- bshort_gi_enabled = (pHTInfo->bCurTxBW40MHz && pHTInfo->bCurShortGI40MHz) ||
- (!pHTInfo->bCurTxBW40MHz && pHTInfo->bCurShortGI20MHz);
-
- pra->upper_rssi_threshold_ratr =
- (pra->upper_rssi_threshold_ratr & (~BIT(31))) |
- ((bshort_gi_enabled) ? BIT(31) : 0);
-
- pra->middle_rssi_threshold_ratr =
- (pra->middle_rssi_threshold_ratr & (~BIT(31))) |
- ((bshort_gi_enabled) ? BIT(31) : 0);
-
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) {
- pra->low_rssi_threshold_ratr =
- (pra->low_rssi_threshold_ratr_40M & (~BIT(31))) |
- ((bshort_gi_enabled) ? BIT(31) : 0);
- } else {
- pra->low_rssi_threshold_ratr =
- (pra->low_rssi_threshold_ratr_20M & (~BIT(31))) |
- ((bshort_gi_enabled) ? BIT(31) : 0);
- }
- /* cosa add for test */
- pra->ping_rssi_ratr =
- (pra->ping_rssi_ratr & (~BIT(31))) |
- ((bshort_gi_enabled) ? BIT(31) : 0);
-
- /* 2007/10/08 MH We support the RA smooth scheme now. When the STA first
- * links with the AP, we do not change the upper/lower thresholds. Once
- * the STA stays at the high or low level, we use two different thresholds
- * to prevent it from jumping between states frequently.
- */
- if (pra->ratr_state == DM_RATR_STA_HIGH) {
- HighRSSIThreshForRA = pra->high2low_rssi_thresh_for_ra;
- LowRSSIThreshForRA = (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) ?
- (pra->low_rssi_thresh_for_ra40M):(pra->low_rssi_thresh_for_ra20M);
- } else if (pra->ratr_state == DM_RATR_STA_LOW) {
- HighRSSIThreshForRA = pra->high_rssi_thresh_for_ra;
- LowRSSIThreshForRA = (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) ?
- (pra->low2high_rssi_thresh_for_ra40M):(pra->low2high_rssi_thresh_for_ra20M);
- } else {
- HighRSSIThreshForRA = pra->high_rssi_thresh_for_ra;
- LowRSSIThreshForRA = (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) ?
- (pra->low_rssi_thresh_for_ra40M):(pra->low_rssi_thresh_for_ra20M);
- }
-
- if (priv->undecorated_smoothed_pwdb >= (long)HighRSSIThreshForRA) {
- pra->ratr_state = DM_RATR_STA_HIGH;
- targetRATR = pra->upper_rssi_threshold_ratr;
- } else if (priv->undecorated_smoothed_pwdb >= (long)LowRSSIThreshForRA) {
- pra->ratr_state = DM_RATR_STA_MIDDLE;
- targetRATR = pra->middle_rssi_threshold_ratr;
- } else {
- pra->ratr_state = DM_RATR_STA_LOW;
- targetRATR = pra->low_rssi_threshold_ratr;
- }
-
- /* cosa add for test */
- if (pra->ping_rssi_enable) {
- if (priv->undecorated_smoothed_pwdb < (long)(pra->ping_rssi_thresh_for_ra+5)) {
- if ((priv->undecorated_smoothed_pwdb < (long)pra->ping_rssi_thresh_for_ra) ||
- ping_rssi_state) {
- pra->ratr_state = DM_RATR_STA_LOW;
- targetRATR = pra->ping_rssi_ratr;
- ping_rssi_state = 1;
- }
- } else {
- ping_rssi_state = 0;
- }
- }
-
- /* 2008.04.01
- * For RTL819X, if the pairwise key is WEP/TKIP, only MCS0~7 are supported.
- */
- if (priv->ieee80211->GetHalfNmodeSupportByAPsHandler(dev))
- targetRATR &= 0xf00fffff;
-
- /* Check whether updating of RATR0 is required */
- read_nic_dword(dev, RATR0, &currentRATR);
- if (targetRATR != currentRATR) {
- u32 ratr_value;
-
- ratr_value = targetRATR;
- RT_TRACE(COMP_RATE, "currentRATR = %x, targetRATR = %x\n", currentRATR, targetRATR);
- if (priv->rf_type == RF_1T2R)
- ratr_value &= ~(RATE_ALL_OFDM_2SS);
- write_nic_dword(dev, RATR0, ratr_value);
- write_nic_byte(dev, UFWP, 1);
-
- pra->last_ratr = targetRATR;
- }
-
- } else {
- pra->ratr_state = DM_RATR_STA_MAX;
- }
-
-} /* dm_CheckRateAdaptive */
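
A minimal sketch (illustrative names, not the driver's) of the RSSI hysteresis applied above: the threshold pair used depends on the current RATR state, so a station hovering near a boundary does not flip state on every watchdog pass.

    enum ra_state { RA_LOW, RA_MIDDLE, RA_HIGH };

    static enum ra_state ra_next_state(enum ra_state cur, long rssi,
                                       long high_th, long high2low_th,
                                       long low_th, long low2high_th)
    {
            /* In the HIGH state the bar to stay HIGH is lowered (high2low);
             * in the LOW state the bar to leave LOW is raised (low2high). */
            long high = (cur == RA_HIGH) ? high2low_th : high_th;
            long low  = (cur == RA_LOW)  ? low2high_th : low_th;

            if (rssi >= high)
                    return RA_HIGH;
            if (rssi >= low)
                    return RA_MIDDLE;
            return RA_LOW;
    }
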
-
-static void dm_init_bandwidth_autoswitch(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- priv->ieee80211->bandwidth_auto_switch.threshold_20Mhzto40Mhz = BW_AUTO_SWITCH_LOW_HIGH;
- priv->ieee80211->bandwidth_auto_switch.threshold_40Mhzto20Mhz = BW_AUTO_SWITCH_HIGH_LOW;
- priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz = false;
- priv->ieee80211->bandwidth_auto_switch.bautoswitch_enable = false;
-
-} /* dm_init_bandwidth_autoswitch */
-
-static void dm_bandwidth_autoswitch(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 || !priv->ieee80211->bandwidth_auto_switch.bautoswitch_enable)
- return;
- if (!priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz) { /* currently sending packets in 40 MHz in 20/40 mode */
- if (priv->undecorated_smoothed_pwdb <= priv->ieee80211->bandwidth_auto_switch.threshold_40Mhzto20Mhz)
- priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz = true;
- } else { /* currently forced to send packets in 20 MHz in 20/40 mode */
- if (priv->undecorated_smoothed_pwdb >= priv->ieee80211->bandwidth_auto_switch.threshold_20Mhzto40Mhz)
- priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz = false;
- }
-} /* dm_BandwidthAutoSwitch */
-
-/* OFDM default at 0db, index=6. */
-static u32 OFDMSwingTable[OFDM_Table_Length] = {
- 0x7f8001fe, /* 0, +6db */
- 0x71c001c7, /* 1, +5db */
- 0x65400195, /* 2, +4db */
- 0x5a400169, /* 3, +3db */
- 0x50800142, /* 4, +2db */
- 0x47c0011f, /* 5, +1db */
- 0x40000100, /* 6, +0db ===> default; upper entries for higher temperature, lower entries for lower temperature */
- 0x390000e4, /* 7, -1db */
- 0x32c000cb, /* 8, -2db */
- 0x2d4000b5, /* 9, -3db */
- 0x288000a2, /* 10, -4db */
- 0x24000090, /* 11, -5db */
- 0x20000080, /* 12, -6db */
- 0x1c800072, /* 13, -7db */
- 0x19800066, /* 14, -8db */
- 0x26c0005b, /* 15, -9db */
- 0x24400051, /* 16, -10db */
- 0x12000048, /* 17, -11db */
- 0x10000040 /* 18, -12db */
-};
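
As an observation (an assumption, not stated anywhere in the driver), the low half-word of these entries tracks 256 scaled by 10^(dB/20) to within one LSB; a throwaway check:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
            /* 0x100 is the 0 dB reference, so an n dB step scales it by
             * 10^(n/20): +6 dB -> ~0x1fe, -6 dB -> 0x80, -12 dB -> 0x40. */
            for (int db = 6; db >= -12; db--)
                    printf("%+3d dB -> 0x%04lx\n", db,
                           (unsigned long)lround(256.0 * pow(10.0, db / 20.0)));
            return 0;
    }
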
-
-static u8 CCKSwingTable_Ch1_Ch13[CCK_Table_length][8] = {
- {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0db ===> CCK40M default */
- {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 1, -1db */
- {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 2, -2db */
- {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 3, -3db */
- {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 4, -4db */
- {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 5, -5db */
- {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 6, -6db ===> CCK20M default */
- {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 7, -7db */
- {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 8, -8db */
- {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 9, -9db */
- {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 10, -10db */
- {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01} /* 11, -11db */
-};
-
-static u8 CCKSwingTable_Ch14[CCK_Table_length][8] = {
- {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0db ===> CCK40M default */
- {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 1, -1db */
- {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 2, -2db */
- {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 3, -3db */
- {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 4, -4db */
- {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 5, -5db */
- {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 6, -6db ===> CCK20M default */
- {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 7, -7db */
- {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 8, -8db */
- {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 9, -9db */
- {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 10, -10db */
- {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00} /* 11, -11db */
-};
-
-static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- bool viviflag = false;
- struct tx_config_cmd tx_cmd;
- u8 powerlevelOFDM24G;
- int i = 0, j = 0, k = 0;
- u8 RF_Type, tmp_report[5] = {0, 0, 0, 0, 0};
- u32 Value;
- u8 Pwr_Flag;
- u16 Avg_TSSI_Meas, TSSI_13dBm, Avg_TSSI_Meas_from_driver = 0;
- bool rtStatus = true;
- u32 delta = 0;
-
- write_nic_byte(dev, 0x1ba, 0);
-
- priv->ieee80211->bdynamic_txpower_enable = false;
-
- powerlevelOFDM24G = (u8)(priv->Pwr_Track>>24);
- RF_Type = priv->rf_type;
- Value = (RF_Type<<8) | powerlevelOFDM24G;
-
- RT_TRACE(COMP_POWER_TRACKING, "powerlevelOFDM24G = %x\n", powerlevelOFDM24G);
-
- for (j = 0; j <= 30; j++) { /* fill tx_cmd */
- tx_cmd.cmd_op = TXCMD_SET_TX_PWR_TRACKING;
- tx_cmd.cmd_length = sizeof(tx_cmd.cmd_op);
- tx_cmd.cmd_value = Value;
- rtStatus = SendTxCommandPacket(dev, &tx_cmd, sizeof(struct tx_config_cmd));
- if (rtStatus == RT_STATUS_FAILURE)
- RT_TRACE(COMP_POWER_TRACKING, "Set configuration with tx cmd queue fail!\n");
- usleep_range(1000, 2000);
- for (i = 0; i <= 30; i++) {
- read_nic_byte(dev, 0x1ba, &Pwr_Flag);
-
- if (Pwr_Flag == 0) {
- usleep_range(1000, 2000);
- continue;
- }
- read_nic_word(dev, 0x13c, &Avg_TSSI_Meas);
- if (Avg_TSSI_Meas == 0) {
- write_nic_byte(dev, 0x1ba, 0);
- break;
- }
-
- for (k = 0; k < 5; k++) {
- if (k != 4)
- read_nic_byte(dev, 0x134+k, &tmp_report[k]);
- else
- read_nic_byte(dev, 0x13e, &tmp_report[k]);
- RT_TRACE(COMP_POWER_TRACKING, "TSSI_report_value = %d\n", tmp_report[k]);
- }
-
- /* check if the report value is right */
- for (k = 0; k < 5; k++) {
- if (tmp_report[k] <= 20) {
- viviflag = true;
- break;
- }
- }
- if (viviflag) {
- write_nic_byte(dev, 0x1ba, 0);
- viviflag = false;
- RT_TRACE(COMP_POWER_TRACKING, "we filtered the data\n");
- for (k = 0; k < 5; k++)
- tmp_report[k] = 0;
- break;
- }
-
- for (k = 0; k < 5; k++)
- Avg_TSSI_Meas_from_driver += tmp_report[k];
-
- Avg_TSSI_Meas_from_driver = Avg_TSSI_Meas_from_driver*100/5;
- RT_TRACE(COMP_POWER_TRACKING, "Avg_TSSI_Meas_from_driver = %d\n", Avg_TSSI_Meas_from_driver);
- TSSI_13dBm = priv->TSSI_13dBm;
- RT_TRACE(COMP_POWER_TRACKING, "TSSI_13dBm = %d\n", TSSI_13dBm);
-
- if (Avg_TSSI_Meas_from_driver > TSSI_13dBm)
- delta = Avg_TSSI_Meas_from_driver - TSSI_13dBm;
- else
- delta = TSSI_13dBm - Avg_TSSI_Meas_from_driver;
-
- if (delta <= E_FOR_TX_POWER_TRACK) {
- priv->ieee80211->bdynamic_txpower_enable = true;
- write_nic_byte(dev, 0x1ba, 0);
- RT_TRACE(COMP_POWER_TRACKING, "tx power track is done\n");
- RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex = %d\n", priv->rfa_txpowertrackingindex);
- RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex_real = %d\n", priv->rfa_txpowertrackingindex_real);
- RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attenuation_difference = %d\n", priv->cck_present_attenuation_difference);
- RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attenuation = %d\n", priv->cck_present_attenuation);
- return;
- }
- if (Avg_TSSI_Meas_from_driver < TSSI_13dBm - E_FOR_TX_POWER_TRACK) {
- if (priv->rfa_txpowertrackingindex > 0) {
- priv->rfa_txpowertrackingindex--;
- if (priv->rfa_txpowertrackingindex_real > 4) {
- priv->rfa_txpowertrackingindex_real--;
- rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfa_txpowertrackingindex_real].txbbgain_value);
- }
- }
- } else {
- if (priv->rfa_txpowertrackingindex < 36) {
- priv->rfa_txpowertrackingindex++;
- priv->rfa_txpowertrackingindex_real++;
- rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfa_txpowertrackingindex_real].txbbgain_value);
- }
- }
- priv->cck_present_attenuation_difference
- = priv->rfa_txpowertrackingindex - priv->rfa_txpowertracking_default;
-
- if (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
- priv->cck_present_attenuation
- = priv->cck_present_attenuation_20Mdefault + priv->cck_present_attenuation_difference;
- else
- priv->cck_present_attenuation
- = priv->cck_present_attenuation_40Mdefault + priv->cck_present_attenuation_difference;
-
- if (priv->cck_present_attenuation > -1 && priv->cck_present_attenuation < 23) {
- if (priv->ieee80211->current_network.channel == 14 && !priv->bcck_in_ch14) {
- priv->bcck_in_ch14 = true;
- dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
- } else if (priv->ieee80211->current_network.channel != 14 && priv->bcck_in_ch14) {
- priv->bcck_in_ch14 = false;
- dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
- } else
- dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
- }
- RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex = %d\n", priv->rfa_txpowertrackingindex);
- RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex_real = %d\n", priv->rfa_txpowertrackingindex_real);
- RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attenuation_difference = %d\n", priv->cck_present_attenuation_difference);
- RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attenuation = %d\n", priv->cck_present_attenuation);
-
- if (priv->cck_present_attenuation_difference <= -12 || priv->cck_present_attenuation_difference >= 24) {
- priv->ieee80211->bdynamic_txpower_enable = true;
- write_nic_byte(dev, 0x1ba, 0);
- RT_TRACE(COMP_POWER_TRACKING, "tx power track--->limited\n");
- return;
- }
-
- write_nic_byte(dev, 0x1ba, 0);
- Avg_TSSI_Meas_from_driver = 0;
- for (k = 0; k < 5; k++)
- tmp_report[k] = 0;
- break;
- }
- }
- priv->ieee80211->bdynamic_txpower_enable = true;
- write_nic_byte(dev, 0x1ba, 0);
-}
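
A minimal sketch (illustrative, not the driver's code) of the TSSI averaging performed above, assuming the five report bytes and the 13 dBm calibration value share the same x100 scale the callback compares them on:

    #include <stdint.h>

    static uint32_t tssi_delta_x100(const uint8_t report[5], uint16_t tssi_13dbm)
    {
            uint32_t sum = 0;
            int k;

            for (k = 0; k < 5; k++)
                    sum += report[k];

            /* Average of the five reports, scaled by 100 as above. */
            uint32_t avg_x100 = sum * 100 / 5;

            return (avg_x100 > tssi_13dbm) ? avg_x100 - tssi_13dbm
                                           : tssi_13dbm - avg_x100;
    }
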
-
-static void dm_TXPowerTrackingCallback_ThermalMeter(struct net_device *dev)
-{
-#define ThermalMeterVal 9
- struct r8192_priv *priv = ieee80211_priv(dev);
- u32 tmpRegA, TempCCk;
- u8 tmpOFDMindex, tmpCCKindex, tmpCCK20Mindex, tmpCCK40Mindex, tmpval;
- int i = 0, CCKSwingNeedUpdate = 0;
-
- if (!priv->btxpower_trackingInit) {
- /* Query OFDM default setting */
- tmpRegA = rtl8192_QueryBBReg(dev, rOFDM0_XATxIQImbalance, bMaskDWord);
- for (i = 0; i < OFDM_Table_Length; i++) { /* find the index */
- if (tmpRegA == OFDMSwingTable[i]) {
- priv->OFDM_index = (u8)i;
- RT_TRACE(COMP_POWER_TRACKING, "Initial reg0x%x = 0x%x, OFDM_index=0x%x\n",
- rOFDM0_XATxIQImbalance, tmpRegA, priv->OFDM_index);
- }
- }
-
- /* Query CCK default setting From 0xa22 */
- TempCCk = rtl8192_QueryBBReg(dev, rCCK0_TxFilter1, bMaskByte2);
- for (i = 0; i < CCK_Table_length; i++) {
- if (TempCCk == (u32)CCKSwingTable_Ch1_Ch13[i][0]) {
- priv->CCK_index = (u8) i;
- RT_TRACE(COMP_POWER_TRACKING, "Initial reg0x%x = 0x%x, CCK_index=0x%x\n",
- rCCK0_TxFilter1, TempCCk, priv->CCK_index);
- break;
- }
- }
- priv->btxpower_trackingInit = true;
- return;
- }
-
- /* ==========================
- * This is only for test and should be masked.
- * ==========================
- */
-
- /* read and filter out unreasonable value */
- tmpRegA = rtl8192_phy_QueryRFReg(dev, RF90_PATH_A, 0x12, 0x078); /* 0x12: RF Reg[10:7] */
- RT_TRACE(COMP_POWER_TRACKING, "Readback ThermalMeterA = %d\n", tmpRegA);
- if (tmpRegA < 3 || tmpRegA > 13)
- return;
- if (tmpRegA >= 12) /* if 12 or above, throughput degrades at high temperature */
- tmpRegA = 12;
- RT_TRACE(COMP_POWER_TRACKING, "Valid ThermalMeterA = %d\n", tmpRegA);
- priv->ThermalMeter[0] = ThermalMeterVal; /* We use fixed value by Bryant's suggestion */
- priv->ThermalMeter[1] = ThermalMeterVal; /* We use fixed value by Bryant's suggestion */
-
- /* Get current RF-A temperature index */
- if (priv->ThermalMeter[0] >= (u8)tmpRegA) { /* lower temperature */
- tmpOFDMindex = tmpCCK20Mindex = 6+(priv->ThermalMeter[0]-(u8)tmpRegA);
- tmpCCK40Mindex = tmpCCK20Mindex - 6;
- if (tmpOFDMindex >= OFDM_Table_Length)
- tmpOFDMindex = OFDM_Table_Length-1;
- if (tmpCCK20Mindex >= CCK_Table_length)
- tmpCCK20Mindex = CCK_Table_length-1;
- if (tmpCCK40Mindex >= CCK_Table_length)
- tmpCCK40Mindex = CCK_Table_length-1;
- } else {
- tmpval = (u8)tmpRegA - priv->ThermalMeter[0];
-
- if (tmpval >= 6) {
- /* higher temperature */
- tmpOFDMindex = 0;
- tmpCCK20Mindex = 0;
- } else {
- /* max to +6dB */
- tmpOFDMindex = 6 - tmpval;
- tmpCCK20Mindex = 6 - tmpval;
- }
- tmpCCK40Mindex = 0;
- }
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) /* 40M */
- tmpCCKindex = tmpCCK40Mindex;
- else
- tmpCCKindex = tmpCCK20Mindex;
-
- if (priv->ieee80211->current_network.channel == 14 && !priv->bcck_in_ch14) {
- priv->bcck_in_ch14 = true;
- CCKSwingNeedUpdate = 1;
- } else if (priv->ieee80211->current_network.channel != 14 && priv->bcck_in_ch14) {
- priv->bcck_in_ch14 = false;
- CCKSwingNeedUpdate = 1;
- }
-
- if (priv->CCK_index != tmpCCKindex) {
- priv->CCK_index = tmpCCKindex;
- CCKSwingNeedUpdate = 1;
- }
-
- if (CCKSwingNeedUpdate) {
- dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
- }
- if (priv->OFDM_index != tmpOFDMindex) {
- priv->OFDM_index = tmpOFDMindex;
- rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord, OFDMSwingTable[priv->OFDM_index]);
- RT_TRACE(COMP_POWER_TRACKING, "Update OFDMSwing[%d] = 0x%x\n",
- priv->OFDM_index, OFDMSwingTable[priv->OFDM_index]);
- }
- priv->txpower_count = 0;
-}
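
A minimal sketch (illustrative names) of the thermal-meter-to-index mapping above, with index 6 as the 0 dB reference entry of the swing table:

    static unsigned int ofdm_index_from_thermal(unsigned int ref, unsigned int now,
                                                unsigned int table_len)
    {
            unsigned int idx;

            if (ref >= now)            /* reading at or below the reference: lower temperature */
                    idx = 6 + (ref - now);
            else if (now - ref >= 6)   /* much hotter: clamp at the +6 dB entry */
                    idx = 0;
            else
                    idx = 6 - (now - ref);

            return (idx >= table_len) ? table_len - 1 : idx;
    }
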
-
-void dm_txpower_trackingcallback(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct r8192_priv *priv = container_of(dwork, struct r8192_priv, txpower_tracking_wq);
- struct net_device *dev = priv->ieee80211->dev;
-
- if (priv->bDcut)
- dm_TXPowerTrackingCallback_TSSI(dev);
- else
- dm_TXPowerTrackingCallback_ThermalMeter(dev);
-}
-
-static void dm_InitializeTXPowerTracking_TSSI(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- /* Initial the Tx BB index and mapping value */
- priv->txbbgain_table[0].txbb_iq_amplifygain = 12;
- priv->txbbgain_table[0].txbbgain_value = 0x7f8001fe;
- priv->txbbgain_table[1].txbb_iq_amplifygain = 11;
- priv->txbbgain_table[1].txbbgain_value = 0x788001e2;
- priv->txbbgain_table[2].txbb_iq_amplifygain = 10;
- priv->txbbgain_table[2].txbbgain_value = 0x71c001c7;
- priv->txbbgain_table[3].txbb_iq_amplifygain = 9;
- priv->txbbgain_table[3].txbbgain_value = 0x6b8001ae;
- priv->txbbgain_table[4].txbb_iq_amplifygain = 8;
- priv->txbbgain_table[4].txbbgain_value = 0x65400195;
- priv->txbbgain_table[5].txbb_iq_amplifygain = 7;
- priv->txbbgain_table[5].txbbgain_value = 0x5fc0017f;
- priv->txbbgain_table[6].txbb_iq_amplifygain = 6;
- priv->txbbgain_table[6].txbbgain_value = 0x5a400169;
- priv->txbbgain_table[7].txbb_iq_amplifygain = 5;
- priv->txbbgain_table[7].txbbgain_value = 0x55400155;
- priv->txbbgain_table[8].txbb_iq_amplifygain = 4;
- priv->txbbgain_table[8].txbbgain_value = 0x50800142;
- priv->txbbgain_table[9].txbb_iq_amplifygain = 3;
- priv->txbbgain_table[9].txbbgain_value = 0x4c000130;
- priv->txbbgain_table[10].txbb_iq_amplifygain = 2;
- priv->txbbgain_table[10].txbbgain_value = 0x47c0011f;
- priv->txbbgain_table[11].txbb_iq_amplifygain = 1;
- priv->txbbgain_table[11].txbbgain_value = 0x43c0010f;
- priv->txbbgain_table[12].txbb_iq_amplifygain = 0;
- priv->txbbgain_table[12].txbbgain_value = 0x40000100;
- priv->txbbgain_table[13].txbb_iq_amplifygain = -1;
- priv->txbbgain_table[13].txbbgain_value = 0x3c8000f2;
- priv->txbbgain_table[14].txbb_iq_amplifygain = -2;
- priv->txbbgain_table[14].txbbgain_value = 0x390000e4;
- priv->txbbgain_table[15].txbb_iq_amplifygain = -3;
- priv->txbbgain_table[15].txbbgain_value = 0x35c000d7;
- priv->txbbgain_table[16].txbb_iq_amplifygain = -4;
- priv->txbbgain_table[16].txbbgain_value = 0x32c000cb;
- priv->txbbgain_table[17].txbb_iq_amplifygain = -5;
- priv->txbbgain_table[17].txbbgain_value = 0x300000c0;
- priv->txbbgain_table[18].txbb_iq_amplifygain = -6;
- priv->txbbgain_table[18].txbbgain_value = 0x2d4000b5;
- priv->txbbgain_table[19].txbb_iq_amplifygain = -7;
- priv->txbbgain_table[19].txbbgain_value = 0x2ac000ab;
- priv->txbbgain_table[20].txbb_iq_amplifygain = -8;
- priv->txbbgain_table[20].txbbgain_value = 0x288000a2;
- priv->txbbgain_table[21].txbb_iq_amplifygain = -9;
- priv->txbbgain_table[21].txbbgain_value = 0x26000098;
- priv->txbbgain_table[22].txbb_iq_amplifygain = -10;
- priv->txbbgain_table[22].txbbgain_value = 0x24000090;
- priv->txbbgain_table[23].txbb_iq_amplifygain = -11;
- priv->txbbgain_table[23].txbbgain_value = 0x22000088;
- priv->txbbgain_table[24].txbb_iq_amplifygain = -12;
- priv->txbbgain_table[24].txbbgain_value = 0x20000080;
- priv->txbbgain_table[25].txbb_iq_amplifygain = -13;
- priv->txbbgain_table[25].txbbgain_value = 0x1a00006c;
- priv->txbbgain_table[26].txbb_iq_amplifygain = -14;
- priv->txbbgain_table[26].txbbgain_value = 0x1c800072;
- priv->txbbgain_table[27].txbb_iq_amplifygain = -15;
- priv->txbbgain_table[27].txbbgain_value = 0x18000060;
- priv->txbbgain_table[28].txbb_iq_amplifygain = -16;
- priv->txbbgain_table[28].txbbgain_value = 0x19800066;
- priv->txbbgain_table[29].txbb_iq_amplifygain = -17;
- priv->txbbgain_table[29].txbbgain_value = 0x15800056;
- priv->txbbgain_table[30].txbb_iq_amplifygain = -18;
- priv->txbbgain_table[30].txbbgain_value = 0x26c0005b;
- priv->txbbgain_table[31].txbb_iq_amplifygain = -19;
- priv->txbbgain_table[31].txbbgain_value = 0x14400051;
- priv->txbbgain_table[32].txbb_iq_amplifygain = -20;
- priv->txbbgain_table[32].txbbgain_value = 0x24400051;
- priv->txbbgain_table[33].txbb_iq_amplifygain = -21;
- priv->txbbgain_table[33].txbbgain_value = 0x1300004c;
- priv->txbbgain_table[34].txbb_iq_amplifygain = -22;
- priv->txbbgain_table[34].txbbgain_value = 0x12000048;
- priv->txbbgain_table[35].txbb_iq_amplifygain = -23;
- priv->txbbgain_table[35].txbbgain_value = 0x11000044;
- priv->txbbgain_table[36].txbb_iq_amplifygain = -24;
- priv->txbbgain_table[36].txbbgain_value = 0x10000040;
-
- /* ccktxbb_valuearray[0] is 0xA22, [1] is 0xA23, ... [7] is 0xA29.
- * This table is for CH1~CH13.
- */
- priv->cck_txbbgain_table[0].ccktxbb_valuearray[0] = 0x36;
- priv->cck_txbbgain_table[0].ccktxbb_valuearray[1] = 0x35;
- priv->cck_txbbgain_table[0].ccktxbb_valuearray[2] = 0x2e;
- priv->cck_txbbgain_table[0].ccktxbb_valuearray[3] = 0x25;
- priv->cck_txbbgain_table[0].ccktxbb_valuearray[4] = 0x1c;
- priv->cck_txbbgain_table[0].ccktxbb_valuearray[5] = 0x12;
- priv->cck_txbbgain_table[0].ccktxbb_valuearray[6] = 0x09;
- priv->cck_txbbgain_table[0].ccktxbb_valuearray[7] = 0x04;
-
- priv->cck_txbbgain_table[1].ccktxbb_valuearray[0] = 0x33;
- priv->cck_txbbgain_table[1].ccktxbb_valuearray[1] = 0x32;
- priv->cck_txbbgain_table[1].ccktxbb_valuearray[2] = 0x2b;
- priv->cck_txbbgain_table[1].ccktxbb_valuearray[3] = 0x23;
- priv->cck_txbbgain_table[1].ccktxbb_valuearray[4] = 0x1a;
- priv->cck_txbbgain_table[1].ccktxbb_valuearray[5] = 0x11;
- priv->cck_txbbgain_table[1].ccktxbb_valuearray[6] = 0x08;
- priv->cck_txbbgain_table[1].ccktxbb_valuearray[7] = 0x04;
-
- priv->cck_txbbgain_table[2].ccktxbb_valuearray[0] = 0x30;
- priv->cck_txbbgain_table[2].ccktxbb_valuearray[1] = 0x2f;
- priv->cck_txbbgain_table[2].ccktxbb_valuearray[2] = 0x29;
- priv->cck_txbbgain_table[2].ccktxbb_valuearray[3] = 0x21;
- priv->cck_txbbgain_table[2].ccktxbb_valuearray[4] = 0x19;
- priv->cck_txbbgain_table[2].ccktxbb_valuearray[5] = 0x10;
- priv->cck_txbbgain_table[2].ccktxbb_valuearray[6] = 0x08;
- priv->cck_txbbgain_table[2].ccktxbb_valuearray[7] = 0x03;
-
- priv->cck_txbbgain_table[3].ccktxbb_valuearray[0] = 0x2d;
- priv->cck_txbbgain_table[3].ccktxbb_valuearray[1] = 0x2d;
- priv->cck_txbbgain_table[3].ccktxbb_valuearray[2] = 0x27;
- priv->cck_txbbgain_table[3].ccktxbb_valuearray[3] = 0x1f;
- priv->cck_txbbgain_table[3].ccktxbb_valuearray[4] = 0x18;
- priv->cck_txbbgain_table[3].ccktxbb_valuearray[5] = 0x0f;
- priv->cck_txbbgain_table[3].ccktxbb_valuearray[6] = 0x08;
- priv->cck_txbbgain_table[3].ccktxbb_valuearray[7] = 0x03;
-
- priv->cck_txbbgain_table[4].ccktxbb_valuearray[0] = 0x2b;
- priv->cck_txbbgain_table[4].ccktxbb_valuearray[1] = 0x2a;
- priv->cck_txbbgain_table[4].ccktxbb_valuearray[2] = 0x25;
- priv->cck_txbbgain_table[4].ccktxbb_valuearray[3] = 0x1e;
- priv->cck_txbbgain_table[4].ccktxbb_valuearray[4] = 0x16;
- priv->cck_txbbgain_table[4].ccktxbb_valuearray[5] = 0x0e;
- priv->cck_txbbgain_table[4].ccktxbb_valuearray[6] = 0x07;
- priv->cck_txbbgain_table[4].ccktxbb_valuearray[7] = 0x03;
-
- priv->cck_txbbgain_table[5].ccktxbb_valuearray[0] = 0x28;
- priv->cck_txbbgain_table[5].ccktxbb_valuearray[1] = 0x28;
- priv->cck_txbbgain_table[5].ccktxbb_valuearray[2] = 0x22;
- priv->cck_txbbgain_table[5].ccktxbb_valuearray[3] = 0x1c;
- priv->cck_txbbgain_table[5].ccktxbb_valuearray[4] = 0x15;
- priv->cck_txbbgain_table[5].ccktxbb_valuearray[5] = 0x0d;
- priv->cck_txbbgain_table[5].ccktxbb_valuearray[6] = 0x07;
- priv->cck_txbbgain_table[5].ccktxbb_valuearray[7] = 0x03;
-
- priv->cck_txbbgain_table[6].ccktxbb_valuearray[0] = 0x26;
- priv->cck_txbbgain_table[6].ccktxbb_valuearray[1] = 0x25;
- priv->cck_txbbgain_table[6].ccktxbb_valuearray[2] = 0x21;
- priv->cck_txbbgain_table[6].ccktxbb_valuearray[3] = 0x1b;
- priv->cck_txbbgain_table[6].ccktxbb_valuearray[4] = 0x14;
- priv->cck_txbbgain_table[6].ccktxbb_valuearray[5] = 0x0d;
- priv->cck_txbbgain_table[6].ccktxbb_valuearray[6] = 0x06;
- priv->cck_txbbgain_table[6].ccktxbb_valuearray[7] = 0x03;
-
- priv->cck_txbbgain_table[7].ccktxbb_valuearray[0] = 0x24;
- priv->cck_txbbgain_table[7].ccktxbb_valuearray[1] = 0x23;
- priv->cck_txbbgain_table[7].ccktxbb_valuearray[2] = 0x1f;
- priv->cck_txbbgain_table[7].ccktxbb_valuearray[3] = 0x19;
- priv->cck_txbbgain_table[7].ccktxbb_valuearray[4] = 0x13;
- priv->cck_txbbgain_table[7].ccktxbb_valuearray[5] = 0x0c;
- priv->cck_txbbgain_table[7].ccktxbb_valuearray[6] = 0x06;
- priv->cck_txbbgain_table[7].ccktxbb_valuearray[7] = 0x03;
-
- priv->cck_txbbgain_table[8].ccktxbb_valuearray[0] = 0x22;
- priv->cck_txbbgain_table[8].ccktxbb_valuearray[1] = 0x21;
- priv->cck_txbbgain_table[8].ccktxbb_valuearray[2] = 0x1d;
- priv->cck_txbbgain_table[8].ccktxbb_valuearray[3] = 0x18;
- priv->cck_txbbgain_table[8].ccktxbb_valuearray[4] = 0x11;
- priv->cck_txbbgain_table[8].ccktxbb_valuearray[5] = 0x0b;
- priv->cck_txbbgain_table[8].ccktxbb_valuearray[6] = 0x06;
- priv->cck_txbbgain_table[8].ccktxbb_valuearray[7] = 0x02;
-
- priv->cck_txbbgain_table[9].ccktxbb_valuearray[0] = 0x20;
- priv->cck_txbbgain_table[9].ccktxbb_valuearray[1] = 0x20;
- priv->cck_txbbgain_table[9].ccktxbb_valuearray[2] = 0x1b;
- priv->cck_txbbgain_table[9].ccktxbb_valuearray[3] = 0x16;
- priv->cck_txbbgain_table[9].ccktxbb_valuearray[4] = 0x11;
- priv->cck_txbbgain_table[9].ccktxbb_valuearray[5] = 0x08;
- priv->cck_txbbgain_table[9].ccktxbb_valuearray[6] = 0x05;
- priv->cck_txbbgain_table[9].ccktxbb_valuearray[7] = 0x02;
-
- priv->cck_txbbgain_table[10].ccktxbb_valuearray[0] = 0x1f;
- priv->cck_txbbgain_table[10].ccktxbb_valuearray[1] = 0x1e;
- priv->cck_txbbgain_table[10].ccktxbb_valuearray[2] = 0x1a;
- priv->cck_txbbgain_table[10].ccktxbb_valuearray[3] = 0x15;
- priv->cck_txbbgain_table[10].ccktxbb_valuearray[4] = 0x10;
- priv->cck_txbbgain_table[10].ccktxbb_valuearray[5] = 0x0a;
- priv->cck_txbbgain_table[10].ccktxbb_valuearray[6] = 0x05;
- priv->cck_txbbgain_table[10].ccktxbb_valuearray[7] = 0x02;
-
- priv->cck_txbbgain_table[11].ccktxbb_valuearray[0] = 0x1d;
- priv->cck_txbbgain_table[11].ccktxbb_valuearray[1] = 0x1c;
- priv->cck_txbbgain_table[11].ccktxbb_valuearray[2] = 0x18;
- priv->cck_txbbgain_table[11].ccktxbb_valuearray[3] = 0x14;
- priv->cck_txbbgain_table[11].ccktxbb_valuearray[4] = 0x0f;
- priv->cck_txbbgain_table[11].ccktxbb_valuearray[5] = 0x0a;
- priv->cck_txbbgain_table[11].ccktxbb_valuearray[6] = 0x05;
- priv->cck_txbbgain_table[11].ccktxbb_valuearray[7] = 0x02;
-
- priv->cck_txbbgain_table[12].ccktxbb_valuearray[0] = 0x1b;
- priv->cck_txbbgain_table[12].ccktxbb_valuearray[1] = 0x1a;
- priv->cck_txbbgain_table[12].ccktxbb_valuearray[2] = 0x17;
- priv->cck_txbbgain_table[12].ccktxbb_valuearray[3] = 0x13;
- priv->cck_txbbgain_table[12].ccktxbb_valuearray[4] = 0x0e;
- priv->cck_txbbgain_table[12].ccktxbb_valuearray[5] = 0x09;
- priv->cck_txbbgain_table[12].ccktxbb_valuearray[6] = 0x04;
- priv->cck_txbbgain_table[12].ccktxbb_valuearray[7] = 0x02;
-
- priv->cck_txbbgain_table[13].ccktxbb_valuearray[0] = 0x1a;
- priv->cck_txbbgain_table[13].ccktxbb_valuearray[1] = 0x19;
- priv->cck_txbbgain_table[13].ccktxbb_valuearray[2] = 0x16;
- priv->cck_txbbgain_table[13].ccktxbb_valuearray[3] = 0x12;
- priv->cck_txbbgain_table[13].ccktxbb_valuearray[4] = 0x0d;
- priv->cck_txbbgain_table[13].ccktxbb_valuearray[5] = 0x09;
- priv->cck_txbbgain_table[13].ccktxbb_valuearray[6] = 0x04;
- priv->cck_txbbgain_table[13].ccktxbb_valuearray[7] = 0x02;
-
- priv->cck_txbbgain_table[14].ccktxbb_valuearray[0] = 0x18;
- priv->cck_txbbgain_table[14].ccktxbb_valuearray[1] = 0x17;
- priv->cck_txbbgain_table[14].ccktxbb_valuearray[2] = 0x15;
- priv->cck_txbbgain_table[14].ccktxbb_valuearray[3] = 0x11;
- priv->cck_txbbgain_table[14].ccktxbb_valuearray[4] = 0x0c;
- priv->cck_txbbgain_table[14].ccktxbb_valuearray[5] = 0x08;
- priv->cck_txbbgain_table[14].ccktxbb_valuearray[6] = 0x04;
- priv->cck_txbbgain_table[14].ccktxbb_valuearray[7] = 0x02;
-
- priv->cck_txbbgain_table[15].ccktxbb_valuearray[0] = 0x17;
- priv->cck_txbbgain_table[15].ccktxbb_valuearray[1] = 0x16;
- priv->cck_txbbgain_table[15].ccktxbb_valuearray[2] = 0x13;
- priv->cck_txbbgain_table[15].ccktxbb_valuearray[3] = 0x10;
- priv->cck_txbbgain_table[15].ccktxbb_valuearray[4] = 0x0c;
- priv->cck_txbbgain_table[15].ccktxbb_valuearray[5] = 0x08;
- priv->cck_txbbgain_table[15].ccktxbb_valuearray[6] = 0x04;
- priv->cck_txbbgain_table[15].ccktxbb_valuearray[7] = 0x02;
-
- priv->cck_txbbgain_table[16].ccktxbb_valuearray[0] = 0x16;
- priv->cck_txbbgain_table[16].ccktxbb_valuearray[1] = 0x15;
- priv->cck_txbbgain_table[16].ccktxbb_valuearray[2] = 0x12;
- priv->cck_txbbgain_table[16].ccktxbb_valuearray[3] = 0x0f;
- priv->cck_txbbgain_table[16].ccktxbb_valuearray[4] = 0x0b;
- priv->cck_txbbgain_table[16].ccktxbb_valuearray[5] = 0x07;
- priv->cck_txbbgain_table[16].ccktxbb_valuearray[6] = 0x04;
- priv->cck_txbbgain_table[16].ccktxbb_valuearray[7] = 0x01;
-
- priv->cck_txbbgain_table[17].ccktxbb_valuearray[0] = 0x14;
- priv->cck_txbbgain_table[17].ccktxbb_valuearray[1] = 0x14;
- priv->cck_txbbgain_table[17].ccktxbb_valuearray[2] = 0x11;
- priv->cck_txbbgain_table[17].ccktxbb_valuearray[3] = 0x0e;
- priv->cck_txbbgain_table[17].ccktxbb_valuearray[4] = 0x0b;
- priv->cck_txbbgain_table[17].ccktxbb_valuearray[5] = 0x07;
- priv->cck_txbbgain_table[17].ccktxbb_valuearray[6] = 0x03;
- priv->cck_txbbgain_table[17].ccktxbb_valuearray[7] = 0x02;
-
- priv->cck_txbbgain_table[18].ccktxbb_valuearray[0] = 0x13;
- priv->cck_txbbgain_table[18].ccktxbb_valuearray[1] = 0x13;
- priv->cck_txbbgain_table[18].ccktxbb_valuearray[2] = 0x10;
- priv->cck_txbbgain_table[18].ccktxbb_valuearray[3] = 0x0d;
- priv->cck_txbbgain_table[18].ccktxbb_valuearray[4] = 0x0a;
- priv->cck_txbbgain_table[18].ccktxbb_valuearray[5] = 0x06;
- priv->cck_txbbgain_table[18].ccktxbb_valuearray[6] = 0x03;
- priv->cck_txbbgain_table[18].ccktxbb_valuearray[7] = 0x01;
-
- priv->cck_txbbgain_table[19].ccktxbb_valuearray[0] = 0x12;
- priv->cck_txbbgain_table[19].ccktxbb_valuearray[1] = 0x12;
- priv->cck_txbbgain_table[19].ccktxbb_valuearray[2] = 0x0f;
- priv->cck_txbbgain_table[19].ccktxbb_valuearray[3] = 0x0c;
- priv->cck_txbbgain_table[19].ccktxbb_valuearray[4] = 0x09;
- priv->cck_txbbgain_table[19].ccktxbb_valuearray[5] = 0x06;
- priv->cck_txbbgain_table[19].ccktxbb_valuearray[6] = 0x03;
- priv->cck_txbbgain_table[19].ccktxbb_valuearray[7] = 0x01;
-
- priv->cck_txbbgain_table[20].ccktxbb_valuearray[0] = 0x11;
- priv->cck_txbbgain_table[20].ccktxbb_valuearray[1] = 0x11;
- priv->cck_txbbgain_table[20].ccktxbb_valuearray[2] = 0x0f;
- priv->cck_txbbgain_table[20].ccktxbb_valuearray[3] = 0x0c;
- priv->cck_txbbgain_table[20].ccktxbb_valuearray[4] = 0x09;
- priv->cck_txbbgain_table[20].ccktxbb_valuearray[5] = 0x06;
- priv->cck_txbbgain_table[20].ccktxbb_valuearray[6] = 0x03;
- priv->cck_txbbgain_table[20].ccktxbb_valuearray[7] = 0x01;
-
- priv->cck_txbbgain_table[21].ccktxbb_valuearray[0] = 0x10;
- priv->cck_txbbgain_table[21].ccktxbb_valuearray[1] = 0x10;
- priv->cck_txbbgain_table[21].ccktxbb_valuearray[2] = 0x0e;
- priv->cck_txbbgain_table[21].ccktxbb_valuearray[3] = 0x0b;
- priv->cck_txbbgain_table[21].ccktxbb_valuearray[4] = 0x08;
- priv->cck_txbbgain_table[21].ccktxbb_valuearray[5] = 0x05;
- priv->cck_txbbgain_table[21].ccktxbb_valuearray[6] = 0x03;
- priv->cck_txbbgain_table[21].ccktxbb_valuearray[7] = 0x01;
-
- priv->cck_txbbgain_table[22].ccktxbb_valuearray[0] = 0x0f;
- priv->cck_txbbgain_table[22].ccktxbb_valuearray[1] = 0x0f;
- priv->cck_txbbgain_table[22].ccktxbb_valuearray[2] = 0x0d;
- priv->cck_txbbgain_table[22].ccktxbb_valuearray[3] = 0x0b;
- priv->cck_txbbgain_table[22].ccktxbb_valuearray[4] = 0x08;
- priv->cck_txbbgain_table[22].ccktxbb_valuearray[5] = 0x05;
- priv->cck_txbbgain_table[22].ccktxbb_valuearray[6] = 0x03;
- priv->cck_txbbgain_table[22].ccktxbb_valuearray[7] = 0x01;
-
- /* ccktxbb_valuearray[0] is 0xA22, [1] is 0xA23, ... [7] is 0xA29.
- * This table is for CH14.
- */
- priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[0] = 0x36;
- priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[1] = 0x35;
- priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[2] = 0x2e;
- priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[3] = 0x1b;
- priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[4] = 0x00;
- priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[5] = 0x00;
- priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[6] = 0x00;
- priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[7] = 0x00;
-
- priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[0] = 0x33;
- priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[1] = 0x32;
- priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[2] = 0x2b;
- priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[3] = 0x19;
- priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[4] = 0x00;
- priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[5] = 0x00;
- priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[6] = 0x00;
- priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[7] = 0x00;
-
- priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[0] = 0x30;
- priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[1] = 0x2f;
- priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[2] = 0x29;
- priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[3] = 0x18;
- priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[4] = 0x00;
- priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[5] = 0x00;
- priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[6] = 0x00;
- priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[7] = 0x00;
-
- priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[0] = 0x2d;
- priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[1] = 0x2d;
- priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[2] = 0x27;
- priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[3] = 0x17;
- priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[4] = 0x00;
- priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[5] = 0x00;
- priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[6] = 0x00;
- priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[7] = 0x00;
-
- priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[0] = 0x2b;
- priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[1] = 0x2a;
- priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[2] = 0x25;
- priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[3] = 0x15;
- priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[4] = 0x00;
- priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[5] = 0x00;
- priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[6] = 0x00;
- priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[7] = 0x00;
-
- priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[0] = 0x28;
- priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[1] = 0x28;
- priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[2] = 0x22;
- priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[3] = 0x14;
- priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[4] = 0x00;
- priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[5] = 0x00;
- priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[6] = 0x00;
- priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[7] = 0x00;
-
- priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[0] = 0x26;
- priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[1] = 0x25;
- priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[2] = 0x21;
- priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[3] = 0x13;
- priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[4] = 0x00;
- priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[5] = 0x00;
- priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[6] = 0x00;
- priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[7] = 0x00;
-
- priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[0] = 0x24;
- priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[1] = 0x23;
- priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[2] = 0x1f;
- priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[3] = 0x12;
- priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[4] = 0x00;
- priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[5] = 0x00;
- priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[6] = 0x00;
- priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[7] = 0x00;
-
- priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[0] = 0x22;
- priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[1] = 0x21;
- priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[2] = 0x1d;
- priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[3] = 0x11;
- priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[4] = 0x00;
- priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[5] = 0x00;
- priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[6] = 0x00;
- priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[7] = 0x00;
-
- priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[0] = 0x20;
- priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[1] = 0x20;
- priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[2] = 0x1b;
- priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[3] = 0x10;
- priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[4] = 0x00;
- priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[5] = 0x00;
- priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[6] = 0x00;
- priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[7] = 0x00;
-
- priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[0] = 0x1f;
- priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[1] = 0x1e;
- priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[2] = 0x1a;
- priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[3] = 0x0f;
- priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[4] = 0x00;
- priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[5] = 0x00;
- priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[6] = 0x00;
- priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[7] = 0x00;
-
- priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[0] = 0x1d;
- priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[1] = 0x1c;
- priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[2] = 0x18;
- priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[3] = 0x0e;
- priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[4] = 0x00;
- priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[5] = 0x00;
- priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[6] = 0x00;
- priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[7] = 0x00;
-
- priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[0] = 0x1b;
- priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[1] = 0x1a;
- priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[2] = 0x17;
- priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[3] = 0x0e;
- priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[4] = 0x00;
- priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[5] = 0x00;
- priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[6] = 0x00;
- priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[7] = 0x00;
-
- priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[0] = 0x1a;
- priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[1] = 0x19;
- priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[2] = 0x16;
- priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[3] = 0x0d;
- priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[4] = 0x00;
- priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[5] = 0x00;
- priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[6] = 0x00;
- priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[7] = 0x00;
-
- priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[0] = 0x18;
- priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[1] = 0x17;
- priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[2] = 0x15;
- priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[3] = 0x0c;
- priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[4] = 0x00;
- priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[5] = 0x00;
- priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[6] = 0x00;
- priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[7] = 0x00;
-
- priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[0] = 0x17;
- priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[1] = 0x16;
- priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[2] = 0x13;
- priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[3] = 0x0b;
- priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[4] = 0x00;
- priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[5] = 0x00;
- priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[6] = 0x00;
- priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[7] = 0x00;
-
- priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[0] = 0x16;
- priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[1] = 0x15;
- priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[2] = 0x12;
- priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[3] = 0x0b;
- priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[4] = 0x00;
- priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[5] = 0x00;
- priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[6] = 0x00;
- priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[7] = 0x00;
-
- priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[0] = 0x14;
- priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[1] = 0x14;
- priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[2] = 0x11;
- priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[3] = 0x0a;
- priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[4] = 0x00;
- priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[5] = 0x00;
- priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[6] = 0x00;
- priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[7] = 0x00;
-
- priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[0] = 0x13;
- priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[1] = 0x13;
- priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[2] = 0x10;
- priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[3] = 0x0a;
- priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[4] = 0x00;
- priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[5] = 0x00;
- priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[6] = 0x00;
- priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[7] = 0x00;
-
- priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[0] = 0x12;
- priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[1] = 0x12;
- priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[2] = 0x0f;
- priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[3] = 0x09;
- priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[4] = 0x00;
- priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[5] = 0x00;
- priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[6] = 0x00;
- priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[7] = 0x00;
-
- priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[0] = 0x11;
- priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[1] = 0x11;
- priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[2] = 0x0f;
- priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[3] = 0x09;
- priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[4] = 0x00;
- priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[5] = 0x00;
- priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[6] = 0x00;
- priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[7] = 0x00;
-
- priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[0] = 0x10;
- priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[1] = 0x10;
- priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[2] = 0x0e;
- priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[3] = 0x08;
- priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[4] = 0x00;
- priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[5] = 0x00;
- priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[6] = 0x00;
- priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[7] = 0x00;
-
- priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[0] = 0x0f;
- priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[1] = 0x0f;
- priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[2] = 0x0d;
- priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[3] = 0x08;
- priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[4] = 0x00;
- priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[5] = 0x00;
- priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[6] = 0x00;
- priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[7] = 0x00;
-
- priv->btxpower_tracking = true;
- priv->txpower_count = 0;
- priv->btxpower_trackingInit = false;
-}
-
-static void dm_InitializeTXPowerTracking_ThermalMeter(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- /* Tx Power tracking by Thermal Meter requires Firmware R/W 3-wire. This mechanism
- * can be enabled only when Firmware R/W 3-wire is enabled. Otherwise, frequent r/w
- * 3-wire by driver causes RF to go into a wrong state.
- */
- if (priv->ieee80211->FwRWRF)
- priv->btxpower_tracking = true;
- else
- priv->btxpower_tracking = false;
- priv->txpower_count = 0;
- priv->btxpower_trackingInit = false;
-}
-
-void dm_initialize_txpower_tracking(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (priv->bDcut)
- dm_InitializeTXPowerTracking_TSSI(dev);
- else
- dm_InitializeTXPowerTracking_ThermalMeter(dev);
-} /* dm_InitializeTXPowerTracking */
-
-static void dm_CheckTXPowerTracking_TSSI(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- static u32 tx_power_track_counter;
-
- if (!priv->btxpower_tracking)
- return;
- if ((tx_power_track_counter % 30 == 0) && (tx_power_track_counter != 0))
- queue_delayed_work(priv->priv_wq, &priv->txpower_tracking_wq, 0);
- tx_power_track_counter++;
-}
-
-static void dm_CheckTXPowerTracking_ThermalMeter(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- static u8 TM_Trigger;
- if (!priv->btxpower_tracking)
- return;
- if (priv->txpower_count <= 2) {
- priv->txpower_count++;
- return;
- }
-
- if (!TM_Trigger) {
- /* Attention!! You have to write all 12 bits of data to the RF, or it may cause the RF to crash.
- * Actually write reg 0x02 bit1=0, then bit1=1.
- * DbgPrint("Trigger ThermalMeter, write RF reg0x2 = 0x4d to 0x4f\n");
- */
- rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4d);
- rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4f);
- rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4d);
- rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4f);
- TM_Trigger = 1;
- return;
- }
- queue_delayed_work(priv->priv_wq, &priv->txpower_tracking_wq, 0);
- TM_Trigger = 0;
-}
-
-static void dm_check_txpower_tracking(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
-#ifdef RTL8190P
- dm_CheckTXPowerTracking_TSSI(dev);
-#else
- if (priv->bDcut)
- dm_CheckTXPowerTracking_TSSI(dev);
- else
- dm_CheckTXPowerTracking_ThermalMeter(dev);
-#endif
-
-} /* dm_CheckTXPowerTracking */
-
-static void dm_CCKTxPowerAdjust_TSSI(struct net_device *dev, bool bInCH14)
-{
- u32 TempVal;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- /* Write 0xa22 0xa23 */
- TempVal = 0;
- if (!bInCH14) {
- /* Write 0xa22 0xa23 */
- TempVal = priv->cck_txbbgain_table[priv->cck_present_attenuation].ccktxbb_valuearray[0] +
- (priv->cck_txbbgain_table[priv->cck_present_attenuation].ccktxbb_valuearray[1]<<8);
-
- rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
- /* Write 0xa24 ~ 0xa27 */
- TempVal = priv->cck_txbbgain_table[priv->cck_present_attenuation].ccktxbb_valuearray[2] +
- (priv->cck_txbbgain_table[priv->cck_present_attenuation].ccktxbb_valuearray[3]<<8) +
- (priv->cck_txbbgain_table[priv->cck_present_attenuation].ccktxbb_valuearray[4]<<16)+
- (priv->cck_txbbgain_table[priv->cck_present_attenuation].ccktxbb_valuearray[5]<<24);
- rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
- /* Write 0xa28 0xa29 */
- TempVal = priv->cck_txbbgain_table[priv->cck_present_attenuation].ccktxbb_valuearray[6] +
- (priv->cck_txbbgain_table[priv->cck_present_attenuation].ccktxbb_valuearray[7]<<8);
-
- rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
- } else {
- TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attenuation].ccktxbb_valuearray[0] +
- (priv->cck_txbbgain_ch14_table[priv->cck_present_attenuation].ccktxbb_valuearray[1]<<8);
-
- rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
- /* Write 0xa24 ~ 0xa27 */
- TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attenuation].ccktxbb_valuearray[2] +
- (priv->cck_txbbgain_ch14_table[priv->cck_present_attenuation].ccktxbb_valuearray[3]<<8) +
- (priv->cck_txbbgain_ch14_table[priv->cck_present_attenuation].ccktxbb_valuearray[4]<<16)+
- (priv->cck_txbbgain_ch14_table[priv->cck_present_attenuation].ccktxbb_valuearray[5]<<24);
- rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
- /* Write 0xa28 0xa29 */
- TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attenuation].ccktxbb_valuearray[6] +
- (priv->cck_txbbgain_ch14_table[priv->cck_present_attenuation].ccktxbb_valuearray[7]<<8);
-
- rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
- }
-}
-
-static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH14)
-{
- u32 TempVal;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- TempVal = 0;
- if (!bInCH14) {
- /* Write 0xa22 0xa23 */
- TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][0] +
- (CCKSwingTable_Ch1_Ch13[priv->CCK_index][1]<<8);
- rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
- RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
- rCCK0_TxFilter1, TempVal);
- /* Write 0xa24 ~ 0xa27 */
- TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][2] +
- (CCKSwingTable_Ch1_Ch13[priv->CCK_index][3]<<8) +
- (CCKSwingTable_Ch1_Ch13[priv->CCK_index][4]<<16)+
- (CCKSwingTable_Ch1_Ch13[priv->CCK_index][5]<<24);
- rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
- RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
- rCCK0_TxFilter2, TempVal);
- /* Write 0xa28 0xa29 */
- TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][6] +
- (CCKSwingTable_Ch1_Ch13[priv->CCK_index][7]<<8);
-
- rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
- RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
- rCCK0_DebugPort, TempVal);
- } else {
- /* Write 0xa22 0xa23 */
- TempVal = CCKSwingTable_Ch14[priv->CCK_index][0] +
- (CCKSwingTable_Ch14[priv->CCK_index][1]<<8);
-
- rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
- RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
- rCCK0_TxFilter1, TempVal);
- /* Write 0xa24 ~ 0xa27 */
- TempVal = CCKSwingTable_Ch14[priv->CCK_index][2] +
- (CCKSwingTable_Ch14[priv->CCK_index][3]<<8) +
- (CCKSwingTable_Ch14[priv->CCK_index][4]<<16)+
- (CCKSwingTable_Ch14[priv->CCK_index][5]<<24);
- rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
- RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
- rCCK0_TxFilter2, TempVal);
- /* Write 0xa28 0xa29 */
- TempVal = CCKSwingTable_Ch14[priv->CCK_index][6] +
- (CCKSwingTable_Ch14[priv->CCK_index][7]<<8);
-
- rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
- RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
- rCCK0_DebugPort, TempVal);
- }
-}
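
A minimal sketch of how the eight per-rate CCK swing bytes are split across the three baseband register writes in the two helpers above; the 0xa22..0xa29 offsets come from the driver comments, while the struct and function names are illustrative:

    #include <stdint.h>

    struct cck_swing_regs {
            uint16_t filter1_hword;   /* bytes [0],[1]  -> 0xa22, 0xa23 */
            uint32_t filter2_dword;   /* bytes [2]..[5] -> 0xa24..0xa27 */
            uint16_t debugport_lword; /* bytes [6],[7]  -> 0xa28, 0xa29 */
    };

    static struct cck_swing_regs cck_pack(const uint8_t v[8])
    {
            struct cck_swing_regs r;

            r.filter1_hword   = (uint16_t)(v[0] | v[1] << 8);
            r.filter2_dword   = v[2] | (uint32_t)v[3] << 8 |
                                (uint32_t)v[4] << 16 | (uint32_t)v[5] << 24;
            r.debugport_lword = (uint16_t)(v[6] | v[7] << 8);
            return r;
    }
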
-
-void dm_cck_txpower_adjust(struct net_device *dev, bool binch14)
-{ /* dm_CCKTxPowerAdjust */
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (priv->bDcut)
- dm_CCKTxPowerAdjust_TSSI(dev, binch14);
- else
- dm_CCKTxPowerAdjust_ThermalMeter(dev, binch14);
-}
-
-#ifndef RTL8192U
-static void dm_txpower_reset_recovery(
- struct net_device *dev
-)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- RT_TRACE(COMP_POWER_TRACKING, "Start Reset Recovery ==>\n");
- rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfa_txpowertrackingindex].txbbgain_value);
- RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in 0xc80 is %08x\n", priv->txbbgain_table[priv->rfa_txpowertrackingindex].txbbgain_value);
- RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in RFA_txPowerTrackingIndex is %x\n", priv->rfa_txpowertrackingindex);
- RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery : RF A I/Q Amplify Gain is %ld\n", priv->txbbgain_table[priv->rfa_txpowertrackingindex].txbb_iq_amplifygain);
- RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: CCK Attenuation is %d dB\n", priv->cck_present_attenuation);
- dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
-
- rtl8192_setBBreg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfc_txpowertrackingindex].txbbgain_value);
- RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in 0xc90 is %08x\n", priv->txbbgain_table[priv->rfc_txpowertrackingindex].txbbgain_value);
- RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in RFC_txPowerTrackingIndex is %x\n", priv->rfc_txpowertrackingindex);
- RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery : RF C I/Q Amplify Gain is %ld\n", priv->txbbgain_table[priv->rfc_txpowertrackingindex].txbb_iq_amplifygain);
-
-} /* dm_TXPowerResetRecovery */
-
-void dm_restore_dynamic_mechanism_state(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u32 reg_ratr = priv->rate_adaptive.last_ratr;
-
- if (!priv->up) {
- RT_TRACE(COMP_RATE, "<---- dm_restore_dynamic_mechanism_state(): driver is going to unload\n");
- return;
- }
-
- /* Restore previous state for rate adaptive */
- if (priv->rate_adaptive.rate_adaptive_disabled)
- return;
- /* TODO: Only 11n mode is implemented currently. */
- if (!(priv->ieee80211->mode == WIRELESS_MODE_N_24G ||
- priv->ieee80211->mode == WIRELESS_MODE_N_5G))
- return;
-
- {
- /* 2007/11/15 MH Copy from 8190PCI. */
- u32 ratr_value;
-
- ratr_value = reg_ratr;
- if (priv->rf_type == RF_1T2R) { /* 1T2R, Spatial Stream 2 should be disabled */
- ratr_value &= ~(RATE_ALL_OFDM_2SS);
- }
- write_nic_dword(dev, RATR0, ratr_value);
- write_nic_byte(dev, UFWP, 1);
- }
- /* Restore TX Power Tracking Index */
- if (priv->btxpower_trackingInit && priv->btxpower_tracking)
- dm_txpower_reset_recovery(dev);
-
- /* Restore BB Initial Gain */
- dm_bb_initialgain_restore(dev);
-
-} /* DM_RestoreDynamicMechanismState */
-
-static void dm_bb_initialgain_restore(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u32 bit_mask = 0x7f; /* Bit0~ Bit6 */
-
- if (dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
- return;
-
- /* Disable Initial Gain */
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); /* Only clear byte 1 and rewrite. */
- rtl8192_setBBreg(dev, rOFDM0_XAAGCCore1, bit_mask, (u32)priv->initgain_backup.xaagccore1);
- rtl8192_setBBreg(dev, rOFDM0_XBAGCCore1, bit_mask, (u32)priv->initgain_backup.xbagccore1);
- rtl8192_setBBreg(dev, rOFDM0_XCAGCCore1, bit_mask, (u32)priv->initgain_backup.xcagccore1);
- rtl8192_setBBreg(dev, rOFDM0_XDAGCCore1, bit_mask, (u32)priv->initgain_backup.xdagccore1);
- bit_mask = bMaskByte2;
- rtl8192_setBBreg(dev, rCCK0_CCA, bit_mask, (u32)priv->initgain_backup.cca);
-
- RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc50 is %x\n", priv->initgain_backup.xaagccore1);
- RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc58 is %x\n", priv->initgain_backup.xbagccore1);
- RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc60 is %x\n", priv->initgain_backup.xcagccore1);
- RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc68 is %x\n", priv->initgain_backup.xdagccore1);
- RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xa0a is %x\n", priv->initgain_backup.cca);
- /* Enable Initial Gain */
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1); /* Only clear byte 1 and rewrite. */
-
-} /* dm_BBInitialGainRestore */
-
-static void dm_bb_initialgain_backup(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u32 bit_mask = bMaskByte0; /* Bit0~ Bit6 */
-
- if (dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
- return;
-
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); /* Only clear byte 1 and rewrite. */
- priv->initgain_backup.xaagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XAAGCCore1, bit_mask);
- priv->initgain_backup.xbagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XBAGCCore1, bit_mask);
- priv->initgain_backup.xcagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XCAGCCore1, bit_mask);
- priv->initgain_backup.xdagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XDAGCCore1, bit_mask);
- bit_mask = bMaskByte2;
- priv->initgain_backup.cca = (u8)rtl8192_QueryBBReg(dev, rCCK0_CCA, bit_mask);
-
- RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc50 is %x\n", priv->initgain_backup.xaagccore1);
- RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc58 is %x\n", priv->initgain_backup.xbagccore1);
- RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc60 is %x\n", priv->initgain_backup.xcagccore1);
- RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc68 is %x\n", priv->initgain_backup.xdagccore1);
- RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xa0a is %x\n", priv->initgain_backup.cca);
-
-} /* dm_BBInitialGainBackup */
-
-#endif
-/*-----------------------------------------------------------------------------
- * Function: dm_dig_init()
- *
- * Overview: Set DIG scheme init value.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *---------------------------------------------------------------------------
- */
-static void dm_dig_init(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- /* 2007/10/05 MH Disable DIG scheme now. Not tested. */
- dm_digtable.dig_enable_flag = true;
- dm_digtable.dig_algorithm = DIG_ALGO_BY_RSSI;
- dm_digtable.dig_algorithm_switch = 0;
-
- /* 2007/10/04 MH Define init gain threshold. */
- dm_digtable.dig_state = DM_STA_DIG_MAX;
- dm_digtable.dig_highpwr_state = DM_STA_DIG_MAX;
-
- dm_digtable.rssi_low_thresh = DM_DIG_THRESH_LOW;
- dm_digtable.rssi_high_thresh = DM_DIG_THRESH_HIGH;
-
- dm_digtable.rssi_high_power_lowthresh = DM_DIG_HIGH_PWR_THRESH_LOW;
- dm_digtable.rssi_high_power_highthresh = DM_DIG_HIGH_PWR_THRESH_HIGH;
-
- dm_digtable.rssi_val = 50; /* for new dig debug rssi value */
- dm_digtable.backoff_val = DM_DIG_BACKOFF;
- if (priv->CustomerID == RT_CID_819x_Netcore)
- dm_digtable.rx_gain_range_min = DM_DIG_MIN_NETCORE;
- else
- dm_digtable.rx_gain_range_min = DM_DIG_MIN;
-
-} /* dm_dig_init */
-
-/*-----------------------------------------------------------------------------
- * Function: dm_ctrl_initgain_byrssi()
- *
- * Overview: Driver must monitor RSSI and notify firmware to change initial
- * gain according to different thresholds. The BB team provides the
- * suggested solution.
- *
- * Input: struct net_device *dev
- *
- * Output: NONE
- *
- * Return: NONE
- *---------------------------------------------------------------------------
- */
-static void dm_ctrl_initgain_byrssi(struct net_device *dev)
-{
- if (!dm_digtable.dig_enable_flag)
- return;
-
- if (dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
- dm_ctrl_initgain_byrssi_by_fwfalse_alarm(dev);
- else if (dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
- dm_ctrl_initgain_byrssi_by_driverrssi(dev);
- /* ; */
- else
- return;
-}
-
-static void dm_ctrl_initgain_byrssi_by_driverrssi(
- struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 i;
- static u8 fw_dig;
-
- if (!dm_digtable.dig_enable_flag)
- return;
-
- if (dm_digtable.dig_algorithm_switch) /* if switched algorithm, we have to disable FW Dig. */
- fw_dig = 0;
-
- if (fw_dig <= 3) { /* execute several times to make sure the FW Dig is disabled */
- /* FW DIG Off */
- for (i = 0; i < 3; i++)
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); /* Only clear byte 1 and rewrite. */
- fw_dig++;
- dm_digtable.dig_state = DM_STA_DIG_OFF; /* fw dig off. */
- }
-
- if (priv->ieee80211->state == IEEE80211_LINKED)
- dm_digtable.cur_connect_state = DIG_CONNECT;
- else
- dm_digtable.cur_connect_state = DIG_DISCONNECT;
-
- dm_digtable.rssi_val = priv->undecorated_smoothed_pwdb;
- dm_initial_gain(dev);
- dm_pd_th(dev);
- dm_cs_ratio(dev);
- if (dm_digtable.dig_algorithm_switch)
- dm_digtable.dig_algorithm_switch = 0;
- dm_digtable.pre_connect_state = dm_digtable.cur_connect_state;
-
-} /* dm_CtrlInitGainByRssi */
-
-static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(
- struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- static u32 reset_cnt;
- u8 i;
-
- if (!dm_digtable.dig_enable_flag)
- return;
-
- if (dm_digtable.dig_algorithm_switch) {
- dm_digtable.dig_state = DM_STA_DIG_MAX;
- /* Fw DIG On. */
- for (i = 0; i < 3; i++)
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1); /* Only clear byte 1 and rewrite.*/
- dm_digtable.dig_algorithm_switch = 0;
- }
-
- if (priv->ieee80211->state != IEEE80211_LINKED)
- return;
-
- /* For smoothness, we cannot change the DIG state. */
- if ((priv->undecorated_smoothed_pwdb > dm_digtable.rssi_low_thresh) &&
- (priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_thresh))
- return;
-
- /* 1. When RSSI decreases, we have to judge whether it is smaller than a threshold
- * and then execute the step below.
- */
- if (priv->undecorated_smoothed_pwdb <= dm_digtable.rssi_low_thresh) {
- /* 2008/02/05 MH When we execute silent reset, the DIG PHY parameters
- * will be reset to init value. We must prevent the condition.
- */
- if (dm_digtable.dig_state == DM_STA_DIG_OFF &&
- (priv->reset_count == reset_cnt)) {
- return;
- }
- reset_cnt = priv->reset_count;
-
- /* If DIG is off, DIG high power state must reset. */
- dm_digtable.dig_highpwr_state = DM_STA_DIG_MAX;
- dm_digtable.dig_state = DM_STA_DIG_OFF;
-
- /* 1.1 DIG Off. */
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); /* Only clear byte 1 and rewrite. */
-
- /* 1.2 Set initial gain. */
- write_nic_byte(dev, rOFDM0_XAAGCCore1, 0x17);
- write_nic_byte(dev, rOFDM0_XBAGCCore1, 0x17);
- write_nic_byte(dev, rOFDM0_XCAGCCore1, 0x17);
- write_nic_byte(dev, rOFDM0_XDAGCCore1, 0x17);
-
- /* 1.3 Lower PD_TH for OFDM. */
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) {
- /* 2008/01/11 MH 40MHZ 90/92 registers are not the same.
- * 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
- */
- write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x00);
- } else
- write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
-
- /* 1.4 Lower CS ratio for CCK. */
- write_nic_byte(dev, 0xa0a, 0x08);
-
- /* 1.5 Higher EDCCA. */
- return;
- }
-
- /* 2. When RSSI increases, we have to judge whether it is larger than a threshold
- * and then execute the step below.
- */
- if (priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_thresh) {
- u8 reset_flag = 0;
-
- if (dm_digtable.dig_state == DM_STA_DIG_ON &&
- (priv->reset_count == reset_cnt)) {
- dm_ctrl_initgain_byrssi_highpwr(dev);
- return;
- }
- if (priv->reset_count != reset_cnt)
- reset_flag = 1;
-
- reset_cnt = priv->reset_count;
-
- dm_digtable.dig_state = DM_STA_DIG_ON;
-
- /* 2.1 Set initial gain.
- * 2008/02/26 MH SD3-Jerry suggest to prevent dirty environment.
- */
- if (reset_flag == 1) {
- write_nic_byte(dev, rOFDM0_XAAGCCore1, 0x2c);
- write_nic_byte(dev, rOFDM0_XBAGCCore1, 0x2c);
- write_nic_byte(dev, rOFDM0_XCAGCCore1, 0x2c);
- write_nic_byte(dev, rOFDM0_XDAGCCore1, 0x2c);
- } else {
- write_nic_byte(dev, rOFDM0_XAAGCCore1, 0x20);
- write_nic_byte(dev, rOFDM0_XBAGCCore1, 0x20);
- write_nic_byte(dev, rOFDM0_XCAGCCore1, 0x20);
- write_nic_byte(dev, rOFDM0_XDAGCCore1, 0x20);
- }
-
- /* 2.2 Higher PD_TH for OFDM. */
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) {
- /* 2008/01/11 MH 40MHZ 90/92 registers are not the same.
- * 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
- */
- write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
- } else
- write_nic_byte(dev, rOFDM0_RxDetector1, 0x44);
-
- /* 2.3 Higher CS ratio for CCK. */
- write_nic_byte(dev, 0xa0a, 0xcd);
-
- /* 2.4 Lower EDCCA.
- * 2008/01/11 MH 90/92 series are the same.
- */
-
- /* 2.5 DIG On. */
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1); /* Only clear byte 1 and rewrite. */
- }
-
- dm_ctrl_initgain_byrssi_highpwr(dev);
-
-} /* dm_CtrlInitGainByRssi */
-
-/*-----------------------------------------------------------------------------
- * Function: dm_ctrl_initgain_byrssi_highpwr()
- *
- * Overview:
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *---------------------------------------------------------------------------
- */
-static void dm_ctrl_initgain_byrssi_highpwr(
- struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- static u32 reset_cnt_highpwr;
-
- /* For smoothness, we cannot change the high power DIG state in this range. */
- if ((priv->undecorated_smoothed_pwdb > dm_digtable.rssi_high_power_lowthresh) &&
- (priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_power_highthresh))
- return;
-
- /* 3. When RSSI >75% or <70%, it is a high power issue. We have to judge if
- * it is larger than a threshold and then execute the step below.
- *
- * 2008/02/05 MH SD3-Jerry Modify PD_TH for high power issue.
- */
- if (priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_power_highthresh) {
- if (dm_digtable.dig_highpwr_state == DM_STA_DIG_ON &&
- (priv->reset_count == reset_cnt_highpwr))
- return;
- dm_digtable.dig_highpwr_state = DM_STA_DIG_ON;
-
- /* 3.1 Higher PD_TH for OFDM for high power state. */
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) {
- write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x10);
- } else
- write_nic_byte(dev, rOFDM0_RxDetector1, 0x43);
- } else {
- if (dm_digtable.dig_highpwr_state == DM_STA_DIG_OFF &&
- (priv->reset_count == reset_cnt_highpwr))
- return;
- dm_digtable.dig_highpwr_state = DM_STA_DIG_OFF;
-
- if (priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_power_lowthresh &&
- priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_thresh) {
- /* 3.2 Recover PD_TH for OFDM for normal power region. */
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) {
- write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
- } else
- write_nic_byte(dev, rOFDM0_RxDetector1, 0x44);
- }
- }
-
- reset_cnt_highpwr = priv->reset_count;
-
-} /* dm_CtrlInitGainByRssiHighPwr */
-
-static void dm_initial_gain(
- struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 initial_gain = 0;
- static u8 initialized, force_write;
- static u32 reset_cnt;
- u8 tmp;
-
- if (dm_digtable.dig_algorithm_switch) {
- initialized = 0;
- reset_cnt = 0;
- }
-
- if (dm_digtable.pre_connect_state == dm_digtable.cur_connect_state) {
- if (dm_digtable.cur_connect_state == DIG_CONNECT) {
- if ((dm_digtable.rssi_val + 10 - dm_digtable.backoff_val) > DM_DIG_MAX)
- dm_digtable.cur_ig_value = DM_DIG_MAX;
- else if ((dm_digtable.rssi_val+10-dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min)
- dm_digtable.cur_ig_value = dm_digtable.rx_gain_range_min;
- else
- dm_digtable.cur_ig_value = dm_digtable.rssi_val+10-dm_digtable.backoff_val;
- } else { /* current state is disconnected */
- if (dm_digtable.cur_ig_value == 0)
- dm_digtable.cur_ig_value = priv->DefaultInitialGain[0];
- else
- dm_digtable.cur_ig_value = dm_digtable.pre_ig_value;
- }
- } else { /* disconnected -> connected or connected -> disconnected */
- dm_digtable.cur_ig_value = priv->DefaultInitialGain[0];
- dm_digtable.pre_ig_value = 0;
- }
-
- /* if silent reset happened, we should rewrite the values back */
- if (priv->reset_count != reset_cnt) {
- force_write = 1;
- reset_cnt = priv->reset_count;
- }
-
- read_nic_byte(dev, rOFDM0_XAAGCCore1, &tmp);
- if (dm_digtable.pre_ig_value != tmp)
- force_write = 1;
-
- {
- if ((dm_digtable.pre_ig_value != dm_digtable.cur_ig_value)
- || !initialized || force_write) {
- initial_gain = (u8)dm_digtable.cur_ig_value;
- /* Set initial gain. */
- write_nic_byte(dev, rOFDM0_XAAGCCore1, initial_gain);
- write_nic_byte(dev, rOFDM0_XBAGCCore1, initial_gain);
- write_nic_byte(dev, rOFDM0_XCAGCCore1, initial_gain);
- write_nic_byte(dev, rOFDM0_XDAGCCore1, initial_gain);
- dm_digtable.pre_ig_value = dm_digtable.cur_ig_value;
- initialized = 1;
- force_write = 0;
- }
- }
-}
-
-static void dm_pd_th(
- struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- static u8 initialized, force_write;
- static u32 reset_cnt;
-
- if (dm_digtable.dig_algorithm_switch) {
- initialized = 0;
- reset_cnt = 0;
- }
-
- if (dm_digtable.pre_connect_state == dm_digtable.cur_connect_state) {
- if (dm_digtable.cur_connect_state == DIG_CONNECT) {
- if (dm_digtable.rssi_val >= dm_digtable.rssi_high_power_highthresh)
- dm_digtable.curpd_thstate = DIG_PD_AT_HIGH_POWER;
- else if (dm_digtable.rssi_val <= dm_digtable.rssi_low_thresh)
- dm_digtable.curpd_thstate = DIG_PD_AT_LOW_POWER;
- else if ((dm_digtable.rssi_val >= dm_digtable.rssi_high_thresh) &&
- (dm_digtable.rssi_val < dm_digtable.rssi_high_power_lowthresh))
- dm_digtable.curpd_thstate = DIG_PD_AT_NORMAL_POWER;
- else
- dm_digtable.curpd_thstate = dm_digtable.prepd_thstate;
- } else {
- dm_digtable.curpd_thstate = DIG_PD_AT_LOW_POWER;
- }
- } else { /* disconnected -> connected or connected -> disconnected */
- dm_digtable.curpd_thstate = DIG_PD_AT_LOW_POWER;
- }
-
- /* if silent reset happened, we should rewrite the values back */
- if (priv->reset_count != reset_cnt) {
- force_write = 1;
- reset_cnt = priv->reset_count;
- }
-
- {
- if ((dm_digtable.prepd_thstate != dm_digtable.curpd_thstate) ||
- (initialized <= 3) || force_write) {
- if (dm_digtable.curpd_thstate == DIG_PD_AT_LOW_POWER) {
- /* Lower PD_TH for OFDM. */
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) {
- /* 2008/01/11 MH 40MHZ 90/92 registers are not the same.
- * 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
- */
- write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x00);
- } else
- write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
- } else if (dm_digtable.curpd_thstate == DIG_PD_AT_NORMAL_POWER) {
- /* Higher PD_TH for OFDM. */
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) {
- /* 2008/01/11 MH 40MHZ 90/92 registers are not the same.
- * 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
- */
- write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
- } else
- write_nic_byte(dev, rOFDM0_RxDetector1, 0x44);
- } else if (dm_digtable.curpd_thstate == DIG_PD_AT_HIGH_POWER) {
- /* Higher PD_TH for OFDM for high power state. */
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) {
- write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x10);
- } else
- write_nic_byte(dev, rOFDM0_RxDetector1, 0x43);
- }
- dm_digtable.prepd_thstate = dm_digtable.curpd_thstate;
- if (initialized <= 3)
- initialized++;
- force_write = 0;
- }
- }
-}
-
-static void dm_cs_ratio(
- struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- static u8 initialized, force_write;
- static u32 reset_cnt;
-
- if (dm_digtable.dig_algorithm_switch) {
- initialized = 0;
- reset_cnt = 0;
- }
-
- if (dm_digtable.pre_connect_state == dm_digtable.cur_connect_state) {
- if (dm_digtable.cur_connect_state == DIG_CONNECT) {
- if (dm_digtable.rssi_val <= dm_digtable.rssi_low_thresh)
- dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
- else if (dm_digtable.rssi_val >= dm_digtable.rssi_high_thresh)
- dm_digtable.curcs_ratio_state = DIG_CS_RATIO_HIGHER;
- else
- dm_digtable.curcs_ratio_state = dm_digtable.precs_ratio_state;
- } else {
- dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
- }
- } else /* disconnected -> connected or connected -> disconnected */
- dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
-
- /* if silent reset happened, we should rewrite the values back */
- if (priv->reset_count != reset_cnt) {
- force_write = 1;
- reset_cnt = priv->reset_count;
- }
-
- {
- if ((dm_digtable.precs_ratio_state != dm_digtable.curcs_ratio_state) ||
- !initialized || force_write) {
- if (dm_digtable.curcs_ratio_state == DIG_CS_RATIO_LOWER) {
- /* Lower CS ratio for CCK. */
- write_nic_byte(dev, 0xa0a, 0x08);
- } else if (dm_digtable.curcs_ratio_state == DIG_CS_RATIO_HIGHER) {
- /* Higher CS ratio for CCK. */
- write_nic_byte(dev, 0xa0a, 0xcd);
- }
- dm_digtable.precs_ratio_state = dm_digtable.curcs_ratio_state;
- initialized = 1;
- force_write = 0;
- }
- }
-}
-
-void dm_init_edca_turbo(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- priv->bcurrent_turbo_EDCA = false;
- priv->ieee80211->bis_any_nonbepkts = false;
- priv->bis_cur_rdlstate = false;
-} /* dm_init_edca_turbo */
-
-static void dm_check_edca_turbo(
- struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- PRT_HIGH_THROUGHPUT pHTInfo = priv->ieee80211->pHTInfo;
-
- /* Keep past Tx/Rx packet count for RT-to-RT EDCA turbo. */
- static unsigned long lastTxOkCnt;
- static unsigned long lastRxOkCnt;
- unsigned long curTxOkCnt = 0;
- unsigned long curRxOkCnt = 0;
-
- /* Do not enable Turbo if it is under WiFi config and QoS is enabled, because the EDCA parameters
- * should follow the settings from QAP. By Bruce, 2007-12-07.
- */
- if (priv->ieee80211->state != IEEE80211_LINKED)
- goto dm_CheckEdcaTurbo_EXIT;
- /* We do not turn on EDCA turbo mode for APs that have IOT issues */
- if (priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_DISABLE_EDCA_TURBO)
- goto dm_CheckEdcaTurbo_EXIT;
-
- if (!priv->ieee80211->bis_any_nonbepkts) {
- curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
- curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
- /* For RT-AP, we need to turn it on when Rx>Tx */
- if (curRxOkCnt > 4*curTxOkCnt) {
- if (!priv->bis_cur_rdlstate || !priv->bcurrent_turbo_EDCA) {
- write_nic_dword(dev, EDCAPARA_BE, edca_setting_DL[pHTInfo->IOTPeer]);
- priv->bis_cur_rdlstate = true;
- }
- } else {
- if (priv->bis_cur_rdlstate || !priv->bcurrent_turbo_EDCA) {
- write_nic_dword(dev, EDCAPARA_BE, edca_setting_UL[pHTInfo->IOTPeer]);
- priv->bis_cur_rdlstate = false;
- }
- }
-
- priv->bcurrent_turbo_EDCA = true;
- } else {
- /* Turn Off EDCA turbo here.
- * Restore original EDCA according to the declaration of AP.
- */
- if (priv->bcurrent_turbo_EDCA) {
- u8 u1bAIFS;
- u32 u4bAcParam, op_limit, cw_max, cw_min;
-
- struct ieee80211_qos_parameters *qos_parameters = &priv->ieee80211->current_network.qos_data.parameters;
- u8 mode = priv->ieee80211->mode;
-
- /* Each time the EDCA parameters are updated, reset the EDCA turbo mode status. */
- dm_init_edca_turbo(dev);
-
- u1bAIFS = qos_parameters->aifs[0] * ((mode & (IEEE_G | IEEE_N_24G)) ? 9 : 20) + aSifsTime;
-
- op_limit = (u32)le16_to_cpu(qos_parameters->tx_op_limit[0]);
- cw_max = (u32)le16_to_cpu(qos_parameters->cw_max[0]);
- cw_min = (u32)le16_to_cpu(qos_parameters->cw_min[0]);
-
- op_limit <<= AC_PARAM_TXOP_LIMIT_OFFSET;
- cw_max <<= AC_PARAM_ECW_MAX_OFFSET;
- cw_min <<= AC_PARAM_ECW_MIN_OFFSET;
- u1bAIFS <<= AC_PARAM_AIFS_OFFSET;
-
- u4bAcParam = op_limit | cw_max | cw_min | u1bAIFS;
- cpu_to_le32s(&u4bAcParam);
-
- write_nic_dword(dev, EDCAPARA_BE, u4bAcParam);
-
- /* Check ACM bit.
- * If it is set, immediately set the ACM control bit to downgrade the AC for passing the WMM test plan. Annie, 2005-12-13.
- */
- {
- /* TODO: Modified this part and try to set acm control in only 1 IO processing!! */
-
- struct aci_aifsn *pAciAifsn = (struct aci_aifsn *)&(qos_parameters->aifs[0]);
- u8 AcmCtrl;
-
- read_nic_byte(dev, AcmHwCtrl, &AcmCtrl);
-
- if (pAciAifsn->acm) { /* acm bit is 1. */
- AcmCtrl |= AcmHw_BeqEn;
- } else { /* ACM bit is 0. */
- AcmCtrl &= (~AcmHw_BeqEn);
- }
-
- RT_TRACE(COMP_QOS, "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n", AcmCtrl);
- write_nic_byte(dev, AcmHwCtrl, AcmCtrl);
- }
- priv->bcurrent_turbo_EDCA = false;
- }
- }
-
-dm_CheckEdcaTurbo_EXIT:
- /* Set variables for next time. */
- priv->ieee80211->bis_any_nonbepkts = false;
- lastTxOkCnt = priv->stats.txbytesunicast;
- lastRxOkCnt = priv->stats.rxbytesunicast;
-} /* dm_CheckEdcaTurbo */
-
-static void dm_init_ctstoself(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- priv->ieee80211->bCTSToSelfEnable = true;
- priv->ieee80211->CTSToSelfTH = CTS_TO_SELF_TH_VAL;
-}
-
-static void dm_ctstoself(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- PRT_HIGH_THROUGHPUT pHTInfo = priv->ieee80211->pHTInfo;
- static unsigned long lastTxOkCnt;
- static unsigned long lastRxOkCnt;
- unsigned long curTxOkCnt = 0;
- unsigned long curRxOkCnt = 0;
-
- if (!priv->ieee80211->bCTSToSelfEnable) {
- pHTInfo->IOTAction &= ~HT_IOT_ACT_FORCED_CTS2SELF;
- return;
- }
- /* 1. Uplink
- * 2. Linksys350/Linksys300N
- * 3. <50 disable, >55 enable
- */
-
- if (pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM) {
- curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
- curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
- if (curRxOkCnt > 4*curTxOkCnt) { /* downlink, disable CTS to self */
- pHTInfo->IOTAction &= ~HT_IOT_ACT_FORCED_CTS2SELF;
- } else { /* uplink */
- pHTInfo->IOTAction |= HT_IOT_ACT_FORCED_CTS2SELF;
- }
-
- lastTxOkCnt = priv->stats.txbytesunicast;
- lastRxOkCnt = priv->stats.rxbytesunicast;
- }
-}
-
-/*-----------------------------------------------------------------------------
- * Function: dm_check_pbc_gpio()
- *
- * Overview: Check if PBC button is pressed.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *---------------------------------------------------------------------------
- */
-static void dm_check_pbc_gpio(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 tmp1byte;
-
- read_nic_byte(dev, GPI, &tmp1byte);
- if (tmp1byte == 0xff)
- return;
-
- if (tmp1byte & BIT(6) || tmp1byte & BIT(0)) {
- /* Here we only set bPbcPressed to TRUE
- * After PBC is triggered, the variable will be set to FALSE
- */
- RT_TRACE(COMP_IO, "CheckPbcGPIO - PBC is pressed\n");
- priv->bpbc_pressed = true;
- }
-}
-
-/*-----------------------------------------------------------------------------
- * Function: DM_RFPathCheckWorkItemCallBack()
- *
- * Overview: Check if Current RF RX path is enabled
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *---------------------------------------------------------------------------
- */
-void dm_rf_pathcheck_workitemcallback(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct r8192_priv *priv = container_of(dwork, struct r8192_priv, rfpath_check_wq);
- struct net_device *dev = priv->ieee80211->dev;
- u8 rfpath = 0, i;
-
- /* 2008/01/30 MH After discussing with SD3 Jerry, 0xc04/0xd04 register will
- * always be the same. We only read 0xc04 now.
- */
- read_nic_byte(dev, 0xc04, &rfpath);
-
- /* Check Bit 0-3, it means if RF A-D is enabled. */
- for (i = 0; i < RF90_PATH_MAX; i++) {
- if (rfpath & (0x01<<i))
- priv->brfpath_rxenable[i] = true;
- else
- priv->brfpath_rxenable[i] = false;
- }
-
- dm_rxpath_sel_byrssi(dev);
-} /* DM_RFPathCheckWorkItemCallBack */
-
-static void dm_init_rxpath_selection(struct net_device *dev)
-{
- u8 i;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (priv->CustomerID == RT_CID_819x_Netcore)
- DM_RxPathSelTable.cck_method = CCK_RX_VERSION_2;
- else
- DM_RxPathSelTable.cck_method = CCK_RX_VERSION_1;
- DM_RxPathSelTable.disabled_rf = 0;
- for (i = 0; i < 4; i++) {
- DM_RxPathSelTable.rf_rssi[i] = 50;
- DM_RxPathSelTable.cck_pwdb_sta[i] = -64;
- DM_RxPathSelTable.rf_enable_rssi_th[i] = 100;
- }
-}
-
-static void dm_rxpath_sel_byrssi(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 i, max_rssi_index = 0, min_rssi_index = 0, sec_rssi_index = 0, rf_num = 0;
- u8 tmp_max_rssi = 0, tmp_min_rssi = 0, tmp_sec_rssi = 0;
- u8 cck_default_Rx = 0x2; /* RF-C */
- u8 cck_optional_Rx = 0x3; /* RF-D */
- long tmp_cck_max_pwdb = 0, tmp_cck_min_pwdb = 0, tmp_cck_sec_pwdb = 0;
- u8 cck_rx_ver2_max_index = 0, cck_rx_ver2_min_index = 0, cck_rx_ver2_sec_index = 0;
- u8 cur_rf_rssi;
- long cur_cck_pwdb;
- static u8 disabled_rf_cnt, cck_Rx_Path_initialized;
- u8 update_cck_rx_path;
-
- if (priv->rf_type != RF_2T4R)
- return;
-
- if (!cck_Rx_Path_initialized) {
- read_nic_byte(dev, 0xa07, &DM_RxPathSelTable.cck_rx_path);
- DM_RxPathSelTable.cck_rx_path &= 0xf;
- cck_Rx_Path_initialized = 1;
- }
-
- read_nic_byte(dev, 0xc04, &DM_RxPathSelTable.disabled_rf);
- DM_RxPathSelTable.disabled_rf = ~DM_RxPathSelTable.disabled_rf & 0xf;
-
- if (priv->ieee80211->mode == WIRELESS_MODE_B) {
- DM_RxPathSelTable.cck_method = CCK_RX_VERSION_2; /* pure B mode, fixed cck version2 */
- }
-
- /* decide max/sec/min rssi index */
- for (i = 0; i < RF90_PATH_MAX; i++) {
- DM_RxPathSelTable.rf_rssi[i] = priv->stats.rx_rssi_percentage[i];
-
- if (priv->brfpath_rxenable[i]) {
- rf_num++;
- cur_rf_rssi = DM_RxPathSelTable.rf_rssi[i];
-
- if (rf_num == 1) { /* find first enabled rf path and the rssi values */
- /* initialize, set all rssi index to the same one */
- max_rssi_index = min_rssi_index = sec_rssi_index = i;
- tmp_max_rssi = tmp_min_rssi = tmp_sec_rssi = cur_rf_rssi;
- } else if (rf_num == 2) { /* we pick up the max index first, and let sec and min to be the same one */
- if (cur_rf_rssi >= tmp_max_rssi) {
- tmp_max_rssi = cur_rf_rssi;
- max_rssi_index = i;
- } else {
- tmp_sec_rssi = tmp_min_rssi = cur_rf_rssi;
- sec_rssi_index = min_rssi_index = i;
- }
- } else {
- if (cur_rf_rssi > tmp_max_rssi) {
- tmp_sec_rssi = tmp_max_rssi;
- sec_rssi_index = max_rssi_index;
- tmp_max_rssi = cur_rf_rssi;
- max_rssi_index = i;
- } else if (cur_rf_rssi == tmp_max_rssi) { /* let sec and min point to the different index */
- tmp_sec_rssi = cur_rf_rssi;
- sec_rssi_index = i;
- } else if ((cur_rf_rssi < tmp_max_rssi) && (cur_rf_rssi > tmp_sec_rssi)) {
- tmp_sec_rssi = cur_rf_rssi;
- sec_rssi_index = i;
- } else if (cur_rf_rssi == tmp_sec_rssi) {
- if (tmp_sec_rssi == tmp_min_rssi) {
- /* let sec and min point to the different index */
- tmp_sec_rssi = cur_rf_rssi;
- sec_rssi_index = i;
- } else {
- /* This case we don't need to set any index */
- }
- } else if ((cur_rf_rssi < tmp_sec_rssi) && (cur_rf_rssi > tmp_min_rssi)) {
- /* This case we don't need to set any index */
- } else if (cur_rf_rssi == tmp_min_rssi) {
- if (tmp_sec_rssi == tmp_min_rssi) {
- /* let sec and min point to the different index */
- tmp_min_rssi = cur_rf_rssi;
- min_rssi_index = i;
- } else {
- /* This case we don't need to set any index */
- }
- } else if (cur_rf_rssi < tmp_min_rssi) {
- tmp_min_rssi = cur_rf_rssi;
- min_rssi_index = i;
- }
- }
- }
- }
-
- rf_num = 0;
- /* decide max/sec/min cck pwdb index */
- if (DM_RxPathSelTable.cck_method == CCK_RX_VERSION_2) {
- for (i = 0; i < RF90_PATH_MAX; i++) {
- if (priv->brfpath_rxenable[i]) {
- rf_num++;
- cur_cck_pwdb = DM_RxPathSelTable.cck_pwdb_sta[i];
-
- if (rf_num == 1) { /* find first enabled rf path and the rssi values */
- /* initialize, set all rssi index to the same one */
- cck_rx_ver2_max_index = cck_rx_ver2_min_index = cck_rx_ver2_sec_index = i;
- tmp_cck_max_pwdb = tmp_cck_min_pwdb = tmp_cck_sec_pwdb = cur_cck_pwdb;
- } else if (rf_num == 2) { /* we pick up the max index first, and let sec and min to be the same one */
- if (cur_cck_pwdb >= tmp_cck_max_pwdb) {
- tmp_cck_max_pwdb = cur_cck_pwdb;
- cck_rx_ver2_max_index = i;
- } else {
- tmp_cck_sec_pwdb = tmp_cck_min_pwdb = cur_cck_pwdb;
- cck_rx_ver2_sec_index = cck_rx_ver2_min_index = i;
- }
- } else {
- if (cur_cck_pwdb > tmp_cck_max_pwdb) {
- tmp_cck_sec_pwdb = tmp_cck_max_pwdb;
- cck_rx_ver2_sec_index = cck_rx_ver2_max_index;
- tmp_cck_max_pwdb = cur_cck_pwdb;
- cck_rx_ver2_max_index = i;
- } else if (cur_cck_pwdb == tmp_cck_max_pwdb) {
- /* let sec and min point to the different index */
- tmp_cck_sec_pwdb = cur_cck_pwdb;
- cck_rx_ver2_sec_index = i;
- } else if ((cur_cck_pwdb < tmp_cck_max_pwdb) && (cur_cck_pwdb > tmp_cck_sec_pwdb)) {
- tmp_cck_sec_pwdb = cur_cck_pwdb;
- cck_rx_ver2_sec_index = i;
- } else if (cur_cck_pwdb == tmp_cck_sec_pwdb && tmp_cck_sec_pwdb == tmp_cck_min_pwdb) {
- /* let sec and min point to the different index */
- tmp_cck_sec_pwdb = cur_cck_pwdb;
- cck_rx_ver2_sec_index = i;
- /* otherwise we don't need to set any index */
- } else if ((cur_cck_pwdb < tmp_cck_sec_pwdb) && (cur_cck_pwdb > tmp_cck_min_pwdb)) {
- /* This case we don't need to set any index */
- } else if (cur_cck_pwdb == tmp_cck_min_pwdb && tmp_cck_sec_pwdb == tmp_cck_min_pwdb) {
- /* let sec and min point to the different index */
- tmp_cck_min_pwdb = cur_cck_pwdb;
- cck_rx_ver2_min_index = i;
- /* otherwise we don't need to set any index */
- } else if (cur_cck_pwdb < tmp_cck_min_pwdb) {
- tmp_cck_min_pwdb = cur_cck_pwdb;
- cck_rx_ver2_min_index = i;
- }
- }
- }
- }
- }
-
- /* Set CCK Rx path
- * reg0xA07[3:2]=cck default rx path, reg0xa07[1:0]=cck optional rx path.
- */
- update_cck_rx_path = 0;
- if (DM_RxPathSelTable.cck_method == CCK_RX_VERSION_2) {
- cck_default_Rx = cck_rx_ver2_max_index;
- cck_optional_Rx = cck_rx_ver2_sec_index;
- if (tmp_cck_max_pwdb != -64)
- update_cck_rx_path = 1;
- }
-
- if (tmp_min_rssi < RX_PATH_SELECTION_SS_TH_LOW && disabled_rf_cnt < 2) {
- if ((tmp_max_rssi - tmp_min_rssi) >= RX_PATH_SELECTION_DIFF_TH) {
- /* record the enabled rssi threshold */
- DM_RxPathSelTable.rf_enable_rssi_th[min_rssi_index] = tmp_max_rssi+5;
- /* disable the BB Rx path, OFDM */
- rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0x1<<min_rssi_index, 0x0); /* 0xc04[3:0] */
- rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0x1<<min_rssi_index, 0x0); /* 0xd04[3:0] */
- disabled_rf_cnt++;
- }
- if (DM_RxPathSelTable.cck_method == CCK_RX_VERSION_1) {
- cck_default_Rx = max_rssi_index;
- cck_optional_Rx = sec_rssi_index;
- if (tmp_max_rssi)
- update_cck_rx_path = 1;
- }
- }
-
- if (update_cck_rx_path) {
- DM_RxPathSelTable.cck_rx_path = (cck_default_Rx<<2)|(cck_optional_Rx);
- rtl8192_setBBreg(dev, rCCK0_AFESetting, 0x0f000000, DM_RxPathSelTable.cck_rx_path);
- }
-
- if (DM_RxPathSelTable.disabled_rf) {
- for (i = 0; i < 4; i++) {
- if ((DM_RxPathSelTable.disabled_rf >> i) & 0x1) { /* disabled rf */
- if (tmp_max_rssi >= DM_RxPathSelTable.rf_enable_rssi_th[i]) {
- /* enable the BB Rx path */
- rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0x1<<i, 0x1); /* 0xc04[3:0] */
- rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0x1<<i, 0x1); /* 0xd04[3:0] */
- DM_RxPathSelTable.rf_enable_rssi_th[i] = 100;
- disabled_rf_cnt--;
- }
- }
- }
- }
-}
-
-/*-----------------------------------------------------------------------------
- * Function: dm_check_rx_path_selection()
- *
- * Overview: Call a workitem to check the current RX RF path and Rx path selection by RSSI.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *---------------------------------------------------------------------------
- */
-static void dm_check_rx_path_selection(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- queue_delayed_work(priv->priv_wq, &priv->rfpath_check_wq, 0);
-} /* dm_CheckRxRFPath */
-
-static void dm_init_fsync(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- priv->ieee80211->fsync_time_interval = 500;
- priv->ieee80211->fsync_rate_bitmap = 0x0f000800;
- priv->ieee80211->fsync_rssi_threshold = 30;
- priv->ieee80211->bfsync_enable = false;
- priv->ieee80211->fsync_multiple_timeinterval = 3;
- priv->ieee80211->fsync_firstdiff_ratethreshold = 100;
- priv->ieee80211->fsync_seconddiff_ratethreshold = 200;
- priv->ieee80211->fsync_state = Default_Fsync;
- priv->framesyncMonitor = 1; /* current default 0xc38 monitor on */
- INIT_DELAYED_WORK(&priv->fsync_work, dm_fsync_work_callback);
-}
-
-static void dm_deInit_fsync(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- cancel_delayed_work_sync(&priv->fsync_work);
-}
-
-void dm_fsync_work_callback(struct work_struct *work)
-{
- struct r8192_priv *priv =
- container_of(work, struct r8192_priv, fsync_work.work);
- struct net_device *dev = priv->ieee80211->dev;
- u32 rate_index, rate_count = 0, rate_count_diff = 0;
- bool bSwitchFromCountDiff = false;
- bool bDoubleTimeInterval = false;
-
- if (priv->ieee80211->state == IEEE80211_LINKED &&
- priv->ieee80211->bfsync_enable &&
- (priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_CDD_FSYNC)) {
- /* Count rate 54, MCS [7], [12, 13, 14, 15] */
- u32 rate_bitmap;
-
- for (rate_index = 0; rate_index <= 27; rate_index++) {
- rate_bitmap = 1 << rate_index;
- if (priv->ieee80211->fsync_rate_bitmap & rate_bitmap)
- rate_count += priv->stats.received_rate_histogram[1][rate_index];
- }
-
- if (rate_count < priv->rate_record)
- rate_count_diff = 0xffffffff - rate_count + priv->rate_record;
- else
- rate_count_diff = rate_count - priv->rate_record;
- if (rate_count_diff < priv->rateCountDiffRecord) {
- u32 DiffNum = priv->rateCountDiffRecord - rate_count_diff;
- /* Continue count */
- if (DiffNum >= priv->ieee80211->fsync_seconddiff_ratethreshold)
- priv->ContinueDiffCount++;
- else
- priv->ContinueDiffCount = 0;
-
- /* Continue count over */
- if (priv->ContinueDiffCount >= 2) {
- bSwitchFromCountDiff = true;
- priv->ContinueDiffCount = 0;
- }
- } else {
- /* Stop the continued count */
- priv->ContinueDiffCount = 0;
- }
-
- /* If Count diff <= FsyncRateCountThreshold */
- if (rate_count_diff <= priv->ieee80211->fsync_firstdiff_ratethreshold) {
- bSwitchFromCountDiff = true;
- priv->ContinueDiffCount = 0;
- }
- priv->rate_record = rate_count;
- priv->rateCountDiffRecord = rate_count_diff;
- RT_TRACE(COMP_HALDM, "rateRecord %d rateCount %d, rateCountdiff %d bSwitchFsync %d\n", priv->rate_record, rate_count, rate_count_diff, priv->bswitch_fsync);
- /* if we never receive those MCS rates and RSSI > 30%, then switch fsync */
- if (priv->undecorated_smoothed_pwdb > priv->ieee80211->fsync_rssi_threshold && bSwitchFromCountDiff) {
- bDoubleTimeInterval = true;
- priv->bswitch_fsync = !priv->bswitch_fsync;
- if (priv->bswitch_fsync) {
- write_nic_byte(dev, 0xC36, 0x1c);
- write_nic_byte(dev, 0xC3e, 0x90);
- } else {
- write_nic_byte(dev, 0xC36, 0x5c);
- write_nic_byte(dev, 0xC3e, 0x96);
- }
- } else if (priv->undecorated_smoothed_pwdb <= priv->ieee80211->fsync_rssi_threshold) {
- if (priv->bswitch_fsync) {
- priv->bswitch_fsync = false;
- write_nic_byte(dev, 0xC36, 0x5c);
- write_nic_byte(dev, 0xC3e, 0x96);
- }
- }
- if (bDoubleTimeInterval) {
- cancel_delayed_work_sync(&priv->fsync_work);
- schedule_delayed_work(&priv->fsync_work,
- msecs_to_jiffies(priv
- ->ieee80211->fsync_time_interval *
- priv->ieee80211->fsync_multiple_timeinterval));
- } else {
- cancel_delayed_work_sync(&priv->fsync_work);
- schedule_delayed_work(&priv->fsync_work,
- msecs_to_jiffies(priv
- ->ieee80211->fsync_time_interval));
- }
- } else {
- /* Let the register return to its default value. */
- if (priv->bswitch_fsync) {
- priv->bswitch_fsync = false;
- write_nic_byte(dev, 0xC36, 0x5c);
- write_nic_byte(dev, 0xC3e, 0x96);
- }
- priv->ContinueDiffCount = 0;
- write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd);
- }
- RT_TRACE(COMP_HALDM, "ContinueDiffCount %d\n", priv->ContinueDiffCount);
- RT_TRACE(COMP_HALDM, "rateRecord %d rateCount %d, rateCountdiff %d bSwitchFsync %d\n", priv->rate_record, rate_count, rate_count_diff, priv->bswitch_fsync);
-}
-
-static void dm_StartHWFsync(struct net_device *dev)
-{
- RT_TRACE(COMP_HALDM, "%s\n", __func__);
- write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cf);
- write_nic_byte(dev, 0xc3b, 0x41);
-}
-
-static void dm_EndSWFsync(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- RT_TRACE(COMP_HALDM, "%s\n", __func__);
- cancel_delayed_work_sync(&priv->fsync_work);
-
- /* Let the register return to its default value. */
- if (priv->bswitch_fsync) {
- priv->bswitch_fsync = false;
-
- write_nic_byte(dev, 0xC36, 0x5c);
-
- write_nic_byte(dev, 0xC3e, 0x96);
- }
-
- priv->ContinueDiffCount = 0;
- write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd);
-}
-
-static void dm_StartSWFsync(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u32 rateIndex;
- u32 rateBitmap;
-
- RT_TRACE(COMP_HALDM, "%s\n", __func__);
- /* Initial rate record to zero, start to record. */
- priv->rate_record = 0;
- /* Initialize continue diff count to zero, start to record. */
- priv->ContinueDiffCount = 0;
- priv->rateCountDiffRecord = 0;
- priv->bswitch_fsync = false;
-
- if (priv->ieee80211->mode == WIRELESS_MODE_N_24G) {
- priv->ieee80211->fsync_firstdiff_ratethreshold = 600;
- priv->ieee80211->fsync_seconddiff_ratethreshold = 0xffff;
- } else {
- priv->ieee80211->fsync_firstdiff_ratethreshold = 200;
- priv->ieee80211->fsync_seconddiff_ratethreshold = 200;
- }
- for (rateIndex = 0; rateIndex <= 27; rateIndex++) {
- rateBitmap = 1 << rateIndex;
- if (priv->ieee80211->fsync_rate_bitmap & rateBitmap)
- priv->rate_record += priv->stats.received_rate_histogram[1][rateIndex];
- }
- cancel_delayed_work_sync(&priv->fsync_work);
- schedule_delayed_work(&priv->fsync_work,
- msecs_to_jiffies(priv->ieee80211->fsync_time_interval));
-
- write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cd);
-}
-
-static void dm_EndHWFsync(struct net_device *dev)
-{
- RT_TRACE(COMP_HALDM, "%s\n", __func__);
- write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd);
- write_nic_byte(dev, 0xc3b, 0x49);
-}
-
-void dm_check_fsync(struct net_device *dev)
-{
-#define RegC38_Default 0
-#define RegC38_NonFsync_Other_AP 1
-#define RegC38_Fsync_AP_BCM 2
- struct r8192_priv *priv = ieee80211_priv(dev);
- static u8 reg_c38_State = RegC38_Default;
- static u32 reset_cnt;
-
- RT_TRACE(COMP_HALDM, "RSSI %d TimeInterval %d MultipleTimeInterval %d\n", priv->ieee80211->fsync_rssi_threshold, priv->ieee80211->fsync_time_interval, priv->ieee80211->fsync_multiple_timeinterval);
- RT_TRACE(COMP_HALDM, "RateBitmap 0x%x FirstDiffRateThreshold %d SecondDiffRateThreshold %d\n", priv->ieee80211->fsync_rate_bitmap, priv->ieee80211->fsync_firstdiff_ratethreshold, priv->ieee80211->fsync_seconddiff_ratethreshold);
-
- if (priv->ieee80211->state == IEEE80211_LINKED &&
- (priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_CDD_FSYNC)) {
- if (priv->ieee80211->bfsync_enable == 0) {
- switch (priv->ieee80211->fsync_state) {
- case Default_Fsync:
- dm_StartHWFsync(dev);
- priv->ieee80211->fsync_state = HW_Fsync;
- break;
- case SW_Fsync:
- dm_EndSWFsync(dev);
- dm_StartHWFsync(dev);
- priv->ieee80211->fsync_state = HW_Fsync;
- break;
- case HW_Fsync:
- default:
- break;
- }
- } else {
- switch (priv->ieee80211->fsync_state) {
- case Default_Fsync:
- dm_StartSWFsync(dev);
- priv->ieee80211->fsync_state = SW_Fsync;
- break;
- case HW_Fsync:
- dm_EndHWFsync(dev);
- dm_StartSWFsync(dev);
- priv->ieee80211->fsync_state = SW_Fsync;
- break;
- case SW_Fsync:
- default:
- break;
- }
- }
- if (priv->framesyncMonitor) {
- if (reg_c38_State != RegC38_Fsync_AP_BCM) {
- /* For Broadcom APs we write a different default value */
- write_nic_byte(dev, rOFDM0_RxDetector3, 0x95);
-
- reg_c38_State = RegC38_Fsync_AP_BCM;
- }
- }
- } else {
- switch (priv->ieee80211->fsync_state) {
- case HW_Fsync:
- dm_EndHWFsync(dev);
- priv->ieee80211->fsync_state = Default_Fsync;
- break;
- case SW_Fsync:
- dm_EndSWFsync(dev);
- priv->ieee80211->fsync_state = Default_Fsync;
- break;
- case Default_Fsync:
- default:
- break;
- }
-
- if (priv->framesyncMonitor) {
- if (priv->ieee80211->state == IEEE80211_LINKED) {
- if (priv->undecorated_smoothed_pwdb <= REG_C38_TH) {
- if (reg_c38_State != RegC38_NonFsync_Other_AP) {
- write_nic_byte(dev, rOFDM0_RxDetector3, 0x90);
-
- reg_c38_State = RegC38_NonFsync_Other_AP;
- }
- } else if (priv->undecorated_smoothed_pwdb >= (REG_C38_TH + 5)) {
- if (reg_c38_State) {
- write_nic_byte(dev, rOFDM0_RxDetector3, priv->framesync);
- reg_c38_State = RegC38_Default;
- }
- }
- } else {
- if (reg_c38_State) {
- write_nic_byte(dev, rOFDM0_RxDetector3, priv->framesync);
- reg_c38_State = RegC38_Default;
- }
- }
- }
- }
- if (priv->framesyncMonitor) {
- if (priv->reset_count != reset_cnt) { /* After silent reset, the reg_c38_State will be returned to default value */
- write_nic_byte(dev, rOFDM0_RxDetector3, priv->framesync);
- reg_c38_State = RegC38_Default;
- reset_cnt = priv->reset_count;
- }
- } else {
- if (reg_c38_State) {
- write_nic_byte(dev, rOFDM0_RxDetector3, priv->framesync);
- reg_c38_State = RegC38_Default;
- }
- }
-}
-
-/*---------------------------Define function prototype------------------------*/
-/*-----------------------------------------------------------------------------
- * Function: DM_DynamicTxPower()
- *
- * Overview: Detect signal strength to control the TX registers.
- * Tx Power Control For Near/Far Range
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *---------------------------------------------------------------------------
- */
-static void dm_init_dynamic_txpower(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- /* Initial TX Power Control for near/far range, added by amy 2008/05/15, ported from Windows code. */
- priv->ieee80211->bdynamic_txpower_enable = true; /* Default to enable Tx Power Control */
- priv->bLastDTPFlag_High = false;
- priv->bLastDTPFlag_Low = false;
- priv->bDynamicTxHighPower = false;
- priv->bDynamicTxLowPower = false;
-}
-
-static void dm_dynamic_txpower(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- unsigned int txhipower_threshold = 0;
- unsigned int txlowpower_threshold = 0;
-
- if (!priv->ieee80211->bdynamic_txpower_enable) {
- priv->bDynamicTxHighPower = false;
- priv->bDynamicTxLowPower = false;
- return;
- }
- if ((priv->ieee80211->current_network.atheros_cap_exist) && (priv->ieee80211->mode == IEEE_G)) {
- txhipower_threshold = TX_POWER_ATHEROAP_THRESH_HIGH;
- txlowpower_threshold = TX_POWER_ATHEROAP_THRESH_LOW;
- } else {
- txhipower_threshold = TX_POWER_NEAR_FIELD_THRESH_HIGH;
- txlowpower_threshold = TX_POWER_NEAR_FIELD_THRESH_LOW;
- }
-
- RT_TRACE(COMP_TXAGC, "priv->undecorated_smoothed_pwdb = %ld\n", priv->undecorated_smoothed_pwdb);
-
- if (priv->ieee80211->state == IEEE80211_LINKED) {
- if (priv->undecorated_smoothed_pwdb >= txhipower_threshold) {
- priv->bDynamicTxHighPower = true;
- priv->bDynamicTxLowPower = false;
- } else {
- /* high power state check */
- if (priv->undecorated_smoothed_pwdb < txlowpower_threshold && priv->bDynamicTxHighPower)
- priv->bDynamicTxHighPower = false;
-
- /* low power state check */
- if (priv->undecorated_smoothed_pwdb < 35)
- priv->bDynamicTxLowPower = true;
- else if (priv->undecorated_smoothed_pwdb >= 40)
- priv->bDynamicTxLowPower = false;
- }
- } else {
- priv->bDynamicTxHighPower = false;
- priv->bDynamicTxLowPower = false;
- }
-
- if ((priv->bDynamicTxHighPower != priv->bLastDTPFlag_High) ||
- (priv->bDynamicTxLowPower != priv->bLastDTPFlag_Low)) {
- RT_TRACE(COMP_TXAGC, "SetTxPowerLevel8190() channel = %d\n", priv->ieee80211->current_network.channel);
-
-#if defined(RTL8190P) || defined(RTL8192E)
- SetTxPowerLevel8190(Adapter, pHalData->CurrentChannel);
-#endif
-
- rtl8192_phy_setTxPower(dev, priv->ieee80211->current_network.channel);
- /*pHalData->bStartTxCtrlByTPCNFR = FALSE; Clear th flag of Set TX Power from Sitesurvey*/
- }
- priv->bLastDTPFlag_High = priv->bDynamicTxHighPower;
- priv->bLastDTPFlag_Low = priv->bDynamicTxLowPower;
-
-} /* dm_dynamic_txpower */
-
-/* added by vivi, for reading tx rate and retry count */
-static void dm_check_txrateandretrycount(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device *ieee = priv->ieee80211;
- /* for 11n tx rate */
- read_nic_byte(dev, CURRENT_TX_RATE_REG, &ieee->softmac_stats.CurrentShowTxate);
- /* for initial tx rate */
- read_nic_byte(dev, INITIAL_TX_RATE_REG, &ieee->softmac_stats.last_packet_rate);
- /* for tx retry count */
- read_nic_dword(dev, TX_RETRY_COUNT_REG, &ieee->softmac_stats.txretrycount);
-}
-
-static void dm_send_rssi_tofw(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- /* If we are testing Chariot, should we stop the TX command?
- * Because the 92E will always silently reset when we send the TX command. We use register
- * 0x1e0 (byte) to notify the driver.
- */
- write_nic_byte(dev, DRIVER_RSSI, (u8)priv->undecorated_smoothed_pwdb);
-}
-
-/*---------------------------Define function prototype------------------------*/
diff --git a/drivers/staging/rtl8192u/r8192U_dm.h b/drivers/staging/rtl8192u/r8192U_dm.h
deleted file mode 100644
index f4eb18216677..000000000000
--- a/drivers/staging/rtl8192u/r8192U_dm.h
+++ /dev/null
@@ -1,176 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*****************************************************************************
- * Copyright(c) 2007, RealTEK Technology Inc. All Right Reserved.
- *
- * Module: Hal819xUsbDM.h (RTL8192 Header H File)
- *
- *
- * Note: For dynamic control definition constant structure.
- *
- *
- * Export:
- *
- * Abbrev:
- *
- * History:
- * Date Who Remark
- * 10/04/2007 MHC Create initial version.
- *
- *****************************************************************************/
- /* Check to see if the file has been included already. */
-#ifndef __R8192UDM_H__
-#define __R8192UDM_H__
-
-/*--------------------------Define Parameters-------------------------------*/
-#define DM_DIG_THRESH_HIGH 40
-#define DM_DIG_THRESH_LOW 35
-
-#define DM_DIG_HIGH_PWR_THRESH_HIGH 75
-#define DM_DIG_HIGH_PWR_THRESH_LOW 70
-
-#define BW_AUTO_SWITCH_HIGH_LOW 25
-#define BW_AUTO_SWITCH_LOW_HIGH 30
-
-#define DM_DIG_BACKOFF 12
-#define DM_DIG_MAX 0x36
-#define DM_DIG_MIN 0x1c
-#define DM_DIG_MIN_NETCORE 0x12
-
-#define RX_PATH_SELECTION_SS_TH_LOW 30
-#define RX_PATH_SELECTION_DIFF_TH 18
-
-#define RATE_ADAPTIVE_TH_HIGH 50
-#define RATE_ADAPTIVE_TH_LOW_20M 30
-#define RATE_ADAPTIVE_TH_LOW_40M 10
-#define VERY_LOW_RSSI 15
-#define CTS_TO_SELF_TH_VAL 30
-
-/* defined by vivi, for tx power track */
-#define E_FOR_TX_POWER_TRACK 300
-/* Dynamic Tx Power Control Threshold */
-#define TX_POWER_NEAR_FIELD_THRESH_HIGH 68
-#define TX_POWER_NEAR_FIELD_THRESH_LOW 62
-/* added by amy for atheros AP */
-#define TX_POWER_ATHEROAP_THRESH_HIGH 78
-#define TX_POWER_ATHEROAP_THRESH_LOW 72
-
-/* defined by vivi, for showing on UI */
-#define CURRENT_TX_RATE_REG 0x1b8
-#define INITIAL_TX_RATE_REG 0x1b9
-#define TX_RETRY_COUNT_REG 0x1ac
-#define REG_C38_TH 20
-/*--------------------------Define Parameters-------------------------------*/
-
-/*------------------------------Define structure----------------------------*/
-
-enum dig_algorithm {
- DIG_ALGO_BY_FALSE_ALARM = 0,
- DIG_ALGO_BY_RSSI = 1,
-};
-
-enum dynamic_init_gain_state {
- DM_STA_DIG_OFF = 0,
- DM_STA_DIG_ON,
- DM_STA_DIG_MAX
-};
-
-enum dig_connect {
- DIG_DISCONNECT = 0,
- DIG_CONNECT = 1,
-};
-
-enum dig_pkt_detection_threshold {
- DIG_PD_AT_LOW_POWER = 0,
- DIG_PD_AT_NORMAL_POWER = 1,
- DIG_PD_AT_HIGH_POWER = 2,
-};
-
-enum dig_cck_cs_ratio_state {
- DIG_CS_RATIO_LOWER = 0,
- DIG_CS_RATIO_HIGHER = 1,
-};
-
-/* 2007/10/04 MH Define upper and lower threshold of DIG enable or disable. */
-struct dig {
- u8 dig_enable_flag;
- enum dig_algorithm dig_algorithm;
- u8 dig_algorithm_switch;
-
- long rssi_low_thresh;
- long rssi_high_thresh;
-
- long rssi_high_power_lowthresh;
- long rssi_high_power_highthresh;
-
- enum dynamic_init_gain_state dig_state;
- enum dynamic_init_gain_state dig_highpwr_state;
- enum dig_connect cur_connect_state;
- enum dig_connect pre_connect_state;
-
- enum dig_pkt_detection_threshold curpd_thstate;
- enum dig_pkt_detection_threshold prepd_thstate;
- enum dig_cck_cs_ratio_state curcs_ratio_state;
- enum dig_cck_cs_ratio_state precs_ratio_state;
-
- u32 pre_ig_value;
- u32 cur_ig_value;
-
- u8 backoff_val;
- u8 rx_gain_range_min;
-
- long rssi_val;
-};
-
-enum cck_rx_path_method {
- CCK_RX_VERSION_1 = 0,
- CCK_RX_VERSION_2 = 1,
-};
-
-struct dynamic_rx_path_sel {
- enum cck_rx_path_method cck_method;
- u8 cck_rx_path;
-
- u8 disabled_rf;
-
- u8 rf_rssi[4];
- u8 rf_enable_rssi_th[4];
- long cck_pwdb_sta[4];
-};
-
-struct tx_config_cmd {
- u32 cmd_op; /* Command packet type. */
- u32 cmd_length; /* Command packet length. */
- u32 cmd_value;
-};
-
-/*------------------------------Define structure----------------------------*/
-
-/*------------------------Export global variable----------------------------*/
-extern struct dig dm_digtable;
-extern u8 dm_shadow[16][256];
-/*------------------------Export global variable----------------------------*/
-
-/*------------------------Export Marco Definition---------------------------*/
-
-/*------------------------Export Marco Definition---------------------------*/
-
-/*--------------------------Exported Function prototype---------------------*/
-void init_hal_dm(struct net_device *dev);
-void deinit_hal_dm(struct net_device *dev);
-void hal_dm_watchdog(struct net_device *dev);
-void init_rate_adaptive(struct net_device *dev);
-void dm_txpower_trackingcallback(struct work_struct *work);
-void dm_restore_dynamic_mechanism_state(struct net_device *dev);
-void dm_force_tx_fw_info(struct net_device *dev,
- u32 force_type, u32 force_value);
-void dm_init_edca_turbo(struct net_device *dev);
-void dm_rf_operation_test_callback(unsigned long data);
-void dm_rf_pathcheck_workitemcallback(struct work_struct *work);
-void dm_fsync_work_callback(struct work_struct *work);
-void dm_cck_txpower_adjust(struct net_device *dev, bool binch14);
-void dm_initialize_txpower_tracking(struct net_device *dev);
-/*--------------------------Exported Function prototype---------------------*/
-
-#endif /*__R8192UDM_H__ */
-
-/* End of r8192U_dm.h */
diff --git a/drivers/staging/rtl8192u/r8192U_hw.h b/drivers/staging/rtl8192u/r8192U_hw.h
deleted file mode 100644
index 217e77766442..000000000000
--- a/drivers/staging/rtl8192u/r8192U_hw.h
+++ /dev/null
@@ -1,246 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This is part of rtl8187 OpenSource driver.
- * Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
- *
- * Parts of this driver are based on the GPL part of the
- * official Realtek driver.
- * Parts of this driver are based on the rtl8180 driver skeleton
- * from Patric Schenke & Andres Salomon.
- * Parts of this driver are based on the Intel Pro Wireless
- * 2100 GPL driver.
- *
- * We want to thank the Authors of those projects
- * and the Ndiswrapper project Authors.
- */
-
-/* Mariusz Matuszek added full registers definition with Realtek's name */
-
-/* this file contains register definitions for the rtl8187 MAC controller */
-#ifndef R8192_HW
-#define R8192_HW
-
-#define RTL8187_REQT_READ 0xc0
-#define RTL8187_REQT_WRITE 0x40
-#define RTL8187_REQ_GET_REGS 0x05
-#define RTL8187_REQ_SET_REGS 0x05
-
-#define MAX_TX_URB 5
-#define MAX_RX_URB 16
-
-#define R8180_MAX_RETRY 255
-
-#define RX_URB_SIZE 9100
-
-#define RTL8190_EEPROM_ID 0x8129
-#define EEPROM_VID 0x02
-#define EEPROM_PID 0x04
-#define EEPROM_NODE_ADDRESS_BYTE_0 0x0C
-
-#define EEPROM_TX_POWER_DIFF 0x1F
-#define EEPROM_THERMAL_METER 0x20
-#define EEPROM_PW_DIFF 0x21 //0x21
-#define EEPROM_CRYSTAL_CAP 0x22 //0x22
-
-#define EEPROM_TX_PW_INDEX_CCK 0x23 //0x23
-#define EEPROM_TX_PW_INDEX_OFDM_24G 0x24 //0x24~0x26
-#define EEPROM_TX_PW_INDEX_CCK_V1 0x29 //0x29~0x2B
-#define EEPROM_TX_PW_INDEX_OFDM_24G_V1 0x2C //0x2C~0x2E
-#define EEPROM_TX_PW_INDEX_VER 0x27 //0x27
-
-#define EEPROM_DEFAULT_THERNAL_METER 0x7
-#define EEPROM_DEFAULT_PW_DIFF 0x4
-#define EEPROM_DEFAULT_CRYSTAL_CAP 0x5
-#define EEPROM_DEFAULT_TX_POWER 0x1010
-#define EEPROM_CUSTOMER_ID 0x7B //0x7B:CustomerID
-#define EEPROM_CHANNEL_PLAN 0x16 //0x7C
-
-#define EEPROM_CID_RUNTOP 0x2
-#define EEPROM_CID_DLINK 0x8
-
-#define AC_PARAM_TXOP_LIMIT_OFFSET 16
-#define AC_PARAM_ECW_MAX_OFFSET 12
-#define AC_PARAM_ECW_MIN_OFFSET 8
-#define AC_PARAM_AIFS_OFFSET 0
-
-//#endif
-enum _RTL8192Usb_HW {
- MAC0 = 0x000,
- MAC4 = 0x004,
-
-#define BB_GLOBAL_RESET_BIT 0x1
- BB_GLOBAL_RESET = 0x020, // BasebandGlobal Reset Register
- BSSIDR = 0x02E, // BSSID Register
- CMDR = 0x037, // Command register
-#define CR_RE 0x08
-#define CR_TE 0x04
- SIFS = 0x03E, // SIFS register
-
-#define TCR_MXDMA_2048 7
-#define TCR_LRL_OFFSET 0
-#define TCR_SRL_OFFSET 8
-#define TCR_MXDMA_OFFSET 21
-#define TCR_SAT BIT(24) // Enable rate-dependent ack timeout timer
- RCR = 0x044, // Receive Configuration Register
-#define MAC_FILTER_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(5) | \
- BIT(12) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | \
- BIT(22) | BIT(23))
-#define RX_FIFO_THRESHOLD_MASK (BIT(13) | BIT(14) | BIT(15))
-#define RX_FIFO_THRESHOLD_SHIFT 13
-#define RX_FIFO_THRESHOLD_NONE 7
-#define MAX_RX_DMA_MASK (BIT(8) | BIT(9) | BIT(10))
-#define RCR_MXDMA_OFFSET 8
-#define RCR_FIFO_OFFSET 13
-#define RCR_ONLYERLPKT BIT(31) // Early Receiving based on Packet Size.
-#define RCR_CBSSID BIT(23) // Accept BSSID match packet
-#define RCR_APWRMGT BIT(22) // Accept power management packet
-#define RCR_AMF BIT(20) // Accept management type frame
-#define RCR_ACF BIT(19) // Accept control type frame
-#define RCR_ADF BIT(18) // Accept data type frame
-#define RCR_AICV BIT(12) // Accept ICV error packet
-#define RCR_ACRC32 BIT(5) // Accept CRC32 error packet
-#define RCR_AB BIT(3) // Accept broadcast packet
-#define RCR_AM BIT(2) // Accept multicast packet
-#define RCR_APM BIT(1) // Accept physical match packet
-#define RCR_AAP BIT(0) // Accept all unicast packet
- SLOT_TIME = 0x049, // Slot Time Register
- ACK_TIMEOUT = 0x04c, // Ack Timeout Register
- EDCAPARA_BE = 0x050, // EDCA Parameter of AC BE
- EDCAPARA_BK = 0x054, // EDCA Parameter of AC BK
- EDCAPARA_VO = 0x058, // EDCA Parameter of AC VO
- EDCAPARA_VI = 0x05C, // EDCA Parameter of AC VI
- BCN_TCFG = 0x062, // Beacon Time Configuration
-#define BCN_TCFG_CW_SHIFT 8
-#define BCN_TCFG_IFS 0
- BCN_INTERVAL = 0x070, // Beacon Interval (TU)
- ATIMWND = 0x072, // ATIM Window Size (TU)
- BCN_DRV_EARLY_INT = 0x074, // Driver Early Interrupt Time (TU). Time to send interrupt to notify to change beacon content before TBTT
- BCN_DMATIME = 0x076, // Beacon DMA and ATIM interrupt time (US). Indicates the time before TBTT to perform beacon queue DMA
- BCN_ERR_THRESH = 0x078, // Beacon Error Threshold
- RWCAM = 0x0A0, //IN 8190 Data Sheet is called CAMcmd
- WCAMI = 0x0A4, // Software write CAM input content
- SECR = 0x0B0, //Security Configuration Register
-#define SCR_TxUseDK BIT(0) //Force Tx Use Default Key
-#define SCR_RxUseDK BIT(1) //Force Rx Use Default Key
-#define SCR_TxEncEnable BIT(2) //Enable Tx Encryption
-#define SCR_RxDecEnable BIT(3) //Enable Rx Decryption
-#define SCR_SKByA2 BIT(4) //Search kEY BY A2
-#define SCR_NoSKMC BIT(5) //No Key Search for Multicast
-
-//----------------------------------------------------------------------------
-// 8190 CPU General Register (offset 0x100, 4 byte)
-//----------------------------------------------------------------------------
-#define CPU_CCK_LOOPBACK 0x00030000
-#define CPU_GEN_SYSTEM_RESET 0x00000001
-#define CPU_GEN_FIRMWARE_RESET 0x00000008
-#define CPU_GEN_BOOT_RDY 0x00000010
-#define CPU_GEN_FIRM_RDY 0x00000020
-#define CPU_GEN_PUT_CODE_OK 0x00000080
-#define CPU_GEN_BB_RST 0x00000100
-#define CPU_GEN_PWR_STB_CPU 0x00000004
-#define CPU_GEN_NO_LOOPBACK_MSK 0xFFF8FFFF // Set bit18,17,16 to 0. Set bit19
-#define CPU_GEN_NO_LOOPBACK_SET 0x00080000 // Set BIT19 to 1
- CPU_GEN = 0x100, // CPU Reset Register
-
- AcmHwCtrl = 0x171, // ACM Hardware Control Register
-//----------------------------------------------------------------------------
-////
-//// 8190 AcmHwCtrl bits (offset 0x171, 1 byte)
-////----------------------------------------------------------------------------
-//
-#define AcmHw_BeqEn BIT(1)
-
- RQPN1 = 0x180, // Reserved Queue Page Number , Vo Vi, Be, Bk
- RQPN2 = 0x184, // Reserved Queue Page Number, HCCA, Cmd, Mgnt, High
- RQPN3 = 0x188, // Reserved Queue Page Number, Bcn, Public,
- QPNR = 0x1D0, //0x1F0, // Queue Packet Number report per TID
-
-#define BW_OPMODE_5G BIT(1)
-#define BW_OPMODE_20MHZ BIT(2)
- BW_OPMODE = 0x300, // Bandwidth operation mode
- MSR = 0x303, // Media Status register
-#define MSR_LINK_MASK (BIT(0) | BIT(1))
-#define MSR_LINK_MANAGED 2
-#define MSR_LINK_NONE 0
-#define MSR_LINK_SHIFT 0
-#define MSR_LINK_ADHOC 1
-#define MSR_LINK_MASTER 3
- RETRY_LIMIT = 0x304, // Retry Limit [15:8]-short, [7:0]-long
-#define RETRY_LIMIT_SHORT_SHIFT 8
-#define RETRY_LIMIT_LONG_SHIFT 0
- RRSR = 0x310, // Response Rate Set
-#define RRSR_1M BIT(0)
-#define RRSR_2M BIT(1)
-#define RRSR_5_5M BIT(2)
-#define RRSR_11M BIT(3)
-#define RRSR_6M BIT(4)
-#define RRSR_9M BIT(5)
-#define RRSR_12M BIT(6)
-#define RRSR_18M BIT(7)
-#define RRSR_24M BIT(8)
-#define RRSR_36M BIT(9)
-#define RRSR_48M BIT(10)
-#define RRSR_54M BIT(11)
-#define BRSR_AckShortPmb BIT(23) // CCK ACK: use Short Preamble or not.
- UFWP = 0x318,
- RATR0 = 0x320, // Rate Adaptive Table register1
- DRIVER_RSSI = 0x32c, // Driver tell Firmware current RSSI
-//----------------------------------------------------------------------------
-// 8190 Rate Adaptive Table Register (offset 0x320, 4 byte)
-//----------------------------------------------------------------------------
-//CCK
-#define RATR_1M 0x00000001
-#define RATR_2M 0x00000002
-#define RATR_55M 0x00000004
-#define RATR_11M 0x00000008
-//OFDM
-#define RATR_6M 0x00000010
-#define RATR_9M 0x00000020
-#define RATR_12M 0x00000040
-#define RATR_18M 0x00000080
-#define RATR_24M 0x00000100
-#define RATR_36M 0x00000200
-#define RATR_48M 0x00000400
-#define RATR_54M 0x00000800
-//MCS 1 Spatial Stream
-#define RATR_MCS0 0x00001000
-#define RATR_MCS1 0x00002000
-#define RATR_MCS2 0x00004000
-#define RATR_MCS3 0x00008000
-#define RATR_MCS4 0x00010000
-#define RATR_MCS5 0x00020000
-#define RATR_MCS6 0x00040000
-#define RATR_MCS7 0x00080000
-//MCS 2 Spatial Stream
-#define RATR_MCS8 0x00100000
-#define RATR_MCS9 0x00200000
-#define RATR_MCS10 0x00400000
-#define RATR_MCS11 0x00800000
-#define RATR_MCS12 0x01000000
-#define RATR_MCS13 0x02000000
-#define RATR_MCS14 0x04000000
-#define RATR_MCS15 0x08000000
-// ALL CCK Rate
-#define RATE_ALL_CCK (RATR_1M | RATR_2M | RATR_55M | RATR_11M)
-#define RATE_ALL_OFDM_AG (RATR_6M | RATR_9M | RATR_12M | RATR_18M |\
- RATR_24M | RATR_36M | RATR_48M | RATR_54M)
-#define RATE_ALL_OFDM_1SS (RATR_MCS0 | RATR_MCS1 | RATR_MCS2 | RATR_MCS3 |\
- RATR_MCS4 | RATR_MCS5 | RATR_MCS6 | RATR_MCS7)
-#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 | RATR_MCS11 |\
- RATR_MCS12 | RATR_MCS13 | RATR_MCS14 | RATR_MCS15)
- EPROM_CMD = 0xfe58,
-#define Cmd9346CR_9356SEL BIT(4)
-#define EPROM_CMD_OPERATING_MODE_SHIFT 6
-#define EPROM_CMD_NORMAL 0
-#define EPROM_CMD_PROGRAM 2
-#define EPROM_CS_BIT BIT(3)
-#define EPROM_CK_BIT BIT(2)
-#define EPROM_W_BIT BIT(1)
-#define EPROM_R_BIT BIT(0)
-};
-
-//----------------------------------------------------------------------------
-// 818xB AnaParm & AnaParm2 Register
-//----------------------------------------------------------------------------
-#define GPI 0x108
-#endif
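
The rate-adaptive table and MSR fields removed above are plain bitmaps, so a driver composes the RATR word by OR-ing the rate groups it wants to allow and decodes the link mode from the low two bits of MSR. A minimal sketch of both operations, assuming the read_nic_byte()/write_nic_dword() register accessors declared in r8192U.h (illustrative only, not part of the removed file):

	/* Illustrative sketch: allow CCK, legacy OFDM and one-spatial-stream HT
	 * rates in the rate-adaptive table, then decode the current link mode
	 * from the Media Status Register.
	 */
	static void rtl8192_example_rate_and_link(struct net_device *dev)
	{
		u32 ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG | RATE_ALL_OFDM_1SS;
		u8 msr;

		write_nic_dword(dev, RATR0, ratr);

		read_nic_byte(dev, MSR, &msr);
		switch ((msr & MSR_LINK_MASK) >> MSR_LINK_SHIFT) {
		case MSR_LINK_MANAGED:	/* associated as a station */
			break;
		case MSR_LINK_ADHOC:	/* IBSS member */
			break;
		case MSR_LINK_MASTER:	/* acting as master/AP */
			break;
		default:		/* MSR_LINK_NONE: not linked */
			break;
		}
	}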
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
deleted file mode 100644
index 203ea3bfc843..000000000000
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ /dev/null
@@ -1,943 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * This file contains wireless extension handlers.
- *
- * This is part of rtl8180 OpenSource driver.
- * Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
- *
- * Parts of this driver are based on the GPL part
- * of the official realtek driver.
- *
- * Parts of this driver are based on the rtl8180 driver skeleton
- * from Patric Schenke & Andres Salomon.
- *
- * Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver.
- *
- * We want to thank the Authors of those projects and the Ndiswrapper
- * project Authors.
- *
- *****************************************************************************/
-
-#include <linux/string.h>
-#include "r8192U.h"
-#include "r8192U_hw.h"
-
-#include "ieee80211/dot11d.h"
-#include "r8192U_wx.h"
-
-#define RATE_COUNT 12
-static const u32 rtl8180_rates[] = {1000000, 2000000, 5500000, 11000000,
- 6000000, 9000000, 12000000, 18000000, 24000000, 36000000, 48000000, 54000000};
-
-#ifndef ENETDOWN
-#define ENETDOWN 1
-#endif
-
-static int r8192_wx_get_freq(struct net_device *dev,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- return ieee80211_wx_get_freq(priv->ieee80211, a, wrqu, b);
-}
-
-static int r8192_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- return ieee80211_wx_get_mode(priv->ieee80211, a, wrqu, b);
-}
-
-static int r8192_wx_get_rate(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- return ieee80211_wx_get_rate(priv->ieee80211, info, wrqu, extra);
-}
-
-static int r8192_wx_set_rate(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- mutex_lock(&priv->wx_mutex);
-
- ret = ieee80211_wx_set_rate(priv->ieee80211, info, wrqu, extra);
-
- mutex_unlock(&priv->wx_mutex);
-
- return ret;
-}
-
-static int r8192_wx_set_rts(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- mutex_lock(&priv->wx_mutex);
-
- ret = ieee80211_wx_set_rts(priv->ieee80211, info, wrqu, extra);
-
- mutex_unlock(&priv->wx_mutex);
-
- return ret;
-}
-
-static int r8192_wx_get_rts(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- return ieee80211_wx_get_rts(priv->ieee80211, info, wrqu, extra);
-}
-
-static int r8192_wx_set_power(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- mutex_lock(&priv->wx_mutex);
-
- ret = ieee80211_wx_set_power(priv->ieee80211, info, wrqu, extra);
-
- mutex_unlock(&priv->wx_mutex);
-
- return ret;
-}
-
-static int r8192_wx_get_power(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- return ieee80211_wx_get_power(priv->ieee80211, info, wrqu, extra);
-}
-
-static int r8192_wx_force_reset(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- mutex_lock(&priv->wx_mutex);
-
- netdev_dbg(dev, "%s(): force reset ! extra is %d\n", __func__, *extra);
- priv->force_reset = *extra;
- mutex_unlock(&priv->wx_mutex);
- return 0;
-}
-
-static int r8192_wx_set_rawtx(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- int ret;
-
- mutex_lock(&priv->wx_mutex);
-
- ret = ieee80211_wx_set_rawtx(priv->ieee80211, info, wrqu, extra);
-
- mutex_unlock(&priv->wx_mutex);
-
- return ret;
-}
-
-static int r8192_wx_set_crcmon(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- int *parms = (int *)extra;
- int enable = (parms[0] > 0);
-
- mutex_lock(&priv->wx_mutex);
-
- if (enable)
- priv->crcmon = 1;
- else
- priv->crcmon = 0;
-
- DMESG("bad CRC in monitor mode are %s",
- priv->crcmon ? "accepted" : "rejected");
-
- mutex_unlock(&priv->wx_mutex);
-
- return 0;
-}
-
-static int r8192_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- int ret;
-
- mutex_lock(&priv->wx_mutex);
-
- ret = ieee80211_wx_set_mode(priv->ieee80211, a, wrqu, b);
-
- rtl8192_set_rxconf(dev);
-
- mutex_unlock(&priv->wx_mutex);
- return ret;
-}
-
-struct iw_range_with_scan_capa {
- /* Informative stuff (to choose between different interface) */
- __u32 throughput; /* To give an idea... */
- /* In theory this value should be the maximum benchmarked
- * TCP/IP throughput, because with most of these devices the
- * bit rate is meaningless (overhead and co.) when estimating how
- * fast the connection will go and picking the fastest one.
- * I suggest people play with Netperf or any benchmark...
- */
-
- /* NWID (or domain id) */
- __u32 min_nwid; /* Minimal NWID we are able to set */
- __u32 max_nwid; /* Maximal NWID we are able to set */
-
- /* Old Frequency (backward compat - moved lower ) */
- __u16 old_num_channels;
- __u8 old_num_frequency;
-
- /* Scan capabilities */
- __u8 scan_capa;
-};
-
-static int rtl8180_wx_get_range(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_range *range = (struct iw_range *)extra;
- struct iw_range_with_scan_capa *tmp = (struct iw_range_with_scan_capa *)range;
- struct r8192_priv *priv = ieee80211_priv(dev);
- u16 val;
- int i;
-
- wrqu->data.length = sizeof(*range);
- memset(range, 0, sizeof(*range));
-
- /* Let's try to keep this struct in the same order as in
- * linux/include/wireless.h
- */
-
- /* TODO: See what values we can set, and remove the ones we can't
- * set, or fill them with some default data.
- */
-
- /* ~5 Mb/s real (802.11b) */
- range->throughput = 5 * 1000 * 1000;
-
- /* TODO: Not used in 802.11b? */
- /* range->min_nwid; */ /* Minimal NWID we are able to set */
- /* TODO: Not used in 802.11b? */
- /* range->max_nwid; */ /* Maximal NWID we are able to set */
-
- /* Old Frequency (backward compat - moved lower ) */
- /* range->old_num_channels; */
- /* range->old_num_frequency; */
- /* range->old_freq[6]; */ /* Filler to keep "version" at the same offset */
- if (priv->rf_set_sens)
- range->sensitivity = priv->max_sens; /* signal level threshold range */
-
- range->max_qual.qual = 100;
- /* TODO: Find real max RSSI and stick here */
- range->max_qual.level = 0;
- range->max_qual.noise = 0x100 - 98;
- range->max_qual.updated = 7; /* Updated all three */
-
- range->avg_qual.qual = 92; /* > 8% missed beacons is 'bad' */
- /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
- range->avg_qual.level = 0x100 - 78;
- range->avg_qual.noise = 0;
- range->avg_qual.updated = 7; /* Updated all three */
-
- range->num_bitrates = RATE_COUNT;
-
- for (i = 0; i < RATE_COUNT && i < IW_MAX_BITRATES; i++)
- range->bitrate[i] = rtl8180_rates[i];
-
- range->min_frag = MIN_FRAG_THRESHOLD;
- range->max_frag = MAX_FRAG_THRESHOLD;
-
- range->min_pmp = 0;
- range->max_pmp = 5000000;
- range->min_pmt = 0;
- range->max_pmt = 65535 * 1000;
- range->pmp_flags = IW_POWER_PERIOD;
- range->pmt_flags = IW_POWER_TIMEOUT;
- range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_ALL_R;
-
- range->we_version_compiled = WIRELESS_EXT;
- range->we_version_source = 16;
-
- /* range->retry_capa; */ /* What retry options are supported */
- /* range->retry_flags; */ /* How to decode max/min retry limit */
- /* range->r_time_flags; */ /* How to decode max/min retry life */
- /* range->min_retry; */ /* Minimal number of retries */
- /* range->max_retry; */ /* Maximal number of retries */
- /* range->min_r_time; */ /* Minimal retry lifetime */
- /* range->max_r_time; */ /* Maximal retry lifetime */
-
- for (i = 0, val = 0; i < 14; i++) {
- /* Include only legal frequencies for some countries */
- if ((GET_DOT11D_INFO(priv->ieee80211)->channel_map)[i + 1]) {
- range->freq[val].i = i + 1;
- range->freq[val].m = ieee80211_wlan_frequencies[i] * 100000;
- range->freq[val].e = 1;
- val++;
- } else {
- /* FIXME: do we need to set anything for channels */
- /* we don't use ? */
- }
-
- if (val == IW_MAX_FREQUENCIES)
- break;
- }
- range->num_frequency = val;
- range->num_channels = val;
- range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
- IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
- tmp->scan_capa = 0x01;
- return 0;
-}
-
-static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device *ieee = priv->ieee80211;
- int ret = 0;
-
- if (!priv->up)
- return -ENETDOWN;
-
- if (priv->ieee80211->LinkDetectInfo.bBusyTraffic)
- return -EAGAIN;
- if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
- struct iw_scan_req *req = (struct iw_scan_req *)b;
-
- if (req->essid_len) {
- int len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE);
-
- ieee->current_network.ssid_len = len;
- memcpy(ieee->current_network.ssid, req->essid, len);
- }
- }
-
- mutex_lock(&priv->wx_mutex);
- if (priv->ieee80211->state != IEEE80211_LINKED) {
- priv->ieee80211->scanning = 0;
- ieee80211_softmac_scan_syncro(priv->ieee80211);
- ret = 0;
- } else {
- ret = ieee80211_wx_set_scan(priv->ieee80211, a, wrqu, b);
- }
- mutex_unlock(&priv->wx_mutex);
- return ret;
-}
-
-static int r8192_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- int ret;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (!priv->up)
- return -ENETDOWN;
-
- mutex_lock(&priv->wx_mutex);
-
- ret = ieee80211_wx_get_scan(priv->ieee80211, a, wrqu, b);
-
- mutex_unlock(&priv->wx_mutex);
-
- return ret;
-}
-
-static int r8192_wx_set_essid(struct net_device *dev,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- int ret;
-
- mutex_lock(&priv->wx_mutex);
-
- ret = ieee80211_wx_set_essid(priv->ieee80211, a, wrqu, b);
-
- mutex_unlock(&priv->wx_mutex);
-
- return ret;
-}
-
-static int r8192_wx_get_essid(struct net_device *dev,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- int ret;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- mutex_lock(&priv->wx_mutex);
-
- ret = ieee80211_wx_get_essid(priv->ieee80211, a, wrqu, b);
-
- mutex_unlock(&priv->wx_mutex);
-
- return ret;
-}
-
-static int r8192_wx_set_freq(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- int ret;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- mutex_lock(&priv->wx_mutex);
-
- ret = ieee80211_wx_set_freq(priv->ieee80211, a, wrqu, b);
-
- mutex_unlock(&priv->wx_mutex);
- return ret;
-}
-
-static int r8192_wx_get_name(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- return ieee80211_wx_get_name(priv->ieee80211, info, wrqu, extra);
-}
-
-static int r8192_wx_set_frag(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (wrqu->frag.disabled) {
- priv->ieee80211->fts = DEFAULT_FRAG_THRESHOLD;
- } else {
- if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
- wrqu->frag.value > MAX_FRAG_THRESHOLD)
- return -EINVAL;
-
- priv->ieee80211->fts = wrqu->frag.value & ~0x1;
- }
-
- return 0;
-}
-
-static int r8192_wx_get_frag(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- wrqu->frag.value = priv->ieee80211->fts;
- wrqu->frag.fixed = 0; /* no auto select */
- wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FRAG_THRESHOLD);
-
- return 0;
-}
-
-static int r8192_wx_set_wap(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *awrq,
- char *extra)
-{
- int ret;
- struct r8192_priv *priv = ieee80211_priv(dev);
- /* struct sockaddr *temp = (struct sockaddr *)awrq; */
- mutex_lock(&priv->wx_mutex);
-
- ret = ieee80211_wx_set_wap(priv->ieee80211, info, awrq, extra);
-
- mutex_unlock(&priv->wx_mutex);
-
- return ret;
-}
-
-static int r8192_wx_get_wap(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- return ieee80211_wx_get_wap(priv->ieee80211, info, wrqu, extra);
-}
-
-static int r8192_wx_get_enc(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- return ieee80211_wx_get_encode(priv->ieee80211, info, wrqu, key);
-}
-
-static int r8192_wx_set_enc(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device *ieee = priv->ieee80211;
- int ret;
- u32 hwkey[4] = {0, 0, 0, 0};
- u8 mask = 0xff;
- u32 key_idx = 0;
- u8 zero_addr[4][6] = { {0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x03} };
- int i;
-
- if (!priv->up)
- return -ENETDOWN;
-
- mutex_lock(&priv->wx_mutex);
-
- RT_TRACE(COMP_SEC, "Setting SW wep key");
- ret = ieee80211_wx_set_encode(priv->ieee80211, info, wrqu, key);
-
- mutex_unlock(&priv->wx_mutex);
-
- /* sometimes the length is zero when no key value has been typed */
- if (wrqu->encoding.length != 0) {
- for (i = 0; i < 4; i++) {
- hwkey[i] |= key[4 * i + 0] & mask;
- if (i == 1 && (4 * i + 1) == wrqu->encoding.length)
- mask = 0x00;
- if (i == 3 && (4 * i + 1) == wrqu->encoding.length)
- mask = 0x00;
- hwkey[i] |= (key[4 * i + 1] & mask) << 8;
- hwkey[i] |= (key[4 * i + 2] & mask) << 16;
- hwkey[i] |= (key[4 * i + 3] & mask) << 24;
- }
-
- #define CONF_WEP40 0x4
- #define CONF_WEP104 0x14
-
- switch (wrqu->encoding.flags & IW_ENCODE_INDEX) {
- case 0:
- key_idx = ieee->tx_keyidx;
- break;
- case 1:
- key_idx = 0;
- break;
- case 2:
- key_idx = 1;
- break;
- case 3:
- key_idx = 2;
- break;
- case 4:
- key_idx = 3;
- break;
- default:
- break;
- }
-
- if (wrqu->encoding.length == 0x5) {
- ieee->pairwise_key_type = KEY_TYPE_WEP40;
- EnableHWSecurityConfig8192(dev);
-
- setKey(dev,
- key_idx, /* EntryNo */
- key_idx, /* KeyIndex */
- KEY_TYPE_WEP40, /* KeyType */
- zero_addr[key_idx],
- 0, /* DefaultKey */
- hwkey); /* KeyContent */
- } else if (wrqu->encoding.length == 0xd) {
- ieee->pairwise_key_type = KEY_TYPE_WEP104;
- EnableHWSecurityConfig8192(dev);
-
- setKey(dev,
- key_idx, /* EntryNo */
- key_idx, /* KeyIndex */
- KEY_TYPE_WEP104, /* KeyType */
- zero_addr[key_idx],
- 0, /* DefaultKey */
- hwkey); /* KeyContent */
- } else {
- netdev_warn(dev, "wrong type in WEP, not WEP40 and WEP104\n");
- }
- }
-
- return ret;
-}
-
-static int r8192_wx_set_scan_type(struct net_device *dev, struct iw_request_info *aa,
- union iwreq_data *wrqu, char *p)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- int *parms = (int *)p;
- int mode = parms[0];
-
- priv->ieee80211->active_scan = mode;
-
- return 1;
-}
-
-static int r8192_wx_set_retry(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- int err = 0;
-
- mutex_lock(&priv->wx_mutex);
-
- if (wrqu->retry.flags & IW_RETRY_LIFETIME ||
- wrqu->retry.disabled){
- err = -EINVAL;
- goto exit;
- }
- if (!(wrqu->retry.flags & IW_RETRY_LIMIT)) {
- err = -EINVAL;
- goto exit;
- }
-
- if (wrqu->retry.value > R8180_MAX_RETRY) {
- err = -EINVAL;
- goto exit;
- }
- if (wrqu->retry.flags & IW_RETRY_MAX) {
- priv->retry_rts = wrqu->retry.value;
- DMESG("Setting retry for RTS/CTS data to %d", wrqu->retry.value);
-
- } else {
- priv->retry_data = wrqu->retry.value;
- DMESG("Setting retry for non RTS/CTS data to %d", wrqu->retry.value);
- }
-
- /* FIXME !
- * We might try to write directly the TX config register
- * or to restart just the (R)TX process.
- * I'm unsure if whole reset is really needed
- */
-
- rtl8192_commit(dev);
-exit:
- mutex_unlock(&priv->wx_mutex);
-
- return err;
-}
-
-static int r8192_wx_get_retry(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- wrqu->retry.disabled = 0; /* can't be disabled */
-
- if ((wrqu->retry.flags & IW_RETRY_TYPE) ==
- IW_RETRY_LIFETIME)
- return -EINVAL;
-
- if (wrqu->retry.flags & IW_RETRY_MAX) {
- wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
- wrqu->retry.value = priv->retry_rts;
- } else {
- wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MIN;
- wrqu->retry.value = priv->retry_data;
- }
-
- return 0;
-}
-
-static int r8192_wx_get_sens(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (!priv->rf_set_sens)
- return -1; /* we do not have this support for this radio */
- wrqu->sens.value = priv->sens;
- return 0;
-}
-
-static int r8192_wx_set_sens(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- short err = 0;
-
- mutex_lock(&priv->wx_mutex);
- if (!priv->rf_set_sens) {
- err = -1; /* we do not have this support for this radio */
- goto exit;
- }
- if (priv->rf_set_sens(dev, wrqu->sens.value) == 0)
- priv->sens = wrqu->sens.value;
- else
- err = -EINVAL;
-
-exit:
- mutex_unlock(&priv->wx_mutex);
-
- return err;
-}
-
-/* hw security need to reorganized. */
-static int r8192_wx_set_enc_ext(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device *ieee = priv->ieee80211;
-
- mutex_lock(&priv->wx_mutex);
- ret = ieee80211_wx_set_encode_ext(priv->ieee80211, info, wrqu, extra);
-
- {
- u8 broadcast_addr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- u8 zero[6] = {0};
- u32 key[4] = {0};
- struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- struct iw_point *encoding = &wrqu->encoding;
- u8 idx = 0, alg = 0, group = 0;
-
- if ((encoding->flags & IW_ENCODE_DISABLED) || ext->alg == IW_ENCODE_ALG_NONE)
- /* none is not allowed to use hwsec WB 2008.07.01 */
- goto end_hw_sec;
-
- /* as IW_ENCODE_ALG_CCMP is defined to be 3 and KEY_TYPE_CCMP is defined to be 4 */
- alg = (ext->alg == IW_ENCODE_ALG_CCMP) ? KEY_TYPE_CCMP : ext->alg;
- idx = encoding->flags & IW_ENCODE_INDEX;
- if (idx)
- idx--;
- group = ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY;
-
- if ((!group) || (ieee->iw_mode == IW_MODE_ADHOC) || (alg == KEY_TYPE_WEP40)) {
- if ((ext->key_len == 13) && (alg == KEY_TYPE_WEP40))
- alg = KEY_TYPE_WEP104;
- ieee->pairwise_key_type = alg;
- EnableHWSecurityConfig8192(dev);
- }
- memcpy((u8 *)key, ext->key, 16); /* we only get a 16-byte key. Why? WB 2008.7.1 */
-
- if ((alg & KEY_TYPE_WEP40) && (ieee->auth_mode != 2)) {
- setKey(dev,
- idx, /* EntryNo */
- idx, /* KeyIndex */
- alg, /* KeyType */
- zero, /* MacAddr */
- 0, /* DefaultKey */
- key); /* KeyContent */
- } else if (group) {
- ieee->group_key_type = alg;
- setKey(dev,
- idx, /* EntryNo */
- idx, /* KeyIndex */
- alg, /* KeyType */
- broadcast_addr, /* MacAddr */
- 0, /* DefaultKey */
- key); /* KeyContent */
- } else { /* pairwise key */
- setKey(dev,
- 4, /* EntryNo */
- idx, /* KeyIndex */
- alg, /* KeyType */
- (u8 *)ieee->ap_mac_addr,/* MacAddr */
- 0, /* DefaultKey */
- key); /* KeyContent */
- }
- }
-
-end_hw_sec:
-
- mutex_unlock(&priv->wx_mutex);
- return ret;
-}
-
-static int r8192_wx_set_auth(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *data, char *extra)
-{
- int ret = 0;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- mutex_lock(&priv->wx_mutex);
- ret = ieee80211_wx_set_auth(priv->ieee80211, info, &data->param, extra);
- mutex_unlock(&priv->wx_mutex);
- return ret;
-}
-
-static int r8192_wx_set_mlme(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- mutex_lock(&priv->wx_mutex);
- ret = ieee80211_wx_set_mlme(priv->ieee80211, info, wrqu, extra);
-
- mutex_unlock(&priv->wx_mutex);
- return ret;
-}
-
-static int r8192_wx_set_gen_ie(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *data, char *extra)
-{
- int ret = 0;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- mutex_lock(&priv->wx_mutex);
- ret = ieee80211_wx_set_gen_ie(priv->ieee80211, extra, data->data.length);
- mutex_unlock(&priv->wx_mutex);
- return ret;
-}
-
-static int dummy(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- return -1;
-}
-
-static iw_handler r8192_wx_handlers[] = {
- NULL, /* SIOCSIWCOMMIT */
- r8192_wx_get_name, /* SIOCGIWNAME */
- dummy, /* SIOCSIWNWID */
- dummy, /* SIOCGIWNWID */
- r8192_wx_set_freq, /* SIOCSIWFREQ */
- r8192_wx_get_freq, /* SIOCGIWFREQ */
- r8192_wx_set_mode, /* SIOCSIWMODE */
- r8192_wx_get_mode, /* SIOCGIWMODE */
- r8192_wx_set_sens, /* SIOCSIWSENS */
- r8192_wx_get_sens, /* SIOCGIWSENS */
- NULL, /* SIOCSIWRANGE */
- rtl8180_wx_get_range, /* SIOCGIWRANGE */
- NULL, /* SIOCSIWPRIV */
- NULL, /* SIOCGIWPRIV */
- NULL, /* SIOCSIWSTATS */
- NULL, /* SIOCGIWSTATS */
- dummy, /* SIOCSIWSPY */
- dummy, /* SIOCGIWSPY */
- NULL, /* SIOCGIWTHRSPY */
- NULL, /* SIOCSIWTHRSPY */
- r8192_wx_set_wap, /* SIOCSIWAP */
- r8192_wx_get_wap, /* SIOCGIWAP */
- r8192_wx_set_mlme, /* MLME-- */
- dummy, /* SIOCGIWAPLIST -- deprecated */
- r8192_wx_set_scan, /* SIOCSIWSCAN */
- r8192_wx_get_scan, /* SIOCGIWSCAN */
- r8192_wx_set_essid, /* SIOCSIWESSID */
- r8192_wx_get_essid, /* SIOCGIWESSID */
- dummy, /* SIOCSIWNICKN */
- dummy, /* SIOCGIWNICKN */
- NULL, /* -- hole -- */
- NULL, /* -- hole -- */
- r8192_wx_set_rate, /* SIOCSIWRATE */
- r8192_wx_get_rate, /* SIOCGIWRATE */
- r8192_wx_set_rts, /* SIOCSIWRTS */
- r8192_wx_get_rts, /* SIOCGIWRTS */
- r8192_wx_set_frag, /* SIOCSIWFRAG */
- r8192_wx_get_frag, /* SIOCGIWFRAG */
- dummy, /* SIOCSIWTXPOW */
- dummy, /* SIOCGIWTXPOW */
- r8192_wx_set_retry, /* SIOCSIWRETRY */
- r8192_wx_get_retry, /* SIOCGIWRETRY */
- r8192_wx_set_enc, /* SIOCSIWENCODE */
- r8192_wx_get_enc, /* SIOCGIWENCODE */
- r8192_wx_set_power, /* SIOCSIWPOWER */
- r8192_wx_get_power, /* SIOCGIWPOWER */
- NULL, /*---hole---*/
- NULL, /*---hole---*/
- r8192_wx_set_gen_ie, /* NULL, */ /* SIOCSIWGENIE */
- NULL, /* SIOCGIWGENIE */
-
- r8192_wx_set_auth,/* NULL, */ /* SIOCSIWAUTH */
- NULL,/* r8192_wx_get_auth, */ /* NULL, */ /* SIOCGIWAUTH */
- r8192_wx_set_enc_ext, /* SIOCSIWENCODEEXT */
- NULL,/* r8192_wx_get_enc_ext, *//* NULL, */ /* SIOCGIWENCODEEXT */
- NULL, /* SIOCSIWPMKSA */
- NULL, /*---hole---*/
-
-};
-
-static const struct iw_priv_args r8192_private_args[] = {
- {
- SIOCIWFIRSTPRIV + 0x0,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "badcrc"
- },
- {
- SIOCIWFIRSTPRIV + 0x1,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "activescan"
-
- },
- {
- SIOCIWFIRSTPRIV + 0x2,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rawtx"
- },
- {
- SIOCIWFIRSTPRIV + 0x3,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "forcereset"
- }
-};
-
-static iw_handler r8192_private_handler[] = {
- r8192_wx_set_crcmon,
- r8192_wx_set_scan_type,
- r8192_wx_set_rawtx,
- r8192_wx_force_reset,
-};
-
-struct iw_statistics *r8192_get_wireless_stats(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device *ieee = priv->ieee80211;
- struct iw_statistics *wstats = &priv->wstats;
- int tmp_level = 0;
- int tmp_qual = 0;
- int tmp_noise = 0;
-
- if (ieee->state < IEEE80211_LINKED) {
- wstats->qual.qual = 0;
- wstats->qual.level = 0;
- wstats->qual.noise = 0;
- wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
- return wstats;
- }
-
- tmp_level = (&ieee->current_network)->stats.rssi;
- tmp_qual = (&ieee->current_network)->stats.signal;
- tmp_noise = (&ieee->current_network)->stats.noise;
-
- wstats->qual.level = tmp_level;
- wstats->qual.qual = tmp_qual;
- wstats->qual.noise = tmp_noise;
- wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
- return wstats;
-}
-
-const struct iw_handler_def r8192_wx_handlers_def = {
- .standard = r8192_wx_handlers,
- .num_standard = ARRAY_SIZE(r8192_wx_handlers),
- .private = r8192_private_handler,
- .num_private = ARRAY_SIZE(r8192_private_handler),
- .num_private_args = sizeof(r8192_private_args) / sizeof(struct iw_priv_args),
- .get_wireless_stats = r8192_get_wireless_stats,
- .private_args = (struct iw_priv_args *)r8192_private_args,
-};
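
The iw_handler_def removed above is consumed through the generic wireless-extensions hook on struct net_device; the core driver attaches it once at setup time so the kernel can dispatch SIOCxIW ioctls to the handlers in the table. A minimal sketch of that hookup, assuming it mirrors what r8192U_core.c did (illustrative only):

	#include <net/iw_handler.h>

	/* Illustrative sketch: attach the WEXT handler table to the net_device
	 * so that wireless-extension ioctls reach the handlers defined above.
	 */
	static void rtl8192_attach_wx_handlers(struct net_device *dev)
	{
		dev->wireless_handlers = &r8192_wx_handlers_def;
	}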
diff --git a/drivers/staging/rtl8192u/r8192U_wx.h b/drivers/staging/rtl8192u/r8192U_wx.h
deleted file mode 100644
index 27423cd64b4c..000000000000
--- a/drivers/staging/rtl8192u/r8192U_wx.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This is part of rtl8180 OpenSource driver - v 0.3
- * Copyright (C) Andrea Merello 2004 <andrea.merello@gmail.com>
- *
- * Parts of this driver are based on the GPL part of the official realtek driver
- * Parts of this driver are based on the rtl8180 driver skeleton from Patric
- * Schenke & Andres Salomon
- * Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver
- *
- * We want to thank the Authors of such projects and the Ndiswrapper project
- * Authors.
- */
-
-/* this file (will) contains wireless extension handlers */
-
-#ifndef R8180_WX_H
-#define R8180_WX_H
-
-extern const struct iw_handler_def r8192_wx_handlers_def;
-/* Enable the rtl819x_core.c to share this function, david 2008.9.22 */
-struct iw_statistics *r8192_get_wireless_stats(struct net_device *dev);
-
-#endif
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
deleted file mode 100644
index 30a320422358..000000000000
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c
+++ /dev/null
@@ -1,508 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * (c) Copyright 2008, RealTEK Technologies Inc. All Rights Reserved.
- *
- * Module: r819xusb_cmdpkt.c
- * (RTL8190 TX/RX command packet handler Source C File)
- *
- * Note: The module is responsible for handling TX and RX command packets.
- * 1. TX : Send set and query configuration command packets.
- * 2. RX : Receive tx feedback, beacon state, and query configuration
- * command packets.
- *
- * Function:
- *
- * Export:
- *
- * Abbrev:
- *
- * History:
- *
- * Date Who Remark
- * 05/06/2008 amy Create initial version porting from
- * windows driver.
- *
- ******************************************************************************/
-#include "r8192U.h"
-#include "r819xU_cmdpkt.h"
-
-rt_status SendTxCommandPacket(struct net_device *dev, void *pData, u32 DataLen)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct sk_buff *skb;
- struct cb_desc *tcb_desc;
-
- /* Get TCB and local buffer from common pool.
- * (It is shared by CmdQ, MgntQ, and USB coalesce DataQ)
- */
- skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4);
- if (!skb)
- return RT_STATUS_FAILURE;
- memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
- tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- tcb_desc->queue_index = TXCMD_QUEUE;
- tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_NORMAL;
- tcb_desc->bLastIniPkt = 0;
- skb_reserve(skb, USB_HWDESC_HEADER_LEN);
- skb_put_data(skb, pData, DataLen);
- tcb_desc->txbuf_size = (u16)DataLen;
-
- if (!priv->ieee80211->check_nic_enough_desc(dev, tcb_desc->queue_index) ||
- (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index])) ||
- (priv->ieee80211->queue_stop)) {
- RT_TRACE(COMP_FIRMWARE, "=== NULL packet ======> tx full!\n");
- skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
- } else {
- priv->ieee80211->softmac_hard_start_xmit(skb, dev);
- }
-
- return RT_STATUS_SUCCESS;
-}
-
-static void cmpk_count_txstatistic(struct net_device *dev, struct cmd_pkt_tx_feedback *pstx_fb)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-#ifdef ENABLE_PS
- RT_RF_POWER_STATE rtState;
-
- pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE,
- (pu1Byte)(&rtState));
-
- /* When RF is off, we should not count the packet for hw/sw synchronize
- * reason, ie. there may be a duration while sw switch is changed and
- * hw switch is being changed.
- */
- if (rtState == eRfOff)
- return;
-#endif
-
-#ifdef TODO
- if (pAdapter->bInHctTest)
- return;
-#endif
- /* We cannot know the packet length and transmit type:
- * broadcast, unicast, or multicast. So the relevant statistics
- * must be collected from the tx feedback info.
- */
- if (pstx_fb->tok) {
- priv->stats.txfeedbackok++;
- priv->stats.txoktotal++;
- priv->stats.txokbytestotal += pstx_fb->pkt_length;
- priv->stats.txokinperiod++;
-
- /* We can not make sure broadcast/multicast or unicast mode. */
- if (pstx_fb->pkt_type == PACKET_MULTICAST) {
- priv->stats.txmulticast++;
- priv->stats.txbytesmulticast += pstx_fb->pkt_length;
- } else if (pstx_fb->pkt_type == PACKET_BROADCAST) {
- priv->stats.txbroadcast++;
- priv->stats.txbytesbroadcast += pstx_fb->pkt_length;
- } else {
- priv->stats.txunicast++;
- priv->stats.txbytesunicast += pstx_fb->pkt_length;
- }
- } else {
- priv->stats.txfeedbackfail++;
- priv->stats.txerrtotal++;
- priv->stats.txerrbytestotal += pstx_fb->pkt_length;
-
- /* We can not make sure broadcast/multicast or unicast mode. */
- if (pstx_fb->pkt_type == PACKET_MULTICAST)
- priv->stats.txerrmulticast++;
- else if (pstx_fb->pkt_type == PACKET_BROADCAST)
- priv->stats.txerrbroadcast++;
- else
- priv->stats.txerrunicast++;
- }
-
- priv->stats.txretrycount += pstx_fb->retry_cnt;
- priv->stats.txfeedbackretry += pstx_fb->retry_cnt;
-}
-
-/*-----------------------------------------------------------------------------
- * Function: cmpk_handle_tx_feedback()
- *
- * Overview: The function is responsible for extracting the message inside
- * the TX feedback message from firmware. The dedicated info is
- * described in ws-06-0063-rtl8190-command-packet-specification.
- * Please refer to the chapter "TX Feedback Element".
- * We have to read 20 bytes from the command packet.
- *
- * Input: struct net_device *dev
- * u8 *pmsg - Msg Ptr of the command packet.
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 05/08/2008 amy Create Version 0 porting from windows code.
- *
- *---------------------------------------------------------------------------
- */
-static void cmpk_handle_tx_feedback(struct net_device *dev, u8 *pmsg)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct cmd_pkt_tx_feedback rx_tx_fb;
-
- priv->stats.txfeedback++;
-
- /* 1. Extract TX feedback info from RFD to temp structure buffer. */
- /* It seems that the FW uses big endian (MIPS) and the driver uses little
- * endian on Windows. So we have to read the content byte by byte or
- * convert the endianness before copying the message.
- */
- /* Use pointer to transfer structure memory. */
- memcpy((u8 *)&rx_tx_fb, pmsg, sizeof(struct cmd_pkt_tx_feedback));
- /* 2. Use tx feedback info to count TX statistics. */
- cmpk_count_txstatistic(dev, &rx_tx_fb);
- /* Comment previous method for TX statistic function. */
- /* Collect info TX feedback packet to fill TCB. */
- /* We cannot know the packet length and transmit type: broadcast,
- * unicast, or multicast.
- */
-}
-
-static void cmdpkt_beacontimerinterrupt_819xusb(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u16 tx_rate;
-
- /* 87B have to S/W beacon for DTM encryption_cmn. */
- if (priv->ieee80211->current_network.mode == IEEE_A ||
- priv->ieee80211->current_network.mode == IEEE_N_5G ||
- (priv->ieee80211->current_network.mode == IEEE_N_24G &&
- (!priv->ieee80211->pHTInfo->bCurSuppCCK))) {
- tx_rate = 60;
- DMESG("send beacon frame tx rate is 6Mbpm\n");
- } else {
- tx_rate = 10;
- DMESG("send beacon frame tx rate is 1Mbpm\n");
- }
-
- rtl819xusb_beacon_tx(dev, tx_rate); /* HW Beacon */
-}
-
-/*-----------------------------------------------------------------------------
- * Function: cmpk_handle_interrupt_status()
- *
- * Overview: The function is responsible for extracting the message from
- * firmware. The dedicated info is described in
- * ws-07-0063-v06-rtl819x-command-packet-specification-070315.doc.
- * Please refer to chapter "Interrupt Status Element".
- *
- * Input: struct net_device *dev
- * u8 *pmsg - Message Pointer of the command packet.
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 05/12/2008 amy Add this for rtl8192 porting from windows code.
- *
- *---------------------------------------------------------------------------
- */
-static void cmpk_handle_interrupt_status(struct net_device *dev, u8 *pmsg)
-{
- struct cmd_pkt_interrupt_status rx_intr_status; /* */
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- DMESG("---> cmpk_Handle_Interrupt_Status()\n");
-
- /* 1. Extract TX feedback info from RFD to temp structure buffer. */
- /* It seems that the FW uses big endian (MIPS) and the driver uses little
- * endian on Windows. So we have to read the content byte by byte or
- * convert the endianness before copying the message.
- */
- rx_intr_status.length = pmsg[1];
- if (rx_intr_status.length != (sizeof(struct cmd_pkt_interrupt_status) - 2)) {
- DMESG("cmpk_Handle_Interrupt_Status: wrong length!\n");
- return;
- }
-
- /* Statistics of beacon for ad-hoc mode. */
- if (priv->ieee80211->iw_mode == IW_MODE_ADHOC) {
- /* 2 maybe need endian transform? */
- rx_intr_status.interrupt_status = *((u32 *)(pmsg + 4));
-
- DMESG("interrupt status = 0x%x\n",
- rx_intr_status.interrupt_status);
-
- if (rx_intr_status.interrupt_status & ISR_TX_BCN_OK) {
- priv->ieee80211->bibsscoordinator = true;
- priv->stats.txbeaconokint++;
- } else if (rx_intr_status.interrupt_status & ISR_TX_BCN_ERR) {
- priv->ieee80211->bibsscoordinator = false;
- priv->stats.txbeaconerr++;
- }
-
- if (rx_intr_status.interrupt_status & ISR_BCN_TIMER_INTR)
- cmdpkt_beacontimerinterrupt_819xusb(dev);
- }
-
- /* Other information in interrupt status we need? */
-
- DMESG("<---- cmpk_handle_interrupt_status()\n");
-}
-
-/*-----------------------------------------------------------------------------
- * Function: cmpk_count_tx_status()
- *
- * Overview: Count aggregated tx status from firmware for the rx command
- * packet with element id = RX_TX_STATUS.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 05/12/2008 amy Create Version 0 porting from windows code.
- *
- *---------------------------------------------------------------------------
- */
-static void cmpk_count_tx_status(struct net_device *dev,
- cmpk_tx_status_t *pstx_status)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
-#ifdef ENABLE_PS
-
- RT_RF_POWER_STATE rtState;
-
- pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE,
- (pu1Byte)(&rtState));
-
- /* When RF is off, we should not count the packet for hw/sw synchronize
- * reason, ie. there may be a duration while sw switch is changed and
- * hw switch is being changed.
- */
- if (rtState == eRfOff)
- return;
-#endif
-
- priv->stats.txfeedbackok += pstx_status->txok;
- priv->stats.txoktotal += pstx_status->txok;
-
- priv->stats.txfeedbackfail += pstx_status->txfail;
- priv->stats.txerrtotal += pstx_status->txfail;
-
- priv->stats.txretrycount += pstx_status->txretry;
- priv->stats.txfeedbackretry += pstx_status->txretry;
-
- priv->stats.txmulticast += pstx_status->txmcok;
- priv->stats.txbroadcast += pstx_status->txbcok;
- priv->stats.txunicast += pstx_status->txucok;
-
- priv->stats.txerrmulticast += pstx_status->txmcfail;
- priv->stats.txerrbroadcast += pstx_status->txbcfail;
- priv->stats.txerrunicast += pstx_status->txucfail;
-
- priv->stats.txbytesmulticast += pstx_status->txmclength;
- priv->stats.txbytesbroadcast += pstx_status->txbclength;
- priv->stats.txbytesunicast += pstx_status->txuclength;
-
- priv->stats.last_packet_rate = pstx_status->rate;
-}
-
-/*-----------------------------------------------------------------------------
- * Function: cmpk_handle_tx_status()
- *
- * Overview: Firmware adds a new tx feedback status to reduce the rx
- * command packet buffer operation load.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 05/12/2008 amy Create Version 0 porting from windows code.
- *
- *---------------------------------------------------------------------------
- */
-static void cmpk_handle_tx_status(struct net_device *dev, u8 *pmsg)
-{
- cmpk_tx_status_t rx_tx_sts;
-
- memcpy((void *)&rx_tx_sts, (void *)pmsg, sizeof(cmpk_tx_status_t));
- /* 2. Use tx feedback info to count TX statistics. */
- cmpk_count_tx_status(dev, &rx_tx_sts);
-}
-
-/*-----------------------------------------------------------------------------
- * Function: cmpk_handle_tx_rate_history()
- *
- * Overview: Firmware adds a new tx rate history
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 05/12/2008 amy Create Version 0 porting from windows code.
- *
- *---------------------------------------------------------------------------
- */
-static void cmpk_handle_tx_rate_history(struct net_device *dev, u8 *pmsg)
-{
- cmpk_tx_rahis_t *ptxrate;
- u8 i, j;
- u16 length = sizeof(cmpk_tx_rahis_t);
- u32 *ptemp;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
-#ifdef ENABLE_PS
- pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE,
- (pu1Byte)(&rtState));
-
- /* When RF is off, we should not count the packet for hw/sw synchronize
- * reason, ie. there may be a duration while sw switch is changed and
- * hw switch is being changed.
- */
- if (rtState == eRfOff)
- return;
-#endif
-
- ptemp = (u32 *)pmsg;
-
- /* Do the endian conversion on 16-bit word boundaries for the Windows system.
- * A different endian conversion must be done for Linux and Mac OS.
- */
- for (i = 0; i < (length/4); i++) {
- u16 temp1, temp2;
-
- temp1 = ptemp[i] & 0x0000FFFF;
- temp2 = ptemp[i] >> 16;
- ptemp[i] = (temp1 << 16) | temp2;
- }
-
- ptxrate = (cmpk_tx_rahis_t *)pmsg;
-
- if (!ptxrate)
- return;
-
- for (i = 0; i < 16; i++) {
- /* Collect CCK rate packet num */
- if (i < 4)
- priv->stats.txrate.cck[i] += ptxrate->cck[i];
-
- /* Collect OFDM rate packet num */
- if (i < 8)
- priv->stats.txrate.ofdm[i] += ptxrate->ofdm[i];
-
- for (j = 0; j < 4; j++)
- priv->stats.txrate.ht_mcs[j][i] += ptxrate->ht_mcs[j][i];
- }
-}
-
-/*-----------------------------------------------------------------------------
- * Function: cmpk_message_handle_rx()
- *
- * Overview: In the function, we will capture different RX command packet
- * info. Every RX command packet element has different message
- * length and meaning in content. We only support three types of RX
- * command packets now. Please refer to document
- * ws-06-0063-rtl8190-command-packet-specification.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 05/06/2008 amy Create Version 0 porting from windows code.
- *
- *---------------------------------------------------------------------------
- */
-u32 cmpk_message_handle_rx(struct net_device *dev,
- struct ieee80211_rx_stats *pstats)
-{
- int total_length;
- u8 cmd_length, exe_cnt = 0;
- u8 element_id;
- u8 *pcmd_buff;
-
- /* 0. Check input arguments. Return if it is not a command queue
- * message or the pointer is null.
- */
- if (!pstats)
- return 0; /* This is not a command packet. */
-
- /* 1. Read received command packet message length from RFD. */
- total_length = pstats->Length;
-
- /* 2. Read virtual address from RFD. */
- pcmd_buff = pstats->virtual_address;
-
- /* 3. Read command packet element id and length. */
- element_id = pcmd_buff[0];
-
- /* 4. Check every received command packet content according to different
- * element type. Because FW may aggregate RX command packet to
- * minimize transmit time between DRV and FW.
- */
- /* Add a counter to prevent the lock in the loop from being held too
- * long
- */
- while (total_length > 0 && exe_cnt++ < 100) {
- /* We support aggregation of different cmd in the same packet */
- element_id = pcmd_buff[0];
-
- switch (element_id) {
- case RX_TX_FEEDBACK:
- cmpk_handle_tx_feedback(dev, pcmd_buff);
- cmd_length = CMPK_RX_TX_FB_SIZE;
- break;
-
- case RX_INTERRUPT_STATUS:
- cmpk_handle_interrupt_status(dev, pcmd_buff);
- cmd_length = sizeof(struct cmd_pkt_interrupt_status);
- break;
-
- case BOTH_QUERY_CONFIG:
- cmd_length = CMPK_BOTH_QUERY_CONFIG_SIZE;
- break;
-
- case RX_TX_STATUS:
- cmpk_handle_tx_status(dev, pcmd_buff);
- cmd_length = CMPK_RX_TX_STS_SIZE;
- break;
-
- case RX_TX_PER_PKT_FEEDBACK:
- /* You must at least add a switch case element here,
- * otherwise we will jump to the default case.
- */
- cmd_length = CMPK_RX_TX_FB_SIZE;
- break;
-
- case RX_TX_RATE_HISTORY:
- cmpk_handle_tx_rate_history(dev, pcmd_buff);
- cmd_length = CMPK_TX_RAHIS_SIZE;
- break;
-
- default:
-
- RT_TRACE(COMP_ERR, "---->%s():unknown CMD Element\n",
- __func__);
- return 1; /* This is a command packet. */
- }
-
- total_length -= cmd_length;
- pcmd_buff += cmd_length;
- }
- return 1; /* This is a command packet. */
-}
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.h b/drivers/staging/rtl8192u/r819xU_cmdpkt.h
deleted file mode 100644
index be45cd609d67..000000000000
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.h
+++ /dev/null
@@ -1,190 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef R819XUSB_CMDPKT_H
-#define R819XUSB_CMDPKT_H
-/* Different command packet have dedicated message length and definition. */
-#define CMPK_RX_TX_FB_SIZE sizeof(struct cmd_pkt_tx_feedback) /* 20 */
-#define CMPK_BOTH_QUERY_CONFIG_SIZE sizeof(struct cmd_pkt_set_configuration) /* 16 */
-#define CMPK_RX_TX_STS_SIZE sizeof(cmpk_tx_status_t)
-#define CMPK_TX_RAHIS_SIZE sizeof(cmpk_tx_rahis_t)
-
-/* 2008/05/08 amy For USB constant. */
-#define ISR_TX_BCN_OK BIT(27) /* Transmit Beacon OK */
-#define ISR_TX_BCN_ERR BIT(26) /* Transmit Beacon Error */
-#define ISR_BCN_TIMER_INTR BIT(13) /* Beacon Timer Interrupt */
-
-/* Define element ID of command packet. */
-
-/*------------------------------Define structure----------------------------*/
-/* Define different command packet structure. */
-/* 1. RX side: TX feedback packet. */
-struct cmd_pkt_tx_feedback {
- /* DWORD 0 */
- u8 element_id; /* Command packet type. */
- u8 length; /* Command packet length. */
- /* Change tx feedback info field. */
- /*------TX Feedback Info Field */
- u8 TID:4;
- u8 fail_reason:3;
- u8 tok:1; /* Transmit ok. */
- u8 reserve1:4;
- u8 pkt_type:2;
- u8 bandwidth:1;
- u8 qos_pkt:1;
-
- /* DWORD 1 */
- u8 reserve2;
- /*------TX Feedback Info Field */
- u8 retry_cnt;
- u16 pkt_id;
-
- /* DWORD 3 */
- u16 seq_num;
- u8 s_rate; /* Start rate. */
- u8 f_rate; /* Final rate. */
-
- /* DWORD 4 */
- u8 s_rts_rate;
- u8 f_rts_rate;
- u16 pkt_length;
-
- /* DWORD 5 */
- u16 reserve3;
- u16 duration;
-};
-
-/* 2. RX side: Interrupt status packet. It includes Beacon State,
- * Beacon Timer Interrupt and other useful information in MAC ISR Reg.
- */
-struct cmd_pkt_interrupt_status {
- u8 element_id; /* Command packet type. */
- u8 length; /* Command packet length. */
- u16 reserve;
- u32 interrupt_status; /* Interrupt Status. */
-};
-
-/* 3. TX side: Set configuration packet. */
-struct cmd_pkt_set_configuration {
- u8 element_id; /* Command packet type. */
- u8 length; /* Command packet length. */
- u16 reserve1;
- /* Configuration info. */
- u8 cfg_reserve1:3;
- u8 cfg_size:2;
- u8 cfg_type:2;
- u8 cfg_action:1;
- u8 cfg_reserve2;
- u8 cfg_page:4;
- u8 cfg_reserve3:4;
- u8 cfg_offset;
- u32 value;
- u32 mask;
-};
-
-/* 4. Both side : TX/RX query configuration packet. The query structure is the
- * same as set configuration.
- */
-#define cmpk_query_cfg cmd_pkt_set_configuration
-
-/* 5. Multi packet feedback status. */
-typedef struct tag_tx_stats_feedback {
- /* For endian transfer --> the driver layout will not be the same as
- * the firmware structure.
- */
- /* DW 0 */
- u16 reserve1;
- u8 length; /* Command packet length */
- u8 element_id; /* Command packet type */
-
- /* DW 1 */
- u16 txfail; /* Tx fail count */
- u16 txok; /* Tx ok count */
-
- /* DW 2 */
- u16 txmcok; /* Tx multicast */
- u16 txretry; /* Tx retry count */
-
- /* DW 3 */
- u16 txucok; /* Tx unicast */
- u16 txbcok; /* Tx broadcast */
-
- /* DW 4 */
- u16 txbcfail;
- u16 txmcfail;
-
- /* DW 5 */
- u16 reserve2;
- u16 txucfail;
-
- /* DW 6-8 */
- u32 txmclength;
- u32 txbclength;
- u32 txuclength;
-
- /* DW 9 */
- u16 reserve3_23;
- u8 reserve3_1;
- u8 rate;
-} __packed cmpk_tx_status_t;
-
-/* 6. Debug feedback message. */
-/* Define RX debug message */
-typedef struct tag_rx_debug_message_feedback {
- /* For endian transfer --> for driver */
- /* DW 0 */
- u16 reserve1;
- u8 length; /* Command packet length */
- u8 element_id; /* Command packet type */
-
- /* DW 1-?? */
- /* Variable debug message. */
-
-} cmpk_rx_dbginfo_t;
-
-/* Define transmit rate history. For big endian format. */
-typedef struct tag_tx_rate_history {
- /* For endian transfer --> for driver */
- /* DW 0 */
- u8 element_id; /* Command packet type */
- u8 length; /* Command packet length */
- u16 reserved1;
-
- /* DW 1-2 CCK rate counter */
- u16 cck[4];
-
- /* DW 3-6 */
- u16 ofdm[8];
-
- /* DW 7-14 BW=0 SG=0
- * DW 15-22 BW=1 SG=0
- * DW 23-30 BW=0 SG=1
- * DW 31-38 BW=1 SG=1
- */
- u16 ht_mcs[4][16];
-
-} __packed cmpk_tx_rahis_t;
-
-typedef enum tag_command_packet_directories {
- RX_TX_FEEDBACK = 0,
- RX_INTERRUPT_STATUS = 1,
- TX_SET_CONFIG = 2,
- BOTH_QUERY_CONFIG = 3,
- RX_TX_STATUS = 4,
- RX_DBGINFO_FEEDBACK = 5,
- RX_TX_PER_PKT_FEEDBACK = 6,
- RX_TX_RATE_HISTORY = 7,
- RX_CMD_ELE_MAX
-} cmpk_element_e;
-
-typedef enum _rt_status {
- RT_STATUS_SUCCESS,
- RT_STATUS_FAILURE,
- RT_STATUS_PENDING,
- RT_STATUS_RESOURCE
-} rt_status, *prt_status;
-
-u32 cmpk_message_handle_rx(struct net_device *dev,
- struct ieee80211_rx_stats *pstats);
-rt_status SendTxCommandPacket(struct net_device *dev,
- void *pData, u32 DataLen);
-
-#endif
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.c b/drivers/staging/rtl8192u/r819xU_firmware.c
deleted file mode 100644
index 4f8629e47e82..000000000000
--- a/drivers/staging/rtl8192u/r819xU_firmware.c
+++ /dev/null
@@ -1,340 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/**************************************************************************************************
- * Procedure: Init boot code/firmware code/data session
- *
- * Description: This routine will initialize firmware. If any error occurs during the initialization
- * process, the routine shall terminate immediately and return failure.
- * NIC driver should call NdisOpenFile only from MiniportInitialize.
- *
- * Arguments: The pointer of the adapter
-
- * Returns:
- * NDIS_STATUS_FAILURE - the following initialization process should be terminated
- * NDIS_STATUS_SUCCESS - if firmware initialization process success
- **************************************************************************************************/
-
-#include "r8192U.h"
-#include "r8192U_hw.h"
-#include "r819xU_firmware_img.h"
-#include "r819xU_firmware.h"
-#include <linux/firmware.h>
-
-static void firmware_init_param(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- rt_firmware *pfirmware = priv->pFirmware;
-
- pfirmware->cmdpacket_frag_threshold = GET_COMMAND_PACKET_FRAG_THRESHOLD(MAX_TRANSMIT_BUFFER_SIZE);
-}
-
-/*
- * segment the img and use the ptr and length to remember info on each segment
- *
- */
-static bool fw_download_code(struct net_device *dev, u8 *code_virtual_address,
- u32 buffer_len)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- bool rt_status = true;
- u16 frag_threshold;
- u16 frag_length, frag_offset = 0;
- int i;
-
- rt_firmware *pfirmware = priv->pFirmware;
- struct sk_buff *skb;
- unsigned char *seg_ptr;
- struct cb_desc *tcb_desc;
- u8 bLastIniPkt;
- u8 index;
-
- firmware_init_param(dev);
- /* Fragmentation might be required */
- frag_threshold = pfirmware->cmdpacket_frag_threshold;
- do {
- if ((buffer_len - frag_offset) > frag_threshold) {
- frag_length = frag_threshold;
- bLastIniPkt = 0;
- } else {
- frag_length = buffer_len - frag_offset;
- bLastIniPkt = 1;
- }
-
- /* Allocate skb buffer to contain firmware info and tx descriptor info
- * add 4 to avoid packet appending overflow.
- */
- skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + frag_length + 4);
- if (!skb)
- return false;
- memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
- tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- tcb_desc->queue_index = TXCMD_QUEUE;
- tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_INIT;
- tcb_desc->bLastIniPkt = bLastIniPkt;
-
- skb_reserve(skb, USB_HWDESC_HEADER_LEN);
- seg_ptr = skb->data;
- /*
- * Transform from little endian to big endian
- * and pad with zeros
- */
- for (i = 0; i < frag_length; i += 4) {
- *seg_ptr++ = ((i+0) < frag_length)?code_virtual_address[i+3] : 0;
- *seg_ptr++ = ((i+1) < frag_length)?code_virtual_address[i+2] : 0;
- *seg_ptr++ = ((i+2) < frag_length)?code_virtual_address[i+1] : 0;
- *seg_ptr++ = ((i+3) < frag_length)?code_virtual_address[i+0] : 0;
- }
- tcb_desc->txbuf_size = (u16)i;
- skb_put(skb, i);
-
- index = tcb_desc->queue_index;
- if (!priv->ieee80211->check_nic_enough_desc(dev, index) ||
- (!skb_queue_empty(&priv->ieee80211->skb_waitQ[index])) ||
- (priv->ieee80211->queue_stop)) {
- RT_TRACE(COMP_FIRMWARE, "=====================================================> tx full!\n");
- skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
- } else {
- priv->ieee80211->softmac_hard_start_xmit(skb, dev);
- }
-
- code_virtual_address += frag_length;
- frag_offset += frag_length;
-
- } while (frag_offset < buffer_len);
-
- return rt_status;
-}
-
-/*
- * Procedure: Check whether the main code download is OK. If OK, turn on the CPU
- *
- * Description: The CPU registers are located in a different page from the general registers.
- * Switch to the CPU register page at the beginning and switch back before returning
- *
- *
- * Arguments: The pointer of the adapter
- *
- * Returns:
- * NDIS_STATUS_FAILURE - the following initialization process should
- * be terminated
- * NDIS_STATUS_SUCCESS - if firmware initialization process success
- */
-static bool CPUcheck_maincodeok_turnonCPU(struct net_device *dev)
-{
- bool rt_status = true;
- int check_putcodeOK_time = 200000, check_bootOk_time = 200000;
- u32 CPU_status = 0;
-
- /* Check whether put code OK */
- do {
- read_nic_dword(dev, CPU_GEN, &CPU_status);
-
- if (CPU_status&CPU_GEN_PUT_CODE_OK)
- break;
-
- } while (check_putcodeOK_time--);
-
- if (!(CPU_status&CPU_GEN_PUT_CODE_OK)) {
- RT_TRACE(COMP_ERR, "Download Firmware: Put code fail!\n");
- goto CPUCheckMainCodeOKAndTurnOnCPU_Fail;
- } else {
- RT_TRACE(COMP_FIRMWARE, "Download Firmware: Put code ok!\n");
- }
-
- /* Turn On CPU */
- read_nic_dword(dev, CPU_GEN, &CPU_status);
- write_nic_byte(dev, CPU_GEN,
- (u8)((CPU_status | CPU_GEN_PWR_STB_CPU) & 0xff));
- mdelay(1000);
-
- /* Check whether CPU boot OK */
- do {
- read_nic_dword(dev, CPU_GEN, &CPU_status);
-
- if (CPU_status&CPU_GEN_BOOT_RDY)
- break;
- } while (check_bootOk_time--);
-
- if (!(CPU_status&CPU_GEN_BOOT_RDY))
- goto CPUCheckMainCodeOKAndTurnOnCPU_Fail;
- else
- RT_TRACE(COMP_FIRMWARE, "Download Firmware: Boot ready!\n");
-
- return rt_status;
-
-CPUCheckMainCodeOKAndTurnOnCPU_Fail:
- RT_TRACE(COMP_ERR, "ERR in %s()\n", __func__);
- rt_status = false;
- return rt_status;
-}
-
-static bool CPUcheck_firmware_ready(struct net_device *dev)
-{
- bool rt_status = true;
- int check_time = 200000;
- u32 CPU_status = 0;
-
- /* Check Firmware Ready */
- do {
- read_nic_dword(dev, CPU_GEN, &CPU_status);
-
- if (CPU_status&CPU_GEN_FIRM_RDY)
- break;
-
- } while (check_time--);
-
- if (!(CPU_status&CPU_GEN_FIRM_RDY))
- goto CPUCheckFirmwareReady_Fail;
- else
- RT_TRACE(COMP_FIRMWARE, "Download Firmware: Firmware ready!\n");
-
- return rt_status;
-
-CPUCheckFirmwareReady_Fail:
- RT_TRACE(COMP_ERR, "ERR in %s()\n", __func__);
- rt_status = false;
- return rt_status;
-}
-
-bool init_firmware(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- bool rt_status = true;
-
- u32 file_length = 0;
- u8 *mapped_file = NULL;
- u32 init_step = 0;
- enum opt_rst_type_e rst_opt = OPT_SYSTEM_RESET;
- enum firmware_init_step_e starting_state = FW_INIT_STEP0_BOOT;
-
- rt_firmware *pfirmware = priv->pFirmware;
- const struct firmware *fw_entry;
- const char *fw_name[3] = { "RTL8192U/boot.img",
- "RTL8192U/main.img",
- "RTL8192U/data.img"};
- int rc;
-
- RT_TRACE(COMP_FIRMWARE, " PlatformInitFirmware()==>\n");
-
- if (pfirmware->firmware_status == FW_STATUS_0_INIT) {
- /* it is called by reset */
- rst_opt = OPT_SYSTEM_RESET;
- starting_state = FW_INIT_STEP0_BOOT;
- /* TODO: system reset */
-
- } else if (pfirmware->firmware_status == FW_STATUS_5_READY) {
- /* it is called by Initialize */
- rst_opt = OPT_FIRMWARE_RESET;
- starting_state = FW_INIT_STEP2_DATA;
- } else {
- RT_TRACE(COMP_FIRMWARE, "PlatformInitFirmware: undefined firmware state\n");
- }
-
- /*
- * Download boot, main, and data image for System reset.
- * Download data image for firmware reset
- */
- for (init_step = starting_state; init_step <= FW_INIT_STEP2_DATA; init_step++) {
- /*
- * Open the image file and map it to contiguous memory if the open succeeds,
- * or read the image from an array. By default, load from the IMG file
- */
- if (rst_opt == OPT_SYSTEM_RESET) {
- rc = request_firmware(&fw_entry, fw_name[init_step], &priv->udev->dev);
- if (rc < 0) {
- RT_TRACE(COMP_ERR, "request firmware fail!\n");
- goto download_firmware_fail;
- }
-
- if (fw_entry->size > sizeof(pfirmware->firmware_buf)) {
- RT_TRACE(COMP_ERR, "img file size exceed the container buffer fail!\n");
- goto download_firmware_fail;
- }
-
- if (init_step != FW_INIT_STEP1_MAIN) {
- memcpy(pfirmware->firmware_buf, fw_entry->data, fw_entry->size);
- mapped_file = pfirmware->firmware_buf;
- file_length = fw_entry->size;
- } else {
- memset(pfirmware->firmware_buf, 0, 128);
- memcpy(&pfirmware->firmware_buf[128], fw_entry->data, fw_entry->size);
- mapped_file = pfirmware->firmware_buf;
- file_length = fw_entry->size + 128;
- }
- pfirmware->firmware_buf_size = file_length;
- } else if (rst_opt == OPT_FIRMWARE_RESET) {
- /* we only need to download data.img here */
- mapped_file = pfirmware->firmware_buf;
- file_length = pfirmware->firmware_buf_size;
- }
-
- /* Download image file */
-		/* The firmware download process is as follows:
-		 * 1. each packet is segmented and inserted into the wait queue,
-		 * 2. each packet segment is placed in an skb_buff packet,
-		 * 3. each skb_buff packet's data content already includes the
-		 *    firmware info and Tx descriptor info.
- */
- rt_status = fw_download_code(dev, mapped_file, file_length);
- if (rst_opt == OPT_SYSTEM_RESET)
- release_firmware(fw_entry);
-
- if (!rt_status)
- goto download_firmware_fail;
-
- switch (init_step) {
- case FW_INIT_STEP0_BOOT:
-			/* Boot code downloaded;
-			 * initialize the command descriptor.
-			 * The polling bit is set once the firmware code is also configured.
- */
- pfirmware->firmware_status = FW_STATUS_1_MOVE_BOOT_CODE;
- /* mdelay(1000); */
- /*
- * To initialize IMEM, CPU move code from 0x80000080,
- * hence, we send 0x80 byte packet
- */
- break;
-
- case FW_INIT_STEP1_MAIN:
- /* Download firmware code. Wait until Boot Ready and Turn on CPU */
- pfirmware->firmware_status = FW_STATUS_2_MOVE_MAIN_CODE;
-
- /* Check Put Code OK and Turn On CPU */
- rt_status = CPUcheck_maincodeok_turnonCPU(dev);
- if (!rt_status) {
- RT_TRACE(COMP_ERR, "CPUcheck_maincodeok_turnonCPU fail!\n");
- goto download_firmware_fail;
- }
-
- pfirmware->firmware_status = FW_STATUS_3_TURNON_CPU;
- break;
-
- case FW_INIT_STEP2_DATA:
- /* download initial data code */
- pfirmware->firmware_status = FW_STATUS_4_MOVE_DATA_CODE;
- mdelay(1);
-
- rt_status = CPUcheck_firmware_ready(dev);
- if (!rt_status) {
- RT_TRACE(COMP_ERR, "CPUcheck_firmware_ready fail(%d)!\n", rt_status);
- goto download_firmware_fail;
- }
-
-			/* Wait until the data code is initialized and ready. */
- pfirmware->firmware_status = FW_STATUS_5_READY;
- break;
- }
- }
-
- RT_TRACE(COMP_FIRMWARE, "Firmware Download Success\n");
- return rt_status;
-
-download_firmware_fail:
- RT_TRACE(COMP_ERR, "ERR in %s()\n", __func__);
- rt_status = false;
- return rt_status;
-}
-
-MODULE_FIRMWARE("RTL8192U/boot.img");
-MODULE_FIRMWARE("RTL8192U/main.img");
-MODULE_FIRMWARE("RTL8192U/data.img");
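The CPUcheck_maincodeok_turnonCPU() and CPUcheck_firmware_ready() helpers above share one pattern: poll a status register until a ready bit is set or a retry budget is exhausted. A minimal, self-contained sketch of that pattern is shown below; poll_ready_bit() and the read_status callback are hypothetical stand-ins for read_nic_dword() on CPU_GEN, not part of the removed driver.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical register-read callback; stands in for read_nic_dword(). */
typedef uint32_t (*read_status_fn)(void *hw);

/*
 * Poll until (status & ready_mask) is set or max_tries attempts have been
 * made. Returns true on success, false on timeout. This mirrors the
 * CPU_GEN_BOOT_RDY / CPU_GEN_FIRM_RDY loops above, minus the tracing.
 */
static bool poll_ready_bit(void *hw, read_status_fn read_status,
			   uint32_t ready_mask, unsigned long max_tries)
{
	uint32_t status;

	do {
		status = read_status(hw);
		if (status & ready_mask)
			return true;
	} while (max_tries--);

	return false;
}

In the removed code the firmware-ready check uses a budget of 200000 iterations.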
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.h b/drivers/staging/rtl8192u/r819xU_firmware.h
deleted file mode 100644
index b84344c1e62b..000000000000
--- a/drivers/staging/rtl8192u/r819xU_firmware.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __INC_FIRMWARE_H
-#define __INC_FIRMWARE_H
-
-#define GET_COMMAND_PACKET_FRAG_THRESHOLD(v) \
- (4 * ((v) / 4) - 8 - USB_HWDESC_HEADER_LEN)
-
-enum firmware_init_step_e {
- FW_INIT_STEP0_BOOT = 0,
- FW_INIT_STEP1_MAIN = 1,
- FW_INIT_STEP2_DATA = 2,
-};
-
-enum opt_rst_type_e {
- OPT_SYSTEM_RESET = 0,
- OPT_FIRMWARE_RESET = 1,
-};
-
-#endif
diff --git a/drivers/staging/rtl8192u/r819xU_firmware_img.c b/drivers/staging/rtl8192u/r819xU_firmware_img.c
deleted file mode 100644
index 0af062036688..000000000000
--- a/drivers/staging/rtl8192u/r819xU_firmware_img.c
+++ /dev/null
@@ -1,549 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*Created on 2008/ 7/16, 5:31*/
-#include <linux/types.h>
-#include "r819xU_firmware_img.h"
-
-u32 Rtl8192UsbPHY_REGArray[] = {
- 0x0, };
-
-u32 Rtl8192UsbPHY_REG_1T2RArray[] = {
- 0x800, 0x00000000,
- 0x804, 0x00000001,
- 0x808, 0x0000fc00,
- 0x80c, 0x0000001c,
- 0x810, 0x801010aa,
- 0x814, 0x008514d0,
- 0x818, 0x00000040,
- 0x81c, 0x00000000,
- 0x820, 0x00000004,
- 0x824, 0x00690000,
- 0x828, 0x00000004,
- 0x82c, 0x00e90000,
- 0x830, 0x00000004,
- 0x834, 0x00690000,
- 0x838, 0x00000004,
- 0x83c, 0x00e90000,
- 0x840, 0x00000000,
- 0x844, 0x00000000,
- 0x848, 0x00000000,
- 0x84c, 0x00000000,
- 0x850, 0x00000000,
- 0x854, 0x00000000,
- 0x858, 0x65a965a9,
- 0x85c, 0x65a965a9,
- 0x860, 0x001f0010,
- 0x864, 0x007f0010,
- 0x868, 0x001f0010,
- 0x86c, 0x007f0010,
- 0x870, 0x0f100f70,
- 0x874, 0x0f100f70,
- 0x878, 0x00000000,
- 0x87c, 0x00000000,
- 0x880, 0x6870e36c,
- 0x884, 0xe3573600,
- 0x888, 0x4260c340,
- 0x88c, 0x0000ff00,
- 0x890, 0x00000000,
- 0x894, 0xfffffffe,
- 0x898, 0x4c42382f,
- 0x89c, 0x00656056,
- 0x8b0, 0x00000000,
- 0x8e0, 0x00000000,
- 0x8e4, 0x00000000,
- 0x900, 0x00000000,
- 0x904, 0x00000023,
- 0x908, 0x00000000,
- 0x90c, 0x31121311,
- 0xa00, 0x00d0c7d8,
- 0xa04, 0x811f0008,
- 0xa08, 0x80cd8300,
- 0xa0c, 0x2e62740f,
- 0xa10, 0x95009b78,
- 0xa14, 0x11145008,
- 0xa18, 0x00881117,
- 0xa1c, 0x89140fa0,
- 0xa20, 0x1a1b0000,
- 0xa24, 0x090e1317,
- 0xa28, 0x00000204,
- 0xa2c, 0x00000000,
- 0xc00, 0x00000040,
- 0xc04, 0x00005433,
- 0xc08, 0x000000e4,
- 0xc0c, 0x6c6c6c6c,
- 0xc10, 0x08800000,
- 0xc14, 0x40000100,
- 0xc18, 0x08000000,
- 0xc1c, 0x40000100,
- 0xc20, 0x08000000,
- 0xc24, 0x40000100,
- 0xc28, 0x08000000,
- 0xc2c, 0x40000100,
- 0xc30, 0x6de9ac44,
- 0xc34, 0x465c52cd,
- 0xc38, 0x497f5994,
- 0xc3c, 0x0a969764,
- 0xc40, 0x1f7c403f,
- 0xc44, 0x000100b7,
- 0xc48, 0xec020000,
- 0xc4c, 0x00000300,
- 0xc50, 0x69543420,
- 0xc54, 0x433c0094,
- 0xc58, 0x69543420,
- 0xc5c, 0x433c0094,
- 0xc60, 0x69543420,
- 0xc64, 0x433c0094,
- 0xc68, 0x69543420,
- 0xc6c, 0x433c0094,
- 0xc70, 0x2c7f000d,
- 0xc74, 0x0186175b,
- 0xc78, 0x0000001f,
- 0xc7c, 0x00b91612,
- 0xc80, 0x40000100,
- 0xc84, 0x20000000,
- 0xc88, 0x40000100,
- 0xc8c, 0x20200000,
- 0xc90, 0x40000100,
- 0xc94, 0x00000000,
- 0xc98, 0x40000100,
- 0xc9c, 0x00000000,
- 0xca0, 0x00492492,
- 0xca4, 0x00000000,
- 0xca8, 0x00000000,
- 0xcac, 0x00000000,
- 0xcb0, 0x00000000,
- 0xcb4, 0x00000000,
- 0xcb8, 0x00000000,
- 0xcbc, 0x00492492,
- 0xcc0, 0x00000000,
- 0xcc4, 0x00000000,
- 0xcc8, 0x00000000,
- 0xccc, 0x00000000,
- 0xcd0, 0x00000000,
- 0xcd4, 0x00000000,
- 0xcd8, 0x64b22427,
- 0xcdc, 0x00766932,
- 0xce0, 0x00222222,
- 0xd00, 0x00000750,
- 0xd04, 0x00000403,
- 0xd08, 0x0000907f,
- 0xd0c, 0x00000001,
- 0xd10, 0xa0633333,
- 0xd14, 0x33333c63,
- 0xd18, 0x6a8f5b6b,
- 0xd1c, 0x00000000,
- 0xd20, 0x00000000,
- 0xd24, 0x00000000,
- 0xd28, 0x00000000,
- 0xd2c, 0xcc979975,
- 0xd30, 0x00000000,
- 0xd34, 0x00000000,
- 0xd38, 0x00000000,
- 0xd3c, 0x00027293,
- 0xd40, 0x00000000,
- 0xd44, 0x00000000,
- 0xd48, 0x00000000,
- 0xd4c, 0x00000000,
- 0xd50, 0x6437140a,
- 0xd54, 0x024dbd02,
- 0xd58, 0x00000000,
- 0xd5c, 0x04032064,
- 0xe00, 0x161a1a1a,
- 0xe04, 0x12121416,
- 0xe08, 0x00001800,
- 0xe0c, 0x00000000,
- 0xe10, 0x161a1a1a,
- 0xe14, 0x12121416,
- 0xe18, 0x161a1a1a,
- 0xe1c, 0x12121416,
-};
-
-u32 Rtl8192UsbRadioA_Array[] = {
- 0x019, 0x00000003,
- 0x000, 0x000000bf,
- 0x001, 0x00000ee0,
- 0x002, 0x0000004c,
- 0x003, 0x000007f1,
- 0x004, 0x00000975,
- 0x005, 0x00000c58,
- 0x006, 0x00000ae6,
- 0x007, 0x000000ca,
- 0x008, 0x00000e1c,
- 0x009, 0x000007f0,
- 0x00a, 0x000009d0,
- 0x00b, 0x000001ba,
- 0x00c, 0x00000240,
- 0x00e, 0x00000020,
- 0x00f, 0x00000990,
- 0x012, 0x00000806,
- 0x014, 0x000005ab,
- 0x015, 0x00000f80,
- 0x016, 0x00000020,
- 0x017, 0x00000597,
- 0x018, 0x0000050a,
- 0x01a, 0x00000f80,
- 0x01b, 0x00000f5e,
- 0x01c, 0x00000008,
- 0x01d, 0x00000607,
- 0x01e, 0x000006cc,
- 0x01f, 0x00000000,
- 0x020, 0x000001a5,
- 0x01f, 0x00000001,
- 0x020, 0x00000165,
- 0x01f, 0x00000002,
- 0x020, 0x000000c6,
- 0x01f, 0x00000003,
- 0x020, 0x00000086,
- 0x01f, 0x00000004,
- 0x020, 0x00000046,
- 0x01f, 0x00000005,
- 0x020, 0x000001e6,
- 0x01f, 0x00000006,
- 0x020, 0x000001a6,
- 0x01f, 0x00000007,
- 0x020, 0x00000166,
- 0x01f, 0x00000008,
- 0x020, 0x000000c7,
- 0x01f, 0x00000009,
- 0x020, 0x00000087,
- 0x01f, 0x0000000a,
- 0x020, 0x000000f7,
- 0x01f, 0x0000000b,
- 0x020, 0x000000d7,
- 0x01f, 0x0000000c,
- 0x020, 0x000000b7,
- 0x01f, 0x0000000d,
- 0x020, 0x00000097,
- 0x01f, 0x0000000e,
- 0x020, 0x00000077,
- 0x01f, 0x0000000f,
- 0x020, 0x00000057,
- 0x01f, 0x00000010,
- 0x020, 0x00000037,
- 0x01f, 0x00000011,
- 0x020, 0x000000fb,
- 0x01f, 0x00000012,
- 0x020, 0x000000db,
- 0x01f, 0x00000013,
- 0x020, 0x000000bb,
- 0x01f, 0x00000014,
- 0x020, 0x000000ff,
- 0x01f, 0x00000015,
- 0x020, 0x000000e3,
- 0x01f, 0x00000016,
- 0x020, 0x000000c3,
- 0x01f, 0x00000017,
- 0x020, 0x000000a3,
- 0x01f, 0x00000018,
- 0x020, 0x00000083,
- 0x01f, 0x00000019,
- 0x020, 0x00000063,
- 0x01f, 0x0000001a,
- 0x020, 0x00000043,
- 0x01f, 0x0000001b,
- 0x020, 0x00000023,
- 0x01f, 0x0000001c,
- 0x020, 0x00000003,
- 0x01f, 0x0000001d,
- 0x020, 0x000001e3,
- 0x01f, 0x0000001e,
- 0x020, 0x000001c3,
- 0x01f, 0x0000001f,
- 0x020, 0x000001a3,
- 0x01f, 0x00000020,
- 0x020, 0x00000183,
- 0x01f, 0x00000021,
- 0x020, 0x00000163,
- 0x01f, 0x00000022,
- 0x020, 0x00000143,
- 0x01f, 0x00000023,
- 0x020, 0x00000123,
- 0x01f, 0x00000024,
- 0x020, 0x00000103,
- 0x023, 0x00000203,
- 0x024, 0x00000200,
- 0x00b, 0x000001ba,
- 0x02c, 0x000003d7,
- 0x02d, 0x00000ff0,
- 0x000, 0x00000037,
- 0x004, 0x00000160,
- 0x007, 0x00000080,
- 0x002, 0x0000088d,
- 0x0fe, 0x00000000,
- 0x0fe, 0x00000000,
- 0x016, 0x00000200,
- 0x016, 0x00000380,
- 0x016, 0x00000020,
- 0x016, 0x000001a0,
- 0x000, 0x000000bf,
- 0x00d, 0x0000001f,
- 0x00d, 0x00000c9f,
- 0x002, 0x0000004d,
- 0x000, 0x00000cbf,
- 0x004, 0x00000975,
- 0x007, 0x00000700,
-};
-
-u32 Rtl8192UsbRadioB_Array[] = {
- 0x019, 0x00000003,
- 0x000, 0x000000bf,
- 0x001, 0x000006e0,
- 0x002, 0x0000004c,
- 0x003, 0x000007f1,
- 0x004, 0x00000975,
- 0x005, 0x00000c58,
- 0x006, 0x00000ae6,
- 0x007, 0x000000ca,
- 0x008, 0x00000e1c,
- 0x000, 0x000000b7,
- 0x00a, 0x00000850,
- 0x000, 0x000000bf,
- 0x00b, 0x000001ba,
- 0x00c, 0x00000240,
- 0x00e, 0x00000020,
- 0x015, 0x00000f80,
- 0x016, 0x00000020,
- 0x017, 0x00000597,
- 0x018, 0x0000050a,
- 0x01a, 0x00000e00,
- 0x01b, 0x00000f5e,
- 0x01d, 0x00000607,
- 0x01e, 0x000006cc,
- 0x00b, 0x000001ba,
- 0x023, 0x00000203,
- 0x024, 0x00000200,
- 0x000, 0x00000037,
- 0x004, 0x00000160,
- 0x016, 0x00000200,
- 0x016, 0x00000380,
- 0x016, 0x00000020,
- 0x016, 0x000001a0,
- 0x00d, 0x00000ccc,
- 0x000, 0x000000bf,
- 0x002, 0x0000004d,
- 0x000, 0x00000cbf,
- 0x004, 0x00000975,
- 0x007, 0x00000700,
-};
-
-u32 Rtl8192UsbRadioC_Array[] = {
- 0x0, };
-
-u32 Rtl8192UsbRadioD_Array[] = {
- 0x0, };
-
-u32 Rtl8192UsbMACPHY_Array[] = {
- 0x03c, 0xffff0000, 0x00000f0f,
- 0x340, 0xffffffff, 0x161a1a1a,
- 0x344, 0xffffffff, 0x12121416,
- 0x348, 0x0000ffff, 0x00001818,
- 0x12c, 0xffffffff, 0x04000802,
- 0x318, 0x00000fff, 0x00000100,
-};
-
-u32 Rtl8192UsbMACPHY_Array_PG[] = {
- 0x03c, 0xffff0000, 0x00000f0f,
- 0xe00, 0xffffffff, 0x06090909,
- 0xe04, 0xffffffff, 0x00030306,
- 0xe08, 0x0000ff00, 0x00000000,
- 0xe10, 0xffffffff, 0x0a0c0d0f,
- 0xe14, 0xffffffff, 0x06070809,
- 0xe18, 0xffffffff, 0x0a0c0d0f,
- 0xe1c, 0xffffffff, 0x06070809,
- 0x12c, 0xffffffff, 0x04000802,
- 0x318, 0x00000fff, 0x00000800,
-};
-
-u32 Rtl8192UsbAGCTAB_Array[] = {
- 0xc78, 0x7d000001,
- 0xc78, 0x7d010001,
- 0xc78, 0x7d020001,
- 0xc78, 0x7d030001,
- 0xc78, 0x7d040001,
- 0xc78, 0x7d050001,
- 0xc78, 0x7c060001,
- 0xc78, 0x7b070001,
- 0xc78, 0x7a080001,
- 0xc78, 0x79090001,
- 0xc78, 0x780a0001,
- 0xc78, 0x770b0001,
- 0xc78, 0x760c0001,
- 0xc78, 0x750d0001,
- 0xc78, 0x740e0001,
- 0xc78, 0x730f0001,
- 0xc78, 0x72100001,
- 0xc78, 0x71110001,
- 0xc78, 0x70120001,
- 0xc78, 0x6f130001,
- 0xc78, 0x6e140001,
- 0xc78, 0x6d150001,
- 0xc78, 0x6c160001,
- 0xc78, 0x6b170001,
- 0xc78, 0x6a180001,
- 0xc78, 0x69190001,
- 0xc78, 0x681a0001,
- 0xc78, 0x671b0001,
- 0xc78, 0x661c0001,
- 0xc78, 0x651d0001,
- 0xc78, 0x641e0001,
- 0xc78, 0x491f0001,
- 0xc78, 0x48200001,
- 0xc78, 0x47210001,
- 0xc78, 0x46220001,
- 0xc78, 0x45230001,
- 0xc78, 0x44240001,
- 0xc78, 0x43250001,
- 0xc78, 0x28260001,
- 0xc78, 0x27270001,
- 0xc78, 0x26280001,
- 0xc78, 0x25290001,
- 0xc78, 0x242a0001,
- 0xc78, 0x232b0001,
- 0xc78, 0x222c0001,
- 0xc78, 0x212d0001,
- 0xc78, 0x202e0001,
- 0xc78, 0x0a2f0001,
- 0xc78, 0x08300001,
- 0xc78, 0x06310001,
- 0xc78, 0x05320001,
- 0xc78, 0x04330001,
- 0xc78, 0x03340001,
- 0xc78, 0x02350001,
- 0xc78, 0x01360001,
- 0xc78, 0x00370001,
- 0xc78, 0x00380001,
- 0xc78, 0x00390001,
- 0xc78, 0x003a0001,
- 0xc78, 0x003b0001,
- 0xc78, 0x003c0001,
- 0xc78, 0x003d0001,
- 0xc78, 0x003e0001,
- 0xc78, 0x003f0001,
- 0xc78, 0x7d400001,
- 0xc78, 0x7d410001,
- 0xc78, 0x7d420001,
- 0xc78, 0x7d430001,
- 0xc78, 0x7d440001,
- 0xc78, 0x7d450001,
- 0xc78, 0x7c460001,
- 0xc78, 0x7b470001,
- 0xc78, 0x7a480001,
- 0xc78, 0x79490001,
- 0xc78, 0x784a0001,
- 0xc78, 0x774b0001,
- 0xc78, 0x764c0001,
- 0xc78, 0x754d0001,
- 0xc78, 0x744e0001,
- 0xc78, 0x734f0001,
- 0xc78, 0x72500001,
- 0xc78, 0x71510001,
- 0xc78, 0x70520001,
- 0xc78, 0x6f530001,
- 0xc78, 0x6e540001,
- 0xc78, 0x6d550001,
- 0xc78, 0x6c560001,
- 0xc78, 0x6b570001,
- 0xc78, 0x6a580001,
- 0xc78, 0x69590001,
- 0xc78, 0x685a0001,
- 0xc78, 0x675b0001,
- 0xc78, 0x665c0001,
- 0xc78, 0x655d0001,
- 0xc78, 0x645e0001,
- 0xc78, 0x495f0001,
- 0xc78, 0x48600001,
- 0xc78, 0x47610001,
- 0xc78, 0x46620001,
- 0xc78, 0x45630001,
- 0xc78, 0x44640001,
- 0xc78, 0x43650001,
- 0xc78, 0x28660001,
- 0xc78, 0x27670001,
- 0xc78, 0x26680001,
- 0xc78, 0x25690001,
- 0xc78, 0x246a0001,
- 0xc78, 0x236b0001,
- 0xc78, 0x226c0001,
- 0xc78, 0x216d0001,
- 0xc78, 0x206e0001,
- 0xc78, 0x0a6f0001,
- 0xc78, 0x08700001,
- 0xc78, 0x06710001,
- 0xc78, 0x05720001,
- 0xc78, 0x04730001,
- 0xc78, 0x03740001,
- 0xc78, 0x02750001,
- 0xc78, 0x01760001,
- 0xc78, 0x00770001,
- 0xc78, 0x00780001,
- 0xc78, 0x00790001,
- 0xc78, 0x007a0001,
- 0xc78, 0x007b0001,
- 0xc78, 0x007c0001,
- 0xc78, 0x007d0001,
- 0xc78, 0x007e0001,
- 0xc78, 0x007f0001,
- 0xc78, 0x2e00001e,
- 0xc78, 0x2e01001e,
- 0xc78, 0x2e02001e,
- 0xc78, 0x2e03001e,
- 0xc78, 0x2e04001e,
- 0xc78, 0x2e05001e,
- 0xc78, 0x3006001e,
- 0xc78, 0x3407001e,
- 0xc78, 0x3908001e,
- 0xc78, 0x3c09001e,
- 0xc78, 0x3f0a001e,
- 0xc78, 0x420b001e,
- 0xc78, 0x440c001e,
- 0xc78, 0x450d001e,
- 0xc78, 0x460e001e,
- 0xc78, 0x460f001e,
- 0xc78, 0x4710001e,
- 0xc78, 0x4811001e,
- 0xc78, 0x4912001e,
- 0xc78, 0x4a13001e,
- 0xc78, 0x4b14001e,
- 0xc78, 0x4b15001e,
- 0xc78, 0x4c16001e,
- 0xc78, 0x4d17001e,
- 0xc78, 0x4e18001e,
- 0xc78, 0x4f19001e,
- 0xc78, 0x4f1a001e,
- 0xc78, 0x501b001e,
- 0xc78, 0x511c001e,
- 0xc78, 0x521d001e,
- 0xc78, 0x521e001e,
- 0xc78, 0x531f001e,
- 0xc78, 0x5320001e,
- 0xc78, 0x5421001e,
- 0xc78, 0x5522001e,
- 0xc78, 0x5523001e,
- 0xc78, 0x5624001e,
- 0xc78, 0x5725001e,
- 0xc78, 0x5726001e,
- 0xc78, 0x5827001e,
- 0xc78, 0x5828001e,
- 0xc78, 0x5929001e,
- 0xc78, 0x592a001e,
- 0xc78, 0x5a2b001e,
- 0xc78, 0x5b2c001e,
- 0xc78, 0x5c2d001e,
- 0xc78, 0x5c2e001e,
- 0xc78, 0x5d2f001e,
- 0xc78, 0x5e30001e,
- 0xc78, 0x5f31001e,
- 0xc78, 0x6032001e,
- 0xc78, 0x6033001e,
- 0xc78, 0x6134001e,
- 0xc78, 0x6235001e,
- 0xc78, 0x6336001e,
- 0xc78, 0x6437001e,
- 0xc78, 0x6438001e,
- 0xc78, 0x6539001e,
- 0xc78, 0x663a001e,
- 0xc78, 0x673b001e,
- 0xc78, 0x673c001e,
- 0xc78, 0x683d001e,
- 0xc78, 0x693e001e,
- 0xc78, 0x6a3f001e,
-};
diff --git a/drivers/staging/rtl8192u/r819xU_firmware_img.h b/drivers/staging/rtl8192u/r819xU_firmware_img.h
deleted file mode 100644
index 61585a72465e..000000000000
--- a/drivers/staging/rtl8192u/r819xU_firmware_img.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef IMG_H
-#define IMG_H
-
-#define MACPHY_Array_PGLength 30
-#define PHY_REG_1T2RArrayLength 296
-#define AGCTAB_ArrayLength 384
-#define MACPHY_ArrayLength 18
-
-#define RadioA_ArrayLength 246
-#define RadioB_ArrayLength 78
-#define RadioC_ArrayLength 1
-#define RadioD_ArrayLength 1
-#define PHY_REGArrayLength 1
-
-extern u32 Rtl8192UsbPHY_REGArray[];
-extern u32 Rtl8192UsbPHY_REG_1T2RArray[];
-extern u32 Rtl8192UsbRadioA_Array[];
-extern u32 Rtl8192UsbRadioB_Array[];
-extern u32 Rtl8192UsbRadioC_Array[];
-extern u32 Rtl8192UsbRadioD_Array[];
-extern u32 Rtl8192UsbMACPHY_Array[];
-extern u32 Rtl8192UsbMACPHY_Array_PG[];
-extern u32 Rtl8192UsbAGCTAB_Array[];
-
-#endif
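The arrays declared above and defined in r819xU_firmware_img.c are flat u32 tables: the PHY, radio and AGC tables hold {address, value} pairs, while the MACPHY tables hold {address, mask, value} triples. A minimal sketch of the pair-walking loop (the triple case only adds a mask argument) follows; write_reg() is a hypothetical stand-in for rtl8192_setBBreg().

#include <stddef.h>
#include <stdint.h>

/* Hypothetical register-write helper; stands in for rtl8192_setBBreg(). */
static void write_reg(uint32_t addr, uint32_t value)
{
	(void)addr;		/* device access would go here */
	(void)value;
}

/*
 * Walk a flat {addr, value, addr, value, ...} table such as
 * Rtl8192UsbPHY_REG_1T2RArray and program each register in order.
 */
static void load_pair_table(const uint32_t *table, size_t len)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		write_reg(table[i], table[i + 1]);
}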
diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c
deleted file mode 100644
index e6836eacc7aa..000000000000
--- a/drivers/staging/rtl8192u/r819xU_phy.c
+++ /dev/null
@@ -1,1646 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "r8192U.h"
-#include "r8192U_hw.h"
-#include "r819xU_phy.h"
-#include "r819xU_phyreg.h"
-#include "r8190_rtl8256.h"
-#include "r8192U_dm.h"
-#include "r819xU_firmware_img.h"
-
-#include "ieee80211/dot11d.h"
-#include <linux/bitops.h>
-
-static u32 RF_CHANNEL_TABLE_ZEBRA[] = {
- 0,
- 0x085c, /* 2412 1 */
- 0x08dc, /* 2417 2 */
- 0x095c, /* 2422 3 */
- 0x09dc, /* 2427 4 */
- 0x0a5c, /* 2432 5 */
- 0x0adc, /* 2437 6 */
- 0x0b5c, /* 2442 7 */
- 0x0bdc, /* 2447 8 */
- 0x0c5c, /* 2452 9 */
- 0x0cdc, /* 2457 10 */
- 0x0d5c, /* 2462 11 */
- 0x0ddc, /* 2467 12 */
- 0x0e5c, /* 2472 13 */
- 0x0f72, /* 2484 */
-};
-
-#define rtl819XMACPHY_Array Rtl8192UsbMACPHY_Array
-
-/******************************************************************************
- * function: This function checks, for the current RF type, whether the given
- *           RF path is legal. If the RF path is illegal, we return false.
- * input: net_device *dev
- * u32 e_rfpath
- * output: none
- * return: 0(illegal, false), 1(legal, true)
- *****************************************************************************/
-u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device *dev, u32 e_rfpath)
-{
- u8 ret = 1;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (priv->rf_type == RF_2T4R) {
- ret = 0;
- } else if (priv->rf_type == RF_1T2R) {
- if (e_rfpath == RF90_PATH_A || e_rfpath == RF90_PATH_B)
- ret = 1;
- else if (e_rfpath == RF90_PATH_C || e_rfpath == RF90_PATH_D)
- ret = 0;
- }
- return ret;
-}
-
-/******************************************************************************
- * function: This function sets specific bits to BB register
- * input: net_device *dev
- * u32 reg_addr //target addr to be modified
- * u32 bitmask //target bit positions to be modified
- * u32 data //value to be written
- * output: none
- * return: none
- * notice:
- ******************************************************************************/
-void rtl8192_setBBreg(struct net_device *dev, u32 reg_addr, u32 bitmask,
- u32 data)
-{
- u32 reg, bitshift;
-
- if (bitmask != bMaskDWord) {
- read_nic_dword(dev, reg_addr, &reg);
- bitshift = ffs(bitmask) - 1;
- reg &= ~bitmask;
- reg |= data << bitshift;
- write_nic_dword(dev, reg_addr, reg);
- } else {
- write_nic_dword(dev, reg_addr, data);
- }
-}
-
-/******************************************************************************
- * function: This function reads specific bits from BB register
- * input: net_device *dev
- * u32 reg_addr //target addr to be read back
- * u32 bitmask //target bit positions to be read back
- * output: none
- * return: u32 data //the readback register value
- * notice:
- ******************************************************************************/
-u32 rtl8192_QueryBBReg(struct net_device *dev, u32 reg_addr, u32 bitmask)
-{
- u32 reg, bitshift;
-
- read_nic_dword(dev, reg_addr, &reg);
- bitshift = ffs(bitmask) - 1;
-
- return (reg & bitmask) >> bitshift;
-}
-
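rtl8192_setBBreg() and rtl8192_QueryBBReg() above perform a read-modify-write or masked read on an arbitrary bitmask, deriving the shift from the lowest set bit of the mask (ffs(mask) - 1). A standalone sketch of the same arithmetic follows; reg_read32()/reg_write32() and the backing fake_regs array are hypothetical stand-ins for read_nic_dword()/write_nic_dword().

#include <stdint.h>

/* Hypothetical accessors backed by a fake register file, standing in
 * for read_nic_dword()/write_nic_dword(). */
static uint32_t fake_regs[0x1000];
static uint32_t reg_read32(uint32_t addr)            { return fake_regs[addr & 0xfff]; }
static void reg_write32(uint32_t addr, uint32_t val) { fake_regs[addr & 0xfff] = val; }

/* Position of the lowest set bit of a non-zero mask, i.e. ffs(mask) - 1. */
static unsigned int mask_shift(uint32_t mask)
{
	unsigned int shift = 0;

	while (!(mask & 1)) {
		mask >>= 1;
		shift++;
	}
	return shift;
}

/* Update only the bits selected by mask, as rtl8192_setBBreg() does.
 * data must already fit within the width of the mask. */
static void set_bits(uint32_t addr, uint32_t mask, uint32_t data)
{
	uint32_t reg = reg_read32(addr);

	reg = (reg & ~mask) | (data << mask_shift(mask));
	reg_write32(addr, reg);
}

/* Extract only the bits selected by mask, as rtl8192_QueryBBReg() does. */
static uint32_t get_bits(uint32_t addr, uint32_t mask)
{
	return (reg_read32(addr) & mask) >> mask_shift(mask);
}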
-static u32 phy_FwRFSerialRead(struct net_device *dev,
- enum rf90_radio_path_e e_rfpath,
- u32 offset);
-
-static void phy_FwRFSerialWrite(struct net_device *dev,
- enum rf90_radio_path_e e_rfpath,
- u32 offset,
- u32 data);
-
-/******************************************************************************
- * function: This function reads register from RF chip
- * input: net_device *dev
- * rf90_radio_path_e e_rfpath //radio path of A/B/C/D
- * u32 offset //target address to be read
- * output: none
- * return: u32 readback value
- * notice: There are three types of serial operations:
- * (1) Software serial write.
- * (2) Hardware LSSI - Low Speed Serial Interface.
- * (3) Hardware HSSI - High Speed Serial Interface write.
- * The driver here needs to implement (1) and (2)
- * ---more detailed spec is needed for this information.
- ******************************************************************************/
-static u32 rtl8192_phy_RFSerialRead(struct net_device *dev,
- enum rf90_radio_path_e e_rfpath, u32 offset)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u32 ret = 0;
- u32 new_offset = 0;
- BB_REGISTER_DEFINITION_T *pPhyReg = &priv->PHYRegDef[e_rfpath];
-
- rtl8192_setBBreg(dev, pPhyReg->rfLSSIReadBack, bLSSIReadBackData, 0);
- /* Make sure RF register offset is correct */
- offset &= 0x3f;
-
- /* Switch page for 8256 RF IC */
- if (priv->rf_chip == RF_8256) {
- if (offset >= 31) {
- priv->RfReg0Value[e_rfpath] |= 0x140;
- /* Switch to Reg_Mode2 for Reg 31-45 */
- rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
- bMaskDWord,
- priv->RfReg0Value[e_rfpath]<<16);
- /* Modify offset */
- new_offset = offset - 30;
- } else if (offset >= 16) {
- priv->RfReg0Value[e_rfpath] |= 0x100;
- priv->RfReg0Value[e_rfpath] &= (~0x40);
- /* Switch to Reg_Mode1 for Reg16-30 */
- rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
- bMaskDWord,
- priv->RfReg0Value[e_rfpath]<<16);
-
- new_offset = offset - 15;
- } else {
- new_offset = offset;
- }
- } else {
- RT_TRACE((COMP_PHY|COMP_ERR),
- "check RF type here, need to be 8256\n");
- new_offset = offset;
- }
- /* Put desired read addr to LSSI control Register */
- rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadAddress,
- new_offset);
- /* Issue a posedge trigger */
- rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadEdge, 0x0);
- rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadEdge, 0x1);
-
- /* TODO: we should not delay such a long time. Ask for help from SD3 */
- usleep_range(1000, 1000);
-
- ret = rtl8192_QueryBBReg(dev, pPhyReg->rfLSSIReadBack,
- bLSSIReadBackData);
-
- /* Switch back to Reg_Mode0 */
- if (priv->rf_chip == RF_8256) {
- priv->RfReg0Value[e_rfpath] &= 0xebf;
-
- rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord,
- priv->RfReg0Value[e_rfpath] << 16);
- }
-
- return ret;
-}
-
-/******************************************************************************
- * function: This function writes data to RF register
- * input: net_device *dev
- * rf90_radio_path_e e_rfpath //radio path of A/B/C/D
- * u32 offset //target address to be written
- * u32 data //the new register data to be written
- * output: none
- * return: none
- * notice: For RF8256 only.
- * ===========================================================================
- * Reg Mode RegCTL[1] RegCTL[0] Note
- * (Reg00[12]) (Reg00[10])
- * ===========================================================================
- * Reg_Mode0 0 x Reg 0 ~ 15(0x0 ~ 0xf)
- * ---------------------------------------------------------------------------
- * Reg_Mode1 1 0 Reg 16 ~ 30(0x1 ~ 0xf)
- * ---------------------------------------------------------------------------
- * Reg_Mode2 1 1 Reg 31 ~ 45(0x1 ~ 0xf)
- * ---------------------------------------------------------------------------
- *****************************************************************************/
-static void rtl8192_phy_RFSerialWrite(struct net_device *dev,
- enum rf90_radio_path_e e_rfpath,
- u32 offset,
- u32 data)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u32 DataAndAddr = 0, new_offset = 0;
- BB_REGISTER_DEFINITION_T *pPhyReg = &priv->PHYRegDef[e_rfpath];
-
- offset &= 0x3f;
- if (priv->rf_chip == RF_8256) {
- if (offset >= 31) {
- priv->RfReg0Value[e_rfpath] |= 0x140;
- rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
- bMaskDWord,
- priv->RfReg0Value[e_rfpath] << 16);
- new_offset = offset - 30;
- } else if (offset >= 16) {
- priv->RfReg0Value[e_rfpath] |= 0x100;
- priv->RfReg0Value[e_rfpath] &= (~0x40);
- rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
- bMaskDWord,
- priv->RfReg0Value[e_rfpath]<<16);
- new_offset = offset - 15;
- } else {
- new_offset = offset;
- }
- } else {
- RT_TRACE((COMP_PHY|COMP_ERR),
- "check RF type here, need to be 8256\n");
- new_offset = offset;
- }
-
- /* Put write addr in [5:0] and write data in [31:16] */
- DataAndAddr = (data<<16) | (new_offset&0x3f);
-
- /* Write operation */
- rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr);
-
- if (offset == 0x0)
- priv->RfReg0Value[e_rfpath] = data;
-
- /* Switch back to Reg_Mode0 */
- if (priv->rf_chip == RF_8256) {
- if (offset != 0) {
- priv->RfReg0Value[e_rfpath] &= 0xebf;
- rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
- bMaskDWord,
- priv->RfReg0Value[e_rfpath] << 16);
- }
- }
-}
-
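The Reg_Mode table above and the two functions that surround it fold RF_8256 register offsets 16-45 back into the low register window by setting page-select bits in the cached value of RF register 0. The remapping alone, restated as a standalone helper (not a separate driver API), looks roughly like this:

#include <stdint.h>

/*
 * Map a logical RF_8256 offset (0-45) to the page-select bits that must be
 * set in the cached RF register 0 value (RfReg0Value in the driver) and to
 * the remapped offset written to the 3-wire interface. Mirrors the logic in
 * the removed rtl8192_phy_RFSerialRead()/Write().
 */
static uint32_t rf8256_remap_offset(uint32_t offset, uint32_t *reg0)
{
	offset &= 0x3f;

	if (offset >= 31) {		/* Reg_Mode2: regs 31-45 */
		*reg0 |= 0x140;
		return offset - 30;
	}
	if (offset >= 16) {		/* Reg_Mode1: regs 16-30 */
		*reg0 |= 0x100;
		*reg0 &= ~0x40U;
		return offset - 15;
	}
	return offset;			/* Reg_Mode0: regs 0-15 */
}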
-/******************************************************************************
- * function: This function sets specific bits in an RF register
- * input: net_device *dev
- * rf90_radio_path_e e_rfpath //radio path of A/B/C/D
- * u32 reg_addr //target addr to be modified
- * u32 bitmask //target bit positions to be modified
- * u32 data //value to be written
- * output: none
- * return: none
- * notice:
- *****************************************************************************/
-void rtl8192_phy_SetRFReg(struct net_device *dev,
- enum rf90_radio_path_e e_rfpath,
- u32 reg_addr, u32 bitmask, u32 data)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u32 reg, bitshift;
-
- if (!rtl8192_phy_CheckIsLegalRFPath(dev, e_rfpath))
- return;
-
- if (priv->Rf_Mode == RF_OP_By_FW) {
- if (bitmask != bMask12Bits) {
- /* RF data is 12 bits only */
- reg = phy_FwRFSerialRead(dev, e_rfpath, reg_addr);
- bitshift = ffs(bitmask) - 1;
- reg &= ~bitmask;
- reg |= data << bitshift;
-
- phy_FwRFSerialWrite(dev, e_rfpath, reg_addr, reg);
- } else {
- phy_FwRFSerialWrite(dev, e_rfpath, reg_addr, data);
- }
-
- udelay(200);
-
- } else {
- if (bitmask != bMask12Bits) {
- /* RF data is 12 bits only */
- reg = rtl8192_phy_RFSerialRead(dev, e_rfpath, reg_addr);
- bitshift = ffs(bitmask) - 1;
- reg &= ~bitmask;
- reg |= data << bitshift;
-
- rtl8192_phy_RFSerialWrite(dev, e_rfpath, reg_addr, reg);
- } else {
- rtl8192_phy_RFSerialWrite(dev, e_rfpath, reg_addr, data);
- }
- }
-}
-
-/******************************************************************************
- * function: This function reads specific bits from RF register
- * input: net_device *dev
- * u32 reg_addr //target addr to be read back
- * u32 bitmask //target bit positions to be read back
- * output: none
- * return: u32 data //the readback register value
- * notice:
- *****************************************************************************/
-u32 rtl8192_phy_QueryRFReg(struct net_device *dev,
- enum rf90_radio_path_e e_rfpath,
- u32 reg_addr, u32 bitmask)
-{
- u32 reg, bitshift;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (!rtl8192_phy_CheckIsLegalRFPath(dev, e_rfpath))
- return 0;
- if (priv->Rf_Mode == RF_OP_By_FW) {
- reg = phy_FwRFSerialRead(dev, e_rfpath, reg_addr);
- udelay(200);
- } else {
- reg = rtl8192_phy_RFSerialRead(dev, e_rfpath, reg_addr);
- }
- bitshift = ffs(bitmask) - 1;
- reg = (reg & bitmask) >> bitshift;
- return reg;
-}
-
-/******************************************************************************
- * function: Firmware-assisted RF register read (RF R/W executed by firmware).
- * input: net_device *dev
- * rf90_radio_path_e e_rfpath
- * u32 offset
- * output: none
- * return: u32
- * notice:
- ****************************************************************************/
-static u32 phy_FwRFSerialRead(struct net_device *dev,
- enum rf90_radio_path_e e_rfpath,
- u32 offset)
-{
- u32 reg = 0;
- u32 data = 0;
- u8 time = 0;
- u32 tmp;
-
- /* Firmware RF Write control.
- * We can not execute the scheme in the initial step.
- * Otherwise, RF-R/W will waste much time.
- * This is only for site survey.
- */
- /* 1. Read operation need not insert data. bit 0-11 */
- /* 2. Write RF register address. bit 12-19 */
- data |= ((offset&0xFF)<<12);
- /* 3. Write RF path. bit 20-21 */
- data |= ((e_rfpath&0x3)<<20);
- /* 4. Set RF read indicator. bit 22=0 */
- /* 5. Trigger Fw to operate the command. bit 31 */
- data |= 0x80000000;
- /* 6. We can not execute read operation if bit 31 is 1. */
- read_nic_dword(dev, QPNR, &tmp);
- while (tmp & 0x80000000) {
-		/* If FW cannot finish the RF R/W within ?? tries,
-		 * we must reset FW.
- */
- if (time++ < 100) {
- udelay(10);
- read_nic_dword(dev, QPNR, &tmp);
- } else {
- break;
- }
- }
- /* 7. Execute read operation. */
- write_nic_dword(dev, QPNR, data);
- /* 8. Check if firmware send back RF content. */
- read_nic_dword(dev, QPNR, &tmp);
- while (tmp & 0x80000000) {
-		/* If FW cannot finish the RF R/W within ?? tries,
-		 * we must reset FW.
- */
- if (time++ < 100) {
- udelay(10);
- read_nic_dword(dev, QPNR, &tmp);
- } else {
- return 0;
- }
- }
- read_nic_dword(dev, RF_DATA, &reg);
-
- return reg;
-}
-
-/******************************************************************************
- * function: Firmware-assisted RF register write (RF R/W executed by firmware).
- * input: net_device *dev
- * rf90_radio_path_e e_rfpath
- * u32 offset
- * u32 data
- * output: none
- * return: none
- * notice:
- ****************************************************************************/
-static void phy_FwRFSerialWrite(struct net_device *dev,
- enum rf90_radio_path_e e_rfpath,
- u32 offset, u32 data)
-{
- u8 time = 0;
- u32 tmp;
-
- /* Firmware RF Write control.
- * We can not execute the scheme in the initial step.
- * Otherwise, RF-R/W will waste much time.
- * This is only for site survey.
- */
-
- /* 1. Set driver write bit and 12 bit data. bit 0-11 */
- /* 2. Write RF register address. bit 12-19 */
- data |= ((offset&0xFF)<<12);
- /* 3. Write RF path. bit 20-21 */
- data |= ((e_rfpath&0x3)<<20);
- /* 4. Set RF write indicator. bit 22=1 */
- data |= 0x400000;
- /* 5. Trigger Fw to operate the command. bit 31=1 */
- data |= 0x80000000;
-
- /* 6. Write operation. We can not write if bit 31 is 1. */
- read_nic_dword(dev, QPNR, &tmp);
- while (tmp & 0x80000000) {
-		/* If FW cannot finish the RF R/W within ?? tries,
-		 * we must reset FW.
- */
- if (time++ < 100) {
- udelay(10);
- read_nic_dword(dev, QPNR, &tmp);
- } else {
- break;
- }
- }
-	/* 7. Regardless of the check bit, we always force the write,
-	 * because FW will not accept the command otherwise.
- */
- write_nic_dword(dev, QPNR, data);
-	/* According to testing, we must delay 20 us to wait for the firmware
-	 * to finish the RF write operation.
-	 */
-	/* The delay is now handled on the firmware side. */
-}
-
-/******************************************************************************
- * function: This function reads BB parameters from the header file we generate
- *           and does the register read/write
- * input: net_device *dev
- * output: none
- * return: none
- * notice: BB parameters may change all the time, so please make
- * sure it has been synced with the newest.
- *****************************************************************************/
-void rtl8192_phy_configmac(struct net_device *dev)
-{
- u32 dwArrayLen = 0, i;
- u32 *pdwArray = NULL;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (priv->btxpowerdata_readfromEEPORM) {
- RT_TRACE(COMP_PHY, "Rtl819XMACPHY_Array_PG\n");
- dwArrayLen = MACPHY_Array_PGLength;
- pdwArray = Rtl8192UsbMACPHY_Array_PG;
-
- } else {
- RT_TRACE(COMP_PHY, "Rtl819XMACPHY_Array\n");
- dwArrayLen = MACPHY_ArrayLength;
- pdwArray = rtl819XMACPHY_Array;
- }
- for (i = 0; i < dwArrayLen; i = i+3) {
- if (pdwArray[i] == 0x318)
- pdwArray[i+2] = 0x00000800;
-
- RT_TRACE(COMP_DBG,
- "Rtl8190MACPHY_Array[0]=%x Rtl8190MACPHY_Array[1]=%x Rtl8190MACPHY_Array[2]=%x\n",
- pdwArray[i], pdwArray[i+1], pdwArray[i+2]);
- rtl8192_setBBreg(dev, pdwArray[i], pdwArray[i+1],
- pdwArray[i+2]);
- }
-}
-
-/******************************************************************************
- * function: This function writes the PHY_REG or AGC table from the header arrays
- * input: net_device *dev
- * u8 ConfigType
- * output: none
- * return: none
- * notice: BB parameters may change all the time, so please make
- * sure it has been synced with the newest.
- *****************************************************************************/
-static void rtl8192_phyConfigBB(struct net_device *dev,
- enum baseband_config_type ConfigType)
-{
- u32 i;
-
- if (ConfigType == BASEBAND_CONFIG_PHY_REG) {
- for (i = 0; i < PHY_REG_1T2RArrayLength; i += 2) {
- rtl8192_setBBreg(dev, Rtl8192UsbPHY_REG_1T2RArray[i],
- bMaskDWord,
- Rtl8192UsbPHY_REG_1T2RArray[i+1]);
- RT_TRACE(COMP_DBG,
- "i: %x, Rtl819xUsbPHY_REGArray[0]=%x Rtl819xUsbPHY_REGArray[1]=%x\n",
- i, Rtl8192UsbPHY_REG_1T2RArray[i],
- Rtl8192UsbPHY_REG_1T2RArray[i+1]);
- }
- } else if (ConfigType == BASEBAND_CONFIG_AGC_TAB) {
- for (i = 0; i < AGCTAB_ArrayLength; i += 2) {
- rtl8192_setBBreg(dev, Rtl8192UsbAGCTAB_Array[i],
- bMaskDWord, Rtl8192UsbAGCTAB_Array[i+1]);
- RT_TRACE(COMP_DBG,
- "i: %x, Rtl8192UsbAGCTAB_Array[0]=%x Rtl8192UsbAGCTAB_Array[1]=%x\n",
- i, Rtl8192UsbAGCTAB_Array[i],
- Rtl8192UsbAGCTAB_Array[i+1]);
- }
- }
-}
-
-/******************************************************************************
- * function: This function initializes Register definition offset for
- * Radio Path A/B/C/D
- * input: net_device *dev
- * output: none
- * return: none
- * notice: Initialization value here is constant and it should never
- * be changed
- *****************************************************************************/
-static void rtl8192_InitBBRFRegDef(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- /* RF Interface Software Control */
- /* 16 LSBs if read 32-bit from 0x870 */
- priv->PHYRegDef[RF90_PATH_A].rfintfs = rFPGA0_XAB_RFInterfaceSW;
- /* 16 MSBs if read 32-bit from 0x870 (16-bit for 0x872) */
- priv->PHYRegDef[RF90_PATH_B].rfintfs = rFPGA0_XAB_RFInterfaceSW;
- /* 16 LSBs if read 32-bit from 0x874 */
- priv->PHYRegDef[RF90_PATH_C].rfintfs = rFPGA0_XCD_RFInterfaceSW;
- /* 16 MSBs if read 32-bit from 0x874 (16-bit for 0x876) */
- priv->PHYRegDef[RF90_PATH_D].rfintfs = rFPGA0_XCD_RFInterfaceSW;
-
- /* RF Interface Readback Value */
- /* 16 LSBs if read 32-bit from 0x8E0 */
- priv->PHYRegDef[RF90_PATH_A].rfintfi = rFPGA0_XAB_RFInterfaceRB;
- /* 16 MSBs if read 32-bit from 0x8E0 (16-bit for 0x8E2) */
- priv->PHYRegDef[RF90_PATH_B].rfintfi = rFPGA0_XAB_RFInterfaceRB;
- /* 16 LSBs if read 32-bit from 0x8E4 */
- priv->PHYRegDef[RF90_PATH_C].rfintfi = rFPGA0_XCD_RFInterfaceRB;
- /* 16 MSBs if read 32-bit from 0x8E4 (16-bit for 0x8E6) */
- priv->PHYRegDef[RF90_PATH_D].rfintfi = rFPGA0_XCD_RFInterfaceRB;
-
- /* RF Interface Output (and Enable) */
- /* 16 LSBs if read 32-bit from 0x860 */
- priv->PHYRegDef[RF90_PATH_A].rfintfo = rFPGA0_XA_RFInterfaceOE;
- /* 16 LSBs if read 32-bit from 0x864 */
- priv->PHYRegDef[RF90_PATH_B].rfintfo = rFPGA0_XB_RFInterfaceOE;
- /* 16 LSBs if read 32-bit from 0x868 */
- priv->PHYRegDef[RF90_PATH_C].rfintfo = rFPGA0_XC_RFInterfaceOE;
- /* 16 LSBs if read 32-bit from 0x86C */
- priv->PHYRegDef[RF90_PATH_D].rfintfo = rFPGA0_XD_RFInterfaceOE;
-
- /* RF Interface (Output and) Enable */
- /* 16 MSBs if read 32-bit from 0x860 (16-bit for 0x862) */
- priv->PHYRegDef[RF90_PATH_A].rfintfe = rFPGA0_XA_RFInterfaceOE;
- /* 16 MSBs if read 32-bit from 0x864 (16-bit for 0x866) */
- priv->PHYRegDef[RF90_PATH_B].rfintfe = rFPGA0_XB_RFInterfaceOE;
- /* 16 MSBs if read 32-bit from 0x86A (16-bit for 0x86A) */
- priv->PHYRegDef[RF90_PATH_C].rfintfe = rFPGA0_XC_RFInterfaceOE;
- /* 16 MSBs if read 32-bit from 0x86C (16-bit for 0x86E) */
- priv->PHYRegDef[RF90_PATH_D].rfintfe = rFPGA0_XD_RFInterfaceOE;
-
- /* Addr of LSSI. Write RF register by driver */
- priv->PHYRegDef[RF90_PATH_A].rf3wireOffset = rFPGA0_XA_LSSIParameter;
- priv->PHYRegDef[RF90_PATH_B].rf3wireOffset = rFPGA0_XB_LSSIParameter;
- priv->PHYRegDef[RF90_PATH_C].rf3wireOffset = rFPGA0_XC_LSSIParameter;
- priv->PHYRegDef[RF90_PATH_D].rf3wireOffset = rFPGA0_XD_LSSIParameter;
-
- /* RF parameter */
- /* BB Band Select */
- priv->PHYRegDef[RF90_PATH_A].rfLSSI_Select = rFPGA0_XAB_RFParameter;
- priv->PHYRegDef[RF90_PATH_B].rfLSSI_Select = rFPGA0_XAB_RFParameter;
- priv->PHYRegDef[RF90_PATH_C].rfLSSI_Select = rFPGA0_XCD_RFParameter;
- priv->PHYRegDef[RF90_PATH_D].rfLSSI_Select = rFPGA0_XCD_RFParameter;
-
- /* Tx AGC Gain Stage (same for all path. Should we remove this?) */
- priv->PHYRegDef[RF90_PATH_A].rfTxGainStage = rFPGA0_TxGainStage;
- priv->PHYRegDef[RF90_PATH_B].rfTxGainStage = rFPGA0_TxGainStage;
- priv->PHYRegDef[RF90_PATH_C].rfTxGainStage = rFPGA0_TxGainStage;
- priv->PHYRegDef[RF90_PATH_D].rfTxGainStage = rFPGA0_TxGainStage;
-
-	/* Transceiver A~D HSSI Parameter-1 */
- /* wire control parameter1 */
- priv->PHYRegDef[RF90_PATH_A].rfHSSIPara1 = rFPGA0_XA_HSSIParameter1;
- priv->PHYRegDef[RF90_PATH_B].rfHSSIPara1 = rFPGA0_XB_HSSIParameter1;
- priv->PHYRegDef[RF90_PATH_C].rfHSSIPara1 = rFPGA0_XC_HSSIParameter1;
- priv->PHYRegDef[RF90_PATH_D].rfHSSIPara1 = rFPGA0_XD_HSSIParameter1;
-
-	/* Transceiver A~D HSSI Parameter-2 */
- /* wire control parameter2 */
- priv->PHYRegDef[RF90_PATH_A].rfHSSIPara2 = rFPGA0_XA_HSSIParameter2;
- priv->PHYRegDef[RF90_PATH_B].rfHSSIPara2 = rFPGA0_XB_HSSIParameter2;
- priv->PHYRegDef[RF90_PATH_C].rfHSSIPara2 = rFPGA0_XC_HSSIParameter2;
- priv->PHYRegDef[RF90_PATH_D].rfHSSIPara2 = rFPGA0_XD_HSSIParameter2;
-
- /* RF Switch Control */
- /* TR/Ant switch control */
- priv->PHYRegDef[RF90_PATH_A].rfSwitchControl = rFPGA0_XAB_SwitchControl;
- priv->PHYRegDef[RF90_PATH_B].rfSwitchControl = rFPGA0_XAB_SwitchControl;
- priv->PHYRegDef[RF90_PATH_C].rfSwitchControl = rFPGA0_XCD_SwitchControl;
- priv->PHYRegDef[RF90_PATH_D].rfSwitchControl = rFPGA0_XCD_SwitchControl;
-
- /* AGC control 1 */
- priv->PHYRegDef[RF90_PATH_A].rfAGCControl1 = rOFDM0_XAAGCCore1;
- priv->PHYRegDef[RF90_PATH_B].rfAGCControl1 = rOFDM0_XBAGCCore1;
- priv->PHYRegDef[RF90_PATH_C].rfAGCControl1 = rOFDM0_XCAGCCore1;
- priv->PHYRegDef[RF90_PATH_D].rfAGCControl1 = rOFDM0_XDAGCCore1;
-
- /* AGC control 2 */
- priv->PHYRegDef[RF90_PATH_A].rfAGCControl2 = rOFDM0_XAAGCCore2;
- priv->PHYRegDef[RF90_PATH_B].rfAGCControl2 = rOFDM0_XBAGCCore2;
- priv->PHYRegDef[RF90_PATH_C].rfAGCControl2 = rOFDM0_XCAGCCore2;
- priv->PHYRegDef[RF90_PATH_D].rfAGCControl2 = rOFDM0_XDAGCCore2;
-
- /* RX AFE control 1 */
- priv->PHYRegDef[RF90_PATH_A].rfRxIQImbalance = rOFDM0_XARxIQImbalance;
- priv->PHYRegDef[RF90_PATH_B].rfRxIQImbalance = rOFDM0_XBRxIQImbalance;
- priv->PHYRegDef[RF90_PATH_C].rfRxIQImbalance = rOFDM0_XCRxIQImbalance;
- priv->PHYRegDef[RF90_PATH_D].rfRxIQImbalance = rOFDM0_XDRxIQImbalance;
-
- /* RX AFE control 1 */
- priv->PHYRegDef[RF90_PATH_A].rfRxAFE = rOFDM0_XARxAFE;
- priv->PHYRegDef[RF90_PATH_B].rfRxAFE = rOFDM0_XBRxAFE;
- priv->PHYRegDef[RF90_PATH_C].rfRxAFE = rOFDM0_XCRxAFE;
- priv->PHYRegDef[RF90_PATH_D].rfRxAFE = rOFDM0_XDRxAFE;
-
- /* Tx AFE control 1 */
- priv->PHYRegDef[RF90_PATH_A].rfTxIQImbalance = rOFDM0_XATxIQImbalance;
- priv->PHYRegDef[RF90_PATH_B].rfTxIQImbalance = rOFDM0_XBTxIQImbalance;
- priv->PHYRegDef[RF90_PATH_C].rfTxIQImbalance = rOFDM0_XCTxIQImbalance;
- priv->PHYRegDef[RF90_PATH_D].rfTxIQImbalance = rOFDM0_XDTxIQImbalance;
-
- /* Tx AFE control 2 */
- priv->PHYRegDef[RF90_PATH_A].rfTxAFE = rOFDM0_XATxAFE;
- priv->PHYRegDef[RF90_PATH_B].rfTxAFE = rOFDM0_XBTxAFE;
- priv->PHYRegDef[RF90_PATH_C].rfTxAFE = rOFDM0_XCTxAFE;
- priv->PHYRegDef[RF90_PATH_D].rfTxAFE = rOFDM0_XDTxAFE;
-
-	/* Transceiver LSSI Readback */
- priv->PHYRegDef[RF90_PATH_A].rfLSSIReadBack = rFPGA0_XA_LSSIReadBack;
- priv->PHYRegDef[RF90_PATH_B].rfLSSIReadBack = rFPGA0_XB_LSSIReadBack;
- priv->PHYRegDef[RF90_PATH_C].rfLSSIReadBack = rFPGA0_XC_LSSIReadBack;
- priv->PHYRegDef[RF90_PATH_D].rfLSSIReadBack = rFPGA0_XD_LSSIReadBack;
-}
-
-/******************************************************************************
- * function: This function writes a register and then reads it back to make
- * sure BB and RF are OK
- * input: net_device *dev
- * hw90_block_e CheckBlock
- * rf90_radio_path_e e_rfpath //only used when checkblock is
- * //HW90_BLOCK_RF
- * output: none
- * return: return whether BB and RF is ok (0:OK, 1:Fail)
- * notice: This function may be removed in the ASIC
- ******************************************************************************/
-u8 rtl8192_phy_checkBBAndRF(struct net_device *dev, enum hw90_block_e CheckBlock,
- enum rf90_radio_path_e e_rfpath)
-{
- u8 ret = 0;
- u32 i, CheckTimes = 4, reg = 0;
- u32 WriteAddr[4];
- u32 WriteData[] = {0xfffff027, 0xaa55a02f, 0x00000027, 0x55aa502f};
-
- /* Initialize register address offset to be checked */
- WriteAddr[HW90_BLOCK_MAC] = 0x100;
- WriteAddr[HW90_BLOCK_PHY0] = 0x900;
- WriteAddr[HW90_BLOCK_PHY1] = 0x800;
- WriteAddr[HW90_BLOCK_RF] = 0x3;
- RT_TRACE(COMP_PHY, "%s(), CheckBlock: %d\n", __func__, CheckBlock);
- for (i = 0; i < CheckTimes; i++) {
- /* Write data to register and readback */
- switch (CheckBlock) {
- case HW90_BLOCK_MAC:
- RT_TRACE(COMP_ERR,
- "PHY_CheckBBRFOK(): Never Write 0x100 here!\n");
- break;
-
- case HW90_BLOCK_PHY0:
- case HW90_BLOCK_PHY1:
- write_nic_dword(dev, WriteAddr[CheckBlock],
- WriteData[i]);
- read_nic_dword(dev, WriteAddr[CheckBlock], &reg);
- break;
-
- case HW90_BLOCK_RF:
- WriteData[i] &= 0xfff;
- rtl8192_phy_SetRFReg(dev, e_rfpath,
- WriteAddr[HW90_BLOCK_RF],
- bMask12Bits, WriteData[i]);
- /* TODO: we should not delay for such a long time.
- * Ask SD3
- */
- usleep_range(1000, 1000);
- reg = rtl8192_phy_QueryRFReg(dev, e_rfpath,
- WriteAddr[HW90_BLOCK_RF],
- bMask12Bits);
- usleep_range(1000, 1000);
- break;
-
- default:
- ret = 1;
- break;
- }
-
- /* Check whether readback data is correct */
- if (reg != WriteData[i]) {
- RT_TRACE((COMP_PHY|COMP_ERR),
- "error reg: %x, WriteData: %x\n",
- reg, WriteData[i]);
- ret = 1;
- break;
- }
- }
-
- return ret;
-}
-
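rtl8192_phy_checkBBAndRF() above validates a block by writing a handful of test patterns and reading each one back. Stripped of the per-block switch, the core write-then-verify loop reduces to the sketch below; reg_write32()/reg_read32() are hypothetical accessors (here backed by a single fake register) rather than driver functions.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical accessors backed by one fake register, standing in for
 * write_nic_dword()/read_nic_dword(). */
static uint32_t fake_reg;
static void reg_write32(uint32_t addr, uint32_t val) { (void)addr; fake_reg = val; }
static uint32_t reg_read32(uint32_t addr)            { (void)addr; return fake_reg; }

/*
 * Write each test pattern to addr and read it back, as the removed
 * rtl8192_phy_checkBBAndRF() does for the PHY blocks. Returns true only
 * if every pattern reads back unchanged.
 */
static bool check_block(uint32_t addr, const uint32_t *patterns, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		reg_write32(addr, patterns[i]);
		if (reg_read32(addr) != patterns[i])
			return false;
	}
	return true;
}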
-/******************************************************************************
- * function: This function initializes BB&RF
- * input: net_device *dev
- * output: none
- * return: none
- * notice: Initialization value may change all the time, so please make
- * sure it has been synced with the newest.
- ******************************************************************************/
-static void rtl8192_BB_Config_ParaFile(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 reg_u8 = 0, eCheckItem = 0, status = 0;
- u32 reg_u32 = 0;
-
- /**************************************
- * <1> Initialize BaseBand
- *************************************/
-
- /* --set BB Global Reset-- */
- read_nic_byte(dev, BB_GLOBAL_RESET, &reg_u8);
- write_nic_byte(dev, BB_GLOBAL_RESET, (reg_u8|BB_GLOBAL_RESET_BIT));
- mdelay(50);
- /* ---set BB reset Active--- */
- read_nic_dword(dev, CPU_GEN, &reg_u32);
- write_nic_dword(dev, CPU_GEN, (reg_u32&(~CPU_GEN_BB_RST)));
-
-	/* ----Check that the FPGAPHY0 and PHY1 boards are OK---- */
- /* TODO: this function should be removed on ASIC */
- for (eCheckItem = (enum hw90_block_e)HW90_BLOCK_PHY0;
- eCheckItem <= HW90_BLOCK_PHY1; eCheckItem++) {
- /* don't care RF path */
- status = rtl8192_phy_checkBBAndRF(dev, (enum hw90_block_e)eCheckItem,
- (enum rf90_radio_path_e)0);
- if (status != 0) {
- RT_TRACE((COMP_ERR | COMP_PHY),
- "phy_rf8256_config(): Check PHY%d Fail!!\n",
- eCheckItem-1);
- return;
- }
- }
- /* ---- Set CCK and OFDM Block "OFF"---- */
- rtl8192_setBBreg(dev, rFPGA0_RFMOD, bCCKEn|bOFDMEn, 0x0);
-	/* ----BB Register Initialization---- */
- /* ==m==>Set PHY REG From Header<==m== */
- rtl8192_phyConfigBB(dev, BASEBAND_CONFIG_PHY_REG);
-
- /* ----Set BB reset de-Active---- */
- read_nic_dword(dev, CPU_GEN, &reg_u32);
- write_nic_dword(dev, CPU_GEN, (reg_u32|CPU_GEN_BB_RST));
-
- /* ----BB AGC table Initialization---- */
- /* ==m==>Set PHY REG From Header<==m== */
- rtl8192_phyConfigBB(dev, BASEBAND_CONFIG_AGC_TAB);
-
- /* ----Enable XSTAL ---- */
- write_nic_byte_E(dev, 0x5e, 0x00);
- if (priv->card_8192_version == VERSION_819XU_A) {
- /* Antenna gain offset from B/C/D to A */
- reg_u32 = priv->AntennaTxPwDiff[1]<<4 |
- priv->AntennaTxPwDiff[0];
- rtl8192_setBBreg(dev, rFPGA0_TxGainStage, (bXBTxAGC|bXCTxAGC),
- reg_u32);
-
- /* XSTALLCap */
- reg_u32 = priv->CrystalCap & 0xf;
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, bXtalCap,
- reg_u32);
- }
-
- /* Check if the CCK HighPower is turned ON.
- * This is used to calculate PWDB.
- */
- priv->bCckHighPower = (u8)rtl8192_QueryBBReg(dev,
- rFPGA0_XA_HSSIParameter2,
- 0x200);
-}
-
-/******************************************************************************
- * function: This function initializes BB&RF
- * input: net_device *dev
- * output: none
- * return: none
- * notice: Initialization value may change all the time, so please make
- * sure it has been synced with the newest.
- *****************************************************************************/
-void rtl8192_BBConfig(struct net_device *dev)
-{
- rtl8192_InitBBRFRegDef(dev);
-	/* Config BB & RF. As hardcode-based initialization has not been
-	 * implemented yet, use the parameter file first.
- * FIXME: should implement it for hardcode?
- */
- rtl8192_BB_Config_ParaFile(dev);
-}
-
-/******************************************************************************
- * function: This function obtains the initialization value of Tx power Level
- * offset
- * input: net_device *dev
- * output: none
- * return: none
- *****************************************************************************/
-void rtl8192_phy_getTxPower(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 tmp;
-
- read_nic_dword(dev, rTxAGC_Rate18_06,
- &priv->MCSTxPowerLevelOriginalOffset[0]);
- read_nic_dword(dev, rTxAGC_Rate54_24,
- &priv->MCSTxPowerLevelOriginalOffset[1]);
- read_nic_dword(dev, rTxAGC_Mcs03_Mcs00,
- &priv->MCSTxPowerLevelOriginalOffset[2]);
- read_nic_dword(dev, rTxAGC_Mcs07_Mcs04,
- &priv->MCSTxPowerLevelOriginalOffset[3]);
- read_nic_dword(dev, rTxAGC_Mcs11_Mcs08,
- &priv->MCSTxPowerLevelOriginalOffset[4]);
- read_nic_dword(dev, rTxAGC_Mcs15_Mcs12,
- &priv->MCSTxPowerLevelOriginalOffset[5]);
-
- /* Read rx initial gain */
- read_nic_byte(dev, rOFDM0_XAAGCCore1, &priv->DefaultInitialGain[0]);
- read_nic_byte(dev, rOFDM0_XBAGCCore1, &priv->DefaultInitialGain[1]);
- read_nic_byte(dev, rOFDM0_XCAGCCore1, &priv->DefaultInitialGain[2]);
- read_nic_byte(dev, rOFDM0_XDAGCCore1, &priv->DefaultInitialGain[3]);
- RT_TRACE(COMP_INIT,
- "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x)\n",
- priv->DefaultInitialGain[0], priv->DefaultInitialGain[1],
- priv->DefaultInitialGain[2], priv->DefaultInitialGain[3]);
-
- /* Read framesync */
- read_nic_byte(dev, rOFDM0_RxDetector3, &priv->framesync);
- read_nic_byte(dev, rOFDM0_RxDetector2, &tmp);
- priv->framesyncC34 = tmp;
- RT_TRACE(COMP_INIT, "Default framesync (0x%x) = 0x%x\n",
- rOFDM0_RxDetector3, priv->framesync);
-
-	/* Read SIFS (save the value read from MACPHY_REG.txt) */
- read_nic_word(dev, SIFS, &priv->SifsTime);
-}
-
-/******************************************************************************
- * function: This function sets the initialization value of Tx power Level
- * offset
- * input: net_device *dev
- * u8 channel
- * output: none
- * return: none
- ******************************************************************************/
-void rtl8192_phy_setTxPower(struct net_device *dev, u8 channel)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 powerlevel = priv->TxPowerLevelCCK[channel-1];
- u8 powerlevelOFDM24G = priv->TxPowerLevelOFDM24G[channel-1];
-
- switch (priv->rf_chip) {
- case RF_8256:
- /* need further implement */
- phy_set_rf8256_cck_tx_power(dev, powerlevel);
- phy_set_rf8256_ofdm_tx_power(dev, powerlevelOFDM24G);
- break;
- default:
- RT_TRACE((COMP_PHY|COMP_ERR),
- "error RF chipID(8225 or 8258) in function %s()\n",
- __func__);
- break;
- }
-}
-
-/******************************************************************************
- * function: This function checks Rf chip to do RF config
- * input: net_device *dev
- * output: none
- * return: only 8256 is supported
- ******************************************************************************/
-void rtl8192_phy_RFConfig(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- switch (priv->rf_chip) {
- case RF_8256:
- phy_rf8256_config(dev);
- break;
- default:
- RT_TRACE(COMP_ERR, "error chip id\n");
- break;
- }
-}
-
-/******************************************************************************
- * function: This function updates Initial gain
- * input: net_device *dev
- * output: none
- * return: none; as Windows has not implemented this, it remains a stub for now
- ******************************************************************************/
-void rtl8192_phy_updateInitGain(struct net_device *dev)
-{
-}
-
-/******************************************************************************
- * function: This function reads RF parameters from the generated header file
- * and does the RF 3-wire writes
- * input: net_device *dev
- * rf90_radio_path_e e_rfpath
- * output: none
- * return: return code show if RF configuration is successful(0:pass, 1:fail)
- * notice: Delay may be required for RF configuration
- *****************************************************************************/
-u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
- enum rf90_radio_path_e e_rfpath)
-{
- int i;
-
- switch (e_rfpath) {
- case RF90_PATH_A:
- for (i = 0; i < RadioA_ArrayLength; i = i+2) {
- if (Rtl8192UsbRadioA_Array[i] == 0xfe) {
- mdelay(100);
- continue;
- }
- rtl8192_phy_SetRFReg(dev, e_rfpath,
- Rtl8192UsbRadioA_Array[i],
- bMask12Bits,
- Rtl8192UsbRadioA_Array[i+1]);
- mdelay(1);
- }
- break;
- case RF90_PATH_B:
- for (i = 0; i < RadioB_ArrayLength; i = i+2) {
- if (Rtl8192UsbRadioB_Array[i] == 0xfe) {
- mdelay(100);
- continue;
- }
- rtl8192_phy_SetRFReg(dev, e_rfpath,
- Rtl8192UsbRadioB_Array[i],
- bMask12Bits,
- Rtl8192UsbRadioB_Array[i+1]);
- mdelay(1);
- }
- break;
- case RF90_PATH_C:
- for (i = 0; i < RadioC_ArrayLength; i = i+2) {
- if (Rtl8192UsbRadioC_Array[i] == 0xfe) {
- mdelay(100);
- continue;
- }
- rtl8192_phy_SetRFReg(dev, e_rfpath,
- Rtl8192UsbRadioC_Array[i],
- bMask12Bits,
- Rtl8192UsbRadioC_Array[i+1]);
- mdelay(1);
- }
- break;
- case RF90_PATH_D:
- for (i = 0; i < RadioD_ArrayLength; i = i+2) {
- if (Rtl8192UsbRadioD_Array[i] == 0xfe) {
- mdelay(100);
- continue;
- }
- rtl8192_phy_SetRFReg(dev, e_rfpath,
- Rtl8192UsbRadioD_Array[i],
- bMask12Bits,
- Rtl8192UsbRadioD_Array[i+1]);
- mdelay(1);
- }
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-/******************************************************************************
- * function: This function sets Tx Power of the channel
- * input: net_device *dev
- * u8 channel
- * output: none
- * return: none
- * notice:
- ******************************************************************************/
-static void rtl8192_SetTxPowerLevel(struct net_device *dev, u8 channel)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 powerlevel = priv->TxPowerLevelCCK[channel-1];
- u8 powerlevelOFDM24G = priv->TxPowerLevelOFDM24G[channel-1];
-
- switch (priv->rf_chip) {
- case RF_8225:
- break;
-
- case RF_8256:
- phy_set_rf8256_cck_tx_power(dev, powerlevel);
- phy_set_rf8256_ofdm_tx_power(dev, powerlevelOFDM24G);
- break;
-
- case RF_8258:
- break;
- default:
- RT_TRACE(COMP_ERR, "unknown rf chip ID in %s()\n", __func__);
- break;
- }
-}
-
-/******************************************************************************
- * function: This function sets command table variable (struct sw_chnl_cmd).
- * input: sw_chnl_cmd *CmdTable //table to be set
- * u32 CmdTableIdx //variable index in table to be set
- * u32 CmdTableSz //table size
- * switch_chan_cmd_id CmdID //command ID to set
- * u32 Para1
- * u32 Para2
- * u32 msDelay
- * output:
- * return: true if finished, false otherwise
- * notice:
- ******************************************************************************/
-static u8 rtl8192_phy_SetSwChnlCmdArray(struct sw_chnl_cmd *CmdTable, u32 CmdTableIdx,
- u32 CmdTableSz, enum switch_chan_cmd_id CmdID,
- u32 Para1, u32 Para2, u32 msDelay)
-{
- struct sw_chnl_cmd *pCmd;
-
- if (!CmdTable) {
- RT_TRACE(COMP_ERR, "%s(): CmdTable cannot be NULL\n", __func__);
- return false;
- }
- if (CmdTableIdx >= CmdTableSz) {
- RT_TRACE(COMP_ERR, "%s(): Access invalid index, please check size of the table, CmdTableIdx:%d, CmdTableSz:%d\n",
- __func__, CmdTableIdx, CmdTableSz);
- return false;
- }
-
- pCmd = CmdTable + CmdTableIdx;
- pCmd->cmd_id = CmdID;
- pCmd->para_1 = Para1;
- pCmd->para_2 = Para2;
- pCmd->ms_delay = msDelay;
-
- return true;
-}
-
-/******************************************************************************
- * function: This function sets channel step by step
- * input: net_device *dev
- * u8 channel
- * u8 *stage //3 stages
- * u8 *step
- * u32 *delay //whether need to delay
- * output: store new stage, step and delay for next step
- * (combine with function above)
- * return: true if finished, false otherwise
- * notice: Wait for simpler function to replace it
- *****************************************************************************/
-static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel,
- u8 *stage, u8 *step, u32 *delay)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct sw_chnl_cmd *pre_cmd;
- u32 pre_cmd_cnt = 0;
- struct sw_chnl_cmd *post_cmd;
- u32 post_cmd_cnt = 0;
- struct sw_chnl_cmd *rf_cmd;
- u32 rf_cmd_cnt = 0;
- struct sw_chnl_cmd *current_cmd = NULL;
- u8 e_rfpath;
- bool ret;
-
- pre_cmd = kcalloc(MAX_PRECMD_CNT, sizeof(*pre_cmd), GFP_KERNEL);
- if (!pre_cmd)
- return false;
-
- post_cmd = kcalloc(MAX_POSTCMD_CNT, sizeof(*post_cmd), GFP_KERNEL);
- if (!post_cmd) {
- kfree(pre_cmd);
- return false;
- }
-
- rf_cmd = kcalloc(MAX_RFDEPENDCMD_CNT, sizeof(*rf_cmd), GFP_KERNEL);
- if (!rf_cmd) {
- kfree(pre_cmd);
- kfree(post_cmd);
- return false;
- }
-
- RT_TRACE(COMP_CH, "%s() stage: %d, step: %d, channel: %d\n",
- __func__, *stage, *step, channel);
- if (!is_legal_channel(priv->ieee80211, channel)) {
- RT_TRACE(COMP_ERR, "set to illegal channel: %d\n", channel);
-		/* Return true to tell the upper caller that this channel
-		 * setting is finished; otherwise it would spin in the while loop.
- */
- ret = true;
- goto out;
- }
- /* FIXME: need to check whether channel is legal or not here */
-
- /* <1> Fill up pre common command. */
- rtl8192_phy_SetSwChnlCmdArray(pre_cmd, pre_cmd_cnt++,
- MAX_PRECMD_CNT, CMD_ID_SET_TX_PWR_LEVEL,
- 0, 0, 0);
- rtl8192_phy_SetSwChnlCmdArray(pre_cmd, pre_cmd_cnt++,
- MAX_PRECMD_CNT, CMD_ID_END, 0, 0, 0);
-
- /* <2> Fill up post common command. */
- rtl8192_phy_SetSwChnlCmdArray(post_cmd, post_cmd_cnt++,
- MAX_POSTCMD_CNT, CMD_ID_END, 0, 0, 0);
-
- /* <3> Fill up RF dependent command. */
- switch (priv->rf_chip) {
- case RF_8225:
- if (!(channel >= 1 && channel <= 14)) {
- RT_TRACE(COMP_ERR,
- "illegal channel for Zebra 8225: %d\n",
- channel);
- ret = true;
- goto out;
- }
- rtl8192_phy_SetSwChnlCmdArray(rf_cmd, rf_cmd_cnt++,
- MAX_RFDEPENDCMD_CNT,
- CMD_ID_RF_WRITE_REG,
- rZebra1_Channel,
- RF_CHANNEL_TABLE_ZEBRA[channel],
- 10);
- rtl8192_phy_SetSwChnlCmdArray(rf_cmd, rf_cmd_cnt++,
- MAX_RFDEPENDCMD_CNT,
- CMD_ID_END, 0, 0, 0);
- break;
-
- case RF_8256:
- /* TEST!! This is not the table for 8256!! */
- if (!(channel >= 1 && channel <= 14)) {
- RT_TRACE(COMP_ERR,
- "illegal channel for Zebra 8256: %d\n",
- channel);
- ret = true;
- goto out;
- }
- rtl8192_phy_SetSwChnlCmdArray(rf_cmd, rf_cmd_cnt++,
- MAX_RFDEPENDCMD_CNT,
- CMD_ID_RF_WRITE_REG,
- rZebra1_Channel, channel, 10);
- rtl8192_phy_SetSwChnlCmdArray(rf_cmd, rf_cmd_cnt++,
- MAX_RFDEPENDCMD_CNT,
- CMD_ID_END, 0, 0, 0);
- break;
-
- case RF_8258:
- break;
-
- default:
- RT_TRACE(COMP_ERR, "Unknown RFChipID: %d\n", priv->rf_chip);
- ret = true;
- goto out;
- }
-
- do {
- switch (*stage) {
- case 0:
- current_cmd = &pre_cmd[*step];
- break;
- case 1:
- current_cmd = &rf_cmd[*step];
- break;
- case 2:
- current_cmd = &post_cmd[*step];
- break;
- }
-
- if (current_cmd->cmd_id == CMD_ID_END) {
- if ((*stage) == 2) {
- *delay = current_cmd->ms_delay;
- ret = true;
- goto out;
- }
- (*stage)++;
- (*step) = 0;
- continue;
- }
-
- switch (current_cmd->cmd_id) {
- case CMD_ID_SET_TX_PWR_LEVEL:
- if (priv->card_8192_version == VERSION_819XU_A)
- /* consider it later! */
- rtl8192_SetTxPowerLevel(dev, channel);
- break;
- case CMD_ID_WRITE_PORT_ULONG:
- write_nic_dword(dev, current_cmd->para_1,
- current_cmd->para_2);
- break;
- case CMD_ID_WRITE_PORT_USHORT:
- write_nic_word(dev, current_cmd->para_1,
- (u16)current_cmd->para_2);
- break;
- case CMD_ID_WRITE_PORT_UCHAR:
- write_nic_byte(dev, current_cmd->para_1,
- (u8)current_cmd->para_2);
- break;
- case CMD_ID_RF_WRITE_REG:
- for (e_rfpath = 0; e_rfpath < RF90_PATH_MAX; e_rfpath++) {
- rtl8192_phy_SetRFReg(dev,
- (enum rf90_radio_path_e)e_rfpath,
- current_cmd->para_1,
- bZebra1_ChannelNum,
- current_cmd->para_2);
- }
- break;
- default:
- break;
- }
-
- break;
- } while (true);
-
- *delay = current_cmd->ms_delay;
- (*step)++;
- ret = false;
-
-out:
- kfree(pre_cmd);
- kfree(post_cmd);
- kfree(rf_cmd);
-
- return ret;
-}
-
-/******************************************************************************
- * function: This function does actually set channel work
- * input: net_device *dev
- * u8 channel
- * output: none
- * return: none
- * notice: We should not call this function directly
- *****************************************************************************/
-static void rtl8192_phy_FinishSwChnlNow(struct net_device *dev, u8 channel)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u32 delay = 0;
-
- while (!rtl8192_phy_SwChnlStepByStep(dev, channel, &priv->SwChnlStage,
- &priv->SwChnlStep, &delay)) {
- if (!priv->up)
- break;
- }
-}
-
-/******************************************************************************
- * function: Callback routine of the work item for switch channel.
- * input: net_device *dev
- *
- * output: none
- * return: none
- *****************************************************************************/
-void rtl8192_SwChnl_WorkItem(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- RT_TRACE(COMP_CH, "==> SwChnlCallback819xUsbWorkItem(), chan:%d\n",
- priv->chan);
-
- rtl8192_phy_FinishSwChnlNow(dev, priv->chan);
-
- RT_TRACE(COMP_CH, "<== SwChnlCallback819xUsbWorkItem()\n");
-}
-
-/******************************************************************************
- * function: This function scheduled actual work item to set channel
- * input: net_device *dev
- * u8 channel //channel to set
- * output: none
- * return: return code show if workitem is scheduled (1:pass, 0:fail)
- * notice: Delay may be required for RF configuration
- ******************************************************************************/
-u8 rtl8192_phy_SwChnl(struct net_device *dev, u8 channel)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- RT_TRACE(COMP_CH, "%s(), SwChnlInProgress: %d\n", __func__,
- priv->SwChnlInProgress);
- if (!priv->up)
- return false;
- if (priv->SwChnlInProgress)
- return false;
-
- /* -------------------------------------------- */
- switch (priv->ieee80211->mode) {
- case WIRELESS_MODE_A:
- case WIRELESS_MODE_N_5G:
- if (channel <= 14) {
- RT_TRACE(COMP_ERR, "WIRELESS_MODE_A but channel<=14\n");
- return false;
- }
- break;
- case WIRELESS_MODE_B:
- if (channel > 14) {
- RT_TRACE(COMP_ERR, "WIRELESS_MODE_B but channel>14\n");
- return false;
- }
- break;
- case WIRELESS_MODE_G:
- case WIRELESS_MODE_N_24G:
- if (channel > 14) {
- RT_TRACE(COMP_ERR, "WIRELESS_MODE_G but channel>14\n");
- return false;
- }
- break;
- }
- /* -------------------------------------------- */
-
- priv->SwChnlInProgress = true;
- if (channel == 0)
- channel = 1;
-
- priv->chan = channel;
-
- priv->SwChnlStage = 0;
- priv->SwChnlStep = 0;
- if (priv->up)
- rtl8192_SwChnl_WorkItem(dev);
-
- priv->SwChnlInProgress = false;
- return true;
-}
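
[Editorial note: the band check in the removed rtl8192_phy_SwChnl() reduces to the helper sketched below; the function name is illustrative, the mode constants are the ones used above.]

	/* 5 GHz modes must not use channels 1-14; 2.4 GHz modes must. */
	static bool channel_valid_for_mode(u8 mode, u8 channel)
	{
		switch (mode) {
		case WIRELESS_MODE_A:
		case WIRELESS_MODE_N_5G:
			return channel > 14;
		case WIRELESS_MODE_B:
		case WIRELESS_MODE_G:
		case WIRELESS_MODE_N_24G:
			return channel >= 1 && channel <= 14;
		default:
			return true;	/* unknown mode: let the RF table reject it */
		}
	}
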
-
-/******************************************************************************
- * function: Callback routine of the work item for set bandwidth mode.
- * input: net_device *dev
- * output: none
- * return: none
- * notice: I doubt whether SetBWModeInProgress flag is necessary as we can
- * test whether current work in the queue or not.//do I?
- *****************************************************************************/
-void rtl8192_SetBWModeWorkItem(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 regBwOpMode;
-
- RT_TRACE(COMP_SWBW, "%s() Switch to %s bandwidth\n", __func__,
- priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20?"20MHz":"40MHz");
-
- if (priv->rf_chip == RF_PSEUDO_11N) {
- priv->SetBWModeInProgress = false;
- return;
- }
-
- /* <1> Set MAC register */
- read_nic_byte(dev, BW_OPMODE, &regBwOpMode);
-
- switch (priv->CurrentChannelBW) {
- case HT_CHANNEL_WIDTH_20:
- regBwOpMode |= BW_OPMODE_20MHZ;
- /* We have not verify whether this register works */
- write_nic_byte(dev, BW_OPMODE, regBwOpMode);
- break;
-
- case HT_CHANNEL_WIDTH_20_40:
- regBwOpMode &= ~BW_OPMODE_20MHZ;
- /* We have not verify whether this register works */
- write_nic_byte(dev, BW_OPMODE, regBwOpMode);
- break;
-
- default:
- RT_TRACE(COMP_ERR,
- "SetChannelBandwidth819xUsb(): unknown Bandwidth: %#X\n",
- priv->CurrentChannelBW);
- break;
- }
-
- /* <2> Set PHY related register */
- switch (priv->CurrentChannelBW) {
- case HT_CHANNEL_WIDTH_20:
- rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x0);
- rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x0);
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1,
- 0x00100000, 1);
-
- /* Correct the tx power for CCK rate in 20M. */
- priv->cck_present_attenuation =
- priv->cck_present_attenuation_20Mdefault +
- priv->cck_present_attenuation_difference;
-
- if (priv->cck_present_attenuation > 22)
- priv->cck_present_attenuation = 22;
- if (priv->cck_present_attenuation < 0)
- priv->cck_present_attenuation = 0;
- RT_TRACE(COMP_INIT,
- "20M, pHalData->CCKPresentAttentuation = %d\n",
- priv->cck_present_attenuation);
-
- if (priv->chan == 14 && !priv->bcck_in_ch14) {
- priv->bcck_in_ch14 = true;
- dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
- } else if (priv->chan != 14 && priv->bcck_in_ch14) {
- priv->bcck_in_ch14 = false;
- dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
- } else {
- dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
- }
-
- break;
- case HT_CHANNEL_WIDTH_20_40:
- rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x1);
- rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x1);
- rtl8192_setBBreg(dev, rCCK0_System, bCCKSideBand,
- priv->nCur40MhzPrimeSC >> 1);
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 0);
- rtl8192_setBBreg(dev, rOFDM1_LSTF, 0xC00,
- priv->nCur40MhzPrimeSC);
- priv->cck_present_attenuation =
- priv->cck_present_attenuation_40Mdefault +
- priv->cck_present_attenuation_difference;
-
- if (priv->cck_present_attenuation > 22)
- priv->cck_present_attenuation = 22;
- if (priv->cck_present_attenuation < 0)
- priv->cck_present_attenuation = 0;
-
- RT_TRACE(COMP_INIT,
- "40M, pHalData->CCKPresentAttentuation = %d\n",
- priv->cck_present_attenuation);
- if (priv->chan == 14 && !priv->bcck_in_ch14) {
- priv->bcck_in_ch14 = true;
- dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
- } else if (priv->chan != 14 && priv->bcck_in_ch14) {
- priv->bcck_in_ch14 = false;
- dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
- } else {
- dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
- }
-
- break;
- default:
- RT_TRACE(COMP_ERR,
- "SetChannelBandwidth819xUsb(): unknown Bandwidth: %#X\n",
- priv->CurrentChannelBW);
- break;
- }
- /* Skip over setting of J-mode in BB register here.
- * Default value is "None J mode".
- */
-
- /* <3> Set RF related register */
- switch (priv->rf_chip) {
- case RF_8225:
- break;
-
- case RF_8256:
- phy_set_rf8256_bandwidth(dev, priv->CurrentChannelBW);
- break;
-
- case RF_8258:
- break;
-
- case RF_PSEUDO_11N:
- break;
-
- default:
- RT_TRACE(COMP_ERR, "Unknown RFChipID: %d\n", priv->rf_chip);
- break;
- }
- priv->SetBWModeInProgress = false;
-
- RT_TRACE(COMP_SWBW, "<==SetBWMode819xUsb(), %d\n",
- atomic_read(&priv->ieee80211->atm_swbw));
-}
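
[Editorial note: the 20 MHz and 40 MHz branches of the removed work item share the same CCK attenuation clamp; shown once below for clarity. The helper name is illustrative; clamp() is the standard kernel macro.]

	static int cck_attenuation(int per_bw_default, int difference)
	{
		int att = per_bw_default + difference;

		return clamp(att, 0, 22);	/* hardware accepts 0..22 only */
	}
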
-
-/******************************************************************************
- * function: This function schedules bandwidth switch work.
- * input: struct net_deviceq *dev
- * HT_CHANNEL_WIDTH bandwidth //20M or 40M
- * HT_EXTCHNL_OFFSET offset //Upper, Lower, or Don't care
- * output: none
- * return: none
- * notice: I doubt whether SetBWModeInProgress flag is necessary as we can
- * test whether current work in the queue or not.//do I?
- *****************************************************************************/
-void rtl8192_SetBWMode(struct net_device *dev,
- enum ht_channel_width bandwidth,
- enum ht_extension_chan_offset offset)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (priv->SetBWModeInProgress)
- return;
- priv->SetBWModeInProgress = true;
-
- priv->CurrentChannelBW = bandwidth;
-
- if (offset == HT_EXTCHNL_OFFSET_LOWER)
- priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_UPPER;
- else if (offset == HT_EXTCHNL_OFFSET_UPPER)
- priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_LOWER;
- else
- priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
-
- rtl8192_SetBWModeWorkItem(dev);
-}
-
-void InitialGain819xUsb(struct net_device *dev, u8 Operation)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- priv->InitialGainOperateType = Operation;
-
- if (priv->up)
- queue_delayed_work(priv->priv_wq, &priv->initialgain_operate_wq, 0);
-}
-
-void InitialGainOperateWorkItemCallBack(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct r8192_priv *priv = container_of(dwork, struct r8192_priv,
- initialgain_operate_wq);
- struct net_device *dev = priv->ieee80211->dev;
-#define SCAN_RX_INITIAL_GAIN 0x17
-#define POWER_DETECTION_TH 0x08
- u32 bitmask;
- u8 initial_gain;
- u8 Operation;
-
- Operation = priv->InitialGainOperateType;
-
- switch (Operation) {
- case IG_Backup:
- RT_TRACE(COMP_SCAN, "IG_Backup, backup the initial gain.\n");
- initial_gain = SCAN_RX_INITIAL_GAIN;
- bitmask = bMaskByte0;
- if (dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
- /* FW DIG OFF */
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);
- priv->initgain_backup.xaagccore1 =
- (u8)rtl8192_QueryBBReg(dev, rOFDM0_XAAGCCore1, bitmask);
- priv->initgain_backup.xbagccore1 =
- (u8)rtl8192_QueryBBReg(dev, rOFDM0_XBAGCCore1, bitmask);
- priv->initgain_backup.xcagccore1 =
- (u8)rtl8192_QueryBBReg(dev, rOFDM0_XCAGCCore1, bitmask);
- priv->initgain_backup.xdagccore1 =
- (u8)rtl8192_QueryBBReg(dev, rOFDM0_XDAGCCore1, bitmask);
- bitmask = bMaskByte2;
- priv->initgain_backup.cca =
- (u8)rtl8192_QueryBBReg(dev, rCCK0_CCA, bitmask);
-
- RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc50 is %x\n",
- priv->initgain_backup.xaagccore1);
- RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc58 is %x\n",
- priv->initgain_backup.xbagccore1);
- RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc60 is %x\n",
- priv->initgain_backup.xcagccore1);
- RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc68 is %x\n",
- priv->initgain_backup.xdagccore1);
- RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xa0a is %x\n",
- priv->initgain_backup.cca);
-
- RT_TRACE(COMP_SCAN, "Write scan initial gain = 0x%x\n",
- initial_gain);
- write_nic_byte(dev, rOFDM0_XAAGCCore1, initial_gain);
- write_nic_byte(dev, rOFDM0_XBAGCCore1, initial_gain);
- write_nic_byte(dev, rOFDM0_XCAGCCore1, initial_gain);
- write_nic_byte(dev, rOFDM0_XDAGCCore1, initial_gain);
- RT_TRACE(COMP_SCAN, "Write scan 0xa0a = 0x%x\n",
- POWER_DETECTION_TH);
- write_nic_byte(dev, 0xa0a, POWER_DETECTION_TH);
- break;
- case IG_Restore:
- RT_TRACE(COMP_SCAN, "IG_Restore, restore the initial gain.\n");
- bitmask = 0x7f; /* Bit0 ~ Bit6 */
- if (dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
- /* FW DIG OFF */
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);
-
- rtl8192_setBBreg(dev, rOFDM0_XAAGCCore1, bitmask,
- (u32)priv->initgain_backup.xaagccore1);
- rtl8192_setBBreg(dev, rOFDM0_XBAGCCore1, bitmask,
- (u32)priv->initgain_backup.xbagccore1);
- rtl8192_setBBreg(dev, rOFDM0_XCAGCCore1, bitmask,
- (u32)priv->initgain_backup.xcagccore1);
- rtl8192_setBBreg(dev, rOFDM0_XDAGCCore1, bitmask,
- (u32)priv->initgain_backup.xdagccore1);
- bitmask = bMaskByte2;
- rtl8192_setBBreg(dev, rCCK0_CCA, bitmask,
- (u32)priv->initgain_backup.cca);
-
- RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc50 is %x\n",
- priv->initgain_backup.xaagccore1);
- RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc58 is %x\n",
- priv->initgain_backup.xbagccore1);
- RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc60 is %x\n",
- priv->initgain_backup.xcagccore1);
- RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc68 is %x\n",
- priv->initgain_backup.xdagccore1);
- RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xa0a is %x\n",
- priv->initgain_backup.cca);
-
- rtl8192_phy_setTxPower(dev, priv->ieee80211->current_network.channel);
-
- if (dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
- /* FW DIG ON */
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1);
- break;
- default:
- RT_TRACE(COMP_SCAN, "Unknown IG Operation.\n");
- break;
- }
-}
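
[Editorial note: a condensed sketch of the backup/restore pairing in the removed IG_Backup/IG_Restore paths. The same four AGC core registers are saved before a scan and written back afterwards; register and mask names come from the deleted r819xU_phyreg.h, the helper names are illustrative.]

	static const u32 agc_regs[] = {
		rOFDM0_XAAGCCore1, rOFDM0_XBAGCCore1,
		rOFDM0_XCAGCCore1, rOFDM0_XDAGCCore1,
	};

	static void backup_agc(struct net_device *dev, u8 *store)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(agc_regs); i++)
			store[i] = (u8)rtl8192_QueryBBReg(dev, agc_regs[i],
							  bMaskByte0);
	}

	static void restore_agc(struct net_device *dev, const u8 *store)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(agc_regs); i++)
			rtl8192_setBBreg(dev, agc_regs[i], 0x7f, store[i]);
	}
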
diff --git a/drivers/staging/rtl8192u/r819xU_phy.h b/drivers/staging/rtl8192u/r819xU_phy.h
deleted file mode 100644
index bafaa6a90c50..000000000000
--- a/drivers/staging/rtl8192u/r819xU_phy.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _R819XU_PHY_H
-#define _R819XU_PHY_H
-
-/* Channel switch: The size of command tables for switch channel */
-#define MAX_PRECMD_CNT 16
-#define MAX_RFDEPENDCMD_CNT 16
-#define MAX_POSTCMD_CNT 16
-
-enum baseband_config_type {
- BASEBAND_CONFIG_PHY_REG = 0, //Radio Path A
- BASEBAND_CONFIG_AGC_TAB = 1, //Radio Path B
-};
-
-enum switch_chan_cmd_id {
- CMD_ID_END,
- CMD_ID_SET_TX_PWR_LEVEL,
- CMD_ID_WRITE_PORT_ULONG,
- CMD_ID_WRITE_PORT_USHORT,
- CMD_ID_WRITE_PORT_UCHAR,
- CMD_ID_RF_WRITE_REG,
-};
-
-/* -----------------------Define structure---------------------- */
-/* 1. Switch channel related */
-struct sw_chnl_cmd {
- enum switch_chan_cmd_id cmd_id;
- u32 para_1;
- u32 para_2;
- u32 ms_delay;
-} __packed;
-
-enum hw90_block_e {
- HW90_BLOCK_MAC = 0,
- HW90_BLOCK_PHY0 = 1,
- HW90_BLOCK_PHY1 = 2,
- HW90_BLOCK_RF = 3,
- HW90_BLOCK_MAXIMUM = 4, /* Never use this */
-};
-
-enum rf90_radio_path_e {
- RF90_PATH_A = 0, /* Radio Path A */
- RF90_PATH_B = 1, /* Radio Path B */
- RF90_PATH_C = 2, /* Radio Path C */
- RF90_PATH_D = 3, /* Radio Path D */
- RF90_PATH_MAX /* Max RF number 92 support */
-};
-
-u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device *dev, u32 e_rfpath);
-void rtl8192_setBBreg(struct net_device *dev, u32 reg_addr,
- u32 bitmask, u32 data);
-u32 rtl8192_QueryBBReg(struct net_device *dev, u32 reg_addr, u32 bitmask);
-void rtl8192_phy_SetRFReg(struct net_device *dev,
- enum rf90_radio_path_e e_rfpath,
- u32 reg_addr, u32 bitmask, u32 data);
-u32 rtl8192_phy_QueryRFReg(struct net_device *dev,
- enum rf90_radio_path_e e_rfpath,
- u32 reg_addr, u32 bitmask);
-void rtl8192_phy_configmac(struct net_device *dev);
-u8 rtl8192_phy_checkBBAndRF(struct net_device *dev,
- enum hw90_block_e CheckBlock,
- enum rf90_radio_path_e e_rfpath);
-void rtl8192_BBConfig(struct net_device *dev);
-void rtl8192_phy_getTxPower(struct net_device *dev);
-void rtl8192_phy_setTxPower(struct net_device *dev, u8 channel);
-void rtl8192_phy_RFConfig(struct net_device *dev);
-void rtl8192_phy_updateInitGain(struct net_device *dev);
-u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
- enum rf90_radio_path_e e_rfpath);
-
-u8 rtl8192_phy_SwChnl(struct net_device *dev, u8 channel);
-void rtl8192_SetBWMode(struct net_device *dev,
- enum ht_channel_width bandwidth,
- enum ht_extension_chan_offset offset);
-void rtl8192_SwChnl_WorkItem(struct net_device *dev);
-void rtl8192_SetBWModeWorkItem(struct net_device *dev);
-void InitialGain819xUsb(struct net_device *dev, u8 Operation);
-
-void InitialGainOperateWorkItemCallBack(struct work_struct *work);
-
-#endif
diff --git a/drivers/staging/rtl8192u/r819xU_phyreg.h b/drivers/staging/rtl8192u/r819xU_phyreg.h
deleted file mode 100644
index c9669821b278..000000000000
--- a/drivers/staging/rtl8192u/r819xU_phyreg.h
+++ /dev/null
@@ -1,143 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _R819XU_PHYREG_H
-#define _R819XU_PHYREG_H
-
-#define RF_DATA 0x1d4 /* FW will write RF data in the register.*/
-
-/* page8 */
-#define rFPGA0_RFMOD 0x800 /* RF mode & CCK TxSC */
-#define rFPGA0_TxGainStage 0x80c
-#define rFPGA0_XA_HSSIParameter1 0x820
-#define rFPGA0_XA_HSSIParameter2 0x824
-#define rFPGA0_XB_HSSIParameter1 0x828
-#define rFPGA0_XB_HSSIParameter2 0x82c
-#define rFPGA0_XC_HSSIParameter1 0x830
-#define rFPGA0_XC_HSSIParameter2 0x834
-#define rFPGA0_XD_HSSIParameter1 0x838
-#define rFPGA0_XD_HSSIParameter2 0x83c
-#define rFPGA0_XA_LSSIParameter 0x840
-#define rFPGA0_XB_LSSIParameter 0x844
-#define rFPGA0_XC_LSSIParameter 0x848
-#define rFPGA0_XD_LSSIParameter 0x84c
-#define rFPGA0_XAB_SwitchControl 0x858
-#define rFPGA0_XCD_SwitchControl 0x85c
-#define rFPGA0_XA_RFInterfaceOE 0x860
-#define rFPGA0_XB_RFInterfaceOE 0x864
-#define rFPGA0_XC_RFInterfaceOE 0x868
-#define rFPGA0_XD_RFInterfaceOE 0x86c
-#define rFPGA0_XAB_RFInterfaceSW 0x870
-#define rFPGA0_XCD_RFInterfaceSW 0x874
-#define rFPGA0_XAB_RFParameter 0x878
-#define rFPGA0_XCD_RFParameter 0x87c
-#define rFPGA0_AnalogParameter1 0x880
-#define rFPGA0_AnalogParameter4 0x88c
-#define rFPGA0_XA_LSSIReadBack 0x8a0
-#define rFPGA0_XB_LSSIReadBack 0x8a4
-#define rFPGA0_XC_LSSIReadBack 0x8a8
-#define rFPGA0_XD_LSSIReadBack 0x8ac
-#define rFPGA0_XAB_RFInterfaceRB 0x8e0
-#define rFPGA0_XCD_RFInterfaceRB 0x8e4
-
-/* page 9 */
-#define rFPGA1_RFMOD 0x900 /* RF mode & OFDM TxSC */
-
-/* page a */
-#define rCCK0_System 0xa00
-#define rCCK0_AFESetting 0xa04
-#define rCCK0_CCA 0xa08
-#define rCCK0_TxFilter1 0xa20
-#define rCCK0_TxFilter2 0xa24
-#define rCCK0_DebugPort 0xa28 /* debug port and Tx filter3 */
-
-/* page c */
-#define rOFDM0_TRxPathEnable 0xc04
-#define rOFDM0_XARxAFE 0xc10 /* RxIQ DC offset, Rx digital filter, DC notch filter */
-#define rOFDM0_XARxIQImbalance 0xc14 /* RxIQ imbalance matrix */
-#define rOFDM0_XBRxAFE 0xc18
-#define rOFDM0_XBRxIQImbalance 0xc1c
-#define rOFDM0_XCRxAFE 0xc20
-#define rOFDM0_XCRxIQImbalance 0xc24
-#define rOFDM0_XDRxAFE 0xc28
-#define rOFDM0_XDRxIQImbalance 0xc2c
-#define rOFDM0_RxDetector1 0xc30 /* PD,BW & SBD */
-#define rOFDM0_RxDetector2 0xc34 /* SBD & Fame Sync.*/
-#define rOFDM0_RxDetector3 0xc38 /* Frame Sync.*/
-#define rOFDM0_ECCAThreshold 0xc4c /* energy CCA */
-#define rOFDM0_XAAGCCore1 0xc50
-#define rOFDM0_XAAGCCore2 0xc54
-#define rOFDM0_XBAGCCore1 0xc58
-#define rOFDM0_XBAGCCore2 0xc5c
-#define rOFDM0_XCAGCCore1 0xc60
-#define rOFDM0_XCAGCCore2 0xc64
-#define rOFDM0_XDAGCCore1 0xc68
-#define rOFDM0_XDAGCCore2 0xc6c
-#define rOFDM0_XATxIQImbalance 0xc80
-#define rOFDM0_XATxAFE 0xc84
-#define rOFDM0_XBTxIQImbalance 0xc88
-#define rOFDM0_XBTxAFE 0xc8c
-#define rOFDM0_XCTxIQImbalance 0xc90
-#define rOFDM0_XCTxAFE 0xc94
-#define rOFDM0_XDTxIQImbalance 0xc98
-#define rOFDM0_XDTxAFE 0xc9c
-
-/* page d */
-#define rOFDM1_LSTF 0xd00
-#define rOFDM1_TRxPathEnable 0xd04
-
-/* page e */
-#define rTxAGC_Rate18_06 0xe00
-#define rTxAGC_Rate54_24 0xe04
-#define rTxAGC_CCK_Mcs32 0xe08
-#define rTxAGC_Mcs03_Mcs00 0xe10
-#define rTxAGC_Mcs07_Mcs04 0xe14
-#define rTxAGC_Mcs11_Mcs08 0xe18
-#define rTxAGC_Mcs15_Mcs12 0xe1c
-
-/* RF
- * Zebra1
- */
-#define rZebra1_Channel 0x7
-
-/* Zebra4 */
-#define rGlobalCtrl 0
-
-/* Bit Mask
- * page-8
- */
-#define bRFMOD 0x1
-#define bCCKEn 0x1000000
-#define bOFDMEn 0x2000000
-#define bXBTxAGC 0xf00
-#define bXCTxAGC 0xf000
-#define b3WireDataLength 0x800
-#define b3WireAddressLength 0x400
-#define bRFSI_RFENV 0x10
-#define bLSSIReadAddress 0x3f000000 /* LSSI "Read" Address */
-#define bLSSIReadEdge 0x80000000 /* LSSI "Read" edge signal */
-#define bLSSIReadBackData 0xfff
-#define bXtalCap 0x0f000000
-
-/* page-a */
-#define bCCKSideBand 0x10
-
-/* page e */
-#define bTxAGCRateCCK 0x7f00
-
-/* RF
- * Zebra1
- */
-#define bZebra1_ChannelNum 0xf80
-
-/* RTL8258 */
-/* for PutRegsetting & GetRegSetting BitMask */
-#define bMaskByte0 0xff
-#define bMaskByte1 0xff00
-#define bMaskByte2 0xff0000
-#define bMaskHWord 0xffff0000
-#define bMaskLWord 0x0000ffff
-#define bMaskDWord 0xffffffff
-
-/* for PutRFRegsetting & GetRFRegSetting BitMask */
-#define bMask12Bits 0xfff
-
-#endif /* __INC_HAL8190PCIPHYREG_H */
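
[Editorial note: the bMask*/bit-field constants in the deleted header are consumed through the bitmask argument of rtl8192_setBBreg()/rtl8192_QueryBBReg(); the usual pattern behind such an interface is the read-modify-write sketched below (illustrative helper, not the driver's actual implementation).]

	static u32 field_write(u32 reg_val, u32 mask, u32 field)
	{
		u32 shift = ffs(mask) - 1;	/* offset of the lowest mask bit */

		return (reg_val & ~mask) | ((field << shift) & mask);
	}
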
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
index a3bff27af523..7764896a04ea 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
@@ -153,7 +153,7 @@ static u32 phy_RFSerialRead_8723B(
* @Data: The new register Data in the target bit position
* of the target to be read
*
- * .. Note:: Threre are three types of serial operations:
+ * .. Note:: There are three types of serial operations:
* 1. Software serial write
* 2. Hardware LSSI-Low Speed Serial Interface
* 3. Hardware HSSI-High speed
@@ -280,7 +280,7 @@ void PHY_SetRFReg_8723B(
/*-----------------------------------------------------------------------------
- * PHY_MACConfig8192C - Condig MAC by header file or parameter file.
+ * PHY_MACConfig8192C - Config MAC by header file or parameter file.
*
* Revised History:
* When Who Remark
@@ -325,7 +325,7 @@ static void phy_InitBBRFRegisterDefinition(struct adapter *Adapter)
pHalData->PHYRegDef[RF_PATH_A].rfHSSIPara2 = rFPGA0_XA_HSSIParameter2; /* wire control parameter2 */
pHalData->PHYRegDef[RF_PATH_B].rfHSSIPara2 = rFPGA0_XB_HSSIParameter2; /* wire control parameter2 */
- /* Tranceiver Readback LSSI/HSPI mode */
+ /* Transceiver Readback LSSI/HSPI mode */
pHalData->PHYRegDef[RF_PATH_A].rfLSSIReadBack = rFPGA0_XA_LSSIReadBack;
pHalData->PHYRegDef[RF_PATH_B].rfLSSIReadBack = rFPGA0_XB_LSSIReadBack;
pHalData->PHYRegDef[RF_PATH_A].rfLSSIReadBackPi = TransceiverA_HSPI_Readback;
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 08543a3936da..86d32e3b3282 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -117,7 +117,7 @@ static int slave_configure(struct scsi_device *sdev)
} while (0)
/* queue a command */
-/* This is always called with scsi_lock(host) held */
+/* This is always called with spin_lock_irq(host->host_lock) held */
static int queuecommand_lck(struct scsi_cmnd *srb)
{
void (*done)(struct scsi_cmnd *) = scsi_done;
@@ -159,18 +159,18 @@ static int command_abort(struct scsi_cmnd *srb)
struct rtsx_dev *dev = host_to_rtsx(host);
struct rtsx_chip *chip = dev->chip;
- scsi_lock(host);
+ spin_lock_irq(host->host_lock);
/* Is this command still active? */
if (chip->srb != srb) {
- scsi_unlock(host);
+ spin_unlock_irq(host->host_lock);
dev_info(&dev->pci->dev, "-- nothing to abort\n");
return FAILED;
}
rtsx_set_stat(chip, RTSX_STAT_ABORT);
- scsi_unlock(host);
+ spin_unlock_irq(host->host_lock);
/* Wait for the aborted command to finish */
wait_for_completion(&dev->notify);
@@ -366,7 +366,7 @@ static int rtsx_control_thread(void *__dev)
}
/* lock access to the state */
- scsi_lock(host);
+ spin_lock_irq(host->host_lock);
/* has the command aborted ? */
if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
@@ -374,7 +374,7 @@ static int rtsx_control_thread(void *__dev)
goto skip_for_abort;
}
- scsi_unlock(host);
+ spin_unlock_irq(host->host_lock);
/* reject the command if the direction indicator
* is UNKNOWN
@@ -402,7 +402,7 @@ static int rtsx_control_thread(void *__dev)
}
/* lock access to the state */
- scsi_lock(host);
+ spin_lock_irq(host->host_lock);
/* did the command already complete because of a disconnect? */
if (!chip->srb)
@@ -424,7 +424,7 @@ skip_for_abort:
/* finished working on this command */
chip->srb = NULL;
- scsi_unlock(host);
+ spin_unlock_irq(host->host_lock);
/* unlock the device pointers */
mutex_unlock(&dev->dev_mutex);
@@ -603,9 +603,9 @@ static void quiesce_and_remove_host(struct rtsx_dev *dev)
* interrupt a SCSI-scan or device-reset delay
*/
mutex_lock(&dev->dev_mutex);
- scsi_lock(host);
+ spin_lock_irq(host->host_lock);
rtsx_set_stat(chip, RTSX_STAT_DISCONNECT);
- scsi_unlock(host);
+ spin_unlock_irq(host->host_lock);
mutex_unlock(&dev->dev_mutex);
wake_up(&dev->delay_wait);
wait_for_completion(&dev->scanning_done);
@@ -621,10 +621,10 @@ static void quiesce_and_remove_host(struct rtsx_dev *dev)
mutex_lock(&dev->dev_mutex);
if (chip->srb) {
chip->srb->result = DID_NO_CONNECT << 16;
- scsi_lock(host);
+ spin_lock_irq(host->host_lock);
scsi_done(dev->chip->srb);
chip->srb = NULL;
- scsi_unlock(host);
+ spin_unlock_irq(host->host_lock);
}
mutex_unlock(&dev->dev_mutex);
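
[Editorial note: the rtsx changes above only open-code the removed scsi_lock()/scsi_unlock() wrappers; the critical sections are unchanged. A condensed restatement of command_abort() after the change (the function name here is illustrative, and the wait for the aborted command is omitted):]

	static int rtsx_abort_locked(struct Scsi_Host *host,
				     struct rtsx_chip *chip,
				     struct scsi_cmnd *srb)
	{
		spin_lock_irq(host->host_lock);
		if (chip->srb != srb) {
			spin_unlock_irq(host->host_lock);
			return FAILED;		/* nothing to abort */
		}
		rtsx_set_stat(chip, RTSX_STAT_ABORT);
		spin_unlock_irq(host->host_lock);
		return SUCCESS;
	}
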
diff --git a/drivers/staging/rts5208/rtsx.h b/drivers/staging/rts5208/rtsx.h
index 2e101da83220..ec6f5b07390b 100644
--- a/drivers/staging/rts5208/rtsx.h
+++ b/drivers/staging/rts5208/rtsx.h
@@ -39,25 +39,6 @@
/*
* macros for easy use
*/
-#define rtsx_writel(chip, reg, value) \
- iowrite32(value, (chip)->rtsx->remap_addr + reg)
-#define rtsx_readl(chip, reg) \
- ioread32((chip)->rtsx->remap_addr + reg)
-#define rtsx_writew(chip, reg, value) \
- iowrite16(value, (chip)->rtsx->remap_addr + reg)
-#define rtsx_readw(chip, reg) \
- ioread16((chip)->rtsx->remap_addr + reg)
-#define rtsx_writeb(chip, reg, value) \
- iowrite8(value, (chip)->rtsx->remap_addr + reg)
-#define rtsx_readb(chip, reg) \
- ioread8((chip)->rtsx->remap_addr + reg)
-
-#define rtsx_read_config_byte(chip, where, val) \
- pci_read_config_byte((chip)->rtsx->pci, where, val)
-
-#define rtsx_write_config_byte(chip, where, val) \
- pci_write_config_byte((chip)->rtsx->pci, where, val)
-
#define wait_timeout_x(task_state, msecs) \
do { \
set_current_state((task_state)); \
@@ -127,13 +108,6 @@ static inline struct rtsx_dev *host_to_rtsx(struct Scsi_Host *host)
return (struct rtsx_dev *)host->hostdata;
}
-/*
- * The scsi_lock() and scsi_unlock() macros protect the sm_state and the
- * single queue element srb for write access
- */
-#define scsi_unlock(host) spin_unlock_irq(host->host_lock)
-#define scsi_lock(host) spin_lock_irq(host->host_lock)
-
#define lock_state(chip) spin_lock_irq(&((chip)->rtsx->reg_lock))
#define unlock_state(chip) spin_unlock_irq(&((chip)->rtsx->reg_lock))
@@ -147,4 +121,44 @@ enum xfer_buf_dir {TO_XFER_BUF, FROM_XFER_BUF};
#include "rtsx_sys.h"
#include "general.h"
+static inline void rtsx_writel(struct rtsx_chip *chip, u32 reg, u32 value)
+{
+ iowrite32(value, chip->rtsx->remap_addr + reg);
+}
+
+static inline u32 rtsx_readl(struct rtsx_chip *chip, u32 reg)
+{
+ return ioread32(chip->rtsx->remap_addr + reg);
+}
+
+static inline void rtsx_writew(struct rtsx_chip *chip, u32 reg, u16 value)
+{
+ iowrite16(value, chip->rtsx->remap_addr + reg);
+}
+
+static inline u16 rtsx_readw(struct rtsx_chip *chip, u32 reg)
+{
+ return ioread16(chip->rtsx->remap_addr + reg);
+}
+
+static inline void rtsx_writeb(struct rtsx_chip *chip, u32 reg, u8 value)
+{
+ iowrite8(value, chip->rtsx->remap_addr + reg);
+}
+
+static inline u8 rtsx_readb(struct rtsx_chip *chip, u32 reg)
+{
+ return ioread8((chip)->rtsx->remap_addr + reg);
+}
+
+static inline int rtsx_read_config_byte(struct rtsx_chip *chip, int where, u8 *val)
+{
+ return pci_read_config_byte(chip->rtsx->pci, where, val);
+}
+
+static inline int rtsx_write_config_byte(struct rtsx_chip *chip, int where, u8 val)
+{
+ return pci_write_config_byte(chip->rtsx->pci, where, val);
+}
+
#endif /* __REALTEK_RTSX_H */
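
[Editorial note: one practical effect of the macro-to-static-inline conversion above is that arguments are now type-checked, so a call with swapped arguments fails at compile time instead of silently producing a wrong MMIO access. A hypothetical caller (0x10 is an illustrative register offset):]

	u32 val = rtsx_readl(chip, 0x10);
	rtsx_writel(chip, 0x10, val | 0x1);
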
diff --git a/drivers/staging/sm750fb/ddk750_mode.c b/drivers/staging/sm750fb/ddk750_mode.c
index e00a6cb31947..3b25892af713 100644
--- a/drivers/staging/sm750fb/ddk750_mode.c
+++ b/drivers/staging/sm750fb/ddk750_mode.c
@@ -14,13 +14,13 @@
* in bit 29:27 of Display Control register.
*/
static unsigned long
-displayControlAdjust_SM750LE(struct mode_parameter *pModeParam,
- unsigned long dispControl)
+display_control_adjust_SM750LE(struct mode_parameter *mode_param,
+ unsigned long disp_control)
{
unsigned long x, y;
- x = pModeParam->horizontal_display_end;
- y = pModeParam->vertical_display_end;
+ x = mode_param->horizontal_display_end;
+ y = mode_param->vertical_display_end;
/*
* SM750LE has to set up the top-left and bottom-right
@@ -36,49 +36,48 @@ displayControlAdjust_SM750LE(struct mode_parameter *pModeParam,
((x - 1) & CRT_AUTO_CENTERING_BR_RIGHT_MASK));
/*
- * Assume common fields in dispControl have been properly set before
+ * Assume common fields in disp_control have been properly set before
* calling this function.
- * This function only sets the extra fields in dispControl.
+ * This function only sets the extra fields in disp_control.
*/
/* Clear bit 29:27 of display control register */
- dispControl &= ~CRT_DISPLAY_CTRL_CLK_MASK;
+ disp_control &= ~CRT_DISPLAY_CTRL_CLK_MASK;
/* Set bit 29:27 of display control register for the right clock */
/* Note that SM750LE only need to supported 7 resolutions. */
if (x == 800 && y == 600)
- dispControl |= CRT_DISPLAY_CTRL_CLK_PLL41;
+ disp_control |= CRT_DISPLAY_CTRL_CLK_PLL41;
else if (x == 1024 && y == 768)
- dispControl |= CRT_DISPLAY_CTRL_CLK_PLL65;
+ disp_control |= CRT_DISPLAY_CTRL_CLK_PLL65;
else if (x == 1152 && y == 864)
- dispControl |= CRT_DISPLAY_CTRL_CLK_PLL80;
+ disp_control |= CRT_DISPLAY_CTRL_CLK_PLL80;
else if (x == 1280 && y == 768)
- dispControl |= CRT_DISPLAY_CTRL_CLK_PLL80;
+ disp_control |= CRT_DISPLAY_CTRL_CLK_PLL80;
else if (x == 1280 && y == 720)
- dispControl |= CRT_DISPLAY_CTRL_CLK_PLL74;
+ disp_control |= CRT_DISPLAY_CTRL_CLK_PLL74;
else if (x == 1280 && y == 960)
- dispControl |= CRT_DISPLAY_CTRL_CLK_PLL108;
+ disp_control |= CRT_DISPLAY_CTRL_CLK_PLL108;
else if (x == 1280 && y == 1024)
- dispControl |= CRT_DISPLAY_CTRL_CLK_PLL108;
+ disp_control |= CRT_DISPLAY_CTRL_CLK_PLL108;
else /* default to VGA clock */
- dispControl |= CRT_DISPLAY_CTRL_CLK_PLL25;
+ disp_control |= CRT_DISPLAY_CTRL_CLK_PLL25;
/* Set bit 25:24 of display controller */
- dispControl |= (CRT_DISPLAY_CTRL_CRTSELECT | CRT_DISPLAY_CTRL_RGBBIT);
+ disp_control |= (CRT_DISPLAY_CTRL_CRTSELECT | CRT_DISPLAY_CTRL_RGBBIT);
/* Set bit 14 of display controller */
- dispControl |= DISPLAY_CTRL_CLOCK_PHASE;
+ disp_control |= DISPLAY_CTRL_CLOCK_PHASE;
- poke32(CRT_DISPLAY_CTRL, dispControl);
+ poke32(CRT_DISPLAY_CTRL, disp_control);
- return dispControl;
+ return disp_control;
}
/* only timing related registers will be programed */
-static int programModeRegisters(struct mode_parameter *pModeParam,
- struct pll_value *pll)
+static void program_mode_registers(struct mode_parameter *mode_param,
+ struct pll_value *pll)
{
- int ret = 0;
int cnt = 0;
unsigned int tmp, reg;
@@ -86,46 +85,46 @@ static int programModeRegisters(struct mode_parameter *pModeParam,
/* programe secondary pixel clock */
poke32(CRT_PLL_CTRL, sm750_format_pll_reg(pll));
- tmp = ((pModeParam->horizontal_total - 1) <<
+ tmp = ((mode_param->horizontal_total - 1) <<
CRT_HORIZONTAL_TOTAL_TOTAL_SHIFT) &
CRT_HORIZONTAL_TOTAL_TOTAL_MASK;
- tmp |= (pModeParam->horizontal_display_end - 1) &
+ tmp |= (mode_param->horizontal_display_end - 1) &
CRT_HORIZONTAL_TOTAL_DISPLAY_END_MASK;
poke32(CRT_HORIZONTAL_TOTAL, tmp);
- tmp = (pModeParam->horizontal_sync_width <<
+ tmp = (mode_param->horizontal_sync_width <<
CRT_HORIZONTAL_SYNC_WIDTH_SHIFT) &
CRT_HORIZONTAL_SYNC_WIDTH_MASK;
- tmp |= (pModeParam->horizontal_sync_start - 1) &
+ tmp |= (mode_param->horizontal_sync_start - 1) &
CRT_HORIZONTAL_SYNC_START_MASK;
poke32(CRT_HORIZONTAL_SYNC, tmp);
- tmp = ((pModeParam->vertical_total - 1) <<
+ tmp = ((mode_param->vertical_total - 1) <<
CRT_VERTICAL_TOTAL_TOTAL_SHIFT) &
CRT_VERTICAL_TOTAL_TOTAL_MASK;
- tmp |= (pModeParam->vertical_display_end - 1) &
+ tmp |= (mode_param->vertical_display_end - 1) &
CRT_VERTICAL_TOTAL_DISPLAY_END_MASK;
poke32(CRT_VERTICAL_TOTAL, tmp);
- tmp = ((pModeParam->vertical_sync_height <<
+ tmp = ((mode_param->vertical_sync_height <<
CRT_VERTICAL_SYNC_HEIGHT_SHIFT)) &
CRT_VERTICAL_SYNC_HEIGHT_MASK;
- tmp |= (pModeParam->vertical_sync_start - 1) &
+ tmp |= (mode_param->vertical_sync_start - 1) &
CRT_VERTICAL_SYNC_START_MASK;
poke32(CRT_VERTICAL_SYNC, tmp);
tmp = DISPLAY_CTRL_TIMING | DISPLAY_CTRL_PLANE;
- if (pModeParam->vertical_sync_polarity)
+ if (mode_param->vertical_sync_polarity)
tmp |= DISPLAY_CTRL_VSYNC_PHASE;
- if (pModeParam->horizontal_sync_polarity)
+ if (mode_param->horizontal_sync_polarity)
tmp |= DISPLAY_CTRL_HSYNC_PHASE;
if (sm750_get_chip_type() == SM750LE) {
- displayControlAdjust_SM750LE(pModeParam, tmp);
+ display_control_adjust_SM750LE(mode_param, tmp);
} else {
reg = peek32(CRT_DISPLAY_CTRL) &
~(DISPLAY_CTRL_VSYNC_PHASE |
@@ -140,40 +139,40 @@ static int programModeRegisters(struct mode_parameter *pModeParam,
poke32(PANEL_PLL_CTRL, sm750_format_pll_reg(pll));
- reg = ((pModeParam->horizontal_total - 1) <<
+ reg = ((mode_param->horizontal_total - 1) <<
PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT) &
PANEL_HORIZONTAL_TOTAL_TOTAL_MASK;
- reg |= ((pModeParam->horizontal_display_end - 1) &
+ reg |= ((mode_param->horizontal_display_end - 1) &
PANEL_HORIZONTAL_TOTAL_DISPLAY_END_MASK);
poke32(PANEL_HORIZONTAL_TOTAL, reg);
poke32(PANEL_HORIZONTAL_SYNC,
- ((pModeParam->horizontal_sync_width <<
+ ((mode_param->horizontal_sync_width <<
PANEL_HORIZONTAL_SYNC_WIDTH_SHIFT) &
PANEL_HORIZONTAL_SYNC_WIDTH_MASK) |
- ((pModeParam->horizontal_sync_start - 1) &
+ ((mode_param->horizontal_sync_start - 1) &
PANEL_HORIZONTAL_SYNC_START_MASK));
poke32(PANEL_VERTICAL_TOTAL,
- (((pModeParam->vertical_total - 1) <<
+ (((mode_param->vertical_total - 1) <<
PANEL_VERTICAL_TOTAL_TOTAL_SHIFT) &
PANEL_VERTICAL_TOTAL_TOTAL_MASK) |
- ((pModeParam->vertical_display_end - 1) &
+ ((mode_param->vertical_display_end - 1) &
PANEL_VERTICAL_TOTAL_DISPLAY_END_MASK));
poke32(PANEL_VERTICAL_SYNC,
- ((pModeParam->vertical_sync_height <<
+ ((mode_param->vertical_sync_height <<
PANEL_VERTICAL_SYNC_HEIGHT_SHIFT) &
PANEL_VERTICAL_SYNC_HEIGHT_MASK) |
- ((pModeParam->vertical_sync_start - 1) &
+ ((mode_param->vertical_sync_start - 1) &
PANEL_VERTICAL_SYNC_START_MASK));
tmp = DISPLAY_CTRL_TIMING | DISPLAY_CTRL_PLANE;
- if (pModeParam->vertical_sync_polarity)
+ if (mode_param->vertical_sync_polarity)
tmp |= DISPLAY_CTRL_VSYNC_PHASE;
- if (pModeParam->horizontal_sync_polarity)
+ if (mode_param->horizontal_sync_polarity)
tmp |= DISPLAY_CTRL_HSYNC_PHASE;
- if (pModeParam->clock_phase_polarity)
+ if (mode_param->clock_phase_polarity)
tmp |= DISPLAY_CTRL_CLOCK_PHASE;
reserved = PANEL_DISPLAY_CTRL_RESERVED_MASK |
@@ -201,13 +200,10 @@ static int programModeRegisters(struct mode_parameter *pModeParam,
break;
poke32(PANEL_DISPLAY_CTRL, tmp | reg);
}
- } else {
- ret = -1;
}
- return ret;
}
-int ddk750_setModeTiming(struct mode_parameter *parm, enum clock_type clock)
+int ddk750_set_mode_timing(struct mode_parameter *parm, enum clock_type clock)
{
struct pll_value pll;
@@ -220,6 +216,6 @@ int ddk750_setModeTiming(struct mode_parameter *parm, enum clock_type clock)
outb_p(0x88, 0x3d4);
outb_p(0x06, 0x3d5);
}
- programModeRegisters(parm, &pll);
+ program_mode_registers(parm, &pll);
return 0;
}
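
[Editorial note: the repeated shift-and-mask pattern in program_mode_registers() above, shown once as a standalone sketch (the helper name is illustrative): each timing value is biased by -1, shifted into its field, and masked before being combined into the 32-bit timing register.]

	static unsigned int pack_timing(unsigned int total, unsigned int shift,
					unsigned int total_mask,
					unsigned int display_end,
					unsigned int end_mask)
	{
		unsigned int tmp;

		tmp = ((total - 1) << shift) & total_mask;
		tmp |= (display_end - 1) & end_mask;
		return tmp;
	}
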
diff --git a/drivers/staging/sm750fb/ddk750_mode.h b/drivers/staging/sm750fb/ddk750_mode.h
index 2df78a0937b2..1b70885f85e5 100644
--- a/drivers/staging/sm750fb/ddk750_mode.h
+++ b/drivers/staging/sm750fb/ddk750_mode.h
@@ -33,5 +33,5 @@ struct mode_parameter {
enum spolarity clock_phase_polarity;
};
-int ddk750_setModeTiming(struct mode_parameter *parm, enum clock_type clock);
+int ddk750_set_mode_timing(struct mode_parameter *parm, enum clock_type clock);
#endif
diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c
index 71247eaf26ee..4bc89218c11c 100644
--- a/drivers/staging/sm750fb/sm750_hw.c
+++ b/drivers/staging/sm750fb/sm750_hw.c
@@ -305,7 +305,7 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
clock = SECONDARY_PLL;
pr_debug("Request pixel clock = %lu\n", modparm.pixel_clock);
- ret = ddk750_setModeTiming(&modparm, clock);
+ ret = ddk750_set_mode_timing(&modparm, clock);
if (ret) {
pr_err("Set mode timing failed\n");
goto exit;
diff --git a/drivers/staging/vc04_services/Makefile b/drivers/staging/vc04_services/Makefile
index 44794bdf6173..e8b897a7b9a6 100644
--- a/drivers/staging/vc04_services/Makefile
+++ b/drivers/staging/vc04_services/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_BCM2835_VCHIQ) += vchiq.o
vchiq-objs := \
interface/vchiq_arm/vchiq_core.o \
interface/vchiq_arm/vchiq_arm.o \
+ interface/vchiq_arm/vchiq_bus.o \
interface/vchiq_arm/vchiq_debugfs.o \
interface/vchiq_arm/vchiq_connected.o \
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
index 00bc898b0189..b74cb104e9de 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
@@ -1,12 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011 Broadcom Corporation. All rights reserved. */
-#include <linux/platform_device.h>
-
+#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include "../interface/vchiq_arm/vchiq_bus.h"
#include "bcm2835.h"
static bool enable_hdmi;
@@ -268,11 +268,17 @@ static int snd_add_child_devices(struct device *device, u32 numchans)
return 0;
}
-static int snd_bcm2835_alsa_probe(struct platform_device *pdev)
+static int snd_bcm2835_alsa_probe(struct vchiq_device *device)
{
- struct device *dev = &pdev->dev;
+ struct device *dev = &device->dev;
int err;
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(dev, "dma_set_mask_and_coherent failed: %d\n", err);
+ return err;
+ }
+
if (num_channels <= 0 || num_channels > MAX_SUBSTREAMS) {
num_channels = MAX_SUBSTREAMS;
dev_warn(dev, "Illegal num_channels value, will use %u\n",
@@ -292,32 +298,38 @@ static int snd_bcm2835_alsa_probe(struct platform_device *pdev)
#ifdef CONFIG_PM
-static int snd_bcm2835_alsa_suspend(struct platform_device *pdev,
+static int snd_bcm2835_alsa_suspend(struct vchiq_device *device,
pm_message_t state)
{
return 0;
}
-static int snd_bcm2835_alsa_resume(struct platform_device *pdev)
+static int snd_bcm2835_alsa_resume(struct vchiq_device *device)
{
return 0;
}
#endif
-static struct platform_driver bcm2835_alsa_driver = {
+static struct vchiq_device_id device_id_table[] = {
+ { .name = "bcm2835-audio" },
+ {}
+};
+MODULE_DEVICE_TABLE(vchiq, device_id_table);
+
+static struct vchiq_driver bcm2835_alsa_driver = {
.probe = snd_bcm2835_alsa_probe,
#ifdef CONFIG_PM
.suspend = snd_bcm2835_alsa_suspend,
.resume = snd_bcm2835_alsa_resume,
#endif
+ .id_table = device_id_table,
.driver = {
- .name = "bcm2835_audio",
+ .name = "bcm2835-audio",
},
};
-module_platform_driver(bcm2835_alsa_driver);
+module_vchiq_driver(bcm2835_alsa_driver);
MODULE_AUTHOR("Dom Cobley");
MODULE_DESCRIPTION("Alsa driver for BCM2835 chip");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:bcm2835_audio");
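
[Editorial note: the audio conversion above shows the general shape of a function driver under the new vchiq bus. A skeleton, assuming only the interface visible in this patch (vchiq_device, vchiq_driver, module_vchiq_driver, a name-based id table); names such as bcm2835-example are illustrative:]

	static int example_probe(struct vchiq_device *device)
	{
		/* the 32-bit DMA mask is now set per function driver instead of
		 * being inherited from the old platform child device */
		return dma_set_mask_and_coherent(&device->dev, DMA_BIT_MASK(32));
	}

	static const struct vchiq_device_id example_id_table[] = {
		{ .name = "bcm2835-example" },
		{}
	};
	MODULE_DEVICE_TABLE(vchiq, example_id_table);

	static struct vchiq_driver example_driver = {
		.probe    = example_probe,
		.id_table = example_id_table,
		.driver   = {
			.name = "bcm2835-example",
		},
	};
	module_vchiq_driver(example_driver);
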
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
index 0a81383c475a..49ec5b496edb 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
@@ -84,7 +84,6 @@ struct bcm2835_alsa_stream {
int idx;
};
-int snd_bcm2835_new_ctl(struct bcm2835_chip *chip);
int snd_bcm2835_new_pcm(struct bcm2835_chip *chip, const char *name,
int idx, enum snd_bcm2835_route route,
u32 numchannels, bool spdif);
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index 346d00df815a..e6e89784d84b 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -11,6 +11,7 @@
* Luke Diamand @ Broadcom
*/
+#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -24,8 +25,8 @@
#include <media/v4l2-event.h>
#include <media/v4l2-common.h>
#include <linux/delay.h>
-#include <linux/platform_device.h>
+#include "../interface/vchiq_arm/vchiq_bus.h"
#include "../vchiq-mmal/mmal-common.h"
#include "../vchiq-mmal/mmal-encodings.h"
#include "../vchiq-mmal/mmal-vchiq.h"
@@ -854,7 +855,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
return -EINVAL;
inp->type = V4L2_INPUT_TYPE_CAMERA;
- sprintf((char *)inp->name, "Camera %u", inp->index);
+ snprintf((char *)inp->name, sizeof(inp->name), "Camera %u", inp->index);
return 0;
}
@@ -1841,7 +1842,7 @@ static struct v4l2_format default_v4l2_format = {
.fmt.pix.sizeimage = 1024 * 768,
};
-static int bcm2835_mmal_probe(struct platform_device *pdev)
+static int bcm2835_mmal_probe(struct vchiq_device *device)
{
int ret;
struct bcm2835_mmal_dev *dev;
@@ -1852,6 +1853,12 @@ static int bcm2835_mmal_probe(struct platform_device *pdev)
unsigned int resolutions[MAX_BCM2835_CAMERAS][2];
int i;
+ ret = dma_set_mask_and_coherent(&device->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&device->dev, "dma_set_mask_and_coherent failed: %d\n", ret);
+ return ret;
+ }
+
ret = vchiq_mmal_init(&instance);
if (ret < 0)
return ret;
@@ -1896,7 +1903,7 @@ static int bcm2835_mmal_probe(struct platform_device *pdev)
&camera_instance);
ret = v4l2_device_register(NULL, &dev->v4l2_dev);
if (ret) {
- dev_err(&pdev->dev, "%s: could not register V4L2 device: %d\n",
+ dev_err(&device->dev, "%s: could not register V4L2 device: %d\n",
__func__, ret);
goto free_dev;
}
@@ -1976,7 +1983,7 @@ cleanup_mmal:
return ret;
}
-static void bcm2835_mmal_remove(struct platform_device *pdev)
+static void bcm2835_mmal_remove(struct vchiq_device *device)
{
int camera;
struct vchiq_mmal_instance *instance = gdev[0]->instance;
@@ -1988,17 +1995,23 @@ static void bcm2835_mmal_remove(struct platform_device *pdev)
vchiq_mmal_finalise(instance);
}
-static struct platform_driver bcm2835_camera_driver = {
+static const struct vchiq_device_id device_id_table[] = {
+ { .name = "bcm2835-camera" },
+ {}
+};
+MODULE_DEVICE_TABLE(vchiq, device_id_table);
+
+static struct vchiq_driver bcm2835_camera_driver = {
.probe = bcm2835_mmal_probe,
- .remove_new = bcm2835_mmal_remove,
+ .remove = bcm2835_mmal_remove,
+ .id_table = device_id_table,
.driver = {
.name = "bcm2835-camera",
},
};
-module_platform_driver(bcm2835_camera_driver)
+module_vchiq_driver(bcm2835_camera_driver)
MODULE_DESCRIPTION("Broadcom 2835 MMAL video capture");
MODULE_AUTHOR("Vincent Sanders");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:bcm2835-camera");
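
[Editorial note: the sprintf() to snprintf() changes in this patch all follow the same pattern: bound the write by the destination size so an oversized result is truncated rather than overflowing the buffer. A minimal sketch (helper name illustrative):]

	static void set_input_name(char *buf, size_t len, unsigned int index)
	{
		/* truncates rather than overflows if index ever grows too large */
		snprintf(buf, len, "Camera %u", index);
	}
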
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index aa2313f3bcab..9fb8f657cc78 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -12,6 +12,7 @@
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/device.h>
+#include <linux/device/bus.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
@@ -33,6 +34,7 @@
#include "vchiq_core.h"
#include "vchiq_ioctl.h"
#include "vchiq_arm.h"
+#include "vchiq_bus.h"
#include "vchiq_debugfs.h"
#include "vchiq_connected.h"
#include "vchiq_pagelist.h"
@@ -58,15 +60,16 @@
#define KEEPALIVE_VER 1
#define KEEPALIVE_VER_MIN KEEPALIVE_VER
-/* Run time control of log level, based on KERN_XXX level. */
-int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
-int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
-
DEFINE_SPINLOCK(msg_queue_spinlock);
struct vchiq_state g_state;
-static struct platform_device *bcm2835_camera;
-static struct platform_device *bcm2835_audio;
+/*
+ * The devices implemented in the VCHIQ firmware are not discoverable,
+ * so we need to maintain a list of them in order to register them with
+ * the interface.
+ */
+static struct vchiq_device *bcm2835_audio;
+static struct vchiq_device *bcm2835_camera;
struct vchiq_drvdata {
const unsigned int cache_line_size;
@@ -252,7 +255,8 @@ create_pagelist(struct vchiq_instance *instance, char *buf, char __user *ubuf,
pagelist = dma_alloc_coherent(instance->state->dev, pagelist_size, &dma_addr,
GFP_KERNEL);
- vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);
+ vchiq_log_trace(instance->state->dev, VCHIQ_ARM,
+ "%s - %pK", __func__, pagelist);
if (!pagelist)
return NULL;
@@ -307,9 +311,9 @@ create_pagelist(struct vchiq_instance *instance, char *buf, char __user *ubuf,
type == PAGELIST_READ, pages);
if (actual_pages != num_pages) {
- vchiq_log_info(vchiq_arm_log_level,
- "%s - only %d/%d pages locked",
- __func__, actual_pages, num_pages);
+ vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
+ "%s - only %d/%d pages locked",
+ __func__, actual_pages, num_pages);
/* This is probably due to the process being killed */
if (actual_pages > 0)
@@ -403,8 +407,8 @@ free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagel
struct page **pages = pagelistinfo->pages;
unsigned int num_pages = pagelistinfo->num_pages;
- vchiq_log_trace(vchiq_arm_log_level, "%s - %pK, %d",
- __func__, pagelistinfo->pagelist, actual);
+ vchiq_log_trace(instance->state->dev, VCHIQ_ARM,
+ "%s - %pK, %d", __func__, pagelistinfo->pagelist, actual);
/*
* NOTE: dma_unmap_sg must be called before the
@@ -499,7 +503,7 @@ static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state
WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);
- vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
+ vchiq_slot_zero = vchiq_init_slots(dev, slot_mem, slot_mem_size);
if (!vchiq_slot_zero)
return -ENOMEM;
@@ -552,8 +556,8 @@ static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state
return -ENXIO;
}
- vchiq_log_info(vchiq_arm_log_level, "vchiq_init - done (slots %pK, phys %pad)",
- vchiq_slot_zero, &slot_phys);
+ vchiq_log_debug(&pdev->dev, VCHIQ_ARM, "vchiq_init - done (slots %pK, phys %pad)",
+ vchiq_slot_zero, &slot_phys);
vchiq_call_connected_callbacks();
@@ -683,17 +687,18 @@ int vchiq_initialise(struct vchiq_instance **instance_out)
usleep_range(500, 600);
}
if (i == VCHIQ_INIT_RETRIES) {
- vchiq_log_error(vchiq_core_log_level, "%s: videocore not initialized\n", __func__);
+ vchiq_log_error(state->dev, VCHIQ_CORE, "%s: videocore not initialized\n",
+ __func__);
ret = -ENOTCONN;
goto failed;
} else if (i > 0) {
- vchiq_log_warning(vchiq_core_log_level,
+ vchiq_log_warning(state->dev, VCHIQ_CORE,
"%s: videocore initialized after %d retries\n", __func__, i);
}
instance = kzalloc(sizeof(*instance), GFP_KERNEL);
if (!instance) {
- vchiq_log_error(vchiq_core_log_level,
+ vchiq_log_error(state->dev, VCHIQ_CORE,
"%s: error allocating vchiq instance\n", __func__);
ret = -ENOMEM;
goto failed;
@@ -709,7 +714,8 @@ int vchiq_initialise(struct vchiq_instance **instance_out)
ret = 0;
failed:
- vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, ret);
+ vchiq_log_trace(state->dev, VCHIQ_CORE,
+ "%s(%p): returning %d", __func__, instance, ret);
return ret;
}
@@ -722,8 +728,9 @@ void free_bulk_waiter(struct vchiq_instance *instance)
list_for_each_entry_safe(waiter, next,
&instance->bulk_waiter_list, list) {
list_del(&waiter->list);
- vchiq_log_info(vchiq_arm_log_level, "bulk_waiter - cleaned up %pK for pid %d",
- waiter, waiter->pid);
+ vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
+ "bulk_waiter - cleaned up %pK for pid %d",
+ waiter, waiter->pid);
kfree(waiter);
}
}
@@ -741,7 +748,8 @@ int vchiq_shutdown(struct vchiq_instance *instance)
mutex_unlock(&state->mutex);
- vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
+ vchiq_log_trace(state->dev, VCHIQ_CORE,
+ "%s(%p): returning %d", __func__, instance, status);
free_bulk_waiter(instance);
kfree(instance);
@@ -761,7 +769,8 @@ int vchiq_connect(struct vchiq_instance *instance)
struct vchiq_state *state = instance->state;
if (mutex_lock_killable(&state->mutex)) {
- vchiq_log_trace(vchiq_core_log_level, "%s: call to mutex_lock failed", __func__);
+ vchiq_log_trace(state->dev, VCHIQ_CORE,
+ "%s: call to mutex_lock failed", __func__);
status = -EAGAIN;
goto failed;
}
@@ -773,7 +782,8 @@ int vchiq_connect(struct vchiq_instance *instance)
mutex_unlock(&state->mutex);
failed:
- vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
+ vchiq_log_trace(state->dev, VCHIQ_CORE,
+ "%s(%p): returning %d", __func__, instance, status);
return status;
}
@@ -804,7 +814,8 @@ vchiq_add_service(struct vchiq_instance *instance,
status = -EINVAL;
}
- vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
+ vchiq_log_trace(state->dev, VCHIQ_CORE,
+ "%s(%p): returning %d", __func__, instance, status);
return status;
}
@@ -835,7 +846,8 @@ vchiq_open_service(struct vchiq_instance *instance,
}
failed:
- vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
+ vchiq_log_trace(state->dev, VCHIQ_CORE,
+ "%s(%p): returning %d", __func__, instance, status);
return status;
}
@@ -959,7 +971,8 @@ vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handl
} else {
waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
if (!waiter) {
- vchiq_log_error(vchiq_core_log_level, "%s - out of memory", __func__);
+ vchiq_log_error(service->state->dev, VCHIQ_CORE,
+ "%s - out of memory", __func__);
return -ENOMEM;
}
}
@@ -982,8 +995,9 @@ vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handl
mutex_lock(&instance->bulk_waiter_list_mutex);
list_add(&waiter->list, &instance->bulk_waiter_list);
mutex_unlock(&instance->bulk_waiter_list_mutex);
- vchiq_log_info(vchiq_arm_log_level, "saved bulk_waiter %pK for pid %d", waiter,
- current->pid);
+ vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
+ "saved bulk_waiter %pK for pid %d", waiter,
+ current->pid);
}
return status;
@@ -1003,13 +1017,16 @@ add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
/* Out of space - wait for the client */
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
- vchiq_log_trace(vchiq_arm_log_level, "%s - completion queue full", __func__);
+ vchiq_log_trace(instance->state->dev, VCHIQ_CORE,
+ "%s - completion queue full", __func__);
DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
if (wait_for_completion_interruptible(&instance->remove_event)) {
- vchiq_log_info(vchiq_arm_log_level, "service_callback interrupted");
+ vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
+ "service_callback interrupted");
return -EAGAIN;
} else if (instance->closing) {
- vchiq_log_info(vchiq_arm_log_level, "service_callback closing");
+ vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
+ "service_callback closing");
return 0;
}
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
@@ -1089,7 +1106,7 @@ service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
vchiq_service_get(service);
rcu_read_unlock();
- vchiq_log_trace(vchiq_arm_log_level,
+ vchiq_log_trace(service->state->dev, VCHIQ_ARM,
"%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
__func__, (unsigned long)user_service, service->localport,
user_service->userdata, reason, (unsigned long)header,
@@ -1102,7 +1119,8 @@ service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
spin_unlock(&msg_queue_spinlock);
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
- vchiq_log_trace(vchiq_arm_log_level, "%s - msg queue full", __func__);
+ vchiq_log_trace(service->state->dev, VCHIQ_ARM,
+ "%s - msg queue full", __func__);
/*
* If there is no MESSAGE_AVAILABLE in the completion
* queue, add one
@@ -1111,8 +1129,8 @@ service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
instance->completion_remove) < 0) {
int status;
- vchiq_log_info(vchiq_arm_log_level,
- "Inserting extra MESSAGE_AVAILABLE");
+ vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
+ "Inserting extra MESSAGE_AVAILABLE");
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
status = add_completion(instance, reason, NULL, user_service,
bulk_userdata);
@@ -1125,12 +1143,14 @@ service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
if (wait_for_completion_interruptible(&user_service->remove_event)) {
- vchiq_log_info(vchiq_arm_log_level, "%s interrupted", __func__);
+ vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
+ "%s interrupted", __func__);
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
vchiq_service_put(service);
return -EAGAIN;
} else if (instance->closing) {
- vchiq_log_info(vchiq_arm_log_level, "%s closing", __func__);
+ vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
+ "%s closing", __func__);
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
vchiq_service_put(service);
return -EINVAL;
@@ -1326,7 +1346,8 @@ vchiq_keepalive_vchiq_callback(struct vchiq_instance *instance,
struct vchiq_header *header,
unsigned int service_user, void *bulk_user)
{
- vchiq_log_error(vchiq_susp_log_level, "%s callback reason %d", __func__, reason);
+ vchiq_log_error(instance->state->dev, VCHIQ_SUSPEND,
+ "%s callback reason %d", __func__, reason);
return 0;
}
@@ -1350,22 +1371,22 @@ vchiq_keepalive_thread_func(void *v)
ret = vchiq_initialise(&instance);
if (ret) {
- vchiq_log_error(vchiq_susp_log_level, "%s vchiq_initialise failed %d", __func__,
- ret);
+ vchiq_log_error(state->dev, VCHIQ_SUSPEND,
+ "%s vchiq_initialise failed %d", __func__, ret);
goto exit;
}
status = vchiq_connect(instance);
if (status) {
- vchiq_log_error(vchiq_susp_log_level, "%s vchiq_connect failed %d", __func__,
- status);
+ vchiq_log_error(state->dev, VCHIQ_SUSPEND,
+ "%s vchiq_connect failed %d", __func__, status);
goto shutdown;
}
status = vchiq_add_service(instance, &params, &ka_handle);
if (status) {
- vchiq_log_error(vchiq_susp_log_level, "%s vchiq_open_service failed %d", __func__,
- status);
+ vchiq_log_error(state->dev, VCHIQ_SUSPEND,
+ "%s vchiq_open_service failed %d", __func__, status);
goto shutdown;
}
@@ -1373,7 +1394,8 @@ vchiq_keepalive_thread_func(void *v)
long rc = 0, uc = 0;
if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
- vchiq_log_error(vchiq_susp_log_level, "%s interrupted", __func__);
+ vchiq_log_error(state->dev, VCHIQ_SUSPEND,
+ "%s interrupted", __func__);
flush_signals(current);
continue;
}
@@ -1393,14 +1415,14 @@ vchiq_keepalive_thread_func(void *v)
atomic_inc(&arm_state->ka_use_ack_count);
status = vchiq_use_service(instance, ka_handle);
if (status) {
- vchiq_log_error(vchiq_susp_log_level,
+ vchiq_log_error(state->dev, VCHIQ_SUSPEND,
"%s vchiq_use_service error %d", __func__, status);
}
}
while (rc--) {
status = vchiq_release_service(instance, ka_handle);
if (status) {
- vchiq_log_error(vchiq_susp_log_level,
+ vchiq_log_error(state->dev, VCHIQ_SUSPEND,
"%s vchiq_release_service error %d", __func__,
status);
}
@@ -1419,7 +1441,7 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
{
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
int ret = 0;
- char entity[16];
+ char entity[64];
int *entity_uc;
int local_uc;
@@ -1429,15 +1451,15 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
}
if (use_type == USE_TYPE_VCHIQ) {
- sprintf(entity, "VCHIQ: ");
+ snprintf(entity, sizeof(entity), "VCHIQ: ");
entity_uc = &arm_state->peer_use_count;
} else if (service) {
- sprintf(entity, "%c%c%c%c:%03d",
- VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
- service->client_id);
+ snprintf(entity, sizeof(entity), "%p4cc:%03d",
+ &service->base.fourcc,
+ service->client_id);
entity_uc = &service->service_use_count;
} else {
- vchiq_log_error(vchiq_susp_log_level, "%s null service ptr", __func__);
+ vchiq_log_error(state->dev, VCHIQ_SUSPEND, "%s null service ptr", __func__);
ret = -EINVAL;
goto out;
}
@@ -1446,8 +1468,8 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
local_uc = ++arm_state->videocore_use_count;
++(*entity_uc);
- vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
- *entity_uc, local_uc);
+ vchiq_log_trace(state->dev, VCHIQ_SUSPEND, "%s %s count %d, state count %d",
+ __func__, entity, *entity_uc, local_uc);
write_unlock_bh(&arm_state->susp_res_lock);
@@ -1466,7 +1488,7 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
}
out:
- vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
+ vchiq_log_trace(state->dev, VCHIQ_SUSPEND, "%s exit %d", __func__, ret);
return ret;
}
@@ -1475,7 +1497,7 @@ vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
{
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
int ret = 0;
- char entity[16];
+ char entity[64];
int *entity_uc;
if (!arm_state) {
@@ -1484,12 +1506,12 @@ vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
}
if (service) {
- sprintf(entity, "%c%c%c%c:%03d",
- VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
- service->client_id);
+ snprintf(entity, sizeof(entity), "%p4cc:%03d",
+ &service->base.fourcc,
+ service->client_id);
entity_uc = &service->service_use_count;
} else {
- sprintf(entity, "PEER: ");
+ snprintf(entity, sizeof(entity), "PEER: ");
entity_uc = &arm_state->peer_use_count;
}
@@ -1504,14 +1526,14 @@ vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
--arm_state->videocore_use_count;
--(*entity_uc);
- vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
- *entity_uc, arm_state->videocore_use_count);
+ vchiq_log_trace(state->dev, VCHIQ_SUSPEND, "%s %s count %d, state count %d",
+ __func__, entity, *entity_uc, arm_state->videocore_use_count);
unlock:
write_unlock_bh(&arm_state->susp_res_lock);
out:
- vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
+ vchiq_log_trace(state->dev, VCHIQ_SUSPEND, "%s exit %d", __func__, ret);
return ret;
}
@@ -1685,17 +1707,19 @@ vchiq_dump_service_use_state(struct vchiq_state *state)
read_unlock_bh(&arm_state->susp_res_lock);
if (only_nonzero)
- vchiq_log_warning(vchiq_susp_log_level, "Too many active services (%d). Only dumping up to first %d services with non-zero use-count",
+ vchiq_log_warning(state->dev, VCHIQ_SUSPEND,
+ "Too many active services (%d). Only dumping up to first %d services with non-zero use-count",
active_services, found);
for (i = 0; i < found; i++) {
- vchiq_log_warning(vchiq_susp_log_level, "----- %c%c%c%c:%d service count %d %s",
- VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
+ vchiq_log_warning(state->dev, VCHIQ_SUSPEND,
+ "%p4cc:%d service count %d %s",
+ &service_data[i].fourcc,
service_data[i].clientid, service_data[i].use_count,
service_data[i].use_count ? nz : "");
}
- vchiq_log_warning(vchiq_susp_log_level, "----- VCHIQ use count %d", peer_count);
- vchiq_log_warning(vchiq_susp_log_level, "--- Overall vchiq instance use count %d",
+ vchiq_log_warning(state->dev, VCHIQ_SUSPEND, "VCHIQ use count %d", peer_count);
+ vchiq_log_warning(state->dev, VCHIQ_SUSPEND, "Overall vchiq instance use count %d",
vc_use_count);
kfree(service_data);
@@ -1718,9 +1742,9 @@ vchiq_check_service(struct vchiq_service *service)
read_unlock_bh(&arm_state->susp_res_lock);
if (ret) {
- vchiq_log_error(vchiq_susp_log_level,
- "%s ERROR - %c%c%c%c:%d service count %d, state count %d", __func__,
- VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc), service->client_id,
+ vchiq_log_error(service->state->dev, VCHIQ_SUSPEND,
+ "%s ERROR - %p4cc:%d service count %d, state count %d", __func__,
+ &service->base.fourcc, service->client_id,
service->service_use_count, arm_state->videocore_use_count);
vchiq_dump_service_use_state(service->state);
}
@@ -1735,8 +1759,8 @@ void vchiq_platform_conn_state_changed(struct vchiq_state *state,
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
char threadname[16];
- vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
- get_conn_state_name(oldstate), get_conn_state_name(newstate));
+ vchiq_log_debug(state->dev, VCHIQ_SUSPEND, "%d: %s->%s", state->id,
+ get_conn_state_name(oldstate), get_conn_state_name(newstate));
if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
return;
@@ -1754,7 +1778,7 @@ void vchiq_platform_conn_state_changed(struct vchiq_state *state,
(void *)state,
threadname);
if (IS_ERR(arm_state->ka_thread)) {
- vchiq_log_error(vchiq_susp_log_level,
+ vchiq_log_error(state->dev, VCHIQ_SUSPEND,
"vchiq: FATAL: couldn't create thread %s",
threadname);
} else {
@@ -1769,28 +1793,6 @@ static const struct of_device_id vchiq_of_match[] = {
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);
-static struct platform_device *
-vchiq_register_child(struct platform_device *pdev, const char *name)
-{
- struct platform_device_info pdevinfo;
- struct platform_device *child;
-
- memset(&pdevinfo, 0, sizeof(pdevinfo));
-
- pdevinfo.parent = &pdev->dev;
- pdevinfo.name = name;
- pdevinfo.id = PLATFORM_DEVID_NONE;
- pdevinfo.dma_mask = DMA_BIT_MASK(32);
-
- child = platform_device_register_full(&pdevinfo);
- if (IS_ERR(child)) {
- dev_warn(&pdev->dev, "%s not registered\n", name);
- child = NULL;
- }
-
- return child;
-}
-
static int vchiq_probe(struct platform_device *pdev)
{
struct device_node *fw_node;
@@ -1823,9 +1825,9 @@ static int vchiq_probe(struct platform_device *pdev)
vchiq_debugfs_init();
- vchiq_log_info(vchiq_arm_log_level,
- "vchiq: platform initialised - version %d (min %d)",
- VCHIQ_VERSION, VCHIQ_VERSION_MIN);
+ vchiq_log_debug(&pdev->dev, VCHIQ_ARM,
+ "vchiq: platform initialised - version %d (min %d)",
+ VCHIQ_VERSION, VCHIQ_VERSION_MIN);
/*
* Simply exit on error since the function handles cleanup in
@@ -1833,26 +1835,26 @@ static int vchiq_probe(struct platform_device *pdev)
*/
err = vchiq_register_chrdev(&pdev->dev);
if (err) {
- vchiq_log_warning(vchiq_arm_log_level,
+ vchiq_log_warning(&pdev->dev, VCHIQ_ARM,
"Failed to initialize vchiq cdev");
goto error_exit;
}
- bcm2835_camera = vchiq_register_child(pdev, "bcm2835-camera");
- bcm2835_audio = vchiq_register_child(pdev, "bcm2835_audio");
+ bcm2835_audio = vchiq_device_register(&pdev->dev, "bcm2835-audio");
+ bcm2835_camera = vchiq_device_register(&pdev->dev, "bcm2835-camera");
return 0;
failed_platform_init:
- vchiq_log_warning(vchiq_arm_log_level, "could not initialize vchiq platform");
+ vchiq_log_warning(&pdev->dev, VCHIQ_ARM, "could not initialize vchiq platform");
error_exit:
return err;
}
static void vchiq_remove(struct platform_device *pdev)
{
- platform_device_unregister(bcm2835_audio);
- platform_device_unregister(bcm2835_camera);
+ vchiq_device_unregister(bcm2835_audio);
+ vchiq_device_unregister(bcm2835_camera);
vchiq_debugfs_deinit();
vchiq_deregister_chrdev();
}
@@ -1870,9 +1872,17 @@ static int __init vchiq_driver_init(void)
{
int ret;
+ ret = bus_register(&vchiq_bus_type);
+ if (ret) {
+ pr_err("Failed to register %s\n", vchiq_bus_type.name);
+ return ret;
+ }
+
ret = platform_driver_register(&vchiq_driver);
- if (ret)
+ if (ret) {
pr_err("Failed to register vchiq driver\n");
+ bus_unregister(&vchiq_bus_type);
+ }
return ret;
}
@@ -1880,6 +1890,7 @@ module_init(vchiq_driver_init);
static void __exit vchiq_driver_exit(void)
{
+ bus_unregister(&vchiq_bus_type);
platform_driver_unregister(&vchiq_driver);
}
module_exit(vchiq_driver_exit);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
index 2fb31f9b527f..7cdc3d70bd2c 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
@@ -76,9 +76,6 @@ struct dump_context {
loff_t offset;
};
-extern int vchiq_arm_log_level;
-extern int vchiq_susp_log_level;
-
extern spinlock_t msg_queue_spinlock;
extern struct vchiq_state g_state;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.c
new file mode 100644
index 000000000000..933027e0011e
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * vchiq_bus.c - VCHIQ generic device and bus-type
+ *
+ * Copyright (c) 2023 Ideas On Board Oy
+ */
+
+#include <linux/device/bus.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "vchiq_bus.h"
+
+static int vchiq_bus_type_match(struct device *dev, struct device_driver *drv)
+{
+ if (dev->bus == &vchiq_bus_type &&
+ strcmp(dev_name(dev), drv->name) == 0)
+ return true;
+
+ return false;
+}
+
+static int vchiq_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct vchiq_device *device = container_of_const(dev, struct vchiq_device, dev);
+
+ return add_uevent_var(env, "MODALIAS=vchiq:%s", dev_name(&device->dev));
+}
+
+static int vchiq_bus_probe(struct device *dev)
+{
+ struct vchiq_device *device = to_vchiq_device(dev);
+ struct vchiq_driver *driver = to_vchiq_driver(dev->driver);
+
+ return driver->probe(device);
+}
+
+struct bus_type vchiq_bus_type = {
+ .name = "vchiq-bus",
+ .match = vchiq_bus_type_match,
+ .uevent = vchiq_bus_uevent,
+ .probe = vchiq_bus_probe,
+};
+
+static void vchiq_device_release(struct device *dev)
+{
+ struct vchiq_device *device = to_vchiq_device(dev);
+
+ kfree(device);
+}
+
+struct vchiq_device *
+vchiq_device_register(struct device *parent, const char *name)
+{
+ struct vchiq_device *device;
+ int ret;
+
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ return NULL;
+
+ device->dev.init_name = name;
+ device->dev.parent = parent;
+ device->dev.bus = &vchiq_bus_type;
+ device->dev.dma_mask = &device->dev.coherent_dma_mask;
+ device->dev.release = vchiq_device_release;
+
+ of_dma_configure(&device->dev, parent->of_node, true);
+
+ ret = device_register(&device->dev);
+ if (ret) {
+ dev_err(parent, "Cannot register %s: %d\n", name, ret);
+ put_device(&device->dev);
+ return NULL;
+ }
+
+ return device;
+}
+
+void vchiq_device_unregister(struct vchiq_device *vchiq_dev)
+{
+ device_unregister(&vchiq_dev->dev);
+}
+
+int vchiq_driver_register(struct vchiq_driver *vchiq_drv)
+{
+ vchiq_drv->driver.bus = &vchiq_bus_type;
+
+ return driver_register(&vchiq_drv->driver);
+}
+EXPORT_SYMBOL_GPL(vchiq_driver_register);
+
+void vchiq_driver_unregister(struct vchiq_driver *vchiq_drv)
+{
+ driver_unregister(&vchiq_drv->driver);
+}
+EXPORT_SYMBOL_GPL(vchiq_driver_unregister);
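
For reference (not part of the patch): vchiq_bus_type_match() binds purely on the device name, and vchiq_bus_uevent() reports a "vchiq:<name>" modalias, so a client module for one of the devices registered in vchiq_probe() could opt into autoloading with a matching alias. Sketch under that assumption:

	/*
	 * Illustration only: the device registered as "bcm2835-camera" in
	 * vchiq_probe() generates MODALIAS=vchiq:bcm2835-camera, so the
	 * corresponding client module would declare:
	 */
	MODULE_ALIAS("vchiq:bcm2835-camera");
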
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.h
new file mode 100644
index 000000000000..caa6fdf25bb1
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Ideas On Board Oy
+ */
+
+#ifndef _VCHIQ_DEVICE_H
+#define _VCHIQ_DEVICE_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+struct vchiq_device {
+ struct device dev;
+};
+
+struct vchiq_driver {
+ int (*probe)(struct vchiq_device *device);
+ void (*remove)(struct vchiq_device *device);
+ int (*resume)(struct vchiq_device *device);
+ int (*suspend)(struct vchiq_device *device,
+ pm_message_t state);
+
+ const struct vchiq_device_id *id_table;
+ struct device_driver driver;
+};
+
+static inline struct vchiq_device *to_vchiq_device(struct device *d)
+{
+ return container_of(d, struct vchiq_device, dev);
+}
+
+static inline struct vchiq_driver *to_vchiq_driver(struct device_driver *d)
+{
+ return container_of(d, struct vchiq_driver, driver);
+}
+
+extern struct bus_type vchiq_bus_type;
+
+struct vchiq_device *
+vchiq_device_register(struct device *parent, const char *name);
+void vchiq_device_unregister(struct vchiq_device *dev);
+
+int vchiq_driver_register(struct vchiq_driver *vchiq_drv);
+void vchiq_driver_unregister(struct vchiq_driver *vchiq_drv);
+
+/**
+ * module_vchiq_driver() - Helper macro for registering a vchiq driver
+ * @__vchiq_driver: vchiq driver struct
+ *
+ * Helper macro for vchiq drivers which do not do anything special in
+ * module init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ */
+#define module_vchiq_driver(__vchiq_driver) \
+ module_driver(__vchiq_driver, vchiq_driver_register, vchiq_driver_unregister)
+
+#endif /* _VCHIQ_DEVICE_H */
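
As a usage sketch only (hypothetical client, not part of this series), a driver built against this header defines a vchiq_driver whose driver.name matches the registered device name and lets module_vchiq_driver() generate the module init/exit hooks:

	#include <linux/module.h>

	#include "vchiq_bus.h"

	static int example_probe(struct vchiq_device *device)
	{
		dev_info(&device->dev, "bound on the vchiq bus\n");
		return 0;
	}

	static struct vchiq_driver example_driver = {
		.probe	= example_probe,
		.driver	= {
			/* must match the name passed to vchiq_device_register() */
			.name	= "bcm2835-camera",
		},
	};
	module_vchiq_driver(example_driver);

	MODULE_LICENSE("GPL");
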
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
index bdb0ab617d8b..b3928bd8c9c6 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
@@ -39,7 +39,7 @@ void vchiq_add_connected_callback(void (*callback)(void))
callback();
} else {
if (g_num_deferred_callbacks >= MAX_CALLBACKS) {
- vchiq_log_error(vchiq_core_log_level,
+ vchiq_log_error(NULL, VCHIQ_CORE,
"There already %d callback registered - please increase MAX_CALLBACKS",
g_num_deferred_callbacks);
} else {
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 596894338cb4..39b857da2d42 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -103,11 +103,6 @@ static_assert((unsigned int)VCHIQ_PORT_MAX <
#define BULK_INDEX(x) ((x) & (VCHIQ_NUM_SERVICE_BULKS - 1))
-#define SRVTRACE_LEVEL(srv) \
- (((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level)
-#define SRVTRACE_ENABLED(srv, lev) \
- (((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev)))
-
#define NO_CLOSE_RECVD 0
#define CLOSE_RECVD 1
@@ -154,11 +149,6 @@ static inline void check_sizes(void)
BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SERVICES);
}
-/* Run time control of log level, based on KERN_XXX level. */
-int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
-int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
-int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
-
DEFINE_SPINLOCK(bulk_waiter_spinlock);
static DEFINE_SPINLOCK(quota_spinlock);
@@ -227,10 +217,10 @@ static const char *msg_type_str(unsigned int msg_type)
static inline void
set_service_state(struct vchiq_service *service, int newstate)
{
- vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
- service->state->id, service->localport,
- srvstate_names[service->srvstate],
- srvstate_names[newstate]);
+ vchiq_log_debug(service->state->dev, VCHIQ_CORE, "%d: srv:%d %s->%s",
+ service->state->id, service->localport,
+ srvstate_names[service->srvstate],
+ srvstate_names[newstate]);
service->srvstate = newstate;
}
@@ -255,8 +245,8 @@ find_service_by_handle(struct vchiq_instance *instance, unsigned int handle)
return service;
}
rcu_read_unlock();
- vchiq_log_info(vchiq_core_log_level,
- "Invalid service handle 0x%x", handle);
+ vchiq_log_debug(instance->state->dev, VCHIQ_CORE,
+ "Invalid service handle 0x%x", handle);
return NULL;
}
@@ -276,8 +266,8 @@ find_service_by_port(struct vchiq_state *state, unsigned int localport)
}
rcu_read_unlock();
}
- vchiq_log_info(vchiq_core_log_level,
- "Invalid port %u", localport);
+ vchiq_log_debug(state->dev, VCHIQ_CORE,
+ "Invalid port %u", localport);
return NULL;
}
@@ -297,8 +287,8 @@ find_service_for_instance(struct vchiq_instance *instance, unsigned int handle)
return service;
}
rcu_read_unlock();
- vchiq_log_info(vchiq_core_log_level,
- "Invalid service handle 0x%x", handle);
+ vchiq_log_debug(instance->state->dev, VCHIQ_CORE,
+ "Invalid service handle 0x%x", handle);
return NULL;
}
@@ -320,8 +310,8 @@ find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int h
return service;
}
rcu_read_unlock();
- vchiq_log_info(vchiq_core_log_level,
- "Invalid service handle 0x%x", handle);
+ vchiq_log_debug(instance->state->dev, VCHIQ_CORE,
+ "Invalid service handle 0x%x", handle);
return service;
}
@@ -469,13 +459,13 @@ make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
{
int status;
- vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
+ vchiq_log_trace(service->state->dev, VCHIQ_CORE, "%d: callback:%d (%s, %pK, %pK)",
service->state->id, service->localport, reason_names[reason],
header, bulk_userdata);
status = service->base.callback(service->instance, reason, header, service->handle,
bulk_userdata);
if (status && (status != -EAGAIN)) {
- vchiq_log_warning(vchiq_core_log_level,
+ vchiq_log_warning(service->state->dev, VCHIQ_CORE,
"%d: ignoring ERROR from callback to service %x",
service->state->id, service->handle);
status = 0;
@@ -492,8 +482,8 @@ vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
{
enum vchiq_connstate oldstate = state->conn_state;
- vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id, conn_state_names[oldstate],
- conn_state_names[newstate]);
+ vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: %s->%s", state->id, conn_state_names[oldstate],
+ conn_state_names[newstate]);
state->conn_state = newstate;
vchiq_platform_conn_state_changed(state, oldstate, newstate);
}
@@ -751,7 +741,7 @@ process_free_data_message(struct vchiq_state *state, u32 *service_found,
*/
complete(&quota->quota_event);
} else if (count == 0) {
- vchiq_log_error(vchiq_core_log_level,
+ vchiq_log_error(state->dev, VCHIQ_CORE,
"service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
port, quota->message_use_count, header, msgid, header->msgid,
header->size);
@@ -773,10 +763,10 @@ process_free_data_message(struct vchiq_state *state, u32 *service_found,
* it has dropped below its quota
*/
complete(&quota->quota_event);
- vchiq_log_trace(vchiq_core_log_level, "%d: pfq:%d %x@%pK - slot_use->%d",
+ vchiq_log_trace(state->dev, VCHIQ_CORE, "%d: pfq:%d %x@%pK - slot_use->%d",
state->id, port, header->size, header, count - 1);
} else {
- vchiq_log_error(vchiq_core_log_level,
+ vchiq_log_error(state->dev, VCHIQ_CORE,
"service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
port, count, header, msgid, header->msgid, header->size);
WARN(1, "bad slot use count\n");
@@ -819,7 +809,7 @@ process_free_queue(struct vchiq_state *state, u32 *service_found,
*/
rmb();
- vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x",
+ vchiq_log_trace(state->dev, VCHIQ_CORE, "%d: pfq %d=%pK %x %x",
state->id, slot_index, data, local->slot_queue_recycle,
slot_queue_available);
@@ -841,7 +831,7 @@ process_free_queue(struct vchiq_state *state, u32 *service_found,
pos += calc_stride(header->size);
if (pos > VCHIQ_SLOT_SIZE) {
- vchiq_log_error(vchiq_core_log_level,
+ vchiq_log_error(state->dev, VCHIQ_CORE,
"pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
pos, header, msgid, header->msgid, header->size);
WARN(1, "invalid slot position\n");
@@ -990,7 +980,7 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
((tx_end_index != quota->previous_tx_index) &&
(quota->slot_use_count == quota->slot_quota))) {
spin_unlock(&quota_spinlock);
- vchiq_log_trace(vchiq_core_log_level,
+ vchiq_log_trace(state->dev, VCHIQ_CORE,
"%d: qm:%d %s,%zx - quota stall (msg %d, slot %d)",
state->id, service->localport, msg_type_str(type), size,
quota->message_use_count, quota->slot_use_count);
@@ -1033,9 +1023,9 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
int tx_end_index;
int slot_use_count;
- vchiq_log_info(vchiq_core_log_level, "%d: qm %s@%pK,%zx (%d->%d)", state->id,
- msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
- VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
+ vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: qm %s@%pK,%zx (%d->%d)", state->id,
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
+ VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
QMFLAGS_NO_MUTEX_UNLOCK));
@@ -1050,11 +1040,9 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
return -EINVAL;
}
- if (SRVTRACE_ENABLED(service,
- VCHIQ_LOG_INFO))
- vchiq_log_dump_mem("Sent", 0,
- header->data,
- min_t(size_t, 16, callback_result));
+ vchiq_log_dump_mem(state->dev, "Sent", 0,
+ header->data,
+ min_t(size_t, 16, callback_result));
spin_lock(&quota_spinlock);
quota->message_use_count++;
@@ -1085,7 +1073,7 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
spin_unlock(&quota_spinlock);
if (slot_use_count)
- vchiq_log_trace(vchiq_core_log_level,
+ vchiq_log_trace(state->dev, VCHIQ_CORE,
"%d: qm:%d %s,%zx - slot_use->%d (hdr %p)", state->id,
service->localport, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
size, slot_use_count, header);
@@ -1093,9 +1081,9 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
} else {
- vchiq_log_info(vchiq_core_log_level, "%d: qm %s@%pK,%zx (%d->%d)", state->id,
- msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
- VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
+ vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: qm %s@%pK,%zx (%d->%d)", state->id,
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
+ VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
if (size != 0) {
/*
* It is assumed for now that this code path
@@ -1123,11 +1111,11 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
? service->base.fourcc
: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
- vchiq_log_info(SRVTRACE_LEVEL(service),
- "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
- msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
- VCHIQ_FOURCC_AS_4CHARS(svc_fourcc), VCHIQ_MSG_SRCPORT(msgid),
- VCHIQ_MSG_DSTPORT(msgid), size);
+ vchiq_log_debug(state->dev, VCHIQ_CORE_MSG,
+ "Sent Msg %s(%u) to %p4cc s:%u d:%d len:%zu",
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
+ &svc_fourcc, VCHIQ_MSG_SRCPORT(msgid),
+ VCHIQ_MSG_DSTPORT(msgid), size);
}
/* Make sure the new header is visible to the peer. */
@@ -1159,6 +1147,7 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
struct vchiq_shared_state *local;
struct vchiq_header *header;
ssize_t callback_result;
+ int svc_fourcc;
local = state->local;
@@ -1178,15 +1167,15 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
int oldmsgid = header->msgid;
if (oldmsgid != VCHIQ_MSGID_PADDING)
- vchiq_log_error(vchiq_core_log_level, "%d: qms - msgid %x, not PADDING",
+ vchiq_log_error(state->dev, VCHIQ_CORE, "%d: qms - msgid %x, not PADDING",
state->id, oldmsgid);
}
- vchiq_log_info(vchiq_sync_log_level,
- "%d: qms %s@%pK,%x (%d->%d)", state->id,
- msg_type_str(VCHIQ_MSG_TYPE(msgid)),
- header, size, VCHIQ_MSG_SRCPORT(msgid),
- VCHIQ_MSG_DSTPORT(msgid));
+ vchiq_log_debug(state->dev, VCHIQ_SYNC,
+ "%d: qms %s@%pK,%x (%d->%d)", state->id,
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)),
+ header, size, VCHIQ_MSG_SRCPORT(msgid),
+ VCHIQ_MSG_DSTPORT(msgid));
callback_result =
copy_message_data(copy_callback, context,
@@ -1199,11 +1188,9 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
}
if (service) {
- if (SRVTRACE_ENABLED(service,
- VCHIQ_LOG_INFO))
- vchiq_log_dump_mem("Sent", 0,
- header->data,
- min_t(size_t, 16, callback_result));
+ vchiq_log_dump_mem(state->dev, "Sent", 0,
+ header->data,
+ min_t(size_t, 16, callback_result));
VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
@@ -1214,19 +1201,15 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
header->size = size;
header->msgid = msgid;
- if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
- int svc_fourcc;
- svc_fourcc = service
- ? service->base.fourcc
- : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
+ svc_fourcc = service ? service->base.fourcc
+ : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
- vchiq_log_trace(vchiq_sync_log_level,
- "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
- msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
- VCHIQ_FOURCC_AS_4CHARS(svc_fourcc), VCHIQ_MSG_SRCPORT(msgid),
- VCHIQ_MSG_DSTPORT(msgid), size);
- }
+ vchiq_log_trace(state->dev, VCHIQ_SYNC,
+ "Sent Sync Msg %s(%u) to %p4cc s:%u d:%d len:%d",
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
+ &svc_fourcc, VCHIQ_MSG_SRCPORT(msgid),
+ VCHIQ_MSG_DSTPORT(msgid), size);
remote_event_signal(&state->remote->sync_trigger);
@@ -1278,9 +1261,9 @@ release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
VCHIQ_SLOT_QUEUE_MASK] =
SLOT_INDEX_FROM_INFO(state, slot_info);
state->remote->slot_queue_recycle = slot_queue_recycle + 1;
- vchiq_log_info(vchiq_core_log_level, "%d: %s %d - recycle->%x", state->id, __func__,
- SLOT_INDEX_FROM_INFO(state, slot_info),
- state->remote->slot_queue_recycle);
+ vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: %s %d - recycle->%x",
+ state->id, __func__, SLOT_INDEX_FROM_INFO(state, slot_info),
+ state->remote->slot_queue_recycle);
/*
* A write barrier is necessary, but remote_event_signal
@@ -1315,8 +1298,10 @@ notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
{
int status = 0;
- vchiq_log_trace(vchiq_core_log_level, "%d: nb:%d %cx - p=%x rn=%x r=%x", service->state->id,
- service->localport, (queue == &service->bulk_tx) ? 't' : 'r',
+ vchiq_log_trace(service->state->dev, VCHIQ_CORE,
+ "%d: nb:%d %cx - p=%x rn=%x r=%x",
+ service->state->id, service->localport,
+ (queue == &service->bulk_tx) ? 't' : 'r',
queue->process, queue->remote_notify, queue->remove);
queue->remote_notify = queue->process;
@@ -1397,9 +1382,8 @@ poll_services_of_group(struct vchiq_state *state, int group)
service_flags = atomic_xchg(&service->poll_flags, 0);
if (service_flags & BIT(VCHIQ_POLL_REMOVE)) {
- vchiq_log_info(vchiq_core_log_level, "%d: ps - remove %d<->%d",
- state->id, service->localport,
- service->remoteport);
+ vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: ps - remove %d<->%d",
+ state->id, service->localport, service->remoteport);
/*
* Make it look like a client, because
@@ -1411,8 +1395,8 @@ poll_services_of_group(struct vchiq_state *state, int group)
if (vchiq_close_service_internal(service, NO_CLOSE_RECVD))
request_poll(state, service, VCHIQ_POLL_REMOVE);
} else if (service_flags & BIT(VCHIQ_POLL_TERMINATE)) {
- vchiq_log_info(vchiq_core_log_level, "%d: ps - terminate %d<->%d",
- state->id, service->localport, service->remoteport);
+ vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: ps - terminate %d<->%d",
+ state->id, service->localport, service->remoteport);
if (vchiq_close_service_internal(service, NO_CLOSE_RECVD))
request_poll(state, service, VCHIQ_POLL_TERMINATE);
}
@@ -1441,9 +1425,11 @@ abort_outstanding_bulks(struct vchiq_service *service,
{
int is_tx = (queue == &service->bulk_tx);
- vchiq_log_trace(vchiq_core_log_level, "%d: aob:%d %cx - li=%x ri=%x p=%x",
- service->state->id, service->localport, is_tx ? 't' : 'r',
- queue->local_insert, queue->remote_insert, queue->process);
+ vchiq_log_trace(service->state->dev, VCHIQ_CORE,
+ "%d: aob:%d %cx - li=%x ri=%x p=%x",
+ service->state->id, service->localport,
+ is_tx ? 't' : 'r', queue->local_insert,
+ queue->remote_insert, queue->process);
WARN_ON((int)(queue->local_insert - queue->process) < 0);
WARN_ON((int)(queue->remote_insert - queue->process) < 0);
@@ -1462,11 +1448,11 @@ abort_outstanding_bulks(struct vchiq_service *service,
if (queue->process != queue->local_insert) {
vchiq_complete_bulk(service->instance, bulk);
- vchiq_log_info(SRVTRACE_LEVEL(service),
- "%s %c%c%c%c d:%d ABORTED - tx len:%d, rx len:%d",
- is_tx ? "Send Bulk to" : "Recv Bulk from",
- VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
- service->remoteport, bulk->size, bulk->remote_size);
+ vchiq_log_debug(service->state->dev, VCHIQ_CORE_MSG,
+ "%s %p4cc d:%d ABORTED - tx len:%d, rx len:%d",
+ is_tx ? "Send Bulk to" : "Recv Bulk from",
+ &service->base.fourcc,
+ service->remoteport, bulk->size, bulk->remote_size);
} else {
/* fabricate a matching dummy bulk */
bulk->data = 0;
@@ -1499,8 +1485,8 @@ parse_open(struct vchiq_state *state, struct vchiq_header *header)
payload = (struct vchiq_open_payload *)header->data;
fourcc = payload->fourcc;
- vchiq_log_info(vchiq_core_log_level, "%d: prs OPEN@%pK (%d->'%c%c%c%c')",
- state->id, header, localport, VCHIQ_FOURCC_AS_4CHARS(fourcc));
+ vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: prs OPEN@%pK (%d->'%p4cc')",
+ state->id, header, localport, &fourcc);
service = get_listening_service(state, fourcc);
if (!service)
@@ -1512,11 +1498,9 @@ parse_open(struct vchiq_state *state, struct vchiq_header *header)
if ((service->version < version_min) || (version < service->version_min)) {
/* Version mismatch */
- vchiq_loud_error_header();
- vchiq_loud_error("%d: service %d (%c%c%c%c) version mismatch - local (%d, min %d) vs. remote (%d, min %d)",
- state->id, service->localport, VCHIQ_FOURCC_AS_4CHARS(fourcc),
- service->version, service->version_min, version, version_min);
- vchiq_loud_error_footer();
+ dev_err(state->dev, "%d: service %d (%p4cc) version mismatch - local (%d, min %d) vs. remote (%d, min %d)",
+ state->id, service->localport, &fourcc,
+ service->version, service->version_min, version, version_min);
vchiq_service_put(service);
service = NULL;
goto fail_open;
@@ -1587,6 +1571,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
struct vchiq_service *service = NULL;
unsigned int localport, remoteport;
int msgid, size, type, ret = -EINVAL;
+ int svc_fourcc;
DEBUG_INITIALISE(state->local);
@@ -1624,14 +1609,14 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
vchiq_service_put(service);
service = get_connected_service(state, remoteport);
if (service)
- vchiq_log_warning(vchiq_core_log_level,
+ vchiq_log_warning(state->dev, VCHIQ_CORE,
"%d: prs %s@%pK (%d->%d) - found connected service %d",
state->id, msg_type_str(type), header,
remoteport, localport, service->localport);
}
if (!service) {
- vchiq_log_error(vchiq_core_log_level,
+ vchiq_log_error(state->dev, VCHIQ_CORE,
"%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
state->id, msg_type_str(type), header, remoteport,
localport, localport);
@@ -1642,23 +1627,20 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
break;
}
- if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
- int svc_fourcc;
- svc_fourcc = service
- ? service->base.fourcc
- : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
- vchiq_log_info(SRVTRACE_LEVEL(service),
- "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d len:%d",
- msg_type_str(type), type, VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
- remoteport, localport, size);
- if (size > 0)
- vchiq_log_dump_mem("Rcvd", 0, header->data, min(16, size));
- }
+ svc_fourcc = service ? service->base.fourcc
+ : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
+
+ vchiq_log_debug(state->dev, VCHIQ_CORE_MSG,
+ "Rcvd Msg %s(%u) from %p4cc s:%d d:%d len:%d",
+ msg_type_str(type), type, &svc_fourcc,
+ remoteport, localport, size);
+ if (size > 0)
+ vchiq_log_dump_mem(state->dev, "Rcvd", 0, header->data, min(16, size));
if (((unsigned long)header & VCHIQ_SLOT_MASK) +
calc_stride(size) > VCHIQ_SLOT_SIZE) {
- vchiq_log_error(vchiq_core_log_level,
+ vchiq_log_error(state->dev, VCHIQ_CORE,
"header %pK (msgid %x) - size %x too big for slot",
header, (unsigned int)msgid, (unsigned int)size);
WARN(1, "oversized for slot\n");
@@ -1677,36 +1659,37 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
header->data;
service->peer_version = payload->version;
}
- vchiq_log_info(vchiq_core_log_level, "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
- state->id, header, size, remoteport, localport,
- service->peer_version);
+ vchiq_log_debug(state->dev, VCHIQ_CORE,
+ "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
+ state->id, header, size, remoteport, localport,
+ service->peer_version);
if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
service->remoteport = remoteport;
set_service_state(service, VCHIQ_SRVSTATE_OPEN);
complete(&service->remove_event);
} else {
- vchiq_log_error(vchiq_core_log_level, "OPENACK received in state %s",
+ vchiq_log_error(state->dev, VCHIQ_CORE, "OPENACK received in state %s",
srvstate_names[service->srvstate]);
}
break;
case VCHIQ_MSG_CLOSE:
WARN_ON(size); /* There should be no data */
- vchiq_log_info(vchiq_core_log_level, "%d: prs CLOSE@%pK (%d->%d)",
- state->id, header, remoteport, localport);
+ vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: prs CLOSE@%pK (%d->%d)",
+ state->id, header, remoteport, localport);
mark_service_closing_internal(service, 1);
if (vchiq_close_service_internal(service, CLOSE_RECVD) == -EAGAIN)
goto bail_not_ready;
- vchiq_log_info(vchiq_core_log_level, "Close Service %c%c%c%c s:%u d:%d",
- VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
- service->localport, service->remoteport);
+ vchiq_log_debug(state->dev, VCHIQ_CORE, "Close Service %p4cc s:%u d:%d",
+ &service->base.fourcc,
+ service->localport, service->remoteport);
break;
case VCHIQ_MSG_DATA:
- vchiq_log_info(vchiq_core_log_level, "%d: prs DATA@%pK,%x (%d->%d)",
- state->id, header, size, remoteport, localport);
+ vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: prs DATA@%pK,%x (%d->%d)",
+ state->id, header, size, remoteport, localport);
if ((service->remoteport == remoteport) &&
(service->srvstate == VCHIQ_SRVSTATE_OPEN)) {
@@ -1725,7 +1708,8 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
}
break;
case VCHIQ_MSG_CONNECT:
- vchiq_log_info(vchiq_core_log_level, "%d: prs CONNECT@%pK", state->id, header);
+ vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: prs CONNECT@%pK",
+ state->id, header);
state->version_common = ((struct vchiq_slot_zero *)
state->slot_data)->version;
complete(&state->connect);
@@ -1756,7 +1740,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
}
if ((int)(queue->remote_insert -
queue->local_insert) >= 0) {
- vchiq_log_error(vchiq_core_log_level,
+ vchiq_log_error(state->dev, VCHIQ_CORE,
"%d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)",
state->id, msg_type_str(type), header, remoteport,
localport, queue->remote_insert,
@@ -1777,11 +1761,12 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
bulk->actual = *(int *)header->data;
queue->remote_insert++;
- vchiq_log_info(vchiq_core_log_level, "%d: prs %s@%pK (%d->%d) %x@%pad",
- state->id, msg_type_str(type), header, remoteport, localport,
- bulk->actual, &bulk->data);
+ vchiq_log_debug(state->dev, VCHIQ_CORE,
+ "%d: prs %s@%pK (%d->%d) %x@%pad",
+ state->id, msg_type_str(type), header, remoteport,
+ localport, bulk->actual, &bulk->data);
- vchiq_log_trace(vchiq_core_log_level, "%d: prs:%d %cx li=%x ri=%x p=%x",
+ vchiq_log_trace(state->dev, VCHIQ_CORE, "%d: prs:%d %cx li=%x ri=%x p=%x",
state->id, localport,
(type == VCHIQ_MSG_BULK_RX_DONE) ? 'r' : 't',
queue->local_insert, queue->remote_insert, queue->process);
@@ -1797,16 +1782,16 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
}
break;
case VCHIQ_MSG_PADDING:
- vchiq_log_trace(vchiq_core_log_level, "%d: prs PADDING@%pK,%x",
+ vchiq_log_trace(state->dev, VCHIQ_CORE, "%d: prs PADDING@%pK,%x",
state->id, header, size);
break;
case VCHIQ_MSG_PAUSE:
/* If initiated, signal the application thread */
- vchiq_log_trace(vchiq_core_log_level, "%d: prs PAUSE@%pK,%x",
+ vchiq_log_trace(state->dev, VCHIQ_CORE, "%d: prs PAUSE@%pK,%x",
state->id, header, size);
if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
- vchiq_log_error(vchiq_core_log_level, "%d: PAUSE received in state PAUSED",
- state->id);
+ vchiq_log_error(state->dev, VCHIQ_CORE,
+ "%d: PAUSE received in state PAUSED", state->id);
break;
}
if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
@@ -1819,7 +1804,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
break;
case VCHIQ_MSG_RESUME:
- vchiq_log_trace(vchiq_core_log_level, "%d: prs RESUME@%pK,%x",
+ vchiq_log_trace(state->dev, VCHIQ_CORE, "%d: prs RESUME@%pK,%x",
state->id, header, size);
/* Release the slot mutex */
mutex_unlock(&state->slot_mutex);
@@ -1836,7 +1821,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
break;
default:
- vchiq_log_error(vchiq_core_log_level, "%d: prs invalid msgid %x@%pK,%x",
+ vchiq_log_error(state->dev, VCHIQ_CORE, "%d: prs invalid msgid %x@%pK,%x",
state->id, msgid, header, size);
WARN(1, "invalid message\n");
break;
@@ -1947,7 +1932,7 @@ handle_poll(struct vchiq_state *state)
* since the PAUSE should have flushed
* through outstanding messages.
*/
- vchiq_log_error(vchiq_core_log_level, "Failed to send RESUME message");
+ vchiq_log_error(state->dev, VCHIQ_CORE, "Failed to send RESUME message");
}
break;
default:
@@ -2025,6 +2010,7 @@ sync_func(void *v)
struct vchiq_header *header =
(struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
state->remote->slot_sync);
+ int svc_fourcc;
while (1) {
struct vchiq_service *service;
@@ -2046,7 +2032,7 @@ sync_func(void *v)
service = find_service_by_port(state, localport);
if (!service) {
- vchiq_log_error(vchiq_sync_log_level,
+ vchiq_log_error(state->dev, VCHIQ_SYNC,
"%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
state->id, msg_type_str(type), header,
remoteport, localport, localport);
@@ -2054,19 +2040,15 @@ sync_func(void *v)
continue;
}
- if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
- int svc_fourcc;
-
- svc_fourcc = service
- ? service->base.fourcc
- : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
- vchiq_log_trace(vchiq_sync_log_level,
- "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
- msg_type_str(type), VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
- remoteport, localport, size);
- if (size > 0)
- vchiq_log_dump_mem("Rcvd", 0, header->data, min(16, size));
- }
+ svc_fourcc = service ? service->base.fourcc
+ : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
+
+ vchiq_log_trace(state->dev, VCHIQ_SYNC,
+ "Rcvd Msg %s from %p4cc s:%d d:%d len:%d",
+ msg_type_str(type), &svc_fourcc,
+ remoteport, localport, size);
+ if (size > 0)
+ vchiq_log_dump_mem(state->dev, "Rcvd", 0, header->data, min(16, size));
switch (type) {
case VCHIQ_MSG_OPENACK:
@@ -2076,9 +2058,9 @@ sync_func(void *v)
header->data;
service->peer_version = payload->version;
}
- vchiq_log_info(vchiq_sync_log_level, "%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
- state->id, header, size, remoteport, localport,
- service->peer_version);
+ vchiq_log_debug(state->dev, VCHIQ_SYNC, "%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
+ state->id, header, size, remoteport, localport,
+ service->peer_version);
if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
service->remoteport = remoteport;
set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC);
@@ -2089,21 +2071,21 @@ sync_func(void *v)
break;
case VCHIQ_MSG_DATA:
- vchiq_log_trace(vchiq_sync_log_level, "%d: sf DATA@%pK,%x (%d->%d)",
+ vchiq_log_trace(state->dev, VCHIQ_SYNC, "%d: sf DATA@%pK,%x (%d->%d)",
state->id, header, size, remoteport, localport);
if ((service->remoteport == remoteport) &&
(service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)) {
if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
NULL) == -EAGAIN)
- vchiq_log_error(vchiq_sync_log_level,
+ vchiq_log_error(state->dev, VCHIQ_SYNC,
"synchronous callback to service %d returns -EAGAIN",
localport);
}
break;
default:
- vchiq_log_error(vchiq_sync_log_level, "%d: sf unexpected msgid %x@%pK,%x",
+ vchiq_log_error(state->dev, VCHIQ_SYNC, "%d: sf unexpected msgid %x@%pK,%x",
state->id, msgid, header, size);
release_message_sync(state, header);
break;
@@ -2122,7 +2104,7 @@ get_conn_state_name(enum vchiq_connstate conn_state)
}
struct vchiq_slot_zero *
-vchiq_init_slots(void *mem_base, int mem_size)
+vchiq_init_slots(struct device *dev, void *mem_base, int mem_size)
{
int mem_align =
(int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
@@ -2137,7 +2119,7 @@ vchiq_init_slots(void *mem_base, int mem_size)
num_slots -= first_data_slot;
if (num_slots < 4) {
- vchiq_log_error(vchiq_core_log_level, "%s - insufficient memory %x bytes",
+ vchiq_log_error(dev, VCHIQ_CORE, "%s - insufficient memory %x bytes",
__func__, mem_size);
return NULL;
}
@@ -2174,12 +2156,11 @@ vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, s
remote = &slot_zero->master;
if (local->initialised) {
- vchiq_loud_error_header();
if (remote->initialised)
- vchiq_loud_error("local state has already been initialised");
+ dev_err(dev, "local state has already been initialised\n");
else
- vchiq_loud_error("master/slave mismatch two slaves");
- vchiq_loud_error_footer();
+ dev_err(dev, "master/slave mismatch two slaves\n");
+
return -EINVAL;
}
@@ -2257,9 +2238,7 @@ vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, s
state->slot_handler_thread = kthread_create(&slot_handler_func, (void *)state, threadname);
if (IS_ERR(state->slot_handler_thread)) {
- vchiq_loud_error_header();
- vchiq_loud_error("couldn't create thread %s", threadname);
- vchiq_loud_error_footer();
+ dev_err(state->dev, "couldn't create thread %s\n", threadname);
return PTR_ERR(state->slot_handler_thread);
}
set_user_nice(state->slot_handler_thread, -19);
@@ -2267,9 +2246,7 @@ vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, s
snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
state->recycle_thread = kthread_create(&recycle_func, (void *)state, threadname);
if (IS_ERR(state->recycle_thread)) {
- vchiq_loud_error_header();
- vchiq_loud_error("couldn't create thread %s", threadname);
- vchiq_loud_error_footer();
+ dev_err(state->dev, "couldn't create thread %s\n", threadname);
ret = PTR_ERR(state->recycle_thread);
goto fail_free_handler_thread;
}
@@ -2278,9 +2255,7 @@ vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, s
snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
state->sync_thread = kthread_create(&sync_func, (void *)state, threadname);
if (IS_ERR(state->sync_thread)) {
- vchiq_loud_error_header();
- vchiq_loud_error("couldn't create thread %s", threadname);
- vchiq_loud_error_footer();
+ dev_err(state->dev, "couldn't create thread %s\n", threadname);
ret = PTR_ERR(state->sync_thread);
goto fail_free_recycle_thread;
}
@@ -2353,10 +2328,11 @@ struct vchiq_header *vchiq_msg_hold(struct vchiq_instance *instance, unsigned in
}
EXPORT_SYMBOL(vchiq_msg_hold);
-static int vchiq_validate_params(const struct vchiq_service_params_kernel *params)
+static int vchiq_validate_params(struct vchiq_state *state,
+ const struct vchiq_service_params_kernel *params)
{
if (!params->callback || !params->fourcc) {
- vchiq_loud_error("Can't add service, invalid params\n");
+ dev_err(state->dev, "Can't add service, invalid params\n");
return -EINVAL;
}
@@ -2376,7 +2352,7 @@ vchiq_add_service_internal(struct vchiq_state *state,
int ret;
int i;
- ret = vchiq_validate_params(params);
+ ret = vchiq_validate_params(state, params);
if (ret)
return NULL;
@@ -2486,9 +2462,9 @@ vchiq_add_service_internal(struct vchiq_state *state,
/* Bring this service online */
set_service_state(service, srvstate);
- vchiq_log_info(vchiq_core_msg_log_level, "%s Service %c%c%c%c SrcPort:%d",
- (srvstate == VCHIQ_SRVSTATE_OPENING) ? "Open" : "Add",
- VCHIQ_FOURCC_AS_4CHARS(params->fourcc), service->localport);
+ vchiq_log_debug(state->dev, VCHIQ_CORE_MSG, "%s Service %p4cc SrcPort:%d",
+ (srvstate == VCHIQ_SRVSTATE_OPENING) ? "Open" : "Add",
+ &params->fourcc, service->localport);
/* Don't unlock the service - leave it with a ref_count of 1. */
@@ -2525,7 +2501,7 @@ vchiq_open_service_internal(struct vchiq_service *service, int client_id)
} else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
(service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
- vchiq_log_error(vchiq_core_log_level,
+ vchiq_log_error(service->state->dev, VCHIQ_CORE,
"%d: osi - srvstate = %s (ref %u)",
service->state->id,
srvstate_names[service->srvstate],
@@ -2584,12 +2560,13 @@ release_service_messages(struct vchiq_service *service)
int port = VCHIQ_MSG_DSTPORT(msgid);
if ((port == service->localport) && (msgid & VCHIQ_MSGID_CLAIMED)) {
- vchiq_log_info(vchiq_core_log_level, " fsi - hdr %pK", header);
+ vchiq_log_debug(state->dev, VCHIQ_CORE,
+ " fsi - hdr %pK", header);
release_slot(state, slot_info, header, NULL);
}
pos += calc_stride(header->size);
if (pos > VCHIQ_SLOT_SIZE) {
- vchiq_log_error(vchiq_core_log_level,
+ vchiq_log_error(state->dev, VCHIQ_CORE,
"fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
pos, header, msgid, header->msgid, header->size);
WARN(1, "invalid slot position\n");
@@ -2645,8 +2622,8 @@ close_service_complete(struct vchiq_service *service, int failstate)
case VCHIQ_SRVSTATE_LISTENING:
break;
default:
- vchiq_log_error(vchiq_core_log_level, "%s(%x) called in state %s", __func__,
- service->handle, srvstate_names[service->srvstate]);
+ vchiq_log_error(service->state->dev, VCHIQ_CORE, "%s(%x) called in state %s",
+ __func__, service->handle, srvstate_names[service->srvstate]);
WARN(1, "%s in unexpected state\n", __func__);
return -EINVAL;
}
@@ -2692,8 +2669,8 @@ vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
int close_id = MAKE_CLOSE(service->localport,
VCHIQ_MSG_DSTPORT(service->remoteport));
- vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)", service->state->id,
- service->localport, close_recvd, srvstate_names[service->srvstate]);
+ vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: csi:%d,%d (%s)", service->state->id,
+ service->localport, close_recvd, srvstate_names[service->srvstate]);
switch (service->srvstate) {
case VCHIQ_SRVSTATE_CLOSED:
@@ -2701,7 +2678,7 @@ vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
case VCHIQ_SRVSTATE_LISTENING:
case VCHIQ_SRVSTATE_CLOSEWAIT:
if (close_recvd) {
- vchiq_log_error(vchiq_core_log_level, "%s(1) called in state %s",
+ vchiq_log_error(state->dev, VCHIQ_CORE, "%s(1) called in state %s",
__func__, srvstate_names[service->srvstate]);
} else if (is_server) {
if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
@@ -2789,7 +2766,7 @@ vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
break;
default:
- vchiq_log_error(vchiq_core_log_level, "%s(%d) called in state %s", __func__,
+ vchiq_log_error(state->dev, VCHIQ_CORE, "%s(%d) called in state %s", __func__,
close_recvd, srvstate_names[service->srvstate]);
break;
}
@@ -2803,8 +2780,8 @@ vchiq_terminate_service_internal(struct vchiq_service *service)
{
struct vchiq_state *state = service->state;
- vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)", state->id,
- service->localport, service->remoteport);
+ vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: tsi - (%d<->%d)", state->id,
+ service->localport, service->remoteport);
mark_service_closing(service);
@@ -2818,7 +2795,8 @@ vchiq_free_service_internal(struct vchiq_service *service)
{
struct vchiq_state *state = service->state;
- vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)", state->id, service->localport);
+ vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: fsi - (%d)",
+ state->id, service->localport);
switch (service->srvstate) {
case VCHIQ_SRVSTATE_OPENING:
@@ -2828,7 +2806,7 @@ vchiq_free_service_internal(struct vchiq_service *service)
case VCHIQ_SRVSTATE_CLOSEWAIT:
break;
default:
- vchiq_log_error(vchiq_core_log_level, "%d: fsi - (%d) in state %s", state->id,
+ vchiq_log_error(state->dev, VCHIQ_CORE, "%d: fsi - (%d) in state %s", state->id,
service->localport, srvstate_names[service->srvstate]);
return;
}
@@ -2898,8 +2876,8 @@ vchiq_close_service(struct vchiq_instance *instance, unsigned int handle)
if (!service)
return -EINVAL;
- vchiq_log_info(vchiq_core_log_level, "%d: close_service:%d",
- service->state->id, service->localport);
+ vchiq_log_debug(service->state->dev, VCHIQ_CORE, "%d: close_service:%d",
+ service->state->id, service->localport);
if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
(service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
@@ -2929,7 +2907,7 @@ vchiq_close_service(struct vchiq_instance *instance, unsigned int handle)
(service->srvstate == VCHIQ_SRVSTATE_OPEN))
break;
- vchiq_log_warning(vchiq_core_log_level,
+ vchiq_log_warning(service->state->dev, VCHIQ_CORE,
"%d: close_service:%d - waiting in state %s",
service->state->id, service->localport,
srvstate_names[service->srvstate]);
@@ -2956,8 +2934,8 @@ vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle)
if (!service)
return -EINVAL;
- vchiq_log_info(vchiq_core_log_level, "%d: remove_service:%d",
- service->state->id, service->localport);
+ vchiq_log_debug(service->state->dev, VCHIQ_CORE, "%d: remove_service:%d",
+ service->state->id, service->localport);
if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
vchiq_service_put(service);
@@ -2990,7 +2968,7 @@ vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle)
(service->srvstate == VCHIQ_SRVSTATE_OPEN))
break;
- vchiq_log_warning(vchiq_core_log_level,
+ vchiq_log_warning(service->state->dev, VCHIQ_CORE,
"%d: remove_service:%d - waiting in state %s",
service->state->id, service->localport,
srvstate_names[service->srvstate]);
@@ -3100,9 +3078,9 @@ int vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
*/
wmb();
- vchiq_log_info(vchiq_core_log_level, "%d: bt (%d->%d) %cx %x@%pad %pK",
- state->id, service->localport, service->remoteport,
- dir_char, size, &bulk->data, userdata);
+ vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: bt (%d->%d) %cx %x@%pad %pK",
+ state->id, service->localport, service->remoteport,
+ dir_char, size, &bulk->data, userdata);
/*
* The slot mutex must be held when the service is being closed, so
@@ -3137,7 +3115,7 @@ int vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
mutex_unlock(&state->slot_mutex);
mutex_unlock(&service->bulk_mutex);
- vchiq_log_trace(vchiq_core_log_level, "%d: bt:%d %cx li=%x ri=%x p=%x",
+ vchiq_log_trace(state->dev, VCHIQ_CORE, "%d: bt:%d %cx li=%x ri=%x p=%x",
state->id, service->localport, dir_char, queue->local_insert,
queue->remote_insert, queue->process);
@@ -3565,8 +3543,8 @@ int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
}
len += scnprintf(buf + len, sizeof(buf) - len,
- " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
- VCHIQ_FOURCC_AS_4CHARS(fourcc), remoteport,
+ " '%p4cc' remote %s (msg use %d/%d, slot use %d/%d)",
+ &fourcc, remoteport,
quota->message_use_count, quota->message_quota,
quota->slot_use_count, quota->slot_quota);
@@ -3628,26 +3606,6 @@ int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
return err;
}
-void
-vchiq_loud_error_header(void)
-{
- vchiq_log_error(vchiq_core_log_level,
- "============================================================================");
- vchiq_log_error(vchiq_core_log_level,
- "============================================================================");
- vchiq_log_error(vchiq_core_log_level, "=====");
-}
-
-void
-vchiq_loud_error_footer(void)
-{
- vchiq_log_error(vchiq_core_log_level, "=====");
- vchiq_log_error(vchiq_core_log_level,
- "============================================================================");
- vchiq_log_error(vchiq_core_log_level,
- "============================================================================");
-}
-
int vchiq_send_remote_use(struct vchiq_state *state)
{
if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
@@ -3665,7 +3623,8 @@ int vchiq_send_remote_use_active(struct vchiq_state *state)
NULL, NULL, 0, 0);
}
-void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem, size_t num_bytes)
+void vchiq_log_dump_mem(struct device *dev, const char *label, u32 addr,
+ const void *void_mem, size_t num_bytes)
{
const u8 *mem = void_mem;
size_t offset;
@@ -3694,9 +3653,9 @@ void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem, size_
*s++ = '\0';
if (label && (*label != '\0'))
- vchiq_log_trace(VCHIQ_LOG_TRACE, "%s: %08x: %s", label, addr, line_buf);
+ vchiq_log_trace(dev, VCHIQ_CORE, "%s: %08x: %s", label, addr, line_buf);
else
- vchiq_log_trace(VCHIQ_LOG_TRACE, "%08x: %s", addr, line_buf);
+ vchiq_log_trace(dev, VCHIQ_CORE, "%08x: %s", addr, line_buf);
addr += 16;
mem += 16;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index ec1a3caefaea..161358db457c 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -6,6 +6,7 @@
#include <linux/mutex.h>
#include <linux/completion.h>
+#include <linux/dev_printk.h>
#include <linux/kthread.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
@@ -30,50 +31,49 @@
#define VCHIQ_SLOT_SIZE 4096
#define VCHIQ_MAX_MSG_SIZE (VCHIQ_SLOT_SIZE - sizeof(struct vchiq_header))
-/* Run time control of log level, based on KERN_XXX level. */
-#define VCHIQ_LOG_DEFAULT 4
-#define VCHIQ_LOG_ERROR 3
-#define VCHIQ_LOG_WARNING 4
-#define VCHIQ_LOG_INFO 6
-#define VCHIQ_LOG_TRACE 7
+enum vchiq_log_category {
+ VCHIQ_ARM,
+ VCHIQ_CORE,
+ VCHIQ_CORE_MSG,
+ VCHIQ_SYNC,
+ VCHIQ_SUSPEND,
+};
-#define VCHIQ_LOG_PREFIX KERN_INFO "vchiq: "
+static inline const char *log_category_str(enum vchiq_log_category c)
+{
+ static const char * const strings[] = {
+ "vchiq_arm",
+ "vchiq_core",
+ "vchiq_core_msg",
+ "vchiq_sync",
+ "vchiq_suspend",
+ };
+
+ return strings[c];
+}
#ifndef vchiq_log_error
-#define vchiq_log_error(cat, fmt, ...) \
- do { if (cat >= VCHIQ_LOG_ERROR) \
- printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
+#define vchiq_log_error(dev, cat, fmt, ...) \
+ do { dev_dbg(dev, "%s error: " fmt, log_category_str(cat), ##__VA_ARGS__); } while (0)
#endif
#ifndef vchiq_log_warning
-#define vchiq_log_warning(cat, fmt, ...) \
- do { if (cat >= VCHIQ_LOG_WARNING) \
- printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
+#define vchiq_log_warning(dev, cat, fmt, ...) \
+ do { dev_dbg(dev, "%s warning: " fmt, log_category_str(cat), ##__VA_ARGS__); } while (0)
#endif
-#ifndef vchiq_log_info
-#define vchiq_log_info(cat, fmt, ...) \
- do { if (cat >= VCHIQ_LOG_INFO) \
- printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
+#ifndef vchiq_log_debug
+#define vchiq_log_debug(dev, cat, fmt, ...) \
+ do { dev_dbg(dev, "%s debug: " fmt, log_category_str(cat), ##__VA_ARGS__); } while (0)
#endif
#ifndef vchiq_log_trace
-#define vchiq_log_trace(cat, fmt, ...) \
- do { if (cat >= VCHIQ_LOG_TRACE) \
- printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
+#define vchiq_log_trace(dev, cat, fmt, ...) \
+ do { dev_dbg(dev, "%s trace: " fmt, log_category_str(cat), ##__VA_ARGS__); } while (0)
#endif
-#define vchiq_loud_error(...) \
- vchiq_log_error(vchiq_core_log_level, "===== " __VA_ARGS__)
-
#define VCHIQ_SLOT_MASK (VCHIQ_SLOT_SIZE - 1)
#define VCHIQ_SLOT_QUEUE_MASK (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
#define VCHIQ_SLOT_ZERO_SLOTS DIV_ROUND_UP(sizeof(struct vchiq_slot_zero), \
VCHIQ_SLOT_SIZE)
-#define VCHIQ_FOURCC_AS_4CHARS(fourcc) \
- ((fourcc) >> 24) & 0xff, \
- ((fourcc) >> 16) & 0xff, \
- ((fourcc) >> 8) & 0xff, \
- (fourcc) & 0xff
-
#define BITSET_SIZE(b) ((b + 31) >> 5)
#define BITSET_WORD(b) (b >> 5)
#define BITSET_BIT(b) (1 << (b & 31))
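
Note that every severity now funnels into dev_dbg() with a category prefix, so even vchiq_log_error() output is suppressed (or compiled out) unless dynamic debug or DEBUG is enabled for the file. A rough illustration of the expansion, not part of the patch:

	vchiq_log_error(state->dev, VCHIQ_CORE, "bad slot use count %d", count);
	/* after preprocessing, roughly: */
	dev_dbg(state->dev, "%s error: bad slot use count %d",
		log_category_str(VCHIQ_CORE), count);
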
@@ -463,15 +463,11 @@ struct vchiq_config {
extern spinlock_t bulk_waiter_spinlock;
-extern int vchiq_core_log_level;
-extern int vchiq_core_msg_log_level;
-extern int vchiq_sync_log_level;
-
extern const char *
get_conn_state_name(enum vchiq_connstate conn_state);
extern struct vchiq_slot_zero *
-vchiq_init_slots(void *mem_base, int mem_size);
+vchiq_init_slots(struct device *dev, void *mem_base, int mem_size);
extern int
vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, struct device *dev);
@@ -600,7 +596,8 @@ void vchiq_platform_conn_state_changed(struct vchiq_state *state,
void vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate);
-void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem, size_t num_bytes);
+void vchiq_log_dump_mem(struct device *dev, const char *label, u32 addr,
+ const void *void_mem, size_t num_bytes);
int vchiq_remove_service(struct vchiq_instance *instance, unsigned int service);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
index dc667afd1f8c..58db78a9c8d4 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
@@ -13,102 +13,10 @@
#define DEBUGFS_WRITE_BUF_SIZE 256
-#define VCHIQ_LOG_ERROR_STR "error"
-#define VCHIQ_LOG_WARNING_STR "warning"
-#define VCHIQ_LOG_INFO_STR "info"
-#define VCHIQ_LOG_TRACE_STR "trace"
-
/* Global 'vchiq' debugfs and clients entry used by all instances */
static struct dentry *vchiq_dbg_dir;
static struct dentry *vchiq_dbg_clients;
-/* Log category debugfs entries */
-struct vchiq_debugfs_log_entry {
- const char *name;
- void *plevel;
-};
-
-static struct vchiq_debugfs_log_entry vchiq_debugfs_log_entries[] = {
- { "core", &vchiq_core_log_level },
- { "msg", &vchiq_core_msg_log_level },
- { "sync", &vchiq_sync_log_level },
- { "susp", &vchiq_susp_log_level },
- { "arm", &vchiq_arm_log_level },
-};
-
-static int debugfs_log_show(struct seq_file *f, void *offset)
-{
- int *levp = f->private;
- char *log_value = NULL;
-
- switch (*levp) {
- case VCHIQ_LOG_ERROR:
- log_value = VCHIQ_LOG_ERROR_STR;
- break;
- case VCHIQ_LOG_WARNING:
- log_value = VCHIQ_LOG_WARNING_STR;
- break;
- case VCHIQ_LOG_INFO:
- log_value = VCHIQ_LOG_INFO_STR;
- break;
- case VCHIQ_LOG_TRACE:
- log_value = VCHIQ_LOG_TRACE_STR;
- break;
- default:
- break;
- }
-
- seq_printf(f, "%s\n", log_value ? log_value : "(null)");
-
- return 0;
-}
-
-static int debugfs_log_open(struct inode *inode, struct file *file)
-{
- return single_open(file, debugfs_log_show, inode->i_private);
-}
-
-static ssize_t debugfs_log_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct seq_file *f = (struct seq_file *)file->private_data;
- int *levp = f->private;
- char kbuf[DEBUGFS_WRITE_BUF_SIZE + 1];
-
- memset(kbuf, 0, DEBUGFS_WRITE_BUF_SIZE + 1);
- if (count >= DEBUGFS_WRITE_BUF_SIZE)
- count = DEBUGFS_WRITE_BUF_SIZE;
-
- if (copy_from_user(kbuf, buffer, count))
- return -EFAULT;
- kbuf[count - 1] = 0;
-
- if (strncmp("error", kbuf, strlen("error")) == 0)
- *levp = VCHIQ_LOG_ERROR;
- else if (strncmp("warning", kbuf, strlen("warning")) == 0)
- *levp = VCHIQ_LOG_WARNING;
- else if (strncmp("info", kbuf, strlen("info")) == 0)
- *levp = VCHIQ_LOG_INFO;
- else if (strncmp("trace", kbuf, strlen("trace")) == 0)
- *levp = VCHIQ_LOG_TRACE;
- else
- *levp = VCHIQ_LOG_DEFAULT;
-
- *ppos += count;
-
- return count;
-}
-
-static const struct file_operations debugfs_log_fops = {
- .owner = THIS_MODULE,
- .open = debugfs_log_open,
- .write = debugfs_log_write,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static int debugfs_usecount_show(struct seq_file *f, void *offset)
{
struct vchiq_instance *instance = f->private;
@@ -205,19 +113,8 @@ void vchiq_debugfs_remove_instance(struct vchiq_instance *instance)
void vchiq_debugfs_init(void)
{
- struct dentry *dir;
- int i;
-
vchiq_dbg_dir = debugfs_create_dir("vchiq", NULL);
vchiq_dbg_clients = debugfs_create_dir("clients", vchiq_dbg_dir);
-
- /* create an entry under <debugfs>/vchiq/log for each log category */
- dir = debugfs_create_dir("log", vchiq_dbg_dir);
-
- for (i = 0; i < ARRAY_SIZE(vchiq_debugfs_log_entries); i++)
- debugfs_create_file(vchiq_debugfs_log_entries[i].name, 0644,
- dir, vchiq_debugfs_log_entries[i].plevel,
- &debugfs_log_fops);
}
/* remove all the debugfs entries */
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
index 841e1a535642..0bc93f48c14c 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
@@ -47,9 +47,9 @@ user_service_free(void *userdata)
static void close_delivered(struct user_service *user_service)
{
- vchiq_log_info(vchiq_arm_log_level,
- "%s(handle=%x)",
- __func__, user_service->service->handle);
+ vchiq_log_debug(user_service->service->state->dev, VCHIQ_ARM,
+ "%s(handle=%x)",
+ __func__, user_service->service->handle);
if (user_service->close_pending) {
/* Allow the underlying service to be culled */
@@ -235,8 +235,8 @@ static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
spin_unlock(&msg_queue_spinlock);
DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
if (wait_for_completion_interruptible(&user_service->insert_event)) {
- vchiq_log_info(vchiq_arm_log_level,
- "DEQUEUE_MESSAGE interrupted");
+ vchiq_log_debug(service->state->dev, VCHIQ_ARM,
+ "DEQUEUE_MESSAGE interrupted");
ret = -EINTR;
break;
}
@@ -271,7 +271,7 @@ static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
ret = -EFAULT;
}
} else {
- vchiq_log_error(vchiq_arm_log_level,
+ vchiq_log_error(service->state->dev, VCHIQ_ARM,
"header %pK: bufsize %x < size %x",
header, args->bufsize, header->size);
WARN(1, "invalid size\n");
@@ -318,13 +318,13 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
}
mutex_unlock(&instance->bulk_waiter_list_mutex);
if (!waiter) {
- vchiq_log_error(vchiq_arm_log_level,
+ vchiq_log_error(service->state->dev, VCHIQ_ARM,
"no bulk_waiter found for pid %d", current->pid);
ret = -ESRCH;
goto out;
}
- vchiq_log_info(vchiq_arm_log_level,
- "found bulk_waiter %pK for pid %d", waiter, current->pid);
+ vchiq_log_debug(service->state->dev, VCHIQ_ARM,
+ "found bulk_waiter %pK for pid %d", waiter, current->pid);
userdata = &waiter->bulk_waiter;
} else {
userdata = args->userdata;
@@ -355,8 +355,8 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
mutex_lock(&instance->bulk_waiter_list_mutex);
list_add(&waiter->list, &instance->bulk_waiter_list);
mutex_unlock(&instance->bulk_waiter_list_mutex);
- vchiq_log_info(vchiq_arm_log_level,
- "saved bulk_waiter %pK for pid %d", waiter, current->pid);
+ vchiq_log_debug(service->state->dev, VCHIQ_ARM,
+ "saved bulk_waiter %pK for pid %d", waiter, current->pid);
ret = put_user(mode_waiting, mode);
}
@@ -455,8 +455,8 @@ static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
mutex_lock(&instance->completion_mutex);
if (rc) {
DEBUG_TRACE(AWAIT_COMPLETION_LINE);
- vchiq_log_info(vchiq_arm_log_level,
- "AWAIT_COMPLETION interrupted");
+ vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
+ "AWAIT_COMPLETION interrupted");
ret = -EINTR;
goto out;
}
@@ -501,7 +501,7 @@ static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
msglen = header->size + sizeof(struct vchiq_header);
/* This must be a VCHIQ-style service */
if (args->msgbufsize < msglen) {
- vchiq_log_error(vchiq_arm_log_level,
+ vchiq_log_error(service->state->dev, VCHIQ_ARM,
"header %pK: msgbufsize %x < msglen %x",
header, args->msgbufsize, msglen);
WARN(1, "invalid message size\n");
@@ -582,7 +582,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
long ret = 0;
int i, rc;
- vchiq_log_trace(vchiq_arm_log_level,
+ vchiq_log_trace(instance->state->dev, VCHIQ_ARM,
"%s - instance %pK, cmd %s, arg %lx", __func__, instance,
((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) && (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
@@ -618,7 +618,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
rc = mutex_lock_killable(&instance->state->mutex);
if (rc) {
- vchiq_log_error(vchiq_arm_log_level,
+ vchiq_log_error(instance->state->dev, VCHIQ_ARM,
"vchiq: connect: could not lock mutex for state %d: %d",
instance->state->id, rc);
ret = -EINTR;
@@ -630,7 +630,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (!status)
instance->connected = 1;
else
- vchiq_log_error(vchiq_arm_log_level,
+ vchiq_log_error(instance->state->dev, VCHIQ_ARM,
"vchiq: could not connect: %d", status);
break;
@@ -700,14 +700,13 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
vchiq_use_service_internal(service) :
vchiq_release_service_internal(service);
if (ret) {
- vchiq_log_error(vchiq_susp_log_level,
- "%s: cmd %s returned error %ld for service %c%c%c%c:%03d",
+ vchiq_log_error(instance->state->dev, VCHIQ_SUSPEND,
+ "%s: cmd %s returned error %ld for service %p4cc:%03d",
__func__, (cmd == VCHIQ_IOC_USE_SERVICE) ?
"VCHIQ_IOC_USE_SERVICE" :
"VCHIQ_IOC_RELEASE_SERVICE",
- ret,
- VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
- service->client_id);
+ ret, &service->base.fourcc,
+ service->client_id);
}
} else {
ret = -EINVAL;
@@ -868,16 +867,17 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ret = -EINTR;
}
- if (!status && (ret < 0) && (ret != -EINTR) && (ret != -EWOULDBLOCK))
- vchiq_log_info(vchiq_arm_log_level,
- " ioctl instance %pK, cmd %s -> status %d, %ld",
- instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
- ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
- else
- vchiq_log_trace(vchiq_arm_log_level,
+ if (!status && (ret < 0) && (ret != -EINTR) && (ret != -EWOULDBLOCK)) {
+ vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
" ioctl instance %pK, cmd %s -> status %d, %ld",
instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
+ } else {
+ vchiq_log_trace(instance->state->dev, VCHIQ_ARM,
+ " ioctl instance %pK, cmd %s -> status %d, %ld",
+ instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
+ ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
+ }
return ret;
}
@@ -1170,10 +1170,10 @@ static int vchiq_open(struct inode *inode, struct file *file)
struct vchiq_state *state = vchiq_get_state();
struct vchiq_instance *instance;
- vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
+ vchiq_log_debug(state->dev, VCHIQ_ARM, "vchiq_open");
if (!state) {
- vchiq_log_error(vchiq_arm_log_level,
+ vchiq_log_error(state->dev, VCHIQ_ARM,
"vchiq has no connection to VideoCore");
return -ENOTCONN;
}
@@ -1206,8 +1206,8 @@ static int vchiq_release(struct inode *inode, struct file *file)
int ret = 0;
int i;
- vchiq_log_info(vchiq_arm_log_level, "%s: instance=%lx", __func__,
- (unsigned long)instance);
+ vchiq_log_debug(state->dev, VCHIQ_ARM, "%s: instance=%lx", __func__,
+ (unsigned long)instance);
if (!state) {
ret = -EPERM;
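
The vchiq hunks above drop the driver's global log-level variables in favour of device-aware helpers that take the owning struct device plus a log category (VCHIQ_ARM, VCHIQ_SUSPEND), presumably so verbosity is steered by the kernel's dynamic debug facility rather than the removed debugfs knobs. The sketch below shows one plausible shape for such wrappers built on dev_err()/dev_dbg(); the names match the converted call sites, but the actual definitions live in the vchiq headers and are not part of this diff.

/*
 * Sketch only -- the real macros are defined in the vchiq headers.
 * The shape below just mirrors the converted call sites: a struct
 * device pointer, a log category, then a printf-style format string.
 */
#include <linux/device.h>

enum vchiq_log_category {
	VCHIQ_ARM,	/* categories seen above; others omitted */
	VCHIQ_SUSPEND,
};

#define vchiq_log_error(dev, cat, fmt, ...) \
	dev_err(dev, "%d: " fmt "\n", (cat), ##__VA_ARGS__)
#define vchiq_log_debug(dev, cat, fmt, ...) \
	dev_dbg(dev, "%d: " fmt "\n", (cat), ##__VA_ARGS__)
#define vchiq_log_trace(dev, cat, fmt, ...) \
	dev_dbg(dev, "%d: trace: " fmt "\n", (cat), ##__VA_ARGS__)
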
diff --git a/drivers/staging/vme_user/vme.c b/drivers/staging/vme_user/vme.c
index d0366dd3f2b1..5c416c31ec57 100644
--- a/drivers/staging/vme_user/vme.c
+++ b/drivers/staging/vme_user/vme.c
@@ -62,7 +62,6 @@ static struct vme_bridge *find_bridge(struct vme_resource *resource)
return list_entry(resource->entry, struct vme_lm_resource,
list)->parent;
default:
- printk(KERN_ERR "Unknown resource type\n");
return NULL;
}
}
@@ -81,27 +80,12 @@ static struct vme_bridge *find_bridge(struct vme_resource *resource)
void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
dma_addr_t *dma)
{
- struct vme_bridge *bridge;
-
- if (!resource) {
- printk(KERN_ERR "No resource\n");
- return NULL;
- }
-
- bridge = find_bridge(resource);
- if (!bridge) {
- printk(KERN_ERR "Can't find bridge\n");
- return NULL;
- }
-
- if (!bridge->parent) {
- printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
- return NULL;
- }
+ struct vme_bridge *bridge = find_bridge(resource);
if (!bridge->alloc_consistent) {
- printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
- bridge->name);
+ dev_err(bridge->parent,
+ "alloc_consistent not supported by bridge %s\n",
+ bridge->name);
return NULL;
}
@@ -121,27 +105,12 @@ EXPORT_SYMBOL(vme_alloc_consistent);
void vme_free_consistent(struct vme_resource *resource, size_t size,
void *vaddr, dma_addr_t dma)
{
- struct vme_bridge *bridge;
-
- if (!resource) {
- printk(KERN_ERR "No resource\n");
- return;
- }
-
- bridge = find_bridge(resource);
- if (!bridge) {
- printk(KERN_ERR "Can't find bridge\n");
- return;
- }
-
- if (!bridge->parent) {
- printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
- return;
- }
+ struct vme_bridge *bridge = find_bridge(resource);
if (!bridge->free_consistent) {
- printk(KERN_ERR "free_consistent not supported by bridge %s\n",
- bridge->name);
+ dev_err(bridge->parent,
+ "free_consistent not supported by bridge %s\n",
+ bridge->name);
return;
}
@@ -161,6 +130,7 @@ EXPORT_SYMBOL(vme_free_consistent);
*/
size_t vme_get_size(struct vme_resource *resource)
{
+ struct vme_bridge *bridge = find_bridge(resource);
int enabled, retval;
unsigned long long base, size;
dma_addr_t buf_base;
@@ -184,14 +154,14 @@ size_t vme_get_size(struct vme_resource *resource)
case VME_DMA:
return 0;
default:
- printk(KERN_ERR "Unknown resource type\n");
+ dev_err(bridge->parent, "Unknown resource type\n");
return 0;
}
}
EXPORT_SYMBOL(vme_get_size);
-int vme_check_window(u32 aspace, unsigned long long vme_base,
- unsigned long long size)
+int vme_check_window(struct vme_bridge *bridge, u32 aspace,
+ unsigned long long vme_base, unsigned long long size)
{
int retval = 0;
@@ -225,7 +195,7 @@ int vme_check_window(u32 aspace, unsigned long long vme_base,
/* User Defined */
break;
default:
- printk(KERN_ERR "Invalid address space\n");
+ dev_err(bridge->parent, "Invalid address space\n");
retval = -EINVAL;
break;
}
@@ -282,24 +252,21 @@ struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
u32 cycle)
{
struct vme_bridge *bridge;
- struct list_head *slave_pos = NULL;
struct vme_slave_resource *allocated_image = NULL;
struct vme_slave_resource *slave_image = NULL;
struct vme_resource *resource = NULL;
bridge = vdev->bridge;
if (!bridge) {
- printk(KERN_ERR "Can't find VME bus\n");
+ dev_err(&vdev->dev, "Can't find VME bus\n");
goto err_bus;
}
/* Loop through slave resources */
- list_for_each(slave_pos, &bridge->slave_resources) {
- slave_image = list_entry(slave_pos,
- struct vme_slave_resource, list);
-
+ list_for_each_entry(slave_image, &bridge->slave_resources, list) {
if (!slave_image) {
- printk(KERN_ERR "Registered NULL Slave resource\n");
+ dev_err(bridge->parent,
+ "Registered NULL Slave resource\n");
continue;
}
@@ -366,24 +333,24 @@ int vme_slave_set(struct vme_resource *resource, int enabled,
int retval;
if (resource->type != VME_SLAVE) {
- printk(KERN_ERR "Not a slave resource\n");
+ dev_err(bridge->parent, "Not a slave resource\n");
return -EINVAL;
}
image = list_entry(resource->entry, struct vme_slave_resource, list);
if (!bridge->slave_set) {
- printk(KERN_ERR "Function not supported\n");
+ dev_err(bridge->parent, "Function not supported\n");
return -ENOSYS;
}
if (!(((image->address_attr & aspace) == aspace) &&
((image->cycle_attr & cycle) == cycle))) {
- printk(KERN_ERR "Invalid attributes\n");
+ dev_err(bridge->parent, "Invalid attributes\n");
return -EINVAL;
}
- retval = vme_check_window(aspace, vme_base, size);
+ retval = vme_check_window(bridge, aspace, vme_base, size);
if (retval)
return retval;
@@ -415,14 +382,14 @@ int vme_slave_get(struct vme_resource *resource, int *enabled,
struct vme_slave_resource *image;
if (resource->type != VME_SLAVE) {
- printk(KERN_ERR "Not a slave resource\n");
+ dev_err(bridge->parent, "Not a slave resource\n");
return -EINVAL;
}
image = list_entry(resource->entry, struct vme_slave_resource, list);
if (!bridge->slave_get) {
- printk(KERN_ERR "vme_slave_get not supported\n");
+ dev_err(bridge->parent, "%s not supported\n", __func__);
return -EINVAL;
}
@@ -439,24 +406,25 @@ EXPORT_SYMBOL(vme_slave_get);
*/
void vme_slave_free(struct vme_resource *resource)
{
+ struct vme_bridge *bridge = find_bridge(resource);
struct vme_slave_resource *slave_image;
if (resource->type != VME_SLAVE) {
- printk(KERN_ERR "Not a slave resource\n");
+ dev_err(bridge->parent, "Not a slave resource\n");
return;
}
slave_image = list_entry(resource->entry, struct vme_slave_resource,
list);
if (!slave_image) {
- printk(KERN_ERR "Can't find slave resource\n");
+ dev_err(bridge->parent, "Can't find slave resource\n");
return;
}
/* Unlock image */
mutex_lock(&slave_image->mtx);
if (slave_image->locked == 0)
- printk(KERN_ERR "Image is already free\n");
+ dev_err(bridge->parent, "Image is already free\n");
slave_image->locked = 0;
mutex_unlock(&slave_image->mtx);
@@ -482,24 +450,21 @@ struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
u32 cycle, u32 dwidth)
{
struct vme_bridge *bridge;
- struct list_head *master_pos = NULL;
struct vme_master_resource *allocated_image = NULL;
struct vme_master_resource *master_image = NULL;
struct vme_resource *resource = NULL;
bridge = vdev->bridge;
if (!bridge) {
- printk(KERN_ERR "Can't find VME bus\n");
+ dev_err(&vdev->dev, "Can't find VME bus\n");
goto err_bus;
}
/* Loop through master resources */
- list_for_each(master_pos, &bridge->master_resources) {
- master_image = list_entry(master_pos,
- struct vme_master_resource, list);
-
+ list_for_each_entry(master_image, &bridge->master_resources, list) {
if (!master_image) {
- printk(KERN_WARNING "Registered NULL master resource\n");
+ dev_warn(bridge->parent,
+ "Registered NULL master resource\n");
continue;
}
@@ -519,7 +484,7 @@ struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
/* Check to see if we found a resource */
if (!allocated_image) {
- printk(KERN_ERR "Can't find a suitable resource\n");
+ dev_err(&vdev->dev, "Can't find a suitable resource\n");
goto err_image;
}
@@ -569,25 +534,25 @@ int vme_master_set(struct vme_resource *resource, int enabled,
int retval;
if (resource->type != VME_MASTER) {
- printk(KERN_ERR "Not a master resource\n");
+ dev_err(bridge->parent, "Not a master resource\n");
return -EINVAL;
}
image = list_entry(resource->entry, struct vme_master_resource, list);
if (!bridge->master_set) {
- printk(KERN_WARNING "vme_master_set not supported\n");
+ dev_warn(bridge->parent, "%s not supported\n", __func__);
return -EINVAL;
}
if (!(((image->address_attr & aspace) == aspace) &&
((image->cycle_attr & cycle) == cycle) &&
((image->width_attr & dwidth) == dwidth))) {
- printk(KERN_WARNING "Invalid attributes\n");
+ dev_warn(bridge->parent, "Invalid attributes\n");
return -EINVAL;
}
- retval = vme_check_window(aspace, vme_base, size);
+ retval = vme_check_window(bridge, aspace, vme_base, size);
if (retval)
return retval;
@@ -619,14 +584,14 @@ int vme_master_get(struct vme_resource *resource, int *enabled,
struct vme_master_resource *image;
if (resource->type != VME_MASTER) {
- printk(KERN_ERR "Not a master resource\n");
+ dev_err(bridge->parent, "Not a master resource\n");
return -EINVAL;
}
image = list_entry(resource->entry, struct vme_master_resource, list);
if (!bridge->master_get) {
- printk(KERN_WARNING "%s not supported\n", __func__);
+ dev_warn(bridge->parent, "%s not supported\n", __func__);
return -EINVAL;
}
@@ -658,12 +623,13 @@ ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
size_t length;
if (!bridge->master_read) {
- printk(KERN_WARNING "Reading from resource not supported\n");
+ dev_warn(bridge->parent,
+ "Reading from resource not supported\n");
return -EINVAL;
}
if (resource->type != VME_MASTER) {
- printk(KERN_ERR "Not a master resource\n");
+ dev_err(bridge->parent, "Not a master resource\n");
return -EINVAL;
}
@@ -672,7 +638,7 @@ ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
length = vme_get_size(resource);
if (offset > length) {
- printk(KERN_WARNING "Invalid Offset\n");
+ dev_warn(bridge->parent, "Invalid Offset\n");
return -EFAULT;
}
@@ -706,12 +672,12 @@ ssize_t vme_master_write(struct vme_resource *resource, void *buf,
size_t length;
if (!bridge->master_write) {
- printk(KERN_WARNING "Writing to resource not supported\n");
+ dev_warn(bridge->parent, "Writing to resource not supported\n");
return -EINVAL;
}
if (resource->type != VME_MASTER) {
- printk(KERN_ERR "Not a master resource\n");
+ dev_err(bridge->parent, "Not a master resource\n");
return -EINVAL;
}
@@ -720,7 +686,7 @@ ssize_t vme_master_write(struct vme_resource *resource, void *buf,
length = vme_get_size(resource);
if (offset > length) {
- printk(KERN_WARNING "Invalid Offset\n");
+ dev_warn(bridge->parent, "Invalid Offset\n");
return -EFAULT;
}
@@ -757,12 +723,12 @@ unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
struct vme_master_resource *image;
if (!bridge->master_rmw) {
- printk(KERN_WARNING "Writing to resource not supported\n");
+ dev_warn(bridge->parent, "Writing to resource not supported\n");
return -EINVAL;
}
if (resource->type != VME_MASTER) {
- printk(KERN_ERR "Not a master resource\n");
+ dev_err(bridge->parent, "Not a master resource\n");
return -EINVAL;
}
@@ -785,12 +751,13 @@ EXPORT_SYMBOL(vme_master_rmw);
*/
int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
{
+ struct vme_bridge *bridge = find_bridge(resource);
struct vme_master_resource *image;
phys_addr_t phys_addr;
unsigned long vma_size;
if (resource->type != VME_MASTER) {
- pr_err("Not a master resource\n");
+ dev_err(bridge->parent, "Not a master resource\n");
return -EINVAL;
}
@@ -799,7 +766,7 @@ int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
vma_size = vma->vm_end - vma->vm_start;
if (phys_addr + vma_size > image->bus_resource.end + 1) {
- pr_err("Map size cannot exceed the window size\n");
+ dev_err(bridge->parent, "Map size cannot exceed the window size\n");
return -EFAULT;
}
@@ -817,24 +784,25 @@ EXPORT_SYMBOL(vme_master_mmap);
*/
void vme_master_free(struct vme_resource *resource)
{
+ struct vme_bridge *bridge = find_bridge(resource);
struct vme_master_resource *master_image;
if (resource->type != VME_MASTER) {
- printk(KERN_ERR "Not a master resource\n");
+ dev_err(bridge->parent, "Not a master resource\n");
return;
}
master_image = list_entry(resource->entry, struct vme_master_resource,
list);
if (!master_image) {
- printk(KERN_ERR "Can't find master resource\n");
+ dev_err(bridge->parent, "Can't find master resource\n");
return;
}
/* Unlock image */
spin_lock(&master_image->lock);
if (master_image->locked == 0)
- printk(KERN_ERR "Image is already free\n");
+ dev_err(bridge->parent, "Image is already free\n");
master_image->locked = 0;
spin_unlock(&master_image->lock);
@@ -857,26 +825,24 @@ EXPORT_SYMBOL(vme_master_free);
struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
{
struct vme_bridge *bridge;
- struct list_head *dma_pos = NULL;
struct vme_dma_resource *allocated_ctrlr = NULL;
struct vme_dma_resource *dma_ctrlr = NULL;
struct vme_resource *resource = NULL;
/* XXX Not checking resource attributes */
- printk(KERN_ERR "No VME resource Attribute tests done\n");
+ dev_err(&vdev->dev, "No VME resource Attribute tests done\n");
bridge = vdev->bridge;
if (!bridge) {
- printk(KERN_ERR "Can't find VME bus\n");
+ dev_err(&vdev->dev, "Can't find VME bus\n");
goto err_bus;
}
/* Loop through DMA resources */
- list_for_each(dma_pos, &bridge->dma_resources) {
- dma_ctrlr = list_entry(dma_pos,
- struct vme_dma_resource, list);
+ list_for_each_entry(dma_ctrlr, &bridge->dma_resources, list) {
if (!dma_ctrlr) {
- printk(KERN_ERR "Registered NULL DMA resource\n");
+ dev_err(bridge->parent,
+ "Registered NULL DMA resource\n");
continue;
}
@@ -928,10 +894,11 @@ EXPORT_SYMBOL(vme_dma_request);
*/
struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
{
+ struct vme_bridge *bridge = find_bridge(resource);
struct vme_dma_list *dma_list;
if (resource->type != VME_DMA) {
- printk(KERN_ERR "Not a DMA resource\n");
+ dev_err(bridge->parent, "Not a DMA resource\n");
return NULL;
}
@@ -1109,12 +1076,13 @@ int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
int retval;
if (!bridge->dma_list_add) {
- printk(KERN_WARNING "Link List DMA generation not supported\n");
+ dev_warn(bridge->parent,
+ "Link List DMA generation not supported\n");
return -EINVAL;
}
if (!mutex_trylock(&list->mtx)) {
- printk(KERN_ERR "Link List already submitted\n");
+ dev_err(bridge->parent, "Link List already submitted\n");
return -EINVAL;
}
@@ -1142,7 +1110,8 @@ int vme_dma_list_exec(struct vme_dma_list *list)
int retval;
if (!bridge->dma_list_exec) {
- printk(KERN_ERR "Link List DMA execution not supported\n");
+ dev_err(bridge->parent,
+ "Link List DMA execution not supported\n");
return -EINVAL;
}
@@ -1171,12 +1140,13 @@ int vme_dma_list_free(struct vme_dma_list *list)
int retval;
if (!bridge->dma_list_empty) {
- printk(KERN_WARNING "Emptying of Link Lists not supported\n");
+ dev_warn(bridge->parent,
+ "Emptying of Link Lists not supported\n");
return -EINVAL;
}
if (!mutex_trylock(&list->mtx)) {
- printk(KERN_ERR "Link List in use\n");
+ dev_err(bridge->parent, "Link List in use\n");
return -EBUSY;
}
@@ -1186,7 +1156,7 @@ int vme_dma_list_free(struct vme_dma_list *list)
*/
retval = bridge->dma_list_empty(list);
if (retval) {
- printk(KERN_ERR "Unable to empty link-list entries\n");
+ dev_err(bridge->parent, "Unable to empty link-list entries\n");
mutex_unlock(&list->mtx);
return retval;
}
@@ -1208,22 +1178,24 @@ EXPORT_SYMBOL(vme_dma_list_free);
*/
int vme_dma_free(struct vme_resource *resource)
{
+ struct vme_bridge *bridge = find_bridge(resource);
struct vme_dma_resource *ctrlr;
if (resource->type != VME_DMA) {
- printk(KERN_ERR "Not a DMA resource\n");
+ dev_err(bridge->parent, "Not a DMA resource\n");
return -EINVAL;
}
ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
if (!mutex_trylock(&ctrlr->mtx)) {
- printk(KERN_ERR "Resource busy, can't free\n");
+ dev_err(bridge->parent, "Resource busy, can't free\n");
return -EBUSY;
}
if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
- printk(KERN_WARNING "Resource still processing transfers\n");
+ dev_warn(bridge->parent,
+ "Resource still processing transfers\n");
mutex_unlock(&ctrlr->mtx);
return -EBUSY;
}
@@ -1241,14 +1213,11 @@ EXPORT_SYMBOL(vme_dma_free);
void vme_bus_error_handler(struct vme_bridge *bridge,
unsigned long long address, int am)
{
- struct list_head *handler_pos = NULL;
struct vme_error_handler *handler;
int handler_triggered = 0;
u32 aspace = vme_get_aspace(am);
- list_for_each(handler_pos, &bridge->vme_error_handlers) {
- handler = list_entry(handler_pos, struct vme_error_handler,
- list);
+ list_for_each_entry(handler, &bridge->vme_error_handlers, list) {
if ((aspace == handler->aspace) &&
(address >= handler->start) &&
(address < handler->end)) {
@@ -1304,8 +1273,9 @@ void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
if (call)
call(level, statid, priv_data);
else
- printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n",
- level, statid);
+ dev_warn(bridge->parent,
+ "Spurious VME interrupt, level:%x, vector:%x\n", level,
+ statid);
}
EXPORT_SYMBOL(vme_irq_handler);
@@ -1333,17 +1303,18 @@ int vme_irq_request(struct vme_dev *vdev, int level, int statid,
bridge = vdev->bridge;
if (!bridge) {
- printk(KERN_ERR "Can't find VME bus\n");
+ dev_err(&vdev->dev, "Can't find VME bus\n");
return -EINVAL;
}
if ((level < 1) || (level > 7)) {
- printk(KERN_ERR "Invalid interrupt level\n");
+ dev_err(bridge->parent, "Invalid interrupt level\n");
return -EINVAL;
}
if (!bridge->irq_set) {
- printk(KERN_ERR "Configuring interrupts not supported\n");
+ dev_err(bridge->parent,
+ "Configuring interrupts not supported\n");
return -EINVAL;
}
@@ -1351,7 +1322,7 @@ int vme_irq_request(struct vme_dev *vdev, int level, int statid,
if (bridge->irq[level - 1].callback[statid].func) {
mutex_unlock(&bridge->irq_mtx);
- printk(KERN_WARNING "VME Interrupt already taken\n");
+ dev_warn(bridge->parent, "VME Interrupt already taken\n");
return -EBUSY;
}
@@ -1382,17 +1353,18 @@ void vme_irq_free(struct vme_dev *vdev, int level, int statid)
bridge = vdev->bridge;
if (!bridge) {
- printk(KERN_ERR "Can't find VME bus\n");
+ dev_err(&vdev->dev, "Can't find VME bus\n");
return;
}
if ((level < 1) || (level > 7)) {
- printk(KERN_ERR "Invalid interrupt level\n");
+ dev_err(bridge->parent, "Invalid interrupt level\n");
return;
}
if (!bridge->irq_set) {
- printk(KERN_ERR "Configuring interrupts not supported\n");
+ dev_err(bridge->parent,
+ "Configuring interrupts not supported\n");
return;
}
@@ -1429,17 +1401,18 @@ int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
bridge = vdev->bridge;
if (!bridge) {
- printk(KERN_ERR "Can't find VME bus\n");
+ dev_err(&vdev->dev, "Can't find VME bus\n");
return -EINVAL;
}
if ((level < 1) || (level > 7)) {
- printk(KERN_WARNING "Invalid interrupt level\n");
+ dev_warn(bridge->parent, "Invalid interrupt level\n");
return -EINVAL;
}
if (!bridge->irq_generate) {
- printk(KERN_WARNING "Interrupt generation not supported\n");
+ dev_warn(bridge->parent,
+ "Interrupt generation not supported\n");
return -EINVAL;
}
@@ -1460,23 +1433,21 @@ EXPORT_SYMBOL(vme_irq_generate);
struct vme_resource *vme_lm_request(struct vme_dev *vdev)
{
struct vme_bridge *bridge;
- struct list_head *lm_pos = NULL;
struct vme_lm_resource *allocated_lm = NULL;
struct vme_lm_resource *lm = NULL;
struct vme_resource *resource = NULL;
bridge = vdev->bridge;
if (!bridge) {
- printk(KERN_ERR "Can't find VME bus\n");
+ dev_err(&vdev->dev, "Can't find VME bus\n");
goto err_bus;
}
/* Loop through LM resources */
- list_for_each(lm_pos, &bridge->lm_resources) {
- lm = list_entry(lm_pos,
- struct vme_lm_resource, list);
+ list_for_each_entry(lm, &bridge->lm_resources, list) {
if (!lm) {
- printk(KERN_ERR "Registered NULL Location Monitor resource\n");
+ dev_err(bridge->parent,
+ "Registered NULL Location Monitor resource\n");
continue;
}
@@ -1528,10 +1499,11 @@ EXPORT_SYMBOL(vme_lm_request);
*/
int vme_lm_count(struct vme_resource *resource)
{
+ struct vme_bridge *bridge = find_bridge(resource);
struct vme_lm_resource *lm;
if (resource->type != VME_LM) {
- printk(KERN_ERR "Not a Location Monitor resource\n");
+ dev_err(bridge->parent, "Not a Location Monitor resource\n");
return -EINVAL;
}
@@ -1562,14 +1534,14 @@ int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
struct vme_lm_resource *lm;
if (resource->type != VME_LM) {
- printk(KERN_ERR "Not a Location Monitor resource\n");
+ dev_err(bridge->parent, "Not a Location Monitor resource\n");
return -EINVAL;
}
lm = list_entry(resource->entry, struct vme_lm_resource, list);
if (!bridge->lm_set) {
- printk(KERN_ERR "vme_lm_set not supported\n");
+ dev_err(bridge->parent, "%s not supported\n", __func__);
return -EINVAL;
}
@@ -1598,14 +1570,14 @@ int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
struct vme_lm_resource *lm;
if (resource->type != VME_LM) {
- printk(KERN_ERR "Not a Location Monitor resource\n");
+ dev_err(bridge->parent, "Not a Location Monitor resource\n");
return -EINVAL;
}
lm = list_entry(resource->entry, struct vme_lm_resource, list);
if (!bridge->lm_get) {
- printk(KERN_ERR "vme_lm_get not supported\n");
+ dev_err(bridge->parent, "%s not supported\n", __func__);
return -EINVAL;
}
@@ -1620,7 +1592,7 @@ EXPORT_SYMBOL(vme_lm_get);
* @callback: Pointer to callback function called when triggered.
* @data: Generic pointer that will be passed to the callback function.
*
- * Attach a callback to the specificed offset into the location monitors
+ * Attach a callback to the specified offset into the location monitors
* monitored addresses. A generic pointer is provided to allow data to be
* passed to the callback when called.
*
@@ -1635,14 +1607,14 @@ int vme_lm_attach(struct vme_resource *resource, int monitor,
struct vme_lm_resource *lm;
if (resource->type != VME_LM) {
- printk(KERN_ERR "Not a Location Monitor resource\n");
+ dev_err(bridge->parent, "Not a Location Monitor resource\n");
return -EINVAL;
}
lm = list_entry(resource->entry, struct vme_lm_resource, list);
if (!bridge->lm_attach) {
- printk(KERN_ERR "vme_lm_attach not supported\n");
+ dev_err(bridge->parent, "%s not supported\n", __func__);
return -EINVAL;
}
@@ -1655,7 +1627,7 @@ EXPORT_SYMBOL(vme_lm_attach);
* @resource: Pointer to VME location monitor resource.
* @monitor: Offset to which callback should be removed.
*
- * Remove the callback associated with the specificed offset into the
+ * Remove the callback associated with the specified offset into the
* location monitors monitored addresses.
*
* Return: Zero on success, -EINVAL when provided with an invalid location
@@ -1668,14 +1640,14 @@ int vme_lm_detach(struct vme_resource *resource, int monitor)
struct vme_lm_resource *lm;
if (resource->type != VME_LM) {
- printk(KERN_ERR "Not a Location Monitor resource\n");
+ dev_err(bridge->parent, "Not a Location Monitor resource\n");
return -EINVAL;
}
lm = list_entry(resource->entry, struct vme_lm_resource, list);
if (!bridge->lm_detach) {
- printk(KERN_ERR "vme_lm_detach not supported\n");
+ dev_err(bridge->parent, "%s not supported\n", __func__);
return -EINVAL;
}
@@ -1697,10 +1669,11 @@ EXPORT_SYMBOL(vme_lm_detach);
*/
void vme_lm_free(struct vme_resource *resource)
{
+ struct vme_bridge *bridge = find_bridge(resource);
struct vme_lm_resource *lm;
if (resource->type != VME_LM) {
- printk(KERN_ERR "Not a Location Monitor resource\n");
+ dev_err(bridge->parent, "Not a Location Monitor resource\n");
return;
}
@@ -1737,12 +1710,12 @@ int vme_slot_num(struct vme_dev *vdev)
bridge = vdev->bridge;
if (!bridge) {
- printk(KERN_ERR "Can't find VME bus\n");
+ dev_err(&vdev->dev, "Can't find VME bus\n");
return -EINVAL;
}
if (!bridge->slot_get) {
- printk(KERN_WARNING "vme_slot_num not supported\n");
+ dev_warn(bridge->parent, "%s not supported\n", __func__);
return -EINVAL;
}
@@ -1765,7 +1738,7 @@ int vme_bus_num(struct vme_dev *vdev)
bridge = vdev->bridge;
if (!bridge) {
- pr_err("Can't find VME bus\n");
+ dev_err(&vdev->dev, "Can't find VME bus\n");
return -EINVAL;
}
@@ -1866,8 +1839,9 @@ static int __vme_register_driver_bus(struct vme_driver *drv,
if (vdev->dev.platform_data) {
list_add_tail(&vdev->drv_list, &drv->devices);
list_add_tail(&vdev->bridge_list, &bridge->devices);
- } else
+ } else {
device_unregister(&vdev->dev);
+ }
}
return 0;
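
The vme.c conversion follows one pattern throughout the file: bare printk(KERN_ERR/KERN_WARNING ...) calls become dev_err()/dev_warn() against either the bridge's parent device or the requesting vme_dev, open-coded list_for_each() plus list_entry() loops become list_for_each_entry(), and vme_check_window() now takes the bridge so that it, too, can report against a device. A hypothetical caller after the conversion might look like the sketch below (the helper is illustrative only; the structures come from the driver's vme.h / vme_bridge.h).

/*
 * Hypothetical caller, showing the converted API only.  Not part of
 * the patch; structure layouts come from the driver's own headers.
 */
#include <linux/device.h>
#include <linux/list.h>

#include "vme.h"
#include "vme_bridge.h"

static int vme_demo_find_slave(struct vme_bridge *bridge, u32 aspace,
			       unsigned long long base,
			       unsigned long long size)
{
	struct vme_slave_resource *slave;
	int ret;

	/* Window bounds are now checked against a specific bridge... */
	ret = vme_check_window(bridge, aspace, base, size);
	if (ret) {
		/* ...and diagnostics identify that bridge's parent device. */
		dev_err(bridge->parent, "window %llx+%llx rejected\n",
			base, size);
		return ret;
	}

	/* list_for_each_entry() replaces list_for_each() + list_entry(). */
	list_for_each_entry(slave, &bridge->slave_resources, list) {
		if ((slave->address_attr & aspace) == aspace)
			return 0;
	}

	return -ENODEV;
}
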
diff --git a/drivers/staging/vme_user/vme.h b/drivers/staging/vme_user/vme.h
index faa9816046a9..06504dccd5ff 100644
--- a/drivers/staging/vme_user/vme.h
+++ b/drivers/staging/vme_user/vme.h
@@ -87,7 +87,7 @@ extern struct bus_type vme_bus_type;
#define VME_NUM_STATUSID 256
/* VME_MAX_BRIDGES comes from the type of vme_bus_numbers */
-#define VME_MAX_BRIDGES (sizeof(unsigned int)*8)
+#define VME_MAX_BRIDGES (sizeof(unsigned int) * 8)
#define VME_MAX_SLOTS 32
#define VME_SLOT_CURRENT -1
@@ -133,8 +133,8 @@ void vme_free_consistent(struct vme_resource *, size_t, void *,
dma_addr_t);
size_t vme_get_size(struct vme_resource *);
-int vme_check_window(u32 aspace, unsigned long long vme_base,
- unsigned long long size);
+int vme_check_window(struct vme_bridge *bridge, u32 aspace,
+ unsigned long long vme_base, unsigned long long size);
struct vme_resource *vme_slave_request(struct vme_dev *, u32, u32);
int vme_slave_set(struct vme_resource *, int, unsigned long long,
diff --git a/drivers/staging/vme_user/vme_fake.c b/drivers/staging/vme_user/vme_fake.c
index 7c53a8a7b79b..7f84d1c86f29 100644
--- a/drivers/staging/vme_user/vme_fake.c
+++ b/drivers/staging/vme_user/vme_fake.c
@@ -95,7 +95,7 @@ static void fake_VIRQ_tasklet(unsigned long data)
struct vme_bridge *fake_bridge;
struct fake_driver *bridge;
- fake_bridge = (struct vme_bridge *) data;
+ fake_bridge = (struct vme_bridge *)data;
bridge = fake_bridge->driver_priv;
vme_irq_handler(fake_bridge, bridge->int_level, bridge->int_statid);
@@ -105,7 +105,7 @@ static void fake_VIRQ_tasklet(unsigned long data)
* Configure VME interrupt
*/
static void fake_irq_set(struct vme_bridge *fake_bridge, int level,
- int state, int sync)
+ int state, int sync)
{
/* Nothing to do */
}
@@ -125,7 +125,7 @@ static dma_addr_t fake_ptr_to_pci(void *addr)
* interrupt to be acked.
*/
static int fake_irq_generate(struct vme_bridge *fake_bridge, int level,
- int statid)
+ int statid)
{
struct fake_driver *bridge;
@@ -152,8 +152,8 @@ static int fake_irq_generate(struct vme_bridge *fake_bridge, int level,
* Initialize a slave window with the requested attributes.
*/
static int fake_slave_set(struct vme_slave_resource *image, int enabled,
- unsigned long long vme_base, unsigned long long size,
- dma_addr_t buf_base, u32 aspace, u32 cycle)
+ unsigned long long vme_base, unsigned long long size,
+ dma_addr_t buf_base, u32 aspace, u32 cycle)
{
unsigned int i, granularity = 0;
unsigned long long vme_bound;
@@ -221,8 +221,8 @@ static int fake_slave_set(struct vme_slave_resource *image, int enabled,
* Get slave window configuration.
*/
static int fake_slave_get(struct vme_slave_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size,
- dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
+ unsigned long long *vme_base, unsigned long long *size,
+ dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
{
unsigned int i;
struct fake_driver *bridge;
@@ -249,8 +249,8 @@ static int fake_slave_get(struct vme_slave_resource *image, int *enabled,
* Set the attributes of an outbound window.
*/
static int fake_master_set(struct vme_master_resource *image, int enabled,
- unsigned long long vme_base, unsigned long long size,
- u32 aspace, u32 cycle, u32 dwidth)
+ unsigned long long vme_base, unsigned long long size,
+ u32 aspace, u32 cycle, u32 dwidth)
{
int retval = 0;
unsigned int i;
@@ -335,8 +335,8 @@ err_window:
* Set the attributes of an outbound window.
*/
static int __fake_master_get(struct vme_master_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size,
- u32 *aspace, u32 *cycle, u32 *dwidth)
+ unsigned long long *vme_base, unsigned long long *size,
+ u32 *aspace, u32 *cycle, u32 *dwidth)
{
unsigned int i;
struct fake_driver *bridge;
@@ -356,15 +356,15 @@ static int __fake_master_get(struct vme_master_resource *image, int *enabled,
}
static int fake_master_get(struct vme_master_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size,
- u32 *aspace, u32 *cycle, u32 *dwidth)
+ unsigned long long *vme_base, unsigned long long *size,
+ u32 *aspace, u32 *cycle, u32 *dwidth)
{
int retval;
spin_lock(&image->lock);
retval = __fake_master_get(image, enabled, vme_base, size, aspace,
- cycle, dwidth);
+ cycle, dwidth);
spin_unlock(&image->lock);
@@ -403,8 +403,7 @@ static void fake_lm_check(struct fake_driver *bridge, unsigned long long addr,
if (((lm_base + (8 * i)) <= addr) &&
((lm_base + (8 * i) + 8) > addr)) {
if (bridge->lm_callback[i])
- bridge->lm_callback[i](
- bridge->lm_data[i]);
+ bridge->lm_callback[i](bridge->lm_data[i]);
}
}
}
@@ -511,7 +510,7 @@ static noinline_for_stack u32 fake_vmeread32(struct fake_driver *bridge,
}
static ssize_t fake_master_read(struct vme_master_resource *image, void *buf,
- size_t count, loff_t offset)
+ size_t count, loff_t offset)
{
int retval;
u32 aspace, cycle, dwidth;
@@ -585,7 +584,6 @@ static ssize_t fake_master_read(struct vme_master_resource *image, void *buf,
aspace, cycle);
done += 1;
}
-
}
if ((dwidth == VME_D16) || (dwidth == VME_D32)) {
@@ -700,7 +698,7 @@ static noinline_for_stack void fake_vmewrite32(struct fake_driver *bridge,
}
static ssize_t fake_master_write(struct vme_master_resource *image, void *buf,
- size_t count, loff_t offset)
+ size_t count, loff_t offset)
{
int retval = 0;
u32 aspace, cycle, dwidth;
@@ -739,7 +737,7 @@ static ssize_t fake_master_write(struct vme_master_resource *image, void *buf,
if ((addr + done) & 0x2) {
if ((count - done) < 2) {
fake_vmewrite8(bridge, (u8 *)(buf + done),
- addr + done, aspace, cycle);
+ addr + done, aspace, cycle);
done += 1;
goto out;
} else {
@@ -768,10 +766,9 @@ static ssize_t fake_master_write(struct vme_master_resource *image, void *buf,
count32 = (count - done);
while (done < count32) {
fake_vmewrite8(bridge, (u8 *)(buf + done), addr + done,
- aspace, cycle);
+ aspace, cycle);
done += 1;
}
-
}
if ((dwidth == VME_D16) || (dwidth == VME_D32)) {
@@ -784,7 +781,7 @@ static ssize_t fake_master_write(struct vme_master_resource *image, void *buf,
if ((count - done) & 0x1) {
fake_vmewrite8(bridge, (u8 *)(buf + done), addr + done, aspace,
- cycle);
+ cycle);
done += 1;
}
@@ -802,8 +799,8 @@ out:
* Requires a previously configured master window, returns final value.
*/
static unsigned int fake_master_rmw(struct vme_master_resource *image,
- unsigned int mask, unsigned int compare, unsigned int swap,
- loff_t offset)
+ unsigned int mask, unsigned int compare,
+ unsigned int swap, loff_t offset)
{
u32 tmp, base;
u32 aspace, cycle;
@@ -848,7 +845,7 @@ static unsigned int fake_master_rmw(struct vme_master_resource *image,
* callback is attached and disabled when the last callback is removed.
*/
static int fake_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
- u32 aspace, u32 cycle)
+ u32 aspace, u32 cycle)
{
int i;
struct vme_bridge *fake_bridge;
@@ -894,7 +891,8 @@ static int fake_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
* or disabled.
*/
static int fake_lm_get(struct vme_lm_resource *lm,
- unsigned long long *lm_base, u32 *aspace, u32 *cycle)
+ unsigned long long *lm_base,
+ u32 *aspace, u32 *cycle)
{
struct fake_driver *bridge;
@@ -917,7 +915,7 @@ static int fake_lm_get(struct vme_lm_resource *lm,
* Callback will be passed the monitor triggered.
*/
static int fake_lm_attach(struct vme_lm_resource *lm, int monitor,
- void (*callback)(void *), void *data)
+ void (*callback)(void *), void *data)
{
struct vme_bridge *fake_bridge;
struct fake_driver *bridge;
@@ -995,7 +993,7 @@ static int fake_slot_get(struct vme_bridge *fake_bridge)
}
static void *fake_alloc_consistent(struct device *parent, size_t size,
- dma_addr_t *dma)
+ dma_addr_t *dma)
{
void *alloc = kmalloc(size, GFP_KERNEL);
@@ -1006,12 +1004,9 @@ static void *fake_alloc_consistent(struct device *parent, size_t size,
}
static void fake_free_consistent(struct device *parent, size_t size,
- void *vaddr, dma_addr_t dma)
+ void *vaddr, dma_addr_t dma)
{
kfree(vaddr);
-/*
- dma_free_coherent(parent, size, vaddr, dma);
-*/
}
/*
@@ -1094,9 +1089,9 @@ static int __init fake_init(void)
mutex_init(&fake_device->vme_int);
mutex_init(&fake_bridge->irq_mtx);
tasklet_init(&fake_device->int_tasklet, fake_VIRQ_tasklet,
- (unsigned long) fake_bridge);
+ (unsigned long)fake_bridge);
- strcpy(fake_bridge->name, driver_name);
+ strscpy(fake_bridge->name, driver_name, sizeof(fake_bridge->name));
/* Add master windows to list */
INIT_LIST_HEAD(&fake_bridge->master_resources);
@@ -1118,10 +1113,10 @@ static int __init fake_init(void)
VME_PROG | VME_DATA;
master_image->width_attr = VME_D16 | VME_D32;
memset(&master_image->bus_resource, 0,
- sizeof(struct resource));
+ sizeof(struct resource));
master_image->kern_base = NULL;
list_add_tail(&master_image->list,
- &fake_bridge->master_resources);
+ &fake_bridge->master_resources);
}
/* Add slave windows to list */
@@ -1144,7 +1139,7 @@ static int __init fake_init(void)
VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
VME_PROG | VME_DATA;
list_add_tail(&slave_image->list,
- &fake_bridge->slave_resources);
+ &fake_bridge->slave_resources);
}
/* Add location monitor to list */
@@ -1179,7 +1174,7 @@ static int __init fake_init(void)
fake_bridge->free_consistent = fake_free_consistent;
pr_info("Board is%s the VME system controller\n",
- (geoid == 1) ? "" : " not");
+ (geoid == 1) ? "" : " not");
pr_info("VME geographical address is set to %d\n", geoid);
@@ -1220,7 +1215,7 @@ err_master:
/* resources are stored in link list */
list_for_each_safe(pos, n, &fake_bridge->master_resources) {
master_image = list_entry(pos, struct vme_master_resource,
- list);
+ list);
list_del(pos);
kfree(master_image);
}
@@ -1275,7 +1270,7 @@ static void __exit fake_exit(void)
/* resources are stored in link list */
list_for_each_safe(pos, tmplist, &fake_bridge->master_resources) {
master_image = list_entry(pos, struct vme_master_resource,
- list);
+ list);
list_del(pos);
kfree(master_image);
}
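
vme_fake.c (and vme_tsi148.c below) also replace strcpy() with strscpy(), bounding the copy to the destination buffer. A minimal illustration, with VMENAMSIZ stubbed in since the real constant is defined in the driver's headers:

/* Illustration only; VMENAMSIZ is an assumed value for the demo. */
#include <linux/printk.h>
#include <linux/string.h>

#define VMENAMSIZ 16

static void demo_set_bridge_name(char name[VMENAMSIZ], const char *driver_name)
{
	/* strscpy() copies at most VMENAMSIZ - 1 bytes, always NUL-terminates,
	 * and returns -E2BIG if the source had to be truncated -- unlike
	 * strcpy(), which would silently overrun the buffer. */
	if (strscpy(name, driver_name, VMENAMSIZ) < 0)
		pr_warn("bridge name \"%s\" truncated\n", driver_name);
}
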
diff --git a/drivers/staging/vme_user/vme_tsi148.c b/drivers/staging/vme_user/vme_tsi148.c
index 2f5eafd50934..2ec9c2904404 100644
--- a/drivers/staging/vme_user/vme_tsi148.c
+++ b/drivers/staging/vme_user/vme_tsi148.c
@@ -2118,8 +2118,9 @@ static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
if (!geoid) {
slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
slot = slot & TSI148_LCSR_VSTAT_GA_M;
- } else
+ } else {
slot = geoid;
+ }
return (int)slot;
}
@@ -2196,9 +2197,9 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
- if (crat & TSI148_LCSR_CRAT_EN)
+ if (crat & TSI148_LCSR_CRAT_EN) {
dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
- else {
+ } else {
dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
iowrite32be(crat | TSI148_LCSR_CRAT_EN,
bridge->base + TSI148_LCSR_CRAT);
@@ -2309,7 +2310,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&tsi148_device->vme_rmw);
tsi148_bridge->parent = &pdev->dev;
- strcpy(tsi148_bridge->name, driver_name);
+ strscpy(tsi148_bridge->name, driver_name, VMENAMSIZ);
/* Setup IRQ */
retval = tsi148_irq_init(tsi148_bridge);
diff --git a/drivers/staging/vme_user/vme_tsi148.h b/drivers/staging/vme_user/vme_tsi148.h
index 63f726e1811a..4dd224d0b86e 100644
--- a/drivers/staging/vme_user/vme_tsi148.h
+++ b/drivers/staging/vme_user/vme_tsi148.h
@@ -96,13 +96,9 @@ struct tsi148_dma_entry {
#define TSI148_PCFS_MISC0 0xC
#define TSI148_PCFS_MBARL 0x10
#define TSI148_PCFS_MBARU 0x14
-
#define TSI148_PCFS_SUBID 0x28
-
#define TSI148_PCFS_CAPP 0x34
-
#define TSI148_PCFS_MISC1 0x3C
-
#define TSI148_PCFS_XCAPP 0x40
#define TSI148_PCFS_XSTAT 0x44
@@ -210,7 +206,7 @@ static const int TSI148_LCSR_OT[8] = { TSI148_LCSR_OT0, TSI148_LCSR_OT1,
/*
* VMEbus interrupt ack
- * offset 200
+ * offset 0x200
*/
#define TSI148_LCSR_VIACK1 0x204
#define TSI148_LCSR_VIACK2 0x208
@@ -227,7 +223,7 @@ static const int TSI148_LCSR_VIACK[8] = { 0, TSI148_LCSR_VIACK1,
/*
* RMW
- * offset 220
+ * offset 0x220
*/
#define TSI148_LCSR_RMWAU 0x220
#define TSI148_LCSR_RMWAL 0x224
@@ -237,7 +233,7 @@ static const int TSI148_LCSR_VIACK[8] = { 0, TSI148_LCSR_VIACK1,
/*
* VMEbus control
- * offset 234
+ * offset 0x234
*/
#define TSI148_LCSR_VMCTRL 0x234
#define TSI148_LCSR_VCTRL 0x238
@@ -245,38 +241,38 @@ static const int TSI148_LCSR_VIACK[8] = { 0, TSI148_LCSR_VIACK1,
/*
* PCI status
- * offset 240
+ * offset 0x240
*/
#define TSI148_LCSR_PSTAT 0x240
/*
* VME filter.
- * offset 250
+ * offset 0x250
*/
#define TSI148_LCSR_VMEFL 0x250
- /*
- * VME exception.
- * offset 260
+/*
+ * VME exception.
+ * offset 0x260
*/
#define TSI148_LCSR_VEAU 0x260
#define TSI148_LCSR_VEAL 0x264
#define TSI148_LCSR_VEAT 0x268
- /*
- * PCI error
- * offset 270
- */
+/*
+ * PCI error
+ * offset 0x270
+ */
#define TSI148_LCSR_EDPAU 0x270
#define TSI148_LCSR_EDPAL 0x274
#define TSI148_LCSR_EDPXA 0x278
#define TSI148_LCSR_EDPXS 0x27C
#define TSI148_LCSR_EDPAT 0x280
- /*
- * Inbound Translations
- * offset 300
- */
+/*
+ * Inbound Translations
+ * offset 0x300
+ */
#define TSI148_LCSR_IT0_ITSAU 0x300
#define TSI148_LCSR_IT0_ITSAL 0x304
#define TSI148_LCSR_IT0_ITEAU 0x308
@@ -363,53 +359,53 @@ static const int TSI148_LCSR_IT[8] = { TSI148_LCSR_IT0, TSI148_LCSR_IT1,
#define TSI148_LCSR_OFFSET_ITOFL 0x14
#define TSI148_LCSR_OFFSET_ITAT 0x18
- /*
- * Inbound Translation GCSR
- * offset 400
- */
+/*
+ * Inbound Translation GCSR
+ * offset 0x400
+ */
#define TSI148_LCSR_GBAU 0x400
#define TSI148_LCSR_GBAL 0x404
#define TSI148_LCSR_GCSRAT 0x408
- /*
- * Inbound Translation CRG
- * offset 40C
- */
+/*
+ * Inbound Translation CRG
+ * offset 0x40C
+ */
#define TSI148_LCSR_CBAU 0x40C
#define TSI148_LCSR_CBAL 0x410
#define TSI148_LCSR_CSRAT 0x414
- /*
- * Inbound Translation CR/CSR
- * CRG
- * offset 418
- */
+/*
+ * Inbound Translation CR/CSR
+ * CRG
+ * offset 0x418
+ */
#define TSI148_LCSR_CROU 0x418
#define TSI148_LCSR_CROL 0x41C
#define TSI148_LCSR_CRAT 0x420
- /*
- * Inbound Translation Location Monitor
- * offset 424
- */
+/*
+ * Inbound Translation Location Monitor
+ * offset 0x424
+ */
#define TSI148_LCSR_LMBAU 0x424
#define TSI148_LCSR_LMBAL 0x428
#define TSI148_LCSR_LMAT 0x42C
- /*
- * VMEbus Interrupt Control.
- * offset 430
- */
+/*
+ * VMEbus Interrupt Control.
+ * offset 0x430
+ */
#define TSI148_LCSR_BCU 0x430
#define TSI148_LCSR_BCL 0x434
#define TSI148_LCSR_BPGTR 0x438
#define TSI148_LCSR_BPCTR 0x43C
#define TSI148_LCSR_VICR 0x440
- /*
- * Local Bus Interrupt Control.
- * offset 448
- */
+/*
+ * Local Bus Interrupt Control.
+ * offset 0x448
+ */
#define TSI148_LCSR_INTEN 0x448
#define TSI148_LCSR_INTEO 0x44C
#define TSI148_LCSR_INTS 0x450
@@ -417,10 +413,10 @@ static const int TSI148_LCSR_IT[8] = { TSI148_LCSR_IT0, TSI148_LCSR_IT1,
#define TSI148_LCSR_INTM1 0x458
#define TSI148_LCSR_INTM2 0x45C
- /*
- * DMA Controllers
- * offset 500
- */
+/*
+ * DMA Controllers
+ * offset 0x500
+ */
#define TSI148_LCSR_DCTL0 0x500
#define TSI148_LCSR_DSTA0 0x504
#define TSI148_LCSR_DCSAU0 0x508
@@ -484,27 +480,27 @@ static const int TSI148_LCSR_DMA[TSI148_MAX_DMA] = { TSI148_LCSR_DMA0,
#define TSI148_LCSR_OFFSET_DCNT 0x40
#define TSI148_LCSR_OFFSET_DDBS 0x44
- /*
- * GCSR Register Group
- */
+/*
+ * GCSR Register Group
+ */
- /*
- * GCSR CRG
- * offset 00 600 - DEVI/VENI
- * offset 04 604 - CTRL/GA/REVID
- * offset 08 608 - Semaphore3/2/1/0
- * offset 0C 60C - Seamphore7/6/5/4
- */
+/*
+ * GCSR CRG
+ * offset 0x00 0x600 - DEVI/VENI
+ * offset 0x04 0x604 - CTRL/GA/REVID
+ * offset 0x08 0x608 - Semaphore3/2/1/0
+ * offset 0x0C 0x60C - Semaphore7/6/5/4
+ */
#define TSI148_GCSR_ID 0x600
#define TSI148_GCSR_CSR 0x604
#define TSI148_GCSR_SEMA0 0x608
#define TSI148_GCSR_SEMA1 0x60C
- /*
- * Mail Box
- * GCSR CRG
- * offset 10 610 - Mailbox0
- */
+/*
+ * Mail Box
+ * GCSR CRG
+ * offset 0x10 0x610 - Mailbox0
+ */
#define TSI148_GCSR_MBOX0 0x610
#define TSI148_GCSR_MBOX1 0x614
#define TSI148_GCSR_MBOX2 0x618
@@ -515,27 +511,27 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
TSI148_GCSR_MBOX2,
TSI148_GCSR_MBOX3 };
- /*
- * CR/CSR
- */
+/*
+ * CR/CSR
+ */
- /*
- * CR/CSR CRG
- * offset 7FFF4 FF4 - CSRBCR
- * offset 7FFF8 FF8 - CSRBSR
- * offset 7FFFC FFC - CBAR
- */
+/*
+ * CR/CSR CRG
+ * offset 0x7FFF4 0xFF4 - CSRBCR
+ * offset 0x7FFF8 0xFF8 - CSRBSR
+ * offset 0x7FFFC 0xFFC - CBAR
+ */
#define TSI148_CSRBCR 0xFF4
#define TSI148_CSRBSR 0xFF8
#define TSI148_CBAR 0xFFC
- /*
- * TSI148 Register Bit Definitions
- */
+/*
+ * TSI148 Register Bit Definitions
+ */
- /*
- * PFCS Register Set
- */
+/*
+ * PFCS Register Set
+ */
#define TSI148_PCFS_CMMD_SERR BIT(8) /* SERR_L out pin ssys err */
#define TSI148_PCFS_CMMD_PERR BIT(6) /* PERR_L out pin parity */
#define TSI148_PCFS_CMMD_MSTR BIT(2) /* PCI bus master */
@@ -547,7 +543,7 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
#define TSI148_PCFS_STAT_RCVMA BIT(13) /* Received Master Abort */
#define TSI148_PCFS_STAT_RCVTA BIT(12) /* Received Target Abort */
#define TSI148_PCFS_STAT_SIGTA BIT(11) /* Signalled Target Abort */
-#define TSI148_PCFS_STAT_SELTIM (3<<9) /* DELSEL Timing */
+#define TSI148_PCFS_STAT_SELTIM (3 << 9) /* DELSEL Timing */
#define TSI148_PCFS_STAT_DPAR BIT(8) /* Data Parity Err Reported */
#define TSI148_PCFS_STAT_FAST BIT(7) /* Fast back-to-back Cap */
#define TSI148_PCFS_STAT_P66M BIT(5) /* 66 MHz Capable */
@@ -556,49 +552,49 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
/*
* Revision ID/Class Code Registers (CRG +$008)
*/
-#define TSI148_PCFS_CLAS_M (0xFF<<24) /* Class ID */
-#define TSI148_PCFS_SUBCLAS_M (0xFF<<16) /* Sub-Class ID */
-#define TSI148_PCFS_PROGIF_M (0xFF<<8) /* Sub-Class ID */
-#define TSI148_PCFS_REVID_M (0xFF<<0) /* Rev ID */
+#define TSI148_PCFS_CLAS_M (0xFF << 24) /* Class ID */
+#define TSI148_PCFS_SUBCLAS_M (0xFF << 16) /* Sub-Class ID */
+#define TSI148_PCFS_PROGIF_M (0xFF << 8) /* Sub-Class ID */
+#define TSI148_PCFS_REVID_M (0xFF << 0) /* Rev ID */
/*
* Cache Line Size/ Master Latency Timer/ Header Type Registers (CRG + $00C)
*/
-#define TSI148_PCFS_HEAD_M (0xFF<<16) /* Master Lat Timer */
-#define TSI148_PCFS_MLAT_M (0xFF<<8) /* Master Lat Timer */
-#define TSI148_PCFS_CLSZ_M (0xFF<<0) /* Cache Line Size */
+#define TSI148_PCFS_HEAD_M (0xFF << 16) /* Master Lat Timer */
+#define TSI148_PCFS_MLAT_M (0xFF << 8) /* Master Lat Timer */
+#define TSI148_PCFS_CLSZ_M (0xFF << 0) /* Cache Line Size */
/*
* Memory Base Address Lower Reg (CRG + $010)
*/
-#define TSI148_PCFS_MBARL_BASEL_M (0xFFFFF<<12) /* Base Addr Lower Mask */
+#define TSI148_PCFS_MBARL_BASEL_M (0xFFFFF << 12) /* Base Addr Lower Mask */
#define TSI148_PCFS_MBARL_PRE BIT(3) /* Prefetch */
-#define TSI148_PCFS_MBARL_MTYPE_M (3<<1) /* Memory Type Mask */
+#define TSI148_PCFS_MBARL_MTYPE_M (3 << 1) /* Memory Type Mask */
#define TSI148_PCFS_MBARL_IOMEM BIT(0) /* I/O Space Indicator */
/*
* Message Signaled Interrupt Capabilities Register (CRG + $040)
*/
#define TSI148_PCFS_MSICAP_64BAC BIT(7) /* 64-bit Address Capable */
-#define TSI148_PCFS_MSICAP_MME_M (7<<4) /* Multiple Msg Enable Mask */
-#define TSI148_PCFS_MSICAP_MMC_M (7<<1) /* Multiple Msg Capable Mask */
+#define TSI148_PCFS_MSICAP_MME_M (7 << 4) /* Multiple Msg Enable Mask */
+#define TSI148_PCFS_MSICAP_MMC_M (7 << 1) /* Multiple Msg Capable Mask */
#define TSI148_PCFS_MSICAP_MSIEN BIT(0) /* Msg signaled INT Enable */
/*
* Message Address Lower Register (CRG +$044)
*/
-#define TSI148_PCFS_MSIAL_M (0x3FFFFFFF<<2) /* Mask */
+#define TSI148_PCFS_MSIAL_M (0x3FFFFFFF << 2) /* Mask */
/*
* Message Data Register (CRG + 4C)
*/
-#define TSI148_PCFS_MSIMD_M (0xFFFF<<0) /* Mask */
+#define TSI148_PCFS_MSIMD_M (0xFFFF << 0) /* Mask */
/*
* PCI-X Capabilities Register (CRG + $050)
*/
-#define TSI148_PCFS_PCIXCAP_MOST_M (7<<4) /* Max outstanding Split Tran */
-#define TSI148_PCFS_PCIXCAP_MMRBC_M (3<<2) /* Max Mem Read byte cnt */
+#define TSI148_PCFS_PCIXCAP_MOST_M (7 << 4) /* Max outstanding Split Tran */
+#define TSI148_PCFS_PCIXCAP_MMRBC_M (3 << 2) /* Max Mem Read byte cnt */
#define TSI148_PCFS_PCIXCAP_ERO BIT(1) /* Enable Relaxed Ordering */
#define TSI148_PCFS_PCIXCAP_DPERE BIT(0) /* Data Parity Recover Enable */
@@ -606,18 +602,17 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
* PCI-X Status Register (CRG +$054)
*/
#define TSI148_PCFS_PCIXSTAT_RSCEM BIT(29) /* Received Split Comp Error */
-#define TSI148_PCFS_PCIXSTAT_DMCRS_M (7<<26) /* max Cumulative Read Size */
-#define TSI148_PCFS_PCIXSTAT_DMOST_M (7<<23) /* max outstanding Split Trans
- */
-#define TSI148_PCFS_PCIXSTAT_DMMRC_M (3<<21) /* max mem read byte count */
+#define TSI148_PCFS_PCIXSTAT_DMCRS_M (7 << 26) /* max Cumulative Read Size */
+#define TSI148_PCFS_PCIXSTAT_DMOST_M (7 << 23) /* max outstanding Split Trans */
+#define TSI148_PCFS_PCIXSTAT_DMMRC_M (3 << 21) /* max mem read byte count */
#define TSI148_PCFS_PCIXSTAT_DC BIT(20) /* Device Complexity */
#define TSI148_PCFS_PCIXSTAT_USC BIT(19) /* Unexpected Split comp */
#define TSI148_PCFS_PCIXSTAT_SCD BIT(18) /* Split completion discard */
#define TSI148_PCFS_PCIXSTAT_133C BIT(17) /* 133MHz capable */
#define TSI148_PCFS_PCIXSTAT_64D BIT(16) /* 64 bit device */
-#define TSI148_PCFS_PCIXSTAT_BN_M (0xFF<<8) /* Bus number */
-#define TSI148_PCFS_PCIXSTAT_DN_M (0x1F<<3) /* Device number */
-#define TSI148_PCFS_PCIXSTAT_FN_M (7<<0) /* Function Number */
+#define TSI148_PCFS_PCIXSTAT_BN_M (0xFF << 8) /* Bus number */
+#define TSI148_PCFS_PCIXSTAT_DN_M (0x1F << 3) /* Device number */
+#define TSI148_PCFS_PCIXSTAT_FN_M (7 << 0) /* Function Number */
/*
* LCSR Registers
@@ -626,22 +621,22 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
/*
* Outbound Translation Starting Address Lower
*/
-#define TSI148_LCSR_OTSAL_M (0xFFFF<<16) /* Mask */
+#define TSI148_LCSR_OTSAL_M (0xFFFF << 16) /* Mask */
/*
* Outbound Translation Ending Address Lower
*/
-#define TSI148_LCSR_OTEAL_M (0xFFFF<<16) /* Mask */
+#define TSI148_LCSR_OTEAL_M (0xFFFF << 16) /* Mask */
/*
* Outbound Translation Offset Lower
*/
-#define TSI148_LCSR_OTOFFL_M (0xFFFF<<16) /* Mask */
+#define TSI148_LCSR_OTOFFL_M (0xFFFF << 16) /* Mask */
/*
* Outbound Translation 2eSST Broadcast Select
*/
-#define TSI148_LCSR_OTBS_M (0xFFFFF<<0) /* Mask */
+#define TSI148_LCSR_OTBS_M (0xFFFFF << 0) /* Mask */
/*
* Outbound Translation Attribute
@@ -649,42 +644,42 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
#define TSI148_LCSR_OTAT_EN BIT(31) /* Window Enable */
#define TSI148_LCSR_OTAT_MRPFD BIT(18) /* Prefetch Disable */
-#define TSI148_LCSR_OTAT_PFS_M (3<<16) /* Prefetch Size Mask */
-#define TSI148_LCSR_OTAT_PFS_2 (0<<16) /* 2 Cache Lines P Size */
+#define TSI148_LCSR_OTAT_PFS_M (3 << 16) /* Prefetch Size Mask */
+#define TSI148_LCSR_OTAT_PFS_2 (0 << 16) /* 2 Cache Lines P Size */
#define TSI148_LCSR_OTAT_PFS_4 BIT(16) /* 4 Cache Lines P Size */
-#define TSI148_LCSR_OTAT_PFS_8 (2<<16) /* 8 Cache Lines P Size */
-#define TSI148_LCSR_OTAT_PFS_16 (3<<16) /* 16 Cache Lines P Size */
+#define TSI148_LCSR_OTAT_PFS_8 (2 << 16) /* 8 Cache Lines P Size */
+#define TSI148_LCSR_OTAT_PFS_16 (3 << 16) /* 16 Cache Lines P Size */
-#define TSI148_LCSR_OTAT_2eSSTM_M (7<<11) /* 2eSST Xfer Rate Mask */
-#define TSI148_LCSR_OTAT_2eSSTM_160 (0<<11) /* 160MB/s 2eSST Xfer Rate */
+#define TSI148_LCSR_OTAT_2eSSTM_M (7 << 11) /* 2eSST Xfer Rate Mask */
+#define TSI148_LCSR_OTAT_2eSSTM_160 (0 << 11) /* 160MB/s 2eSST Xfer Rate */
#define TSI148_LCSR_OTAT_2eSSTM_267 BIT(11) /* 267MB/s 2eSST Xfer Rate */
-#define TSI148_LCSR_OTAT_2eSSTM_320 (2<<11) /* 320MB/s 2eSST Xfer Rate */
+#define TSI148_LCSR_OTAT_2eSSTM_320 (2 << 11) /* 320MB/s 2eSST Xfer Rate */
-#define TSI148_LCSR_OTAT_TM_M (7<<8) /* Xfer Protocol Mask */
-#define TSI148_LCSR_OTAT_TM_SCT (0<<8) /* SCT Xfer Protocol */
+#define TSI148_LCSR_OTAT_TM_M (7 << 8) /* Xfer Protocol Mask */
+#define TSI148_LCSR_OTAT_TM_SCT (0 << 8) /* SCT Xfer Protocol */
#define TSI148_LCSR_OTAT_TM_BLT BIT(8) /* BLT Xfer Protocol */
-#define TSI148_LCSR_OTAT_TM_MBLT (2<<8) /* MBLT Xfer Protocol */
-#define TSI148_LCSR_OTAT_TM_2eVME (3<<8) /* 2eVME Xfer Protocol */
-#define TSI148_LCSR_OTAT_TM_2eSST (4<<8) /* 2eSST Xfer Protocol */
-#define TSI148_LCSR_OTAT_TM_2eSSTB (5<<8) /* 2eSST Bcast Xfer Protocol */
+#define TSI148_LCSR_OTAT_TM_MBLT (2 << 8) /* MBLT Xfer Protocol */
+#define TSI148_LCSR_OTAT_TM_2eVME (3 << 8) /* 2eVME Xfer Protocol */
+#define TSI148_LCSR_OTAT_TM_2eSST (4 << 8) /* 2eSST Xfer Protocol */
+#define TSI148_LCSR_OTAT_TM_2eSSTB (5 << 8) /* 2eSST Bcast Xfer Protocol */
-#define TSI148_LCSR_OTAT_DBW_M (3<<6) /* Max Data Width */
-#define TSI148_LCSR_OTAT_DBW_16 (0<<6) /* 16-bit Data Width */
+#define TSI148_LCSR_OTAT_DBW_M (3 << 6) /* Max Data Width */
+#define TSI148_LCSR_OTAT_DBW_16 (0 << 6) /* 16-bit Data Width */
#define TSI148_LCSR_OTAT_DBW_32 BIT(6) /* 32-bit Data Width */
#define TSI148_LCSR_OTAT_SUP BIT(5) /* Supervisory Access */
#define TSI148_LCSR_OTAT_PGM BIT(4) /* Program Access */
-#define TSI148_LCSR_OTAT_AMODE_M (0xf<<0) /* Address Mode Mask */
-#define TSI148_LCSR_OTAT_AMODE_A16 (0<<0) /* A16 Address Space */
+#define TSI148_LCSR_OTAT_AMODE_M (0xf << 0) /* Address Mode Mask */
+#define TSI148_LCSR_OTAT_AMODE_A16 (0 << 0) /* A16 Address Space */
#define TSI148_LCSR_OTAT_AMODE_A24 BIT(0) /* A24 Address Space */
-#define TSI148_LCSR_OTAT_AMODE_A32 (2<<0) /* A32 Address Space */
-#define TSI148_LCSR_OTAT_AMODE_A64 (4<<0) /* A32 Address Space */
-#define TSI148_LCSR_OTAT_AMODE_CRCSR (5<<0) /* CR/CSR Address Space */
-#define TSI148_LCSR_OTAT_AMODE_USER1 (8<<0) /* User1 Address Space */
-#define TSI148_LCSR_OTAT_AMODE_USER2 (9<<0) /* User2 Address Space */
-#define TSI148_LCSR_OTAT_AMODE_USER3 (10<<0) /* User3 Address Space */
-#define TSI148_LCSR_OTAT_AMODE_USER4 (11<<0) /* User4 Address Space */
+#define TSI148_LCSR_OTAT_AMODE_A32 (2 << 0) /* A32 Address Space */
+#define TSI148_LCSR_OTAT_AMODE_A64 (4 << 0) /* A32 Address Space */
+#define TSI148_LCSR_OTAT_AMODE_CRCSR (5 << 0) /* CR/CSR Address Space */
+#define TSI148_LCSR_OTAT_AMODE_USER1 (8 << 0) /* User1 Address Space */
+#define TSI148_LCSR_OTAT_AMODE_USER2 (9 << 0) /* User2 Address Space */
+#define TSI148_LCSR_OTAT_AMODE_USER3 (10 << 0) /* User3 Address Space */
+#define TSI148_LCSR_OTAT_AMODE_USER4 (11 << 0) /* User4 Address Space */
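The OTAT value macros above are meant to be OR-ed into a single attribute word, with each *_M macro masking its field back out. A minimal sketch of that usage, as it would sit in driver code where these definitions are visible; the helper names are illustrative, not part of the driver:

static u32 tsi148_example_otat(void)
{
	u32 otat = 0;

	otat |= TSI148_LCSR_OTAT_EN;         /* enable the outbound window */
	otat |= TSI148_LCSR_OTAT_TM_2eSST;   /* 2eSST transfer protocol */
	otat |= TSI148_LCSR_OTAT_2eSSTM_320; /* 320MB/s 2eSST rate */
	otat |= TSI148_LCSR_OTAT_DBW_32;     /* 32-bit data width */
	otat |= TSI148_LCSR_OTAT_AMODE_A32;  /* A32 address space */

	return otat;
}

static bool tsi148_otat_is_2esst(u32 otat)
{
	/* The *_M mask isolates the field before comparing encodings. */
	return (otat & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST;
}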
/*
* VME Master Control Register CRG+$234
@@ -696,46 +691,46 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
#define TSI148_LCSR_VMCTRL_RMWEN BIT(20) /* RMW Enable */
-#define TSI148_LCSR_VMCTRL_ATO_M (7<<16) /* Master Access Time-out Mask
+#define TSI148_LCSR_VMCTRL_ATO_M (7 << 16) /* Master Access Time-out Mask
*/
-#define TSI148_LCSR_VMCTRL_ATO_32 (0<<16) /* 32 us */
+#define TSI148_LCSR_VMCTRL_ATO_32 (0 << 16) /* 32 us */
#define TSI148_LCSR_VMCTRL_ATO_128 BIT(16) /* 128 us */
-#define TSI148_LCSR_VMCTRL_ATO_512 (2<<16) /* 512 us */
-#define TSI148_LCSR_VMCTRL_ATO_2M (3<<16) /* 2 ms */
-#define TSI148_LCSR_VMCTRL_ATO_8M (4<<16) /* 8 ms */
-#define TSI148_LCSR_VMCTRL_ATO_32M (5<<16) /* 32 ms */
-#define TSI148_LCSR_VMCTRL_ATO_128M (6<<16) /* 128 ms */
-#define TSI148_LCSR_VMCTRL_ATO_DIS (7<<16) /* Disabled */
-
-#define TSI148_LCSR_VMCTRL_VTOFF_M (7<<12) /* VMEbus Master Time off */
-#define TSI148_LCSR_VMCTRL_VTOFF_0 (0<<12) /* 0us */
+#define TSI148_LCSR_VMCTRL_ATO_512 (2 << 16) /* 512 us */
+#define TSI148_LCSR_VMCTRL_ATO_2M (3 << 16) /* 2 ms */
+#define TSI148_LCSR_VMCTRL_ATO_8M (4 << 16) /* 8 ms */
+#define TSI148_LCSR_VMCTRL_ATO_32M (5 << 16) /* 32 ms */
+#define TSI148_LCSR_VMCTRL_ATO_128M (6 << 16) /* 128 ms */
+#define TSI148_LCSR_VMCTRL_ATO_DIS (7 << 16) /* Disabled */
+
+#define TSI148_LCSR_VMCTRL_VTOFF_M (7 << 12) /* VMEbus Master Time off */
+#define TSI148_LCSR_VMCTRL_VTOFF_0 (0 << 12) /* 0us */
#define TSI148_LCSR_VMCTRL_VTOFF_1 BIT(12) /* 1us */
-#define TSI148_LCSR_VMCTRL_VTOFF_2 (2<<12) /* 2us */
-#define TSI148_LCSR_VMCTRL_VTOFF_4 (3<<12) /* 4us */
-#define TSI148_LCSR_VMCTRL_VTOFF_8 (4<<12) /* 8us */
-#define TSI148_LCSR_VMCTRL_VTOFF_16 (5<<12) /* 16us */
-#define TSI148_LCSR_VMCTRL_VTOFF_32 (6<<12) /* 32us */
-#define TSI148_LCSR_VMCTRL_VTOFF_64 (7<<12) /* 64us */
-
-#define TSI148_LCSR_VMCTRL_VTON_M (7<<8) /* VMEbus Master Time On */
-#define TSI148_LCSR_VMCTRL_VTON_4 (0<<8) /* 8us */
+#define TSI148_LCSR_VMCTRL_VTOFF_2 (2 << 12) /* 2us */
+#define TSI148_LCSR_VMCTRL_VTOFF_4 (3 << 12) /* 4us */
+#define TSI148_LCSR_VMCTRL_VTOFF_8 (4 << 12) /* 8us */
+#define TSI148_LCSR_VMCTRL_VTOFF_16 (5 << 12) /* 16us */
+#define TSI148_LCSR_VMCTRL_VTOFF_32 (6 << 12) /* 32us */
+#define TSI148_LCSR_VMCTRL_VTOFF_64 (7 << 12) /* 64us */
+
+#define TSI148_LCSR_VMCTRL_VTON_M (7 << 8) /* VMEbus Master Time On */
+#define TSI148_LCSR_VMCTRL_VTON_4 (0 << 8) /* 4us */
#define TSI148_LCSR_VMCTRL_VTON_8 BIT(8) /* 8us */
-#define TSI148_LCSR_VMCTRL_VTON_16 (2<<8) /* 16us */
-#define TSI148_LCSR_VMCTRL_VTON_32 (3<<8) /* 32us */
-#define TSI148_LCSR_VMCTRL_VTON_64 (4<<8) /* 64us */
-#define TSI148_LCSR_VMCTRL_VTON_128 (5<<8) /* 128us */
-#define TSI148_LCSR_VMCTRL_VTON_256 (6<<8) /* 256us */
-#define TSI148_LCSR_VMCTRL_VTON_512 (7<<8) /* 512us */
-
-#define TSI148_LCSR_VMCTRL_VREL_M (3<<3) /* VMEbus Master Rel Mode Mask
+#define TSI148_LCSR_VMCTRL_VTON_16 (2 << 8) /* 16us */
+#define TSI148_LCSR_VMCTRL_VTON_32 (3 << 8) /* 32us */
+#define TSI148_LCSR_VMCTRL_VTON_64 (4 << 8) /* 64us */
+#define TSI148_LCSR_VMCTRL_VTON_128 (5 << 8) /* 128us */
+#define TSI148_LCSR_VMCTRL_VTON_256 (6 << 8) /* 256us */
+#define TSI148_LCSR_VMCTRL_VTON_512 (7 << 8) /* 512us */
+
+#define TSI148_LCSR_VMCTRL_VREL_M (3 << 3) /* VMEbus Master Rel Mode Mask
*/
-#define TSI148_LCSR_VMCTRL_VREL_T_D (0<<3) /* Time on or Done */
+#define TSI148_LCSR_VMCTRL_VREL_T_D (0 << 3) /* Time on or Done */
#define TSI148_LCSR_VMCTRL_VREL_T_R_D BIT(3) /* Time on and REQ or Done */
-#define TSI148_LCSR_VMCTRL_VREL_T_B_D (2<<3) /* Time on and BCLR or Done */
-#define TSI148_LCSR_VMCTRL_VREL_T_D_R (3<<3) /* Time on or Done and REQ */
+#define TSI148_LCSR_VMCTRL_VREL_T_B_D (2 << 3) /* Time on and BCLR or Done */
+#define TSI148_LCSR_VMCTRL_VREL_T_D_R (3 << 3) /* Time on or Done and REQ */
#define TSI148_LCSR_VMCTRL_VFAIR BIT(2) /* VMEbus Master Fair Mode */
-#define TSI148_LCSR_VMCTRL_VREQL_M (3<<0) /* VMEbus Master Req Level Mask
+#define TSI148_LCSR_VMCTRL_VREQL_M (3 << 0) /* VMEbus Master Req Level Mask
*/
/*
@@ -743,20 +738,20 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
*/
#define TSI148_LCSR_VCTRL_LRE BIT(31) /* Late Retry Enable */
-#define TSI148_LCSR_VCTRL_DLT_M (0xF<<24) /* Deadlock Timer */
-#define TSI148_LCSR_VCTRL_DLT_OFF (0<<24) /* Deadlock Timer Off */
+#define TSI148_LCSR_VCTRL_DLT_M (0xF << 24) /* Deadlock Timer */
+#define TSI148_LCSR_VCTRL_DLT_OFF (0 << 24) /* Deadlock Timer Off */
#define TSI148_LCSR_VCTRL_DLT_16 BIT(24) /* 16 VCLKS */
-#define TSI148_LCSR_VCTRL_DLT_32 (2<<24) /* 32 VCLKS */
-#define TSI148_LCSR_VCTRL_DLT_64 (3<<24) /* 64 VCLKS */
-#define TSI148_LCSR_VCTRL_DLT_128 (4<<24) /* 128 VCLKS */
-#define TSI148_LCSR_VCTRL_DLT_256 (5<<24) /* 256 VCLKS */
-#define TSI148_LCSR_VCTRL_DLT_512 (6<<24) /* 512 VCLKS */
-#define TSI148_LCSR_VCTRL_DLT_1024 (7<<24) /* 1024 VCLKS */
-#define TSI148_LCSR_VCTRL_DLT_2048 (8<<24) /* 2048 VCLKS */
-#define TSI148_LCSR_VCTRL_DLT_4096 (9<<24) /* 4096 VCLKS */
-#define TSI148_LCSR_VCTRL_DLT_8192 (0xA<<24) /* 8192 VCLKS */
-#define TSI148_LCSR_VCTRL_DLT_16384 (0xB<<24) /* 16384 VCLKS */
-#define TSI148_LCSR_VCTRL_DLT_32768 (0xC<<24) /* 32768 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_32 (2 << 24) /* 32 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_64 (3 << 24) /* 64 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_128 (4 << 24) /* 128 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_256 (5 << 24) /* 256 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_512 (6 << 24) /* 512 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_1024 (7 << 24) /* 1024 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_2048 (8 << 24) /* 2048 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_4096 (9 << 24) /* 4096 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_8192 (0xA << 24) /* 8192 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_16384 (0xB << 24) /* 16384 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_32768 (0xC << 24) /* 32768 VCLKS */
#define TSI148_LCSR_VCTRL_NERBB BIT(20) /* No Early Release of Bus Busy
*/
@@ -765,21 +760,20 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
#define TSI148_LCSR_VCTRL_LRESET BIT(16) /* Local Reset */
#define TSI148_LCSR_VCTRL_SFAILAI BIT(15) /* SYSFAIL Auto Slot ID */
-#define TSI148_LCSR_VCTRL_BID_M (0x1F<<8) /* Broadcast ID Mask */
+#define TSI148_LCSR_VCTRL_BID_M (0x1F << 8) /* Broadcast ID Mask */
#define TSI148_LCSR_VCTRL_ATOEN BIT(7) /* Arbiter Time-out Enable */
#define TSI148_LCSR_VCTRL_ROBIN BIT(6) /* VMEbus Round Robin */
-#define TSI148_LCSR_VCTRL_GTO_M (7<<0) /* VMEbus Global Time-out Mask
- */
-#define TSI148_LCSR_VCTRL_GTO_8 (0<<0) /* 8 us */
+#define TSI148_LCSR_VCTRL_GTO_M (7 << 0) /* VMEbus Global Time-out Mask*/
+#define TSI148_LCSR_VCTRL_GTO_8 (0 << 0) /* 8 us */
#define TSI148_LCSR_VCTRL_GTO_16 BIT(0) /* 16 us */
-#define TSI148_LCSR_VCTRL_GTO_32 (2<<0) /* 32 us */
-#define TSI148_LCSR_VCTRL_GTO_64 (3<<0) /* 64 us */
-#define TSI148_LCSR_VCTRL_GTO_128 (4<<0) /* 128 us */
-#define TSI148_LCSR_VCTRL_GTO_256 (5<<0) /* 256 us */
-#define TSI148_LCSR_VCTRL_GTO_512 (6<<0) /* 512 us */
-#define TSI148_LCSR_VCTRL_GTO_DIS (7<<0) /* Disabled */
+#define TSI148_LCSR_VCTRL_GTO_32 (2 << 0) /* 32 us */
+#define TSI148_LCSR_VCTRL_GTO_64 (3 << 0) /* 64 us */
+#define TSI148_LCSR_VCTRL_GTO_128 (4 << 0) /* 128 us */
+#define TSI148_LCSR_VCTRL_GTO_256 (5 << 0) /* 256 us */
+#define TSI148_LCSR_VCTRL_GTO_512 (6 << 0) /* 512 us */
+#define TSI148_LCSR_VCTRL_GTO_DIS (7 << 0) /* Disabled */
/*
* VMEbus Status Register CRG + $23C
@@ -792,7 +786,7 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
#define TSI148_LCSR_VSTAT_ACFAILS BIT(9) /* AC fail status */
#define TSI148_LCSR_VSTAT_SCONS BIT(8) /* System Cont Status */
#define TSI148_LCSR_VSTAT_GAP BIT(5) /* Geographic Addr Parity */
-#define TSI148_LCSR_VSTAT_GA_M (0x1F<<0) /* Geographic Addr Mask */
+#define TSI148_LCSR_VSTAT_GA_M (0x1F << 0) /* Geographic Addr Mask */
/*
* PCI Configuration Status Register CRG+$240
@@ -819,8 +813,8 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
#define TSI148_LCSR_VEAT_IACK BIT(16) /* IACK_ signal state */
#define TSI148_LCSR_VEAT_DS1 BIT(15) /* DS1_ signal state */
#define TSI148_LCSR_VEAT_DS0 BIT(14) /* DS0_ signal state */
-#define TSI148_LCSR_VEAT_AM_M (0x3F<<8) /* Address Mode Mask */
-#define TSI148_LCSR_VEAT_XAM_M (0xFF<<0) /* Master AMode Mask */
+#define TSI148_LCSR_VEAT_AM_M (0x3F << 8) /* Address Mode Mask */
+#define TSI148_LCSR_VEAT_XAM_M (0xFF << 0) /* Master AMode Mask */
/*
* VMEbus PCI Error Diagnostics PCI/X Attributes Register CRG + $280
@@ -830,23 +824,23 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
/*
* Inbound Translation Starting Address Lower
*/
-#define TSI148_LCSR_ITSAL6432_M (0xFFFF<<16) /* Mask */
-#define TSI148_LCSR_ITSAL24_M (0x00FFF<<12) /* Mask */
-#define TSI148_LCSR_ITSAL16_M (0x0000FFF<<4) /* Mask */
+#define TSI148_LCSR_ITSAL6432_M (0xFFFF << 16) /* Mask */
+#define TSI148_LCSR_ITSAL24_M (0x00FFF << 12) /* Mask */
+#define TSI148_LCSR_ITSAL16_M (0x0000FFF << 4) /* Mask */
/*
* Inbound Translation Ending Address Lower
*/
-#define TSI148_LCSR_ITEAL6432_M (0xFFFF<<16) /* Mask */
-#define TSI148_LCSR_ITEAL24_M (0x00FFF<<12) /* Mask */
-#define TSI148_LCSR_ITEAL16_M (0x0000FFF<<4) /* Mask */
+#define TSI148_LCSR_ITEAL6432_M (0xFFFF << 16) /* Mask */
+#define TSI148_LCSR_ITEAL24_M (0x00FFF << 12) /* Mask */
+#define TSI148_LCSR_ITEAL16_M (0x0000FFF << 4) /* Mask */
/*
* Inbound Translation Offset Lower
*/
-#define TSI148_LCSR_ITOFFL6432_M (0xFFFF<<16) /* Mask */
-#define TSI148_LCSR_ITOFFL24_M (0xFFFFF<<12) /* Mask */
-#define TSI148_LCSR_ITOFFL16_M (0xFFFFFFF<<4) /* Mask */
+#define TSI148_LCSR_ITOFFL6432_M (0xFFFF << 16) /* Mask */
+#define TSI148_LCSR_ITOFFL24_M (0xFFFFF << 12) /* Mask */
+#define TSI148_LCSR_ITOFFL16_M (0xFFFFFFF << 4) /* Mask */
/*
* Inbound Translation Attribute
@@ -854,16 +848,16 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
#define TSI148_LCSR_ITAT_EN BIT(31) /* Window Enable */
#define TSI148_LCSR_ITAT_TH BIT(18) /* Prefetch Threshold */
-#define TSI148_LCSR_ITAT_VFS_M (3<<16) /* Virtual FIFO Size Mask */
-#define TSI148_LCSR_ITAT_VFS_64 (0<<16) /* 64 bytes Virtual FIFO Size */
+#define TSI148_LCSR_ITAT_VFS_M (3 << 16) /* Virtual FIFO Size Mask */
+#define TSI148_LCSR_ITAT_VFS_64 (0 << 16) /* 64 bytes Virtual FIFO Size */
#define TSI148_LCSR_ITAT_VFS_128 BIT(16) /* 128 bytes Virtual FIFO Sz */
-#define TSI148_LCSR_ITAT_VFS_256 (2<<16) /* 256 bytes Virtual FIFO Sz */
-#define TSI148_LCSR_ITAT_VFS_512 (3<<16) /* 512 bytes Virtual FIFO Sz */
+#define TSI148_LCSR_ITAT_VFS_256 (2 << 16) /* 256 bytes Virtual FIFO Sz */
+#define TSI148_LCSR_ITAT_VFS_512 (3 << 16) /* 512 bytes Virtual FIFO Sz */
-#define TSI148_LCSR_ITAT_2eSSTM_M (7<<12) /* 2eSST Xfer Rate Mask */
-#define TSI148_LCSR_ITAT_2eSSTM_160 (0<<12) /* 160MB/s 2eSST Xfer Rate */
+#define TSI148_LCSR_ITAT_2eSSTM_M (7 << 12) /* 2eSST Xfer Rate Mask */
+#define TSI148_LCSR_ITAT_2eSSTM_160 (0 << 12) /* 160MB/s 2eSST Xfer Rate */
#define TSI148_LCSR_ITAT_2eSSTM_267 BIT(12) /* 267MB/s 2eSST Xfer Rate */
-#define TSI148_LCSR_ITAT_2eSSTM_320 (2<<12) /* 320MB/s 2eSST Xfer Rate */
+#define TSI148_LCSR_ITAT_2eSSTM_320 (2 << 12) /* 320MB/s 2eSST Xfer Rate */
#define TSI148_LCSR_ITAT_2eSSTB BIT(11) /* 2eSST Bcast Xfer Protocol */
#define TSI148_LCSR_ITAT_2eSST BIT(10) /* 2eSST Xfer Protocol */
@@ -871,11 +865,11 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
#define TSI148_LCSR_ITAT_MBLT BIT(8) /* MBLT Xfer Protocol */
#define TSI148_LCSR_ITAT_BLT BIT(7) /* BLT Xfer Protocol */
-#define TSI148_LCSR_ITAT_AS_M (7<<4) /* Address Space Mask */
-#define TSI148_LCSR_ITAT_AS_A16 (0<<4) /* A16 Address Space */
+#define TSI148_LCSR_ITAT_AS_M (7 << 4) /* Address Space Mask */
+#define TSI148_LCSR_ITAT_AS_A16 (0 << 4) /* A16 Address Space */
#define TSI148_LCSR_ITAT_AS_A24 BIT(4) /* A24 Address Space */
-#define TSI148_LCSR_ITAT_AS_A32 (2<<4) /* A32 Address Space */
-#define TSI148_LCSR_ITAT_AS_A64 (4<<4) /* A64 Address Space */
+#define TSI148_LCSR_ITAT_AS_A32 (2 << 4) /* A32 Address Space */
+#define TSI148_LCSR_ITAT_AS_A64 (4 << 4) /* A64 Address Space */
#define TSI148_LCSR_ITAT_SUPR BIT(3) /* Supervisor Access */
#define TSI148_LCSR_ITAT_NPRIV BIT(2) /* Non-Priv (User) Access */
@@ -885,18 +879,18 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
/*
* GCSR Base Address Lower Address CRG +$404
*/
-#define TSI148_LCSR_GBAL_M (0x7FFFFFF<<5) /* Mask */
+#define TSI148_LCSR_GBAL_M (0x7FFFFFF << 5) /* Mask */
/*
* GCSR Attribute Register CRG + $408
*/
#define TSI148_LCSR_GCSRAT_EN BIT(7) /* Enable access to GCSR */
-#define TSI148_LCSR_GCSRAT_AS_M (7<<4) /* Address Space Mask */
-#define TSI148_LCSR_GCSRAT_AS_A16 (0<<4) /* Address Space 16 */
+#define TSI148_LCSR_GCSRAT_AS_M (7 << 4) /* Address Space Mask */
+#define TSI148_LCSR_GCSRAT_AS_A16 (0 << 4) /* Address Space 16 */
#define TSI148_LCSR_GCSRAT_AS_A24 BIT(4) /* Address Space 24 */
-#define TSI148_LCSR_GCSRAT_AS_A32 (2<<4) /* Address Space 32 */
-#define TSI148_LCSR_GCSRAT_AS_A64 (4<<4) /* Address Space 64 */
+#define TSI148_LCSR_GCSRAT_AS_A32 (2 << 4) /* Address Space 32 */
+#define TSI148_LCSR_GCSRAT_AS_A64 (4 << 4) /* Address Space 64 */
#define TSI148_LCSR_GCSRAT_SUPR BIT(3) /* Sup set -GCSR decoder */
#define TSI148_LCSR_GCSRAT_NPRIV BIT(2) /* Non-Privileged set - GCSR */
@@ -906,18 +900,18 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
/*
* CRG Base Address Lower Address CRG + $410
*/
-#define TSI148_LCSR_CBAL_M (0xFFFFF<<12)
+#define TSI148_LCSR_CBAL_M (0xFFFFF << 12)
/*
* CRG Attribute Register CRG + $414
*/
#define TSI148_LCSR_CRGAT_EN BIT(7) /* Enable CRG Access */
-#define TSI148_LCSR_CRGAT_AS_M (7<<4) /* Address Space */
-#define TSI148_LCSR_CRGAT_AS_A16 (0<<4) /* Address Space 16 */
+#define TSI148_LCSR_CRGAT_AS_M (7 << 4) /* Address Space */
+#define TSI148_LCSR_CRGAT_AS_A16 (0 << 4) /* Address Space 16 */
#define TSI148_LCSR_CRGAT_AS_A24 BIT(4) /* Address Space 24 */
-#define TSI148_LCSR_CRGAT_AS_A32 (2<<4) /* Address Space 32 */
-#define TSI148_LCSR_CRGAT_AS_A64 (4<<4) /* Address Space 64 */
+#define TSI148_LCSR_CRGAT_AS_A32 (2 << 4) /* Address Space 32 */
+#define TSI148_LCSR_CRGAT_AS_A64 (4 << 4) /* Address Space 64 */
#define TSI148_LCSR_CRGAT_SUPR BIT(3) /* Supervisor Access */
#define TSI148_LCSR_CRGAT_NPRIV BIT(2) /* Non-Privileged (User) Access */
@@ -927,7 +921,7 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
/*
* CR/CSR Offset Lower Register CRG + $41C
*/
-#define TSI148_LCSR_CROL_M (0x1FFF<<19) /* Mask */
+#define TSI148_LCSR_CROL_M (0x1FFF << 19) /* Mask */
/*
* CR/CSR Attribute register CRG + $420
@@ -937,18 +931,18 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
/*
* Location Monitor base address lower register CRG + $428
*/
-#define TSI148_LCSR_LMBAL_M (0x7FFFFFF<<5) /* Mask */
+#define TSI148_LCSR_LMBAL_M (0x7FFFFFF << 5) /* Mask */
/*
* Location Monitor Attribute Register CRG + $42C
*/
#define TSI148_LCSR_LMAT_EN BIT(7) /* Enable Location Monitor */
-#define TSI148_LCSR_LMAT_AS_M (7<<4) /* Address Space MASK */
-#define TSI148_LCSR_LMAT_AS_A16 (0<<4) /* A16 */
+#define TSI148_LCSR_LMAT_AS_M (7 << 4) /* Address Space MASK */
+#define TSI148_LCSR_LMAT_AS_A16 (0 << 4) /* A16 */
#define TSI148_LCSR_LMAT_AS_A24 BIT(4) /* A24 */
-#define TSI148_LCSR_LMAT_AS_A32 (2<<4) /* A32 */
-#define TSI148_LCSR_LMAT_AS_A64 (4<<4) /* A64 */
+#define TSI148_LCSR_LMAT_AS_A32 (2 << 4) /* A32 */
+#define TSI148_LCSR_LMAT_AS_A64 (4 << 4) /* A64 */
#define TSI148_LCSR_LMAT_SUPR BIT(3) /* Supervisor Access */
#define TSI148_LCSR_LMAT_NPRIV BIT(2) /* Non-Priv (User) Access */
@@ -958,58 +952,58 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
/*
* Broadcast Pulse Generator Timer Register CRG + $438
*/
-#define TSI148_LCSR_BPGTR_BPGT_M (0xFFFF<<0) /* Mask */
+#define TSI148_LCSR_BPGTR_BPGT_M (0xFFFF << 0) /* Mask */
/*
* Broadcast Programmable Clock Timer Register CRG + $43C
*/
-#define TSI148_LCSR_BPCTR_BPCT_M (0xFFFFFF<<0) /* Mask */
+#define TSI148_LCSR_BPCTR_BPCT_M (0xFFFFFF << 0) /* Mask */
/*
* VMEbus Interrupt Control Register CRG + $43C
*/
-#define TSI148_LCSR_VICR_CNTS_M (3<<22) /* Cntr Source MASK */
+#define TSI148_LCSR_VICR_CNTS_M (3 << 22) /* Cntr Source MASK */
#define TSI148_LCSR_VICR_CNTS_DIS BIT(22) /* Cntr Disable */
-#define TSI148_LCSR_VICR_CNTS_IRQ1 (2<<22) /* IRQ1 to Cntr */
-#define TSI148_LCSR_VICR_CNTS_IRQ2 (3<<22) /* IRQ2 to Cntr */
+#define TSI148_LCSR_VICR_CNTS_IRQ1 (2 << 22) /* IRQ1 to Cntr */
+#define TSI148_LCSR_VICR_CNTS_IRQ2 (3 << 22) /* IRQ2 to Cntr */
-#define TSI148_LCSR_VICR_EDGIS_M (3<<20) /* Edge interrupt MASK */
+#define TSI148_LCSR_VICR_EDGIS_M (3 << 20) /* Edge interrupt MASK */
#define TSI148_LCSR_VICR_EDGIS_DIS BIT(20) /* Edge interrupt Disable */
-#define TSI148_LCSR_VICR_EDGIS_IRQ1 (2<<20) /* IRQ1 to Edge */
-#define TSI148_LCSR_VICR_EDGIS_IRQ2 (3<<20) /* IRQ2 to Edge */
+#define TSI148_LCSR_VICR_EDGIS_IRQ1 (2 << 20) /* IRQ1 to Edge */
+#define TSI148_LCSR_VICR_EDGIS_IRQ2 (3 << 20) /* IRQ2 to Edge */
-#define TSI148_LCSR_VICR_IRQIF_M (3<<18) /* IRQ1* Function MASK */
+#define TSI148_LCSR_VICR_IRQIF_M (3 << 18) /* IRQ1* Function MASK */
#define TSI148_LCSR_VICR_IRQIF_NORM BIT(18) /* Normal */
-#define TSI148_LCSR_VICR_IRQIF_PULSE (2<<18) /* Pulse Generator */
-#define TSI148_LCSR_VICR_IRQIF_PROG (3<<18) /* Programmable Clock */
-#define TSI148_LCSR_VICR_IRQIF_1U (4<<18) /* 1us Clock */
+#define TSI148_LCSR_VICR_IRQIF_PULSE (2 << 18) /* Pulse Generator */
+#define TSI148_LCSR_VICR_IRQIF_PROG (3 << 18) /* Programmable Clock */
+#define TSI148_LCSR_VICR_IRQIF_1U (4 << 18) /* 1us Clock */
-#define TSI148_LCSR_VICR_IRQ2F_M (3<<16) /* IRQ2* Function MASK */
+#define TSI148_LCSR_VICR_IRQ2F_M (3 << 16) /* IRQ2* Function MASK */
#define TSI148_LCSR_VICR_IRQ2F_NORM BIT(16) /* Normal */
-#define TSI148_LCSR_VICR_IRQ2F_PULSE (2<<16) /* Pulse Generator */
-#define TSI148_LCSR_VICR_IRQ2F_PROG (3<<16) /* Programmable Clock */
-#define TSI148_LCSR_VICR_IRQ2F_1U (4<<16) /* 1us Clock */
+#define TSI148_LCSR_VICR_IRQ2F_PULSE (2 << 16) /* Pulse Generator */
+#define TSI148_LCSR_VICR_IRQ2F_PROG (3 << 16) /* Programmable Clock */
+#define TSI148_LCSR_VICR_IRQ2F_1U (4 << 16) /* 1us Clock */
#define TSI148_LCSR_VICR_BIP BIT(15) /* Broadcast Interrupt Pulse */
#define TSI148_LCSR_VICR_IRQC BIT(12) /* VMEbus IRQ Clear */
#define TSI148_LCSR_VICR_IRQS BIT(11) /* VMEbus IRQ Status */
-#define TSI148_LCSR_VICR_IRQL_M (7<<8) /* VMEbus SW IRQ Level Mask */
+#define TSI148_LCSR_VICR_IRQL_M (7 << 8) /* VMEbus SW IRQ Level Mask */
#define TSI148_LCSR_VICR_IRQL_1 BIT(8) /* VMEbus SW IRQ Level 1 */
-#define TSI148_LCSR_VICR_IRQL_2 (2<<8) /* VMEbus SW IRQ Level 2 */
-#define TSI148_LCSR_VICR_IRQL_3 (3<<8) /* VMEbus SW IRQ Level 3 */
-#define TSI148_LCSR_VICR_IRQL_4 (4<<8) /* VMEbus SW IRQ Level 4 */
-#define TSI148_LCSR_VICR_IRQL_5 (5<<8) /* VMEbus SW IRQ Level 5 */
-#define TSI148_LCSR_VICR_IRQL_6 (6<<8) /* VMEbus SW IRQ Level 6 */
-#define TSI148_LCSR_VICR_IRQL_7 (7<<8) /* VMEbus SW IRQ Level 7 */
+#define TSI148_LCSR_VICR_IRQL_2 (2 << 8) /* VMEbus SW IRQ Level 2 */
+#define TSI148_LCSR_VICR_IRQL_3 (3 << 8) /* VMEbus SW IRQ Level 3 */
+#define TSI148_LCSR_VICR_IRQL_4 (4 << 8) /* VMEbus SW IRQ Level 4 */
+#define TSI148_LCSR_VICR_IRQL_5 (5 << 8) /* VMEbus SW IRQ Level 5 */
+#define TSI148_LCSR_VICR_IRQL_6 (6 << 8) /* VMEbus SW IRQ Level 6 */
+#define TSI148_LCSR_VICR_IRQL_7 (7 << 8) /* VMEbus SW IRQ Level 7 */
static const int TSI148_LCSR_VICR_IRQL[8] = { 0, TSI148_LCSR_VICR_IRQL_1,
TSI148_LCSR_VICR_IRQL_2, TSI148_LCSR_VICR_IRQL_3,
TSI148_LCSR_VICR_IRQL_4, TSI148_LCSR_VICR_IRQL_5,
TSI148_LCSR_VICR_IRQL_6, TSI148_LCSR_VICR_IRQL_7 };
-#define TSI148_LCSR_VICR_STID_M (0xFF<<0) /* Status/ID Mask */
+#define TSI148_LCSR_VICR_STID_M (0xFF << 0) /* Status/ID Mask */
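The TSI148_LCSR_VICR_IRQL[] table above maps a software interrupt level (1-7) to its encoding in bits 10:8, while the Status/ID value occupies bits 7:0. A hedged sketch of how a caller could fold the two into one VICR value; the helper is illustrative, not driver code:

static u32 tsi148_example_vicr(unsigned int level, u8 statid)
{
	u32 vicr = 0;

	if (level >= 1 && level <= 7)
		vicr |= TSI148_LCSR_VICR_IRQL[level]; /* SW IRQ level field */

	vicr |= statid & TSI148_LCSR_VICR_STID_M;     /* 8-bit Status/ID */

	return vicr;
}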
/*
* Interrupt Enable Register CRG + $440
@@ -1161,33 +1155,33 @@ static const int TSI148_LCSR_INTC_MBC[4] = { TSI148_LCSR_INTC_MB0C,
/*
* Interrupt Map Register 1 CRG + $458
*/
-#define TSI148_LCSR_INTM1_DMA1M_M (3<<18) /* DMA 1 */
-#define TSI148_LCSR_INTM1_DMA0M_M (3<<16) /* DMA 0 */
-#define TSI148_LCSR_INTM1_LM3M_M (3<<14) /* Location Monitor 3 */
-#define TSI148_LCSR_INTM1_LM2M_M (3<<12) /* Location Monitor 2 */
-#define TSI148_LCSR_INTM1_LM1M_M (3<<10) /* Location Monitor 1 */
-#define TSI148_LCSR_INTM1_LM0M_M (3<<8) /* Location Monitor 0 */
-#define TSI148_LCSR_INTM1_MB3M_M (3<<6) /* Mail Box 3 */
-#define TSI148_LCSR_INTM1_MB2M_M (3<<4) /* Mail Box 2 */
-#define TSI148_LCSR_INTM1_MB1M_M (3<<2) /* Mail Box 1 */
-#define TSI148_LCSR_INTM1_MB0M_M (3<<0) /* Mail Box 0 */
+#define TSI148_LCSR_INTM1_DMA1M_M (3 << 18) /* DMA 1 */
+#define TSI148_LCSR_INTM1_DMA0M_M (3 << 16) /* DMA 0 */
+#define TSI148_LCSR_INTM1_LM3M_M (3 << 14) /* Location Monitor 3 */
+#define TSI148_LCSR_INTM1_LM2M_M (3 << 12) /* Location Monitor 2 */
+#define TSI148_LCSR_INTM1_LM1M_M (3 << 10) /* Location Monitor 1 */
+#define TSI148_LCSR_INTM1_LM0M_M (3 << 8) /* Location Monitor 0 */
+#define TSI148_LCSR_INTM1_MB3M_M (3 << 6) /* Mail Box 3 */
+#define TSI148_LCSR_INTM1_MB2M_M (3 << 4) /* Mail Box 2 */
+#define TSI148_LCSR_INTM1_MB1M_M (3 << 2) /* Mail Box 1 */
+#define TSI148_LCSR_INTM1_MB0M_M (3 << 0) /* Mail Box 0 */
/*
* Interrupt Map Register 2 CRG + $45C
*/
-#define TSI148_LCSR_INTM2_PERRM_M (3<<26) /* PCI Bus Error */
-#define TSI148_LCSR_INTM2_VERRM_M (3<<24) /* VMEbus Error */
-#define TSI148_LCSR_INTM2_VIEM_M (3<<22) /* VMEbus IRQ Edge */
-#define TSI148_LCSR_INTM2_IACKM_M (3<<20) /* IACK */
-#define TSI148_LCSR_INTM2_SYSFLM_M (3<<18) /* System Fail */
-#define TSI148_LCSR_INTM2_ACFLM_M (3<<16) /* AC Fail */
-#define TSI148_LCSR_INTM2_IRQ7M_M (3<<14) /* IRQ7 */
-#define TSI148_LCSR_INTM2_IRQ6M_M (3<<12) /* IRQ6 */
-#define TSI148_LCSR_INTM2_IRQ5M_M (3<<10) /* IRQ5 */
-#define TSI148_LCSR_INTM2_IRQ4M_M (3<<8) /* IRQ4 */
-#define TSI148_LCSR_INTM2_IRQ3M_M (3<<6) /* IRQ3 */
-#define TSI148_LCSR_INTM2_IRQ2M_M (3<<4) /* IRQ2 */
-#define TSI148_LCSR_INTM2_IRQ1M_M (3<<2) /* IRQ1 */
+#define TSI148_LCSR_INTM2_PERRM_M (3 << 26) /* PCI Bus Error */
+#define TSI148_LCSR_INTM2_VERRM_M (3 << 24) /* VMEbus Error */
+#define TSI148_LCSR_INTM2_VIEM_M (3 << 22) /* VMEbus IRQ Edge */
+#define TSI148_LCSR_INTM2_IACKM_M (3 << 20) /* IACK */
+#define TSI148_LCSR_INTM2_SYSFLM_M (3 << 18) /* System Fail */
+#define TSI148_LCSR_INTM2_ACFLM_M (3 << 16) /* AC Fail */
+#define TSI148_LCSR_INTM2_IRQ7M_M (3 << 14) /* IRQ7 */
+#define TSI148_LCSR_INTM2_IRQ6M_M (3 << 12) /* IRQ6 */
+#define TSI148_LCSR_INTM2_IRQ5M_M (3 << 10) /* IRQ5 */
+#define TSI148_LCSR_INTM2_IRQ4M_M (3 << 8) /* IRQ4 */
+#define TSI148_LCSR_INTM2_IRQ3M_M (3 << 6) /* IRQ3 */
+#define TSI148_LCSR_INTM2_IRQ2M_M (3 << 4) /* IRQ2 */
+#define TSI148_LCSR_INTM2_IRQ1M_M (3 << 2) /* IRQ1 */
/*
* DMA Control (0-1) Registers CRG + $500
@@ -1198,45 +1192,45 @@ static const int TSI148_LCSR_INTC_MBC[4] = { TSI148_LCSR_INTC_MB0C,
#define TSI148_LCSR_DCTL_MOD BIT(23) /* Mode */
-#define TSI148_LCSR_DCTL_VBKS_M (7<<12) /* VMEbus block Size MASK */
-#define TSI148_LCSR_DCTL_VBKS_32 (0<<12) /* VMEbus block Size 32 */
+#define TSI148_LCSR_DCTL_VBKS_M (7 << 12) /* VMEbus block Size MASK */
+#define TSI148_LCSR_DCTL_VBKS_32 (0 << 12) /* VMEbus block Size 32 */
#define TSI148_LCSR_DCTL_VBKS_64 BIT(12) /* VMEbus block Size 64 */
-#define TSI148_LCSR_DCTL_VBKS_128 (2<<12) /* VMEbus block Size 128 */
-#define TSI148_LCSR_DCTL_VBKS_256 (3<<12) /* VMEbus block Size 256 */
-#define TSI148_LCSR_DCTL_VBKS_512 (4<<12) /* VMEbus block Size 512 */
-#define TSI148_LCSR_DCTL_VBKS_1024 (5<<12) /* VMEbus block Size 1024 */
-#define TSI148_LCSR_DCTL_VBKS_2048 (6<<12) /* VMEbus block Size 2048 */
-#define TSI148_LCSR_DCTL_VBKS_4096 (7<<12) /* VMEbus block Size 4096 */
-
-#define TSI148_LCSR_DCTL_VBOT_M (7<<8) /* VMEbus back-off MASK */
-#define TSI148_LCSR_DCTL_VBOT_0 (0<<8) /* VMEbus back-off 0us */
+#define TSI148_LCSR_DCTL_VBKS_128 (2 << 12) /* VMEbus block Size 128 */
+#define TSI148_LCSR_DCTL_VBKS_256 (3 << 12) /* VMEbus block Size 256 */
+#define TSI148_LCSR_DCTL_VBKS_512 (4 << 12) /* VMEbus block Size 512 */
+#define TSI148_LCSR_DCTL_VBKS_1024 (5 << 12) /* VMEbus block Size 1024 */
+#define TSI148_LCSR_DCTL_VBKS_2048 (6 << 12) /* VMEbus block Size 2048 */
+#define TSI148_LCSR_DCTL_VBKS_4096 (7 << 12) /* VMEbus block Size 4096 */
+
+#define TSI148_LCSR_DCTL_VBOT_M (7 << 8) /* VMEbus back-off MASK */
+#define TSI148_LCSR_DCTL_VBOT_0 (0 << 8) /* VMEbus back-off 0us */
#define TSI148_LCSR_DCTL_VBOT_1 BIT(8) /* VMEbus back-off 1us */
-#define TSI148_LCSR_DCTL_VBOT_2 (2<<8) /* VMEbus back-off 2us */
-#define TSI148_LCSR_DCTL_VBOT_4 (3<<8) /* VMEbus back-off 4us */
-#define TSI148_LCSR_DCTL_VBOT_8 (4<<8) /* VMEbus back-off 8us */
-#define TSI148_LCSR_DCTL_VBOT_16 (5<<8) /* VMEbus back-off 16us */
-#define TSI148_LCSR_DCTL_VBOT_32 (6<<8) /* VMEbus back-off 32us */
-#define TSI148_LCSR_DCTL_VBOT_64 (7<<8) /* VMEbus back-off 64us */
-
-#define TSI148_LCSR_DCTL_PBKS_M (7<<4) /* PCI block size MASK */
-#define TSI148_LCSR_DCTL_PBKS_32 (0<<4) /* PCI block size 32 bytes */
+#define TSI148_LCSR_DCTL_VBOT_2 (2 << 8) /* VMEbus back-off 2us */
+#define TSI148_LCSR_DCTL_VBOT_4 (3 << 8) /* VMEbus back-off 4us */
+#define TSI148_LCSR_DCTL_VBOT_8 (4 << 8) /* VMEbus back-off 8us */
+#define TSI148_LCSR_DCTL_VBOT_16 (5 << 8) /* VMEbus back-off 16us */
+#define TSI148_LCSR_DCTL_VBOT_32 (6 << 8) /* VMEbus back-off 32us */
+#define TSI148_LCSR_DCTL_VBOT_64 (7 << 8) /* VMEbus back-off 64us */
+
+#define TSI148_LCSR_DCTL_PBKS_M (7 << 4) /* PCI block size MASK */
+#define TSI148_LCSR_DCTL_PBKS_32 (0 << 4) /* PCI block size 32 bytes */
#define TSI148_LCSR_DCTL_PBKS_64 BIT(4) /* PCI block size 64 bytes */
-#define TSI148_LCSR_DCTL_PBKS_128 (2<<4) /* PCI block size 128 bytes */
-#define TSI148_LCSR_DCTL_PBKS_256 (3<<4) /* PCI block size 256 bytes */
-#define TSI148_LCSR_DCTL_PBKS_512 (4<<4) /* PCI block size 512 bytes */
-#define TSI148_LCSR_DCTL_PBKS_1024 (5<<4) /* PCI block size 1024 bytes */
-#define TSI148_LCSR_DCTL_PBKS_2048 (6<<4) /* PCI block size 2048 bytes */
-#define TSI148_LCSR_DCTL_PBKS_4096 (7<<4) /* PCI block size 4096 bytes */
-
-#define TSI148_LCSR_DCTL_PBOT_M (7<<0) /* PCI back off MASK */
-#define TSI148_LCSR_DCTL_PBOT_0 (0<<0) /* PCI back off 0us */
+#define TSI148_LCSR_DCTL_PBKS_128 (2 << 4) /* PCI block size 128 bytes */
+#define TSI148_LCSR_DCTL_PBKS_256 (3 << 4) /* PCI block size 256 bytes */
+#define TSI148_LCSR_DCTL_PBKS_512 (4 << 4) /* PCI block size 512 bytes */
+#define TSI148_LCSR_DCTL_PBKS_1024 (5 << 4) /* PCI block size 1024 bytes */
+#define TSI148_LCSR_DCTL_PBKS_2048 (6 << 4) /* PCI block size 2048 bytes */
+#define TSI148_LCSR_DCTL_PBKS_4096 (7 << 4) /* PCI block size 4096 bytes */
+
+#define TSI148_LCSR_DCTL_PBOT_M (7 << 0) /* PCI back off MASK */
+#define TSI148_LCSR_DCTL_PBOT_0 (0 << 0) /* PCI back off 0us */
#define TSI148_LCSR_DCTL_PBOT_1 BIT(0) /* PCI back off 1us */
-#define TSI148_LCSR_DCTL_PBOT_2 (2<<0) /* PCI back off 2us */
-#define TSI148_LCSR_DCTL_PBOT_4 (3<<0) /* PCI back off 3us */
-#define TSI148_LCSR_DCTL_PBOT_8 (4<<0) /* PCI back off 4us */
-#define TSI148_LCSR_DCTL_PBOT_16 (5<<0) /* PCI back off 8us */
-#define TSI148_LCSR_DCTL_PBOT_32 (6<<0) /* PCI back off 16us */
-#define TSI148_LCSR_DCTL_PBOT_64 (7<<0) /* PCI back off 32us */
+#define TSI148_LCSR_DCTL_PBOT_2 (2 << 0) /* PCI back off 2us */
+#define TSI148_LCSR_DCTL_PBOT_4 (3 << 0) /* PCI back off 4us */
+#define TSI148_LCSR_DCTL_PBOT_8 (4 << 0) /* PCI back off 8us */
+#define TSI148_LCSR_DCTL_PBOT_16 (5 << 0) /* PCI back off 16us */
+#define TSI148_LCSR_DCTL_PBOT_32 (6 << 0) /* PCI back off 32us */
+#define TSI148_LCSR_DCTL_PBOT_64 (7 << 0) /* PCI back off 64us */
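The DCTL field macros follow the same mask/value pairing. As a small decoding sketch, assuming these definitions are in scope (the helper is hypothetical), the VBKS field of a DCTL value read back from the chip translates to a VMEbus block size in bytes:

static unsigned int tsi148_dctl_vme_block_size(u32 dctl)
{
	switch (dctl & TSI148_LCSR_DCTL_VBKS_M) {
	case TSI148_LCSR_DCTL_VBKS_32:   return 32;
	case TSI148_LCSR_DCTL_VBKS_64:   return 64;
	case TSI148_LCSR_DCTL_VBKS_128:  return 128;
	case TSI148_LCSR_DCTL_VBKS_256:  return 256;
	case TSI148_LCSR_DCTL_VBKS_512:  return 512;
	case TSI148_LCSR_DCTL_VBKS_1024: return 1024;
	case TSI148_LCSR_DCTL_VBKS_2048: return 2048;
	case TSI148_LCSR_DCTL_VBKS_4096: return 4096;
	default:                         return 0; /* unreachable: mask is 3 bits */
	}
}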
/*
* DMA Status Registers (0-1) CRG + $504
@@ -1253,97 +1247,97 @@ static const int TSI148_LCSR_INTC_MBC[4] = { TSI148_LCSR_INTC_MB0C,
/*
* DMA Current Link Address Lower (0-1)
*/
-#define TSI148_LCSR_DCLAL_M (0x3FFFFFF<<6) /* Mask */
+#define TSI148_LCSR_DCLAL_M (0x3FFFFFF << 6) /* Mask */
/*
* DMA Source Attribute (0-1) Reg
*/
-#define TSI148_LCSR_DSAT_TYP_M (3<<28) /* Source Bus Type */
-#define TSI148_LCSR_DSAT_TYP_PCI (0<<28) /* PCI Bus */
+#define TSI148_LCSR_DSAT_TYP_M (3 << 28) /* Source Bus Type */
+#define TSI148_LCSR_DSAT_TYP_PCI (0 << 28) /* PCI Bus */
#define TSI148_LCSR_DSAT_TYP_VME BIT(28) /* VMEbus */
-#define TSI148_LCSR_DSAT_TYP_PAT (2<<28) /* Data Pattern */
+#define TSI148_LCSR_DSAT_TYP_PAT (2 << 28) /* Data Pattern */
#define TSI148_LCSR_DSAT_PSZ BIT(25) /* Pattern Size */
#define TSI148_LCSR_DSAT_NIN BIT(24) /* No Increment */
-#define TSI148_LCSR_DSAT_2eSSTM_M (3<<11) /* 2eSST Trans Rate Mask */
-#define TSI148_LCSR_DSAT_2eSSTM_160 (0<<11) /* 160 MB/s */
+#define TSI148_LCSR_DSAT_2eSSTM_M (3 << 11) /* 2eSST Trans Rate Mask */
+#define TSI148_LCSR_DSAT_2eSSTM_160 (0 << 11) /* 160 MB/s */
#define TSI148_LCSR_DSAT_2eSSTM_267 BIT(11) /* 267 MB/s */
-#define TSI148_LCSR_DSAT_2eSSTM_320 (2<<11) /* 320 MB/s */
+#define TSI148_LCSR_DSAT_2eSSTM_320 (2 << 11) /* 320 MB/s */
-#define TSI148_LCSR_DSAT_TM_M (7<<8) /* Bus Transfer Protocol Mask */
-#define TSI148_LCSR_DSAT_TM_SCT (0<<8) /* SCT */
+#define TSI148_LCSR_DSAT_TM_M (7 << 8) /* Bus Transfer Protocol Mask */
+#define TSI148_LCSR_DSAT_TM_SCT (0 << 8) /* SCT */
#define TSI148_LCSR_DSAT_TM_BLT BIT(8) /* BLT */
-#define TSI148_LCSR_DSAT_TM_MBLT (2<<8) /* MBLT */
-#define TSI148_LCSR_DSAT_TM_2eVME (3<<8) /* 2eVME */
-#define TSI148_LCSR_DSAT_TM_2eSST (4<<8) /* 2eSST */
-#define TSI148_LCSR_DSAT_TM_2eSSTB (5<<8) /* 2eSST Broadcast */
+#define TSI148_LCSR_DSAT_TM_MBLT (2 << 8) /* MBLT */
+#define TSI148_LCSR_DSAT_TM_2eVME (3 << 8) /* 2eVME */
+#define TSI148_LCSR_DSAT_TM_2eSST (4 << 8) /* 2eSST */
+#define TSI148_LCSR_DSAT_TM_2eSSTB (5 << 8) /* 2eSST Broadcast */
-#define TSI148_LCSR_DSAT_DBW_M (3<<6) /* Max Data Width MASK */
-#define TSI148_LCSR_DSAT_DBW_16 (0<<6) /* 16 Bits */
+#define TSI148_LCSR_DSAT_DBW_M (3 << 6) /* Max Data Width MASK */
+#define TSI148_LCSR_DSAT_DBW_16 (0 << 6) /* 16 Bits */
#define TSI148_LCSR_DSAT_DBW_32 BIT(6) /* 32 Bits */
#define TSI148_LCSR_DSAT_SUP BIT(5) /* Supervisory Mode */
#define TSI148_LCSR_DSAT_PGM BIT(4) /* Program Mode */
-#define TSI148_LCSR_DSAT_AMODE_M (0xf<<0) /* Address Space Mask */
-#define TSI148_LCSR_DSAT_AMODE_A16 (0<<0) /* A16 */
+#define TSI148_LCSR_DSAT_AMODE_M (0xf << 0) /* Address Space Mask */
+#define TSI148_LCSR_DSAT_AMODE_A16 (0 << 0) /* A16 */
#define TSI148_LCSR_DSAT_AMODE_A24 BIT(0) /* A24 */
-#define TSI148_LCSR_DSAT_AMODE_A32 (2<<0) /* A32 */
-#define TSI148_LCSR_DSAT_AMODE_A64 (4<<0) /* A64 */
-#define TSI148_LCSR_DSAT_AMODE_CRCSR (5<<0) /* CR/CSR */
-#define TSI148_LCSR_DSAT_AMODE_USER1 (8<<0) /* User1 */
-#define TSI148_LCSR_DSAT_AMODE_USER2 (9<<0) /* User2 */
-#define TSI148_LCSR_DSAT_AMODE_USER3 (0xa<<0) /* User3 */
-#define TSI148_LCSR_DSAT_AMODE_USER4 (0xb<<0) /* User4 */
+#define TSI148_LCSR_DSAT_AMODE_A32 (2 << 0) /* A32 */
+#define TSI148_LCSR_DSAT_AMODE_A64 (4 << 0) /* A64 */
+#define TSI148_LCSR_DSAT_AMODE_CRCSR (5 << 0) /* CR/CSR */
+#define TSI148_LCSR_DSAT_AMODE_USER1 (8 << 0) /* User1 */
+#define TSI148_LCSR_DSAT_AMODE_USER2 (9 << 0) /* User2 */
+#define TSI148_LCSR_DSAT_AMODE_USER3 (0xa << 0) /* User3 */
+#define TSI148_LCSR_DSAT_AMODE_USER4 (0xb << 0) /* User4 */
/*
* DMA Destination Attribute Registers (0-1)
*/
-#define TSI148_LCSR_DDAT_TYP_PCI (0<<28) /* Destination PCI Bus */
+#define TSI148_LCSR_DDAT_TYP_PCI (0 << 28) /* Destination PCI Bus */
#define TSI148_LCSR_DDAT_TYP_VME BIT(28) /* Destination VMEbus */
-#define TSI148_LCSR_DDAT_2eSSTM_M (3<<11) /* 2eSST Transfer Rate Mask */
-#define TSI148_LCSR_DDAT_2eSSTM_160 (0<<11) /* 160 MB/s */
+#define TSI148_LCSR_DDAT_2eSSTM_M (3 << 11) /* 2eSST Transfer Rate Mask */
+#define TSI148_LCSR_DDAT_2eSSTM_160 (0 << 11) /* 160 MB/s */
#define TSI148_LCSR_DDAT_2eSSTM_267 BIT(11) /* 267 MB/s */
-#define TSI148_LCSR_DDAT_2eSSTM_320 (2<<11) /* 320 MB/s */
+#define TSI148_LCSR_DDAT_2eSSTM_320 (2 << 11) /* 320 MB/s */
-#define TSI148_LCSR_DDAT_TM_M (7<<8) /* Bus Transfer Protocol Mask */
-#define TSI148_LCSR_DDAT_TM_SCT (0<<8) /* SCT */
+#define TSI148_LCSR_DDAT_TM_M (7 << 8) /* Bus Transfer Protocol Mask */
+#define TSI148_LCSR_DDAT_TM_SCT (0 << 8) /* SCT */
#define TSI148_LCSR_DDAT_TM_BLT BIT(8) /* BLT */
-#define TSI148_LCSR_DDAT_TM_MBLT (2<<8) /* MBLT */
-#define TSI148_LCSR_DDAT_TM_2eVME (3<<8) /* 2eVME */
-#define TSI148_LCSR_DDAT_TM_2eSST (4<<8) /* 2eSST */
-#define TSI148_LCSR_DDAT_TM_2eSSTB (5<<8) /* 2eSST Broadcast */
+#define TSI148_LCSR_DDAT_TM_MBLT (2 << 8) /* MBLT */
+#define TSI148_LCSR_DDAT_TM_2eVME (3 << 8) /* 2eVME */
+#define TSI148_LCSR_DDAT_TM_2eSST (4 << 8) /* 2eSST */
+#define TSI148_LCSR_DDAT_TM_2eSSTB (5 << 8) /* 2eSST Broadcast */
-#define TSI148_LCSR_DDAT_DBW_M (3<<6) /* Max Data Width MASK */
-#define TSI148_LCSR_DDAT_DBW_16 (0<<6) /* 16 Bits */
+#define TSI148_LCSR_DDAT_DBW_M (3 << 6) /* Max Data Width MASK */
+#define TSI148_LCSR_DDAT_DBW_16 (0 << 6) /* 16 Bits */
#define TSI148_LCSR_DDAT_DBW_32 BIT(6) /* 32 Bits */
#define TSI148_LCSR_DDAT_SUP BIT(5) /* Supervisory/User Access */
#define TSI148_LCSR_DDAT_PGM BIT(4) /* Program/Data Access */
-#define TSI148_LCSR_DDAT_AMODE_M (0xf<<0) /* Address Space Mask */
-#define TSI148_LCSR_DDAT_AMODE_A16 (0<<0) /* A16 */
+#define TSI148_LCSR_DDAT_AMODE_M (0xf << 0) /* Address Space Mask */
+#define TSI148_LCSR_DDAT_AMODE_A16 (0 << 0) /* A16 */
#define TSI148_LCSR_DDAT_AMODE_A24 BIT(0) /* A24 */
-#define TSI148_LCSR_DDAT_AMODE_A32 (2<<0) /* A32 */
-#define TSI148_LCSR_DDAT_AMODE_A64 (4<<0) /* A64 */
-#define TSI148_LCSR_DDAT_AMODE_CRCSR (5<<0) /* CRC/SR */
-#define TSI148_LCSR_DDAT_AMODE_USER1 (8<<0) /* User1 */
-#define TSI148_LCSR_DDAT_AMODE_USER2 (9<<0) /* User2 */
-#define TSI148_LCSR_DDAT_AMODE_USER3 (0xa<<0) /* User3 */
-#define TSI148_LCSR_DDAT_AMODE_USER4 (0xb<<0) /* User4 */
+#define TSI148_LCSR_DDAT_AMODE_A32 (2 << 0) /* A32 */
+#define TSI148_LCSR_DDAT_AMODE_A64 (4 << 0) /* A64 */
+#define TSI148_LCSR_DDAT_AMODE_CRCSR (5 << 0) /* CR/CSR */
+#define TSI148_LCSR_DDAT_AMODE_USER1 (8 << 0) /* User1 */
+#define TSI148_LCSR_DDAT_AMODE_USER2 (9 << 0) /* User2 */
+#define TSI148_LCSR_DDAT_AMODE_USER3 (0xa << 0) /* User3 */
+#define TSI148_LCSR_DDAT_AMODE_USER4 (0xb << 0) /* User4 */
/*
* DMA Next Link Address Lower
*/
-#define TSI148_LCSR_DNLAL_DNLAL_M (0x3FFFFFF<<6) /* Address Mask */
+#define TSI148_LCSR_DNLAL_DNLAL_M (0x3FFFFFF << 6) /* Address Mask */
#define TSI148_LCSR_DNLAL_LLA BIT(0) /* Last Link Address Indicator */
/*
* DMA 2eSST Broadcast Select
*/
-#define TSI148_LCSR_DBS_M (0x1FFFFF<<0) /* Mask */
+#define TSI148_LCSR_DBS_M (0x1FFFFF << 0) /* Mask */
/*
* GCSR Register Group
@@ -1368,7 +1362,7 @@ static const int TSI148_LCSR_INTC_MBC[4] = { TSI148_LCSR_INTC_MB0C,
#define TSI148_GCSR_GCTRL_MBI0S BIT(0) /* Mail box 0 Int Status */
#define TSI148_GCSR_GAP BIT(5) /* Geographic Addr Parity */
-#define TSI148_GCSR_GA_M (0x1F<<0) /* Geographic Address Mask */
+#define TSI148_GCSR_GA_M (0x1F << 0) /* Geographic Address Mask */
/*
* CR/CSR Register Group
@@ -1395,6 +1389,6 @@ static const int TSI148_LCSR_INTC_MBC[4] = { TSI148_LCSR_INTC_MB0C,
/*
* CR/CSR Base Address Register CRG + FFC
*/
-#define TSI148_CRCSR_CBAR_M (0x1F<<3) /* Mask */
+#define TSI148_CRCSR_CBAR_M (0x1F << 3) /* Mask */
#endif /* TSI148_H */
diff --git a/drivers/staging/vme_user/vme_user.c b/drivers/staging/vme_user/vme_user.c
index b9367b575d00..36183f923768 100644
--- a/drivers/staging/vme_user/vme_user.c
+++ b/drivers/staging/vme_user/vme_user.c
@@ -37,7 +37,7 @@
#include "vme.h"
#include "vme_user.h"
-static const char driver_name[] = "vme_user";
+#define DRIVER_NAME "vme_user"
static int bus[VME_USER_BUS_MAX];
static unsigned int bus_num;
@@ -101,9 +101,11 @@ struct image_desc {
static struct image_desc image[VME_DEVS];
static struct cdev *vme_user_cdev; /* Character device */
-static struct class *vme_user_sysfs_class; /* Sysfs class */
static struct vme_dev *vme_user_bridge; /* Pointer to user device */
+static const struct class vme_user_sysfs_class = {
+ .name = DRIVER_NAME,
+};
static const int type[VME_DEVS] = { MASTER_MINOR, MASTER_MINOR,
MASTER_MINOR, MASTER_MINOR,
SLAVE_MINOR, SLAVE_MINOR,
@@ -540,8 +542,7 @@ static int vme_user_probe(struct vme_dev *vdev)
}
/* Assign major and minor numbers for the driver */
- err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS,
- driver_name);
+ err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS, DRIVER_NAME);
if (err) {
dev_warn(&vdev->dev, "Error getting Major Number %d for driver.\n",
VME_MAJOR);
@@ -614,10 +615,9 @@ static int vme_user_probe(struct vme_dev *vdev)
}
/* Create sysfs entries - on udev systems this creates the dev files */
- vme_user_sysfs_class = class_create(driver_name);
- if (IS_ERR(vme_user_sysfs_class)) {
+ err = class_register(&vme_user_sysfs_class);
+ if (err) {
dev_err(&vdev->dev, "Error creating vme_user class.\n");
- err = PTR_ERR(vme_user_sysfs_class);
goto err_master;
}
@@ -641,7 +641,7 @@ static int vme_user_probe(struct vme_dev *vdev)
}
num = (type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i;
- image[i].device = device_create(vme_user_sysfs_class, NULL,
+ image[i].device = device_create(&vme_user_sysfs_class, NULL,
MKDEV(VME_MAJOR, i), NULL,
name, num);
if (IS_ERR(image[i].device)) {
@@ -656,9 +656,9 @@ static int vme_user_probe(struct vme_dev *vdev)
err_sysfs:
while (i > 0) {
i--;
- device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
+ device_destroy(&vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
}
- class_destroy(vme_user_sysfs_class);
+ class_unregister(&vme_user_sysfs_class);
/* Ensure counter set correctly to unalloc all master windows */
i = MASTER_MAX + 1;
@@ -696,9 +696,9 @@ static void vme_user_remove(struct vme_dev *dev)
/* Remove sysfs Entries */
for (i = 0; i < VME_DEVS; i++) {
mutex_destroy(&image[i].mutex);
- device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
+ device_destroy(&vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
}
- class_destroy(vme_user_sysfs_class);
+ class_unregister(&vme_user_sysfs_class);
for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
kfree(image[i].kern_buf);
@@ -720,7 +720,7 @@ static void vme_user_remove(struct vme_dev *dev)
}
static struct vme_driver vme_user_driver = {
- .name = driver_name,
+ .name = DRIVER_NAME,
.match = vme_user_match,
.probe = vme_user_probe,
.remove = vme_user_remove,
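The vme_user.c hunks above switch from a class_create()-allocated class to a statically defined struct class registered with class_register(), and replace the driver_name string with a DRIVER_NAME macro so the same literal feeds the chrdev region, the class and the vme_driver name. A condensed, stand-alone sketch of that registration pattern, assuming the driver-core API where a driver-owned struct class is registered directly; the names here are illustrative rather than the exact vme_user code:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/types.h>

static const struct class example_class = {
	.name = "example_user",
};

static int example_register(dev_t devt)
{
	struct device *dev;
	int err;

	err = class_register(&example_class);   /* replaces class_create() */
	if (err)
		return err;

	dev = device_create(&example_class, NULL, devt, NULL, "example%d", 0);
	if (IS_ERR(dev)) {
		class_unregister(&example_class);
		return PTR_ERR(dev);
	}

	return 0;
}

static void example_unregister(dev_t devt)
{
	device_destroy(&example_class, devt);
	class_unregister(&example_class);        /* replaces class_destroy() */
}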
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index 696d4dd03aa2..f7824396c5ff 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -761,7 +761,7 @@ static const unsigned char vt3253b0_rfmd[CB_VT3253B0_INIT_FOR_RFMD][2] = {
#define CB_VT3253B0_AGC_FOR_RFMD2959 195
/* For RFMD2959 */
static
-unsigned char byVT3253B0_AGC4_RFMD2959[CB_VT3253B0_AGC_FOR_RFMD2959][2] = {
+unsigned char vt3253b0_agc4_rfmd2959[CB_VT3253B0_AGC_FOR_RFMD2959][2] = {
{0xF0, 0x00},
{0xF1, 0x3E},
{0xF0, 0x80},
@@ -962,7 +962,7 @@ unsigned char byVT3253B0_AGC4_RFMD2959[CB_VT3253B0_AGC_FOR_RFMD2959][2] = {
#define CB_VT3253B0_INIT_FOR_AIROHA2230 256
/* For AIROHA */
static
-unsigned char byVT3253B0_AIROHA2230[CB_VT3253B0_INIT_FOR_AIROHA2230][2] = {
+unsigned char vt3253b0_airoha2230[CB_VT3253B0_INIT_FOR_AIROHA2230][2] = {
{0x00, 0x31},
{0x01, 0x00},
{0x02, 0x00},
@@ -1223,7 +1223,7 @@ unsigned char byVT3253B0_AIROHA2230[CB_VT3253B0_INIT_FOR_AIROHA2230][2] = {
#define CB_VT3253B0_INIT_FOR_UW2451 256
/* For UW2451 */
-static unsigned char byVT3253B0_UW2451[CB_VT3253B0_INIT_FOR_UW2451][2] = {
+static unsigned char vt3253b0_uw2451[CB_VT3253B0_INIT_FOR_UW2451][2] = {
{0x00, 0x31},
{0x01, 0x00},
{0x02, 0x00},
@@ -1484,7 +1484,7 @@ static unsigned char byVT3253B0_UW2451[CB_VT3253B0_INIT_FOR_UW2451][2] = {
#define CB_VT3253B0_AGC 193
/* For AIROHA */
-static unsigned char byVT3253B0_AGC[CB_VT3253B0_AGC][2] = {
+static unsigned char vt3253b0_agc[CB_VT3253B0_AGC][2] = {
{0xF0, 0x00},
{0xF1, 0x00},
{0xF0, 0x80},
@@ -1979,7 +1979,7 @@ bool bb_write_embedded(struct vnt_private *priv, unsigned char by_bb_addr,
* In:
* iobase - I/O base address
* byRevId - Revision ID
- * byRFType - RF type
+ * rf_type - RF type
* Out:
* none
*
@@ -1992,10 +1992,10 @@ bool bb_vt3253_init(struct vnt_private *priv)
bool result = true;
int ii;
void __iomem *iobase = priv->port_offset;
- unsigned char by_rf_type = priv->byRFType;
+ unsigned char rf_type = priv->rf_type;
unsigned char by_local_id = priv->local_id;
- if (by_rf_type == RF_RFMD2959) {
+ if (rf_type == RF_RFMD2959) {
if (by_local_id <= REV_ID_VT3253_A1) {
for (ii = 0; ii < CB_VT3253_INIT_FOR_RFMD; ii++)
result &= bb_write_embedded(priv,
@@ -2010,74 +2010,74 @@ bool bb_vt3253_init(struct vnt_private *priv)
for (ii = 0; ii < CB_VT3253B0_AGC_FOR_RFMD2959; ii++)
result &= bb_write_embedded(priv,
- byVT3253B0_AGC4_RFMD2959[ii][0],
- byVT3253B0_AGC4_RFMD2959[ii][1]);
+ vt3253b0_agc4_rfmd2959[ii][0],
+ vt3253b0_agc4_rfmd2959[ii][1]);
iowrite32(0x23, iobase + MAC_REG_ITRTMSET);
vt6655_mac_reg_bits_on(iobase, MAC_REG_PAPEDELAY, BIT(0));
}
- priv->abyBBVGA[0] = 0x18;
- priv->abyBBVGA[1] = 0x0A;
- priv->abyBBVGA[2] = 0x0;
- priv->abyBBVGA[3] = 0x0;
+ priv->bbvga[0] = 0x18;
+ priv->bbvga[1] = 0x0A;
+ priv->bbvga[2] = 0x0;
+ priv->bbvga[3] = 0x0;
priv->dbm_threshold[0] = -70;
priv->dbm_threshold[1] = -50;
priv->dbm_threshold[2] = 0;
priv->dbm_threshold[3] = 0;
- } else if ((by_rf_type == RF_AIROHA) || (by_rf_type == RF_AL2230S)) {
+ } else if ((rf_type == RF_AIROHA) || (rf_type == RF_AL2230S)) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_AIROHA2230; ii++)
result &= bb_write_embedded(priv,
- byVT3253B0_AIROHA2230[ii][0],
- byVT3253B0_AIROHA2230[ii][1]);
+ vt3253b0_airoha2230[ii][0],
+ vt3253b0_airoha2230[ii][1]);
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
result &= bb_write_embedded(priv,
- byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
+ vt3253b0_agc[ii][0], vt3253b0_agc[ii][1]);
- priv->abyBBVGA[0] = 0x1C;
- priv->abyBBVGA[1] = 0x10;
- priv->abyBBVGA[2] = 0x0;
- priv->abyBBVGA[3] = 0x0;
+ priv->bbvga[0] = 0x1C;
+ priv->bbvga[1] = 0x10;
+ priv->bbvga[2] = 0x0;
+ priv->bbvga[3] = 0x0;
priv->dbm_threshold[0] = -70;
priv->dbm_threshold[1] = -48;
priv->dbm_threshold[2] = 0;
priv->dbm_threshold[3] = 0;
- } else if (by_rf_type == RF_UW2451) {
+ } else if (rf_type == RF_UW2451) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_UW2451; ii++)
result &= bb_write_embedded(priv,
- byVT3253B0_UW2451[ii][0],
- byVT3253B0_UW2451[ii][1]);
+ vt3253b0_uw2451[ii][0],
+ vt3253b0_uw2451[ii][1]);
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
result &= bb_write_embedded(priv,
- byVT3253B0_AGC[ii][0],
- byVT3253B0_AGC[ii][1]);
+ vt3253b0_agc[ii][0],
+ vt3253b0_agc[ii][1]);
iowrite8(0x23, iobase + MAC_REG_ITRTMSET);
vt6655_mac_reg_bits_on(iobase, MAC_REG_PAPEDELAY, BIT(0));
- priv->abyBBVGA[0] = 0x14;
- priv->abyBBVGA[1] = 0x0A;
- priv->abyBBVGA[2] = 0x0;
- priv->abyBBVGA[3] = 0x0;
+ priv->bbvga[0] = 0x14;
+ priv->bbvga[1] = 0x0A;
+ priv->bbvga[2] = 0x0;
+ priv->bbvga[3] = 0x0;
priv->dbm_threshold[0] = -60;
priv->dbm_threshold[1] = -50;
priv->dbm_threshold[2] = 0;
priv->dbm_threshold[3] = 0;
- } else if (by_rf_type == RF_VT3226) {
+ } else if (rf_type == RF_VT3226) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_AIROHA2230; ii++)
result &= bb_write_embedded(priv,
- byVT3253B0_AIROHA2230[ii][0],
- byVT3253B0_AIROHA2230[ii][1]);
+ vt3253b0_airoha2230[ii][0],
+ vt3253b0_airoha2230[ii][1]);
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
result &= bb_write_embedded(priv,
- byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
+ vt3253b0_agc[ii][0], vt3253b0_agc[ii][1]);
- priv->abyBBVGA[0] = 0x1C;
- priv->abyBBVGA[1] = 0x10;
- priv->abyBBVGA[2] = 0x0;
- priv->abyBBVGA[3] = 0x0;
+ priv->bbvga[0] = 0x1C;
+ priv->bbvga[1] = 0x10;
+ priv->bbvga[2] = 0x0;
+ priv->bbvga[3] = 0x0;
priv->dbm_threshold[0] = -70;
priv->dbm_threshold[1] = -48;
priv->dbm_threshold[2] = 0;
@@ -2087,8 +2087,8 @@ bool bb_vt3253_init(struct vnt_private *priv)
/* {{ RobertYu: 20050104 */
} else {
/* No VGA Table now */
- priv->bUpdateBBVGA = false;
- priv->abyBBVGA[0] = 0x1C;
+ priv->update_bbvga = false;
+ priv->bbvga[0] = 0x1C;
}
if (by_local_id > REV_ID_VT3253_A1) {
@@ -2126,7 +2126,7 @@ bb_set_short_slot_time(struct vnt_private *priv)
/* patch for 3253B0 Baseband with Cardbus module */
bb_read_embedded(priv, 0xE7, &by_bb_vga);
- if (by_bb_vga == priv->abyBBVGA[0])
+ if (by_bb_vga == priv->bbvga[0])
by_bb_rx_conf |= 0x20; /* 0010 0000 */
bb_write_embedded(priv, 0x0A, by_bb_rx_conf); /* CR10 */
@@ -2140,13 +2140,13 @@ void bb_set_vga_gain_offset(struct vnt_private *priv, unsigned char by_data)
bb_read_embedded(priv, 0x0A, &by_bb_rx_conf); /* CR10 */
/* patch for 3253B0 Baseband with Cardbus module */
- if (by_data == priv->abyBBVGA[0])
+ if (by_data == priv->bbvga[0])
by_bb_rx_conf |= 0x20; /* 0010 0000 */
else if (priv->short_slot_time)
by_bb_rx_conf &= 0xDF; /* 1101 1111 */
else
by_bb_rx_conf |= 0x20; /* 0010 0000 */
- priv->byBBVGACurrent = by_data;
+ priv->bbvga_current = by_data;
bb_write_embedded(priv, 0x0A, by_bb_rx_conf); /* CR10 */
}
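The baseband.c changes are mechanical renames: the Hungarian-prefixed register tables (byVT3253B0_*) and fields (abyBBVGA, byBBVGACurrent, bUpdateBBVGA, byRFType) lose their type prefixes, and the initialisation logic is untouched. For context, those tables are consumed by a simple address/value loop, as in the sketch below written as it would sit inside baseband.c; the table contents and helper name are made up for illustration:

static const unsigned char example_bb_table[][2] = {
	{ 0x00, 0x31 },	/* { BB register address, value } */
	{ 0x01, 0x00 },
	{ 0xF0, 0x80 },
};

static bool example_bb_table_init(struct vnt_private *priv)
{
	bool result = true;
	unsigned int ii;

	for (ii = 0; ii < ARRAY_SIZE(example_bb_table); ii++)
		result &= bb_write_embedded(priv,
					    example_bb_table[ii][0],
					    example_bb_table[ii][1]);

	return result;
}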
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index c680925b9c92..350ab8f3778a 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -6,7 +6,7 @@
* Purpose: Provide functions to setup NIC operation mode
* Functions:
 * s_vSafeResetTx - Reset Tx
- * CARDvSetRSPINF - Set RSPINF
+ * card_set_rspinf - Set RSPINF
* CARDvUpdateBasicTopRate - Update BasicTopRate
* CARDbAddBasicRate - Add to BasicRateSet
* CARDbIsOFDMinBasicRate - Check if any OFDM rate is in BasicRateSet
@@ -50,7 +50,7 @@
/*--------------------- Static Variables --------------------------*/
-static const unsigned short cwRXBCNTSFOff[MAX_RATE] = {
+static const unsigned short rx_bcn_tsf_off[MAX_RATE] = {
17, 17, 17, 17, 34, 23, 17, 11, 8, 5, 4, 3};
/*--------------------- Static Functions --------------------------*/
@@ -75,95 +75,95 @@ static void vt6655_mac_set_bb_type(void __iomem *iobase, u32 mask)
* wRate - Tx Rate
* byPktType - Tx Packet type
* Out:
- * pbyTxRate - pointer to RSPINF TxRate field
- * pbyRsvTime - pointer to RSPINF RsvTime field
+ * tx_rate - pointer to RSPINF TxRate field
+ * rsv_time - pointer to RSPINF RsvTime field
*
* Return Value: none
*/
-static void s_vCalculateOFDMRParameter(unsigned char rate,
+static void calculate_ofdmr_parameter(unsigned char rate,
u8 bb_type,
- unsigned char *pbyTxRate,
- unsigned char *pbyRsvTime)
+ unsigned char *tx_rate,
+ unsigned char *rsv_time)
{
switch (rate) {
case RATE_6M:
if (bb_type == BB_TYPE_11A) { /* 5GHZ */
- *pbyTxRate = 0x9B;
- *pbyRsvTime = 44;
+ *tx_rate = 0x9B;
+ *rsv_time = 44;
} else {
- *pbyTxRate = 0x8B;
- *pbyRsvTime = 50;
+ *tx_rate = 0x8B;
+ *rsv_time = 50;
}
break;
case RATE_9M:
if (bb_type == BB_TYPE_11A) { /* 5GHZ */
- *pbyTxRate = 0x9F;
- *pbyRsvTime = 36;
+ *tx_rate = 0x9F;
+ *rsv_time = 36;
} else {
- *pbyTxRate = 0x8F;
- *pbyRsvTime = 42;
+ *tx_rate = 0x8F;
+ *rsv_time = 42;
}
break;
case RATE_12M:
if (bb_type == BB_TYPE_11A) { /* 5GHZ */
- *pbyTxRate = 0x9A;
- *pbyRsvTime = 32;
+ *tx_rate = 0x9A;
+ *rsv_time = 32;
} else {
- *pbyTxRate = 0x8A;
- *pbyRsvTime = 38;
+ *tx_rate = 0x8A;
+ *rsv_time = 38;
}
break;
case RATE_18M:
if (bb_type == BB_TYPE_11A) { /* 5GHZ */
- *pbyTxRate = 0x9E;
- *pbyRsvTime = 28;
+ *tx_rate = 0x9E;
+ *rsv_time = 28;
} else {
- *pbyTxRate = 0x8E;
- *pbyRsvTime = 34;
+ *tx_rate = 0x8E;
+ *rsv_time = 34;
}
break;
case RATE_36M:
if (bb_type == BB_TYPE_11A) { /* 5GHZ */
- *pbyTxRate = 0x9D;
- *pbyRsvTime = 24;
+ *tx_rate = 0x9D;
+ *rsv_time = 24;
} else {
- *pbyTxRate = 0x8D;
- *pbyRsvTime = 30;
+ *tx_rate = 0x8D;
+ *rsv_time = 30;
}
break;
case RATE_48M:
if (bb_type == BB_TYPE_11A) { /* 5GHZ */
- *pbyTxRate = 0x98;
- *pbyRsvTime = 24;
+ *tx_rate = 0x98;
+ *rsv_time = 24;
} else {
- *pbyTxRate = 0x88;
- *pbyRsvTime = 30;
+ *tx_rate = 0x88;
+ *rsv_time = 30;
}
break;
case RATE_54M:
if (bb_type == BB_TYPE_11A) { /* 5GHZ */
- *pbyTxRate = 0x9C;
- *pbyRsvTime = 24;
+ *tx_rate = 0x9C;
+ *rsv_time = 24;
} else {
- *pbyTxRate = 0x8C;
- *pbyRsvTime = 30;
+ *tx_rate = 0x8C;
+ *rsv_time = 30;
}
break;
case RATE_24M:
default:
if (bb_type == BB_TYPE_11A) { /* 5GHZ */
- *pbyTxRate = 0x99;
- *pbyRsvTime = 28;
+ *tx_rate = 0x99;
+ *rsv_time = 28;
} else {
- *pbyTxRate = 0x89;
- *pbyRsvTime = 34;
+ *tx_rate = 0x89;
+ *rsv_time = 34;
}
break;
}
@@ -182,59 +182,59 @@ static void s_vCalculateOFDMRParameter(unsigned char rate,
*
* Return Value: None.
*/
-bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
+bool card_set_phy_parameter(struct vnt_private *priv, u8 bb_type)
{
- unsigned char byCWMaxMin = 0;
- unsigned char bySlot = 0;
- unsigned char bySIFS = 0;
- unsigned char byDIFS = 0;
+ unsigned char cw_max_min = 0;
+ unsigned char slot = 0;
+ unsigned char sifs = 0;
+ unsigned char difs = 0;
int i;
/* Set SIFS, DIFS, EIFS, SlotTime, CwMin */
if (bb_type == BB_TYPE_11A) {
vt6655_mac_set_bb_type(priv->port_offset, BB_TYPE_11A);
bb_write_embedded(priv, 0x88, 0x03);
- bySlot = C_SLOT_SHORT;
- bySIFS = C_SIFS_A;
- byDIFS = C_SIFS_A + 2 * C_SLOT_SHORT;
- byCWMaxMin = 0xA4;
+ slot = C_SLOT_SHORT;
+ sifs = C_SIFS_A;
+ difs = C_SIFS_A + 2 * C_SLOT_SHORT;
+ cw_max_min = 0xA4;
} else if (bb_type == BB_TYPE_11B) {
vt6655_mac_set_bb_type(priv->port_offset, BB_TYPE_11B);
bb_write_embedded(priv, 0x88, 0x02);
- bySlot = C_SLOT_LONG;
- bySIFS = C_SIFS_BG;
- byDIFS = C_SIFS_BG + 2 * C_SLOT_LONG;
- byCWMaxMin = 0xA5;
+ slot = C_SLOT_LONG;
+ sifs = C_SIFS_BG;
+ difs = C_SIFS_BG + 2 * C_SLOT_LONG;
+ cw_max_min = 0xA5;
} else { /* PK_TYPE_11GA & PK_TYPE_11GB */
vt6655_mac_set_bb_type(priv->port_offset, BB_TYPE_11G);
bb_write_embedded(priv, 0x88, 0x08);
- bySIFS = C_SIFS_BG;
+ sifs = C_SIFS_BG;
if (priv->short_slot_time) {
- bySlot = C_SLOT_SHORT;
- byDIFS = C_SIFS_BG + 2 * C_SLOT_SHORT;
+ slot = C_SLOT_SHORT;
+ difs = C_SIFS_BG + 2 * C_SLOT_SHORT;
} else {
- bySlot = C_SLOT_LONG;
- byDIFS = C_SIFS_BG + 2 * C_SLOT_LONG;
+ slot = C_SLOT_LONG;
+ difs = C_SIFS_BG + 2 * C_SLOT_LONG;
}
- byCWMaxMin = 0xa4;
+ cw_max_min = 0xa4;
for (i = RATE_54M; i >= RATE_6M; i--) {
if (priv->basic_rates & ((u32)(0x1 << i))) {
- byCWMaxMin |= 0x1;
+ cw_max_min |= 0x1;
break;
}
}
}
- if (priv->byRFType == RF_RFMD2959) {
+ if (priv->rf_type == RF_RFMD2959) {
/*
* bcs TX_PE will reserve 3 us hardware's processing
* time here is 2 us.
*/
- bySIFS -= 3;
- byDIFS -= 3;
+ sifs -= 3;
+ difs -= 3;
/*
* TX_PE will reserve 3 us for MAX2829 A mode only, it is for
* better TX throughput; MAC will need 2 us to process, so the
@@ -242,32 +242,32 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
*/
}
- if (priv->bySIFS != bySIFS) {
- priv->bySIFS = bySIFS;
- iowrite8(priv->bySIFS, priv->port_offset + MAC_REG_SIFS);
+ if (priv->sifs != sifs) {
+ priv->sifs = sifs;
+ iowrite8(priv->sifs, priv->port_offset + MAC_REG_SIFS);
}
- if (priv->byDIFS != byDIFS) {
- priv->byDIFS = byDIFS;
- iowrite8(priv->byDIFS, priv->port_offset + MAC_REG_DIFS);
+ if (priv->difs != difs) {
+ priv->difs = difs;
+ iowrite8(priv->difs, priv->port_offset + MAC_REG_DIFS);
}
- if (priv->byEIFS != C_EIFS) {
- priv->byEIFS = C_EIFS;
- iowrite8(priv->byEIFS, priv->port_offset + MAC_REG_EIFS);
+ if (priv->eifs != C_EIFS) {
+ priv->eifs = C_EIFS;
+ iowrite8(priv->eifs, priv->port_offset + MAC_REG_EIFS);
}
- if (priv->bySlot != bySlot) {
- priv->bySlot = bySlot;
- iowrite8(priv->bySlot, priv->port_offset + MAC_REG_SLOT);
+ if (priv->slot != slot) {
+ priv->slot = slot;
+ iowrite8(priv->slot, priv->port_offset + MAC_REG_SLOT);
bb_set_short_slot_time(priv);
}
- if (priv->byCWMaxMin != byCWMaxMin) {
- priv->byCWMaxMin = byCWMaxMin;
- iowrite8(priv->byCWMaxMin, priv->port_offset + MAC_REG_CWMAXMIN0);
+ if (priv->cw_max_min != cw_max_min) {
+ priv->cw_max_min = cw_max_min;
+ iowrite8(priv->cw_max_min, priv->port_offset + MAC_REG_CWMAXMIN0);
}
- priv->byPacketType = CARDbyGetPktType(priv);
+ priv->packet_type = card_get_pkt_type(priv);
- CARDvSetRSPINF(priv, bb_type);
+ card_set_rspinf(priv, bb_type);
return true;
}
@@ -278,8 +278,8 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
*
* Parameters:
* In:
- * priv - The adapter to be sync.
- * byRxRate - data rate of receive beacon
+ * priv - The adapter to be sync.
+ * rx_rate - data rate of receive beacon
* qwBSSTimestamp - Rx BCN's TSF
* qwLocalTSF - Local TSF
* Out:
@@ -287,7 +287,7 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
*
* Return Value: none
*/
-bool CARDbUpdateTSF(struct vnt_private *priv, unsigned char byRxRate,
+bool card_update_tsf(struct vnt_private *priv, unsigned char rx_rate,
u64 qwBSSTimestamp)
{
u64 local_tsf;
@@ -296,7 +296,7 @@ bool CARDbUpdateTSF(struct vnt_private *priv, unsigned char byRxRate,
local_tsf = vt6655_get_current_tsf(priv);
if (qwBSSTimestamp != local_tsf) {
- qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp,
+ qwTSFOffset = CARDqGetTSFOffset(rx_rate, qwBSSTimestamp,
local_tsf);
/* adjust TSF, HW's TSF add TSF Offset reg */
qwTSFOffset = le64_to_cpu(qwTSFOffset);
@@ -356,7 +356,7 @@ void CARDbRadioPowerOff(struct vnt_private *priv)
if (priv->radio_off)
return;
- switch (priv->byRFType) {
+ switch (priv->rf_type) {
case RF_RFMD2959:
vt6655_mac_word_reg_bits_off(priv->port_offset, MAC_REG_SOFTPWRCTL,
SOFTPWRCTL_TXPEINV);
@@ -536,7 +536,7 @@ static unsigned short CARDwGetOFDMControlRate(struct vnt_private *priv,
*
* Return Value: None.
*/
-void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
+void card_set_rspinf(struct vnt_private *priv, u8 bb_type)
{
union vnt_phy_field_swap phy;
unsigned char byTxRate, byRsvTime; /* For OFDM */
@@ -585,58 +585,58 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
iowrite32(phy.field_write, priv->port_offset + MAC_REG_RSPINF_B_11);
/* RSPINF_a_6 */
- s_vCalculateOFDMRParameter(RATE_6M,
+ calculate_ofdmr_parameter(RATE_6M,
bb_type,
&byTxRate,
&byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_6);
/* RSPINF_a_9 */
- s_vCalculateOFDMRParameter(RATE_9M,
+ calculate_ofdmr_parameter(RATE_9M,
bb_type,
&byTxRate,
&byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_9);
/* RSPINF_a_12 */
- s_vCalculateOFDMRParameter(RATE_12M,
+ calculate_ofdmr_parameter(RATE_12M,
bb_type,
&byTxRate,
&byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_12);
/* RSPINF_a_18 */
- s_vCalculateOFDMRParameter(RATE_18M,
+ calculate_ofdmr_parameter(RATE_18M,
bb_type,
&byTxRate,
&byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_18);
/* RSPINF_a_24 */
- s_vCalculateOFDMRParameter(RATE_24M,
+ calculate_ofdmr_parameter(RATE_24M,
bb_type,
&byTxRate,
&byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_24);
/* RSPINF_a_36 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv,
+ calculate_ofdmr_parameter(CARDwGetOFDMControlRate((void *)priv,
RATE_36M),
bb_type,
&byTxRate,
&byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_36);
/* RSPINF_a_48 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv,
+ calculate_ofdmr_parameter(CARDwGetOFDMControlRate((void *)priv,
RATE_48M),
bb_type,
&byTxRate,
&byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_48);
/* RSPINF_a_54 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv,
+ calculate_ofdmr_parameter(CARDwGetOFDMControlRate((void *)priv,
RATE_54M),
bb_type,
&byTxRate,
&byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_54);
/* RSPINF_a_72 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv,
+ calculate_ofdmr_parameter(CARDwGetOFDMControlRate((void *)priv,
RATE_54M),
bb_type,
&byTxRate,
@@ -684,7 +684,7 @@ bool CARDbIsOFDMinBasicRate(struct vnt_private *priv)
return false;
}
-unsigned char CARDbyGetPktType(struct vnt_private *priv)
+unsigned char card_get_pkt_type(struct vnt_private *priv)
{
if (priv->byBBType == BB_TYPE_11A || priv->byBBType == BB_TYPE_11B)
return (unsigned char)priv->byBBType;
@@ -708,11 +708,11 @@ unsigned char CARDbyGetPktType(struct vnt_private *priv)
*
* Return Value: TSF Offset value
*/
-u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2)
+u64 CARDqGetTSFOffset(unsigned char rx_rate, u64 qwTSF1, u64 qwTSF2)
{
unsigned short wRxBcnTSFOffst;
- wRxBcnTSFOffst = cwRXBCNTSFOff[byRxRate % MAX_RATE];
+ wRxBcnTSFOffst = rx_bcn_tsf_off[rx_rate % MAX_RATE];
qwTSF2 += (u64)wRxBcnTSFOffst;
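The card.c renames are likewise mechanical: s_vCalculateOFDMRParameter becomes calculate_ofdmr_parameter, the CARDb*/CARDv* entry points gain snake_case names, and the rx_bcn_tsf_off[] table keeps its role of mapping a beacon RX rate to a TSF correction. For reference, the calculate_ofdmr_parameter() output pair is packed with MAKEWORD() into one RSPINF register, as the card_set_rspinf() hunks above show. A minimal sketch of that single step, as it would sit inside card.c; the register choice and helper name are illustrative only:

static void example_write_rspinf_a6(struct vnt_private *priv, u8 bb_type)
{
	unsigned char tx_rate, rsv_time;

	/* Fill the RSPINF TxRate/RsvTime pair for the 6M response rate. */
	calculate_ofdmr_parameter(RATE_6M, bb_type, &tx_rate, &rsv_time);

	iowrite16(MAKEWORD(tx_rate, rsv_time),
		  priv->port_offset + MAC_REG_RSPINF_A_6);
}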
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 22dc359a6565..19689a291f5b 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -39,7 +39,7 @@
struct vnt_private;
-void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type);
+void card_set_rspinf(struct vnt_private *priv, u8 bb_type);
void CARDvUpdateBasicTopRate(struct vnt_private *priv);
bool CARDbIsOFDMinBasicRate(struct vnt_private *priv);
void CARDvSetFirstNextTBTT(struct vnt_private *priv,
@@ -48,13 +48,13 @@ void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF,
unsigned short wBeaconInterval);
u64 vt6655_get_current_tsf(struct vnt_private *priv);
u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval);
-u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2);
-unsigned char CARDbyGetPktType(struct vnt_private *priv);
+u64 CARDqGetTSFOffset(unsigned char rx_rate, u64 qwTSF1, u64 qwTSF2);
+unsigned char card_get_pkt_type(struct vnt_private *priv);
void CARDvSafeResetTx(struct vnt_private *priv);
void CARDvSafeResetRx(struct vnt_private *priv);
void CARDbRadioPowerOff(struct vnt_private *priv);
-bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type);
-bool CARDbUpdateTSF(struct vnt_private *priv, unsigned char byRxRate,
+bool card_set_phy_parameter(struct vnt_private *priv, u8 bb_type);
+bool card_update_tsf(struct vnt_private *priv, unsigned char rx_rate,
u64 qwBSSTimestamp);
bool CARDbSetBeaconPeriod(struct vnt_private *priv,
unsigned short wBeaconInterval);
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index 4122875ebcaa..771c1364b0f0 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -82,15 +82,15 @@ bool set_channel(struct vnt_private *priv, struct ieee80211_channel *ch)
{
bool ret = true;
- if (priv->byCurrentCh == ch->hw_value)
+ if (priv->current_ch == ch->hw_value)
return ret;
/* Set VGA to max sensitivity */
- if (priv->bUpdateBBVGA &&
- priv->byBBVGACurrent != priv->abyBBVGA[0]) {
- priv->byBBVGACurrent = priv->abyBBVGA[0];
+ if (priv->update_bbvga &&
+ priv->bbvga_current != priv->bbvga[0]) {
+ priv->bbvga_current = priv->bbvga[0];
- bb_set_vga_gain_offset(priv, priv->byBBVGACurrent);
+ bb_set_vga_gain_offset(priv, priv->bbvga_current);
}
/* clear NAV */
@@ -100,13 +100,13 @@ bool set_channel(struct vnt_private *priv, struct ieee80211_channel *ch)
* it is for better TX throughput
*/
- priv->byCurrentCh = ch->hw_value;
- ret &= RFbSelectChannel(priv, priv->byRFType,
+ priv->current_ch = ch->hw_value;
+ ret &= RFbSelectChannel(priv, priv->rf_type,
ch->hw_value);
/* Init Synthesizer Table */
if (priv->bEnablePSMode)
- rf_write_wake_prog_syn(priv, priv->byRFType, ch->hw_value);
+ rf_write_wake_prog_syn(priv, priv->rf_type, ch->hw_value);
bb_software_reset(priv);
@@ -117,19 +117,19 @@ bool set_channel(struct vnt_private *priv, struct ieee80211_channel *ch)
/* set HW default power register */
VT6655_MAC_SELECT_PAGE1(priv->port_offset);
- RFbSetPower(priv, RATE_1M, priv->byCurrentCh);
- iowrite8(priv->byCurPwr, priv->port_offset + MAC_REG_PWRCCK);
- RFbSetPower(priv, RATE_6M, priv->byCurrentCh);
- iowrite8(priv->byCurPwr, priv->port_offset + MAC_REG_PWROFDM);
+ RFbSetPower(priv, RATE_1M, priv->current_ch);
+ iowrite8(priv->cur_pwr, priv->port_offset + MAC_REG_PWRCCK);
+ RFbSetPower(priv, RATE_6M, priv->current_ch);
+ iowrite8(priv->cur_pwr, priv->port_offset + MAC_REG_PWROFDM);
VT6655_MAC_SELECT_PAGE0(priv->port_offset);
spin_unlock_irqrestore(&priv->lock, flags);
}
if (priv->byBBType == BB_TYPE_11B)
- RFbSetPower(priv, RATE_1M, priv->byCurrentCh);
+ RFbSetPower(priv, RATE_1M, priv->current_ch);
else
- RFbSetPower(priv, RATE_6M, priv->byCurrentCh);
+ RFbSetPower(priv, RATE_6M, priv->current_ch);
return ret;
}
diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h
index c272a4ab2fa0..d9ee0b7401fe 100644
--- a/drivers/staging/vt6655/device.h
+++ b/drivers/staging/vt6655/device.h
@@ -153,7 +153,7 @@ struct vnt_private {
/* Version control */
unsigned char local_id;
- unsigned char byRFType;
+ unsigned char rf_type;
unsigned char max_pwr_level;
unsigned char byZoneType;
@@ -181,14 +181,14 @@ struct vnt_private {
unsigned int uCwMin; /* Current CwMin */
unsigned int uCwMax; /* CwMax is fixed on 1023. */
/* PHY parameter */
- unsigned char bySIFS;
- unsigned char byDIFS;
- unsigned char byEIFS;
- unsigned char bySlot;
- unsigned char byCWMaxMin;
+ unsigned char sifs;
+ unsigned char difs;
+ unsigned char eifs;
+ unsigned char slot;
+ unsigned char cw_max_min;
u8 byBBType; /* 0:11A, 1:11B, 2:11G */
- u8 byPacketType; /*
+ u8 packet_type; /*
* 0:11a,1:11b,2:11gb (only CCK
* in BasicRate), 3:11ga (OFDM in
* Basic Rate)
@@ -239,21 +239,21 @@ struct vnt_private {
bool bIsBeaconBufReadySet;
unsigned int cbBeaconBufReadySetCnt;
bool bFixRate;
- u16 byCurrentCh;
+ u16 current_ch;
bool bAES;
unsigned char byAutoFBCtrl;
/* For Update BaseBand VGA Gain Offset */
- bool bUpdateBBVGA;
+ bool update_bbvga;
unsigned int uBBVGADiffCount;
- unsigned char byBBVGANew;
- unsigned char byBBVGACurrent;
- unsigned char abyBBVGA[BB_VGA_LEVEL];
+ unsigned char bbvga_new;
+ unsigned char bbvga_current;
+ unsigned char bbvga[BB_VGA_LEVEL];
long dbm_threshold[BB_VGA_LEVEL];
- unsigned char byBBPreEDRSSI;
+ unsigned char bb_pre_edrssi;
unsigned char byBBPreEDIndex;
unsigned long dwDiagRefCount;
@@ -264,7 +264,7 @@ struct vnt_private {
/* For RF Power table */
unsigned char byCCKPwr;
unsigned char byOFDMPwrG;
- unsigned char byCurPwr;
+ unsigned char cur_pwr;
char byCurPwrdBm;
unsigned char abyCCKPwrTbl[CB_MAX_CHANNEL_24G + 1];
unsigned char abyOFDMPwrTbl[CB_MAX_CHANNEL + 1];
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 45e5eccadb44..7d297526e653 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -177,9 +177,9 @@ device_set_options(struct vnt_private *priv)
priv->byShortRetryLimit = priv->opts.short_retry;
priv->byLongRetryLimit = priv->opts.long_retry;
priv->byBBType = priv->opts.bbp_type;
- priv->byPacketType = priv->byBBType;
+ priv->packet_type = priv->byBBType;
priv->byAutoFBCtrl = AUTO_FB_0;
- priv->bUpdateBBVGA = true;
+ priv->update_bbvga = true;
priv->preamble_type = 0;
pr_debug(" byShortRetryLimit= %d\n", (int)priv->byShortRetryLimit);
@@ -359,7 +359,7 @@ static void device_init_registers(struct vnt_private *priv)
RFbInit(priv);
/* Get Desire Power Value */
- priv->byCurPwr = 0xFF;
+ priv->cur_pwr = 0xFF;
priv->byCCKPwr = SROMbyReadEmbedded(priv->port_offset, EEP_OFS_PWR_CCK);
priv->byOFDMPwrG = SROMbyReadEmbedded(priv->port_offset,
EEP_OFS_PWR_OFDMG);
@@ -423,10 +423,10 @@ static void device_init_registers(struct vnt_private *priv)
/* initialize BBP registers */
bb_vt3253_init(priv);
- if (priv->bUpdateBBVGA) {
- priv->byBBVGACurrent = priv->abyBBVGA[0];
- priv->byBBVGANew = priv->byBBVGACurrent;
- bb_set_vga_gain_offset(priv, priv->abyBBVGA[0]);
+ if (priv->update_bbvga) {
+ priv->bbvga_current = priv->bbvga[0];
+ priv->bbvga_new = priv->bbvga_current;
+ bb_set_vga_gain_offset(priv, priv->bbvga[0]);
}
bb_set_rx_antenna_mode(priv, priv->byRxAntennaMode);
@@ -1040,7 +1040,7 @@ static void vnt_check_bb_vga(struct vnt_private *priv)
long dbm;
int i;
- if (!priv->bUpdateBBVGA)
+ if (!priv->update_bbvga)
return;
if (priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
@@ -1053,12 +1053,12 @@ static void vnt_check_bb_vga(struct vnt_private *priv)
for (i = 0; i < BB_VGA_LEVEL; i++) {
if (dbm < priv->dbm_threshold[i]) {
- priv->byBBVGANew = priv->abyBBVGA[i];
+ priv->bbvga_new = priv->bbvga[i];
break;
}
}
- if (priv->byBBVGANew == priv->byBBVGACurrent) {
+ if (priv->bbvga_new == priv->bbvga_current) {
priv->uBBVGADiffCount = 1;
return;
}
@@ -1067,23 +1067,23 @@ static void vnt_check_bb_vga(struct vnt_private *priv)
if (priv->uBBVGADiffCount == 1) {
/* first VGA diff gain */
- bb_set_vga_gain_offset(priv, priv->byBBVGANew);
+ bb_set_vga_gain_offset(priv, priv->bbvga_new);
dev_dbg(&priv->pcid->dev,
"First RSSI[%d] NewGain[%d] OldGain[%d] Count[%d]\n",
- (int)dbm, priv->byBBVGANew,
- priv->byBBVGACurrent,
+ (int)dbm, priv->bbvga_new,
+ priv->bbvga_current,
(int)priv->uBBVGADiffCount);
}
if (priv->uBBVGADiffCount >= BB_VGA_CHANGE_THRESHOLD) {
dev_dbg(&priv->pcid->dev,
"RSSI[%d] NewGain[%d] OldGain[%d] Count[%d]\n",
- (int)dbm, priv->byBBVGANew,
- priv->byBBVGACurrent,
+ (int)dbm, priv->bbvga_new,
+ priv->bbvga_current,
(int)priv->uBBVGADiffCount);
- bb_set_vga_gain_offset(priv, priv->byBBVGANew);
+ bb_set_vga_gain_offset(priv, priv->bbvga_new);
}
}
@@ -1442,7 +1442,7 @@ static int vnt_config(struct ieee80211_hw *hw, u32 changed)
if (priv->byBBType != bb_type) {
priv->byBBType = bb_type;
- CARDbSetPhyParameter(priv, priv->byBBType);
+ card_set_phy_parameter(priv, priv->byBBType);
}
}
@@ -1509,8 +1509,8 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
else
priv->short_slot_time = false;
- CARDbSetPhyParameter(priv, priv->byBBType);
- bb_set_vga_gain_offset(priv, priv->abyBBVGA[0]);
+ card_set_phy_parameter(priv, priv->byBBType);
+ bb_set_vga_gain_offset(priv, priv->bbvga[0]);
}
if (changed & BSS_CHANGED_TXPOWER)
@@ -1534,7 +1534,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
priv->op_mode != NL80211_IFTYPE_AP) {
if (vif->cfg.assoc && conf->beacon_rate) {
- CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
+ card_update_tsf(priv, conf->beacon_rate->hw_value,
conf->sync_tsf);
CARDbSetBeaconPeriod(priv, conf->beacon_int);
@@ -1792,10 +1792,10 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
vt6655_mac_read_ether_addr(priv->port_offset, priv->abyCurrentNetAddr);
/* Get RFType */
- priv->byRFType = SROMbyReadEmbedded(priv->port_offset, EEP_OFS_RFTYPE);
- priv->byRFType &= RF_MASK;
+ priv->rf_type = SROMbyReadEmbedded(priv->port_offset, EEP_OFS_RFTYPE);
+ priv->rf_type &= RF_MASK;
- dev_dbg(&pcid->dev, "RF Type = %x\n", priv->byRFType);
+ dev_dbg(&pcid->dev, "RF Type = %x\n", priv->rf_type);
device_get_options(priv);
device_set_options(priv);
diff --git a/drivers/staging/vt6655/dpc.c b/drivers/staging/vt6655/dpc.c
index c6ed3537f439..7ada188e2048 100644
--- a/drivers/staging/vt6655/dpc.c
+++ b/drivers/staging/vt6655/dpc.c
@@ -79,7 +79,7 @@ static bool vnt_rx_data(struct vnt_private *priv, struct sk_buff *skb,
RFvRSSITodBm(priv, *rssi, &rx_dbm);
- priv->byBBPreEDRSSI = (u8)rx_dbm + 1;
+ priv->bb_pre_edrssi = (u8)rx_dbm + 1;
priv->current_rssi = *rssi;
skb_pull(skb, 4);
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index 1fadc2fc4412..d319ec21c97b 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -268,7 +268,7 @@ static bool RFbAL2230SelectChannel(struct vnt_private *priv, unsigned char byCha
* Parameters:
* In:
* byBBType
- * byRFType
+ * rf_type
* Out:
* none
*
@@ -279,7 +279,7 @@ bool RFbInit(struct vnt_private *priv)
{
bool ret = true;
- switch (priv->byRFType) {
+ switch (priv->rf_type) {
case RF_AIROHA:
case RF_AL2230S:
priv->max_pwr_level = AL2230_PWR_IDX_LEN;
@@ -300,7 +300,7 @@ bool RFbInit(struct vnt_private *priv)
*
* Parameters:
* In:
- * byRFType
+ * rf_type
* byChannel - Channel number
* Out:
* none
@@ -308,12 +308,12 @@ bool RFbInit(struct vnt_private *priv)
* Return Value: true if succeeded; false if failed.
*
*/
-bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType,
+bool RFbSelectChannel(struct vnt_private *priv, unsigned char rf_type,
u16 byChannel)
{
bool ret = true;
- switch (byRFType) {
+ switch (rf_type) {
case RF_AIROHA:
case RF_AL2230S:
ret = RFbAL2230SelectChannel(priv, byChannel);
@@ -438,12 +438,12 @@ bool RFbSetPower(struct vnt_private *priv, unsigned int rate, u16 uCH)
break;
}
- if (priv->byCurPwr == byPwr)
+ if (priv->cur_pwr == byPwr)
return true;
ret = RFbRawSetPower(priv, byPwr, rate);
if (ret)
- priv->byCurPwr = byPwr;
+ priv->cur_pwr = byPwr;
return ret;
}
@@ -470,7 +470,7 @@ bool RFbRawSetPower(struct vnt_private *priv, unsigned char byPwr,
if (byPwr >= priv->max_pwr_level)
return false;
- switch (priv->byRFType) {
+ switch (priv->rf_type) {
case RF_AIROHA:
ret &= IFRFbWriteEmbedded(priv, al2230_power_table[byPwr]);
if (rate <= RATE_11M)
@@ -521,7 +521,7 @@ RFvRSSITodBm(struct vnt_private *priv, unsigned char byCurrRSSI, long *pldBm)
long a = 0;
unsigned char abyAIROHARF[4] = {0, 18, 0, 40};
- switch (priv->byRFType) {
+ switch (priv->rf_type) {
case RF_AIROHA:
case RF_AL2230S:
a = abyAIROHARF[byIdx];
diff --git a/drivers/staging/vt6655/rf.h b/drivers/staging/vt6655/rf.h
index 9fef81846a9f..6f842ac00526 100644
--- a/drivers/staging/vt6655/rf.h
+++ b/drivers/staging/vt6655/rf.h
@@ -58,7 +58,7 @@
/*--------------------- Export Functions --------------------------*/
bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData);
-bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType, u16 byChannel);
+bool RFbSelectChannel(struct vnt_private *priv, unsigned char rf_type, u16 byChannel);
bool RFbInit(struct vnt_private *priv);
bool rf_write_wake_prog_syn(struct vnt_private *priv, unsigned char rf_type, u16 channel);
bool RFbSetPower(struct vnt_private *priv, unsigned int rate, u16 uCH);
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 3e8c92675c82..35650f911ebc 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -3194,7 +3194,7 @@ static void hfa384x_usbin_txcompl(struct wlandevice *wlandev,
/* Was there an error? */
if (HFA384x_TXSTATUS_ISERROR(status))
- prism2sta_ev_txexc(wlandev, status);
+ netdev_dbg(wlandev->netdev, "TxExc status=0x%x.\n", status);
else
prism2sta_ev_tx(wlandev, status);
}
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index 048e1c3fe19b..8336435eccc2 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -312,8 +312,8 @@ int skb_p80211_to_ether(struct wlandevice *wlandev, u32 ethconv,
payload_length - 4);
if (foo) {
/* de-wep failed, drop skb. */
- pr_debug("Host de-WEP failed, dropping frame (%d).\n",
- foo);
+ netdev_dbg(netdev, "Host de-WEP failed, dropping frame (%d).\n",
+ foo);
wlandev->rx.decrypt_err++;
return 2;
}
@@ -340,7 +340,7 @@ int skb_p80211_to_ether(struct wlandevice *wlandev, u32 ethconv,
(e_llc->dsap != 0xaa || e_llc->ssap != 0xaa) &&
((!ether_addr_equal_unaligned(daddr, e_hdr->daddr)) ||
(!ether_addr_equal_unaligned(saddr, e_hdr->saddr)))) {
- pr_debug("802.3 ENCAP len: %d\n", payload_length);
+ netdev_dbg(netdev, "802.3 ENCAP len: %d\n", payload_length);
/* 802.3 Encapsulated */
/* Test for an overlength frame */
if (payload_length > (netdev->mtu + ETH_HLEN)) {
@@ -367,7 +367,7 @@ int skb_p80211_to_ether(struct wlandevice *wlandev, u32 ethconv,
(p80211_stt_findproto(be16_to_cpu(e_snap->type)))) ||
(memcmp(e_snap->oui, oui_rfc1042, WLAN_IEEE_OUI_LEN) !=
0))) {
- pr_debug("SNAP+RFC1042 len: %d\n", payload_length);
+ netdev_dbg(netdev, "SNAP+RFC1042 len: %d\n", payload_length);
/* it's a SNAP + RFC1042 frame && protocol is in STT */
/* build 802.3 + RFC1042 */
@@ -397,7 +397,7 @@ int skb_p80211_to_ether(struct wlandevice *wlandev, u32 ethconv,
(e_llc->dsap == 0xaa) &&
(e_llc->ssap == 0xaa) &&
(e_llc->ctl == 0x03)) {
- pr_debug("802.1h/RFC1042 len: %d\n", payload_length);
+ netdev_dbg(netdev, "802.1h/RFC1042 len: %d\n", payload_length);
/* it's an 802.1h frame || (an RFC1042 && protocol not in STT)
* build a DIXII + RFC894
*/
@@ -433,7 +433,7 @@ int skb_p80211_to_ether(struct wlandevice *wlandev, u32 ethconv,
/* chop off the 802.11 CRC */
skb_trim(skb, skb->len - WLAN_CRC_LEN);
} else {
- pr_debug("NON-ENCAP len: %d\n", payload_length);
+ netdev_dbg(netdev, "NON-ENCAP len: %d\n", payload_length);
/* any NON-ENCAP */
/* it's a generic 80211+LLC or IPX 'Raw 802.3' */
/* build an 802.3 frame */
diff --git a/drivers/staging/wlan-ng/p80211netdev.h b/drivers/staging/wlan-ng/p80211netdev.h
index f5186380b629..485f2c697f5f 100644
--- a/drivers/staging/wlan-ng/p80211netdev.h
+++ b/drivers/staging/wlan-ng/p80211netdev.h
@@ -114,9 +114,6 @@ struct p80211_frmrx {
u32 decrypt_err;
};
-/* called by /proc/net/wireless */
-struct iw_statistics *p80211wext_get_wireless_stats(struct net_device *dev);
-
/* WEP stuff */
#define NUM_WEPKEYS 4
#define MAX_KEYLEN 32
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index 5d03b2b9aab4..3ccd11041646 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -725,7 +725,7 @@ static int plugimage(struct imgchunk *fchunk, unsigned int nfchunks,
if (j == -1) { /* plug the filename */
memset(dest, 0, s3plug[i].len);
- strncpy(dest, PRISM2_USB_FWFILE, s3plug[i].len - 1);
+ strscpy(dest, PRISM2_USB_FWFILE, s3plug[i].len);
} else { /* plug a PDR */
memcpy(dest, &pda->rec[j]->data, s3plug[i].len);
}
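The strncpy() to strscpy() change above follows the usual kernel guidance: strscpy() takes the full destination size and always NUL-terminates, so the manual "- 1" is no longer needed. Below is a minimal, stand-alone user-space sketch (not the kernel implementation; the real strscpy() returns -E2BIG on truncation rather than the copied length, and the firmware filename here is only an example string) illustrating the truncate-and-terminate behaviour the patch relies on:

#include <stdio.h>
#include <string.h>

/* Illustrative strscpy-like helper: copy at most size-1 bytes and
 * always terminate the destination. */
static size_t sketch_strscpy(char *dst, const char *src, size_t size)
{
        size_t len = strlen(src);

        if (!size)
                return 0;
        if (len >= size)
                len = size - 1;         /* truncate, keep room for the NUL */
        memcpy(dst, src, len);
        dst[len] = '\0';
        return len;
}

int main(void)
{
        char buf[8];

        sketch_strscpy(buf, "example_fw.bin", sizeof(buf));
        printf("%s\n", buf);            /* "example" - truncated but terminated */
        return 0;
}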
diff --git a/drivers/staging/wlan-ng/prism2mgmt.h b/drivers/staging/wlan-ng/prism2mgmt.h
index 083a055ee986..17222516e85e 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.h
+++ b/drivers/staging/wlan-ng/prism2mgmt.h
@@ -45,9 +45,7 @@ extern int prism2_reset_settletime;
u32 prism2sta_ifstate(struct wlandevice *wlandev, u32 ifstate);
-void prism2sta_ev_info(struct wlandevice *wlandev,
- struct hfa384x_inf_frame *inf);
-void prism2sta_ev_txexc(struct wlandevice *wlandev, u16 status);
+void prism2sta_ev_info(struct wlandevice *wlandev, struct hfa384x_inf_frame *inf);
void prism2sta_ev_tx(struct wlandevice *wlandev, u16 status);
void prism2sta_ev_alloc(struct wlandevice *wlandev);
@@ -78,14 +76,6 @@ void prism2mgmt_pstr2bytestr(struct hfa384x_bytestr *bytestr,
void prism2mgmt_bytestr2pstr(struct hfa384x_bytestr *bytestr,
struct p80211pstrd *pstr);
-/* functions to convert Group Addresses */
-void prism2mgmt_get_grpaddr(u32 did, struct p80211pstrd *pstr,
- struct hfa384x *priv);
-int prism2mgmt_set_grpaddr(u32 did,
- u8 *prism2buf, struct p80211pstrd *pstr,
- struct hfa384x *priv);
-int prism2mgmt_get_grpaddr_index(u32 did);
-
void prism2sta_processing_defer(struct work_struct *data);
void prism2sta_commsqual_defer(struct work_struct *data);
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index 57180bb71699..cb6c7a9fb8f3 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -90,9 +90,6 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev);
static int prism2sta_globalsetup(struct wlandevice *wlandev);
static int prism2sta_setmulticast(struct wlandevice *wlandev,
struct net_device *dev);
-
-static void prism2sta_inf_handover(struct wlandevice *wlandev,
- struct hfa384x_inf_frame *inf);
static void prism2sta_inf_tallies(struct wlandevice *wlandev,
struct hfa384x_inf_frame *inf);
static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev,
@@ -268,46 +265,50 @@ static int prism2sta_mlmerequest(struct wlandevice *wlandev,
switch (msg->msgcode) {
case DIDMSG_DOT11REQ_MIBGET:
- pr_debug("Received mibget request\n");
+ netdev_dbg(wlandev->netdev, "Received mibget request\n");
result = prism2mgmt_mibset_mibget(wlandev, msg);
break;
case DIDMSG_DOT11REQ_MIBSET:
- pr_debug("Received mibset request\n");
+ netdev_dbg(wlandev->netdev, "Received mibset request\n");
result = prism2mgmt_mibset_mibget(wlandev, msg);
break;
case DIDMSG_DOT11REQ_SCAN:
- pr_debug("Received scan request\n");
+ netdev_dbg(wlandev->netdev, "Received scan request\n");
result = prism2mgmt_scan(wlandev, msg);
break;
case DIDMSG_DOT11REQ_SCAN_RESULTS:
- pr_debug("Received scan_results request\n");
+ netdev_dbg(wlandev->netdev, "Received scan_results request\n");
result = prism2mgmt_scan_results(wlandev, msg);
break;
case DIDMSG_DOT11REQ_START:
- pr_debug("Received mlme start request\n");
+ netdev_dbg(wlandev->netdev, "Received mlme start request\n");
result = prism2mgmt_start(wlandev, msg);
break;
/*
* Prism2 specific messages
*/
case DIDMSG_P2REQ_READPDA:
- pr_debug("Received mlme readpda request\n");
+ netdev_dbg(wlandev->netdev, "Received mlme readpda request\n");
result = prism2mgmt_readpda(wlandev, msg);
break;
case DIDMSG_P2REQ_RAMDL_STATE:
- pr_debug("Received mlme ramdl_state request\n");
+ netdev_dbg(wlandev->netdev,
+ "Received mlme ramdl_state request\n");
result = prism2mgmt_ramdl_state(wlandev, msg);
break;
case DIDMSG_P2REQ_RAMDL_WRITE:
- pr_debug("Received mlme ramdl_write request\n");
+ netdev_dbg(wlandev->netdev,
+ "Received mlme ramdl_write request\n");
result = prism2mgmt_ramdl_write(wlandev, msg);
break;
case DIDMSG_P2REQ_FLASHDL_STATE:
- pr_debug("Received mlme flashdl_state request\n");
+ netdev_dbg(wlandev->netdev,
+ "Received mlme flashdl_state request\n");
result = prism2mgmt_flashdl_state(wlandev, msg);
break;
case DIDMSG_P2REQ_FLASHDL_WRITE:
- pr_debug("Received mlme flashdl_write request\n");
+ netdev_dbg(wlandev->netdev,
+ "Received mlme flashdl_write request\n");
result = prism2mgmt_flashdl_write(wlandev, msg);
break;
/*
@@ -318,7 +319,7 @@ static int prism2sta_mlmerequest(struct wlandevice *wlandev,
case DIDMSG_LNXREQ_IFSTATE: {
struct p80211msg_lnxreq_ifstate *ifstatemsg;
- pr_debug("Received mlme ifstate request\n");
+ netdev_dbg(wlandev->netdev, "Received mlme ifstate request\n");
ifstatemsg = (struct p80211msg_lnxreq_ifstate *)msg;
result = prism2sta_ifstate(wlandev,
ifstatemsg->ifstate.data);
@@ -329,17 +330,18 @@ static int prism2sta_mlmerequest(struct wlandevice *wlandev,
break;
}
case DIDMSG_LNXREQ_WLANSNIFF:
- pr_debug("Received mlme wlansniff request\n");
+ netdev_dbg(wlandev->netdev,
+ "Received mlme wlansniff request\n");
result = prism2mgmt_wlansniff(wlandev, msg);
break;
case DIDMSG_LNXREQ_AUTOJOIN:
- pr_debug("Received mlme autojoin request\n");
+ netdev_dbg(wlandev->netdev, "Received mlme autojoin request\n");
result = prism2mgmt_autojoin(wlandev, msg);
break;
case DIDMSG_LNXREQ_COMMSQUALITY: {
struct p80211msg_lnxreq_commsquality *qualmsg;
- pr_debug("Received commsquality request\n");
+ netdev_dbg(wlandev->netdev, "Received commsquality request\n");
qualmsg = (struct p80211msg_lnxreq_commsquality *)msg;
@@ -392,8 +394,8 @@ u32 prism2sta_ifstate(struct wlandevice *wlandev, u32 ifstate)
result = P80211ENUM_resultcode_implementation_failure;
- pr_debug("Current MSD state(%d), requesting(%d)\n",
- wlandev->msdstate, ifstate);
+ netdev_dbg(wlandev->netdev, "Current MSD state(%d), requesting(%d)\n",
+ wlandev->msdstate, ifstate);
switch (ifstate) {
case P80211ENUM_ifstate_fwload:
switch (wlandev->msdstate) {
@@ -923,30 +925,6 @@ exit:
}
/*
- * prism2sta_inf_handover
- *
- * Handles the receipt of a Handover info frame. Should only be present
- * in APs only.
- *
- * Arguments:
- * wlandev wlan device structure
- * inf ptr to info frame (contents in hfa384x order)
- *
- * Returns:
- * nothing
- *
- * Side effects:
- *
- * Call context:
- * interrupt
- */
-static void prism2sta_inf_handover(struct wlandevice *wlandev,
- struct hfa384x_inf_frame *inf)
-{
- pr_debug("received infoframe:HANDOVER (unhandled)\n");
-}
-
-/*
* prism2sta_inf_tallies
*
* Handles the receipt of a CommTallies info frame.
@@ -1025,15 +1003,15 @@ static void prism2sta_inf_scanresults(struct wlandevice *wlandev,
nbss /= sizeof(struct hfa384x_scan_result_sub);
/* Print em */
- pr_debug("rx scanresults, reason=%d, nbss=%d:\n",
- inf->info.scanresult.scanreason, nbss);
+ netdev_dbg(wlandev->netdev, "rx scanresults, reason=%d, nbss=%d:\n",
+ inf->info.scanresult.scanreason, nbss);
for (i = 0; i < nbss; i++) {
- pr_debug("chid=%d anl=%d sl=%d bcnint=%d\n",
- sr->result[i].chid,
- sr->result[i].anl,
- sr->result[i].sl, sr->result[i].bcnint);
- pr_debug(" capinfo=0x%04x proberesp_rate=%d\n",
- sr->result[i].capinfo, sr->result[i].proberesp_rate);
+ netdev_dbg(wlandev->netdev, "chid=%d anl=%d sl=%d bcnint=%d\n",
+ sr->result[i].chid, sr->result[i].anl,
+ sr->result[i].sl, sr->result[i].bcnint);
+ netdev_dbg(wlandev->netdev,
+ " capinfo=0x%04x proberesp_rate=%d\n",
+ sr->result[i].capinfo, sr->result[i].proberesp_rate);
}
/* issue a join request */
joinreq.channel = sr->result[0].chid;
@@ -1071,7 +1049,7 @@ static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev,
int nbss;
nbss = (inf->framelen - 3) / 32;
- pr_debug("Received %d hostscan results\n", nbss);
+ netdev_dbg(wlandev->netdev, "Received %d hostscan results\n", nbss);
if (nbss > 32)
nbss = 32;
@@ -1134,13 +1112,14 @@ static void prism2sta_inf_chinforesults(struct wlandevice *wlandev,
chinforesult->pnl = result->pnl;
chinforesult->active = result->active;
- pr_debug("chinfo: channel %d, %s level (avg/peak)=%d/%d dB, pcf %d\n",
- chan + 1,
- (chinforesult->active & HFA384x_CHINFORESULT_BSSACTIVE)
- ? "signal" : "noise",
- chinforesult->anl, chinforesult->pnl,
- (chinforesult->active & HFA384x_CHINFORESULT_PCFACTIVE)
- ? 1 : 0);
+ netdev_dbg(wlandev->netdev,
+ "chinfo: channel %d, %s level (avg/peak)=%d/%d dB, pcf %d\n",
+ chan + 1,
+ (chinforesult->active & HFA384x_CHINFORESULT_BSSACTIVE) ?
+ "signal" : "noise",
+ chinforesult->anl,
+ chinforesult->pnl,
+ (chinforesult->active & HFA384x_CHINFORESULT_PCFACTIVE) ? 1 : 0);
n++;
}
atomic_set(&hw->channel_info.done, 2);
@@ -1219,9 +1198,9 @@ void prism2sta_processing_defer(struct work_struct *data)
wlandev->bssid,
WLAN_BSSID_LEN);
if (result) {
- pr_debug
- ("getconfig(0x%02x) failed, result = %d\n",
- HFA384x_RID_CURRENTBSSID, result);
+ netdev_dbg(wlandev->netdev,
+ "getconfig(0x%02x) failed, result = %d\n",
+ HFA384x_RID_CURRENTBSSID, result);
return;
}
@@ -1229,9 +1208,9 @@ void prism2sta_processing_defer(struct work_struct *data)
HFA384x_RID_CURRENTSSID,
&ssid, sizeof(ssid));
if (result) {
- pr_debug
- ("getconfig(0x%02x) failed, result = %d\n",
- HFA384x_RID_CURRENTSSID, result);
+ netdev_dbg(wlandev->netdev,
+ "getconfig(0x%02x) failed, result = %d\n",
+ HFA384x_RID_CURRENTSSID, result);
return;
}
prism2mgmt_bytestr2pstr((struct hfa384x_bytestr *)&ssid,
@@ -1242,9 +1221,9 @@ void prism2sta_processing_defer(struct work_struct *data)
HFA384x_RID_PORTSTATUS,
&portstatus);
if (result) {
- pr_debug
- ("getconfig(0x%02x) failed, result = %d\n",
- HFA384x_RID_PORTSTATUS, result);
+ netdev_dbg(wlandev->netdev,
+ "getconfig(0x%02x) failed, result = %d\n",
+ HFA384x_RID_PORTSTATUS, result);
return;
}
wlandev->macmode =
@@ -1302,8 +1281,9 @@ void prism2sta_processing_defer(struct work_struct *data)
HFA384x_RID_CURRENTBSSID,
wlandev->bssid, WLAN_BSSID_LEN);
if (result) {
- pr_debug("getconfig(0x%02x) failed, result = %d\n",
- HFA384x_RID_CURRENTBSSID, result);
+ netdev_dbg(wlandev->netdev,
+ "getconfig(0x%02x) failed, result = %d\n",
+ HFA384x_RID_CURRENTBSSID, result);
return;
}
@@ -1311,8 +1291,9 @@ void prism2sta_processing_defer(struct work_struct *data)
HFA384x_RID_CURRENTSSID,
&ssid, sizeof(ssid));
if (result) {
- pr_debug("getconfig(0x%02x) failed, result = %d\n",
- HFA384x_RID_CURRENTSSID, result);
+ netdev_dbg(wlandev->netdev,
+ "getconfig(0x%02x) failed, result = %d\n",
+ HFA384x_RID_CURRENTSSID, result);
return;
}
prism2mgmt_bytestr2pstr((struct hfa384x_bytestr *)&ssid,
@@ -1724,7 +1705,8 @@ void prism2sta_ev_info(struct wlandevice *wlandev,
/* Dispatch */
switch (inf->infotype) {
case HFA384x_IT_HANDOVERADDR:
- prism2sta_inf_handover(wlandev, inf);
+ netdev_dbg(wlandev->netdev,
+ "received infoframe:HANDOVER (unhandled)\n");
break;
case HFA384x_IT_COMMTALLIES:
prism2sta_inf_tallies(wlandev, inf);
@@ -1767,30 +1749,6 @@ void prism2sta_ev_info(struct wlandevice *wlandev,
}
/*
- * prism2sta_ev_txexc
- *
- * Handles the TxExc event. A Transmit Exception event indicates
- * that the MAC's TX process was unsuccessful - so the packet did
- * not get transmitted.
- *
- * Arguments:
- * wlandev wlan device structure
- * status tx frame status word
- *
- * Returns:
- * nothing
- *
- * Side effects:
- *
- * Call context:
- * interrupt
- */
-void prism2sta_ev_txexc(struct wlandevice *wlandev, u16 status)
-{
- pr_debug("TxExc status=0x%x.\n", status);
-}
-
-/*
* prism2sta_ev_tx
*
* Handles the Tx event.
@@ -1808,7 +1766,7 @@ void prism2sta_ev_txexc(struct wlandevice *wlandev, u16 status)
*/
void prism2sta_ev_tx(struct wlandevice *wlandev, u16 status)
{
- pr_debug("Tx Complete, status=0x%04x\n", status);
+ netdev_dbg(wlandev->netdev, "Tx Complete, status=0x%04x\n", status);
/* update linux network stats */
wlandev->netdev->stats.tx_packets++;
}
@@ -1917,10 +1875,10 @@ void prism2sta_commsqual_defer(struct work_struct *data)
return;
}
- pr_debug("commsqual %d %d %d\n",
- le16_to_cpu(hw->qual.cq_curr_bss),
- le16_to_cpu(hw->qual.asl_curr_bss),
- le16_to_cpu(hw->qual.anl_curr_fc));
+ netdev_dbg(wlandev->netdev, "commsqual %d %d %d\n",
+ le16_to_cpu(hw->qual.cq_curr_bss),
+ le16_to_cpu(hw->qual.asl_curr_bss),
+ le16_to_cpu(hw->qual.anl_curr_fc));
}
/* Get the signal rate */
@@ -1929,8 +1887,8 @@ void prism2sta_commsqual_defer(struct work_struct *data)
result = p80211req_dorequest(wlandev, (u8 *)&msg);
if (result) {
- pr_debug("get signal rate failed, result = %d\n",
- result);
+ netdev_dbg(wlandev->netdev,
+ "get signal rate failed, result = %d\n", result);
return;
}
@@ -1948,7 +1906,8 @@ void prism2sta_commsqual_defer(struct work_struct *data)
hw->txrate = 110;
break;
default:
- pr_debug("Bad ratebit (%d)\n", mibitem->data);
+ netdev_dbg(wlandev->netdev, "Bad ratebit (%d)\n",
+ mibitem->data);
}
/* Lastly, we need to make sure the BSSID didn't change on us */
@@ -1956,8 +1915,9 @@ void prism2sta_commsqual_defer(struct work_struct *data)
HFA384x_RID_CURRENTBSSID,
wlandev->bssid, WLAN_BSSID_LEN);
if (result) {
- pr_debug("getconfig(0x%02x) failed, result = %d\n",
- HFA384x_RID_CURRENTBSSID, result);
+ netdev_dbg(wlandev->netdev,
+ "getconfig(0x%02x) failed, result = %d\n",
+ HFA384x_RID_CURRENTBSSID, result);
return;
}
@@ -1965,8 +1925,9 @@ void prism2sta_commsqual_defer(struct work_struct *data)
HFA384x_RID_CURRENTSSID,
&ssid, sizeof(ssid));
if (result) {
- pr_debug("getconfig(0x%02x) failed, result = %d\n",
- HFA384x_RID_CURRENTSSID, result);
+ netdev_dbg(wlandev->netdev,
+ "getconfig(0x%02x) failed, result = %d\n",
+ HFA384x_RID_CURRENTSSID, result);
return;
}
prism2mgmt_bytestr2pstr((struct hfa384x_bytestr *)&ssid,
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 0eb342de0b00..5ddfd5d9ac7f 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -40,7 +40,10 @@ static const uuid_t tee_client_uuid_ns = UUID_INIT(0x58ac9ca0, 0x2086, 0x4683,
static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES);
static DEFINE_SPINLOCK(driver_lock);
-static struct class *tee_class;
+static const struct class tee_class = {
+ .name = "tee",
+};
+
static dev_t tee_devt;
struct tee_context *teedev_open(struct tee_device *teedev)
@@ -919,7 +922,7 @@ struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
teedesc->flags & TEE_DESC_PRIVILEGED ? "priv" : "",
teedev->id - offs);
- teedev->dev.class = tee_class;
+ teedev->dev.class = &tee_class;
teedev->dev.release = tee_release_device;
teedev->dev.parent = dev;
@@ -1112,7 +1115,7 @@ tee_client_open_context(struct tee_context *start,
dev = &start->teedev->dev;
do {
- dev = class_find_device(tee_class, dev, &match_data, match_dev);
+ dev = class_find_device(&tee_class, dev, &match_data, match_dev);
if (!dev) {
ctx = ERR_PTR(-ENOENT);
break;
@@ -1226,10 +1229,10 @@ static int __init tee_init(void)
{
int rc;
- tee_class = class_create("tee");
- if (IS_ERR(tee_class)) {
+ rc = class_register(&tee_class);
+ if (rc) {
pr_err("couldn't create class\n");
- return PTR_ERR(tee_class);
+ return rc;
}
rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee");
@@ -1249,8 +1252,7 @@ static int __init tee_init(void)
out_unreg_chrdev:
unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
out_unreg_class:
- class_destroy(tee_class);
- tee_class = NULL;
+ class_unregister(&tee_class);
return rc;
}
@@ -1259,8 +1261,7 @@ static void __exit tee_exit(void)
{
bus_unregister(&tee_bus_type);
unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
- class_destroy(tee_class);
- tee_class = NULL;
+ class_unregister(&tee_class);
}
subsys_initcall(tee_init);
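The tee core conversion above is part of the wider move away from class_create() toward statically defined, const struct class objects registered with class_register(). A minimal sketch of the same pattern for a hypothetical "foo" driver (names are illustrative, not from this patch; it assumes a kernel where class_register() accepts a const class, as the tee change itself relies on):

#include <linux/module.h>
#include <linux/device.h>

static const struct class foo_class = {
        .name = "foo",
};

static int __init foo_init(void)
{
        /* Previously: foo_class = class_create("foo"); */
        return class_register(&foo_class);
}

static void __exit foo_exit(void)
{
        /* Previously: class_destroy(foo_class); */
        class_unregister(&foo_class);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

Since the class object is const and lives in the module image, there is no pointer left to dangle or to reset to NULL on the error path, which is exactly what the tee error handling above no longer needs to do.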
diff --git a/drivers/thunderbolt/clx.c b/drivers/thunderbolt/clx.c
index 13d217ae98e6..787dfd1550e5 100644
--- a/drivers/thunderbolt/clx.c
+++ b/drivers/thunderbolt/clx.c
@@ -175,6 +175,28 @@ bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx)
}
/**
+ * tb_switch_clx_is_supported() - Is CLx supported on this type of router
+ * @sw: The router to check CLx support for
+ */
+static bool tb_switch_clx_is_supported(const struct tb_switch *sw)
+{
+ if (!clx_enabled)
+ return false;
+
+ if (sw->quirks & QUIRK_NO_CLX)
+ return false;
+
+ /*
+ * CLx is not enabled and validated on Intel USB4 platforms
+ * before Alder Lake.
+ */
+ if (tb_switch_is_tiger_lake(sw))
+ return false;
+
+ return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
+}
+
+/**
* tb_switch_clx_init() - Initialize router CL states
* @sw: Router
*
@@ -273,28 +295,6 @@ static int tb_switch_mask_clx_objections(struct tb_switch *sw)
sw->cap_lp + offset, ARRAY_SIZE(val));
}
-/**
- * tb_switch_clx_is_supported() - Is CLx supported on this type of router
- * @sw: The router to check CLx support for
- */
-bool tb_switch_clx_is_supported(const struct tb_switch *sw)
-{
- if (!clx_enabled)
- return false;
-
- if (sw->quirks & QUIRK_NO_CLX)
- return false;
-
- /*
- * CLx is not enabled and validated on Intel USB4 platforms
- * before Alder Lake.
- */
- if (tb_switch_is_tiger_lake(sw))
- return false;
-
- return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
-}
-
static bool validate_mask(unsigned int clx)
{
/* Previous states need to be enabled */
@@ -405,6 +405,9 @@ int tb_switch_clx_disable(struct tb_switch *sw)
if (!clx)
return 0;
+ if (sw->is_unplugged)
+ return clx;
+
up = tb_upstream_port(sw);
down = tb_switch_downstream_port(sw);
diff --git a/drivers/thunderbolt/dma_test.c b/drivers/thunderbolt/dma_test.c
index 39476fc48801..9e47a63f28e7 100644
--- a/drivers/thunderbolt/dma_test.c
+++ b/drivers/thunderbolt/dma_test.c
@@ -101,7 +101,7 @@ struct dma_test {
unsigned int packets_sent;
unsigned int packets_received;
unsigned int link_speed;
- unsigned int link_width;
+ enum tb_link_width link_width;
unsigned int crc_errors;
unsigned int buffer_overflow_errors;
enum dma_test_result result;
@@ -465,9 +465,9 @@ DMA_TEST_DEBUGFS_ATTR(packets_to_send, packets_to_send_get,
static int dma_test_set_bonding(struct dma_test *dt)
{
switch (dt->link_width) {
- case 2:
+ case TB_LINK_WIDTH_DUAL:
return tb_xdomain_lane_bonding_enable(dt->xd);
- case 1:
+ case TB_LINK_WIDTH_SINGLE:
tb_xdomain_lane_bonding_disable(dt->xd);
fallthrough;
default:
@@ -490,12 +490,8 @@ static void dma_test_check_errors(struct dma_test *dt, int ret)
if (!dt->error_code) {
if (dt->link_speed && dt->xd->link_speed != dt->link_speed) {
dt->error_code = DMA_TEST_SPEED_ERROR;
- } else if (dt->link_width) {
- const struct tb_xdomain *xd = dt->xd;
-
- if ((dt->link_width == 1 && xd->link_width != TB_LINK_WIDTH_SINGLE) ||
- (dt->link_width == 2 && xd->link_width < TB_LINK_WIDTH_DUAL))
- dt->error_code = DMA_TEST_WIDTH_ERROR;
+ } else if (dt->link_width && dt->link_width != dt->xd->link_width) {
+ dt->error_code = DMA_TEST_WIDTH_ERROR;
} else if (dt->packets_to_send != dt->packets_sent ||
dt->packets_to_receive != dt->packets_received ||
dt->crc_errors || dt->buffer_overflow_errors) {
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index ee03fd75a472..091a81bbdbdc 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -19,9 +19,9 @@ static void tb_dump_hop(const struct tb_path_hop *hop, const struct tb_regs_hop
tb_port_dbg(port, " In HopID: %d => Out port: %d Out HopID: %d\n",
hop->in_hop_index, regs->out_port, regs->next_hop);
- tb_port_dbg(port, " Weight: %d Priority: %d Credits: %d Drop: %d\n",
- regs->weight, regs->priority,
- regs->initial_credits, regs->drop_packages);
+ tb_port_dbg(port, " Weight: %d Priority: %d Credits: %d Drop: %d PM: %d\n",
+ regs->weight, regs->priority, regs->initial_credits,
+ regs->drop_packages, regs->pmps);
tb_port_dbg(port, " Counter enabled: %d Counter index: %d\n",
regs->counter_enable, regs->counter);
tb_port_dbg(port, " Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n",
@@ -535,6 +535,7 @@ int tb_path_activate(struct tb_path *path)
hop.next_hop = path->hops[i].next_hop_index;
hop.out_port = path->hops[i].out_port->port;
hop.initial_credits = path->hops[i].initial_credits;
+ hop.pmps = path->hops[i].pm_support;
hop.unknown1 = 0;
hop.enable = 1;
diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
index 488138a28ae1..e6bfa63b40ae 100644
--- a/drivers/thunderbolt/quirks.c
+++ b/drivers/thunderbolt/quirks.c
@@ -31,6 +31,9 @@ static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
{
struct tb_port *port;
+ if (tb_switch_is_icm(sw))
+ return;
+
tb_switch_for_each_port(sw, port) {
if (!tb_port_is_usb3_down(port))
continue;
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index 47becb363ada..d49d6628dbf2 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -94,6 +94,7 @@ static int tb_retimer_nvm_add(struct tb_retimer *rt)
goto err_nvm;
rt->nvm = nvm;
+ dev_dbg(&rt->dev, "NVM version %x.%x\n", nvm->major, nvm->minor);
return 0;
err_nvm:
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index bd5815f8f23b..1e15ffa79295 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -372,6 +372,7 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
ret = tb_nvm_add_active(nvm, nvm_read);
if (ret)
goto err_nvm;
+ tb_sw_dbg(sw, "NVM version %x.%x\n", nvm->major, nvm->minor);
}
if (!sw->no_nvm_upgrade) {
@@ -915,6 +916,48 @@ int tb_port_get_link_speed(struct tb_port *port)
}
/**
+ * tb_port_get_link_generation() - Returns link generation
+ * @port: Lane adapter
+ *
+ * Returns link generation as number or negative errno in case of
+ * failure. Does not distinguish between Thunderbolt 1 and Thunderbolt 2
+ * links so for those always returns 2.
+ */
+int tb_port_get_link_generation(struct tb_port *port)
+{
+ int ret;
+
+ ret = tb_port_get_link_speed(port);
+ if (ret < 0)
+ return ret;
+
+ switch (ret) {
+ case 40:
+ return 4;
+ case 20:
+ return 3;
+ default:
+ return 2;
+ }
+}
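The generation helper gives the later hunks a single place to gate Gen 4 specific behaviour (Gen 4 links come up bonded, support asymmetric widths, and cannot drop to a single lane). A hypothetical caller might use it like this (illustrative sketch only, not from this patch):

static bool example_can_use_single_lane(struct tb_port *port)
{
        int gen = tb_port_get_link_generation(port);

        /* Negative errno falls through as "no"; Gen 4 cannot be single. */
        return gen >= 2 && gen < 4;
}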
+
+static const char *width_name(enum tb_link_width width)
+{
+ switch (width) {
+ case TB_LINK_WIDTH_SINGLE:
+ return "symmetric, single lane";
+ case TB_LINK_WIDTH_DUAL:
+ return "symmetric, dual lanes";
+ case TB_LINK_WIDTH_ASYM_TX:
+ return "asymmetric, 3 transmitters, 1 receiver";
+ case TB_LINK_WIDTH_ASYM_RX:
+ return "asymmetric, 3 receivers, 1 transmitter";
+ default:
+ return "unknown";
+ }
+}
+
+/**
* tb_port_get_link_width() - Get current link width
* @port: Port to check (USB4 or CIO)
*
@@ -939,8 +982,15 @@ int tb_port_get_link_width(struct tb_port *port)
LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
}
-static bool tb_port_is_width_supported(struct tb_port *port,
- unsigned int width_mask)
+/**
+ * tb_port_width_supported() - Is the given link width supported
+ * @port: Port to check
+ * @width: Widths to check (bitmask)
+ *
+ * Can be called for any lane adapter. Checks if the given @width is
+ * supported by the hardware and returns %true if it is.
+ */
+bool tb_port_width_supported(struct tb_port *port, unsigned int width)
{
u32 phy, widths;
int ret;
@@ -948,20 +998,23 @@ static bool tb_port_is_width_supported(struct tb_port *port,
if (!port->cap_phy)
return false;
+ if (width & (TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX)) {
+ if (tb_port_get_link_generation(port) < 4 ||
+ !usb4_port_asym_supported(port))
+ return false;
+ }
+
ret = tb_port_read(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_0, 1);
if (ret)
return false;
- widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
- LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
-
- return widths & width_mask;
-}
-
-static bool is_gen4_link(struct tb_port *port)
-{
- return tb_port_get_link_speed(port) > 20;
+ /*
+ * The field encoding is the same as &enum tb_link_width (which is
+ * passed to @width).
+ */
+ widths = FIELD_GET(LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK, phy);
+ return widths & width;
}
/**
@@ -991,15 +1044,23 @@ int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width)
switch (width) {
case TB_LINK_WIDTH_SINGLE:
/* Gen 4 link cannot be single */
- if (is_gen4_link(port))
+ if (tb_port_get_link_generation(port) >= 4)
return -EOPNOTSUPP;
val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
break;
+
case TB_LINK_WIDTH_DUAL:
+ if (tb_port_get_link_generation(port) >= 4)
+ return usb4_port_asym_set_link_width(port, width);
val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
break;
+
+ case TB_LINK_WIDTH_ASYM_TX:
+ case TB_LINK_WIDTH_ASYM_RX:
+ return usb4_port_asym_set_link_width(port, width);
+
default:
return -EINVAL;
}
@@ -1124,7 +1185,7 @@ void tb_port_lane_bonding_disable(struct tb_port *port)
/**
* tb_port_wait_for_link_width() - Wait until link reaches specific width
* @port: Port to wait for
- * @width_mask: Expected link width mask
+ * @width: Expected link width (bitmask)
* @timeout_msec: Timeout in ms how long to wait
*
* Should be used after both ends of the link have been bonded (or
@@ -1133,14 +1194,15 @@ void tb_port_lane_bonding_disable(struct tb_port *port)
* within the given timeout, %0 if it did. Can be passed a mask of
* expected widths and succeeds if any of the widths is reached.
*/
-int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask,
+int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width,
int timeout_msec)
{
ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
int ret;
/* Gen 4 link does not support single lane */
- if ((width_mask & TB_LINK_WIDTH_SINGLE) && is_gen4_link(port))
+ if ((width & TB_LINK_WIDTH_SINGLE) &&
+ tb_port_get_link_generation(port) >= 4)
return -EOPNOTSUPP;
do {
@@ -1153,7 +1215,7 @@ int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask,
*/
if (ret != -EACCES)
return ret;
- } else if (ret & width_mask) {
+ } else if (ret & width) {
return 0;
}
@@ -1332,7 +1394,7 @@ int tb_pci_port_enable(struct tb_port *port, bool enable)
* tb_dp_port_hpd_is_active() - Is HPD already active
* @port: DP out port to check
*
- * Checks if the DP OUT adapter port has HDP bit already set.
+ * Checks if the DP OUT adapter port has HPD bit already set.
*/
int tb_dp_port_hpd_is_active(struct tb_port *port)
{
@@ -1344,14 +1406,14 @@ int tb_dp_port_hpd_is_active(struct tb_port *port)
if (ret)
return ret;
- return !!(data & ADP_DP_CS_2_HDP);
+ return !!(data & ADP_DP_CS_2_HPD);
}
/**
* tb_dp_port_hpd_clear() - Clear HPD from DP IN port
* @port: Port to clear HPD
*
- * If the DP IN port has HDP set, this function can be used to clear it.
+ * If the DP IN port has HPD set, this function can be used to clear it.
*/
int tb_dp_port_hpd_clear(struct tb_port *port)
{
@@ -1363,7 +1425,7 @@ int tb_dp_port_hpd_clear(struct tb_port *port)
if (ret)
return ret;
- data |= ADP_DP_CS_3_HDPC;
+ data |= ADP_DP_CS_3_HPDC;
return tb_port_write(port, &data, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_3, 1);
}
@@ -2697,6 +2759,38 @@ static int tb_switch_update_link_attributes(struct tb_switch *sw)
return 0;
}
+/* Must be called after tb_switch_update_link_attributes() */
+static void tb_switch_link_init(struct tb_switch *sw)
+{
+ struct tb_port *up, *down;
+ bool bonded;
+
+ if (!tb_route(sw) || tb_switch_is_icm(sw))
+ return;
+
+ tb_sw_dbg(sw, "current link speed %u.0 Gb/s\n", sw->link_speed);
+ tb_sw_dbg(sw, "current link width %s\n", width_name(sw->link_width));
+
+ bonded = sw->link_width >= TB_LINK_WIDTH_DUAL;
+
+ /*
+ * Gen 4 links come up as bonded so update the port structures
+ * accordingly.
+ */
+ up = tb_upstream_port(sw);
+ down = tb_switch_downstream_port(sw);
+
+ up->bonded = bonded;
+ if (up->dual_link_port)
+ up->dual_link_port->bonded = bonded;
+ tb_port_update_credits(up);
+
+ down->bonded = bonded;
+ if (down->dual_link_port)
+ down->dual_link_port->bonded = bonded;
+ tb_port_update_credits(down);
+}
+
/**
* tb_switch_lane_bonding_enable() - Enable lane bonding
* @sw: Switch to enable lane bonding
@@ -2705,24 +2799,20 @@ static int tb_switch_update_link_attributes(struct tb_switch *sw)
* switch. If conditions are correct and both switches support the feature,
* lanes are bonded. It is safe to call this to any switch.
*/
-int tb_switch_lane_bonding_enable(struct tb_switch *sw)
+static int tb_switch_lane_bonding_enable(struct tb_switch *sw)
{
struct tb_port *up, *down;
- u64 route = tb_route(sw);
- unsigned int width_mask;
+ unsigned int width;
int ret;
- if (!route)
- return 0;
-
if (!tb_switch_lane_bonding_possible(sw))
return 0;
up = tb_upstream_port(sw);
down = tb_switch_downstream_port(sw);
- if (!tb_port_is_width_supported(up, TB_LINK_WIDTH_DUAL) ||
- !tb_port_is_width_supported(down, TB_LINK_WIDTH_DUAL))
+ if (!tb_port_width_supported(up, TB_LINK_WIDTH_DUAL) ||
+ !tb_port_width_supported(down, TB_LINK_WIDTH_DUAL))
return 0;
/*
@@ -2746,21 +2836,10 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw)
}
/* Any of the widths are all bonded */
- width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
- TB_LINK_WIDTH_ASYM_RX;
-
- ret = tb_port_wait_for_link_width(down, width_mask, 100);
- if (ret) {
- tb_port_warn(down, "timeout enabling lane bonding\n");
- return ret;
- }
+ width = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
+ TB_LINK_WIDTH_ASYM_RX;
- tb_port_update_credits(down);
- tb_port_update_credits(up);
- tb_switch_update_link_attributes(sw);
-
- tb_sw_dbg(sw, "lane bonding enabled\n");
- return ret;
+ return tb_port_wait_for_link_width(down, width, 100);
}
/**
@@ -2770,20 +2849,27 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw)
* Disables lane bonding between @sw and parent. This can be called even
* if lanes were not bonded originally.
*/
-void tb_switch_lane_bonding_disable(struct tb_switch *sw)
+static int tb_switch_lane_bonding_disable(struct tb_switch *sw)
{
struct tb_port *up, *down;
int ret;
- if (!tb_route(sw))
- return;
-
up = tb_upstream_port(sw);
if (!up->bonded)
- return;
+ return 0;
- down = tb_switch_downstream_port(sw);
+ /*
+ * If the link is Gen 4 there is no way to switch the link to
+ * two single lane links so avoid that here. Also don't bother
+ * if the link is not up anymore (sw is unplugged).
+ */
+ ret = tb_port_get_link_generation(up);
+ if (ret < 0)
+ return ret;
+ if (ret >= 4)
+ return -EOPNOTSUPP;
+ down = tb_switch_downstream_port(sw);
tb_port_lane_bonding_disable(up);
tb_port_lane_bonding_disable(down);
@@ -2791,15 +2877,160 @@ void tb_switch_lane_bonding_disable(struct tb_switch *sw)
* It is fine if we get other errors as the router might have
* been unplugged.
*/
- ret = tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100);
- if (ret == -ETIMEDOUT)
- tb_sw_warn(sw, "timeout disabling lane bonding\n");
+ return tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100);
+}
+
+static int tb_switch_asym_enable(struct tb_switch *sw, enum tb_link_width width)
+{
+ struct tb_port *up, *down, *port;
+ enum tb_link_width down_width;
+ int ret;
+
+ up = tb_upstream_port(sw);
+ down = tb_switch_downstream_port(sw);
+
+ if (width == TB_LINK_WIDTH_ASYM_TX) {
+ down_width = TB_LINK_WIDTH_ASYM_RX;
+ port = down;
+ } else {
+ down_width = TB_LINK_WIDTH_ASYM_TX;
+ port = up;
+ }
+
+ ret = tb_port_set_link_width(up, width);
+ if (ret)
+ return ret;
+
+ ret = tb_port_set_link_width(down, down_width);
+ if (ret)
+ return ret;
+
+ /*
+ * Initiate the change in the router that one of its TX lanes is
+ * changing to RX but do so only if there is an actual change.
+ */
+ if (sw->link_width != width) {
+ ret = usb4_port_asym_start(port);
+ if (ret)
+ return ret;
+
+ ret = tb_port_wait_for_link_width(up, width, 100);
+ if (ret)
+ return ret;
+ }
+
+ sw->link_width = width;
+ return 0;
+}
+
+static int tb_switch_asym_disable(struct tb_switch *sw)
+{
+ struct tb_port *up, *down;
+ int ret;
+
+ up = tb_upstream_port(sw);
+ down = tb_switch_downstream_port(sw);
+
+ ret = tb_port_set_link_width(up, TB_LINK_WIDTH_DUAL);
+ if (ret)
+ return ret;
+
+ ret = tb_port_set_link_width(down, TB_LINK_WIDTH_DUAL);
+ if (ret)
+ return ret;
+
+ /*
+ * Initiate the change in the router that has three TX lanes and
+ * is changing one of its TX lanes to RX but only if there is a
+ * change in the link width.
+ */
+ if (sw->link_width > TB_LINK_WIDTH_DUAL) {
+ if (sw->link_width == TB_LINK_WIDTH_ASYM_TX)
+ ret = usb4_port_asym_start(up);
+ else
+ ret = usb4_port_asym_start(down);
+ if (ret)
+ return ret;
+
+ ret = tb_port_wait_for_link_width(up, TB_LINK_WIDTH_DUAL, 100);
+ if (ret)
+ return ret;
+ }
+
+ sw->link_width = TB_LINK_WIDTH_DUAL;
+ return 0;
+}
+
+/**
+ * tb_switch_set_link_width() - Configure router link width
+ * @sw: Router to configure
+ * @width: The new link width
+ *
+ * Set device router link width to @width from router upstream port
+ * perspective. Supports also asymmetric links if the routers boths side
+ * of the link supports it.
+ *
+ * Does nothing for host router.
+ *
+ * Returns %0 in case of success, negative errno otherwise.
+ */
+int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width)
+{
+ struct tb_port *up, *down;
+ int ret = 0;
+
+ if (!tb_route(sw))
+ return 0;
+
+ up = tb_upstream_port(sw);
+ down = tb_switch_downstream_port(sw);
+
+ switch (width) {
+ case TB_LINK_WIDTH_SINGLE:
+ ret = tb_switch_lane_bonding_disable(sw);
+ break;
+
+ case TB_LINK_WIDTH_DUAL:
+ if (sw->link_width == TB_LINK_WIDTH_ASYM_TX ||
+ sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
+ ret = tb_switch_asym_disable(sw);
+ if (ret)
+ break;
+ }
+ ret = tb_switch_lane_bonding_enable(sw);
+ break;
+
+ case TB_LINK_WIDTH_ASYM_TX:
+ case TB_LINK_WIDTH_ASYM_RX:
+ ret = tb_switch_asym_enable(sw, width);
+ break;
+ }
+
+ switch (ret) {
+ case 0:
+ break;
+
+ case -ETIMEDOUT:
+ tb_sw_warn(sw, "timeout changing link width\n");
+ return ret;
+
+ case -ENOTCONN:
+ case -EOPNOTSUPP:
+ case -ENODEV:
+ return ret;
+
+ default:
+ tb_sw_dbg(sw, "failed to change link width: %d\n", ret);
+ return ret;
+ }
tb_port_update_credits(down);
tb_port_update_credits(up);
+
tb_switch_update_link_attributes(sw);
- tb_sw_dbg(sw, "lane bonding disabled\n");
+ tb_sw_dbg(sw, "link width set to %s\n", width_name(width));
+ return ret;
}
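tb_switch_set_link_width() becomes the single entry point for width changes; callers no longer invoke the bonding enable/disable helpers directly. A hedged sketch of a hypothetical caller (not from this patch; the real connection-manager logic also accounts for both directions and already-allocated bandwidth) that picks a width from estimated demand:

static int example_adjust_link_width(struct tb_switch *sw, int demand_up,
                                     int demand_down, int threshold)
{
        enum tb_link_width width = TB_LINK_WIDTH_DUAL;  /* symmetric default */

        /*
         * Width is expressed from the device router's upstream port, so
         * heavy device-to-host traffic wants the extra transmitters and
         * heavy host-to-device traffic wants the extra receivers.
         */
        if (threshold && demand_up > threshold)
                width = TB_LINK_WIDTH_ASYM_TX;
        else if (threshold && demand_down > threshold)
                width = TB_LINK_WIDTH_ASYM_RX;

        if (sw->link_width == width)
                return 0;                       /* nothing to change */

        return tb_switch_set_link_width(sw, width);
}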
/**
@@ -2959,6 +3190,8 @@ int tb_switch_add(struct tb_switch *sw)
if (ret)
return ret;
+ tb_switch_link_init(sw);
+
ret = tb_switch_clx_init(sw);
if (ret)
return ret;
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 27bd6ca6f99e..5acdeb766860 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -16,8 +16,31 @@
#include "tb_regs.h"
#include "tunnel.h"
-#define TB_TIMEOUT 100 /* ms */
-#define MAX_GROUPS 7 /* max Group_ID is 7 */
+#define TB_TIMEOUT 100 /* ms */
+
+/*
+ * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
+ * direction. This is 40G - 10% guard band bandwidth.
+ */
+#define TB_ASYM_MIN (40000 * 90 / 100)
+
+/*
+ * Threshold bandwidth (in Mb/s) that is used to switch the links to
+ * asymmetric and back. This is selected as 45G which means when the
+ * request is higher than this, we switch the link to asymmetric, and
+ * when it is less than this we switch it back. The 45G is selected so
+ * that we still have 27G (of the total 72G) for bulk PCIe traffic when
+ * switching back to symmetric.
+ */
+#define TB_ASYM_THRESHOLD 45000
+
+#define MAX_GROUPS 7 /* max Group_ID is 7 */
+
+static unsigned int asym_threshold = TB_ASYM_THRESHOLD;
+module_param_named(asym_threshold, asym_threshold, uint, 0444);
+MODULE_PARM_DESC(asym_threshold,
+		 "threshold (Mb/s) when to switch Gen 4 link symmetry. 0 disables. (default: "
+ __MODULE_STRING(TB_ASYM_THRESHOLD) ")");
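A quick back-of-the-envelope check of the constants above (illustrative user-space arithmetic only, mirroring the comments): a Gen 4 lane carries 40 Gb/s, the 10% guard band leaves 36 Gb/s in a single direction on one lane (TB_ASYM_MIN) and about 72 Gb/s on a symmetric dual-lane link, so with the default 45 Gb/s threshold roughly 27 Gb/s remains for bulk PCIe traffic after switching back to symmetric:

#include <stdio.h>

int main(void)
{
        int lane = 40000;                       /* Gen 4 lane, Mb/s */
        int asym_min = lane * 90 / 100;         /* 36000, cf. TB_ASYM_MIN */
        int sym_total = 2 * lane * 90 / 100;    /* ~72000 on a dual-lane link */
        int threshold = 45000;                  /* TB_ASYM_THRESHOLD */

        printf("single direction minimum: %d Mb/s\n", asym_min);
        printf("symmetric link total:     %d Mb/s\n", sym_total);
        printf("left for PCIe:            %d Mb/s\n", sym_total - threshold);
        return 0;
}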
/**
* struct tb_cm - Simple Thunderbolt connection manager
@@ -190,7 +213,7 @@ static void tb_add_dp_resources(struct tb_switch *sw)
if (!tb_switch_query_dp_resource(sw, port))
continue;
- list_add_tail(&port->list, &tcm->dp_resources);
+ list_add(&port->list, &tcm->dp_resources);
tb_port_dbg(port, "DP IN resource available\n");
}
}
@@ -255,13 +278,13 @@ static int tb_enable_clx(struct tb_switch *sw)
* this in the future to cover the whole topology if it turns
* out to be beneficial.
*/
- while (sw && sw->config.depth > 1)
+ while (sw && tb_switch_depth(sw) > 1)
sw = tb_switch_parent(sw);
if (!sw)
return 0;
- if (sw->config.depth != 1)
+ if (tb_switch_depth(sw) != 1)
return 0;
/*
@@ -285,14 +308,32 @@ static int tb_enable_clx(struct tb_switch *sw)
return ret == -EOPNOTSUPP ? 0 : ret;
}
-/* Disables CL states up to the host router */
-static void tb_disable_clx(struct tb_switch *sw)
+/**
+ * tb_disable_clx() - Disable CL states up to host router
+ * @sw: Router to start
+ *
+ * Disables CL states from @sw up to the host router. Returns true if
+ * any CL states were disabled. This can be used to figure out whether
+ * the link was set up by us or by the boot firmware so we don't
+ * accidentally enable them if they were not enabled during discovery.
+ */
+static bool tb_disable_clx(struct tb_switch *sw)
{
+ bool disabled = false;
+
do {
- if (tb_switch_clx_disable(sw) < 0)
+ int ret;
+
+ ret = tb_switch_clx_disable(sw);
+ if (ret > 0)
+ disabled = true;
+ else if (ret < 0)
tb_sw_warn(sw, "failed to disable CL states\n");
+
sw = tb_switch_parent(sw);
} while (sw);
+
+ return disabled;
}
static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
@@ -553,7 +594,7 @@ static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
struct tb_switch *sw;
/* Pick the router that is deepest in the topology */
- if (dst_port->sw->config.depth > src_port->sw->config.depth)
+ if (tb_port_path_direction_downstream(src_port, dst_port))
sw = dst_port->sw;
else
sw = src_port->sw;
@@ -572,133 +613,294 @@ static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}
-static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
- struct tb_port *dst_port, int *available_up, int *available_down)
-{
- int usb3_consumed_up, usb3_consumed_down, ret;
- struct tb_cm *tcm = tb_priv(tb);
+/**
+ * tb_consumed_usb3_pcie_bandwidth() - Consumed USB3/PCIe bandwidth over a single link
+ * @tb: Domain structure
+ * @src_port: Source protocol adapter
+ * @dst_port: Destination protocol adapter
+ * @port: USB4 port the consumed bandwidth is calculated for
+ * @consumed_up: Consumed upstream bandwidth (Mb/s)
+ * @consumed_down: Consumed downstream bandwidth (Mb/s)
+ *
+ * Calculates consumed USB3 and PCIe bandwidth at @port on the path
+ * from @src_port to @dst_port. Does not take the tunnel starting from
+ * @src_port and ending at @dst_port into account.
+ */
+static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
+ struct tb_port *src_port,
+ struct tb_port *dst_port,
+ struct tb_port *port,
+ int *consumed_up,
+ int *consumed_down)
+{
+ int pci_consumed_up, pci_consumed_down;
struct tb_tunnel *tunnel;
- struct tb_port *port;
- tb_dbg(tb, "calculating available bandwidth between %llx:%u <-> %llx:%u\n",
- tb_route(src_port->sw), src_port->port, tb_route(dst_port->sw),
- dst_port->port);
+ *consumed_up = *consumed_down = 0;
tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
if (tunnel && tunnel->src_port != src_port &&
tunnel->dst_port != dst_port) {
- ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
- &usb3_consumed_down);
+ int ret;
+
+ ret = tb_tunnel_consumed_bandwidth(tunnel, consumed_up,
+ consumed_down);
if (ret)
return ret;
- } else {
- usb3_consumed_up = 0;
- usb3_consumed_down = 0;
}
- /* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */
- *available_up = *available_down = 120000;
+ /*
+ * If there is anything reserved for PCIe bulk traffic take it
+ * into account here too.
+ */
+ if (tb_tunnel_reserved_pci(port, &pci_consumed_up, &pci_consumed_down)) {
+ *consumed_up += pci_consumed_up;
+ *consumed_down += pci_consumed_down;
+ }
- /* Find the minimum available bandwidth over all links */
- tb_for_each_port_on_path(src_port, dst_port, port) {
- int link_speed, link_width, up_bw, down_bw;
+ return 0;
+}
- if (!tb_port_is_null(port))
+/**
+ * tb_consumed_dp_bandwidth() - Consumed DP bandwidth over a single link
+ * @tb: Domain structure
+ * @src_port: Source protocol adapter
+ * @dst_port: Destination protocol adapter
+ * @port: USB4 port the consumed bandwidth is calculated for
+ * @consumed_up: Consumed upstream bandwidth (Mb/s)
+ * @consumed_down: Consumed downstream bandwidth (Mb/s)
+ *
+ * Calculates consumed DP bandwidth at @port on the path from @src_port
+ * to @dst_port. Does not take the tunnel starting from @src_port and
+ * ending at @dst_port into account.
+ */
+static int tb_consumed_dp_bandwidth(struct tb *tb,
+ struct tb_port *src_port,
+ struct tb_port *dst_port,
+ struct tb_port *port,
+ int *consumed_up,
+ int *consumed_down)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel;
+ int ret;
+
+ *consumed_up = *consumed_down = 0;
+
+ /*
+ * Find all DP tunnels that cross the port and reduce
+ * their consumed bandwidth from the available.
+ */
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ int dp_consumed_up, dp_consumed_down;
+
+ if (tb_tunnel_is_invalid(tunnel))
continue;
- if (tb_is_upstream_port(port)) {
- link_speed = port->sw->link_speed;
+ if (!tb_tunnel_is_dp(tunnel))
+ continue;
+
+ if (!tb_tunnel_port_on_path(tunnel, port))
+ continue;
+
+ /*
+ * Ignore the DP tunnel between src_port and dst_port
+ * because it is the same tunnel and we may be
+ * re-calculating estimated bandwidth.
+ */
+ if (tunnel->src_port == src_port &&
+ tunnel->dst_port == dst_port)
+ continue;
+
+ ret = tb_tunnel_consumed_bandwidth(tunnel, &dp_consumed_up,
+ &dp_consumed_down);
+ if (ret)
+ return ret;
+
+ *consumed_up += dp_consumed_up;
+ *consumed_down += dp_consumed_down;
+ }
+
+ return 0;
+}
+
+static bool tb_asym_supported(struct tb_port *src_port, struct tb_port *dst_port,
+ struct tb_port *port)
+{
+ bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
+ enum tb_link_width width;
+
+ if (tb_is_upstream_port(port))
+ width = downstream ? TB_LINK_WIDTH_ASYM_RX : TB_LINK_WIDTH_ASYM_TX;
+ else
+ width = downstream ? TB_LINK_WIDTH_ASYM_TX : TB_LINK_WIDTH_ASYM_RX;
+
+ return tb_port_width_supported(port, width);
+}
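The direction-to-width mapping above is easy to misread, so here is a minimal standalone C sketch of it (the enum and helper below are simplified stand-ins for illustration only, not the kernel's definitions):

#include <stdio.h>
#include <stdbool.h>

enum asym_width { ASYM_TX, ASYM_RX };

/* Same selection as tb_asym_supported(): which asymmetric capability the
 * lane adapter must advertise, given the path direction and whether the
 * adapter is the upstream end of its link. */
static enum asym_width wanted_width(bool path_downstream, bool upstream_port)
{
	if (upstream_port)
		return path_downstream ? ASYM_RX : ASYM_TX;
	return path_downstream ? ASYM_TX : ASYM_RX;
}

int main(void)
{
	for (int d = 0; d < 2; d++)
		for (int u = 0; u < 2; u++)
			printf("downstream=%d upstream_port=%d -> %s\n", d, u,
			       wanted_width(d, u) == ASYM_RX ? "ASYM_RX" : "ASYM_TX");
	return 0;
}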
+
+/**
+ * tb_maximum_bandwidth() - Maximum bandwidth over a single link
+ * @tb: Domain structure
+ * @src_port: Source protocol adapter
+ * @dst_port: Destination protocol adapter
+ * @port: USB4 port the total bandwidth is calculated for
+ * @max_up: Maximum upstream bandwidth (Mb/s)
+ * @max_down: Maximum downstream bandwidth (Mb/s)
+ * @include_asym: Include bandwidth if the link is switched from
+ * symmetric to asymmetric
+ *
+ * Returns maximum possible bandwidth in @max_up and @max_down over a
+ * single link at @port. If @include_asym is set then includes the
+ * additional bandwidth if the links are transitioned into asymmetric in
+ * the direction from @src_port to @dst_port.
+ */
+static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port,
+ struct tb_port *dst_port, struct tb_port *port,
+ int *max_up, int *max_down, bool include_asym)
+{
+ bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
+ int link_speed, link_width, up_bw, down_bw;
+
+ /*
+ * Asymmetric bandwidth can be included only if it is actually
+ * supported by the lane adapter.
+ */
+ if (!tb_asym_supported(src_port, dst_port, port))
+ include_asym = false;
+
+ if (tb_is_upstream_port(port)) {
+ link_speed = port->sw->link_speed;
+ /*
+ * sw->link_width is from upstream perspective so we use
+ * the opposite for downstream of the host router.
+ */
+ if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
+ up_bw = link_speed * 3 * 1000;
+ down_bw = link_speed * 1 * 1000;
+ } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
+ up_bw = link_speed * 1 * 1000;
+ down_bw = link_speed * 3 * 1000;
+ } else if (include_asym) {
/*
- * sw->link_width is from upstream perspective
- * so we use the opposite for downstream of the
- * host router.
+ * The link is symmetric at the moment but we
+ * can switch it to asymmetric as needed. Report
+ * this bandwidth as available (even though it
+ * is not yet enabled).
*/
- if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
- up_bw = link_speed * 3 * 1000;
- down_bw = link_speed * 1 * 1000;
- } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
+ if (downstream) {
up_bw = link_speed * 1 * 1000;
down_bw = link_speed * 3 * 1000;
} else {
- up_bw = link_speed * port->sw->link_width * 1000;
- down_bw = up_bw;
+ up_bw = link_speed * 3 * 1000;
+ down_bw = link_speed * 1 * 1000;
}
} else {
- link_speed = tb_port_get_link_speed(port);
- if (link_speed < 0)
- return link_speed;
-
- link_width = tb_port_get_link_width(port);
- if (link_width < 0)
- return link_width;
-
- if (link_width == TB_LINK_WIDTH_ASYM_TX) {
+ up_bw = link_speed * port->sw->link_width * 1000;
+ down_bw = up_bw;
+ }
+ } else {
+ link_speed = tb_port_get_link_speed(port);
+ if (link_speed < 0)
+ return link_speed;
+
+ link_width = tb_port_get_link_width(port);
+ if (link_width < 0)
+ return link_width;
+
+ if (link_width == TB_LINK_WIDTH_ASYM_TX) {
+ up_bw = link_speed * 1 * 1000;
+ down_bw = link_speed * 3 * 1000;
+ } else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
+ up_bw = link_speed * 3 * 1000;
+ down_bw = link_speed * 1 * 1000;
+ } else if (include_asym) {
+ /*
+ * The link is symmetric at the moment but we
+ * can switch it to asymmetric as needed. Report
+ * this bandwidth as available (even though it
+ * is not yet enabled).
+ */
+ if (downstream) {
up_bw = link_speed * 1 * 1000;
down_bw = link_speed * 3 * 1000;
- } else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
+ } else {
up_bw = link_speed * 3 * 1000;
down_bw = link_speed * 1 * 1000;
- } else {
- up_bw = link_speed * link_width * 1000;
- down_bw = up_bw;
}
+ } else {
+ up_bw = link_speed * link_width * 1000;
+ down_bw = up_bw;
}
+ }
- /* Leave 10% guard band */
- up_bw -= up_bw / 10;
- down_bw -= down_bw / 10;
-
- tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw,
- down_bw);
+ /* Leave 10% guard band */
+ *max_up = up_bw - up_bw / 10;
+ *max_down = down_bw - down_bw / 10;
- /*
- * Find all DP tunnels that cross the port and reduce
- * their consumed bandwidth from the available.
- */
- list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
- int dp_consumed_up, dp_consumed_down;
+ tb_port_dbg(port, "link maximum bandwidth %d/%d Mb/s\n", *max_up, *max_down);
+ return 0;
+}
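A worked example of the arithmetic above, as a standalone sketch (the 40 Gb/s per-lane speed for a Gen 4 link is an assumption chosen to match the 120 Gb/s asymmetric maximum mentioned later in this hunk):

#include <stdio.h>

int main(void)
{
	int link_speed = 40;			/* Gb/s per lane, assumed Gen 4 */
	int up_bw, down_bw;

	/* Asymmetric: three lanes downstream, one lane upstream */
	down_bw = link_speed * 3 * 1000;	/* 120000 Mb/s */
	up_bw = link_speed * 1 * 1000;		/*  40000 Mb/s */

	/* Leave the same 10% guard band as tb_maximum_bandwidth() */
	printf("max %d/%d Mb/s\n", up_bw - up_bw / 10, down_bw - down_bw / 10);
	/* prints: max 36000/108000 Mb/s */
	return 0;
}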
- if (tb_tunnel_is_invalid(tunnel))
- continue;
+/**
+ * tb_available_bandwidth() - Available bandwidth for tunneling
+ * @tb: Domain structure
+ * @src_port: Source protocol adapter
+ * @dst_port: Destination protocol adapter
+ * @available_up: Available bandwidth upstream (Mb/s)
+ * @available_down: Available bandwidth downstream (Mb/s)
+ * @include_asym: Include bandwidth if the link is switched from
+ * symmetric to asymmetric
+ *
+ * Calculates maximum available bandwidth for protocol tunneling between
+ * @src_port and @dst_port at the moment. This is the minimum, over all
+ * links on the path, of the maximum link bandwidth reduced by the
+ * bandwidth currently consumed on that link.
+ *
+ * If @include_asym is true then the bandwidth that can be added by
+ * transitioning the links into asymmetric is also included (the links
+ * are not actually transitioned).
+ */
+static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
+ struct tb_port *dst_port, int *available_up,
+ int *available_down, bool include_asym)
+{
+ struct tb_port *port;
+ int ret;
- if (!tb_tunnel_is_dp(tunnel))
- continue;
+ /* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */
+ *available_up = *available_down = 120000;
- if (!tb_tunnel_port_on_path(tunnel, port))
- continue;
+ /* Find the minimum available bandwidth over all links */
+ tb_for_each_port_on_path(src_port, dst_port, port) {
+ int max_up, max_down, consumed_up, consumed_down;
- /*
- * Ignore the DP tunnel between src_port and
- * dst_port because it is the same tunnel and we
- * may be re-calculating estimated bandwidth.
- */
- if (tunnel->src_port == src_port &&
- tunnel->dst_port == dst_port)
- continue;
+ if (!tb_port_is_null(port))
+ continue;
- ret = tb_tunnel_consumed_bandwidth(tunnel,
- &dp_consumed_up,
- &dp_consumed_down);
- if (ret)
- return ret;
+ ret = tb_maximum_bandwidth(tb, src_port, dst_port, port,
+ &max_up, &max_down, include_asym);
+ if (ret)
+ return ret;
- up_bw -= dp_consumed_up;
- down_bw -= dp_consumed_down;
- }
+ ret = tb_consumed_usb3_pcie_bandwidth(tb, src_port, dst_port,
+ port, &consumed_up,
+ &consumed_down);
+ if (ret)
+ return ret;
+ max_up -= consumed_up;
+ max_down -= consumed_down;
- /*
- * If USB3 is tunneled from the host router down to the
- * branch leading to port we need to take USB3 consumed
- * bandwidth into account regardless whether it actually
- * crosses the port.
- */
- up_bw -= usb3_consumed_up;
- down_bw -= usb3_consumed_down;
+ ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, port,
+ &consumed_up, &consumed_down);
+ if (ret)
+ return ret;
+ max_up -= consumed_up;
+ max_down -= consumed_down;
- if (up_bw < *available_up)
- *available_up = up_bw;
- if (down_bw < *available_down)
- *available_down = down_bw;
+ if (max_up < *available_up)
+ *available_up = max_up;
+ if (max_down < *available_down)
+ *available_down = max_down;
}
if (*available_up < 0)
@@ -729,21 +931,21 @@ static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
if (!tunnel)
return;
- tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");
+ tb_tunnel_dbg(tunnel, "reclaiming unused bandwidth\n");
/*
* Calculate available bandwidth for the first hop USB3 tunnel.
* That determines the whole USB3 bandwidth for this branch.
*/
ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
- &available_up, &available_down);
+ &available_up, &available_down, false);
if (ret) {
- tb_warn(tb, "failed to calculate available bandwidth\n");
+ tb_tunnel_warn(tunnel, "failed to calculate available bandwidth\n");
return;
}
- tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
- available_up, available_down);
+ tb_tunnel_dbg(tunnel, "available bandwidth %d/%d Mb/s\n", available_up,
+ available_down);
tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
}
@@ -794,8 +996,8 @@ static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
return ret;
}
- ret = tb_available_bandwidth(tb, down, up, &available_up,
- &available_down);
+ ret = tb_available_bandwidth(tb, down, up, &available_up, &available_down,
+ false);
if (ret)
goto err_reclaim;
@@ -856,6 +1058,225 @@ static int tb_create_usb3_tunnels(struct tb_switch *sw)
return 0;
}
+/**
+ * tb_configure_asym() - Transition links to asymmetric if needed
+ * @tb: Domain structure
+ * @src_port: Source adapter to start the transition
+ * @dst_port: Destination adapter
+ * @requested_up: Additional bandwidth (Mb/s) required upstream
+ * @requested_down: Additional bandwidth (Mb/s) required downstream
+ *
+ * Transition links between @src_port and @dst_port into asymmetric, with
+ * three lanes in the direction from @src_port towards @dst_port and one lane
+ * in the opposite direction, if the bandwidth requirements
+ * (requested + currently consumed) on that link exceed @asym_threshold.
+ *
+ * Must be called with available >= requested over all links.
+ */
+static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
+ struct tb_port *dst_port, int requested_up,
+ int requested_down)
+{
+ struct tb_switch *sw;
+ bool clx, downstream;
+ struct tb_port *up;
+ int ret = 0;
+
+ if (!asym_threshold)
+ return 0;
+
+ /* Disable CL states before doing any transitions */
+ downstream = tb_port_path_direction_downstream(src_port, dst_port);
+ /* Pick the router deepest in the hierarchy */
+ if (downstream)
+ sw = dst_port->sw;
+ else
+ sw = src_port->sw;
+
+ clx = tb_disable_clx(sw);
+
+ tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
+ int consumed_up, consumed_down;
+ enum tb_link_width width;
+
+ ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
+ &consumed_up, &consumed_down);
+ if (ret)
+ break;
+
+ if (downstream) {
+ /*
+ * Downstream, so make sure the upstream traffic stays
+ * within 36G (40G minus the 10% guard band) and that the
+ * requested downstream bandwidth is above the threshold.
+ */
+ if (consumed_up + requested_up >= TB_ASYM_MIN) {
+ ret = -ENOBUFS;
+ break;
+ }
+ /* Does consumed + requested exceed the threshold? */
+ if (consumed_down + requested_down < asym_threshold)
+ continue;
+
+ width = TB_LINK_WIDTH_ASYM_RX;
+ } else {
+ /* Upstream, the opposite of above */
+ if (consumed_down + requested_down >= TB_ASYM_MIN) {
+ ret = -ENOBUFS;
+ break;
+ }
+ if (consumed_up + requested_up < asym_threshold)
+ continue;
+
+ width = TB_LINK_WIDTH_ASYM_TX;
+ }
+
+ if (up->sw->link_width == width)
+ continue;
+
+ if (!tb_port_width_supported(up, width))
+ continue;
+
+ tb_sw_dbg(up->sw, "configuring asymmetric link\n");
+
+ /*
+ * Here requested + consumed > threshold so we need to
+ * transition the link into asymmetric now.
+ */
+ ret = tb_switch_set_link_width(up->sw, width);
+ if (ret) {
+ tb_sw_warn(up->sw, "failed to set link width\n");
+ break;
+ }
+ }
+
+ /* Re-enable CL states if they were previously enabled */
+ if (clx)
+ tb_enable_clx(sw);
+
+ return ret;
+}
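The decision inside the loop can be sketched in isolation as standalone C (the threshold and minimum values here are illustrative assumptions; in the driver they come from the asym_threshold module parameter and TB_ASYM_MIN, both defined elsewhere in the patch):

#include <stdio.h>

#define EXAMPLE_ASYM_THRESHOLD	25000	/* Mb/s, assumed for the example */
#define EXAMPLE_ASYM_MIN	36000	/* Mb/s, i.e. 40G minus the 10% guard band */

/* Downstream-path decision mirrored from tb_configure_asym(): go asymmetric
 * only when downstream demand crosses the threshold while upstream demand
 * still fits in the single remaining lane. Returns -1 where the driver
 * would return -ENOBUFS. */
static int should_go_asym(int consumed_up, int requested_up,
			  int consumed_down, int requested_down)
{
	if (consumed_up + requested_up >= EXAMPLE_ASYM_MIN)
		return -1;
	return consumed_down + requested_down >= EXAMPLE_ASYM_THRESHOLD;
}

int main(void)
{
	printf("%d\n", should_go_asym(500, 0, 20000, 8000));	/* 1: transition */
	printf("%d\n", should_go_asym(500, 0, 10000, 8000));	/* 0: stay symmetric */
	return 0;
}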
+
+/**
+ * tb_configure_sym() - Transition links to symmetric if possible
+ * @tb: Domain structure
+ * @src_port: Source adapter to start the transition
+ * @dst_port: Destination adapter
+ * @requested_up: New lower bandwidth request upstream (Mb/s)
+ * @requested_down: New lower bandwidth request downstream (Mb/s)
+ *
+ * Goes over each link from @src_port to @dst_port and tries to
+ * transition the link to symmetric if the currently consumed bandwidth
+ * allows.
+ */
+static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
+ struct tb_port *dst_port, int requested_up,
+ int requested_down)
+{
+ struct tb_switch *sw;
+ bool clx, downstream;
+ struct tb_port *up;
+ int ret = 0;
+
+ if (!asym_threshold)
+ return 0;
+
+ /* Disable CL states before doing any transitions */
+ downstream = tb_port_path_direction_downstream(src_port, dst_port);
+ /* Pick the router deepest in the hierarchy */
+ if (downstream)
+ sw = dst_port->sw;
+ else
+ sw = src_port->sw;
+
+ clx = tb_disable_clx(sw);
+
+ tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
+ int consumed_up, consumed_down;
+
+ /* Already symmetric */
+ if (up->sw->link_width <= TB_LINK_WIDTH_DUAL)
+ continue;
+ /* Unplugged, no need to switch */
+ if (up->sw->is_unplugged)
+ continue;
+
+ ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
+ &consumed_up, &consumed_down);
+ if (ret)
+ break;
+
+ if (downstream) {
+ /*
+ * Downstream, so we want consumed_down < threshold.
+ * Upstream traffic should already be below 36G (40G minus
+ * the 10% guard band) because the link was configured
+ * asymmetric earlier.
+ */
+ if (consumed_down + requested_down >= asym_threshold)
+ continue;
+ } else {
+ if (consumed_up + requested_up >= asym_threshold)
+ continue;
+ }
+
+ if (up->sw->link_width == TB_LINK_WIDTH_DUAL)
+ continue;
+
+ tb_sw_dbg(up->sw, "configuring symmetric link\n");
+
+ ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL);
+ if (ret) {
+ tb_sw_warn(up->sw, "failed to set link width\n");
+ break;
+ }
+ }
+
+ /* Re-enable CL states if they were previously enabled */
+ if (clx)
+ tb_enable_clx(sw);
+
+ return ret;
+}
+
+static void tb_configure_link(struct tb_port *down, struct tb_port *up,
+ struct tb_switch *sw)
+{
+ struct tb *tb = sw->tb;
+
+ /* Link the routers using both links if available */
+ down->remote = up;
+ up->remote = down;
+ if (down->dual_link_port && up->dual_link_port) {
+ down->dual_link_port->remote = up->dual_link_port;
+ up->dual_link_port->remote = down->dual_link_port;
+ }
+
+ /*
+ * Enable lane bonding if the link is currently two single lane
+ * links.
+ */
+ if (sw->link_width < TB_LINK_WIDTH_DUAL)
+ tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);
+
+ /*
+ * If the device router comes up with a symmetric link and is
+ * connected deeper in the hierarchy, transition the links
+ * above it into symmetric too if bandwidth allows.
+ */
+ if (tb_switch_depth(sw) > 1 &&
+ tb_port_get_link_generation(up) >= 4 &&
+ up->sw->link_width == TB_LINK_WIDTH_DUAL) {
+ struct tb_port *host_port;
+
+ host_port = tb_port_at(tb_route(sw), tb->root_switch);
+ tb_configure_sym(tb, host_port, up, 0, 0);
+ }
+
+ /* Set the link configured */
+ tb_switch_configure_link(sw);
+}
+
static void tb_scan_port(struct tb_port *port);
/*
@@ -964,19 +1385,9 @@ static void tb_scan_port(struct tb_port *port)
goto out_rpm_put;
}
- /* Link the switches using both links if available */
upstream_port = tb_upstream_port(sw);
- port->remote = upstream_port;
- upstream_port->remote = port;
- if (port->dual_link_port && upstream_port->dual_link_port) {
- port->dual_link_port->remote = upstream_port->dual_link_port;
- upstream_port->dual_link_port->remote = port->dual_link_port;
- }
+ tb_configure_link(port, upstream_port, sw);
- /* Enable lane bonding if supported */
- tb_switch_lane_bonding_enable(sw);
- /* Set the link configured */
- tb_switch_configure_link(sw);
/*
* CL0s and CL1 are enabled and supported together.
* Silently ignore CLx enabling in case CLx is not supported.
@@ -1040,6 +1451,11 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
* deallocated properly.
*/
tb_switch_dealloc_dp_resource(src_port->sw, src_port);
+ /*
+ * If bandwidth on a link is < asym_threshold
+ * transition the link to symmetric.
+ */
+ tb_configure_sym(tb, src_port, dst_port, 0, 0);
/* Now we can allow the domain to runtime suspend again */
pm_runtime_mark_last_busy(&dst_port->sw->dev);
pm_runtime_put_autosuspend(&dst_port->sw->dev);
@@ -1092,7 +1508,8 @@ static void tb_free_unplugged_children(struct tb_switch *sw)
tb_retimer_remove_all(port);
tb_remove_dp_resources(port->remote->sw);
tb_switch_unconfigure_link(port->remote->sw);
- tb_switch_lane_bonding_disable(port->remote->sw);
+ tb_switch_set_link_width(port->remote->sw,
+ TB_LINK_WIDTH_SINGLE);
tb_switch_remove(port->remote->sw);
port->remote = NULL;
if (port->dual_link_port)
@@ -1188,7 +1605,7 @@ tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
ret = tb_release_unused_usb3_bandwidth(tb,
first_tunnel->src_port, first_tunnel->dst_port);
if (ret) {
- tb_port_warn(in,
+ tb_tunnel_warn(tunnel,
"failed to release unused bandwidth\n");
break;
}
@@ -1196,9 +1613,9 @@ tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
out = tunnel->dst_port;
ret = tb_available_bandwidth(tb, in, out, &estimated_up,
- &estimated_down);
+ &estimated_down, true);
if (ret) {
- tb_port_warn(in,
+ tb_tunnel_warn(tunnel,
"failed to re-calculate estimated bandwidth\n");
break;
}
@@ -1209,16 +1626,18 @@ tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
* - available bandwidth along the path
* - bandwidth allocated for USB 3.x but not used.
*/
- tb_port_dbg(in, "re-calculated estimated bandwidth %u/%u Mb/s\n",
- estimated_up, estimated_down);
+ tb_tunnel_dbg(tunnel,
+ "re-calculated estimated bandwidth %u/%u Mb/s\n",
+ estimated_up, estimated_down);
- if (in->sw->config.depth < out->sw->config.depth)
+ if (tb_port_path_direction_downstream(in, out))
estimated_bw = estimated_down;
else
estimated_bw = estimated_up;
if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw))
- tb_port_warn(in, "failed to update estimated bandwidth\n");
+ tb_tunnel_warn(tunnel,
+ "failed to update estimated bandwidth\n");
}
if (first_tunnel)
@@ -1282,18 +1701,14 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
return NULL;
}
-static void tb_tunnel_dp(struct tb *tb)
+static bool tb_tunnel_one_dp(struct tb *tb)
{
int available_up, available_down, ret, link_nr;
struct tb_cm *tcm = tb_priv(tb);
struct tb_port *port, *in, *out;
+ int consumed_up, consumed_down;
struct tb_tunnel *tunnel;
- if (!tb_acpi_may_tunnel_dp()) {
- tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
- return;
- }
-
/*
* Find pair of inactive DP IN and DP OUT adapters and then
* establish a DP tunnel between them.
@@ -1311,22 +1726,21 @@ static void tb_tunnel_dp(struct tb *tb)
continue;
}
- tb_port_dbg(port, "DP IN available\n");
+ in = port;
+ tb_port_dbg(in, "DP IN available\n");
out = tb_find_dp_out(tb, port);
- if (out) {
- in = port;
+ if (out)
break;
- }
}
if (!in) {
tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
- return;
+ return false;
}
if (!out) {
tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
- return;
+ return false;
}
/*
@@ -1369,7 +1783,8 @@ static void tb_tunnel_dp(struct tb *tb)
goto err_detach_group;
}
- ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
+ ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
+ true);
if (ret)
goto err_reclaim_usb;
@@ -1391,6 +1806,13 @@ static void tb_tunnel_dp(struct tb *tb)
list_add_tail(&tunnel->list, &tcm->tunnel_list);
tb_reclaim_usb3_bandwidth(tb, in, out);
+ /*
+ * Transition the links to asymmetric if the consumption exceeds
+ * the threshold.
+ */
+ if (!tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down))
+ tb_configure_asym(tb, in, out, consumed_up, consumed_down);
+
/* Update the domain with the new bandwidth estimation */
tb_recalc_estimated_bandwidth(tb);
@@ -1399,7 +1821,7 @@ static void tb_tunnel_dp(struct tb *tb)
* TMU mode to HiFi for CL0s to work.
*/
tb_increase_tmu_accuracy(tunnel);
- return;
+ return true;
err_free:
tb_tunnel_free(tunnel);
@@ -1414,6 +1836,19 @@ err_rpm_put:
pm_runtime_put_autosuspend(&out->sw->dev);
pm_runtime_mark_last_busy(&in->sw->dev);
pm_runtime_put_autosuspend(&in->sw->dev);
+
+ return false;
+}
+
+static void tb_tunnel_dp(struct tb *tb)
+{
+ if (!tb_acpi_may_tunnel_dp()) {
+ tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
+ return;
+ }
+
+ while (tb_tunnel_one_dp(tb))
+ ;
}
static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
@@ -1701,7 +2136,8 @@ static void tb_handle_hotplug(struct work_struct *work)
tb_remove_dp_resources(port->remote->sw);
tb_switch_tmu_disable(port->remote->sw);
tb_switch_unconfigure_link(port->remote->sw);
- tb_switch_lane_bonding_disable(port->remote->sw);
+ tb_switch_set_link_width(port->remote->sw,
+ TB_LINK_WIDTH_SINGLE);
tb_switch_remove(port->remote->sw);
port->remote = NULL;
if (port->dual_link_port)
@@ -1781,8 +2217,8 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
in = tunnel->src_port;
out = tunnel->dst_port;
- tb_port_dbg(in, "bandwidth allocated currently %d/%d Mb/s\n",
- allocated_up, allocated_down);
+ tb_tunnel_dbg(tunnel, "bandwidth allocated currently %d/%d Mb/s\n",
+ allocated_up, allocated_down);
/*
* If we get rounded up request from graphics side, say HBR2 x 4
@@ -1823,20 +2259,26 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
else if (requested_down_corrected < 0)
requested_down_corrected = 0;
- tb_port_dbg(in, "corrected bandwidth request %d/%d Mb/s\n",
- requested_up_corrected, requested_down_corrected);
+ tb_tunnel_dbg(tunnel, "corrected bandwidth request %d/%d Mb/s\n",
+ requested_up_corrected, requested_down_corrected);
if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
(*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
- tb_port_dbg(in, "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
- requested_up_corrected, requested_down_corrected,
- max_up_rounded, max_down_rounded);
+ tb_tunnel_dbg(tunnel,
+ "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
+ requested_up_corrected, requested_down_corrected,
+ max_up_rounded, max_down_rounded);
return -ENOBUFS;
}
if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
(*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
/*
+ * If bandwidth on a link is < asym_threshold transition
+ * the link to symmetric.
+ */
+ tb_configure_sym(tb, in, out, *requested_up, *requested_down);
+ /*
* If requested bandwidth is less or equal than what is
* currently allocated to that tunnel we simply change
* the reservation of the tunnel. Since all the tunnels
@@ -1861,17 +2303,33 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
* are also in the same group but we use the same function here
* that we use with the normal bandwidth allocation).
*/
- ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
+ ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
+ true);
if (ret)
goto reclaim;
- tb_port_dbg(in, "bandwidth available for allocation %d/%d Mb/s\n",
- available_up, available_down);
+ tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d Mb/s\n",
+ available_up, available_down);
if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
(*requested_down >= 0 && available_down >= requested_down_corrected)) {
+ /*
+ * If bandwidth on a link is >= asym_threshold
+ * transition the link to asymmetric.
+ */
+ ret = tb_configure_asym(tb, in, out, *requested_up,
+ *requested_down);
+ if (ret) {
+ tb_configure_sym(tb, in, out, 0, 0);
+ return ret;
+ }
+
ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
requested_down);
+ if (ret) {
+ tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
+ tb_configure_sym(tb, in, out, 0, 0);
+ }
} else {
ret = -ENOBUFS;
}
@@ -1937,7 +2395,7 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
out = tunnel->dst_port;
- if (in->sw->config.depth < out->sw->config.depth) {
+ if (tb_port_path_direction_downstream(in, out)) {
requested_up = -1;
requested_down = requested_bw;
} else {
@@ -1948,12 +2406,15 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
if (ret) {
if (ret == -ENOBUFS)
- tb_port_warn(in, "not enough bandwidth available\n");
+ tb_tunnel_warn(tunnel,
+ "not enough bandwidth available\n");
else
- tb_port_warn(in, "failed to change bandwidth allocation\n");
+ tb_tunnel_warn(tunnel,
+ "failed to change bandwidth allocation\n");
} else {
- tb_port_dbg(in, "bandwidth allocation changed to %d/%d Mb/s\n",
- requested_up, requested_down);
+ tb_tunnel_dbg(tunnel,
+ "bandwidth allocation changed to %d/%d Mb/s\n",
+ requested_up, requested_down);
/* Update other clients about the allocation change */
tb_recalc_estimated_bandwidth(tb);
@@ -2181,7 +2642,8 @@ static void tb_restore_children(struct tb_switch *sw)
continue;
if (port->remote) {
- tb_switch_lane_bonding_enable(port->remote->sw);
+ tb_switch_set_link_width(port->remote->sw,
+ port->remote->sw->link_width);
tb_switch_configure_link(port->remote->sw);
tb_restore_children(port->remote->sw);
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index d2a55ad2fd3e..e299e53473ae 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -162,11 +162,6 @@ struct tb_switch_tmu {
* switches) you need to have domain lock held.
*
* In USB4 terminology this structure represents a router.
- *
- * Note @link_width is not the same as whether link is bonded or not.
- * For Gen 4 links the link is also bonded when it is asymmetric. The
- * correct way to find out whether the link is bonded or not is to look
- * @bonded field of the upstream port.
*/
struct tb_switch {
struct device dev;
@@ -348,6 +343,7 @@ struct tb_retimer {
* the path
* @nfc_credits: Number of non-flow controlled buffers allocated for the
* @in_port.
+ * @pm_support: Set path PM packet support bit to 1 (for USB4 v2 routers)
*
* Hop configuration is always done on the IN port of a switch.
* in_port and out_port have to be on the same switch. Packets arriving on
@@ -368,6 +364,7 @@ struct tb_path_hop {
int next_hop_index;
unsigned int initial_credits;
unsigned int nfc_credits;
+ bool pm_support;
};
/**
@@ -864,6 +861,15 @@ static inline struct tb_port *tb_switch_downstream_port(struct tb_switch *sw)
return tb_port_at(tb_route(sw), tb_switch_parent(sw));
}
+/**
+ * tb_switch_depth() - Returns depth of the connected router
+ * @sw: Router
+ */
+static inline int tb_switch_depth(const struct tb_switch *sw)
+{
+ return sw->config.depth;
+}
+
static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw)
{
return sw->config.vendor_id == PCI_VENDOR_ID_INTEL &&
@@ -956,8 +962,7 @@ static inline bool tb_switch_is_icm(const struct tb_switch *sw)
return !sw->config.enabled;
}
-int tb_switch_lane_bonding_enable(struct tb_switch *sw);
-void tb_switch_lane_bonding_disable(struct tb_switch *sw);
+int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width);
int tb_switch_configure_link(struct tb_switch *sw);
void tb_switch_unconfigure_link(struct tb_switch *sw);
@@ -1001,7 +1006,6 @@ static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw)
bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx);
int tb_switch_clx_init(struct tb_switch *sw);
-bool tb_switch_clx_is_supported(const struct tb_switch *sw);
int tb_switch_clx_enable(struct tb_switch *sw, unsigned int clx);
int tb_switch_clx_disable(struct tb_switch *sw);
@@ -1040,6 +1044,21 @@ void tb_port_release_out_hopid(struct tb_port *port, int hopid);
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
struct tb_port *prev);
+/**
+ * tb_port_path_direction_downstream() - Checks if path is directed downstream
+ * @src: Source adapter
+ * @dst: Destination adapter
+ *
+ * Returns %true only if the specified path from source adapter (@src)
+ * to destination adapter (@dst) is directed downstream.
+ */
+static inline bool
+tb_port_path_direction_downstream(const struct tb_port *src,
+ const struct tb_port *dst)
+{
+ return src->sw->config.depth < dst->sw->config.depth;
+}
+
static inline bool tb_port_use_credit_allocation(const struct tb_port *port)
{
return tb_port_is_null(port) && port->sw->credit_allocation;
@@ -1057,12 +1076,29 @@ static inline bool tb_port_use_credit_allocation(const struct tb_port *port)
for ((p) = tb_next_port_on_path((src), (dst), NULL); (p); \
(p) = tb_next_port_on_path((src), (dst), (p)))
+/**
+ * tb_for_each_upstream_port_on_path() - Iterate over each upstream port on path
+ * @src: Source port
+ * @dst: Destination port
+ * @p: Port used as iterator
+ *
+ * Walks over each upstream lane adapter on path from @src to @dst.
+ */
+#define tb_for_each_upstream_port_on_path(src, dst, p) \
+ for ((p) = tb_next_port_on_path((src), (dst), NULL); (p); \
+ (p) = tb_next_port_on_path((src), (dst), (p))) \
+ if (!tb_port_is_null((p)) || !tb_is_upstream_port((p))) {\
+ continue; \
+ } else
+
int tb_port_get_link_speed(struct tb_port *port);
+int tb_port_get_link_generation(struct tb_port *port);
int tb_port_get_link_width(struct tb_port *port);
+bool tb_port_width_supported(struct tb_port *port, unsigned int width);
int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width);
int tb_port_lane_bonding_enable(struct tb_port *port);
void tb_port_lane_bonding_disable(struct tb_port *port);
-int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask,
+int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width,
int timeout_msec);
int tb_port_update_credits(struct tb_port *port);
@@ -1256,6 +1292,11 @@ int usb4_port_router_online(struct tb_port *port);
int usb4_port_enumerate_retimers(struct tb_port *port);
bool usb4_port_clx_supported(struct tb_port *port);
int usb4_port_margining_caps(struct tb_port *port, u32 *caps);
+
+bool usb4_port_asym_supported(struct tb_port *port);
+int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width);
+int usb4_port_asym_start(struct tb_port *port);
+
int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes,
unsigned int ber_level, bool timing, bool right_high,
u32 *results);
@@ -1283,7 +1324,6 @@ int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
unsigned int address, void *buf, size_t size);
int usb4_usb3_port_max_link_rate(struct tb_port *port);
-int usb4_usb3_port_actual_link_rate(struct tb_port *port);
int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
int *downstream_bw);
int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index cf9f2370878a..87e4795275fe 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -346,10 +346,14 @@ struct tb_regs_port_header {
#define LANE_ADP_CS_1 0x01
#define LANE_ADP_CS_1_TARGET_SPEED_MASK GENMASK(3, 0)
#define LANE_ADP_CS_1_TARGET_SPEED_GEN3 0xc
-#define LANE_ADP_CS_1_TARGET_WIDTH_MASK GENMASK(9, 4)
+#define LANE_ADP_CS_1_TARGET_WIDTH_MASK GENMASK(5, 4)
#define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT 4
#define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE 0x1
#define LANE_ADP_CS_1_TARGET_WIDTH_DUAL 0x3
+#define LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK GENMASK(7, 6)
+#define LANE_ADP_CS_1_TARGET_WIDTH_ASYM_TX 0x1
+#define LANE_ADP_CS_1_TARGET_WIDTH_ASYM_RX 0x2
+#define LANE_ADP_CS_1_TARGET_WIDTH_ASYM_DUAL 0x0
#define LANE_ADP_CS_1_CL0S_ENABLE BIT(10)
#define LANE_ADP_CS_1_CL1_ENABLE BIT(11)
#define LANE_ADP_CS_1_CL2_ENABLE BIT(12)
@@ -382,12 +386,15 @@ struct tb_regs_port_header {
#define PORT_CS_18_WOCS BIT(16)
#define PORT_CS_18_WODS BIT(17)
#define PORT_CS_18_WOU4S BIT(18)
+#define PORT_CS_18_CSA BIT(22)
+#define PORT_CS_18_TIP BIT(24)
#define PORT_CS_19 0x13
#define PORT_CS_19_PC BIT(3)
#define PORT_CS_19_PID BIT(4)
#define PORT_CS_19_WOC BIT(16)
#define PORT_CS_19_WOD BIT(17)
#define PORT_CS_19_WOU4 BIT(18)
+#define PORT_CS_19_START_ASYM BIT(24)
/* Display Port adapter registers */
#define ADP_DP_CS_0 0x00
@@ -400,7 +407,7 @@ struct tb_regs_port_header {
#define ADP_DP_CS_1_AUX_RX_HOPID_SHIFT 11
#define ADP_DP_CS_2 0x02
#define ADP_DP_CS_2_NRD_MLC_MASK GENMASK(2, 0)
-#define ADP_DP_CS_2_HDP BIT(6)
+#define ADP_DP_CS_2_HPD BIT(6)
#define ADP_DP_CS_2_NRD_MLR_MASK GENMASK(9, 7)
#define ADP_DP_CS_2_NRD_MLR_SHIFT 7
#define ADP_DP_CS_2_CA BIT(10)
@@ -417,7 +424,7 @@ struct tb_regs_port_header {
#define ADP_DP_CS_2_ESTIMATED_BW_MASK GENMASK(31, 24)
#define ADP_DP_CS_2_ESTIMATED_BW_SHIFT 24
#define ADP_DP_CS_3 0x03
-#define ADP_DP_CS_3_HDPC BIT(9)
+#define ADP_DP_CS_3_HPDC BIT(9)
#define DP_LOCAL_CAP 0x04
#define DP_REMOTE_CAP 0x05
/* For DP IN adapter */
@@ -484,9 +491,6 @@ struct tb_regs_port_header {
#define ADP_USB3_CS_3 0x03
#define ADP_USB3_CS_3_SCALE_MASK GENMASK(5, 0)
#define ADP_USB3_CS_4 0x04
-#define ADP_USB3_CS_4_ALR_MASK GENMASK(6, 0)
-#define ADP_USB3_CS_4_ALR_20G 0x1
-#define ADP_USB3_CS_4_ULV BIT(7)
#define ADP_USB3_CS_4_MSLR_MASK GENMASK(18, 12)
#define ADP_USB3_CS_4_MSLR_SHIFT 12
#define ADP_USB3_CS_4_MSLR_20G 0x1
@@ -499,7 +503,8 @@ struct tb_regs_hop {
* out_port (on the incoming port of the next switch)
*/
u32 out_port:6; /* next port of the path (on the same switch) */
- u32 initial_credits:8;
+ u32 initial_credits:7;
+ u32 pmps:1;
u32 unknown1:6; /* set to zero */
bool enable:1;
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index a6810fb36860..7534cd3a81f4 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -21,12 +21,18 @@
#define TB_PCI_PATH_DOWN 0
#define TB_PCI_PATH_UP 1
+#define TB_PCI_PRIORITY 3
+#define TB_PCI_WEIGHT 1
+
/* USB3 adapters use always HopID of 8 for both directions */
#define TB_USB3_HOPID 8
#define TB_USB3_PATH_DOWN 0
#define TB_USB3_PATH_UP 1
+#define TB_USB3_PRIORITY 3
+#define TB_USB3_WEIGHT 2
+
/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID 8
#define TB_DP_AUX_RX_HOPID 8
@@ -36,6 +42,12 @@
#define TB_DP_AUX_PATH_OUT 1
#define TB_DP_AUX_PATH_IN 2
+#define TB_DP_VIDEO_PRIORITY 1
+#define TB_DP_VIDEO_WEIGHT 1
+
+#define TB_DP_AUX_PRIORITY 2
+#define TB_DP_AUX_WEIGHT 1
+
/* Minimum number of credits needed for PCIe path */
#define TB_MIN_PCIE_CREDITS 6U
/*
@@ -46,6 +58,18 @@
/* Minimum number of credits for DMA path */
#define TB_MIN_DMA_CREDITS 1
+#define TB_DMA_PRIORITY 5
+#define TB_DMA_WEIGHT 1
+
+/*
+ * Reserve additional bandwidth for USB 3.x and PCIe bulk traffic
+ * according to USB4 v2 Connection Manager guide. This ends up reserving
+ * 1500 Mb/s for PCIe and 3000 Mb/s for USB 3.x taking weights into
+ * account.
+ */
+#define USB4_V2_PCI_MIN_BANDWIDTH (1500 * TB_PCI_WEIGHT)
+#define USB4_V2_USB3_MIN_BANDWIDTH (1500 * TB_USB3_WEIGHT)
+
static unsigned int dma_credits = TB_DMA_CREDITS;
module_param(dma_credits, uint, 0444);
MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
@@ -58,27 +82,6 @@ MODULE_PARM_DESC(bw_alloc_mode,
static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
-#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
- do { \
- struct tb_tunnel *__tunnel = (tunnel); \
- level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt, \
- tb_route(__tunnel->src_port->sw), \
- __tunnel->src_port->port, \
- tb_route(__tunnel->dst_port->sw), \
- __tunnel->dst_port->port, \
- tb_tunnel_names[__tunnel->type], \
- ## arg); \
- } while (0)
-
-#define tb_tunnel_WARN(tunnel, fmt, arg...) \
- __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
-#define tb_tunnel_warn(tunnel, fmt, arg...) \
- __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
-#define tb_tunnel_info(tunnel, fmt, arg...) \
- __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
-#define tb_tunnel_dbg(tunnel, fmt, arg...) \
- __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
-
static inline unsigned int tb_usable_credits(const struct tb_port *port)
{
return port->total_credits - port->ctl_credits;
@@ -131,6 +134,16 @@ static unsigned int tb_available_credits(const struct tb_port *port,
return credits > 0 ? credits : 0;
}
+static void tb_init_pm_support(struct tb_path_hop *hop)
+{
+ struct tb_port *out_port = hop->out_port;
+ struct tb_port *in_port = hop->in_port;
+
+ if (tb_port_is_null(in_port) && tb_port_is_null(out_port) &&
+ usb4_switch_version(in_port->sw) >= 2)
+ hop->pm_support = true;
+}
+
static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
enum tb_tunnel_type type)
{
@@ -156,11 +169,11 @@ static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
{
+ struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
int ret;
/* Only supported if both routers are at least USB4 v2 */
- if (usb4_switch_version(tunnel->src_port->sw) < 2 ||
- usb4_switch_version(tunnel->dst_port->sw) < 2)
+ if (tb_port_get_link_generation(port) < 4)
return 0;
ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
@@ -234,8 +247,8 @@ static int tb_pci_init_path(struct tb_path *path)
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_fc_enable = TB_PATH_ALL;
path->ingress_shared_buffer = TB_PATH_NONE;
- path->priority = 3;
- path->weight = 1;
+ path->priority = TB_PCI_PRIORITY;
+ path->weight = TB_PCI_WEIGHT;
path->drop_packages = 0;
tb_path_for_each_hop(path, hop) {
@@ -376,6 +389,51 @@ err_free:
return NULL;
}
+/**
+ * tb_tunnel_reserved_pci() - Amount of bandwidth to reserve for PCIe
+ * @port: Lane 0 adapter
+ * @reserved_up: Upstream bandwidth in Mb/s to reserve
+ * @reserved_down: Downstream bandwidth in Mb/s to reserve
+ *
+ * Can be called for any connected lane 0 adapter to find out how much
+ * bandwidth needs to be left in reserve for possible PCIe bulk traffic.
+ * Returns true if there is something to be reserved and writes the
+ * amount to @reserved_down/@reserved_up. Otherwise returns false and
+ * does not touch the parameters.
+ */
+bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
+ int *reserved_down)
+{
+ if (WARN_ON_ONCE(!port->remote))
+ return false;
+
+ if (!tb_acpi_may_tunnel_pcie())
+ return false;
+
+ if (tb_port_get_link_generation(port) < 4)
+ return false;
+
+ /* Must have PCIe adapters */
+ if (tb_is_upstream_port(port)) {
+ if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_UP))
+ return false;
+ if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_DOWN))
+ return false;
+ } else {
+ if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_DOWN))
+ return false;
+ if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_UP))
+ return false;
+ }
+
+ *reserved_up = USB4_V2_PCI_MIN_BANDWIDTH;
+ *reserved_down = USB4_V2_PCI_MIN_BANDWIDTH;
+
+ tb_port_dbg(port, "reserving %u/%u Mb/s for PCIe\n", *reserved_up,
+ *reserved_down);
+ return true;
+}
+
static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
/* Titan Ridge DP adapters need the same treatment as USB4 */
@@ -614,8 +672,9 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
in_rate = tb_dp_cap_get_rate(in_dp_cap);
in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
- tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
- in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
+ tb_tunnel_dbg(tunnel,
+ "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+ in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
/*
* If the tunnel bandwidth is limited (max_bw is set) then see
@@ -624,10 +683,11 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
out_rate = tb_dp_cap_get_rate(out_dp_cap);
out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
bw = tb_dp_bandwidth(out_rate, out_lanes);
- tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
- out_rate, out_lanes, bw);
+ tb_tunnel_dbg(tunnel,
+ "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+ out_rate, out_lanes, bw);
- if (in->sw->config.depth < out->sw->config.depth)
+ if (tb_port_path_direction_downstream(in, out))
max_bw = tunnel->max_down;
else
max_bw = tunnel->max_up;
@@ -639,13 +699,14 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
out_rate, out_lanes, &new_rate,
&new_lanes);
if (ret) {
- tb_port_info(out, "not enough bandwidth for DP tunnel\n");
+ tb_tunnel_info(tunnel, "not enough bandwidth\n");
return ret;
}
new_bw = tb_dp_bandwidth(new_rate, new_lanes);
- tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
- new_rate, new_lanes, new_bw);
+ tb_tunnel_dbg(tunnel,
+ "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
+ new_rate, new_lanes, new_bw);
/*
* Set new rate and number of lanes before writing it to
@@ -662,7 +723,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
*/
if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
- tb_port_dbg(out, "disabling LTTPR\n");
+ tb_tunnel_dbg(tunnel, "disabling LTTPR\n");
}
return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
@@ -712,8 +773,8 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
lanes = min(in_lanes, out_lanes);
tmp = tb_dp_bandwidth(rate, lanes);
- tb_port_dbg(in, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n", rate,
- lanes, tmp);
+ tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
+ rate, lanes, tmp);
ret = usb4_dp_port_set_nrd(in, rate, lanes);
if (ret)
@@ -728,15 +789,15 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
rate = min(in_rate, out_rate);
tmp = tb_dp_bandwidth(rate, lanes);
- tb_port_dbg(in,
- "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
- rate, lanes, tmp);
+ tb_tunnel_dbg(tunnel,
+ "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
+ rate, lanes, tmp);
for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
granularity *= 2)
;
- tb_port_dbg(in, "granularity %d Mb/s\n", granularity);
+ tb_tunnel_dbg(tunnel, "granularity %d Mb/s\n", granularity);
/*
* Returns -EINVAL if granularity above is outside of the
@@ -751,12 +812,12 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
* max_up/down fields. For discovery we just read what the
* estimation was set to.
*/
- if (in->sw->config.depth < out->sw->config.depth)
+ if (tb_port_path_direction_downstream(in, out))
estimated_bw = tunnel->max_down;
else
estimated_bw = tunnel->max_up;
- tb_port_dbg(in, "estimated bandwidth %d Mb/s\n", estimated_bw);
+ tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", estimated_bw);
ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
if (ret)
@@ -767,7 +828,7 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
if (ret)
return ret;
- tb_port_dbg(in, "bandwidth allocation mode enabled\n");
+ tb_tunnel_dbg(tunnel, "bandwidth allocation mode enabled\n");
return 0;
}
@@ -788,7 +849,7 @@ static int tb_dp_init(struct tb_tunnel *tunnel)
if (!usb4_dp_port_bandwidth_mode_supported(in))
return 0;
- tb_port_dbg(in, "bandwidth allocation mode supported\n");
+ tb_tunnel_dbg(tunnel, "bandwidth allocation mode supported\n");
ret = usb4_dp_port_set_cm_id(in, tb->index);
if (ret)
@@ -805,7 +866,7 @@ static void tb_dp_deinit(struct tb_tunnel *tunnel)
return;
if (usb4_dp_port_bandwidth_mode_enabled(in)) {
usb4_dp_port_set_cm_bandwidth_mode_supported(in, false);
- tb_port_dbg(in, "bandwidth allocation mode disabled\n");
+ tb_tunnel_dbg(tunnel, "bandwidth allocation mode disabled\n");
}
}
@@ -921,10 +982,7 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
if (allocated_bw == max_bw)
allocated_bw = ret;
- tb_port_dbg(in, "consumed bandwidth through allocation mode %d Mb/s\n",
- allocated_bw);
-
- if (in->sw->config.depth < out->sw->config.depth) {
+ if (tb_port_path_direction_downstream(in, out)) {
*consumed_up = 0;
*consumed_down = allocated_bw;
} else {
@@ -959,7 +1017,7 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up
if (allocated_bw == max_bw)
allocated_bw = ret;
- if (in->sw->config.depth < out->sw->config.depth) {
+ if (tb_port_path_direction_downstream(in, out)) {
*allocated_up = 0;
*allocated_down = allocated_bw;
} else {
@@ -987,7 +1045,7 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
if (ret < 0)
return ret;
- if (in->sw->config.depth < out->sw->config.depth) {
+ if (tb_port_path_direction_downstream(in, out)) {
tmp = min(*alloc_down, max_bw);
ret = usb4_dp_port_allocate_bandwidth(in, tmp);
if (ret)
@@ -1006,9 +1064,6 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
/* Now we can use BW mode registers to figure out the bandwidth */
/* TODO: need to handle discovery too */
tunnel->bw_mode = true;
-
- tb_port_dbg(in, "allocated bandwidth through allocation mode %d Mb/s\n",
- tmp);
return 0;
}
@@ -1035,8 +1090,7 @@ static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes,
*rate = tb_dp_cap_get_rate(val);
*lanes = tb_dp_cap_get_lanes(val);
- tb_port_dbg(in, "consumed bandwidth through DPRX %d Mb/s\n",
- tb_dp_bandwidth(*rate, *lanes));
+ tb_tunnel_dbg(tunnel, "DPRX read done\n");
return 0;
}
usleep_range(100, 150);
@@ -1073,9 +1127,6 @@ static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
*rate = tb_dp_cap_get_rate(val);
*lanes = tb_dp_cap_get_lanes(val);
-
- tb_port_dbg(in, "bandwidth from %#x capability %d Mb/s\n", cap,
- tb_dp_bandwidth(*rate, *lanes));
return 0;
}
@@ -1092,7 +1143,7 @@ static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
if (ret < 0)
return ret;
- if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
+ if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
*max_up = 0;
*max_down = ret;
} else {
@@ -1150,7 +1201,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
return 0;
}
- if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
+ if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
*consumed_up = 0;
*consumed_down = tb_dp_bandwidth(rate, lanes);
} else {
@@ -1172,7 +1223,7 @@ static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
hop->initial_credits = 1;
}
-static void tb_dp_init_aux_path(struct tb_path *path)
+static void tb_dp_init_aux_path(struct tb_path *path, bool pm_support)
{
struct tb_path_hop *hop;
@@ -1180,11 +1231,14 @@ static void tb_dp_init_aux_path(struct tb_path *path)
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_fc_enable = TB_PATH_ALL;
path->ingress_shared_buffer = TB_PATH_NONE;
- path->priority = 2;
- path->weight = 1;
+ path->priority = TB_DP_AUX_PRIORITY;
+ path->weight = TB_DP_AUX_WEIGHT;
- tb_path_for_each_hop(path, hop)
+ tb_path_for_each_hop(path, hop) {
tb_dp_init_aux_credits(hop);
+ if (pm_support)
+ tb_init_pm_support(hop);
+ }
}
static int tb_dp_init_video_credits(struct tb_path_hop *hop)
@@ -1216,7 +1270,7 @@ static int tb_dp_init_video_credits(struct tb_path_hop *hop)
return 0;
}
-static int tb_dp_init_video_path(struct tb_path *path)
+static int tb_dp_init_video_path(struct tb_path *path, bool pm_support)
{
struct tb_path_hop *hop;
@@ -1224,8 +1278,8 @@ static int tb_dp_init_video_path(struct tb_path *path)
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_fc_enable = TB_PATH_NONE;
path->ingress_shared_buffer = TB_PATH_NONE;
- path->priority = 1;
- path->weight = 1;
+ path->priority = TB_DP_VIDEO_PRIORITY;
+ path->weight = TB_DP_VIDEO_WEIGHT;
tb_path_for_each_hop(path, hop) {
int ret;
@@ -1233,6 +1287,8 @@ static int tb_dp_init_video_path(struct tb_path *path)
ret = tb_dp_init_video_credits(hop);
if (ret)
return ret;
+ if (pm_support)
+ tb_init_pm_support(hop);
}
return 0;
@@ -1253,8 +1309,9 @@ static void tb_dp_dump(struct tb_tunnel *tunnel)
rate = tb_dp_cap_get_rate(dp_cap);
lanes = tb_dp_cap_get_lanes(dp_cap);
- tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
- rate, lanes, tb_dp_bandwidth(rate, lanes));
+ tb_tunnel_dbg(tunnel,
+ "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+ rate, lanes, tb_dp_bandwidth(rate, lanes));
out = tunnel->dst_port;
@@ -1265,8 +1322,9 @@ static void tb_dp_dump(struct tb_tunnel *tunnel)
rate = tb_dp_cap_get_rate(dp_cap);
lanes = tb_dp_cap_get_lanes(dp_cap);
- tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
- rate, lanes, tb_dp_bandwidth(rate, lanes));
+ tb_tunnel_dbg(tunnel,
+ "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+ rate, lanes, tb_dp_bandwidth(rate, lanes));
if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
in->cap_adap + DP_REMOTE_CAP, 1))
@@ -1275,8 +1333,8 @@ static void tb_dp_dump(struct tb_tunnel *tunnel)
rate = tb_dp_cap_get_rate(dp_cap);
lanes = tb_dp_cap_get_lanes(dp_cap);
- tb_port_dbg(in, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
- rate, lanes, tb_dp_bandwidth(rate, lanes));
+ tb_tunnel_dbg(tunnel, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
+ rate, lanes, tb_dp_bandwidth(rate, lanes));
}
/**
@@ -1322,7 +1380,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
goto err_free;
}
tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
- if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT]))
+ if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], false))
goto err_free;
path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
@@ -1330,14 +1388,14 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
if (!path)
goto err_deactivate;
tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
- tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);
+ tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT], false);
path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
&port, "AUX RX", alloc_hopid);
if (!path)
goto err_deactivate;
tunnel->paths[TB_DP_AUX_PATH_IN] = path;
- tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);
+ tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN], false);
/* Validate that the tunnel is complete */
if (!tb_port_is_dpout(tunnel->dst_port)) {
@@ -1392,6 +1450,7 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
struct tb_tunnel *tunnel;
struct tb_path **paths;
struct tb_path *path;
+ bool pm_support;
if (WARN_ON(!in->cap_adap || !out->cap_adap))
return NULL;
@@ -1413,26 +1472,27 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
tunnel->max_down = max_down;
paths = tunnel->paths;
+ pm_support = usb4_switch_version(in->sw) >= 2;
path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
link_nr, "Video");
if (!path)
goto err_free;
- tb_dp_init_video_path(path);
+ tb_dp_init_video_path(path, pm_support);
paths[TB_DP_VIDEO_PATH_OUT] = path;
path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
if (!path)
goto err_free;
- tb_dp_init_aux_path(path);
+ tb_dp_init_aux_path(path, pm_support);
paths[TB_DP_AUX_PATH_OUT] = path;
path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
if (!path)
goto err_free;
- tb_dp_init_aux_path(path);
+ tb_dp_init_aux_path(path, pm_support);
paths[TB_DP_AUX_PATH_IN] = path;
return tunnel;
@@ -1497,8 +1557,8 @@ static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
path->ingress_fc_enable = TB_PATH_ALL;
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_shared_buffer = TB_PATH_NONE;
- path->priority = 5;
- path->weight = 1;
+ path->priority = TB_DMA_PRIORITY;
+ path->weight = TB_DMA_WEIGHT;
path->clear_fc = true;
/*
@@ -1531,8 +1591,8 @@ static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
path->ingress_fc_enable = TB_PATH_ALL;
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_shared_buffer = TB_PATH_NONE;
- path->priority = 5;
- path->weight = 1;
+ path->priority = TB_DMA_PRIORITY;
+ path->weight = TB_DMA_WEIGHT;
path->clear_fc = true;
tb_path_for_each_hop(path, hop) {
@@ -1758,14 +1818,23 @@ static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
int *consumed_up, int *consumed_down)
{
- int pcie_enabled = tb_acpi_may_tunnel_pcie();
+ struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
+ int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0;
/*
* PCIe tunneling, if enabled, affects the USB3 bandwidth so
* take that into account here.
*/
- *consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3;
- *consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3;
+ *consumed_up = tunnel->allocated_up *
+ (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
+ *consumed_down = tunnel->allocated_down *
+ (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
+
+ if (tb_port_get_link_generation(port) >= 4) {
+ *consumed_up = max(*consumed_up, USB4_V2_USB3_MIN_BANDWIDTH);
+ *consumed_down = max(*consumed_down, USB4_V2_USB3_MIN_BANDWIDTH);
+ }
+
return 0;
}
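A worked example of the weighted calculation above, as a standalone sketch (the allocation figures are made up; the weights and minimum follow the constants introduced earlier in tunnel.c):

#include <stdio.h>

#define TB_PCI_WEIGHT	1
#define TB_USB3_WEIGHT	2
#define USB4_V2_USB3_MIN_BANDWIDTH	(1500 * TB_USB3_WEIGHT)	/* 3000 Mb/s */

int main(void)
{
	int allocated_up = 2000, allocated_down = 2000;	/* Mb/s, example values */
	int pcie_weight = TB_PCI_WEIGHT;		/* PCIe tunneling enabled */

	int consumed_up = allocated_up *
		(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;	/* 3000 */
	int consumed_down = allocated_down *
		(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;	/* 3000 */

	/* On a Gen 4 link the USB4 v2 minimum is enforced on top */
	if (consumed_up < USB4_V2_USB3_MIN_BANDWIDTH)
		consumed_up = USB4_V2_USB3_MIN_BANDWIDTH;
	if (consumed_down < USB4_V2_USB3_MIN_BANDWIDTH)
		consumed_down = USB4_V2_USB3_MIN_BANDWIDTH;

	printf("consumed %d/%d Mb/s\n", consumed_up, consumed_down);	/* 3000/3000 */
	return 0;
}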
@@ -1790,17 +1859,10 @@ static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
{
int ret, max_rate, allocate_up, allocate_down;
- ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
+ ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
if (ret < 0) {
- tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
+ tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
return;
- } else if (!ret) {
- /* Use maximum link rate if the link valid is not set */
- ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
- if (ret < 0) {
- tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
- return;
- }
}
/*
@@ -1871,8 +1933,8 @@ static void tb_usb3_init_path(struct tb_path *path)
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_fc_enable = TB_PATH_ALL;
path->ingress_shared_buffer = TB_PATH_NONE;
- path->priority = 3;
- path->weight = 3;
+ path->priority = TB_USB3_PRIORITY;
+ path->weight = TB_USB3_WEIGHT;
path->drop_packages = 0;
tb_path_for_each_hop(path, hop)
@@ -2387,3 +2449,8 @@ void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
tunnel->reclaim_available_bandwidth(tunnel, available_up,
available_down);
}
+
+const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel)
+{
+ return tb_tunnel_names[tunnel->type];
+}
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index bf690f7beeee..b4cff5482112 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -80,6 +80,8 @@ struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
struct tb_port *down);
+bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
+ int *reserved_down);
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
@@ -137,5 +139,27 @@ static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel)
return tunnel->type == TB_TUNNEL_USB3;
}
-#endif
+const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel);
+
+#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
+ do { \
+ struct tb_tunnel *__tunnel = (tunnel); \
+ level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt, \
+ tb_route(__tunnel->src_port->sw), \
+ __tunnel->src_port->port, \
+ tb_route(__tunnel->dst_port->sw), \
+ __tunnel->dst_port->port, \
+ tb_tunnel_type_name(__tunnel), \
+ ## arg); \
+ } while (0)
+#define tb_tunnel_WARN(tunnel, fmt, arg...) \
+ __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
+#define tb_tunnel_warn(tunnel, fmt, arg...) \
+ __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
+#define tb_tunnel_info(tunnel, fmt, arg...) \
+ __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
+#define tb_tunnel_dbg(tunnel, fmt, arg...) \
+ __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
+
+#endif
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 05ddb224c464..4277733d0021 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -1455,6 +1455,112 @@ bool usb4_port_clx_supported(struct tb_port *port)
}
/**
+ * usb4_port_asym_supported() - If the port supports asymmetric link
+ * @port: USB4 port
+ *
+ * Checks if the port and the cable support an asymmetric link and returns
+ * %true in that case.
+ */
+bool usb4_port_asym_supported(struct tb_port *port)
+{
+ u32 val;
+
+ if (!port->cap_usb4)
+ return false;
+
+ if (tb_port_read(port, &val, TB_CFG_PORT, port->cap_usb4 + PORT_CS_18, 1))
+ return false;
+
+ return !!(val & PORT_CS_18_CSA);
+}
+
+/**
+ * usb4_port_asym_set_link_width() - Set link width to asymmetric or symmetric
+ * @port: USB4 port
+ * @width: Asymmetric width to configure
+ *
+ * Sets USB4 port link width to @width. Can be called for widths where
+ * usb4_port_asym_width_supported() returned %true.
+ */
+int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width)
+{
+ u32 val;
+ int ret;
+
+ if (!port->cap_phy)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ val &= ~LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK;
+ switch (width) {
+ case TB_LINK_WIDTH_DUAL:
+ val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
+ LANE_ADP_CS_1_TARGET_WIDTH_ASYM_DUAL);
+ break;
+ case TB_LINK_WIDTH_ASYM_TX:
+ val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
+ LANE_ADP_CS_1_TARGET_WIDTH_ASYM_TX);
+ break;
+ case TB_LINK_WIDTH_ASYM_RX:
+ val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
+ LANE_ADP_CS_1_TARGET_WIDTH_ASYM_RX);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+}
+
+/**
+ * usb4_port_asym_start() - Start symmetry change and wait for completion
+ * @port: USB4 port
+ *
+ * Start symmetry change of the link to asymmetric or symmetric
+ * (according to what was previously set in tb_port_set_link_width()).
+ * Wait for completion of the change.
+ *
+ * Returns %0 in case of success, %-ETIMEDOUT in case of timeout or
+ * a negative errno in case of a failure.
+ */
+int usb4_port_asym_start(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ val &= ~PORT_CS_19_START_ASYM;
+ val |= FIELD_PREP(PORT_CS_19_START_ASYM, 1);
+
+ ret = tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * Wait for PORT_CS_19_START_ASYM to be 0. This means the USB4
+ * port started the symmetry transition.
+ */
+ ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_19,
+ PORT_CS_19_START_ASYM, 0, 1000);
+ if (ret)
+ return ret;
+
+ /* Then wait for the transition to be completed */
+ return usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_18,
+ PORT_CS_18_TIP, 0, 5000);
+}
+
+/**
* usb4_port_margining_caps() - Read USB4 port margining capabilities
* @port: USB4 port
* @caps: Array with at least two elements to hold the results
@@ -1946,35 +2052,6 @@ int usb4_usb3_port_max_link_rate(struct tb_port *port)
return usb4_usb3_port_max_bandwidth(port, ret);
}
-/**
- * usb4_usb3_port_actual_link_rate() - Established USB3 link rate
- * @port: USB3 adapter port
- *
- * Return actual established link rate of a USB3 adapter in Mb/s. If the
- * link is not up returns %0 and negative errno in case of failure.
- */
-int usb4_usb3_port_actual_link_rate(struct tb_port *port)
-{
- int ret, lr;
- u32 val;
-
- if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
- return -EINVAL;
-
- ret = tb_port_read(port, &val, TB_CFG_PORT,
- port->cap_adap + ADP_USB3_CS_4, 1);
- if (ret)
- return ret;
-
- if (!(val & ADP_USB3_CS_4_ULV))
- return 0;
-
- lr = val & ADP_USB3_CS_4_ALR_MASK;
- ret = lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
-
- return usb4_usb3_port_max_bandwidth(port, ret);
-}
-
static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
{
int ret;
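A sketch of how the three asymmetric-link helpers added above are presumably chained by the connection manager; the wrapper function, its name, and the error handling are illustrative, only the usb4_port_asym_*() calls and TB_LINK_WIDTH_ASYM_TX come from this patch:

static int example_make_link_tx_asymmetric(struct tb_port *port)
{
	int ret;

	/* Both the router and the cable must advertise asymmetric support */
	if (!usb4_port_asym_supported(port))
		return -EOPNOTSUPP;

	/* Program the TX-heavy target width into LANE_ADP_CS_1 */
	ret = usb4_port_asym_set_link_width(port, TB_LINK_WIDTH_ASYM_TX);
	if (ret)
		return ret;

	/* Trigger the transition and wait for PORT_CS_18_TIP to clear */
	return usb4_port_asym_start(port);
}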
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 98764e740c07..34c01874f45b 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -377,18 +377,21 @@ void xen_console_resume(void)
#ifdef CONFIG_HVC_XEN_FRONTEND
static void xencons_disconnect_backend(struct xencons_info *info)
{
- if (info->irq > 0)
- unbind_from_irqhandler(info->irq, NULL);
- info->irq = 0;
+ if (info->hvc != NULL)
+ hvc_remove(info->hvc);
+ info->hvc = NULL;
+ if (info->irq > 0) {
+ evtchn_put(info->evtchn);
+ info->irq = 0;
+ info->evtchn = 0;
+ }
+ /* evtchn_put() will also close it so this is only an error path */
if (info->evtchn > 0)
xenbus_free_evtchn(info->xbdev, info->evtchn);
info->evtchn = 0;
if (info->gntref > 0)
gnttab_free_grant_references(info->gntref);
info->gntref = 0;
- if (info->hvc != NULL)
- hvc_remove(info->hvc);
- info->hvc = NULL;
}
static void xencons_free(struct xencons_info *info)
@@ -433,7 +436,7 @@ static int xencons_connect_backend(struct xenbus_device *dev,
if (ret)
return ret;
info->evtchn = evtchn;
- irq = bind_interdomain_evtchn_to_irq_lateeoi(dev, evtchn);
+ irq = bind_evtchn_to_irq_lateeoi(evtchn);
if (irq < 0)
return irq;
info->irq = irq;
@@ -553,10 +556,23 @@ static void xencons_backend_changed(struct xenbus_device *dev,
if (dev->state == XenbusStateClosed)
break;
fallthrough; /* Missed the backend's CLOSING state */
- case XenbusStateClosing:
+ case XenbusStateClosing: {
+ struct xencons_info *info = dev_get_drvdata(&dev->dev);
+
+ /*
+ * Don't tear down the evtchn and grant ref before the other
+ * end has disconnected, but do stop userspace from trying
+ * to use the device before we allow the backend to close.
+ */
+ if (info->hvc) {
+ hvc_remove(info->hvc);
+ info->hvc = NULL;
+ }
+
xenbus_frontend_closed(dev);
break;
}
+ }
}
static const struct xenbus_device_id xencons_ids[] = {
@@ -588,7 +604,7 @@ static int __init xen_hvc_init(void)
ops = &dom0_hvc_ops;
r = xen_initial_domain_console_init();
if (r < 0)
- return r;
+ goto register_fe;
info = vtermno_to_xencons(HVC_COOKIE);
} else {
ops = &domU_hvc_ops;
@@ -597,7 +613,7 @@ static int __init xen_hvc_init(void)
else
r = xen_pv_console_init();
if (r < 0)
- return r;
+ goto register_fe;
info = vtermno_to_xencons(HVC_COOKIE);
info->irq = bind_evtchn_to_irq_lateeoi(info->evtchn);
@@ -616,12 +632,13 @@ static int __init xen_hvc_init(void)
list_del(&info->list);
spin_unlock_irqrestore(&xencons_lock, flags);
if (info->irq)
- unbind_from_irqhandler(info->irq, NULL);
+ evtchn_put(info->evtchn);
kfree(info);
return r;
}
r = 0;
+ register_fe:
#ifdef CONFIG_HVC_XEN_FRONTEND
r = xenbus_register_frontend(&xencons_driver);
#endif
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index 98433a53bae1..f57fd9095f75 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -664,7 +664,6 @@ static void hvcs_return_index(int index)
static void hvcs_destruct_port(struct tty_port *p)
{
struct hvcs_struct *hvcsd = container_of(p, struct hvcs_struct, port);
- struct vio_dev *vdev;
struct completion *comp;
unsigned long flags;
@@ -686,7 +685,6 @@ static void hvcs_destruct_port(struct tty_port *p)
printk(KERN_INFO "HVCS: Destroyed hvcs_struct for vty-server@%X.\n",
hvcsd->vdev->unit_address);
- vdev = hvcsd->vdev;
hvcsd->vdev = NULL;
hvcsd->p_unit_address = 0;
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 10aa4ed38793..6ce7f259968f 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -288,7 +288,7 @@ struct mxser_board {
enum mxser_must_hwid must_hwid;
speed_t max_baud;
- struct mxser_port ports[];
+ struct mxser_port ports[] __counted_by(nports);
};
static DECLARE_BITMAP(mxser_boards, MXSER_BOARDS);
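For context on the __counted_by() annotation above: it ties the flexible array to the member holding its element count so that fortified bounds checking (CONFIG_FORTIFY_SOURCE/UBSAN) can use it. A small free-standing sketch of the idiom, with illustrative names:

struct example_board {
	unsigned int nports;
	int ports[] __counted_by(nports);
};

static struct example_board *example_board_alloc(unsigned int nports)
{
	struct example_board *brd;

	brd = kzalloc(struct_size(brd, ports, nports), GFP_KERNEL);
	if (!brd)
		return NULL;

	/* The counter must be valid before ports[] is indexed */
	brd->nports = nports;
	return brd;
}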
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 1f3aba607cd5..a3ab3946e4ad 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2,6 +2,7 @@
/*
* n_gsm.c GSM 0710 tty multiplexor
* Copyright (c) 2009/10 Intel Corporation
+ * Copyright (c) 2022/23 Siemens Mobility GmbH
*
* * THIS IS A DEVELOPMENT SNAPSHOT IT IS NOT A FINAL RELEASE *
*
@@ -4108,6 +4109,8 @@ static int gsm_modem_upd_via_msc(struct gsm_dlci *dlci, u8 brk)
static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk)
{
+ if (dlci->gsm->dead)
+ return -EL2HLT;
if (dlci->adaption == 2) {
/* Send convergence layer type 2 empty data frame. */
gsm_modem_upd_via_data(dlci, brk);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 6c9a408d67cd..f252d0b5a434 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -249,15 +249,12 @@ static void n_tty_check_throttle(struct tty_struct *tty)
if (ldata->icanon && ldata->canon_head == ldata->read_tail)
return;
- while (1) {
- int throttled;
+ do {
tty_set_flow_change(tty, TTY_THROTTLE_SAFE);
if (N_TTY_BUF_SIZE - read_cnt(ldata) >= TTY_THRESHOLD_THROTTLE)
break;
- throttled = tty_throttle_safe(tty);
- if (!throttled)
- break;
- }
+ } while (!tty_throttle_safe(tty));
+
__tty_set_flow_change(tty, 0);
}
@@ -279,16 +276,14 @@ static void n_tty_check_unthrottle(struct tty_struct *tty)
* we won't get any more characters.
*/
- while (1) {
- int unthrottled;
+ do {
tty_set_flow_change(tty, TTY_UNTHROTTLE_SAFE);
if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE)
break;
+
n_tty_kick_worker(tty);
- unthrottled = tty_unthrottle_safe(tty);
- if (!unthrottled)
- break;
- }
+ } while (!tty_unthrottle_safe(tty));
+
__tty_set_flow_change(tty, 0);
}
@@ -1965,26 +1960,27 @@ static bool copy_from_read_buf(const struct tty_struct *tty, u8 **kbp,
size_t head = smp_load_acquire(&ldata->commit_head);
size_t tail = MASK(ldata->read_tail);
- n = min(head - ldata->read_tail, N_TTY_BUF_SIZE - tail);
- n = min(*nr, n);
- if (n) {
- u8 *from = read_buf_addr(ldata, tail);
- memcpy(*kbp, from, n);
- is_eof = n == 1 && *from == EOF_CHAR(tty);
- tty_audit_add_data(tty, from, n);
- zero_buffer(tty, from, n);
- smp_store_release(&ldata->read_tail, ldata->read_tail + n);
- /* Turn single EOF into zero-length read */
- if (L_EXTPROC(tty) && ldata->icanon && is_eof &&
- (head == ldata->read_tail))
- return false;
- *kbp += n;
- *nr -= n;
-
- /* If we have more to copy, let the caller know */
- return head != ldata->read_tail;
- }
- return false;
+ n = min3(head - ldata->read_tail, N_TTY_BUF_SIZE - tail, *nr);
+ if (!n)
+ return false;
+
+ u8 *from = read_buf_addr(ldata, tail);
+ memcpy(*kbp, from, n);
+ is_eof = n == 1 && *from == EOF_CHAR(tty);
+ tty_audit_add_data(tty, from, n);
+ zero_buffer(tty, from, n);
+ smp_store_release(&ldata->read_tail, ldata->read_tail + n);
+
+ /* Turn single EOF into zero-length read */
+ if (L_EXTPROC(tty) && ldata->icanon && is_eof &&
+ head == ldata->read_tail)
+ return false;
+
+ *kbp += n;
+ *nr -= n;
+
+ /* If we have more to copy, let the caller know */
+ return head != ldata->read_tail;
}
/**
@@ -2154,9 +2150,8 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, u8 *kbuf,
struct n_tty_data *ldata = tty->disc_data;
u8 *kb = kbuf;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
- int c;
int minimum, time;
- ssize_t retval = 0;
+ ssize_t retval;
long timeout;
bool packet;
size_t old_tail;
@@ -2192,9 +2187,9 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, u8 *kbuf,
return kb - kbuf;
}
- c = job_control(tty, file);
- if (c < 0)
- return c;
+ retval = job_control(tty, file);
+ if (retval < 0)
+ return retval;
/*
* Internal serialization of reads.
@@ -2499,7 +2494,7 @@ static int n_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
struct n_tty_data *ldata = tty->disc_data;
- int retval;
+ unsigned int num;
switch (cmd) {
case TIOCOUTQ:
@@ -2507,11 +2502,11 @@ static int n_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
case TIOCINQ:
down_write(&tty->termios_rwsem);
if (L_ICANON(tty) && !L_EXTPROC(tty))
- retval = inq_canon(ldata);
+ num = inq_canon(ldata);
else
- retval = read_cnt(ldata);
+ num = read_cnt(ldata);
up_write(&tty->termios_rwsem);
- return put_user(retval, (unsigned int __user *) arg);
+ return put_user(num, (unsigned int __user *) arg);
default:
return n_tty_ioctl_helper(tty, cmd, arg);
}
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index e7d663901c07..a5fdaf5e148e 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -15,9 +15,11 @@
#include <linux/of_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/sched.h>
#include <linux/serdev.h>
#include <linux/slab.h>
+
#include <linux/platform_data/x86/apple.h>
static bool is_registered;
@@ -185,30 +187,20 @@ void serdev_device_close(struct serdev_device *serdev)
}
EXPORT_SYMBOL_GPL(serdev_device_close);
-static void devm_serdev_device_release(struct device *dev, void *dr)
+static void devm_serdev_device_close(void *serdev)
{
- serdev_device_close(*(struct serdev_device **)dr);
+ serdev_device_close(serdev);
}
int devm_serdev_device_open(struct device *dev, struct serdev_device *serdev)
{
- struct serdev_device **dr;
int ret;
- dr = devres_alloc(devm_serdev_device_release, sizeof(*dr), GFP_KERNEL);
- if (!dr)
- return -ENOMEM;
-
ret = serdev_device_open(serdev);
- if (ret) {
- devres_free(dr);
+ if (ret)
return ret;
- }
-
- *dr = serdev;
- devres_add(dev, dr);
- return 0;
+ return devm_add_action_or_reset(dev, devm_serdev_device_close, serdev);
}
EXPORT_SYMBOL_GPL(devm_serdev_device_open);
@@ -510,7 +502,7 @@ struct serdev_controller *serdev_controller_alloc(struct device *parent,
ctrl->dev.type = &serdev_ctrl_type;
ctrl->dev.bus = &serdev_bus_type;
ctrl->dev.parent = parent;
- ctrl->dev.of_node = parent->of_node;
+ device_set_node(&ctrl->dev, dev_fwnode(parent));
serdev_controller_set_drvdata(ctrl, &ctrl[1]);
dev_set_name(&ctrl->dev, "serial%d", id);
@@ -673,7 +665,7 @@ static int acpi_serdev_check_resources(struct serdev_controller *ctrl,
acpi_get_parent(adev->handle, &lookup.controller_handle);
/* Make sure controller and ResourceSource handle match */
- if (ACPI_HANDLE(ctrl->dev.parent) != lookup.controller_handle)
+ if (!device_match_acpi_handle(ctrl->dev.parent, lookup.controller_handle))
return -ENODEV;
return 0;
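The devres conversion above is the general devm_add_action_or_reset() pattern: on success the release callback runs automatically at driver detach, and if registering the action fails it is invoked immediately. A sketch with hypothetical example_open()/example_close() helpers:

static void example_devm_close(void *data)
{
	example_close(data);
}

static int example_devm_open(struct device *dev, struct example_dev *edev)
{
	int ret;

	ret = example_open(edev);
	if (ret)
		return ret;

	/* Undo example_open() on detach, or right away if this call fails */
	return devm_add_action_or_reset(dev, example_devm_close, edev);
}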
diff --git a/drivers/tty/serial/21285.c b/drivers/tty/serial/21285.c
index d756fcc884cb..4de0c975ebdc 100644
--- a/drivers/tty/serial/21285.c
+++ b/drivers/tty/serial/21285.c
@@ -185,14 +185,14 @@ static void serial21285_break_ctl(struct uart_port *port, int break_state)
unsigned long flags;
unsigned int h_lcr;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
h_lcr = *CSR_H_UBRLCR;
if (break_state)
h_lcr |= H_UBRLCR_BREAK;
else
h_lcr &= ~H_UBRLCR_BREAK;
*CSR_H_UBRLCR = h_lcr;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int serial21285_startup(struct uart_port *port)
@@ -272,7 +272,7 @@ serial21285_set_termios(struct uart_port *port, struct ktermios *termios,
if (port->fifosize)
h_lcr |= H_UBRLCR_FIFO;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/*
* Update the per-port timeout.
@@ -309,7 +309,7 @@ serial21285_set_termios(struct uart_port *port, struct ktermios *termios,
*CSR_H_UBRLCR = h_lcr;
*CSR_UARTCON = 1;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *serial21285_type(struct uart_port *port)
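The uart_port_lock_irqsave()/uart_port_unlock_irqrestore() helpers used in this and the following conversions wrap the per-port spinlock; the definitions below are a sketch of their assumed shape in <linux/serial_core.h>, not copied from this series:

static inline void uart_port_lock_irqsave(struct uart_port *up,
					  unsigned long *flags)
{
	spin_lock_irqsave(&up->lock, *flags);
}

static inline void uart_port_unlock_irqrestore(struct uart_port *up,
					       unsigned long flags)
{
	spin_unlock_irqrestore(&up->lock, flags);
}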
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
index 4a9e71b2dbbc..d7482ae33a1c 100644
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -34,7 +34,6 @@
struct aspeed_vuart {
struct device *dev;
- struct clk *clk;
int line;
struct timer_list unthrottle_timer;
struct uart_8250_port *port;
@@ -288,9 +287,9 @@ static void aspeed_vuart_set_throttle(struct uart_port *port, bool throttle)
struct uart_8250_port *up = up_to_u8250p(port);
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
__aspeed_vuart_set_throttle(up, throttle);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void aspeed_vuart_throttle(struct uart_port *port)
@@ -340,7 +339,7 @@ static int aspeed_vuart_handle_irq(struct uart_port *port)
if (iir & UART_IIR_NO_INT)
return 0;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
lsr = serial_port_in(port, UART_LSR);
@@ -415,12 +414,14 @@ static int aspeed_vuart_map_irq_polarity(u32 dt)
static int aspeed_vuart_probe(struct platform_device *pdev)
{
struct of_phandle_args sirq_polarity_sense_args;
+ struct device *dev = &pdev->dev;
struct uart_8250_port port;
struct aspeed_vuart *vuart;
struct device_node *np;
struct resource *res;
u32 clk, prop, sirq[2];
int rc, sirq_polarity;
+ struct clk *vclk;
np = pdev->dev.of_node;
@@ -453,19 +454,13 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
return rc;
if (of_property_read_u32(np, "clock-frequency", &clk)) {
- vuart->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(vuart->clk)) {
- dev_warn(&pdev->dev,
- "clk or clock-frequency not defined\n");
- rc = PTR_ERR(vuart->clk);
+ vclk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(vclk)) {
+ rc = dev_err_probe(dev, PTR_ERR(vclk), "clk or clock-frequency not defined\n");
goto err_sysfs_remove;
}
- rc = clk_prepare_enable(vuart->clk);
- if (rc < 0)
- goto err_sysfs_remove;
-
- clk = clk_get_rate(vuart->clk);
+ clk = clk_get_rate(vclk);
}
/* If current-speed was set, then try not to change it. */
@@ -533,7 +528,7 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
rc = aspeed_vuart_set_lpc_address(vuart, prop);
if (rc < 0) {
- dev_err(&pdev->dev, "invalid value in aspeed,lpc-io-reg property\n");
+ dev_err_probe(dev, rc, "invalid value in aspeed,lpc-io-reg property\n");
goto err_clk_disable;
}
@@ -545,14 +540,14 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
rc = aspeed_vuart_set_sirq(vuart, sirq[0]);
if (rc < 0) {
- dev_err(&pdev->dev, "invalid sirq number in aspeed,lpc-interrupts property\n");
+ dev_err_probe(dev, rc, "invalid sirq number in aspeed,lpc-interrupts property\n");
goto err_clk_disable;
}
sirq_polarity = aspeed_vuart_map_irq_polarity(sirq[1]);
if (sirq_polarity < 0) {
- dev_err(&pdev->dev, "invalid sirq polarity in aspeed,lpc-interrupts property\n");
- rc = sirq_polarity;
+ rc = dev_err_probe(dev, sirq_polarity,
+ "invalid sirq polarity in aspeed,lpc-interrupts property\n");
goto err_clk_disable;
}
@@ -565,7 +560,6 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
return 0;
err_clk_disable:
- clk_disable_unprepare(vuart->clk);
irq_dispose_mapping(port.port.irq);
err_sysfs_remove:
sysfs_remove_group(&vuart->dev->kobj, &aspeed_vuart_attr_group);
@@ -580,7 +574,6 @@ static int aspeed_vuart_remove(struct platform_device *pdev)
aspeed_vuart_set_enabled(vuart, false);
serial8250_unregister_port(vuart->line);
sysfs_remove_group(&vuart->dev->kobj, &aspeed_vuart_attr_group);
- clk_disable_unprepare(vuart->clk);
return 0;
}
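devm_clk_get_enabled(), used in place of the old devm_clk_get() + clk_prepare_enable() pair here and in several drivers below, also unprepares and disables the clock automatically on detach. A condensed probe sketch (the probe function and message are illustrative):

static int example_probe_clock(struct device *dev)
{
	struct clk *clk;

	clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(clk))
		/* Logs the error, or records a deferral reason for -EPROBE_DEFER */
		return dev_err_probe(dev, PTR_ERR(clk), "failed to get clock\n");

	dev_dbg(dev, "clock runs at %lu Hz\n", clk_get_rate(clk));
	return 0;
}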
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
index aa5aff046756..55dea2539c47 100644
--- a/drivers/tty/serial/8250/8250_bcm7271.c
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
@@ -567,7 +567,7 @@ static irqreturn_t brcmuart_isr(int irq, void *dev_id)
if (interrupts == 0)
return IRQ_NONE;
- spin_lock_irqsave(&up->lock, flags);
+ uart_port_lock_irqsave(up, &flags);
/* Clear all interrupts */
udma_writel(priv, REGS_DMA_ISR, UDMA_INTR_CLEAR, interrupts);
@@ -581,7 +581,7 @@ static irqreturn_t brcmuart_isr(int irq, void *dev_id)
if ((rval | tval) == 0)
dev_warn(dev, "Spurious interrupt: 0x%x\n", interrupts);
- spin_unlock_irqrestore(&up->lock, flags);
+ uart_port_unlock_irqrestore(up, flags);
return IRQ_HANDLED;
}
@@ -608,10 +608,10 @@ static int brcmuart_startup(struct uart_port *port)
*
* Synchronize UART_IER access against the console.
*/
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
up->ier &= ~UART_IER_RDI;
serial_port_out(port, UART_IER, up->ier);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
priv->tx_running = false;
priv->dma.rx_dma = NULL;
@@ -629,7 +629,7 @@ static void brcmuart_shutdown(struct uart_port *port)
struct brcmuart_priv *priv = up->port.private_data;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
priv->shutdown = true;
if (priv->dma_enabled) {
stop_rx_dma(up);
@@ -645,7 +645,7 @@ static void brcmuart_shutdown(struct uart_port *port)
*/
up->dma = NULL;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
serial8250_do_shutdown(port);
}
@@ -788,7 +788,7 @@ static int brcmuart_handle_irq(struct uart_port *p)
* interrupt but there is no data ready.
*/
if (((iir & UART_IIR_ID) == UART_IIR_RX_TIMEOUT) && !(priv->shutdown)) {
- spin_lock_irqsave(&p->lock, flags);
+ uart_port_lock_irqsave(p, &flags);
status = serial_port_in(p, UART_LSR);
if ((status & UART_LSR_DR) == 0) {
@@ -813,7 +813,7 @@ static int brcmuart_handle_irq(struct uart_port *p)
handled = 1;
}
- spin_unlock_irqrestore(&p->lock, flags);
+ uart_port_unlock_irqrestore(p, flags);
if (handled)
return 1;
}
@@ -831,7 +831,7 @@ static enum hrtimer_restart brcmuart_hrtimer_func(struct hrtimer *t)
if (priv->shutdown)
return HRTIMER_NORESTART;
- spin_lock_irqsave(&p->lock, flags);
+ uart_port_lock_irqsave(p, &flags);
status = serial_port_in(p, UART_LSR);
/*
@@ -855,7 +855,7 @@ static enum hrtimer_restart brcmuart_hrtimer_func(struct hrtimer *t)
status |= UART_MCR_RTS;
serial_port_out(p, UART_MCR, status);
}
- spin_unlock_irqrestore(&p->lock, flags);
+ uart_port_unlock_irqrestore(p, flags);
return HRTIMER_NORESTART;
}
@@ -984,10 +984,9 @@ static int brcmuart_probe(struct platform_device *pdev)
}
/* We should have just the uart base registers or all the registers */
- if (x != 1 && x != REGS_MAX) {
- dev_warn(dev, "%s registers not specified\n", reg_names[x]);
- return -EINVAL;
- }
+ if (x != 1 && x != REGS_MAX)
+ return dev_err_probe(dev, -EINVAL, "%s registers not specified\n",
+ reg_names[x]);
/* if the DMA registers were specified, try to enable DMA */
if (x > REGS_DMA_RX) {
@@ -1016,27 +1015,23 @@ static int brcmuart_probe(struct platform_device *pdev)
of_property_read_u32(np, "clock-frequency", &clk_rate);
/* See if a Baud clock has been specified */
- baud_mux_clk = devm_clk_get(dev, "sw_baud");
- if (IS_ERR(baud_mux_clk)) {
- if (PTR_ERR(baud_mux_clk) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
- goto release_dma;
- }
- dev_dbg(dev, "BAUD MUX clock not specified\n");
- } else {
+ baud_mux_clk = devm_clk_get_optional_enabled(dev, "sw_baud");
+ ret = PTR_ERR_OR_ZERO(baud_mux_clk);
+ if (ret)
+ goto release_dma;
+ if (baud_mux_clk) {
dev_dbg(dev, "BAUD MUX clock found\n");
- ret = clk_prepare_enable(baud_mux_clk);
- if (ret)
- goto release_dma;
+
priv->baud_mux_clk = baud_mux_clk;
init_real_clk_rates(dev, priv);
clk_rate = priv->default_mux_rate;
+ } else {
+ dev_dbg(dev, "BAUD MUX clock not specified\n");
}
if (clk_rate == 0) {
- dev_err(dev, "clock-frequency or clk not defined\n");
- ret = -EINVAL;
- goto err_clk_disable;
+ ret = dev_err_probe(dev, -EINVAL, "clock-frequency or clk not defined\n");
+ goto release_dma;
}
dev_dbg(dev, "DMA is %senabled\n", priv->dma_enabled ? "" : "not ");
@@ -1093,7 +1088,7 @@ static int brcmuart_probe(struct platform_device *pdev)
ret = serial8250_register_8250_port(&up);
if (ret < 0) {
- dev_err(dev, "unable to register 8250 port\n");
+ dev_err_probe(dev, ret, "unable to register 8250 port\n");
goto err;
}
priv->line = ret;
@@ -1102,14 +1097,13 @@ static int brcmuart_probe(struct platform_device *pdev)
if (priv->dma_enabled) {
dma_irq = platform_get_irq_byname(pdev, "dma");
if (dma_irq < 0) {
- ret = dma_irq;
- dev_err(dev, "no IRQ resource info\n");
+ ret = dev_err_probe(dev, dma_irq, "no IRQ resource info\n");
goto err1;
}
ret = devm_request_irq(dev, dma_irq, brcmuart_isr,
IRQF_SHARED, "uart DMA irq", &new_port->port);
if (ret) {
- dev_err(dev, "unable to register IRQ handler\n");
+ dev_err_probe(dev, ret, "unable to register IRQ handler\n");
goto err1;
}
}
@@ -1121,8 +1115,6 @@ err1:
serial8250_unregister_port(priv->line);
err:
brcmuart_free_bufs(dev, priv);
-err_clk_disable:
- clk_disable_unprepare(baud_mux_clk);
release_dma:
if (priv->dma_enabled)
brcmuart_arbitration(priv, 0);
@@ -1137,7 +1129,6 @@ static int brcmuart_remove(struct platform_device *pdev)
hrtimer_cancel(&priv->hrt);
serial8250_unregister_port(priv->line);
brcmuart_free_bufs(&pdev->dev, priv);
- clk_disable_unprepare(priv->baud_mux_clk);
if (priv->dma_enabled)
brcmuart_arbitration(priv, 0);
return 0;
@@ -1154,10 +1145,10 @@ static int __maybe_unused brcmuart_suspend(struct device *dev)
* This will prevent resume from enabling RTS before the
* baud rate has been restored.
*/
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
priv->saved_mctrl = port->mctrl;
port->mctrl &= ~TIOCM_RTS;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
serial8250_suspend_port(priv->line);
clk_disable_unprepare(priv->baud_mux_clk);
@@ -1196,10 +1187,10 @@ static int __maybe_unused brcmuart_resume(struct device *dev)
if (priv->saved_mctrl & TIOCM_RTS) {
/* Restore RTS */
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
port->mctrl |= TIOCM_RTS;
port->ops->set_mctrl(port, port->mctrl);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
return 0;
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 3449f8790e46..912733151858 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -259,7 +259,7 @@ static void serial8250_backup_timeout(struct timer_list *t)
unsigned int iir, ier = 0, lsr;
unsigned long flags;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* Must disable interrupts or else we risk racing with the interrupt
@@ -292,7 +292,7 @@ static void serial8250_backup_timeout(struct timer_list *t)
if (up->port.irq)
serial_out(up, UART_IER, ier);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
/* Standard timer interval plus 0.2s to keep the port running */
mod_timer(&up->timer,
@@ -611,7 +611,7 @@ static int univ8250_console_setup(struct console *co, char *options)
* if so, search for the first available port that does have
* console support.
*/
- if (co->index >= UART_NR)
+ if (co->index < 0 || co->index >= UART_NR)
co->index = 0;
/*
@@ -992,11 +992,11 @@ static void serial_8250_overrun_backoff_work(struct work_struct *work)
struct uart_port *port = &up->port;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
up->ier |= UART_IER_RLSI | UART_IER_RDI;
up->port.read_status_mask |= UART_LSR_DR;
serial_out(up, UART_IER, up->ier);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/**
@@ -1194,9 +1194,9 @@ void serial8250_unregister_port(int line)
if (uart->em485) {
unsigned long flags;
- spin_lock_irqsave(&uart->port.lock, flags);
+ uart_port_lock_irqsave(&uart->port, &flags);
serial8250_em485_destroy(uart);
- spin_unlock_irqrestore(&uart->port.lock, flags);
+ uart_port_unlock_irqrestore(&uart->port, flags);
}
uart_remove_one_port(&serial8250_reg, &uart->port);
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index 7fa66501792d..8b30ca8fdd3f 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -22,7 +22,7 @@ static void __dma_tx_complete(void *param)
dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
UART_XMIT_SIZE, DMA_TO_DEVICE);
- spin_lock_irqsave(&p->port.lock, flags);
+ uart_port_lock_irqsave(&p->port, &flags);
dma->tx_running = 0;
@@ -35,7 +35,7 @@ static void __dma_tx_complete(void *param)
if (ret || !dma->tx_running)
serial8250_set_THRI(p);
- spin_unlock_irqrestore(&p->port.lock, flags);
+ uart_port_unlock_irqrestore(&p->port, flags);
}
static void __dma_rx_complete(struct uart_8250_port *p)
@@ -70,7 +70,7 @@ static void dma_rx_complete(void *param)
struct uart_8250_dma *dma = p->dma;
unsigned long flags;
- spin_lock_irqsave(&p->port.lock, flags);
+ uart_port_lock_irqsave(&p->port, &flags);
if (dma->rx_running)
__dma_rx_complete(p);
@@ -80,7 +80,7 @@ static void dma_rx_complete(void *param)
*/
if (!dma->rx_running && (serial_lsr_in(p) & UART_LSR_DR))
p->dma->rx_dma(p);
- spin_unlock_irqrestore(&p->port.lock, flags);
+ uart_port_unlock_irqrestore(&p->port, flags);
}
int serial8250_tx_dma(struct uart_8250_port *p)
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index f4cafca1a7da..b94f567647cb 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -263,20 +263,20 @@ static int dw8250_handle_irq(struct uart_port *p)
* so we limit the workaround only to non-DMA mode.
*/
if (!up->dma && rx_timeout) {
- spin_lock_irqsave(&p->lock, flags);
+ uart_port_lock_irqsave(p, &flags);
status = serial_lsr_in(up);
if (!(status & (UART_LSR_DR | UART_LSR_BI)))
(void) p->serial_in(p, UART_RX);
- spin_unlock_irqrestore(&p->lock, flags);
+ uart_port_unlock_irqrestore(p, flags);
}
/* Manually stop the Rx DMA transfer when acting as flow controller */
if (quirks & DW_UART_QUIRK_IS_DMA_FC && up->dma && up->dma->rx_running && rx_timeout) {
- spin_lock_irqsave(&p->lock, flags);
+ uart_port_lock_irqsave(p, &flags);
status = serial_lsr_in(up);
- spin_unlock_irqrestore(&p->lock, flags);
+ uart_port_unlock_irqrestore(p, flags);
if (status & (UART_LSR_DR | UART_LSR_BI)) {
dw8250_writel_ext(p, RZN1_UART_RDMACR, 0);
@@ -498,11 +498,6 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
}
}
-static void dw8250_clk_disable_unprepare(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static void dw8250_reset_control_assert(void *data)
{
reset_control_assert(data);
@@ -598,23 +593,15 @@ static int dw8250_probe(struct platform_device *pdev)
device_property_read_u32(dev, "clock-frequency", &p->uartclk);
/* If there is separate baudclk, get the rate from it. */
- data->clk = devm_clk_get_optional(dev, "baudclk");
+ data->clk = devm_clk_get_optional_enabled(dev, "baudclk");
if (data->clk == NULL)
- data->clk = devm_clk_get_optional(dev, NULL);
+ data->clk = devm_clk_get_optional_enabled(dev, NULL);
if (IS_ERR(data->clk))
return PTR_ERR(data->clk);
INIT_WORK(&data->clk_work, dw8250_clk_work_cb);
data->clk_notifier.notifier_call = dw8250_clk_notifier_cb;
- err = clk_prepare_enable(data->clk);
- if (err)
- return dev_err_probe(dev, err, "could not enable optional baudclk\n");
-
- err = devm_add_action_or_reset(dev, dw8250_clk_disable_unprepare, data->clk);
- if (err)
- return err;
-
if (data->clk)
p->uartclk = clk_get_rate(data->clk);
@@ -622,18 +609,10 @@ static int dw8250_probe(struct platform_device *pdev)
if (!p->uartclk)
return dev_err_probe(dev, -EINVAL, "clock rate not defined\n");
- data->pclk = devm_clk_get_optional(dev, "apb_pclk");
+ data->pclk = devm_clk_get_optional_enabled(dev, "apb_pclk");
if (IS_ERR(data->pclk))
return PTR_ERR(data->pclk);
- err = clk_prepare_enable(data->pclk);
- if (err)
- return dev_err_probe(dev, err, "could not enable apb_pclk\n");
-
- err = devm_add_action_or_reset(dev, dw8250_clk_disable_unprepare, data->pclk);
- if (err)
- return err;
-
data->rst = devm_reset_control_get_optional_exclusive(dev, NULL);
if (IS_ERR(data->rst))
return PTR_ERR(data->rst);
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 077c3ba3539e..6085d356ad86 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -18,7 +18,6 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tty.h>
-#include <linux/8250_pci.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
@@ -47,12 +46,6 @@
#define PCI_SUBDEVICE_ID_USR_2980 0x0128
#define PCI_SUBDEVICE_ID_USR_2981 0x0129
-#define PCI_DEVICE_ID_SEALEVEL_710xC 0x1001
-#define PCI_DEVICE_ID_SEALEVEL_720xC 0x1002
-#define PCI_DEVICE_ID_SEALEVEL_740xC 0x1004
-#define PCI_DEVICE_ID_SEALEVEL_780xC 0x1008
-#define PCI_DEVICE_ID_SEALEVEL_716xC 0x1010
-
#define UART_EXAR_INT0 0x80
#define UART_EXAR_8XMODE 0x88 /* 8X sampling rate select */
#define UART_EXAR_SLEEP 0x8b /* Sleep mode */
@@ -84,6 +77,9 @@
#define UART_EXAR_RS485_DLY(x) ((x) << 4)
+#define UART_EXAR_DLD 0x02 /* Divisor Fractional */
+#define UART_EXAR_DLD_485_POLARITY 0x80 /* RS-485 Enable Signal Polarity */
+
/*
* IOT2040 MPIO wiring semantics:
*
@@ -201,9 +197,9 @@ static int xr17v35x_startup(struct uart_port *port)
*
* Synchronize UART_IER access against the console.
*/
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
serial_port_out(port, UART_IER, 0);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
return serial8250_do_startup(port);
}
@@ -445,6 +441,44 @@ static int generic_rs485_config(struct uart_port *port, struct ktermios *termios
return 0;
}
+static int sealevel_rs485_config(struct uart_port *port, struct ktermios *termios,
+ struct serial_rs485 *rs485)
+{
+ u8 __iomem *p = port->membase;
+ u8 old_lcr;
+ u8 efr;
+ u8 dld;
+ int ret;
+
+ ret = generic_rs485_config(port, termios, rs485);
+ if (ret)
+ return ret;
+
+ if (rs485->flags & SER_RS485_ENABLED) {
+ old_lcr = readb(p + UART_LCR);
+
+ /* Set EFR[4]=1 to enable enhanced feature registers */
+ efr = readb(p + UART_XR_EFR);
+ efr |= UART_EFR_ECB;
+ writeb(efr, p + UART_XR_EFR);
+
+ /* Set MCR to use DTR as Auto-RS485 Enable signal */
+ writeb(UART_MCR_OUT1, p + UART_MCR);
+
+ /* Set LCR[7]=1 to enable access to DLD register */
+ writeb(old_lcr | UART_LCR_DLAB, p + UART_LCR);
+
+ /* Set DLD[7]=1 for inverted RS485 Enable logic */
+ dld = readb(p + UART_EXAR_DLD);
+ dld |= UART_EXAR_DLD_485_POLARITY;
+ writeb(dld, p + UART_EXAR_DLD);
+
+ writeb(old_lcr, p + UART_LCR);
+ }
+
+ return 0;
+}
+
static const struct serial_rs485 generic_rs485_supported = {
.flags = SER_RS485_ENABLED,
};
@@ -566,6 +600,9 @@ pci_xr17v35x_setup(struct exar8250 *priv, struct pci_dev *pcidev,
port->port.rs485_config = platform->rs485_config;
port->port.rs485_supported = *(platform->rs485_supported);
+ if (pcidev->subsystem_vendor == PCI_VENDOR_ID_SEALEVEL)
+ port->port.rs485_config = sealevel_rs485_config;
+
/*
* Setup the UART clock for the devices on expansion slot to
* half the clock speed of the main chip (which is 125MHz)
@@ -652,8 +689,6 @@ exar_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
nr_ports = BIT(((pcidev->device & 0x38) >> 3) - 1);
else if (board->num_ports)
nr_ports = board->num_ports;
- else if (pcidev->vendor == PCI_VENDOR_ID_SEALEVEL)
- nr_ports = pcidev->device & 0xff;
else
nr_ports = pcidev->device & 0x0f;
@@ -893,12 +928,6 @@ static const struct pci_device_id exar_pci_tbl[] = {
EXAR_DEVICE(COMMTECH, 4224PCI335, pbn_fastcom335_4),
EXAR_DEVICE(COMMTECH, 2324PCI335, pbn_fastcom335_4),
EXAR_DEVICE(COMMTECH, 2328PCI335, pbn_fastcom335_8),
-
- EXAR_DEVICE(SEALEVEL, 710xC, pbn_exar_XR17V35x),
- EXAR_DEVICE(SEALEVEL, 720xC, pbn_exar_XR17V35x),
- EXAR_DEVICE(SEALEVEL, 740xC, pbn_exar_XR17V35x),
- EXAR_DEVICE(SEALEVEL, 780xC, pbn_exar_XR17V35x),
- EXAR_DEVICE(SEALEVEL, 716xC, pbn_exar_XR17V35x),
{ 0, }
};
MODULE_DEVICE_TABLE(pci, exar_pci_tbl);
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
index 6af4e1c1210a..f522eb5026c9 100644
--- a/drivers/tty/serial/8250/8250_fsl.c
+++ b/drivers/tty/serial/8250/8250_fsl.c
@@ -30,11 +30,11 @@ int fsl8250_handle_irq(struct uart_port *port)
unsigned int iir;
struct uart_8250_port *up = up_to_u8250p(port);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
iir = port->serial_in(port, UART_IIR);
if (iir & UART_IIR_NO_INT) {
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
return 0;
}
@@ -54,7 +54,7 @@ int fsl8250_handle_irq(struct uart_port *port)
if (unlikely(up->lsr_saved_flags & UART_LSR_BI)) {
up->lsr_saved_flags &= ~UART_LSR_BI;
port->serial_in(port, UART_RX);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
return 1;
}
diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c
index 2cc78a4bf7a1..8ec03863606e 100644
--- a/drivers/tty/serial/8250/8250_mid.c
+++ b/drivers/tty/serial/8250/8250_mid.c
@@ -12,7 +12,6 @@
#include <linux/rational.h>
#include <linux/dma/hsu.h>
-#include <linux/8250_pci.h>
#include "8250.h"
@@ -32,9 +31,9 @@
struct mid8250;
struct mid8250_board {
- unsigned int flags;
unsigned long freq;
unsigned int base_baud;
+ unsigned int bar;
int (*setup)(struct mid8250 *, struct uart_port *p);
void (*exit)(struct mid8250 *);
};
@@ -169,7 +168,6 @@ static int dnv_setup(struct mid8250 *mid, struct uart_port *p)
{
struct hsu_dma_chip *chip = &mid->dma_chip;
struct pci_dev *pdev = to_pci_dev(p->dev);
- unsigned int bar = FL_GET_BASE(mid->board->flags);
int ret;
pci_set_master(pdev);
@@ -183,7 +181,7 @@ static int dnv_setup(struct mid8250 *mid, struct uart_port *p)
chip->dev = &pdev->dev;
chip->irq = pci_irq_vector(pdev, 0);
chip->regs = p->membase;
- chip->length = pci_resource_len(pdev, bar);
+ chip->length = pci_resource_len(pdev, mid->board->bar);
chip->offset = DNV_DMA_CHAN_OFFSET;
/* Falling back to PIO mode if DMA probing fails */
@@ -291,7 +289,6 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct uart_8250_port uart;
struct mid8250 *mid;
- unsigned int bar;
int ret;
ret = pcim_enable_device(pdev);
@@ -303,7 +300,6 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
mid->board = (struct mid8250_board *)id->driver_data;
- bar = FL_GET_BASE(mid->board->flags);
memset(&uart, 0, sizeof(struct uart_8250_port));
@@ -316,8 +312,8 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
uart.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE;
uart.port.set_termios = mid8250_set_termios;
- uart.port.mapbase = pci_resource_start(pdev, bar);
- uart.port.membase = pcim_iomap(pdev, bar, 0);
+ uart.port.mapbase = pci_resource_start(pdev, mid->board->bar);
+ uart.port.membase = pcim_iomap(pdev, mid->board->bar, 0);
if (!uart.port.membase)
return -ENOMEM;
@@ -353,25 +349,25 @@ static void mid8250_remove(struct pci_dev *pdev)
}
static const struct mid8250_board pnw_board = {
- .flags = FL_BASE0,
.freq = 50000000,
.base_baud = 115200,
+ .bar = 0,
.setup = pnw_setup,
.exit = pnw_exit,
};
static const struct mid8250_board tng_board = {
- .flags = FL_BASE0,
.freq = 38400000,
.base_baud = 1843200,
+ .bar = 0,
.setup = tng_setup,
.exit = tng_exit,
};
static const struct mid8250_board dnv_board = {
- .flags = FL_BASE1,
.freq = 133333333,
.base_baud = 115200,
+ .bar = 1,
.setup = dnv_setup,
.exit = dnv_exit,
};
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index 74da5676ce67..23457daae8a1 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -102,7 +102,7 @@ static void mtk8250_dma_rx_complete(void *param)
if (data->rx_status == DMA_RX_SHUTDOWN)
return;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
total = dma->rx_size - state.residue;
@@ -128,7 +128,7 @@ static void mtk8250_dma_rx_complete(void *param)
mtk8250_rx_dma(up);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static void mtk8250_rx_dma(struct uart_8250_port *up)
@@ -368,7 +368,7 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/*
* Update the per-port timeout.
@@ -416,7 +416,7 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
if (uart_console(port))
up->port.cons->cflag = termios->c_cflag;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
/* Don't rewrite B0 */
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 51329625c48a..ef3e745bd09c 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -33,7 +33,8 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
struct of_serial_info *info)
{
struct resource resource;
- struct device_node *np = ofdev->dev.of_node;
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = dev->of_node;
struct uart_port *port = &up->port;
u32 clk, spd, prop;
int ret, irq;
@@ -46,19 +47,12 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
if (of_property_read_u32(np, "clock-frequency", &clk)) {
/* Get clk rate through clk driver if present */
- info->clk = devm_clk_get(&ofdev->dev, NULL);
+ info->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(info->clk)) {
- ret = PTR_ERR(info->clk);
- if (ret != -EPROBE_DEFER)
- dev_warn(&ofdev->dev,
- "failed to get clock: %d\n", ret);
+ ret = dev_err_probe(dev, PTR_ERR(info->clk), "failed to get clock\n");
goto err_pmruntime;
}
- ret = clk_prepare_enable(info->clk);
- if (ret < 0)
- goto err_pmruntime;
-
clk = clk_get_rate(info->clk);
}
/* If current-speed was set, then try not to change it. */
@@ -67,8 +61,8 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
ret = of_address_to_resource(np, 0, &resource);
if (ret) {
- dev_warn(&ofdev->dev, "invalid address\n");
- goto err_unprepare;
+ dev_err_probe(dev, ret, "invalid address\n");
+ goto err_pmruntime;
}
port->flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_FIXED_PORT |
@@ -85,10 +79,9 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
/* Check for shifted address mapping */
if (of_property_read_u32(np, "reg-offset", &prop) == 0) {
if (prop >= port->mapsize) {
- dev_warn(&ofdev->dev, "reg-offset %u exceeds region size %pa\n",
- prop, &port->mapsize);
- ret = -EINVAL;
- goto err_unprepare;
+ ret = dev_err_probe(dev, -EINVAL, "reg-offset %u exceeds region size %pa\n",
+ prop, &port->mapsize);
+ goto err_pmruntime;
}
port->mapbase += prop;
@@ -109,10 +102,9 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
UPIO_MEM32BE : UPIO_MEM32;
break;
default:
- dev_warn(&ofdev->dev, "unsupported reg-io-width (%d)\n",
- prop);
- ret = -EINVAL;
- goto err_unprepare;
+ ret = dev_err_probe(dev, -EINVAL, "unsupported reg-io-width (%u)\n",
+ prop);
+ goto err_pmruntime;
}
}
port->flags |= UPF_IOREMAP;
@@ -139,7 +131,7 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
if (irq < 0) {
if (irq == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
- goto err_unprepare;
+ goto err_pmruntime;
}
/* IRQ support not mandatory */
irq = 0;
@@ -150,12 +142,12 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
info->rst = devm_reset_control_get_optional_shared(&ofdev->dev, NULL);
if (IS_ERR(info->rst)) {
ret = PTR_ERR(info->rst);
- goto err_unprepare;
+ goto err_pmruntime;
}
ret = reset_control_deassert(info->rst);
if (ret)
- goto err_unprepare;
+ goto err_pmruntime;
port->type = type;
port->uartclk = clk;
@@ -173,7 +165,7 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
case PORT_RT2880:
ret = rt288x_setup(port);
if (ret)
- goto err_unprepare;
+ goto err_pmruntime;
break;
}
@@ -185,8 +177,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
}
return 0;
-err_unprepare:
- clk_disable_unprepare(info->clk);
err_pmruntime:
pm_runtime_put_sync(&ofdev->dev);
pm_runtime_disable(&ofdev->dev);
@@ -253,7 +243,6 @@ err_dispose:
irq_dispose_mapping(port8250.port.irq);
pm_runtime_put_sync(&ofdev->dev);
pm_runtime_disable(&ofdev->dev);
- clk_disable_unprepare(info->clk);
err_free:
kfree(info);
return ret;
@@ -271,7 +260,6 @@ static int of_platform_serial_remove(struct platform_device *ofdev)
reset_control_assert(info->rst);
pm_runtime_put_sync(&ofdev->dev);
pm_runtime_disable(&ofdev->dev);
- clk_disable_unprepare(info->clk);
kfree(info);
return 0;
}
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index ca972fd37725..2d42f485c987 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -8,6 +8,7 @@
*
*/
+#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
@@ -27,6 +28,7 @@
#include <linux/pm_wakeirq.h>
#include <linux/dma-mapping.h>
#include <linux/sys_soc.h>
+#include <linux/pm_domain.h>
#include "8250.h"
@@ -114,6 +116,12 @@
/* RX FIFO occupancy indicator */
#define UART_OMAP_RX_LVL 0x19
+/*
+ * Copy of the genpd flags for the console.
+ * Only used if console suspend is disabled
+ */
+static unsigned int genpd_flags_console;
+
struct omap8250_priv {
void __iomem *membase;
int line;
@@ -130,6 +138,7 @@ struct omap8250_priv {
u8 tx_trigger;
u8 rx_trigger;
+ atomic_t active;
bool is_suspending;
int wakeirq;
int wakeups_enabled;
@@ -401,7 +410,7 @@ static void omap_8250_set_termios(struct uart_port *port,
* interrupts disabled.
*/
pm_runtime_get_sync(port->dev);
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
/*
* Update the per-port timeout.
@@ -504,7 +513,7 @@ static void omap_8250_set_termios(struct uart_port *port,
}
omap8250_restore_regs(up);
- spin_unlock_irq(&up->port.lock);
+ uart_port_unlock_irq(&up->port);
pm_runtime_mark_last_busy(port->dev);
pm_runtime_put_autosuspend(port->dev);
@@ -529,7 +538,7 @@ static void omap_8250_pm(struct uart_port *port, unsigned int state,
pm_runtime_get_sync(port->dev);
/* Synchronize UART_IER access against the console. */
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
efr = serial_in(up, UART_EFR);
@@ -541,7 +550,7 @@ static void omap_8250_pm(struct uart_port *port, unsigned int state,
serial_out(up, UART_EFR, efr);
serial_out(up, UART_LCR, 0);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
pm_runtime_mark_last_busy(port->dev);
pm_runtime_put_autosuspend(port->dev);
@@ -632,14 +641,23 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
unsigned int iir, lsr;
int ret;
+ pm_runtime_get_noresume(port->dev);
+
+ /* Shallow idle state wake-up to an IO interrupt? */
+ if (atomic_add_unless(&priv->active, 1, 1)) {
+ priv->latency = priv->calc_latency;
+ schedule_work(&priv->qos_work);
+ }
+
#ifdef CONFIG_SERIAL_8250_DMA
if (up->dma) {
ret = omap_8250_dma_handle_irq(port);
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put(port->dev);
return IRQ_RETVAL(ret);
}
#endif
- serial8250_rpm_get(up);
lsr = serial_port_in(port, UART_LSR);
iir = serial_port_in(port, UART_IIR);
ret = serial8250_handle_irq(port, iir);
@@ -660,7 +678,7 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
unsigned long delay;
/* Synchronize UART_IER access against the console. */
- spin_lock(&port->lock);
+ uart_port_lock(port);
up->ier = port->serial_in(port, UART_IER);
if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
port->ops->stop_rx(port);
@@ -670,13 +688,14 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
*/
cancel_delayed_work(&up->overrun_backoff);
}
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
delay = msecs_to_jiffies(up->overrun_backoff_time_ms);
schedule_delayed_work(&up->overrun_backoff, delay);
}
- serial8250_rpm_put(up);
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put(port->dev);
return IRQ_RETVAL(ret);
}
@@ -717,10 +736,10 @@ static int omap_8250_startup(struct uart_port *port)
}
/* Synchronize UART_IER access against the console. */
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
up->ier = UART_IER_RLSI | UART_IER_RDI;
serial_out(up, UART_IER, up->ier);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
#ifdef CONFIG_PM
up->capabilities |= UART_CAP_RPM;
@@ -733,9 +752,9 @@ static int omap_8250_startup(struct uart_port *port)
serial_out(up, UART_OMAP_WER, priv->wer);
if (up->dma && !(priv->habit & UART_HAS_EFR2)) {
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
up->dma->rx_dma(up);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
}
enable_irq(up->port.irq);
@@ -761,10 +780,10 @@ static void omap_8250_shutdown(struct uart_port *port)
serial_out(up, UART_OMAP_EFR2, 0x0);
/* Synchronize UART_IER access against the console. */
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
up->ier = 0;
serial_out(up, UART_IER, 0);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
disable_irq_nosync(up->port.irq);
dev_pm_clear_wake_irq(port->dev);
@@ -789,10 +808,10 @@ static void omap_8250_throttle(struct uart_port *port)
pm_runtime_get_sync(port->dev);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
port->ops->stop_rx(port);
priv->throttled = true;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
pm_runtime_mark_last_busy(port->dev);
pm_runtime_put_autosuspend(port->dev);
@@ -807,14 +826,14 @@ static void omap_8250_unthrottle(struct uart_port *port)
pm_runtime_get_sync(port->dev);
/* Synchronize UART_IER access against the console. */
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
priv->throttled = false;
if (up->dma)
up->dma->rx_dma(up);
up->ier |= UART_IER_RLSI | UART_IER_RDI;
port->read_status_mask |= UART_LSR_DR;
serial_out(up, UART_IER, up->ier);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
pm_runtime_mark_last_busy(port->dev);
pm_runtime_put_autosuspend(port->dev);
@@ -958,7 +977,7 @@ static void __dma_rx_complete(void *param)
unsigned long flags;
/* Synchronize UART_IER access against the console. */
- spin_lock_irqsave(&p->port.lock, flags);
+ uart_port_lock_irqsave(&p->port, &flags);
/*
* If the tx status is not DMA_COMPLETE, then this is a delayed
@@ -967,7 +986,7 @@ static void __dma_rx_complete(void *param)
*/
if (dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state) !=
DMA_COMPLETE) {
- spin_unlock_irqrestore(&p->port.lock, flags);
+ uart_port_unlock_irqrestore(&p->port, flags);
return;
}
__dma_rx_do_complete(p);
@@ -978,7 +997,7 @@ static void __dma_rx_complete(void *param)
omap_8250_rx_dma(p);
}
- spin_unlock_irqrestore(&p->port.lock, flags);
+ uart_port_unlock_irqrestore(&p->port, flags);
}
static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
@@ -1083,7 +1102,7 @@ static void omap_8250_dma_tx_complete(void *param)
dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
UART_XMIT_SIZE, DMA_TO_DEVICE);
- spin_lock_irqsave(&p->port.lock, flags);
+ uart_port_lock_irqsave(&p->port, &flags);
dma->tx_running = 0;
@@ -1112,7 +1131,7 @@ static void omap_8250_dma_tx_complete(void *param)
serial8250_set_THRI(p);
}
- spin_unlock_irqrestore(&p->port.lock, flags);
+ uart_port_unlock_irqrestore(&p->port, flags);
}
static int omap_8250_tx_dma(struct uart_8250_port *p)
@@ -1270,15 +1289,12 @@ static int omap_8250_dma_handle_irq(struct uart_port *port)
u16 status;
u8 iir;
- serial8250_rpm_get(up);
-
iir = serial_port_in(port, UART_IIR);
if (iir & UART_IIR_NO_INT) {
- serial8250_rpm_put(up);
return IRQ_HANDLED;
}
- spin_lock(&port->lock);
+ uart_port_lock(port);
status = serial_port_in(port, UART_LSR);
@@ -1305,7 +1321,6 @@ static int omap_8250_dma_handle_irq(struct uart_port *port)
uart_unlock_and_check_sysrq(port);
- serial8250_rpm_put(up);
return 1;
}
@@ -1503,8 +1518,6 @@ static int omap8250_probe(struct platform_device *pdev)
if (!of_get_available_child_count(pdev->dev.of_node))
pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
- pm_runtime_irq_safe(&pdev->dev);
-
pm_runtime_get_sync(&pdev->dev);
omap_serial_fill_features_erratas(&up, priv);
@@ -1617,6 +1630,7 @@ static int omap8250_suspend(struct device *dev)
{
struct omap8250_priv *priv = dev_get_drvdata(dev);
struct uart_8250_port *up = serial8250_get_port(priv->line);
+ struct generic_pm_domain *genpd = pd_to_genpd(dev->pm_domain);
int err = 0;
serial8250_suspend_port(priv->line);
@@ -1627,8 +1641,19 @@ static int omap8250_suspend(struct device *dev)
if (!device_may_wakeup(dev))
priv->wer = 0;
serial_out(up, UART_OMAP_WER, priv->wer);
- if (uart_console(&up->port) && console_suspend_enabled)
- err = pm_runtime_force_suspend(dev);
+ if (uart_console(&up->port)) {
+ if (console_suspend_enabled)
+ err = pm_runtime_force_suspend(dev);
+ else {
+ /*
+ * The pd shall not be powered-off (no console suspend).
+ * Make a copy of the genpd flags before setting it always-on.
+ * The original value is restored during resume.
+ */
+ genpd_flags_console = genpd->flags;
+ genpd->flags |= GENPD_FLAG_ALWAYS_ON;
+ }
+ }
flush_work(&priv->qos_work);
return err;
@@ -1638,12 +1663,16 @@ static int omap8250_resume(struct device *dev)
{
struct omap8250_priv *priv = dev_get_drvdata(dev);
struct uart_8250_port *up = serial8250_get_port(priv->line);
+ struct generic_pm_domain *genpd = pd_to_genpd(dev->pm_domain);
int err;
- if (uart_console(&up->port) && console_suspend_enabled) {
- err = pm_runtime_force_resume(dev);
- if (err)
- return err;
+ if (uart_console(&up->port)) {
+ if (console_suspend_enabled) {
+ err = pm_runtime_force_resume(dev);
+ if (err)
+ return err;
+ } else {
+ genpd->flags = genpd_flags_console;
+ }
}
serial8250_resume_port(priv->line);
@@ -1743,6 +1772,7 @@ static int omap8250_runtime_suspend(struct device *dev)
priv->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
schedule_work(&priv->qos_work);
+ atomic_set(&priv->active, 0);
return 0;
}
@@ -1752,23 +1782,29 @@ static int omap8250_runtime_resume(struct device *dev)
struct omap8250_priv *priv = dev_get_drvdata(dev);
struct uart_8250_port *up = NULL;
+ /* Did the hardware wake to a device IO interrupt before a wakeirq? */
+ if (atomic_read(&priv->active))
+ return 0;
+
if (priv->line >= 0)
up = serial8250_get_port(priv->line);
if (up && omap8250_lost_context(up)) {
- spin_lock_irq(&up->port.lock);
+ uart_port_lock_irq(&up->port);
omap8250_restore_regs(up);
- spin_unlock_irq(&up->port.lock);
+ uart_port_unlock_irq(&up->port);
}
if (up && up->dma && up->dma->rxchan && !(priv->habit & UART_HAS_EFR2)) {
- spin_lock_irq(&up->port.lock);
+ uart_port_lock_irq(&up->port);
omap_8250_rx_dma(up);
- spin_unlock_irq(&up->port.lock);
+ uart_port_unlock_irq(&up->port);
}
+ atomic_set(&priv->active, 1);
priv->latency = priv->calc_latency;
schedule_work(&priv->qos_work);
+
return 0;
}
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 62a9bd30b4db..614be0f13a31 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -26,6 +26,84 @@
#include "8250.h"
#include "8250_pcilib.h"
+#define PCI_VENDOR_ID_SBSMODULARIO 0x124B
+#define PCI_SUBVENDOR_ID_SBSMODULARIO 0x124B
+#define PCI_DEVICE_ID_OCTPRO 0x0001
+#define PCI_SUBDEVICE_ID_OCTPRO232 0x0108
+#define PCI_SUBDEVICE_ID_OCTPRO422 0x0208
+#define PCI_SUBDEVICE_ID_POCTAL232 0x0308
+#define PCI_SUBDEVICE_ID_POCTAL422 0x0408
+#define PCI_SUBDEVICE_ID_SIIG_DUAL_00 0x2500
+#define PCI_SUBDEVICE_ID_SIIG_DUAL_30 0x2530
+#define PCI_VENDOR_ID_ADVANTECH 0x13fe
+#define PCI_DEVICE_ID_INTEL_CE4100_UART 0x2e66
+#define PCI_DEVICE_ID_ADVANTECH_PCI1600 0x1600
+#define PCI_DEVICE_ID_ADVANTECH_PCI1600_1611 0x1611
+#define PCI_DEVICE_ID_ADVANTECH_PCI3620 0x3620
+#define PCI_DEVICE_ID_ADVANTECH_PCI3618 0x3618
+#define PCI_DEVICE_ID_ADVANTECH_PCIf618 0xf618
+#define PCI_DEVICE_ID_TITAN_200I 0x8028
+#define PCI_DEVICE_ID_TITAN_400I 0x8048
+#define PCI_DEVICE_ID_TITAN_800I 0x8088
+#define PCI_DEVICE_ID_TITAN_800EH 0xA007
+#define PCI_DEVICE_ID_TITAN_800EHB 0xA008
+#define PCI_DEVICE_ID_TITAN_400EH 0xA009
+#define PCI_DEVICE_ID_TITAN_100E 0xA010
+#define PCI_DEVICE_ID_TITAN_200E 0xA012
+#define PCI_DEVICE_ID_TITAN_400E 0xA013
+#define PCI_DEVICE_ID_TITAN_800E 0xA014
+#define PCI_DEVICE_ID_TITAN_200EI 0xA016
+#define PCI_DEVICE_ID_TITAN_200EISI 0xA017
+#define PCI_DEVICE_ID_TITAN_200V3 0xA306
+#define PCI_DEVICE_ID_TITAN_400V3 0xA310
+#define PCI_DEVICE_ID_TITAN_410V3 0xA312
+#define PCI_DEVICE_ID_TITAN_800V3 0xA314
+#define PCI_DEVICE_ID_TITAN_800V3B 0xA315
+#define PCI_DEVICE_ID_OXSEMI_16PCI958 0x9538
+#define PCIE_DEVICE_ID_NEO_2_OX_IBM 0x00F6
+#define PCI_DEVICE_ID_PLX_CRONYX_OMEGA 0xc001
+#define PCI_DEVICE_ID_INTEL_PATSBURG_KT 0x1d3d
+#define PCI_VENDOR_ID_WCH 0x4348
+#define PCI_DEVICE_ID_WCH_CH352_2S 0x3253
+#define PCI_DEVICE_ID_WCH_CH353_4S 0x3453
+#define PCI_DEVICE_ID_WCH_CH353_2S1PF 0x5046
+#define PCI_DEVICE_ID_WCH_CH353_1S1P 0x5053
+#define PCI_DEVICE_ID_WCH_CH353_2S1P 0x7053
+#define PCI_DEVICE_ID_WCH_CH355_4S 0x7173
+#define PCI_VENDOR_ID_AGESTAR 0x5372
+#define PCI_DEVICE_ID_AGESTAR_9375 0x6872
+#define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
+#define PCI_DEVICE_ID_AMCC_ADDIDATA_APCI7800 0x818e
+
+#define PCIE_VENDOR_ID_WCH 0x1c00
+#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250
+#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
+#define PCIE_DEVICE_ID_WCH_CH384_8S 0x3853
+#define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253
+
+#define PCI_DEVICE_ID_MOXA_CP102E 0x1024
+#define PCI_DEVICE_ID_MOXA_CP102EL 0x1025
+#define PCI_DEVICE_ID_MOXA_CP102N 0x1027
+#define PCI_DEVICE_ID_MOXA_CP104EL_A 0x1045
+#define PCI_DEVICE_ID_MOXA_CP104N 0x1046
+#define PCI_DEVICE_ID_MOXA_CP112N 0x1121
+#define PCI_DEVICE_ID_MOXA_CP114EL 0x1144
+#define PCI_DEVICE_ID_MOXA_CP114N 0x1145
+#define PCI_DEVICE_ID_MOXA_CP116E_A_A 0x1160
+#define PCI_DEVICE_ID_MOXA_CP116E_A_B 0x1161
+#define PCI_DEVICE_ID_MOXA_CP118EL_A 0x1182
+#define PCI_DEVICE_ID_MOXA_CP118E_A_I 0x1183
+#define PCI_DEVICE_ID_MOXA_CP132EL 0x1322
+#define PCI_DEVICE_ID_MOXA_CP132N 0x1323
+#define PCI_DEVICE_ID_MOXA_CP134EL_A 0x1342
+#define PCI_DEVICE_ID_MOXA_CP134N 0x1343
+#define PCI_DEVICE_ID_MOXA_CP138E_A 0x1381
+#define PCI_DEVICE_ID_MOXA_CP168EL_A 0x1683
+
+/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
+#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
+#define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588
+
/*
* init function returns:
* > 0 - number of ports
@@ -1887,6 +1965,49 @@ pci_sunix_setup(struct serial_private *priv,
return setup_port(priv, port, bar, offset, 0);
}
+#define MOXA_PUART_GPIO_EN 0x09
+#define MOXA_PUART_GPIO_OUT 0x0A
+
+#define MOXA_GPIO_PIN2 BIT(2)
+
+static bool pci_moxa_is_mini_pcie(unsigned short device)
+{
+ if (device == PCI_DEVICE_ID_MOXA_CP102N ||
+ device == PCI_DEVICE_ID_MOXA_CP104N ||
+ device == PCI_DEVICE_ID_MOXA_CP112N ||
+ device == PCI_DEVICE_ID_MOXA_CP114N ||
+ device == PCI_DEVICE_ID_MOXA_CP132N ||
+ device == PCI_DEVICE_ID_MOXA_CP134N)
+ return true;
+
+ return false;
+}
+
+static int pci_moxa_init(struct pci_dev *dev)
+{
+ unsigned short device = dev->device;
+ resource_size_t iobar_addr = pci_resource_start(dev, 2);
+ unsigned int num_ports = (device & 0x00F0) >> 4;
+ u8 val;
+
+ /*
+ * Enable the hardware buffer to prevent break signal output while the system boots.
+ * This hardware buffer is only supported on the Mini PCIe series.
+ */
+ if (pci_moxa_is_mini_pcie(device)) {
+ /* Set GPIO direction */
+ val = inb(iobar_addr + MOXA_PUART_GPIO_EN);
+ val |= MOXA_GPIO_PIN2;
+ outb(val, iobar_addr + MOXA_PUART_GPIO_EN);
+ /* Drive the GPIO low */
+ val = inb(iobar_addr + MOXA_PUART_GPIO_OUT);
+ val &= ~MOXA_GPIO_PIN2;
+ outb(val, iobar_addr + MOXA_PUART_GPIO_OUT);
+ }
+
+ return num_ports;
+}
+
static int
pci_moxa_setup(struct serial_private *priv,
const struct pciserial_board *board,
@@ -1903,78 +2024,6 @@ pci_moxa_setup(struct serial_private *priv,
return setup_port(priv, port, bar, offset, 0);
}
-#define PCI_VENDOR_ID_SBSMODULARIO 0x124B
-#define PCI_SUBVENDOR_ID_SBSMODULARIO 0x124B
-#define PCI_DEVICE_ID_OCTPRO 0x0001
-#define PCI_SUBDEVICE_ID_OCTPRO232 0x0108
-#define PCI_SUBDEVICE_ID_OCTPRO422 0x0208
-#define PCI_SUBDEVICE_ID_POCTAL232 0x0308
-#define PCI_SUBDEVICE_ID_POCTAL422 0x0408
-#define PCI_SUBDEVICE_ID_SIIG_DUAL_00 0x2500
-#define PCI_SUBDEVICE_ID_SIIG_DUAL_30 0x2530
-#define PCI_VENDOR_ID_ADVANTECH 0x13fe
-#define PCI_DEVICE_ID_INTEL_CE4100_UART 0x2e66
-#define PCI_DEVICE_ID_ADVANTECH_PCI1600 0x1600
-#define PCI_DEVICE_ID_ADVANTECH_PCI1600_1611 0x1611
-#define PCI_DEVICE_ID_ADVANTECH_PCI3620 0x3620
-#define PCI_DEVICE_ID_ADVANTECH_PCI3618 0x3618
-#define PCI_DEVICE_ID_ADVANTECH_PCIf618 0xf618
-#define PCI_DEVICE_ID_TITAN_200I 0x8028
-#define PCI_DEVICE_ID_TITAN_400I 0x8048
-#define PCI_DEVICE_ID_TITAN_800I 0x8088
-#define PCI_DEVICE_ID_TITAN_800EH 0xA007
-#define PCI_DEVICE_ID_TITAN_800EHB 0xA008
-#define PCI_DEVICE_ID_TITAN_400EH 0xA009
-#define PCI_DEVICE_ID_TITAN_100E 0xA010
-#define PCI_DEVICE_ID_TITAN_200E 0xA012
-#define PCI_DEVICE_ID_TITAN_400E 0xA013
-#define PCI_DEVICE_ID_TITAN_800E 0xA014
-#define PCI_DEVICE_ID_TITAN_200EI 0xA016
-#define PCI_DEVICE_ID_TITAN_200EISI 0xA017
-#define PCI_DEVICE_ID_TITAN_200V3 0xA306
-#define PCI_DEVICE_ID_TITAN_400V3 0xA310
-#define PCI_DEVICE_ID_TITAN_410V3 0xA312
-#define PCI_DEVICE_ID_TITAN_800V3 0xA314
-#define PCI_DEVICE_ID_TITAN_800V3B 0xA315
-#define PCI_DEVICE_ID_OXSEMI_16PCI958 0x9538
-#define PCIE_DEVICE_ID_NEO_2_OX_IBM 0x00F6
-#define PCI_DEVICE_ID_PLX_CRONYX_OMEGA 0xc001
-#define PCI_DEVICE_ID_INTEL_PATSBURG_KT 0x1d3d
-#define PCI_VENDOR_ID_WCH 0x4348
-#define PCI_DEVICE_ID_WCH_CH352_2S 0x3253
-#define PCI_DEVICE_ID_WCH_CH353_4S 0x3453
-#define PCI_DEVICE_ID_WCH_CH353_2S1PF 0x5046
-#define PCI_DEVICE_ID_WCH_CH353_1S1P 0x5053
-#define PCI_DEVICE_ID_WCH_CH353_2S1P 0x7053
-#define PCI_DEVICE_ID_WCH_CH355_4S 0x7173
-#define PCI_VENDOR_ID_AGESTAR 0x5372
-#define PCI_DEVICE_ID_AGESTAR_9375 0x6872
-#define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
-#define PCI_DEVICE_ID_AMCC_ADDIDATA_APCI7800 0x818e
-
-#define PCIE_VENDOR_ID_WCH 0x1c00
-#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250
-#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
-#define PCIE_DEVICE_ID_WCH_CH384_8S 0x3853
-#define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253
-
-#define PCI_DEVICE_ID_MOXA_CP102E 0x1024
-#define PCI_DEVICE_ID_MOXA_CP102EL 0x1025
-#define PCI_DEVICE_ID_MOXA_CP104EL_A 0x1045
-#define PCI_DEVICE_ID_MOXA_CP114EL 0x1144
-#define PCI_DEVICE_ID_MOXA_CP116E_A_A 0x1160
-#define PCI_DEVICE_ID_MOXA_CP116E_A_B 0x1161
-#define PCI_DEVICE_ID_MOXA_CP118EL_A 0x1182
-#define PCI_DEVICE_ID_MOXA_CP118E_A_I 0x1183
-#define PCI_DEVICE_ID_MOXA_CP132EL 0x1322
-#define PCI_DEVICE_ID_MOXA_CP134EL_A 0x1342
-#define PCI_DEVICE_ID_MOXA_CP138E_A 0x1381
-#define PCI_DEVICE_ID_MOXA_CP168EL_A 0x1683
-
-/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
-#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
-#define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588
-
/*
* Master list of serial port init/setup/exit quirks.
* This does not describe the general nature of the port.
@@ -2429,6 +2478,153 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
.init = pci_oxsemi_tornado_init,
.setup = pci_oxsemi_tornado_setup,
},
+ /*
+ * Brainboxes devices - all Oxsemi based
+ */
+ {
+ .vendor = PCI_VENDOR_ID_INTASHIELD,
+ .device = 0x4027,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_oxsemi_tornado_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTASHIELD,
+ .device = 0x4028,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_oxsemi_tornado_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTASHIELD,
+ .device = 0x4029,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_oxsemi_tornado_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTASHIELD,
+ .device = 0x4019,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_oxsemi_tornado_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTASHIELD,
+ .device = 0x4016,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_oxsemi_tornado_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTASHIELD,
+ .device = 0x4015,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_oxsemi_tornado_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTASHIELD,
+ .device = 0x400A,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_oxsemi_tornado_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTASHIELD,
+ .device = 0x400E,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_oxsemi_tornado_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTASHIELD,
+ .device = 0x400C,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_oxsemi_tornado_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTASHIELD,
+ .device = 0x400B,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_oxsemi_tornado_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTASHIELD,
+ .device = 0x400F,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_oxsemi_tornado_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTASHIELD,
+ .device = 0x4010,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_oxsemi_tornado_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTASHIELD,
+ .device = 0x4011,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_oxsemi_tornado_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTASHIELD,
+ .device = 0x401D,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_oxsemi_tornado_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTASHIELD,
+ .device = 0x401E,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_oxsemi_tornado_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTASHIELD,
+ .device = 0x4013,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_oxsemi_tornado_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTASHIELD,
+ .device = 0x4017,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_oxsemi_tornado_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTASHIELD,
+ .device = 0x4018,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_oxsemi_tornado_setup,
+ },
{
.vendor = PCI_VENDOR_ID_INTEL,
.device = 0x8811,
@@ -2635,6 +2831,7 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
+ .init = pci_moxa_init,
.setup = pci_moxa_setup,
},
{
@@ -2854,9 +3051,9 @@ enum pci_board_num_t {
pbn_titan_2_4000000,
pbn_titan_4_4000000,
pbn_titan_8_4000000,
- pbn_moxa8250_2p,
- pbn_moxa8250_4p,
- pbn_moxa8250_8p,
+ pbn_moxa_2,
+ pbn_moxa_4,
+ pbn_moxa_8,
};
/*
@@ -3628,19 +3825,19 @@ static struct pciserial_board pci_boards[] = {
.uart_offset = 0x200,
.first_offset = 0x1000,
},
- [pbn_moxa8250_2p] = {
+ [pbn_moxa_2] = {
.flags = FL_BASE1,
.num_ports = 2,
.base_baud = 921600,
.uart_offset = 0x200,
},
- [pbn_moxa8250_4p] = {
+ [pbn_moxa_4] = {
.flags = FL_BASE1,
.num_ports = 4,
.base_baud = 921600,
.uart_offset = 0x200,
},
- [pbn_moxa8250_8p] = {
+ [pbn_moxa_8] = {
.flags = FL_BASE1,
.num_ports = 8,
.base_baud = 921600,
@@ -4914,10 +5111,16 @@ static const struct pci_device_id serial_pci_tbl[] = {
pbn_b1_bt_1_115200 },
/*
+ * IntaShield IS-100
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0D60,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_1_115200 },
+ /*
* IntaShield IS-200
*/
{ PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS200,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0811 */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0d80 */
pbn_b2_2_115200 },
/*
* IntaShield IS-400
@@ -4925,6 +5128,27 @@ static const struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */
pbn_b2_4_115200 },
+ /*
+ * IntaShield IX-100
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4027,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_1_15625000 },
+ /*
+ * IntaShield IX-200
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4028,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_2_15625000 },
+ /*
+ * IntaShield IX-400
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4029,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
/* Brainboxes Devices */
/*
* Brainboxes UC-101
@@ -4940,10 +5164,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_1_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0AA2,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_1_115200 },
/*
- * Brainboxes UC-257
+ * Brainboxes UC-253/UC-734
*/
- { PCI_VENDOR_ID_INTASHIELD, 0x0861,
+ { PCI_VENDOR_ID_INTASHIELD, 0x0CA1,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
@@ -4979,6 +5207,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x08E2,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x08E3,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
/*
* Brainboxes UC-310
*/
@@ -4989,6 +5225,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
/*
* Brainboxes UC-313
*/
+ { PCI_VENDOR_ID_INTASHIELD, 0x08A1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x08A2,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x08A3,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
@@ -5003,6 +5247,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
/*
* Brainboxes UC-346
*/
+ { PCI_VENDOR_ID_INTASHIELD, 0x0B01,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_4_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0B02,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
@@ -5014,6 +5262,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0A82,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0A83,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
@@ -5026,13 +5278,95 @@ static const struct pci_device_id serial_pci_tbl[] = {
0, 0,
pbn_b2_4_115200 },
/*
- * Brainboxes UC-420/431
+ * Brainboxes UC-420
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0921,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_4_115200 },
/*
+ * Brainboxes UC-607
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x09A1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x09A2,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x09A3,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UC-836
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0D41,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_4_115200 },
+ /*
+ * Brainboxes UP-189
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0AC1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0AC2,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0AC3,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UP-200
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0B21,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0B22,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0B23,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UP-869
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0C01,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0C02,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0C03,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UP-880
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0C21,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0C22,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0C23,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
* Brainboxes PX-101
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x4005,
@@ -5064,7 +5398,7 @@ static const struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_INTASHIELD, 0x4015,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
- pbn_oxsemi_4_15625000 },
+ pbn_oxsemi_2_15625000 },
/*
* Brainboxes PX-260/PX-701
*/
@@ -5073,6 +5407,13 @@ static const struct pci_device_id serial_pci_tbl[] = {
0, 0,
pbn_oxsemi_4_15625000 },
/*
+ * Brainboxes PX-275/279
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0E41,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_8_115200 },
+ /*
* Brainboxes PX-310
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x400E,
@@ -5119,18 +5460,40 @@ static const struct pci_device_id serial_pci_tbl[] = {
0, 0,
pbn_oxsemi_4_15625000 },
/*
- * Brainboxes PX-803
+ * Brainboxes PX-475
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x401D,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_1_15625000 },
+ /*
+ * Brainboxes PX-803/PX-857
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x4009,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
- pbn_b0_1_115200 },
+ pbn_b0_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x4018,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_2_15625000 },
{ PCI_VENDOR_ID_INTASHIELD, 0x401E,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
- pbn_oxsemi_1_15625000 },
+ pbn_oxsemi_2_15625000 },
+ /*
+ * Brainboxes PX-820
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4002,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_4_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x4013,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
/*
- * Brainboxes PX-846
+ * Brainboxes PX-835/PX-846
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x4008,
PCI_ANY_ID, PCI_ANY_ID,
@@ -5349,42 +5712,24 @@ static const struct pci_device_id serial_pci_tbl[] = {
/*
* MOXA
*/
- { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP102E,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_moxa8250_2p },
- { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP102EL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_moxa8250_2p },
- { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP104EL_A,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_moxa8250_4p },
- { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP114EL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_moxa8250_4p },
- { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP116E_A_A,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_moxa8250_8p },
- { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP116E_A_B,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_moxa8250_8p },
- { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP118EL_A,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_moxa8250_8p },
- { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP118E_A_I,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_moxa8250_8p },
- { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP132EL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_moxa8250_2p },
- { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP134EL_A,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_moxa8250_4p },
- { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP138E_A,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_moxa8250_8p },
- { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP168EL_A,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_moxa8250_8p },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102E), pbn_moxa_2 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102EL), pbn_moxa_2 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102N), pbn_moxa_2 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104EL_A), pbn_moxa_4 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104N), pbn_moxa_4 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP112N), pbn_moxa_2 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP114EL), pbn_moxa_4 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP114N), pbn_moxa_4 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP116E_A_A), pbn_moxa_8 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP116E_A_B), pbn_moxa_8 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118EL_A), pbn_moxa_8 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118E_A_I), pbn_moxa_8 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132EL), pbn_moxa_2 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132N), pbn_moxa_2 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP134EL_A), pbn_moxa_4 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP134N), pbn_moxa_4 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP138E_A), pbn_moxa_8 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP168EL_A), pbn_moxa_8 },
/*
* ADDI-DATA GmbH communication cards <info@addi-data.com>
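Aside: the compact MOXA entries above use the PCI_VDEVICE() helper from <linux/pci.h>, which fills in the vendor and device fields and wildcards subvendor/subdevice, leaving only driver_data to supply. The two table forms below are equivalent; the device ID and driver_data value are purely illustrative:

    #include <linux/pci.h>

    /* Long form: all seven pci_device_id fields spelled out. */
    static const struct pci_device_id demo_ids_long[] = {
        { PCI_VENDOR_ID_MOXA, 0x1024,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0,
          1 /* driver_data */ },
        { }
    };

    /* Short form: PCI_VDEVICE() expands to the first six fields. */
    static const struct pci_device_id demo_ids_short[] = {
        { PCI_VDEVICE(MOXA, 0x1024), 1 /* driver_data */ },
        { }
    };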
diff --git a/drivers/tty/serial/8250/8250_pci1xxxx.c b/drivers/tty/serial/8250/8250_pci1xxxx.c
index a3b25779d921..9f9e21981929 100644
--- a/drivers/tty/serial/8250/8250_pci1xxxx.c
+++ b/drivers/tty/serial/8250/8250_pci1xxxx.c
@@ -107,7 +107,7 @@ static const int logical_to_physical_port_idx[][MAX_PORTS] = {
struct pci1xxxx_8250 {
unsigned int nr;
void __iomem *membase;
- int line[];
+ int line[] __counted_by(nr);
};
static int pci1xxxx_get_num_ports(struct pci_dev *dev)
@@ -225,10 +225,10 @@ static bool pci1xxxx_port_suspend(int line)
if (port->suspended == 0 && port->dev) {
wakeup_mask = readb(up->port.membase + UART_WAKE_MASK_REG);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
port->mctrl &= ~TIOCM_OUT2;
port->ops->set_mctrl(port, port->mctrl);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
ret = (wakeup_mask & UART_WAKE_SRCS) != UART_WAKE_SRCS;
}
@@ -251,10 +251,10 @@ static void pci1xxxx_port_resume(int line)
writeb(UART_WAKE_SRCS, port->membase + UART_WAKE_REG);
if (port->suspended == 0) {
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
port->mctrl |= TIOCM_OUT2;
port->ops->set_mctrl(port, port->mctrl);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
mutex_unlock(&tport->mutex);
}
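Aside: besides the lock conversion, the pci1xxxx hunk annotates the flexible array with __counted_by(nr), which lets the compiler and fortified accessors bounds-check line[] against the nr member. A small sketch of the idiom with made-up names; the counter must be assigned before the array is indexed:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct demo_ports {
        unsigned int nr;                /* number of valid entries */
        int line[] __counted_by(nr);    /* checked against ->nr */
    };

    static struct demo_ports *demo_ports_alloc(unsigned int nr)
    {
        /* struct_size() computes header + nr array elements without overflow. */
        struct demo_ports *p = kzalloc(struct_size(p, line, nr), GFP_KERNEL);

        if (!p)
            return NULL;

        p->nr = nr;    /* set the count before touching line[] */
        return p;
    }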
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 141627370aab..8ca061d3bbb9 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -170,13 +170,6 @@ static const struct serial8250_config uart_config[] = {
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
.flags = UART_CAP_FIFO,
},
- [PORT_AR7] = {
- .name = "AR7",
- .fifo_size = 16,
- .tx_loadsz = 16,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00,
- .flags = UART_CAP_FIFO /* | UART_CAP_AFE */,
- },
[PORT_U6_16550A] = {
.name = "U6_16550A",
.fifo_size = 64,
@@ -689,7 +682,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
if (p->capabilities & UART_CAP_SLEEP) {
/* Synchronize UART_IER access against the console. */
- spin_lock_irq(&p->port.lock);
+ uart_port_lock_irq(&p->port);
if (p->capabilities & UART_CAP_EFR) {
lcr = serial_in(p, UART_LCR);
efr = serial_in(p, UART_EFR);
@@ -703,7 +696,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
serial_out(p, UART_EFR, efr);
serial_out(p, UART_LCR, lcr);
}
- spin_unlock_irq(&p->port.lock);
+ uart_port_unlock_irq(&p->port);
}
serial8250_rpm_put(p);
@@ -746,9 +739,9 @@ static void enable_rsa(struct uart_8250_port *up)
{
if (up->port.type == PORT_RSA) {
if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) {
- spin_lock_irq(&up->port.lock);
+ uart_port_lock_irq(&up->port);
__enable_rsa(up);
- spin_unlock_irq(&up->port.lock);
+ uart_port_unlock_irq(&up->port);
}
if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
serial_out(up, UART_RSA_FRR, 0);
@@ -768,7 +761,7 @@ static void disable_rsa(struct uart_8250_port *up)
if (up->port.type == PORT_RSA &&
up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
- spin_lock_irq(&up->port.lock);
+ uart_port_lock_irq(&up->port);
mode = serial_in(up, UART_RSA_MSR);
result = !(mode & UART_RSA_MSR_FIFO);
@@ -781,7 +774,7 @@ static void disable_rsa(struct uart_8250_port *up)
if (result)
up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;
- spin_unlock_irq(&up->port.lock);
+ uart_port_unlock_irq(&up->port);
}
}
#endif /* CONFIG_SERIAL_8250_RSA */
@@ -1008,12 +1001,11 @@ static void autoconfig_16550a(struct uart_8250_port *up)
serial_out(up, UART_LCR, 0);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
UART_FCR7_64BYTE);
- status1 = serial_in(up, UART_IIR) & (UART_IIR_64BYTE_FIFO |
- UART_IIR_FIFO_ENABLED);
+ status1 = serial_in(up, UART_IIR) & UART_IIR_FIFO_ENABLED_16750;
serial_out(up, UART_FCR, 0);
serial_out(up, UART_LCR, 0);
- if (status1 == (UART_IIR_64BYTE_FIFO | UART_IIR_FIFO_ENABLED))
+ if (status1 == UART_IIR_FIFO_ENABLED_16750)
up->port.type = PORT_16550A_FSL64;
else
DEBUG_AUTOCONF("Motorola 8xxx DUART ");
@@ -1081,12 +1073,12 @@ static void autoconfig_16550a(struct uart_8250_port *up)
*/
serial_out(up, UART_LCR, 0);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
- status1 = serial_in(up, UART_IIR) & (UART_IIR_64BYTE_FIFO | UART_IIR_FIFO_ENABLED);
+ status1 = serial_in(up, UART_IIR) & UART_IIR_FIFO_ENABLED_16750;
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
- status2 = serial_in(up, UART_IIR) & (UART_IIR_64BYTE_FIFO | UART_IIR_FIFO_ENABLED);
+ status2 = serial_in(up, UART_IIR) & UART_IIR_FIFO_ENABLED_16750;
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
serial_out(up, UART_LCR, 0);
@@ -1094,7 +1086,7 @@ static void autoconfig_16550a(struct uart_8250_port *up)
DEBUG_AUTOCONF("iir1=%d iir2=%d ", status1, status2);
if (status1 == UART_IIR_FIFO_ENABLED_16550A &&
- status2 == (UART_IIR_64BYTE_FIFO | UART_IIR_FIFO_ENABLED_16550A)) {
+ status2 == UART_IIR_FIFO_ENABLED_16750) {
up->port.type = PORT_16750;
up->capabilities |= UART_CAP_AFE | UART_CAP_SLEEP;
return;
@@ -1172,7 +1164,7 @@ static void autoconfig(struct uart_8250_port *up)
*
* Synchronize UART_IER access against the console.
*/
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
up->capabilities = 0;
up->bugs = 0;
@@ -1211,7 +1203,7 @@ static void autoconfig(struct uart_8250_port *up)
/*
* We failed; there's nothing here
*/
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
DEBUG_AUTOCONF("IER test failed (%02x, %02x) ",
scratch2, scratch3);
goto out;
@@ -1235,7 +1227,7 @@ static void autoconfig(struct uart_8250_port *up)
status1 = serial_in(up, UART_MSR) & UART_MSR_STATUS_BITS;
serial8250_out_MCR(up, save_mcr);
if (status1 != (UART_MSR_DCD | UART_MSR_CTS)) {
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
DEBUG_AUTOCONF("LOOP test failed (%02x) ",
status1);
goto out;
@@ -1304,7 +1296,7 @@ static void autoconfig(struct uart_8250_port *up)
serial8250_clear_IER(up);
out_unlock:
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
/*
* Check if the device is a Fintek F81216A
@@ -1344,9 +1336,9 @@ static void autoconfig_irq(struct uart_8250_port *up)
probe_irq_off(probe_irq_on());
save_mcr = serial8250_in_MCR(up);
/* Synchronize UART_IER access against the console. */
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
save_ier = serial_in(up, UART_IER);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
serial8250_out_MCR(up, UART_MCR_OUT1 | UART_MCR_OUT2);
irqs = probe_irq_on();
@@ -1359,9 +1351,9 @@ static void autoconfig_irq(struct uart_8250_port *up)
UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
}
/* Synchronize UART_IER access against the console. */
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
serial_out(up, UART_IER, UART_IER_ALL_INTR);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
serial_in(up, UART_LSR);
serial_in(up, UART_RX);
serial_in(up, UART_IIR);
@@ -1372,9 +1364,9 @@ static void autoconfig_irq(struct uart_8250_port *up)
serial8250_out_MCR(up, save_mcr);
/* Synchronize UART_IER access against the console. */
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
serial_out(up, UART_IER, save_ier);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
if (port->flags & UPF_FOURPORT)
outb_p(save_ICP, ICP);
@@ -1442,13 +1434,13 @@ static enum hrtimer_restart serial8250_em485_handle_stop_tx(struct hrtimer *t)
unsigned long flags;
serial8250_rpm_get(p);
- spin_lock_irqsave(&p->port.lock, flags);
+ uart_port_lock_irqsave(&p->port, &flags);
if (em485->active_timer == &em485->stop_tx_timer) {
p->rs485_stop_tx(p);
em485->active_timer = NULL;
em485->tx_stopped = true;
}
- spin_unlock_irqrestore(&p->port.lock, flags);
+ uart_port_unlock_irqrestore(&p->port, flags);
serial8250_rpm_put(p);
return HRTIMER_NORESTART;
@@ -1630,12 +1622,12 @@ static enum hrtimer_restart serial8250_em485_handle_start_tx(struct hrtimer *t)
struct uart_8250_port *p = em485->port;
unsigned long flags;
- spin_lock_irqsave(&p->port.lock, flags);
+ uart_port_lock_irqsave(&p->port, &flags);
if (em485->active_timer == &em485->start_tx_timer) {
__start_tx(&p->port);
em485->active_timer = NULL;
}
- spin_unlock_irqrestore(&p->port.lock, flags);
+ uart_port_unlock_irqrestore(&p->port, flags);
return HRTIMER_NORESTART;
}
@@ -1918,7 +1910,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
if (iir & UART_IIR_NO_INT)
return 0;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
status = serial_lsr_in(up);
@@ -1988,9 +1980,9 @@ static int serial8250_tx_threshold_handle_irq(struct uart_port *port)
if ((iir & UART_IIR_ID) == UART_IIR_THRI) {
struct uart_8250_port *up = up_to_u8250p(port);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
serial8250_tx_chars(up);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
iir = serial_port_in(port, UART_IIR);
@@ -2005,10 +1997,10 @@ static unsigned int serial8250_tx_empty(struct uart_port *port)
serial8250_rpm_get(up);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (!serial8250_tx_dma_running(up) && uart_lsr_tx_empty(serial_lsr_in(up)))
result = TIOCSER_TEMT;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
serial8250_rpm_put(up);
@@ -2070,13 +2062,13 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state)
unsigned long flags;
serial8250_rpm_get(up);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (break_state == -1)
up->lcr |= UART_LCR_SBC;
else
up->lcr &= ~UART_LCR_SBC;
serial_port_out(port, UART_LCR, up->lcr);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
serial8250_rpm_put(up);
}
@@ -2211,7 +2203,7 @@ int serial8250_do_startup(struct uart_port *port)
*
* Synchronize UART_IER access against the console.
*/
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
up->acr = 0;
serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
serial_port_out(port, UART_EFR, UART_EFR_ECB);
@@ -2221,7 +2213,7 @@ int serial8250_do_startup(struct uart_port *port)
serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
serial_port_out(port, UART_EFR, UART_EFR_ECB);
serial_port_out(port, UART_LCR, 0);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
if (port->type == PORT_DA830) {
@@ -2230,10 +2222,10 @@ int serial8250_do_startup(struct uart_port *port)
*
* Synchronize UART_IER access against the console.
*/
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
serial_port_out(port, UART_IER, 0);
serial_port_out(port, UART_DA830_PWREMU_MGMT, 0);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
mdelay(10);
/* Enable Tx, Rx and free run mode */
@@ -2347,7 +2339,7 @@ int serial8250_do_startup(struct uart_port *port)
*
* Synchronize UART_IER access against the console.
*/
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
wait_for_xmitr(up, UART_LSR_THRE);
serial_port_out_sync(port, UART_IER, UART_IER_THRI);
@@ -2359,7 +2351,7 @@ int serial8250_do_startup(struct uart_port *port)
iir = serial_port_in(port, UART_IIR);
serial_port_out(port, UART_IER, 0);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (port->irqflags & IRQF_SHARED)
enable_irq(port->irq);
@@ -2382,7 +2374,7 @@ int serial8250_do_startup(struct uart_port *port)
*/
serial_port_out(port, UART_LCR, UART_LCR_WLEN8);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (up->port.flags & UPF_FOURPORT) {
if (!up->port.irq)
up->port.mctrl |= TIOCM_OUT1;
@@ -2428,7 +2420,7 @@ int serial8250_do_startup(struct uart_port *port)
}
dont_test_tx_en:
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
/*
* Clear the interrupt registers again for luck, and clear the
@@ -2499,17 +2491,17 @@ void serial8250_do_shutdown(struct uart_port *port)
*
* Synchronize UART_IER access against the console.
*/
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
up->ier = 0;
serial_port_out(port, UART_IER, 0);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
synchronize_irq(port->irq);
if (up->dma)
serial8250_release_dma(up);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (port->flags & UPF_FOURPORT) {
/* reset interrupts on the AST Fourport board */
inb((port->iobase & 0xfe0) | 0x1f);
@@ -2518,7 +2510,7 @@ void serial8250_do_shutdown(struct uart_port *port)
port->mctrl &= ~TIOCM_OUT2;
serial8250_set_mctrl(port, port->mctrl);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
/*
* Disable break condition and FIFOs
@@ -2754,14 +2746,14 @@ void serial8250_update_uartclk(struct uart_port *port, unsigned int uartclk)
quot = serial8250_get_divisor(port, baud, &frac);
serial8250_rpm_get(up);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_update_timeout(port, termios->c_cflag, baud);
serial8250_set_divisor(port, baud, quot, frac);
serial_port_out(port, UART_LCR, up->lcr);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
serial8250_rpm_put(up);
out_unlock:
@@ -2798,7 +2790,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
* Synchronize UART_IER access against the console.
*/
serial8250_rpm_get(up);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
up->lcr = cval; /* Save computed LCR */
@@ -2901,7 +2893,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
serial_port_out(port, UART_FCR, up->fcr); /* set fcr */
}
serial8250_set_mctrl(port, port->mctrl);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
serial8250_rpm_put(up);
/* Don't rewrite B0 */
@@ -2924,15 +2916,15 @@ void serial8250_do_set_ldisc(struct uart_port *port, struct ktermios *termios)
{
if (termios->c_line == N_PPS) {
port->flags |= UPF_HARDPPS_CD;
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
serial8250_enable_ms(port);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
} else {
port->flags &= ~UPF_HARDPPS_CD;
if (!UART_ENABLE_MS(port, termios->c_cflag)) {
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
serial8250_disable_ms(port);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
}
}
}
@@ -3406,9 +3398,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
touch_nmi_watchdog();
if (oops_in_progress)
- locked = spin_trylock_irqsave(&port->lock, flags);
+ locked = uart_port_trylock_irqsave(port, &flags);
else
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/*
* First save the IER then disable the interrupts
@@ -3478,7 +3470,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
serial8250_modem_status(up);
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static unsigned int probe_baud(struct uart_port *port)
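Aside: most of the 8250_port.c hunks are a mechanical conversion from open-coded spinlock calls on port->lock to the uart_port_lock*() wrappers from <linux/serial_core.h>. The wrappers take the same lock; routing every access through them keeps port locking behind one interface for the serial core. A minimal before/after sketch (the surrounding driver code is illustrative):

    #include <linux/serial_core.h>

    static void demo_break_ctl(struct uart_port *port, int break_state)
    {
        unsigned long flags;

        /* was: spin_lock_irqsave(&port->lock, flags); */
        uart_port_lock_irqsave(port, &flags);

        /* ... program the break bit under the port lock ... */

        /* was: spin_unlock_irqrestore(&port->lock, flags); */
        uart_port_unlock_irqrestore(port, flags);
    }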
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index c12978311a09..8b9a2c4902e2 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -148,7 +148,7 @@ config SERIAL_8250_PCI
config SERIAL_8250_EXAR
tristate "8250/16550 Exar/Commtech PCI/PCIe device support"
- depends on SERIAL_8250_PCI
+ depends on SERIAL_8250 && PCI
default SERIAL_8250
help
This builds support for XR17C1xx, XR17V3xx and some Commtech
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index 628b75be312e..ea2e81f58eac 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -13,39 +13,41 @@ obj-$(CONFIG_SERIAL_8250) += 8250.o 8250_base.o
8250_base-$(CONFIG_SERIAL_8250_DWLIB) += 8250_dwlib.o
8250_base-$(CONFIG_SERIAL_8250_FINTEK) += 8250_fintek.o
8250_base-$(CONFIG_SERIAL_8250_PCILIB) += 8250_pcilib.o
-obj-$(CONFIG_SERIAL_8250_PARISC) += 8250_parisc.o
-obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o
-obj-$(CONFIG_SERIAL_8250_EXAR) += 8250_exar.o
-obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o
-obj-$(CONFIG_SERIAL_8250_CS) += serial_cs.o
+
+obj-$(CONFIG_SERIAL_8250_CONSOLE) += 8250_early.o
+
+obj-$(CONFIG_SERIAL_8250_ACCENT) += 8250_accent.o
obj-$(CONFIG_SERIAL_8250_ACORN) += 8250_acorn.o
obj-$(CONFIG_SERIAL_8250_ASPEED_VUART) += 8250_aspeed_vuart.o
obj-$(CONFIG_SERIAL_8250_BCM2835AUX) += 8250_bcm2835aux.o
-obj-$(CONFIG_SERIAL_8250_CONSOLE) += 8250_early.o
-obj-$(CONFIG_SERIAL_8250_FOURPORT) += 8250_fourport.o
-obj-$(CONFIG_SERIAL_8250_ACCENT) += 8250_accent.o
+obj-$(CONFIG_SERIAL_8250_BCM7271) += 8250_bcm7271.o
obj-$(CONFIG_SERIAL_8250_BOCA) += 8250_boca.o
-obj-$(CONFIG_SERIAL_8250_EXAR_ST16C554) += 8250_exar_st16c554.o
-obj-$(CONFIG_SERIAL_8250_HUB6) += 8250_hub6.o
-obj-$(CONFIG_SERIAL_8250_PCI1XXXX) += 8250_pci1xxxx.o
-obj-$(CONFIG_SERIAL_8250_FSL) += 8250_fsl.o
-obj-$(CONFIG_SERIAL_8250_MEN_MCB) += 8250_men_mcb.o
obj-$(CONFIG_SERIAL_8250_DFL) += 8250_dfl.o
obj-$(CONFIG_SERIAL_8250_DW) += 8250_dw.o
obj-$(CONFIG_SERIAL_8250_EM) += 8250_em.o
+obj-$(CONFIG_SERIAL_8250_EXAR) += 8250_exar.o
+obj-$(CONFIG_SERIAL_8250_EXAR_ST16C554) += 8250_exar_st16c554.o
+obj-$(CONFIG_SERIAL_8250_FOURPORT) += 8250_fourport.o
+obj-$(CONFIG_SERIAL_8250_FSL) += 8250_fsl.o
+obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o
+obj-$(CONFIG_SERIAL_8250_HUB6) += 8250_hub6.o
+obj-$(CONFIG_SERIAL_8250_INGENIC) += 8250_ingenic.o
obj-$(CONFIG_SERIAL_8250_IOC3) += 8250_ioc3.o
-obj-$(CONFIG_SERIAL_8250_OMAP) += 8250_omap.o
-obj-$(CONFIG_SERIAL_8250_RT288X) += 8250_rt288x.o
obj-$(CONFIG_SERIAL_8250_LPC18XX) += 8250_lpc18xx.o
-obj-$(CONFIG_SERIAL_8250_MT6577) += 8250_mtk.o
-obj-$(CONFIG_SERIAL_8250_UNIPHIER) += 8250_uniphier.o
-obj-$(CONFIG_SERIAL_8250_INGENIC) += 8250_ingenic.o
obj-$(CONFIG_SERIAL_8250_LPSS) += 8250_lpss.o
+obj-$(CONFIG_SERIAL_8250_MEN_MCB) += 8250_men_mcb.o
obj-$(CONFIG_SERIAL_8250_MID) += 8250_mid.o
+obj-$(CONFIG_SERIAL_8250_MT6577) += 8250_mtk.o
+obj-$(CONFIG_SERIAL_OF_PLATFORM) += 8250_of.o
+obj-$(CONFIG_SERIAL_8250_OMAP) += 8250_omap.o
+obj-$(CONFIG_SERIAL_8250_PARISC) += 8250_parisc.o
+obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o
+obj-$(CONFIG_SERIAL_8250_PCI1XXXX) += 8250_pci1xxxx.o
obj-$(CONFIG_SERIAL_8250_PERICOM) += 8250_pericom.o
obj-$(CONFIG_SERIAL_8250_PXA) += 8250_pxa.o
+obj-$(CONFIG_SERIAL_8250_RT288X) += 8250_rt288x.o
+obj-$(CONFIG_SERIAL_8250_CS) += serial_cs.o
+obj-$(CONFIG_SERIAL_8250_UNIPHIER) += 8250_uniphier.o
obj-$(CONFIG_SERIAL_8250_TEGRA) += 8250_tegra.o
-obj-$(CONFIG_SERIAL_8250_BCM7271) += 8250_bcm7271.o
-obj-$(CONFIG_SERIAL_OF_PLATFORM) += 8250_of.o
CFLAGS_8250_ingenic.o += -I$(srctree)/scripts/dtc/libfdt
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index bdc568a4ab66..732c893c8d16 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1578,6 +1578,32 @@ config SERIAL_NUVOTON_MA35D1_CONSOLE
but you can alter that using a kernel command line option such as
"console=ttyNVTx".
+config SERIAL_ESP32
+ tristate "Espressif ESP32 UART support"
+ depends on XTENSA_PLATFORM_ESP32 || (COMPILE_TEST && OF)
+ select SERIAL_CORE
+ select SERIAL_CORE_CONSOLE
+ select SERIAL_EARLYCON
+ help
+ Driver for the UART controllers of the Espressif ESP32xx SoCs.
+ When the earlycon option is enabled, the following kernel command
+ line snippets may be used:
+ earlycon=esp32s3uart,mmio32,0x60000000,115200n8,40000000
+ earlycon=esp32uart,mmio32,0x3ff40000,115200n8
+
+config SERIAL_ESP32_ACM
+ tristate "Espressif ESP32 USB ACM gadget support"
+ depends on XTENSA_PLATFORM_ESP32 || (COMPILE_TEST && OF)
+ select SERIAL_CORE
+ select SERIAL_CORE_CONSOLE
+ select SERIAL_EARLYCON
+ help
+ Driver for the CDC ACM gadget controller of the Espressif ESP32S3
+ SoCs that share a separate USB controller with the JTAG adapter.
+ When the earlycon option is enabled, the following kernel command
+ line snippet may be used:
+ earlycon=esp32s3acm,mmio32,0x60038000
+
endmenu
config SERIAL_MCTRL_GPIO
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 138abbc89738..b25e9b54a660 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -24,70 +24,72 @@ obj-$(CONFIG_SERIAL_21285) += 21285.o
# Now bring in any enabled 8250/16450/16550 type drivers.
obj-y += 8250/
-obj-$(CONFIG_SERIAL_AMBA_PL010) += amba-pl010.o
-obj-$(CONFIG_SERIAL_AMBA_PL011) += amba-pl011.o
-obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o
-obj-$(CONFIG_SERIAL_PXA_NON8250) += pxa.o
-obj-$(CONFIG_SERIAL_SA1100) += sa1100.o
-obj-$(CONFIG_SERIAL_BCM63XX) += bcm63xx_uart.o
-obj-$(CONFIG_SERIAL_SAMSUNG) += samsung_tty.o
-obj-$(CONFIG_SERIAL_MAX3100) += max3100.o
-obj-$(CONFIG_SERIAL_MAX310X) += max310x.o
-obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o
-obj-$(CONFIG_SERIAL_MUX) += mux.o
-obj-$(CONFIG_SERIAL_MCF) += mcf.o
-obj-$(CONFIG_SERIAL_PMACZILOG) += pmac_zilog.o
-obj-$(CONFIG_SERIAL_HS_LPC32XX) += lpc32xx_hs.o
-obj-$(CONFIG_SERIAL_DZ) += dz.o
-obj-$(CONFIG_SERIAL_ZS) += zs.o
-obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o
-obj-$(CONFIG_SERIAL_CPM) += cpm_uart.o
-obj-$(CONFIG_SERIAL_IMX) += imx.o
-obj-$(CONFIG_SERIAL_IMX_EARLYCON) += imx_earlycon.o
-obj-$(CONFIG_SERIAL_MPC52xx) += mpc52xx_uart.o
-obj-$(CONFIG_SERIAL_ICOM) += icom.o
-obj-$(CONFIG_SERIAL_MESON) += meson_uart.o
-obj-$(CONFIG_SERIAL_SB1250_DUART) += sb1250-duart.o
-obj-$(CONFIG_SERIAL_SCCNXP) += sccnxp.o
-obj-$(CONFIG_SERIAL_SC16IS7XX_CORE) += sc16is7xx.o
-obj-$(CONFIG_SERIAL_JSM) += jsm/
-obj-$(CONFIG_SERIAL_TXX9) += serial_txx9.o
-obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
-obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
-obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
-obj-$(CONFIG_SERIAL_QCOM_GENI) += qcom_geni_serial.o
-obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o
-obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o
-obj-$(CONFIG_SERIAL_ST_ASC) += st-asc.o
-obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
-obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o
+obj-$(CONFIG_SERIAL_ALTERA_JTAGUART) += altera_jtaguart.o
+obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o
+obj-$(CONFIG_SERIAL_AMBA_PL010) += amba-pl010.o
+obj-$(CONFIG_SERIAL_AMBA_PL011) += amba-pl011.o
obj-$(CONFIG_SERIAL_GRLIB_GAISLER_APBUART) += apbuart.o
-obj-$(CONFIG_SERIAL_ALTERA_JTAGUART) += altera_jtaguart.o
-obj-$(CONFIG_SERIAL_VT8500) += vt8500_serial.o
-obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o
-obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o
-obj-$(CONFIG_SERIAL_LANTIQ) += lantiq.o
-obj-$(CONFIG_SERIAL_XILINX_PS_UART) += xilinx_uartps.o
-obj-$(CONFIG_SERIAL_TEGRA) += serial-tegra.o
-obj-$(CONFIG_SERIAL_TEGRA_TCU) += tegra-tcu.o
-obj-$(CONFIG_SERIAL_AR933X) += ar933x_uart.o
-obj-$(CONFIG_SERIAL_ARC) += arc_uart.o
-obj-$(CONFIG_SERIAL_RP2) += rp2.o
-obj-$(CONFIG_SERIAL_FSL_LPUART) += fsl_lpuart.o
-obj-$(CONFIG_SERIAL_FSL_LINFLEXUART) += fsl_linflexuart.o
+obj-$(CONFIG_SERIAL_AR933X) += ar933x_uart.o
+obj-$(CONFIG_SERIAL_ARC) += arc_uart.o
+obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
+obj-$(CONFIG_SERIAL_BCM63XX) += bcm63xx_uart.o
+obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o
+obj-$(CONFIG_SERIAL_CPM) += cpm_uart.o
obj-$(CONFIG_SERIAL_CONEXANT_DIGICOLOR) += digicolor-usart.o
-obj-$(CONFIG_SERIAL_MEN_Z135) += men_z135_uart.o
-obj-$(CONFIG_SERIAL_SPRD) += sprd_serial.o
-obj-$(CONFIG_SERIAL_STM32) += stm32-usart.o
-obj-$(CONFIG_SERIAL_MVEBU_UART) += mvebu-uart.o
-obj-$(CONFIG_SERIAL_PIC32) += pic32_uart.o
-obj-$(CONFIG_SERIAL_MPS2_UART) += mps2-uart.o
-obj-$(CONFIG_SERIAL_OWL) += owl-uart.o
-obj-$(CONFIG_SERIAL_RDA) += rda-uart.o
-obj-$(CONFIG_SERIAL_MILBEAUT_USIO) += milbeaut_usio.o
-obj-$(CONFIG_SERIAL_SIFIVE) += sifive.o
-obj-$(CONFIG_SERIAL_LITEUART) += liteuart.o
-obj-$(CONFIG_SERIAL_SUNPLUS) += sunplus-uart.o
+obj-$(CONFIG_SERIAL_DZ) += dz.o
+obj-$(CONFIG_SERIAL_ESP32) += esp32_uart.o
+obj-$(CONFIG_SERIAL_ESP32_ACM) += esp32_acm.o
+obj-$(CONFIG_SERIAL_FSL_LINFLEXUART) += fsl_linflexuart.o
+obj-$(CONFIG_SERIAL_FSL_LPUART) += fsl_lpuart.o
+obj-$(CONFIG_SERIAL_ICOM) += icom.o
+obj-$(CONFIG_SERIAL_IMX) += imx.o
+obj-$(CONFIG_SERIAL_IMX_EARLYCON) += imx_earlycon.o
+obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o
+obj-$(CONFIG_SERIAL_JSM) += jsm/
+obj-$(CONFIG_SERIAL_LANTIQ) += lantiq.o
+obj-$(CONFIG_SERIAL_LITEUART) += liteuart.o
+obj-$(CONFIG_SERIAL_HS_LPC32XX) += lpc32xx_hs.o
+obj-$(CONFIG_SERIAL_MAX3100) += max3100.o
+obj-$(CONFIG_SERIAL_MAX310X) += max310x.o
+obj-$(CONFIG_SERIAL_MCF) += mcf.o
+obj-$(CONFIG_SERIAL_MEN_Z135) += men_z135_uart.o
+obj-$(CONFIG_SERIAL_MILBEAUT_USIO) += milbeaut_usio.o
+obj-$(CONFIG_SERIAL_MESON) += meson_uart.o
+obj-$(CONFIG_SERIAL_MPC52xx) += mpc52xx_uart.o
+obj-$(CONFIG_SERIAL_MPS2_UART) += mps2-uart.o
+obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
+obj-$(CONFIG_SERIAL_MUX) += mux.o
+obj-$(CONFIG_SERIAL_MVEBU_UART) += mvebu-uart.o
+obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o
+obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o
+obj-$(CONFIG_SERIAL_OWL) += owl-uart.o
+obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o
+obj-$(CONFIG_SERIAL_PIC32) += pic32_uart.o
+obj-$(CONFIG_SERIAL_PXA_NON8250) += pxa.o
+obj-$(CONFIG_SERIAL_PMACZILOG) += pmac_zilog.o
+obj-$(CONFIG_SERIAL_QCOM_GENI) += qcom_geni_serial.o
+obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
+obj-$(CONFIG_SERIAL_RDA) += rda-uart.o
+obj-$(CONFIG_SERIAL_RP2) += rp2.o
+obj-$(CONFIG_SERIAL_SA1100) += sa1100.o
+obj-$(CONFIG_SERIAL_SAMSUNG) += samsung_tty.o
+obj-$(CONFIG_SERIAL_SB1250_DUART) += sb1250-duart.o
+obj-$(CONFIG_SERIAL_SCCNXP) += sccnxp.o
+obj-$(CONFIG_SERIAL_SC16IS7XX_CORE) += sc16is7xx.o
+obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o
+obj-$(CONFIG_SERIAL_SIFIVE) += sifive.o
+obj-$(CONFIG_SERIAL_SPRD) += sprd_serial.o
+obj-$(CONFIG_SERIAL_ST_ASC) += st-asc.o
+obj-$(CONFIG_SERIAL_STM32) += stm32-usart.o
+obj-$(CONFIG_SERIAL_SUNPLUS) += sunplus-uart.o
+obj-$(CONFIG_SERIAL_TEGRA) += serial-tegra.o
+obj-$(CONFIG_SERIAL_TEGRA_TCU) += tegra-tcu.o
+obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o
+obj-$(CONFIG_SERIAL_TXX9) += serial_txx9.o
+obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
+obj-$(CONFIG_SERIAL_VT8500) += vt8500_serial.o
+obj-$(CONFIG_SERIAL_XILINX_PS_UART) += xilinx_uartps.o
+obj-$(CONFIG_SERIAL_ZS) += zs.o
# GPIOLIB helpers for modem control lines
obj-$(CONFIG_SERIAL_MCTRL_GPIO) += serial_mctrl_gpio.o
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index 5fab4c978891..7090b251dd4d 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -147,14 +147,14 @@ static irqreturn_t altera_jtaguart_interrupt(int irq, void *data)
isr = (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) >>
ALTERA_JTAGUART_CONTROL_RI_OFF) & port->read_status_mask;
- spin_lock(&port->lock);
+ uart_port_lock(port);
if (isr & ALTERA_JTAGUART_CONTROL_RE_MSK)
altera_jtaguart_rx_chars(port);
if (isr & ALTERA_JTAGUART_CONTROL_WE_MSK)
altera_jtaguart_tx_chars(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_RETVAL(isr);
}
@@ -180,14 +180,14 @@ static int altera_jtaguart_startup(struct uart_port *port)
return ret;
}
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Enable RX interrupts now */
port->read_status_mask = ALTERA_JTAGUART_CONTROL_RE_MSK;
writel(port->read_status_mask,
port->membase + ALTERA_JTAGUART_CONTROL_REG);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
@@ -196,14 +196,14 @@ static void altera_jtaguart_shutdown(struct uart_port *port)
{
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Disable all interrupts now */
port->read_status_mask = 0;
writel(port->read_status_mask,
port->membase + ALTERA_JTAGUART_CONTROL_REG);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
free_irq(port->irq, port);
}
@@ -264,33 +264,33 @@ static void altera_jtaguart_console_putc(struct uart_port *port, unsigned char c
unsigned long flags;
u32 status;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
while (!altera_jtaguart_tx_space(port, &status)) {
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if ((status & ALTERA_JTAGUART_CONTROL_AC_MSK) == 0) {
return; /* no connection activity */
}
cpu_relax();
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
}
writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
#else
static void altera_jtaguart_console_putc(struct uart_port *port, unsigned char c)
{
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
while (!altera_jtaguart_tx_space(port, NULL)) {
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
cpu_relax();
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
}
writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
#endif
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index a9c41942190c..77835ac68df2 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -164,13 +164,13 @@ static void altera_uart_break_ctl(struct uart_port *port, int break_state)
struct altera_uart *pp = container_of(port, struct altera_uart, port);
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (break_state == -1)
pp->imr |= ALTERA_UART_CONTROL_TRBK_MSK;
else
pp->imr &= ~ALTERA_UART_CONTROL_TRBK_MSK;
altera_uart_update_ctrl_reg(pp);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void altera_uart_set_termios(struct uart_port *port,
@@ -187,10 +187,10 @@ static void altera_uart_set_termios(struct uart_port *port,
tty_termios_copy_hw(termios, old);
tty_termios_encode_baud_rate(termios, baud, baud);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_update_timeout(port, termios->c_cflag, baud);
altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
/*
* FIXME: port->read_status_mask and port->ignore_status_mask
@@ -264,12 +264,12 @@ static irqreturn_t altera_uart_interrupt(int irq, void *data)
isr = altera_uart_readl(port, ALTERA_UART_STATUS_REG) & pp->imr;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (isr & ALTERA_UART_STATUS_RRDY_MSK)
altera_uart_rx_chars(port);
if (isr & ALTERA_UART_STATUS_TRDY_MSK)
altera_uart_tx_chars(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return IRQ_RETVAL(isr);
}
@@ -313,13 +313,13 @@ static int altera_uart_startup(struct uart_port *port)
}
}
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Enable RX interrupts now */
pp->imr = ALTERA_UART_CONTROL_RRDY_MSK;
altera_uart_update_ctrl_reg(pp);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
@@ -329,13 +329,13 @@ static void altera_uart_shutdown(struct uart_port *port)
struct altera_uart *pp = container_of(port, struct altera_uart, port);
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Disable all interrupts now */
pp->imr = 0;
altera_uart_update_ctrl_reg(pp);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (port->irq)
free_irq(port->irq, port);
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index b5a7404cbacb..eabbf8afc9b5 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -207,7 +207,7 @@ static irqreturn_t pl010_int(int irq, void *dev_id)
unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
int handled = 0;
- spin_lock(&port->lock);
+ uart_port_lock(port);
status = readb(port->membase + UART010_IIR);
if (status) {
@@ -228,7 +228,7 @@ static irqreturn_t pl010_int(int irq, void *dev_id)
handled = 1;
}
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_RETVAL(handled);
}
@@ -270,14 +270,14 @@ static void pl010_break_ctl(struct uart_port *port, int break_state)
unsigned long flags;
unsigned int lcr_h;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
lcr_h = readb(port->membase + UART010_LCRH);
if (break_state == -1)
lcr_h |= UART01x_LCRH_BRK;
else
lcr_h &= ~UART01x_LCRH_BRK;
writel(lcr_h, port->membase + UART010_LCRH);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int pl010_startup(struct uart_port *port)
@@ -385,7 +385,7 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios,
if (port->fifosize > 1)
lcr_h |= UART01x_LCRH_FEN;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/*
* Update the per-port timeout.
@@ -438,22 +438,22 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios,
writel(lcr_h, port->membase + UART010_LCRH);
writel(old_cr, port->membase + UART010_CR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void pl010_set_ldisc(struct uart_port *port, struct ktermios *termios)
{
if (termios->c_line == N_PPS) {
port->flags |= UPF_HARDPPS_CD;
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
pl010_enable_ms(port);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
} else {
port->flags &= ~UPF_HARDPPS_CD;
if (!UART_ENABLE_MS(port, termios->c_cflag)) {
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
pl010_disable_ms(port);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
}
}
}
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 3dc9b0fcab1c..61cc24cd90e4 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -345,9 +345,9 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
flag = TTY_FRAME;
}
- spin_unlock(&uap->port.lock);
+ uart_port_unlock(&uap->port);
sysrq = uart_handle_sysrq_char(&uap->port, ch & 255);
- spin_lock(&uap->port.lock);
+ uart_port_lock(&uap->port);
if (!sysrq)
uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
@@ -421,7 +421,7 @@ static void pl011_dma_probe(struct uart_amba_port *uap)
/* We need platform data */
if (!plat || !plat->dma_filter) {
- dev_info(uap->port.dev, "no DMA platform data\n");
+ dev_dbg(uap->port.dev, "no DMA platform data\n");
return;
}
@@ -550,7 +550,7 @@ static void pl011_dma_tx_callback(void *data)
unsigned long flags;
u16 dmacr;
- spin_lock_irqsave(&uap->port.lock, flags);
+ uart_port_lock_irqsave(&uap->port, &flags);
if (uap->dmatx.queued)
dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
DMA_TO_DEVICE);
@@ -571,7 +571,7 @@ static void pl011_dma_tx_callback(void *data)
if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
uart_circ_empty(&uap->port.state->xmit)) {
uap->dmatx.queued = false;
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
return;
}
@@ -582,7 +582,7 @@ static void pl011_dma_tx_callback(void *data)
*/
pl011_start_tx_pio(uap);
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
}
/*
@@ -1009,7 +1009,7 @@ static void pl011_dma_rx_callback(void *data)
* routine to flush out the secondary DMA buffer while
* we immediately trigger the next DMA job.
*/
- spin_lock_irq(&uap->port.lock);
+ uart_port_lock_irq(&uap->port);
/*
* Rx data can be taken by the UART interrupts during
* the DMA irq handler. So we check the residue here.
@@ -1025,7 +1025,7 @@ static void pl011_dma_rx_callback(void *data)
ret = pl011_dma_rx_trigger_dma(uap);
pl011_dma_rx_chars(uap, pending, lastbuf, false);
- spin_unlock_irq(&uap->port.lock);
+ uart_port_unlock_irq(&uap->port);
/*
* Do this check after we picked the DMA chars so we don't
* get some IRQ immediately from RX.
@@ -1091,11 +1091,11 @@ static void pl011_dma_rx_poll(struct timer_list *t)
if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
> uap->dmarx.poll_timeout) {
- spin_lock_irqsave(&uap->port.lock, flags);
+ uart_port_lock_irqsave(&uap->port, &flags);
pl011_dma_rx_stop(uap);
uap->im |= UART011_RXIM;
pl011_write(uap->im, uap, REG_IMSC);
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
uap->dmarx.running = false;
dmaengine_terminate_all(rxchan);
@@ -1191,10 +1191,10 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
cpu_relax();
- spin_lock_irq(&uap->port.lock);
+ uart_port_lock_irq(&uap->port);
uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
pl011_write(uap->dmacr, uap, REG_DMACR);
- spin_unlock_irq(&uap->port.lock);
+ uart_port_unlock_irq(&uap->port);
if (uap->using_tx_dma) {
/* In theory, this should already be done by pl011_dma_flush_buffer */
@@ -1374,9 +1374,9 @@ static void pl011_throttle_rx(struct uart_port *port)
{
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
pl011_stop_rx(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void pl011_enable_ms(struct uart_port *port)
@@ -1394,7 +1394,7 @@ __acquires(&uap->port.lock)
{
pl011_fifo_to_tty(uap);
- spin_unlock(&uap->port.lock);
+ uart_port_unlock(&uap->port);
tty_flip_buffer_push(&uap->port.state->port);
/*
* If we were temporarily out of DMA mode for a while,
@@ -1419,7 +1419,7 @@ __acquires(&uap->port.lock)
#endif
}
}
- spin_lock(&uap->port.lock);
+ uart_port_lock(&uap->port);
}
static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
@@ -1555,7 +1555,7 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
int handled = 0;
- spin_lock_irqsave(&uap->port.lock, flags);
+ uart_port_lock_irqsave(&uap->port, &flags);
status = pl011_read(uap, REG_RIS) & uap->im;
if (status) {
do {
@@ -1585,7 +1585,7 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
handled = 1;
}
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
return IRQ_RETVAL(handled);
}
@@ -1657,14 +1657,14 @@ static void pl011_break_ctl(struct uart_port *port, int break_state)
unsigned long flags;
unsigned int lcr_h;
- spin_lock_irqsave(&uap->port.lock, flags);
+ uart_port_lock_irqsave(&uap->port, &flags);
lcr_h = pl011_read(uap, REG_LCRH_TX);
if (break_state == -1)
lcr_h |= UART01x_LCRH_BRK;
else
lcr_h &= ~UART01x_LCRH_BRK;
pl011_write(lcr_h, uap, REG_LCRH_TX);
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
}
#ifdef CONFIG_CONSOLE_POLL
@@ -1803,7 +1803,7 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap)
unsigned long flags;
unsigned int i;
- spin_lock_irqsave(&uap->port.lock, flags);
+ uart_port_lock_irqsave(&uap->port, &flags);
/* Clear out any spuriously appearing RX interrupts */
pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
@@ -1825,7 +1825,7 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap)
if (!pl011_dma_rx_running(uap))
uap->im |= UART011_RXIM;
pl011_write(uap->im, uap, REG_IMSC);
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
}
static void pl011_unthrottle_rx(struct uart_port *port)
@@ -1833,7 +1833,7 @@ static void pl011_unthrottle_rx(struct uart_port *port)
struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port);
unsigned long flags;
- spin_lock_irqsave(&uap->port.lock, flags);
+ uart_port_lock_irqsave(&uap->port, &flags);
uap->im = UART011_RTIM;
if (!pl011_dma_rx_running(uap))
@@ -1841,7 +1841,7 @@ static void pl011_unthrottle_rx(struct uart_port *port)
pl011_write(uap->im, uap, REG_IMSC);
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
}
static int pl011_startup(struct uart_port *port)
@@ -1861,7 +1861,7 @@ static int pl011_startup(struct uart_port *port)
pl011_write(uap->vendor->ifls, uap, REG_IFLS);
- spin_lock_irq(&uap->port.lock);
+ uart_port_lock_irq(&uap->port);
cr = pl011_read(uap, REG_CR);
cr &= UART011_CR_RTS | UART011_CR_DTR;
@@ -1872,7 +1872,7 @@ static int pl011_startup(struct uart_port *port)
pl011_write(cr, uap, REG_CR);
- spin_unlock_irq(&uap->port.lock);
+ uart_port_unlock_irq(&uap->port);
/*
* initialise the old status of the modem signals
@@ -1933,12 +1933,12 @@ static void pl011_disable_uart(struct uart_amba_port *uap)
unsigned int cr;
uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
- spin_lock_irq(&uap->port.lock);
+ uart_port_lock_irq(&uap->port);
cr = pl011_read(uap, REG_CR);
cr &= UART011_CR_RTS | UART011_CR_DTR;
cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
pl011_write(cr, uap, REG_CR);
- spin_unlock_irq(&uap->port.lock);
+ uart_port_unlock_irq(&uap->port);
/*
* disable break condition and fifos
@@ -1950,14 +1950,14 @@ static void pl011_disable_uart(struct uart_amba_port *uap)
static void pl011_disable_interrupts(struct uart_amba_port *uap)
{
- spin_lock_irq(&uap->port.lock);
+ uart_port_lock_irq(&uap->port);
/* mask all interrupts and clear all pending ones */
uap->im = 0;
pl011_write(uap->im, uap, REG_IMSC);
pl011_write(0xffff, uap, REG_ICR);
- spin_unlock_irq(&uap->port.lock);
+ uart_port_unlock_irq(&uap->port);
}
static void pl011_shutdown(struct uart_port *port)
@@ -2102,7 +2102,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
bits = tty_get_frame_size(termios->c_cflag);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/*
* Update the per-port timeout.
@@ -2176,7 +2176,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
old_cr |= UART011_CR_RXE;
pl011_write(old_cr, uap, REG_CR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void
@@ -2194,10 +2194,10 @@ sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
termios->c_cflag &= ~(CMSPAR | CRTSCTS);
termios->c_cflag |= CS8 | CLOCAL;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_update_timeout(port, CS8, uap->fixed_baud);
pl011_setup_status_masks(port, termios);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *pl011_type(struct uart_port *port)
@@ -2336,9 +2336,9 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
if (uap->port.sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock(&uap->port.lock);
+ locked = uart_port_trylock(&uap->port);
else
- spin_lock(&uap->port.lock);
+ uart_port_lock(&uap->port);
/*
* First save the CR then disable the interrupts
@@ -2364,7 +2364,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
pl011_write(old_cr, uap, REG_CR);
if (locked)
- spin_unlock(&uap->port.lock);
+ uart_port_unlock(&uap->port);
local_irq_restore(flags);
clk_disable(uap->clk);
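The pl011 conversion above is representative of the whole series: every spin_lock*() call on the port lock is replaced one-for-one by a uart_port_lock*() helper. As a minimal sketch (an illustrative assumption, not the helpers' actual definitions in this series), the new API can be read as thin wrappers over the spinlock embedded in struct uart_port:

static inline void uart_port_lock(struct uart_port *up)
{
	/* plain lock, for paths where interrupts are already disabled */
	spin_lock(&up->lock);
}

static inline void uart_port_lock_irq(struct uart_port *up)
{
	spin_lock_irq(&up->lock);
}

static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
{
	/* note the pointer: call sites pass &flags rather than flags */
	spin_lock_irqsave(&up->lock, *flags);
}

static inline bool uart_port_trylock(struct uart_port *up)
{
	/* used by console ->write() callbacks when oops_in_progress */
	return spin_trylock(&up->lock);
}

with matching uart_port_unlock(), uart_port_unlock_irq() and uart_port_unlock_irqrestore() counterparts. The only call-site change beyond the rename is that the irqsave variant takes a pointer, which is why the hunks switch from "flags" to "&flags".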
diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c
index d7658f380838..716cb014c028 100644
--- a/drivers/tty/serial/apbuart.c
+++ b/drivers/tty/serial/apbuart.c
@@ -133,7 +133,7 @@ static irqreturn_t apbuart_int(int irq, void *dev_id)
struct uart_port *port = dev_id;
unsigned int status;
- spin_lock(&port->lock);
+ uart_port_lock(port);
status = UART_GET_STATUS(port);
if (status & UART_STATUS_DR)
@@ -141,7 +141,7 @@ static irqreturn_t apbuart_int(int irq, void *dev_id)
if (status & UART_STATUS_THE)
apbuart_tx_chars(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
@@ -228,7 +228,7 @@ static void apbuart_set_termios(struct uart_port *port,
if (termios->c_cflag & CRTSCTS)
cr |= UART_CTRL_FL;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Update the per-port timeout. */
uart_update_timeout(port, termios->c_cflag, baud);
@@ -251,7 +251,7 @@ static void apbuart_set_termios(struct uart_port *port,
UART_PUT_SCAL(port, quot);
UART_PUT_CTRL(port, cr);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *apbuart_type(struct uart_port *port)
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 924c1a89347c..ffd234673177 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -133,9 +133,9 @@ static unsigned int ar933x_uart_tx_empty(struct uart_port *port)
unsigned long flags;
unsigned int rdata;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
return (rdata & AR933X_UART_DATA_TX_CSR) ? 0 : TIOCSER_TEMT;
}
@@ -220,14 +220,14 @@ static void ar933x_uart_break_ctl(struct uart_port *port, int break_state)
container_of(port, struct ar933x_uart_port, port);
unsigned long flags;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
if (break_state == -1)
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
AR933X_UART_CS_TX_BREAK);
else
ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG,
AR933X_UART_CS_TX_BREAK);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
/*
@@ -318,7 +318,7 @@ static void ar933x_uart_set_termios(struct uart_port *port,
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
/* disable the UART */
ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG,
@@ -352,7 +352,7 @@ static void ar933x_uart_set_termios(struct uart_port *port,
AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S,
AR933X_UART_CS_IF_MODE_DCE << AR933X_UART_CS_IF_MODE_S);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
if (tty_termios_baud_rate(new))
tty_termios_encode_baud_rate(new, baud, baud);
@@ -450,7 +450,7 @@ static irqreturn_t ar933x_uart_interrupt(int irq, void *dev_id)
if ((status & AR933X_UART_CS_HOST_INT) == 0)
return IRQ_NONE;
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
status = ar933x_uart_read(up, AR933X_UART_INT_REG);
status &= ar933x_uart_read(up, AR933X_UART_INT_EN_REG);
@@ -468,7 +468,7 @@ static irqreturn_t ar933x_uart_interrupt(int irq, void *dev_id)
ar933x_uart_tx_chars(up);
}
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
return IRQ_HANDLED;
}
@@ -485,7 +485,7 @@ static int ar933x_uart_startup(struct uart_port *port)
if (ret)
return ret;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
/* Enable HOST interrupts */
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
@@ -498,7 +498,7 @@ static int ar933x_uart_startup(struct uart_port *port)
/* Enable RX interrupts */
ar933x_uart_start_rx_interrupt(up);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
return 0;
}
@@ -632,9 +632,9 @@ static void ar933x_uart_console_write(struct console *co, const char *s,
if (up->port.sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock(&up->port.lock);
+ locked = uart_port_trylock(&up->port);
else
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
/*
* First save the IER then disable the interrupts
@@ -654,7 +654,7 @@ static void ar933x_uart_console_write(struct console *co, const char *s,
ar933x_uart_write(up, AR933X_UART_INT_REG, AR933X_UART_INT_ALLINTS);
if (locked)
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
local_irq_restore(flags);
}
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index ad4ae19b6ce3..1aa5b2b49c26 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -279,9 +279,9 @@ static irqreturn_t arc_serial_isr(int irq, void *dev_id)
if (status & RXIENB) {
/* already in ISR, no need of xx_irqsave */
- spin_lock(&port->lock);
+ uart_port_lock(port);
arc_serial_rx_chars(port, status);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
}
if ((status & TXIENB) && (status & TXEMPTY)) {
@@ -291,12 +291,12 @@ static irqreturn_t arc_serial_isr(int irq, void *dev_id)
*/
UART_TX_IRQ_DISABLE(port);
- spin_lock(&port->lock);
+ uart_port_lock(port);
if (!uart_tx_stopped(port))
arc_serial_tx_chars(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
}
return IRQ_HANDLED;
@@ -366,7 +366,7 @@ arc_serial_set_termios(struct uart_port *port, struct ktermios *new,
uartl = hw_val & 0xFF;
uarth = (hw_val >> 8) & 0xFF;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
UART_ALL_IRQ_DISABLE(port);
@@ -391,7 +391,7 @@ arc_serial_set_termios(struct uart_port *port, struct ktermios *new,
uart_update_timeout(port, new->c_cflag, baud);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *arc_serial_type(struct uart_port *port)
@@ -521,9 +521,9 @@ static void arc_serial_console_write(struct console *co, const char *s,
struct uart_port *port = &arc_uart_ports[co->index].port;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_console_write(port, s, count, arc_serial_console_putchar);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static struct console arc_console = {
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 88cdafa5ac54..1946fafc3f3e 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -861,7 +861,7 @@ static void atmel_complete_tx_dma(void *arg)
struct dma_chan *chan = atmel_port->chan_tx;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (chan)
dmaengine_terminate_all(chan);
@@ -893,7 +893,7 @@ static void atmel_complete_tx_dma(void *arg)
atmel_port->tx_done_mask);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void atmel_release_tx_dma(struct uart_port *port)
@@ -1711,9 +1711,9 @@ static void atmel_tasklet_rx_func(struct tasklet_struct *t)
struct uart_port *port = &atmel_port->uart;
/* The interrupt handler does not take the lock */
- spin_lock(&port->lock);
+ uart_port_lock(port);
atmel_port->schedule_rx(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
}
static void atmel_tasklet_tx_func(struct tasklet_struct *t)
@@ -1723,9 +1723,9 @@ static void atmel_tasklet_tx_func(struct tasklet_struct *t)
struct uart_port *port = &atmel_port->uart;
/* The interrupt handler does not take the lock */
- spin_lock(&port->lock);
+ uart_port_lock(port);
atmel_port->schedule_tx(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
}
static void atmel_init_property(struct atmel_uart_port *atmel_port,
@@ -2175,7 +2175,7 @@ static void atmel_set_termios(struct uart_port *port,
} else
mode |= ATMEL_US_PAR_NONE;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
port->read_status_mask = ATMEL_US_OVRE;
if (termios->c_iflag & INPCK)
@@ -2377,22 +2377,22 @@ gclk_fail:
else
atmel_disable_ms(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios)
{
if (termios->c_line == N_PPS) {
port->flags |= UPF_HARDPPS_CD;
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
atmel_enable_ms(port);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
} else {
port->flags &= ~UPF_HARDPPS_CD;
if (!UART_ENABLE_MS(port, termios->c_cflag)) {
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
atmel_disable_ms(port);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
}
}
}
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 0dd8cceb837c..4a08fd5ee61b 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -201,7 +201,7 @@ static void bcm_uart_break_ctl(struct uart_port *port, int ctl)
unsigned long flags;
unsigned int val;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
val = bcm_uart_readl(port, UART_CTL_REG);
if (ctl)
@@ -210,7 +210,7 @@ static void bcm_uart_break_ctl(struct uart_port *port, int ctl)
val &= ~UART_CTL_XMITBRK_MASK;
bcm_uart_writel(port, val, UART_CTL_REG);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/*
@@ -332,7 +332,7 @@ static irqreturn_t bcm_uart_interrupt(int irq, void *dev_id)
unsigned int irqstat;
port = dev_id;
- spin_lock(&port->lock);
+ uart_port_lock(port);
irqstat = bcm_uart_readl(port, UART_IR_REG);
if (irqstat & UART_RX_INT_STAT)
@@ -353,7 +353,7 @@ static irqreturn_t bcm_uart_interrupt(int irq, void *dev_id)
estat & UART_EXTINP_DCD_MASK);
}
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
@@ -451,9 +451,9 @@ static void bcm_uart_shutdown(struct uart_port *port)
{
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
bcm_uart_writel(port, 0, UART_IR_REG);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
bcm_uart_disable(port);
bcm_uart_flush(port);
@@ -470,7 +470,7 @@ static void bcm_uart_set_termios(struct uart_port *port, struct ktermios *new,
unsigned long flags;
int tries;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Drain the hot tub fully before we power it off for the winter. */
for (tries = 3; !bcm_uart_tx_empty(port) && tries; tries--)
@@ -546,7 +546,7 @@ static void bcm_uart_set_termios(struct uart_port *port, struct ktermios *new,
uart_update_timeout(port, new->c_cflag, baud);
bcm_uart_enable(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/*
@@ -712,9 +712,9 @@ static void bcm_console_write(struct console *co, const char *s,
/* bcm_uart_interrupt() already took the lock */
locked = 0;
} else if (oops_in_progress) {
- locked = spin_trylock(&port->lock);
+ locked = uart_port_trylock(port);
} else {
- spin_lock(&port->lock);
+ uart_port_lock(port);
locked = 1;
}
@@ -725,7 +725,7 @@ static void bcm_console_write(struct console *co, const char *s,
wait_for_xmitr(port);
if (locked)
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
local_irq_restore(flags);
}
diff --git a/drivers/tty/serial/cpm_uart.c b/drivers/tty/serial/cpm_uart.c
index 626423022d62..be4af6eda4c2 100644
--- a/drivers/tty/serial/cpm_uart.c
+++ b/drivers/tty/serial/cpm_uart.c
@@ -569,7 +569,7 @@ static void cpm_uart_set_termios(struct uart_port *port,
if ((termios->c_cflag & CREAD) == 0)
port->read_status_mask &= ~BD_SC_EMPTY;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (IS_SMC(pinfo)) {
unsigned int bits = tty_get_frame_size(termios->c_cflag);
@@ -609,7 +609,7 @@ static void cpm_uart_set_termios(struct uart_port *port,
clk_set_rate(pinfo->clk, baud);
else
cpm_setbrg(pinfo->brg - 1, baud);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *cpm_uart_type(struct uart_port *port)
@@ -1386,9 +1386,9 @@ static void cpm_uart_console_write(struct console *co, const char *s,
cpm_uart_early_write(pinfo, s, count, true);
local_irq_restore(flags);
} else {
- spin_lock_irqsave(&pinfo->port.lock, flags);
+ uart_port_lock_irqsave(&pinfo->port, &flags);
cpm_uart_early_write(pinfo, s, count, true);
- spin_unlock_irqrestore(&pinfo->port.lock, flags);
+ uart_port_unlock_irqrestore(&pinfo->port, flags);
}
}
diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c
index 128b5479e813..5004125f3045 100644
--- a/drivers/tty/serial/digicolor-usart.c
+++ b/drivers/tty/serial/digicolor-usart.c
@@ -133,7 +133,7 @@ static void digicolor_uart_rx(struct uart_port *port)
{
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
while (1) {
u8 status, ch, ch_flag;
@@ -172,7 +172,7 @@ static void digicolor_uart_rx(struct uart_port *port)
ch_flag);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
tty_flip_buffer_push(&port->state->port);
}
@@ -185,7 +185,7 @@ static void digicolor_uart_tx(struct uart_port *port)
if (digicolor_uart_tx_full(port))
return;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (port->x_char) {
writeb_relaxed(port->x_char, port->membase + UA_EMI_REC);
@@ -211,7 +211,7 @@ static void digicolor_uart_tx(struct uart_port *port)
uart_write_wakeup(port);
out:
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static irqreturn_t digicolor_uart_int(int irq, void *dev_id)
@@ -333,7 +333,7 @@ static void digicolor_uart_set_termios(struct uart_port *port,
port->ignore_status_mask |= UA_STATUS_OVERRUN_ERR
| UA_STATUS_PARITY_ERR | UA_STATUS_FRAME_ERR;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_update_timeout(port, termios->c_cflag, baud);
@@ -341,7 +341,7 @@ static void digicolor_uart_set_termios(struct uart_port *port,
writeb_relaxed(divisor & 0xff, port->membase + UA_HBAUD_LO);
writeb_relaxed(divisor >> 8, port->membase + UA_HBAUD_HI);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *digicolor_uart_type(struct uart_port *port)
@@ -398,14 +398,14 @@ static void digicolor_uart_console_write(struct console *co, const char *c,
int locked = 1;
if (oops_in_progress)
- locked = spin_trylock_irqsave(&port->lock, flags);
+ locked = uart_port_trylock_irqsave(port, &flags);
else
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_console_write(port, c, n, digicolor_uart_console_putchar);
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
/* Wait for transmitter to become empty */
do {
diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
index 667f52e83277..6df7af9edc1c 100644
--- a/drivers/tty/serial/dz.c
+++ b/drivers/tty/serial/dz.c
@@ -268,9 +268,9 @@ static inline void dz_transmit_chars(struct dz_mux *mux)
}
/* If nothing to do or stopped or hardware stopped. */
if (uart_circ_empty(xmit) || uart_tx_stopped(&dport->port)) {
- spin_lock(&dport->port.lock);
+ uart_port_lock(&dport->port);
dz_stop_tx(&dport->port);
- spin_unlock(&dport->port.lock);
+ uart_port_unlock(&dport->port);
return;
}
@@ -287,9 +287,9 @@ static inline void dz_transmit_chars(struct dz_mux *mux)
/* Are we are done. */
if (uart_circ_empty(xmit)) {
- spin_lock(&dport->port.lock);
+ uart_port_lock(&dport->port);
dz_stop_tx(&dport->port);
- spin_unlock(&dport->port.lock);
+ uart_port_unlock(&dport->port);
}
}
@@ -415,14 +415,14 @@ static int dz_startup(struct uart_port *uport)
return ret;
}
- spin_lock_irqsave(&dport->port.lock, flags);
+ uart_port_lock_irqsave(&dport->port, &flags);
/* Enable interrupts. */
tmp = dz_in(dport, DZ_CSR);
tmp |= DZ_RIE | DZ_TIE;
dz_out(dport, DZ_CSR, tmp);
- spin_unlock_irqrestore(&dport->port.lock, flags);
+ uart_port_unlock_irqrestore(&dport->port, flags);
return 0;
}
@@ -443,9 +443,9 @@ static void dz_shutdown(struct uart_port *uport)
int irq_guard;
u16 tmp;
- spin_lock_irqsave(&dport->port.lock, flags);
+ uart_port_lock_irqsave(&dport->port, &flags);
dz_stop_tx(&dport->port);
- spin_unlock_irqrestore(&dport->port.lock, flags);
+ uart_port_unlock_irqrestore(&dport->port, flags);
irq_guard = atomic_add_return(-1, &mux->irq_guard);
if (!irq_guard) {
@@ -491,14 +491,14 @@ static void dz_break_ctl(struct uart_port *uport, int break_state)
unsigned long flags;
unsigned short tmp, mask = 1 << dport->port.line;
- spin_lock_irqsave(&uport->lock, flags);
+ uart_port_lock_irqsave(uport, &flags);
tmp = dz_in(dport, DZ_TCR);
if (break_state)
tmp |= mask;
else
tmp &= ~mask;
dz_out(dport, DZ_TCR, tmp);
- spin_unlock_irqrestore(&uport->lock, flags);
+ uart_port_unlock_irqrestore(uport, flags);
}
static int dz_encode_baud_rate(unsigned int baud)
@@ -608,7 +608,7 @@ static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
if (termios->c_cflag & CREAD)
cflag |= DZ_RXENAB;
- spin_lock_irqsave(&dport->port.lock, flags);
+ uart_port_lock_irqsave(&dport->port, &flags);
uart_update_timeout(uport, termios->c_cflag, baud);
@@ -631,7 +631,7 @@ static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
if (termios->c_iflag & IGNBRK)
dport->port.ignore_status_mask |= DZ_BREAK;
- spin_unlock_irqrestore(&dport->port.lock, flags);
+ uart_port_unlock_irqrestore(&dport->port, flags);
}
/*
@@ -645,12 +645,12 @@ static void dz_pm(struct uart_port *uport, unsigned int state,
struct dz_port *dport = to_dport(uport);
unsigned long flags;
- spin_lock_irqsave(&dport->port.lock, flags);
+ uart_port_lock_irqsave(&dport->port, &flags);
if (state < 3)
dz_start_tx(&dport->port);
else
dz_stop_tx(&dport->port);
- spin_unlock_irqrestore(&dport->port.lock, flags);
+ uart_port_unlock_irqrestore(&dport->port, flags);
}
@@ -811,7 +811,7 @@ static void dz_console_putchar(struct uart_port *uport, unsigned char ch)
unsigned short csr, tcr, trdy, mask;
int loops = 10000;
- spin_lock_irqsave(&dport->port.lock, flags);
+ uart_port_lock_irqsave(&dport->port, &flags);
csr = dz_in(dport, DZ_CSR);
dz_out(dport, DZ_CSR, csr & ~DZ_TIE);
tcr = dz_in(dport, DZ_TCR);
@@ -819,7 +819,7 @@ static void dz_console_putchar(struct uart_port *uport, unsigned char ch)
mask = tcr;
dz_out(dport, DZ_TCR, mask);
iob();
- spin_unlock_irqrestore(&dport->port.lock, flags);
+ uart_port_unlock_irqrestore(&dport->port, flags);
do {
trdy = dz_in(dport, DZ_CSR);
diff --git a/drivers/tty/serial/esp32_acm.c b/drivers/tty/serial/esp32_acm.c
new file mode 100644
index 000000000000..cb28a87736aa
--- /dev/null
+++ b/drivers/tty/serial/esp32_acm.c
@@ -0,0 +1,459 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/tty_flip.h>
+#include <asm/serial.h>
+
+#define DRIVER_NAME "esp32s3-acm"
+#define DEV_NAME "ttyGS"
+#define UART_NR 4
+
+#define ESP32S3_ACM_TX_FIFO_SIZE 64
+
+#define USB_SERIAL_JTAG_EP1_REG 0x00
+#define USB_SERIAL_JTAG_EP1_CONF_REG 0x04
+#define USB_SERIAL_JTAG_WR_DONE BIT(0)
+#define USB_SERIAL_JTAG_SERIAL_IN_EP_DATA_FREE BIT(1)
+#define USB_SERIAL_JTAG_INT_ST_REG 0x0c
+#define USB_SERIAL_JTAG_SERIAL_OUT_RECV_PKT_INT_ST BIT(2)
+#define USB_SERIAL_JTAG_SERIAL_IN_EMPTY_INT_ST BIT(3)
+#define USB_SERIAL_JTAG_INT_ENA_REG 0x10
+#define USB_SERIAL_JTAG_SERIAL_OUT_RECV_PKT_INT_ENA BIT(2)
+#define USB_SERIAL_JTAG_SERIAL_IN_EMPTY_INT_ENA BIT(3)
+#define USB_SERIAL_JTAG_INT_CLR_REG 0x14
+#define USB_SERIAL_JTAG_IN_EP1_ST_REG 0x2c
+#define USB_SERIAL_JTAG_IN_EP1_WR_ADDR GENMASK(8, 2)
+#define USB_SERIAL_JTAG_OUT_EP1_ST_REG 0x3c
+#define USB_SERIAL_JTAG_OUT_EP1_REC_DATA_CNT GENMASK(22, 16)
+
+static const struct of_device_id esp32s3_acm_dt_ids[] = {
+ {
+ .compatible = "esp,esp32s3-acm",
+ }, { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, esp32s3_acm_dt_ids);
+
+static struct uart_port *esp32s3_acm_ports[UART_NR];
+
+static void esp32s3_acm_write(struct uart_port *port, unsigned long reg, u32 v)
+{
+ writel(v, port->membase + reg);
+}
+
+static u32 esp32s3_acm_read(struct uart_port *port, unsigned long reg)
+{
+ return readl(port->membase + reg);
+}
+
+static u32 esp32s3_acm_tx_fifo_free(struct uart_port *port)
+{
+ u32 status = esp32s3_acm_read(port, USB_SERIAL_JTAG_EP1_CONF_REG);
+
+ return status & USB_SERIAL_JTAG_SERIAL_IN_EP_DATA_FREE;
+}
+
+static u32 esp32s3_acm_tx_fifo_cnt(struct uart_port *port)
+{
+ u32 status = esp32s3_acm_read(port, USB_SERIAL_JTAG_IN_EP1_ST_REG);
+
+ return FIELD_GET(USB_SERIAL_JTAG_IN_EP1_WR_ADDR, status);
+}
+
+static u32 esp32s3_acm_rx_fifo_cnt(struct uart_port *port)
+{
+ u32 status = esp32s3_acm_read(port, USB_SERIAL_JTAG_OUT_EP1_ST_REG);
+
+ return FIELD_GET(USB_SERIAL_JTAG_OUT_EP1_REC_DATA_CNT, status);
+}
+
+/* return TIOCSER_TEMT when transmitter is not busy */
+static unsigned int esp32s3_acm_tx_empty(struct uart_port *port)
+{
+ return esp32s3_acm_tx_fifo_cnt(port) == 0 ? TIOCSER_TEMT : 0;
+}
+
+static void esp32s3_acm_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+}
+
+static unsigned int esp32s3_acm_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CAR;
+}
+
+static void esp32s3_acm_stop_tx(struct uart_port *port)
+{
+ u32 int_ena;
+
+ int_ena = esp32s3_acm_read(port, USB_SERIAL_JTAG_INT_ENA_REG);
+ int_ena &= ~USB_SERIAL_JTAG_SERIAL_IN_EMPTY_INT_ENA;
+ esp32s3_acm_write(port, USB_SERIAL_JTAG_INT_ENA_REG, int_ena);
+}
+
+static void esp32s3_acm_rxint(struct uart_port *port)
+{
+ struct tty_port *tty_port = &port->state->port;
+ u32 rx_fifo_cnt = esp32s3_acm_rx_fifo_cnt(port);
+ unsigned long flags;
+ u32 i;
+
+ if (!rx_fifo_cnt)
+ return;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ for (i = 0; i < rx_fifo_cnt; ++i) {
+ u32 rx = esp32s3_acm_read(port, USB_SERIAL_JTAG_EP1_REG);
+
+ ++port->icount.rx;
+ tty_insert_flip_char(tty_port, rx, TTY_NORMAL);
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ tty_flip_buffer_push(tty_port);
+}
+
+static void esp32s3_acm_push(struct uart_port *port)
+{
+ if (esp32s3_acm_tx_fifo_free(port))
+ esp32s3_acm_write(port, USB_SERIAL_JTAG_EP1_CONF_REG,
+ USB_SERIAL_JTAG_WR_DONE);
+}
+
+static void esp32s3_acm_put_char(struct uart_port *port, u8 c)
+{
+ esp32s3_acm_write(port, USB_SERIAL_JTAG_EP1_REG, c);
+}
+
+static void esp32s3_acm_put_char_sync(struct uart_port *port, u8 c)
+{
+ unsigned long timeout = jiffies + HZ;
+
+ while (!esp32s3_acm_tx_fifo_free(port)) {
+ if (time_after(jiffies, timeout)) {
+ dev_warn(port->dev, "timeout waiting for TX FIFO\n");
+ return;
+ }
+ cpu_relax();
+ }
+ esp32s3_acm_put_char(port, c);
+ esp32s3_acm_push(port);
+}
+
+static void esp32s3_acm_transmit_buffer(struct uart_port *port)
+{
+ u32 tx_fifo_used;
+ unsigned int pending;
+ u8 ch;
+
+ if (!esp32s3_acm_tx_fifo_free(port))
+ return;
+
+ tx_fifo_used = esp32s3_acm_tx_fifo_cnt(port);
+ pending = uart_port_tx_limited(port, ch,
+ ESP32S3_ACM_TX_FIFO_SIZE - tx_fifo_used,
+ true, esp32s3_acm_put_char(port, ch),
+ ({}));
+ if (pending) {
+ u32 int_ena;
+
+ int_ena = esp32s3_acm_read(port, USB_SERIAL_JTAG_INT_ENA_REG);
+ int_ena |= USB_SERIAL_JTAG_SERIAL_IN_EMPTY_INT_ENA;
+ esp32s3_acm_write(port, USB_SERIAL_JTAG_INT_ENA_REG, int_ena);
+ }
+ esp32s3_acm_push(port);
+}
+
+static void esp32s3_acm_txint(struct uart_port *port)
+{
+ esp32s3_acm_transmit_buffer(port);
+}
+
+static irqreturn_t esp32s3_acm_int(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ u32 status;
+
+ status = esp32s3_acm_read(port, USB_SERIAL_JTAG_INT_ST_REG);
+ esp32s3_acm_write(port, USB_SERIAL_JTAG_INT_CLR_REG, status);
+
+ if (status & USB_SERIAL_JTAG_SERIAL_OUT_RECV_PKT_INT_ST)
+ esp32s3_acm_rxint(port);
+ if (status & USB_SERIAL_JTAG_SERIAL_IN_EMPTY_INT_ST)
+ esp32s3_acm_txint(port);
+
+ return IRQ_RETVAL(status);
+}
+
+static void esp32s3_acm_start_tx(struct uart_port *port)
+{
+ esp32s3_acm_transmit_buffer(port);
+}
+
+static void esp32s3_acm_stop_rx(struct uart_port *port)
+{
+ u32 int_ena;
+
+ int_ena = esp32s3_acm_read(port, USB_SERIAL_JTAG_INT_ENA_REG);
+ int_ena &= ~USB_SERIAL_JTAG_SERIAL_OUT_RECV_PKT_INT_ENA;
+ esp32s3_acm_write(port, USB_SERIAL_JTAG_INT_ENA_REG, int_ena);
+}
+
+static int esp32s3_acm_startup(struct uart_port *port)
+{
+ int ret;
+
+ ret = request_irq(port->irq, esp32s3_acm_int, 0, DRIVER_NAME, port);
+ if (ret)
+ return ret;
+ esp32s3_acm_write(port, USB_SERIAL_JTAG_INT_ENA_REG,
+ USB_SERIAL_JTAG_SERIAL_OUT_RECV_PKT_INT_ENA);
+
+ return 0;
+}
+
+static void esp32s3_acm_shutdown(struct uart_port *port)
+{
+ esp32s3_acm_write(port, USB_SERIAL_JTAG_INT_ENA_REG, 0);
+ free_irq(port->irq, port);
+}
+
+static void esp32s3_acm_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ const struct ktermios *old)
+{
+}
+
+static const char *esp32s3_acm_type(struct uart_port *port)
+{
+ return "ESP32S3 ACM";
+}
+
+/* configure/auto-configure the port */
+static void esp32s3_acm_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE)
+ port->type = PORT_GENERIC;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static void esp32s3_acm_poll_put_char(struct uart_port *port, unsigned char c)
+{
+ esp32s3_acm_put_char_sync(port, c);
+}
+
+static int esp32s3_acm_poll_get_char(struct uart_port *port)
+{
+ if (esp32s3_acm_rx_fifo_cnt(port))
+ return esp32s3_acm_read(port, USB_SERIAL_JTAG_EP1_REG);
+ else
+ return NO_POLL_CHAR;
+}
+#endif
+
+static const struct uart_ops esp32s3_acm_pops = {
+ .tx_empty = esp32s3_acm_tx_empty,
+ .set_mctrl = esp32s3_acm_set_mctrl,
+ .get_mctrl = esp32s3_acm_get_mctrl,
+ .stop_tx = esp32s3_acm_stop_tx,
+ .start_tx = esp32s3_acm_start_tx,
+ .stop_rx = esp32s3_acm_stop_rx,
+ .startup = esp32s3_acm_startup,
+ .shutdown = esp32s3_acm_shutdown,
+ .set_termios = esp32s3_acm_set_termios,
+ .type = esp32s3_acm_type,
+ .config_port = esp32s3_acm_config_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_put_char = esp32s3_acm_poll_put_char,
+ .poll_get_char = esp32s3_acm_poll_get_char,
+#endif
+};
+
+static void esp32s3_acm_string_write(struct uart_port *port, const char *s,
+ unsigned int count)
+{
+ uart_console_write(port, s, count, esp32s3_acm_put_char_sync);
+}
+
+static void
+esp32s3_acm_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct uart_port *port = esp32s3_acm_ports[co->index];
+ unsigned long flags;
+ bool locked = true;
+
+ if (port->sysrq)
+ locked = false;
+ else if (oops_in_progress)
+ locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+ spin_lock_irqsave(&port->lock, flags);
+
+ esp32s3_acm_string_write(port, s, count);
+
+ if (locked)
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct uart_driver esp32s3_acm_reg;
+static struct console esp32s3_acm_console = {
+ .name = DEV_NAME,
+ .write = esp32s3_acm_console_write,
+ .device = uart_console_device,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &esp32s3_acm_reg,
+};
+
+static void esp32s3_acm_earlycon_write(struct console *con, const char *s,
+ unsigned int n)
+{
+ struct earlycon_device *dev = con->data;
+
+ uart_console_write(&dev->port, s, n, esp32s3_acm_put_char_sync);
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static int esp32s3_acm_earlycon_read(struct console *con, char *s, unsigned int n)
+{
+ struct earlycon_device *dev = con->data;
+ unsigned int num_read = 0;
+
+ while (num_read < n) {
+ int c = esp32s3_acm_poll_get_char(&dev->port);
+
+ if (c == NO_POLL_CHAR)
+ break;
+ s[num_read++] = c;
+ }
+ return num_read;
+}
+#endif
+
+static int __init esp32s3_acm_early_console_setup(struct earlycon_device *device,
+ const char *options)
+{
+ if (!device->port.membase)
+ return -ENODEV;
+
+ device->con->write = esp32s3_acm_earlycon_write;
+#ifdef CONFIG_CONSOLE_POLL
+ device->con->read = esp32s3_acm_earlycon_read;
+#endif
+ return 0;
+}
+
+OF_EARLYCON_DECLARE(esp32s3acm, "esp,esp32s3-acm",
+ esp32s3_acm_early_console_setup);
+
+static struct uart_driver esp32s3_acm_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = DRIVER_NAME,
+ .dev_name = DEV_NAME,
+ .nr = ARRAY_SIZE(esp32s3_acm_ports),
+ .cons = &esp32s3_acm_console,
+};
+
+static int esp32s3_acm_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct uart_port *port;
+ struct resource *res;
+ int ret;
+
+ port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ ret = of_alias_get_id(np, "serial");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
+ return ret;
+ }
+ if (ret >= UART_NR) {
+ dev_err(&pdev->dev, "driver limited to %d serial ports\n",
+ UART_NR);
+ return -ENOMEM;
+ }
+
+ port->line = ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ port->mapbase = res->start;
+ port->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(port->membase))
+ return PTR_ERR(port->membase);
+
+ port->dev = &pdev->dev;
+ port->type = PORT_GENERIC;
+ port->iotype = UPIO_MEM;
+ port->irq = platform_get_irq(pdev, 0);
+ port->ops = &esp32s3_acm_pops;
+ port->flags = UPF_BOOT_AUTOCONF;
+ port->has_sysrq = 1;
+ port->fifosize = ESP32S3_ACM_TX_FIFO_SIZE;
+
+ esp32s3_acm_ports[port->line] = port;
+
+ platform_set_drvdata(pdev, port);
+
+ return uart_add_one_port(&esp32s3_acm_reg, port);
+}
+
+static int esp32s3_acm_remove(struct platform_device *pdev)
+{
+ struct uart_port *port = platform_get_drvdata(pdev);
+
+ uart_remove_one_port(&esp32s3_acm_reg, port);
+ return 0;
+}
+
+
+static struct platform_driver esp32s3_acm_driver = {
+ .probe = esp32s3_acm_probe,
+ .remove = esp32s3_acm_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = esp32s3_acm_dt_ids,
+ },
+};
+
+static int __init esp32s3_acm_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&esp32s3_acm_reg);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&esp32s3_acm_driver);
+ if (ret)
+ uart_unregister_driver(&esp32s3_acm_reg);
+
+ return ret;
+}
+
+static void __exit esp32s3_acm_exit(void)
+{
+ platform_driver_unregister(&esp32s3_acm_driver);
+ uart_unregister_driver(&esp32s3_acm_reg);
+}
+
+module_init(esp32s3_acm_init);
+module_exit(esp32s3_acm_exit);
+
+MODULE_AUTHOR("Max Filippov <jcmvbkbc@gmail.com>");
+MODULE_LICENSE("GPL");
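Both new Espressif drivers (the ACM gadget above and the UART below) push their TX path through the uart_port_tx_limited() helper rather than open-coding a circ-buffer loop. As a rough usage sketch, with every identifier other than the helper itself invented for illustration: the macro drains the port's transmit ring through the supplied put-char expression, up to the given character budget, and evaluates to the number of characters still queued, which the caller uses to decide whether to re-arm the TX-empty interrupt.

/* hypothetical TX helper, not part of this patch */
static void demo_transmit_buffer(struct uart_port *port)
{
	unsigned int pending;
	u8 ch;

	pending = uart_port_tx_limited(port, ch,
				       demo_tx_fifo_room(port),	/* budget: free FIFO slots */
				       true,			/* tx_ready: FIFO accepts a byte */
				       demo_put_char(port, ch),	/* emit one character */
				       ({}));			/* tx_done: nothing to flush */
	if (pending)
		demo_enable_tx_empty_irq(port);	/* refill once the FIFO drains */
}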
diff --git a/drivers/tty/serial/esp32_uart.c b/drivers/tty/serial/esp32_uart.c
new file mode 100644
index 000000000000..85c9c5ad7cc5
--- /dev/null
+++ b/drivers/tty/serial/esp32_uart.c
@@ -0,0 +1,784 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/tty_flip.h>
+#include <asm/serial.h>
+
+#define DRIVER_NAME "esp32-uart"
+#define DEV_NAME "ttyS"
+#define UART_NR 3
+
+#define ESP32_UART_TX_FIFO_SIZE 127
+#define ESP32_UART_RX_FIFO_SIZE 127
+
+#define UART_FIFO_REG 0x00
+#define UART_INT_RAW_REG 0x04
+#define UART_INT_ST_REG 0x08
+#define UART_INT_ENA_REG 0x0c
+#define UART_INT_CLR_REG 0x10
+#define UART_RXFIFO_FULL_INT BIT(0)
+#define UART_TXFIFO_EMPTY_INT BIT(1)
+#define UART_BRK_DET_INT BIT(7)
+#define UART_CLKDIV_REG 0x14
+#define ESP32_UART_CLKDIV GENMASK(19, 0)
+#define ESP32S3_UART_CLKDIV GENMASK(11, 0)
+#define UART_CLKDIV_SHIFT 0
+#define UART_CLKDIV_FRAG GENMASK(23, 20)
+#define UART_STATUS_REG 0x1c
+#define ESP32_UART_RXFIFO_CNT GENMASK(7, 0)
+#define ESP32S3_UART_RXFIFO_CNT GENMASK(9, 0)
+#define UART_RXFIFO_CNT_SHIFT 0
+#define UART_DSRN BIT(13)
+#define UART_CTSN BIT(14)
+#define ESP32_UART_TXFIFO_CNT GENMASK(23, 16)
+#define ESP32S3_UART_TXFIFO_CNT GENMASK(25, 16)
+#define UART_TXFIFO_CNT_SHIFT 16
+#define UART_CONF0_REG 0x20
+#define UART_PARITY BIT(0)
+#define UART_PARITY_EN BIT(1)
+#define UART_BIT_NUM GENMASK(3, 2)
+#define UART_BIT_NUM_5 0
+#define UART_BIT_NUM_6 1
+#define UART_BIT_NUM_7 2
+#define UART_BIT_NUM_8 3
+#define UART_STOP_BIT_NUM GENMASK(5, 4)
+#define UART_STOP_BIT_NUM_1 1
+#define UART_STOP_BIT_NUM_2 3
+#define UART_SW_RTS BIT(6)
+#define UART_SW_DTR BIT(7)
+#define UART_LOOPBACK BIT(14)
+#define UART_TX_FLOW_EN BIT(15)
+#define UART_RTS_INV BIT(23)
+#define UART_DTR_INV BIT(24)
+#define UART_CONF1_REG 0x24
+#define UART_RXFIFO_FULL_THRHD_SHIFT 0
+#define ESP32_UART_TXFIFO_EMPTY_THRHD_SHIFT 8
+#define ESP32S3_UART_TXFIFO_EMPTY_THRHD_SHIFT 10
+#define ESP32_UART_RX_FLOW_EN BIT(23)
+#define ESP32S3_UART_RX_FLOW_EN BIT(22)
+#define ESP32S3_UART_CLK_CONF_REG 0x78
+#define ESP32S3_UART_SCLK_DIV_B GENMASK(5, 0)
+#define ESP32S3_UART_SCLK_DIV_A GENMASK(11, 6)
+#define ESP32S3_UART_SCLK_DIV_NUM GENMASK(19, 12)
+#define ESP32S3_UART_SCLK_SEL GENMASK(21, 20)
+#define APB_CLK 1
+#define RC_FAST_CLK 2
+#define XTAL_CLK 3
+#define ESP32S3_UART_SCLK_EN BIT(22)
+#define ESP32S3_UART_RST_CORE BIT(23)
+#define ESP32S3_UART_TX_SCLK_EN BIT(24)
+#define ESP32S3_UART_RX_SCLK_EN BIT(25)
+#define ESP32S3_UART_TX_RST_CORE BIT(26)
+#define ESP32S3_UART_RX_RST_CORE BIT(27)
+
+#define ESP32S3_UART_CLK_CONF_DEFAULT \
+ (ESP32S3_UART_RX_SCLK_EN | \
+ ESP32S3_UART_TX_SCLK_EN | \
+ ESP32S3_UART_SCLK_EN | \
+ FIELD_PREP(ESP32S3_UART_SCLK_SEL, XTAL_CLK))
+
+struct esp32_port {
+ struct uart_port port;
+ struct clk *clk;
+};
+
+struct esp32_uart_variant {
+ u32 clkdiv_mask;
+ u32 rxfifo_cnt_mask;
+ u32 txfifo_cnt_mask;
+ u32 txfifo_empty_thrhd_shift;
+ u32 rx_flow_en;
+ const char *type;
+ bool has_clkconf;
+};
+
+static const struct esp32_uart_variant esp32_variant = {
+ .clkdiv_mask = ESP32_UART_CLKDIV,
+ .rxfifo_cnt_mask = ESP32_UART_RXFIFO_CNT,
+ .txfifo_cnt_mask = ESP32_UART_TXFIFO_CNT,
+ .txfifo_empty_thrhd_shift = ESP32_UART_TXFIFO_EMPTY_THRHD_SHIFT,
+ .rx_flow_en = ESP32_UART_RX_FLOW_EN,
+ .type = "ESP32 UART",
+};
+
+static const struct esp32_uart_variant esp32s3_variant = {
+ .clkdiv_mask = ESP32S3_UART_CLKDIV,
+ .rxfifo_cnt_mask = ESP32S3_UART_RXFIFO_CNT,
+ .txfifo_cnt_mask = ESP32S3_UART_TXFIFO_CNT,
+ .txfifo_empty_thrhd_shift = ESP32S3_UART_TXFIFO_EMPTY_THRHD_SHIFT,
+ .rx_flow_en = ESP32S3_UART_RX_FLOW_EN,
+ .type = "ESP32S3 UART",
+ .has_clkconf = true,
+};
+
+static const struct of_device_id esp32_uart_dt_ids[] = {
+ {
+ .compatible = "esp,esp32-uart",
+ .data = &esp32_variant,
+ }, {
+ .compatible = "esp,esp32s3-uart",
+ .data = &esp32s3_variant,
+ }, { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, esp32_uart_dt_ids);
+
+static struct esp32_port *esp32_uart_ports[UART_NR];
+
+static const struct esp32_uart_variant *port_variant(struct uart_port *port)
+{
+ return port->private_data;
+}
+
+static void esp32_uart_write(struct uart_port *port, unsigned long reg, u32 v)
+{
+ writel(v, port->membase + reg);
+}
+
+static u32 esp32_uart_read(struct uart_port *port, unsigned long reg)
+{
+ return readl(port->membase + reg);
+}
+
+static u32 esp32_uart_tx_fifo_cnt(struct uart_port *port)
+{
+ u32 status = esp32_uart_read(port, UART_STATUS_REG);
+
+ return (status & port_variant(port)->txfifo_cnt_mask) >> UART_TXFIFO_CNT_SHIFT;
+}
+
+static u32 esp32_uart_rx_fifo_cnt(struct uart_port *port)
+{
+ u32 status = esp32_uart_read(port, UART_STATUS_REG);
+
+ return (status & port_variant(port)->rxfifo_cnt_mask) >> UART_RXFIFO_CNT_SHIFT;
+}
+
+/* return TIOCSER_TEMT when transmitter is not busy */
+static unsigned int esp32_uart_tx_empty(struct uart_port *port)
+{
+ return esp32_uart_tx_fifo_cnt(port) ? 0 : TIOCSER_TEMT;
+}
+
+static void esp32_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ u32 conf0 = esp32_uart_read(port, UART_CONF0_REG);
+
+ conf0 &= ~(UART_LOOPBACK |
+ UART_SW_RTS | UART_RTS_INV |
+ UART_SW_DTR | UART_DTR_INV);
+
+ if (mctrl & TIOCM_RTS)
+ conf0 |= UART_SW_RTS;
+ if (mctrl & TIOCM_DTR)
+ conf0 |= UART_SW_DTR;
+ if (mctrl & TIOCM_LOOP)
+ conf0 |= UART_LOOPBACK;
+
+ esp32_uart_write(port, UART_CONF0_REG, conf0);
+}
+
+static unsigned int esp32_uart_get_mctrl(struct uart_port *port)
+{
+ u32 status = esp32_uart_read(port, UART_STATUS_REG);
+ unsigned int ret = TIOCM_CAR;
+
+ if (status & UART_DSRN)
+ ret |= TIOCM_DSR;
+ if (status & UART_CTSN)
+ ret |= TIOCM_CTS;
+
+ return ret;
+}
+
+static void esp32_uart_stop_tx(struct uart_port *port)
+{
+ u32 int_ena;
+
+ int_ena = esp32_uart_read(port, UART_INT_ENA_REG);
+ int_ena &= ~UART_TXFIFO_EMPTY_INT;
+ esp32_uart_write(port, UART_INT_ENA_REG, int_ena);
+}
+
+static void esp32_uart_rxint(struct uart_port *port)
+{
+ struct tty_port *tty_port = &port->state->port;
+ u32 rx_fifo_cnt = esp32_uart_rx_fifo_cnt(port);
+ unsigned long flags;
+ u32 i;
+
+ if (!rx_fifo_cnt)
+ return;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ for (i = 0; i < rx_fifo_cnt; ++i) {
+ u32 rx = esp32_uart_read(port, UART_FIFO_REG);
+
+ if (!rx &&
+ (esp32_uart_read(port, UART_INT_ST_REG) & UART_BRK_DET_INT)) {
+ esp32_uart_write(port, UART_INT_CLR_REG, UART_BRK_DET_INT);
+ ++port->icount.brk;
+ uart_handle_break(port);
+ } else {
+ if (uart_handle_sysrq_char(port, (unsigned char)rx))
+ continue;
+ tty_insert_flip_char(tty_port, rx, TTY_NORMAL);
+ ++port->icount.rx;
+ }
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ tty_flip_buffer_push(tty_port);
+}
+
+static void esp32_uart_put_char(struct uart_port *port, u8 c)
+{
+ esp32_uart_write(port, UART_FIFO_REG, c);
+}
+
+static void esp32_uart_put_char_sync(struct uart_port *port, u8 c)
+{
+ unsigned long timeout = jiffies + HZ;
+
+ while (esp32_uart_tx_fifo_cnt(port) >= ESP32_UART_TX_FIFO_SIZE) {
+ if (time_after(jiffies, timeout)) {
+ dev_warn(port->dev, "timeout waiting for TX FIFO\n");
+ return;
+ }
+ cpu_relax();
+ }
+ esp32_uart_put_char(port, c);
+}
+
+static void esp32_uart_transmit_buffer(struct uart_port *port)
+{
+ u32 tx_fifo_used = esp32_uart_tx_fifo_cnt(port);
+ unsigned int pending;
+ u8 ch;
+
+ if (tx_fifo_used >= ESP32_UART_TX_FIFO_SIZE)
+ return;
+
+ pending = uart_port_tx_limited(port, ch,
+ ESP32_UART_TX_FIFO_SIZE - tx_fifo_used,
+ true, esp32_uart_put_char(port, ch),
+ ({}));
+ if (pending) {
+ u32 int_ena;
+
+ int_ena = esp32_uart_read(port, UART_INT_ENA_REG);
+ int_ena |= UART_TXFIFO_EMPTY_INT;
+ esp32_uart_write(port, UART_INT_ENA_REG, int_ena);
+ }
+}
+
+static void esp32_uart_txint(struct uart_port *port)
+{
+ esp32_uart_transmit_buffer(port);
+}
+
+static irqreturn_t esp32_uart_int(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ u32 status;
+
+ status = esp32_uart_read(port, UART_INT_ST_REG);
+
+ if (status & (UART_RXFIFO_FULL_INT | UART_BRK_DET_INT))
+ esp32_uart_rxint(port);
+ if (status & UART_TXFIFO_EMPTY_INT)
+ esp32_uart_txint(port);
+
+ esp32_uart_write(port, UART_INT_CLR_REG, status);
+
+ return IRQ_RETVAL(status);
+}
+
+static void esp32_uart_start_tx(struct uart_port *port)
+{
+ esp32_uart_transmit_buffer(port);
+}
+
+static void esp32_uart_stop_rx(struct uart_port *port)
+{
+ u32 int_ena;
+
+ int_ena = esp32_uart_read(port, UART_INT_ENA_REG);
+ int_ena &= ~UART_RXFIFO_FULL_INT;
+ esp32_uart_write(port, UART_INT_ENA_REG, int_ena);
+}
+
+static int esp32_uart_startup(struct uart_port *port)
+{
+ int ret = 0;
+ unsigned long flags;
+ struct esp32_port *sport = container_of(port, struct esp32_port, port);
+
+ ret = clk_prepare_enable(sport->clk);
+ if (ret)
+ return ret;
+
+ ret = request_irq(port->irq, esp32_uart_int, 0, DRIVER_NAME, port);
+ if (ret) {
+ clk_disable_unprepare(sport->clk);
+ return ret;
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (port_variant(port)->has_clkconf)
+ esp32_uart_write(port, ESP32S3_UART_CLK_CONF_REG,
+ ESP32S3_UART_CLK_CONF_DEFAULT);
+ esp32_uart_write(port, UART_CONF1_REG,
+ (1 << UART_RXFIFO_FULL_THRHD_SHIFT) |
+ (1 << port_variant(port)->txfifo_empty_thrhd_shift));
+ esp32_uart_write(port, UART_INT_CLR_REG, UART_RXFIFO_FULL_INT | UART_BRK_DET_INT);
+ esp32_uart_write(port, UART_INT_ENA_REG, UART_RXFIFO_FULL_INT | UART_BRK_DET_INT);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return ret;
+}
+
+static void esp32_uart_shutdown(struct uart_port *port)
+{
+ struct esp32_port *sport = container_of(port, struct esp32_port, port);
+
+ esp32_uart_write(port, UART_INT_ENA_REG, 0);
+ free_irq(port->irq, port);
+ clk_disable_unprepare(sport->clk);
+}
+
+static bool esp32_uart_set_baud(struct uart_port *port, u32 baud)
+{
+ u32 sclk = port->uartclk;
+ u32 div = sclk / baud;
+
+ if (port_variant(port)->has_clkconf) {
+ u32 sclk_div = div / port_variant(port)->clkdiv_mask;
+
+ if (div > port_variant(port)->clkdiv_mask) {
+ sclk /= (sclk_div + 1);
+ div = sclk / baud;
+ }
+ esp32_uart_write(port, ESP32S3_UART_CLK_CONF_REG,
+ FIELD_PREP(ESP32S3_UART_SCLK_DIV_NUM, sclk_div) |
+ ESP32S3_UART_CLK_CONF_DEFAULT);
+ }
+
+ if (div <= port_variant(port)->clkdiv_mask) {
+ u32 frag = (sclk * 16) / baud - div * 16;
+
+ esp32_uart_write(port, UART_CLKDIV_REG,
+ div | FIELD_PREP(UART_CLKDIV_FRAG, frag));
+ return true;
+ }
+
+ return false;
+}
+
+static void esp32_uart_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ const struct ktermios *old)
+{
+ unsigned long flags;
+ u32 conf0, conf1;
+ u32 baud;
+ const u32 rx_flow_en = port_variant(port)->rx_flow_en;
+ u32 max_div = port_variant(port)->clkdiv_mask;
+
+ termios->c_cflag &= ~CMSPAR;
+
+ if (port_variant(port)->has_clkconf)
+ max_div *= FIELD_MAX(ESP32S3_UART_SCLK_DIV_NUM);
+
+ baud = uart_get_baud_rate(port, termios, old,
+ port->uartclk / max_div,
+ port->uartclk / 16);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ conf0 = esp32_uart_read(port, UART_CONF0_REG);
+ conf0 &= ~(UART_PARITY_EN | UART_PARITY | UART_BIT_NUM | UART_STOP_BIT_NUM);
+
+ conf1 = esp32_uart_read(port, UART_CONF1_REG);
+ conf1 &= ~rx_flow_en;
+
+ if (termios->c_cflag & PARENB) {
+ conf0 |= UART_PARITY_EN;
+ if (termios->c_cflag & PARODD)
+ conf0 |= UART_PARITY;
+ }
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ conf0 |= FIELD_PREP(UART_BIT_NUM, UART_BIT_NUM_5);
+ break;
+ case CS6:
+ conf0 |= FIELD_PREP(UART_BIT_NUM, UART_BIT_NUM_6);
+ break;
+ case CS7:
+ conf0 |= FIELD_PREP(UART_BIT_NUM, UART_BIT_NUM_7);
+ break;
+ case CS8:
+ conf0 |= FIELD_PREP(UART_BIT_NUM, UART_BIT_NUM_8);
+ break;
+ }
+
+ if (termios->c_cflag & CSTOPB)
+ conf0 |= FIELD_PREP(UART_STOP_BIT_NUM, UART_STOP_BIT_NUM_2);
+ else
+ conf0 |= FIELD_PREP(UART_STOP_BIT_NUM, UART_STOP_BIT_NUM_1);
+
+ if (termios->c_cflag & CRTSCTS)
+ conf1 |= rx_flow_en;
+
+ esp32_uart_write(port, UART_CONF0_REG, conf0);
+ esp32_uart_write(port, UART_CONF1_REG, conf1);
+
+ if (baud) {
+ esp32_uart_set_baud(port, baud);
+ uart_update_timeout(port, termios->c_cflag, baud);
+ } else {
+ if (esp32_uart_set_baud(port, 115200)) {
+ baud = 115200;
+ tty_termios_encode_baud_rate(termios, baud, baud);
+ uart_update_timeout(port, termios->c_cflag, baud);
+ } else {
+ dev_warn(port->dev,
+ "unable to set speed to %d baud or the default 115200\n",
+ baud);
+ }
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *esp32_uart_type(struct uart_port *port)
+{
+ return port_variant(port)->type;
+}
+
+/* configure/auto-configure the port */
+static void esp32_uart_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE)
+ port->type = PORT_GENERIC;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static int esp32_uart_poll_init(struct uart_port *port)
+{
+ struct esp32_port *sport = container_of(port, struct esp32_port, port);
+
+ return clk_prepare_enable(sport->clk);
+}
+
+static void esp32_uart_poll_put_char(struct uart_port *port, unsigned char c)
+{
+ esp32_uart_put_char_sync(port, c);
+}
+
+static int esp32_uart_poll_get_char(struct uart_port *port)
+{
+ if (esp32_uart_rx_fifo_cnt(port))
+ return esp32_uart_read(port, UART_FIFO_REG);
+ else
+ return NO_POLL_CHAR;
+
+}
+#endif
+
+static const struct uart_ops esp32_uart_pops = {
+ .tx_empty = esp32_uart_tx_empty,
+ .set_mctrl = esp32_uart_set_mctrl,
+ .get_mctrl = esp32_uart_get_mctrl,
+ .stop_tx = esp32_uart_stop_tx,
+ .start_tx = esp32_uart_start_tx,
+ .stop_rx = esp32_uart_stop_rx,
+ .startup = esp32_uart_startup,
+ .shutdown = esp32_uart_shutdown,
+ .set_termios = esp32_uart_set_termios,
+ .type = esp32_uart_type,
+ .config_port = esp32_uart_config_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_init = esp32_uart_poll_init,
+ .poll_put_char = esp32_uart_poll_put_char,
+ .poll_get_char = esp32_uart_poll_get_char,
+#endif
+};
+
+static void esp32_uart_console_putchar(struct uart_port *port, u8 c)
+{
+ esp32_uart_put_char_sync(port, c);
+}
+
+static void esp32_uart_string_write(struct uart_port *port, const char *s,
+ unsigned int count)
+{
+ uart_console_write(port, s, count, esp32_uart_console_putchar);
+}
+
+static void
+esp32_uart_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct esp32_port *sport = esp32_uart_ports[co->index];
+ struct uart_port *port = &sport->port;
+ unsigned long flags;
+ bool locked = true;
+
+ if (port->sysrq)
+ locked = false;
+ else if (oops_in_progress)
+ locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+ spin_lock_irqsave(&port->lock, flags);
+
+ esp32_uart_string_write(port, s, count);
+
+ if (locked)
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static int __init esp32_uart_console_setup(struct console *co, char *options)
+{
+ struct esp32_port *sport;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+ int ret;
+
+ /*
+ * check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ if (co->index == -1 || co->index >= ARRAY_SIZE(esp32_uart_ports))
+ co->index = 0;
+
+ sport = esp32_uart_ports[co->index];
+ if (!sport)
+ return -ENODEV;
+
+ ret = clk_prepare_enable(sport->clk);
+ if (ret)
+ return ret;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(&sport->port, co, baud, parity, bits, flow);
+}
+
+static int esp32_uart_console_exit(struct console *co)
+{
+ struct esp32_port *sport = esp32_uart_ports[co->index];
+
+ clk_disable_unprepare(sport->clk);
+ return 0;
+}
+
+static struct uart_driver esp32_uart_reg;
+static struct console esp32_uart_console = {
+ .name = DEV_NAME,
+ .write = esp32_uart_console_write,
+ .device = uart_console_device,
+ .setup = esp32_uart_console_setup,
+ .exit = esp32_uart_console_exit,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &esp32_uart_reg,
+};
+
+static void esp32_uart_earlycon_putchar(struct uart_port *port, u8 c)
+{
+ esp32_uart_put_char_sync(port, c);
+}
+
+static void esp32_uart_earlycon_write(struct console *con, const char *s,
+ unsigned int n)
+{
+ struct earlycon_device *dev = con->data;
+
+ uart_console_write(&dev->port, s, n, esp32_uart_earlycon_putchar);
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static int esp32_uart_earlycon_read(struct console *con, char *s, unsigned int n)
+{
+ struct earlycon_device *dev = con->data;
+ unsigned int num_read = 0;
+
+ while (num_read < n) {
+ int c = esp32_uart_poll_get_char(&dev->port);
+
+ if (c == NO_POLL_CHAR)
+ break;
+ s[num_read++] = c;
+ }
+ return num_read;
+}
+#endif
+
+static int __init esp32xx_uart_early_console_setup(struct earlycon_device *device,
+ const char *options)
+{
+ if (!device->port.membase)
+ return -ENODEV;
+
+ device->con->write = esp32_uart_earlycon_write;
+#ifdef CONFIG_CONSOLE_POLL
+ device->con->read = esp32_uart_earlycon_read;
+#endif
+ if (device->port.uartclk != BASE_BAUD * 16)
+ esp32_uart_set_baud(&device->port, device->baud);
+
+ return 0;
+}
+
+static int __init esp32_uart_early_console_setup(struct earlycon_device *device,
+ const char *options)
+{
+ device->port.private_data = (void *)&esp32_variant;
+
+ return esp32xx_uart_early_console_setup(device, options);
+}
+
+OF_EARLYCON_DECLARE(esp32uart, "esp,esp32-uart",
+ esp32_uart_early_console_setup);
+
+static int __init esp32s3_uart_early_console_setup(struct earlycon_device *device,
+ const char *options)
+{
+ device->port.private_data = (void *)&esp32s3_variant;
+
+ return esp32xx_uart_early_console_setup(device, options);
+}
+
+OF_EARLYCON_DECLARE(esp32s3uart, "esp,esp32s3-uart",
+ esp32s3_uart_early_console_setup);
+
+static struct uart_driver esp32_uart_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = DRIVER_NAME,
+ .dev_name = DEV_NAME,
+ .nr = ARRAY_SIZE(esp32_uart_ports),
+ .cons = &esp32_uart_console,
+};
+
+static int esp32_uart_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ static const struct of_device_id *match;
+ struct uart_port *port;
+ struct esp32_port *sport;
+ struct resource *res;
+ int ret;
+
+ match = of_match_device(esp32_uart_dt_ids, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
+ if (!sport)
+ return -ENOMEM;
+
+ port = &sport->port;
+
+ ret = of_alias_get_id(np, "serial");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
+ return ret;
+ }
+ if (ret >= UART_NR) {
+ dev_err(&pdev->dev, "driver limited to %d serial ports\n", UART_NR);
+ return -ENOMEM;
+ }
+
+ port->line = ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ port->mapbase = res->start;
+ port->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(port->membase))
+ return PTR_ERR(port->membase);
+
+ sport->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(sport->clk))
+ return PTR_ERR(sport->clk);
+
+ port->uartclk = clk_get_rate(sport->clk);
+ port->dev = &pdev->dev;
+ port->type = PORT_GENERIC;
+ port->iotype = UPIO_MEM;
+ port->irq = platform_get_irq(pdev, 0);
+ port->ops = &esp32_uart_pops;
+ port->flags = UPF_BOOT_AUTOCONF;
+ port->has_sysrq = 1;
+ port->fifosize = ESP32_UART_TX_FIFO_SIZE;
+ port->private_data = (void *)match->data;
+
+ esp32_uart_ports[port->line] = sport;
+
+ platform_set_drvdata(pdev, port);
+
+ return uart_add_one_port(&esp32_uart_reg, port);
+}
+
+static int esp32_uart_remove(struct platform_device *pdev)
+{
+ struct uart_port *port = platform_get_drvdata(pdev);
+
+ uart_remove_one_port(&esp32_uart_reg, port);
+
+ return 0;
+}
+
+
+static struct platform_driver esp32_uart_driver = {
+ .probe = esp32_uart_probe,
+ .remove = esp32_uart_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = esp32_uart_dt_ids,
+ },
+};
+
+static int __init esp32_uart_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&esp32_uart_reg);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&esp32_uart_driver);
+ if (ret)
+ uart_unregister_driver(&esp32_uart_reg);
+
+ return ret;
+}
+
+static void __exit esp32_uart_exit(void)
+{
+ platform_driver_unregister(&esp32_uart_driver);
+ uart_unregister_driver(&esp32_uart_reg);
+}
+
+module_init(esp32_uart_init);
+module_exit(esp32_uart_exit);
+
+MODULE_AUTHOR("Max Filippov <jcmvbkbc@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/fsl_linflexuart.c b/drivers/tty/serial/fsl_linflexuart.c
index 249cb380c3c6..3bdaf1ddc309 100644
--- a/drivers/tty/serial/fsl_linflexuart.c
+++ b/drivers/tty/serial/fsl_linflexuart.c
@@ -203,7 +203,7 @@ static irqreturn_t linflex_txint(int irq, void *dev_id)
struct circ_buf *xmit = &sport->state->xmit;
unsigned long flags;
- spin_lock_irqsave(&sport->lock, flags);
+ uart_port_lock_irqsave(sport, &flags);
if (sport->x_char) {
linflex_put_char(sport, sport->x_char);
@@ -217,7 +217,7 @@ static irqreturn_t linflex_txint(int irq, void *dev_id)
linflex_transmit_buffer(sport);
out:
- spin_unlock_irqrestore(&sport->lock, flags);
+ uart_port_unlock_irqrestore(sport, flags);
return IRQ_HANDLED;
}
@@ -230,7 +230,7 @@ static irqreturn_t linflex_rxint(int irq, void *dev_id)
unsigned char rx;
bool brk;
- spin_lock_irqsave(&sport->lock, flags);
+ uart_port_lock_irqsave(sport, &flags);
status = readl(sport->membase + UARTSR);
while (status & LINFLEXD_UARTSR_RMB) {
@@ -266,7 +266,7 @@ static irqreturn_t linflex_rxint(int irq, void *dev_id)
}
}
- spin_unlock_irqrestore(&sport->lock, flags);
+ uart_port_unlock_irqrestore(sport, flags);
tty_flip_buffer_push(port);
@@ -369,11 +369,11 @@ static int linflex_startup(struct uart_port *port)
int ret = 0;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
linflex_setup_watermark(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
ret = devm_request_irq(port->dev, port->irq, linflex_int, 0,
DRIVER_NAME, port);
@@ -386,14 +386,14 @@ static void linflex_shutdown(struct uart_port *port)
unsigned long ier;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* disable interrupts */
ier = readl(port->membase + LINIER);
ier &= ~(LINFLEXD_LINIER_DRIE | LINFLEXD_LINIER_DTIE);
writel(ier, port->membase + LINIER);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
devm_free_irq(port->dev, port->irq, port);
}
@@ -474,7 +474,7 @@ linflex_set_termios(struct uart_port *port, struct ktermios *termios,
cr &= ~LINFLEXD_UARTCR_PCE;
}
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
port->read_status_mask = 0;
@@ -507,7 +507,7 @@ linflex_set_termios(struct uart_port *port, struct ktermios *termios,
writel(cr1, port->membase + LINCR1);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *linflex_type(struct uart_port *port)
@@ -646,14 +646,14 @@ linflex_console_write(struct console *co, const char *s, unsigned int count)
if (sport->sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock_irqsave(&sport->lock, flags);
+ locked = uart_port_trylock_irqsave(sport, &flags);
else
- spin_lock_irqsave(&sport->lock, flags);
+ uart_port_lock_irqsave(sport, &flags);
linflex_string_write(sport, s, count);
if (locked)
- spin_unlock_irqrestore(&sport->lock, flags);
+ uart_port_unlock_irqrestore(sport, flags);
}
/*
@@ -832,10 +832,14 @@ static int linflex_probe(struct platform_device *pdev)
return PTR_ERR(sport->membase);
sport->mapbase = res->start;
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+
sport->dev = &pdev->dev;
sport->type = PORT_LINFLEXUART;
sport->iotype = UPIO_MEM;
- sport->irq = platform_get_irq(pdev, 0);
+ sport->irq = ret;
sport->ops = &linflex_pops;
sport->flags = UPF_BOOT_AUTOCONF;
sport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE);
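The hunks above replace direct spinlock calls on the port lock with the uart_port_lock helper family. A minimal sketch of the converted shape in an interrupt handler follows; foo_uart_irq() is hypothetical, only the uart_port_lock_irqsave()/uart_port_unlock_irqrestore() helpers are taken from the diff itself.

/*
 * Sketch of the locking conversion, assuming a generic driver named "foo".
 */
#include <linux/interrupt.h>
#include <linux/serial_core.h>

static irqreturn_t foo_uart_irq(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	unsigned long flags;

	/* was: spin_lock_irqsave(&port->lock, flags); */
	uart_port_lock_irqsave(port, &flags);

	/* ... service RX/TX status here ... */

	/* was: spin_unlock_irqrestore(&port->lock, flags); */
	uart_port_unlock_irqrestore(port, flags);

	return IRQ_HANDLED;
}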
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index f72e1340b47d..6d0cfb2e86b4 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -532,9 +532,9 @@ static void lpuart_dma_tx_complete(void *arg)
struct dma_chan *chan = sport->dma_tx_chan;
unsigned long flags;
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
if (!sport->dma_tx_in_progress) {
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
return;
}
@@ -543,7 +543,7 @@ static void lpuart_dma_tx_complete(void *arg)
uart_xmit_advance(&sport->port, sport->dma_tx_bytes);
sport->dma_tx_in_progress = false;
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&sport->port);
@@ -553,12 +553,12 @@ static void lpuart_dma_tx_complete(void *arg)
return;
}
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
if (!lpuart_stopped_or_empty(&sport->port))
lpuart_dma_tx(sport);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
static dma_addr_t lpuart_dma_datareg_addr(struct lpuart_port *sport)
@@ -651,7 +651,7 @@ static int lpuart_poll_init(struct uart_port *port)
sport->port.fifosize = 0;
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
/* Disable Rx & Tx */
writeb(0, sport->port.membase + UARTCR2);
@@ -675,7 +675,7 @@ static int lpuart_poll_init(struct uart_port *port)
/* Enable Rx and Tx */
writeb(UARTCR2_RE | UARTCR2_TE, sport->port.membase + UARTCR2);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
return 0;
}
@@ -703,7 +703,7 @@ static int lpuart32_poll_init(struct uart_port *port)
sport->port.fifosize = 0;
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
/* Disable Rx & Tx */
lpuart32_write(&sport->port, 0, UARTCTRL);
@@ -724,7 +724,7 @@ static int lpuart32_poll_init(struct uart_port *port)
/* Enable Rx and Tx */
lpuart32_write(&sport->port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
return 0;
}
@@ -879,9 +879,9 @@ static unsigned int lpuart32_tx_empty(struct uart_port *port)
static void lpuart_txint(struct lpuart_port *sport)
{
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
lpuart_transmit_buffer(sport);
- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);
}
static void lpuart_rxint(struct lpuart_port *sport)
@@ -890,7 +890,7 @@ static void lpuart_rxint(struct lpuart_port *sport)
struct tty_port *port = &sport->port.state->port;
unsigned char rx, sr;
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
while (!(readb(sport->port.membase + UARTSFIFO) & UARTSFIFO_RXEMPT)) {
flg = TTY_NORMAL;
@@ -956,9 +956,9 @@ out:
static void lpuart32_txint(struct lpuart_port *sport)
{
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
lpuart32_transmit_buffer(sport);
- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);
}
static void lpuart32_rxint(struct lpuart_port *sport)
@@ -968,7 +968,7 @@ static void lpuart32_rxint(struct lpuart_port *sport)
unsigned long rx, sr;
bool is_break;
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
while (!(lpuart32_read(&sport->port, UARTFIFO) & UARTFIFO_RXEMPT)) {
flg = TTY_NORMAL;
@@ -1170,12 +1170,12 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
async_tx_ack(sport->dma_rx_desc);
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
dmastat = dmaengine_tx_status(chan, sport->dma_rx_cookie, &state);
if (dmastat == DMA_ERROR) {
dev_err(sport->port.dev, "Rx DMA transfer failed!\n");
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
return;
}
@@ -1244,7 +1244,7 @@ exit:
dma_sync_sg_for_device(chan->device->dev, &sport->rx_sgl, 1,
DMA_FROM_DEVICE);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
tty_flip_buffer_push(port);
if (!sport->dma_idle_int)
@@ -1335,9 +1335,9 @@ static void lpuart_timer_func(struct timer_list *t)
mod_timer(&sport->lpuart_timer,
jiffies + sport->dma_rx_timeout);
- if (spin_trylock_irqsave(&sport->port.lock, flags)) {
+ if (uart_port_trylock_irqsave(&sport->port, &flags)) {
sport->last_residue = state.residue;
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
}
@@ -1802,14 +1802,14 @@ static void lpuart_hw_setup(struct lpuart_port *sport)
{
unsigned long flags;
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
lpuart_setup_watermark_enable(sport);
lpuart_rx_dma_startup(sport);
lpuart_tx_dma_startup(sport);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
static int lpuart_startup(struct uart_port *port)
@@ -1859,7 +1859,7 @@ static void lpuart32_hw_setup(struct lpuart_port *sport)
{
unsigned long flags;
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
lpuart32_hw_disable(sport);
@@ -1869,7 +1869,7 @@ static void lpuart32_hw_setup(struct lpuart_port *sport)
lpuart32_setup_watermark_enable(sport);
lpuart32_configure(sport);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
static int lpuart32_startup(struct uart_port *port)
@@ -1932,7 +1932,7 @@ static void lpuart_shutdown(struct uart_port *port)
unsigned char temp;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* disable Rx/Tx and interrupts */
temp = readb(port->membase + UARTCR2);
@@ -1940,7 +1940,7 @@ static void lpuart_shutdown(struct uart_port *port)
UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_RIE);
writeb(temp, port->membase + UARTCR2);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
lpuart_dma_shutdown(sport);
}
@@ -1952,7 +1952,7 @@ static void lpuart32_shutdown(struct uart_port *port)
unsigned long temp;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* clear status */
temp = lpuart32_read(&sport->port, UARTSTAT);
@@ -1969,7 +1969,7 @@ static void lpuart32_shutdown(struct uart_port *port)
UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE | UARTCTRL_SBK);
lpuart32_write(port, temp, UARTCTRL);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
lpuart_dma_shutdown(sport);
}
@@ -2069,7 +2069,7 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
if (old && sport->lpuart_dma_rx_use)
lpuart_dma_rx_free(&sport->port);
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
sport->port.read_status_mask = 0;
if (termios->c_iflag & INPCK)
@@ -2124,7 +2124,7 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
sport->lpuart_dma_rx_use = false;
}
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
static void __lpuart32_serial_setbrg(struct uart_port *port,
@@ -2304,7 +2304,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
if (old && sport->lpuart_dma_rx_use)
lpuart_dma_rx_free(&sport->port);
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
sport->port.read_status_mask = 0;
if (termios->c_iflag & INPCK)
@@ -2359,7 +2359,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
sport->lpuart_dma_rx_use = false;
}
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
static const char *lpuart_type(struct uart_port *port)
@@ -2477,9 +2477,9 @@ lpuart_console_write(struct console *co, const char *s, unsigned int count)
int locked = 1;
if (oops_in_progress)
- locked = spin_trylock_irqsave(&sport->port.lock, flags);
+ locked = uart_port_trylock_irqsave(&sport->port, &flags);
else
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
/* first save CR2 and then disable interrupts */
cr2 = old_cr2 = readb(sport->port.membase + UARTCR2);
@@ -2495,7 +2495,7 @@ lpuart_console_write(struct console *co, const char *s, unsigned int count)
writeb(old_cr2, sport->port.membase + UARTCR2);
if (locked)
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
static void
@@ -2507,9 +2507,9 @@ lpuart32_console_write(struct console *co, const char *s, unsigned int count)
int locked = 1;
if (oops_in_progress)
- locked = spin_trylock_irqsave(&sport->port.lock, flags);
+ locked = uart_port_trylock_irqsave(&sport->port, &flags);
else
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
/* first save CR2 and then disable interrupts */
cr = old_cr = lpuart32_read(&sport->port, UARTCTRL);
@@ -2525,7 +2525,7 @@ lpuart32_console_write(struct console *co, const char *s, unsigned int count)
lpuart32_write(&sport->port, old_cr, UARTCTRL);
if (locked)
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
/*
@@ -3089,7 +3089,7 @@ static int lpuart_suspend(struct device *dev)
uart_suspend_port(&lpuart_reg, &sport->port);
if (lpuart_uport_is_active(sport)) {
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
if (lpuart_is_32(sport)) {
/* disable Rx/Tx and interrupts */
temp = lpuart32_read(&sport->port, UARTCTRL);
@@ -3101,7 +3101,7 @@ static int lpuart_suspend(struct device *dev)
temp &= ~(UARTCR2_TE | UARTCR2_TIE | UARTCR2_TCIE);
writeb(temp, sport->port.membase + UARTCR2);
}
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
if (sport->lpuart_dma_rx_use) {
/*
@@ -3114,7 +3114,7 @@ static int lpuart_suspend(struct device *dev)
lpuart_dma_rx_free(&sport->port);
/* Disable Rx DMA to use UART port as wakeup source */
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
if (lpuart_is_32(sport)) {
temp = lpuart32_read(&sport->port, UARTBAUD);
lpuart32_write(&sport->port, temp & ~UARTBAUD_RDMAE,
@@ -3123,11 +3123,11 @@ static int lpuart_suspend(struct device *dev)
writeb(readb(sport->port.membase + UARTCR5) &
~UARTCR5_RDMAS, sport->port.membase + UARTCR5);
}
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
if (sport->lpuart_dma_tx_use) {
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
if (lpuart_is_32(sport)) {
temp = lpuart32_read(&sport->port, UARTBAUD);
temp &= ~UARTBAUD_TDMAE;
@@ -3137,7 +3137,7 @@ static int lpuart_suspend(struct device *dev)
temp &= ~UARTCR5_TDMAS;
writeb(temp, sport->port.membase + UARTCR5);
}
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
sport->dma_tx_in_progress = false;
dmaengine_terminate_sync(sport->dma_tx_chan);
}
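The console write paths converted above keep the established pattern of skipping the lock when sysrq handling already holds it and only trying the lock when an oops is in progress. A hedged sketch of that pattern is below; the foo_* names and the port lookup are hypothetical, while uart_port_trylock_irqsave(), uart_port_lock_irqsave() and uart_console_write() are the helpers used in the hunks.

#include <linux/console.h>
#include <linux/printk.h>
#include <linux/serial_core.h>

static struct uart_port *foo_console_ports[1];	/* hypothetical port registry */

static void foo_console_putchar(struct uart_port *port, unsigned char ch)
{
	/* hypothetical: write ch to the TX holding register */
}

static void foo_console_write(struct console *co, const char *s, unsigned int count)
{
	struct uart_port *port = foo_console_ports[co->index];
	unsigned long flags;
	int locked = 1;

	if (port->sysrq)
		locked = 0;			/* lock already held by the sysrq path */
	else if (oops_in_progress)
		locked = uart_port_trylock_irqsave(port, &flags);
	else
		uart_port_lock_irqsave(port, &flags);

	uart_console_write(port, s, count, foo_console_putchar);

	if (locked)
		uart_port_unlock_irqrestore(port, flags);
}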
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index 819f957b6b84..a75eafbcbea3 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -929,7 +929,7 @@ static inline void check_modem_status(struct icom_port *icom_port)
char delta_status;
unsigned char status;
- spin_lock(&icom_port->uart_port.lock);
+ uart_port_lock(&icom_port->uart_port);
/*modem input register */
status = readb(&icom_port->dram->isr);
@@ -951,7 +951,7 @@ static inline void check_modem_status(struct icom_port *icom_port)
port.delta_msr_wait);
old_status = status;
}
- spin_unlock(&icom_port->uart_port.lock);
+ uart_port_unlock(&icom_port->uart_port);
}
static void xmit_interrupt(u16 port_int_reg, struct icom_port *icom_port)
@@ -1093,7 +1093,7 @@ static void process_interrupt(u16 port_int_reg,
struct icom_port *icom_port)
{
- spin_lock(&icom_port->uart_port.lock);
+ uart_port_lock(&icom_port->uart_port);
trace(icom_port, "INTERRUPT", port_int_reg);
if (port_int_reg & (INT_XMIT_COMPLETED | INT_XMIT_DISABLED))
@@ -1102,7 +1102,7 @@ static void process_interrupt(u16 port_int_reg,
if (port_int_reg & INT_RCV_COMPLETED)
recv_interrupt(port_int_reg, icom_port);
- spin_unlock(&icom_port->uart_port.lock);
+ uart_port_unlock(&icom_port->uart_port);
}
static irqreturn_t icom_interrupt(int irq, void *dev_id)
@@ -1186,14 +1186,14 @@ static unsigned int icom_tx_empty(struct uart_port *port)
int ret;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (le16_to_cpu(icom_port->statStg->xmit[0].flags) &
SA_FLAGS_READY_TO_XMIT)
ret = TIOCSER_TEMT;
else
ret = 0;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return ret;
}
@@ -1276,7 +1276,7 @@ static void icom_send_xchar(struct uart_port *port, char ch)
/* wait .1 sec to send char */
for (index = 0; index < 10; index++) {
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
xdata = readb(&icom_port->dram->xchar);
if (xdata == 0x00) {
trace(icom_port, "QUICK_WRITE", 0);
@@ -1284,10 +1284,10 @@ static void icom_send_xchar(struct uart_port *port, char ch)
/* flush write operation */
xdata = readb(&icom_port->dram->xchar);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
break;
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
msleep(10);
}
}
@@ -1307,7 +1307,7 @@ static void icom_break(struct uart_port *port, int break_state)
unsigned char cmdReg;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
trace(icom_port, "BREAK", 0);
cmdReg = readb(&icom_port->dram->CmdReg);
if (break_state == -1) {
@@ -1315,7 +1315,7 @@ static void icom_break(struct uart_port *port, int break_state)
} else {
writeb(cmdReg & ~CMD_SND_BREAK, &icom_port->dram->CmdReg);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int icom_open(struct uart_port *port)
@@ -1365,7 +1365,7 @@ static void icom_set_termios(struct uart_port *port, struct ktermios *termios,
unsigned long offset;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
trace(icom_port, "CHANGE_SPEED", 0);
cflag = termios->c_cflag;
@@ -1516,7 +1516,7 @@ static void icom_set_termios(struct uart_port *port, struct ktermios *termios,
trace(icom_port, "XR_ENAB", 0);
writeb(CMD_XMIT_RCV_ENABLE, &icom_port->dram->CmdReg);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *icom_type(struct uart_port *port)
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 13cb78340709..708b9852a575 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -177,8 +177,6 @@
enum imx_uart_type {
IMX1_UART,
IMX21_UART,
- IMX53_UART,
- IMX6Q_UART,
};
/* device type dependent stuff */
@@ -240,30 +238,26 @@ struct imx_port_ucrs {
unsigned int ucr3;
};
-static struct imx_uart_data imx_uart_devdata[] = {
- [IMX1_UART] = {
- .uts_reg = IMX1_UTS,
- .devtype = IMX1_UART,
- },
- [IMX21_UART] = {
- .uts_reg = IMX21_UTS,
- .devtype = IMX21_UART,
- },
- [IMX53_UART] = {
- .uts_reg = IMX21_UTS,
- .devtype = IMX53_UART,
- },
- [IMX6Q_UART] = {
- .uts_reg = IMX21_UTS,
- .devtype = IMX6Q_UART,
- },
+static const struct imx_uart_data imx_uart_imx1_devdata = {
+ .uts_reg = IMX1_UTS,
+ .devtype = IMX1_UART,
+};
+
+static const struct imx_uart_data imx_uart_imx21_devdata = {
+ .uts_reg = IMX21_UTS,
+ .devtype = IMX21_UART,
};
static const struct of_device_id imx_uart_dt_ids[] = {
- { .compatible = "fsl,imx6q-uart", .data = &imx_uart_devdata[IMX6Q_UART], },
- { .compatible = "fsl,imx53-uart", .data = &imx_uart_devdata[IMX53_UART], },
- { .compatible = "fsl,imx1-uart", .data = &imx_uart_devdata[IMX1_UART], },
- { .compatible = "fsl,imx21-uart", .data = &imx_uart_devdata[IMX21_UART], },
+ /*
+ * For reasons unknown to me, some UART devices (e.g. imx6ul's) are
+ * compatible with fsl,imx6q-uart, but not with fsl,imx21-uart, while the
+ * original imx6q's UART is compatible with fsl,imx21-uart. This driver
+ * doesn't make any distinction between these two variants.
+ */
+ { .compatible = "fsl,imx6q-uart", .data = &imx_uart_imx21_devdata, },
+ { .compatible = "fsl,imx1-uart", .data = &imx_uart_imx1_devdata, },
+ { .compatible = "fsl,imx21-uart", .data = &imx_uart_imx21_devdata, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_uart_dt_ids);
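The table above attaches a per-variant struct to each compatible via .data. A sketch of how such match data is usually retrieved at probe time follows; foo_probe() is hypothetical and whether imx.c uses exactly this helper is not shown in the hunk, but of_device_get_match_data() is the standard OF accessor for the .data pointer.

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct imx_uart_data;	/* layout as defined earlier in imx.c */

static int foo_probe(struct platform_device *pdev)
{
	const struct imx_uart_data *devdata;

	/* returns the .data pointer of the matching of_device_id entry */
	devdata = of_device_get_match_data(&pdev->dev);
	if (!devdata)
		return -ENODEV;

	/* ... use the variant-specific register layout from devdata ... */
	return 0;
}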
@@ -575,7 +569,7 @@ static void imx_uart_dma_tx_callback(void *data)
unsigned long flags;
u32 ucr1;
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
@@ -600,7 +594,7 @@ static void imx_uart_dma_tx_callback(void *data)
imx_uart_writel(sport, ucr4, UCR4);
}
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
/* called with port.lock taken and irqs off */
@@ -766,11 +760,11 @@ static irqreturn_t imx_uart_rtsint(int irq, void *dev_id)
struct imx_port *sport = dev_id;
irqreturn_t ret;
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
ret = __imx_uart_rtsint(irq, dev_id);
- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);
return ret;
}
@@ -779,9 +773,9 @@ static irqreturn_t imx_uart_txint(int irq, void *dev_id)
{
struct imx_port *sport = dev_id;
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
imx_uart_transmit_buffer(sport);
- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);
return IRQ_HANDLED;
}
@@ -895,11 +889,11 @@ static irqreturn_t imx_uart_rxint(int irq, void *dev_id)
struct imx_port *sport = dev_id;
irqreturn_t ret;
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
ret = __imx_uart_rxint(irq, dev_id);
- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);
return ret;
}
@@ -962,7 +956,7 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id)
unsigned int usr1, usr2, ucr1, ucr2, ucr3, ucr4;
irqreturn_t ret = IRQ_NONE;
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
usr1 = imx_uart_readl(sport, USR1);
usr2 = imx_uart_readl(sport, USR2);
@@ -1032,7 +1026,7 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id)
ret = IRQ_HANDLED;
}
- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);
return ret;
}
@@ -1115,7 +1109,7 @@ static void imx_uart_break_ctl(struct uart_port *port, int break_state)
unsigned long flags;
u32 ucr1;
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
ucr1 = imx_uart_readl(sport, UCR1) & ~UCR1_SNDBRK;
@@ -1124,7 +1118,7 @@ static void imx_uart_break_ctl(struct uart_port *port, int break_state)
imx_uart_writel(sport, ucr1, UCR1);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
/*
@@ -1137,9 +1131,9 @@ static void imx_uart_timeout(struct timer_list *t)
unsigned long flags;
if (sport->port.state) {
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
imx_uart_mctrl_check(sport);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
}
@@ -1169,9 +1163,9 @@ static void imx_uart_dma_rx_callback(void *data)
status = dmaengine_tx_status(chan, sport->rx_cookie, &state);
if (status == DMA_ERROR) {
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
imx_uart_clear_rx_errors(sport);
- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);
return;
}
@@ -1200,9 +1194,9 @@ static void imx_uart_dma_rx_callback(void *data)
r_bytes = rx_ring->head - rx_ring->tail;
/* If we received something, check for 0xff flood */
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
imx_uart_check_flood(sport, imx_uart_readl(sport, USR2));
- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);
if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ)) {
@@ -1457,10 +1451,12 @@ static int imx_uart_startup(struct uart_port *port)
imx_uart_writel(sport, ucr4 & ~UCR4_DREN, UCR4);
/* Can we enable the DMA support? */
- if (!uart_console(port) && imx_uart_dma_init(sport) == 0)
+ if (!uart_console(port) && imx_uart_dma_init(sport) == 0) {
+ lockdep_set_subclass(&port->lock, 1);
dma_is_inited = 1;
+ }
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
/* Reset fifo's and state machines */
imx_uart_soft_reset(sport);
@@ -1533,7 +1529,7 @@ static int imx_uart_startup(struct uart_port *port)
imx_uart_disable_loopback_rs485(sport);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
return 0;
}
@@ -1558,21 +1554,21 @@ static void imx_uart_shutdown(struct uart_port *port)
sport->dma_is_rxing = 0;
}
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
imx_uart_stop_tx(port);
imx_uart_stop_rx(port);
imx_uart_disable_dma(sport);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
imx_uart_dma_exit(sport);
}
mctrl_gpio_disable_ms(sport->gpios);
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
ucr2 = imx_uart_readl(sport, UCR2);
ucr2 &= ~(UCR2_TXEN | UCR2_ATEN);
imx_uart_writel(sport, ucr2, UCR2);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
/*
* Stop our timer.
@@ -1583,7 +1579,7 @@ static void imx_uart_shutdown(struct uart_port *port)
* Disable all interrupts, port and break condition.
*/
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
ucr1 = imx_uart_readl(sport, UCR1);
ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_RXDMAEN |
@@ -1605,7 +1601,7 @@ static void imx_uart_shutdown(struct uart_port *port)
ucr4 &= ~UCR4_TCEN;
imx_uart_writel(sport, ucr4, UCR4);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
clk_disable_unprepare(sport->clk_per);
clk_disable_unprepare(sport->clk_ipg);
@@ -1668,7 +1664,7 @@ imx_uart_set_termios(struct uart_port *port, struct ktermios *termios,
baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
quot = uart_get_divisor(port, baud);
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
/*
* Read current UCR2 and save it for future use, then clear all the bits
@@ -1796,7 +1792,7 @@ imx_uart_set_termios(struct uart_port *port, struct ktermios *termios,
if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
imx_uart_enable_ms(&sport->port);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
static const char *imx_uart_type(struct uart_port *port)
@@ -1858,7 +1854,7 @@ static int imx_uart_poll_init(struct uart_port *port)
imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
/*
* Be careful about the order of enabling bits here. First enable the
@@ -1886,7 +1882,7 @@ static int imx_uart_poll_init(struct uart_port *port)
imx_uart_writel(sport, ucr1 | UCR1_RRDYEN, UCR1);
imx_uart_writel(sport, ucr2 | UCR2_ATEN, UCR2);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
return 0;
}
@@ -2005,9 +2001,9 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count)
if (sport->port.sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock_irqsave(&sport->port.lock, flags);
+ locked = uart_port_trylock_irqsave(&sport->port, &flags);
else
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
/*
* First, save UCR1/2/3 and then disable interrupts
@@ -2035,7 +2031,7 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count)
imx_uart_ucrs_restore(sport, &old_ucr);
if (locked)
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
/*
@@ -2193,10 +2189,10 @@ static enum hrtimer_restart imx_trigger_start_tx(struct hrtimer *t)
struct imx_port *sport = container_of(t, struct imx_port, trigger_start_tx);
unsigned long flags;
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
if (sport->tx_state == WAIT_AFTER_RTS)
imx_uart_start_tx(&sport->port);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
return HRTIMER_NORESTART;
}
@@ -2206,10 +2202,10 @@ static enum hrtimer_restart imx_trigger_stop_tx(struct hrtimer *t)
struct imx_port *sport = container_of(t, struct imx_port, trigger_stop_tx);
unsigned long flags;
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
if (sport->tx_state == WAIT_AFTER_SEND)
imx_uart_stop_tx(&sport->port);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
return HRTIMER_NORESTART;
}
@@ -2482,9 +2478,9 @@ static void imx_uart_restore_context(struct imx_port *sport)
{
unsigned long flags;
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
if (!sport->context_saved) {
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
return;
}
@@ -2499,7 +2495,7 @@ static void imx_uart_restore_context(struct imx_port *sport)
imx_uart_writel(sport, sport->saved_reg[2], UCR3);
imx_uart_writel(sport, sport->saved_reg[3], UCR4);
sport->context_saved = false;
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
static void imx_uart_save_context(struct imx_port *sport)
@@ -2507,7 +2503,7 @@ static void imx_uart_save_context(struct imx_port *sport)
unsigned long flags;
/* Save necessary regs */
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
sport->saved_reg[0] = imx_uart_readl(sport, UCR1);
sport->saved_reg[1] = imx_uart_readl(sport, UCR2);
sport->saved_reg[2] = imx_uart_readl(sport, UCR3);
@@ -2519,7 +2515,7 @@ static void imx_uart_save_context(struct imx_port *sport)
sport->saved_reg[8] = imx_uart_readl(sport, UBMR);
sport->saved_reg[9] = imx_uart_readl(sport, IMX21_UTS);
sport->context_saved = true;
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
static void imx_uart_enable_wakeup(struct imx_port *sport, bool on)
diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
index 845ff706bc59..320b29cd4683 100644
--- a/drivers/tty/serial/ip22zilog.c
+++ b/drivers/tty/serial/ip22zilog.c
@@ -432,7 +432,7 @@ static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id)
unsigned char r3;
bool push = false;
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
r3 = read_zsreg(channel, R3);
/* Channel A */
@@ -448,7 +448,7 @@ static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id)
if (r3 & CHATxIP)
ip22zilog_transmit_chars(up, channel);
}
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
if (push)
tty_flip_buffer_push(&up->port.state->port);
@@ -458,7 +458,7 @@ static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id)
channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
push = false;
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
writeb(RES_H_IUS, &channel->control);
ZSDELAY();
@@ -471,7 +471,7 @@ static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id)
if (r3 & CHBTxIP)
ip22zilog_transmit_chars(up, channel);
}
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
if (push)
tty_flip_buffer_push(&up->port.state->port);
@@ -504,11 +504,11 @@ static unsigned int ip22zilog_tx_empty(struct uart_port *port)
unsigned char status;
unsigned int ret;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
status = ip22zilog_read_channel_status(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (status & Tx_BUF_EMP)
ret = TIOCSER_TEMT;
@@ -664,7 +664,7 @@ static void ip22zilog_break_ctl(struct uart_port *port, int break_state)
else
clear_bits |= SND_BRK;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
new_reg = (up->curregs[R5] | set_bits) & ~clear_bits;
if (new_reg != up->curregs[R5]) {
@@ -674,7 +674,7 @@ static void ip22zilog_break_ctl(struct uart_port *port, int break_state)
write_zsreg(channel, R5, up->curregs[R5]);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void __ip22zilog_reset(struct uart_ip22zilog_port *up)
@@ -735,9 +735,9 @@ static int ip22zilog_startup(struct uart_port *port)
if (ZS_IS_CONS(up))
return 0;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
__ip22zilog_startup(up);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
@@ -775,7 +775,7 @@ static void ip22zilog_shutdown(struct uart_port *port)
if (ZS_IS_CONS(up))
return;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
channel = ZILOG_CHANNEL_FROM_PORT(port);
@@ -788,7 +788,7 @@ static void ip22zilog_shutdown(struct uart_port *port)
up->curregs[R5] &= ~SND_BRK;
ip22zilog_maybe_update_regs(up, channel);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/* Shared by TTY driver and serial console setup. The port lock is held
@@ -880,7 +880,7 @@ ip22zilog_set_termios(struct uart_port *port, struct ktermios *termios,
baud = uart_get_baud_rate(port, termios, old, 1200, 76800);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
@@ -894,7 +894,7 @@ ip22zilog_set_termios(struct uart_port *port, struct ktermios *termios,
ip22zilog_maybe_update_regs(up, ZILOG_CHANNEL_FROM_PORT(port));
uart_update_timeout(port, termios->c_cflag, baud);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static const char *ip22zilog_type(struct uart_port *port)
@@ -1016,10 +1016,10 @@ ip22zilog_console_write(struct console *con, const char *s, unsigned int count)
struct uart_ip22zilog_port *up = &ip22zilog_port_table[con->index];
unsigned long flags;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
uart_console_write(&up->port, s, count, ip22zilog_put_char);
udelay(2);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static int __init ip22zilog_console_setup(struct console *con, char *options)
@@ -1034,13 +1034,13 @@ static int __init ip22zilog_console_setup(struct console *con, char *options)
printk(KERN_INFO "Console: ttyS%d (IP22-Zilog)\n", con->index);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
up->curregs[R15] |= BRKIE;
__ip22zilog_startup(up);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
diff --git a/drivers/tty/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c
index 0c78f66276cd..2bd640428970 100644
--- a/drivers/tty/serial/jsm/jsm_neo.c
+++ b/drivers/tty/serial/jsm/jsm_neo.c
@@ -816,9 +816,9 @@ static void neo_parse_isr(struct jsm_board *brd, u32 port)
/* Parse any modem signal changes */
jsm_dbg(INTR, &ch->ch_bd->pci_dev,
"MOD_STAT: sending to parse_modem_sigs\n");
- spin_lock_irqsave(&ch->uart_port.lock, lock_flags);
+ uart_port_lock_irqsave(&ch->uart_port, &lock_flags);
neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
- spin_unlock_irqrestore(&ch->uart_port.lock, lock_flags);
+ uart_port_unlock_irqrestore(&ch->uart_port, lock_flags);
}
}
diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
index 222afc270c88..ce0fef7e2c66 100644
--- a/drivers/tty/serial/jsm/jsm_tty.c
+++ b/drivers/tty/serial/jsm/jsm_tty.c
@@ -152,14 +152,14 @@ static void jsm_tty_send_xchar(struct uart_port *port, char ch)
container_of(port, struct jsm_channel, uart_port);
struct ktermios *termios;
- spin_lock_irqsave(&port->lock, lock_flags);
+ uart_port_lock_irqsave(port, &lock_flags);
termios = &port->state->port.tty->termios;
if (ch == termios->c_cc[VSTART])
channel->ch_bd->bd_ops->send_start_character(channel);
if (ch == termios->c_cc[VSTOP])
channel->ch_bd->bd_ops->send_stop_character(channel);
- spin_unlock_irqrestore(&port->lock, lock_flags);
+ uart_port_unlock_irqrestore(port, lock_flags);
}
static void jsm_tty_stop_rx(struct uart_port *port)
@@ -176,13 +176,13 @@ static void jsm_tty_break(struct uart_port *port, int break_state)
struct jsm_channel *channel =
container_of(port, struct jsm_channel, uart_port);
- spin_lock_irqsave(&port->lock, lock_flags);
+ uart_port_lock_irqsave(port, &lock_flags);
if (break_state == -1)
channel->ch_bd->bd_ops->send_break(channel);
else
channel->ch_bd->bd_ops->clear_break(channel);
- spin_unlock_irqrestore(&port->lock, lock_flags);
+ uart_port_unlock_irqrestore(port, lock_flags);
}
static int jsm_tty_open(struct uart_port *port)
@@ -241,7 +241,7 @@ static int jsm_tty_open(struct uart_port *port)
channel->ch_cached_lsr = 0;
channel->ch_stops_sent = 0;
- spin_lock_irqsave(&port->lock, lock_flags);
+ uart_port_lock_irqsave(port, &lock_flags);
termios = &port->state->port.tty->termios;
channel->ch_c_cflag = termios->c_cflag;
channel->ch_c_iflag = termios->c_iflag;
@@ -261,7 +261,7 @@ static int jsm_tty_open(struct uart_port *port)
jsm_carrier(channel);
channel->ch_open_count++;
- spin_unlock_irqrestore(&port->lock, lock_flags);
+ uart_port_unlock_irqrestore(port, lock_flags);
jsm_dbg(OPEN, &channel->ch_bd->pci_dev, "finish\n");
return 0;
@@ -307,7 +307,7 @@ static void jsm_tty_set_termios(struct uart_port *port,
struct jsm_channel *channel =
container_of(port, struct jsm_channel, uart_port);
- spin_lock_irqsave(&port->lock, lock_flags);
+ uart_port_lock_irqsave(port, &lock_flags);
channel->ch_c_cflag = termios->c_cflag;
channel->ch_c_iflag = termios->c_iflag;
channel->ch_c_oflag = termios->c_oflag;
@@ -317,7 +317,7 @@ static void jsm_tty_set_termios(struct uart_port *port,
channel->ch_bd->bd_ops->param(channel);
jsm_carrier(channel);
- spin_unlock_irqrestore(&port->lock, lock_flags);
+ uart_port_unlock_irqrestore(port, lock_flags);
}
static const char *jsm_tty_type(struct uart_port *port)
diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c
index d881cdd2a58f..a25ab1efe38f 100644
--- a/drivers/tty/serial/liteuart.c
+++ b/drivers/tty/serial/liteuart.c
@@ -139,13 +139,13 @@ static irqreturn_t liteuart_interrupt(int irq, void *data)
* if polling, the context would be "in_serving_softirq", so use
* irq[save|restore] spin_lock variants to cover all possibilities
*/
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
isr = litex_read8(port->membase + OFF_EV_PENDING) & uart->irq_reg;
if (isr & EV_RX)
liteuart_rx_chars(port);
if (isr & EV_TX)
liteuart_tx_chars(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return IRQ_RETVAL(isr);
}
@@ -195,10 +195,10 @@ static int liteuart_startup(struct uart_port *port)
}
}
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* only enabling rx irqs during startup */
liteuart_update_irq_reg(port, true, EV_RX);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (!port->irq) {
timer_setup(&uart->timer, liteuart_timer, 0);
@@ -213,9 +213,9 @@ static void liteuart_shutdown(struct uart_port *port)
struct liteuart_port *uart = to_liteuart_port(port);
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
liteuart_update_irq_reg(port, false, EV_RX | EV_TX);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (port->irq)
free_irq(port->irq, port);
@@ -229,13 +229,13 @@ static void liteuart_set_termios(struct uart_port *port, struct ktermios *new,
unsigned int baud;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* update baudrate */
baud = uart_get_baud_rate(port, new, old, 0, 460800);
uart_update_timeout(port, new->c_cflag, baud);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *liteuart_type(struct uart_port *port)
@@ -382,9 +382,9 @@ static void liteuart_console_write(struct console *co, const char *s,
uart = (struct liteuart_port *)xa_load(&liteuart_array, co->index);
port = &uart->port;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_console_write(port, s, count, liteuart_putchar);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int liteuart_console_setup(struct console *co, char *options)
diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c
index b38fe4728c26..5149a947b7fe 100644
--- a/drivers/tty/serial/lpc32xx_hs.c
+++ b/drivers/tty/serial/lpc32xx_hs.c
@@ -140,15 +140,15 @@ static void lpc32xx_hsuart_console_write(struct console *co, const char *s,
if (up->port.sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock(&up->port.lock);
+ locked = uart_port_trylock(&up->port);
else
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
uart_console_write(&up->port, s, count, lpc32xx_hsuart_console_putchar);
wait_for_xmit_empty(&up->port);
if (locked)
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
local_irq_restore(flags);
}
@@ -298,7 +298,7 @@ static irqreturn_t serial_lpc32xx_interrupt(int irq, void *dev_id)
struct tty_port *tport = &port->state->port;
u32 status;
- spin_lock(&port->lock);
+ uart_port_lock(port);
/* Read UART status and clear latched interrupts */
status = readl(LPC32XX_HSUART_IIR(port->membase));
@@ -333,7 +333,7 @@ static irqreturn_t serial_lpc32xx_interrupt(int irq, void *dev_id)
__serial_lpc32xx_tx(port);
}
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
@@ -404,14 +404,14 @@ static void serial_lpc32xx_break_ctl(struct uart_port *port,
unsigned long flags;
u32 tmp;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
tmp = readl(LPC32XX_HSUART_CTRL(port->membase));
if (break_state != 0)
tmp |= LPC32XX_HSU_BREAK;
else
tmp &= ~LPC32XX_HSU_BREAK;
writel(tmp, LPC32XX_HSUART_CTRL(port->membase));
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/* port->lock is not held. */
@@ -421,7 +421,7 @@ static int serial_lpc32xx_startup(struct uart_port *port)
unsigned long flags;
u32 tmp;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
__serial_uart_flush(port);
@@ -441,7 +441,7 @@ static int serial_lpc32xx_startup(struct uart_port *port)
lpc32xx_loopback_set(port->mapbase, 0); /* get out of loopback mode */
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
retval = request_irq(port->irq, serial_lpc32xx_interrupt,
0, MODNAME, port);
@@ -458,7 +458,7 @@ static void serial_lpc32xx_shutdown(struct uart_port *port)
u32 tmp;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
tmp = LPC32XX_HSU_TX_TL8B | LPC32XX_HSU_RX_TL32B |
LPC32XX_HSU_OFFSET(20) | LPC32XX_HSU_TMO_INACT_4B;
@@ -466,7 +466,7 @@ static void serial_lpc32xx_shutdown(struct uart_port *port)
lpc32xx_loopback_set(port->mapbase, 1); /* go to loopback mode */
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
free_irq(port->irq, port);
}
@@ -491,7 +491,7 @@ static void serial_lpc32xx_set_termios(struct uart_port *port,
quot = __serial_get_clock_div(port->uartclk, baud);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Ignore characters? */
tmp = readl(LPC32XX_HSUART_CTRL(port->membase));
@@ -505,7 +505,7 @@ static void serial_lpc32xx_set_termios(struct uart_port *port,
uart_update_timeout(port, termios->c_cflag, baud);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
/* Don't rewrite B0 */
if (tty_termios_baud_rate(termios))
diff --git a/drivers/tty/serial/ma35d1_serial.c b/drivers/tty/serial/ma35d1_serial.c
index 465b1def9e11..a6a7c405892e 100644
--- a/drivers/tty/serial/ma35d1_serial.c
+++ b/drivers/tty/serial/ma35d1_serial.c
@@ -269,16 +269,16 @@ static void receive_chars(struct uart_ma35d1_port *up)
if (uart_handle_sysrq_char(&up->port, ch))
continue;
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
uart_insert_char(&up->port, fsr, MA35_FSR_RX_OVER_IF, ch, flag);
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
fsr = serial_in(up, MA35_FSR_REG);
} while (!(fsr & MA35_FSR_RX_EMPTY) && (max_count-- > 0));
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
tty_flip_buffer_push(&up->port.state->port);
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
}
static irqreturn_t ma35d1serial_interrupt(int irq, void *dev_id)
@@ -364,14 +364,14 @@ static void ma35d1serial_break_ctl(struct uart_port *port, int break_state)
unsigned long flags;
u32 lcr;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
lcr = serial_in(up, MA35_LCR_REG);
if (break_state != 0)
lcr |= MA35_LCR_BREAK;
else
lcr &= ~MA35_LCR_BREAK;
serial_out(up, MA35_LCR_REG, lcr);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static int ma35d1serial_startup(struct uart_port *port)
@@ -441,7 +441,7 @@ static void ma35d1serial_set_termios(struct uart_port *port,
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
up->port.read_status_mask = MA35_FSR_RX_OVER_IF;
if (termios->c_iflag & INPCK)
@@ -475,7 +475,7 @@ static void ma35d1serial_set_termios(struct uart_port *port,
serial_out(up, MA35_LCR_REG, lcr);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static const char *ma35d1serial_type(struct uart_port *port)
@@ -560,9 +560,9 @@ static void ma35d1serial_console_write(struct console *co, const char *s, u32 co
if (up->port.sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock_irqsave(&up->port.lock, flags);
+ locked = uart_port_trylock_irqsave(&up->port, &flags);
else
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* First save the IER then disable the interrupts
@@ -576,7 +576,7 @@ static void ma35d1serial_console_write(struct console *co, const char *s, u32 co
serial_out(up, MA35_IER_REG, ier);
if (locked)
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static int __init ma35d1serial_console_setup(struct console *co, char *options)
@@ -695,6 +695,9 @@ static int ma35d1serial_probe(struct platform_device *pdev)
up->port.iobase = res_mem->start;
up->port.membase = ioremap(up->port.iobase, MA35_UART_REG_SIZE);
+ if (!up->port.membase)
+ return -ENOMEM;
+
up->port.ops = &ma35d1serial_ops;
spin_lock_init(&up->port.lock);
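The ma35d1 hunk adds an explicit NULL check because plain ioremap() gives no managed error handling. For comparison, the esp32 probe earlier in this series uses the managed variant, which validates the resource and returns an ERR_PTR instead. A hedged sketch of that managed pattern, with a hypothetical foo_probe():

#include <linux/device.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *membase;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* maps the region and unmaps it automatically on driver detach */
	membase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(membase))
		return PTR_ERR(membase);

	return 0;
}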
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index db3204d2a305..97e4965b73d4 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -402,7 +402,7 @@ static int max14830_detect(struct device *dev)
ret = s->if_cfg->extended_reg_enable(dev, true);
if (ret)
return ret;
-
+
regmap_read(s->regmap, s->if_cfg->rev_id_reg, &val);
s->if_cfg->extended_reg_enable(dev, false);
if (((val & MAX310x_REV_MASK) != MAX14830_REV_ID)) {
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index 1666ce012e5e..91b15243f6c6 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -135,12 +135,12 @@ static void mcf_break_ctl(struct uart_port *port, int break_state)
{
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (break_state == -1)
writeb(MCFUART_UCR_CMDBREAKSTART, port->membase + MCFUART_UCR);
else
writeb(MCFUART_UCR_CMDBREAKSTOP, port->membase + MCFUART_UCR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/****************************************************************************/
@@ -150,7 +150,7 @@ static int mcf_startup(struct uart_port *port)
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Reset UART, get it into known state... */
writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
@@ -164,7 +164,7 @@ static int mcf_startup(struct uart_port *port)
pp->imr = MCFUART_UIR_RXREADY;
writeb(pp->imr, port->membase + MCFUART_UIMR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
@@ -176,7 +176,7 @@ static void mcf_shutdown(struct uart_port *port)
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Disable all interrupts now */
pp->imr = 0;
@@ -186,7 +186,7 @@ static void mcf_shutdown(struct uart_port *port)
writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/****************************************************************************/
@@ -252,7 +252,7 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
mr2 |= MCFUART_MR2_TXCTS;
}
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (port->rs485.flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
mr2 |= MCFUART_MR2_TXRTS;
@@ -273,7 +273,7 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
port->membase + MCFUART_UCSR);
writeb(MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE,
port->membase + MCFUART_UCR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/****************************************************************************/
@@ -350,7 +350,7 @@ static irqreturn_t mcf_interrupt(int irq, void *data)
isr = readb(port->membase + MCFUART_UISR) & pp->imr;
- spin_lock(&port->lock);
+ uart_port_lock(port);
if (isr & MCFUART_UIR_RXREADY) {
mcf_rx_chars(pp);
ret = IRQ_HANDLED;
@@ -359,7 +359,7 @@ static irqreturn_t mcf_interrupt(int irq, void *data)
mcf_tx_chars(pp);
ret = IRQ_HANDLED;
}
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return ret;
}
diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c
index d2502aaa3e8c..8048fa542fc4 100644
--- a/drivers/tty/serial/men_z135_uart.c
+++ b/drivers/tty/serial/men_z135_uart.c
@@ -392,7 +392,7 @@ static irqreturn_t men_z135_intr(int irq, void *data)
if (!irq_id)
goto out;
- spin_lock(&port->lock);
+ uart_port_lock(port);
/* It's safe to write to IIR[7:6] RXC[9:8] */
iowrite8(irq_id, port->membase + MEN_Z135_STAT_REG);
@@ -418,7 +418,7 @@ static irqreturn_t men_z135_intr(int irq, void *data)
handled = true;
}
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
out:
return IRQ_RETVAL(handled);
}
@@ -708,7 +708,7 @@ static void men_z135_set_termios(struct uart_port *port,
baud = uart_get_baud_rate(port, termios, old, 0, uart_freq / 16);
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
@@ -716,7 +716,7 @@ static void men_z135_set_termios(struct uart_port *port,
iowrite32(bd_reg, port->membase + MEN_Z135_BAUD_REG);
uart_update_timeout(port, termios->c_cflag, baud);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
}
static const char *men_z135_type(struct uart_port *port)
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index 790d910dafa5..8dd84617e715 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -129,14 +129,14 @@ static void meson_uart_shutdown(struct uart_port *port)
free_irq(port->irq, port);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
val = readl(port->membase + AML_UART_CONTROL);
val &= ~AML_UART_RX_EN;
val &= ~(AML_UART_RX_INT_EN | AML_UART_TX_INT_EN);
writel(val, port->membase + AML_UART_CONTROL);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void meson_uart_start_tx(struct uart_port *port)
@@ -238,7 +238,7 @@ static irqreturn_t meson_uart_interrupt(int irq, void *dev_id)
{
struct uart_port *port = (struct uart_port *)dev_id;
- spin_lock(&port->lock);
+ uart_port_lock(port);
if (!(readl(port->membase + AML_UART_STATUS) & AML_UART_RX_EMPTY))
meson_receive_chars(port);
@@ -248,7 +248,7 @@ static irqreturn_t meson_uart_interrupt(int irq, void *dev_id)
meson_uart_start_tx(port);
}
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
@@ -284,7 +284,7 @@ static int meson_uart_startup(struct uart_port *port)
u32 val;
int ret = 0;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
val = readl(port->membase + AML_UART_CONTROL);
val |= AML_UART_CLEAR_ERR;
@@ -301,7 +301,7 @@ static int meson_uart_startup(struct uart_port *port)
val = (AML_UART_RECV_IRQ(1) | AML_UART_XMIT_IRQ(port->fifosize / 2));
writel(val, port->membase + AML_UART_MISC);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
ret = request_irq(port->irq, meson_uart_interrupt, 0,
port->name, port);
@@ -341,7 +341,7 @@ static void meson_uart_set_termios(struct uart_port *port,
unsigned long flags;
u32 val;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
cflags = termios->c_cflag;
iflags = termios->c_iflag;
@@ -380,10 +380,14 @@ static void meson_uart_set_termios(struct uart_port *port,
else
val |= AML_UART_STOP_BIT_1SB;
- if (cflags & CRTSCTS)
- val &= ~AML_UART_TWO_WIRE_EN;
- else
+ if (cflags & CRTSCTS) {
+ if (port->flags & UPF_HARD_FLOW)
+ val &= ~AML_UART_TWO_WIRE_EN;
+ else
+ termios->c_cflag &= ~CRTSCTS;
+ } else {
val |= AML_UART_TWO_WIRE_EN;
+ }
writel(val, port->membase + AML_UART_CONTROL);
@@ -401,7 +405,7 @@ static void meson_uart_set_termios(struct uart_port *port,
AML_UART_FRAME_ERR;
uart_update_timeout(port, termios->c_cflag, baud);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int meson_uart_verify_port(struct uart_port *port,
@@ -460,14 +464,14 @@ static int meson_uart_poll_get_char(struct uart_port *port)
u32 c;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (readl(port->membase + AML_UART_STATUS) & AML_UART_RX_EMPTY)
c = NO_POLL_CHAR;
else
c = readl(port->membase + AML_UART_RFIFO);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return c;
}
@@ -478,7 +482,7 @@ static void meson_uart_poll_put_char(struct uart_port *port, unsigned char c)
u32 reg;
int ret;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Wait until FIFO is empty or timeout */
ret = readl_poll_timeout_atomic(port->membase + AML_UART_STATUS, reg,
@@ -502,7 +506,7 @@ static void meson_uart_poll_put_char(struct uart_port *port, unsigned char c)
dev_err(port->dev, "Timeout waiting for UART TX EMPTY\n");
out:
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
#endif /* CONFIG_CONSOLE_POLL */
@@ -559,9 +563,9 @@ static void meson_serial_port_write(struct uart_port *port, const char *s,
if (port->sysrq) {
locked = 0;
} else if (oops_in_progress) {
- locked = spin_trylock(&port->lock);
+ locked = uart_port_trylock(port);
} else {
- spin_lock(&port->lock);
+ uart_port_lock(port);
locked = 1;
}
@@ -573,7 +577,7 @@ static void meson_serial_port_write(struct uart_port *port, const char *s,
writel(val, port->membase + AML_UART_CONTROL);
if (locked)
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
local_irq_restore(flags);
}
@@ -646,8 +650,8 @@ meson_serial_early_console_setup(struct earlycon_device *device, const char *opt
return 0;
}
-OF_EARLYCON_DECLARE(meson, "amlogic,meson-ao-uart",
- meson_serial_early_console_setup);
+OF_EARLYCON_DECLARE(meson, "amlogic,meson-ao-uart", meson_serial_early_console_setup);
+OF_EARLYCON_DECLARE(meson, "amlogic,meson-s4-uart", meson_serial_early_console_setup);
#define MESON_SERIAL_CONSOLE_PTR(_devname) (&meson_serial_console_##_devname)
#else
@@ -705,6 +709,7 @@ static int meson_uart_probe(struct platform_device *pdev)
u32 fifosize = 64; /* Default is 64, 128 for EE UART_0 */
int ret = 0;
int irq;
+ bool has_rtscts;
if (pdev->dev.of_node)
pdev->id = of_alias_get_id(pdev->dev.of_node, "serial");
@@ -732,6 +737,7 @@ static int meson_uart_probe(struct platform_device *pdev)
return irq;
of_property_read_u32(pdev->dev.of_node, "fifo-size", &fifosize);
+ has_rtscts = of_property_read_bool(pdev->dev.of_node, "uart-has-rtscts");
if (meson_ports[pdev->id]) {
return dev_err_probe(&pdev->dev, -EBUSY,
@@ -762,6 +768,8 @@ static int meson_uart_probe(struct platform_device *pdev)
port->mapsize = resource_size(res_mem);
port->irq = irq;
port->flags = UPF_BOOT_AUTOCONF | UPF_LOW_LATENCY;
+ if (has_rtscts)
+ port->flags |= UPF_HARD_FLOW;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MESON_CONSOLE);
port->dev = &pdev->dev;
port->line = pdev->id;
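
Most of the hunks in this section follow the same mechanical substitution: direct spin_lock*()/spin_unlock*() calls on port->lock become the uart_port_lock*() helpers, giving the serial core one place to attach extra behaviour to port locking later. As a rough sketch of the shape those helpers take (the authoritative definitions live in include/linux/serial_core.h and may carry more logic than this minimal form), the converted call sites above imply wrappers along these lines:

/* Sketch only: minimal forms implied by the converted call sites above. */
static inline void uart_port_lock(struct uart_port *up)
{
	spin_lock(&up->lock);
}

static inline void uart_port_unlock(struct uart_port *up)
{
	spin_unlock(&up->lock);
}

static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
{
	spin_lock_irqsave(&up->lock, *flags);
}

static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
{
	spin_unlock_irqrestore(&up->lock, flags);
}

static inline bool uart_port_trylock(struct uart_port *up)
{
	return spin_trylock(&up->lock);
}

Note that the irqsave variant takes flags by pointer, which is why the converted call sites read uart_port_lock_irqsave(port, &flags) rather than passing flags by value.
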
diff --git a/drivers/tty/serial/milbeaut_usio.c b/drivers/tty/serial/milbeaut_usio.c
index 70a910085e93..db3b81f2aa57 100644
--- a/drivers/tty/serial/milbeaut_usio.c
+++ b/drivers/tty/serial/milbeaut_usio.c
@@ -207,9 +207,9 @@ static irqreturn_t mlb_usio_rx_irq(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
- spin_lock(&port->lock);
+ uart_port_lock(port);
mlb_usio_rx_chars(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
@@ -218,10 +218,10 @@ static irqreturn_t mlb_usio_tx_irq(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
- spin_lock(&port->lock);
+ uart_port_lock(port);
if (readb(port->membase + MLB_USIO_REG_SSR) & MLB_USIO_SSR_TBI)
mlb_usio_tx_chars(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
@@ -267,7 +267,7 @@ static int mlb_usio_startup(struct uart_port *port)
escr = readb(port->membase + MLB_USIO_REG_ESCR);
if (of_property_read_bool(port->dev->of_node, "auto-flow-control"))
escr |= MLB_USIO_ESCR_FLWEN;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
writeb(0, port->membase + MLB_USIO_REG_SCR);
writeb(escr, port->membase + MLB_USIO_REG_ESCR);
writeb(MLB_USIO_SCR_UPCL, port->membase + MLB_USIO_REG_SCR);
@@ -282,7 +282,7 @@ static int mlb_usio_startup(struct uart_port *port)
writeb(MLB_USIO_SCR_TXE | MLB_USIO_SCR_RIE | MLB_USIO_SCR_TBIE |
MLB_USIO_SCR_RXE, port->membase + MLB_USIO_REG_SCR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
@@ -337,7 +337,7 @@ static void mlb_usio_set_termios(struct uart_port *port,
else
quot = 0;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_update_timeout(port, termios->c_cflag, baud);
port->read_status_mask = MLB_USIO_SSR_ORE | MLB_USIO_SSR_RDRF |
MLB_USIO_SSR_TDRE;
@@ -367,7 +367,7 @@ static void mlb_usio_set_termios(struct uart_port *port,
writew(BIT(12), port->membase + MLB_USIO_REG_FBYTE);
writeb(MLB_USIO_SCR_RIE | MLB_USIO_SCR_RXE | MLB_USIO_SCR_TBIE |
MLB_USIO_SCR_TXE, port->membase + MLB_USIO_REG_SCR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *mlb_usio_type(struct uart_port *port)
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 916507b8f31d..a252465e745f 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -1096,14 +1096,14 @@ static void
mpc52xx_uart_break_ctl(struct uart_port *port, int ctl)
{
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (ctl == -1)
psc_ops->command(port, MPC52xx_PSC_START_BRK);
else
psc_ops->command(port, MPC52xx_PSC_STOP_BRK);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int
@@ -1214,7 +1214,7 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
}
/* Get the lock */
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Do our best to flush TX & RX, so we don't lose anything */
/* But we don't wait indefinitely ! */
@@ -1250,7 +1250,7 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
psc_ops->command(port, MPC52xx_PSC_RX_ENABLE);
/* We're all set, release the lock */
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *
@@ -1477,11 +1477,11 @@ mpc52xx_uart_int(int irq, void *dev_id)
struct uart_port *port = dev_id;
irqreturn_t ret;
- spin_lock(&port->lock);
+ uart_port_lock(port);
ret = psc_ops->handle_irq(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return ret;
}
diff --git a/drivers/tty/serial/mps2-uart.c b/drivers/tty/serial/mps2-uart.c
index ea5a7911cb15..2a4c09f3a834 100644
--- a/drivers/tty/serial/mps2-uart.c
+++ b/drivers/tty/serial/mps2-uart.c
@@ -188,12 +188,12 @@ static irqreturn_t mps2_uart_rxirq(int irq, void *data)
if (unlikely(!(irqflag & UARTn_INT_RX)))
return IRQ_NONE;
- spin_lock(&port->lock);
+ uart_port_lock(port);
mps2_uart_write8(port, UARTn_INT_RX, UARTn_INT);
mps2_uart_rx_chars(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
@@ -206,12 +206,12 @@ static irqreturn_t mps2_uart_txirq(int irq, void *data)
if (unlikely(!(irqflag & UARTn_INT_TX)))
return IRQ_NONE;
- spin_lock(&port->lock);
+ uart_port_lock(port);
mps2_uart_write8(port, UARTn_INT_TX, UARTn_INT);
mps2_uart_tx_chars(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
@@ -222,7 +222,7 @@ static irqreturn_t mps2_uart_oerrirq(int irq, void *data)
struct uart_port *port = data;
u8 irqflag = mps2_uart_read8(port, UARTn_INT);
- spin_lock(&port->lock);
+ uart_port_lock(port);
if (irqflag & UARTn_INT_RX_OVERRUN) {
struct tty_port *tport = &port->state->port;
@@ -244,7 +244,7 @@ static irqreturn_t mps2_uart_oerrirq(int irq, void *data)
handled = IRQ_HANDLED;
}
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return handled;
}
@@ -356,12 +356,12 @@ mps2_uart_set_termios(struct uart_port *port, struct ktermios *termios,
bauddiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_update_timeout(port, termios->c_cflag, baud);
mps2_uart_write32(port, bauddiv, UARTn_BAUDDIV);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 90953e679e38..597264b546fd 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -444,7 +444,7 @@ static void msm_complete_tx_dma(void *args)
unsigned int count;
u32 val;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Already stopped */
if (!dma->count)
@@ -476,7 +476,7 @@ static void msm_complete_tx_dma(void *args)
msm_handle_tx(port);
done:
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int msm_handle_tx_dma(struct msm_port *msm_port, unsigned int count)
@@ -549,7 +549,7 @@ static void msm_complete_rx_dma(void *args)
unsigned long flags;
u32 val;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Already stopped */
if (!dma->count)
@@ -587,16 +587,16 @@ static void msm_complete_rx_dma(void *args)
if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
flag = TTY_NORMAL;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
sysrq = uart_handle_sysrq_char(port, dma->virt[i]);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (!sysrq)
tty_insert_flip_char(tport, dma->virt[i], flag);
}
msm_start_rx_dma(msm_port);
done:
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (count)
tty_flip_buffer_push(tport);
@@ -762,9 +762,9 @@ static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
flag = TTY_NORMAL;
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
sysrq = uart_handle_sysrq_char(port, buf[i]);
- spin_lock(&port->lock);
+ uart_port_lock(port);
if (!sysrq)
tty_insert_flip_char(tport, buf[i], flag);
}
@@ -824,9 +824,9 @@ static void msm_handle_rx(struct uart_port *port)
else if (sr & MSM_UART_SR_PAR_FRAME_ERR)
flag = TTY_FRAME;
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
sysrq = uart_handle_sysrq_char(port, c);
- spin_lock(&port->lock);
+ uart_port_lock(port);
if (!sysrq)
tty_insert_flip_char(tport, c, flag);
}
@@ -951,7 +951,7 @@ static irqreturn_t msm_uart_irq(int irq, void *dev_id)
unsigned int misr;
u32 val;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
misr = msm_read(port, MSM_UART_MISR);
msm_write(port, 0, MSM_UART_IMR); /* disable interrupt */
@@ -983,7 +983,7 @@ static irqreturn_t msm_uart_irq(int irq, void *dev_id)
msm_handle_delta_cts(port);
msm_write(port, msm_port->imr, MSM_UART_IMR); /* restore interrupt */
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return IRQ_HANDLED;
}
@@ -1128,13 +1128,13 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
unsigned long flags, rate;
flags = *saved_flags;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
entry = msm_find_best_baud(port, baud, &rate);
clk_set_rate(msm_port->clk, rate);
baud = rate / 16 / entry->divisor;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
*saved_flags = flags;
port->uartclk = rate;
@@ -1266,7 +1266,7 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
unsigned long flags;
unsigned int baud, mr;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (dma->chan) /* Terminate if any */
msm_stop_dma(port, dma);
@@ -1338,7 +1338,7 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
/* Try to use DMA */
msm_start_rx_dma(msm_port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *msm_type(struct uart_port *port)
@@ -1620,9 +1620,9 @@ static void __msm_console_write(struct uart_port *port, const char *s,
if (port->sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock(&port->lock);
+ locked = uart_port_trylock(port);
else
- spin_lock(&port->lock);
+ uart_port_lock(port);
if (is_uartdm)
msm_reset_dm_count(port, count);
@@ -1661,7 +1661,7 @@ static void __msm_console_write(struct uart_port *port, const char *s,
}
if (locked)
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
local_irq_restore(flags);
}
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index ea924e9b913b..0255646bc175 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -187,9 +187,9 @@ static unsigned int mvebu_uart_tx_empty(struct uart_port *port)
unsigned long flags;
unsigned int st;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
st = readl(port->membase + UART_STAT);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return (st & STAT_TX_EMP) ? TIOCSER_TEMT : 0;
}
@@ -249,14 +249,14 @@ static void mvebu_uart_break_ctl(struct uart_port *port, int brk)
unsigned int ctl;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ctl = readl(port->membase + UART_CTRL(port));
if (brk == -1)
ctl |= CTRL_SND_BRK_SEQ;
else
ctl &= ~CTRL_SND_BRK_SEQ;
writel(ctl, port->membase + UART_CTRL(port));
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
@@ -540,7 +540,7 @@ static void mvebu_uart_set_termios(struct uart_port *port,
unsigned long flags;
unsigned int baud, min_baud, max_baud;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
port->read_status_mask = STAT_RX_RDY(port) | STAT_OVR_ERR |
STAT_TX_RDY(port) | STAT_TX_FIFO_FUL;
@@ -589,7 +589,7 @@ static void mvebu_uart_set_termios(struct uart_port *port,
uart_update_timeout(port, termios->c_cflag, baud);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *mvebu_uart_type(struct uart_port *port)
@@ -735,9 +735,9 @@ static void mvebu_uart_console_write(struct console *co, const char *s,
int locked = 1;
if (oops_in_progress)
- locked = spin_trylock_irqsave(&port->lock, flags);
+ locked = uart_port_trylock_irqsave(port, &flags);
else
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ier = readl(port->membase + UART_CTRL(port)) & CTRL_BRK_INT;
intr = readl(port->membase + UART_INTR(port)) &
@@ -758,7 +758,7 @@ static void mvebu_uart_console_write(struct console *co, const char *s,
}
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int mvebu_uart_console_setup(struct console *co, char *options)
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 0ead88c5a19a..ad4c1c5d0a7f 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -390,10 +390,10 @@ static void serial_omap_throttle(struct uart_port *port)
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned long flags;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
serial_out(up, UART_IER, up->ier);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static void serial_omap_unthrottle(struct uart_port *port)
@@ -401,10 +401,10 @@ static void serial_omap_unthrottle(struct uart_port *port)
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned long flags;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
up->ier |= UART_IER_RLSI | UART_IER_RDI;
serial_out(up, UART_IER, up->ier);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static unsigned int check_modem_status(struct uart_omap_port *up)
@@ -527,7 +527,7 @@ static irqreturn_t serial_omap_irq(int irq, void *dev_id)
irqreturn_t ret = IRQ_NONE;
int max_count = 256;
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
do {
iir = serial_in(up, UART_IIR);
@@ -563,7 +563,7 @@ static irqreturn_t serial_omap_irq(int irq, void *dev_id)
}
} while (max_count--);
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
tty_flip_buffer_push(&up->port.state->port);
@@ -579,9 +579,9 @@ static unsigned int serial_omap_tx_empty(struct uart_port *port)
unsigned int ret = 0;
dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->port.line);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
return ret;
}
@@ -647,13 +647,13 @@ static void serial_omap_break_ctl(struct uart_port *port, int break_state)
unsigned long flags;
dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->port.line);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
if (break_state == -1)
up->lcr |= UART_LCR_SBC;
else
up->lcr &= ~UART_LCR_SBC;
serial_out(up, UART_LCR, up->lcr);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static int serial_omap_startup(struct uart_port *port)
@@ -701,13 +701,13 @@ static int serial_omap_startup(struct uart_port *port)
* Now, initialize the UART
*/
serial_out(up, UART_LCR, UART_LCR_WLEN8);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* Most PC uarts need OUT2 raised to enable interrupts.
*/
up->port.mctrl |= TIOCM_OUT2;
serial_omap_set_mctrl(&up->port, up->port.mctrl);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
up->msr_saved_flags = 0;
/*
@@ -742,10 +742,10 @@ static void serial_omap_shutdown(struct uart_port *port)
up->ier = 0;
serial_out(up, UART_IER, 0);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
up->port.mctrl &= ~TIOCM_OUT2;
serial_omap_set_mctrl(&up->port, up->port.mctrl);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
/*
* Disable break condition and FIFOs
@@ -815,7 +815,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* Update the per-port timeout.
@@ -1013,7 +1013,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
serial_omap_set_mctrl(&up->port, up->port.mctrl);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->port.line);
}
@@ -1216,9 +1216,9 @@ serial_omap_console_write(struct console *co, const char *s,
if (up->port.sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock(&up->port.lock);
+ locked = uart_port_trylock(&up->port);
else
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
/*
* First save the IER then disable the interrupts
@@ -1245,7 +1245,7 @@ serial_omap_console_write(struct console *co, const char *s,
check_modem_status(up);
if (locked)
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
local_irq_restore(flags);
}
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index e99970a9437f..919f5e5aa0f1 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -125,12 +125,12 @@ static unsigned int owl_uart_tx_empty(struct uart_port *port)
u32 val;
unsigned int ret;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
val = owl_uart_read(port, OWL_UART_STAT);
ret = (val & OWL_UART_STAT_TFES) ? TIOCSER_TEMT : 0;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return ret;
}
@@ -232,7 +232,7 @@ static irqreturn_t owl_uart_irq(int irq, void *dev_id)
unsigned long flags;
u32 stat;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
stat = owl_uart_read(port, OWL_UART_STAT);
@@ -246,7 +246,7 @@ static irqreturn_t owl_uart_irq(int irq, void *dev_id)
stat |= OWL_UART_STAT_RIP | OWL_UART_STAT_TIP;
owl_uart_write(port, stat, OWL_UART_STAT);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return IRQ_HANDLED;
}
@@ -256,14 +256,14 @@ static void owl_uart_shutdown(struct uart_port *port)
u32 val;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
val = owl_uart_read(port, OWL_UART_CTL);
val &= ~(OWL_UART_CTL_TXIE | OWL_UART_CTL_RXIE
| OWL_UART_CTL_TXDE | OWL_UART_CTL_RXDE | OWL_UART_CTL_EN);
owl_uart_write(port, val, OWL_UART_CTL);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
free_irq(port->irq, port);
}
@@ -279,7 +279,7 @@ static int owl_uart_startup(struct uart_port *port)
if (ret)
return ret;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
val = owl_uart_read(port, OWL_UART_STAT);
val |= OWL_UART_STAT_RIP | OWL_UART_STAT_TIP
@@ -291,7 +291,7 @@ static int owl_uart_startup(struct uart_port *port)
val |= OWL_UART_CTL_EN;
owl_uart_write(port, val, OWL_UART_CTL);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
@@ -311,7 +311,7 @@ static void owl_uart_set_termios(struct uart_port *port,
u32 ctl;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ctl = owl_uart_read(port, OWL_UART_CTL);
@@ -371,7 +371,7 @@ static void owl_uart_set_termios(struct uart_port *port,
uart_update_timeout(port, termios->c_cflag, baud);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void owl_uart_release_port(struct uart_port *port)
@@ -515,9 +515,9 @@ static void owl_uart_port_write(struct uart_port *port, const char *s,
if (port->sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock(&port->lock);
+ locked = uart_port_trylock(port);
else {
- spin_lock(&port->lock);
+ uart_port_lock(port);
locked = 1;
}
@@ -541,7 +541,7 @@ static void owl_uart_port_write(struct uart_port *port, const char *s,
owl_uart_write(port, old_ctl, OWL_UART_CTL);
if (locked)
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
local_irq_restore(flags);
}
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index cc83b772b7ca..436cc6d52a11 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1347,7 +1347,7 @@ static void pch_uart_set_termios(struct uart_port *port,
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
spin_lock_irqsave(&priv->lock, flags);
- spin_lock(&port->lock);
+ uart_port_lock(port);
uart_update_timeout(port, termios->c_cflag, baud);
rtn = pch_uart_hal_set_line(priv, baud, parity, bits, stb);
@@ -1360,7 +1360,7 @@ static void pch_uart_set_termios(struct uart_port *port,
tty_termios_encode_baud_rate(termios, baud, baud);
out:
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -1581,10 +1581,10 @@ pch_console_write(struct console *co, const char *s, unsigned int count)
port_locked = 0;
} else if (oops_in_progress) {
priv_locked = spin_trylock(&priv->lock);
- port_locked = spin_trylock(&priv->port.lock);
+ port_locked = uart_port_trylock(&priv->port);
} else {
spin_lock(&priv->lock);
- spin_lock(&priv->port.lock);
+ uart_port_lock(&priv->port);
}
/*
@@ -1604,7 +1604,7 @@ pch_console_write(struct console *co, const char *s, unsigned int count)
iowrite8(ier, priv->membase + UART_IER);
if (port_locked)
- spin_unlock(&priv->port.lock);
+ uart_port_unlock(&priv->port);
if (priv_locked)
spin_unlock(&priv->lock);
local_irq_restore(flags);
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
index e308d5022b3f..3a95bf5d55d3 100644
--- a/drivers/tty/serial/pic32_uart.c
+++ b/drivers/tty/serial/pic32_uart.c
@@ -243,7 +243,7 @@ static void pic32_uart_break_ctl(struct uart_port *port, int ctl)
struct pic32_sport *sport = to_pic32_sport(port);
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (ctl)
pic32_uart_writel(sport, PIC32_SET(PIC32_UART_STA),
@@ -252,7 +252,7 @@ static void pic32_uart_break_ctl(struct uart_port *port, int ctl)
pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA),
PIC32_UART_STA_UTXBRK);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/* get port type in string format */
@@ -274,7 +274,7 @@ static void pic32_uart_do_rx(struct uart_port *port)
*/
max_count = PIC32_UART_RX_FIFO_DEPTH;
- spin_lock(&port->lock);
+ uart_port_lock(port);
tty = &port->state->port;
@@ -331,7 +331,7 @@ static void pic32_uart_do_rx(struct uart_port *port)
} while (--max_count);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
tty_flip_buffer_push(tty);
}
@@ -410,9 +410,9 @@ static irqreturn_t pic32_uart_tx_interrupt(int irq, void *dev_id)
struct uart_port *port = dev_id;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
pic32_uart_do_tx(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return IRQ_HANDLED;
}
@@ -580,9 +580,9 @@ static void pic32_uart_shutdown(struct uart_port *port)
unsigned long flags;
/* disable uart */
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
pic32_uart_dsbl_and_mask(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
clk_disable_unprepare(sport->clk);
/* free all 3 interrupts for this UART */
@@ -604,7 +604,7 @@ static void pic32_uart_set_termios(struct uart_port *port,
unsigned int quot;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* disable uart and mask all interrupts while changing speed */
pic32_uart_dsbl_and_mask(port);
@@ -672,7 +672,7 @@ static void pic32_uart_set_termios(struct uart_port *port,
/* enable uart */
pic32_uart_en_and_unmask(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/* serial core request to claim uart iomem */
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 13668ffdb1e7..c8bf08c19c64 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -246,9 +246,9 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
#endif /* USE_CTRL_O_SYSRQ */
if (uap->port.sysrq) {
int swallow;
- spin_unlock(&uap->port.lock);
+ uart_port_unlock(&uap->port);
swallow = uart_handle_sysrq_char(&uap->port, ch);
- spin_lock(&uap->port.lock);
+ uart_port_lock(&uap->port);
if (swallow)
goto next_char;
}
@@ -435,7 +435,7 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
uap_a = pmz_get_port_A(uap);
uap_b = uap_a->mate;
- spin_lock(&uap_a->port.lock);
+ uart_port_lock(&uap_a->port);
r3 = read_zsreg(uap_a, R3);
/* Channel A */
@@ -456,14 +456,14 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
rc = IRQ_HANDLED;
}
skip_a:
- spin_unlock(&uap_a->port.lock);
+ uart_port_unlock(&uap_a->port);
if (push)
tty_flip_buffer_push(&uap->port.state->port);
if (!uap_b)
goto out;
- spin_lock(&uap_b->port.lock);
+ uart_port_lock(&uap_b->port);
push = false;
if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
if (!ZS_IS_OPEN(uap_b)) {
@@ -481,7 +481,7 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
rc = IRQ_HANDLED;
}
skip_b:
- spin_unlock(&uap_b->port.lock);
+ uart_port_unlock(&uap_b->port);
if (push)
tty_flip_buffer_push(&uap->port.state->port);
@@ -497,9 +497,9 @@ static inline u8 pmz_peek_status(struct uart_pmac_port *uap)
unsigned long flags;
u8 status;
- spin_lock_irqsave(&uap->port.lock, flags);
+ uart_port_lock_irqsave(&uap->port, &flags);
status = read_zsreg(uap, R0);
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
return status;
}
@@ -685,7 +685,7 @@ static void pmz_break_ctl(struct uart_port *port, int break_state)
else
clear_bits |= SND_BRK;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
new_reg = (uap->curregs[R5] | set_bits) & ~clear_bits;
if (new_reg != uap->curregs[R5]) {
@@ -693,7 +693,7 @@ static void pmz_break_ctl(struct uart_port *port, int break_state)
write_zsreg(uap, R5, uap->curregs[R5]);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
#ifdef CONFIG_PPC_PMAC
@@ -865,18 +865,18 @@ static void pmz_irda_reset(struct uart_pmac_port *uap)
{
unsigned long flags;
- spin_lock_irqsave(&uap->port.lock, flags);
+ uart_port_lock_irqsave(&uap->port, &flags);
uap->curregs[R5] |= DTR;
write_zsreg(uap, R5, uap->curregs[R5]);
zssync(uap);
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
msleep(110);
- spin_lock_irqsave(&uap->port.lock, flags);
+ uart_port_lock_irqsave(&uap->port, &flags);
uap->curregs[R5] &= ~DTR;
write_zsreg(uap, R5, uap->curregs[R5]);
zssync(uap);
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
msleep(10);
}
@@ -896,9 +896,9 @@ static int pmz_startup(struct uart_port *port)
* initialize the chip
*/
if (!ZS_IS_CONS(uap)) {
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
pwr_delay = __pmz_startup(uap);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
sprintf(uap->irq_name, PMACZILOG_NAME"%d", uap->port.line);
if (request_irq(uap->port.irq, pmz_interrupt, IRQF_SHARED,
@@ -921,9 +921,9 @@ static int pmz_startup(struct uart_port *port)
pmz_irda_reset(uap);
/* Enable interrupt requests for the channel */
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
pmz_interrupt_control(uap, 1);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
@@ -933,7 +933,7 @@ static void pmz_shutdown(struct uart_port *port)
struct uart_pmac_port *uap = to_pmz(port);
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Disable interrupt requests for the channel */
pmz_interrupt_control(uap, 0);
@@ -948,19 +948,19 @@ static void pmz_shutdown(struct uart_port *port)
pmz_maybe_update_regs(uap);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
/* Release interrupt handler */
free_irq(uap->port.irq, uap);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uap->flags &= ~PMACZILOG_FLAG_IS_OPEN;
if (!ZS_IS_CONS(uap))
pmz_set_scc_power(uap, 0); /* Shut the chip down */
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/* Shared by TTY driver and serial console setup. The port lock is held
@@ -1247,7 +1247,7 @@ static void pmz_set_termios(struct uart_port *port, struct ktermios *termios,
struct uart_pmac_port *uap = to_pmz(port);
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Disable IRQs on the port */
pmz_interrupt_control(uap, 0);
@@ -1259,7 +1259,7 @@ static void pmz_set_termios(struct uart_port *port, struct ktermios *termios,
if (ZS_IS_OPEN(uap))
pmz_interrupt_control(uap, 1);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *pmz_type(struct uart_port *port)
@@ -1896,7 +1896,7 @@ static void pmz_console_write(struct console *con, const char *s, unsigned int c
struct uart_pmac_port *uap = &pmz_ports[con->index];
unsigned long flags;
- spin_lock_irqsave(&uap->port.lock, flags);
+ uart_port_lock_irqsave(&uap->port, &flags);
	/* Turn off interrupts and enable the transmitter. */
write_zsreg(uap, R1, uap->curregs[1] & ~TxINT_ENAB);
@@ -1908,7 +1908,7 @@ static void pmz_console_write(struct console *con, const char *s, unsigned int c
write_zsreg(uap, R1, uap->curregs[1]);
/* Don't disable the transmitter. */
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
}
/*
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index 73c60f5ea027..46e70e155aab 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -225,14 +225,14 @@ static inline irqreturn_t serial_pxa_irq(int irq, void *dev_id)
iir = serial_in(up, UART_IIR);
if (iir & UART_IIR_NO_INT)
return IRQ_NONE;
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
lsr = serial_in(up, UART_LSR);
if (lsr & UART_LSR_DR)
receive_chars(up, &lsr);
check_modem_status(up);
if (lsr & UART_LSR_THRE)
transmit_chars(up);
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
return IRQ_HANDLED;
}
@@ -242,9 +242,9 @@ static unsigned int serial_pxa_tx_empty(struct uart_port *port)
unsigned long flags;
unsigned int ret;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
return ret;
}
@@ -295,13 +295,13 @@ static void serial_pxa_break_ctl(struct uart_port *port, int break_state)
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
unsigned long flags;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
if (break_state == -1)
up->lcr |= UART_LCR_SBC;
else
up->lcr &= ~UART_LCR_SBC;
serial_out(up, UART_LCR, up->lcr);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static int serial_pxa_startup(struct uart_port *port)
@@ -346,10 +346,10 @@ static int serial_pxa_startup(struct uart_port *port)
*/
serial_out(up, UART_LCR, UART_LCR_WLEN8);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
up->port.mctrl |= TIOCM_OUT2;
serial_pxa_set_mctrl(&up->port, up->port.mctrl);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
/*
* Finally, enable interrupts. Note: Modem status interrupts
@@ -383,10 +383,10 @@ static void serial_pxa_shutdown(struct uart_port *port)
up->ier = 0;
serial_out(up, UART_IER, 0);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
up->port.mctrl &= ~TIOCM_OUT2;
serial_pxa_set_mctrl(&up->port, up->port.mctrl);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
/*
* Disable break condition and FIFOs
@@ -434,7 +434,7 @@ serial_pxa_set_termios(struct uart_port *port, struct ktermios *termios,
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* Ensure the port will be enabled.
@@ -504,7 +504,7 @@ serial_pxa_set_termios(struct uart_port *port, struct ktermios *termios,
up->lcr = cval; /* Save LCR */
serial_pxa_set_mctrl(&up->port, up->port.mctrl);
serial_out(up, UART_FCR, fcr);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static void
@@ -608,9 +608,9 @@ serial_pxa_console_write(struct console *co, const char *s, unsigned int count)
if (up->port.sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock(&up->port.lock);
+ locked = uart_port_trylock(&up->port);
else
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
/*
* First save the IER then disable the interrupts
@@ -628,7 +628,7 @@ serial_pxa_console_write(struct console *co, const char *s, unsigned int count)
serial_out(up, UART_IER, ier);
if (locked)
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
local_irq_restore(flags);
clk_disable(up->clk);
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index b8aa4c1293ba..7e78f97e8f43 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -482,9 +482,9 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
uport = &port->uport;
if (oops_in_progress)
- locked = spin_trylock_irqsave(&uport->lock, flags);
+ locked = uart_port_trylock_irqsave(uport, &flags);
else
- spin_lock_irqsave(&uport->lock, flags);
+ uart_port_lock_irqsave(uport, &flags);
geni_status = readl(uport->membase + SE_GENI_STATUS);
@@ -520,7 +520,7 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
qcom_geni_serial_setup_tx(uport, port->tx_remaining);
if (locked)
- spin_unlock_irqrestore(&uport->lock, flags);
+ uart_port_unlock_irqrestore(uport, flags);
}
static void handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
@@ -970,7 +970,7 @@ static irqreturn_t qcom_geni_serial_isr(int isr, void *dev)
if (uport->suspended)
return IRQ_NONE;
- spin_lock(&uport->lock);
+ uart_port_lock(uport);
m_irq_status = readl(uport->membase + SE_GENI_M_IRQ_STATUS);
s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c
index be5c842b5ba9..d824c8318f33 100644
--- a/drivers/tty/serial/rda-uart.c
+++ b/drivers/tty/serial/rda-uart.c
@@ -139,12 +139,12 @@ static unsigned int rda_uart_tx_empty(struct uart_port *port)
unsigned int ret;
u32 val;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
val = rda_uart_read(port, RDA_UART_STATUS);
ret = (val & RDA_UART_TX_FIFO_MASK) ? TIOCSER_TEMT : 0;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return ret;
}
@@ -246,7 +246,7 @@ static void rda_uart_set_termios(struct uart_port *port,
unsigned int baud;
u32 irq_mask;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
baud = uart_get_baud_rate(port, termios, old, 9600, port->uartclk / 4);
rda_uart_change_baudrate(rda_port, baud);
@@ -325,7 +325,7 @@ static void rda_uart_set_termios(struct uart_port *port,
/* update the per-port timeout */
uart_update_timeout(port, termios->c_cflag, baud);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void rda_uart_send_chars(struct uart_port *port)
@@ -408,7 +408,7 @@ static irqreturn_t rda_interrupt(int irq, void *dev_id)
unsigned long flags;
u32 val, irq_mask;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Clear IRQ cause */
val = rda_uart_read(port, RDA_UART_IRQ_CAUSE);
@@ -425,7 +425,7 @@ static irqreturn_t rda_interrupt(int irq, void *dev_id)
rda_uart_send_chars(port);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return IRQ_HANDLED;
}
@@ -436,16 +436,16 @@ static int rda_uart_startup(struct uart_port *port)
int ret;
u32 val;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
rda_uart_write(port, 0, RDA_UART_IRQ_MASK);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
ret = request_irq(port->irq, rda_interrupt, IRQF_NO_SUSPEND,
"rda-uart", port);
if (ret)
return ret;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
val = rda_uart_read(port, RDA_UART_CTRL);
val |= RDA_UART_ENABLE;
@@ -456,7 +456,7 @@ static int rda_uart_startup(struct uart_port *port)
val |= (RDA_UART_RX_DATA_AVAILABLE | RDA_UART_RX_TIMEOUT);
rda_uart_write(port, val, RDA_UART_IRQ_MASK);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
@@ -466,7 +466,7 @@ static void rda_uart_shutdown(struct uart_port *port)
unsigned long flags;
u32 val;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
rda_uart_stop_tx(port);
rda_uart_stop_rx(port);
@@ -475,7 +475,7 @@ static void rda_uart_shutdown(struct uart_port *port)
val &= ~RDA_UART_ENABLE;
rda_uart_write(port, val, RDA_UART_CTRL);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *rda_uart_type(struct uart_port *port)
@@ -515,7 +515,7 @@ static void rda_uart_config_port(struct uart_port *port, int flags)
rda_uart_request_port(port);
}
- spin_lock_irqsave(&port->lock, irq_flags);
+ uart_port_lock_irqsave(port, &irq_flags);
/* Clear mask, so no surprise interrupts. */
rda_uart_write(port, 0, RDA_UART_IRQ_MASK);
@@ -523,7 +523,7 @@ static void rda_uart_config_port(struct uart_port *port, int flags)
/* Clear status register */
rda_uart_write(port, 0, RDA_UART_STATUS);
- spin_unlock_irqrestore(&port->lock, irq_flags);
+ uart_port_unlock_irqrestore(port, irq_flags);
}
static void rda_uart_release_port(struct uart_port *port)
@@ -597,9 +597,9 @@ static void rda_uart_port_write(struct uart_port *port, const char *s,
if (port->sysrq) {
locked = 0;
} else if (oops_in_progress) {
- locked = spin_trylock(&port->lock);
+ locked = uart_port_trylock(port);
} else {
- spin_lock(&port->lock);
+ uart_port_lock(port);
locked = 1;
}
@@ -615,7 +615,7 @@ static void rda_uart_port_write(struct uart_port *port, const char *s,
rda_uart_write(port, old_irq_mask, RDA_UART_IRQ_MASK);
if (locked)
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
local_irq_restore(flags);
}
diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c
index de220ac8ca54..d46a81cddfcd 100644
--- a/drivers/tty/serial/rp2.c
+++ b/drivers/tty/serial/rp2.c
@@ -276,9 +276,9 @@ static unsigned int rp2_uart_tx_empty(struct uart_port *port)
* But the TXEMPTY bit doesn't seem to work unless the TX IRQ is
* enabled.
*/
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
tx_fifo_bytes = readw(up->base + RP2_TX_FIFO_COUNT);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
return tx_fifo_bytes ? 0 : TIOCSER_TEMT;
}
@@ -323,10 +323,10 @@ static void rp2_uart_break_ctl(struct uart_port *port, int break_state)
{
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
rp2_rmw(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_BREAK_m,
break_state ? RP2_TXRX_CTL_BREAK_m : 0);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void rp2_uart_enable_ms(struct uart_port *port)
@@ -383,7 +383,7 @@ static void rp2_uart_set_termios(struct uart_port *port, struct ktermios *new,
if (tty_termios_baud_rate(new))
tty_termios_encode_baud_rate(new, baud, baud);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* ignore all characters if CREAD is not set */
port->ignore_status_mask = (new->c_cflag & CREAD) ? 0 : RP2_DUMMY_READ;
@@ -391,7 +391,7 @@ static void rp2_uart_set_termios(struct uart_port *port, struct ktermios *new,
__rp2_uart_set_termios(up, new->c_cflag, new->c_iflag, baud_div);
uart_update_timeout(port, new->c_cflag, baud);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void rp2_rx_chars(struct rp2_uart_port *up)
@@ -440,7 +440,7 @@ static void rp2_ch_interrupt(struct rp2_uart_port *up)
{
u32 status;
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
/*
* The IRQ status bits are clear-on-write. Other status bits in
@@ -456,7 +456,7 @@ static void rp2_ch_interrupt(struct rp2_uart_port *up)
if (status & RP2_CHAN_STAT_MS_CHANGED_MASK)
wake_up_interruptible(&up->port.state->port.delta_msr_wait);
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
}
static int rp2_asic_interrupt(struct rp2_card *card, unsigned int asic_id)
@@ -516,10 +516,10 @@ static void rp2_uart_shutdown(struct uart_port *port)
rp2_uart_break_ctl(port, 0);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
rp2_mask_ch_irq(up, up->idx, 0);
rp2_rmw(up, RP2_CHAN_STAT, 0, 0);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *rp2_uart_type(struct uart_port *port)
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index ad011f1e2f4d..be7bcd75d9f4 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -115,9 +115,9 @@ static void sa1100_timeout(struct timer_list *t)
unsigned long flags;
if (sport->port.state) {
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
sa1100_mctrl_check(sport);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
}
@@ -247,7 +247,7 @@ static irqreturn_t sa1100_int(int irq, void *dev_id)
struct sa1100_port *sport = dev_id;
unsigned int status, pass_counter = 0;
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
status = UART_GET_UTSR0(sport);
status &= SM_TO_UTSR0(sport->port.read_status_mask) | ~UTSR0_TFS;
do {
@@ -276,7 +276,7 @@ static irqreturn_t sa1100_int(int irq, void *dev_id)
status &= SM_TO_UTSR0(sport->port.read_status_mask) |
~UTSR0_TFS;
} while (status & (UTSR0_TFS | UTSR0_RFS | UTSR0_RID));
- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);
return IRQ_HANDLED;
}
@@ -321,14 +321,14 @@ static void sa1100_break_ctl(struct uart_port *port, int break_state)
unsigned long flags;
unsigned int utcr3;
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
utcr3 = UART_GET_UTCR3(sport);
if (break_state == -1)
utcr3 |= UTCR3_BRK;
else
utcr3 &= ~UTCR3_BRK;
UART_PUT_UTCR3(sport, utcr3);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
static int sa1100_startup(struct uart_port *port)
@@ -354,9 +354,9 @@ static int sa1100_startup(struct uart_port *port)
/*
* Enable modem status interrupts
*/
- spin_lock_irq(&sport->port.lock);
+ uart_port_lock_irq(&sport->port);
sa1100_enable_ms(&sport->port);
- spin_unlock_irq(&sport->port.lock);
+ uart_port_unlock_irq(&sport->port);
return 0;
}
@@ -423,7 +423,7 @@ sa1100_set_termios(struct uart_port *port, struct ktermios *termios,
del_timer_sync(&sport->timer);
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
sport->port.read_status_mask &= UTSR0_TO_SM(UTSR0_TFS);
sport->port.read_status_mask |= UTSR1_TO_SM(UTSR1_ROR);
@@ -485,7 +485,7 @@ sa1100_set_termios(struct uart_port *port, struct ktermios *termios,
if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
sa1100_enable_ms(&sport->port);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
static const char *sa1100_type(struct uart_port *port)
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index 07fb8a9dac63..3bd552841cd2 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -64,7 +64,6 @@
#define RXSTAT_DUMMY_READ (0x10000000)
enum s3c24xx_port_type {
- TYPE_S3C24XX,
TYPE_S3C6400,
TYPE_APPLE_S5L,
};
@@ -128,8 +127,6 @@ struct s3c24xx_uart_dma {
};
struct s3c24xx_uart_port {
- unsigned char rx_claimed;
- unsigned char tx_claimed;
unsigned char rx_enabled;
unsigned char tx_enabled;
unsigned int pm_level;
@@ -248,7 +245,7 @@ static void s3c24xx_serial_rx_enable(struct uart_port *port)
unsigned int ucon, ufcon;
int count = 10000;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
while (--count && !s3c24xx_serial_txempty_nofifo(port))
udelay(100);
@@ -262,7 +259,7 @@ static void s3c24xx_serial_rx_enable(struct uart_port *port)
wr_regl(port, S3C2410_UCON, ucon);
ourport->rx_enabled = 1;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void s3c24xx_serial_rx_disable(struct uart_port *port)
@@ -271,14 +268,14 @@ static void s3c24xx_serial_rx_disable(struct uart_port *port)
unsigned long flags;
unsigned int ucon;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ucon = rd_regl(port, S3C2410_UCON);
ucon &= ~S3C2410_UCON_RXIRQMODE;
wr_regl(port, S3C2410_UCON, ucon);
ourport->rx_enabled = 0;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void s3c24xx_serial_stop_tx(struct uart_port *port)
@@ -344,7 +341,7 @@ static void s3c24xx_serial_tx_dma_complete(void *args)
dma->tx_transfer_addr, dma->tx_size,
DMA_TO_DEVICE);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_xmit_advance(port, count);
ourport->tx_in_progress = 0;
@@ -353,7 +350,7 @@ static void s3c24xx_serial_tx_dma_complete(void *args)
uart_write_wakeup(port);
s3c24xx_serial_start_next_tx(ourport);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void enable_tx_dma(struct s3c24xx_uart_port *ourport)
@@ -619,7 +616,7 @@ static void s3c24xx_serial_rx_dma_complete(void *args)
received = dma->rx_bytes_requested - state.residue;
async_tx_ack(dma->rx_desc);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (received)
s3c24xx_uart_copy_rx_to_tty(ourport, t, received);
@@ -631,7 +628,7 @@ static void s3c24xx_serial_rx_dma_complete(void *args)
s3c64xx_start_rx_dma(ourport);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void s3c64xx_start_rx_dma(struct s3c24xx_uart_port *ourport)
@@ -722,7 +719,7 @@ static irqreturn_t s3c24xx_serial_rx_chars_dma(void *dev_id)
utrstat = rd_regl(port, S3C2410_UTRSTAT);
rd_regl(port, S3C2410_UFSTAT);
- spin_lock(&port->lock);
+ uart_port_lock(port);
if (!(utrstat & S3C2410_UTRSTAT_TIMEOUT)) {
s3c64xx_start_rx_dma(ourport);
@@ -751,7 +748,7 @@ static irqreturn_t s3c24xx_serial_rx_chars_dma(void *dev_id)
wr_regl(port, S3C2410_UTRSTAT, S3C2410_UTRSTAT_TIMEOUT);
finish:
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
@@ -849,9 +846,9 @@ static irqreturn_t s3c24xx_serial_rx_chars_pio(void *dev_id)
struct s3c24xx_uart_port *ourport = dev_id;
struct uart_port *port = &ourport->port;
- spin_lock(&port->lock);
+ uart_port_lock(port);
s3c24xx_serial_rx_drain_fifo(ourport);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
@@ -932,11 +929,11 @@ static irqreturn_t s3c24xx_serial_tx_irq(int irq, void *id)
struct s3c24xx_uart_port *ourport = id;
struct uart_port *port = &ourport->port;
- spin_lock(&port->lock);
+ uart_port_lock(port);
s3c24xx_serial_tx_chars(ourport);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
@@ -1033,7 +1030,7 @@ static void s3c24xx_serial_break_ctl(struct uart_port *port, int break_state)
unsigned long flags;
unsigned int ucon;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ucon = rd_regl(port, S3C2410_UCON);
@@ -1044,7 +1041,7 @@ static void s3c24xx_serial_break_ctl(struct uart_port *port, int break_state)
wr_regl(port, S3C2410_UCON, ucon);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
@@ -1166,29 +1163,6 @@ static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)
}
}
-static void s3c24xx_serial_shutdown(struct uart_port *port)
-{
- struct s3c24xx_uart_port *ourport = to_ourport(port);
-
- if (ourport->tx_claimed) {
- free_irq(ourport->tx_irq, ourport);
- ourport->tx_enabled = 0;
- ourport->tx_claimed = 0;
- ourport->tx_mode = 0;
- }
-
- if (ourport->rx_claimed) {
- free_irq(ourport->rx_irq, ourport);
- ourport->rx_claimed = 0;
- ourport->rx_enabled = 0;
- }
-
- if (ourport->dma)
- s3c24xx_serial_release_dma(ourport);
-
- ourport->tx_in_progress = 0;
-}
-
static void s3c64xx_serial_shutdown(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
@@ -1234,48 +1208,6 @@ static void apple_s5l_serial_shutdown(struct uart_port *port)
ourport->tx_in_progress = 0;
}
-static int s3c24xx_serial_startup(struct uart_port *port)
-{
- struct s3c24xx_uart_port *ourport = to_ourport(port);
- int ret;
-
- ourport->rx_enabled = 1;
-
- ret = request_irq(ourport->rx_irq, s3c24xx_serial_rx_irq, 0,
- s3c24xx_serial_portname(port), ourport);
-
- if (ret != 0) {
- dev_err(port->dev, "cannot get irq %d\n", ourport->rx_irq);
- return ret;
- }
-
- ourport->rx_claimed = 1;
-
- dev_dbg(port->dev, "requesting tx irq...\n");
-
- ourport->tx_enabled = 1;
-
- ret = request_irq(ourport->tx_irq, s3c24xx_serial_tx_irq, 0,
- s3c24xx_serial_portname(port), ourport);
-
- if (ret) {
- dev_err(port->dev, "cannot get irq %d\n", ourport->tx_irq);
- goto err;
- }
-
- ourport->tx_claimed = 1;
-
- /* the port reset code should have done the correct
- * register setup for the port controls
- */
-
- return ret;
-
-err:
- s3c24xx_serial_shutdown(port);
- return ret;
-}
-
static int s3c64xx_serial_startup(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
@@ -1303,7 +1235,7 @@ static int s3c64xx_serial_startup(struct uart_port *port)
ourport->rx_enabled = 1;
ourport->tx_enabled = 0;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ufcon = rd_regl(port, S3C2410_UFCON);
ufcon |= S3C2410_UFCON_RESETRX | S5PV210_UFCON_RXTRIG8;
@@ -1313,7 +1245,7 @@ static int s3c64xx_serial_startup(struct uart_port *port)
enable_rx_pio(ourport);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
/* Enable Rx Interrupt */
s3c24xx_clear_bit(port, S3C64XX_UINTM_RXD, S3C64XX_UINTM);
@@ -1341,7 +1273,7 @@ static int apple_s5l_serial_startup(struct uart_port *port)
ourport->rx_enabled = 1;
ourport->tx_enabled = 0;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ufcon = rd_regl(port, S3C2410_UFCON);
ufcon |= S3C2410_UFCON_RESETRX | S5PV210_UFCON_RXTRIG8;
@@ -1351,7 +1283,7 @@ static int apple_s5l_serial_startup(struct uart_port *port)
enable_rx_pio(ourport);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
/* Enable Rx Interrupt */
s3c24xx_set_bit(port, APPLE_S5L_UCON_RXTHRESH_ENA, S3C2410_UCON);
@@ -1626,7 +1558,7 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
ulcon |= S3C2410_LCON_PNONE;
}
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
dev_dbg(port->dev,
"setting ulcon to %08x, brddiv to %d, udivslot %08x\n",
@@ -1684,7 +1616,7 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
if ((termios->c_cflag & CREAD) == 0)
port->ignore_status_mask |= RXSTAT_DUMMY_READ;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *s3c24xx_serial_type(struct uart_port *port)
@@ -1692,8 +1624,6 @@ static const char *s3c24xx_serial_type(struct uart_port *port)
const struct s3c24xx_uart_port *ourport = to_ourport(port);
switch (ourport->info->type) {
- case TYPE_S3C24XX:
- return "S3C24XX";
case TYPE_S3C6400:
return "S3C6400/10";
case TYPE_APPLE_S5L:
@@ -1753,27 +1683,6 @@ static void s3c24xx_serial_put_poll_char(struct uart_port *port,
unsigned char c);
#endif
-static const struct uart_ops s3c24xx_serial_ops = {
- .pm = s3c24xx_serial_pm,
- .tx_empty = s3c24xx_serial_tx_empty,
- .get_mctrl = s3c24xx_serial_get_mctrl,
- .set_mctrl = s3c24xx_serial_set_mctrl,
- .stop_tx = s3c24xx_serial_stop_tx,
- .start_tx = s3c24xx_serial_start_tx,
- .stop_rx = s3c24xx_serial_stop_rx,
- .break_ctl = s3c24xx_serial_break_ctl,
- .startup = s3c24xx_serial_startup,
- .shutdown = s3c24xx_serial_shutdown,
- .set_termios = s3c24xx_serial_set_termios,
- .type = s3c24xx_serial_type,
- .config_port = s3c24xx_serial_config_port,
- .verify_port = s3c24xx_serial_verify_port,
-#if defined(CONFIG_SERIAL_SAMSUNG_CONSOLE) && defined(CONFIG_CONSOLE_POLL)
- .poll_get_char = s3c24xx_serial_get_poll_char,
- .poll_put_char = s3c24xx_serial_put_poll_char,
-#endif
-};
-
static const struct uart_ops s3c64xx_serial_ops = {
.pm = s3c24xx_serial_pm,
.tx_empty = s3c24xx_serial_tx_empty,
@@ -1836,7 +1745,6 @@ static void s3c24xx_serial_init_port_default(int index) {
port->iotype = UPIO_MEM;
port->uartclk = 0;
port->fifosize = 16;
- port->ops = &s3c24xx_serial_ops;
port->flags = UPF_BOOT_AUTOCONF;
port->line = index;
}
@@ -1954,16 +1862,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
ourport->tx_irq = ret + 1;
}
- switch (ourport->info->type) {
- case TYPE_S3C24XX:
- ret = platform_get_irq(platdev, 1);
- if (ret > 0)
- ourport->tx_irq = ret;
- break;
- default:
- break;
- }
-
/*
* DMA is currently supported only on DT platforms, if DMA properties
* are specified.
@@ -2083,9 +1981,6 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
&ourport->drv_data->def_cfg;
switch (ourport->info->type) {
- case TYPE_S3C24XX:
- ourport->port.ops = &s3c24xx_serial_ops;
- break;
case TYPE_S3C6400:
ourport->port.ops = &s3c64xx_serial_ops;
break;
@@ -2376,14 +2271,14 @@ s3c24xx_serial_console_write(struct console *co, const char *s,
if (cons_uart->sysrq)
locked = false;
else if (oops_in_progress)
- locked = spin_trylock_irqsave(&cons_uart->lock, flags);
+ locked = uart_port_trylock_irqsave(cons_uart, &flags);
else
- spin_lock_irqsave(&cons_uart->lock, flags);
+ uart_port_lock_irqsave(cons_uart, &flags);
uart_console_write(cons_uart, s, count, s3c24xx_serial_console_putchar);
if (locked)
- spin_unlock_irqrestore(&cons_uart->lock, flags);
+ uart_port_unlock_irqrestore(cons_uart, flags);
}
/* Shouldn't be __init, as it can be instantiated from other module */
@@ -2840,17 +2735,7 @@ static struct samsung_early_console_data s3c2410_early_console_data = {
.rxfifo_mask = S3C2410_UFSTAT_RXFULL | S3C2410_UFSTAT_RXMASK,
};
-static int __init s3c2410_early_console_setup(struct earlycon_device *device,
- const char *opt)
-{
- device->port.private_data = &s3c2410_early_console_data;
- return samsung_early_console_setup(device, opt);
-}
-
-OF_EARLYCON_DECLARE(s3c2410, "samsung,s3c2410-uart",
- s3c2410_early_console_setup);
-
-/* S3C2412, S3C2440, S3C64xx */
+/* S3C64xx */
static struct samsung_early_console_data s3c2440_early_console_data = {
.txfull_mask = S3C2440_UFSTAT_TXFULL,
.rxfifo_mask = S3C2440_UFSTAT_RXFULL | S3C2440_UFSTAT_RXMASK,
@@ -2863,10 +2748,6 @@ static int __init s3c2440_early_console_setup(struct earlycon_device *device,
return samsung_early_console_setup(device, opt);
}
-OF_EARLYCON_DECLARE(s3c2412, "samsung,s3c2412-uart",
- s3c2440_early_console_setup);
-OF_EARLYCON_DECLARE(s3c2440, "samsung,s3c2440-uart",
- s3c2440_early_console_setup);
OF_EARLYCON_DECLARE(s3c6400, "samsung,s3c6400-uart",
s3c2440_early_console_setup);
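With the S3C24xx entries removed, only the S3C6400-compatible earlycon remains in this driver. For reference, it is still selected the usual way: when the device tree provides a stdout-path, passing the bare parameter on the kernel command line is enough (the explicit name/address form also exists, but the exact options are platform dependent, so only the generic form is shown here):

    earlycon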
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index f3cd69346482..dbec29d9a6c3 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -610,7 +610,7 @@ static void sbd_set_termios(struct uart_port *uport, struct ktermios *termios,
else
aux &= ~M_DUART_CTS_CHNG_ENA;
- spin_lock(&uport->lock);
+ uart_port_lock(uport);
if (sport->tx_stopped)
command |= M_DUART_TX_DIS;
@@ -632,7 +632,7 @@ static void sbd_set_termios(struct uart_port *uport, struct ktermios *termios,
write_sbdchn(sport, R_DUART_CMD, command);
- spin_unlock(&uport->lock);
+ uart_port_unlock(uport);
}
@@ -839,22 +839,22 @@ static void sbd_console_write(struct console *co, const char *s,
unsigned int mask;
/* Disable transmit interrupts and enable the transmitter. */
- spin_lock_irqsave(&uport->lock, flags);
+ uart_port_lock_irqsave(uport, &flags);
mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2),
mask & ~M_DUART_IMR_TX);
write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_EN);
- spin_unlock_irqrestore(&uport->lock, flags);
+ uart_port_unlock_irqrestore(uport, flags);
uart_console_write(&sport->port, s, count, sbd_console_putchar);
/* Restore transmit interrupts and the transmitter enable. */
- spin_lock_irqsave(&uport->lock, flags);
+ uart_port_lock_irqsave(uport, &flags);
sbd_line_drain(sport);
if (sport->tx_stopped)
write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS);
write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
- spin_unlock_irqrestore(&uport->lock, flags);
+ uart_port_unlock_irqrestore(uport, flags);
}
static int __init sbd_console_setup(struct console *co, char *options)
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index f61d98e09dc3..db2bb1c0d36c 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -223,7 +223,7 @@
* trigger levels. Trigger levels from 4 characters to 60 characters are
* available with a granularity of four.
*
- * When the trigger level setting in TLR is zero, the SC16IS740/750/760 uses the
+ * When the trigger level setting in TLR is zero, the SC16IS74x/75x/76x uses the
* trigger level setting defined in FCR. If TLR has non-zero trigger level value
* the trigger level defined in FCR is discarded. This applies to both transmit
* FIFO and receive FIFO trigger level setting.
@@ -234,7 +234,7 @@
#define SC16IS7XX_TLR_TX_TRIGGER(words) ((((words) / 4) & 0x0f) << 0)
#define SC16IS7XX_TLR_RX_TRIGGER(words) ((((words) / 4) & 0x0f) << 4)
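These macros pack a character count into the TLR nibbles described in the comment above. A minimal sketch of programming the trigger levels, assuming the driver's sc16is7xx_port_write() helper and the SC16IS7XX_TLR_REG definition found elsewhere in this file:

    /* Illustrative only: request an RX trigger of 8 characters and a TX
     * trigger of 16 characters. The macros divide by four and shift into
     * the high/low nibble, so this writes 0x20 | 0x04 = 0x24 to TLR.
     */
    sc16is7xx_port_write(port, SC16IS7XX_TLR_REG,
                         SC16IS7XX_TLR_RX_TRIGGER(8) |
                         SC16IS7XX_TLR_TX_TRIGGER(16));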
-/* IOControl register bits (Only 750/760) */
+/* IOControl register bits (Only 75x/76x) */
#define SC16IS7XX_IOCONTROL_LATCH_BIT (1 << 0) /* Enable input latching */
#define SC16IS7XX_IOCONTROL_MODEM_A_BIT (1 << 1) /* Enable GPIO[7:4] as modem A pins */
#define SC16IS7XX_IOCONTROL_MODEM_B_BIT (1 << 2) /* Enable GPIO[3:0] as modem B pins */
@@ -249,9 +249,9 @@
#define SC16IS7XX_EFCR_RTS_INVERT_BIT (1 << 5) /* RTS output inversion */
#define SC16IS7XX_EFCR_IRDA_MODE_BIT (1 << 7) /* IrDA mode
* 0 = rate upto 115.2 kbit/s
- * - Only 750/760
+ * - Only 75x/76x
* 1 = rate upto 1.152 Mbit/s
- * - Only 760
+ * - Only 76x
*/
/* EFR register bits */
@@ -358,7 +358,6 @@ static struct uart_driver sc16is7xx_uart = {
static void sc16is7xx_ier_set(struct uart_port *port, u8 bit);
static void sc16is7xx_stop_tx(struct uart_port *port);
-#define to_sc16is7xx_port(p,e) ((container_of((p), struct sc16is7xx_port, e)))
#define to_sc16is7xx_one(p,e) ((container_of((p), struct sc16is7xx_one, e)))
static int sc16is7xx_line(struct uart_port *port)
@@ -667,9 +666,9 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
}
if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
sc16is7xx_stop_tx(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return;
}
@@ -695,13 +694,13 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
sc16is7xx_fifo_write(port, to_send);
}
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
if (uart_circ_empty(xmit))
sc16is7xx_stop_tx(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static unsigned int sc16is7xx_get_hwmctrl(struct uart_port *port)
@@ -733,7 +732,7 @@ static void sc16is7xx_update_mlines(struct sc16is7xx_one *one)
one->old_mctrl = status;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if ((changed & TIOCM_RNG) && (status & TIOCM_RNG))
port->icount.rng++;
if (changed & TIOCM_DSR)
@@ -744,7 +743,7 @@ static void sc16is7xx_update_mlines(struct sc16is7xx_one *one)
uart_handle_cts_change(port, status & TIOCM_CTS);
wake_up_interruptible(&port->state->port.delta_msr_wait);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
@@ -823,9 +822,9 @@ static void sc16is7xx_tx_proc(struct kthread_work *ws)
sc16is7xx_handle_tx(port);
mutex_unlock(&s->efr_lock);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void sc16is7xx_reconf_rs485(struct uart_port *port)
@@ -836,14 +835,14 @@ static void sc16is7xx_reconf_rs485(struct uart_port *port)
struct serial_rs485 *rs485 = &port->rs485;
unsigned long irqflags;
- spin_lock_irqsave(&port->lock, irqflags);
+ uart_port_lock_irqsave(port, &irqflags);
if (rs485->flags & SER_RS485_ENABLED) {
efcr |= SC16IS7XX_EFCR_AUTO_RS485_BIT;
if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
efcr |= SC16IS7XX_EFCR_RTS_INVERT_BIT;
}
- spin_unlock_irqrestore(&port->lock, irqflags);
+ uart_port_unlock_irqrestore(port, irqflags);
sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG, mask, efcr);
}
@@ -854,10 +853,10 @@ static void sc16is7xx_reg_proc(struct kthread_work *ws)
struct sc16is7xx_one_config config;
unsigned long irqflags;
- spin_lock_irqsave(&one->port.lock, irqflags);
+ uart_port_lock_irqsave(&one->port, &irqflags);
config = one->config;
memset(&one->config, 0, sizeof(one->config));
- spin_unlock_irqrestore(&one->port.lock, irqflags);
+ uart_port_unlock_irqrestore(&one->port, irqflags);
if (config.flags & SC16IS7XX_RECONF_MD) {
u8 mcr = 0;
@@ -963,18 +962,18 @@ static void sc16is7xx_throttle(struct uart_port *port)
* value set in MCR register. Stop reading data from RX FIFO so the
* AutoRTS feature will de-activate RTS output.
*/
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
sc16is7xx_ier_clear(port, SC16IS7XX_IER_RDI_BIT);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void sc16is7xx_unthrottle(struct uart_port *port)
{
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
sc16is7xx_ier_set(port, SC16IS7XX_IER_RDI_BIT);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static unsigned int sc16is7xx_tx_empty(struct uart_port *port)
@@ -1113,7 +1112,7 @@ static void sc16is7xx_set_termios(struct uart_port *port,
/* Setup baudrate generator */
baud = sc16is7xx_set_baud(port, baud);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Update timeout according to new baud rate */
uart_update_timeout(port, termios->c_cflag, baud);
@@ -1121,7 +1120,7 @@ static void sc16is7xx_set_termios(struct uart_port *port,
if (UART_ENABLE_MS(port, termios->c_cflag))
sc16is7xx_enable_ms(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int sc16is7xx_config_rs485(struct uart_port *port, struct ktermios *termios,
@@ -1208,9 +1207,9 @@ static int sc16is7xx_startup(struct uart_port *port)
sc16is7xx_port_write(port, SC16IS7XX_IER_REG, val);
/* Enable modem status polling */
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
sc16is7xx_enable_ms(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
@@ -1409,6 +1408,29 @@ static int sc16is7xx_setup_gpio_chip(struct sc16is7xx_port *s)
}
#endif
+static void sc16is7xx_setup_irda_ports(struct sc16is7xx_port *s)
+{
+ int i;
+ int ret;
+ int count;
+ u32 irda_port[2];
+ struct device *dev = s->p[0].port.dev;
+
+ count = device_property_count_u32(dev, "irda-mode-ports");
+ if (count < 0 || count > ARRAY_SIZE(irda_port))
+ return;
+
+ ret = device_property_read_u32_array(dev, "irda-mode-ports",
+ irda_port, count);
+ if (ret)
+ return;
+
+ for (i = 0; i < count; i++) {
+ if (irda_port[i] < s->devtype->nr_uart)
+ s->p[irda_port[i]].irda_mode = true;
+ }
+}
+
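The new helper reads "irda-mode-ports" through the device_property_*() API instead of of_property_for_each_u32(), so the same code path now works whether the board is described by DT, ACPI, or software nodes. A hypothetical board-file fragment using software nodes (names are illustrative, not taken from any in-tree board):

    static const u32 sc16is7xx_irda_ports[] = { 0 };        /* put port 0 in IrDA mode */

    static const struct property_entry sc16is7xx_props[] = {
            PROPERTY_ENTRY_U32_ARRAY("irda-mode-ports", sc16is7xx_irda_ports),
            { }        /* sentinel */
    };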
/*
* Configure ports designated to operate as modem control lines.
*/
@@ -1591,16 +1613,7 @@ static int sc16is7xx_probe(struct device *dev,
sc16is7xx_power(&s->p[i].port, 0);
}
- if (dev->of_node) {
- struct property *prop;
- const __be32 *p;
- u32 u;
-
- of_property_for_each_u32(dev->of_node, "irda-mode-ports",
- prop, p, u)
- if (u < devtype->nr_uart)
- s->p[u].irda_mode = true;
- }
+ sc16is7xx_setup_irda_ports(s);
ret = sc16is7xx_setup_mctrl_ports(s);
if (ret)
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index d4ec943cb8e9..6d4006b41975 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -411,7 +411,7 @@ static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
}
- spin_lock_irqsave(&tup->uport.lock, flags);
+ uart_port_lock_irqsave(&tup->uport, &flags);
lcr = tup->lcr_shadow;
lcr |= UART_LCR_DLAB;
tegra_uart_write(tup, lcr, UART_LCR);
@@ -424,7 +424,7 @@ static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
/* Dummy read to ensure the write is posted */
tegra_uart_read(tup, UART_SCR);
- spin_unlock_irqrestore(&tup->uport.lock, flags);
+ uart_port_unlock_irqrestore(&tup->uport, flags);
tup->current_baud = baud;
@@ -522,13 +522,13 @@ static void tegra_uart_tx_dma_complete(void *args)
dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
count = tup->tx_bytes_requested - state.residue;
async_tx_ack(tup->tx_dma_desc);
- spin_lock_irqsave(&tup->uport.lock, flags);
+ uart_port_lock_irqsave(&tup->uport, &flags);
uart_xmit_advance(&tup->uport, count);
tup->tx_in_progress = 0;
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&tup->uport);
tegra_uart_start_next_tx(tup);
- spin_unlock_irqrestore(&tup->uport.lock, flags);
+ uart_port_unlock_irqrestore(&tup->uport, flags);
}
static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
@@ -598,13 +598,13 @@ static unsigned int tegra_uart_tx_empty(struct uart_port *u)
unsigned int ret = 0;
unsigned long flags;
- spin_lock_irqsave(&u->lock, flags);
+ uart_port_lock_irqsave(u, &flags);
if (!tup->tx_in_progress) {
unsigned long lsr = tegra_uart_read(tup, UART_LSR);
if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
ret = TIOCSER_TEMT;
}
- spin_unlock_irqrestore(&u->lock, flags);
+ uart_port_unlock_irqrestore(u, flags);
return ret;
}
@@ -727,7 +727,7 @@ static void tegra_uart_rx_dma_complete(void *args)
struct dma_tx_state state;
enum dma_status status;
- spin_lock_irqsave(&u->lock, flags);
+ uart_port_lock_irqsave(u, &flags);
status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
@@ -749,7 +749,7 @@ static void tegra_uart_rx_dma_complete(void *args)
set_rts(tup, true);
done:
- spin_unlock_irqrestore(&u->lock, flags);
+ uart_port_unlock_irqrestore(u, flags);
}
static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
@@ -836,7 +836,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
bool is_rx_int = false;
unsigned long flags;
- spin_lock_irqsave(&u->lock, flags);
+ uart_port_lock_irqsave(u, &flags);
while (1) {
iir = tegra_uart_read(tup, UART_IIR);
if (iir & UART_IIR_NO_INT) {
@@ -852,7 +852,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
} else if (is_rx_start) {
tegra_uart_start_rx_dma(tup);
}
- spin_unlock_irqrestore(&u->lock, flags);
+ uart_port_unlock_irqrestore(u, flags);
return IRQ_HANDLED;
}
@@ -969,11 +969,11 @@ static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
}
}
- spin_lock_irqsave(&tup->uport.lock, flags);
+ uart_port_lock_irqsave(&tup->uport, &flags);
/* Reset the Rx and Tx FIFOs */
tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
tup->current_baud = 0;
- spin_unlock_irqrestore(&tup->uport.lock, flags);
+ uart_port_unlock_irqrestore(&tup->uport, flags);
tup->rx_in_progress = 0;
tup->tx_in_progress = 0;
@@ -1292,7 +1292,7 @@ static void tegra_uart_set_termios(struct uart_port *u,
int ret;
max_divider *= 16;
- spin_lock_irqsave(&u->lock, flags);
+ uart_port_lock_irqsave(u, &flags);
/* Changing configuration, it is safe to stop any rx now */
if (tup->rts_active)
@@ -1341,7 +1341,7 @@ static void tegra_uart_set_termios(struct uart_port *u,
baud = uart_get_baud_rate(u, termios, oldtermios,
parent_clk_rate/max_divider,
parent_clk_rate/16);
- spin_unlock_irqrestore(&u->lock, flags);
+ uart_port_unlock_irqrestore(u, flags);
ret = tegra_set_baudrate(tup, baud);
if (ret < 0) {
dev_err(tup->uport.dev, "Failed to set baud rate\n");
@@ -1349,7 +1349,7 @@ static void tegra_uart_set_termios(struct uart_port *u,
}
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
- spin_lock_irqsave(&u->lock, flags);
+ uart_port_lock_irqsave(u, &flags);
/* Flow control */
if (termios->c_cflag & CRTSCTS) {
@@ -1382,7 +1382,7 @@ static void tegra_uart_set_termios(struct uart_port *u,
if (termios->c_iflag & IGNBRK)
tup->uport.ignore_status_mask |= UART_LSR_BI;
- spin_unlock_irqrestore(&u->lock, flags);
+ uart_port_unlock_irqrestore(u, flags);
}
static const char *tegra_uart_type(struct uart_port *u)
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index d5ba6e90bd95..f1348a509552 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -79,7 +79,7 @@ static inline void uart_port_deref(struct uart_port *uport)
({ \
struct uart_port *__uport = uart_port_ref(state); \
if (__uport) \
- spin_lock_irqsave(&__uport->lock, flags); \
+ uart_port_lock_irqsave(__uport, &flags); \
__uport; \
})
@@ -87,7 +87,7 @@ static inline void uart_port_deref(struct uart_port *uport)
({ \
struct uart_port *__uport = uport; \
if (__uport) { \
- spin_unlock_irqrestore(&__uport->lock, flags); \
+ uart_port_unlock_irqrestore(__uport, flags); \
uart_port_deref(__uport); \
} \
})
@@ -146,7 +146,7 @@ static void __uart_start(struct uart_state *state)
/* Increment the runtime PM usage count for the active check below */
err = pm_runtime_get(&port_dev->dev);
- if (err < 0) {
+ if (err < 0 && err != -EINPROGRESS) {
pm_runtime_put_noidle(&port_dev->dev);
return;
}
@@ -179,12 +179,12 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
unsigned long flags;
unsigned int old;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
old = port->mctrl;
port->mctrl = (old & ~clear) | set;
if (old != port->mctrl && !(port->rs485.flags & SER_RS485_ENABLED))
port->ops->set_mctrl(port, port->mctrl);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
#define uart_set_mctrl(port, set) uart_update_mctrl(port, set, 0)
@@ -219,7 +219,7 @@ static void uart_change_line_settings(struct tty_struct *tty, struct uart_state
/*
* Set modem status enables based on termios cflag
*/
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
if (termios->c_cflag & CRTSCTS)
uport->status |= UPSTAT_CTS_ENABLE;
else
@@ -240,7 +240,7 @@ static void uart_change_line_settings(struct tty_struct *tty, struct uart_state
else
__uart_start(state);
}
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
}
/*
@@ -431,7 +431,7 @@ EXPORT_SYMBOL(uart_update_timeout);
* baud.
*
* If the new baud rate is invalid, try the @old termios setting. If it's still
- * invalid, we try 9600 baud.
+ * invalid, we try 9600 baud. If that is also invalid 0 is returned.
*
* The @termios structure is updated to reflect the baud rate we're actually
* going to be using. Don't do this for the case where B0 is requested ("hang
@@ -515,8 +515,6 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
max - 1, max - 1);
}
}
- /* Should never happen */
- WARN_ON(1);
return 0;
}
EXPORT_SYMBOL(uart_get_baud_rate);
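With the WARN_ON() gone, a zero return from uart_get_baud_rate() is now a documented outcome when even 9600 baud falls outside the port's supported range, so callers are expected to cope with it. A hedged sketch of a defensive caller; min_baud and max_baud are placeholders, not variables from this file:

    baud = uart_get_baud_rate(port, termios, old, min_baud, max_baud);
    if (!baud)
            baud = min_baud;        /* 0 means even 9600 was rejected; avoid a zero divisor */
    quot = uart_get_divisor(port, baud);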
@@ -702,11 +700,11 @@ static void uart_send_xchar(struct tty_struct *tty, char ch)
if (port->ops->send_xchar)
port->ops->send_xchar(port, ch);
else {
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
port->x_char = ch;
if (ch)
port->ops->start_tx(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
uart_port_deref(port);
}
@@ -775,6 +773,9 @@ static int uart_get_info(struct tty_port *port, struct serial_struct *retinfo)
struct uart_port *uport;
int ret = -ENODEV;
+ /* Initialize structure in case we error out later to prevent any stack info leakage. */
+ *retinfo = (struct serial_struct){};
+
/*
* Ensure the state we copy is consistent and no hardware changes
* occur as we go
@@ -1085,9 +1086,9 @@ static int uart_tiocmget(struct tty_struct *tty)
if (!tty_io_error(tty)) {
result = uport->mctrl;
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
result |= uport->ops->get_mctrl(uport);
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
}
out:
mutex_unlock(&port->mutex);
@@ -1223,16 +1224,16 @@ static int uart_wait_modem_status(struct uart_state *state, unsigned long arg)
uport = uart_port_ref(state);
if (!uport)
return -EIO;
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
memcpy(&cprev, &uport->icount, sizeof(struct uart_icount));
uart_enable_ms(uport);
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
add_wait_queue(&port->delta_msr_wait, &wait);
for (;;) {
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
memcpy(&cnow, &uport->icount, sizeof(struct uart_icount));
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
set_current_state(TASK_INTERRUPTIBLE);
@@ -1277,9 +1278,9 @@ static int uart_get_icount(struct tty_struct *tty,
uport = uart_port_ref(state);
if (!uport)
return -EIO;
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
memcpy(&cnow, &uport->icount, sizeof(struct uart_icount));
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
uart_port_deref(uport);
icount->cts = cnow.cts;
@@ -1413,9 +1414,9 @@ static int uart_rs485_config(struct uart_port *port)
uart_sanitize_serial_rs485(port, rs485);
uart_set_rs485_termination(port, rs485);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ret = port->rs485_config(port, NULL, rs485);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (ret)
memset(rs485, 0, sizeof(*rs485));
@@ -1428,9 +1429,9 @@ static int uart_get_rs485_config(struct uart_port *port,
unsigned long flags;
struct serial_rs485 aux;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
aux = port->rs485;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (copy_to_user(rs485, &aux, sizeof(aux)))
return -EFAULT;
@@ -1457,7 +1458,7 @@ static int uart_set_rs485_config(struct tty_struct *tty, struct uart_port *port,
uart_sanitize_serial_rs485(port, &rs485);
uart_set_rs485_termination(port, &rs485);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ret = port->rs485_config(port, &tty->termios, &rs485);
if (!ret) {
port->rs485 = rs485;
@@ -1466,7 +1467,7 @@ static int uart_set_rs485_config(struct tty_struct *tty, struct uart_port *port,
if (!(rs485.flags & SER_RS485_ENABLED))
port->ops->set_mctrl(port, port->mctrl);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (ret)
return ret;
@@ -1485,9 +1486,9 @@ static int uart_get_iso7816_config(struct uart_port *port,
if (!port->iso7816_config)
return -ENOTTY;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
aux = port->iso7816;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (copy_to_user(iso7816, &aux, sizeof(aux)))
return -EFAULT;
@@ -1516,9 +1517,9 @@ static int uart_set_iso7816_config(struct uart_port *port,
if (iso7816.reserved[i])
return -EINVAL;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ret = port->iso7816_config(port, &iso7816);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (ret)
return ret;
@@ -1735,9 +1736,9 @@ static void uart_tty_port_shutdown(struct tty_port *port)
if (WARN(!uport, "detached port still initialized!\n"))
return;
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
uport->ops->stop_rx(uport);
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
uart_port_shutdown(port);
@@ -1751,10 +1752,10 @@ static void uart_tty_port_shutdown(struct tty_port *port)
/*
* Free the transmit buffer.
*/
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
buf = state->xmit.buf;
state->xmit.buf = NULL;
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
free_page((unsigned long)buf);
@@ -1897,10 +1898,10 @@ static bool uart_carrier_raised(struct tty_port *port)
*/
if (WARN_ON(!uport))
return true;
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
uart_enable_ms(uport);
mctrl = uport->ops->get_mctrl(uport);
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
uart_port_deref(uport);
return mctrl & TIOCM_CAR;
@@ -2017,9 +2018,9 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
pm_state = state->pm_state;
if (pm_state != UART_PM_STATE_ON)
uart_change_pm(state, UART_PM_STATE_ON);
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
status = uport->ops->get_mctrl(uport);
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
if (pm_state != UART_PM_STATE_ON)
uart_change_pm(state, pm_state);
@@ -2358,9 +2359,9 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
*/
if (!console_suspend_enabled && uart_console(uport)) {
if (uport->ops->start_rx) {
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
uport->ops->stop_rx(uport);
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
}
goto unlock;
}
@@ -2375,7 +2376,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
tty_port_set_suspended(port, true);
tty_port_set_initialized(port, false);
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
ops->stop_tx(uport);
if (!(uport->rs485.flags & SER_RS485_ENABLED))
ops->set_mctrl(uport, 0);
@@ -2383,7 +2384,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
mctrl = uport->mctrl;
uport->mctrl = 0;
ops->stop_rx(uport);
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
/*
* Wait for the transmitter to empty.
@@ -2455,9 +2456,9 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
uart_change_pm(state, UART_PM_STATE_ON);
uport->ops->set_termios(uport, &termios, NULL);
if (!console_suspend_enabled && uport->ops->start_rx) {
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
uport->ops->start_rx(uport);
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
}
if (console_suspend_enabled)
console_start(uport->cons);
@@ -2468,10 +2469,10 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
int ret;
uart_change_pm(state, UART_PM_STATE_ON);
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
if (!(uport->rs485.flags & SER_RS485_ENABLED))
ops->set_mctrl(uport, 0);
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
if (console_suspend_enabled || !uart_console(uport)) {
/* Protected by port mutex for now */
struct tty_struct *tty = port->tty;
@@ -2481,11 +2482,11 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
if (tty)
uart_change_line_settings(tty, state, NULL);
uart_rs485_config(uport);
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
if (!(uport->rs485.flags & SER_RS485_ENABLED))
ops->set_mctrl(uport, uport->mctrl);
ops->start_tx(uport);
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
tty_port_set_initialized(port, true);
} else {
/*
@@ -2588,11 +2589,11 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
* keep the DTR setting that is set in uart_set_options()
* We probably don't need a spinlock around this, but
*/
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
port->mctrl &= TIOCM_DTR;
if (!(port->rs485.flags & SER_RS485_ENABLED))
port->ops->set_mctrl(port, port->mctrl);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
uart_rs485_config(port);
@@ -3564,9 +3565,10 @@ int uart_get_rs485_mode(struct uart_port *port)
{
struct serial_rs485 *rs485conf = &port->rs485;
struct device *dev = port->dev;
+ enum gpiod_flags dflags;
+ struct gpio_desc *desc;
u32 rs485_delay[2];
int ret;
- int rx_during_tx_gpio_flag;
ret = device_property_read_u32_array(dev, "rs485-rts-delay",
rs485_delay, 2);
@@ -3605,26 +3607,19 @@ int uart_get_rs485_mode(struct uart_port *port)
* bus participants enable it, no communication is possible at all.
* Works fine for short cables and users may enable for longer cables.
*/
- port->rs485_term_gpio = devm_gpiod_get_optional(dev, "rs485-term",
- GPIOD_OUT_LOW);
- if (IS_ERR(port->rs485_term_gpio)) {
- ret = PTR_ERR(port->rs485_term_gpio);
- port->rs485_term_gpio = NULL;
- return dev_err_probe(dev, ret, "Cannot get rs485-term-gpios\n");
- }
+ desc = devm_gpiod_get_optional(dev, "rs485-term", GPIOD_OUT_LOW);
+ if (IS_ERR(desc))
+ return dev_err_probe(dev, PTR_ERR(desc), "Cannot get rs485-term-gpios\n");
+ port->rs485_term_gpio = desc;
if (port->rs485_term_gpio)
port->rs485_supported.flags |= SER_RS485_TERMINATE_BUS;
- rx_during_tx_gpio_flag = (rs485conf->flags & SER_RS485_RX_DURING_TX) ?
- GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
- port->rs485_rx_during_tx_gpio = devm_gpiod_get_optional(dev,
- "rs485-rx-during-tx",
- rx_during_tx_gpio_flag);
- if (IS_ERR(port->rs485_rx_during_tx_gpio)) {
- ret = PTR_ERR(port->rs485_rx_during_tx_gpio);
- port->rs485_rx_during_tx_gpio = NULL;
- return dev_err_probe(dev, ret, "Cannot get rs485-rx-during-tx-gpios\n");
- }
+ dflags = (rs485conf->flags & SER_RS485_RX_DURING_TX) ?
+ GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ desc = devm_gpiod_get_optional(dev, "rs485-rx-during-tx", dflags);
+ if (IS_ERR(desc))
+ return dev_err_probe(dev, PTR_ERR(desc), "Cannot get rs485-rx-during-tx-gpios\n");
+ port->rs485_rx_during_tx_gpio = desc;
return 0;
}
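The rework above keeps the optional-GPIO handling in one shape: devm_gpiod_get_optional() returns NULL when the property is simply absent, an ERR_PTR() on a real failure, and the port field is only assigned once the descriptor is known to be valid. A generic sketch of that pattern, with placeholder names:

    desc = devm_gpiod_get_optional(dev, "example-function", GPIOD_OUT_LOW);
    if (IS_ERR(desc))
            return dev_err_probe(dev, PTR_ERR(desc),
                                 "Cannot get example-function-gpios\n");
    priv->example_gpio = desc;      /* NULL if the optional GPIO is not described */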
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index 7d5aaa8d422b..e51ca593ab86 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -184,7 +184,7 @@ static irqreturn_t mctrl_gpio_irq_handle(int irq, void *context)
mctrl_gpio_get(gpios, &mctrl);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
mctrl_diff = mctrl ^ gpios->mctrl_prev;
gpios->mctrl_prev = mctrl;
@@ -205,7 +205,7 @@ static irqreturn_t mctrl_gpio_irq_handle(int irq, void *context)
wake_up_interruptible(&port->state->port.delta_msr_wait);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c
index 862423237007..88975a4df306 100644
--- a/drivers/tty/serial/serial_port.c
+++ b/drivers/tty/serial/serial_port.c
@@ -35,10 +35,10 @@ static int serial_port_runtime_resume(struct device *dev)
goto out;
/* Flush any pending TX for the port */
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (__serial_port_busy(port))
port->ops->start_tx(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
out:
pm_runtime_mark_last_busy(dev);
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index be08fb6f749c..eaa980722455 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -335,13 +335,13 @@ static irqreturn_t serial_txx9_interrupt(int irq, void *dev_id)
unsigned int status;
while (1) {
- spin_lock(&up->lock);
+ uart_port_lock(up);
status = sio_in(up, TXX9_SIDISR);
if (!(sio_in(up, TXX9_SIDICR) & TXX9_SIDICR_TIE))
status &= ~TXX9_SIDISR_TDIS;
if (!(status & (TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS |
TXX9_SIDISR_TOUT))) {
- spin_unlock(&up->lock);
+ uart_port_unlock(up);
break;
}
@@ -353,7 +353,7 @@ static irqreturn_t serial_txx9_interrupt(int irq, void *dev_id)
sio_mask(up, TXX9_SIDISR,
TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS |
TXX9_SIDISR_TOUT);
- spin_unlock(&up->lock);
+ uart_port_unlock(up);
if (pass_counter++ > PASS_LIMIT)
break;
@@ -367,9 +367,9 @@ static unsigned int serial_txx9_tx_empty(struct uart_port *up)
unsigned long flags;
unsigned int ret;
- spin_lock_irqsave(&up->lock, flags);
+ uart_port_lock_irqsave(up, &flags);
ret = (sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS) ? TIOCSER_TEMT : 0;
- spin_unlock_irqrestore(&up->lock, flags);
+ uart_port_unlock_irqrestore(up, flags);
return ret;
}
@@ -399,12 +399,12 @@ static void serial_txx9_break_ctl(struct uart_port *up, int break_state)
{
unsigned long flags;
- spin_lock_irqsave(&up->lock, flags);
+ uart_port_lock_irqsave(up, &flags);
if (break_state == -1)
sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK);
else
sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK);
- spin_unlock_irqrestore(&up->lock, flags);
+ uart_port_unlock_irqrestore(up, flags);
}
#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
@@ -517,9 +517,9 @@ static int serial_txx9_startup(struct uart_port *up)
/*
* Now, initialize the UART
*/
- spin_lock_irqsave(&up->lock, flags);
+ uart_port_lock_irqsave(up, &flags);
serial_txx9_set_mctrl(up, up->mctrl);
- spin_unlock_irqrestore(&up->lock, flags);
+ uart_port_unlock_irqrestore(up, flags);
/* Enable RX/TX */
sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RSDE | TXX9_SIFLCR_TSDE);
@@ -541,9 +541,9 @@ static void serial_txx9_shutdown(struct uart_port *up)
*/
sio_out(up, TXX9_SIDICR, 0); /* disable all intrs */
- spin_lock_irqsave(&up->lock, flags);
+ uart_port_lock_irqsave(up, &flags);
serial_txx9_set_mctrl(up, up->mctrl);
- spin_unlock_irqrestore(&up->lock, flags);
+ uart_port_unlock_irqrestore(up, flags);
/*
* Disable break condition
@@ -625,7 +625,7 @@ serial_txx9_set_termios(struct uart_port *up, struct ktermios *termios,
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
- spin_lock_irqsave(&up->lock, flags);
+ uart_port_lock_irqsave(up, &flags);
/*
* Update the per-port timeout.
@@ -676,7 +676,7 @@ serial_txx9_set_termios(struct uart_port *up, struct ktermios *termios,
sio_out(up, TXX9_SIFCR, fcr);
serial_txx9_set_mctrl(up, up->mctrl);
- spin_unlock_irqrestore(&up->lock, flags);
+ uart_port_unlock_irqrestore(up, flags);
}
static void
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index a560b729fa3b..84ab434c94ba 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1205,7 +1205,7 @@ static void sci_dma_tx_complete(void *arg)
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_xmit_advance(port, s->tx_dma_len);
@@ -1229,7 +1229,7 @@ static void sci_dma_tx_complete(void *arg)
}
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/* Locking: called with port lock held */
@@ -1320,7 +1320,7 @@ static void sci_dma_rx_complete(void *arg)
dev_dbg(port->dev, "%s(%d) active cookie %d\n", __func__, port->line,
s->active_rx);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
active = sci_dma_rx_find_active(s);
if (active >= 0)
@@ -1347,20 +1347,20 @@ static void sci_dma_rx_complete(void *arg)
dma_async_issue_pending(chan);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
dev_dbg(port->dev, "%s: cookie %d #%d, new active cookie %d\n",
__func__, s->cookie_rx[active], active, s->active_rx);
return;
fail:
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
/* Switch to PIO */
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
dmaengine_terminate_async(chan);
sci_dma_rx_chan_invalidate(s);
sci_dma_rx_reenable_irq(s);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void sci_dma_tx_release(struct sci_port *s)
@@ -1409,13 +1409,13 @@ static int sci_dma_rx_submit(struct sci_port *s, bool port_lock_held)
fail:
/* Switch to PIO */
if (!port_lock_held)
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (i)
dmaengine_terminate_async(chan);
sci_dma_rx_chan_invalidate(s);
sci_start_rx(port);
if (!port_lock_held)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return -EAGAIN;
}
@@ -1437,14 +1437,14 @@ static void sci_dma_tx_work_fn(struct work_struct *work)
* transmit till the end, and then the rest. Take the port lock to get a
* consistent xmit buffer state.
*/
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
head = xmit->head;
tail = xmit->tail;
buf = s->tx_dma_addr + tail;
s->tx_dma_len = CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE);
if (!s->tx_dma_len) {
/* Transmit buffer has been flushed */
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
return;
}
@@ -1452,7 +1452,7 @@ static void sci_dma_tx_work_fn(struct work_struct *work)
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n");
goto switch_to_pio;
}
@@ -1464,12 +1464,12 @@ static void sci_dma_tx_work_fn(struct work_struct *work)
desc->callback_param = s;
s->cookie_tx = dmaengine_submit(desc);
if (dma_submit_error(s->cookie_tx)) {
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
goto switch_to_pio;
}
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n",
__func__, xmit->buf, tail, head, s->cookie_tx);
@@ -1477,10 +1477,10 @@ static void sci_dma_tx_work_fn(struct work_struct *work)
return;
switch_to_pio:
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
s->chan_tx = NULL;
sci_start_tx(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return;
}
@@ -1497,17 +1497,17 @@ static enum hrtimer_restart sci_dma_rx_timer_fn(struct hrtimer *t)
dev_dbg(port->dev, "DMA Rx timed out\n");
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
active = sci_dma_rx_find_active(s);
if (active < 0) {
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return HRTIMER_NORESTART;
}
status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
if (status == DMA_COMPLETE) {
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
dev_dbg(port->dev, "Cookie %d #%d has already completed\n",
s->active_rx, active);
@@ -1525,7 +1525,7 @@ static enum hrtimer_restart sci_dma_rx_timer_fn(struct hrtimer *t)
*/
status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
if (status == DMA_COMPLETE) {
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
dev_dbg(port->dev, "Transaction complete after DMA engine was stopped");
return HRTIMER_NORESTART;
}
@@ -1546,7 +1546,7 @@ static enum hrtimer_restart sci_dma_rx_timer_fn(struct hrtimer *t)
sci_dma_rx_reenable_irq(s);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return HRTIMER_NORESTART;
}
@@ -1770,9 +1770,9 @@ static irqreturn_t sci_tx_interrupt(int irq, void *ptr)
struct uart_port *port = ptr;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
sci_transmit_chars(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return IRQ_HANDLED;
}
@@ -1786,11 +1786,11 @@ static irqreturn_t sci_tx_end_interrupt(int irq, void *ptr)
if (port->type != PORT_SCI)
return sci_tx_interrupt(irq, ptr);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ctrl = serial_port_in(port, SCSCR);
ctrl &= ~(SCSCR_TE | SCSCR_TEIE);
serial_port_out(port, SCSCR, ctrl);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return IRQ_HANDLED;
}
@@ -2187,7 +2187,7 @@ static void sci_break_ctl(struct uart_port *port, int break_state)
return;
}
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
scsptr = serial_port_in(port, SCSPTR);
scscr = serial_port_in(port, SCSCR);
@@ -2201,7 +2201,7 @@ static void sci_break_ctl(struct uart_port *port, int break_state)
serial_port_out(port, SCSPTR, scsptr);
serial_port_out(port, SCSCR, scscr);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int sci_startup(struct uart_port *port)
@@ -2233,7 +2233,7 @@ static void sci_shutdown(struct uart_port *port)
s->autorts = false;
mctrl_gpio_disable_ms(to_sci_port(port)->gpios);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
sci_stop_rx(port);
sci_stop_tx(port);
/*
@@ -2243,7 +2243,7 @@ static void sci_shutdown(struct uart_port *port)
scr = serial_port_in(port, SCSCR);
serial_port_out(port, SCSCR, scr &
(SCSCR_CKE1 | SCSCR_CKE0 | s->hscif_tot));
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
#ifdef CONFIG_SERIAL_SH_SCI_DMA
if (s->chan_rx_saved) {
@@ -2545,7 +2545,7 @@ done:
serial_port_out(port, SCCKS, sccks);
}
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
sci_reset(port);
@@ -2667,7 +2667,7 @@ done:
if ((termios->c_cflag & CREAD) != 0)
sci_start_rx(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
sci_port_disable(s);
@@ -3052,9 +3052,9 @@ static void serial_console_write(struct console *co, const char *s,
if (port->sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock_irqsave(&port->lock, flags);
+ locked = uart_port_trylock_irqsave(port, &flags);
else
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* first save SCSCR then disable interrupts, keep clock source */
ctrl = serial_port_in(port, SCSCR);
@@ -3074,7 +3074,7 @@ static void serial_console_write(struct console *co, const char *s,
serial_port_out(port, SCSCR, ctrl);
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int serial_console_setup(struct console *co, char *options)
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index d195c5de52e7..b296e57a9dee 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -521,11 +521,11 @@ static irqreturn_t sifive_serial_irq(int irq, void *dev_id)
struct sifive_serial_port *ssp = dev_id;
u32 ip;
- spin_lock(&ssp->port.lock);
+ uart_port_lock(&ssp->port);
ip = __ssp_readl(ssp, SIFIVE_SERIAL_IP_OFFS);
if (!ip) {
- spin_unlock(&ssp->port.lock);
+ uart_port_unlock(&ssp->port);
return IRQ_NONE;
}
@@ -534,7 +534,7 @@ static irqreturn_t sifive_serial_irq(int irq, void *dev_id)
if (ip & SIFIVE_SERIAL_IP_TXWM_MASK)
__ssp_transmit_chars(ssp);
- spin_unlock(&ssp->port.lock);
+ uart_port_unlock(&ssp->port);
return IRQ_HANDLED;
}
@@ -653,7 +653,7 @@ static void sifive_serial_set_termios(struct uart_port *port,
ssp->port.uartclk / 16);
__ssp_update_baud_rate(ssp, rate);
- spin_lock_irqsave(&ssp->port.lock, flags);
+ uart_port_lock_irqsave(&ssp->port, &flags);
/* Update the per-port timeout */
uart_update_timeout(port, termios->c_cflag, rate);
@@ -670,7 +670,7 @@ static void sifive_serial_set_termios(struct uart_port *port,
if (v != old_v)
__ssp_writel(v, SIFIVE_SERIAL_RXCTRL_OFFS, ssp);
- spin_unlock_irqrestore(&ssp->port.lock, flags);
+ uart_port_unlock_irqrestore(&ssp->port, flags);
}
static void sifive_serial_release_port(struct uart_port *port)
@@ -795,9 +795,9 @@ static void sifive_serial_console_write(struct console *co, const char *s,
if (ssp->port.sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock(&ssp->port.lock);
+ locked = uart_port_trylock(&ssp->port);
else
- spin_lock(&ssp->port.lock);
+ uart_port_lock(&ssp->port);
ier = __ssp_readl(ssp, SIFIVE_SERIAL_IE_OFFS);
__ssp_writel(0, SIFIVE_SERIAL_IE_OFFS, ssp);
@@ -807,7 +807,7 @@ static void sifive_serial_console_write(struct console *co, const char *s,
__ssp_writel(ier, SIFIVE_SERIAL_IE_OFFS, ssp);
if (locked)
- spin_unlock(&ssp->port.lock);
+ uart_port_unlock(&ssp->port);
local_irq_restore(flags);
}
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index f328fa57231f..f257525f9299 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -247,7 +247,7 @@ static void sprd_complete_tx_dma(void *data)
struct circ_buf *xmit = &port->state->xmit;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
dma_unmap_single(port->dev, sp->tx_dma.phys_addr,
sp->tx_dma.trans_len, DMA_TO_DEVICE);
@@ -260,7 +260,7 @@ static void sprd_complete_tx_dma(void *data)
sprd_tx_dma_config(port))
sp->tx_dma.trans_len = 0;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int sprd_uart_dma_submit(struct uart_port *port,
@@ -429,13 +429,13 @@ static void sprd_complete_rx_dma(void *data)
enum dma_status status;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
status = dmaengine_tx_status(sp->rx_dma.chn,
sp->rx_dma.cookie, &state);
if (status != DMA_COMPLETE) {
sprd_stop_rx(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return;
}
@@ -449,7 +449,7 @@ static void sprd_complete_rx_dma(void *data)
if (sprd_start_dma_rx(port))
sprd_stop_rx(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int sprd_start_dma_rx(struct uart_port *port)
@@ -638,12 +638,12 @@ static irqreturn_t sprd_handle_irq(int irq, void *dev_id)
struct uart_port *port = dev_id;
unsigned int ims;
- spin_lock(&port->lock);
+ uart_port_lock(port);
ims = serial_in(port, SPRD_IMSR);
if (!ims) {
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_NONE;
}
@@ -660,7 +660,7 @@ static irqreturn_t sprd_handle_irq(int irq, void *dev_id)
if (ims & SPRD_IMSR_TX_FIFO_EMPTY)
sprd_tx(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
@@ -727,13 +727,13 @@ static int sprd_startup(struct uart_port *port)
serial_out(port, SPRD_CTL1, fc);
/* enable interrupt */
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ien = serial_in(port, SPRD_IEN);
ien |= SPRD_IEN_BREAK_DETECT | SPRD_IEN_TIMEOUT;
if (!sp->rx_dma.enable)
ien |= SPRD_IEN_RX_FULL;
serial_out(port, SPRD_IEN, ien);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
@@ -793,7 +793,7 @@ static void sprd_set_termios(struct uart_port *port, struct ktermios *termios,
lcr |= SPRD_LCR_EVEN_PAR;
}
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* update the per-port timeout */
uart_update_timeout(port, termios->c_cflag, baud);
@@ -837,7 +837,7 @@ static void sprd_set_termios(struct uart_port *port, struct ktermios *termios,
fc |= RX_TOUT_THLD_DEF | RX_HFC_THLD_DEF;
serial_out(port, SPRD_CTL1, fc);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
/* Don't rewrite B0 */
if (tty_termios_baud_rate(termios))
@@ -974,9 +974,9 @@ static void sprd_console_write(struct console *co, const char *s,
if (port->sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock_irqsave(&port->lock, flags);
+ locked = uart_port_trylock_irqsave(port, &flags);
else
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_console_write(port, s, count, sprd_console_putchar);
@@ -984,7 +984,7 @@ static void sprd_console_write(struct console *co, const char *s,
wait_for_xmitr(port);
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int sprd_console_setup(struct console *co, char *options)
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index 92b9f6894006..a821f5d76a26 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -319,7 +319,7 @@ static irqreturn_t asc_interrupt(int irq, void *ptr)
struct uart_port *port = ptr;
u32 status;
- spin_lock(&port->lock);
+ uart_port_lock(port);
status = asc_in(port, ASC_STA);
@@ -334,7 +334,7 @@ static irqreturn_t asc_interrupt(int irq, void *ptr)
asc_transmit_chars(port);
}
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
@@ -452,10 +452,10 @@ static void asc_pm(struct uart_port *port, unsigned int state,
* we can come to turning it off. Note this is not called with
* the port spinlock held.
*/
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ctl = asc_in(port, ASC_CTL) & ~ASC_CTL_RUN;
asc_out(port, ASC_CTL, ctl);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
clk_disable_unprepare(ascport->clk);
break;
}
@@ -480,7 +480,7 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
cflag = termios->c_cflag;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* read control register */
ctrl_val = asc_in(port, ASC_CTL);
@@ -594,7 +594,7 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
/* write final value and enable port */
asc_out(port, ASC_CTL, (ctrl_val | ASC_CTL_RUN));
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *asc_type(struct uart_port *port)
@@ -849,9 +849,9 @@ static void asc_console_write(struct console *co, const char *s, unsigned count)
if (port->sysrq)
locked = 0; /* asc_interrupt has already claimed the lock */
else if (oops_in_progress)
- locked = spin_trylock_irqsave(&port->lock, flags);
+ locked = uart_port_trylock_irqsave(port, &flags);
else
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/*
* Disable interrupts so we don't get the IRQ line bouncing
@@ -869,7 +869,7 @@ static void asc_console_write(struct console *co, const char *s, unsigned count)
asc_out(port, ASC_INTEN, intenable);
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int asc_console_setup(struct console *co, char *options)
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 5e9cf0c48813..3048620315d6 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -537,7 +537,7 @@ static void stm32_usart_rx_dma_complete(void *arg)
unsigned int size;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
size = stm32_usart_receive_chars(port, false);
uart_unlock_and_check_sysrq_irqrestore(port, flags);
if (size)
@@ -643,9 +643,9 @@ static void stm32_usart_tx_dma_complete(void *arg)
stm32_usart_tx_dma_terminate(stm32port);
/* Let's see if we have pending data to send */
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
stm32_usart_transmit_chars(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
@@ -889,7 +889,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
if (!stm32_port->throttled) {
if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_started(stm32_port)) ||
((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_started(stm32_port))) {
- spin_lock(&port->lock);
+ uart_port_lock(port);
size = stm32_usart_receive_chars(port, false);
uart_unlock_and_check_sysrq(port);
if (size)
@@ -898,14 +898,14 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
}
if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
- spin_lock(&port->lock);
+ uart_port_lock(port);
stm32_usart_transmit_chars(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
}
/* Receiver timeout irq for DMA RX */
if (stm32_usart_rx_dma_started(stm32_port) && !stm32_port->throttled) {
- spin_lock(&port->lock);
+ uart_port_lock(port);
size = stm32_usart_receive_chars(port, false);
uart_unlock_and_check_sysrq(port);
if (size)
@@ -993,7 +993,7 @@ static void stm32_usart_throttle(struct uart_port *port)
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/*
* Pause DMA transfer, so the RX data gets queued into the FIFO.
@@ -1006,7 +1006,7 @@ static void stm32_usart_throttle(struct uart_port *port)
stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
stm32_port->throttled = true;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/* Unthrottle the remote, the input buffer can now accept data. */
@@ -1016,7 +1016,7 @@ static void stm32_usart_unthrottle(struct uart_port *port)
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
if (stm32_port->cr3_irq)
stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);
@@ -1030,7 +1030,7 @@ static void stm32_usart_unthrottle(struct uart_port *port)
if (stm32_port->rx_ch)
stm32_usart_rx_dma_start_or_resume(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/* Receive stop */
@@ -1047,9 +1047,20 @@ static void stm32_usart_stop_rx(struct uart_port *port)
stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
}
-/* Handle breaks - ignored by us */
static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ if (break_state)
+ stm32_usart_set_bits(port, ofs->rqr, USART_RQR_SBKRQ);
+ else
+ stm32_usart_clr_bits(port, ofs->rqr, USART_RQR_SBKRQ);
+
+ spin_unlock_irqrestore(&port->lock, flags);
}
static int stm32_usart_startup(struct uart_port *port)
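With break_ctl() now implemented, a break condition can be raised from userspace through the normal termios path: the tty core invokes break_ctl(port, -1) to start the break and break_ctl(port, 0) to end it. A hypothetical userspace snippet; the device path is illustrative:

    #include <fcntl.h>
    #include <termios.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/dev/ttySTM0", O_RDWR | O_NOCTTY);

            if (fd < 0)
                    return 1;
            /* Reaches stm32_usart_break_ctl() via the serial core's break handling. */
            tcsendbreak(fd, 0);
            close(fd);
            return 0;
    }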
@@ -1158,7 +1169,7 @@ static void stm32_usart_set_termios(struct uart_port *port,
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
isr,
@@ -1349,7 +1360,7 @@ static void stm32_usart_set_termios(struct uart_port *port,
writel_relaxed(cr1, port->membase + ofs->cr1);
stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
/* Handle modem control interrupts */
if (UART_ENABLE_MS(port, termios->c_cflag))
@@ -1399,9 +1410,9 @@ static void stm32_usart_pm(struct uart_port *port, unsigned int state,
pm_runtime_get_sync(port->dev);
break;
case UART_PM_STATE_OFF:
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
pm_runtime_put_sync(port->dev);
break;
}
@@ -1884,9 +1895,9 @@ static void stm32_usart_console_write(struct console *co, const char *s,
int locked = 1;
if (oops_in_progress)
- locked = spin_trylock_irqsave(&port->lock, flags);
+ locked = uart_port_trylock_irqsave(port, &flags);
else
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Save and disable interrupts, enable the transmitter */
old_cr1 = readl_relaxed(port->membase + ofs->cr1);
@@ -1900,7 +1911,7 @@ static void stm32_usart_console_write(struct console *co, const char *s,
writel_relaxed(old_cr1, port->membase + ofs->cr1);
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int stm32_usart_console_setup(struct console *co, char *options)
@@ -2035,7 +2046,7 @@ static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
* low-power mode.
*/
if (stm32_port->rx_ch) {
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Poll data from DMA RX buffer if any */
if (!stm32_usart_rx_dma_pause(stm32_port))
size += stm32_usart_receive_chars(port, true);
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index c671d674bce4..5bfc0040f17b 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -217,10 +217,10 @@ static irqreturn_t sunhv_interrupt(int irq, void *dev_id)
struct tty_port *tport;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
tport = receive_chars(port);
transmit_chars(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (tport)
tty_flip_buffer_push(tport);
@@ -271,7 +271,7 @@ static void sunhv_send_xchar(struct uart_port *port, char ch)
if (ch == __DISABLED_CHAR)
return;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
while (limit-- > 0) {
long status = sun4v_con_putchar(ch);
@@ -280,7 +280,7 @@ static void sunhv_send_xchar(struct uart_port *port, char ch)
udelay(1);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/* port->lock held by caller. */
@@ -295,7 +295,7 @@ static void sunhv_break_ctl(struct uart_port *port, int break_state)
unsigned long flags;
int limit = 10000;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
while (limit-- > 0) {
long status = sun4v_con_putchar(CON_BREAK);
@@ -304,7 +304,7 @@ static void sunhv_break_ctl(struct uart_port *port, int break_state)
udelay(1);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
}
@@ -328,7 +328,7 @@ static void sunhv_set_termios(struct uart_port *port, struct ktermios *termios,
unsigned int iflag, cflag;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
iflag = termios->c_iflag;
cflag = termios->c_cflag;
@@ -343,7 +343,7 @@ static void sunhv_set_termios(struct uart_port *port, struct ktermios *termios,
uart_update_timeout(port, cflag,
(port->uartclk / (16 * quot)));
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *sunhv_type(struct uart_port *port)
@@ -437,9 +437,9 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign
int locked = 1;
if (port->sysrq || oops_in_progress)
- locked = spin_trylock_irqsave(&port->lock, flags);
+ locked = uart_port_trylock_irqsave(port, &flags);
else
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
while (n > 0) {
unsigned long ra = __pa(con_write_page);
@@ -470,7 +470,7 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign
}
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static inline void sunhv_console_putchar(struct uart_port *port, char c)
@@ -492,9 +492,9 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig
int i, locked = 1;
if (port->sysrq || oops_in_progress)
- locked = spin_trylock_irqsave(&port->lock, flags);
+ locked = uart_port_trylock_irqsave(port, &flags);
else
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
for (i = 0; i < n; i++) {
if (*s == '\n')
@@ -503,7 +503,7 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig
}
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static struct console sunhv_console = {
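[Note] The hunks above, and the rest of this diff, mechanically replace direct spin_lock_*() calls on port->lock with the uart_port_lock_*() wrapper family, while the console write paths keep the usual sysrq/oops_in_progress idiom: only try the lock when a crashing context may already hold it, and only unlock if the trylock succeeded. The wrapper definitions are not part of these hunks; the sketch below is a minimal assumption of what the irqsave variants provided by include/linux/serial_core.h look like, thin inlines over the spinlock embedded in struct uart_port (helper names taken from the hunks, bodies illustrative only):

#include <linux/serial_core.h>	/* struct uart_port; assumed to already provide these helpers */

/* Sketch only: presumed thin wrappers around the per-port spinlock. */
static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
{
	spin_lock_irqsave(&up->lock, *flags);
}

/* Returns true when the lock was taken; console write paths skip the
 * matching unlock when this fails under oops_in_progress. */
static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
{
	return spin_trylock_irqsave(&up->lock, *flags);
}

static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
{
	spin_unlock_irqrestore(&up->lock, flags);
}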
diff --git a/drivers/tty/serial/sunplus-uart.c b/drivers/tty/serial/sunplus-uart.c
index 3aacd5eb414c..4251f4e1ba99 100644
--- a/drivers/tty/serial/sunplus-uart.c
+++ b/drivers/tty/serial/sunplus-uart.c
@@ -184,7 +184,7 @@ static void sunplus_break_ctl(struct uart_port *port, int ctl)
unsigned long flags;
unsigned int lcr;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
lcr = readl(port->membase + SUP_UART_LCR);
@@ -195,7 +195,7 @@ static void sunplus_break_ctl(struct uart_port *port, int ctl)
writel(lcr, port->membase + SUP_UART_LCR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void transmit_chars(struct uart_port *port)
@@ -277,7 +277,7 @@ static irqreturn_t sunplus_uart_irq(int irq, void *args)
struct uart_port *port = args;
unsigned int isc;
- spin_lock(&port->lock);
+ uart_port_lock(port);
isc = readl(port->membase + SUP_UART_ISC);
@@ -287,7 +287,7 @@ static irqreturn_t sunplus_uart_irq(int irq, void *args)
if (isc & SUP_UART_ISC_TX)
transmit_chars(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
@@ -302,14 +302,14 @@ static int sunplus_startup(struct uart_port *port)
if (ret)
return ret;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* isc define Bit[7:4] int setting, Bit[3:0] int status
* isc register will clean Bit[3:0] int status after read
* only do a write to Bit[7:4] int setting
*/
isc |= SUP_UART_ISC_RXM;
writel(isc, port->membase + SUP_UART_ISC);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
@@ -318,13 +318,13 @@ static void sunplus_shutdown(struct uart_port *port)
{
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* isc define Bit[7:4] int setting, Bit[3:0] int status
* isc register will clean Bit[3:0] int status after read
* only do a write to Bit[7:4] int setting
*/
writel(0, port->membase + SUP_UART_ISC); /* disable all interrupt */
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
free_irq(port->irq, port);
}
@@ -372,7 +372,7 @@ static void sunplus_set_termios(struct uart_port *port,
lcr |= UART_LCR_EPAR;
}
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_update_timeout(port, termios->c_cflag, baud);
@@ -407,7 +407,7 @@ static void sunplus_set_termios(struct uart_port *port,
writel(div_l, port->membase + SUP_UART_DIV_L);
writel(lcr, port->membase + SUP_UART_LCR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void sunplus_set_ldisc(struct uart_port *port, struct ktermios *termios)
@@ -517,15 +517,15 @@ static void sunplus_console_write(struct console *co,
if (sunplus_console_ports[co->index]->port.sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock(&sunplus_console_ports[co->index]->port.lock);
+ locked = uart_port_trylock(&sunplus_console_ports[co->index]->port);
else
- spin_lock(&sunplus_console_ports[co->index]->port.lock);
+ uart_port_lock(&sunplus_console_ports[co->index]->port);
uart_console_write(&sunplus_console_ports[co->index]->port, s, count,
sunplus_uart_console_putchar);
if (locked)
- spin_unlock(&sunplus_console_ports[co->index]->port.lock);
+ uart_port_unlock(&sunplus_console_ports[co->index]->port);
local_irq_restore(flags);
}
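[Note] sunplus_console_write() above differs slightly from the other console paths in this diff: it manages the interrupt state itself with local_irq_save()/local_irq_restore() and therefore uses the plain, non-irqsave wrappers. On the same assumption as before that these are thin inlines over port->lock, a minimal sketch (names from the hunk, bodies illustrative only):

#include <linux/serial_core.h>	/* struct uart_port; assumed to already provide these helpers */

static inline void uart_port_lock(struct uart_port *up)
{
	spin_lock(&up->lock);
}

/* Returns true when the lock was taken. */
static inline bool uart_port_trylock(struct uart_port *up)
{
	return spin_trylock(&up->lock);
}

static inline void uart_port_unlock(struct uart_port *up)
{
	spin_unlock(&up->lock);
}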
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 40eeaf835bba..6aa51a6f8063 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -310,7 +310,7 @@ static irqreturn_t sunsab_interrupt(int irq, void *dev_id)
unsigned long flags;
unsigned char gis;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
status.stat = 0;
gis = readb(&up->regs->r.gis) >> up->gis_shift;
@@ -331,7 +331,7 @@ static irqreturn_t sunsab_interrupt(int irq, void *dev_id)
transmit_chars(up, &status);
}
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
if (port)
tty_flip_buffer_push(port);
@@ -473,12 +473,12 @@ static void sunsab_send_xchar(struct uart_port *port, char ch)
if (ch == __DISABLED_CHAR)
return;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
sunsab_tec_wait(up);
writeb(ch, &up->regs->w.tic);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
/* port->lock held by caller. */
@@ -499,7 +499,7 @@ static void sunsab_break_ctl(struct uart_port *port, int break_state)
unsigned long flags;
unsigned char val;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
val = up->cached_dafo;
if (break_state)
@@ -512,7 +512,7 @@ static void sunsab_break_ctl(struct uart_port *port, int break_state)
if (test_bit(SAB82532_XPR, &up->irqflags))
sunsab_tx_idle(up);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
/* port->lock is not held. */
@@ -527,7 +527,7 @@ static int sunsab_startup(struct uart_port *port)
if (err)
return err;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* Wait for any commands or immediate characters
@@ -582,7 +582,7 @@ static int sunsab_startup(struct uart_port *port)
set_bit(SAB82532_ALLS, &up->irqflags);
set_bit(SAB82532_XPR, &up->irqflags);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
return 0;
}
@@ -594,7 +594,7 @@ static void sunsab_shutdown(struct uart_port *port)
container_of(port, struct uart_sunsab_port, port);
unsigned long flags;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
/* Disable Interrupts */
up->interrupt_mask0 = 0xff;
@@ -628,7 +628,7 @@ static void sunsab_shutdown(struct uart_port *port)
writeb(tmp, &up->regs->rw.ccr0);
#endif
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
free_irq(up->port.irq, up);
}
@@ -779,9 +779,9 @@ static void sunsab_set_termios(struct uart_port *port, struct ktermios *termios,
unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
unsigned int quot = uart_get_divisor(port, baud);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
sunsab_convert_to_sab(up, termios->c_cflag, termios->c_iflag, baud, quot);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static const char *sunsab_type(struct uart_port *port)
@@ -857,15 +857,15 @@ static void sunsab_console_write(struct console *con, const char *s, unsigned n)
int locked = 1;
if (up->port.sysrq || oops_in_progress)
- locked = spin_trylock_irqsave(&up->port.lock, flags);
+ locked = uart_port_trylock_irqsave(&up->port, &flags);
else
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
uart_console_write(&up->port, s, n, sunsab_console_putchar);
sunsab_tec_wait(up);
if (locked)
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static int sunsab_console_setup(struct console *con, char *options)
@@ -914,7 +914,7 @@ static int sunsab_console_setup(struct console *con, char *options)
*/
sunsab_startup(&up->port);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* Finally, enable interrupts
@@ -932,7 +932,7 @@ static int sunsab_console_setup(struct console *con, char *options)
sunsab_convert_to_sab(up, con->cflag, 0, baud, quot);
sunsab_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
return 0;
}
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 58a4342ad0f9..1e051cc2591c 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -212,9 +212,9 @@ static void enable_rsa(struct uart_sunsu_port *up)
{
if (up->port.type == PORT_RSA) {
if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) {
- spin_lock_irq(&up->port.lock);
+ uart_port_lock_irq(&up->port);
__enable_rsa(up);
- spin_unlock_irq(&up->port.lock);
+ uart_port_unlock_irq(&up->port);
}
if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
serial_outp(up, UART_RSA_FRR, 0);
@@ -234,7 +234,7 @@ static void disable_rsa(struct uart_sunsu_port *up)
if (up->port.type == PORT_RSA &&
up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
- spin_lock_irq(&up->port.lock);
+ uart_port_lock_irq(&up->port);
mode = serial_inp(up, UART_RSA_MSR);
result = !(mode & UART_RSA_MSR_FIFO);
@@ -247,7 +247,7 @@ static void disable_rsa(struct uart_sunsu_port *up)
if (result)
up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;
- spin_unlock_irq(&up->port.lock);
+ uart_port_unlock_irq(&up->port);
}
}
#endif /* CONFIG_SERIAL_8250_RSA */
@@ -311,10 +311,10 @@ static void sunsu_enable_ms(struct uart_port *port)
container_of(port, struct uart_sunsu_port, port);
unsigned long flags;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static void
@@ -456,7 +456,7 @@ static irqreturn_t sunsu_serial_interrupt(int irq, void *dev_id)
unsigned long flags;
unsigned char status;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
do {
status = serial_inp(up, UART_LSR);
@@ -470,7 +470,7 @@ static irqreturn_t sunsu_serial_interrupt(int irq, void *dev_id)
} while (!(serial_in(up, UART_IIR) & UART_IIR_NO_INT));
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
return IRQ_HANDLED;
}
@@ -545,9 +545,9 @@ static unsigned int sunsu_tx_empty(struct uart_port *port)
unsigned long flags;
unsigned int ret;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
return ret;
}
@@ -599,13 +599,13 @@ static void sunsu_break_ctl(struct uart_port *port, int break_state)
container_of(port, struct uart_sunsu_port, port);
unsigned long flags;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
if (break_state == -1)
up->lcr |= UART_LCR_SBC;
else
up->lcr &= ~UART_LCR_SBC;
serial_out(up, UART_LCR, up->lcr);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static int sunsu_startup(struct uart_port *port)
@@ -683,12 +683,12 @@ static int sunsu_startup(struct uart_port *port)
*/
serial_outp(up, UART_LCR, UART_LCR_WLEN8);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
up->port.mctrl |= TIOCM_OUT2;
sunsu_set_mctrl(&up->port, up->port.mctrl);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
/*
* Finally, enable interrupts. Note: Modem status interrupts
@@ -731,7 +731,7 @@ static void sunsu_shutdown(struct uart_port *port)
up->ier = 0;
serial_outp(up, UART_IER, 0);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
if (up->port.flags & UPF_FOURPORT) {
/* reset interrupts on the AST Fourport board */
inb((up->port.iobase & 0xfe0) | 0x1f);
@@ -740,7 +740,7 @@ static void sunsu_shutdown(struct uart_port *port)
up->port.mctrl &= ~TIOCM_OUT2;
sunsu_set_mctrl(&up->port, up->port.mctrl);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
/*
* Disable break condition and FIFOs
@@ -826,7 +826,7 @@ sunsu_change_speed(struct uart_port *port, unsigned int cflag,
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* Update the per-port timeout.
@@ -891,7 +891,7 @@ sunsu_change_speed(struct uart_port *port, unsigned int cflag,
up->cflag = cflag;
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static void
@@ -1038,7 +1038,7 @@ static void sunsu_autoconfig(struct uart_sunsu_port *up)
up->type_probed = PORT_UNKNOWN;
up->port.iotype = UPIO_MEM;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
if (!(up->port.flags & UPF_BUGGY_UART)) {
/*
@@ -1173,7 +1173,7 @@ static void sunsu_autoconfig(struct uart_sunsu_port *up)
serial_outp(up, UART_IER, 0);
out:
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static struct uart_driver sunsu_reg = {
@@ -1298,9 +1298,9 @@ static void sunsu_console_write(struct console *co, const char *s,
int locked = 1;
if (up->port.sysrq || oops_in_progress)
- locked = spin_trylock_irqsave(&up->port.lock, flags);
+ locked = uart_port_trylock_irqsave(&up->port, &flags);
else
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* First save the UER then disable the interrupts
@@ -1318,7 +1318,7 @@ static void sunsu_console_write(struct console *co, const char *s,
serial_out(up, UART_IER, ier);
if (locked)
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
/*
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index c8c71c56264c..d3b5e864b727 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -531,7 +531,7 @@ static irqreturn_t sunzilog_interrupt(int irq, void *dev_id)
struct tty_port *port;
unsigned char r3;
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
r3 = read_zsreg(channel, R3);
/* Channel A */
@@ -548,7 +548,7 @@ static irqreturn_t sunzilog_interrupt(int irq, void *dev_id)
if (r3 & CHATxIP)
sunzilog_transmit_chars(up, channel);
}
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
if (port)
tty_flip_buffer_push(port);
@@ -557,7 +557,7 @@ static irqreturn_t sunzilog_interrupt(int irq, void *dev_id)
up = up->next;
channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
port = NULL;
if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
writeb(RES_H_IUS, &channel->control);
@@ -571,7 +571,7 @@ static irqreturn_t sunzilog_interrupt(int irq, void *dev_id)
if (r3 & CHBTxIP)
sunzilog_transmit_chars(up, channel);
}
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
if (port)
tty_flip_buffer_push(port);
@@ -604,11 +604,11 @@ static unsigned int sunzilog_tx_empty(struct uart_port *port)
unsigned char status;
unsigned int ret;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
status = sunzilog_read_channel_status(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (status & Tx_BUF_EMP)
ret = TIOCSER_TEMT;
@@ -764,7 +764,7 @@ static void sunzilog_break_ctl(struct uart_port *port, int break_state)
else
clear_bits |= SND_BRK;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
new_reg = (up->curregs[R5] | set_bits) & ~clear_bits;
if (new_reg != up->curregs[R5]) {
@@ -774,7 +774,7 @@ static void sunzilog_break_ctl(struct uart_port *port, int break_state)
write_zsreg(channel, R5, up->curregs[R5]);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void __sunzilog_startup(struct uart_sunzilog_port *up)
@@ -800,9 +800,9 @@ static int sunzilog_startup(struct uart_port *port)
if (ZS_IS_CONS(up))
return 0;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
__sunzilog_startup(up);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
@@ -840,7 +840,7 @@ static void sunzilog_shutdown(struct uart_port *port)
if (ZS_IS_CONS(up))
return;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
channel = ZILOG_CHANNEL_FROM_PORT(port);
@@ -853,7 +853,7 @@ static void sunzilog_shutdown(struct uart_port *port)
up->curregs[R5] &= ~SND_BRK;
sunzilog_maybe_update_regs(up, channel);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/* Shared by TTY driver and serial console setup. The port lock is held
@@ -945,7 +945,7 @@ sunzilog_set_termios(struct uart_port *port, struct ktermios *termios,
baud = uart_get_baud_rate(port, termios, old, 1200, 76800);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
@@ -962,7 +962,7 @@ sunzilog_set_termios(struct uart_port *port, struct ktermios *termios,
uart_update_timeout(port, termios->c_cflag, baud);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static const char *sunzilog_type(struct uart_port *port)
@@ -1201,15 +1201,15 @@ sunzilog_console_write(struct console *con, const char *s, unsigned int count)
int locked = 1;
if (up->port.sysrq || oops_in_progress)
- locked = spin_trylock_irqsave(&up->port.lock, flags);
+ locked = uart_port_trylock_irqsave(&up->port, &flags);
else
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
uart_console_write(&up->port, s, count, sunzilog_putchar);
udelay(2);
if (locked)
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static int __init sunzilog_console_setup(struct console *con, char *options)
@@ -1244,7 +1244,7 @@ static int __init sunzilog_console_setup(struct console *con, char *options)
brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
up->curregs[R15] |= BRKIE;
sunzilog_convert_to_zs(up, con->cflag, 0, brg);
@@ -1252,7 +1252,7 @@ static int __init sunzilog_console_setup(struct console *con, char *options)
sunzilog_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS);
__sunzilog_startup(up);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
return 0;
}
@@ -1333,7 +1333,7 @@ static void sunzilog_init_hw(struct uart_sunzilog_port *up)
channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
if (ZS_IS_CHANNEL_A(up)) {
write_zsreg(channel, R9, FHWRES);
ZSDELAY_LONG();
@@ -1383,7 +1383,7 @@ static void sunzilog_init_hw(struct uart_sunzilog_port *up)
write_zsreg(channel, R9, up->curregs[R9]);
}
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
#ifdef CONFIG_SERIO
if (up->flags & (SUNZILOG_FLAG_CONS_KEYB |
diff --git a/drivers/tty/serial/timbuart.c b/drivers/tty/serial/timbuart.c
index 0859394a78cd..0cc6524f5e8b 100644
--- a/drivers/tty/serial/timbuart.c
+++ b/drivers/tty/serial/timbuart.c
@@ -174,7 +174,7 @@ static void timbuart_tasklet(struct tasklet_struct *t)
struct timbuart_port *uart = from_tasklet(uart, t, tasklet);
u32 isr, ier = 0;
- spin_lock(&uart->port.lock);
+ uart_port_lock(&uart->port);
isr = ioread32(uart->port.membase + TIMBUART_ISR);
dev_dbg(uart->port.dev, "%s ISR: %x\n", __func__, isr);
@@ -189,7 +189,7 @@ static void timbuart_tasklet(struct tasklet_struct *t)
iowrite32(ier, uart->port.membase + TIMBUART_IER);
- spin_unlock(&uart->port.lock);
+ uart_port_unlock(&uart->port);
dev_dbg(uart->port.dev, "%s leaving\n", __func__);
}
@@ -295,10 +295,10 @@ static void timbuart_set_termios(struct uart_port *port,
tty_termios_copy_hw(termios, old);
tty_termios_encode_baud_rate(termios, baud, baud);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
iowrite8((u8)bindex, port->membase + TIMBUART_BAUDRATE);
uart_update_timeout(port, termios->c_cflag, baud);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *timbuart_type(struct uart_port *port)
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index b225a78f6175..404c14acafa5 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -216,11 +216,11 @@ static irqreturn_t ulite_isr(int irq, void *dev_id)
unsigned long flags;
do {
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
stat = uart_in32(ULITE_STATUS, port);
busy = ulite_receive(port, stat);
busy |= ulite_transmit(port, stat);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
n++;
} while (busy);
@@ -238,9 +238,9 @@ static unsigned int ulite_tx_empty(struct uart_port *port)
unsigned long flags;
unsigned int ret;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ret = uart_in32(ULITE_STATUS, port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return ret & ULITE_STATUS_TXEMPTY ? TIOCSER_TEMT : 0;
}
@@ -323,7 +323,7 @@ static void ulite_set_termios(struct uart_port *port,
termios->c_cflag |= pdata->cflags & (PARENB | PARODD | CSIZE);
tty_termios_encode_baud_rate(termios, pdata->baud, pdata->baud);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
port->read_status_mask = ULITE_STATUS_RXVALID | ULITE_STATUS_OVERRUN
| ULITE_STATUS_TXFULL;
@@ -346,7 +346,7 @@ static void ulite_set_termios(struct uart_port *port,
/* update timeout */
uart_update_timeout(port, termios->c_cflag, pdata->baud);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *ulite_type(struct uart_port *port)
@@ -495,9 +495,9 @@ static void ulite_console_write(struct console *co, const char *s,
int locked = 1;
if (oops_in_progress) {
- locked = spin_trylock_irqsave(&port->lock, flags);
+ locked = uart_port_trylock_irqsave(port, &flags);
} else
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* save and disable interrupt */
ier = uart_in32(ULITE_STATUS, port) & ULITE_STATUS_IE;
@@ -512,7 +512,7 @@ static void ulite_console_write(struct console *co, const char *s,
uart_out32(ULITE_CONTROL_IE, ULITE_CONTROL, port);
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int ulite_console_setup(struct console *co, char *options)
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index b06661b80f41..ed7a6bb5596a 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -931,7 +931,7 @@ static void qe_uart_set_termios(struct uart_port *port,
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
/* Do we really need a spinlock here? */
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Update the per-port timeout. */
uart_update_timeout(port, termios->c_cflag, baud);
@@ -949,7 +949,7 @@ static void qe_uart_set_termios(struct uart_port *port,
qe_setbrg(qe_port->us_info.tx_clock, baud, 16);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/*
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index c5d5c2765119..78a1c1eea11b 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -227,7 +227,7 @@ static irqreturn_t vt8500_irq(int irq, void *dev_id)
struct uart_port *port = dev_id;
unsigned long isr;
- spin_lock(&port->lock);
+ uart_port_lock(port);
isr = vt8500_read(port, VT8500_URISR);
/* Acknowledge active status bits */
@@ -240,7 +240,7 @@ static irqreturn_t vt8500_irq(int irq, void *dev_id)
if (isr & TCTS)
handle_delta_cts(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
@@ -342,7 +342,7 @@ static void vt8500_set_termios(struct uart_port *port,
unsigned int baud, lcr;
unsigned int loops = 1000;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* calculate and set baud rate */
baud = uart_get_baud_rate(port, termios, old, 900, 921600);
@@ -410,7 +410,7 @@ static void vt8500_set_termios(struct uart_port *port,
vt8500_write(&vt8500_port->uart, 0x881, VT8500_URFCR);
vt8500_write(&vt8500_port->uart, vt8500_port->ier, VT8500_URIER);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *vt8500_type(struct uart_port *port)
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 2e5e86a00a77..66a45a634158 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -346,7 +346,7 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
struct uart_port *port = (struct uart_port *)dev_id;
unsigned int isrstatus;
- spin_lock(&port->lock);
+ uart_port_lock(port);
/* Read the interrupt status register to determine which
* interrupt(s) is/are active and clear them.
@@ -369,7 +369,7 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
!(readl(port->membase + CDNS_UART_CR) & CDNS_UART_CR_RX_DIS))
cdns_uart_handle_rx(dev_id, isrstatus);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
@@ -506,14 +506,14 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb,
return NOTIFY_BAD;
}
- spin_lock_irqsave(&cdns_uart->port->lock, flags);
+ uart_port_lock_irqsave(cdns_uart->port, &flags);
/* Disable the TX and RX to set baud rate */
ctrl_reg = readl(port->membase + CDNS_UART_CR);
ctrl_reg |= CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS;
writel(ctrl_reg, port->membase + CDNS_UART_CR);
- spin_unlock_irqrestore(&cdns_uart->port->lock, flags);
+ uart_port_unlock_irqrestore(cdns_uart->port, flags);
return NOTIFY_OK;
}
@@ -523,7 +523,7 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb,
* frequency.
*/
- spin_lock_irqsave(&cdns_uart->port->lock, flags);
+ uart_port_lock_irqsave(cdns_uart->port, &flags);
locked = 1;
port->uartclk = ndata->new_rate;
@@ -533,7 +533,7 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb,
fallthrough;
case ABORT_RATE_CHANGE:
if (!locked)
- spin_lock_irqsave(&cdns_uart->port->lock, flags);
+ uart_port_lock_irqsave(cdns_uart->port, &flags);
/* Set TX/RX Reset */
ctrl_reg = readl(port->membase + CDNS_UART_CR);
@@ -555,7 +555,7 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb,
ctrl_reg |= CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN;
writel(ctrl_reg, port->membase + CDNS_UART_CR);
- spin_unlock_irqrestore(&cdns_uart->port->lock, flags);
+ uart_port_unlock_irqrestore(cdns_uart->port, flags);
return NOTIFY_OK;
default:
@@ -652,19 +652,19 @@ static void cdns_uart_break_ctl(struct uart_port *port, int ctl)
unsigned int status;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
status = readl(port->membase + CDNS_UART_CR);
if (ctl == -1)
- writel(CDNS_UART_CR_STARTBRK | status,
+ writel(CDNS_UART_CR_STARTBRK | (~CDNS_UART_CR_STOPBRK & status),
port->membase + CDNS_UART_CR);
else {
if ((status & CDNS_UART_CR_STOPBRK) == 0)
writel(CDNS_UART_CR_STOPBRK | status,
port->membase + CDNS_UART_CR);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/**
@@ -683,7 +683,7 @@ static void cdns_uart_set_termios(struct uart_port *port,
unsigned long flags;
unsigned int ctrl_reg, mode_reg;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Disable the TX and RX to set baud rate */
ctrl_reg = readl(port->membase + CDNS_UART_CR);
@@ -794,7 +794,7 @@ static void cdns_uart_set_termios(struct uart_port *port,
cval &= ~CDNS_UART_MODEMCR_FCM;
writel(cval, port->membase + CDNS_UART_MODEMCR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/**
@@ -813,7 +813,7 @@ static int cdns_uart_startup(struct uart_port *port)
is_brk_support = cdns_uart->quirks & CDNS_UART_RXBS_SUPPORT;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Disable the TX and RX */
writel(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS,
@@ -861,7 +861,7 @@ static int cdns_uart_startup(struct uart_port *port)
writel(readl(port->membase + CDNS_UART_ISR),
port->membase + CDNS_UART_ISR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
ret = request_irq(port->irq, cdns_uart_isr, 0, CDNS_UART_NAME, port);
if (ret) {
@@ -889,7 +889,7 @@ static void cdns_uart_shutdown(struct uart_port *port)
int status;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Disable interrupts */
status = readl(port->membase + CDNS_UART_IMR);
@@ -900,7 +900,7 @@ static void cdns_uart_shutdown(struct uart_port *port)
writel(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS,
port->membase + CDNS_UART_CR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
free_irq(port->irq, port);
}
@@ -1050,7 +1050,7 @@ static int cdns_uart_poll_get_char(struct uart_port *port)
int c;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Check if FIFO is empty */
if (readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_RXEMPTY)
@@ -1058,7 +1058,7 @@ static int cdns_uart_poll_get_char(struct uart_port *port)
else /* Read a character */
c = (unsigned char) readl(port->membase + CDNS_UART_FIFO);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return c;
}
@@ -1067,7 +1067,7 @@ static void cdns_uart_poll_put_char(struct uart_port *port, unsigned char c)
{
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Wait until FIFO is empty */
while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXEMPTY))
@@ -1080,7 +1080,7 @@ static void cdns_uart_poll_put_char(struct uart_port *port, unsigned char c)
while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXEMPTY))
cpu_relax();
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
#endif
@@ -1232,9 +1232,9 @@ static void cdns_uart_console_write(struct console *co, const char *s,
if (port->sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock_irqsave(&port->lock, flags);
+ locked = uart_port_trylock_irqsave(port, &flags);
else
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* save and disable interrupt */
imr = readl(port->membase + CDNS_UART_IMR);
@@ -1257,7 +1257,7 @@ static void cdns_uart_console_write(struct console *co, const char *s,
writel(imr, port->membase + CDNS_UART_IER);
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/**
@@ -1325,7 +1325,7 @@ static int cdns_uart_suspend(struct device *device)
if (console_suspend_enabled && uart_console(port) && may_wake) {
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Empty the receive FIFO 1st before making changes */
while (!(readl(port->membase + CDNS_UART_SR) &
CDNS_UART_SR_RXEMPTY))
@@ -1334,7 +1334,7 @@ static int cdns_uart_suspend(struct device *device)
writel(1, port->membase + CDNS_UART_RXWM);
/* disable RX timeout interrups */
writel(CDNS_UART_IXR_TOUT, port->membase + CDNS_UART_IDR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/*
@@ -1372,7 +1372,7 @@ static int cdns_uart_resume(struct device *device)
return ret;
}
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Set TX/RX Reset */
ctrl_reg = readl(port->membase + CDNS_UART_CR);
@@ -1392,14 +1392,14 @@ static int cdns_uart_resume(struct device *device)
clk_disable(cdns_uart->uartclk);
clk_disable(cdns_uart->pclk);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
} else {
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* restore original rx trigger level */
writel(rx_trigger_level, port->membase + CDNS_UART_RXWM);
/* enable RX timeout interrupt */
writel(CDNS_UART_IXR_TOUT, port->membase + CDNS_UART_IER);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
return uart_resume_port(cdns_uart->cdns_uart_driver, port);
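[Note] One hunk in xilinx_uartps.c above is not purely mechanical: cdns_uart_break_ctl() now masks CDNS_UART_CR_STOPBRK out of the read-back control value before asserting CDNS_UART_CR_STARTBRK, presumably so a stop-break bit still latched in the register cannot cancel the break being started. Side by side, the change amounts to (illustrative restatement of the hunk, using the driver's CDNS_UART_* definitions):

/* old: OR STARTBRK into the read-back value, STOPBRK included */
writel(CDNS_UART_CR_STARTBRK | status, port->membase + CDNS_UART_CR);

/* new: drop STOPBRK from the read-back value before starting the break */
writel(CDNS_UART_CR_STARTBRK | (~CDNS_UART_CR_STOPBRK & status),
       port->membase + CDNS_UART_CR);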
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 23198e3f1461..6b4a28bcf2f5 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -262,13 +262,14 @@ static void sysrq_handle_showallcpus(u8 key)
if (in_hardirq())
regs = get_irq_regs();
- pr_info("CPU%d:\n", smp_processor_id());
+ pr_info("CPU%d:\n", get_cpu());
if (regs)
show_regs(regs);
else
show_stack(NULL, NULL, KERN_INFO);
schedule_work(&sysrq_showallcpus);
+ put_cpu();
}
}
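[Note] The sysrq hunk above swaps smp_processor_id() for a get_cpu()/put_cpu() pair. get_cpu() disables preemption and returns the current CPU id, so the id printed cannot go stale while the registers and backtrace for this CPU are dumped; put_cpu() re-enables preemption once the local dump and the schedule_work() call are done. A minimal standalone sketch of that pattern (illustrative, not the driver code):

#include <linux/smp.h>
#include <linux/printk.h>

static void dump_this_cpu(void)
{
	int cpu = get_cpu();	/* preempt_disable() + smp_processor_id() */

	pr_info("CPU%d:\n", cpu);
	/* ... print registers/stack for the now-pinned current CPU ... */

	put_cpu();		/* preempt_enable() */
}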
diff --git a/drivers/tty/tty.h b/drivers/tty/tty.h
index 50862f98273e..93cf5ef1e857 100644
--- a/drivers/tty/tty.h
+++ b/drivers/tty/tty.h
@@ -41,15 +41,20 @@ enum {
};
/* Values for tty->flow_change */
-#define TTY_THROTTLE_SAFE 1
-#define TTY_UNTHROTTLE_SAFE 2
+enum tty_flow_change {
+ TTY_FLOW_NO_CHANGE,
+ TTY_THROTTLE_SAFE,
+ TTY_UNTHROTTLE_SAFE,
+};
-static inline void __tty_set_flow_change(struct tty_struct *tty, int val)
+static inline void __tty_set_flow_change(struct tty_struct *tty,
+ enum tty_flow_change val)
{
tty->flow_change = val;
}
-static inline void tty_set_flow_change(struct tty_struct *tty, int val)
+static inline void tty_set_flow_change(struct tty_struct *tty,
+ enum tty_flow_change val)
{
tty->flow_change = val;
smp_mb();
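[Note] The tty.h hunk above turns the flow_change magic numbers into enum tty_flow_change and types the setters accordingly; the smp_mb() in tty_set_flow_change() presumably orders the store against the checks done later by the *_safe() helpers (which this diff converts to bool further down, in tty_ioctl.c). A hypothetical caller sketch, modelled on the usual line-discipline pattern and not taken from this diff; receive_room_is_low() is made up for illustration:

#include <linux/tty.h>
#include "tty.h"	/* tty_set_flow_change(), tty_throttle_safe() */

static void maybe_throttle(struct tty_struct *tty)
{
	tty_set_flow_change(tty, TTY_THROTTLE_SAFE);
	if (receive_room_is_low(tty))
		tty_throttle_safe(tty);	/* backs off if a racing unthrottle was requested */
	__tty_set_flow_change(tty, TTY_FLOW_NO_CHANGE);
}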
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 5f6d0cf67571..f8883afbeeba 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -69,12 +69,11 @@ EXPORT_SYMBOL_GPL(tty_buffer_lock_exclusive);
void tty_buffer_unlock_exclusive(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
- int restart;
-
- restart = buf->head->commit != buf->head->read;
+ bool restart = buf->head->commit != buf->head->read;
atomic_dec(&buf->priority);
mutex_unlock(&buf->lock);
+
if (restart)
queue_work(system_unbound_wq, &buf->work);
}
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 0216a468b438..06414e43e0b5 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -159,7 +159,7 @@ static int tty_fasync(int fd, struct file *filp, int on);
static void release_tty(struct tty_struct *tty, int idx);
/**
- * free_tty_struct - free a disused tty
+ * free_tty_struct - free a disused tty
* @tty: tty struct to free
*
* Free the write buffers, tty queue and tty memory itself.
@@ -233,7 +233,7 @@ static void tty_del_file(struct file *file)
}
/**
- * tty_name - return tty naming
+ * tty_name - return tty naming
* @tty: tty structure
*
* Convert a tty structure into a name. The name reflects the kernel naming
@@ -295,7 +295,7 @@ static void check_tty_count(struct tty_struct *tty, const char *routine)
}
/**
- * get_tty_driver - find device of a tty
+ * get_tty_driver - find device of a tty
* @device: device identifier
* @index: returns the index of the tty
*
@@ -320,7 +320,7 @@ static struct tty_driver *get_tty_driver(dev_t device, int *index)
}
/**
- * tty_dev_name_to_number - return dev_t for device name
+ * tty_dev_name_to_number - return dev_t for device name
* @name: user space name of device under /dev
* @number: pointer to dev_t that this function will populate
*
@@ -372,7 +372,7 @@ EXPORT_SYMBOL_GPL(tty_dev_name_to_number);
#ifdef CONFIG_CONSOLE_POLL
/**
- * tty_find_polling_driver - find device of a polled tty
+ * tty_find_polling_driver - find device of a polled tty
* @name: name string to match
* @line: pointer to resulting tty line nr
*
@@ -505,7 +505,7 @@ static DEFINE_SPINLOCK(redirect_lock);
static struct file *redirect;
/**
- * tty_wakeup - request more data
+ * tty_wakeup - request more data
* @tty: terminal
*
* Internal and external helper for wakeups of tty. This function informs the
@@ -529,7 +529,7 @@ void tty_wakeup(struct tty_struct *tty)
EXPORT_SYMBOL_GPL(tty_wakeup);
/**
- * tty_release_redirect - Release a redirect on a pty if present
+ * tty_release_redirect - Release a redirect on a pty if present
* @tty: tty device
*
* This is available to the pty code so if the master closes, if the slave is a
@@ -550,7 +550,7 @@ static struct file *tty_release_redirect(struct tty_struct *tty)
}
/**
- * __tty_hangup - actual handler for hangup events
+ * __tty_hangup - actual handler for hangup events
* @tty: tty device
* @exit_session: if non-zero, signal all foreground group processes
*
@@ -673,7 +673,7 @@ static void do_tty_hangup(struct work_struct *work)
}
/**
- * tty_hangup - trigger a hangup event
+ * tty_hangup - trigger a hangup event
* @tty: tty to hangup
*
* A carrier loss (virtual or otherwise) has occurred on @tty. Schedule a
@@ -687,7 +687,7 @@ void tty_hangup(struct tty_struct *tty)
EXPORT_SYMBOL(tty_hangup);
/**
- * tty_vhangup - process vhangup
+ * tty_vhangup - process vhangup
* @tty: tty to hangup
*
* The user has asked via system call for the terminal to be hung up. We do
@@ -703,7 +703,7 @@ EXPORT_SYMBOL(tty_vhangup);
/**
- * tty_vhangup_self - process vhangup for own ctty
+ * tty_vhangup_self - process vhangup for own ctty
*
* Perform a vhangup on the current controlling tty
*/
@@ -719,7 +719,7 @@ void tty_vhangup_self(void)
}
/**
- * tty_vhangup_session - hangup session leader exit
+ * tty_vhangup_session - hangup session leader exit
* @tty: tty to hangup
*
* The session leader is exiting and hanging up its controlling terminal.
@@ -735,7 +735,7 @@ void tty_vhangup_session(struct tty_struct *tty)
}
/**
- * tty_hung_up_p - was tty hung up
+ * tty_hung_up_p - was tty hung up
* @filp: file pointer of tty
*
* Return: true if the tty has been subject to a vhangup or a carrier loss
@@ -756,7 +756,7 @@ void __stop_tty(struct tty_struct *tty)
}
/**
- * stop_tty - propagate flow control
+ * stop_tty - propagate flow control
* @tty: tty to stop
*
* Perform flow control to the driver. May be called on an already stopped
@@ -790,7 +790,7 @@ void __start_tty(struct tty_struct *tty)
}
/**
- * start_tty - propagate flow control
+ * start_tty - propagate flow control
* @tty: tty to start
*
* Start a tty that has been stopped if at all possible. If @tty was previously
@@ -902,7 +902,7 @@ static ssize_t iterate_tty_read(struct tty_ldisc *ld, struct tty_struct *tty,
/**
- * tty_read - read method for tty device files
+ * tty_read - read method for tty device files
* @iocb: kernel I/O control block
* @to: destination for the data read
*
@@ -1095,7 +1095,7 @@ static ssize_t file_tty_write(struct file *file, struct kiocb *iocb, struct iov_
}
/**
- * tty_write - write method for tty device file
+ * tty_write - write method for tty device file
* @iocb: kernel I/O control block
* @from: iov_iter with data to write
*
@@ -1137,7 +1137,7 @@ ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
}
/**
- * tty_send_xchar - send priority character
+ * tty_send_xchar - send priority character
* @tty: the tty to send to
* @ch: xchar to send
*
@@ -1171,7 +1171,7 @@ int tty_send_xchar(struct tty_struct *tty, char ch)
}
/**
- * pty_line_name - generate name for a pty
+ * pty_line_name - generate name for a pty
* @driver: the tty driver in use
* @index: the minor number
* @p: output buffer of at least 6 bytes
@@ -1192,7 +1192,7 @@ static void pty_line_name(struct tty_driver *driver, int index, char *p)
}
/**
- * tty_line_name - generate name for a tty
+ * tty_line_name - generate name for a tty
* @driver: the tty driver in use
* @index: the minor number
* @p: output buffer of at least 7 bytes
@@ -1243,7 +1243,7 @@ static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
}
/**
- * tty_init_termios - helper for termios setup
+ * tty_init_termios - helper for termios setup
* @tty: the tty to set up
*
* Initialise the termios structure for this tty. This runs under the
@@ -1326,7 +1326,7 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
}
/**
- * tty_reopen() - fast re-open of an open tty
+ * tty_reopen() - fast re-open of an open tty
* @tty: the tty to open
*
* Re-opens on master ptys are not allowed and return -%EIO.
@@ -1370,7 +1370,7 @@ static int tty_reopen(struct tty_struct *tty)
}
/**
- * tty_init_dev - initialise a tty device
+ * tty_init_dev - initialise a tty device
* @driver: tty driver we are opening a device on
* @idx: device index
*
@@ -1492,7 +1492,7 @@ void tty_save_termios(struct tty_struct *tty)
EXPORT_SYMBOL_GPL(tty_save_termios);
/**
- * tty_flush_works - flush all works of a tty/pty pair
+ * tty_flush_works - flush all works of a tty/pty pair
* @tty: tty device to flush works for (or either end of a pty pair)
*
* Sync flush all works belonging to @tty (and the 'other' tty).
@@ -1508,7 +1508,7 @@ static void tty_flush_works(struct tty_struct *tty)
}
/**
- * release_one_tty - release tty structure memory
+ * release_one_tty - release tty structure memory
* @work: work of tty we are obliterating
*
* Releases memory associated with a tty structure, and clears out the
@@ -1556,7 +1556,7 @@ static void queue_release_one_tty(struct kref *kref)
}
/**
- * tty_kref_put - release a tty kref
+ * tty_kref_put - release a tty kref
* @tty: tty device
*
* Release a reference to the @tty device and if need be let the kref layer
@@ -1570,7 +1570,7 @@ void tty_kref_put(struct tty_struct *tty)
EXPORT_SYMBOL(tty_kref_put);
/**
- * release_tty - release tty structure memory
+ * release_tty - release tty structure memory
* @tty: tty device release
* @idx: index of the tty device release
*
@@ -1647,7 +1647,7 @@ static int tty_release_checks(struct tty_struct *tty, int idx)
}
/**
- * tty_kclose - closes tty opened by tty_kopen
+ * tty_kclose - closes tty opened by tty_kopen
* @tty: tty device
*
* Performs the final steps to release and free a tty device. It is the same as
@@ -1677,7 +1677,7 @@ void tty_kclose(struct tty_struct *tty)
EXPORT_SYMBOL_GPL(tty_kclose);
/**
- * tty_release_struct - release a tty struct
+ * tty_release_struct - release a tty struct
* @tty: tty device
* @idx: index of the tty
*
@@ -1706,7 +1706,7 @@ void tty_release_struct(struct tty_struct *tty, int idx)
EXPORT_SYMBOL_GPL(tty_release_struct);
/**
- * tty_release - vfs callback for close
+ * tty_release - vfs callback for close
* @inode: inode of tty
* @filp: file pointer for handle to tty
*
@@ -1987,7 +1987,7 @@ out:
}
/**
- * tty_kopen_exclusive - open a tty device for kernel
+ * tty_kopen_exclusive - open a tty device for kernel
* @device: dev_t of device to open
*
* Opens tty exclusively for kernel. Performs the driver lookup, makes sure
@@ -2007,7 +2007,7 @@ struct tty_struct *tty_kopen_exclusive(dev_t device)
EXPORT_SYMBOL_GPL(tty_kopen_exclusive);
/**
- * tty_kopen_shared - open a tty device for shared in-kernel use
+ * tty_kopen_shared - open a tty device for shared in-kernel use
* @device: dev_t of device to open
*
* Opens an already existing tty for in-kernel use. Compared to
@@ -2022,7 +2022,7 @@ struct tty_struct *tty_kopen_shared(dev_t device)
EXPORT_SYMBOL_GPL(tty_kopen_shared);
/**
- * tty_open_by_driver - open a tty device
+ * tty_open_by_driver - open a tty device
* @device: dev_t of device to open
* @filp: file pointer to tty
*
@@ -2090,7 +2090,7 @@ out:
}
/**
- * tty_open - open a tty device
+ * tty_open - open a tty device
* @inode: inode of device file
* @filp: file pointer to tty
*
@@ -2184,7 +2184,7 @@ retry_open:
/**
- * tty_poll - check tty status
+ * tty_poll - check tty status
* @filp: file being polled
* @wait: poll wait structures to update
*
@@ -2262,7 +2262,7 @@ static int tty_fasync(int fd, struct file *filp, int on)
static bool tty_legacy_tiocsti __read_mostly = IS_ENABLED(CONFIG_LEGACY_TIOCSTI);
/**
- * tiocsti - fake input character
+ * tiocsti - fake input character
* @tty: tty to fake input into
* @p: pointer to character
*
@@ -2299,7 +2299,7 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
}
/**
- * tiocgwinsz - implement window query ioctl
+ * tiocgwinsz - implement window query ioctl
* @tty: tty
* @arg: user buffer for result
*
@@ -2320,7 +2320,7 @@ static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg)
}
/**
- * tty_do_resize - resize event
+ * tty_do_resize - resize event
* @tty: tty being resized
* @ws: new dimensions
*
@@ -2350,7 +2350,7 @@ done:
EXPORT_SYMBOL(tty_do_resize);
/**
- * tiocswinsz - implement window size set ioctl
+ * tiocswinsz - implement window size set ioctl
* @tty: tty side of tty
* @arg: user buffer for result
*
@@ -2377,7 +2377,7 @@ static int tiocswinsz(struct tty_struct *tty, struct winsize __user *arg)
}
/**
- * tioccons - allow admin to move logical console
+ * tioccons - allow admin to move logical console
* @file: the file to become console
*
* Allow the administrator to move the redirected console device.
@@ -2416,7 +2416,7 @@ static int tioccons(struct file *file)
}
/**
- * tiocsetd - set line discipline
+ * tiocsetd - set line discipline
* @tty: tty device
* @p: pointer to user data
*
@@ -2438,7 +2438,7 @@ static int tiocsetd(struct tty_struct *tty, int __user *p)
}
/**
- * tiocgetd - get line discipline
+ * tiocgetd - get line discipline
* @tty: tty device
* @p: pointer to user data
*
@@ -2461,7 +2461,7 @@ static int tiocgetd(struct tty_struct *tty, int __user *p)
}
/**
- * send_break - performed time break
+ * send_break - performed time break
* @tty: device to break on
* @duration: timeout in mS
*
@@ -2479,27 +2479,27 @@ static int send_break(struct tty_struct *tty, unsigned int duration)
return 0;
if (tty->driver->flags & TTY_DRIVER_HARDWARE_BREAK)
- retval = tty->ops->break_ctl(tty, duration);
- else {
- /* Do the work ourselves */
- if (tty_write_lock(tty, false) < 0)
- return -EINTR;
- retval = tty->ops->break_ctl(tty, -1);
- if (retval)
- goto out;
- if (!signal_pending(current))
- msleep_interruptible(duration);
+ return tty->ops->break_ctl(tty, duration);
+
+ /* Do the work ourselves */
+ if (tty_write_lock(tty, false) < 0)
+ return -EINTR;
+
+ retval = tty->ops->break_ctl(tty, -1);
+ if (!retval) {
+ msleep_interruptible(duration);
retval = tty->ops->break_ctl(tty, 0);
-out:
- tty_write_unlock(tty);
- if (signal_pending(current))
- retval = -EINTR;
}
+ tty_write_unlock(tty);
+
+ if (signal_pending(current))
+ retval = -EINTR;
+
return retval;
}
/**
- * tty_tiocmget - get modem status
+ * tty_tiocmget - get modem status
* @tty: tty device
* @p: pointer to result
*
@@ -2522,7 +2522,7 @@ static int tty_tiocmget(struct tty_struct *tty, int __user *p)
}
/**
- * tty_tiocmset - set modem status
+ * tty_tiocmset - set modem status
* @tty: tty device
* @cmd: command - clear bits, set bits or set all
* @p: pointer to desired bits
@@ -2563,7 +2563,7 @@ static int tty_tiocmset(struct tty_struct *tty, unsigned int cmd,
}
/**
- * tty_get_icount - get tty statistics
+ * tty_get_icount - get tty statistics
* @tty: tty device
* @icount: output parameter
*
@@ -3126,7 +3126,7 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
}
/**
- * tty_put_char - write one character to a tty
+ * tty_put_char - write one character to a tty
* @tty: tty
* @ch: character to write
*
@@ -3304,7 +3304,7 @@ void tty_unregister_device(struct tty_driver *driver, unsigned index)
EXPORT_SYMBOL(tty_unregister_device);
/**
- * __tty_alloc_driver -- allocate tty driver
+ * __tty_alloc_driver - allocate tty driver
* @lines: count of lines this driver can handle at most
* @owner: module which is responsible for this driver
* @flags: some of %TTY_DRIVER_ flags, will be set in driver->flags
@@ -3397,7 +3397,7 @@ static void destruct_tty_driver(struct kref *kref)
}
/**
- * tty_driver_kref_put -- drop a reference to a tty driver
+ * tty_driver_kref_put - drop a reference to a tty driver
* @driver: driver of which to drop the reference
*
* The final put will destroy and free up the driver.
@@ -3409,7 +3409,7 @@ void tty_driver_kref_put(struct tty_driver *driver)
EXPORT_SYMBOL(tty_driver_kref_put);
/**
- * tty_register_driver -- register a tty driver
+ * tty_register_driver - register a tty driver
* @driver: driver to register
*
* Called by a tty driver to register itself.
@@ -3474,7 +3474,7 @@ err:
EXPORT_SYMBOL(tty_register_driver);
/**
- * tty_unregister_driver -- unregister a tty driver
+ * tty_unregister_driver - unregister a tty driver
* @driver: driver to unregister
*
* Called by a tty driver to unregister itself.
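[Note] Among the kerneldoc indentation fixes in tty_io.c above, the one structural change is the send_break() hunk, which flattens the old nested else branch: hardware-timed breaks return straight to the driver, and the software-timed path drops the explicit signal_pending() pre-check because msleep_interruptible() already returns early when a signal arrives. Consolidated from the + lines of that hunk (illustrative restatement, not new behaviour):

if (tty->driver->flags & TTY_DRIVER_HARDWARE_BREAK)
	return tty->ops->break_ctl(tty, duration);	/* driver times the break itself */

if (tty_write_lock(tty, false) < 0)			/* exclude concurrent writers */
	return -EINTR;

retval = tty->ops->break_ctl(tty, -1);			/* assert break */
if (!retval) {
	msleep_interruptible(duration);			/* cut short by a pending signal */
	retval = tty->ops->break_ctl(tty, 0);		/* deassert break */
}
tty_write_unlock(tty);

if (signal_pending(current))
	retval = -EINTR;

return retval;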
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 7958bf6d27c4..4b499301a3db 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -38,16 +38,13 @@
#define TERMIOS_TERMIO BIT(2)
#define TERMIOS_OLD BIT(3)
-
/**
- * tty_chars_in_buffer - characters pending
- * @tty: terminal
+ * tty_chars_in_buffer - characters pending
+ * @tty: terminal
*
- * Return the number of bytes of data in the device private
- * output queue. If no private method is supplied there is assumed
- * to be no queue on the device.
+ * Returns: the number of bytes of data in the device private output queue. If
+ * no private method is supplied there is assumed to be no queue on the device.
*/
-
unsigned int tty_chars_in_buffer(struct tty_struct *tty)
{
if (tty->ops->chars_in_buffer)
@@ -57,16 +54,15 @@ unsigned int tty_chars_in_buffer(struct tty_struct *tty)
EXPORT_SYMBOL(tty_chars_in_buffer);
/**
- * tty_write_room - write queue space
- * @tty: terminal
+ * tty_write_room - write queue space
+ * @tty: terminal
*
- * Return the number of bytes that can be queued to this device
- * at the present time. The result should be treated as a guarantee
- * and the driver cannot offer a value it later shrinks by more than
- * the number of bytes written. If no method is provided 2K is always
- * returned and data may be lost as there will be no flow control.
+ * Returns: the number of bytes that can be queued to this device at the present
+ * time. The result should be treated as a guarantee and the driver cannot
+ * offer a value it later shrinks by more than the number of bytes written. If
+ * no method is provided, 2K is always returned and data may be lost as there
+ * will be no flow control.
*/
-
unsigned int tty_write_room(struct tty_struct *tty)
{
if (tty->ops->write_room)
@@ -76,12 +72,12 @@ unsigned int tty_write_room(struct tty_struct *tty)
EXPORT_SYMBOL(tty_write_room);
/**
- * tty_driver_flush_buffer - discard internal buffer
- * @tty: terminal
+ * tty_driver_flush_buffer - discard internal buffer
+ * @tty: terminal
*
- * Discard the internal output buffer for this device. If no method
- * is provided then either the buffer cannot be hardware flushed or
- * there is no buffer driver side.
+ * Discard the internal output buffer for this device. If no method is provided,
+ * then either the buffer cannot be hardware flushed or there is no buffer
+ * driver side.
*/
void tty_driver_flush_buffer(struct tty_struct *tty)
{
@@ -91,50 +87,47 @@ void tty_driver_flush_buffer(struct tty_struct *tty)
EXPORT_SYMBOL(tty_driver_flush_buffer);
/**
- * tty_unthrottle - flow control
- * @tty: terminal
+ * tty_unthrottle - flow control
+ * @tty: terminal
*
- * Indicate that a tty may continue transmitting data down the stack.
- * Takes the termios rwsem to protect against parallel throttle/unthrottle
- * and also to ensure the driver can consistently reference its own
- * termios data at this point when implementing software flow control.
+ * Indicate that a @tty may continue transmitting data down the stack. Takes
+ * the &tty_struct->termios_rwsem to protect against parallel
+ * throttle/unthrottle and also to ensure the driver can consistently reference
+ * its own termios data at this point when implementing software flow control.
*
- * Drivers should however remember that the stack can issue a throttle,
- * then change flow control method, then unthrottle.
+ * Drivers should however remember that the stack can issue a throttle, then
+ * change flow control method, then unthrottle.
*/
-
void tty_unthrottle(struct tty_struct *tty)
{
down_write(&tty->termios_rwsem);
if (test_and_clear_bit(TTY_THROTTLED, &tty->flags) &&
tty->ops->unthrottle)
tty->ops->unthrottle(tty);
- tty->flow_change = 0;
+ tty->flow_change = TTY_FLOW_NO_CHANGE;
up_write(&tty->termios_rwsem);
}
EXPORT_SYMBOL(tty_unthrottle);
/**
- * tty_throttle_safe - flow control
- * @tty: terminal
+ * tty_throttle_safe - flow control
+ * @tty: terminal
*
- * Indicate that a tty should stop transmitting data down the stack.
- * tty_throttle_safe will only attempt throttle if tty->flow_change is
- * TTY_THROTTLE_SAFE. Prevents an accidental throttle due to race
- * conditions when throttling is conditional on factors evaluated prior to
- * throttling.
+ * Indicate that a @tty should stop transmitting data down the stack.
+ * tty_throttle_safe() will only attempt throttle if @tty->flow_change is
+ * %TTY_THROTTLE_SAFE. Prevents an accidental throttle due to race conditions
+ * when throttling is conditional on factors evaluated prior to throttling.
*
- * Returns 0 if tty is throttled (or was already throttled)
+ * Returns: %true if @tty is throttled (or was already throttled)
*/
-
-int tty_throttle_safe(struct tty_struct *tty)
+bool tty_throttle_safe(struct tty_struct *tty)
{
- int ret = 0;
+ bool ret = true;
mutex_lock(&tty->throttle_mutex);
if (!tty_throttled(tty)) {
if (tty->flow_change != TTY_THROTTLE_SAFE)
- ret = 1;
+ ret = false;
else {
set_bit(TTY_THROTTLED, &tty->flags);
if (tty->ops->throttle)
@@ -147,25 +140,24 @@ int tty_throttle_safe(struct tty_struct *tty)
}
/**
- * tty_unthrottle_safe - flow control
- * @tty: terminal
+ * tty_unthrottle_safe - flow control
+ * @tty: terminal
*
- * Similar to tty_unthrottle() but will only attempt unthrottle
- * if tty->flow_change is TTY_UNTHROTTLE_SAFE. Prevents an accidental
- * unthrottle due to race conditions when unthrottling is conditional
- * on factors evaluated prior to unthrottling.
+ * Similar to tty_unthrottle() but will only attempt unthrottle if
+ * @tty->flow_change is %TTY_UNTHROTTLE_SAFE. Prevents an accidental unthrottle
+ * due to race conditions when unthrottling is conditional on factors evaluated
+ * prior to unthrottling.
*
- * Returns 0 if tty is unthrottled (or was already unthrottled)
+ * Returns: %true if @tty is unthrottled (or was already unthrottled)
*/
-
-int tty_unthrottle_safe(struct tty_struct *tty)
+bool tty_unthrottle_safe(struct tty_struct *tty)
{
- int ret = 0;
+ bool ret = true;
mutex_lock(&tty->throttle_mutex);
if (tty_throttled(tty)) {
if (tty->flow_change != TTY_UNTHROTTLE_SAFE)
- ret = 1;
+ ret = false;
else {
clear_bit(TTY_THROTTLED, &tty->flags);
if (tty->ops->unthrottle)
@@ -178,14 +170,14 @@ int tty_unthrottle_safe(struct tty_struct *tty)
}
/**
- * tty_wait_until_sent - wait for I/O to finish
- * @tty: tty we are waiting for
- * @timeout: how long we will wait
+ * tty_wait_until_sent - wait for I/O to finish
+ * @tty: tty we are waiting for
+ * @timeout: how long we will wait
*
- * Wait for characters pending in a tty driver to hit the wire, or
- * for a timeout to occur (eg due to flow control)
+ * Wait for characters pending in a tty driver to hit the wire, or for a
+ * timeout to occur (eg due to flow control).
*
- * Locking: none
+ * Locking: none
*/
void tty_wait_until_sent(struct tty_struct *tty, long timeout)
@@ -231,16 +223,15 @@ static void unset_locked_termios(struct tty_struct *tty, const struct ktermios *
}
/**
- * tty_termios_copy_hw - copy hardware settings
- * @new: New termios
- * @old: Old termios
+ * tty_termios_copy_hw - copy hardware settings
+ * @new: new termios
+ * @old: old termios
*
- * Propagate the hardware specific terminal setting bits from
- * the old termios structure to the new one. This is used in cases
- * where the hardware does not support reconfiguration or as a helper
- * in some cases where only minimal reconfiguration is supported
+ * Propagate the hardware specific terminal setting bits from the @old termios
+ * structure to the @new one. This is used in cases where the hardware does not
+ * support reconfiguration or as a helper in some cases where only minimal
+ * reconfiguration is supported.
*/
-
void tty_termios_copy_hw(struct ktermios *new, const struct ktermios *old)
{
/* The bits a dumb device handles in software. Smart devices need
@@ -253,14 +244,15 @@ void tty_termios_copy_hw(struct ktermios *new, const struct ktermios *old)
EXPORT_SYMBOL(tty_termios_copy_hw);
/**
- * tty_termios_hw_change - check for setting change
- * @a: termios
- * @b: termios to compare
+ * tty_termios_hw_change - check for setting change
+ * @a: termios
+ * @b: termios to compare
*
- * Check if any of the bits that affect a dumb device have changed
- * between the two termios structures, or a speed change is needed.
+ * Check if any of the bits that affect a dumb device have changed between the
+ * two termios structures, or a speed change is needed.
+ *
+ * Returns: %true if change is needed
*/
-
bool tty_termios_hw_change(const struct ktermios *a, const struct ktermios *b)
{
if (a->c_ispeed != b->c_ispeed || a->c_ospeed != b->c_ospeed)
@@ -272,11 +264,10 @@ bool tty_termios_hw_change(const struct ktermios *a, const struct ktermios *b)
EXPORT_SYMBOL(tty_termios_hw_change);
/**
- * tty_get_char_size - get size of a character
- * @cflag: termios cflag value
+ * tty_get_char_size - get size of a character
+ * @cflag: termios cflag value
*
- * Get the size (in bits) of a character depending on @cflag's %CSIZE
- * setting.
+ * Returns: size (in bits) of a character depending on @cflag's %CSIZE setting
*/
unsigned char tty_get_char_size(unsigned int cflag)
{
@@ -295,13 +286,14 @@ unsigned char tty_get_char_size(unsigned int cflag)
EXPORT_SYMBOL_GPL(tty_get_char_size);
/**
- * tty_get_frame_size - get size of a frame
- * @cflag: termios cflag value
+ * tty_get_frame_size - get size of a frame
+ * @cflag: termios cflag value
*
- * Get the size (in bits) of a frame depending on @cflag's %CSIZE, %CSTOPB,
- * and %PARENB setting. The result is a sum of character size, start and
- * stop bits -- one bit each -- second stop bit (if set), and parity bit
- * (if set).
+ * Get the size (in bits) of a frame depending on @cflag's %CSIZE, %CSTOPB, and
+ * %PARENB setting. The result is a sum of character size, start and stop bits
+ * -- one bit each -- second stop bit (if set), and parity bit (if set).
+ *
+ * Returns: size (in bits) of a frame depending on @cflag's setting.
*/
unsigned char tty_get_frame_size(unsigned int cflag)
{
@@ -319,16 +311,15 @@ unsigned char tty_get_frame_size(unsigned int cflag)
EXPORT_SYMBOL_GPL(tty_get_frame_size);
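As a quick check of the frame-size arithmetic documented above, a hedged worked example (standard termios flag names; the values follow directly from the documented sum):

	/* 8N1: 1 start + 8 data + 1 stop               = 10 bits */
	unsigned char bits_8n1 = tty_get_frame_size(CS8);
	/* 8E2: 1 start + 8 data + 1 parity + 2 stop    = 12 bits */
	unsigned char bits_8e2 = tty_get_frame_size(CS8 | PARENB | CSTOPB);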
/**
- * tty_set_termios - update termios values
- * @tty: tty to update
- * @new_termios: desired new value
+ * tty_set_termios - update termios values
+ * @tty: tty to update
+ * @new_termios: desired new value
*
- * Perform updates to the termios values set on this terminal.
- * A master pty's termios should never be set.
+ * Perform updates to the termios values set on this @tty. A master pty's
+ * termios should never be set.
*
- * Locking: termios_rwsem
+ * Locking: &tty_struct->termios_rwsem
*/
-
int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
{
struct ktermios old_termios;
@@ -441,18 +432,19 @@ __weak int kernel_termios_to_user_termios(struct termios __user *u,
#endif /* TCGETS2 */
/**
- * set_termios - set termios values for a tty
- * @tty: terminal device
- * @arg: user data
- * @opt: option information
+ * set_termios - set termios values for a tty
+ * @tty: terminal device
+ * @arg: user data
+ * @opt: option information
+ *
+ * Helper function to prepare termios data and run other necessary functions
+ * before using tty_set_termios() to do the actual changes.
*
- * Helper function to prepare termios data and run necessary other
- * functions before using tty_set_termios to do the actual changes.
+ * Locking: called functions take &tty_struct->ldisc_sem and
+ * &tty_struct->termios_rwsem locks
*
- * Locking:
- * Called functions take ldisc and termios_rwsem locks
+ * Returns: 0 on success, an error otherwise
*/
-
static int set_termios(struct tty_struct *tty, void __user *arg, int opt)
{
struct ktermios tmp_termios;
@@ -624,16 +616,16 @@ static void set_sgflags(struct ktermios *termios, int flags)
}
/**
- * set_sgttyb - set legacy terminal values
- * @tty: tty structure
- * @sgttyb: pointer to old style terminal structure
+ * set_sgttyb - set legacy terminal values
+ * @tty: tty structure
+ * @sgttyb: pointer to old style terminal structure
+ *
+ * Updates a terminal from the legacy BSD style terminal information structure.
*
- * Updates a terminal from the legacy BSD style terminal information
- * structure.
+ * Locking: &tty_struct->termios_rwsem
*
- * Locking: termios_rwsem
+ * Returns: 0 on success, an error otherwise
*/
-
static int set_sgttyb(struct tty_struct *tty, struct sgttyb __user *sgttyb)
{
int retval;
@@ -735,14 +727,17 @@ static int set_ltchars(struct tty_struct *tty, struct ltchars __user *ltchars)
#endif
/**
- * tty_change_softcar - carrier change ioctl helper
- * @tty: tty to update
- * @enable: enable/disable CLOCAL
+ * tty_change_softcar - carrier change ioctl helper
+ * @tty: tty to update
+ * @enable: enable/disable %CLOCAL
+ *
+ * Perform a change to the %CLOCAL state and call into the driver layer to make
+ * it visible.
+ *
+ * Locking: &tty_struct->termios_rwsem.
*
- * Perform a change to the CLOCAL state and call into the driver
- * layer to make it visible. All done with the termios rwsem
+ * Returns: 0 on success, an error otherwise
*/
-
static int tty_change_softcar(struct tty_struct *tty, bool enable)
{
int ret = 0;
@@ -762,16 +757,15 @@ static int tty_change_softcar(struct tty_struct *tty, bool enable)
}
/**
- * tty_mode_ioctl - mode related ioctls
- * @tty: tty for the ioctl
- * @cmd: command
- * @arg: ioctl argument
+ * tty_mode_ioctl - mode related ioctls
+ * @tty: tty for the ioctl
+ * @cmd: command
+ * @arg: ioctl argument
*
- * Perform non line discipline specific mode control ioctls. This
- * is designed to be called by line disciplines to ensure they provide
- * consistent mode setting.
+ * Perform non-line discipline specific mode control ioctls. This is designed
+ * to be called by line disciplines to ensure they provide consistent mode
+ * setting.
*/
-
int tty_mode_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
{
struct tty_struct *real_tty;
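To illustrate the new bool return convention of tty_throttle_safe()/tty_unthrottle_safe(), here is a minimal caller sketch in the style of a line discipline's receive path. It is not the in-tree n_tty code: it assumes the drivers/tty-internal helpers tty_set_flow_change()/__tty_set_flow_change() and an illustrative EXAMPLE_THRESHOLD, and real callers re-read the buffer fill level on every pass.

	static void example_check_throttle(struct tty_struct *tty, size_t free_space)
	{
		while (1) {
			tty_set_flow_change(tty, TTY_THROTTLE_SAFE);
			if (free_space >= EXAMPLE_THRESHOLD)	/* enough room again, no throttle needed */
				break;
			if (tty_throttle_safe(tty))		/* true: throttled (or already was), done */
				break;
			/* flow_change was raced away; re-evaluate and retry */
		}
		__tty_set_flow_change(tty, 0);
	}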
diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c
index 0d04287da098..ef8741c3e662 100644
--- a/drivers/tty/tty_jobctrl.c
+++ b/drivers/tty/tty_jobctrl.c
@@ -300,12 +300,7 @@ void disassociate_ctty(int on_exit)
return;
}
- spin_lock_irq(&current->sighand->siglock);
- put_pid(current->signal->tty_old_pgrp);
- current->signal->tty_old_pgrp = NULL;
- tty = tty_kref_get(current->signal->tty);
- spin_unlock_irq(&current->sighand->siglock);
-
+ tty = get_current_tty();
if (tty) {
unsigned long flags;
@@ -320,6 +315,16 @@ void disassociate_ctty(int on_exit)
tty_kref_put(tty);
}
+ /* If tty->ctrl.pgrp is not NULL, it may be assigned to
+ * current->signal->tty_old_pgrp in a race condition, causing a
+ * pid memleak. Release current->signal->tty_old_pgrp only after
+ * tty->ctrl.pgrp has been set to NULL.
+ */
+ spin_lock_irq(&current->sighand->siglock);
+ put_pid(current->signal->tty_old_pgrp);
+ current->signal->tty_old_pgrp = NULL;
+ spin_unlock_irq(&current->sighand->siglock);
+
/* Now clear signal->tty under the lock */
read_lock(&tasklist_lock);
session_clear_tty(task_session(current));
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 624d104bd145..63c125250961 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -79,7 +79,7 @@ const struct tty_port_client_operations tty_port_default_client_ops = {
EXPORT_SYMBOL_GPL(tty_port_default_client_ops);
/**
- * tty_port_init -- initialize tty_port
+ * tty_port_init - initialize tty_port
* @port: tty_port to initialize
*
* Initializes the state of struct tty_port. When a port was initialized using
@@ -267,7 +267,7 @@ void tty_port_free_xmit_buf(struct tty_port *port)
EXPORT_SYMBOL(tty_port_free_xmit_buf);
/**
- * tty_port_destroy -- destroy inited port
+ * tty_port_destroy - destroy inited port
* @port: tty port to be destroyed
*
* When a port was initialized using tty_port_init(), one has to destroy the
@@ -297,7 +297,7 @@ static void tty_port_destructor(struct kref *kref)
}
/**
- * tty_port_put -- drop a reference to tty_port
+ * tty_port_put - drop a reference to tty_port
* @port: port to drop a reference of (can be NULL)
*
* The final put will destroy and free up the @port using
diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c
index a39ed981bfd3..5b625f20233b 100644
--- a/drivers/tty/vcc.c
+++ b/drivers/tty/vcc.c
@@ -579,18 +579,22 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
return -ENOMEM;
name = kstrdup(dev_name(&vdev->dev), GFP_KERNEL);
+ if (!name) {
+ rv = -ENOMEM;
+ goto free_port;
+ }
rv = vio_driver_init(&port->vio, vdev, VDEV_CONSOLE_CON, vcc_versions,
ARRAY_SIZE(vcc_versions), NULL, name);
if (rv)
- goto free_port;
+ goto free_name;
port->vio.debug = vcc_dbg_vio;
vcc_ldc_cfg.debug = vcc_dbg_ldc;
rv = vio_ldc_alloc(&port->vio, &vcc_ldc_cfg, port);
if (rv)
- goto free_port;
+ goto free_name;
spin_lock_init(&port->lock);
@@ -624,6 +628,11 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
goto unreg_tty;
}
port->domain = kstrdup(domain, GFP_KERNEL);
+ if (!port->domain) {
+ rv = -ENOMEM;
+ goto unreg_tty;
+ }
+
mdesc_release(hp);
@@ -653,8 +662,9 @@ free_table:
vcc_table_remove(port->index);
free_ldc:
vio_ldc_free(&port->vio);
-free_port:
+free_name:
kfree(name);
+free_port:
kfree(port);
return rv;
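The vcc_probe() fix above is an instance of the usual reverse-order error-unwind pattern: every allocation gets its own label, and later failures jump to labels that free only what was already allocated. A generic, hedged sketch (all names illustrative, not from the driver):

	static int example_probe(void)
	{
		void *port, *name;
		int rv;

		port = kzalloc(64, GFP_KERNEL);
		if (!port)
			return -ENOMEM;

		name = kstrdup("example", GFP_KERNEL);
		if (!name) {
			rv = -ENOMEM;
			goto free_port;
		}

		rv = example_setup(name, port);	/* hypothetical helper */
		if (rv)
			goto free_name;
		return 0;

	free_name:
		kfree(name);
	free_port:	/* labels run in reverse order of allocation */
		kfree(port);
		return rv;
	}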
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index f02d21e2a96e..5e39a4f430ee 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -205,7 +205,7 @@ static enum translation_map inv_translate[MAX_NR_CONSOLES];
FIELD_PREP(UNI_GLYPH_BITS, (glyph)))
/**
- * struct uni_pagedict -- unicode directory
+ * struct uni_pagedict - unicode directory
*
* @uni_pgdir: 32*32*64 table with glyphs
* @refcount: reference count of this structure
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 1fe6107b539b..12a192e1196b 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -2079,12 +2079,15 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
return -ENOMEM;
spin_lock_irqsave(&func_buf_lock, flags);
- len = strlcpy(kbs, func_table[kb_func] ? : "", len);
+ len = strscpy(kbs, func_table[kb_func] ? : "", len);
spin_unlock_irqrestore(&func_buf_lock, flags);
+ if (len < 0) {
+ ret = -ENOSPC;
+ break;
+ }
ret = copy_to_user(user_kdgkb->kb_string, kbs, len + 1) ?
-EFAULT : 0;
-
break;
}
case KDSKBSENT:
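The keyboard.c hunk relies on the different return conventions of strlcpy() and strscpy(); a small hedged illustration (helper name and mapping to -ENOSPC are illustrative, mirroring the ioctl path above):

	static ssize_t example_copy(char *dst, size_t dst_size, const char *src)
	{
		ssize_t n = strscpy(dst, src, dst_size);

		if (n < 0)	/* -E2BIG: src did not fit; dst is still NUL-terminated */
			return -ENOSPC;
		return n;	/* characters copied, excluding the NUL, unlike strlcpy()
				 * which returned strlen(src) and could exceed dst_size */
	}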
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index 829c4be66f3b..67e2cb7c96ee 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -174,7 +174,7 @@ vcs_poll_data_get(struct file *file)
}
/**
- * vcs_vc -- return VC for @inode
+ * vcs_vc - return VC for @inode
* @inode: inode for which to return a VC
* @viewed: returns whether this console is currently foreground (viewed)
*
@@ -199,7 +199,7 @@ static struct vc_data *vcs_vc(struct inode *inode, bool *viewed)
}
/**
- * vcs_size -- return size for a VC in @vc
+ * vcs_size - return size for a VC in @vc
* @vc: which VC
* @attr: does it use attributes?
* @unicode: is it unicode?
@@ -786,23 +786,22 @@ static const struct file_operations vcs_fops = {
.release = vcs_release,
};
-static struct class *vc_class;
+static const struct class vc_class = {
+ .name = "vc",
+};
void vcs_make_sysfs(int index)
{
- device_create(vc_class, NULL, MKDEV(VCS_MAJOR, index + 1), NULL,
- "vcs%u", index + 1);
- device_create(vc_class, NULL, MKDEV(VCS_MAJOR, index + 65), NULL,
- "vcsu%u", index + 1);
- device_create(vc_class, NULL, MKDEV(VCS_MAJOR, index + 129), NULL,
- "vcsa%u", index + 1);
+ device_create(&vc_class, NULL, MKDEV(VCS_MAJOR, index + 1), NULL, "vcs%u", index + 1);
+ device_create(&vc_class, NULL, MKDEV(VCS_MAJOR, index + 65), NULL, "vcsu%u", index + 1);
+ device_create(&vc_class, NULL, MKDEV(VCS_MAJOR, index + 129), NULL, "vcsa%u", index + 1);
}
void vcs_remove_sysfs(int index)
{
- device_destroy(vc_class, MKDEV(VCS_MAJOR, index + 1));
- device_destroy(vc_class, MKDEV(VCS_MAJOR, index + 65));
- device_destroy(vc_class, MKDEV(VCS_MAJOR, index + 129));
+ device_destroy(&vc_class, MKDEV(VCS_MAJOR, index + 1));
+ device_destroy(&vc_class, MKDEV(VCS_MAJOR, index + 65));
+ device_destroy(&vc_class, MKDEV(VCS_MAJOR, index + 129));
}
int __init vcs_init(void)
@@ -811,11 +810,12 @@ int __init vcs_init(void)
if (register_chrdev(VCS_MAJOR, "vcs", &vcs_fops))
panic("unable to get major %d for vcs device", VCS_MAJOR);
- vc_class = class_create("vc");
+ if (class_register(&vc_class))
+ panic("unable to create vc_class");
- device_create(vc_class, NULL, MKDEV(VCS_MAJOR, 0), NULL, "vcs");
- device_create(vc_class, NULL, MKDEV(VCS_MAJOR, 64), NULL, "vcsu");
- device_create(vc_class, NULL, MKDEV(VCS_MAJOR, 128), NULL, "vcsa");
+ device_create(&vc_class, NULL, MKDEV(VCS_MAJOR, 0), NULL, "vcs");
+ device_create(&vc_class, NULL, MKDEV(VCS_MAJOR, 64), NULL, "vcsu");
+ device_create(&vc_class, NULL, MKDEV(VCS_MAJOR, 128), NULL, "vcsa");
for (i = 0; i < MIN_NR_CONSOLES; i++)
vcs_make_sysfs(i);
return 0;
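The vc_screen.c conversion above follows the generic pattern of replacing a dynamically allocated class_create() class with a statically allocated const struct class; a hedged stand-alone sketch (class name and major number are illustrative):

	static const struct class example_class = {
		.name = "example",
	};

	static int __init example_init(void)
	{
		int ret = class_register(&example_class);

		if (ret)
			return ret;
		/* the class is now passed by address to device_create() */
		device_create(&example_class, NULL, MKDEV(EXAMPLE_MAJOR, 0), NULL, "example0");
		return 0;
	}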
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 5c47f77804f0..156efda7c80d 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -2588,7 +2588,7 @@ static inline int vc_translate_ascii(const struct vc_data *vc, int c)
/**
- * vc_sanitize_unicode -- Replace invalid Unicode code points with U+FFFD
+ * vc_sanitize_unicode - Replace invalid Unicode code points with U+FFFD
* @c: the received character, or U+FFFD for invalid sequences.
*/
static inline int vc_sanitize_unicode(const int c)
@@ -2600,7 +2600,7 @@ static inline int vc_sanitize_unicode(const int c)
}
/**
- * vc_translate_unicode -- Combine UTF-8 into Unicode in @vc_utf_char
+ * vc_translate_unicode - Combine UTF-8 into Unicode in @vc_utf_char
* @vc: virtual console
* @c: character to translate
* @rescan: we return true if we need more (continuation) data
@@ -3155,9 +3155,13 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
switch (type) {
case TIOCL_SETSEL:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
return set_selection_user((struct tiocl_selection
__user *)(p+1), tty);
case TIOCL_PASTESEL:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
return paste_selection(tty);
case TIOCL_UNBLANKSCREEN:
console_lock();
@@ -3165,6 +3169,8 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
console_unlock();
break;
case TIOCL_SELLOADLUT:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
console_lock();
ret = sel_loadlut(p);
console_unlock();
@@ -3565,7 +3571,9 @@ int __init vty_init(const struct file_operations *console_fops)
return 0;
}
-static struct class *vtconsole_class;
+static const struct class vtconsole_class = {
+ .name = "vtconsole",
+};
static int do_bind_con_driver(const struct consw *csw, int first, int last,
int deflt)
@@ -4092,7 +4100,7 @@ static int do_register_con_driver(const struct consw *csw, int first, int last)
goto err;
con_driver->dev =
- device_create_with_groups(vtconsole_class, NULL,
+ device_create_with_groups(&vtconsole_class, NULL,
MKDEV(0, con_driver->node),
con_driver, con_dev_groups,
"vtcon%i", con_driver->node);
@@ -4173,7 +4181,7 @@ static void con_driver_unregister_callback(struct work_struct *ignored)
console_unlock();
vtconsole_deinit_device(con_driver);
- device_destroy(vtconsole_class, MKDEV(0, con_driver->node));
+ device_destroy(&vtconsole_class, MKDEV(0, con_driver->node));
console_lock();
@@ -4234,12 +4242,9 @@ static int __init vtconsole_class_init(void)
{
int i;
- vtconsole_class = class_create("vtconsole");
- if (IS_ERR(vtconsole_class)) {
- pr_warn("Unable to create vt console class; errno = %ld\n",
- PTR_ERR(vtconsole_class));
- vtconsole_class = NULL;
- }
+ i = class_register(&vtconsole_class);
+ if (i)
+ pr_warn("Unable to create vt console class; errno = %d\n", i);
/* Add system drivers to sysfs */
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
@@ -4247,7 +4252,7 @@ static int __init vtconsole_class_init(void)
if (con->con && !con->dev) {
con->dev =
- device_create_with_groups(vtconsole_class, NULL,
+ device_create_with_groups(&vtconsole_class, NULL,
MKDEV(0, con->node),
con, con_dev_groups,
"vtcon%i", con->node);
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 7f33bcc315f2..abf8c6cdea9e 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -91,6 +91,16 @@ config USB_PCI
If you have such a device you may say N here and PCI related code
will not be built in the USB driver.
+config USB_PCI_AMD
+ bool "AMD PCI USB host support"
+ depends on USB_PCI && HAS_IOPORT
+ default X86 || MACH_LOONGSON64 || PPC_PASEMI
+ help
+ Enable workarounds for USB implementation quirks in SB600/SB700/SB800
+ and later south bridge implementations. These are common on x86 PCs
+ with AMD CPUs but rarely used elsewhere, with the exception of a few
+ powerpc and mips desktop machines.
+
if USB
source "drivers/usb/core/Kconfig"
diff --git a/drivers/usb/c67x00/c67x00-hcd.h b/drivers/usb/c67x00/c67x00-hcd.h
index 6332a6b5dce6..6ba5adced61c 100644
--- a/drivers/usb/c67x00/c67x00-hcd.h
+++ b/drivers/usb/c67x00/c67x00-hcd.h
@@ -109,7 +109,6 @@ int c67x00_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
void c67x00_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *ep);
-void c67x00_hcd_msg_received(struct c67x00_sie *sie, u16 msg);
void c67x00_sched_kick(struct c67x00_hcd *c67x00);
int c67x00_sched_start_scheduler(struct c67x00_hcd *c67x00);
void c67x00_sched_stop_scheduler(struct c67x00_hcd *c67x00);
diff --git a/drivers/usb/cdns3/cdnsp-debug.h b/drivers/usb/cdns3/cdnsp-debug.h
index f0ca865cce2a..ad617b7455b9 100644
--- a/drivers/usb/cdns3/cdnsp-debug.h
+++ b/drivers/usb/cdns3/cdnsp-debug.h
@@ -131,8 +131,6 @@ static inline const char *cdnsp_trb_type_string(u8 type)
return "Endpoint Not ready";
case TRB_HALT_ENDPOINT:
return "Halt Endpoint";
- case TRB_FLUSH_ENDPOINT:
- return "FLush Endpoint";
default:
return "UNKNOWN";
}
@@ -328,7 +326,6 @@ static inline const char *cdnsp_decode_trb(char *str, size_t size, u32 field0,
break;
case TRB_RESET_EP:
case TRB_HALT_ENDPOINT:
- case TRB_FLUSH_ENDPOINT:
ret = snprintf(str, size,
"%s: ep%d%s(%d) ctx %08x%08x slot %ld flags %c",
cdnsp_trb_type_string(type),
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
index 4b67749edb99..4a3f0f958256 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.c
+++ b/drivers/usb/cdns3/cdnsp-gadget.c
@@ -1024,10 +1024,8 @@ static int cdnsp_gadget_ep_disable(struct usb_ep *ep)
pep->ep_state |= EP_DIS_IN_RROGRESS;
/* Endpoint was unconfigured by Reset Device command. */
- if (!(pep->ep_state & EP_UNCONFIGURED)) {
+ if (!(pep->ep_state & EP_UNCONFIGURED))
cdnsp_cmd_stop_ep(pdev, pep);
- cdnsp_cmd_flush_ep(pdev, pep);
- }
/* Remove all queued USB requests. */
while (!list_empty(&pep->pending_list)) {
@@ -1424,8 +1422,6 @@ static void cdnsp_stop(struct cdnsp_device *pdev)
{
u32 temp;
- cdnsp_cmd_flush_ep(pdev, &pdev->eps[0]);
-
/* Remove internally queued request for ep0. */
if (!list_empty(&pdev->eps[0].pending_list)) {
struct cdnsp_request *req;
diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h
index e1b5801fdddf..dbee6f085277 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.h
+++ b/drivers/usb/cdns3/cdnsp-gadget.h
@@ -1128,8 +1128,6 @@ union cdnsp_trb {
#define TRB_HALT_ENDPOINT 54
/* Doorbell Overflow Event. */
#define TRB_DRB_OVERFLOW 57
-/* Flush Endpoint Command. */
-#define TRB_FLUSH_ENDPOINT 58
#define TRB_TYPE_LINK(x) (((x) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
#define TRB_TYPE_LINK_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
@@ -1539,8 +1537,6 @@ void cdnsp_queue_configure_endpoint(struct cdnsp_device *pdev,
void cdnsp_queue_reset_ep(struct cdnsp_device *pdev, unsigned int ep_index);
void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev,
unsigned int ep_index);
-void cdnsp_queue_flush_endpoint(struct cdnsp_device *pdev,
- unsigned int ep_index);
void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num);
void cdnsp_queue_reset_device(struct cdnsp_device *pdev);
void cdnsp_queue_new_dequeue_state(struct cdnsp_device *pdev,
@@ -1574,7 +1570,6 @@ void cdnsp_irq_reset(struct cdnsp_device *pdev);
int cdnsp_halt_endpoint(struct cdnsp_device *pdev,
struct cdnsp_ep *pep, int value);
int cdnsp_cmd_stop_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep);
-int cdnsp_cmd_flush_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep);
void cdnsp_setup_analyze(struct cdnsp_device *pdev);
int cdnsp_status_stage(struct cdnsp_device *pdev);
int cdnsp_reset_device(struct cdnsp_device *pdev);
diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
index 07f6068342d4..af981778382d 100644
--- a/drivers/usb/cdns3/cdnsp-ring.c
+++ b/drivers/usb/cdns3/cdnsp-ring.c
@@ -2123,19 +2123,6 @@ ep_stopped:
return ret;
}
-int cdnsp_cmd_flush_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
-{
- int ret;
-
- cdnsp_queue_flush_endpoint(pdev, pep->idx);
- cdnsp_ring_cmd_db(pdev);
- ret = cdnsp_wait_for_cmd_compl(pdev);
-
- trace_cdnsp_handle_cmd_flush_ep(pep->out_ctx);
-
- return ret;
-}
-
/*
* The transfer burst count field of the isochronous TRB defines the number of
* bursts that are required to move all packets in this TD. Only SuperSpeed
@@ -2465,17 +2452,6 @@ void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
EP_ID_FOR_TRB(ep_index));
}
-/*
- * Queue a flush endpoint request on the command ring.
- */
-void cdnsp_queue_flush_endpoint(struct cdnsp_device *pdev,
- unsigned int ep_index)
-{
- cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_FLUSH_ENDPOINT) |
- SLOT_ID_FOR_TRB(pdev->slot_id) |
- EP_ID_FOR_TRB(ep_index));
-}
-
void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num)
{
u32 lo, mid;
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index c815824a0b2d..bab45bc62361 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -43,6 +43,10 @@ config USB_CHIPIDEA_MSM
tristate "Enable MSM hsusb glue driver" if EXPERT
default USB_CHIPIDEA
+config USB_CHIPIDEA_NPCM
+ tristate "Enable NPCM hsusb glue driver" if EXPERT
+ default USB_CHIPIDEA
+
config USB_CHIPIDEA_IMX
tristate "Enable i.MX USB glue driver" if EXPERT
depends on OF
diff --git a/drivers/usb/chipidea/Makefile b/drivers/usb/chipidea/Makefile
index 71afeab97e83..718cb24603dd 100644
--- a/drivers/usb/chipidea/Makefile
+++ b/drivers/usb/chipidea/Makefile
@@ -13,6 +13,7 @@ ci_hdrc-$(CONFIG_USB_OTG_FSM) += otg_fsm.o
obj-$(CONFIG_USB_CHIPIDEA_GENERIC) += ci_hdrc_usb2.o
obj-$(CONFIG_USB_CHIPIDEA_MSM) += ci_hdrc_msm.o
+obj-$(CONFIG_USB_CHIPIDEA_NPCM) += ci_hdrc_npcm.o
obj-$(CONFIG_USB_CHIPIDEA_PCI) += ci_hdrc_pci.o
obj-$(CONFIG_USB_CHIPIDEA_IMX) += usbmisc_imx.o ci_hdrc_imx.o
obj-$(CONFIG_USB_CHIPIDEA_TEGRA) += ci_hdrc_tegra.o
diff --git a/drivers/usb/chipidea/ci_hdrc_npcm.c b/drivers/usb/chipidea/ci_hdrc_npcm.c
new file mode 100644
index 000000000000..e4a191e02ceb
--- /dev/null
+++ b/drivers/usb/chipidea/ci_hdrc_npcm.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2023 Nuvoton Technology corporation.
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/usb/chipidea.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/reset-controller.h>
+#include <linux/of.h>
+
+#include "ci.h"
+
+struct npcm_udc_data {
+ struct platform_device *ci;
+ struct clk *core_clk;
+ struct ci_hdrc_platform_data pdata;
+};
+
+static int npcm_udc_notify_event(struct ci_hdrc *ci, unsigned event)
+{
+ struct device *dev = ci->dev->parent;
+
+ switch (event) {
+ case CI_HDRC_CONTROLLER_RESET_EVENT:
+ /* clear all mode bits */
+ hw_write(ci, OP_USBMODE, 0xffffffff, 0x0);
+ break;
+ default:
+ dev_dbg(dev, "unknown ci_hdrc event (%d)\n", event);
+ break;
+ }
+
+ return 0;
+}
+
+static int npcm_udc_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct npcm_udc_data *ci;
+ struct platform_device *plat_ci;
+ struct device *dev = &pdev->dev;
+
+ ci = devm_kzalloc(&pdev->dev, sizeof(*ci), GFP_KERNEL);
+ if (!ci)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, ci);
+
+ ci->core_clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(ci->core_clk))
+ return PTR_ERR(ci->core_clk);
+
+ ret = clk_prepare_enable(ci->core_clk);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable the clock: %d\n", ret);
+
+ ci->pdata.name = dev_name(dev);
+ ci->pdata.capoffset = DEF_CAPOFFSET;
+ ci->pdata.flags = CI_HDRC_REQUIRES_ALIGNED_DMA |
+ CI_HDRC_FORCE_VBUS_ACTIVE_ALWAYS;
+ ci->pdata.phy_mode = USBPHY_INTERFACE_MODE_UTMI;
+ ci->pdata.notify_event = npcm_udc_notify_event;
+
+ plat_ci = ci_hdrc_add_device(dev, pdev->resource, pdev->num_resources,
+ &ci->pdata);
+ if (IS_ERR(plat_ci)) {
+ ret = PTR_ERR(plat_ci);
+ dev_err(dev, "failed to register HDRC NPCM device: %d\n", ret);
+ goto clk_err;
+ }
+
+ pm_runtime_no_callbacks(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+
+clk_err:
+ clk_disable_unprepare(ci->core_clk);
+ return ret;
+}
+
+static int npcm_udc_remove(struct platform_device *pdev)
+{
+ struct npcm_udc_data *ci = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+ ci_hdrc_remove_device(ci->ci);
+ clk_disable_unprepare(ci->core_clk);
+
+ return 0;
+}
+
+static const struct of_device_id npcm_udc_dt_match[] = {
+ { .compatible = "nuvoton,npcm750-udc", },
+ { .compatible = "nuvoton,npcm845-udc", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, npcm_udc_dt_match);
+
+static struct platform_driver npcm_udc_driver = {
+ .probe = npcm_udc_probe,
+ .remove = npcm_udc_remove,
+ .driver = {
+ .name = "npcm_udc",
+ .of_match_table = npcm_udc_dt_match,
+ },
+};
+
+module_platform_driver(npcm_udc_driver);
+
+MODULE_DESCRIPTION("NPCM USB device controller driver");
+MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
index 8e78bf643e25..2cc305803217 100644
--- a/drivers/usb/chipidea/ci_hdrc_tegra.c
+++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
@@ -293,14 +293,12 @@ static int tegra_usb_probe(struct platform_device *pdev)
usb->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0);
if (IS_ERR(usb->phy))
return dev_err_probe(&pdev->dev, PTR_ERR(usb->phy),
- "failed to get PHY\n");
+ "failed to get PHY");
usb->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(usb->clk)) {
- err = PTR_ERR(usb->clk);
- dev_err(&pdev->dev, "failed to get clock: %d\n", err);
- return err;
- }
+ if (IS_ERR(usb->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(usb->clk),
+ "failed to get clock");
err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
if (err)
@@ -316,7 +314,7 @@ static int tegra_usb_probe(struct platform_device *pdev)
err = tegra_usb_reset_controller(&pdev->dev);
if (err) {
- dev_err(&pdev->dev, "failed to reset controller: %d\n", err);
+ dev_err_probe(&pdev->dev, err, "failed to reset controller");
goto fail_power_off;
}
@@ -347,8 +345,8 @@ static int tegra_usb_probe(struct platform_device *pdev)
usb->dev = ci_hdrc_add_device(&pdev->dev, pdev->resource,
pdev->num_resources, &usb->data);
if (IS_ERR(usb->dev)) {
- err = PTR_ERR(usb->dev);
- dev_err(&pdev->dev, "failed to add HDRC device: %d\n", err);
+ err = dev_err_probe(&pdev->dev, PTR_ERR(usb->dev),
+ "failed to add HDRC device");
goto phy_shutdown;
}
diff --git a/drivers/usb/chipidea/ci_hdrc_usb2.c b/drivers/usb/chipidea/ci_hdrc_usb2.c
index 1321ee67f3b8..97379f653b06 100644
--- a/drivers/usb/chipidea/ci_hdrc_usb2.c
+++ b/drivers/usb/chipidea/ci_hdrc_usb2.c
@@ -9,9 +9,9 @@
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/usb/chipidea.h>
#include <linux/usb/hcd.h>
#include <linux/usb/ulpi.h>
@@ -51,8 +51,8 @@ static int ci_hdrc_usb2_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct ci_hdrc_usb2_priv *priv;
struct ci_hdrc_platform_data *ci_pdata = dev_get_platdata(dev);
+ const struct ci_hdrc_platform_data *data;
int ret;
- const struct of_device_id *match;
if (!ci_pdata) {
ci_pdata = devm_kmalloc(dev, sizeof(*ci_pdata), GFP_KERNEL);
@@ -61,11 +61,10 @@ static int ci_hdrc_usb2_probe(struct platform_device *pdev)
*ci_pdata = ci_default_pdata; /* struct copy */
}
- match = of_match_device(ci_hdrc_usb2_of_match, &pdev->dev);
- if (match && match->data) {
+ data = device_get_match_data(&pdev->dev);
+ if (data)
/* struct copy */
- *ci_pdata = *(struct ci_hdrc_platform_data *)match->data;
- }
+ *ci_pdata = *data;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -120,7 +119,7 @@ static struct platform_driver ci_hdrc_usb2_driver = {
.remove_new = ci_hdrc_usb2_remove,
.driver = {
.name = "chipidea-usb2",
- .of_match_table = of_match_ptr(ci_hdrc_usb2_of_match),
+ .of_match_table = ci_hdrc_usb2_of_match,
},
};
module_platform_driver(ci_hdrc_usb2_driver);
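The of_match_device() to device_get_match_data() conversion above (and the similar dwc2/params.c change further down) boils down to the following pattern; a hedged sketch with illustrative types and helper:

	static int example_probe(struct platform_device *pdev)
	{
		const struct example_pdata *data;

		/* works for OF and ACPI enumeration alike, no of_match_device() needed */
		data = device_get_match_data(&pdev->dev);
		if (data)
			example_apply_pdata(data);	/* hypothetical helper */

		return 0;
	}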
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 08af26b762a2..0cce19208370 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -30,8 +30,7 @@ struct ehci_ci_priv {
};
struct ci_hdrc_dma_aligned_buffer {
- void *kmalloc_ptr;
- void *old_xfer_buffer;
+ void *original_buffer;
u8 data[];
};
@@ -380,59 +379,52 @@ static int ci_ehci_bus_suspend(struct usb_hcd *hcd)
return 0;
}
-static void ci_hdrc_free_dma_aligned_buffer(struct urb *urb)
+static void ci_hdrc_free_dma_aligned_buffer(struct urb *urb, bool copy_back)
{
struct ci_hdrc_dma_aligned_buffer *temp;
- size_t length;
if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
return;
+ urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
temp = container_of(urb->transfer_buffer,
struct ci_hdrc_dma_aligned_buffer, data);
+ urb->transfer_buffer = temp->original_buffer;
+
+ if (copy_back && usb_urb_dir_in(urb)) {
+ size_t length;
- if (usb_urb_dir_in(urb)) {
if (usb_pipeisoc(urb->pipe))
length = urb->transfer_buffer_length;
else
length = urb->actual_length;
- memcpy(temp->old_xfer_buffer, temp->data, length);
+ memcpy(temp->original_buffer, temp->data, length);
}
- urb->transfer_buffer = temp->old_xfer_buffer;
- kfree(temp->kmalloc_ptr);
- urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
+ kfree(temp);
}
static int ci_hdrc_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
{
- struct ci_hdrc_dma_aligned_buffer *temp, *kmalloc_ptr;
- const unsigned int ci_hdrc_usb_dma_align = 32;
- size_t kmalloc_size;
+ struct ci_hdrc_dma_aligned_buffer *temp;
- if (urb->num_sgs || urb->sg || urb->transfer_buffer_length == 0 ||
- !((uintptr_t)urb->transfer_buffer & (ci_hdrc_usb_dma_align - 1)))
+ if (urb->num_sgs || urb->sg || urb->transfer_buffer_length == 0)
+ return 0;
+ if (IS_ALIGNED((uintptr_t)urb->transfer_buffer, 4)
+ && IS_ALIGNED(urb->transfer_buffer_length, 4))
return 0;
- /* Allocate a buffer with enough padding for alignment */
- kmalloc_size = urb->transfer_buffer_length +
- sizeof(struct ci_hdrc_dma_aligned_buffer) +
- ci_hdrc_usb_dma_align - 1;
-
- kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
- if (!kmalloc_ptr)
+ temp = kmalloc(sizeof(*temp) + ALIGN(urb->transfer_buffer_length, 4), mem_flags);
+ if (!temp)
return -ENOMEM;
- /* Position our struct dma_aligned_buffer such that data is aligned */
- temp = PTR_ALIGN(kmalloc_ptr + 1, ci_hdrc_usb_dma_align) - 1;
- temp->kmalloc_ptr = kmalloc_ptr;
- temp->old_xfer_buffer = urb->transfer_buffer;
if (usb_urb_dir_out(urb))
memcpy(temp->data, urb->transfer_buffer,
urb->transfer_buffer_length);
- urb->transfer_buffer = temp->data;
+ temp->original_buffer = urb->transfer_buffer;
+ urb->transfer_buffer = temp->data;
urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
return 0;
@@ -449,7 +441,7 @@ static int ci_hdrc_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
if (ret)
- ci_hdrc_free_dma_aligned_buffer(urb);
+ ci_hdrc_free_dma_aligned_buffer(urb, false);
return ret;
}
@@ -457,7 +449,7 @@ static int ci_hdrc_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
static void ci_hdrc_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
usb_hcd_unmap_urb_for_dma(hcd, urb);
- ci_hdrc_free_dma_aligned_buffer(urb);
+ ci_hdrc_free_dma_aligned_buffer(urb, true);
}
#ifdef CONFIG_PM_SLEEP
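The reworked chipidea bounce-buffer handling above stores the original pointer in a header that immediately precedes the aligned data, so the temporary allocation can be recovered from the transfer buffer with container_of(). A simplified, hedged sketch of that layout (helper names are illustrative, not the driver's functions):

	struct aligned_buf {
		void *original_buffer;
		u8 data[];		/* handed to the caller as the new transfer buffer */
	};

	static void *bounce_buffer(void **bufp, size_t len, gfp_t gfp)
	{
		struct aligned_buf *tmp = kmalloc(sizeof(*tmp) + ALIGN(len, 4), gfp);

		if (!tmp)
			return NULL;
		tmp->original_buffer = *bufp;	/* remember the caller's buffer */
		*bufp = tmp->data;
		return tmp;
	}

	static void bounce_release(void **bufp)
	{
		struct aligned_buf *tmp = container_of(*bufp, struct aligned_buf, data);

		*bufp = tmp->original_buffer;	/* restore before freeing the bounce copy */
		kfree(tmp);
	}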
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index f5490f2a5b6b..647e98f4e351 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -130,8 +130,11 @@ enum ci_role ci_otg_role(struct ci_hdrc *ci)
void ci_handle_vbus_change(struct ci_hdrc *ci)
{
- if (!ci->is_otg)
+ if (!ci->is_otg) {
+ if (ci->platdata->flags & CI_HDRC_FORCE_VBUS_ACTIVE_ALWAYS)
+ usb_gadget_vbus_connect(&ci->gadget);
return;
+ }
if (hw_read_otgsc(ci, OTGSC_BSV) && !ci->vbus_active)
usb_gadget_vbus_connect(&ci->gadget);
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 990280688b25..ee3156f49533 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -206,8 +206,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct hc_driver *driver)
goto free_irq_vectors;
}
- hcd->amd_resume_bug = (usb_hcd_amd_remote_wakeup_quirk(dev) &&
- driver->flags & (HCD_USB11 | HCD_USB3)) ? 1 : 0;
+ hcd->amd_resume_bug = usb_hcd_amd_resume_bug(dev, driver);
if (driver->flags & HCD_MEMORY) {
/* EHCI, OHCI */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 0ff47eeffb49..b4584a0cd484 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2274,6 +2274,8 @@ void usb_disconnect(struct usb_device **pdev)
*/
if (!test_and_set_bit(port1, hub->child_usage_bits))
pm_runtime_get_sync(&port_dev->dev);
+
+ typec_deattach(port_dev->connector, &udev->dev);
}
usb_remove_ep_devs(&udev->ep0);
@@ -2620,6 +2622,8 @@ int usb_new_device(struct usb_device *udev)
if (!test_and_set_bit(port1, hub->child_usage_bits))
pm_runtime_get_sync(&port_dev->dev);
+
+ typec_attach(port_dev->connector, &udev->dev);
}
(void) usb_create_ep_devs(&udev->dev, &udev->ep0, udev);
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index d44dd7f6623e..43ce21c96a51 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -14,6 +14,7 @@
#include <linux/usb.h>
#include <linux/usb/ch11.h>
#include <linux/usb/hcd.h>
+#include <linux/usb/typec.h>
#include "usb.h"
struct usb_hub {
@@ -82,6 +83,7 @@ struct usb_hub {
* @dev: generic device interface
* @port_owner: port's owner
* @peer: related usb2 and usb3 ports (share the same connector)
+ * @connector: USB Type-C connector
* @req: default pm qos request for hubs without port power control
* @connect_type: port's connect type
* @state: device state of the usb device attached to the port
@@ -100,6 +102,7 @@ struct usb_port {
struct device dev;
struct usb_dev_state *port_owner;
struct usb_port *peer;
+ struct typec_connector *connector;
struct dev_pm_qos_request *req;
enum usb_port_connect_type connect_type;
enum usb_device_state state;
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 77be0dc28da9..c628c1abc907 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -653,6 +653,7 @@ static void find_and_link_peer(struct usb_hub *hub, int port1)
static int connector_bind(struct device *dev, struct device *connector, void *data)
{
+ struct usb_port *port_dev = to_usb_port(dev);
int ret;
ret = sysfs_create_link(&dev->kobj, &connector->kobj, "connector");
@@ -660,16 +661,30 @@ static int connector_bind(struct device *dev, struct device *connector, void *da
return ret;
ret = sysfs_create_link(&connector->kobj, &dev->kobj, dev_name(dev));
- if (ret)
+ if (ret) {
sysfs_remove_link(&dev->kobj, "connector");
+ return ret;
+ }
+
+ port_dev->connector = data;
+
+ /*
+ * If there is already a USB device connected to the port, let the
+ * Type-C connector know about it immediately.
+ */
+ if (port_dev->child)
+ typec_attach(port_dev->connector, &port_dev->child->dev);
- return ret;
+ return 0;
}
static void connector_unbind(struct device *dev, struct device *connector, void *data)
{
+ struct usb_port *port_dev = to_usb_port(dev);
+
sysfs_remove_link(&connector->kobj, dev_name(dev));
sysfs_remove_link(&dev->kobj, "connector");
+ port_dev->connector = NULL;
}
static const struct component_ops connector_ops = {
@@ -698,6 +713,7 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
set_bit(port1, hub->power_bits);
port_dev->dev.parent = hub->intfdev;
if (hub_is_superspeed(hdev)) {
+ port_dev->is_superspeed = 1;
port_dev->usb3_lpm_u1_permit = 1;
port_dev->usb3_lpm_u2_permit = 1;
port_dev->dev.groups = port_dev_usb3_group;
@@ -705,8 +721,6 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
port_dev->dev.groups = port_dev_group;
port_dev->dev.type = &usb_port_device_type;
port_dev->dev.driver = &usb_port_driver;
- if (hub_is_superspeed(hub->hdev))
- port_dev->is_superspeed = 1;
dev_set_name(&port_dev->dev, "%s-port%d", dev_name(&hub->hdev->dev),
port1);
mutex_init(&port_dev->status_lock);
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 657f1f659ffa..35c7a4df8e71 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -4769,8 +4769,8 @@ fail3:
if (qh_allocated && qh->channel && qh->channel->qh == qh)
qh->channel->qh = NULL;
fail2:
- spin_unlock_irqrestore(&hsotg->lock, flags);
urb->hcpriv = NULL;
+ spin_unlock_irqrestore(&hsotg->lock, flags);
kfree(qtd);
fail1:
if (qh_allocated) {
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 93f52e371cdd..fb03162ae9b7 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -5,7 +5,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/usb/of.h>
#include <linux/pci_ids.h>
#include <linux/pci.h>
@@ -968,26 +968,17 @@ typedef void (*set_params_cb)(struct dwc2_hsotg *data);
int dwc2_init_params(struct dwc2_hsotg *hsotg)
{
- const struct of_device_id *match;
set_params_cb set_params;
dwc2_set_default_params(hsotg);
dwc2_get_device_properties(hsotg);
- match = of_match_device(dwc2_of_match_table, hsotg->dev);
- if (match && match->data) {
- set_params = match->data;
+ set_params = device_get_match_data(hsotg->dev);
+ if (set_params) {
set_params(hsotg);
- } else if (!match) {
- const struct acpi_device_id *amatch;
- const struct pci_device_id *pmatch = NULL;
-
- amatch = acpi_match_device(dwc2_acpi_match, hsotg->dev);
- if (amatch && amatch->driver_data) {
- set_params = (set_params_cb)amatch->driver_data;
- set_params(hsotg);
- } else if (!amatch)
- pmatch = pci_match_id(dwc2_pci_ids, to_pci_dev(hsotg->dev->parent));
+ } else {
+ const struct pci_device_id *pmatch =
+ pci_match_id(dwc2_pci_ids, to_pci_dev(hsotg->dev->parent));
if (pmatch && pmatch->driver_data) {
set_params = (set_params_cb)pmatch->driver_data;
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 98efcbb76c88..5fc27b20df63 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -178,4 +178,15 @@ config USB_DWC3_OCTEON
Only the host mode is currently supported.
Say 'Y' or 'M' here if you have one such device.
+config USB_DWC3_RTK
+ tristate "Realtek DWC3 Platform Driver"
+ depends on OF && ARCH_REALTEK
+ default USB_DWC3
+ select USB_ROLE_SWITCH
+ help
+ RTK DHC RTD SoCs have a DesignWare Core USB3 IP inside, with the
+ IP core configured for USB 2.0 and USB 3.0 in host or dual-role
+ mode.
+ Say 'Y' or 'M' here if you have such a device.
+
endif
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index fe1493d4bbe5..124eda2522d9 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -55,3 +55,4 @@ obj-$(CONFIG_USB_DWC3_QCOM) += dwc3-qcom.o
obj-$(CONFIG_USB_DWC3_IMX8MP) += dwc3-imx8mp.o
obj-$(CONFIG_USB_DWC3_XILINX) += dwc3-xilinx.o
obj-$(CONFIG_USB_DWC3_OCTEON) += dwc3-octeon.o
+obj-$(CONFIG_USB_DWC3_RTK) += dwc3-rtk.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 343d2570189f..0328c86ef806 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -854,8 +854,20 @@ static int dwc3_clk_enable(struct dwc3 *dwc)
if (ret)
goto disable_ref_clk;
+ ret = clk_prepare_enable(dwc->utmi_clk);
+ if (ret)
+ goto disable_susp_clk;
+
+ ret = clk_prepare_enable(dwc->pipe_clk);
+ if (ret)
+ goto disable_utmi_clk;
+
return 0;
+disable_utmi_clk:
+ clk_disable_unprepare(dwc->utmi_clk);
+disable_susp_clk:
+ clk_disable_unprepare(dwc->susp_clk);
disable_ref_clk:
clk_disable_unprepare(dwc->ref_clk);
disable_bus_clk:
@@ -865,6 +877,8 @@ disable_bus_clk:
static void dwc3_clk_disable(struct dwc3 *dwc)
{
+ clk_disable_unprepare(dwc->pipe_clk);
+ clk_disable_unprepare(dwc->utmi_clk);
clk_disable_unprepare(dwc->susp_clk);
clk_disable_unprepare(dwc->ref_clk);
clk_disable_unprepare(dwc->bus_clk);
@@ -1094,6 +1108,111 @@ static void dwc3_set_power_down_clk_scale(struct dwc3 *dwc)
}
}
+static void dwc3_config_threshold(struct dwc3 *dwc)
+{
+ u32 reg;
+ u8 rx_thr_num;
+ u8 rx_maxburst;
+ u8 tx_thr_num;
+ u8 tx_maxburst;
+
+ /*
+ * Must config both number of packets and max burst settings to enable
+ * RX and/or TX threshold.
+ */
+ if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
+ rx_thr_num = dwc->rx_thr_num_pkt_prd;
+ rx_maxburst = dwc->rx_max_burst_prd;
+ tx_thr_num = dwc->tx_thr_num_pkt_prd;
+ tx_maxburst = dwc->tx_max_burst_prd;
+
+ if (rx_thr_num && rx_maxburst) {
+ reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
+ reg |= DWC31_RXTHRNUMPKTSEL_PRD;
+
+ reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
+ reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);
+
+ reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
+ reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);
+
+ dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
+ }
+
+ if (tx_thr_num && tx_maxburst) {
+ reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
+ reg |= DWC31_TXTHRNUMPKTSEL_PRD;
+
+ reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
+ reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);
+
+ reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
+ reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);
+
+ dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
+ }
+ }
+
+ rx_thr_num = dwc->rx_thr_num_pkt;
+ rx_maxburst = dwc->rx_max_burst;
+ tx_thr_num = dwc->tx_thr_num_pkt;
+ tx_maxburst = dwc->tx_max_burst;
+
+ if (DWC3_IP_IS(DWC3)) {
+ if (rx_thr_num && rx_maxburst) {
+ reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
+ reg |= DWC3_GRXTHRCFG_PKTCNTSEL;
+
+ reg &= ~DWC3_GRXTHRCFG_RXPKTCNT(~0);
+ reg |= DWC3_GRXTHRCFG_RXPKTCNT(rx_thr_num);
+
+ reg &= ~DWC3_GRXTHRCFG_MAXRXBURSTSIZE(~0);
+ reg |= DWC3_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);
+
+ dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
+ }
+
+ if (tx_thr_num && tx_maxburst) {
+ reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
+ reg |= DWC3_GTXTHRCFG_PKTCNTSEL;
+
+ reg &= ~DWC3_GTXTHRCFG_TXPKTCNT(~0);
+ reg |= DWC3_GTXTHRCFG_TXPKTCNT(tx_thr_num);
+
+ reg &= ~DWC3_GTXTHRCFG_MAXTXBURSTSIZE(~0);
+ reg |= DWC3_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);
+
+ dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
+ }
+ } else {
+ if (rx_thr_num && rx_maxburst) {
+ reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
+ reg |= DWC31_GRXTHRCFG_PKTCNTSEL;
+
+ reg &= ~DWC31_GRXTHRCFG_RXPKTCNT(~0);
+ reg |= DWC31_GRXTHRCFG_RXPKTCNT(rx_thr_num);
+
+ reg &= ~DWC31_GRXTHRCFG_MAXRXBURSTSIZE(~0);
+ reg |= DWC31_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);
+
+ dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
+ }
+
+ if (tx_thr_num && tx_maxburst) {
+ reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
+ reg |= DWC31_GTXTHRCFG_PKTCNTSEL;
+
+ reg &= ~DWC31_GTXTHRCFG_TXPKTCNT(~0);
+ reg |= DWC31_GTXTHRCFG_TXPKTCNT(tx_thr_num);
+
+ reg &= ~DWC31_GTXTHRCFG_MAXTXBURSTSIZE(~0);
+ reg |= DWC31_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);
+
+ dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
+ }
+ }
+}
+
/**
* dwc3_core_init - Low-level initialization of DWC3 Core
* @dwc: Pointer to our controller context structure
@@ -1246,42 +1365,7 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
}
- /*
- * Must config both number of packets and max burst settings to enable
- * RX and/or TX threshold.
- */
- if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
- u8 rx_thr_num = dwc->rx_thr_num_pkt_prd;
- u8 rx_maxburst = dwc->rx_max_burst_prd;
- u8 tx_thr_num = dwc->tx_thr_num_pkt_prd;
- u8 tx_maxburst = dwc->tx_max_burst_prd;
-
- if (rx_thr_num && rx_maxburst) {
- reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
- reg |= DWC31_RXTHRNUMPKTSEL_PRD;
-
- reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
- reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);
-
- reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
- reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);
-
- dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
- }
-
- if (tx_thr_num && tx_maxburst) {
- reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
- reg |= DWC31_TXTHRNUMPKTSEL_PRD;
-
- reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
- reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);
-
- reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
- reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);
-
- dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
- }
- }
+ dwc3_config_threshold(dwc);
return 0;
@@ -1417,6 +1501,10 @@ static void dwc3_get_properties(struct dwc3 *dwc)
u8 lpm_nyet_threshold;
u8 tx_de_emphasis;
u8 hird_threshold;
+ u8 rx_thr_num_pkt = 0;
+ u8 rx_max_burst = 0;
+ u8 tx_thr_num_pkt = 0;
+ u8 tx_max_burst = 0;
u8 rx_thr_num_pkt_prd = 0;
u8 rx_max_burst_prd = 0;
u8 tx_thr_num_pkt_prd = 0;
@@ -1479,6 +1567,14 @@ static void dwc3_get_properties(struct dwc3 *dwc)
"snps,usb2-lpm-disable");
dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
"snps,usb2-gadget-lpm-disable");
+ device_property_read_u8(dev, "snps,rx-thr-num-pkt",
+ &rx_thr_num_pkt);
+ device_property_read_u8(dev, "snps,rx-max-burst",
+ &rx_max_burst);
+ device_property_read_u8(dev, "snps,tx-thr-num-pkt",
+ &tx_thr_num_pkt);
+ device_property_read_u8(dev, "snps,tx-max-burst",
+ &tx_max_burst);
device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
&rx_thr_num_pkt_prd);
device_property_read_u8(dev, "snps,rx-max-burst-prd",
@@ -1560,6 +1656,12 @@ static void dwc3_get_properties(struct dwc3 *dwc)
dwc->hird_threshold = hird_threshold;
+ dwc->rx_thr_num_pkt = rx_thr_num_pkt;
+ dwc->rx_max_burst = rx_max_burst;
+
+ dwc->tx_thr_num_pkt = tx_thr_num_pkt;
+ dwc->tx_max_burst = tx_max_burst;
+
dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd;
dwc->rx_max_burst_prd = rx_max_burst_prd;
@@ -1785,6 +1887,20 @@ static int dwc3_get_clocks(struct dwc3 *dwc)
}
}
+ /* specific to Rockchip RK3588 */
+ dwc->utmi_clk = devm_clk_get_optional(dev, "utmi");
+ if (IS_ERR(dwc->utmi_clk)) {
+ return dev_err_probe(dev, PTR_ERR(dwc->utmi_clk),
+ "could not get utmi clock\n");
+ }
+
+ /* specific to Rockchip RK3588 */
+ dwc->pipe_clk = devm_clk_get_optional(dev, "pipe");
+ if (IS_ERR(dwc->pipe_clk)) {
+ return dev_err_probe(dev, PTR_ERR(dwc->pipe_clk),
+ "could not get pipe clock\n");
+ }
+
return 0;
}
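The two new clocks in dwc3_get_clocks() rely on the optional-clock convention; a short hedged reminder of how that behaves (clock name is illustrative):

	/* returns NULL (not an error) when the DT node does not name the clock */
	clk = devm_clk_get_optional(dev, "utmi");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* the clk API treats a NULL clk as a no-op, so no special casing is needed */
	ret = clk_prepare_enable(clk);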
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index a69ac67d89fe..efe6caf4d0e8 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -211,6 +211,11 @@
#define DWC3_GRXTHRCFG_RXPKTCNT(n) (((n) & 0xf) << 24)
#define DWC3_GRXTHRCFG_PKTCNTSEL BIT(29)
+/* Global TX Threshold Configuration Register */
+#define DWC3_GTXTHRCFG_MAXTXBURSTSIZE(n) (((n) & 0xff) << 16)
+#define DWC3_GTXTHRCFG_TXPKTCNT(n) (((n) & 0xf) << 24)
+#define DWC3_GTXTHRCFG_PKTCNTSEL BIT(29)
+
/* Global RX Threshold Configuration Register for DWC_usb31 only */
#define DWC31_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 16)
#define DWC31_GRXTHRCFG_RXPKTCNT(n) (((n) & 0x1f) << 21)
@@ -991,6 +996,8 @@ struct dwc3_scratchpad_array {
* @bus_clk: clock for accessing the registers
* @ref_clk: reference clock
* @susp_clk: clock used when the SS phy is in low power (S3) state
+ * @utmi_clk: clock used for USB2 PHY communication
+ * @pipe_clk: clock used for USB3 PHY communication
* @reset: reset control
* @regs: base address for our registers
* @regs_size: address space size
@@ -1045,6 +1052,10 @@ struct dwc3_scratchpad_array {
* @test_mode_nr: test feature selector
* @lpm_nyet_threshold: LPM NYET response threshold
* @hird_threshold: HIRD threshold
+ * @rx_thr_num_pkt: USB receive packet count
+ * @rx_max_burst: max USB receive burst size
+ * @tx_thr_num_pkt: USB transmit packet count
+ * @tx_max_burst: max USB transmit burst size
* @rx_thr_num_pkt_prd: periodic ESS receive packet count
* @rx_max_burst_prd: max periodic ESS receive burst size
* @tx_thr_num_pkt_prd: periodic ESS transmit packet count
@@ -1106,6 +1117,8 @@ struct dwc3_scratchpad_array {
* instances in park mode.
* @parkmode_disable_hs_quirk: set if we need to disable all HighSpeed
* instances in park mode.
+ * @gfladj_refclk_lpm_sel: set if we need to enable SOF/ITP counter
+ * running based on ref_clk
* @tx_de_emphasis_quirk: set if we enable Tx de-emphasis quirk
* @tx_de_emphasis: Tx de-emphasis value
* 0 - -6dB de-emphasis
@@ -1156,6 +1169,8 @@ struct dwc3 {
struct clk *bus_clk;
struct clk *ref_clk;
struct clk *susp_clk;
+ struct clk *utmi_clk;
+ struct clk *pipe_clk;
struct reset_control *reset;
@@ -1273,6 +1288,10 @@ struct dwc3 {
u8 test_mode_nr;
u8 lpm_nyet_threshold;
u8 hird_threshold;
+ u8 rx_thr_num_pkt;
+ u8 rx_max_burst;
+ u8 tx_thr_num_pkt;
+ u8 tx_max_burst;
u8 rx_thr_num_pkt_prd;
u8 rx_max_burst_prd;
u8 tx_thr_num_pkt_prd;
diff --git a/drivers/usb/dwc3/dwc3-rtk.c b/drivers/usb/dwc3/dwc3-rtk.c
new file mode 100644
index 000000000000..590028e8fdcb
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-rtk.c
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dwc3-rtk.c - Realtek DWC3 Specific Glue layer
+ *
+ * Copyright (C) 2023 Realtek Semiconductor Corporation
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/suspend.h>
+#include <linux/sys_soc.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/of.h>
+#include <linux/usb/role.h>
+
+#include "core.h"
+
+#define WRAP_CTR_REG 0x0
+#define DISABLE_MULTI_REQ BIT(1)
+#define DESC_R2W_MULTI_DISABLE BIT(9)
+#define FORCE_PIPE3_PHY_STATUS_TO_0 BIT(13)
+
+#define WRAP_USB2_PHY_UTMI_REG 0x8
+#define TXHSVM_EN BIT(3)
+
+#define WRAP_PHY_PIPE_REG 0xC
+#define RESET_DISABLE_PIPE3_P0 BIT(0)
+#define CLOCK_ENABLE_FOR_PIPE3_PCLK BIT(1)
+
+#define WRAP_USB_HMAC_CTR0_REG 0x60
+#define U3PORT_DIS BIT(8)
+
+#define WRAP_USB2_PHY_REG 0x70
+#define USB2_PHY_EN_PHY_PLL_PORT0 BIT(12)
+#define USB2_PHY_EN_PHY_PLL_PORT1 BIT(13)
+#define USB2_PHY_SWITCH_MASK 0x707
+#define USB2_PHY_SWITCH_DEVICE 0x0
+#define USB2_PHY_SWITCH_HOST 0x606
+
+#define WRAP_APHY_REG 0x128
+#define USB3_MBIAS_ENABLE BIT(1)
+
+/* pm control */
+#define WRAP_USB_DBUS_PWR_CTRL_REG 0x160
+#define USB_DBUS_PWR_CTRL_REG 0x0
+#define DBUS_PWR_CTRL_EN BIT(0)
+
+struct dwc3_rtk {
+ struct device *dev;
+ void __iomem *regs;
+ size_t regs_size;
+ void __iomem *pm_base;
+
+ struct dwc3 *dwc;
+
+ enum usb_role cur_role;
+ struct usb_role_switch *role_switch;
+};
+
+static void switch_usb2_role(struct dwc3_rtk *rtk, enum usb_role role)
+{
+ void __iomem *reg;
+ int val;
+
+ reg = rtk->regs + WRAP_USB2_PHY_REG;
+ val = ~USB2_PHY_SWITCH_MASK & readl(reg);
+
+ switch (role) {
+ case USB_ROLE_DEVICE:
+ writel(USB2_PHY_SWITCH_DEVICE | val, reg);
+ break;
+ case USB_ROLE_HOST:
+ writel(USB2_PHY_SWITCH_HOST | val, reg);
+ break;
+ default:
+ dev_dbg(rtk->dev, "%s: role=%d\n", __func__, role);
+ break;
+ }
+}
+
+static void switch_dwc3_role(struct dwc3_rtk *rtk, enum usb_role role)
+{
+ if (!rtk->dwc->role_sw)
+ return;
+
+ usb_role_switch_set_role(rtk->dwc->role_sw, role);
+}
+
+static enum usb_role dwc3_rtk_get_role(struct dwc3_rtk *rtk)
+{
+ enum usb_role role;
+
+ role = rtk->cur_role;
+
+ if (rtk->dwc && rtk->dwc->role_sw)
+ role = usb_role_switch_get_role(rtk->dwc->role_sw);
+ else
+ dev_dbg(rtk->dev, "%s not usb_role_switch role=%d\n", __func__, role);
+
+ return role;
+}
+
+static void dwc3_rtk_set_role(struct dwc3_rtk *rtk, enum usb_role role)
+{
+ rtk->cur_role = role;
+
+ switch_dwc3_role(rtk, role);
+ mdelay(10);
+ switch_usb2_role(rtk, role);
+}
+
+#if IS_ENABLED(CONFIG_USB_ROLE_SWITCH)
+static int dwc3_usb_role_switch_set(struct usb_role_switch *sw, enum usb_role role)
+{
+ struct dwc3_rtk *rtk = usb_role_switch_get_drvdata(sw);
+
+ dwc3_rtk_set_role(rtk, role);
+
+ return 0;
+}
+
+static enum usb_role dwc3_usb_role_switch_get(struct usb_role_switch *sw)
+{
+ struct dwc3_rtk *rtk = usb_role_switch_get_drvdata(sw);
+
+ return dwc3_rtk_get_role(rtk);
+}
+
+static int dwc3_rtk_setup_role_switch(struct dwc3_rtk *rtk)
+{
+ struct usb_role_switch_desc dwc3_role_switch = {NULL};
+
+ dwc3_role_switch.name = dev_name(rtk->dev);
+ dwc3_role_switch.driver_data = rtk;
+ dwc3_role_switch.allow_userspace_control = true;
+ dwc3_role_switch.fwnode = dev_fwnode(rtk->dev);
+ dwc3_role_switch.set = dwc3_usb_role_switch_set;
+ dwc3_role_switch.get = dwc3_usb_role_switch_get;
+ rtk->role_switch = usb_role_switch_register(rtk->dev, &dwc3_role_switch);
+ if (IS_ERR(rtk->role_switch))
+ return PTR_ERR(rtk->role_switch);
+
+ return 0;
+}
+
+static int dwc3_rtk_remove_role_switch(struct dwc3_rtk *rtk)
+{
+ if (rtk->role_switch)
+ usb_role_switch_unregister(rtk->role_switch);
+
+ rtk->role_switch = NULL;
+
+ return 0;
+}
+#else
+#define dwc3_rtk_setup_role_switch(x) 0
+#define dwc3_rtk_remove_role_switch(x) 0
+#endif
+
+static const char *const speed_names[] = {
+ [USB_SPEED_UNKNOWN] = "UNKNOWN",
+ [USB_SPEED_LOW] = "low-speed",
+ [USB_SPEED_FULL] = "full-speed",
+ [USB_SPEED_HIGH] = "high-speed",
+ [USB_SPEED_WIRELESS] = "wireless",
+ [USB_SPEED_SUPER] = "super-speed",
+ [USB_SPEED_SUPER_PLUS] = "super-speed-plus",
+};
+
+static enum usb_device_speed __get_dwc3_maximum_speed(struct device_node *np)
+{
+ struct device_node *dwc3_np;
+ const char *maximum_speed;
+ int ret;
+
+ dwc3_np = of_get_compatible_child(np, "snps,dwc3");
+ if (!dwc3_np)
+ return USB_SPEED_UNKNOWN;
+
+ ret = of_property_read_string(dwc3_np, "maximum-speed", &maximum_speed);
+ if (ret < 0)
+ return USB_SPEED_UNKNOWN;
+
+ ret = match_string(speed_names, ARRAY_SIZE(speed_names), maximum_speed);
+
+ return (ret < 0) ? USB_SPEED_UNKNOWN : ret;
+}
+
+static int dwc3_rtk_init(struct dwc3_rtk *rtk)
+{
+ struct device *dev = rtk->dev;
+ void __iomem *reg;
+ int val;
+ enum usb_device_speed maximum_speed;
+ const struct soc_device_attribute rtk_soc_kylin_a00[] = {
+ { .family = "Realtek Kylin", .revision = "A00", },
+ { /* empty */ } };
+ const struct soc_device_attribute rtk_soc_hercules[] = {
+ { .family = "Realtek Hercules", }, { /* empty */ } };
+ const struct soc_device_attribute rtk_soc_thor[] = {
+ { .family = "Realtek Thor", }, { /* empty */ } };
+
+ if (soc_device_match(rtk_soc_kylin_a00)) {
+ reg = rtk->regs + WRAP_CTR_REG;
+ val = readl(reg);
+ writel(DISABLE_MULTI_REQ | val, reg);
+ dev_info(dev, "[bug fixed] 1295/1296 A00: add workaround to disable multiple requests for D-Bus");
+ }
+
+ if (soc_device_match(rtk_soc_hercules)) {
+ reg = rtk->regs + WRAP_USB2_PHY_REG;
+ val = readl(reg);
+ writel(USB2_PHY_EN_PHY_PLL_PORT1 | val, reg);
+ dev_info(dev, "[bug fixed] 1395 add workaround to disable usb2 port 2 suspend!");
+ }
+
+ reg = rtk->regs + WRAP_USB2_PHY_UTMI_REG;
+ val = readl(reg);
+ writel(TXHSVM_EN | val, reg);
+
+ maximum_speed = __get_dwc3_maximum_speed(dev->of_node);
+ if (maximum_speed != USB_SPEED_UNKNOWN && maximum_speed <= USB_SPEED_HIGH) {
+ if (soc_device_match(rtk_soc_thor)) {
+ reg = rtk->regs + WRAP_USB_HMAC_CTR0_REG;
+ val = readl(reg);
+ writel(U3PORT_DIS | val, reg);
+ } else {
+ reg = rtk->regs + WRAP_CTR_REG;
+ val = readl(reg);
+ writel(FORCE_PIPE3_PHY_STATUS_TO_0 | val, reg);
+
+ reg = rtk->regs + WRAP_PHY_PIPE_REG;
+ val = ~CLOCK_ENABLE_FOR_PIPE3_PCLK & readl(reg);
+ writel(RESET_DISABLE_PIPE3_P0 | val, reg);
+
+ reg = rtk->regs + WRAP_USB_HMAC_CTR0_REG;
+ val = readl(reg);
+ writel(U3PORT_DIS | val, reg);
+
+ reg = rtk->regs + WRAP_APHY_REG;
+ val = readl(reg);
+ writel(~USB3_MBIAS_ENABLE & val, reg);
+
+ dev_dbg(rtk->dev, "%s: disable usb 3.0 phy\n", __func__);
+ }
+ }
+
+ reg = rtk->regs + WRAP_CTR_REG;
+ val = readl(reg);
+ writel(DESC_R2W_MULTI_DISABLE | val, reg);
+
+ /* Set phy Dp/Dm initial state to host mode to avoid the Dp glitch */
+ reg = rtk->regs + WRAP_USB2_PHY_REG;
+ val = ~USB2_PHY_SWITCH_MASK & readl(reg);
+ writel(USB2_PHY_SWITCH_HOST | val, reg);
+
+ if (rtk->pm_base) {
+ reg = rtk->pm_base + USB_DBUS_PWR_CTRL_REG;
+ val = DBUS_PWR_CTRL_EN | readl(reg);
+ writel(val, reg);
+ }
+
+ return 0;
+}
+
+static int dwc3_rtk_probe_dwc3_core(struct dwc3_rtk *rtk)
+{
+ struct device *dev = rtk->dev;
+ struct device_node *node = dev->of_node;
+ struct platform_device *dwc3_pdev;
+ struct device *dwc3_dev;
+ struct device_node *dwc3_node;
+ enum usb_dr_mode dr_mode;
+ int ret = 0;
+
+ ret = dwc3_rtk_init(rtk);
+ if (ret)
+ return -EINVAL;
+
+ ret = of_platform_populate(node, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to add dwc3 core\n");
+ return ret;
+ }
+
+ dwc3_node = of_get_compatible_child(node, "snps,dwc3");
+ if (!dwc3_node) {
+ dev_err(dev, "failed to find dwc3 core node\n");
+ ret = -ENODEV;
+ goto depopulate;
+ }
+
+ dwc3_pdev = of_find_device_by_node(dwc3_node);
+ if (!dwc3_pdev) {
+ dev_err(dev, "failed to find dwc3 core platform_device\n");
+ ret = -ENODEV;
+ goto err_node_put;
+ }
+
+ dwc3_dev = &dwc3_pdev->dev;
+ rtk->dwc = platform_get_drvdata(dwc3_pdev);
+ if (!rtk->dwc) {
+ dev_err(dev, "failed to find dwc3 core\n");
+ ret = -ENODEV;
+ goto err_pdev_put;
+ }
+
+ dr_mode = usb_get_dr_mode(dwc3_dev);
+ if (dr_mode != rtk->dwc->dr_mode) {
+ dev_info(dev, "dts set dr_mode=%d, but dwc3 set dr_mode=%d\n",
+ dr_mode, rtk->dwc->dr_mode);
+ dr_mode = rtk->dwc->dr_mode;
+ }
+
+ switch (dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
+ rtk->cur_role = USB_ROLE_DEVICE;
+ break;
+ case USB_DR_MODE_HOST:
+ rtk->cur_role = USB_ROLE_HOST;
+ break;
+ default:
+ dev_dbg(rtk->dev, "%s: dr_mode=%d\n", __func__, dr_mode);
+ break;
+ }
+
+ if (device_property_read_bool(dwc3_dev, "usb-role-switch")) {
+ ret = dwc3_rtk_setup_role_switch(rtk);
+ if (ret) {
+ dev_err(dev, "dwc3_rtk_setup_role_switch fail=%d\n", ret);
+ goto err_pdev_put;
+ }
+ rtk->cur_role = dwc3_rtk_get_role(rtk);
+ }
+
+ switch_usb2_role(rtk, rtk->cur_role);
+
+ return 0;
+
+err_pdev_put:
+ platform_device_put(dwc3_pdev);
+err_node_put:
+ of_node_put(dwc3_node);
+depopulate:
+ of_platform_depopulate(dev);
+
+ return ret;
+}
+
+static int dwc3_rtk_probe(struct platform_device *pdev)
+{
+ struct dwc3_rtk *rtk;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *regs;
+ int ret = 0;
+
+ rtk = devm_kzalloc(dev, sizeof(*rtk), GFP_KERNEL);
+ if (!rtk) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ platform_set_drvdata(pdev, rtk);
+
+ rtk->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "missing memory resource\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs)) {
+ ret = PTR_ERR(regs);
+ goto out;
+ }
+
+ rtk->regs = regs;
+ rtk->regs_size = resource_size(res);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ rtk->pm_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(rtk->pm_base)) {
+ ret = PTR_ERR(rtk->pm_base);
+ goto out;
+ }
+ }
+
+ ret = dwc3_rtk_probe_dwc3_core(rtk);
+
+out:
+ return ret;
+}
+
+static void dwc3_rtk_remove(struct platform_device *pdev)
+{
+ struct dwc3_rtk *rtk = platform_get_drvdata(pdev);
+
+ rtk->dwc = NULL;
+
+ dwc3_rtk_remove_role_switch(rtk);
+
+ of_platform_depopulate(rtk->dev);
+}
+
+static void dwc3_rtk_shutdown(struct platform_device *pdev)
+{
+ struct dwc3_rtk *rtk = platform_get_drvdata(pdev);
+
+ of_platform_depopulate(rtk->dev);
+}
+
+static const struct of_device_id rtk_dwc3_match[] = {
+ { .compatible = "realtek,rtd-dwc3" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rtk_dwc3_match);
+
+#ifdef CONFIG_PM_SLEEP
+static int dwc3_rtk_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int dwc3_rtk_resume(struct device *dev)
+{
+ struct dwc3_rtk *rtk = dev_get_drvdata(dev);
+
+ dwc3_rtk_init(rtk);
+
+ switch_usb2_role(rtk, rtk->cur_role);
+
+ /* runtime set active to reflect active state. */
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dwc3_rtk_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_rtk_suspend, dwc3_rtk_resume)
+};
+
+#define DEV_PM_OPS (&dwc3_rtk_dev_pm_ops)
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static struct platform_driver dwc3_rtk_driver = {
+ .probe = dwc3_rtk_probe,
+ .remove_new = dwc3_rtk_remove,
+ .driver = {
+ .name = "rtk-dwc3",
+ .of_match_table = rtk_dwc3_match,
+ .pm = DEV_PM_OPS,
+ },
+ .shutdown = dwc3_rtk_shutdown,
+};
+
+module_platform_driver(dwc3_rtk_driver);
+
+MODULE_AUTHOR("Stanley Chang <stanley_chang@realtek.com>");
+MODULE_DESCRIPTION("DesignWare USB3 Realtek Glue Layer");
+MODULE_ALIAS("platform:rtk-dwc3");
+MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: phy_rtk_usb2 phy_rtk_usb3");
diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
index 19307d24f3a0..5b7e92f476de 100644
--- a/drivers/usb/dwc3/dwc3-xilinx.c
+++ b/drivers/usb/dwc3/dwc3-xilinx.c
@@ -32,9 +32,6 @@
#define XLNX_USB_TRAFFIC_ROUTE_CONFIG 0x005C
#define XLNX_USB_TRAFFIC_ROUTE_FPD 0x1
-/* Versal USB Reset ID */
-#define VERSAL_USB_RESET_ID 0xC104036
-
#define XLNX_USB_FPD_PIPE_CLK 0x7c
#define PIPE_CLK_DESELECT 1
#define PIPE_CLK_SELECT 0
@@ -72,20 +69,23 @@ static void dwc3_xlnx_mask_phy_rst(struct dwc3_xlnx *priv_data, bool mask)
static int dwc3_xlnx_init_versal(struct dwc3_xlnx *priv_data)
{
struct device *dev = priv_data->dev;
+ struct reset_control *crst;
int ret;
+ crst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(crst))
+ return dev_err_probe(dev, PTR_ERR(crst), "failed to get reset signal\n");
+
dwc3_xlnx_mask_phy_rst(priv_data, false);
/* Assert and De-assert reset */
- ret = zynqmp_pm_reset_assert(VERSAL_USB_RESET_ID,
- PM_RESET_ACTION_ASSERT);
+ ret = reset_control_assert(crst);
if (ret < 0) {
dev_err_probe(dev, ret, "failed to assert Reset\n");
return ret;
}
- ret = zynqmp_pm_reset_assert(VERSAL_USB_RESET_ID,
- PM_RESET_ACTION_RELEASE);
+ ret = reset_control_deassert(crst);
if (ret < 0) {
dev_err_probe(dev, ret, "failed to De-assert Reset\n");
return ret;
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index e6ab8cc225ff..cc0ed29a4adc 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1410,7 +1410,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
struct usb_composite_dev *cdev = c->cdev;
struct f_ncm *ncm = func_to_ncm(f);
struct usb_string *us;
- int status;
+ int status = 0;
struct usb_ep *ep;
struct f_ncm_opts *ncm_opts;
@@ -1428,22 +1428,17 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
f->os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc;
}
- /*
- * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
- * configurations are bound in sequence with list_for_each_entry,
- * in each configuration its functions are bound in sequence
- * with list_for_each_entry, so we assume no race condition
- * with regard to ncm_opts->bound access
- */
- if (!ncm_opts->bound) {
- mutex_lock(&ncm_opts->lock);
- gether_set_gadget(ncm_opts->net, cdev->gadget);
+ mutex_lock(&ncm_opts->lock);
+ gether_set_gadget(ncm_opts->net, cdev->gadget);
+ if (!ncm_opts->bound)
status = gether_register_netdev(ncm_opts->net);
- mutex_unlock(&ncm_opts->lock);
- if (status)
- goto fail;
- ncm_opts->bound = true;
- }
+ mutex_unlock(&ncm_opts->lock);
+
+ if (status)
+ goto fail;
+
+ ncm_opts->bound = true;
+
us = usb_gstrings_attach(cdev, ncm_strings,
ARRAY_SIZE(ncm_string_defs));
if (IS_ERR(us)) {
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 0219cd79493a..f9a0f07a7476 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -212,7 +212,7 @@ static struct uac2_input_terminal_descriptor io_in_it_desc = {
.bDescriptorSubtype = UAC_INPUT_TERMINAL,
/* .bTerminalID = DYNAMIC */
- .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_MICROPHONE),
+ /* .wTerminalType = DYNAMIC */
.bAssocTerminal = 0,
/* .bCSourceID = DYNAMIC */
.iChannelNames = 0,
@@ -240,7 +240,7 @@ static struct uac2_output_terminal_descriptor io_out_ot_desc = {
.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
/* .bTerminalID = DYNAMIC */
- .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_SPEAKER),
+ /* .wTerminalType = DYNAMIC */
.bAssocTerminal = 0,
/* .bSourceID = DYNAMIC */
/* .bCSourceID = DYNAMIC */
@@ -977,6 +977,9 @@ static void setup_descriptor(struct f_uac2_opts *opts)
iad_desc.bInterfaceCount++;
}
+ io_in_it_desc.wTerminalType = cpu_to_le16(opts->c_terminal_type);
+ io_out_ot_desc.wTerminalType = cpu_to_le16(opts->p_terminal_type);
+
setup_headers(opts, fs_audio_desc, USB_SPEED_FULL);
setup_headers(opts, hs_audio_desc, USB_SPEED_HIGH);
setup_headers(opts, ss_audio_desc, USB_SPEED_SUPER);
@@ -2095,6 +2098,9 @@ UAC2_ATTRIBUTE(s16, c_volume_res);
UAC2_ATTRIBUTE(u32, fb_max);
UAC2_ATTRIBUTE_STRING(function_name);
+UAC2_ATTRIBUTE(s16, p_terminal_type);
+UAC2_ATTRIBUTE(s16, c_terminal_type);
+
static struct configfs_attribute *f_uac2_attrs[] = {
&f_uac2_opts_attr_p_chmask,
&f_uac2_opts_attr_p_srate,
@@ -2122,6 +2128,9 @@ static struct configfs_attribute *f_uac2_attrs[] = {
&f_uac2_opts_attr_function_name,
+ &f_uac2_opts_attr_p_terminal_type,
+ &f_uac2_opts_attr_c_terminal_type,
+
NULL,
};
@@ -2180,6 +2189,9 @@ static struct usb_function_instance *afunc_alloc_inst(void)
snprintf(opts->function_name, sizeof(opts->function_name), "Source/Sink");
+ opts->p_terminal_type = UAC2_DEF_P_TERM_TYPE;
+ opts->c_terminal_type = UAC2_DEF_C_TERM_TYPE;
+
return &opts->func_inst;
}
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index faa398109431..786379f1b7b7 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -516,6 +516,7 @@ uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
void *mem;
switch (speed) {
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
uvc_control_desc = uvc->desc.ss_control;
uvc_streaming_cls = uvc->desc.ss_streaming;
@@ -564,7 +565,8 @@ uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
bytes += uvc_interrupt_ep.bLength + uvc_interrupt_cs_ep.bLength;
n_desc += 2;
- if (speed == USB_SPEED_SUPER) {
+ if (speed == USB_SPEED_SUPER ||
+ speed == USB_SPEED_SUPER_PLUS) {
bytes += uvc_ss_interrupt_comp.bLength;
n_desc += 1;
}
@@ -619,7 +621,8 @@ uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
if (uvc->enable_interrupt_ep) {
UVC_COPY_DESCRIPTOR(mem, dst, &uvc_interrupt_ep);
- if (speed == USB_SPEED_SUPER)
+ if (speed == USB_SPEED_SUPER ||
+ speed == USB_SPEED_SUPER_PLUS)
UVC_COPY_DESCRIPTOR(mem, dst, &uvc_ss_interrupt_comp);
UVC_COPY_DESCRIPTOR(mem, dst, &uvc_interrupt_cs_ep);
@@ -795,6 +798,13 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
goto error;
}
+ f->ssp_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_SUPER_PLUS);
+ if (IS_ERR(f->ssp_descriptors)) {
+ ret = PTR_ERR(f->ssp_descriptors);
+ f->ssp_descriptors = NULL;
+ goto error;
+ }
+
/* Preallocate control endpoint request. */
uvc->control_req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
uvc->control_buf = kmalloc(UVC_MAX_REQUEST_SIZE, GFP_KERNEL);
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 4bb0553da658..9d1c40c152d8 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -1200,7 +1200,7 @@ void gether_disconnect(struct gether *link)
DBG(dev, "%s\n", __func__);
- netif_stop_queue(dev->net);
+ netif_device_detach(dev->net);
netif_carrier_off(dev->net);
/* disable endpoints, forcing (synchronous) completion
diff --git a/drivers/usb/gadget/function/u_uac2.h b/drivers/usb/gadget/function/u_uac2.h
index 0510c9bad58d..5e81bdd6c5fb 100644
--- a/drivers/usb/gadget/function/u_uac2.h
+++ b/drivers/usb/gadget/function/u_uac2.h
@@ -35,6 +35,11 @@
#define UAC2_DEF_REQ_NUM 2
#define UAC2_DEF_INT_REQ_NUM 10
+#define UAC2_DEF_P_TERM_TYPE 0x301
+ /* UAC_OUTPUT_TERMINAL_SPEAKER */
+#define UAC2_DEF_C_TERM_TYPE 0x201
+ /* UAC_INPUT_TERMINAL_MICROPHONE */
+
struct f_uac2_opts {
struct usb_function_instance func_inst;
int p_chmask;
@@ -65,6 +70,9 @@ struct f_uac2_opts {
char function_name[32];
+ s16 p_terminal_type;
+ s16 c_terminal_type;
+
struct mutex lock;
int refcnt;
};
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index cdc0926100fd..03179b1880fd 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -31,6 +31,12 @@
#include <linux/usb/gadgetfs.h>
#include <linux/usb/gadget.h>
+#include <linux/usb/composite.h> /* for USB_GADGET_DELAYED_STATUS */
+
+/* Undef helpers from linux/usb/composite.h as gadgetfs redefines them */
+#undef DBG
+#undef ERROR
+#undef INFO
/*
@@ -1511,7 +1517,16 @@ delegate:
event->u.setup = *ctrl;
ep0_readable (dev);
spin_unlock (&dev->lock);
- return 0;
+ /*
+ * Return USB_GADGET_DELAYED_STATUS as a workaround to
+ * stop some UDC drivers (e.g. dwc3) from automatically
+ * proceeding with the status stage for 0-length
+ * transfers.
+ * Should be removed once all UDC drivers are fixed to
+ * always delay the status stage until a response is
+ * queued to EP0.
+ */
+ return w_length == 0 ? USB_GADGET_DELAYED_STATUS : 0;
}
}
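/*
 * Editor's sketch (hedged, not part of the patch): the general shape of a
 * gadget setup() callback that defers the status stage of a 0-length control
 * transfer, as the gadgetfs hunk above and the raw-gadget hunk below now do.
 * Names are illustrative; the real handlers also forward the setup packet to
 * a higher layer before returning.
 */
static int example_setup(struct usb_gadget *gadget,
			 const struct usb_ctrlrequest *ctrl)
{
	u16 w_length = le16_to_cpu(ctrl->wLength);

	/* ... queue the setup packet for userspace / the function driver ... */

	/*
	 * For 0-length transfers, ask the UDC not to run the status stage
	 * automatically; it completes once a response is queued to EP0.
	 */
	return w_length == 0 ? USB_GADGET_DELAYED_STATUS : 0;
}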
diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
index e549022642e5..399fca32a8ac 100644
--- a/drivers/usb/gadget/legacy/raw_gadget.c
+++ b/drivers/usb/gadget/legacy/raw_gadget.c
@@ -25,6 +25,7 @@
#include <linux/usb/ch9.h>
#include <linux/usb/ch11.h>
#include <linux/usb/gadget.h>
+#include <linux/usb/composite.h>
#include <uapi/linux/usb/raw_gadget.h>
@@ -64,7 +65,7 @@ static int raw_event_queue_add(struct raw_event_queue *queue,
struct usb_raw_event *event;
spin_lock_irqsave(&queue->lock, flags);
- if (WARN_ON(queue->size >= RAW_EVENT_QUEUE_SIZE)) {
+ if (queue->size >= RAW_EVENT_QUEUE_SIZE) {
spin_unlock_irqrestore(&queue->lock, flags);
return -ENOMEM;
}
@@ -310,9 +311,10 @@ static int gadget_bind(struct usb_gadget *gadget,
dev->eps_num = i;
spin_unlock_irqrestore(&dev->lock, flags);
+ dev_dbg(&gadget->dev, "gadget connected\n");
ret = raw_queue_event(dev, USB_RAW_EVENT_CONNECT, 0, NULL);
if (ret < 0) {
- dev_err(&gadget->dev, "failed to queue event\n");
+ dev_err(&gadget->dev, "failed to queue connect event\n");
set_gadget_data(gadget, NULL);
return ret;
}
@@ -357,20 +359,65 @@ static int gadget_setup(struct usb_gadget *gadget,
ret = raw_queue_event(dev, USB_RAW_EVENT_CONTROL, sizeof(*ctrl), ctrl);
if (ret < 0)
- dev_err(&gadget->dev, "failed to queue event\n");
+ dev_err(&gadget->dev, "failed to queue control event\n");
goto out;
out_unlock:
spin_unlock_irqrestore(&dev->lock, flags);
out:
+ if (ret == 0 && ctrl->wLength == 0) {
+ /*
+ * Return USB_GADGET_DELAYED_STATUS as a workaround to stop
+ * some UDC drivers (e.g. dwc3) from automatically proceeding
+ * with the status stage for 0-length transfers.
+ * Should be removed once all UDC drivers are fixed to always
+ * delay the status stage until a response is queued to EP0.
+ */
+ return USB_GADGET_DELAYED_STATUS;
+ }
return ret;
}
-/* These are currently unused but present in case UDC driver requires them. */
-static void gadget_disconnect(struct usb_gadget *gadget) { }
-static void gadget_suspend(struct usb_gadget *gadget) { }
-static void gadget_resume(struct usb_gadget *gadget) { }
-static void gadget_reset(struct usb_gadget *gadget) { }
+static void gadget_disconnect(struct usb_gadget *gadget)
+{
+ struct raw_dev *dev = get_gadget_data(gadget);
+ int ret;
+
+ dev_dbg(&gadget->dev, "gadget disconnected\n");
+ ret = raw_queue_event(dev, USB_RAW_EVENT_DISCONNECT, 0, NULL);
+ if (ret < 0)
+ dev_err(&gadget->dev, "failed to queue disconnect event\n");
+}
+static void gadget_suspend(struct usb_gadget *gadget)
+{
+ struct raw_dev *dev = get_gadget_data(gadget);
+ int ret;
+
+ dev_dbg(&gadget->dev, "gadget suspended\n");
+ ret = raw_queue_event(dev, USB_RAW_EVENT_SUSPEND, 0, NULL);
+ if (ret < 0)
+ dev_err(&gadget->dev, "failed to queue suspend event\n");
+}
+static void gadget_resume(struct usb_gadget *gadget)
+{
+ struct raw_dev *dev = get_gadget_data(gadget);
+ int ret;
+
+ dev_dbg(&gadget->dev, "gadget resumed\n");
+ ret = raw_queue_event(dev, USB_RAW_EVENT_RESUME, 0, NULL);
+ if (ret < 0)
+ dev_err(&gadget->dev, "failed to queue resume event\n");
+}
+static void gadget_reset(struct usb_gadget *gadget)
+{
+ struct raw_dev *dev = get_gadget_data(gadget);
+ int ret;
+
+ dev_dbg(&gadget->dev, "gadget reset\n");
+ ret = raw_queue_event(dev, USB_RAW_EVENT_RESET, 0, NULL);
+ if (ret < 0)
+ dev_err(&gadget->dev, "failed to queue reset event\n");
+}
/*----------------------------------------------------------------------*/
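/*
 * Editor's sketch (hedged, not part of the patch): a userspace consumer of
 * the newly queued disconnect/suspend/resume/reset events via the existing
 * raw-gadget uapi. Buffer size and names are arbitrary assumptions; error
 * handling is omitted.
 */
#include <sys/ioctl.h>
#include <linux/usb/raw_gadget.h>

struct example_event_buf {
	struct usb_raw_event	hdr;
	char			data[64];
};

static void example_fetch_event(int fd)
{
	struct example_event_buf ev = { .hdr.length = sizeof(ev.data) };

	ioctl(fd, USB_RAW_IOCTL_EVENT_FETCH, &ev.hdr);

	switch (ev.hdr.type) {
	case USB_RAW_EVENT_CONTROL:
		/* setup packet is in ev.data */
		break;
	case USB_RAW_EVENT_DISCONNECT:
	case USB_RAW_EVENT_SUSPEND:
	case USB_RAW_EVENT_RESUME:
	case USB_RAW_EVENT_RESET:
		/* new bus-state notifications added by this patch */
		break;
	default:
		break;
	}
}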
@@ -663,12 +710,12 @@ static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
if (WARN_ON(in && dev->ep0_out_pending)) {
ret = -ENODEV;
dev->state = STATE_DEV_FAILED;
- goto out_done;
+ goto out_unlock;
}
if (WARN_ON(!in && dev->ep0_in_pending)) {
ret = -ENODEV;
dev->state = STATE_DEV_FAILED;
- goto out_done;
+ goto out_unlock;
}
dev->req->buf = data;
@@ -682,8 +729,7 @@ static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
dev_err(&dev->gadget->dev,
"fail, usb_ep_queue returned %d\n", ret);
spin_lock_irqsave(&dev->lock, flags);
- dev->state = STATE_DEV_FAILED;
- goto out_done;
+ goto out_queue_failed;
}
ret = wait_for_completion_interruptible(&dev->ep0_done);
@@ -692,13 +738,16 @@ static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
usb_ep_dequeue(dev->gadget->ep0, dev->req);
wait_for_completion(&dev->ep0_done);
spin_lock_irqsave(&dev->lock, flags);
- goto out_done;
+ if (dev->ep0_status == -ECONNRESET)
+ dev->ep0_status = -EINTR;
+ goto out_interrupted;
}
spin_lock_irqsave(&dev->lock, flags);
- ret = dev->ep0_status;
-out_done:
+out_interrupted:
+ ret = dev->ep0_status;
+out_queue_failed:
dev->ep0_urb_queued = false;
out_unlock:
spin_unlock_irqrestore(&dev->lock, flags);
@@ -1066,8 +1115,7 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
dev_err(&dev->gadget->dev,
"fail, usb_ep_queue returned %d\n", ret);
spin_lock_irqsave(&dev->lock, flags);
- dev->state = STATE_DEV_FAILED;
- goto out_done;
+ goto out_queue_failed;
}
ret = wait_for_completion_interruptible(&done);
@@ -1076,13 +1124,16 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
usb_ep_dequeue(ep->ep, ep->req);
wait_for_completion(&done);
spin_lock_irqsave(&dev->lock, flags);
- goto out_done;
+ if (ep->status == -ECONNRESET)
+ ep->status = -EINTR;
+ goto out_interrupted;
}
spin_lock_irqsave(&dev->lock, flags);
- ret = ep->status;
-out_done:
+out_interrupted:
+ ret = ep->status;
+out_queue_failed:
ep->urb_queued = false;
out_unlock:
spin_unlock_irqrestore(&dev->lock, flags);
diff --git a/drivers/usb/gadget/udc/aspeed_udc.c b/drivers/usb/gadget/udc/aspeed_udc.c
index 2ef89a442f50..3916c8e2ba01 100644
--- a/drivers/usb/gadget/udc/aspeed_udc.c
+++ b/drivers/usb/gadget/udc/aspeed_udc.c
@@ -1432,15 +1432,24 @@ static void ast_udc_init_hw(struct ast_udc_dev *udc)
ast_udc_write(udc, 0, AST_UDC_EP0_CTRL);
}
-static int ast_udc_remove(struct platform_device *pdev)
+static void ast_udc_remove(struct platform_device *pdev)
{
struct ast_udc_dev *udc = platform_get_drvdata(pdev);
unsigned long flags;
u32 ctrl;
usb_del_gadget_udc(&udc->gadget);
- if (udc->driver)
- return -EBUSY;
+ if (udc->driver) {
+ /*
+ * This is broken as only some cleanup is skipped, *udev is
+ * freed and the register mapping goes away. Any further usage
+ * probably crashes. Also the device is unbound, so the skipped
+ * cleanup is never caught up later.
+ */
+ dev_alert(&pdev->dev,
+ "Driver is busy and still going away. Fasten your seat belts!\n");
+ return;
+ }
spin_lock_irqsave(&udc->lock, flags);
@@ -1459,8 +1468,6 @@ static int ast_udc_remove(struct platform_device *pdev)
udc->ep0_buf_dma);
udc->ep0_buf = NULL;
-
- return 0;
}
static int ast_udc_probe(struct platform_device *pdev)
@@ -1581,7 +1588,7 @@ MODULE_DEVICE_TABLE(of, ast_udc_of_dt_ids);
static struct platform_driver ast_udc_driver = {
.probe = ast_udc_probe,
- .remove = ast_udc_remove,
+ .remove_new = ast_udc_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = ast_udc_of_dt_ids,
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index 922b4187004b..30ea4a9d5301 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -2000,6 +2000,7 @@ static int at91udc_resume(struct platform_device *pdev)
#endif
static struct platform_driver at91_udc_driver = {
+ .probe = at91udc_probe,
.remove = at91udc_remove,
.shutdown = at91udc_shutdown,
.suspend = at91udc_suspend,
@@ -2010,7 +2011,7 @@ static struct platform_driver at91_udc_driver = {
},
};
-module_platform_driver_probe(at91_udc_driver, at91udc_probe);
+module_platform_driver(at91_udc_driver);
MODULE_DESCRIPTION("AT91 udc driver");
MODULE_AUTHOR("Thomas Rathbone, David Brownell");
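/*
 * Editor's note (hedged): module_platform_driver_probe() registers a driver
 * whose probe can only run once at registration time (no probe deferral, no
 * later rebinding), which is why this and the following UDC drivers are
 * converted to a plain module_platform_driver() with .probe wired up in the
 * platform_driver. Minimal shape of the converted form, with hypothetical
 * names:
 */
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_udc_probe(struct platform_device *pdev)
{
	return 0;
}

static void example_udc_remove(struct platform_device *pdev)
{
}

static struct platform_driver example_udc_driver = {
	.probe		= example_udc_probe,
	.remove_new	= example_udc_remove,
	.driver		= {
		.name	= "example-udc",
	},
};
module_platform_driver(example_udc_driver);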
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 7166d1117742..ded9531f141b 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1126,12 +1126,12 @@ EXPORT_SYMBOL_GPL(usb_gadget_set_state);
/* ------------------------------------------------------------------------- */
/* Acquire connect_lock before calling this function. */
-static void usb_udc_connect_control_locked(struct usb_udc *udc) __must_hold(&udc->connect_lock)
+static int usb_udc_connect_control_locked(struct usb_udc *udc) __must_hold(&udc->connect_lock)
{
if (udc->vbus)
- usb_gadget_connect_locked(udc->gadget);
+ return usb_gadget_connect_locked(udc->gadget);
else
- usb_gadget_disconnect_locked(udc->gadget);
+ return usb_gadget_disconnect_locked(udc->gadget);
}
static void vbus_event_work(struct work_struct *work)
@@ -1605,12 +1605,23 @@ static int gadget_bind_driver(struct device *dev)
}
usb_gadget_enable_async_callbacks(udc);
udc->allow_connect = true;
- usb_udc_connect_control_locked(udc);
+ ret = usb_udc_connect_control_locked(udc);
+ if (ret)
+ goto err_connect_control;
+
mutex_unlock(&udc->connect_lock);
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
return 0;
+ err_connect_control:
+ udc->allow_connect = false;
+ usb_gadget_disable_async_callbacks(udc);
+ if (gadget->irq)
+ synchronize_irq(gadget->irq);
+ usb_gadget_udc_stop_locked(udc);
+ mutex_unlock(&udc->connect_lock);
+
err_start:
driver->unbind(udc->gadget);
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index 4aae86b47edf..4e88681a79b6 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -27,9 +27,10 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
@@ -2471,17 +2472,12 @@ static const struct of_device_id qe_udc_match[];
static int qe_udc_probe(struct platform_device *ofdev)
{
struct qe_udc *udc;
- const struct of_device_id *match;
struct device_node *np = ofdev->dev.of_node;
struct qe_ep *ep;
unsigned int ret = 0;
unsigned int i;
const void *prop;
- match = of_match_device(qe_udc_match, &ofdev->dev);
- if (!match)
- return -EINVAL;
-
prop = of_get_property(np, "mode", NULL);
if (!prop || strcmp(prop, "peripheral"))
return -ENODEV;
@@ -2493,7 +2489,7 @@ static int qe_udc_probe(struct platform_device *ofdev)
return -ENOMEM;
}
- udc->soc_type = (unsigned long)match->data;
+ udc->soc_type = (unsigned long)device_get_match_data(&ofdev->dev);
udc->usb_regs = of_iomap(np, 0);
if (!udc->usb_regs) {
ret = -ENOMEM;
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index ee5705d336e3..2693a10eb0c7 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -2666,6 +2666,7 @@ static const struct platform_device_id fsl_udc_devtype[] = {
};
MODULE_DEVICE_TABLE(platform, fsl_udc_devtype);
static struct platform_driver udc_driver = {
+ .probe = fsl_udc_probe,
.remove = fsl_udc_remove,
.id_table = fsl_udc_devtype,
/* these suspend and resume are not usb suspend and resume */
@@ -2679,7 +2680,7 @@ static struct platform_driver udc_driver = {
},
};
-module_platform_driver_probe(udc_driver, fsl_udc_probe);
+module_platform_driver(udc_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
index bd03d475f927..873265634ccc 100644
--- a/drivers/usb/gadget/udc/fusb300_udc.c
+++ b/drivers/usb/gadget/udc/fusb300_udc.c
@@ -1506,10 +1506,11 @@ clean_up:
}
static struct platform_driver fusb300_driver = {
- .remove_new = fusb300_remove,
- .driver = {
+ .probe = fusb300_probe,
+ .remove_new = fusb300_remove,
+ .driver = {
.name = udc_name,
},
};
-module_platform_driver_probe(fusb300_driver, fusb300_probe);
+module_platform_driver(fusb300_driver);
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index fe62db32dd0e..a917cc9a32ab 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -3254,6 +3254,7 @@ MODULE_DEVICE_TABLE(of, lpc32xx_udc_of_match);
#endif
static struct platform_driver lpc32xx_udc_driver = {
+ .probe = lpc32xx_udc_probe,
.remove = lpc32xx_udc_remove,
.shutdown = lpc32xx_udc_shutdown,
.suspend = lpc32xx_udc_suspend,
@@ -3264,7 +3265,7 @@ static struct platform_driver lpc32xx_udc_driver = {
},
};
-module_platform_driver_probe(lpc32xx_udc_driver, lpc32xx_udc_probe);
+module_platform_driver(lpc32xx_udc_driver);
MODULE_DESCRIPTION("LPC32XX udc driver");
MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
index e05f45a4b56b..bfaa5291e6c8 100644
--- a/drivers/usb/gadget/udc/m66592-udc.c
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -1687,10 +1687,11 @@ clean_up:
/*-------------------------------------------------------------------------*/
static struct platform_driver m66592_driver = {
+ .probe = m66592_probe,
.remove_new = m66592_remove,
.driver = {
.name = udc_name,
},
};
-module_platform_driver_probe(m66592_driver, m66592_probe);
+module_platform_driver(m66592_driver);
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index 51b665f15c8e..db4a10a979f9 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -1964,13 +1964,14 @@ clean_up2:
/*-------------------------------------------------------------------------*/
static struct platform_driver r8a66597_driver = {
+ .probe = r8a66597_probe,
.remove_new = r8a66597_remove,
.driver = {
.name = udc_name,
},
};
-module_platform_driver_probe(r8a66597_driver, r8a66597_probe);
+module_platform_driver(r8a66597_driver);
MODULE_DESCRIPTION("R8A66597 USB gadget driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 2665832f9add..1f9c1b1435d8 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -60,6 +60,23 @@
#define EHCI_USBLEGCTLSTS 4 /* legacy control/status */
#define EHCI_USBLEGCTLSTS_SOOE (1 << 13) /* SMI on ownership change */
+/* ASMEDIA quirk use */
+#define ASMT_DATA_WRITE0_REG 0xF8
+#define ASMT_DATA_WRITE1_REG 0xFC
+#define ASMT_CONTROL_REG 0xE0
+#define ASMT_CONTROL_WRITE_BIT 0x02
+#define ASMT_WRITEREG_CMD 0x10423
+#define ASMT_FLOWCTL_ADDR 0xFA30
+#define ASMT_FLOWCTL_DATA 0xBA
+#define ASMT_PSEUDO_DATA 0
+
+/* Intel quirk use */
+#define USB_INTEL_XUSB2PR 0xD0
+#define USB_INTEL_USB2PRM 0xD4
+#define USB_INTEL_USB3_PSSEN 0xD8
+#define USB_INTEL_USB3PRM 0xDC
+
+#ifdef CONFIG_USB_PCI_AMD
/* AMD quirk use */
#define AB_REG_BAR_LOW 0xe0
#define AB_REG_BAR_HIGH 0xe1
@@ -93,21 +110,6 @@
#define NB_PIF0_PWRDOWN_0 0x01100012
#define NB_PIF0_PWRDOWN_1 0x01100013
-#define USB_INTEL_XUSB2PR 0xD0
-#define USB_INTEL_USB2PRM 0xD4
-#define USB_INTEL_USB3_PSSEN 0xD8
-#define USB_INTEL_USB3PRM 0xDC
-
-/* ASMEDIA quirk use */
-#define ASMT_DATA_WRITE0_REG 0xF8
-#define ASMT_DATA_WRITE1_REG 0xFC
-#define ASMT_CONTROL_REG 0xE0
-#define ASMT_CONTROL_WRITE_BIT 0x02
-#define ASMT_WRITEREG_CMD 0x10423
-#define ASMT_FLOWCTL_ADDR 0xFA30
-#define ASMT_FLOWCTL_DATA 0xBA
-#define ASMT_PSEUDO_DATA 0
-
/*
* amd_chipset_gen values represent AMD different chipset generations
*/
@@ -458,50 +460,6 @@ void usb_amd_quirk_pll_disable(void)
}
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);
-static int usb_asmedia_wait_write(struct pci_dev *pdev)
-{
- unsigned long retry_count;
- unsigned char value;
-
- for (retry_count = 1000; retry_count > 0; --retry_count) {
-
- pci_read_config_byte(pdev, ASMT_CONTROL_REG, &value);
-
- if (value == 0xff) {
- dev_err(&pdev->dev, "%s: check_ready ERROR", __func__);
- return -EIO;
- }
-
- if ((value & ASMT_CONTROL_WRITE_BIT) == 0)
- return 0;
-
- udelay(50);
- }
-
- dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__);
- return -ETIMEDOUT;
-}
-
-void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev)
-{
- if (usb_asmedia_wait_write(pdev) != 0)
- return;
-
- /* send command and address to device */
- pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_WRITEREG_CMD);
- pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_FLOWCTL_ADDR);
- pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
-
- if (usb_asmedia_wait_write(pdev) != 0)
- return;
-
- /* send data to device */
- pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_FLOWCTL_DATA);
- pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_PSEUDO_DATA);
- pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
-}
-EXPORT_SYMBOL_GPL(usb_asmedia_modifyflowcontrol);
-
void usb_amd_quirk_pll_enable(void)
{
usb_amd_quirk_pll(0);
@@ -630,7 +588,62 @@ bool usb_amd_pt_check_port(struct device *device, int port)
return !(value & BIT(port_shift));
}
EXPORT_SYMBOL_GPL(usb_amd_pt_check_port);
+#endif /* CONFIG_USB_PCI_AMD */
+
+static int usb_asmedia_wait_write(struct pci_dev *pdev)
+{
+ unsigned long retry_count;
+ unsigned char value;
+
+ for (retry_count = 1000; retry_count > 0; --retry_count) {
+
+ pci_read_config_byte(pdev, ASMT_CONTROL_REG, &value);
+
+ if (value == 0xff) {
+ dev_err(&pdev->dev, "%s: check_ready ERROR", __func__);
+ return -EIO;
+ }
+
+ if ((value & ASMT_CONTROL_WRITE_BIT) == 0)
+ return 0;
+
+ udelay(50);
+ }
+
+ dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__);
+ return -ETIMEDOUT;
+}
+
+void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev)
+{
+ if (usb_asmedia_wait_write(pdev) != 0)
+ return;
+ /* send command and address to device */
+ pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_WRITEREG_CMD);
+ pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_FLOWCTL_ADDR);
+ pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
+
+ if (usb_asmedia_wait_write(pdev) != 0)
+ return;
+
+ /* send data to device */
+ pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_FLOWCTL_DATA);
+ pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_PSEUDO_DATA);
+ pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
+}
+EXPORT_SYMBOL_GPL(usb_asmedia_modifyflowcontrol);
+
+static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
+{
+ u16 cmd;
+
+ return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask);
+}
+
+#define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY)
+
+#if defined(CONFIG_HAS_IOPORT) && IS_ENABLED(CONFIG_USB_UHCI_HCD)
/*
* Make sure the controller is completely inactive, unable to
* generate interrupts or do DMA.
@@ -712,14 +725,7 @@ reset_needed:
}
EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);
-static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
-{
- u16 cmd;
- return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask);
-}
-
#define pio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_IO)
-#define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY)
static void quirk_usb_handoff_uhci(struct pci_dev *pdev)
{
@@ -739,6 +745,12 @@ static void quirk_usb_handoff_uhci(struct pci_dev *pdev)
uhci_check_and_reset_hc(pdev, base);
}
+#else /* defined(CONFIG_HAS_IOPORT) && IS_ENABLED(CONFIG_USB_UHCI_HCD) */
+
+static void quirk_usb_handoff_uhci(struct pci_dev *pdev) {}
+
+#endif /* defined(CONFIG_HAS_IOPORT) && IS_ENABLED(CONFIG_USB_UHCI_HCD) */
+
static int mmio_resource_enabled(struct pci_dev *pdev, int idx)
{
return pci_resource_start(pdev, idx) && mmio_enabled(pdev);
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index e729de21fad7..a5230b0b9e91 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -2,9 +2,7 @@
#ifndef __LINUX_USB_PCI_QUIRKS_H
#define __LINUX_USB_PCI_QUIRKS_H
-#ifdef CONFIG_USB_PCI
-void uhci_reset_hc(struct pci_dev *pdev, unsigned long base);
-int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base);
+#ifdef CONFIG_USB_PCI_AMD
int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev);
bool usb_amd_hang_symptom_quirk(void);
bool usb_amd_prefetch_quirk(void);
@@ -12,23 +10,41 @@ void usb_amd_dev_put(void);
bool usb_amd_quirk_pll_check(void);
void usb_amd_quirk_pll_disable(void);
void usb_amd_quirk_pll_enable(void);
-void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
-void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
-void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
void sb800_prefetch(struct device *dev, int on);
bool usb_amd_pt_check_port(struct device *device, int port);
#else
-struct pci_dev;
+static inline bool usb_amd_hang_symptom_quirk(void)
+{
+ return false;
+};
+static inline bool usb_amd_prefetch_quirk(void)
+{
+ return false;
+}
static inline void usb_amd_quirk_pll_disable(void) {}
static inline void usb_amd_quirk_pll_enable(void) {}
-static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {}
static inline void usb_amd_dev_put(void) {}
-static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
+static inline bool usb_amd_quirk_pll_check(void)
+{
+ return false;
+}
static inline void sb800_prefetch(struct device *dev, int on) {}
static inline bool usb_amd_pt_check_port(struct device *device, int port)
{
return false;
}
+#endif /* CONFIG_USB_PCI_AMD */
+
+#ifdef CONFIG_USB_PCI
+void uhci_reset_hc(struct pci_dev *pdev, unsigned long base);
+int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base);
+void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
+void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
+void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
+#else
+struct pci_dev;
+static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {}
+static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
#endif /* CONFIG_USB_PCI */
#endif /* __LINUX_USB_PCI_QUIRKS_H */
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index 99baa60ef50f..6d142cd61bd6 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -204,7 +204,7 @@ static void xhci_ring_dump_segment(struct seq_file *s,
for (i = 0; i < TRBS_PER_SEGMENT; i++) {
trb = &seg->trbs[i];
dma = seg->dma + i * sizeof(*trb);
- seq_printf(s, "%pad: %s\n", &dma,
+ seq_printf(s, "%2u %pad: %s\n", seg->num, &dma,
xhci_decode_trb(str, XHCI_MSG_MAX, le32_to_cpu(trb->generic.field[0]),
le32_to_cpu(trb->generic.field[1]),
le32_to_cpu(trb->generic.field[2]),
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index e8af0a125f84..96eb36a58738 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -79,6 +79,33 @@
/* true: Controller Not Ready to accept doorbell or op reg writes after reset */
#define XHCI_STS_CNR (1 << 11)
+/**
+ * struct xhci_protocol_caps
+ * @revision: major revision, minor revision, capability ID,
+ * and next capability pointer.
+ * @name_string: Four ASCII characters to say which spec this xHC
+ * follows, typically "USB ".
+ * @port_info: Port offset, count, and protocol-defined information.
+ */
+struct xhci_protocol_caps {
+ u32 revision;
+ u32 name_string;
+ u32 port_info;
+};
+
+#define XHCI_EXT_PORT_MAJOR(x) (((x) >> 24) & 0xff)
+#define XHCI_EXT_PORT_MINOR(x) (((x) >> 16) & 0xff)
+#define XHCI_EXT_PORT_PSIC(x) (((x) >> 28) & 0x0f)
+#define XHCI_EXT_PORT_OFF(x) ((x) & 0xff)
+#define XHCI_EXT_PORT_COUNT(x) (((x) >> 8) & 0xff)
+
+#define XHCI_EXT_PORT_PSIV(x) (((x) >> 0) & 0x0f)
+#define XHCI_EXT_PORT_PSIE(x) (((x) >> 4) & 0x03)
+#define XHCI_EXT_PORT_PLT(x) (((x) >> 6) & 0x03)
+#define XHCI_EXT_PORT_PFD(x) (((x) >> 8) & 0x01)
+#define XHCI_EXT_PORT_LP(x) (((x) >> 14) & 0x03)
+#define XHCI_EXT_PORT_PSIM(x) (((x) >> 16) & 0xffff)
+
#include <linux/io.h>
/**
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 0df5d807a77e..0980ade2a234 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1262,7 +1262,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
retval = -ENODEV;
break;
}
- trace_xhci_get_port_status(wIndex, temp);
+ trace_xhci_get_port_status(port, temp);
status = xhci_get_port_status(hcd, bus_state, wIndex, temp,
&flags);
if (status == 0xffffffff)
@@ -1687,7 +1687,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
retval = -ENODEV;
break;
}
- trace_xhci_hub_status_data(i, temp);
+ trace_xhci_hub_status_data(ports[i], temp);
if ((temp & mask) != 0 ||
(bus_state->port_c_suspend & 1 << i) ||
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 0a37f0d511cf..62116586848b 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -29,6 +29,7 @@
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
unsigned int cycle_state,
unsigned int max_packet,
+ unsigned int num,
gfp_t flags)
{
struct xhci_segment *seg;
@@ -60,6 +61,7 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
for (i = 0; i < TRBS_PER_SEGMENT; i++)
seg->trbs[i].link.control = cpu_to_le32(TRB_CYCLE);
}
+ seg->num = num;
seg->dma = dma;
seg->next = NULL;
@@ -128,7 +130,7 @@ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
struct xhci_segment *first, struct xhci_segment *last,
unsigned int num_segs)
{
- struct xhci_segment *next;
+ struct xhci_segment *next, *seg;
bool chain_links;
if (!ring || !first || !last)
@@ -144,13 +146,18 @@ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
xhci_link_segments(last, next, ring->type, chain_links);
ring->num_segs += num_segs;
- if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
- ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
- &= ~cpu_to_le32(LINK_TOGGLE);
- last->trbs[TRBS_PER_SEGMENT-1].link.control
- |= cpu_to_le32(LINK_TOGGLE);
+ if (ring->enq_seg == ring->last_seg) {
+ if (ring->type != TYPE_EVENT) {
+ ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
+ &= ~cpu_to_le32(LINK_TOGGLE);
+ last->trbs[TRBS_PER_SEGMENT-1].link.control
+ |= cpu_to_le32(LINK_TOGGLE);
+ }
ring->last_seg = last;
}
+
+ for (seg = last; seg != ring->last_seg; seg = seg->next)
+ seg->next->num = seg->num + 1;
}
/*
@@ -320,8 +327,9 @@ void xhci_initialize_ring_info(struct xhci_ring *ring,
/* Allocate segments and link them for a ring */
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
struct xhci_segment **first, struct xhci_segment **last,
- unsigned int num_segs, unsigned int cycle_state,
- enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
+ unsigned int num_segs, unsigned int num,
+ unsigned int cycle_state, enum xhci_ring_type type,
+ unsigned int max_packet, gfp_t flags)
{
struct xhci_segment *prev;
bool chain_links;
@@ -331,16 +339,17 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
(type == TYPE_ISOC &&
(xhci->quirks & XHCI_AMD_0x96_HOST)));
- prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
+ prev = xhci_segment_alloc(xhci, cycle_state, max_packet, num, flags);
if (!prev)
return -ENOMEM;
- num_segs--;
+ num++;
*first = prev;
- while (num_segs > 0) {
+ while (num < num_segs) {
struct xhci_segment *next;
- next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
+ next = xhci_segment_alloc(xhci, cycle_state, max_packet, num,
+ flags);
if (!next) {
prev = *first;
while (prev) {
@@ -353,7 +362,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
xhci_link_segments(prev, next, type, chain_links);
prev = next;
- num_segs--;
+ num++;
}
xhci_link_segments(prev, *first, type, chain_links);
*last = prev;
@@ -388,7 +397,7 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
return ring;
ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
- &ring->last_seg, num_segs, cycle_state, type,
+ &ring->last_seg, num_segs, 0, cycle_state, type,
max_packet, flags);
if (ret)
goto fail;
@@ -428,7 +437,8 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
int ret;
ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
- num_new_segs, ring->cycle_state, ring->type,
+ num_new_segs, ring->enq_seg->num + 1,
+ ring->cycle_state, ring->type,
ring->bounce_buf_len, flags);
if (ret)
return -ENOMEM;
@@ -1766,7 +1776,7 @@ void xhci_free_command(struct xhci_hcd *xhci,
kfree(command);
}
-int xhci_alloc_erst(struct xhci_hcd *xhci,
+static int xhci_alloc_erst(struct xhci_hcd *xhci,
struct xhci_ring *evt_ring,
struct xhci_erst *erst,
gfp_t flags)
@@ -1797,23 +1807,13 @@ int xhci_alloc_erst(struct xhci_hcd *xhci,
}
static void
-xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
+xhci_remove_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
- struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
- size_t erst_size;
- u64 tmp64;
u32 tmp;
if (!ir)
return;
- erst_size = sizeof(struct xhci_erst_entry) * ir->erst.num_entries;
- if (ir->erst.entries)
- dma_free_coherent(dev, erst_size,
- ir->erst.entries,
- ir->erst.erst_dma_addr);
- ir->erst.entries = NULL;
-
/*
* Clean out interrupter registers except ERSTBA. Clearing either the
* low or high 32 bits of ERSTBA immediately causes the controller to
@@ -1824,14 +1824,30 @@ xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
tmp &= ERST_SIZE_MASK;
writel(tmp, &ir->ir_set->erst_size);
- tmp64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
- tmp64 &= (u64) ERST_PTR_MASK;
- xhci_write_64(xhci, tmp64, &ir->ir_set->erst_dequeue);
+ xhci_write_64(xhci, ERST_EHB, &ir->ir_set->erst_dequeue);
}
+}
- /* free interrrupter event ring */
+static void
+xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
+{
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ size_t erst_size;
+
+ if (!ir)
+ return;
+
+ erst_size = sizeof(struct xhci_erst_entry) * ir->erst.num_entries;
+ if (ir->erst.entries)
+ dma_free_coherent(dev, erst_size,
+ ir->erst.entries,
+ ir->erst.erst_dma_addr);
+ ir->erst.entries = NULL;
+
+ /* free interrupter event ring */
if (ir->event_ring)
xhci_ring_free(xhci, ir->event_ring);
+
ir->event_ring = NULL;
kfree(ir);
@@ -1844,6 +1860,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
cancel_delayed_work_sync(&xhci->cmd_timer);
+ xhci_remove_interrupter(xhci, xhci->interrupter);
xhci_free_interrupter(xhci, xhci->interrupter);
xhci->interrupter = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed primary event ring");
@@ -1933,7 +1950,6 @@ no_bw:
static void xhci_set_hc_event_deq(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
- u64 temp;
dma_addr_t deq;
deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
@@ -1941,16 +1957,12 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci, struct xhci_interrupter
if (!deq)
xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr.\n");
/* Update HC event ring dequeue pointer */
- temp = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
- temp &= ERST_PTR_MASK;
/* Don't clear the EHB bit (which is RW1C) because
* there might be more events to service.
*/
- temp &= ~ERST_EHB;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Write event ring dequeue pointer, preserving EHB bit");
- xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
- &ir->ir_set->erst_dequeue);
+ xhci_write_64(xhci, deq & ERST_PTR_MASK, &ir->ir_set->erst_dequeue);
}
static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
@@ -2238,14 +2250,18 @@ xhci_alloc_interrupter(struct xhci_hcd *xhci, gfp_t flags)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
struct xhci_interrupter *ir;
+ unsigned int num_segs;
int ret;
ir = kzalloc_node(sizeof(*ir), flags, dev_to_node(dev));
if (!ir)
return NULL;
- ir->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
- 0, flags);
+ num_segs = min_t(unsigned int, 1 << HCS_ERST_MAX(xhci->hcs_params2),
+ ERST_MAX_SEGS);
+
+ ir->event_ring = xhci_ring_alloc(xhci, num_segs, 1, TYPE_EVENT, 0,
+ flags);
if (!ir->event_ring) {
xhci_warn(xhci, "Failed to allocate interrupter event ring\n");
kfree(ir);
@@ -2281,7 +2297,7 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
/* set ERST count with the number of entries in the segment table */
erst_size = readl(&ir->ir_set->erst_size);
erst_size &= ERST_SIZE_MASK;
- erst_size |= ERST_NUM_SEGS;
+ erst_size |= ir->event_ring->num_segs;
writel(erst_size, &ir->ir_set->erst_size);
erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 579899eb24c1..5b3cd455adec 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -19,6 +19,18 @@
#define HS_BW_BOUNDARY 6144
/* usb2 spec section11.18.1: at most 188 FS bytes per microframe */
#define FS_PAYLOAD_MAX 188
+#define LS_PAYLOAD_MAX 18
+/* section 11.18.1, per fs frame */
+#define FS_BW_BOUNDARY 1157
+#define LS_BW_BOUNDARY 144
+
+/*
+ * max number of microframes for a split transfer, assuming the extra-cs budget is 0;
+ * for fs isoc in: 1 ss + 1 idle + 6 cs (roundup(1023/188))
+ */
+#define TT_MICROFRAMES_MAX 8
+/* offset from SS for fs/ls isoc/intr ep (ss + idle) */
+#define CS_OFFSET 2
#define DBG_BUF_EN 64
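/*
 * Editor's illustration (hedged, not part of the patch): how
 * TT_MICROFRAMES_MAX == 8 follows from the constants above for the
 * worst-case FS isoc IN endpoint (maxpkt == 1023): CS_OFFSET (1 ss + 1 idle)
 * plus roundup(1023 / 188) == 6 complete-splits. The helper name is
 * illustrative only.
 */
static inline unsigned int example_split_microframes(unsigned int maxpkt)
{
	return CS_OFFSET + DIV_ROUND_UP(maxpkt, FS_PAYLOAD_MAX);
}
/* example_split_microframes(1023) == 2 + 6 == 8 == TT_MICROFRAMES_MAX */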
@@ -237,17 +249,26 @@ static void drop_tt(struct usb_device *udev)
static struct mu3h_sch_ep_info *
create_sch_ep(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
- struct usb_host_endpoint *ep)
+ struct usb_host_endpoint *ep, struct xhci_ep_ctx *ep_ctx)
{
struct mu3h_sch_ep_info *sch_ep;
struct mu3h_sch_bw_info *bw_info;
struct mu3h_sch_tt *tt = NULL;
+ u32 len;
bw_info = get_bw_info(mtk, udev, ep);
if (!bw_info)
return ERR_PTR(-ENODEV);
- sch_ep = kzalloc(sizeof(*sch_ep), GFP_KERNEL);
+ if (is_fs_or_ls(udev->speed))
+ len = TT_MICROFRAMES_MAX;
+ else if ((udev->speed >= USB_SPEED_SUPER) &&
+ usb_endpoint_xfer_isoc(&ep->desc))
+ len = get_esit(ep_ctx);
+ else
+ len = 1;
+
+ sch_ep = kzalloc(struct_size(sch_ep, bw_budget_table, len), GFP_KERNEL);
if (!sch_ep)
return ERR_PTR(-ENOMEM);
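/*
 * Editor's illustration (hedged, not part of the patch): struct_size() above
 * sizes the allocation to cover the trailing flexible bw_budget_table[]
 * member, e.g. a FS/LS endpoint gets TT_MICROFRAMES_MAX (8) budget slots
 * after the fixed fields. Minimal shape of the pattern, with hypothetical
 * names:
 */
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_budget {
	unsigned int	num_slots;
	u32		budget[];	/* flexible array, sized at allocation time */
};

static struct example_budget *example_alloc_budget(unsigned int len)
{
	struct example_budget *p;

	p = kzalloc(struct_size(p, budget, len), GFP_KERNEL);
	if (p)
		p->num_slots = len;

	return p;
}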
@@ -279,7 +300,11 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
u32 mult;
u32 esit_pkts;
u32 max_esit_payload;
+ u32 bw_per_microframe;
+ u32 *bwb_table;
+ int i;
+ bwb_table = sch_ep->bw_budget_table;
ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
maxpkt = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
max_burst = CTX_TO_MAX_BURST(le32_to_cpu(ep_ctx->ep_info2));
@@ -313,7 +338,7 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
* opportunities per microframe
*/
sch_ep->pkts = max_burst + 1;
- sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
+ bwb_table[0] = maxpkt * sch_ep->pkts;
} else if (sch_ep->speed >= USB_SPEED_SUPER) {
/* usb3_r1 spec section4.4.7 & 4.4.8 */
sch_ep->cs_count = 0;
@@ -330,6 +355,7 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
if (ep_type == INT_IN_EP || ep_type == INT_OUT_EP) {
sch_ep->pkts = esit_pkts;
sch_ep->num_budget_microframes = 1;
+ bwb_table[0] = maxpkt * sch_ep->pkts;
}
if (ep_type == ISOC_IN_EP || ep_type == ISOC_OUT_EP) {
@@ -346,18 +372,52 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
DIV_ROUND_UP(esit_pkts, sch_ep->pkts);
sch_ep->repeat = !!(sch_ep->num_budget_microframes > 1);
+ bw_per_microframe = maxpkt * sch_ep->pkts;
+
+ for (i = 0; i < sch_ep->num_budget_microframes - 1; i++)
+ bwb_table[i] = bw_per_microframe;
+
+ /* last one <= bw_per_microframe */
+ bwb_table[i] = maxpkt * esit_pkts - i * bw_per_microframe;
}
- sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
} else if (is_fs_or_ls(sch_ep->speed)) {
sch_ep->pkts = 1; /* at most one packet for each microframe */
/*
- * num_budget_microframes and cs_count will be updated when
+ * @cs_count will be updated to add extra-cs when
* check TT for INT_OUT_EP, ISOC/INT_IN_EP type
+ * @maxpkt <= 1023;
*/
sch_ep->cs_count = DIV_ROUND_UP(maxpkt, FS_PAYLOAD_MAX);
sch_ep->num_budget_microframes = sch_ep->cs_count;
- sch_ep->bw_cost_per_microframe = min_t(u32, maxpkt, FS_PAYLOAD_MAX);
+
+ /* init budget table */
+ if (ep_type == ISOC_OUT_EP) {
+ for (i = 0; i < sch_ep->cs_count - 1; i++)
+ bwb_table[i] = FS_PAYLOAD_MAX;
+
+ bwb_table[i] = maxpkt - i * FS_PAYLOAD_MAX;
+ } else if (ep_type == INT_OUT_EP) {
+ /* only first one used (maxpkt <= 64), others zero */
+ bwb_table[0] = maxpkt;
+ } else { /* INT_IN_EP or ISOC_IN_EP */
+ bwb_table[0] = 0; /* start split */
+ bwb_table[1] = 0; /* idle */
+ /*
+ * @cs_count will be updated according to cs position
+ * (add 1 or 2 extra-cs), but assume only first
+ * @num_budget_microframes elements will be used later,
+ * although in fact that is not the case (the extra-cs budget may
+ * receive some data for an IN ep);
+ * @cs_count is 1 for INT_IN_EP (maxpkt <= 64);
+ */
+ for (i = 0; i < sch_ep->cs_count - 1; i++)
+ bwb_table[i + CS_OFFSET] = FS_PAYLOAD_MAX;
+
+ bwb_table[i + CS_OFFSET] = maxpkt - i * FS_PAYLOAD_MAX;
+ /* ss + idle */
+ sch_ep->num_budget_microframes += CS_OFFSET;
+ }
}
}
@@ -374,7 +434,7 @@ static u32 get_max_bw(struct mu3h_sch_bw_info *sch_bw,
for (j = 0; j < sch_ep->num_budget_microframes; j++) {
k = XHCI_MTK_BW_INDEX(base + j);
- bw = sch_bw->bus_bw[k] + sch_ep->bw_cost_per_microframe;
+ bw = sch_bw->bus_bw[k] + sch_ep->bw_budget_table[j];
if (bw > max_bw)
max_bw = bw;
}
@@ -382,56 +442,152 @@ static u32 get_max_bw(struct mu3h_sch_bw_info *sch_bw,
return max_bw;
}
+/*
+ * for OUT: get first SS consumed bw;
+ * for IN: get first CS consumed bw;
+ */
+static u16 get_fs_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
+{
+ struct mu3h_sch_tt *tt = sch_ep->sch_tt;
+ u16 fs_bw;
+
+ if (sch_ep->ep_type == ISOC_OUT_EP || sch_ep->ep_type == INT_OUT_EP)
+ fs_bw = tt->fs_bus_bw_out[XHCI_MTK_BW_INDEX(offset)];
+ else /* skip ss + idle */
+ fs_bw = tt->fs_bus_bw_in[XHCI_MTK_BW_INDEX(offset + CS_OFFSET)];
+
+ return fs_bw;
+}
+
static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
struct mu3h_sch_ep_info *sch_ep, bool used)
{
- int bw_updated;
u32 base;
- int i, j;
-
- bw_updated = sch_ep->bw_cost_per_microframe * (used ? 1 : -1);
+ int i, j, k;
for (i = 0; i < sch_ep->num_esit; i++) {
base = sch_ep->offset + i * sch_ep->esit;
- for (j = 0; j < sch_ep->num_budget_microframes; j++)
- sch_bw->bus_bw[XHCI_MTK_BW_INDEX(base + j)] += bw_updated;
+ for (j = 0; j < sch_ep->num_budget_microframes; j++) {
+ k = XHCI_MTK_BW_INDEX(base + j);
+ if (used)
+ sch_bw->bus_bw[k] += sch_ep->bw_budget_table[j];
+ else
+ sch_bw->bus_bw[k] -= sch_ep->bw_budget_table[j];
+ }
}
}
-static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
+static int check_ls_budget_microframes(struct mu3h_sch_ep_info *sch_ep, int offset)
+{
+ struct mu3h_sch_tt *tt = sch_ep->sch_tt;
+ int i;
+
+ if (sch_ep->speed != USB_SPEED_LOW)
+ return 0;
+
+ if (sch_ep->ep_type == INT_OUT_EP)
+ i = XHCI_MTK_BW_INDEX(offset);
+ else if (sch_ep->ep_type == INT_IN_EP)
+ i = XHCI_MTK_BW_INDEX(offset + CS_OFFSET); /* skip ss + idle */
+ else
+ return -EINVAL;
+
+ if (tt->ls_bus_bw[i] + sch_ep->maxpkt > LS_PAYLOAD_MAX)
+ return -ESCH_BW_OVERFLOW;
+
+ return 0;
+}
+
+static int check_fs_budget_microframes(struct mu3h_sch_ep_info *sch_ep, int offset)
{
struct mu3h_sch_tt *tt = sch_ep->sch_tt;
u32 tmp;
- int base;
+ int i, k;
+
+ /*
+ * OUT eps transfer exactly the assigned length of data, so no more
+ * than 188 bytes can be allocated per uframe; IN eps are different:
+ * they usually can't receive a full 188 bytes in a uframe, so if a
+ * uframe isn't assigned the full 188 bytes, another one can be added;
+ */
+ for (i = 0; i < sch_ep->num_budget_microframes; i++) {
+ k = XHCI_MTK_BW_INDEX(offset + i);
+ if (sch_ep->ep_type == ISOC_OUT_EP || sch_ep->ep_type == INT_OUT_EP)
+ tmp = tt->fs_bus_bw_out[k] + sch_ep->bw_budget_table[i];
+ else /* ep_type : ISOC IN / INTR IN */
+ tmp = tt->fs_bus_bw_in[k];
+
+ if (tmp > FS_PAYLOAD_MAX)
+ return -ESCH_BW_OVERFLOW;
+ }
+
+ return 0;
+}
+
+static int check_fs_budget_frames(struct mu3h_sch_ep_info *sch_ep, int offset)
+{
+ struct mu3h_sch_tt *tt = sch_ep->sch_tt;
+ u32 head, tail;
int i, j, k;
+ /* the scheduled budget may cross at most two fs frames */
+ j = XHCI_MTK_BW_INDEX(offset) / UFRAMES_PER_FRAME;
+ k = XHCI_MTK_BW_INDEX(offset + sch_ep->num_budget_microframes - 1) / UFRAMES_PER_FRAME;
+
+ if (j != k) {
+ head = tt->fs_frame_bw[j];
+ tail = tt->fs_frame_bw[k];
+ } else {
+ head = tt->fs_frame_bw[j];
+ tail = 0;
+ }
+
+ j = roundup(offset, UFRAMES_PER_FRAME);
+ for (i = 0; i < sch_ep->num_budget_microframes; i++) {
+ if ((offset + i) < j)
+ head += sch_ep->bw_budget_table[i];
+ else
+ tail += sch_ep->bw_budget_table[i];
+ }
+
+ if (head > FS_BW_BOUNDARY || tail > FS_BW_BOUNDARY)
+ return -ESCH_BW_OVERFLOW;
+
+ return 0;
+}
+
+static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
+{
+ int i, base;
+ int ret = 0;
+
for (i = 0; i < sch_ep->num_esit; i++) {
base = offset + i * sch_ep->esit;
- /*
- * Compared with hs bus, no matter what ep type,
- * the hub will always delay one uframe to send data
- */
- for (j = 0; j < sch_ep->num_budget_microframes; j++) {
- k = XHCI_MTK_BW_INDEX(base + j);
- tmp = tt->fs_bus_bw[k] + sch_ep->bw_cost_per_microframe;
- if (tmp > FS_PAYLOAD_MAX)
- return -ESCH_BW_OVERFLOW;
- }
+ ret = check_ls_budget_microframes(sch_ep, base);
+ if (ret)
+ goto err;
+
+ ret = check_fs_budget_microframes(sch_ep, base);
+ if (ret)
+ goto err;
+
+ ret = check_fs_budget_frames(sch_ep, base);
+ if (ret)
+ goto err;
}
- return 0;
+err:
+ return ret;
}
-static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset)
+static int check_ss_and_cs(struct mu3h_sch_ep_info *sch_ep, u32 offset)
{
u32 start_ss, last_ss;
u32 start_cs, last_cs;
- if (!sch_ep->sch_tt)
- return 0;
-
- start_ss = offset % 8;
+ start_ss = offset % UFRAMES_PER_FRAME;
if (sch_ep->ep_type == ISOC_OUT_EP) {
last_ss = start_ss + sch_ep->cs_count - 1;
@@ -444,6 +600,7 @@ static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset)
return -ESCH_SS_Y6;
} else {
+ /* maxpkt <= 1023, cs <= 6 */
u32 cs_count = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX);
/*
@@ -454,44 +611,164 @@ static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset)
return -ESCH_SS_Y6;
/* one uframe for ss + one uframe for idle */
- start_cs = (start_ss + 2) % 8;
+ start_cs = (start_ss + CS_OFFSET) % UFRAMES_PER_FRAME;
last_cs = start_cs + cs_count - 1;
-
if (last_cs > 7)
return -ESCH_CS_OVERFLOW;
+ /* add extra-cs */
+ cs_count += (last_cs == 7) ? 1 : 2;
if (cs_count > 7)
cs_count = 7; /* HW limit */
sch_ep->cs_count = cs_count;
- /* ss, idle are ignored */
- sch_ep->num_budget_microframes = cs_count;
- /*
- * if interval=1, maxp >752, num_budge_micoframe is larger
- * than sch_ep->esit, will overstep boundary
- */
- if (sch_ep->num_budget_microframes > sch_ep->esit)
- sch_ep->num_budget_microframes = sch_ep->esit;
}
+ return 0;
+}
+
+/*
+ * when an isoc-out ep transfers a full 188 bytes in a uframe, also sending
+ * an isoc/intr ss token in that uframe may cause a 'bit stuff error' on the
+ * downstream port;
+ * when isoc-out transfers less than 188 bytes in a uframe, the isoc-in ss
+ * should be sent after the isoc-out ss (but the hw can't guarantee that
+ * order, so just avoid the overlap).
+ */
+static int check_isoc_ss_overlap(struct mu3h_sch_ep_info *sch_ep, u32 offset)
+{
+ struct mu3h_sch_tt *tt = sch_ep->sch_tt;
+ int base;
+ int i, j, k;
+
+ if (!tt)
+ return 0;
+
+ for (i = 0; i < sch_ep->num_esit; i++) {
+ base = offset + i * sch_ep->esit;
+
+ if (sch_ep->ep_type == ISOC_OUT_EP) {
+ for (j = 0; j < sch_ep->num_budget_microframes; j++) {
+ k = XHCI_MTK_BW_INDEX(base + j + CS_OFFSET);
+ /* use cs to indicate existence of in-ss @(base+j) */
+ if (tt->fs_bus_bw_in[k])
+ return -ESCH_SS_OVERLAP;
+ }
+ } else if (sch_ep->ep_type == ISOC_IN_EP || sch_ep->ep_type == INT_IN_EP) {
+ k = XHCI_MTK_BW_INDEX(base);
+ /* only check IN's ss */
+ if (tt->fs_bus_bw_out[k])
+ return -ESCH_SS_OVERLAP;
+ }
+ }
+
+ return 0;
+}
+
+static int check_sch_tt_budget(struct mu3h_sch_ep_info *sch_ep, u32 offset)
+{
+ int ret;
+
+ ret = check_ss_and_cs(sch_ep, offset);
+ if (ret)
+ return ret;
+
+ ret = check_isoc_ss_overlap(sch_ep, offset);
+ if (ret)
+ return ret;
+
return check_fs_bus_bw(sch_ep, offset);
}
+/* allocate microframes in the ls/fs frame */
+static int alloc_sch_portion_of_frame(struct mu3h_sch_ep_info *sch_ep)
+{
+ struct mu3h_sch_bw_info *sch_bw = sch_ep->bw_info;
+ const u32 bw_boundary = get_bw_boundary(sch_ep->speed);
+ u32 bw_max, fs_bw_min;
+ u32 offset, offset_min;
+ u16 fs_bw;
+ int frames;
+ int i, j;
+ int ret;
+
+ frames = sch_ep->esit / UFRAMES_PER_FRAME;
+
+ for (i = 0; i < UFRAMES_PER_FRAME; i++) {
+ fs_bw_min = FS_PAYLOAD_MAX;
+ offset_min = XHCI_MTK_MAX_ESIT;
+
+ for (j = 0; j < frames; j++) {
+ offset = (i + j * UFRAMES_PER_FRAME) % sch_ep->esit;
+
+ ret = check_sch_tt_budget(sch_ep, offset);
+ if (ret)
+ continue;
+
+ /* check hs bw domain */
+ bw_max = get_max_bw(sch_bw, sch_ep, offset);
+ if (bw_max > bw_boundary) {
+ ret = -ESCH_BW_OVERFLOW;
+ continue;
+ }
+
+ /* use best-fit between frames */
+ fs_bw = get_fs_bw(sch_ep, offset);
+ if (fs_bw < fs_bw_min) {
+ fs_bw_min = fs_bw;
+ offset_min = offset;
+ }
+
+ if (!fs_bw_min)
+ break;
+ }
+
+ /* use first-fit between microframes in a frame */
+ if (offset_min < XHCI_MTK_MAX_ESIT)
+ break;
+ }
+
+ if (offset_min == XHCI_MTK_MAX_ESIT)
+ return -ESCH_BW_OVERFLOW;
+
+ sch_ep->offset = offset_min;
+
+ return 0;
+}
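
To make the search order above concrete, here is a small standalone sketch (not part of the driver; esit = 32 is an assumed example value). It prints the candidate offsets that alloc_sch_portion_of_frame() walks: for each of the eight microframe slots it compares every FS frame (best fit), and it stops at the first slot that yields a schedulable offset (first fit).

#include <stdio.h>

#define UFRAMES_PER_FRAME 8

int main(void)
{
	unsigned int esit = 32;		/* example ESIT, in uframes (4 FS frames) */
	unsigned int frames = esit / UFRAMES_PER_FRAME;
	unsigned int i, j;

	for (i = 0; i < UFRAMES_PER_FRAME; i++) {
		printf("uframe slot %u candidates:", i);
		for (j = 0; j < frames; j++)
			printf(" %2u", (i + j * UFRAMES_PER_FRAME) % esit);
		printf("\n");
	}
	return 0;
}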
+
static void update_sch_tt(struct mu3h_sch_ep_info *sch_ep, bool used)
{
struct mu3h_sch_tt *tt = sch_ep->sch_tt;
- int bw_updated;
+ u16 *fs_bus_bw;
u32 base;
- int i, j;
+ int i, j, k, f;
- bw_updated = sch_ep->bw_cost_per_microframe * (used ? 1 : -1);
+ if (sch_ep->ep_type == ISOC_OUT_EP || sch_ep->ep_type == INT_OUT_EP)
+ fs_bus_bw = tt->fs_bus_bw_out;
+ else
+ fs_bus_bw = tt->fs_bus_bw_in;
for (i = 0; i < sch_ep->num_esit; i++) {
base = sch_ep->offset + i * sch_ep->esit;
- for (j = 0; j < sch_ep->num_budget_microframes; j++)
- tt->fs_bus_bw[XHCI_MTK_BW_INDEX(base + j)] += bw_updated;
+ for (j = 0; j < sch_ep->num_budget_microframes; j++) {
+ k = XHCI_MTK_BW_INDEX(base + j);
+ f = k / UFRAMES_PER_FRAME;
+ if (used) {
+ if (sch_ep->speed == USB_SPEED_LOW)
+ tt->ls_bus_bw[k] += (u8)sch_ep->bw_budget_table[j];
+
+ fs_bus_bw[k] += (u16)sch_ep->bw_budget_table[j];
+ tt->fs_frame_bw[f] += (u16)sch_ep->bw_budget_table[j];
+ } else {
+ if (sch_ep->speed == USB_SPEED_LOW)
+ tt->ls_bus_bw[k] -= (u8)sch_ep->bw_budget_table[j];
+
+ fs_bus_bw[k] -= (u16)sch_ep->bw_budget_table[j];
+ tt->fs_frame_bw[f] -= (u16)sch_ep->bw_budget_table[j];
+ }
+ }
}
if (used)
@@ -513,7 +790,8 @@ static int load_ep_bw(struct mu3h_sch_bw_info *sch_bw,
return 0;
}
-static int check_sch_bw(struct mu3h_sch_ep_info *sch_ep)
+/* allocate microframes for hs/ss/ssp */
+static int alloc_sch_microframes(struct mu3h_sch_ep_info *sch_ep)
{
struct mu3h_sch_bw_info *sch_bw = sch_ep->bw_info;
const u32 bw_boundary = get_bw_boundary(sch_ep->speed);
@@ -521,16 +799,12 @@ static int check_sch_bw(struct mu3h_sch_ep_info *sch_ep)
u32 worst_bw;
u32 min_bw = ~0;
int min_index = -1;
- int ret = 0;
/*
* Search through all possible schedule microframes.
* and find a microframe where its worst bandwidth is minimum.
*/
for (offset = 0; offset < sch_ep->esit; offset++) {
- ret = check_sch_tt(sch_ep, offset);
- if (ret)
- continue;
worst_bw = get_max_bw(sch_bw, sch_ep, offset);
if (worst_bw > bw_boundary)
@@ -540,21 +814,29 @@ static int check_sch_bw(struct mu3h_sch_ep_info *sch_ep)
min_bw = worst_bw;
min_index = offset;
}
-
- /* use first-fit for LS/FS */
- if (sch_ep->sch_tt && min_index >= 0)
- break;
-
- if (min_bw == 0)
- break;
}
if (min_index < 0)
- return ret ? ret : -ESCH_BW_OVERFLOW;
+ return -ESCH_BW_OVERFLOW;
sch_ep->offset = min_index;
- return load_ep_bw(sch_bw, sch_ep, true);
+ return 0;
+}
+
+static int check_sch_bw(struct mu3h_sch_ep_info *sch_ep)
+{
+ int ret;
+
+ if (sch_ep->sch_tt)
+ ret = alloc_sch_portion_of_frame(sch_ep);
+ else
+ ret = alloc_sch_microframes(sch_ep);
+
+ if (ret)
+ return ret;
+
+ return load_ep_bw(sch_ep->bw_info, sch_ep, true);
}
static void destroy_sch_ep(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
@@ -651,7 +933,7 @@ static int add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
xhci_dbg(xhci, "%s %s\n", __func__, decode_ep(ep, udev->speed));
- sch_ep = create_sch_ep(mtk, udev, ep);
+ sch_ep = create_sch_ep(mtk, udev, ep, ep_ctx);
if (IS_ERR_OR_NULL(sch_ep))
return -ENOMEM;
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index faaaf05e36ce..865b55e23b15 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -30,12 +30,21 @@
#define XHCI_MTK_MAX_ESIT (1 << 6)
#define XHCI_MTK_BW_INDEX(x) ((x) & (XHCI_MTK_MAX_ESIT - 1))
+#define UFRAMES_PER_FRAME 8
+#define XHCI_MTK_FRAMES_CNT (XHCI_MTK_MAX_ESIT / UFRAMES_PER_FRAME)
+
/**
- * @fs_bus_bw: array to keep track of bandwidth already used for FS
+ * @fs_bus_bw_out: bandwidth used by FS/LS OUT eps in each uframe
+ * @fs_bus_bw_in: bandwidth used by FS/LS IN eps in each uframe
+ * @ls_bus_bw: bandwidth used by LS eps in each uframe
+ * @fs_frame_bw: bandwidth used by FS/LS eps in each FS frame
* @ep_list: Endpoints using this TT
*/
struct mu3h_sch_tt {
- u32 fs_bus_bw[XHCI_MTK_MAX_ESIT];
+ u16 fs_bus_bw_out[XHCI_MTK_MAX_ESIT];
+ u16 fs_bus_bw_in[XHCI_MTK_MAX_ESIT];
+ u8 ls_bus_bw[XHCI_MTK_MAX_ESIT];
+ u16 fs_frame_bw[XHCI_MTK_FRAMES_CNT];
struct list_head ep_list;
};
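
A minimal standalone sketch (the uframe value is an assumed example) of how the tables above are indexed: XHCI_MTK_BW_INDEX() wraps an absolute uframe into the 64-entry per-uframe window, and dividing by UFRAMES_PER_FRAME picks the FS frame slot that fs_frame_bw[] accounts, matching the k/f arithmetic in update_sch_tt().

#include <stdio.h>

#define XHCI_MTK_MAX_ESIT	(1 << 6)
#define XHCI_MTK_BW_INDEX(x)	((x) & (XHCI_MTK_MAX_ESIT - 1))
#define UFRAMES_PER_FRAME	8

int main(void)
{
	unsigned int uframe = 70;			/* example absolute uframe */
	unsigned int k = XHCI_MTK_BW_INDEX(uframe);	/* per-uframe slot */
	unsigned int f = k / UFRAMES_PER_FRAME;		/* per-frame slot */

	printf("uframe %u -> bw slot %u, frame slot %u\n", uframe, k, f);
	return 0;
}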
@@ -58,7 +67,6 @@ struct mu3h_sch_bw_info {
* @num_esit: number of @esit in a period
* @num_budget_microframes: number of continuous uframes
* (@repeat==1) scheduled within the interval
- * @bw_cost_per_microframe: bandwidth cost per microframe
* @hentry: hash table entry
* @endpoint: linked into bandwidth domain which it belongs to
* @tt_endpoint: linked into mu3h_sch_tt's list which it belongs to
@@ -83,12 +91,12 @@ struct mu3h_sch_bw_info {
* times; 1: distribute the (bMaxBurst+1)*(Mult+1) packets
* according to @pkts and @repeat. normal mode is used by
* default
+ * @bw_budget_table: table to record bandwidth budget per microframe
*/
struct mu3h_sch_ep_info {
u32 esit;
u32 num_esit;
u32 num_budget_microframes;
- u32 bw_cost_per_microframe;
struct list_head endpoint;
struct hlist_node hentry;
struct list_head tt_endpoint;
@@ -108,6 +116,7 @@ struct mu3h_sch_ep_info {
u32 pkts;
u32 cs_count;
u32 burst_mode;
+ u32 bw_budget_table[];
};
#define MU3C_U3_PORT_MAX 4
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index b9ae5c2a2527..95ed9404f6f8 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -535,6 +535,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
/* xHC spec requires PCI devices to support D3hot and D3cold */
if (xhci->hci_version >= 0x120)
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+ else if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version >= 0x110)
+ xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
if (xhci->quirks & XHCI_RESET_ON_RESUME)
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
@@ -693,7 +695,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
pm_runtime_put_noidle(&dev->dev);
- if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
+ if (pci_choose_state(dev, PMSG_SUSPEND) == PCI_D0)
+ pm_runtime_forbid(&dev->dev);
+ else if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
pm_runtime_allow(&dev->dev);
dma_set_max_seg_size(&dev->dev, UINT_MAX);
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 28218c8f1837..b93161374293 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -458,23 +458,38 @@ static int __maybe_unused xhci_plat_resume(struct device *dev)
int ret;
if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) {
- clk_prepare_enable(xhci->clk);
- clk_prepare_enable(xhci->reg_clk);
+ ret = clk_prepare_enable(xhci->clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(xhci->reg_clk);
+ if (ret) {
+ clk_disable_unprepare(xhci->clk);
+ return ret;
+ }
}
ret = xhci_priv_resume_quirk(hcd);
if (ret)
- return ret;
+ goto disable_clks;
ret = xhci_resume(xhci, PMSG_RESUME);
if (ret)
- return ret;
+ goto disable_clks;
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
return 0;
+
+disable_clks:
+ if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) {
+ clk_disable_unprepare(xhci->clk);
+ clk_disable_unprepare(xhci->reg_clk);
+ }
+
+ return ret;
}
static int __maybe_unused xhci_plat_runtime_suspend(struct device *dev)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 3e5dc0723a8f..f3b5e6345858 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -144,7 +144,7 @@ static void next_trb(struct xhci_hcd *xhci,
struct xhci_segment **seg,
union xhci_trb **trb)
{
- if (trb_is_link(*trb)) {
+ if (trb_is_link(*trb) || last_trb_on_seg(*seg, *trb)) {
*seg = (*seg)->next;
*trb = ((*seg)->trbs);
} else {
@@ -450,8 +450,9 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
* In the future we should distinguish between -ENODEV and -ETIMEDOUT
* and try to recover a -ETIMEDOUT with a host controller reset.
*/
- ret = xhci_handshake(&xhci->op_regs->cmd_ring,
- CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
+ ret = xhci_handshake_check_state(xhci, &xhci->op_regs->cmd_ring,
+ CMD_RING_RUNNING, 0, 5 * 1000 * 1000,
+ XHCI_STATE_REMOVING);
if (ret < 0) {
xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
xhci_halt(xhci);
@@ -1879,7 +1880,6 @@ static void handle_port_status(struct xhci_hcd *xhci,
if ((port_id <= 0) || (port_id > max_ports)) {
xhci_warn(xhci, "Port change event with invalid port ID %d\n",
port_id);
- inc_deq(xhci, ir->event_ring);
return;
}
@@ -1906,7 +1906,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
xhci_dbg(xhci, "Port change event, %d-%d, id %d, portsc: 0x%x\n",
hcd->self.busnum, hcd_portnum + 1, port_id, portsc);
- trace_xhci_handle_port_status(hcd_portnum, portsc);
+ trace_xhci_handle_port_status(port, portsc);
if (hcd->state == HC_STATE_SUSPENDED) {
xhci_dbg(xhci, "resume root hub\n");
@@ -2007,8 +2007,6 @@ static void handle_port_status(struct xhci_hcd *xhci,
}
cleanup:
- /* Update event ring dequeue pointer before dropping the lock */
- inc_deq(xhci, ir->event_ring);
/* Don't make the USB core poll the roothub if we got a bad port status
* change event. Besides, at that point we can't tell which roothub
@@ -2884,13 +2882,6 @@ cleanup:
trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
trb_comp_code != COMP_NO_PING_RESPONSE_ERROR;
- /*
- * Do not update event ring dequeue pointer if we're in a loop
- * processing missed tds.
- */
- if (!handling_skipped_tds)
- inc_deq(xhci, ir->event_ring);
-
/*
* If ep->skip is set, it means there are missed tds on the
* endpoint ring need to take care of.
@@ -2922,9 +2913,7 @@ err_out:
static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
union xhci_trb *event;
- int update_ptrs = 1;
u32 trb_type;
- int ret;
/* Event ring hasn't been allocated yet. */
if (!ir || !ir->event_ring || !ir->event_ring->dequeue) {
@@ -2954,12 +2943,9 @@ static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
break;
case TRB_PORT_STATUS:
handle_port_status(xhci, ir, event);
- update_ptrs = 0;
break;
case TRB_TRANSFER:
- ret = handle_tx_event(xhci, ir, &event->trans_event);
- if (ret >= 0)
- update_ptrs = 0;
+ handle_tx_event(xhci, ir, &event->trans_event);
break;
case TRB_DEV_NOTE:
handle_device_notification(xhci, event);
@@ -2979,9 +2965,8 @@ static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
return 0;
}
- if (update_ptrs)
- /* Update SW event ring dequeue pointer */
- inc_deq(xhci, ir->event_ring);
+ /* Update SW event ring dequeue pointer */
+ inc_deq(xhci, ir->event_ring);
/* Are there more items on the event ring? Caller will call us again to
* check.
@@ -3013,13 +2998,12 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
* Per 4.9.4, Software writes to the ERDP register shall
* always advance the Event Ring Dequeue Pointer value.
*/
- if ((temp_64 & (u64) ~ERST_PTR_MASK) ==
- ((u64) deq & (u64) ~ERST_PTR_MASK))
+ if ((temp_64 & ERST_PTR_MASK) == (deq & ERST_PTR_MASK))
return;
/* Update HC event ring dequeue pointer */
- temp_64 &= ERST_DESI_MASK;
- temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
+ temp_64 = ir->event_ring->deq_seg->num & ERST_DESI_MASK;
+ temp_64 |= deq & ERST_PTR_MASK;
}
/* Clear the event handler busy flag (RW1C) */
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index d6b32f2ad90e..ac47b1c0544a 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -509,35 +509,38 @@ DEFINE_EVENT(xhci_log_ring, xhci_inc_deq,
);
DECLARE_EVENT_CLASS(xhci_log_portsc,
- TP_PROTO(u32 portnum, u32 portsc),
- TP_ARGS(portnum, portsc),
+ TP_PROTO(struct xhci_port *port, u32 portsc),
+ TP_ARGS(port, portsc),
TP_STRUCT__entry(
+ __field(u32, busnum)
__field(u32, portnum)
__field(u32, portsc)
),
TP_fast_assign(
- __entry->portnum = portnum;
+ __entry->busnum = port->rhub->hcd->self.busnum;
+ __entry->portnum = port->hcd_portnum;
__entry->portsc = portsc;
),
- TP_printk("port-%d: %s",
+ TP_printk("port %d-%d: %s",
+ __entry->busnum,
__entry->portnum,
xhci_decode_portsc(__get_buf(XHCI_MSG_MAX), __entry->portsc)
)
);
DEFINE_EVENT(xhci_log_portsc, xhci_handle_port_status,
- TP_PROTO(u32 portnum, u32 portsc),
- TP_ARGS(portnum, portsc)
+ TP_PROTO(struct xhci_port *port, u32 portsc),
+ TP_ARGS(port, portsc)
);
DEFINE_EVENT(xhci_log_portsc, xhci_get_port_status,
- TP_PROTO(u32 portnum, u32 portsc),
- TP_ARGS(portnum, portsc)
+ TP_PROTO(struct xhci_port *port, u32 portsc),
+ TP_ARGS(port, portsc)
);
DEFINE_EVENT(xhci_log_portsc, xhci_hub_status_data,
- TP_PROTO(u32 portnum, u32 portsc),
- TP_ARGS(portnum, portsc)
+ TP_PROTO(struct xhci_port *port, u32 portsc),
+ TP_ARGS(port, portsc)
);
DECLARE_EVENT_CLASS(xhci_log_doorbell,
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index e1b1b64a0723..884b0898d9c9 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -82,6 +82,29 @@ int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
}
/*
+ * xhci_handshake_check_state - same as xhci_handshake but takes an additional
+ * exit_state parameter, and bails out with an error immediately when xhc_state
+ * has exit_state flag set.
+ */
+int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr,
+ u32 mask, u32 done, int usec, unsigned int exit_state)
+{
+ u32 result;
+ int ret;
+
+ ret = readl_poll_timeout_atomic(ptr, result,
+ (result & mask) == done ||
+ result == U32_MAX ||
+ xhci->xhc_state & exit_state,
+ 1, usec);
+
+ if (result == U32_MAX || xhci->xhc_state & exit_state)
+ return -ENODEV;
+
+ return ret;
+}
+
+/*
* Disable interrupts and begin the xHCI halting process.
*/
void xhci_quiesce(struct xhci_hcd *xhci)
@@ -201,7 +224,8 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
if (xhci->quirks & XHCI_INTEL_HOST)
udelay(1000);
- ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
+ ret = xhci_handshake_check_state(xhci, &xhci->op_regs->command,
+ CMD_RESET, 0, timeout_us, XHCI_STATE_REMOVING);
if (ret)
return ret;
@@ -520,7 +544,7 @@ int xhci_run(struct usb_hcd *hcd)
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");
temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
- temp_64 &= ~ERST_PTR_MASK;
+ temp_64 &= ERST_PTR_MASK;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"ERST deq = 64'h%0lx", (long unsigned int) temp_64);
@@ -968,6 +992,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
int retval = 0;
bool comp_timer_running = false;
bool pending_portevent = false;
+ bool suspended_usb3_devs = false;
bool reinit_xhc = false;
if (!hcd->state)
@@ -1115,10 +1140,17 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
/*
* Resume roothubs only if there are pending events.
* USB 3 devices resend U3 LFPS wake after a 100ms delay if
- * the first wake signalling failed, give it that chance.
+ * the first wake signalling failed, give it that chance if
+ * there are suspended USB 3 devices.
*/
+ if (xhci->usb3_rhub.bus_state.suspended_ports ||
+ xhci->usb3_rhub.bus_state.bus_suspended)
+ suspended_usb3_devs = true;
+
pending_portevent = xhci_pending_portevent(xhci);
- if (!pending_portevent && msg.event == PM_EVENT_AUTO_RESUME) {
+
+ if (suspended_usb3_devs && !pending_portevent &&
+ msg.event == PM_EVENT_AUTO_RESUME) {
msleep(120);
pending_portevent = xhci_pending_portevent(xhci);
}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 3d0451f5880a..3ea5c092bba7 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -525,7 +525,7 @@ struct xhci_intr_reg {
* a work queue (or delayed service routine)?
*/
#define ERST_EHB (1 << 3)
-#define ERST_PTR_MASK (0xf)
+#define ERST_PTR_MASK (GENMASK_ULL(63, 4))
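
A hedged sketch of what the wider mask implies (GENMASK_ULL is expanded by hand here; the 0x7 segment-index mask is taken from the existing ERST_DESI_MASK definition and the ERDP value is an arbitrary example): masking with ERST_PTR_MASK now keeps bits 63:4 of the event ring dequeue pointer directly, so the compare-and-update in xhci_update_erst_dequeue() above works on the address itself rather than on the inverted mask.

#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define ERST_PTR_MASK	GENMASK_ULL(63, 4)
#define ERST_DESI_MASK	GENMASK_ULL(2, 0)	/* 0x7, segment index bits */

int main(void)
{
	uint64_t erdp = 0x123456789abcdef8ULL;	/* example ERDP register value */

	printf("dequeue ptr: %#llx, desi: %#llx\n",
	       (unsigned long long)(erdp & ERST_PTR_MASK),
	       (unsigned long long)(erdp & ERST_DESI_MASK));
	return 0;
}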
/**
* struct xhci_run_regs
@@ -558,33 +558,6 @@ struct xhci_doorbell_array {
#define DB_VALUE(ep, stream) ((((ep) + 1) & 0xff) | ((stream) << 16))
#define DB_VALUE_HOST 0x00000000
-/**
- * struct xhci_protocol_caps
- * @revision: major revision, minor revision, capability ID,
- * and next capability pointer.
- * @name_string: Four ASCII characters to say which spec this xHC
- * follows, typically "USB ".
- * @port_info: Port offset, count, and protocol-defined information.
- */
-struct xhci_protocol_caps {
- u32 revision;
- u32 name_string;
- u32 port_info;
-};
-
-#define XHCI_EXT_PORT_MAJOR(x) (((x) >> 24) & 0xff)
-#define XHCI_EXT_PORT_MINOR(x) (((x) >> 16) & 0xff)
-#define XHCI_EXT_PORT_PSIC(x) (((x) >> 28) & 0x0f)
-#define XHCI_EXT_PORT_OFF(x) ((x) & 0xff)
-#define XHCI_EXT_PORT_COUNT(x) (((x) >> 8) & 0xff)
-
-#define XHCI_EXT_PORT_PSIV(x) (((x) >> 0) & 0x0f)
-#define XHCI_EXT_PORT_PSIE(x) (((x) >> 4) & 0x03)
-#define XHCI_EXT_PORT_PLT(x) (((x) >> 6) & 0x03)
-#define XHCI_EXT_PORT_PFD(x) (((x) >> 8) & 0x01)
-#define XHCI_EXT_PORT_LP(x) (((x) >> 14) & 0x03)
-#define XHCI_EXT_PORT_PSIM(x) (((x) >> 16) & 0xffff)
-
#define PLT_MASK (0x03 << 6)
#define PLT_SYM (0x00 << 6)
#define PLT_ASYM_RX (0x02 << 6)
@@ -1545,6 +1518,7 @@ struct xhci_segment {
union xhci_trb *trbs;
/* private to HCD */
struct xhci_segment *next;
+ unsigned int num;
dma_addr_t dma;
/* Max packet sized bounce buffer for td-fragmant alignment */
dma_addr_t bounce_dma;
@@ -1669,12 +1643,8 @@ struct urb_priv {
struct xhci_td td[] __counted_by(num_tds);
};
-/*
- * Each segment table entry is 4*32bits long. 1K seems like an ok size:
- * (1K bytes * 8bytes/bit) / (4*32 bits) = 64 segment entries in the table,
- * meaning 64 ring segments.
- * Initial allocated size of the ERST, in number of entries */
-#define ERST_NUM_SEGS 1
+/* Reasonable limit for number of Event Ring segments (spec allows 32k) */
+#define ERST_MAX_SEGS 2
/* Poll every 60 seconds */
#define POLL_TIMEOUT 60
/* Stop endpoint command timeout (secs) for URB cancellation watchdog timer */
@@ -2078,13 +2048,8 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
unsigned int num_trbs, gfp_t flags);
-int xhci_alloc_erst(struct xhci_hcd *xhci,
- struct xhci_ring *evt_ring,
- struct xhci_erst *erst,
- gfp_t flags);
void xhci_initialize_ring_info(struct xhci_ring *ring,
unsigned int cycle_state);
-void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
unsigned int ep_index);
@@ -2119,6 +2084,8 @@ void xhci_free_container_ctx(struct xhci_hcd *xhci,
/* xHCI host controller glue */
typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us);
+int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr,
+ u32 mask, u32 done, int usec, unsigned int exit_state);
void xhci_quiesce(struct xhci_hcd *xhci);
int xhci_halt(struct xhci_hcd *xhci);
int xhci_start(struct xhci_hcd *xhci);
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 99b15b77dfd5..c510af7baa0d 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -165,6 +165,19 @@ config APPLE_MFI_FASTCHARGE
It is safe to say M here.
+config USB_LJCA
+ tristate "Intel La Jolla Cove Adapter support"
+ select AUXILIARY_BUS
+ depends on USB && ACPI
+ help
+ This adds support for Intel La Jolla Cove USB-I2C/SPI/GPIO
+ Master Adapter (LJCA). Additional drivers such as I2C_LJCA,
+ GPIO_LJCA and SPI_LJCA must be enabled in order to use the
+ functionality of the device.
+
+ This driver can also be built as a module. If so, the module
+ will be called usb-ljca.
+
source "drivers/usb/misc/sisusbvga/Kconfig"
config USB_LD
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 1992cc284d8a..0bc732bcb162 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_USB_EMI26) += emi26.o
obj-$(CONFIG_USB_EMI62) += emi62.o
obj-$(CONFIG_USB_EZUSB_FX2) += ezusb.o
obj-$(CONFIG_APPLE_MFI_FASTCHARGE) += apple-mfi-fastcharge.o
+obj-$(CONFIG_USB_LJCA) += usb-ljca.o
obj-$(CONFIG_USB_IDMOUSE) += idmouse.o
obj-$(CONFIG_USB_IOWARRIOR) += iowarrior.o
obj-$(CONFIG_USB_ISIGHTFW) += isight_firmware.o
diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
index 57bbe1309094..a341b2fbb7b4 100644
--- a/drivers/usb/misc/onboard_usb_hub.c
+++ b/drivers/usb/misc/onboard_usb_hub.c
@@ -240,7 +240,6 @@ static void onboard_hub_attach_usb_driver(struct work_struct *work)
static int onboard_hub_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id;
struct device *dev = &pdev->dev;
struct onboard_hub *hub;
unsigned int i;
@@ -250,11 +249,7 @@ static int onboard_hub_probe(struct platform_device *pdev)
if (!hub)
return -ENOMEM;
- of_id = of_match_device(onboard_hub_match, &pdev->dev);
- if (!of_id)
- return -ENODEV;
-
- hub->pdata = of_id->data;
+ hub->pdata = device_get_match_data(&pdev->dev);
if (!hub->pdata)
return -EINVAL;
diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h
index 2a4ab5ac0ebe..c4e24a7b9290 100644
--- a/drivers/usb/misc/onboard_usb_hub.h
+++ b/drivers/usb/misc/onboard_usb_hub.h
@@ -57,6 +57,7 @@ static const struct of_device_id onboard_hub_match[] = {
{ .compatible = "usb5e3,608", .data = &genesys_gl850g_data, },
{ .compatible = "usb5e3,610", .data = &genesys_gl852g_data, },
{ .compatible = "usb5e3,620", .data = &genesys_gl852g_data, },
+ { .compatible = "usb5e3,626", .data = &genesys_gl852g_data, },
{ .compatible = "usbbda,411", .data = &realtek_rts5411_data, },
{ .compatible = "usbbda,5411", .data = &realtek_rts5411_data, },
{ .compatible = "usbbda,414", .data = &realtek_rts5411_data, },
diff --git a/drivers/usb/misc/usb-ljca.c b/drivers/usb/misc/usb-ljca.c
new file mode 100644
index 000000000000..c9decd0396d4
--- /dev/null
+++ b/drivers/usb/misc/usb-ljca.c
@@ -0,0 +1,902 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel La Jolla Cove Adapter USB driver
+ *
+ * Copyright (c) 2023, Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/dev_printk.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/usb.h>
+#include <linux/usb/ljca.h>
+
+#include <asm/unaligned.h>
+
+/* command flags */
+#define LJCA_ACK_FLAG BIT(0)
+#define LJCA_RESP_FLAG BIT(1)
+#define LJCA_CMPL_FLAG BIT(2)
+
+#define LJCA_MAX_PACKET_SIZE 64u
+#define LJCA_MAX_PAYLOAD_SIZE \
+ (LJCA_MAX_PACKET_SIZE - sizeof(struct ljca_msg))
+
+#define LJCA_WRITE_TIMEOUT_MS 200
+#define LJCA_WRITE_ACK_TIMEOUT_MS 500
+#define LJCA_ENUM_CLIENT_TIMEOUT_MS 20
+
+/* ljca client type */
+enum ljca_client_type {
+ LJCA_CLIENT_MNG = 1,
+ LJCA_CLIENT_GPIO = 3,
+ LJCA_CLIENT_I2C = 4,
+ LJCA_CLIENT_SPI = 5,
+};
+
+/* MNG client commands */
+enum ljca_mng_cmd {
+ LJCA_MNG_RESET = 2,
+ LJCA_MNG_ENUM_GPIO = 4,
+ LJCA_MNG_ENUM_I2C = 5,
+ LJCA_MNG_ENUM_SPI = 8,
+};
+
+/* ljca client acpi _ADR */
+enum ljca_client_acpi_adr {
+ LJCA_GPIO_ACPI_ADR,
+ LJCA_I2C1_ACPI_ADR,
+ LJCA_I2C2_ACPI_ADR,
+ LJCA_SPI1_ACPI_ADR,
+ LJCA_SPI2_ACPI_ADR,
+ LJCA_CLIENT_ACPI_ADR_MAX,
+};
+
+/* ljca cmd message structure */
+struct ljca_msg {
+ u8 type;
+ u8 cmd;
+ u8 flags;
+ u8 len;
+ u8 data[] __counted_by(len);
+} __packed;
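
A small standalone sketch (the struct name and printed values are illustrative, not the driver's) of what the packing above implies: the four header bytes leave LJCA_MAX_PACKET_SIZE - 4 = 60 bytes for payload, which is the LJCA_MAX_PAYLOAD_SIZE bound that ljca_send() below enforces against tx_buf_len.

#include <stdio.h>
#include <stdint.h>

#define LJCA_MAX_PACKET_SIZE 64u

struct ljca_msg_hdr {		/* mirrors struct ljca_msg above, without the payload */
	uint8_t type;
	uint8_t cmd;
	uint8_t flags;
	uint8_t len;
} __attribute__((packed));

int main(void)
{
	printf("header %zu bytes, max payload %zu bytes\n",
	       sizeof(struct ljca_msg_hdr),
	       (size_t)(LJCA_MAX_PACKET_SIZE - sizeof(struct ljca_msg_hdr)));
	return 0;
}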
+
+struct ljca_i2c_ctr_info {
+ u8 id;
+ u8 capacity;
+ u8 intr_pin;
+} __packed;
+
+struct ljca_i2c_descriptor {
+ u8 num;
+ struct ljca_i2c_ctr_info info[] __counted_by(num);
+} __packed;
+
+struct ljca_spi_ctr_info {
+ u8 id;
+ u8 capacity;
+ u8 intr_pin;
+} __packed;
+
+struct ljca_spi_descriptor {
+ u8 num;
+ struct ljca_spi_ctr_info info[] __counted_by(num);
+} __packed;
+
+struct ljca_bank_descriptor {
+ u8 bank_id;
+ u8 pin_num;
+
+ /* 1 bit for each gpio, 1 means valid */
+ __le32 valid_pins;
+} __packed;
+
+struct ljca_gpio_descriptor {
+ u8 pins_per_bank;
+ u8 bank_num;
+ struct ljca_bank_descriptor bank_desc[] __counted_by(bank_num);
+} __packed;
+
+/**
+ * struct ljca_adapter - represent a ljca adapter
+ *
+ * @intf: the usb interface for this ljca adapter
+ * @usb_dev: the usb device for this ljca adapter
+ * @dev: the specific device info of the usb interface
+ * @rx_pipe: bulk in pipe for receiving data from firmware
+ * @tx_pipe: bulk out pipe for sending data to firmware
+ * @rx_urb: urb used for the bulk in pipe
+ * @rx_buf: buffer used to receive command response and event
+ * @rx_len: length of rx buffer
+ * @ex_buf: external buffer to save command response
+ * @ex_buf_len: length of external buffer
+ * @actual_length: actual length of data copied to external buffer
+ * @tx_buf: buffer used to download command to firmware
+ * @tx_buf_len: length of tx buffer
+ * @lock: spinlock to protect tx_buf and ex_buf
+ * @cmd_completion: completion object signalled when a command ack is received
+ * @mutex: mutex to serialize command downloads
+ * @client_list: client device list
+ * @disconnect: usb disconnect ongoing or not
+ * @reset_id: used to reset firmware
+ */
+struct ljca_adapter {
+ struct usb_interface *intf;
+ struct usb_device *usb_dev;
+ struct device *dev;
+
+ unsigned int rx_pipe;
+ unsigned int tx_pipe;
+
+ struct urb *rx_urb;
+ void *rx_buf;
+ unsigned int rx_len;
+
+ u8 *ex_buf;
+ u8 ex_buf_len;
+ u8 actual_length;
+
+ void *tx_buf;
+ u8 tx_buf_len;
+
+ spinlock_t lock;
+
+ struct completion cmd_completion;
+ struct mutex mutex;
+
+ struct list_head client_list;
+
+ bool disconnect;
+
+ u32 reset_id;
+};
+
+struct ljca_match_ids_walk_data {
+ const struct acpi_device_id *ids;
+ const char *uid;
+ struct acpi_device *adev;
+};
+
+static const struct acpi_device_id ljca_gpio_hids[] = {
+ { "INTC1074" },
+ { "INTC1096" },
+ { "INTC100B" },
+ { "INTC10D1" },
+ {},
+};
+
+static const struct acpi_device_id ljca_i2c_hids[] = {
+ { "INTC1075" },
+ { "INTC1097" },
+ { "INTC100C" },
+ { "INTC10D2" },
+ {},
+};
+
+static const struct acpi_device_id ljca_spi_hids[] = {
+ { "INTC1091" },
+ { "INTC1098" },
+ { "INTC100D" },
+ { "INTC10D3" },
+ {},
+};
+
+static void ljca_handle_event(struct ljca_adapter *adap,
+ struct ljca_msg *header)
+{
+ struct ljca_client *client;
+
+ list_for_each_entry(client, &adap->client_list, link) {
+ /*
+ * Currently only the GPIO client registers an event callback,
+ * but the firmware message structure should include an id once
+ * multiple clients of the same type register event callbacks.
+ */
+ if (client->type == header->type) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&client->event_cb_lock, flags);
+ client->event_cb(client->context, header->cmd,
+ header->data, header->len);
+ spin_unlock_irqrestore(&client->event_cb_lock, flags);
+
+ break;
+ }
+ }
+}
+
+/* process command ack and received data if available */
+static void ljca_handle_cmd_ack(struct ljca_adapter *adap, struct ljca_msg *header)
+{
+ struct ljca_msg *tx_header = adap->tx_buf;
+ u8 ibuf_len, actual_len = 0;
+ unsigned long flags;
+ u8 *ibuf;
+
+ spin_lock_irqsave(&adap->lock, flags);
+
+ if (tx_header->type != header->type || tx_header->cmd != header->cmd) {
+ spin_unlock_irqrestore(&adap->lock, flags);
+ dev_err(adap->dev, "cmd ack mismatch error\n");
+ return;
+ }
+
+ ibuf_len = adap->ex_buf_len;
+ ibuf = adap->ex_buf;
+
+ if (ibuf && ibuf_len) {
+ actual_len = min(header->len, ibuf_len);
+
+ /* copy received data to external buffer */
+ memcpy(ibuf, header->data, actual_len);
+ }
+ /* update copied data length */
+ adap->actual_length = actual_len;
+
+ spin_unlock_irqrestore(&adap->lock, flags);
+
+ complete(&adap->cmd_completion);
+}
+
+static void ljca_recv(struct urb *urb)
+{
+ struct ljca_msg *header = urb->transfer_buffer;
+ struct ljca_adapter *adap = urb->context;
+ int ret;
+
+ switch (urb->status) {
+ case 0:
+ /* success */
+ break;
+ case -ENOENT:
+ /*
+ * directly complete the possible ongoing transfer
+ * during disconnect
+ */
+ if (adap->disconnect)
+ complete(&adap->cmd_completion);
+ return;
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case -EPIPE:
+ /* rx urb is terminated */
+ dev_dbg(adap->dev, "rx urb terminated with status: %d\n",
+ urb->status);
+ return;
+ default:
+ dev_dbg(adap->dev, "rx urb error: %d\n", urb->status);
+ goto resubmit;
+ }
+
+ if (header->len + sizeof(*header) != urb->actual_length)
+ goto resubmit;
+
+ if (header->flags & LJCA_ACK_FLAG)
+ ljca_handle_cmd_ack(adap, header);
+ else
+ ljca_handle_event(adap, header);
+
+resubmit:
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret && ret != -EPERM)
+ dev_err(adap->dev, "resubmit rx urb error %d\n", ret);
+}
+
+static int ljca_send(struct ljca_adapter *adap, u8 type, u8 cmd,
+ const u8 *obuf, u8 obuf_len, u8 *ibuf, u8 ibuf_len,
+ bool ack, unsigned long timeout)
+{
+ unsigned int msg_len = sizeof(struct ljca_msg) + obuf_len;
+ struct ljca_msg *header = adap->tx_buf;
+ unsigned int transferred;
+ unsigned long flags;
+ int ret;
+
+ if (adap->disconnect)
+ return -ENODEV;
+
+ if (msg_len > adap->tx_buf_len)
+ return -EINVAL;
+
+ mutex_lock(&adap->mutex);
+
+ spin_lock_irqsave(&adap->lock, flags);
+
+ header->type = type;
+ header->cmd = cmd;
+ header->len = obuf_len;
+ if (obuf)
+ memcpy(header->data, obuf, obuf_len);
+
+ header->flags = LJCA_CMPL_FLAG | (ack ? LJCA_ACK_FLAG : 0);
+
+ adap->ex_buf = ibuf;
+ adap->ex_buf_len = ibuf_len;
+ adap->actual_length = 0;
+
+ spin_unlock_irqrestore(&adap->lock, flags);
+
+ reinit_completion(&adap->cmd_completion);
+
+ ret = usb_autopm_get_interface(adap->intf);
+ if (ret < 0)
+ goto out;
+
+ ret = usb_bulk_msg(adap->usb_dev, adap->tx_pipe, header,
+ msg_len, &transferred, LJCA_WRITE_TIMEOUT_MS);
+
+ usb_autopm_put_interface(adap->intf);
+
+ if (ret < 0)
+ goto out;
+ if (transferred != msg_len) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (ack) {
+ ret = wait_for_completion_timeout(&adap->cmd_completion,
+ timeout);
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ }
+ ret = adap->actual_length;
+
+out:
+ spin_lock_irqsave(&adap->lock, flags);
+ adap->ex_buf = NULL;
+ adap->ex_buf_len = 0;
+
+ memset(header, 0, sizeof(*header));
+ spin_unlock_irqrestore(&adap->lock, flags);
+
+ mutex_unlock(&adap->mutex);
+
+ return ret;
+}
+
+int ljca_transfer(struct ljca_client *client, u8 cmd, const u8 *obuf,
+ u8 obuf_len, u8 *ibuf, u8 ibuf_len)
+{
+ return ljca_send(client->adapter, client->type, cmd,
+ obuf, obuf_len, ibuf, ibuf_len, true,
+ LJCA_WRITE_ACK_TIMEOUT_MS);
+}
+EXPORT_SYMBOL_NS_GPL(ljca_transfer, LJCA);
+
+int ljca_transfer_noack(struct ljca_client *client, u8 cmd, const u8 *obuf,
+ u8 obuf_len)
+{
+ return ljca_send(client->adapter, client->type, cmd, obuf,
+ obuf_len, NULL, 0, false, LJCA_WRITE_ACK_TIMEOUT_MS);
+}
+EXPORT_SYMBOL_NS_GPL(ljca_transfer_noack, LJCA);
+
+int ljca_register_event_cb(struct ljca_client *client, ljca_event_cb_t event_cb,
+ void *context)
+{
+ unsigned long flags;
+
+ if (!event_cb)
+ return -EINVAL;
+
+ spin_lock_irqsave(&client->event_cb_lock, flags);
+
+ if (client->event_cb) {
+ spin_unlock_irqrestore(&client->event_cb_lock, flags);
+ return -EALREADY;
+ }
+
+ client->event_cb = event_cb;
+ client->context = context;
+
+ spin_unlock_irqrestore(&client->event_cb_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ljca_register_event_cb, LJCA);
+
+void ljca_unregister_event_cb(struct ljca_client *client)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&client->event_cb_lock, flags);
+
+ client->event_cb = NULL;
+ client->context = NULL;
+
+ spin_unlock_irqrestore(&client->event_cb_lock, flags);
+}
+EXPORT_SYMBOL_NS_GPL(ljca_unregister_event_cb, LJCA);
+
+static int ljca_match_device_ids(struct acpi_device *adev, void *data)
+{
+ struct ljca_match_ids_walk_data *wd = data;
+ const char *uid = acpi_device_uid(adev);
+
+ if (acpi_match_device_ids(adev, wd->ids))
+ return 0;
+
+ if (!wd->uid)
+ goto match;
+
+ if (!uid)
+ /*
+ * Some DSDTs have only one ACPI companion for the two I2C
+ * controllers and they don't set a UID at all (e.g. Dell
+ * Latitude 9420). On these platforms only the first I2C
+ * controller is used, so if a HID match has no UID we use
+ * "0" as the UID and assign ACPI companion to the first
+ * I2C controller.
+ */
+ uid = "0";
+ else
+ uid = strchr(uid, wd->uid[0]);
+
+ if (!uid || strcmp(uid, wd->uid))
+ return 0;
+
+match:
+ wd->adev = adev;
+
+ return 1;
+}
+
+/* bind auxiliary device to acpi device */
+static void ljca_auxdev_acpi_bind(struct ljca_adapter *adap,
+ struct auxiliary_device *auxdev,
+ u64 adr, u8 id)
+{
+ struct ljca_match_ids_walk_data wd = { 0 };
+ struct acpi_device *parent, *adev;
+ struct device *dev = adap->dev;
+ char uid[4];
+
+ parent = ACPI_COMPANION(dev);
+ if (!parent)
+ return;
+
+ /*
+ * get auxdev ACPI handle from the ACPI device directly
+ * under the parent that matches _ADR.
+ */
+ adev = acpi_find_child_device(parent, adr, false);
+ if (adev) {
+ ACPI_COMPANION_SET(&auxdev->dev, adev);
+ return;
+ }
+
+ /*
+ * _ADR is a grey area in the ACPI specification; some
+ * platforms use _HID to distinguish child devices.
+ */
+ switch (adr) {
+ case LJCA_GPIO_ACPI_ADR:
+ wd.ids = ljca_gpio_hids;
+ break;
+ case LJCA_I2C1_ACPI_ADR:
+ case LJCA_I2C2_ACPI_ADR:
+ snprintf(uid, sizeof(uid), "%d", id);
+ wd.uid = uid;
+ wd.ids = ljca_i2c_hids;
+ break;
+ case LJCA_SPI1_ACPI_ADR:
+ case LJCA_SPI2_ACPI_ADR:
+ wd.ids = ljca_spi_hids;
+ break;
+ default:
+ dev_warn(dev, "unsupported _ADR\n");
+ return;
+ }
+
+ acpi_dev_for_each_child(parent, ljca_match_device_ids, &wd);
+ if (wd.adev) {
+ ACPI_COMPANION_SET(&auxdev->dev, wd.adev);
+ return;
+ }
+
+ parent = ACPI_COMPANION(dev->parent->parent);
+ if (!parent)
+ return;
+
+ acpi_dev_for_each_child(parent, ljca_match_device_ids, &wd);
+ if (wd.adev)
+ ACPI_COMPANION_SET(&auxdev->dev, wd.adev);
+}
+
+static void ljca_auxdev_release(struct device *dev)
+{
+ struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
+
+ kfree(auxdev->dev.platform_data);
+}
+
+static int ljca_new_client_device(struct ljca_adapter *adap, u8 type, u8 id,
+ char *name, void *data, u64 adr)
+{
+ struct auxiliary_device *auxdev;
+ struct ljca_client *client;
+ int ret;
+
+ client = kzalloc(sizeof *client, GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ client->type = type;
+ client->id = id;
+ client->adapter = adap;
+ spin_lock_init(&client->event_cb_lock);
+
+ auxdev = &client->auxdev;
+ auxdev->name = name;
+ auxdev->id = id;
+
+ auxdev->dev.parent = adap->dev;
+ auxdev->dev.platform_data = data;
+ auxdev->dev.release = ljca_auxdev_release;
+
+ ret = auxiliary_device_init(auxdev);
+ if (ret)
+ goto err_free;
+
+ ljca_auxdev_acpi_bind(adap, auxdev, adr, id);
+
+ ret = auxiliary_device_add(auxdev);
+ if (ret)
+ goto err_uninit;
+
+ list_add_tail(&client->link, &adap->client_list);
+
+ return 0;
+
+err_uninit:
+ auxiliary_device_uninit(auxdev);
+
+err_free:
+ kfree(client);
+
+ return ret;
+}
+
+static int ljca_enumerate_gpio(struct ljca_adapter *adap)
+{
+ u32 valid_pin[LJCA_MAX_GPIO_NUM / BITS_PER_TYPE(u32)];
+ struct ljca_gpio_descriptor *desc;
+ struct ljca_gpio_info *gpio_info;
+ u8 buf[LJCA_MAX_PAYLOAD_SIZE];
+ int ret, gpio_num;
+ unsigned int i;
+
+ ret = ljca_send(adap, LJCA_CLIENT_MNG, LJCA_MNG_ENUM_GPIO, NULL, 0, buf,
+ sizeof(buf), true, LJCA_ENUM_CLIENT_TIMEOUT_MS);
+ if (ret < 0)
+ return ret;
+
+ /* check firmware response */
+ desc = (struct ljca_gpio_descriptor *)buf;
+ if (ret != struct_size(desc, bank_desc, desc->bank_num))
+ return -EINVAL;
+
+ gpio_num = desc->pins_per_bank * desc->bank_num;
+ if (gpio_num > LJCA_MAX_GPIO_NUM)
+ return -EINVAL;
+
+ /* construct platform data */
+ gpio_info = kzalloc(sizeof *gpio_info, GFP_KERNEL);
+ if (!gpio_info)
+ return -ENOMEM;
+ gpio_info->num = gpio_num;
+
+ for (i = 0; i < desc->bank_num; i++)
+ valid_pin[i] = get_unaligned_le32(&desc->bank_desc[i].valid_pins);
+ bitmap_from_arr32(gpio_info->valid_pin_map, valid_pin, gpio_num);
+
+ ret = ljca_new_client_device(adap, LJCA_CLIENT_GPIO, 0, "ljca-gpio",
+ gpio_info, LJCA_GPIO_ACPI_ADR);
+ if (ret)
+ kfree(gpio_info);
+
+ return ret;
+}
+
+static int ljca_enumerate_i2c(struct ljca_adapter *adap)
+{
+ struct ljca_i2c_descriptor *desc;
+ struct ljca_i2c_info *i2c_info;
+ u8 buf[LJCA_MAX_PAYLOAD_SIZE];
+ unsigned int i;
+ int ret;
+
+ ret = ljca_send(adap, LJCA_CLIENT_MNG, LJCA_MNG_ENUM_I2C, NULL, 0, buf,
+ sizeof(buf), true, LJCA_ENUM_CLIENT_TIMEOUT_MS);
+ if (ret < 0)
+ return ret;
+
+ /* check firmware response */
+ desc = (struct ljca_i2c_descriptor *)buf;
+ if (ret != struct_size(desc, info, desc->num))
+ return -EINVAL;
+
+ for (i = 0; i < desc->num; i++) {
+ /* construct platform data */
+ i2c_info = kzalloc(sizeof *i2c_info, GFP_KERNEL);
+ if (!i2c_info)
+ return -ENOMEM;
+
+ i2c_info->id = desc->info[i].id;
+ i2c_info->capacity = desc->info[i].capacity;
+ i2c_info->intr_pin = desc->info[i].intr_pin;
+
+ ret = ljca_new_client_device(adap, LJCA_CLIENT_I2C, i,
+ "ljca-i2c", i2c_info,
+ LJCA_I2C1_ACPI_ADR + i);
+ if (ret) {
+ kfree(i2c_info);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ljca_enumerate_spi(struct ljca_adapter *adap)
+{
+ struct ljca_spi_descriptor *desc;
+ struct ljca_spi_info *spi_info;
+ u8 buf[LJCA_MAX_PAYLOAD_SIZE];
+ unsigned int i;
+ int ret;
+
+ ret = ljca_send(adap, LJCA_CLIENT_MNG, LJCA_MNG_ENUM_SPI, NULL, 0, buf,
+ sizeof(buf), true, LJCA_ENUM_CLIENT_TIMEOUT_MS);
+ if (ret < 0)
+ return ret;
+
+ /* check firmware response */
+ desc = (struct ljca_spi_descriptor *)buf;
+ if (ret != struct_size(desc, info, desc->num))
+ return -EINVAL;
+
+ for (i = 0; i < desc->num; i++) {
+ /* construct platform data */
+ spi_info = kzalloc(sizeof *spi_info, GFP_KERNEL);
+ if (!spi_info)
+ return -ENOMEM;
+
+ spi_info->id = desc->info[i].id;
+ spi_info->capacity = desc->info[i].capacity;
+
+ ret = ljca_new_client_device(adap, LJCA_CLIENT_SPI, i,
+ "ljca-spi", spi_info,
+ LJCA_SPI1_ACPI_ADR + i);
+ if (ret) {
+ kfree(spi_info);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ljca_reset_handshake(struct ljca_adapter *adap)
+{
+ __le32 reset_id = cpu_to_le32(adap->reset_id);
+ __le32 reset_id_ret = 0;
+ int ret;
+
+ adap->reset_id++;
+
+ ret = ljca_send(adap, LJCA_CLIENT_MNG, LJCA_MNG_RESET, (u8 *)&reset_id,
+ sizeof(__le32), (u8 *)&reset_id_ret, sizeof(__le32),
+ true, LJCA_WRITE_ACK_TIMEOUT_MS);
+ if (ret < 0)
+ return ret;
+
+ if (reset_id_ret != reset_id)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ljca_enumerate_clients(struct ljca_adapter *adap)
+{
+ struct ljca_client *client, *next;
+ int ret;
+
+ ret = ljca_reset_handshake(adap);
+ if (ret)
+ goto err_kill;
+
+ ret = ljca_enumerate_gpio(adap);
+ if (ret) {
+ dev_err(adap->dev, "enumerate GPIO error\n");
+ goto err_kill;
+ }
+
+ ret = ljca_enumerate_i2c(adap);
+ if (ret) {
+ dev_err(adap->dev, "enumerate I2C error\n");
+ goto err_kill;
+ }
+
+ ret = ljca_enumerate_spi(adap);
+ if (ret) {
+ dev_err(adap->dev, "enumerate SPI error\n");
+ goto err_kill;
+ }
+
+ return 0;
+
+err_kill:
+ adap->disconnect = true;
+
+ usb_kill_urb(adap->rx_urb);
+
+ list_for_each_entry_safe_reverse(client, next, &adap->client_list, link) {
+ auxiliary_device_delete(&client->auxdev);
+ auxiliary_device_uninit(&client->auxdev);
+
+ list_del_init(&client->link);
+ kfree(client);
+ }
+
+ return ret;
+}
+
+static int ljca_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *usb_dev = interface_to_usbdev(interface);
+ struct usb_host_interface *alt = interface->cur_altsetting;
+ struct usb_endpoint_descriptor *ep_in, *ep_out;
+ struct device *dev = &interface->dev;
+ struct ljca_adapter *adap;
+ int ret;
+
+ adap = devm_kzalloc(dev, sizeof(*adap), GFP_KERNEL);
+ if (!adap)
+ return -ENOMEM;
+
+ /* separate tx buffer allocation for alignment */
+ adap->tx_buf = devm_kzalloc(dev, LJCA_MAX_PACKET_SIZE, GFP_KERNEL);
+ if (!adap->tx_buf)
+ return -ENOMEM;
+ adap->tx_buf_len = LJCA_MAX_PACKET_SIZE;
+
+ mutex_init(&adap->mutex);
+ spin_lock_init(&adap->lock);
+ init_completion(&adap->cmd_completion);
+ INIT_LIST_HEAD(&adap->client_list);
+
+ adap->intf = usb_get_intf(interface);
+ adap->usb_dev = usb_dev;
+ adap->dev = dev;
+
+ /*
+ * Find the first bulk-in and bulk-out endpoints;
+ * ignore any others.
+ */
+ ret = usb_find_common_endpoints(alt, &ep_in, &ep_out, NULL, NULL);
+ if (ret) {
+ dev_err(dev, "bulk endpoints not found\n");
+ goto err_put;
+ }
+ adap->rx_pipe = usb_rcvbulkpipe(usb_dev, usb_endpoint_num(ep_in));
+ adap->tx_pipe = usb_sndbulkpipe(usb_dev, usb_endpoint_num(ep_out));
+
+ /* setup rx buffer */
+ adap->rx_len = usb_endpoint_maxp(ep_in);
+ adap->rx_buf = devm_kzalloc(dev, adap->rx_len, GFP_KERNEL);
+ if (!adap->rx_buf) {
+ ret = -ENOMEM;
+ goto err_put;
+ }
+
+ /* alloc rx urb */
+ adap->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!adap->rx_urb) {
+ ret = -ENOMEM;
+ goto err_put;
+ }
+ usb_fill_bulk_urb(adap->rx_urb, usb_dev, adap->rx_pipe,
+ adap->rx_buf, adap->rx_len, ljca_recv, adap);
+
+ usb_set_intfdata(interface, adap);
+
+ /* submit rx urb before enumerating clients */
+ ret = usb_submit_urb(adap->rx_urb, GFP_KERNEL);
+ if (ret) {
+ dev_err(dev, "submit rx urb failed: %d\n", ret);
+ goto err_free;
+ }
+
+ ret = ljca_enumerate_clients(adap);
+ if (ret)
+ goto err_free;
+
+ usb_enable_autosuspend(usb_dev);
+
+ return 0;
+
+err_free:
+ usb_free_urb(adap->rx_urb);
+
+err_put:
+ usb_put_intf(adap->intf);
+
+ mutex_destroy(&adap->mutex);
+
+ return ret;
+}
+
+static void ljca_disconnect(struct usb_interface *interface)
+{
+ struct ljca_adapter *adap = usb_get_intfdata(interface);
+ struct ljca_client *client, *next;
+
+ adap->disconnect = true;
+
+ usb_kill_urb(adap->rx_urb);
+
+ list_for_each_entry_safe_reverse(client, next, &adap->client_list, link) {
+ auxiliary_device_delete(&client->auxdev);
+ auxiliary_device_uninit(&client->auxdev);
+
+ list_del_init(&client->link);
+ kfree(client);
+ }
+
+ usb_free_urb(adap->rx_urb);
+
+ usb_put_intf(adap->intf);
+
+ mutex_destroy(&adap->mutex);
+}
+
+static int ljca_suspend(struct usb_interface *interface, pm_message_t message)
+{
+ struct ljca_adapter *adap = usb_get_intfdata(interface);
+
+ usb_kill_urb(adap->rx_urb);
+
+ return 0;
+}
+
+static int ljca_resume(struct usb_interface *interface)
+{
+ struct ljca_adapter *adap = usb_get_intfdata(interface);
+
+ return usb_submit_urb(adap->rx_urb, GFP_KERNEL);
+}
+
+static const struct usb_device_id ljca_table[] = {
+ { USB_DEVICE(0x8086, 0x0b63) },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(usb, ljca_table);
+
+static struct usb_driver ljca_driver = {
+ .name = "ljca",
+ .id_table = ljca_table,
+ .probe = ljca_probe,
+ .disconnect = ljca_disconnect,
+ .suspend = ljca_suspend,
+ .resume = ljca_resume,
+ .supports_autosuspend = 1,
+};
+module_usb_driver(ljca_driver);
+
+MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
+MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
+MODULE_AUTHOR("Lixu Zhang <lixu.zhang@intel.com>");
+MODULE_DESCRIPTION("Intel La Jolla Cove Adapter USB driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index ac0d75ac2d2f..caf65f8294db 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -705,7 +705,7 @@ static int is_good_config(struct usbtest_dev *tdev, int len)
{
struct usb_config_descriptor *config;
- if (len < sizeof(*config))
+ if (len < (int)sizeof(*config))
return 0;
config = (struct usb_config_descriptor *) tdev->buf;
diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
index 9812d102a005..824904abe76f 100644
--- a/drivers/usb/mon/mon_main.c
+++ b/drivers/usb/mon/mon_main.c
@@ -81,15 +81,12 @@ void mon_reader_del(struct mon_bus *mbus, struct mon_reader *r)
static void mon_bus_submit(struct mon_bus *mbus, struct urb *urb)
{
unsigned long flags;
- struct list_head *pos;
struct mon_reader *r;
spin_lock_irqsave(&mbus->lock, flags);
mbus->cnt_events++;
- list_for_each (pos, &mbus->r_list) {
- r = list_entry(pos, struct mon_reader, r_link);
+ list_for_each_entry(r, &mbus->r_list, r_link)
r->rnf_submit(r->r_data, urb);
- }
spin_unlock_irqrestore(&mbus->lock, flags);
}
@@ -108,15 +105,12 @@ static void mon_submit(struct usb_bus *ubus, struct urb *urb)
static void mon_bus_submit_error(struct mon_bus *mbus, struct urb *urb, int error)
{
unsigned long flags;
- struct list_head *pos;
struct mon_reader *r;
spin_lock_irqsave(&mbus->lock, flags);
mbus->cnt_events++;
- list_for_each (pos, &mbus->r_list) {
- r = list_entry(pos, struct mon_reader, r_link);
+ list_for_each_entry(r, &mbus->r_list, r_link)
r->rnf_error(r->r_data, urb, error);
- }
spin_unlock_irqrestore(&mbus->lock, flags);
}
@@ -135,15 +129,12 @@ static void mon_submit_error(struct usb_bus *ubus, struct urb *urb, int error)
static void mon_bus_complete(struct mon_bus *mbus, struct urb *urb, int status)
{
unsigned long flags;
- struct list_head *pos;
struct mon_reader *r;
spin_lock_irqsave(&mbus->lock, flags);
mbus->cnt_events++;
- list_for_each (pos, &mbus->r_list) {
- r = list_entry(pos, struct mon_reader, r_link);
+ list_for_each_entry(r, &mbus->r_list, r_link)
r->rnf_complete(r->r_data, urb, status);
- }
spin_unlock_irqrestore(&mbus->lock, flags);
}
@@ -165,11 +156,9 @@ static void mon_complete(struct usb_bus *ubus, struct urb *urb, int status)
static void mon_stop(struct mon_bus *mbus)
{
struct usb_bus *ubus;
- struct list_head *p;
if (mbus == &mon_bus0) {
- list_for_each (p, &mon_buses) {
- mbus = list_entry(p, struct mon_bus, bus_link);
+ list_for_each_entry(mbus, &mon_buses, bus_link) {
/*
* We do not change nreaders here, so rely on mon_lock.
*/
@@ -332,14 +321,12 @@ static void mon_bus0_init(void)
*/
struct mon_bus *mon_bus_lookup(unsigned int num)
{
- struct list_head *p;
struct mon_bus *mbus;
if (num == 0) {
return &mon_bus0;
}
- list_for_each (p, &mon_buses) {
- mbus = list_entry(p, struct mon_bus, bus_link);
+ list_for_each_entry(mbus, &mon_buses, bus_link) {
if (mbus->u_bus->busnum == num) {
return mbus;
}
diff --git a/drivers/usb/mtu3/mtu3_plat.c b/drivers/usb/mtu3/mtu3_plat.c
index 6f264b129243..6858ed9fc3b2 100644
--- a/drivers/usb/mtu3/mtu3_plat.c
+++ b/drivers/usb/mtu3/mtu3_plat.c
@@ -451,7 +451,7 @@ comm_init_err:
return ret;
}
-static int mtu3_remove(struct platform_device *pdev)
+static void mtu3_remove(struct platform_device *pdev)
{
struct ssusb_mtk *ssusb = platform_get_drvdata(pdev);
@@ -469,8 +469,16 @@ static int mtu3_remove(struct platform_device *pdev)
ssusb_gadget_exit(ssusb);
ssusb_host_exit(ssusb);
break;
- default:
- return -EINVAL;
+ case USB_DR_MODE_UNKNOWN:
+ /*
+ * This cannot happen because with dr_mode ==
+ * USB_DR_MODE_UNKNOWN, .probe() doesn't succeed and so
+ * .remove() wouldn't be called at all. However (somewhat
+ * surprisingly) the compiler isn't smart enough to see that,
+ * so this case is handled explicitly to keep the compiler
+ * from warning about an unhandled enumeration value.
+ */
+ break;
}
ssusb_rscs_exit(ssusb);
@@ -478,8 +486,6 @@ static int mtu3_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
-
- return 0;
}
static int resume_ip_and_ports(struct ssusb_mtk *ssusb, pm_message_t msg)
@@ -615,7 +621,7 @@ MODULE_DEVICE_TABLE(of, mtu3_of_match);
static struct platform_driver mtu3_driver = {
.probe = mtu3_probe,
- .remove = mtu3_remove,
+ .remove_new = mtu3_remove,
.driver = {
.name = MTU3_DRIVER_NAME,
.pm = DEV_PM_OPS,
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 912e32b78ac6..8abf3a567e30 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -17,6 +17,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 98b42dc04dee..9c7a8bbc0542 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -849,7 +849,7 @@ static int dsps_setup_optional_vbus_irq(struct platform_device *pdev,
error = devm_request_threaded_irq(glue->dev, glue->vbus_irq,
NULL, dsps_vbus_threaded_irq,
- IRQF_ONESHOT,
+ IRQF_SHARED,
"vbus", glue);
if (error) {
glue->vbus_irq = 0;
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
index d73282c0ec50..4d3b49e5b87a 100644
--- a/drivers/usb/storage/uas-detect.h
+++ b/drivers/usb/storage/uas-detect.h
@@ -54,12 +54,12 @@ static int uas_find_endpoints(struct usb_host_interface *alt,
static int uas_use_uas_driver(struct usb_interface *intf,
const struct usb_device_id *id,
- unsigned long *flags_ret)
+ u64 *flags_ret)
{
struct usb_host_endpoint *eps[4] = { };
struct usb_device *udev = interface_to_usbdev(intf);
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
- unsigned long flags = id->driver_info;
+ u64 flags = id->driver_info;
struct usb_host_interface *alt;
int r;
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 2583ee9815c5..696bb0b23599 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -37,7 +37,7 @@ struct uas_dev_info {
struct usb_anchor cmd_urbs;
struct usb_anchor sense_urbs;
struct usb_anchor data_urbs;
- unsigned long flags;
+ u64 flags;
int qdepth, resetting;
unsigned cmd_pipe, status_pipe, data_in_pipe, data_out_pipe;
unsigned use_streams:1;
@@ -988,7 +988,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
struct Scsi_Host *shost = NULL;
struct uas_dev_info *devinfo;
struct usb_device *udev = interface_to_usbdev(intf);
- unsigned long dev_flags;
+ u64 dev_flags;
if (!uas_use_uas_driver(intf, id, &dev_flags))
return -ENODEV;
diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
index 0547daf116a2..5df40759d77a 100644
--- a/drivers/usb/storage/unusual_cypress.h
+++ b/drivers/usb/storage/unusual_cypress.h
@@ -19,7 +19,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
"Cypress ISD-300LP",
USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
-UNUSUAL_DEV( 0x14cd, 0x6116, 0x0160, 0x0160,
+UNUSUAL_DEV( 0x14cd, 0x6116, 0x0150, 0x0160,
"Super Top",
"USB 2.0 SATA BRIDGE",
USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 7b36a3334fb3..d1ad6a2509ab 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -110,17 +110,6 @@ MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks");
.useTransport = use_transport, \
}
-#define UNUSUAL_VENDOR_INTF(idVendor, cl, sc, pr, \
- vendor_name, product_name, use_protocol, use_transport, \
- init_function, Flags) \
-{ \
- .vendorName = vendor_name, \
- .productName = product_name, \
- .useProtocol = use_protocol, \
- .useTransport = use_transport, \
- .initFunction = init_function, \
-}
-
static const struct us_unusual_dev us_unusual_dev_list[] = {
# include "unusual_devs.h"
{ } /* Terminating entry */
@@ -132,7 +121,6 @@ static const struct us_unusual_dev for_dynamic_ids =
#undef UNUSUAL_DEV
#undef COMPLIANT_DEV
#undef USUAL_DEV
-#undef UNUSUAL_VENDOR_INTF
#ifdef CONFIG_LOCKDEP
@@ -472,13 +460,13 @@ static int associate_dev(struct us_data *us, struct usb_interface *intf)
#define TOLOWER(x) ((x) | 0x20)
/* Adjust device flags based on the "quirks=" module parameter */
-void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
+void usb_stor_adjust_quirks(struct usb_device *udev, u64 *fflags)
{
char *p;
u16 vid = le16_to_cpu(udev->descriptor.idVendor);
u16 pid = le16_to_cpu(udev->descriptor.idProduct);
- unsigned f = 0;
- unsigned int mask = (US_FL_SANE_SENSE | US_FL_BAD_SENSE |
+ u64 f = 0;
+ u64 mask = (US_FL_SANE_SENSE | US_FL_BAD_SENSE |
US_FL_FIX_CAPACITY | US_FL_IGNORE_UAS |
US_FL_CAPACITY_HEURISTICS | US_FL_IGNORE_DEVICE |
US_FL_NOT_LOCKABLE | US_FL_MAX_SECTORS_64 |
@@ -617,7 +605,7 @@ static int get_device_info(struct us_data *us, const struct usb_device_id *id,
us->fflags &= ~US_FL_GO_SLOW;
if (us->fflags)
- dev_info(pdev, "Quirks match for vid %04x pid %04x: %lx\n",
+ dev_info(pdev, "Quirks match for vid %04x pid %04x: %llx\n",
le16_to_cpu(dev->descriptor.idVendor),
le16_to_cpu(dev->descriptor.idProduct),
us->fflags);
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index fd3f32670873..97c6196d639b 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -95,7 +95,7 @@ struct us_data {
struct usb_interface *pusb_intf; /* this interface */
const struct us_unusual_dev *unusual_dev;
/* device-filter entry */
- unsigned long fflags; /* fixed flags from filter */
+ u64 fflags; /* fixed flags from filter */
unsigned long dflags; /* dynamic atomic bitflags */
unsigned int send_bulk_pipe; /* cached pipe values */
unsigned int recv_bulk_pipe;
@@ -192,7 +192,7 @@ extern int usb_stor_probe2(struct us_data *us);
extern void usb_stor_disconnect(struct usb_interface *intf);
extern void usb_stor_adjust_quirks(struct usb_device *dev,
- unsigned long *fflags);
+ u64 *fflags);
#define module_usb_stor_driver(__driver, __sht, __name) \
static int __init __driver##_init(void) \
diff --git a/drivers/usb/storage/usual-tables.c b/drivers/usb/storage/usual-tables.c
index 529512827d8f..a26029e43dfd 100644
--- a/drivers/usb/storage/usual-tables.c
+++ b/drivers/usb/storage/usual-tables.c
@@ -19,27 +19,13 @@
vendorName, productName, useProtocol, useTransport, \
initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
- .driver_info = (flags) }
+ .driver_info = (kernel_ulong_t)(flags) }
#define COMPLIANT_DEV UNUSUAL_DEV
#define USUAL_DEV(useProto, useTrans) \
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans) }
-/* Define the device is matched with Vendor ID and interface descriptors */
-#define UNUSUAL_VENDOR_INTF(id_vendor, cl, sc, pr, \
- vendorName, productName, useProtocol, useTransport, \
- initFunction, flags) \
-{ \
- .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
- | USB_DEVICE_ID_MATCH_VENDOR, \
- .idVendor = (id_vendor), \
- .bInterfaceClass = (cl), \
- .bInterfaceSubClass = (sc), \
- .bInterfaceProtocol = (pr), \
- .driver_info = (flags) \
-}
-
const struct usb_device_id usb_storage_usb_ids[] = {
# include "unusual_devs.h"
{ } /* Terminating entry */
@@ -49,7 +35,6 @@ MODULE_DEVICE_TABLE(usb, usb_storage_usb_ids);
#undef UNUSUAL_DEV
#undef COMPLIANT_DEV
#undef USUAL_DEV
-#undef UNUSUAL_VENDOR_INTF
/*
* The table of devices to ignore
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index aea9d2fd0e76..f81bec0c7b86 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -86,8 +86,11 @@ static int dp_altmode_notify(struct dp_altmode *dp)
static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
{
- u32 conf = DP_CONF_SIGNALING_DP; /* Only DP signaling supported */
u8 pin_assign = 0;
+ u32 conf;
+
+ /* DP Signalling */
+ conf = (dp->data.conf & DP_CONF_SIGNALLING_MASK) >> DP_CONF_SIGNALLING_SHIFT;
switch (con) {
case DP_STATUS_CON_DISABLED:
diff --git a/drivers/usb/typec/anx7411.c b/drivers/usb/typec/anx7411.c
index 221604f933a4..b12a07edc71b 100644
--- a/drivers/usb/typec/anx7411.c
+++ b/drivers/usb/typec/anx7411.c
@@ -1550,8 +1550,7 @@ static void anx7411_i2c_remove(struct i2c_client *client)
if (plat->workqueue)
destroy_workqueue(plat->workqueue);
- if (plat->spi_client)
- i2c_unregister_device(plat->spi_client);
+ i2c_unregister_device(plat->spi_client);
if (plat->typec.role_sw)
usb_role_switch_put(plat->typec.role_sw);
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 9c1dbf3c00e0..2e0451bd336e 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -13,6 +13,7 @@
#include <linux/usb/pd_vdo.h>
#include <linux/usb/typec_mux.h>
#include <linux/usb/typec_retimer.h>
+#include <linux/usb.h>
#include "bus.h"
#include "class.h"
@@ -681,6 +682,33 @@ const struct device_type typec_partner_dev_type = {
.release = typec_partner_release,
};
+static void typec_partner_link_device(struct typec_partner *partner, struct device *dev)
+{
+ int ret;
+
+ ret = sysfs_create_link(&dev->kobj, &partner->dev.kobj, "typec");
+ if (ret)
+ return;
+
+ ret = sysfs_create_link(&partner->dev.kobj, &dev->kobj, dev_name(dev));
+ if (ret) {
+ sysfs_remove_link(&dev->kobj, "typec");
+ return;
+ }
+
+ if (partner->attach)
+ partner->attach(partner, dev);
+}
+
+static void typec_partner_unlink_device(struct typec_partner *partner, struct device *dev)
+{
+ sysfs_remove_link(&partner->dev.kobj, dev_name(dev));
+ sysfs_remove_link(&dev->kobj, "typec");
+
+ if (partner->deattach)
+ partner->deattach(partner, dev);
+}
+
/**
* typec_partner_set_identity - Report result from Discover Identity command
* @partner: The partner updated identity values
@@ -865,6 +893,8 @@ struct typec_partner *typec_register_partner(struct typec_port *port,
partner->num_altmodes = -1;
partner->pd_revision = desc->pd_revision;
partner->svdm_version = port->cap->svdm_version;
+ partner->attach = desc->attach;
+ partner->deattach = desc->deattach;
if (desc->identity) {
/*
@@ -887,6 +917,11 @@ struct typec_partner *typec_register_partner(struct typec_port *port,
return ERR_PTR(ret);
}
+ if (port->usb2_dev)
+ typec_partner_link_device(partner, port->usb2_dev);
+ if (port->usb3_dev)
+ typec_partner_link_device(partner, port->usb3_dev);
+
return partner;
}
EXPORT_SYMBOL_GPL(typec_register_partner);
@@ -899,8 +934,19 @@ EXPORT_SYMBOL_GPL(typec_register_partner);
*/
void typec_unregister_partner(struct typec_partner *partner)
{
- if (!IS_ERR_OR_NULL(partner))
- device_unregister(&partner->dev);
+ struct typec_port *port;
+
+ if (IS_ERR_OR_NULL(partner))
+ return;
+
+ port = to_typec_port(partner->dev.parent);
+
+ if (port->usb2_dev)
+ typec_partner_unlink_device(partner, port->usb2_dev);
+ if (port->usb3_dev)
+ typec_partner_unlink_device(partner, port->usb3_dev);
+
+ device_unregister(&partner->dev);
}
EXPORT_SYMBOL_GPL(typec_unregister_partner);
@@ -1775,6 +1821,50 @@ static int partner_match(struct device *dev, void *data)
return is_typec_partner(dev);
}
+static struct typec_partner *typec_get_partner(struct typec_port *port)
+{
+ struct device *dev;
+
+ dev = device_find_child(&port->dev, NULL, partner_match);
+ if (!dev)
+ return NULL;
+
+ return to_typec_partner(dev);
+}
+
+static void typec_partner_attach(struct typec_connector *con, struct device *dev)
+{
+ struct typec_port *port = container_of(con, struct typec_port, con);
+ struct typec_partner *partner = typec_get_partner(port);
+ struct usb_device *udev = to_usb_device(dev);
+
+ if (udev->speed < USB_SPEED_SUPER)
+ port->usb2_dev = dev;
+ else
+ port->usb3_dev = dev;
+
+ if (partner) {
+ typec_partner_link_device(partner, dev);
+ put_device(&partner->dev);
+ }
+}
+
+static void typec_partner_deattach(struct typec_connector *con, struct device *dev)
+{
+ struct typec_port *port = container_of(con, struct typec_port, con);
+ struct typec_partner *partner = typec_get_partner(port);
+
+ if (partner) {
+ typec_partner_unlink_device(partner, dev);
+ put_device(&partner->dev);
+ }
+
+ if (port->usb2_dev == dev)
+ port->usb2_dev = NULL;
+ else if (port->usb3_dev == dev)
+ port->usb3_dev = NULL;
+}
+
/**
* typec_set_data_role - Report data role change
* @port: The USB Type-C Port where the role was changed
@@ -1784,7 +1874,7 @@ static int partner_match(struct device *dev, void *data)
*/
void typec_set_data_role(struct typec_port *port, enum typec_data_role role)
{
- struct device *partner_dev;
+ struct typec_partner *partner;
if (port->data_role == role)
return;
@@ -1793,14 +1883,14 @@ void typec_set_data_role(struct typec_port *port, enum typec_data_role role)
sysfs_notify(&port->dev.kobj, NULL, "data_role");
kobject_uevent(&port->dev.kobj, KOBJ_CHANGE);
- partner_dev = device_find_child(&port->dev, NULL, partner_match);
- if (!partner_dev)
+ partner = typec_get_partner(port);
+ if (!partner)
return;
- if (to_typec_partner(partner_dev)->identity)
- typec_product_type_notify(partner_dev);
+ if (partner->identity)
+ typec_product_type_notify(&partner->dev);
- put_device(partner_dev);
+ put_device(&partner->dev);
}
EXPORT_SYMBOL_GPL(typec_set_data_role);
@@ -2251,6 +2341,8 @@ struct typec_port *typec_register_port(struct device *parent,
port->ops = cap->ops;
port->port_type = cap->type;
port->prefer_role = cap->prefer_role;
+ port->con.attach = typec_partner_attach;
+ port->con.deattach = typec_partner_deattach;
device_initialize(&port->dev);
port->dev.class = &typec_class;
diff --git a/drivers/usb/typec/class.h b/drivers/usb/typec/class.h
index 673b2952b074..c36761ba3f59 100644
--- a/drivers/usb/typec/class.h
+++ b/drivers/usb/typec/class.h
@@ -8,6 +8,7 @@
struct typec_mux;
struct typec_switch;
+struct usb_device;
struct typec_plug {
struct device dev;
@@ -35,6 +36,9 @@ struct typec_partner {
enum usb_pd_svdm_ver svdm_version;
struct usb_power_delivery *pd;
+
+ void (*attach)(struct typec_partner *partner, struct device *dev);
+ void (*deattach)(struct typec_partner *partner, struct device *dev);
};
struct typec_port {
@@ -59,6 +63,18 @@ struct typec_port {
const struct typec_capability *cap;
const struct typec_operations *ops;
+
+ struct typec_connector con;
+
+ /*
+ * REVISIT: Only USB devices for now. If there are others, these need to
+ * be converted into a list.
+ *
+ * NOTE: These may be registered before the typec_partner, so they
+ * will always have to be kept here instead of struct typec_partner.
+ */
+ struct device *usb2_dev;
+ struct device *usb3_dev;
};
#define to_typec_port(_dev_) container_of(_dev_, struct typec_port, dev)
diff --git a/drivers/usb/typec/mux/Kconfig b/drivers/usb/typec/mux/Kconfig
index 65da61150ba7..816b9bd08355 100644
--- a/drivers/usb/typec/mux/Kconfig
+++ b/drivers/usb/typec/mux/Kconfig
@@ -46,4 +46,14 @@ config TYPEC_MUX_NB7VPQ904M
Say Y or M if your system has a On Semiconductor NB7VPQ904M Type-C
redriver chip found on some devices with a Type-C port.
+config TYPEC_MUX_PTN36502
+ tristate "NXP PTN36502 Type-C redriver driver"
+ depends on I2C
+ depends on DRM || DRM=n
+ select DRM_PANEL_BRIDGE if DRM
+ select REGMAP_I2C
+ help
+ Say Y or M if your system has an NXP PTN36502 Type-C redriver chip
+ found on some devices with a Type-C port.
+
endmenu
diff --git a/drivers/usb/typec/mux/Makefile b/drivers/usb/typec/mux/Makefile
index 76196096ef41..9d6a5557b0bd 100644
--- a/drivers/usb/typec/mux/Makefile
+++ b/drivers/usb/typec/mux/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_TYPEC_MUX_GPIO_SBU) += gpio-sbu-mux.o
obj-$(CONFIG_TYPEC_MUX_PI3USB30532) += pi3usb30532.o
obj-$(CONFIG_TYPEC_MUX_INTEL_PMC) += intel_pmc_mux.o
obj-$(CONFIG_TYPEC_MUX_NB7VPQ904M) += nb7vpq904m.o
+obj-$(CONFIG_TYPEC_MUX_PTN36502) += ptn36502.o
diff --git a/drivers/usb/typec/mux/fsa4480.c b/drivers/usb/typec/mux/fsa4480.c
index e0ee1f621abb..cb7cdf90cb0a 100644
--- a/drivers/usb/typec/mux/fsa4480.c
+++ b/drivers/usb/typec/mux/fsa4480.c
@@ -60,6 +60,7 @@ struct fsa4480 {
unsigned int svid;
u8 cur_enable;
+ bool swap_sbu_lanes;
};
static const struct regmap_config fsa4480_regmap_config = {
@@ -76,6 +77,9 @@ static int fsa4480_set(struct fsa4480 *fsa)
u8 enable = FSA4480_ENABLE_DEVICE;
u8 sel = 0;
+ if (fsa->swap_sbu_lanes)
+ reverse = !reverse;
+
/* USB Mode */
if (fsa->mode < TYPEC_STATE_MODAL ||
(!fsa->svid && (fsa->mode == TYPEC_MODE_USB2 ||
@@ -179,12 +183,75 @@ static int fsa4480_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *st
return ret;
}
+enum {
+ NORMAL_LANE_MAPPING,
+ INVERT_LANE_MAPPING,
+};
+
+#define DATA_LANES_COUNT 2
+
+static const int supported_data_lane_mapping[][DATA_LANES_COUNT] = {
+ [NORMAL_LANE_MAPPING] = { 0, 1 },
+ [INVERT_LANE_MAPPING] = { 1, 0 },
+};
+
+static int fsa4480_parse_data_lanes_mapping(struct fsa4480 *fsa)
+{
+ struct fwnode_handle *ep;
+ u32 data_lanes[DATA_LANES_COUNT];
+ int ret, i, j;
+
+ ep = fwnode_graph_get_next_endpoint(dev_fwnode(&fsa->client->dev), NULL);
+ if (!ep)
+ return 0;
+
+ ret = fwnode_property_read_u32_array(ep, "data-lanes", data_lanes, DATA_LANES_COUNT);
+ if (ret == -EINVAL)
+ /* Property isn't here, consider default mapping */
+ goto out_done;
+ if (ret) {
+ dev_err(&fsa->client->dev, "invalid data-lanes property: %d\n", ret);
+ goto out_error;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(supported_data_lane_mapping); i++) {
+ for (j = 0; j < DATA_LANES_COUNT; j++) {
+ if (data_lanes[j] != supported_data_lane_mapping[i][j])
+ break;
+ }
+
+ if (j == DATA_LANES_COUNT)
+ break;
+ }
+
+ switch (i) {
+ case NORMAL_LANE_MAPPING:
+ break;
+ case INVERT_LANE_MAPPING:
+ fsa->swap_sbu_lanes = true;
+ break;
+ default:
+ dev_err(&fsa->client->dev, "invalid data-lanes mapping\n");
+ ret = -EINVAL;
+ goto out_error;
+ }
+
+out_done:
+ ret = 0;
+
+out_error:
+ fwnode_handle_put(ep);
+
+ return ret;
+}
+
static int fsa4480_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct typec_switch_desc sw_desc = { };
struct typec_mux_desc mux_desc = { };
struct fsa4480 *fsa;
+ int ret;
fsa = devm_kzalloc(dev, sizeof(*fsa), GFP_KERNEL);
if (!fsa)
@@ -193,6 +260,10 @@ static int fsa4480_probe(struct i2c_client *client)
fsa->client = client;
mutex_init(&fsa->lock);
+ ret = fsa4480_parse_data_lanes_mapping(fsa);
+ if (ret)
+ return ret;
+
fsa->regmap = devm_regmap_init_i2c(client, &fsa4480_regmap_config);
if (IS_ERR(fsa->regmap))
return dev_err_probe(dev, PTR_ERR(fsa->regmap), "failed to initialize regmap\n");
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index 60ed1f809130..56989a0d0f43 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -191,6 +191,12 @@ static int hsl_orientation(struct pmc_usb_port *port)
return port->orientation - 1;
}
+static bool is_pmc_mux_tbt(struct acpi_device *adev)
+{
+ return acpi_dev_hid_uid_match(adev, "INTC1072", NULL) ||
+ acpi_dev_hid_uid_match(adev, "INTC1079", NULL);
+}
+
static int pmc_usb_send_command(struct intel_scu_ipc_dev *ipc, u8 *msg, u32 len)
{
u8 response[4];
@@ -293,6 +299,24 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state)
req.mode_data |= (state->mode - TYPEC_STATE_MODAL) <<
PMC_USB_ALTMODE_DP_MODE_SHIFT;
+ if (!is_pmc_mux_tbt(port->pmc->iom_adev)) {
+ u8 cable_speed = (data->conf & DP_CONF_SIGNALLING_MASK) >>
+ DP_CONF_SIGNALLING_SHIFT;
+
+ u8 cable_type = (data->conf & DP_CONF_CABLE_TYPE_MASK) >>
+ DP_CONF_CABLE_TYPE_SHIFT;
+
+ req.mode_data |= PMC_USB_ALTMODE_CABLE_SPD(cable_speed);
+
+ if (cable_type == DP_CONF_CABLE_TYPE_OPTICAL)
+ req.mode_data |= PMC_USB_ALTMODE_CABLE_TYPE;
+ else if (cable_type == DP_CONF_CABLE_TYPE_RE_TIMER)
+ req.mode_data |= PMC_USB_ALTMODE_ACTIVE_CABLE |
+ PMC_USB_ALTMODE_RETIMER_CABLE;
+ else if (cable_type == DP_CONF_CABLE_TYPE_RE_DRIVER)
+ req.mode_data |= PMC_USB_ALTMODE_ACTIVE_CABLE;
+ }
+
ret = pmc_usb_command(port, (void *)&req, sizeof(req));
if (ret)
return ret;
@@ -599,6 +623,7 @@ static int pmc_usb_register_port(struct pmc_usb *pmc, int index,
desc.driver_data = port;
desc.name = fwnode_get_name(fwnode);
desc.set = pmc_usb_set_role;
+ desc.allow_userspace_control = true;
port->usb_sw = usb_role_switch_register(pmc->dev, &desc);
if (IS_ERR(port->usb_sw)) {
diff --git a/drivers/usb/typec/mux/ptn36502.c b/drivers/usb/typec/mux/ptn36502.c
new file mode 100644
index 000000000000..72ae38a1b2be
--- /dev/null
+++ b/drivers/usb/typec/mux/ptn36502.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NXP PTN36502 Type-C driver
+ *
+ * Copyright (C) 2023 Luca Weiss <luca.weiss@fairphone.com>
+ *
+ * Based on NB7VPQ904M driver:
+ * Copyright (C) 2023 Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+ */
+
+#include <drm/drm_bridge.h>
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_graph.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+#include <linux/usb/typec_retimer.h>
+
+#define PTN36502_CHIP_ID_REG 0x00
+#define PTN36502_CHIP_ID 0x02
+
+#define PTN36502_CHIP_REVISION_REG 0x01
+#define PTN36502_CHIP_REVISION_BASE_MASK GENMASK(7, 4)
+#define PTN36502_CHIP_REVISION_METAL_MASK GENMASK(3, 0)
+
+#define PTN36502_DP_LINK_CTRL_REG 0x06
+#define PTN36502_DP_LINK_CTRL_LANES_MASK GENMASK(3, 2)
+#define PTN36502_DP_LINK_CTRL_LANES_2 (2)
+#define PTN36502_DP_LINK_CTRL_LANES_4 (3)
+#define PTN36502_DP_LINK_CTRL_LINK_RATE_MASK GENMASK(1, 0)
+#define PTN36502_DP_LINK_CTRL_LINK_RATE_5_4GBPS (2)
+
+/* Registers for lane 0 (0x07) to lane 3 (0x0a) have the same layout */
+#define PTN36502_DP_LANE_CTRL_REG(n) (0x07 + (n))
+#define PTN36502_DP_LANE_CTRL_RX_GAIN_MASK GENMASK(6, 4)
+#define PTN36502_DP_LANE_CTRL_RX_GAIN_3DB (2)
+#define PTN36502_DP_LANE_CTRL_TX_SWING_MASK GENMASK(3, 2)
+#define PTN36502_DP_LANE_CTRL_TX_SWING_800MVPPD (2)
+#define PTN36502_DP_LANE_CTRL_PRE_EMPHASIS_MASK GENMASK(1, 0)
+#define PTN36502_DP_LANE_CTRL_PRE_EMPHASIS_3_5DB (1)
+
+#define PTN36502_MODE_CTRL1_REG 0x0b
+#define PTN36502_MODE_CTRL1_PLUG_ORIENT_MASK GENMASK(5, 5)
+#define PTN36502_MODE_CTRL1_PLUG_ORIENT_REVERSE (1)
+#define PTN36502_MODE_CTRL1_AUX_CROSSBAR_MASK GENMASK(3, 3)
+#define PTN36502_MODE_CTRL1_AUX_CROSSBAR_SW_ON (1)
+#define PTN36502_MODE_CTRL1_MODE_MASK GENMASK(2, 0)
+#define PTN36502_MODE_CTRL1_MODE_OFF (0)
+#define PTN36502_MODE_CTRL1_MODE_USB_ONLY (1)
+#define PTN36502_MODE_CTRL1_MODE_USB_DP (2)
+#define PTN36502_MODE_CTRL1_MODE_DP (3)
+
+#define PTN36502_DEVICE_CTRL_REG 0x0d
+#define PTN36502_DEVICE_CTRL_AUX_MONITORING_MASK GENMASK(7, 7)
+#define PTN36502_DEVICE_CTRL_AUX_MONITORING_EN (1)
+
+struct ptn36502 {
+ struct i2c_client *client;
+ struct regulator *vdd18_supply;
+ struct regmap *regmap;
+ struct typec_switch_dev *sw;
+ struct typec_retimer *retimer;
+
+ struct typec_switch *typec_switch;
+
+ struct drm_bridge bridge;
+
+ struct mutex lock; /* protect non-concurrent retimer & switch */
+
+ enum typec_orientation orientation;
+ unsigned long mode;
+ unsigned int svid;
+};
+
+static int ptn36502_set(struct ptn36502 *ptn)
+{
+ bool reverse = (ptn->orientation == TYPEC_ORIENTATION_REVERSE);
+ unsigned int ctrl1_val = 0;
+ unsigned int lane_ctrl_val = 0;
+ unsigned int link_ctrl_val = 0;
+
+ switch (ptn->mode) {
+ case TYPEC_STATE_SAFE:
+ /* Deep power saving state */
+ regmap_write(ptn->regmap, PTN36502_MODE_CTRL1_REG,
+ FIELD_PREP(PTN36502_MODE_CTRL1_MODE_MASK,
+ PTN36502_MODE_CTRL1_MODE_OFF));
+ return 0;
+
+ case TYPEC_STATE_USB:
+ /*
+ * Normal Orientation (CC1)
+ * A -> USB RX
+ * B -> USB TX
+ * C -> X
+ * D -> X
+ * Flipped Orientation (CC2)
+ * A -> X
+ * B -> X
+ * C -> USB TX
+ * D -> USB RX
+ */
+
+ /* USB 3.1 Gen 1 only */
+ ctrl1_val = FIELD_PREP(PTN36502_MODE_CTRL1_MODE_MASK,
+ PTN36502_MODE_CTRL1_MODE_USB_ONLY);
+ if (reverse)
+ ctrl1_val |= FIELD_PREP(PTN36502_MODE_CTRL1_PLUG_ORIENT_MASK,
+ PTN36502_MODE_CTRL1_PLUG_ORIENT_REVERSE);
+
+ regmap_write(ptn->regmap, PTN36502_MODE_CTRL1_REG, ctrl1_val);
+ return 0;
+
+ default:
+ if (ptn->svid != USB_TYPEC_DP_SID)
+ return -EINVAL;
+
+ break;
+ }
+
+ /* DP Altmode Setup */
+
+ switch (ptn->mode) {
+ case TYPEC_DP_STATE_C:
+ case TYPEC_DP_STATE_E:
+ /*
+ * Normal Orientation (CC1)
+ * A -> DP3
+ * B -> DP2
+ * C -> DP1
+ * D -> DP0
+ * Flipped Orientation (CC2)
+ * A -> DP0
+ * B -> DP1
+ * C -> DP2
+ * D -> DP3
+ */
+
+ /* 4-lane DP */
+ ctrl1_val |= FIELD_PREP(PTN36502_MODE_CTRL1_MODE_MASK,
+ PTN36502_MODE_CTRL1_MODE_DP);
+ link_ctrl_val |= FIELD_PREP(PTN36502_DP_LINK_CTRL_LANES_MASK,
+ PTN36502_DP_LINK_CTRL_LANES_4);
+ break;
+
+ case TYPEC_DP_STATE_D:
+ case TYPEC_DP_STATE_F: /* State F is deprecated */
+ /*
+ * Normal Orientation (CC1)
+ * A -> USB RX
+ * B -> USB TX
+ * C -> DP1
+ * D -> DP0
+ * Flipped Orientation (CC2)
+ * A -> DP0
+ * B -> DP1
+ * C -> USB TX
+ * D -> USB RX
+ */
+
+ /* USB 3.1 Gen 1 and 2-lane DP */
+ ctrl1_val |= FIELD_PREP(PTN36502_MODE_CTRL1_MODE_MASK,
+ PTN36502_MODE_CTRL1_MODE_USB_DP);
+ link_ctrl_val |= FIELD_PREP(PTN36502_DP_LINK_CTRL_LANES_MASK,
+ PTN36502_DP_LINK_CTRL_LANES_2);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Enable AUX monitoring */
+ regmap_write(ptn->regmap, PTN36502_DEVICE_CTRL_REG,
+ FIELD_PREP(PTN36502_DEVICE_CTRL_AUX_MONITORING_MASK,
+ PTN36502_DEVICE_CTRL_AUX_MONITORING_EN));
+
+ /* Enable AUX switch path */
+ ctrl1_val |= FIELD_PREP(PTN36502_MODE_CTRL1_AUX_CROSSBAR_MASK,
+ PTN36502_MODE_CTRL1_AUX_CROSSBAR_SW_ON);
+ if (reverse)
+ ctrl1_val |= FIELD_PREP(PTN36502_MODE_CTRL1_PLUG_ORIENT_MASK,
+ PTN36502_MODE_CTRL1_PLUG_ORIENT_REVERSE);
+ regmap_write(ptn->regmap, PTN36502_MODE_CTRL1_REG, ctrl1_val);
+
+ /* DP Link rate: 5.4 Gbps (HBR2) */
+ link_ctrl_val |= FIELD_PREP(PTN36502_DP_LINK_CTRL_LINK_RATE_MASK,
+ PTN36502_DP_LINK_CTRL_LINK_RATE_5_4GBPS);
+ regmap_write(ptn->regmap, PTN36502_DP_LINK_CTRL_REG, link_ctrl_val);
+
+ /*
+ * For all lanes:
+ * - Rx equalization gain: 3 dB
+ * - TX output swing control: 800 mVppd
+ * - Pre-emphasis control: 3.5 dB
+ */
+ lane_ctrl_val = FIELD_PREP(PTN36502_DP_LANE_CTRL_RX_GAIN_MASK,
+ PTN36502_DP_LANE_CTRL_RX_GAIN_3DB) |
+ FIELD_PREP(PTN36502_DP_LANE_CTRL_TX_SWING_MASK,
+ PTN36502_DP_LANE_CTRL_TX_SWING_800MVPPD) |
+ FIELD_PREP(PTN36502_DP_LANE_CTRL_PRE_EMPHASIS_MASK,
+ PTN36502_DP_LANE_CTRL_PRE_EMPHASIS_3_5DB);
+ regmap_write(ptn->regmap, PTN36502_DP_LANE_CTRL_REG(0), lane_ctrl_val);
+ regmap_write(ptn->regmap, PTN36502_DP_LANE_CTRL_REG(1), lane_ctrl_val);
+ regmap_write(ptn->regmap, PTN36502_DP_LANE_CTRL_REG(2), lane_ctrl_val);
+ regmap_write(ptn->regmap, PTN36502_DP_LANE_CTRL_REG(3), lane_ctrl_val);
+
+ return 0;
+}
+
+static int ptn36502_sw_set(struct typec_switch_dev *sw, enum typec_orientation orientation)
+{
+ struct ptn36502 *ptn = typec_switch_get_drvdata(sw);
+ int ret;
+
+ ret = typec_switch_set(ptn->typec_switch, orientation);
+ if (ret)
+ return ret;
+
+ mutex_lock(&ptn->lock);
+
+ if (ptn->orientation != orientation) {
+ ptn->orientation = orientation;
+
+ ret = ptn36502_set(ptn);
+ }
+
+ mutex_unlock(&ptn->lock);
+
+ return ret;
+}
+
+static int ptn36502_retimer_set(struct typec_retimer *retimer, struct typec_retimer_state *state)
+{
+ struct ptn36502 *ptn = typec_retimer_get_drvdata(retimer);
+ int ret = 0;
+
+ mutex_lock(&ptn->lock);
+
+ if (ptn->mode != state->mode) {
+ ptn->mode = state->mode;
+
+ if (state->alt)
+ ptn->svid = state->alt->svid;
+ else
+ ptn->svid = 0; // No SVID
+
+ ret = ptn36502_set(ptn);
+ }
+
+ mutex_unlock(&ptn->lock);
+
+ return ret;
+}
+
+static int ptn36502_detect(struct ptn36502 *ptn)
+{
+ struct device *dev = &ptn->client->dev;
+ unsigned int reg_val;
+ int ret;
+
+ ret = regmap_read(ptn->regmap, PTN36502_CHIP_ID_REG,
+ &reg_val);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to read chip ID\n");
+
+ if (reg_val != PTN36502_CHIP_ID)
+ return dev_err_probe(dev, -ENODEV, "Unexpected chip ID: %x\n", reg_val);
+
+ ret = regmap_read(ptn->regmap, PTN36502_CHIP_REVISION_REG,
+ &reg_val);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to read chip revision\n");
+
+ dev_dbg(dev, "Chip revision: base layer version %lx, metal layer version %lx\n",
+ FIELD_GET(PTN36502_CHIP_REVISION_BASE_MASK, reg_val),
+ FIELD_GET(PTN36502_CHIP_REVISION_METAL_MASK, reg_val));
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE)
+static int ptn36502_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct ptn36502 *ptn = container_of(bridge, struct ptn36502, bridge);
+ struct drm_bridge *next_bridge;
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ next_bridge = devm_drm_of_get_bridge(&ptn->client->dev, ptn->client->dev.of_node, 0, 0);
+ if (IS_ERR(next_bridge)) {
+ dev_err(&ptn->client->dev, "failed to acquire drm_bridge: %pe\n", next_bridge);
+ return PTR_ERR(next_bridge);
+ }
+
+ return drm_bridge_attach(bridge->encoder, next_bridge, bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+}
+
+static const struct drm_bridge_funcs ptn36502_bridge_funcs = {
+ .attach = ptn36502_bridge_attach,
+};
+
+static int ptn36502_register_bridge(struct ptn36502 *ptn)
+{
+ ptn->bridge.funcs = &ptn36502_bridge_funcs;
+ ptn->bridge.of_node = ptn->client->dev.of_node;
+
+ return devm_drm_bridge_add(&ptn->client->dev, &ptn->bridge);
+}
+#else
+static int ptn36502_register_bridge(struct ptn36502 *ptn)
+{
+ return 0;
+}
+#endif
+
+static const struct regmap_config ptn36502_regmap = {
+ .max_register = 0x0d,
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int ptn36502_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct typec_switch_desc sw_desc = { };
+ struct typec_retimer_desc retimer_desc = { };
+ struct ptn36502 *ptn;
+ int ret;
+
+ ptn = devm_kzalloc(dev, sizeof(*ptn), GFP_KERNEL);
+ if (!ptn)
+ return -ENOMEM;
+
+ ptn->client = client;
+
+ ptn->regmap = devm_regmap_init_i2c(client, &ptn36502_regmap);
+ if (IS_ERR(ptn->regmap)) {
+ dev_err(&client->dev, "Failed to allocate register map\n");
+ return PTR_ERR(ptn->regmap);
+ }
+
+ ptn->mode = TYPEC_STATE_SAFE;
+ ptn->orientation = TYPEC_ORIENTATION_NONE;
+
+ mutex_init(&ptn->lock);
+
+ ptn->vdd18_supply = devm_regulator_get_optional(dev, "vdd18");
+ if (IS_ERR(ptn->vdd18_supply))
+ return PTR_ERR(ptn->vdd18_supply);
+
+ ptn->typec_switch = fwnode_typec_switch_get(dev->fwnode);
+ if (IS_ERR(ptn->typec_switch))
+ return dev_err_probe(dev, PTR_ERR(ptn->typec_switch),
+ "Failed to acquire orientation-switch\n");
+
+ ret = regulator_enable(ptn->vdd18_supply);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable vdd18\n");
+
+ ret = ptn36502_detect(ptn);
+ if (ret)
+ goto err_disable_regulator;
+
+ ret = ptn36502_register_bridge(ptn);
+ if (ret)
+ goto err_disable_regulator;
+
+ sw_desc.drvdata = ptn;
+ sw_desc.fwnode = dev->fwnode;
+ sw_desc.set = ptn36502_sw_set;
+
+ ptn->sw = typec_switch_register(dev, &sw_desc);
+ if (IS_ERR(ptn->sw)) {
+ ret = dev_err_probe(dev, PTR_ERR(ptn->sw),
+ "Failed to register typec switch\n");
+ goto err_disable_regulator;
+ }
+
+ retimer_desc.drvdata = ptn;
+ retimer_desc.fwnode = dev->fwnode;
+ retimer_desc.set = ptn36502_retimer_set;
+
+ ptn->retimer = typec_retimer_register(dev, &retimer_desc);
+ if (IS_ERR(ptn->retimer)) {
+ ret = dev_err_probe(dev, PTR_ERR(ptn->retimer),
+ "Failed to register typec retimer\n");
+ goto err_switch_unregister;
+ }
+
+ return 0;
+
+err_switch_unregister:
+ typec_switch_unregister(ptn->sw);
+
+err_disable_regulator:
+ regulator_disable(ptn->vdd18_supply);
+
+ return ret;
+}
+
+static void ptn36502_remove(struct i2c_client *client)
+{
+ struct ptn36502 *ptn = i2c_get_clientdata(client);
+
+ typec_retimer_unregister(ptn->retimer);
+ typec_switch_unregister(ptn->sw);
+
+ regulator_disable(ptn->vdd18_supply);
+}
+
+static const struct i2c_device_id ptn36502_table[] = {
+ { "ptn36502" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ptn36502_table);
+
+static const struct of_device_id ptn36502_of_table[] = {
+ { .compatible = "nxp,ptn36502" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ptn36502_of_table);
+
+static struct i2c_driver ptn36502_driver = {
+ .driver = {
+ .name = "ptn36502",
+ .of_match_table = ptn36502_of_table,
+ },
+ .probe = ptn36502_probe,
+ .remove = ptn36502_remove,
+ .id_table = ptn36502_table,
+};
+module_i2c_driver(ptn36502_driver);
+
+MODULE_AUTHOR("Luca Weiss <luca.weiss@fairphone.com>");
+MODULE_DESCRIPTION("NXP PTN36502 Type-C driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/typec/pd.c b/drivers/usb/typec/pd.c
index 8cc66e4467c4..85d015cdbe1f 100644
--- a/drivers/usb/typec/pd.c
+++ b/drivers/usb/typec/pd.c
@@ -83,14 +83,12 @@ unchunked_extended_messages_supported_show(struct device *dev,
}
static DEVICE_ATTR_RO(unchunked_extended_messages_supported);
-/*
- * REVISIT: Peak Current requires access also to the RDO.
static ssize_t
peak_current_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- ...
+ return sysfs_emit(buf, "%u\n", (to_pdo(dev)->pdo >> PDO_FIXED_PEAK_CURR_SHIFT) & 3);
}
-*/
+static DEVICE_ATTR_RO(peak_current);
static ssize_t
fast_role_swap_current_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -135,7 +133,7 @@ static struct attribute *source_fixed_supply_attrs[] = {
&dev_attr_usb_communication_capable.attr,
&dev_attr_dual_role_data.attr,
&dev_attr_unchunked_extended_messages_supported.attr,
- /*&dev_attr_peak_current.attr,*/
+ &dev_attr_peak_current.attr,
&dev_attr_voltage.attr,
&maximum_current_attr.attr,
NULL
@@ -144,7 +142,7 @@ static struct attribute *source_fixed_supply_attrs[] = {
static umode_t fixed_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
{
if (to_pdo(kobj_to_dev(kobj))->object_position &&
- /*attr != &dev_attr_peak_current.attr &&*/
+ attr != &dev_attr_peak_current.attr &&
attr != &dev_attr_voltage.attr &&
attr != &maximum_current_attr.attr &&
attr != &operational_current_attr.attr)
diff --git a/drivers/usb/typec/port-mapper.c b/drivers/usb/typec/port-mapper.c
index a929e000d0e2..d42da5720a25 100644
--- a/drivers/usb/typec/port-mapper.c
+++ b/drivers/usb/typec/port-mapper.c
@@ -8,17 +8,22 @@
#include <linux/acpi.h>
#include <linux/component.h>
+#include <linux/usb.h>
#include "class.h"
static int typec_aggregate_bind(struct device *dev)
{
- return component_bind_all(dev, NULL);
+ struct typec_port *port = to_typec_port(dev);
+
+ return component_bind_all(dev, &port->con);
}
static void typec_aggregate_unbind(struct device *dev)
{
- component_unbind_all(dev, NULL);
+ struct typec_port *port = to_typec_port(dev);
+
+ component_unbind_all(dev, &port->con);
}
static const struct component_master_ops typec_aggregate_ops = {
diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
index 17ebc5fb684f..67422d45eb54 100644
--- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c
+++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
@@ -7,6 +7,7 @@
#include <linux/bits.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -51,13 +52,19 @@
/* 1b0 as fixed rx threshold of rd/rp 0.55V, 1b1 depends on RTCRTL4[0] */
#define BMCIO_RXDZEN BIT(0)
+struct rt1711h_chip_info {
+ u32 rxdz_sel;
+ u16 did;
+ bool enable_pd30_extended_message;
+};
+
struct rt1711h_chip {
struct tcpci_data data;
struct tcpci *tcpci;
struct device *dev;
struct regulator *vbus;
+ const struct rt1711h_chip_info *info;
bool src_en;
- u16 did;
};
static int rt1711h_read16(struct rt1711h_chip *chip, unsigned int reg, u16 *val)
@@ -105,7 +112,7 @@ static int rt1711h_init(struct tcpci *tcpci, struct tcpci_data *tdata)
return ret;
/* Enable PD30 extended message for RT1715 */
- if (chip->did == RT1715_DID) {
+ if (chip->info->enable_pd30_extended_message) {
ret = regmap_update_bits(regmap, RT1711H_RTCTRL8,
RT1711H_ENEXTMSG, RT1711H_ENEXTMSG);
if (ret < 0)
@@ -200,10 +207,7 @@ static inline int rt1711h_init_cc_params(struct rt1711h_chip *chip, u8 status)
if ((cc1 >= TYPEC_CC_RP_1_5 && cc2 < TYPEC_CC_RP_DEF) ||
(cc2 >= TYPEC_CC_RP_1_5 && cc1 < TYPEC_CC_RP_DEF)) {
rxdz_en = BMCIO_RXDZEN;
- if (chip->did == RT1715_DID)
- rxdz_sel = RT1711H_BMCIO_RXDZSEL;
- else
- rxdz_sel = 0;
+ rxdz_sel = chip->info->rxdz_sel;
} else {
rxdz_en = 0;
rxdz_sel = RT1711H_BMCIO_RXDZSEL;
@@ -319,7 +323,7 @@ static int rt1711h_check_revision(struct i2c_client *i2c, struct rt1711h_chip *c
ret = i2c_smbus_read_word_data(i2c, TCPC_BCD_DEV);
if (ret < 0)
return ret;
- if (ret != chip->did) {
+ if (ret != chip->info->did) {
dev_err(&i2c->dev, "did is not correct, 0x%04x\n", ret);
return -ENODEV;
}
@@ -336,7 +340,7 @@ static int rt1711h_probe(struct i2c_client *client)
if (!chip)
return -ENOMEM;
- chip->did = (size_t)device_get_match_data(&client->dev);
+ chip->info = i2c_get_match_data(client);
ret = rt1711h_check_revision(client, chip);
if (ret < 0) {
@@ -391,26 +395,34 @@ static void rt1711h_remove(struct i2c_client *client)
tcpci_unregister_port(chip->tcpci);
}
+static const struct rt1711h_chip_info rt1711h = {
+ .did = RT1711H_DID,
+};
+
+static const struct rt1711h_chip_info rt1715 = {
+ .rxdz_sel = RT1711H_BMCIO_RXDZSEL,
+ .did = RT1715_DID,
+ .enable_pd30_extended_message = true,
+};
+
static const struct i2c_device_id rt1711h_id[] = {
- { "rt1711h", 0 },
- { "rt1715", 0 },
- { }
+ { "rt1711h", (kernel_ulong_t)&rt1711h },
+ { "rt1715", (kernel_ulong_t)&rt1715 },
+ {}
};
MODULE_DEVICE_TABLE(i2c, rt1711h_id);
-#ifdef CONFIG_OF
static const struct of_device_id rt1711h_of_match[] = {
- { .compatible = "richtek,rt1711h", .data = (void *)RT1711H_DID },
- { .compatible = "richtek,rt1715", .data = (void *)RT1715_DID },
- {},
+ { .compatible = "richtek,rt1711h", .data = &rt1711h },
+ { .compatible = "richtek,rt1715", .data = &rt1715 },
+ {}
};
MODULE_DEVICE_TABLE(of, rt1711h_of_match);
-#endif
static struct i2c_driver rt1711h_i2c_driver = {
.driver = {
.name = "rt1711h",
- .of_match_table = of_match_ptr(rt1711h_of_match),
+ .of_match_table = rt1711h_of_match,
},
.probe = rt1711h_probe,
.remove = rt1711h_remove,
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index d962f67c95ae..058d5b853b57 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -517,9 +517,9 @@ static const char * const pd_rev[] = {
((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
(cc) == TYPEC_CC_RP_3_0)
+/* As long as cc is pulled up, we can consider it as sink. */
#define tcpm_port_is_sink(port) \
- ((tcpm_cc_is_sink((port)->cc1) && !tcpm_cc_is_sink((port)->cc2)) || \
- (tcpm_cc_is_sink((port)->cc2) && !tcpm_cc_is_sink((port)->cc1)))
+ (tcpm_cc_is_sink((port)->cc1) || tcpm_cc_is_sink((port)->cc2))
#define tcpm_cc_is_source(cc) ((cc) == TYPEC_CC_RD)
#define tcpm_cc_is_audio(cc) ((cc) == TYPEC_CC_RA)
@@ -1625,6 +1625,9 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
if (PD_VDO_VID(p[0]) != USB_SID_PD)
break;
+ if (IS_ERR_OR_NULL(port->partner))
+ break;
+
if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
typec_partner_set_svdm_version(port->partner,
PD_VDO_SVDM_VER(p[0]));
@@ -3903,6 +3906,8 @@ static void run_state_machine(struct tcpm_port *port)
port->potential_contaminant = ((port->enter_state == SRC_ATTACH_WAIT &&
port->state == SRC_UNATTACHED) ||
(port->enter_state == SNK_ATTACH_WAIT &&
+ port->state == SNK_UNATTACHED) ||
+ (port->enter_state == SNK_DEBOUNCED &&
port->state == SNK_UNATTACHED));
port->enter_state = port->state;
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index 37b56ce75f39..0e867f531d34 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -17,6 +17,7 @@
#include <linux/usb/typec_altmode.h>
#include <linux/usb/role.h>
#include <linux/workqueue.h>
+#include <linux/firmware.h>
#include "tps6598x.h"
#include "trace.h"
@@ -36,13 +37,33 @@
#define TPS_REG_STATUS 0x1a
#define TPS_REG_SYSTEM_CONF 0x28
#define TPS_REG_CTRL_CONF 0x29
+#define TPS_REG_BOOT_STATUS 0x2D
#define TPS_REG_POWER_STATUS 0x3f
+#define TPS_REG_PD_STATUS 0x40
#define TPS_REG_RX_IDENTITY_SOP 0x48
#define TPS_REG_DATA_STATUS 0x5f
+#define TPS_REG_SLEEP_CONF 0x70
/* TPS_REG_SYSTEM_CONF bits */
#define TPS_SYSCONF_PORTINFO(c) ((c) & 7)
+/*
+ * BPMs task timeout, recommended 5 seconds
+ * pg.48 TPS25750 Host Interface Technical Reference
+ * Manual (Rev. A)
+ * https://www.ti.com/lit/ug/slvuc05a/slvuc05a.pdf
+ */
+#define TPS_BUNDLE_TIMEOUT 0x32
+
+/* BPMs return code */
+#define TPS_TASK_BPMS_INVALID_BUNDLE_SIZE 0x4
+#define TPS_TASK_BPMS_INVALID_SLAVE_ADDR 0x5
+#define TPS_TASK_BPMS_INVALID_TIMEOUT 0x6
+
+/* PBMc data out */
+#define TPS_PBMC_RC 0 /* Return code */
+#define TPS_PBMC_DPCS 2 /* device patch complete status */
+
enum {
TPS_PORTINFO_SINK,
TPS_PORTINFO_SINK_ACCESSORY,
@@ -68,6 +89,7 @@ enum {
TPS_MODE_BOOT,
TPS_MODE_BIST,
TPS_MODE_DISC,
+ TPS_MODE_PTCH,
};
static const char *const modes[] = {
@@ -75,11 +97,22 @@ static const char *const modes[] = {
[TPS_MODE_BOOT] = "BOOT",
[TPS_MODE_BIST] = "BIST",
[TPS_MODE_DISC] = "DISC",
+ [TPS_MODE_PTCH] = "PTCH",
};
/* Unrecognized commands will be replaced with "!CMD" */
#define INVALID_CMD(_cmd_) (_cmd_ == 0x444d4321)
+struct tps6598x;
+
+struct tipd_data {
+ irq_handler_t irq_handler;
+ int (*register_port)(struct tps6598x *tps, struct fwnode_handle *node);
+ void (*trace_power_status)(u16 status);
+ void (*trace_status)(u32 status);
+ int (*apply_patch)(struct tps6598x *tps);
+};
+
struct tps6598x {
struct device *dev;
struct regmap *regmap;
@@ -97,9 +130,11 @@ struct tps6598x {
enum power_supply_usb_type usb_type;
int wakeup;
+ u32 status; /* status reg */
u16 pwr_status;
struct delayed_work wq_poll;
- irq_handler_t irq_handler;
+
+ const struct tipd_data *data;
};
static enum power_supply_property tps6598x_psy_props[] = {
@@ -180,6 +215,11 @@ static inline int tps6598x_read64(struct tps6598x *tps, u8 reg, u64 *val)
return tps6598x_block_read(tps, reg, val, sizeof(u64));
}
+static inline int tps6598x_write8(struct tps6598x *tps, u8 reg, u8 val)
+{
+ return tps6598x_block_write(tps, reg, &val, sizeof(u8));
+}
+
static inline int tps6598x_write64(struct tps6598x *tps, u8 reg, u64 val)
{
return tps6598x_block_write(tps, reg, &val, sizeof(u64));
@@ -282,9 +322,10 @@ static void tps6598x_disconnect(struct tps6598x *tps, u32 status)
power_supply_changed(tps->psy);
}
-static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd,
+static int tps6598x_exec_cmd_tmo(struct tps6598x *tps, const char *cmd,
size_t in_len, u8 *in_data,
- size_t out_len, u8 *out_data)
+ size_t out_len, u8 *out_data,
+ u32 cmd_timeout_ms, u32 res_delay_ms)
{
unsigned long timeout;
u32 val;
@@ -307,8 +348,7 @@ static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd,
if (ret < 0)
return ret;
- /* XXX: Using 1s for now, but it may not be enough for every command. */
- timeout = jiffies + msecs_to_jiffies(1000);
+ timeout = jiffies + msecs_to_jiffies(cmd_timeout_ms);
do {
ret = tps6598x_read32(tps, TPS_REG_CMD1, &val);
@@ -321,6 +361,9 @@ static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd,
return -ETIMEDOUT;
} while (val);
+ /* some commands require delay for the result to be available */
+ mdelay(res_delay_ms);
+
if (out_len) {
ret = tps6598x_block_read(tps, TPS_REG_DATA1,
out_data, out_len);
@@ -345,6 +388,14 @@ static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd,
return 0;
}
+static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd,
+ size_t in_len, u8 *in_data,
+ size_t out_len, u8 *out_data)
+{
+ return tps6598x_exec_cmd_tmo(tps, cmd, in_len, in_data,
+ out_len, out_data, 1000, 0);
+}
+
static int tps6598x_dr_set(struct typec_port *port, enum typec_data_role role)
{
const char *cmd = (role == TYPEC_DEVICE) ? "SWUF" : "SWDF";
@@ -419,7 +470,9 @@ static bool tps6598x_read_status(struct tps6598x *tps, u32 *status)
dev_err(tps->dev, "%s: failed to read status\n", __func__);
return false;
}
- trace_tps6598x_status(*status);
+
+ if (tps->data->trace_status)
+ tps->data->trace_status(*status);
return true;
}
@@ -450,7 +503,9 @@ static bool tps6598x_read_power_status(struct tps6598x *tps)
return false;
}
tps->pwr_status = pwr_status;
- trace_tps6598x_power_status(pwr_status);
+
+ if (tps->data->trace_power_status)
+ tps->data->trace_power_status(pwr_status);
return true;
}
@@ -513,6 +568,65 @@ err_unlock:
return IRQ_NONE;
}
+static bool tps6598x_has_role_changed(struct tps6598x *tps, u32 status)
+{
+ status ^= tps->status;
+
+ return status & (TPS_STATUS_PORTROLE | TPS_STATUS_DATAROLE);
+}
+
+static irqreturn_t tps25750_interrupt(int irq, void *data)
+{
+ struct tps6598x *tps = data;
+ u64 event[2] = { };
+ u32 status;
+ int ret;
+
+ mutex_lock(&tps->lock);
+
+ ret = tps6598x_block_read(tps, TPS_REG_INT_EVENT1, event, 11);
+ if (ret) {
+ dev_err(tps->dev, "%s: failed to read events\n", __func__);
+ goto err_unlock;
+ }
+ trace_tps25750_irq(event[0]);
+
+ if (!(event[0] | event[1]))
+ goto err_unlock;
+
+ if (!tps6598x_read_status(tps, &status))
+ goto err_clear_ints;
+
+ if ((event[0] | event[1]) & TPS_REG_INT_POWER_STATUS_UPDATE)
+ if (!tps6598x_read_power_status(tps))
+ goto err_clear_ints;
+
+ if ((event[0] | event[1]) & TPS_REG_INT_DATA_STATUS_UPDATE)
+ if (!tps6598x_read_data_status(tps))
+ goto err_clear_ints;
+
+ /*
+ * data/port roles could be updated independently after
+ * a plug event. Therefore, we need to check
+ * for pr/dr status change to set TypeC dr/pr accordingly.
+ */
+ if ((event[0] | event[1]) & TPS_REG_INT_PLUG_EVENT ||
+ tps6598x_has_role_changed(tps, status))
+ tps6598x_handle_plug_event(tps, status);
+
+ tps->status = status;
+
+err_clear_ints:
+ tps6598x_block_write(tps, TPS_REG_INT_CLEAR1, event, 11);
+
+err_unlock:
+ mutex_unlock(&tps->lock);
+
+ if (event[0] | event[1])
+ return IRQ_HANDLED;
+ return IRQ_NONE;
+}
+
static irqreturn_t tps6598x_interrupt(int irq, void *data)
{
struct tps6598x *tps = data;
@@ -568,7 +682,7 @@ static void tps6598x_poll_work(struct work_struct *work)
struct tps6598x *tps = container_of(to_delayed_work(work),
struct tps6598x, wq_poll);
- tps->irq_handler(0, tps);
+ tps->data->irq_handler(0, tps);
queue_delayed_work(system_power_efficient_wq,
&tps->wq_poll, msecs_to_jiffies(POLL_INTERVAL));
}
@@ -582,12 +696,15 @@ static int tps6598x_check_mode(struct tps6598x *tps)
if (ret)
return ret;
- switch (match_string(modes, ARRAY_SIZE(modes), mode)) {
+ ret = match_string(modes, ARRAY_SIZE(modes), mode);
+
+ switch (ret) {
case TPS_MODE_APP:
- return 0;
+ case TPS_MODE_PTCH:
+ return ret;
case TPS_MODE_BOOT:
dev_warn(tps->dev, "dead-battery condition\n");
- return 0;
+ return ret;
case TPS_MODE_BIST:
case TPS_MODE_DISC:
default:
@@ -697,18 +814,374 @@ static int devm_tps6598_psy_register(struct tps6598x *tps)
return PTR_ERR_OR_ZERO(tps->psy);
}
+static int
+tps6598x_register_port(struct tps6598x *tps, struct fwnode_handle *fwnode)
+{
+ int ret;
+ u32 conf;
+ struct typec_capability typec_cap = { };
+
+ ret = tps6598x_read32(tps, TPS_REG_SYSTEM_CONF, &conf);
+ if (ret)
+ return ret;
+
+ typec_cap.revision = USB_TYPEC_REV_1_2;
+ typec_cap.pd_revision = 0x200;
+ typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
+ typec_cap.driver_data = tps;
+ typec_cap.ops = &tps6598x_ops;
+ typec_cap.fwnode = fwnode;
+
+ switch (TPS_SYSCONF_PORTINFO(conf)) {
+ case TPS_PORTINFO_SINK_ACCESSORY:
+ case TPS_PORTINFO_SINK:
+ typec_cap.type = TYPEC_PORT_SNK;
+ typec_cap.data = TYPEC_PORT_UFP;
+ break;
+ case TPS_PORTINFO_DRP_UFP_DRD:
+ case TPS_PORTINFO_DRP_DFP_DRD:
+ typec_cap.type = TYPEC_PORT_DRP;
+ typec_cap.data = TYPEC_PORT_DRD;
+ break;
+ case TPS_PORTINFO_DRP_UFP:
+ typec_cap.type = TYPEC_PORT_DRP;
+ typec_cap.data = TYPEC_PORT_UFP;
+ break;
+ case TPS_PORTINFO_DRP_DFP:
+ typec_cap.type = TYPEC_PORT_DRP;
+ typec_cap.data = TYPEC_PORT_DFP;
+ break;
+ case TPS_PORTINFO_SOURCE:
+ typec_cap.type = TYPEC_PORT_SRC;
+ typec_cap.data = TYPEC_PORT_DFP;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ tps->port = typec_register_port(tps->dev, &typec_cap);
+ if (IS_ERR(tps->port))
+ return PTR_ERR(tps->port);
+
+ return 0;
+}
+
+static int
+tps25750_write_firmware(struct tps6598x *tps,
+ u8 bpms_addr, const u8 *data, size_t len)
+{
+ struct i2c_client *client = to_i2c_client(tps->dev);
+ int ret;
+ u8 slave_addr;
+ int timeout;
+
+ slave_addr = client->addr;
+ timeout = client->adapter->timeout;
+
+ /*
+ * The binary configuration is around 16 KB, so writing
+ * it may take some time to complete
+ */
+ client->adapter->timeout = msecs_to_jiffies(5000);
+ client->addr = bpms_addr;
+
+ ret = regmap_raw_write(tps->regmap, data[0], &data[1], len - 1);
+
+ client->addr = slave_addr;
+ client->adapter->timeout = timeout;
+
+ return ret;
+}
+
+static int
+tps25750_exec_pbms(struct tps6598x *tps, u8 *in_data, size_t in_len)
+{
+ int ret;
+ u8 rc;
+
+ ret = tps6598x_exec_cmd_tmo(tps, "PBMs", in_len, in_data,
+ sizeof(rc), &rc, 4000, 0);
+ if (ret)
+ return ret;
+
+ switch (rc) {
+ case TPS_TASK_BPMS_INVALID_BUNDLE_SIZE:
+ dev_err(tps->dev, "%s: invalid fw size\n", __func__);
+ return -EINVAL;
+ case TPS_TASK_BPMS_INVALID_SLAVE_ADDR:
+ dev_err(tps->dev, "%s: invalid slave address\n", __func__);
+ return -EINVAL;
+ case TPS_TASK_BPMS_INVALID_TIMEOUT:
+ dev_err(tps->dev, "%s: timed out\n", __func__);
+ return -ETIMEDOUT;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int tps25750_abort_patch_process(struct tps6598x *tps)
+{
+ int ret;
+
+ ret = tps6598x_exec_cmd(tps, "PBMe", 0, NULL, 0, NULL);
+ if (ret)
+ return ret;
+
+ ret = tps6598x_check_mode(tps);
+ if (ret != TPS_MODE_PTCH)
+ dev_err(tps->dev, "failed to switch to \"PTCH\" mode\n");
+
+ return ret;
+}
+
+static int tps25750_start_patch_burst_mode(struct tps6598x *tps)
+{
+ int ret;
+ const struct firmware *fw;
+ const char *firmware_name;
+ struct {
+ u32 fw_size;
+ u8 addr;
+ u8 timeout;
+ } __packed bpms_data;
+ u32 addr;
+ struct device_node *np = tps->dev->of_node;
+
+ ret = device_property_read_string(tps->dev, "firmware-name",
+ &firmware_name);
+ if (ret)
+ return ret;
+
+ ret = request_firmware(&fw, firmware_name, tps->dev);
+ if (ret) {
+ dev_err(tps->dev, "failed to retrieve \"%s\"\n", firmware_name);
+ return ret;
+ }
+
+ if (fw->size == 0) {
+ ret = -EINVAL;
+ goto release_fw;
+ }
+
+ ret = of_property_match_string(np, "reg-names", "patch-address");
+ if (ret < 0) {
+ dev_err(tps->dev, "failed to get patch-address %d\n", ret);
+ return ret;
+ }
+
+ ret = of_property_read_u32_index(np, "reg", ret, &addr);
+ if (ret)
+ return ret;
+
+ if (addr == 0 || (addr >= 0x20 && addr <= 0x23)) {
+ dev_err(tps->dev, "wrong patch address %u\n", addr);
+ return -EINVAL;
+ }
+
+ bpms_data.addr = (u8)addr;
+ bpms_data.fw_size = fw->size;
+ bpms_data.timeout = TPS_BUNDLE_TIMEOUT;
+
+ ret = tps25750_exec_pbms(tps, (u8 *)&bpms_data, sizeof(bpms_data));
+ if (ret)
+ goto release_fw;
+
+ ret = tps25750_write_firmware(tps, bpms_data.addr, fw->data, fw->size);
+ if (ret) {
+ dev_err(tps->dev, "Failed to write patch %s of %zu bytes\n",
+ firmware_name, fw->size);
+ goto release_fw;
+ }
+
+ /*
+ * A delay of 500us is required after the firmware is written
+ * based on pg.62 in tps6598x Host Interface Technical
+ * Reference Manual
+ * https://www.ti.com/lit/ug/slvuc05a/slvuc05a.pdf
+ */
+ udelay(500);
+
+release_fw:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int tps25750_complete_patch_process(struct tps6598x *tps)
+{
+ int ret;
+ u8 out_data[40];
+ u8 dummy[2] = { };
+
+ /*
+ * Without writing something to DATA_IN, this command would
+ * return an error
+ */
+ ret = tps6598x_exec_cmd_tmo(tps, "PBMc", sizeof(dummy), dummy,
+ sizeof(out_data), out_data, 2000, 20);
+ if (ret)
+ return ret;
+
+ if (out_data[TPS_PBMC_RC]) {
+ dev_err(tps->dev,
+ "%s: pbmc failed: %u\n", __func__,
+ out_data[TPS_PBMC_RC]);
+ return -EIO;
+ }
+
+ if (out_data[TPS_PBMC_DPCS]) {
+ dev_err(tps->dev,
+ "%s: failed device patch complete status: %u\n",
+ __func__, out_data[TPS_PBMC_DPCS]);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int tps25750_apply_patch(struct tps6598x *tps)
+{
+ int ret;
+ unsigned long timeout;
+ u64 status = 0;
+
+ ret = tps6598x_block_read(tps, TPS_REG_BOOT_STATUS, &status, 5);
+ if (ret)
+ return ret;
+ /*
+ * Nothing to be done if the configuration
+ * is being loaded from EEPROM
+ */
+ if (status & TPS_BOOT_STATUS_I2C_EEPROM_PRESENT)
+ goto wait_for_app;
+
+ ret = tps25750_start_patch_burst_mode(tps);
+ if (ret) {
+ tps25750_abort_patch_process(tps);
+ return ret;
+ }
+
+ ret = tps25750_complete_patch_process(tps);
+ if (ret)
+ return ret;
+
+wait_for_app:
+ timeout = jiffies + msecs_to_jiffies(1000);
+
+ do {
+ ret = tps6598x_check_mode(tps);
+ if (ret < 0)
+ return ret;
+
+ if (time_is_before_jiffies(timeout))
+ return -ETIMEDOUT;
+
+ } while (ret != TPS_MODE_APP);
+
+ /*
+ * The dead battery flag may be triggered when the controller
+ * port is connected to a device that can source power and
+ * attempts to power up both the controller and the board it is on.
+ * To restore controller functionality, it is necessary to clear
+ * this flag
+ */
+ if (status & TPS_BOOT_STATUS_DEAD_BATTERY_FLAG) {
+ ret = tps6598x_exec_cmd(tps, "DBfg", 0, NULL, 0, NULL);
+ if (ret) {
+ dev_err(tps->dev, "failed to clear dead battery %d\n", ret);
+ return ret;
+ }
+ }
+
+ dev_info(tps->dev, "controller switched to \"APP\" mode\n");
+
+ return 0;
+};
+
+static int tps25750_init(struct tps6598x *tps)
+{
+ int ret;
+
+ ret = tps->data->apply_patch(tps);
+ if (ret)
+ return ret;
+
+ ret = tps6598x_write8(tps, TPS_REG_SLEEP_CONF,
+ TPS_SLEEP_CONF_SLEEP_MODE_ALLOWED);
+ if (ret)
+ dev_warn(tps->dev,
+ "%s: failed to enable sleep mode: %d\n",
+ __func__, ret);
+
+ return 0;
+}
+
+static int
+tps25750_register_port(struct tps6598x *tps, struct fwnode_handle *fwnode)
+{
+ struct typec_capability typec_cap = { };
+ const char *data_role;
+ u8 pd_status;
+ int ret;
+
+ ret = tps6598x_read8(tps, TPS_REG_PD_STATUS, &pd_status);
+ if (ret)
+ return ret;
+
+ ret = fwnode_property_read_string(fwnode, "data-role", &data_role);
+ if (ret) {
+ dev_err(tps->dev, "data-role not found: %d\n", ret);
+ return ret;
+ }
+
+ ret = typec_find_port_data_role(data_role);
+ if (ret < 0) {
+ dev_err(tps->dev, "unknown data-role: %s\n", data_role);
+ return ret;
+ }
+
+ typec_cap.data = ret;
+ typec_cap.revision = USB_TYPEC_REV_1_3;
+ typec_cap.pd_revision = 0x300;
+ typec_cap.driver_data = tps;
+ typec_cap.ops = &tps6598x_ops;
+ typec_cap.fwnode = fwnode;
+ typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
+
+ switch (TPS_PD_STATUS_PORT_TYPE(pd_status)) {
+ case TPS_PD_STATUS_PORT_TYPE_SINK_SOURCE:
+ case TPS_PD_STATUS_PORT_TYPE_SOURCE_SINK:
+ typec_cap.type = TYPEC_PORT_DRP;
+ break;
+ case TPS_PD_STATUS_PORT_TYPE_SINK:
+ typec_cap.type = TYPEC_PORT_SNK;
+ break;
+ case TPS_PD_STATUS_PORT_TYPE_SOURCE:
+ typec_cap.type = TYPEC_PORT_SRC;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ tps->port = typec_register_port(tps->dev, &typec_cap);
+ if (IS_ERR(tps->port))
+ return PTR_ERR(tps->port);
+
+ return 0;
+}
+
static int tps6598x_probe(struct i2c_client *client)
{
- irq_handler_t irq_handler = tps6598x_interrupt;
struct device_node *np = client->dev.of_node;
- struct typec_capability typec_cap = { };
struct tps6598x *tps;
struct fwnode_handle *fwnode;
u32 status;
- u32 conf;
u32 vid;
int ret;
u64 mask1;
+ bool is_tps25750;
tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
if (!tps)
@@ -721,9 +1194,12 @@ static int tps6598x_probe(struct i2c_client *client)
if (IS_ERR(tps->regmap))
return PTR_ERR(tps->regmap);
- ret = tps6598x_read32(tps, TPS_REG_VID, &vid);
- if (ret < 0 || !vid)
- return -ENODEV;
+ is_tps25750 = device_is_compatible(tps->dev, "ti,tps25750");
+ if (!is_tps25750) {
+ ret = tps6598x_read32(tps, TPS_REG_VID, &vid);
+ if (ret < 0 || !vid)
+ return -ENODEV;
+ }
/*
* Checking can the adapter handle SMBus protocol. If it can not, the
@@ -743,7 +1219,6 @@ static int tps6598x_probe(struct i2c_client *client)
APPLE_CD_REG_INT_DATA_STATUS_UPDATE |
APPLE_CD_REG_INT_PLUG_EVENT;
- irq_handler = cd321x_interrupt;
} else {
/* Enable power status, data status and plug event interrupts */
mask1 = TPS_REG_INT_POWER_STATUS_UPDATE |
@@ -751,24 +1226,29 @@ static int tps6598x_probe(struct i2c_client *client)
TPS_REG_INT_PLUG_EVENT;
}
- tps->irq_handler = irq_handler;
+ tps->data = device_get_match_data(tps->dev);
+ if (!tps->data)
+ return -EINVAL;
+
/* Make sure the controller has application firmware running */
ret = tps6598x_check_mode(tps);
- if (ret)
+ if (ret < 0)
return ret;
+ if (is_tps25750 && ret == TPS_MODE_PTCH) {
+ ret = tps25750_init(tps);
+ if (ret)
+ return ret;
+ }
+
ret = tps6598x_write64(tps, TPS_REG_INT_MASK1, mask1);
if (ret)
- return ret;
-
- ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
- if (ret < 0)
- goto err_clear_mask;
- trace_tps6598x_status(status);
+ goto err_reset_controller;
- ret = tps6598x_read32(tps, TPS_REG_SYSTEM_CONF, &conf);
- if (ret < 0)
+ if (!tps6598x_read_status(tps, &status)) {
+ ret = -ENODEV;
goto err_clear_mask;
+ }
/*
* This fwnode has a "compatible" property, but is never populated as a
@@ -787,50 +1267,13 @@ static int tps6598x_probe(struct i2c_client *client)
goto err_fwnode_put;
}
- typec_cap.revision = USB_TYPEC_REV_1_2;
- typec_cap.pd_revision = 0x200;
- typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
- typec_cap.driver_data = tps;
- typec_cap.ops = &tps6598x_ops;
- typec_cap.fwnode = fwnode;
-
- switch (TPS_SYSCONF_PORTINFO(conf)) {
- case TPS_PORTINFO_SINK_ACCESSORY:
- case TPS_PORTINFO_SINK:
- typec_cap.type = TYPEC_PORT_SNK;
- typec_cap.data = TYPEC_PORT_UFP;
- break;
- case TPS_PORTINFO_DRP_UFP_DRD:
- case TPS_PORTINFO_DRP_DFP_DRD:
- typec_cap.type = TYPEC_PORT_DRP;
- typec_cap.data = TYPEC_PORT_DRD;
- break;
- case TPS_PORTINFO_DRP_UFP:
- typec_cap.type = TYPEC_PORT_DRP;
- typec_cap.data = TYPEC_PORT_UFP;
- break;
- case TPS_PORTINFO_DRP_DFP:
- typec_cap.type = TYPEC_PORT_DRP;
- typec_cap.data = TYPEC_PORT_DFP;
- break;
- case TPS_PORTINFO_SOURCE:
- typec_cap.type = TYPEC_PORT_SRC;
- typec_cap.data = TYPEC_PORT_DFP;
- break;
- default:
- ret = -ENODEV;
- goto err_role_put;
- }
-
ret = devm_tps6598_psy_register(tps);
if (ret)
goto err_role_put;
- tps->port = typec_register_port(&client->dev, &typec_cap);
- if (IS_ERR(tps->port)) {
- ret = PTR_ERR(tps->port);
+ ret = tps->data->register_port(tps, fwnode);
+ if (ret)
goto err_role_put;
- }
if (status & TPS_STATUS_PLUG_PRESENT) {
ret = tps6598x_read16(tps, TPS_REG_POWER_STATUS, &tps->pwr_status);
@@ -845,7 +1288,7 @@ static int tps6598x_probe(struct i2c_client *client)
if (client->irq) {
ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
- irq_handler,
+ tps->data->irq_handler,
IRQF_SHARED | IRQF_ONESHOT,
dev_name(&client->dev), tps);
} else {
@@ -879,6 +1322,10 @@ err_fwnode_put:
fwnode_handle_put(fwnode);
err_clear_mask:
tps6598x_write64(tps, TPS_REG_INT_MASK1, 0);
+err_reset_controller:
+ /* Reset PD controller to remove any applied patch */
+ if (is_tps25750)
+ tps6598x_exec_cmd_tmo(tps, "GAID", 0, NULL, 0, NULL, 2000, 0);
return ret;
}
@@ -889,9 +1336,14 @@ static void tps6598x_remove(struct i2c_client *client)
if (!client->irq)
cancel_delayed_work_sync(&tps->wq_poll);
+ devm_free_irq(tps->dev, client->irq, tps);
tps6598x_disconnect(tps, 0);
typec_unregister_port(tps->port);
usb_role_switch_put(tps->role_sw);
+
+ /* Reset PD controller to remove any applied patch */
+ if (device_is_compatible(tps->dev, "ti,tps25750"))
+ tps6598x_exec_cmd_tmo(tps, "GAID", 0, NULL, 0, NULL, 2000, 0);
}
static int __maybe_unused tps6598x_suspend(struct device *dev)
@@ -914,6 +1366,17 @@ static int __maybe_unused tps6598x_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct tps6598x *tps = i2c_get_clientdata(client);
+ int ret;
+
+ ret = tps6598x_check_mode(tps);
+ if (ret < 0)
+ return ret;
+
+ if (device_is_compatible(tps->dev, "ti,tps25750") && ret == TPS_MODE_PTCH) {
+ ret = tps25750_init(tps);
+ if (ret)
+ return ret;
+ }
if (tps->wakeup) {
disable_irq_wake(client->irq);
@@ -931,9 +1394,32 @@ static const struct dev_pm_ops tps6598x_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tps6598x_suspend, tps6598x_resume)
};
+static const struct tipd_data cd321x_data = {
+ .irq_handler = cd321x_interrupt,
+ .register_port = tps6598x_register_port,
+ .trace_power_status = trace_tps6598x_power_status,
+ .trace_status = trace_tps6598x_status,
+};
+
+static const struct tipd_data tps6598x_data = {
+ .irq_handler = tps6598x_interrupt,
+ .register_port = tps6598x_register_port,
+ .trace_power_status = trace_tps6598x_power_status,
+ .trace_status = trace_tps6598x_status,
+};
+
+static const struct tipd_data tps25750_data = {
+ .irq_handler = tps25750_interrupt,
+ .register_port = tps25750_register_port,
+ .trace_power_status = trace_tps25750_power_status,
+ .trace_status = trace_tps25750_status,
+ .apply_patch = tps25750_apply_patch,
+};
+
static const struct of_device_id tps6598x_of_match[] = {
- { .compatible = "ti,tps6598x", },
- { .compatible = "apple,cd321x", },
+ { .compatible = "ti,tps6598x", &tps6598x_data},
+ { .compatible = "apple,cd321x", &cd321x_data},
+ { .compatible = "ti,tps25750", &tps25750_data},
{}
};
MODULE_DEVICE_TABLE(of, tps6598x_of_match);
diff --git a/drivers/usb/typec/tipd/tps6598x.h b/drivers/usb/typec/tipd/tps6598x.h
index 527857549d69..01609bf509e4 100644
--- a/drivers/usb/typec/tipd/tps6598x.h
+++ b/drivers/usb/typec/tipd/tps6598x.h
@@ -161,6 +161,25 @@
#define TPS_POWER_STATUS_BC12_STATUS_CDP 2
#define TPS_POWER_STATUS_BC12_STATUS_DCP 3
+/* TPS25750_REG_POWER_STATUS bits */
+#define TPS25750_POWER_STATUS_CHARGER_DETECT_STATUS_MASK GENMASK(7, 4)
+#define TPS25750_POWER_STATUS_CHARGER_DETECT_STATUS(p) \
+ TPS_FIELD_GET(TPS25750_POWER_STATUS_CHARGER_DETECT_STATUS_MASK, (p))
+#define TPS25750_POWER_STATUS_CHARGER_ADVERTISE_STATUS_MASK GENMASK(9, 8)
+#define TPS25750_POWER_STATUS_CHARGER_ADVERTISE_STATUS(p) \
+ TPS_FIELD_GET(TPS25750_POWER_STATUS_CHARGER_ADVERTISE_STATUS_MASK, (p))
+
+#define TPS25750_POWER_STATUS_CHARGER_DET_STATUS_DISABLED 0
+#define TPS25750_POWER_STATUS_CHARGER_DET_STATUS_IN_PROGRESS 1
+#define TPS25750_POWER_STATUS_CHARGER_DET_STATUS_NONE 2
+#define TPS25750_POWER_STATUS_CHARGER_DET_STATUS_SPD 3
+#define TPS25750_POWER_STATUS_CHARGER_DET_STATUS_BC_1_2_CPD 4
+#define TPS25750_POWER_STATUS_CHARGER_DET_STATUS_BC_1_2_DPD 5
+#define TPS25750_POWER_STATUS_CHARGER_DET_STATUS_DIV_1_DCP 6
+#define TPS25750_POWER_STATUS_CHARGER_DET_STATUS_DIV_2_DCP 7
+#define TPS25750_POWER_STATUS_CHARGER_DET_STATUS_DIV_3_DCP 8
+#define TPS25750_POWER_STATUS_CHARGER_DET_STATUS_1_2V_DCP 9
+
/* TPS_REG_DATA_STATUS bits */
#define TPS_DATA_STATUS_DATA_CONNECTION BIT(0)
#define TPS_DATA_STATUS_UPSIDE_DOWN BIT(1)
@@ -199,4 +218,21 @@
#define TPS_DATA_STATUS_DP_SPEC_PIN_ASSIGNMENT_A BIT(2)
#define TPS_DATA_STATUS_DP_SPEC_PIN_ASSIGNMENT_B (BIT(2) | BIT(1))
+/* BOOT STATUS REG*/
+#define TPS_BOOT_STATUS_DEAD_BATTERY_FLAG BIT(2)
+#define TPS_BOOT_STATUS_I2C_EEPROM_PRESENT BIT(3)
+
+/* PD STATUS REG */
+#define TPS_REG_PD_STATUS_PORT_TYPE_MASK GENMASK(5, 4)
+#define TPS_PD_STATUS_PORT_TYPE(x) \
+ TPS_FIELD_GET(TPS_REG_PD_STATUS_PORT_TYPE_MASK, x)
+
+#define TPS_PD_STATUS_PORT_TYPE_SINK_SOURCE 0
+#define TPS_PD_STATUS_PORT_TYPE_SINK 1
+#define TPS_PD_STATUS_PORT_TYPE_SOURCE 2
+#define TPS_PD_STATUS_PORT_TYPE_SOURCE_SINK 3
+
+/* SLEEP CONF REG */
+#define TPS_SLEEP_CONF_SLEEP_MODE_ALLOWED BIT(0)
+
#endif /* __TPS6598X_H__ */
diff --git a/drivers/usb/typec/tipd/trace.h b/drivers/usb/typec/tipd/trace.h
index 12cad1bde7cc..0669cca12ea1 100644
--- a/drivers/usb/typec/tipd/trace.h
+++ b/drivers/usb/typec/tipd/trace.h
@@ -74,6 +74,13 @@
{ APPLE_CD_REG_INT_DATA_STATUS_UPDATE, "DATA_STATUS_UPDATE" }, \
{ APPLE_CD_REG_INT_STATUS_UPDATE, "STATUS_UPDATE" })
+#define show_tps25750_irq_flags(flags) \
+ __print_flags_u64(flags, "|", \
+ { TPS_REG_INT_PLUG_EVENT, "PLUG_EVENT" }, \
+ { TPS_REG_INT_POWER_STATUS_UPDATE, "POWER_STATUS_UPDATE" }, \
+ { TPS_REG_INT_STATUS_UPDATE, "STATUS_UPDATE" }, \
+ { TPS_REG_INT_PD_STATUS_UPDATE, "PD_STATUS_UPDATE" })
+
#define TPS6598X_STATUS_FLAGS_MASK (GENMASK(31, 0) ^ (TPS_STATUS_CONN_STATE_MASK | \
TPS_STATUS_PP_5V0_SWITCH_MASK | \
TPS_STATUS_PP_HV_SWITCH_MASK | \
@@ -84,6 +91,14 @@
TPS_STATUS_USB_HOST_PRESENT_MASK | \
TPS_STATUS_LEGACY_MASK))
+#define TPS25750_STATUS_FLAGS_MASK (GENMASK(31, 0) ^ (TPS_STATUS_CONN_STATE_MASK | \
+ GENMASK(19, 7) | \
+ TPS_STATUS_VBUS_STATUS_MASK | \
+ TPS_STATUS_USB_HOST_PRESENT_MASK | \
+ TPS_STATUS_LEGACY_MASK | \
+ BIT(26) | \
+ GENMASK(31, 28)))
+
#define show_status_conn_state(status) \
__print_symbolic(TPS_STATUS_CONN_STATE((status)), \
{ TPS_STATUS_CONN_STATE_CONN_WITH_R_A, "conn-Ra" }, \
@@ -141,6 +156,14 @@
{ TPS_STATUS_HIGH_VOLAGE_WARNING, "HIGH_VOLAGE_WARNING" }, \
{ TPS_STATUS_HIGH_LOW_VOLTAGE_WARNING, "HIGH_LOW_VOLTAGE_WARNING" })
+#define show_tps25750_status_flags(flags) \
+ __print_flags((flags & TPS25750_STATUS_FLAGS_MASK), "|", \
+ { TPS_STATUS_PLUG_PRESENT, "PLUG_PRESENT" }, \
+ { TPS_STATUS_PLUG_UPSIDE_DOWN, "UPSIDE_DOWN" }, \
+ { TPS_STATUS_PORTROLE, "PORTROLE" }, \
+ { TPS_STATUS_DATAROLE, "DATAROLE" }, \
+ { TPS_STATUS_BIST, "BIST" })
+
#define show_power_status_source_sink(power_status) \
__print_symbolic(TPS_POWER_STATUS_SOURCESINK(power_status), \
{ 1, "sink" }, \
@@ -159,6 +182,19 @@
{ TPS_POWER_STATUS_BC12_STATUS_CDP, "cdp" }, \
{ TPS_POWER_STATUS_BC12_STATUS_SDP, "sdp" })
+#define show_tps25750_power_status_charger_detect_status(power_status) \
+ __print_symbolic(TPS25750_POWER_STATUS_CHARGER_DETECT_STATUS(power_status), \
+ { TPS25750_POWER_STATUS_CHARGER_DET_STATUS_DISABLED, "disabled"}, \
+ { TPS25750_POWER_STATUS_CHARGER_DET_STATUS_IN_PROGRESS, "in progress"}, \
+ { TPS25750_POWER_STATUS_CHARGER_DET_STATUS_NONE, "none"}, \
+ { TPS25750_POWER_STATUS_CHARGER_DET_STATUS_SPD, "spd"}, \
+ { TPS25750_POWER_STATUS_CHARGER_DET_STATUS_BC_1_2_CPD, "cpd"}, \
+ { TPS25750_POWER_STATUS_CHARGER_DET_STATUS_BC_1_2_DPD, "dpd"}, \
+ { TPS25750_POWER_STATUS_CHARGER_DET_STATUS_DIV_1_DCP, "divider 1 dcp"}, \
+ { TPS25750_POWER_STATUS_CHARGER_DET_STATUS_DIV_2_DCP, "divider 2 dcp"}, \
+ { TPS25750_POWER_STATUS_CHARGER_DET_STATUS_DIV_3_DCP, "divider 3 dcp"}, \
+ { TPS25750_POWER_STATUS_CHARGER_DET_STATUS_1_2V_DCP, "1.2V dcp"})
+
#define TPS_DATA_STATUS_FLAGS_MASK (GENMASK(31, 0) ^ (TPS_DATA_STATUS_DP_PIN_ASSIGNMENT_MASK | \
TPS_DATA_STATUS_TBT_CABLE_SPEED_MASK | \
TPS_DATA_STATUS_TBT_CABLE_GEN_MASK))
@@ -230,6 +266,21 @@ TRACE_EVENT(cd321x_irq,
show_cd321x_irq_flags(__entry->event))
);
+TRACE_EVENT(tps25750_irq,
+ TP_PROTO(u64 event),
+ TP_ARGS(event),
+
+ TP_STRUCT__entry(
+ __field(u64, event)
+ ),
+
+ TP_fast_assign(
+ __entry->event = event;
+ ),
+
+ TP_printk("event=%s", show_tps25750_irq_flags(__entry->event))
+);
+
TRACE_EVENT(tps6598x_status,
TP_PROTO(u32 status),
TP_ARGS(status),
@@ -257,6 +308,27 @@ TRACE_EVENT(tps6598x_status,
)
);
+TRACE_EVENT(tps25750_status,
+ TP_PROTO(u32 status),
+ TP_ARGS(status),
+
+ TP_STRUCT__entry(
+ __field(u32, status)
+ ),
+
+ TP_fast_assign(
+ __entry->status = status;
+ ),
+
+ TP_printk("conn: %s, vbus: %s, usb-host: %s, legacy: %s, flags: %s",
+ show_status_conn_state(__entry->status),
+ show_status_vbus_status(__entry->status),
+ show_status_usb_host_present(__entry->status),
+ show_status_legacy(__entry->status),
+ show_tps25750_status_flags(__entry->status)
+ )
+);
+
TRACE_EVENT(tps6598x_power_status,
TP_PROTO(u16 power_status),
TP_ARGS(power_status),
@@ -277,6 +349,26 @@ TRACE_EVENT(tps6598x_power_status,
)
);
+TRACE_EVENT(tps25750_power_status,
+ TP_PROTO(u16 power_status),
+ TP_ARGS(power_status),
+
+ TP_STRUCT__entry(
+ __field(u16, power_status)
+ ),
+
+ TP_fast_assign(
+ __entry->power_status = power_status;
+ ),
+
+ TP_printk("conn: %d, pwr-role: %s, typec: %s, charger detect: %s",
+ !!TPS_POWER_STATUS_CONNECTION(__entry->power_status),
+ show_power_status_source_sink(__entry->power_status),
+ show_power_status_typec_status(__entry->power_status),
+ show_tps25750_power_status_charger_detect_status(__entry->power_status)
+ )
+);
+
TRACE_EVENT(tps6598x_data_status,
TP_PROTO(u32 data_status),
TP_ARGS(data_status),
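For reference, each TRACE_EVENT(name, ...) added above generates a trace_name() helper that the driver calls at the matching point; the new TPS25750 events mirror the existing tps6598x ones. A minimal sketch of how the IRQ tracepoint would typically be emitted (the handler shape and register helper are assumptions, not part of this patch):

	static irqreturn_t tps25750_interrupt(int irq, void *data)
	{
		struct tps6598x *tps = data;
		u64 event = 0;

		/* Read the event register, then hand the raw value to the tracepoint. */
		if (tps6598x_read64(tps, TPS_REG_INT_EVENT1, &event))
			return IRQ_NONE;

		trace_tps25750_irq(event);
		/* ... dispatch PLUG_EVENT, STATUS_UPDATE, PD_STATUS_UPDATE ... */
		return IRQ_HANDLED;
	}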
diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
index 73cd5bf35047..d9d3c91125ca 100644
--- a/drivers/usb/typec/ucsi/displayport.c
+++ b/drivers/usb/typec/ucsi/displayport.c
@@ -315,7 +315,7 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con,
struct ucsi_dp *dp;
/* We can't rely on the firmware with the capabilities. */
- desc->vdo |= DP_CAP_DP_SIGNALING | DP_CAP_RECEPTACLE;
+ desc->vdo |= DP_CAP_DP_SIGNALLING(0) | DP_CAP_RECEPTACLE;
/* Claiming that we support all pin assignments */
desc->vdo |= all_assignments << 8;
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index 607061a37eca..449c125f6f87 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -501,8 +501,8 @@ static void ucsi_ccg_nvidia_altmode(struct ucsi_ccg *uc,
case NVIDIA_FTB_DP_OFFSET:
if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DBG_VDO)
alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DP_VDO |
- DP_CAP_DP_SIGNALING | DP_CAP_USB |
- DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_E));
+ DP_CAP_DP_SIGNALLING(0) | DP_CAP_USB |
+ DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_E));
break;
case NVIDIA_FTB_DBG_OFFSET:
if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DP_VDO)
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index bb1854b3311d..db6e248f8208 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -8,9 +8,13 @@
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/soc/qcom/pdr.h>
+#include <linux/usb/typec_mux.h>
+#include <linux/gpio/consumer.h>
#include <linux/soc/qcom/pmic_glink.h>
#include "ucsi.h"
+#define PMIC_GLINK_MAX_PORTS 2
+
#define UCSI_BUF_SIZE 48
#define MSG_TYPE_REQ_RESP 1
@@ -52,6 +56,9 @@ struct ucsi_notify_ind_msg {
struct pmic_glink_ucsi {
struct device *dev;
+ struct gpio_desc *port_orientation[PMIC_GLINK_MAX_PORTS];
+ struct typec_switch *port_switch[PMIC_GLINK_MAX_PORTS];
+
struct pmic_glink_client *client;
struct ucsi *ucsi;
@@ -220,8 +227,20 @@ static void pmic_glink_ucsi_notify(struct work_struct *work)
}
con_num = UCSI_CCI_CONNECTOR(cci);
- if (con_num)
+ if (con_num) {
+ if (con_num < PMIC_GLINK_MAX_PORTS &&
+ ucsi->port_orientation[con_num - 1]) {
+ int orientation = gpiod_get_value(ucsi->port_orientation[con_num - 1]);
+
+ if (orientation >= 0) {
+ typec_switch_set(ucsi->port_switch[con_num - 1],
+ orientation ? TYPEC_ORIENTATION_REVERSE
+ : TYPEC_ORIENTATION_NORMAL);
+ }
+ }
+
ucsi_connector_change(ucsi->ucsi, con_num);
+ }
if (ucsi->sync_pending && cci & UCSI_CCI_BUSY) {
ucsi->sync_val = -EBUSY;
@@ -282,6 +301,7 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
{
struct pmic_glink_ucsi *ucsi;
struct device *dev = &adev->dev;
+ struct fwnode_handle *fwnode;
int ret;
ucsi = devm_kzalloc(dev, sizeof(*ucsi), GFP_KERNEL);
@@ -309,6 +329,38 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
ucsi_set_drvdata(ucsi->ucsi, ucsi);
+ device_for_each_child_node(dev, fwnode) {
+ struct gpio_desc *desc;
+ u32 port;
+
+ ret = fwnode_property_read_u32(fwnode, "reg", &port);
+ if (ret < 0) {
+ dev_err(dev, "missing reg property of %pOFn\n", fwnode);
+ return ret;
+ }
+
+ if (port >= PMIC_GLINK_MAX_PORTS) {
+ dev_warn(dev, "invalid connector number, ignoring\n");
+ continue;
+ }
+
+ desc = devm_gpiod_get_index_optional(&adev->dev, "orientation", port, GPIOD_IN);
+
+ /* If GPIO isn't found, continue */
+ if (!desc)
+ continue;
+
+ if (IS_ERR(desc))
+ return dev_err_probe(dev, PTR_ERR(desc),
+ "unable to acquire orientation gpio\n");
+ ucsi->port_orientation[port] = desc;
+
+ ucsi->port_switch[port] = fwnode_typec_switch_get(fwnode);
+ if (IS_ERR(ucsi->port_switch[port]))
+ return dev_err_probe(dev, PTR_ERR(ucsi->port_switch[port]),
+ "failed to acquire orientation-switch\n");
+ }
+
ucsi->client = devm_pmic_glink_register_client(dev,
PMIC_GLINK_OWNER_USBC,
pmic_glink_ucsi_callback,
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index 9c6954aad6c8..ce625b1ce9a5 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -464,8 +464,13 @@ static void stub_disconnect(struct usb_device *udev)
/* release port */
rc = usb_hub_release_port(udev->parent, udev->portnum,
(struct usb_dev_state *) udev);
- if (rc) {
- dev_dbg(&udev->dev, "unable to release port\n");
+ /*
+ * NOTE: If a hub disconnect triggered the disconnect of the downstream
+ * device, usb_hub_release_port() will return -ENODEV, so we can safely
+ * ignore that error here.
+ */
+ if (rc && (rc != -ENODEV)) {
+ dev_dbg(&udev->dev, "unable to release port (%i)\n", rc);
return;
}
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 37d1fc34e8a5..82650c11e451 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -1140,6 +1140,7 @@ static int hcd_name_to_id(const char *name)
static int vhci_setup(struct usb_hcd *hcd)
{
struct vhci *vhci = *((void **)dev_get_platdata(hcd->self.controller));
+
if (usb_hcd_is_primary_hcd(hcd)) {
vhci->vhci_hcd_hs = hcd_to_vhci_hcd(hcd);
vhci->vhci_hcd_hs->vhci = vhci;
@@ -1493,13 +1494,10 @@ static struct platform_driver vhci_driver = {
static void del_platform_devices(void)
{
- struct platform_device *pdev;
int i;
for (i = 0; i < vhci_num_controllers; i++) {
- pdev = vhcis[i].pdev;
- if (pdev != NULL)
- platform_device_unregister(pdev);
+ platform_device_unregister(vhcis[i].pdev);
vhcis[i].pdev = NULL;
}
sysfs_remove_link(&platform_bus.kobj, driver_name);
@@ -1519,45 +1517,33 @@ static int __init vhci_hcd_init(void)
if (vhcis == NULL)
return -ENOMEM;
- for (i = 0; i < vhci_num_controllers; i++) {
- vhcis[i].pdev = platform_device_alloc(driver_name, i);
- if (!vhcis[i].pdev) {
- i--;
- while (i >= 0)
- platform_device_put(vhcis[i--].pdev);
- ret = -ENOMEM;
- goto err_device_alloc;
- }
- }
- for (i = 0; i < vhci_num_controllers; i++) {
- void *vhci = &vhcis[i];
- ret = platform_device_add_data(vhcis[i].pdev, &vhci, sizeof(void *));
- if (ret)
- goto err_driver_register;
- }
-
ret = platform_driver_register(&vhci_driver);
if (ret)
goto err_driver_register;
for (i = 0; i < vhci_num_controllers; i++) {
- ret = platform_device_add(vhcis[i].pdev);
+ void *vhci = &vhcis[i];
+ struct platform_device_info pdevinfo = {
+ .name = driver_name,
+ .id = i,
+ .data = &vhci,
+ .size_data = sizeof(void *),
+ };
+
+ vhcis[i].pdev = platform_device_register_full(&pdevinfo);
+ ret = PTR_ERR_OR_ZERO(vhcis[i].pdev);
if (ret < 0) {
- i--;
- while (i >= 0)
- platform_device_del(vhcis[i--].pdev);
+ while (i--)
+ platform_device_unregister(vhcis[i].pdev);
goto err_add_hcd;
}
}
- return ret;
+ return 0;
err_add_hcd:
platform_driver_unregister(&vhci_driver);
err_driver_register:
- for (i = 0; i < vhci_num_controllers; i++)
- platform_device_put(vhcis[i].pdev);
-err_device_alloc:
kfree(vhcis);
return ret;
}
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index ca56242972b3..84547d998bcf 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -31,11 +31,9 @@ struct mlx5_vdpa_mr {
struct list_head head;
unsigned long num_directs;
unsigned long num_klms;
- /* state of dvq mr */
- bool initialized;
- /* serialize mkey creation and destruction */
- struct mutex mkey_mtx;
+ struct vhost_iotlb *iotlb;
+
bool user_mr;
};
@@ -74,11 +72,12 @@ struct mlx5_vdpa_wq_ent {
enum {
MLX5_VDPA_DATAVQ_GROUP,
MLX5_VDPA_CVQ_GROUP,
+ MLX5_VDPA_DATAVQ_DESC_GROUP,
MLX5_VDPA_NUMVQ_GROUPS
};
enum {
- MLX5_VDPA_NUM_AS = MLX5_VDPA_NUMVQ_GROUPS
+ MLX5_VDPA_NUM_AS = 2
};
struct mlx5_vdpa_dev {
@@ -93,7 +92,9 @@ struct mlx5_vdpa_dev {
u16 max_idx;
u32 generation;
- struct mlx5_vdpa_mr mr;
+ struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];
+ /* serialize mr access */
+ struct mutex mr_mtx;
struct mlx5_control_vq cvq;
struct workqueue_struct *wq;
unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
@@ -114,12 +115,19 @@ void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev);
int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in,
int inlen);
int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey);
-int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
- bool *change_map, unsigned int asid);
-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
- unsigned int asid);
-void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev);
-void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid);
+struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
+ struct vhost_iotlb *iotlb);
+void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev);
+void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_mr *mr);
+void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_mr *mr,
+ unsigned int asid);
+int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
+ struct vhost_iotlb *iotlb,
+ unsigned int asid);
+int mlx5_vdpa_create_dma_mr(struct mlx5_vdpa_dev *mvdev);
+int mlx5_vdpa_reset_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid);
#define mlx5_vdpa_warn(__dev, format, ...) \
dev_warn((__dev)->mdev->device, "%s:%d:(pid %d) warning: " format, __func__, __LINE__, \
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index 5a1971fcd87b..2197c46e563a 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -301,10 +301,13 @@ static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct
sg_free_table(&mr->sg_head);
}
-static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8 perm,
+static int add_direct_chain(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_mr *mr,
+ u64 start,
+ u64 size,
+ u8 perm,
struct vhost_iotlb *iotlb)
{
- struct mlx5_vdpa_mr *mr = &mvdev->mr;
struct mlx5_vdpa_direct_mr *dmr;
struct mlx5_vdpa_direct_mr *n;
LIST_HEAD(tmp);
@@ -354,9 +357,10 @@ err_alloc:
* indirect memory key that provides access to the entire address space given
* by iotlb.
*/
-static int create_user_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int create_user_mr(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_mr *mr,
+ struct vhost_iotlb *iotlb)
{
- struct mlx5_vdpa_mr *mr = &mvdev->mr;
struct mlx5_vdpa_direct_mr *dmr;
struct mlx5_vdpa_direct_mr *n;
struct vhost_iotlb_map *map;
@@ -384,7 +388,7 @@ static int create_user_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb
LOG_MAX_KLM_SIZE);
mr->num_klms += nnuls;
}
- err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
+ err = add_direct_chain(mvdev, mr, ps, pe - ps, pperm, iotlb);
if (err)
goto err_chain;
}
@@ -393,7 +397,7 @@ static int create_user_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb
pperm = map->perm;
}
}
- err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
+ err = add_direct_chain(mvdev, mr, ps, pe - ps, pperm, iotlb);
if (err)
goto err_chain;
@@ -450,20 +454,23 @@ static void destroy_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
mlx5_vdpa_destroy_mkey(mvdev, mr->mkey);
}
-static int dup_iotlb(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *src)
+static int dup_iotlb(struct vhost_iotlb *dst, struct vhost_iotlb *src)
{
struct vhost_iotlb_map *map;
u64 start = 0, last = ULLONG_MAX;
int err;
+ if (dst == src)
+ return -EINVAL;
+
if (!src) {
- err = vhost_iotlb_add_range(mvdev->cvq.iotlb, start, last, start, VHOST_ACCESS_RW);
+ err = vhost_iotlb_add_range(dst, start, last, start, VHOST_ACCESS_RW);
return err;
}
for (map = vhost_iotlb_itree_first(src, start, last); map;
map = vhost_iotlb_itree_next(map, start, last)) {
- err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start, map->last,
+ err = vhost_iotlb_add_range(dst, map->start, map->last,
map->addr, map->perm);
if (err)
return err;
@@ -471,9 +478,9 @@ static int dup_iotlb(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *src)
return 0;
}
-static void prune_iotlb(struct mlx5_vdpa_dev *mvdev)
+static void prune_iotlb(struct vhost_iotlb *iotlb)
{
- vhost_iotlb_del_range(mvdev->cvq.iotlb, 0, ULLONG_MAX);
+ vhost_iotlb_del_range(iotlb, 0, ULLONG_MAX);
}
static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
@@ -489,133 +496,169 @@ static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr
}
}
-static void _mlx5_vdpa_destroy_cvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
+static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
- if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
- return;
+ if (mr->user_mr)
+ destroy_user_mr(mvdev, mr);
+ else
+ destroy_dma_mr(mvdev, mr);
- prune_iotlb(mvdev);
+ vhost_iotlb_free(mr->iotlb);
}
-static void _mlx5_vdpa_destroy_dvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
+void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_mr *mr)
{
- struct mlx5_vdpa_mr *mr = &mvdev->mr;
-
- if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
+ if (!mr)
return;
- if (!mr->initialized)
- return;
+ mutex_lock(&mvdev->mr_mtx);
- if (mr->user_mr)
- destroy_user_mr(mvdev, mr);
- else
- destroy_dma_mr(mvdev, mr);
+ _mlx5_vdpa_destroy_mr(mvdev, mr);
+
+ for (int i = 0; i < MLX5_VDPA_NUM_AS; i++) {
+ if (mvdev->mr[i] == mr)
+ mvdev->mr[i] = NULL;
+ }
- mr->initialized = false;
+ mutex_unlock(&mvdev->mr_mtx);
+
+ kfree(mr);
}
-void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
+void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_mr *new_mr,
+ unsigned int asid)
{
- struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ struct mlx5_vdpa_mr *old_mr = mvdev->mr[asid];
- mutex_lock(&mr->mkey_mtx);
+ mutex_lock(&mvdev->mr_mtx);
- _mlx5_vdpa_destroy_dvq_mr(mvdev, asid);
- _mlx5_vdpa_destroy_cvq_mr(mvdev, asid);
+ mvdev->mr[asid] = new_mr;
+ if (old_mr) {
+ _mlx5_vdpa_destroy_mr(mvdev, old_mr);
+ kfree(old_mr);
+ }
- mutex_unlock(&mr->mkey_mtx);
-}
+ mutex_unlock(&mvdev->mr_mtx);
-void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
-{
- mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_CVQ_GROUP]);
- mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]);
}
-static int _mlx5_vdpa_create_cvq_mr(struct mlx5_vdpa_dev *mvdev,
- struct vhost_iotlb *iotlb,
- unsigned int asid)
+void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
{
- if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
- return 0;
+ for (int i = 0; i < MLX5_VDPA_NUM_AS; i++)
+ mlx5_vdpa_destroy_mr(mvdev, mvdev->mr[i]);
- return dup_iotlb(mvdev, iotlb);
+ prune_iotlb(mvdev->cvq.iotlb);
}
-static int _mlx5_vdpa_create_dvq_mr(struct mlx5_vdpa_dev *mvdev,
- struct vhost_iotlb *iotlb,
- unsigned int asid)
+static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_mr *mr,
+ struct vhost_iotlb *iotlb)
{
- struct mlx5_vdpa_mr *mr = &mvdev->mr;
int err;
- if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
- return 0;
-
- if (mr->initialized)
- return 0;
-
if (iotlb)
- err = create_user_mr(mvdev, iotlb);
+ err = create_user_mr(mvdev, mr, iotlb);
else
err = create_dma_mr(mvdev, mr);
if (err)
return err;
- mr->initialized = true;
+ mr->iotlb = vhost_iotlb_alloc(0, 0);
+ if (!mr->iotlb) {
+ err = -ENOMEM;
+ goto err_mr;
+ }
+
+ err = dup_iotlb(mr->iotlb, iotlb);
+ if (err)
+ goto err_iotlb;
return 0;
+
+err_iotlb:
+ vhost_iotlb_free(mr->iotlb);
+
+err_mr:
+ if (iotlb)
+ destroy_user_mr(mvdev, mr);
+ else
+ destroy_dma_mr(mvdev, mr);
+
+ return err;
}
-static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
- struct vhost_iotlb *iotlb, unsigned int asid)
+struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
+ struct vhost_iotlb *iotlb)
{
+ struct mlx5_vdpa_mr *mr;
int err;
- err = _mlx5_vdpa_create_dvq_mr(mvdev, iotlb, asid);
- if (err)
- return err;
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&mvdev->mr_mtx);
+ err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb);
+ mutex_unlock(&mvdev->mr_mtx);
- err = _mlx5_vdpa_create_cvq_mr(mvdev, iotlb, asid);
if (err)
goto out_err;
- return 0;
+ return mr;
out_err:
- _mlx5_vdpa_destroy_dvq_mr(mvdev, asid);
-
- return err;
+ kfree(mr);
+ return ERR_PTR(err);
}
-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
- unsigned int asid)
+int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
+ struct vhost_iotlb *iotlb,
+ unsigned int asid)
{
int err;
- mutex_lock(&mvdev->mr.mkey_mtx);
- err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid);
- mutex_unlock(&mvdev->mr.mkey_mtx);
+ if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
+ return 0;
+
+ spin_lock(&mvdev->cvq.iommu_lock);
+
+ prune_iotlb(mvdev->cvq.iotlb);
+ err = dup_iotlb(mvdev->cvq.iotlb, iotlb);
+
+ spin_unlock(&mvdev->cvq.iommu_lock);
+
return err;
}
-int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
- bool *change_map, unsigned int asid)
+int mlx5_vdpa_create_dma_mr(struct mlx5_vdpa_dev *mvdev)
{
- struct mlx5_vdpa_mr *mr = &mvdev->mr;
- int err = 0;
+ struct mlx5_vdpa_mr *mr;
+
+ mr = mlx5_vdpa_create_mr(mvdev, NULL);
+ if (IS_ERR(mr))
+ return PTR_ERR(mr);
- *change_map = false;
- mutex_lock(&mr->mkey_mtx);
- if (mr->initialized) {
- mlx5_vdpa_info(mvdev, "memory map update\n");
- *change_map = true;
+ mlx5_vdpa_update_mr(mvdev, mr, 0);
+
+ return mlx5_vdpa_update_cvq_iotlb(mvdev, NULL, 0);
+}
+
+int mlx5_vdpa_reset_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
+{
+ if (asid >= MLX5_VDPA_NUM_AS)
+ return -EINVAL;
+
+ mlx5_vdpa_destroy_mr(mvdev, mvdev->mr[asid]);
+
+ if (asid == 0 && MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
+ if (mlx5_vdpa_create_dma_mr(mvdev))
+ mlx5_vdpa_warn(mvdev, "create DMA MR failed\n");
+ } else {
+ mlx5_vdpa_update_cvq_iotlb(mvdev, NULL, asid);
}
- if (!*change_map)
- err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid);
- mutex_unlock(&mr->mkey_mtx);
- return err;
+ return 0;
}
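Taken together, the mr.c changes move from a single embedded mvdev->mr guarded by mkey_mtx to per-ASID mlx5_vdpa_mr pointers guarded by mr_mtx, each carrying its own copy of the iotlb. A rough sketch of the intended caller-side sequence (the surrounding variable names are illustrative, not lifted from mlx5_vnet.c):

	struct mlx5_vdpa_mr *new_mr;

	/* Build the mkeys and duplicate the iotlb into the new MR. */
	new_mr = mlx5_vdpa_create_mr(mvdev, iotlb);
	if (IS_ERR(new_mr))
		return PTR_ERR(new_mr);

	/* Publish it for the address space; any previous MR is torn down. */
	mlx5_vdpa_update_mr(mvdev, new_mr, asid);

	/* Later, on reset or teardown, drop the MR and clear the slot. */
	mlx5_vdpa_destroy_mr(mvdev, mvdev->mr[asid]);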
diff --git a/drivers/vdpa/mlx5/core/resources.c b/drivers/vdpa/mlx5/core/resources.c
index d5a59c9035fb..5c5a41b64bfc 100644
--- a/drivers/vdpa/mlx5/core/resources.c
+++ b/drivers/vdpa/mlx5/core/resources.c
@@ -256,7 +256,7 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
mlx5_vdpa_warn(mvdev, "resources already allocated\n");
return -EINVAL;
}
- mutex_init(&mvdev->mr.mkey_mtx);
+ mutex_init(&mvdev->mr_mtx);
res->uar = mlx5_get_uars_page(mdev);
if (IS_ERR(res->uar)) {
err = PTR_ERR(res->uar);
@@ -301,7 +301,7 @@ err_pd:
err_uctx:
mlx5_put_uars_page(mdev, res->uar);
err_uars:
- mutex_destroy(&mvdev->mr.mkey_mtx);
+ mutex_destroy(&mvdev->mr_mtx);
return err;
}
@@ -318,6 +318,6 @@ void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
dealloc_pd(mvdev, res->pdn, res->uid);
destroy_uctx(mvdev, res->uid);
mlx5_put_uars_page(mvdev->mdev, res->uar);
- mutex_destroy(&mvdev->mr.mkey_mtx);
+ mutex_destroy(&mvdev->mr_mtx);
res->valid = false;
}
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 946488b8989f..12ac3397f39b 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -7,6 +7,7 @@
#include <uapi/linux/virtio_net.h>
#include <uapi/linux/virtio_ids.h>
#include <uapi/linux/vdpa.h>
+#include <uapi/linux/vhost_types.h>
#include <linux/virtio_config.h>
#include <linux/auxiliary_bus.h>
#include <linux/mlx5/cq.h>
@@ -861,6 +862,9 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
{
int inlen = MLX5_ST_SZ_BYTES(create_virtio_net_q_in);
u32 out[MLX5_ST_SZ_DW(create_virtio_net_q_out)] = {};
+ struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
+ struct mlx5_vdpa_mr *vq_mr;
+ struct mlx5_vdpa_mr *vq_desc_mr;
void *obj_context;
u16 mlx_features;
void *cmd_hdr;
@@ -913,7 +917,14 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
- MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey);
+ vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+ if (vq_mr)
+ MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey);
+
+ vq_desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+ if (vq_desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported))
+ MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, vq_desc_mr->mkey);
+
MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id);
MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size);
MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id);
@@ -2301,6 +2312,16 @@ static u32 mlx5_vdpa_get_vq_group(struct vdpa_device *vdev, u16 idx)
return MLX5_VDPA_DATAVQ_GROUP;
}
+static u32 mlx5_vdpa_get_vq_desc_group(struct vdpa_device *vdev, u16 idx)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+
+ if (is_ctrl_vq_idx(mvdev, idx))
+ return MLX5_VDPA_CVQ_GROUP;
+
+ return MLX5_VDPA_DATAVQ_DESC_GROUP;
+}
+
static u64 mlx_to_vritio_features(u16 dev_features)
{
u64 result = 0;
@@ -2539,6 +2560,11 @@ static void unregister_link_notifier(struct mlx5_vdpa_net *ndev)
flush_workqueue(ndev->mvdev.wq);
}
+static u64 mlx5_vdpa_get_backend_features(const struct vdpa_device *vdpa)
+{
+ return BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK);
+}
+
static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
@@ -2673,7 +2699,8 @@ static void restore_channels_info(struct mlx5_vdpa_net *ndev)
}
static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
- struct vhost_iotlb *iotlb, unsigned int asid)
+ struct mlx5_vdpa_mr *new_mr,
+ unsigned int asid)
{
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
int err;
@@ -2681,28 +2708,21 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
suspend_vqs(ndev);
err = save_channels_info(ndev);
if (err)
- goto err_mr;
+ return err;
teardown_driver(ndev);
- mlx5_vdpa_destroy_mr_asid(mvdev, asid);
- err = mlx5_vdpa_create_mr(mvdev, iotlb, asid);
- if (err)
- goto err_mr;
+
+ mlx5_vdpa_update_mr(mvdev, new_mr, asid);
if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || mvdev->suspended)
- goto err_mr;
+ return 0;
restore_channels_info(ndev);
err = setup_driver(mvdev);
if (err)
- goto err_setup;
+ return err;
return 0;
-
-err_setup:
- mlx5_vdpa_destroy_mr_asid(mvdev, asid);
-err_mr:
- return err;
}
/* reslock must be held for this function */
@@ -2841,7 +2861,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
err_driver:
unregister_link_notifier(ndev);
err_setup:
- mlx5_vdpa_destroy_mr(&ndev->mvdev);
+ mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
err_clear:
up_write(&ndev->reslock);
@@ -2856,7 +2876,7 @@ static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev)
mvdev->group2asid[i] = 0;
}
-static int mlx5_vdpa_reset(struct vdpa_device *vdev)
+static int mlx5_vdpa_compat_reset(struct vdpa_device *vdev, u32 flags)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
@@ -2868,7 +2888,8 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
unregister_link_notifier(ndev);
teardown_driver(ndev);
clear_vqs_ready(ndev);
- mlx5_vdpa_destroy_mr(&ndev->mvdev);
+ if (flags & VDPA_RESET_F_CLEAN_MAP)
+ mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
ndev->mvdev.status = 0;
ndev->mvdev.suspended = false;
ndev->cur_num_vqs = 0;
@@ -2879,8 +2900,9 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
init_group_to_asid_map(mvdev);
++mvdev->generation;
- if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
- if (mlx5_vdpa_create_mr(mvdev, NULL, 0))
+ if ((flags & VDPA_RESET_F_CLEAN_MAP) &&
+ MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
+ if (mlx5_vdpa_create_dma_mr(mvdev))
mlx5_vdpa_warn(mvdev, "create MR failed\n");
}
up_write(&ndev->reslock);
@@ -2888,6 +2910,11 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
return 0;
}
+static int mlx5_vdpa_reset(struct vdpa_device *vdev)
+{
+ return mlx5_vdpa_compat_reset(vdev, 0);
+}
+
static size_t mlx5_vdpa_get_config_size(struct vdpa_device *vdev)
{
return sizeof(struct virtio_net_config);
@@ -2919,18 +2946,38 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
unsigned int asid)
{
- bool change_map;
+ struct mlx5_vdpa_mr *new_mr;
int err;
- err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map, asid);
- if (err) {
- mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
- return err;
+ if (asid >= MLX5_VDPA_NUM_AS)
+ return -EINVAL;
+
+ if (vhost_iotlb_itree_first(iotlb, 0, U64_MAX)) {
+ new_mr = mlx5_vdpa_create_mr(mvdev, iotlb);
+ if (IS_ERR(new_mr)) {
+ err = PTR_ERR(new_mr);
+ mlx5_vdpa_warn(mvdev, "create map failed(%d)\n", err);
+ return err;
+ }
+ } else {
+ /* Empty iotlbs don't have an mr but will clear the previous mr. */
+ new_mr = NULL;
+ }
+
+ if (!mvdev->mr[asid]) {
+ mlx5_vdpa_update_mr(mvdev, new_mr, asid);
+ } else {
+ err = mlx5_vdpa_change_map(mvdev, new_mr, asid);
+ if (err) {
+ mlx5_vdpa_warn(mvdev, "change map failed(%d)\n", err);
+ goto out_err;
+ }
}
- if (change_map)
- err = mlx5_vdpa_change_map(mvdev, iotlb, asid);
+ return mlx5_vdpa_update_cvq_iotlb(mvdev, iotlb, asid);
+out_err:
+ mlx5_vdpa_destroy_mr(mvdev, new_mr);
return err;
}
@@ -2947,6 +2994,18 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
return err;
}
+static int mlx5_vdpa_reset_map(struct vdpa_device *vdev, unsigned int asid)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ int err;
+
+ down_write(&ndev->reslock);
+ err = mlx5_vdpa_reset_mr(mvdev, asid);
+ up_write(&ndev->reslock);
+ return err;
+}
+
static struct device *mlx5_get_vq_dma_dev(struct vdpa_device *vdev, u16 idx)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
@@ -2985,7 +3044,7 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
ndev = to_mlx5_vdpa_ndev(mvdev);
free_resources(ndev);
- mlx5_vdpa_destroy_mr(mvdev);
+ mlx5_vdpa_destroy_mr_resources(mvdev);
if (!is_zero_ether_addr(ndev->config.mac)) {
pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
@@ -3169,12 +3228,19 @@ static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group,
unsigned int asid)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ int err = 0;
if (group >= MLX5_VDPA_NUMVQ_GROUPS)
return -EINVAL;
mvdev->group2asid[group] = asid;
- return 0;
+
+ mutex_lock(&mvdev->mr_mtx);
+ if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mr[asid])
+ err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mr[asid]->iotlb, asid);
+ mutex_unlock(&mvdev->mr_mtx);
+
+ return err;
}
static const struct vdpa_config_ops mlx5_vdpa_ops = {
@@ -3191,7 +3257,9 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
.get_vq_irq = mlx5_get_vq_irq,
.get_vq_align = mlx5_vdpa_get_vq_align,
.get_vq_group = mlx5_vdpa_get_vq_group,
+ .get_vq_desc_group = mlx5_vdpa_get_vq_desc_group, /* Op disabled if not supported. */
.get_device_features = mlx5_vdpa_get_device_features,
+ .get_backend_features = mlx5_vdpa_get_backend_features,
.set_driver_features = mlx5_vdpa_set_driver_features,
.get_driver_features = mlx5_vdpa_get_driver_features,
.set_config_cb = mlx5_vdpa_set_config_cb,
@@ -3201,11 +3269,13 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
.get_status = mlx5_vdpa_get_status,
.set_status = mlx5_vdpa_set_status,
.reset = mlx5_vdpa_reset,
+ .compat_reset = mlx5_vdpa_compat_reset,
.get_config_size = mlx5_vdpa_get_config_size,
.get_config = mlx5_vdpa_get_config,
.set_config = mlx5_vdpa_set_config,
.get_generation = mlx5_vdpa_get_generation,
.set_map = mlx5_vdpa_set_map,
+ .reset_map = mlx5_vdpa_reset_map,
.set_group_asid = mlx5_set_group_asid,
.get_vq_dma_dev = mlx5_get_vq_dma_dev,
.free = mlx5_vdpa_free,
@@ -3289,6 +3359,7 @@ struct mlx5_vdpa_mgmtdev {
struct vdpa_mgmt_dev mgtdev;
struct mlx5_adev *madev;
struct mlx5_vdpa_net *ndev;
+ struct vdpa_config_ops vdpa_ops;
};
static int config_func_mtu(struct mlx5_core_dev *mdev, u16 mtu)
@@ -3402,7 +3473,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
max_vqs = 2;
}
- ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
+ ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mgtdev->vdpa_ops,
MLX5_VDPA_NUMVQ_GROUPS, MLX5_VDPA_NUM_AS, name, false);
if (IS_ERR(ndev))
return PTR_ERR(ndev);
@@ -3485,7 +3556,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
goto err_mpfs;
if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
- err = mlx5_vdpa_create_mr(mvdev, NULL, 0);
+ err = mlx5_vdpa_create_dma_mr(mvdev);
if (err)
goto err_res;
}
@@ -3515,7 +3586,7 @@ err_reg:
err_res2:
free_resources(ndev);
err_mr:
- mlx5_vdpa_destroy_mr(mvdev);
+ mlx5_vdpa_destroy_mr_resources(mvdev);
err_res:
mlx5_vdpa_free_resources(&ndev->mvdev);
err_mpfs:
@@ -3575,6 +3646,10 @@ static int mlx5v_probe(struct auxiliary_device *adev,
MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues) + 1;
mgtdev->mgtdev.supported_features = get_supported_features(mdev);
mgtdev->madev = madev;
+ mgtdev->vdpa_ops = mlx5_vdpa_ops;
+
+ if (!MLX5_CAP_DEV_VDPA_EMULATION(mdev, desc_group_mkey_supported))
+ mgtdev->vdpa_ops.get_vq_desc_group = NULL;
err = vdpa_mgmtdev_register(&mgtdev->mgtdev);
if (err)
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 76d41058add9..be2925d0d283 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -139,7 +139,7 @@ static void vdpasim_vq_reset(struct vdpasim *vdpasim,
vq->vring.notify = NULL;
}
-static void vdpasim_do_reset(struct vdpasim *vdpasim)
+static void vdpasim_do_reset(struct vdpasim *vdpasim, u32 flags)
{
int i;
@@ -151,11 +151,13 @@ static void vdpasim_do_reset(struct vdpasim *vdpasim)
&vdpasim->iommu_lock);
}
- for (i = 0; i < vdpasim->dev_attr.nas; i++) {
- vhost_iotlb_reset(&vdpasim->iommu[i]);
- vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX,
- 0, VHOST_MAP_RW);
- vdpasim->iommu_pt[i] = true;
+ if (flags & VDPA_RESET_F_CLEAN_MAP) {
+ for (i = 0; i < vdpasim->dev_attr.nas; i++) {
+ vhost_iotlb_reset(&vdpasim->iommu[i]);
+ vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX,
+ 0, VHOST_MAP_RW);
+ vdpasim->iommu_pt[i] = true;
+ }
}
vdpasim->running = true;
@@ -259,8 +261,12 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
if (!vdpasim->iommu_pt)
goto err_iommu;
- for (i = 0; i < vdpasim->dev_attr.nas; i++)
+ for (i = 0; i < vdpasim->dev_attr.nas; i++) {
vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);
+ vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX, 0,
+ VHOST_MAP_RW);
+ vdpasim->iommu_pt[i] = true;
+ }
for (i = 0; i < dev_attr->nvqs; i++)
vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
@@ -480,18 +486,23 @@ static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
mutex_unlock(&vdpasim->mutex);
}
-static int vdpasim_reset(struct vdpa_device *vdpa)
+static int vdpasim_compat_reset(struct vdpa_device *vdpa, u32 flags)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
mutex_lock(&vdpasim->mutex);
vdpasim->status = 0;
- vdpasim_do_reset(vdpasim);
+ vdpasim_do_reset(vdpasim, flags);
mutex_unlock(&vdpasim->mutex);
return 0;
}
+static int vdpasim_reset(struct vdpa_device *vdpa)
+{
+ return vdpasim_compat_reset(vdpa, 0);
+}
+
static int vdpasim_suspend(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -637,6 +648,25 @@ err:
return ret;
}
+static int vdpasim_reset_map(struct vdpa_device *vdpa, unsigned int asid)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ if (asid >= vdpasim->dev_attr.nas)
+ return -EINVAL;
+
+ spin_lock(&vdpasim->iommu_lock);
+ if (vdpasim->iommu_pt[asid])
+ goto out;
+ vhost_iotlb_reset(&vdpasim->iommu[asid]);
+ vhost_iotlb_add_range(&vdpasim->iommu[asid], 0, ULONG_MAX,
+ 0, VHOST_MAP_RW);
+ vdpasim->iommu_pt[asid] = true;
+out:
+ spin_unlock(&vdpasim->iommu_lock);
+ return 0;
+}
+
static int vdpasim_bind_mm(struct vdpa_device *vdpa, struct mm_struct *mm)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -749,6 +779,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.get_status = vdpasim_get_status,
.set_status = vdpasim_set_status,
.reset = vdpasim_reset,
+ .compat_reset = vdpasim_compat_reset,
.suspend = vdpasim_suspend,
.resume = vdpasim_resume,
.get_config_size = vdpasim_get_config_size,
@@ -759,6 +790,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.set_group_asid = vdpasim_set_group_asid,
.dma_map = vdpasim_dma_map,
.dma_unmap = vdpasim_dma_unmap,
+ .reset_map = vdpasim_reset_map,
.bind_mm = vdpasim_bind_mm,
.unbind_mm = vdpasim_unbind_mm,
.free = vdpasim_free,
@@ -787,6 +819,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.get_status = vdpasim_get_status,
.set_status = vdpasim_set_status,
.reset = vdpasim_reset,
+ .compat_reset = vdpasim_compat_reset,
.suspend = vdpasim_suspend,
.resume = vdpasim_resume,
.get_config_size = vdpasim_get_config_size,
@@ -796,6 +829,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.get_iova_range = vdpasim_get_iova_range,
.set_group_asid = vdpasim_set_group_asid,
.set_map = vdpasim_set_map,
+ .reset_map = vdpasim_reset_map,
.bind_mm = vdpasim_bind_mm,
.unbind_mm = vdpasim_unbind_mm,
.free = vdpasim_free,
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index df7869537ef1..0ddd4b8abecb 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -134,7 +134,6 @@ static DEFINE_MUTEX(vduse_lock);
static DEFINE_IDR(vduse_idr);
static dev_t vduse_major;
-static struct class *vduse_class;
static struct cdev vduse_ctrl_cdev;
static struct cdev vduse_cdev;
static struct workqueue_struct *vduse_irq_wq;
@@ -1528,6 +1527,16 @@ static const struct kobj_type vq_type = {
.default_groups = vq_groups,
};
+static char *vduse_devnode(const struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "vduse/%s", dev_name(dev));
+}
+
+static const struct class vduse_class = {
+ .name = "vduse",
+ .devnode = vduse_devnode,
+};
+
static void vduse_dev_deinit_vqs(struct vduse_dev *dev)
{
int i;
@@ -1638,7 +1647,7 @@ static int vduse_destroy_dev(char *name)
mutex_unlock(&dev->lock);
vduse_dev_reset(dev);
- device_destroy(vduse_class, MKDEV(MAJOR(vduse_major), dev->minor));
+ device_destroy(&vduse_class, MKDEV(MAJOR(vduse_major), dev->minor));
idr_remove(&vduse_idr, dev->minor);
kvfree(dev->config);
vduse_dev_deinit_vqs(dev);
@@ -1805,7 +1814,7 @@ static int vduse_create_dev(struct vduse_dev_config *config,
dev->minor = ret;
dev->msg_timeout = VDUSE_MSG_DEFAULT_TIMEOUT;
- dev->dev = device_create_with_groups(vduse_class, NULL,
+ dev->dev = device_create_with_groups(&vduse_class, NULL,
MKDEV(MAJOR(vduse_major), dev->minor),
dev, vduse_dev_groups, "%s", config->name);
if (IS_ERR(dev->dev)) {
@@ -1821,7 +1830,7 @@ static int vduse_create_dev(struct vduse_dev_config *config,
return 0;
err_vqs:
- device_destroy(vduse_class, MKDEV(MAJOR(vduse_major), dev->minor));
+ device_destroy(&vduse_class, MKDEV(MAJOR(vduse_major), dev->minor));
err_dev:
idr_remove(&vduse_idr, dev->minor);
err_idr:
@@ -1934,11 +1943,6 @@ static const struct file_operations vduse_ctrl_fops = {
.llseek = noop_llseek,
};
-static char *vduse_devnode(const struct device *dev, umode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "vduse/%s", dev_name(dev));
-}
-
struct vduse_mgmt_dev {
struct vdpa_mgmt_dev mgmt_dev;
struct device dev;
@@ -2082,11 +2086,9 @@ static int vduse_init(void)
int ret;
struct device *dev;
- vduse_class = class_create("vduse");
- if (IS_ERR(vduse_class))
- return PTR_ERR(vduse_class);
-
- vduse_class->devnode = vduse_devnode;
+ ret = class_register(&vduse_class);
+ if (ret)
+ return ret;
ret = alloc_chrdev_region(&vduse_major, 0, VDUSE_DEV_MAX, "vduse");
if (ret)
@@ -2099,7 +2101,7 @@ static int vduse_init(void)
if (ret)
goto err_ctrl_cdev;
- dev = device_create(vduse_class, NULL, vduse_major, NULL, "control");
+ dev = device_create(&vduse_class, NULL, vduse_major, NULL, "control");
if (IS_ERR(dev)) {
ret = PTR_ERR(dev);
goto err_device;
@@ -2141,13 +2143,13 @@ err_bound_wq:
err_wq:
cdev_del(&vduse_cdev);
err_cdev:
- device_destroy(vduse_class, vduse_major);
+ device_destroy(&vduse_class, vduse_major);
err_device:
cdev_del(&vduse_ctrl_cdev);
err_ctrl_cdev:
unregister_chrdev_region(vduse_major, VDUSE_DEV_MAX);
err_chardev_region:
- class_destroy(vduse_class);
+ class_unregister(&vduse_class);
return ret;
}
module_init(vduse_init);
@@ -2159,10 +2161,10 @@ static void vduse_exit(void)
destroy_workqueue(vduse_irq_bound_wq);
destroy_workqueue(vduse_irq_wq);
cdev_del(&vduse_cdev);
- device_destroy(vduse_class, vduse_major);
+ device_destroy(&vduse_class, vduse_major);
cdev_del(&vduse_ctrl_cdev);
unregister_chrdev_region(vduse_major, VDUSE_DEV_MAX);
- class_destroy(vduse_class);
+ class_unregister(&vduse_class);
}
module_exit(vduse_exit);
diff --git a/drivers/vfio/cdx/main.c b/drivers/vfio/cdx/main.c
index a63744302b5e..9cff8d75789e 100644
--- a/drivers/vfio/cdx/main.c
+++ b/drivers/vfio/cdx/main.c
@@ -286,3 +286,4 @@ module_driver(vfio_cdx_driver, cdx_driver_register, cdx_driver_unregister);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VFIO for CDX devices - User Level meta-driver");
+MODULE_IMPORT_NS(CDX_BUS);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 4e3b2c25c721..282aac45c690 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1158,7 +1158,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
/*
* Set prot_iter to data_iter and truncate it to
* prot_bytes, and advance data_iter past any
- * preceeding prot_bytes that may be present.
+ * preceding prot_bytes that may be present.
*
* Also fix up the exp_data_len to reflect only the
* actual data payload length.
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 78379ffd2336..30df5c58db73 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -131,6 +131,15 @@ static struct vhost_vdpa_as *vhost_vdpa_find_alloc_as(struct vhost_vdpa *v,
return vhost_vdpa_alloc_as(v, asid);
}
+static void vhost_vdpa_reset_map(struct vhost_vdpa *v, u32 asid)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (ops->reset_map)
+ ops->reset_map(vdpa, asid);
+}
+
static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
{
struct vhost_vdpa_as *as = asid_to_as(v, asid);
@@ -140,6 +149,14 @@ static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
hlist_del(&as->hash_link);
vhost_vdpa_iotlb_unmap(v, &as->iotlb, 0ULL, 0ULL - 1, asid);
+ /*
+ * Devices with a vendor-specific IOMMU may need to restore the
+ * iotlb to its initial or default state, which the full-range
+ * unmap call above cannot do. Give them a chance to clean up
+ * or reset the map to the desired state.
+ */
+ vhost_vdpa_reset_map(v, asid);
kfree(as);
return 0;
@@ -210,13 +227,24 @@ static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
irq_bypass_unregister_producer(&vq->call_ctx.producer);
}
-static int vhost_vdpa_reset(struct vhost_vdpa *v)
+static int _compat_vdpa_reset(struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;
+ u32 flags = 0;
- v->in_batch = 0;
+ if (v->vdev.vqs) {
+ flags |= !vhost_backend_has_feature(v->vdev.vqs[0],
+ VHOST_BACKEND_F_IOTLB_PERSIST) ?
+ VDPA_RESET_F_CLEAN_MAP : 0;
+ }
- return vdpa_reset(vdpa);
+ return vdpa_reset(vdpa, flags);
+}
+
+static int vhost_vdpa_reset(struct vhost_vdpa *v)
+{
+ v->in_batch = 0;
+ return _compat_vdpa_reset(v);
}
static long vhost_vdpa_bind_mm(struct vhost_vdpa *v)
@@ -295,7 +323,7 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
vhost_vdpa_unsetup_vq_irq(v, i);
if (status == 0) {
- ret = vdpa_reset(vdpa);
+ ret = _compat_vdpa_reset(v);
if (ret)
return ret;
} else
@@ -389,6 +417,14 @@ static bool vhost_vdpa_can_resume(const struct vhost_vdpa *v)
return ops->resume;
}
+static bool vhost_vdpa_has_desc_group(const struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ return ops->get_vq_desc_group;
+}
+
static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
struct vdpa_device *vdpa = v->vdpa;
@@ -414,6 +450,15 @@ static u64 vhost_vdpa_get_backend_features(const struct vhost_vdpa *v)
return ops->get_backend_features(vdpa);
}
+static bool vhost_vdpa_has_persistent_map(const struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ return (!ops->set_map && !ops->dma_map) || ops->reset_map ||
+ vhost_vdpa_get_backend_features(v) & BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST);
+}
+
static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
struct vdpa_device *vdpa = v->vdpa;
@@ -605,6 +650,16 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
else if (copy_to_user(argp, &s, sizeof(s)))
return -EFAULT;
return 0;
+ case VHOST_VDPA_GET_VRING_DESC_GROUP:
+ if (!vhost_vdpa_has_desc_group(v))
+ return -EOPNOTSUPP;
+ s.index = idx;
+ s.num = ops->get_vq_desc_group(vdpa, idx);
+ if (s.num >= vdpa->ngroups)
+ return -EIO;
+ else if (copy_to_user(argp, &s, sizeof(s)))
+ return -EFAULT;
+ return 0;
case VHOST_VDPA_SET_GROUP_ASID:
if (copy_from_user(&s, argp, sizeof(s)))
return -EFAULT;
@@ -690,6 +745,8 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
if (copy_from_user(&features, featurep, sizeof(features)))
return -EFAULT;
if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
+ BIT_ULL(VHOST_BACKEND_F_DESC_ASID) |
+ BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST) |
BIT_ULL(VHOST_BACKEND_F_SUSPEND) |
BIT_ULL(VHOST_BACKEND_F_RESUME) |
BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK)))
@@ -700,6 +757,15 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
if ((features & BIT_ULL(VHOST_BACKEND_F_RESUME)) &&
!vhost_vdpa_can_resume(v))
return -EOPNOTSUPP;
+ if ((features & BIT_ULL(VHOST_BACKEND_F_DESC_ASID)) &&
+ !(features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID)))
+ return -EINVAL;
+ if ((features & BIT_ULL(VHOST_BACKEND_F_DESC_ASID)) &&
+ !vhost_vdpa_has_desc_group(v))
+ return -EOPNOTSUPP;
+ if ((features & BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST)) &&
+ !vhost_vdpa_has_persistent_map(v))
+ return -EOPNOTSUPP;
vhost_set_backend_features(&v->vdev, features);
return 0;
}
@@ -753,6 +819,10 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
if (vhost_vdpa_can_resume(v))
features |= BIT_ULL(VHOST_BACKEND_F_RESUME);
+ if (vhost_vdpa_has_desc_group(v))
+ features |= BIT_ULL(VHOST_BACKEND_F_DESC_ASID);
+ if (vhost_vdpa_has_persistent_map(v))
+ features |= BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST);
features |= vhost_vdpa_get_backend_features(v);
if (copy_to_user(featurep, &features, sizeof(features)))
r = -EFAULT;
@@ -1285,6 +1355,7 @@ static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
vhost_vdpa_free_domain(v);
vhost_dev_cleanup(&v->vdev);
kfree(v->vdev.vqs);
+ v->vdev.vqs = NULL;
}
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
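From userspace, the two new capabilities are negotiated with the existing vhost-vdpa backend-features ioctls; the feature bits come from this series' vhost_types.h additions. A hedged sketch (error handling elided):

	uint64_t features;

	if (ioctl(fd, VHOST_GET_BACKEND_FEATURES, &features) == 0) {
		/* Keep only the ASID/persistence bits; DESC_ASID is only
		 * valid together with IOTLB_ASID, as enforced above. */
		features &= (1ULL << VHOST_BACKEND_F_IOTLB_ASID) |
			    (1ULL << VHOST_BACKEND_F_DESC_ASID) |
			    (1ULL << VHOST_BACKEND_F_IOTLB_PERSIST);
		ioctl(fd, VHOST_SET_BACKEND_FEATURES, &features);
	}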
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 30577b1d3de5..83c2d7329ca5 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -7,9 +7,9 @@ menu "Console display driver support"
config VGA_CONSOLE
bool "VGA text console" if EXPERT || !X86
- depends on !4xx && !PPC_8xx && !SPARC && !M68K && !PARISC && !SUPERH && \
- (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER) && \
- !ARM64 && !ARC && !MICROBLAZE && !OPENRISC && !S390 && !UML
+ depends on ALPHA || IA64 || X86 || \
+ (ARM && ARCH_FOOTBRIDGE) || \
+ (MIPS && (MIPS_MALTA || SIBYTE_BCM112X || SIBYTE_SB1250 || SIBYTE_BCM1x80 || SNI_RM))
select APERTURE_HELPERS if (DRM || FB || VFIO_PCI_CORE)
default y
help
@@ -52,7 +52,7 @@ config DUMMY_CONSOLE
config DUMMY_CONSOLE_COLUMNS
int "Initial number of console screen columns"
- depends on DUMMY_CONSOLE && !ARM
+ depends on DUMMY_CONSOLE && !ARCH_FOOTBRIDGE
default 160 if PARISC
default 80
help
@@ -62,8 +62,9 @@ config DUMMY_CONSOLE_COLUMNS
config DUMMY_CONSOLE_ROWS
int "Initial number of console screen rows"
- depends on DUMMY_CONSOLE && !ARM
+ depends on DUMMY_CONSOLE && !ARCH_FOOTBRIDGE
default 64 if PARISC
+ default 30 if ARM
default 25
help
On PA-RISC, the default value is 64, which should fit a 1280x1024
diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c
index f1711b2f9ff0..14af5d9e13b0 100644
--- a/drivers/video/console/dummycon.c
+++ b/drivers/video/console/dummycon.c
@@ -18,9 +18,10 @@
* Dummy console driver
*/
-#if defined(__arm__)
-#define DUMMY_COLUMNS screen_info.orig_video_cols
-#define DUMMY_ROWS screen_info.orig_video_lines
+#if defined(CONFIG_ARCH_FOOTBRIDGE) && defined(CONFIG_VGA_CONSOLE)
+#include <asm/vga.h>
+#define DUMMY_COLUMNS vgacon_screen_info.orig_video_cols
+#define DUMMY_ROWS vgacon_screen_info.orig_video_lines
#else
/* set by Kconfig. Use 80x25 for 640x480 and 160x64 for 1280x1024 */
#define DUMMY_COLUMNS CONFIG_DUMMY_CONSOLE_COLUMNS
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 7ad047bcae17..8ef1579fa57f 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -89,6 +89,8 @@ static int vga_video_font_height;
static int vga_scan_lines __read_mostly;
static unsigned int vga_rolled_over; /* last vc_origin offset before wrap */
+static struct screen_info *vga_si;
+
static bool vga_hardscroll_enabled;
static bool vga_hardscroll_user_enable = true;
@@ -153,8 +155,9 @@ static const char *vgacon_startup(void)
u16 saved1, saved2;
volatile u16 *p;
- if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB ||
- screen_info.orig_video_isVGA == VIDEO_TYPE_EFI) {
+ if (!vga_si ||
+ vga_si->orig_video_isVGA == VIDEO_TYPE_VLFB ||
+ vga_si->orig_video_isVGA == VIDEO_TYPE_EFI) {
no_vga:
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
@@ -164,29 +167,29 @@ static const char *vgacon_startup(void)
#endif
}
- /* boot_params.screen_info reasonably initialized? */
- if ((screen_info.orig_video_lines == 0) ||
- (screen_info.orig_video_cols == 0))
+ /* vga_si reasonably initialized? */
+ if ((vga_si->orig_video_lines == 0) ||
+ (vga_si->orig_video_cols == 0))
goto no_vga;
/* VGA16 modes are not handled by VGACON */
- if ((screen_info.orig_video_mode == 0x0D) || /* 320x200/4 */
- (screen_info.orig_video_mode == 0x0E) || /* 640x200/4 */
- (screen_info.orig_video_mode == 0x10) || /* 640x350/4 */
- (screen_info.orig_video_mode == 0x12) || /* 640x480/4 */
- (screen_info.orig_video_mode == 0x6A)) /* 800x600/4 (VESA) */
+ if ((vga_si->orig_video_mode == 0x0D) || /* 320x200/4 */
+ (vga_si->orig_video_mode == 0x0E) || /* 640x200/4 */
+ (vga_si->orig_video_mode == 0x10) || /* 640x350/4 */
+ (vga_si->orig_video_mode == 0x12) || /* 640x480/4 */
+ (vga_si->orig_video_mode == 0x6A)) /* 800x600/4 (VESA) */
goto no_vga;
- vga_video_num_lines = screen_info.orig_video_lines;
- vga_video_num_columns = screen_info.orig_video_cols;
+ vga_video_num_lines = vga_si->orig_video_lines;
+ vga_video_num_columns = vga_si->orig_video_cols;
vgastate.vgabase = NULL;
- if (screen_info.orig_video_mode == 7) {
+ if (vga_si->orig_video_mode == 7) {
/* Monochrome display */
vga_vram_base = 0xb0000;
vga_video_port_reg = VGA_CRT_IM;
vga_video_port_val = VGA_CRT_DM;
- if ((screen_info.orig_video_ega_bx & 0xff) != 0x10) {
+ if ((vga_si->orig_video_ega_bx & 0xff) != 0x10) {
static struct resource ega_console_resource =
{ .name = "ega",
.flags = IORESOURCE_IO,
@@ -223,12 +226,12 @@ static const char *vgacon_startup(void)
vga_vram_base = 0xb8000;
vga_video_port_reg = VGA_CRT_IC;
vga_video_port_val = VGA_CRT_DC;
- if ((screen_info.orig_video_ega_bx & 0xff) != 0x10) {
+ if ((vga_si->orig_video_ega_bx & 0xff) != 0x10) {
int i;
vga_vram_size = 0x8000;
- if (!screen_info.orig_video_isVGA) {
+ if (!vga_si->orig_video_isVGA) {
static struct resource ega_console_resource =
{ .name = "ega",
.flags = IORESOURCE_IO,
@@ -319,14 +322,14 @@ static const char *vgacon_startup(void)
|| vga_video_type == VIDEO_TYPE_VGAC
|| vga_video_type == VIDEO_TYPE_EGAM) {
vga_hardscroll_enabled = vga_hardscroll_user_enable;
- vga_default_font_height = screen_info.orig_video_points;
- vga_video_font_height = screen_info.orig_video_points;
+ vga_default_font_height = vga_si->orig_video_points;
+ vga_video_font_height = vga_si->orig_video_points;
/* This may be suboptimal but is a safe bet - go with it */
vga_scan_lines =
vga_video_font_height * vga_video_num_lines;
}
- vgacon_xres = screen_info.orig_video_cols * VGA_FONTWIDTH;
+ vgacon_xres = vga_si->orig_video_cols * VGA_FONTWIDTH;
vgacon_yres = vga_scan_lines;
return display_desc;
@@ -371,7 +374,7 @@ static void vgacon_init(struct vc_data *c, int init)
/* Only set the default if the user didn't deliberately override it */
if (global_cursor_default == -1)
global_cursor_default =
- !(screen_info.flags & VIDEO_FLAGS_NOCURSOR);
+ !(vga_si->flags & VIDEO_FLAGS_NOCURSOR);
}
static void vgacon_deinit(struct vc_data *c)
@@ -589,7 +592,7 @@ static int vgacon_switch(struct vc_data *c)
{
int x = c->vc_cols * VGA_FONTWIDTH;
int y = c->vc_rows * c->vc_cell_height;
- int rows = screen_info.orig_video_lines * vga_default_font_height/
+ int rows = vga_si->orig_video_lines * vga_default_font_height/
c->vc_cell_height;
/*
* We need to save screen size here as it's the only way
@@ -609,7 +612,7 @@ static int vgacon_switch(struct vc_data *c)
if ((vgacon_xres != x || vgacon_yres != y) &&
(!(vga_video_num_columns % 2) &&
- vga_video_num_columns <= screen_info.orig_video_cols &&
+ vga_video_num_columns <= vga_si->orig_video_cols &&
vga_video_num_lines <= rows))
vgacon_doresize(c, c->vc_cols, c->vc_rows);
}
@@ -1056,13 +1059,13 @@ static int vgacon_resize(struct vc_data *c, unsigned int width,
* Ho ho! Someone (svgatextmode, eh?) may have reprogrammed
* the video mode! Set the new defaults then and go away.
*/
- screen_info.orig_video_cols = width;
- screen_info.orig_video_lines = height;
+ vga_si->orig_video_cols = width;
+ vga_si->orig_video_lines = height;
vga_default_font_height = c->vc_cell_height;
return 0;
}
- if (width % 2 || width > screen_info.orig_video_cols ||
- height > (screen_info.orig_video_lines * vga_default_font_height)/
+ if (width % 2 || width > vga_si->orig_video_cols ||
+ height > (vga_si->orig_video_lines * vga_default_font_height)/
c->vc_cell_height)
return -EINVAL;
@@ -1092,8 +1095,8 @@ static void vgacon_save_screen(struct vc_data *c)
* console initialization routines.
*/
vga_bootup_console = 1;
- c->state.x = screen_info.orig_x;
- c->state.y = screen_info.orig_y;
+ c->state.x = vga_si->orig_x;
+ c->state.y = vga_si->orig_y;
}
/* We can't copy in more than the size of the video buffer,
@@ -1186,4 +1189,13 @@ const struct consw vga_con = {
};
EXPORT_SYMBOL(vga_con);
+void vgacon_register_screen(struct screen_info *si)
+{
+ if (!si || vga_si)
+ return;
+
+ conswitchp = &vga_con;
+ vga_si = si;
+}
+
MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 96fdc7301481..53693c826ebd 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -364,7 +364,7 @@ config FB_IMSTT
config FB_VGA16
tristate "VGA 16-color graphics support"
- depends on FB && (X86 || PPC)
+ depends on FB && X86
select APERTURE_HELPERS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 2e27c6bd8044..bf59daf862fc 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -1010,7 +1010,7 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
goto getmem_done;
}
pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n");
- } else {
+ } else if (IS_ENABLED(CONFIG_SYSFB)) {
base = screen_info.lfb_base;
size = screen_info.lfb_size;
}
@@ -1056,13 +1056,13 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
getmem_done:
aperture_remove_conflicting_devices(base, size, KBUILD_MODNAME);
- if (gen2vm) {
+ if (!gen2vm) {
+ pci_dev_put(pdev);
+ } else if (IS_ENABLED(CONFIG_SYSFB)) {
/* framebuffer is reallocated, clear screen_info to avoid misuse from kexec */
screen_info.lfb_size = 0;
screen_info.lfb_base = 0;
screen_info.orig_video_isVGA = 0;
- } else {
- pci_dev_put(pdev);
}
return 0;
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index ac21942d5311..b485e9198201 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -185,8 +185,6 @@ static inline void setindex(int index)
/* Check if the video mode is supported by the driver */
static inline int check_mode_supported(const struct screen_info *si)
{
- /* non-x86 architectures treat orig_video_isVGA as a boolean flag */
-#if defined(CONFIG_X86)
/* only EGA and VGA in 16 color graphic mode are supported */
if (si->orig_video_isVGA != VIDEO_TYPE_EGAC &&
si->orig_video_isVGA != VIDEO_TYPE_VGAC)
@@ -197,7 +195,7 @@ static inline int check_mode_supported(const struct screen_info *si)
si->orig_video_mode != 0x10 && /* 640x350/4 (EGA) */
si->orig_video_mode != 0x12) /* 640x480/4 (VGA) */
return -ENODEV;
-#endif
+
return 0;
}
@@ -1340,12 +1338,7 @@ static int vga16fb_probe(struct platform_device *dev)
printk(KERN_INFO "vga16fb: mapped to 0x%p\n", info->screen_base);
par = info->par;
-#if defined(CONFIG_X86)
par->isVGA = si->orig_video_isVGA == VIDEO_TYPE_VGAC;
-#else
- /* non-x86 architectures treat orig_video_isVGA as a boolean flag */
- par->isVGA = si->orig_video_isVGA;
-#endif
par->palette_blanked = 0;
par->vesa_blanked = 0;
diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig
index f79ab13a5c28..40129b6f0eca 100644
--- a/drivers/virt/Kconfig
+++ b/drivers/virt/Kconfig
@@ -48,10 +48,6 @@ source "drivers/virt/nitro_enclaves/Kconfig"
source "drivers/virt/acrn/Kconfig"
-source "drivers/virt/coco/efi_secret/Kconfig"
-
-source "drivers/virt/coco/sev-guest/Kconfig"
-
-source "drivers/virt/coco/tdx-guest/Kconfig"
+source "drivers/virt/coco/Kconfig"
endif
diff --git a/drivers/virt/Makefile b/drivers/virt/Makefile
index e9aa6fc96fab..f29901bd7820 100644
--- a/drivers/virt/Makefile
+++ b/drivers/virt/Makefile
@@ -9,6 +9,4 @@ obj-y += vboxguest/
obj-$(CONFIG_NITRO_ENCLAVES) += nitro_enclaves/
obj-$(CONFIG_ACRN_HSM) += acrn/
-obj-$(CONFIG_EFI_SECRET) += coco/efi_secret/
-obj-$(CONFIG_SEV_GUEST) += coco/sev-guest/
-obj-$(CONFIG_INTEL_TDX_GUEST) += coco/tdx-guest/
+obj-y += coco/
diff --git a/drivers/virt/coco/Kconfig b/drivers/virt/coco/Kconfig
new file mode 100644
index 000000000000..87d142c1f932
--- /dev/null
+++ b/drivers/virt/coco/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Confidential computing related collateral
+#
+
+config TSM_REPORTS
+ select CONFIGFS_FS
+ tristate
+
+source "drivers/virt/coco/efi_secret/Kconfig"
+
+source "drivers/virt/coco/sev-guest/Kconfig"
+
+source "drivers/virt/coco/tdx-guest/Kconfig"
diff --git a/drivers/virt/coco/Makefile b/drivers/virt/coco/Makefile
new file mode 100644
index 000000000000..18c1aba5edb7
--- /dev/null
+++ b/drivers/virt/coco/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Confidential computing related collateral
+#
+obj-$(CONFIG_TSM_REPORTS) += tsm.o
+obj-$(CONFIG_EFI_SECRET) += efi_secret/
+obj-$(CONFIG_SEV_GUEST) += sev-guest/
+obj-$(CONFIG_INTEL_TDX_GUEST) += tdx-guest/
diff --git a/drivers/virt/coco/sev-guest/Kconfig b/drivers/virt/coco/sev-guest/Kconfig
index da2d7ca531f0..1cffc72c41cb 100644
--- a/drivers/virt/coco/sev-guest/Kconfig
+++ b/drivers/virt/coco/sev-guest/Kconfig
@@ -5,6 +5,7 @@ config SEV_GUEST
select CRYPTO
select CRYPTO_AEAD2
select CRYPTO_GCM
+ select TSM_REPORTS
help
SEV-SNP firmware provides the guest a mechanism to communicate with
the PSP without risk from a malicious hypervisor who wishes to read,
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index 97dbe715e96a..bc564adcf499 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -16,9 +16,13 @@
#include <linux/miscdevice.h>
#include <linux/set_memory.h>
#include <linux/fs.h>
+#include <linux/tsm.h>
#include <crypto/aead.h>
#include <linux/scatterlist.h>
#include <linux/psp-sev.h>
+#include <linux/sockptr.h>
+#include <linux/cleanup.h>
+#include <linux/uuid.h>
#include <uapi/linux/sev-guest.h>
#include <uapi/linux/psp-sev.h>
@@ -57,6 +61,11 @@ struct snp_guest_dev {
struct snp_secrets_page_layout *layout;
struct snp_req_data input;
+ union {
+ struct snp_report_req report;
+ struct snp_derived_key_req derived_key;
+ struct snp_ext_report_req ext_report;
+ } req;
u32 *os_area_msg_seqno;
u8 *vmpck;
};
@@ -470,11 +479,16 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code,
return 0;
}
+struct snp_req_resp {
+ sockptr_t req_data;
+ sockptr_t resp_data;
+};
+
static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
struct snp_guest_crypto *crypto = snp_dev->crypto;
+ struct snp_report_req *req = &snp_dev->req.report;
struct snp_report_resp *resp;
- struct snp_report_req req;
int rc, resp_len;
lockdep_assert_held(&snp_cmd_mutex);
@@ -482,7 +496,7 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
if (!arg->req_data || !arg->resp_data)
return -EINVAL;
- if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
+ if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
return -EFAULT;
/*
@@ -496,7 +510,7 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
return -ENOMEM;
rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
- SNP_MSG_REPORT_REQ, &req, sizeof(req), resp->data,
+ SNP_MSG_REPORT_REQ, req, sizeof(*req), resp->data,
resp_len);
if (rc)
goto e_free;
@@ -511,9 +525,9 @@ e_free:
static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
+ struct snp_derived_key_req *req = &snp_dev->req.derived_key;
struct snp_guest_crypto *crypto = snp_dev->crypto;
struct snp_derived_key_resp resp = {0};
- struct snp_derived_key_req req;
int rc, resp_len;
/* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
u8 buf[64 + 16];
@@ -532,11 +546,11 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
if (sizeof(buf) < resp_len)
return -ENOMEM;
- if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
+ if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
return -EFAULT;
rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
- SNP_MSG_KEY_REQ, &req, sizeof(req), buf, resp_len);
+ SNP_MSG_KEY_REQ, req, sizeof(*req), buf, resp_len);
if (rc)
return rc;
@@ -550,31 +564,39 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
return rc;
}
-static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
+static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg,
+ struct snp_req_resp *io)
+
{
+ struct snp_ext_report_req *req = &snp_dev->req.ext_report;
struct snp_guest_crypto *crypto = snp_dev->crypto;
- struct snp_ext_report_req req;
struct snp_report_resp *resp;
int ret, npages = 0, resp_len;
+ sockptr_t certs_address;
lockdep_assert_held(&snp_cmd_mutex);
- if (!arg->req_data || !arg->resp_data)
+ if (sockptr_is_null(io->req_data) || sockptr_is_null(io->resp_data))
return -EINVAL;
- if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
+ if (copy_from_sockptr(req, io->req_data, sizeof(*req)))
return -EFAULT;
- /* userspace does not want certificate data */
- if (!req.certs_len || !req.certs_address)
+ /* caller does not want certificate data */
+ if (!req->certs_len || !req->certs_address)
goto cmd;
- if (req.certs_len > SEV_FW_BLOB_MAX_SIZE ||
- !IS_ALIGNED(req.certs_len, PAGE_SIZE))
+ if (req->certs_len > SEV_FW_BLOB_MAX_SIZE ||
+ !IS_ALIGNED(req->certs_len, PAGE_SIZE))
return -EINVAL;
- if (!access_ok((const void __user *)req.certs_address, req.certs_len))
- return -EFAULT;
+ if (sockptr_is_kernel(io->resp_data)) {
+ certs_address = KERNEL_SOCKPTR((void *)req->certs_address);
+ } else {
+ certs_address = USER_SOCKPTR((void __user *)req->certs_address);
+ if (!access_ok(certs_address.user, req->certs_len))
+ return -EFAULT;
+ }
/*
* Initialize the intermediate buffer with all zeros. This buffer
@@ -582,8 +604,8 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
* the host. If host does not supply any certs in it, then copy
* zeros to indicate that certificate data was not provided.
*/
- memset(snp_dev->certs_data, 0, req.certs_len);
- npages = req.certs_len >> PAGE_SHIFT;
+ memset(snp_dev->certs_data, 0, req->certs_len);
+ npages = req->certs_len >> PAGE_SHIFT;
cmd:
/*
* The intermediate response buffer is used while decrypting the
@@ -597,28 +619,26 @@ cmd:
snp_dev->input.data_npages = npages;
ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg,
- SNP_MSG_REPORT_REQ, &req.data,
- sizeof(req.data), resp->data, resp_len);
+ SNP_MSG_REPORT_REQ, &req->data,
+ sizeof(req->data), resp->data, resp_len);
/* If certs length is invalid then copy the returned length */
if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
- req.certs_len = snp_dev->input.data_npages << PAGE_SHIFT;
+ req->certs_len = snp_dev->input.data_npages << PAGE_SHIFT;
- if (copy_to_user((void __user *)arg->req_data, &req, sizeof(req)))
+ if (copy_to_sockptr(io->req_data, req, sizeof(*req)))
ret = -EFAULT;
}
if (ret)
goto e_free;
- if (npages &&
- copy_to_user((void __user *)req.certs_address, snp_dev->certs_data,
- req.certs_len)) {
+ if (npages && copy_to_sockptr(certs_address, snp_dev->certs_data, req->certs_len)) {
ret = -EFAULT;
goto e_free;
}
- if (copy_to_user((void __user *)arg->resp_data, resp, sizeof(*resp)))
+ if (copy_to_sockptr(io->resp_data, resp, sizeof(*resp)))
ret = -EFAULT;
e_free:
@@ -631,6 +651,7 @@ static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long
struct snp_guest_dev *snp_dev = to_snp_dev(file);
void __user *argp = (void __user *)arg;
struct snp_guest_request_ioctl input;
+ struct snp_req_resp io;
int ret = -ENOTTY;
if (copy_from_user(&input, argp, sizeof(input)))
@@ -659,7 +680,14 @@ static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long
ret = get_derived_key(snp_dev, &input);
break;
case SNP_GET_EXT_REPORT:
- ret = get_ext_report(snp_dev, &input);
+ /*
+ * As get_ext_report() may be called from the ioctl() path and a
+ * kernel internal path (configfs-tsm), decorate the passed
+ * buffers as user pointers.
+ */
+ io.req_data = USER_SOCKPTR((void __user *)input.req_data);
+ io.resp_data = USER_SOCKPTR((void __user *)input.resp_data);
+ ret = get_ext_report(snp_dev, &input, &io);
break;
default:
break;
@@ -743,6 +771,130 @@ static u8 *get_vmpck(int id, struct snp_secrets_page_layout *layout, u32 **seqno
return key;
}
+struct snp_msg_report_resp_hdr {
+ u32 status;
+ u32 report_size;
+ u8 rsvd[24];
+};
+
+struct snp_msg_cert_entry {
+ guid_t guid;
+ u32 offset;
+ u32 length;
+};
+
+static int sev_report_new(struct tsm_report *report, void *data)
+{
+ struct snp_msg_cert_entry *cert_table;
+ struct tsm_desc *desc = &report->desc;
+ struct snp_guest_dev *snp_dev = data;
+ struct snp_msg_report_resp_hdr hdr;
+ const u32 report_size = SZ_4K;
+ const u32 ext_size = SEV_FW_BLOB_MAX_SIZE;
+ u32 certs_size, i, size = report_size + ext_size;
+ int ret;
+
+ if (desc->inblob_len != SNP_REPORT_USER_DATA_SIZE)
+ return -EINVAL;
+
+ void *buf __free(kvfree) = kvzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ guard(mutex)(&snp_cmd_mutex);
+
+ /* Check if the VMPCK is not empty */
+ if (is_vmpck_empty(snp_dev)) {
+ dev_err_ratelimited(snp_dev->dev, "VMPCK is disabled\n");
+ return -ENOTTY;
+ }
+
+ cert_table = buf + report_size;
+ struct snp_ext_report_req ext_req = {
+ .data = { .vmpl = desc->privlevel },
+ .certs_address = (__u64)cert_table,
+ .certs_len = ext_size,
+ };
+ memcpy(&ext_req.data.user_data, desc->inblob, desc->inblob_len);
+
+ struct snp_guest_request_ioctl input = {
+ .msg_version = 1,
+ .req_data = (__u64)&ext_req,
+ .resp_data = (__u64)buf,
+ .exitinfo2 = 0xff,
+ };
+ struct snp_req_resp io = {
+ .req_data = KERNEL_SOCKPTR(&ext_req),
+ .resp_data = KERNEL_SOCKPTR(buf),
+ };
+
+ ret = get_ext_report(snp_dev, &input, &io);
+ if (ret)
+ return ret;
+
+ memcpy(&hdr, buf, sizeof(hdr));
+ if (hdr.status == SEV_RET_INVALID_PARAM)
+ return -EINVAL;
+ if (hdr.status == SEV_RET_INVALID_KEY)
+ return -EINVAL;
+ if (hdr.status)
+ return -ENXIO;
+ if ((hdr.report_size + sizeof(hdr)) > report_size)
+ return -ENOMEM;
+
+ void *rbuf __free(kvfree) = kvzalloc(hdr.report_size, GFP_KERNEL);
+ if (!rbuf)
+ return -ENOMEM;
+
+ memcpy(rbuf, buf + sizeof(hdr), hdr.report_size);
+ report->outblob = no_free_ptr(rbuf);
+ report->outblob_len = hdr.report_size;
+
+ certs_size = 0;
+ for (i = 0; i < ext_size / sizeof(struct snp_msg_cert_entry); i++) {
+ struct snp_msg_cert_entry *ent = &cert_table[i];
+
+ if (guid_is_null(&ent->guid) && !ent->offset && !ent->length)
+ break;
+ certs_size = max(certs_size, ent->offset + ent->length);
+ }
+
+ /* Suspicious that the response populated entries without populating size */
+ if (!certs_size && i)
+ dev_warn_ratelimited(snp_dev->dev, "certificate slots conveyed without size\n");
+
+ /* No certs to report */
+ if (!certs_size)
+ return 0;
+
+ /* Suspicious that the certificate blob size contract was violated */
+ if (certs_size > ext_size) {
+ dev_warn_ratelimited(snp_dev->dev, "certificate data truncated\n");
+ certs_size = ext_size;
+ }
+
+ void *cbuf __free(kvfree) = kvzalloc(certs_size, GFP_KERNEL);
+ if (!cbuf)
+ return -ENOMEM;
+
+ memcpy(cbuf, cert_table, certs_size);
+ report->auxblob = no_free_ptr(cbuf);
+ report->auxblob_len = certs_size;
+
+ return 0;
+}
+
+static const struct tsm_ops sev_tsm_ops = {
+ .name = KBUILD_MODNAME,
+ .report_new = sev_report_new,
+};
+
+static void unregister_sev_tsm(void *data)
+{
+ tsm_unregister(&sev_tsm_ops);
+}
+
static int __init sev_guest_probe(struct platform_device *pdev)
{
struct snp_secrets_page_layout *layout;
@@ -816,6 +968,14 @@ static int __init sev_guest_probe(struct platform_device *pdev)
snp_dev->input.resp_gpa = __pa(snp_dev->response);
snp_dev->input.data_gpa = __pa(snp_dev->certs_data);
+ ret = tsm_register(&sev_tsm_ops, snp_dev, &tsm_report_extra_type);
+ if (ret)
+ goto e_free_cert_data;
+
+ ret = devm_add_action_or_reset(&pdev->dev, unregister_sev_tsm, NULL);
+ if (ret)
+ goto e_free_cert_data;
+
ret = misc_register(misc);
if (ret)
goto e_free_cert_data;
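The get_ext_report() rework above lets one handler serve both the SNP_GET_EXT_REPORT ioctl (user-space buffers) and the new in-kernel configfs-tsm path (kernel buffers) by tagging each buffer with sockptr_t. Below is a minimal stand-alone sketch of that tagged-pointer pattern, for illustration only; the struct and helper names are hypothetical, and it deliberately ignores the real user/kernel copy distinction.

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

/* Hypothetical user-space model of the sockptr_t idea: one handle that
 * records whether the buffer came from a "kernel" caller or a "user"
 * caller, so a single helper can serve both call paths. */
struct tagged_ptr {
	void *ptr;
	bool is_kernel;
};

static struct tagged_ptr kernel_ptr(void *p)
{
	return (struct tagged_ptr){ .ptr = p, .is_kernel = true };
}

static struct tagged_ptr user_ptr(void *p)
{
	return (struct tagged_ptr){ .ptr = p, .is_kernel = false };
}

/* Stand-in for copy_from_sockptr(): a real kernel implementation would
 * use copy_from_user() for the user case instead of a plain memcpy(). */
static int copy_from_tagged(void *dst, struct tagged_ptr src, size_t len)
{
	if (!src.ptr)
		return -1;
	memcpy(dst, src.ptr, len);	/* model only: no user/kernel split here */
	return 0;
}

static int handle_request(struct tagged_ptr req)
{
	char buf[16];

	if (copy_from_tagged(buf, req, sizeof(buf)))
		return -1;
	printf("served a %s request: %.15s\n",
	       req.is_kernel ? "kernel" : "user", buf);
	return 0;
}

int main(void)
{
	char payload[16] = "report-request";

	/* ioctl-style caller: buffer conceptually comes from user space */
	handle_request(user_ptr(payload));
	/* configfs-tsm-style caller: buffer lives in the kernel */
	handle_request(kernel_ptr(payload));
	return 0;
}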
diff --git a/drivers/virt/coco/tdx-guest/Kconfig b/drivers/virt/coco/tdx-guest/Kconfig
index 14246fc2fb02..22dd59e19431 100644
--- a/drivers/virt/coco/tdx-guest/Kconfig
+++ b/drivers/virt/coco/tdx-guest/Kconfig
@@ -1,6 +1,7 @@
config TDX_GUEST_DRIVER
tristate "TDX Guest driver"
depends on INTEL_TDX_GUEST
+ select TSM_REPORTS
help
The driver provides userspace interface to communicate with
the TDX module to request the TDX guest details like attestation
diff --git a/drivers/virt/coco/tdx-guest/tdx-guest.c b/drivers/virt/coco/tdx-guest/tdx-guest.c
index 5e44a0fa69bd..1253bf76b570 100644
--- a/drivers/virt/coco/tdx-guest/tdx-guest.c
+++ b/drivers/virt/coco/tdx-guest/tdx-guest.c
@@ -12,12 +12,60 @@
#include <linux/mod_devicetable.h>
#include <linux/string.h>
#include <linux/uaccess.h>
+#include <linux/set_memory.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/tsm.h>
+#include <linux/sizes.h>
#include <uapi/linux/tdx-guest.h>
#include <asm/cpu_device_id.h>
#include <asm/tdx.h>
+/*
+ * Intel's SGX QE implementation generally uses a Quote size of less
+ * than 8K (2K Quote data + ~5K of certificate blob).
+ */
+#define GET_QUOTE_BUF_SIZE SZ_8K
+
+#define GET_QUOTE_CMD_VER 1
+
+/* TDX GetQuote status codes */
+#define GET_QUOTE_SUCCESS 0
+#define GET_QUOTE_IN_FLIGHT 0xffffffffffffffff
+
+/* struct tdx_quote_buf: Format of Quote request buffer.
+ * @version: Quote format version, filled by TD.
+ * @status: Status code of Quote request, filled by VMM.
+ * @in_len: Length of TDREPORT, filled by TD.
+ * @out_len: Length of Quote data, filled by VMM.
+ * @data: Quote data on output or TDREPORT on input.
+ *
+ * More details of the Quote request buffer can be found in the TDX
+ * Guest-Host Communication Interface (GHCI) specification for Intel TDX 1.0,
+ * section titled "TDG.VP.VMCALL<GetQuote>".
+ */
+struct tdx_quote_buf {
+ u64 version;
+ u64 status;
+ u32 in_len;
+ u32 out_len;
+ u8 data[];
+};
+
+/* Quote data buffer */
+static void *quote_data;
+
+/* Lock to streamline quote requests */
+static DEFINE_MUTEX(quote_lock);
+
+/*
+ * GetQuote request timeout in seconds. Expect that 30 seconds
+ * is enough time for QE to respond to any Quote requests.
+ */
+static u32 getquote_timeout = 30;
+
static long tdx_get_report0(struct tdx_report_req __user *req)
{
u8 *reportdata, *tdreport;
@@ -53,6 +101,154 @@ out:
return ret;
}
+static void free_quote_buf(void *buf)
+{
+ size_t len = PAGE_ALIGN(GET_QUOTE_BUF_SIZE);
+ unsigned int count = len >> PAGE_SHIFT;
+
+ if (set_memory_encrypted((unsigned long)buf, count)) {
+ pr_err("Failed to restore encryption mask for Quote buffer, leak it\n");
+ return;
+ }
+
+ free_pages_exact(buf, len);
+}
+
+static void *alloc_quote_buf(void)
+{
+ size_t len = PAGE_ALIGN(GET_QUOTE_BUF_SIZE);
+ unsigned int count = len >> PAGE_SHIFT;
+ void *addr;
+
+ addr = alloc_pages_exact(len, GFP_KERNEL | __GFP_ZERO);
+ if (!addr)
+ return NULL;
+
+ if (set_memory_decrypted((unsigned long)addr, count)) {
+ free_pages_exact(addr, len);
+ return NULL;
+ }
+
+ return addr;
+}
+
+/*
+ * wait_for_quote_completion() - Wait for Quote request completion
+ * @quote_buf: Address of Quote buffer.
+ * @timeout: Timeout in seconds to wait for the Quote generation.
+ *
+ * As per the TDX GHCI v1.0 specification, section titled "TDG.VP.VMCALL<GetQuote>",
+ * the status field in the Quote buffer is set to GET_QUOTE_IN_FLIGHT while the
+ * VMM processes the GetQuote request, and is changed to a success or error code
+ * once processing is complete. So wait until the status changes from
+ * GET_QUOTE_IN_FLIGHT or the request times out.
+ */
+static int wait_for_quote_completion(struct tdx_quote_buf *quote_buf, u32 timeout)
+{
+ int i = 0;
+
+ /*
+ * Quote requests usually take a few seconds to complete, so waking up
+ * once per second to recheck the status is fine for this use case.
+ */
+ while (quote_buf->status == GET_QUOTE_IN_FLIGHT && i++ < timeout) {
+ if (msleep_interruptible(MSEC_PER_SEC))
+ return -EINTR;
+ }
+
+ return (i == timeout) ? -ETIMEDOUT : 0;
+}
+
+static int tdx_report_new(struct tsm_report *report, void *data)
+{
+ u8 *buf, *reportdata = NULL, *tdreport = NULL;
+ struct tdx_quote_buf *quote_buf = quote_data;
+ struct tsm_desc *desc = &report->desc;
+ int ret;
+ u64 err;
+
+ /* TODO: switch to guard(mutex_intr) */
+ if (mutex_lock_interruptible(&quote_lock))
+ return -EINTR;
+
+ /*
+ * If the previous request timed out or was interrupted, and the
+ * Quote buffer status is still GET_QUOTE_IN_FLIGHT (owned by the
+ * VMM), don't permit any new request.
+ */
+ if (quote_buf->status == GET_QUOTE_IN_FLIGHT) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ if (desc->inblob_len != TDX_REPORTDATA_LEN) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ reportdata = kmalloc(TDX_REPORTDATA_LEN, GFP_KERNEL);
+ if (!reportdata) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ tdreport = kzalloc(TDX_REPORT_LEN, GFP_KERNEL);
+ if (!tdreport) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ memcpy(reportdata, desc->inblob, desc->inblob_len);
+
+ /* Generate TDREPORT0 using "TDG.MR.REPORT" TDCALL */
+ ret = tdx_mcall_get_report0(reportdata, tdreport);
+ if (ret) {
+ pr_err("GetReport call failed\n");
+ goto done;
+ }
+
+ memset(quote_data, 0, GET_QUOTE_BUF_SIZE);
+
+ /* Update Quote buffer header */
+ quote_buf->version = GET_QUOTE_CMD_VER;
+ quote_buf->in_len = TDX_REPORT_LEN;
+
+ memcpy(quote_buf->data, tdreport, TDX_REPORT_LEN);
+
+ err = tdx_hcall_get_quote(quote_data, GET_QUOTE_BUF_SIZE);
+ if (err) {
+ pr_err("GetQuote hypercall failed, status:%llx\n", err);
+ ret = -EIO;
+ goto done;
+ }
+
+ ret = wait_for_quote_completion(quote_buf, getquote_timeout);
+ if (ret) {
+ pr_err("GetQuote request timedout\n");
+ goto done;
+ }
+
+ buf = kvmemdup(quote_buf->data, quote_buf->out_len, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ report->outblob = buf;
+ report->outblob_len = quote_buf->out_len;
+
+ /*
+ * TODO: parse the PEM-formatted cert chain out of the quote buffer when
+ * provided
+ */
+done:
+ mutex_unlock(&quote_lock);
+ kfree(reportdata);
+ kfree(tdreport);
+
+ return ret;
+}
+
static long tdx_guest_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -82,17 +278,48 @@ static const struct x86_cpu_id tdx_guest_ids[] = {
};
MODULE_DEVICE_TABLE(x86cpu, tdx_guest_ids);
+static const struct tsm_ops tdx_tsm_ops = {
+ .name = KBUILD_MODNAME,
+ .report_new = tdx_report_new,
+};
+
static int __init tdx_guest_init(void)
{
+ int ret;
+
if (!x86_match_cpu(tdx_guest_ids))
return -ENODEV;
- return misc_register(&tdx_misc_dev);
+ ret = misc_register(&tdx_misc_dev);
+ if (ret)
+ return ret;
+
+ quote_data = alloc_quote_buf();
+ if (!quote_data) {
+ pr_err("Failed to allocate Quote buffer\n");
+ ret = -ENOMEM;
+ goto free_misc;
+ }
+
+ ret = tsm_register(&tdx_tsm_ops, NULL, NULL);
+ if (ret)
+ goto free_quote;
+
+ return 0;
+
+free_quote:
+ free_quote_buf(quote_data);
+free_misc:
+ misc_deregister(&tdx_misc_dev);
+
+ return ret;
}
module_init(tdx_guest_init);
static void __exit tdx_guest_exit(void)
{
+ tsm_unregister(&tdx_tsm_ops);
+ free_quote_buf(quote_data);
misc_deregister(&tdx_misc_dev);
}
module_exit(tdx_guest_exit);
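The GetQuote exchange added above is a shared-buffer handshake: the guest fills the header and TDREPORT, issues the hypercall, then polls the status word until the VMM moves it off GET_QUOTE_IN_FLIGHT or the timeout expires. A minimal user-space model of that polling contract follows; the VMM side is simulated by a thread, and the layout, names, and initial status value are illustrative simplifications, not the GHCI ABI.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>

#define QUOTE_IN_FLIGHT	0xffffffffffffffffULL	/* mirrors GET_QUOTE_IN_FLIGHT */
#define QUOTE_SUCCESS	0ULL

/* Illustrative header layout, loosely modeled on struct tdx_quote_buf */
struct quote_hdr {
	uint64_t version;
	uint64_t status;
	uint32_t in_len;
	uint32_t out_len;
	uint8_t  data[64];
};

/* Simulated "VMM": pretends to build a quote, then reports success */
static void *fake_vmm(void *arg)
{
	struct quote_hdr *q = arg;

	sleep(1);				/* pretend quote generation takes a while */
	q->out_len = 32;
	memset(q->data, 0xab, q->out_len);	/* fake quote bytes */
	/* publish the result before flipping status, like the real protocol */
	__atomic_store_n(&q->status, QUOTE_SUCCESS, __ATOMIC_RELEASE);
	return NULL;
}

/* Guest-side wait loop, shaped like wait_for_quote_completion() */
static int wait_for_quote(struct quote_hdr *q, unsigned int timeout_s)
{
	for (unsigned int i = 0; i < timeout_s; i++) {
		if (__atomic_load_n(&q->status, __ATOMIC_ACQUIRE) != QUOTE_IN_FLIGHT)
			return 0;
		sleep(1);			/* poll once per second */
	}
	return -1;				/* timed out, buffer still owned by the "VMM" */
}

int main(void)
{
	/* Simplification: status starts as IN_FLIGHT; in the real flow the
	 * VMM takes ownership of the buffer after the GetQuote hypercall. */
	struct quote_hdr q = {
		.version = 1,
		.status  = QUOTE_IN_FLIGHT,
		.in_len  = sizeof(q.data),
	};
	pthread_t vmm;

	pthread_create(&vmm, NULL, fake_vmm, &q);
	if (wait_for_quote(&q, 30))
		fprintf(stderr, "quote request timed out\n");
	else
		printf("quote ready, %u bytes\n", (unsigned int)q.out_len);
	pthread_join(&vmm, NULL);
	return 0;
}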
diff --git a/drivers/virt/coco/tsm.c b/drivers/virt/coco/tsm.c
new file mode 100644
index 000000000000..d1c2db83a8ca
--- /dev/null
+++ b/drivers/virt/coco/tsm.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation. All rights reserved. */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/tsm.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/rwsem.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/cleanup.h>
+#include <linux/configfs.h>
+
+static struct tsm_provider {
+ const struct tsm_ops *ops;
+ const struct config_item_type *type;
+ void *data;
+} provider;
+static DECLARE_RWSEM(tsm_rwsem);
+
+/**
+ * DOC: Trusted Security Module (TSM) Attestation Report Interface
+ *
+ * The TSM report interface is a common provider of blobs that facilitate
+ * attestation of a TVM (confidential computing guest) by an attestation
+ * service. A TSM report combines a user-defined blob (likely a public-key with
+ * a nonce for a key-exchange protocol) with a signed attestation report. That
+ * combined blob is then used to obtain secrets provided by an agent that can
+ * validate the attestation report. The expectation is that this interface is
+ * invoked infrequently, however configfs allows for multiple agents to
+ * own their own report generation instances to generate reports as
+ * often as needed.
+ *
+ * The attestation report format is TSM provider specific; if and when a
+ * standard materializes it can be published instead of the vendor layout.
+ * Until then the 'provider' attribute indicates the format of 'outblob',
+ * and optionally 'auxblob'.
+ */
+
+struct tsm_report_state {
+ struct tsm_report report;
+ unsigned long write_generation;
+ unsigned long read_generation;
+ struct config_item cfg;
+};
+
+enum tsm_data_select {
+ TSM_REPORT,
+ TSM_CERTS,
+};
+
+static struct tsm_report *to_tsm_report(struct config_item *cfg)
+{
+ struct tsm_report_state *state =
+ container_of(cfg, struct tsm_report_state, cfg);
+
+ return &state->report;
+}
+
+static struct tsm_report_state *to_state(struct tsm_report *report)
+{
+ return container_of(report, struct tsm_report_state, report);
+}
+
+static int try_advance_write_generation(struct tsm_report *report)
+{
+ struct tsm_report_state *state = to_state(report);
+
+ lockdep_assert_held_write(&tsm_rwsem);
+
+ /*
+ * Malicious or broken userspace has written enough times for
+ * read_generation == write_generation by modular arithmetic without an
+ * interim read. Stop accepting updates until the current report
+ * configuration is read.
+ */
+ if (state->write_generation == state->read_generation - 1)
+ return -EBUSY;
+ state->write_generation++;
+ return 0;
+}
+
+static ssize_t tsm_report_privlevel_store(struct config_item *cfg,
+ const char *buf, size_t len)
+{
+ struct tsm_report *report = to_tsm_report(cfg);
+ unsigned int val;
+ int rc;
+
+ rc = kstrtouint(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ /*
+ * The valid privilege levels that a TSM might accept, if it accepts a
+ * privilege level setting at all, are a max of TSM_PRIVLEVEL_MAX (see
+ * SEV-SNP GHCB) and a minimum of a TSM selected floor value no less
+ * than 0.
+ */
+ if (provider.ops->privlevel_floor > val || val > TSM_PRIVLEVEL_MAX)
+ return -EINVAL;
+
+ guard(rwsem_write)(&tsm_rwsem);
+ rc = try_advance_write_generation(report);
+ if (rc)
+ return rc;
+ report->desc.privlevel = val;
+
+ return len;
+}
+CONFIGFS_ATTR_WO(tsm_report_, privlevel);
+
+static ssize_t tsm_report_privlevel_floor_show(struct config_item *cfg,
+ char *buf)
+{
+ guard(rwsem_read)(&tsm_rwsem);
+ return sysfs_emit(buf, "%u\n", provider.ops->privlevel_floor);
+}
+CONFIGFS_ATTR_RO(tsm_report_, privlevel_floor);
+
+static ssize_t tsm_report_inblob_write(struct config_item *cfg,
+ const void *buf, size_t count)
+{
+ struct tsm_report *report = to_tsm_report(cfg);
+ int rc;
+
+ guard(rwsem_write)(&tsm_rwsem);
+ rc = try_advance_write_generation(report);
+ if (rc)
+ return rc;
+
+ report->desc.inblob_len = count;
+ memcpy(report->desc.inblob, buf, count);
+ return count;
+}
+CONFIGFS_BIN_ATTR_WO(tsm_report_, inblob, NULL, TSM_INBLOB_MAX);
+
+static ssize_t tsm_report_generation_show(struct config_item *cfg, char *buf)
+{
+ struct tsm_report *report = to_tsm_report(cfg);
+ struct tsm_report_state *state = to_state(report);
+
+ guard(rwsem_read)(&tsm_rwsem);
+ return sysfs_emit(buf, "%lu\n", state->write_generation);
+}
+CONFIGFS_ATTR_RO(tsm_report_, generation);
+
+static ssize_t tsm_report_provider_show(struct config_item *cfg, char *buf)
+{
+ guard(rwsem_read)(&tsm_rwsem);
+ return sysfs_emit(buf, "%s\n", provider.ops->name);
+}
+CONFIGFS_ATTR_RO(tsm_report_, provider);
+
+static ssize_t __read_report(struct tsm_report *report, void *buf, size_t count,
+ enum tsm_data_select select)
+{
+ loff_t offset = 0;
+ ssize_t len;
+ u8 *out;
+
+ if (select == TSM_REPORT) {
+ out = report->outblob;
+ len = report->outblob_len;
+ } else {
+ out = report->auxblob;
+ len = report->auxblob_len;
+ }
+
+ /*
+ * Recall that a NULL @buf is configfs requesting the size of
+ * the buffer.
+ */
+ if (!buf)
+ return len;
+ return memory_read_from_buffer(buf, count, &offset, out, len);
+}
+
+static ssize_t read_cached_report(struct tsm_report *report, void *buf,
+ size_t count, enum tsm_data_select select)
+{
+ struct tsm_report_state *state = to_state(report);
+
+ guard(rwsem_read)(&tsm_rwsem);
+ if (!report->desc.inblob_len)
+ return -EINVAL;
+
+ /*
+ * A given TSM backend always fills in ->outblob regardless of
+ * whether the report includes an auxblob or not.
+ */
+ if (!report->outblob ||
+ state->read_generation != state->write_generation)
+ return -EWOULDBLOCK;
+
+ return __read_report(report, buf, count, select);
+}
+
+static ssize_t tsm_report_read(struct tsm_report *report, void *buf,
+ size_t count, enum tsm_data_select select)
+{
+ struct tsm_report_state *state = to_state(report);
+ const struct tsm_ops *ops;
+ ssize_t rc;
+
+ /* try to read from the existing report if present and valid... */
+ rc = read_cached_report(report, buf, count, select);
+ if (rc >= 0 || rc != -EWOULDBLOCK)
+ return rc;
+
+ /* slow path, report may need to be regenerated... */
+ guard(rwsem_write)(&tsm_rwsem);
+ ops = provider.ops;
+ if (!ops)
+ return -ENOTTY;
+ if (!report->desc.inblob_len)
+ return -EINVAL;
+
+ /* did another thread already generate this report? */
+ if (report->outblob &&
+ state->read_generation == state->write_generation)
+ goto out;
+
+ kvfree(report->outblob);
+ kvfree(report->auxblob);
+ report->outblob = NULL;
+ report->auxblob = NULL;
+ rc = ops->report_new(report, provider.data);
+ if (rc < 0)
+ return rc;
+ state->read_generation = state->write_generation;
+out:
+ return __read_report(report, buf, count, select);
+}
+
+static ssize_t tsm_report_outblob_read(struct config_item *cfg, void *buf,
+ size_t count)
+{
+ struct tsm_report *report = to_tsm_report(cfg);
+
+ return tsm_report_read(report, buf, count, TSM_REPORT);
+}
+CONFIGFS_BIN_ATTR_RO(tsm_report_, outblob, NULL, TSM_OUTBLOB_MAX);
+
+static ssize_t tsm_report_auxblob_read(struct config_item *cfg, void *buf,
+ size_t count)
+{
+ struct tsm_report *report = to_tsm_report(cfg);
+
+ return tsm_report_read(report, buf, count, TSM_CERTS);
+}
+CONFIGFS_BIN_ATTR_RO(tsm_report_, auxblob, NULL, TSM_OUTBLOB_MAX);
+
+#define TSM_DEFAULT_ATTRS() \
+ &tsm_report_attr_generation, \
+ &tsm_report_attr_provider
+
+static struct configfs_attribute *tsm_report_attrs[] = {
+ TSM_DEFAULT_ATTRS(),
+ NULL,
+};
+
+static struct configfs_attribute *tsm_report_extra_attrs[] = {
+ TSM_DEFAULT_ATTRS(),
+ &tsm_report_attr_privlevel,
+ &tsm_report_attr_privlevel_floor,
+ NULL,
+};
+
+#define TSM_DEFAULT_BIN_ATTRS() \
+ &tsm_report_attr_inblob, \
+ &tsm_report_attr_outblob
+
+static struct configfs_bin_attribute *tsm_report_bin_attrs[] = {
+ TSM_DEFAULT_BIN_ATTRS(),
+ NULL,
+};
+
+static struct configfs_bin_attribute *tsm_report_bin_extra_attrs[] = {
+ TSM_DEFAULT_BIN_ATTRS(),
+ &tsm_report_attr_auxblob,
+ NULL,
+};
+
+static void tsm_report_item_release(struct config_item *cfg)
+{
+ struct tsm_report *report = to_tsm_report(cfg);
+ struct tsm_report_state *state = to_state(report);
+
+ kvfree(report->auxblob);
+ kvfree(report->outblob);
+ kfree(state);
+}
+
+static struct configfs_item_operations tsm_report_item_ops = {
+ .release = tsm_report_item_release,
+};
+
+const struct config_item_type tsm_report_default_type = {
+ .ct_owner = THIS_MODULE,
+ .ct_bin_attrs = tsm_report_bin_attrs,
+ .ct_attrs = tsm_report_attrs,
+ .ct_item_ops = &tsm_report_item_ops,
+};
+EXPORT_SYMBOL_GPL(tsm_report_default_type);
+
+const struct config_item_type tsm_report_extra_type = {
+ .ct_owner = THIS_MODULE,
+ .ct_bin_attrs = tsm_report_bin_extra_attrs,
+ .ct_attrs = tsm_report_extra_attrs,
+ .ct_item_ops = &tsm_report_item_ops,
+};
+EXPORT_SYMBOL_GPL(tsm_report_extra_type);
+
+static struct config_item *tsm_report_make_item(struct config_group *group,
+ const char *name)
+{
+ struct tsm_report_state *state;
+
+ guard(rwsem_read)(&tsm_rwsem);
+ if (!provider.ops)
+ return ERR_PTR(-ENXIO);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ config_item_init_type_name(&state->cfg, name, provider.type);
+ return &state->cfg;
+}
+
+static struct configfs_group_operations tsm_report_group_ops = {
+ .make_item = tsm_report_make_item,
+};
+
+static const struct config_item_type tsm_reports_type = {
+ .ct_owner = THIS_MODULE,
+ .ct_group_ops = &tsm_report_group_ops,
+};
+
+static const struct config_item_type tsm_root_group_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem tsm_configfs = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "tsm",
+ .ci_type = &tsm_root_group_type,
+ },
+ },
+ .su_mutex = __MUTEX_INITIALIZER(tsm_configfs.su_mutex),
+};
+
+int tsm_register(const struct tsm_ops *ops, void *priv,
+ const struct config_item_type *type)
+{
+ const struct tsm_ops *conflict;
+
+ if (!type)
+ type = &tsm_report_default_type;
+ if (!(type == &tsm_report_default_type || type == &tsm_report_extra_type))
+ return -EINVAL;
+
+ guard(rwsem_write)(&tsm_rwsem);
+ conflict = provider.ops;
+ if (conflict) {
+ pr_err("\"%s\" ops already registered\n", conflict->name);
+ return -EBUSY;
+ }
+
+ provider.ops = ops;
+ provider.data = priv;
+ provider.type = type;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tsm_register);
+
+int tsm_unregister(const struct tsm_ops *ops)
+{
+ guard(rwsem_write)(&tsm_rwsem);
+ if (ops != provider.ops)
+ return -EBUSY;
+ provider.ops = NULL;
+ provider.data = NULL;
+ provider.type = NULL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tsm_unregister);
+
+static struct config_group *tsm_report_group;
+
+static int __init tsm_init(void)
+{
+ struct config_group *root = &tsm_configfs.su_group;
+ struct config_group *tsm;
+ int rc;
+
+ config_group_init(root);
+ rc = configfs_register_subsystem(&tsm_configfs);
+ if (rc)
+ return rc;
+
+ tsm = configfs_register_default_group(root, "report",
+ &tsm_reports_type);
+ if (IS_ERR(tsm)) {
+ configfs_unregister_subsystem(&tsm_configfs);
+ return PTR_ERR(tsm);
+ }
+ tsm_report_group = tsm;
+
+ return 0;
+}
+module_init(tsm_init);
+
+static void __exit tsm_exit(void)
+{
+ configfs_unregister_default_group(tsm_report_group);
+ configfs_unregister_subsystem(&tsm_configfs);
+}
+module_exit(tsm_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Provide Trusted Security Module attestation reports via configfs");
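The configfs interface implemented by tsm.c is driven entirely through ordinary file operations: mkdir an instance under the report group, write the caller's nonce or public key into inblob, then read outblob (and auxblob for providers registered with the extra type). A hedged user-space sketch, assuming configfs is mounted at the conventional /sys/kernel/config path and keeping error handling minimal:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>

#define REPORT_DIR "/sys/kernel/config/tsm/report/example"	/* instance name is arbitrary */

static int write_file(const char *path, const void *buf, size_t len)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, buf, len);
	close(fd);
	return n == (ssize_t)len ? 0 : -1;
}

int main(void)
{
	/* Both providers above expect exactly 64 bytes of user data */
	unsigned char nonce[64] = "caller-chosen nonce / public key";
	/* Sized generously for illustration; the kernel caps reads at TSM_OUTBLOB_MAX */
	static unsigned char outblob[64 * 1024];
	ssize_t n;
	int fd;

	/* Creating the directory instantiates a report via tsm_report_make_item() */
	if (mkdir(REPORT_DIR, 0700) && errno != EEXIST) {
		perror("mkdir");
		return 1;
	}

	if (write_file(REPORT_DIR "/inblob", nonce, sizeof(nonce))) {
		perror("inblob");
		return 1;
	}

	/* Reading outblob triggers ->report_new() if the cached report is stale */
	fd = open(REPORT_DIR "/outblob", O_RDONLY);
	if (fd < 0) {
		perror("outblob");
		return 1;
	}
	n = read(fd, outblob, sizeof(outblob));
	close(fd);
	if (n < 0) {
		perror("read");
		return 1;
	}
	printf("attestation report: %zd bytes\n", n);
	return 0;
}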
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 44dcb9e7b55e..1fe93e93f5bc 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -745,7 +745,7 @@ static void report_free_page_func(struct work_struct *work)
* 2) update the host about the old page removed from vb->pages list;
*
* This function performs the balloon page migration task.
- * Called through balloon_mapping->a_ops->migratepage
+ * Called through movable_operations->migrate_page
*/
static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
struct page *newpage, struct page *page, enum migrate_mode mode)
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index d6bb68ba84e5..ee6a386d250b 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -39,6 +39,39 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
__virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
}
+static int __vp_check_common_size_one_feature(struct virtio_device *vdev, u32 fbit,
+ u32 offset, const char *fname)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ if (!__virtio_test_bit(vdev, fbit))
+ return 0;
+
+ if (likely(vp_dev->mdev.common_len >= offset))
+ return 0;
+
+ dev_err(&vdev->dev,
+ "virtio: common cfg size(%zu) does not match the feature %s\n",
+ vp_dev->mdev.common_len, fname);
+
+ return -EINVAL;
+}
+
+#define vp_check_common_size_one_feature(vdev, fbit, field) \
+ __vp_check_common_size_one_feature(vdev, fbit, \
+ offsetofend(struct virtio_pci_modern_common_cfg, field), #fbit)
+
+static int vp_check_common_size(struct virtio_device *vdev)
+{
+ if (vp_check_common_size_one_feature(vdev, VIRTIO_F_NOTIF_CONFIG_DATA, queue_notify_data))
+ return -EINVAL;
+
+ if (vp_check_common_size_one_feature(vdev, VIRTIO_F_RING_RESET, queue_reset))
+ return -EINVAL;
+
+ return 0;
+}
+
/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
@@ -57,6 +90,9 @@ static int vp_finalize_features(struct virtio_device *vdev)
return -EINVAL;
}
+ if (vp_check_common_size(vdev))
+ return -EINVAL;
+
vp_modern_set_features(&vp_dev->mdev, vdev->features);
return 0;
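vp_check_common_size() rejects a device that offers VIRTIO_F_NOTIF_CONFIG_DATA or VIRTIO_F_RING_RESET while exposing a common config region too short to contain the corresponding field; offsetofend() yields the minimum length that must be mapped. A small stand-alone sketch of the same length check, using an equivalent locally defined offsetofend() and an illustrative (non-virtio) structure:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

/* Equivalent to the kernel's offsetofend(): offset of the first byte after MEMBER */
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

/* Illustrative "extended config" layout; field names are made up */
struct demo_cfg {
	uint32_t base_word;
	uint16_t notify_data;	/* needed only when FEATURE_NOTIF_DATA is offered */
	uint16_t queue_reset;	/* needed only when FEATURE_RING_RESET is offered */
};

enum { FEATURE_NOTIF_DATA = 1 << 0, FEATURE_RING_RESET = 1 << 1 };

/* Reject a feature whose backing field falls outside the mapped length */
static bool cfg_len_ok(unsigned int features, size_t mapped_len)
{
	if ((features & FEATURE_NOTIF_DATA) &&
	    mapped_len < offsetofend(struct demo_cfg, notify_data))
		return false;
	if ((features & FEATURE_RING_RESET) &&
	    mapped_len < offsetofend(struct demo_cfg, queue_reset))
		return false;
	return true;
}

int main(void)
{
	/* A device offering RING_RESET must map at least 8 bytes of this layout */
	printf("len 6, ring_reset: %s\n",
	       cfg_len_ok(FEATURE_RING_RESET, 6) ? "ok" : "too short");
	printf("len 8, ring_reset: %s\n",
	       cfg_len_ok(FEATURE_RING_RESET, 8) ? "ok" : "too short");
	return 0;
}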
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index 9cb601e16688..e2a1fe7bb66c 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -203,6 +203,10 @@ static inline void check_offsets(void)
offsetof(struct virtio_pci_common_cfg, queue_used_lo));
BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
offsetof(struct virtio_pci_common_cfg, queue_used_hi));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NDATA !=
+ offsetof(struct virtio_pci_modern_common_cfg, queue_notify_data));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_RESET !=
+ offsetof(struct virtio_pci_modern_common_cfg, queue_reset));
}
/*
@@ -292,7 +296,7 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
mdev->common = vp_modern_map_capability(mdev, common,
sizeof(struct virtio_pci_common_cfg), 4,
0, sizeof(struct virtio_pci_modern_common_cfg),
- NULL, NULL);
+ &mdev->common_len, NULL);
if (!mdev->common)
goto err_map_common;
mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index 06ce6d8c2e00..8d63e5923d24 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -100,7 +100,7 @@ static void virtio_vdpa_reset(struct virtio_device *vdev)
{
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
- vdpa_reset(vdpa);
+ vdpa_reset(vdpa, 0);
}
static bool virtio_vdpa_notify(struct virtqueue *vq)
diff --git a/drivers/vlynq/Kconfig b/drivers/vlynq/Kconfig
deleted file mode 100644
index e7f9492a0b04..000000000000
--- a/drivers/vlynq/Kconfig
+++ /dev/null
@@ -1,21 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-menu "TI VLYNQ"
- depends on AR7
-
-config VLYNQ
- bool "TI VLYNQ bus support"
- help
- Support for Texas Instruments(R) VLYNQ bus.
- The VLYNQ bus is a high-speed, serial and packetized
- data bus which allows external peripherals of a SoC
- to appear into the system's main memory.
-
- If unsure, say N
-
-config VLYNQ_DEBUG
- bool "VLYNQ bus debug"
- depends on VLYNQ && DEBUG_KERNEL
- help
- Turn on VLYNQ bus debugging.
-
-endmenu
diff --git a/drivers/vlynq/Makefile b/drivers/vlynq/Makefile
deleted file mode 100644
index d9ce5b2b5ce0..000000000000
--- a/drivers/vlynq/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for kernel vlynq drivers
-#
-
-obj-$(CONFIG_VLYNQ) += vlynq.o
diff --git a/drivers/vlynq/vlynq.c b/drivers/vlynq/vlynq.c
deleted file mode 100644
index 4af6615808cc..000000000000
--- a/drivers/vlynq/vlynq.c
+++ /dev/null
@@ -1,799 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2006, 2007 Eugene Konev <ejka@openwrt.org>
- *
- * Parts of the VLYNQ specification can be found here:
- * http://www.ti.com/litv/pdf/sprue36a
- */
-
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/irq.h>
-
-#include <linux/vlynq.h>
-
-#define VLYNQ_CTRL_PM_ENABLE 0x80000000
-#define VLYNQ_CTRL_CLOCK_INT 0x00008000
-#define VLYNQ_CTRL_CLOCK_DIV(x) (((x) & 7) << 16)
-#define VLYNQ_CTRL_INT_LOCAL 0x00004000
-#define VLYNQ_CTRL_INT_ENABLE 0x00002000
-#define VLYNQ_CTRL_INT_VECTOR(x) (((x) & 0x1f) << 8)
-#define VLYNQ_CTRL_INT2CFG 0x00000080
-#define VLYNQ_CTRL_RESET 0x00000001
-
-#define VLYNQ_CTRL_CLOCK_MASK (0x7 << 16)
-
-#define VLYNQ_INT_OFFSET 0x00000014
-#define VLYNQ_REMOTE_OFFSET 0x00000080
-
-#define VLYNQ_STATUS_LINK 0x00000001
-#define VLYNQ_STATUS_LERROR 0x00000080
-#define VLYNQ_STATUS_RERROR 0x00000100
-
-#define VINT_ENABLE 0x00000100
-#define VINT_TYPE_EDGE 0x00000080
-#define VINT_LEVEL_LOW 0x00000040
-#define VINT_VECTOR(x) ((x) & 0x1f)
-#define VINT_OFFSET(irq) (8 * ((irq) % 4))
-
-#define VLYNQ_AUTONEGO_V2 0x00010000
-
-struct vlynq_regs {
- u32 revision;
- u32 control;
- u32 status;
- u32 int_prio;
- u32 int_status;
- u32 int_pending;
- u32 int_ptr;
- u32 tx_offset;
- struct vlynq_mapping rx_mapping[4];
- u32 chip;
- u32 autonego;
- u32 unused[6];
- u32 int_device[8];
-};
-
-#ifdef CONFIG_VLYNQ_DEBUG
-static void vlynq_dump_regs(struct vlynq_device *dev)
-{
- int i;
-
- printk(KERN_DEBUG "VLYNQ local=%p remote=%p\n",
- dev->local, dev->remote);
- for (i = 0; i < 32; i++) {
- printk(KERN_DEBUG "VLYNQ: local %d: %08x\n",
- i + 1, ((u32 *)dev->local)[i]);
- printk(KERN_DEBUG "VLYNQ: remote %d: %08x\n",
- i + 1, ((u32 *)dev->remote)[i]);
- }
-}
-
-static void vlynq_dump_mem(u32 *base, int count)
-{
- int i;
-
- for (i = 0; i < (count + 3) / 4; i++) {
- if (i % 4 == 0)
- printk(KERN_DEBUG "\nMEM[0x%04x]:", i * 4);
- printk(KERN_DEBUG " 0x%08x", *(base + i));
- }
- printk(KERN_DEBUG "\n");
-}
-#endif
-
-/* Check the VLYNQ link status with a given device */
-static int vlynq_linked(struct vlynq_device *dev)
-{
- int i;
-
- for (i = 0; i < 100; i++)
- if (readl(&dev->local->status) & VLYNQ_STATUS_LINK)
- return 1;
- else
- cpu_relax();
-
- return 0;
-}
-
-static void vlynq_reset(struct vlynq_device *dev)
-{
- writel(readl(&dev->local->control) | VLYNQ_CTRL_RESET,
- &dev->local->control);
-
- /* Wait for the devices to finish resetting */
- msleep(5);
-
- /* Remove reset bit */
- writel(readl(&dev->local->control) & ~VLYNQ_CTRL_RESET,
- &dev->local->control);
-
- /* Give some time for the devices to settle */
- msleep(5);
-}
-
-static void vlynq_irq_unmask(struct irq_data *d)
-{
- struct vlynq_device *dev = irq_data_get_irq_chip_data(d);
- int virq;
- u32 val;
-
- BUG_ON(!dev);
- virq = d->irq - dev->irq_start;
- val = readl(&dev->remote->int_device[virq >> 2]);
- val |= (VINT_ENABLE | virq) << VINT_OFFSET(virq);
- writel(val, &dev->remote->int_device[virq >> 2]);
-}
-
-static void vlynq_irq_mask(struct irq_data *d)
-{
- struct vlynq_device *dev = irq_data_get_irq_chip_data(d);
- int virq;
- u32 val;
-
- BUG_ON(!dev);
- virq = d->irq - dev->irq_start;
- val = readl(&dev->remote->int_device[virq >> 2]);
- val &= ~(VINT_ENABLE << VINT_OFFSET(virq));
- writel(val, &dev->remote->int_device[virq >> 2]);
-}
-
-static int vlynq_irq_type(struct irq_data *d, unsigned int flow_type)
-{
- struct vlynq_device *dev = irq_data_get_irq_chip_data(d);
- int virq;
- u32 val;
-
- BUG_ON(!dev);
- virq = d->irq - dev->irq_start;
- val = readl(&dev->remote->int_device[virq >> 2]);
- switch (flow_type & IRQ_TYPE_SENSE_MASK) {
- case IRQ_TYPE_EDGE_RISING:
- case IRQ_TYPE_EDGE_FALLING:
- case IRQ_TYPE_EDGE_BOTH:
- val |= VINT_TYPE_EDGE << VINT_OFFSET(virq);
- val &= ~(VINT_LEVEL_LOW << VINT_OFFSET(virq));
- break;
- case IRQ_TYPE_LEVEL_HIGH:
- val &= ~(VINT_TYPE_EDGE << VINT_OFFSET(virq));
- val &= ~(VINT_LEVEL_LOW << VINT_OFFSET(virq));
- break;
- case IRQ_TYPE_LEVEL_LOW:
- val &= ~(VINT_TYPE_EDGE << VINT_OFFSET(virq));
- val |= VINT_LEVEL_LOW << VINT_OFFSET(virq);
- break;
- default:
- return -EINVAL;
- }
- writel(val, &dev->remote->int_device[virq >> 2]);
- return 0;
-}
-
-static void vlynq_local_ack(struct irq_data *d)
-{
- struct vlynq_device *dev = irq_data_get_irq_chip_data(d);
- u32 status = readl(&dev->local->status);
-
- pr_debug("%s: local status: 0x%08x\n",
- dev_name(&dev->dev), status);
- writel(status, &dev->local->status);
-}
-
-static void vlynq_remote_ack(struct irq_data *d)
-{
- struct vlynq_device *dev = irq_data_get_irq_chip_data(d);
- u32 status = readl(&dev->remote->status);
-
- pr_debug("%s: remote status: 0x%08x\n",
- dev_name(&dev->dev), status);
- writel(status, &dev->remote->status);
-}
-
-static irqreturn_t vlynq_irq(int irq, void *dev_id)
-{
- struct vlynq_device *dev = dev_id;
- u32 status;
- int virq = 0;
-
- status = readl(&dev->local->int_status);
- writel(status, &dev->local->int_status);
-
- if (unlikely(!status))
- spurious_interrupt();
-
- while (status) {
- if (status & 1)
- do_IRQ(dev->irq_start + virq);
- status >>= 1;
- virq++;
- }
-
- return IRQ_HANDLED;
-}
-
-static struct irq_chip vlynq_irq_chip = {
- .name = "vlynq",
- .irq_unmask = vlynq_irq_unmask,
- .irq_mask = vlynq_irq_mask,
- .irq_set_type = vlynq_irq_type,
-};
-
-static struct irq_chip vlynq_local_chip = {
- .name = "vlynq local error",
- .irq_unmask = vlynq_irq_unmask,
- .irq_mask = vlynq_irq_mask,
- .irq_ack = vlynq_local_ack,
-};
-
-static struct irq_chip vlynq_remote_chip = {
- .name = "vlynq local error",
- .irq_unmask = vlynq_irq_unmask,
- .irq_mask = vlynq_irq_mask,
- .irq_ack = vlynq_remote_ack,
-};
-
-static int vlynq_setup_irq(struct vlynq_device *dev)
-{
- u32 val;
- int i, virq;
-
- if (dev->local_irq == dev->remote_irq) {
- printk(KERN_ERR
- "%s: local vlynq irq should be different from remote\n",
- dev_name(&dev->dev));
- return -EINVAL;
- }
-
- /* Clear local and remote error bits */
- writel(readl(&dev->local->status), &dev->local->status);
- writel(readl(&dev->remote->status), &dev->remote->status);
-
- /* Now setup interrupts */
- val = VLYNQ_CTRL_INT_VECTOR(dev->local_irq);
- val |= VLYNQ_CTRL_INT_ENABLE | VLYNQ_CTRL_INT_LOCAL |
- VLYNQ_CTRL_INT2CFG;
- val |= readl(&dev->local->control);
- writel(VLYNQ_INT_OFFSET, &dev->local->int_ptr);
- writel(val, &dev->local->control);
-
- val = VLYNQ_CTRL_INT_VECTOR(dev->remote_irq);
- val |= VLYNQ_CTRL_INT_ENABLE;
- val |= readl(&dev->remote->control);
- writel(VLYNQ_INT_OFFSET, &dev->remote->int_ptr);
- writel(val, &dev->remote->int_ptr);
- writel(val, &dev->remote->control);
-
- for (i = dev->irq_start; i <= dev->irq_end; i++) {
- virq = i - dev->irq_start;
- if (virq == dev->local_irq) {
- irq_set_chip_and_handler(i, &vlynq_local_chip,
- handle_level_irq);
- irq_set_chip_data(i, dev);
- } else if (virq == dev->remote_irq) {
- irq_set_chip_and_handler(i, &vlynq_remote_chip,
- handle_level_irq);
- irq_set_chip_data(i, dev);
- } else {
- irq_set_chip_and_handler(i, &vlynq_irq_chip,
- handle_simple_irq);
- irq_set_chip_data(i, dev);
- writel(0, &dev->remote->int_device[virq >> 2]);
- }
- }
-
- if (request_irq(dev->irq, vlynq_irq, IRQF_SHARED, "vlynq", dev)) {
- printk(KERN_ERR "%s: request_irq failed\n",
- dev_name(&dev->dev));
- return -EAGAIN;
- }
-
- return 0;
-}
-
-static void vlynq_device_release(struct device *dev)
-{
- struct vlynq_device *vdev = to_vlynq_device(dev);
- kfree(vdev);
-}
-
-static int vlynq_device_match(struct device *dev,
- struct device_driver *drv)
-{
- struct vlynq_device *vdev = to_vlynq_device(dev);
- struct vlynq_driver *vdrv = to_vlynq_driver(drv);
- struct vlynq_device_id *ids = vdrv->id_table;
-
- while (ids->id) {
- if (ids->id == vdev->dev_id) {
- vdev->divisor = ids->divisor;
- vlynq_set_drvdata(vdev, ids);
- printk(KERN_INFO "Driver found for VLYNQ "
- "device: %08x\n", vdev->dev_id);
- return 1;
- }
- printk(KERN_DEBUG "Not using the %08x VLYNQ device's driver"
- " for VLYNQ device: %08x\n", ids->id, vdev->dev_id);
- ids++;
- }
- return 0;
-}
-
-static int vlynq_device_probe(struct device *dev)
-{
- struct vlynq_device *vdev = to_vlynq_device(dev);
- struct vlynq_driver *drv = to_vlynq_driver(dev->driver);
- struct vlynq_device_id *id = vlynq_get_drvdata(vdev);
- int result = -ENODEV;
-
- if (drv->probe)
- result = drv->probe(vdev, id);
- if (result)
- put_device(dev);
- return result;
-}
-
-static void vlynq_device_remove(struct device *dev)
-{
- struct vlynq_driver *drv = to_vlynq_driver(dev->driver);
-
- if (drv->remove)
- drv->remove(to_vlynq_device(dev));
-}
-
-int __vlynq_register_driver(struct vlynq_driver *driver, struct module *owner)
-{
- driver->driver.name = driver->name;
- driver->driver.bus = &vlynq_bus_type;
- return driver_register(&driver->driver);
-}
-EXPORT_SYMBOL(__vlynq_register_driver);
-
-void vlynq_unregister_driver(struct vlynq_driver *driver)
-{
- driver_unregister(&driver->driver);
-}
-EXPORT_SYMBOL(vlynq_unregister_driver);
-
-/*
- * A VLYNQ remote device can clock the VLYNQ bus master
- * using a dedicated clock line. In that case, both the
- * remove device and the bus master should have the same
- * serial clock dividers configured. Iterate through the
- * 8 possible dividers until we actually link with the
- * device.
- */
-static int __vlynq_try_remote(struct vlynq_device *dev)
-{
- int i;
-
- vlynq_reset(dev);
- for (i = dev->dev_id ? vlynq_rdiv2 : vlynq_rdiv8; dev->dev_id ?
- i <= vlynq_rdiv8 : i >= vlynq_rdiv2;
- dev->dev_id ? i++ : i--) {
-
- if (!vlynq_linked(dev))
- break;
-
- writel((readl(&dev->remote->control) &
- ~VLYNQ_CTRL_CLOCK_MASK) |
- VLYNQ_CTRL_CLOCK_INT |
- VLYNQ_CTRL_CLOCK_DIV(i - vlynq_rdiv1),
- &dev->remote->control);
- writel((readl(&dev->local->control)
- & ~(VLYNQ_CTRL_CLOCK_INT |
- VLYNQ_CTRL_CLOCK_MASK)) |
- VLYNQ_CTRL_CLOCK_DIV(i - vlynq_rdiv1),
- &dev->local->control);
-
- if (vlynq_linked(dev)) {
- printk(KERN_DEBUG
- "%s: using remote clock divisor %d\n",
- dev_name(&dev->dev), i - vlynq_rdiv1 + 1);
- dev->divisor = i;
- return 0;
- } else {
- vlynq_reset(dev);
- }
- }
-
- return -ENODEV;
-}
-
-/*
- * A VLYNQ remote device can be clocked by the VLYNQ bus
- * master using a dedicated clock line. In that case, only
- * the bus master configures the serial clock divider.
- * Iterate through the 8 possible dividers until we
- * actually get a link with the device.
- */
-static int __vlynq_try_local(struct vlynq_device *dev)
-{
- int i;
-
- vlynq_reset(dev);
-
- for (i = dev->dev_id ? vlynq_ldiv2 : vlynq_ldiv8; dev->dev_id ?
- i <= vlynq_ldiv8 : i >= vlynq_ldiv2;
- dev->dev_id ? i++ : i--) {
-
- writel((readl(&dev->local->control) &
- ~VLYNQ_CTRL_CLOCK_MASK) |
- VLYNQ_CTRL_CLOCK_INT |
- VLYNQ_CTRL_CLOCK_DIV(i - vlynq_ldiv1),
- &dev->local->control);
-
- if (vlynq_linked(dev)) {
- printk(KERN_DEBUG
- "%s: using local clock divisor %d\n",
- dev_name(&dev->dev), i - vlynq_ldiv1 + 1);
- dev->divisor = i;
- return 0;
- } else {
- vlynq_reset(dev);
- }
- }
-
- return -ENODEV;
-}
-
-/*
- * When using external clocking method, serial clock
- * is supplied by an external oscillator, therefore we
- * should mask the local clock bit in the clock control
- * register for both the bus master and the remote device.
- */
-static int __vlynq_try_external(struct vlynq_device *dev)
-{
- vlynq_reset(dev);
- if (!vlynq_linked(dev))
- return -ENODEV;
-
- writel((readl(&dev->remote->control) &
- ~VLYNQ_CTRL_CLOCK_INT),
- &dev->remote->control);
-
- writel((readl(&dev->local->control) &
- ~VLYNQ_CTRL_CLOCK_INT),
- &dev->local->control);
-
- if (vlynq_linked(dev)) {
- printk(KERN_DEBUG "%s: using external clock\n",
- dev_name(&dev->dev));
- dev->divisor = vlynq_div_external;
- return 0;
- }
-
- return -ENODEV;
-}
-
-static int __vlynq_enable_device(struct vlynq_device *dev)
-{
- int result;
- struct plat_vlynq_ops *ops = dev->dev.platform_data;
-
- result = ops->on(dev);
- if (result)
- return result;
-
- switch (dev->divisor) {
- case vlynq_div_external:
- case vlynq_div_auto:
- /* When the device is brought from reset it should have clock
- * generation negotiated by hardware.
- * Check which device is generating clocks and perform setup
- * accordingly */
- if (vlynq_linked(dev) && readl(&dev->remote->control) &
- VLYNQ_CTRL_CLOCK_INT) {
- if (!__vlynq_try_remote(dev) ||
- !__vlynq_try_local(dev) ||
- !__vlynq_try_external(dev))
- return 0;
- } else {
- if (!__vlynq_try_external(dev) ||
- !__vlynq_try_local(dev) ||
- !__vlynq_try_remote(dev))
- return 0;
- }
- break;
- case vlynq_ldiv1:
- case vlynq_ldiv2:
- case vlynq_ldiv3:
- case vlynq_ldiv4:
- case vlynq_ldiv5:
- case vlynq_ldiv6:
- case vlynq_ldiv7:
- case vlynq_ldiv8:
- writel(VLYNQ_CTRL_CLOCK_INT |
- VLYNQ_CTRL_CLOCK_DIV(dev->divisor -
- vlynq_ldiv1), &dev->local->control);
- writel(0, &dev->remote->control);
- if (vlynq_linked(dev)) {
- printk(KERN_DEBUG
- "%s: using local clock divisor %d\n",
- dev_name(&dev->dev),
- dev->divisor - vlynq_ldiv1 + 1);
- return 0;
- }
- break;
- case vlynq_rdiv1:
- case vlynq_rdiv2:
- case vlynq_rdiv3:
- case vlynq_rdiv4:
- case vlynq_rdiv5:
- case vlynq_rdiv6:
- case vlynq_rdiv7:
- case vlynq_rdiv8:
- writel(0, &dev->local->control);
- writel(VLYNQ_CTRL_CLOCK_INT |
- VLYNQ_CTRL_CLOCK_DIV(dev->divisor -
- vlynq_rdiv1), &dev->remote->control);
- if (vlynq_linked(dev)) {
- printk(KERN_DEBUG
- "%s: using remote clock divisor %d\n",
- dev_name(&dev->dev),
- dev->divisor - vlynq_rdiv1 + 1);
- return 0;
- }
- break;
- }
-
- ops->off(dev);
- return -ENODEV;
-}
-
-int vlynq_enable_device(struct vlynq_device *dev)
-{
- struct plat_vlynq_ops *ops = dev->dev.platform_data;
- int result = -ENODEV;
-
- result = __vlynq_enable_device(dev);
- if (result)
- return result;
-
- result = vlynq_setup_irq(dev);
- if (result)
- ops->off(dev);
-
- dev->enabled = !result;
- return result;
-}
-EXPORT_SYMBOL(vlynq_enable_device);
-
-
-void vlynq_disable_device(struct vlynq_device *dev)
-{
- struct plat_vlynq_ops *ops = dev->dev.platform_data;
-
- dev->enabled = 0;
- free_irq(dev->irq, dev);
- ops->off(dev);
-}
-EXPORT_SYMBOL(vlynq_disable_device);
-
-int vlynq_set_local_mapping(struct vlynq_device *dev, u32 tx_offset,
- struct vlynq_mapping *mapping)
-{
- int i;
-
- if (!dev->enabled)
- return -ENXIO;
-
- writel(tx_offset, &dev->local->tx_offset);
- for (i = 0; i < 4; i++) {
- writel(mapping[i].offset, &dev->local->rx_mapping[i].offset);
- writel(mapping[i].size, &dev->local->rx_mapping[i].size);
- }
- return 0;
-}
-EXPORT_SYMBOL(vlynq_set_local_mapping);
-
-int vlynq_set_remote_mapping(struct vlynq_device *dev, u32 tx_offset,
- struct vlynq_mapping *mapping)
-{
- int i;
-
- if (!dev->enabled)
- return -ENXIO;
-
- writel(tx_offset, &dev->remote->tx_offset);
- for (i = 0; i < 4; i++) {
- writel(mapping[i].offset, &dev->remote->rx_mapping[i].offset);
- writel(mapping[i].size, &dev->remote->rx_mapping[i].size);
- }
- return 0;
-}
-EXPORT_SYMBOL(vlynq_set_remote_mapping);
-
-int vlynq_set_local_irq(struct vlynq_device *dev, int virq)
-{
- int irq = dev->irq_start + virq;
- if (dev->enabled)
- return -EBUSY;
-
- if ((irq < dev->irq_start) || (irq > dev->irq_end))
- return -EINVAL;
-
- if (virq == dev->remote_irq)
- return -EINVAL;
-
- dev->local_irq = virq;
-
- return 0;
-}
-EXPORT_SYMBOL(vlynq_set_local_irq);
-
-int vlynq_set_remote_irq(struct vlynq_device *dev, int virq)
-{
- int irq = dev->irq_start + virq;
- if (dev->enabled)
- return -EBUSY;
-
- if ((irq < dev->irq_start) || (irq > dev->irq_end))
- return -EINVAL;
-
- if (virq == dev->local_irq)
- return -EINVAL;
-
- dev->remote_irq = virq;
-
- return 0;
-}
-EXPORT_SYMBOL(vlynq_set_remote_irq);
-
-static int vlynq_probe(struct platform_device *pdev)
-{
- struct vlynq_device *dev;
- struct resource *regs_res, *mem_res, *irq_res;
- int len, result;
-
- regs_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
- if (!regs_res)
- return -ENODEV;
-
- mem_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
- if (!mem_res)
- return -ENODEV;
-
- irq_res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "devirq");
- if (!irq_res)
- return -ENODEV;
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- printk(KERN_ERR
- "vlynq: failed to allocate device structure\n");
- return -ENOMEM;
- }
-
- dev->id = pdev->id;
- dev->dev.bus = &vlynq_bus_type;
- dev->dev.parent = &pdev->dev;
- dev_set_name(&dev->dev, "vlynq%d", dev->id);
- dev->dev.platform_data = pdev->dev.platform_data;
- dev->dev.release = vlynq_device_release;
-
- dev->regs_start = regs_res->start;
- dev->regs_end = regs_res->end;
- dev->mem_start = mem_res->start;
- dev->mem_end = mem_res->end;
-
- len = resource_size(regs_res);
- if (!request_mem_region(regs_res->start, len, dev_name(&dev->dev))) {
- printk(KERN_ERR "%s: Can't request vlynq registers\n",
- dev_name(&dev->dev));
- result = -ENXIO;
- goto fail_request;
- }
-
- dev->local = ioremap(regs_res->start, len);
- if (!dev->local) {
- printk(KERN_ERR "%s: Can't remap vlynq registers\n",
- dev_name(&dev->dev));
- result = -ENXIO;
- goto fail_remap;
- }
-
- dev->remote = (struct vlynq_regs *)((void *)dev->local +
- VLYNQ_REMOTE_OFFSET);
-
- dev->irq = platform_get_irq_byname(pdev, "irq");
- dev->irq_start = irq_res->start;
- dev->irq_end = irq_res->end;
- dev->local_irq = dev->irq_end - dev->irq_start;
- dev->remote_irq = dev->local_irq - 1;
-
- if (device_register(&dev->dev))
- goto fail_register;
- platform_set_drvdata(pdev, dev);
-
- printk(KERN_INFO "%s: regs 0x%p, irq %d, mem 0x%p\n",
- dev_name(&dev->dev), (void *)dev->regs_start, dev->irq,
- (void *)dev->mem_start);
-
- dev->dev_id = 0;
- dev->divisor = vlynq_div_auto;
- result = __vlynq_enable_device(dev);
- if (result == 0) {
- dev->dev_id = readl(&dev->remote->chip);
- ((struct plat_vlynq_ops *)(dev->dev.platform_data))->off(dev);
- }
- if (dev->dev_id)
- printk(KERN_INFO "Found a VLYNQ device: %08x\n", dev->dev_id);
-
- return 0;
-
-fail_register:
- iounmap(dev->local);
-fail_remap:
-fail_request:
- release_mem_region(regs_res->start, len);
- kfree(dev);
- return result;
-}
-
-static int vlynq_remove(struct platform_device *pdev)
-{
- struct vlynq_device *dev = platform_get_drvdata(pdev);
-
- device_unregister(&dev->dev);
- iounmap(dev->local);
- release_mem_region(dev->regs_start,
- dev->regs_end - dev->regs_start + 1);
-
- kfree(dev);
-
- return 0;
-}
-
-static struct platform_driver vlynq_platform_driver = {
- .driver.name = "vlynq",
- .probe = vlynq_probe,
- .remove = vlynq_remove,
-};
-
-struct bus_type vlynq_bus_type = {
- .name = "vlynq",
- .match = vlynq_device_match,
- .probe = vlynq_device_probe,
- .remove = vlynq_device_remove,
-};
-EXPORT_SYMBOL(vlynq_bus_type);
-
-static int vlynq_init(void)
-{
- int res = 0;
-
- res = bus_register(&vlynq_bus_type);
- if (res)
- goto fail_bus;
-
- res = platform_driver_register(&vlynq_platform_driver);
- if (res)
- goto fail_platform;
-
- return 0;
-
-fail_platform:
- bus_unregister(&vlynq_bus_type);
-fail_bus:
- return res;
-}
-
-static void vlynq_exit(void)
-{
- platform_driver_unregister(&vlynq_platform_driver);
- bus_unregister(&vlynq_bus_type);
-}
-
-module_init(vlynq_init);
-module_exit(vlynq_exit);
diff --git a/drivers/w1/slaves/w1_ds250x.c b/drivers/w1/slaves/w1_ds250x.c
index 7592c7050d1d..cb426f7dd23d 100644
--- a/drivers/w1/slaves/w1_ds250x.c
+++ b/drivers/w1/slaves/w1_ds250x.c
@@ -168,6 +168,7 @@ static int w1_eprom_add_slave(struct w1_slave *sl)
struct nvmem_device *nvmem;
struct nvmem_config nvmem_cfg = {
.dev = &sl->dev,
+ .add_legacy_fixed_of_cells = true,
.reg_read = w1_nvmem_read,
.type = NVMEM_TYPE_OTP,
.read_only = true,
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 8cb6fa45d599..7d22051b15a2 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1768,12 +1768,6 @@ config SIBYTE_WDOG
To compile this driver as a loadable module, choose M here.
The module will be called sb_wdog.
-config AR7_WDT
- tristate "TI AR7 Watchdog Timer"
- depends on AR7 || (MIPS && 32BIT && COMPILE_TEST)
- help
- Hardware driver for the TI AR7 Watchdog Timer.
-
config TXX9_WDT
tristate "Toshiba TXx9 Watchdog Timer"
depends on CPU_TX49XX || (MIPS && COMPILE_TEST)
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 7eab9de311cb..7cbc34514ec1 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -168,7 +168,6 @@ obj-$(CONFIG_INDYDOG) += indydog.o
obj-$(CONFIG_JZ4740_WDT) += jz4740_wdt.o
obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o
obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o
-obj-$(CONFIG_AR7_WDT) += ar7_wdt.o
obj-$(CONFIG_TXX9_WDT) += txx9wdt.o
obj-$(CONFIG_OCTEON_WDT) += octeon-wdt.o
octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o
diff --git a/drivers/watchdog/apple_wdt.c b/drivers/watchdog/apple_wdt.c
index eddeb0fede89..d4f739932f0b 100644
--- a/drivers/watchdog/apple_wdt.c
+++ b/drivers/watchdog/apple_wdt.c
@@ -173,6 +173,8 @@ static int apple_wdt_probe(struct platform_device *pdev)
if (!wdt->clk_rate)
return -EINVAL;
+ platform_set_drvdata(pdev, wdt);
+
wdt->wdd.ops = &apple_wdt_ops;
wdt->wdd.info = &apple_wdt_info;
wdt->wdd.max_timeout = U32_MAX / wdt->clk_rate;
@@ -190,6 +192,28 @@ static int apple_wdt_probe(struct platform_device *pdev)
return devm_watchdog_register_device(dev, &wdt->wdd);
}
+static int apple_wdt_resume(struct device *dev)
+{
+ struct apple_wdt *wdt = dev_get_drvdata(dev);
+
+ if (watchdog_active(&wdt->wdd) || watchdog_hw_running(&wdt->wdd))
+ apple_wdt_start(&wdt->wdd);
+
+ return 0;
+}
+
+static int apple_wdt_suspend(struct device *dev)
+{
+ struct apple_wdt *wdt = dev_get_drvdata(dev);
+
+ if (watchdog_active(&wdt->wdd) || watchdog_hw_running(&wdt->wdd))
+ apple_wdt_stop(&wdt->wdd);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(apple_wdt_pm_ops, apple_wdt_suspend, apple_wdt_resume);
+
static const struct of_device_id apple_wdt_of_match[] = {
{ .compatible = "apple,wdt" },
{},
@@ -200,6 +224,7 @@ static struct platform_driver apple_wdt_driver = {
.driver = {
.name = "apple-watchdog",
.of_match_table = apple_wdt_of_match,
+ .pm = pm_sleep_ptr(&apple_wdt_pm_ops),
},
.probe = apple_wdt_probe,
};
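
The apple_wdt change above shows the stock pattern for adding system-sleep support to a platform driver: stash the driver state with platform_set_drvdata() in probe, touch the hardware in suspend/resume only when the watchdog is actually running, generate the dev_pm_ops with DEFINE_SIMPLE_DEV_PM_OPS(), and hook it up through pm_sleep_ptr() so the ops are dropped when CONFIG_PM_SLEEP is disabled. A minimal, hypothetical sketch of the same shape (the foo_* names and empty hardware hooks are illustrative, not a real driver):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/watchdog.h>

struct foo_wdt {
	struct watchdog_device wdd;
};

static int foo_wdt_suspend(struct device *dev)
{
	struct foo_wdt *wdt = dev_get_drvdata(dev);

	/* Only touch the hardware if the watchdog is actually ticking. */
	if (watchdog_active(&wdt->wdd) || watchdog_hw_running(&wdt->wdd))
		;	/* stop the hardware counter here */

	return 0;
}

static int foo_wdt_resume(struct device *dev)
{
	struct foo_wdt *wdt = dev_get_drvdata(dev);

	if (watchdog_active(&wdt->wdd) || watchdog_hw_running(&wdt->wdd))
		;	/* re-arm the hardware counter here */

	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_wdt_pm_ops, foo_wdt_suspend, foo_wdt_resume);

static struct platform_driver foo_wdt_driver = {
	.driver = {
		.name	= "foo-wdt",
		/* pm_sleep_ptr() leaves .pm NULL when CONFIG_PM_SLEEP=n */
		.pm	= pm_sleep_ptr(&foo_wdt_pm_ops),
	},
};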
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
deleted file mode 100644
index cdcaeb0961ac..000000000000
--- a/drivers/watchdog/ar7_wdt.c
+++ /dev/null
@@ -1,315 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * drivers/watchdog/ar7_wdt.c
- *
- * Copyright (C) 2007 Nicolas Thill <nico@openwrt.org>
- * Copyright (c) 2005 Enrik Berkhan <Enrik.Berkhan@akk.org>
- *
- * Some code taken from:
- * National Semiconductor SCx200 Watchdog support
- * Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/errno.h>
-#include <linux/miscdevice.h>
-#include <linux/platform_device.h>
-#include <linux/watchdog.h>
-#include <linux/fs.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-#include <linux/clk.h>
-
-#include <asm/addrspace.h>
-#include <asm/mach-ar7/ar7.h>
-
-#define LONGNAME "TI AR7 Watchdog Timer"
-
-MODULE_AUTHOR("Nicolas Thill <nico@openwrt.org>");
-MODULE_DESCRIPTION(LONGNAME);
-MODULE_LICENSE("GPL");
-
-static int margin = 60;
-module_param(margin, int, 0);
-MODULE_PARM_DESC(margin, "Watchdog margin in seconds");
-
-static bool nowayout = WATCHDOG_NOWAYOUT;
-module_param(nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close");
-
-#define READ_REG(x) readl((void __iomem *)&(x))
-#define WRITE_REG(x, v) writel((v), (void __iomem *)&(x))
-
-struct ar7_wdt {
- u32 kick_lock;
- u32 kick;
- u32 change_lock;
- u32 change;
- u32 disable_lock;
- u32 disable;
- u32 prescale_lock;
- u32 prescale;
-};
-
-static unsigned long wdt_is_open;
-static unsigned expect_close;
-static DEFINE_SPINLOCK(wdt_lock);
-
-/* XXX currently fixed, allows max margin ~68.72 secs */
-#define prescale_value 0xffff
-
-/* Pointer to the remapped WDT IO space */
-static struct ar7_wdt *ar7_wdt;
-
-static struct clk *vbus_clk;
-
-static void ar7_wdt_kick(u32 value)
-{
- WRITE_REG(ar7_wdt->kick_lock, 0x5555);
- if ((READ_REG(ar7_wdt->kick_lock) & 3) == 1) {
- WRITE_REG(ar7_wdt->kick_lock, 0xaaaa);
- if ((READ_REG(ar7_wdt->kick_lock) & 3) == 3) {
- WRITE_REG(ar7_wdt->kick, value);
- return;
- }
- }
- pr_err("failed to unlock WDT kick reg\n");
-}
-
-static void ar7_wdt_prescale(u32 value)
-{
- WRITE_REG(ar7_wdt->prescale_lock, 0x5a5a);
- if ((READ_REG(ar7_wdt->prescale_lock) & 3) == 1) {
- WRITE_REG(ar7_wdt->prescale_lock, 0xa5a5);
- if ((READ_REG(ar7_wdt->prescale_lock) & 3) == 3) {
- WRITE_REG(ar7_wdt->prescale, value);
- return;
- }
- }
- pr_err("failed to unlock WDT prescale reg\n");
-}
-
-static void ar7_wdt_change(u32 value)
-{
- WRITE_REG(ar7_wdt->change_lock, 0x6666);
- if ((READ_REG(ar7_wdt->change_lock) & 3) == 1) {
- WRITE_REG(ar7_wdt->change_lock, 0xbbbb);
- if ((READ_REG(ar7_wdt->change_lock) & 3) == 3) {
- WRITE_REG(ar7_wdt->change, value);
- return;
- }
- }
- pr_err("failed to unlock WDT change reg\n");
-}
-
-static void ar7_wdt_disable(u32 value)
-{
- WRITE_REG(ar7_wdt->disable_lock, 0x7777);
- if ((READ_REG(ar7_wdt->disable_lock) & 3) == 1) {
- WRITE_REG(ar7_wdt->disable_lock, 0xcccc);
- if ((READ_REG(ar7_wdt->disable_lock) & 3) == 2) {
- WRITE_REG(ar7_wdt->disable_lock, 0xdddd);
- if ((READ_REG(ar7_wdt->disable_lock) & 3) == 3) {
- WRITE_REG(ar7_wdt->disable, value);
- return;
- }
- }
- }
- pr_err("failed to unlock WDT disable reg\n");
-}
-
-static void ar7_wdt_update_margin(int new_margin)
-{
- u32 change;
- u32 vbus_rate;
-
- vbus_rate = clk_get_rate(vbus_clk);
- change = new_margin * (vbus_rate / prescale_value);
- if (change < 1)
- change = 1;
- if (change > 0xffff)
- change = 0xffff;
- ar7_wdt_change(change);
- margin = change * prescale_value / vbus_rate;
- pr_info("timer margin %d seconds (prescale %d, change %d, freq %d)\n",
- margin, prescale_value, change, vbus_rate);
-}
-
-static void ar7_wdt_enable_wdt(void)
-{
- pr_debug("enabling watchdog timer\n");
- ar7_wdt_disable(1);
- ar7_wdt_kick(1);
-}
-
-static void ar7_wdt_disable_wdt(void)
-{
- pr_debug("disabling watchdog timer\n");
- ar7_wdt_disable(0);
-}
-
-static int ar7_wdt_open(struct inode *inode, struct file *file)
-{
- /* only allow one at a time */
- if (test_and_set_bit(0, &wdt_is_open))
- return -EBUSY;
- ar7_wdt_enable_wdt();
- expect_close = 0;
-
- return stream_open(inode, file);
-}
-
-static int ar7_wdt_release(struct inode *inode, struct file *file)
-{
- if (!expect_close)
- pr_warn("watchdog device closed unexpectedly, will not disable the watchdog timer\n");
- else if (!nowayout)
- ar7_wdt_disable_wdt();
- clear_bit(0, &wdt_is_open);
- return 0;
-}
-
-static ssize_t ar7_wdt_write(struct file *file, const char *data,
- size_t len, loff_t *ppos)
-{
- /* check for a magic close character */
- if (len) {
- size_t i;
-
- spin_lock(&wdt_lock);
- ar7_wdt_kick(1);
- spin_unlock(&wdt_lock);
-
- expect_close = 0;
- for (i = 0; i < len; ++i) {
- char c;
- if (get_user(c, data + i))
- return -EFAULT;
- if (c == 'V')
- expect_close = 1;
- }
-
- }
- return len;
-}
-
-static long ar7_wdt_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- static const struct watchdog_info ident = {
- .identity = LONGNAME,
- .firmware_version = 1,
- .options = (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
- WDIOF_MAGICCLOSE),
- };
- int new_margin;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- if (copy_to_user((struct watchdog_info *)arg, &ident,
- sizeof(ident)))
- return -EFAULT;
- return 0;
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- if (put_user(0, (int *)arg))
- return -EFAULT;
- return 0;
- case WDIOC_KEEPALIVE:
- ar7_wdt_kick(1);
- return 0;
- case WDIOC_SETTIMEOUT:
- if (get_user(new_margin, (int *)arg))
- return -EFAULT;
- if (new_margin < 1)
- return -EINVAL;
-
- spin_lock(&wdt_lock);
- ar7_wdt_update_margin(new_margin);
- ar7_wdt_kick(1);
- spin_unlock(&wdt_lock);
- fallthrough;
- case WDIOC_GETTIMEOUT:
- if (put_user(margin, (int *)arg))
- return -EFAULT;
- return 0;
- default:
- return -ENOTTY;
- }
-}
-
-static const struct file_operations ar7_wdt_fops = {
- .owner = THIS_MODULE,
- .write = ar7_wdt_write,
- .unlocked_ioctl = ar7_wdt_ioctl,
- .compat_ioctl = compat_ptr_ioctl,
- .open = ar7_wdt_open,
- .release = ar7_wdt_release,
- .llseek = no_llseek,
-};
-
-static struct miscdevice ar7_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &ar7_wdt_fops,
-};
-
-static int ar7_wdt_probe(struct platform_device *pdev)
-{
- int rc;
-
- ar7_wdt = devm_platform_ioremap_resource_byname(pdev, "regs");
- if (IS_ERR(ar7_wdt))
- return PTR_ERR(ar7_wdt);
-
- vbus_clk = clk_get(NULL, "vbus");
- if (IS_ERR(vbus_clk)) {
- pr_err("could not get vbus clock\n");
- return PTR_ERR(vbus_clk);
- }
-
- ar7_wdt_disable_wdt();
- ar7_wdt_prescale(prescale_value);
- ar7_wdt_update_margin(margin);
-
- rc = misc_register(&ar7_wdt_miscdev);
- if (rc) {
- pr_err("unable to register misc device\n");
- goto out;
- }
- return 0;
-
-out:
- clk_put(vbus_clk);
- vbus_clk = NULL;
- return rc;
-}
-
-static void ar7_wdt_remove(struct platform_device *pdev)
-{
- misc_deregister(&ar7_wdt_miscdev);
- clk_put(vbus_clk);
- vbus_clk = NULL;
-}
-
-static void ar7_wdt_shutdown(struct platform_device *pdev)
-{
- if (!nowayout)
- ar7_wdt_disable_wdt();
-}
-
-static struct platform_driver ar7_wdt_driver = {
- .probe = ar7_wdt_probe,
- .remove_new = ar7_wdt_remove,
- .shutdown = ar7_wdt_shutdown,
- .driver = {
- .name = "ar7_wdt",
- },
-};
-
-module_platform_driver(ar7_wdt_driver);
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
index b72a858bbac7..b4773a6aaf8c 100644
--- a/drivers/watchdog/aspeed_wdt.c
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -79,6 +79,8 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
#define WDT_TIMEOUT_STATUS_BOOT_SECONDARY BIT(1)
#define WDT_CLEAR_TIMEOUT_STATUS 0x14
#define WDT_CLEAR_TIMEOUT_AND_BOOT_CODE_SELECTION BIT(0)
+#define WDT_RESET_MASK1 0x1c
+#define WDT_RESET_MASK2 0x20
/*
* WDT_RESET_WIDTH controls the characteristics of the external pulse (if
@@ -402,6 +404,8 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
if ((of_device_is_compatible(np, "aspeed,ast2500-wdt")) ||
(of_device_is_compatible(np, "aspeed,ast2600-wdt"))) {
+ u32 reset_mask[2];
+ size_t nrstmask = of_device_is_compatible(np, "aspeed,ast2600-wdt") ? 2 : 1;
u32 reg = readl(wdt->base + WDT_RESET_WIDTH);
reg &= wdt->cfg->ext_pulse_width_mask;
@@ -419,6 +423,13 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
reg |= WDT_OPEN_DRAIN_MAGIC;
writel(reg, wdt->base + WDT_RESET_WIDTH);
+
+ ret = of_property_read_u32_array(np, "aspeed,reset-mask", reset_mask, nrstmask);
+ if (!ret) {
+ writel(reset_mask[0], wdt->base + WDT_RESET_MASK1);
+ if (nrstmask > 1)
+ writel(reset_mask[1], wdt->base + WDT_RESET_MASK2);
+ }
}
if (!of_property_read_u32(np, "aspeed,ext-pulse-duration", &duration)) {
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index fed7be246442..b111b28acb94 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -348,25 +348,21 @@ static int __init at91wdt_probe(struct platform_device *pdev)
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
- wdt->sclk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(wdt->sclk))
- return PTR_ERR(wdt->sclk);
-
- err = clk_prepare_enable(wdt->sclk);
- if (err) {
+ wdt->sclk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(wdt->sclk)) {
dev_err(&pdev->dev, "Could not enable slow clock\n");
- return err;
+ return PTR_ERR(wdt->sclk);
}
if (pdev->dev.of_node) {
err = of_at91wdt_init(pdev->dev.of_node, wdt);
if (err)
- goto err_clk;
+ return err;
}
err = at91_wdt_init(pdev, wdt);
if (err)
- goto err_clk;
+ return err;
platform_set_drvdata(pdev, wdt);
@@ -374,11 +370,6 @@ static int __init at91wdt_probe(struct platform_device *pdev)
wdt->wdd.timeout, wdt->nowayout);
return 0;
-
-err_clk:
- clk_disable_unprepare(wdt->sclk);
-
- return err;
}
static int __exit at91wdt_remove(struct platform_device *pdev)
@@ -388,7 +379,6 @@ static int __exit at91wdt_remove(struct platform_device *pdev)
pr_warn("I quit now, hardware will probably reboot!\n");
del_timer(&wdt->timer);
- clk_disable_unprepare(wdt->sclk);
return 0;
}
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index b7b705060438..e5cc30622b12 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -257,19 +257,13 @@ static int ath79_wdt_probe(struct platform_device *pdev)
if (IS_ERR(wdt_base))
return PTR_ERR(wdt_base);
- wdt_clk = devm_clk_get(&pdev->dev, "wdt");
+ wdt_clk = devm_clk_get_enabled(&pdev->dev, "wdt");
if (IS_ERR(wdt_clk))
return PTR_ERR(wdt_clk);
- err = clk_prepare_enable(wdt_clk);
- if (err)
- return err;
-
wdt_freq = clk_get_rate(wdt_clk);
- if (!wdt_freq) {
- err = -EINVAL;
- goto err_clk_disable;
- }
+ if (!wdt_freq)
+ return -EINVAL;
max_timeout = (0xfffffffful / wdt_freq);
if (timeout < 1 || timeout > max_timeout) {
@@ -286,20 +280,15 @@ static int ath79_wdt_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev,
"unable to register misc device, err=%d\n", err);
- goto err_clk_disable;
+ return err;
}
return 0;
-
-err_clk_disable:
- clk_disable_unprepare(wdt_clk);
- return err;
}
static void ath79_wdt_remove(struct platform_device *pdev)
{
misc_deregister(&ath79_wdt_miscdev);
- clk_disable_unprepare(wdt_clk);
}
static void ath79_wdt_shutdown(struct platform_device *pdev)
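
The at91sam9 and ath79 conversions above (and the sunplus one further down) all lean on devm_clk_get_enabled(), which gets, prepares and enables the clock and registers the matching disable/unprepare as a devres action, so the manual error paths and the remove() cleanup disappear. A hedged sketch of the resulting probe() shape (foo_* names are illustrative only):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/*
	 * One call replaces devm_clk_get() + clk_prepare_enable() plus the
	 * clk_disable_unprepare() that every error path and remove() needed.
	 */
	clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "Failed to get and enable clock\n");

	if (!clk_get_rate(clk))
		return -EINVAL;	/* nothing to unwind: devres drops the clock */

	return 0;
}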
diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c
index 0923201ce874..a7b814ea740b 100644
--- a/drivers/watchdog/gpio_wdt.c
+++ b/drivers/watchdog/gpio_wdt.c
@@ -5,12 +5,13 @@
* Author: 2013, Alexander Shiyan <shc_work@mail.ru>
*/
-#include <linux/err.h>
#include <linux/delay.h>
-#include <linux/module.h>
+#include <linux/err.h>
#include <linux/gpio/consumer.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/watchdog.h>
static bool nowayout = WATCHDOG_NOWAYOUT;
@@ -106,7 +107,6 @@ static const struct watchdog_ops gpio_wdt_ops = {
static int gpio_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
struct gpio_wdt_priv *priv;
enum gpiod_flags gflags;
unsigned int hw_margin;
@@ -119,7 +119,7 @@ static int gpio_wdt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- ret = of_property_read_string(np, "hw_algo", &algo);
+ ret = device_property_read_string(dev, "hw_algo", &algo);
if (ret)
return ret;
if (!strcmp(algo, "toggle")) {
@@ -136,16 +136,14 @@ static int gpio_wdt_probe(struct platform_device *pdev)
if (IS_ERR(priv->gpiod))
return PTR_ERR(priv->gpiod);
- ret = of_property_read_u32(np,
- "hw_margin_ms", &hw_margin);
+ ret = device_property_read_u32(dev, "hw_margin_ms", &hw_margin);
if (ret)
return ret;
/* Disallow values lower than 2 and higher than 65535 ms */
if (hw_margin < 2 || hw_margin > 65535)
return -EINVAL;
- priv->always_running = of_property_read_bool(np,
- "always-running");
+ priv->always_running = device_property_read_bool(dev, "always-running");
watchdog_set_drvdata(&priv->wdd, priv);
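
The gpio_wdt hunks above replace the OF-only of_property_read_*() helpers with the firmware-agnostic device_property_*() family from <linux/property.h>, so the same probe path works whether the device is described by device tree or ACPI. A small sketch of the pattern (the property names come from the driver, the surrounding function is hypothetical):

#include <linux/device.h>
#include <linux/property.h>

static int foo_parse_properties(struct device *dev)
{
	const char *algo;
	u32 hw_margin;
	int ret;

	ret = device_property_read_string(dev, "hw_algo", &algo);
	if (ret)
		return ret;

	ret = device_property_read_u32(dev, "hw_margin_ms", &hw_margin);
	if (ret)
		return ret;

	/* Boolean reads cannot fail; a missing property simply reads as false. */
	if (device_property_read_bool(dev, "always-running"))
		dev_info(dev, "watchdog keeps running from boot\n");

	return 0;
}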
diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c
index c703586c6e5f..b21d7a74a42d 100644
--- a/drivers/watchdog/imx7ulp_wdt.c
+++ b/drivers/watchdog/imx7ulp_wdt.c
@@ -23,6 +23,7 @@
#define LPO_CLK_SHIFT 8
#define WDOG_CS_CLK (LPO_CLK << LPO_CLK_SHIFT)
#define WDOG_CS_EN BIT(7)
+#define WDOG_CS_INT_EN BIT(6)
#define WDOG_CS_UPDATE BIT(5)
#define WDOG_CS_WAIT BIT(1)
#define WDOG_CS_STOP BIT(0)
@@ -62,6 +63,7 @@ struct imx7ulp_wdt_device {
void __iomem *base;
struct clk *clk;
bool post_rcs_wait;
+ bool ext_reset;
const struct imx_wdt_hw_feature *hw;
};
@@ -285,6 +287,9 @@ static int imx7ulp_wdt_init(struct imx7ulp_wdt_device *wdt, unsigned int timeout
if (wdt->hw->prescaler_enable)
val |= WDOG_CS_PRES;
+ if (wdt->ext_reset)
+ val |= WDOG_CS_INT_EN;
+
do {
ret = _imx7ulp_wdt_init(wdt, timeout, val);
toval = readl(wdt->base + WDOG_TOVAL);
@@ -321,6 +326,9 @@ static int imx7ulp_wdt_probe(struct platform_device *pdev)
return PTR_ERR(imx7ulp_wdt->clk);
}
+ /* The WDOG may need to do external reset through dedicated pin */
+ imx7ulp_wdt->ext_reset = of_property_read_bool(dev->of_node, "fsl,ext-reset-output");
+
imx7ulp_wdt->post_rcs_wait = true;
if (of_device_is_compatible(dev->of_node,
"fsl,imx8ulp-wdt")) {
diff --git a/drivers/watchdog/imx_sc_wdt.c b/drivers/watchdog/imx_sc_wdt.c
index 8ac021748d16..e51fe1b78518 100644
--- a/drivers/watchdog/imx_sc_wdt.c
+++ b/drivers/watchdog/imx_sc_wdt.c
@@ -34,6 +34,7 @@
#define SC_IRQ_WDOG 1
#define SC_IRQ_GROUP_WDOG 1
+#define SC_TIMER_ERR_BUSY 10
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0000);
@@ -61,7 +62,9 @@ static int imx_sc_wdt_start(struct watchdog_device *wdog)
arm_smccc_smc(IMX_SIP_TIMER, IMX_SIP_TIMER_START_WDOG,
0, 0, 0, 0, 0, 0, &res);
- if (res.a0)
+
+ /* Ignore if already enabled (SC_TIMER_ERR_BUSY) */
+ if (res.a0 && res.a0 != SC_TIMER_ERR_BUSY)
return -EACCES;
arm_smccc_smc(IMX_SIP_TIMER, IMX_SIP_TIMER_SET_WDOG_ACT,
diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
index bb1122909396..e888b1bdd1f2 100644
--- a/drivers/watchdog/it87_wdt.c
+++ b/drivers/watchdog/it87_wdt.c
@@ -13,9 +13,9 @@
* http://www.ite.com.tw/
*
* Support of the watchdog timers, which are available on
- * IT8607, IT8620, IT8622, IT8625, IT8628, IT8655, IT8665, IT8686,
- * IT8702, IT8712, IT8716, IT8718, IT8720, IT8721, IT8726, IT8728,
- * IT8772, IT8783 and IT8784.
+ * IT8607, IT8613, IT8620, IT8622, IT8625, IT8628, IT8655, IT8665,
+ * IT8686, IT8702, IT8712, IT8716, IT8718, IT8720, IT8721, IT8726,
+ * IT8728, IT8772, IT8783 and IT8784.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -50,6 +50,7 @@
/* Chip Id numbers */
#define NO_DEV_ID 0xffff
#define IT8607_ID 0x8607
+#define IT8613_ID 0x8613
#define IT8620_ID 0x8620
#define IT8622_ID 0x8622
#define IT8625_ID 0x8625
@@ -277,6 +278,7 @@ static int __init it87_wdt_init(void)
max_units = 65535;
break;
case IT8607_ID:
+ case IT8613_ID:
case IT8620_ID:
case IT8622_ID:
case IT8625_ID:
diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
index 607ce4b8df57..ec0c08652ec2 100644
--- a/drivers/watchdog/ixp4xx_wdt.c
+++ b/drivers/watchdog/ixp4xx_wdt.c
@@ -105,6 +105,25 @@ static const struct watchdog_ops ixp4xx_wdt_ops = {
.owner = THIS_MODULE,
};
+/*
+ * The A0 version of the IXP422 had a bug in the watchdog making
+ * it useless, but we still need to use it to restart the system
+ * as it is the only way, so in this special case we register a
+ * "dummy" watchdog that doesn't really work, but will support
+ * the restart operation.
+ */
+static int ixp4xx_wdt_dummy(struct watchdog_device *wdd)
+{
+ return 0;
+}
+
+static const struct watchdog_ops ixp4xx_wdt_restart_only_ops = {
+ .start = ixp4xx_wdt_dummy,
+ .stop = ixp4xx_wdt_dummy,
+ .restart = ixp4xx_wdt_restart,
+ .owner = THIS_MODULE,
+};
+
static const struct watchdog_info ixp4xx_wdt_info = {
.options = WDIOF_KEEPALIVEPING
| WDIOF_MAGICCLOSE
@@ -114,14 +133,17 @@ static const struct watchdog_info ixp4xx_wdt_info = {
static int ixp4xx_wdt_probe(struct platform_device *pdev)
{
+ static const struct watchdog_ops *iwdt_ops;
struct device *dev = &pdev->dev;
struct ixp4xx_wdt *iwdt;
struct clk *clk;
int ret;
if (!(read_cpuid_id() & 0xf) && !cpu_is_ixp46x()) {
- dev_err(dev, "Rev. A0 IXP42x CPU detected - watchdog disabled\n");
- return -ENODEV;
+ dev_info(dev, "Rev. A0 IXP42x CPU detected - only restart supported\n");
+ iwdt_ops = &ixp4xx_wdt_restart_only_ops;
+ } else {
+ iwdt_ops = &ixp4xx_wdt_ops;
}
iwdt = devm_kzalloc(dev, sizeof(*iwdt), GFP_KERNEL);
@@ -141,7 +163,7 @@ static int ixp4xx_wdt_probe(struct platform_device *pdev)
iwdt->rate = IXP4XX_TIMER_FREQ;
iwdt->wdd.info = &ixp4xx_wdt_info;
- iwdt->wdd.ops = &ixp4xx_wdt_ops;
+ iwdt->wdd.ops = iwdt_ops;
iwdt->wdd.min_timeout = 1;
iwdt->wdd.max_timeout = U32_MAX / iwdt->rate;
iwdt->wdd.parent = dev;
diff --git a/drivers/watchdog/marvell_gti_wdt.c b/drivers/watchdog/marvell_gti_wdt.c
index d7eb8286e11e..098bb141a521 100644
--- a/drivers/watchdog/marvell_gti_wdt.c
+++ b/drivers/watchdog/marvell_gti_wdt.c
@@ -8,8 +8,8 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <linux/watchdog.h>
/*
@@ -190,6 +190,13 @@ static int gti_wdt_set_pretimeout(struct watchdog_device *wdev,
struct gti_wdt_priv *priv = watchdog_get_drvdata(wdev);
struct watchdog_device *wdog_dev = &priv->wdev;
+ if (!timeout) {
+ /* Disable Interrupt */
+ writeq(GTI_CWD_INT_ENA_CLR_VAL(priv->wdt_timer_idx),
+ priv->base + GTI_CWD_INT_ENA_CLR);
+ return 0;
+ }
+
/* pretimeout should 1/3 of max_timeout */
if (timeout * 3 <= wdog_dev->max_timeout)
return gti_wdt_settimeout(wdev, timeout * 3);
@@ -271,7 +278,7 @@ static int gti_wdt_probe(struct platform_device *pdev)
&wdt_idx);
if (!err) {
if (wdt_idx >= priv->data->gti_num_timers)
- return dev_err_probe(&pdev->dev, err,
+ return dev_err_probe(&pdev->dev, -EINVAL,
"GTI wdog timer index not valid");
priv->wdt_timer_idx = wdt_idx;
@@ -292,6 +299,7 @@ static int gti_wdt_probe(struct platform_device *pdev)
/* Maximum timeout is 3 times the pretimeout */
wdog_dev->max_timeout = max_pretimeout * 3;
+ wdog_dev->max_hw_heartbeat_ms = max_pretimeout * 1000;
/* Minimum first timeout (pretimeout) is 1, so min_timeout as 3 */
wdog_dev->min_timeout = 3;
wdog_dev->timeout = wdog_dev->pretimeout;
@@ -308,7 +316,7 @@ static int gti_wdt_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return dev_err_probe(&pdev->dev, irq, "IRQ resource not found\n");
+ return irq;
err = devm_request_irq(dev, irq, gti_wdt_interrupt, 0,
pdev->name, &priv->wdev);
diff --git a/drivers/watchdog/mlx_wdt.c b/drivers/watchdog/mlx_wdt.c
index 9c5b6616fc87..667e2c5b3431 100644
--- a/drivers/watchdog/mlx_wdt.c
+++ b/drivers/watchdog/mlx_wdt.c
@@ -39,6 +39,7 @@
* @tleft_idx: index for direct access to time left register;
* @ping_idx: index for direct access to ping register;
* @reset_idx: index for direct access to reset cause register;
+ * @regmap_val_sz: size of value in register map;
* @wd_type: watchdog HW type;
*/
struct mlxreg_wdt {
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index 05657dc1d36a..352853e6fe71 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -187,7 +187,7 @@ static int xwdt_probe(struct platform_device *pdev)
watchdog_set_nowayout(xilinx_wdt_wdd, enable_once);
- xdev->clk = devm_clk_get_enabled(dev, NULL);
+ xdev->clk = devm_clk_get_prepared(dev, NULL);
if (IS_ERR(xdev->clk)) {
if (PTR_ERR(xdev->clk) != -ENOENT)
return PTR_ERR(xdev->clk);
@@ -218,18 +218,25 @@ static int xwdt_probe(struct platform_device *pdev)
spin_lock_init(&xdev->spinlock);
watchdog_set_drvdata(xilinx_wdt_wdd, xdev);
+ rc = clk_enable(xdev->clk);
+ if (rc) {
+ dev_err(dev, "unable to enable clock\n");
+ return rc;
+ }
+
rc = xwdt_selftest(xdev);
if (rc == XWT_TIMER_FAILED) {
dev_err(dev, "SelfTest routine error\n");
+ clk_disable(xdev->clk);
return rc;
}
+ clk_disable(xdev->clk);
+
rc = devm_watchdog_register_device(dev, xilinx_wdt_wdd);
if (rc)
return rc;
- clk_disable(xdev->clk);
-
dev_info(dev, "Xilinx Watchdog Timer with timeout %ds\n",
xilinx_wdt_wdd->timeout);
diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
index 421ebcda62e6..5f23913ce3b4 100644
--- a/drivers/watchdog/sbsa_gwdt.c
+++ b/drivers/watchdog/sbsa_gwdt.c
@@ -152,14 +152,14 @@ static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd,
timeout = clamp_t(unsigned int, timeout, 1, wdd->max_hw_heartbeat_ms / 1000);
if (action)
- sbsa_gwdt_reg_write(gwdt->clk * timeout, gwdt);
+ sbsa_gwdt_reg_write((u64)gwdt->clk * timeout, gwdt);
else
/*
* In the single stage mode, The first signal (WS0) is ignored,
* the timeout is (WOR * 2), so the WOR should be configured
* to half value of timeout.
*/
- sbsa_gwdt_reg_write(gwdt->clk / 2 * timeout, gwdt);
+ sbsa_gwdt_reg_write(((u64)gwdt->clk / 2) * timeout, gwdt);
return 0;
}
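
The sbsa_gwdt fix above is a plain integer-promotion issue: gwdt->clk and timeout are 32-bit, so clk * timeout is computed in 32 bits and can wrap before it reaches the 64-bit register write; casting one operand to u64 forces the whole multiply to 64 bits. A quick illustration with made-up numbers:

#include <linux/types.h>

/* clk = 100 MHz, timeout = 60 s: the product 6,000,000,000 does not fit in 32 bits. */
u32 clk = 100000000, timeout = 60;

u64 wrong = clk * timeout;	/* 32-bit multiply first, wraps to 1705032704 */
u64 right = (u64)clk * timeout;	/* 64-bit multiply, 6000000000 as intended */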
diff --git a/drivers/watchdog/st_lpc_wdt.c b/drivers/watchdog/st_lpc_wdt.c
index d2aa43c00221..4c5b8d98a4f3 100644
--- a/drivers/watchdog/st_lpc_wdt.c
+++ b/drivers/watchdog/st_lpc_wdt.c
@@ -15,7 +15,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/watchdog.h>
@@ -42,7 +41,7 @@ struct st_wdog {
void __iomem *base;
struct device *dev;
struct regmap *regmap;
- struct st_wdog_syscfg *syscfg;
+ const struct st_wdog_syscfg *syscfg;
struct clk *clk;
unsigned long clkrate;
bool warm_reset;
@@ -150,7 +149,6 @@ static void st_clk_disable_unprepare(void *data)
static int st_wdog_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- const struct of_device_id *match;
struct device_node *np = dev->of_node;
struct st_wdog *st_wdog;
struct regmap *regmap;
@@ -173,12 +171,7 @@ static int st_wdog_probe(struct platform_device *pdev)
if (!st_wdog)
return -ENOMEM;
- match = of_match_device(st_wdog_match, dev);
- if (!match) {
- dev_err(dev, "Couldn't match device\n");
- return -ENODEV;
- }
- st_wdog->syscfg = (struct st_wdog_syscfg *)match->data;
+ st_wdog->syscfg = (struct st_wdog_syscfg *)device_get_match_data(dev);
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
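
The st_lpc_wdt hunk above drops the of_match_device() dance in favour of device_get_match_data(), which returns the .data pointer of whichever OF or ACPI match-table entry bound the device (or NULL if there is none). A short sketch of the pattern (foo_* names are hypothetical):

#include <linux/platform_device.h>
#include <linux/property.h>

struct foo_cfg {
	bool warm_reset;
};

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_cfg *cfg;

	/* Works for both OF and ACPI enumeration. */
	cfg = device_get_match_data(&pdev->dev);
	if (!cfg)
		return -ENODEV;

	return 0;
}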
diff --git a/drivers/watchdog/sunplus_wdt.c b/drivers/watchdog/sunplus_wdt.c
index e2d8c532bcb1..9d3ca848e8b6 100644
--- a/drivers/watchdog/sunplus_wdt.c
+++ b/drivers/watchdog/sunplus_wdt.c
@@ -136,11 +136,6 @@ static const struct watchdog_ops sp_wdt_ops = {
.restart = sp_wdt_restart,
};
-static void sp_clk_disable_unprepare(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static void sp_reset_control_assert(void *data)
{
reset_control_assert(data);
@@ -156,17 +151,9 @@ static int sp_wdt_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->clk = devm_clk_get(dev, NULL);
+ priv->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(priv->clk))
- return dev_err_probe(dev, PTR_ERR(priv->clk), "Failed to get clock\n");
-
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return dev_err_probe(dev, ret, "Failed to enable clock\n");
-
- ret = devm_add_action_or_reset(dev, sp_clk_disable_unprepare, priv->clk);
- if (ret)
- return ret;
+ return dev_err_probe(dev, PTR_ERR(priv->clk), "Failed to enable clock\n");
/* The timer and watchdog shared the STC reset */
priv->rstc = devm_reset_control_get_shared(dev, NULL);
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index 0ba99bed59fc..650fdc7996e1 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -269,7 +269,7 @@ static int wdat_wdt_stop(struct watchdog_device *wdd)
static int wdat_wdt_ping(struct watchdog_device *wdd)
{
- return wdat_wdt_run_action(to_wdat_wdt(wdd), ACPI_WDAT_RESET, 0, NULL);
+ return wdat_wdt_run_action(to_wdat_wdt(wdd), ACPI_WDAT_RESET, wdd->timeout, NULL);
}
static int wdat_wdt_set_timeout(struct watchdog_device *wdd,
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index d525957594b6..61dbe52bb3a3 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -732,4 +732,5 @@ module_exit(exit_v9fs)
MODULE_AUTHOR("Latchesar Ionkov <lucho@ionkov.net>");
MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>");
MODULE_AUTHOR("Ron Minnich <rminnich@lanl.gov>");
+MODULE_DESCRIPTION("9P Client File System");
MODULE_LICENSE("GPL");
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index cdf441f22e07..731e3d14b67d 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -52,7 +52,6 @@ void v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
unsigned int flags);
int v9fs_dir_release(struct inode *inode, struct file *filp);
int v9fs_file_open(struct inode *inode, struct file *file);
-void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat);
int v9fs_uflags2omode(int uflags, int extended);
void v9fs_blank_wstat(struct p9_wstat *wstat);
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index 053d1cef6e13..8604e3377ee7 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -68,7 +68,7 @@ ssize_t v9fs_xattr_get(struct dentry *dentry, const char *name,
struct p9_fid *fid;
int ret;
- p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu\n",
+ p9_debug(P9_DEBUG_VFS, "name = '%s' value_len = %zu\n",
name, buffer_size);
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
@@ -139,7 +139,8 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
ssize_t v9fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
- return v9fs_xattr_get(dentry, NULL, buffer, buffer_size);
+ /* Txattrwalk with an empty string lists xattrs instead */
+ return v9fs_xattr_get(dentry, "", buffer, buffer_size);
}
static int v9fs_xattr_handler_get(const struct xattr_handler *handler,
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index 2fe4a5832fcf..d6b9758ee23d 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -568,6 +568,7 @@ static struct dentry *affs_fh_to_parent(struct super_block *sb, struct fid *fid,
}
const struct export_operations affs_export_ops = {
+ .encode_fh = generic_encode_ino32_fh,
.fh_to_dentry = affs_fh_to_dentry,
.fh_to_parent = affs_fh_to_parent,
.get_parent = affs_get_parent,
diff --git a/fs/bcachefs/Kconfig b/fs/bcachefs/Kconfig
index df13a4f9a6e3..c08c2c7d6fbb 100644
--- a/fs/bcachefs/Kconfig
+++ b/fs/bcachefs/Kconfig
@@ -24,7 +24,6 @@ config BCACHEFS_FS
select XXHASH
select SRCU
select SYMBOLIC_ERRNAME
- select MEAN_AND_VARIANCE
help
The bcachefs filesystem - a modern, copy on write filesystem, with
support for multiple devices, compression, checksumming, etc.
@@ -42,7 +41,6 @@ config BCACHEFS_POSIX_ACL
config BCACHEFS_DEBUG_TRANSACTIONS
bool "bcachefs runtime info"
depends on BCACHEFS_FS
- default y
help
This makes the list of running btree transactions available in debugfs.
@@ -78,7 +76,7 @@ config BCACHEFS_NO_LATENCY_ACCT
config MEAN_AND_VARIANCE_UNIT_TEST
tristate "mean_and_variance unit tests" if !KUNIT_ALL_TESTS
depends on KUNIT
- select MEAN_AND_VARIANCE
+ depends on BCACHEFS_FS
default KUNIT_ALL_TESTS
help
This option enables the kunit tests for mean_and_variance module.
diff --git a/fs/bcachefs/Makefile b/fs/bcachefs/Makefile
index 0749731b9072..45b64f89258c 100644
--- a/fs/bcachefs/Makefile
+++ b/fs/bcachefs/Makefile
@@ -70,6 +70,7 @@ bcachefs-y := \
reflink.o \
replicas.o \
sb-clean.o \
+ sb-errors.o \
sb-members.o \
siphash.o \
six.o \
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index 2d516207e223..1fec0e67891f 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -192,123 +192,109 @@ static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
return DIV_ROUND_UP(bytes, sizeof(u64));
}
-int bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_alloc_v1_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
+ int ret = 0;
/* allow for unknown fields */
- if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v)) {
- prt_printf(err, "incorrect value size (%zu < %u)",
- bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
- return -BCH_ERR_invalid_bkey;
- }
-
- return 0;
+ bkey_fsck_err_on(bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v), c, err,
+ alloc_v1_val_size_bad,
+ "incorrect value size (%zu < %u)",
+ bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
+fsck_err:
+ return ret;
}
-int bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_alloc_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
struct bkey_alloc_unpacked u;
+ int ret = 0;
- if (bch2_alloc_unpack_v2(&u, k)) {
- prt_printf(err, "unpack error");
- return -BCH_ERR_invalid_bkey;
- }
-
- return 0;
+ bkey_fsck_err_on(bch2_alloc_unpack_v2(&u, k), c, err,
+ alloc_v2_unpack_error,
+ "unpack error");
+fsck_err:
+ return ret;
}
-int bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_alloc_v3_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
struct bkey_alloc_unpacked u;
+ int ret = 0;
- if (bch2_alloc_unpack_v3(&u, k)) {
- prt_printf(err, "unpack error");
- return -BCH_ERR_invalid_bkey;
- }
-
- return 0;
+ bkey_fsck_err_on(bch2_alloc_unpack_v3(&u, k), c, err,
+ alloc_v2_unpack_error,
+ "unpack error");
+fsck_err:
+ return ret;
}
-int bch2_alloc_v4_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags, struct printbuf *err)
{
struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
+ int ret = 0;
- if (alloc_v4_u64s(a.v) > bkey_val_u64s(k.k)) {
- prt_printf(err, "bad val size (%u > %zu)",
- alloc_v4_u64s(a.v), bkey_val_u64s(k.k));
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(alloc_v4_u64s(a.v) > bkey_val_u64s(k.k), c, err,
+ alloc_v4_val_size_bad,
+ "bad val size (%u > %zu)",
+ alloc_v4_u64s(a.v), bkey_val_u64s(k.k));
- if (!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
- BCH_ALLOC_V4_NR_BACKPOINTERS(a.v)) {
- prt_printf(err, "invalid backpointers_start");
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
+ BCH_ALLOC_V4_NR_BACKPOINTERS(a.v), c, err,
+ alloc_v4_backpointers_start_bad,
+ "invalid backpointers_start");
- if (alloc_data_type(*a.v, a.v->data_type) != a.v->data_type) {
- prt_printf(err, "invalid data type (got %u should be %u)",
- a.v->data_type, alloc_data_type(*a.v, a.v->data_type));
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(alloc_data_type(*a.v, a.v->data_type) != a.v->data_type, c, err,
+ alloc_key_data_type_bad,
+ "invalid data type (got %u should be %u)",
+ a.v->data_type, alloc_data_type(*a.v, a.v->data_type));
switch (a.v->data_type) {
case BCH_DATA_free:
case BCH_DATA_need_gc_gens:
case BCH_DATA_need_discard:
- if (a.v->dirty_sectors ||
- a.v->cached_sectors ||
- a.v->stripe) {
- prt_printf(err, "empty data type free but have data");
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(a.v->dirty_sectors ||
+ a.v->cached_sectors ||
+ a.v->stripe, c, err,
+ alloc_key_empty_but_have_data,
+ "empty data type free but have data");
break;
case BCH_DATA_sb:
case BCH_DATA_journal:
case BCH_DATA_btree:
case BCH_DATA_user:
case BCH_DATA_parity:
- if (!a.v->dirty_sectors) {
- prt_printf(err, "data_type %s but dirty_sectors==0",
- bch2_data_types[a.v->data_type]);
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(!a.v->dirty_sectors, c, err,
+ alloc_key_dirty_sectors_0,
+ "data_type %s but dirty_sectors==0",
+ bch2_data_types[a.v->data_type]);
break;
case BCH_DATA_cached:
- if (!a.v->cached_sectors ||
- a.v->dirty_sectors ||
- a.v->stripe) {
- prt_printf(err, "data type inconsistency");
- return -BCH_ERR_invalid_bkey;
- }
-
- if (!a.v->io_time[READ] &&
- c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs) {
- prt_printf(err, "cached bucket with read_time == 0");
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(!a.v->cached_sectors ||
+ a.v->dirty_sectors ||
+ a.v->stripe, c, err,
+ alloc_key_cached_inconsistency,
+ "data type inconsistency");
+
+ bkey_fsck_err_on(!a.v->io_time[READ] &&
+ c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs,
+ c, err,
+ alloc_key_cached_but_read_time_zero,
+ "cached bucket with read_time == 0");
break;
case BCH_DATA_stripe:
break;
}
-
- return 0;
-}
-
-static inline u64 swab40(u64 x)
-{
- return (((x & 0x00000000ffULL) << 32)|
- ((x & 0x000000ff00ULL) << 16)|
- ((x & 0x0000ff0000ULL) >> 0)|
- ((x & 0x00ff000000ULL) >> 16)|
- ((x & 0xff00000000ULL) >> 32));
+fsck_err:
+ return ret;
}
void bch2_alloc_v4_swab(struct bkey_s k)
@@ -324,6 +310,7 @@ void bch2_alloc_v4_swab(struct bkey_s k)
a->io_time[1] = swab64(a->io_time[1]);
a->stripe = swab32(a->stripe);
a->nr_external_backpointers = swab32(a->nr_external_backpointers);
+ a->fragmentation_lru = swab64(a->fragmentation_lru);
bps = alloc_v4_backpointers(a);
for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) {
@@ -521,17 +508,18 @@ static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
: 0;
}
-int bch2_bucket_gens_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_bucket_gens_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
- if (bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens)) {
- prt_printf(err, "bad val size (%zu != %zu)",
- bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
- return -BCH_ERR_invalid_bkey;
- }
+ int ret = 0;
- return 0;
+ bkey_fsck_err_on(bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens), c, err,
+ bucket_gens_val_size_bad,
+ "bad val size (%zu != %zu)",
+ bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
+fsck_err:
+ return ret;
}
void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
@@ -727,7 +715,7 @@ static int bch2_bucket_do_index(struct btree_trans *trans,
"incorrect key when %s %s:%llu:%llu:0 (got %s should be %s)\n"
" for %s",
set ? "setting" : "clearing",
- bch2_btree_ids[btree],
+ bch2_btree_id_str(btree),
iter.pos.inode,
iter.pos.offset,
bch2_bkey_types[old.k->type],
@@ -986,6 +974,7 @@ int bch2_check_alloc_key(struct btree_trans *trans,
int ret;
if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_k.k->p), c,
+ alloc_key_to_missing_dev_bucket,
"alloc key for invalid device:bucket %llu:%llu",
alloc_k.k->p.inode, alloc_k.k->p.offset))
return bch2_btree_delete_at(trans, alloc_iter, 0);
@@ -1005,7 +994,8 @@ int bch2_check_alloc_key(struct btree_trans *trans,
if (k.k->type != discard_key_type &&
(c->opts.reconstruct_alloc ||
- fsck_err(c, "incorrect key in need_discard btree (got %s should be %s)\n"
+ fsck_err(c, need_discard_key_wrong,
+ "incorrect key in need_discard btree (got %s should be %s)\n"
" %s",
bch2_bkey_types[k.k->type],
bch2_bkey_types[discard_key_type],
@@ -1035,7 +1025,8 @@ int bch2_check_alloc_key(struct btree_trans *trans,
if (k.k->type != freespace_key_type &&
(c->opts.reconstruct_alloc ||
- fsck_err(c, "incorrect key in freespace btree (got %s should be %s)\n"
+ fsck_err(c, freespace_key_wrong,
+ "incorrect key in freespace btree (got %s should be %s)\n"
" %s",
bch2_bkey_types[k.k->type],
bch2_bkey_types[freespace_key_type],
@@ -1066,7 +1057,8 @@ int bch2_check_alloc_key(struct btree_trans *trans,
if (a->gen != alloc_gen(k, gens_offset) &&
(c->opts.reconstruct_alloc ||
- fsck_err(c, "incorrect gen in bucket_gens btree (got %u should be %u)\n"
+ fsck_err(c, bucket_gens_key_wrong,
+ "incorrect gen in bucket_gens btree (got %u should be %u)\n"
" %s",
alloc_gen(k, gens_offset), a->gen,
(printbuf_reset(&buf),
@@ -1124,7 +1116,8 @@ int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
if (k.k->type != KEY_TYPE_set &&
(c->opts.reconstruct_alloc ||
- fsck_err(c, "hole in alloc btree missing in freespace btree\n"
+ fsck_err(c, freespace_hole_missing,
+ "hole in alloc btree missing in freespace btree\n"
" device %llu buckets %llu-%llu",
freespace_iter->pos.inode,
freespace_iter->pos.offset,
@@ -1187,6 +1180,7 @@ int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
for (i = gens_offset; i < gens_end_offset; i++) {
if (fsck_err_on(g.v.gens[i], c,
+ bucket_gens_hole_wrong,
"hole in alloc btree at %llu:%llu with nonzero gen in bucket_gens btree (%u)",
bucket_gens_pos_to_alloc(k.k->p, i).inode,
bucket_gens_pos_to_alloc(k.k->p, i).offset,
@@ -1244,8 +1238,9 @@ static noinline_for_stack int __bch2_check_discard_freespace_key(struct btree_tr
return ret;
if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), c,
+ need_discard_freespace_key_to_invalid_dev_bucket,
"entry in %s btree for nonexistant dev:bucket %llu:%llu",
- bch2_btree_ids[iter->btree_id], pos.inode, pos.offset))
+ bch2_btree_id_str(iter->btree_id), pos.inode, pos.offset))
goto delete;
a = bch2_alloc_to_v4(alloc_k, &a_convert);
@@ -1253,9 +1248,10 @@ static noinline_for_stack int __bch2_check_discard_freespace_key(struct btree_tr
if (fsck_err_on(a->data_type != state ||
(state == BCH_DATA_free &&
genbits != alloc_freespace_genbits(*a)), c,
+ need_discard_freespace_key_bad,
"%s\n incorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)",
(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
- bch2_btree_ids[iter->btree_id],
+ bch2_btree_id_str(iter->btree_id),
iter->pos.inode,
iter->pos.offset,
a->data_type == state,
@@ -1320,6 +1316,7 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
dev_exists = bch2_dev_exists2(c, k.k->p.inode);
if (!dev_exists) {
if (fsck_err_on(!dev_exists, c,
+ bucket_gens_to_invalid_dev,
"bucket_gens key for invalid device:\n %s",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
ret = bch2_btree_delete_at(trans, iter, 0);
@@ -1330,6 +1327,7 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
ca = bch_dev_bkey_exists(c, k.k->p.inode);
if (fsck_err_on(end <= ca->mi.first_bucket ||
start >= ca->mi.nbuckets, c,
+ bucket_gens_to_invalid_buckets,
"bucket_gens key for invalid buckets:\n %s",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
ret = bch2_btree_delete_at(trans, iter, 0);
@@ -1338,6 +1336,7 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
for (b = start; b < ca->mi.first_bucket; b++)
if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
+ bucket_gens_nonzero_for_invalid_buckets,
"bucket_gens key has nonzero gen for invalid bucket")) {
g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
need_update = true;
@@ -1345,6 +1344,7 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
for (b = ca->mi.nbuckets; b < end; b++)
if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
+ bucket_gens_nonzero_for_invalid_buckets,
"bucket_gens key has nonzero gen for invalid bucket")) {
g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
need_update = true;
@@ -1495,11 +1495,13 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
return ret;
if (fsck_err_on(!a->io_time[READ], c,
+ alloc_key_cached_but_read_time_zero,
"cached bucket with read_time 0\n"
" %s",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)) ||
fsck_err_on(lru_k.k->type != KEY_TYPE_set, c,
+ alloc_key_to_missing_lru_entry,
"missing lru entry\n"
" %s",
(printbuf_reset(&buf),
@@ -2075,6 +2077,17 @@ void bch2_recalc_capacity(struct bch_fs *c)
closure_wake_up(&c->freelist_wait);
}
+u64 bch2_min_rw_member_capacity(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned i;
+ u64 ret = U64_MAX;
+
+ for_each_rw_member(ca, c, i)
+ ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);
+ return ret;
+}
+
static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
{
struct open_bucket *ob;
diff --git a/fs/bcachefs/alloc_background.h b/fs/bcachefs/alloc_background.h
index 97042067d2a9..73faf99a222a 100644
--- a/fs/bcachefs/alloc_background.h
+++ b/fs/bcachefs/alloc_background.h
@@ -149,13 +149,13 @@ struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *, struct bkey_s
int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);
-int bch2_alloc_v1_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_alloc_v1_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
-int bch2_alloc_v2_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_alloc_v2_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
-int bch2_alloc_v3_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_alloc_v3_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
-int bch2_alloc_v4_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_alloc_v4_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
void bch2_alloc_v4_swab(struct bkey_s);
void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
@@ -193,7 +193,7 @@ void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
.min_val_size = 48, \
})
-int bch2_bucket_gens_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_bucket_gens_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
void bch2_bucket_gens_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
@@ -249,6 +249,7 @@ int bch2_dev_freespace_init(struct bch_fs *, struct bch_dev *, u64, u64);
int bch2_fs_freespace_init(struct bch_fs *);
void bch2_recalc_capacity(struct bch_fs *);
+u64 bch2_min_rw_member_capacity(struct bch_fs *);
void bch2_dev_allocator_remove(struct bch_fs *, struct bch_dev *);
void bch2_dev_allocator_add(struct bch_fs *, struct bch_dev *);
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index 3bc4abd3d7d5..b85c7765272f 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -399,12 +399,23 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
struct bucket_alloc_state *s,
struct closure *cl)
{
- struct btree_iter iter;
- struct bkey_s_c k;
+ struct btree_iter iter, citer;
+ struct bkey_s_c k, ck;
struct open_bucket *ob = NULL;
- u64 alloc_start = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
- u64 alloc_cursor = max(alloc_start, READ_ONCE(ca->alloc_cursor));
+ u64 first_bucket = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
+ u64 alloc_start = max(first_bucket, READ_ONCE(ca->alloc_cursor));
+ u64 alloc_cursor = alloc_start;
int ret;
+
+ /*
+ * Scan with an uncached iterator to avoid polluting the key cache. An
+ * uncached iter will return a cached key if one exists, but if not
+ * there is no other underlying protection for the associated key cache
+ * slot. To avoid racing bucket allocations, look up the cached key slot
+ * of any likely allocation candidate before attempting to proceed with
+ * the allocation. This provides proper exclusion on the associated
+ * bucket.
+ */
again:
for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
BTREE_ITER_SLOTS, k, ret) {
@@ -419,25 +430,38 @@ again:
continue;
a = bch2_alloc_to_v4(k, &a_convert);
-
if (a->data_type != BCH_DATA_free)
continue;
+ /* now check the cached key to serialize concurrent allocs of the bucket */
+ ck = bch2_bkey_get_iter(trans, &citer, BTREE_ID_alloc, k.k->p, BTREE_ITER_CACHED);
+ ret = bkey_err(ck);
+ if (ret)
+ break;
+
+ a = bch2_alloc_to_v4(ck, &a_convert);
+ if (a->data_type != BCH_DATA_free)
+ goto next;
+
s->buckets_seen++;
ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, watermark, a, s, cl);
+next:
+ citer.path->preserve = false;
+ bch2_trans_iter_exit(trans, &citer);
if (ob)
break;
}
bch2_trans_iter_exit(trans, &iter);
+ alloc_cursor = iter.pos.offset;
ca->alloc_cursor = alloc_cursor;
if (!ob && ret)
ob = ERR_PTR(ret);
- if (!ob && alloc_cursor > alloc_start) {
- alloc_cursor = alloc_start;
+ if (!ob && alloc_start > first_bucket) {
+ alloc_cursor = alloc_start = first_bucket;
goto again;
}
diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c
index cc856150a948..ef02c9bb0354 100644
--- a/fs/bcachefs/backpointers.c
+++ b/fs/bcachefs/backpointers.c
@@ -5,6 +5,7 @@
#include "backpointers.h"
#include "btree_cache.h"
#include "btree_update.h"
+#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "error.h"
@@ -37,25 +38,26 @@ static bool extent_matches_bp(struct bch_fs *c,
return false;
}
-int bch2_backpointer_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_backpointer_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
struct bpos bucket = bp_pos_to_bucket(c, bp.k->p);
+ int ret = 0;
- if (!bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset))) {
- prt_str(err, "backpointer at wrong pos");
- return -BCH_ERR_invalid_bkey;
- }
-
- return 0;
+ bkey_fsck_err_on(!bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset)),
+ c, err,
+ backpointer_pos_wrong,
+ "backpointer at wrong pos");
+fsck_err:
+ return ret;
}
void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer *bp)
{
prt_printf(out, "btree=%s l=%u offset=%llu:%u len=%u pos=",
- bch2_btree_ids[bp->btree_id],
+ bch2_btree_id_str(bp->btree_id),
bp->level,
(u64) (bp->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT),
(u32) bp->bucket_offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT),
@@ -76,7 +78,7 @@ void bch2_backpointer_swab(struct bkey_s k)
{
struct bkey_s_backpointer bp = bkey_s_to_backpointer(k);
- bp.v->bucket_offset = swab32(bp.v->bucket_offset);
+ bp.v->bucket_offset = swab40(bp.v->bucket_offset);
bp.v->bucket_len = swab32(bp.v->bucket_len);
bch2_bpos_swab(&bp.v->pos);
}
@@ -219,18 +221,22 @@ out:
static void backpointer_not_found(struct btree_trans *trans,
struct bpos bp_pos,
struct bch_backpointer bp,
- struct bkey_s_c k,
- const char *thing_it_points_to)
+ struct bkey_s_c k)
{
struct bch_fs *c = trans->c;
struct printbuf buf = PRINTBUF;
struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
+ /*
+ * If we're using the btree write buffer, the backpointer we were
+ * looking at may have already been deleted - failure to find what it
+ * pointed to is not an error:
+ */
if (likely(!bch2_backpointers_no_use_write_buffer))
return;
prt_printf(&buf, "backpointer doesn't match %s it points to:\n ",
- thing_it_points_to);
+ bp.level ? "btree node" : "extent");
prt_printf(&buf, "bucket: ");
bch2_bpos_to_text(&buf, bucket);
prt_printf(&buf, "\n ");
@@ -256,56 +262,37 @@ struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans,
struct bch_backpointer bp,
unsigned iter_flags)
{
- struct bch_fs *c = trans->c;
- struct btree_root *r = bch2_btree_id_root(c, bp.btree_id);
- struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
- struct bkey_s_c k;
-
- bch2_trans_node_iter_init(trans, iter,
- bp.btree_id,
- bp.pos,
- 0,
- min(bp.level, r->level),
- iter_flags);
- k = bch2_btree_iter_peek_slot(iter);
- if (bkey_err(k)) {
- bch2_trans_iter_exit(trans, iter);
- return k;
- }
-
- if (bp.level == r->level + 1)
- k = bkey_i_to_s_c(&r->key);
-
- if (k.k && extent_matches_bp(c, bp.btree_id, bp.level, k, bucket, bp))
- return k;
-
- bch2_trans_iter_exit(trans, iter);
+ if (likely(!bp.level)) {
+ struct bch_fs *c = trans->c;
+ struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
+ struct bkey_s_c k;
+
+ bch2_trans_node_iter_init(trans, iter,
+ bp.btree_id,
+ bp.pos,
+ 0, 0,
+ iter_flags);
+ k = bch2_btree_iter_peek_slot(iter);
+ if (bkey_err(k)) {
+ bch2_trans_iter_exit(trans, iter);
+ return k;
+ }
- if (unlikely(bch2_backpointers_no_use_write_buffer)) {
- if (bp.level) {
- struct btree *b;
+ if (k.k && extent_matches_bp(c, bp.btree_id, bp.level, k, bucket, bp))
+ return k;
- /*
- * If a backpointer for a btree node wasn't found, it may be
- * because it was overwritten by a new btree node that hasn't
- * been written out yet - backpointer_get_node() checks for
- * this:
- */
- b = bch2_backpointer_get_node(trans, iter, bp_pos, bp);
- if (!IS_ERR_OR_NULL(b))
- return bkey_i_to_s_c(&b->key);
+ bch2_trans_iter_exit(trans, iter);
+ backpointer_not_found(trans, bp_pos, bp, k);
+ return bkey_s_c_null;
+ } else {
+ struct btree *b = bch2_backpointer_get_node(trans, iter, bp_pos, bp);
+ if (IS_ERR_OR_NULL(b)) {
bch2_trans_iter_exit(trans, iter);
-
- if (IS_ERR(b))
- return bkey_s_c_err(PTR_ERR(b));
- return bkey_s_c_null;
+ return IS_ERR(b) ? bkey_s_c_err(PTR_ERR(b)) : bkey_s_c_null;
}
-
- backpointer_not_found(trans, bp_pos, bp, k, "extent");
+ return bkey_i_to_s_c(&b->key);
}
-
- return bkey_s_c_null;
}
struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
@@ -329,6 +316,8 @@ struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
if (IS_ERR(b))
goto err;
+ BUG_ON(b->c.level != bp.level - 1);
+
if (b && extent_matches_bp(c, bp.btree_id, bp.level,
bkey_i_to_s_c(&b->key),
bucket, bp))
@@ -337,8 +326,7 @@ struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
if (b && btree_node_will_make_reachable(b)) {
b = ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node);
} else {
- backpointer_not_found(trans, bp_pos, bp,
- bkey_i_to_s_c(&b->key), "btree node");
+ backpointer_not_found(trans, bp_pos, bp, bkey_i_to_s_c(&b->key));
b = NULL;
}
err:
@@ -356,6 +344,7 @@ static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_
int ret = 0;
if (fsck_err_on(!bch2_dev_exists2(c, k.k->p.inode), c,
+ backpointer_to_missing_device,
"backpointer for missing device:\n%s",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
ret = bch2_btree_delete_at(trans, bp_iter, 0);
@@ -369,6 +358,7 @@ static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_
goto out;
if (fsck_err_on(alloc_k.k->type != KEY_TYPE_alloc_v4, c,
+ backpointer_to_missing_alloc,
"backpointer for nonexistent alloc key: %llu:%llu:0\n%s",
alloc_iter.pos.inode, alloc_iter.pos.offset,
(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
@@ -453,14 +443,14 @@ fsck_err:
return ret;
missing:
prt_printf(&buf, "missing backpointer for btree=%s l=%u ",
- bch2_btree_ids[bp.btree_id], bp.level);
+ bch2_btree_id_str(bp.btree_id), bp.level);
bch2_bkey_val_to_text(&buf, c, orig_k);
prt_printf(&buf, "\nbp pos ");
bch2_bpos_to_text(&buf, bp_iter.pos);
if (c->sb.version_upgrade_complete < bcachefs_metadata_version_backpointers ||
c->opts.reconstruct_alloc ||
- fsck_err(c, "%s", buf.buf))
+ fsck_err(c, ptr_to_missing_backpointer, "%s", buf.buf))
ret = bch2_bucket_backpointer_mod(trans, bucket, bp, orig_k, true);
goto out;
@@ -793,7 +783,9 @@ static int check_one_backpointer(struct btree_trans *trans,
}
if (fsck_err_on(!k.k, c,
- "backpointer for missing extent\n %s",
+ backpointer_to_missing_ptr,
+ "backpointer for missing %s\n %s",
+ bp.v->level ? "btree node" : "extent",
(bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf))) {
ret = bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p);
goto out;
diff --git a/fs/bcachefs/backpointers.h b/fs/bcachefs/backpointers.h
index 547e0617602a..ab866feeaf66 100644
--- a/fs/bcachefs/backpointers.h
+++ b/fs/bcachefs/backpointers.h
@@ -7,7 +7,16 @@
#include "buckets.h"
#include "super.h"
-int bch2_backpointer_invalid(const struct bch_fs *, struct bkey_s_c k,
+static inline u64 swab40(u64 x)
+{
+ return (((x & 0x00000000ffULL) << 32)|
+ ((x & 0x000000ff00ULL) << 16)|
+ ((x & 0x0000ff0000ULL) >> 0)|
+ ((x & 0x00ff000000ULL) >> 16)|
+ ((x & 0xff00000000ULL) >> 32));
+}
+
+int bch2_backpointer_invalid(struct bch_fs *, struct bkey_s_c k,
enum bkey_invalid_flags, struct printbuf *);
void bch2_backpointer_to_text(struct printbuf *, const struct bch_backpointer *);
void bch2_backpointer_k_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
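
swab40() is moved into this header so bch2_backpointer_swab() can byte-swap the 40-bit bucket_offset field correctly (the old code ran swab32() on it). The helper simply reverses the five bytes of a 40-bit quantity; a worked example under that reading:

/* Assuming the swab40() definition shown above: */
u64 x = 0x0102030405ULL;
u64 y = swab40(x);	/* y == 0x0504030201ULL: the five bytes reversed */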
diff --git a/fs/bcachefs/bbpos.h b/fs/bcachefs/bbpos.h
index 1fbed1f8378d..be2edced5213 100644
--- a/fs/bcachefs/bbpos.h
+++ b/fs/bcachefs/bbpos.h
@@ -2,20 +2,9 @@
#ifndef _BCACHEFS_BBPOS_H
#define _BCACHEFS_BBPOS_H
+#include "bbpos_types.h"
#include "bkey_methods.h"
-
-struct bbpos {
- enum btree_id btree;
- struct bpos pos;
-};
-
-static inline struct bbpos BBPOS(enum btree_id btree, struct bpos pos)
-{
- return (struct bbpos) { btree, pos };
-}
-
-#define BBPOS_MIN BBPOS(0, POS_MIN)
-#define BBPOS_MAX BBPOS(BTREE_ID_NR - 1, POS_MAX)
+#include "btree_cache.h"
static inline int bbpos_cmp(struct bbpos l, struct bbpos r)
{
@@ -40,7 +29,7 @@ static inline struct bbpos bbpos_successor(struct bbpos pos)
static inline void bch2_bbpos_to_text(struct printbuf *out, struct bbpos pos)
{
- prt_str(out, bch2_btree_ids[pos.btree]);
+ prt_str(out, bch2_btree_id_str(pos.btree));
prt_char(out, ':');
bch2_bpos_to_text(out, pos.pos);
}
diff --git a/fs/bcachefs/bbpos_types.h b/fs/bcachefs/bbpos_types.h
new file mode 100644
index 000000000000..5198e94cf3b8
--- /dev/null
+++ b/fs/bcachefs/bbpos_types.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BBPOS_TYPES_H
+#define _BCACHEFS_BBPOS_TYPES_H
+
+struct bbpos {
+ enum btree_id btree;
+ struct bpos pos;
+};
+
+static inline struct bbpos BBPOS(enum btree_id btree, struct bpos pos)
+{
+ return (struct bbpos) { btree, pos };
+}
+
+#define BBPOS_MIN BBPOS(0, POS_MIN)
+#define BBPOS_MAX BBPOS(BTREE_ID_NR - 1, POS_MAX)
+
+#endif /* _BCACHEFS_BBPOS_TYPES_H */
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 53ffa88cae16..9cb8684959ee 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -209,6 +209,7 @@
#include "nocow_locking_types.h"
#include "opts.h"
#include "recovery_types.h"
+#include "sb-errors_types.h"
#include "seqmutex.h"
#include "util.h"
@@ -418,6 +419,7 @@ enum bch_time_stats {
#include "buckets_types.h"
#include "buckets_waiting_for_journal_types.h"
#include "clock_types.h"
+#include "disk_groups_types.h"
#include "ec_types.h"
#include "journal_types.h"
#include "keylist_types.h"
@@ -463,6 +465,7 @@ enum gc_phase {
GC_PHASE_BTREE_snapshot_trees,
GC_PHASE_BTREE_deleted_inodes,
GC_PHASE_BTREE_logged_ops,
+ GC_PHASE_BTREE_rebalance_work,
GC_PHASE_PENDING_DELETE,
};
@@ -500,6 +503,8 @@ struct bch_dev {
* Committed by bch2_write_super() -> bch_fs_mi_update()
*/
struct bch_member_cpu mi;
+ atomic64_t errors[BCH_MEMBER_ERROR_NR];
+
__uuid_t uuid;
char name[BDEVNAME_SIZE];
@@ -578,7 +583,7 @@ enum {
BCH_FS_INITIAL_GC_UNFIXED, /* kill when we enumerate fsck errors */
BCH_FS_NEED_ANOTHER_GC,
- BCH_FS_HAVE_DELETED_SNAPSHOTS,
+ BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS,
/* errors: */
BCH_FS_ERROR,
@@ -938,9 +943,6 @@ struct bch_fs {
struct list_head moving_context_list;
struct mutex moving_context_lock;
- struct list_head data_progress_list;
- struct mutex data_progress_lock;
-
/* REBALANCE */
struct bch_fs_rebalance rebalance;
@@ -991,11 +993,6 @@ struct bch_fs {
struct bio_set dio_read_bioset;
struct bio_set nocow_flush_bioset;
- /* ERRORS */
- struct list_head fsck_errors;
- struct mutex fsck_error_lock;
- bool fsck_alloc_err;
-
/* QUOTAS */
struct bch_memquota_type quotas[QTYP_NR];
@@ -1044,6 +1041,14 @@ struct bch_fs {
struct bch2_time_stats times[BCH_TIME_STAT_NR];
struct btree_transaction_stats btree_transaction_stats[BCH_TRANSACTIONS_NR];
+
+ /* ERRORS */
+ struct list_head fsck_error_msgs;
+ struct mutex fsck_error_msgs_lock;
+ bool fsck_alloc_msgs_err;
+
+ bch_sb_errors_cpu fsck_error_counts;
+ struct mutex fsck_error_counts_lock;
};
extern struct wait_queue_head bch2_read_only_wait;
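Note: the bcachefs.h hunks above add a per-device error counter array, atomic64_t errors[BCH_MEMBER_ERROR_NR], indexed by error category. The sketch below is a user-space analogue of that pattern using C11 atomics; it only shows the counting shape, and every demo_* name is a stand-in rather than the kernel API.

#include <stdatomic.h>
#include <stdio.h>

enum demo_error_type { DEMO_ERR_read, DEMO_ERR_write, DEMO_ERR_checksum, DEMO_ERR_NR };

struct demo_dev {
	atomic_ullong errors[DEMO_ERR_NR];
};

static void demo_io_error(struct demo_dev *d, enum demo_error_type t)
{
	/* lockless increment; concurrent IO completions may race on this */
	atomic_fetch_add_explicit(&d->errors[t], 1, memory_order_relaxed);
}

int main(void)
{
	struct demo_dev d = {0};

	demo_io_error(&d, DEMO_ERR_checksum);
	demo_io_error(&d, DEMO_ERR_read);

	for (int i = 0; i < DEMO_ERR_NR; i++)
		printf("type %d: %llu\n", i,
		       atomic_load_explicit(&d.errors[i], memory_order_relaxed));
	return 0;
}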
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index 99749f3315fe..0a750953ff92 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -613,31 +613,17 @@ struct bch_extent_stripe_ptr {
#endif
};
-struct bch_extent_reservation {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u64 type:6,
- unused:22,
- replicas:4,
- generation:32;
-#elif defined (__BIG_ENDIAN_BITFIELD)
- __u64 generation:32,
- replicas:4,
- unused:22,
- type:6;
-#endif
-};
-
struct bch_extent_rebalance {
#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u64 type:7,
- unused:33,
- compression:8,
+ __u64 type:6,
+ unused:34,
+ compression:8, /* enum bch_compression_opt */
target:16;
#elif defined (__BIG_ENDIAN_BITFIELD)
__u64 target:16,
compression:8,
- unused:33,
- type:7;
+ unused:34,
+ type:6;
#endif
};
@@ -838,34 +824,30 @@ enum inode_opt_id {
Inode_opt_nr,
};
-enum {
- /*
- * User flags (get/settable with FS_IOC_*FLAGS, correspond to FS_*_FL
- * flags)
- */
- __BCH_INODE_SYNC = 0,
- __BCH_INODE_IMMUTABLE = 1,
- __BCH_INODE_APPEND = 2,
- __BCH_INODE_NODUMP = 3,
- __BCH_INODE_NOATIME = 4,
-
- __BCH_INODE_I_SIZE_DIRTY = 5, /* obsolete */
- __BCH_INODE_I_SECTORS_DIRTY = 6, /* obsolete */
- __BCH_INODE_UNLINKED = 7,
- __BCH_INODE_BACKPTR_UNTRUSTED = 8,
-
- /* bits 20+ reserved for packed fields below: */
-};
-
-#define BCH_INODE_SYNC (1 << __BCH_INODE_SYNC)
-#define BCH_INODE_IMMUTABLE (1 << __BCH_INODE_IMMUTABLE)
-#define BCH_INODE_APPEND (1 << __BCH_INODE_APPEND)
-#define BCH_INODE_NODUMP (1 << __BCH_INODE_NODUMP)
-#define BCH_INODE_NOATIME (1 << __BCH_INODE_NOATIME)
-#define BCH_INODE_I_SIZE_DIRTY (1 << __BCH_INODE_I_SIZE_DIRTY)
-#define BCH_INODE_I_SECTORS_DIRTY (1 << __BCH_INODE_I_SECTORS_DIRTY)
-#define BCH_INODE_UNLINKED (1 << __BCH_INODE_UNLINKED)
-#define BCH_INODE_BACKPTR_UNTRUSTED (1 << __BCH_INODE_BACKPTR_UNTRUSTED)
+#define BCH_INODE_FLAGS() \
+ x(sync, 0) \
+ x(immutable, 1) \
+ x(append, 2) \
+ x(nodump, 3) \
+ x(noatime, 4) \
+ x(i_size_dirty, 5) \
+ x(i_sectors_dirty, 6) \
+ x(unlinked, 7) \
+ x(backptr_untrusted, 8)
+
+/* bits 20+ reserved for packed fields below: */
+
+enum bch_inode_flags {
+#define x(t, n) BCH_INODE_##t = 1U << n,
+ BCH_INODE_FLAGS()
+#undef x
+};
+
+enum __bch_inode_flags {
+#define x(t, n) __BCH_INODE_##t = n,
+ BCH_INODE_FLAGS()
+#undef x
+};
LE32_BITMASK(INODE_STR_HASH, struct bch_inode, bi_flags, 20, 24);
LE32_BITMASK(INODE_NR_FIELDS, struct bch_inode, bi_flags, 24, 31);
@@ -1232,7 +1214,8 @@ struct bch_sb_field {
x(journal_seq_blacklist, 8) \
x(journal_v2, 9) \
x(counters, 10) \
- x(members_v2, 11)
+ x(members_v2, 11) \
+ x(errors, 12)
enum bch_sb_field_type {
#define x(f, nr) BCH_SB_FIELD_##f = nr,
@@ -1282,6 +1265,18 @@ enum bch_iops_measurement {
BCH_IOPS_NR
};
+#define BCH_MEMBER_ERROR_TYPES() \
+ x(read, 0) \
+ x(write, 1) \
+ x(checksum, 2)
+
+enum bch_member_error_type {
+#define x(t, n) BCH_MEMBER_ERROR_##t = n,
+ BCH_MEMBER_ERROR_TYPES()
+#undef x
+ BCH_MEMBER_ERROR_NR
+};
+
struct bch_member {
__uuid_t uuid;
__le64 nbuckets; /* device size */
@@ -1292,6 +1287,9 @@ struct bch_member {
__le64 flags;
__le32 iops[4];
+ __le64 errors[BCH_MEMBER_ERROR_NR];
+ __le64 errors_at_reset[BCH_MEMBER_ERROR_NR];
+ __le64 errors_reset_time;
};
#define BCH_MEMBER_V1_BYTES 56
@@ -1615,11 +1613,20 @@ struct journal_seq_blacklist_entry {
struct bch_sb_field_journal_seq_blacklist {
struct bch_sb_field field;
+ struct journal_seq_blacklist_entry start[];
+};
- struct journal_seq_blacklist_entry start[0];
- __u64 _data[];
+struct bch_sb_field_errors {
+ struct bch_sb_field field;
+ struct bch_sb_field_error_entry {
+ __le64 v;
+ __le64 last_error_time;
+ } entries[];
};
+LE64_BITMASK(BCH_SB_ERROR_ENTRY_ID, struct bch_sb_field_error_entry, v, 0, 16);
+LE64_BITMASK(BCH_SB_ERROR_ENTRY_NR, struct bch_sb_field_error_entry, v, 16, 64);
+
/* Superblock: */
/*
@@ -1682,7 +1689,9 @@ struct bch_sb_field_journal_seq_blacklist {
x(snapshot_skiplists, BCH_VERSION(1, 1), \
BIT_ULL(BCH_RECOVERY_PASS_check_snapshots)) \
x(deleted_inodes, BCH_VERSION(1, 2), \
- BIT_ULL(BCH_RECOVERY_PASS_check_inodes))
+ BIT_ULL(BCH_RECOVERY_PASS_check_inodes)) \
+ x(rebalance_work, BCH_VERSION(1, 3), \
+ BIT_ULL(BCH_RECOVERY_PASS_set_fs_needs_rebalance))
enum bcachefs_metadata_version {
bcachefs_metadata_version_min = 9,
@@ -1693,7 +1702,7 @@ enum bcachefs_metadata_version {
};
static const __maybe_unused
-unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_major_minor;
+unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_rebalance_work;
#define bcachefs_metadata_version_current (bcachefs_metadata_version_max - 1)
@@ -2247,7 +2256,8 @@ LE32_BITMASK(JSET_NO_FLUSH, struct jset, flags, 5, 6);
enum btree_id_flags {
BTREE_ID_EXTENTS = BIT(0),
BTREE_ID_SNAPSHOTS = BIT(1),
- BTREE_ID_DATA = BIT(2),
+ BTREE_ID_SNAPSHOT_FIELD = BIT(2),
+ BTREE_ID_DATA = BIT(3),
};
#define BCH_BTREE_IDS() \
@@ -2302,11 +2312,13 @@ enum btree_id_flags {
BIT_ULL(KEY_TYPE_bucket_gens)) \
x(snapshot_trees, 15, 0, \
BIT_ULL(KEY_TYPE_snapshot_tree)) \
- x(deleted_inodes, 16, BTREE_ID_SNAPSHOTS, \
+ x(deleted_inodes, 16, BTREE_ID_SNAPSHOT_FIELD, \
BIT_ULL(KEY_TYPE_set)) \
x(logged_ops, 17, 0, \
BIT_ULL(KEY_TYPE_logged_op_truncate)| \
- BIT_ULL(KEY_TYPE_logged_op_finsert))
+ BIT_ULL(KEY_TYPE_logged_op_finsert)) \
+ x(rebalance_work, 18, BTREE_ID_SNAPSHOT_FIELD, \
+ BIT_ULL(KEY_TYPE_set)|BIT_ULL(KEY_TYPE_cookie))
enum btree_id {
#define x(name, nr, ...) BTREE_ID_##name = nr,
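Note: the bcachefs_format.h hunks above replace open-coded BCH_INODE_* defines with the BCH_INODE_FLAGS() x-macro list, and add BCH_MEMBER_ERROR_TYPES() in the same style: one list of (name, bit) pairs expanded more than once to generate related enums. A self-contained sketch of that x-macro technique, with illustrative DEMO_* names:

#include <stdio.h>

#define DEMO_FLAGS()	\
	x(sync,    0)	\
	x(append,  1)	\
	x(nodump,  2)

enum demo_flags {		/* mask values: 1 << bit */
#define x(t, n) DEMO_##t = 1U << n,
	DEMO_FLAGS()
#undef x
};

enum __demo_flags {		/* raw bit numbers */
#define x(t, n) __DEMO_##t = n,
	DEMO_FLAGS()
#undef x
	__DEMO_NR
};

int main(void)
{
	printf("DEMO_append = 0x%x (bit %d of %d)\n",
	       DEMO_append, __DEMO_append, __DEMO_NR);
	return 0;
}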
diff --git a/fs/bcachefs/bkey.h b/fs/bcachefs/bkey.h
index 518450209236..831be01809f2 100644
--- a/fs/bcachefs/bkey.h
+++ b/fs/bcachefs/bkey.h
@@ -92,19 +92,15 @@ enum bkey_lr_packed {
#define bkey_lr_packed(_l, _r) \
((_l)->format + ((_r)->format << 1))
-#define bkey_copy(_dst, _src) \
-do { \
- BUILD_BUG_ON(!type_is(_dst, struct bkey_i *) && \
- !type_is(_dst, struct bkey_packed *)); \
- BUILD_BUG_ON(!type_is(_src, struct bkey_i *) && \
- !type_is(_src, struct bkey_packed *)); \
- EBUG_ON((u64 *) (_dst) > (u64 *) (_src) && \
- (u64 *) (_dst) < (u64 *) (_src) + \
- ((struct bkey *) (_src))->u64s); \
- \
- memcpy_u64s_small((_dst), (_src), \
- ((struct bkey *) (_src))->u64s); \
-} while (0)
+static inline void bkey_p_copy(struct bkey_packed *dst, const struct bkey_packed *src)
+{
+ memcpy_u64s_small(dst, src, src->u64s);
+}
+
+static inline void bkey_copy(struct bkey_i *dst, const struct bkey_i *src)
+{
+ memcpy_u64s_small(dst, src, src->k.u64s);
+}
struct btree;
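Note: the bkey.h hunk above turns the old bkey_copy macro, which relied on BUILD_BUG_ON() type checks inside a do/while body, into two typed static inline helpers (bkey_copy and bkey_p_copy), so the compiler itself rejects a wrong-type argument. A minimal sketch of that macro-to-typed-inline conversion, with placeholder struct and function names:

#include <string.h>
#include <stdio.h>

struct pkt  { unsigned len; char data[32]; };
struct pkt2 { unsigned len; char data[64]; };

/* One inline per destination type: a wrong-type argument is now a compile
 * error instead of a runtime-checked macro expansion.
 */
static inline void pkt_copy(struct pkt *dst, const struct pkt *src)
{
	memcpy(dst, src, sizeof(*src));
}

static inline void pkt2_copy(struct pkt2 *dst, const struct pkt2 *src)
{
	memcpy(dst, src, sizeof(*src));
}

int main(void)
{
	struct pkt a = { 5, "hello" }, b;

	pkt_copy(&b, &a);
	/* pkt2_copy(&b, &a);  <-- would fail to compile, unlike the old macro */
	printf("%u %s\n", b.len, b.data);
	return 0;
}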
diff --git a/fs/bcachefs/bkey_methods.c b/fs/bcachefs/bkey_methods.c
index d9fb1fc81f1e..761f5e33b1e6 100644
--- a/fs/bcachefs/bkey_methods.c
+++ b/fs/bcachefs/bkey_methods.c
@@ -3,6 +3,7 @@
#include "bcachefs.h"
#include "backpointers.h"
#include "bkey_methods.h"
+#include "btree_cache.h"
#include "btree_types.h"
#include "alloc_background.h"
#include "dirent.h"
@@ -25,7 +26,7 @@ const char * const bch2_bkey_types[] = {
NULL
};
-static int deleted_key_invalid(const struct bch_fs *c, struct bkey_s_c k,
+static int deleted_key_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags, struct printbuf *err)
{
return 0;
@@ -39,23 +40,24 @@ static int deleted_key_invalid(const struct bch_fs *c, struct bkey_s_c k,
.key_invalid = deleted_key_invalid, \
})
-static int empty_val_key_invalid(const struct bch_fs *c, struct bkey_s_c k,
+static int empty_val_key_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags, struct printbuf *err)
{
- if (bkey_val_bytes(k.k)) {
- prt_printf(err, "incorrect value size (%zu != 0)",
- bkey_val_bytes(k.k));
- return -BCH_ERR_invalid_bkey;
- }
-
- return 0;
+ int ret = 0;
+
+ bkey_fsck_err_on(bkey_val_bytes(k.k), c, err,
+ bkey_val_size_nonzero,
+ "incorrect value size (%zu != 0)",
+ bkey_val_bytes(k.k));
+fsck_err:
+ return ret;
}
#define bch2_bkey_ops_error ((struct bkey_ops) { \
.key_invalid = empty_val_key_invalid, \
})
-static int key_type_cookie_invalid(const struct bch_fs *c, struct bkey_s_c k,
+static int key_type_cookie_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags, struct printbuf *err)
{
return 0;
@@ -70,7 +72,7 @@ static int key_type_cookie_invalid(const struct bch_fs *c, struct bkey_s_c k,
.key_invalid = empty_val_key_invalid, \
})
-static int key_type_inline_data_invalid(const struct bch_fs *c, struct bkey_s_c k,
+static int key_type_inline_data_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags, struct printbuf *err)
{
return 0;
@@ -91,18 +93,6 @@ static void key_type_inline_data_to_text(struct printbuf *out, struct bch_fs *c,
.val_to_text = key_type_inline_data_to_text, \
})
-static int key_type_set_invalid(const struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags, struct printbuf *err)
-{
- if (bkey_val_bytes(k.k)) {
- prt_printf(err, "incorrect value size (%zu != %zu)",
- bkey_val_bytes(k.k), sizeof(struct bch_cookie));
- return -BCH_ERR_invalid_bkey;
- }
-
- return 0;
-}
-
static bool key_type_set_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
{
bch2_key_resize(l.k, l.k->size + r.k->size);
@@ -110,7 +100,7 @@ static bool key_type_set_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_
}
#define bch2_bkey_ops_set ((struct bkey_ops) { \
- .key_invalid = key_type_set_invalid, \
+ .key_invalid = empty_val_key_invalid, \
.key_merge = key_type_set_merge, \
})
@@ -128,84 +118,95 @@ int bch2_bkey_val_invalid(struct bch_fs *c, struct bkey_s_c k,
struct printbuf *err)
{
const struct bkey_ops *ops = bch2_bkey_type_ops(k.k->type);
+ int ret = 0;
- if (bkey_val_bytes(k.k) < ops->min_val_size) {
- prt_printf(err, "bad val size (%zu < %u)",
- bkey_val_bytes(k.k), ops->min_val_size);
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(bkey_val_bytes(k.k) < ops->min_val_size, c, err,
+ bkey_val_size_too_small,
+ "bad val size (%zu < %u)",
+ bkey_val_bytes(k.k), ops->min_val_size);
if (!ops->key_invalid)
return 0;
- return ops->key_invalid(c, k, flags, err);
+ ret = ops->key_invalid(c, k, flags, err);
+fsck_err:
+ return ret;
}
static u64 bch2_key_types_allowed[] = {
-#define x(name, nr, flags, keys) [BKEY_TYPE_##name] = BIT_ULL(KEY_TYPE_deleted)|keys,
- BCH_BTREE_IDS()
-#undef x
[BKEY_TYPE_btree] =
BIT_ULL(KEY_TYPE_deleted)|
BIT_ULL(KEY_TYPE_btree_ptr)|
BIT_ULL(KEY_TYPE_btree_ptr_v2),
+#define x(name, nr, flags, keys) [BKEY_TYPE_##name] = BIT_ULL(KEY_TYPE_deleted)|keys,
+ BCH_BTREE_IDS()
+#undef x
};
+const char *bch2_btree_node_type_str(enum btree_node_type type)
+{
+ return type == BKEY_TYPE_btree ? "internal btree node" : bch2_btree_id_str(type - 1);
+}
+
int __bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
enum btree_node_type type,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
- if (k.k->u64s < BKEY_U64s) {
- prt_printf(err, "u64s too small (%u < %zu)", k.k->u64s, BKEY_U64s);
- return -BCH_ERR_invalid_bkey;
- }
+ int ret = 0;
- if (flags & BKEY_INVALID_COMMIT &&
- !(bch2_key_types_allowed[type] & BIT_ULL(k.k->type))) {
- prt_printf(err, "invalid key type for btree %s (%s)",
- bch2_btree_ids[type], bch2_bkey_types[k.k->type]);
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(k.k->u64s < BKEY_U64s, c, err,
+ bkey_u64s_too_small,
+ "u64s too small (%u < %zu)", k.k->u64s, BKEY_U64s);
- if (btree_node_type_is_extents(type) && !bkey_whiteout(k.k)) {
- if (k.k->size == 0) {
- prt_printf(err, "size == 0");
- return -BCH_ERR_invalid_bkey;
- }
+ if (type >= BKEY_TYPE_NR)
+ return 0;
- if (k.k->size > k.k->p.offset) {
- prt_printf(err, "size greater than offset (%u > %llu)",
- k.k->size, k.k->p.offset);
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on((flags & BKEY_INVALID_COMMIT) &&
+ !(bch2_key_types_allowed[type] & BIT_ULL(k.k->type)), c, err,
+ bkey_invalid_type_for_btree,
+ "invalid key type for btree %s (%s)",
+ bch2_btree_node_type_str(type), bch2_bkey_types[k.k->type]);
+
+ if (btree_node_type_is_extents(type) && !bkey_whiteout(k.k)) {
+ bkey_fsck_err_on(k.k->size == 0, c, err,
+ bkey_extent_size_zero,
+ "size == 0");
+
+ bkey_fsck_err_on(k.k->size > k.k->p.offset, c, err,
+ bkey_extent_size_greater_than_offset,
+ "size greater than offset (%u > %llu)",
+ k.k->size, k.k->p.offset);
} else {
- if (k.k->size) {
- prt_printf(err, "size != 0");
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(k.k->size, c, err,
+ bkey_size_nonzero,
+ "size != 0");
}
if (type != BKEY_TYPE_btree) {
- if (!btree_type_has_snapshots((enum btree_id) type) &&
- k.k->p.snapshot) {
- prt_printf(err, "nonzero snapshot");
- return -BCH_ERR_invalid_bkey;
- }
-
- if (btree_type_has_snapshots((enum btree_id) type) &&
- !k.k->p.snapshot) {
- prt_printf(err, "snapshot == 0");
- return -BCH_ERR_invalid_bkey;
+ enum btree_id btree = type - 1;
+
+ if (btree_type_has_snapshots(btree)) {
+ bkey_fsck_err_on(!k.k->p.snapshot, c, err,
+ bkey_snapshot_zero,
+ "snapshot == 0");
+ } else if (!btree_type_has_snapshot_field(btree)) {
+ bkey_fsck_err_on(k.k->p.snapshot, c, err,
+ bkey_snapshot_nonzero,
+ "nonzero snapshot");
+ } else {
+ /*
+ * btree uses snapshot field but it's not required to be
+ * nonzero
+ */
}
- if (bkey_eq(k.k->p, POS_MAX)) {
- prt_printf(err, "key at POS_MAX");
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(bkey_eq(k.k->p, POS_MAX), c, err,
+ bkey_at_pos_max,
+ "key at POS_MAX");
}
-
- return 0;
+fsck_err:
+ return ret;
}
int bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
@@ -217,20 +218,20 @@ int bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
bch2_bkey_val_invalid(c, k, flags, err);
}
-int bch2_bkey_in_btree_node(struct btree *b, struct bkey_s_c k,
- struct printbuf *err)
+int bch2_bkey_in_btree_node(struct bch_fs *c, struct btree *b,
+ struct bkey_s_c k, struct printbuf *err)
{
- if (bpos_lt(k.k->p, b->data->min_key)) {
- prt_printf(err, "key before start of btree node");
- return -BCH_ERR_invalid_bkey;
- }
+ int ret = 0;
- if (bpos_gt(k.k->p, b->data->max_key)) {
- prt_printf(err, "key past end of btree node");
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(bpos_lt(k.k->p, b->data->min_key), c, err,
+ bkey_before_start_of_btree_node,
+ "key before start of btree node");
- return 0;
+ bkey_fsck_err_on(bpos_gt(k.k->p, b->data->max_key), c, err,
+ bkey_after_end_of_btree_node,
+ "key past end of btree node");
+fsck_err:
+ return ret;
}
void bch2_bpos_to_text(struct printbuf *out, struct bpos pos)
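Note: the bkey_methods.c hunks above convert the "-BCH_ERR_invalid_bkey and return" checks into bkey_fsck_err_on() calls that report the problem, set ret, and jump to a shared fsck_err label. The sketch below shows only that check-and-jump validation shape in a simplified stand-in macro; the real bcachefs macro additionally records a per-error-type counter and interacts with fsck policy, which is not modelled here.

#include <stdio.h>

#define demo_err_on(cond, err, fmt, ...)				\
do {									\
	if (cond) {							\
		fprintf(stderr, fmt "\n", ##__VA_ARGS__);		\
		ret = -(err);						\
		goto fsck_err;						\
	}								\
} while (0)

static int validate(unsigned u64s, unsigned size)
{
	int ret = 0;

	demo_err_on(u64s < 3, 22, "u64s too small (%u < 3)", u64s);
	demo_err_on(size == 0, 22, "size == 0");
fsck_err:
	return ret;
}

int main(void)
{
	printf("validate(8, 1) = %d\n", validate(8, 1));
	printf("validate(1, 1) = %d\n", validate(1, 1));
	return 0;
}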
diff --git a/fs/bcachefs/bkey_methods.h b/fs/bcachefs/bkey_methods.h
index 668f595e2fcf..3a370b7087ac 100644
--- a/fs/bcachefs/bkey_methods.h
+++ b/fs/bcachefs/bkey_methods.h
@@ -21,7 +21,7 @@ extern const struct bkey_ops bch2_bkey_null_ops;
* being read or written; more aggressive checks can be enabled when rw == WRITE.
*/
struct bkey_ops {
- int (*key_invalid)(const struct bch_fs *c, struct bkey_s_c k,
+ int (*key_invalid)(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags, struct printbuf *err);
void (*val_to_text)(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
@@ -55,7 +55,8 @@ int __bch2_bkey_invalid(struct bch_fs *, struct bkey_s_c, enum btree_node_type,
enum bkey_invalid_flags, struct printbuf *);
int bch2_bkey_invalid(struct bch_fs *, struct bkey_s_c, enum btree_node_type,
enum bkey_invalid_flags, struct printbuf *);
-int bch2_bkey_in_btree_node(struct btree *, struct bkey_s_c, struct printbuf *);
+int bch2_bkey_in_btree_node(struct bch_fs *, struct btree *,
+ struct bkey_s_c, struct printbuf *);
void bch2_bpos_to_text(struct printbuf *, struct bpos);
void bch2_bkey_to_text(struct printbuf *, const struct bkey *);
@@ -119,16 +120,6 @@ enum btree_update_flags {
#define BTREE_TRIGGER_BUCKET_INVALIDATE (1U << __BTREE_TRIGGER_BUCKET_INVALIDATE)
#define BTREE_TRIGGER_NOATOMIC (1U << __BTREE_TRIGGER_NOATOMIC)
-#define BTREE_TRIGGER_WANTS_OLD_AND_NEW \
- ((1U << KEY_TYPE_alloc)| \
- (1U << KEY_TYPE_alloc_v2)| \
- (1U << KEY_TYPE_alloc_v3)| \
- (1U << KEY_TYPE_alloc_v4)| \
- (1U << KEY_TYPE_stripe)| \
- (1U << KEY_TYPE_inode)| \
- (1U << KEY_TYPE_inode_v2)| \
- (1U << KEY_TYPE_snapshot))
-
static inline int bch2_trans_mark_key(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c old, struct bkey_i *new,
diff --git a/fs/bcachefs/bkey_sort.c b/fs/bcachefs/bkey_sort.c
index b9aa027c881b..bcca9e76a0b4 100644
--- a/fs/bcachefs/bkey_sort.c
+++ b/fs/bcachefs/bkey_sort.c
@@ -106,7 +106,7 @@ bch2_key_sort_fix_overlapping(struct bch_fs *c, struct bset *dst,
while ((k = sort_iter_peek(iter))) {
if (!bkey_deleted(k) &&
!should_drop_next_key(iter)) {
- bkey_copy(out, k);
+ bkey_p_copy(out, k);
btree_keys_account_key_add(&nr, 0, out);
out = bkey_p_next(out);
}
@@ -137,7 +137,7 @@ bch2_sort_repack(struct bset *dst, struct btree *src,
continue;
if (!transform)
- bkey_copy(out, in);
+ bkey_p_copy(out, in);
else if (bch2_bkey_transform(out_f, out, bkey_packed(in)
? in_f : &bch2_bkey_format_current, in))
out->format = KEY_FORMAT_LOCAL_BTREE;
@@ -191,7 +191,7 @@ unsigned bch2_sort_keys(struct bkey_packed *dst,
memcpy_u64s_small(out, in, bkeyp_key_u64s(f, in));
set_bkeyp_val_u64s(f, out, 0);
} else {
- bkey_copy(out, in);
+ bkey_p_copy(out, in);
}
out->needs_whiteout |= needs_whiteout;
out = bkey_p_next(out);
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 5e5858191905..47e7770d0583 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -472,7 +472,7 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
mutex_init(&c->verify_lock);
- shrink = shrinker_alloc(0, "%s/btree_cache", c->name);
+ shrink = shrinker_alloc(0, "%s-btree_cache", c->name);
if (!shrink)
goto err;
bc->shrink = shrink;
@@ -785,12 +785,12 @@ static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
"btree node header doesn't match ptr\n"
"btree %s level %u\n"
"ptr: ",
- bch2_btree_ids[b->c.btree_id], b->c.level);
+ bch2_btree_id_str(b->c.btree_id), b->c.level);
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
prt_printf(&buf, "\nheader: btree %s level %llu\n"
"min ",
- bch2_btree_ids[BTREE_NODE_ID(b->data)],
+ bch2_btree_id_str(BTREE_NODE_ID(b->data)),
BTREE_NODE_LEVEL(b->data));
bch2_bpos_to_text(&buf, b->data->min_key);
@@ -1153,8 +1153,21 @@ wait_on_io:
six_unlock_intent(&b->c.lock);
}
-void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
- const struct btree *b)
+const char *bch2_btree_id_str(enum btree_id btree)
+{
+ return btree < BTREE_ID_NR ? __bch2_btree_ids[btree] : "(unknown)";
+}
+
+void bch2_btree_pos_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b)
+{
+ prt_printf(out, "%s level %u/%u\n ",
+ bch2_btree_id_str(b->c.btree_id),
+ b->c.level,
+ bch2_btree_id_root(c, b->c.btree_id)->level);
+ bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
+}
+
+void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b)
{
struct bset_stats stats;
diff --git a/fs/bcachefs/btree_cache.h b/fs/bcachefs/btree_cache.h
index 1e562b6efa62..cfb80b201d61 100644
--- a/fs/bcachefs/btree_cache.h
+++ b/fs/bcachefs/btree_cache.h
@@ -123,8 +123,9 @@ static inline struct btree *btree_node_root(struct bch_fs *c, struct btree *b)
return bch2_btree_id_root(c, b->c.btree_id)->b;
}
-void bch2_btree_node_to_text(struct printbuf *, struct bch_fs *,
- const struct btree *);
+const char *bch2_btree_id_str(enum btree_id);
+void bch2_btree_pos_to_text(struct printbuf *, struct bch_fs *, const struct btree *);
+void bch2_btree_node_to_text(struct printbuf *, struct bch_fs *, const struct btree *);
void bch2_btree_cache_to_text(struct printbuf *, const struct bch_fs *);
#endif /* _BCACHEFS_BTREE_CACHE_H */
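Note: callers throughout this series switch from indexing bch2_btree_ids[] directly to the new bch2_btree_id_str() helper, which bounds-checks an id that may have come off disk and falls back to "(unknown)". A tiny sketch of that defensive lookup, with placeholder table contents:

#include <stdio.h>

enum demo_btree_id { DEMO_ID_extents, DEMO_ID_inodes, DEMO_ID_NR };

static const char * const demo_btree_ids[] = { "extents", "inodes" };

static const char *demo_btree_id_str(unsigned id)
{
	/* never index the name table with an out-of-range id */
	return id < DEMO_ID_NR ? demo_btree_ids[id] : "(unknown)";
}

int main(void)
{
	printf("%s %s\n", demo_btree_id_str(1), demo_btree_id_str(57));
	return 0;
}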
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 693ed067b1a7..0b5d09c8475d 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -95,15 +95,15 @@ static int bch2_gc_check_topology(struct bch_fs *c,
bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(cur.k));
if (__fsck_err(c,
- FSCK_CAN_FIX|
- FSCK_CAN_IGNORE|
- FSCK_NO_RATELIMIT,
- "btree node with incorrect min_key at btree %s level %u:\n"
- " prev %s\n"
- " cur %s",
- bch2_btree_ids[b->c.btree_id], b->c.level,
- buf1.buf, buf2.buf) &&
- should_restart_for_topology_repair(c)) {
+ FSCK_CAN_FIX|
+ FSCK_CAN_IGNORE|
+ FSCK_NO_RATELIMIT,
+ btree_node_topology_bad_min_key,
+ "btree node with incorrect min_key at btree %s level %u:\n"
+ " prev %s\n"
+ " cur %s",
+ bch2_btree_id_str(b->c.btree_id), b->c.level,
+ buf1.buf, buf2.buf) && should_restart_for_topology_repair(c)) {
bch_info(c, "Halting mark and sweep to start topology repair pass");
ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
goto err;
@@ -122,14 +122,12 @@ static int bch2_gc_check_topology(struct bch_fs *c,
bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(cur.k));
bch2_bpos_to_text(&buf2, node_end);
- if (__fsck_err(c,
- FSCK_CAN_FIX|
- FSCK_CAN_IGNORE|
- FSCK_NO_RATELIMIT,
+ if (__fsck_err(c, FSCK_CAN_FIX|FSCK_CAN_IGNORE|FSCK_NO_RATELIMIT,
+ btree_node_topology_bad_max_key,
"btree node with incorrect max_key at btree %s level %u:\n"
" %s\n"
" expected %s",
- bch2_btree_ids[b->c.btree_id], b->c.level,
+ bch2_btree_id_str(b->c.btree_id), b->c.level,
buf1.buf, buf2.buf) &&
should_restart_for_topology_repair(c)) {
bch_info(c, "Halting mark and sweep to start topology repair pass");
@@ -287,10 +285,11 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
if (mustfix_fsck_err_on(bpos_ge(prev->data->min_key,
cur->data->min_key), c,
+ btree_node_topology_overwritten_by_next_node,
"btree node overwritten by next node at btree %s level %u:\n"
" node %s\n"
" next %s",
- bch2_btree_ids[b->c.btree_id], b->c.level,
+ bch2_btree_id_str(b->c.btree_id), b->c.level,
buf1.buf, buf2.buf)) {
ret = DROP_PREV_NODE;
goto out;
@@ -298,10 +297,11 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
if (mustfix_fsck_err_on(!bpos_eq(prev->key.k.p,
bpos_predecessor(cur->data->min_key)), c,
+ btree_node_topology_bad_max_key,
"btree node with incorrect max_key at btree %s level %u:\n"
" node %s\n"
" next %s",
- bch2_btree_ids[b->c.btree_id], b->c.level,
+ bch2_btree_id_str(b->c.btree_id), b->c.level,
buf1.buf, buf2.buf))
ret = set_node_max(c, prev,
bpos_predecessor(cur->data->min_key));
@@ -310,20 +310,22 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
if (mustfix_fsck_err_on(bpos_ge(expected_start,
cur->data->max_key), c,
+ btree_node_topology_overwritten_by_prev_node,
"btree node overwritten by prev node at btree %s level %u:\n"
" prev %s\n"
" node %s",
- bch2_btree_ids[b->c.btree_id], b->c.level,
+ bch2_btree_id_str(b->c.btree_id), b->c.level,
buf1.buf, buf2.buf)) {
ret = DROP_THIS_NODE;
goto out;
}
if (mustfix_fsck_err_on(!bpos_eq(expected_start, cur->data->min_key), c,
+ btree_node_topology_bad_min_key,
"btree node with incorrect min_key at btree %s level %u:\n"
" prev %s\n"
" node %s",
- bch2_btree_ids[b->c.btree_id], b->c.level,
+ bch2_btree_id_str(b->c.btree_id), b->c.level,
buf1.buf, buf2.buf))
ret = set_node_min(c, cur, expected_start);
}
@@ -344,10 +346,11 @@ static int btree_repair_node_end(struct bch_fs *c, struct btree *b,
bch2_bpos_to_text(&buf2, b->key.k.p);
if (mustfix_fsck_err_on(!bpos_eq(child->key.k.p, b->key.k.p), c,
+ btree_node_topology_bad_max_key,
"btree node with incorrect max_key at btree %s level %u:\n"
" %s\n"
" expected %s",
- bch2_btree_ids[b->c.btree_id], b->c.level,
+ bch2_btree_id_str(b->c.btree_id), b->c.level,
buf1.buf, buf2.buf)) {
ret = set_node_max(c, child, b->key.k.p);
if (ret)
@@ -396,9 +399,10 @@ again:
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur_k.k));
if (mustfix_fsck_err_on(ret == -EIO, c,
+ btree_node_unreadable,
"Topology repair: unreadable btree node at btree %s level %u:\n"
" %s",
- bch2_btree_ids[b->c.btree_id],
+ bch2_btree_id_str(b->c.btree_id),
b->c.level - 1,
buf.buf)) {
bch2_btree_node_evict(trans, cur_k.k);
@@ -504,9 +508,10 @@ again:
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
if (mustfix_fsck_err_on(!have_child, c,
+ btree_node_topology_interior_node_empty,
"empty interior btree node at btree %s level %u\n"
" %s",
- bch2_btree_ids[b->c.btree_id],
+ bch2_btree_id_str(b->c.btree_id),
b->c.level, buf.buf))
ret = DROP_THIS_NODE;
err:
@@ -582,7 +587,8 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
if (!g->gen_valid &&
(c->opts.reconstruct_alloc ||
- fsck_err(c, "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
+ fsck_err(c, ptr_to_missing_alloc_key,
+ "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
"while marking %s",
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
bch2_data_types[ptr_data_type(k->k, &p.ptr)],
@@ -599,7 +605,8 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
if (gen_cmp(p.ptr.gen, g->gen) > 0 &&
(c->opts.reconstruct_alloc ||
- fsck_err(c, "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
+ fsck_err(c, ptr_gen_newer_than_bucket_gen,
+ "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
"while marking %s",
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
bch2_data_types[ptr_data_type(k->k, &p.ptr)],
@@ -620,7 +627,8 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
if (gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX &&
(c->opts.reconstruct_alloc ||
- fsck_err(c, "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
+ fsck_err(c, ptr_gen_newer_than_bucket_gen,
+ "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
"while marking %s",
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
bch2_data_types[ptr_data_type(k->k, &p.ptr)],
@@ -631,7 +639,8 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
if (!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0 &&
(c->opts.reconstruct_alloc ||
- fsck_err(c, "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
+ fsck_err(c, stale_dirty_ptr,
+ "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
"while marking %s",
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
bch2_data_types[ptr_data_type(k->k, &p.ptr)],
@@ -645,6 +654,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
if (fsck_err_on(bucket_data_type(g->data_type) &&
bucket_data_type(g->data_type) != data_type, c,
+ ptr_bucket_data_type_mismatch,
"bucket %u:%zu different types of data in same bucket: %s, %s\n"
"while marking %s",
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
@@ -664,6 +674,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx);
if (fsck_err_on(!m || !m->alive, c,
+ ptr_to_missing_stripe,
"pointer to nonexistent stripe %llu\n"
"while marking %s",
(u64) p.ec.idx,
@@ -672,6 +683,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
do_update = true;
if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p), c,
+ ptr_to_incorrect_stripe,
"pointer does not match stripe %llu\n"
"while marking %s",
(u64) p.ec.idx,
@@ -811,6 +823,7 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
goto err;
if (fsck_err_on(k->k->version.lo > atomic64_read(&c->key_version), c,
+ bkey_version_in_future,
"key version number higher than recorded: %llu > %llu",
k->k->version.lo,
atomic64_read(&c->key_version)))
@@ -968,9 +981,10 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b
FSCK_CAN_FIX|
FSCK_CAN_IGNORE|
FSCK_NO_RATELIMIT,
+ btree_node_read_error,
"Unreadable btree node at btree %s level %u:\n"
" %s",
- bch2_btree_ids[b->c.btree_id],
+ bch2_btree_id_str(b->c.btree_id),
b->c.level - 1,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur.k)), buf.buf)) &&
@@ -1025,6 +1039,7 @@ static int bch2_gc_btree_init(struct btree_trans *trans,
printbuf_reset(&buf);
bch2_bpos_to_text(&buf, b->data->min_key);
if (mustfix_fsck_err_on(!bpos_eq(b->data->min_key, POS_MIN), c,
+ btree_root_bad_min_key,
"btree root with incorrect min_key: %s", buf.buf)) {
bch_err(c, "repair unimplemented");
ret = -BCH_ERR_fsck_repair_unimplemented;
@@ -1034,6 +1049,7 @@ static int bch2_gc_btree_init(struct btree_trans *trans,
printbuf_reset(&buf);
bch2_bpos_to_text(&buf, b->data->max_key);
if (mustfix_fsck_err_on(!bpos_eq(b->data->max_key, SPOS_MAX), c,
+ btree_root_bad_max_key,
"btree root with incorrect max_key: %s", buf.buf)) {
bch_err(c, "repair unimplemented");
ret = -BCH_ERR_fsck_repair_unimplemented;
@@ -1207,16 +1223,16 @@ static int bch2_gc_done(struct bch_fs *c,
percpu_down_write(&c->mark_lock);
-#define copy_field(_f, _msg, ...) \
+#define copy_field(_err, _f, _msg, ...) \
if (dst->_f != src->_f && \
(!verify || \
- fsck_err(c, _msg ": got %llu, should be %llu" \
+ fsck_err(c, _err, _msg ": got %llu, should be %llu" \
, ##__VA_ARGS__, dst->_f, src->_f))) \
dst->_f = src->_f
-#define copy_dev_field(_f, _msg, ...) \
- copy_field(_f, "dev %u has wrong " _msg, dev, ##__VA_ARGS__)
-#define copy_fs_field(_f, _msg, ...) \
- copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__)
+#define copy_dev_field(_err, _f, _msg, ...) \
+ copy_field(_err, _f, "dev %u has wrong " _msg, dev, ##__VA_ARGS__)
+#define copy_fs_field(_err, _f, _msg, ...) \
+ copy_field(_err, _f, "fs has wrong " _msg, ##__VA_ARGS__)
for (i = 0; i < ARRAY_SIZE(c->usage); i++)
bch2_fs_usage_acc_to_base(c, i);
@@ -1227,13 +1243,17 @@ static int bch2_gc_done(struct bch_fs *c,
bch2_acc_percpu_u64s((u64 __percpu *) ca->usage_gc,
dev_usage_u64s());
- copy_dev_field(buckets_ec, "buckets_ec");
-
for (i = 0; i < BCH_DATA_NR; i++) {
- copy_dev_field(d[i].buckets, "%s buckets", bch2_data_types[i]);
- copy_dev_field(d[i].sectors, "%s sectors", bch2_data_types[i]);
- copy_dev_field(d[i].fragmented, "%s fragmented", bch2_data_types[i]);
+ copy_dev_field(dev_usage_buckets_wrong,
+ d[i].buckets, "%s buckets", bch2_data_types[i]);
+ copy_dev_field(dev_usage_sectors_wrong,
+ d[i].sectors, "%s sectors", bch2_data_types[i]);
+ copy_dev_field(dev_usage_fragmented_wrong,
+ d[i].fragmented, "%s fragmented", bch2_data_types[i]);
}
+
+ copy_dev_field(dev_usage_buckets_ec_wrong,
+ buckets_ec, "buckets_ec");
}
{
@@ -1242,17 +1262,24 @@ static int bch2_gc_done(struct bch_fs *c,
struct bch_fs_usage *src = (void *)
bch2_acc_percpu_u64s((u64 __percpu *) c->usage_gc, nr);
- copy_fs_field(hidden, "hidden");
- copy_fs_field(btree, "btree");
+ copy_fs_field(fs_usage_hidden_wrong,
+ hidden, "hidden");
+ copy_fs_field(fs_usage_btree_wrong,
+ btree, "btree");
if (!metadata_only) {
- copy_fs_field(data, "data");
- copy_fs_field(cached, "cached");
- copy_fs_field(reserved, "reserved");
- copy_fs_field(nr_inodes,"nr_inodes");
+ copy_fs_field(fs_usage_data_wrong,
+ data, "data");
+ copy_fs_field(fs_usage_cached_wrong,
+ cached, "cached");
+ copy_fs_field(fs_usage_reserved_wrong,
+ reserved, "reserved");
+ copy_fs_field(fs_usage_nr_inodes_wrong,
+ nr_inodes,"nr_inodes");
for (i = 0; i < BCH_REPLICAS_MAX; i++)
- copy_fs_field(persistent_reserved[i],
+ copy_fs_field(fs_usage_persistent_reserved_wrong,
+ persistent_reserved[i],
"persistent_reserved[%i]", i);
}
@@ -1268,7 +1295,8 @@ static int bch2_gc_done(struct bch_fs *c,
printbuf_reset(&buf);
bch2_replicas_entry_to_text(&buf, e);
- copy_fs_field(replicas[i], "%s", buf.buf);
+ copy_fs_field(fs_usage_replicas_wrong,
+ replicas[i], "%s", buf.buf);
}
}
@@ -1404,6 +1432,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
if (c->opts.reconstruct_alloc ||
fsck_err_on(new.data_type != gc.data_type, c,
+ alloc_key_data_type_wrong,
"bucket %llu:%llu gen %u has wrong data_type"
": got %s, should be %s",
iter->pos.inode, iter->pos.offset,
@@ -1412,9 +1441,9 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
bch2_data_types[gc.data_type]))
new.data_type = gc.data_type;
-#define copy_bucket_field(_f) \
+#define copy_bucket_field(_errtype, _f) \
if (c->opts.reconstruct_alloc || \
- fsck_err_on(new._f != gc._f, c, \
+ fsck_err_on(new._f != gc._f, c, _errtype, \
"bucket %llu:%llu gen %u data type %s has wrong " #_f \
": got %u, should be %u", \
iter->pos.inode, iter->pos.offset, \
@@ -1423,11 +1452,16 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
new._f, gc._f)) \
new._f = gc._f; \
- copy_bucket_field(gen);
- copy_bucket_field(dirty_sectors);
- copy_bucket_field(cached_sectors);
- copy_bucket_field(stripe_redundancy);
- copy_bucket_field(stripe);
+ copy_bucket_field(alloc_key_gen_wrong,
+ gen);
+ copy_bucket_field(alloc_key_dirty_sectors_wrong,
+ dirty_sectors);
+ copy_bucket_field(alloc_key_cached_sectors_wrong,
+ cached_sectors);
+ copy_bucket_field(alloc_key_stripe_wrong,
+ stripe);
+ copy_bucket_field(alloc_key_stripe_redundancy_wrong,
+ stripe_redundancy);
#undef copy_bucket_field
if (!bch2_alloc_v4_cmp(*old, new))
@@ -1584,6 +1618,7 @@ static int bch2_gc_write_reflink_key(struct btree_trans *trans,
}
if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), c,
+ reflink_v_refcount_wrong,
"reflink key has wrong refcount:\n"
" %s\n"
" should be %u",
@@ -1709,7 +1744,8 @@ static int bch2_gc_write_stripes_key(struct btree_trans *trans,
if (bad)
bch2_bkey_val_to_text(&buf, c, k);
- if (fsck_err_on(bad, c, "%s", buf.buf)) {
+ if (fsck_err_on(bad, c, stripe_sector_count_wrong,
+ "%s", buf.buf)) {
struct bkey_i_stripe *new;
new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
@@ -1954,19 +1990,17 @@ int bch2_gc_gens(struct bch_fs *c)
trans = bch2_trans_get(c);
for_each_member_device(ca, c, i) {
- struct bucket_gens *gens;
+ struct bucket_gens *gens = bucket_gens(ca);
BUG_ON(ca->oldest_gen);
- ca->oldest_gen = kvmalloc(ca->mi.nbuckets, GFP_KERNEL);
+ ca->oldest_gen = kvmalloc(gens->nbuckets, GFP_KERNEL);
if (!ca->oldest_gen) {
percpu_ref_put(&ca->ref);
ret = -BCH_ERR_ENOMEM_gc_gens;
goto err;
}
- gens = bucket_gens(ca);
-
for (b = gens->first_bucket;
b < gens->nbuckets; b++)
ca->oldest_gen[b] = gens->b[b];
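Note: the bch2_gc_gens() hunk above sizes the oldest_gen allocation from bucket_gens(ca)->nbuckets, the same structure the following loop iterates, rather than a separately maintained count. The sketch below shows that "allocate and iterate off one source of truth" shape in user-space terms; all demo_* names are illustrative.

#include <stdlib.h>
#include <stdio.h>

struct demo_gens { size_t nbuckets; unsigned char b[]; };

static unsigned char *snapshot_gens(const struct demo_gens *gens)
{
	unsigned char *copy = malloc(gens->nbuckets);	/* same count we iterate */

	if (!copy)
		return NULL;
	for (size_t i = 0; i < gens->nbuckets; i++)
		copy[i] = gens->b[i];
	return copy;
}

int main(void)
{
	struct demo_gens *g = malloc(sizeof(*g) + 4);

	if (!g)
		return 1;
	g->nbuckets = 4;
	for (size_t i = 0; i < g->nbuckets; i++)
		g->b[i] = (unsigned char)i;

	unsigned char *copy = snapshot_gens(g);
	printf("copy[3] = %d\n", copy ? copy[3] : -1);
	free(copy);
	free(g);
	return 0;
}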
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index a869cf6ac7c6..37d896edb06e 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -184,7 +184,7 @@ static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
k = new_whiteouts;
while (ptrs != ptrs_end) {
- bkey_copy(k, *ptrs);
+ bkey_p_copy(k, *ptrs);
k = bkey_p_next(k);
ptrs++;
}
@@ -260,7 +260,7 @@ static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
n = bkey_p_next(k);
if (!bkey_deleted(k)) {
- bkey_copy(out, k);
+ bkey_p_copy(out, k);
out = bkey_p_next(out);
} else {
BUG_ON(k->needs_whiteout);
@@ -510,16 +510,6 @@ void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
bch2_trans_node_reinit_iter(trans, b);
}
-static void btree_pos_to_text(struct printbuf *out, struct bch_fs *c,
- struct btree *b)
-{
- prt_printf(out, "%s level %u/%u\n ",
- bch2_btree_ids[b->c.btree_id],
- b->c.level,
- bch2_btree_id_root(c, b->c.btree_id)->level);
- bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
-}
-
static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
struct bch_dev *ca,
struct btree *b, struct bset *i,
@@ -532,7 +522,7 @@ static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
if (ca)
prt_printf(out, "on %s ", ca->name);
prt_printf(out, "at btree ");
- btree_pos_to_text(out, c, b);
+ bch2_btree_pos_to_text(out, c, b);
prt_printf(out, "\n node offset %u", b->written);
if (i)
@@ -540,7 +530,7 @@ static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
prt_str(out, ": ");
}
-__printf(8, 9)
+__printf(9, 10)
static int __btree_err(int ret,
struct bch_fs *c,
struct bch_dev *ca,
@@ -548,6 +538,7 @@ static int __btree_err(int ret,
struct bset *i,
int write,
bool have_retry,
+ enum bch_sb_error_id err_type,
const char *fmt, ...)
{
struct printbuf out = PRINTBUF;
@@ -572,9 +563,15 @@ static int __btree_err(int ret,
if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
ret = -BCH_ERR_btree_node_read_err_bad_node;
+ if (ret != -BCH_ERR_btree_node_read_err_fixable)
+ bch2_sb_error_count(c, err_type);
+
switch (ret) {
case -BCH_ERR_btree_node_read_err_fixable:
- mustfix_fsck_err(c, "%s", out.buf);
+ ret = bch2_fsck_err(c, FSCK_CAN_FIX, err_type, "%s", out.buf);
+ if (ret != -BCH_ERR_fsck_fix &&
+ ret != -BCH_ERR_fsck_ignore)
+ goto fsck_err;
ret = -BCH_ERR_fsck_fix;
break;
case -BCH_ERR_btree_node_read_err_want_retry:
@@ -599,9 +596,11 @@ fsck_err:
return ret;
}
-#define btree_err(type, c, ca, b, i, msg, ...) \
+#define btree_err(type, c, ca, b, i, _err_type, msg, ...) \
({ \
- int _ret = __btree_err(type, c, ca, b, i, write, have_retry, msg, ##__VA_ARGS__);\
+ int _ret = __btree_err(type, c, ca, b, i, write, have_retry, \
+ BCH_FSCK_ERR_##_err_type, \
+ msg, ##__VA_ARGS__); \
\
if (_ret != -BCH_ERR_fsck_fix) { \
ret = _ret; \
@@ -676,13 +675,17 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
int ret = 0;
btree_err_on(!bch2_version_compatible(version),
- -BCH_ERR_btree_node_read_err_incompatible, c, ca, b, i,
+ -BCH_ERR_btree_node_read_err_incompatible,
+ c, ca, b, i,
+ btree_node_unsupported_version,
"unsupported bset version %u.%u",
BCH_VERSION_MAJOR(version),
BCH_VERSION_MINOR(version));
if (btree_err_on(version < c->sb.version_min,
- -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i,
+ -BCH_ERR_btree_node_read_err_fixable,
+ c, NULL, b, i,
+ btree_node_bset_older_than_sb_min,
"bset version %u older than superblock version_min %u",
version, c->sb.version_min)) {
mutex_lock(&c->sb_lock);
@@ -693,7 +696,9 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
if (btree_err_on(BCH_VERSION_MAJOR(version) >
BCH_VERSION_MAJOR(c->sb.version),
- -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i,
+ -BCH_ERR_btree_node_read_err_fixable,
+ c, NULL, b, i,
+ btree_node_bset_newer_than_sb,
"bset version %u newer than superblock version %u",
version, c->sb.version)) {
mutex_lock(&c->sb_lock);
@@ -703,11 +708,15 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
}
btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
- -BCH_ERR_btree_node_read_err_incompatible, c, ca, b, i,
+ -BCH_ERR_btree_node_read_err_incompatible,
+ c, ca, b, i,
+ btree_node_unsupported_version,
"BSET_SEPARATE_WHITEOUTS no longer supported");
if (btree_err_on(offset + sectors > btree_sectors(c),
- -BCH_ERR_btree_node_read_err_fixable, c, ca, b, i,
+ -BCH_ERR_btree_node_read_err_fixable,
+ c, ca, b, i,
+ bset_past_end_of_btree_node,
"bset past end of btree node")) {
i->u64s = 0;
ret = 0;
@@ -715,12 +724,15 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
}
btree_err_on(offset && !i->u64s,
- -BCH_ERR_btree_node_read_err_fixable, c, ca, b, i,
+ -BCH_ERR_btree_node_read_err_fixable,
+ c, ca, b, i,
+ bset_empty,
"empty bset");
- btree_err_on(BSET_OFFSET(i) &&
- BSET_OFFSET(i) != offset,
- -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i,
+ btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset,
+ -BCH_ERR_btree_node_read_err_want_retry,
+ c, ca, b, i,
+ bset_wrong_sector_offset,
"bset at wrong sector offset");
if (!offset) {
@@ -734,16 +746,22 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
/* XXX endianness */
btree_err_on(bp->seq != bn->keys.seq,
- -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
+ -BCH_ERR_btree_node_read_err_must_retry,
+ c, ca, b, NULL,
+ bset_bad_seq,
"incorrect sequence number (wrong btree node)");
}
btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
- -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, i,
+ -BCH_ERR_btree_node_read_err_must_retry,
+ c, ca, b, i,
+ btree_node_bad_btree,
"incorrect btree id");
btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
- -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, i,
+ -BCH_ERR_btree_node_read_err_must_retry,
+ c, ca, b, i,
+ btree_node_bad_level,
"incorrect level");
if (!write)
@@ -760,7 +778,9 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
}
btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
- -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
+ -BCH_ERR_btree_node_read_err_must_retry,
+ c, ca, b, NULL,
+ btree_node_bad_min_key,
"incorrect min_key: got %s should be %s",
(printbuf_reset(&buf1),
bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
@@ -769,7 +789,9 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
}
btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
- -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, i,
+ -BCH_ERR_btree_node_read_err_must_retry,
+ c, ca, b, i,
+ btree_node_bad_max_key,
"incorrect max key %s",
(printbuf_reset(&buf1),
bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));
@@ -779,7 +801,9 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
BSET_BIG_ENDIAN(i), write, bn);
btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
- -BCH_ERR_btree_node_read_err_bad_node, c, ca, b, i,
+ -BCH_ERR_btree_node_read_err_bad_node,
+ c, ca, b, i,
+ btree_node_bad_format,
"invalid bkey format: %s\n %s", buf1.buf,
(printbuf_reset(&buf2),
bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf));
@@ -802,7 +826,7 @@ static int bset_key_invalid(struct bch_fs *c, struct btree *b,
struct printbuf *err)
{
return __bch2_bkey_invalid(c, k, btree_node_type(b), READ, err) ?:
- (!updated_range ? bch2_bkey_in_btree_node(b, k, err) : 0) ?:
+ (!updated_range ? bch2_bkey_in_btree_node(c, b, k, err) : 0) ?:
(rw == WRITE ? bch2_bkey_val_invalid(c, k, READ, err) : 0);
}
@@ -823,14 +847,18 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
struct bkey tmp;
if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
- -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i,
+ -BCH_ERR_btree_node_read_err_fixable,
+ c, NULL, b, i,
+ btree_node_bkey_past_bset_end,
"key extends past end of bset")) {
i->u64s = cpu_to_le16((u64 *) k - i->_data);
break;
}
if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
- -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i,
+ -BCH_ERR_btree_node_read_err_fixable,
+ c, NULL, b, i,
+ btree_node_bkey_bad_format,
"invalid bkey format %u", k->format)) {
i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
memmove_u64s_down(k, bkey_p_next(k),
@@ -849,12 +877,14 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
printbuf_reset(&buf);
if (bset_key_invalid(c, b, u.s_c, updated_range, write, &buf)) {
printbuf_reset(&buf);
- prt_printf(&buf, "invalid bkey: ");
bset_key_invalid(c, b, u.s_c, updated_range, write, &buf);
prt_printf(&buf, "\n ");
bch2_bkey_val_to_text(&buf, c, u.s_c);
- btree_err(-BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i, "%s", buf.buf);
+ btree_err(-BCH_ERR_btree_node_read_err_fixable,
+ c, NULL, b, i,
+ btree_node_bad_bkey,
+ "invalid bkey: %s", buf.buf);
i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
memmove_u64s_down(k, bkey_p_next(k),
@@ -878,7 +908,10 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
bch2_dump_bset(c, b, i, 0);
- if (btree_err(-BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i, "%s", buf.buf)) {
+ if (btree_err(-BCH_ERR_btree_node_read_err_fixable,
+ c, NULL, b, i,
+ btree_node_bkey_out_of_order,
+ "%s", buf.buf)) {
i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
memmove_u64s_down(k, bkey_p_next(k),
(u64 *) vstruct_end(i) - (u64 *) k);
@@ -919,47 +952,62 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2);
if (bch2_meta_read_fault("btree"))
- btree_err(-BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
+ btree_err(-BCH_ERR_btree_node_read_err_must_retry,
+ c, ca, b, NULL,
+ btree_node_fault_injected,
"dynamic fault");
btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
- -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
+ -BCH_ERR_btree_node_read_err_must_retry,
+ c, ca, b, NULL,
+ btree_node_bad_magic,
"bad magic: want %llx, got %llx",
bset_magic(c), le64_to_cpu(b->data->magic));
- btree_err_on(!b->data->keys.seq,
- -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
- "bad btree header: seq 0");
-
if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
struct bch_btree_ptr_v2 *bp =
&bkey_i_to_btree_ptr_v2(&b->key)->v;
btree_err_on(b->data->keys.seq != bp->seq,
- -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
+ -BCH_ERR_btree_node_read_err_must_retry,
+ c, ca, b, NULL,
+ btree_node_bad_seq,
"got wrong btree node (seq %llx want %llx)",
b->data->keys.seq, bp->seq);
+ } else {
+ btree_err_on(!b->data->keys.seq,
+ -BCH_ERR_btree_node_read_err_must_retry,
+ c, ca, b, NULL,
+ btree_node_bad_seq,
+ "bad btree header: seq 0");
}
while (b->written < (ptr_written ?: btree_sectors(c))) {
unsigned sectors;
struct nonce nonce;
- struct bch_csum csum;
bool first = !b->written;
+ bool csum_bad;
if (!b->written) {
i = &b->data->keys;
btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
- -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i,
- "unknown checksum type %llu",
- BSET_CSUM_TYPE(i));
+ -BCH_ERR_btree_node_read_err_want_retry,
+ c, ca, b, i,
+ bset_unknown_csum,
+ "unknown checksum type %llu", BSET_CSUM_TYPE(i));
nonce = btree_nonce(i, b->written << 9);
- csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
- btree_err_on(bch2_crc_cmp(csum, b->data->csum),
- -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i,
+ csum_bad = bch2_crc_cmp(b->data->csum,
+ csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data));
+ if (csum_bad)
+ bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
+
+ btree_err_on(csum_bad,
+ -BCH_ERR_btree_node_read_err_want_retry,
+ c, ca, b, i,
+ bset_bad_csum,
"invalid checksum");
ret = bset_encrypt(c, i, b->written << 9);
@@ -969,7 +1017,9 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
!BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
- -BCH_ERR_btree_node_read_err_incompatible, c, NULL, b, NULL,
+ -BCH_ERR_btree_node_read_err_incompatible,
+ c, NULL, b, NULL,
+ btree_node_unsupported_version,
"btree node does not have NEW_EXTENT_OVERWRITE set");
sectors = vstruct_sectors(b->data, c->block_bits);
@@ -981,15 +1031,21 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
break;
btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
- -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i,
- "unknown checksum type %llu",
- BSET_CSUM_TYPE(i));
+ -BCH_ERR_btree_node_read_err_want_retry,
+ c, ca, b, i,
+ bset_unknown_csum,
+ "unknown checksum type %llu", BSET_CSUM_TYPE(i));
nonce = btree_nonce(i, b->written << 9);
- csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
-
- btree_err_on(bch2_crc_cmp(csum, bne->csum),
- -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i,
+ csum_bad = bch2_crc_cmp(bne->csum,
+ csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne));
+ if (csum_bad)
+ bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
+
+ btree_err_on(csum_bad,
+ -BCH_ERR_btree_node_read_err_want_retry,
+ c, ca, b, i,
+ bset_bad_csum,
"invalid checksum");
ret = bset_encrypt(c, i, b->written << 9);
@@ -1022,12 +1078,16 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
true);
btree_err_on(blacklisted && first,
- -BCH_ERR_btree_node_read_err_fixable, c, ca, b, i,
+ -BCH_ERR_btree_node_read_err_fixable,
+ c, ca, b, i,
+ bset_blacklisted_journal_seq,
"first btree node bset has blacklisted journal seq (%llu)",
le64_to_cpu(i->journal_seq));
btree_err_on(blacklisted && ptr_written,
- -BCH_ERR_btree_node_read_err_fixable, c, ca, b, i,
+ -BCH_ERR_btree_node_read_err_fixable,
+ c, ca, b, i,
+ first_bset_blacklisted_journal_seq,
"found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
le64_to_cpu(i->journal_seq),
b->written, b->written + sectors, ptr_written);
@@ -1044,7 +1104,9 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
if (ptr_written) {
btree_err_on(b->written < ptr_written,
- -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, NULL,
+ -BCH_ERR_btree_node_read_err_want_retry,
+ c, ca, b, NULL,
+ btree_node_data_missing,
"btree node data missing: expected %u sectors, found %u",
ptr_written, b->written);
} else {
@@ -1055,7 +1117,9 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
!bch2_journal_seq_is_blacklisted(c,
le64_to_cpu(bne->keys.journal_seq),
true),
- -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, NULL,
+ -BCH_ERR_btree_node_read_err_want_retry,
+ c, ca, b, NULL,
+ btree_node_bset_after_end,
"found bset signature after last bset");
}
@@ -1097,7 +1161,10 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
prt_printf(&buf, "\n ");
bch2_bkey_val_to_text(&buf, c, u.s_c);
- btree_err(-BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i, "%s", buf.buf);
+ btree_err(-BCH_ERR_btree_node_read_err_fixable,
+ c, NULL, b, i,
+ btree_node_bad_bkey,
+ "%s", buf.buf);
btree_keys_account_key_drop(&b->nr, 0, k);
@@ -1177,8 +1244,9 @@ static void btree_node_read_work(struct work_struct *work)
}
start:
printbuf_reset(&buf);
- btree_pos_to_text(&buf, c, b);
- bch2_dev_io_err_on(bio->bi_status, ca, "btree read error %s for %s",
+ bch2_btree_pos_to_text(&buf, c, b);
+ bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_read,
+ "btree read error %s for %s",
bch2_blk_status_to_str(bio->bi_status), buf.buf);
if (rb->have_ioref)
percpu_ref_put(&ca->io_ref);
@@ -1213,7 +1281,7 @@ start:
printbuf_reset(&buf);
bch2_bpos_to_text(&buf, b->key.k.p);
bch_info(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
- __func__, bch2_btree_ids[b->c.btree_id], b->c.level, buf.buf);
+ __func__, bch2_btree_id_str(b->c.btree_id), b->c.level, buf.buf);
bch2_btree_node_rewrite_async(c, b);
}
@@ -1322,14 +1390,20 @@ static void btree_node_read_all_replicas_done(struct closure *cl)
}
written2 = btree_node_sectors_written(c, ra->buf[i]);
- if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, NULL,
+ if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable,
+ c, NULL, b, NULL,
+ btree_node_replicas_sectors_written_mismatch,
"btree node sectors written mismatch: %u != %u",
written, written2) ||
btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
- -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, NULL,
+ -BCH_ERR_btree_node_read_err_fixable,
+ c, NULL, b, NULL,
+ btree_node_bset_after_end,
"found bset signature after last bset") ||
btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
- -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, NULL,
+ -BCH_ERR_btree_node_read_err_fixable,
+ c, NULL, b, NULL,
+ btree_node_replicas_data_mismatch,
"btree node replicas content mismatch"))
dump_bset_maps = true;
@@ -1524,7 +1598,7 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
struct printbuf buf = PRINTBUF;
prt_str(&buf, "btree node read error: no device to read from\n at ");
- btree_pos_to_text(&buf, c, b);
+ bch2_btree_pos_to_text(&buf, c, b);
bch_err(c, "%s", buf.buf);
if (c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
@@ -1759,7 +1833,8 @@ static void btree_node_write_endio(struct bio *bio)
if (wbio->have_ioref)
bch2_latency_acct(ca, wbio->submit_time, WRITE);
- if (bch2_dev_io_err_on(bio->bi_status, ca, "btree write error: %s",
+ if (bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
+ "btree write error: %s",
bch2_blk_status_to_str(bio->bi_status)) ||
bch2_meta_write_fault("btree")) {
spin_lock_irqsave(&c->btree_write_error_lock, flags);
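Note: the btree_io.c read path above now keeps the checksum comparison in a csum_bad bool so one computation both bumps the device's checksum error counter (bch2_io_error()) and drives the existing retry/report path. The sketch below shows only that "compute once, account, then report" flow; everything except the flow itself is a placeholder.

#include <stdbool.h>
#include <stdio.h>

static unsigned long long stored_csum = 0x1234, computed_csum = 0x9999;
static unsigned long long dev_checksum_errors;

static void demo_io_error(void)			/* stand-in for per-device accounting */
{
	dev_checksum_errors++;
}

static int demo_read_bset(void)
{
	bool csum_bad = stored_csum != computed_csum;

	if (csum_bad)
		demo_io_error();		/* account against the device first */

	if (csum_bad) {				/* then report and fail as before */
		fprintf(stderr, "invalid checksum\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("read: %d, checksum errors: %llu\n",
	       demo_read_bset(), dev_checksum_errors);
	return 0;
}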
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 1d79514754d7..c2adf3fbb0b3 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -257,7 +257,7 @@ static void bch2_btree_iter_verify(struct btree_iter *iter)
BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
- !btree_type_has_snapshots(iter->btree_id));
+ !btree_type_has_snapshot_field(iter->btree_id));
if (iter->update_path)
bch2_btree_path_verify(trans, iter->update_path);
@@ -362,7 +362,7 @@ void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
bch2_bpos_to_text(&buf, pos);
panic("not locked: %s %s%s\n",
- bch2_btree_ids[id], buf.buf,
+ bch2_btree_id_str(id), buf.buf,
key_cache ? " cached" : "");
}
@@ -1109,6 +1109,9 @@ int bch2_btree_path_traverse_one(struct btree_trans *trans,
if (unlikely(ret))
goto out;
+ if (unlikely(!trans->srcu_held))
+ bch2_trans_srcu_lock(trans);
+
/*
* Ensure we obey path->should_be_locked: if it's set, we can't unlock
* and re-traverse the path without a transaction restart:
@@ -1371,7 +1374,7 @@ void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
struct bkey_s_c old = { &i->old_k, i->old_v };
prt_printf(buf, "update: btree=%s cached=%u %pS",
- bch2_btree_ids[i->btree_id],
+ bch2_btree_id_str(i->btree_id),
i->cached,
(void *) i->ip_allocated);
prt_newline(buf);
@@ -1387,7 +1390,7 @@ void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
trans_for_each_wb_update(trans, wb) {
prt_printf(buf, "update: btree=%s wb=1 %pS",
- bch2_btree_ids[wb->btree],
+ bch2_btree_id_str(wb->btree),
(void *) i->ip_allocated);
prt_newline(buf);
@@ -1416,7 +1419,7 @@ void bch2_btree_path_to_text(struct printbuf *out, struct btree_path *path)
path->idx, path->ref, path->intent_ref,
path->preserve ? 'P' : ' ',
path->should_be_locked ? 'S' : ' ',
- bch2_btree_ids[path->btree_id],
+ bch2_btree_id_str(path->btree_id),
path->level);
bch2_bpos_to_text(out, path->pos);
@@ -1523,6 +1526,7 @@ static inline struct btree_path *btree_path_alloc(struct btree_trans *trans,
path->ref = 0;
path->intent_ref = 0;
path->nodes_locked = 0;
+ path->alloc_seq++;
btree_path_list_add(trans, pos, path);
trans->paths_sorted = false;
@@ -1598,7 +1602,7 @@ struct btree_path *bch2_path_get(struct btree_trans *trans,
locks_want = min(locks_want, BTREE_MAX_DEPTH);
if (locks_want > path->locks_want)
- bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want);
+ bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want, NULL);
return path;
}
@@ -2829,18 +2833,36 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
return p;
}
-static noinline void bch2_trans_reset_srcu_lock(struct btree_trans *trans)
+static inline void check_srcu_held_too_long(struct btree_trans *trans)
{
- struct bch_fs *c = trans->c;
- struct btree_path *path;
+ WARN(trans->srcu_held && time_after(jiffies, trans->srcu_lock_time + HZ * 10),
+ "btree trans held srcu lock (delaying memory reclaim) for %lu seconds",
+ (jiffies - trans->srcu_lock_time) / HZ);
+}
- trans_for_each_path(trans, path)
- if (path->cached && !btree_node_locked(path, 0))
- path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
+void bch2_trans_srcu_unlock(struct btree_trans *trans)
+{
+ if (trans->srcu_held) {
+ struct bch_fs *c = trans->c;
+ struct btree_path *path;
- srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
- trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
- trans->srcu_lock_time = jiffies;
+ trans_for_each_path(trans, path)
+ if (path->cached && !btree_node_locked(path, 0))
+ path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
+
+ check_srcu_held_too_long(trans);
+ srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
+ trans->srcu_held = false;
+ }
+}
+
+void bch2_trans_srcu_lock(struct btree_trans *trans)
+{
+ if (!trans->srcu_held) {
+ trans->srcu_idx = srcu_read_lock(&trans->c->btree_trans_barrier);
+ trans->srcu_lock_time = jiffies;
+ trans->srcu_held = true;
+ }
}
/**
@@ -2894,8 +2916,9 @@ u32 bch2_trans_begin(struct btree_trans *trans)
}
trans->last_begin_time = now;
- if (unlikely(time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
- bch2_trans_reset_srcu_lock(trans);
+ if (unlikely(trans->srcu_held &&
+ time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
+ bch2_trans_srcu_unlock(trans);
trans->last_begin_ip = _RET_IP_;
if (trans->restarted) {
@@ -2980,8 +3003,9 @@ struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
trans->wb_updates_size = s->wb_updates_size;
}
- trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
+ trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
trans->srcu_lock_time = jiffies;
+ trans->srcu_held = true;
if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
struct btree_trans *pos;
@@ -3025,7 +3049,7 @@ leaked:
trans_for_each_path(trans, path)
if (path->ref)
printk(KERN_ERR " btree %s %pS\n",
- bch2_btree_ids[path->btree_id],
+ bch2_btree_id_str(path->btree_id),
(void *) path->ip_allocated);
/* Be noisy about this: */
bch2_fatal_error(c);
@@ -3058,7 +3082,10 @@ void bch2_trans_put(struct btree_trans *trans)
check_btree_paths_leaked(trans);
- srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
+ if (trans->srcu_held) {
+ check_srcu_held_too_long(trans);
+ srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
+ }
bch2_journal_preres_put(&c->journal, &trans->journal_preres);
@@ -3100,7 +3127,7 @@ bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
prt_tab(out);
prt_printf(out, "%px %c l=%u %s:", b, b->cached ? 'c' : 'b',
- b->level, bch2_btree_ids[b->btree_id]);
+ b->level, bch2_btree_id_str(b->btree_id));
bch2_bpos_to_text(out, btree_node_pos(b));
prt_tab(out);
@@ -3130,7 +3157,7 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
path->idx,
path->cached ? 'c' : 'b',
path->level,
- bch2_btree_ids[path->btree_id]);
+ bch2_btree_id_str(path->btree_id));
bch2_bpos_to_text(out, path->pos);
prt_newline(out);
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index fbe273453db3..85e7cb52f6b6 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -274,6 +274,7 @@ void bch2_path_put(struct btree_trans *, struct btree_path *, bool);
int bch2_trans_relock(struct btree_trans *);
int bch2_trans_relock_notrace(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);
+void bch2_trans_unlock_long(struct btree_trans *);
bool bch2_trans_locked(struct btree_trans *);
static inline int trans_was_restarted(struct btree_trans *trans, u32 restart_count)
@@ -411,11 +412,11 @@ static inline unsigned __bch2_btree_iter_flags(struct btree_trans *trans,
flags |= BTREE_ITER_ALL_SNAPSHOTS|__BTREE_ITER_ALL_SNAPSHOTS;
if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
- btree_node_type_is_extents(btree_id))
+ btree_id_is_extents(btree_id))
flags |= BTREE_ITER_IS_EXTENTS;
if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
- !btree_type_has_snapshots(btree_id))
+ !btree_type_has_snapshot_field(btree_id))
flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
@@ -579,6 +580,9 @@ static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans,
__bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags, \
KEY_TYPE_##_type, sizeof(*_val), _val)
+void bch2_trans_srcu_unlock(struct btree_trans *);
+void bch2_trans_srcu_lock(struct btree_trans *);
+
u32 bch2_trans_begin(struct btree_trans *);
/*
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index f9a5e38a085b..9b78f78a75b5 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -324,7 +324,7 @@ btree_key_cache_create(struct btree_trans *trans, struct btree_path *path)
ck = bkey_cached_reuse(bc);
if (unlikely(!ck)) {
bch_err(c, "error allocating memory for key cache item, btree %s",
- bch2_btree_ids[path->btree_id]);
+ bch2_btree_id_str(path->btree_id));
return ERR_PTR(-BCH_ERR_ENOMEM_btree_key_cache_create);
}
@@ -407,7 +407,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
new_k = kmalloc(new_u64s * sizeof(u64), GFP_KERNEL);
if (!new_k) {
bch_err(trans->c, "error allocating memory for key cache key, btree %s u64s %u",
- bch2_btree_ids[ck->key.btree_id], new_u64s);
+ bch2_btree_id_str(ck->key.btree_id), new_u64s);
ret = -BCH_ERR_ENOMEM_btree_key_cache_fill;
goto err;
}
@@ -509,7 +509,7 @@ fill:
* path->uptodate yet:
*/
if (!path->locks_want &&
- !__bch2_btree_path_upgrade(trans, path, 1)) {
+ !__bch2_btree_path_upgrade(trans, path, 1, NULL)) {
trace_and_count(trans->c, trans_restart_key_cache_upgrade, trans, _THIS_IP_);
ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_upgrade);
goto err;
@@ -1038,7 +1038,7 @@ int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
bc->table_init_done = true;
- shrink = shrinker_alloc(0, "%s/btree_key_cache", c->name);
+ shrink = shrinker_alloc(0, "%s-btree_key_cache", c->name);
if (!shrink)
return -BCH_ERR_ENOMEM_fs_btree_cache_init;
bc->shrink = shrink;
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index 40c8ed8f7bf1..3d48834d091f 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -431,7 +431,8 @@ void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
static inline bool btree_path_get_locks(struct btree_trans *trans,
struct btree_path *path,
- bool upgrade)
+ bool upgrade,
+ struct get_locks_fail *f)
{
unsigned l = path->level;
int fail_idx = -1;
@@ -442,8 +443,14 @@ static inline bool btree_path_get_locks(struct btree_trans *trans,
if (!(upgrade
? bch2_btree_node_upgrade(trans, path, l)
- : bch2_btree_node_relock(trans, path, l)))
- fail_idx = l;
+ : bch2_btree_node_relock(trans, path, l))) {
+ fail_idx = l;
+
+ if (f) {
+ f->l = l;
+ f->b = path->l[l].b;
+ }
+ }
l++;
} while (l < path->locks_want);
@@ -584,7 +591,9 @@ __flatten
bool bch2_btree_path_relock_norestart(struct btree_trans *trans,
struct btree_path *path, unsigned long trace_ip)
{
- return btree_path_get_locks(trans, path, false);
+ struct get_locks_fail f;
+
+ return btree_path_get_locks(trans, path, false, &f);
}
int __bch2_btree_path_relock(struct btree_trans *trans,
@@ -600,22 +609,24 @@ int __bch2_btree_path_relock(struct btree_trans *trans,
bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
struct btree_path *path,
- unsigned new_locks_want)
+ unsigned new_locks_want,
+ struct get_locks_fail *f)
{
EBUG_ON(path->locks_want >= new_locks_want);
path->locks_want = new_locks_want;
- return btree_path_get_locks(trans, path, true);
+ return btree_path_get_locks(trans, path, true, f);
}
bool __bch2_btree_path_upgrade(struct btree_trans *trans,
struct btree_path *path,
- unsigned new_locks_want)
+ unsigned new_locks_want,
+ struct get_locks_fail *f)
{
struct btree_path *linked;
- if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want))
+ if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f))
return true;
/*
@@ -644,7 +655,7 @@ bool __bch2_btree_path_upgrade(struct btree_trans *trans,
linked->btree_id == path->btree_id &&
linked->locks_want < new_locks_want) {
linked->locks_want = new_locks_want;
- btree_path_get_locks(trans, linked, true);
+ btree_path_get_locks(trans, linked, true, NULL);
}
return false;
@@ -656,6 +667,9 @@ void __bch2_btree_path_downgrade(struct btree_trans *trans,
{
unsigned l;
+ if (trans->restarted)
+ return;
+
EBUG_ON(path->locks_want < new_locks_want);
path->locks_want = new_locks_want;
@@ -674,6 +688,9 @@ void __bch2_btree_path_downgrade(struct btree_trans *trans,
}
bch2_btree_path_verify_locks(path);
+
+ path->downgrade_seq++;
+ trace_path_downgrade(trans, _RET_IP_, path);
}
/* Btree transaction locking: */
@@ -682,6 +699,9 @@ void bch2_trans_downgrade(struct btree_trans *trans)
{
struct btree_path *path;
+ if (trans->restarted)
+ return;
+
trans_for_each_path(trans, path)
bch2_btree_path_downgrade(trans, path);
}
@@ -733,6 +753,12 @@ void bch2_trans_unlock(struct btree_trans *trans)
__bch2_btree_path_unlock(trans, path);
}
+void bch2_trans_unlock_long(struct btree_trans *trans)
+{
+ bch2_trans_unlock(trans);
+ bch2_trans_srcu_unlock(trans);
+}
+
bool bch2_trans_locked(struct btree_trans *trans)
{
struct btree_path *path;
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 6231e9ffc5d7..11b0a2c8cd69 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -355,26 +355,36 @@ static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans,
/* upgrade */
+
+struct get_locks_fail {
+ unsigned l;
+ struct btree *b;
+};
+
bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *,
- struct btree_path *, unsigned);
+ struct btree_path *, unsigned,
+ struct get_locks_fail *);
+
bool __bch2_btree_path_upgrade(struct btree_trans *,
- struct btree_path *, unsigned);
+ struct btree_path *, unsigned,
+ struct get_locks_fail *);
static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
struct btree_path *path,
unsigned new_locks_want)
{
+ struct get_locks_fail f;
unsigned old_locks_want = path->locks_want;
new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);
if (path->locks_want < new_locks_want
- ? __bch2_btree_path_upgrade(trans, path, new_locks_want)
+ ? __bch2_btree_path_upgrade(trans, path, new_locks_want, &f)
: path->uptodate == BTREE_ITER_UPTODATE)
return 0;
trace_and_count(trans->c, trans_restart_upgrade, trans, _THIS_IP_, path,
- old_locks_want, new_locks_want);
+ old_locks_want, new_locks_want, &f);
return btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
}
diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c
index 04c1f4610972..decad7b66c59 100644
--- a/fs/bcachefs/btree_trans_commit.c
+++ b/fs/bcachefs/btree_trans_commit.c
@@ -269,6 +269,7 @@ static inline void btree_insert_entry_checks(struct btree_trans *trans,
BUG_ON(i->level != i->path->level);
BUG_ON(i->btree_id != i->path->btree_id);
EBUG_ON(!i->level &&
+ btree_type_has_snapshots(i->btree_id) &&
!(i->flags & BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) &&
test_bit(JOURNAL_REPLAY_DONE, &trans->c->journal.flags) &&
i->k->k.p.snapshot &&
@@ -349,7 +350,7 @@ static int btree_key_can_insert_cached(struct btree_trans *trans, unsigned flags
new_k = krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOFS);
if (!new_k) {
bch_err(c, "error allocating memory for key cache key, btree %s u64s %u",
- bch2_btree_ids[path->btree_id], new_u64s);
+ bch2_btree_id_str(path->btree_id), new_u64s);
return -BCH_ERR_ENOMEM_btree_key_cache_insert;
}
@@ -379,11 +380,10 @@ static int run_one_mem_trigger(struct btree_trans *trans,
if (unlikely(flags & BTREE_TRIGGER_NORUN))
return 0;
- if (!btree_node_type_needs_gc((enum btree_node_type) i->btree_id))
+ if (!btree_node_type_needs_gc(__btree_node_type(i->level, i->btree_id)))
return 0;
- if (old_ops->atomic_trigger == new_ops->atomic_trigger &&
- ((1U << old.k->type) & BTREE_TRIGGER_WANTS_OLD_AND_NEW)) {
+ if (old_ops->atomic_trigger == new_ops->atomic_trigger) {
ret = bch2_mark_key(trans, i->btree_id, i->level,
old, bkey_i_to_s_c(new),
BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
@@ -425,8 +425,7 @@ static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_
if (!i->insert_trigger_run &&
!i->overwrite_trigger_run &&
- old_ops->trans_trigger == new_ops->trans_trigger &&
- ((1U << old.k->type) & BTREE_TRIGGER_WANTS_OLD_AND_NEW)) {
+ old_ops->trans_trigger == new_ops->trans_trigger) {
i->overwrite_trigger_run = true;
i->insert_trigger_run = true;
return bch2_trans_mark_key(trans, i->btree_id, i->level, old, i->k,
@@ -683,7 +682,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
BCH_JSET_ENTRY_overwrite,
i->btree_id, i->level,
i->old_k.u64s);
- bkey_reassemble(&entry->start[0],
+ bkey_reassemble((struct bkey_i *) entry->start,
(struct bkey_s_c) { &i->old_k, i->old_v });
}
@@ -691,7 +690,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
BCH_JSET_ENTRY_btree_keys,
i->btree_id, i->level,
i->k->k.u64s);
- bkey_copy(&entry->start[0], i->k);
+ bkey_copy((struct bkey_i *) entry->start, i->k);
}
trans_for_each_wb_update(trans, wb) {
@@ -699,7 +698,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
BCH_JSET_ENTRY_btree_keys,
wb->btree, 0,
wb->k.k.u64s);
- bkey_copy(&entry->start[0], &wb->k);
+ bkey_copy((struct bkey_i *) entry->start, &wb->k);
}
if (trans->journal_seq)
@@ -776,12 +775,12 @@ static noinline void bch2_drop_overwrites_from_journal(struct btree_trans *trans
bch2_journal_key_overwritten(trans->c, wb->btree, 0, wb->k.k.p);
}
-static noinline int bch2_trans_commit_bkey_invalid(struct btree_trans *trans, unsigned flags,
+static noinline int bch2_trans_commit_bkey_invalid(struct btree_trans *trans,
+ enum bkey_invalid_flags flags,
struct btree_insert_entry *i,
struct printbuf *err)
{
struct bch_fs *c = trans->c;
- int rw = (flags & BTREE_INSERT_JOURNAL_REPLAY) ? READ : WRITE;
printbuf_reset(err);
prt_printf(err, "invalid bkey on insert from %s -> %ps",
@@ -792,8 +791,7 @@ static noinline int bch2_trans_commit_bkey_invalid(struct btree_trans *trans, un
bch2_bkey_val_to_text(err, c, bkey_i_to_s_c(i->k));
prt_newline(err);
- bch2_bkey_invalid(c, bkey_i_to_s_c(i->k),
- i->bkey_type, rw, err);
+ bch2_bkey_invalid(c, bkey_i_to_s_c(i->k), i->bkey_type, flags, err);
bch2_print_string_as_lines(KERN_ERR, err->buf);
bch2_inconsistent_error(c);
@@ -864,12 +862,7 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans, unsigned flags
*/
bch2_journal_res_put(&c->journal, &trans->journal_res);
- if (unlikely(ret))
- return ret;
-
- bch2_trans_downgrade(trans);
-
- return 0;
+ return ret;
}
static int journal_reclaim_wait_done(struct bch_fs *c)
@@ -1034,7 +1027,7 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
if (unlikely(bch2_bkey_invalid(c, bkey_i_to_s_c(i->k),
i->bkey_type, invalid_flags, &buf)))
- ret = bch2_trans_commit_bkey_invalid(trans, flags, i, &buf);
+ ret = bch2_trans_commit_bkey_invalid(trans, invalid_flags, i, &buf);
btree_insert_entry_checks(trans, i);
printbuf_exit(&buf);
@@ -1138,6 +1131,8 @@ out:
if (likely(!(flags & BTREE_INSERT_NOCHECK_RW)))
bch2_write_ref_put(c, BCH_WRITE_REF_trans);
out_reset:
+ if (!ret)
+ bch2_trans_downgrade(trans);
bch2_trans_reset_updates(trans);
return ret;
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index bc6714d88925..941841a0c5bf 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -228,6 +228,8 @@ struct btree_path {
u8 sorted_idx;
u8 ref;
u8 intent_ref;
+ u32 alloc_seq;
+ u32 downgrade_seq;
/* btree_iter_copy starts here: */
struct bpos pos;
@@ -424,6 +426,7 @@ struct btree_trans {
u8 nr_updates;
u8 nr_wb_updates;
u8 wb_updates_size;
+ bool srcu_held:1;
bool used_mempool:1;
bool in_traverse_all:1;
bool paths_sorted:1;
@@ -636,16 +639,17 @@ static inline unsigned bset_byte_offset(struct btree *b, void *i)
}
enum btree_node_type {
-#define x(kwd, val, ...) BKEY_TYPE_##kwd = val,
+ BKEY_TYPE_btree,
+#define x(kwd, val, ...) BKEY_TYPE_##kwd = val + 1,
BCH_BTREE_IDS()
#undef x
- BKEY_TYPE_btree,
+ BKEY_TYPE_NR
};
/* Type of a key in btree @id at level @level: */
static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
{
- return level ? BKEY_TYPE_btree : (enum btree_node_type) id;
+ return level ? BKEY_TYPE_btree : (unsigned) id + 1;
}
/* Type of keys @b contains: */
@@ -654,19 +658,21 @@ static inline enum btree_node_type btree_node_type(struct btree *b)
return __btree_node_type(b->c.level, b->c.btree_id);
}
+const char *bch2_btree_node_type_str(enum btree_node_type);
+
#define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS \
- (BIT(BKEY_TYPE_extents)| \
- BIT(BKEY_TYPE_alloc)| \
- BIT(BKEY_TYPE_inodes)| \
- BIT(BKEY_TYPE_stripes)| \
- BIT(BKEY_TYPE_reflink)| \
- BIT(BKEY_TYPE_btree))
+ (BIT_ULL(BKEY_TYPE_extents)| \
+ BIT_ULL(BKEY_TYPE_alloc)| \
+ BIT_ULL(BKEY_TYPE_inodes)| \
+ BIT_ULL(BKEY_TYPE_stripes)| \
+ BIT_ULL(BKEY_TYPE_reflink)| \
+ BIT_ULL(BKEY_TYPE_btree))
#define BTREE_NODE_TYPE_HAS_MEM_TRIGGERS \
- (BIT(BKEY_TYPE_alloc)| \
- BIT(BKEY_TYPE_inodes)| \
- BIT(BKEY_TYPE_stripes)| \
- BIT(BKEY_TYPE_snapshots))
+ (BIT_ULL(BKEY_TYPE_alloc)| \
+ BIT_ULL(BKEY_TYPE_inodes)| \
+ BIT_ULL(BKEY_TYPE_stripes)| \
+ BIT_ULL(BKEY_TYPE_snapshots))
#define BTREE_NODE_TYPE_HAS_TRIGGERS \
(BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS| \
@@ -674,13 +680,13 @@ static inline enum btree_node_type btree_node_type(struct btree *b)
static inline bool btree_node_type_needs_gc(enum btree_node_type type)
{
- return BTREE_NODE_TYPE_HAS_TRIGGERS & (1U << type);
+ return BTREE_NODE_TYPE_HAS_TRIGGERS & BIT_ULL(type);
}
static inline bool btree_node_type_is_extents(enum btree_node_type type)
{
const unsigned mask = 0
-#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_EXTENTS)) << nr)
+#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_EXTENTS)) << (nr + 1))
BCH_BTREE_IDS()
#undef x
;
@@ -690,7 +696,7 @@ static inline bool btree_node_type_is_extents(enum btree_node_type type)
static inline bool btree_id_is_extents(enum btree_id btree)
{
- return btree_node_type_is_extents((enum btree_node_type) btree);
+ return btree_node_type_is_extents(__btree_node_type(0, btree));
}
static inline bool btree_type_has_snapshots(enum btree_id id)
@@ -704,6 +710,17 @@ static inline bool btree_type_has_snapshots(enum btree_id id)
return (1U << id) & mask;
}
+static inline bool btree_type_has_snapshot_field(enum btree_id id)
+{
+ const unsigned mask = 0
+#define x(name, nr, flags, ...) |((!!((flags) & (BTREE_ID_SNAPSHOT_FIELD|BTREE_ID_SNAPSHOTS))) << nr)
+ BCH_BTREE_IDS()
+#undef x
+ ;
+
+ return (1U << id) & mask;
+}
+
static inline bool btree_type_has_ptrs(enum btree_id id)
{
const unsigned mask = 0
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 7dbf6b6c7f34..39c2db68123b 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -1274,14 +1274,14 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as,
if (bch2_bkey_invalid(c, bkey_i_to_s_c(insert),
btree_node_type(b), WRITE, &buf) ?:
- bch2_bkey_in_btree_node(b, bkey_i_to_s_c(insert), &buf)) {
+ bch2_bkey_in_btree_node(c, b, bkey_i_to_s_c(insert), &buf)) {
printbuf_reset(&buf);
prt_printf(&buf, "inserting invalid bkey\n ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
prt_printf(&buf, "\n ");
bch2_bkey_invalid(c, bkey_i_to_s_c(insert),
btree_node_type(b), WRITE, &buf);
- bch2_bkey_in_btree_node(b, bkey_i_to_s_c(insert), &buf);
+ bch2_bkey_in_btree_node(c, b, bkey_i_to_s_c(insert), &buf);
bch2_fs_inconsistent(c, "%s", buf.buf);
dump_stack();
@@ -1987,7 +1987,7 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
out:
if (new_path)
bch2_path_put(trans, new_path, true);
- bch2_btree_path_downgrade(trans, iter->path);
+ bch2_trans_downgrade(trans);
return ret;
err:
bch2_btree_node_free_never_used(as, trans, n);
@@ -2411,30 +2411,24 @@ void bch2_journal_entry_to_btree_root(struct bch_fs *c, struct jset_entry *entry
r->level = entry->level;
r->alive = true;
- bkey_copy(&r->key, &entry->start[0]);
+ bkey_copy(&r->key, (struct bkey_i *) entry->start);
mutex_unlock(&c->btree_root_lock);
}
struct jset_entry *
bch2_btree_roots_to_journal_entries(struct bch_fs *c,
- struct jset_entry *start,
- struct jset_entry *end)
+ struct jset_entry *end,
+ unsigned long skip)
{
- struct jset_entry *entry;
- unsigned long have = 0;
unsigned i;
- for (entry = start; entry < end; entry = vstruct_next(entry))
- if (entry->type == BCH_JSET_ENTRY_btree_root)
- __set_bit(entry->btree_id, &have);
-
mutex_lock(&c->btree_root_lock);
for (i = 0; i < btree_id_nr_alive(c); i++) {
struct btree_root *r = bch2_btree_id_root(c, i);
- if (r->alive && !test_bit(i, &have)) {
+ if (r->alive && !test_bit(i, &skip)) {
journal_entry_set(end, BCH_JSET_ENTRY_btree_root,
i, r->level, &r->key, r->key.k.u64s);
end = vstruct_next(end);
diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h
index 5e0a467fe905..4df21512d640 100644
--- a/fs/bcachefs/btree_update_interior.h
+++ b/fs/bcachefs/btree_update_interior.h
@@ -271,7 +271,7 @@ static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
struct btree_node_entry *bne = max(write_block(b),
(void *) btree_bkey_last(b, bset_tree_last(b)));
ssize_t remaining_space =
- __bch_btree_u64s_remaining(c, b, &bne->keys.start[0]);
+ __bch_btree_u64s_remaining(c, b, bne->keys.start);
if (unlikely(bset_written(b, bset(b, t)))) {
if (remaining_space > (ssize_t) (block_bytes(c) >> 3))
@@ -303,7 +303,7 @@ static inline void push_whiteout(struct bch_fs *c, struct btree *b,
k.needs_whiteout = true;
b->whiteout_u64s += k.u64s;
- bkey_copy(unwritten_whiteouts_start(c, b), &k);
+ bkey_p_copy(unwritten_whiteouts_start(c, b), &k);
}
/*
@@ -325,7 +325,7 @@ bool bch2_btree_interior_updates_flush(struct bch_fs *);
void bch2_journal_entry_to_btree_root(struct bch_fs *, struct jset_entry *);
struct jset_entry *bch2_btree_roots_to_journal_entries(struct bch_fs *,
- struct jset_entry *, struct jset_entry *);
+ struct jset_entry *, unsigned long);
void bch2_do_pending_node_rewrites(struct bch_fs *);
void bch2_free_pending_node_rewrites(struct bch_fs *);
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index a1a4b5feadaa..58d8c6ffd955 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -370,8 +370,8 @@ static inline int update_replicas(struct bch_fs *c, struct bkey_s_c k,
idx = bch2_replicas_entry_idx(c, r);
if (idx < 0 &&
- fsck_err(c, "no replicas entry\n"
- " while marking %s",
+ fsck_err(c, ptr_to_missing_replicas_entry,
+ "no replicas entry\n while marking %s",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
percpu_up_read(&c->mark_lock);
ret = bch2_mark_replicas(c, r);
@@ -695,6 +695,7 @@ static int check_bucket_ref(struct btree_trans *trans,
if (gen_after(ptr->gen, b_gen)) {
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
+ BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen,
"bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
"while marking %s",
ptr->dev, bucket_nr, b_gen,
@@ -707,6 +708,7 @@ static int check_bucket_ref(struct btree_trans *trans,
if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
+ BCH_FSCK_ERR_ptr_too_stale,
"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
"while marking %s",
ptr->dev, bucket_nr, b_gen,
@@ -720,6 +722,7 @@ static int check_bucket_ref(struct btree_trans *trans,
if (b_gen != ptr->gen && !ptr->cached) {
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
+ BCH_FSCK_ERR_stale_dirty_ptr,
"bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n"
"while marking %s",
ptr->dev, bucket_nr, b_gen,
@@ -741,6 +744,7 @@ static int check_bucket_ref(struct btree_trans *trans,
ptr_data_type &&
bucket_data_type != ptr_data_type) {
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
+ BCH_FSCK_ERR_ptr_bucket_data_type_mismatch,
"bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
"while marking %s",
ptr->dev, bucket_nr, b_gen,
@@ -754,6 +758,7 @@ static int check_bucket_ref(struct btree_trans *trans,
if ((u64) bucket_sectors + sectors > U32_MAX) {
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
+ BCH_FSCK_ERR_bucket_sector_count_overflow,
"bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX\n"
"while marking %s",
ptr->dev, bucket_nr, b_gen,
@@ -935,14 +940,12 @@ static int bch2_mark_stripe_ptr(struct btree_trans *trans,
return 0;
}
-int bch2_mark_extent(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old, struct bkey_s_c new,
- unsigned flags)
+static int __mark_extent(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k, unsigned flags)
{
u64 journal_seq = trans->journal_res.seq;
struct bch_fs *c = trans->c;
- struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
@@ -1018,6 +1021,14 @@ int bch2_mark_extent(struct btree_trans *trans,
return 0;
}
+int bch2_mark_extent(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
+{
+ return mem_trigger_run_overwrite_then_insert(__mark_extent, trans, btree_id, level, old, new, flags);
+}
+
int bch2_mark_stripe(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c old, struct bkey_s_c new,
@@ -1124,13 +1135,11 @@ int bch2_mark_stripe(struct btree_trans *trans,
return 0;
}
-int bch2_mark_reservation(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old, struct bkey_s_c new,
- unsigned flags)
+static int __mark_reservation(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k, unsigned flags)
{
struct bch_fs *c = trans->c;
- struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
struct bch_fs_usage *fs_usage;
unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
s64 sectors = (s64) k.k->size;
@@ -1157,6 +1166,14 @@ int bch2_mark_reservation(struct btree_trans *trans,
return 0;
}
+int bch2_mark_reservation(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
+{
+ return mem_trigger_run_overwrite_then_insert(__mark_reservation, trans, btree_id, level, old, new, flags);
+}
+
static s64 __bch2_mark_reflink_p(struct btree_trans *trans,
struct bkey_s_c_reflink_p p,
u64 start, u64 end,
@@ -1183,7 +1200,8 @@ static s64 __bch2_mark_reflink_p(struct btree_trans *trans,
*idx = r->offset;
return 0;
not_found:
- if (fsck_err(c, "pointer to missing indirect extent\n"
+ if (fsck_err(c, reflink_p_to_missing_reflink_v,
+ "pointer to missing indirect extent\n"
" %s\n"
" missing range %llu-%llu",
(bch2_bkey_val_to_text(&buf, c, p.s_c), buf.buf),
@@ -1211,13 +1229,11 @@ fsck_err:
return ret;
}
-int bch2_mark_reflink_p(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old, struct bkey_s_c new,
- unsigned flags)
+static int __mark_reflink_p(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k, unsigned flags)
{
struct bch_fs *c = trans->c;
- struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
struct reflink_gc *ref;
size_t l, r, m;
@@ -1251,6 +1267,14 @@ int bch2_mark_reflink_p(struct btree_trans *trans,
return ret;
}
+int bch2_mark_reflink_p(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
+{
+ return mem_trigger_run_overwrite_then_insert(__mark_reflink_p, trans, btree_id, level, old, new, flags);
+}
+
void bch2_trans_fs_usage_revert(struct btree_trans *trans,
struct replicas_delta_list *deltas)
{
@@ -1298,7 +1322,7 @@ int bch2_trans_fs_usage_apply(struct btree_trans *trans,
struct bch_fs *c = trans->c;
static int warned_disk_usage = 0;
bool warn = false;
- unsigned disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
+ u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
struct replicas_delta *d, *d2;
struct replicas_delta *top = (void *) deltas->d + deltas->used;
struct bch_fs_usage *dst;
@@ -1357,7 +1381,7 @@ int bch2_trans_fs_usage_apply(struct btree_trans *trans,
if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
bch2_trans_inconsistent(trans,
- "disk usage increased %lli more than %u sectors reserved)",
+ "disk usage increased %lli more than %llu sectors reserved)",
should_not_have_added, disk_res_sectors);
return 0;
need_mark:
@@ -1452,15 +1476,11 @@ err:
return ret;
}
-int bch2_trans_mark_extent(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old, struct bkey_i *new,
- unsigned flags)
+static int __trans_mark_extent(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k, unsigned flags)
{
struct bch_fs *c = trans->c;
- struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
- ? old
- : bkey_i_to_s_c(new);
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
@@ -1517,6 +1537,24 @@ int bch2_trans_mark_extent(struct btree_trans *trans,
return ret;
}
+int bch2_trans_mark_extent(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_i *new,
+ unsigned flags)
+{
+ struct bch_fs *c = trans->c;
+ int mod = (int) bch2_bkey_needs_rebalance(c, bkey_i_to_s_c(new)) -
+ (int) bch2_bkey_needs_rebalance(c, old);
+
+ if (mod) {
+ int ret = bch2_btree_bit_mod(trans, BTREE_ID_rebalance_work, new->k.p, mod > 0);
+ if (ret)
+ return ret;
+ }
+
+ return trigger_run_overwrite_then_insert(__trans_mark_extent, trans, btree_id, level, old, new, flags);
+}
+
static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
struct bkey_s_c_stripe s,
unsigned idx, bool deleting)
@@ -1670,15 +1708,10 @@ int bch2_trans_mark_stripe(struct btree_trans *trans,
return ret;
}
-int bch2_trans_mark_reservation(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old,
- struct bkey_i *new,
- unsigned flags)
+static int __trans_mark_reservation(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k, unsigned flags)
{
- struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
- ? old
- : bkey_i_to_s_c(new);
unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
s64 sectors = (s64) k.k->size;
struct replicas_delta_list *d;
@@ -1700,7 +1733,16 @@ int bch2_trans_mark_reservation(struct btree_trans *trans,
return 0;
}
-static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
+int bch2_trans_mark_reservation(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old,
+ struct bkey_i *new,
+ unsigned flags)
+{
+ return trigger_run_overwrite_then_insert(__trans_mark_reservation, trans, btree_id, level, old, new, flags);
+}
+
+static int trans_mark_reflink_p_segment(struct btree_trans *trans,
struct bkey_s_c_reflink_p p,
u64 *idx, unsigned flags)
{
@@ -1767,35 +1809,38 @@ err:
return ret;
}
-int bch2_trans_mark_reflink_p(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old,
- struct bkey_i *new,
- unsigned flags)
+static int __trans_mark_reflink_p(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k, unsigned flags)
{
- struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
- ? old
- : bkey_i_to_s_c(new);
struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
u64 idx, end_idx;
int ret = 0;
- if (flags & BTREE_TRIGGER_INSERT) {
- struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
-
- v->front_pad = v->back_pad = 0;
- }
-
idx = le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad);
end_idx = le64_to_cpu(p.v->idx) + p.k->size +
le32_to_cpu(p.v->back_pad);
while (idx < end_idx && !ret)
- ret = __bch2_trans_mark_reflink_p(trans, p, &idx, flags);
-
+ ret = trans_mark_reflink_p_segment(trans, p, &idx, flags);
return ret;
}
+int bch2_trans_mark_reflink_p(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old,
+ struct bkey_i *new,
+ unsigned flags)
+{
+ if (flags & BTREE_TRIGGER_INSERT) {
+ struct bch_reflink_p *v = &bkey_i_to_reflink_p(new)->v;
+
+ v->front_pad = v->back_pad = 0;
+ }
+
+ return trigger_run_overwrite_then_insert(__trans_mark_reflink_p, trans, btree_id, level, old, new, flags);
+}
+
static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
struct bch_dev *ca, size_t b,
enum bch_data_type type,
@@ -1818,6 +1863,7 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
if (a->v.data_type && type && a->v.data_type != type) {
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
+ BCH_FSCK_ERR_bucket_metadata_type_mismatch,
"bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
"while marking %s",
iter.pos.inode, iter.pos.offset, a->v.gen,
@@ -1825,16 +1871,16 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
bch2_data_types[type],
bch2_data_types[type]);
ret = -EIO;
- goto out;
+ goto err;
}
- a->v.data_type = type;
- a->v.dirty_sectors = sectors;
-
- ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
- if (ret)
- goto out;
-out:
+ if (a->v.data_type != type ||
+ a->v.dirty_sectors != sectors) {
+ a->v.data_type = type;
+ a->v.dirty_sectors = sectors;
+ ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
+ }
+err:
bch2_trans_iter_exit(trans, &iter);
return ret;
}
@@ -1929,6 +1975,22 @@ int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
return ret;
}
+int bch2_trans_mark_dev_sbs(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned i;
+
+ for_each_online_member(ca, c, i) {
+ int ret = bch2_trans_mark_dev_sb(c, ca);
+ if (ret) {
+ percpu_ref_put(&ca->ref);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
/* Disk reservations: */
#define SECTORS_CACHE 1024
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index bf8d7f407e9c..21f6cb356921 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -339,12 +339,27 @@ int bch2_trans_mark_stripe(struct btree_trans *, enum btree_id, unsigned, struct
int bch2_trans_mark_reservation(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_i *, unsigned);
int bch2_trans_mark_reflink_p(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_i *, unsigned);
+#define mem_trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)\
+({ \
+ int ret = 0; \
+ \
+ if (_old.k->type) \
+ ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_INSERT); \
+ if (!ret && _new.k->type) \
+ ret = _fn(_trans, _btree_id, _level, _new, _flags & ~BTREE_TRIGGER_OVERWRITE); \
+ ret; \
+})
+
+#define trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags) \
+ mem_trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, bkey_i_to_s_c(_new), _flags)
+
void bch2_trans_fs_usage_revert(struct btree_trans *, struct replicas_delta_list *);
int bch2_trans_fs_usage_apply(struct btree_trans *, struct replicas_delta_list *);
int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *,
size_t, enum bch_data_type, unsigned);
int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *);
+int bch2_trans_mark_dev_sbs(struct bch_fs *);
static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
{
diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c
index f69e15dc699c..4bb88aefed12 100644
--- a/fs/bcachefs/chardev.c
+++ b/fs/bcachefs/chardev.c
@@ -332,8 +332,8 @@ static ssize_t bch2_data_job_read(struct file *file, char __user *buf,
struct bch_ioctl_data_event e = {
.type = BCH_DATA_EVENT_PROGRESS,
.p.data_type = ctx->stats.data_type,
- .p.btree_id = ctx->stats.btree_id,
- .p.pos = ctx->stats.pos,
+ .p.btree_id = ctx->stats.pos.btree,
+ .p.pos = ctx->stats.pos.pos,
.p.sectors_done = atomic64_read(&ctx->stats.sectors_seen),
.p.sectors_total = bch2_fs_usage_read_short(c).used,
};
diff --git a/fs/bcachefs/compress.c b/fs/bcachefs/compress.c
index 1480b64547b0..a8b148ec2a2b 100644
--- a/fs/bcachefs/compress.c
+++ b/fs/bcachefs/compress.c
@@ -697,14 +697,32 @@ err:
return ret;
}
+void bch2_compression_opt_to_text(struct printbuf *out, u64 v)
+{
+ struct bch_compression_opt opt = bch2_compression_decode(v);
+
+ if (opt.type < BCH_COMPRESSION_OPT_NR)
+ prt_str(out, bch2_compression_opts[opt.type]);
+ else
+ prt_printf(out, "(unknown compression opt %u)", opt.type);
+ if (opt.level)
+ prt_printf(out, ":%u", opt.level);
+}
+
void bch2_opt_compression_to_text(struct printbuf *out,
struct bch_fs *c,
struct bch_sb *sb,
u64 v)
{
- struct bch_compression_opt opt = bch2_compression_decode(v);
+ return bch2_compression_opt_to_text(out, v);
+}
- prt_str(out, bch2_compression_opts[opt.type]);
- if (opt.level)
- prt_printf(out, ":%u", opt.level);
+int bch2_opt_compression_validate(u64 v, struct printbuf *err)
+{
+ if (!bch2_compression_opt_valid(v)) {
+ prt_printf(err, "invalid compression opt %llu", v);
+ return -BCH_ERR_invalid_sb_opt_compression;
+ }
+
+ return 0;
}
diff --git a/fs/bcachefs/compress.h b/fs/bcachefs/compress.h
index 052ea303241f..607fd5e232c9 100644
--- a/fs/bcachefs/compress.h
+++ b/fs/bcachefs/compress.h
@@ -4,12 +4,18 @@
#include "extents_types.h"
+static const unsigned __bch2_compression_opt_to_type[] = {
+#define x(t, n) [BCH_COMPRESSION_OPT_##t] = BCH_COMPRESSION_TYPE_##t,
+ BCH_COMPRESSION_OPTS()
+#undef x
+};
+
struct bch_compression_opt {
u8 type:4,
level:4;
};
-static inline struct bch_compression_opt bch2_compression_decode(unsigned v)
+static inline struct bch_compression_opt __bch2_compression_decode(unsigned v)
{
return (struct bch_compression_opt) {
.type = v & 15,
@@ -17,17 +23,25 @@ static inline struct bch_compression_opt bch2_compression_decode(unsigned v)
};
}
+static inline bool bch2_compression_opt_valid(unsigned v)
+{
+ struct bch_compression_opt opt = __bch2_compression_decode(v);
+
+ return opt.type < ARRAY_SIZE(__bch2_compression_opt_to_type) && !(!opt.type && opt.level);
+}
+
+static inline struct bch_compression_opt bch2_compression_decode(unsigned v)
+{
+ return bch2_compression_opt_valid(v)
+ ? __bch2_compression_decode(v)
+ : (struct bch_compression_opt) { 0 };
+}
+
static inline unsigned bch2_compression_encode(struct bch_compression_opt opt)
{
return opt.type|(opt.level << 4);
}
-static const unsigned __bch2_compression_opt_to_type[] = {
-#define x(t, n) [BCH_COMPRESSION_OPT_##t] = BCH_COMPRESSION_TYPE_##t,
- BCH_COMPRESSION_OPTS()
-#undef x
-};
-
static inline enum bch_compression_type bch2_compression_opt_to_type(unsigned v)
{
return __bch2_compression_opt_to_type[bch2_compression_decode(v).type];
@@ -44,12 +58,16 @@ int bch2_check_set_has_compressed_data(struct bch_fs *, unsigned);
void bch2_fs_compress_exit(struct bch_fs *);
int bch2_fs_compress_init(struct bch_fs *);
+void bch2_compression_opt_to_text(struct printbuf *, u64);
+
int bch2_opt_compression_parse(struct bch_fs *, const char *, u64 *, struct printbuf *);
void bch2_opt_compression_to_text(struct printbuf *, struct bch_fs *, struct bch_sb *, u64);
+int bch2_opt_compression_validate(u64, struct printbuf *);
#define bch2_opt_compression (struct bch_opt_fn) { \
- .parse = bch2_opt_compression_parse, \
- .to_text = bch2_opt_compression_to_text, \
+ .parse = bch2_opt_compression_parse, \
+ .to_text = bch2_opt_compression_to_text, \
+ .validate = bch2_opt_compression_validate, \
}
#endif /* _BCACHEFS_COMPRESS_H */
diff --git a/fs/bcachefs/darray.h b/fs/bcachefs/darray.h
index 114f86b45fd5..87b4b2d1ec76 100644
--- a/fs/bcachefs/darray.h
+++ b/fs/bcachefs/darray.h
@@ -69,9 +69,15 @@ static inline int __darray_make_room(darray_void *d, size_t t_size, size_t more,
_ret; \
})
+#define darray_remove_item(_d, _pos) \
+ array_remove_item((_d)->data, (_d)->nr, (_pos) - (_d)->data)
+
#define darray_for_each(_d, _i) \
for (_i = (_d).data; _i < (_d).data + (_d).nr; _i++)
+#define darray_for_each_reverse(_d, _i) \
+ for (_i = (_d).data + (_d).nr - 1; _i >= (_d).data; --_i)
+
#define darray_init(_d) \
do { \
(_d)->data = NULL; \
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index 899ff46de8e0..0771a6d880bf 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -13,6 +13,7 @@
#include "keylist.h"
#include "move.h"
#include "nocow_locking.h"
+#include "rebalance.h"
#include "subvolume.h"
#include "trace.h"
@@ -161,11 +162,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans,
if (((1U << i) & m->data_opts.rewrite_ptrs) &&
(ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
!ptr->cached) {
- bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), ptr);
- /*
- * See comment below:
bch2_extent_ptr_set_cached(bkey_i_to_s(insert), ptr);
- */
rewrites_found |= 1U << i;
}
i++;
@@ -211,14 +208,8 @@ restart_drop_extra_replicas:
if (!p.ptr.cached &&
durability - ptr_durability >= m->op.opts.data_replicas) {
durability -= ptr_durability;
- bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), &entry->ptr);
- /*
- * Currently, we're dropping unneeded replicas
- * instead of marking them as cached, since
- * cached data in stripe buckets prevents them
- * from being reused:
+
bch2_extent_ptr_set_cached(bkey_i_to_s(insert), &entry->ptr);
- */
goto restart_drop_extra_replicas;
}
}
@@ -251,11 +242,11 @@ restart_drop_extra_replicas:
ret = bch2_insert_snapshot_whiteouts(trans, m->btree_id,
k.k->p, bkey_start_pos(&insert->k)) ?:
bch2_insert_snapshot_whiteouts(trans, m->btree_id,
- k.k->p, insert->k.p);
- if (ret)
- goto err;
-
- ret = bch2_trans_update(trans, &iter, insert,
+ k.k->p, insert->k.p) ?:
+ bch2_bkey_set_needs_rebalance(c, insert,
+ op->opts.background_target,
+ op->opts.background_compression) ?:
+ bch2_trans_update(trans, &iter, insert,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
bch2_trans_commit(trans, &op->res,
NULL,
@@ -281,11 +272,11 @@ next:
}
continue;
nowork:
- if (m->ctxt && m->ctxt->stats) {
+ if (m->stats && m->stats) {
BUG_ON(k.k->p.offset <= iter.pos.offset);
- atomic64_inc(&m->ctxt->stats->keys_raced);
+ atomic64_inc(&m->stats->keys_raced);
atomic64_add(k.k->p.offset - iter.pos.offset,
- &m->ctxt->stats->sectors_raced);
+ &m->stats->sectors_raced);
}
this_cpu_inc(c->counters[BCH_COUNTER_move_extent_fail]);
@@ -439,6 +430,8 @@ int bch2_data_update_init(struct btree_trans *trans,
bch2_bkey_buf_reassemble(&m->k, c, k);
m->btree_id = btree_id;
m->data_opts = data_opts;
+ m->ctxt = ctxt;
+ m->stats = ctxt ? ctxt->stats : NULL;
bch2_write_op_init(&m->op, c, io_opts);
m->op.pos = bkey_start_pos(k.k);
@@ -487,7 +480,7 @@ int bch2_data_update_init(struct btree_trans *trans,
if (c->opts.nocow_enabled) {
if (ctxt) {
- move_ctxt_wait_event(ctxt, trans,
+ move_ctxt_wait_event(ctxt,
(locked = bch2_bucket_nocow_trylock(&c->nocow_locks,
PTR_BUCKET_POS(c, &p.ptr), 0)) ||
!atomic_read(&ctxt->read_sectors));
diff --git a/fs/bcachefs/data_update.h b/fs/bcachefs/data_update.h
index 7ca1f98d7e94..9dc17b9d8379 100644
--- a/fs/bcachefs/data_update.h
+++ b/fs/bcachefs/data_update.h
@@ -23,6 +23,7 @@ struct data_update {
struct bkey_buf k;
struct data_update_opts data_opts;
struct moving_context *ctxt;
+ struct bch_move_stats *stats;
struct bch_write_op op;
};
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index 75a3dc7cbd47..57c5128db173 100644
--- a/fs/bcachefs/debug.c
+++ b/fs/bcachefs/debug.c
@@ -517,7 +517,7 @@ static void bch2_cached_btree_node_to_text(struct printbuf *out, struct bch_fs *
prt_printf(out, "%px btree=%s l=%u ",
b,
- bch2_btree_ids[b->c.btree_id],
+ bch2_btree_id_str(b->c.btree_id),
b->c.level);
prt_newline(out);
@@ -919,18 +919,18 @@ void bch2_fs_debug_init(struct bch_fs *c)
bd < c->btree_debug + ARRAY_SIZE(c->btree_debug);
bd++) {
bd->id = bd - c->btree_debug;
- debugfs_create_file(bch2_btree_ids[bd->id],
+ debugfs_create_file(bch2_btree_id_str(bd->id),
0400, c->btree_debug_dir, bd,
&btree_debug_ops);
snprintf(name, sizeof(name), "%s-formats",
- bch2_btree_ids[bd->id]);
+ bch2_btree_id_str(bd->id));
debugfs_create_file(name, 0400, c->btree_debug_dir, bd,
&btree_format_debug_ops);
snprintf(name, sizeof(name), "%s-bfloat-failed",
- bch2_btree_ids[bd->id]);
+ bch2_btree_id_str(bd->id));
debugfs_create_file(name, 0400, c->btree_debug_dir, bd,
&bfloat_failed_debug_ops);
diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c
index 6c6c8d57d72b..1a0f2d571569 100644
--- a/fs/bcachefs/dirent.c
+++ b/fs/bcachefs/dirent.c
@@ -97,61 +97,51 @@ const struct bch_hash_desc bch2_dirent_hash_desc = {
.is_visible = dirent_is_visible,
};
-int bch2_dirent_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_dirent_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
struct qstr d_name = bch2_dirent_get_name(d);
+ int ret = 0;
- if (!d_name.len) {
- prt_printf(err, "empty name");
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(!d_name.len, c, err,
+ dirent_empty_name,
+ "empty name");
- if (bkey_val_u64s(k.k) > dirent_val_u64s(d_name.len)) {
- prt_printf(err, "value too big (%zu > %u)",
- bkey_val_u64s(k.k), dirent_val_u64s(d_name.len));
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(bkey_val_u64s(k.k) > dirent_val_u64s(d_name.len), c, err,
+ dirent_val_too_big,
+ "value too big (%zu > %u)",
+ bkey_val_u64s(k.k), dirent_val_u64s(d_name.len));
/*
* Check new keys don't exceed the max length
* (older keys may be larger.)
*/
- if ((flags & BKEY_INVALID_COMMIT) && d_name.len > BCH_NAME_MAX) {
- prt_printf(err, "dirent name too big (%u > %u)",
- d_name.len, BCH_NAME_MAX);
- return -BCH_ERR_invalid_bkey;
- }
-
- if (d_name.len != strnlen(d_name.name, d_name.len)) {
- prt_printf(err, "dirent has stray data after name's NUL");
- return -BCH_ERR_invalid_bkey;
- }
-
- if (d_name.len == 1 && !memcmp(d_name.name, ".", 1)) {
- prt_printf(err, "invalid name");
- return -BCH_ERR_invalid_bkey;
- }
-
- if (d_name.len == 2 && !memcmp(d_name.name, "..", 2)) {
- prt_printf(err, "invalid name");
- return -BCH_ERR_invalid_bkey;
- }
-
- if (memchr(d_name.name, '/', d_name.len)) {
- prt_printf(err, "invalid name");
- return -BCH_ERR_invalid_bkey;
- }
-
- if (d.v->d_type != DT_SUBVOL &&
- le64_to_cpu(d.v->d_inum) == d.k->p.inode) {
- prt_printf(err, "dirent points to own directory");
- return -BCH_ERR_invalid_bkey;
- }
-
- return 0;
+ bkey_fsck_err_on((flags & BKEY_INVALID_COMMIT) && d_name.len > BCH_NAME_MAX, c, err,
+ dirent_name_too_long,
+ "dirent name too big (%u > %u)",
+ d_name.len, BCH_NAME_MAX);
+
+ bkey_fsck_err_on(d_name.len != strnlen(d_name.name, d_name.len), c, err,
+ dirent_name_embedded_nul,
+ "dirent has stray data after name's NUL");
+
+ bkey_fsck_err_on((d_name.len == 1 && !memcmp(d_name.name, ".", 1)) ||
+ (d_name.len == 2 && !memcmp(d_name.name, "..", 2)), c, err,
+ dirent_name_dot_or_dotdot,
+ "invalid name");
+
+ bkey_fsck_err_on(memchr(d_name.name, '/', d_name.len), c, err,
+ dirent_name_has_slash,
+ "name with /");
+
+ bkey_fsck_err_on(d.v->d_type != DT_SUBVOL &&
+ le64_to_cpu(d.v->d_inum) == d.k->p.inode, c, err,
+ dirent_to_itself,
+ "dirent points to own directory");
+fsck_err:
+ return ret;
}
void bch2_dirent_to_text(struct printbuf *out, struct bch_fs *c,
diff --git a/fs/bcachefs/dirent.h b/fs/bcachefs/dirent.h
index e9fa1df38232..cd262bf4d9c5 100644
--- a/fs/bcachefs/dirent.h
+++ b/fs/bcachefs/dirent.h
@@ -7,7 +7,7 @@
enum bkey_invalid_flags;
extern const struct bch_hash_desc bch2_dirent_hash_desc;
-int bch2_dirent_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_dirent_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
void bch2_dirent_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
diff --git a/fs/bcachefs/disk_groups.c b/fs/bcachefs/disk_groups.c
index e00133b6ea51..d613695abf9f 100644
--- a/fs/bcachefs/disk_groups.c
+++ b/fs/bcachefs/disk_groups.c
@@ -175,6 +175,7 @@ int bch2_sb_disk_groups_to_cpu(struct bch_fs *c)
dst->deleted = BCH_GROUP_DELETED(src);
dst->parent = BCH_GROUP_PARENT(src);
+ memcpy(dst->label, src->label, sizeof(dst->label));
}
for (i = 0; i < c->disk_sb.sb->nr_devices; i++) {
@@ -382,7 +383,57 @@ int bch2_disk_path_find_or_create(struct bch_sb_handle *sb, const char *name)
return v;
}
-void bch2_disk_path_to_text(struct printbuf *out, struct bch_sb *sb, unsigned v)
+void bch2_disk_path_to_text(struct printbuf *out, struct bch_fs *c, unsigned v)
+{
+ struct bch_disk_groups_cpu *groups;
+ struct bch_disk_group_cpu *g;
+ unsigned nr = 0;
+ u16 path[32];
+
+ out->atomic++;
+ rcu_read_lock();
+ groups = rcu_dereference(c->disk_groups);
+ if (!groups)
+ goto invalid;
+
+ while (1) {
+ if (nr == ARRAY_SIZE(path))
+ goto invalid;
+
+ if (v >= groups->nr)
+ goto invalid;
+
+ g = groups->entries + v;
+
+ if (g->deleted)
+ goto invalid;
+
+ path[nr++] = v;
+
+ if (!g->parent)
+ break;
+
+ v = g->parent - 1;
+ }
+
+ while (nr) {
+ v = path[--nr];
+ g = groups->entries + v;
+
+ prt_printf(out, "%.*s", (int) sizeof(g->label), g->label);
+ if (nr)
+ prt_printf(out, ".");
+ }
+out:
+ rcu_read_unlock();
+ out->atomic--;
+ return;
+invalid:
+ prt_printf(out, "invalid label %u", v);
+ goto out;
+}
+
+void bch2_disk_path_to_text_sb(struct printbuf *out, struct bch_sb *sb, unsigned v)
{
struct bch_sb_field_disk_groups *groups =
bch2_sb_field_get(sb, disk_groups);
@@ -493,10 +544,7 @@ int bch2_opt_target_parse(struct bch_fs *c, const char *val, u64 *res,
return -EINVAL;
}
-void bch2_opt_target_to_text(struct printbuf *out,
- struct bch_fs *c,
- struct bch_sb *sb,
- u64 v)
+void bch2_target_to_text(struct printbuf *out, struct bch_fs *c, unsigned v)
{
struct target t = target_decode(v);
@@ -504,47 +552,69 @@ void bch2_opt_target_to_text(struct printbuf *out,
case TARGET_NULL:
prt_printf(out, "none");
break;
- case TARGET_DEV:
- if (c) {
- struct bch_dev *ca;
-
- rcu_read_lock();
- ca = t.dev < c->sb.nr_devices
- ? rcu_dereference(c->devs[t.dev])
- : NULL;
-
- if (ca && percpu_ref_tryget(&ca->io_ref)) {
- prt_printf(out, "/dev/%pg", ca->disk_sb.bdev);
- percpu_ref_put(&ca->io_ref);
- } else if (ca) {
- prt_printf(out, "offline device %u", t.dev);
- } else {
- prt_printf(out, "invalid device %u", t.dev);
- }
-
- rcu_read_unlock();
+ case TARGET_DEV: {
+ struct bch_dev *ca;
+
+ rcu_read_lock();
+ ca = t.dev < c->sb.nr_devices
+ ? rcu_dereference(c->devs[t.dev])
+ : NULL;
+
+ if (ca && percpu_ref_tryget(&ca->io_ref)) {
+ prt_printf(out, "/dev/%pg", ca->disk_sb.bdev);
+ percpu_ref_put(&ca->io_ref);
+ } else if (ca) {
+ prt_printf(out, "offline device %u", t.dev);
} else {
- struct bch_member m = bch2_sb_member_get(sb, t.dev);
-
- if (bch2_dev_exists(sb, t.dev)) {
- prt_printf(out, "Device ");
- pr_uuid(out, m.uuid.b);
- prt_printf(out, " (%u)", t.dev);
- } else {
- prt_printf(out, "Bad device %u", t.dev);
- }
+ prt_printf(out, "invalid device %u", t.dev);
}
+
+ rcu_read_unlock();
break;
+ }
case TARGET_GROUP:
- if (c) {
- mutex_lock(&c->sb_lock);
- bch2_disk_path_to_text(out, c->disk_sb.sb, t.group);
- mutex_unlock(&c->sb_lock);
+ bch2_disk_path_to_text(out, c, t.group);
+ break;
+ default:
+ BUG();
+ }
+}
+
+void bch2_target_to_text_sb(struct printbuf *out, struct bch_sb *sb, unsigned v)
+{
+ struct target t = target_decode(v);
+
+ switch (t.type) {
+ case TARGET_NULL:
+ prt_printf(out, "none");
+ break;
+ case TARGET_DEV: {
+ struct bch_member m = bch2_sb_member_get(sb, t.dev);
+
+ if (bch2_dev_exists(sb, t.dev)) {
+ prt_printf(out, "Device ");
+ pr_uuid(out, m.uuid.b);
+ prt_printf(out, " (%u)", t.dev);
} else {
- bch2_disk_path_to_text(out, sb, t.group);
+ prt_printf(out, "Bad device %u", t.dev);
}
break;
+ }
+ case TARGET_GROUP:
+ bch2_disk_path_to_text_sb(out, sb, t.group);
+ break;
default:
BUG();
}
}
+
+void bch2_opt_target_to_text(struct printbuf *out,
+ struct bch_fs *c,
+ struct bch_sb *sb,
+ u64 v)
+{
+ if (c)
+ bch2_target_to_text(out, c, v);
+ else
+ bch2_target_to_text_sb(out, sb, v);
+}
diff --git a/fs/bcachefs/disk_groups.h b/fs/bcachefs/disk_groups.h
index bd7711767fd4..441826fff224 100644
--- a/fs/bcachefs/disk_groups.h
+++ b/fs/bcachefs/disk_groups.h
@@ -2,6 +2,8 @@
#ifndef _BCACHEFS_DISK_GROUPS_H
#define _BCACHEFS_DISK_GROUPS_H
+#include "disk_groups_types.h"
+
extern const struct bch_sb_field_ops bch_sb_field_ops_disk_groups;
static inline unsigned disk_groups_nr(struct bch_sb_field_disk_groups *groups)
@@ -83,7 +85,10 @@ int bch2_disk_path_find(struct bch_sb_handle *, const char *);
/* Exported for userspace bcachefs-tools: */
int bch2_disk_path_find_or_create(struct bch_sb_handle *, const char *);
-void bch2_disk_path_to_text(struct printbuf *, struct bch_sb *, unsigned);
+void bch2_disk_path_to_text(struct printbuf *, struct bch_fs *, unsigned);
+void bch2_disk_path_to_text_sb(struct printbuf *, struct bch_sb *, unsigned);
+
+void bch2_target_to_text(struct printbuf *out, struct bch_fs *, unsigned);
int bch2_opt_target_parse(struct bch_fs *, const char *, u64 *, struct printbuf *);
void bch2_opt_target_to_text(struct printbuf *, struct bch_fs *, struct bch_sb *, u64);
diff --git a/fs/bcachefs/disk_groups_types.h b/fs/bcachefs/disk_groups_types.h
new file mode 100644
index 000000000000..a54ef085b13d
--- /dev/null
+++ b/fs/bcachefs/disk_groups_types.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_DISK_GROUPS_TYPES_H
+#define _BCACHEFS_DISK_GROUPS_TYPES_H
+
+struct bch_disk_group_cpu {
+ bool deleted;
+ u16 parent;
+ u8 label[BCH_SB_LABEL_SIZE];
+ struct bch_devs_mask devs;
+};
+
+struct bch_disk_groups_cpu {
+ struct rcu_head rcu;
+ unsigned nr;
+ struct bch_disk_group_cpu entries[] __counted_by(nr);
+};
+
+#endif /* _BCACHEFS_DISK_GROUPS_TYPES_H */
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index 8646856e4539..875f7c5a6fca 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -105,29 +105,26 @@ struct ec_bio {
/* Stripes btree keys: */
-int bch2_stripe_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_stripe_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
+ int ret = 0;
- if (bkey_eq(k.k->p, POS_MIN)) {
- prt_printf(err, "stripe at POS_MIN");
- return -BCH_ERR_invalid_bkey;
- }
-
- if (k.k->p.inode) {
- prt_printf(err, "nonzero inode field");
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(bkey_eq(k.k->p, POS_MIN) ||
+ bpos_gt(k.k->p, POS(0, U32_MAX)), c, err,
+ stripe_pos_bad,
+ "stripe at bad pos");
- if (bkey_val_u64s(k.k) < stripe_val_u64s(s)) {
- prt_printf(err, "incorrect value size (%zu < %u)",
- bkey_val_u64s(k.k), stripe_val_u64s(s));
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(bkey_val_u64s(k.k) < stripe_val_u64s(s), c, err,
+ stripe_val_size_bad,
+ "incorrect value size (%zu < %u)",
+ bkey_val_u64s(k.k), stripe_val_u64s(s));
- return bch2_bkey_ptrs_invalid(c, k, flags, err);
+ ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
+fsck_err:
+ return ret;
}
void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
@@ -153,6 +150,7 @@ void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
prt_printf(out, " %u:%llu:%u", ptr->dev, b, offset);
if (i < nr_data)
prt_printf(out, "#%u", stripe_blockcount_get(s, i));
+ prt_printf(out, " gen %u", ptr->gen);
if (ptr_stale(ca, ptr))
prt_printf(out, " stale");
}
@@ -306,16 +304,21 @@ static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
struct bch_csum got = ec_block_checksum(buf, i, offset);
if (bch2_crc_cmp(want, got)) {
- struct printbuf buf2 = PRINTBUF;
+ struct printbuf err = PRINTBUF;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, v->ptrs[i].dev);
+
+ prt_printf(&err, "stripe checksum error: expected %0llx:%0llx got %0llx:%0llx (type %s)\n",
+ want.hi, want.lo,
+ got.hi, got.lo,
+ bch2_csum_types[v->csum_type]);
+ prt_printf(&err, " for %ps at %u of\n ", (void *) _RET_IP_, i);
+ bch2_bkey_val_to_text(&err, c, bkey_i_to_s_c(&buf->key));
+ bch_err_ratelimited(ca, "%s", err.buf);
+ printbuf_exit(&err);
- bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&buf->key));
-
- bch_err_ratelimited(c,
- "stripe checksum error for %ps at %u:%u: csum type %u, expected %llx got %llx\n%s",
- (void *) _RET_IP_, i, j, v->csum_type,
- want.lo, got.lo, buf2.buf);
- printbuf_exit(&buf2);
clear_bit(i, buf->valid);
+
+ bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
break;
}
@@ -373,7 +376,11 @@ static void ec_block_endio(struct bio *bio)
struct bch_dev *ca = ec_bio->ca;
struct closure *cl = bio->bi_private;
- if (bch2_dev_io_err_on(bio->bi_status, ca, "erasure coding %s error: %s",
+ if (bch2_dev_io_err_on(bio->bi_status, ca,
+ bio_data_dir(bio)
+ ? BCH_MEMBER_ERROR_write
+ : BCH_MEMBER_ERROR_read,
+ "erasure coding %s error: %s",
bio_data_dir(bio) ? "write" : "read",
bch2_blk_status_to_str(bio->bi_status)))
clear_bit(ec_bio->idx, ec_bio->buf->valid);
@@ -474,14 +481,10 @@ err:
return ret;
}
-static int get_stripe_key(struct bch_fs *c, u64 idx, struct ec_stripe_buf *stripe)
-{
- return bch2_trans_run(c, get_stripe_key_trans(trans, idx, stripe));
-}
-
/* recovery read path: */
-int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio)
+int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio)
{
+ struct bch_fs *c = trans->c;
struct ec_stripe_buf *buf;
struct closure cl;
struct bch_stripe *v;
@@ -496,7 +499,7 @@ int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio)
if (!buf)
return -BCH_ERR_ENOMEM_ec_read_extent;
- ret = get_stripe_key(c, rbio->pick.ec.idx, buf);
+ ret = lockrestart_do(trans, get_stripe_key_trans(trans, rbio->pick.ec.idx, buf));
if (ret) {
bch_err_ratelimited(c,
"error doing reconstruct read: error %i looking up stripe", ret);
diff --git a/fs/bcachefs/ec.h b/fs/bcachefs/ec.h
index 966d165a3b66..7d0237c9819f 100644
--- a/fs/bcachefs/ec.h
+++ b/fs/bcachefs/ec.h
@@ -8,7 +8,7 @@
enum bkey_invalid_flags;
-int bch2_stripe_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_stripe_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
void bch2_stripe_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
@@ -199,7 +199,7 @@ struct ec_stripe_head {
struct ec_stripe_new *s;
};
-int bch2_ec_read_extent(struct bch_fs *, struct bch_read_bio *);
+int bch2_ec_read_extent(struct btree_trans *, struct bch_read_bio *);
void *bch2_writepoint_ec_buf(struct bch_fs *, struct write_point *);
diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h
index 7cc083776a2e..68a1a96bb7ca 100644
--- a/fs/bcachefs/errcode.h
+++ b/fs/bcachefs/errcode.h
@@ -3,6 +3,8 @@
#define _BCACHEFS_ERRCODE_H
#define BCH_ERRCODES() \
+ x(ERANGE, ERANGE_option_too_small) \
+ x(ERANGE, ERANGE_option_too_big) \
x(ENOMEM, ENOMEM_stripe_buf) \
x(ENOMEM, ENOMEM_replicas_table) \
x(ENOMEM, ENOMEM_cpu_replicas) \
@@ -213,6 +215,8 @@
x(BCH_ERR_invalid_sb, invalid_sb_crypt) \
x(BCH_ERR_invalid_sb, invalid_sb_clean) \
x(BCH_ERR_invalid_sb, invalid_sb_quota) \
+ x(BCH_ERR_invalid_sb, invalid_sb_errors) \
+ x(BCH_ERR_invalid_sb, invalid_sb_opt_compression) \
x(BCH_ERR_invalid, invalid_bkey) \
x(BCH_ERR_operation_blocked, nocow_lock_blocked) \
x(EIO, btree_node_read_err) \
diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c
index 2a5af8872613..7b28d37922fd 100644
--- a/fs/bcachefs/error.c
+++ b/fs/bcachefs/error.c
@@ -56,8 +56,9 @@ void bch2_io_error_work(struct work_struct *work)
up_write(&c->state_lock);
}
-void bch2_io_error(struct bch_dev *ca)
+void bch2_io_error(struct bch_dev *ca, enum bch_member_error_type type)
{
+ atomic64_inc(&ca->errors[type]);
//queue_work(system_long_wq, &ca->io_error_work);
}
@@ -116,31 +117,34 @@ static struct fsck_err_state *fsck_err_get(struct bch_fs *c, const char *fmt)
if (test_bit(BCH_FS_FSCK_DONE, &c->flags))
return NULL;
- list_for_each_entry(s, &c->fsck_errors, list)
+ list_for_each_entry(s, &c->fsck_error_msgs, list)
if (s->fmt == fmt) {
/*
* move it to the head of the list: repeated fsck errors
* are common
*/
- list_move(&s->list, &c->fsck_errors);
+ list_move(&s->list, &c->fsck_error_msgs);
return s;
}
s = kzalloc(sizeof(*s), GFP_NOFS);
if (!s) {
- if (!c->fsck_alloc_err)
+ if (!c->fsck_alloc_msgs_err)
bch_err(c, "kmalloc err, cannot ratelimit fsck errs");
- c->fsck_alloc_err = true;
+ c->fsck_alloc_msgs_err = true;
return NULL;
}
INIT_LIST_HEAD(&s->list);
s->fmt = fmt;
- list_add(&s->list, &c->fsck_errors);
+ list_add(&s->list, &c->fsck_error_msgs);
return s;
}
-int bch2_fsck_err(struct bch_fs *c, unsigned flags, const char *fmt, ...)
+int bch2_fsck_err(struct bch_fs *c,
+ enum bch_fsck_flags flags,
+ enum bch_sb_error_id err,
+ const char *fmt, ...)
{
struct fsck_err_state *s = NULL;
va_list args;
@@ -148,11 +152,13 @@ int bch2_fsck_err(struct bch_fs *c, unsigned flags, const char *fmt, ...)
struct printbuf buf = PRINTBUF, *out = &buf;
int ret = -BCH_ERR_fsck_ignore;
+ bch2_sb_error_count(c, err);
+
va_start(args, fmt);
prt_vprintf(out, fmt, args);
va_end(args);
- mutex_lock(&c->fsck_error_lock);
+ mutex_lock(&c->fsck_error_msgs_lock);
s = fsck_err_get(c, fmt);
if (s) {
/*
@@ -162,7 +168,7 @@ int bch2_fsck_err(struct bch_fs *c, unsigned flags, const char *fmt, ...)
*/
if (s->last_msg && !strcmp(buf.buf, s->last_msg)) {
ret = s->ret;
- mutex_unlock(&c->fsck_error_lock);
+ mutex_unlock(&c->fsck_error_msgs_lock);
printbuf_exit(&buf);
return ret;
}
@@ -257,7 +263,7 @@ int bch2_fsck_err(struct bch_fs *c, unsigned flags, const char *fmt, ...)
if (s)
s->ret = ret;
- mutex_unlock(&c->fsck_error_lock);
+ mutex_unlock(&c->fsck_error_msgs_lock);
printbuf_exit(&buf);
@@ -278,9 +284,9 @@ void bch2_flush_fsck_errs(struct bch_fs *c)
{
struct fsck_err_state *s, *n;
- mutex_lock(&c->fsck_error_lock);
+ mutex_lock(&c->fsck_error_msgs_lock);
- list_for_each_entry_safe(s, n, &c->fsck_errors, list) {
+ list_for_each_entry_safe(s, n, &c->fsck_error_msgs, list) {
if (s->ratelimited && s->last_msg)
bch_err(c, "Saw %llu errors like:\n %s", s->nr, s->last_msg);
@@ -289,5 +295,5 @@ void bch2_flush_fsck_errs(struct bch_fs *c)
kfree(s);
}
- mutex_unlock(&c->fsck_error_lock);
+ mutex_unlock(&c->fsck_error_msgs_lock);
}
diff --git a/fs/bcachefs/error.h b/fs/bcachefs/error.h
index 7ce9540052e5..d167d65986e0 100644
--- a/fs/bcachefs/error.h
+++ b/fs/bcachefs/error.h
@@ -4,6 +4,7 @@
#include <linux/list.h>
#include <linux/printk.h>
+#include "sb-errors.h"
struct bch_dev;
struct bch_fs;
@@ -101,18 +102,26 @@ struct fsck_err_state {
char *last_msg;
};
-#define FSCK_CAN_FIX (1 << 0)
-#define FSCK_CAN_IGNORE (1 << 1)
-#define FSCK_NEED_FSCK (1 << 2)
-#define FSCK_NO_RATELIMIT (1 << 3)
+enum bch_fsck_flags {
+ FSCK_CAN_FIX = 1 << 0,
+ FSCK_CAN_IGNORE = 1 << 1,
+ FSCK_NEED_FSCK = 1 << 2,
+ FSCK_NO_RATELIMIT = 1 << 3,
+};
+
+#define fsck_err_count(_c, _err) bch2_sb_err_count(_c, BCH_FSCK_ERR_##_err)
-__printf(3, 4) __cold
-int bch2_fsck_err(struct bch_fs *, unsigned, const char *, ...);
+__printf(4, 5) __cold
+int bch2_fsck_err(struct bch_fs *,
+ enum bch_fsck_flags,
+ enum bch_sb_error_id,
+ const char *, ...);
void bch2_flush_fsck_errs(struct bch_fs *);
-#define __fsck_err(c, _flags, msg, ...) \
+#define __fsck_err(c, _flags, _err_type, ...) \
({ \
- int _ret = bch2_fsck_err(c, _flags, msg, ##__VA_ARGS__); \
+ int _ret = bch2_fsck_err(c, _flags, BCH_FSCK_ERR_##_err_type, \
+ __VA_ARGS__); \
\
if (_ret != -BCH_ERR_fsck_fix && \
_ret != -BCH_ERR_fsck_ignore) { \
@@ -127,26 +136,53 @@ void bch2_flush_fsck_errs(struct bch_fs *);
/* XXX: mark in superblock that filesystem contains errors, if we ignore: */
-#define __fsck_err_on(cond, c, _flags, ...) \
- (unlikely(cond) ? __fsck_err(c, _flags, ##__VA_ARGS__) : false)
+#define __fsck_err_on(cond, c, _flags, _err_type, ...) \
+ (unlikely(cond) ? __fsck_err(c, _flags, _err_type, __VA_ARGS__) : false)
+
+#define need_fsck_err_on(cond, c, _err_type, ...) \
+ __fsck_err_on(cond, c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, _err_type, __VA_ARGS__)
+
+#define need_fsck_err(c, _err_type, ...) \
+ __fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, _err_type, __VA_ARGS__)
+
+#define mustfix_fsck_err(c, _err_type, ...) \
+ __fsck_err(c, FSCK_CAN_FIX, _err_type, __VA_ARGS__)
+
+#define mustfix_fsck_err_on(cond, c, _err_type, ...) \
+ __fsck_err_on(cond, c, FSCK_CAN_FIX, _err_type, __VA_ARGS__)
-#define need_fsck_err_on(cond, c, ...) \
- __fsck_err_on(cond, c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, ##__VA_ARGS__)
+#define fsck_err(c, _err_type, ...) \
+ __fsck_err(c, FSCK_CAN_FIX|FSCK_CAN_IGNORE, _err_type, __VA_ARGS__)
-#define need_fsck_err(c, ...) \
- __fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, ##__VA_ARGS__)
+#define fsck_err_on(cond, c, _err_type, ...) \
+ __fsck_err_on(cond, c, FSCK_CAN_FIX|FSCK_CAN_IGNORE, _err_type, __VA_ARGS__)
-#define mustfix_fsck_err(c, ...) \
- __fsck_err(c, FSCK_CAN_FIX, ##__VA_ARGS__)
+static inline void bch2_bkey_fsck_err(struct bch_fs *c,
+ struct printbuf *err_msg,
+ enum bch_sb_error_id err_type,
+ const char *fmt, ...)
+{
+ va_list args;
-#define mustfix_fsck_err_on(cond, c, ...) \
- __fsck_err_on(cond, c, FSCK_CAN_FIX, ##__VA_ARGS__)
+ va_start(args, fmt);
+ prt_vprintf(err_msg, fmt, args);
+ va_end(args);
-#define fsck_err(c, ...) \
- __fsck_err(c, FSCK_CAN_FIX|FSCK_CAN_IGNORE, ##__VA_ARGS__)
+}
-#define fsck_err_on(cond, c, ...) \
- __fsck_err_on(cond, c, FSCK_CAN_FIX|FSCK_CAN_IGNORE, ##__VA_ARGS__)
+#define bkey_fsck_err(c, _err_msg, _err_type, ...) \
+do { \
+ prt_printf(_err_msg, __VA_ARGS__); \
+ bch2_sb_error_count(c, BCH_FSCK_ERR_##_err_type); \
+ ret = -BCH_ERR_invalid_bkey; \
+ goto fsck_err; \
+} while (0)
+
+#define bkey_fsck_err_on(cond, ...) \
+do { \
+ if (unlikely(cond)) \
+ bkey_fsck_err(__VA_ARGS__); \
+} while (0)
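
A minimal sketch of the calling convention the new bkey_fsck_err()/bkey_fsck_err_on() macros assume, following the pattern visible in the extents.c and inode.c hunks below; EXAMPLE_MAX and example_val_too_big are placeholders, not real identifiers:

static int example_invalid(struct bch_fs *c, struct bkey_s_c k,
			   struct printbuf *err)
{
	int ret = 0;			/* the macros assign and return through 'ret' */

	bkey_fsck_err_on(bkey_val_u64s(k.k) > EXAMPLE_MAX, c, err,
			 example_val_too_big,	/* expands to BCH_FSCK_ERR_example_val_too_big */
			 "value too big (%zu > %u)",
			 bkey_val_u64s(k.k), EXAMPLE_MAX);
fsck_err:				/* the macros 'goto fsck_err' after counting the error */
	return ret;
}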
/*
* Fatal errors: these don't indicate a bug, but we can't continue running in RW
@@ -179,26 +215,26 @@ do { \
void bch2_io_error_work(struct work_struct *);
/* Does the error handling without logging a message */
-void bch2_io_error(struct bch_dev *);
+void bch2_io_error(struct bch_dev *, enum bch_member_error_type);
-#define bch2_dev_io_err_on(cond, ca, ...) \
+#define bch2_dev_io_err_on(cond, ca, _type, ...) \
({ \
bool _ret = (cond); \
\
if (_ret) { \
bch_err_dev_ratelimited(ca, __VA_ARGS__); \
- bch2_io_error(ca); \
+ bch2_io_error(ca, _type); \
} \
_ret; \
})
-#define bch2_dev_inum_io_err_on(cond, ca, ...) \
+#define bch2_dev_inum_io_err_on(cond, ca, _type, ...) \
({ \
bool _ret = (cond); \
\
if (_ret) { \
bch_err_inum_offset_ratelimited(ca, __VA_ARGS__); \
- bch2_io_error(ca); \
+ bch2_io_error(ca, _type); \
} \
_ret; \
})
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index 1b25f84e4b9c..a864de231b69 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -13,6 +13,7 @@
#include "btree_iter.h"
#include "buckets.h"
#include "checksum.h"
+#include "compress.h"
#include "debug.h"
#include "disk_groups.h"
#include "error.h"
@@ -162,17 +163,19 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
/* KEY_TYPE_btree_ptr: */
-int bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_btree_ptr_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
- if (bkey_val_u64s(k.k) > BCH_REPLICAS_MAX) {
- prt_printf(err, "value too big (%zu > %u)",
- bkey_val_u64s(k.k), BCH_REPLICAS_MAX);
- return -BCH_ERR_invalid_bkey;
- }
+ int ret = 0;
+
+ bkey_fsck_err_on(bkey_val_u64s(k.k) > BCH_REPLICAS_MAX, c, err,
+ btree_ptr_val_too_big,
+ "value too big (%zu > %u)", bkey_val_u64s(k.k), BCH_REPLICAS_MAX);
- return bch2_bkey_ptrs_invalid(c, k, flags, err);
+ ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
+fsck_err:
+ return ret;
}
void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
@@ -181,17 +184,20 @@ void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
bch2_bkey_ptrs_to_text(out, c, k);
}
-int bch2_btree_ptr_v2_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_btree_ptr_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
- if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX) {
- prt_printf(err, "value too big (%zu > %zu)",
- bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX);
- return -BCH_ERR_invalid_bkey;
- }
+ int ret = 0;
+
+ bkey_fsck_err_on(bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX, c, err,
+ btree_ptr_v2_val_too_big,
+ "value too big (%zu > %zu)",
+ bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX);
- return bch2_bkey_ptrs_invalid(c, k, flags, err);
+ ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
+fsck_err:
+ return ret;
}
void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
@@ -372,19 +378,18 @@ bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
/* KEY_TYPE_reservation: */
-int bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_reservation_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
+ int ret = 0;
- if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX) {
- prt_printf(err, "invalid nr_replicas (%u)",
- r.v->nr_replicas);
- return -BCH_ERR_invalid_bkey;
- }
-
- return 0;
+ bkey_fsck_err_on(!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX, c, err,
+ reservation_key_nr_replicas_invalid,
+ "invalid nr_replicas (%u)", r.v->nr_replicas);
+fsck_err:
+ return ret;
}
void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
@@ -757,18 +762,6 @@ static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
return i;
}
-static void extent_entry_drop(struct bkey_s k, union bch_extent_entry *entry)
-{
- union bch_extent_entry *next = extent_entry_next(entry);
-
- /* stripes have ptrs, but their layout doesn't work with this code */
- BUG_ON(k.k->type == KEY_TYPE_stripe);
-
- memmove_u64s_down(entry, next,
- (u64 *) bkey_val_end(k) - (u64 *) next);
- k.k->u64s -= (u64 *) next - (u64 *) entry;
-}
-
/*
* Returns pointer to the next entry after the one being dropped:
*/
@@ -992,10 +985,6 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
- struct bch_extent_crc_unpacked crc;
- const struct bch_extent_ptr *ptr;
- const struct bch_extent_stripe_ptr *ec;
- struct bch_dev *ca;
bool first = true;
if (c)
@@ -1006,9 +995,9 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
prt_printf(out, " ");
switch (__extent_entry_type(entry)) {
- case BCH_EXTENT_ENTRY_ptr:
- ptr = entry_to_ptr(entry);
- ca = c && ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
+ case BCH_EXTENT_ENTRY_ptr: {
+ const struct bch_extent_ptr *ptr = entry_to_ptr(entry);
+ struct bch_dev *ca = c && ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
? bch_dev_bkey_exists(c, ptr->dev)
: NULL;
@@ -1030,10 +1019,12 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
prt_printf(out, " stale");
}
break;
+ }
case BCH_EXTENT_ENTRY_crc32:
case BCH_EXTENT_ENTRY_crc64:
- case BCH_EXTENT_ENTRY_crc128:
- crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
+ case BCH_EXTENT_ENTRY_crc128: {
+ struct bch_extent_crc_unpacked crc =
+ bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum %s compress %s",
crc.compressed_size,
@@ -1042,12 +1033,26 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
bch2_csum_types[crc.csum_type],
bch2_compression_types[crc.compression_type]);
break;
- case BCH_EXTENT_ENTRY_stripe_ptr:
- ec = &entry->stripe_ptr;
+ }
+ case BCH_EXTENT_ENTRY_stripe_ptr: {
+ const struct bch_extent_stripe_ptr *ec = &entry->stripe_ptr;
prt_printf(out, "ec: idx %llu block %u",
(u64) ec->idx, ec->block);
break;
+ }
+ case BCH_EXTENT_ENTRY_rebalance: {
+ const struct bch_extent_rebalance *r = &entry->rebalance;
+
+ prt_str(out, "rebalance: target ");
+ if (c)
+ bch2_target_to_text(out, c, r->target);
+ else
+ prt_printf(out, "%u", r->target);
+ prt_str(out, " compression ");
+ bch2_compression_opt_to_text(out, r->compression);
+ break;
+ }
default:
prt_printf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
return;
@@ -1057,7 +1062,7 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
}
}
-static int extent_ptr_invalid(const struct bch_fs *c,
+static int extent_ptr_invalid(struct bch_fs *c,
struct bkey_s_c k,
enum bkey_invalid_flags flags,
const struct bch_extent_ptr *ptr,
@@ -1070,6 +1075,7 @@ static int extent_ptr_invalid(const struct bch_fs *c,
u64 bucket;
u32 bucket_offset;
struct bch_dev *ca;
+ int ret = 0;
if (!bch2_dev_exists2(c, ptr->dev)) {
/*
@@ -1080,41 +1086,33 @@ static int extent_ptr_invalid(const struct bch_fs *c,
if (flags & BKEY_INVALID_WRITE)
return 0;
- prt_printf(err, "pointer to invalid device (%u)", ptr->dev);
- return -BCH_ERR_invalid_bkey;
+ bkey_fsck_err(c, err, ptr_to_invalid_device,
+ "pointer to invalid device (%u)", ptr->dev);
}
ca = bch_dev_bkey_exists(c, ptr->dev);
bkey_for_each_ptr(ptrs, ptr2)
- if (ptr != ptr2 && ptr->dev == ptr2->dev) {
- prt_printf(err, "multiple pointers to same device (%u)", ptr->dev);
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(ptr != ptr2 && ptr->dev == ptr2->dev, c, err,
+ ptr_to_duplicate_device,
+ "multiple pointers to same device (%u)", ptr->dev);
bucket = sector_to_bucket_and_offset(ca, ptr->offset, &bucket_offset);
- if (bucket >= ca->mi.nbuckets) {
- prt_printf(err, "pointer past last bucket (%llu > %llu)",
- bucket, ca->mi.nbuckets);
- return -BCH_ERR_invalid_bkey;
- }
-
- if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket)) {
- prt_printf(err, "pointer before first bucket (%llu < %u)",
- bucket, ca->mi.first_bucket);
- return -BCH_ERR_invalid_bkey;
- }
-
- if (bucket_offset + size_ondisk > ca->mi.bucket_size) {
- prt_printf(err, "pointer spans multiple buckets (%u + %u > %u)",
+ bkey_fsck_err_on(bucket >= ca->mi.nbuckets, c, err,
+ ptr_after_last_bucket,
+ "pointer past last bucket (%llu > %llu)", bucket, ca->mi.nbuckets);
+ bkey_fsck_err_on(ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket), c, err,
+ ptr_before_first_bucket,
+ "pointer before first bucket (%llu < %u)", bucket, ca->mi.first_bucket);
+ bkey_fsck_err_on(bucket_offset + size_ondisk > ca->mi.bucket_size, c, err,
+ ptr_spans_multiple_buckets,
+ "pointer spans multiple buckets (%u + %u > %u)",
bucket_offset, size_ondisk, ca->mi.bucket_size);
- return -BCH_ERR_invalid_bkey;
- }
-
- return 0;
+fsck_err:
+ return ret;
}
-int bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_bkey_ptrs_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
@@ -1124,24 +1122,22 @@ int bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k,
unsigned size_ondisk = k.k->size;
unsigned nonce = UINT_MAX;
unsigned nr_ptrs = 0;
- bool unwritten = false, have_ec = false, crc_since_last_ptr = false;
- int ret;
+ bool have_written = false, have_unwritten = false, have_ec = false, crc_since_last_ptr = false;
+ int ret = 0;
if (bkey_is_btree_ptr(k.k))
size_ondisk = btree_sectors(c);
bkey_extent_entry_for_each(ptrs, entry) {
- if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX) {
- prt_printf(err, "invalid extent entry type (got %u, max %u)",
- __extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX);
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX, c, err,
+ extent_ptrs_invalid_entry,
+ "invalid extent entry type (got %u, max %u)",
+ __extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX);
- if (bkey_is_btree_ptr(k.k) &&
- !extent_entry_is_ptr(entry)) {
- prt_printf(err, "has non ptr field");
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(bkey_is_btree_ptr(k.k) &&
+ !extent_entry_is_ptr(entry), c, err,
+ btree_ptr_has_non_ptr,
+ "has non ptr field");
switch (extent_entry_type(entry)) {
case BCH_EXTENT_ENTRY_ptr:
@@ -1150,22 +1146,15 @@ int bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k,
if (ret)
return ret;
- if (nr_ptrs && unwritten != entry->ptr.unwritten) {
- prt_printf(err, "extent with unwritten and written ptrs");
- return -BCH_ERR_invalid_bkey;
- }
-
- if (k.k->type != KEY_TYPE_extent && entry->ptr.unwritten) {
- prt_printf(err, "has unwritten ptrs");
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(entry->ptr.cached && have_ec, c, err,
+ ptr_cached_and_erasure_coded,
+ "cached, erasure coded ptr");
- if (entry->ptr.cached && have_ec) {
- prt_printf(err, "cached, erasure coded ptr");
- return -BCH_ERR_invalid_bkey;
- }
+ if (!entry->ptr.unwritten)
+ have_written = true;
+ else
+ have_unwritten = true;
- unwritten = entry->ptr.unwritten;
have_ec = false;
crc_since_last_ptr = false;
nr_ptrs++;
@@ -1175,72 +1164,77 @@ int bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k,
case BCH_EXTENT_ENTRY_crc128:
crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
- if (crc.offset + crc.live_size >
- crc.uncompressed_size) {
- prt_printf(err, "checksum offset + key size > uncompressed size");
- return -BCH_ERR_invalid_bkey;
- }
-
- size_ondisk = crc.compressed_size;
-
- if (!bch2_checksum_type_valid(c, crc.csum_type)) {
- prt_printf(err, "invalid checksum type");
- return -BCH_ERR_invalid_bkey;
- }
-
- if (crc.compression_type >= BCH_COMPRESSION_TYPE_NR) {
- prt_printf(err, "invalid compression type");
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(crc.offset + crc.live_size > crc.uncompressed_size, c, err,
+ ptr_crc_uncompressed_size_too_small,
+ "checksum offset + key size > uncompressed size");
+ bkey_fsck_err_on(!bch2_checksum_type_valid(c, crc.csum_type), c, err,
+ ptr_crc_csum_type_unknown,
+ "invalid checksum type");
+ bkey_fsck_err_on(crc.compression_type >= BCH_COMPRESSION_TYPE_NR, c, err,
+ ptr_crc_compression_type_unknown,
+ "invalid compression type");
if (bch2_csum_type_is_encryption(crc.csum_type)) {
if (nonce == UINT_MAX)
nonce = crc.offset + crc.nonce;
- else if (nonce != crc.offset + crc.nonce) {
- prt_printf(err, "incorrect nonce");
- return -BCH_ERR_invalid_bkey;
- }
+ else if (nonce != crc.offset + crc.nonce)
+ bkey_fsck_err(c, err, ptr_crc_nonce_mismatch,
+ "incorrect nonce");
}
- if (crc_since_last_ptr) {
- prt_printf(err, "redundant crc entry");
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(crc_since_last_ptr, c, err,
+ ptr_crc_redundant,
+ "redundant crc entry");
crc_since_last_ptr = true;
+
+ bkey_fsck_err_on(crc_is_encoded(crc) &&
+ (crc.uncompressed_size > c->opts.encoded_extent_max >> 9) &&
+ (flags & (BKEY_INVALID_WRITE|BKEY_INVALID_COMMIT)), c, err,
+ ptr_crc_uncompressed_size_too_big,
+ "too large encoded extent");
+
+ size_ondisk = crc.compressed_size;
break;
case BCH_EXTENT_ENTRY_stripe_ptr:
- if (have_ec) {
- prt_printf(err, "redundant stripe entry");
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(have_ec, c, err,
+ ptr_stripe_redundant,
+ "redundant stripe entry");
have_ec = true;
break;
- case BCH_EXTENT_ENTRY_rebalance:
+ case BCH_EXTENT_ENTRY_rebalance: {
+ const struct bch_extent_rebalance *r = &entry->rebalance;
+
+ if (!bch2_compression_opt_valid(r->compression)) {
+ struct bch_compression_opt opt = __bch2_compression_decode(r->compression);
+ prt_printf(err, "invalid compression opt %u:%u",
+ opt.type, opt.level);
+ return -BCH_ERR_invalid_bkey;
+ }
break;
}
+ }
}
- if (!nr_ptrs) {
- prt_str(err, "no ptrs");
- return -BCH_ERR_invalid_bkey;
- }
-
- if (nr_ptrs >= BCH_BKEY_PTRS_MAX) {
- prt_str(err, "too many ptrs");
- return -BCH_ERR_invalid_bkey;
- }
-
- if (crc_since_last_ptr) {
- prt_printf(err, "redundant crc entry");
- return -BCH_ERR_invalid_bkey;
- }
-
- if (have_ec) {
- prt_printf(err, "redundant stripe entry");
- return -BCH_ERR_invalid_bkey;
- }
-
- return 0;
+ bkey_fsck_err_on(!nr_ptrs, c, err,
+ extent_ptrs_no_ptrs,
+ "no ptrs");
+ bkey_fsck_err_on(nr_ptrs > BCH_BKEY_PTRS_MAX, c, err,
+ extent_ptrs_too_many_ptrs,
+ "too many ptrs: %u > %u", nr_ptrs, BCH_BKEY_PTRS_MAX);
+ bkey_fsck_err_on(have_written && have_unwritten, c, err,
+ extent_ptrs_written_and_unwritten,
+ "extent with unwritten and written ptrs");
+ bkey_fsck_err_on(k.k->type != KEY_TYPE_extent && have_unwritten, c, err,
+ extent_ptrs_unwritten,
+ "has unwritten ptrs");
+ bkey_fsck_err_on(crc_since_last_ptr, c, err,
+ extent_ptrs_redundant_crc,
+ "redundant crc entry");
+ bkey_fsck_err_on(have_ec, c, err,
+ extent_ptrs_redundant_stripe,
+ "redundant stripe entry");
+fsck_err:
+ return ret;
}
void bch2_ptr_swab(struct bkey_s k)
@@ -1281,6 +1275,125 @@ void bch2_ptr_swab(struct bkey_s k)
}
}
+const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c k)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+
+ bkey_extent_entry_for_each(ptrs, entry)
+ if (__extent_entry_type(entry) == BCH_EXTENT_ENTRY_rebalance)
+ return &entry->rebalance;
+
+ return NULL;
+}
+
+unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c, struct bkey_s_c k,
+ unsigned target, unsigned compression)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ unsigned rewrite_ptrs = 0;
+
+ if (compression) {
+ unsigned compression_type = bch2_compression_opt_to_type(compression);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ unsigned i = 0;
+
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible) {
+ rewrite_ptrs = 0;
+ goto incompressible;
+ }
+
+ if (!p.ptr.cached && p.crc.compression_type != compression_type)
+ rewrite_ptrs |= 1U << i;
+ i++;
+ }
+ }
+incompressible:
+ if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) {
+ const struct bch_extent_ptr *ptr;
+ unsigned i = 0;
+
+ bkey_for_each_ptr(ptrs, ptr) {
+ if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, target))
+ rewrite_ptrs |= 1U << i;
+ i++;
+ }
+ }
+
+ return rewrite_ptrs;
+}
+
+bool bch2_bkey_needs_rebalance(struct bch_fs *c, struct bkey_s_c k)
+{
+ const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);
+
+ /*
+ * If it's an indirect extent, we don't delete the rebalance entry when
+ * done so that we know what options were applied - check if it still
+ * needs work done:
+ */
+ if (r &&
+ k.k->type == KEY_TYPE_reflink_v &&
+ !bch2_bkey_ptrs_need_rebalance(c, k, r->target, r->compression))
+ r = NULL;
+
+ return r != NULL;
+}
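
A tiny sketch of the predicate described by the comment above, with has_entry, is_reflink and ptrs_conform standing in for the rebalance-entry lookup, the KEY_TYPE_reflink_v check and bch2_bkey_ptrs_need_rebalance(); names are illustrative only:

static bool sketch_needs_rebalance(bool has_entry, bool is_reflink,
				   bool ptrs_conform)
{
	if (!has_entry)
		return false;
	/* indirect extents keep the entry purely as a record of applied options */
	if (is_reflink && ptrs_conform)
		return false;
	return true;
}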
+
+int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bkey_i *_k,
+ unsigned target, unsigned compression)
+{
+ struct bkey_s k = bkey_i_to_s(_k);
+ struct bch_extent_rebalance *r;
+ bool needs_rebalance;
+
+ if (!bkey_extent_is_direct_data(k.k))
+ return 0;
+
+ /* get existing rebalance entry: */
+ r = (struct bch_extent_rebalance *) bch2_bkey_rebalance_opts(k.s_c);
+ if (r) {
+ if (k.k->type == KEY_TYPE_reflink_v) {
+ /*
+ * indirect extents: existing options take precedence,
+ * so that we don't move extents back and forth if
+ * they're referenced by different inodes with different
+ * options:
+ */
+ if (r->target)
+ target = r->target;
+ if (r->compression)
+ compression = r->compression;
+ }
+
+ r->target = target;
+ r->compression = compression;
+ }
+
+ needs_rebalance = bch2_bkey_ptrs_need_rebalance(c, k.s_c, target, compression);
+
+ if (needs_rebalance && !r) {
+ union bch_extent_entry *new = bkey_val_end(k);
+
+ new->rebalance.type = 1U << BCH_EXTENT_ENTRY_rebalance;
+ new->rebalance.compression = compression;
+ new->rebalance.target = target;
+ new->rebalance.unused = 0;
+ k.k->u64s += extent_entry_u64s(new);
+ } else if (!needs_rebalance && r && k.k->type != KEY_TYPE_reflink_v) {
+ /*
+ * For indirect extents, don't delete the rebalance entry when
+ * we're finished so that we know we specifically moved it or
+ * compressed it to its current location/compression type
+ */
+ extent_entry_drop(k, (union bch_extent_entry *) r);
+ }
+
+ return 0;
+}
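
The precedence rule from the comment above, reduced to a standalone helper for illustration; the struct and function names here are made up, only the target/compression behaviour mirrors the code:

struct reb_opts { unsigned target, compression; };

static struct reb_opts effective_opts(struct reb_opts existing,
				      struct reb_opts requested,
				      bool is_indirect)
{
	/* on indirect (reflink) extents, options already stored in the key win,
	 * so shared extents aren't shuffled between inodes with different options */
	if (is_indirect) {
		if (existing.target)
			requested.target = existing.target;
		if (existing.compression)
			requested.compression = existing.compression;
	}
	return requested;
}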
+
/* Generic extent code: */
int bch2_cut_front_s(struct bpos where, struct bkey_s k)
diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h
index 879e7d218b6a..a2ce8a3be13c 100644
--- a/fs/bcachefs/extents.h
+++ b/fs/bcachefs/extents.h
@@ -89,6 +89,18 @@ static inline void __extent_entry_insert(struct bkey_i *k,
memcpy_u64s_small(dst, new, extent_entry_u64s(new));
}
+static inline void extent_entry_drop(struct bkey_s k, union bch_extent_entry *entry)
+{
+ union bch_extent_entry *next = extent_entry_next(entry);
+
+ /* stripes have ptrs, but their layout doesn't work with this code */
+ BUG_ON(k.k->type == KEY_TYPE_stripe);
+
+ memmove_u64s_down(entry, next,
+ (u64 *) bkey_val_end(k) - (u64 *) next);
+ k.k->u64s -= (u64 *) next - (u64 *) entry;
+}
+
static inline bool extent_entry_is_ptr(const union bch_extent_entry *e)
{
return extent_entry_type(e) == BCH_EXTENT_ENTRY_ptr;
@@ -190,6 +202,11 @@ static inline bool crc_is_compressed(struct bch_extent_crc_unpacked crc)
crc.compression_type != BCH_COMPRESSION_TYPE_incompressible);
}
+static inline bool crc_is_encoded(struct bch_extent_crc_unpacked crc)
+{
+ return crc.csum_type != BCH_CSUM_none || crc_is_compressed(crc);
+}
+
/* bkey_ptrs: generically over any key type that has ptrs */
struct bkey_ptrs_c {
@@ -383,12 +400,12 @@ int bch2_bkey_pick_read_device(struct bch_fs *, struct bkey_s_c,
/* KEY_TYPE_btree_ptr: */
-int bch2_btree_ptr_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_btree_ptr_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
void bch2_btree_ptr_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
-int bch2_btree_ptr_v2_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_btree_ptr_v2_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
void bch2_btree_ptr_v2_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned,
@@ -428,7 +445,7 @@ bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
/* KEY_TYPE_reservation: */
-int bch2_reservation_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_reservation_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
bool bch2_reservation_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
@@ -688,11 +705,19 @@ void bch2_extent_ptr_set_cached(struct bkey_s, struct bch_extent_ptr *);
bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
-int bch2_bkey_ptrs_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_bkey_ptrs_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
void bch2_ptr_swab(struct bkey_s);
+const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c);
+unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *, struct bkey_s_c,
+ unsigned, unsigned);
+bool bch2_bkey_needs_rebalance(struct bch_fs *, struct bkey_s_c);
+
+int bch2_bkey_set_needs_rebalance(struct bch_fs *, struct bkey_i *,
+ unsigned, unsigned);
+
/* Generic extent code: */
enum bch_extent_overlap {
@@ -737,22 +762,4 @@ static inline void bch2_key_resize(struct bkey *k, unsigned new_size)
k->size = new_size;
}
-/*
- * In extent_sort_fix_overlapping(), insert_fixup_extent(),
- * extent_merge_inline() - we're modifying keys in place that are packed. To do
- * that we have to unpack the key, modify the unpacked key - then this
- * copies/repacks the unpacked to the original as necessary.
- */
-static inline void extent_save(struct btree *b, struct bkey_packed *dst,
- struct bkey *src)
-{
- struct bkey_format *f = &b->format;
- struct bkey_i *dst_unpacked;
-
- if ((dst_unpacked = packed_to_bkey(dst)))
- dst_unpacked->k = *src;
- else
- BUG_ON(!bch2_bkey_pack_key(dst, src, f));
-}
-
#endif /* _BCACHEFS_EXTENTS_H */
diff --git a/fs/bcachefs/fs-common.c b/fs/bcachefs/fs-common.c
index bb5305441f27..4496cf91a4c1 100644
--- a/fs/bcachefs/fs-common.c
+++ b/fs/bcachefs/fs-common.c
@@ -51,7 +51,7 @@ int bch2_create_trans(struct btree_trans *trans,
bch2_inode_init_late(new_inode, now, uid, gid, mode, rdev, dir_u);
if (flags & BCH_CREATE_TMPFILE)
- new_inode->bi_flags |= BCH_INODE_UNLINKED;
+ new_inode->bi_flags |= BCH_INODE_unlinked;
ret = bch2_inode_create(trans, &inode_iter, new_inode, snapshot, cpu);
if (ret)
diff --git a/fs/bcachefs/fs-io-buffered.c b/fs/bcachefs/fs-io-buffered.c
index 58ccc7b91ac7..52f0e7acda3d 100644
--- a/fs/bcachefs/fs-io-buffered.c
+++ b/fs/bcachefs/fs-io-buffered.c
@@ -389,6 +389,21 @@ static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs
return ret;
}
+/*
+ * Determine when a writepage io is full. We have to limit writepage bios to a
+ * single page per bvec (i.e. 1MB with 4k pages) because that is the limit to
+ * what the bounce path in bch2_write_extent() can handle. In theory we could
+ * loosen this restriction for non-bounce I/O, but we don't have that context
+ * here. Ideally, we can up this limit and make it configurable in the future
+ * when the bounce path can be enhanced to accommodate larger source bios.
+ */
+static inline bool bch_io_full(struct bch_writepage_io *io, unsigned len)
+{
+ struct bio *bio = &io->op.wbio.bio;
+ return bio_full(bio, len) ||
+ (bio->bi_iter.bi_size + len > BIO_MAX_VECS * PAGE_SIZE);
+}
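
As a quick sanity check of the "1MB with 4k pages" figure in the comment: assuming BIO_MAX_VECS is 256 (its usual value) and 4 KiB pages, the byte budget enforced here is 256 * 4096 = 1 MiB. A standalone restatement of the byte-budget half of the check (bio_full() covers the bvec-slot half):

static inline bool sketch_io_full(unsigned cur_bytes, unsigned add_len)
{
	const unsigned max_vecs  = 256;		/* assumed BIO_MAX_VECS */
	const unsigned page_size = 4096;	/* assumed PAGE_SIZE */

	/* full once the next chunk would push the bio past BIO_MAX_VECS pages */
	return cur_bytes + add_len > max_vecs * page_size;	/* 1 MiB */
}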
+
static void bch2_writepage_io_done(struct bch_write_op *op)
{
struct bch_writepage_io *io =
@@ -606,9 +621,7 @@ do_io:
if (w->io &&
(w->io->op.res.nr_replicas != nr_replicas_this_write ||
- bio_full(&w->io->op.wbio.bio, sectors << 9) ||
- w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
- (BIO_MAX_VECS * PAGE_SIZE) ||
+ bch_io_full(w->io, sectors << 9) ||
bio_end_sector(&w->io->op.wbio.bio) != sector))
bch2_writepage_do_io(w);
diff --git a/fs/bcachefs/fs-io-direct.c b/fs/bcachefs/fs-io-direct.c
index 6a9557e7ecab..5b42a76c4796 100644
--- a/fs/bcachefs/fs-io-direct.c
+++ b/fs/bcachefs/fs-io-direct.c
@@ -113,6 +113,7 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
} else {
atomic_set(&dio->cl.remaining,
CLOSURE_REMAINING_INITIALIZER + 1);
+ dio->cl.closure_get_happened = true;
}
dio->req = req;
diff --git a/fs/bcachefs/fs-ioctl.c b/fs/bcachefs/fs-ioctl.c
index 6040bd3f0778..5a39bcb597a3 100644
--- a/fs/bcachefs/fs-ioctl.c
+++ b/fs/bcachefs/fs-ioctl.c
@@ -45,13 +45,13 @@ static int bch2_inode_flags_set(struct btree_trans *trans,
unsigned newflags = s->flags;
unsigned oldflags = bi->bi_flags & s->mask;
- if (((newflags ^ oldflags) & (BCH_INODE_APPEND|BCH_INODE_IMMUTABLE)) &&
+ if (((newflags ^ oldflags) & (BCH_INODE_append|BCH_INODE_immutable)) &&
!capable(CAP_LINUX_IMMUTABLE))
return -EPERM;
if (!S_ISREG(bi->bi_mode) &&
!S_ISDIR(bi->bi_mode) &&
- (newflags & (BCH_INODE_NODUMP|BCH_INODE_NOATIME)) != newflags)
+ (newflags & (BCH_INODE_nodump|BCH_INODE_noatime)) != newflags)
return -EINVAL;
if (s->set_projinherit) {
diff --git a/fs/bcachefs/fs-ioctl.h b/fs/bcachefs/fs-ioctl.h
index 54a9c21a3b83..d30f9bb056fd 100644
--- a/fs/bcachefs/fs-ioctl.h
+++ b/fs/bcachefs/fs-ioctl.h
@@ -6,28 +6,28 @@
/* bcachefs inode flags -> vfs inode flags: */
static const __maybe_unused unsigned bch_flags_to_vfs[] = {
- [__BCH_INODE_SYNC] = S_SYNC,
- [__BCH_INODE_IMMUTABLE] = S_IMMUTABLE,
- [__BCH_INODE_APPEND] = S_APPEND,
- [__BCH_INODE_NOATIME] = S_NOATIME,
+ [__BCH_INODE_sync] = S_SYNC,
+ [__BCH_INODE_immutable] = S_IMMUTABLE,
+ [__BCH_INODE_append] = S_APPEND,
+ [__BCH_INODE_noatime] = S_NOATIME,
};
/* bcachefs inode flags -> FS_IOC_GETFLAGS: */
static const __maybe_unused unsigned bch_flags_to_uflags[] = {
- [__BCH_INODE_SYNC] = FS_SYNC_FL,
- [__BCH_INODE_IMMUTABLE] = FS_IMMUTABLE_FL,
- [__BCH_INODE_APPEND] = FS_APPEND_FL,
- [__BCH_INODE_NODUMP] = FS_NODUMP_FL,
- [__BCH_INODE_NOATIME] = FS_NOATIME_FL,
+ [__BCH_INODE_sync] = FS_SYNC_FL,
+ [__BCH_INODE_immutable] = FS_IMMUTABLE_FL,
+ [__BCH_INODE_append] = FS_APPEND_FL,
+ [__BCH_INODE_nodump] = FS_NODUMP_FL,
+ [__BCH_INODE_noatime] = FS_NOATIME_FL,
};
/* bcachefs inode flags -> FS_IOC_FSGETXATTR: */
static const __maybe_unused unsigned bch_flags_to_xflags[] = {
- [__BCH_INODE_SYNC] = FS_XFLAG_SYNC,
- [__BCH_INODE_IMMUTABLE] = FS_XFLAG_IMMUTABLE,
- [__BCH_INODE_APPEND] = FS_XFLAG_APPEND,
- [__BCH_INODE_NODUMP] = FS_XFLAG_NODUMP,
- [__BCH_INODE_NOATIME] = FS_XFLAG_NOATIME,
+ [__BCH_INODE_sync] = FS_XFLAG_SYNC,
+ [__BCH_INODE_immutable] = FS_XFLAG_IMMUTABLE,
+ [__BCH_INODE_append] = FS_XFLAG_APPEND,
+ [__BCH_INODE_nodump] = FS_XFLAG_NODUMP,
+ [__BCH_INODE_noatime] = FS_XFLAG_NOATIME,
//[__BCH_INODE_PROJINHERIT] = FS_XFLAG_PROJINHERIT;
};
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index a2a5133fb6b5..166d8d8abe68 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -764,15 +764,15 @@ static int bch2_getattr(struct mnt_idmap *idmap,
stat->btime = bch2_time_to_timespec(c, inode->ei_inode.bi_otime);
}
- if (inode->ei_inode.bi_flags & BCH_INODE_IMMUTABLE)
+ if (inode->ei_inode.bi_flags & BCH_INODE_immutable)
stat->attributes |= STATX_ATTR_IMMUTABLE;
stat->attributes_mask |= STATX_ATTR_IMMUTABLE;
- if (inode->ei_inode.bi_flags & BCH_INODE_APPEND)
+ if (inode->ei_inode.bi_flags & BCH_INODE_append)
stat->attributes |= STATX_ATTR_APPEND;
stat->attributes_mask |= STATX_ATTR_APPEND;
- if (inode->ei_inode.bi_flags & BCH_INODE_NODUMP)
+ if (inode->ei_inode.bi_flags & BCH_INODE_nodump)
stat->attributes |= STATX_ATTR_NODUMP;
stat->attributes_mask |= STATX_ATTR_NODUMP;
@@ -1213,9 +1213,6 @@ static struct dentry *bch2_get_parent(struct dentry *child)
.inum = inode->ei_inode.bi_dir,
};
- if (!parent_inum.inum)
- return NULL;
-
return d_obtain_alias(bch2_vfs_inode_get(c, parent_inum));
}
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index b8f9e7475dc5..9f3e9bd3d767 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -2,6 +2,7 @@
#include "bcachefs.h"
#include "bkey_buf.h"
+#include "btree_cache.h"
#include "btree_update.h"
#include "buckets.h"
#include "darray.h"
@@ -444,9 +445,10 @@ static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s,
if (i->equiv == n.equiv) {
bch_err(c, "snapshot deletion did not finish:\n"
" duplicate keys in btree %s at %llu:%llu snapshots %u, %u (equiv %u)\n",
- bch2_btree_ids[btree_id],
+ bch2_btree_id_str(btree_id),
pos.inode, pos.offset,
i->id, n.id, n.equiv);
+ set_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags);
return bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_delete_dead_snapshots);
}
}
@@ -719,8 +721,9 @@ static int check_key_has_snapshot(struct btree_trans *trans,
int ret = 0;
if (mustfix_fsck_err_on(!bch2_snapshot_equiv(c, k.k->p.snapshot), c,
- "key in missing snapshot: %s",
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ bkey_in_missing_snapshot,
+ "key in missing snapshot: %s",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
ret = bch2_btree_delete_at(trans, iter,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?: 1;
fsck_err:
@@ -789,6 +792,7 @@ static int hash_check_key(struct btree_trans *trans,
if (fsck_err_on(k.k->type == desc.key_type &&
!desc.cmp_bkey(k, hash_k), c,
+ hash_table_key_duplicate,
"duplicate hash table keys:\n%s",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, hash_k),
@@ -807,8 +811,9 @@ out:
printbuf_exit(&buf);
return ret;
bad_hash:
- if (fsck_err(c, "hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n%s",
- bch2_btree_ids[desc.btree_id], hash_k.k->p.inode, hash_k.k->p.offset, hash,
+ if (fsck_err(c, hash_table_key_wrong_offset,
+ "hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n%s",
+ bch2_btree_id_str(desc.btree_id), hash_k.k->p.inode, hash_k.k->p.offset, hash,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, hash_k), buf.buf))) {
ret = hash_redo_key(trans, desc, hash_info, k_iter, hash_k);
@@ -849,22 +854,23 @@ static int check_inode(struct btree_trans *trans,
BUG_ON(bch2_inode_unpack(k, &u));
if (!full &&
- !(u.bi_flags & (BCH_INODE_I_SIZE_DIRTY|
- BCH_INODE_I_SECTORS_DIRTY|
- BCH_INODE_UNLINKED)))
+ !(u.bi_flags & (BCH_INODE_i_size_dirty|
+ BCH_INODE_i_sectors_dirty|
+ BCH_INODE_unlinked)))
return 0;
if (prev->bi_inum != u.bi_inum)
*prev = u;
if (fsck_err_on(prev->bi_hash_seed != u.bi_hash_seed ||
- inode_d_type(prev) != inode_d_type(&u), c,
+ inode_d_type(prev) != inode_d_type(&u),
+ c, inode_snapshot_mismatch,
"inodes in different snapshots don't match")) {
bch_err(c, "repair not implemented yet");
return -EINVAL;
}
- if ((u.bi_flags & (BCH_INODE_I_SIZE_DIRTY|BCH_INODE_UNLINKED)) &&
+ if ((u.bi_flags & (BCH_INODE_i_size_dirty|BCH_INODE_unlinked)) &&
bch2_key_has_snapshot_overwrites(trans, BTREE_ID_inodes, k.k->p)) {
struct bpos new_min_pos;
@@ -872,7 +878,7 @@ static int check_inode(struct btree_trans *trans,
if (ret)
goto err;
- u.bi_flags &= ~BCH_INODE_I_SIZE_DIRTY|BCH_INODE_UNLINKED;
+ u.bi_flags &= ~BCH_INODE_i_size_dirty|BCH_INODE_unlinked;
ret = __write_inode(trans, &u, iter->pos.snapshot);
bch_err_msg(c, ret, "in fsck updating inode");
@@ -884,9 +890,10 @@ static int check_inode(struct btree_trans *trans,
return 0;
}
- if (u.bi_flags & BCH_INODE_UNLINKED &&
+ if (u.bi_flags & BCH_INODE_unlinked &&
(!c->sb.clean ||
- fsck_err(c, "filesystem marked clean, but inode %llu unlinked",
+ fsck_err(c, inode_unlinked_but_clean,
+ "filesystem marked clean, but inode %llu unlinked",
u.bi_inum))) {
bch2_trans_unlock(trans);
bch2_fs_lazy_rw(c);
@@ -896,9 +903,10 @@ static int check_inode(struct btree_trans *trans,
return ret;
}
- if (u.bi_flags & BCH_INODE_I_SIZE_DIRTY &&
+ if (u.bi_flags & BCH_INODE_i_size_dirty &&
(!c->sb.clean ||
- fsck_err(c, "filesystem marked clean, but inode %llu has i_size dirty",
+ fsck_err(c, inode_i_size_dirty_but_clean,
+ "filesystem marked clean, but inode %llu has i_size dirty",
u.bi_inum))) {
bch_verbose(c, "truncating inode %llu", u.bi_inum);
@@ -922,15 +930,16 @@ static int check_inode(struct btree_trans *trans,
* We truncated without our normal sector accounting hook, just
* make sure we recalculate it:
*/
- u.bi_flags |= BCH_INODE_I_SECTORS_DIRTY;
+ u.bi_flags |= BCH_INODE_i_sectors_dirty;
- u.bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
+ u.bi_flags &= ~BCH_INODE_i_size_dirty;
do_update = true;
}
- if (u.bi_flags & BCH_INODE_I_SECTORS_DIRTY &&
+ if (u.bi_flags & BCH_INODE_i_sectors_dirty &&
(!c->sb.clean ||
- fsck_err(c, "filesystem marked clean, but inode %llu has i_sectors dirty",
+ fsck_err(c, inode_i_sectors_dirty_but_clean,
+ "filesystem marked clean, but inode %llu has i_sectors dirty",
u.bi_inum))) {
s64 sectors;
@@ -944,14 +953,14 @@ static int check_inode(struct btree_trans *trans,
}
u.bi_sectors = sectors;
- u.bi_flags &= ~BCH_INODE_I_SECTORS_DIRTY;
+ u.bi_flags &= ~BCH_INODE_i_sectors_dirty;
do_update = true;
}
- if (u.bi_flags & BCH_INODE_BACKPTR_UNTRUSTED) {
+ if (u.bi_flags & BCH_INODE_backptr_untrusted) {
u.bi_dir = 0;
u.bi_dir_offset = 0;
- u.bi_flags &= ~BCH_INODE_BACKPTR_UNTRUSTED;
+ u.bi_flags &= ~BCH_INODE_backptr_untrusted;
do_update = true;
}
@@ -1056,10 +1065,11 @@ static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w)
return -BCH_ERR_internal_fsck_err;
}
- if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SECTORS_DIRTY), c,
- "inode %llu:%u has incorrect i_sectors: got %llu, should be %llu",
- w->last_pos.inode, i->snapshot,
- i->inode.bi_sectors, i->count)) {
+ if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_i_sectors_dirty),
+ c, inode_i_sectors_wrong,
+ "inode %llu:%u has incorrect i_sectors: got %llu, should be %llu",
+ w->last_pos.inode, i->snapshot,
+ i->inode.bi_sectors, i->count)) {
i->inode.bi_sectors = i->count;
ret = fsck_write_inode(trans, &i->inode, i->snapshot);
if (ret)
@@ -1200,7 +1210,8 @@ static int overlapping_extents_found(struct btree_trans *trans,
prt_printf(&buf, "\n overwriting %s extent",
pos1.snapshot >= pos2.p.snapshot ? "first" : "second");
- if (fsck_err(c, "overlapping extents%s", buf.buf)) {
+ if (fsck_err(c, extent_overlapping,
+ "overlapping extents%s", buf.buf)) {
struct btree_iter *old_iter = &iter1;
struct disk_reservation res = { 0 };
@@ -1297,6 +1308,28 @@ err:
return ret;
}
+static int check_extent_overbig(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_s_c k)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ struct bch_extent_crc_unpacked crc;
+ const union bch_extent_entry *i;
+ unsigned encoded_extent_max_sectors = c->opts.encoded_extent_max >> 9;
+
+ bkey_for_each_crc(k.k, ptrs, crc, i)
+ if (crc_is_encoded(crc) &&
+ crc.uncompressed_size > encoded_extent_max_sectors) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf, c, k);
+ bch_err(c, "overbig encoded extent, please report this:\n %s", buf.buf);
+ printbuf_exit(&buf);
+ }
+
+ return 0;
+}
+
static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_s_c k,
struct inode_walker *inode,
@@ -1333,7 +1366,7 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
goto err;
if (k.k->type != KEY_TYPE_whiteout) {
- if (fsck_err_on(!i, c,
+ if (fsck_err_on(!i, c, extent_in_missing_inode,
"extent in missing inode:\n %s",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
@@ -1341,7 +1374,8 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
if (fsck_err_on(i &&
!S_ISREG(i->inode.bi_mode) &&
- !S_ISLNK(i->inode.bi_mode), c,
+ !S_ISLNK(i->inode.bi_mode),
+ c, extent_in_non_reg_inode,
"extent in non regular inode mode %o:\n %s",
i->inode.bi_mode,
(printbuf_reset(&buf),
@@ -1371,9 +1405,10 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
continue;
if (k.k->type != KEY_TYPE_whiteout) {
- if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
+ if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_i_size_dirty) &&
k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9 &&
- !bkey_extent_is_reservation(k), c,
+ !bkey_extent_is_reservation(k),
+ c, extent_past_end_of_inode,
"extent type past end of inode %llu:%u, i_size %llu\n %s",
i->inode.bi_inum, i->snapshot, i->inode.bi_size,
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
@@ -1432,7 +1467,8 @@ int bch2_check_extents(struct bch_fs *c)
&res, NULL,
BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL, ({
bch2_disk_reservation_put(c, &res);
- check_extent(trans, &iter, k, &w, &s, &extent_ends);
+ check_extent(trans, &iter, k, &w, &s, &extent_ends) ?:
+ check_extent_overbig(trans, &iter, k);
})) ?:
check_i_sectors(trans, &w);
@@ -1446,6 +1482,30 @@ int bch2_check_extents(struct bch_fs *c)
return ret;
}
+int bch2_check_indirect_extents(struct bch_fs *c)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct disk_reservation res = { 0 };
+ int ret = 0;
+
+ ret = for_each_btree_key_commit(trans, iter, BTREE_ID_reflink,
+ POS_MIN,
+ BTREE_ITER_PREFETCH, k,
+ &res, NULL,
+ BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL, ({
+ bch2_disk_reservation_put(c, &res);
+ check_extent_overbig(trans, &iter, k);
+ }));
+
+ bch2_disk_reservation_put(c, &res);
+ bch2_trans_put(trans);
+
+ bch_err_fn(c, ret);
+ return ret;
+}
+
static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
{
struct bch_fs *c = trans->c;
@@ -1470,7 +1530,8 @@ static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
continue;
}
- if (fsck_err_on(i->inode.bi_nlink != i->count, c,
+ if (fsck_err_on(i->inode.bi_nlink != i->count,
+ c, inode_dir_wrong_nlink,
"directory %llu:%u with wrong i_nlink: got %u, should be %llu",
w->last_pos.inode, i->snapshot, i->inode.bi_nlink, i->count)) {
i->inode.bi_nlink = i->count;
@@ -1514,27 +1575,28 @@ static int check_dirent_target(struct btree_trans *trans,
backpointer_exists = ret;
ret = 0;
- if (fsck_err_on(S_ISDIR(target->bi_mode) &&
- backpointer_exists, c,
+ if (fsck_err_on(S_ISDIR(target->bi_mode) && backpointer_exists,
+ c, inode_dir_multiple_links,
"directory %llu with multiple links",
target->bi_inum)) {
ret = __remove_dirent(trans, d.k->p);
goto out;
}
- if (fsck_err_on(backpointer_exists &&
- !target->bi_nlink, c,
+ if (fsck_err_on(backpointer_exists && !target->bi_nlink,
+ c, inode_multiple_links_but_nlink_0,
"inode %llu type %s has multiple links but i_nlink 0",
target->bi_inum, bch2_d_types[d.v->d_type])) {
target->bi_nlink++;
- target->bi_flags &= ~BCH_INODE_UNLINKED;
+ target->bi_flags &= ~BCH_INODE_unlinked;
ret = __write_inode(trans, target, target_snapshot);
if (ret)
goto err;
}
- if (fsck_err_on(!backpointer_exists, c,
+ if (fsck_err_on(!backpointer_exists,
+ c, inode_wrong_backpointer,
"inode %llu:%u has wrong backpointer:\n"
"got %llu:%llu\n"
"should be %llu:%llu",
@@ -1552,7 +1614,8 @@ static int check_dirent_target(struct btree_trans *trans,
}
}
- if (fsck_err_on(d.v->d_type != inode_d_type(target), c,
+ if (fsck_err_on(d.v->d_type != inode_d_type(target),
+ c, dirent_d_type_wrong,
"incorrect d_type: got %s, should be %s:\n%s",
bch2_d_type_str(d.v->d_type),
bch2_d_type_str(inode_d_type(target)),
@@ -1576,7 +1639,8 @@ static int check_dirent_target(struct btree_trans *trans,
if (d.v->d_type == DT_SUBVOL &&
target->bi_parent_subvol != le32_to_cpu(d.v->d_parent_subvol) &&
(c->sb.version < bcachefs_metadata_version_subvol_dirent ||
- fsck_err(c, "dirent has wrong d_parent_subvol field: got %u, should be %u",
+ fsck_err(c, dirent_d_parent_subvol_wrong,
+ "dirent has wrong d_parent_subvol field: got %u, should be %u",
le32_to_cpu(d.v->d_parent_subvol),
target->bi_parent_subvol))) {
n = bch2_trans_kmalloc(trans, bkey_bytes(d.k));
@@ -1648,7 +1712,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
*hash_info = bch2_hash_info_init(c, &dir->inodes.data[0].inode);
dir->first_this_inode = false;
- if (fsck_err_on(!i, c,
+ if (fsck_err_on(!i, c, dirent_in_missing_dir_inode,
"dirent in nonexisting directory:\n%s",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
@@ -1660,7 +1724,8 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
if (!i)
goto out;
- if (fsck_err_on(!S_ISDIR(i->inode.bi_mode), c,
+ if (fsck_err_on(!S_ISDIR(i->inode.bi_mode),
+ c, dirent_in_non_dir_inode,
"dirent in non directory inode type %s:\n%s",
bch2_d_type_str(inode_d_type(&i->inode)),
(printbuf_reset(&buf),
@@ -1694,7 +1759,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
if (ret && !bch2_err_matches(ret, ENOENT))
goto err;
- if (fsck_err_on(ret, c,
+ if (fsck_err_on(ret, c, dirent_to_missing_subvol,
"dirent points to missing subvolume %u",
le32_to_cpu(d.v->d_child_subvol))) {
ret = __remove_dirent(trans, d.k->p);
@@ -1706,7 +1771,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
if (ret && !bch2_err_matches(ret, ENOENT))
goto err;
- if (fsck_err_on(ret, c,
+ if (fsck_err_on(ret, c, subvol_to_missing_root,
"subvolume %u points to missing subvolume root %llu",
target_subvol,
target_inum)) {
@@ -1715,7 +1780,8 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
goto err;
}
- if (fsck_err_on(subvol_root.bi_subvol != target_subvol, c,
+ if (fsck_err_on(subvol_root.bi_subvol != target_subvol,
+ c, subvol_root_wrong_bi_subvol,
"subvol root %llu has wrong bi_subvol field: got %u, should be %u",
target_inum,
subvol_root.bi_subvol, target_subvol)) {
@@ -1734,7 +1800,8 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
if (ret)
goto err;
- if (fsck_err_on(!target->inodes.nr, c,
+ if (fsck_err_on(!target->inodes.nr,
+ c, dirent_to_missing_inode,
"dirent points to missing inode: (equiv %u)\n%s",
equiv.snapshot,
(printbuf_reset(&buf),
@@ -1820,7 +1887,7 @@ static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
*hash_info = bch2_hash_info_init(c, &inode->inodes.data[0].inode);
inode->first_this_inode = false;
- if (fsck_err_on(!i, c,
+ if (fsck_err_on(!i, c, xattr_in_missing_inode,
"xattr for missing inode %llu",
k.k->p.inode))
return bch2_btree_delete_at(trans, iter, 0);
@@ -1869,7 +1936,8 @@ static int check_root_trans(struct btree_trans *trans)
if (ret && !bch2_err_matches(ret, ENOENT))
return ret;
- if (mustfix_fsck_err_on(ret, c, "root subvol missing")) {
+ if (mustfix_fsck_err_on(ret, c, root_subvol_missing,
+ "root subvol missing")) {
struct bkey_i_subvolume root_subvol;
snapshot = U32_MAX;
@@ -1895,8 +1963,10 @@ static int check_root_trans(struct btree_trans *trans)
if (ret && !bch2_err_matches(ret, ENOENT))
return ret;
- if (mustfix_fsck_err_on(ret, c, "root directory missing") ||
- mustfix_fsck_err_on(!S_ISDIR(root_inode.bi_mode), c,
+ if (mustfix_fsck_err_on(ret, c, root_dir_missing,
+ "root directory missing") ||
+ mustfix_fsck_err_on(!S_ISDIR(root_inode.bi_mode),
+ c, root_inode_not_dir,
"root inode not a directory")) {
bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755,
0, NULL);
@@ -2000,7 +2070,8 @@ static int check_path(struct btree_trans *trans,
}
if (bch2_err_matches(ret, ENOENT)) {
- if (fsck_err(c, "unreachable inode %llu:%u, type %s nlink %u backptr %llu:%llu",
+ if (fsck_err(c, inode_unreachable,
+ "unreachable inode %llu:%u, type %s nlink %u backptr %llu:%llu",
inode->bi_inum, snapshot,
bch2_d_type_str(inode_d_type(inode)),
inode->bi_nlink,
@@ -2040,7 +2111,8 @@ static int check_path(struct btree_trans *trans,
pr_err("%llu:%u", i->inum, i->snapshot);
pr_err("%llu:%u", inode->bi_inum, snapshot);
- if (!fsck_err(c, "directory structure loop"))
+ if (!fsck_err(c, dir_loop,
+ "directory structure loop"))
return 0;
ret = commit_do(trans, NULL, NULL,
@@ -2088,7 +2160,7 @@ int bch2_check_directory_structure(struct bch_fs *c)
break;
}
- if (u.bi_flags & BCH_INODE_UNLINKED)
+ if (u.bi_flags & BCH_INODE_unlinked)
continue;
ret = check_path(trans, &path, &u, iter.pos.snapshot);
@@ -2300,7 +2372,8 @@ static int check_nlinks_update_inode(struct btree_trans *trans, struct btree_ite
link = &links->d[++*idx];
}
- if (fsck_err_on(bch2_inode_nlink_get(&u) != link->count, c,
+ if (fsck_err_on(bch2_inode_nlink_get(&u) != link->count,
+ c, inode_wrong_nlink,
"inode %llu type %s has wrong i_nlink (%u, should be %u)",
u.bi_inum, bch2_d_types[mode_to_type(u.bi_mode)],
bch2_inode_nlink_get(&u), link->count)) {
diff --git a/fs/bcachefs/fsck.h b/fs/bcachefs/fsck.h
index 90c87b5089a0..da991e8cf27e 100644
--- a/fs/bcachefs/fsck.h
+++ b/fs/bcachefs/fsck.h
@@ -4,6 +4,7 @@
int bch2_check_inodes(struct bch_fs *);
int bch2_check_extents(struct bch_fs *);
+int bch2_check_indirect_extents(struct bch_fs *);
int bch2_check_dirents(struct bch_fs *);
int bch2_check_xattrs(struct bch_fs *);
int bch2_check_root(struct bch_fs *);
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
index bb3f443d8381..def77f2d8802 100644
--- a/fs/bcachefs/inode.c
+++ b/fs/bcachefs/inode.c
@@ -6,6 +6,7 @@
#include "bkey_methods.h"
#include "btree_update.h"
#include "buckets.h"
+#include "compress.h"
#include "error.h"
#include "extents.h"
#include "extent_update.h"
@@ -19,13 +20,18 @@
#include <asm/unaligned.h>
-const char * const bch2_inode_opts[] = {
#define x(name, ...) #name,
+const char * const bch2_inode_opts[] = {
BCH_INODE_OPTS()
-#undef x
NULL,
};
+static const char * const bch2_inode_flag_strs[] = {
+ BCH_INODE_FLAGS()
+ NULL
+};
+#undef x
+
static const u8 byte_table[8] = { 1, 2, 3, 4, 6, 8, 10, 13 };
static int inode_decode_field(const u8 *in, const u8 *end,
@@ -361,9 +367,10 @@ int bch2_inode_peek(struct btree_trans *trans,
return ret;
}
-int bch2_inode_write(struct btree_trans *trans,
+int bch2_inode_write_flags(struct btree_trans *trans,
struct btree_iter *iter,
- struct bch_inode_unpacked *inode)
+ struct bch_inode_unpacked *inode,
+ enum btree_update_flags flags)
{
struct bkey_inode_buf *inode_p;
@@ -373,7 +380,7 @@ int bch2_inode_write(struct btree_trans *trans,
bch2_inode_pack_inlined(inode_p, inode);
inode_p->inode.k.p.snapshot = iter->snapshot;
- return bch2_trans_update(trans, iter, &inode_p->inode.k_i, 0);
+ return bch2_trans_update(trans, iter, &inode_p->inode.k_i, flags);
}
struct bkey_i *bch2_inode_to_v3(struct btree_trans *trans, struct bkey_i *k)
@@ -397,117 +404,121 @@ struct bkey_i *bch2_inode_to_v3(struct btree_trans *trans, struct bkey_i *k)
return &inode_p->inode.k_i;
}
-static int __bch2_inode_invalid(struct bkey_s_c k, struct printbuf *err)
+static int __bch2_inode_invalid(struct bch_fs *c, struct bkey_s_c k, struct printbuf *err)
{
struct bch_inode_unpacked unpacked;
+ int ret = 0;
- if (k.k->p.inode) {
- prt_printf(err, "nonzero k.p.inode");
- return -BCH_ERR_invalid_bkey;
- }
-
- if (k.k->p.offset < BLOCKDEV_INODE_MAX) {
- prt_printf(err, "fs inode in blockdev range");
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(k.k->p.inode, c, err,
+ inode_pos_inode_nonzero,
+ "nonzero k.p.inode");
- if (bch2_inode_unpack(k, &unpacked)) {
- prt_printf(err, "invalid variable length fields");
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(k.k->p.offset < BLOCKDEV_INODE_MAX, c, err,
+ inode_pos_blockdev_range,
+ "fs inode in blockdev range");
- if (unpacked.bi_data_checksum >= BCH_CSUM_OPT_NR + 1) {
- prt_printf(err, "invalid data checksum type (%u >= %u",
- unpacked.bi_data_checksum, BCH_CSUM_OPT_NR + 1);
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(bch2_inode_unpack(k, &unpacked), c, err,
+ inode_unpack_error,
+ "invalid variable length fields");
- if (unpacked.bi_compression >= BCH_COMPRESSION_OPT_NR + 1) {
- prt_printf(err, "invalid data checksum type (%u >= %u)",
- unpacked.bi_compression, BCH_COMPRESSION_OPT_NR + 1);
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(unpacked.bi_data_checksum >= BCH_CSUM_OPT_NR + 1, c, err,
+ inode_checksum_type_invalid,
+ "invalid data checksum type (%u >= %u",
+ unpacked.bi_data_checksum, BCH_CSUM_OPT_NR + 1);
- if ((unpacked.bi_flags & BCH_INODE_UNLINKED) &&
- unpacked.bi_nlink != 0) {
- prt_printf(err, "flagged as unlinked but bi_nlink != 0");
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(unpacked.bi_compression &&
+ !bch2_compression_opt_valid(unpacked.bi_compression - 1), c, err,
+ inode_compression_type_invalid,
+ "invalid compression opt %u", unpacked.bi_compression - 1);
- if (unpacked.bi_subvol && !S_ISDIR(unpacked.bi_mode)) {
- prt_printf(err, "subvolume root but not a directory");
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on((unpacked.bi_flags & BCH_INODE_unlinked) &&
+ unpacked.bi_nlink != 0, c, err,
+ inode_unlinked_but_nlink_nonzero,
+ "flagged as unlinked but bi_nlink != 0");
- return 0;
+ bkey_fsck_err_on(unpacked.bi_subvol && !S_ISDIR(unpacked.bi_mode), c, err,
+ inode_subvol_root_but_not_dir,
+ "subvolume root but not a directory");
+fsck_err:
+ return ret;
}
-int bch2_inode_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_inode_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
struct bkey_s_c_inode inode = bkey_s_c_to_inode(k);
+ int ret = 0;
- if (INODE_STR_HASH(inode.v) >= BCH_STR_HASH_NR) {
- prt_printf(err, "invalid str hash type (%llu >= %u)",
- INODE_STR_HASH(inode.v), BCH_STR_HASH_NR);
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(INODE_STR_HASH(inode.v) >= BCH_STR_HASH_NR, c, err,
+ inode_str_hash_invalid,
+ "invalid str hash type (%llu >= %u)",
+ INODE_STR_HASH(inode.v), BCH_STR_HASH_NR);
- return __bch2_inode_invalid(k, err);
+ ret = __bch2_inode_invalid(c, k, err);
+fsck_err:
+ return ret;
}
-int bch2_inode_v2_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_inode_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
struct bkey_s_c_inode_v2 inode = bkey_s_c_to_inode_v2(k);
+ int ret = 0;
- if (INODEv2_STR_HASH(inode.v) >= BCH_STR_HASH_NR) {
- prt_printf(err, "invalid str hash type (%llu >= %u)",
- INODEv2_STR_HASH(inode.v), BCH_STR_HASH_NR);
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(INODEv2_STR_HASH(inode.v) >= BCH_STR_HASH_NR, c, err,
+ inode_str_hash_invalid,
+ "invalid str hash type (%llu >= %u)",
+ INODEv2_STR_HASH(inode.v), BCH_STR_HASH_NR);
- return __bch2_inode_invalid(k, err);
+ ret = __bch2_inode_invalid(c, k, err);
+fsck_err:
+ return ret;
}
-int bch2_inode_v3_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_inode_v3_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
struct bkey_s_c_inode_v3 inode = bkey_s_c_to_inode_v3(k);
+ int ret = 0;
- if (INODEv3_FIELDS_START(inode.v) < INODEv3_FIELDS_START_INITIAL ||
- INODEv3_FIELDS_START(inode.v) > bkey_val_u64s(inode.k)) {
- prt_printf(err, "invalid fields_start (got %llu, min %u max %zu)",
- INODEv3_FIELDS_START(inode.v),
- INODEv3_FIELDS_START_INITIAL,
- bkey_val_u64s(inode.k));
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(INODEv3_FIELDS_START(inode.v) < INODEv3_FIELDS_START_INITIAL ||
+ INODEv3_FIELDS_START(inode.v) > bkey_val_u64s(inode.k), c, err,
+ inode_v3_fields_start_bad,
+ "invalid fields_start (got %llu, min %u max %zu)",
+ INODEv3_FIELDS_START(inode.v),
+ INODEv3_FIELDS_START_INITIAL,
+ bkey_val_u64s(inode.k));
- if (INODEv3_STR_HASH(inode.v) >= BCH_STR_HASH_NR) {
- prt_printf(err, "invalid str hash type (%llu >= %u)",
- INODEv3_STR_HASH(inode.v), BCH_STR_HASH_NR);
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(INODEv3_STR_HASH(inode.v) >= BCH_STR_HASH_NR, c, err,
+ inode_str_hash_invalid,
+ "invalid str hash type (%llu >= %u)",
+ INODEv3_STR_HASH(inode.v), BCH_STR_HASH_NR);
- return __bch2_inode_invalid(k, err);
+ ret = __bch2_inode_invalid(c, k, err);
+fsck_err:
+ return ret;
}
static void __bch2_inode_unpacked_to_text(struct printbuf *out,
struct bch_inode_unpacked *inode)
{
- prt_printf(out, "mode %o flags %x journal_seq %llu bi_size %llu bi_sectors %llu bi_version %llu",
- inode->bi_mode, inode->bi_flags,
+ prt_printf(out, "mode=%o ", inode->bi_mode);
+
+ prt_str(out, "flags=");
+ prt_bitflags(out, bch2_inode_flag_strs, inode->bi_flags & ((1U << 20) - 1));
+ prt_printf(out, " (%x)", inode->bi_flags);
+
+ prt_printf(out, " journal_seq=%llu bi_size=%llu bi_sectors=%llu bi_version=%llu",
inode->bi_journal_seq,
inode->bi_size,
inode->bi_sectors,
inode->bi_version);
#define x(_name, _bits) \
- prt_printf(out, " "#_name " %llu", (u64) inode->_name);
+ prt_printf(out, " "#_name "=%llu", (u64) inode->_name);
BCH_INODE_FIELDS_v3()
#undef x
}
@@ -546,7 +557,7 @@ static inline u64 bkey_inode_flags(struct bkey_s_c k)
static inline bool bkey_is_deleted_inode(struct bkey_s_c k)
{
- return bkey_inode_flags(k) & BCH_INODE_UNLINKED;
+ return bkey_inode_flags(k) & BCH_INODE_unlinked;
}
int bch2_trans_mark_inode(struct btree_trans *trans,
@@ -610,16 +621,17 @@ int bch2_mark_inode(struct btree_trans *trans,
return 0;
}
-int bch2_inode_generation_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_inode_generation_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
- if (k.k->p.inode) {
- prt_printf(err, "nonzero k.p.inode");
- return -BCH_ERR_invalid_bkey;
- }
+ int ret = 0;
- return 0;
+ bkey_fsck_err_on(k.k->p.inode, c, err,
+ inode_pos_inode_nonzero,
+ "nonzero k.p.inode");
+fsck_err:
+ return ret;
}
void bch2_inode_generation_to_text(struct printbuf *out, struct bch_fs *c,
@@ -926,8 +938,8 @@ int bch2_inode_find_by_inum(struct bch_fs *c, subvol_inum inum,
int bch2_inode_nlink_inc(struct bch_inode_unpacked *bi)
{
- if (bi->bi_flags & BCH_INODE_UNLINKED)
- bi->bi_flags &= ~BCH_INODE_UNLINKED;
+ if (bi->bi_flags & BCH_INODE_unlinked)
+ bi->bi_flags &= ~BCH_INODE_unlinked;
else {
if (bi->bi_nlink == U32_MAX)
return -EINVAL;
@@ -940,13 +952,13 @@ int bch2_inode_nlink_inc(struct bch_inode_unpacked *bi)
void bch2_inode_nlink_dec(struct btree_trans *trans, struct bch_inode_unpacked *bi)
{
- if (bi->bi_nlink && (bi->bi_flags & BCH_INODE_UNLINKED)) {
+ if (bi->bi_nlink && (bi->bi_flags & BCH_INODE_unlinked)) {
bch2_trans_inconsistent(trans, "inode %llu unlinked but link count nonzero",
bi->bi_inum);
return;
}
- if (bi->bi_flags & BCH_INODE_UNLINKED) {
+ if (bi->bi_flags & BCH_INODE_unlinked) {
bch2_trans_inconsistent(trans, "inode %llu link count underflow", bi->bi_inum);
return;
}
@@ -954,7 +966,7 @@ void bch2_inode_nlink_dec(struct btree_trans *trans, struct bch_inode_unpacked *
if (bi->bi_nlink)
bi->bi_nlink--;
else
- bi->bi_flags |= BCH_INODE_UNLINKED;
+ bi->bi_flags |= BCH_INODE_unlinked;
}
struct bch_opts bch2_inode_opts_to_opts(struct bch_inode_unpacked *inode)
@@ -979,6 +991,18 @@ void bch2_inode_opts_get(struct bch_io_opts *opts, struct bch_fs *c,
opts->compression = opts->background_compression = opts->data_checksum = opts->erasure_code = 0;
}
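+/* Look up an inode by subvolume:inum and return its IO options: */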
+int bch2_inum_opts_get(struct btree_trans *trans, subvol_inum inum, struct bch_io_opts *opts)
+{
+ struct bch_inode_unpacked inode;
+ int ret = lockrestart_do(trans, bch2_inode_find_by_inum_trans(trans, inum, &inode));
+
+ if (ret)
+ return ret;
+
+ bch2_inode_opts_get(opts, trans->c, &inode);
+ return 0;
+}
+
int bch2_inode_rm_snapshot(struct btree_trans *trans, u64 inum, u32 snapshot)
{
struct bch_fs *c = trans->c;
@@ -1042,53 +1066,85 @@ err:
return ret ?: -BCH_ERR_transaction_restart_nested;
}
-static int may_delete_deleted_inode(struct btree_trans *trans, struct bpos pos)
+static int may_delete_deleted_inode(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bpos pos,
+ bool *need_another_pass)
{
struct bch_fs *c = trans->c;
- struct btree_iter iter;
+ struct btree_iter inode_iter;
struct bkey_s_c k;
struct bch_inode_unpacked inode;
int ret;
- if (bch2_snapshot_is_internal_node(c, pos.snapshot))
- return 0;
-
- if (!fsck_err_on(c->sb.clean, c,
- "filesystem marked as clean but have deleted inode %llu:%u",
- pos.offset, pos.snapshot))
- return 0;
-
- k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, pos, BTREE_ITER_CACHED);
+ k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, pos, BTREE_ITER_CACHED);
ret = bkey_err(k);
if (ret)
return ret;
ret = bkey_is_inode(k.k) ? 0 : -BCH_ERR_ENOENT_inode;
if (fsck_err_on(!bkey_is_inode(k.k), c,
+ deleted_inode_missing,
"nonexistent inode %llu:%u in deleted_inodes btree",
pos.offset, pos.snapshot))
goto delete;
ret = bch2_inode_unpack(k, &inode);
if (ret)
- goto err;
+ goto out;
if (fsck_err_on(S_ISDIR(inode.bi_mode), c,
+ deleted_inode_is_dir,
"directory %llu:%u in deleted_inodes btree",
pos.offset, pos.snapshot))
goto delete;
- if (fsck_err_on(!(inode.bi_flags & BCH_INODE_UNLINKED), c,
+ if (fsck_err_on(!(inode.bi_flags & BCH_INODE_unlinked), c,
+ deleted_inode_not_unlinked,
"non-deleted inode %llu:%u in deleted_inodes btree",
pos.offset, pos.snapshot))
goto delete;
- return 1;
-err:
+ if (c->sb.clean &&
+ !fsck_err(c,
+ deleted_inode_but_clean,
+ "filesystem marked as clean but have deleted inode %llu:%u",
+ pos.offset, pos.snapshot)) {
+ ret = 0;
+ goto out;
+ }
+
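+	/*
+	 * Interior snapshot node: propagate the inode to the snapshot leaves
+	 * first, then clear the unlinked flag on this copy - the leaf copies
+	 * are picked up on the next pass:
+	 */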
+ if (bch2_snapshot_is_internal_node(c, pos.snapshot)) {
+ struct bpos new_min_pos;
+
+ ret = bch2_propagate_key_to_snapshot_leaves(trans, inode_iter.btree_id, k, &new_min_pos);
+ if (ret)
+ goto out;
+
+ inode.bi_flags &= ~BCH_INODE_unlinked;
+
+ ret = bch2_inode_write_flags(trans, &inode_iter, &inode,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ bch_err_msg(c, ret, "clearing inode unlinked flag");
+ if (ret)
+ goto out;
+
+ /*
+ * We'll need another write buffer flush to pick up the new
+ * unlinked inodes in the snapshot leaves:
+ */
+ *need_another_pass = true;
+ return 0;
+ }
+
+ ret = 1;
+out:
fsck_err:
+ bch2_trans_iter_exit(trans, &inode_iter);
return ret;
delete:
- return bch2_btree_bit_mod(trans, BTREE_ID_deleted_inodes, pos, false);
+ ret = bch2_btree_bit_mod(trans, BTREE_ID_deleted_inodes, pos, false);
+ goto out;
}
int bch2_delete_dead_inodes(struct bch_fs *c)
@@ -1096,7 +1152,10 @@ int bch2_delete_dead_inodes(struct bch_fs *c)
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
struct bkey_s_c k;
+ bool need_another_pass;
int ret;
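+	/*
+	 * may_delete_deleted_inode() sets need_another_pass when it pushes an
+	 * unlinked inode out to snapshot leaves; those only become visible
+	 * after another write buffer flush, hence the loop:
+	 */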
+again:
+ need_another_pass = false;
ret = bch2_btree_write_buffer_flush_sync(trans);
if (ret)
@@ -1110,7 +1169,8 @@ int bch2_delete_dead_inodes(struct bch_fs *c)
*/
for_each_btree_key(trans, iter, BTREE_ID_deleted_inodes, POS_MIN,
BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
- ret = lockrestart_do(trans, may_delete_deleted_inode(trans, k.k->p));
+ ret = lockrestart_do(trans, may_delete_deleted_inode(trans, &iter, k.k->p,
+ &need_another_pass));
if (ret < 0)
break;
@@ -1120,12 +1180,17 @@ int bch2_delete_dead_inodes(struct bch_fs *c)
bch2_fs_lazy_rw(c);
}
+ bch_verbose(c, "deleting unlinked inode %llu:%u", k.k->p.offset, k.k->p.snapshot);
+
ret = bch2_inode_rm_snapshot(trans, k.k->p.offset, k.k->p.snapshot);
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
break;
}
}
bch2_trans_iter_exit(trans, &iter);
+
+ if (!ret && need_another_pass)
+ goto again;
err:
bch2_trans_put(trans);
diff --git a/fs/bcachefs/inode.h b/fs/bcachefs/inode.h
index a7464e1b6960..88818a332b1e 100644
--- a/fs/bcachefs/inode.h
+++ b/fs/bcachefs/inode.h
@@ -3,16 +3,17 @@
#define _BCACHEFS_INODE_H
#include "bkey.h"
+#include "bkey_methods.h"
#include "opts.h"
enum bkey_invalid_flags;
extern const char * const bch2_inode_opts[];
-int bch2_inode_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_inode_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
-int bch2_inode_v2_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_inode_v2_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
-int bch2_inode_v3_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_inode_v3_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
void bch2_inode_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
@@ -52,7 +53,7 @@ static inline bool bkey_is_inode(const struct bkey *k)
k->type == KEY_TYPE_inode_v3;
}
-int bch2_inode_generation_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_inode_generation_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
void bch2_inode_generation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
@@ -101,8 +102,16 @@ void bch2_inode_unpacked_to_text(struct printbuf *, struct bch_inode_unpacked *)
int bch2_inode_peek(struct btree_trans *, struct btree_iter *,
struct bch_inode_unpacked *, subvol_inum, unsigned);
-int bch2_inode_write(struct btree_trans *, struct btree_iter *,
- struct bch_inode_unpacked *);
+
+int bch2_inode_write_flags(struct btree_trans *, struct btree_iter *,
+ struct bch_inode_unpacked *, enum btree_update_flags);
+
+static inline int bch2_inode_write(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bch_inode_unpacked *inode)
+{
+ return bch2_inode_write_flags(trans, iter, inode, 0);
+}
void bch2_inode_init_early(struct bch_fs *,
struct bch_inode_unpacked *);
@@ -177,7 +186,7 @@ static inline unsigned nlink_bias(umode_t mode)
static inline unsigned bch2_inode_nlink_get(struct bch_inode_unpacked *bi)
{
- return bi->bi_flags & BCH_INODE_UNLINKED
+ return bi->bi_flags & BCH_INODE_unlinked
? 0
: bi->bi_nlink + nlink_bias(bi->bi_mode);
}
@@ -187,10 +196,10 @@ static inline void bch2_inode_nlink_set(struct bch_inode_unpacked *bi,
{
if (nlink) {
bi->bi_nlink = nlink - nlink_bias(bi->bi_mode);
- bi->bi_flags &= ~BCH_INODE_UNLINKED;
+ bi->bi_flags &= ~BCH_INODE_unlinked;
} else {
bi->bi_nlink = 0;
- bi->bi_flags |= BCH_INODE_UNLINKED;
+ bi->bi_flags |= BCH_INODE_unlinked;
}
}
@@ -200,6 +209,7 @@ void bch2_inode_nlink_dec(struct btree_trans *, struct bch_inode_unpacked *);
struct bch_opts bch2_inode_opts_to_opts(struct bch_inode_unpacked *);
void bch2_inode_opts_get(struct bch_io_opts *, struct bch_fs *,
struct bch_inode_unpacked *);
+int bch2_inum_opts_get(struct btree_trans *, subvol_inum, struct bch_io_opts *);
int bch2_inode_rm_snapshot(struct btree_trans *, u64, u32);
int bch2_delete_dead_inodes(struct bch_fs *);
diff --git a/fs/bcachefs/io_misc.c b/fs/bcachefs/io_misc.c
index 119834cb8f9e..bebc11444ef5 100644
--- a/fs/bcachefs/io_misc.c
+++ b/fs/bcachefs/io_misc.c
@@ -16,13 +16,14 @@
#include "io_misc.h"
#include "io_write.h"
#include "logged_ops.h"
+#include "rebalance.h"
#include "subvolume.h"
/* Overwrites whatever was present with zeroes: */
int bch2_extent_fallocate(struct btree_trans *trans,
subvol_inum inum,
struct btree_iter *iter,
- unsigned sectors,
+ u64 sectors,
struct bch_io_opts opts,
s64 *i_sectors_delta,
struct write_point_specifier write_point)
@@ -104,7 +105,7 @@ int bch2_extent_fallocate(struct btree_trans *trans,
if (ret)
goto err;
- sectors = min(sectors, wp->sectors_free);
+ sectors = min_t(u64, sectors, wp->sectors_free);
sectors_allocated = sectors;
bch2_key_resize(&e->k, sectors);
@@ -355,6 +356,7 @@ static int __bch2_resume_logged_op_finsert(struct btree_trans *trans,
struct btree_iter iter;
struct bkey_i_logged_op_finsert *op = bkey_i_to_logged_op_finsert(op_k);
subvol_inum inum = { le32_to_cpu(op->v.subvol), le64_to_cpu(op->v.inum) };
+ struct bch_io_opts opts;
u64 dst_offset = le64_to_cpu(op->v.dst_offset);
u64 src_offset = le64_to_cpu(op->v.src_offset);
s64 shift = dst_offset - src_offset;
@@ -363,6 +365,10 @@ static int __bch2_resume_logged_op_finsert(struct btree_trans *trans,
bool insert = shift > 0;
int ret = 0;
+ ret = bch2_inum_opts_get(trans, inum, &opts);
+ if (ret)
+ return ret;
+
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
POS(inum.inum, 0),
BTREE_ITER_INTENT);
@@ -443,7 +449,10 @@ case LOGGED_OP_FINSERT_shift_extents:
op->v.pos = cpu_to_le64(insert ? bkey_start_offset(&delete.k) : delete.k.p.offset);
- ret = bch2_btree_insert_trans(trans, BTREE_ID_extents, &delete, 0) ?:
+ ret = bch2_bkey_set_needs_rebalance(c, copy,
+ opts.background_target,
+ opts.background_compression) ?:
+ bch2_btree_insert_trans(trans, BTREE_ID_extents, &delete, 0) ?:
bch2_btree_insert_trans(trans, BTREE_ID_extents, copy, 0) ?:
bch2_logged_op_update(trans, &op->k_i) ?:
bch2_trans_commit(trans, &disk_res, NULL, BTREE_INSERT_NOFAIL);
diff --git a/fs/bcachefs/io_misc.h b/fs/bcachefs/io_misc.h
index c9e6ed40e1b8..9cb44a7c43c1 100644
--- a/fs/bcachefs/io_misc.h
+++ b/fs/bcachefs/io_misc.h
@@ -3,7 +3,7 @@
#define _BCACHEFS_IO_MISC_H
int bch2_extent_fallocate(struct btree_trans *, subvol_inum, struct btree_iter *,
- unsigned, struct bch_io_opts, s64 *,
+ u64, struct bch_io_opts, s64 *,
struct write_point_specifier);
int bch2_fpunch_at(struct btree_trans *, struct btree_iter *,
subvol_inum, u64, s64 *);
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index 443c3ea65527..a56ed553dc15 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -643,7 +643,7 @@ csum_err:
"data checksum error: expected %0llx:%0llx got %0llx:%0llx (type %s)",
rbio->pick.crc.csum.hi, rbio->pick.crc.csum.lo,
csum.hi, csum.lo, bch2_csum_types[crc.csum_type]);
- bch2_io_error(ca);
+ bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
goto out;
decompression_err:
@@ -677,7 +677,7 @@ static void bch2_read_endio(struct bio *bio)
if (!rbio->split)
rbio->bio.bi_end_io = rbio->end_io;
- if (bch2_dev_inum_io_err_on(bio->bi_status, ca,
+ if (bch2_dev_inum_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_read,
rbio->read_pos.inode,
rbio->read_pos.offset,
"data read error: %s",
@@ -1025,7 +1025,7 @@ get_bio:
trans->notrace_relock_fail = true;
} else {
/* Attempting reconstruct read: */
- if (bch2_ec_read_extent(c, rbio)) {
+ if (bch2_ec_read_extent(trans, rbio)) {
bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
goto out;
}
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index 6e4f85eb6ec8..f02b3f7d26a0 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -202,6 +202,17 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
struct btree_iter iter;
struct bkey_i *k;
struct bkey_i_inode_v3 *inode;
+ /*
+ * Crazy performance optimization:
+ * Every extent update needs to also update the inode: the inode trigger
+ * will set bi->journal_seq to the journal sequence number of this
+ * transaction - for fsync.
+ *
+ * But if that's the only reason we're updating the inode (we're not
+ * updating bi_size or bi_sectors), then we don't need the inode update
+ * to be journalled - if we crash, the bi_journal_seq update will be
+ * lost, but that's fine.
+ */
unsigned inode_update_flags = BTREE_UPDATE_NOJOURNAL;
int ret;
@@ -223,7 +234,7 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
inode = bkey_i_to_inode_v3(k);
- if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_I_SIZE_DIRTY) &&
+ if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_i_size_dirty) &&
new_i_size > le64_to_cpu(inode->v.bi_size)) {
inode->v.bi_size = cpu_to_le64(new_i_size);
inode_update_flags = 0;
@@ -351,10 +362,13 @@ static int bch2_write_index_default(struct bch_write_op *op)
bkey_start_pos(&sk.k->k),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
- ret = bch2_extent_update(trans, inum, &iter, sk.k,
- &op->res,
- op->new_i_size, &op->i_sectors_delta,
- op->flags & BCH_WRITE_CHECK_ENOSPC);
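+		/*
+		 * Note which extents still need background compression or
+		 * moving to the background target, so the rebalance thread can
+		 * find them later:
+		 */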
+ ret = bch2_bkey_set_needs_rebalance(c, sk.k,
+ op->opts.background_target,
+ op->opts.background_compression) ?:
+ bch2_extent_update(trans, inum, &iter, sk.k,
+ &op->res,
+ op->new_i_size, &op->i_sectors_delta,
+ op->flags & BCH_WRITE_CHECK_ENOSPC);
bch2_trans_iter_exit(trans, &iter);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
@@ -495,7 +509,6 @@ static void __bch2_write_index(struct bch_write_op *op)
{
struct bch_fs *c = op->c;
struct keylist *keys = &op->insert_keys;
- struct bkey_i *k;
unsigned dev;
int ret = 0;
@@ -505,14 +518,6 @@ static void __bch2_write_index(struct bch_write_op *op)
goto err;
}
- /*
- * probably not the ideal place to hook this in, but I don't
- * particularly want to plumb io_opts all the way through the btree
- * update stack right now
- */
- for_each_keylist_key(keys, k)
- bch2_rebalance_add_key(c, bkey_i_to_s_c(k), &op->opts);
-
if (!bch2_keylist_empty(keys)) {
u64 sectors_start = keylist_sectors(keys);
@@ -643,7 +648,7 @@ static void bch2_write_endio(struct bio *bio)
struct bch_fs *c = wbio->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, wbio->dev);
- if (bch2_dev_inum_io_err_on(bio->bi_status, ca,
+ if (bch2_dev_inum_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
op->pos.inode,
wbio->inode_offset << 9,
"data write error: %s",
@@ -816,6 +821,7 @@ static enum prep_encoded_ret {
/* Can we just write the entire extent as is? */
if (op->crc.uncompressed_size == op->crc.live_size &&
+ op->crc.uncompressed_size <= c->opts.encoded_extent_max >> 9 &&
op->crc.compressed_size <= wp->sectors_free &&
(op->crc.compression_type == bch2_compression_opt_to_type(op->compression_opt) ||
op->incompressible)) {
@@ -1091,9 +1097,7 @@ static bool bch2_extent_is_writeable(struct bch_write_op *op,
e = bkey_s_c_to_extent(k);
extent_for_each_ptr_decode(e, p, entry) {
- if (p.crc.csum_type ||
- crc_is_compressed(p.crc) ||
- p.has_ec)
+ if (crc_is_encoded(p.crc) || p.has_ec)
return false;
replicas += bch2_extent_ptr_durability(c, &p);
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index 0e7a9ffa3671..5b5d69f2316b 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -1019,6 +1019,25 @@ err:
return ret;
}
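+/* Allocate a journal on any online member device that doesn't yet have one: */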
+int bch2_fs_journal_alloc(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned i;
+
+ for_each_online_member(ca, c, i) {
+ if (ca->journal.nr)
+ continue;
+
+ int ret = bch2_dev_journal_alloc(ca);
+ if (ret) {
+ percpu_ref_put(&ca->io_ref);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
/* startup/shutdown: */
static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
diff --git a/fs/bcachefs/journal.h b/fs/bcachefs/journal.h
index 491133cc52f3..011711e99c8d 100644
--- a/fs/bcachefs/journal.h
+++ b/fs/bcachefs/journal.h
@@ -534,6 +534,7 @@ bool bch2_journal_seq_pins_to_text(struct printbuf *, struct journal *, u64 *);
int bch2_set_nr_journal_buckets(struct bch_fs *, struct bch_dev *,
unsigned nr);
int bch2_dev_journal_alloc(struct bch_dev *);
+int bch2_fs_journal_alloc(struct bch_fs *);
void bch2_dev_journal_stop(struct journal *, struct bch_dev *);
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 6a3d6a374e9c..f4bc2cdbfdd7 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -140,7 +140,8 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
if (!dup->csum_good)
goto replace;
- fsck_err(c, "found duplicate but non identical journal entries (seq %llu)",
+ fsck_err(c, journal_entry_replicas_data_mismatch,
+ "found duplicate but non identical journal entries (seq %llu)",
le64_to_cpu(j->seq));
i = dup;
goto found;
@@ -235,7 +236,7 @@ static void journal_entry_err_msg(struct printbuf *out,
prt_str(out, ": ");
}
-#define journal_entry_err(c, version, jset, entry, msg, ...) \
+#define journal_entry_err(c, version, jset, entry, _err, msg, ...) \
({ \
struct printbuf _buf = PRINTBUF; \
\
@@ -244,9 +245,10 @@ static void journal_entry_err_msg(struct printbuf *out,
\
switch (flags & BKEY_INVALID_WRITE) { \
case READ: \
- mustfix_fsck_err(c, "%s", _buf.buf); \
+ mustfix_fsck_err(c, _err, "%s", _buf.buf); \
break; \
case WRITE: \
+ bch2_sb_error_count(c, BCH_FSCK_ERR_##_err); \
bch_err(c, "corrupt metadata before write: %s\n", _buf.buf);\
if (bch2_fs_inconsistent(c)) { \
ret = -BCH_ERR_fsck_errors_not_fixed; \
@@ -259,8 +261,8 @@ static void journal_entry_err_msg(struct printbuf *out,
true; \
})
-#define journal_entry_err_on(cond, c, version, jset, entry, msg, ...) \
- ((cond) ? journal_entry_err(c, version, jset, entry, msg, ##__VA_ARGS__) : false)
+#define journal_entry_err_on(cond, ...) \
+ ((cond) ? journal_entry_err(__VA_ARGS__) : false)
#define FSCK_DELETED_KEY 5
@@ -277,7 +279,10 @@ static int journal_validate_key(struct bch_fs *c,
struct printbuf buf = PRINTBUF;
int ret = 0;
- if (journal_entry_err_on(!k->k.u64s, c, version, jset, entry, "k->u64s 0")) {
+ if (journal_entry_err_on(!k->k.u64s,
+ c, version, jset, entry,
+ journal_entry_bkey_u64s_0,
+ "k->u64s 0")) {
entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
journal_entry_null_range(vstruct_next(entry), next);
return FSCK_DELETED_KEY;
@@ -286,6 +291,7 @@ static int journal_validate_key(struct bch_fs *c,
if (journal_entry_err_on((void *) bkey_next(k) >
(void *) vstruct_next(entry),
c, version, jset, entry,
+ journal_entry_bkey_past_end,
"extends past end of journal entry")) {
entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
journal_entry_null_range(vstruct_next(entry), next);
@@ -294,6 +300,7 @@ static int journal_validate_key(struct bch_fs *c,
if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT,
c, version, jset, entry,
+ journal_entry_bkey_bad_format,
"bad format %u", k->k.format)) {
le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
memmove(k, bkey_next(k), next - (void *) bkey_next(k));
@@ -317,7 +324,8 @@ static int journal_validate_key(struct bch_fs *c,
bch2_bkey_invalid(c, bkey_i_to_s_c(k),
__btree_node_type(level, btree_id), write, &buf);
- mustfix_fsck_err(c, "%s", buf.buf);
+ mustfix_fsck_err(c, journal_entry_bkey_invalid,
+ "%s", buf.buf);
le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
memmove(k, bkey_next(k), next - (void *) bkey_next(k));
@@ -369,7 +377,7 @@ static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs
prt_newline(out);
prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
}
- prt_printf(out, "btree=%s l=%u ", bch2_btree_ids[entry->btree_id], entry->level);
+ prt_printf(out, "btree=%s l=%u ", bch2_btree_id_str(entry->btree_id), entry->level);
bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
first = false;
}
@@ -387,6 +395,7 @@ static int journal_entry_btree_root_validate(struct bch_fs *c,
if (journal_entry_err_on(!entry->u64s ||
le16_to_cpu(entry->u64s) != k->k.u64s,
c, version, jset, entry,
+ journal_entry_btree_root_bad_size,
"invalid btree root journal entry: wrong number of keys")) {
void *next = vstruct_next(entry);
/*
@@ -436,6 +445,7 @@ static int journal_entry_blacklist_validate(struct bch_fs *c,
if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1,
c, version, jset, entry,
+ journal_entry_blacklist_bad_size,
"invalid journal seq blacklist entry: bad size")) {
journal_entry_null_range(entry, vstruct_next(entry));
}
@@ -463,6 +473,7 @@ static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2,
c, version, jset, entry,
+ journal_entry_blacklist_v2_bad_size,
"invalid journal seq blacklist entry: bad size")) {
journal_entry_null_range(entry, vstruct_next(entry));
goto out;
@@ -473,6 +484,7 @@ static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
le64_to_cpu(bl_entry->end),
c, version, jset, entry,
+ journal_entry_blacklist_v2_start_past_end,
"invalid journal seq blacklist entry: start > end")) {
journal_entry_null_range(entry, vstruct_next(entry));
}
@@ -505,6 +517,7 @@ static int journal_entry_usage_validate(struct bch_fs *c,
if (journal_entry_err_on(bytes < sizeof(*u),
c, version, jset, entry,
+ journal_entry_usage_bad_size,
"invalid journal entry usage: bad size")) {
journal_entry_null_range(entry, vstruct_next(entry));
return ret;
@@ -539,6 +552,7 @@ static int journal_entry_data_usage_validate(struct bch_fs *c,
if (journal_entry_err_on(bytes < sizeof(*u) ||
bytes < sizeof(*u) + u->r.nr_devs,
c, version, jset, entry,
+ journal_entry_data_usage_bad_size,
"invalid journal entry usage: bad size")) {
journal_entry_null_range(entry, vstruct_next(entry));
return ret;
@@ -570,13 +584,17 @@ static int journal_entry_clock_validate(struct bch_fs *c,
int ret = 0;
if (journal_entry_err_on(bytes != sizeof(*clock),
- c, version, jset, entry, "bad size")) {
+ c, version, jset, entry,
+ journal_entry_clock_bad_size,
+ "bad size")) {
journal_entry_null_range(entry, vstruct_next(entry));
return ret;
}
if (journal_entry_err_on(clock->rw > 1,
- c, version, jset, entry, "bad rw")) {
+ c, version, jset, entry,
+ journal_entry_clock_bad_rw,
+ "bad rw")) {
journal_entry_null_range(entry, vstruct_next(entry));
return ret;
}
@@ -608,7 +626,9 @@ static int journal_entry_dev_usage_validate(struct bch_fs *c,
int ret = 0;
if (journal_entry_err_on(bytes < expected,
- c, version, jset, entry, "bad size (%u < %u)",
+ c, version, jset, entry,
+ journal_entry_dev_usage_bad_size,
+ "bad size (%u < %u)",
bytes, expected)) {
journal_entry_null_range(entry, vstruct_next(entry));
return ret;
@@ -617,13 +637,17 @@ static int journal_entry_dev_usage_validate(struct bch_fs *c,
dev = le32_to_cpu(u->dev);
if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
- c, version, jset, entry, "bad dev")) {
+ c, version, jset, entry,
+ journal_entry_dev_usage_bad_dev,
+ "bad dev")) {
journal_entry_null_range(entry, vstruct_next(entry));
return ret;
}
if (journal_entry_err_on(u->pad,
- c, version, jset, entry, "bad pad")) {
+ c, version, jset, entry,
+ journal_entry_dev_usage_bad_pad,
+ "bad pad")) {
journal_entry_null_range(entry, vstruct_next(entry));
return ret;
}
@@ -738,7 +762,8 @@ static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
vstruct_for_each(jset, entry) {
if (journal_entry_err_on(vstruct_next(entry) > vstruct_last(jset),
- c, version, jset, entry,
+ c, version, jset, entry,
+ journal_entry_past_jset_end,
"journal entry extends past end of jset")) {
jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
break;
@@ -767,6 +792,7 @@ static int jset_validate(struct bch_fs *c,
version = le32_to_cpu(jset->version);
if (journal_entry_err_on(!bch2_version_compatible(version),
c, version, jset, NULL,
+ jset_unsupported_version,
"%s sector %llu seq %llu: incompatible journal entry version %u.%u",
ca ? ca->name : c->name,
sector, le64_to_cpu(jset->seq),
@@ -777,7 +803,8 @@ static int jset_validate(struct bch_fs *c,
}
if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)),
- c, version, jset, NULL,
+ c, version, jset, NULL,
+ jset_unknown_csum,
"%s sector %llu seq %llu: journal entry with unknown csum type %llu",
ca ? ca->name : c->name,
sector, le64_to_cpu(jset->seq),
@@ -788,6 +815,7 @@ static int jset_validate(struct bch_fs *c,
if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq),
c, version, jset, NULL,
+ jset_last_seq_newer_than_seq,
"invalid journal entry: last_seq > seq (%llu > %llu)",
le64_to_cpu(jset->last_seq),
le64_to_cpu(jset->seq))) {
@@ -816,7 +844,8 @@ static int jset_validate_early(struct bch_fs *c,
version = le32_to_cpu(jset->version);
if (journal_entry_err_on(!bch2_version_compatible(version),
- c, version, jset, NULL,
+ c, version, jset, NULL,
+ jset_unsupported_version,
"%s sector %llu seq %llu: unknown journal entry version %u.%u",
ca ? ca->name : c->name,
sector, le64_to_cpu(jset->seq),
@@ -831,7 +860,8 @@ static int jset_validate_early(struct bch_fs *c,
return JOURNAL_ENTRY_REREAD;
if (journal_entry_err_on(bytes > bucket_sectors_left << 9,
- c, version, jset, NULL,
+ c, version, jset, NULL,
+ jset_past_bucket_end,
"%s sector %llu seq %llu: journal entry too big (%zu bytes)",
ca ? ca->name : c->name,
sector, le64_to_cpu(jset->seq), bytes))
@@ -900,7 +930,7 @@ reread:
ret = submit_bio_wait(bio);
kfree(bio);
- if (bch2_dev_io_err_on(ret, ca,
+ if (bch2_dev_io_err_on(ret, ca, BCH_MEMBER_ERROR_read,
"journal read error: sector %llu",
offset) ||
bch2_meta_read_fault("journal")) {
@@ -956,7 +986,8 @@ reread:
ja->bucket_seq[bucket] = le64_to_cpu(j->seq);
csum_good = jset_csum_good(c, j);
- if (!csum_good)
+ if (bch2_dev_io_err_on(!csum_good, ca, BCH_MEMBER_ERROR_checksum,
+ "journal checksum error"))
saw_bad = true;
ret = bch2_encrypt(c, JSET_CSUM_TYPE(j), journal_nonce(j),
@@ -1172,6 +1203,7 @@ int bch2_journal_read(struct bch_fs *c,
if (journal_entry_err_on(le64_to_cpu(i->j.last_seq) > le64_to_cpu(i->j.seq),
c, le32_to_cpu(i->j.version), &i->j, NULL,
+ jset_last_seq_newer_than_seq,
"invalid journal entry: last_seq > seq (%llu > %llu)",
le64_to_cpu(i->j.last_seq),
le64_to_cpu(i->j.seq)))
@@ -1188,7 +1220,8 @@ int bch2_journal_read(struct bch_fs *c,
}
if (!*last_seq) {
- fsck_err(c, "journal read done, but no entries found after dropping non-flushes");
+ fsck_err(c, dirty_but_no_journal_entries_post_drop_nonflushes,
+ "journal read done, but no entries found after dropping non-flushes");
return 0;
}
@@ -1214,6 +1247,7 @@ int bch2_journal_read(struct bch_fs *c,
if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
+ jset_seq_blacklisted,
"found blacklisted journal entry %llu", seq);
i->ignore = true;
}
@@ -1254,7 +1288,8 @@ int bch2_journal_read(struct bch_fs *c,
bch2_journal_ptrs_to_text(&buf2, c, i);
missing_end = seq - 1;
- fsck_err(c, "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
+ fsck_err(c, journal_entries_missing,
+ "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
" prev at %s\n"
" next at %s",
missing_start, missing_end,
@@ -1309,7 +1344,8 @@ int bch2_journal_read(struct bch_fs *c,
if (!degraded &&
!bch2_replicas_marked(c, &replicas.e) &&
(le64_to_cpu(i->j.seq) == *last_seq ||
- fsck_err(c, "superblock not marked as containing replicas for journal entry %llu\n %s",
+ fsck_err(c, journal_entry_replicas_not_marked,
+ "superblock not marked as containing replicas for journal entry %llu\n %s",
le64_to_cpu(i->j.seq), buf.buf))) {
ret = bch2_mark_replicas(c, &replicas.e);
if (ret)
@@ -1581,7 +1617,8 @@ static void journal_write_endio(struct bio *bio)
struct journal_buf *w = journal_last_unwritten_buf(j);
unsigned long flags;
- if (bch2_dev_io_err_on(bio->bi_status, ca, "error writing journal entry %llu: %s",
+ if (bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
+ "error writing journal entry %llu: %s",
le64_to_cpu(w->data->seq),
bch2_blk_status_to_str(bio->bi_status)) ||
bch2_meta_write_fault("journal")) {
@@ -1641,9 +1678,15 @@ static void do_journal_write(struct closure *cl)
continue_at(cl, journal_write_done, c->io_complete_wq);
}
-static void bch2_journal_entries_postprocess(struct bch_fs *c, struct jset *jset)
+static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w)
{
- struct jset_entry *i, *next, *prev = NULL;
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct jset_entry *start, *end, *i, *next, *prev = NULL;
+ struct jset *jset = w->data;
+ unsigned sectors, bytes, u64s;
+ bool validate_before_checksum = false;
+ unsigned long btree_roots_have = 0;
+ int ret;
/*
* Simple compaction, dropping empty jset_entries (from journal
@@ -1660,8 +1703,20 @@ static void bch2_journal_entries_postprocess(struct bch_fs *c, struct jset *jset
if (!u64s)
continue;
- if (i->type == BCH_JSET_ENTRY_btree_root)
+ /*
+ * New btree roots are set by journalling them; when the journal
+ * entry gets written we have to propagate them to
+ * c->btree_roots
+ *
+ * But, every journal entry we write has to contain all the
+ * btree roots (at least for now); so after we copy btree roots
+ * to c->btree_roots we have to get any missing btree roots and
+ * add them to this journal entry:
+ */
+ if (i->type == BCH_JSET_ENTRY_btree_root) {
bch2_journal_entry_to_btree_root(c, i);
+ __set_bit(i->btree_id, &btree_roots_have);
+ }
/* Can we merge with previous entry? */
if (prev &&
@@ -1685,85 +1740,10 @@ static void bch2_journal_entries_postprocess(struct bch_fs *c, struct jset *jset
prev = prev ? vstruct_next(prev) : jset->start;
jset->u64s = cpu_to_le32((u64 *) prev - jset->_data);
-}
-
-void bch2_journal_write(struct closure *cl)
-{
- struct journal *j = container_of(cl, struct journal, io);
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct bch_dev *ca;
- struct journal_buf *w = journal_last_unwritten_buf(j);
- struct bch_replicas_padded replicas;
- struct jset_entry *start, *end;
- struct jset *jset;
- struct bio *bio;
- struct printbuf journal_debug_buf = PRINTBUF;
- bool validate_before_checksum = false;
- unsigned i, sectors, bytes, u64s, nr_rw_members = 0;
- int ret;
-
- BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
-
- journal_buf_realloc(j, w);
- jset = w->data;
-
- j->write_start_time = local_clock();
-
- spin_lock(&j->lock);
-
- /*
- * If the journal is in an error state - we did an emergency shutdown -
- * we prefer to continue doing journal writes. We just mark them as
- * noflush so they'll never be used, but they'll still be visible by the
- * list_journal tool - this helps in debugging.
- *
- * There's a caveat: the first journal write after marking the
- * superblock dirty must always be a flush write, because on startup
- * from a clean shutdown we didn't necessarily read the journal and the
- * new journal write might overwrite whatever was in the journal
- * previously - we can't leave the journal without any flush writes in
- * it.
- *
- * So if we're in an error state, and we're still starting up, we don't
- * write anything at all.
- */
- if (!test_bit(JOURNAL_NEED_FLUSH_WRITE, &j->flags) &&
- (bch2_journal_error(j) ||
- w->noflush ||
- (!w->must_flush &&
- (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
- test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags)))) {
- w->noflush = true;
- SET_JSET_NO_FLUSH(jset, true);
- jset->last_seq = 0;
- w->last_seq = 0;
-
- j->nr_noflush_writes++;
- } else if (!bch2_journal_error(j)) {
- j->last_flush_write = jiffies;
- j->nr_flush_writes++;
- clear_bit(JOURNAL_NEED_FLUSH_WRITE, &j->flags);
- } else {
- spin_unlock(&j->lock);
- goto err;
- }
- spin_unlock(&j->lock);
-
- /*
- * New btree roots are set by journalling them; when the journal entry
- * gets written we have to propagate them to c->btree_roots
- *
- * But, every journal entry we write has to contain all the btree roots
- * (at least for now); so after we copy btree roots to c->btree_roots we
- * have to get any missing btree roots and add them to this journal
- * entry:
- */
-
- bch2_journal_entries_postprocess(c, jset);
start = end = vstruct_last(jset);
- end = bch2_btree_roots_to_journal_entries(c, jset->start, end);
+ end = bch2_btree_roots_to_journal_entries(c, end, btree_roots_have);
bch2_journal_super_entries_add_common(c, &end,
le64_to_cpu(jset->seq));
@@ -1779,7 +1759,7 @@ void bch2_journal_write(struct closure *cl)
bch2_fs_fatal_error(c, "aieeee! journal write overran available space, %zu > %u (extra %u reserved %u/%u)",
vstruct_bytes(jset), w->sectors << 9,
u64s, w->u64s_reserved, j->entry_u64s_reserved);
- goto err;
+ return -EINVAL;
}
jset->magic = cpu_to_le64(jset_magic(c));
@@ -1798,37 +1778,117 @@ void bch2_journal_write(struct closure *cl)
validate_before_checksum = true;
if (validate_before_checksum &&
- jset_validate(c, NULL, jset, 0, WRITE))
- goto err;
+ (ret = jset_validate(c, NULL, jset, 0, WRITE)))
+ return ret;
ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
jset->encrypted_start,
vstruct_end(jset) - (void *) jset->encrypted_start);
if (bch2_fs_fatal_err_on(ret, c,
"error decrypting journal entry: %i", ret))
- goto err;
+ return ret;
jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
journal_nonce(jset), jset);
if (!validate_before_checksum &&
- jset_validate(c, NULL, jset, 0, WRITE))
- goto err;
+ (ret = jset_validate(c, NULL, jset, 0, WRITE)))
+ return ret;
memset((void *) jset + bytes, 0, (sectors << 9) - bytes);
+ return 0;
+}
+
+static int bch2_journal_write_pick_flush(struct journal *j, struct journal_buf *w)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ int error = bch2_journal_error(j);
+
+ /*
+ * If the journal is in an error state - we did an emergency shutdown -
+ * we prefer to continue doing journal writes. We just mark them as
+	 * noflush so they'll never be used, but they'll still be visible to the
+ * list_journal tool - this helps in debugging.
+ *
+ * There's a caveat: the first journal write after marking the
+ * superblock dirty must always be a flush write, because on startup
+ * from a clean shutdown we didn't necessarily read the journal and the
+ * new journal write might overwrite whatever was in the journal
+ * previously - we can't leave the journal without any flush writes in
+ * it.
+ *
+ * So if we're in an error state, and we're still starting up, we don't
+ * write anything at all.
+ */
+ if (error && test_bit(JOURNAL_NEED_FLUSH_WRITE, &j->flags))
+ return -EIO;
+
+ if (error ||
+ w->noflush ||
+ (!w->must_flush &&
+ (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
+ test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags))) {
+ w->noflush = true;
+ SET_JSET_NO_FLUSH(w->data, true);
+ w->data->last_seq = 0;
+ w->last_seq = 0;
+
+ j->nr_noflush_writes++;
+ } else {
+ j->last_flush_write = jiffies;
+ j->nr_flush_writes++;
+ clear_bit(JOURNAL_NEED_FLUSH_WRITE, &j->flags);
+ }
+
+ return 0;
+}
+
+void bch2_journal_write(struct closure *cl)
+{
+ struct journal *j = container_of(cl, struct journal, io);
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct bch_dev *ca;
+ struct journal_buf *w = journal_last_unwritten_buf(j);
+ struct bch_replicas_padded replicas;
+ struct bio *bio;
+ struct printbuf journal_debug_buf = PRINTBUF;
+ unsigned i, nr_rw_members = 0;
+ int ret;
+
+ BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
+
+ j->write_start_time = local_clock();
-retry_alloc:
spin_lock(&j->lock);
- ret = journal_write_alloc(j, w);
+ ret = bch2_journal_write_pick_flush(j, w);
+ spin_unlock(&j->lock);
+ if (ret)
+ goto err;
+
+ journal_buf_realloc(j, w);
+
+ ret = bch2_journal_write_prep(j, w);
+ if (ret)
+ goto err;
+
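+	/*
+	 * Retry the allocation after discarding old journal buckets, until it
+	 * either succeeds or there's nothing left to discard:
+	 */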
+ while (1) {
+ spin_lock(&j->lock);
+ ret = journal_write_alloc(j, w);
+ if (!ret || !j->can_discard)
+ break;
- if (ret && j->can_discard) {
spin_unlock(&j->lock);
bch2_journal_do_discards(j);
- goto retry_alloc;
}
- if (ret)
+ if (ret) {
__bch2_journal_debug_to_text(&journal_debug_buf, j);
+ spin_unlock(&j->lock);
+ bch_err(c, "Unable to allocate journal write:\n%s",
+ journal_debug_buf.buf);
+ printbuf_exit(&journal_debug_buf);
+ goto err;
+ }
/*
* write is allocated, no longer need to account for it in
@@ -1843,13 +1903,6 @@ retry_alloc:
bch2_journal_space_available(j);
spin_unlock(&j->lock);
- if (ret) {
- bch_err(c, "Unable to allocate journal write:\n%s",
- journal_debug_buf.buf);
- printbuf_exit(&journal_debug_buf);
- goto err;
- }
-
w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));
if (c->opts.nochanges)
@@ -1871,7 +1924,7 @@ retry_alloc:
if (ret)
goto err;
- if (!JSET_NO_FLUSH(jset) && w->separate_flush) {
+ if (!JSET_NO_FLUSH(w->data) && w->separate_flush) {
for_each_rw_member(ca, c, i) {
percpu_ref_get(&ca->io_ref);
diff --git a/fs/bcachefs/lru.c b/fs/bcachefs/lru.c
index 215a653322f3..a5cc0ed195d6 100644
--- a/fs/bcachefs/lru.c
+++ b/fs/bcachefs/lru.c
@@ -10,17 +10,17 @@
#include "recovery.h"
/* KEY_TYPE_lru is obsolete: */
-int bch2_lru_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_lru_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
- if (!lru_pos_time(k.k->p)) {
- prt_printf(err, "lru entry at time=0");
- return -BCH_ERR_invalid_bkey;
-
- }
+ int ret = 0;
- return 0;
+ bkey_fsck_err_on(!lru_pos_time(k.k->p), c, err,
+ lru_entry_at_time_0,
+ "lru entry at time=0");
+fsck_err:
+ return ret;
}
void bch2_lru_to_text(struct printbuf *out, struct bch_fs *c,
@@ -95,6 +95,7 @@ static int bch2_check_lru_key(struct btree_trans *trans,
int ret;
if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_pos), c,
+ lru_entry_to_invalid_bucket,
"lru key points to nonexistent device:bucket %llu:%llu",
alloc_pos.inode, alloc_pos.offset))
return bch2_btree_delete_at(trans, lru_iter, 0);
@@ -125,7 +126,8 @@ static int bch2_check_lru_key(struct btree_trans *trans,
}
if (c->opts.reconstruct_alloc ||
- fsck_err(c, "incorrect lru entry: lru %s time %llu\n"
+ fsck_err(c, lru_entry_bad,
+ "incorrect lru entry: lru %s time %llu\n"
" %s\n"
" for %s",
bch2_lru_types[type],
diff --git a/fs/bcachefs/lru.h b/fs/bcachefs/lru.h
index be66bf9ad809..429dca816df5 100644
--- a/fs/bcachefs/lru.h
+++ b/fs/bcachefs/lru.h
@@ -48,7 +48,7 @@ static inline enum bch_lru_type lru_type(struct bkey_s_c l)
return BCH_LRU_read;
}
-int bch2_lru_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_lru_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
void bch2_lru_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 39a14e321680..ab749bf2fcbc 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -20,6 +20,7 @@
#include "keylist.h"
#include "move.h"
#include "replicas.h"
+#include "snapshot.h"
#include "super-io.h"
#include "trace.h"
@@ -59,20 +60,6 @@ static void trace_move_extent_alloc_mem_fail2(struct bch_fs *c, struct bkey_s_c
}
}
-static void progress_list_add(struct bch_fs *c, struct bch_move_stats *stats)
-{
- mutex_lock(&c->data_progress_lock);
- list_add(&stats->list, &c->data_progress_list);
- mutex_unlock(&c->data_progress_lock);
-}
-
-static void progress_list_del(struct bch_fs *c, struct bch_move_stats *stats)
-{
- mutex_lock(&c->data_progress_lock);
- list_del(&stats->list);
- mutex_unlock(&c->data_progress_lock);
-}
-
struct moving_io {
struct list_head read_list;
struct list_head io_list;
@@ -156,35 +143,31 @@ static void move_read_endio(struct bio *bio)
closure_put(&ctxt->cl);
}
-void bch2_moving_ctxt_do_pending_writes(struct moving_context *ctxt,
- struct btree_trans *trans)
+void bch2_moving_ctxt_do_pending_writes(struct moving_context *ctxt)
{
struct moving_io *io;
- if (trans)
- bch2_trans_unlock(trans);
-
while ((io = bch2_moving_ctxt_next_pending_write(ctxt))) {
+ bch2_trans_unlock_long(ctxt->trans);
list_del(&io->read_list);
move_write(io);
}
}
-static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt,
- struct btree_trans *trans)
+void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
{
unsigned sectors_pending = atomic_read(&ctxt->write_sectors);
- move_ctxt_wait_event(ctxt, trans,
+ move_ctxt_wait_event(ctxt,
!atomic_read(&ctxt->write_sectors) ||
atomic_read(&ctxt->write_sectors) != sectors_pending);
}
void bch2_moving_ctxt_exit(struct moving_context *ctxt)
{
- struct bch_fs *c = ctxt->c;
+ struct bch_fs *c = ctxt->trans->c;
- move_ctxt_wait_event(ctxt, NULL, list_empty(&ctxt->reads));
+ move_ctxt_wait_event(ctxt, list_empty(&ctxt->reads));
closure_sync(&ctxt->cl);
EBUG_ON(atomic_read(&ctxt->write_sectors));
@@ -192,16 +175,12 @@ void bch2_moving_ctxt_exit(struct moving_context *ctxt)
EBUG_ON(atomic_read(&ctxt->read_sectors));
EBUG_ON(atomic_read(&ctxt->read_ios));
- if (ctxt->stats) {
- progress_list_del(c, ctxt->stats);
- trace_move_data(c,
- atomic64_read(&ctxt->stats->sectors_moved),
- atomic64_read(&ctxt->stats->keys_moved));
- }
-
mutex_lock(&c->moving_context_lock);
list_del(&ctxt->list);
mutex_unlock(&c->moving_context_lock);
+
+ bch2_trans_put(ctxt->trans);
+ memset(ctxt, 0, sizeof(*ctxt));
}
void bch2_moving_ctxt_init(struct moving_context *ctxt,
@@ -213,7 +192,7 @@ void bch2_moving_ctxt_init(struct moving_context *ctxt,
{
memset(ctxt, 0, sizeof(*ctxt));
- ctxt->c = c;
+ ctxt->trans = bch2_trans_get(c);
ctxt->fn = (void *) _RET_IP_;
ctxt->rate = rate;
ctxt->stats = stats;
@@ -230,16 +209,17 @@ void bch2_moving_ctxt_init(struct moving_context *ctxt,
mutex_lock(&c->moving_context_lock);
list_add(&ctxt->list, &c->moving_context_list);
mutex_unlock(&c->moving_context_lock);
+}
- if (stats) {
- progress_list_add(c, stats);
- stats->data_type = BCH_DATA_user;
- }
+void bch2_move_stats_exit(struct bch_move_stats *stats, struct bch_fs *c)
+{
+ trace_move_data(c, stats);
}
void bch2_move_stats_init(struct bch_move_stats *stats, char *name)
{
memset(stats, 0, sizeof(*stats));
+ stats->data_type = BCH_DATA_user;
scnprintf(stats->name, sizeof(stats->name), "%s", name);
}
@@ -286,15 +266,14 @@ static int bch2_extent_drop_ptrs(struct btree_trans *trans,
bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL);
}
-static int bch2_move_extent(struct btree_trans *trans,
- struct btree_iter *iter,
- struct moving_context *ctxt,
- struct move_bucket_in_flight *bucket_in_flight,
- struct bch_io_opts io_opts,
- enum btree_id btree_id,
- struct bkey_s_c k,
- struct data_update_opts data_opts)
+int bch2_move_extent(struct moving_context *ctxt,
+ struct move_bucket_in_flight *bucket_in_flight,
+ struct btree_iter *iter,
+ struct bkey_s_c k,
+ struct bch_io_opts io_opts,
+ struct data_update_opts data_opts)
{
+ struct btree_trans *trans = ctxt->trans;
struct bch_fs *c = trans->c;
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
struct moving_io *io;
@@ -303,6 +282,8 @@ static int bch2_move_extent(struct btree_trans *trans,
unsigned sectors = k.k->size, pages;
int ret = -ENOMEM;
+ if (ctxt->stats)
+ ctxt->stats->pos = BBPOS(iter->btree_id, iter->pos);
trace_move_extent2(c, k);
bch2_data_update_opts_normalize(k, &data_opts);
@@ -355,7 +336,7 @@ static int bch2_move_extent(struct btree_trans *trans,
io->rbio.bio.bi_end_io = move_read_endio;
ret = bch2_data_update_init(trans, ctxt, &io->write, ctxt->wp,
- io_opts, data_opts, btree_id, k);
+ io_opts, data_opts, iter->btree_id, k);
if (ret && ret != -BCH_ERR_unwritten_extent_update)
goto err_free_pages;
@@ -367,9 +348,11 @@ static int bch2_move_extent(struct btree_trans *trans,
BUG_ON(ret);
- io->write.ctxt = ctxt;
io->write.op.end_io = move_write_done;
+ if (ctxt->rate)
+ bch2_ratelimit_increment(ctxt->rate, k.k->size);
+
if (ctxt->stats) {
atomic64_inc(&ctxt->stats->keys_moved);
atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
@@ -399,7 +382,7 @@ static int bch2_move_extent(struct btree_trans *trans,
closure_get(&ctxt->cl);
bch2_read_extent(trans, &io->rbio,
bkey_start_pos(k.k),
- btree_id, k, 0,
+ iter->btree_id, k, 0,
BCH_READ_NODECODE|
BCH_READ_LAST_FRAGMENT);
return 0;
@@ -413,45 +396,96 @@ err:
return ret;
}
-static int lookup_inode(struct btree_trans *trans, struct bpos pos,
- struct bch_inode_unpacked *inode)
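+/*
+ * IO options are looked up per inode: cache them per inode number, with one
+ * entry per snapshot ID, so that extents in different snapshots can pick up
+ * different options:
+ */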
+struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans,
+ struct per_snapshot_io_opts *io_opts,
+ struct bkey_s_c extent_k)
+{
+ struct bch_fs *c = trans->c;
+ u32 restart_count = trans->restart_count;
+ int ret = 0;
+
+ if (io_opts->cur_inum != extent_k.k->p.inode) {
+ struct btree_iter iter;
+ struct bkey_s_c k;
+
+ io_opts->d.nr = 0;
+
+ for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, extent_k.k->p.inode),
+ BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
+ if (k.k->p.offset != extent_k.k->p.inode)
+ break;
+
+ if (!bkey_is_inode(k.k))
+ continue;
+
+ struct bch_inode_unpacked inode;
+ BUG_ON(bch2_inode_unpack(k, &inode));
+
+ struct snapshot_io_opts_entry e = { .snapshot = k.k->p.snapshot };
+ bch2_inode_opts_get(&e.io_opts, trans->c, &inode);
+
+ ret = darray_push(&io_opts->d, e);
+ if (ret)
+ break;
+ }
+ bch2_trans_iter_exit(trans, &iter);
+ io_opts->cur_inum = extent_k.k->p.inode;
+ }
+
+ ret = ret ?: trans_was_restarted(trans, restart_count);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (extent_k.k->p.snapshot) {
+ struct snapshot_io_opts_entry *i;
+ darray_for_each(io_opts->d, i)
+ if (bch2_snapshot_is_ancestor(c, extent_k.k->p.snapshot, i->snapshot))
+ return &i->io_opts;
+ }
+
+ return &io_opts->fs_io_opts;
+}
+
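+/* Look up the options for a single extent, without the per-snapshot cache: */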
+int bch2_move_get_io_opts_one(struct btree_trans *trans,
+ struct bch_io_opts *io_opts,
+ struct bkey_s_c extent_k)
{
struct btree_iter iter;
struct bkey_s_c k;
int ret;
- bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes, pos,
- BTREE_ITER_ALL_SNAPSHOTS);
- k = bch2_btree_iter_peek(&iter);
+ /* reflink btree? */
+ if (!extent_k.k->p.inode) {
+ *io_opts = bch2_opts_to_inode_opts(trans->c->opts);
+ return 0;
+ }
+
+ k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
+ SPOS(0, extent_k.k->p.inode, extent_k.k->p.snapshot),
+ BTREE_ITER_CACHED);
ret = bkey_err(k);
- if (ret)
- goto err;
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ return ret;
- if (!k.k || !bkey_eq(k.k->p, pos)) {
- ret = -BCH_ERR_ENOENT_inode;
- goto err;
+ if (!ret && bkey_is_inode(k.k)) {
+ struct bch_inode_unpacked inode;
+ bch2_inode_unpack(k, &inode);
+ bch2_inode_opts_get(io_opts, trans->c, &inode);
+ } else {
+ *io_opts = bch2_opts_to_inode_opts(trans->c->opts);
}
- ret = bkey_is_inode(k.k) ? 0 : -EIO;
- if (ret)
- goto err;
-
- ret = bch2_inode_unpack(k, inode);
- if (ret)
- goto err;
-err:
bch2_trans_iter_exit(trans, &iter);
- return ret;
+ return 0;
}
-static int move_ratelimit(struct btree_trans *trans,
- struct moving_context *ctxt)
+int bch2_move_ratelimit(struct moving_context *ctxt)
{
- struct bch_fs *c = trans->c;
+ struct bch_fs *c = ctxt->trans->c;
u64 delay;
- if (ctxt->wait_on_copygc) {
- bch2_trans_unlock(trans);
+ if (ctxt->wait_on_copygc && !c->copygc_running) {
+ bch2_trans_unlock_long(ctxt->trans);
wait_event_killable(c->copygc_running_wq,
!c->copygc_running ||
kthread_should_stop());
@@ -460,8 +494,12 @@ static int move_ratelimit(struct btree_trans *trans,
do {
delay = ctxt->rate ? bch2_ratelimit_delay(ctxt->rate) : 0;
+
if (delay) {
- bch2_trans_unlock(trans);
+ if (delay > HZ / 10)
+ bch2_trans_unlock_long(ctxt->trans);
+ else
+ bch2_trans_unlock(ctxt->trans);
set_current_state(TASK_INTERRUPTIBLE);
}
@@ -474,7 +512,7 @@ static int move_ratelimit(struct btree_trans *trans,
schedule_timeout(delay);
if (unlikely(freezing(current))) {
- move_ctxt_wait_event(ctxt, trans, list_empty(&ctxt->reads));
+ move_ctxt_wait_event(ctxt, list_empty(&ctxt->reads));
try_to_freeze();
}
} while (delay);
@@ -483,7 +521,7 @@ static int move_ratelimit(struct btree_trans *trans,
* XXX: these limits really ought to be per device, SSDs and hard drives
* will want different limits
*/
- move_ctxt_wait_event(ctxt, trans,
+ move_ctxt_wait_event(ctxt,
atomic_read(&ctxt->write_sectors) < c->opts.move_bytes_in_flight >> 9 &&
atomic_read(&ctxt->read_sectors) < c->opts.move_bytes_in_flight >> 9 &&
atomic_read(&ctxt->write_ios) < c->opts.move_ios_in_flight &&
@@ -492,52 +530,28 @@ static int move_ratelimit(struct btree_trans *trans,
return 0;
}
-static int move_get_io_opts(struct btree_trans *trans,
- struct bch_io_opts *io_opts,
- struct bkey_s_c k, u64 *cur_inum)
-{
- struct bch_inode_unpacked inode;
- int ret;
-
- if (*cur_inum == k.k->p.inode)
- return 0;
-
- ret = lookup_inode(trans,
- SPOS(0, k.k->p.inode, k.k->p.snapshot),
- &inode);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- return ret;
-
- if (!ret)
- bch2_inode_opts_get(io_opts, trans->c, &inode);
- else
- *io_opts = bch2_opts_to_inode_opts(trans->c->opts);
- *cur_inum = k.k->p.inode;
- return 0;
-}
-
-static int __bch2_move_data(struct moving_context *ctxt,
- struct bpos start,
- struct bpos end,
- move_pred_fn pred, void *arg,
- enum btree_id btree_id)
+static int bch2_move_data_btree(struct moving_context *ctxt,
+ struct bpos start,
+ struct bpos end,
+ move_pred_fn pred, void *arg,
+ enum btree_id btree_id)
{
- struct bch_fs *c = ctxt->c;
- struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
+ struct btree_trans *trans = ctxt->trans;
+ struct bch_fs *c = trans->c;
+ struct per_snapshot_io_opts snapshot_io_opts;
+ struct bch_io_opts *io_opts;
struct bkey_buf sk;
- struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
struct bkey_s_c k;
struct data_update_opts data_opts;
- u64 cur_inum = U64_MAX;
int ret = 0, ret2;
+ per_snapshot_io_opts_init(&snapshot_io_opts, c);
bch2_bkey_buf_init(&sk);
if (ctxt->stats) {
ctxt->stats->data_type = BCH_DATA_user;
- ctxt->stats->btree_id = btree_id;
- ctxt->stats->pos = start;
+ ctxt->stats->pos = BBPOS(btree_id, start);
}
bch2_trans_iter_init(trans, &iter, btree_id, start,
@@ -547,7 +561,7 @@ static int __bch2_move_data(struct moving_context *ctxt,
if (ctxt->rate)
bch2_ratelimit_reset(ctxt->rate);
- while (!move_ratelimit(trans, ctxt)) {
+ while (!bch2_move_ratelimit(ctxt)) {
bch2_trans_begin(trans);
k = bch2_btree_iter_peek(&iter);
@@ -564,17 +578,18 @@ static int __bch2_move_data(struct moving_context *ctxt,
break;
if (ctxt->stats)
- ctxt->stats->pos = iter.pos;
+ ctxt->stats->pos = BBPOS(iter.btree_id, iter.pos);
if (!bkey_extent_is_direct_data(k.k))
goto next_nondata;
- ret = move_get_io_opts(trans, &io_opts, k, &cur_inum);
+ io_opts = bch2_move_get_io_opts(trans, &snapshot_io_opts, k);
+ ret = PTR_ERR_OR_ZERO(io_opts);
if (ret)
continue;
memset(&data_opts, 0, sizeof(data_opts));
- if (!pred(c, arg, k, &io_opts, &data_opts))
+ if (!pred(c, arg, k, io_opts, &data_opts))
goto next;
/*
@@ -584,24 +599,20 @@ static int __bch2_move_data(struct moving_context *ctxt,
bch2_bkey_buf_reassemble(&sk, c, k);
k = bkey_i_to_s_c(sk.k);
- ret2 = bch2_move_extent(trans, &iter, ctxt, NULL,
- io_opts, btree_id, k, data_opts);
+ ret2 = bch2_move_extent(ctxt, NULL, &iter, k, *io_opts, data_opts);
if (ret2) {
if (bch2_err_matches(ret2, BCH_ERR_transaction_restart))
continue;
if (ret2 == -ENOMEM) {
/* memory allocation failure, wait for some IO to finish */
- bch2_move_ctxt_wait_for_io(ctxt, trans);
+ bch2_move_ctxt_wait_for_io(ctxt);
continue;
}
/* XXX signal failure */
goto next;
}
-
- if (ctxt->rate)
- bch2_ratelimit_increment(ctxt->rate, k.k->size);
next:
if (ctxt->stats)
atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
@@ -610,59 +621,68 @@ next_nondata:
}
bch2_trans_iter_exit(trans, &iter);
- bch2_trans_put(trans);
bch2_bkey_buf_exit(&sk, c);
+ per_snapshot_io_opts_exit(&snapshot_io_opts);
return ret;
}
-int bch2_move_data(struct bch_fs *c,
- enum btree_id start_btree_id, struct bpos start_pos,
- enum btree_id end_btree_id, struct bpos end_pos,
- struct bch_ratelimit *rate,
- struct bch_move_stats *stats,
- struct write_point_specifier wp,
- bool wait_on_copygc,
- move_pred_fn pred, void *arg)
+int __bch2_move_data(struct moving_context *ctxt,
+ struct bbpos start,
+ struct bbpos end,
+ move_pred_fn pred, void *arg)
{
- struct moving_context ctxt;
+ struct bch_fs *c = ctxt->trans->c;
enum btree_id id;
int ret = 0;
- bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
-
- for (id = start_btree_id;
- id <= min_t(unsigned, end_btree_id, btree_id_nr_alive(c) - 1);
+ for (id = start.btree;
+ id <= min_t(unsigned, end.btree, btree_id_nr_alive(c) - 1);
id++) {
- stats->btree_id = id;
+ ctxt->stats->pos = BBPOS(id, POS_MIN);
- if (id != BTREE_ID_extents &&
- id != BTREE_ID_reflink)
+ if (!btree_type_has_ptrs(id) ||
+ !bch2_btree_id_root(c, id)->b)
continue;
- if (!bch2_btree_id_root(c, id)->b)
- continue;
-
- ret = __bch2_move_data(&ctxt,
- id == start_btree_id ? start_pos : POS_MIN,
- id == end_btree_id ? end_pos : POS_MAX,
+ ret = bch2_move_data_btree(ctxt,
+ id == start.btree ? start.pos : POS_MIN,
+ id == end.btree ? end.pos : POS_MAX,
pred, arg, id);
if (ret)
break;
}
+ return ret;
+}
+
+int bch2_move_data(struct bch_fs *c,
+ struct bbpos start,
+ struct bbpos end,
+ struct bch_ratelimit *rate,
+ struct bch_move_stats *stats,
+ struct write_point_specifier wp,
+ bool wait_on_copygc,
+ move_pred_fn pred, void *arg)
+{
+ struct moving_context ctxt;
+ int ret;
+
+ bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
+ ret = __bch2_move_data(&ctxt, start, end, pred, arg);
bch2_moving_ctxt_exit(&ctxt);
return ret;
}
-int __bch2_evacuate_bucket(struct btree_trans *trans,
- struct moving_context *ctxt,
+int __bch2_evacuate_bucket(struct moving_context *ctxt,
struct move_bucket_in_flight *bucket_in_flight,
struct bpos bucket, int gen,
struct data_update_opts _data_opts)
{
- struct bch_fs *c = ctxt->c;
+ struct btree_trans *trans = ctxt->trans;
+ struct bch_fs *c = trans->c;
struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
struct btree_iter iter;
struct bkey_buf sk;
@@ -673,7 +693,6 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
struct data_update_opts data_opts;
unsigned dirty_sectors, bucket_size;
u64 fragmentation;
- u64 cur_inum = U64_MAX;
struct bpos bp_pos = POS_MIN;
int ret = 0;
@@ -708,7 +727,7 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
goto err;
}
- while (!(ret = move_ratelimit(trans, ctxt))) {
+ while (!(ret = bch2_move_ratelimit(ctxt))) {
bch2_trans_begin(trans);
ret = bch2_get_next_backpointer(trans, bucket, gen,
@@ -737,7 +756,7 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
bch2_bkey_buf_reassemble(&sk, c, k);
k = bkey_i_to_s_c(sk.k);
- ret = move_get_io_opts(trans, &io_opts, k, &cur_inum);
+ ret = bch2_move_get_io_opts_one(trans, &io_opts, k);
if (ret) {
bch2_trans_iter_exit(trans, &iter);
continue;
@@ -758,23 +777,20 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
i++;
}
- ret = bch2_move_extent(trans, &iter, ctxt,
- bucket_in_flight,
- io_opts, bp.btree_id, k, data_opts);
+ ret = bch2_move_extent(ctxt, bucket_in_flight,
+ &iter, k, io_opts, data_opts);
bch2_trans_iter_exit(trans, &iter);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
if (ret == -ENOMEM) {
/* memory allocation failure, wait for some IO to finish */
- bch2_move_ctxt_wait_for_io(ctxt, trans);
+ bch2_move_ctxt_wait_for_io(ctxt);
continue;
}
if (ret)
goto err;
- if (ctxt->rate)
- bch2_ratelimit_increment(ctxt->rate, k.k->size);
if (ctxt->stats)
atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
} else {
@@ -825,14 +841,12 @@ int bch2_evacuate_bucket(struct bch_fs *c,
struct write_point_specifier wp,
bool wait_on_copygc)
{
- struct btree_trans *trans = bch2_trans_get(c);
struct moving_context ctxt;
int ret;
bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
- ret = __bch2_evacuate_bucket(trans, &ctxt, NULL, bucket, gen, data_opts);
+ ret = __bch2_evacuate_bucket(&ctxt, NULL, bucket, gen, data_opts);
bch2_moving_ctxt_exit(&ctxt);
- bch2_trans_put(trans);
return ret;
}
@@ -849,21 +863,25 @@ static int bch2_move_btree(struct bch_fs *c,
{
bool kthread = (current->flags & PF_KTHREAD) != 0;
struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
- struct btree_trans *trans = bch2_trans_get(c);
+ struct moving_context ctxt;
+ struct btree_trans *trans;
struct btree_iter iter;
struct btree *b;
enum btree_id id;
struct data_update_opts data_opts;
int ret = 0;
- progress_list_add(c, stats);
+ bch2_moving_ctxt_init(&ctxt, c, NULL, stats,
+ writepoint_ptr(&c->btree_write_point),
+ true);
+ trans = ctxt.trans;
stats->data_type = BCH_DATA_btree;
for (id = start_btree_id;
id <= min_t(unsigned, end_btree_id, btree_id_nr_alive(c) - 1);
id++) {
- stats->btree_id = id;
+ stats->pos = BBPOS(id, POS_MIN);
if (!bch2_btree_id_root(c, id)->b)
continue;
@@ -882,7 +900,7 @@ retry:
bpos_cmp(b->key.k.p, end_pos)) > 0)
break;
- stats->pos = iter.pos;
+ stats->pos = BBPOS(iter.btree_id, iter.pos);
if (!pred(c, arg, b, &io_opts, &data_opts))
goto next;
@@ -904,14 +922,10 @@ next:
break;
}
- bch2_trans_put(trans);
-
- if (ret)
- bch_err_fn(c, ret);
-
+ bch_err_fn(c, ret);
+ bch2_moving_ctxt_exit(&ctxt);
bch2_btree_interior_updates_flush(c);
- progress_list_del(c, stats);
return ret;
}
@@ -1032,8 +1046,7 @@ int bch2_scan_old_btree_nodes(struct bch_fs *c, struct bch_move_stats *stats)
mutex_unlock(&c->sb_lock);
}
- if (ret)
- bch_err_fn(c, ret);
+ bch_err_fn(c, ret);
return ret;
}
@@ -1056,14 +1069,16 @@ int bch2_data_job(struct bch_fs *c,
ret = bch2_replicas_gc2(c) ?: ret;
ret = bch2_move_data(c,
- op.start_btree, op.start_pos,
- op.end_btree, op.end_pos,
+ (struct bbpos) { op.start_btree, op.start_pos },
+ (struct bbpos) { op.end_btree, op.end_pos },
NULL,
stats,
writepoint_hashed((unsigned long) current),
true,
rereplicate_pred, c) ?: ret;
ret = bch2_replicas_gc2(c) ?: ret;
+
+ bch2_move_stats_exit(stats, c);
break;
case BCH_DATA_OP_MIGRATE:
if (op.migrate.dev >= c->sb.nr_devices)
@@ -1080,18 +1095,21 @@ int bch2_data_job(struct bch_fs *c,
ret = bch2_replicas_gc2(c) ?: ret;
ret = bch2_move_data(c,
- op.start_btree, op.start_pos,
- op.end_btree, op.end_pos,
+ (struct bbpos) { op.start_btree, op.start_pos },
+ (struct bbpos) { op.end_btree, op.end_pos },
NULL,
stats,
writepoint_hashed((unsigned long) current),
true,
migrate_pred, &op) ?: ret;
ret = bch2_replicas_gc2(c) ?: ret;
+
+ bch2_move_stats_exit(stats, c);
break;
case BCH_DATA_OP_REWRITE_OLD_NODES:
bch2_move_stats_init(stats, "rewrite_old_nodes");
ret = bch2_scan_old_btree_nodes(c, stats);
+ bch2_move_stats_exit(stats, c);
break;
default:
ret = -EINVAL;
@@ -1100,19 +1118,43 @@ int bch2_data_job(struct bch_fs *c,
return ret;
}
-static void bch2_moving_ctxt_to_text(struct printbuf *out, struct bch_fs *c, struct moving_context *ctxt)
+void bch2_move_stats_to_text(struct printbuf *out, struct bch_move_stats *stats)
{
- struct bch_move_stats *stats = ctxt->stats;
- struct moving_io *io;
+ prt_printf(out, "%s: data type=%s pos=",
+ stats->name,
+ bch2_data_types[stats->data_type]);
+ bch2_bbpos_to_text(out, stats->pos);
+ prt_newline(out);
+ printbuf_indent_add(out, 2);
+
+ prt_str(out, "keys moved: ");
+ prt_u64(out, atomic64_read(&stats->keys_moved));
+ prt_newline(out);
+
+ prt_str(out, "keys raced: ");
+ prt_u64(out, atomic64_read(&stats->keys_raced));
+ prt_newline(out);
+
+ prt_str(out, "bytes seen: ");
+ prt_human_readable_u64(out, atomic64_read(&stats->sectors_seen) << 9);
+ prt_newline(out);
- prt_printf(out, "%s (%ps):", stats->name, ctxt->fn);
+ prt_str(out, "bytes moved: ");
+ prt_human_readable_u64(out, atomic64_read(&stats->sectors_moved) << 9);
prt_newline(out);
- prt_printf(out, " data type %s btree_id %s position: ",
- bch2_data_types[stats->data_type],
- bch2_btree_ids[stats->btree_id]);
- bch2_bpos_to_text(out, stats->pos);
+ prt_str(out, "bytes raced: ");
+ prt_human_readable_u64(out, atomic64_read(&stats->sectors_raced) << 9);
prt_newline(out);
+
+ printbuf_indent_sub(out, 2);
+}
+
+static void bch2_moving_ctxt_to_text(struct printbuf *out, struct bch_fs *c, struct moving_context *ctxt)
+{
+ struct moving_io *io;
+
+ bch2_move_stats_to_text(out, ctxt->stats);
printbuf_indent_add(out, 2);
prt_printf(out, "reads: ios %u/%u sectors %u/%u",
@@ -1153,7 +1195,4 @@ void bch2_fs_move_init(struct bch_fs *c)
{
INIT_LIST_HEAD(&c->moving_context_list);
mutex_init(&c->moving_context_lock);
-
- INIT_LIST_HEAD(&c->data_progress_list);
- mutex_init(&c->data_progress_lock);
}
diff --git a/fs/bcachefs/move.h b/fs/bcachefs/move.h
index cbdd58db8782..07cf9d42643b 100644
--- a/fs/bcachefs/move.h
+++ b/fs/bcachefs/move.h
@@ -2,6 +2,7 @@
#ifndef _BCACHEFS_MOVE_H
#define _BCACHEFS_MOVE_H
+#include "bbpos.h"
#include "bcachefs_ioctl.h"
#include "btree_iter.h"
#include "buckets.h"
@@ -11,7 +12,7 @@
struct bch_read_bio;
struct moving_context {
- struct bch_fs *c;
+ struct btree_trans *trans;
struct list_head list;
void *fn;
@@ -37,13 +38,14 @@ struct moving_context {
wait_queue_head_t wait;
};
-#define move_ctxt_wait_event(_ctxt, _trans, _cond) \
+#define move_ctxt_wait_event(_ctxt, _cond) \
do { \
bool cond_finished = false; \
- bch2_moving_ctxt_do_pending_writes(_ctxt, _trans); \
+ bch2_moving_ctxt_do_pending_writes(_ctxt); \
\
if (_cond) \
break; \
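+		/* Don't hold btree locks while waiting - we may block for a while: */ \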
+ bch2_trans_unlock_long((_ctxt)->trans); \
__wait_event((_ctxt)->wait, \
bch2_moving_ctxt_next_pending_write(_ctxt) || \
(cond_finished = (_cond))); \
@@ -59,22 +61,60 @@ void bch2_moving_ctxt_init(struct moving_context *, struct bch_fs *,
struct bch_ratelimit *, struct bch_move_stats *,
struct write_point_specifier, bool);
struct moving_io *bch2_moving_ctxt_next_pending_write(struct moving_context *);
-void bch2_moving_ctxt_do_pending_writes(struct moving_context *,
- struct btree_trans *);
+void bch2_moving_ctxt_do_pending_writes(struct moving_context *);
+void bch2_move_ctxt_wait_for_io(struct moving_context *);
+int bch2_move_ratelimit(struct moving_context *);
+
+/* Inodes in different snapshots may have different IO options: */
+struct snapshot_io_opts_entry {
+ u32 snapshot;
+ struct bch_io_opts io_opts;
+};
+
+struct per_snapshot_io_opts {
+ u64 cur_inum;
+ struct bch_io_opts fs_io_opts;
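+	/* Cached options for each snapshot of cur_inum; rebuilt when we advance to a new inode: */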
+ DARRAY(struct snapshot_io_opts_entry) d;
+};
+
+static inline void per_snapshot_io_opts_init(struct per_snapshot_io_opts *io_opts, struct bch_fs *c)
+{
+ memset(io_opts, 0, sizeof(*io_opts));
+ io_opts->fs_io_opts = bch2_opts_to_inode_opts(c->opts);
+}
+
+static inline void per_snapshot_io_opts_exit(struct per_snapshot_io_opts *io_opts)
+{
+ darray_exit(&io_opts->d);
+}
+
+struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *,
+ struct per_snapshot_io_opts *, struct bkey_s_c);
+int bch2_move_get_io_opts_one(struct btree_trans *, struct bch_io_opts *, struct bkey_s_c);
int bch2_scan_old_btree_nodes(struct bch_fs *, struct bch_move_stats *);
+int bch2_move_extent(struct moving_context *,
+ struct move_bucket_in_flight *,
+ struct btree_iter *,
+ struct bkey_s_c,
+ struct bch_io_opts,
+ struct data_update_opts);
+
+int __bch2_move_data(struct moving_context *,
+ struct bbpos,
+ struct bbpos,
+ move_pred_fn, void *);
int bch2_move_data(struct bch_fs *,
- enum btree_id, struct bpos,
- enum btree_id, struct bpos,
+ struct bbpos start,
+ struct bbpos end,
struct bch_ratelimit *,
struct bch_move_stats *,
struct write_point_specifier,
bool,
move_pred_fn, void *);
-int __bch2_evacuate_bucket(struct btree_trans *,
- struct moving_context *,
+int __bch2_evacuate_bucket(struct moving_context *,
struct move_bucket_in_flight *,
struct bpos, int,
struct data_update_opts);
@@ -88,7 +128,10 @@ int bch2_data_job(struct bch_fs *,
struct bch_move_stats *,
struct bch_ioctl_data);
-void bch2_move_stats_init(struct bch_move_stats *stats, char *name);
+void bch2_move_stats_to_text(struct printbuf *, struct bch_move_stats *);
+void bch2_move_stats_exit(struct bch_move_stats *, struct bch_fs *);
+void bch2_move_stats_init(struct bch_move_stats *, char *);
+
void bch2_fs_moving_ctxts_to_text(struct printbuf *, struct bch_fs *);
void bch2_fs_move_init(struct bch_fs *);
diff --git a/fs/bcachefs/move_types.h b/fs/bcachefs/move_types.h
index baf1f8570b3f..e22841ef31e4 100644
--- a/fs/bcachefs/move_types.h
+++ b/fs/bcachefs/move_types.h
@@ -2,17 +2,17 @@
#ifndef _BCACHEFS_MOVE_TYPES_H
#define _BCACHEFS_MOVE_TYPES_H
+#include "bbpos_types.h"
+
struct bch_move_stats {
enum bch_data_type data_type;
- enum btree_id btree_id;
- struct bpos pos;
- struct list_head list;
+ struct bbpos pos;
char name[32];
atomic64_t keys_moved;
atomic64_t keys_raced;
- atomic64_t sectors_moved;
atomic64_t sectors_seen;
+ atomic64_t sectors_moved;
atomic64_t sectors_raced;
};
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 4017120baeee..0a0576326c5b 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -101,8 +101,7 @@ static int bch2_bucket_is_movable(struct btree_trans *trans,
return ret;
}
-static void move_buckets_wait(struct btree_trans *trans,
- struct moving_context *ctxt,
+static void move_buckets_wait(struct moving_context *ctxt,
struct buckets_in_flight *list,
bool flush)
{
@@ -111,7 +110,7 @@ static void move_buckets_wait(struct btree_trans *trans,
while ((i = list->first)) {
if (flush)
- move_ctxt_wait_event(ctxt, trans, !atomic_read(&i->count));
+ move_ctxt_wait_event(ctxt, !atomic_read(&i->count));
if (atomic_read(&i->count))
break;
@@ -129,7 +128,7 @@ static void move_buckets_wait(struct btree_trans *trans,
kfree(i);
}
- bch2_trans_unlock(trans);
+ bch2_trans_unlock_long(ctxt->trans);
}
static bool bucket_in_flight(struct buckets_in_flight *list,
@@ -140,11 +139,11 @@ static bool bucket_in_flight(struct buckets_in_flight *list,
typedef DARRAY(struct move_bucket) move_buckets;
-static int bch2_copygc_get_buckets(struct btree_trans *trans,
- struct moving_context *ctxt,
+static int bch2_copygc_get_buckets(struct moving_context *ctxt,
struct buckets_in_flight *buckets_in_flight,
move_buckets *buckets)
{
+ struct btree_trans *trans = ctxt->trans;
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_s_c k;
@@ -152,7 +151,7 @@ static int bch2_copygc_get_buckets(struct btree_trans *trans,
size_t saw = 0, in_flight = 0, not_movable = 0, sectors = 0;
int ret;
- move_buckets_wait(trans, ctxt, buckets_in_flight, false);
+ move_buckets_wait(ctxt, buckets_in_flight, false);
ret = bch2_btree_write_buffer_flush(trans);
if (bch2_fs_fatal_err_on(ret, c, "%s: error %s from bch2_btree_write_buffer_flush()",
@@ -188,10 +187,11 @@ static int bch2_copygc_get_buckets(struct btree_trans *trans,
}
noinline
-static int bch2_copygc(struct btree_trans *trans,
- struct moving_context *ctxt,
- struct buckets_in_flight *buckets_in_flight)
+static int bch2_copygc(struct moving_context *ctxt,
+ struct buckets_in_flight *buckets_in_flight,
+ bool *did_work)
{
+ struct btree_trans *trans = ctxt->trans;
struct bch_fs *c = trans->c;
struct data_update_opts data_opts = {
.btree_insert_flags = BCH_WATERMARK_copygc,
@@ -202,7 +202,7 @@ static int bch2_copygc(struct btree_trans *trans,
u64 moved = atomic64_read(&ctxt->stats->sectors_moved);
int ret = 0;
- ret = bch2_copygc_get_buckets(trans, ctxt, buckets_in_flight, &buckets);
+ ret = bch2_copygc_get_buckets(ctxt, buckets_in_flight, &buckets);
if (ret)
goto err;
@@ -221,10 +221,12 @@ static int bch2_copygc(struct btree_trans *trans,
break;
}
- ret = __bch2_evacuate_bucket(trans, ctxt, f, f->bucket.k.bucket,
+ ret = __bch2_evacuate_bucket(ctxt, f, f->bucket.k.bucket,
f->bucket.k.gen, data_opts);
if (ret)
goto err;
+
+ *did_work = true;
}
err:
darray_exit(&buckets);
@@ -300,24 +302,24 @@ void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c)
static int bch2_copygc_thread(void *arg)
{
struct bch_fs *c = arg;
- struct btree_trans *trans;
struct moving_context ctxt;
struct bch_move_stats move_stats;
struct io_clock *clock = &c->io_clock[WRITE];
- struct buckets_in_flight buckets;
+ struct buckets_in_flight *buckets;
u64 last, wait;
int ret = 0;
- memset(&buckets, 0, sizeof(buckets));
-
- ret = rhashtable_init(&buckets.table, &bch_move_bucket_params);
+ buckets = kzalloc(sizeof(struct buckets_in_flight), GFP_KERNEL);
+ if (!buckets)
+ return -ENOMEM;
+ ret = rhashtable_init(&buckets->table, &bch_move_bucket_params);
if (ret) {
+ kfree(buckets);
bch_err_msg(c, ret, "allocating copygc buckets in flight");
return ret;
}
set_freezable();
- trans = bch2_trans_get(c);
bch2_move_stats_init(&move_stats, "copygc");
bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
@@ -325,16 +327,18 @@ static int bch2_copygc_thread(void *arg)
false);
while (!ret && !kthread_should_stop()) {
- bch2_trans_unlock(trans);
+ bool did_work = false;
+
+ bch2_trans_unlock_long(ctxt.trans);
cond_resched();
if (!c->copy_gc_enabled) {
- move_buckets_wait(trans, &ctxt, &buckets, true);
+ move_buckets_wait(&ctxt, buckets, true);
kthread_wait_freezable(c->copy_gc_enabled);
}
if (unlikely(freezing(current))) {
- move_buckets_wait(trans, &ctxt, &buckets, true);
+ move_buckets_wait(&ctxt, buckets, true);
__refrigerator(false);
continue;
}
@@ -345,7 +349,7 @@ static int bch2_copygc_thread(void *arg)
if (wait > clock->max_slop) {
c->copygc_wait_at = last;
c->copygc_wait = last + wait;
- move_buckets_wait(trans, &ctxt, &buckets, true);
+ move_buckets_wait(&ctxt, buckets, true);
trace_and_count(c, copygc_wait, c, wait, last + wait);
bch2_kthread_io_clock_wait(clock, last + wait,
MAX_SCHEDULE_TIMEOUT);
@@ -355,16 +359,29 @@ static int bch2_copygc_thread(void *arg)
c->copygc_wait = 0;
c->copygc_running = true;
- ret = bch2_copygc(trans, &ctxt, &buckets);
+ ret = bch2_copygc(&ctxt, buckets, &did_work);
c->copygc_running = false;
wake_up(&c->copygc_running_wq);
+
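+		/*
+		 * Nothing to evacuate: wait until roughly 1/64th of the
+		 * smallest rw member's capacity has been written (using a
+		 * 128MB nominal capacity if there are no rw members) before
+		 * looking for work again:
+		 */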
+ if (!wait && !did_work) {
+ u64 min_member_capacity = bch2_min_rw_member_capacity(c);
+
+ if (min_member_capacity == U64_MAX)
+ min_member_capacity = 128 * 2048;
+
+ bch2_trans_unlock_long(ctxt.trans);
+ bch2_kthread_io_clock_wait(clock, last + (min_member_capacity >> 6),
+ MAX_SCHEDULE_TIMEOUT);
+ }
}
- move_buckets_wait(trans, &ctxt, &buckets, true);
- rhashtable_destroy(&buckets.table);
- bch2_trans_put(trans);
+ move_buckets_wait(&ctxt, buckets, true);
+
+ rhashtable_destroy(&buckets->table);
+ kfree(buckets);
bch2_moving_ctxt_exit(&ctxt);
+ bch2_move_stats_exit(&move_stats, c);
return 0;
}
diff --git a/fs/bcachefs/opts.c b/fs/bcachefs/opts.c
index 232f50c73a94..8dd4046cca41 100644
--- a/fs/bcachefs/opts.c
+++ b/fs/bcachefs/opts.c
@@ -12,11 +12,6 @@
#define x(t, n, ...) [n] = #t,
-const char * const bch2_iops_measurements[] = {
- BCH_IOPS_MEASUREMENTS()
- NULL
-};
-
const char * const bch2_error_actions[] = {
BCH_ERROR_ACTIONS()
NULL
@@ -42,9 +37,8 @@ const char * const bch2_sb_compat[] = {
NULL
};
-const char * const bch2_btree_ids[] = {
+const char * const __bch2_btree_ids[] = {
BCH_BTREE_IDS()
- "interior btree node",
NULL
};
@@ -271,14 +265,14 @@ int bch2_opt_validate(const struct bch_option *opt, u64 v, struct printbuf *err)
if (err)
prt_printf(err, "%s: too small (min %llu)",
opt->attr.name, opt->min);
- return -ERANGE;
+ return -BCH_ERR_ERANGE_option_too_small;
}
if (opt->max && v >= opt->max) {
if (err)
prt_printf(err, "%s: too big (max %llu)",
opt->attr.name, opt->max);
- return -ERANGE;
+ return -BCH_ERR_ERANGE_option_too_big;
}
if ((opt->flags & OPT_SB_FIELD_SECTORS) && (v & 511)) {
@@ -295,6 +289,9 @@ int bch2_opt_validate(const struct bch_option *opt, u64 v, struct printbuf *err)
return -EINVAL;
}
+ if (opt->fn.validate)
+ return opt->fn.validate(v, err);
+
return 0;
}
diff --git a/fs/bcachefs/opts.h b/fs/bcachefs/opts.h
index 55014336c5f7..8526f177450a 100644
--- a/fs/bcachefs/opts.h
+++ b/fs/bcachefs/opts.h
@@ -10,13 +10,12 @@
struct bch_fs;
-extern const char * const bch2_iops_measurements[];
extern const char * const bch2_error_actions[];
extern const char * const bch2_fsck_fix_opts[];
extern const char * const bch2_version_upgrade_opts[];
extern const char * const bch2_sb_features[];
extern const char * const bch2_sb_compat[];
-extern const char * const bch2_btree_ids[];
+extern const char * const __bch2_btree_ids[];
extern const char * const bch2_csum_types[];
extern const char * const bch2_csum_opts[];
extern const char * const bch2_compression_types[];
@@ -74,6 +73,7 @@ enum opt_type {
struct bch_opt_fn {
int (*parse)(struct bch_fs *, const char *, u64 *, struct printbuf *);
void (*to_text)(struct printbuf *, struct bch_fs *, struct bch_sb *, u64);
+ int (*validate)(u64, struct printbuf *);
};
/**
diff --git a/fs/bcachefs/printbuf.c b/fs/bcachefs/printbuf.c
index de41f9a14492..5e653eb81d54 100644
--- a/fs/bcachefs/printbuf.c
+++ b/fs/bcachefs/printbuf.c
@@ -415,11 +415,11 @@ void bch2_prt_bitflags(struct printbuf *out,
while (list[nr])
nr++;
- while (flags && (bit = __ffs(flags)) < nr) {
+ while (flags && (bit = __ffs64(flags)) < nr) {
if (!first)
bch2_prt_printf(out, ",");
first = false;
bch2_prt_printf(out, "%s", list[bit]);
- flags ^= 1 << bit;
+ flags ^= BIT_ULL(bit);
}
}
diff --git a/fs/bcachefs/quota.c b/fs/bcachefs/quota.c
index cb68ae44d597..a54647c36b85 100644
--- a/fs/bcachefs/quota.c
+++ b/fs/bcachefs/quota.c
@@ -59,17 +59,18 @@ const struct bch_sb_field_ops bch_sb_field_ops_quota = {
.to_text = bch2_sb_quota_to_text,
};
-int bch2_quota_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_quota_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
- if (k.k->p.inode >= QTYP_NR) {
- prt_printf(err, "invalid quota type (%llu >= %u)",
- k.k->p.inode, QTYP_NR);
- return -BCH_ERR_invalid_bkey;
- }
+ int ret = 0;
- return 0;
+ bkey_fsck_err_on(k.k->p.inode >= QTYP_NR, c, err,
+ quota_type_invalid,
+ "invalid quota type (%llu >= %u)",
+ k.k->p.inode, QTYP_NR);
+fsck_err:
+ return ret;
}
void bch2_quota_to_text(struct printbuf *out, struct bch_fs *c,
diff --git a/fs/bcachefs/quota.h b/fs/bcachefs/quota.h
index 2f463874a362..884f601f41c4 100644
--- a/fs/bcachefs/quota.h
+++ b/fs/bcachefs/quota.h
@@ -8,7 +8,7 @@
enum bkey_invalid_flags;
extern const struct bch_sb_field_ops bch_sb_field_ops_quota;
-int bch2_quota_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_quota_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
void bch2_quota_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c
index 568f1e8e7507..3319190b8d9c 100644
--- a/fs/bcachefs/rebalance.c
+++ b/fs/bcachefs/rebalance.c
@@ -1,15 +1,21 @@
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
+#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
+#include "btree_update.h"
+#include "btree_write_buffer.h"
#include "buckets.h"
#include "clock.h"
#include "compress.h"
#include "disk_groups.h"
#include "errcode.h"
+#include "error.h"
+#include "inode.h"
#include "move.h"
#include "rebalance.h"
+#include "subvolume.h"
#include "super-io.h"
#include "trace.h"
@@ -17,302 +23,396 @@
#include <linux/kthread.h>
#include <linux/sched/cputime.h>
-/*
- * Check if an extent should be moved:
- * returns -1 if it should not be moved, or
- * device of pointer that should be moved, if known, or INT_MAX if unknown
- */
-static bool rebalance_pred(struct bch_fs *c, void *arg,
- struct bkey_s_c k,
- struct bch_io_opts *io_opts,
- struct data_update_opts *data_opts)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- unsigned i;
-
- data_opts->rewrite_ptrs = 0;
- data_opts->target = io_opts->background_target;
- data_opts->extra_replicas = 0;
- data_opts->btree_insert_flags = 0;
-
- if (io_opts->background_compression &&
- !bch2_bkey_is_incompressible(k)) {
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
-
- i = 0;
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
- if (!p.ptr.cached &&
- p.crc.compression_type !=
- bch2_compression_opt_to_type(io_opts->background_compression))
- data_opts->rewrite_ptrs |= 1U << i;
- i++;
- }
- }
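+/*
+ * A cookie key at this offset in the rebalance_work btree requests a full
+ * scan of an inode (or of the whole filesystem, for inum 0). The cookie value
+ * is incremented on every request, so a completed scan only clears the key if
+ * no new scan was requested while it ran:
+ */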
+#define REBALANCE_WORK_SCAN_OFFSET (U64_MAX - 1)
- if (io_opts->background_target) {
- const struct bch_extent_ptr *ptr;
+static const char * const bch2_rebalance_state_strs[] = {
+#define x(t) #t,
+ BCH_REBALANCE_STATES()
+ NULL
+#undef x
+};
- i = 0;
- bkey_for_each_ptr(ptrs, ptr) {
- if (!ptr->cached &&
- !bch2_dev_in_target(c, ptr->dev, io_opts->background_target) &&
- bch2_target_accepts_data(c, BCH_DATA_user, io_opts->background_target))
- data_opts->rewrite_ptrs |= 1U << i;
- i++;
- }
- }
+static int __bch2_set_rebalance_needs_scan(struct btree_trans *trans, u64 inum)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bkey_i_cookie *cookie;
+ u64 v;
+ int ret;
- return data_opts->rewrite_ptrs != 0;
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
+ SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
+ BTREE_ITER_INTENT);
+ k = bch2_btree_iter_peek_slot(&iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ v = k.k->type == KEY_TYPE_cookie
+ ? le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie)
+ : 0;
+
+ cookie = bch2_trans_kmalloc(trans, sizeof(*cookie));
+ ret = PTR_ERR_OR_ZERO(cookie);
+ if (ret)
+ goto err;
+
+ bkey_cookie_init(&cookie->k_i);
+ cookie->k.p = iter.pos;
+ cookie->v.cookie = cpu_to_le64(v + 1);
+
+ ret = bch2_trans_update(trans, &iter, &cookie->k_i, 0);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
}
-void bch2_rebalance_add_key(struct bch_fs *c,
- struct bkey_s_c k,
- struct bch_io_opts *io_opts)
+int bch2_set_rebalance_needs_scan(struct bch_fs *c, u64 inum)
{
- struct data_update_opts update_opts = { 0 };
- struct bkey_ptrs_c ptrs;
- const struct bch_extent_ptr *ptr;
- unsigned i;
-
- if (!rebalance_pred(c, NULL, k, io_opts, &update_opts))
- return;
-
- i = 0;
- ptrs = bch2_bkey_ptrs_c(k);
- bkey_for_each_ptr(ptrs, ptr) {
- if ((1U << i) && update_opts.rewrite_ptrs)
- if (atomic64_add_return(k.k->size,
- &bch_dev_bkey_exists(c, ptr->dev)->rebalance_work) ==
- k.k->size)
- rebalance_wakeup(c);
- i++;
- }
+ int ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
+ __bch2_set_rebalance_needs_scan(trans, inum));
+ rebalance_wakeup(c);
+ return ret;
}
-void bch2_rebalance_add_work(struct bch_fs *c, u64 sectors)
+int bch2_set_fs_needs_rebalance(struct bch_fs *c)
{
- if (atomic64_add_return(sectors, &c->rebalance.work_unknown_dev) ==
- sectors)
- rebalance_wakeup(c);
+ return bch2_set_rebalance_needs_scan(c, 0);
}
-struct rebalance_work {
- int dev_most_full_idx;
- unsigned dev_most_full_percent;
- u64 dev_most_full_work;
- u64 dev_most_full_capacity;
- u64 total_work;
-};
+static int bch2_clear_rebalance_needs_scan(struct btree_trans *trans, u64 inum, u64 cookie)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ u64 v;
+ int ret;
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
+ SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
+ BTREE_ITER_INTENT);
+ k = bch2_btree_iter_peek_slot(&iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ v = k.k->type == KEY_TYPE_cookie
+ ? le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie)
+ : 0;
+
+ if (v == cookie)
+ ret = bch2_btree_delete_at(trans, &iter, 0);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
-static void rebalance_work_accumulate(struct rebalance_work *w,
- u64 dev_work, u64 unknown_dev, u64 capacity, int idx)
+static struct bkey_s_c next_rebalance_entry(struct btree_trans *trans,
+ struct btree_iter *work_iter)
{
- unsigned percent_full;
- u64 work = dev_work + unknown_dev;
+ return !kthread_should_stop()
+ ? bch2_btree_iter_peek(work_iter)
+ : bkey_s_c_null;
+}
- /* avoid divide by 0 */
- if (!capacity)
- return;
+static int bch2_bkey_clear_needs_rebalance(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k)
+{
+ struct bkey_i *n = bch2_bkey_make_mut(trans, iter, &k, 0);
+ int ret = PTR_ERR_OR_ZERO(n);
+ if (ret)
+ return ret;
+
+ extent_entry_drop(bkey_i_to_s(n),
+ (void *) bch2_bkey_rebalance_opts(bkey_i_to_s_c(n)));
+ return bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL);
+}
+
+static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans,
+ struct bpos work_pos,
+ struct btree_iter *extent_iter,
+ struct data_update_opts *data_opts)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_s_c k;
+
+ bch2_trans_iter_exit(trans, extent_iter);
+ bch2_trans_iter_init(trans, extent_iter,
+ work_pos.inode ? BTREE_ID_extents : BTREE_ID_reflink,
+ work_pos,
+ BTREE_ITER_ALL_SNAPSHOTS);
+ k = bch2_btree_iter_peek_slot(extent_iter);
+ if (bkey_err(k))
+ return k;
+
+ const struct bch_extent_rebalance *r = k.k ? bch2_bkey_rebalance_opts(k) : NULL;
+ if (!r) {
/* raced with the btree write buffer; nothing to do */

+ return bkey_s_c_null;
+ }
- if (work < dev_work || work < unknown_dev)
- work = U64_MAX;
- work = min(work, capacity);
+ memset(data_opts, 0, sizeof(*data_opts));
- percent_full = div64_u64(work * 100, capacity);
+ data_opts->rewrite_ptrs =
+ bch2_bkey_ptrs_need_rebalance(c, k, r->target, r->compression);
+ data_opts->target = r->target;
- if (percent_full >= w->dev_most_full_percent) {
- w->dev_most_full_idx = idx;
- w->dev_most_full_percent = percent_full;
- w->dev_most_full_work = work;
- w->dev_most_full_capacity = capacity;
+ if (!data_opts->rewrite_ptrs) {
+ /*
+ * Is the device we'd want to write to offline? Did the devices
+ * in the target change?
+ *
+ * Either way, we'll now need a full scan before this extent is
+ * picked up again:
+ */
+ int ret = bch2_bkey_clear_needs_rebalance(trans, extent_iter, k);
+ if (ret)
+ return bkey_s_c_err(ret);
+ return bkey_s_c_null;
}
- if (w->total_work + dev_work >= w->total_work &&
- w->total_work + dev_work >= dev_work)
- w->total_work += dev_work;
+ return k;
}
-static struct rebalance_work rebalance_work(struct bch_fs *c)
+noinline_for_stack
+static int do_rebalance_extent(struct moving_context *ctxt,
+ struct bpos work_pos,
+ struct btree_iter *extent_iter)
{
- struct bch_dev *ca;
- struct rebalance_work ret = { .dev_most_full_idx = -1 };
- u64 unknown_dev = atomic64_read(&c->rebalance.work_unknown_dev);
- unsigned i;
-
- for_each_online_member(ca, c, i)
- rebalance_work_accumulate(&ret,
- atomic64_read(&ca->rebalance_work),
- unknown_dev,
- bucket_to_sector(ca, ca->mi.nbuckets -
- ca->mi.first_bucket),
- i);
-
- rebalance_work_accumulate(&ret,
- unknown_dev, 0, c->capacity, -1);
+ struct btree_trans *trans = ctxt->trans;
+ struct bch_fs *c = trans->c;
+ struct bch_fs_rebalance *r = &trans->c->rebalance;
+ struct data_update_opts data_opts;
+ struct bch_io_opts io_opts;
+ struct bkey_s_c k;
+ struct bkey_buf sk;
+ int ret;
+
+ ctxt->stats = &r->work_stats;
+ r->state = BCH_REBALANCE_working;
+
+ bch2_bkey_buf_init(&sk);
+
+ ret = bkey_err(k = next_rebalance_extent(trans, work_pos,
+ extent_iter, &data_opts));
+ if (ret || !k.k)
+ goto out;
+ ret = bch2_move_get_io_opts_one(trans, &io_opts, k);
+ if (ret)
+ goto out;
+
+ atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
+
+ /*
+ * The iterator gets unlocked by __bch2_read_extent - need to
+ * save a copy of @k elsewhere:
+ */
+ bch2_bkey_buf_reassemble(&sk, c, k);
+ k = bkey_i_to_s_c(sk.k);
+
+ ret = bch2_move_extent(ctxt, NULL, extent_iter, k, io_opts, data_opts);
+ if (ret) {
+ if (bch2_err_matches(ret, ENOMEM)) {
+ /* memory allocation failure, wait for some IO to finish */
+ bch2_move_ctxt_wait_for_io(ctxt);
+ ret = -BCH_ERR_transaction_restart_nested;
+ }
+
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto out;
+
+ /* skip it and continue, XXX signal failure */
+ ret = 0;
+ }
+out:
+ bch2_bkey_buf_exit(&sk, c);
return ret;
}
-static void rebalance_work_reset(struct bch_fs *c)
+static bool rebalance_pred(struct bch_fs *c, void *arg,
+ struct bkey_s_c k,
+ struct bch_io_opts *io_opts,
+ struct data_update_opts *data_opts)
{
- struct bch_dev *ca;
- unsigned i;
+ unsigned target, compression;
+
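+	/*
+	 * Keys in the reflink btree have a zero inode field: indirect extents
+	 * have no inode to read options from, so use the rebalance options
+	 * stashed in the extent itself when present:
+	 */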
+ if (k.k->p.inode) {
+ target = io_opts->background_target;
+ compression = io_opts->background_compression ?: io_opts->compression;
+ } else {
+ const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);
- for_each_online_member(ca, c, i)
- atomic64_set(&ca->rebalance_work, 0);
+ target = r ? r->target : io_opts->background_target;
+ compression = r ? r->compression :
+ (io_opts->background_compression ?: io_opts->compression);
+ }
- atomic64_set(&c->rebalance.work_unknown_dev, 0);
+ data_opts->rewrite_ptrs = bch2_bkey_ptrs_need_rebalance(c, k, target, compression);
+ data_opts->target = target;
+ return data_opts->rewrite_ptrs != 0;
}
-static unsigned long curr_cputime(void)
+static int do_rebalance_scan(struct moving_context *ctxt, u64 inum, u64 cookie)
{
- u64 utime, stime;
+ struct btree_trans *trans = ctxt->trans;
+ struct bch_fs_rebalance *r = &trans->c->rebalance;
+ int ret;
+
+ bch2_move_stats_init(&r->scan_stats, "rebalance_scan");
+ ctxt->stats = &r->scan_stats;
- task_cputime_adjusted(current, &utime, &stime);
- return nsecs_to_jiffies(utime + stime);
+ if (!inum) {
+ r->scan_start = BBPOS_MIN;
+ r->scan_end = BBPOS_MAX;
+ } else {
+ r->scan_start = BBPOS(BTREE_ID_extents, POS(inum, 0));
+ r->scan_end = BBPOS(BTREE_ID_extents, POS(inum, U64_MAX));
+ }
+
+ r->state = BCH_REBALANCE_scanning;
+
+ ret = __bch2_move_data(ctxt, r->scan_start, r->scan_end, rebalance_pred, NULL) ?:
+ commit_do(trans, NULL, NULL, BTREE_INSERT_NOFAIL,
+ bch2_clear_rebalance_needs_scan(trans, inum, cookie));
+
+ bch2_move_stats_exit(&r->scan_stats, trans->c);
+ return ret;
}
-static int bch2_rebalance_thread(void *arg)
+static void rebalance_wait(struct bch_fs *c)
{
- struct bch_fs *c = arg;
struct bch_fs_rebalance *r = &c->rebalance;
struct io_clock *clock = &c->io_clock[WRITE];
- struct rebalance_work w, p;
- struct bch_move_stats move_stats;
- unsigned long start, prev_start;
- unsigned long prev_run_time, prev_run_cputime;
- unsigned long cputime, prev_cputime;
- u64 io_start;
- long throttle;
+ u64 now = atomic64_read(&clock->now);
+ u64 min_member_capacity = bch2_min_rw_member_capacity(c);
- set_freezable();
+ if (min_member_capacity == U64_MAX)
+ min_member_capacity = 128 * 2048;
+
+ r->wait_iotime_end = now + (min_member_capacity >> 6);
- io_start = atomic64_read(&clock->now);
- p = rebalance_work(c);
- prev_start = jiffies;
- prev_cputime = curr_cputime();
+ if (r->state != BCH_REBALANCE_waiting) {
+ r->wait_iotime_start = now;
+ r->wait_wallclock_start = ktime_get_real_ns();
+ r->state = BCH_REBALANCE_waiting;
+ }
+
+ bch2_kthread_io_clock_wait(clock, r->wait_iotime_end, MAX_SCHEDULE_TIMEOUT);
+}
- bch2_move_stats_init(&move_stats, "rebalance");
- while (!kthread_wait_freezable(r->enabled)) {
- cond_resched();
+static int do_rebalance(struct moving_context *ctxt)
+{
+ struct btree_trans *trans = ctxt->trans;
+ struct bch_fs *c = trans->c;
+ struct bch_fs_rebalance *r = &c->rebalance;
+ struct btree_iter rebalance_work_iter, extent_iter = { NULL };
+ struct bkey_s_c k;
+ int ret = 0;
- start = jiffies;
- cputime = curr_cputime();
+ bch2_move_stats_init(&r->work_stats, "rebalance_work");
+ bch2_move_stats_init(&r->scan_stats, "rebalance_scan");
- prev_run_time = start - prev_start;
- prev_run_cputime = cputime - prev_cputime;
+ bch2_trans_iter_init(trans, &rebalance_work_iter,
+ BTREE_ID_rebalance_work, POS_MIN,
+ BTREE_ITER_ALL_SNAPSHOTS);
- w = rebalance_work(c);
- BUG_ON(!w.dev_most_full_capacity);
+ while (!bch2_move_ratelimit(ctxt) &&
+ !kthread_wait_freezable(r->enabled)) {
+ bch2_trans_begin(trans);
- if (!w.total_work) {
- r->state = REBALANCE_WAITING;
- kthread_wait_freezable(rebalance_work(c).total_work);
+ ret = bkey_err(k = next_rebalance_entry(trans, &rebalance_work_iter));
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
- }
+ if (ret || !k.k)
+ break;
- /*
- * If there isn't much work to do, throttle cpu usage:
- */
- throttle = prev_run_cputime * 100 /
- max(1U, w.dev_most_full_percent) -
- prev_run_time;
-
- if (w.dev_most_full_percent < 20 && throttle > 0) {
- r->throttled_until_iotime = io_start +
- div_u64(w.dev_most_full_capacity *
- (20 - w.dev_most_full_percent),
- 50);
-
- if (atomic64_read(&clock->now) + clock->max_slop <
- r->throttled_until_iotime) {
- r->throttled_until_cputime = start + throttle;
- r->state = REBALANCE_THROTTLED;
-
- bch2_kthread_io_clock_wait(clock,
- r->throttled_until_iotime,
- throttle);
- continue;
- }
- }
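+		/*
+		 * Cookie keys request a full scan of an inode (or the whole
+		 * filesystem); anything else points at a single extent with
+		 * pending rebalance work:
+		 */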
+ ret = k.k->type == KEY_TYPE_cookie
+ ? do_rebalance_scan(ctxt, k.k->p.inode,
+ le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie))
+ : do_rebalance_extent(ctxt, k.k->p, &extent_iter);
- /* minimum 1 mb/sec: */
- r->pd.rate.rate =
- max_t(u64, 1 << 11,
- r->pd.rate.rate *
- max(p.dev_most_full_percent, 1U) /
- max(w.dev_most_full_percent, 1U));
-
- io_start = atomic64_read(&clock->now);
- p = w;
- prev_start = start;
- prev_cputime = cputime;
-
- r->state = REBALANCE_RUNNING;
- memset(&move_stats, 0, sizeof(move_stats));
- rebalance_work_reset(c);
-
- bch2_move_data(c,
- 0, POS_MIN,
- BTREE_ID_NR, POS_MAX,
- /* ratelimiting disabled for now */
- NULL, /* &r->pd.rate, */
- &move_stats,
- writepoint_ptr(&c->rebalance_write_point),
- true,
- rebalance_pred, NULL);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ continue;
+ if (ret)
+ break;
+
+ bch2_btree_iter_advance(&rebalance_work_iter);
}
- return 0;
+ bch2_trans_iter_exit(trans, &extent_iter);
+ bch2_trans_iter_exit(trans, &rebalance_work_iter);
+ bch2_move_stats_exit(&r->scan_stats, c);
+
+ if (!ret &&
+ !kthread_should_stop() &&
+ !atomic64_read(&r->work_stats.sectors_seen) &&
+ !atomic64_read(&r->scan_stats.sectors_seen)) {
+ bch2_trans_unlock_long(trans);
+ rebalance_wait(c);
+ }
+
+ if (!bch2_err_matches(ret, EROFS))
+ bch_err_fn(c, ret);
+ return ret;
}
-void bch2_rebalance_work_to_text(struct printbuf *out, struct bch_fs *c)
+static int bch2_rebalance_thread(void *arg)
{
+ struct bch_fs *c = arg;
struct bch_fs_rebalance *r = &c->rebalance;
- struct rebalance_work w = rebalance_work(c);
+ struct moving_context ctxt;
+ int ret;
- if (!out->nr_tabstops)
- printbuf_tabstop_push(out, 20);
+ set_freezable();
- prt_printf(out, "fullest_dev (%i):", w.dev_most_full_idx);
- prt_tab(out);
+ bch2_moving_ctxt_init(&ctxt, c, NULL, &r->work_stats,
+ writepoint_ptr(&c->rebalance_write_point),
+ true);
- prt_human_readable_u64(out, w.dev_most_full_work << 9);
- prt_printf(out, "/");
- prt_human_readable_u64(out, w.dev_most_full_capacity << 9);
- prt_newline(out);
+ while (!kthread_should_stop() &&
+ !(ret = do_rebalance(&ctxt)))
+ ;
- prt_printf(out, "total work:");
- prt_tab(out);
+ bch2_moving_ctxt_exit(&ctxt);
- prt_human_readable_u64(out, w.total_work << 9);
- prt_printf(out, "/");
- prt_human_readable_u64(out, c->capacity << 9);
- prt_newline(out);
+ return 0;
+}
+
+void bch2_rebalance_status_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ struct bch_fs_rebalance *r = &c->rebalance;
- prt_printf(out, "rate:");
- prt_tab(out);
- prt_printf(out, "%u", r->pd.rate.rate);
+ prt_str(out, bch2_rebalance_state_strs[r->state]);
prt_newline(out);
+ printbuf_indent_add(out, 2);
switch (r->state) {
- case REBALANCE_WAITING:
- prt_printf(out, "waiting");
+ case BCH_REBALANCE_waiting: {
+ u64 now = atomic64_read(&c->io_clock[WRITE].now);
+
+ prt_str(out, "io wait duration: ");
+ bch2_prt_human_readable_s64(out, r->wait_iotime_end - r->wait_iotime_start);
+ prt_newline(out);
+
+ prt_str(out, "io wait remaining: ");
+ bch2_prt_human_readable_s64(out, r->wait_iotime_end - now);
+ prt_newline(out);
+
+ prt_str(out, "duration waited: ");
+ bch2_pr_time_units(out, ktime_get_real_ns() - r->wait_wallclock_start);
+ prt_newline(out);
break;
- case REBALANCE_THROTTLED:
- prt_printf(out, "throttled for %lu sec or ",
- (r->throttled_until_cputime - jiffies) / HZ);
- prt_human_readable_u64(out,
- (r->throttled_until_iotime -
- atomic64_read(&c->io_clock[WRITE].now)) << 9);
- prt_printf(out, " io");
+ }
+ case BCH_REBALANCE_working:
+ bch2_move_stats_to_text(out, &r->work_stats);
break;
- case REBALANCE_RUNNING:
- prt_printf(out, "running");
+ case BCH_REBALANCE_scanning:
+ bch2_move_stats_to_text(out, &r->scan_stats);
break;
}
prt_newline(out);
+ printbuf_indent_sub(out, 2);
}
void bch2_rebalance_stop(struct bch_fs *c)
@@ -361,6 +461,4 @@ int bch2_rebalance_start(struct bch_fs *c)
void bch2_fs_rebalance_init(struct bch_fs *c)
{
bch2_pd_controller_init(&c->rebalance.pd);
-
- atomic64_set(&c->rebalance.work_unknown_dev, S64_MAX);
}
diff --git a/fs/bcachefs/rebalance.h b/fs/bcachefs/rebalance.h
index 7ade0bb81cce..28a52638f16c 100644
--- a/fs/bcachefs/rebalance.h
+++ b/fs/bcachefs/rebalance.h
@@ -4,6 +4,9 @@
#include "rebalance_types.h"
+int bch2_set_rebalance_needs_scan(struct bch_fs *, u64 inum);
+int bch2_set_fs_needs_rebalance(struct bch_fs *);
+
static inline void rebalance_wakeup(struct bch_fs *c)
{
struct task_struct *p;
@@ -15,11 +18,7 @@ static inline void rebalance_wakeup(struct bch_fs *c)
rcu_read_unlock();
}
-void bch2_rebalance_add_key(struct bch_fs *, struct bkey_s_c,
- struct bch_io_opts *);
-void bch2_rebalance_add_work(struct bch_fs *, u64);
-
-void bch2_rebalance_work_to_text(struct printbuf *, struct bch_fs *);
+void bch2_rebalance_status_to_text(struct printbuf *, struct bch_fs *);
void bch2_rebalance_stop(struct bch_fs *);
int bch2_rebalance_start(struct bch_fs *);
diff --git a/fs/bcachefs/rebalance_types.h b/fs/bcachefs/rebalance_types.h
index 7462a92e9598..0fffb536c1d0 100644
--- a/fs/bcachefs/rebalance_types.h
+++ b/fs/bcachefs/rebalance_types.h
@@ -2,25 +2,36 @@
#ifndef _BCACHEFS_REBALANCE_TYPES_H
#define _BCACHEFS_REBALANCE_TYPES_H
+#include "bbpos_types.h"
#include "move_types.h"
-enum rebalance_state {
- REBALANCE_WAITING,
- REBALANCE_THROTTLED,
- REBALANCE_RUNNING,
+#define BCH_REBALANCE_STATES() \
+ x(waiting) \
+ x(working) \
+ x(scanning)
+
+enum bch_rebalance_states {
+#define x(t) BCH_REBALANCE_##t,
+ BCH_REBALANCE_STATES()
+#undef x
};
struct bch_fs_rebalance {
- struct task_struct __rcu *thread;
+ struct task_struct __rcu *thread;
struct bch_pd_controller pd;
- atomic64_t work_unknown_dev;
+ enum bch_rebalance_states state;
+ u64 wait_iotime_start;
+ u64 wait_iotime_end;
+ u64 wait_wallclock_start;
+
+ struct bch_move_stats work_stats;
- enum rebalance_state state;
- u64 throttled_until_iotime;
- unsigned long throttled_until_cputime;
+ struct bbpos scan_start;
+ struct bbpos scan_end;
+ struct bch_move_stats scan_stats;
- unsigned enabled:1;
+ unsigned enabled:1;
};
#endif /* _BCACHEFS_REBALANCE_TYPES_H */
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 4cd660650e5b..9c30500ce920 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -23,6 +23,7 @@
#include "logged_ops.h"
#include "move.h"
#include "quota.h"
+#include "rebalance.h"
#include "recovery.h"
#include "replicas.h"
#include "sb-clean.h"
@@ -182,7 +183,7 @@ static int bch2_journal_replay(struct bch_fs *c)
bch2_journal_replay_key(trans, k));
if (ret) {
bch_err(c, "journal replay: error while replaying key at btree %s level %u: %s",
- bch2_btree_ids[k->btree_id], k->level, bch2_err_str(ret));
+ bch2_btree_id_str(k->btree_id), k->level, bch2_err_str(ret));
goto err;
}
}
@@ -225,7 +226,7 @@ static int journal_replay_entry_early(struct bch_fs *c,
if (entry->u64s) {
r->level = entry->level;
- bkey_copy(&r->key, &entry->start[0]);
+ bkey_copy(&r->key, (struct bkey_i *) entry->start);
r->error = 0;
} else {
r->error = -EIO;
@@ -364,10 +365,12 @@ static int read_btree_roots(struct bch_fs *c)
}
if (r->error) {
- __fsck_err(c, btree_id_is_alloc(i)
+ __fsck_err(c,
+ btree_id_is_alloc(i)
? FSCK_CAN_IGNORE : 0,
+ btree_root_bkey_invalid,
"invalid btree root %s",
- bch2_btree_ids[i]);
+ bch2_btree_id_str(i));
if (i == BTREE_ID_alloc)
c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
}
@@ -375,8 +378,9 @@ static int read_btree_roots(struct bch_fs *c)
ret = bch2_btree_root_read(c, i, &r->key, r->level);
if (ret) {
fsck_err(c,
+ btree_root_read_error,
"error reading btree root %s",
- bch2_btree_ids[i]);
+ bch2_btree_id_str(i));
if (btree_id_is_alloc(i))
c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
ret = 0;
@@ -713,6 +717,7 @@ int bch2_fs_recovery(struct bch_fs *c)
if (mustfix_fsck_err_on(c->sb.clean &&
last_journal_entry &&
!journal_entry_empty(last_journal_entry), c,
+ clean_but_journal_not_empty,
"filesystem marked clean but journal not empty")) {
c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
@@ -720,7 +725,9 @@ int bch2_fs_recovery(struct bch_fs *c)
}
if (!last_journal_entry) {
- fsck_err_on(!c->sb.clean, c, "no journal entries found");
+ fsck_err_on(!c->sb.clean, c,
+ dirty_but_no_journal_entries,
+ "no journal entries found");
if (clean)
goto use_clean;
@@ -728,6 +735,13 @@ int bch2_fs_recovery(struct bch_fs *c)
if (*i) {
last_journal_entry = &(*i)->j;
(*i)->ignore = false;
+ /*
+ * This was probably a NO_FLUSH entry,
+ * so last_seq was garbage - but since we're
+ * only using a single journal entry, we can
+ * set it here:
+ */
+ (*i)->j.last_seq = (*i)->j.seq;
break;
}
}
@@ -901,7 +915,7 @@ out:
}
kfree(clean);
- if (!ret && test_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags)) {
+ if (!ret && test_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags)) {
bch2_fs_read_write_early(c);
bch2_delete_dead_snapshots_async(c);
}
@@ -946,16 +960,12 @@ int bch2_fs_initialize(struct bch_fs *c)
for (i = 0; i < BTREE_ID_NR; i++)
bch2_btree_root_alloc(c, i);
- for_each_online_member(ca, c, i)
+ for_each_member_device(ca, c, i)
bch2_dev_usage_init(ca);
- for_each_online_member(ca, c, i) {
- ret = bch2_dev_journal_alloc(ca);
- if (ret) {
- percpu_ref_put(&ca->io_ref);
- goto err;
- }
- }
+ ret = bch2_fs_journal_alloc(c);
+ if (ret)
+ goto err;
/*
* journal_res_get() will crash if called before this has
@@ -973,15 +983,13 @@ int bch2_fs_initialize(struct bch_fs *c)
* btree updates
*/
bch_verbose(c, "marking superblocks");
- for_each_member_device(ca, c, i) {
- ret = bch2_trans_mark_dev_sb(c, ca);
- if (ret) {
- percpu_ref_put(&ca->ref);
- goto err;
- }
+ ret = bch2_trans_mark_dev_sbs(c);
+ bch_err_msg(c, ret, "marking superblocks");
+ if (ret)
+ goto err;
+ for_each_online_member(ca, c, i)
ca->new_fs_bucket_idx = 0;
- }
ret = bch2_fs_freespace_init(c);
if (ret)
diff --git a/fs/bcachefs/recovery_types.h b/fs/bcachefs/recovery_types.h
index fbfa9d831d6f..515e3d62c2ac 100644
--- a/fs/bcachefs/recovery_types.h
+++ b/fs/bcachefs/recovery_types.h
@@ -14,6 +14,8 @@
x(snapshots_read, PASS_ALWAYS) \
x(check_topology, 0) \
x(check_allocations, PASS_FSCK) \
+ x(trans_mark_dev_sbs, PASS_ALWAYS|PASS_SILENT) \
+ x(fs_journal_alloc, PASS_ALWAYS|PASS_SILENT) \
x(set_may_go_rw, PASS_ALWAYS|PASS_SILENT) \
x(journal_replay, PASS_ALWAYS) \
x(check_alloc_info, PASS_FSCK) \
@@ -27,11 +29,12 @@
x(check_snapshot_trees, PASS_FSCK) \
x(check_snapshots, PASS_FSCK) \
x(check_subvols, PASS_FSCK) \
- x(delete_dead_snapshots, PASS_FSCK|PASS_UNCLEAN) \
+ x(delete_dead_snapshots, PASS_FSCK) \
x(fs_upgrade_for_subvolumes, 0) \
x(resume_logged_ops, PASS_ALWAYS) \
x(check_inodes, PASS_FSCK) \
x(check_extents, PASS_FSCK) \
+ x(check_indirect_extents, PASS_FSCK) \
x(check_dirents, PASS_FSCK) \
x(check_xattrs, PASS_FSCK) \
x(check_root, PASS_FSCK) \
@@ -39,6 +42,7 @@
x(check_nlinks, PASS_FSCK) \
x(delete_dead_inodes, PASS_FSCK|PASS_UNCLEAN) \
x(fix_reflink_p, 0) \
+ x(set_fs_needs_rebalance, 0) \
enum bch_recovery_pass {
#define x(n, when) BCH_RECOVERY_PASS_##n,
diff --git a/fs/bcachefs/reflink.c b/fs/bcachefs/reflink.c
index d77d0ea9afff..6e1bfe9feb59 100644
--- a/fs/bcachefs/reflink.c
+++ b/fs/bcachefs/reflink.c
@@ -7,6 +7,7 @@
#include "inode.h"
#include "io_misc.h"
#include "io_write.h"
+#include "rebalance.h"
#include "reflink.h"
#include "subvolume.h"
#include "super-io.h"
@@ -27,7 +28,7 @@ static inline unsigned bkey_type_to_indirect(const struct bkey *k)
/* reflink pointers */
-int bch2_reflink_p_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_reflink_p_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
@@ -74,7 +75,7 @@ bool bch2_reflink_p_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r
/* indirect extents */
-int bch2_reflink_v_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_reflink_v_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
@@ -103,28 +104,29 @@ bool bch2_reflink_v_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r
}
#endif
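+/*
+ * If an update would leave an indirect extent with a refcount of zero, turn
+ * it into a deletion instead of writing it out:
+ */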
+static inline void check_indirect_extent_deleting(struct bkey_i *new, unsigned *flags)
+{
+ if ((*flags & BTREE_TRIGGER_INSERT) && !*bkey_refcount(new)) {
+ new->k.type = KEY_TYPE_deleted;
+ new->k.size = 0;
+ set_bkey_val_u64s(&new->k, 0);
+ *flags &= ~BTREE_TRIGGER_INSERT;
+ }
+}
+
int bch2_trans_mark_reflink_v(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c old, struct bkey_i *new,
unsigned flags)
{
- if (!(flags & BTREE_TRIGGER_OVERWRITE)) {
- struct bkey_i_reflink_v *r = bkey_i_to_reflink_v(new);
-
- if (!r->v.refcount) {
- r->k.type = KEY_TYPE_deleted;
- r->k.size = 0;
- set_bkey_val_u64s(&r->k, 0);
- return 0;
- }
- }
+ check_indirect_extent_deleting(new, &flags);
return bch2_trans_mark_extent(trans, btree_id, level, old, new, flags);
}
/* indirect inline data */
-int bch2_indirect_inline_data_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_indirect_inline_data_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
@@ -132,7 +134,7 @@ int bch2_indirect_inline_data_invalid(const struct bch_fs *c, struct bkey_s_c k,
}
void bch2_indirect_inline_data_to_text(struct printbuf *out,
- struct bch_fs *c, struct bkey_s_c k)
+ struct bch_fs *c, struct bkey_s_c k)
{
struct bkey_s_c_indirect_inline_data d = bkey_s_c_to_indirect_inline_data(k);
unsigned datalen = bkey_inline_data_bytes(k.k);
@@ -147,16 +149,7 @@ int bch2_trans_mark_indirect_inline_data(struct btree_trans *trans,
struct bkey_s_c old, struct bkey_i *new,
unsigned flags)
{
- if (!(flags & BTREE_TRIGGER_OVERWRITE)) {
- struct bkey_i_indirect_inline_data *r =
- bkey_i_to_indirect_inline_data(new);
-
- if (!r->v.refcount) {
- r->k.type = KEY_TYPE_deleted;
- r->k.size = 0;
- set_bkey_val_u64s(&r->k, 0);
- }
- }
+ check_indirect_extent_deleting(new, &flags);
return 0;
}
@@ -260,8 +253,9 @@ s64 bch2_remap_range(struct bch_fs *c,
struct bpos dst_start = POS(dst_inum.inum, dst_offset);
struct bpos src_start = POS(src_inum.inum, src_offset);
struct bpos dst_end = dst_start, src_end = src_start;
+ struct bch_io_opts opts;
struct bpos src_want;
- u64 dst_done;
+ u64 dst_done = 0;
u32 dst_snapshot, src_snapshot;
int ret = 0, ret2 = 0;
@@ -277,6 +271,10 @@ s64 bch2_remap_range(struct bch_fs *c,
bch2_bkey_buf_init(&new_src);
trans = bch2_trans_get(c);
+ ret = bch2_inum_opts_get(trans, src_inum, &opts);
+ if (ret)
+ goto err;
+
bch2_trans_iter_init(trans, &src_iter, BTREE_ID_extents, src_start,
BTREE_ITER_INTENT);
bch2_trans_iter_init(trans, &dst_iter, BTREE_ID_extents, dst_start,
@@ -360,10 +358,13 @@ s64 bch2_remap_range(struct bch_fs *c,
min(src_k.k->p.offset - src_want.offset,
dst_end.offset - dst_iter.pos.offset));
- ret = bch2_extent_update(trans, dst_inum, &dst_iter,
- new_dst.k, &disk_res,
- new_i_size, i_sectors_delta,
- true);
+ ret = bch2_bkey_set_needs_rebalance(c, new_dst.k,
+ opts.background_target,
+ opts.background_compression) ?:
+ bch2_extent_update(trans, dst_inum, &dst_iter,
+ new_dst.k, &disk_res,
+ new_i_size, i_sectors_delta,
+ true);
bch2_disk_reservation_put(c, &disk_res);
}
bch2_trans_iter_exit(trans, &dst_iter);
@@ -394,7 +395,7 @@ s64 bch2_remap_range(struct bch_fs *c,
bch2_trans_iter_exit(trans, &inode_iter);
} while (bch2_err_matches(ret2, BCH_ERR_transaction_restart));
-
+err:
bch2_trans_put(trans);
bch2_bkey_buf_exit(&new_src, c);
bch2_bkey_buf_exit(&new_dst, c);
diff --git a/fs/bcachefs/reflink.h b/fs/bcachefs/reflink.h
index fe52538efb52..8ccf3f9c4939 100644
--- a/fs/bcachefs/reflink.h
+++ b/fs/bcachefs/reflink.h
@@ -4,7 +4,7 @@
enum bkey_invalid_flags;
-int bch2_reflink_p_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_reflink_p_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
void bch2_reflink_p_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
@@ -19,7 +19,7 @@ bool bch2_reflink_p_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
.min_val_size = 16, \
})
-int bch2_reflink_v_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_reflink_v_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
void bch2_reflink_v_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
@@ -35,7 +35,7 @@ int bch2_trans_mark_reflink_v(struct btree_trans *, enum btree_id, unsigned,
.min_val_size = 8, \
})
-int bch2_indirect_inline_data_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_indirect_inline_data_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
void bch2_indirect_inline_data_to_text(struct printbuf *,
struct bch_fs *, struct bkey_s_c);
diff --git a/fs/bcachefs/replicas.c b/fs/bcachefs/replicas.c
index cef2a0447b86..1c3ae13bfced 100644
--- a/fs/bcachefs/replicas.c
+++ b/fs/bcachefs/replicas.c
@@ -462,18 +462,13 @@ int bch2_replicas_gc_end(struct bch_fs *c, int ret)
{
lockdep_assert_held(&c->replicas_gc_lock);
- if (ret)
- goto err;
-
mutex_lock(&c->sb_lock);
percpu_down_write(&c->mark_lock);
- ret = bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc);
- if (ret)
- goto err;
+ ret = ret ?:
+ bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc) ?:
+ replicas_table_update(c, &c->replicas_gc);
- ret = replicas_table_update(c, &c->replicas_gc);
-err:
kfree(c->replicas_gc.entries);
c->replicas_gc.entries = NULL;
@@ -579,12 +574,9 @@ retry:
bch2_cpu_replicas_sort(&new);
- ret = bch2_cpu_replicas_to_sb_replicas(c, &new);
- if (ret)
- goto err;
+ ret = bch2_cpu_replicas_to_sb_replicas(c, &new) ?:
+ replicas_table_update(c, &new);
- ret = replicas_table_update(c, &new);
-err:
kfree(new.entries);
percpu_up_write(&c->mark_lock);
diff --git a/fs/bcachefs/sb-clean.c b/fs/bcachefs/sb-clean.c
index 61203d7c8d36..e151ada1c8bd 100644
--- a/fs/bcachefs/sb-clean.c
+++ b/fs/bcachefs/sb-clean.c
@@ -82,6 +82,7 @@ int bch2_verify_superblock_clean(struct bch_fs *c,
int ret = 0;
if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
+ sb_clean_journal_seq_mismatch,
"superblock journal seq (%llu) doesn't match journal (%llu) after clean shutdown",
le64_to_cpu(clean->journal_seq),
le64_to_cpu(j->seq))) {
@@ -119,6 +120,7 @@ int bch2_verify_superblock_clean(struct bch_fs *c,
k1->k.u64s != k2->k.u64s ||
memcmp(k1, k2, bkey_bytes(&k1->k)) ||
l1 != l2, c,
+ sb_clean_btree_root_mismatch,
"superblock btree root %u doesn't match journal after clean shutdown\n"
"sb: l=%u %s\n"
"journal: l=%u %s\n", i,
@@ -140,6 +142,7 @@ struct bch_sb_field_clean *bch2_read_superblock_clean(struct bch_fs *c)
sb_clean = bch2_sb_field_get(c->disk_sb.sb, clean);
if (fsck_err_on(!sb_clean, c,
+ sb_clean_missing,
"superblock marked clean but clean section not present")) {
SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
c->sb.clean = false;
@@ -373,7 +376,7 @@ void bch2_fs_mark_clean(struct bch_fs *c)
entry = sb_clean->start;
bch2_journal_super_entries_add_common(c, &entry, 0);
- entry = bch2_btree_roots_to_journal_entries(c, entry, entry);
+ entry = bch2_btree_roots_to_journal_entries(c, entry, 0);
BUG_ON((void *) entry > vstruct_end(&sb_clean->field));
memset(entry, 0,
diff --git a/fs/bcachefs/sb-errors.c b/fs/bcachefs/sb-errors.c
new file mode 100644
index 000000000000..f0930ab7f036
--- /dev/null
+++ b/fs/bcachefs/sb-errors.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "sb-errors.h"
+#include "super-io.h"
+
+static const char * const bch2_sb_error_strs[] = {
+#define x(t, n, ...) [n] = #t,
+ BCH_SB_ERRS()
+ NULL
+};
+
+static void bch2_sb_error_id_to_text(struct printbuf *out, enum bch_sb_error_id id)
+{
+ if (id < BCH_SB_ERR_MAX)
+ prt_str(out, bch2_sb_error_strs[id]);
+ else
+ prt_printf(out, "(unknown error %u)", id);
+}
+
+static inline unsigned bch2_sb_field_errors_nr_entries(struct bch_sb_field_errors *e)
+{
+ return e
+ ? (bch2_sb_field_bytes(&e->field) - sizeof(*e)) / sizeof(e->entries[0])
+ : 0;
+}
+
+static inline unsigned bch2_sb_field_errors_u64s(unsigned nr)
+{
+ return (sizeof(struct bch_sb_field_errors) +
+ sizeof(struct bch_sb_field_error_entry) * nr) / sizeof(u64);
+}
+
+static int bch2_sb_errors_validate(struct bch_sb *sb, struct bch_sb_field *f,
+ struct printbuf *err)
+{
+ struct bch_sb_field_errors *e = field_to_type(f, errors);
+ unsigned i, nr = bch2_sb_field_errors_nr_entries(e);
+
+ for (i = 0; i < nr; i++) {
+ if (!BCH_SB_ERROR_ENTRY_NR(&e->entries[i])) {
+ prt_printf(err, "entry with count 0 (id ");
+ bch2_sb_error_id_to_text(err, BCH_SB_ERROR_ENTRY_ID(&e->entries[i]));
+ prt_printf(err, ")");
+ return -BCH_ERR_invalid_sb_errors;
+ }
+
+ if (i + 1 < nr &&
+ BCH_SB_ERROR_ENTRY_ID(&e->entries[i]) >=
+ BCH_SB_ERROR_ENTRY_ID(&e->entries[i + 1])) {
+ prt_printf(err, "entries out of order");
+ return -BCH_ERR_invalid_sb_errors;
+ }
+ }
+
+ return 0;
+}
+
+static void bch2_sb_errors_to_text(struct printbuf *out, struct bch_sb *sb,
+ struct bch_sb_field *f)
+{
+ struct bch_sb_field_errors *e = field_to_type(f, errors);
+ unsigned i, nr = bch2_sb_field_errors_nr_entries(e);
+
+ if (out->nr_tabstops <= 1)
+ printbuf_tabstop_push(out, 16);
+
+ for (i = 0; i < nr; i++) {
+ bch2_sb_error_id_to_text(out, BCH_SB_ERROR_ENTRY_ID(&e->entries[i]));
+ prt_tab(out);
+ prt_u64(out, BCH_SB_ERROR_ENTRY_NR(&e->entries[i]));
+ prt_tab(out);
+ bch2_prt_datetime(out, le64_to_cpu(e->entries[i].last_error_time));
+ prt_newline(out);
+ }
+}
+
+const struct bch_sb_field_ops bch_sb_field_ops_errors = {
+ .validate = bch2_sb_errors_validate,
+ .to_text = bch2_sb_errors_to_text,
+};
+
+void bch2_sb_error_count(struct bch_fs *c, enum bch_sb_error_id err)
+{
+ bch_sb_errors_cpu *e = &c->fsck_error_counts;
+ struct bch_sb_error_entry_cpu n = {
+ .id = err,
+ .nr = 1,
+ .last_error_time = ktime_get_real_seconds()
+ };
+ unsigned i;
+
+ mutex_lock(&c->fsck_error_counts_lock);
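+	/* the array is kept sorted by error id, matching the on-disk encoding: */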
+ for (i = 0; i < e->nr; i++) {
+ if (err == e->data[i].id) {
+ e->data[i].nr++;
+ e->data[i].last_error_time = n.last_error_time;
+ goto out;
+ }
+ if (err < e->data[i].id)
+ break;
+ }
+
+ if (darray_make_room(e, 1))
+ goto out;
+
+ darray_insert_item(e, i, n);
+out:
+ mutex_unlock(&c->fsck_error_counts_lock);
+}
+
+void bch2_sb_errors_from_cpu(struct bch_fs *c)
+{
+ bch_sb_errors_cpu *src = &c->fsck_error_counts;
+ struct bch_sb_field_errors *dst =
+ bch2_sb_field_resize(&c->disk_sb, errors,
+ bch2_sb_field_errors_u64s(src->nr));
+ unsigned i;
+
+ if (!dst)
+ return;
+
+ for (i = 0; i < src->nr; i++) {
+ SET_BCH_SB_ERROR_ENTRY_ID(&dst->entries[i], src->data[i].id);
+ SET_BCH_SB_ERROR_ENTRY_NR(&dst->entries[i], src->data[i].nr);
+ dst->entries[i].last_error_time = cpu_to_le64(src->data[i].last_error_time);
+ }
+}
+
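+/* Load error counts from the superblock errors section into memory: */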
+static int bch2_sb_errors_to_cpu(struct bch_fs *c)
+{
+ struct bch_sb_field_errors *src = bch2_sb_field_get(c->disk_sb.sb, errors);
+ bch_sb_errors_cpu *dst = &c->fsck_error_counts;
+ unsigned i, nr = bch2_sb_field_errors_nr_entries(src);
+ int ret;
+
+ if (!nr)
+ return 0;
+
+ mutex_lock(&c->fsck_error_counts_lock);
+ ret = darray_make_room(dst, nr);
+ if (ret)
+ goto err;
+
+ dst->nr = nr;
+
+ for (i = 0; i < nr; i++) {
+ dst->data[i].id = BCH_SB_ERROR_ENTRY_ID(&src->entries[i]);
+ dst->data[i].nr = BCH_SB_ERROR_ENTRY_NR(&src->entries[i]);
+ dst->data[i].last_error_time = le64_to_cpu(src->entries[i].last_error_time);
+ }
+err:
+ mutex_unlock(&c->fsck_error_counts_lock);
+
+ return ret;
+}
+
+void bch2_fs_sb_errors_exit(struct bch_fs *c)
+{
+ darray_exit(&c->fsck_error_counts);
+}
+
+void bch2_fs_sb_errors_init_early(struct bch_fs *c)
+{
+ mutex_init(&c->fsck_error_counts_lock);
+ darray_init(&c->fsck_error_counts);
+}
+
+int bch2_fs_sb_errors_init(struct bch_fs *c)
+{
+ return bch2_sb_errors_to_cpu(c);
+}
diff --git a/fs/bcachefs/sb-errors.h b/fs/bcachefs/sb-errors.h
new file mode 100644
index 000000000000..5a09a53966be
--- /dev/null
+++ b/fs/bcachefs/sb-errors.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SB_ERRORS_H
+#define _BCACHEFS_SB_ERRORS_H
+
+#include "sb-errors_types.h"
+
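+/*
+ * On disk error counters: the numeric id in each entry below is written to
+ * the superblock, so existing ids should not be renumbered or reused.
+ */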
+#define BCH_SB_ERRS() \
+ x(clean_but_journal_not_empty, 0) \
+ x(dirty_but_no_journal_entries, 1) \
+ x(dirty_but_no_journal_entries_post_drop_nonflushes, 2) \
+ x(sb_clean_journal_seq_mismatch, 3) \
+ x(sb_clean_btree_root_mismatch, 4) \
+ x(sb_clean_missing, 5) \
+ x(jset_unsupported_version, 6) \
+ x(jset_unknown_csum, 7) \
+ x(jset_last_seq_newer_than_seq, 8) \
+ x(jset_past_bucket_end, 9) \
+ x(jset_seq_blacklisted, 10) \
+ x(journal_entries_missing, 11) \
+ x(journal_entry_replicas_not_marked, 12) \
+ x(journal_entry_past_jset_end, 13) \
+ x(journal_entry_replicas_data_mismatch, 14) \
+ x(journal_entry_bkey_u64s_0, 15) \
+ x(journal_entry_bkey_past_end, 16) \
+ x(journal_entry_bkey_bad_format, 17) \
+ x(journal_entry_bkey_invalid, 18) \
+ x(journal_entry_btree_root_bad_size, 19) \
+ x(journal_entry_blacklist_bad_size, 20) \
+ x(journal_entry_blacklist_v2_bad_size, 21) \
+ x(journal_entry_blacklist_v2_start_past_end, 22) \
+ x(journal_entry_usage_bad_size, 23) \
+ x(journal_entry_data_usage_bad_size, 24) \
+ x(journal_entry_clock_bad_size, 25) \
+ x(journal_entry_clock_bad_rw, 26) \
+ x(journal_entry_dev_usage_bad_size, 27) \
+ x(journal_entry_dev_usage_bad_dev, 28) \
+ x(journal_entry_dev_usage_bad_pad, 29) \
+ x(btree_node_unreadable, 30) \
+ x(btree_node_fault_injected, 31) \
+ x(btree_node_bad_magic, 32) \
+ x(btree_node_bad_seq, 33) \
+ x(btree_node_unsupported_version, 34) \
+ x(btree_node_bset_older_than_sb_min, 35) \
+ x(btree_node_bset_newer_than_sb, 36) \
+ x(btree_node_data_missing, 37) \
+ x(btree_node_bset_after_end, 38) \
+ x(btree_node_replicas_sectors_written_mismatch, 39) \
+ x(btree_node_replicas_data_mismatch, 40) \
+ x(bset_unknown_csum, 41) \
+ x(bset_bad_csum, 42) \
+ x(bset_past_end_of_btree_node, 43) \
+ x(bset_wrong_sector_offset, 44) \
+ x(bset_empty, 45) \
+ x(bset_bad_seq, 46) \
+ x(bset_blacklisted_journal_seq, 47) \
+ x(first_bset_blacklisted_journal_seq, 48) \
+ x(btree_node_bad_btree, 49) \
+ x(btree_node_bad_level, 50) \
+ x(btree_node_bad_min_key, 51) \
+ x(btree_node_bad_max_key, 52) \
+ x(btree_node_bad_format, 53) \
+ x(btree_node_bkey_past_bset_end, 54) \
+ x(btree_node_bkey_bad_format, 55) \
+ x(btree_node_bad_bkey, 56) \
+ x(btree_node_bkey_out_of_order, 57) \
+ x(btree_root_bkey_invalid, 58) \
+ x(btree_root_read_error, 59) \
+	x(btree_root_bad_min_key,				60)		\
+ x(btree_root_bad_max_key, 61) \
+ x(btree_node_read_error, 62) \
+ x(btree_node_topology_bad_min_key, 63) \
+ x(btree_node_topology_bad_max_key, 64) \
+ x(btree_node_topology_overwritten_by_prev_node, 65) \
+ x(btree_node_topology_overwritten_by_next_node, 66) \
+ x(btree_node_topology_interior_node_empty, 67) \
+ x(fs_usage_hidden_wrong, 68) \
+ x(fs_usage_btree_wrong, 69) \
+ x(fs_usage_data_wrong, 70) \
+ x(fs_usage_cached_wrong, 71) \
+ x(fs_usage_reserved_wrong, 72) \
+ x(fs_usage_persistent_reserved_wrong, 73) \
+ x(fs_usage_nr_inodes_wrong, 74) \
+ x(fs_usage_replicas_wrong, 75) \
+ x(dev_usage_buckets_wrong, 76) \
+ x(dev_usage_sectors_wrong, 77) \
+ x(dev_usage_fragmented_wrong, 78) \
+ x(dev_usage_buckets_ec_wrong, 79) \
+ x(bkey_version_in_future, 80) \
+ x(bkey_u64s_too_small, 81) \
+ x(bkey_invalid_type_for_btree, 82) \
+ x(bkey_extent_size_zero, 83) \
+ x(bkey_extent_size_greater_than_offset, 84) \
+ x(bkey_size_nonzero, 85) \
+ x(bkey_snapshot_nonzero, 86) \
+ x(bkey_snapshot_zero, 87) \
+ x(bkey_at_pos_max, 88) \
+ x(bkey_before_start_of_btree_node, 89) \
+ x(bkey_after_end_of_btree_node, 90) \
+ x(bkey_val_size_nonzero, 91) \
+ x(bkey_val_size_too_small, 92) \
+ x(alloc_v1_val_size_bad, 93) \
+ x(alloc_v2_unpack_error, 94) \
+ x(alloc_v3_unpack_error, 95) \
+ x(alloc_v4_val_size_bad, 96) \
+ x(alloc_v4_backpointers_start_bad, 97) \
+ x(alloc_key_data_type_bad, 98) \
+ x(alloc_key_empty_but_have_data, 99) \
+ x(alloc_key_dirty_sectors_0, 100) \
+ x(alloc_key_data_type_inconsistency, 101) \
+ x(alloc_key_to_missing_dev_bucket, 102) \
+ x(alloc_key_cached_inconsistency, 103) \
+ x(alloc_key_cached_but_read_time_zero, 104) \
+ x(alloc_key_to_missing_lru_entry, 105) \
+ x(alloc_key_data_type_wrong, 106) \
+ x(alloc_key_gen_wrong, 107) \
+ x(alloc_key_dirty_sectors_wrong, 108) \
+ x(alloc_key_cached_sectors_wrong, 109) \
+ x(alloc_key_stripe_wrong, 110) \
+ x(alloc_key_stripe_redundancy_wrong, 111) \
+ x(bucket_sector_count_overflow, 112) \
+ x(bucket_metadata_type_mismatch, 113) \
+ x(need_discard_key_wrong, 114) \
+ x(freespace_key_wrong, 115) \
+ x(freespace_hole_missing, 116) \
+ x(bucket_gens_val_size_bad, 117) \
+ x(bucket_gens_key_wrong, 118) \
+ x(bucket_gens_hole_wrong, 119) \
+ x(bucket_gens_to_invalid_dev, 120) \
+ x(bucket_gens_to_invalid_buckets, 121) \
+ x(bucket_gens_nonzero_for_invalid_buckets, 122) \
+ x(need_discard_freespace_key_to_invalid_dev_bucket, 123) \
+ x(need_discard_freespace_key_bad, 124) \
+ x(backpointer_pos_wrong, 125) \
+ x(backpointer_to_missing_device, 126) \
+ x(backpointer_to_missing_alloc, 127) \
+ x(backpointer_to_missing_ptr, 128) \
+ x(lru_entry_at_time_0, 129) \
+ x(lru_entry_to_invalid_bucket, 130) \
+ x(lru_entry_bad, 131) \
+ x(btree_ptr_val_too_big, 132) \
+ x(btree_ptr_v2_val_too_big, 133) \
+ x(btree_ptr_has_non_ptr, 134) \
+ x(extent_ptrs_invalid_entry, 135) \
+ x(extent_ptrs_no_ptrs, 136) \
+ x(extent_ptrs_too_many_ptrs, 137) \
+ x(extent_ptrs_redundant_crc, 138) \
+ x(extent_ptrs_redundant_stripe, 139) \
+ x(extent_ptrs_unwritten, 140) \
+ x(extent_ptrs_written_and_unwritten, 141) \
+ x(ptr_to_invalid_device, 142) \
+ x(ptr_to_duplicate_device, 143) \
+ x(ptr_after_last_bucket, 144) \
+ x(ptr_before_first_bucket, 145) \
+ x(ptr_spans_multiple_buckets, 146) \
+ x(ptr_to_missing_backpointer, 147) \
+ x(ptr_to_missing_alloc_key, 148) \
+ x(ptr_to_missing_replicas_entry, 149) \
+ x(ptr_to_missing_stripe, 150) \
+ x(ptr_to_incorrect_stripe, 151) \
+ x(ptr_gen_newer_than_bucket_gen, 152) \
+ x(ptr_too_stale, 153) \
+ x(stale_dirty_ptr, 154) \
+ x(ptr_bucket_data_type_mismatch, 155) \
+ x(ptr_cached_and_erasure_coded, 156) \
+ x(ptr_crc_uncompressed_size_too_small, 157) \
+ x(ptr_crc_csum_type_unknown, 158) \
+ x(ptr_crc_compression_type_unknown, 159) \
+ x(ptr_crc_redundant, 160) \
+ x(ptr_crc_uncompressed_size_too_big, 161) \
+ x(ptr_crc_nonce_mismatch, 162) \
+ x(ptr_stripe_redundant, 163) \
+ x(reservation_key_nr_replicas_invalid, 164) \
+ x(reflink_v_refcount_wrong, 165) \
+ x(reflink_p_to_missing_reflink_v, 166) \
+ x(stripe_pos_bad, 167) \
+ x(stripe_val_size_bad, 168) \
+ x(stripe_sector_count_wrong, 169) \
+ x(snapshot_tree_pos_bad, 170) \
+ x(snapshot_tree_to_missing_snapshot, 171) \
+ x(snapshot_tree_to_missing_subvol, 172) \
+ x(snapshot_tree_to_wrong_subvol, 173) \
+ x(snapshot_tree_to_snapshot_subvol, 174) \
+ x(snapshot_pos_bad, 175) \
+ x(snapshot_parent_bad, 176) \
+ x(snapshot_children_not_normalized, 177) \
+ x(snapshot_child_duplicate, 178) \
+ x(snapshot_child_bad, 179) \
+ x(snapshot_skiplist_not_normalized, 180) \
+ x(snapshot_skiplist_bad, 181) \
+ x(snapshot_should_not_have_subvol, 182) \
+ x(snapshot_to_bad_snapshot_tree, 183) \
+ x(snapshot_bad_depth, 184) \
+ x(snapshot_bad_skiplist, 185) \
+ x(subvol_pos_bad, 186) \
+ x(subvol_not_master_and_not_snapshot, 187) \
+ x(subvol_to_missing_root, 188) \
+ x(subvol_root_wrong_bi_subvol, 189) \
+ x(bkey_in_missing_snapshot, 190) \
+ x(inode_pos_inode_nonzero, 191) \
+ x(inode_pos_blockdev_range, 192) \
+ x(inode_unpack_error, 193) \
+ x(inode_str_hash_invalid, 194) \
+ x(inode_v3_fields_start_bad, 195) \
+ x(inode_snapshot_mismatch, 196) \
+ x(inode_unlinked_but_clean, 197) \
+ x(inode_unlinked_but_nlink_nonzero, 198) \
+ x(inode_checksum_type_invalid, 199) \
+ x(inode_compression_type_invalid, 200) \
+ x(inode_subvol_root_but_not_dir, 201) \
+ x(inode_i_size_dirty_but_clean, 202) \
+ x(inode_i_sectors_dirty_but_clean, 203) \
+ x(inode_i_sectors_wrong, 204) \
+ x(inode_dir_wrong_nlink, 205) \
+ x(inode_dir_multiple_links, 206) \
+ x(inode_multiple_links_but_nlink_0, 207) \
+ x(inode_wrong_backpointer, 208) \
+ x(inode_wrong_nlink, 209) \
+ x(inode_unreachable, 210) \
+ x(deleted_inode_but_clean, 211) \
+ x(deleted_inode_missing, 212) \
+ x(deleted_inode_is_dir, 213) \
+ x(deleted_inode_not_unlinked, 214) \
+ x(extent_overlapping, 215) \
+ x(extent_in_missing_inode, 216) \
+ x(extent_in_non_reg_inode, 217) \
+ x(extent_past_end_of_inode, 218) \
+ x(dirent_empty_name, 219) \
+ x(dirent_val_too_big, 220) \
+ x(dirent_name_too_long, 221) \
+ x(dirent_name_embedded_nul, 222) \
+ x(dirent_name_dot_or_dotdot, 223) \
+ x(dirent_name_has_slash, 224) \
+ x(dirent_d_type_wrong, 225) \
+ x(dirent_d_parent_subvol_wrong, 226) \
+ x(dirent_in_missing_dir_inode, 227) \
+ x(dirent_in_non_dir_inode, 228) \
+ x(dirent_to_missing_inode, 229) \
+ x(dirent_to_missing_subvol, 230) \
+ x(dirent_to_itself, 231) \
+ x(quota_type_invalid, 232) \
+ x(xattr_val_size_too_small, 233) \
+ x(xattr_val_size_too_big, 234) \
+ x(xattr_invalid_type, 235) \
+ x(xattr_name_invalid_chars, 236) \
+ x(xattr_in_missing_inode, 237) \
+ x(root_subvol_missing, 238) \
+ x(root_dir_missing, 239) \
+ x(root_inode_not_dir, 240) \
+ x(dir_loop, 241) \
+ x(hash_table_key_duplicate, 242) \
+ x(hash_table_key_wrong_offset, 243)
+
+enum bch_sb_error_id {
+#define x(t, n) BCH_FSCK_ERR_##t = n,
+ BCH_SB_ERRS()
+#undef x
+ BCH_SB_ERR_MAX
+};
+
+extern const struct bch_sb_field_ops bch_sb_field_ops_errors;
+
+void bch2_sb_error_count(struct bch_fs *, enum bch_sb_error_id);
+
+void bch2_sb_errors_from_cpu(struct bch_fs *);
+
+void bch2_fs_sb_errors_exit(struct bch_fs *);
+void bch2_fs_sb_errors_init_early(struct bch_fs *);
+int bch2_fs_sb_errors_init(struct bch_fs *);
+
+#endif /* _BCACHEFS_SB_ERRORS_H */
diff --git a/fs/bcachefs/sb-errors_types.h b/fs/bcachefs/sb-errors_types.h
new file mode 100644
index 000000000000..b1c099843a39
--- /dev/null
+++ b/fs/bcachefs/sb-errors_types.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SB_ERRORS_TYPES_H
+#define _BCACHEFS_SB_ERRORS_TYPES_H
+
+#include "darray.h"
+
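+/* In-memory counter: 16 bit error id, 48 bit count, plus last-seen time: */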
+struct bch_sb_error_entry_cpu {
+ u64 id:16,
+ nr:48;
+ u64 last_error_time;
+};
+
+typedef DARRAY(struct bch_sb_error_entry_cpu) bch_sb_errors_cpu;
+
+#endif /* _BCACHEFS_SB_ERRORS_TYPES_H */
+
diff --git a/fs/bcachefs/sb-members.c b/fs/bcachefs/sb-members.c
index 6dd85bb996fe..bed0f857fe5b 100644
--- a/fs/bcachefs/sb-members.c
+++ b/fs/bcachefs/sb-members.c
@@ -7,21 +7,28 @@
#include "sb-members.h"
#include "super-io.h"
-/* Code for bch_sb_field_members_v1: */
+#define x(t, n, ...) [n] = #t,
+static const char * const bch2_iops_measurements[] = {
+ BCH_IOPS_MEASUREMENTS()
+ NULL
+};
-static struct bch_member *members_v2_get_mut(struct bch_sb_field_members_v2 *mi, int i)
-{
- return (void *) mi->_members + (i * le16_to_cpu(mi->member_bytes));
-}
+char * const bch2_member_error_strs[] = {
+ BCH_MEMBER_ERROR_TYPES()
+ NULL
+};
+#undef x
+
+/* Code for bch_sb_field_members_v1: */
struct bch_member *bch2_members_v2_get_mut(struct bch_sb *sb, int i)
{
- return members_v2_get_mut(bch2_sb_field_get(sb, members_v2), i);
+ return __bch2_members_v2_get_mut(bch2_sb_field_get(sb, members_v2), i);
}
static struct bch_member members_v2_get(struct bch_sb_field_members_v2 *mi, int i)
{
- struct bch_member ret, *p = members_v2_get_mut(mi, i);
+ struct bch_member ret, *p = __bch2_members_v2_get_mut(mi, i);
memset(&ret, 0, sizeof(ret));
memcpy(&ret, p, min_t(size_t, le16_to_cpu(mi->member_bytes), sizeof(ret)));
return ret;
@@ -36,7 +43,8 @@ static struct bch_member members_v1_get(struct bch_sb_field_members_v1 *mi, int
{
struct bch_member ret, *p = members_v1_get_mut(mi, i);
memset(&ret, 0, sizeof(ret));
- memcpy(&ret, p, min_t(size_t, sizeof(struct bch_member), sizeof(ret))); return ret;
+ memcpy(&ret, p, min_t(size_t, BCH_MEMBER_V1_BYTES, sizeof(ret)));
+ return ret;
}
struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i)
@@ -62,7 +70,7 @@ static int sb_members_v2_resize_entries(struct bch_fs *c)
for (int i = c->disk_sb.sb->nr_devices - 1; i >= 0; --i) {
void *dst = (void *) mi->_members + (i * sizeof(struct bch_member));
- memmove(dst, members_v2_get_mut(mi, i), le16_to_cpu(mi->member_bytes));
+ memmove(dst, __bch2_members_v2_get_mut(mi, i), le16_to_cpu(mi->member_bytes));
memset(dst + le16_to_cpu(mi->member_bytes),
0, (sizeof(struct bch_member) - le16_to_cpu(mi->member_bytes)));
}
@@ -71,7 +79,7 @@ static int sb_members_v2_resize_entries(struct bch_fs *c)
return 0;
}
-int bch2_members_v2_init(struct bch_fs *c)
+int bch2_sb_members_v2_init(struct bch_fs *c)
{
struct bch_sb_field_members_v1 *mi1;
struct bch_sb_field_members_v2 *mi2;
@@ -91,7 +99,7 @@ int bch2_members_v2_init(struct bch_fs *c)
return sb_members_v2_resize_entries(c);
}
-int bch_members_cpy_v2_v1(struct bch_sb_handle *disk_sb)
+int bch2_sb_members_cpy_v2_v1(struct bch_sb_handle *disk_sb)
{
struct bch_sb_field_members_v1 *mi1;
struct bch_sb_field_members_v2 *mi2;
@@ -105,7 +113,7 @@ int bch_members_cpy_v2_v1(struct bch_sb_handle *disk_sb)
mi2 = bch2_sb_field_get(disk_sb->sb, members_v2);
for (unsigned i = 0; i < disk_sb->sb->nr_devices; i++)
- memcpy(members_v1_get_mut(mi1, i), members_v2_get_mut(mi2, i), BCH_MEMBER_V1_BYTES);
+ memcpy(members_v1_get_mut(mi1, i), __bch2_members_v2_get_mut(mi2, i), BCH_MEMBER_V1_BYTES);
return 0;
}
@@ -155,6 +163,8 @@ static void member_to_text(struct printbuf *out,
u64 bucket_size = le16_to_cpu(m.bucket_size);
u64 device_size = le64_to_cpu(m.nbuckets) * bucket_size;
+ if (!bch2_member_exists(&m))
+ return;
prt_printf(out, "Device:");
prt_tab(out);
@@ -163,6 +173,21 @@ static void member_to_text(struct printbuf *out,
printbuf_indent_add(out, 2);
+ prt_printf(out, "Label:");
+ prt_tab(out);
+ if (BCH_MEMBER_GROUP(&m)) {
+ unsigned idx = BCH_MEMBER_GROUP(&m) - 1;
+
+ if (idx < disk_groups_nr(gi))
+ prt_printf(out, "%s (%u)",
+ gi->entries[idx].label, idx);
+ else
+ prt_printf(out, "(bad disk labels section)");
+ } else {
+ prt_printf(out, "(none)");
+ }
+ prt_newline(out);
+
prt_printf(out, "UUID:");
prt_tab(out);
pr_uuid(out, m.uuid.b);
@@ -173,6 +198,13 @@ static void member_to_text(struct printbuf *out,
prt_units_u64(out, device_size << 9);
prt_newline(out);
+ for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++) {
+ prt_printf(out, "%s errors:", bch2_member_error_strs[i]);
+ prt_tab(out);
+ prt_u64(out, le64_to_cpu(m.errors[i]));
+ prt_newline(out);
+ }
+
for (unsigned i = 0; i < BCH_IOPS_NR; i++) {
prt_printf(out, "%s iops:", bch2_iops_measurements[i]);
prt_tab(out);
@@ -198,7 +230,7 @@ static void member_to_text(struct printbuf *out,
prt_printf(out, "Last mount:");
prt_tab(out);
if (m.last_mount)
- pr_time(out, le64_to_cpu(m.last_mount));
+ bch2_prt_datetime(out, le64_to_cpu(m.last_mount));
else
prt_printf(out, "(never)");
prt_newline(out);
@@ -211,21 +243,6 @@ static void member_to_text(struct printbuf *out,
: "unknown");
prt_newline(out);
- prt_printf(out, "Label:");
- prt_tab(out);
- if (BCH_MEMBER_GROUP(&m)) {
- unsigned idx = BCH_MEMBER_GROUP(&m) - 1;
-
- if (idx < disk_groups_nr(gi))
- prt_printf(out, "%s (%u)",
- gi->entries[idx].label, idx);
- else
- prt_printf(out, "(bad disk labels section)");
- } else {
- prt_printf(out, "(none)");
- }
- prt_newline(out);
-
prt_printf(out, "Data allowed:");
prt_tab(out);
if (BCH_MEMBER_DATA_ALLOWED(&m))
@@ -262,8 +279,7 @@ static int bch2_sb_members_v1_validate(struct bch_sb *sb,
struct bch_sb_field_members_v1 *mi = field_to_type(f, members_v1);
unsigned i;
- if ((void *) members_v1_get_mut(mi, sb->nr_devices) >
- vstruct_end(&mi->field)) {
+ if ((void *) members_v1_get_mut(mi, sb->nr_devices) > vstruct_end(&mi->field)) {
prt_printf(err, "too many devices for section size");
return -BCH_ERR_invalid_sb_members;
}
@@ -286,10 +302,8 @@ static void bch2_sb_members_v1_to_text(struct printbuf *out, struct bch_sb *sb,
struct bch_sb_field_disk_groups *gi = bch2_sb_field_get(sb, disk_groups);
unsigned i;
- for (i = 0; i < sb->nr_devices; i++) {
- struct bch_member m = members_v1_get(mi, i);
- member_to_text(out, m, gi, sb, i);
- }
+ for (i = 0; i < sb->nr_devices; i++)
+ member_to_text(out, members_v1_get(mi, i), gi, sb, i);
}
const struct bch_sb_field_ops bch_sb_field_ops_members_v1 = {
@@ -304,10 +318,8 @@ static void bch2_sb_members_v2_to_text(struct printbuf *out, struct bch_sb *sb,
struct bch_sb_field_disk_groups *gi = bch2_sb_field_get(sb, disk_groups);
unsigned i;
- for (i = 0; i < sb->nr_devices; i++) {
- struct bch_member m = members_v2_get(mi, i);
- member_to_text(out, m, gi, sb, i);
- }
+ for (i = 0; i < sb->nr_devices; i++)
+ member_to_text(out, members_v2_get(mi, i), gi, sb, i);
}
static int bch2_sb_members_v2_validate(struct bch_sb *sb,
@@ -315,7 +327,7 @@ static int bch2_sb_members_v2_validate(struct bch_sb *sb,
struct printbuf *err)
{
struct bch_sb_field_members_v2 *mi = field_to_type(f, members_v2);
- size_t mi_bytes = (void *) members_v2_get_mut(mi, sb->nr_devices) -
+ size_t mi_bytes = (void *) __bch2_members_v2_get_mut(mi, sb->nr_devices) -
(void *) mi;
if (mi_bytes > vstruct_bytes(&mi->field)) {
@@ -337,3 +349,72 @@ const struct bch_sb_field_ops bch_sb_field_ops_members_v2 = {
.validate = bch2_sb_members_v2_validate,
.to_text = bch2_sb_members_v2_to_text,
};
+
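+/* Copy per-device IO error counters into the members_v2 superblock section: */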
+void bch2_sb_members_from_cpu(struct bch_fs *c)
+{
+ struct bch_sb_field_members_v2 *mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
+ struct bch_dev *ca;
+ unsigned i, e;
+
+ rcu_read_lock();
+ for_each_member_device_rcu(ca, c, i, NULL) {
+ struct bch_member *m = __bch2_members_v2_get_mut(mi, i);
+
+ for (e = 0; e < BCH_MEMBER_ERROR_NR; e++)
+ m->errors[e] = cpu_to_le64(atomic64_read(&ca->errors[e]));
+ }
+ rcu_read_unlock();
+}
+
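+/*
+ * Print a device's IO error counters: totals since filesystem creation,
+ * then the counts accumulated since the last reset.
+ */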
+void bch2_dev_io_errors_to_text(struct printbuf *out, struct bch_dev *ca)
+{
+ struct bch_fs *c = ca->fs;
+ struct bch_member m;
+
+ mutex_lock(&ca->fs->sb_lock);
+ m = bch2_sb_member_get(c->disk_sb.sb, ca->dev_idx);
+ mutex_unlock(&ca->fs->sb_lock);
+
+ printbuf_tabstop_push(out, 12);
+
+ prt_str(out, "IO errors since filesystem creation");
+ prt_newline(out);
+
+ printbuf_indent_add(out, 2);
+ for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++) {
+ prt_printf(out, "%s:", bch2_member_error_strs[i]);
+ prt_tab(out);
+ prt_u64(out, atomic64_read(&ca->errors[i]));
+ prt_newline(out);
+ }
+ printbuf_indent_sub(out, 2);
+
+ prt_str(out, "IO errors since ");
+ bch2_pr_time_units(out, (ktime_get_real_seconds() - le64_to_cpu(m.errors_reset_time)) * NSEC_PER_SEC);
+ prt_str(out, " ago");
+ prt_newline(out);
+
+ printbuf_indent_add(out, 2);
+ for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++) {
+ prt_printf(out, "%s:", bch2_member_error_strs[i]);
+ prt_tab(out);
+ prt_u64(out, atomic64_read(&ca->errors[i]) - le64_to_cpu(m.errors_at_reset[i]));
+ prt_newline(out);
+ }
+ printbuf_indent_sub(out, 2);
+}
+
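+/*
+ * Reset the "errors since" counters: snapshot the current totals into
+ * errors_at_reset and record the reset time in the superblock.
+ */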
+void bch2_dev_errors_reset(struct bch_dev *ca)
+{
+ struct bch_fs *c = ca->fs;
+ struct bch_member *m;
+
+ mutex_lock(&c->sb_lock);
+ m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
+ for (unsigned i = 0; i < ARRAY_SIZE(m->errors_at_reset); i++)
+ m->errors_at_reset[i] = cpu_to_le64(atomic64_read(&ca->errors[i]));
+	m->errors_reset_time = cpu_to_le64(ktime_get_real_seconds());
+
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+}
diff --git a/fs/bcachefs/sb-members.h b/fs/bcachefs/sb-members.h
index 430f3457bfd4..03613e3eb8e3 100644
--- a/fs/bcachefs/sb-members.h
+++ b/fs/bcachefs/sb-members.h
@@ -2,8 +2,16 @@
#ifndef _BCACHEFS_SB_MEMBERS_H
#define _BCACHEFS_SB_MEMBERS_H
-int bch2_members_v2_init(struct bch_fs *c);
-int bch_members_cpy_v2_v1(struct bch_sb_handle *disk_sb);
+extern char * const bch2_member_error_strs[];
+
+static inline struct bch_member *
+__bch2_members_v2_get_mut(struct bch_sb_field_members_v2 *mi, unsigned i)
+{
+ return (void *) mi->_members + (i * le16_to_cpu(mi->member_bytes));
+}
+
+int bch2_sb_members_v2_init(struct bch_fs *c);
+int bch2_sb_members_cpy_v2_v1(struct bch_sb_handle *disk_sb);
struct bch_member *bch2_members_v2_get_mut(struct bch_sb *sb, int i);
struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i);
@@ -179,4 +187,41 @@ static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
extern const struct bch_sb_field_ops bch_sb_field_ops_members_v1;
extern const struct bch_sb_field_ops bch_sb_field_ops_members_v2;
+static inline bool bch2_member_exists(struct bch_member *m)
+{
+ return !bch2_is_zero(&m->uuid, sizeof(m->uuid));
+}
+
+static inline bool bch2_dev_exists(struct bch_sb *sb, unsigned dev)
+{
+ if (dev < sb->nr_devices) {
+ struct bch_member m = bch2_sb_member_get(sb, dev);
+ return bch2_member_exists(&m);
+ }
+ return false;
+}
+
+static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
+{
+ return (struct bch_member_cpu) {
+ .nbuckets = le64_to_cpu(mi->nbuckets),
+ .first_bucket = le16_to_cpu(mi->first_bucket),
+ .bucket_size = le16_to_cpu(mi->bucket_size),
+ .group = BCH_MEMBER_GROUP(mi),
+ .state = BCH_MEMBER_STATE(mi),
+ .discard = BCH_MEMBER_DISCARD(mi),
+ .data_allowed = BCH_MEMBER_DATA_ALLOWED(mi),
+ .durability = BCH_MEMBER_DURABILITY(mi)
+ ? BCH_MEMBER_DURABILITY(mi) - 1
+ : 1,
+ .freespace_initialized = BCH_MEMBER_FREESPACE_INITIALIZED(mi),
+ .valid = bch2_member_exists(mi),
+ };
+}
+
+void bch2_sb_members_from_cpu(struct bch_fs *);
+
+void bch2_dev_io_errors_to_text(struct printbuf *, struct bch_dev *);
+void bch2_dev_errors_reset(struct bch_dev *);
+
#endif /* _BCACHEFS_SB_MEMBERS_H */
diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c
index b684b9f00c1b..b775cf0fb7cb 100644
--- a/fs/bcachefs/six.c
+++ b/fs/bcachefs/six.c
@@ -11,6 +11,8 @@
#include <linux/sched/task.h>
#include <linux/slab.h>
+#include <trace/events/lock.h>
+
#include "six.h"
#ifdef DEBUG
@@ -462,11 +464,12 @@ static int six_lock_slowpath(struct six_lock *lock, enum six_lock_type type,
smp_mb__after_atomic();
}
+ trace_contention_begin(lock, 0);
+ lock_contended(&lock->dep_map, ip);
+
if (six_optimistic_spin(lock, type))
goto out;
- lock_contended(&lock->dep_map, ip);
-
wait->task = current;
wait->lock_want = type;
wait->lock_acquired = false;
@@ -546,6 +549,7 @@ out:
six_clear_bitmask(lock, SIX_LOCK_HELD_write);
six_lock_wakeup(lock, atomic_read(&lock->state), SIX_LOCK_read);
}
+ trace_contention_end(lock, 0);
return ret;
}
diff --git a/fs/bcachefs/snapshot.c b/fs/bcachefs/snapshot.c
index 4982468bfe11..e9af77b384c7 100644
--- a/fs/bcachefs/snapshot.c
+++ b/fs/bcachefs/snapshot.c
@@ -30,17 +30,18 @@ void bch2_snapshot_tree_to_text(struct printbuf *out, struct bch_fs *c,
le32_to_cpu(t.v->root_snapshot));
}
-int bch2_snapshot_tree_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_snapshot_tree_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
- if (bkey_gt(k.k->p, POS(0, U32_MAX)) ||
- bkey_lt(k.k->p, POS(0, 1))) {
- prt_printf(err, "bad pos");
- return -BCH_ERR_invalid_bkey;
- }
+ int ret = 0;
- return 0;
+ bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, U32_MAX)) ||
+ bkey_lt(k.k->p, POS(0, 1)), c, err,
+ snapshot_tree_pos_bad,
+ "bad pos");
+fsck_err:
+ return ret;
}
int bch2_snapshot_tree_lookup(struct btree_trans *trans, u32 id,
@@ -202,68 +203,60 @@ void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c,
le32_to_cpu(s.v->skip[2]));
}
-int bch2_snapshot_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_snapshot_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
struct bkey_s_c_snapshot s;
u32 i, id;
+ int ret = 0;
- if (bkey_gt(k.k->p, POS(0, U32_MAX)) ||
- bkey_lt(k.k->p, POS(0, 1))) {
- prt_printf(err, "bad pos");
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, U32_MAX)) ||
+ bkey_lt(k.k->p, POS(0, 1)), c, err,
+ snapshot_pos_bad,
+ "bad pos");
s = bkey_s_c_to_snapshot(k);
id = le32_to_cpu(s.v->parent);
- if (id && id <= k.k->p.offset) {
- prt_printf(err, "bad parent node (%u <= %llu)",
- id, k.k->p.offset);
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(id && id <= k.k->p.offset, c, err,
+ snapshot_parent_bad,
+ "bad parent node (%u <= %llu)",
+ id, k.k->p.offset);
- if (le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1])) {
- prt_printf(err, "children not normalized");
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1]), c, err,
+ snapshot_children_not_normalized,
+ "children not normalized");
- if (s.v->children[0] &&
- s.v->children[0] == s.v->children[1]) {
- prt_printf(err, "duplicate child nodes");
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(s.v->children[0] && s.v->children[0] == s.v->children[1], c, err,
+ snapshot_child_duplicate,
+ "duplicate child nodes");
for (i = 0; i < 2; i++) {
id = le32_to_cpu(s.v->children[i]);
- if (id >= k.k->p.offset) {
- prt_printf(err, "bad child node (%u >= %llu)",
- id, k.k->p.offset);
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(id >= k.k->p.offset, c, err,
+ snapshot_child_bad,
+ "bad child node (%u >= %llu)",
+ id, k.k->p.offset);
}
if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, skip)) {
- if (le32_to_cpu(s.v->skip[0]) > le32_to_cpu(s.v->skip[1]) ||
- le32_to_cpu(s.v->skip[1]) > le32_to_cpu(s.v->skip[2])) {
- prt_printf(err, "skiplist not normalized");
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(le32_to_cpu(s.v->skip[0]) > le32_to_cpu(s.v->skip[1]) ||
+ le32_to_cpu(s.v->skip[1]) > le32_to_cpu(s.v->skip[2]), c, err,
+ snapshot_skiplist_not_normalized,
+ "skiplist not normalized");
for (i = 0; i < ARRAY_SIZE(s.v->skip); i++) {
id = le32_to_cpu(s.v->skip[i]);
- if ((id && !s.v->parent) ||
- (id && id <= k.k->p.offset)) {
- prt_printf(err, "bad skiplist node %u", id);
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(id && id < le32_to_cpu(s.v->parent), c, err,
+ snapshot_skiplist_bad,
+ "bad skiplist node %u", id);
}
}
-
- return 0;
+fsck_err:
+ return ret;
}
static void __set_is_ancestor_bitmap(struct bch_fs *c, u32 id)
@@ -325,8 +318,9 @@ int bch2_mark_snapshot(struct btree_trans *trans,
__set_is_ancestor_bitmap(c, id);
if (BCH_SNAPSHOT_DELETED(s.v)) {
- set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
- c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_delete_dead_snapshots);
+ set_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags);
+ if (c->curr_recovery_pass > BCH_RECOVERY_PASS_delete_dead_snapshots)
+ bch2_delete_dead_snapshots_async(c);
}
} else {
memset(t, 0, sizeof(*t));
@@ -529,7 +523,7 @@ static int check_snapshot_tree(struct btree_trans *trans,
if (fsck_err_on(ret ||
root_id != bch2_snapshot_root(c, root_id) ||
st.k->p.offset != le32_to_cpu(s.tree),
- c,
+ c, snapshot_tree_to_missing_snapshot,
"snapshot tree points to missing/incorrect snapshot:\n %s",
(bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
ret = bch2_btree_delete_at(trans, iter, 0);
@@ -541,17 +535,20 @@ static int check_snapshot_tree(struct btree_trans *trans,
if (ret && !bch2_err_matches(ret, ENOENT))
goto err;
- if (fsck_err_on(ret, c,
+ if (fsck_err_on(ret,
+ c, snapshot_tree_to_missing_subvol,
"snapshot tree points to missing subvolume:\n %s",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
fsck_err_on(!bch2_snapshot_is_ancestor_early(c,
le32_to_cpu(subvol.snapshot),
- root_id), c,
+ root_id),
+ c, snapshot_tree_to_wrong_subvol,
"snapshot tree points to subvolume that does not point to snapshot in this tree:\n %s",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
- fsck_err_on(BCH_SUBVOLUME_SNAP(&subvol), c,
+ fsck_err_on(BCH_SUBVOLUME_SNAP(&subvol),
+ c, snapshot_tree_to_snapshot_subvol,
"snapshot tree points to snapshot subvolume:\n %s",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
@@ -787,7 +784,9 @@ static int check_snapshot(struct btree_trans *trans,
goto err;
}
} else {
- if (fsck_err_on(s.subvol, c, "snapshot should not point to subvol:\n %s",
+ if (fsck_err_on(s.subvol,
+ c, snapshot_should_not_have_subvol,
+ "snapshot should not point to subvol:\n %s",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
ret = PTR_ERR_OR_ZERO(u);
@@ -803,7 +802,8 @@ static int check_snapshot(struct btree_trans *trans,
if (ret < 0)
goto err;
- if (fsck_err_on(!ret, c, "snapshot points to missing/incorrect tree:\n %s",
+ if (fsck_err_on(!ret, c, snapshot_to_bad_snapshot_tree,
+ "snapshot points to missing/incorrect tree:\n %s",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
ret = snapshot_tree_ptr_repair(trans, iter, k, &s);
if (ret)
@@ -815,7 +815,8 @@ static int check_snapshot(struct btree_trans *trans,
if (le32_to_cpu(s.depth) != real_depth &&
(c->sb.version_upgrade_complete < bcachefs_metadata_version_snapshot_skiplists ||
- fsck_err(c, "snapshot with incorrect depth field, should be %u:\n %s",
+ fsck_err(c, snapshot_bad_depth,
+ "snapshot with incorrect depth field, should be %u:\n %s",
real_depth, (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))) {
u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
ret = PTR_ERR_OR_ZERO(u);
@@ -832,7 +833,8 @@ static int check_snapshot(struct btree_trans *trans,
if (!ret &&
(c->sb.version_upgrade_complete < bcachefs_metadata_version_snapshot_skiplists ||
- fsck_err(c, "snapshot with bad skiplist field:\n %s",
+ fsck_err(c, snapshot_bad_skiplist,
+ "snapshot with bad skiplist field:\n %s",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))) {
u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
ret = PTR_ERR_OR_ZERO(u);
@@ -1251,13 +1253,7 @@ static int move_key_to_correct_snapshot(struct btree_trans *trans,
return 0;
}
-/*
- * For a given snapshot, if it doesn't have a subvolume that points to it, and
- * it doesn't have child snapshot nodes - it's now redundant and we can mark it
- * as deleted.
- */
-static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct btree_iter *iter,
- struct bkey_s_c k)
+static int bch2_snapshot_needs_delete(struct btree_trans *trans, struct bkey_s_c k)
{
struct bkey_s_c_snapshot snap;
u32 children[2];
@@ -1278,10 +1274,21 @@ static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct btre
bch2_snapshot_live(trans, children[1]);
if (ret < 0)
return ret;
+ return !ret;
+}
- if (!ret)
- return bch2_snapshot_node_set_deleted(trans, k.k->p.offset);
- return 0;
+/*
+ * For a given snapshot, if it doesn't have a subvolume that points to it, and
+ * it doesn't have child snapshot nodes - it's now redundant and we can mark it
+ * as deleted.
+ */
+static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct bkey_s_c k)
+{
+ int ret = bch2_snapshot_needs_delete(trans, k);
+
+ return ret <= 0
+ ? ret
+ : bch2_snapshot_node_set_deleted(trans, k.k->p.offset);
}
static inline u32 bch2_snapshot_nth_parent_skip(struct bch_fs *c, u32 id, u32 n,
@@ -1342,12 +1349,12 @@ static int bch2_fix_child_of_deleted_snapshot(struct btree_trans *trans,
u32 id = le32_to_cpu(s->v.skip[j]);
if (snapshot_list_has_id(deleted, id)) {
- id = depth > 1
- ? bch2_snapshot_nth_parent_skip(c,
+ id = bch2_snapshot_nth_parent_skip(c,
parent,
- get_random_u32_below(depth - 1),
- deleted)
- : parent;
+ depth > 1
+ ? get_random_u32_below(depth - 1)
+ : 0,
+ deleted);
s->v.skip[j] = cpu_to_le32(id);
}
}
@@ -1369,6 +1376,9 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
u32 *i, id;
int ret = 0;
+ if (!test_and_clear_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags))
+ return 0;
+
if (!test_bit(BCH_FS_STARTED, &c->flags)) {
ret = bch2_fs_read_write_early(c);
if (ret) {
@@ -1386,7 +1396,7 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots,
POS_MIN, 0, k,
NULL, NULL, 0,
- bch2_delete_redundant_snapshot(trans, &iter, k));
+ bch2_delete_redundant_snapshot(trans, k));
if (ret) {
bch_err_msg(c, ret, "deleting redundant snapshots");
goto err;
@@ -1427,6 +1437,15 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
if (!btree_type_has_snapshots(id))
continue;
+ /*
+ * deleted inodes btree is maintained by a trigger on the inodes
+ * btree - no work for us to do here, and it's not safe to scan
+ * it because we'll see out of date keys due to the btree write
+ * buffer:
+ */
+ if (id == BTREE_ID_deleted_inodes)
+ continue;
+
ret = for_each_btree_key_commit(trans, iter,
id, POS_MIN,
BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
@@ -1447,6 +1466,7 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
}
}
+ bch2_trans_unlock(trans);
down_write(&c->snapshot_create_lock);
for_each_btree_key(trans, iter, BTREE_ID_snapshots,
@@ -1491,8 +1511,6 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
goto err_create_lock;
}
}
-
- clear_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
err_create_lock:
up_write(&c->snapshot_create_lock);
err:
@@ -1508,8 +1526,7 @@ void bch2_delete_dead_snapshots_work(struct work_struct *work)
{
struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);
- if (test_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags))
- bch2_delete_dead_snapshots(c);
+ bch2_delete_dead_snapshots(c);
bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
}
@@ -1520,20 +1537,6 @@ void bch2_delete_dead_snapshots_async(struct bch_fs *c)
bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
}
-int bch2_delete_dead_snapshots_hook(struct btree_trans *trans,
- struct btree_trans_commit_hook *h)
-{
- struct bch_fs *c = trans->c;
-
- set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
-
- if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_delete_dead_snapshots)
- return 0;
-
- bch2_delete_dead_snapshots_async(c);
- return 0;
-}
-
int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
enum btree_id id,
struct bpos pos)
@@ -1664,6 +1667,26 @@ again:
return ret ?: trans_was_restarted(trans, restart_count);
}
+static int bch2_check_snapshot_needs_deletion(struct btree_trans *trans, struct bkey_s_c k)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_s_c_snapshot snap;
+ int ret = 0;
+
+ if (k.k->type != KEY_TYPE_snapshot)
+ return 0;
+
+ snap = bkey_s_c_to_snapshot(k);
+ if (BCH_SNAPSHOT_DELETED(snap.v) ||
+ bch2_snapshot_equiv(c, k.k->p.offset) != k.k->p.offset ||
+ (ret = bch2_snapshot_needs_delete(trans, k)) > 0) {
+ set_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags);
+ return 0;
+ }
+
+ return ret;
+}
+
int bch2_snapshots_read(struct bch_fs *c)
{
struct btree_iter iter;
@@ -1674,7 +1697,8 @@ int bch2_snapshots_read(struct bch_fs *c)
for_each_btree_key2(trans, iter, BTREE_ID_snapshots,
POS_MIN, 0, k,
bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?:
- bch2_snapshot_set_equiv(trans, k)) ?:
+ bch2_snapshot_set_equiv(trans, k) ?:
+ bch2_check_snapshot_needs_deletion(trans, k)) ?:
for_each_btree_key2(trans, iter, BTREE_ID_snapshots,
POS_MIN, 0, k,
(set_is_ancestor_bitmap(c, k.k->p.offset), 0)));
diff --git a/fs/bcachefs/snapshot.h b/fs/bcachefs/snapshot.h
index de215d9d1252..f09a22f44239 100644
--- a/fs/bcachefs/snapshot.h
+++ b/fs/bcachefs/snapshot.h
@@ -5,7 +5,7 @@
enum bkey_invalid_flags;
void bch2_snapshot_tree_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-int bch2_snapshot_tree_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_snapshot_tree_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
#define bch2_bkey_ops_snapshot_tree ((struct bkey_ops) { \
@@ -19,7 +19,7 @@ struct bkey_i_snapshot_tree *__bch2_snapshot_tree_create(struct btree_trans *);
int bch2_snapshot_tree_lookup(struct btree_trans *, u32, struct bch_snapshot_tree *);
void bch2_snapshot_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-int bch2_snapshot_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_snapshot_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
int bch2_mark_snapshot(struct btree_trans *, enum btree_id, unsigned,
struct bkey_s_c, struct bkey_s_c, unsigned);
@@ -244,8 +244,6 @@ int bch2_check_snapshot_trees(struct bch_fs *);
int bch2_check_snapshots(struct bch_fs *);
int bch2_snapshot_node_set_deleted(struct btree_trans *, u32);
-int bch2_delete_dead_snapshots_hook(struct btree_trans *,
- struct btree_trans_commit_hook *);
void bch2_delete_dead_snapshots_work(struct work_struct *);
int __bch2_key_has_snapshot_overwrites(struct btree_trans *, enum btree_id, struct bpos);
diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c
index caf2dd7dafff..fccd25aa3242 100644
--- a/fs/bcachefs/subvolume.c
+++ b/fs/bcachefs/subvolume.c
@@ -62,7 +62,8 @@ static int check_subvol(struct btree_trans *trans,
if (ret)
return ret;
- if (fsck_err_on(le32_to_cpu(st.master_subvol) != subvol.k->p.offset, c,
+ if (fsck_err_on(le32_to_cpu(st.master_subvol) != subvol.k->p.offset,
+ c, subvol_not_master_and_not_snapshot,
"subvolume %llu is not set as snapshot but is not master subvolume",
k.k->p.offset)) {
struct bkey_i_subvolume *s =
@@ -97,16 +98,17 @@ int bch2_check_subvols(struct bch_fs *c)
/* Subvolumes: */
-int bch2_subvolume_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_subvolume_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags, struct printbuf *err)
{
- if (bkey_lt(k.k->p, SUBVOL_POS_MIN) ||
- bkey_gt(k.k->p, SUBVOL_POS_MAX)) {
- prt_printf(err, "invalid pos");
- return -BCH_ERR_invalid_bkey;
- }
+ int ret = 0;
- return 0;
+ bkey_fsck_err_on(bkey_lt(k.k->p, SUBVOL_POS_MIN) ||
+ bkey_gt(k.k->p, SUBVOL_POS_MAX), c, err,
+ subvol_pos_bad,
+ "invalid pos");
+fsck_err:
+ return ret;
}
void bch2_subvolume_to_text(struct printbuf *out, struct bch_fs *c,
@@ -230,7 +232,6 @@ static int __bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
{
struct btree_iter iter;
struct bkey_s_c_subvolume subvol;
- struct btree_trans_commit_hook *h;
u32 snapid;
int ret = 0;
@@ -246,22 +247,8 @@ static int __bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
snapid = le32_to_cpu(subvol.v->snapshot);
- ret = bch2_btree_delete_at(trans, &iter, 0);
- if (ret)
- goto err;
-
- ret = bch2_snapshot_node_set_deleted(trans, snapid);
- if (ret)
- goto err;
-
- h = bch2_trans_kmalloc(trans, sizeof(*h));
- ret = PTR_ERR_OR_ZERO(h);
- if (ret)
- goto err;
-
- h->fn = bch2_delete_dead_snapshots_hook;
- bch2_trans_commit_hook(trans, h);
-err:
+ ret = bch2_btree_delete_at(trans, &iter, 0) ?:
+ bch2_snapshot_node_set_deleted(trans, snapid);
bch2_trans_iter_exit(trans, &iter);
return ret;
}
diff --git a/fs/bcachefs/subvolume.h b/fs/bcachefs/subvolume.h
index bb14f92e8687..a1003d30ab0a 100644
--- a/fs/bcachefs/subvolume.h
+++ b/fs/bcachefs/subvolume.h
@@ -9,7 +9,7 @@ enum bkey_invalid_flags;
int bch2_check_subvols(struct bch_fs *);
-int bch2_subvolume_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_subvolume_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
void bch2_subvolume_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index 332d41e1c0a3..f4cad903f4d6 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -13,6 +13,7 @@
#include "replicas.h"
#include "quota.h"
#include "sb-clean.h"
+#include "sb-errors.h"
#include "sb-members.h"
#include "super-io.h"
#include "super.h"
@@ -720,7 +721,7 @@ retry:
if (opt_defined(*opts, sb))
goto err;
- printk(KERN_ERR "bcachefs (%s): error reading default superblock: %s",
+ printk(KERN_ERR "bcachefs (%s): error reading default superblock: %s\n",
path, err.buf);
printbuf_reset(&err);
@@ -782,7 +783,7 @@ got_super:
ret = bch2_sb_validate(sb, &err, READ);
if (ret) {
- printk(KERN_ERR "bcachefs (%s): error validating superblock: %s",
+ printk(KERN_ERR "bcachefs (%s): error validating superblock: %s\n",
path, err.buf);
goto err_no_print;
}
@@ -790,7 +791,7 @@ out:
printbuf_exit(&err);
return ret;
err:
- printk(KERN_ERR "bcachefs (%s): error reading superblock: %s",
+ printk(KERN_ERR "bcachefs (%s): error reading superblock: %s\n",
path, err.buf);
err_no_print:
bch2_free_super(sb);
@@ -805,7 +806,12 @@ static void write_super_endio(struct bio *bio)
/* XXX: return errors directly */
- if (bch2_dev_io_err_on(bio->bi_status, ca, "superblock write error: %s",
+ if (bch2_dev_io_err_on(bio->bi_status, ca,
+ bio_data_dir(bio)
+ ? BCH_MEMBER_ERROR_write
+ : BCH_MEMBER_ERROR_read,
+ "superblock %s error: %s",
+ bio_data_dir(bio) ? "write" : "read",
bch2_blk_status_to_str(bio->bi_status)))
ca->sb_write_error = 1;
@@ -892,7 +898,9 @@ int bch2_write_super(struct bch_fs *c)
SET_BCH_SB_BIG_ENDIAN(c->disk_sb.sb, CPU_BIG_ENDIAN);
bch2_sb_counters_from_cpu(c);
- bch_members_cpy_v2_v1(&c->disk_sb);
+ bch2_sb_members_from_cpu(c);
+ bch2_sb_members_cpy_v2_v1(&c->disk_sb);
+ bch2_sb_errors_from_cpu(c);
for_each_online_member(ca, c, i)
bch2_sb_from_fs(c, ca);
@@ -1175,7 +1183,7 @@ void bch2_sb_to_text(struct printbuf *out, struct bch_sb *sb,
prt_printf(out, "Created:");
prt_tab(out);
if (sb->time_base_lo)
- pr_time(out, div_u64(le64_to_cpu(sb->time_base_lo), NSEC_PER_SEC));
+ bch2_prt_datetime(out, div_u64(le64_to_cpu(sb->time_base_lo), NSEC_PER_SEC));
else
prt_printf(out, "(not set)");
prt_newline(out);
diff --git a/fs/bcachefs/super-io.h b/fs/bcachefs/super-io.h
index b0d8584f475f..f5abd102bff7 100644
--- a/fs/bcachefs/super-io.h
+++ b/fs/bcachefs/super-io.h
@@ -23,6 +23,11 @@ u64 bch2_upgrade_recovery_passes(struct bch_fs *c,
unsigned,
unsigned);
+static inline size_t bch2_sb_field_bytes(struct bch_sb_field *f)
+{
+ return le32_to_cpu(f->u64s) * sizeof(u64);
+}
+
#define field_to_type(_f, _name) \
container_of_or_null(_f, struct bch_sb_field_##_name, field)
@@ -78,41 +83,6 @@ static inline void bch2_check_set_feature(struct bch_fs *c, unsigned feat)
__bch2_check_set_feature(c, feat);
}
-/* BCH_SB_FIELD_members_v1: */
-
-static inline bool bch2_member_exists(struct bch_member *m)
-{
- return !bch2_is_zero(&m->uuid, sizeof(m->uuid));
-}
-
-static inline bool bch2_dev_exists(struct bch_sb *sb,
- unsigned dev)
-{
- if (dev < sb->nr_devices) {
- struct bch_member m = bch2_sb_member_get(sb, dev);
- return bch2_member_exists(&m);
- }
- return false;
-}
-
-static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
-{
- return (struct bch_member_cpu) {
- .nbuckets = le64_to_cpu(mi->nbuckets),
- .first_bucket = le16_to_cpu(mi->first_bucket),
- .bucket_size = le16_to_cpu(mi->bucket_size),
- .group = BCH_MEMBER_GROUP(mi),
- .state = BCH_MEMBER_STATE(mi),
- .discard = BCH_MEMBER_DISCARD(mi),
- .data_allowed = BCH_MEMBER_DATA_ALLOWED(mi),
- .durability = BCH_MEMBER_DURABILITY(mi)
- ? BCH_MEMBER_DURABILITY(mi) - 1
- : 1,
- .freespace_initialized = BCH_MEMBER_FREESPACE_INITIALIZED(mi),
- .valid = bch2_member_exists(mi),
- };
-}
-
void bch2_sb_maybe_downgrade(struct bch_fs *);
void bch2_sb_upgrade(struct bch_fs *, unsigned);
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 0e85c22672be..24672bb31cbe 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -49,6 +49,7 @@
#include "recovery.h"
#include "replicas.h"
#include "sb-clean.h"
+#include "sb-errors.h"
#include "sb-members.h"
#include "snapshot.h"
#include "subvolume.h"
@@ -400,7 +401,7 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
bch_info(c, "going read-write");
- ret = bch2_members_v2_init(c);
+ ret = bch2_sb_members_v2_init(c);
if (ret)
goto err;
@@ -481,6 +482,7 @@ static void __bch2_fs_free(struct bch_fs *c)
bch2_time_stats_exit(&c->times[i]);
bch2_free_pending_node_rewrites(c);
+ bch2_fs_sb_errors_exit(c);
bch2_fs_counters_exit(c);
bch2_fs_snapshots_exit(c);
bch2_fs_quota_exit(c);
@@ -713,6 +715,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
bch2_fs_quota_init(c);
bch2_fs_ec_init_early(c);
bch2_fs_move_init(c);
+ bch2_fs_sb_errors_init_early(c);
INIT_LIST_HEAD(&c->list);
@@ -729,8 +732,8 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
INIT_LIST_HEAD(&c->journal_iters);
- INIT_LIST_HEAD(&c->fsck_errors);
- mutex_init(&c->fsck_error_lock);
+ INIT_LIST_HEAD(&c->fsck_error_msgs);
+ mutex_init(&c->fsck_error_msgs_lock);
seqcount_init(&c->gc_pos_lock);
@@ -840,6 +843,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
}
ret = bch2_fs_counters_init(c) ?:
+ bch2_fs_sb_errors_init(c) ?:
bch2_io_clock_init(&c->io_clock[READ]) ?:
bch2_io_clock_init(&c->io_clock[WRITE]) ?:
bch2_fs_journal_init(&c->journal) ?:
@@ -942,16 +946,13 @@ int bch2_fs_start(struct bch_fs *c)
mutex_lock(&c->sb_lock);
- ret = bch2_members_v2_init(c);
+ ret = bch2_sb_members_v2_init(c);
if (ret) {
mutex_unlock(&c->sb_lock);
goto err;
}
for_each_online_member(ca, c, i)
- bch2_sb_from_fs(c, ca);
-
- for_each_online_member(ca, c, i)
bch2_members_v2_get_mut(c->disk_sb.sb, i)->last_mount = cpu_to_le64(now);
mutex_unlock(&c->sb_lock);
@@ -960,12 +961,6 @@ int bch2_fs_start(struct bch_fs *c)
bch2_dev_allocator_add(c, ca);
bch2_recalc_capacity(c);
- for (i = 0; i < BCH_TRANSACTIONS_NR; i++) {
- mutex_lock(&c->btree_transaction_stats[i].lock);
- bch2_time_stats_init(&c->btree_transaction_stats[i].lock_hold_times);
- mutex_unlock(&c->btree_transaction_stats[i].lock);
- }
-
ret = BCH_SB_INITIALIZED(c->disk_sb.sb)
? bch2_fs_recovery(c)
: bch2_fs_initialize(c);
@@ -1140,6 +1135,7 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
struct bch_member *member)
{
struct bch_dev *ca;
+ unsigned i;
ca = kzalloc(sizeof(*ca), GFP_KERNEL);
if (!ca)
@@ -1157,6 +1153,10 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
bch2_time_stats_init(&ca->io_latency[WRITE]);
ca->mi = bch2_mi_to_cpu(member);
+
+ for (i = 0; i < ARRAY_SIZE(member->errors); i++)
+ atomic64_set(&ca->errors[i], le64_to_cpu(member->errors[i]));
+
ca->uuid = member->uuid;
ca->nr_btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
@@ -1591,7 +1591,7 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
dev_mi = bch2_sb_member_get(sb.sb, sb.sb->dev_idx);
if (BCH_MEMBER_GROUP(&dev_mi)) {
- bch2_disk_path_to_text(&label, sb.sb, BCH_MEMBER_GROUP(&dev_mi) - 1);
+ bch2_disk_path_to_text_sb(&label, sb.sb, BCH_MEMBER_GROUP(&dev_mi) - 1);
if (label.allocation_failure) {
ret = -ENOMEM;
goto err;
@@ -1631,16 +1631,6 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
goto err_unlock;
}
- mi = bch2_sb_field_get(ca->disk_sb.sb, members_v2);
-
- if (!bch2_sb_field_resize(&ca->disk_sb, members_v2,
- le32_to_cpu(mi->field.u64s) +
- sizeof(dev_mi) / sizeof(u64))) {
- ret = -BCH_ERR_ENOSPC_sb_members;
- bch_err_msg(c, ret, "setting up new superblock");
- goto err_unlock;
- }
-
if (dynamic_fault("bcachefs:add:no_slot"))
goto no_slot;
@@ -1654,6 +1644,8 @@ no_slot:
have_slot:
nr_devices = max_t(unsigned, dev_idx + 1, c->sb.nr_devices);
+
+ mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
u64s = DIV_ROUND_UP(sizeof(struct bch_sb_field_members_v2) +
le16_to_cpu(mi->member_bytes) * nr_devices, sizeof(u64));
@@ -1689,13 +1681,13 @@ have_slot:
ret = bch2_trans_mark_dev_sb(c, ca);
if (ret) {
- bch_err_msg(c, ret, "marking new superblock");
+ bch_err_msg(ca, ret, "marking new superblock");
goto err_late;
}
ret = bch2_fs_freespace_init(c);
if (ret) {
- bch_err_msg(c, ret, "initializing free space");
+ bch_err_msg(ca, ret, "initializing free space");
goto err_late;
}
@@ -1763,19 +1755,26 @@ int bch2_dev_online(struct bch_fs *c, const char *path)
if (ca->mi.state == BCH_MEMBER_STATE_rw)
__bch2_dev_read_write(c, ca);
- mutex_lock(&c->sb_lock);
- struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
+ if (!ca->mi.freespace_initialized) {
+ ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
+ bch_err_msg(ca, ret, "initializing free space");
+ if (ret)
+ goto err;
+ }
- m->last_mount =
- cpu_to_le64(ktime_get_real_seconds());
+ if (!ca->journal.nr) {
+ ret = bch2_dev_journal_alloc(ca);
+ bch_err_msg(ca, ret, "allocating journal");
+ if (ret)
+ goto err;
+ }
+ mutex_lock(&c->sb_lock);
+ bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount =
+ cpu_to_le64(ktime_get_real_seconds());
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
- ret = bch2_fs_freespace_init(c);
- if (ret)
- bch_err_msg(c, ret, "initializing free space");
-
up_write(&c->state_lock);
return 0;
err:
@@ -1886,9 +1885,9 @@ found:
struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
struct bch_opts opts)
{
- struct bch_sb_handle *sb = NULL;
+ DARRAY(struct bch_sb_handle) sbs = { 0 };
struct bch_fs *c = NULL;
- unsigned i, best_sb = 0;
+ struct bch_sb_handle *sb, *best = NULL;
struct printbuf errbuf = PRINTBUF;
int ret = 0;
@@ -1900,49 +1899,46 @@ struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
goto err;
}
- sb = kcalloc(nr_devices, sizeof(*sb), GFP_KERNEL);
- if (!sb) {
- ret = -ENOMEM;
+ ret = darray_make_room(&sbs, nr_devices);
+ if (ret)
goto err;
- }
- for (i = 0; i < nr_devices; i++) {
- ret = bch2_read_super(devices[i], &opts, &sb[i]);
+ for (unsigned i = 0; i < nr_devices; i++) {
+ struct bch_sb_handle sb = { NULL };
+
+ ret = bch2_read_super(devices[i], &opts, &sb);
if (ret)
goto err;
+ BUG_ON(darray_push(&sbs, sb));
}
- for (i = 1; i < nr_devices; i++)
- if (le64_to_cpu(sb[i].sb->seq) >
- le64_to_cpu(sb[best_sb].sb->seq))
- best_sb = i;
-
- i = 0;
- while (i < nr_devices) {
- if (i != best_sb &&
- !bch2_dev_exists(sb[best_sb].sb, sb[i].sb->dev_idx)) {
- pr_info("%pg has been removed, skipping", sb[i].bdev);
- bch2_free_super(&sb[i]);
- array_remove_item(sb, nr_devices, i);
+ darray_for_each(sbs, sb)
+ if (!best || le64_to_cpu(sb->sb->seq) > le64_to_cpu(best->sb->seq))
+ best = sb;
+
+ darray_for_each_reverse(sbs, sb) {
+ if (sb != best && !bch2_dev_exists(best->sb, sb->sb->dev_idx)) {
+ pr_info("%pg has been removed, skipping", sb->bdev);
+ bch2_free_super(sb);
+ darray_remove_item(&sbs, sb);
+ best -= best > sb;
continue;
}
- ret = bch2_dev_in_fs(sb[best_sb].sb, sb[i].sb);
+ ret = bch2_dev_in_fs(best->sb, sb->sb);
if (ret)
goto err_print;
- i++;
}
- c = bch2_fs_alloc(sb[best_sb].sb, opts);
- if (IS_ERR(c)) {
- ret = PTR_ERR(c);
+ c = bch2_fs_alloc(best->sb, opts);
+ ret = PTR_ERR_OR_ZERO(c);
+ if (ret)
goto err;
- }
down_write(&c->state_lock);
- for (i = 0; i < nr_devices; i++) {
- ret = bch2_dev_attach_bdev(c, &sb[i]);
+ darray_for_each(sbs, sb) {
+ ret = bch2_dev_attach_bdev(c, sb);
if (ret) {
up_write(&c->state_lock);
goto err;
@@ -1961,7 +1957,9 @@ struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
goto err;
}
out:
- kfree(sb);
+ darray_for_each(sbs, sb)
+ bch2_free_super(sb);
+ darray_exit(&sbs);
printbuf_exit(&errbuf);
module_put(THIS_MODULE);
return c;
@@ -1971,9 +1969,6 @@ err_print:
err:
if (!IS_ERR_OR_NULL(c))
bch2_fs_stop(c);
- if (sb)
- for (i = 0; i < nr_devices; i++)
- bch2_free_super(&sb[i]);
c = ERR_PTR(ret);
goto out;
}
diff --git a/fs/bcachefs/super_types.h b/fs/bcachefs/super_types.h
index 78d6138db62d..7dda4985b99f 100644
--- a/fs/bcachefs/super_types.h
+++ b/fs/bcachefs/super_types.h
@@ -37,16 +37,4 @@ struct bch_member_cpu {
u8 valid;
};
-struct bch_disk_group_cpu {
- bool deleted;
- u16 parent;
- struct bch_devs_mask devs;
-};
-
-struct bch_disk_groups_cpu {
- struct rcu_head rcu;
- unsigned nr;
- struct bch_disk_group_cpu entries[] __counted_by(nr);
-};
-
#endif /* _BCACHEFS_SUPER_TYPES_H */
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index 397116966a7c..ab743115f169 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -149,7 +149,9 @@ read_attribute(bucket_size);
read_attribute(first_bucket);
read_attribute(nbuckets);
rw_attribute(durability);
-read_attribute(iodone);
+read_attribute(io_done);
+read_attribute(io_errors);
+write_attribute(io_errors_reset);
read_attribute(io_latency_read);
read_attribute(io_latency_write);
@@ -212,7 +214,7 @@ read_attribute(copy_gc_wait);
rw_attribute(rebalance_enabled);
sysfs_pd_controller_attribute(rebalance);
-read_attribute(rebalance_work);
+read_attribute(rebalance_status);
rw_attribute(promote_whole_extents);
read_attribute(new_stripes);
@@ -341,7 +343,7 @@ static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c
static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c)
{
- prt_printf(out, "%s: ", bch2_btree_ids[c->gc_gens_btree]);
+ prt_printf(out, "%s: ", bch2_btree_id_str(c->gc_gens_btree));
bch2_bpos_to_text(out, c->gc_gens_pos);
prt_printf(out, "\n");
}
@@ -386,8 +388,8 @@ SHOW(bch2_fs)
if (attr == &sysfs_copy_gc_wait)
bch2_copygc_wait_to_text(out, c);
- if (attr == &sysfs_rebalance_work)
- bch2_rebalance_work_to_text(out, c);
+ if (attr == &sysfs_rebalance_status)
+ bch2_rebalance_status_to_text(out, c);
sysfs_print(promote_whole_extents, c->promote_whole_extents);
@@ -646,7 +648,7 @@ struct attribute *bch2_fs_internal_files[] = {
&sysfs_copy_gc_wait,
&sysfs_rebalance_enabled,
- &sysfs_rebalance_work,
+ &sysfs_rebalance_status,
sysfs_pd_controller_files(rebalance),
&sysfs_moving_ctxts,
@@ -707,10 +709,8 @@ STORE(bch2_fs_opts_dir)
bch2_opt_set_by_id(&c->opts, id, v);
if ((id == Opt_background_target ||
- id == Opt_background_compression) && v) {
- bch2_rebalance_add_work(c, S64_MAX);
- rebalance_wakeup(c);
- }
+ id == Opt_background_compression) && v)
+ bch2_set_rebalance_needs_scan(c, 0);
ret = size;
err:
@@ -882,7 +882,7 @@ static const char * const bch2_rw[] = {
NULL
};
-static void dev_iodone_to_text(struct printbuf *out, struct bch_dev *ca)
+static void dev_io_done_to_text(struct printbuf *out, struct bch_dev *ca)
{
int rw, i;
@@ -910,13 +910,8 @@ SHOW(bch2_dev)
sysfs_print(discard, ca->mi.discard);
if (attr == &sysfs_label) {
- if (ca->mi.group) {
- mutex_lock(&c->sb_lock);
- bch2_disk_path_to_text(out, c->disk_sb.sb,
- ca->mi.group - 1);
- mutex_unlock(&c->sb_lock);
- }
-
+ if (ca->mi.group)
+ bch2_disk_path_to_text(out, c, ca->mi.group - 1);
prt_char(out, '\n');
}
@@ -930,8 +925,11 @@ SHOW(bch2_dev)
prt_char(out, '\n');
}
- if (attr == &sysfs_iodone)
- dev_iodone_to_text(out, ca);
+ if (attr == &sysfs_io_done)
+ dev_io_done_to_text(out, ca);
+
+ if (attr == &sysfs_io_errors)
+ bch2_dev_io_errors_to_text(out, ca);
sysfs_print(io_latency_read, atomic64_read(&ca->cur_latency[READ]));
sysfs_print(io_latency_write, atomic64_read(&ca->cur_latency[WRITE]));
@@ -998,6 +996,9 @@ STORE(bch2_dev)
return ret;
}
+ if (attr == &sysfs_io_errors_reset)
+ bch2_dev_errors_reset(ca);
+
return size;
}
SYSFS_OPS(bch2_dev);
@@ -1015,7 +1016,9 @@ struct attribute *bch2_dev_files[] = {
&sysfs_label,
&sysfs_has_data,
- &sysfs_iodone,
+ &sysfs_io_done,
+ &sysfs_io_errors,
+ &sysfs_io_errors_reset,
&sysfs_io_latency_read,
&sysfs_io_latency_write,
diff --git a/fs/bcachefs/trace.c b/fs/bcachefs/trace.c
index 33efa6005c6f..dc48b52b01b4 100644
--- a/fs/bcachefs/trace.c
+++ b/fs/bcachefs/trace.c
@@ -7,6 +7,7 @@
#include "btree_locking.h"
#include "btree_update_interior.h"
#include "keylist.h"
+#include "move_types.h"
#include "opts.h"
#include "six.h"
diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h
index 19264492151b..893304a1f06e 100644
--- a/fs/bcachefs/trace.h
+++ b/fs/bcachefs/trace.h
@@ -68,7 +68,7 @@ DECLARE_EVENT_CLASS(btree_node,
TP_printk("%d,%d %u %s %llu:%llu:%u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->level,
- bch2_btree_ids[__entry->btree_id],
+ bch2_btree_id_str(__entry->btree_id),
__entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);
@@ -461,7 +461,7 @@ TRACE_EVENT(btree_path_relock_fail,
TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
__entry->trans_fn,
(void *) __entry->caller_ip,
- bch2_btree_ids[__entry->btree_id],
+ bch2_btree_id_str(__entry->btree_id),
__entry->pos_inode,
__entry->pos_offset,
__entry->pos_snapshot,
@@ -522,7 +522,7 @@ TRACE_EVENT(btree_path_upgrade_fail,
TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
__entry->trans_fn,
(void *) __entry->caller_ip,
- bch2_btree_ids[__entry->btree_id],
+ bch2_btree_id_str(__entry->btree_id),
__entry->pos_inode,
__entry->pos_offset,
__entry->pos_snapshot,
@@ -767,25 +767,36 @@ DEFINE_EVENT(bkey, move_extent_alloc_mem_fail,
);
TRACE_EVENT(move_data,
- TP_PROTO(struct bch_fs *c, u64 sectors_moved,
- u64 keys_moved),
- TP_ARGS(c, sectors_moved, keys_moved),
+ TP_PROTO(struct bch_fs *c,
+ struct bch_move_stats *stats),
+ TP_ARGS(c, stats),
TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(u64, sectors_moved )
+ __field(dev_t, dev )
__field(u64, keys_moved )
+ __field(u64, keys_raced )
+ __field(u64, sectors_seen )
+ __field(u64, sectors_moved )
+ __field(u64, sectors_raced )
),
TP_fast_assign(
- __entry->dev = c->dev;
- __entry->sectors_moved = sectors_moved;
- __entry->keys_moved = keys_moved;
+ __entry->dev = c->dev;
+ __entry->keys_moved = atomic64_read(&stats->keys_moved);
+ __entry->keys_raced = atomic64_read(&stats->keys_raced);
+ __entry->sectors_seen = atomic64_read(&stats->sectors_seen);
+ __entry->sectors_moved = atomic64_read(&stats->sectors_moved);
+ __entry->sectors_raced = atomic64_read(&stats->sectors_raced);
),
- TP_printk("%d,%d sectors_moved %llu keys_moved %llu",
+ TP_printk("%d,%d keys moved %llu raced %llu "
+ "sectors seen %llu moved %llu raced %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->sectors_moved, __entry->keys_moved)
+ __entry->keys_moved,
+ __entry->keys_raced,
+ __entry->sectors_seen,
+ __entry->sectors_moved,
+ __entry->sectors_raced)
);
TRACE_EVENT(evacuate_bucket,
@@ -1012,7 +1023,7 @@ DECLARE_EVENT_CLASS(transaction_restart_iter,
TP_printk("%s %pS btree %s pos %llu:%llu:%u",
__entry->trans_fn,
(void *) __entry->caller_ip,
- bch2_btree_ids[__entry->btree_id],
+ bch2_btree_id_str(__entry->btree_id),
__entry->pos_inode,
__entry->pos_offset,
__entry->pos_snapshot)
@@ -1032,13 +1043,16 @@ DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_split,
TP_ARGS(trans, caller_ip, path)
);
+struct get_locks_fail;
+
TRACE_EVENT(trans_restart_upgrade,
TP_PROTO(struct btree_trans *trans,
unsigned long caller_ip,
struct btree_path *path,
unsigned old_locks_want,
- unsigned new_locks_want),
- TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want),
+ unsigned new_locks_want,
+ struct get_locks_fail *f),
+ TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want, f),
TP_STRUCT__entry(
__array(char, trans_fn, 32 )
@@ -1046,6 +1060,11 @@ TRACE_EVENT(trans_restart_upgrade,
__field(u8, btree_id )
__field(u8, old_locks_want )
__field(u8, new_locks_want )
+ __field(u8, level )
+ __field(u32, path_seq )
+ __field(u32, node_seq )
+ __field(u32, path_alloc_seq )
+ __field(u32, downgrade_seq)
TRACE_BPOS_entries(pos)
),
@@ -1055,18 +1074,28 @@ TRACE_EVENT(trans_restart_upgrade,
__entry->btree_id = path->btree_id;
__entry->old_locks_want = old_locks_want;
__entry->new_locks_want = new_locks_want;
+ __entry->level = f->l;
+ __entry->path_seq = path->l[f->l].lock_seq;
+ __entry->node_seq = IS_ERR_OR_NULL(f->b) ? 0 : f->b->c.lock.seq;
+ __entry->path_alloc_seq = path->alloc_seq;
+ __entry->downgrade_seq = path->downgrade_seq;
TRACE_BPOS_assign(pos, path->pos)
),
- TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u",
+ TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u level %u path seq %u node seq %u alloc_seq %u downgrade_seq %u",
__entry->trans_fn,
(void *) __entry->caller_ip,
- bch2_btree_ids[__entry->btree_id],
+ bch2_btree_id_str(__entry->btree_id),
__entry->pos_inode,
__entry->pos_offset,
__entry->pos_snapshot,
__entry->old_locks_want,
- __entry->new_locks_want)
+ __entry->new_locks_want,
+ __entry->level,
+ __entry->path_seq,
+ __entry->node_seq,
+ __entry->path_alloc_seq,
+ __entry->downgrade_seq)
);
DEFINE_EVENT(transaction_restart_iter, trans_restart_relock,
@@ -1219,7 +1248,7 @@ TRACE_EVENT(trans_restart_key_cache_key_realloced,
TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
__entry->trans_fn,
(void *) __entry->caller_ip,
- bch2_btree_ids[__entry->btree_id],
+ bch2_btree_id_str(__entry->btree_id),
__entry->pos_inode,
__entry->pos_offset,
__entry->pos_snapshot,
@@ -1227,6 +1256,27 @@ TRACE_EVENT(trans_restart_key_cache_key_realloced,
__entry->new_u64s)
);
+TRACE_EVENT(path_downgrade,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip,
+ struct btree_path *path),
+ TP_ARGS(trans, caller_ip, path),
+
+ TP_STRUCT__entry(
+ __array(char, trans_fn, 32 )
+ __field(unsigned long, caller_ip )
+ ),
+
+ TP_fast_assign(
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ __entry->caller_ip = caller_ip;
+ ),
+
+ TP_printk("%s %pS",
+ __entry->trans_fn,
+ (void *) __entry->caller_ip)
+);
+
DEFINE_EVENT(transaction_event, trans_restart_write_buffer_flush,
TP_PROTO(struct btree_trans *trans,
unsigned long caller_ip),
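With the reworked move_data tracepoint above, callers now pass the whole bch_move_stats rather than two counters. A minimal sketch of a call site, assuming only what the TP_PROTO and TP_fast_assign above show (the counter fields are atomic64_t members of struct bch_move_stats from move_types.h; the helper below is hypothetical):
/*
 * Sketch of the new two-argument trace_move_data() form. The field names
 * mirror what TP_fast_assign reads; example_finish_move() is made up.
 */
static void example_finish_move(struct bch_fs *c, struct bch_move_stats *stats,
				u64 keys, u64 sectors)
{
	atomic64_add(keys, &stats->keys_moved);
	atomic64_add(sectors, &stats->sectors_moved);
	/* was trace_move_data(c, sectors_moved, keys_moved) */
	trace_move_data(c, stats);
}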
diff --git a/fs/bcachefs/util.c b/fs/bcachefs/util.c
index 08bac0ba8d0b..84b142fcc3df 100644
--- a/fs/bcachefs/util.c
+++ b/fs/bcachefs/util.c
@@ -467,6 +467,24 @@ static void bch2_pr_time_units_aligned(struct printbuf *out, u64 ns)
prt_printf(out, "%s", u->name);
}
+#ifndef __KERNEL__
+#include <time.h>
+void bch2_prt_datetime(struct printbuf *out, time64_t sec)
+{
+ time_t t = sec;
+ char buf[64];
+ ctime_r(&t, buf);
+ prt_str(out, buf);
+}
+#else
+void bch2_prt_datetime(struct printbuf *out, time64_t sec)
+{
+ char buf[64];
+ snprintf(buf, sizeof(buf), "%ptT", &sec);
+ prt_str(out, buf);
+}
+#endif
+
#define TABSTOP_SIZE 12
static inline void pr_name_and_units(struct printbuf *out, const char *name, u64 ns)
diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
index 849a37ae497c..2984b57b2958 100644
--- a/fs/bcachefs/util.h
+++ b/fs/bcachefs/util.h
@@ -245,26 +245,7 @@ do { \
#define prt_bitflags(...) bch2_prt_bitflags(__VA_ARGS__)
void bch2_pr_time_units(struct printbuf *, u64);
-
-#ifdef __KERNEL__
-static inline void pr_time(struct printbuf *out, u64 time)
-{
- prt_printf(out, "%llu", time);
-}
-#else
-#include <time.h>
-static inline void pr_time(struct printbuf *out, u64 _time)
-{
- char time_str[64];
- time_t time = _time;
- struct tm *tm = localtime(&time);
- size_t err = strftime(time_str, sizeof(time_str), "%c", tm);
- if (!err)
- prt_printf(out, "(formatting error)");
- else
- prt_printf(out, "%s", time_str);
-}
-#endif
+void bch2_prt_datetime(struct printbuf *, time64_t);
#ifdef __KERNEL__
static inline void uuid_unparse_lower(u8 *uuid, char *out)
diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c
index b069b1a62e25..a39ff0c296ec 100644
--- a/fs/bcachefs/xattr.c
+++ b/fs/bcachefs/xattr.c
@@ -70,46 +70,38 @@ const struct bch_hash_desc bch2_xattr_hash_desc = {
.cmp_bkey = xattr_cmp_bkey,
};
-int bch2_xattr_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_xattr_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
- const struct xattr_handler *handler;
struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);
+ unsigned val_u64s = xattr_val_u64s(xattr.v->x_name_len,
+ le16_to_cpu(xattr.v->x_val_len));
+ int ret = 0;
- if (bkey_val_u64s(k.k) <
- xattr_val_u64s(xattr.v->x_name_len,
- le16_to_cpu(xattr.v->x_val_len))) {
- prt_printf(err, "value too small (%zu < %u)",
- bkey_val_u64s(k.k),
- xattr_val_u64s(xattr.v->x_name_len,
- le16_to_cpu(xattr.v->x_val_len)));
- return -BCH_ERR_invalid_bkey;
- }
+ bkey_fsck_err_on(bkey_val_u64s(k.k) < val_u64s, c, err,
+ xattr_val_size_too_small,
+ "value too small (%zu < %u)",
+ bkey_val_u64s(k.k), val_u64s);
/* XXX why +4 ? */
- if (bkey_val_u64s(k.k) >
- xattr_val_u64s(xattr.v->x_name_len,
- le16_to_cpu(xattr.v->x_val_len) + 4)) {
- prt_printf(err, "value too big (%zu > %u)",
- bkey_val_u64s(k.k),
- xattr_val_u64s(xattr.v->x_name_len,
- le16_to_cpu(xattr.v->x_val_len) + 4));
- return -BCH_ERR_invalid_bkey;
- }
-
- handler = bch2_xattr_type_to_handler(xattr.v->x_type);
- if (!handler) {
- prt_printf(err, "invalid type (%u)", xattr.v->x_type);
- return -BCH_ERR_invalid_bkey;
- }
-
- if (memchr(xattr.v->x_name, '\0', xattr.v->x_name_len)) {
- prt_printf(err, "xattr name has invalid characters");
- return -BCH_ERR_invalid_bkey;
- }
-
- return 0;
+ val_u64s = xattr_val_u64s(xattr.v->x_name_len,
+ le16_to_cpu(xattr.v->x_val_len) + 4);
+
+ bkey_fsck_err_on(bkey_val_u64s(k.k) > val_u64s, c, err,
+ xattr_val_size_too_big,
+ "value too big (%zu > %u)",
+ bkey_val_u64s(k.k), val_u64s);
+
+ bkey_fsck_err_on(!bch2_xattr_type_to_handler(xattr.v->x_type), c, err,
+ xattr_invalid_type,
+ "invalid type (%u)", xattr.v->x_type);
+
+ bkey_fsck_err_on(memchr(xattr.v->x_name, '\0', xattr.v->x_name_len), c, err,
+ xattr_name_invalid_chars,
+ "xattr name has invalid characters");
+fsck_err:
+ return ret;
}
void bch2_xattr_to_text(struct printbuf *out, struct bch_fs *c,
@@ -590,7 +582,7 @@ err:
if (value &&
(opt_id == Opt_background_compression ||
opt_id == Opt_background_target))
- bch2_rebalance_add_work(c, inode->v.i_blocks);
+ bch2_set_rebalance_needs_scan(c, inode->ei_inode.bi_inum);
return bch2_err_class(ret);
}
diff --git a/fs/bcachefs/xattr.h b/fs/bcachefs/xattr.h
index f5a52e3a6016..1337f31a5c49 100644
--- a/fs/bcachefs/xattr.h
+++ b/fs/bcachefs/xattr.h
@@ -6,7 +6,7 @@
extern const struct bch_hash_desc bch2_xattr_hash_desc;
-int bch2_xattr_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_xattr_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
void bch2_xattr_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 9acdec56f626..a93d76df8ed8 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -96,6 +96,7 @@ static const struct address_space_operations befs_symlink_aops = {
};
static const struct export_operations befs_export_operations = {
+ .encode_fh = generic_encode_ino32_fh,
.fh_to_dentry = befs_fh_to_dentry,
.fh_to_parent = befs_fh_to_parent,
.get_parent = befs_get_parent,
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index c53a1d220622..1564eacc253d 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include "super.h"
+#include "mds_client.h"
static inline void ceph_set_cached_acl(struct inode *inode,
int type, struct posix_acl *acl)
@@ -31,6 +32,7 @@ static inline void ceph_set_cached_acl(struct inode *inode,
struct posix_acl *ceph_get_acl(struct inode *inode, int type, bool rcu)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int size;
unsigned int retry_cnt = 0;
const char *name;
@@ -72,8 +74,8 @@ retry:
} else if (size == -ENODATA || size == 0) {
acl = NULL;
} else {
- pr_err_ratelimited("get acl %llx.%llx failed, err=%d\n",
- ceph_vinop(inode), size);
+ pr_err_ratelimited_client(cl, "%llx.%llx failed, err=%d\n",
+ ceph_vinop(inode), size);
acl = ERR_PTR(-EIO);
}
@@ -105,7 +107,7 @@ int ceph_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
case ACL_TYPE_ACCESS:
name = XATTR_NAME_POSIX_ACL_ACCESS;
if (acl) {
- ret = posix_acl_update_mode(&nop_mnt_idmap, inode,
+ ret = posix_acl_update_mode(idmap, inode,
&new_mode, &acl);
if (ret)
goto out;
@@ -140,7 +142,7 @@ int ceph_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
newattrs.ia_ctime = current_time(inode);
newattrs.ia_mode = new_mode;
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
- ret = __ceph_setattr(inode, &newattrs, NULL);
+ ret = __ceph_setattr(idmap, inode, &newattrs, NULL);
if (ret)
goto out_free;
}
@@ -151,7 +153,7 @@ int ceph_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
newattrs.ia_ctime = old_ctime;
newattrs.ia_mode = old_mode;
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
- __ceph_setattr(inode, &newattrs, NULL);
+ __ceph_setattr(idmap, inode, &newattrs, NULL);
}
goto out_free;
}
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 936b9e0b351d..85be3bf18cdf 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -79,18 +79,18 @@ static inline struct ceph_snap_context *page_snap_context(struct page *page)
*/
static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
{
- struct inode *inode;
+ struct inode *inode = mapping->host;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci;
struct ceph_snap_context *snapc;
if (folio_test_dirty(folio)) {
- dout("%p dirty_folio %p idx %lu -- already dirty\n",
- mapping->host, folio, folio->index);
+ doutc(cl, "%llx.%llx %p idx %lu -- already dirty\n",
+ ceph_vinop(inode), folio, folio->index);
VM_BUG_ON_FOLIO(!folio_test_private(folio), folio);
return false;
}
- inode = mapping->host;
ci = ceph_inode(inode);
/* dirty the head */
@@ -111,12 +111,12 @@ static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
if (ci->i_wrbuffer_ref == 0)
ihold(inode);
++ci->i_wrbuffer_ref;
- dout("%p dirty_folio %p idx %lu head %d/%d -> %d/%d "
- "snapc %p seq %lld (%d snaps)\n",
- mapping->host, folio, folio->index,
- ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
- ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
- snapc, snapc->seq, snapc->num_snaps);
+ doutc(cl, "%llx.%llx %p idx %lu head %d/%d -> %d/%d "
+ "snapc %p seq %lld (%d snaps)\n",
+ ceph_vinop(inode), folio, folio->index,
+ ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
+ ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
+ snapc, snapc->seq, snapc->num_snaps);
spin_unlock(&ci->i_ceph_lock);
/*
@@ -137,23 +137,22 @@ static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
static void ceph_invalidate_folio(struct folio *folio, size_t offset,
size_t length)
{
- struct inode *inode;
- struct ceph_inode_info *ci;
+ struct inode *inode = folio->mapping->host;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
+ struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_snap_context *snapc;
- inode = folio->mapping->host;
- ci = ceph_inode(inode);
if (offset != 0 || length != folio_size(folio)) {
- dout("%p invalidate_folio idx %lu partial dirty page %zu~%zu\n",
- inode, folio->index, offset, length);
+ doutc(cl, "%llx.%llx idx %lu partial dirty page %zu~%zu\n",
+ ceph_vinop(inode), folio->index, offset, length);
return;
}
WARN_ON(!folio_test_locked(folio));
if (folio_test_private(folio)) {
- dout("%p invalidate_folio idx %lu full dirty page\n",
- inode, folio->index);
+ doutc(cl, "%llx.%llx idx %lu full dirty page\n",
+ ceph_vinop(inode), folio->index);
snapc = folio_detach_private(folio);
ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
@@ -166,10 +165,10 @@ static void ceph_invalidate_folio(struct folio *folio, size_t offset,
static bool ceph_release_folio(struct folio *folio, gfp_t gfp)
{
struct inode *inode = folio->mapping->host;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
- dout("%llx:%llx release_folio idx %lu (%sdirty)\n",
- ceph_vinop(inode),
- folio->index, folio_test_dirty(folio) ? "" : "not ");
+ doutc(cl, "%llx.%llx idx %lu (%sdirty)\n", ceph_vinop(inode),
+ folio->index, folio_test_dirty(folio) ? "" : "not ");
if (folio_test_private(folio))
return false;
@@ -229,7 +228,7 @@ static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
{
struct inode *inode = subreq->rreq->inode;
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
u64 objno, objoff;
u32 xlen;
@@ -244,7 +243,8 @@ static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
static void finish_netfs_read(struct ceph_osd_request *req)
{
struct inode *inode = req->r_inode;
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
struct netfs_io_subrequest *subreq = req->r_priv;
struct ceph_osd_req_op *op = &req->r_ops[0];
@@ -254,8 +254,8 @@ static void finish_netfs_read(struct ceph_osd_request *req)
ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency,
req->r_end_latency, osd_data->length, err);
- dout("%s: result %d subreq->len=%zu i_size=%lld\n", __func__, req->r_result,
- subreq->len, i_size_read(req->r_inode));
+ doutc(cl, "result %d subreq->len=%zu i_size=%lld\n", req->r_result,
+ subreq->len, i_size_read(req->r_inode));
/* no object means success but no data */
if (err == -ENOENT)
@@ -348,7 +348,8 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
struct netfs_io_request *rreq = subreq->rreq;
struct inode *inode = rreq->inode;
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
struct ceph_osd_request *req = NULL;
struct ceph_vino vino = ceph_vino(inode);
struct iov_iter iter;
@@ -383,7 +384,8 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
goto out;
}
- dout("%s: pos=%llu orig_len=%zu len=%llu\n", __func__, subreq->start, subreq->len, len);
+ doutc(cl, "%llx.%llx pos=%llu orig_len=%zu len=%llu\n",
+ ceph_vinop(inode), subreq->start, subreq->len, len);
iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, subreq->start, len);
@@ -400,8 +402,8 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
err = iov_iter_get_pages_alloc2(&iter, &pages, len, &page_off);
if (err < 0) {
- dout("%s: iov_ter_get_pages_alloc returned %d\n",
- __func__, err);
+ doutc(cl, "%llx.%llx failed to allocate pages, %d\n",
+ ceph_vinop(inode), err);
goto out;
}
@@ -429,12 +431,13 @@ out:
ceph_osdc_put_request(req);
if (err)
netfs_subreq_terminated(subreq, err, false);
- dout("%s: result %d\n", __func__, err);
+ doutc(cl, "%llx.%llx result %d\n", ceph_vinop(inode), err);
}
static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
{
struct inode *inode = rreq->inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int got = 0, want = CEPH_CAP_FILE_CACHE;
struct ceph_netfs_request_data *priv;
int ret = 0;
@@ -466,12 +469,12 @@ static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
*/
ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
if (ret < 0) {
- dout("start_read %p, error getting cap\n", inode);
+ doutc(cl, "%llx.%llx, error getting cap\n", ceph_vinop(inode));
goto out;
}
if (!(got & want)) {
- dout("start_read %p, no cache cap\n", inode);
+ doutc(cl, "%llx.%llx, no cache cap\n", ceph_vinop(inode));
ret = -EACCES;
goto out;
}
@@ -563,13 +566,14 @@ get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
struct ceph_snap_context *page_snapc)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_snap_context *snapc = NULL;
struct ceph_cap_snap *capsnap = NULL;
spin_lock(&ci->i_ceph_lock);
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
- dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
- capsnap->context, capsnap->dirty_pages);
+ doutc(cl, " capsnap %p snapc %p has %d dirty pages\n",
+ capsnap, capsnap->context, capsnap->dirty_pages);
if (!capsnap->dirty_pages)
continue;
@@ -601,8 +605,8 @@ get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
}
if (!snapc && ci->i_wrbuffer_ref_head) {
snapc = ceph_get_snap_context(ci->i_head_snapc);
- dout(" head snapc %p has %d dirty pages\n",
- snapc, ci->i_wrbuffer_ref_head);
+ doutc(cl, " head snapc %p has %d dirty pages\n", snapc,
+ ci->i_wrbuffer_ref_head);
if (ctl) {
ctl->i_size = i_size_read(inode);
ctl->truncate_size = ci->i_truncate_size;
@@ -658,7 +662,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
struct folio *folio = page_folio(page);
struct inode *inode = page->mapping->host;
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
struct ceph_snap_context *snapc, *oldest;
loff_t page_off = page_offset(page);
int err;
@@ -670,7 +675,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
bool caching = ceph_is_cache_enabled(inode);
struct page *bounce_page = NULL;
- dout("writepage %p idx %lu\n", page, page->index);
+ doutc(cl, "%llx.%llx page %p idx %lu\n", ceph_vinop(inode), page,
+ page->index);
if (ceph_inode_is_shutdown(inode))
return -EIO;
@@ -678,13 +684,14 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
/* verify this is a writeable snap context */
snapc = page_snap_context(page);
if (!snapc) {
- dout("writepage %p page %p not dirty?\n", inode, page);
+ doutc(cl, "%llx.%llx page %p not dirty?\n", ceph_vinop(inode),
+ page);
return 0;
}
oldest = get_oldest_context(inode, &ceph_wbc, snapc);
if (snapc->seq > oldest->seq) {
- dout("writepage %p page %p snapc %p not writeable - noop\n",
- inode, page, snapc);
+ doutc(cl, "%llx.%llx page %p snapc %p not writeable - noop\n",
+ ceph_vinop(inode), page, snapc);
/* we should only noop if called by kswapd */
WARN_ON(!(current->flags & PF_MEMALLOC));
ceph_put_snap_context(oldest);
@@ -695,8 +702,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
/* is this a partial page at end of file? */
if (page_off >= ceph_wbc.i_size) {
- dout("folio at %lu beyond eof %llu\n", folio->index,
- ceph_wbc.i_size);
+ doutc(cl, "%llx.%llx folio at %lu beyond eof %llu\n",
+ ceph_vinop(inode), folio->index, ceph_wbc.i_size);
folio_invalidate(folio, 0, folio_size(folio));
return 0;
}
@@ -705,8 +712,9 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
len = ceph_wbc.i_size - page_off;
wlen = IS_ENCRYPTED(inode) ? round_up(len, CEPH_FSCRYPT_BLOCK_SIZE) : len;
- dout("writepage %p page %p index %lu on %llu~%llu snapc %p seq %lld\n",
- inode, page, page->index, page_off, wlen, snapc, snapc->seq);
+ doutc(cl, "%llx.%llx page %p index %lu on %llu~%llu snapc %p seq %lld\n",
+ ceph_vinop(inode), page, page->index, page_off, wlen, snapc,
+ snapc->seq);
if (atomic_long_inc_return(&fsc->writeback_count) >
CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
@@ -747,8 +755,9 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
osd_req_op_extent_osd_data_pages(req, 0,
bounce_page ? &bounce_page : &page, wlen, 0,
false, false);
- dout("writepage %llu~%llu (%llu bytes, %sencrypted)\n",
- page_off, len, wlen, IS_ENCRYPTED(inode) ? "" : "not ");
+ doutc(cl, "%llx.%llx %llu~%llu (%llu bytes, %sencrypted)\n",
+ ceph_vinop(inode), page_off, len, wlen,
+ IS_ENCRYPTED(inode) ? "" : "not ");
req->r_mtime = inode_get_mtime(inode);
ceph_osdc_start_request(osdc, req);
@@ -767,19 +776,21 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
wbc = &tmp_wbc;
if (err == -ERESTARTSYS) {
/* killed by SIGKILL */
- dout("writepage interrupted page %p\n", page);
+ doutc(cl, "%llx.%llx interrupted page %p\n",
+ ceph_vinop(inode), page);
redirty_page_for_writepage(wbc, page);
end_page_writeback(page);
return err;
}
if (err == -EBLOCKLISTED)
fsc->blocklisted = true;
- dout("writepage setting page/mapping error %d %p\n",
- err, page);
+ doutc(cl, "%llx.%llx setting page/mapping error %d %p\n",
+ ceph_vinop(inode), err, page);
mapping_set_error(&inode->i_data, err);
wbc->pages_skipped++;
} else {
- dout("writepage cleaned page %p\n", page);
+ doutc(cl, "%llx.%llx cleaned page %p\n",
+ ceph_vinop(inode), page);
err = 0; /* vfs expects us to return 0 */
}
oldest = detach_page_private(page);
@@ -803,7 +814,7 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
ihold(inode);
if (wbc->sync_mode == WB_SYNC_NONE &&
- ceph_inode_to_client(inode)->write_congested)
+ ceph_inode_to_fs_client(inode)->write_congested)
return AOP_WRITEPAGE_ACTIVATE;
wait_on_page_fscache(page);
@@ -829,6 +840,7 @@ static void writepages_finish(struct ceph_osd_request *req)
{
struct inode *inode = req->r_inode;
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_osd_data *osd_data;
struct page *page;
int num_pages, total_pages = 0;
@@ -836,11 +848,11 @@ static void writepages_finish(struct ceph_osd_request *req)
int rc = req->r_result;
struct ceph_snap_context *snapc = req->r_snapc;
struct address_space *mapping = inode->i_mapping;
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
unsigned int len = 0;
bool remove_page;
- dout("writepages_finish %p rc %d\n", inode, rc);
+ doutc(cl, "%llx.%llx rc %d\n", ceph_vinop(inode), rc);
if (rc < 0) {
mapping_set_error(mapping, rc);
ceph_set_error_write(ci);
@@ -862,8 +874,10 @@ static void writepages_finish(struct ceph_osd_request *req)
/* clean all pages */
for (i = 0; i < req->r_num_ops; i++) {
if (req->r_ops[i].op != CEPH_OSD_OP_WRITE) {
- pr_warn("%s incorrect op %d req %p index %d tid %llu\n",
- __func__, req->r_ops[i].op, req, i, req->r_tid);
+ pr_warn_client(cl,
+ "%llx.%llx incorrect op %d req %p index %d tid %llu\n",
+ ceph_vinop(inode), req->r_ops[i].op, req, i,
+ req->r_tid);
break;
}
@@ -890,7 +904,7 @@ static void writepages_finish(struct ceph_osd_request *req)
ceph_put_snap_context(detach_page_private(page));
end_page_writeback(page);
- dout("unlocking %p\n", page);
+ doutc(cl, "unlocking %p\n", page);
if (remove_page)
generic_error_remove_page(inode->i_mapping,
@@ -898,8 +912,9 @@ static void writepages_finish(struct ceph_osd_request *req)
unlock_page(page);
}
- dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n",
- inode, osd_data->length, rc >= 0 ? num_pages : 0);
+ doutc(cl, "%llx.%llx wrote %llu bytes cleaned %d pages\n",
+ ceph_vinop(inode), osd_data->length,
+ rc >= 0 ? num_pages : 0);
release_pages(osd_data->pages, num_pages);
}
@@ -926,7 +941,8 @@ static int ceph_writepages_start(struct address_space *mapping,
{
struct inode *inode = mapping->host;
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
struct ceph_vino vino = ceph_vino(inode);
pgoff_t index, start_index, end = -1;
struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
@@ -944,15 +960,15 @@ static int ceph_writepages_start(struct address_space *mapping,
fsc->write_congested)
return 0;
- dout("writepages_start %p (mode=%s)\n", inode,
- wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
- (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
+ doutc(cl, "%llx.%llx (mode=%s)\n", ceph_vinop(inode),
+ wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
+ (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
if (ceph_inode_is_shutdown(inode)) {
if (ci->i_wrbuffer_ref > 0) {
- pr_warn_ratelimited(
- "writepage_start %p %lld forced umount\n",
- inode, ceph_ino(inode));
+ pr_warn_ratelimited_client(cl,
+ "%llx.%llx %lld forced umount\n",
+ ceph_vinop(inode), ceph_ino(inode));
}
mapping_set_error(mapping, -EIO);
return -EIO; /* we're in a forced umount, don't write! */
@@ -976,11 +992,11 @@ retry:
if (!snapc) {
/* hmm, why does writepages get called when there
is no dirty data? */
- dout(" no snap context with dirty data?\n");
+ doutc(cl, " no snap context with dirty data?\n");
goto out;
}
- dout(" oldest snapc is %p seq %lld (%d snaps)\n",
- snapc, snapc->seq, snapc->num_snaps);
+ doutc(cl, " oldest snapc is %p seq %lld (%d snaps)\n", snapc,
+ snapc->seq, snapc->num_snaps);
should_loop = false;
if (ceph_wbc.head_snapc && snapc != last_snapc) {
@@ -990,13 +1006,13 @@ retry:
end = -1;
if (index > 0)
should_loop = true;
- dout(" cyclic, start at %lu\n", index);
+ doutc(cl, " cyclic, start at %lu\n", index);
} else {
index = wbc->range_start >> PAGE_SHIFT;
end = wbc->range_end >> PAGE_SHIFT;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = true;
- dout(" not cyclic, %lu to %lu\n", index, end);
+ doutc(cl, " not cyclic, %lu to %lu\n", index, end);
}
} else if (!ceph_wbc.head_snapc) {
/* Do not respect wbc->range_{start,end}. Dirty pages
@@ -1005,7 +1021,7 @@ retry:
* associated with 'snapc' get written */
if (index > 0)
should_loop = true;
- dout(" non-head snapc, range whole\n");
+ doutc(cl, " non-head snapc, range whole\n");
}
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
@@ -1028,12 +1044,12 @@ retry:
get_more_pages:
nr_folios = filemap_get_folios_tag(mapping, &index,
end, tag, &fbatch);
- dout("pagevec_lookup_range_tag got %d\n", nr_folios);
+ doutc(cl, "pagevec_lookup_range_tag got %d\n", nr_folios);
if (!nr_folios && !locked_pages)
break;
for (i = 0; i < nr_folios && locked_pages < max_pages; i++) {
page = &fbatch.folios[i]->page;
- dout("? %p idx %lu\n", page, page->index);
+ doutc(cl, "? %p idx %lu\n", page, page->index);
if (locked_pages == 0)
lock_page(page); /* first page */
else if (!trylock_page(page))
@@ -1042,15 +1058,15 @@ get_more_pages:
/* only dirty pages, or our accounting breaks */
if (unlikely(!PageDirty(page)) ||
unlikely(page->mapping != mapping)) {
- dout("!dirty or !mapping %p\n", page);
+ doutc(cl, "!dirty or !mapping %p\n", page);
unlock_page(page);
continue;
}
/* only if matching snap context */
pgsnapc = page_snap_context(page);
if (pgsnapc != snapc) {
- dout("page snapc %p %lld != oldest %p %lld\n",
- pgsnapc, pgsnapc->seq, snapc, snapc->seq);
+ doutc(cl, "page snapc %p %lld != oldest %p %lld\n",
+ pgsnapc, pgsnapc->seq, snapc, snapc->seq);
if (!should_loop &&
!ceph_wbc.head_snapc &&
wbc->sync_mode != WB_SYNC_NONE)
@@ -1061,8 +1077,8 @@ get_more_pages:
if (page_offset(page) >= ceph_wbc.i_size) {
struct folio *folio = page_folio(page);
- dout("folio at %lu beyond eof %llu\n",
- folio->index, ceph_wbc.i_size);
+ doutc(cl, "folio at %lu beyond eof %llu\n",
+ folio->index, ceph_wbc.i_size);
if ((ceph_wbc.size_stable ||
folio_pos(folio) >= i_size_read(inode)) &&
folio_clear_dirty_for_io(folio))
@@ -1072,23 +1088,23 @@ get_more_pages:
continue;
}
if (strip_unit_end && (page->index > strip_unit_end)) {
- dout("end of strip unit %p\n", page);
+ doutc(cl, "end of strip unit %p\n", page);
unlock_page(page);
break;
}
if (PageWriteback(page) || PageFsCache(page)) {
if (wbc->sync_mode == WB_SYNC_NONE) {
- dout("%p under writeback\n", page);
+ doutc(cl, "%p under writeback\n", page);
unlock_page(page);
continue;
}
- dout("waiting on writeback %p\n", page);
+ doutc(cl, "waiting on writeback %p\n", page);
wait_on_page_writeback(page);
wait_on_page_fscache(page);
}
if (!clear_page_dirty_for_io(page)) {
- dout("%p !clear_page_dirty_for_io\n", page);
+ doutc(cl, "%p !clear_page_dirty_for_io\n", page);
unlock_page(page);
continue;
}
@@ -1143,8 +1159,8 @@ get_more_pages:
}
/* note position of first page in fbatch */
- dout("%p will write page %p idx %lu\n",
- inode, page, page->index);
+ doutc(cl, "%llx.%llx will write page %p idx %lu\n",
+ ceph_vinop(inode), page, page->index);
if (atomic_long_inc_return(&fsc->writeback_count) >
CONGESTION_ON_THRESH(
@@ -1158,8 +1174,9 @@ get_more_pages:
locked_pages ? GFP_NOWAIT : GFP_NOFS);
if (IS_ERR(pages[locked_pages])) {
if (PTR_ERR(pages[locked_pages]) == -EINVAL)
- pr_err("%s: inode->i_blkbits=%hhu\n",
- __func__, inode->i_blkbits);
+ pr_err_client(cl,
+ "inode->i_blkbits=%hhu\n",
+ inode->i_blkbits);
/* better not fail on first page! */
BUG_ON(locked_pages == 0);
pages[locked_pages] = NULL;
@@ -1193,7 +1210,7 @@ get_more_pages:
if (nr_folios && i == nr_folios &&
locked_pages < max_pages) {
- dout("reached end fbatch, trying for more\n");
+ doutc(cl, "reached end fbatch, trying for more\n");
folio_batch_release(&fbatch);
goto get_more_pages;
}
@@ -1254,8 +1271,8 @@ new_request:
/* Start a new extent */
osd_req_op_extent_dup_last(req, op_idx,
cur_offset - offset);
- dout("writepages got pages at %llu~%llu\n",
- offset, len);
+ doutc(cl, "got pages at %llu~%llu\n", offset,
+ len);
osd_req_op_extent_osd_data_pages(req, op_idx,
data_pages, len, 0,
from_pool, false);
@@ -1288,12 +1305,13 @@ new_request:
if (IS_ENCRYPTED(inode))
len = round_up(len, CEPH_FSCRYPT_BLOCK_SIZE);
- dout("writepages got pages at %llu~%llu\n", offset, len);
+ doutc(cl, "got pages at %llu~%llu\n", offset, len);
if (IS_ENCRYPTED(inode) &&
((offset | len) & ~CEPH_FSCRYPT_BLOCK_MASK))
- pr_warn("%s: bad encrypted write offset=%lld len=%llu\n",
- __func__, offset, len);
+ pr_warn_client(cl,
+ "bad encrypted write offset=%lld len=%llu\n",
+ offset, len);
osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
0, from_pool, false);
@@ -1345,14 +1363,14 @@ new_request:
done = true;
release_folios:
- dout("folio_batch release on %d folios (%p)\n", (int)fbatch.nr,
- fbatch.nr ? fbatch.folios[0] : NULL);
+ doutc(cl, "folio_batch release on %d folios (%p)\n",
+ (int)fbatch.nr, fbatch.nr ? fbatch.folios[0] : NULL);
folio_batch_release(&fbatch);
}
if (should_loop && !done) {
/* more to do; loop back to beginning of file */
- dout("writepages looping back to beginning of file\n");
+ doutc(cl, "looping back to beginning of file\n");
end = start_index - 1; /* OK even when start_index == 0 */
/* to write dirty pages associated with next snapc,
@@ -1390,7 +1408,8 @@ release_folios:
out:
ceph_osdc_put_request(req);
ceph_put_snap_context(last_snapc);
- dout("writepages dend - startone, rc = %d\n", rc);
+ doutc(cl, "%llx.%llx dend - startone, rc = %d\n", ceph_vinop(inode),
+ rc);
return rc;
}
@@ -1424,11 +1443,12 @@ static struct ceph_snap_context *
ceph_find_incompatible(struct page *page)
{
struct inode *inode = page->mapping->host;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
if (ceph_inode_is_shutdown(inode)) {
- dout(" page %p %llx:%llx is shutdown\n", page,
- ceph_vinop(inode));
+ doutc(cl, " %llx.%llx page %p is shutdown\n",
+ ceph_vinop(inode), page);
return ERR_PTR(-ESTALE);
}
@@ -1449,13 +1469,15 @@ ceph_find_incompatible(struct page *page)
if (snapc->seq > oldest->seq) {
/* not writeable -- return it for the caller to deal with */
ceph_put_snap_context(oldest);
- dout(" page %p snapc %p not current or oldest\n", page, snapc);
+ doutc(cl, " %llx.%llx page %p snapc %p not current or oldest\n",
+ ceph_vinop(inode), page, snapc);
return ceph_get_snap_context(snapc);
}
ceph_put_snap_context(oldest);
/* yay, writeable, do it now (without dropping page lock) */
- dout(" page %p snapc %p not current, but oldest\n", page, snapc);
+ doutc(cl, " %llx.%llx page %p snapc %p not current, but oldest\n",
+ ceph_vinop(inode), page, snapc);
if (clear_page_dirty_for_io(page)) {
int r = writepage_nounlock(page, NULL);
if (r < 0)
@@ -1524,10 +1546,11 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
{
struct folio *folio = page_folio(subpage);
struct inode *inode = file_inode(file);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
bool check_cap = false;
- dout("write_end file %p inode %p folio %p %d~%d (%d)\n", file,
- inode, folio, (int)pos, (int)copied, (int)len);
+ doutc(cl, "%llx.%llx file %p folio %p %d~%d (%d)\n", ceph_vinop(inode),
+ file, folio, (int)pos, (int)copied, (int)len);
if (!folio_test_uptodate(folio)) {
/* just return that nothing was copied on a short copy */
@@ -1587,6 +1610,7 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
struct inode *inode = file_inode(vma->vm_file);
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_file_info *fi = vma->vm_file->private_data;
loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT;
int want, got, err;
@@ -1598,8 +1622,8 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
ceph_block_sigs(&oldset);
- dout("filemap_fault %p %llx.%llx %llu trying to get caps\n",
- inode, ceph_vinop(inode), off);
+ doutc(cl, "%llx.%llx %llu trying to get caps\n",
+ ceph_vinop(inode), off);
if (fi->fmode & CEPH_FILE_MODE_LAZY)
want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
else
@@ -1610,8 +1634,8 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
if (err < 0)
goto out_restore;
- dout("filemap_fault %p %llu got cap refs on %s\n",
- inode, off, ceph_cap_string(got));
+ doutc(cl, "%llx.%llx %llu got cap refs on %s\n", ceph_vinop(inode),
+ off, ceph_cap_string(got));
if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
!ceph_has_inline_data(ci)) {
@@ -1619,8 +1643,8 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
ceph_add_rw_context(fi, &rw_ctx);
ret = filemap_fault(vmf);
ceph_del_rw_context(fi, &rw_ctx);
- dout("filemap_fault %p %llu drop cap refs %s ret %x\n",
- inode, off, ceph_cap_string(got), ret);
+ doutc(cl, "%llx.%llx %llu drop cap refs %s ret %x\n",
+ ceph_vinop(inode), off, ceph_cap_string(got), ret);
} else
err = -EAGAIN;
@@ -1661,8 +1685,8 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
out_inline:
filemap_invalidate_unlock_shared(mapping);
- dout("filemap_fault %p %llu read inline data ret %x\n",
- inode, off, ret);
+ doutc(cl, "%llx.%llx %llu read inline data ret %x\n",
+ ceph_vinop(inode), off, ret);
}
out_restore:
ceph_restore_sigs(&oldset);
@@ -1676,6 +1700,7 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct inode *inode = file_inode(vma->vm_file);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_file_info *fi = vma->vm_file->private_data;
struct ceph_cap_flush *prealloc_cf;
@@ -1702,8 +1727,8 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
else
len = offset_in_thp(page, size);
- dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
- inode, ceph_vinop(inode), off, len, size);
+ doutc(cl, "%llx.%llx %llu~%zd getting caps i_size %llu\n",
+ ceph_vinop(inode), off, len, size);
if (fi->fmode & CEPH_FILE_MODE_LAZY)
want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
else
@@ -1714,8 +1739,8 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
if (err < 0)
goto out_free;
- dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
- inode, off, len, ceph_cap_string(got));
+ doutc(cl, "%llx.%llx %llu~%zd got cap refs on %s\n", ceph_vinop(inode),
+ off, len, ceph_cap_string(got));
/* Update time before taking page lock */
file_update_time(vma->vm_file);
@@ -1763,8 +1788,8 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
__mark_inode_dirty(inode, dirty);
}
- dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %x\n",
- inode, off, len, ceph_cap_string(got), ret);
+ doutc(cl, "%llx.%llx %llu~%zd dropping cap refs on %s ret %x\n",
+ ceph_vinop(inode), off, len, ceph_cap_string(got), ret);
ceph_put_cap_refs_async(ci, got);
out_free:
ceph_restore_sigs(&oldset);
@@ -1778,6 +1803,7 @@ out_free:
void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
char *data, size_t len)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct address_space *mapping = inode->i_mapping;
struct page *page;
@@ -1798,8 +1824,8 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
}
}
- dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
- inode, ceph_vinop(inode), len, locked_page);
+ doutc(cl, "%p %llx.%llx len %zu locked_page %p\n", inode,
+ ceph_vinop(inode), len, locked_page);
if (len > 0) {
void *kaddr = kmap_atomic(page);
@@ -1823,7 +1849,8 @@ int ceph_uninline_data(struct file *file)
{
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
struct ceph_osd_request *req = NULL;
struct ceph_cap_flush *prealloc_cf = NULL;
struct folio *folio = NULL;
@@ -1836,8 +1863,8 @@ int ceph_uninline_data(struct file *file)
inline_version = ci->i_inline_version;
spin_unlock(&ci->i_ceph_lock);
- dout("uninline_data %p %llx.%llx inline_version %llu\n",
- inode, ceph_vinop(inode), inline_version);
+ doutc(cl, "%llx.%llx inline_version %llu\n", ceph_vinop(inode),
+ inline_version);
if (ceph_inode_is_shutdown(inode)) {
err = -EIO;
@@ -1949,8 +1976,8 @@ out_unlock:
}
out:
ceph_free_cap_flush(prealloc_cf);
- dout("uninline_data %p %llx.%llx inline_version %llu = %d\n",
- inode, ceph_vinop(inode), inline_version, err);
+ doutc(cl, "%llx.%llx inline_version %llu = %d\n",
+ ceph_vinop(inode), inline_version, err);
return err;
}
@@ -1977,8 +2004,9 @@ enum {
static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
s64 pool, struct ceph_string *pool_ns)
{
- struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->netfs.inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(&ci->netfs.inode);
struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct ceph_client *cl = fsc->client;
struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
struct rb_node **p, *parent;
struct ceph_pool_perm *perm;
@@ -2013,10 +2041,10 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
goto out;
if (pool_ns)
- dout("__ceph_pool_perm_get pool %lld ns %.*s no perm cached\n",
- pool, (int)pool_ns->len, pool_ns->str);
+ doutc(cl, "pool %lld ns %.*s no perm cached\n", pool,
+ (int)pool_ns->len, pool_ns->str);
else
- dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool);
+ doutc(cl, "pool %lld no perm cached\n", pool);
down_write(&mdsc->pool_perm_rwsem);
p = &mdsc->pool_perm_tree.rb_node;
@@ -2141,15 +2169,16 @@ out:
if (!err)
err = have;
if (pool_ns)
- dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n",
- pool, (int)pool_ns->len, pool_ns->str, err);
+ doutc(cl, "pool %lld ns %.*s result = %d\n", pool,
+ (int)pool_ns->len, pool_ns->str, err);
else
- dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err);
+ doutc(cl, "pool %lld result = %d\n", pool, err);
return err;
}
int ceph_pool_perm_check(struct inode *inode, int need)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_string *pool_ns;
s64 pool;
@@ -2168,7 +2197,7 @@ int ceph_pool_perm_check(struct inode *inode, int need)
return 0;
}
- if (ceph_test_mount_opt(ceph_inode_to_client(inode),
+ if (ceph_test_mount_opt(ceph_inode_to_fs_client(inode),
NOPOOLPERM))
return 0;
@@ -2179,13 +2208,11 @@ int ceph_pool_perm_check(struct inode *inode, int need)
check:
if (flags & CEPH_I_POOL_PERM) {
if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
- dout("ceph_pool_perm_check pool %lld no read perm\n",
- pool);
+ doutc(cl, "pool %lld no read perm\n", pool);
return -EPERM;
}
if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
- dout("ceph_pool_perm_check pool %lld no write perm\n",
- pool);
+ doutc(cl, "pool %lld no write perm\n", pool);
return -EPERM;
}
return 0;
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index de1dee46d3df..930fbd54d2c8 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -15,7 +15,7 @@
void ceph_fscache_register_inode_cookie(struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
/* No caching for filesystem? */
if (!fsc->fscache)
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index a104669fcf4c..2c0b8dc3dd0d 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -186,10 +186,10 @@ static void __ceph_unreserve_caps(struct ceph_mds_client *mdsc, int nr_caps)
mdsc->caps_avail_count += nr_caps;
}
- dout("%s: caps %d = %d used + %d resv + %d avail\n",
- __func__,
- mdsc->caps_total_count, mdsc->caps_use_count,
- mdsc->caps_reserve_count, mdsc->caps_avail_count);
+ doutc(mdsc->fsc->client,
+ "caps %d = %d used + %d resv + %d avail\n",
+ mdsc->caps_total_count, mdsc->caps_use_count,
+ mdsc->caps_reserve_count, mdsc->caps_avail_count);
BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
mdsc->caps_reserve_count +
mdsc->caps_avail_count);
@@ -202,6 +202,7 @@ static void __ceph_unreserve_caps(struct ceph_mds_client *mdsc, int nr_caps)
int ceph_reserve_caps(struct ceph_mds_client *mdsc,
struct ceph_cap_reservation *ctx, int need)
{
+ struct ceph_client *cl = mdsc->fsc->client;
int i, j;
struct ceph_cap *cap;
int have;
@@ -212,7 +213,7 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *s;
LIST_HEAD(newcaps);
- dout("reserve caps ctx=%p need=%d\n", ctx, need);
+ doutc(cl, "ctx=%p need=%d\n", ctx, need);
/* first reserve any caps that are already allocated */
spin_lock(&mdsc->caps_list_lock);
@@ -272,8 +273,8 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
continue;
}
- pr_warn("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
- ctx, need, have + alloc);
+ pr_warn_client(cl, "ctx=%p ENOMEM need=%d got=%d\n", ctx, need,
+ have + alloc);
err = -ENOMEM;
break;
}
@@ -298,20 +299,21 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
spin_unlock(&mdsc->caps_list_lock);
- dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
- ctx, mdsc->caps_total_count, mdsc->caps_use_count,
- mdsc->caps_reserve_count, mdsc->caps_avail_count);
+ doutc(cl, "ctx=%p %d = %d used + %d resv + %d avail\n", ctx,
+ mdsc->caps_total_count, mdsc->caps_use_count,
+ mdsc->caps_reserve_count, mdsc->caps_avail_count);
return err;
}
void ceph_unreserve_caps(struct ceph_mds_client *mdsc,
struct ceph_cap_reservation *ctx)
{
+ struct ceph_client *cl = mdsc->fsc->client;
bool reclaim = false;
if (!ctx->count)
return;
- dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
+ doutc(cl, "ctx=%p count=%d\n", ctx, ctx->count);
spin_lock(&mdsc->caps_list_lock);
__ceph_unreserve_caps(mdsc, ctx->count);
ctx->count = 0;
@@ -328,6 +330,7 @@ void ceph_unreserve_caps(struct ceph_mds_client *mdsc,
struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
struct ceph_cap_reservation *ctx)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_cap *cap = NULL;
/* temporary, until we do something about cap import/export */
@@ -359,9 +362,9 @@ struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
}
spin_lock(&mdsc->caps_list_lock);
- dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
- ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
- mdsc->caps_reserve_count, mdsc->caps_avail_count);
+ doutc(cl, "ctx=%p (%d) %d = %d used + %d resv + %d avail\n", ctx,
+ ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
+ mdsc->caps_reserve_count, mdsc->caps_avail_count);
BUG_ON(!ctx->count);
BUG_ON(ctx->count > mdsc->caps_reserve_count);
BUG_ON(list_empty(&mdsc->caps_list));
@@ -382,10 +385,12 @@ struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
{
+ struct ceph_client *cl = mdsc->fsc->client;
+
spin_lock(&mdsc->caps_list_lock);
- dout("put_cap %p %d = %d used + %d resv + %d avail\n",
- cap, mdsc->caps_total_count, mdsc->caps_use_count,
- mdsc->caps_reserve_count, mdsc->caps_avail_count);
+ doutc(cl, "%p %d = %d used + %d resv + %d avail\n", cap,
+ mdsc->caps_total_count, mdsc->caps_use_count,
+ mdsc->caps_reserve_count, mdsc->caps_avail_count);
mdsc->caps_use_count--;
/*
* Keep some preallocated caps around (ceph_min_count), to
@@ -491,11 +496,13 @@ static void __insert_cap_node(struct ceph_inode_info *ci,
static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
struct ceph_inode_info *ci)
{
+ struct inode *inode = &ci->netfs.inode;
struct ceph_mount_options *opt = mdsc->fsc->mount_options;
+
ci->i_hold_caps_max = round_jiffies(jiffies +
opt->caps_wanted_delay_max * HZ);
- dout("__cap_set_timeouts %p %lu\n", &ci->netfs.inode,
- ci->i_hold_caps_max - jiffies);
+ doutc(mdsc->fsc->client, "%p %llx.%llx %lu\n", inode,
+ ceph_vinop(inode), ci->i_hold_caps_max - jiffies);
}
/*
@@ -509,8 +516,11 @@ static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
struct ceph_inode_info *ci)
{
- dout("__cap_delay_requeue %p flags 0x%lx at %lu\n", &ci->netfs.inode,
- ci->i_ceph_flags, ci->i_hold_caps_max);
+ struct inode *inode = &ci->netfs.inode;
+
+ doutc(mdsc->fsc->client, "%p %llx.%llx flags 0x%lx at %lu\n",
+ inode, ceph_vinop(inode), ci->i_ceph_flags,
+ ci->i_hold_caps_max);
if (!mdsc->stopping) {
spin_lock(&mdsc->cap_delay_lock);
if (!list_empty(&ci->i_cap_delay_list)) {
@@ -533,7 +543,9 @@ no_change:
static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
struct ceph_inode_info *ci)
{
- dout("__cap_delay_requeue_front %p\n", &ci->netfs.inode);
+ struct inode *inode = &ci->netfs.inode;
+
+ doutc(mdsc->fsc->client, "%p %llx.%llx\n", inode, ceph_vinop(inode));
spin_lock(&mdsc->cap_delay_lock);
ci->i_ceph_flags |= CEPH_I_FLUSH;
if (!list_empty(&ci->i_cap_delay_list))
@@ -550,7 +562,9 @@ static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
struct ceph_inode_info *ci)
{
- dout("__cap_delay_cancel %p\n", &ci->netfs.inode);
+ struct inode *inode = &ci->netfs.inode;
+
+ doutc(mdsc->fsc->client, "%p %llx.%llx\n", inode, ceph_vinop(inode));
if (list_empty(&ci->i_cap_delay_list))
return;
spin_lock(&mdsc->cap_delay_lock);
@@ -562,6 +576,9 @@ static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
unsigned issued)
{
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
+
unsigned had = __ceph_caps_issued(ci, NULL);
lockdep_assert_held(&ci->i_ceph_lock);
@@ -586,7 +603,7 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
if (issued & CEPH_CAP_FILE_SHARED)
atomic_inc(&ci->i_shared_gen);
if (S_ISDIR(ci->netfs.inode.i_mode)) {
- dout(" marking %p NOT complete\n", &ci->netfs.inode);
+ doutc(cl, " marking %p NOT complete\n", inode);
__ceph_dir_clear_complete(ci);
}
}
@@ -635,7 +652,8 @@ void ceph_add_cap(struct inode *inode,
unsigned seq, unsigned mseq, u64 realmino, int flags,
struct ceph_cap **new_cap)
{
- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_cap *cap;
int mds = session->s_mds;
@@ -644,8 +662,9 @@ void ceph_add_cap(struct inode *inode,
lockdep_assert_held(&ci->i_ceph_lock);
- dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
- session->s_mds, cap_id, ceph_cap_string(issued), seq);
+ doutc(cl, "%p %llx.%llx mds%d cap %llx %s seq %d\n", inode,
+ ceph_vinop(inode), session->s_mds, cap_id,
+ ceph_cap_string(issued), seq);
gen = atomic_read(&session->s_cap_gen);
@@ -723,9 +742,9 @@ void ceph_add_cap(struct inode *inode,
actual_wanted = __ceph_caps_wanted(ci);
if ((wanted & ~actual_wanted) ||
(issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
- dout(" issued %s, mds wanted %s, actual %s, queueing\n",
- ceph_cap_string(issued), ceph_cap_string(wanted),
- ceph_cap_string(actual_wanted));
+ doutc(cl, "issued %s, mds wanted %s, actual %s, queueing\n",
+ ceph_cap_string(issued), ceph_cap_string(wanted),
+ ceph_cap_string(actual_wanted));
__cap_delay_requeue(mdsc, ci);
}
@@ -742,9 +761,9 @@ void ceph_add_cap(struct inode *inode,
WARN_ON(ci->i_auth_cap == cap);
}
- dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
- inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
- ceph_cap_string(issued|cap->issued), seq, mds);
+ doutc(cl, "inode %p %llx.%llx cap %p %s now %s seq %d mds%d\n",
+ inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
+ ceph_cap_string(issued|cap->issued), seq, mds);
cap->cap_id = cap_id;
cap->issued = issued;
cap->implemented |= issued;
@@ -766,6 +785,8 @@ void ceph_add_cap(struct inode *inode,
*/
static int __cap_is_valid(struct ceph_cap *cap)
{
+ struct inode *inode = &cap->ci->netfs.inode;
+ struct ceph_client *cl = cap->session->s_mdsc->fsc->client;
unsigned long ttl;
u32 gen;
@@ -773,9 +794,9 @@ static int __cap_is_valid(struct ceph_cap *cap)
ttl = cap->session->s_cap_ttl;
if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
- dout("__cap_is_valid %p cap %p issued %s "
- "but STALE (gen %u vs %u)\n", &cap->ci->netfs.inode,
- cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
+ doutc(cl, "%p %llx.%llx cap %p issued %s but STALE (gen %u vs %u)\n",
+ inode, ceph_vinop(inode), cap,
+ ceph_cap_string(cap->issued), cap->cap_gen, gen);
return 0;
}
@@ -789,6 +810,8 @@ static int __cap_is_valid(struct ceph_cap *cap)
*/
int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
{
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int have = ci->i_snap_caps;
struct ceph_cap *cap;
struct rb_node *p;
@@ -799,8 +822,8 @@ int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
cap = rb_entry(p, struct ceph_cap, ci_node);
if (!__cap_is_valid(cap))
continue;
- dout("__ceph_caps_issued %p cap %p issued %s\n",
- &ci->netfs.inode, cap, ceph_cap_string(cap->issued));
+ doutc(cl, "%p %llx.%llx cap %p issued %s\n", inode,
+ ceph_vinop(inode), cap, ceph_cap_string(cap->issued));
have |= cap->issued;
if (implemented)
*implemented |= cap->implemented;
@@ -843,16 +866,18 @@ int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
*/
static void __touch_cap(struct ceph_cap *cap)
{
+ struct inode *inode = &cap->ci->netfs.inode;
struct ceph_mds_session *s = cap->session;
+ struct ceph_client *cl = s->s_mdsc->fsc->client;
spin_lock(&s->s_cap_lock);
if (!s->s_cap_iterator) {
- dout("__touch_cap %p cap %p mds%d\n", &cap->ci->netfs.inode, cap,
- s->s_mds);
+ doutc(cl, "%p %llx.%llx cap %p mds%d\n", inode,
+ ceph_vinop(inode), cap, s->s_mds);
list_move_tail(&cap->session_caps, &s->s_caps);
} else {
- dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
- &cap->ci->netfs.inode, cap, s->s_mds);
+ doutc(cl, "%p %llx.%llx cap %p mds%d NOP, iterating over caps\n",
+ inode, ceph_vinop(inode), cap, s->s_mds);
}
spin_unlock(&s->s_cap_lock);
}
@@ -864,15 +889,16 @@ static void __touch_cap(struct ceph_cap *cap)
*/
int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
{
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_cap *cap;
struct rb_node *p;
int have = ci->i_snap_caps;
if ((have & mask) == mask) {
- dout("__ceph_caps_issued_mask ino 0x%llx snap issued %s"
- " (mask %s)\n", ceph_ino(&ci->netfs.inode),
- ceph_cap_string(have),
- ceph_cap_string(mask));
+ doutc(cl, "mask %p %llx.%llx snap issued %s (mask %s)\n",
+ inode, ceph_vinop(inode), ceph_cap_string(have),
+ ceph_cap_string(mask));
return 1;
}
@@ -881,10 +907,10 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
if (!__cap_is_valid(cap))
continue;
if ((cap->issued & mask) == mask) {
- dout("__ceph_caps_issued_mask ino 0x%llx cap %p issued %s"
- " (mask %s)\n", ceph_ino(&ci->netfs.inode), cap,
- ceph_cap_string(cap->issued),
- ceph_cap_string(mask));
+ doutc(cl, "mask %p %llx.%llx cap %p issued %s (mask %s)\n",
+ inode, ceph_vinop(inode), cap,
+ ceph_cap_string(cap->issued),
+ ceph_cap_string(mask));
if (touch)
__touch_cap(cap);
return 1;
@@ -893,10 +919,10 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
/* does a combination of caps satisfy mask? */
have |= cap->issued;
if ((have & mask) == mask) {
- dout("__ceph_caps_issued_mask ino 0x%llx combo issued %s"
- " (mask %s)\n", ceph_ino(&ci->netfs.inode),
- ceph_cap_string(cap->issued),
- ceph_cap_string(mask));
+ doutc(cl, "mask %p %llx.%llx combo issued %s (mask %s)\n",
+ inode, ceph_vinop(inode),
+ ceph_cap_string(cap->issued),
+ ceph_cap_string(mask));
if (touch) {
struct rb_node *q;
@@ -922,7 +948,7 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
int __ceph_caps_issued_mask_metric(struct ceph_inode_info *ci, int mask,
int touch)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
int r;
r = __ceph_caps_issued_mask(ci, mask, touch);
@@ -954,13 +980,14 @@ int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
{
struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int ret;
spin_lock(&ci->i_ceph_lock);
ret = __ceph_caps_revoking_other(ci, NULL, mask);
spin_unlock(&ci->i_ceph_lock);
- dout("ceph_caps_revoking %p %s = %d\n", inode,
- ceph_cap_string(mask), ret);
+ doutc(cl, "%p %llx.%llx %s = %d\n", inode, ceph_vinop(inode),
+ ceph_cap_string(mask), ret);
return ret;
}
@@ -996,7 +1023,7 @@ int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
const int WR_SHIFT = ffs(CEPH_FILE_MODE_WR);
const int LAZY_SHIFT = ffs(CEPH_FILE_MODE_LAZY);
struct ceph_mount_options *opt =
- ceph_inode_to_client(&ci->netfs.inode)->mount_options;
+ ceph_inode_to_fs_client(&ci->netfs.inode)->mount_options;
unsigned long used_cutoff = jiffies - opt->caps_wanted_delay_max * HZ;
unsigned long idle_cutoff = jiffies - opt->caps_wanted_delay_min * HZ;
@@ -1107,21 +1134,23 @@ int ceph_is_any_caps(struct inode *inode)
void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
{
struct ceph_mds_session *session = cap->session;
+ struct ceph_client *cl = session->s_mdsc->fsc->client;
struct ceph_inode_info *ci = cap->ci;
+ struct inode *inode = &ci->netfs.inode;
struct ceph_mds_client *mdsc;
int removed = 0;
/* 'ci' being NULL means the remove has already occurred */
if (!ci) {
- dout("%s: cap inode is NULL\n", __func__);
+ doutc(cl, "inode is NULL\n");
return;
}
lockdep_assert_held(&ci->i_ceph_lock);
- dout("__ceph_remove_cap %p from %p\n", cap, &ci->netfs.inode);
+ doutc(cl, "%p from %p %llx.%llx\n", cap, inode, ceph_vinop(inode));
- mdsc = ceph_inode_to_client(&ci->netfs.inode)->mdsc;
+ mdsc = ceph_inode_to_fs_client(&ci->netfs.inode)->mdsc;
/* remove from inode's cap rbtree, and clear auth cap */
rb_erase(&cap->ci_node, &ci->i_caps);
@@ -1132,8 +1161,8 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
spin_lock(&session->s_cap_lock);
if (session->s_cap_iterator == cap) {
/* not yet, we are iterating over this very cap */
- dout("__ceph_remove_cap delaying %p removal from session %p\n",
- cap, cap->session);
+ doutc(cl, "delaying %p removal from session %p\n", cap,
+ cap->session);
} else {
list_del_init(&cap->session_caps);
session->s_nr_caps--;
@@ -1178,20 +1207,21 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
}
}
-void ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
+void ceph_remove_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
+ bool queue_release)
{
struct ceph_inode_info *ci = cap->ci;
struct ceph_fs_client *fsc;
/* 'ci' being NULL means the remove has already occurred */
if (!ci) {
- dout("%s: cap inode is NULL\n", __func__);
+ doutc(mdsc->fsc->client, "inode is NULL\n");
return;
}
lockdep_assert_held(&ci->i_ceph_lock);
- fsc = ceph_inode_to_client(&ci->netfs.inode);
+ fsc = ceph_inode_to_fs_client(&ci->netfs.inode);
WARN_ON_ONCE(ci->i_auth_cap == cap &&
!list_empty(&ci->i_dirty_item) &&
!fsc->blocklisted &&
@@ -1227,15 +1257,19 @@ static void encode_cap_msg(struct ceph_msg *msg, struct cap_msg_args *arg)
{
struct ceph_mds_caps *fc;
void *p;
- struct ceph_osd_client *osdc = &arg->session->s_mdsc->fsc->client->osdc;
-
- dout("%s %s %llx %llx caps %s wanted %s dirty %s seq %u/%u tid %llu/%llu mseq %u follows %lld size %llu/%llu xattr_ver %llu xattr_len %d\n",
- __func__, ceph_cap_op_name(arg->op), arg->cid, arg->ino,
- ceph_cap_string(arg->caps), ceph_cap_string(arg->wanted),
- ceph_cap_string(arg->dirty), arg->seq, arg->issue_seq,
- arg->flush_tid, arg->oldest_flush_tid, arg->mseq, arg->follows,
- arg->size, arg->max_size, arg->xattr_version,
- arg->xattr_buf ? (int)arg->xattr_buf->vec.iov_len : 0);
+ struct ceph_mds_client *mdsc = arg->session->s_mdsc;
+ struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
+
+ doutc(mdsc->fsc->client,
+ "%s %llx %llx caps %s wanted %s dirty %s seq %u/%u"
+ " tid %llu/%llu mseq %u follows %lld size %llu/%llu"
+ " xattr_ver %llu xattr_len %d\n",
+ ceph_cap_op_name(arg->op), arg->cid, arg->ino,
+ ceph_cap_string(arg->caps), ceph_cap_string(arg->wanted),
+ ceph_cap_string(arg->dirty), arg->seq, arg->issue_seq,
+ arg->flush_tid, arg->oldest_flush_tid, arg->mseq, arg->follows,
+ arg->size, arg->max_size, arg->xattr_version,
+ arg->xattr_buf ? (int)arg->xattr_buf->vec.iov_len : 0);
msg->hdr.version = cpu_to_le16(12);
msg->hdr.tid = cpu_to_le64(arg->flush_tid);
@@ -1342,6 +1376,8 @@ static void encode_cap_msg(struct ceph_msg *msg, struct cap_msg_args *arg)
*/
void __ceph_remove_caps(struct ceph_inode_info *ci)
{
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
struct rb_node *p;
/* lock i_ceph_lock, because ceph_d_revalidate(..., LOOKUP_RCU)
@@ -1351,7 +1387,7 @@ void __ceph_remove_caps(struct ceph_inode_info *ci)
while (p) {
struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
p = rb_next(p);
- ceph_remove_cap(cap, true);
+ ceph_remove_cap(mdsc, cap, true);
}
spin_unlock(&ci->i_ceph_lock);
}
@@ -1370,6 +1406,7 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
{
struct ceph_inode_info *ci = cap->ci;
struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int held, revoking;
lockdep_assert_held(&ci->i_ceph_lock);
@@ -1378,10 +1415,10 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
revoking = cap->implemented & ~cap->issued;
retain &= ~revoking;
- dout("%s %p cap %p session %p %s -> %s (revoking %s)\n",
- __func__, inode, cap, cap->session,
- ceph_cap_string(held), ceph_cap_string(held & retain),
- ceph_cap_string(revoking));
+ doutc(cl, "%p %llx.%llx cap %p session %p %s -> %s (revoking %s)\n",
+ inode, ceph_vinop(inode), cap, cap->session,
+ ceph_cap_string(held), ceph_cap_string(held & retain),
+ ceph_cap_string(revoking));
BUG_ON((retain & CEPH_CAP_PIN) == 0);
ci->i_ceph_flags &= ~CEPH_I_FLUSH;
@@ -1497,13 +1534,16 @@ static void __send_cap(struct cap_msg_args *arg, struct ceph_inode_info *ci)
{
struct ceph_msg *msg;
struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, cap_msg_size(arg), GFP_NOFS,
false);
if (!msg) {
- pr_err("error allocating cap msg: ino (%llx.%llx) flushing %s tid %llu, requeuing cap.\n",
- ceph_vinop(inode), ceph_cap_string(arg->dirty),
- arg->flush_tid);
+ pr_err_client(cl,
+ "error allocating cap msg: ino (%llx.%llx)"
+ " flushing %s tid %llu, requeuing cap.\n",
+ ceph_vinop(inode), ceph_cap_string(arg->dirty),
+ arg->flush_tid);
spin_lock(&ci->i_ceph_lock);
__cap_delay_requeue(arg->session->s_mdsc, ci);
spin_unlock(&ci->i_ceph_lock);
@@ -1592,11 +1632,13 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci,
{
struct inode *inode = &ci->netfs.inode;
struct ceph_mds_client *mdsc = session->s_mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_cap_snap *capsnap;
u64 oldest_flush_tid = 0;
u64 first_tid = 1, last_tid = 0;
- dout("__flush_snaps %p session %p\n", inode, session);
+ doutc(cl, "%p %llx.%llx session %p\n", inode, ceph_vinop(inode),
+ session);
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
/*
@@ -1611,7 +1653,7 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci,
/* only flush each capsnap once */
if (capsnap->cap_flush.tid > 0) {
- dout(" already flushed %p, skipping\n", capsnap);
+ doutc(cl, "already flushed %p, skipping\n", capsnap);
continue;
}
@@ -1643,8 +1685,8 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci,
int ret;
if (!(cap && cap->session == session)) {
- dout("__flush_snaps %p auth cap %p not mds%d, "
- "stop\n", inode, cap, session->s_mds);
+ doutc(cl, "%p %llx.%llx auth cap %p not mds%d, stop\n",
+ inode, ceph_vinop(inode), cap, session->s_mds);
break;
}
@@ -1665,15 +1707,17 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci,
refcount_inc(&capsnap->nref);
spin_unlock(&ci->i_ceph_lock);
- dout("__flush_snaps %p capsnap %p tid %llu %s\n",
- inode, capsnap, cf->tid, ceph_cap_string(capsnap->dirty));
+ doutc(cl, "%p %llx.%llx capsnap %p tid %llu %s\n", inode,
+ ceph_vinop(inode), capsnap, cf->tid,
+ ceph_cap_string(capsnap->dirty));
ret = __send_flush_snap(inode, session, capsnap, cap->mseq,
oldest_flush_tid);
if (ret < 0) {
- pr_err("__flush_snaps: error sending cap flushsnap, "
- "ino (%llx.%llx) tid %llu follows %llu\n",
- ceph_vinop(inode), cf->tid, capsnap->follows);
+ pr_err_client(cl, "error sending cap flushsnap, "
+ "ino (%llx.%llx) tid %llu follows %llu\n",
+ ceph_vinop(inode), cf->tid,
+ capsnap->follows);
}
ceph_put_cap_snap(capsnap);
@@ -1685,28 +1729,29 @@ void ceph_flush_snaps(struct ceph_inode_info *ci,
struct ceph_mds_session **psession)
{
struct inode *inode = &ci->netfs.inode;
- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_mds_session *session = NULL;
bool need_put = false;
int mds;
- dout("ceph_flush_snaps %p\n", inode);
+ doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
if (psession)
session = *psession;
retry:
spin_lock(&ci->i_ceph_lock);
if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) {
- dout(" no capsnap needs flush, doing nothing\n");
+ doutc(cl, " no capsnap needs flush, doing nothing\n");
goto out;
}
if (!ci->i_auth_cap) {
- dout(" no auth cap (migrating?), doing nothing\n");
+ doutc(cl, " no auth cap (migrating?), doing nothing\n");
goto out;
}
mds = ci->i_auth_cap->session->s_mds;
if (session && session->s_mds != mds) {
- dout(" oops, wrong session %p mutex\n", session);
+ doutc(cl, " oops, wrong session %p mutex\n", session);
ceph_put_mds_session(session);
session = NULL;
}
@@ -1750,23 +1795,25 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
struct ceph_cap_flush **pcf)
{
struct ceph_mds_client *mdsc =
- ceph_sb_to_client(ci->netfs.inode.i_sb)->mdsc;
+ ceph_sb_to_fs_client(ci->netfs.inode.i_sb)->mdsc;
struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int was = ci->i_dirty_caps;
int dirty = 0;
lockdep_assert_held(&ci->i_ceph_lock);
if (!ci->i_auth_cap) {
- pr_warn("__mark_dirty_caps %p %llx mask %s, "
- "but no auth cap (session was closed?)\n",
- inode, ceph_ino(inode), ceph_cap_string(mask));
+ pr_warn_client(cl, "%p %llx.%llx mask %s, "
+ "but no auth cap (session was closed?)\n",
+ inode, ceph_vinop(inode),
+ ceph_cap_string(mask));
return 0;
}
- dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->netfs.inode,
- ceph_cap_string(mask), ceph_cap_string(was),
- ceph_cap_string(was | mask));
+ doutc(cl, "%p %llx.%llx %s dirty %s -> %s\n", inode,
+ ceph_vinop(inode), ceph_cap_string(mask),
+ ceph_cap_string(was), ceph_cap_string(was | mask));
ci->i_dirty_caps |= mask;
if (was == 0) {
struct ceph_mds_session *session = ci->i_auth_cap->session;
@@ -1779,8 +1826,9 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
ci->i_head_snapc = ceph_get_snap_context(
ci->i_snap_realm->cached_context);
}
- dout(" inode %p now dirty snapc %p auth cap %p\n",
- &ci->netfs.inode, ci->i_head_snapc, ci->i_auth_cap);
+ doutc(cl, "%p %llx.%llx now dirty snapc %p auth cap %p\n",
+ inode, ceph_vinop(inode), ci->i_head_snapc,
+ ci->i_auth_cap);
BUG_ON(!list_empty(&ci->i_dirty_item));
spin_lock(&mdsc->cap_dirty_lock);
list_add(&ci->i_dirty_item, &session->s_cap_dirty);
@@ -1873,7 +1921,8 @@ static u64 __mark_caps_flushing(struct inode *inode,
struct ceph_mds_session *session, bool wake,
u64 *oldest_flush_tid)
{
- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_cap_flush *cf = NULL;
int flushing;
@@ -1884,13 +1933,13 @@ static u64 __mark_caps_flushing(struct inode *inode,
BUG_ON(!ci->i_prealloc_cap_flush);
flushing = ci->i_dirty_caps;
- dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
- ceph_cap_string(flushing),
- ceph_cap_string(ci->i_flushing_caps),
- ceph_cap_string(ci->i_flushing_caps | flushing));
+ doutc(cl, "flushing %s, flushing_caps %s -> %s\n",
+ ceph_cap_string(flushing),
+ ceph_cap_string(ci->i_flushing_caps),
+ ceph_cap_string(ci->i_flushing_caps | flushing));
ci->i_flushing_caps |= flushing;
ci->i_dirty_caps = 0;
- dout(" inode %p now !dirty\n", inode);
+ doutc(cl, "%p %llx.%llx now !dirty\n", inode, ceph_vinop(inode));
swap(cf, ci->i_prealloc_cap_flush);
cf->caps = flushing;
@@ -1921,6 +1970,7 @@ static int try_nonblocking_invalidate(struct inode *inode)
__releases(ci->i_ceph_lock)
__acquires(ci->i_ceph_lock)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
u32 invalidating_gen = ci->i_rdcache_gen;
@@ -1932,12 +1982,13 @@ static int try_nonblocking_invalidate(struct inode *inode)
if (inode->i_data.nrpages == 0 &&
invalidating_gen == ci->i_rdcache_gen) {
/* success. */
- dout("try_nonblocking_invalidate %p success\n", inode);
+ doutc(cl, "%p %llx.%llx success\n", inode,
+ ceph_vinop(inode));
/* save any racing async invalidate some trouble */
ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
return 0;
}
- dout("try_nonblocking_invalidate %p failed\n", inode);
+ doutc(cl, "%p %llx.%llx failed\n", inode, ceph_vinop(inode));
return -1;
}
@@ -1969,6 +2020,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags)
{
struct inode *inode = &ci->netfs.inode;
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_cap *cap;
u64 flush_tid, oldest_flush_tid;
int file_wanted, used, cap_used;
@@ -2043,9 +2095,9 @@ retry:
}
}
- dout("check_caps %llx.%llx file_want %s used %s dirty %s flushing %s"
- " issued %s revoking %s retain %s %s%s%s\n", ceph_vinop(inode),
- ceph_cap_string(file_wanted),
+ doutc(cl, "%p %llx.%llx file_want %s used %s dirty %s "
+ "flushing %s issued %s revoking %s retain %s %s%s%s\n",
+ inode, ceph_vinop(inode), ceph_cap_string(file_wanted),
ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
ceph_cap_string(ci->i_flushing_caps),
ceph_cap_string(issued), ceph_cap_string(revoking),
@@ -2066,10 +2118,10 @@ retry:
(revoking & (CEPH_CAP_FILE_CACHE|
CEPH_CAP_FILE_LAZYIO)) && /* or revoking cache */
!tried_invalidate) {
- dout("check_caps trying to invalidate on %llx.%llx\n",
- ceph_vinop(inode));
+ doutc(cl, "trying to invalidate on %p %llx.%llx\n",
+ inode, ceph_vinop(inode));
if (try_nonblocking_invalidate(inode) < 0) {
- dout("check_caps queuing invalidate\n");
+ doutc(cl, "queuing invalidate\n");
queue_invalidate = true;
ci->i_rdcache_revoking = ci->i_rdcache_gen;
}
@@ -2097,35 +2149,35 @@ retry:
cap_used &= ~ci->i_auth_cap->issued;
revoking = cap->implemented & ~cap->issued;
- dout(" mds%d cap %p used %s issued %s implemented %s revoking %s\n",
- cap->mds, cap, ceph_cap_string(cap_used),
- ceph_cap_string(cap->issued),
- ceph_cap_string(cap->implemented),
- ceph_cap_string(revoking));
+ doutc(cl, " mds%d cap %p used %s issued %s implemented %s revoking %s\n",
+ cap->mds, cap, ceph_cap_string(cap_used),
+ ceph_cap_string(cap->issued),
+ ceph_cap_string(cap->implemented),
+ ceph_cap_string(revoking));
if (cap == ci->i_auth_cap &&
(cap->issued & CEPH_CAP_FILE_WR)) {
/* request larger max_size from MDS? */
if (ci->i_wanted_max_size > ci->i_max_size &&
ci->i_wanted_max_size > ci->i_requested_max_size) {
- dout("requesting new max_size\n");
+ doutc(cl, "requesting new max_size\n");
goto ack;
}
/* approaching file_max? */
if (__ceph_should_report_size(ci)) {
- dout("i_size approaching max_size\n");
+ doutc(cl, "i_size approaching max_size\n");
goto ack;
}
}
/* flush anything dirty? */
if (cap == ci->i_auth_cap) {
if ((flags & CHECK_CAPS_FLUSH) && ci->i_dirty_caps) {
- dout("flushing dirty caps\n");
+ doutc(cl, "flushing dirty caps\n");
goto ack;
}
if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS) {
- dout("flushing snap caps\n");
+ doutc(cl, "flushing snap caps\n");
goto ack;
}
}
@@ -2133,7 +2185,7 @@ retry:
/* completed revocation? going down and there are no caps? */
if (revoking) {
if ((revoking & cap_used) == 0) {
- dout("completed revocation of %s\n",
+ doutc(cl, "completed revocation of %s\n",
ceph_cap_string(cap->implemented & ~cap->issued));
goto ack;
}
@@ -2232,7 +2284,7 @@ ack:
*/
static int try_flush_caps(struct inode *inode, u64 *ptid)
{
- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
struct ceph_inode_info *ci = ceph_inode(inode);
int flushing = 0;
u64 flush_tid = 0, oldest_flush_tid = 0;
@@ -2310,7 +2362,8 @@ static int caps_are_flushed(struct inode *inode, u64 flush_tid)
*/
static int flush_mdlog_and_wait_inode_unsafe_requests(struct inode *inode)
{
- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_request *req1 = NULL, *req2 = NULL;
int ret, err = 0;
@@ -2400,8 +2453,9 @@ static int flush_mdlog_and_wait_inode_unsafe_requests(struct inode *inode)
kfree(sessions);
}
- dout("%s %p wait on tid %llu %llu\n", __func__,
- inode, req1 ? req1->r_tid : 0ULL, req2 ? req2->r_tid : 0ULL);
+ doutc(cl, "%p %llx.%llx wait on tid %llu %llu\n", inode,
+ ceph_vinop(inode), req1 ? req1->r_tid : 0ULL,
+ req2 ? req2->r_tid : 0ULL);
if (req1) {
ret = !wait_for_completion_timeout(&req1->r_safe_completion,
ceph_timeout_jiffies(req1->r_timeout));
@@ -2427,11 +2481,13 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
struct inode *inode = file->f_mapping->host;
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
u64 flush_tid;
int ret, err;
int dirty;
- dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
+ doutc(cl, "%p %llx.%llx%s\n", inode, ceph_vinop(inode),
+ datasync ? " datasync" : "");
ret = file_write_and_wait_range(file, start, end);
if (datasync)
@@ -2442,7 +2498,7 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
goto out;
dirty = try_flush_caps(inode, &flush_tid);
- dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));
+ doutc(cl, "dirty caps are %s\n", ceph_cap_string(dirty));
err = flush_mdlog_and_wait_inode_unsafe_requests(inode);
@@ -2463,7 +2519,8 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
if (err < 0)
ret = err;
out:
- dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
+ doutc(cl, "%p %llx.%llx%s result=%d\n", inode, ceph_vinop(inode),
+ datasync ? " datasync" : "", ret);
return ret;
}
@@ -2476,12 +2533,13 @@ out:
int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
u64 flush_tid;
int err = 0;
int dirty;
int wait = (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync);
- dout("write_inode %p wait=%d\n", inode, wait);
+ doutc(cl, "%p %llx.%llx wait=%d\n", inode, ceph_vinop(inode), wait);
ceph_fscache_unpin_writeback(inode, wbc);
if (wait) {
err = ceph_wait_on_async_create(inode);
@@ -2493,7 +2551,7 @@ int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
caps_are_flushed(inode, flush_tid));
} else {
struct ceph_mds_client *mdsc =
- ceph_sb_to_client(inode->i_sb)->mdsc;
+ ceph_sb_to_fs_client(inode->i_sb)->mdsc;
spin_lock(&ci->i_ceph_lock);
if (__ceph_caps_dirty(ci))
@@ -2511,6 +2569,7 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
__acquires(ci->i_ceph_lock)
{
struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_cap *cap;
struct ceph_cap_flush *cf;
int ret;
@@ -2536,8 +2595,8 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
cap = ci->i_auth_cap;
if (!(cap && cap->session == session)) {
- pr_err("%p auth cap %p not mds%d ???\n",
- inode, cap, session->s_mds);
+ pr_err_client(cl, "%p auth cap %p not mds%d ???\n",
+ inode, cap, session->s_mds);
break;
}
@@ -2546,8 +2605,9 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
if (!cf->is_capsnap) {
struct cap_msg_args arg;
- dout("kick_flushing_caps %p cap %p tid %llu %s\n",
- inode, cap, cf->tid, ceph_cap_string(cf->caps));
+ doutc(cl, "%p %llx.%llx cap %p tid %llu %s\n",
+ inode, ceph_vinop(inode), cap, cf->tid,
+ ceph_cap_string(cf->caps));
__prep_cap(&arg, cap, CEPH_CAP_OP_FLUSH,
(cf->tid < last_snap_flush ?
CEPH_CLIENT_CAPS_PENDING_CAPSNAP : 0),
@@ -2561,9 +2621,9 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
struct ceph_cap_snap *capsnap =
container_of(cf, struct ceph_cap_snap,
cap_flush);
- dout("kick_flushing_caps %p capsnap %p tid %llu %s\n",
- inode, capsnap, cf->tid,
- ceph_cap_string(capsnap->dirty));
+ doutc(cl, "%p %llx.%llx capsnap %p tid %llu %s\n",
+ inode, ceph_vinop(inode), capsnap, cf->tid,
+ ceph_cap_string(capsnap->dirty));
refcount_inc(&capsnap->nref);
spin_unlock(&ci->i_ceph_lock);
@@ -2571,11 +2631,10 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
ret = __send_flush_snap(inode, session, capsnap, cap->mseq,
oldest_flush_tid);
if (ret < 0) {
- pr_err("kick_flushing_caps: error sending "
- "cap flushsnap, ino (%llx.%llx) "
- "tid %llu follows %llu\n",
- ceph_vinop(inode), cf->tid,
- capsnap->follows);
+ pr_err_client(cl, "error sending cap flushsnap,"
+ " %p %llx.%llx tid %llu follows %llu\n",
+ inode, ceph_vinop(inode), cf->tid,
+ capsnap->follows);
}
ceph_put_cap_snap(capsnap);
@@ -2588,22 +2647,26 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *ci;
struct ceph_cap *cap;
u64 oldest_flush_tid;
- dout("early_kick_flushing_caps mds%d\n", session->s_mds);
+ doutc(cl, "mds%d\n", session->s_mds);
spin_lock(&mdsc->cap_dirty_lock);
oldest_flush_tid = __get_oldest_flush_tid(mdsc);
spin_unlock(&mdsc->cap_dirty_lock);
list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
+ struct inode *inode = &ci->netfs.inode;
+
spin_lock(&ci->i_ceph_lock);
cap = ci->i_auth_cap;
if (!(cap && cap->session == session)) {
- pr_err("%p auth cap %p not mds%d ???\n",
- &ci->netfs.inode, cap, session->s_mds);
+ pr_err_client(cl, "%p %llx.%llx auth cap %p not mds%d ???\n",
+ inode, ceph_vinop(inode), cap,
+ session->s_mds);
spin_unlock(&ci->i_ceph_lock);
continue;
}
@@ -2636,24 +2699,28 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *ci;
struct ceph_cap *cap;
u64 oldest_flush_tid;
lockdep_assert_held(&session->s_mutex);
- dout("kick_flushing_caps mds%d\n", session->s_mds);
+ doutc(cl, "mds%d\n", session->s_mds);
spin_lock(&mdsc->cap_dirty_lock);
oldest_flush_tid = __get_oldest_flush_tid(mdsc);
spin_unlock(&mdsc->cap_dirty_lock);
list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
+ struct inode *inode = &ci->netfs.inode;
+
spin_lock(&ci->i_ceph_lock);
cap = ci->i_auth_cap;
if (!(cap && cap->session == session)) {
- pr_err("%p auth cap %p not mds%d ???\n",
- &ci->netfs.inode, cap, session->s_mds);
+ pr_err_client(cl, "%p %llx.%llx auth cap %p not mds%d ???\n",
+ inode, ceph_vinop(inode), cap,
+ session->s_mds);
spin_unlock(&ci->i_ceph_lock);
continue;
}
@@ -2670,11 +2737,13 @@ void ceph_kick_flushing_inode_caps(struct ceph_mds_session *session,
{
struct ceph_mds_client *mdsc = session->s_mdsc;
struct ceph_cap *cap = ci->i_auth_cap;
+ struct inode *inode = &ci->netfs.inode;
lockdep_assert_held(&ci->i_ceph_lock);
- dout("%s %p flushing %s\n", __func__, &ci->netfs.inode,
- ceph_cap_string(ci->i_flushing_caps));
+ doutc(mdsc->fsc->client, "%p %llx.%llx flushing %s\n",
+ inode, ceph_vinop(inode),
+ ceph_cap_string(ci->i_flushing_caps));
if (!list_empty(&ci->i_cap_flush_list)) {
u64 oldest_flush_tid;
@@ -2696,6 +2765,9 @@ void ceph_kick_flushing_inode_caps(struct ceph_mds_session *session,
void ceph_take_cap_refs(struct ceph_inode_info *ci, int got,
bool snap_rwsem_locked)
{
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
+
lockdep_assert_held(&ci->i_ceph_lock);
if (got & CEPH_CAP_PIN)
@@ -2716,10 +2788,10 @@ void ceph_take_cap_refs(struct ceph_inode_info *ci, int got,
}
if (got & CEPH_CAP_FILE_BUFFER) {
if (ci->i_wb_ref == 0)
- ihold(&ci->netfs.inode);
+ ihold(inode);
ci->i_wb_ref++;
- dout("%s %p wb %d -> %d (?)\n", __func__,
- &ci->netfs.inode, ci->i_wb_ref-1, ci->i_wb_ref);
+ doutc(cl, "%p %llx.%llx wb %d -> %d (?)\n", inode,
+ ceph_vinop(inode), ci->i_wb_ref-1, ci->i_wb_ref);
}
}
@@ -2746,20 +2818,23 @@ static int try_get_cap_refs(struct inode *inode, int need, int want,
loff_t endoff, int flags, int *got)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int ret = 0;
int have, implemented;
bool snap_rwsem_locked = false;
- dout("get_cap_refs %p need %s want %s\n", inode,
- ceph_cap_string(need), ceph_cap_string(want));
+ doutc(cl, "%p %llx.%llx need %s want %s\n", inode,
+ ceph_vinop(inode), ceph_cap_string(need),
+ ceph_cap_string(want));
again:
spin_lock(&ci->i_ceph_lock);
if ((flags & CHECK_FILELOCK) &&
(ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK)) {
- dout("try_get_cap_refs %p error filelock\n", inode);
+ doutc(cl, "%p %llx.%llx error filelock\n", inode,
+ ceph_vinop(inode));
ret = -EIO;
goto out_unlock;
}
@@ -2779,8 +2854,8 @@ again:
if (have & need & CEPH_CAP_FILE_WR) {
if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
- dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
- inode, endoff, ci->i_max_size);
+ doutc(cl, "%p %llx.%llx endoff %llu > maxsize %llu\n",
+ inode, ceph_vinop(inode), endoff, ci->i_max_size);
if (endoff > ci->i_requested_max_size)
ret = ci->i_auth_cap ? -EFBIG : -EUCLEAN;
goto out_unlock;
@@ -2790,7 +2865,8 @@ again:
* can get a final snapshot value for size+mtime.
*/
if (__ceph_have_pending_cap_snap(ci)) {
- dout("get_cap_refs %p cap_snap_pending\n", inode);
+ doutc(cl, "%p %llx.%llx cap_snap_pending\n", inode,
+ ceph_vinop(inode));
goto out_unlock;
}
}
@@ -2808,9 +2884,9 @@ again:
int not = want & ~(have & need);
int revoking = implemented & ~have;
int exclude = revoking & not;
- dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
- inode, ceph_cap_string(have), ceph_cap_string(not),
- ceph_cap_string(revoking));
+ doutc(cl, "%p %llx.%llx have %s but not %s (revoking %s)\n",
+ inode, ceph_vinop(inode), ceph_cap_string(have),
+ ceph_cap_string(not), ceph_cap_string(revoking));
if (!exclude || !(exclude & CEPH_CAP_FILE_BUFFER)) {
if (!snap_rwsem_locked &&
!ci->i_head_snapc &&
@@ -2850,28 +2926,31 @@ again:
spin_unlock(&s->s_cap_lock);
}
if (session_readonly) {
- dout("get_cap_refs %p need %s but mds%d readonly\n",
- inode, ceph_cap_string(need), ci->i_auth_cap->mds);
+ doutc(cl, "%p %llx.%llx need %s but mds%d readonly\n",
+ inode, ceph_vinop(inode), ceph_cap_string(need),
+ ci->i_auth_cap->mds);
ret = -EROFS;
goto out_unlock;
}
if (ceph_inode_is_shutdown(inode)) {
- dout("get_cap_refs %p inode is shutdown\n", inode);
+ doutc(cl, "%p %llx.%llx inode is shutdown\n",
+ inode, ceph_vinop(inode));
ret = -ESTALE;
goto out_unlock;
}
mds_wanted = __ceph_caps_mds_wanted(ci, false);
if (need & ~mds_wanted) {
- dout("get_cap_refs %p need %s > mds_wanted %s\n",
- inode, ceph_cap_string(need),
- ceph_cap_string(mds_wanted));
+ doutc(cl, "%p %llx.%llx need %s > mds_wanted %s\n",
+ inode, ceph_vinop(inode), ceph_cap_string(need),
+ ceph_cap_string(mds_wanted));
ret = -EUCLEAN;
goto out_unlock;
}
- dout("get_cap_refs %p have %s need %s\n", inode,
- ceph_cap_string(have), ceph_cap_string(need));
+ doutc(cl, "%p %llx.%llx have %s need %s\n", inode,
+ ceph_vinop(inode), ceph_cap_string(have),
+ ceph_cap_string(need));
}
out_unlock:
@@ -2886,8 +2965,8 @@ out_unlock:
else if (ret == 1)
ceph_update_cap_hit(&mdsc->metric);
- dout("get_cap_refs %p ret %d got %s\n", inode,
- ret, ceph_cap_string(*got));
+ doutc(cl, "%p %llx.%llx ret %d got %s\n", inode,
+ ceph_vinop(inode), ret, ceph_cap_string(*got));
return ret;
}
@@ -2899,13 +2978,14 @@ out_unlock:
static void check_max_size(struct inode *inode, loff_t endoff)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int check = 0;
/* do we need to explicitly request a larger max_size? */
spin_lock(&ci->i_ceph_lock);
if (endoff >= ci->i_max_size && endoff > ci->i_wanted_max_size) {
- dout("write %p at large endoff %llu, req max_size\n",
- inode, endoff);
+ doutc(cl, "write %p %llx.%llx at large endoff %llu, req max_size\n",
+ inode, ceph_vinop(inode), endoff);
ci->i_wanted_max_size = endoff;
}
/* duplicate ceph_check_caps()'s logic */
@@ -2964,7 +3044,7 @@ int __ceph_get_caps(struct inode *inode, struct ceph_file_info *fi, int need,
int want, loff_t endoff, int *got)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
int ret, _got, flags;
ret = ceph_pool_perm_check(inode, need);
@@ -3115,10 +3195,12 @@ void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
static int ceph_try_drop_cap_snap(struct ceph_inode_info *ci,
struct ceph_cap_snap *capsnap)
{
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
+
if (!capsnap->need_flush &&
!capsnap->writing && !capsnap->dirty_pages) {
- dout("dropping cap_snap %p follows %llu\n",
- capsnap, capsnap->follows);
+ doutc(cl, "%p follows %llu\n", capsnap, capsnap->follows);
BUG_ON(capsnap->cap_flush.tid > 0);
ceph_put_snap_context(capsnap->context);
if (!list_is_last(&capsnap->ci_item, &ci->i_cap_snaps))
@@ -3150,6 +3232,7 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
enum put_cap_refs_mode mode)
{
struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int last = 0, put = 0, flushsnaps = 0, wake = 0;
bool check_flushsnaps = false;
@@ -3172,8 +3255,8 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
put++;
check_flushsnaps = true;
}
- dout("put_cap_refs %p wb %d -> %d (?)\n",
- inode, ci->i_wb_ref+1, ci->i_wb_ref);
+ doutc(cl, "%p %llx.%llx wb %d -> %d (?)\n", inode,
+ ceph_vinop(inode), ci->i_wb_ref+1, ci->i_wb_ref);
}
if (had & CEPH_CAP_FILE_WR) {
if (--ci->i_wr_ref == 0) {
@@ -3213,8 +3296,8 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
}
spin_unlock(&ci->i_ceph_lock);
- dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
- last ? " last" : "", put ? " put" : "");
+ doutc(cl, "%p %llx.%llx had %s%s%s\n", inode, ceph_vinop(inode),
+ ceph_cap_string(had), last ? " last" : "", put ? " put" : "");
switch (mode) {
case PUT_CAP_REFS_SYNC:
@@ -3264,6 +3347,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
struct ceph_snap_context *snapc)
{
struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_cap_snap *capsnap = NULL, *iter;
int put = 0;
bool last = false;
@@ -3287,11 +3371,10 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
ceph_put_snap_context(ci->i_head_snapc);
ci->i_head_snapc = NULL;
}
- dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
- inode,
- ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
- ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
- last ? " LAST" : "");
+ doutc(cl, "on %p %llx.%llx head %d/%d -> %d/%d %s\n",
+ inode, ceph_vinop(inode), ci->i_wrbuffer_ref+nr,
+ ci->i_wrbuffer_ref_head+nr, ci->i_wrbuffer_ref,
+ ci->i_wrbuffer_ref_head, last ? " LAST" : "");
} else {
list_for_each_entry(iter, &ci->i_cap_snaps, ci_item) {
if (iter->context == snapc) {
@@ -3321,13 +3404,12 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
}
}
}
- dout("put_wrbuffer_cap_refs on %p cap_snap %p "
- " snap %lld %d/%d -> %d/%d %s%s\n",
- inode, capsnap, capsnap->context->seq,
- ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
- ci->i_wrbuffer_ref, capsnap->dirty_pages,
- last ? " (wrbuffer last)" : "",
- complete_capsnap ? " (complete capsnap)" : "");
+ doutc(cl, "%p %llx.%llx cap_snap %p snap %lld %d/%d -> %d/%d %s%s\n",
+ inode, ceph_vinop(inode), capsnap, capsnap->context->seq,
+ ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
+ ci->i_wrbuffer_ref, capsnap->dirty_pages,
+ last ? " (wrbuffer last)" : "",
+ complete_capsnap ? " (complete capsnap)" : "");
}
unlock:
@@ -3350,9 +3432,10 @@ unlock:
*/
static void invalidate_aliases(struct inode *inode)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct dentry *dn, *prev = NULL;
- dout("invalidate_aliases inode %p\n", inode);
+ doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
d_prune_aliases(inode);
/*
* For non-directory inode, d_find_alias() only returns
@@ -3411,6 +3494,7 @@ static void handle_cap_grant(struct inode *inode,
__releases(ci->i_ceph_lock)
__releases(session->s_mdsc->snap_rwsem)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
int seq = le32_to_cpu(grant->seq);
int newcaps = le32_to_cpu(grant->caps);
@@ -3434,10 +3518,11 @@ static void handle_cap_grant(struct inode *inode,
if (IS_ENCRYPTED(inode) && size)
size = extra_info->fscrypt_file_size;
- dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
- inode, cap, session->s_mds, seq, ceph_cap_string(newcaps));
- dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
- i_size_read(inode));
+ doutc(cl, "%p %llx.%llx cap %p mds%d seq %d %s\n", inode,
+ ceph_vinop(inode), cap, session->s_mds, seq,
+ ceph_cap_string(newcaps));
+ doutc(cl, " size %llu max_size %llu, i_size %llu\n", size,
+ max_size, i_size_read(inode));
/*
@@ -3497,15 +3582,17 @@ static void handle_cap_grant(struct inode *inode,
inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(grant->uid));
inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(grant->gid));
ci->i_btime = extra_info->btime;
- dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
- from_kuid(&init_user_ns, inode->i_uid),
- from_kgid(&init_user_ns, inode->i_gid));
+ doutc(cl, "%p %llx.%llx mode 0%o uid.gid %d.%d\n", inode,
+ ceph_vinop(inode), inode->i_mode,
+ from_kuid(&init_user_ns, inode->i_uid),
+ from_kgid(&init_user_ns, inode->i_gid));
#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
if (ci->fscrypt_auth_len != extra_info->fscrypt_auth_len ||
memcmp(ci->fscrypt_auth, extra_info->fscrypt_auth,
ci->fscrypt_auth_len))
- pr_warn_ratelimited("%s: cap grant attempt to change fscrypt_auth on non-I_NEW inode (old len %d new len %d)\n",
- __func__, ci->fscrypt_auth_len,
+ pr_warn_ratelimited_client(cl,
+ "cap grant attempt to change fscrypt_auth on non-I_NEW inode (old len %d new len %d)\n",
+ ci->fscrypt_auth_len,
extra_info->fscrypt_auth_len);
#endif
}
@@ -3523,8 +3610,8 @@ static void handle_cap_grant(struct inode *inode,
u64 version = le64_to_cpu(grant->xattr_version);
if (version > ci->i_xattrs.version) {
- dout(" got new xattrs v%llu on %p len %d\n",
- version, inode, len);
+ doutc(cl, " got new xattrs v%llu on %p %llx.%llx len %d\n",
+ version, inode, ceph_vinop(inode), len);
if (ci->i_xattrs.blob)
ceph_buffer_put(ci->i_xattrs.blob);
ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
@@ -3575,8 +3662,8 @@ static void handle_cap_grant(struct inode *inode,
if (ci->i_auth_cap == cap && (newcaps & CEPH_CAP_ANY_FILE_WR)) {
if (max_size != ci->i_max_size) {
- dout("max_size %lld -> %llu\n",
- ci->i_max_size, max_size);
+ doutc(cl, "max_size %lld -> %llu\n", ci->i_max_size,
+ max_size);
ci->i_max_size = max_size;
if (max_size >= ci->i_wanted_max_size) {
ci->i_wanted_max_size = 0; /* reset */
@@ -3590,10 +3677,9 @@ static void handle_cap_grant(struct inode *inode,
wanted = __ceph_caps_wanted(ci);
used = __ceph_caps_used(ci);
dirty = __ceph_caps_dirty(ci);
- dout(" my wanted = %s, used = %s, dirty %s\n",
- ceph_cap_string(wanted),
- ceph_cap_string(used),
- ceph_cap_string(dirty));
+ doutc(cl, " my wanted = %s, used = %s, dirty %s\n",
+ ceph_cap_string(wanted), ceph_cap_string(used),
+ ceph_cap_string(dirty));
if ((was_stale || le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) &&
(wanted & ~(cap->mds_wanted | newcaps))) {
@@ -3614,10 +3700,9 @@ static void handle_cap_grant(struct inode *inode,
if (cap->issued & ~newcaps) {
int revoking = cap->issued & ~newcaps;
- dout("revocation: %s -> %s (revoking %s)\n",
- ceph_cap_string(cap->issued),
- ceph_cap_string(newcaps),
- ceph_cap_string(revoking));
+ doutc(cl, "revocation: %s -> %s (revoking %s)\n",
+ ceph_cap_string(cap->issued), ceph_cap_string(newcaps),
+ ceph_cap_string(revoking));
if (S_ISREG(inode->i_mode) &&
(revoking & used & CEPH_CAP_FILE_BUFFER))
writeback = true; /* initiate writeback; will delay ack */
@@ -3635,11 +3720,12 @@ static void handle_cap_grant(struct inode *inode,
cap->issued = newcaps;
cap->implemented |= newcaps;
} else if (cap->issued == newcaps) {
- dout("caps unchanged: %s -> %s\n",
- ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
+ doutc(cl, "caps unchanged: %s -> %s\n",
+ ceph_cap_string(cap->issued),
+ ceph_cap_string(newcaps));
} else {
- dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
- ceph_cap_string(newcaps));
+ doutc(cl, "grant: %s -> %s\n", ceph_cap_string(cap->issued),
+ ceph_cap_string(newcaps));
/* non-auth MDS is revoking the newly granted caps? */
if (cap == ci->i_auth_cap &&
__ceph_caps_revoking_other(ci, cap, newcaps))
@@ -3727,7 +3813,8 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
__releases(ci->i_ceph_lock)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_cap_flush *cf, *tmp_cf;
LIST_HEAD(to_remove);
unsigned seq = le32_to_cpu(m->seq);
@@ -3764,11 +3851,11 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
}
}
- dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
- " flushing %s -> %s\n",
- inode, session->s_mds, seq, ceph_cap_string(dirty),
- ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
- ceph_cap_string(ci->i_flushing_caps & ~cleaned));
+ doutc(cl, "%p %llx.%llx mds%d seq %d on %s cleaned %s, flushing %s -> %s\n",
+ inode, ceph_vinop(inode), session->s_mds, seq,
+ ceph_cap_string(dirty), ceph_cap_string(cleaned),
+ ceph_cap_string(ci->i_flushing_caps),
+ ceph_cap_string(ci->i_flushing_caps & ~cleaned));
if (list_empty(&to_remove) && !cleaned)
goto out;
@@ -3784,18 +3871,21 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
if (list_empty(&ci->i_cap_flush_list)) {
list_del_init(&ci->i_flushing_item);
if (!list_empty(&session->s_cap_flushing)) {
- dout(" mds%d still flushing cap on %p\n",
- session->s_mds,
- &list_first_entry(&session->s_cap_flushing,
- struct ceph_inode_info,
- i_flushing_item)->netfs.inode);
+ struct inode *inode =
+ &list_first_entry(&session->s_cap_flushing,
+ struct ceph_inode_info,
+ i_flushing_item)->netfs.inode;
+ doutc(cl, " mds%d still flushing cap on %p %llx.%llx\n",
+ session->s_mds, inode, ceph_vinop(inode));
}
}
mdsc->num_cap_flushing--;
- dout(" inode %p now !flushing\n", inode);
+ doutc(cl, " %p %llx.%llx now !flushing\n", inode,
+ ceph_vinop(inode));
if (ci->i_dirty_caps == 0) {
- dout(" inode %p now clean\n", inode);
+ doutc(cl, " %p %llx.%llx now clean\n", inode,
+ ceph_vinop(inode));
BUG_ON(!list_empty(&ci->i_dirty_item));
drop = true;
if (ci->i_wr_ref == 0 &&
@@ -3833,12 +3923,14 @@ void __ceph_remove_capsnap(struct inode *inode, struct ceph_cap_snap *capsnap,
bool *wake_ci, bool *wake_mdsc)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
bool ret;
lockdep_assert_held(&ci->i_ceph_lock);
- dout("removing capsnap %p, inode %p ci %p\n", capsnap, inode, ci);
+ doutc(cl, "removing capsnap %p, %p %llx.%llx ci %p\n", capsnap,
+ inode, ceph_vinop(inode), ci);
list_del_init(&capsnap->ci_item);
ret = __detach_cap_flush_from_ci(ci, &capsnap->cap_flush);
@@ -3877,29 +3969,31 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
struct ceph_mds_session *session)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
u64 follows = le64_to_cpu(m->snap_follows);
struct ceph_cap_snap *capsnap = NULL, *iter;
bool wake_ci = false;
bool wake_mdsc = false;
- dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
- inode, ci, session->s_mds, follows);
+ doutc(cl, "%p %llx.%llx ci %p mds%d follows %lld\n", inode,
+ ceph_vinop(inode), ci, session->s_mds, follows);
spin_lock(&ci->i_ceph_lock);
list_for_each_entry(iter, &ci->i_cap_snaps, ci_item) {
if (iter->follows == follows) {
if (iter->cap_flush.tid != flush_tid) {
- dout(" cap_snap %p follows %lld tid %lld !="
- " %lld\n", iter, follows,
- flush_tid, iter->cap_flush.tid);
+ doutc(cl, " cap_snap %p follows %lld "
+ "tid %lld != %lld\n", iter,
+ follows, flush_tid,
+ iter->cap_flush.tid);
break;
}
capsnap = iter;
break;
} else {
- dout(" skipping cap_snap %p follows %lld\n",
- iter, iter->follows);
+ doutc(cl, " skipping cap_snap %p follows %lld\n",
+ iter, iter->follows);
}
}
if (capsnap)
@@ -3928,6 +4022,7 @@ static bool handle_cap_trunc(struct inode *inode,
struct cap_extra_info *extra_info)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int mds = session->s_mds;
int seq = le32_to_cpu(trunc->seq);
u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
@@ -3950,8 +4045,8 @@ static bool handle_cap_trunc(struct inode *inode,
if (IS_ENCRYPTED(inode) && size)
size = extra_info->fscrypt_file_size;
- dout("%s inode %p mds%d seq %d to %lld truncate seq %d\n",
- __func__, inode, mds, seq, truncate_size, truncate_seq);
+ doutc(cl, "%p %llx.%llx mds%d seq %d to %lld truncate seq %d\n",
+ inode, ceph_vinop(inode), mds, seq, truncate_size, truncate_seq);
queue_trunc = ceph_fill_file_size(inode, issued,
truncate_seq, truncate_size, size);
return queue_trunc;
@@ -3969,7 +4064,8 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
struct ceph_mds_cap_peer *ph,
struct ceph_mds_session *session)
{
- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_session *tsession = NULL;
struct ceph_cap *cap, *tcap, *new_cap = NULL;
struct ceph_inode_info *ci = ceph_inode(inode);
@@ -3989,8 +4085,8 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
target = -1;
}
- dout("handle_cap_export inode %p ci %p mds%d mseq %d target %d\n",
- inode, ci, mds, mseq, target);
+ doutc(cl, "%p %llx.%llx ci %p mds%d mseq %d target %d\n",
+ inode, ceph_vinop(inode), ci, mds, mseq, target);
retry:
down_read(&mdsc->snap_rwsem);
spin_lock(&ci->i_ceph_lock);
@@ -3999,7 +4095,7 @@ retry:
goto out_unlock;
if (target < 0) {
- ceph_remove_cap(cap, false);
+ ceph_remove_cap(mdsc, cap, false);
goto out_unlock;
}
@@ -4010,12 +4106,13 @@ retry:
issued = cap->issued;
if (issued != cap->implemented)
- pr_err_ratelimited("handle_cap_export: issued != implemented: "
- "ino (%llx.%llx) mds%d seq %d mseq %d "
- "issued %s implemented %s\n",
- ceph_vinop(inode), mds, cap->seq, cap->mseq,
- ceph_cap_string(issued),
- ceph_cap_string(cap->implemented));
+ pr_err_ratelimited_client(cl, "issued != implemented: "
+ "%p %llx.%llx mds%d seq %d mseq %d"
+ " issued %s implemented %s\n",
+ inode, ceph_vinop(inode), mds,
+ cap->seq, cap->mseq,
+ ceph_cap_string(issued),
+ ceph_cap_string(cap->implemented));
tcap = __get_cap_for_mds(ci, target);
@@ -4023,7 +4120,8 @@ retry:
/* already have caps from the target */
if (tcap->cap_id == t_cap_id &&
ceph_seq_cmp(tcap->seq, t_seq) < 0) {
- dout(" updating import cap %p mds%d\n", tcap, target);
+ doutc(cl, " updating import cap %p mds%d\n", tcap,
+ target);
tcap->cap_id = t_cap_id;
tcap->seq = t_seq - 1;
tcap->issue_seq = t_seq - 1;
@@ -4034,7 +4132,7 @@ retry:
change_auth_cap_ses(ci, tcap->session);
}
}
- ceph_remove_cap(cap, false);
+ ceph_remove_cap(mdsc, cap, false);
goto out_unlock;
} else if (tsession) {
/* add placeholder for the export target */
@@ -4051,7 +4149,7 @@ retry:
spin_unlock(&mdsc->cap_dirty_lock);
}
- ceph_remove_cap(cap, false);
+ ceph_remove_cap(mdsc, cap, false);
goto out_unlock;
}
@@ -4104,6 +4202,7 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
struct ceph_cap **target_cap, int *old_issued)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_cap *cap, *ocap, *new_cap = NULL;
int mds = session->s_mds;
int issued;
@@ -4124,8 +4223,8 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
peer = -1;
}
- dout("handle_cap_import inode %p ci %p mds%d mseq %d peer %d\n",
- inode, ci, mds, mseq, peer);
+ doutc(cl, "%p %llx.%llx ci %p mds%d mseq %d peer %d\n",
+ inode, ceph_vinop(inode), ci, mds, mseq, peer);
retry:
cap = __get_cap_for_mds(ci, mds);
if (!cap) {
@@ -4151,20 +4250,20 @@ retry:
ocap = peer >= 0 ? __get_cap_for_mds(ci, peer) : NULL;
if (ocap && ocap->cap_id == p_cap_id) {
- dout(" remove export cap %p mds%d flags %d\n",
- ocap, peer, ph->flags);
+ doutc(cl, " remove export cap %p mds%d flags %d\n",
+ ocap, peer, ph->flags);
if ((ph->flags & CEPH_CAP_FLAG_AUTH) &&
(ocap->seq != le32_to_cpu(ph->seq) ||
ocap->mseq != le32_to_cpu(ph->mseq))) {
- pr_err_ratelimited("handle_cap_import: "
- "mismatched seq/mseq: ino (%llx.%llx) "
- "mds%d seq %d mseq %d importer mds%d "
- "has peer seq %d mseq %d\n",
- ceph_vinop(inode), peer, ocap->seq,
- ocap->mseq, mds, le32_to_cpu(ph->seq),
+ pr_err_ratelimited_client(cl, "mismatched seq/mseq: "
+ "%p %llx.%llx mds%d seq %d mseq %d"
+ " importer mds%d has peer seq %d mseq %d\n",
+ inode, ceph_vinop(inode), peer,
+ ocap->seq, ocap->mseq, mds,
+ le32_to_cpu(ph->seq),
le32_to_cpu(ph->mseq));
}
- ceph_remove_cap(ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
+ ceph_remove_cap(mdsc, ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
}
*old_issued = issued;
@@ -4227,6 +4326,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
struct ceph_msg *msg)
{
struct ceph_mds_client *mdsc = session->s_mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
struct inode *inode;
struct ceph_inode_info *ci;
struct ceph_cap *cap;
@@ -4245,7 +4345,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
bool close_sessions = false;
bool do_cap_release = false;
- dout("handle_caps from mds%d\n", session->s_mds);
+ doutc(cl, "from mds%d\n", session->s_mds);
if (!ceph_inc_mds_stopping_blocker(mdsc, session))
return;
@@ -4347,15 +4447,15 @@ void ceph_handle_caps(struct ceph_mds_session *session,
/* lookup ino */
inode = ceph_find_inode(mdsc->fsc->sb, vino);
- dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
- vino.snap, inode);
+ doutc(cl, " op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op),
+ vino.ino, vino.snap, inode);
mutex_lock(&session->s_mutex);
- dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
- (unsigned)seq);
+ doutc(cl, " mds%d seq %lld cap seq %u\n", session->s_mds,
+ session->s_seq, (unsigned)seq);
if (!inode) {
- dout(" i don't have ino %llx\n", vino.ino);
+ doutc(cl, " i don't have ino %llx\n", vino.ino);
switch (op) {
case CEPH_CAP_OP_IMPORT:
@@ -4410,9 +4510,9 @@ void ceph_handle_caps(struct ceph_mds_session *session,
spin_lock(&ci->i_ceph_lock);
cap = __get_cap_for_mds(ceph_inode(inode), session->s_mds);
if (!cap) {
- dout(" no cap on %p ino %llx.%llx from mds%d\n",
- inode, ceph_ino(inode), ceph_snap(inode),
- session->s_mds);
+ doutc(cl, " no cap on %p ino %llx.%llx from mds%d\n",
+ inode, ceph_ino(inode), ceph_snap(inode),
+ session->s_mds);
spin_unlock(&ci->i_ceph_lock);
switch (op) {
case CEPH_CAP_OP_REVOKE:
@@ -4450,8 +4550,8 @@ void ceph_handle_caps(struct ceph_mds_session *session,
default:
spin_unlock(&ci->i_ceph_lock);
- pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
- ceph_cap_op_name(op));
+ pr_err_client(cl, "unknown cap op %d %s\n", op,
+ ceph_cap_op_name(op));
}
done:
@@ -4492,7 +4592,7 @@ flush_cap_releases:
goto done;
bad:
- pr_err("ceph_handle_caps: corrupt message\n");
+ pr_err_client(cl, "corrupt message\n");
ceph_msg_dump(msg);
goto out;
}
@@ -4506,6 +4606,7 @@ bad:
*/
unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct inode *inode;
struct ceph_inode_info *ci;
struct ceph_mount_options *opt = mdsc->fsc->mount_options;
@@ -4513,14 +4614,14 @@ unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
unsigned long loop_start = jiffies;
unsigned long delay = 0;
- dout("check_delayed_caps\n");
+ doutc(cl, "begin\n");
spin_lock(&mdsc->cap_delay_lock);
while (!list_empty(&mdsc->cap_delay_list)) {
ci = list_first_entry(&mdsc->cap_delay_list,
struct ceph_inode_info,
i_cap_delay_list);
if (time_before(loop_start, ci->i_hold_caps_max - delay_max)) {
- dout("%s caps added recently. Exiting loop", __func__);
+ doutc(cl, "caps added recently. Exiting loop");
delay = ci->i_hold_caps_max;
break;
}
@@ -4532,13 +4633,15 @@ unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
inode = igrab(&ci->netfs.inode);
if (inode) {
spin_unlock(&mdsc->cap_delay_lock);
- dout("check_delayed_caps on %p\n", inode);
+ doutc(cl, "on %p %llx.%llx\n", inode,
+ ceph_vinop(inode));
ceph_check_caps(ci, 0);
iput(inode);
spin_lock(&mdsc->cap_delay_lock);
}
}
spin_unlock(&mdsc->cap_delay_lock);
+ doutc(cl, "done\n");
return delay;
}
@@ -4549,17 +4652,18 @@ unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
static void flush_dirty_session_caps(struct ceph_mds_session *s)
{
struct ceph_mds_client *mdsc = s->s_mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *ci;
struct inode *inode;
- dout("flush_dirty_caps\n");
+ doutc(cl, "begin\n");
spin_lock(&mdsc->cap_dirty_lock);
while (!list_empty(&s->s_cap_dirty)) {
ci = list_first_entry(&s->s_cap_dirty, struct ceph_inode_info,
i_dirty_item);
inode = &ci->netfs.inode;
ihold(inode);
- dout("flush_dirty_caps %llx.%llx\n", ceph_vinop(inode));
+ doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
spin_unlock(&mdsc->cap_dirty_lock);
ceph_wait_on_async_create(inode);
ceph_check_caps(ci, CHECK_CAPS_FLUSH);
@@ -4567,7 +4671,7 @@ static void flush_dirty_session_caps(struct ceph_mds_session *s)
spin_lock(&mdsc->cap_dirty_lock);
}
spin_unlock(&mdsc->cap_dirty_lock);
- dout("flush_dirty_caps done\n");
+ doutc(cl, "done\n");
}
void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
@@ -4672,7 +4776,7 @@ int ceph_drop_caps_for_unlink(struct inode *inode)
if (__ceph_caps_dirty(ci)) {
struct ceph_mds_client *mdsc =
- ceph_inode_to_client(inode)->mdsc;
+ ceph_inode_to_fs_client(inode)->mdsc;
__cap_delay_requeue_front(mdsc, ci);
}
}
@@ -4692,6 +4796,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
int mds, int drop, int unless, int force)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_cap *cap;
struct ceph_mds_request_release *rel = *p;
int used, dirty;
@@ -4701,9 +4806,9 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
used = __ceph_caps_used(ci);
dirty = __ceph_caps_dirty(ci);
- dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n",
- inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop),
- ceph_cap_string(unless));
+ doutc(cl, "%p %llx.%llx mds%d used|dirty %s drop %s unless %s\n",
+ inode, ceph_vinop(inode), mds, ceph_cap_string(used|dirty),
+ ceph_cap_string(drop), ceph_cap_string(unless));
/* only drop unused, clean caps */
drop &= ~(used | dirty);
@@ -4725,12 +4830,13 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
if (force || (cap->issued & drop)) {
if (cap->issued & drop) {
int wanted = __ceph_caps_wanted(ci);
- dout("encode_inode_release %p cap %p "
- "%s -> %s, wanted %s -> %s\n", inode, cap,
- ceph_cap_string(cap->issued),
- ceph_cap_string(cap->issued & ~drop),
- ceph_cap_string(cap->mds_wanted),
- ceph_cap_string(wanted));
+ doutc(cl, "%p %llx.%llx cap %p %s -> %s, "
+ "wanted %s -> %s\n", inode,
+ ceph_vinop(inode), cap,
+ ceph_cap_string(cap->issued),
+ ceph_cap_string(cap->issued & ~drop),
+ ceph_cap_string(cap->mds_wanted),
+ ceph_cap_string(wanted));
cap->issued &= ~drop;
cap->implemented &= ~drop;
@@ -4739,9 +4845,9 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
!(wanted & CEPH_CAP_ANY_FILE_WR))
ci->i_requested_max_size = 0;
} else {
- dout("encode_inode_release %p cap %p %s"
- " (force)\n", inode, cap,
- ceph_cap_string(cap->issued));
+ doutc(cl, "%p %llx.%llx cap %p %s (force)\n",
+ inode, ceph_vinop(inode), cap,
+ ceph_cap_string(cap->issued));
}
rel->ino = cpu_to_le64(ceph_ino(inode));
@@ -4756,8 +4862,9 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
*p += sizeof(*rel);
ret = 1;
} else {
- dout("encode_inode_release %p cap %p %s (noop)\n",
- inode, cap, ceph_cap_string(cap->issued));
+ doutc(cl, "%p %llx.%llx cap %p %s (noop)\n",
+ inode, ceph_vinop(inode), cap,
+ ceph_cap_string(cap->issued));
}
}
spin_unlock(&ci->i_ceph_lock);
@@ -4783,6 +4890,7 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
struct dentry *parent = NULL;
struct ceph_mds_request_release *rel = *p;
struct ceph_dentry_info *di = ceph_dentry(dentry);
+ struct ceph_client *cl;
int force = 0;
int ret;
@@ -4804,10 +4912,11 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
dput(parent);
+ cl = ceph_inode_to_client(dir);
spin_lock(&dentry->d_lock);
if (ret && di->lease_session && di->lease_session->s_mds == mds) {
- dout("encode_dentry_release %p mds%d seq %d\n",
- dentry, mds, (int)di->lease_seq);
+ doutc(cl, "%p mds%d seq %d\n", dentry, mds,
+ (int)di->lease_seq);
rel->dname_seq = cpu_to_le32(di->lease_seq);
__ceph_mdsc_drop_dentry_lease(dentry);
spin_unlock(&dentry->d_lock);
@@ -4833,12 +4942,14 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
static int remove_capsnaps(struct ceph_mds_client *mdsc, struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_cap_snap *capsnap;
int capsnap_release = 0;
lockdep_assert_held(&ci->i_ceph_lock);
- dout("removing capsnaps, ci is %p, inode is %p\n", ci, inode);
+ doutc(cl, "removing capsnaps, ci is %p, %p %llx.%llx\n",
+ ci, inode, ceph_vinop(inode));
while (!list_empty(&ci->i_cap_snaps)) {
capsnap = list_first_entry(&ci->i_cap_snaps,
@@ -4855,8 +4966,9 @@ static int remove_capsnaps(struct ceph_mds_client *mdsc, struct inode *inode)
int ceph_purge_inode_cap(struct inode *inode, struct ceph_cap *cap, bool *invalidate)
{
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct ceph_client *cl = fsc->client;
struct ceph_inode_info *ci = ceph_inode(inode);
bool is_auth;
bool dirty_dropped = false;
@@ -4864,8 +4976,8 @@ int ceph_purge_inode_cap(struct inode *inode, struct ceph_cap *cap, bool *invali
lockdep_assert_held(&ci->i_ceph_lock);
- dout("removing cap %p, ci is %p, inode is %p\n",
- cap, ci, &ci->netfs.inode);
+ doutc(cl, "removing cap %p, ci is %p, %p %llx.%llx\n",
+ cap, ci, inode, ceph_vinop(inode));
is_auth = (cap == ci->i_auth_cap);
__ceph_remove_cap(cap, false);
@@ -4892,19 +5004,19 @@ int ceph_purge_inode_cap(struct inode *inode, struct ceph_cap *cap, bool *invali
}
if (!list_empty(&ci->i_dirty_item)) {
- pr_warn_ratelimited(
- " dropping dirty %s state for %p %lld\n",
+ pr_warn_ratelimited_client(cl,
+ " dropping dirty %s state for %p %llx.%llx\n",
ceph_cap_string(ci->i_dirty_caps),
- inode, ceph_ino(inode));
+ inode, ceph_vinop(inode));
ci->i_dirty_caps = 0;
list_del_init(&ci->i_dirty_item);
dirty_dropped = true;
}
if (!list_empty(&ci->i_flushing_item)) {
- pr_warn_ratelimited(
- " dropping dirty+flushing %s state for %p %lld\n",
+ pr_warn_ratelimited_client(cl,
+ " dropping dirty+flushing %s state for %p %llx.%llx\n",
ceph_cap_string(ci->i_flushing_caps),
- inode, ceph_ino(inode));
+ inode, ceph_vinop(inode));
ci->i_flushing_caps = 0;
list_del_init(&ci->i_flushing_item);
mdsc->num_cap_flushing--;
@@ -4927,8 +5039,9 @@ int ceph_purge_inode_cap(struct inode *inode, struct ceph_cap *cap, bool *invali
if (atomic_read(&ci->i_filelock_ref) > 0) {
/* make further file lock syscall return -EIO */
ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK;
- pr_warn_ratelimited(" dropping file locks for %p %lld\n",
- inode, ceph_ino(inode));
+ pr_warn_ratelimited_client(cl,
+ " dropping file locks for %p %llx.%llx\n",
+ inode, ceph_vinop(inode));
}
if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
diff --git a/fs/ceph/crypto.c b/fs/ceph/crypto.c
index e3b1c3fab412..3b3c4d8d401e 100644
--- a/fs/ceph/crypto.c
+++ b/fs/ceph/crypto.c
@@ -113,7 +113,7 @@ static int ceph_crypt_set_context(struct inode *inode, const void *ctx,
cia.fscrypt_auth = cfa;
- ret = __ceph_setattr(inode, &attr, &cia);
+ ret = __ceph_setattr(&nop_mnt_idmap, inode, &attr, &cia);
if (ret == 0)
inode_set_flags(inode, S_ENCRYPTED, S_ENCRYPTED);
kfree(cia.fscrypt_auth);
@@ -129,7 +129,7 @@ static bool ceph_crypt_empty_dir(struct inode *inode)
static const union fscrypt_policy *ceph_get_dummy_policy(struct super_block *sb)
{
- return ceph_sb_to_client(sb)->fsc_dummy_enc_policy.policy;
+ return ceph_sb_to_fs_client(sb)->fsc_dummy_enc_policy.policy;
}
static struct fscrypt_operations ceph_fscrypt_ops = {
@@ -212,6 +212,7 @@ void ceph_fscrypt_as_ctx_to_req(struct ceph_mds_request *req,
static struct inode *parse_longname(const struct inode *parent,
const char *name, int *name_len)
{
+ struct ceph_client *cl = ceph_inode_to_client(parent);
struct inode *dir = NULL;
struct ceph_vino vino = { .snap = CEPH_NOSNAP };
char *inode_number;
@@ -223,12 +224,12 @@ static struct inode *parse_longname(const struct inode *parent,
name++;
name_end = strrchr(name, '_');
if (!name_end) {
- dout("Failed to parse long snapshot name: %s\n", name);
+ doutc(cl, "failed to parse long snapshot name: %s\n", name);
return ERR_PTR(-EIO);
}
*name_len = (name_end - name);
if (*name_len <= 0) {
- pr_err("Failed to parse long snapshot name\n");
+ pr_err_client(cl, "failed to parse long snapshot name\n");
return ERR_PTR(-EIO);
}
@@ -240,7 +241,7 @@ static struct inode *parse_longname(const struct inode *parent,
return ERR_PTR(-ENOMEM);
ret = kstrtou64(inode_number, 10, &vino.ino);
if (ret) {
- dout("Failed to parse inode number: %s\n", name);
+ doutc(cl, "failed to parse inode number: %s\n", name);
dir = ERR_PTR(ret);
goto out;
}
@@ -251,7 +252,7 @@ static struct inode *parse_longname(const struct inode *parent,
/* This can happen if we're not mounting cephfs on the root */
dir = ceph_get_inode(parent->i_sb, vino, NULL);
if (IS_ERR(dir))
- dout("Can't find inode %s (%s)\n", inode_number, name);
+ doutc(cl, "can't find inode %s (%s)\n", inode_number, name);
}
out:
@@ -262,6 +263,7 @@ out:
int ceph_encode_encrypted_dname(struct inode *parent, struct qstr *d_name,
char *buf)
{
+ struct ceph_client *cl = ceph_inode_to_client(parent);
struct inode *dir = parent;
struct qstr iname;
u32 len;
@@ -330,7 +332,7 @@ int ceph_encode_encrypted_dname(struct inode *parent, struct qstr *d_name,
/* base64 encode the encrypted name */
elen = ceph_base64_encode(cryptbuf, len, buf);
- dout("base64-encoded ciphertext name = %.*s\n", elen, buf);
+ doutc(cl, "base64-encoded ciphertext name = %.*s\n", elen, buf);
/* To understand the 240 limit, see CEPH_NOHASH_NAME_MAX comments */
WARN_ON(elen > 240);
@@ -505,7 +507,10 @@ int ceph_fscrypt_decrypt_block_inplace(const struct inode *inode,
struct page *page, unsigned int len,
unsigned int offs, u64 lblk_num)
{
- dout("%s: len %u offs %u blk %llu\n", __func__, len, offs, lblk_num);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
+
+ doutc(cl, "%p %llx.%llx len %u offs %u blk %llu\n", inode,
+ ceph_vinop(inode), len, offs, lblk_num);
return fscrypt_decrypt_block_inplace(inode, page, len, offs, lblk_num);
}
@@ -514,7 +519,10 @@ int ceph_fscrypt_encrypt_block_inplace(const struct inode *inode,
unsigned int offs, u64 lblk_num,
gfp_t gfp_flags)
{
- dout("%s: len %u offs %u blk %llu\n", __func__, len, offs, lblk_num);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
+
+ doutc(cl, "%p %llx.%llx len %u offs %u blk %llu\n", inode,
+ ceph_vinop(inode), len, offs, lblk_num);
return fscrypt_encrypt_block_inplace(inode, page, len, offs, lblk_num,
gfp_flags);
}
@@ -583,6 +591,7 @@ int ceph_fscrypt_decrypt_extents(struct inode *inode, struct page **page,
u64 off, struct ceph_sparse_extent *map,
u32 ext_cnt)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int i, ret = 0;
struct ceph_inode_info *ci = ceph_inode(inode);
u64 objno, objoff;
@@ -590,7 +599,8 @@ int ceph_fscrypt_decrypt_extents(struct inode *inode, struct page **page,
/* Nothing to do for empty array */
if (ext_cnt == 0) {
- dout("%s: empty array, ret 0\n", __func__);
+ doutc(cl, "%p %llx.%llx empty array, ret 0\n", inode,
+ ceph_vinop(inode));
return 0;
}
@@ -604,14 +614,17 @@ int ceph_fscrypt_decrypt_extents(struct inode *inode, struct page **page,
int fret;
if ((ext->off | ext->len) & ~CEPH_FSCRYPT_BLOCK_MASK) {
- pr_warn("%s: bad encrypted sparse extent idx %d off %llx len %llx\n",
- __func__, i, ext->off, ext->len);
+ pr_warn_client(cl,
+ "%p %llx.%llx bad encrypted sparse extent "
+ "idx %d off %llx len %llx\n",
+ inode, ceph_vinop(inode), i, ext->off,
+ ext->len);
return -EIO;
}
fret = ceph_fscrypt_decrypt_pages(inode, &page[pgidx],
off + pgsoff, ext->len);
- dout("%s: [%d] 0x%llx~0x%llx fret %d\n", __func__, i,
- ext->off, ext->len, fret);
+ doutc(cl, "%p %llx.%llx [%d] 0x%llx~0x%llx fret %d\n", inode,
+ ceph_vinop(inode), i, ext->off, ext->len, fret);
if (fret < 0) {
if (ret == 0)
ret = fret;
@@ -619,7 +632,7 @@ int ceph_fscrypt_decrypt_extents(struct inode *inode, struct page **page,
}
ret = pgsoff + fret;
}
- dout("%s: ret %d\n", __func__, ret);
+ doutc(cl, "ret %d\n", ret);
return ret;
}
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 3904333fa6c3..24c08078f5aa 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -81,7 +81,7 @@ static int mdsc_show(struct seq_file *s, void *p)
if (req->r_inode) {
seq_printf(s, " #%llx", ceph_ino(req->r_inode));
} else if (req->r_dentry) {
- path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
+ path = ceph_mdsc_build_path(mdsc, req->r_dentry, &pathlen,
&pathbase, 0);
if (IS_ERR(path))
path = NULL;
@@ -100,7 +100,7 @@ static int mdsc_show(struct seq_file *s, void *p)
}
if (req->r_old_dentry) {
- path = ceph_mdsc_build_path(req->r_old_dentry, &pathlen,
+ path = ceph_mdsc_build_path(mdsc, req->r_old_dentry, &pathlen,
&pathbase, 0);
if (IS_ERR(path))
path = NULL;
@@ -398,7 +398,7 @@ DEFINE_SIMPLE_ATTRIBUTE(congestion_kb_fops, congestion_kb_get,
void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
{
- dout("ceph_fs_debugfs_cleanup\n");
+ doutc(fsc->client, "begin\n");
debugfs_remove(fsc->debugfs_bdi);
debugfs_remove(fsc->debugfs_congestion_kb);
debugfs_remove(fsc->debugfs_mdsmap);
@@ -407,13 +407,14 @@ void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
debugfs_remove(fsc->debugfs_status);
debugfs_remove(fsc->debugfs_mdsc);
debugfs_remove_recursive(fsc->debugfs_metrics_dir);
+ doutc(fsc->client, "done\n");
}
void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
{
char name[100];
- dout("ceph_fs_debugfs_init\n");
+ doutc(fsc->client, "begin\n");
fsc->debugfs_congestion_kb =
debugfs_create_file("writeback_congestion_kb",
0600,
@@ -469,6 +470,7 @@ void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
&metrics_size_fops);
debugfs_create_file("caps", 0400, fsc->debugfs_metrics_dir, fsc,
&metrics_caps_fops);
+ doutc(fsc->client, "done\n");
}
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 854cbdd66661..91709934c8b1 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -109,7 +109,9 @@ static int fpos_cmp(loff_t l, loff_t r)
* regardless of what dir changes take place on the
* server.
*/
-static int note_last_dentry(struct ceph_dir_file_info *dfi, const char *name,
+static int note_last_dentry(struct ceph_fs_client *fsc,
+ struct ceph_dir_file_info *dfi,
+ const char *name,
int len, unsigned next_offset)
{
char *buf = kmalloc(len+1, GFP_KERNEL);
@@ -120,7 +122,7 @@ static int note_last_dentry(struct ceph_dir_file_info *dfi, const char *name,
memcpy(dfi->last_name, name, len);
dfi->last_name[len] = 0;
dfi->next_offset = next_offset;
- dout("note_last_dentry '%s'\n", dfi->last_name);
+ doutc(fsc->client, "'%s'\n", dfi->last_name);
return 0;
}
@@ -130,6 +132,7 @@ __dcache_find_get_entry(struct dentry *parent, u64 idx,
struct ceph_readdir_cache_control *cache_ctl)
{
struct inode *dir = d_inode(parent);
+ struct ceph_client *cl = ceph_inode_to_client(dir);
struct dentry *dentry;
unsigned idx_mask = (PAGE_SIZE / sizeof(struct dentry *)) - 1;
loff_t ptr_pos = idx * sizeof(struct dentry *);
@@ -142,7 +145,7 @@ __dcache_find_get_entry(struct dentry *parent, u64 idx,
ceph_readdir_cache_release(cache_ctl);
cache_ctl->page = find_lock_page(&dir->i_data, ptr_pgoff);
if (!cache_ctl->page) {
- dout(" page %lu not found\n", ptr_pgoff);
+ doutc(cl, " page %lu not found\n", ptr_pgoff);
return ERR_PTR(-EAGAIN);
}
/* reading/filling the cache are serialized by
@@ -185,13 +188,16 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
struct ceph_dir_file_info *dfi = file->private_data;
struct dentry *parent = file->f_path.dentry;
struct inode *dir = d_inode(parent);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(dir);
+ struct ceph_client *cl = ceph_inode_to_client(dir);
struct dentry *dentry, *last = NULL;
struct ceph_dentry_info *di;
struct ceph_readdir_cache_control cache_ctl = {};
u64 idx = 0;
int err = 0;
- dout("__dcache_readdir %p v%u at %llx\n", dir, (unsigned)shared_gen, ctx->pos);
+ doutc(cl, "%p %llx.%llx v%u at %llx\n", dir, ceph_vinop(dir),
+ (unsigned)shared_gen, ctx->pos);
/* search start position */
if (ctx->pos > 2) {
@@ -221,7 +227,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
dput(dentry);
}
- dout("__dcache_readdir %p cache idx %llu\n", dir, idx);
+ doutc(cl, "%p %llx.%llx cache idx %llu\n", dir,
+ ceph_vinop(dir), idx);
}
@@ -257,8 +264,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
spin_unlock(&dentry->d_lock);
if (emit_dentry) {
- dout(" %llx dentry %p %pd %p\n", di->offset,
- dentry, dentry, d_inode(dentry));
+ doutc(cl, " %llx dentry %p %pd %p\n", di->offset,
+ dentry, dentry, d_inode(dentry));
ctx->pos = di->offset;
if (!dir_emit(ctx, dentry->d_name.name,
dentry->d_name.len, ceph_present_inode(d_inode(dentry)),
@@ -281,7 +288,8 @@ out:
if (last) {
int ret;
di = ceph_dentry(last);
- ret = note_last_dentry(dfi, last->d_name.name, last->d_name.len,
+ ret = note_last_dentry(fsc, dfi, last->d_name.name,
+ last->d_name.len,
fpos_off(di->offset) + 1);
if (ret < 0)
err = ret;
@@ -310,20 +318,23 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
struct ceph_dir_file_info *dfi = file->private_data;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct ceph_client *cl = fsc->client;
int i;
int err;
unsigned frag = -1;
struct ceph_mds_reply_info_parsed *rinfo;
- dout("readdir %p file %p pos %llx\n", inode, file, ctx->pos);
+ doutc(cl, "%p %llx.%llx file %p pos %llx\n", inode,
+ ceph_vinop(inode), file, ctx->pos);
if (dfi->file_info.flags & CEPH_F_ATEND)
return 0;
/* always start with . and .. */
if (ctx->pos == 0) {
- dout("readdir off 0 -> '.'\n");
+ doutc(cl, "%p %llx.%llx off 0 -> '.'\n", inode,
+ ceph_vinop(inode));
if (!dir_emit(ctx, ".", 1, ceph_present_inode(inode),
inode->i_mode >> 12))
return 0;
@@ -337,7 +348,8 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
ino = ceph_present_inode(dentry->d_parent->d_inode);
spin_unlock(&dentry->d_lock);
- dout("readdir off 1 -> '..'\n");
+ doutc(cl, "%p %llx.%llx off 1 -> '..'\n", inode,
+ ceph_vinop(inode));
if (!dir_emit(ctx, "..", 2, ino, inode->i_mode >> 12))
return 0;
ctx->pos = 2;
@@ -391,8 +403,8 @@ more:
frag = fpos_frag(ctx->pos);
}
- dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
- ceph_vinop(inode), frag, dfi->last_name);
+ doutc(cl, "fetching %p %llx.%llx frag %x offset '%s'\n",
+ inode, ceph_vinop(inode), frag, dfi->last_name);
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -446,12 +458,12 @@ more:
ceph_mdsc_put_request(req);
return err;
}
- dout("readdir got and parsed readdir result=%d on "
- "frag %x, end=%d, complete=%d, hash_order=%d\n",
- err, frag,
- (int)req->r_reply_info.dir_end,
- (int)req->r_reply_info.dir_complete,
- (int)req->r_reply_info.hash_order);
+ doutc(cl, "%p %llx.%llx got and parsed readdir result=%d"
+ "on frag %x, end=%d, complete=%d, hash_order=%d\n",
+ inode, ceph_vinop(inode), err, frag,
+ (int)req->r_reply_info.dir_end,
+ (int)req->r_reply_info.dir_complete,
+ (int)req->r_reply_info.hash_order);
rinfo = &req->r_reply_info;
if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
@@ -481,7 +493,8 @@ more:
dfi->dir_ordered_count = req->r_dir_ordered_cnt;
}
} else {
- dout("readdir !did_prepopulate\n");
+ doutc(cl, "%p %llx.%llx !did_prepopulate\n", inode,
+ ceph_vinop(inode));
/* disable readdir cache */
dfi->readdir_cache_idx = -1;
/* preclude from marking dir complete */
@@ -494,8 +507,8 @@ more:
rinfo->dir_entries + (rinfo->dir_nr-1);
unsigned next_offset = req->r_reply_info.dir_end ?
2 : (fpos_off(rde->offset) + 1);
- err = note_last_dentry(dfi, rde->name, rde->name_len,
- next_offset);
+ err = note_last_dentry(fsc, dfi, rde->name,
+ rde->name_len, next_offset);
if (err) {
ceph_mdsc_put_request(dfi->last_readdir);
dfi->last_readdir = NULL;
@@ -508,9 +521,9 @@ more:
}
rinfo = &dfi->last_readdir->r_reply_info;
- dout("readdir frag %x num %d pos %llx chunk first %llx\n",
- dfi->frag, rinfo->dir_nr, ctx->pos,
- rinfo->dir_nr ? rinfo->dir_entries[0].offset : 0LL);
+ doutc(cl, "%p %llx.%llx frag %x num %d pos %llx chunk first %llx\n",
+ inode, ceph_vinop(inode), dfi->frag, rinfo->dir_nr, ctx->pos,
+ rinfo->dir_nr ? rinfo->dir_entries[0].offset : 0LL);
i = 0;
/* search start position */
@@ -530,8 +543,9 @@ more:
struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
if (rde->offset < ctx->pos) {
- pr_warn("%s: rde->offset 0x%llx ctx->pos 0x%llx\n",
- __func__, rde->offset, ctx->pos);
+ pr_warn_client(cl,
+ "%p %llx.%llx rde->offset 0x%llx ctx->pos 0x%llx\n",
+ inode, ceph_vinop(inode), rde->offset, ctx->pos);
return -EIO;
}
@@ -539,9 +553,9 @@ more:
return -EIO;
ctx->pos = rde->offset;
- dout("readdir (%d/%d) -> %llx '%.*s' %p\n",
- i, rinfo->dir_nr, ctx->pos,
- rde->name_len, rde->name, &rde->inode.in);
+ doutc(cl, "%p %llx.%llx (%d/%d) -> %llx '%.*s' %p\n", inode,
+ ceph_vinop(inode), i, rinfo->dir_nr, ctx->pos,
+ rde->name_len, rde->name, &rde->inode.in);
if (!dir_emit(ctx, rde->name, rde->name_len,
ceph_present_ino(inode->i_sb, le64_to_cpu(rde->inode.in->ino)),
@@ -552,7 +566,7 @@ more:
* doesn't have enough memory, etc. So for next readdir
* it will continue.
*/
- dout("filldir stopping us...\n");
+ doutc(cl, "filldir stopping us...\n");
return 0;
}
@@ -583,7 +597,8 @@ more:
kfree(dfi->last_name);
dfi->last_name = NULL;
}
- dout("readdir next frag is %x\n", frag);
+ doutc(cl, "%p %llx.%llx next frag is %x\n", inode,
+ ceph_vinop(inode), frag);
goto more;
}
dfi->file_info.flags |= CEPH_F_ATEND;
@@ -598,20 +613,23 @@ more:
spin_lock(&ci->i_ceph_lock);
if (dfi->dir_ordered_count ==
atomic64_read(&ci->i_ordered_count)) {
- dout(" marking %p complete and ordered\n", inode);
+ doutc(cl, " marking %p %llx.%llx complete and ordered\n",
+ inode, ceph_vinop(inode));
/* use i_size to track number of entries in
* readdir cache */
BUG_ON(dfi->readdir_cache_idx < 0);
i_size_write(inode, dfi->readdir_cache_idx *
sizeof(struct dentry*));
} else {
- dout(" marking %p complete\n", inode);
+ doutc(cl, " marking %llx.%llx complete\n",
+ ceph_vinop(inode));
}
__ceph_dir_set_complete(ci, dfi->dir_release_count,
dfi->dir_ordered_count);
spin_unlock(&ci->i_ceph_lock);
}
- dout("readdir %p file %p done.\n", inode, file);
+ doutc(cl, "%p %llx.%llx file %p done.\n", inode, ceph_vinop(inode),
+ file);
return 0;
}
@@ -657,6 +675,7 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
struct ceph_dir_file_info *dfi = file->private_data;
struct inode *inode = file->f_mapping->host;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
loff_t retval;
inode_lock(inode);
@@ -676,7 +695,8 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
if (offset >= 0) {
if (need_reset_readdir(dfi, offset)) {
- dout("dir_llseek dropping %p content\n", file);
+ doutc(cl, "%p %llx.%llx dropping %p content\n",
+ inode, ceph_vinop(inode), file);
reset_readdir(dfi);
} else if (is_hash_order(offset) && offset > file->f_pos) {
/* for hash offset, we don't know if a forward seek
@@ -703,8 +723,9 @@ out:
struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
struct dentry *dentry)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
struct inode *parent = d_inode(dentry->d_parent); /* we hold i_rwsem */
+ struct ceph_client *cl = ceph_inode_to_client(parent);
/* .snap dir? */
if (ceph_snap(parent) == CEPH_NOSNAP &&
@@ -713,8 +734,9 @@ struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
struct inode *inode = ceph_get_snapdir(parent);
res = d_splice_alias(inode, dentry);
- dout("ENOENT on snapdir %p '%pd', linking to snapdir %p. Spliced dentry %p\n",
- dentry, dentry, inode, res);
+ doutc(cl, "ENOENT on snapdir %p '%pd', linking to "
+ "snapdir %p %llx.%llx. Spliced dentry %p\n",
+ dentry, dentry, inode, ceph_vinop(inode), res);
if (res)
dentry = res;
}
@@ -735,12 +757,15 @@ struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
struct dentry *dentry, int err)
{
+ struct ceph_client *cl = req->r_mdsc->fsc->client;
+
if (err == -ENOENT) {
/* no trace? */
err = 0;
if (!req->r_reply_info.head->is_dentry) {
- dout("ENOENT and no trace, dentry %p inode %p\n",
- dentry, d_inode(dentry));
+ doutc(cl,
+ "ENOENT and no trace, dentry %p inode %llx.%llx\n",
+ dentry, ceph_vinop(d_inode(dentry)));
if (d_really_is_positive(dentry)) {
d_drop(dentry);
err = -ENOENT;
@@ -771,15 +796,16 @@ static bool is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dir->i_sb);
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
+ struct ceph_client *cl = fsc->client;
struct ceph_mds_request *req;
int op;
int mask;
int err;
- dout("lookup %p dentry %p '%pd'\n",
- dir, dentry, dentry);
+ doutc(cl, "%p %llx.%llx/'%pd' dentry %p\n", dir, ceph_vinop(dir),
+ dentry, dentry);
if (dentry->d_name.len > NAME_MAX)
return ERR_PTR(-ENAMETOOLONG);
@@ -802,7 +828,8 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
struct ceph_dentry_info *di = ceph_dentry(dentry);
spin_lock(&ci->i_ceph_lock);
- dout(" dir %p flags are 0x%lx\n", dir, ci->i_ceph_flags);
+ doutc(cl, " dir %llx.%llx flags are 0x%lx\n",
+ ceph_vinop(dir), ci->i_ceph_flags);
if (strncmp(dentry->d_name.name,
fsc->mount_options->snapdir_name,
dentry->d_name.len) &&
@@ -812,7 +839,8 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
__ceph_caps_issued_mask_metric(ci, CEPH_CAP_FILE_SHARED, 1)) {
__ceph_touch_fmode(ci, mdsc, CEPH_FILE_MODE_RD);
spin_unlock(&ci->i_ceph_lock);
- dout(" dir %p complete, -ENOENT\n", dir);
+ doutc(cl, " dir %llx.%llx complete, -ENOENT\n",
+ ceph_vinop(dir));
d_add(dentry, NULL);
di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
return NULL;
@@ -850,7 +878,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
}
dentry = ceph_finish_lookup(req, dentry, err);
ceph_mdsc_put_request(req); /* will dput(dentry) */
- dout("lookup result=%p\n", dentry);
+ doutc(cl, "result=%p\n", dentry);
return dentry;
}
@@ -885,6 +913,7 @@ static int ceph_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
struct ceph_acl_sec_ctx as_ctx = {};
int err;
@@ -901,8 +930,8 @@ static int ceph_mknod(struct mnt_idmap *idmap, struct inode *dir,
goto out;
}
- dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
- dir, dentry, mode, rdev);
+ doutc(cl, "%p %llx.%llx/'%pd' dentry %p mode 0%ho rdev %d\n",
+ dir, ceph_vinop(dir), dentry, dentry, mode, rdev);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
if (IS_ERR(req)) {
err = PTR_ERR(req);
@@ -924,6 +953,7 @@ static int ceph_mknod(struct mnt_idmap *idmap, struct inode *dir,
req->r_parent = dir;
ihold(dir);
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
+ req->r_mnt_idmap = mnt_idmap_get(idmap);
req->r_args.mknod.mode = cpu_to_le32(mode);
req->r_args.mknod.rdev = cpu_to_le32(rdev);
req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
@@ -993,6 +1023,7 @@ static int ceph_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *dest)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
struct ceph_acl_sec_ctx as_ctx = {};
umode_t mode = S_IFLNK | 0777;
@@ -1010,7 +1041,8 @@ static int ceph_symlink(struct mnt_idmap *idmap, struct inode *dir,
goto out;
}
- dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
+ doutc(cl, "%p %llx.%llx/'%pd' to '%s'\n", dir, ceph_vinop(dir), dentry,
+ dest);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
if (IS_ERR(req)) {
err = PTR_ERR(req);
@@ -1040,6 +1072,7 @@ static int ceph_symlink(struct mnt_idmap *idmap, struct inode *dir,
}
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
+ req->r_mnt_idmap = mnt_idmap_get(idmap);
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
@@ -1064,6 +1097,7 @@ static int ceph_mkdir(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
struct ceph_acl_sec_ctx as_ctx = {};
int err;
@@ -1076,10 +1110,11 @@ static int ceph_mkdir(struct mnt_idmap *idmap, struct inode *dir,
if (ceph_snap(dir) == CEPH_SNAPDIR) {
/* mkdir .snap/foo is a MKSNAP */
op = CEPH_MDS_OP_MKSNAP;
- dout("mksnap dir %p snap '%pd' dn %p\n", dir,
- dentry, dentry);
+ doutc(cl, "mksnap %llx.%llx/'%pd' dentry %p\n",
+ ceph_vinop(dir), dentry, dentry);
} else if (ceph_snap(dir) == CEPH_NOSNAP) {
- dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
+ doutc(cl, "mkdir %llx.%llx/'%pd' dentry %p mode 0%ho\n",
+ ceph_vinop(dir), dentry, dentry, mode);
op = CEPH_MDS_OP_MKDIR;
} else {
err = -EROFS;
@@ -1117,6 +1152,8 @@ static int ceph_mkdir(struct mnt_idmap *idmap, struct inode *dir,
req->r_parent = dir;
ihold(dir);
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
+ if (op == CEPH_MDS_OP_MKDIR)
+ req->r_mnt_idmap = mnt_idmap_get(idmap);
req->r_args.mkdir.mode = cpu_to_le32(mode);
req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
CEPH_CAP_XATTR_EXCL;
@@ -1144,6 +1181,7 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
int err;
@@ -1161,8 +1199,8 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir,
if (err)
return err;
- dout("link in dir %p %llx.%llx old_dentry %p:'%pd' dentry %p:'%pd'\n",
- dir, ceph_vinop(dir), old_dentry, old_dentry, dentry, dentry);
+ doutc(cl, "%p %llx.%llx/'%pd' to '%pd'\n", dir, ceph_vinop(dir),
+ old_dentry, dentry);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
if (IS_ERR(req)) {
d_drop(dentry);
@@ -1199,14 +1237,16 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req)
{
struct dentry *dentry = req->r_dentry;
- struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
+ struct ceph_client *cl = fsc->client;
struct ceph_dentry_info *di = ceph_dentry(dentry);
int result = req->r_err ? req->r_err :
le32_to_cpu(req->r_reply_info.head->result);
if (!test_bit(CEPH_DENTRY_ASYNC_UNLINK_BIT, &di->flags))
- pr_warn("%s dentry %p:%pd async unlink bit is not set\n",
- __func__, dentry, dentry);
+ pr_warn_client(cl,
+ "dentry %p:%pd async unlink bit is not set\n",
+ dentry, dentry);
spin_lock(&fsc->async_unlink_conflict_lock);
hash_del_rcu(&di->hnode);
@@ -1226,7 +1266,7 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
if (result) {
int pathlen = 0;
u64 base = 0;
- char *path = ceph_mdsc_build_path(dentry, &pathlen,
+ char *path = ceph_mdsc_build_path(mdsc, dentry, &pathlen,
&base, 0);
/* mark error on parent + clear complete */
@@ -1240,8 +1280,8 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
/* mark inode itself for an error (since metadata is bogus) */
mapping_set_error(req->r_old_inode->i_mapping, result);
- pr_warn("async unlink failure path=(%llx)%s result=%d!\n",
- base, IS_ERR(path) ? "<<bad>>" : path, result);
+ pr_warn_client(cl, "failure path=(%llx)%s result=%d!\n",
+ base, IS_ERR(path) ? "<<bad>>" : path, result);
ceph_mdsc_free_path(path, pathlen);
}
out:
@@ -1290,7 +1330,8 @@ static int get_caps_for_async_unlink(struct inode *dir, struct dentry *dentry)
*/
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dir->i_sb);
+ struct ceph_client *cl = fsc->client;
struct ceph_mds_client *mdsc = fsc->mdsc;
struct inode *inode = d_inode(dentry);
struct ceph_mds_request *req;
@@ -1300,11 +1341,12 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry)
if (ceph_snap(dir) == CEPH_SNAPDIR) {
/* rmdir .snap/foo is RMSNAP */
- dout("rmsnap dir %p '%pd' dn %p\n", dir, dentry, dentry);
+ doutc(cl, "rmsnap %llx.%llx/'%pd' dn\n", ceph_vinop(dir),
+ dentry);
op = CEPH_MDS_OP_RMSNAP;
} else if (ceph_snap(dir) == CEPH_NOSNAP) {
- dout("unlink/rmdir dir %p dn %p inode %p\n",
- dir, dentry, inode);
+ doutc(cl, "unlink/rmdir %llx.%llx/'%pd' inode %llx.%llx\n",
+ ceph_vinop(dir), dentry, ceph_vinop(inode));
op = d_is_dir(dentry) ?
CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
} else
@@ -1327,9 +1369,9 @@ retry:
(req->r_dir_caps = get_caps_for_async_unlink(dir, dentry))) {
struct ceph_dentry_info *di = ceph_dentry(dentry);
- dout("async unlink on %llu/%.*s caps=%s", ceph_ino(dir),
- dentry->d_name.len, dentry->d_name.name,
- ceph_cap_string(req->r_dir_caps));
+ doutc(cl, "async unlink on %llx.%llx/'%pd' caps=%s",
+ ceph_vinop(dir), dentry,
+ ceph_cap_string(req->r_dir_caps));
set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
req->r_callback = ceph_async_unlink_cb;
req->r_old_inode = d_inode(dentry);
@@ -1384,6 +1426,7 @@ static int ceph_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *new_dentry, unsigned int flags)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(old_dir->i_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
int op = CEPH_MDS_OP_RENAME;
int err;
@@ -1413,8 +1456,9 @@ static int ceph_rename(struct mnt_idmap *idmap, struct inode *old_dir,
if (err)
return err;
- dout("rename dir %p dentry %p to dir %p dentry %p\n",
- old_dir, old_dentry, new_dir, new_dentry);
+ doutc(cl, "%llx.%llx/'%pd' to %llx.%llx/'%pd'\n",
+ ceph_vinop(old_dir), old_dentry, ceph_vinop(new_dir),
+ new_dentry);
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -1459,9 +1503,10 @@ static int ceph_rename(struct mnt_idmap *idmap, struct inode *old_dir,
void __ceph_dentry_lease_touch(struct ceph_dentry_info *di)
{
struct dentry *dn = di->dentry;
- struct ceph_mds_client *mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(dn->d_sb)->mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
- dout("dentry_lease_touch %p %p '%pd'\n", di, dn, dn);
+ doutc(cl, "%p %p '%pd'\n", di, dn, dn);
di->flags |= CEPH_DENTRY_LEASE_LIST;
if (di->flags & CEPH_DENTRY_SHRINK_LIST) {
@@ -1469,7 +1514,6 @@ void __ceph_dentry_lease_touch(struct ceph_dentry_info *di)
return;
}
- mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
spin_lock(&mdsc->dentry_list_lock);
list_move_tail(&di->lease_list, &mdsc->dentry_leases);
spin_unlock(&mdsc->dentry_list_lock);
@@ -1493,10 +1537,10 @@ static void __dentry_dir_lease_touch(struct ceph_mds_client* mdsc,
void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info *di)
{
struct dentry *dn = di->dentry;
- struct ceph_mds_client *mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(dn->d_sb)->mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
- dout("dentry_dir_lease_touch %p %p '%pd' (offset 0x%llx)\n",
- di, dn, dn, di->offset);
+ doutc(cl, "%p %p '%pd' (offset 0x%llx)\n", di, dn, dn, di->offset);
if (!list_empty(&di->lease_list)) {
if (di->flags & CEPH_DENTRY_LEASE_LIST) {
@@ -1516,7 +1560,6 @@ void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info *di)
return;
}
- mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
spin_lock(&mdsc->dentry_list_lock);
__dentry_dir_lease_touch(mdsc, di);
spin_unlock(&mdsc->dentry_list_lock);
@@ -1530,7 +1573,7 @@ static void __dentry_lease_unlist(struct ceph_dentry_info *di)
if (list_empty(&di->lease_list))
return;
- mdsc = ceph_sb_to_client(di->dentry->d_sb)->mdsc;
+ mdsc = ceph_sb_to_fs_client(di->dentry->d_sb)->mdsc;
spin_lock(&mdsc->dentry_list_lock);
list_del_init(&di->lease_list);
spin_unlock(&mdsc->dentry_list_lock);
@@ -1757,6 +1800,8 @@ static int dentry_lease_is_valid(struct dentry *dentry, unsigned int flags)
{
struct ceph_dentry_info *di;
struct ceph_mds_session *session = NULL;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(dentry->d_sb)->mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
u32 seq = 0;
int valid = 0;
@@ -1789,7 +1834,7 @@ static int dentry_lease_is_valid(struct dentry *dentry, unsigned int flags)
CEPH_MDS_LEASE_RENEW, seq);
ceph_put_mds_session(session);
}
- dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
+ doutc(cl, "dentry %p = %d\n", dentry, valid);
return valid;
}
@@ -1832,6 +1877,7 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry,
struct ceph_mds_client *mdsc)
{
struct ceph_inode_info *ci = ceph_inode(dir);
+ struct ceph_client *cl = mdsc->fsc->client;
int valid;
int shared_gen;
@@ -1853,8 +1899,9 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry,
valid = 0;
spin_unlock(&dentry->d_lock);
}
- dout("dir_lease_is_valid dir %p v%u dentry %p = %d\n",
- dir, (unsigned)atomic_read(&ci->i_shared_gen), dentry, valid);
+ doutc(cl, "dir %p %llx.%llx v%u dentry %p '%pd' = %d\n", dir,
+ ceph_vinop(dir), (unsigned)atomic_read(&ci->i_shared_gen),
+ dentry, dentry, valid);
return valid;
}
@@ -1863,10 +1910,11 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry,
*/
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(dentry->d_sb)->mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
int valid = 0;
struct dentry *parent;
struct inode *dir, *inode;
- struct ceph_mds_client *mdsc;
valid = fscrypt_d_revalidate(dentry, flags);
if (valid <= 0)
@@ -1884,16 +1932,16 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
inode = d_inode(dentry);
}
- dout("d_revalidate %p '%pd' inode %p offset 0x%llx nokey %d\n", dentry,
- dentry, inode, ceph_dentry(dentry)->offset,
- !!(dentry->d_flags & DCACHE_NOKEY_NAME));
+ doutc(cl, "%p '%pd' inode %p offset 0x%llx nokey %d\n",
+ dentry, dentry, inode, ceph_dentry(dentry)->offset,
+ !!(dentry->d_flags & DCACHE_NOKEY_NAME));
- mdsc = ceph_sb_to_client(dir->i_sb)->mdsc;
+ mdsc = ceph_sb_to_fs_client(dir->i_sb)->mdsc;
/* always trust cached snapped dentries, snapdir dentry */
if (ceph_snap(dir) != CEPH_NOSNAP) {
- dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
- dentry, inode);
+ doutc(cl, "%p '%pd' inode %p is SNAPPED\n", dentry,
+ dentry, inode);
valid = 1;
} else if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
valid = 1;
@@ -1948,14 +1996,14 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
break;
}
ceph_mdsc_put_request(req);
- dout("d_revalidate %p lookup result=%d\n",
- dentry, err);
+ doutc(cl, "%p '%pd', lookup result=%d\n", dentry,
+ dentry, err);
}
} else {
percpu_counter_inc(&mdsc->metric.d_lease_hit);
}
- dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
+ doutc(cl, "%p '%pd' %s\n", dentry, dentry, valid ? "valid" : "invalid");
if (!valid)
ceph_dir_clear_complete(dir);
@@ -1995,9 +2043,9 @@ static int ceph_d_delete(const struct dentry *dentry)
static void ceph_d_release(struct dentry *dentry)
{
struct ceph_dentry_info *di = ceph_dentry(dentry);
- struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
- dout("d_release %p\n", dentry);
+ doutc(fsc->client, "dentry %p '%pd'\n", dentry, dentry);
atomic64_dec(&fsc->mdsc->metric.total_dentries);
@@ -2018,10 +2066,12 @@ static void ceph_d_release(struct dentry *dentry)
*/
static void ceph_d_prune(struct dentry *dentry)
{
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dentry->d_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *dir_ci;
struct ceph_dentry_info *di;
- dout("ceph_d_prune %pd %p\n", dentry, dentry);
+ doutc(cl, "dentry %p '%pd'\n", dentry, dentry);
/* do we have a valid parent? */
if (IS_ROOT(dentry))
@@ -2064,7 +2114,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
int left;
const int bufsize = 1024;
- if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
+ if (!ceph_test_mount_opt(ceph_sb_to_fs_client(inode->i_sb), DIRSTAT))
return -EISDIR;
if (!dfi->dir_info) {
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 8559990a59a5..726af69d4d62 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -36,6 +36,7 @@ struct ceph_nfs_snapfh {
static int ceph_encode_snapfh(struct inode *inode, u32 *rawfh, int *max_len,
struct inode *parent_inode)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
static const int snap_handle_length =
sizeof(struct ceph_nfs_snapfh) >> 2;
struct ceph_nfs_snapfh *sfh = (void *)rawfh;
@@ -79,13 +80,14 @@ static int ceph_encode_snapfh(struct inode *inode, u32 *rawfh, int *max_len,
*max_len = snap_handle_length;
ret = FILEID_BTRFS_WITH_PARENT;
out:
- dout("encode_snapfh %llx.%llx ret=%d\n", ceph_vinop(inode), ret);
+ doutc(cl, "%p %llx.%llx ret=%d\n", inode, ceph_vinop(inode), ret);
return ret;
}
static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
struct inode *parent_inode)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
static const int handle_length =
sizeof(struct ceph_nfs_fh) >> 2;
static const int connected_handle_length =
@@ -105,15 +107,15 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
if (parent_inode) {
struct ceph_nfs_confh *cfh = (void *)rawfh;
- dout("encode_fh %llx with parent %llx\n",
- ceph_ino(inode), ceph_ino(parent_inode));
+ doutc(cl, "%p %llx.%llx with parent %p %llx.%llx\n", inode,
+ ceph_vinop(inode), parent_inode, ceph_vinop(parent_inode));
cfh->ino = ceph_ino(inode);
cfh->parent_ino = ceph_ino(parent_inode);
*max_len = connected_handle_length;
type = FILEID_INO32_GEN_PARENT;
} else {
struct ceph_nfs_fh *fh = (void *)rawfh;
- dout("encode_fh %llx\n", ceph_ino(inode));
+ doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
fh->ino = ceph_ino(inode);
*max_len = handle_length;
type = FILEID_INO32_GEN;
@@ -123,7 +125,7 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
static struct inode *__lookup_inode(struct super_block *sb, u64 ino)
{
- struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(sb)->mdsc;
struct inode *inode;
struct ceph_vino vino;
int err;
@@ -205,7 +207,8 @@ static struct dentry *__snapfh_to_dentry(struct super_block *sb,
struct ceph_nfs_snapfh *sfh,
bool want_parent)
{
- struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(sb)->mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
struct inode *inode;
struct ceph_vino vino;
@@ -278,11 +281,10 @@ static struct dentry *__snapfh_to_dentry(struct super_block *sb,
ceph_mdsc_put_request(req);
if (want_parent) {
- dout("snapfh_to_parent %llx.%llx\n err=%d\n",
- vino.ino, vino.snap, err);
+ doutc(cl, "%llx.%llx\n err=%d\n", vino.ino, vino.snap, err);
} else {
- dout("snapfh_to_dentry %llx.%llx parent %llx hash %x err=%d",
- vino.ino, vino.snap, sfh->parent_ino, sfh->hash, err);
+ doutc(cl, "%llx.%llx parent %llx hash %x err=%d", vino.ino,
+ vino.snap, sfh->parent_ino, sfh->hash, err);
}
if (IS_ERR(inode))
return ERR_CAST(inode);
@@ -297,6 +299,7 @@ static struct dentry *ceph_fh_to_dentry(struct super_block *sb,
struct fid *fid,
int fh_len, int fh_type)
{
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
struct ceph_nfs_fh *fh = (void *)fid->raw;
if (fh_type == FILEID_BTRFS_WITH_PARENT) {
@@ -310,14 +313,14 @@ static struct dentry *ceph_fh_to_dentry(struct super_block *sb,
if (fh_len < sizeof(*fh) / 4)
return NULL;
- dout("fh_to_dentry %llx\n", fh->ino);
+ doutc(fsc->client, "%llx\n", fh->ino);
return __fh_to_dentry(sb, fh->ino);
}
static struct dentry *__get_parent(struct super_block *sb,
struct dentry *child, u64 ino)
{
- struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(sb)->mdsc;
struct ceph_mds_request *req;
struct inode *inode;
int mask;
@@ -363,6 +366,7 @@ static struct dentry *__get_parent(struct super_block *sb,
static struct dentry *ceph_get_parent(struct dentry *child)
{
struct inode *inode = d_inode(child);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct dentry *dn;
if (ceph_snap(inode) != CEPH_NOSNAP) {
@@ -402,8 +406,8 @@ static struct dentry *ceph_get_parent(struct dentry *child)
dn = __get_parent(child->d_sb, child, 0);
}
out:
- dout("get_parent %p ino %llx.%llx err=%ld\n",
- child, ceph_vinop(inode), (long)PTR_ERR_OR_ZERO(dn));
+ doutc(cl, "child %p %p %llx.%llx err=%ld\n", child, inode,
+ ceph_vinop(inode), (long)PTR_ERR_OR_ZERO(dn));
return dn;
}
@@ -414,6 +418,7 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
struct fid *fid,
int fh_len, int fh_type)
{
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
struct ceph_nfs_confh *cfh = (void *)fid->raw;
struct dentry *dentry;
@@ -427,7 +432,7 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
if (fh_len < sizeof(*cfh) / 4)
return NULL;
- dout("fh_to_parent %llx\n", cfh->parent_ino);
+ doutc(fsc->client, "%llx\n", cfh->parent_ino);
dentry = __get_parent(sb, NULL, cfh->ino);
if (unlikely(dentry == ERR_PTR(-ENOENT)))
dentry = __fh_to_dentry(sb, cfh->parent_ino);
@@ -439,7 +444,7 @@ static int __get_snap_name(struct dentry *parent, char *name,
{
struct inode *inode = d_inode(child);
struct inode *dir = d_inode(parent);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_mds_request *req = NULL;
char *last_name = NULL;
unsigned next_offset = 2;
@@ -526,8 +531,8 @@ out:
if (req)
ceph_mdsc_put_request(req);
kfree(last_name);
- dout("get_snap_name %p ino %llx.%llx err=%d\n",
- child, ceph_vinop(inode), err);
+ doutc(fsc->client, "child dentry %p %p %llx.%llx err=%d\n", child,
+ inode, ceph_vinop(inode), err);
return err;
}
@@ -544,7 +549,7 @@ static int ceph_get_name(struct dentry *parent, char *name,
if (ceph_snap(inode) != CEPH_NOSNAP)
return __get_snap_name(parent, name, child);
- mdsc = ceph_inode_to_client(inode)->mdsc;
+ mdsc = ceph_inode_to_fs_client(inode)->mdsc;
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPNAME,
USE_ANY_MDS);
if (IS_ERR(req))
@@ -588,9 +593,9 @@ static int ceph_get_name(struct dentry *parent, char *name,
ceph_fname_free_buffer(dir, &oname);
}
out:
- dout("get_name %p ino %llx.%llx err %d %s%s\n",
- child, ceph_vinop(inode), err,
- err ? "" : "name ", err ? "" : name);
+ doutc(mdsc->fsc->client, "child dentry %p %p %llx.%llx err %d %s%s\n",
+ child, inode, ceph_vinop(inode), err, err ? "" : "name ",
+ err ? "" : name);
ceph_mdsc_put_request(req);
return err;
}
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 649600d0a7b6..3b5aae29e944 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -19,8 +19,9 @@
#include "io.h"
#include "metric.h"
-static __le32 ceph_flags_sys2wire(u32 flags)
+static __le32 ceph_flags_sys2wire(struct ceph_mds_client *mdsc, u32 flags)
{
+ struct ceph_client *cl = mdsc->fsc->client;
u32 wire_flags = 0;
switch (flags & O_ACCMODE) {
@@ -48,7 +49,7 @@ static __le32 ceph_flags_sys2wire(u32 flags)
#undef ceph_sys2wire
if (flags)
- dout("unused open flags: %x\n", flags);
+ doutc(cl, "unused open flags: %x\n", flags);
return cpu_to_le32(wire_flags);
}
@@ -189,7 +190,7 @@ prepare_open_request(struct super_block *sb, int flags, int create_mode)
if (IS_ERR(req))
goto out;
req->r_fmode = ceph_flags_to_mode(flags);
- req->r_args.open.flags = ceph_flags_sys2wire(flags);
+ req->r_args.open.flags = ceph_flags_sys2wire(mdsc, flags);
req->r_args.open.mode = cpu_to_le32(create_mode);
out:
return req;
@@ -200,12 +201,13 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mount_options *opt =
- ceph_inode_to_client(&ci->netfs.inode)->mount_options;
+ ceph_inode_to_fs_client(&ci->netfs.inode)->mount_options;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_file_info *fi;
int ret;
- dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
- inode->i_mode, isdir ? "dir" : "regular");
+ doutc(cl, "%p %llx.%llx %p 0%o (%s)\n", inode, ceph_vinop(inode),
+ file, inode->i_mode, isdir ? "dir" : "regular");
BUG_ON(inode->i_fop->release != ceph_release);
if (isdir) {
@@ -234,7 +236,7 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
spin_lock_init(&fi->rw_contexts_lock);
INIT_LIST_HEAD(&fi->rw_contexts);
- fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);
+ fi->filp_gen = READ_ONCE(ceph_inode_to_fs_client(inode)->filp_gen);
if ((file->f_mode & FMODE_WRITE) && ceph_has_inline_data(ci)) {
ret = ceph_uninline_data(file);
@@ -259,6 +261,7 @@ error:
*/
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int ret = 0;
switch (inode->i_mode & S_IFMT) {
@@ -271,13 +274,13 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
break;
case S_IFLNK:
- dout("init_file %p %p 0%o (symlink)\n", inode, file,
- inode->i_mode);
+ doutc(cl, "%p %llx.%llx %p 0%o (symlink)\n", inode,
+ ceph_vinop(inode), file, inode->i_mode);
break;
default:
- dout("init_file %p %p 0%o (special)\n", inode, file,
- inode->i_mode);
+ doutc(cl, "%p %llx.%llx %p 0%o (special)\n", inode,
+ ceph_vinop(inode), file, inode->i_mode);
/*
* we need to drop the open ref now, since we don't
* have .release set to ceph_release.
@@ -296,6 +299,7 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
int ceph_renew_caps(struct inode *inode, int fmode)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_request *req;
int err, flags, wanted;
@@ -307,8 +311,9 @@ int ceph_renew_caps(struct inode *inode, int fmode)
(!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
int issued = __ceph_caps_issued(ci, NULL);
spin_unlock(&ci->i_ceph_lock);
- dout("renew caps %p want %s issued %s updating mds_wanted\n",
- inode, ceph_cap_string(wanted), ceph_cap_string(issued));
+ doutc(cl, "%p %llx.%llx want %s issued %s updating mds_wanted\n",
+ inode, ceph_vinop(inode), ceph_cap_string(wanted),
+ ceph_cap_string(issued));
ceph_check_caps(ci, 0);
return 0;
}
@@ -339,7 +344,8 @@ int ceph_renew_caps(struct inode *inode, int fmode)
err = ceph_mdsc_do_request(mdsc, NULL, req);
ceph_mdsc_put_request(req);
out:
- dout("renew caps %p open result=%d\n", inode, err);
+ doutc(cl, "%p %llx.%llx open result=%d\n", inode, ceph_vinop(inode),
+ err);
return err < 0 ? err : 0;
}
@@ -352,7 +358,8 @@ out:
int ceph_open(struct inode *inode, struct file *file)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
+ struct ceph_client *cl = fsc->client;
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
struct ceph_file_info *fi = file->private_data;
@@ -360,7 +367,7 @@ int ceph_open(struct inode *inode, struct file *file)
int flags, fmode, wanted;
if (fi) {
- dout("open file %p is already opened\n", file);
+ doutc(cl, "file %p is already opened\n", file);
return 0;
}
@@ -374,8 +381,8 @@ int ceph_open(struct inode *inode, struct file *file)
return err;
}
- dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
- ceph_vinop(inode), file, flags, file->f_flags);
+ doutc(cl, "%p %llx.%llx file %p flags %d (%d)\n", inode,
+ ceph_vinop(inode), file, flags, file->f_flags);
fmode = ceph_flags_to_mode(flags);
wanted = ceph_caps_for_mode(fmode);
@@ -399,9 +406,9 @@ int ceph_open(struct inode *inode, struct file *file)
int mds_wanted = __ceph_caps_mds_wanted(ci, true);
int issued = __ceph_caps_issued(ci, NULL);
- dout("open %p fmode %d want %s issued %s using existing\n",
- inode, fmode, ceph_cap_string(wanted),
- ceph_cap_string(issued));
+ doutc(cl, "open %p fmode %d want %s issued %s using existing\n",
+ inode, fmode, ceph_cap_string(wanted),
+ ceph_cap_string(issued));
__ceph_touch_fmode(ci, mdsc, fmode);
spin_unlock(&ci->i_ceph_lock);
@@ -421,7 +428,7 @@ int ceph_open(struct inode *inode, struct file *file)
spin_unlock(&ci->i_ceph_lock);
- dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
+ doutc(cl, "open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
req = prepare_open_request(inode->i_sb, flags, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
@@ -435,7 +442,7 @@ int ceph_open(struct inode *inode, struct file *file)
if (!err)
err = ceph_init_file(inode, file, req->r_fmode);
ceph_mdsc_put_request(req);
- dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
+ doutc(cl, "open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
return err;
}
@@ -515,6 +522,7 @@ no_async:
static void restore_deleg_ino(struct inode *dir, u64 ino)
{
+ struct ceph_client *cl = ceph_inode_to_client(dir);
struct ceph_inode_info *ci = ceph_inode(dir);
struct ceph_mds_session *s = NULL;
@@ -525,7 +533,8 @@ static void restore_deleg_ino(struct inode *dir, u64 ino)
if (s) {
int err = ceph_restore_deleg_ino(s, ino);
if (err)
- pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n",
+ pr_warn_client(cl,
+ "unable to restore delegated ino 0x%llx to session: %d\n",
ino, err);
ceph_put_mds_session(s);
}
@@ -557,6 +566,7 @@ static void wake_async_create_waiters(struct inode *inode,
static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct dentry *dentry = req->r_dentry;
struct inode *dinode = d_inode(dentry);
struct inode *tinode = req->r_target_inode;
@@ -574,10 +584,11 @@ static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
if (result) {
int pathlen = 0;
u64 base = 0;
- char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
+ char *path = ceph_mdsc_build_path(mdsc, req->r_dentry, &pathlen,
&base, 0);
- pr_warn("async create failure path=(%llx)%s result=%d!\n",
+ pr_warn_client(cl,
+ "async create failure path=(%llx)%s result=%d!\n",
base, IS_ERR(path) ? "<<bad>>" : path, result);
ceph_mdsc_free_path(path, pathlen);
@@ -596,14 +607,15 @@ static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
u64 ino = ceph_vino(tinode).ino;
if (req->r_deleg_ino != ino)
- pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
- __func__, req->r_err, req->r_deleg_ino, ino);
+ pr_warn_client(cl,
+ "inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
+ req->r_err, req->r_deleg_ino, ino);
mapping_set_error(tinode->i_mapping, result);
wake_async_create_waiters(tinode, req->r_session);
} else if (!result) {
- pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
- req->r_deleg_ino);
+ pr_warn_client(cl, "no req->r_target_inode for 0x%llx\n",
+ req->r_deleg_ino);
}
out:
ceph_mdsc_release_dir_caps(req);
@@ -625,6 +637,7 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
struct timespec64 now;
struct ceph_string *pool_ns;
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_vino vino = { .ino = req->r_deleg_ino,
.snap = CEPH_NOSNAP };
@@ -655,7 +668,9 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
in.truncate_seq = cpu_to_le32(1);
in.truncate_size = cpu_to_le64(-1ULL);
in.xattr_version = cpu_to_le64(1);
- in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
+ in.uid = cpu_to_le32(from_kuid(&init_user_ns,
+ mapped_fsuid(req->r_mnt_idmap,
+ &init_user_ns)));
if (dir->i_mode & S_ISGID) {
in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_gid));
@@ -663,7 +678,9 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
if (S_ISDIR(mode))
mode |= S_ISGID;
} else {
- in.gid = cpu_to_le32(from_kgid(&init_user_ns, current_fsgid()));
+ in.gid = cpu_to_le32(from_kgid(&init_user_ns,
+ mapped_fsgid(req->r_mnt_idmap,
+ &init_user_ns)));
}
in.mode = cpu_to_le32((u32)mode);
@@ -683,7 +700,7 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
req->r_fmode, NULL);
up_read(&mdsc->snap_rwsem);
if (ret) {
- dout("%s failed to fill inode: %d\n", __func__, ret);
+ doutc(cl, "failed to fill inode: %d\n", ret);
ceph_dir_clear_complete(dir);
if (!d_unhashed(dentry))
d_drop(dentry);
@@ -691,8 +708,8 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
} else {
struct dentry *dn;
- dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__,
- vino.ino, ceph_ino(dir), dentry->d_name.name);
+ doutc(cl, "d_adding new inode 0x%llx to 0x%llx/%s\n",
+ vino.ino, ceph_ino(dir), dentry->d_name.name);
ceph_dir_clear_ordered(dir);
ceph_init_inode_acls(inode, as_ctx);
if (inode->i_state & I_NEW) {
@@ -730,7 +747,9 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned flags, umode_t mode)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
+ struct mnt_idmap *idmap = file_mnt_idmap(file);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dir->i_sb);
+ struct ceph_client *cl = fsc->client;
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
struct inode *new_inode = NULL;
@@ -740,9 +759,9 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
int mask;
int err;
- dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
- dir, dentry, dentry,
- d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
+ doutc(cl, "%p %llx.%llx dentry %p '%pd' %s flags %d mode 0%o\n",
+ dir, ceph_vinop(dir), dentry, dentry,
+ d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
if (dentry->d_name.len > NAME_MAX)
return -ENAMETOOLONG;
@@ -788,6 +807,8 @@ retry:
mask |= CEPH_CAP_XATTR_SHARED;
req->r_args.open.mask = cpu_to_le32(mask);
req->r_parent = dir;
+ if (req->r_op == CEPH_MDS_OP_CREATE)
+ req->r_mnt_idmap = mnt_idmap_get(idmap);
ihold(dir);
if (IS_ENCRYPTED(dir)) {
set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
@@ -880,17 +901,18 @@ retry:
goto out_req;
if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
/* make vfs retry on splice, ENOENT, or symlink */
- dout("atomic_open finish_no_open on dn %p\n", dn);
+ doutc(cl, "finish_no_open on dn %p\n", dn);
err = finish_no_open(file, dn);
} else {
if (IS_ENCRYPTED(dir) &&
!fscrypt_has_permitted_context(dir, d_inode(dentry))) {
- pr_warn("Inconsistent encryption context (parent %llx:%llx child %llx:%llx)\n",
+ pr_warn_client(cl,
+ "Inconsistent encryption context (parent %llx:%llx child %llx:%llx)\n",
ceph_vinop(dir), ceph_vinop(d_inode(dentry)));
goto out_req;
}
- dout("atomic_open finish_open on dn %p\n", dn);
+ doutc(cl, "finish_open on dn %p\n", dn);
if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
struct inode *newino = d_inode(dentry);
@@ -905,17 +927,19 @@ out_req:
iput(new_inode);
out_ctx:
ceph_release_acl_sec_ctx(&as_ctx);
- dout("atomic_open result=%d\n", err);
+ doutc(cl, "result=%d\n", err);
return err;
}
int ceph_release(struct inode *inode, struct file *file)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
if (S_ISDIR(inode->i_mode)) {
struct ceph_dir_file_info *dfi = file->private_data;
- dout("release inode %p dir file %p\n", inode, file);
+ doutc(cl, "%p %llx.%llx dir file %p\n", inode,
+ ceph_vinop(inode), file);
WARN_ON(!list_empty(&dfi->file_info.rw_contexts));
ceph_put_fmode(ci, dfi->file_info.fmode, 1);
@@ -927,7 +951,8 @@ int ceph_release(struct inode *inode, struct file *file)
kmem_cache_free(ceph_dir_file_cachep, dfi);
} else {
struct ceph_file_info *fi = file->private_data;
- dout("release inode %p regular file %p\n", inode, file);
+ doutc(cl, "%p %llx.%llx regular file %p\n", inode,
+ ceph_vinop(inode), file);
WARN_ON(!list_empty(&fi->rw_contexts));
ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
@@ -962,7 +987,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
u64 *last_objver)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
struct ceph_osd_client *osdc = &fsc->client->osdc;
ssize_t ret;
u64 off = *ki_pos;
@@ -971,7 +997,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
bool sparse = IS_ENCRYPTED(inode) || ceph_test_mount_opt(fsc, SPARSEREAD);
u64 objver = 0;
- dout("sync_read on inode %p %llx~%llx\n", inode, *ki_pos, len);
+ doutc(cl, "on inode %p %llx.%llx %llx~%llx\n", inode,
+ ceph_vinop(inode), *ki_pos, len);
if (ceph_inode_is_shutdown(inode))
return -EIO;
@@ -1005,8 +1032,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
/* determine new offset/length if encrypted */
ceph_fscrypt_adjust_off_and_len(inode, &read_off, &read_len);
- dout("sync_read orig %llu~%llu reading %llu~%llu",
- off, len, read_off, read_len);
+ doutc(cl, "orig %llu~%llu reading %llu~%llu", off, len,
+ read_off, read_len);
req = ceph_osdc_new_request(osdc, &ci->i_layout,
ci->i_vino, read_off, &read_len, 0, 1,
@@ -1059,8 +1086,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
objver = req->r_version;
i_size = i_size_read(inode);
- dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
- off, len, ret, i_size, (more ? " MORE" : ""));
+ doutc(cl, "%llu~%llu got %zd i_size %llu%s\n", off, len,
+ ret, i_size, (more ? " MORE" : ""));
/* Fix it to go to end of extent map */
if (sparse && ret >= 0)
@@ -1101,8 +1128,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
int zlen = min(len - ret, i_size - off - ret);
int zoff = page_off + ret;
- dout("sync_read zero gap %llu~%llu\n",
- off + ret, off + ret + zlen);
+ doutc(cl, "zero gap %llu~%llu\n", off + ret,
+ off + ret + zlen);
ceph_zero_page_vector_range(zoff, zlen, pages);
ret += zlen;
}
@@ -1151,7 +1178,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
if (last_objver)
*last_objver = objver;
}
- dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
+ doutc(cl, "result %zd retry_op %d\n", ret, *retry_op);
return ret;
}
@@ -1160,9 +1187,11 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
- dout("sync_read on file %p %llx~%zx %s\n", file, iocb->ki_pos,
- iov_iter_count(to), (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
+ doutc(cl, "on file %p %llx~%zx %s\n", file, iocb->ki_pos,
+ iov_iter_count(to),
+ (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
return __ceph_sync_read(inode, &iocb->ki_pos, to, retry_op, NULL);
}
@@ -1190,6 +1219,7 @@ static void ceph_aio_retry_work(struct work_struct *work);
static void ceph_aio_complete(struct inode *inode,
struct ceph_aio_request *aio_req)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
int ret;
@@ -1203,7 +1233,7 @@ static void ceph_aio_complete(struct inode *inode,
if (!ret)
ret = aio_req->total_len;
- dout("ceph_aio_complete %p rc %d\n", inode, ret);
+ doutc(cl, "%p %llx.%llx rc %d\n", inode, ceph_vinop(inode), ret);
if (ret >= 0 && aio_req->write) {
int dirty;
@@ -1242,11 +1272,13 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric;
unsigned int len = osd_data->bvec_pos.iter.bi_size;
bool sparse = (op->op == CEPH_OSD_OP_SPARSE_READ);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
BUG_ON(!osd_data->num_bvecs);
- dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, len);
+ doutc(cl, "req %p inode %p %llx.%llx, rc %d bytes %u\n", req,
+ inode, ceph_vinop(inode), rc, len);
if (rc == -EOLDSNAPC) {
struct ceph_aio_work *aio_work;
@@ -1256,7 +1288,7 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
if (aio_work) {
INIT_WORK(&aio_work->work, ceph_aio_retry_work);
aio_work->req = req;
- queue_work(ceph_inode_to_client(inode)->inode_wq,
+ queue_work(ceph_inode_to_fs_client(inode)->inode_wq,
&aio_work->work);
return;
}
@@ -1386,7 +1418,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
struct ceph_client_metric *metric = &fsc->mdsc->metric;
struct ceph_vino vino;
struct ceph_osd_request *req;
@@ -1405,9 +1438,9 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
return -EROFS;
- dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
- (write ? "write" : "read"), file, pos, (unsigned)count,
- snapc, snapc ? snapc->seq : 0);
+ doutc(cl, "sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
+ (write ? "write" : "read"), file, pos, (unsigned)count,
+ snapc, snapc ? snapc->seq : 0);
if (write) {
int ret2;
@@ -1418,7 +1451,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
pos >> PAGE_SHIFT,
(pos + count - 1) >> PAGE_SHIFT);
if (ret2 < 0)
- dout("invalidate_inode_pages2_range returned %d\n", ret2);
+ doutc(cl, "invalidate_inode_pages2_range returned %d\n",
+ ret2);
flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
} else {
@@ -1610,7 +1644,8 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
struct ceph_osd_client *osdc = &fsc->client->osdc;
struct ceph_osd_request *req;
struct page **pages;
@@ -1625,8 +1660,8 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
return -EROFS;
- dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
- file, pos, (unsigned)count, snapc, snapc->seq);
+ doutc(cl, "on file %p %lld~%u snapc %p seq %lld\n", file, pos,
+ (unsigned)count, snapc, snapc->seq);
ret = filemap_write_and_wait_range(inode->i_mapping,
pos, pos + count - 1);
@@ -1670,9 +1705,9 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
last = (pos + len) != (write_pos + write_len);
rmw = first || last;
- dout("sync_write ino %llx %lld~%llu adjusted %lld~%llu -- %srmw\n",
- ci->i_vino.ino, pos, len, write_pos, write_len,
- rmw ? "" : "no ");
+ doutc(cl, "ino %llx %lld~%llu adjusted %lld~%llu -- %srmw\n",
+ ci->i_vino.ino, pos, len, write_pos, write_len,
+ rmw ? "" : "no ");
/*
* The data is emplaced into the page as it would be if it were
@@ -1881,7 +1916,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
left -= ret;
}
if (ret < 0) {
- dout("sync_write write failed with %d\n", ret);
+ doutc(cl, "write failed with %d\n", ret);
ceph_release_page_vector(pages, num_pages);
break;
}
@@ -1891,7 +1926,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
write_pos, write_len,
GFP_KERNEL);
if (ret < 0) {
- dout("encryption failed with %d\n", ret);
+ doutc(cl, "encryption failed with %d\n", ret);
ceph_release_page_vector(pages, num_pages);
break;
}
@@ -1910,7 +1945,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
break;
}
- dout("sync_write write op %lld~%llu\n", write_pos, write_len);
+ doutc(cl, "write op %lld~%llu\n", write_pos, write_len);
osd_req_op_extent_osd_data_pages(req, rmw ? 1 : 0, pages, write_len,
offset_in_page(write_pos), false,
true);
@@ -1941,7 +1976,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
req->r_end_latency, len, ret);
ceph_osdc_put_request(req);
if (ret != 0) {
- dout("sync_write osd write returned %d\n", ret);
+ doutc(cl, "osd write returned %d\n", ret);
/* Version changed! Must re-do the rmw cycle */
if ((assert_ver && (ret == -ERANGE || ret == -EOVERFLOW)) ||
(!assert_ver && ret == -EEXIST)) {
@@ -1971,13 +2006,13 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
pos >> PAGE_SHIFT,
(pos + len - 1) >> PAGE_SHIFT);
if (ret < 0) {
- dout("invalidate_inode_pages2_range returned %d\n",
- ret);
+ doutc(cl, "invalidate_inode_pages2_range returned %d\n",
+ ret);
ret = 0;
}
pos += len;
written += len;
- dout("sync_write written %d\n", written);
+ doutc(cl, "written %d\n", written);
if (pos > i_size_read(inode)) {
check_caps = ceph_inode_set_size(inode, pos);
if (check_caps)
@@ -1991,7 +2026,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
ret = written;
iocb->ki_pos = pos;
}
- dout("sync_write returning %d\n", ret);
+ doutc(cl, "returning %d\n", ret);
return ret;
}
@@ -2010,13 +2045,14 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
struct inode *inode = file_inode(filp);
struct ceph_inode_info *ci = ceph_inode(inode);
bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
ssize_t ret;
int want = 0, got = 0;
int retry_op = 0, read = 0;
again:
- dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
- inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
+ doutc(cl, "%llu~%u trying to get caps on %p %llx.%llx\n",
+ iocb->ki_pos, (unsigned)len, inode, ceph_vinop(inode));
if (ceph_inode_is_shutdown(inode))
return -ESTALE;
@@ -2044,9 +2080,9 @@ again:
(iocb->ki_flags & IOCB_DIRECT) ||
(fi->flags & CEPH_F_SYNC)) {
- dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
- inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
- ceph_cap_string(got));
+ doutc(cl, "sync %p %llx.%llx %llu~%u got cap refs on %s\n",
+ inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
+ ceph_cap_string(got));
if (!ceph_has_inline_data(ci)) {
if (!retry_op &&
@@ -2064,16 +2100,16 @@ again:
}
} else {
CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
- dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
- inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
- ceph_cap_string(got));
+ doutc(cl, "async %p %llx.%llx %llu~%u got cap refs on %s\n",
+ inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
+ ceph_cap_string(got));
ceph_add_rw_context(fi, &rw_ctx);
ret = generic_file_read_iter(iocb, to);
ceph_del_rw_context(fi, &rw_ctx);
}
- dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
- inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
+ doutc(cl, "%p %llx.%llx dropping cap refs on %s = %d\n",
+ inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
ceph_put_cap_refs(ci, got);
if (direct_lock)
@@ -2133,8 +2169,8 @@ again:
/* hit EOF or hole? */
if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
ret < len) {
- dout("sync_read hit hole, ppos %lld < size %lld"
- ", reading more\n", iocb->ki_pos, i_size);
+ doutc(cl, "hit hole, ppos %lld < size %lld, reading more\n",
+ iocb->ki_pos, i_size);
read += ret;
len -= ret;
@@ -2228,7 +2264,8 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct ceph_file_info *fi = file->private_data;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
struct ceph_osd_client *osdc = &fsc->client->osdc;
struct ceph_cap_flush *prealloc_cf;
ssize_t count, written = 0;
@@ -2296,8 +2333,9 @@ retry_snap:
if (err)
goto out;
- dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
- inode, ceph_vinop(inode), pos, count, i_size_read(inode));
+ doutc(cl, "%p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
+ inode, ceph_vinop(inode), pos, count,
+ i_size_read(inode));
if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
want |= CEPH_CAP_FILE_BUFFER;
if (fi->fmode & CEPH_FILE_MODE_LAZY)
@@ -2313,8 +2351,8 @@ retry_snap:
inode_inc_iversion_raw(inode);
- dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
- inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
+ doutc(cl, "%p %llx.%llx %llu~%zd got cap refs on %s\n",
+ inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
(iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
@@ -2374,14 +2412,14 @@ retry_snap:
ceph_check_caps(ci, CHECK_CAPS_FLUSH);
}
- dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
- inode, ceph_vinop(inode), pos, (unsigned)count,
- ceph_cap_string(got));
+ doutc(cl, "%p %llx.%llx %llu~%u dropping cap refs on %s\n",
+ inode, ceph_vinop(inode), pos, (unsigned)count,
+ ceph_cap_string(got));
ceph_put_cap_refs(ci, got);
if (written == -EOLDSNAPC) {
- dout("aio_write %p %llx.%llx %llu~%u" "got EOLDSNAPC, retrying\n",
- inode, ceph_vinop(inode), pos, (unsigned)count);
+ doutc(cl, "%p %llx.%llx %llu~%u" "got EOLDSNAPC, retrying\n",
+ inode, ceph_vinop(inode), pos, (unsigned)count);
goto retry_snap;
}
@@ -2462,7 +2500,7 @@ static int ceph_zero_partial_object(struct inode *inode,
loff_t offset, loff_t *length)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_osd_request *req;
int ret = 0;
loff_t zero = 0;
@@ -2553,14 +2591,15 @@ static long ceph_fallocate(struct file *file, int mode,
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_cap_flush *prealloc_cf;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int want, got = 0;
int dirty;
int ret = 0;
loff_t endoff = 0;
loff_t size;
- dout("%s %p %llx.%llx mode %x, offset %llu length %llu\n", __func__,
- inode, ceph_vinop(inode), mode, offset, length);
+ doutc(cl, "%p %llx.%llx mode %x, offset %llu length %llu\n",
+ inode, ceph_vinop(inode), mode, offset, length);
if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
return -EOPNOTSUPP;
@@ -2689,6 +2728,7 @@ static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
loff_t src_off, loff_t dst_off, size_t len)
{
+ struct ceph_client *cl = ceph_inode_to_client(src_inode);
loff_t size, endoff;
size = i_size_read(src_inode);
@@ -2699,8 +2739,8 @@ static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
* inode.
*/
if (src_off + len > size) {
- dout("Copy beyond EOF (%llu + %zu > %llu)\n",
- src_off, len, size);
+ doutc(cl, "Copy beyond EOF (%llu + %zu > %llu)\n", src_off,
+ len, size);
return -EOPNOTSUPP;
}
size = i_size_read(dst_inode);
@@ -2776,6 +2816,7 @@ static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off
u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
u32 src_objlen, dst_objlen;
u32 object_size = src_ci->i_layout.object_size;
+ struct ceph_client *cl = fsc->client;
int ret;
src_oloc.pool = src_ci->i_layout.pool_id;
@@ -2817,9 +2858,10 @@ static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off
if (ret) {
if (ret == -EOPNOTSUPP) {
fsc->have_copy_from2 = false;
- pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
+ pr_notice_client(cl,
+ "OSDs don't support copy-from2; disabling copy offload\n");
}
- dout("ceph_osdc_copy_from returned %d\n", ret);
+ doutc(cl, "returned %d\n", ret);
if (!bytes)
bytes = ret;
goto out;
@@ -2845,7 +2887,8 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
struct ceph_inode_info *src_ci = ceph_inode(src_inode);
struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
struct ceph_cap_flush *prealloc_cf;
- struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
+ struct ceph_fs_client *src_fsc = ceph_inode_to_fs_client(src_inode);
+ struct ceph_client *cl = src_fsc->client;
loff_t size;
ssize_t ret = -EIO, bytes;
u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
@@ -2853,7 +2896,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
int src_got = 0, dst_got = 0, err, dirty;
if (src_inode->i_sb != dst_inode->i_sb) {
- struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);
+ struct ceph_fs_client *dst_fsc = ceph_inode_to_fs_client(dst_inode);
if (ceph_fsid_compare(&src_fsc->client->fsid,
&dst_fsc->client->fsid)) {
@@ -2888,7 +2931,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
(src_ci->i_layout.stripe_count != 1) ||
(dst_ci->i_layout.stripe_count != 1) ||
(src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
- dout("Invalid src/dst files layout\n");
+ doutc(cl, "Invalid src/dst files layout\n");
return -EOPNOTSUPP;
}
@@ -2906,12 +2949,12 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
/* Start by sync'ing the source and destination files */
ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
if (ret < 0) {
- dout("failed to write src file (%zd)\n", ret);
+ doutc(cl, "failed to write src file (%zd)\n", ret);
goto out;
}
ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
if (ret < 0) {
- dout("failed to write dst file (%zd)\n", ret);
+ doutc(cl, "failed to write dst file (%zd)\n", ret);
goto out;
}
@@ -2923,7 +2966,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
err = get_rd_wr_caps(src_file, &src_got,
dst_file, (dst_off + len), &dst_got);
if (err < 0) {
- dout("get_rd_wr_caps returned %d\n", err);
+ doutc(cl, "get_rd_wr_caps returned %d\n", err);
ret = -EOPNOTSUPP;
goto out;
}
@@ -2938,7 +2981,8 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
dst_off >> PAGE_SHIFT,
(dst_off + len) >> PAGE_SHIFT);
if (ret < 0) {
- dout("Failed to invalidate inode pages (%zd)\n", ret);
+ doutc(cl, "Failed to invalidate inode pages (%zd)\n",
+ ret);
ret = 0; /* XXX */
}
ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
@@ -2959,7 +3003,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
* starting at the src_off
*/
if (src_objoff) {
- dout("Initial partial copy of %u bytes\n", src_objlen);
+ doutc(cl, "Initial partial copy of %u bytes\n", src_objlen);
/*
* we need to temporarily drop all caps as we'll be calling
@@ -2970,7 +3014,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
&dst_off, src_objlen, flags);
/* Abort on short copies or on error */
if (ret < (long)src_objlen) {
- dout("Failed partial copy (%zd)\n", ret);
+ doutc(cl, "Failed partial copy (%zd)\n", ret);
goto out;
}
len -= ret;
@@ -2992,7 +3036,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
ret = bytes;
goto out_caps;
}
- dout("Copied %zu bytes out of %zu\n", bytes, len);
+ doutc(cl, "Copied %zu bytes out of %zu\n", bytes, len);
len -= bytes;
ret += bytes;
@@ -3020,13 +3064,13 @@ out_caps:
* there were errors in remote object copies (len >= object_size).
*/
if (len && (len < src_ci->i_layout.object_size)) {
- dout("Final partial copy of %zu bytes\n", len);
+ doutc(cl, "Final partial copy of %zu bytes\n", len);
bytes = do_splice_direct(src_file, &src_off, dst_file,
&dst_off, len, flags);
if (bytes > 0)
ret += bytes;
else
- dout("Failed partial copy (%zd)\n", bytes);
+ doutc(cl, "Failed partial copy (%zd)\n", bytes);
}
out:
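Note: every hunk in this file (and in fs/ceph/inode.c below) applies the same mechanical conversion: plain dout() debug calls become doutc(cl, ...), which tags each message with the ceph_client that produced it, and callers that want the ceph_fs_client move from ceph_inode_to_client()/ceph_sb_to_client() to the new ceph_inode_to_fs_client()/ceph_sb_to_fs_client() names. The standalone userspace sketch below is illustrative only and is not part of the patch; the struct, field and id formatting are hypothetical stand-ins for the kernel's real debug macros, shown just to make the before/after logging pattern concrete.

/* Illustrative sketch with assumed names, not kernel code: why a
 * client-aware doutc() beats a bare dout() when several clusters
 * are mounted -- each line carries the emitting client's identity. */
#include <stdio.h>

struct client {
	unsigned long long global_id;	/* stand-in for the ceph_client identity */
};

/* old style: no way to tell which mount/client emitted the message */
#define dout(fmt, ...) \
	fprintf(stderr, "ceph: " fmt, ##__VA_ARGS__)

/* new style: caller passes the client; the macro prefixes its id and
 * the calling function, so per-call format strings can shrink */
#define doutc(cl, fmt, ...) \
	fprintf(stderr, "ceph: [client%llu] %s: " fmt, \
		(cl)->global_id, __func__, ##__VA_ARGS__)

static void atomic_open_demo(struct client *cl, int err)
{
	dout("atomic_open result=%d\n", err);	/* old: function named in the string */
	doutc(cl, "result=%d\n", err);		/* new: macro supplies the context */
}

int main(void)
{
	struct client cl = { .global_id = 4242 };

	atomic_open_demo(&cl, 0);
	return 0;
}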
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 2e2a303b9e64..0679240f06db 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -129,6 +129,8 @@ void ceph_as_ctx_to_req(struct ceph_mds_request *req,
struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino,
struct inode *newino)
{
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
+ struct ceph_client *cl = mdsc->fsc->client;
struct inode *inode;
if (ceph_vino_is_reserved(vino))
@@ -145,12 +147,13 @@ struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino,
}
if (!inode) {
- dout("No inode found for %llx.%llx\n", vino.ino, vino.snap);
+ doutc(cl, "no inode found for %llx.%llx\n", vino.ino, vino.snap);
return ERR_PTR(-ENOMEM);
}
- dout("get_inode on %llu=%llx.%llx got %p new %d\n", ceph_present_inode(inode),
- ceph_vinop(inode), inode, !!(inode->i_state & I_NEW));
+ doutc(cl, "on %llx=%llx.%llx got %p new %d\n",
+ ceph_present_inode(inode), ceph_vinop(inode), inode,
+ !!(inode->i_state & I_NEW));
return inode;
}
@@ -159,6 +162,7 @@ struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino,
*/
struct inode *ceph_get_snapdir(struct inode *parent)
{
+ struct ceph_client *cl = ceph_inode_to_client(parent);
struct ceph_vino vino = {
.ino = ceph_ino(parent),
.snap = CEPH_SNAPDIR,
@@ -171,14 +175,14 @@ struct inode *ceph_get_snapdir(struct inode *parent)
return inode;
if (!S_ISDIR(parent->i_mode)) {
- pr_warn_once("bad snapdir parent type (mode=0%o)\n",
- parent->i_mode);
+ pr_warn_once_client(cl, "bad snapdir parent type (mode=0%o)\n",
+ parent->i_mode);
goto err;
}
if (!(inode->i_state & I_NEW) && !S_ISDIR(inode->i_mode)) {
- pr_warn_once("bad snapdir inode type (mode=0%o)\n",
- inode->i_mode);
+ pr_warn_once_client(cl, "bad snapdir inode type (mode=0%o)\n",
+ inode->i_mode);
goto err;
}
@@ -203,7 +207,7 @@ struct inode *ceph_get_snapdir(struct inode *parent)
inode->i_flags |= S_ENCRYPTED;
ci->fscrypt_auth_len = pci->fscrypt_auth_len;
} else {
- dout("Failed to alloc snapdir fscrypt_auth\n");
+ doutc(cl, "Failed to alloc snapdir fscrypt_auth\n");
ret = -ENOMEM;
goto err;
}
@@ -249,6 +253,8 @@ const struct inode_operations ceph_file_iops = {
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
u32 f)
{
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct rb_node **p;
struct rb_node *parent = NULL;
struct ceph_inode_frag *frag;
@@ -279,8 +285,7 @@ static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
rb_link_node(&frag->node, parent, p);
rb_insert_color(&frag->node, &ci->i_fragtree);
- dout("get_or_create_frag added %llx.%llx frag %x\n",
- ceph_vinop(&ci->netfs.inode), f);
+ doutc(cl, "added %p %llx.%llx frag %x\n", inode, ceph_vinop(inode), f);
return frag;
}
@@ -313,6 +318,7 @@ struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
struct ceph_inode_frag *pfrag, int *found)
{
+ struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
u32 t = ceph_frag_make(0, 0);
struct ceph_inode_frag *frag;
unsigned nway, i;
@@ -336,8 +342,8 @@ static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
/* choose child */
nway = 1 << frag->split_by;
- dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
- frag->split_by, nway);
+ doutc(cl, "frag(%x) %x splits by %d (%d ways)\n", v, t,
+ frag->split_by, nway);
for (i = 0; i < nway; i++) {
n = ceph_frag_make_child(t, frag->split_by, i);
if (ceph_frag_contains_value(n, v)) {
@@ -347,7 +353,7 @@ static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
}
BUG_ON(i == nway);
}
- dout("choose_frag(%x) = %x\n", v, t);
+ doutc(cl, "frag(%x) = %x\n", v, t);
return t;
}
@@ -371,6 +377,7 @@ static int ceph_fill_dirfrag(struct inode *inode,
struct ceph_mds_reply_dirfrag *dirinfo)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_frag *frag;
u32 id = le32_to_cpu(dirinfo->frag);
int mds = le32_to_cpu(dirinfo->auth);
@@ -395,14 +402,14 @@ static int ceph_fill_dirfrag(struct inode *inode,
goto out;
if (frag->split_by == 0) {
/* tree leaf, remove */
- dout("fill_dirfrag removed %llx.%llx frag %x"
- " (no ref)\n", ceph_vinop(inode), id);
+ doutc(cl, "removed %p %llx.%llx frag %x (no ref)\n",
+ inode, ceph_vinop(inode), id);
rb_erase(&frag->node, &ci->i_fragtree);
kfree(frag);
} else {
/* tree branch, keep and clear */
- dout("fill_dirfrag cleared %llx.%llx frag %x"
- " referral\n", ceph_vinop(inode), id);
+ doutc(cl, "cleared %p %llx.%llx frag %x referral\n",
+ inode, ceph_vinop(inode), id);
frag->mds = -1;
frag->ndist = 0;
}
@@ -415,8 +422,9 @@ static int ceph_fill_dirfrag(struct inode *inode,
if (IS_ERR(frag)) {
/* this is not the end of the world; we can continue
with bad/inaccurate delegation info */
- pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
- ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
+ pr_err_client(cl, "ENOMEM on mds ref %p %llx.%llx fg %x\n",
+ inode, ceph_vinop(inode),
+ le32_to_cpu(dirinfo->frag));
err = -ENOMEM;
goto out;
}
@@ -425,8 +433,8 @@ static int ceph_fill_dirfrag(struct inode *inode,
frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
for (i = 0; i < frag->ndist; i++)
frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
- dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
- ceph_vinop(inode), frag->frag, frag->ndist);
+ doutc(cl, "%p %llx.%llx frag %x ndist=%d\n", inode,
+ ceph_vinop(inode), frag->frag, frag->ndist);
out:
mutex_unlock(&ci->i_fragtree_mutex);
@@ -454,6 +462,7 @@ static int ceph_fill_fragtree(struct inode *inode,
struct ceph_frag_tree_head *fragtree,
struct ceph_mds_reply_dirfrag *dirinfo)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_inode_frag *frag, *prev_frag = NULL;
struct rb_node *rb_node;
@@ -489,15 +498,15 @@ static int ceph_fill_fragtree(struct inode *inode,
frag_tree_split_cmp, NULL);
}
- dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
+ doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
rb_node = rb_first(&ci->i_fragtree);
for (i = 0; i < nsplits; i++) {
id = le32_to_cpu(fragtree->splits[i].frag);
split_by = le32_to_cpu(fragtree->splits[i].by);
if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
- pr_err("fill_fragtree %llx.%llx invalid split %d/%u, "
- "frag %x split by %d\n", ceph_vinop(inode),
- i, nsplits, id, split_by);
+ pr_err_client(cl, "%p %llx.%llx invalid split %d/%u, "
+ "frag %x split by %d\n", inode,
+ ceph_vinop(inode), i, nsplits, id, split_by);
continue;
}
frag = NULL;
@@ -529,7 +538,7 @@ static int ceph_fill_fragtree(struct inode *inode,
if (frag->split_by == 0)
ci->i_fragtree_nsplits++;
frag->split_by = split_by;
- dout(" frag %x split by %d\n", frag->frag, frag->split_by);
+ doutc(cl, " frag %x split by %d\n", frag->frag, frag->split_by);
prev_frag = frag;
}
while (rb_node) {
@@ -554,6 +563,7 @@ out_unlock:
*/
struct inode *ceph_alloc_inode(struct super_block *sb)
{
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
struct ceph_inode_info *ci;
int i;
@@ -561,7 +571,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
if (!ci)
return NULL;
- dout("alloc_inode %p\n", &ci->netfs.inode);
+ doutc(fsc->client, "%p\n", &ci->netfs.inode);
/* Set parameters for the netfs library */
netfs_inode_init(&ci->netfs, &ceph_netfs_ops);
@@ -675,10 +685,11 @@ void ceph_evict_inode(struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_frag *frag;
struct rb_node *n;
- dout("evict_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
+ doutc(cl, "%p ino %llx.%llx\n", inode, ceph_vinop(inode));
percpu_counter_dec(&mdsc->metric.total_inodes);
@@ -701,8 +712,8 @@ void ceph_evict_inode(struct inode *inode)
*/
if (ci->i_snap_realm) {
if (ceph_snap(inode) == CEPH_NOSNAP) {
- dout(" dropping residual ref to snap realm %p\n",
- ci->i_snap_realm);
+ doutc(cl, " dropping residual ref to snap realm %p\n",
+ ci->i_snap_realm);
ceph_change_snap_realm(inode, NULL);
} else {
ceph_put_snapid_map(mdsc, ci->i_snapid_map);
@@ -743,15 +754,16 @@ static inline blkcnt_t calc_inode_blocks(u64 size)
int ceph_fill_file_size(struct inode *inode, int issued,
u32 truncate_seq, u64 truncate_size, u64 size)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
int queue_trunc = 0;
loff_t isize = i_size_read(inode);
if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
(truncate_seq == ci->i_truncate_seq && size > isize)) {
- dout("size %lld -> %llu\n", isize, size);
+ doutc(cl, "size %lld -> %llu\n", isize, size);
if (size > 0 && S_ISDIR(inode->i_mode)) {
- pr_err("fill_file_size non-zero size for directory\n");
+ pr_err_client(cl, "non-zero size for directory\n");
size = 0;
}
i_size_write(inode, size);
@@ -764,8 +776,8 @@ int ceph_fill_file_size(struct inode *inode, int issued,
ceph_fscache_update(inode);
ci->i_reported_size = size;
if (truncate_seq != ci->i_truncate_seq) {
- dout("%s truncate_seq %u -> %u\n", __func__,
- ci->i_truncate_seq, truncate_seq);
+ doutc(cl, "truncate_seq %u -> %u\n",
+ ci->i_truncate_seq, truncate_seq);
ci->i_truncate_seq = truncate_seq;
/* the MDS should have revoked these caps */
@@ -794,14 +806,15 @@ int ceph_fill_file_size(struct inode *inode, int issued,
* anyway.
*/
if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0) {
- dout("%s truncate_size %lld -> %llu, encrypted %d\n", __func__,
- ci->i_truncate_size, truncate_size, !!IS_ENCRYPTED(inode));
+ doutc(cl, "truncate_size %lld -> %llu, encrypted %d\n",
+ ci->i_truncate_size, truncate_size,
+ !!IS_ENCRYPTED(inode));
ci->i_truncate_size = truncate_size;
if (IS_ENCRYPTED(inode)) {
- dout("%s truncate_pagecache_size %lld -> %llu\n",
- __func__, ci->i_truncate_pagecache_size, size);
+ doutc(cl, "truncate_pagecache_size %lld -> %llu\n",
+ ci->i_truncate_pagecache_size, size);
ci->i_truncate_pagecache_size = size;
} else {
ci->i_truncate_pagecache_size = truncate_size;
@@ -814,6 +827,7 @@ void ceph_fill_file_time(struct inode *inode, int issued,
u64 time_warp_seq, struct timespec64 *ctime,
struct timespec64 *mtime, struct timespec64 *atime)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
struct timespec64 ictime = inode_get_ctime(inode);
int warn = 0;
@@ -825,7 +839,7 @@ void ceph_fill_file_time(struct inode *inode, int issued,
CEPH_CAP_XATTR_EXCL)) {
if (ci->i_version == 0 ||
timespec64_compare(ctime, &ictime) > 0) {
- dout("ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
+ doutc(cl, "ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
ictime.tv_sec, ictime.tv_nsec,
ctime->tv_sec, ctime->tv_nsec);
inode_set_ctime_to_ts(inode, *ctime);
@@ -833,8 +847,7 @@ void ceph_fill_file_time(struct inode *inode, int issued,
if (ci->i_version == 0 ||
ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
/* the MDS did a utimes() */
- dout("mtime %lld.%09ld -> %lld.%09ld "
- "tw %d -> %d\n",
+ doutc(cl, "mtime %lld.%09ld -> %lld.%09ld tw %d -> %d\n",
inode_get_mtime_sec(inode),
inode_get_mtime_nsec(inode),
mtime->tv_sec, mtime->tv_nsec,
@@ -849,14 +862,14 @@ void ceph_fill_file_time(struct inode *inode, int issued,
/* nobody did utimes(); take the max */
ts = inode_get_mtime(inode);
if (timespec64_compare(mtime, &ts) > 0) {
- dout("mtime %lld.%09ld -> %lld.%09ld inc\n",
+ doutc(cl, "mtime %lld.%09ld -> %lld.%09ld inc\n",
ts.tv_sec, ts.tv_nsec,
mtime->tv_sec, mtime->tv_nsec);
inode_set_mtime_to_ts(inode, *mtime);
}
ts = inode_get_atime(inode);
if (timespec64_compare(atime, &ts) > 0) {
- dout("atime %lld.%09ld -> %lld.%09ld inc\n",
+ doutc(cl, "atime %lld.%09ld -> %lld.%09ld inc\n",
ts.tv_sec, ts.tv_nsec,
atime->tv_sec, atime->tv_nsec);
inode_set_atime_to_ts(inode, *atime);
@@ -878,13 +891,16 @@ void ceph_fill_file_time(struct inode *inode, int issued,
}
}
if (warn) /* time_warp_seq shouldn't go backwards */
- dout("%p mds time_warp_seq %llu < %u\n",
- inode, time_warp_seq, ci->i_time_warp_seq);
+ doutc(cl, "%p mds time_warp_seq %llu < %u\n", inode,
+ time_warp_seq, ci->i_time_warp_seq);
}
#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
-static int decode_encrypted_symlink(const char *encsym, int enclen, u8 **decsym)
+static int decode_encrypted_symlink(struct ceph_mds_client *mdsc,
+ const char *encsym,
+ int enclen, u8 **decsym)
{
+ struct ceph_client *cl = mdsc->fsc->client;
int declen;
u8 *sym;
@@ -894,8 +910,9 @@ static int decode_encrypted_symlink(const char *encsym, int enclen, u8 **decsym)
declen = ceph_base64_decode(encsym, enclen, sym);
if (declen < 0) {
- pr_err("%s: can't decode symlink (%d). Content: %.*s\n",
- __func__, declen, enclen, encsym);
+ pr_err_client(cl,
+ "can't decode symlink (%d). Content: %.*s\n",
+ declen, enclen, encsym);
kfree(sym);
return -EIO;
}
@@ -904,7 +921,9 @@ static int decode_encrypted_symlink(const char *encsym, int enclen, u8 **decsym)
return declen;
}
#else
-static int decode_encrypted_symlink(const char *encsym, int symlen, u8 **decsym)
+static int decode_encrypted_symlink(struct ceph_mds_client *mdsc,
+ const char *encsym,
+ int symlen, u8 **decsym)
{
return -EOPNOTSUPP;
}
@@ -921,6 +940,7 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
struct ceph_cap_reservation *caps_reservation)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_reply_inode *info = iinfo->in;
struct ceph_inode_info *ci = ceph_inode(inode);
int issued, new_issued, info_caps;
@@ -939,25 +959,26 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
lockdep_assert_held(&mdsc->snap_rwsem);
- dout("%s %p ino %llx.%llx v %llu had %llu\n", __func__,
- inode, ceph_vinop(inode), le64_to_cpu(info->version),
- ci->i_version);
+ doutc(cl, "%p ino %llx.%llx v %llu had %llu\n", inode, ceph_vinop(inode),
+ le64_to_cpu(info->version), ci->i_version);
/* Once I_NEW is cleared, we can't change type or dev numbers */
if (inode->i_state & I_NEW) {
inode->i_mode = mode;
} else {
if (inode_wrong_type(inode, mode)) {
- pr_warn_once("inode type changed! (ino %llx.%llx is 0%o, mds says 0%o)\n",
- ceph_vinop(inode), inode->i_mode, mode);
+ pr_warn_once_client(cl,
+ "inode type changed! (ino %llx.%llx is 0%o, mds says 0%o)\n",
+ ceph_vinop(inode), inode->i_mode, mode);
return -ESTALE;
}
if ((S_ISCHR(mode) || S_ISBLK(mode)) && inode->i_rdev != rdev) {
- pr_warn_once("dev inode rdev changed! (ino %llx.%llx is %u:%u, mds says %u:%u)\n",
- ceph_vinop(inode), MAJOR(inode->i_rdev),
- MINOR(inode->i_rdev), MAJOR(rdev),
- MINOR(rdev));
+ pr_warn_once_client(cl,
+ "dev inode rdev changed! (ino %llx.%llx is %u:%u, mds says %u:%u)\n",
+ ceph_vinop(inode), MAJOR(inode->i_rdev),
+ MINOR(inode->i_rdev), MAJOR(rdev),
+ MINOR(rdev));
return -ESTALE;
}
}
@@ -979,8 +1000,8 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
if (iinfo->xattr_len > 4) {
xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
if (!xattr_blob)
- pr_err("%s ENOMEM xattr blob %d bytes\n", __func__,
- iinfo->xattr_len);
+ pr_err_client(cl, "ENOMEM xattr blob %d bytes\n",
+ iinfo->xattr_len);
}
if (iinfo->pool_ns_len > 0)
@@ -1034,9 +1055,10 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
inode->i_mode = mode;
inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
- dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
- from_kuid(&init_user_ns, inode->i_uid),
- from_kgid(&init_user_ns, inode->i_gid));
+ doutc(cl, "%p %llx.%llx mode 0%o uid.gid %d.%d\n", inode,
+ ceph_vinop(inode), inode->i_mode,
+ from_kuid(&init_user_ns, inode->i_uid),
+ from_kgid(&init_user_ns, inode->i_gid));
ceph_decode_timespec64(&ci->i_btime, &iinfo->btime);
ceph_decode_timespec64(&ci->i_snap_btime, &iinfo->snap_btime);
}
@@ -1092,7 +1114,8 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
if (size == round_up(fsize, CEPH_FSCRYPT_BLOCK_SIZE)) {
size = fsize;
} else {
- pr_warn("fscrypt size mismatch: size=%llu fscrypt_file=%llu, discarding fscrypt_file size.\n",
+ pr_warn_client(cl,
+ "fscrypt size mismatch: size=%llu fscrypt_file=%llu, discarding fscrypt_file size.\n",
info->size, size);
}
}
@@ -1104,8 +1127,8 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
/* only update max_size on auth cap */
if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
ci->i_max_size != le64_to_cpu(info->max_size)) {
- dout("max_size %lld -> %llu\n", ci->i_max_size,
- le64_to_cpu(info->max_size));
+ doutc(cl, "max_size %lld -> %llu\n",
+ ci->i_max_size, le64_to_cpu(info->max_size));
ci->i_max_size = le64_to_cpu(info->max_size);
}
}
@@ -1168,15 +1191,17 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
if (IS_ENCRYPTED(inode)) {
if (symlen != i_size_read(inode))
- pr_err("%s %llx.%llx BAD symlink size %lld\n",
- __func__, ceph_vinop(inode),
+ pr_err_client(cl,
+ "%p %llx.%llx BAD symlink size %lld\n",
+ inode, ceph_vinop(inode),
i_size_read(inode));
- err = decode_encrypted_symlink(iinfo->symlink,
+ err = decode_encrypted_symlink(mdsc, iinfo->symlink,
symlen, (u8 **)&sym);
if (err < 0) {
- pr_err("%s decoding encrypted symlink failed: %d\n",
- __func__, err);
+ pr_err_client(cl,
+ "decoding encrypted symlink failed: %d\n",
+ err);
goto out;
}
symlen = err;
@@ -1184,8 +1209,9 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
inode->i_blocks = calc_inode_blocks(symlen);
} else {
if (symlen != i_size_read(inode)) {
- pr_err("%s %llx.%llx BAD symlink size %lld\n",
- __func__, ceph_vinop(inode),
+ pr_err_client(cl,
+ "%p %llx.%llx BAD symlink size %lld\n",
+ inode, ceph_vinop(inode),
i_size_read(inode));
i_size_write(inode, symlen);
inode->i_blocks = calc_inode_blocks(symlen);
@@ -1220,8 +1246,8 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
inode->i_fop = &ceph_dir_fops;
break;
default:
- pr_err("%s %llx.%llx BAD mode 0%o\n", __func__,
- ceph_vinop(inode), inode->i_mode);
+ pr_err_client(cl, "%p %llx.%llx BAD mode 0%o\n", inode,
+ ceph_vinop(inode), inode->i_mode);
}
/* were we issued a capability? */
@@ -1242,7 +1268,8 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
(info_caps & CEPH_CAP_FILE_SHARED) &&
(issued & CEPH_CAP_FILE_EXCL) == 0 &&
!__ceph_dir_is_complete(ci)) {
- dout(" marking %p complete (empty)\n", inode);
+ doutc(cl, " marking %p complete (empty)\n",
+ inode);
i_size_write(inode, 0);
__ceph_dir_set_complete(ci,
atomic64_read(&ci->i_release_count),
@@ -1251,8 +1278,8 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
wake = true;
} else {
- dout(" %p got snap_caps %s\n", inode,
- ceph_cap_string(info_caps));
+ doutc(cl, " %p got snap_caps %s\n", inode,
+ ceph_cap_string(info_caps));
ci->i_snap_caps |= info_caps;
}
}
@@ -1268,8 +1295,8 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
if (cap_fmode >= 0) {
if (!info_caps)
- pr_warn("mds issued no caps on %llx.%llx\n",
- ceph_vinop(inode));
+ pr_warn_client(cl, "mds issued no caps on %llx.%llx\n",
+ ceph_vinop(inode));
__ceph_touch_fmode(ci, mdsc, cap_fmode);
}
@@ -1315,14 +1342,14 @@ static void __update_dentry_lease(struct inode *dir, struct dentry *dentry,
unsigned long from_time,
struct ceph_mds_session **old_lease_session)
{
+ struct ceph_client *cl = ceph_inode_to_client(dir);
struct ceph_dentry_info *di = ceph_dentry(dentry);
unsigned mask = le16_to_cpu(lease->mask);
long unsigned duration = le32_to_cpu(lease->duration_ms);
long unsigned ttl = from_time + (duration * HZ) / 1000;
long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
- dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
- dentry, duration, ttl);
+ doutc(cl, "%p duration %lu ms ttl %lu\n", dentry, duration, ttl);
/* only track leases on regular dentries */
if (ceph_snap(dir) != CEPH_NOSNAP)
@@ -1423,6 +1450,7 @@ out_unlock:
*/
static int splice_dentry(struct dentry **pdn, struct inode *in)
{
+ struct ceph_client *cl = ceph_inode_to_client(in);
struct dentry *dn = *pdn;
struct dentry *realdn;
@@ -1454,23 +1482,21 @@ static int splice_dentry(struct dentry **pdn, struct inode *in)
d_drop(dn);
realdn = d_splice_alias(in, dn);
if (IS_ERR(realdn)) {
- pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
- PTR_ERR(realdn), dn, in, ceph_vinop(in));
+ pr_err_client(cl, "error %ld %p inode %p ino %llx.%llx\n",
+ PTR_ERR(realdn), dn, in, ceph_vinop(in));
return PTR_ERR(realdn);
}
if (realdn) {
- dout("dn %p (%d) spliced with %p (%d) "
- "inode %p ino %llx.%llx\n",
- dn, d_count(dn),
- realdn, d_count(realdn),
- d_inode(realdn), ceph_vinop(d_inode(realdn)));
+ doutc(cl, "dn %p (%d) spliced with %p (%d) inode %p ino %llx.%llx\n",
+ dn, d_count(dn), realdn, d_count(realdn),
+ d_inode(realdn), ceph_vinop(d_inode(realdn)));
dput(dn);
*pdn = realdn;
} else {
BUG_ON(!ceph_dentry(dn));
- dout("dn %p attached to %p ino %llx.%llx\n",
- dn, d_inode(dn), ceph_vinop(d_inode(dn)));
+ doutc(cl, "dn %p attached to %p ino %llx.%llx\n", dn,
+ d_inode(dn), ceph_vinop(d_inode(dn)));
}
return 0;
}
@@ -1492,14 +1518,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
struct inode *in = NULL;
struct ceph_vino tvino, dvino;
- struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
+ struct ceph_client *cl = fsc->client;
int err = 0;
- dout("fill_trace %p is_dentry %d is_target %d\n", req,
- rinfo->head->is_dentry, rinfo->head->is_target);
+ doutc(cl, "%p is_dentry %d is_target %d\n", req,
+ rinfo->head->is_dentry, rinfo->head->is_target);
if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
- dout("fill_trace reply is empty!\n");
+ doutc(cl, "reply is empty!\n");
if (rinfo->head->result == 0 && req->r_parent)
ceph_invalidate_dir_request(req);
return 0;
@@ -1556,13 +1583,13 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
retry_lookup:
dn = d_lookup(parent, &dname);
- dout("d_lookup on parent=%p name=%.*s got %p\n",
- parent, dname.len, dname.name, dn);
+ doutc(cl, "d_lookup on parent=%p name=%.*s got %p\n",
+ parent, dname.len, dname.name, dn);
if (!dn) {
dn = d_alloc(parent, &dname);
- dout("d_alloc %p '%.*s' = %p\n", parent,
- dname.len, dname.name, dn);
+ doutc(cl, "d_alloc %p '%.*s' = %p\n", parent,
+ dname.len, dname.name, dn);
if (!dn) {
dput(parent);
ceph_fname_free_buffer(dir, &oname);
@@ -1578,8 +1605,8 @@ retry_lookup:
} else if (d_really_is_positive(dn) &&
(ceph_ino(d_inode(dn)) != tvino.ino ||
ceph_snap(d_inode(dn)) != tvino.snap)) {
- dout(" dn %p points to wrong inode %p\n",
- dn, d_inode(dn));
+ doutc(cl, " dn %p points to wrong inode %p\n",
+ dn, d_inode(dn));
ceph_dir_clear_ordered(dir);
d_delete(dn);
dput(dn);
@@ -1604,8 +1631,8 @@ retry_lookup:
rinfo->head->result == 0) ? req->r_fmode : -1,
&req->r_caps_reservation);
if (err < 0) {
- pr_err("ceph_fill_inode badness %p %llx.%llx\n",
- in, ceph_vinop(in));
+ pr_err_client(cl, "badness %p %llx.%llx\n", in,
+ ceph_vinop(in));
req->r_target_inode = NULL;
if (in->i_state & I_NEW)
discard_new_inode(in);
@@ -1655,36 +1682,32 @@ retry_lookup:
have_lease = have_dir_cap ||
le32_to_cpu(rinfo->dlease->duration_ms);
if (!have_lease)
- dout("fill_trace no dentry lease or dir cap\n");
+ doutc(cl, "no dentry lease or dir cap\n");
/* rename? */
if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
struct inode *olddir = req->r_old_dentry_dir;
BUG_ON(!olddir);
- dout(" src %p '%pd' dst %p '%pd'\n",
- req->r_old_dentry,
- req->r_old_dentry,
- dn, dn);
- dout("fill_trace doing d_move %p -> %p\n",
- req->r_old_dentry, dn);
+ doutc(cl, " src %p '%pd' dst %p '%pd'\n",
+ req->r_old_dentry, req->r_old_dentry, dn, dn);
+ doutc(cl, "doing d_move %p -> %p\n", req->r_old_dentry, dn);
/* d_move screws up sibling dentries' offsets */
ceph_dir_clear_ordered(dir);
ceph_dir_clear_ordered(olddir);
d_move(req->r_old_dentry, dn);
- dout(" src %p '%pd' dst %p '%pd'\n",
- req->r_old_dentry,
- req->r_old_dentry,
- dn, dn);
+ doutc(cl, " src %p '%pd' dst %p '%pd'\n",
+ req->r_old_dentry, req->r_old_dentry, dn, dn);
/* ensure target dentry is invalidated, despite
rehashing bug in vfs_rename_dir */
ceph_invalidate_dentry_lease(dn);
- dout("dn %p gets new offset %lld\n", req->r_old_dentry,
- ceph_dentry(req->r_old_dentry)->offset);
+ doutc(cl, "dn %p gets new offset %lld\n",
+ req->r_old_dentry,
+ ceph_dentry(req->r_old_dentry)->offset);
/* swap r_dentry and r_old_dentry in case that
* splice_dentry() gets called later. This is safe
@@ -1696,9 +1719,9 @@ retry_lookup:
/* null dentry? */
if (!rinfo->head->is_target) {
- dout("fill_trace null dentry\n");
+ doutc(cl, "null dentry\n");
if (d_really_is_positive(dn)) {
- dout("d_delete %p\n", dn);
+ doutc(cl, "d_delete %p\n", dn);
ceph_dir_clear_ordered(dir);
d_delete(dn);
} else if (have_lease) {
@@ -1722,9 +1745,9 @@ retry_lookup:
goto done;
dn = req->r_dentry; /* may have spliced */
} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
- dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
- dn, d_inode(dn), ceph_vinop(d_inode(dn)),
- ceph_vinop(in));
+ doutc(cl, " %p links to %p %llx.%llx, not %llx.%llx\n",
+ dn, d_inode(dn), ceph_vinop(d_inode(dn)),
+ ceph_vinop(in));
d_invalidate(dn);
have_lease = false;
}
@@ -1734,7 +1757,7 @@ retry_lookup:
rinfo->dlease, session,
req->r_request_started);
}
- dout(" final dn %p\n", dn);
+ doutc(cl, " final dn %p\n", dn);
} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
req->r_op == CEPH_MDS_OP_MKSNAP) &&
test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
@@ -1745,7 +1768,8 @@ retry_lookup:
BUG_ON(!dir);
BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
BUG_ON(!req->r_dentry);
- dout(" linking snapped dir %p to dn %p\n", in, req->r_dentry);
+ doutc(cl, " linking snapped dir %p to dn %p\n", in,
+ req->r_dentry);
ceph_dir_clear_ordered(dir);
ihold(in);
err = splice_dentry(&req->r_dentry, in);
@@ -1767,7 +1791,7 @@ retry_lookup:
&dvino, ptvino);
}
done:
- dout("fill_trace done err=%d\n", err);
+ doutc(cl, "done err=%d\n", err);
return err;
}
@@ -1778,6 +1802,7 @@ static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
struct ceph_mds_session *session)
{
struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
+ struct ceph_client *cl = session->s_mdsc->fsc->client;
int i, err = 0;
for (i = 0; i < rinfo->dir_nr; i++) {
@@ -1792,14 +1817,14 @@ static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
in = ceph_get_inode(req->r_dentry->d_sb, vino, NULL);
if (IS_ERR(in)) {
err = PTR_ERR(in);
- dout("new_inode badness got %d\n", err);
+ doutc(cl, "badness got %d\n", err);
continue;
}
rc = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
-1, &req->r_caps_reservation);
if (rc < 0) {
- pr_err("ceph_fill_inode badness on %p got %d\n",
- in, rc);
+ pr_err_client(cl, "inode badness on %p got %d\n", in,
+ rc);
err = rc;
if (in->i_state & I_NEW) {
ihold(in);
@@ -1828,6 +1853,7 @@ static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
struct ceph_readdir_cache_control *ctl,
struct ceph_mds_request *req)
{
+ struct ceph_client *cl = ceph_inode_to_client(dir);
struct ceph_inode_info *ci = ceph_inode(dir);
unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
unsigned idx = ctl->index % nsize;
@@ -1853,11 +1879,11 @@ static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
- dout("readdir cache dn %p idx %d\n", dn, ctl->index);
+ doutc(cl, "dn %p idx %d\n", dn, ctl->index);
ctl->dentries[idx] = dn;
ctl->index++;
} else {
- dout("disable readdir cache\n");
+ doutc(cl, "disable readdir cache\n");
ctl->index = -1;
}
return 0;
@@ -1870,6 +1896,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
struct inode *inode = d_inode(parent);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
+ struct ceph_client *cl = session->s_mdsc->fsc->client;
struct qstr dname;
struct dentry *dn;
struct inode *in;
@@ -1897,19 +1924,18 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
if (rinfo->dir_dir &&
le32_to_cpu(rinfo->dir_dir->frag) != frag) {
- dout("readdir_prepopulate got new frag %x -> %x\n",
- frag, le32_to_cpu(rinfo->dir_dir->frag));
+ doutc(cl, "got new frag %x -> %x\n", frag,
+ le32_to_cpu(rinfo->dir_dir->frag));
frag = le32_to_cpu(rinfo->dir_dir->frag);
if (!rinfo->hash_order)
req->r_readdir_offset = 2;
}
if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
- dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
- rinfo->dir_nr, parent);
+ doutc(cl, "%d items under SNAPDIR dn %p\n",
+ rinfo->dir_nr, parent);
} else {
- dout("readdir_prepopulate %d items under dn %p\n",
- rinfo->dir_nr, parent);
+ doutc(cl, "%d items under dn %p\n", rinfo->dir_nr, parent);
if (rinfo->dir_dir)
ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
@@ -1953,15 +1979,15 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
retry_lookup:
dn = d_lookup(parent, &dname);
- dout("d_lookup on parent=%p name=%.*s got %p\n",
- parent, dname.len, dname.name, dn);
+ doutc(cl, "d_lookup on parent=%p name=%.*s got %p\n",
+ parent, dname.len, dname.name, dn);
if (!dn) {
dn = d_alloc(parent, &dname);
- dout("d_alloc %p '%.*s' = %p\n", parent,
- dname.len, dname.name, dn);
+ doutc(cl, "d_alloc %p '%.*s' = %p\n", parent,
+ dname.len, dname.name, dn);
if (!dn) {
- dout("d_alloc badness\n");
+ doutc(cl, "d_alloc badness\n");
err = -ENOMEM;
goto out;
}
@@ -1974,8 +2000,8 @@ retry_lookup:
(ceph_ino(d_inode(dn)) != tvino.ino ||
ceph_snap(d_inode(dn)) != tvino.snap)) {
struct ceph_dentry_info *di = ceph_dentry(dn);
- dout(" dn %p points to wrong inode %p\n",
- dn, d_inode(dn));
+ doutc(cl, " dn %p points to wrong inode %p\n",
+ dn, d_inode(dn));
spin_lock(&dn->d_lock);
if (di->offset > 0 &&
@@ -1997,7 +2023,7 @@ retry_lookup:
} else {
in = ceph_get_inode(parent->d_sb, tvino, NULL);
if (IS_ERR(in)) {
- dout("new_inode badness\n");
+ doutc(cl, "new_inode badness\n");
d_drop(dn);
dput(dn);
err = PTR_ERR(in);
@@ -2008,7 +2034,8 @@ retry_lookup:
ret = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
-1, &req->r_caps_reservation);
if (ret < 0) {
- pr_err("ceph_fill_inode badness on %p\n", in);
+ pr_err_client(cl, "badness on %p %llx.%llx\n", in,
+ ceph_vinop(in));
if (d_really_is_negative(dn)) {
if (in->i_state & I_NEW) {
ihold(in);
@@ -2025,8 +2052,8 @@ retry_lookup:
if (d_really_is_negative(dn)) {
if (ceph_security_xattr_deadlock(in)) {
- dout(" skip splicing dn %p to inode %p"
- " (security xattr deadlock)\n", dn, in);
+ doutc(cl, " skip splicing dn %p to inode %p"
+ " (security xattr deadlock)\n", dn, in);
iput(in);
skipped++;
goto next_item;
@@ -2058,17 +2085,18 @@ out:
req->r_readdir_cache_idx = cache_ctl.index;
}
ceph_readdir_cache_release(&cache_ctl);
- dout("readdir_prepopulate done\n");
+ doutc(cl, "done\n");
return err;
}
bool ceph_inode_set_size(struct inode *inode, loff_t size)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
bool ret;
spin_lock(&ci->i_ceph_lock);
- dout("set_size %p %llu -> %llu\n", inode, i_size_read(inode), size);
+ doutc(cl, "set_size %p %llu -> %llu\n", inode, i_size_read(inode), size);
i_size_write(inode, size);
ceph_fscache_update(inode);
inode->i_blocks = calc_inode_blocks(size);
@@ -2082,22 +2110,25 @@ bool ceph_inode_set_size(struct inode *inode, loff_t size)
void ceph_queue_inode_work(struct inode *inode, int work_bit)
{
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
struct ceph_inode_info *ci = ceph_inode(inode);
set_bit(work_bit, &ci->i_work_mask);
ihold(inode);
if (queue_work(fsc->inode_wq, &ci->i_work)) {
- dout("queue_inode_work %p, mask=%lx\n", inode, ci->i_work_mask);
+ doutc(cl, "%p %llx.%llx mask=%lx\n", inode,
+ ceph_vinop(inode), ci->i_work_mask);
} else {
- dout("queue_inode_work %p already queued, mask=%lx\n",
- inode, ci->i_work_mask);
+ doutc(cl, "%p %llx.%llx already queued, mask=%lx\n",
+ inode, ceph_vinop(inode), ci->i_work_mask);
iput(inode);
}
}
static void ceph_do_invalidate_pages(struct inode *inode)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
u32 orig_gen;
int check = 0;
@@ -2107,8 +2138,9 @@ static void ceph_do_invalidate_pages(struct inode *inode)
mutex_lock(&ci->i_truncate_mutex);
if (ceph_inode_is_shutdown(inode)) {
- pr_warn_ratelimited("%s: inode %llx.%llx is shut down\n",
- __func__, ceph_vinop(inode));
+ pr_warn_ratelimited_client(cl,
+ "%p %llx.%llx is shut down\n", inode,
+ ceph_vinop(inode));
mapping_set_error(inode->i_mapping, -EIO);
truncate_pagecache(inode, 0);
mutex_unlock(&ci->i_truncate_mutex);
@@ -2116,8 +2148,8 @@ static void ceph_do_invalidate_pages(struct inode *inode)
}
spin_lock(&ci->i_ceph_lock);
- dout("invalidate_pages %p gen %d revoking %d\n", inode,
- ci->i_rdcache_gen, ci->i_rdcache_revoking);
+ doutc(cl, "%p %llx.%llx gen %d revoking %d\n", inode,
+ ceph_vinop(inode), ci->i_rdcache_gen, ci->i_rdcache_revoking);
if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
check = 1;
@@ -2129,21 +2161,21 @@ static void ceph_do_invalidate_pages(struct inode *inode)
spin_unlock(&ci->i_ceph_lock);
if (invalidate_inode_pages2(inode->i_mapping) < 0) {
- pr_err("invalidate_inode_pages2 %llx.%llx failed\n",
- ceph_vinop(inode));
+ pr_err_client(cl, "invalidate_inode_pages2 %llx.%llx failed\n",
+ ceph_vinop(inode));
}
spin_lock(&ci->i_ceph_lock);
if (orig_gen == ci->i_rdcache_gen &&
orig_gen == ci->i_rdcache_revoking) {
- dout("invalidate_pages %p gen %d successful\n", inode,
- ci->i_rdcache_gen);
+ doutc(cl, "%p %llx.%llx gen %d successful\n", inode,
+ ceph_vinop(inode), ci->i_rdcache_gen);
ci->i_rdcache_revoking--;
check = 1;
} else {
- dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
- inode, orig_gen, ci->i_rdcache_gen,
- ci->i_rdcache_revoking);
+ doutc(cl, "%p %llx.%llx gen %d raced, now %d revoking %d\n",
+ inode, ceph_vinop(inode), orig_gen, ci->i_rdcache_gen,
+ ci->i_rdcache_revoking);
if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
check = 1;
}
@@ -2160,6 +2192,7 @@ out:
*/
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
u64 to;
int wrbuffer_refs, finish = 0;
@@ -2168,7 +2201,8 @@ void __ceph_do_pending_vmtruncate(struct inode *inode)
retry:
spin_lock(&ci->i_ceph_lock);
if (ci->i_truncate_pending == 0) {
- dout("%s %p none pending\n", __func__, inode);
+ doutc(cl, "%p %llx.%llx none pending\n", inode,
+ ceph_vinop(inode));
spin_unlock(&ci->i_ceph_lock);
mutex_unlock(&ci->i_truncate_mutex);
return;
@@ -2180,7 +2214,8 @@ retry:
*/
if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
spin_unlock(&ci->i_ceph_lock);
- dout("%s %p flushing snaps first\n", __func__, inode);
+ doutc(cl, "%p %llx.%llx flushing snaps first\n", inode,
+ ceph_vinop(inode));
filemap_write_and_wait_range(&inode->i_data, 0,
inode->i_sb->s_maxbytes);
goto retry;
@@ -2191,8 +2226,8 @@ retry:
to = ci->i_truncate_pagecache_size;
wrbuffer_refs = ci->i_wrbuffer_ref;
- dout("%s %p (%d) to %lld\n", __func__, inode,
- ci->i_truncate_pending, to);
+ doutc(cl, "%p %llx.%llx (%d) to %lld\n", inode, ceph_vinop(inode),
+ ci->i_truncate_pending, to);
spin_unlock(&ci->i_ceph_lock);
ceph_fscache_resize(inode, to);
@@ -2220,9 +2255,10 @@ static void ceph_inode_work(struct work_struct *work)
struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
i_work);
struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) {
- dout("writeback %p\n", inode);
+ doutc(cl, "writeback %p %llx.%llx\n", inode, ceph_vinop(inode));
filemap_fdatawrite(&inode->i_data);
}
if (test_and_clear_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask))
@@ -2294,6 +2330,7 @@ static int fill_fscrypt_truncate(struct inode *inode,
struct ceph_mds_request *req,
struct iattr *attr)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
int boff = attr->ia_size % CEPH_FSCRYPT_BLOCK_SIZE;
loff_t pos, orig_pos = round_down(attr->ia_size,
@@ -2316,9 +2353,9 @@ static int fill_fscrypt_truncate(struct inode *inode,
issued = __ceph_caps_issued(ci, NULL);
- dout("%s size %lld -> %lld got cap refs on %s, issued %s\n", __func__,
- i_size, attr->ia_size, ceph_cap_string(got),
- ceph_cap_string(issued));
+ doutc(cl, "size %lld -> %lld got cap refs on %s, issued %s\n",
+ i_size, attr->ia_size, ceph_cap_string(got),
+ ceph_cap_string(issued));
/* Try to writeback the dirty pagecaches */
if (issued & (CEPH_CAP_FILE_BUFFER)) {
@@ -2373,8 +2410,7 @@ static int fill_fscrypt_truncate(struct inode *inode,
* If the Rados object doesn't exist, it will be set to 0.
*/
if (!objver) {
- dout("%s hit hole, ppos %lld < size %lld\n", __func__,
- pos, i_size);
+ doutc(cl, "hit hole, ppos %lld < size %lld\n", pos, i_size);
header.data_len = cpu_to_le32(8 + 8 + 4);
header.file_offset = 0;
@@ -2383,8 +2419,8 @@ static int fill_fscrypt_truncate(struct inode *inode,
header.data_len = cpu_to_le32(8 + 8 + 4 + CEPH_FSCRYPT_BLOCK_SIZE);
header.file_offset = cpu_to_le64(orig_pos);
- dout("%s encrypt block boff/bsize %d/%lu\n", __func__,
- boff, CEPH_FSCRYPT_BLOCK_SIZE);
+ doutc(cl, "encrypt block boff/bsize %d/%lu\n", boff,
+ CEPH_FSCRYPT_BLOCK_SIZE);
/* truncate and zero out the extra contents for the last block */
memset(iov.iov_base + boff, 0, PAGE_SIZE - boff);
@@ -2412,8 +2448,8 @@ static int fill_fscrypt_truncate(struct inode *inode,
}
req->r_pagelist = pagelist;
out:
- dout("%s %p size dropping cap refs on %s\n", __func__,
- inode, ceph_cap_string(got));
+ doutc(cl, "%p %llx.%llx size dropping cap refs on %s\n", inode,
+ ceph_vinop(inode), ceph_cap_string(got));
ceph_put_cap_refs(ci, got);
if (iov.iov_base)
kunmap_local(iov.iov_base);
@@ -2424,13 +2460,14 @@ out:
return ret;
}
-int __ceph_setattr(struct inode *inode, struct iattr *attr,
- struct ceph_iattr *cia)
+int __ceph_setattr(struct mnt_idmap *idmap, struct inode *inode,
+ struct iattr *attr, struct ceph_iattr *cia)
{
struct ceph_inode_info *ci = ceph_inode(inode);
unsigned int ia_valid = attr->ia_valid;
struct ceph_mds_request *req;
- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_cap_flush *prealloc_cf;
loff_t isize = i_size_read(inode);
int issued;
@@ -2469,7 +2506,8 @@ retry:
}
}
- dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
+ doutc(cl, "%p %llx.%llx issued %s\n", inode, ceph_vinop(inode),
+ ceph_cap_string(issued));
#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
if (cia && cia->fscrypt_auth) {
u32 len = ceph_fscrypt_auth_len(cia->fscrypt_auth);
@@ -2480,8 +2518,8 @@ retry:
goto out;
}
- dout("setattr %llx:%llx fscrypt_auth len %u to %u)\n",
- ceph_vinop(inode), ci->fscrypt_auth_len, len);
+ doutc(cl, "%p %llx.%llx fscrypt_auth len %u to %u)\n", inode,
+ ceph_vinop(inode), ci->fscrypt_auth_len, len);
/* It should never be re-set once set */
WARN_ON_ONCE(ci->fscrypt_auth);
@@ -2509,38 +2547,44 @@ retry:
#endif /* CONFIG_FS_ENCRYPTION */
if (ia_valid & ATTR_UID) {
- dout("setattr %p uid %d -> %d\n", inode,
- from_kuid(&init_user_ns, inode->i_uid),
- from_kuid(&init_user_ns, attr->ia_uid));
+ kuid_t fsuid = from_vfsuid(idmap, i_user_ns(inode), attr->ia_vfsuid);
+
+ doutc(cl, "%p %llx.%llx uid %d -> %d\n", inode,
+ ceph_vinop(inode),
+ from_kuid(&init_user_ns, inode->i_uid),
+ from_kuid(&init_user_ns, attr->ia_uid));
if (issued & CEPH_CAP_AUTH_EXCL) {
- inode->i_uid = attr->ia_uid;
+ inode->i_uid = fsuid;
dirtied |= CEPH_CAP_AUTH_EXCL;
} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
- !uid_eq(attr->ia_uid, inode->i_uid)) {
+ !uid_eq(fsuid, inode->i_uid)) {
req->r_args.setattr.uid = cpu_to_le32(
- from_kuid(&init_user_ns, attr->ia_uid));
+ from_kuid(&init_user_ns, fsuid));
mask |= CEPH_SETATTR_UID;
release |= CEPH_CAP_AUTH_SHARED;
}
}
if (ia_valid & ATTR_GID) {
- dout("setattr %p gid %d -> %d\n", inode,
- from_kgid(&init_user_ns, inode->i_gid),
- from_kgid(&init_user_ns, attr->ia_gid));
+ kgid_t fsgid = from_vfsgid(idmap, i_user_ns(inode), attr->ia_vfsgid);
+
+ doutc(cl, "%p %llx.%llx gid %d -> %d\n", inode,
+ ceph_vinop(inode),
+ from_kgid(&init_user_ns, inode->i_gid),
+ from_kgid(&init_user_ns, attr->ia_gid));
if (issued & CEPH_CAP_AUTH_EXCL) {
- inode->i_gid = attr->ia_gid;
+ inode->i_gid = fsgid;
dirtied |= CEPH_CAP_AUTH_EXCL;
} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
- !gid_eq(attr->ia_gid, inode->i_gid)) {
+ !gid_eq(fsgid, inode->i_gid)) {
req->r_args.setattr.gid = cpu_to_le32(
- from_kgid(&init_user_ns, attr->ia_gid));
+ from_kgid(&init_user_ns, fsgid));
mask |= CEPH_SETATTR_GID;
release |= CEPH_CAP_AUTH_SHARED;
}
}
if (ia_valid & ATTR_MODE) {
- dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
- attr->ia_mode);
+ doutc(cl, "%p %llx.%llx mode 0%o -> 0%o\n", inode,
+ ceph_vinop(inode), inode->i_mode, attr->ia_mode);
if (issued & CEPH_CAP_AUTH_EXCL) {
inode->i_mode = attr->ia_mode;
dirtied |= CEPH_CAP_AUTH_EXCL;
@@ -2556,9 +2600,10 @@ retry:
if (ia_valid & ATTR_ATIME) {
struct timespec64 atime = inode_get_atime(inode);
- dout("setattr %p atime %lld.%ld -> %lld.%ld\n", inode,
- atime.tv_sec, atime.tv_nsec,
- attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
+ doutc(cl, "%p %llx.%llx atime %lld.%09ld -> %lld.%09ld\n",
+ inode, ceph_vinop(inode),
+ atime.tv_sec, atime.tv_nsec,
+ attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
if (issued & CEPH_CAP_FILE_EXCL) {
ci->i_time_warp_seq++;
inode_set_atime_to_ts(inode, attr->ia_atime);
@@ -2578,7 +2623,8 @@ retry:
}
}
if (ia_valid & ATTR_SIZE) {
- dout("setattr %p size %lld -> %lld\n", inode, isize, attr->ia_size);
+ doutc(cl, "%p %llx.%llx size %lld -> %lld\n", inode,
+ ceph_vinop(inode), isize, attr->ia_size);
/*
* Only when the new size is smaller and not aligned to
* CEPH_FSCRYPT_BLOCK_SIZE will the RMW be needed.
@@ -2631,9 +2677,10 @@ retry:
if (ia_valid & ATTR_MTIME) {
struct timespec64 mtime = inode_get_mtime(inode);
- dout("setattr %p mtime %lld.%ld -> %lld.%ld\n", inode,
- mtime.tv_sec, mtime.tv_nsec,
- attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
+ doutc(cl, "%p %llx.%llx mtime %lld.%09ld -> %lld.%09ld\n",
+ inode, ceph_vinop(inode),
+ mtime.tv_sec, mtime.tv_nsec,
+ attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
if (issued & CEPH_CAP_FILE_EXCL) {
ci->i_time_warp_seq++;
inode_set_mtime_to_ts(inode, attr->ia_mtime);
@@ -2656,11 +2703,12 @@ retry:
if (ia_valid & ATTR_CTIME) {
bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
- dout("setattr %p ctime %lld.%ld -> %lld.%ld (%s)\n", inode,
- inode_get_ctime_sec(inode),
- inode_get_ctime_nsec(inode),
- attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
- only ? "ctime only" : "ignored");
+ doutc(cl, "%p %llx.%llx ctime %lld.%09ld -> %lld.%09ld (%s)\n",
+ inode, ceph_vinop(inode),
+ inode_get_ctime_sec(inode),
+ inode_get_ctime_nsec(inode),
+ attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
+ only ? "ctime only" : "ignored");
if (only) {
/*
* if kernel wants to dirty ctime but nothing else,
@@ -2678,7 +2726,8 @@ retry:
}
}
if (ia_valid & ATTR_FILE)
- dout("setattr %p ATTR_FILE ... hrm!\n", inode);
+ doutc(cl, "%p %llx.%llx ATTR_FILE ... hrm!\n", inode,
+ ceph_vinop(inode));
if (dirtied) {
inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
@@ -2719,16 +2768,17 @@ retry:
*/
err = ceph_mdsc_do_request(mdsc, NULL, req);
if (err == -EAGAIN && truncate_retry--) {
- dout("setattr %p result=%d (%s locally, %d remote), retry it!\n",
- inode, err, ceph_cap_string(dirtied), mask);
+ doutc(cl, "%p %llx.%llx result=%d (%s locally, %d remote), retry it!\n",
+ inode, ceph_vinop(inode), err,
+ ceph_cap_string(dirtied), mask);
ceph_mdsc_put_request(req);
ceph_free_cap_flush(prealloc_cf);
goto retry;
}
}
out:
- dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
- ceph_cap_string(dirtied), mask);
+ doutc(cl, "%p %llx.%llx result=%d (%s locally, %d remote)\n", inode,
+ ceph_vinop(inode), err, ceph_cap_string(dirtied), mask);
ceph_mdsc_put_request(req);
ceph_free_cap_flush(prealloc_cf);
@@ -2746,7 +2796,7 @@ int ceph_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
int err;
if (ceph_snap(inode) != CEPH_NOSNAP)
@@ -2759,7 +2809,7 @@ int ceph_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
if (err)
return err;
- err = setattr_prepare(&nop_mnt_idmap, dentry, attr);
+ err = setattr_prepare(idmap, dentry, attr);
if (err != 0)
return err;
@@ -2771,10 +2821,10 @@ int ceph_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
ceph_quota_is_max_bytes_exceeded(inode, attr->ia_size))
return -EDQUOT;
- err = __ceph_setattr(inode, attr, NULL);
+ err = __ceph_setattr(idmap, inode, attr, NULL);
if (err >= 0 && (attr->ia_valid & ATTR_MODE))
- err = posix_acl_chmod(&nop_mnt_idmap, dentry, attr->ia_mode);
+ err = posix_acl_chmod(idmap, dentry, attr->ia_mode);
return err;
}
@@ -2816,19 +2866,21 @@ int ceph_try_to_choose_auth_mds(struct inode *inode, int mask)
int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
int mask, bool force)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
+ struct ceph_client *cl = fsc->client;
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
int mode;
int err;
if (ceph_snap(inode) == CEPH_SNAPDIR) {
- dout("do_getattr inode %p SNAPDIR\n", inode);
+ doutc(cl, "inode %p %llx.%llx SNAPDIR\n", inode,
+ ceph_vinop(inode));
return 0;
}
- dout("do_getattr inode %p mask %s mode 0%o\n",
- inode, ceph_cap_string(mask), inode->i_mode);
+ doutc(cl, "inode %p %llx.%llx mask %s mode 0%o\n", inode,
+ ceph_vinop(inode), ceph_cap_string(mask), inode->i_mode);
if (!force && ceph_caps_issued_mask_metric(ceph_inode(inode), mask, 1))
return 0;
@@ -2855,14 +2907,15 @@ int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
}
}
ceph_mdsc_put_request(req);
- dout("do_getattr result=%d\n", err);
+ doutc(cl, "result=%d\n", err);
return err;
}
int ceph_do_getvxattr(struct inode *inode, const char *name, void *value,
size_t size)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
+ struct ceph_client *cl = fsc->client;
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
int mode = USE_AUTH_MDS;
@@ -2892,7 +2945,7 @@ int ceph_do_getvxattr(struct inode *inode, const char *name, void *value,
xattr_value = req->r_reply_info.xattr_info.xattr_value;
xattr_value_len = req->r_reply_info.xattr_info.xattr_value_len;
- dout("do_getvxattr xattr_value_len:%zu, size:%zu\n", xattr_value_len, size);
+ doutc(cl, "xattr_value_len:%zu, size:%zu\n", xattr_value_len, size);
err = (int)xattr_value_len;
if (size == 0)
@@ -2907,7 +2960,7 @@ int ceph_do_getvxattr(struct inode *inode, const char *name, void *value,
put:
ceph_mdsc_put_request(req);
out:
- dout("do_getvxattr result=%d\n", err);
+ doutc(cl, "result=%d\n", err);
return err;
}
@@ -2927,7 +2980,7 @@ int ceph_permission(struct mnt_idmap *idmap, struct inode *inode,
err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);
if (!err)
- err = generic_permission(&nop_mnt_idmap, inode, mask);
+ err = generic_permission(idmap, inode, mask);
return err;
}
@@ -2984,7 +3037,7 @@ int ceph_getattr(struct mnt_idmap *idmap, const struct path *path,
return err;
}
- generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
+ generic_fillattr(idmap, request_mask, inode, stat);
stat->ino = ceph_present_inode(inode);
/*
@@ -3007,7 +3060,7 @@ int ceph_getattr(struct mnt_idmap *idmap, const struct path *path,
stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;
if (S_ISDIR(inode->i_mode)) {
- if (ceph_test_mount_opt(ceph_sb_to_client(sb), RBYTES)) {
+ if (ceph_test_mount_opt(ceph_sb_to_fs_client(sb), RBYTES)) {
stat->size = ci->i_rbytes;
} else if (ceph_snap(inode) == CEPH_SNAPDIR) {
struct ceph_inode_info *pci;
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index 91a84917d203..e861de3c79b9 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -65,7 +65,7 @@ static long __validate_layout(struct ceph_mds_client *mdsc,
static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
{
struct inode *inode = file_inode(file);
- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
struct ceph_mds_request *req;
struct ceph_ioctl_layout l;
struct ceph_inode_info *ci = ceph_inode(file_inode(file));
@@ -140,7 +140,7 @@ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
struct ceph_mds_request *req;
struct ceph_ioctl_layout l;
int err;
- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
/* copy and validate */
if (copy_from_user(&l, arg, sizeof(l)))
@@ -183,7 +183,7 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_osd_client *osdc =
- &ceph_sb_to_client(inode->i_sb)->client->osdc;
+ &ceph_sb_to_fs_client(inode->i_sb)->client->osdc;
struct ceph_object_locator oloc;
CEPH_DEFINE_OID_ONSTACK(oid);
u32 xlen;
@@ -244,7 +244,8 @@ static long ceph_ioctl_lazyio(struct file *file)
struct ceph_file_info *fi = file->private_data;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) {
spin_lock(&ci->i_ceph_lock);
@@ -252,11 +253,13 @@ static long ceph_ioctl_lazyio(struct file *file)
ci->i_nr_by_mode[ffs(CEPH_FILE_MODE_LAZY)]++;
__ceph_touch_fmode(ci, mdsc, fi->fmode);
spin_unlock(&ci->i_ceph_lock);
- dout("ioctl_layzio: file %p marked lazy\n", file);
+ doutc(cl, "file %p %p %llx.%llx marked lazy\n", file, inode,
+ ceph_vinop(inode));
ceph_check_caps(ci, 0);
} else {
- dout("ioctl_layzio: file %p already lazy\n", file);
+ doutc(cl, "file %p %p %llx.%llx already lazy\n", file, inode,
+ ceph_vinop(inode));
}
return 0;
}
@@ -355,10 +358,12 @@ static const char *ceph_ioctl_cmd_name(const unsigned int cmd)
long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
+ struct inode *inode = file_inode(file);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
int ret;
- dout("ioctl file %p cmd %s arg %lu\n", file,
- ceph_ioctl_cmd_name(cmd), arg);
+ doutc(fsc->client, "file %p %p %llx.%llx cmd %s arg %lu\n", file,
+ inode, ceph_vinop(inode), ceph_ioctl_cmd_name(cmd), arg);
switch (cmd) {
case CEPH_IOC_GET_LAYOUT:
return ceph_ioctl_get_layout(file, (void __user *)arg);
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index cb51c7e9c8e2..e07ad29ff8b9 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -77,6 +77,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
int cmd, u8 wait, struct file_lock *fl)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
int err;
u64 length = 0;
@@ -111,10 +112,10 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
owner = secure_addr(fl->fl_owner);
- dout("ceph_lock_message: rule: %d, op: %d, owner: %llx, pid: %llu, "
- "start: %llu, length: %llu, wait: %d, type: %d\n", (int)lock_type,
- (int)operation, owner, (u64)fl->fl_pid, fl->fl_start, length,
- wait, fl->fl_type);
+ doutc(cl, "rule: %d, op: %d, owner: %llx, pid: %llu, "
+ "start: %llu, length: %llu, wait: %d, type: %d\n",
+ (int)lock_type, (int)operation, owner, (u64)fl->fl_pid,
+ fl->fl_start, length, wait, fl->fl_type);
req->r_args.filelock_change.rule = lock_type;
req->r_args.filelock_change.type = cmd;
@@ -147,16 +148,17 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
}
ceph_mdsc_put_request(req);
- dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
- "length: %llu, wait: %d, type: %d, err code %d\n", (int)lock_type,
- (int)operation, (u64)fl->fl_pid, fl->fl_start,
- length, wait, fl->fl_type, err);
+ doutc(cl, "rule: %d, op: %d, pid: %llu, start: %llu, "
+ "length: %llu, wait: %d, type: %d, err code %d\n",
+ (int)lock_type, (int)operation, (u64)fl->fl_pid,
+ fl->fl_start, length, wait, fl->fl_type, err);
return err;
}
static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *intr_req;
struct inode *inode = req->r_inode;
int err, lock_type;
@@ -174,8 +176,7 @@ static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
if (!err)
return 0;
- dout("ceph_lock_wait_for_completion: request %llu was interrupted\n",
- req->r_tid);
+ doutc(cl, "request %llu was interrupted\n", req->r_tid);
mutex_lock(&mdsc->mutex);
if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
@@ -246,6 +247,7 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
{
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int err = 0;
u16 op = CEPH_MDS_OP_SETFILELOCK;
u8 wait = 0;
@@ -257,7 +259,7 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
if (ceph_inode_is_shutdown(inode))
return -ESTALE;
- dout("ceph_lock, fl_owner: %p\n", fl->fl_owner);
+ doutc(cl, "fl_owner: %p\n", fl->fl_owner);
/* set wait bit as appropriate, then make command as Ceph expects it*/
if (IS_GETLK(cmd))
@@ -292,7 +294,7 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
err = ceph_lock_message(CEPH_LOCK_FCNTL, op, inode, lock_cmd, wait, fl);
if (!err) {
if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK != fl->fl_type) {
- dout("mds locked, locking locally\n");
+ doutc(cl, "locking locally\n");
err = posix_lock_file(file, fl, NULL);
if (err) {
/* undo! This should only happen if
@@ -300,8 +302,8 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
* deadlock. */
ceph_lock_message(CEPH_LOCK_FCNTL, op, inode,
CEPH_LOCK_UNLOCK, 0, fl);
- dout("got %d on posix_lock_file, undid lock\n",
- err);
+ doutc(cl, "got %d on posix_lock_file, undid lock\n",
+ err);
}
}
}
@@ -312,6 +314,7 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
{
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int err = 0;
u8 wait = 0;
u8 lock_cmd;
@@ -322,7 +325,7 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
if (ceph_inode_is_shutdown(inode))
return -ESTALE;
- dout("ceph_flock, fl_file: %p\n", fl->fl_file);
+ doutc(cl, "fl_file: %p\n", fl->fl_file);
spin_lock(&ci->i_ceph_lock);
if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) {
@@ -359,7 +362,8 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
ceph_lock_message(CEPH_LOCK_FLOCK,
CEPH_MDS_OP_SETFILELOCK,
inode, CEPH_LOCK_UNLOCK, 0, fl);
- dout("got %d on locks_lock_file_wait, undid lock\n", err);
+ doutc(cl, "got %d on locks_lock_file_wait, undid lock\n",
+ err);
}
}
return err;
@@ -371,6 +375,7 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
*/
void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct file_lock *lock;
struct file_lock_context *ctx;
@@ -386,17 +391,20 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
++(*flock_count);
spin_unlock(&ctx->flc_lock);
}
- dout("counted %d flock locks and %d fcntl locks\n",
- *flock_count, *fcntl_count);
+ doutc(cl, "counted %d flock locks and %d fcntl locks\n",
+ *flock_count, *fcntl_count);
}
/*
* Given a pointer to a lock, convert it to a ceph filelock
*/
-static int lock_to_ceph_filelock(struct file_lock *lock,
+static int lock_to_ceph_filelock(struct inode *inode,
+ struct file_lock *lock,
struct ceph_filelock *cephlock)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int err = 0;
+
cephlock->start = cpu_to_le64(lock->fl_start);
cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1);
cephlock->client = cpu_to_le64(0);
@@ -414,7 +422,7 @@ static int lock_to_ceph_filelock(struct file_lock *lock,
cephlock->type = CEPH_LOCK_UNLOCK;
break;
default:
- dout("Have unknown lock type %d\n", lock->fl_type);
+ doutc(cl, "Have unknown lock type %d\n", lock->fl_type);
err = -EINVAL;
}
@@ -432,13 +440,14 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
{
struct file_lock *lock;
struct file_lock_context *ctx = locks_inode_context(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int err = 0;
int seen_fcntl = 0;
int seen_flock = 0;
int l = 0;
- dout("encoding %d flock and %d fcntl locks\n", num_flock_locks,
- num_fcntl_locks);
+ doutc(cl, "encoding %d flock and %d fcntl locks\n", num_flock_locks,
+ num_fcntl_locks);
if (!ctx)
return 0;
@@ -450,7 +459,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
err = -ENOSPC;
goto fail;
}
- err = lock_to_ceph_filelock(lock, &flocks[l]);
+ err = lock_to_ceph_filelock(inode, lock, &flocks[l]);
if (err)
goto fail;
++l;
@@ -461,7 +470,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
err = -ENOSPC;
goto fail;
}
- err = lock_to_ceph_filelock(lock, &flocks[l]);
+ err = lock_to_ceph_filelock(inode, lock, &flocks[l]);
if (err)
goto fail;
++l;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index de798444bb97..d95eb525519a 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -12,6 +12,7 @@
#include <linux/bits.h>
#include <linux/ktime.h>
#include <linux/bitmap.h>
+#include <linux/mnt_idmapping.h>
#include "super.h"
#include "mds_client.h"
@@ -411,6 +412,7 @@ static int parse_reply_info_readdir(void **p, void *end,
u64 features)
{
struct ceph_mds_reply_info_parsed *info = &req->r_reply_info;
+ struct ceph_client *cl = req->r_mdsc->fsc->client;
u32 num, i = 0;
int err;
@@ -433,7 +435,7 @@ static int parse_reply_info_readdir(void **p, void *end,
BUG_ON(!info->dir_entries);
if ((unsigned long)(info->dir_entries + num) >
(unsigned long)info->dir_entries + info->dir_buf_size) {
- pr_err("dir contents are larger than expected\n");
+ pr_err_client(cl, "dir contents are larger than expected\n");
WARN_ON(1);
goto bad;
}
@@ -454,7 +456,7 @@ static int parse_reply_info_readdir(void **p, void *end,
ceph_decode_need(p, end, _name_len, bad);
_name = *p;
*p += _name_len;
- dout("parsed dir dname '%.*s'\n", _name_len, _name);
+ doutc(cl, "parsed dir dname '%.*s'\n", _name_len, _name);
if (info->hash_order)
rde->raw_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
@@ -514,8 +516,8 @@ static int parse_reply_info_readdir(void **p, void *end,
rde->is_nokey = false;
err = ceph_fname_to_usr(&fname, &tname, &oname, &rde->is_nokey);
if (err) {
- pr_err("%s unable to decode %.*s, got %d\n", __func__,
- _name_len, _name, err);
+ pr_err_client(cl, "unable to decode %.*s, got %d\n",
+ _name_len, _name, err);
goto out_bad;
}
rde->name = oname.name;
@@ -539,7 +541,7 @@ done:
bad:
err = -EIO;
out_bad:
- pr_err("problem parsing dir contents %d\n", err);
+ pr_err_client(cl, "problem parsing dir contents %d\n", err);
return err;
}
@@ -570,10 +572,11 @@ bad:
static int ceph_parse_deleg_inos(void **p, void *end,
struct ceph_mds_session *s)
{
+ struct ceph_client *cl = s->s_mdsc->fsc->client;
u32 sets;
ceph_decode_32_safe(p, end, sets, bad);
- dout("got %u sets of delegated inodes\n", sets);
+ doutc(cl, "got %u sets of delegated inodes\n", sets);
while (sets--) {
u64 start, len;
@@ -582,8 +585,9 @@ static int ceph_parse_deleg_inos(void **p, void *end,
/* Don't accept a delegation of system inodes */
if (start < CEPH_INO_SYSTEM_BASE) {
- pr_warn_ratelimited("ceph: ignoring reserved inode range delegation (start=0x%llx len=0x%llx)\n",
- start, len);
+ pr_warn_ratelimited_client(cl,
+ "ignoring reserved inode range delegation (start=0x%llx len=0x%llx)\n",
+ start, len);
continue;
}
while (len--) {
@@ -591,10 +595,10 @@ static int ceph_parse_deleg_inos(void **p, void *end,
DELEGATED_INO_AVAILABLE,
GFP_KERNEL);
if (!err) {
- dout("added delegated inode 0x%llx\n",
- start - 1);
+ doutc(cl, "added delegated inode 0x%llx\n", start - 1);
} else if (err == -EBUSY) {
- pr_warn("MDS delegated inode 0x%llx more than once.\n",
+ pr_warn_client(cl,
+ "MDS delegated inode 0x%llx more than once.\n",
start - 1);
} else {
return err;
@@ -744,6 +748,7 @@ static int parse_reply_info(struct ceph_mds_session *s, struct ceph_msg *msg,
struct ceph_mds_request *req, u64 features)
{
struct ceph_mds_reply_info_parsed *info = &req->r_reply_info;
+ struct ceph_client *cl = s->s_mdsc->fsc->client;
void *p, *end;
u32 len;
int err;
@@ -783,7 +788,7 @@ static int parse_reply_info(struct ceph_mds_session *s, struct ceph_msg *msg,
bad:
err = -EIO;
out_bad:
- pr_err("mds parse_reply err %d\n", err);
+ pr_err_client(cl, "mds parse_reply err %d\n", err);
ceph_msg_dump(msg);
return err;
}
@@ -830,7 +835,8 @@ static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
*/
int ceph_wait_on_conflict_unlink(struct dentry *dentry)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
+ struct ceph_client *cl = fsc->client;
struct dentry *pdentry = dentry->d_parent;
struct dentry *udentry, *found = NULL;
struct ceph_dentry_info *di;
@@ -855,8 +861,8 @@ int ceph_wait_on_conflict_unlink(struct dentry *dentry)
goto next;
if (!test_bit(CEPH_DENTRY_ASYNC_UNLINK_BIT, &di->flags))
- pr_warn("%s dentry %p:%pd async unlink bit is not set\n",
- __func__, dentry, dentry);
+ pr_warn_client(cl, "dentry %p:%pd async unlink bit is not set\n",
+ dentry, dentry);
if (!d_same_name(udentry, pdentry, &dname))
goto next;
@@ -872,8 +878,8 @@ next:
if (likely(!found))
return 0;
- dout("%s dentry %p:%pd conflict with old %p:%pd\n", __func__,
- dentry, dentry, found, found);
+ doutc(cl, "dentry %p:%pd conflict with old %p:%pd\n", dentry, dentry,
+ found, found);
err = wait_on_bit(&di->flags, CEPH_DENTRY_ASYNC_UNLINK_BIT,
TASK_KILLABLE);
@@ -957,6 +963,7 @@ static int __verify_registered_session(struct ceph_mds_client *mdsc,
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
int mds)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_session *s;
if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
@@ -973,7 +980,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
int newmax = 1 << get_count_order(mds + 1);
struct ceph_mds_session **sa;
- dout("%s: realloc to %d\n", __func__, newmax);
+ doutc(cl, "realloc to %d\n", newmax);
sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
if (!sa)
goto fail_realloc;
@@ -986,7 +993,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
mdsc->max_sessions = newmax;
}
- dout("%s: mds%d\n", __func__, mds);
+ doutc(cl, "mds%d\n", mds);
s->s_mdsc = mdsc;
s->s_mds = mds;
s->s_state = CEPH_MDS_SESSION_NEW;
@@ -1029,7 +1036,7 @@ fail_realloc:
static void __unregister_session(struct ceph_mds_client *mdsc,
struct ceph_mds_session *s)
{
- dout("__unregister_session mds%d %p\n", s->s_mds, s);
+ doutc(mdsc->fsc->client, "mds%d %p\n", s->s_mds, s);
BUG_ON(mdsc->sessions[s->s_mds] != s);
mdsc->sessions[s->s_mds] = NULL;
ceph_con_close(&s->s_con);
@@ -1116,6 +1123,8 @@ void ceph_mdsc_release_request(struct kref *kref)
kfree(req->r_path1);
kfree(req->r_path2);
put_cred(req->r_cred);
+ if (req->r_mnt_idmap)
+ mnt_idmap_put(req->r_mnt_idmap);
if (req->r_pagelist)
ceph_pagelist_release(req->r_pagelist);
kfree(req->r_fscrypt_auth);
@@ -1155,6 +1164,7 @@ static void __register_request(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req,
struct inode *dir)
{
+ struct ceph_client *cl = mdsc->fsc->client;
int ret = 0;
req->r_tid = ++mdsc->last_tid;
@@ -1162,18 +1172,20 @@ static void __register_request(struct ceph_mds_client *mdsc,
ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
req->r_num_caps);
if (ret < 0) {
- pr_err("__register_request %p "
- "failed to reserve caps: %d\n", req, ret);
+ pr_err_client(cl, "%p failed to reserve caps: %d\n",
+ req, ret);
/* set req->r_err to fail early from __do_request */
req->r_err = ret;
return;
}
}
- dout("__register_request %p tid %lld\n", req, req->r_tid);
+ doutc(cl, "%p tid %lld\n", req, req->r_tid);
ceph_mdsc_get_request(req);
insert_request(&mdsc->request_tree, req);
req->r_cred = get_current_cred();
+ if (!req->r_mnt_idmap)
+ req->r_mnt_idmap = &nop_mnt_idmap;
if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
mdsc->oldest_tid = req->r_tid;
@@ -1192,7 +1204,7 @@ static void __register_request(struct ceph_mds_client *mdsc,
static void __unregister_request(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req)
{
- dout("__unregister_request %p tid %lld\n", req, req->r_tid);
+ doutc(mdsc->fsc->client, "%p tid %lld\n", req, req->r_tid);
/* Never leave an unregistered request on an unsafe list! */
list_del_init(&req->r_unsafe_item);
@@ -1278,6 +1290,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
int mds = -1;
u32 hash = req->r_direct_hash;
bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
+ struct ceph_client *cl = mdsc->fsc->client;
if (random)
*random = false;
@@ -1289,8 +1302,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
if (req->r_resend_mds >= 0 &&
(__have_session(mdsc, req->r_resend_mds) ||
ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
- dout("%s using resend_mds mds%d\n", __func__,
- req->r_resend_mds);
+ doutc(cl, "using resend_mds mds%d\n", req->r_resend_mds);
return req->r_resend_mds;
}
@@ -1307,7 +1319,8 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
rcu_read_lock();
inode = get_nonsnap_parent(req->r_dentry);
rcu_read_unlock();
- dout("%s using snapdir's parent %p\n", __func__, inode);
+ doutc(cl, "using snapdir's parent %p %llx.%llx\n",
+ inode, ceph_vinop(inode));
}
} else if (req->r_dentry) {
/* ignore race with rename; old or new d_parent is okay */
@@ -1327,7 +1340,8 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
/* direct snapped/virtual snapdir requests
* based on parent dir inode */
inode = get_nonsnap_parent(parent);
- dout("%s using nonsnap parent %p\n", __func__, inode);
+ doutc(cl, "using nonsnap parent %p %llx.%llx\n",
+ inode, ceph_vinop(inode));
} else {
/* dentry target */
inode = d_inode(req->r_dentry);
@@ -1343,10 +1357,11 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
rcu_read_unlock();
}
- dout("%s %p is_hash=%d (0x%x) mode %d\n", __func__, inode, (int)is_hash,
- hash, mode);
if (!inode)
goto random;
+
+ doutc(cl, "%p %llx.%llx is_hash=%d (0x%x) mode %d\n", inode,
+ ceph_vinop(inode), (int)is_hash, hash, mode);
ci = ceph_inode(inode);
if (is_hash && S_ISDIR(inode->i_mode)) {
@@ -1362,9 +1377,9 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
get_random_bytes(&r, 1);
r %= frag.ndist;
mds = frag.dist[r];
- dout("%s %p %llx.%llx frag %u mds%d (%d/%d)\n",
- __func__, inode, ceph_vinop(inode),
- frag.frag, mds, (int)r, frag.ndist);
+ doutc(cl, "%p %llx.%llx frag %u mds%d (%d/%d)\n",
+ inode, ceph_vinop(inode), frag.frag,
+ mds, (int)r, frag.ndist);
if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
CEPH_MDS_STATE_ACTIVE &&
!ceph_mdsmap_is_laggy(mdsc->mdsmap, mds))
@@ -1377,9 +1392,8 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
if (frag.mds >= 0) {
/* choose auth mds */
mds = frag.mds;
- dout("%s %p %llx.%llx frag %u mds%d (auth)\n",
- __func__, inode, ceph_vinop(inode),
- frag.frag, mds);
+ doutc(cl, "%p %llx.%llx frag %u mds%d (auth)\n",
+ inode, ceph_vinop(inode), frag.frag, mds);
if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
CEPH_MDS_STATE_ACTIVE) {
if (!ceph_mdsmap_is_laggy(mdsc->mdsmap,
@@ -1403,9 +1417,9 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
goto random;
}
mds = cap->session->s_mds;
- dout("%s %p %llx.%llx mds%d (%scap %p)\n", __func__,
- inode, ceph_vinop(inode), mds,
- cap == ci->i_auth_cap ? "auth " : "", cap);
+ doutc(cl, "%p %llx.%llx mds%d (%scap %p)\n", inode,
+ ceph_vinop(inode), mds,
+ cap == ci->i_auth_cap ? "auth " : "", cap);
spin_unlock(&ci->i_ceph_lock);
out:
iput(inode);
@@ -1416,7 +1430,7 @@ random:
*random = true;
mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
- dout("%s chose random mds%d\n", __func__, mds);
+ doutc(cl, "chose random mds%d\n", mds);
return mds;
}
@@ -1529,6 +1543,7 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
int metadata_key_count = 0;
struct ceph_options *opt = mdsc->fsc->client->options;
struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
+ struct ceph_client *cl = mdsc->fsc->client;
size_t size, count;
void *p, *end;
int ret;
@@ -1567,7 +1582,7 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
GFP_NOFS, false);
if (!msg) {
- pr_err("ENOMEM creating session open msg\n");
+ pr_err_client(cl, "ENOMEM creating session open msg\n");
return ERR_PTR(-ENOMEM);
}
p = msg->front.iov_base;
@@ -1607,14 +1622,14 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
ret = encode_supported_features(&p, end);
if (ret) {
- pr_err("encode_supported_features failed!\n");
+ pr_err_client(cl, "encode_supported_features failed!\n");
ceph_msg_put(msg);
return ERR_PTR(ret);
}
ret = encode_metric_spec(&p, end);
if (ret) {
- pr_err("encode_metric_spec failed!\n");
+ pr_err_client(cl, "encode_metric_spec failed!\n");
ceph_msg_put(msg);
return ERR_PTR(ret);
}
@@ -1642,8 +1657,8 @@ static int __open_session(struct ceph_mds_client *mdsc,
/* wait for mds to go active? */
mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
- dout("open_session to mds%d (%s)\n", mds,
- ceph_mds_state_name(mstate));
+ doutc(mdsc->fsc->client, "open_session to mds%d (%s)\n", mds,
+ ceph_mds_state_name(mstate));
session->s_state = CEPH_MDS_SESSION_OPENING;
session->s_renew_requested = jiffies;
@@ -1686,8 +1701,9 @@ struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
struct ceph_mds_session *session;
+ struct ceph_client *cl = mdsc->fsc->client;
- dout("open_export_target_session to mds%d\n", target);
+ doutc(cl, "to mds%d\n", target);
mutex_lock(&mdsc->mutex);
session = __open_export_target_session(mdsc, target);
@@ -1702,13 +1718,14 @@ static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
struct ceph_mds_info *mi;
struct ceph_mds_session *ts;
int i, mds = session->s_mds;
+ struct ceph_client *cl = mdsc->fsc->client;
if (mds >= mdsc->mdsmap->possible_max_rank)
return;
mi = &mdsc->mdsmap->m_info[mds];
- dout("open_export_target_sessions for mds%d (%d targets)\n",
- session->s_mds, mi->num_export_targets);
+ doutc(cl, "for mds%d (%d targets)\n", session->s_mds,
+ mi->num_export_targets);
for (i = 0; i < mi->num_export_targets; i++) {
ts = __open_export_target_session(mdsc, mi->export_targets[i]);
@@ -1731,11 +1748,13 @@ void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
static void detach_cap_releases(struct ceph_mds_session *session,
struct list_head *target)
{
+ struct ceph_client *cl = session->s_mdsc->fsc->client;
+
lockdep_assert_held(&session->s_cap_lock);
list_splice_init(&session->s_cap_releases, target);
session->s_num_cap_releases = 0;
- dout("dispose_cap_releases mds%d\n", session->s_mds);
+ doutc(cl, "mds%d\n", session->s_mds);
}
static void dispose_cap_releases(struct ceph_mds_client *mdsc,
@@ -1753,16 +1772,17 @@ static void dispose_cap_releases(struct ceph_mds_client *mdsc,
static void cleanup_session_requests(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
struct rb_node *p;
- dout("cleanup_session_requests mds%d\n", session->s_mds);
+ doutc(cl, "mds%d\n", session->s_mds);
mutex_lock(&mdsc->mutex);
while (!list_empty(&session->s_unsafe)) {
req = list_first_entry(&session->s_unsafe,
struct ceph_mds_request, r_unsafe_item);
- pr_warn_ratelimited(" dropping unsafe request %llu\n",
- req->r_tid);
+ pr_warn_ratelimited_client(cl, " dropping unsafe request %llu\n",
+ req->r_tid);
if (req->r_target_inode)
mapping_set_error(req->r_target_inode->i_mapping, -EIO);
if (req->r_unsafe_dir)
@@ -1791,13 +1811,14 @@ int ceph_iterate_session_caps(struct ceph_mds_session *session,
int (*cb)(struct inode *, int mds, void *),
void *arg)
{
+ struct ceph_client *cl = session->s_mdsc->fsc->client;
struct list_head *p;
struct ceph_cap *cap;
struct inode *inode, *last_inode = NULL;
struct ceph_cap *old_cap = NULL;
int ret;
- dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
+ doutc(cl, "%p mds%d\n", session, session->s_mds);
spin_lock(&session->s_cap_lock);
p = session->s_caps.next;
while (p != &session->s_caps) {
@@ -1828,8 +1849,7 @@ int ceph_iterate_session_caps(struct ceph_mds_session *session,
spin_lock(&session->s_cap_lock);
p = p->next;
if (!cap->ci) {
- dout("iterate_session_caps finishing cap %p removal\n",
- cap);
+ doutc(cl, "finishing cap %p removal\n", cap);
BUG_ON(cap->session != session);
cap->session = NULL;
list_del_init(&cap->session_caps);
@@ -1858,6 +1878,7 @@ out:
static int remove_session_caps_cb(struct inode *inode, int mds, void *arg)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
bool invalidate = false;
struct ceph_cap *cap;
int iputs = 0;
@@ -1865,8 +1886,8 @@ static int remove_session_caps_cb(struct inode *inode, int mds, void *arg)
spin_lock(&ci->i_ceph_lock);
cap = __get_cap_for_mds(ci, mds);
if (cap) {
- dout(" removing cap %p, ci is %p, inode is %p\n",
- cap, ci, &ci->netfs.inode);
+ doutc(cl, " removing cap %p, ci is %p, inode is %p\n",
+ cap, ci, &ci->netfs.inode);
iputs = ceph_purge_inode_cap(inode, cap, &invalidate);
}
@@ -1890,7 +1911,7 @@ static void remove_session_caps(struct ceph_mds_session *session)
struct super_block *sb = fsc->sb;
LIST_HEAD(dispose);
- dout("remove_session_caps on %p\n", session);
+ doutc(fsc->client, "on %p\n", session);
ceph_iterate_session_caps(session, remove_session_caps_cb, fsc);
wake_up_all(&fsc->mdsc->cap_flushing_wq);
@@ -1971,7 +1992,9 @@ static int wake_up_session_cb(struct inode *inode, int mds, void *arg)
static void wake_up_session_caps(struct ceph_mds_session *session, int ev)
{
- dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
+ struct ceph_client *cl = session->s_mdsc->fsc->client;
+
+ doutc(cl, "session %p mds%d\n", session, session->s_mds);
ceph_iterate_session_caps(session, wake_up_session_cb,
(void *)(unsigned long)ev);
}
@@ -1985,25 +2008,26 @@ static void wake_up_session_caps(struct ceph_mds_session *session, int ev)
static int send_renew_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_msg *msg;
int state;
if (time_after_eq(jiffies, session->s_cap_ttl) &&
time_after_eq(session->s_cap_ttl, session->s_renew_requested))
- pr_info("mds%d caps stale\n", session->s_mds);
+ pr_info_client(cl, "mds%d caps stale\n", session->s_mds);
session->s_renew_requested = jiffies;
/* do not try to renew caps until a recovering mds has reconnected
* with its clients. */
state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
if (state < CEPH_MDS_STATE_RECONNECT) {
- dout("send_renew_caps ignoring mds%d (%s)\n",
- session->s_mds, ceph_mds_state_name(state));
+ doutc(cl, "ignoring mds%d (%s)\n", session->s_mds,
+ ceph_mds_state_name(state));
return 0;
}
- dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
- ceph_mds_state_name(state));
+ doutc(cl, "to mds%d (%s)\n", session->s_mds,
+ ceph_mds_state_name(state));
msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
++session->s_renew_seq);
if (!msg)
@@ -2015,10 +2039,11 @@ static int send_renew_caps(struct ceph_mds_client *mdsc,
static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session, u64 seq)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_msg *msg;
- dout("send_flushmsg_ack to mds%d (%s)s seq %lld\n",
- session->s_mds, ceph_session_state_name(session->s_state), seq);
+ doutc(cl, "to mds%d (%s)s seq %lld\n", session->s_mds,
+ ceph_session_state_name(session->s_state), seq);
msg = ceph_create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
if (!msg)
return -ENOMEM;
@@ -2035,6 +2060,7 @@ static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
static void renewed_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session, int is_renew)
{
+ struct ceph_client *cl = mdsc->fsc->client;
int was_stale;
int wake = 0;
@@ -2046,15 +2072,17 @@ static void renewed_caps(struct ceph_mds_client *mdsc,
if (was_stale) {
if (time_before(jiffies, session->s_cap_ttl)) {
- pr_info("mds%d caps renewed\n", session->s_mds);
+ pr_info_client(cl, "mds%d caps renewed\n",
+ session->s_mds);
wake = 1;
} else {
- pr_info("mds%d caps still stale\n", session->s_mds);
+ pr_info_client(cl, "mds%d caps still stale\n",
+ session->s_mds);
}
}
- dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
- session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
- time_before(jiffies, session->s_cap_ttl) ? "stale" : "fresh");
+ doutc(cl, "mds%d ttl now %lu, was %s, now %s\n", session->s_mds,
+ session->s_cap_ttl, was_stale ? "stale" : "fresh",
+ time_before(jiffies, session->s_cap_ttl) ? "stale" : "fresh");
spin_unlock(&session->s_cap_lock);
if (wake)
@@ -2066,11 +2094,11 @@ static void renewed_caps(struct ceph_mds_client *mdsc,
*/
static int request_close_session(struct ceph_mds_session *session)
{
+ struct ceph_client *cl = session->s_mdsc->fsc->client;
struct ceph_msg *msg;
- dout("request_close_session mds%d state %s seq %lld\n",
- session->s_mds, ceph_session_state_name(session->s_state),
- session->s_seq);
+ doutc(cl, "mds%d state %s seq %lld\n", session->s_mds,
+ ceph_session_state_name(session->s_state), session->s_seq);
msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_CLOSE,
session->s_seq);
if (!msg)
@@ -2126,6 +2154,8 @@ out:
*/
static int trim_caps_cb(struct inode *inode, int mds, void *arg)
{
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
int *remaining = arg;
struct ceph_inode_info *ci = ceph_inode(inode);
int used, wanted, oissued, mine;
@@ -2145,9 +2175,10 @@ static int trim_caps_cb(struct inode *inode, int mds, void *arg)
wanted = __ceph_caps_file_wanted(ci);
oissued = __ceph_caps_issued_other(ci, cap);
- dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
- inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
- ceph_cap_string(used), ceph_cap_string(wanted));
+ doutc(cl, "%p %llx.%llx cap %p mine %s oissued %s used %s wanted %s\n",
+ inode, ceph_vinop(inode), cap, ceph_cap_string(mine),
+ ceph_cap_string(oissued), ceph_cap_string(used),
+ ceph_cap_string(wanted));
if (cap == ci->i_auth_cap) {
if (ci->i_dirty_caps || ci->i_flushing_caps ||
!list_empty(&ci->i_cap_snaps))
@@ -2173,7 +2204,7 @@ static int trim_caps_cb(struct inode *inode, int mds, void *arg)
if (oissued) {
/* we aren't the only cap.. just remove us */
- ceph_remove_cap(cap, true);
+ ceph_remove_cap(mdsc, cap, true);
(*remaining)--;
} else {
struct dentry *dentry;
@@ -2187,8 +2218,8 @@ static int trim_caps_cb(struct inode *inode, int mds, void *arg)
count = atomic_read(&inode->i_count);
if (count == 1)
(*remaining)--;
- dout("trim_caps_cb %p cap %p pruned, count now %d\n",
- inode, cap, count);
+ doutc(cl, "%p %llx.%llx cap %p pruned, count now %d\n",
+ inode, ceph_vinop(inode), cap, count);
} else {
dput(dentry);
}
@@ -2207,17 +2238,18 @@ int ceph_trim_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
int max_caps)
{
+ struct ceph_client *cl = mdsc->fsc->client;
int trim_caps = session->s_nr_caps - max_caps;
- dout("trim_caps mds%d start: %d / %d, trim %d\n",
- session->s_mds, session->s_nr_caps, max_caps, trim_caps);
+ doutc(cl, "mds%d start: %d / %d, trim %d\n", session->s_mds,
+ session->s_nr_caps, max_caps, trim_caps);
if (trim_caps > 0) {
int remaining = trim_caps;
ceph_iterate_session_caps(session, trim_caps_cb, &remaining);
- dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
- session->s_mds, session->s_nr_caps, max_caps,
- trim_caps - remaining);
+ doutc(cl, "mds%d done: %d / %d, trimmed %d\n",
+ session->s_mds, session->s_nr_caps, max_caps,
+ trim_caps - remaining);
}
ceph_flush_cap_releases(mdsc, session);
@@ -2227,6 +2259,7 @@ int ceph_trim_caps(struct ceph_mds_client *mdsc,
static int check_caps_flush(struct ceph_mds_client *mdsc,
u64 want_flush_tid)
{
+ struct ceph_client *cl = mdsc->fsc->client;
int ret = 1;
spin_lock(&mdsc->cap_dirty_lock);
@@ -2235,8 +2268,8 @@ static int check_caps_flush(struct ceph_mds_client *mdsc,
list_first_entry(&mdsc->cap_flush_list,
struct ceph_cap_flush, g_list);
if (cf->tid <= want_flush_tid) {
- dout("check_caps_flush still flushing tid "
- "%llu <= %llu\n", cf->tid, want_flush_tid);
+ doutc(cl, "still flushing tid %llu <= %llu\n",
+ cf->tid, want_flush_tid);
ret = 0;
}
}
@@ -2252,12 +2285,14 @@ static int check_caps_flush(struct ceph_mds_client *mdsc,
static void wait_caps_flush(struct ceph_mds_client *mdsc,
u64 want_flush_tid)
{
- dout("check_caps_flush want %llu\n", want_flush_tid);
+ struct ceph_client *cl = mdsc->fsc->client;
+
+ doutc(cl, "want %llu\n", want_flush_tid);
wait_event(mdsc->cap_flushing_wq,
check_caps_flush(mdsc, want_flush_tid));
- dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
+ doutc(cl, "ok, flushed thru %llu\n", want_flush_tid);
}
/*
@@ -2266,6 +2301,7 @@ static void wait_caps_flush(struct ceph_mds_client *mdsc,
static void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_msg *msg = NULL;
struct ceph_mds_cap_release *head;
struct ceph_mds_cap_item *item;
@@ -2324,7 +2360,7 @@ again:
msg->front.iov_len += sizeof(*cap_barrier);
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
- dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
+ doutc(cl, "mds%d %p\n", session->s_mds, msg);
ceph_con_send(&session->s_con, msg);
msg = NULL;
}
@@ -2344,13 +2380,13 @@ again:
msg->front.iov_len += sizeof(*cap_barrier);
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
- dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
+ doutc(cl, "mds%d %p\n", session->s_mds, msg);
ceph_con_send(&session->s_con, msg);
}
return;
out_err:
- pr_err("send_cap_releases mds%d, failed to allocate message\n",
- session->s_mds);
+ pr_err_client(cl, "mds%d, failed to allocate message\n",
+ session->s_mds);
spin_lock(&session->s_cap_lock);
list_splice(&tmp_list, &session->s_cap_releases);
session->s_num_cap_releases += num_cap_releases;
@@ -2373,16 +2409,17 @@ static void ceph_cap_release_work(struct work_struct *work)
void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
+ struct ceph_client *cl = mdsc->fsc->client;
if (mdsc->stopping)
return;
ceph_get_mds_session(session);
if (queue_work(mdsc->fsc->cap_wq,
&session->s_cap_release_work)) {
- dout("cap release work queued\n");
+ doutc(cl, "cap release work queued\n");
} else {
ceph_put_mds_session(session);
- dout("failed to queue cap release work\n");
+ doutc(cl, "failed to queue cap release work\n");
}
}
@@ -2410,13 +2447,14 @@ static void ceph_cap_reclaim_work(struct work_struct *work)
void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc)
{
+ struct ceph_client *cl = mdsc->fsc->client;
if (mdsc->stopping)
return;
if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_reclaim_work)) {
- dout("caps reclaim work queued\n");
+ doutc(cl, "caps reclaim work queued\n");
} else {
- dout("failed to queue caps release work\n");
+ doutc(cl, "failed to queue caps release work\n");
}
}
@@ -2588,6 +2626,7 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
/**
* ceph_mdsc_build_path - build a path string to a given dentry
+ * @mdsc: mds client
* @dentry: dentry to which path should be built
* @plen: returned length of string
* @pbase: returned base inode number
@@ -2607,9 +2646,10 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
* Encode hidden .snap dirs as a double /, i.e.
* foo/.snap/bar -> foo//bar
*/
-char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *pbase,
- int for_wire)
+char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
+ int *plen, u64 *pbase, int for_wire)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct dentry *cur;
struct inode *inode;
char *path;
@@ -2635,8 +2675,7 @@ retry:
spin_lock(&cur->d_lock);
inode = d_inode(cur);
if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
- dout("build_path path+%d: %p SNAPDIR\n",
- pos, cur);
+ doutc(cl, "path+%d: %p SNAPDIR\n", pos, cur);
spin_unlock(&cur->d_lock);
parent = dget_parent(cur);
} else if (for_wire && inode && dentry != cur &&
@@ -2714,21 +2753,21 @@ retry:
* A rename didn't occur, but somehow we didn't end up where
* we thought we would. Throw a warning and try again.
*/
- pr_warn("build_path did not end path lookup where expected (pos = %d)\n",
- pos);
+ pr_warn_client(cl, "did not end path lookup where expected (pos = %d)\n",
+ pos);
goto retry;
}
*pbase = base;
*plen = PATH_MAX - 1 - pos;
- dout("build_path on %p %d built %llx '%.*s'\n",
- dentry, d_count(dentry), base, *plen, path + pos);
+ doutc(cl, "on %p %d built %llx '%.*s'\n", dentry, d_count(dentry),
+ base, *plen, path + pos);
return path + pos;
}
-static int build_dentry_path(struct dentry *dentry, struct inode *dir,
- const char **ppath, int *ppathlen, u64 *pino,
- bool *pfreepath, bool parent_locked)
+static int build_dentry_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
+ struct inode *dir, const char **ppath, int *ppathlen,
+ u64 *pino, bool *pfreepath, bool parent_locked)
{
char *path;
@@ -2744,7 +2783,7 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
return 0;
}
rcu_read_unlock();
- path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
+ path = ceph_mdsc_build_path(mdsc, dentry, ppathlen, pino, 1);
if (IS_ERR(path))
return PTR_ERR(path);
*ppath = path;
@@ -2756,6 +2795,7 @@ static int build_inode_path(struct inode *inode,
const char **ppath, int *ppathlen, u64 *pino,
bool *pfreepath)
{
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
struct dentry *dentry;
char *path;
@@ -2765,7 +2805,7 @@ static int build_inode_path(struct inode *inode,
return 0;
}
dentry = d_find_alias(inode);
- path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
+ path = ceph_mdsc_build_path(mdsc, dentry, ppathlen, pino, 1);
dput(dentry);
if (IS_ERR(path))
return PTR_ERR(path);
@@ -2778,27 +2818,28 @@ static int build_inode_path(struct inode *inode,
* request arguments may be specified via an inode *, a dentry *, or
* an explicit ino+path.
*/
-static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
- struct inode *rdiri, const char *rpath,
- u64 rino, const char **ppath, int *pathlen,
- u64 *ino, bool *freepath, bool parent_locked)
+static int set_request_path_attr(struct ceph_mds_client *mdsc, struct inode *rinode,
+ struct dentry *rdentry, struct inode *rdiri,
+ const char *rpath, u64 rino, const char **ppath,
+ int *pathlen, u64 *ino, bool *freepath,
+ bool parent_locked)
{
+ struct ceph_client *cl = mdsc->fsc->client;
int r = 0;
if (rinode) {
r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
- dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
- ceph_snap(rinode));
+ doutc(cl, " inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
+ ceph_snap(rinode));
} else if (rdentry) {
- r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
+ r = build_dentry_path(mdsc, rdentry, rdiri, ppath, pathlen, ino,
freepath, parent_locked);
- dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
- *ppath);
+ doutc(cl, " dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, *ppath);
} else if (rpath || rino) {
*ino = rino;
*ppath = rpath;
*pathlen = rpath ? strlen(rpath) : 0;
- dout(" path %.*s\n", *pathlen, rpath);
+ doutc(cl, " path %.*s\n", *pathlen, rpath);
}
return r;
@@ -2840,6 +2881,17 @@ static void encode_mclientrequest_tail(void **p,
}
}
+static inline u16 mds_supported_head_version(struct ceph_mds_session *session)
+{
+ if (!test_bit(CEPHFS_FEATURE_32BITS_RETRY_FWD, &session->s_features))
+ return 1;
+
+ if (!test_bit(CEPHFS_FEATURE_HAS_OWNER_UIDGID, &session->s_features))
+ return 2;
+
+ return CEPH_MDS_REQUEST_HEAD_VERSION;
+}
+
static struct ceph_mds_request_head_legacy *
find_legacy_request_head(void *p, u64 features)
{
@@ -2861,6 +2913,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
{
int mds = session->s_mds;
struct ceph_mds_client *mdsc = session->s_mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_msg *msg;
struct ceph_mds_request_head_legacy *lhead;
const char *path1 = NULL;
@@ -2874,10 +2927,11 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
void *p, *end;
int ret;
bool legacy = !(session->s_con.peer_features & CEPH_FEATURE_FS_BTIME);
- bool old_version = !test_bit(CEPHFS_FEATURE_32BITS_RETRY_FWD,
- &session->s_features);
+ u16 request_head_version = mds_supported_head_version(session);
+ kuid_t caller_fsuid = req->r_cred->fsuid;
+ kgid_t caller_fsgid = req->r_cred->fsgid;
- ret = set_request_path_attr(req->r_inode, req->r_dentry,
+ ret = set_request_path_attr(mdsc, req->r_inode, req->r_dentry,
req->r_parent, req->r_path1, req->r_ino1.ino,
&path1, &pathlen1, &ino1, &freepath1,
test_bit(CEPH_MDS_R_PARENT_LOCKED,
@@ -2891,7 +2945,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
if (req->r_old_dentry &&
!(req->r_old_dentry->d_flags & DCACHE_DISCONNECTED))
old_dentry = req->r_old_dentry;
- ret = set_request_path_attr(NULL, old_dentry,
+ ret = set_request_path_attr(mdsc, NULL, old_dentry,
req->r_old_dentry_dir,
req->r_path2, req->r_ino2.ino,
&path2, &pathlen2, &ino2, &freepath2, true);
@@ -2916,8 +2970,10 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
*/
if (legacy)
len = sizeof(struct ceph_mds_request_head_legacy);
- else if (old_version)
+ else if (request_head_version == 1)
len = sizeof(struct ceph_mds_request_head_old);
+ else if (request_head_version == 2)
+ len = offsetofend(struct ceph_mds_request_head, ext_num_fwd);
else
len = sizeof(struct ceph_mds_request_head);
@@ -2967,6 +3023,30 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
lhead = find_legacy_request_head(msg->front.iov_base,
session->s_con.peer_features);
+ if ((req->r_mnt_idmap != &nop_mnt_idmap) &&
+ !test_bit(CEPHFS_FEATURE_HAS_OWNER_UIDGID, &session->s_features)) {
+ WARN_ON_ONCE(!IS_CEPH_MDS_OP_NEWINODE(req->r_op));
+
+ if (enable_unsafe_idmap) {
+ pr_warn_once_client(cl,
+ "idmapped mount is used and CEPHFS_FEATURE_HAS_OWNER_UIDGID"
+ " is not supported by MDS. UID/GID-based restrictions may"
+ " not work properly.\n");
+
+ caller_fsuid = from_vfsuid(req->r_mnt_idmap, &init_user_ns,
+ VFSUIDT_INIT(req->r_cred->fsuid));
+ caller_fsgid = from_vfsgid(req->r_mnt_idmap, &init_user_ns,
+ VFSGIDT_INIT(req->r_cred->fsgid));
+ } else {
+ pr_err_ratelimited_client(cl,
+ "idmapped mount is used and CEPHFS_FEATURE_HAS_OWNER_UIDGID"
+ " is not supported by MDS. Fail request with -EIO.\n");
+
+ ret = -EIO;
+ goto out_err;
+ }
+ }
+
/*
* The ceph_mds_request_head_legacy didn't contain a version field, and
* one was added when we moved the message version from 3->4.
@@ -2974,17 +3054,40 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
if (legacy) {
msg->hdr.version = cpu_to_le16(3);
p = msg->front.iov_base + sizeof(*lhead);
- } else if (old_version) {
+ } else if (request_head_version == 1) {
struct ceph_mds_request_head_old *ohead = msg->front.iov_base;
msg->hdr.version = cpu_to_le16(4);
ohead->version = cpu_to_le16(1);
p = msg->front.iov_base + sizeof(*ohead);
+ } else if (request_head_version == 2) {
+ struct ceph_mds_request_head *nhead = msg->front.iov_base;
+
+ msg->hdr.version = cpu_to_le16(6);
+ nhead->version = cpu_to_le16(2);
+
+ p = msg->front.iov_base + offsetofend(struct ceph_mds_request_head, ext_num_fwd);
} else {
struct ceph_mds_request_head *nhead = msg->front.iov_base;
+ kuid_t owner_fsuid;
+ kgid_t owner_fsgid;
msg->hdr.version = cpu_to_le16(6);
nhead->version = cpu_to_le16(CEPH_MDS_REQUEST_HEAD_VERSION);
+ nhead->struct_len = cpu_to_le32(sizeof(struct ceph_mds_request_head));
+
+ if (IS_CEPH_MDS_OP_NEWINODE(req->r_op)) {
+ owner_fsuid = from_vfsuid(req->r_mnt_idmap, &init_user_ns,
+ VFSUIDT_INIT(req->r_cred->fsuid));
+ owner_fsgid = from_vfsgid(req->r_mnt_idmap, &init_user_ns,
+ VFSGIDT_INIT(req->r_cred->fsgid));
+ nhead->owner_uid = cpu_to_le32(from_kuid(&init_user_ns, owner_fsuid));
+ nhead->owner_gid = cpu_to_le32(from_kgid(&init_user_ns, owner_fsgid));
+ } else {
+ nhead->owner_uid = cpu_to_le32(-1);
+ nhead->owner_gid = cpu_to_le32(-1);
+ }
+
p = msg->front.iov_base + sizeof(*nhead);
}
@@ -2993,9 +3096,9 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
lhead->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
lhead->op = cpu_to_le32(req->r_op);
lhead->caller_uid = cpu_to_le32(from_kuid(&init_user_ns,
- req->r_cred->fsuid));
+ caller_fsuid));
lhead->caller_gid = cpu_to_le32(from_kgid(&init_user_ns,
- req->r_cred->fsgid));
+ caller_fsgid));
lhead->ino = cpu_to_le64(req->r_deleg_ino);
lhead->args = req->r_args;
@@ -3099,6 +3202,7 @@ static int __prepare_send_request(struct ceph_mds_session *session,
{
int mds = session->s_mds;
struct ceph_mds_client *mdsc = session->s_mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request_head_legacy *lhead;
struct ceph_mds_request_head *nhead;
struct ceph_msg *msg;
@@ -3117,8 +3221,8 @@ static int __prepare_send_request(struct ceph_mds_session *session,
old_max_retry = 1 << (old_max_retry * BITS_PER_BYTE);
if ((old_version && req->r_attempts >= old_max_retry) ||
((uint32_t)req->r_attempts >= U32_MAX)) {
- pr_warn_ratelimited("%s request tid %llu seq overflow\n",
- __func__, req->r_tid);
+ pr_warn_ratelimited_client(cl, "request tid %llu seq overflow\n",
+ req->r_tid);
return -EMULTIHOP;
}
}
@@ -3133,8 +3237,8 @@ static int __prepare_send_request(struct ceph_mds_session *session,
else
req->r_sent_on_mseq = -1;
}
- dout("%s %p tid %lld %s (attempt %d)\n", __func__, req,
- req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
+ doutc(cl, "%p tid %lld %s (attempt %d)\n", req, req->r_tid,
+ ceph_mds_op_name(req->r_op), req->r_attempts);
if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
void *p;
@@ -3202,7 +3306,7 @@ static int __prepare_send_request(struct ceph_mds_session *session,
nhead->ext_num_retry = cpu_to_le32(req->r_attempts - 1);
}
- dout(" r_parent = %p\n", req->r_parent);
+ doutc(cl, " r_parent = %p\n", req->r_parent);
return 0;
}
@@ -3230,6 +3334,7 @@ static int __send_request(struct ceph_mds_session *session,
static void __do_request(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_session *session = NULL;
int mds = -1;
int err = 0;
@@ -3242,29 +3347,29 @@ static void __do_request(struct ceph_mds_client *mdsc,
}
if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO) {
- dout("do_request metadata corrupted\n");
+ doutc(cl, "metadata corrupted\n");
err = -EIO;
goto finish;
}
if (req->r_timeout &&
time_after_eq(jiffies, req->r_started + req->r_timeout)) {
- dout("do_request timed out\n");
+ doutc(cl, "timed out\n");
err = -ETIMEDOUT;
goto finish;
}
if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
- dout("do_request forced umount\n");
+ doutc(cl, "forced umount\n");
err = -EIO;
goto finish;
}
if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
if (mdsc->mdsmap_err) {
err = mdsc->mdsmap_err;
- dout("do_request mdsmap err %d\n", err);
+ doutc(cl, "mdsmap err %d\n", err);
goto finish;
}
if (mdsc->mdsmap->m_epoch == 0) {
- dout("do_request no mdsmap, waiting for map\n");
+ doutc(cl, "no mdsmap, waiting for map\n");
list_add(&req->r_wait, &mdsc->waiting_for_map);
return;
}
@@ -3285,7 +3390,7 @@ static void __do_request(struct ceph_mds_client *mdsc,
err = -EJUKEBOX;
goto finish;
}
- dout("do_request no mds or not active, waiting for map\n");
+ doutc(cl, "no mds or not active, waiting for map\n");
list_add(&req->r_wait, &mdsc->waiting_for_map);
return;
}
@@ -3301,8 +3406,8 @@ static void __do_request(struct ceph_mds_client *mdsc,
}
req->r_session = ceph_get_mds_session(session);
- dout("do_request mds%d session %p state %s\n", mds, session,
- ceph_session_state_name(session->s_state));
+ doutc(cl, "mds%d session %p state %s\n", mds, session,
+ ceph_session_state_name(session->s_state));
/*
* The old ceph will crash the MDSs when see unknown OPs
@@ -3393,8 +3498,8 @@ static void __do_request(struct ceph_mds_client *mdsc,
spin_lock(&ci->i_ceph_lock);
cap = ci->i_auth_cap;
if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE && mds != cap->mds) {
- dout("do_request session changed for auth cap %d -> %d\n",
- cap->session->s_mds, session->s_mds);
+ doutc(cl, "session changed for auth cap %d -> %d\n",
+ cap->session->s_mds, session->s_mds);
/* Remove the auth cap from old session */
spin_lock(&cap->session->s_cap_lock);
@@ -3421,7 +3526,7 @@ out_session:
ceph_put_mds_session(session);
finish:
if (err) {
- dout("__do_request early error %d\n", err);
+ doutc(cl, "early error %d\n", err);
req->r_err = err;
complete_request(mdsc, req);
__unregister_request(mdsc, req);
@@ -3435,6 +3540,7 @@ finish:
static void __wake_requests(struct ceph_mds_client *mdsc,
struct list_head *head)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
LIST_HEAD(tmp_list);
@@ -3444,7 +3550,8 @@ static void __wake_requests(struct ceph_mds_client *mdsc,
req = list_entry(tmp_list.next,
struct ceph_mds_request, r_wait);
list_del_init(&req->r_wait);
- dout(" wake request %p tid %llu\n", req, req->r_tid);
+ doutc(cl, " wake request %p tid %llu\n", req,
+ req->r_tid);
__do_request(mdsc, req);
}
}
@@ -3455,10 +3562,11 @@ static void __wake_requests(struct ceph_mds_client *mdsc,
*/
static void kick_requests(struct ceph_mds_client *mdsc, int mds)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
struct rb_node *p = rb_first(&mdsc->request_tree);
- dout("kick_requests mds%d\n", mds);
+ doutc(cl, "kick_requests mds%d\n", mds);
while (p) {
req = rb_entry(p, struct ceph_mds_request, r_node);
p = rb_next(p);
@@ -3468,7 +3576,7 @@ static void kick_requests(struct ceph_mds_client *mdsc, int mds)
continue; /* only new requests */
if (req->r_session &&
req->r_session->s_mds == mds) {
- dout(" kicking tid %llu\n", req->r_tid);
+ doutc(cl, " kicking tid %llu\n", req->r_tid);
list_del_init(&req->r_wait);
__do_request(mdsc, req);
}
@@ -3478,6 +3586,7 @@ static void kick_requests(struct ceph_mds_client *mdsc, int mds)
int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
struct ceph_mds_request *req)
{
+ struct ceph_client *cl = mdsc->fsc->client;
int err = 0;
/* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
@@ -3499,8 +3608,7 @@ int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
if (req->r_inode) {
err = ceph_wait_on_async_create(req->r_inode);
if (err) {
- dout("%s: wait for async create returned: %d\n",
- __func__, err);
+ doutc(cl, "wait for async create returned: %d\n", err);
return err;
}
}
@@ -3508,13 +3616,12 @@ int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
if (!err && req->r_old_inode) {
err = ceph_wait_on_async_create(req->r_old_inode);
if (err) {
- dout("%s: wait for async create returned: %d\n",
- __func__, err);
+ doutc(cl, "wait for async create returned: %d\n", err);
return err;
}
}
- dout("submit_request on %p for inode %p\n", req, dir);
+ doutc(cl, "submit_request on %p for inode %p\n", req, dir);
mutex_lock(&mdsc->mutex);
__register_request(mdsc, req, dir);
__do_request(mdsc, req);
@@ -3527,10 +3634,11 @@ int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req,
ceph_mds_request_wait_callback_t wait_func)
{
+ struct ceph_client *cl = mdsc->fsc->client;
int err;
/* wait */
- dout("do_request waiting\n");
+ doutc(cl, "do_request waiting\n");
if (wait_func) {
err = wait_func(mdsc, req);
} else {
@@ -3544,14 +3652,14 @@ int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc,
else
err = timeleft; /* killed */
}
- dout("do_request waited, got %d\n", err);
+ doutc(cl, "do_request waited, got %d\n", err);
mutex_lock(&mdsc->mutex);
/* only abort if we didn't race with a real reply */
if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
err = le32_to_cpu(req->r_reply_info.head->result);
} else if (err < 0) {
- dout("aborted request %lld with %d\n", req->r_tid, err);
+ doutc(cl, "aborted request %lld with %d\n", req->r_tid, err);
/*
* ensure we aren't running concurrently with
@@ -3582,15 +3690,16 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
struct inode *dir,
struct ceph_mds_request *req)
{
+ struct ceph_client *cl = mdsc->fsc->client;
int err;
- dout("do_request on %p\n", req);
+ doutc(cl, "do_request on %p\n", req);
/* issue */
err = ceph_mdsc_submit_request(mdsc, dir, req);
if (!err)
err = ceph_mdsc_wait_request(mdsc, req, NULL);
- dout("do_request %p done, result %d\n", req, err);
+ doutc(cl, "do_request %p done, result %d\n", req, err);
return err;
}
@@ -3602,8 +3711,10 @@ void ceph_invalidate_dir_request(struct ceph_mds_request *req)
{
struct inode *dir = req->r_parent;
struct inode *old_dir = req->r_old_dentry_dir;
+ struct ceph_client *cl = req->r_mdsc->fsc->client;
- dout("invalidate_dir_request %p %p (complete, lease(s))\n", dir, old_dir);
+ doutc(cl, "invalidate_dir_request %p %p (complete, lease(s))\n",
+ dir, old_dir);
ceph_dir_clear_complete(dir);
if (old_dir)
@@ -3624,6 +3735,7 @@ void ceph_invalidate_dir_request(struct ceph_mds_request *req)
static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
{
struct ceph_mds_client *mdsc = session->s_mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
struct ceph_mds_reply_head *head = msg->front.iov_base;
struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */
@@ -3634,7 +3746,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
bool close_sessions = false;
if (msg->front.iov_len < sizeof(*head)) {
- pr_err("mdsc_handle_reply got corrupt (short) reply\n");
+ pr_err_client(cl, "got corrupt (short) reply\n");
ceph_msg_dump(msg);
return;
}
@@ -3644,17 +3756,17 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
mutex_lock(&mdsc->mutex);
req = lookup_get_request(mdsc, tid);
if (!req) {
- dout("handle_reply on unknown tid %llu\n", tid);
+ doutc(cl, "on unknown tid %llu\n", tid);
mutex_unlock(&mdsc->mutex);
return;
}
- dout("handle_reply %p\n", req);
+ doutc(cl, "handle_reply %p\n", req);
/* correct session? */
if (req->r_session != session) {
- pr_err("mdsc_handle_reply got %llu on session mds%d"
- " not mds%d\n", tid, session->s_mds,
- req->r_session ? req->r_session->s_mds : -1);
+ pr_err_client(cl, "got %llu on session mds%d not mds%d\n",
+ tid, session->s_mds,
+ req->r_session ? req->r_session->s_mds : -1);
mutex_unlock(&mdsc->mutex);
goto out;
}
@@ -3662,14 +3774,14 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
/* dup? */
if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) ||
(test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) {
- pr_warn("got a dup %s reply on %llu from mds%d\n",
- head->safe ? "safe" : "unsafe", tid, mds);
+ pr_warn_client(cl, "got a dup %s reply on %llu from mds%d\n",
+ head->safe ? "safe" : "unsafe", tid, mds);
mutex_unlock(&mdsc->mutex);
goto out;
}
if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) {
- pr_warn("got unsafe after safe on %llu from mds%d\n",
- tid, mds);
+ pr_warn_client(cl, "got unsafe after safe on %llu from mds%d\n",
+ tid, mds);
mutex_unlock(&mdsc->mutex);
goto out;
}
@@ -3692,7 +3804,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
* response. And even if it did, there is nothing
* useful we could do with a revised return value.
*/
- dout("got safe reply %llu, mds%d\n", tid, mds);
+ doutc(cl, "got safe reply %llu, mds%d\n", tid, mds);
mutex_unlock(&mdsc->mutex);
goto out;
@@ -3702,7 +3814,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
}
- dout("handle_reply tid %lld result %d\n", tid, result);
+ doutc(cl, "tid %lld result %d\n", tid, result);
if (test_bit(CEPHFS_FEATURE_REPLY_ENCODING, &session->s_features))
err = parse_reply_info(session, msg, req, (u64)-1);
else
@@ -3742,7 +3854,8 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
mutex_lock(&session->s_mutex);
if (err < 0) {
- pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
+ pr_err_client(cl, "got corrupt reply mds%d(tid:%lld)\n",
+ mds, tid);
ceph_msg_dump(msg);
goto out_err;
}
@@ -3806,7 +3919,7 @@ out_err:
set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
}
} else {
- dout("reply arrived after request %lld was aborted\n", tid);
+ doutc(cl, "reply arrived after request %lld was aborted\n", tid);
}
mutex_unlock(&mdsc->mutex);
@@ -3835,6 +3948,7 @@ static void handle_forward(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
struct ceph_msg *msg)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
u64 tid = le64_to_cpu(msg->hdr.tid);
u32 next_mds;
@@ -3852,12 +3966,12 @@ static void handle_forward(struct ceph_mds_client *mdsc,
req = lookup_get_request(mdsc, tid);
if (!req) {
mutex_unlock(&mdsc->mutex);
- dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
+ doutc(cl, "forward tid %llu to mds%d - req dne\n", tid, next_mds);
return; /* dup reply? */
}
if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
- dout("forward tid %llu aborted, unregistering\n", tid);
+ doutc(cl, "forward tid %llu aborted, unregistering\n", tid);
__unregister_request(mdsc, req);
} else if (fwd_seq <= req->r_num_fwd || (uint32_t)fwd_seq >= U32_MAX) {
/*
@@ -3873,10 +3987,11 @@ static void handle_forward(struct ceph_mds_client *mdsc,
set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
mutex_unlock(&req->r_fill_mutex);
aborted = true;
- pr_warn_ratelimited("forward tid %llu seq overflow\n", tid);
+ pr_warn_ratelimited_client(cl, "forward tid %llu seq overflow\n",
+ tid);
} else {
/* resend. forward race not possible; mds would drop */
- dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
+ doutc(cl, "forward tid %llu to mds%d (we resend)\n", tid, next_mds);
BUG_ON(req->r_err);
BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags));
req->r_attempts = 0;
@@ -3894,7 +4009,7 @@ static void handle_forward(struct ceph_mds_client *mdsc,
return;
bad:
- pr_err("mdsc_handle_forward decode error err=%d\n", err);
+ pr_err_client(cl, "decode error err=%d\n", err);
ceph_msg_dump(msg);
}
@@ -3933,6 +4048,7 @@ static void handle_session(struct ceph_mds_session *session,
struct ceph_msg *msg)
{
struct ceph_mds_client *mdsc = session->s_mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
int mds = session->s_mds;
int msg_version = le16_to_cpu(msg->hdr.version);
void *p = msg->front.iov_base;
@@ -3980,7 +4096,8 @@ static void handle_session(struct ceph_mds_session *session,
/* version >= 5, flags */
ceph_decode_32_safe(&p, end, flags, bad);
if (flags & CEPH_SESSION_BLOCKLISTED) {
- pr_warn("mds%d session blocklisted\n", session->s_mds);
+ pr_warn_client(cl, "mds%d session blocklisted\n",
+ session->s_mds);
blocklisted = true;
}
}
@@ -3996,22 +4113,24 @@ static void handle_session(struct ceph_mds_session *session,
mutex_lock(&session->s_mutex);
- dout("handle_session mds%d %s %p state %s seq %llu\n",
- mds, ceph_session_op_name(op), session,
- ceph_session_state_name(session->s_state), seq);
+ doutc(cl, "mds%d %s %p state %s seq %llu\n", mds,
+ ceph_session_op_name(op), session,
+ ceph_session_state_name(session->s_state), seq);
if (session->s_state == CEPH_MDS_SESSION_HUNG) {
session->s_state = CEPH_MDS_SESSION_OPEN;
- pr_info("mds%d came back\n", session->s_mds);
+ pr_info_client(cl, "mds%d came back\n", session->s_mds);
}
switch (op) {
case CEPH_SESSION_OPEN:
if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
- pr_info("mds%d reconnect success\n", session->s_mds);
+ pr_info_client(cl, "mds%d reconnect success\n",
+ session->s_mds);
if (session->s_state == CEPH_MDS_SESSION_OPEN) {
- pr_notice("mds%d is already opened\n", session->s_mds);
+ pr_notice_client(cl, "mds%d is already opened\n",
+ session->s_mds);
} else {
session->s_state = CEPH_MDS_SESSION_OPEN;
session->s_features = features;
@@ -4041,7 +4160,8 @@ static void handle_session(struct ceph_mds_session *session,
case CEPH_SESSION_CLOSE:
if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
- pr_info("mds%d reconnect denied\n", session->s_mds);
+ pr_info_client(cl, "mds%d reconnect denied\n",
+ session->s_mds);
session->s_state = CEPH_MDS_SESSION_CLOSED;
cleanup_session_requests(mdsc, session);
remove_session_caps(session);
@@ -4050,8 +4170,8 @@ static void handle_session(struct ceph_mds_session *session,
break;
case CEPH_SESSION_STALE:
- pr_info("mds%d caps went stale, renewing\n",
- session->s_mds);
+ pr_info_client(cl, "mds%d caps went stale, renewing\n",
+ session->s_mds);
atomic_inc(&session->s_cap_gen);
session->s_cap_ttl = jiffies - 1;
send_renew_caps(mdsc, session);
@@ -4072,7 +4192,7 @@ static void handle_session(struct ceph_mds_session *session,
break;
case CEPH_SESSION_FORCE_RO:
- dout("force_session_readonly %p\n", session);
+ doutc(cl, "force_session_readonly %p\n", session);
spin_lock(&session->s_cap_lock);
session->s_readonly = true;
spin_unlock(&session->s_cap_lock);
@@ -4081,7 +4201,8 @@ static void handle_session(struct ceph_mds_session *session,
case CEPH_SESSION_REJECT:
WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
- pr_info("mds%d rejected session\n", session->s_mds);
+ pr_info_client(cl, "mds%d rejected session\n",
+ session->s_mds);
session->s_state = CEPH_MDS_SESSION_REJECTED;
cleanup_session_requests(mdsc, session);
remove_session_caps(session);
@@ -4091,7 +4212,7 @@ static void handle_session(struct ceph_mds_session *session,
break;
default:
- pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
+ pr_err_client(cl, "bad op %d mds%d\n", op, mds);
WARN_ON(1);
}
@@ -4108,30 +4229,32 @@ static void handle_session(struct ceph_mds_session *session,
return;
bad:
- pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
- (int)msg->front.iov_len);
+ pr_err_client(cl, "corrupt message mds%d len %d\n", mds,
+ (int)msg->front.iov_len);
ceph_msg_dump(msg);
return;
}
void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req)
{
+ struct ceph_client *cl = req->r_mdsc->fsc->client;
int dcaps;
dcaps = xchg(&req->r_dir_caps, 0);
if (dcaps) {
- dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
+ doutc(cl, "releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
ceph_put_cap_refs(ceph_inode(req->r_parent), dcaps);
}
}
void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req)
{
+ struct ceph_client *cl = req->r_mdsc->fsc->client;
int dcaps;
dcaps = xchg(&req->r_dir_caps, 0);
if (dcaps) {
- dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
+ doutc(cl, "releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
ceph_put_cap_refs_no_check_caps(ceph_inode(req->r_parent),
dcaps);
}
@@ -4146,7 +4269,7 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req, *nreq;
struct rb_node *p;
- dout("replay_unsafe_requests mds%d\n", session->s_mds);
+ doutc(mdsc->fsc->client, "mds%d\n", session->s_mds);
mutex_lock(&mdsc->mutex);
list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item)
@@ -4290,6 +4413,8 @@ out_unlock:
*/
static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
{
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
union {
struct ceph_mds_cap_reconnect v2;
struct ceph_mds_cap_reconnect_v1 v1;
@@ -4307,7 +4432,7 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
dentry = d_find_primary(inode);
if (dentry) {
/* set pathbase to parent dir when msg_version >= 2 */
- path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase,
+ path = ceph_mdsc_build_path(mdsc, dentry, &pathlen, &pathbase,
recon_state->msg_version >= 2);
dput(dentry);
if (IS_ERR(path)) {
@@ -4326,9 +4451,9 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
err = 0;
goto out_err;
}
- dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
- inode, ceph_vinop(inode), cap, cap->cap_id,
- ceph_cap_string(cap->issued));
+ doutc(cl, " adding %p ino %llx.%llx cap %p %lld %s\n", inode,
+ ceph_vinop(inode), cap, cap->cap_id,
+ ceph_cap_string(cap->issued));
cap->seq = 0; /* reset cap seq */
cap->issue_seq = 0; /* and issue_seq */
@@ -4482,6 +4607,7 @@ static int encode_snap_realms(struct ceph_mds_client *mdsc,
{
struct rb_node *p;
struct ceph_pagelist *pagelist = recon_state->pagelist;
+ struct ceph_client *cl = mdsc->fsc->client;
int err = 0;
if (recon_state->msg_version >= 4) {
@@ -4520,8 +4646,8 @@ static int encode_snap_realms(struct ceph_mds_client *mdsc,
ceph_pagelist_encode_32(pagelist, sizeof(sr_rec));
}
- dout(" adding snap realm %llx seq %lld parent %llx\n",
- realm->ino, realm->seq, realm->parent_ino);
+ doutc(cl, " adding snap realm %llx seq %lld parent %llx\n",
+ realm->ino, realm->seq, realm->parent_ino);
sr_rec.ino = cpu_to_le64(realm->ino);
sr_rec.seq = cpu_to_le64(realm->seq);
sr_rec.parent = cpu_to_le64(realm->parent_ino);
@@ -4550,6 +4676,7 @@ fail:
static void send_mds_reconnect(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_msg *reply;
int mds = session->s_mds;
int err = -ENOMEM;
@@ -4558,7 +4685,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
};
LIST_HEAD(dispose);
- pr_info("mds%d reconnect start\n", mds);
+ pr_info_client(cl, "mds%d reconnect start\n", mds);
recon_state.pagelist = ceph_pagelist_alloc(GFP_NOFS);
if (!recon_state.pagelist)
@@ -4574,8 +4701,8 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
session->s_state = CEPH_MDS_SESSION_RECONNECTING;
session->s_seq = 0;
- dout("session %p state %s\n", session,
- ceph_session_state_name(session->s_state));
+ doutc(cl, "session %p state %s\n", session,
+ ceph_session_state_name(session->s_state));
atomic_inc(&session->s_cap_gen);
@@ -4709,7 +4836,8 @@ fail:
fail_nomsg:
ceph_pagelist_release(recon_state.pagelist);
fail_nopagelist:
- pr_err("error %d preparing reconnect for mds%d\n", err, mds);
+ pr_err_client(cl, "error %d preparing reconnect for mds%d\n",
+ err, mds);
return;
}
@@ -4728,9 +4856,9 @@ static void check_new_map(struct ceph_mds_client *mdsc,
int oldstate, newstate;
struct ceph_mds_session *s;
unsigned long targets[DIV_ROUND_UP(CEPH_MAX_MDS, sizeof(unsigned long))] = {0};
+ struct ceph_client *cl = mdsc->fsc->client;
- dout("check_new_map new %u old %u\n",
- newmap->m_epoch, oldmap->m_epoch);
+ doutc(cl, "new %u old %u\n", newmap->m_epoch, oldmap->m_epoch);
if (newmap->m_info) {
for (i = 0; i < newmap->possible_max_rank; i++) {
@@ -4746,12 +4874,12 @@ static void check_new_map(struct ceph_mds_client *mdsc,
oldstate = ceph_mdsmap_get_state(oldmap, i);
newstate = ceph_mdsmap_get_state(newmap, i);
- dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
- i, ceph_mds_state_name(oldstate),
- ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
- ceph_mds_state_name(newstate),
- ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
- ceph_session_state_name(s->s_state));
+ doutc(cl, "mds%d state %s%s -> %s%s (session %s)\n",
+ i, ceph_mds_state_name(oldstate),
+ ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
+ ceph_mds_state_name(newstate),
+ ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
+ ceph_session_state_name(s->s_state));
if (i >= newmap->possible_max_rank) {
/* force close session for stopped mds */
@@ -4804,7 +4932,8 @@ static void check_new_map(struct ceph_mds_client *mdsc,
newstate >= CEPH_MDS_STATE_ACTIVE) {
if (oldstate != CEPH_MDS_STATE_CREATING &&
oldstate != CEPH_MDS_STATE_STARTING)
- pr_info("mds%d recovery completed\n", s->s_mds);
+ pr_info_client(cl, "mds%d recovery completed\n",
+ s->s_mds);
kick_requests(mdsc, i);
mutex_unlock(&mdsc->mutex);
mutex_lock(&s->s_mutex);
@@ -4848,12 +4977,13 @@ static void check_new_map(struct ceph_mds_client *mdsc,
s = __open_export_target_session(mdsc, i);
if (IS_ERR(s)) {
err = PTR_ERR(s);
- pr_err("failed to open export target session, err %d\n",
- err);
+ pr_err_client(cl,
+ "failed to open export target session, err %d\n",
+ err);
continue;
}
}
- dout("send reconnect to export target mds.%d\n", i);
+ doutc(cl, "send reconnect to export target mds.%d\n", i);
mutex_unlock(&mdsc->mutex);
send_mds_reconnect(mdsc, s);
ceph_put_mds_session(s);
@@ -4869,8 +4999,7 @@ static void check_new_map(struct ceph_mds_client *mdsc,
if (s->s_state == CEPH_MDS_SESSION_OPEN ||
s->s_state == CEPH_MDS_SESSION_HUNG ||
s->s_state == CEPH_MDS_SESSION_CLOSING) {
- dout(" connecting to export targets of laggy mds%d\n",
- i);
+ doutc(cl, " connecting to export targets of laggy mds%d\n", i);
__open_export_target_sessions(mdsc, s);
}
}
@@ -4897,6 +5026,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
struct ceph_msg *msg)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct super_block *sb = mdsc->fsc->sb;
struct inode *inode;
struct dentry *parent, *dentry;
@@ -4908,7 +5038,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
struct qstr dname;
int release = 0;
- dout("handle_lease from mds%d\n", mds);
+ doutc(cl, "from mds%d\n", mds);
if (!ceph_inc_mds_stopping_blocker(mdsc, session))
return;
@@ -4926,20 +5056,19 @@ static void handle_lease(struct ceph_mds_client *mdsc,
/* lookup inode */
inode = ceph_find_inode(sb, vino);
- dout("handle_lease %s, ino %llx %p %.*s\n",
- ceph_lease_op_name(h->action), vino.ino, inode,
- dname.len, dname.name);
+ doutc(cl, "%s, ino %llx %p %.*s\n", ceph_lease_op_name(h->action),
+ vino.ino, inode, dname.len, dname.name);
mutex_lock(&session->s_mutex);
if (!inode) {
- dout("handle_lease no inode %llx\n", vino.ino);
+ doutc(cl, "no inode %llx\n", vino.ino);
goto release;
}
/* dentry */
parent = d_find_alias(inode);
if (!parent) {
- dout("no parent dentry on inode %p\n", inode);
+ doutc(cl, "no parent dentry on inode %p\n", inode);
WARN_ON(1);
goto release; /* hrm... */
}
@@ -4999,7 +5128,7 @@ out:
bad:
ceph_dec_mds_stopping_blocker(mdsc);
- pr_err("corrupt lease message\n");
+ pr_err_client(cl, "corrupt lease message\n");
ceph_msg_dump(msg);
}
@@ -5007,13 +5136,14 @@ void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
struct dentry *dentry, char action,
u32 seq)
{
+ struct ceph_client *cl = session->s_mdsc->fsc->client;
struct ceph_msg *msg;
struct ceph_mds_lease *lease;
struct inode *dir;
int len = sizeof(*lease) + sizeof(u32) + NAME_MAX;
- dout("lease_send_msg identry %p %s to mds%d\n",
- dentry, ceph_lease_op_name(action), session->s_mds);
+ doutc(cl, "identry %p %s to mds%d\n", dentry, ceph_lease_op_name(action),
+ session->s_mds);
msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
if (!msg)
@@ -5046,6 +5176,7 @@ static void lock_unlock_session(struct ceph_mds_session *s)
static void maybe_recover_session(struct ceph_mds_client *mdsc)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_fs_client *fsc = mdsc->fsc;
if (!ceph_test_mount_opt(fsc, CLEANRECOVER))
@@ -5057,17 +5188,19 @@ static void maybe_recover_session(struct ceph_mds_client *mdsc)
if (!READ_ONCE(fsc->blocklisted))
return;
- pr_info("auto reconnect after blocklisted\n");
+ pr_info_client(cl, "auto reconnect after blocklisted\n");
ceph_force_reconnect(fsc->sb);
}
bool check_session_state(struct ceph_mds_session *s)
{
+ struct ceph_client *cl = s->s_mdsc->fsc->client;
+
switch (s->s_state) {
case CEPH_MDS_SESSION_OPEN:
if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
s->s_state = CEPH_MDS_SESSION_HUNG;
- pr_info("mds%d hung\n", s->s_mds);
+ pr_info_client(cl, "mds%d hung\n", s->s_mds);
}
break;
case CEPH_MDS_SESSION_CLOSING:
@@ -5087,6 +5220,8 @@ bool check_session_state(struct ceph_mds_session *s)
*/
void inc_session_sequence(struct ceph_mds_session *s)
{
+ struct ceph_client *cl = s->s_mdsc->fsc->client;
+
lockdep_assert_held(&s->s_mutex);
s->s_seq++;
@@ -5094,11 +5229,11 @@ void inc_session_sequence(struct ceph_mds_session *s)
if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
int ret;
- dout("resending session close request for mds%d\n", s->s_mds);
+ doutc(cl, "resending session close request for mds%d\n", s->s_mds);
ret = request_close_session(s);
if (ret < 0)
- pr_err("unable to close session to mds%d: %d\n",
- s->s_mds, ret);
+ pr_err_client(cl, "unable to close session to mds%d: %d\n",
+ s->s_mds, ret);
}
}
@@ -5127,7 +5262,7 @@ static void delayed_work(struct work_struct *work)
int renew_caps;
int i;
- dout("mdsc delayed_work\n");
+ doutc(mdsc->fsc->client, "mdsc delayed_work\n");
if (mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHED)
return;
@@ -5256,6 +5391,7 @@ err_mdsc:
*/
static void wait_requests(struct ceph_mds_client *mdsc)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_options *opts = mdsc->fsc->client->options;
struct ceph_mds_request *req;
@@ -5263,25 +5399,25 @@ static void wait_requests(struct ceph_mds_client *mdsc)
if (__get_oldest_req(mdsc)) {
mutex_unlock(&mdsc->mutex);
- dout("wait_requests waiting for requests\n");
+ doutc(cl, "waiting for requests\n");
wait_for_completion_timeout(&mdsc->safe_umount_waiters,
ceph_timeout_jiffies(opts->mount_timeout));
/* tear down remaining requests */
mutex_lock(&mdsc->mutex);
while ((req = __get_oldest_req(mdsc))) {
- dout("wait_requests timed out on tid %llu\n",
- req->r_tid);
+ doutc(cl, "timed out on tid %llu\n", req->r_tid);
list_del_init(&req->r_wait);
__unregister_request(mdsc, req);
}
}
mutex_unlock(&mdsc->mutex);
- dout("wait_requests done\n");
+ doutc(cl, "done\n");
}
void send_flush_mdlog(struct ceph_mds_session *s)
{
+ struct ceph_client *cl = s->s_mdsc->fsc->client;
struct ceph_msg *msg;
/*
@@ -5291,13 +5427,13 @@ void send_flush_mdlog(struct ceph_mds_session *s)
return;
mutex_lock(&s->s_mutex);
- dout("request mdlog flush to mds%d (%s)s seq %lld\n", s->s_mds,
- ceph_session_state_name(s->s_state), s->s_seq);
+ doutc(cl, "request mdlog flush to mds%d (%s) seq %lld\n",
+ s->s_mds, ceph_session_state_name(s->s_state), s->s_seq);
msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_FLUSH_MDLOG,
s->s_seq);
if (!msg) {
- pr_err("failed to request mdlog flush to mds%d (%s) seq %lld\n",
- s->s_mds, ceph_session_state_name(s->s_state), s->s_seq);
+ pr_err_client(cl, "failed to request mdlog flush to mds%d (%s) seq %lld\n",
+ s->s_mds, ceph_session_state_name(s->s_state), s->s_seq);
} else {
ceph_con_send(&s->s_con, msg);
}
@@ -5310,7 +5446,7 @@ void send_flush_mdlog(struct ceph_mds_session *s)
*/
void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
{
- dout("pre_umount\n");
+ doutc(mdsc->fsc->client, "begin\n");
mdsc->stopping = CEPH_MDSC_STOPPING_BEGIN;
ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true);
@@ -5325,6 +5461,7 @@ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
ceph_msgr_flush();
ceph_cleanup_quotarealms_inodes(mdsc);
+ doutc(mdsc->fsc->client, "done\n");
}
/*
@@ -5333,12 +5470,13 @@ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
static void flush_mdlog_and_wait_mdsc_unsafe_requests(struct ceph_mds_client *mdsc,
u64 want_tid)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req = NULL, *nextreq;
struct ceph_mds_session *last_session = NULL;
struct rb_node *n;
mutex_lock(&mdsc->mutex);
- dout("%s want %lld\n", __func__, want_tid);
+ doutc(cl, "want %lld\n", want_tid);
restart:
req = __get_oldest_req(mdsc);
while (req && req->r_tid <= want_tid) {
@@ -5372,8 +5510,8 @@ restart:
} else {
ceph_put_mds_session(s);
}
- dout("%s wait on %llu (want %llu)\n", __func__,
- req->r_tid, want_tid);
+ doutc(cl, "wait on %llu (want %llu)\n",
+ req->r_tid, want_tid);
wait_for_completion(&req->r_safe_completion);
mutex_lock(&mdsc->mutex);
@@ -5391,17 +5529,18 @@ restart:
}
mutex_unlock(&mdsc->mutex);
ceph_put_mds_session(last_session);
- dout("%s done\n", __func__);
+ doutc(cl, "done\n");
}
void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
{
+ struct ceph_client *cl = mdsc->fsc->client;
u64 want_tid, want_flush;
if (READ_ONCE(mdsc->fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN)
return;
- dout("sync\n");
+ doutc(cl, "sync\n");
mutex_lock(&mdsc->mutex);
want_tid = mdsc->last_tid;
mutex_unlock(&mdsc->mutex);
@@ -5417,8 +5556,7 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
}
spin_unlock(&mdsc->cap_dirty_lock);
- dout("sync want tid %lld flush_seq %lld\n",
- want_tid, want_flush);
+ doutc(cl, "sync want tid %lld flush_seq %lld\n", want_tid, want_flush);
flush_mdlog_and_wait_mdsc_unsafe_requests(mdsc, want_tid);
wait_caps_flush(mdsc, want_flush);
@@ -5440,11 +5578,12 @@ static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
{
struct ceph_options *opts = mdsc->fsc->client->options;
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_session *session;
int i;
int skipped = 0;
- dout("close_sessions\n");
+ doutc(cl, "begin\n");
/* close sessions */
mutex_lock(&mdsc->mutex);
@@ -5462,7 +5601,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
}
mutex_unlock(&mdsc->mutex);
- dout("waiting for sessions to close\n");
+ doutc(cl, "waiting for sessions to close\n");
wait_event_timeout(mdsc->session_close_wq,
done_closing_sessions(mdsc, skipped),
ceph_timeout_jiffies(opts->mount_timeout));
@@ -5490,7 +5629,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
cancel_work_sync(&mdsc->cap_reclaim_work);
cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
- dout("stopped\n");
+ doutc(cl, "done\n");
}
void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
@@ -5498,7 +5637,7 @@ void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
struct ceph_mds_session *session;
int mds;
- dout("force umount\n");
+ doutc(mdsc->fsc->client, "force umount\n");
mutex_lock(&mdsc->mutex);
for (mds = 0; mds < mdsc->max_sessions; mds++) {
@@ -5529,7 +5668,7 @@ void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
{
- dout("stop\n");
+ doutc(mdsc->fsc->client, "stop\n");
/*
* Make sure the delayed work stopped before releasing
* the resources.
@@ -5550,7 +5689,7 @@ static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
{
struct ceph_mds_client *mdsc = fsc->mdsc;
- dout("mdsc_destroy %p\n", mdsc);
+ doutc(fsc->client, "%p\n", mdsc);
if (!mdsc)
return;
@@ -5564,12 +5703,13 @@ void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
fsc->mdsc = NULL;
kfree(mdsc);
- dout("mdsc_destroy %p done\n", mdsc);
+ doutc(fsc->client, "%p done\n", mdsc);
}
void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
struct ceph_fs_client *fsc = mdsc->fsc;
+ struct ceph_client *cl = fsc->client;
const char *mds_namespace = fsc->mount_options->mds_namespace;
void *p = msg->front.iov_base;
void *end = p + msg->front.iov_len;
@@ -5581,7 +5721,7 @@ void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
ceph_decode_need(&p, end, sizeof(u32), bad);
epoch = ceph_decode_32(&p);
- dout("handle_fsmap epoch %u\n", epoch);
+ doutc(cl, "epoch %u\n", epoch);
/* struct_v, struct_cv, map_len, epoch, legacy_client_fscid */
ceph_decode_skip_n(&p, end, 2 + sizeof(u32) * 3, bad);
@@ -5626,7 +5766,8 @@ void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
return;
bad:
- pr_err("error decoding fsmap %d. Shutting down mount.\n", err);
+ pr_err_client(cl, "error decoding fsmap %d. Shutting down mount.\n",
+ err);
ceph_umount_begin(mdsc->fsc->sb);
ceph_msg_dump(msg);
err_out:
@@ -5641,6 +5782,7 @@ err_out:
*/
void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
+ struct ceph_client *cl = mdsc->fsc->client;
u32 epoch;
u32 maplen;
void *p = msg->front.iov_base;
@@ -5655,18 +5797,17 @@ void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
return;
epoch = ceph_decode_32(&p);
maplen = ceph_decode_32(&p);
- dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
+ doutc(cl, "epoch %u len %d\n", epoch, (int)maplen);
/* do we need it? */
mutex_lock(&mdsc->mutex);
if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
- dout("handle_map epoch %u <= our %u\n",
- epoch, mdsc->mdsmap->m_epoch);
+ doutc(cl, "epoch %u <= our %u\n", epoch, mdsc->mdsmap->m_epoch);
mutex_unlock(&mdsc->mutex);
return;
}
- newmap = ceph_mdsmap_decode(&p, end, ceph_msgr2(mdsc->fsc->client));
+ newmap = ceph_mdsmap_decode(mdsc, &p, end, ceph_msgr2(mdsc->fsc->client));
if (IS_ERR(newmap)) {
err = PTR_ERR(newmap);
goto bad_unlock;
@@ -5695,7 +5836,8 @@ void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
bad_unlock:
mutex_unlock(&mdsc->mutex);
bad:
- pr_err("error decoding mdsmap %d. Shutting down mount.\n", err);
+ pr_err_client(cl, "error decoding mdsmap %d. Shutting down mount.\n",
+ err);
ceph_umount_begin(mdsc->fsc->sb);
ceph_msg_dump(msg);
return;
@@ -5726,7 +5868,8 @@ static void mds_peer_reset(struct ceph_connection *con)
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
- pr_warn("mds%d closed our session\n", s->s_mds);
+ pr_warn_client(mdsc->fsc->client, "mds%d closed our session\n",
+ s->s_mds);
if (READ_ONCE(mdsc->fsc->mount_state) != CEPH_MOUNT_FENCE_IO)
send_mds_reconnect(mdsc, s);
}
@@ -5735,6 +5878,7 @@ static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
int type = le16_to_cpu(msg->hdr.type);
mutex_lock(&mdsc->mutex);
@@ -5774,8 +5918,8 @@ static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
break;
default:
- pr_err("received unknown message type %d %s\n", type,
- ceph_msg_type_name(type));
+ pr_err_client(cl, "received unknown message type %d %s\n",
+ type, ceph_msg_type_name(type));
}
out:
ceph_msg_put(msg);
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 5a3714bdd64a..2e6ddaa13d72 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -14,9 +14,9 @@
#include <linux/ceph/types.h>
#include <linux/ceph/messenger.h>
-#include <linux/ceph/mdsmap.h>
#include <linux/ceph/auth.h>
+#include "mdsmap.h"
#include "metric.h"
#include "super.h"
@@ -33,8 +33,10 @@ enum ceph_feature_type {
CEPHFS_FEATURE_NOTIFY_SESSION_STATE,
CEPHFS_FEATURE_OP_GETVXATTR,
CEPHFS_FEATURE_32BITS_RETRY_FWD,
+ CEPHFS_FEATURE_NEW_SNAPREALM_INFO,
+ CEPHFS_FEATURE_HAS_OWNER_UIDGID,
- CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_32BITS_RETRY_FWD,
+ CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_HAS_OWNER_UIDGID,
};
#define CEPHFS_FEATURES_CLIENT_SUPPORTED { \
@@ -49,6 +51,7 @@ enum ceph_feature_type {
CEPHFS_FEATURE_NOTIFY_SESSION_STATE, \
CEPHFS_FEATURE_OP_GETVXATTR, \
CEPHFS_FEATURE_32BITS_RETRY_FWD, \
+ CEPHFS_FEATURE_HAS_OWNER_UIDGID, \
}
/*
@@ -300,6 +303,7 @@ struct ceph_mds_request {
int r_fmode; /* file mode, if expecting cap */
int r_request_release_offset;
const struct cred *r_cred;
+ struct mnt_idmap *r_mnt_idmap;
struct timespec64 r_stamp;
/* for choosing which mds to send this request to */
@@ -581,7 +585,8 @@ static inline void ceph_mdsc_free_path(char *path, int len)
__putname(path - (PATH_MAX - 1 - len));
}
-extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
+extern char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc,
+ struct dentry *dentry, int *plen, u64 *base,
int for_wire);
extern void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry);
@@ -614,4 +619,6 @@ static inline int ceph_wait_on_async_create(struct inode *inode)
extern int ceph_wait_on_conflict_unlink(struct dentry *dentry);
extern u64 ceph_get_deleg_ino(struct ceph_mds_session *session);
extern int ceph_restore_deleg_ino(struct ceph_mds_session *session, u64 ino);
+
+extern bool enable_unsafe_idmap;
#endif
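
The r_mnt_idmap field added above is what lets create_request_message() translate the caller's credentials through an idmapped mount before they go on the wire, as the NEWINODE branch earlier in this patch does. A minimal sketch of that translation, assuming a request whose r_mnt_idmap and r_cred are already populated (the helper name is made up for illustration and is not part of the patch):

/* Sketch only: mirror of the owner-uid/gid mapping done for new-inode ops. */
static void sketch_fill_owner_ids(struct ceph_mds_request_head *nhead,
				  struct ceph_mds_request *req)
{
	kuid_t owner_fsuid;
	kgid_t owner_fsgid;

	if (IS_CEPH_MDS_OP_NEWINODE(req->r_op)) {
		/* map from the mount's idmapping into init_user_ns */
		owner_fsuid = from_vfsuid(req->r_mnt_idmap, &init_user_ns,
					  VFSUIDT_INIT(req->r_cred->fsuid));
		owner_fsgid = from_vfsgid(req->r_mnt_idmap, &init_user_ns,
					  VFSGIDT_INIT(req->r_cred->fsgid));
		nhead->owner_uid = cpu_to_le32(from_kuid(&init_user_ns, owner_fsuid));
		nhead->owner_gid = cpu_to_le32(from_kgid(&init_user_ns, owner_fsgid));
	} else {
		/* non-create operations carry no owner identity */
		nhead->owner_uid = cpu_to_le32(-1);
		nhead->owner_gid = cpu_to_le32(-1);
	}
}
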
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index 7dac21ee6ce7..fae97c25ce58 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -7,10 +7,11 @@
#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/ceph/mdsmap.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
+#include "mdsmap.h"
+#include "mds_client.h"
#include "super.h"
#define CEPH_MDS_IS_READY(i, ignore_laggy) \
@@ -114,8 +115,10 @@ bad:
* Ignore any fields we don't care about (there are quite a few of
* them).
*/
-struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
+struct ceph_mdsmap *ceph_mdsmap_decode(struct ceph_mds_client *mdsc, void **p,
+ void *end, bool msgr2)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mdsmap *m;
const void *start = *p;
int i, j, n;
@@ -233,20 +236,18 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
*p = info_end;
}
- dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s%s\n",
- i+1, n, global_id, mds, inc,
- ceph_pr_addr(&addr),
- ceph_mds_state_name(state),
- laggy ? "(laggy)" : "");
+ doutc(cl, "%d/%d %lld mds%d.%d %s %s%s\n", i+1, n, global_id,
+ mds, inc, ceph_pr_addr(&addr),
+ ceph_mds_state_name(state), laggy ? "(laggy)" : "");
if (mds < 0 || mds >= m->possible_max_rank) {
- pr_warn("mdsmap_decode got incorrect mds(%d)\n", mds);
+ pr_warn_client(cl, "got incorrect mds(%d)\n", mds);
continue;
}
if (state <= 0) {
- dout("mdsmap_decode got incorrect state(%s)\n",
- ceph_mds_state_name(state));
+ doutc(cl, "got incorrect state(%s)\n",
+ ceph_mds_state_name(state));
continue;
}
@@ -385,16 +386,16 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
m->m_max_xattr_size = 0;
}
bad_ext:
- dout("mdsmap_decode m_enabled: %d, m_damaged: %d, m_num_laggy: %d\n",
- !!m->m_enabled, !!m->m_damaged, m->m_num_laggy);
+ doutc(cl, "m_enabled: %d, m_damaged: %d, m_num_laggy: %d\n",
+ !!m->m_enabled, !!m->m_damaged, m->m_num_laggy);
*p = end;
- dout("mdsmap_decode success epoch %u\n", m->m_epoch);
+ doutc(cl, "success epoch %u\n", m->m_epoch);
return m;
nomem:
err = -ENOMEM;
goto out_err;
corrupt:
- pr_err("corrupt mdsmap\n");
+ pr_err_client(cl, "corrupt mdsmap\n");
print_hex_dump(KERN_DEBUG, "mdsmap: ",
DUMP_PREFIX_OFFSET, 16, 1,
start, end - start, true);
diff --git a/include/linux/ceph/mdsmap.h b/fs/ceph/mdsmap.h
index 4c3e0648dc27..89f1931f1ba6 100644
--- a/include/linux/ceph/mdsmap.h
+++ b/fs/ceph/mdsmap.h
@@ -5,6 +5,8 @@
#include <linux/bug.h>
#include <linux/ceph/types.h>
+struct ceph_mds_client;
+
/*
* mds map - describe servers in the mds cluster.
*
@@ -65,7 +67,8 @@ static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
}
extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m);
-struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2);
+struct ceph_mdsmap *ceph_mdsmap_decode(struct ceph_mds_client *mdsc, void **p,
+ void *end, bool msgr2);
extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m);
extern bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m);
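
Nearly every hunk in this series applies the same mechanical conversion: derive the mount's struct ceph_client pointer once at the top of a function and feed it to the client-aware logging helpers so every message carries the client global_id. A hedged sketch of that idiom (the function is invented purely to show the shape; the real call sites are the ones in the hunks above):

/* Illustration only: the per-client logging pattern used throughout the series. */
static void sketch_log_session_state(struct ceph_mds_session *s)
{
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_client *cl = mdsc->fsc->client; /* or ceph_inode_to_client(inode) */

	doutc(cl, "mds%d session %p state %s\n", s->s_mds, s,
	      ceph_session_state_name(s->s_state));
	if (s->s_state == CEPH_MDS_SESSION_REJECTED)
		pr_err_client(cl, "mds%d rejected session\n", s->s_mds);
}
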
diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
index 6d3584f16f9a..871c1090e520 100644
--- a/fs/ceph/metric.c
+++ b/fs/ceph/metric.c
@@ -31,6 +31,7 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
struct ceph_client_metric *m = &mdsc->metric;
u64 nr_caps = atomic64_read(&m->total_caps);
u32 header_len = sizeof(struct ceph_metric_header);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_msg *msg;
s64 sum;
s32 items = 0;
@@ -51,8 +52,8 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
msg = ceph_msg_new(CEPH_MSG_CLIENT_METRICS, len, GFP_NOFS, true);
if (!msg) {
- pr_err("send metrics to mds%d, failed to allocate message\n",
- s->s_mds);
+ pr_err_client(cl, "to mds%d, failed to allocate message\n",
+ s->s_mds);
return false;
}
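
The two feature bits introduced in mds_client.h (CEPHFS_FEATURE_NEW_SNAPREALM_INFO and CEPHFS_FEATURE_HAS_OWNER_UIDGID) are negotiated per session, so the client would be expected to gate the extended request-head layouts on what the MDS advertised, much as the reply path already tests CEPHFS_FEATURE_REPLY_ENCODING. The following is only a sketch of that assumption, not code from the patch:

/* Sketch: choose a request-head layout from the session feature bits. */
static int sketch_request_head_version(struct ceph_mds_session *session)
{
	if (!test_bit(CEPHFS_FEATURE_32BITS_RETRY_FWD, &session->s_features))
		return 1;			/* legacy head only */
	if (!test_bit(CEPHFS_FEATURE_HAS_OWNER_UIDGID, &session->s_features))
		return 2;			/* extended head, no owner uid/gid */
	return CEPH_MDS_REQUEST_HEAD_VERSION;	/* full head with owner fields */
}
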
diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
index f7fcf7f08ec6..9d36c3532de1 100644
--- a/fs/ceph/quota.c
+++ b/fs/ceph/quota.c
@@ -43,6 +43,7 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc,
{
struct super_block *sb = mdsc->fsc->sb;
struct ceph_mds_quota *h = msg->front.iov_base;
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_vino vino;
struct inode *inode;
struct ceph_inode_info *ci;
@@ -51,8 +52,8 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc,
return;
if (msg->front.iov_len < sizeof(*h)) {
- pr_err("%s corrupt message mds%d len %d\n", __func__,
- session->s_mds, (int)msg->front.iov_len);
+ pr_err_client(cl, "corrupt message mds%d len %d\n",
+ session->s_mds, (int)msg->front.iov_len);
ceph_msg_dump(msg);
goto out;
}
@@ -62,7 +63,7 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc,
vino.snap = CEPH_NOSNAP;
inode = ceph_find_inode(sb, vino);
if (!inode) {
- pr_warn("Failed to find inode %llu\n", vino.ino);
+ pr_warn_client(cl, "failed to find inode %llx\n", vino.ino);
goto out;
}
ci = ceph_inode(inode);
@@ -85,6 +86,7 @@ find_quotarealm_inode(struct ceph_mds_client *mdsc, u64 ino)
{
struct ceph_quotarealm_inode *qri = NULL;
struct rb_node **node, *parent = NULL;
+ struct ceph_client *cl = mdsc->fsc->client;
mutex_lock(&mdsc->quotarealms_inodes_mutex);
node = &(mdsc->quotarealms_inodes.rb_node);
@@ -110,7 +112,7 @@ find_quotarealm_inode(struct ceph_mds_client *mdsc, u64 ino)
rb_link_node(&qri->node, parent, node);
rb_insert_color(&qri->node, &mdsc->quotarealms_inodes);
} else
- pr_warn("Failed to alloc quotarealms_inode\n");
+ pr_warn_client(cl, "Failed to alloc quotarealms_inode\n");
}
mutex_unlock(&mdsc->quotarealms_inodes_mutex);
@@ -129,6 +131,7 @@ static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc,
struct super_block *sb,
struct ceph_snap_realm *realm)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_quotarealm_inode *qri;
struct inode *in;
@@ -161,8 +164,8 @@ static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc,
}
if (IS_ERR(in)) {
- dout("Can't lookup inode %llx (err: %ld)\n",
- realm->ino, PTR_ERR(in));
+ doutc(cl, "Can't lookup inode %llx (err: %ld)\n", realm->ino,
+ PTR_ERR(in));
qri->timeout = jiffies + msecs_to_jiffies(60 * 1000); /* XXX */
} else {
qri->timeout = 0;
@@ -213,6 +216,7 @@ static struct ceph_snap_realm *get_quota_realm(struct ceph_mds_client *mdsc,
enum quota_get_realm which_quota,
bool retry)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *ci = NULL;
struct ceph_snap_realm *realm, *next;
struct inode *in;
@@ -226,8 +230,9 @@ restart:
if (realm)
ceph_get_snap_realm(mdsc, realm);
else
- pr_err_ratelimited("get_quota_realm: ino (%llx.%llx) "
- "null i_snap_realm\n", ceph_vinop(inode));
+ pr_err_ratelimited_client(cl,
+ "%p %llx.%llx null i_snap_realm\n",
+ inode, ceph_vinop(inode));
while (realm) {
bool has_inode;
@@ -317,6 +322,7 @@ static bool check_quota_exceeded(struct inode *inode, enum quota_check_op op,
loff_t delta)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *ci;
struct ceph_snap_realm *realm, *next;
struct inode *in;
@@ -332,8 +338,9 @@ restart:
if (realm)
ceph_get_snap_realm(mdsc, realm);
else
- pr_err_ratelimited("check_quota_exceeded: ino (%llx.%llx) "
- "null i_snap_realm\n", ceph_vinop(inode));
+ pr_err_ratelimited_client(cl,
+ "%p %llx.%llx null i_snap_realm\n",
+ inode, ceph_vinop(inode));
while (realm) {
bool has_inode;
@@ -383,7 +390,7 @@ restart:
break;
default:
/* Shouldn't happen */
- pr_warn("Invalid quota check op (%d)\n", op);
+ pr_warn_client(cl, "Invalid quota check op (%d)\n", op);
exceeded = true; /* Just break the loop */
}
iput(in);
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 6732e1ea97d9..c65f2b202b2b 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -138,7 +138,7 @@ static struct ceph_snap_realm *ceph_create_snap_realm(
__insert_snap_realm(&mdsc->snap_realms, realm);
mdsc->num_snap_realms++;
- dout("%s %llx %p\n", __func__, realm->ino, realm);
+ doutc(mdsc->fsc->client, "%llx %p\n", realm->ino, realm);
return realm;
}
@@ -150,6 +150,7 @@ static struct ceph_snap_realm *ceph_create_snap_realm(
static struct ceph_snap_realm *__lookup_snap_realm(struct ceph_mds_client *mdsc,
u64 ino)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct rb_node *n = mdsc->snap_realms.rb_node;
struct ceph_snap_realm *r;
@@ -162,7 +163,7 @@ static struct ceph_snap_realm *__lookup_snap_realm(struct ceph_mds_client *mdsc,
else if (ino > r->ino)
n = n->rb_right;
else {
- dout("%s %llx %p\n", __func__, r->ino, r);
+ doutc(cl, "%llx %p\n", r->ino, r);
return r;
}
}
@@ -188,9 +189,10 @@ static void __put_snap_realm(struct ceph_mds_client *mdsc,
static void __destroy_snap_realm(struct ceph_mds_client *mdsc,
struct ceph_snap_realm *realm)
{
+ struct ceph_client *cl = mdsc->fsc->client;
lockdep_assert_held_write(&mdsc->snap_rwsem);
- dout("%s %p %llx\n", __func__, realm, realm->ino);
+ doutc(cl, "%p %llx\n", realm, realm->ino);
rb_erase(&realm->node, &mdsc->snap_realms);
mdsc->num_snap_realms--;
@@ -290,6 +292,7 @@ static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc,
struct ceph_snap_realm *realm,
u64 parentino)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_snap_realm *parent;
lockdep_assert_held_write(&mdsc->snap_rwsem);
@@ -303,8 +306,8 @@ static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc,
if (IS_ERR(parent))
return PTR_ERR(parent);
}
- dout("%s %llx %p: %llx %p -> %llx %p\n", __func__, realm->ino,
- realm, realm->parent_ino, realm->parent, parentino, parent);
+ doutc(cl, "%llx %p: %llx %p -> %llx %p\n", realm->ino, realm,
+ realm->parent_ino, realm->parent, parentino, parent);
if (realm->parent) {
list_del_init(&realm->child_item);
ceph_put_snap_realm(mdsc, realm->parent);
@@ -329,10 +332,12 @@ static int cmpu64_rev(const void *a, const void *b)
/*
* build the snap context for a given realm.
*/
-static int build_snap_context(struct ceph_snap_realm *realm,
+static int build_snap_context(struct ceph_mds_client *mdsc,
+ struct ceph_snap_realm *realm,
struct list_head *realm_queue,
struct list_head *dirty_realms)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_snap_realm *parent = realm->parent;
struct ceph_snap_context *snapc;
int err = 0;
@@ -360,10 +365,10 @@ static int build_snap_context(struct ceph_snap_realm *realm,
realm->cached_context->seq == realm->seq &&
(!parent ||
realm->cached_context->seq >= parent->cached_context->seq)) {
- dout("%s %llx %p: %p seq %lld (%u snaps) (unchanged)\n",
- __func__, realm->ino, realm, realm->cached_context,
- realm->cached_context->seq,
- (unsigned int)realm->cached_context->num_snaps);
+ doutc(cl, "%llx %p: %p seq %lld (%u snaps) (unchanged)\n",
+ realm->ino, realm, realm->cached_context,
+ realm->cached_context->seq,
+ (unsigned int)realm->cached_context->num_snaps);
return 0;
}
@@ -400,8 +405,8 @@ static int build_snap_context(struct ceph_snap_realm *realm,
sort(snapc->snaps, num, sizeof(u64), cmpu64_rev, NULL);
snapc->num_snaps = num;
- dout("%s %llx %p: %p seq %lld (%u snaps)\n", __func__, realm->ino,
- realm, snapc, snapc->seq, (unsigned int) snapc->num_snaps);
+ doutc(cl, "%llx %p: %p seq %lld (%u snaps)\n", realm->ino, realm,
+ snapc, snapc->seq, (unsigned int) snapc->num_snaps);
ceph_put_snap_context(realm->cached_context);
realm->cached_context = snapc;
@@ -418,16 +423,18 @@ fail:
ceph_put_snap_context(realm->cached_context);
realm->cached_context = NULL;
}
- pr_err("%s %llx %p fail %d\n", __func__, realm->ino, realm, err);
+ pr_err_client(cl, "%llx %p fail %d\n", realm->ino, realm, err);
return err;
}
/*
* rebuild snap context for the given realm and all of its children.
*/
-static void rebuild_snap_realms(struct ceph_snap_realm *realm,
+static void rebuild_snap_realms(struct ceph_mds_client *mdsc,
+ struct ceph_snap_realm *realm,
struct list_head *dirty_realms)
{
+ struct ceph_client *cl = mdsc->fsc->client;
LIST_HEAD(realm_queue);
int last = 0;
bool skip = false;
@@ -451,9 +458,10 @@ static void rebuild_snap_realms(struct ceph_snap_realm *realm,
continue;
}
- last = build_snap_context(_realm, &realm_queue, dirty_realms);
- dout("%s %llx %p, %s\n", __func__, _realm->ino, _realm,
- last > 0 ? "is deferred" : !last ? "succeeded" : "failed");
+ last = build_snap_context(mdsc, _realm, &realm_queue,
+ dirty_realms);
+ doutc(cl, "%llx %p, %s\n", _realm->ino, _realm,
+ last > 0 ? "is deferred" : !last ? "succeeded" : "failed");
/* is any child in the list ? */
list_for_each_entry(child, &_realm->children, child_item) {
@@ -523,6 +531,7 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci,
struct ceph_cap_snap **pcapsnap)
{
struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_snap_context *old_snapc, *new_snapc;
struct ceph_cap_snap *capsnap = *pcapsnap;
struct ceph_buffer *old_blob = NULL;
@@ -548,14 +557,14 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci,
as no new writes are allowed to start when pending, so any
writes in progress now were started before the previous
cap_snap. lucky us. */
- dout("%s %p %llx.%llx already pending\n",
- __func__, inode, ceph_vinop(inode));
+ doutc(cl, "%p %llx.%llx already pending\n", inode,
+ ceph_vinop(inode));
goto update_snapc;
}
if (ci->i_wrbuffer_ref_head == 0 &&
!(dirty & (CEPH_CAP_ANY_EXCL|CEPH_CAP_FILE_WR))) {
- dout("%s %p %llx.%llx nothing dirty|writing\n",
- __func__, inode, ceph_vinop(inode));
+ doutc(cl, "%p %llx.%llx nothing dirty|writing\n", inode,
+ ceph_vinop(inode));
goto update_snapc;
}
@@ -575,15 +584,15 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci,
} else {
if (!(used & CEPH_CAP_FILE_WR) &&
ci->i_wrbuffer_ref_head == 0) {
- dout("%s %p %llx.%llx no new_snap|dirty_page|writing\n",
- __func__, inode, ceph_vinop(inode));
+ doutc(cl, "%p %llx.%llx no new_snap|dirty_page|writing\n",
+ inode, ceph_vinop(inode));
goto update_snapc;
}
}
- dout("%s %p %llx.%llx cap_snap %p queuing under %p %s %s\n",
- __func__, inode, ceph_vinop(inode), capsnap, old_snapc,
- ceph_cap_string(dirty), capsnap->need_flush ? "" : "no_flush");
+ doutc(cl, "%p %llx.%llx cap_snap %p queuing under %p %s %s\n",
+ inode, ceph_vinop(inode), capsnap, old_snapc,
+ ceph_cap_string(dirty), capsnap->need_flush ? "" : "no_flush");
ihold(inode);
capsnap->follows = old_snapc->seq;
@@ -615,9 +624,9 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci,
list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
if (used & CEPH_CAP_FILE_WR) {
- dout("%s %p %llx.%llx cap_snap %p snapc %p seq %llu used WR,"
- " now pending\n", __func__, inode, ceph_vinop(inode),
- capsnap, old_snapc, old_snapc->seq);
+ doutc(cl, "%p %llx.%llx cap_snap %p snapc %p seq %llu used WR,"
+ " now pending\n", inode, ceph_vinop(inode), capsnap,
+ old_snapc, old_snapc->seq);
capsnap->writing = 1;
} else {
/* note mtime, size NOW. */
@@ -634,7 +643,7 @@ update_snapc:
ci->i_head_snapc = NULL;
} else {
ci->i_head_snapc = ceph_get_snap_context(new_snapc);
- dout(" new snapc is %p\n", new_snapc);
+ doutc(cl, " new snapc is %p\n", new_snapc);
}
spin_unlock(&ci->i_ceph_lock);
@@ -655,6 +664,7 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
{
struct inode *inode = &ci->netfs.inode;
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
BUG_ON(capsnap->writing);
capsnap->size = i_size_read(inode);
@@ -667,11 +677,12 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
capsnap->truncate_size = ci->i_truncate_size;
capsnap->truncate_seq = ci->i_truncate_seq;
if (capsnap->dirty_pages) {
- dout("%s %p %llx.%llx cap_snap %p snapc %p %llu %s s=%llu "
- "still has %d dirty pages\n", __func__, inode,
- ceph_vinop(inode), capsnap, capsnap->context,
- capsnap->context->seq, ceph_cap_string(capsnap->dirty),
- capsnap->size, capsnap->dirty_pages);
+ doutc(cl, "%p %llx.%llx cap_snap %p snapc %p %llu %s "
+ "s=%llu still has %d dirty pages\n", inode,
+ ceph_vinop(inode), capsnap, capsnap->context,
+ capsnap->context->seq,
+ ceph_cap_string(capsnap->dirty),
+ capsnap->size, capsnap->dirty_pages);
return 0;
}
@@ -680,20 +691,20 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
* And trigger to flush the buffer immediately.
*/
if (ci->i_wrbuffer_ref) {
- dout("%s %p %llx.%llx cap_snap %p snapc %p %llu %s s=%llu "
- "used WRBUFFER, delaying\n", __func__, inode,
- ceph_vinop(inode), capsnap, capsnap->context,
- capsnap->context->seq, ceph_cap_string(capsnap->dirty),
- capsnap->size);
+ doutc(cl, "%p %llx.%llx cap_snap %p snapc %p %llu %s "
+ "s=%llu used WRBUFFER, delaying\n", inode,
+ ceph_vinop(inode), capsnap, capsnap->context,
+ capsnap->context->seq, ceph_cap_string(capsnap->dirty),
+ capsnap->size);
ceph_queue_writeback(inode);
return 0;
}
ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS;
- dout("%s %p %llx.%llx cap_snap %p snapc %p %llu %s s=%llu\n",
- __func__, inode, ceph_vinop(inode), capsnap, capsnap->context,
- capsnap->context->seq, ceph_cap_string(capsnap->dirty),
- capsnap->size);
+ doutc(cl, "%p %llx.%llx cap_snap %p snapc %p %llu %s s=%llu\n",
+ inode, ceph_vinop(inode), capsnap, capsnap->context,
+ capsnap->context->seq, ceph_cap_string(capsnap->dirty),
+ capsnap->size);
spin_lock(&mdsc->snap_flush_lock);
if (list_empty(&ci->i_snap_flush_item)) {
@@ -708,13 +719,15 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
* Queue cap_snaps for snap writeback for this realm and its children.
* Called under snap_rwsem, so realm topology won't change.
*/
-static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
+static void queue_realm_cap_snaps(struct ceph_mds_client *mdsc,
+ struct ceph_snap_realm *realm)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *ci;
struct inode *lastinode = NULL;
struct ceph_cap_snap *capsnap = NULL;
- dout("%s %p %llx inode\n", __func__, realm, realm->ino);
+ doutc(cl, "%p %llx inode\n", realm, realm->ino);
spin_lock(&realm->inodes_with_caps_lock);
list_for_each_entry(ci, &realm->inodes_with_caps, i_snap_realm_item) {
@@ -733,8 +746,9 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
if (!capsnap) {
capsnap = kmem_cache_zalloc(ceph_cap_snap_cachep, GFP_NOFS);
if (!capsnap) {
- pr_err("ENOMEM allocating ceph_cap_snap on %p\n",
- inode);
+ pr_err_client(cl,
+ "ENOMEM allocating ceph_cap_snap on %p\n",
+ inode);
return;
}
}
@@ -752,7 +766,7 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
if (capsnap)
kmem_cache_free(ceph_cap_snap_cachep, capsnap);
- dout("%s %p %llx done\n", __func__, realm, realm->ino);
+ doutc(cl, "%p %llx done\n", realm, realm->ino);
}
/*
@@ -766,6 +780,7 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
void *p, void *e, bool deletion,
struct ceph_snap_realm **realm_ret)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_snap_realm *ri; /* encoded */
__le64 *snaps; /* encoded */
__le64 *prior_parent_snaps; /* encoded */
@@ -780,7 +795,7 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
lockdep_assert_held_write(&mdsc->snap_rwsem);
- dout("%s deletion=%d\n", __func__, deletion);
+ doutc(cl, "deletion=%d\n", deletion);
more:
realm = NULL;
rebuild_snapcs = 0;
@@ -810,8 +825,8 @@ more:
rebuild_snapcs += err;
if (le64_to_cpu(ri->seq) > realm->seq) {
- dout("%s updating %llx %p %lld -> %lld\n", __func__,
- realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
+ doutc(cl, "updating %llx %p %lld -> %lld\n", realm->ino,
+ realm, realm->seq, le64_to_cpu(ri->seq));
/* update realm parameters, snap lists */
realm->seq = le64_to_cpu(ri->seq);
realm->created = le64_to_cpu(ri->created);
@@ -834,16 +849,16 @@ more:
rebuild_snapcs = 1;
} else if (!realm->cached_context) {
- dout("%s %llx %p seq %lld new\n", __func__,
- realm->ino, realm, realm->seq);
+ doutc(cl, "%llx %p seq %lld new\n", realm->ino, realm,
+ realm->seq);
rebuild_snapcs = 1;
} else {
- dout("%s %llx %p seq %lld unchanged\n", __func__,
- realm->ino, realm, realm->seq);
+ doutc(cl, "%llx %p seq %lld unchanged\n", realm->ino, realm,
+ realm->seq);
}
- dout("done with %llx %p, rebuild_snapcs=%d, %p %p\n", realm->ino,
- realm, rebuild_snapcs, p, e);
+ doutc(cl, "done with %llx %p, rebuild_snapcs=%d, %p %p\n", realm->ino,
+ realm, rebuild_snapcs, p, e);
/*
* this will always track the uppest parent realm from which
@@ -855,7 +870,7 @@ more:
/* rebuild_snapcs when we reach the _end_ (root) of the trace */
if (realm_to_rebuild && p >= e)
- rebuild_snap_realms(realm_to_rebuild, &dirty_realms);
+ rebuild_snap_realms(mdsc, realm_to_rebuild, &dirty_realms);
if (!first_realm)
first_realm = realm;
@@ -873,7 +888,7 @@ more:
realm = list_first_entry(&dirty_realms, struct ceph_snap_realm,
dirty_item);
list_del_init(&realm->dirty_item);
- queue_realm_cap_snaps(realm);
+ queue_realm_cap_snaps(mdsc, realm);
}
if (realm_ret)
@@ -891,7 +906,7 @@ fail:
ceph_put_snap_realm(mdsc, realm);
if (first_realm)
ceph_put_snap_realm(mdsc, first_realm);
- pr_err("%s error %d\n", __func__, err);
+ pr_err_client(cl, "error %d\n", err);
/*
* When receiving a corrupted snap trace we don't know what
@@ -905,11 +920,12 @@ fail:
WRITE_ONCE(mdsc->fsc->mount_state, CEPH_MOUNT_FENCE_IO);
ret = ceph_monc_blocklist_add(&client->monc, &client->msgr.inst.addr);
if (ret)
- pr_err("%s failed to blocklist %s: %d\n", __func__,
- ceph_pr_addr(&client->msgr.inst.addr), ret);
+ pr_err_client(cl, "failed to blocklist %s: %d\n",
+ ceph_pr_addr(&client->msgr.inst.addr), ret);
- WARN(1, "%s: %s%sdo remount to continue%s",
- __func__, ret ? "" : ceph_pr_addr(&client->msgr.inst.addr),
+ WARN(1, "[client.%lld] %s %s%sdo remount to continue%s",
+ client->monc.auth->global_id, __func__,
+ ret ? "" : ceph_pr_addr(&client->msgr.inst.addr),
ret ? "" : " was blocklisted, ",
err == -EIO ? " after corrupted snaptrace is fixed" : "");
@@ -925,11 +941,12 @@ fail:
*/
static void flush_snaps(struct ceph_mds_client *mdsc)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *ci;
struct inode *inode;
struct ceph_mds_session *session = NULL;
- dout("%s\n", __func__);
+ doutc(cl, "begin\n");
spin_lock(&mdsc->snap_flush_lock);
while (!list_empty(&mdsc->snap_flush_list)) {
ci = list_first_entry(&mdsc->snap_flush_list,
@@ -944,7 +961,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
spin_unlock(&mdsc->snap_flush_lock);
ceph_put_mds_session(session);
- dout("%s done\n", __func__);
+ doutc(cl, "done\n");
}
/**
@@ -960,7 +977,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
void ceph_change_snap_realm(struct inode *inode, struct ceph_snap_realm *realm)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
struct ceph_snap_realm *oldrealm = ci->i_snap_realm;
lockdep_assert_held(&ci->i_ceph_lock);
@@ -1000,6 +1017,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
struct ceph_msg *msg)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct super_block *sb = mdsc->fsc->sb;
int mds = session->s_mds;
u64 split;
@@ -1030,8 +1048,8 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
trace_len = le32_to_cpu(h->trace_len);
p += sizeof(*h);
- dout("%s from mds%d op %s split %llx tracelen %d\n", __func__,
- mds, ceph_snap_op_name(op), split, trace_len);
+ doutc(cl, "from mds%d op %s split %llx tracelen %d\n", mds,
+ ceph_snap_op_name(op), split, trace_len);
down_write(&mdsc->snap_rwsem);
locked_rwsem = 1;
@@ -1062,7 +1080,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
goto out;
}
- dout("splitting snap_realm %llx %p\n", realm->ino, realm);
+ doutc(cl, "splitting snap_realm %llx %p\n", realm->ino, realm);
for (i = 0; i < num_split_inos; i++) {
struct ceph_vino vino = {
.ino = le64_to_cpu(split_inos[i]),
@@ -1087,13 +1105,13 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
*/
if (ci->i_snap_realm->created >
le64_to_cpu(ri->created)) {
- dout(" leaving %p %llx.%llx in newer realm %llx %p\n",
- inode, ceph_vinop(inode), ci->i_snap_realm->ino,
- ci->i_snap_realm);
+ doutc(cl, " leaving %p %llx.%llx in newer realm %llx %p\n",
+ inode, ceph_vinop(inode), ci->i_snap_realm->ino,
+ ci->i_snap_realm);
goto skip_inode;
}
- dout(" will move %p %llx.%llx to split realm %llx %p\n",
- inode, ceph_vinop(inode), realm->ino, realm);
+ doutc(cl, " will move %p %llx.%llx to split realm %llx %p\n",
+ inode, ceph_vinop(inode), realm->ino, realm);
ceph_get_snap_realm(mdsc, realm);
ceph_change_snap_realm(inode, realm);
@@ -1154,7 +1172,7 @@ skip_inode:
return;
bad:
- pr_err("%s corrupt snap message from mds%d\n", __func__, mds);
+ pr_err_client(cl, "corrupt snap message from mds%d\n", mds);
ceph_msg_dump(msg);
out:
if (locked_rwsem)
@@ -1170,6 +1188,7 @@ out:
struct ceph_snapid_map* ceph_get_snapid_map(struct ceph_mds_client *mdsc,
u64 snap)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_snapid_map *sm, *exist;
struct rb_node **p, *parent;
int ret;
@@ -1192,8 +1211,8 @@ struct ceph_snapid_map* ceph_get_snapid_map(struct ceph_mds_client *mdsc,
}
spin_unlock(&mdsc->snapid_map_lock);
if (exist) {
- dout("%s found snapid map %llx -> %x\n", __func__,
- exist->snap, exist->dev);
+ doutc(cl, "found snapid map %llx -> %x\n", exist->snap,
+ exist->dev);
return exist;
}
@@ -1237,13 +1256,12 @@ struct ceph_snapid_map* ceph_get_snapid_map(struct ceph_mds_client *mdsc,
if (exist) {
free_anon_bdev(sm->dev);
kfree(sm);
- dout("%s found snapid map %llx -> %x\n", __func__,
- exist->snap, exist->dev);
+ doutc(cl, "found snapid map %llx -> %x\n", exist->snap,
+ exist->dev);
return exist;
}
- dout("%s create snapid map %llx -> %x\n", __func__,
- sm->snap, sm->dev);
+ doutc(cl, "create snapid map %llx -> %x\n", sm->snap, sm->dev);
return sm;
}
@@ -1268,6 +1286,7 @@ void ceph_put_snapid_map(struct ceph_mds_client* mdsc,
void ceph_trim_snapid_map(struct ceph_mds_client *mdsc)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_snapid_map *sm;
unsigned long now;
LIST_HEAD(to_free);
@@ -1289,7 +1308,7 @@ void ceph_trim_snapid_map(struct ceph_mds_client *mdsc)
while (!list_empty(&to_free)) {
sm = list_first_entry(&to_free, struct ceph_snapid_map, lru);
list_del(&sm->lru);
- dout("trim snapid map %llx -> %x\n", sm->snap, sm->dev);
+ doutc(cl, "trim snapid map %llx -> %x\n", sm->snap, sm->dev);
free_anon_bdev(sm->dev);
kfree(sm);
}
@@ -1297,6 +1316,7 @@ void ceph_trim_snapid_map(struct ceph_mds_client *mdsc)
void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_snapid_map *sm;
struct rb_node *p;
LIST_HEAD(to_free);
@@ -1315,8 +1335,8 @@ void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc)
list_del(&sm->lru);
free_anon_bdev(sm->dev);
if (WARN_ON_ONCE(atomic_read(&sm->ref))) {
- pr_err("snapid map %llx -> %x still in use\n",
- sm->snap, sm->dev);
+ pr_err_client(cl, "snapid map %llx -> %x still in use\n",
+ sm->snap, sm->dev);
}
kfree(sm);
}
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 2d7f5a8d4a92..5ec102f6b1ac 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -44,28 +44,29 @@ static LIST_HEAD(ceph_fsc_list);
*/
static void ceph_put_super(struct super_block *s)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(s);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(s);
- dout("put_super\n");
+ doutc(fsc->client, "begin\n");
ceph_fscrypt_free_dummy_policy(fsc);
ceph_mdsc_close_sessions(fsc->mdsc);
+ doutc(fsc->client, "done\n");
}
static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry));
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(d_inode(dentry));
struct ceph_mon_client *monc = &fsc->client->monc;
struct ceph_statfs st;
int i, err;
u64 data_pool;
+ doutc(fsc->client, "begin\n");
if (fsc->mdsc->mdsmap->m_num_data_pg_pools == 1) {
data_pool = fsc->mdsc->mdsmap->m_data_pg_pools[0];
} else {
data_pool = CEPH_NOPOOL;
}
- dout("statfs\n");
err = ceph_monc_do_statfs(monc, data_pool, &st);
if (err < 0)
return err;
@@ -113,24 +114,26 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
/* fold the fs_cluster_id into the upper bits */
buf->f_fsid.val[1] = monc->fs_cluster_id;
+ doutc(fsc->client, "done\n");
return 0;
}
static int ceph_sync_fs(struct super_block *sb, int wait)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
+ struct ceph_client *cl = fsc->client;
if (!wait) {
- dout("sync_fs (non-blocking)\n");
+ doutc(cl, "(non-blocking)\n");
ceph_flush_dirty_caps(fsc->mdsc);
- dout("sync_fs (non-blocking) done\n");
+ doutc(cl, "(non-blocking) done\n");
return 0;
}
- dout("sync_fs (blocking)\n");
+ doutc(cl, "(blocking)\n");
ceph_osdc_sync(&fsc->client->osdc);
ceph_mdsc_sync(fsc->mdsc);
- dout("sync_fs (blocking) done\n");
+ doutc(cl, "(blocking) done\n");
return 0;
}
@@ -341,7 +344,7 @@ static int ceph_parse_source(struct fs_parameter *param, struct fs_context *fc)
char *dev_name = param->string, *dev_name_end;
int ret;
- dout("%s '%s'\n", __func__, dev_name);
+ dout("'%s'\n", dev_name);
if (!dev_name || !*dev_name)
return invalfc(fc, "Empty source");
@@ -413,7 +416,7 @@ static int ceph_parse_mount_param(struct fs_context *fc,
return ret;
token = fs_parse(fc, ceph_mount_parameters, param, &result);
- dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
+ dout("%s: fs_parse '%s' token %d\n",__func__, param->key, token);
if (token < 0)
return token;
@@ -684,7 +687,7 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
*/
static int ceph_show_options(struct seq_file *m, struct dentry *root)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(root->d_sb);
struct ceph_mount_options *fsopt = fsc->mount_options;
size_t pos;
int ret;
@@ -881,7 +884,7 @@ static void flush_fs_workqueues(struct ceph_fs_client *fsc)
static void destroy_fs_client(struct ceph_fs_client *fsc)
{
- dout("destroy_fs_client %p\n", fsc);
+ doutc(fsc->client, "%p\n", fsc);
spin_lock(&ceph_fsc_lock);
list_del(&fsc->metric_wakeup);
@@ -896,7 +899,7 @@ static void destroy_fs_client(struct ceph_fs_client *fsc)
ceph_destroy_client(fsc->client);
kfree(fsc);
- dout("destroy_fs_client %p done\n", fsc);
+ dout("%s: %p done\n", __func__, fsc);
}
/*
@@ -1015,9 +1018,9 @@ static void __ceph_umount_begin(struct ceph_fs_client *fsc)
*/
void ceph_umount_begin(struct super_block *sb)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
- dout("ceph_umount_begin - starting forced umount\n");
+ doutc(fsc->client, "starting forced umount\n");
if (!fsc)
return;
fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
@@ -1045,13 +1048,14 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
const char *path,
unsigned long started)
{
+ struct ceph_client *cl = fsc->client;
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req = NULL;
int err;
struct dentry *root;
/* open dir */
- dout("open_root_inode opening '%s'\n", path);
+ doutc(cl, "opening '%s'\n", path);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
if (IS_ERR(req))
return ERR_CAST(req);
@@ -1071,13 +1075,13 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
if (err == 0) {
struct inode *inode = req->r_target_inode;
req->r_target_inode = NULL;
- dout("open_root_inode success\n");
+ doutc(cl, "success\n");
root = d_make_root(inode);
if (!root) {
root = ERR_PTR(-ENOMEM);
goto out;
}
- dout("open_root_inode success, root dentry is %p\n", root);
+ doutc(cl, "success, root dentry is %p\n", root);
} else {
root = ERR_PTR(err);
}
@@ -1136,11 +1140,12 @@ static int ceph_apply_test_dummy_encryption(struct super_block *sb,
static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
struct fs_context *fc)
{
+ struct ceph_client *cl = fsc->client;
int err;
unsigned long started = jiffies; /* note the start time */
struct dentry *root;
- dout("mount start %p\n", fsc);
+ doutc(cl, "mount start %p\n", fsc);
mutex_lock(&fsc->client->mount_mutex);
if (!fsc->sb->s_root) {
@@ -1163,7 +1168,7 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
if (err)
goto out;
- dout("mount opening path '%s'\n", path);
+ doutc(cl, "mount opening path '%s'\n", path);
ceph_fs_debugfs_init(fsc);
@@ -1178,7 +1183,7 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
}
fsc->mount_state = CEPH_MOUNT_MOUNTED;
- dout("mount success\n");
+ doutc(cl, "mount success\n");
mutex_unlock(&fsc->client->mount_mutex);
return root;
@@ -1191,9 +1196,10 @@ out:
static int ceph_set_super(struct super_block *s, struct fs_context *fc)
{
struct ceph_fs_client *fsc = s->s_fs_info;
+ struct ceph_client *cl = fsc->client;
int ret;
- dout("set_super %p\n", s);
+ doutc(cl, "%p\n", s);
s->s_maxbytes = MAX_LFS_FILESIZE;
@@ -1226,31 +1232,32 @@ static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
struct ceph_fs_client *new = fc->s_fs_info;
struct ceph_mount_options *fsopt = new->mount_options;
struct ceph_options *opt = new->client->options;
- struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
+ struct ceph_client *cl = fsc->client;
- dout("ceph_compare_super %p\n", sb);
+ doutc(cl, "%p\n", sb);
if (compare_mount_options(fsopt, opt, fsc)) {
- dout("monitor(s)/mount options don't match\n");
+ doutc(cl, "monitor(s)/mount options don't match\n");
return 0;
}
if ((opt->flags & CEPH_OPT_FSID) &&
ceph_fsid_compare(&opt->fsid, &fsc->client->fsid)) {
- dout("fsid doesn't match\n");
+ doutc(cl, "fsid doesn't match\n");
return 0;
}
if (fc->sb_flags != (sb->s_flags & ~SB_BORN)) {
- dout("flags differ\n");
+ doutc(cl, "flags differ\n");
return 0;
}
if (fsc->blocklisted && !ceph_test_mount_opt(fsc, CLEANRECOVER)) {
- dout("client is blocklisted (and CLEANRECOVER is not set)\n");
+ doutc(cl, "client is blocklisted (and CLEANRECOVER is not set)\n");
return 0;
}
if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) {
- dout("client has been forcibly unmounted\n");
+ doutc(cl, "client has been forcibly unmounted\n");
return 0;
}
@@ -1322,9 +1329,9 @@ static int ceph_get_tree(struct fs_context *fc)
goto out;
}
- if (ceph_sb_to_client(sb) != fsc) {
+ if (ceph_sb_to_fs_client(sb) != fsc) {
destroy_fs_client(fsc);
- fsc = ceph_sb_to_client(sb);
+ fsc = ceph_sb_to_fs_client(sb);
dout("get_sb got existing client %p\n", fsc);
} else {
dout("get_sb using new client %p\n", fsc);
@@ -1338,8 +1345,9 @@ static int ceph_get_tree(struct fs_context *fc)
err = PTR_ERR(res);
goto out_splat;
}
- dout("root %p inode %p ino %llx.%llx\n", res,
- d_inode(res), ceph_vinop(d_inode(res)));
+
+ doutc(fsc->client, "root %p inode %p ino %llx.%llx\n", res,
+ d_inode(res), ceph_vinop(d_inode(res)));
fc->root = fsc->sb->s_root;
return 0;
@@ -1377,7 +1385,7 @@ static int ceph_reconfigure_fc(struct fs_context *fc)
struct ceph_parse_opts_ctx *pctx = fc->fs_private;
struct ceph_mount_options *fsopt = pctx->opts;
struct super_block *sb = fc->root->d_sb;
- struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
err = ceph_apply_test_dummy_encryption(sb, fc, fsopt);
if (err)
@@ -1397,7 +1405,8 @@ static int ceph_reconfigure_fc(struct fs_context *fc)
kfree(fsc->mount_options->mon_addr);
fsc->mount_options->mon_addr = fsopt->mon_addr;
fsopt->mon_addr = NULL;
- pr_notice("ceph: monitor addresses recorded, but not used for reconnection");
+ pr_notice_client(fsc->client,
+ "monitor addresses recorded, but not used for reconnection");
}
sync_filesystem(sb);
@@ -1516,11 +1525,12 @@ void ceph_dec_osd_stopping_blocker(struct ceph_mds_client *mdsc)
static void ceph_kill_sb(struct super_block *s)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(s);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(s);
+ struct ceph_client *cl = fsc->client;
struct ceph_mds_client *mdsc = fsc->mdsc;
bool wait;
- dout("kill_sb %p\n", s);
+ doutc(cl, "%p\n", s);
ceph_mdsc_pre_umount(mdsc);
flush_fs_workqueues(fsc);
@@ -1551,9 +1561,9 @@ static void ceph_kill_sb(struct super_block *s)
&mdsc->stopping_waiter,
fsc->client->options->mount_timeout);
if (!timeleft) /* timed out */
- pr_warn("umount timed out, %ld\n", timeleft);
+ pr_warn_client(cl, "umount timed out, %ld\n", timeleft);
else if (timeleft < 0) /* killed */
- pr_warn("umount was killed, %ld\n", timeleft);
+ pr_warn_client(cl, "umount was killed, %ld\n", timeleft);
}
mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHED;
@@ -1572,13 +1582,13 @@ static struct file_system_type ceph_fs_type = {
.name = "ceph",
.init_fs_context = ceph_init_fs_context,
.kill_sb = ceph_kill_sb,
- .fs_flags = FS_RENAME_DOES_D_MOVE,
+ .fs_flags = FS_RENAME_DOES_D_MOVE | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("ceph");
int ceph_force_reconnect(struct super_block *sb)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
int err = 0;
fsc->mount_state = CEPH_MOUNT_RECOVER;
@@ -1671,6 +1681,11 @@ static const struct kernel_param_ops param_ops_mount_syntax = {
module_param_cb(mount_syntax_v1, &param_ops_mount_syntax, &mount_support, 0444);
module_param_cb(mount_syntax_v2, &param_ops_mount_syntax, &mount_support, 0444);
+bool enable_unsafe_idmap = false;
+module_param(enable_unsafe_idmap, bool, 0644);
+MODULE_PARM_DESC(enable_unsafe_idmap,
+ "Allow to use idmapped mounts with MDS without CEPHFS_FEATURE_HAS_OWNER_UIDGID");
+
module_init(init_ceph);
module_exit(exit_ceph);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 98844fc8a2f7..fe0f64a0acb2 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -488,13 +488,13 @@ ceph_inode(const struct inode *inode)
}
static inline struct ceph_fs_client *
-ceph_inode_to_client(const struct inode *inode)
+ceph_inode_to_fs_client(const struct inode *inode)
{
return (struct ceph_fs_client *)inode->i_sb->s_fs_info;
}
static inline struct ceph_fs_client *
-ceph_sb_to_client(const struct super_block *sb)
+ceph_sb_to_fs_client(const struct super_block *sb)
{
return (struct ceph_fs_client *)sb->s_fs_info;
}
@@ -502,7 +502,13 @@ ceph_sb_to_client(const struct super_block *sb)
static inline struct ceph_mds_client *
ceph_sb_to_mdsc(const struct super_block *sb)
{
- return (struct ceph_mds_client *)ceph_sb_to_client(sb)->mdsc;
+ return (struct ceph_mds_client *)ceph_sb_to_fs_client(sb)->mdsc;
+}
+
+static inline struct ceph_client *
+ceph_inode_to_client(const struct inode *inode)
+{
+ return (struct ceph_client *)ceph_inode_to_fs_client(inode)->client;
}
static inline struct ceph_vino
@@ -558,7 +564,7 @@ static inline u64 ceph_snap(struct inode *inode)
*/
static inline u64 ceph_present_ino(struct super_block *sb, u64 ino)
{
- if (unlikely(ceph_test_mount_opt(ceph_sb_to_client(sb), INO32)))
+ if (unlikely(ceph_test_mount_opt(ceph_sb_to_fs_client(sb), INO32)))
return ceph_ino_to_ino32(ino);
return ino;
}
@@ -1094,8 +1100,8 @@ struct ceph_iattr {
struct ceph_fscrypt_auth *fscrypt_auth;
};
-extern int __ceph_setattr(struct inode *inode, struct iattr *attr,
- struct ceph_iattr *cia);
+extern int __ceph_setattr(struct mnt_idmap *idmap, struct inode *inode,
+ struct iattr *attr, struct ceph_iattr *cia);
extern int ceph_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *attr);
extern int ceph_getattr(struct mnt_idmap *idmap,
@@ -1106,7 +1112,7 @@ void ceph_inode_shutdown(struct inode *inode);
static inline bool ceph_inode_is_shutdown(struct inode *inode)
{
unsigned long flags = READ_ONCE(ceph_inode(inode)->i_ceph_flags);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
int state = READ_ONCE(fsc->mount_state);
return (flags & CEPH_I_SHUTDOWN) || state >= CEPH_MOUNT_SHUTDOWN;
@@ -1223,7 +1229,8 @@ extern void ceph_add_cap(struct inode *inode,
unsigned cap, unsigned seq, u64 realmino, int flags,
struct ceph_cap **new_cap);
extern void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
-extern void ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
+extern void ceph_remove_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
+ bool queue_release);
extern void __ceph_remove_caps(struct ceph_inode_info *ci);
extern void ceph_put_cap(struct ceph_mds_client *mdsc,
struct ceph_cap *cap);
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 097ce7f74073..e066a556eccb 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -57,7 +57,8 @@ static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
static ssize_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
size_t size)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
+ struct ceph_client *cl = fsc->client;
struct ceph_osd_client *osdc = &fsc->client->osdc;
struct ceph_string *pool_ns;
s64 pool = ci->i_layout.pool_id;
@@ -69,7 +70,7 @@ static ssize_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
- dout("ceph_vxattrcb_layout %p\n", &ci->netfs.inode);
+ doutc(cl, "%p\n", &ci->netfs.inode);
down_read(&osdc->lock);
pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
if (pool_name) {
@@ -161,7 +162,7 @@ static ssize_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
char *val, size_t size)
{
ssize_t ret;
- struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
struct ceph_osd_client *osdc = &fsc->client->osdc;
s64 pool = ci->i_layout.pool_id;
const char *pool_name;
@@ -313,7 +314,7 @@ static ssize_t ceph_vxattrcb_snap_btime(struct ceph_inode_info *ci, char *val,
static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
char *val, size_t size)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
return ceph_fmt_xattr(val, size, "%pU", &fsc->client->fsid);
}
@@ -321,7 +322,7 @@ static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
static ssize_t ceph_vxattrcb_client_id(struct ceph_inode_info *ci,
char *val, size_t size)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
return ceph_fmt_xattr(val, size, "client%lld",
ceph_client_gid(fsc->client));
@@ -570,6 +571,8 @@ static int __set_xattr(struct ceph_inode_info *ci,
int flags, int update_xattr,
struct ceph_inode_xattr **newxattr)
{
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct rb_node **p;
struct rb_node *parent = NULL;
struct ceph_inode_xattr *xattr = NULL;
@@ -626,7 +629,7 @@ static int __set_xattr(struct ceph_inode_info *ci,
xattr->should_free_name = update_xattr;
ci->i_xattrs.count++;
- dout("%s count=%d\n", __func__, ci->i_xattrs.count);
+ doutc(cl, "count=%d\n", ci->i_xattrs.count);
} else {
kfree(*newxattr);
*newxattr = NULL;
@@ -654,13 +657,13 @@ static int __set_xattr(struct ceph_inode_info *ci,
if (new) {
rb_link_node(&xattr->node, parent, p);
rb_insert_color(&xattr->node, &ci->i_xattrs.index);
- dout("%s p=%p\n", __func__, p);
+ doutc(cl, "p=%p\n", p);
}
- dout("%s added %llx.%llx xattr %p %.*s=%.*s%s\n", __func__,
- ceph_vinop(&ci->netfs.inode), xattr, name_len, name,
- min(val_len, MAX_XATTR_VAL_PRINT_LEN), val,
- val_len > MAX_XATTR_VAL_PRINT_LEN ? "..." : "");
+ doutc(cl, "added %p %llx.%llx xattr %p %.*s=%.*s%s\n", inode,
+ ceph_vinop(inode), xattr, name_len, name, min(val_len,
+ MAX_XATTR_VAL_PRINT_LEN), val,
+ val_len > MAX_XATTR_VAL_PRINT_LEN ? "..." : "");
return 0;
}
@@ -668,6 +671,7 @@ static int __set_xattr(struct ceph_inode_info *ci,
static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
const char *name)
{
+ struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
struct rb_node **p;
struct rb_node *parent = NULL;
struct ceph_inode_xattr *xattr = NULL;
@@ -688,13 +692,13 @@ static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
else {
int len = min(xattr->val_len, MAX_XATTR_VAL_PRINT_LEN);
- dout("%s %s: found %.*s%s\n", __func__, name, len,
- xattr->val, xattr->val_len > len ? "..." : "");
+ doutc(cl, "%s found %.*s%s\n", name, len, xattr->val,
+ xattr->val_len > len ? "..." : "");
return xattr;
}
}
- dout("%s %s: not found\n", __func__, name);
+ doutc(cl, "%s not found\n", name);
return NULL;
}
@@ -735,19 +739,20 @@ static int __remove_xattr(struct ceph_inode_info *ci,
static char *__copy_xattr_names(struct ceph_inode_info *ci,
char *dest)
{
+ struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
struct rb_node *p;
struct ceph_inode_xattr *xattr = NULL;
p = rb_first(&ci->i_xattrs.index);
- dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);
+ doutc(cl, "count=%d\n", ci->i_xattrs.count);
while (p) {
xattr = rb_entry(p, struct ceph_inode_xattr, node);
memcpy(dest, xattr->name, xattr->name_len);
dest[xattr->name_len] = '\0';
- dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
- xattr->name_len, ci->i_xattrs.names_size);
+ doutc(cl, "dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
+ xattr->name_len, ci->i_xattrs.names_size);
dest += xattr->name_len + 1;
p = rb_next(p);
@@ -758,19 +763,19 @@ static char *__copy_xattr_names(struct ceph_inode_info *ci,
void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
{
+ struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
struct rb_node *p, *tmp;
struct ceph_inode_xattr *xattr = NULL;
p = rb_first(&ci->i_xattrs.index);
- dout("__ceph_destroy_xattrs p=%p\n", p);
+ doutc(cl, "p=%p\n", p);
while (p) {
xattr = rb_entry(p, struct ceph_inode_xattr, node);
tmp = p;
p = rb_next(tmp);
- dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
- xattr->name_len, xattr->name);
+ doutc(cl, "next p=%p (%.*s)\n", p, xattr->name_len, xattr->name);
rb_erase(tmp, &ci->i_xattrs.index);
__free_xattr(xattr);
@@ -787,6 +792,7 @@ static int __build_xattrs(struct inode *inode)
__releases(ci->i_ceph_lock)
__acquires(ci->i_ceph_lock)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
u32 namelen;
u32 numattr = 0;
void *p, *end;
@@ -798,8 +804,8 @@ static int __build_xattrs(struct inode *inode)
int err = 0;
int i;
- dout("__build_xattrs() len=%d\n",
- ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);
+ doutc(cl, "len=%d\n",
+ ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);
if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
return 0; /* already built */
@@ -874,6 +880,8 @@ bad:
static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
int val_size)
{
+ struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
+
/*
* 4 bytes for the length, and additional 4 bytes per each xattr name,
* 4 bytes per each value
@@ -881,9 +889,8 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
int size = 4 + ci->i_xattrs.count*(4 + 4) +
ci->i_xattrs.names_size +
ci->i_xattrs.vals_size;
- dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
- ci->i_xattrs.count, ci->i_xattrs.names_size,
- ci->i_xattrs.vals_size);
+ doutc(cl, "c=%d names.size=%d vals.size=%d\n", ci->i_xattrs.count,
+ ci->i_xattrs.names_size, ci->i_xattrs.vals_size);
if (name_size)
size += 4 + 4 + name_size + val_size;
@@ -899,12 +906,14 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
*/
struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
{
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct rb_node *p;
struct ceph_inode_xattr *xattr = NULL;
struct ceph_buffer *old_blob = NULL;
void *dest;
- dout("__build_xattrs_blob %p\n", &ci->netfs.inode);
+ doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
if (ci->i_xattrs.dirty) {
int need = __get_required_blob_size(ci, 0, 0);
@@ -962,6 +971,7 @@ static inline int __get_request_mask(struct inode *in) {
ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
size_t size)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_inode_xattr *xattr;
struct ceph_vxattr *vxattr;
@@ -1000,8 +1010,9 @@ handle_non_vxattrs:
req_mask = __get_request_mask(inode);
spin_lock(&ci->i_ceph_lock);
- dout("getxattr %p name '%s' ver=%lld index_ver=%lld\n", inode, name,
- ci->i_xattrs.version, ci->i_xattrs.index_version);
+ doutc(cl, "%p %llx.%llx name '%s' ver=%lld index_ver=%lld\n", inode,
+ ceph_vinop(inode), name, ci->i_xattrs.version,
+ ci->i_xattrs.index_version);
if (ci->i_xattrs.version == 0 ||
!((req_mask & CEPH_CAP_XATTR_SHARED) ||
@@ -1010,8 +1021,9 @@ handle_non_vxattrs:
/* security module gets xattr while filling trace */
if (current->journal_info) {
- pr_warn_ratelimited("sync getxattr %p "
- "during filling trace\n", inode);
+ pr_warn_ratelimited_client(cl,
+ "sync %p %llx.%llx during filling trace\n",
+ inode, ceph_vinop(inode));
return -EBUSY;
}
@@ -1053,14 +1065,16 @@ out:
ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
{
struct inode *inode = d_inode(dentry);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
bool len_only = (size == 0);
u32 namelen;
int err;
spin_lock(&ci->i_ceph_lock);
- dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
- ci->i_xattrs.version, ci->i_xattrs.index_version);
+ doutc(cl, "%p %llx.%llx ver=%lld index_ver=%lld\n", inode,
+ ceph_vinop(inode), ci->i_xattrs.version,
+ ci->i_xattrs.index_version);
if (ci->i_xattrs.version == 0 ||
!__ceph_caps_issued_mask_metric(ci, CEPH_CAP_XATTR_SHARED, 1)) {
@@ -1094,7 +1108,8 @@ out:
static int ceph_sync_setxattr(struct inode *inode, const char *name,
const char *value, size_t size, int flags)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_request *req;
struct ceph_mds_client *mdsc = fsc->mdsc;
@@ -1119,7 +1134,7 @@ static int ceph_sync_setxattr(struct inode *inode, const char *name,
flags |= CEPH_XATTR_REMOVE;
}
- dout("setxattr value size: %zu\n", size);
+ doutc(cl, "name %s value size %zu\n", name, size);
/* do request */
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
@@ -1148,10 +1163,10 @@ static int ceph_sync_setxattr(struct inode *inode, const char *name,
req->r_num_caps = 1;
req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
- dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
+ doutc(cl, "xattr.ver (before): %lld\n", ci->i_xattrs.version);
err = ceph_mdsc_do_request(mdsc, NULL, req);
ceph_mdsc_put_request(req);
- dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);
+ doutc(cl, "xattr.ver (after): %lld\n", ci->i_xattrs.version);
out:
if (pagelist)
@@ -1162,9 +1177,10 @@ out:
int __ceph_setxattr(struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_vxattr *vxattr;
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
struct ceph_cap_flush *prealloc_cf = NULL;
struct ceph_buffer *old_blob = NULL;
int issued;
@@ -1220,9 +1236,9 @@ retry:
required_blob_size = __get_required_blob_size(ci, name_len, val_len);
if ((ci->i_xattrs.version == 0) || !(issued & CEPH_CAP_XATTR_EXCL) ||
(required_blob_size > mdsc->mdsmap->m_max_xattr_size)) {
- dout("%s do sync setxattr: version: %llu size: %d max: %llu\n",
- __func__, ci->i_xattrs.version, required_blob_size,
- mdsc->mdsmap->m_max_xattr_size);
+ doutc(cl, "sync version: %llu size: %d max: %llu\n",
+ ci->i_xattrs.version, required_blob_size,
+ mdsc->mdsmap->m_max_xattr_size);
goto do_sync;
}
@@ -1236,8 +1252,8 @@ retry:
}
}
- dout("setxattr %p name '%s' issued %s\n", inode, name,
- ceph_cap_string(issued));
+ doutc(cl, "%p %llx.%llx name '%s' issued %s\n", inode,
+ ceph_vinop(inode), name, ceph_cap_string(issued));
__build_xattrs(inode);
if (!ci->i_xattrs.prealloc_blob ||
@@ -1246,7 +1262,8 @@ retry:
spin_unlock(&ci->i_ceph_lock);
ceph_buffer_put(old_blob); /* Shouldn't be required */
- dout(" pre-allocating new blob size=%d\n", required_blob_size);
+ doutc(cl, " pre-allocating new blob size=%d\n",
+ required_blob_size);
blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
if (!blob)
goto do_sync_unlocked;
@@ -1285,8 +1302,9 @@ do_sync_unlocked:
/* security module set xattr while filling trace */
if (current->journal_info) {
- pr_warn_ratelimited("sync setxattr %p "
- "during filling trace\n", inode);
+ pr_warn_ratelimited_client(cl,
+ "sync %p %llx.%llx during filling trace\n",
+ inode, ceph_vinop(inode));
err = -EBUSY;
} else {
err = ceph_sync_setxattr(inode, name, value, size, flags);
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 87b3753aa4b1..c45e8c2d62e1 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -939,7 +939,7 @@ static ssize_t debugfs_write_file_str(struct file *file, const char __user *user
new[pos + count] = '\0';
strim(new);
- rcu_assign_pointer(*(char **)file->private_data, new);
+ rcu_assign_pointer(*(char __rcu **)file->private_data, new);
synchronize_rcu();
kfree(old);
diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
index 76dd3c7295d9..91290fe4a70b 100644
--- a/fs/efivarfs/inode.c
+++ b/fs/efivarfs/inode.c
@@ -21,8 +21,12 @@ struct inode *efivarfs_get_inode(struct super_block *sb,
dev_t dev, bool is_removable)
{
struct inode *inode = new_inode(sb);
+ struct efivarfs_fs_info *fsi = sb->s_fs_info;
+ struct efivarfs_mount_opts *opts = &fsi->mount_opts;
if (inode) {
+ inode->i_uid = opts->uid;
+ inode->i_gid = opts->gid;
inode->i_ino = get_next_ino();
inode->i_mode = mode;
simple_inode_init_ts(inode);
diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h
index 8ebf3a6a8aa2..c66647f5c0bd 100644
--- a/fs/efivarfs/internal.h
+++ b/fs/efivarfs/internal.h
@@ -9,6 +9,15 @@
#include <linux/list.h>
#include <linux/efi.h>
+struct efivarfs_mount_opts {
+ kuid_t uid;
+ kgid_t gid;
+};
+
+struct efivarfs_fs_info {
+ struct efivarfs_mount_opts mount_opts;
+};
+
struct efi_variable {
efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)];
efi_guid_t VendorGuid;
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index 996271473609..77240953a92e 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -8,6 +8,7 @@
#include <linux/efi.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/ucs2_string.h>
@@ -24,12 +25,28 @@ static void efivarfs_evict_inode(struct inode *inode)
clear_inode(inode);
}
+static int efivarfs_show_options(struct seq_file *m, struct dentry *root)
+{
+ struct super_block *sb = root->d_sb;
+ struct efivarfs_fs_info *sbi = sb->s_fs_info;
+ struct efivarfs_mount_opts *opts = &sbi->mount_opts;
+
+ if (!uid_eq(opts->uid, GLOBAL_ROOT_UID))
+ seq_printf(m, ",uid=%u",
+ from_kuid_munged(&init_user_ns, opts->uid));
+ if (!gid_eq(opts->gid, GLOBAL_ROOT_GID))
+ seq_printf(m, ",gid=%u",
+ from_kgid_munged(&init_user_ns, opts->gid));
+ return 0;
+}
+
static int efivarfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
const u32 attr = EFI_VARIABLE_NON_VOLATILE |
EFI_VARIABLE_BOOTSERVICE_ACCESS |
EFI_VARIABLE_RUNTIME_ACCESS;
u64 storage_space, remaining_space, max_variable_size;
+ u64 id = huge_encode_dev(dentry->d_sb->s_dev);
efi_status_t status;
/* Some UEFI firmware does not implement QueryVariableInfo() */
@@ -53,6 +70,7 @@ static int efivarfs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_blocks = storage_space;
buf->f_bfree = remaining_space;
buf->f_type = dentry->d_sb->s_magic;
+ buf->f_fsid = u64_to_fsid(id);
/*
* In f_bavail we declare the free space that the kernel will allow writing
@@ -70,6 +88,7 @@ static const struct super_operations efivarfs_ops = {
.statfs = efivarfs_statfs,
.drop_inode = generic_delete_inode,
.evict_inode = efivarfs_evict_inode,
+ .show_options = efivarfs_show_options,
};
/*
@@ -231,6 +250,45 @@ static int efivarfs_destroy(struct efivar_entry *entry, void *data)
return 0;
}
+enum {
+ Opt_uid, Opt_gid,
+};
+
+static const struct fs_parameter_spec efivarfs_parameters[] = {
+ fsparam_u32("uid", Opt_uid),
+ fsparam_u32("gid", Opt_gid),
+ {},
+};
+
+static int efivarfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct efivarfs_fs_info *sbi = fc->s_fs_info;
+ struct efivarfs_mount_opts *opts = &sbi->mount_opts;
+ struct fs_parse_result result;
+ int opt;
+
+ opt = fs_parse(fc, efivarfs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_uid:
+ opts->uid = make_kuid(current_user_ns(), result.uint_32);
+ if (!uid_valid(opts->uid))
+ return -EINVAL;
+ break;
+ case Opt_gid:
+ opts->gid = make_kgid(current_user_ns(), result.uint_32);
+ if (!gid_valid(opts->gid))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int efivarfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct inode *inode = NULL;
@@ -277,10 +335,21 @@ static int efivarfs_get_tree(struct fs_context *fc)
static const struct fs_context_operations efivarfs_context_ops = {
.get_tree = efivarfs_get_tree,
+ .parse_param = efivarfs_parse_param,
};
static int efivarfs_init_fs_context(struct fs_context *fc)
{
+ struct efivarfs_fs_info *sfi;
+
+ sfi = kzalloc(sizeof(*sfi), GFP_KERNEL);
+ if (!sfi)
+ return -ENOMEM;
+
+ sfi->mount_opts.uid = GLOBAL_ROOT_UID;
+ sfi->mount_opts.gid = GLOBAL_ROOT_GID;
+
+ fc->s_fs_info = sfi;
fc->ops = &efivarfs_context_ops;
return 0;
}
@@ -301,6 +370,7 @@ static struct file_system_type efivarfs_type = {
.name = "efivarfs",
.init_fs_context = efivarfs_init_fs_context,
.kill_sb = efivarfs_kill_sb,
+ .parameters = efivarfs_parameters,
};
static __init int efivarfs_init(void)
diff --git a/fs/efs/super.c b/fs/efs/super.c
index b287f47c165b..f17fdac76b2e 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -123,6 +123,7 @@ static const struct super_operations efs_superblock_operations = {
};
static const struct export_operations efs_export_ops = {
+ .encode_fh = generic_encode_ino32_fh,
.fh_to_dentry = efs_fh_to_dentry,
.fh_to_parent = efs_fh_to_parent,
.get_parent = efs_get_parent,
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index 976dc39a88f7..3789d6224513 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -567,6 +567,7 @@ static struct dentry *erofs_get_parent(struct dentry *child)
}
static const struct export_operations erofs_export_ops = {
+ .encode_fh = generic_encode_ino32_fh,
.fh_to_dentry = erofs_fh_to_dentry,
.fh_to_parent = erofs_fh_to_parent,
.get_parent = erofs_get_parent,
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
index 02c4e2937879..bfdfafe00993 100644
--- a/fs/exfat/file.c
+++ b/fs/exfat/file.c
@@ -295,6 +295,7 @@ int exfat_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
if (attr->ia_valid & ATTR_SIZE)
inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ setattr_copy(&nop_mnt_idmap, inode, attr);
exfat_truncate_inode_atime(inode);
if (attr->ia_valid & ATTR_SIZE) {
diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c
index 875234179d1f..e7ff58b8e68c 100644
--- a/fs/exfat/inode.c
+++ b/fs/exfat/inode.c
@@ -56,18 +56,18 @@ int __exfat_write_inode(struct inode *inode, int sync)
&ep->dentry.file.create_time,
&ep->dentry.file.create_date,
&ep->dentry.file.create_time_cs);
+ ts = inode_get_mtime(inode);
exfat_set_entry_time(sbi, &ts,
&ep->dentry.file.modify_tz,
&ep->dentry.file.modify_time,
&ep->dentry.file.modify_date,
&ep->dentry.file.modify_time_cs);
- inode_set_mtime_to_ts(inode, ts);
+ ts = inode_get_atime(inode);
exfat_set_entry_time(sbi, &ts,
&ep->dentry.file.access_tz,
&ep->dentry.file.access_time,
&ep->dentry.file.access_date,
NULL);
- inode_set_atime_to_ts(inode, ts);
/* File size should be zero if there is no cluster allocated */
on_disk_size = i_size_read(inode);
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index c20704aa21b3..3ae0154c5680 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -342,43 +342,30 @@ out:
return error;
}
+#define FILEID_INO64_GEN_LEN 3
+
/**
- * export_encode_fh - default export_operations->encode_fh function
+ * exportfs_encode_ino64_fid - encode non-decodeable 64bit ino file id
* @inode: the object to encode
* @fid: where to store the file handle fragment
- * @max_len: maximum length to store there
- * @parent: parent directory inode, if wanted
+ * @max_len: maximum length to store there (in 4 byte units)
*
- * This default encode_fh function assumes that the 32 inode number
- * is suitable for locating an inode, and that the generation number
- * can be used to check that it is still valid. It places them in the
- * filehandle fragment where export_decode_fh expects to find them.
+ * This generic function is used to encode a non-decodeable file id for
+ * fanotify for filesystems that do not support NFS export.
*/
-static int export_encode_fh(struct inode *inode, struct fid *fid,
- int *max_len, struct inode *parent)
+static int exportfs_encode_ino64_fid(struct inode *inode, struct fid *fid,
+ int *max_len)
{
- int len = *max_len;
- int type = FILEID_INO32_GEN;
-
- if (parent && (len < 4)) {
- *max_len = 4;
- return FILEID_INVALID;
- } else if (len < 2) {
- *max_len = 2;
+ if (*max_len < FILEID_INO64_GEN_LEN) {
+ *max_len = FILEID_INO64_GEN_LEN;
return FILEID_INVALID;
}
- len = 2;
- fid->i32.ino = inode->i_ino;
- fid->i32.gen = inode->i_generation;
- if (parent) {
- fid->i32.parent_ino = parent->i_ino;
- fid->i32.parent_gen = parent->i_generation;
- len = 4;
- type = FILEID_INO32_GEN_PARENT;
- }
- *max_len = len;
- return type;
+ fid->i64.ino = inode->i_ino;
+ fid->i64.gen = inode->i_generation;
+ *max_len = FILEID_INO64_GEN_LEN;
+
+ return FILEID_INO64_GEN;
}
/**
@@ -396,17 +383,13 @@ int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid,
{
const struct export_operations *nop = inode->i_sb->s_export_op;
- /*
- * If a decodeable file handle was requested, we need to make sure that
- * filesystem can decode file handles.
- */
- if (nop && !(flags & EXPORT_FH_FID) && !nop->fh_to_dentry)
+ if (!exportfs_can_encode_fh(nop, flags))
return -EOPNOTSUPP;
- if (nop && nop->encode_fh)
- return nop->encode_fh(inode, fid->raw, max_len, parent);
+ if (!nop && (flags & EXPORT_FH_FID))
+ return exportfs_encode_ino64_fid(inode, fid, max_len);
- return export_encode_fh(inode, fid, max_len, parent);
+ return nop->encode_fh(inode, fid->raw, max_len, parent);
}
EXPORT_SYMBOL_GPL(exportfs_encode_inode_fh);
@@ -456,7 +439,7 @@ exportfs_decode_fh_raw(struct vfsmount *mnt, struct fid *fid, int fh_len,
/*
* Try to get any dentry for the given file handle from the filesystem.
*/
- if (!nop || !nop->fh_to_dentry)
+ if (!exportfs_can_decode_fh(nop))
return ERR_PTR(-ESTALE);
result = nop->fh_to_dentry(mnt->mnt_sb, fid, fh_len, fileid_type);
if (IS_ERR_OR_NULL(result))
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 645ee6142f69..01f9addc8b1f 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -397,6 +397,7 @@ static struct dentry *ext2_fh_to_parent(struct super_block *sb, struct fid *fid,
}
static const struct export_operations ext2_export_ops = {
+ .encode_fh = generic_encode_ino32_fh,
.fh_to_dentry = ext2_fh_to_dentry,
.fh_to_parent = ext2_fh_to_parent,
.get_parent = ext2_get_parent,
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 54a9dde7483a..c5fcf377ab1f 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1654,6 +1654,7 @@ static const struct super_operations ext4_sops = {
};
static const struct export_operations ext4_export_ops = {
+ .encode_fh = generic_encode_ino32_fh,
.fh_to_dentry = ext4_fh_to_dentry,
.fh_to_parent = ext4_fh_to_parent,
.get_parent = ext4_get_parent,
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index d820801f473e..36e5dab6baae 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -893,14 +893,15 @@ static bool cluster_has_invalid_data(struct compress_ctx *cc)
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
{
+#ifdef CONFIG_F2FS_CHECK_FS
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
- bool compressed = dn->data_blkaddr == COMPRESS_ADDR;
int cluster_end = 0;
+ unsigned int count;
int i;
char *reason = "";
- if (!compressed)
+ if (dn->data_blkaddr != COMPRESS_ADDR)
return false;
/* [..., COMPR_ADDR, ...] */
@@ -909,7 +910,7 @@ bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
goto out;
}
- for (i = 1; i < cluster_size; i++) {
+ for (i = 1, count = 1; i < cluster_size; i++, count++) {
block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
dn->ofs_in_node + i);
@@ -929,19 +930,42 @@ bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
goto out;
}
}
+
+ f2fs_bug_on(F2FS_I_SB(dn->inode), count != cluster_size &&
+ !is_inode_flag_set(dn->inode, FI_COMPRESS_RELEASED));
+
return false;
out:
f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s",
dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason);
set_sbi_flag(sbi, SBI_NEED_FSCK);
return true;
+#else
+ return false;
+#endif
+}
+
+static int __f2fs_get_cluster_blocks(struct inode *inode,
+ struct dnode_of_data *dn)
+{
+ unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
+ int count, i;
+
+ for (i = 1, count = 1; i < cluster_size; i++) {
+ block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ dn->ofs_in_node + i);
+
+ if (__is_valid_data_blkaddr(blkaddr))
+ count++;
+ }
+
+ return count;
}
static int __f2fs_cluster_blocks(struct inode *inode,
- unsigned int cluster_idx, bool compr)
+ unsigned int cluster_idx, bool compr_blks)
{
struct dnode_of_data dn;
- unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
unsigned int start_idx = cluster_idx <<
F2FS_I(inode)->i_log_cluster_size;
int ret;
@@ -956,31 +980,14 @@ static int __f2fs_cluster_blocks(struct inode *inode,
if (f2fs_sanity_check_cluster(&dn)) {
ret = -EFSCORRUPTED;
- f2fs_handle_error(F2FS_I_SB(inode), ERROR_CORRUPTED_CLUSTER);
goto fail;
}
if (dn.data_blkaddr == COMPRESS_ADDR) {
- int i;
-
- ret = 1;
- for (i = 1; i < cluster_size; i++) {
- block_t blkaddr;
-
- blkaddr = data_blkaddr(dn.inode,
- dn.node_page, dn.ofs_in_node + i);
- if (compr) {
- if (__is_valid_data_blkaddr(blkaddr))
- ret++;
- } else {
- if (blkaddr != NULL_ADDR)
- ret++;
- }
- }
-
- f2fs_bug_on(F2FS_I_SB(inode),
- !compr && ret != cluster_size &&
- !is_inode_flag_set(inode, FI_COMPRESS_RELEASED));
+ if (compr_blks)
+ ret = __f2fs_get_cluster_blocks(inode, &dn);
+ else
+ ret = 1;
}
fail:
f2fs_put_dnode(&dn);
@@ -993,7 +1000,7 @@ static int f2fs_compressed_blocks(struct compress_ctx *cc)
return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
}
-/* return # of valid blocks in compressed cluster */
+/* return whether the cluster is a compressed one or not */
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
return __f2fs_cluster_blocks(inode,
@@ -1976,7 +1983,7 @@ void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
{
dev_t dev = sbi->sb->s_bdev->bd_dev;
- char slab_name[32];
+ char slab_name[35];
if (!f2fs_sb_has_compression(sbi))
return 0;
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 916e317ac925..4e42b5f24deb 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1690,9 +1690,7 @@ next_block:
map->m_flags |= F2FS_MAP_NEW;
} else if (is_hole) {
if (f2fs_compressed_file(inode) &&
- f2fs_sanity_check_cluster(&dn) &&
- (flag != F2FS_GET_BLOCK_FIEMAP ||
- IS_ENABLED(CONFIG_F2FS_CHECK_FS))) {
+ f2fs_sanity_check_cluster(&dn)) {
err = -EFSCORRUPTED;
f2fs_handle_error(sbi,
ERROR_CORRUPTED_CLUSTER);
@@ -2344,8 +2342,10 @@ skip_reading_dnode:
f2fs_wait_on_block_writeback(inode, blkaddr);
if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
- if (atomic_dec_and_test(&dic->remaining_pages))
+ if (atomic_dec_and_test(&dic->remaining_pages)) {
f2fs_decompress_cluster(dic, true);
+ break;
+ }
continue;
}
@@ -2665,6 +2665,11 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
return true;
if (f2fs_is_atomic_file(inode))
return true;
+ /* rewrite low ratio compress data w/ OPU mode to avoid fragmentation */
+ if (f2fs_compressed_file(inode) &&
+ F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER &&
+ is_inode_flag_set(inode, FI_ENABLE_COMPRESS))
+ return true;
/* swap file is migrating in aligned write mode */
if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
@@ -3023,7 +3028,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
{
int ret = 0;
int done = 0, retry = 0;
- struct page *pages[F2FS_ONSTACK_PAGES];
+ struct page *pages_local[F2FS_ONSTACK_PAGES];
+ struct page **pages = pages_local;
struct folio_batch fbatch;
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
struct bio *bio = NULL;
@@ -3047,6 +3053,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
#endif
int nr_folios, p, idx;
int nr_pages;
+ unsigned int max_pages = F2FS_ONSTACK_PAGES;
pgoff_t index;
pgoff_t end; /* Inclusive */
pgoff_t done_index;
@@ -3056,6 +3063,15 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
int submitted = 0;
int i;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (f2fs_compressed_file(inode) &&
+ 1 << cc.log_cluster_size > F2FS_ONSTACK_PAGES) {
+ pages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
+ cc.log_cluster_size, GFP_NOFS | __GFP_NOFAIL);
+ max_pages = 1 << cc.log_cluster_size;
+ }
+#endif
+
folio_batch_init(&fbatch);
if (get_dirty_pages(mapping->host) <=
@@ -3101,7 +3117,7 @@ again:
add_more:
pages[nr_pages] = folio_page(folio, idx);
folio_get(folio);
- if (++nr_pages == F2FS_ONSTACK_PAGES) {
+ if (++nr_pages == max_pages) {
index = folio->index + idx + 1;
folio_batch_release(&fbatch);
goto write;
@@ -3283,6 +3299,11 @@ next:
if (bio)
f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (pages != pages_local)
+ kfree(pages);
+#endif
+
return ret;
}
@@ -4055,7 +4076,7 @@ next:
sis->highest_bit = cur_lblock - 1;
out:
if (not_aligned)
- f2fs_warn(sbi, "Swapfile (%u) is not align to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%u * N)",
+ f2fs_warn(sbi, "Swapfile (%u) is not align to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%lu * N)",
not_aligned, blks_per_sec * F2FS_BLKSIZE);
return ret;
}
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index 0e2d49140c07..ad8dfac73bd4 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -74,40 +74,14 @@ static void __set_extent_info(struct extent_info *ei,
}
}
-static bool __may_read_extent_tree(struct inode *inode)
-{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-
- if (!test_opt(sbi, READ_EXTENT_CACHE))
- return false;
- if (is_inode_flag_set(inode, FI_NO_EXTENT))
- return false;
- if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
- !f2fs_sb_has_readonly(sbi))
- return false;
- return S_ISREG(inode->i_mode);
-}
-
-static bool __may_age_extent_tree(struct inode *inode)
-{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-
- if (!test_opt(sbi, AGE_EXTENT_CACHE))
- return false;
- if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
- return false;
- if (file_is_cold(inode))
- return false;
-
- return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
-}
-
static bool __init_may_extent_tree(struct inode *inode, enum extent_type type)
{
if (type == EX_READ)
- return __may_read_extent_tree(inode);
- else if (type == EX_BLOCK_AGE)
- return __may_age_extent_tree(inode);
+ return test_opt(F2FS_I_SB(inode), READ_EXTENT_CACHE) &&
+ S_ISREG(inode->i_mode);
+ if (type == EX_BLOCK_AGE)
+ return test_opt(F2FS_I_SB(inode), AGE_EXTENT_CACHE) &&
+ (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode));
return false;
}
@@ -120,7 +94,22 @@ static bool __may_extent_tree(struct inode *inode, enum extent_type type)
if (list_empty(&F2FS_I_SB(inode)->s_list))
return false;
- return __init_may_extent_tree(inode, type);
+ if (!__init_may_extent_tree(inode, type))
+ return false;
+
+ if (type == EX_READ) {
+ if (is_inode_flag_set(inode, FI_NO_EXTENT))
+ return false;
+ if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
+ !f2fs_sb_has_readonly(F2FS_I_SB(inode)))
+ return false;
+ } else if (type == EX_BLOCK_AGE) {
+ if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
+ return false;
+ if (file_is_cold(inode))
+ return false;
+ }
+ return true;
}
static void __try_update_largest_extent(struct extent_tree *et,
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index dd99abbb7186..e50363583f01 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -3258,11 +3258,12 @@ int f2fs_precache_extents(struct inode *inode)
return -EOPNOTSUPP;
map.m_lblk = 0;
+ map.m_pblk = 0;
map.m_next_pgofs = NULL;
map.m_next_extent = &m_next_extent;
map.m_seg_type = NO_CHECK_TYPE;
map.m_may_create = false;
- end = max_file_blocks(inode);
+ end = F2FS_BLK_ALIGN(i_size_read(inode));
while (map.m_lblk < end) {
map.m_len = end - map.m_lblk;
@@ -3270,7 +3271,7 @@ int f2fs_precache_extents(struct inode *inode)
f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRECACHE);
f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
- if (err)
+ if (err || !map.m_len)
return err;
map.m_lblk = m_next_extent;
@@ -4005,6 +4006,15 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
F2FS_I(inode)->i_compress_algorithm = option.algorithm;
F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
F2FS_I(inode)->i_cluster_size = BIT(option.log_cluster_size);
+ /* Set default level */
+ if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD)
+ F2FS_I(inode)->i_compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
+ else
+ F2FS_I(inode)->i_compress_level = 0;
+ /* Adjust mount option level */
+ if (option.algorithm == F2FS_OPTION(sbi).compress_algorithm &&
+ F2FS_OPTION(sbi).compress_level)
+ F2FS_I(inode)->i_compress_level = F2FS_OPTION(sbi).compress_level;
f2fs_mark_inode_dirty_sync(inode, true);
if (!f2fs_is_compress_backend_ready(inode))
@@ -4849,6 +4859,9 @@ static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
filp->f_mode &= ~FMODE_RANDOM;
spin_unlock(&filp->f_lock);
return 0;
+ } else if (advice == POSIX_FADV_WILLNEED && offset == 0) {
+ /* Load extent cache at the first readahead. */
+ f2fs_precache_extents(inode);
}
err = generic_fadvise(filp, offset, len, advice);
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 5779c7edd49b..560bfcad1af2 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -315,7 +315,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
f2fs_has_inline_xattr(inode) &&
(!fi->i_inline_xattr_size ||
fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
- f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
+ f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %lu",
__func__, inode->i_ino, fi->i_inline_xattr_size,
MAX_INLINE_XATTR_SIZE);
return false;
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index ee2e1dd64f25..6c7f6a649d27 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -633,7 +633,7 @@ static void f2fs_ra_node_pages(struct page *parent, int start, int n)
/* Then, try readahead for siblings of the desired node */
end = start + n;
- end = min(end, NIDS_PER_BLOCK);
+ end = min(end, (int)NIDS_PER_BLOCK);
for (i = start; i < end; i++) {
nid = get_nid(parent, i, false);
f2fs_ra_node_page(sbi, nid);
@@ -1467,7 +1467,8 @@ page_hit:
ofs_of_node(page), cpver_of_node(page),
next_blkaddr_of_node(page));
set_sbi_flag(sbi, SBI_NEED_FSCK);
- err = -EINVAL;
+ f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
+ err = -EFSCORRUPTED;
out_err:
ClearPageUptodate(page);
out_put_err:
@@ -2389,7 +2390,7 @@ static int scan_nat_page(struct f2fs_sb_info *sbi,
blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
if (blk_addr == NEW_ADDR)
- return -EINVAL;
+ return -EFSCORRUPTED;
if (blk_addr == NULL_ADDR) {
add_free_nid(sbi, start_nid, true, true);
@@ -2504,7 +2505,14 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
if (ret) {
f2fs_up_read(&nm_i->nat_tree_lock);
- f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
+
+ if (ret == -EFSCORRUPTED) {
+ f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_handle_error(sbi,
+ ERROR_INCONSISTENT_NAT);
+ }
+
return ret;
}
}
@@ -2743,7 +2751,9 @@ recover_xnid:
f2fs_update_inode_page(inode);
/* 3: update and set xattr node page dirty */
- memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
+ if (page)
+ memcpy(F2FS_NODE(xpage), F2FS_NODE(page),
+ VALID_XATTR_BLOCK_SIZE);
set_page_dirty(xpage);
f2fs_put_page(xpage, 1);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index d05b41608fc0..727d016318f9 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -4910,22 +4910,31 @@ static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
}
/*
- * The write pointer matches with the valid blocks or
- * already points to the end of the zone.
+ * When safely unmounted in the previous mount, we can trust write
+ * pointers. Otherwise, finish zones.
*/
- if ((last_valid_block + 1 == wp_block) ||
- (zone->wp == zone->start + zone->len))
- return 0;
-
- if (last_valid_block + 1 == zone_block) {
+ if (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
/*
- * If there is no valid block in the zone and if write pointer
- * is not at zone start, reset the write pointer.
+ * The write pointer matches with the valid blocks or
+ * already points to the end of the zone.
*/
- f2fs_notice(sbi,
- "Zone without valid block has non-zero write "
- "pointer. Reset the write pointer: wp[0x%x,0x%x]",
- wp_segno, wp_blkoff);
+ if ((last_valid_block + 1 == wp_block) ||
+ (zone->wp == zone->start + zone->len))
+ return 0;
+ }
+
+ if (last_valid_block + 1 == zone_block) {
+ if (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
+ /*
+ * If there is no valid block in the zone and if write
+ * pointer is not at zone start, reset the write
+ * pointer.
+ */
+ f2fs_notice(sbi,
+ "Zone without valid block has non-zero write "
+ "pointer. Reset the write pointer: wp[0x%x,0x%x]",
+ wp_segno, wp_blkoff);
+ }
ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
zone->len >> log_sectors_per_block);
if (ret)
@@ -4935,18 +4944,20 @@ static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
return ret;
}
- /*
- * If there are valid blocks and the write pointer doesn't
- * match with them, we need to report the inconsistency and
- * fill the zone till the end to close the zone. This inconsistency
- * does not cause write error because the zone will not be selected
- * for write operation until it get discarded.
- */
- f2fs_notice(sbi, "Valid blocks are not aligned with write pointer: "
- "valid block[0x%x,0x%x] wp[0x%x,0x%x]",
- GET_SEGNO(sbi, last_valid_block),
- GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
- wp_segno, wp_blkoff);
+ if (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
+ /*
+ * If there are valid blocks and the write pointer doesn't match
+ * with them, we need to report the inconsistency and fill
+ * the zone till the end to close the zone. This inconsistency
+ * does not cause write error because the zone will not be
+ * selected for write operation until it get discarded.
+ */
+ f2fs_notice(sbi, "Valid blocks are not aligned with write "
+ "pointer: valid block[0x%x,0x%x] wp[0x%x,0x%x]",
+ GET_SEGNO(sbi, last_valid_block),
+ GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
+ wp_segno, wp_blkoff);
+ }
ret = blkdev_zone_mgmt(fdev->bdev, REQ_OP_ZONE_FINISH,
zone->start, zone->len, GFP_NOFS);
@@ -5020,18 +5031,27 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
return 0;
- wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
- wp_segno = GET_SEGNO(sbi, wp_block);
- wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
- wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
-
- if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
- wp_sector_off == 0)
- return 0;
+ /*
+ * When safely unmounted in the previous mount, we could use current
+ * segments. Otherwise, allocate new sections.
+ */
+ if (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
+ wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
+ wp_segno = GET_SEGNO(sbi, wp_block);
+ wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
+ wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
+
+ if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
+ wp_sector_off == 0)
+ return 0;
- f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
- "curseg[0x%x,0x%x] wp[0x%x,0x%x]",
- type, cs->segno, cs->next_blkoff, wp_segno, wp_blkoff);
+ f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
+ "curseg[0x%x,0x%x] wp[0x%x,0x%x]", type, cs->segno,
+ cs->next_blkoff, wp_segno, wp_blkoff);
+ } else {
+ f2fs_notice(sbi, "Not successfully unmounted in the previous "
+ "mount");
+ }
f2fs_notice(sbi, "Assign new section to curseg[%d]: "
"curseg[0x%x,0x%x]", type, cs->segno, cs->next_blkoff);
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 2ca8fb5d0dc4..8129be788bd5 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -108,11 +108,11 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
((sbi)->segs_per_sec - ((sbi)->unusable_blocks_per_sec >>\
(sbi)->log_blocks_per_seg))
#define GET_SEC_FROM_SEG(sbi, segno) \
- (((segno) == -1) ? -1: (segno) / (sbi)->segs_per_sec)
+ (((segno) == -1) ? -1 : (segno) / (sbi)->segs_per_sec)
#define GET_SEG_FROM_SEC(sbi, secno) \
((secno) * (sbi)->segs_per_sec)
#define GET_ZONE_FROM_SEC(sbi, secno) \
- (((secno) == -1) ? -1: (secno) / (sbi)->secs_per_zone)
+ (((secno) == -1) ? -1 : (secno) / (sbi)->secs_per_zone)
#define GET_ZONE_FROM_SEG(sbi, segno) \
GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 05f9f7b6ebf8..033af907c3b1 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -562,6 +562,29 @@ static int f2fs_set_test_dummy_encryption(struct super_block *sb,
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
+static bool is_compress_extension_exist(struct f2fs_sb_info *sbi,
+ const char *new_ext, bool is_ext)
+{
+ unsigned char (*ext)[F2FS_EXTENSION_LEN];
+ int ext_cnt;
+ int i;
+
+ if (is_ext) {
+ ext = F2FS_OPTION(sbi).extensions;
+ ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
+ } else {
+ ext = F2FS_OPTION(sbi).noextensions;
+ ext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
+ }
+
+ for (i = 0; i < ext_cnt; i++) {
+ if (!strcasecmp(new_ext, ext[i]))
+ return true;
+ }
+
+ return false;
+}
+
/*
 * 1. The same extension name must not appear in both the compress and non-compress
 * extension lists at the same time.
@@ -1164,6 +1187,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
return -EINVAL;
}
+ if (is_compress_extension_exist(sbi, name, true)) {
+ kfree(name);
+ break;
+ }
+
strcpy(ext[ext_cnt], name);
F2FS_OPTION(sbi).compress_ext_cnt++;
kfree(name);
@@ -1188,6 +1216,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
return -EINVAL;
}
+ if (is_compress_extension_exist(sbi, name, false)) {
+ kfree(name);
+ break;
+ }
+
strcpy(noext[noext_cnt], name);
F2FS_OPTION(sbi).nocompress_ext_cnt++;
kfree(name);
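With is_compress_extension_exist() in place, repeating an extension in the mount options is silently coalesced instead of consuming another slot. A hedged standalone illustration of the case-insensitive matching, using libc strcasecmp rather than the kernel helper:

#include <stdbool.h>
#include <stdio.h>
#include <strings.h>

/* Mirrors the duplicate check; this is not the kernel function itself. */
static bool ext_exists(const char *new_ext, const char *exts[], int cnt)
{
	for (int i = 0; i < cnt; i++)
		if (!strcasecmp(new_ext, exts[i]))
			return true;
	return false;
}

int main(void)
{
	const char *exts[] = { "txt", "log" };

	/* "TXT" matches "txt", so compress_extension=TXT would be skipped. */
	printf("%d\n", ext_exists("TXT", exts, 2));	/* prints 1 */
	return 0;
}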
@@ -1644,7 +1677,7 @@ static void f2fs_put_super(struct super_block *sb)
f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
- if (err) {
+ if (err || f2fs_cp_error(sbi)) {
truncate_inode_pages_final(NODE_MAPPING(sbi));
truncate_inode_pages_final(META_MAPPING(sbi));
}
@@ -2301,9 +2334,9 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
unsigned long old_sb_flags;
int err;
bool need_restart_gc = false, need_stop_gc = false;
- bool need_restart_ckpt = false, need_stop_ckpt = false;
bool need_restart_flush = false, need_stop_flush = false;
bool need_restart_discard = false, need_stop_discard = false;
+ bool need_enable_checkpoint = false, need_disable_checkpoint = false;
bool no_read_extent_cache = !test_opt(sbi, READ_EXTENT_CACHE);
bool no_age_extent_cache = !test_opt(sbi, AGE_EXTENT_CACHE);
bool enable_checkpoint = !test_opt(sbi, DISABLE_CHECKPOINT);
@@ -2467,24 +2500,6 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
clear_sbi_flag(sbi, SBI_IS_CLOSE);
}
- if ((*flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) ||
- !test_opt(sbi, MERGE_CHECKPOINT)) {
- f2fs_stop_ckpt_thread(sbi);
- need_restart_ckpt = true;
- } else {
- /* Flush if the prevous checkpoint, if exists. */
- f2fs_flush_ckpt_thread(sbi);
-
- err = f2fs_start_ckpt_thread(sbi);
- if (err) {
- f2fs_err(sbi,
- "Failed to start F2FS issue_checkpoint_thread (%d)",
- err);
- goto restore_gc;
- }
- need_stop_ckpt = true;
- }
-
/*
* We stop issue flush thread if FS is mounted as RO
* or if flush_merge is not passed in mount option.
@@ -2496,7 +2511,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
} else {
err = f2fs_create_flush_cmd_control(sbi);
if (err)
- goto restore_ckpt;
+ goto restore_gc;
need_stop_flush = true;
}
@@ -2518,8 +2533,31 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
err = f2fs_disable_checkpoint(sbi);
if (err)
goto restore_discard;
+ need_enable_checkpoint = true;
} else {
f2fs_enable_checkpoint(sbi);
+ need_disable_checkpoint = true;
+ }
+ }
+
+ /*
+ * Place this routine at the end, since a new checkpoint would be
+ * triggered during remount and we need to take care of it before
+ * returning from remount.
+ */
+ if ((*flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) ||
+ !test_opt(sbi, MERGE_CHECKPOINT)) {
+ f2fs_stop_ckpt_thread(sbi);
+ } else {
+ /* Flush the previous checkpoint, if it exists. */
+ f2fs_flush_ckpt_thread(sbi);
+
+ err = f2fs_start_ckpt_thread(sbi);
+ if (err) {
+ f2fs_err(sbi,
+ "Failed to start F2FS issue_checkpoint_thread (%d)",
+ err);
+ goto restore_checkpoint;
}
}
@@ -2537,6 +2575,13 @@ skip:
adjust_unusable_cap_perc(sbi);
*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
return 0;
+restore_checkpoint:
+ if (need_enable_checkpoint) {
+ f2fs_enable_checkpoint(sbi);
+ } else if (need_disable_checkpoint) {
+ if (f2fs_disable_checkpoint(sbi))
+ f2fs_warn(sbi, "checkpoint has not been disabled");
+ }
restore_discard:
if (need_restart_discard) {
if (f2fs_start_discard_thread(sbi))
@@ -2552,13 +2597,6 @@ restore_flush:
clear_opt(sbi, FLUSH_MERGE);
f2fs_destroy_flush_cmd_control(sbi, false);
}
-restore_ckpt:
- if (need_restart_ckpt) {
- if (f2fs_start_ckpt_thread(sbi))
- f2fs_warn(sbi, "background ckpt thread has stopped");
- } else if (need_stop_ckpt) {
- f2fs_stop_ckpt_thread(sbi);
- }
restore_gc:
if (need_restart_gc) {
if (f2fs_start_gc_thread(sbi))
@@ -3292,6 +3330,7 @@ static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
}
static const struct export_operations f2fs_export_ops = {
+ .encode_fh = generic_encode_ino32_fh,
.fh_to_dentry = f2fs_fh_to_dentry,
.fh_to_parent = f2fs_fh_to_parent,
.get_parent = f2fs_get_parent,
@@ -3479,7 +3518,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
return -EFSCORRUPTED;
}
- /* Currently, support 512/1024/2048/4096 bytes sector size */
+ /* Currently, support 512/1024/2048/4096/16K bytes sector size */
if (le32_to_cpu(raw_super->log_sectorsize) >
F2FS_MAX_LOG_SECTOR_SIZE ||
le32_to_cpu(raw_super->log_sectorsize) <
@@ -4926,7 +4965,7 @@ static int __init init_f2fs_fs(void)
int err;
if (PAGE_SIZE != F2FS_BLKSIZE) {
- printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
+ printk("F2FS not supported on PAGE_SIZE(%lu) != BLOCK_SIZE(%lu)\n",
PAGE_SIZE, F2FS_BLKSIZE);
return -EINVAL;
}
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 4314456854f6..47e88b4d4e7d 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -364,10 +364,10 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
*xe = __find_xattr(cur_addr, last_txattr_addr, NULL, index, len, name);
if (!*xe) {
- f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
+ f2fs_err(F2FS_I_SB(inode), "lookup inode (%lu) has corrupted xattr",
inode->i_ino);
set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
- err = -EFSCORRUPTED;
+ err = -ENODATA;
f2fs_handle_error(F2FS_I_SB(inode),
ERROR_CORRUPTED_XATTR);
goto out;
@@ -584,13 +584,12 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
(void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) {
- f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
+ f2fs_err(F2FS_I_SB(inode), "list inode (%lu) has corrupted xattr",
inode->i_ino);
set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
- error = -EFSCORRUPTED;
f2fs_handle_error(F2FS_I_SB(inode),
ERROR_CORRUPTED_XATTR);
- goto cleanup;
+ break;
}
if (!prefix)
@@ -650,7 +649,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
if (size > MAX_VALUE_LEN(inode))
return -E2BIG;
-
+retry:
error = read_all_xattrs(inode, ipage, &base_addr);
if (error)
return error;
@@ -660,7 +659,14 @@ static int __f2fs_setxattr(struct inode *inode, int index,
/* find entry with wanted name. */
here = __find_xattr(base_addr, last_base_addr, NULL, index, len, name);
if (!here) {
- f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
+ if (!F2FS_I(inode)->i_xattr_nid) {
+ f2fs_notice(F2FS_I_SB(inode),
+ "recover xattr in inode (%lu)", inode->i_ino);
+ f2fs_recover_xattr_data(inode, NULL);
+ kfree(base_addr);
+ goto retry;
+ }
+ f2fs_err(F2FS_I_SB(inode), "set inode (%lu) has corrupted xattr",
inode->i_ino);
set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
error = -EFSCORRUPTED;
diff --git a/fs/fat/nfs.c b/fs/fat/nfs.c
index 3626eb585a98..c52e63e10d35 100644
--- a/fs/fat/nfs.c
+++ b/fs/fat/nfs.c
@@ -279,6 +279,7 @@ static struct dentry *fat_get_parent(struct dentry *child_dir)
}
const struct export_operations fat_export_ops = {
+ .encode_fh = generic_encode_ino32_fh,
.fh_to_dentry = fat_fh_to_dentry,
.fh_to_parent = fat_fh_to_parent,
.get_parent = fat_get_parent,
diff --git a/fs/fhandle.c b/fs/fhandle.c
index 6ea8d35a9382..18b3ba8dc8ea 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -26,12 +26,8 @@ static long do_sys_name_to_handle(const struct path *path,
/*
* We need to make sure whether the file system support decoding of
* the file handle if decodeable file handle was requested.
- * Otherwise, even empty export_operations are sufficient to opt-in
- * to encoding FIDs.
*/
- if (!path->dentry->d_sb->s_export_op ||
- (!(fh_flags & EXPORT_FH_FID) &&
- !path->dentry->d_sb->s_export_op->fh_to_dentry))
+ if (!exportfs_can_encode_fh(path->dentry->d_sb->s_export_op, fh_flags))
return -EOPNOTSUPP;
if (copy_from_user(&f_handle, ufh, sizeof(struct file_handle)))
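For context on the exportfs_can_encode_fh() check above, a hedged userspace sketch: requesting an FID-only handle no longer requires the filesystem to support decoding handles, only encoding them. AT_HANDLE_FID is defined locally in case the libc headers predate it, and the path is an assumption.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

#ifndef AT_HANDLE_FID
#define AT_HANDLE_FID 0x200	/* uapi value; defined here only if headers lag */
#endif

int main(void)
{
	struct file_handle *fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
	int mount_id;

	if (!fh)
		return 1;
	fh->handle_bytes = MAX_HANDLE_SZ;
	/* An FID-only handle: a stable object identifier even on filesystems
	 * that cannot decode handles back into dentries. */
	if (name_to_handle_at(AT_FDCWD, "/mnt/somefile", fh, &mount_id,
			      AT_HANDLE_FID))
		perror("name_to_handle_at");
	free(fh);
	return 0;
}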
diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c
index 310d73e254df..e6e2a2185e7c 100644
--- a/fs/freevxfs/vxfs_super.c
+++ b/fs/freevxfs/vxfs_super.c
@@ -76,6 +76,7 @@ vxfs_statfs(struct dentry *dentry, struct kstatfs *bufp)
{
struct vxfs_sb_info *infp = VXFS_SBI(dentry->d_sb);
struct vxfs_sb *raw_sb = infp->vsi_raw;
+ u64 id = huge_encode_dev(dentry->d_sb->s_bdev->bd_dev);
bufp->f_type = VXFS_SUPER_MAGIC;
bufp->f_bsize = dentry->d_sb->s_blocksize;
@@ -84,6 +85,7 @@ vxfs_statfs(struct dentry *dentry, struct kstatfs *bufp)
bufp->f_bavail = 0;
bufp->f_files = 0;
bufp->f_ffree = fs32_to_cpu(infp, raw_sb->vs_ifree);
+ bufp->f_fsid = u64_to_fsid(id);
bufp->f_namelen = VXFS_NAMELEN;
return 0;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index caa8121ad99c..74d4f09d5827 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -999,7 +999,7 @@ static int fuse_encode_fh(struct inode *inode, u32 *fh, int *max_len,
}
*max_len = len;
- return parent ? 0x82 : 0x81;
+ return parent ? FILEID_INO64_GEN_PARENT : FILEID_INO64_GEN;
}
static struct dentry *fuse_fh_to_dentry(struct super_block *sb,
@@ -1007,7 +1007,8 @@ static struct dentry *fuse_fh_to_dentry(struct super_block *sb,
{
struct fuse_inode_handle handle;
- if ((fh_type != 0x81 && fh_type != 0x82) || fh_len < 3)
+ if ((fh_type != FILEID_INO64_GEN &&
+ fh_type != FILEID_INO64_GEN_PARENT) || fh_len < 3)
return NULL;
handle.nodeid = (u64) fid->raw[0] << 32;
@@ -1021,7 +1022,7 @@ static struct dentry *fuse_fh_to_parent(struct super_block *sb,
{
struct fuse_inode_handle parent;
- if (fh_type != 0x82 || fh_len < 6)
+ if (fh_type != FILEID_INO64_GEN_PARENT || fh_len < 6)
return NULL;
parent.nodeid = (u64) fid->raw[3] << 32;
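The fuse hunks above replace the magic 0x81/0x82 values with named constants. For reference, a hedged restatement of the values they map to in the kernel's enum fid_type (renamed here so it is clearly illustrative):

/* Values mirror FILEID_INO64_GEN and FILEID_INO64_GEN_PARENT in
 * include/linux/exportfs.h; the enum name is an illustration only. */
enum example_fid_type {
	EXAMPLE_FILEID_INO64_GEN	= 0x81,	/* 64-bit inode number + 32-bit generation */
	EXAMPLE_FILEID_INO64_GEN_PARENT	= 0x82,	/* the above, plus the parent's ino/gen */
};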
diff --git a/fs/gfs2/acl.h b/fs/gfs2/acl.h
index d4deb2b19959..82f5b09c04e6 100644
--- a/fs/gfs2/acl.h
+++ b/fs/gfs2/acl.h
@@ -11,9 +11,9 @@
#define GFS2_ACL_MAX_ENTRIES(sdp) ((300 << (sdp)->sd_sb.sb_bsize_shift) >> 12)
-extern struct posix_acl *gfs2_get_acl(struct inode *inode, int type, bool rcu);
-extern int __gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type);
-extern int gfs2_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
- struct posix_acl *acl, int type);
+struct posix_acl *gfs2_get_acl(struct inode *inode, int type, bool rcu);
+int __gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+int gfs2_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct posix_acl *acl, int type);
#endif /* __ACL_DOT_H__ */
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 6b060fc9e260..9611bfceda4b 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -155,7 +155,7 @@ static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
+ if (gfs2_assert_withdraw(sdp, ip->i_gl->gl_state == LM_ST_EXCLUSIVE))
goto out;
if (folio_test_checked(folio) || current->journal_info)
goto out_ignore;
@@ -214,12 +214,12 @@ static int gfs2_write_jdata_batch(struct address_space *mapping,
unsigned nrblocks;
int i;
int ret;
- int nr_pages = 0;
+ size_t size = 0;
int nr_folios = folio_batch_count(fbatch);
for (i = 0; i < nr_folios; i++)
- nr_pages += folio_nr_pages(fbatch->folios[i]);
- nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
+ size += folio_size(fbatch->folios[i]);
+ nrblocks = size >> inode->i_blkbits;
ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
if (ret < 0)
@@ -403,27 +403,27 @@ static int gfs2_jdata_writepages(struct address_space *mapping,
}
/**
- * stuffed_readpage - Fill in a Linux page with stuffed file data
+ * stuffed_readpage - Fill in a Linux folio with stuffed file data
* @ip: the inode
- * @page: the page
+ * @folio: the folio
*
* Returns: errno
*/
-static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
+static int stuffed_readpage(struct gfs2_inode *ip, struct folio *folio)
{
struct buffer_head *dibh;
- u64 dsize = i_size_read(&ip->i_inode);
- void *kaddr;
+ size_t i_size = i_size_read(&ip->i_inode);
+ void *data;
int error;
/*
* Due to the order of unstuffing files and ->fault(), we can be
- * asked for a zero page in the case of a stuffed file being extended,
+ * asked for a zero folio in the case of a stuffed file being extended,
* so we need to supply one here. It doesn't happen often.
*/
- if (unlikely(page->index)) {
- zero_user(page, 0, PAGE_SIZE);
- SetPageUptodate(page);
+ if (unlikely(folio->index)) {
+ folio_zero_range(folio, 0, folio_size(folio));
+ folio_mark_uptodate(folio);
return 0;
}
@@ -431,13 +431,11 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
if (error)
return error;
- kaddr = kmap_local_page(page);
- memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
- memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
- kunmap_local(kaddr);
- flush_dcache_page(page);
+ data = dibh->b_data + sizeof(struct gfs2_dinode);
+ memcpy_to_folio(folio, 0, data, i_size);
+ folio_zero_range(folio, i_size, folio_size(folio) - i_size);
brelse(dibh);
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
return 0;
}
@@ -458,7 +456,7 @@ static int gfs2_read_folio(struct file *file, struct folio *folio)
(i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
error = iomap_read_folio(folio, &gfs2_iomap_ops);
} else if (gfs2_is_stuffed(ip)) {
- error = stuffed_readpage(ip, &folio->page);
+ error = stuffed_readpage(ip, folio);
folio_unlock(folio);
} else {
error = mpage_read_folio(folio, gfs2_block_map);
@@ -479,31 +477,29 @@ static int gfs2_read_folio(struct file *file, struct folio *folio)
*
*/
-int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
- unsigned size)
+ssize_t gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
+ size_t size)
{
struct address_space *mapping = ip->i_inode.i_mapping;
unsigned long index = *pos >> PAGE_SHIFT;
- unsigned offset = *pos & (PAGE_SIZE - 1);
- unsigned copied = 0;
- unsigned amt;
- struct page *page;
+ size_t copied = 0;
do {
- page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
- if (IS_ERR(page)) {
- if (PTR_ERR(page) == -EINTR)
+ size_t offset, chunk;
+ struct folio *folio;
+
+ folio = read_cache_folio(mapping, index, gfs2_read_folio, NULL);
+ if (IS_ERR(folio)) {
+ if (PTR_ERR(folio) == -EINTR)
continue;
- return PTR_ERR(page);
+ return PTR_ERR(folio);
}
- amt = size - copied;
- if (offset + size > PAGE_SIZE)
- amt = PAGE_SIZE - offset;
- memcpy_from_page(buf + copied, page, offset, amt);
- put_page(page);
- copied += amt;
- index++;
- offset = 0;
+ offset = *pos + copied - folio_pos(folio);
+ chunk = min(size - copied, folio_size(folio) - offset);
+ memcpy_from_folio(buf + copied, folio, offset, chunk);
+ index = folio_next_index(folio);
+ folio_put(folio);
+ copied += chunk;
} while(copied < size);
(*pos) += size;
return size;
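A short worked example of the per-folio arithmetic in the loop above, as a hedged comment block (the numbers are made up): with *pos = 6144, copied = 0, size = 20000, and a folio where folio_pos() = 4096 and folio_size() = 16384:

/*
 * offset = *pos + copied - folio_pos(folio) = 6144 + 0 - 4096  = 2048
 * chunk  = min(size - copied, folio_size(folio) - offset)
 *        = min(20000 - 0, 16384 - 2048)                        = 14336
 * memcpy_from_folio() copies 14336 bytes, copied becomes 14336, and the
 * next iteration starts at the folio following this one.
 */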
diff --git a/fs/gfs2/aops.h b/fs/gfs2/aops.h
index f08322ef41cf..a10c4334d248 100644
--- a/fs/gfs2/aops.h
+++ b/fs/gfs2/aops.h
@@ -8,8 +8,8 @@
#include "incore.h"
-extern void adjust_fs_space(struct inode *inode);
-extern void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
- size_t from, size_t len);
+void adjust_fs_space(struct inode *inode);
+void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
+ size_t from, size_t len);
#endif /* __AOPS_DOT_H__ */
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 6eb6f1bd9e34..d9ccfd27e4f1 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -104,7 +104,7 @@ static int __gfs2_unstuff_inode(struct gfs2_inode *ip, struct folio *folio)
and write it out to disk */
unsigned int n = 1;
- error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
+ error = gfs2_alloc_blocks(ip, &block, &n, 0);
if (error)
goto out_brelse;
if (isdir) {
@@ -315,6 +315,12 @@ static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
}
}
+static inline struct buffer_head *
+metapath_dibh(struct metapath *mp)
+{
+ return mp->mp_bh[0];
+}
+
static int __fillup_metapath(struct gfs2_inode *ip, struct metapath *mp,
unsigned int x, unsigned int h)
{
@@ -413,13 +419,12 @@ static void release_metapath(struct metapath *mp)
* gfs2_extent_length - Returns length of an extent of blocks
* @bh: The metadata block
* @ptr: Current position in @bh
- * @limit: Max extent length to return
* @eob: Set to 1 if we hit "end of block"
*
* Returns: The length of the extent (minimum of one block)
*/
-static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, size_t limit, int *eob)
+static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, int *eob)
{
const __be64 *end = (__be64 *)(bh->b_data + bh->b_size);
const __be64 *first = ptr;
@@ -658,7 +663,7 @@ static int __gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- struct buffer_head *dibh = mp->mp_bh[0];
+ struct buffer_head *dibh = metapath_dibh(mp);
u64 bn;
unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
size_t dblks = iomap->length >> inode->i_blkbits;
@@ -700,7 +705,7 @@ static int __gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
i = mp->mp_aheight;
do {
n = blks - alloced;
- ret = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
+ ret = gfs2_alloc_blocks(ip, &bn, &n, 0);
if (ret)
goto out;
alloced += n;
@@ -911,7 +916,7 @@ unstuff:
goto do_alloc;
bh = mp->mp_bh[ip->i_height - 1];
- len = gfs2_extent_length(bh, ptr, len, &eob);
+ len = gfs2_extent_length(bh, ptr, &eob);
iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;
iomap->length = len << inode->i_blkbits;
diff --git a/fs/gfs2/bmap.h b/fs/gfs2/bmap.h
index e5b7d17131ed..4e8b1e8ebdf3 100644
--- a/fs/gfs2/bmap.h
+++ b/fs/gfs2/bmap.h
@@ -46,24 +46,24 @@ static inline void gfs2_write_calc_reserv(const struct gfs2_inode *ip,
extern const struct iomap_ops gfs2_iomap_ops;
extern const struct iomap_writeback_ops gfs2_writeback_ops;
-extern int gfs2_unstuff_dinode(struct gfs2_inode *ip);
-extern int gfs2_block_map(struct inode *inode, sector_t lblock,
- struct buffer_head *bh, int create);
-extern int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
- struct iomap *iomap);
-extern int gfs2_iomap_alloc(struct inode *inode, loff_t pos, loff_t length,
- struct iomap *iomap);
-extern int gfs2_get_extent(struct inode *inode, u64 lblock, u64 *dblock,
- unsigned int *extlen);
-extern int gfs2_alloc_extent(struct inode *inode, u64 lblock, u64 *dblock,
- unsigned *extlen, bool *new);
-extern int gfs2_setattr_size(struct inode *inode, u64 size);
-extern int gfs2_truncatei_resume(struct gfs2_inode *ip);
-extern int gfs2_file_dealloc(struct gfs2_inode *ip);
-extern int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
- unsigned int len);
-extern int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd);
-extern void gfs2_free_journal_extents(struct gfs2_jdesc *jd);
-extern int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length);
+int gfs2_unstuff_dinode(struct gfs2_inode *ip);
+int gfs2_block_map(struct inode *inode, sector_t lblock,
+ struct buffer_head *bh, int create);
+int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
+ struct iomap *iomap);
+int gfs2_iomap_alloc(struct inode *inode, loff_t pos, loff_t length,
+ struct iomap *iomap);
+int gfs2_get_extent(struct inode *inode, u64 lblock, u64 *dblock,
+ unsigned int *extlen);
+int gfs2_alloc_extent(struct inode *inode, u64 lblock, u64 *dblock,
+ unsigned *extlen, bool *new);
+int gfs2_setattr_size(struct inode *inode, u64 size);
+int gfs2_truncatei_resume(struct gfs2_inode *ip);
+int gfs2_file_dealloc(struct gfs2_inode *ip);
+int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
+ unsigned int len);
+int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd);
+void gfs2_free_journal_extents(struct gfs2_jdesc *jd);
+int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length);
#endif /* __BMAP_DOT_H__ */
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 61ddd03ea111..560e4624c09f 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -868,7 +868,7 @@ static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh,
struct gfs2_dirent *dent;
struct timespec64 tv = current_time(inode);
- error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
+ error = gfs2_alloc_blocks(ip, &bn, &n, 0);
if (error)
return NULL;
bh = gfs2_meta_new(ip->i_gl, bn);
diff --git a/fs/gfs2/dir.h b/fs/gfs2/dir.h
index 5b76480c17c9..25a857c78b53 100644
--- a/fs/gfs2/dir.h
+++ b/fs/gfs2/dir.h
@@ -23,32 +23,32 @@ struct gfs2_diradd {
int save_loc;
};
-extern struct inode *gfs2_dir_search(struct inode *dir,
- const struct qstr *filename,
- bool fail_on_exist);
-extern int gfs2_dir_check(struct inode *dir, const struct qstr *filename,
- const struct gfs2_inode *ip);
-extern int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
- const struct gfs2_inode *ip, struct gfs2_diradd *da);
+struct inode *gfs2_dir_search(struct inode *dir,
+ const struct qstr *filename,
+ bool fail_on_exist);
+int gfs2_dir_check(struct inode *dir, const struct qstr *filename,
+ const struct gfs2_inode *ip);
+int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
+ const struct gfs2_inode *ip, struct gfs2_diradd *da);
static inline void gfs2_dir_no_add(struct gfs2_diradd *da)
{
brelse(da->bh);
da->bh = NULL;
}
-extern int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry);
-extern int gfs2_dir_read(struct inode *inode, struct dir_context *ctx,
- struct file_ra_state *f_ra);
-extern int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
- const struct gfs2_inode *nip, unsigned int new_type);
+int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry);
+int gfs2_dir_read(struct inode *inode, struct dir_context *ctx,
+ struct file_ra_state *f_ra);
+int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
+ const struct gfs2_inode *nip, unsigned int new_type);
-extern int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip);
+int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip);
-extern int gfs2_diradd_alloc_required(struct inode *dir,
- const struct qstr *filename,
- struct gfs2_diradd *da);
-extern int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
- struct buffer_head **bhp);
-extern void gfs2_dir_hash_inval(struct gfs2_inode *ip);
+int gfs2_diradd_alloc_required(struct inode *dir,
+ const struct qstr *filename,
+ struct gfs2_diradd *da);
+int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
+ struct buffer_head **bhp);
+void gfs2_dir_hash_inval(struct gfs2_inode *ip);
static inline u32 gfs2_disk_hash(const char *data, int len)
{
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index f2700477a300..4b66efc1a82a 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -418,7 +418,7 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
struct inode *inode = file_inode(vmf->vma->vm_file);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- struct gfs2_alloc_parms ap = { .aflags = 0, };
+ struct gfs2_alloc_parms ap = {};
u64 offset = page_offset(page);
unsigned int data_blocks, ind_blocks, rblocks;
vm_fault_t ret = VM_FAULT_LOCKED;
@@ -1120,14 +1120,16 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (ret)
goto out_unlock;
- ret = file_update_time(file);
- if (ret)
- goto out_unlock;
-
if (iocb->ki_flags & IOCB_DIRECT) {
struct address_space *mapping = file->f_mapping;
ssize_t buffered, ret2;
+ /*
+ * Note that under direct I/O, we don't allow any inode
+ * timestamp updates, so we're not calling file_update_time()
+ * here.
+ */
+
ret = gfs2_file_direct_write(iocb, from, &gh);
if (ret < 0 || !iov_iter_count(from))
goto out_unlock;
@@ -1154,6 +1156,10 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (!ret || ret2 > 0)
ret += ret2;
} else {
+ ret = file_update_time(file);
+ if (ret)
+ goto out_unlock;
+
ret = gfs2_file_buffered_write(iocb, from, &gh);
if (likely(ret > 0))
ret = generic_write_sync(iocb, ret);
@@ -1245,7 +1251,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
struct inode *inode = file_inode(file);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_inode *ip = GFS2_I(inode);
- struct gfs2_alloc_parms ap = { .aflags = 0, };
+ struct gfs2_alloc_parms ap = {};
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
loff_t bytes, max_bytes, max_blks;
int error;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index d5fa75eac0bf..d6bf1f8c25dc 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1524,7 +1524,6 @@ fail:
return;
}
list_add_tail(&gh->gh_list, insert_pt);
- gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
spin_unlock(&gl->gl_lockref.lock);
if (sdp->sd_lockstruct.ls_ops->lm_cancel)
sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index c8685ca7d2a2..61197598abfd 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -156,21 +156,6 @@ out:
return gh;
}
-static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
-{
- return gl->gl_state == LM_ST_EXCLUSIVE;
-}
-
-static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl)
-{
- return gl->gl_state == LM_ST_DEFERRED;
-}
-
-static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
-{
- return gl->gl_state == LM_ST_SHARED;
-}
-
static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
{
if (gl->gl_ops->go_flags & GLOF_ASPACE) {
@@ -181,40 +166,40 @@ static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
return NULL;
}
-extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
- const struct gfs2_glock_operations *glops,
- int create, struct gfs2_glock **glp);
-extern struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl);
-extern void gfs2_glock_put(struct gfs2_glock *gl);
-extern void gfs2_glock_queue_put(struct gfs2_glock *gl);
+int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
+ const struct gfs2_glock_operations *glops,
+ int create, struct gfs2_glock **glp);
+struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl);
+void gfs2_glock_put(struct gfs2_glock *gl);
+void gfs2_glock_queue_put(struct gfs2_glock *gl);
-extern void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
- u16 flags, struct gfs2_holder *gh,
- unsigned long ip);
+void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
+ u16 flags, struct gfs2_holder *gh,
+ unsigned long ip);
static inline void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
u16 flags, struct gfs2_holder *gh) {
__gfs2_holder_init(gl, state, flags, gh, _RET_IP_);
}
-extern void gfs2_holder_reinit(unsigned int state, u16 flags,
- struct gfs2_holder *gh);
-extern void gfs2_holder_uninit(struct gfs2_holder *gh);
-extern int gfs2_glock_nq(struct gfs2_holder *gh);
-extern int gfs2_glock_poll(struct gfs2_holder *gh);
-extern int gfs2_instantiate(struct gfs2_holder *gh);
-extern int gfs2_glock_holder_ready(struct gfs2_holder *gh);
-extern int gfs2_glock_wait(struct gfs2_holder *gh);
-extern int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs);
-extern void gfs2_glock_dq(struct gfs2_holder *gh);
-extern void gfs2_glock_dq_wait(struct gfs2_holder *gh);
-extern void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
-extern int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
- const struct gfs2_glock_operations *glops,
- unsigned int state, u16 flags,
- struct gfs2_holder *gh);
-extern int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
-extern void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
-extern void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
+void gfs2_holder_reinit(unsigned int state, u16 flags,
+ struct gfs2_holder *gh);
+void gfs2_holder_uninit(struct gfs2_holder *gh);
+int gfs2_glock_nq(struct gfs2_holder *gh);
+int gfs2_glock_poll(struct gfs2_holder *gh);
+int gfs2_instantiate(struct gfs2_holder *gh);
+int gfs2_glock_holder_ready(struct gfs2_holder *gh);
+int gfs2_glock_wait(struct gfs2_holder *gh);
+int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs);
+void gfs2_glock_dq(struct gfs2_holder *gh);
+void gfs2_glock_dq_wait(struct gfs2_holder *gh);
+void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
+int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
+ const struct gfs2_glock_operations *glops,
+ unsigned int state, u16 flags,
+ struct gfs2_holder *gh);
+int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
+void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
+void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
bool fsid);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { \
gfs2_dump_glock(NULL, gl, true); \
@@ -228,7 +213,7 @@ extern void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
gfs2_assert_withdraw((gl)->gl_name.ln_sbd, (x)); } } \
while (0)
-extern __printf(2, 3)
+__printf(2, 3)
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);
/**
@@ -256,27 +241,27 @@ static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
return error;
}
-extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
-extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
-extern bool gfs2_queue_try_to_evict(struct gfs2_glock *gl);
-extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
-extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
-extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
-extern void gfs2_gl_dq_holders(struct gfs2_sbd *sdp);
-extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
-extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
-extern void gfs2_glock_free(struct gfs2_glock *gl);
+void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
+void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
+bool gfs2_queue_try_to_evict(struct gfs2_glock *gl);
+void gfs2_cancel_delete_work(struct gfs2_glock *gl);
+void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
+void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
+void gfs2_gl_dq_holders(struct gfs2_sbd *sdp);
+void gfs2_glock_thaw(struct gfs2_sbd *sdp);
+void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
+void gfs2_glock_free(struct gfs2_glock *gl);
-extern int __init gfs2_glock_init(void);
-extern void gfs2_glock_exit(void);
+int __init gfs2_glock_init(void);
+void gfs2_glock_exit(void);
-extern void gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
-extern void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
-extern void gfs2_register_debugfs(void);
-extern void gfs2_unregister_debugfs(void);
+void gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
+void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
+void gfs2_register_debugfs(void);
+void gfs2_unregister_debugfs(void);
-extern void glock_set_object(struct gfs2_glock *gl, void *object);
-extern void glock_clear_object(struct gfs2_glock *gl, void *object);
+void glock_set_object(struct gfs2_glock *gl, void *object);
+void glock_clear_object(struct gfs2_glock *gl, void *object);
extern const struct lm_lockops gfs2_dlm_ops;
@@ -295,7 +280,7 @@ static inline bool gfs2_holder_queued(struct gfs2_holder *gh)
return !list_empty(&gh->gh_list);
}
-extern void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
-extern bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);
+void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
+bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);
#endif /* __GLOCK_DOT_H__ */
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index e7d334c277a1..b41c78bd2cc0 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -615,18 +615,6 @@ static int freeze_go_xmote_bh(struct gfs2_glock *gl)
}
/**
- * freeze_go_demote_ok
- * @gl: the glock
- *
- * Always returns 0
- */
-
-static int freeze_go_demote_ok(const struct gfs2_glock *gl)
-{
- return 0;
-}
-
-/**
* iopen_go_callback - schedule the dcache entry for the inode to be deleted
* @gl: the glock
* @remote: true if this came from a different cluster node
@@ -745,7 +733,6 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
const struct gfs2_glock_operations gfs2_freeze_glops = {
.go_xmote_bh = freeze_go_xmote_bh,
- .go_demote_ok = freeze_go_demote_ok,
.go_callback = freeze_go_callback,
.go_type = LM_TYPE_NONDISK,
.go_flags = GLOF_NONDISK,
diff --git a/fs/gfs2/glops.h b/fs/gfs2/glops.h
index 695898afcaf1..9341423798df 100644
--- a/fs/gfs2/glops.h
+++ b/fs/gfs2/glops.h
@@ -22,7 +22,7 @@ extern const struct gfs2_glock_operations gfs2_quota_glops;
extern const struct gfs2_glock_operations gfs2_journal_glops;
extern const struct gfs2_glock_operations *gfs2_glops_list[];
-extern int gfs2_inode_metasync(struct gfs2_glock *gl);
-extern void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync);
+int gfs2_inode_metasync(struct gfs2_glock *gl);
+void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync);
#endif /* __GLOPS_DOT_H__ */
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index a8c95c5293c6..95a334d64da2 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -863,7 +863,7 @@ static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
preempt_enable();
}
-extern struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl);
+struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl);
static inline unsigned gfs2_max_stuffed_size(const struct gfs2_inode *ip)
{
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 7fe77bc771e5..1b95db2c3aac 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -266,17 +266,18 @@ fail_iput:
}
-struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
+/**
+ * gfs2_lookup_meta - Look up an inode in a metadata directory
+ * @dip: The directory
+ * @name: The name of the inode
+ */
+struct inode *gfs2_lookup_meta(struct inode *dip, const char *name)
{
struct qstr qstr;
struct inode *inode;
+
gfs2_str2qstr(&qstr, name);
inode = gfs2_lookupi(dip, &qstr, 1);
- /* gfs2_lookupi has inconsistent callers: vfs
- * related routines expect NULL for no entry found,
- * gfs2_lookup_simple callers expect ENOENT
- * and do not check for NULL.
- */
if (IS_ERR_OR_NULL(inode))
return inode ? inode : ERR_PTR(-ENOENT);
@@ -418,7 +419,7 @@ static int alloc_dinode(struct gfs2_inode *ip, u32 flags, unsigned *dblocks)
if (error)
goto out_ipreserv;
- error = gfs2_alloc_blocks(ip, &ip->i_no_addr, dblocks, 1, &ip->i_generation);
+ error = gfs2_alloc_blocks(ip, &ip->i_no_addr, dblocks, 1);
if (error)
goto out_trans_end;
@@ -1867,16 +1868,24 @@ out:
int gfs2_permission(struct mnt_idmap *idmap, struct inode *inode,
int mask)
{
+ int may_not_block = mask & MAY_NOT_BLOCK;
struct gfs2_inode *ip;
struct gfs2_holder i_gh;
+ struct gfs2_glock *gl;
int error;
gfs2_holder_mark_uninitialized(&i_gh);
ip = GFS2_I(inode);
- if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
- if (mask & MAY_NOT_BLOCK)
+ gl = rcu_dereference_check(ip->i_gl, !may_not_block);
+ if (unlikely(!gl)) {
+ /* inode is getting torn down, must be RCU mode */
+ WARN_ON_ONCE(!may_not_block);
+ return -ECHILD;
+ }
+ if (gfs2_glock_is_locked_by_me(gl) == NULL) {
+ if (may_not_block)
return -ECHILD;
- error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
+ error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
if (error)
return error;
}
@@ -1921,7 +1930,7 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
kuid_t ouid, nuid;
kgid_t ogid, ngid;
int error;
- struct gfs2_alloc_parms ap;
+ struct gfs2_alloc_parms ap = {};
ouid = inode->i_uid;
ogid = inode->i_gid;
@@ -2154,7 +2163,7 @@ static int gfs2_update_time(struct inode *inode, int flags)
int error;
gh = gfs2_glock_is_locked_by_me(gl);
- if (gh && !gfs2_glock_is_held_excl(gl)) {
+ if (gh && gl->gl_state != LM_ST_EXCLUSIVE) {
gfs2_glock_dq(gh);
gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, gh);
error = gfs2_glock_nq(gh);
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index c8c5814e7295..fd15d1c6b6fb 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -13,9 +13,9 @@
#include "util.h"
bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask);
-extern int gfs2_internal_read(struct gfs2_inode *ip,
- char *buf, loff_t *pos, unsigned size);
-extern void gfs2_set_aops(struct inode *inode);
+ssize_t gfs2_internal_read(struct gfs2_inode *ip,
+ char *buf, loff_t *pos, size_t size);
+void gfs2_set_aops(struct inode *inode);
static inline int gfs2_is_stuffed(const struct gfs2_inode *ip)
{
@@ -44,19 +44,17 @@ static inline int gfs2_is_dir(const struct gfs2_inode *ip)
static inline void gfs2_set_inode_blocks(struct inode *inode, u64 blocks)
{
- inode->i_blocks = blocks <<
- (GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);
+ inode->i_blocks = blocks << (inode->i_blkbits - 9);
}
static inline u64 gfs2_get_inode_blocks(const struct inode *inode)
{
- return inode->i_blocks >>
- (GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);
+ return inode->i_blocks >> (inode->i_blkbits - 9);
}
static inline void gfs2_add_inode_blocks(struct inode *inode, s64 change)
{
- change <<= inode->i_blkbits - GFS2_BASIC_BLOCK_SHIFT;
+ change <<= inode->i_blkbits - 9;
gfs2_assert(GFS2_SB(inode), (change >= 0 || inode->i_blocks >= -change));
inode->i_blocks += change;
}
@@ -88,33 +86,33 @@ err:
return -EIO;
}
-extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
- u64 no_addr, u64 no_formal_ino,
- unsigned int blktype);
-extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
- u64 no_formal_ino,
- unsigned int blktype);
-
-extern int gfs2_inode_refresh(struct gfs2_inode *ip);
-
-extern struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
- int is_root);
-extern int gfs2_permission(struct mnt_idmap *idmap,
- struct inode *inode, int mask);
-extern struct inode *gfs2_lookup_simple(struct inode *dip, const char *name);
-extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
-extern int gfs2_open_common(struct inode *inode, struct file *file);
-extern loff_t gfs2_seek_data(struct file *file, loff_t offset);
-extern loff_t gfs2_seek_hole(struct file *file, loff_t offset);
+struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
+ u64 no_addr, u64 no_formal_ino,
+ unsigned int blktype);
+struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
+ u64 no_formal_ino,
+ unsigned int blktype);
+
+int gfs2_inode_refresh(struct gfs2_inode *ip);
+
+struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
+ int is_root);
+int gfs2_permission(struct mnt_idmap *idmap,
+ struct inode *inode, int mask);
+struct inode *gfs2_lookup_meta(struct inode *dip, const char *name);
+void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
+int gfs2_open_common(struct inode *inode, struct file *file);
+loff_t gfs2_seek_data(struct file *file, loff_t offset);
+loff_t gfs2_seek_hole(struct file *file, loff_t offset);
extern const struct file_operations gfs2_file_fops_nolock;
extern const struct file_operations gfs2_dir_fops_nolock;
-extern int gfs2_fileattr_get(struct dentry *dentry, struct fileattr *fa);
-extern int gfs2_fileattr_set(struct mnt_idmap *idmap,
- struct dentry *dentry, struct fileattr *fa);
-extern void gfs2_set_inode_flags(struct inode *inode);
-
+int gfs2_fileattr_get(struct dentry *dentry, struct fileattr *fa);
+int gfs2_fileattr_set(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct fileattr *fa);
+void gfs2_set_inode_flags(struct inode *inode);
+
#ifdef CONFIG_GFS2_FS_LOCKING_DLM
extern const struct file_operations gfs2_file_fops;
extern const struct file_operations gfs2_dir_fops;
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index 653cffcbf869..c27b05099c1e 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -70,29 +70,29 @@ static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
}
}
-extern void gfs2_ordered_del_inode(struct gfs2_inode *ip);
-extern unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct);
-extern void gfs2_remove_from_ail(struct gfs2_bufdata *bd);
-extern bool gfs2_log_is_empty(struct gfs2_sbd *sdp);
-extern void gfs2_log_release_revokes(struct gfs2_sbd *sdp, unsigned int revokes);
-extern void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks);
-extern bool gfs2_log_try_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
- unsigned int *extra_revokes);
-extern void gfs2_log_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
- unsigned int *extra_revokes);
-extern void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
- u64 seq, u32 tail, u32 lblock, u32 flags,
- blk_opf_t op_flags);
-extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
- u32 type);
-extern void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
-extern void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc);
-extern void log_flush_wait(struct gfs2_sbd *sdp);
+void gfs2_ordered_del_inode(struct gfs2_inode *ip);
+unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct);
+void gfs2_remove_from_ail(struct gfs2_bufdata *bd);
+bool gfs2_log_is_empty(struct gfs2_sbd *sdp);
+void gfs2_log_release_revokes(struct gfs2_sbd *sdp, unsigned int revokes);
+void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks);
+bool gfs2_log_try_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+ unsigned int *extra_revokes);
+void gfs2_log_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+ unsigned int *extra_revokes);
+void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+ u64 seq, u32 tail, u32 lblock, u32 flags,
+ blk_opf_t op_flags);
+void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
+ u32 type);
+void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
+void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc);
+void log_flush_wait(struct gfs2_sbd *sdp);
-extern int gfs2_logd(void *data);
-extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
-extern void gfs2_glock_remove_revoke(struct gfs2_glock *gl);
-extern void gfs2_flush_revokes(struct gfs2_sbd *sdp);
-extern void gfs2_ail_drain(struct gfs2_sbd *sdp);
+int gfs2_logd(void *data);
+void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
+void gfs2_glock_remove_revoke(struct gfs2_glock *gl);
+void gfs2_flush_revokes(struct gfs2_sbd *sdp);
+void gfs2_ail_drain(struct gfs2_sbd *sdp);
#endif /* __LOG_DOT_H__ */
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
index 1412ffba1d44..07890c7b145d 100644
--- a/fs/gfs2/lops.h
+++ b/fs/gfs2/lops.h
@@ -11,16 +11,18 @@
#include "incore.h"
extern const struct gfs2_log_operations *gfs2_log_ops[];
-extern void gfs2_log_incr_head(struct gfs2_sbd *sdp);
-extern u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lbn);
-extern void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
- struct page *page, unsigned size, unsigned offset,
- u64 blkno);
-extern void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf);
-extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
-extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
- struct gfs2_log_header_host *head, bool keep_cache);
-extern void gfs2_drain_revokes(struct gfs2_sbd *sdp);
+
+void gfs2_log_incr_head(struct gfs2_sbd *sdp);
+u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lbn);
+void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+ struct page *page, unsigned size, unsigned offset,
+ u64 blkno);
+void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf);
+void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
+int gfs2_find_jhead(struct gfs2_jdesc *jd,
+ struct gfs2_log_header_host *head, bool keep_cache);
+void gfs2_drain_revokes(struct gfs2_sbd *sdp);
+
static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
{
return sdp->sd_ldptrs;
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
index d0a58cdd433a..831d988c2ceb 100644
--- a/fs/gfs2/meta_io.h
+++ b/fs/gfs2/meta_io.h
@@ -50,21 +50,21 @@ static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping)
return inode->i_sb->s_fs_info;
}
-extern struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno);
-extern int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
- int rahead, struct buffer_head **bhp);
-extern int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh);
-extern struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno,
- int create);
+struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno);
+int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
+ int rahead, struct buffer_head **bhp);
+int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh);
+struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno,
+ int create);
enum {
REMOVE_JDATA = 0,
REMOVE_META = 1,
};
-extern void gfs2_remove_from_journal(struct buffer_head *bh, int meta);
-extern void gfs2_journal_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen);
-extern int gfs2_meta_buffer(struct gfs2_inode *ip, u32 mtype, u64 num,
- struct buffer_head **bhp);
+void gfs2_remove_from_journal(struct buffer_head *bh, int meta);
+void gfs2_journal_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen);
+int gfs2_meta_buffer(struct gfs2_inode *ip, u32 mtype, u64 num,
+ struct buffer_head **bhp);
static inline int gfs2_meta_inode_buffer(struct gfs2_inode *ip,
struct buffer_head **bhp)
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index ecf789b7168c..b108c5d26839 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -292,8 +292,7 @@ static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
return error;
}
- sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
- GFS2_BASIC_BLOCK_SHIFT;
+ sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift - 9;
sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
sdp->sd_diptrs = (sdp->sd_sb.sb_bsize -
sizeof(struct gfs2_dinode)) / sizeof(u64);
@@ -648,7 +647,7 @@ static int init_statfs(struct gfs2_sbd *sdp)
struct gfs2_jdesc *jd;
struct gfs2_inode *ip;
- sdp->sd_statfs_inode = gfs2_lookup_simple(master, "statfs");
+ sdp->sd_statfs_inode = gfs2_lookup_meta(master, "statfs");
if (IS_ERR(sdp->sd_statfs_inode)) {
error = PTR_ERR(sdp->sd_statfs_inode);
fs_err(sdp, "can't read in statfs inode: %d\n", error);
@@ -657,7 +656,7 @@ static int init_statfs(struct gfs2_sbd *sdp)
if (sdp->sd_args.ar_spectator)
goto out;
- pn = gfs2_lookup_simple(master, "per_node");
+ pn = gfs2_lookup_meta(master, "per_node");
if (IS_ERR(pn)) {
error = PTR_ERR(pn);
fs_err(sdp, "can't find per_node directory: %d\n", error);
@@ -674,7 +673,7 @@ static int init_statfs(struct gfs2_sbd *sdp)
goto free_local;
}
sprintf(buf, "statfs_change%u", jd->jd_jid);
- lsi->si_sc_inode = gfs2_lookup_simple(pn, buf);
+ lsi->si_sc_inode = gfs2_lookup_meta(pn, buf);
if (IS_ERR(lsi->si_sc_inode)) {
error = PTR_ERR(lsi->si_sc_inode);
fs_err(sdp, "can't find local \"sc\" file#%u: %d\n",
@@ -739,7 +738,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
if (undo)
goto fail_statfs;
- sdp->sd_jindex = gfs2_lookup_simple(master, "jindex");
+ sdp->sd_jindex = gfs2_lookup_meta(master, "jindex");
if (IS_ERR(sdp->sd_jindex)) {
fs_err(sdp, "can't lookup journal index: %d\n", error);
return PTR_ERR(sdp->sd_jindex);
@@ -888,7 +887,7 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo)
goto fail;
/* Read in the resource index inode */
- sdp->sd_rindex = gfs2_lookup_simple(master, "rindex");
+ sdp->sd_rindex = gfs2_lookup_meta(master, "rindex");
if (IS_ERR(sdp->sd_rindex)) {
error = PTR_ERR(sdp->sd_rindex);
fs_err(sdp, "can't get resource index inode: %d\n", error);
@@ -897,7 +896,7 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo)
sdp->sd_rindex_uptodate = 0;
/* Read in the quota inode */
- sdp->sd_quota_inode = gfs2_lookup_simple(master, "quota");
+ sdp->sd_quota_inode = gfs2_lookup_meta(master, "quota");
if (IS_ERR(sdp->sd_quota_inode)) {
error = PTR_ERR(sdp->sd_quota_inode);
fs_err(sdp, "can't get quota file inode: %d\n", error);
@@ -941,7 +940,7 @@ static int init_per_node(struct gfs2_sbd *sdp, int undo)
if (undo)
goto fail_qc_gh;
- pn = gfs2_lookup_simple(master, "per_node");
+ pn = gfs2_lookup_meta(master, "per_node");
if (IS_ERR(pn)) {
error = PTR_ERR(pn);
fs_err(sdp, "can't find per_node directory: %d\n", error);
@@ -949,7 +948,7 @@ static int init_per_node(struct gfs2_sbd *sdp, int undo)
}
sprintf(buf, "quota_change%u", sdp->sd_jdesc->jd_jid);
- sdp->sd_qc_inode = gfs2_lookup_simple(pn, buf);
+ sdp->sd_qc_inode = gfs2_lookup_meta(pn, buf);
if (IS_ERR(sdp->sd_qc_inode)) {
error = PTR_ERR(sdp->sd_qc_inode);
fs_err(sdp, "can't find local \"qc\" file: %d\n", error);
@@ -1187,10 +1186,9 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
/* Set up the buffer cache and fill in some fake block size values
to allow us to read-in the on-disk superblock. */
- sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, GFS2_BASIC_BLOCK);
+ sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, 512);
sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits;
- sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
- GFS2_BASIC_BLOCK_SHIFT;
+ sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift - 9;
sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
sdp->sd_tune.gt_logd_secs = sdp->sd_args.ar_commit;
@@ -1278,10 +1276,8 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
if (!sb_rdonly(sb)) {
error = init_threads(sdp);
- if (error) {
- gfs2_withdraw_delayed(sdp);
+ if (error)
goto fail_per_node;
- }
}
error = gfs2_freeze_lock_shared(sdp);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 5cbbc1a46a92..95dae7838b4e 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -470,6 +470,17 @@ static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
(sync_gen && (qd->qd_sync_gen >= *sync_gen)))
return 0;
+ /*
+ * If qd_change is 0 it means a pending quota change was negated.
+ * We should not sync it, but we still have a qd reference and slot
+ * reference taken by gfs2_quota_change -> do_qc that need to be put.
+ */
+ if (!qd->qd_change && test_and_clear_bit(QDF_CHANGE, &qd->qd_flags)) {
+ slot_put(qd);
+ qd_put(qd);
+ return 0;
+ }
+
if (!lockref_get_not_dead(&qd->qd_lockref))
return 0;
@@ -912,7 +923,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
struct gfs2_sbd *sdp = (*qda)->qd_sbd;
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
- struct gfs2_alloc_parms ap = { .aflags = 0, };
+ struct gfs2_alloc_parms ap = {};
unsigned int data_blocks, ind_blocks;
struct gfs2_holder *ghs, i_gh;
unsigned int qx, x;
@@ -1086,8 +1097,7 @@ int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
u32 x;
int error;
- if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON &&
- sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET)
+ if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
return 0;
error = gfs2_quota_hold(ip, uid, gid);
@@ -1194,17 +1204,16 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
#define MAX_LINE 256
-static int print_message(struct gfs2_quota_data *qd, char *type)
+static void print_message(struct gfs2_quota_data *qd, char *type)
{
struct gfs2_sbd *sdp = qd->qd_sbd;
- if (sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET)
+ if (sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET) {
fs_info(sdp, "quota %s for %s %u\n",
type,
(qd->qd_id.type == USRQUOTA) ? "user" : "group",
from_kqid(&init_user_ns, qd->qd_id));
-
- return 0;
+ }
}
/**
@@ -1274,7 +1283,8 @@ int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
* HZ)) {
quota_send_warning(qd->qd_id,
sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
- error = print_message(qd, "warning");
+ print_message(qd, "warning");
+ error = 0;
qd->qd_last_warn = jiffies;
}
}
@@ -1288,8 +1298,7 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
u32 x;
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- if ((sdp->sd_args.ar_quota != GFS2_QUOTA_ON &&
- sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET) ||
+ if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ||
gfs2_assert_warn(sdp, change))
return;
if (ip->i_diskflags & GFS2_DIF_SYSTEM)
@@ -1746,7 +1755,7 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
if (gfs2_is_stuffed(ip))
alloc_required = 1;
if (alloc_required) {
- struct gfs2_alloc_parms ap = { .aflags = 0, };
+ struct gfs2_alloc_parms ap = {};
gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
&data_blocks, &ind_blocks);
blocks = 1 + data_blocks + ind_blocks;
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index 36f54b426b0c..f462d9cb3087 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -15,27 +15,27 @@ struct gfs2_sbd;
#define NO_UID_QUOTA_CHANGE INVALID_UID
#define NO_GID_QUOTA_CHANGE INVALID_GID
-extern int gfs2_qa_get(struct gfs2_inode *ip);
-extern void gfs2_qa_put(struct gfs2_inode *ip);
-extern int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
-extern void gfs2_quota_unhold(struct gfs2_inode *ip);
+int gfs2_qa_get(struct gfs2_inode *ip);
+void gfs2_qa_put(struct gfs2_inode *ip);
+int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
+void gfs2_quota_unhold(struct gfs2_inode *ip);
-extern int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
-extern void gfs2_quota_unlock(struct gfs2_inode *ip);
+int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
+void gfs2_quota_unlock(struct gfs2_inode *ip);
-extern int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
- struct gfs2_alloc_parms *ap);
-extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
- kuid_t uid, kgid_t gid);
+int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
+ struct gfs2_alloc_parms *ap);
+void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
+ kuid_t uid, kgid_t gid);
-extern int gfs2_quota_sync(struct super_block *sb, int type);
-extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid);
+int gfs2_quota_sync(struct super_block *sb, int type);
+int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid);
-extern int gfs2_quota_init(struct gfs2_sbd *sdp);
-extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp);
-extern int gfs2_quotad(void *data);
+int gfs2_quota_init(struct gfs2_sbd *sdp);
+void gfs2_quota_cleanup(struct gfs2_sbd *sdp);
+int gfs2_quotad(void *data);
-extern void gfs2_wake_up_statfs(struct gfs2_sbd *sdp);
+void gfs2_wake_up_statfs(struct gfs2_sbd *sdp);
static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
struct gfs2_alloc_parms *ap)
@@ -50,8 +50,7 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
if (ret)
return ret;
- if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON &&
- sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET)
+ if (sdp->sd_args.ar_quota == GFS2_QUOTA_ACCOUNT)
return 0;
ret = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid, ap);
if (ret)
@@ -63,6 +62,7 @@ extern const struct quotactl_ops gfs2_quotactl_ops;
int __init gfs2_qd_shrinker_init(void);
void gfs2_qd_shrinker_exit(void);
extern struct list_lru gfs2_qd_lru;
-extern void __init gfs2_quota_hash_init(void);
+
+void __init gfs2_quota_hash_init(void);
#endif /* __QUOTA_DOT_H__ */
diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h
index 7a0c9d0b7503..6a0fd42e1120 100644
--- a/fs/gfs2/recovery.h
+++ b/fs/gfs2/recovery.h
@@ -17,18 +17,18 @@ static inline void gfs2_replay_incr_blk(struct gfs2_jdesc *jd, u32 *blk)
*blk = 0;
}
-extern int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk,
+int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk,
struct buffer_head **bh);
-extern int gfs2_revoke_add(struct gfs2_jdesc *jd, u64 blkno, unsigned int where);
-extern int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where);
-extern void gfs2_revoke_clean(struct gfs2_jdesc *jd);
+int gfs2_revoke_add(struct gfs2_jdesc *jd, u64 blkno, unsigned int where);
+int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where);
+void gfs2_revoke_clean(struct gfs2_jdesc *jd);
-extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd, bool wait);
-extern void gfs2_recover_func(struct work_struct *work);
-extern int __get_log_header(struct gfs2_sbd *sdp,
- const struct gfs2_log_header *lh, unsigned int blkno,
- struct gfs2_log_header_host *head);
+int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd, bool wait);
+void gfs2_recover_func(struct work_struct *work);
+int __get_log_header(struct gfs2_sbd *sdp,
+ const struct gfs2_log_header *lh, unsigned int blkno,
+ struct gfs2_log_header_host *head);
#endif /* __RECOVERY_DOT_H__ */
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 9308190895c8..c2060203b98a 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -2411,13 +2411,12 @@ static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
* @bn: Used to return the starting block number
* @nblocks: requested number of blocks/extent length (value/result)
* @dinode: 1 if we're allocating a dinode block, else 0
- * @generation: the generation number of the inode
*
* Returns: 0 or error
*/
int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
- bool dinode, u64 *generation)
+ bool dinode)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct buffer_head *dibh;
@@ -2477,10 +2476,13 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
rbm.rgd->rd_free -= *nblocks;
spin_unlock(&rbm.rgd->rd_rsspin);
if (dinode) {
+ u64 generation;
+
rbm.rgd->rd_dinodes++;
- *generation = rbm.rgd->rd_igeneration++;
- if (*generation == 0)
- *generation = rbm.rgd->rd_igeneration++;
+ generation = rbm.rgd->rd_igeneration++;
+ if (generation == 0)
+ generation = rbm.rgd->rd_igeneration++;
+ ip->i_generation = generation;
}
gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index 00b30cf893af..8d20e99385db 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -22,38 +22,38 @@ struct gfs2_rgrpd;
struct gfs2_sbd;
struct gfs2_holder;
-extern void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd);
+void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd);
-extern struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact);
-extern struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp);
-extern struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd);
+struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact);
+struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp);
+struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd);
-extern void gfs2_clear_rgrpd(struct gfs2_sbd *sdp);
-extern int gfs2_rindex_update(struct gfs2_sbd *sdp);
-extern void gfs2_free_clones(struct gfs2_rgrpd *rgd);
-extern int gfs2_rgrp_go_instantiate(struct gfs2_glock *gl);
-extern void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd);
+void gfs2_clear_rgrpd(struct gfs2_sbd *sdp);
+int gfs2_rindex_update(struct gfs2_sbd *sdp);
+void gfs2_free_clones(struct gfs2_rgrpd *rgd);
+int gfs2_rgrp_go_instantiate(struct gfs2_glock *gl);
+void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd);
-extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
+struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
#define GFS2_AF_ORLOV 1
-extern int gfs2_inplace_reserve(struct gfs2_inode *ip,
- struct gfs2_alloc_parms *ap);
-extern void gfs2_inplace_release(struct gfs2_inode *ip);
-
-extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
- bool dinode, u64 *generation);
-
-extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
-extern void gfs2_rs_delete(struct gfs2_inode *ip);
-extern void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
- u64 bstart, u32 blen, int meta);
-extern void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
- u64 bstart, u32 blen);
-extern void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
-extern void gfs2_unlink_di(struct inode *inode);
-extern int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr,
- unsigned int type);
+int gfs2_inplace_reserve(struct gfs2_inode *ip,
+ struct gfs2_alloc_parms *ap);
+void gfs2_inplace_release(struct gfs2_inode *ip);
+
+int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
+ bool dinode);
+
+void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
+void gfs2_rs_delete(struct gfs2_inode *ip);
+void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
+ u64 bstart, u32 blen, int meta);
+void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
+ u64 bstart, u32 blen);
+void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
+void gfs2_unlink_di(struct inode *inode);
+int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr,
+ unsigned int type);
struct gfs2_rgrp_list {
unsigned int rl_rgrps;
@@ -62,18 +62,19 @@ struct gfs2_rgrp_list {
struct gfs2_holder *rl_ghs;
};
-extern void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
- u64 block);
-extern void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist,
- unsigned int state, u16 flags);
-extern void gfs2_rlist_free(struct gfs2_rgrp_list *rlist);
-extern u64 gfs2_ri_total(struct gfs2_sbd *sdp);
-extern void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
- const char *fs_id_buf);
-extern int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
- struct buffer_head *bh,
- const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed);
-extern int gfs2_fitrim(struct file *filp, void __user *argp);
+void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
+ u64 block);
+void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist,
+ unsigned int state, u16 flags);
+void gfs2_rlist_free(struct gfs2_rgrp_list *rlist);
+u64 gfs2_ri_total(struct gfs2_sbd *sdp);
+void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
+ const char *fs_id_buf);
+int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
+ struct buffer_head *bh,
+ const struct gfs2_bitmap *bi, unsigned minlen,
+ u64 *ptrimmed);
+int gfs2_fitrim(struct file *filp, void __user *argp);
/* This is how to tell if a reservation is in the rgrp tree: */
static inline bool gfs2_rs_active(const struct gfs2_blkreserv *rs)
@@ -88,9 +89,9 @@ static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
return first <= block && block < last;
}
-extern void check_and_update_goal(struct gfs2_inode *ip);
+void check_and_update_goal(struct gfs2_inode *ip);
-extern void rgrp_lock_local(struct gfs2_rgrpd *rgd);
-extern void rgrp_unlock_local(struct gfs2_rgrpd *rgd);
+void rgrp_lock_local(struct gfs2_rgrpd *rgd);
+void rgrp_unlock_local(struct gfs2_rgrpd *rgd);
#endif /* __RGRP_DOT_H__ */
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 52a878fa7139..d21c04a22d73 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -602,13 +602,15 @@ restart:
}
spin_unlock(&sdp->sd_jindex_spin);
- if (!sb_rdonly(sb)) {
+ if (!sb_rdonly(sb))
gfs2_make_fs_ro(sdp);
- }
- if (gfs2_withdrawn(sdp)) {
- gfs2_destroy_threads(sdp);
+ else {
+ if (gfs2_withdrawn(sdp))
+ gfs2_destroy_threads(sdp);
+
gfs2_quota_cleanup(sdp);
}
+
WARN_ON(gfs2_withdrawing(sdp));
/* At this point, we're through modifying the disk */
@@ -1006,6 +1008,7 @@ static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_files = sc.sc_dinodes + sc.sc_free;
buf->f_ffree = sc.sc_free;
buf->f_namelen = GFS2_FNAMESIZE;
+ buf->f_fsid = uuid_to_fsid(sb->s_uuid.b);
return 0;
}
@@ -1299,18 +1302,8 @@ static bool gfs2_upgrade_iopen_glock(struct inode *inode)
* As a last resort, if another node keeps holding the iopen glock
* without showing any activity on the inode glock, we will eventually
* time out and fail the iopen glock upgrade.
- *
- * Note that we're passing the LM_FLAG_TRY_1CB flag to the first
- * locking request as an optimization to notify lock holders as soon as
- * possible. Without that flag, they'd be notified implicitly by the
- * second locking request.
*/
- gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, gh);
- error = gfs2_glock_nq(gh);
- if (error != GLR_TRYFAILED)
- return !error;
-
gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
error = gfs2_glock_nq(gh);
if (error)
@@ -1550,7 +1543,7 @@ out:
wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
gfs2_glock_add_to_lru(ip->i_gl);
gfs2_glock_put_eventually(ip->i_gl);
- ip->i_gl = NULL;
+ rcu_assign_pointer(ip->i_gl, NULL);
}
}
@@ -1576,7 +1569,7 @@ static void gfs2_free_inode(struct inode *inode)
kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
}
-extern void free_local_statfs_inodes(struct gfs2_sbd *sdp)
+void free_local_statfs_inodes(struct gfs2_sbd *sdp)
{
struct local_statfs_inode *lsi, *safe;
@@ -1591,8 +1584,8 @@ extern void free_local_statfs_inodes(struct gfs2_sbd *sdp)
}
}
-extern struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
- unsigned int index)
+struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
+ unsigned int index)
{
struct local_statfs_inode *lsi;
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index b4ddf6244586..b27a774d9580 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -15,7 +15,7 @@
#define GFS2_FS_FORMAT_MIN (1801)
#define GFS2_FS_FORMAT_MAX (1802)
-extern void gfs2_lm_unmount(struct gfs2_sbd *sdp);
+void gfs2_lm_unmount(struct gfs2_sbd *sdp);
static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
{
@@ -26,33 +26,33 @@ static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
return x;
}
-extern void gfs2_jindex_free(struct gfs2_sbd *sdp);
+void gfs2_jindex_free(struct gfs2_sbd *sdp);
-extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
-extern int gfs2_jdesc_check(struct gfs2_jdesc *jd);
-extern int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename,
- struct gfs2_inode **ipp);
+struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
+int gfs2_jdesc_check(struct gfs2_jdesc *jd);
+int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename,
+ struct gfs2_inode **ipp);
-extern int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
-extern void gfs2_make_fs_ro(struct gfs2_sbd *sdp);
-extern void gfs2_online_uevent(struct gfs2_sbd *sdp);
-extern void gfs2_destroy_threads(struct gfs2_sbd *sdp);
-extern int gfs2_statfs_init(struct gfs2_sbd *sdp);
-extern void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
- s64 dinodes);
-extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc,
- const void *buf);
-extern void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc,
- void *buf);
-extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh);
-extern int gfs2_statfs_sync(struct super_block *sb, int type);
-extern void gfs2_freeze_func(struct work_struct *work);
-extern void gfs2_thaw_freeze_initiator(struct super_block *sb);
+int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
+void gfs2_make_fs_ro(struct gfs2_sbd *sdp);
+void gfs2_online_uevent(struct gfs2_sbd *sdp);
+void gfs2_destroy_threads(struct gfs2_sbd *sdp);
+int gfs2_statfs_init(struct gfs2_sbd *sdp);
+void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
+ s64 dinodes);
+void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc,
+ const void *buf);
+void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc,
+ void *buf);
+void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh);
+int gfs2_statfs_sync(struct super_block *sb, int type);
+void gfs2_freeze_func(struct work_struct *work);
+void gfs2_thaw_freeze_initiator(struct super_block *sb);
-extern void free_local_statfs_inodes(struct gfs2_sbd *sdp);
-extern struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
- unsigned int index);
-extern void free_sbd(struct gfs2_sbd *sdp);
+void free_local_statfs_inodes(struct gfs2_sbd *sdp);
+struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
+ unsigned int index);
+void free_sbd(struct gfs2_sbd *sdp);
extern struct file_system_type gfs2_fs_type;
extern struct file_system_type gfs2meta_fs_type;
diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h
index c76ad9a4c75a..f8ce5302280d 100644
--- a/fs/gfs2/trans.h
+++ b/fs/gfs2/trans.h
@@ -34,17 +34,17 @@ static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip, unsigned
return rgd->rd_length;
}
-extern int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
- unsigned int blocks, unsigned int revokes,
- unsigned long ip);
-extern int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
- unsigned int revokes);
-
-extern void gfs2_trans_end(struct gfs2_sbd *sdp);
-extern void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh);
-extern void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh);
-extern void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
-extern void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len);
-extern void gfs2_trans_free(struct gfs2_sbd *sdp, struct gfs2_trans *tr);
+int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
+ unsigned int blocks, unsigned int revokes,
+ unsigned long ip);
+int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
+ unsigned int revokes);
+
+void gfs2_trans_end(struct gfs2_sbd *sdp);
+void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh);
+void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh);
+void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
+void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len);
+void gfs2_trans_free(struct gfs2_sbd *sdp, struct gfs2_trans *tr);
#endif /* __TRANS_DOT_H__ */
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index cdb839529175..11c9d59b6889 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -147,10 +147,10 @@ static inline void gfs2_metatype_set(struct buffer_head *bh, u16 type,
int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function,
char *file, unsigned int line);
-extern int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
- bool verbose);
-extern int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp);
-extern void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh);
+int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+ bool verbose);
+int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp);
+void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh);
#define gfs2_io_error(sdp) \
gfs2_io_error_i((sdp), __func__, __FILE__, __LINE__)
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 79d5c5559512..8c96ba6230d1 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -639,7 +639,7 @@ static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
u64 block;
int error;
- error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
+ error = gfs2_alloc_blocks(ip, &block, &n, 0);
if (error)
return error;
gfs2_trans_remove_revoke(sdp, block, 1);
@@ -701,7 +701,7 @@ static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
int mh_size = sizeof(struct gfs2_meta_header);
unsigned int n = 1;
- error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
+ error = gfs2_alloc_blocks(ip, &block, &n, 0);
if (error)
return error;
gfs2_trans_remove_revoke(sdp, block, 1);
@@ -1002,7 +1002,7 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
} else {
u64 blk;
unsigned int n = 1;
- error = gfs2_alloc_blocks(ip, &blk, &n, 0, NULL);
+ error = gfs2_alloc_blocks(ip, &blk, &n, 0);
if (error)
return error;
gfs2_trans_remove_revoke(sdp, blk, 1);
diff --git a/fs/gfs2/xattr.h b/fs/gfs2/xattr.h
index 2aed9d7d483d..eb12eb7e37c1 100644
--- a/fs/gfs2/xattr.h
+++ b/fs/gfs2/xattr.h
@@ -50,14 +50,14 @@ struct gfs2_ea_location {
struct gfs2_ea_header *el_prev;
};
-extern int __gfs2_xattr_set(struct inode *inode, const char *name,
- const void *value, size_t size,
- int flags, int type);
-extern ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size);
-extern int gfs2_ea_dealloc(struct gfs2_inode *ip);
+int __gfs2_xattr_set(struct inode *inode, const char *name,
+ const void *value, size_t size,
+ int flags, int type);
+ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size);
+int gfs2_ea_dealloc(struct gfs2_inode *ip);
/* Exported to acl.c */
-extern int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **data);
+int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **data);
#endif /* __EATTR_DOT_H__ */
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 54b3d489b6a7..f757d4f7ad98 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1179,7 +1179,9 @@ static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
struct hstate *h = hstate_inode(d_inode(dentry));
+ u64 id = huge_encode_dev(dentry->d_sb->s_dev);
+ buf->f_fsid = u64_to_fsid(id);
buf->f_type = HUGETLBFS_MAGIC;
buf->f_bsize = huge_page_size(h);
if (sbinfo) {
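The statfs() changes in this series (gfs2 above, hugetlbfs here, and simple_statfs() in libfs further down) start filling in f_fsid instead of leaving it zeroed: gfs2 derives it from the superblock UUID, the others from the device number via huge_encode_dev()/u64_to_fsid(). A minimal userspace sketch to observe the new value, assuming glibc's statfs(2) wrapper and its two-word fsid layout:

#include <stdio.h>
#include <sys/statfs.h>

int main(int argc, char **argv)
{
	struct statfs sfs;

	if (argc < 2 || statfs(argv[1], &sfs) != 0) {
		perror("statfs");
		return 1;
	}
	/* Before these patches, several of these filesystems reported 0:0 here. */
	printf("f_type=%#lx f_fsid=%#x:%#x\n", (unsigned long)sfs.f_type,
	       sfs.f_fsid.__val[0], sfs.f_fsid.__val[1]);
	return 0;
}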
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 7ea37f49f1e1..f99591a634b4 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -150,6 +150,7 @@ static struct dentry *jffs2_get_parent(struct dentry *child)
}
static const struct export_operations jffs2_export_ops = {
+ .encode_fh = generic_encode_ino32_fh,
.get_parent = jffs2_get_parent,
.fh_to_dentry = jffs2_fh_to_dentry,
.fh_to_parent = jffs2_fh_to_parent,
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 966826c394ee..8d8e556bd610 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -896,6 +896,7 @@ static const struct super_operations jfs_super_operations = {
};
static const struct export_operations jfs_export_operations = {
+ .encode_fh = generic_encode_ino32_fh,
.fh_to_dentry = jfs_fh_to_dentry,
.fh_to_parent = jfs_fh_to_parent,
.get_parent = jfs_get_parent,
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index aaa76410e550..f0cb729e9a97 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -854,6 +854,33 @@ static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
return ret;
}
+static loff_t kernfs_fop_llseek(struct file *file, loff_t offset, int whence)
+{
+ struct kernfs_open_file *of = kernfs_of(file);
+ const struct kernfs_ops *ops;
+ loff_t ret;
+
+ /*
+ * @of->mutex nests outside active ref and is primarily to ensure that
+ * the ops aren't called concurrently for the same open file.
+ */
+ mutex_lock(&of->mutex);
+ if (!kernfs_get_active(of->kn)) {
+ mutex_unlock(&of->mutex);
+ return -ENODEV;
+ }
+
+ ops = kernfs_ops(of->kn);
+ if (ops->llseek)
+ ret = ops->llseek(of, offset, whence);
+ else
+ ret = generic_file_llseek(file, offset, whence);
+
+ kernfs_put_active(of->kn);
+ mutex_unlock(&of->mutex);
+ return ret;
+}
+
static void kernfs_notify_workfn(struct work_struct *work)
{
struct kernfs_node *kn;
@@ -956,7 +983,7 @@ EXPORT_SYMBOL_GPL(kernfs_notify);
const struct file_operations kernfs_file_fops = {
.read_iter = kernfs_fop_read_iter,
.write_iter = kernfs_fop_write_iter,
- .llseek = generic_file_llseek,
+ .llseek = kernfs_fop_llseek,
.mmap = kernfs_fop_mmap,
.open = kernfs_fop_open,
.release = kernfs_fop_release,
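kernfs_fop_llseek() above dispatches to a per-node ops->llseek() when the provider supplies one and otherwise falls back to generic_file_llseek(), preserving the old default. A sketch of a provider callback, assuming struct kernfs_ops grows a matching .llseek member elsewhere in this series (that struct change is not part of this hunk); the signature follows the ops->llseek(of, offset, whence) call above:

/* Illustrative only: an attribute that only permits rewinding to offset 0. */
static loff_t example_kernfs_llseek(struct kernfs_open_file *of,
				    loff_t offset, int whence)
{
	if (whence != SEEK_SET || offset != 0)
		return -EINVAL;
	return 0;
}

A provider would then set .llseek = example_kernfs_llseek in its struct kernfs_ops alongside its read/write callbacks.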
diff --git a/fs/libfs.c b/fs/libfs.c
index abe2b5a40ba1..e9440d55073c 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -41,6 +41,9 @@ EXPORT_SYMBOL(simple_getattr);
int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ u64 id = huge_encode_dev(dentry->d_sb->s_dev);
+
+ buf->f_fsid = u64_to_fsid(id);
buf->f_type = dentry->d_sb->s_magic;
buf->f_bsize = PAGE_SIZE;
buf->f_namelen = NAME_MAX;
@@ -1310,6 +1313,47 @@ ssize_t simple_attr_write_signed(struct file *file, const char __user *buf,
EXPORT_SYMBOL_GPL(simple_attr_write_signed);
/**
+ * generic_encode_ino32_fh - generic export_operations->encode_fh function
+ * @inode: the object to encode
+ * @fh: where to store the file handle fragment
+ * @max_len: maximum length to store there (in 4 byte units)
+ * @parent: parent directory inode, if wanted
+ *
+ * This generic encode_fh function assumes that the 32 inode number
+ * is suitable for locating an inode, and that the generation number
+ * can be used to check that it is still valid. It places them in the
+ * filehandle fragment where export_decode_fh expects to find them.
+ */
+int generic_encode_ino32_fh(struct inode *inode, __u32 *fh, int *max_len,
+ struct inode *parent)
+{
+ struct fid *fid = (void *)fh;
+ int len = *max_len;
+ int type = FILEID_INO32_GEN;
+
+ if (parent && (len < 4)) {
+ *max_len = 4;
+ return FILEID_INVALID;
+ } else if (len < 2) {
+ *max_len = 2;
+ return FILEID_INVALID;
+ }
+
+ len = 2;
+ fid->i32.ino = inode->i_ino;
+ fid->i32.gen = inode->i_generation;
+ if (parent) {
+ fid->i32.parent_ino = parent->i_ino;
+ fid->i32.parent_gen = parent->i_generation;
+ len = 4;
+ type = FILEID_INO32_GEN_PARENT;
+ }
+ *max_len = len;
+ return type;
+}
+EXPORT_SYMBOL_GPL(generic_encode_ino32_fh);
+
+/**
* generic_fh_to_dentry - generic helper for the fh_to_dentry export operation
* @sb: filesystem to do the file handle conversion on
* @fid: file handle to convert
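generic_encode_ino32_fh() packs the inode number and generation into a two-word handle (four words when the parent is also requested), which is what the jffs2, jfs, ntfs and ntfs3 hunks in this series now plug into .encode_fh. A small userspace sketch to inspect such a handle, assuming glibc's name_to_handle_at(2) wrapper; it runs against any filesystem that exports FILEID_INO32_GEN style handles:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	struct file_handle *fh;
	unsigned int *w;
	int mount_id;

	fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
	if (!fh)
		return 1;
	fh->handle_bytes = MAX_HANDLE_SZ;
	if (argc < 2 ||
	    name_to_handle_at(AT_FDCWD, argv[1], fh, &mount_id, 0) != 0) {
		perror("name_to_handle_at");
		return 1;
	}
	/* For FILEID_INO32_GEN handles, word 0 is fid->i32.ino and word 1 is
	 * fid->i32.gen, matching the encoder above. */
	w = (unsigned int *)fh->f_handle;
	printf("handle_type=%d ino=%u gen=%u\n", fh->handle_type, w[0], w[1]);
	return 0;
}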
diff --git a/fs/mnt_idmapping.c b/fs/mnt_idmapping.c
index 4905665c47d0..57d1dedf3f8f 100644
--- a/fs/mnt_idmapping.c
+++ b/fs/mnt_idmapping.c
@@ -256,6 +256,7 @@ struct mnt_idmap *mnt_idmap_get(struct mnt_idmap *idmap)
return idmap;
}
+EXPORT_SYMBOL_GPL(mnt_idmap_get);
/**
* mnt_idmap_put - put a reference to an idmapping
@@ -271,3 +272,4 @@ void mnt_idmap_put(struct mnt_idmap *idmap)
kfree(idmap);
}
}
+EXPORT_SYMBOL_GPL(mnt_idmap_put);
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 7df2503cef6c..01ac733a6320 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -125,7 +125,7 @@ config PNFS_BLOCK
config PNFS_FLEXFILE_LAYOUT
tristate
- depends on NFS_V4_1 && NFS_V3
+ depends on NFS_V4_1
default NFS_V4
config NFS_V4_1_IMPLEMENTATION_ID_DOMAIN
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index cf7365581031..fa1a14def45c 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -448,6 +448,7 @@ int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
delegation->cred = get_cred(cred);
delegation->inode = inode;
delegation->flags = 1<<NFS_DELEGATION_REFERENCED;
+ delegation->test_gen = 0;
spin_lock_init(&delegation->lock);
spin_lock(&clp->cl_lock);
@@ -1294,6 +1295,8 @@ static int nfs_server_reap_expired_delegations(struct nfs_server *server,
struct inode *inode;
const struct cred *cred;
nfs4_stateid stateid;
+ unsigned long gen = ++server->delegation_gen;
+
restart:
rcu_read_lock();
restart_locked:
@@ -1303,7 +1306,8 @@ restart_locked:
test_bit(NFS_DELEGATION_RETURNING,
&delegation->flags) ||
test_bit(NFS_DELEGATION_TEST_EXPIRED,
- &delegation->flags) == 0)
+ &delegation->flags) == 0 ||
+ delegation->test_gen == gen)
continue;
inode = nfs_delegation_grab_inode(delegation);
if (inode == NULL)
@@ -1312,6 +1316,7 @@ restart_locked:
cred = get_cred_rcu(delegation->cred);
nfs4_stateid_copy(&stateid, &delegation->stateid);
spin_unlock(&delegation->lock);
+ delegation->test_gen = gen;
clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
rcu_read_unlock();
nfs_delegation_test_free_expired(inode, &stateid, cred);
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 1c378992b7c0..a6f495d012cf 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -21,6 +21,7 @@ struct nfs_delegation {
fmode_t type;
unsigned long pagemod_limit;
__u64 change_attr;
+ unsigned long test_gen;
unsigned long flags;
refcount_t refcount;
spinlock_t lock;
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index e6a51fd94fea..13dffe4201e6 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -2532,7 +2532,7 @@ EXPORT_SYMBOL_GPL(nfs_unlink);
int nfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *symname)
{
- struct page *page;
+ struct folio *folio;
char *kaddr;
struct iattr attr;
unsigned int pathlen = strlen(symname);
@@ -2547,24 +2547,24 @@ int nfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
attr.ia_mode = S_IFLNK | S_IRWXUGO;
attr.ia_valid = ATTR_MODE;
- page = alloc_page(GFP_USER);
- if (!page)
+ folio = folio_alloc(GFP_USER, 0);
+ if (!folio)
return -ENOMEM;
- kaddr = page_address(page);
+ kaddr = folio_address(folio);
memcpy(kaddr, symname, pathlen);
if (pathlen < PAGE_SIZE)
memset(kaddr + pathlen, 0, PAGE_SIZE - pathlen);
trace_nfs_symlink_enter(dir, dentry);
- error = NFS_PROTO(dir)->symlink(dir, dentry, page, pathlen, &attr);
+ error = NFS_PROTO(dir)->symlink(dir, dentry, folio, pathlen, &attr);
trace_nfs_symlink_exit(dir, dentry, error);
if (error != 0) {
dfprintk(VFS, "NFS: symlink(%s/%lu, %pd, %s) error %d\n",
dir->i_sb->s_id, dir->i_ino,
dentry, symname, error);
d_drop(dentry);
- __free_page(page);
+ folio_put(folio);
return error;
}
@@ -2574,18 +2574,13 @@ int nfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
* No big deal if we can't add this page to the page cache here.
* READLINK will get the missing page from the server if needed.
*/
- if (!add_to_page_cache_lru(page, d_inode(dentry)->i_mapping, 0,
- GFP_KERNEL)) {
- SetPageUptodate(page);
- unlock_page(page);
- /*
- * add_to_page_cache_lru() grabs an extra page refcount.
- * Drop it here to avoid leaking this page later.
- */
- put_page(page);
- } else
- __free_page(page);
+ if (filemap_add_folio(d_inode(dentry)->i_mapping, folio, 0,
+ GFP_KERNEL) == 0) {
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
+ }
+ folio_put(folio);
return 0;
}
EXPORT_SYMBOL_GPL(nfs_symlink);
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 4bf208a0a8e9..2de66e4e8280 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -543,9 +543,10 @@ out:
}
static int
-nfs3_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
+nfs3_proc_symlink(struct inode *dir, struct dentry *dentry, struct folio *folio,
unsigned int len, struct iattr *sattr)
{
+ struct page *page = &folio->page;
struct nfs3_createdata *data;
struct dentry *d_alias;
int status = -ENOMEM;
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 827d00e2f094..581698f1b7b2 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -209,6 +209,7 @@ struct nfs4_exception {
struct inode *inode;
nfs4_stateid *stateid;
long timeout;
+ unsigned short retrans;
unsigned char task_is_privileged : 1;
unsigned char delay : 1,
recovering : 1,
@@ -546,6 +547,7 @@ extern unsigned short max_session_slots;
extern unsigned short max_session_cb_slots;
extern unsigned short send_implementation_id;
extern bool recover_lost_locks;
+extern short nfs_delay_retrans;
#define NFS4_CLIENT_ID_UNIQ_LEN (64)
extern char nfs4_client_id_uniquifier[NFS4_CLIENT_ID_UNIQ_LEN];
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index a654d7234f51..8a943fffaad5 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -585,6 +585,21 @@ wait_on_recovery:
return 0;
}
+/*
+ * Track the number of NFS4ERR_DELAY related retransmissions and return
+ * EAGAIN if the 'softerr' mount option is set, and we've exceeded the limit
+ * set by 'nfs_delay_retrans'.
+ */
+static int nfs4_exception_should_retrans(const struct nfs_server *server,
+ struct nfs4_exception *exception)
+{
+ if (server->flags & NFS_MOUNT_SOFTERR && nfs_delay_retrans >= 0) {
+ if (exception->retrans++ >= (unsigned short)nfs_delay_retrans)
+ return -EAGAIN;
+ }
+ return 0;
+}
+
/* This is the error handling routine for processes that are allowed
* to sleep.
*/
@@ -595,6 +610,11 @@ int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_
ret = nfs4_do_handle_exception(server, errorcode, exception);
if (exception->delay) {
+ int ret2 = nfs4_exception_should_retrans(server, exception);
+ if (ret2 < 0) {
+ exception->retry = 0;
+ return ret2;
+ }
ret = nfs4_delay(&exception->timeout,
exception->interruptible);
goto out_retry;
@@ -623,6 +643,11 @@ nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
ret = nfs4_do_handle_exception(server, errorcode, exception);
if (exception->delay) {
+ int ret2 = nfs4_exception_should_retrans(server, exception);
+ if (ret2 < 0) {
+ exception->retry = 0;
+ return ret2;
+ }
rpc_delay(task, nfs4_update_delay(&exception->timeout));
goto out_retry;
}
@@ -5011,9 +5036,10 @@ static void nfs4_free_createdata(struct nfs4_createdata *data)
}
static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
- struct page *page, unsigned int len, struct iattr *sattr,
+ struct folio *folio, unsigned int len, struct iattr *sattr,
struct nfs4_label *label)
{
+ struct page *page = &folio->page;
struct nfs4_createdata *data;
int status = -ENAMETOOLONG;
@@ -5038,7 +5064,7 @@ out:
}
static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
- struct page *page, unsigned int len, struct iattr *sattr)
+ struct folio *folio, unsigned int len, struct iattr *sattr)
{
struct nfs4_exception exception = {
.interruptible = true,
@@ -5049,7 +5075,7 @@ static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
label = nfs4_label_init_security(dir, dentry, sattr, &l);
do {
- err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
+ err = _nfs4_proc_symlink(dir, dentry, folio, len, sattr, label);
trace_nfs4_symlink(dir, &dentry->d_name, err);
err = nfs4_handle_exception(NFS_SERVER(dir), err,
&exception);
@@ -5622,7 +5648,7 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
- nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr);
+ nfs4_state_protect_write(hdr->ds_clp ? hdr->ds_clp : server->nfs_client, clnt, msg, hdr);
}
static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
@@ -5663,7 +5689,8 @@ static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_mess
data->res.server = server;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
- nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
+ nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client,
+ NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
}
static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args,
@@ -8934,6 +8961,7 @@ void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);
+try_again:
/* Test connection for session trunking. Async exchange_id call */
task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
if (IS_ERR(task))
@@ -8946,11 +8974,15 @@ void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
if (status == 0)
rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
- else if (rpc_clnt_xprt_switch_has_addr(clnt,
+ else if (status != -NFS4ERR_DELAY && rpc_clnt_xprt_switch_has_addr(clnt,
(struct sockaddr *)&xprt->addr))
rpc_clnt_xprt_switch_remove_xprt(clnt, xprt);
rpc_put_task(task);
+ if (status == -NFS4ERR_DELAY) {
+ ssleep(1);
+ goto try_again;
+ }
}
EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);
@@ -9621,6 +9653,9 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
nfs4_sequence_free_slot(&lgp->res.seq_res);
+ exception->state = NULL;
+ exception->stateid = NULL;
+
switch (nfs4err) {
case 0:
goto out;
@@ -9716,7 +9751,8 @@ static const struct rpc_call_ops nfs4_layoutget_call_ops = {
};
struct pnfs_layout_segment *
-nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
+nfs4_proc_layoutget(struct nfs4_layoutget *lgp,
+ struct nfs4_exception *exception)
{
struct inode *inode = lgp->args.inode;
struct nfs_server *server = NFS_SERVER(inode);
@@ -9736,13 +9772,10 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
RPC_TASK_MOVEABLE,
};
struct pnfs_layout_segment *lseg = NULL;
- struct nfs4_exception exception = {
- .inode = inode,
- .timeout = *timeout,
- };
int status = 0;
nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0);
+ exception->retry = 0;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
@@ -9753,11 +9786,12 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
goto out;
if (task->tk_status < 0) {
- status = nfs4_layoutget_handle_exception(task, lgp, &exception);
- *timeout = exception.timeout;
+ exception->retry = 1;
+ status = nfs4_layoutget_handle_exception(task, lgp, exception);
} else if (lgp->res.layoutp->len == 0) {
+ exception->retry = 1;
status = -EAGAIN;
- *timeout = nfs4_update_delay(&exception.timeout);
+ nfs4_update_delay(&exception->timeout);
} else
lseg = pnfs_layout_process(lgp);
out:
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 84343aefbbd6..21a365357629 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1980,7 +1980,9 @@ pnfs_update_layout(struct inode *ino,
struct pnfs_layout_segment *lseg = NULL;
struct nfs4_layoutget *lgp;
nfs4_stateid stateid;
- long timeout = 0;
+ struct nfs4_exception exception = {
+ .inode = ino,
+ };
unsigned long giveup = jiffies + (clp->cl_lease_time << 1);
bool first;
@@ -2144,7 +2146,7 @@ lookup_again:
lgp->lo = lo;
pnfs_get_layout_hdr(lo);
- lseg = nfs4_proc_layoutget(lgp, &timeout);
+ lseg = nfs4_proc_layoutget(lgp, &exception);
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
nfs_layoutget_end(lo);
@@ -2171,6 +2173,8 @@ lookup_again:
goto out_put_layout_hdr;
}
if (lseg) {
+ if (!exception.retry)
+ goto out_put_layout_hdr;
if (first)
pnfs_clear_first_layoutget(lo);
trace_pnfs_update_layout(ino, pos, count,
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index d886c8226d8f..db57a85500ee 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -35,6 +35,7 @@
#include <linux/nfs_page.h>
#include <linux/workqueue.h>
+struct nfs4_exception;
struct nfs4_opendata;
enum {
@@ -245,7 +246,9 @@ extern size_t max_response_pages(struct nfs_server *server);
extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
struct pnfs_device *dev,
const struct cred *cred);
-extern struct pnfs_layout_segment* nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout);
+extern struct pnfs_layout_segment *
+nfs4_proc_layoutget(struct nfs4_layoutget *lgp,
+ struct nfs4_exception *exception);
extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync);
/* pnfs.c */
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index e3570c656b0f..ad3a321ae997 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -396,9 +396,10 @@ nfs_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
}
static int
-nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
+nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct folio *folio,
unsigned int len, struct iattr *sattr)
{
+ struct page *page = &folio->page;
struct nfs_fh *fh;
struct nfs_fattr *fattr;
struct nfs_symlinkargs arg = {
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 2667ab753d42..075b31c93f87 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1371,6 +1371,7 @@ unsigned short max_session_cb_slots = NFS4_DEF_CB_SLOT_TABLE_SIZE;
unsigned short send_implementation_id = 1;
char nfs4_client_id_uniquifier[NFS4_CLIENT_ID_UNIQ_LEN] = "";
bool recover_lost_locks = false;
+short nfs_delay_retrans = -1;
EXPORT_SYMBOL_GPL(nfs_callback_nr_threads);
EXPORT_SYMBOL_GPL(nfs_callback_set_tcpport);
@@ -1381,6 +1382,7 @@ EXPORT_SYMBOL_GPL(max_session_cb_slots);
EXPORT_SYMBOL_GPL(send_implementation_id);
EXPORT_SYMBOL_GPL(nfs4_client_id_uniquifier);
EXPORT_SYMBOL_GPL(recover_lost_locks);
+EXPORT_SYMBOL_GPL(nfs_delay_retrans);
#define NFS_CALLBACK_MAXPORTNR (65535U)
@@ -1429,5 +1431,9 @@ MODULE_PARM_DESC(recover_lost_locks,
"If the server reports that a lock might be lost, "
"try to recover it risking data corruption.");
-
+module_param_named(delay_retrans, nfs_delay_retrans, short, 0644);
+MODULE_PARM_DESC(delay_retrans,
+ "Unless negative, specifies the number of times the NFSv4 "
+ "client retries a request before returning an EAGAIN error, "
+ "after a reply of NFS4ERR_DELAY from the server.");
#endif /* CONFIG_NFS_V4 */
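delay_retrans is exposed as a writable module parameter (perm 0644), so it can be read and tuned at runtime without remounting. A small sketch, assuming the parameter ends up under the nfs module as /sys/module/nfs/parameters/delay_retrans (path inferred from module_param_named() above, not verified here):

#include <stdio.h>

int main(void)
{
	/* The limit only applies together with the 'softerr' mount option,
	 * per nfs4_exception_should_retrans() earlier in this diff. */
	FILE *f = fopen("/sys/module/nfs/parameters/delay_retrans", "r");
	int val;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d", &val) == 1)
		printf("NFSv4 delay_retrans = %d (negative: no limit)\n", val);
	fclose(f);
	return 0;
}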
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 9d82d50ce0b1..b664caea8b4e 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -739,6 +739,8 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
&pgio);
pgio.pg_error = 0;
nfs_pageio_complete(&pgio);
+ if (err == -EAGAIN && mntflags & NFS_MOUNT_SOFTERR)
+ break;
} while (err < 0 && !nfs_error_is_fatal(err));
nfs_io_completion_put(ioc);
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index b7da17e53007..7b641095a665 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -426,8 +426,7 @@ static int check_export(struct path *path, int *flags, unsigned char *uuid)
return -EINVAL;
}
- if (!inode->i_sb->s_export_op ||
- !inode->i_sb->s_export_op->fh_to_dentry) {
+ if (!exportfs_can_decode_fh(inode->i_sb->s_export_op)) {
dprintk("exp_export: export of invalid fs type.\n");
return -EINVAL;
}
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 45aecdc302f4..4d765c72496f 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -1595,7 +1595,7 @@ static int fanotify_test_fid(struct dentry *dentry, unsigned int flags)
* file handles so user can use name_to_handle_at() to compare fids
* reported with events to the file handle of watched objects.
*/
- if (!nop)
+ if (!exportfs_can_encode_fid(nop))
return -EOPNOTSUPP;
/*
@@ -1603,7 +1603,7 @@ static int fanotify_test_fid(struct dentry *dentry, unsigned int flags)
* supports decoding file handles, so user has a way to map back the
* reported fids to filesystem objects.
*/
- if (mark_type != FAN_MARK_INODE && !nop->fh_to_dentry)
+ if (mark_type != FAN_MARK_INODE && !exportfs_can_decode_fh(nop))
return -EOPNOTSUPP;
return 0;
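Both checks in fanotify_test_fid() now go through exportfs_can_encode_fid()/exportfs_can_decode_fh(), helpers presumably introduced earlier in this series, rather than testing the export_operations fields directly. The userspace-visible behaviour is unchanged: marking a filesystem that cannot encode file handles still fails with EOPNOTSUPP when FAN_REPORT_FID is requested, as in this small sketch (needs CAP_SYS_ADMIN):

#include <fcntl.h>
#include <stdio.h>
#include <sys/fanotify.h>

int main(int argc, char **argv)
{
	int fd = fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_FID, 0);

	if (fd < 0) {
		perror("fanotify_init");
		return 1;
	}
	/* Fails with EOPNOTSUPP if the target filesystem cannot encode fids. */
	if (fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_FILESYSTEM,
			  FAN_CREATE | FAN_ONDIR, AT_FDCWD,
			  argc > 1 ? argv[1] : "/tmp") != 0)
		perror("fanotify_mark");
	return 0;
}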
diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c
index ab44f2db533b..d7498ddc4a72 100644
--- a/fs/ntfs/namei.c
+++ b/fs/ntfs/namei.c
@@ -384,6 +384,7 @@ static struct dentry *ntfs_fh_to_parent(struct super_block *sb, struct fid *fid,
* and due to using iget() whereas NTFS needs ntfs_iget().
*/
const struct export_operations ntfs_export_ops = {
+ .encode_fh = generic_encode_ino32_fh,
.get_parent = ntfs_get_parent, /* Find the parent of a given
directory. */
.fh_to_dentry = ntfs_fh_to_dentry,
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index f763e3256ccc..9153dffde950 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -811,6 +811,7 @@ static int ntfs_nfs_commit_metadata(struct inode *inode)
}
static const struct export_operations ntfs_export_ops = {
+ .encode_fh = generic_encode_ino32_fh,
.fh_to_dentry = ntfs_fh_to_dentry,
.fh_to_parent = ntfs_fh_to_parent,
.get_parent = ntfs3_get_parent,
diff --git a/fs/overlayfs/Makefile b/fs/overlayfs/Makefile
index 4e173d56b11f..5648954f8588 100644
--- a/fs/overlayfs/Makefile
+++ b/fs/overlayfs/Makefile
@@ -6,4 +6,4 @@
obj-$(CONFIG_OVERLAY_FS) += overlay.o
overlay-objs := super.o namei.o util.o inode.o file.o dir.o readdir.o \
- copy_up.o export.o params.o
+ copy_up.o export.o params.o xattrs.o
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index ada3fcc9c6d5..4382881b0709 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -252,7 +252,9 @@ static int ovl_copy_up_file(struct ovl_fs *ofs, struct dentry *dentry,
return PTR_ERR(old_file);
/* Try to use clone_file_range to clone up within the same fs */
+ ovl_start_write(dentry);
cloned = do_clone_file_range(old_file, 0, new_file, 0, len, 0);
+ ovl_end_write(dentry);
if (cloned == len)
goto out_fput;
/* Couldn't clone, so now we try to copy the data */
@@ -287,8 +289,12 @@ static int ovl_copy_up_file(struct ovl_fs *ofs, struct dentry *dentry,
* it may not recognize all kind of holes and sometimes
* only skips partial of hole area. However, it will be
* enough for most of the use cases.
+ *
+ * We do not hold upper sb_writers throughout the loop to avert
+ * lockdep warning with llseek of lower file in nested overlay:
+ * - upper sb_writers
+ * -- lower ovl_inode_lock (ovl_llseek)
*/
-
if (skip_hole && data_pos < old_pos) {
data_pos = vfs_llseek(old_file, old_pos, SEEK_DATA);
if (data_pos > old_pos) {
@@ -303,9 +309,11 @@ static int ovl_copy_up_file(struct ovl_fs *ofs, struct dentry *dentry,
}
}
+ ovl_start_write(dentry);
bytes = do_splice_direct(old_file, &old_pos,
new_file, &new_pos,
this_len, SPLICE_F_MOVE);
+ ovl_end_write(dentry);
if (bytes <= 0) {
error = bytes;
break;
@@ -426,29 +434,29 @@ out_err:
return ERR_PTR(err);
}
-int ovl_set_origin(struct ovl_fs *ofs, struct dentry *lower,
- struct dentry *upper)
+struct ovl_fh *ovl_get_origin_fh(struct ovl_fs *ofs, struct dentry *origin)
{
- const struct ovl_fh *fh = NULL;
- int err;
-
/*
* When lower layer doesn't support export operations store a 'null' fh,
* so we can use the overlay.origin xattr to distignuish between a copy
* up and a pure upper inode.
*/
- if (ovl_can_decode_fh(lower->d_sb)) {
- fh = ovl_encode_real_fh(ofs, lower, false);
- if (IS_ERR(fh))
- return PTR_ERR(fh);
- }
+ if (!ovl_can_decode_fh(origin->d_sb))
+ return NULL;
+
+ return ovl_encode_real_fh(ofs, origin, false);
+}
+
+int ovl_set_origin_fh(struct ovl_fs *ofs, const struct ovl_fh *fh,
+ struct dentry *upper)
+{
+ int err;
/*
* Do not fail when upper doesn't support xattrs.
*/
err = ovl_check_setxattr(ofs, upper, OVL_XATTR_ORIGIN, fh->buf,
fh ? fh->fb.len : 0, 0);
- kfree(fh);
/* Ignore -EPERM from setting "user.*" on symlink/special */
return err == -EPERM ? 0 : err;
@@ -476,7 +484,7 @@ static int ovl_set_upper_fh(struct ovl_fs *ofs, struct dentry *upper,
*
* Caller must hold i_mutex on indexdir.
*/
-static int ovl_create_index(struct dentry *dentry, struct dentry *origin,
+static int ovl_create_index(struct dentry *dentry, const struct ovl_fh *fh,
struct dentry *upper)
{
struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
@@ -502,7 +510,7 @@ static int ovl_create_index(struct dentry *dentry, struct dentry *origin,
if (WARN_ON(ovl_test_flag(OVL_INDEX, d_inode(dentry))))
return -EIO;
- err = ovl_get_index_name(ofs, origin, &name);
+ err = ovl_get_index_name_fh(fh, &name);
if (err)
return err;
@@ -541,6 +549,7 @@ struct ovl_copy_up_ctx {
struct dentry *destdir;
struct qstr destname;
struct dentry *workdir;
+ const struct ovl_fh *origin_fh;
bool origin;
bool indexed;
bool metacopy;
@@ -555,14 +564,16 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
struct inode *udir = d_inode(upperdir);
+ ovl_start_write(c->dentry);
+
/* Mark parent "impure" because it may now contain non-pure upper */
err = ovl_set_impure(c->parent, upperdir);
if (err)
- return err;
+ goto out;
err = ovl_set_nlink_lower(c->dentry);
if (err)
- return err;
+ goto out;
inode_lock_nested(udir, I_MUTEX_PARENT);
upper = ovl_lookup_upper(ofs, c->dentry->d_name.name, upperdir,
@@ -581,10 +592,12 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
}
inode_unlock(udir);
if (err)
- return err;
+ goto out;
err = ovl_set_nlink_upper(c->dentry);
+out:
+ ovl_end_write(c->dentry);
return err;
}
@@ -637,7 +650,7 @@ static int ovl_copy_up_metadata(struct ovl_copy_up_ctx *c, struct dentry *temp)
* hard link.
*/
if (c->origin) {
- err = ovl_set_origin(ofs, c->lowerpath.dentry, temp);
+ err = ovl_set_origin_fh(ofs, c->origin_fh, temp);
if (err)
return err;
}
@@ -719,21 +732,19 @@ static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c)
.link = c->link
};
- /* workdir and destdir could be the same when copying up to indexdir */
- err = -EIO;
- if (lock_rename(c->workdir, c->destdir) != NULL)
- goto unlock;
-
err = ovl_prep_cu_creds(c->dentry, &cc);
if (err)
- goto unlock;
+ return err;
+ ovl_start_write(c->dentry);
+ inode_lock(wdir);
temp = ovl_create_temp(ofs, c->workdir, &cattr);
+ inode_unlock(wdir);
+ ovl_end_write(c->dentry);
ovl_revert_cu_creds(&cc);
- err = PTR_ERR(temp);
if (IS_ERR(temp))
- goto unlock;
+ return PTR_ERR(temp);
/*
* Copy up data first and then xattrs. Writing data after
@@ -741,15 +752,28 @@ static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c)
*/
path.dentry = temp;
err = ovl_copy_up_data(c, &path);
- if (err)
+ /*
+ * We cannot hold lock_rename() throughout this helper, because of
+ * lock ordering with sb_writers, which shouldn't be held when calling
+ * ovl_copy_up_data(), so lock workdir and destdir and make sure that
+ * temp wasn't moved before copy up completion or cleanup.
+ * If temp was moved, abort without the cleanup.
+ */
+ ovl_start_write(c->dentry);
+ if (lock_rename(c->workdir, c->destdir) != NULL ||
+ temp->d_parent != c->workdir) {
+ err = -EIO;
+ goto unlock;
+ } else if (err) {
goto cleanup;
+ }
err = ovl_copy_up_metadata(c, temp);
if (err)
goto cleanup;
if (S_ISDIR(c->stat.mode) && c->indexed) {
- err = ovl_create_index(c->dentry, c->lowerpath.dentry, temp);
+ err = ovl_create_index(c->dentry, c->origin_fh, temp);
if (err)
goto cleanup;
}
@@ -779,6 +803,7 @@ static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c)
ovl_set_flag(OVL_WHITEOUTS, inode);
unlock:
unlock_rename(c->workdir, c->destdir);
+ ovl_end_write(c->dentry);
return err;
@@ -802,9 +827,10 @@ static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c)
if (err)
return err;
+ ovl_start_write(c->dentry);
tmpfile = ovl_do_tmpfile(ofs, c->workdir, c->stat.mode);
+ ovl_end_write(c->dentry);
ovl_revert_cu_creds(&cc);
-
if (IS_ERR(tmpfile))
return PTR_ERR(tmpfile);
@@ -815,9 +841,11 @@ static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c)
goto out_fput;
}
+ ovl_start_write(c->dentry);
+
err = ovl_copy_up_metadata(c, temp);
if (err)
- goto out_fput;
+ goto out;
inode_lock_nested(udir, I_MUTEX_PARENT);
@@ -831,7 +859,7 @@ static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c)
inode_unlock(udir);
if (err)
- goto out_fput;
+ goto out;
if (c->metacopy_digest)
ovl_set_flag(OVL_HAS_DIGEST, d_inode(c->dentry));
@@ -843,6 +871,8 @@ static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c)
ovl_set_upperdata(d_inode(c->dentry));
ovl_inode_update(d_inode(c->dentry), dget(temp));
+out:
+ ovl_end_write(c->dentry);
out_fput:
fput(tmpfile);
return err;
@@ -861,6 +891,8 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
{
int err;
struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
+ struct dentry *origin = c->lowerpath.dentry;
+ struct ovl_fh *fh = NULL;
bool to_index = false;
/*
@@ -877,25 +909,35 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
to_index = true;
}
- if (S_ISDIR(c->stat.mode) || c->stat.nlink == 1 || to_index)
+ if (S_ISDIR(c->stat.mode) || c->stat.nlink == 1 || to_index) {
+ fh = ovl_get_origin_fh(ofs, origin);
+ if (IS_ERR(fh))
+ return PTR_ERR(fh);
+
+ /* origin_fh may be NULL */
+ c->origin_fh = fh;
c->origin = true;
+ }
if (to_index) {
c->destdir = ovl_indexdir(c->dentry->d_sb);
- err = ovl_get_index_name(ofs, c->lowerpath.dentry, &c->destname);
+ err = ovl_get_index_name(ofs, origin, &c->destname);
if (err)
- return err;
+ goto out_free_fh;
} else if (WARN_ON(!c->parent)) {
/* Disconnected dentry must be copied up to index dir */
- return -EIO;
+ err = -EIO;
+ goto out_free_fh;
} else {
/*
* Mark parent "impure" because it may now contain non-pure
* upper
*/
+ ovl_start_write(c->dentry);
err = ovl_set_impure(c->parent, c->destdir);
+ ovl_end_write(c->dentry);
if (err)
- return err;
+ goto out_free_fh;
}
/* Should we copyup with O_TMPFILE or with workdir? */
@@ -909,6 +951,7 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
if (c->indexed)
ovl_set_flag(OVL_INDEX, d_inode(c->dentry));
+ ovl_start_write(c->dentry);
if (to_index) {
/* Initialize nlink for copy up of disconnected dentry */
err = ovl_set_nlink_upper(c->dentry);
@@ -923,10 +966,13 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
ovl_dentry_set_upper_alias(c->dentry);
ovl_dentry_update_reval(c->dentry, ovl_dentry_upper(c->dentry));
}
+ ovl_end_write(c->dentry);
out:
if (to_index)
kfree(c->destname.name);
+out_free_fh:
+ kfree(fh);
return err;
}
@@ -1011,15 +1057,16 @@ static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c)
* Writing to upper file will clear security.capability xattr. We
* don't want that to happen for normal copy-up operation.
*/
+ ovl_start_write(c->dentry);
if (capability) {
err = ovl_do_setxattr(ofs, upperpath.dentry, XATTR_NAME_CAPS,
capability, cap_size, 0);
- if (err)
- goto out_free;
}
-
-
- err = ovl_removexattr(ofs, upperpath.dentry, OVL_XATTR_METACOPY);
+ if (!err) {
+ err = ovl_removexattr(ofs, upperpath.dentry,
+ OVL_XATTR_METACOPY);
+ }
+ ovl_end_write(c->dentry);
if (err)
goto out_free;
@@ -1170,17 +1217,10 @@ static bool ovl_open_need_copy_up(struct dentry *dentry, int flags)
int ovl_maybe_copy_up(struct dentry *dentry, int flags)
{
- int err = 0;
-
- if (ovl_open_need_copy_up(dentry, flags)) {
- err = ovl_want_write(dentry);
- if (!err) {
- err = ovl_copy_up_flags(dentry, flags);
- ovl_drop_write(dentry);
- }
- }
+ if (!ovl_open_need_copy_up(dentry, flags))
+ return 0;
- return err;
+ return ovl_copy_up_flags(dentry, flags);
}
int ovl_copy_up_with_data(struct dentry *dentry)
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 033fc0458a3d..aab3f5d93556 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -477,7 +477,7 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
goto out_unlock;
err = -ESTALE;
- if (d_is_negative(upper) || !IS_WHITEOUT(d_inode(upper)))
+ if (d_is_negative(upper) || !ovl_upper_is_whiteout(ofs, upper))
goto out_dput;
newdentry = ovl_create_temp(ofs, workdir, cattr);
@@ -559,10 +559,6 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode,
struct cred *override_cred;
struct dentry *parent = dentry->d_parent;
- err = ovl_copy_up(parent);
- if (err)
- return err;
-
old_cred = ovl_override_creds(dentry->d_sb);
/*
@@ -626,6 +622,10 @@ static int ovl_create_object(struct dentry *dentry, int mode, dev_t rdev,
.link = link,
};
+ err = ovl_copy_up(dentry->d_parent);
+ if (err)
+ return err;
+
err = ovl_want_write(dentry);
if (err)
goto out;
@@ -700,28 +700,24 @@ static int ovl_link(struct dentry *old, struct inode *newdir,
int err;
struct inode *inode;
- err = ovl_want_write(old);
+ err = ovl_copy_up(old);
if (err)
goto out;
- err = ovl_copy_up(old);
+ err = ovl_copy_up(new->d_parent);
if (err)
- goto out_drop_write;
+ goto out;
- err = ovl_copy_up(new->d_parent);
+ err = ovl_nlink_start(old);
if (err)
- goto out_drop_write;
+ goto out;
if (ovl_is_metacopy_dentry(old)) {
err = ovl_set_link_redirect(old);
if (err)
- goto out_drop_write;
+ goto out_nlink_end;
}
- err = ovl_nlink_start(old);
- if (err)
- goto out_drop_write;
-
inode = d_inode(old);
ihold(inode);
@@ -731,9 +727,8 @@ static int ovl_link(struct dentry *old, struct inode *newdir,
if (err)
iput(inode);
+out_nlink_end:
ovl_nlink_end(old);
-out_drop_write:
- ovl_drop_write(old);
out:
return err;
}
@@ -891,17 +886,13 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
goto out;
}
- err = ovl_want_write(dentry);
- if (err)
- goto out;
-
err = ovl_copy_up(dentry->d_parent);
if (err)
- goto out_drop_write;
+ goto out;
err = ovl_nlink_start(dentry);
if (err)
- goto out_drop_write;
+ goto out;
old_cred = ovl_override_creds(dentry->d_sb);
if (!lower_positive)
@@ -926,8 +917,6 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
if (ovl_dentry_upper(dentry))
ovl_copyattr(d_inode(dentry));
-out_drop_write:
- ovl_drop_write(dentry);
out:
ovl_cache_free(&list);
return err;
@@ -1131,29 +1120,32 @@ static int ovl_rename(struct mnt_idmap *idmap, struct inode *olddir,
}
}
- err = ovl_want_write(old);
- if (err)
- goto out;
-
err = ovl_copy_up(old);
if (err)
- goto out_drop_write;
+ goto out;
err = ovl_copy_up(new->d_parent);
if (err)
- goto out_drop_write;
+ goto out;
if (!overwrite) {
err = ovl_copy_up(new);
if (err)
- goto out_drop_write;
+ goto out;
} else if (d_inode(new)) {
err = ovl_nlink_start(new);
if (err)
- goto out_drop_write;
+ goto out;
update_nlink = true;
}
+ if (!update_nlink) {
+ /* ovl_nlink_start() took ovl_want_write() */
+ err = ovl_want_write(old);
+ if (err)
+ goto out;
+ }
+
old_cred = ovl_override_creds(old->d_sb);
if (!list_empty(&list)) {
@@ -1219,7 +1211,7 @@ static int ovl_rename(struct mnt_idmap *idmap, struct inode *olddir,
}
} else {
if (!d_is_negative(newdentry)) {
- if (!new_opaque || !ovl_is_whiteout(newdentry))
+ if (!new_opaque || !ovl_upper_is_whiteout(ofs, newdentry))
goto out_dput;
} else {
if (flags & RENAME_EXCHANGE)
@@ -1286,8 +1278,8 @@ out_revert_creds:
revert_creds(old_cred);
if (update_nlink)
ovl_nlink_end(new);
-out_drop_write:
- ovl_drop_write(old);
+ else
+ ovl_drop_write(old);
out:
dput(opaquedir);
ovl_cache_free(&list);
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index 26b782c53910..7e16bbcad95e 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -23,12 +23,7 @@ static int ovl_encode_maybe_copy_up(struct dentry *dentry)
if (ovl_dentry_upper(dentry))
return 0;
- err = ovl_want_write(dentry);
- if (!err) {
- err = ovl_copy_up(dentry);
- ovl_drop_write(dentry);
- }
-
+ err = ovl_copy_up(dentry);
if (err) {
pr_warn_ratelimited("failed to copy up on encode (%pd2, err=%i)\n",
dentry, err);
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index ec3671ca140c..131621daeb13 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -15,10 +15,15 @@
#include <linux/fs.h>
#include "overlayfs.h"
+#include "../internal.h" /* for sb_init_dio_done_wq */
+
struct ovl_aio_req {
struct kiocb iocb;
refcount_t ref;
struct kiocb *orig_iocb;
+ /* used for aio completion */
+ struct work_struct work;
+ long res;
};
static struct kmem_cache *ovl_aio_request_cachep;
@@ -235,6 +240,12 @@ static loff_t ovl_llseek(struct file *file, loff_t offset, int whence)
return ret;
}
+static void ovl_file_modified(struct file *file)
+{
+ /* Update size/mtime */
+ ovl_copyattr(file_inode(file));
+}
+
static void ovl_file_accessed(struct file *file)
{
struct inode *inode, *upperinode;
@@ -263,20 +274,12 @@ static void ovl_file_accessed(struct file *file)
touch_atime(&file->f_path);
}
-static rwf_t ovl_iocb_to_rwf(int ifl)
+#define OVL_IOCB_MASK \
+ (IOCB_NOWAIT | IOCB_HIPRI | IOCB_DSYNC | IOCB_SYNC | IOCB_APPEND)
+
+static rwf_t iocb_to_rw_flags(int flags)
{
- rwf_t flags = 0;
-
- if (ifl & IOCB_NOWAIT)
- flags |= RWF_NOWAIT;
- if (ifl & IOCB_HIPRI)
- flags |= RWF_HIPRI;
- if (ifl & IOCB_DSYNC)
- flags |= RWF_DSYNC;
- if (ifl & IOCB_SYNC)
- flags |= RWF_SYNC;
-
- return flags;
+ return (__force rwf_t)(flags & OVL_IOCB_MASK);
}
static inline void ovl_aio_put(struct ovl_aio_req *aio_req)
@@ -293,10 +296,8 @@ static void ovl_aio_cleanup_handler(struct ovl_aio_req *aio_req)
struct kiocb *orig_iocb = aio_req->orig_iocb;
if (iocb->ki_flags & IOCB_WRITE) {
- struct inode *inode = file_inode(orig_iocb->ki_filp);
-
kiocb_end_write(iocb);
- ovl_copyattr(inode);
+ ovl_file_modified(orig_iocb->ki_filp);
}
orig_iocb->ki_pos = iocb->ki_pos;
@@ -313,6 +314,37 @@ static void ovl_aio_rw_complete(struct kiocb *iocb, long res)
orig_iocb->ki_complete(orig_iocb, res);
}
+static void ovl_aio_complete_work(struct work_struct *work)
+{
+ struct ovl_aio_req *aio_req = container_of(work,
+ struct ovl_aio_req, work);
+
+ ovl_aio_rw_complete(&aio_req->iocb, aio_req->res);
+}
+
+static void ovl_aio_queue_completion(struct kiocb *iocb, long res)
+{
+ struct ovl_aio_req *aio_req = container_of(iocb,
+ struct ovl_aio_req, iocb);
+ struct kiocb *orig_iocb = aio_req->orig_iocb;
+
+ /*
+ * Punt to a work queue to serialize updates of mtime/size.
+ */
+ aio_req->res = res;
+ INIT_WORK(&aio_req->work, ovl_aio_complete_work);
+ queue_work(file_inode(orig_iocb->ki_filp)->i_sb->s_dio_done_wq,
+ &aio_req->work);
+}
+
+static int ovl_init_aio_done_wq(struct super_block *sb)
+{
+ if (sb->s_dio_done_wq)
+ return 0;
+
+ return sb_init_dio_done_wq(sb);
+}
+
static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
@@ -334,8 +366,9 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
old_cred = ovl_override_creds(file_inode(file)->i_sb);
if (is_sync_kiocb(iocb)) {
- ret = vfs_iter_read(real.file, iter, &iocb->ki_pos,
- ovl_iocb_to_rwf(iocb->ki_flags));
+ rwf_t rwf = iocb_to_rw_flags(iocb->ki_flags);
+
+ ret = vfs_iter_read(real.file, iter, &iocb->ki_pos, rwf);
} else {
struct ovl_aio_req *aio_req;
@@ -401,15 +434,20 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
old_cred = ovl_override_creds(file_inode(file)->i_sb);
if (is_sync_kiocb(iocb)) {
+ rwf_t rwf = iocb_to_rw_flags(ifl);
+
file_start_write(real.file);
- ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,
- ovl_iocb_to_rwf(ifl));
+ ret = vfs_iter_write(real.file, iter, &iocb->ki_pos, rwf);
file_end_write(real.file);
/* Update size */
- ovl_copyattr(inode);
+ ovl_file_modified(file);
} else {
struct ovl_aio_req *aio_req;
+ ret = ovl_init_aio_done_wq(inode->i_sb);
+ if (ret)
+ goto out;
+
ret = -ENOMEM;
aio_req = kmem_cache_zalloc(ovl_aio_request_cachep, GFP_KERNEL);
if (!aio_req)
@@ -418,7 +456,7 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
aio_req->orig_iocb = iocb;
kiocb_clone(&aio_req->iocb, iocb, get_file(real.file));
aio_req->iocb.ki_flags = ifl;
- aio_req->iocb.ki_complete = ovl_aio_rw_complete;
+ aio_req->iocb.ki_complete = ovl_aio_queue_completion;
refcount_set(&aio_req->ref, 2);
kiocb_start_write(&aio_req->iocb);
ret = vfs_iocb_iter_write(real.file, &aio_req->iocb, iter);
@@ -492,7 +530,7 @@ static ssize_t ovl_splice_write(struct pipe_inode_info *pipe, struct file *out,
file_end_write(real.file);
/* Update size */
- ovl_copyattr(inode);
+ ovl_file_modified(out);
revert_creds(old_cred);
fdput(real);
@@ -573,7 +611,7 @@ static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len
revert_creds(old_cred);
/* Update size */
- ovl_copyattr(inode);
+ ovl_file_modified(file);
fdput(real);
@@ -657,7 +695,7 @@ static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in,
revert_creds(old_cred);
/* Update size */
- ovl_copyattr(inode_out);
+ ovl_file_modified(file_out);
fdput(real_in);
fdput(real_out);
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index b6e98a7d36ce..345b8f161ca4 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -32,10 +32,6 @@ int ovl_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
if (err)
return err;
- err = ovl_want_write(dentry);
- if (err)
- goto out;
-
if (attr->ia_valid & ATTR_SIZE) {
/* Truncate should trigger data copy up as well */
full_copy_up = true;
@@ -54,7 +50,7 @@ int ovl_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
winode = d_inode(upperdentry);
err = get_write_access(winode);
if (err)
- goto out_drop_write;
+ goto out;
}
if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
@@ -78,6 +74,10 @@ int ovl_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
*/
attr->ia_valid &= ~ATTR_OPEN;
+ err = ovl_want_write(dentry);
+ if (err)
+ goto out_put_write;
+
inode_lock(upperdentry->d_inode);
old_cred = ovl_override_creds(dentry->d_sb);
err = ovl_do_notify_change(ofs, upperdentry, attr);
@@ -85,12 +85,12 @@ int ovl_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
if (!err)
ovl_copyattr(dentry->d_inode);
inode_unlock(upperdentry->d_inode);
+ ovl_drop_write(dentry);
+out_put_write:
if (winode)
put_write_access(winode);
}
-out_drop_write:
- ovl_drop_write(dentry);
out:
return err;
}
@@ -339,130 +339,6 @@ static const char *ovl_get_link(struct dentry *dentry,
return p;
}
-bool ovl_is_private_xattr(struct super_block *sb, const char *name)
-{
- struct ovl_fs *ofs = OVL_FS(sb);
-
- if (ofs->config.userxattr)
- return strncmp(name, OVL_XATTR_USER_PREFIX,
- sizeof(OVL_XATTR_USER_PREFIX) - 1) == 0;
- else
- return strncmp(name, OVL_XATTR_TRUSTED_PREFIX,
- sizeof(OVL_XATTR_TRUSTED_PREFIX) - 1) == 0;
-}
-
-int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
-{
- int err;
- struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
- struct dentry *upperdentry = ovl_i_dentry_upper(inode);
- struct dentry *realdentry = upperdentry ?: ovl_dentry_lower(dentry);
- struct path realpath;
- const struct cred *old_cred;
-
- err = ovl_want_write(dentry);
- if (err)
- goto out;
-
- if (!value && !upperdentry) {
- ovl_path_lower(dentry, &realpath);
- old_cred = ovl_override_creds(dentry->d_sb);
- err = vfs_getxattr(mnt_idmap(realpath.mnt), realdentry, name, NULL, 0);
- revert_creds(old_cred);
- if (err < 0)
- goto out_drop_write;
- }
-
- if (!upperdentry) {
- err = ovl_copy_up(dentry);
- if (err)
- goto out_drop_write;
-
- realdentry = ovl_dentry_upper(dentry);
- }
-
- old_cred = ovl_override_creds(dentry->d_sb);
- if (value) {
- err = ovl_do_setxattr(ofs, realdentry, name, value, size,
- flags);
- } else {
- WARN_ON(flags != XATTR_REPLACE);
- err = ovl_do_removexattr(ofs, realdentry, name);
- }
- revert_creds(old_cred);
-
- /* copy c/mtime */
- ovl_copyattr(inode);
-
-out_drop_write:
- ovl_drop_write(dentry);
-out:
- return err;
-}
-
-int ovl_xattr_get(struct dentry *dentry, struct inode *inode, const char *name,
- void *value, size_t size)
-{
- ssize_t res;
- const struct cred *old_cred;
- struct path realpath;
-
- ovl_i_path_real(inode, &realpath);
- old_cred = ovl_override_creds(dentry->d_sb);
- res = vfs_getxattr(mnt_idmap(realpath.mnt), realpath.dentry, name, value, size);
- revert_creds(old_cred);
- return res;
-}
-
-static bool ovl_can_list(struct super_block *sb, const char *s)
-{
- /* Never list private (.overlay) */
- if (ovl_is_private_xattr(sb, s))
- return false;
-
- /* List all non-trusted xattrs */
- if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0)
- return true;
-
- /* list other trusted for superuser only */
- return ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN);
-}
-
-ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
-{
- struct dentry *realdentry = ovl_dentry_real(dentry);
- ssize_t res;
- size_t len;
- char *s;
- const struct cred *old_cred;
-
- old_cred = ovl_override_creds(dentry->d_sb);
- res = vfs_listxattr(realdentry, list, size);
- revert_creds(old_cred);
- if (res <= 0 || size == 0)
- return res;
-
- /* filter out private xattrs */
- for (s = list, len = res; len;) {
- size_t slen = strnlen(s, len) + 1;
-
- /* underlying fs providing us with an broken xattr list? */
- if (WARN_ON(slen > len))
- return -EIO;
-
- len -= slen;
- if (!ovl_can_list(dentry->d_sb, s)) {
- res -= slen;
- memmove(s, s + slen, len);
- } else {
- s += slen;
- }
- }
-
- return res;
-}
-
#ifdef CONFIG_FS_POSIX_ACL
/*
* Apply the idmapping of the layer to POSIX ACLs. The caller must pass a clone
@@ -611,10 +487,6 @@ static int ovl_set_or_remove_acl(struct dentry *dentry, struct inode *inode,
struct dentry *upperdentry = ovl_dentry_upper(dentry);
struct dentry *realdentry = upperdentry ?: ovl_dentry_lower(dentry);
- err = ovl_want_write(dentry);
- if (err)
- return err;
-
/*
* If ACL is to be removed from a lower file, check if it exists in
* the first place before copying it up.
@@ -630,7 +502,7 @@ static int ovl_set_or_remove_acl(struct dentry *dentry, struct inode *inode,
revert_creds(old_cred);
if (IS_ERR(real_acl)) {
err = PTR_ERR(real_acl);
- goto out_drop_write;
+ goto out;
}
posix_acl_release(real_acl);
}
@@ -638,23 +510,26 @@ static int ovl_set_or_remove_acl(struct dentry *dentry, struct inode *inode,
if (!upperdentry) {
err = ovl_copy_up(dentry);
if (err)
- goto out_drop_write;
+ goto out;
realdentry = ovl_dentry_upper(dentry);
}
+ err = ovl_want_write(dentry);
+ if (err)
+ goto out;
+
old_cred = ovl_override_creds(dentry->d_sb);
if (acl)
err = ovl_do_set_acl(ofs, realdentry, acl_name, acl);
else
err = ovl_do_remove_acl(ofs, realdentry, acl_name);
revert_creds(old_cred);
+ ovl_drop_write(dentry);
/* copy c/mtime */
ovl_copyattr(inode);
-
-out_drop_write:
- ovl_drop_write(dentry);
+out:
return err;
}
@@ -778,14 +653,14 @@ int ovl_fileattr_set(struct mnt_idmap *idmap,
unsigned int flags;
int err;
- err = ovl_want_write(dentry);
- if (err)
- goto out;
-
err = ovl_copy_up(dentry);
if (!err) {
ovl_path_real(dentry, &upperpath);
+ err = ovl_want_write(dentry);
+ if (err)
+ goto out;
+
old_cred = ovl_override_creds(inode->i_sb);
/*
* Store immutable/append-only flags in xattr and clear them
@@ -798,6 +673,7 @@ int ovl_fileattr_set(struct mnt_idmap *idmap,
if (!err)
err = ovl_real_fileattr_set(&upperpath, fa);
revert_creds(old_cred);
+ ovl_drop_write(dentry);
/*
* Merge real inode flags with inode flags read from
@@ -812,7 +688,6 @@ int ovl_fileattr_set(struct mnt_idmap *idmap,
/* Update ctime */
ovl_copyattr(inode);
}
- ovl_drop_write(dentry);
out:
return err;
}
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index 80391c687c2a..03bc8d5dfa31 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -251,7 +251,10 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
err = -EREMOTE;
goto out_err;
}
- if (ovl_is_whiteout(this)) {
+
+ path.dentry = this;
+ path.mnt = d->mnt;
+ if (ovl_path_is_whiteout(OVL_FS(d->sb), &path)) {
d->stop = d->opaque = true;
goto put_and_out;
}
@@ -264,8 +267,6 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
goto put_and_out;
}
- path.dentry = this;
- path.mnt = d->mnt;
if (!d_can_lookup(this)) {
if (d->is_dir || !last_element) {
d->stop = true;
@@ -438,7 +439,7 @@ int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected,
else if (IS_ERR(origin))
return PTR_ERR(origin);
- if (upperdentry && !ovl_is_whiteout(upperdentry) &&
+ if (upperdentry && !ovl_upper_is_whiteout(ofs, upperdentry) &&
inode_wrong_type(d_inode(upperdentry), d_inode(origin)->i_mode))
goto invalid;
@@ -507,6 +508,19 @@ static int ovl_verify_fh(struct ovl_fs *ofs, struct dentry *dentry,
return err;
}
+int ovl_verify_set_fh(struct ovl_fs *ofs, struct dentry *dentry,
+ enum ovl_xattr ox, const struct ovl_fh *fh,
+ bool is_upper, bool set)
+{
+ int err;
+
+ err = ovl_verify_fh(ofs, dentry, ox, fh);
+ if (set && err == -ENODATA)
+ err = ovl_setxattr(ofs, dentry, ox, fh->buf, fh->fb.len);
+
+ return err;
+}
+
/*
* Verify that @real dentry matches the file handle stored in xattr @name.
*
@@ -515,9 +529,9 @@ static int ovl_verify_fh(struct ovl_fs *ofs, struct dentry *dentry,
*
* Return 0 on match, -ESTALE on mismatch, -ENODATA on no xattr, < 0 on error.
*/
-int ovl_verify_set_fh(struct ovl_fs *ofs, struct dentry *dentry,
- enum ovl_xattr ox, struct dentry *real, bool is_upper,
- bool set)
+int ovl_verify_origin_xattr(struct ovl_fs *ofs, struct dentry *dentry,
+ enum ovl_xattr ox, struct dentry *real,
+ bool is_upper, bool set)
{
struct inode *inode;
struct ovl_fh *fh;
@@ -530,9 +544,7 @@ int ovl_verify_set_fh(struct ovl_fs *ofs, struct dentry *dentry,
goto fail;
}
- err = ovl_verify_fh(ofs, dentry, ox, fh);
- if (set && err == -ENODATA)
- err = ovl_setxattr(ofs, dentry, ox, fh->buf, fh->fb.len);
+ err = ovl_verify_set_fh(ofs, dentry, ox, fh, is_upper, set);
if (err)
goto fail;
@@ -548,6 +560,7 @@ fail:
goto out;
}
+
/* Get upper dentry from index */
struct dentry *ovl_index_upper(struct ovl_fs *ofs, struct dentry *index,
bool connected)
@@ -684,7 +697,7 @@ orphan:
goto out;
}
-static int ovl_get_index_name_fh(struct ovl_fh *fh, struct qstr *name)
+int ovl_get_index_name_fh(const struct ovl_fh *fh, struct qstr *name)
{
char *n, *s;
@@ -873,20 +886,27 @@ int ovl_path_next(int idx, struct dentry *dentry, struct path *path)
static int ovl_fix_origin(struct ovl_fs *ofs, struct dentry *dentry,
struct dentry *lower, struct dentry *upper)
{
+ const struct ovl_fh *fh;
int err;
if (ovl_check_origin_xattr(ofs, upper))
return 0;
+ fh = ovl_get_origin_fh(ofs, lower);
+ if (IS_ERR(fh))
+ return PTR_ERR(fh);
+
err = ovl_want_write(dentry);
if (err)
- return err;
+ goto out;
- err = ovl_set_origin(ofs, lower, upper);
+ err = ovl_set_origin_fh(ofs, fh, upper);
if (!err)
err = ovl_set_impure(dentry->d_parent, upper->d_parent);
ovl_drop_write(dentry);
+out:
+ kfree(fh);
return err;
}
@@ -1383,7 +1403,11 @@ bool ovl_lower_positive(struct dentry *dentry)
break;
}
} else {
- positive = !ovl_is_whiteout(this);
+ struct path path = {
+ .dentry = this,
+ .mnt = parentpath->layer->mnt,
+ };
+ positive = !ovl_path_is_whiteout(OVL_FS(dentry->d_sb), &path);
done = true;
dput(this);
}
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 9817b2dcb132..ca88b2636a57 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -28,7 +28,16 @@ enum ovl_path_type {
#define OVL_XATTR_NAMESPACE "overlay."
#define OVL_XATTR_TRUSTED_PREFIX XATTR_TRUSTED_PREFIX OVL_XATTR_NAMESPACE
+#define OVL_XATTR_TRUSTED_PREFIX_LEN (sizeof(OVL_XATTR_TRUSTED_PREFIX) - 1)
#define OVL_XATTR_USER_PREFIX XATTR_USER_PREFIX OVL_XATTR_NAMESPACE
+#define OVL_XATTR_USER_PREFIX_LEN (sizeof(OVL_XATTR_USER_PREFIX) - 1)
+
+#define OVL_XATTR_ESCAPE_PREFIX OVL_XATTR_NAMESPACE
+#define OVL_XATTR_ESCAPE_PREFIX_LEN (sizeof(OVL_XATTR_ESCAPE_PREFIX) - 1)
+#define OVL_XATTR_ESCAPE_TRUSTED_PREFIX OVL_XATTR_TRUSTED_PREFIX OVL_XATTR_ESCAPE_PREFIX
+#define OVL_XATTR_ESCAPE_TRUSTED_PREFIX_LEN (sizeof(OVL_XATTR_ESCAPE_TRUSTED_PREFIX) - 1)
+#define OVL_XATTR_ESCAPE_USER_PREFIX OVL_XATTR_USER_PREFIX OVL_XATTR_ESCAPE_PREFIX
+#define OVL_XATTR_ESCAPE_USER_PREFIX_LEN (sizeof(OVL_XATTR_ESCAPE_USER_PREFIX) - 1)
enum ovl_xattr {
OVL_XATTR_OPAQUE,
@@ -40,6 +49,8 @@ enum ovl_xattr {
OVL_XATTR_UUID,
OVL_XATTR_METACOPY,
OVL_XATTR_PROTATTR,
+ OVL_XATTR_XWHITEOUT,
+ OVL_XATTR_XWHITEOUTS,
};
enum ovl_inode_flag {
@@ -398,6 +409,10 @@ static inline bool ovl_open_flags_need_copy_up(int flags)
}
/* util.c */
+int ovl_get_write_access(struct dentry *dentry);
+void ovl_put_write_access(struct dentry *dentry);
+void ovl_start_write(struct dentry *dentry);
+void ovl_end_write(struct dentry *dentry);
int ovl_want_write(struct dentry *dentry);
void ovl_drop_write(struct dentry *dentry);
struct dentry *ovl_workdir(struct dentry *dentry);
@@ -460,6 +475,7 @@ void ovl_inode_update(struct inode *inode, struct dentry *upperdentry);
void ovl_dir_modified(struct dentry *dentry, bool impurity);
u64 ovl_inode_version_get(struct inode *inode);
bool ovl_is_whiteout(struct dentry *dentry);
+bool ovl_path_is_whiteout(struct ovl_fs *ofs, const struct path *path);
struct file *ovl_path_open(const struct path *path, int flags);
int ovl_copy_up_start(struct dentry *dentry, int flags);
void ovl_copy_up_end(struct dentry *dentry);
@@ -467,9 +483,21 @@ bool ovl_already_copied_up(struct dentry *dentry, int flags);
bool ovl_path_check_dir_xattr(struct ovl_fs *ofs, const struct path *path,
enum ovl_xattr ox);
bool ovl_path_check_origin_xattr(struct ovl_fs *ofs, const struct path *path);
+bool ovl_path_check_xwhiteout_xattr(struct ovl_fs *ofs, const struct path *path);
+bool ovl_path_check_xwhiteouts_xattr(struct ovl_fs *ofs, const struct path *path);
bool ovl_init_uuid_xattr(struct super_block *sb, struct ovl_fs *ofs,
const struct path *upperpath);
+static inline bool ovl_upper_is_whiteout(struct ovl_fs *ofs,
+ struct dentry *upperdentry)
+{
+ struct path upperpath = {
+ .dentry = upperdentry,
+ .mnt = ovl_upper_mnt(ofs),
+ };
+ return ovl_path_is_whiteout(ofs, &upperpath);
+}
+
static inline bool ovl_check_origin_xattr(struct ovl_fs *ofs,
struct dentry *upperdentry)
{
@@ -624,11 +652,15 @@ struct dentry *ovl_decode_real_fh(struct ovl_fs *ofs, struct ovl_fh *fh,
int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected,
struct dentry *upperdentry, struct ovl_path **stackp);
int ovl_verify_set_fh(struct ovl_fs *ofs, struct dentry *dentry,
- enum ovl_xattr ox, struct dentry *real, bool is_upper,
- bool set);
+ enum ovl_xattr ox, const struct ovl_fh *fh,
+ bool is_upper, bool set);
+int ovl_verify_origin_xattr(struct ovl_fs *ofs, struct dentry *dentry,
+ enum ovl_xattr ox, struct dentry *real,
+ bool is_upper, bool set);
struct dentry *ovl_index_upper(struct ovl_fs *ofs, struct dentry *index,
bool connected);
int ovl_verify_index(struct ovl_fs *ofs, struct dentry *index);
+int ovl_get_index_name_fh(const struct ovl_fh *fh, struct qstr *name);
int ovl_get_index_name(struct ovl_fs *ofs, struct dentry *origin,
struct qstr *name);
struct dentry *ovl_get_index_fh(struct ovl_fs *ofs, struct ovl_fh *fh);
@@ -640,17 +672,24 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags);
bool ovl_lower_positive(struct dentry *dentry);
+static inline int ovl_verify_origin_fh(struct ovl_fs *ofs, struct dentry *upper,
+ const struct ovl_fh *fh, bool set)
+{
+ return ovl_verify_set_fh(ofs, upper, OVL_XATTR_ORIGIN, fh, false, set);
+}
+
static inline int ovl_verify_origin(struct ovl_fs *ofs, struct dentry *upper,
struct dentry *origin, bool set)
{
- return ovl_verify_set_fh(ofs, upper, OVL_XATTR_ORIGIN, origin,
- false, set);
+ return ovl_verify_origin_xattr(ofs, upper, OVL_XATTR_ORIGIN, origin,
+ false, set);
}
static inline int ovl_verify_upper(struct ovl_fs *ofs, struct dentry *index,
struct dentry *upper, bool set)
{
- return ovl_verify_set_fh(ofs, index, OVL_XATTR_UPPER, upper, true, set);
+ return ovl_verify_origin_xattr(ofs, index, OVL_XATTR_UPPER, upper,
+ true, set);
}
/* readdir.c */
@@ -684,17 +723,8 @@ int ovl_set_nlink_lower(struct dentry *dentry);
unsigned int ovl_get_nlink(struct ovl_fs *ofs, struct dentry *lowerdentry,
struct dentry *upperdentry,
unsigned int fallback);
-int ovl_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
- struct iattr *attr);
-int ovl_getattr(struct mnt_idmap *idmap, const struct path *path,
- struct kstat *stat, u32 request_mask, unsigned int flags);
int ovl_permission(struct mnt_idmap *idmap, struct inode *inode,
int mask);
-int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
- const void *value, size_t size, int flags);
-int ovl_xattr_get(struct dentry *dentry, struct inode *inode, const char *name,
- void *value, size_t size);
-ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
#ifdef CONFIG_FS_POSIX_ACL
struct posix_acl *do_ovl_get_acl(struct mnt_idmap *idmap,
@@ -815,8 +845,9 @@ int ovl_copy_xattr(struct super_block *sb, const struct path *path, struct dentr
int ovl_set_attr(struct ovl_fs *ofs, struct dentry *upper, struct kstat *stat);
struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct dentry *real,
bool is_upper);
-int ovl_set_origin(struct ovl_fs *ofs, struct dentry *lower,
- struct dentry *upper);
+struct ovl_fh *ovl_get_origin_fh(struct ovl_fs *ofs, struct dentry *origin);
+int ovl_set_origin_fh(struct ovl_fs *ofs, const struct ovl_fh *fh,
+ struct dentry *upper);
/* export.c */
extern const struct export_operations ovl_export_operations;
@@ -830,3 +861,12 @@ static inline bool ovl_force_readonly(struct ovl_fs *ofs)
{
return (!ovl_upper_mnt(ofs) || !ofs->workdir);
}
+
+/* xattr.c */
+
+const struct xattr_handler * const *ovl_xattr_handlers(struct ovl_fs *ofs);
+int ovl_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *attr);
+int ovl_getattr(struct mnt_idmap *idmap, const struct path *path,
+ struct kstat *stat, u32 request_mask, unsigned int flags);
+ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
diff --git a/fs/overlayfs/params.c b/fs/overlayfs/params.c
index f6ff23fd101c..ddab9ea267d1 100644
--- a/fs/overlayfs/params.c
+++ b/fs/overlayfs/params.c
@@ -43,8 +43,10 @@ module_param_named(metacopy, ovl_metacopy_def, bool, 0644);
MODULE_PARM_DESC(metacopy,
"Default to on or off for the metadata only copy up feature");
-enum {
+enum ovl_opt {
Opt_lowerdir,
+ Opt_lowerdir_add,
+ Opt_datadir_add,
Opt_upperdir,
Opt_workdir,
Opt_default_permissions,
@@ -140,8 +142,11 @@ static int ovl_verity_mode_def(void)
#define fsparam_string_empty(NAME, OPT) \
__fsparam(fs_param_is_string, NAME, OPT, fs_param_can_be_empty, NULL)
+
const struct fs_parameter_spec ovl_parameter_spec[] = {
fsparam_string_empty("lowerdir", Opt_lowerdir),
+ fsparam_string("lowerdir+", Opt_lowerdir_add),
+ fsparam_string("datadir+", Opt_datadir_add),
fsparam_string("upperdir", Opt_upperdir),
fsparam_string("workdir", Opt_workdir),
fsparam_flag("default_permissions", Opt_default_permissions),
@@ -238,19 +243,8 @@ static int ovl_mount_dir_noesc(const char *name, struct path *path)
pr_err("failed to resolve '%s': %i\n", name, err);
goto out;
}
- err = -EINVAL;
- if (ovl_dentry_weird(path->dentry)) {
- pr_err("filesystem on '%s' not supported\n", name);
- goto out_put;
- }
- if (!d_is_dir(path->dentry)) {
- pr_err("'%s' not a directory\n", name);
- goto out_put;
- }
return 0;
-out_put:
- path_put_init(path);
out:
return err;
}
@@ -268,7 +262,7 @@ static void ovl_unescape(char *s)
}
}
-static int ovl_mount_dir(const char *name, struct path *path, bool upper)
+static int ovl_mount_dir(const char *name, struct path *path)
{
int err = -ENOMEM;
char *tmp = kstrdup(name, GFP_KERNEL);
@@ -276,68 +270,147 @@ static int ovl_mount_dir(const char *name, struct path *path, bool upper)
if (tmp) {
ovl_unescape(tmp);
err = ovl_mount_dir_noesc(tmp, path);
-
- if (!err && upper && path->dentry->d_flags & DCACHE_OP_REAL) {
- pr_err("filesystem on '%s' not supported as upperdir\n",
- tmp);
- path_put_init(path);
- err = -EINVAL;
- }
kfree(tmp);
}
return err;
}
-static int ovl_parse_param_upperdir(const char *name, struct fs_context *fc,
- bool workdir)
+static int ovl_mount_dir_check(struct fs_context *fc, const struct path *path,
+ enum ovl_opt layer, const char *name, bool upper)
{
- int err;
- struct ovl_fs *ofs = fc->s_fs_info;
- struct ovl_config *config = &ofs->config;
struct ovl_fs_context *ctx = fc->fs_private;
- struct path path;
- char *dup;
- err = ovl_mount_dir(name, &path, true);
- if (err)
- return err;
+ if (ovl_dentry_weird(path->dentry))
+ return invalfc(fc, "filesystem on %s not supported", name);
+
+ if (!d_is_dir(path->dentry))
+ return invalfc(fc, "%s is not a directory", name);
+
/*
* Check whether upper path is read-only here to report failures
* early. Don't forget to recheck when the superblock is created
* as the mount attributes could change.
*/
- if (__mnt_is_readonly(path.mnt)) {
- path_put(&path);
- return -EINVAL;
+ if (upper) {
+ if (path->dentry->d_flags & DCACHE_OP_REAL)
+ return invalfc(fc, "filesystem on %s not supported as upperdir", name);
+ if (__mnt_is_readonly(path->mnt))
+ return invalfc(fc, "filesystem on %s is read-only", name);
+ } else {
+ if (ctx->lowerdir_all && layer != Opt_lowerdir)
+ return invalfc(fc, "lowerdir+ and datadir+ cannot follow lowerdir");
+ if (ctx->nr_data && layer == Opt_lowerdir_add)
+ return invalfc(fc, "regular lower layers cannot follow data layers");
+ if (ctx->nr == OVL_MAX_STACK)
+ return invalfc(fc, "too many lower directories, limit is %d",
+ OVL_MAX_STACK);
}
+ return 0;
+}
- dup = kstrdup(name, GFP_KERNEL);
- if (!dup) {
- path_put(&path);
+static int ovl_ctx_realloc_lower(struct fs_context *fc)
+{
+ struct ovl_fs_context *ctx = fc->fs_private;
+ struct ovl_fs_context_layer *l;
+ size_t nr;
+
+ if (ctx->nr < ctx->capacity)
+ return 0;
+
+ nr = min_t(size_t, max(4096 / sizeof(*l), ctx->capacity * 2),
+ OVL_MAX_STACK);
+ l = krealloc_array(ctx->lower, nr, sizeof(*l), GFP_KERNEL_ACCOUNT);
+ if (!l)
return -ENOMEM;
+
+ ctx->lower = l;
+ ctx->capacity = nr;
+ return 0;
+}
+
+static void ovl_add_layer(struct fs_context *fc, enum ovl_opt layer,
+ struct path *path, char **pname)
+{
+ struct ovl_fs *ofs = fc->s_fs_info;
+ struct ovl_config *config = &ofs->config;
+ struct ovl_fs_context *ctx = fc->fs_private;
+ struct ovl_fs_context_layer *l;
+
+ switch (layer) {
+ case Opt_workdir:
+ swap(config->workdir, *pname);
+ swap(ctx->work, *path);
+ break;
+ case Opt_upperdir:
+ swap(config->upperdir, *pname);
+ swap(ctx->upper, *path);
+ break;
+ case Opt_datadir_add:
+ ctx->nr_data++;
+ fallthrough;
+ case Opt_lowerdir_add:
+ WARN_ON(ctx->nr >= ctx->capacity);
+ l = &ctx->lower[ctx->nr++];
+ memset(l, 0, sizeof(*l));
+ swap(l->name, *pname);
+ swap(l->path, *path);
+ break;
+ default:
+ WARN_ON(1);
}
+}
- if (workdir) {
- kfree(config->workdir);
- config->workdir = dup;
- path_put(&ctx->work);
- ctx->work = path;
- } else {
- kfree(config->upperdir);
- config->upperdir = dup;
- path_put(&ctx->upper);
- ctx->upper = path;
+static int ovl_parse_layer(struct fs_context *fc, struct fs_parameter *param,
+ enum ovl_opt layer)
+{
+ char *name = kstrdup(param->string, GFP_KERNEL);
+ bool upper = (layer == Opt_upperdir || layer == Opt_workdir);
+ struct path path;
+ int err;
+
+ if (!name)
+ return -ENOMEM;
+
+ if (upper)
+ err = ovl_mount_dir(name, &path);
+ else
+ err = ovl_mount_dir_noesc(name, &path);
+ if (err)
+ goto out_free;
+
+ err = ovl_mount_dir_check(fc, &path, layer, name, upper);
+ if (err)
+ goto out_put;
+
+ if (!upper) {
+ err = ovl_ctx_realloc_lower(fc);
+ if (err)
+ goto out_put;
}
- return 0;
+
+ /* Store the user provided path string in ctx to show in mountinfo */
+ ovl_add_layer(fc, layer, &path, &name);
+
+out_put:
+ path_put(&path);
+out_free:
+ kfree(name);
+ return err;
}
-static void ovl_parse_param_drop_lowerdir(struct ovl_fs_context *ctx)
+static void ovl_reset_lowerdirs(struct ovl_fs_context *ctx)
{
- for (size_t nr = 0; nr < ctx->nr; nr++) {
- path_put(&ctx->lower[nr].path);
- kfree(ctx->lower[nr].name);
- ctx->lower[nr].name = NULL;
+ struct ovl_fs_context_layer *l = ctx->lower;
+
+ // Reset old user provided lowerdir string
+ kfree(ctx->lowerdir_all);
+ ctx->lowerdir_all = NULL;
+
+ for (size_t nr = 0; nr < ctx->nr; nr++, l++) {
+ path_put(&l->path);
+ kfree(l->name);
+ l->name = NULL;
}
ctx->nr = 0;
ctx->nr_data = 0;
@@ -346,7 +419,7 @@ static void ovl_parse_param_drop_lowerdir(struct ovl_fs_context *ctx)
/*
* Parse lowerdir= mount option:
*
- * (1) lowerdir=/lower1:/lower2:/lower3::/data1::/data2
+ * e.g.: lowerdir=/lower1:/lower2:/lower3::/data1::/data2
* Set "/lower1", "/lower2", and "/lower3" as lower layers and
* "/data1" and "/data2" as data lower layers. Any existing lower
* layers are replaced.
@@ -356,9 +429,9 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
int err;
struct ovl_fs_context *ctx = fc->fs_private;
struct ovl_fs_context_layer *l;
- char *dup = NULL, *dup_iter;
+ char *dup = NULL, *iter;
ssize_t nr_lower = 0, nr = 0, nr_data = 0;
- bool append = false, data_layer = false;
+ bool data_layer = false;
/*
* Ensure we're backwards compatible with mount(2)
@@ -366,16 +439,21 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
*/
/* drop all existing lower layers */
- if (!*name) {
- ovl_parse_param_drop_lowerdir(ctx);
+ ovl_reset_lowerdirs(ctx);
+
+ if (!*name)
return 0;
- }
if (*name == ':') {
pr_err("cannot append lower layer");
return -EINVAL;
}
+ // Store user provided lowerdir string to show in mount options
+ ctx->lowerdir_all = kstrdup(name, GFP_KERNEL);
+ if (!ctx->lowerdir_all)
+ return -ENOMEM;
+
dup = kstrdup(name, GFP_KERNEL);
if (!dup)
return -ENOMEM;
@@ -385,36 +463,11 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
if (nr_lower < 0)
goto out_err;
- if ((nr_lower > OVL_MAX_STACK) ||
- (append && (size_add(ctx->nr, nr_lower) > OVL_MAX_STACK))) {
+ if (nr_lower > OVL_MAX_STACK) {
pr_err("too many lower directories, limit is %d\n", OVL_MAX_STACK);
goto out_err;
}
- if (!append)
- ovl_parse_param_drop_lowerdir(ctx);
-
- /*
- * (1) append
- *
- * We want nr <= nr_lower <= capacity We know nr > 0 and nr <=
- * capacity. If nr == 0 this wouldn't be append. If nr +
- * nr_lower is <= capacity then nr <= nr_lower <= capacity
- * already holds. If nr + nr_lower exceeds capacity, we realloc.
- *
- * (2) replace
- *
- * Ensure we're backwards compatible with mount(2) which allows
- * "lowerdir=/a:/b:/c,lowerdir=/d:/e:/f" causing the last
- * specified lowerdir mount option to win.
- *
- * We want nr <= nr_lower <= capacity We know either (i) nr == 0
- * or (ii) nr > 0. We also know nr_lower > 0. The capacity
- * could've been changed multiple times already so we only know
- * nr <= capacity. If nr + nr_lower > capacity we realloc,
- * otherwise nr <= nr_lower <= capacity holds already.
- */
- nr_lower += ctx->nr;
if (nr_lower > ctx->capacity) {
err = -ENOMEM;
l = krealloc_array(ctx->lower, nr_lower, sizeof(*ctx->lower),
@@ -426,41 +479,21 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
ctx->capacity = nr_lower;
}
- /*
- * (3) By (1) and (2) we know nr <= nr_lower <= capacity.
- * (4) If ctx->nr == 0 => replace
- * We have verified above that the lowerdir mount option
- * isn't an append, i.e., the lowerdir mount option
- * doesn't start with ":" or "::".
- * (4.1) The lowerdir mount options only contains regular lower
- * layers ":".
- * => Nothing to verify.
- * (4.2) The lowerdir mount options contains regular ":" and
- * data "::" layers.
- * => We need to verify that data lower layers "::" aren't
- * followed by regular ":" lower layers
- * (5) If ctx->nr > 0 => append
- * We know that there's at least one regular layer
- * otherwise we would've failed when parsing the previous
- * lowerdir mount option.
- * (5.1) The lowerdir mount option is a regular layer ":" append
- * => We need to verify that no data layers have been
- * specified before.
- * (5.2) The lowerdir mount option is a data layer "::" append
- * We know that there's at least one regular layer or
- * other data layers. => There's nothing to verify.
- */
- dup_iter = dup;
- for (nr = ctx->nr; nr < nr_lower; nr++) {
- l = &ctx->lower[nr];
+ iter = dup;
+ l = ctx->lower;
+ for (nr = 0; nr < nr_lower; nr++, l++) {
memset(l, 0, sizeof(*l));
- err = ovl_mount_dir(dup_iter, &l->path, false);
+ err = ovl_mount_dir(iter, &l->path);
+ if (err)
+ goto out_put;
+
+ err = ovl_mount_dir_check(fc, &l->path, Opt_lowerdir, iter, false);
if (err)
goto out_put;
err = -ENOMEM;
- l->name = kstrdup(dup_iter, GFP_KERNEL_ACCOUNT);
+ l->name = kstrdup(iter, GFP_KERNEL_ACCOUNT);
if (!l->name)
goto out_put;
@@ -472,8 +505,8 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
break;
err = -EINVAL;
- dup_iter = strchr(dup_iter, '\0') + 1;
- if (*dup_iter) {
+ iter = strchr(iter, '\0') + 1;
+ if (*iter) {
/*
* This is a regular layer so we require that
* there are no data layers.
@@ -489,7 +522,7 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
/* This is a data lower layer. */
data_layer = true;
- dup_iter++;
+ iter++;
}
ctx->nr = nr_lower;
ctx->nr_data += nr_data;
@@ -497,21 +530,7 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
return 0;
out_put:
- /*
- * We know nr >= ctx->nr < nr_lower. If we failed somewhere
- * we want to undo until nr == ctx->nr. This is correct for
- * both ctx->nr == 0 and ctx->nr > 0.
- */
- for (; nr >= ctx->nr; nr--) {
- l = &ctx->lower[nr];
- kfree(l->name);
- l->name = NULL;
- path_put(&l->path);
-
- /* don't overflow */
- if (nr == 0)
- break;
- }
+ ovl_reset_lowerdirs(ctx);
out_err:
kfree(dup);
@@ -556,11 +575,11 @@ static int ovl_parse_param(struct fs_context *fc, struct fs_parameter *param)
case Opt_lowerdir:
err = ovl_parse_param_lowerdir(param->string, fc);
break;
+ case Opt_lowerdir_add:
+ case Opt_datadir_add:
case Opt_upperdir:
- fallthrough;
case Opt_workdir:
- err = ovl_parse_param_upperdir(param->string, fc,
- (Opt_workdir == opt));
+ err = ovl_parse_layer(fc, param, opt);
break;
case Opt_default_permissions:
config->default_permissions = true;
@@ -617,7 +636,7 @@ static int ovl_get_tree(struct fs_context *fc)
static inline void ovl_fs_context_free(struct ovl_fs_context *ctx)
{
- ovl_parse_param_drop_lowerdir(ctx);
+ ovl_reset_lowerdirs(ctx);
path_put(&ctx->upper);
path_put(&ctx->work);
kfree(ctx->lower);
@@ -933,23 +952,28 @@ int ovl_show_options(struct seq_file *m, struct dentry *dentry)
{
struct super_block *sb = dentry->d_sb;
struct ovl_fs *ofs = OVL_FS(sb);
- size_t nr, nr_merged_lower = ofs->numlayer - ofs->numdatalayer;
+ size_t nr, nr_merged_lower, nr_lower = 0;
+ char **lowerdirs = ofs->config.lowerdirs;
/*
- * lowerdirs[] starts from offset 1, then
- * >= 0 regular lower layers prefixed with : and
- * >= 0 data-only lower layers prefixed with ::
- *
- * we need to escase comma and space like seq_show_option() does and
- * we also need to escape the colon separator from lowerdir paths.
+ * lowerdirs[0] holds the colon separated list that user provided
+ * with lowerdir mount option.
+ * lowerdirs[1..numlayer] hold the lowerdir paths that were added
+ * using the lowerdir+ and datadir+ mount options.
+ * For now, we do not allow mixing the legacy lowerdir mount option
+ * with the new lowerdir+ and datadir+ mount options.
*/
- seq_puts(m, ",lowerdir=");
- for (nr = 1; nr < ofs->numlayer; nr++) {
- if (nr > 1)
- seq_putc(m, ':');
- if (nr >= nr_merged_lower)
- seq_putc(m, ':');
- seq_escape(m, ofs->config.lowerdirs[nr], ":, \t\n\\");
+ if (lowerdirs[0]) {
+ seq_show_option(m, "lowerdir", lowerdirs[0]);
+ } else {
+ nr_lower = ofs->numlayer;
+ nr_merged_lower = nr_lower - ofs->numdatalayer;
+ }
+ for (nr = 1; nr < nr_lower; nr++) {
+ if (nr < nr_merged_lower)
+ seq_show_option(m, "lowerdir+", lowerdirs[nr]);
+ else
+ seq_show_option(m, "datadir+", lowerdirs[nr]);
}
if (ofs->config.upperdir) {
seq_show_option(m, "upperdir", ofs->config.upperdir);
diff --git a/fs/overlayfs/params.h b/fs/overlayfs/params.h
index 8750da68ab2a..c96d93982021 100644
--- a/fs/overlayfs/params.h
+++ b/fs/overlayfs/params.h
@@ -32,6 +32,7 @@ struct ovl_fs_context {
size_t nr_data;
struct ovl_opt_set set;
struct ovl_fs_context_layer *lower;
+ char *lowerdir_all; /* user provided lowerdir string */
};
int ovl_init_fs_context(struct fs_context *fc);
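With the lowerdir+ and datadir+ parameters added above, each layer is passed as a separate mount option instead of one colon-separated lowerdir string, so layer paths containing ':' need no escaping; all regular lowerdir+ layers must be given before any datadir+ layers, and the legacy lowerdir option cannot be mixed with the new ones in the same mount. A minimal userspace sketch (paths are placeholders, not from the patch):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /* each lowerdir+ appends one regular lower layer, in order */
        const char *opts = "lowerdir+=/lower1,lowerdir+=/lower2,"
                           "upperdir=/upper,workdir=/work";

        if (mount("overlay", "/merged", "overlay", 0, opts)) {
                perror("mount overlay");
                return 1;
        }
        return 0;
}

With fsconfig(2) the same keys can be set one call per directory, which also avoids the comma-splitting of the legacy mount(2) data string.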
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index de39e067ae65..a490fc47c3e7 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -25,6 +25,7 @@ struct ovl_cache_entry {
struct ovl_cache_entry *next_maybe_whiteout;
bool is_upper;
bool is_whiteout;
+ bool check_xwhiteout;
char name[];
};
@@ -47,6 +48,7 @@ struct ovl_readdir_data {
int err;
bool is_upper;
bool d_type_supported;
+ bool in_xwhiteouts_dir;
};
struct ovl_dir_file {
@@ -162,6 +164,8 @@ static struct ovl_cache_entry *ovl_cache_entry_new(struct ovl_readdir_data *rdd,
p->ino = 0;
p->is_upper = rdd->is_upper;
p->is_whiteout = false;
+ /* Defer check for overlay.whiteout to ovl_iterate() */
+ p->check_xwhiteout = rdd->in_xwhiteouts_dir && d_type == DT_REG;
if (d_type == DT_CHR) {
p->next_maybe_whiteout = rdd->first_maybe_whiteout;
@@ -301,6 +305,8 @@ static inline int ovl_dir_read(const struct path *realpath,
if (IS_ERR(realfile))
return PTR_ERR(realfile);
+ rdd->in_xwhiteouts_dir = rdd->dentry &&
+ ovl_path_check_xwhiteouts_xattr(OVL_FS(rdd->dentry->d_sb), realpath);
rdd->first_maybe_whiteout = NULL;
rdd->ctx.pos = 0;
do {
@@ -447,7 +453,7 @@ static u64 ovl_remap_lower_ino(u64 ino, int xinobits, int fsid,
}
/*
- * Set d_ino for upper entries. Non-upper entries should always report
+ * Set d_ino for upper entries if needed. Non-upper entries should always report
* the uppermost real inode ino and should not call this function.
*
* When not all layer are on same fs, report real ino also for upper.
@@ -455,8 +461,11 @@ static u64 ovl_remap_lower_ino(u64 ino, int xinobits, int fsid,
* When all layers are on the same fs, and upper has a reference to
* copy up origin, call vfs_getattr() on the overlay entry to make
* sure that d_ino will be consistent with st_ino from stat(2).
+ *
+ * Also checks the overlay.whiteout xattr by doing a full lookup which will return
+ * negative in this case.
*/
-static int ovl_cache_update_ino(const struct path *path, struct ovl_cache_entry *p)
+static int ovl_cache_update(const struct path *path, struct ovl_cache_entry *p, bool update_ino)
{
struct dentry *dir = path->dentry;
@@ -467,7 +476,7 @@ static int ovl_cache_update_ino(const struct path *path, struct ovl_cache_entry
int xinobits = ovl_xino_bits(ofs);
int err = 0;
- if (!ovl_same_dev(ofs))
+ if (!ovl_same_dev(ofs) && !p->check_xwhiteout)
goto out;
if (p->name[0] == '.') {
@@ -481,6 +490,7 @@ static int ovl_cache_update_ino(const struct path *path, struct ovl_cache_entry
goto get;
}
}
+ /* This checks also for xwhiteouts */
this = lookup_one(mnt_idmap(path->mnt), p->name, dir, p->len);
if (IS_ERR_OR_NULL(this) || !this->d_inode) {
/* Mark a stale entry */
@@ -494,6 +504,9 @@ static int ovl_cache_update_ino(const struct path *path, struct ovl_cache_entry
}
get:
+ if (!ovl_same_dev(ofs) || !update_ino)
+ goto out;
+
type = ovl_path_type(this);
if (OVL_TYPE_ORIGIN(type)) {
struct kstat stat;
@@ -572,7 +585,7 @@ static int ovl_dir_read_impure(const struct path *path, struct list_head *list,
list_for_each_entry_safe(p, n, list, l_node) {
if (strcmp(p->name, ".") != 0 &&
strcmp(p->name, "..") != 0) {
- err = ovl_cache_update_ino(path, p);
+ err = ovl_cache_update(path, p, true);
if (err)
return err;
}
@@ -778,13 +791,13 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
while (od->cursor != &od->cache->entries) {
p = list_entry(od->cursor, struct ovl_cache_entry, l_node);
if (!p->is_whiteout) {
- if (!p->ino) {
- err = ovl_cache_update_ino(&file->f_path, p);
+ if (!p->ino || p->check_xwhiteout) {
+ err = ovl_cache_update(&file->f_path, p, !p->ino);
if (err)
goto out;
}
}
- /* ovl_cache_update_ino() sets is_whiteout on stale entry */
+ /* ovl_cache_update() sets is_whiteout on stale entry */
if (!p->is_whiteout) {
if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
break;
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 17864a8d2b85..a0967bb25003 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -445,68 +445,6 @@ static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir)
return ok;
}
-static int ovl_own_xattr_get(const struct xattr_handler *handler,
- struct dentry *dentry, struct inode *inode,
- const char *name, void *buffer, size_t size)
-{
- return -EOPNOTSUPP;
-}
-
-static int ovl_own_xattr_set(const struct xattr_handler *handler,
- struct mnt_idmap *idmap,
- struct dentry *dentry, struct inode *inode,
- const char *name, const void *value,
- size_t size, int flags)
-{
- return -EOPNOTSUPP;
-}
-
-static int ovl_other_xattr_get(const struct xattr_handler *handler,
- struct dentry *dentry, struct inode *inode,
- const char *name, void *buffer, size_t size)
-{
- return ovl_xattr_get(dentry, inode, name, buffer, size);
-}
-
-static int ovl_other_xattr_set(const struct xattr_handler *handler,
- struct mnt_idmap *idmap,
- struct dentry *dentry, struct inode *inode,
- const char *name, const void *value,
- size_t size, int flags)
-{
- return ovl_xattr_set(dentry, inode, name, value, size, flags);
-}
-
-static const struct xattr_handler ovl_own_trusted_xattr_handler = {
- .prefix = OVL_XATTR_TRUSTED_PREFIX,
- .get = ovl_own_xattr_get,
- .set = ovl_own_xattr_set,
-};
-
-static const struct xattr_handler ovl_own_user_xattr_handler = {
- .prefix = OVL_XATTR_USER_PREFIX,
- .get = ovl_own_xattr_get,
- .set = ovl_own_xattr_set,
-};
-
-static const struct xattr_handler ovl_other_xattr_handler = {
- .prefix = "", /* catch all */
- .get = ovl_other_xattr_get,
- .set = ovl_other_xattr_set,
-};
-
-static const struct xattr_handler * const ovl_trusted_xattr_handlers[] = {
- &ovl_own_trusted_xattr_handler,
- &ovl_other_xattr_handler,
- NULL
-};
-
-static const struct xattr_handler * const ovl_user_xattr_handlers[] = {
- &ovl_own_user_xattr_handler,
- &ovl_other_xattr_handler,
- NULL
-};
-
static int ovl_setup_trap(struct super_block *sb, struct dentry *dir,
struct inode **ptrap, const char *name)
{
@@ -647,7 +585,7 @@ static int ovl_check_rename_whiteout(struct ovl_fs *ofs)
if (IS_ERR(whiteout))
goto cleanup_temp;
- err = ovl_is_whiteout(whiteout);
+ err = ovl_upper_is_whiteout(ofs, whiteout);
/* Best effort cleanup of whiteout and temp file */
if (err)
@@ -887,15 +825,20 @@ static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs,
{
struct vfsmount *mnt = ovl_upper_mnt(ofs);
struct dentry *indexdir;
+ struct dentry *origin = ovl_lowerstack(oe)->dentry;
+ const struct ovl_fh *fh;
int err;
+ fh = ovl_get_origin_fh(ofs, origin);
+ if (IS_ERR(fh))
+ return PTR_ERR(fh);
+
err = mnt_want_write(mnt);
if (err)
- return err;
+ goto out_free_fh;
/* Verify lower root is upper root origin */
- err = ovl_verify_origin(ofs, upperpath->dentry,
- ovl_lowerstack(oe)->dentry, true);
+ err = ovl_verify_origin_fh(ofs, upperpath->dentry, fh, true);
if (err) {
pr_err("failed to verify upper root origin\n");
goto out;
@@ -927,9 +870,10 @@ static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs,
* directory entries.
*/
if (ovl_check_origin_xattr(ofs, ofs->indexdir)) {
- err = ovl_verify_set_fh(ofs, ofs->indexdir,
- OVL_XATTR_ORIGIN,
- upperpath->dentry, true, false);
+ err = ovl_verify_origin_xattr(ofs, ofs->indexdir,
+ OVL_XATTR_ORIGIN,
+ upperpath->dentry, true,
+ false);
if (err)
pr_err("failed to verify index dir 'origin' xattr\n");
}
@@ -947,6 +891,8 @@ static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs,
out:
mnt_drop_write(mnt);
+out_free_fh:
+ kfree(fh);
return err;
}
@@ -1382,8 +1328,11 @@ int ovl_fill_super(struct super_block *sb, struct fs_context *fc)
ofs->layers = layers;
/*
* Layer 0 is reserved for upper even if there's no upper.
- * For consistency, config.lowerdirs[0] is NULL.
+ * config.lowerdirs[0] is used for storing the user provided colon
+ * separated lowerdir string.
*/
+ ofs->config.lowerdirs[0] = ctx->lowerdir_all;
+ ctx->lowerdir_all = NULL;
ofs->numlayer = 1;
sb->s_stack_depth = 0;
@@ -1493,8 +1442,7 @@ int ovl_fill_super(struct super_block *sb, struct fs_context *fc)
cap_lower(cred->cap_effective, CAP_SYS_RESOURCE);
sb->s_magic = OVERLAYFS_SUPER_MAGIC;
- sb->s_xattr = ofs->config.userxattr ? ovl_user_xattr_handlers :
- ovl_trusted_xattr_handlers;
+ sb->s_xattr = ovl_xattr_handlers(ofs);
sb->s_fs_info = ofs;
#ifdef CONFIG_FS_POSIX_ACL
sb->s_flags |= SB_POSIXACL;
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 868afd8834c3..50a201e9cd39 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -17,12 +17,38 @@
#include <linux/ratelimit.h>
#include "overlayfs.h"
+/* Get write access to upper mnt - may fail if upper sb was remounted ro */
+int ovl_get_write_access(struct dentry *dentry)
+{
+ struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
+ return mnt_get_write_access(ovl_upper_mnt(ofs));
+}
+
+/* Get write access to upper sb - may block if upper sb is frozen */
+void ovl_start_write(struct dentry *dentry)
+{
+ struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
+ sb_start_write(ovl_upper_mnt(ofs)->mnt_sb);
+}
+
int ovl_want_write(struct dentry *dentry)
{
struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
return mnt_want_write(ovl_upper_mnt(ofs));
}
+void ovl_put_write_access(struct dentry *dentry)
+{
+ struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
+ mnt_put_write_access(ovl_upper_mnt(ofs));
+}
+
+void ovl_end_write(struct dentry *dentry)
+{
+ struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
+ sb_end_write(ovl_upper_mnt(ofs)->mnt_sb);
+}
+
void ovl_drop_write(struct dentry *dentry)
{
struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
@@ -55,7 +81,7 @@ int ovl_can_decode_fh(struct super_block *sb)
if (!capable(CAP_DAC_READ_SEARCH))
return 0;
- if (!sb->s_export_op || !sb->s_export_op->fh_to_dentry)
+ if (!exportfs_can_decode_fh(sb->s_export_op))
return 0;
return sb->s_export_op->encode_fh ? -1 : FILEID_INO32_GEN;
@@ -575,6 +601,16 @@ bool ovl_is_whiteout(struct dentry *dentry)
return inode && IS_WHITEOUT(inode);
}
+/*
+ * Use this over ovl_is_whiteout for upper and lower files, as it also
+ * handles overlay.whiteout xattr whiteout files.
+ */
+bool ovl_path_is_whiteout(struct ovl_fs *ofs, const struct path *path)
+{
+ return ovl_is_whiteout(path->dentry) ||
+ ovl_path_check_xwhiteout_xattr(ofs, path);
+}
+
struct file *ovl_path_open(const struct path *path, int flags)
{
struct inode *inode = d_inode(path->dentry);
@@ -644,22 +680,36 @@ bool ovl_already_copied_up(struct dentry *dentry, int flags)
return false;
}
+/*
+ * The copy up "transaction" keeps an elevated mnt write count on upper mnt,
+ * but leaves taking freeze protection on upper sb to lower level helpers.
+ */
int ovl_copy_up_start(struct dentry *dentry, int flags)
{
struct inode *inode = d_inode(dentry);
int err;
err = ovl_inode_lock_interruptible(inode);
- if (!err && ovl_already_copied_up_locked(dentry, flags)) {
+ if (err)
+ return err;
+
+ if (ovl_already_copied_up_locked(dentry, flags))
err = 1; /* Already copied up */
- ovl_inode_unlock(inode);
- }
+ else
+ err = ovl_get_write_access(dentry);
+ if (err)
+ goto out_unlock;
+
+ return 0;
+out_unlock:
+ ovl_inode_unlock(inode);
return err;
}
void ovl_copy_up_end(struct dentry *dentry)
{
+ ovl_put_write_access(dentry);
ovl_inode_unlock(d_inode(dentry));
}
@@ -676,6 +726,32 @@ bool ovl_path_check_origin_xattr(struct ovl_fs *ofs, const struct path *path)
return false;
}
+bool ovl_path_check_xwhiteout_xattr(struct ovl_fs *ofs, const struct path *path)
+{
+ struct dentry *dentry = path->dentry;
+ int res;
+
+ /* xattr.whiteout must be a zero size regular file */
+ if (!d_is_reg(dentry) || i_size_read(d_inode(dentry)) != 0)
+ return false;
+
+ res = ovl_path_getxattr(ofs, path, OVL_XATTR_XWHITEOUT, NULL, 0);
+ return res >= 0;
+}
+
+bool ovl_path_check_xwhiteouts_xattr(struct ovl_fs *ofs, const struct path *path)
+{
+ struct dentry *dentry = path->dentry;
+ int res;
+
+ /* xattr.whiteouts must be a directory */
+ if (!d_is_dir(dentry))
+ return false;
+
+ res = ovl_path_getxattr(ofs, path, OVL_XATTR_XWHITEOUTS, NULL, 0);
+ return res >= 0;
+}
+
/*
* Load persistent uuid from xattr into s_uuid if found, or store a new
* random generated value in s_uuid and in xattr.
@@ -760,6 +836,8 @@ bool ovl_path_check_dir_xattr(struct ovl_fs *ofs, const struct path *path,
#define OVL_XATTR_UUID_POSTFIX "uuid"
#define OVL_XATTR_METACOPY_POSTFIX "metacopy"
#define OVL_XATTR_PROTATTR_POSTFIX "protattr"
+#define OVL_XATTR_XWHITEOUT_POSTFIX "whiteout"
+#define OVL_XATTR_XWHITEOUTS_POSTFIX "whiteouts"
#define OVL_XATTR_TAB_ENTRY(x) \
[x] = { [false] = OVL_XATTR_TRUSTED_PREFIX x ## _POSTFIX, \
@@ -775,6 +853,8 @@ const char *const ovl_xattr_table[][2] = {
OVL_XATTR_TAB_ENTRY(OVL_XATTR_UUID),
OVL_XATTR_TAB_ENTRY(OVL_XATTR_METACOPY),
OVL_XATTR_TAB_ENTRY(OVL_XATTR_PROTATTR),
+ OVL_XATTR_TAB_ENTRY(OVL_XATTR_XWHITEOUT),
+ OVL_XATTR_TAB_ENTRY(OVL_XATTR_XWHITEOUTS),
};
int ovl_check_setxattr(struct ovl_fs *ofs, struct dentry *upperdentry,
@@ -973,12 +1053,18 @@ static void ovl_cleanup_index(struct dentry *dentry)
struct dentry *index = NULL;
struct inode *inode;
struct qstr name = { };
+ bool got_write = false;
int err;
err = ovl_get_index_name(ofs, lowerdentry, &name);
if (err)
goto fail;
+ err = ovl_want_write(dentry);
+ if (err)
+ goto fail;
+
+ got_write = true;
inode = d_inode(upperdentry);
if (!S_ISDIR(inode->i_mode) && inode->i_nlink != 1) {
pr_warn_ratelimited("cleanup linked index (%pd2, ino=%lu, nlink=%u)\n",
@@ -1016,6 +1102,8 @@ static void ovl_cleanup_index(struct dentry *dentry)
goto fail;
out:
+ if (got_write)
+ ovl_drop_write(dentry);
kfree(name.name);
dput(index);
return;
@@ -1062,8 +1150,12 @@ int ovl_nlink_start(struct dentry *dentry)
if (err)
return err;
+ err = ovl_want_write(dentry);
+ if (err)
+ goto out_unlock;
+
if (d_is_dir(dentry) || !ovl_test_flag(OVL_INDEX, inode))
- goto out;
+ return 0;
old_cred = ovl_override_creds(dentry->d_sb);
/*
@@ -1074,10 +1166,15 @@ int ovl_nlink_start(struct dentry *dentry)
*/
err = ovl_set_nlink_upper(dentry);
revert_creds(old_cred);
-
-out:
if (err)
- ovl_inode_unlock(inode);
+ goto out_drop_write;
+
+ return 0;
+
+out_drop_write:
+ ovl_drop_write(dentry);
+out_unlock:
+ ovl_inode_unlock(inode);
return err;
}
@@ -1086,6 +1183,8 @@ void ovl_nlink_end(struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
+ ovl_drop_write(dentry);
+
if (ovl_test_flag(OVL_INDEX, inode) && inode->i_nlink == 0) {
const struct cred *old_cred;
@@ -1403,6 +1502,7 @@ void ovl_copyattr(struct inode *inode)
realinode = ovl_i_path_real(inode, &realpath);
real_idmap = mnt_idmap(realpath.mnt);
+ spin_lock(&inode->i_lock);
vfsuid = i_uid_into_vfsuid(real_idmap, realinode);
vfsgid = i_gid_into_vfsgid(real_idmap, realinode);
@@ -1413,4 +1513,5 @@ void ovl_copyattr(struct inode *inode)
inode_set_mtime_to_ts(inode, inode_get_mtime(realinode));
inode_set_ctime_to_ts(inode, inode_get_ctime(realinode));
i_size_write(inode, i_size_read(realinode));
+ spin_unlock(&inode->i_lock);
}
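The two helpers above define the on-disk format of the new xattr-based whiteouts: a whiteout is a zero-size regular file carrying the overlay.whiteout xattr, and readdir only pays the extra lookup cost inside directories marked with the overlay.whiteouts xattr. A minimal sketch (not part of the patch) of how a lower layer could be prepared from userspace, using placeholder paths and trusted.* names (the user.overlay.* names apply with the userxattr mount option), run with CAP_SYS_ADMIN:

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/xattr.h>
#include <unistd.h>

int main(void)
{
        int fd;

        /* mark the directory so readdir checks its entries for whiteouts */
        if (setxattr("/lower/dir", "trusted.overlay.whiteouts", "", 0, 0))
                perror("setxattr whiteouts");

        /* the whiteout itself: an empty regular file with the whiteout xattr */
        fd = open("/lower/dir/removed", O_CREAT | O_WRONLY | O_TRUNC, 0000);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        close(fd);
        if (setxattr("/lower/dir/removed", "trusted.overlay.whiteout", "", 0, 0))
                perror("setxattr whiteout");
        return 0;
}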
diff --git a/fs/overlayfs/xattrs.c b/fs/overlayfs/xattrs.c
new file mode 100644
index 000000000000..383978e4663c
--- /dev/null
+++ b/fs/overlayfs/xattrs.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/fs.h>
+#include <linux/xattr.h>
+#include "overlayfs.h"
+
+static bool ovl_is_escaped_xattr(struct super_block *sb, const char *name)
+{
+ struct ovl_fs *ofs = sb->s_fs_info;
+
+ if (ofs->config.userxattr)
+ return strncmp(name, OVL_XATTR_ESCAPE_USER_PREFIX,
+ OVL_XATTR_ESCAPE_USER_PREFIX_LEN) == 0;
+ else
+ return strncmp(name, OVL_XATTR_ESCAPE_TRUSTED_PREFIX,
+ OVL_XATTR_ESCAPE_TRUSTED_PREFIX_LEN - 1) == 0;
+}
+
+static bool ovl_is_own_xattr(struct super_block *sb, const char *name)
+{
+ struct ovl_fs *ofs = OVL_FS(sb);
+
+ if (ofs->config.userxattr)
+ return strncmp(name, OVL_XATTR_USER_PREFIX,
+ OVL_XATTR_USER_PREFIX_LEN) == 0;
+ else
+ return strncmp(name, OVL_XATTR_TRUSTED_PREFIX,
+ OVL_XATTR_TRUSTED_PREFIX_LEN) == 0;
+}
+
+bool ovl_is_private_xattr(struct super_block *sb, const char *name)
+{
+ return ovl_is_own_xattr(sb, name) && !ovl_is_escaped_xattr(sb, name);
+}
+
+static int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
+ const void *value, size_t size, int flags)
+{
+ int err;
+ struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
+ struct dentry *upperdentry = ovl_i_dentry_upper(inode);
+ struct dentry *realdentry = upperdentry ?: ovl_dentry_lower(dentry);
+ struct path realpath;
+ const struct cred *old_cred;
+
+ if (!value && !upperdentry) {
+ ovl_path_lower(dentry, &realpath);
+ old_cred = ovl_override_creds(dentry->d_sb);
+ err = vfs_getxattr(mnt_idmap(realpath.mnt), realdentry, name, NULL, 0);
+ revert_creds(old_cred);
+ if (err < 0)
+ goto out;
+ }
+
+ if (!upperdentry) {
+ err = ovl_copy_up(dentry);
+ if (err)
+ goto out;
+
+ realdentry = ovl_dentry_upper(dentry);
+ }
+
+ err = ovl_want_write(dentry);
+ if (err)
+ goto out;
+
+ old_cred = ovl_override_creds(dentry->d_sb);
+ if (value) {
+ err = ovl_do_setxattr(ofs, realdentry, name, value, size,
+ flags);
+ } else {
+ WARN_ON(flags != XATTR_REPLACE);
+ err = ovl_do_removexattr(ofs, realdentry, name);
+ }
+ revert_creds(old_cred);
+ ovl_drop_write(dentry);
+
+ /* copy c/mtime */
+ ovl_copyattr(inode);
+out:
+ return err;
+}
+
+static int ovl_xattr_get(struct dentry *dentry, struct inode *inode, const char *name,
+ void *value, size_t size)
+{
+ ssize_t res;
+ const struct cred *old_cred;
+ struct path realpath;
+
+ ovl_i_path_real(inode, &realpath);
+ old_cred = ovl_override_creds(dentry->d_sb);
+ res = vfs_getxattr(mnt_idmap(realpath.mnt), realpath.dentry, name, value, size);
+ revert_creds(old_cred);
+ return res;
+}
+
+static bool ovl_can_list(struct super_block *sb, const char *s)
+{
+ /* Never list private (.overlay) */
+ if (ovl_is_private_xattr(sb, s))
+ return false;
+
+ /* List all non-trusted xattrs */
+ if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0)
+ return true;
+
+ /* list other trusted for superuser only */
+ return ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN);
+}
+
+ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
+{
+ struct dentry *realdentry = ovl_dentry_real(dentry);
+ struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
+ ssize_t res;
+ size_t len;
+ char *s;
+ const struct cred *old_cred;
+ size_t prefix_len, name_len;
+
+ old_cred = ovl_override_creds(dentry->d_sb);
+ res = vfs_listxattr(realdentry, list, size);
+ revert_creds(old_cred);
+ if (res <= 0 || size == 0)
+ return res;
+
+ prefix_len = ofs->config.userxattr ?
+ OVL_XATTR_USER_PREFIX_LEN : OVL_XATTR_TRUSTED_PREFIX_LEN;
+
+ /* filter out private xattrs */
+ for (s = list, len = res; len;) {
+ size_t slen = strnlen(s, len) + 1;
+
+ /* underlying fs providing us with a broken xattr list? */
+ if (WARN_ON(slen > len))
+ return -EIO;
+
+ len -= slen;
+ if (!ovl_can_list(dentry->d_sb, s)) {
+ res -= slen;
+ memmove(s, s + slen, len);
+ } else if (ovl_is_escaped_xattr(dentry->d_sb, s)) {
+ res -= OVL_XATTR_ESCAPE_PREFIX_LEN;
+ name_len = slen - prefix_len - OVL_XATTR_ESCAPE_PREFIX_LEN;
+ s += prefix_len;
+ memmove(s, s + OVL_XATTR_ESCAPE_PREFIX_LEN, name_len + len);
+ s += name_len;
+ } else {
+ s += slen;
+ }
+ }
+
+ return res;
+}
+
+static char *ovl_xattr_escape_name(const char *prefix, const char *name)
+{
+ size_t prefix_len = strlen(prefix);
+ size_t name_len = strlen(name);
+ size_t escaped_len;
+ char *escaped, *s;
+
+ escaped_len = prefix_len + OVL_XATTR_ESCAPE_PREFIX_LEN + name_len;
+ if (escaped_len > XATTR_NAME_MAX)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ escaped = kmalloc(escaped_len + 1, GFP_KERNEL);
+ if (escaped == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ s = escaped;
+ memcpy(s, prefix, prefix_len);
+ s += prefix_len;
+ memcpy(s, OVL_XATTR_ESCAPE_PREFIX, OVL_XATTR_ESCAPE_PREFIX_LEN);
+ s += OVL_XATTR_ESCAPE_PREFIX_LEN;
+ memcpy(s, name, name_len + 1);
+
+ return escaped;
+}
+
+static int ovl_own_xattr_get(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, void *buffer, size_t size)
+{
+ char *escaped;
+ int r;
+
+ escaped = ovl_xattr_escape_name(handler->prefix, name);
+ if (IS_ERR(escaped))
+ return PTR_ERR(escaped);
+
+ r = ovl_xattr_get(dentry, inode, escaped, buffer, size);
+
+ kfree(escaped);
+
+ return r;
+}
+
+static int ovl_own_xattr_set(const struct xattr_handler *handler,
+ struct mnt_idmap *idmap,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
+{
+ char *escaped;
+ int r;
+
+ escaped = ovl_xattr_escape_name(handler->prefix, name);
+ if (IS_ERR(escaped))
+ return PTR_ERR(escaped);
+
+ r = ovl_xattr_set(dentry, inode, escaped, value, size, flags);
+
+ kfree(escaped);
+
+ return r;
+}
+
+static int ovl_other_xattr_get(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, void *buffer, size_t size)
+{
+ return ovl_xattr_get(dentry, inode, name, buffer, size);
+}
+
+static int ovl_other_xattr_set(const struct xattr_handler *handler,
+ struct mnt_idmap *idmap,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
+{
+ return ovl_xattr_set(dentry, inode, name, value, size, flags);
+}
+
+static const struct xattr_handler ovl_own_trusted_xattr_handler = {
+ .prefix = OVL_XATTR_TRUSTED_PREFIX,
+ .get = ovl_own_xattr_get,
+ .set = ovl_own_xattr_set,
+};
+
+static const struct xattr_handler ovl_own_user_xattr_handler = {
+ .prefix = OVL_XATTR_USER_PREFIX,
+ .get = ovl_own_xattr_get,
+ .set = ovl_own_xattr_set,
+};
+
+static const struct xattr_handler ovl_other_xattr_handler = {
+ .prefix = "", /* catch all */
+ .get = ovl_other_xattr_get,
+ .set = ovl_other_xattr_set,
+};
+
+static const struct xattr_handler * const ovl_trusted_xattr_handlers[] = {
+ &ovl_own_trusted_xattr_handler,
+ &ovl_other_xattr_handler,
+ NULL
+};
+
+static const struct xattr_handler * const ovl_user_xattr_handlers[] = {
+ &ovl_own_user_xattr_handler,
+ &ovl_other_xattr_handler,
+ NULL
+};
+
+const struct xattr_handler * const *ovl_xattr_handlers(struct ovl_fs *ofs)
+{
+ return ofs->config.userxattr ? ovl_user_xattr_handlers :
+ ovl_trusted_xattr_handlers;
+}
+
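The handlers above keep overlayfs' own xattr namespace private by escaping it: when a caller gets or sets a name that already carries the overlay prefix, ovl_xattr_escape_name() re-inserts the namespace string after that prefix, and ovl_listxattr() strips it back out. A minimal userspace sketch of that renaming, assuming the escape prefix is the plain "overlay." namespace string (the kernel uses its own OVL_XATTR_* macros for all three strings):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Escape prefix assumed to be the "overlay." namespace string. */
static char *escape_xattr_name(const char *prefix, const char *name)
{
	const char *escape = "overlay.";
	size_t len = strlen(prefix) + strlen(escape) + strlen(name) + 1;
	char *out = malloc(len);

	if (!out)
		return NULL;
	snprintf(out, len, "%s%s%s", prefix, escape, name);
	return out;
}

int main(void)
{
	/* A caller setting "trusted.overlay.foo" on the overlay... */
	char *escaped = escape_xattr_name("trusted.overlay.", "foo");

	if (escaped) {
		/* ...has it stored as "trusted.overlay.overlay.foo". */
		printf("%s\n", escaped);
		free(escaped);
	}
	return 0;
}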
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index fe1bf5b6e0cb..59f6b8e32cc9 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -32,7 +32,7 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
* fully cached or it may be in the process of
* being deleted due to a lease break.
*/
- if (!cfid->has_lease) {
+ if (!cfid->time || !cfid->has_lease) {
spin_unlock(&cfids->cfid_list_lock);
return NULL;
}
@@ -193,10 +193,20 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
npath = path_no_prefix(cifs_sb, path);
if (IS_ERR(npath)) {
rc = PTR_ERR(npath);
- kfree(utf16_path);
- return rc;
+ goto out;
}
+ if (!npath[0]) {
+ dentry = dget(cifs_sb->root);
+ } else {
+ dentry = path_to_dentry(cifs_sb, npath);
+ if (IS_ERR(dentry)) {
+ rc = -ENOENT;
+ goto out;
+ }
+ }
+ cfid->dentry = dentry;
+
/*
* We do not hold the lock for the open because in case
* SMB2_open needs to reconnect.
@@ -249,6 +259,15 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
smb2_set_related(&rqst[1]);
+ /*
+ * Set @cfid->has_lease to true before sending out compounded request so
+ * its lease reference can be put in cached_dir_lease_break() due to a
+ * potential lease break right after the request is sent or while @cfid
+ * is still being cached. Concurrent processes won't be able to use it yet
+ * due to @cfid->time being zero.
+ */
+ cfid->has_lease = true;
+
rc = compound_send_recv(xid, ses, server,
flags, 2, rqst,
resp_buftype, rsp_iov);
@@ -263,6 +282,8 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
cfid->tcon = tcon;
cfid->is_open = true;
+ spin_lock(&cfids->cfid_list_lock);
+
o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
oparms.fid->persistent_fid = o_rsp->PersistentFileId;
oparms.fid->volatile_fid = o_rsp->VolatileFileId;
@@ -270,18 +291,25 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
#endif /* CIFS_DEBUG2 */
- if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
+ rc = -EINVAL;
+ if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
+ spin_unlock(&cfids->cfid_list_lock);
goto oshr_free;
+ }
smb2_parse_contexts(server, o_rsp,
&oparms.fid->epoch,
oparms.fid->lease_key, &oplock,
NULL, NULL);
- if (!(oplock & SMB2_LEASE_READ_CACHING_HE))
+ if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
+ spin_unlock(&cfids->cfid_list_lock);
goto oshr_free;
+ }
qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
- if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
+ if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) {
+ spin_unlock(&cfids->cfid_list_lock);
goto oshr_free;
+ }
if (!smb2_validate_and_copy_iov(
le16_to_cpu(qi_rsp->OutputBufferOffset),
sizeof(struct smb2_file_all_info),
@@ -289,37 +317,24 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
(char *)&cfid->file_all_info))
cfid->file_all_info_is_valid = true;
- if (!npath[0])
- dentry = dget(cifs_sb->root);
- else {
- dentry = path_to_dentry(cifs_sb, npath);
- if (IS_ERR(dentry)) {
- rc = -ENOENT;
- goto oshr_free;
- }
- }
- spin_lock(&cfids->cfid_list_lock);
- cfid->dentry = dentry;
cfid->time = jiffies;
- cfid->has_lease = true;
spin_unlock(&cfids->cfid_list_lock);
+ /* At this point the directory handle is fully cached */
+ rc = 0;
oshr_free:
- kfree(utf16_path);
SMB2_open_free(&rqst[0]);
SMB2_query_info_free(&rqst[1]);
free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
- spin_lock(&cfids->cfid_list_lock);
- if (!cfid->has_lease) {
- if (rc) {
- if (cfid->on_list) {
- list_del(&cfid->entry);
- cfid->on_list = false;
- cfids->num_entries--;
- }
- rc = -ENOENT;
- } else {
+ if (rc) {
+ spin_lock(&cfids->cfid_list_lock);
+ if (cfid->on_list) {
+ list_del(&cfid->entry);
+ cfid->on_list = false;
+ cfids->num_entries--;
+ }
+ if (cfid->has_lease) {
/*
* We are guaranteed to have two references at this
* point. One for the caller and one for a potential
@@ -327,25 +342,24 @@ oshr_free:
* will be closed when the caller closes the cached
* handle.
*/
+ cfid->has_lease = false;
spin_unlock(&cfids->cfid_list_lock);
kref_put(&cfid->refcount, smb2_close_cached_fid);
goto out;
}
+ spin_unlock(&cfids->cfid_list_lock);
}
- spin_unlock(&cfids->cfid_list_lock);
+out:
if (rc) {
if (cfid->is_open)
SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
cfid->fid.volatile_fid);
free_cached_dir(cfid);
- cfid = NULL;
- }
-out:
- if (rc == 0) {
+ } else {
*ret_cfid = cfid;
atomic_inc(&tcon->num_remote_opens);
}
-
+ kfree(utf16_path);
return rc;
}
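The reshuffled open_cached_dir() above publishes cfid->has_lease before the compound request goes out, and only sets cfid->time once the handle is fully cached; the new !cfid->time test in find_or_create_cached_dir() keys on exactly that. A compressed sketch of the ordering, with simplified stand-in types rather than the real cifs structures:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for struct cached_fid; only the two gating fields matter here. */
struct cached_dir {
	bool has_lease;
	time_t time;		/* 0 means "not fully cached yet" */
};

/* Lookup side: only hand out entries that are completely cached. */
static struct cached_dir *find_cached(struct cached_dir *cfid)
{
	if (!cfid->time || !cfid->has_lease)
		return NULL;
	return cfid;
}

/* Open side: publish the lease first, mark "ready" only at the very end. */
static int open_cached(struct cached_dir *cfid, int (*send_open)(void))
{
	cfid->has_lease = true;		/* a lease break may now drop the ref */
	if (send_open() != 0)
		return -1;		/* error path tears the entry down */
	cfid->time = time(NULL);	/* entry is now visible to find_cached() */
	return 0;
}

static int fake_send_open(void) { return 0; }

int main(void)
{
	struct cached_dir cfid = { 0 };

	printf("visible before open: %s\n", find_cached(&cfid) ? "yes" : "no");
	open_cached(&cfid, fake_send_open);
	printf("visible after open:  %s\n", find_cached(&cfid) ? "yes" : "no");
	return 0;
}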
diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
index 76922fcc4bc6..6d8804fb6535 100644
--- a/fs/smb/client/cifs_debug.c
+++ b/fs/smb/client/cifs_debug.c
@@ -427,6 +427,8 @@ skip_rdma:
if (server->nosharesock)
seq_printf(m, " nosharesock");
+ seq_printf(m, "\nServer capabilities: 0x%x", server->capabilities);
+
if (server->rdma)
seq_printf(m, "\nRDMA ");
seq_printf(m, "\nTCP status: %d Instance: %d"
@@ -452,6 +454,11 @@ skip_rdma:
seq_printf(m, "\n\n\tSessions: ");
i = 0;
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ spin_lock(&ses->ses_lock);
+ if (ses->ses_status == SES_EXITING) {
+ spin_unlock(&ses->ses_lock);
+ continue;
+ }
i++;
if ((ses->serverDomain == NULL) ||
(ses->serverOS == NULL) ||
@@ -472,6 +479,7 @@ skip_rdma:
ses->ses_count, ses->serverOS, ses->serverNOS,
ses->capabilities, ses->ses_status);
}
+ spin_unlock(&ses->ses_lock);
seq_printf(m, "\n\tSecurity type: %s ",
get_security_type_str(server->ops->select_sectype(server, ses->sectype)));
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index 22869cda1356..ea3a7a668b45 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -1191,6 +1191,7 @@ const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
const struct inode_operations cifs_symlink_inode_ops = {
.get_link = cifs_get_link,
+ .setattr = cifs_setattr,
.permission = cifs_permission,
.listxattr = cifs_listxattr,
};
diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
index e17222fec9d2..a75220db5c1e 100644
--- a/fs/smb/client/cifspdu.h
+++ b/fs/smb/client/cifspdu.h
@@ -2570,7 +2570,7 @@ typedef struct {
struct win_dev {
- unsigned char type[8]; /* IntxCHR or IntxBLK */
+ unsigned char type[8]; /* IntxCHR or IntxBLK or LnxFIFO */
__le64 major;
__le64 minor;
} __attribute__((packed));
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index 0c37eefa18a5..890ceddae07e 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -81,7 +81,7 @@ extern char *cifs_build_path_to_root(struct smb3_fs_context *ctx,
extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
char *cifs_build_devname(char *nodename, const char *prepath);
extern void delete_mid(struct mid_q_entry *mid);
-extern void release_mid(struct mid_q_entry *mid);
+void __release_mid(struct kref *refcount);
extern void cifs_wake_up_task(struct mid_q_entry *mid);
extern int cifs_handle_standard(struct TCP_Server_Info *server,
struct mid_q_entry *mid);
@@ -740,4 +740,9 @@ static inline bool dfs_src_pathname_equal(const char *s1, const char *s2)
return true;
}
+static inline void release_mid(struct mid_q_entry *mid)
+{
+ kref_put(&mid->refcount, __release_mid);
+}
+
#endif /* _CIFSPROTO_H */
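release_mid() is now a static inline around kref_put(), so the mid is torn down by __release_mid() when the last reference is dropped, without taking server->mid_lock. A userspace analogue of that last-reference-runs-the-destructor pattern, using C11 atomics in place of struct kref:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct mid_entry {
	atomic_int refcount;
	int mid;
};

static void release_entry(struct mid_entry *m)
{
	/* Runs exactly once, when the last reference goes away. */
	printf("freeing mid %d\n", m->mid);
	free(m);
}

static void get_entry(struct mid_entry *m)
{
	atomic_fetch_add(&m->refcount, 1);
}

static void put_entry(struct mid_entry *m)
{
	/* fetch_sub returns the old value; 1 -> 0 means we were last. */
	if (atomic_fetch_sub(&m->refcount, 1) == 1)
		release_entry(m);
}

int main(void)
{
	struct mid_entry *m = malloc(sizeof(*m));

	if (!m)
		return 1;
	atomic_init(&m->refcount, 1);
	m->mid = 42;

	get_entry(m);	/* a second user takes a reference */
	put_entry(m);	/* ...and drops it again */
	put_entry(m);	/* the final put frees the entry */
	return 0;
}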
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 7b923e36501b..1a137b33858a 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -119,6 +119,7 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
static void smb2_query_server_interfaces(struct work_struct *work)
{
int rc;
+ int xid;
struct cifs_tcon *tcon = container_of(work,
struct cifs_tcon,
query_interfaces.work);
@@ -126,7 +127,10 @@ static void smb2_query_server_interfaces(struct work_struct *work)
/*
* query server network interfaces, in case they change
*/
- rc = SMB3_request_interfaces(0, tcon, false);
+ xid = get_xid();
+ rc = SMB3_request_interfaces(xid, tcon, false);
+ free_xid(xid);
+
if (rc) {
cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
__func__, rc);
@@ -156,13 +160,14 @@ cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
/* If server is a channel, select the primary channel */
pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
- spin_lock(&pserver->srv_lock);
+ /* if we need to signal just this channel */
if (!all_channels) {
- pserver->tcpStatus = CifsNeedReconnect;
- spin_unlock(&pserver->srv_lock);
+ spin_lock(&server->srv_lock);
+ if (server->tcpStatus != CifsExiting)
+ server->tcpStatus = CifsNeedReconnect;
+ spin_unlock(&server->srv_lock);
return;
}
- spin_unlock(&pserver->srv_lock);
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
@@ -1969,9 +1974,10 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
void __cifs_put_smb_ses(struct cifs_ses *ses)
{
- unsigned int rc, xid;
- unsigned int chan_count;
struct TCP_Server_Info *server = ses->server;
+ unsigned int xid;
+ size_t i;
+ int rc;
spin_lock(&ses->ses_lock);
if (ses->ses_status == SES_EXITING) {
@@ -2017,20 +2023,14 @@ void __cifs_put_smb_ses(struct cifs_ses *ses)
list_del_init(&ses->smb_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
- chan_count = ses->chan_count;
-
/* close any extra channels */
- if (chan_count > 1) {
- int i;
-
- for (i = 1; i < chan_count; i++) {
- if (ses->chans[i].iface) {
- kref_put(&ses->chans[i].iface->refcount, release_iface);
- ses->chans[i].iface = NULL;
- }
- cifs_put_tcp_session(ses->chans[i].server, 0);
- ses->chans[i].server = NULL;
+ for (i = 1; i < ses->chan_count; i++) {
+ if (ses->chans[i].iface) {
+ kref_put(&ses->chans[i].iface->refcount, release_iface);
+ ses->chans[i].iface = NULL;
}
+ cifs_put_tcp_session(ses->chans[i].server, 0);
+ ses->chans[i].server = NULL;
}
sesInfoFree(ses);
@@ -3849,8 +3849,12 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
spin_unlock(&ses->chan_lock);
- if (!is_binding)
+ if (!is_binding) {
ses->ses_status = SES_IN_SETUP;
+
+ /* force iface_list refresh */
+ ses->iface_last_update = 0;
+ }
spin_unlock(&ses->ses_lock);
/* update ses ip_addr only for primary chan */
diff --git a/fs/smb/client/export.c b/fs/smb/client/export.c
index 37c28415df1e..d606e8cbcb7d 100644
--- a/fs/smb/client/export.c
+++ b/fs/smb/client/export.c
@@ -41,13 +41,12 @@ static struct dentry *cifs_get_parent(struct dentry *dentry)
}
const struct export_operations cifs_export_ops = {
+ .encode_fh = generic_encode_ino32_fh,
.get_parent = cifs_get_parent,
-/* Following five export operations are unneeded so far and can default:
- .get_dentry =
- .get_name =
- .find_exported_dentry =
- .decode_fh =
- .encode_fs = */
+/*
+ * The following export operations are mandatory for NFS export support:
+ * .fh_to_dentry =
+ */
};
#endif /* CONFIG_CIFS_NFSD_EXPORT */
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index 3abfe77bfa46..86fbd3f847d6 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -594,6 +594,10 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
cifs_dbg(FYI, "Symlink\n");
fattr->cf_mode |= S_IFLNK;
fattr->cf_dtype = DT_LNK;
+ } else if (memcmp("LnxFIFO", pbuf, 8) == 0) {
+ cifs_dbg(FYI, "FIFO\n");
+ fattr->cf_mode |= S_IFIFO;
+ fattr->cf_dtype = DT_FIFO;
} else {
fattr->cf_mode |= S_IFREG; /* file? */
fattr->cf_dtype = DT_REG;
diff --git a/fs/smb/client/link.c b/fs/smb/client/link.c
index c66be4904e1f..a1da50e66fbb 100644
--- a/fs/smb/client/link.c
+++ b/fs/smb/client/link.c
@@ -42,23 +42,11 @@ symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash)
rc = cifs_alloc_hash("md5", &md5);
if (rc)
- goto symlink_hash_err;
+ return rc;
- rc = crypto_shash_init(md5);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not init md5 shash\n", __func__);
- goto symlink_hash_err;
- }
- rc = crypto_shash_update(md5, link_str, link_len);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not update with link_str\n", __func__);
- goto symlink_hash_err;
- }
- rc = crypto_shash_final(md5, md5_hash);
+ rc = crypto_shash_digest(md5, link_str, link_len, md5_hash);
if (rc)
cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
-
-symlink_hash_err:
cifs_free_hash(&md5);
return rc;
}
diff --git a/fs/smb/client/ntlmssp.h b/fs/smb/client/ntlmssp.h
index 2c5dde2ece58..875de43b72de 100644
--- a/fs/smb/client/ntlmssp.h
+++ b/fs/smb/client/ntlmssp.h
@@ -133,8 +133,8 @@ typedef struct _AUTHENTICATE_MESSAGE {
SECURITY_BUFFER WorkstationName;
SECURITY_BUFFER SessionKey;
__le32 NegotiateFlags;
- /* SECURITY_BUFFER for version info not present since we
- do not set the version is present flag */
+ struct ntlmssp_version Version;
+ /* SECURITY_BUFFER */
char UserString[];
} __attribute__((packed)) AUTHENTICATE_MESSAGE, *PAUTHENTICATE_MESSAGE;
diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
index 79f26c560edf..cd474cf98f30 100644
--- a/fs/smb/client/sess.c
+++ b/fs/smb/client/sess.c
@@ -186,7 +186,6 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
}
if (!(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
- ses->chan_max = 1;
spin_unlock(&ses->chan_lock);
cifs_server_dbg(VFS, "no multichannel support\n");
return 0;
@@ -1060,10 +1059,16 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
sec_blob->MessageType = NtLmAuthenticate;
+ /* send version information in ntlmssp authenticate also */
flags = ses->ntlmssp->server_flags | NTLMSSP_REQUEST_TARGET |
- NTLMSSP_NEGOTIATE_TARGET_INFO | NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED;
- /* we only send version information in ntlmssp negotiate, so do not set this flag */
- flags = flags & ~NTLMSSP_NEGOTIATE_VERSION;
+ NTLMSSP_NEGOTIATE_TARGET_INFO | NTLMSSP_NEGOTIATE_VERSION |
+ NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED;
+
+ sec_blob->Version.ProductMajorVersion = LINUX_VERSION_MAJOR;
+ sec_blob->Version.ProductMinorVersion = LINUX_VERSION_PATCHLEVEL;
+ sec_blob->Version.ProductBuild = cpu_to_le16(SMB3_PRODUCT_BUILD);
+ sec_blob->Version.NTLMRevisionCurrent = NTLMSSP_REVISION_W2K3;
+
tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
sec_blob->NegotiateFlags = cpu_to_le32(flags);
diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c
index 25f7cd6f23d6..32dfa0f7a78c 100644
--- a/fs/smb/client/smb2misc.c
+++ b/fs/smb/client/smb2misc.c
@@ -787,7 +787,7 @@ __smb2_handle_cancelled_cmd(struct cifs_tcon *tcon, __u16 cmd, __u64 mid,
{
struct close_cancelled_open *cancelled;
- cancelled = kzalloc(sizeof(*cancelled), GFP_ATOMIC);
+ cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
if (!cancelled)
return -ENOMEM;
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index f4849a8ad40b..601e7a187f87 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -5089,7 +5089,7 @@ smb2_make_node(unsigned int xid, struct inode *inode,
* over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
*/
- if (!S_ISCHR(mode) && !S_ISBLK(mode))
+ if (!S_ISCHR(mode) && !S_ISBLK(mode) && !S_ISFIFO(mode))
return rc;
cifs_dbg(FYI, "sfu compat create special file\n");
@@ -5137,6 +5137,12 @@ smb2_make_node(unsigned int xid, struct inode *inode,
pdev->minor = cpu_to_le64(MINOR(dev));
rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
&bytes_written, iov, 1);
+ } else if (S_ISFIFO(mode)) {
+ memcpy(pdev->type, "LnxFIFO", 8);
+ pdev->major = 0;
+ pdev->minor = 0;
+ rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
+ &bytes_written, iov, 1);
}
tcon->ses->server->ops->close(xid, tcon, &fid);
d_drop(dentry);
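For reference, the SFU-style blob that smb2_make_node() writes for a FIFO is the 24-byte struct win_dev from cifspdu.h: the 8-byte "LnxFIFO" tag followed by zeroed major and minor numbers. A small userspace sketch of that layout (the real code stores little-endian __le64 fields; plain zeroed uint64_t values stand in here):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors struct win_dev from cifspdu.h: 8-byte tag plus major/minor. */
struct win_dev_blob {
	unsigned char type[8];	/* "IntxCHR", "IntxBLK" or "LnxFIFO" */
	uint64_t major;		/* little-endian __le64 on the wire */
	uint64_t minor;
} __attribute__((packed));

static void build_fifo_blob(struct win_dev_blob *pdev)
{
	memset(pdev, 0, sizeof(*pdev));
	memcpy(pdev->type, "LnxFIFO", 8);	/* 7 chars + NUL, as in the patch */
	/* major and minor stay zero for a FIFO */
}

int main(void)
{
	struct win_dev_blob blob;

	build_fifo_blob(&blob);
	printf("tag=%s size=%zu bytes\n", (const char *)blob.type, sizeof(blob));
	return 0;
}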
diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
index 14710afdc2a3..d553b7a54621 100644
--- a/fs/smb/client/transport.c
+++ b/fs/smb/client/transport.c
@@ -76,7 +76,7 @@ alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
return temp;
}
-static void __release_mid(struct kref *refcount)
+void __release_mid(struct kref *refcount)
{
struct mid_q_entry *midEntry =
container_of(refcount, struct mid_q_entry, refcount);
@@ -156,15 +156,6 @@ static void __release_mid(struct kref *refcount)
mempool_free(midEntry, cifs_mid_poolp);
}
-void release_mid(struct mid_q_entry *mid)
-{
- struct TCP_Server_Info *server = mid->server;
-
- spin_lock(&server->mid_lock);
- kref_put(&mid->refcount, __release_mid);
- spin_unlock(&server->mid_lock);
-}
-
void
delete_mid(struct mid_q_entry *mid)
{
diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
index 319fb9ffc6a0..8983f45f8430 100644
--- a/fs/smb/common/smb2pdu.h
+++ b/fs/smb/common/smb2pdu.h
@@ -34,6 +34,7 @@
#define SMB2_QUERY_INFO_HE 0x0010
#define SMB2_SET_INFO_HE 0x0011
#define SMB2_OPLOCK_BREAK_HE 0x0012
+#define SMB2_SERVER_TO_CLIENT_NOTIFICATION 0x0013
/* The same list in little endian */
#define SMB2_NEGOTIATE cpu_to_le16(SMB2_NEGOTIATE_HE)
@@ -411,6 +412,7 @@ struct smb2_tree_disconnect_rsp {
#define SMB2_GLOBAL_CAP_PERSISTENT_HANDLES 0x00000010 /* New to SMB3 */
#define SMB2_GLOBAL_CAP_DIRECTORY_LEASING 0x00000020 /* New to SMB3 */
#define SMB2_GLOBAL_CAP_ENCRYPTION 0x00000040 /* New to SMB3 */
+#define SMB2_GLOBAL_CAP_NOTIFICATIONS 0x00000080 /* New to SMB3.1.1 */
/* Internal types */
#define SMB2_NT_FIND 0x00100000
#define SMB2_LARGE_FILES 0x00200000
@@ -981,6 +983,19 @@ struct smb2_change_notify_rsp {
__u8 Buffer[]; /* array of file notify structs */
} __packed;
+/*
+ * SMB2_SERVER_TO_CLIENT_NOTIFICATION: See MS-SMB2 section 2.2.44
+ */
+
+#define SMB2_NOTIFY_SESSION_CLOSED 0x0000
+
+struct smb2_server_client_notification {
+ struct smb2_hdr hdr;
+ __le16 StructureSize;
+ __u16 Reserved; /* MBZ */
+ __le32 NotificationType;
+ __u8 NotificationBuffer[4]; /* MBZ */
+} __packed;
/*
* SMB2_CREATE See MS-SMB2 section 2.2.13
@@ -1097,16 +1112,23 @@ struct smb2_change_notify_rsp {
#define FILE_WRITE_THROUGH_LE cpu_to_le32(0x00000002)
#define FILE_SEQUENTIAL_ONLY_LE cpu_to_le32(0x00000004)
#define FILE_NO_INTERMEDIATE_BUFFERING_LE cpu_to_le32(0x00000008)
+/* FILE_SYNCHRONOUS_IO_ALERT_LE cpu_to_le32(0x00000010) should be zero, ignored */
+/* FILE_SYNCHRONOUS_IO_NONALERT cpu_to_le32(0x00000020) should be zero, ignored */
#define FILE_NON_DIRECTORY_FILE_LE cpu_to_le32(0x00000040)
#define FILE_COMPLETE_IF_OPLOCKED_LE cpu_to_le32(0x00000100)
#define FILE_NO_EA_KNOWLEDGE_LE cpu_to_le32(0x00000200)
+/* FILE_OPEN_REMOTE_INSTANCE cpu_to_le32(0x00000400) should be zero, ignored */
#define FILE_RANDOM_ACCESS_LE cpu_to_le32(0x00000800)
-#define FILE_DELETE_ON_CLOSE_LE cpu_to_le32(0x00001000)
+#define FILE_DELETE_ON_CLOSE_LE cpu_to_le32(0x00001000) /* MBZ */
#define FILE_OPEN_BY_FILE_ID_LE cpu_to_le32(0x00002000)
#define FILE_OPEN_FOR_BACKUP_INTENT_LE cpu_to_le32(0x00004000)
#define FILE_NO_COMPRESSION_LE cpu_to_le32(0x00008000)
+/* FILE_OPEN_REQUIRING_OPLOCK cpu_to_le32(0x00010000) should be zero, ignored */
+/* FILE_DISALLOW_EXCLUSIVE cpu_to_le32(0x00020000) should be zero, ignored */
+/* FILE_RESERVE_OPFILTER cpu_to_le32(0x00100000) MBZ */
#define FILE_OPEN_REPARSE_POINT_LE cpu_to_le32(0x00200000)
#define FILE_OPEN_NO_RECALL_LE cpu_to_le32(0x00400000)
+/* #define FILE_OPEN_FOR_FREE_SPACE_QUERY cpu_to_le32(0x00800000) should be zero, ignored */
#define CREATE_OPTIONS_MASK_LE cpu_to_le32(0x00FFFFFF)
#define FILE_READ_RIGHTS_LE (FILE_READ_DATA_LE | FILE_READ_EA_LE \
diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
index e6ba1e9b8589..6691ae68af0c 100644
--- a/fs/smb/server/smb_common.c
+++ b/fs/smb/server/smb_common.c
@@ -366,11 +366,22 @@ static int smb1_allocate_rsp_buf(struct ksmbd_work *work)
return 0;
}
+/**
+ * set_smb1_rsp_status() - set error type in smb response header
+ * @work: smb work containing smb response header
+ * @err: error code to set in response
+ */
+static void set_smb1_rsp_status(struct ksmbd_work *work, __le32 err)
+{
+ work->send_no_response = 1;
+}
+
static struct smb_version_ops smb1_server_ops = {
.get_cmd_val = get_smb1_cmd_val,
.init_rsp_hdr = init_smb1_rsp_hdr,
.allocate_rsp_buf = smb1_allocate_rsp_buf,
.check_user_session = smb1_check_user_session,
+ .set_rsp_status = set_smb1_rsp_status,
};
static int smb1_negotiate(struct ksmbd_work *work)
diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c
index 6c0305be895e..51b8bfab7481 100644
--- a/fs/smb/server/smbacl.c
+++ b/fs/smb/server/smbacl.c
@@ -1107,6 +1107,7 @@ pass:
struct smb_acl *pdacl;
struct smb_sid *powner_sid = NULL, *pgroup_sid = NULL;
int powner_sid_size = 0, pgroup_sid_size = 0, pntsd_size;
+ int pntsd_alloc_size;
if (parent_pntsd->osidoffset) {
powner_sid = (struct smb_sid *)((char *)parent_pntsd +
@@ -1119,9 +1120,10 @@ pass:
pgroup_sid_size = 1 + 1 + 6 + (pgroup_sid->num_subauth * 4);
}
- pntsd = kzalloc(sizeof(struct smb_ntsd) + powner_sid_size +
- pgroup_sid_size + sizeof(struct smb_acl) +
- nt_size, GFP_KERNEL);
+ pntsd_alloc_size = sizeof(struct smb_ntsd) + powner_sid_size +
+ pgroup_sid_size + sizeof(struct smb_acl) + nt_size;
+
+ pntsd = kzalloc(pntsd_alloc_size, GFP_KERNEL);
if (!pntsd) {
rc = -ENOMEM;
goto free_aces_base;
@@ -1136,6 +1138,27 @@ pass:
pntsd->gsidoffset = parent_pntsd->gsidoffset;
pntsd->dacloffset = parent_pntsd->dacloffset;
+ if ((u64)le32_to_cpu(pntsd->osidoffset) + powner_sid_size >
+ pntsd_alloc_size) {
+ rc = -EINVAL;
+ kfree(pntsd);
+ goto free_aces_base;
+ }
+
+ if ((u64)le32_to_cpu(pntsd->gsidoffset) + pgroup_sid_size >
+ pntsd_alloc_size) {
+ rc = -EINVAL;
+ kfree(pntsd);
+ goto free_aces_base;
+ }
+
+ if ((u64)le32_to_cpu(pntsd->dacloffset) + sizeof(struct smb_acl) + nt_size >
+ pntsd_alloc_size) {
+ rc = -EINVAL;
+ kfree(pntsd);
+ goto free_aces_base;
+ }
+
if (pntsd->osidoffset) {
struct smb_sid *owner_sid = (struct smb_sid *)((char *)pntsd +
le32_to_cpu(pntsd->osidoffset));
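The three checks added above share one shape: widen the 32-bit offset to 64 bits, add the object size, and reject anything that lands beyond the freshly computed pntsd_alloc_size, so the sum cannot wrap. The same bounds test as a tiny userspace helper:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * True if [offset, offset + len) fits inside an alloc_size byte buffer.
 * Doing the sum in 64 bits, as the patch does with its (u64) casts,
 * keeps a huge 32-bit offset from wrapping around.
 */
static bool range_within(uint32_t offset, uint32_t len, uint32_t alloc_size)
{
	return (uint64_t)offset + len <= alloc_size;
}

int main(void)
{
	printf("%d\n", range_within(16, 32, 64));		/* 1: fits */
	printf("%d\n", range_within(60, 8, 64));		/* 0: overruns */
	printf("%d\n", range_within(UINT32_MAX, 16, 64));	/* 0: would wrap */
	return 0;
}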
diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
index 1053127f71ad..c53dea5598fc 100644
--- a/fs/smb/server/vfs.c
+++ b/fs/smb/server/vfs.c
@@ -1177,9 +1177,10 @@ static int ksmbd_vfs_lookup_in_dir(const struct path *dir, char *name,
/**
* ksmbd_vfs_kern_path_locked() - lookup a file and get path info
- * @name: file path that is relative to share
- * @flags: lookup flags
- * @path: if lookup succeed, return path info
+ * @name: file path that is relative to share
+ * @flags: lookup flags
+ * @parent_path: if lookup succeeds, return parent_path info
+ * @path: if lookup succeeds, return path info
* @caseless: caseless filename lookup
*
* Return: 0 on success, otherwise error
diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c
index 723763746238..62972f0ff868 100644
--- a/fs/squashfs/export.c
+++ b/fs/squashfs/export.c
@@ -173,6 +173,7 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
const struct export_operations squashfs_export_ops = {
+ .encode_fh = generic_encode_ino32_fh,
.fh_to_dentry = squashfs_fh_to_dentry,
.fh_to_parent = squashfs_fh_to_parent,
.get_parent = squashfs_get_parent
diff --git a/fs/super.c b/fs/super.c
index 77faad662739..076392396e72 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -2160,3 +2160,4 @@ int sb_init_dio_done_wq(struct super_block *sb)
destroy_workqueue(wq);
return 0;
}
+EXPORT_SYMBOL_GPL(sb_init_dio_done_wq);
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index a12ac0356c69..6b7652fb8050 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -167,6 +167,18 @@ static int sysfs_kf_bin_mmap(struct kernfs_open_file *of,
return battr->mmap(of->file, kobj, battr, vma);
}
+static loff_t sysfs_kf_bin_llseek(struct kernfs_open_file *of, loff_t offset,
+ int whence)
+{
+ struct bin_attribute *battr = of->kn->priv;
+ struct kobject *kobj = of->kn->parent->priv;
+
+ if (battr->llseek)
+ return battr->llseek(of->file, kobj, battr, offset, whence);
+ else
+ return generic_file_llseek(of->file, offset, whence);
+}
+
static int sysfs_kf_bin_open(struct kernfs_open_file *of)
{
struct bin_attribute *battr = of->kn->priv;
@@ -249,6 +261,7 @@ static const struct kernfs_ops sysfs_bin_kfops_mmap = {
.write = sysfs_kf_bin_write,
.mmap = sysfs_kf_bin_mmap,
.open = sysfs_kf_bin_open,
+ .llseek = sysfs_kf_bin_llseek,
};
int sysfs_add_file_mode_ns(struct kernfs_node *parent,
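With sysfs_kf_bin_llseek() in place, a binary attribute may supply its own llseek and otherwise falls back to generic_file_llseek(). A hedged driver-side sketch follows; only the callback signature is taken from the wrapper above, and every example_* symbol is made up for illustration:

/* Driver-side sketch (fragment, not a complete module). */
static loff_t example_bin_llseek(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr,
				 loff_t offset, int whence)
{
	/* e.g. restrict seeks to the attribute's advertised size */
	return fixed_size_llseek(file, offset, whence, attr->size);
}

static ssize_t example_bin_read(struct file *file, struct kobject *kobj,
				struct bin_attribute *attr,
				char *buf, loff_t off, size_t count)
{
	return 0;	/* a real driver copies its blob into buf here */
}

static struct bin_attribute example_bin_attr = {
	.attr	= { .name = "example_blob", .mode = 0444 },
	.size	= 4096,
	.read	= example_bin_read,
	.llseek	= example_bin_llseek,	/* omit to fall back to generic_file_llseek() */
};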
diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c
index 8c8d64e76103..f8a594a50ae6 100644
--- a/fs/tracefs/event_inode.c
+++ b/fs/tracefs/event_inode.c
@@ -2,8 +2,9 @@
/*
* event_inode.c - part of tracefs, a pseudo file system for activating tracing
*
- * Copyright (C) 2020-23 VMware Inc, author: Steven Rostedt (VMware) <rostedt@goodmis.org>
+ * Copyright (C) 2020-23 VMware Inc, author: Steven Rostedt <rostedt@goodmis.org>
* Copyright (C) 2020-23 VMware Inc, author: Ajay Kaher <akaher@vmware.com>
+ * Copyright (C) 2023 Google, author: Steven Rostedt <rostedt@goodmis.org>
*
* eventfs is used to dynamically create inodes and dentries based on the
* meta data provided by the tracing system.
@@ -23,48 +24,30 @@
#include <linux/delay.h>
#include "internal.h"
-struct eventfs_inode {
- struct list_head e_top_files;
-};
+/*
+ * eventfs_mutex protects the eventfs_inode (ei) dentry. Any access
+ * to the ei->dentry must be done under this mutex and after checking
+ * if ei->is_freed is not set. The ei->dentry is released under the
+ * mutex at the same time ei->is_freed is set. If ei->is_freed is set
+ * then the ei->dentry is invalid.
+ */
+static DEFINE_MUTEX(eventfs_mutex);
/*
- * struct eventfs_file - hold the properties of the eventfs files and
- * directories.
- * @name: the name of the file or directory to create
- * @d_parent: holds parent's dentry
- * @dentry: once accessed holds dentry
- * @list: file or directory to be added to parent directory
- * @ei: list of files and directories within directory
- * @fop: file_operations for file or directory
- * @iop: inode_operations for file or directory
- * @data: something that the caller will want to get to later on
- * @mode: the permission that the file or directory should have
+ * The eventfs_inode (ei) itself is protected by SRCU. It is released from
+ * its parent's list and will have is_freed set (under eventfs_mutex).
+ * After the SRCU grace period is over, the ei may be freed.
*/
-struct eventfs_file {
- const char *name;
- struct dentry *d_parent;
- struct dentry *dentry;
- struct list_head list;
- struct eventfs_inode *ei;
- const struct file_operations *fop;
- const struct inode_operations *iop;
- /*
- * Union - used for deletion
- * @del_list: list of eventfs_file to delete
- * @rcu: eventfs_file to delete in RCU
- * @is_freed: node is freed if one of the above is set
- */
- union {
- struct list_head del_list;
- struct rcu_head rcu;
- unsigned long is_freed;
- };
- void *data;
- umode_t mode;
+DEFINE_STATIC_SRCU(eventfs_srcu);
+
+/* Mode is unsigned short, use the upper bits for flags */
+enum {
+ EVENTFS_SAVE_MODE = BIT(16),
+ EVENTFS_SAVE_UID = BIT(17),
+ EVENTFS_SAVE_GID = BIT(18),
};
-static DEFINE_MUTEX(eventfs_mutex);
-DEFINE_STATIC_SRCU(eventfs_srcu);
+#define EVENTFS_MODE_MASK (EVENTFS_SAVE_MODE - 1)
static struct dentry *eventfs_root_lookup(struct inode *dir,
struct dentry *dentry,
@@ -73,8 +56,88 @@ static int dcache_dir_open_wrapper(struct inode *inode, struct file *file);
static int dcache_readdir_wrapper(struct file *file, struct dir_context *ctx);
static int eventfs_release(struct inode *inode, struct file *file);
+static void update_attr(struct eventfs_attr *attr, struct iattr *iattr)
+{
+ unsigned int ia_valid = iattr->ia_valid;
+
+ if (ia_valid & ATTR_MODE) {
+ attr->mode = (attr->mode & ~EVENTFS_MODE_MASK) |
+ (iattr->ia_mode & EVENTFS_MODE_MASK) |
+ EVENTFS_SAVE_MODE;
+ }
+ if (ia_valid & ATTR_UID) {
+ attr->mode |= EVENTFS_SAVE_UID;
+ attr->uid = iattr->ia_uid;
+ }
+ if (ia_valid & ATTR_GID) {
+ attr->mode |= EVENTFS_SAVE_GID;
+ attr->gid = iattr->ia_gid;
+ }
+}
+
+static int eventfs_set_attr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *iattr)
+{
+ const struct eventfs_entry *entry;
+ struct eventfs_inode *ei;
+ const char *name;
+ int ret;
+
+ mutex_lock(&eventfs_mutex);
+ ei = dentry->d_fsdata;
+ if (ei->is_freed) {
+ /* Do not allow changes if the event is about to be removed. */
+ mutex_unlock(&eventfs_mutex);
+ return -ENODEV;
+ }
+
+ /* Preallocate the children mode array if necessary */
+ if (!(dentry->d_inode->i_mode & S_IFDIR)) {
+ if (!ei->entry_attrs) {
+ ei->entry_attrs = kzalloc(sizeof(*ei->entry_attrs) * ei->nr_entries,
+ GFP_KERNEL);
+ if (!ei->entry_attrs) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+ }
+
+ ret = simple_setattr(idmap, dentry, iattr);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * If this is a dir, then update the ei cache, only the file
+ * mode is saved in the ei->m_children, and the ownership is
+ * determined by the parent directory.
+ */
+ if (dentry->d_inode->i_mode & S_IFDIR) {
+ update_attr(&ei->attr, iattr);
+
+ } else {
+ name = dentry->d_name.name;
+
+ for (int i = 0; i < ei->nr_entries; i++) {
+ entry = &ei->entries[i];
+ if (strcmp(name, entry->name) == 0) {
+ update_attr(&ei->entry_attrs[i], iattr);
+ break;
+ }
+ }
+ }
+ out:
+ mutex_unlock(&eventfs_mutex);
+ return ret;
+}
+
static const struct inode_operations eventfs_root_dir_inode_operations = {
.lookup = eventfs_root_lookup,
+ .setattr = eventfs_set_attr,
+};
+
+static const struct inode_operations eventfs_file_inode_operations = {
+ .setattr = eventfs_set_attr,
};
static const struct file_operations eventfs_file_operations = {
@@ -85,26 +148,40 @@ static const struct file_operations eventfs_file_operations = {
.release = eventfs_release,
};
+static void update_inode_attr(struct inode *inode, struct eventfs_attr *attr, umode_t mode)
+{
+ if (!attr) {
+ inode->i_mode = mode;
+ return;
+ }
+
+ if (attr->mode & EVENTFS_SAVE_MODE)
+ inode->i_mode = attr->mode & EVENTFS_MODE_MASK;
+ else
+ inode->i_mode = mode;
+
+ if (attr->mode & EVENTFS_SAVE_UID)
+ inode->i_uid = attr->uid;
+
+ if (attr->mode & EVENTFS_SAVE_GID)
+ inode->i_gid = attr->gid;
+}
+
/**
* create_file - create a file in the tracefs filesystem
* @name: the name of the file to create.
* @mode: the permission that the file should have.
+ * @attr: saved attributes changed by user
* @parent: parent dentry for this file.
* @data: something that the caller will want to get to later on.
* @fop: struct file_operations that should be used for this file.
*
- * This is the basic "create a file" function for tracefs. It allows for a
- * wide range of flexibility in creating a file.
- *
- * This function will return a pointer to a dentry if it succeeds. This
- * pointer must be passed to the tracefs_remove() function when the file is
- * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, %NULL will be returned.
- *
- * If tracefs is not enabled in the kernel, the value -%ENODEV will be
- * returned.
+ * This function creates a dentry that represents a file in the eventfs_inode
+ * directory. The inode.i_private pointer will point to @data in the open()
+ * call.
*/
static struct dentry *create_file(const char *name, umode_t mode,
+ struct eventfs_attr *attr,
struct dentry *parent, void *data,
const struct file_operations *fop)
{
@@ -118,6 +195,7 @@ static struct dentry *create_file(const char *name, umode_t mode,
if (WARN_ON_ONCE(!S_ISREG(mode)))
return NULL;
+ WARN_ON_ONCE(!parent);
dentry = eventfs_start_creating(name, parent);
if (IS_ERR(dentry))
@@ -127,7 +205,10 @@ static struct dentry *create_file(const char *name, umode_t mode,
if (unlikely(!inode))
return eventfs_failed_creating(dentry);
- inode->i_mode = mode;
+ /* If the user updated the directory's attributes, use them */
+ update_inode_attr(inode, attr, mode);
+
+ inode->i_op = &eventfs_file_inode_operations;
inode->i_fop = fop;
inode->i_private = data;
@@ -140,28 +221,19 @@ static struct dentry *create_file(const char *name, umode_t mode,
/**
* create_dir - create a dir in the tracefs filesystem
- * @name: the name of the file to create.
+ * @ei: the eventfs_inode that represents the directory to create
* @parent: parent dentry for this file.
- * @data: something that the caller will want to get to later on.
*
- * This is the basic "create a dir" function for eventfs. It allows for a
- * wide range of flexibility in creating a dir.
- *
- * This function will return a pointer to a dentry if it succeeds. This
- * pointer must be passed to the tracefs_remove() function when the file is
- * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, %NULL will be returned.
- *
- * If tracefs is not enabled in the kernel, the value -%ENODEV will be
- * returned.
+ * This function will create a dentry for a directory represented by
+ * a eventfs_inode.
*/
-static struct dentry *create_dir(const char *name, struct dentry *parent, void *data)
+static struct dentry *create_dir(struct eventfs_inode *ei, struct dentry *parent)
{
struct tracefs_inode *ti;
struct dentry *dentry;
struct inode *inode;
- dentry = eventfs_start_creating(name, parent);
+ dentry = eventfs_start_creating(ei->name, parent);
if (IS_ERR(dentry))
return dentry;
@@ -169,10 +241,11 @@ static struct dentry *create_dir(const char *name, struct dentry *parent, void *
if (unlikely(!inode))
return eventfs_failed_creating(dentry);
- inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
+ /* If the user updated the directory's attributes, use them */
+ update_inode_attr(inode, &ei->attr, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO);
+
inode->i_op = &eventfs_root_dir_inode_operations;
inode->i_fop = &eventfs_file_operations;
- inode->i_private = data;
ti = get_tracefs(inode);
ti->flags |= TRACEFS_EVENT_INODE;
@@ -184,117 +257,198 @@ static struct dentry *create_dir(const char *name, struct dentry *parent, void *
return eventfs_end_creating(dentry);
}
+static void free_ei(struct eventfs_inode *ei)
+{
+ kfree_const(ei->name);
+ kfree(ei->d_children);
+ kfree(ei->entry_attrs);
+ kfree(ei);
+}
+
/**
- * eventfs_set_ef_status_free - set the ef->status to free
+ * eventfs_set_ei_status_free - remove the dentry reference from an eventfs_inode
* @ti: the tracefs_inode of the dentry
- * @dentry: dentry who's status to be freed
+ * @dentry: dentry which has the reference to remove.
*
- * eventfs_set_ef_status_free will be called if no more
- * references remain
+ * Remove the association between a dentry and an eventfs_inode.
*/
-void eventfs_set_ef_status_free(struct tracefs_inode *ti, struct dentry *dentry)
+void eventfs_set_ei_status_free(struct tracefs_inode *ti, struct dentry *dentry)
{
- struct tracefs_inode *ti_parent;
struct eventfs_inode *ei;
- struct eventfs_file *ef, *tmp;
-
- /* The top level events directory may be freed by this */
- if (unlikely(ti->flags & TRACEFS_EVENT_TOP_INODE)) {
- LIST_HEAD(ef_del_list);
+ int i;
- mutex_lock(&eventfs_mutex);
+ mutex_lock(&eventfs_mutex);
- ei = ti->private;
+ ei = dentry->d_fsdata;
+ if (!ei)
+ goto out;
- /* Record all the top level files */
- list_for_each_entry_srcu(ef, &ei->e_top_files, list,
- lockdep_is_held(&eventfs_mutex)) {
- list_add_tail(&ef->del_list, &ef_del_list);
+ /* This could belong to one of the files of the ei */
+ if (ei->dentry != dentry) {
+ for (i = 0; i < ei->nr_entries; i++) {
+ if (ei->d_children[i] == dentry)
+ break;
}
+ if (WARN_ON_ONCE(i == ei->nr_entries))
+ goto out;
+ ei->d_children[i] = NULL;
+ } else if (ei->is_freed) {
+ free_ei(ei);
+ } else {
+ ei->dentry = NULL;
+ }
+
+ dentry->d_fsdata = NULL;
+ out:
+ mutex_unlock(&eventfs_mutex);
+}
- /* Nothing should access this, but just in case! */
- ti->private = NULL;
+/**
+ * create_file_dentry - create a dentry for a file of an eventfs_inode
+ * @ei: the eventfs_inode that the file will be created under
+ * @idx: the index into the d_children[] of the @ei
+ * @parent: The parent dentry of the created file.
+ * @name: The name of the file to create
+ * @mode: The mode of the file.
+ * @data: The data to use to set the inode of the file with on open()
+ * @fops: The fops of the file to be created.
+ * @lookup: True if called from the lookup routine, in which case the created dentry is dput().
+ *
+ * Create a dentry for a file of an eventfs_inode @ei and place it into the
+ * address located at @e_dentry. If the @e_dentry already has a dentry, then
+ * just do a dget() on it and return. Otherwise create the dentry and attach it.
+ */
+static struct dentry *
+create_file_dentry(struct eventfs_inode *ei, int idx,
+ struct dentry *parent, const char *name, umode_t mode, void *data,
+ const struct file_operations *fops, bool lookup)
+{
+ struct eventfs_attr *attr = NULL;
+ struct dentry **e_dentry = &ei->d_children[idx];
+ struct dentry *dentry;
+ bool invalidate = false;
+ mutex_lock(&eventfs_mutex);
+ if (ei->is_freed) {
mutex_unlock(&eventfs_mutex);
+ return NULL;
+ }
+ /* If the e_dentry already has a dentry, use it */
+ if (*e_dentry) {
+ /* lookup does not need to up the ref count */
+ if (!lookup)
+ dget(*e_dentry);
+ mutex_unlock(&eventfs_mutex);
+ return *e_dentry;
+ }
- /* Now safely free the top level files and their children */
- list_for_each_entry_safe(ef, tmp, &ef_del_list, del_list) {
- list_del(&ef->del_list);
- eventfs_remove(ef);
- }
+ /* ei->entry_attrs are protected by SRCU */
+ if (ei->entry_attrs)
+ attr = &ei->entry_attrs[idx];
- kfree(ei);
- return;
- }
+ mutex_unlock(&eventfs_mutex);
- mutex_lock(&eventfs_mutex);
+ /* The lookup already has the parent->d_inode locked */
+ if (!lookup)
+ inode_lock(parent->d_inode);
- ti_parent = get_tracefs(dentry->d_parent->d_inode);
- if (!ti_parent || !(ti_parent->flags & TRACEFS_EVENT_INODE))
- goto out;
+ dentry = create_file(name, mode, attr, parent, data, fops);
- ef = dentry->d_fsdata;
- if (!ef)
- goto out;
+ if (!lookup)
+ inode_unlock(parent->d_inode);
- /*
- * If ef was freed, then the LSB bit is set for d_fsdata.
- * But this should not happen, as it should still have a
- * ref count that prevents it. Warn in case it does.
- */
- if (WARN_ON_ONCE((unsigned long)ef & 1))
- goto out;
+ mutex_lock(&eventfs_mutex);
- dentry->d_fsdata = NULL;
- ef->dentry = NULL;
-out:
+ if (IS_ERR_OR_NULL(dentry)) {
+ /*
+ * When the mutex was released, something else could have
+ * created the dentry for this e_dentry. In which case
+ * use that one.
+ *
+ * Note, with the mutex held, the e_dentry cannot have content
+ * and the ei->is_freed be true at the same time.
+ */
+ dentry = *e_dentry;
+ if (WARN_ON_ONCE(dentry && ei->is_freed))
+ dentry = NULL;
+ /* The lookup does not need to up the dentry refcount */
+ if (dentry && !lookup)
+ dget(dentry);
+ mutex_unlock(&eventfs_mutex);
+ return dentry;
+ }
+
+ if (!*e_dentry && !ei->is_freed) {
+ *e_dentry = dentry;
+ dentry->d_fsdata = ei;
+ } else {
+ /*
+ * Should never happen unless we get here due to being freed.
+ * Otherwise it means two dentries exist with the same name.
+ */
+ WARN_ON_ONCE(!ei->is_freed);
+ invalidate = true;
+ }
mutex_unlock(&eventfs_mutex);
+
+ if (invalidate)
+ d_invalidate(dentry);
+
+ if (lookup || invalidate)
+ dput(dentry);
+
+ return invalidate ? NULL : dentry;
}
/**
* eventfs_post_create_dir - post create dir routine
- * @ef: eventfs_file of recently created dir
+ * @ei: eventfs_inode of recently created dir
*
* Map the meta-data of files within an eventfs dir to their parent dentry
*/
-static void eventfs_post_create_dir(struct eventfs_file *ef)
+static void eventfs_post_create_dir(struct eventfs_inode *ei)
{
- struct eventfs_file *ef_child;
+ struct eventfs_inode *ei_child;
struct tracefs_inode *ti;
+ lockdep_assert_held(&eventfs_mutex);
+
/* srcu lock already held */
/* fill parent-child relation */
- list_for_each_entry_srcu(ef_child, &ef->ei->e_top_files, list,
+ list_for_each_entry_srcu(ei_child, &ei->children, list,
srcu_read_lock_held(&eventfs_srcu)) {
- ef_child->d_parent = ef->dentry;
+ ei_child->d_parent = ei->dentry;
}
- ti = get_tracefs(ef->dentry->d_inode);
- ti->private = ef->ei;
+ ti = get_tracefs(ei->dentry->d_inode);
+ ti->private = ei;
}
/**
- * create_dentry - helper function to create dentry
- * @ef: eventfs_file of file or directory to create
- * @parent: parent dentry
- * @lookup: true if called from lookup routine
+ * create_dir_dentry - Create a directory dentry for the eventfs_inode
+ * @pei: The eventfs_inode parent of ei.
+ * @ei: The eventfs_inode to create the directory for
+ * @parent: The dentry of the parent of this directory
+ * @lookup: True if this is called by the lookup code
*
- * Used to create a dentry for file/dir, executes post dentry creation routine
+ * This creates and attaches a directory dentry to the eventfs_inode @ei.
*/
static struct dentry *
-create_dentry(struct eventfs_file *ef, struct dentry *parent, bool lookup)
+create_dir_dentry(struct eventfs_inode *pei, struct eventfs_inode *ei,
+ struct dentry *parent, bool lookup)
{
bool invalidate = false;
- struct dentry *dentry;
+ struct dentry *dentry = NULL;
mutex_lock(&eventfs_mutex);
- if (ef->is_freed) {
+ if (pei->is_freed || ei->is_freed) {
mutex_unlock(&eventfs_mutex);
return NULL;
}
- if (ef->dentry) {
- dentry = ef->dentry;
- /* On dir open, up the ref count */
+ if (ei->dentry) {
+ /* If the dentry already has a dentry, use it */
+ dentry = ei->dentry;
+ /* lookup does not need to up the ref count */
if (!lookup)
dget(dentry);
mutex_unlock(&eventfs_mutex);
@@ -302,42 +456,44 @@ create_dentry(struct eventfs_file *ef, struct dentry *parent, bool lookup)
}
mutex_unlock(&eventfs_mutex);
+ /* The lookup already has the parent->d_inode locked */
if (!lookup)
inode_lock(parent->d_inode);
- if (ef->ei)
- dentry = create_dir(ef->name, parent, ef->data);
- else
- dentry = create_file(ef->name, ef->mode, parent,
- ef->data, ef->fop);
+ dentry = create_dir(ei, parent);
if (!lookup)
inode_unlock(parent->d_inode);
mutex_lock(&eventfs_mutex);
- if (IS_ERR_OR_NULL(dentry)) {
- /* If the ef was already updated get it */
- dentry = ef->dentry;
+
+ if (IS_ERR_OR_NULL(dentry) && !ei->is_freed) {
+ /*
+ * When the mutex was released, something else could have
+ * created the dentry for this e_dentry. In which case
+ * use that one.
+ *
+ * Note, with the mutex held, the e_dentry cannot have content
+ * and the ei->is_freed be true at the same time.
+ */
+ dentry = ei->dentry;
if (dentry && !lookup)
dget(dentry);
mutex_unlock(&eventfs_mutex);
return dentry;
}
- if (!ef->dentry && !ef->is_freed) {
- ef->dentry = dentry;
- if (ef->ei)
- eventfs_post_create_dir(ef);
- dentry->d_fsdata = ef;
+ if (!ei->dentry && !ei->is_freed) {
+ ei->dentry = dentry;
+ eventfs_post_create_dir(ei);
+ dentry->d_fsdata = ei;
} else {
- /* A race here, should try again (unless freed) */
- invalidate = true;
-
/*
* Should never happen unless we get here due to being freed.
* Otherwise it means two dentries exist with the same name.
*/
- WARN_ON_ONCE(!ef->is_freed);
+ WARN_ON_ONCE(!ei->is_freed);
+ invalidate = true;
}
mutex_unlock(&eventfs_mutex);
if (invalidate)
@@ -349,50 +505,90 @@ create_dentry(struct eventfs_file *ef, struct dentry *parent, bool lookup)
return invalidate ? NULL : dentry;
}
-static bool match_event_file(struct eventfs_file *ef, const char *name)
-{
- bool ret;
-
- mutex_lock(&eventfs_mutex);
- ret = !ef->is_freed && strcmp(ef->name, name) == 0;
- mutex_unlock(&eventfs_mutex);
-
- return ret;
-}
-
/**
* eventfs_root_lookup - lookup routine to create file/dir
* @dir: in which a lookup is being done
* @dentry: file/dir dentry
- * @flags: to pass as flags parameter to simple lookup
+ * @flags: Just passed to simple_lookup()
*
- * Used to create a dynamic file/dir within @dir. Use the eventfs_inode
- * list of meta data to find the information needed to create the file/dir.
+ * Used to create a dynamic file or directory within @dir. The @ei child
+ * list and entries are searched, and if @dentry's name is found the
+ * file or directory is created on the fly.
*/
+
static struct dentry *eventfs_root_lookup(struct inode *dir,
struct dentry *dentry,
unsigned int flags)
{
+ const struct file_operations *fops;
+ const struct eventfs_entry *entry;
+ struct eventfs_inode *ei_child;
struct tracefs_inode *ti;
struct eventfs_inode *ei;
- struct eventfs_file *ef;
+ struct dentry *ei_dentry = NULL;
struct dentry *ret = NULL;
+ const char *name = dentry->d_name.name;
+ bool created = false;
+ umode_t mode;
+ void *data;
int idx;
+ int i;
+ int r;
ti = get_tracefs(dir);
if (!(ti->flags & TRACEFS_EVENT_INODE))
return NULL;
- ei = ti->private;
+ /* Grab srcu to prevent the ei from going away */
idx = srcu_read_lock(&eventfs_srcu);
- list_for_each_entry_srcu(ef, &ei->e_top_files, list,
+
+ /*
+ * Grab the eventfs_mutex to consistent value from ti->private.
+ * This s
+ */
+ mutex_lock(&eventfs_mutex);
+ ei = READ_ONCE(ti->private);
+ if (ei && !ei->is_freed)
+ ei_dentry = READ_ONCE(ei->dentry);
+ mutex_unlock(&eventfs_mutex);
+
+ if (!ei || !ei_dentry)
+ goto out;
+
+ data = ei->data;
+
+ list_for_each_entry_srcu(ei_child, &ei->children, list,
srcu_read_lock_held(&eventfs_srcu)) {
- if (!match_event_file(ef, dentry->d_name.name))
+ if (strcmp(ei_child->name, name) != 0)
continue;
ret = simple_lookup(dir, dentry, flags);
- create_dentry(ef, ef->d_parent, true);
+ create_dir_dentry(ei, ei_child, ei_dentry, true);
+ created = true;
break;
}
+
+ if (created)
+ goto out;
+
+ for (i = 0; i < ei->nr_entries; i++) {
+ entry = &ei->entries[i];
+ if (strcmp(name, entry->name) == 0) {
+ void *cdata = data;
+ mutex_lock(&eventfs_mutex);
+ /* If ei->is_freed, then the event itself may be too */
+ if (!ei->is_freed)
+ r = entry->callback(name, &mode, &cdata, &fops);
+ else
+ r = -1;
+ mutex_unlock(&eventfs_mutex);
+ if (r <= 0)
+ continue;
+ ret = simple_lookup(dir, dentry, flags);
+ create_file_dentry(ei, i, ei_dentry, name, mode, cdata,
+ fops, true);
+ break;
+ }
+ }
+ out:
srcu_read_unlock(&eventfs_srcu, idx);
return ret;
}
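Earlier in this file the EVENTFS_SAVE_MODE/UID/GID bits are defined just above the 16 permission bits of attr->mode, with EVENTFS_MODE_MASK recovering the plain mode, and update_attr()/update_inode_attr() rely on that layout. A runnable userspace sketch of the packing, using local stand-in macros rather than the kernel definitions:

#include <stdio.h>

/* Local stand-ins for EVENTFS_SAVE_* and EVENTFS_MODE_MASK. */
#define SAVE_MODE	(1u << 16)
#define SAVE_UID	(1u << 17)
#define SAVE_GID	(1u << 18)
#define MODE_MASK	(SAVE_MODE - 1)	/* low 16 bits: the umode_t itself */

int main(void)
{
	unsigned int attr_mode = 0;

	/* chmod 0600: remember the permission bits and mark them as saved */
	attr_mode = (attr_mode & ~MODE_MASK) | (0600 & MODE_MASK) | SAVE_MODE;
	/* chown: only the marker bit lives here, uid/gid are stored separately */
	attr_mode |= SAVE_UID;

	printf("saved mode: %o\n", attr_mode & MODE_MASK);	/* 600 */
	printf("mode saved? %d\n", !!(attr_mode & SAVE_MODE));	/* 1 */
	printf("uid saved?  %d\n", !!(attr_mode & SAVE_UID));	/* 1 */
	printf("gid saved?  %d\n", !!(attr_mode & SAVE_GID));	/* 0 */
	return 0;
}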
@@ -432,29 +628,48 @@ static int eventfs_release(struct inode *inode, struct file *file)
return dcache_dir_close(inode, file);
}
+static int add_dentries(struct dentry ***dentries, struct dentry *d, int cnt)
+{
+ struct dentry **tmp;
+
+ tmp = krealloc(*dentries, sizeof(d) * (cnt + 2), GFP_KERNEL);
+ if (!tmp)
+ return -1;
+ tmp[cnt] = d;
+ tmp[cnt + 1] = NULL;
+ *dentries = tmp;
+ return 0;
+}
+
/**
* dcache_dir_open_wrapper - eventfs open wrapper
* @inode: not used
- * @file: dir to be opened (to create its child)
+ * @file: dir to be opened (to create its children)
*
- * Used to dynamically create the file/dir within @file. @file is really a
- * directory and all the files/dirs of the children within @file will be
- * created. If any of the files/dirs have already been created, their
- * reference count will be incremented.
+ * Used to dynamically create the files and directories within @file;
+ * all of them are created here. If a dentry has already been created,
+ * its reference count is incremented instead.
*/
static int dcache_dir_open_wrapper(struct inode *inode, struct file *file)
{
+ const struct file_operations *fops;
+ const struct eventfs_entry *entry;
+ struct eventfs_inode *ei_child;
struct tracefs_inode *ti;
struct eventfs_inode *ei;
- struct eventfs_file *ef;
struct dentry_list *dlist;
struct dentry **dentries = NULL;
- struct dentry *dentry = file_dentry(file);
+ struct dentry *parent = file_dentry(file);
struct dentry *d;
struct inode *f_inode = file_inode(file);
+ const char *name = parent->d_name.name;
+ umode_t mode;
+ void *data;
int cnt = 0;
int idx;
int ret;
+ int i;
+ int r;
ti = get_tracefs(f_inode);
if (!(ti->flags & TRACEFS_EVENT_INODE))
@@ -463,25 +678,56 @@ static int dcache_dir_open_wrapper(struct inode *inode, struct file *file)
if (WARN_ON_ONCE(file->private_data))
return -EINVAL;
+ idx = srcu_read_lock(&eventfs_srcu);
+
+ mutex_lock(&eventfs_mutex);
+ ei = READ_ONCE(ti->private);
+ mutex_unlock(&eventfs_mutex);
+
+ if (!ei) {
+ srcu_read_unlock(&eventfs_srcu, idx);
+ return -EINVAL;
+ }
+
+
+ data = ei->data;
+
dlist = kmalloc(sizeof(*dlist), GFP_KERNEL);
- if (!dlist)
+ if (!dlist) {
+ srcu_read_unlock(&eventfs_srcu, idx);
return -ENOMEM;
+ }
- ei = ti->private;
- idx = srcu_read_lock(&eventfs_srcu);
- list_for_each_entry_srcu(ef, &ei->e_top_files, list,
+ list_for_each_entry_srcu(ei_child, &ei->children, list,
srcu_read_lock_held(&eventfs_srcu)) {
- d = create_dentry(ef, dentry, false);
+ d = create_dir_dentry(ei, ei_child, parent, false);
if (d) {
- struct dentry **tmp;
+ ret = add_dentries(&dentries, d, cnt);
+ if (ret < 0)
+ break;
+ cnt++;
+ }
+ }
- tmp = krealloc(dentries, sizeof(d) * (cnt + 2), GFP_KERNEL);
- if (!tmp)
+ for (i = 0; i < ei->nr_entries; i++) {
+ void *cdata = data;
+ entry = &ei->entries[i];
+ name = entry->name;
+ mutex_lock(&eventfs_mutex);
+ /* If ei->is_freed, then the event itself may be too */
+ if (!ei->is_freed)
+ r = entry->callback(name, &mode, &cdata, &fops);
+ else
+ r = -1;
+ mutex_unlock(&eventfs_mutex);
+ if (r <= 0)
+ continue;
+ d = create_file_dentry(ei, i, parent, name, mode, cdata, fops, false);
+ if (d) {
+ ret = add_dentries(&dentries, d, cnt);
+ if (ret < 0)
break;
- tmp[cnt] = d;
- tmp[cnt + 1] = NULL;
cnt++;
- dentries = tmp;
}
}
srcu_read_unlock(&eventfs_srcu, idx);
@@ -514,63 +760,104 @@ static int dcache_readdir_wrapper(struct file *file, struct dir_context *ctx)
}
/**
- * eventfs_prepare_ef - helper function to prepare eventfs_file
- * @name: the name of the file/directory to create.
- * @mode: the permission that the file should have.
- * @fop: struct file_operations that should be used for this file/directory.
- * @iop: struct inode_operations that should be used for this file/directory.
- * @data: something that the caller will want to get to later on. The
- * inode.i_private pointer will point to this value on the open() call.
+ * eventfs_create_dir - Create the eventfs_inode for this directory
+ * @name: The name of the directory to create.
+ * @parent: The eventfs_inode of the parent directory.
+ * @entries: A list of entries that represent the files under this directory
+ * @size: The number of @entries
+ * @data: The default data to pass to the files (an entry may override it).
+ *
+ * This function creates the descriptor to represent a directory in the
+ * eventfs. This descriptor is an eventfs_inode, and it is returned to be
+ * used to create other children underneath.
+ *
+ * The @entries is an array of eventfs_entry structures which has:
+ * const char *name
+ * eventfs_callback callback;
*
- * This function allocates and fills the eventfs_file structure.
+ * The name is the name of the file, and the callback is a pointer to a function
+ * that will be called when the file is referenced (either by lookup or by
+ * reading a directory). The callback is of the prototype:
+ *
+ * int callback(const char *name, umode_t *mode, void **data,
+ * const struct file_operations **fops);
+ *
+ * When a file needs to be created, this callback will be called with
+ * name = the name of the file being created (so that the same callback
+ * may be used for multiple files).
+ * mode = a place to set the file's mode
+ * data = A pointer to @data, and the callback may replace it, which will
+ * cause the file created to pass the new data to the open() call.
+ * fops = the fops to use for the created file.
+ *
+ * NB. @callback is called while holding internal locks of the eventfs
+ * system. The callback must not call any code that might also call into
+ * the tracefs or eventfs system or it will risk creating a deadlock.
*/
-static struct eventfs_file *eventfs_prepare_ef(const char *name, umode_t mode,
- const struct file_operations *fop,
- const struct inode_operations *iop,
- void *data)
+struct eventfs_inode *eventfs_create_dir(const char *name, struct eventfs_inode *parent,
+ const struct eventfs_entry *entries,
+ int size, void *data)
{
- struct eventfs_file *ef;
+ struct eventfs_inode *ei;
- ef = kzalloc(sizeof(*ef), GFP_KERNEL);
- if (!ef)
+ if (!parent)
+ return ERR_PTR(-EINVAL);
+
+ ei = kzalloc(sizeof(*ei), GFP_KERNEL);
+ if (!ei)
return ERR_PTR(-ENOMEM);
- ef->name = kstrdup(name, GFP_KERNEL);
- if (!ef->name) {
- kfree(ef);
+ ei->name = kstrdup_const(name, GFP_KERNEL);
+ if (!ei->name) {
+ kfree(ei);
return ERR_PTR(-ENOMEM);
}
- if (S_ISDIR(mode)) {
- ef->ei = kzalloc(sizeof(*ef->ei), GFP_KERNEL);
- if (!ef->ei) {
- kfree(ef->name);
- kfree(ef);
+ if (size) {
+ ei->d_children = kzalloc(sizeof(*ei->d_children) * size, GFP_KERNEL);
+ if (!ei->d_children) {
+ kfree_const(ei->name);
+ kfree(ei);
return ERR_PTR(-ENOMEM);
}
- INIT_LIST_HEAD(&ef->ei->e_top_files);
- } else {
- ef->ei = NULL;
}
- ef->iop = iop;
- ef->fop = fop;
- ef->mode = mode;
- ef->data = data;
- return ef;
+ ei->entries = entries;
+ ei->nr_entries = size;
+ ei->data = data;
+ INIT_LIST_HEAD(&ei->children);
+ INIT_LIST_HEAD(&ei->list);
+
+ mutex_lock(&eventfs_mutex);
+ if (!parent->is_freed) {
+ list_add_tail(&ei->list, &parent->children);
+ ei->d_parent = parent->dentry;
+ }
+ mutex_unlock(&eventfs_mutex);
+
+ /* Was the parent freed? */
+ if (list_empty(&ei->list)) {
+ free_ei(ei);
+ ei = NULL;
+ }
+ return ei;
}
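As the kerneldoc above describes, callers hand eventfs_create_dir() a table of eventfs_entry records whose callbacks are consulted at lookup and readdir time. A hedged caller-side sketch; the entry fields and the callback prototype come from that documentation, while example_*, parent_ei and default_data are illustrative placeholders:

/* Sketch of a caller-side table; not taken from any real tracefs user. */
static int example_callback(const char *name, umode_t *mode, void **data,
			    const struct file_operations **fops)
{
	/* The same callback may serve several files; dispatch on @name. */
	if (strcmp(name, "enable") == 0) {
		*mode = 0644;
		*fops = &example_enable_fops;	/* assumed to exist elsewhere */
		return 1;			/* > 0: create this file */
	}
	return 0;				/* <= 0: skip this entry */
}

static const struct eventfs_entry example_entries[] = {
	{ .name = "enable",	.callback = example_callback },
};

/* ... somewhere during setup ... */
ei = eventfs_create_dir("example", parent_ei, example_entries,
			ARRAY_SIZE(example_entries), default_data);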
/**
- * eventfs_create_events_dir - create the trace event structure
- * @name: the name of the directory to create.
- * @parent: parent dentry for this file. This should be a directory dentry
- * if set. If this parameter is NULL, then the directory will be
- * created in the root of the tracefs filesystem.
+ * eventfs_create_events_dir - create the top level events directory
+ * @name: The name of the top level directory to create.
+ * @parent: Parent dentry for this file in the tracefs directory.
+ * @entries: A list of entries that represent the files under this directory
+ * @size: The number of @entries
+ * @data: The default data to pass to the files (an entry may override it).
*
* This function creates the top of the trace event directory.
+ *
+ * See eventfs_create_dir() for use of @entries.
*/
-struct dentry *eventfs_create_events_dir(const char *name,
- struct dentry *parent)
+struct eventfs_inode *eventfs_create_events_dir(const char *name, struct dentry *parent,
+ const struct eventfs_entry *entries,
+ int size, void *data)
{
struct dentry *dentry = tracefs_start_creating(name, parent);
struct eventfs_inode *ei;
@@ -581,19 +868,32 @@ struct dentry *eventfs_create_events_dir(const char *name,
return NULL;
if (IS_ERR(dentry))
- return dentry;
+ return ERR_CAST(dentry);
ei = kzalloc(sizeof(*ei), GFP_KERNEL);
if (!ei)
- return ERR_PTR(-ENOMEM);
+ goto fail_ei;
+
inode = tracefs_get_inode(dentry->d_sb);
- if (unlikely(!inode)) {
- kfree(ei);
- tracefs_failed_creating(dentry);
- return ERR_PTR(-ENOMEM);
+ if (unlikely(!inode))
+ goto fail;
+
+ if (size) {
+ ei->d_children = kzalloc(sizeof(*ei->d_children) * size, GFP_KERNEL);
+ if (!ei->d_children)
+ goto fail;
}
- INIT_LIST_HEAD(&ei->e_top_files);
+ ei->dentry = dentry;
+ ei->entries = entries;
+ ei->nr_entries = size;
+ ei->data = data;
+ ei->name = kstrdup_const(name, GFP_KERNEL);
+ if (!ei->name)
+ goto fail;
+
+ INIT_LIST_HEAD(&ei->children);
+ INIT_LIST_HEAD(&ei->list);
ti = get_tracefs(inode);
ti->flags |= TRACEFS_EVENT_INODE | TRACEFS_EVENT_TOP_INODE;
@@ -603,198 +903,97 @@ struct dentry *eventfs_create_events_dir(const char *name,
inode->i_op = &eventfs_root_dir_inode_operations;
inode->i_fop = &eventfs_file_operations;
+ dentry->d_fsdata = ei;
+
/* directory inodes start off with i_nlink == 2 (for "." entry) */
inc_nlink(inode);
d_instantiate(dentry, inode);
inc_nlink(dentry->d_parent->d_inode);
fsnotify_mkdir(dentry->d_parent->d_inode, dentry);
- return tracefs_end_creating(dentry);
-}
-
-/**
- * eventfs_add_subsystem_dir - add eventfs subsystem_dir to list to create later
- * @name: the name of the file to create.
- * @parent: parent dentry for this dir.
- *
- * This function adds eventfs subsystem dir to list.
- * And all these dirs are created on the fly when they are looked up,
- * and the dentry and inodes will be removed when they are done.
- */
-struct eventfs_file *eventfs_add_subsystem_dir(const char *name,
- struct dentry *parent)
-{
- struct tracefs_inode *ti_parent;
- struct eventfs_inode *ei_parent;
- struct eventfs_file *ef;
-
- if (security_locked_down(LOCKDOWN_TRACEFS))
- return NULL;
-
- if (!parent)
- return ERR_PTR(-EINVAL);
-
- ti_parent = get_tracefs(parent->d_inode);
- ei_parent = ti_parent->private;
+ tracefs_end_creating(dentry);
- ef = eventfs_prepare_ef(name, S_IFDIR, NULL, NULL, NULL);
- if (IS_ERR(ef))
- return ef;
+ return ei;
- mutex_lock(&eventfs_mutex);
- list_add_tail(&ef->list, &ei_parent->e_top_files);
- ef->d_parent = parent;
- mutex_unlock(&eventfs_mutex);
- return ef;
+ fail:
+ kfree(ei->d_children);
+ kfree(ei);
+ fail_ei:
+ tracefs_failed_creating(dentry);
+ return ERR_PTR(-ENOMEM);
}
-/**
- * eventfs_add_dir - add eventfs dir to list to create later
- * @name: the name of the file to create.
- * @ef_parent: parent eventfs_file for this dir.
- *
- * This function adds eventfs dir to list.
- * And all these dirs are created on the fly when they are looked up,
- * and the dentry and inodes will be removed when they are done.
- */
-struct eventfs_file *eventfs_add_dir(const char *name,
- struct eventfs_file *ef_parent)
-{
- struct eventfs_file *ef;
-
- if (security_locked_down(LOCKDOWN_TRACEFS))
- return NULL;
-
- if (!ef_parent)
- return ERR_PTR(-EINVAL);
-
- ef = eventfs_prepare_ef(name, S_IFDIR, NULL, NULL, NULL);
- if (IS_ERR(ef))
- return ef;
+static LLIST_HEAD(free_list);
- mutex_lock(&eventfs_mutex);
- list_add_tail(&ef->list, &ef_parent->ei->e_top_files);
- ef->d_parent = ef_parent->dentry;
- mutex_unlock(&eventfs_mutex);
- return ef;
-}
-
-/**
- * eventfs_add_events_file - add the data needed to create a file for later reference
- * @name: the name of the file to create.
- * @mode: the permission that the file should have.
- * @parent: parent dentry for this file.
- * @data: something that the caller will want to get to later on.
- * @fop: struct file_operations that should be used for this file.
- *
- * This function is used to add the information needed to create a
- * dentry/inode within the top level events directory. The file created
- * will have the @mode permissions. The @data will be used to fill the
- * inode.i_private when the open() call is done. The dentry and inodes are
- * all created when they are referenced, and removed when they are no
- * longer referenced.
- */
-int eventfs_add_events_file(const char *name, umode_t mode,
- struct dentry *parent, void *data,
- const struct file_operations *fop)
+static void eventfs_workfn(struct work_struct *work)
{
- struct tracefs_inode *ti;
- struct eventfs_inode *ei;
- struct eventfs_file *ef;
-
- if (security_locked_down(LOCKDOWN_TRACEFS))
- return -ENODEV;
-
- if (!parent)
- return -EINVAL;
-
- if (!(mode & S_IFMT))
- mode |= S_IFREG;
-
- if (!parent->d_inode)
- return -EINVAL;
-
- ti = get_tracefs(parent->d_inode);
- if (!(ti->flags & TRACEFS_EVENT_INODE))
- return -EINVAL;
-
- ei = ti->private;
- ef = eventfs_prepare_ef(name, mode, fop, NULL, data);
-
- if (IS_ERR(ef))
- return -ENOMEM;
-
- mutex_lock(&eventfs_mutex);
- list_add_tail(&ef->list, &ei->e_top_files);
- ef->d_parent = parent;
- mutex_unlock(&eventfs_mutex);
- return 0;
+ struct eventfs_inode *ei, *tmp;
+ struct llist_node *llnode;
+
+ llnode = llist_del_all(&free_list);
+ llist_for_each_entry_safe(ei, tmp, llnode, llist) {
+ /* This dput() matches the dget() from unhook_dentry() */
+ for (int i = 0; i < ei->nr_entries; i++) {
+ if (ei->d_children[i])
+ dput(ei->d_children[i]);
+ }
+ /* This should only get here if it had a dentry */
+ if (!WARN_ON_ONCE(!ei->dentry))
+ dput(ei->dentry);
+ }
}
-/**
- * eventfs_add_file - add eventfs file to list to create later
- * @name: the name of the file to create.
- * @mode: the permission that the file should have.
- * @ef_parent: parent eventfs_file for this file.
- * @data: something that the caller will want to get to later on.
- * @fop: struct file_operations that should be used for this file.
- *
- * This function is used to add the information needed to create a
- * file within a subdirectory of the events directory. The file created
- * will have the @mode permissions. The @data will be used to fill the
- * inode.i_private when the open() call is done. The dentry and inodes are
- * all created when they are referenced, and removed when they are no
- * longer referenced.
- */
-int eventfs_add_file(const char *name, umode_t mode,
- struct eventfs_file *ef_parent,
- void *data,
- const struct file_operations *fop)
-{
- struct eventfs_file *ef;
-
- if (security_locked_down(LOCKDOWN_TRACEFS))
- return -ENODEV;
+static DECLARE_WORK(eventfs_work, eventfs_workfn);
- if (!ef_parent)
- return -EINVAL;
+static void free_rcu_ei(struct rcu_head *head)
+{
+ struct eventfs_inode *ei = container_of(head, struct eventfs_inode, rcu);
- if (!(mode & S_IFMT))
- mode |= S_IFREG;
+ if (ei->dentry) {
+ /* Do not free the ei until all references of dentry are gone */
+ if (llist_add(&ei->llist, &free_list))
+ queue_work(system_unbound_wq, &eventfs_work);
+ return;
+ }
- ef = eventfs_prepare_ef(name, mode, fop, NULL, data);
- if (IS_ERR(ef))
- return -ENOMEM;
+ /* If the ei doesn't have a dentry, neither should its children */
+ for (int i = 0; i < ei->nr_entries; i++) {
+ WARN_ON_ONCE(ei->d_children[i]);
+ }
- mutex_lock(&eventfs_mutex);
- list_add_tail(&ef->list, &ef_parent->ei->e_top_files);
- ef->d_parent = ef_parent->dentry;
- mutex_unlock(&eventfs_mutex);
- return 0;
+ free_ei(ei);
}
-static void free_ef(struct rcu_head *head)
+static void unhook_dentry(struct dentry *dentry)
{
- struct eventfs_file *ef = container_of(head, struct eventfs_file, rcu);
+ if (!dentry)
+ return;
+ /*
+ * Need to add a reference to the dentry that is expected by
+ * simple_recursive_removal(), which will include a dput().
+ */
+ dget(dentry);
- kfree(ef->name);
- kfree(ef->ei);
- kfree(ef);
+ /*
+ * Also add a reference for the dput() in eventfs_workfn().
+ * That is required as that dput() will free the ei after
+ * the SRCU grace period is over.
+ */
+ dget(dentry);
}
/**
* eventfs_remove_rec - remove eventfs dir or file from list
- * @ef: eventfs_file to be removed.
- * @head: to create list of eventfs_file to be deleted
- * @level: to check recursion depth
+ * @ei: eventfs_inode to be removed.
+ * @level: prevent recursion from going more than 3 levels deep.
*
- * The helper function eventfs_remove_rec() is used to clean up and free the
- * associated data from eventfs for both of the added functions.
+ * This function recursively removes eventfs_inodes, which
+ * contain information about files and/or directories.
*/
-static void eventfs_remove_rec(struct eventfs_file *ef, struct list_head *head, int level)
+static void eventfs_remove_rec(struct eventfs_inode *ei, int level)
{
- struct eventfs_file *ef_child;
+ struct eventfs_inode *ei_child;
- if (!ef)
+ if (!ei)
return;
/*
* Check recursion depth. It should never be greater than 3:
@@ -806,100 +1005,76 @@ static void eventfs_remove_rec(struct eventfs_file *ef, struct list_head *head,
if (WARN_ON_ONCE(level > 3))
return;
- if (ef->ei) {
- /* search for nested folders or files */
- list_for_each_entry_srcu(ef_child, &ef->ei->e_top_files, list,
- lockdep_is_held(&eventfs_mutex)) {
- eventfs_remove_rec(ef_child, head, level + 1);
+ /* search for nested folders or files */
+ list_for_each_entry_srcu(ei_child, &ei->children, list,
+ lockdep_is_held(&eventfs_mutex)) {
+ /* Children only have dentry if parent does */
+ WARN_ON_ONCE(ei_child->dentry && !ei->dentry);
+ eventfs_remove_rec(ei_child, level + 1);
+ }
+
+
+ ei->is_freed = 1;
+
+ for (int i = 0; i < ei->nr_entries; i++) {
+ if (ei->d_children[i]) {
+ /* Children only have dentry if parent does */
+ WARN_ON_ONCE(!ei->dentry);
+ unhook_dentry(ei->d_children[i]);
}
}
- list_del_rcu(&ef->list);
- list_add_tail(&ef->del_list, head);
+ unhook_dentry(ei->dentry);
+
+ list_del_rcu(&ei->list);
+ call_srcu(&eventfs_srcu, &ei->rcu, free_rcu_ei);
}
/**
- * eventfs_remove - remove eventfs dir or file from list
- * @ef: eventfs_file to be removed.
+ * eventfs_remove_dir - remove eventfs dir or file from list
+ * @ei: eventfs_inode to be removed.
*
 * This function acquires the eventfs_mutex lock and calls eventfs_remove_rec()
*/
-void eventfs_remove(struct eventfs_file *ef)
+void eventfs_remove_dir(struct eventfs_inode *ei)
{
- struct eventfs_file *tmp;
- LIST_HEAD(ef_del_list);
- struct dentry *dentry_list = NULL;
struct dentry *dentry;
- if (!ef)
+ if (!ei)
return;
mutex_lock(&eventfs_mutex);
- eventfs_remove_rec(ef, &ef_del_list, 0);
- list_for_each_entry_safe(ef, tmp, &ef_del_list, del_list) {
- if (ef->dentry) {
- unsigned long ptr = (unsigned long)dentry_list;
-
- /* Keep the dentry from being freed yet */
- dget(ef->dentry);
-
- /*
- * Paranoid: The dget() above should prevent the dentry
- * from being freed and calling eventfs_set_ef_status_free().
- * But just in case, set the link list LSB pointer to 1
- * and have eventfs_set_ef_status_free() check that to
- * make sure that if it does happen, it will not think
- * the d_fsdata is an event_file.
- *
- * For this to work, no event_file should be allocated
- * on a odd space, as the ef should always be allocated
- * to be at least word aligned. Check for that too.
- */
- WARN_ON_ONCE(ptr & 1);
-
- ef->dentry->d_fsdata = (void *)(ptr | 1);
- dentry_list = ef->dentry;
- ef->dentry = NULL;
- }
- call_srcu(&eventfs_srcu, &ef->rcu, free_ef);
- }
+ dentry = ei->dentry;
+ eventfs_remove_rec(ei, 0);
mutex_unlock(&eventfs_mutex);
- while (dentry_list) {
- unsigned long ptr;
-
- dentry = dentry_list;
- ptr = (unsigned long)dentry->d_fsdata & ~1UL;
- dentry_list = (struct dentry *)ptr;
- dentry->d_fsdata = NULL;
- d_invalidate(dentry);
- mutex_lock(&eventfs_mutex);
- /* dentry should now have at least a single reference */
- WARN_ONCE((int)d_count(dentry) < 1,
- "dentry %p less than one reference (%d) after invalidate\n",
- dentry, d_count(dentry));
- mutex_unlock(&eventfs_mutex);
- dput(dentry);
- }
+ /*
+ * If any of the ei children has a dentry, then the ei itself
+ * must have a dentry.
+ */
+ if (dentry)
+ simple_recursive_removal(dentry, NULL);
}
/**
- * eventfs_remove_events_dir - remove eventfs dir or file from list
- * @dentry: events's dentry to be removed.
+ * eventfs_remove_events_dir - remove the top level eventfs directory
+ * @ei: the event_inode returned by eventfs_create_events_dir().
*
- * This function remove events main directory
+ * This function removes the events main directory
*/
-void eventfs_remove_events_dir(struct dentry *dentry)
+void eventfs_remove_events_dir(struct eventfs_inode *ei)
{
- struct tracefs_inode *ti;
-
- if (!dentry || !dentry->d_inode)
- return;
+ struct dentry *dentry;
- ti = get_tracefs(dentry->d_inode);
- if (!ti || !(ti->flags & TRACEFS_EVENT_INODE))
- return;
+ dentry = ei->dentry;
+ eventfs_remove_dir(ei);
- d_invalidate(dentry);
+ /*
+ * Matches the dget() done by tracefs_start_creating()
+ * in eventfs_create_events_dir() when the dentry was
+ * created. In other words, it's a normal dentry that
+ * sticks around while the other ei->dentry are created
+ * and destroyed dynamically.
+ */
dput(dentry);
}
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index 429603d865a9..5b54948514fe 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -385,7 +385,7 @@ static void tracefs_dentry_iput(struct dentry *dentry, struct inode *inode)
ti = get_tracefs(inode);
if (ti && ti->flags & TRACEFS_EVENT_INODE)
- eventfs_set_ef_status_free(ti, dentry);
+ eventfs_set_ei_status_free(ti, dentry);
iput(inode);
}
diff --git a/fs/tracefs/internal.h b/fs/tracefs/internal.h
index 4f2e49e2197b..ccee18ca66c7 100644
--- a/fs/tracefs/internal.h
+++ b/fs/tracefs/internal.h
@@ -13,6 +13,58 @@ struct tracefs_inode {
struct inode vfs_inode;
};
+/*
+ * struct eventfs_attr - cache the mode and ownership of an eventfs entry
+ * @mode: saved mode plus flags of what is saved
+ * @uid: saved uid if changed
+ * @gid: saved gid if changed
+ */
+struct eventfs_attr {
+ int mode;
+ kuid_t uid;
+ kgid_t gid;
+};
+
+/*
+ * struct eventfs_inode - hold the properties of the eventfs directories.
+ * @list: link list into the parent directory
+ * @entries: the array of entries representing the files in the directory
+ * @name: the name of the directory to create
+ * @children: link list into the child eventfs_inode
+ * @dentry: the dentry of the directory
+ * @d_parent: pointer to the parent's dentry
+ * @d_children: The array of dentries to represent the files when created
+ * @entry_attrs: Saved mode and ownership of the @d_children
+ * @attr: Saved mode and ownership of eventfs_inode itself
+ * @data: The private data to pass to the callbacks
+ * @is_freed: Flag set if the eventfs is on its way to be freed
+ * Note if is_freed is set, then dentry is corrupted.
+ * @nr_entries: The number of items in @entries
+ */
+struct eventfs_inode {
+ struct list_head list;
+ const struct eventfs_entry *entries;
+ const char *name;
+ struct list_head children;
+ struct dentry *dentry; /* Check is_freed to access */
+ struct dentry *d_parent;
+ struct dentry **d_children;
+ struct eventfs_attr *entry_attrs;
+ struct eventfs_attr attr;
+ void *data;
+ /*
+ * Union - used for deletion
+ * @llist: for calling dput() if needed after RCU
+ * @rcu: eventfs_inode to delete in RCU
+ */
+ union {
+ struct llist_node llist;
+ struct rcu_head rcu;
+ };
+ unsigned int is_freed:1;
+ unsigned int nr_entries:31;
+};
+
static inline struct tracefs_inode *get_tracefs(const struct inode *inode)
{
return container_of(inode, struct tracefs_inode, vfs_inode);
@@ -25,6 +77,6 @@ struct inode *tracefs_get_inode(struct super_block *sb);
struct dentry *eventfs_start_creating(const char *name, struct dentry *parent);
struct dentry *eventfs_failed_creating(struct dentry *dentry);
struct dentry *eventfs_end_creating(struct dentry *dentry);
-void eventfs_set_ef_status_free(struct tracefs_inode *ti, struct dentry *dentry);
+void eventfs_set_ei_status_free(struct tracefs_inode *ti, struct dentry *dentry);
#endif /* _TRACEFS_INTERNAL_H */
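
The "Check is_freed to access" annotation on @dentry above is worth spelling out. A sketch of the access pattern it implies follows (the ei pointer and its caller are hypothetical; the pattern mirrors the is_freed test that eventfs_create_dir() performs while holding eventfs_mutex):

	/* Sketch: safely dereferencing ei->dentry per the is_freed rule. */
	struct dentry *dentry = NULL;

	mutex_lock(&eventfs_mutex);
	if (!ei->is_freed)
		dentry = dget(ei->dentry);	/* may still be NULL if never created */
	mutex_unlock(&eventfs_mutex);

	if (dentry) {
		/* ... use the dentry while holding a reference ... */
		dput(dentry);
	}
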
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 7af442de44c3..3b13c648d490 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -725,7 +725,7 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
struct inode *inode = d_inode(old_dentry);
struct ubifs_inode *ui = ubifs_inode(inode);
struct ubifs_inode *dir_ui = ubifs_inode(dir);
- int err, sz_change = CALC_DENT_SIZE(dentry->d_name.len);
+ int err, sz_change;
struct ubifs_budget_req req = { .new_dent = 1, .dirtied_ino = 2,
.dirtied_ino_d = ALIGN(ui->data_len, 8) };
struct fscrypt_name nm;
@@ -749,6 +749,8 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
if (err)
return err;
+ sz_change = CALC_DENT_SIZE(fname_len(&nm));
+
err = dbg_check_synced_i_size(c, inode);
if (err)
goto out_fname;
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 2e65fd2dbdc3..2d2b39f843ce 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1375,6 +1375,9 @@ static inline int mctime_update_needed(const struct inode *inode,
/**
* ubifs_update_time - update time of inode.
* @inode: inode to update
+ * @time: timespec structure holding the current time value
+ * @flags: time update control flags that determine which time
+ *         fields of @inode are updated
*
* This function updates time of the inode.
*/
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index d69d2154645b..f0a5538c84b0 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -1607,6 +1607,7 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
ubifs_err(c, "bad data node (block %u, inode %lu)",
blk, inode->i_ino);
ubifs_dump_node(c, dn, dn_size);
+ err = -EUCLEAN;
goto out_free;
}
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 0d0478815d4d..09e270d6ed02 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -919,8 +919,10 @@ static void free_buds(struct ubifs_info *c)
{
struct ubifs_bud *bud, *n;
- rbtree_postorder_for_each_entry_safe(bud, n, &c->buds, rb)
+ rbtree_postorder_for_each_entry_safe(bud, n, &c->buds, rb) {
+ kfree(bud->log_hash);
kfree(bud);
+ }
}
/**
@@ -1189,6 +1191,7 @@ static void destroy_journal(struct ubifs_info *c)
bud = list_entry(c->old_buds.next, struct ubifs_bud, list);
list_del(&bud->list);
+ kfree(bud->log_hash);
kfree(bud);
}
ubifs_destroy_idx_gc(c);
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 6b7d95b65f4b..f4728e65d1bd 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -65,6 +65,7 @@ static void do_insert_old_idx(struct ubifs_info *c,
else {
ubifs_err(c, "old idx added twice!");
kfree(old_idx);
+ return;
}
}
rb_link_node(&old_idx->rb, parent, p);
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 23377c1baed9..a480810cd4e3 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -137,6 +137,7 @@ static struct dentry *ufs_get_parent(struct dentry *child)
}
static const struct export_operations ufs_export_ops = {
+ .encode_fh = generic_encode_ino32_fh,
.fh_to_dentry = ufs_fh_to_dentry,
.fh_to_parent = ufs_fh_to_parent,
.get_parent = ufs_get_parent,
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 30c931b38853..be62acffad6c 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -21,7 +21,7 @@
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
-#include "xfs_rtalloc.h"
+#include "xfs_rtbitmap.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
@@ -2989,7 +2989,7 @@ xfs_bmap_extsize_align(
* If realtime, and the result isn't a multiple of the realtime
* extent size we need to remove blocks until it is.
*/
- if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
+ if (rt && (temp = xfs_extlen_to_rtxmod(mp, align_alen))) {
/*
* We're not covering the original request, or
* we won't be able to once we fix the length.
@@ -3016,7 +3016,7 @@ xfs_bmap_extsize_align(
else {
align_alen -= orig_off - align_off;
align_off = orig_off;
- align_alen -= align_alen % mp->m_sb.sb_rextsize;
+ align_alen -= xfs_extlen_to_rtxmod(mp, align_alen);
}
/*
* Result doesn't cover the request, fail it.
@@ -4826,12 +4826,8 @@ xfs_bmap_del_extent_delay(
ASSERT(got->br_startoff <= del->br_startoff);
ASSERT(got_endoff >= del_endoff);
- if (isrt) {
- uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
-
- do_div(rtexts, mp->m_sb.sb_rextsize);
- xfs_mod_frextents(mp, rtexts);
- }
+ if (isrt)
+ xfs_mod_frextents(mp, xfs_rtb_to_rtx(mp, del->br_blockcount));
/*
* Update the inode delalloc counter now and wait to update the
@@ -5057,33 +5053,20 @@ xfs_bmap_del_extent_real(
flags = XFS_ILOG_CORE;
if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
- xfs_filblks_t len;
- xfs_extlen_t mod;
-
- len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize,
- &mod);
- ASSERT(mod == 0);
-
if (!(bflags & XFS_BMAPI_REMAP)) {
- xfs_fsblock_t bno;
-
- bno = div_u64_rem(del->br_startblock,
- mp->m_sb.sb_rextsize, &mod);
- ASSERT(mod == 0);
-
- error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
+ error = xfs_rtfree_blocks(tp, del->br_startblock,
+ del->br_blockcount);
if (error)
goto done;
}
do_fx = 0;
- nblks = len * mp->m_sb.sb_rextsize;
qfield = XFS_TRANS_DQ_RTBCOUNT;
} else {
do_fx = 1;
- nblks = del->br_blockcount;
qfield = XFS_TRANS_DQ_BCOUNT;
}
+ nblks = del->br_blockcount;
del_endblock = del->br_startblock + del->br_blockcount;
if (cur) {
@@ -5289,7 +5272,6 @@ __xfs_bunmapi(
int tmp_logflags; /* partial logging flags */
int wasdel; /* was a delayed alloc extent */
int whichfork; /* data or attribute fork */
- xfs_fsblock_t sum;
xfs_filblks_t len = *rlen; /* length to unmap in file */
xfs_fileoff_t end;
struct xfs_iext_cursor icur;
@@ -5384,8 +5366,8 @@ __xfs_bunmapi(
if (!isrt)
goto delete;
- sum = del.br_startblock + del.br_blockcount;
- div_u64_rem(sum, mp->m_sb.sb_rextsize, &mod);
+ mod = xfs_rtb_to_rtxoff(mp,
+ del.br_startblock + del.br_blockcount);
if (mod) {
/*
* Realtime extent not lined up at the end.
@@ -5432,7 +5414,8 @@ __xfs_bunmapi(
goto error0;
goto nodelete;
}
- div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod);
+
+ mod = xfs_rtb_to_rtxoff(mp, del.br_startblock);
if (mod) {
xfs_extlen_t off = mp->m_sb.sb_rextsize - mod;
@@ -6209,8 +6192,8 @@ xfs_bmap_validate_extent(
return __this_address;
if (XFS_IS_REALTIME_INODE(ip) && whichfork == XFS_DATA_FORK) {
- if (!xfs_verify_rtext(mp, irec->br_startblock,
- irec->br_blockcount))
+ if (!xfs_verify_rtbext(mp, irec->br_startblock,
+ irec->br_blockcount))
return __this_address;
} else {
if (!xfs_verify_fsbext(mp, irec->br_startblock,
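
The hunks above replace open-coded sb_rextsize arithmetic with named conversion helpers. Their assumed semantics, expressed in terms of the arithmetic they replace, are sketched below (a sketch only; the real definitions live elsewhere in this series and may special-case power-of-two extent sizes):

	/* Sketch of the assumed helper semantics (not the real definitions). */
	static inline xfs_extlen_t
	xfs_extlen_to_rtxmod(struct xfs_mount *mp, xfs_extlen_t len)
	{
		return len % mp->m_sb.sb_rextsize;	/* remainder in rt blocks */
	}

	static inline xfs_rtxnum_t
	xfs_rtb_to_rtx(struct xfs_mount *mp, xfs_rtblock_t rtbno)
	{
		return div_u64(rtbno, mp->m_sb.sb_rextsize); /* rt blocks -> rt extents */
	}

	static inline xfs_extlen_t
	xfs_rtb_to_rtxoff(struct xfs_mount *mp, xfs_rtblock_t rtbno)
	{
		u32 mod;

		div_u64_rem(rtbno, mp->m_sb.sb_rextsize, &mod);
		return mod;		/* rt block offset within its rt extent */
	}
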
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index 371dc07233e0..9a88aba1589f 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -98,7 +98,7 @@ typedef struct xfs_sb {
uint32_t sb_blocksize; /* logical block size, bytes */
xfs_rfsblock_t sb_dblocks; /* number of data blocks */
xfs_rfsblock_t sb_rblocks; /* number of realtime blocks */
- xfs_rtblock_t sb_rextents; /* number of realtime extents */
+ xfs_rtbxlen_t sb_rextents; /* number of realtime extents */
uuid_t sb_uuid; /* user-visible file system unique id */
xfs_fsblock_t sb_logstart; /* starting block of log if internal */
xfs_ino_t sb_rootino; /* root inode number */
@@ -691,6 +691,22 @@ struct xfs_agfl {
xfs_daddr_to_agno(mp, (d) + (len) - 1)))
/*
+ * Realtime bitmap information is accessed by the word, which is currently
+ * stored in host-endian format.
+ */
+union xfs_rtword_raw {
+ __u32 old;
+};
+
+/*
+ * Realtime summary counts are accessed by the word, which is currently
+ * stored in host-endian format.
+ */
+union xfs_suminfo_raw {
+ __u32 old;
+};
+
+/*
* XFS Timestamps
* ==============
*
@@ -1142,24 +1158,10 @@ static inline bool xfs_dinode_has_large_extent_counts(
#define XFS_BLOCKSIZE(mp) ((mp)->m_sb.sb_blocksize)
#define XFS_BLOCKMASK(mp) ((mp)->m_blockmask)
-#define XFS_BLOCKWSIZE(mp) ((mp)->m_blockwsize)
-#define XFS_BLOCKWMASK(mp) ((mp)->m_blockwmask)
/*
- * RT Summary and bit manipulation macros.
+ * RT bit manipulation macros.
*/
-#define XFS_SUMOFFS(mp,ls,bb) ((int)((ls) * (mp)->m_sb.sb_rbmblocks + (bb)))
-#define XFS_SUMOFFSTOBLOCK(mp,s) \
- (((s) * (uint)sizeof(xfs_suminfo_t)) >> (mp)->m_sb.sb_blocklog)
-#define XFS_SUMPTR(mp,bp,so) \
- ((xfs_suminfo_t *)((bp)->b_addr + \
- (((so) * (uint)sizeof(xfs_suminfo_t)) & XFS_BLOCKMASK(mp))))
-
-#define XFS_BITTOBLOCK(mp,bi) ((bi) >> (mp)->m_blkbit_log)
-#define XFS_BLOCKTOBIT(mp,bb) ((bb) << (mp)->m_blkbit_log)
-#define XFS_BITTOWORD(mp,bi) \
- ((int)(((bi) >> XFS_NBWORDLOG) & XFS_BLOCKWMASK(mp)))
-
#define XFS_RTMIN(a,b) ((a) < (b) ? (a) : (b))
#define XFS_RTMAX(a,b) ((a) > (b) ? (a) : (b))
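
The raw-word unions above look redundant today, but they give the realtime bitmap and summary code a single accessor seam: readers and writers go through a helper that touches the .old member instead of casting buffer memory directly, so an on-disk (endian-converted) format could be added later without churning every call site. A sketch of the assumed accessor shape (the names are illustrative; the real helpers appear as xfs_rtbitmap_getword()/xfs_rtbitmap_setword() later in this series):

	/* Sketch: going through the union rather than raw buffer memory. */
	static inline xfs_rtword_t
	rtword_get(const union xfs_rtword_raw *raw)
	{
		return raw->old;	/* currently stored host-endian */
	}

	static inline void
	rtword_set(union xfs_rtword_raw *raw, xfs_rtword_t val)
	{
		raw->old = val;
	}
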
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
index 396648acb5be..c269d704314d 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.c
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c
@@ -16,6 +16,7 @@
#include "xfs_trans.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
+#include "xfs_rtbitmap.h"
/*
* Realtime allocator bitmap functions shared with userspace.
@@ -46,25 +47,69 @@ const struct xfs_buf_ops xfs_rtbuf_ops = {
.verify_write = xfs_rtbuf_verify_write,
};
+/* Release cached rt bitmap and summary buffers. */
+void
+xfs_rtbuf_cache_relse(
+ struct xfs_rtalloc_args *args)
+{
+ if (args->rbmbp) {
+ xfs_trans_brelse(args->tp, args->rbmbp);
+ args->rbmbp = NULL;
+ args->rbmoff = NULLFILEOFF;
+ }
+ if (args->sumbp) {
+ xfs_trans_brelse(args->tp, args->sumbp);
+ args->sumbp = NULL;
+ args->sumoff = NULLFILEOFF;
+ }
+}
+
/*
* Get a buffer for the bitmap or summary file block specified.
* The buffer is returned read and locked.
*/
int
xfs_rtbuf_get(
- xfs_mount_t *mp, /* file system mount structure */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_rtblock_t block, /* block number in bitmap or summary */
- int issum, /* is summary not bitmap */
- struct xfs_buf **bpp) /* output: buffer for the block */
+ struct xfs_rtalloc_args *args,
+ xfs_fileoff_t block, /* block number in bitmap or summary */
+ int issum) /* is summary not bitmap */
{
- struct xfs_buf *bp; /* block buffer, result */
- xfs_inode_t *ip; /* bitmap or summary inode */
- xfs_bmbt_irec_t map;
- int nmap = 1;
- int error; /* error value */
+ struct xfs_mount *mp = args->mp;
+ struct xfs_buf **cbpp; /* cached block buffer */
+ xfs_fileoff_t *coffp; /* cached block number */
+ struct xfs_buf *bp; /* block buffer, result */
+ struct xfs_inode *ip; /* bitmap or summary inode */
+ struct xfs_bmbt_irec map;
+ enum xfs_blft type;
+ int nmap = 1;
+ int error;
- ip = issum ? mp->m_rsumip : mp->m_rbmip;
+ if (issum) {
+ cbpp = &args->sumbp;
+ coffp = &args->sumoff;
+ ip = mp->m_rsumip;
+ type = XFS_BLFT_RTSUMMARY_BUF;
+ } else {
+ cbpp = &args->rbmbp;
+ coffp = &args->rbmoff;
+ ip = mp->m_rbmip;
+ type = XFS_BLFT_RTBITMAP_BUF;
+ }
+
+ /*
+ * If we have a cached buffer, and the block number matches, use that.
+ */
+ if (*cbpp && *coffp == block)
+ return 0;
+
+ /*
+ * Otherwise we have to get the buffer. If there was an old
+ * one, get rid of it first.
+ */
+ if (*cbpp) {
+ xfs_trans_brelse(args->tp, *cbpp);
+ *cbpp = NULL;
+ }
error = xfs_bmapi_read(ip, block, 1, &map, &nmap, 0);
if (error)
@@ -74,15 +119,15 @@ xfs_rtbuf_get(
return -EFSCORRUPTED;
ASSERT(map.br_startblock != NULLFSBLOCK);
- error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
+ error = xfs_trans_read_buf(mp, args->tp, mp->m_ddev_targp,
XFS_FSB_TO_DADDR(mp, map.br_startblock),
mp->m_bsize, 0, &bp, &xfs_rtbuf_ops);
if (error)
return error;
- xfs_trans_buf_set_type(tp, bp, issum ? XFS_BLFT_RTSUMMARY_BUF
- : XFS_BLFT_RTBITMAP_BUF);
- *bpp = bp;
+ xfs_trans_buf_set_type(args->tp, bp, type);
+ *cbpp = bp;
+ *coffp = block;
return 0;
}
@@ -92,47 +137,44 @@ xfs_rtbuf_get(
*/
int
xfs_rtfind_back(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_rtblock_t start, /* starting block to look at */
- xfs_rtblock_t limit, /* last block to look at */
- xfs_rtblock_t *rtblock) /* out: start block found */
+ struct xfs_rtalloc_args *args,
+ xfs_rtxnum_t start, /* starting rtext to look at */
+ xfs_rtxnum_t limit, /* last rtext to look at */
+ xfs_rtxnum_t *rtx) /* out: start rtext found */
{
- xfs_rtword_t *b; /* current word in buffer */
- int bit; /* bit number in the word */
- xfs_rtblock_t block; /* bitmap block number */
- struct xfs_buf *bp; /* buf for the block */
- xfs_rtword_t *bufp; /* starting word in buffer */
- int error; /* error value */
- xfs_rtblock_t firstbit; /* first useful bit in the word */
- xfs_rtblock_t i; /* current bit number rel. to start */
- xfs_rtblock_t len; /* length of inspected area */
- xfs_rtword_t mask; /* mask of relevant bits for value */
- xfs_rtword_t want; /* mask for "good" values */
- xfs_rtword_t wdiff; /* difference from wanted value */
- int word; /* word number in the buffer */
+ struct xfs_mount *mp = args->mp;
+ int bit; /* bit number in the word */
+ xfs_fileoff_t block; /* bitmap block number */
+ int error; /* error value */
+ xfs_rtxnum_t firstbit; /* first useful bit in the word */
+ xfs_rtxnum_t i; /* current bit number rel. to start */
+ xfs_rtxnum_t len; /* length of inspected area */
+ xfs_rtword_t mask; /* mask of relevant bits for value */
+ xfs_rtword_t want; /* mask for "good" values */
+ xfs_rtword_t wdiff; /* difference from wanted value */
+ xfs_rtword_t incore;
+ unsigned int word; /* word number in the buffer */
/*
* Compute and read in starting bitmap block for starting block.
*/
- block = XFS_BITTOBLOCK(mp, start);
- error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
- if (error) {
+ block = xfs_rtx_to_rbmblock(mp, start);
+ error = xfs_rtbitmap_read_buf(args, block);
+ if (error)
return error;
- }
- bufp = bp->b_addr;
+
/*
* Get the first word's index & point to it.
*/
- word = XFS_BITTOWORD(mp, start);
- b = &bufp[word];
+ word = xfs_rtx_to_rbmword(mp, start);
bit = (int)(start & (XFS_NBWORD - 1));
len = start - limit + 1;
/*
* Compute match value, based on the bit at start: if 1 (free)
* then all-ones, else all-zeroes.
*/
- want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
+ incore = xfs_rtbitmap_getword(args, word);
+ want = (incore & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
/*
* If the starting position is not word-aligned, deal with the
* partial word.
@@ -149,13 +191,12 @@ xfs_rtfind_back(
* Calculate the difference between the value there
* and what we're looking for.
*/
- if ((wdiff = (*b ^ want) & mask)) {
+ if ((wdiff = (incore ^ want) & mask)) {
/*
* Different. Mark where we are and return.
*/
- xfs_trans_brelse(tp, bp);
i = bit - XFS_RTHIBIT(wdiff);
- *rtblock = start - i + 1;
+ *rtx = start - i + 1;
return 0;
}
i = bit - firstbit + 1;
@@ -167,19 +208,11 @@ xfs_rtfind_back(
/*
* If done with this block, get the previous one.
*/
- xfs_trans_brelse(tp, bp);
- error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
- if (error) {
+ error = xfs_rtbitmap_read_buf(args, --block);
+ if (error)
return error;
- }
- bufp = bp->b_addr;
- word = XFS_BLOCKWMASK(mp);
- b = &bufp[word];
- } else {
- /*
- * Go on to the previous word in the buffer.
- */
- b--;
+
+ word = mp->m_blockwsize - 1;
}
} else {
/*
@@ -195,13 +228,13 @@ xfs_rtfind_back(
/*
* Compute difference between actual and desired value.
*/
- if ((wdiff = *b ^ want)) {
+ incore = xfs_rtbitmap_getword(args, word);
+ if ((wdiff = incore ^ want)) {
/*
* Different, mark where we are and return.
*/
- xfs_trans_brelse(tp, bp);
i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff);
- *rtblock = start - i + 1;
+ *rtx = start - i + 1;
return 0;
}
i += XFS_NBWORD;
@@ -213,19 +246,11 @@ xfs_rtfind_back(
/*
* If done with this block, get the previous one.
*/
- xfs_trans_brelse(tp, bp);
- error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
- if (error) {
+ error = xfs_rtbitmap_read_buf(args, --block);
+ if (error)
return error;
- }
- bufp = bp->b_addr;
- word = XFS_BLOCKWMASK(mp);
- b = &bufp[word];
- } else {
- /*
- * Go on to the previous word in the buffer.
- */
- b--;
+
+ word = mp->m_blockwsize - 1;
}
}
/*
@@ -242,13 +267,13 @@ xfs_rtfind_back(
/*
* Compute difference between actual and desired value.
*/
- if ((wdiff = (*b ^ want) & mask)) {
+ incore = xfs_rtbitmap_getword(args, word);
+ if ((wdiff = (incore ^ want) & mask)) {
/*
* Different, mark where we are and return.
*/
- xfs_trans_brelse(tp, bp);
i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff);
- *rtblock = start - i + 1;
+ *rtx = start - i + 1;
return 0;
} else
i = len;
@@ -256,8 +281,7 @@ xfs_rtfind_back(
/*
* No match, return that we scanned the whole area.
*/
- xfs_trans_brelse(tp, bp);
- *rtblock = start - i + 1;
+ *rtx = start - i + 1;
return 0;
}
@@ -267,47 +291,44 @@ xfs_rtfind_back(
*/
int
xfs_rtfind_forw(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_rtblock_t start, /* starting block to look at */
- xfs_rtblock_t limit, /* last block to look at */
- xfs_rtblock_t *rtblock) /* out: start block found */
+ struct xfs_rtalloc_args *args,
+ xfs_rtxnum_t start, /* starting rtext to look at */
+ xfs_rtxnum_t limit, /* last rtext to look at */
+ xfs_rtxnum_t *rtx) /* out: start rtext found */
{
- xfs_rtword_t *b; /* current word in buffer */
- int bit; /* bit number in the word */
- xfs_rtblock_t block; /* bitmap block number */
- struct xfs_buf *bp; /* buf for the block */
- xfs_rtword_t *bufp; /* starting word in buffer */
- int error; /* error value */
- xfs_rtblock_t i; /* current bit number rel. to start */
- xfs_rtblock_t lastbit; /* last useful bit in the word */
- xfs_rtblock_t len; /* length of inspected area */
- xfs_rtword_t mask; /* mask of relevant bits for value */
- xfs_rtword_t want; /* mask for "good" values */
- xfs_rtword_t wdiff; /* difference from wanted value */
- int word; /* word number in the buffer */
+ struct xfs_mount *mp = args->mp;
+ int bit; /* bit number in the word */
+ xfs_fileoff_t block; /* bitmap block number */
+ int error;
+ xfs_rtxnum_t i; /* current bit number rel. to start */
+ xfs_rtxnum_t lastbit; /* last useful bit in the word */
+ xfs_rtxnum_t len; /* length of inspected area */
+ xfs_rtword_t mask; /* mask of relevant bits for value */
+ xfs_rtword_t want; /* mask for "good" values */
+ xfs_rtword_t wdiff; /* difference from wanted value */
+ xfs_rtword_t incore;
+ unsigned int word; /* word number in the buffer */
/*
* Compute and read in starting bitmap block for starting block.
*/
- block = XFS_BITTOBLOCK(mp, start);
- error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
- if (error) {
+ block = xfs_rtx_to_rbmblock(mp, start);
+ error = xfs_rtbitmap_read_buf(args, block);
+ if (error)
return error;
- }
- bufp = bp->b_addr;
+
/*
* Get the first word's index & point to it.
*/
- word = XFS_BITTOWORD(mp, start);
- b = &bufp[word];
+ word = xfs_rtx_to_rbmword(mp, start);
bit = (int)(start & (XFS_NBWORD - 1));
len = limit - start + 1;
/*
* Compute match value, based on the bit at start: if 1 (free)
* then all-ones, else all-zeroes.
*/
- want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
+ incore = xfs_rtbitmap_getword(args, word);
+ want = (incore & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
/*
* If the starting position is not word-aligned, deal with the
* partial word.
@@ -323,13 +344,12 @@ xfs_rtfind_forw(
* Calculate the difference between the value there
* and what we're looking for.
*/
- if ((wdiff = (*b ^ want) & mask)) {
+ if ((wdiff = (incore ^ want) & mask)) {
/*
* Different. Mark where we are and return.
*/
- xfs_trans_brelse(tp, bp);
i = XFS_RTLOBIT(wdiff) - bit;
- *rtblock = start + i - 1;
+ *rtx = start + i - 1;
return 0;
}
i = lastbit - bit;
@@ -337,22 +357,15 @@ xfs_rtfind_forw(
* Go on to next block if that's where the next word is
* and we need the next word.
*/
- if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+ if (++word == mp->m_blockwsize && i < len) {
/*
* If done with this block, get the previous one.
*/
- xfs_trans_brelse(tp, bp);
- error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
- if (error) {
+ error = xfs_rtbitmap_read_buf(args, ++block);
+ if (error)
return error;
- }
- b = bufp = bp->b_addr;
+
word = 0;
- } else {
- /*
- * Go on to the previous word in the buffer.
- */
- b++;
}
} else {
/*
@@ -368,13 +381,13 @@ xfs_rtfind_forw(
/*
* Compute difference between actual and desired value.
*/
- if ((wdiff = *b ^ want)) {
+ incore = xfs_rtbitmap_getword(args, word);
+ if ((wdiff = incore ^ want)) {
/*
* Different, mark where we are and return.
*/
- xfs_trans_brelse(tp, bp);
i += XFS_RTLOBIT(wdiff);
- *rtblock = start + i - 1;
+ *rtx = start + i - 1;
return 0;
}
i += XFS_NBWORD;
@@ -382,22 +395,15 @@ xfs_rtfind_forw(
* Go on to next block if that's where the next word is
* and we need the next word.
*/
- if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+ if (++word == mp->m_blockwsize && i < len) {
/*
* If done with this block, get the next one.
*/
- xfs_trans_brelse(tp, bp);
- error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
- if (error) {
+ error = xfs_rtbitmap_read_buf(args, ++block);
+ if (error)
return error;
- }
- b = bufp = bp->b_addr;
+
word = 0;
- } else {
- /*
- * Go on to the next word in the buffer.
- */
- b++;
}
}
/*
@@ -412,13 +418,13 @@ xfs_rtfind_forw(
/*
* Compute difference between actual and desired value.
*/
- if ((wdiff = (*b ^ want) & mask)) {
+ incore = xfs_rtbitmap_getword(args, word);
+ if ((wdiff = (incore ^ want) & mask)) {
/*
* Different, mark where we are and return.
*/
- xfs_trans_brelse(tp, bp);
i += XFS_RTLOBIT(wdiff);
- *rtblock = start + i - 1;
+ *rtx = start + i - 1;
return 0;
} else
i = len;
@@ -426,11 +432,25 @@ xfs_rtfind_forw(
/*
* No match, return that we scanned the whole area.
*/
- xfs_trans_brelse(tp, bp);
- *rtblock = start + i - 1;
+ *rtx = start + i - 1;
return 0;
}
+/* Log rtsummary counter at @infoword. */
+static inline void
+xfs_trans_log_rtsummary(
+ struct xfs_rtalloc_args *args,
+ unsigned int infoword)
+{
+ struct xfs_buf *bp = args->sumbp;
+ size_t first, last;
+
+ first = (void *)xfs_rsumblock_infoptr(args, infoword) - bp->b_addr;
+ last = first + sizeof(xfs_suminfo_t) - 1;
+
+ xfs_trans_log_buf(args->tp, bp, first, last);
+}
+
/*
* Read and/or modify the summary information for a given extent size,
* bitmap block combination.
@@ -442,86 +462,77 @@ xfs_rtfind_forw(
*/
int
xfs_rtmodify_summary_int(
- xfs_mount_t *mp, /* file system mount structure */
- xfs_trans_t *tp, /* transaction pointer */
- int log, /* log2 of extent size */
- xfs_rtblock_t bbno, /* bitmap block number */
- int delta, /* change to make to summary info */
- struct xfs_buf **rbpp, /* in/out: summary block buffer */
- xfs_fsblock_t *rsb, /* in/out: summary block number */
- xfs_suminfo_t *sum) /* out: summary info for this block */
+ struct xfs_rtalloc_args *args,
+ int log, /* log2 of extent size */
+ xfs_fileoff_t bbno, /* bitmap block number */
+ int delta, /* change to make to summary info */
+ xfs_suminfo_t *sum) /* out: summary info for this block */
{
- struct xfs_buf *bp; /* buffer for the summary block */
- int error; /* error value */
- xfs_fsblock_t sb; /* summary fsblock */
- int so; /* index into the summary file */
- xfs_suminfo_t *sp; /* pointer to returned data */
+ struct xfs_mount *mp = args->mp;
+ int error;
+ xfs_fileoff_t sb; /* summary fsblock */
+ xfs_rtsumoff_t so; /* index into the summary file */
+ unsigned int infoword;
/*
* Compute entry number in the summary file.
*/
- so = XFS_SUMOFFS(mp, log, bbno);
+ so = xfs_rtsumoffs(mp, log, bbno);
/*
* Compute the block number in the summary file.
*/
- sb = XFS_SUMOFFSTOBLOCK(mp, so);
- /*
- * If we have an old buffer, and the block number matches, use that.
- */
- if (*rbpp && *rsb == sb)
- bp = *rbpp;
- /*
- * Otherwise we have to get the buffer.
- */
- else {
- /*
- * If there was an old one, get rid of it first.
- */
- if (*rbpp)
- xfs_trans_brelse(tp, *rbpp);
- error = xfs_rtbuf_get(mp, tp, sb, 1, &bp);
- if (error) {
- return error;
- }
- /*
- * Remember this buffer and block for the next call.
- */
- *rbpp = bp;
- *rsb = sb;
- }
+ sb = xfs_rtsumoffs_to_block(mp, so);
+
+ error = xfs_rtsummary_read_buf(args, sb);
+ if (error)
+ return error;
+
/*
* Point to the summary information, modify/log it, and/or copy it out.
*/
- sp = XFS_SUMPTR(mp, bp, so);
+ infoword = xfs_rtsumoffs_to_infoword(mp, so);
if (delta) {
- uint first = (uint)((char *)sp - (char *)bp->b_addr);
+ xfs_suminfo_t val = xfs_suminfo_add(args, infoword, delta);
- *sp += delta;
if (mp->m_rsum_cache) {
- if (*sp == 0 && log == mp->m_rsum_cache[bbno])
- mp->m_rsum_cache[bbno]++;
- if (*sp != 0 && log < mp->m_rsum_cache[bbno])
+ if (val == 0 && log + 1 == mp->m_rsum_cache[bbno])
mp->m_rsum_cache[bbno] = log;
+ if (val != 0 && log >= mp->m_rsum_cache[bbno])
+ mp->m_rsum_cache[bbno] = log + 1;
}
- xfs_trans_log_buf(tp, bp, first, first + sizeof(*sp) - 1);
+ xfs_trans_log_rtsummary(args, infoword);
+ if (sum)
+ *sum = val;
+ } else if (sum) {
+ *sum = xfs_suminfo_get(args, infoword);
}
- if (sum)
- *sum = *sp;
return 0;
}
int
xfs_rtmodify_summary(
- xfs_mount_t *mp, /* file system mount structure */
- xfs_trans_t *tp, /* transaction pointer */
- int log, /* log2 of extent size */
- xfs_rtblock_t bbno, /* bitmap block number */
- int delta, /* change to make to summary info */
- struct xfs_buf **rbpp, /* in/out: summary block buffer */
- xfs_fsblock_t *rsb) /* in/out: summary block number */
+ struct xfs_rtalloc_args *args,
+ int log, /* log2 of extent size */
+ xfs_fileoff_t bbno, /* bitmap block number */
+ int delta) /* change to make to summary info */
+{
+ return xfs_rtmodify_summary_int(args, log, bbno, delta, NULL);
+}
+
+/* Log rtbitmap block from the word @from to the byte before @next. */
+static inline void
+xfs_trans_log_rtbitmap(
+ struct xfs_rtalloc_args *args,
+ unsigned int from,
+ unsigned int next)
{
- return xfs_rtmodify_summary_int(mp, tp, log, bbno,
- delta, rbpp, rsb, NULL);
+ struct xfs_buf *bp = args->rbmbp;
+ size_t first, last;
+
+ first = (void *)xfs_rbmblock_wordptr(args, from) - bp->b_addr;
+ last = ((void *)xfs_rbmblock_wordptr(args, next) - 1) - bp->b_addr;
+
+ xfs_trans_log_buf(args->tp, bp, first, last);
}
/*
@@ -530,41 +541,37 @@ xfs_rtmodify_summary(
*/
int
xfs_rtmodify_range(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_rtblock_t start, /* starting block to modify */
- xfs_extlen_t len, /* length of extent to modify */
- int val) /* 1 for free, 0 for allocated */
+ struct xfs_rtalloc_args *args,
+ xfs_rtxnum_t start, /* starting rtext to modify */
+ xfs_rtxlen_t len, /* length of extent to modify */
+ int val) /* 1 for free, 0 for allocated */
{
- xfs_rtword_t *b; /* current word in buffer */
- int bit; /* bit number in the word */
- xfs_rtblock_t block; /* bitmap block number */
- struct xfs_buf *bp; /* buf for the block */
- xfs_rtword_t *bufp; /* starting word in buffer */
- int error; /* error value */
- xfs_rtword_t *first; /* first used word in the buffer */
- int i; /* current bit number rel. to start */
- int lastbit; /* last useful bit in word */
- xfs_rtword_t mask; /* mask o frelevant bits for value */
- int word; /* word number in the buffer */
+ struct xfs_mount *mp = args->mp;
+ int bit; /* bit number in the word */
+ xfs_fileoff_t block; /* bitmap block number */
+ int error;
+ int i; /* current bit number rel. to start */
+ int lastbit; /* last useful bit in word */
+ xfs_rtword_t mask; /* mask of relevant bits for value */
+ xfs_rtword_t incore;
+ unsigned int firstword; /* first word used in the buffer */
+ unsigned int word; /* word number in the buffer */
/*
* Compute starting bitmap block number.
*/
- block = XFS_BITTOBLOCK(mp, start);
+ block = xfs_rtx_to_rbmblock(mp, start);
/*
* Read the bitmap block, and point to its data.
*/
- error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
- if (error) {
+ error = xfs_rtbitmap_read_buf(args, block);
+ if (error)
return error;
- }
- bufp = bp->b_addr;
+
/*
* Compute the starting word's address, and starting bit.
*/
- word = XFS_BITTOWORD(mp, start);
- first = b = &bufp[word];
+ firstword = word = xfs_rtx_to_rbmword(mp, start);
bit = (int)(start & (XFS_NBWORD - 1));
/*
* 0 (allocated) => all zeroes; 1 (free) => all ones.
@@ -583,34 +590,28 @@ xfs_rtmodify_range(
/*
* Set/clear the active bits.
*/
+ incore = xfs_rtbitmap_getword(args, word);
if (val)
- *b |= mask;
+ incore |= mask;
else
- *b &= ~mask;
+ incore &= ~mask;
+ xfs_rtbitmap_setword(args, word, incore);
i = lastbit - bit;
/*
* Go on to the next block if that's where the next word is
* and we need the next word.
*/
- if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+ if (++word == mp->m_blockwsize && i < len) {
/*
* Log the changed part of this block.
* Get the next one.
*/
- xfs_trans_log_buf(tp, bp,
- (uint)((char *)first - (char *)bufp),
- (uint)((char *)b - (char *)bufp));
- error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
- if (error) {
+ xfs_trans_log_rtbitmap(args, firstword, word);
+ error = xfs_rtbitmap_read_buf(args, ++block);
+ if (error)
return error;
- }
- first = b = bufp = bp->b_addr;
- word = 0;
- } else {
- /*
- * Go on to the next word in the buffer
- */
- b++;
+
+ firstword = word = 0;
}
} else {
/*
@@ -626,31 +627,23 @@ xfs_rtmodify_range(
/*
* Set the word value correctly.
*/
- *b = val;
+ xfs_rtbitmap_setword(args, word, val);
i += XFS_NBWORD;
/*
* Go on to the next block if that's where the next word is
* and we need the next word.
*/
- if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+ if (++word == mp->m_blockwsize && i < len) {
/*
* Log the changed part of this block.
* Get the next one.
*/
- xfs_trans_log_buf(tp, bp,
- (uint)((char *)first - (char *)bufp),
- (uint)((char *)b - (char *)bufp));
- error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
- if (error) {
+ xfs_trans_log_rtbitmap(args, firstword, word);
+ error = xfs_rtbitmap_read_buf(args, ++block);
+ if (error)
return error;
- }
- first = b = bufp = bp->b_addr;
- word = 0;
- } else {
- /*
- * Go on to the next word in the buffer
- */
- b++;
+
+ firstword = word = 0;
}
}
/*
@@ -665,18 +658,19 @@ xfs_rtmodify_range(
/*
* Set/clear the active bits.
*/
+ incore = xfs_rtbitmap_getword(args, word);
if (val)
- *b |= mask;
+ incore |= mask;
else
- *b &= ~mask;
- b++;
+ incore &= ~mask;
+ xfs_rtbitmap_setword(args, word, incore);
+ word++;
}
/*
* Log any remaining changed bytes.
*/
- if (b > first)
- xfs_trans_log_buf(tp, bp, (uint)((char *)first - (char *)bufp),
- (uint)((char *)b - (char *)bufp - 1));
+ if (word > firstword)
+ xfs_trans_log_rtbitmap(args, firstword, word);
return 0;
}
@@ -686,23 +680,21 @@ xfs_rtmodify_range(
*/
int
xfs_rtfree_range(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_rtblock_t start, /* starting block to free */
- xfs_extlen_t len, /* length to free */
- struct xfs_buf **rbpp, /* in/out: summary block buffer */
- xfs_fsblock_t *rsb) /* in/out: summary block number */
+ struct xfs_rtalloc_args *args,
+ xfs_rtxnum_t start, /* starting rtext to free */
+ xfs_rtxlen_t len) /* length of extent to free */
{
- xfs_rtblock_t end; /* end of the freed extent */
- int error; /* error value */
- xfs_rtblock_t postblock; /* first block freed > end */
- xfs_rtblock_t preblock; /* first block freed < start */
+ struct xfs_mount *mp = args->mp;
+ xfs_rtxnum_t end; /* end of the freed extent */
+ int error; /* error value */
+ xfs_rtxnum_t postblock; /* first rtext freed > end */
+ xfs_rtxnum_t preblock; /* first rtext freed < start */
end = start + len - 1;
/*
* Modify the bitmap to mark this extent freed.
*/
- error = xfs_rtmodify_range(mp, tp, start, len, 1);
+ error = xfs_rtmodify_range(args, start, len, 1);
if (error) {
return error;
}
@@ -711,15 +703,15 @@ xfs_rtfree_range(
* We need to find the beginning and end of the extent so we can
* properly update the summary.
*/
- error = xfs_rtfind_back(mp, tp, start, 0, &preblock);
+ error = xfs_rtfind_back(args, start, 0, &preblock);
if (error) {
return error;
}
/*
* Find the next allocated block (end of allocated extent).
*/
- error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
- &postblock);
+ error = xfs_rtfind_forw(args, end, mp->m_sb.sb_rextents - 1,
+ &postblock);
if (error)
return error;
/*
@@ -727,9 +719,9 @@ xfs_rtfree_range(
* old extent, add summary data for them to be allocated.
*/
if (preblock < start) {
- error = xfs_rtmodify_summary(mp, tp,
- XFS_RTBLOCKLOG(start - preblock),
- XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb);
+ error = xfs_rtmodify_summary(args,
+ XFS_RTBLOCKLOG(start - preblock),
+ xfs_rtx_to_rbmblock(mp, preblock), -1);
if (error) {
return error;
}
@@ -739,9 +731,9 @@ xfs_rtfree_range(
* old extent, add summary data for them to be allocated.
*/
if (postblock > end) {
- error = xfs_rtmodify_summary(mp, tp,
- XFS_RTBLOCKLOG(postblock - end),
- XFS_BITTOBLOCK(mp, end + 1), -1, rbpp, rsb);
+ error = xfs_rtmodify_summary(args,
+ XFS_RTBLOCKLOG(postblock - end),
+ xfs_rtx_to_rbmblock(mp, end + 1), -1);
if (error) {
return error;
}
@@ -750,10 +742,9 @@ xfs_rtfree_range(
* Increment the summary information corresponding to the entire
* (new) free extent.
*/
- error = xfs_rtmodify_summary(mp, tp,
- XFS_RTBLOCKLOG(postblock + 1 - preblock),
- XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb);
- return error;
+ return xfs_rtmodify_summary(args,
+ XFS_RTBLOCKLOG(postblock + 1 - preblock),
+ xfs_rtx_to_rbmblock(mp, preblock), 1);
}
/*
@@ -762,43 +753,39 @@ xfs_rtfree_range(
*/
int
xfs_rtcheck_range(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_rtblock_t start, /* starting block number of extent */
- xfs_extlen_t len, /* length of extent */
- int val, /* 1 for free, 0 for allocated */
- xfs_rtblock_t *new, /* out: first block not matching */
- int *stat) /* out: 1 for matches, 0 for not */
+ struct xfs_rtalloc_args *args,
+ xfs_rtxnum_t start, /* starting rtext number of extent */
+ xfs_rtxlen_t len, /* length of extent */
+ int val, /* 1 for free, 0 for allocated */
+ xfs_rtxnum_t *new, /* out: first rtext not matching */
+ int *stat) /* out: 1 for matches, 0 for not */
{
- xfs_rtword_t *b; /* current word in buffer */
- int bit; /* bit number in the word */
- xfs_rtblock_t block; /* bitmap block number */
- struct xfs_buf *bp; /* buf for the block */
- xfs_rtword_t *bufp; /* starting word in buffer */
- int error; /* error value */
- xfs_rtblock_t i; /* current bit number rel. to start */
- xfs_rtblock_t lastbit; /* last useful bit in word */
- xfs_rtword_t mask; /* mask of relevant bits for value */
- xfs_rtword_t wdiff; /* difference from wanted value */
- int word; /* word number in the buffer */
+ struct xfs_mount *mp = args->mp;
+ int bit; /* bit number in the word */
+ xfs_fileoff_t block; /* bitmap block number */
+ int error;
+ xfs_rtxnum_t i; /* current bit number rel. to start */
+ xfs_rtxnum_t lastbit; /* last useful bit in word */
+ xfs_rtword_t mask; /* mask of relevant bits for value */
+ xfs_rtword_t wdiff; /* difference from wanted value */
+ xfs_rtword_t incore;
+ unsigned int word; /* word number in the buffer */
/*
* Compute starting bitmap block number
*/
- block = XFS_BITTOBLOCK(mp, start);
+ block = xfs_rtx_to_rbmblock(mp, start);
/*
* Read the bitmap block.
*/
- error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
- if (error) {
+ error = xfs_rtbitmap_read_buf(args, block);
+ if (error)
return error;
- }
- bufp = bp->b_addr;
+
/*
* Compute the starting word's address, and starting bit.
*/
- word = XFS_BITTOWORD(mp, start);
- b = &bufp[word];
+ word = xfs_rtx_to_rbmword(mp, start);
bit = (int)(start & (XFS_NBWORD - 1));
/*
* 0 (allocated) => all zero's; 1 (free) => all one's.
@@ -820,11 +807,11 @@ xfs_rtcheck_range(
/*
* Compute difference between actual and desired value.
*/
- if ((wdiff = (*b ^ val) & mask)) {
+ incore = xfs_rtbitmap_getword(args, word);
+ if ((wdiff = (incore ^ val) & mask)) {
/*
* Different, compute first wrong bit and return.
*/
- xfs_trans_brelse(tp, bp);
i = XFS_RTLOBIT(wdiff) - bit;
*new = start + i;
*stat = 0;
@@ -835,22 +822,15 @@ xfs_rtcheck_range(
* Go on to next block if that's where the next word is
* and we need the next word.
*/
- if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+ if (++word == mp->m_blockwsize && i < len) {
/*
* If done with this block, get the next one.
*/
- xfs_trans_brelse(tp, bp);
- error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
- if (error) {
+ error = xfs_rtbitmap_read_buf(args, ++block);
+ if (error)
return error;
- }
- b = bufp = bp->b_addr;
+
word = 0;
- } else {
- /*
- * Go on to the next word in the buffer.
- */
- b++;
}
} else {
/*
@@ -866,11 +846,11 @@ xfs_rtcheck_range(
/*
* Compute difference between actual and desired value.
*/
- if ((wdiff = *b ^ val)) {
+ incore = xfs_rtbitmap_getword(args, word);
+ if ((wdiff = incore ^ val)) {
/*
* Different, compute first wrong bit and return.
*/
- xfs_trans_brelse(tp, bp);
i += XFS_RTLOBIT(wdiff);
*new = start + i;
*stat = 0;
@@ -881,22 +861,15 @@ xfs_rtcheck_range(
* Go on to next block if that's where the next word is
* and we need the next word.
*/
- if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+ if (++word == mp->m_blockwsize && i < len) {
/*
* If done with this block, get the next one.
*/
- xfs_trans_brelse(tp, bp);
- error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
- if (error) {
+ error = xfs_rtbitmap_read_buf(args, ++block);
+ if (error)
return error;
- }
- b = bufp = bp->b_addr;
+
word = 0;
- } else {
- /*
- * Go on to the next word in the buffer.
- */
- b++;
}
}
/*
@@ -911,11 +884,11 @@ xfs_rtcheck_range(
/*
* Compute difference between actual and desired value.
*/
- if ((wdiff = (*b ^ val) & mask)) {
+ incore = xfs_rtbitmap_getword(args, word);
+ if ((wdiff = (incore ^ val) & mask)) {
/*
* Different, compute first wrong bit and return.
*/
- xfs_trans_brelse(tp, bp);
i += XFS_RTLOBIT(wdiff);
*new = start + i;
*stat = 0;
@@ -926,7 +899,6 @@ xfs_rtcheck_range(
/*
* Successful, return.
*/
- xfs_trans_brelse(tp, bp);
*new = start + i;
*stat = 1;
return 0;
@@ -936,58 +908,57 @@ xfs_rtcheck_range(
/*
* Check that the given extent (block range) is allocated already.
*/
-STATIC int /* error */
+STATIC int
xfs_rtcheck_alloc_range(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_rtblock_t bno, /* starting block number of extent */
- xfs_extlen_t len) /* length of extent */
+ struct xfs_rtalloc_args *args,
+ xfs_rtxnum_t start, /* starting rtext number of extent */
+ xfs_rtxlen_t len) /* length of extent */
{
- xfs_rtblock_t new; /* dummy for xfs_rtcheck_range */
- int stat;
- int error;
+ xfs_rtxnum_t new; /* dummy for xfs_rtcheck_range */
+ int stat;
+ int error;
- error = xfs_rtcheck_range(mp, tp, bno, len, 0, &new, &stat);
+ error = xfs_rtcheck_range(args, start, len, 0, &new, &stat);
if (error)
return error;
ASSERT(stat);
return 0;
}
#else
-#define xfs_rtcheck_alloc_range(m,t,b,l) (0)
+#define xfs_rtcheck_alloc_range(a,b,l) (0)
#endif
/*
* Free an extent in the realtime subvolume. Length is expressed in
* realtime extents, as is the block number.
*/
-int /* error */
+int
xfs_rtfree_extent(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_rtblock_t bno, /* starting block number to free */
- xfs_extlen_t len) /* length of extent freed */
+ struct xfs_trans *tp, /* transaction pointer */
+ xfs_rtxnum_t start, /* starting rtext number to free */
+ xfs_rtxlen_t len) /* length of extent freed */
{
- int error; /* error value */
- xfs_mount_t *mp; /* file system mount structure */
- xfs_fsblock_t sb; /* summary file block number */
- struct xfs_buf *sumbp = NULL; /* summary file block buffer */
- struct timespec64 atime;
-
- mp = tp->t_mountp;
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_rtalloc_args args = {
+ .mp = mp,
+ .tp = tp,
+ };
+ int error;
+ struct timespec64 atime;
ASSERT(mp->m_rbmip->i_itemp != NULL);
ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
- error = xfs_rtcheck_alloc_range(mp, tp, bno, len);
+ error = xfs_rtcheck_alloc_range(&args, start, len);
if (error)
return error;
/*
* Free the range of realtime blocks.
*/
- error = xfs_rtfree_range(mp, tp, bno, len, &sumbp, &sb);
- if (error) {
- return error;
- }
+ error = xfs_rtfree_range(&args, start, len);
+ if (error)
+ goto out;
+
/*
* Mark more blocks free in the superblock.
*/
@@ -1002,11 +973,47 @@ xfs_rtfree_extent(
mp->m_rbmip->i_diflags |= XFS_DIFLAG_NEWRTBM;
atime = inode_get_atime(VFS_I(mp->m_rbmip));
- *((uint64_t *)&atime) = 0;
+ atime.tv_sec = 0;
inode_set_atime_to_ts(VFS_I(mp->m_rbmip), atime);
xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
}
- return 0;
+ error = 0;
+out:
+ xfs_rtbuf_cache_relse(&args);
+ return error;
+}
+
+/*
+ * Free some blocks in the realtime subvolume. rtbno and rtlen are in units of
+ * rt blocks, not rt extents; must be aligned to the rt extent size; and rtlen
+ * cannot exceed XFS_MAX_BMBT_EXTLEN.
+ */
+int
+xfs_rtfree_blocks(
+ struct xfs_trans *tp,
+ xfs_fsblock_t rtbno,
+ xfs_filblks_t rtlen)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ xfs_rtxnum_t start;
+ xfs_filblks_t len;
+ xfs_extlen_t mod;
+
+ ASSERT(rtlen <= XFS_MAX_BMBT_EXTLEN);
+
+ len = xfs_rtb_to_rtxrem(mp, rtlen, &mod);
+ if (mod) {
+ ASSERT(mod == 0);
+ return -EIO;
+ }
+
+ start = xfs_rtb_to_rtxrem(mp, rtbno, &mod);
+ if (mod) {
+ ASSERT(mod == 0);
+ return -EIO;
+ }
+
+ return xfs_rtfree_extent(tp, start, len);
}
/* Find all the free records within a given range. */
@@ -1019,10 +1026,14 @@ xfs_rtalloc_query_range(
xfs_rtalloc_query_range_fn fn,
void *priv)
{
+ struct xfs_rtalloc_args args = {
+ .mp = mp,
+ .tp = tp,
+ };
struct xfs_rtalloc_rec rec;
- xfs_rtblock_t rtstart;
- xfs_rtblock_t rtend;
- xfs_rtblock_t high_key;
+ xfs_rtxnum_t rtstart;
+ xfs_rtxnum_t rtend;
+ xfs_rtxnum_t high_key;
int is_free;
int error = 0;
@@ -1038,13 +1049,13 @@ xfs_rtalloc_query_range(
rtstart = low_rec->ar_startext;
while (rtstart <= high_key) {
/* Is the first block free? */
- error = xfs_rtcheck_range(mp, tp, rtstart, 1, 1, &rtend,
+ error = xfs_rtcheck_range(&args, rtstart, 1, 1, &rtend,
&is_free);
if (error)
break;
/* How long does the extent go for? */
- error = xfs_rtfind_forw(mp, tp, rtstart, high_key, &rtend);
+ error = xfs_rtfind_forw(&args, rtstart, high_key, &rtend);
if (error)
break;
@@ -1060,6 +1071,7 @@ xfs_rtalloc_query_range(
rtstart = rtend + 1;
}
+ xfs_rtbuf_cache_relse(&args);
return error;
}
@@ -1085,18 +1097,79 @@ int
xfs_rtalloc_extent_is_free(
struct xfs_mount *mp,
struct xfs_trans *tp,
- xfs_rtblock_t start,
- xfs_extlen_t len,
+ xfs_rtxnum_t start,
+ xfs_rtxlen_t len,
bool *is_free)
{
- xfs_rtblock_t end;
+ struct xfs_rtalloc_args args = {
+ .mp = mp,
+ .tp = tp,
+ };
+ xfs_rtxnum_t end;
int matches;
int error;
- error = xfs_rtcheck_range(mp, tp, start, len, 1, &end, &matches);
+ error = xfs_rtcheck_range(&args, start, len, 1, &end, &matches);
+ xfs_rtbuf_cache_relse(&args);
if (error)
return error;
*is_free = matches;
return 0;
}
+
+/*
+ * Compute the number of rtbitmap blocks needed to track the given number of rt
+ * extents.
+ */
+xfs_filblks_t
+xfs_rtbitmap_blockcount(
+ struct xfs_mount *mp,
+ xfs_rtbxlen_t rtextents)
+{
+ return howmany_64(rtextents, NBBY * mp->m_sb.sb_blocksize);
+}
+
+/*
+ * Compute the number of rtbitmap words needed to populate every block of a
+ * bitmap that is large enough to track the given number of rt extents.
+ */
+unsigned long long
+xfs_rtbitmap_wordcount(
+ struct xfs_mount *mp,
+ xfs_rtbxlen_t rtextents)
+{
+ xfs_filblks_t blocks;
+
+ blocks = xfs_rtbitmap_blockcount(mp, rtextents);
+ return XFS_FSB_TO_B(mp, blocks) >> XFS_WORDLOG;
+}
+
+/* Compute the number of rtsummary blocks needed to track the given rt space. */
+xfs_filblks_t
+xfs_rtsummary_blockcount(
+ struct xfs_mount *mp,
+ unsigned int rsumlevels,
+ xfs_extlen_t rbmblocks)
+{
+ unsigned long long rsumwords;
+
+ rsumwords = (unsigned long long)rsumlevels * rbmblocks;
+ return XFS_B_TO_FSB(mp, rsumwords << XFS_WORDLOG);
+}
+
+/*
+ * Compute the number of rtsummary info words needed to populate every block of
+ * a summary file that is large enough to track the given rt space.
+ */
+unsigned long long
+xfs_rtsummary_wordcount(
+ struct xfs_mount *mp,
+ unsigned int rsumlevels,
+ xfs_extlen_t rbmblocks)
+{
+ xfs_filblks_t blocks;
+
+ blocks = xfs_rtsummary_blockcount(mp, rsumlevels, rbmblocks);
+ return XFS_FSB_TO_B(mp, blocks) >> XFS_WORDLOG;
+}
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.h b/fs/xfs/libxfs/xfs_rtbitmap.h
new file mode 100644
index 000000000000..c0637057d69c
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_rtbitmap.h
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ */
+#ifndef __XFS_RTBITMAP_H__
+#define __XFS_RTBITMAP_H__
+
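+/*
+ * Shared state for a sequence of realtime bitmap and summary file operations:
+ * the mount and transaction plus cached bitmap and summary block buffers.
+ */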
+struct xfs_rtalloc_args {
+ struct xfs_mount *mp;
+ struct xfs_trans *tp;
+
+ struct xfs_buf *rbmbp; /* bitmap block buffer */
+ struct xfs_buf *sumbp; /* summary block buffer */
+
+ xfs_fileoff_t rbmoff; /* bitmap block number */
+ xfs_fileoff_t sumoff; /* summary block number */
+};
+
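+/* Convert an rt extent number (or count of rt extents) into rt blocks. */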
+static inline xfs_rtblock_t
+xfs_rtx_to_rtb(
+ struct xfs_mount *mp,
+ xfs_rtxnum_t rtx)
+{
+ if (mp->m_rtxblklog >= 0)
+ return rtx << mp->m_rtxblklog;
+
+ return rtx * mp->m_sb.sb_rextsize;
+}
+
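+/* Convert a length in rt extents into a length in filesystem blocks. */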
+static inline xfs_extlen_t
+xfs_rtxlen_to_extlen(
+ struct xfs_mount *mp,
+ xfs_rtxlen_t rtxlen)
+{
+ if (mp->m_rtxblklog >= 0)
+ return rtxlen << mp->m_rtxblklog;
+
+ return rtxlen * mp->m_sb.sb_rextsize;
+}
+
+/* Compute the misalignment between an extent length and a realtime extent. */
+static inline unsigned int
+xfs_extlen_to_rtxmod(
+ struct xfs_mount *mp,
+ xfs_extlen_t len)
+{
+ if (mp->m_rtxblklog >= 0)
+ return len & mp->m_rtxblkmask;
+
+ return len % mp->m_sb.sb_rextsize;
+}
+
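+/* Convert a length in filesystem blocks into whole rt extents, rounding down. */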
+static inline xfs_rtxlen_t
+xfs_extlen_to_rtxlen(
+ struct xfs_mount *mp,
+ xfs_extlen_t len)
+{
+ if (mp->m_rtxblklog >= 0)
+ return len >> mp->m_rtxblklog;
+
+ return len / mp->m_sb.sb_rextsize;
+}
+
+/* Convert an rt block number into an rt extent number. */
+static inline xfs_rtxnum_t
+xfs_rtb_to_rtx(
+ struct xfs_mount *mp,
+ xfs_rtblock_t rtbno)
+{
+ if (likely(mp->m_rtxblklog >= 0))
+ return rtbno >> mp->m_rtxblklog;
+
+ return div_u64(rtbno, mp->m_sb.sb_rextsize);
+}
+
+/* Return the offset of an rt block number within an rt extent. */
+static inline xfs_extlen_t
+xfs_rtb_to_rtxoff(
+ struct xfs_mount *mp,
+ xfs_rtblock_t rtbno)
+{
+ if (likely(mp->m_rtxblklog >= 0))
+ return rtbno & mp->m_rtxblkmask;
+
+ return do_div(rtbno, mp->m_sb.sb_rextsize);
+}
+
+/*
+ * Crack an rt block number into an rt extent number and an offset within that
+ * rt extent. Returns the rt extent number directly and the offset in @off.
+ */
+static inline xfs_rtxnum_t
+xfs_rtb_to_rtxrem(
+ struct xfs_mount *mp,
+ xfs_rtblock_t rtbno,
+ xfs_extlen_t *off)
+{
+ if (likely(mp->m_rtxblklog >= 0)) {
+ *off = rtbno & mp->m_rtxblkmask;
+ return rtbno >> mp->m_rtxblklog;
+ }
+
+ return div_u64_rem(rtbno, mp->m_sb.sb_rextsize, off);
+}
+
+/*
+ * Convert an rt block number into an rt extent number, rounding up to the next
+ * rt extent if the rt block is not aligned to an rt extent boundary.
+ */
+static inline xfs_rtxnum_t
+xfs_rtb_to_rtxup(
+ struct xfs_mount *mp,
+ xfs_rtblock_t rtbno)
+{
+ if (likely(mp->m_rtxblklog >= 0)) {
+ if (rtbno & mp->m_rtxblkmask)
+ return (rtbno >> mp->m_rtxblklog) + 1;
+ return rtbno >> mp->m_rtxblklog;
+ }
+
+ if (do_div(rtbno, mp->m_sb.sb_rextsize))
+ rtbno++;
+ return rtbno;
+}
+
+/* Round this rtblock up to the nearest rt extent size. */
+static inline xfs_rtblock_t
+xfs_rtb_roundup_rtx(
+ struct xfs_mount *mp,
+ xfs_rtblock_t rtbno)
+{
+ return roundup_64(rtbno, mp->m_sb.sb_rextsize);
+}
+
+/* Round this rtblock down to the nearest rt extent size. */
+static inline xfs_rtblock_t
+xfs_rtb_rounddown_rtx(
+ struct xfs_mount *mp,
+ xfs_rtblock_t rtbno)
+{
+ return rounddown_64(rtbno, mp->m_sb.sb_rextsize);
+}
+
+/* Convert an rt extent number to a file block offset in the rt bitmap file. */
+static inline xfs_fileoff_t
+xfs_rtx_to_rbmblock(
+ struct xfs_mount *mp,
+ xfs_rtxnum_t rtx)
+{
+ return rtx >> mp->m_blkbit_log;
+}
+
+/* Convert an rt extent number to a word offset within an rt bitmap block. */
+static inline unsigned int
+xfs_rtx_to_rbmword(
+ struct xfs_mount *mp,
+ xfs_rtxnum_t rtx)
+{
+ return (rtx >> XFS_NBWORDLOG) & (mp->m_blockwsize - 1);
+}
+
+/* Convert a file block offset in the rt bitmap file to an rt extent number. */
+static inline xfs_rtxnum_t
+xfs_rbmblock_to_rtx(
+ struct xfs_mount *mp,
+ xfs_fileoff_t rbmoff)
+{
+ return rbmoff << mp->m_blkbit_log;
+}
+
+/* Return a pointer to a bitmap word within a rt bitmap block. */
+static inline union xfs_rtword_raw *
+xfs_rbmblock_wordptr(
+ struct xfs_rtalloc_args *args,
+ unsigned int index)
+{
+ union xfs_rtword_raw *words = args->rbmbp->b_addr;
+
+ return words + index;
+}
+
+/* Convert an ondisk bitmap word to its incore representation. */
+static inline xfs_rtword_t
+xfs_rtbitmap_getword(
+ struct xfs_rtalloc_args *args,
+ unsigned int index)
+{
+ union xfs_rtword_raw *word = xfs_rbmblock_wordptr(args, index);
+
+ return word->old;
+}
+
+/* Set an ondisk bitmap word from an incore representation. */
+static inline void
+xfs_rtbitmap_setword(
+ struct xfs_rtalloc_args *args,
+ unsigned int index,
+ xfs_rtword_t value)
+{
+ union xfs_rtword_raw *word = xfs_rbmblock_wordptr(args, index);
+
+ word->old = value;
+}
+
+/*
+ * Convert a rt extent length and rt bitmap block number to a xfs_suminfo_t
+ * offset within the rt summary file.
+ */
+static inline xfs_rtsumoff_t
+xfs_rtsumoffs(
+ struct xfs_mount *mp,
+ int log2_len,
+ xfs_fileoff_t rbmoff)
+{
+ return log2_len * mp->m_sb.sb_rbmblocks + rbmoff;
+}
+
+/*
+ * Convert an xfs_suminfo_t offset to a file block offset within the rt summary
+ * file.
+ */
+static inline xfs_fileoff_t
+xfs_rtsumoffs_to_block(
+ struct xfs_mount *mp,
+ xfs_rtsumoff_t rsumoff)
+{
+ return XFS_B_TO_FSBT(mp, rsumoff * sizeof(xfs_suminfo_t));
+}
+
+/*
+ * Convert an xfs_suminfo_t offset to an info word offset within an rt summary
+ * block.
+ */
+static inline unsigned int
+xfs_rtsumoffs_to_infoword(
+ struct xfs_mount *mp,
+ xfs_rtsumoff_t rsumoff)
+{
+ unsigned int mask = mp->m_blockmask >> XFS_SUMINFOLOG;
+
+ return rsumoff & mask;
+}
+
+/* Return a pointer to a summary info word within a rt summary block. */
+static inline union xfs_suminfo_raw *
+xfs_rsumblock_infoptr(
+ struct xfs_rtalloc_args *args,
+ unsigned int index)
+{
+ union xfs_suminfo_raw *info = args->sumbp->b_addr;
+
+ return info + index;
+}
+
+/* Get the current value of a summary counter. */
+static inline xfs_suminfo_t
+xfs_suminfo_get(
+ struct xfs_rtalloc_args *args,
+ unsigned int index)
+{
+ union xfs_suminfo_raw *info = xfs_rsumblock_infoptr(args, index);
+
+ return info->old;
+}
+
+/* Add to the current value of a summary counter and return the new value. */
+static inline xfs_suminfo_t
+xfs_suminfo_add(
+ struct xfs_rtalloc_args *args,
+ unsigned int index,
+ int delta)
+{
+ union xfs_suminfo_raw *info = xfs_rsumblock_infoptr(args, index);
+
+ info->old += delta;
+ return info->old;
+}
+
+/*
+ * Functions for walking free space rtextents in the realtime bitmap.
+ */
+struct xfs_rtalloc_rec {
+ xfs_rtxnum_t ar_startext;
+ xfs_rtbxlen_t ar_extcount;
+};
+
+typedef int (*xfs_rtalloc_query_range_fn)(
+ struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ const struct xfs_rtalloc_rec *rec,
+ void *priv);
+
+#ifdef CONFIG_XFS_RT
+void xfs_rtbuf_cache_relse(struct xfs_rtalloc_args *args);
+
+int xfs_rtbuf_get(struct xfs_rtalloc_args *args, xfs_fileoff_t block,
+ int issum);
+
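+/* Read an rt bitmap block into the buffer cached in the args structure. */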
+static inline int
+xfs_rtbitmap_read_buf(
+ struct xfs_rtalloc_args *args,
+ xfs_fileoff_t block)
+{
+ return xfs_rtbuf_get(args, block, 0);
+}
+
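+/* Read an rt summary block into the buffer cached in the args structure. */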
+static inline int
+xfs_rtsummary_read_buf(
+ struct xfs_rtalloc_args *args,
+ xfs_fileoff_t block)
+{
+ return xfs_rtbuf_get(args, block, 1);
+}
+
+int xfs_rtcheck_range(struct xfs_rtalloc_args *args, xfs_rtxnum_t start,
+ xfs_rtxlen_t len, int val, xfs_rtxnum_t *new, int *stat);
+int xfs_rtfind_back(struct xfs_rtalloc_args *args, xfs_rtxnum_t start,
+ xfs_rtxnum_t limit, xfs_rtxnum_t *rtblock);
+int xfs_rtfind_forw(struct xfs_rtalloc_args *args, xfs_rtxnum_t start,
+ xfs_rtxnum_t limit, xfs_rtxnum_t *rtblock);
+int xfs_rtmodify_range(struct xfs_rtalloc_args *args, xfs_rtxnum_t start,
+ xfs_rtxlen_t len, int val);
+int xfs_rtmodify_summary_int(struct xfs_rtalloc_args *args, int log,
+ xfs_fileoff_t bbno, int delta, xfs_suminfo_t *sum);
+int xfs_rtmodify_summary(struct xfs_rtalloc_args *args, int log,
+ xfs_fileoff_t bbno, int delta);
+int xfs_rtfree_range(struct xfs_rtalloc_args *args, xfs_rtxnum_t start,
+ xfs_rtxlen_t len);
+int xfs_rtalloc_query_range(struct xfs_mount *mp, struct xfs_trans *tp,
+ const struct xfs_rtalloc_rec *low_rec,
+ const struct xfs_rtalloc_rec *high_rec,
+ xfs_rtalloc_query_range_fn fn, void *priv);
+int xfs_rtalloc_query_all(struct xfs_mount *mp, struct xfs_trans *tp,
+ xfs_rtalloc_query_range_fn fn,
+ void *priv);
+int xfs_rtalloc_extent_is_free(struct xfs_mount *mp, struct xfs_trans *tp,
+ xfs_rtxnum_t start, xfs_rtxlen_t len,
+ bool *is_free);
+/*
+ * Free an extent in the realtime subvolume. Length is expressed in
+ * realtime extents, as is the block number.
+ */
+int /* error */
+xfs_rtfree_extent(
+ struct xfs_trans *tp, /* transaction pointer */
+ xfs_rtxnum_t start, /* starting rtext number to free */
+ xfs_rtxlen_t len); /* length of extent freed */
+
+/* Same as above, but in units of rt blocks. */
+int xfs_rtfree_blocks(struct xfs_trans *tp, xfs_fsblock_t rtbno,
+ xfs_filblks_t rtlen);
+
+xfs_filblks_t xfs_rtbitmap_blockcount(struct xfs_mount *mp, xfs_rtbxlen_t
+ rtextents);
+unsigned long long xfs_rtbitmap_wordcount(struct xfs_mount *mp,
+ xfs_rtbxlen_t rtextents);
+
+xfs_filblks_t xfs_rtsummary_blockcount(struct xfs_mount *mp,
+ unsigned int rsumlevels, xfs_extlen_t rbmblocks);
+unsigned long long xfs_rtsummary_wordcount(struct xfs_mount *mp,
+ unsigned int rsumlevels, xfs_extlen_t rbmblocks);
+#else /* CONFIG_XFS_RT */
+# define xfs_rtfree_extent(t,b,l) (-ENOSYS)
+# define xfs_rtfree_blocks(t,rb,rl) (-ENOSYS)
+# define xfs_rtalloc_query_range(m,t,l,h,f,p) (-ENOSYS)
+# define xfs_rtalloc_query_all(m,t,f,p) (-ENOSYS)
+# define xfs_rtbitmap_read_buf(a,b) (-ENOSYS)
+# define xfs_rtsummary_read_buf(a,b) (-ENOSYS)
+# define xfs_rtbuf_cache_relse(a) (0)
+# define xfs_rtalloc_extent_is_free(m,t,s,l,i) (-ENOSYS)
+static inline xfs_filblks_t
+xfs_rtbitmap_blockcount(struct xfs_mount *mp, xfs_rtbxlen_t rtextents)
+{
+ /* shut up gcc */
+ return 0;
+}
+# define xfs_rtbitmap_wordcount(mp, r) (0)
+# define xfs_rtsummary_blockcount(mp, l, b) (0)
+# define xfs_rtsummary_wordcount(mp, l, b) (0)
+#endif /* CONFIG_XFS_RT */
+
+#endif /* __XFS_RTBITMAP_H__ */
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 6264daaab37b..1f74d0cd1618 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -975,6 +975,8 @@ xfs_sb_mount_common(
mp->m_blockmask = sbp->sb_blocksize - 1;
mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
mp->m_blockwmask = mp->m_blockwsize - 1;
+ mp->m_rtxblklog = log2_if_power2(sbp->sb_rextsize);
+ mp->m_rtxblkmask = mask64_if_power2(sbp->sb_rextsize);
mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 1);
mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 0);
diff --git a/fs/xfs/libxfs/xfs_sb.h b/fs/xfs/libxfs/xfs_sb.h
index a5e14740ec9a..19134b23c10b 100644
--- a/fs/xfs/libxfs/xfs_sb.h
+++ b/fs/xfs/libxfs/xfs_sb.h
@@ -25,7 +25,7 @@ extern uint64_t xfs_sb_version_to_features(struct xfs_sb *sbp);
extern int xfs_update_secondary_sbs(struct xfs_mount *mp);
-#define XFS_FS_GEOM_MAX_STRUCT_VER (4)
+#define XFS_FS_GEOM_MAX_STRUCT_VER (5)
extern void xfs_fs_geometry(struct xfs_mount *mp, struct xfs_fsop_geom *geo,
int struct_version);
extern int xfs_sb_read_secondary(struct xfs_mount *mp,
diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
index 5b2f27cbdb80..6cd45e8c118d 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.c
+++ b/fs/xfs/libxfs/xfs_trans_resv.c
@@ -19,6 +19,7 @@
#include "xfs_trans.h"
#include "xfs_qm.h"
#include "xfs_trans_space.h"
+#include "xfs_rtbitmap.h"
#define _ALLOC true
#define _FREE false
@@ -217,11 +218,12 @@ xfs_rtalloc_block_count(
struct xfs_mount *mp,
unsigned int num_ops)
{
- unsigned int blksz = XFS_FSB_TO_B(mp, 1);
- unsigned int rtbmp_bytes;
+ unsigned int rtbmp_blocks;
+ xfs_rtxlen_t rtxlen;
- rtbmp_bytes = (XFS_MAX_BMBT_EXTLEN / mp->m_sb.sb_rextsize) / NBBY;
- return (howmany(rtbmp_bytes, blksz) + 1) * num_ops;
+ rtxlen = xfs_extlen_to_rtxlen(mp, XFS_MAX_BMBT_EXTLEN);
+ rtbmp_blocks = xfs_rtbitmap_blockcount(mp, rtxlen);
+ return (rtbmp_blocks + 1) * num_ops;
}
/*
diff --git a/fs/xfs/libxfs/xfs_types.c b/fs/xfs/libxfs/xfs_types.c
index 5c2765934732..c299b16c9365 100644
--- a/fs/xfs/libxfs/xfs_types.c
+++ b/fs/xfs/libxfs/xfs_types.c
@@ -148,10 +148,10 @@ xfs_verify_rtbno(
/* Verify that a realtime device extent is fully contained inside the volume. */
bool
-xfs_verify_rtext(
+xfs_verify_rtbext(
struct xfs_mount *mp,
xfs_rtblock_t rtbno,
- xfs_rtblock_t len)
+ xfs_filblks_t len)
{
if (rtbno + len <= rtbno)
return false;
diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h
index 851220021484..533200c4ccc2 100644
--- a/fs/xfs/libxfs/xfs_types.h
+++ b/fs/xfs/libxfs/xfs_types.h
@@ -11,6 +11,7 @@ typedef uint32_t prid_t; /* project ID */
typedef uint32_t xfs_agblock_t; /* blockno in alloc. group */
typedef uint32_t xfs_agino_t; /* inode # within allocation grp */
typedef uint32_t xfs_extlen_t; /* extent length in blocks */
+typedef uint32_t xfs_rtxlen_t; /* file extent length in rtextents */
typedef uint32_t xfs_agnumber_t; /* allocation group number */
typedef uint64_t xfs_extnum_t; /* # of extents in a file */
typedef uint32_t xfs_aextnum_t; /* # extents in an attribute fork */
@@ -18,6 +19,7 @@ typedef int64_t xfs_fsize_t; /* bytes in a file */
typedef uint64_t xfs_ufsize_t; /* unsigned bytes in a file */
typedef int32_t xfs_suminfo_t; /* type of bitmap summary info */
+typedef uint32_t xfs_rtsumoff_t; /* offset of an rtsummary info word */
typedef uint32_t xfs_rtword_t; /* word type for bitmap manipulations */
typedef int64_t xfs_lsn_t; /* log sequence number */
@@ -31,6 +33,8 @@ typedef uint64_t xfs_rfsblock_t; /* blockno in filesystem (raw) */
typedef uint64_t xfs_rtblock_t; /* extent (block) in realtime area */
typedef uint64_t xfs_fileoff_t; /* block number in a file */
typedef uint64_t xfs_filblks_t; /* number of blocks in a file */
+typedef uint64_t xfs_rtxnum_t; /* rtextent number */
+typedef uint64_t xfs_rtbxlen_t; /* rtbitmap extent length in rtextents */
typedef int64_t xfs_srtblock_t; /* signed version of xfs_rtblock_t */
@@ -47,6 +51,7 @@ typedef void * xfs_failaddr_t;
#define NULLRFSBLOCK ((xfs_rfsblock_t)-1)
#define NULLRTBLOCK ((xfs_rtblock_t)-1)
#define NULLFILEOFF ((xfs_fileoff_t)-1)
+#define NULLRTEXTNO ((xfs_rtxnum_t)-1)
#define NULLAGBLOCK ((xfs_agblock_t)-1)
#define NULLAGNUMBER ((xfs_agnumber_t)-1)
@@ -145,6 +150,7 @@ typedef uint32_t xfs_dqid_t;
*/
#define XFS_NBBYLOG 3 /* log2(NBBY) */
#define XFS_WORDLOG 2 /* log2(sizeof(xfs_rtword_t)) */
+#define XFS_SUMINFOLOG 2 /* log2(sizeof(xfs_suminfo_t)) */
#define XFS_NBWORDLOG (XFS_NBBYLOG + XFS_WORDLOG)
#define XFS_NBWORD (1 << XFS_NBWORDLOG)
#define XFS_WORDMASK ((1 << XFS_WORDLOG) - 1)
@@ -229,8 +235,8 @@ bool xfs_verify_ino(struct xfs_mount *mp, xfs_ino_t ino);
bool xfs_internal_inum(struct xfs_mount *mp, xfs_ino_t ino);
bool xfs_verify_dir_ino(struct xfs_mount *mp, xfs_ino_t ino);
bool xfs_verify_rtbno(struct xfs_mount *mp, xfs_rtblock_t rtbno);
-bool xfs_verify_rtext(struct xfs_mount *mp, xfs_rtblock_t rtbno,
- xfs_rtblock_t len);
+bool xfs_verify_rtbext(struct xfs_mount *mp, xfs_rtblock_t rtbno,
+ xfs_filblks_t len);
bool xfs_verify_icount(struct xfs_mount *mp, unsigned long long icount);
bool xfs_verify_dablk(struct xfs_mount *mp, xfs_fileoff_t off);
void xfs_icount_range(struct xfs_mount *mp, unsigned long long *min,
diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
index 75588915572e..06d8c1996a33 100644
--- a/fs/xfs/scrub/bmap.c
+++ b/fs/xfs/scrub/bmap.c
@@ -410,7 +410,7 @@ xchk_bmap_iextent(
/* Make sure the extent points to a valid place. */
if (info->is_rt &&
- !xfs_verify_rtext(mp, irec->br_startblock, irec->br_blockcount))
+ !xfs_verify_rtbext(mp, irec->br_startblock, irec->br_blockcount))
xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
if (!info->is_rt &&
diff --git a/fs/xfs/scrub/fscounters.c b/fs/xfs/scrub/fscounters.c
index 05be757668bb..5799e9a94f1f 100644
--- a/fs/xfs/scrub/fscounters.c
+++ b/fs/xfs/scrub/fscounters.c
@@ -16,7 +16,7 @@
#include "xfs_health.h"
#include "xfs_btree.h"
#include "xfs_ag.h"
-#include "xfs_rtalloc.h"
+#include "xfs_rtbitmap.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "scrub/scrub.h"
diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c
index 59d7912fb75f..889f556bc98f 100644
--- a/fs/xfs/scrub/inode.c
+++ b/fs/xfs/scrub/inode.c
@@ -20,6 +20,7 @@
#include "xfs_reflink.h"
#include "xfs_rmap.h"
#include "xfs_bmap_util.h"
+#include "xfs_rtbitmap.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
@@ -225,7 +226,7 @@ xchk_inode_extsize(
*/
if ((flags & XFS_DIFLAG_RTINHERIT) &&
(flags & XFS_DIFLAG_EXTSZINHERIT) &&
- value % sc->mp->m_sb.sb_rextsize > 0)
+ xfs_extlen_to_rtxmod(sc->mp, value) > 0)
xchk_ino_set_warning(sc, ino);
}
diff --git a/fs/xfs/scrub/rtbitmap.c b/fs/xfs/scrub/rtbitmap.c
index 008ddb599e13..41a1d89ae8e6 100644
--- a/fs/xfs/scrub/rtbitmap.c
+++ b/fs/xfs/scrub/rtbitmap.c
@@ -11,7 +11,7 @@
#include "xfs_mount.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
-#include "xfs_rtalloc.h"
+#include "xfs_rtbitmap.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "scrub/scrub.h"
@@ -48,12 +48,12 @@ xchk_rtbitmap_rec(
{
struct xfs_scrub *sc = priv;
xfs_rtblock_t startblock;
- xfs_rtblock_t blockcount;
+ xfs_filblks_t blockcount;
- startblock = rec->ar_startext * mp->m_sb.sb_rextsize;
- blockcount = rec->ar_extcount * mp->m_sb.sb_rextsize;
+ startblock = xfs_rtx_to_rtb(mp, rec->ar_startext);
+ blockcount = xfs_rtx_to_rtb(mp, rec->ar_extcount);
- if (!xfs_verify_rtext(mp, startblock, blockcount))
+ if (!xfs_verify_rtbext(mp, startblock, blockcount))
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
return 0;
}
@@ -128,26 +128,22 @@ out:
void
xchk_xref_is_used_rt_space(
struct xfs_scrub *sc,
- xfs_rtblock_t fsbno,
+ xfs_rtblock_t rtbno,
xfs_extlen_t len)
{
- xfs_rtblock_t startext;
- xfs_rtblock_t endext;
- xfs_rtblock_t extcount;
+ xfs_rtxnum_t startext;
+ xfs_rtxnum_t endext;
bool is_free;
int error;
if (xchk_skip_xref(sc->sm))
return;
- startext = fsbno;
- endext = fsbno + len - 1;
- do_div(startext, sc->mp->m_sb.sb_rextsize);
- do_div(endext, sc->mp->m_sb.sb_rextsize);
- extcount = endext - startext + 1;
+ startext = xfs_rtb_to_rtx(sc->mp, rtbno);
+ endext = xfs_rtb_to_rtx(sc->mp, rtbno + len - 1);
xfs_ilock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
- error = xfs_rtalloc_extent_is_free(sc->mp, sc->tp, startext, extcount,
- &is_free);
+ error = xfs_rtalloc_extent_is_free(sc->mp, sc->tp, startext,
+ endext - startext + 1, &is_free);
if (!xchk_should_check_xref(sc, &error, NULL))
goto out_unlock;
if (is_free)
diff --git a/fs/xfs/scrub/rtsummary.c b/fs/xfs/scrub/rtsummary.c
index 437ed9acbb27..8b15c47408d0 100644
--- a/fs/xfs/scrub/rtsummary.c
+++ b/fs/xfs/scrub/rtsummary.c
@@ -13,7 +13,7 @@
#include "xfs_inode.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
-#include "xfs_rtalloc.h"
+#include "xfs_rtbitmap.h"
#include "xfs_bit.h"
#include "xfs_bmap.h"
#include "scrub/scrub.h"
@@ -81,34 +81,45 @@ typedef unsigned int xchk_rtsumoff_t;
static inline int
xfsum_load(
struct xfs_scrub *sc,
- xchk_rtsumoff_t sumoff,
- xfs_suminfo_t *info)
+ xfs_rtsumoff_t sumoff,
+ union xfs_suminfo_raw *rawinfo)
{
- return xfile_obj_load(sc->xfile, info, sizeof(xfs_suminfo_t),
+ return xfile_obj_load(sc->xfile, rawinfo,
+ sizeof(union xfs_suminfo_raw),
sumoff << XFS_WORDLOG);
}
static inline int
xfsum_store(
struct xfs_scrub *sc,
- xchk_rtsumoff_t sumoff,
- const xfs_suminfo_t info)
+ xfs_rtsumoff_t sumoff,
+ const union xfs_suminfo_raw rawinfo)
{
- return xfile_obj_store(sc->xfile, &info, sizeof(xfs_suminfo_t),
+ return xfile_obj_store(sc->xfile, &rawinfo,
+ sizeof(union xfs_suminfo_raw),
sumoff << XFS_WORDLOG);
}
static inline int
xfsum_copyout(
struct xfs_scrub *sc,
- xchk_rtsumoff_t sumoff,
- xfs_suminfo_t *info,
+ xfs_rtsumoff_t sumoff,
+ union xfs_suminfo_raw *rawinfo,
unsigned int nr_words)
{
- return xfile_obj_load(sc->xfile, info, nr_words << XFS_WORDLOG,
+ return xfile_obj_load(sc->xfile, rawinfo, nr_words << XFS_WORDLOG,
sumoff << XFS_WORDLOG);
}
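+/* Bump a computed summary counter and return the new value. */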
+static inline xfs_suminfo_t
+xchk_rtsum_inc(
+ struct xfs_mount *mp,
+ union xfs_suminfo_raw *v)
+{
+ v->old += 1;
+ return v->old;
+}
+
/* Update the summary file to reflect the free extent that we've accumulated. */
STATIC int
xchk_rtsum_record_free(
@@ -121,23 +132,24 @@ xchk_rtsum_record_free(
xfs_fileoff_t rbmoff;
xfs_rtblock_t rtbno;
xfs_filblks_t rtlen;
- xchk_rtsumoff_t offs;
+ xfs_rtsumoff_t offs;
unsigned int lenlog;
- xfs_suminfo_t v = 0;
+ union xfs_suminfo_raw v;
+ xfs_suminfo_t value;
int error = 0;
if (xchk_should_terminate(sc, &error))
return error;
/* Compute the relevant location in the rtsum file. */
- rbmoff = XFS_BITTOBLOCK(mp, rec->ar_startext);
+ rbmoff = xfs_rtx_to_rbmblock(mp, rec->ar_startext);
lenlog = XFS_RTBLOCKLOG(rec->ar_extcount);
- offs = XFS_SUMOFFS(mp, lenlog, rbmoff);
+ offs = xfs_rtsumoffs(mp, lenlog, rbmoff);
- rtbno = rec->ar_startext * mp->m_sb.sb_rextsize;
- rtlen = rec->ar_extcount * mp->m_sb.sb_rextsize;
+ rtbno = xfs_rtx_to_rtb(mp, rec->ar_startext);
+ rtlen = xfs_rtx_to_rtb(mp, rec->ar_extcount);
- if (!xfs_verify_rtext(mp, rtbno, rtlen)) {
+ if (!xfs_verify_rtbext(mp, rtbno, rtlen)) {
xchk_ino_xref_set_corrupt(sc, mp->m_rbmip->i_ino);
return -EFSCORRUPTED;
}
@@ -147,9 +159,9 @@ xchk_rtsum_record_free(
if (error)
return error;
- v++;
+ value = xchk_rtsum_inc(sc->mp, &v);
trace_xchk_rtsum_record_free(mp, rec->ar_startext, rec->ar_extcount,
- lenlog, offs, v);
+ lenlog, offs, value);
return xfsum_store(sc, offs, v);
}
@@ -160,12 +172,11 @@ xchk_rtsum_compute(
struct xfs_scrub *sc)
{
struct xfs_mount *mp = sc->mp;
- unsigned long long rtbmp_bytes;
+ unsigned long long rtbmp_blocks;
/* If the bitmap size doesn't match the computed size, bail. */
- rtbmp_bytes = howmany_64(mp->m_sb.sb_rextents, NBBY);
- if (roundup_64(rtbmp_bytes, mp->m_sb.sb_blocksize) !=
- mp->m_rbmip->i_disk_size)
+ rtbmp_blocks = xfs_rtbitmap_blockcount(mp, mp->m_sb.sb_rextents);
+ if (XFS_FSB_TO_B(mp, rtbmp_blocks) != mp->m_rbmip->i_disk_size)
return -EFSCORRUPTED;
return xfs_rtalloc_query_all(sc->mp, sc->tp, xchk_rtsum_record_free,
@@ -177,14 +188,18 @@ STATIC int
xchk_rtsum_compare(
struct xfs_scrub *sc)
{
+ struct xfs_rtalloc_args args = {
+ .mp = sc->mp,
+ .tp = sc->tp,
+ };
struct xfs_mount *mp = sc->mp;
- struct xfs_buf *bp;
struct xfs_bmbt_irec map;
xfs_fileoff_t off;
xchk_rtsumoff_t sumoff = 0;
int nmap;
for (off = 0; off < XFS_B_TO_FSB(mp, mp->m_rsumsize); off++) {
+ union xfs_suminfo_raw *ondisk_info;
int error = 0;
if (xchk_should_terminate(sc, &error))
@@ -205,22 +220,23 @@ xchk_rtsum_compare(
}
/* Read a block's worth of ondisk rtsummary file. */
- error = xfs_rtbuf_get(mp, sc->tp, off, 1, &bp);
+ error = xfs_rtsummary_read_buf(&args, off);
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, off, &error))
return error;
/* Read a block's worth of computed rtsummary file. */
error = xfsum_copyout(sc, sumoff, sc->buf, mp->m_blockwsize);
if (error) {
- xfs_trans_brelse(sc->tp, bp);
+ xfs_rtbuf_cache_relse(&args);
return error;
}
- if (memcmp(bp->b_addr, sc->buf,
+ ondisk_info = xfs_rsumblock_infoptr(&args, 0);
+ if (memcmp(ondisk_info, sc->buf,
mp->m_blockwsize << XFS_WORDLOG) != 0)
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, off);
- xfs_trans_brelse(sc->tp, bp);
+ xfs_rtbuf_cache_relse(&args);
sumoff += mp->m_blockwsize;
}
diff --git a/fs/xfs/scrub/trace.c b/fs/xfs/scrub/trace.c
index 46249e7b17e0..29afa4851235 100644
--- a/fs/xfs/scrub/trace.c
+++ b/fs/xfs/scrub/trace.c
@@ -13,6 +13,7 @@
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ag.h"
+#include "xfs_rtbitmap.h"
#include "scrub/scrub.h"
#include "scrub/xfile.h"
#include "scrub/xfarray.h"
diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h
index cbd4d01e253c..4a8bc6f3c8f2 100644
--- a/fs/xfs/scrub/trace.h
+++ b/fs/xfs/scrub/trace.h
@@ -1036,17 +1036,18 @@ TRACE_EVENT(xfarray_sort_stats,
#ifdef CONFIG_XFS_RT
TRACE_EVENT(xchk_rtsum_record_free,
- TP_PROTO(struct xfs_mount *mp, xfs_rtblock_t start,
- uint64_t len, unsigned int log, loff_t pos, xfs_suminfo_t v),
- TP_ARGS(mp, start, len, log, pos, v),
+ TP_PROTO(struct xfs_mount *mp, xfs_rtxnum_t start,
+ xfs_rtbxlen_t len, unsigned int log, loff_t pos,
+ xfs_suminfo_t value),
+ TP_ARGS(mp, start, len, log, pos, value),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(dev_t, rtdev)
- __field(xfs_rtblock_t, start)
+ __field(xfs_rtxnum_t, start)
__field(unsigned long long, len)
__field(unsigned int, log)
__field(loff_t, pos)
- __field(xfs_suminfo_t, v)
+ __field(xfs_suminfo_t, value)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
@@ -1055,7 +1056,7 @@ TRACE_EVENT(xchk_rtsum_record_free,
__entry->len = len;
__entry->log = log;
__entry->pos = pos;
- __entry->v = v;
+ __entry->value = value;
),
TP_printk("dev %d:%d rtdev %d:%d rtx 0x%llx rtxcount 0x%llx log %u rsumpos 0x%llx sumcount %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
@@ -1064,7 +1065,7 @@ TRACE_EVENT(xchk_rtsum_record_free,
__entry->len,
__entry->log,
__entry->pos,
- __entry->v)
+ __entry->value)
);
#endif /* CONFIG_XFS_RT */
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 40e0a1f1f753..731260a5af6d 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -28,6 +28,7 @@
#include "xfs_icache.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"
+#include "xfs_rtbitmap.h"
/* Kernel only BMAP related definitions and functions */
@@ -75,28 +76,28 @@ xfs_bmap_rtalloc(
{
struct xfs_mount *mp = ap->ip->i_mount;
xfs_fileoff_t orig_offset = ap->offset;
- xfs_rtblock_t rtb;
- xfs_extlen_t prod = 0; /* product factor for allocators */
+ xfs_rtxnum_t rtx;
+ xfs_rtxlen_t prod = 0; /* product factor for allocators */
xfs_extlen_t mod = 0; /* product factor for allocators */
- xfs_extlen_t ralen = 0; /* realtime allocation length */
+ xfs_rtxlen_t ralen = 0; /* realtime allocation length */
xfs_extlen_t align; /* minimum allocation alignment */
xfs_extlen_t orig_length = ap->length;
xfs_extlen_t minlen = mp->m_sb.sb_rextsize;
- xfs_extlen_t raminlen;
+ xfs_rtxlen_t raminlen;
bool rtlocked = false;
bool ignore_locality = false;
int error;
align = xfs_get_extsz_hint(ap->ip);
retry:
- prod = align / mp->m_sb.sb_rextsize;
+ prod = xfs_extlen_to_rtxlen(mp, align);
error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
align, 1, ap->eof, 0,
ap->conv, &ap->offset, &ap->length);
if (error)
return error;
ASSERT(ap->length);
- ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
+ ASSERT(xfs_extlen_to_rtxmod(mp, ap->length) == 0);
/*
* If we shifted the file offset downward to satisfy an extent size
@@ -116,17 +117,14 @@ retry:
prod = 1;
/*
* Set ralen to be the actual requested length in rtextents.
- */
- ralen = ap->length / mp->m_sb.sb_rextsize;
- /*
+ *
* If the old value was close enough to XFS_BMBT_MAX_EXTLEN that
* we rounded up to it, cut it back so it's valid again.
* Note that if it's a really large request (bigger than
* XFS_BMBT_MAX_EXTLEN), we don't hear about that number, and can't
* adjust the starting point to match it.
*/
- if (ralen * mp->m_sb.sb_rextsize >= XFS_MAX_BMBT_EXTLEN)
- ralen = XFS_MAX_BMBT_EXTLEN / mp->m_sb.sb_rextsize;
+ ralen = xfs_extlen_to_rtxlen(mp, min(ap->length, XFS_MAX_BMBT_EXTLEN));
/*
* Lock out modifications to both the RT bitmap and summary inodes
@@ -144,12 +142,10 @@ retry:
* pick an extent that will space things out in the rt area.
*/
if (ap->eof && ap->offset == 0) {
- xfs_rtblock_t rtx; /* realtime extent no */
-
error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
if (error)
return error;
- ap->blkno = rtx * mp->m_sb.sb_rextsize;
+ ap->blkno = xfs_rtx_to_rtb(mp, rtx);
} else {
ap->blkno = 0;
}
@@ -160,20 +156,18 @@ retry:
* Realtime allocation, done through xfs_rtallocate_extent.
*/
if (ignore_locality)
- ap->blkno = 0;
+ rtx = 0;
else
- do_div(ap->blkno, mp->m_sb.sb_rextsize);
- rtb = ap->blkno;
- ap->length = ralen;
- raminlen = max_t(xfs_extlen_t, 1, minlen / mp->m_sb.sb_rextsize);
- error = xfs_rtallocate_extent(ap->tp, ap->blkno, raminlen, ap->length,
- &ralen, ap->wasdel, prod, &rtb);
+ rtx = xfs_rtb_to_rtx(mp, ap->blkno);
+ raminlen = max_t(xfs_rtxlen_t, 1, xfs_extlen_to_rtxlen(mp, minlen));
+ error = xfs_rtallocate_extent(ap->tp, rtx, raminlen, ralen, &ralen,
+ ap->wasdel, prod, &rtx);
if (error)
return error;
- if (rtb != NULLRTBLOCK) {
- ap->blkno = rtb * mp->m_sb.sb_rextsize;
- ap->length = ralen * mp->m_sb.sb_rextsize;
+ if (rtx != NULLRTEXTNO) {
+ ap->blkno = xfs_rtx_to_rtb(mp, rtx);
+ ap->length = xfs_rtxlen_to_extlen(mp, ralen);
ap->ip->i_nblocks += ap->length;
xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
if (ap->wasdel)
@@ -690,7 +684,7 @@ xfs_can_free_eofblocks(
*/
end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1)
- end_fsb = roundup_64(end_fsb, mp->m_sb.sb_rextsize);
+ end_fsb = xfs_rtb_roundup_rtx(mp, end_fsb);
last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
if (last_fsb <= end_fsb)
return false;
@@ -780,12 +774,10 @@ xfs_alloc_file_space(
{
xfs_mount_t *mp = ip->i_mount;
xfs_off_t count;
- xfs_filblks_t allocated_fsb;
xfs_filblks_t allocatesize_fsb;
xfs_extlen_t extsz, temp;
xfs_fileoff_t startoffset_fsb;
xfs_fileoff_t endoffset_fsb;
- int nimaps;
int rt;
xfs_trans_t *tp;
xfs_bmbt_irec_t imaps[1], *imapp;
@@ -808,7 +800,6 @@ xfs_alloc_file_space(
count = len;
imapp = &imaps[0];
- nimaps = 1;
startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
allocatesize_fsb = endoffset_fsb - startoffset_fsb;
@@ -819,6 +810,7 @@ xfs_alloc_file_space(
while (allocatesize_fsb && !error) {
xfs_fileoff_t s, e;
unsigned int dblocks, rblocks, resblks;
+ int nimaps = 1;
/*
* Determine space reservations for data/realtime.
@@ -884,15 +876,19 @@ xfs_alloc_file_space(
if (error)
break;
- allocated_fsb = imapp->br_blockcount;
-
- if (nimaps == 0) {
- error = -ENOSPC;
- break;
+ /*
+ * If the allocator cannot find a single free extent large
+ * enough to cover the start block of the requested range,
+ * xfs_bmapi_write will return 0 but leave *nimaps set to 0.
+ *
+ * In that case we simply need to keep looping with the same
+ * startoffset_fsb so that one of the following allocations
+ * will eventually reach the requested range.
+ */
+ if (nimaps) {
+ startoffset_fsb += imapp->br_blockcount;
+ allocatesize_fsb -= imapp->br_blockcount;
}
-
- startoffset_fsb += allocated_fsb;
- allocatesize_fsb -= allocated_fsb;
}
return error;
@@ -989,10 +985,8 @@ xfs_free_file_space(
/* We can only free complete realtime extents. */
if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1) {
- startoffset_fsb = roundup_64(startoffset_fsb,
- mp->m_sb.sb_rextsize);
- endoffset_fsb = rounddown_64(endoffset_fsb,
- mp->m_sb.sb_rextsize);
+ startoffset_fsb = xfs_rtb_roundup_rtx(mp, startoffset_fsb);
+ endoffset_fsb = xfs_rtb_rounddown_rtx(mp, endoffset_fsb);
}
/*
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 203700278ddb..e33e5e13b95f 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -214,6 +214,43 @@ xfs_ilock_iocb(
return 0;
}
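+/*
+ * Take the IOLOCK for a write.  If a remap operation is in progress, trade a
+ * shared lock for an exclusive one so the write waits for the remap to finish.
+ */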
+static int
+xfs_ilock_iocb_for_write(
+ struct kiocb *iocb,
+ unsigned int *lock_mode)
+{
+ ssize_t ret;
+ struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp));
+
+ ret = xfs_ilock_iocb(iocb, *lock_mode);
+ if (ret)
+ return ret;
+
+ if (*lock_mode == XFS_IOLOCK_EXCL)
+ return 0;
+ if (!xfs_iflags_test(ip, XFS_IREMAPPING))
+ return 0;
+
+ xfs_iunlock(ip, *lock_mode);
+ *lock_mode = XFS_IOLOCK_EXCL;
+ return xfs_ilock_iocb(iocb, *lock_mode);
+}
+
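+/*
+ * Take the MMAPLOCK for a write fault, upgrading from shared to exclusive if a
+ * remap operation is in progress on this inode.
+ */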
+static unsigned int
+xfs_ilock_for_write_fault(
+ struct xfs_inode *ip)
+{
+ /* get a shared lock if no remapping in progress */
+ xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
+ if (!xfs_iflags_test(ip, XFS_IREMAPPING))
+ return XFS_MMAPLOCK_SHARED;
+
+ /* wait for remapping to complete */
+ xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
+ xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
+ return XFS_MMAPLOCK_EXCL;
+}
+
STATIC ssize_t
xfs_file_dio_read(
struct kiocb *iocb,
@@ -551,7 +588,7 @@ xfs_file_dio_write_aligned(
unsigned int iolock = XFS_IOLOCK_SHARED;
ssize_t ret;
- ret = xfs_ilock_iocb(iocb, iolock);
+ ret = xfs_ilock_iocb_for_write(iocb, &iolock);
if (ret)
return ret;
ret = xfs_file_write_checks(iocb, from, &iolock);
@@ -618,7 +655,7 @@ retry_exclusive:
flags = IOMAP_DIO_FORCE_WAIT;
}
- ret = xfs_ilock_iocb(iocb, iolock);
+ ret = xfs_ilock_iocb_for_write(iocb, &iolock);
if (ret)
return ret;
@@ -1180,7 +1217,7 @@ xfs_file_remap_range(
if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
xfs_log_force_inode(dest);
out_unlock:
- xfs_iunlock2_io_mmap(src, dest);
+ xfs_iunlock2_remapping(src, dest);
if (ret)
trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
return remapped > 0 ? remapped : ret;
@@ -1328,6 +1365,7 @@ __xfs_filemap_fault(
struct inode *inode = file_inode(vmf->vma->vm_file);
struct xfs_inode *ip = XFS_I(inode);
vm_fault_t ret;
+ unsigned int lock_mode = 0;
trace_xfs_filemap_fault(ip, order, write_fault);
@@ -1336,25 +1374,24 @@ __xfs_filemap_fault(
file_update_time(vmf->vma->vm_file);
}
+ if (IS_DAX(inode) || write_fault)
+ lock_mode = xfs_ilock_for_write_fault(XFS_I(inode));
+
if (IS_DAX(inode)) {
pfn_t pfn;
- xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
ret = xfs_dax_fault(vmf, order, write_fault, &pfn);
if (ret & VM_FAULT_NEEDDSYNC)
ret = dax_finish_sync_fault(vmf, order, pfn);
- xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
+ } else if (write_fault) {
+ ret = iomap_page_mkwrite(vmf, &xfs_page_mkwrite_iomap_ops);
} else {
- if (write_fault) {
- xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
- ret = iomap_page_mkwrite(vmf,
- &xfs_page_mkwrite_iomap_ops);
- xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
- } else {
- ret = filemap_fault(vmf);
- }
+ ret = filemap_fault(vmf);
}
+ if (lock_mode)
+ xfs_iunlock(XFS_I(inode), lock_mode);
+
if (write_fault)
sb_end_pagefault(inode->i_sb);
return ret;
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index 736e5545f584..5a72217f5feb 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -23,7 +23,7 @@
#include "xfs_refcount.h"
#include "xfs_refcount_btree.h"
#include "xfs_alloc_btree.h"
-#include "xfs_rtalloc.h"
+#include "xfs_rtbitmap.h"
#include "xfs_ag.h"
/* Convert an xfs_fsmap to an fsmap. */
@@ -483,11 +483,11 @@ xfs_getfsmap_rtdev_rtbitmap_helper(
xfs_rtblock_t rtbno;
xfs_daddr_t rec_daddr, len_daddr;
- rtbno = rec->ar_startext * mp->m_sb.sb_rextsize;
+ rtbno = xfs_rtx_to_rtb(mp, rec->ar_startext);
rec_daddr = XFS_FSB_TO_BB(mp, rtbno);
irec.rm_startblock = rtbno;
- rtbno = rec->ar_extcount * mp->m_sb.sb_rextsize;
+ rtbno = xfs_rtx_to_rtb(mp, rec->ar_extcount);
len_daddr = XFS_FSB_TO_BB(mp, rtbno);
irec.rm_blockcount = rtbno;
@@ -514,7 +514,7 @@ xfs_getfsmap_rtdev_rtbitmap(
uint64_t eofs;
int error;
- eofs = XFS_FSB_TO_BB(mp, mp->m_sb.sb_rextents * mp->m_sb.sb_rextsize);
+ eofs = XFS_FSB_TO_BB(mp, xfs_rtx_to_rtb(mp, mp->m_sb.sb_rextents));
if (keys[0].fmr_physical >= eofs)
return 0;
start_rtb = XFS_BB_TO_FSBT(mp,
@@ -539,11 +539,8 @@ xfs_getfsmap_rtdev_rtbitmap(
* Set up query parameters to return free rtextents covering the range
* we want.
*/
- alow.ar_startext = start_rtb;
- ahigh.ar_startext = end_rtb;
- do_div(alow.ar_startext, mp->m_sb.sb_rextsize);
- if (do_div(ahigh.ar_startext, mp->m_sb.sb_rextsize))
- ahigh.ar_startext++;
+ alow.ar_startext = xfs_rtb_to_rtx(mp, start_rtb);
+ ahigh.ar_startext = xfs_rtb_to_rtxup(mp, end_rtb);
error = xfs_rtalloc_query_range(mp, tp, &alow, &ahigh,
xfs_getfsmap_rtdev_rtbitmap_helper, info);
if (error)
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 36f5cf802c07..c0f1c89786c2 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -918,6 +918,13 @@ xfs_droplink(
xfs_trans_t *tp,
xfs_inode_t *ip)
{
+ if (VFS_I(ip)->i_nlink == 0) {
+ xfs_alert(ip->i_mount,
+ "%s: Attempt to drop inode (%llu) with nlink zero.",
+ __func__, ip->i_ino);
+ return -EFSCORRUPTED;
+ }
+
xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
drop_nlink(VFS_I(ip));
@@ -3621,6 +3628,23 @@ xfs_iunlock2_io_mmap(
inode_unlock(VFS_I(ip1));
}
+/* Drop the MMAPLOCK and the IOLOCK after a remap completes. */
+void
+xfs_iunlock2_remapping(
+ struct xfs_inode *ip1,
+ struct xfs_inode *ip2)
+{
+ xfs_iflags_clear(ip1, XFS_IREMAPPING);
+
+ if (ip1 != ip2)
+ xfs_iunlock(ip1, XFS_MMAPLOCK_SHARED);
+ xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
+
+ if (ip1 != ip2)
+ inode_unlock_shared(VFS_I(ip1));
+ inode_unlock(VFS_I(ip2));
+}
+
/*
* Reload the incore inode list for this inode. Caller should ensure that
* the link count cannot change, either by taking ILOCK_SHARED or otherwise
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 0c5bdb91152e..3dc47937da5d 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -347,6 +347,14 @@ static inline bool xfs_inode_has_large_extent_counts(struct xfs_inode *ip)
/* Quotacheck is running but inode has not been added to quota counts. */
#define XFS_IQUOTAUNCHECKED (1 << 14)
+/*
+ * Remap in progress. Callers that wish to update file data while
+ * holding a shared IOLOCK or MMAPLOCK must drop the lock and retake
+ * the lock in exclusive mode. Relocking the file will block until
+ * IREMAPPING is cleared.
+ */
+#define XFS_IREMAPPING (1U << 15)
+
/* All inode state flags related to inode reclaim. */
#define XFS_ALL_IRECLAIM_FLAGS (XFS_IRECLAIMABLE | \
XFS_IRECLAIM | \
@@ -595,6 +603,7 @@ void xfs_end_io(struct work_struct *work);
int xfs_ilock2_io_mmap(struct xfs_inode *ip1, struct xfs_inode *ip2);
void xfs_iunlock2_io_mmap(struct xfs_inode *ip1, struct xfs_inode *ip2);
+void xfs_iunlock2_remapping(struct xfs_inode *ip1, struct xfs_inode *ip2);
static inline bool
xfs_inode_unlinked_incomplete(
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 17c51804f9c6..cd7803fda8b1 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -19,6 +19,7 @@
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_error.h"
+#include "xfs_rtbitmap.h"
#include <linux/iversion.h>
@@ -107,7 +108,7 @@ xfs_inode_item_precommit(
*/
if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
(ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) &&
- (ip->i_extsize % ip->i_mount->m_sb.sb_rextsize) > 0) {
+ xfs_extlen_to_rtxmod(ip->i_mount, ip->i_extsize) > 0) {
ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
XFS_DIFLAG_EXTSZINHERIT);
ip->i_extsize = 0;
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 55bb01173cde..a82470e027f7 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -38,6 +38,7 @@
#include "xfs_reflink.h"
#include "xfs_ioctl.h"
#include "xfs_xattr.h"
+#include "xfs_rtbitmap.h"
#include <linux/mount.h>
#include <linux/namei.h>
@@ -1004,7 +1005,7 @@ xfs_fill_fsxattr(
* later.
*/
if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
- ip->i_extsize % mp->m_sb.sb_rextsize > 0) {
+ xfs_extlen_to_rtxmod(mp, ip->i_extsize) > 0) {
fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE |
FS_XFLAG_EXTSZINHERIT);
fa->fsx_extsize = 0;
@@ -1130,7 +1131,7 @@ xfs_ioctl_setattr_xflags(
/* If realtime flag is set then must have realtime device */
if (fa->fsx_xflags & FS_XFLAG_REALTIME) {
if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 ||
- (ip->i_extsize % mp->m_sb.sb_rextsize))
+ xfs_extlen_to_rtxmod(mp, ip->i_extsize))
return -EINVAL;
}
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index e9d317a3dafe..d7873e0360f0 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -198,6 +198,18 @@ static inline uint64_t howmany_64(uint64_t x, uint32_t y)
return x;
}
+/* If @b is a power of 2, return log2(b). Else return -1. */
+static inline int8_t log2_if_power2(unsigned long b)
+{
+ return is_power_of_2(b) ? ilog2(b) : -1;
+}
+
+/* If @b is a power of 2, return a mask of the lower bits, else return zero. */
+static inline unsigned long long mask64_if_power2(unsigned long b)
+{
+ return is_power_of_2(b) ? b - 1 : 0;
+}
+
int xfs_rw_bdev(struct block_device *bdev, sector_t sector, unsigned int count,
char *data, enum req_op op);
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 219681d29fbc..503fe3c7edbf 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -101,9 +101,9 @@ typedef struct xfs_mount {
/*
* Optional cache of rt summary level per bitmap block with the
- * invariant that m_rsum_cache[bbno] <= the minimum i for which
- * rsum[i][bbno] != 0. Reads and writes are serialized by the rsumip
- * inode lock.
+ * invariant that m_rsum_cache[bbno] > the maximum i for which
+ * rsum[i][bbno] != 0, or 0 if rsum[i][bbno] == 0 for all i.
+ * Reads and writes are serialized by the rsumip inode lock.
*/
uint8_t *m_rsum_cache;
struct xfs_mru_cache *m_filestream; /* per-mount filestream data */
@@ -119,6 +119,7 @@ typedef struct xfs_mount {
uint8_t m_blkbb_log; /* blocklog - BBSHIFT */
uint8_t m_agno_log; /* log #ag's */
uint8_t m_sectbb_log; /* sectlog - BBSHIFT */
+ int8_t m_rtxblklog; /* log2 of rextsize, if possible */
uint m_blockmask; /* sb_blocksize-1 */
uint m_blockwsize; /* sb_blocksize in words */
uint m_blockwmask; /* blockwsize-1 */
@@ -152,6 +153,7 @@ typedef struct xfs_mount {
uint64_t m_features; /* active filesystem features */
uint64_t m_low_space[XFS_LOWSP_MAX];
uint64_t m_low_rtexts[XFS_LOWSP_MAX];
+ uint64_t m_rtxblkmask; /* rt extent block mask */
struct xfs_ino_geometry m_ino_geo; /* inode geometry */
struct xfs_trans_resv m_resv; /* precomputed res values */
/* low free space thresholds */
diff --git a/fs/xfs/xfs_ondisk.h b/fs/xfs/xfs_ondisk.h
index c4cc99b70dd3..21a7e350b4c5 100644
--- a/fs/xfs/xfs_ondisk.h
+++ b/fs/xfs/xfs_ondisk.h
@@ -72,6 +72,10 @@ xfs_check_ondisk_structs(void)
XFS_CHECK_STRUCT_SIZE(xfs_attr_leaf_map_t, 4);
XFS_CHECK_STRUCT_SIZE(xfs_attr_leaf_name_local_t, 4);
+ /* realtime structures */
+ XFS_CHECK_STRUCT_SIZE(union xfs_rtword_raw, 4);
+ XFS_CHECK_STRUCT_SIZE(union xfs_suminfo_raw, 4);
+
/*
* m68k has problems with xfs_attr_leaf_name_remote_t, but we pad it to
* 4 bytes anyway so it's not obviously a problem. Hence for the moment
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index eb9102453aff..658edee8381d 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -1540,6 +1540,10 @@ xfs_reflink_remap_prep(
if (ret)
goto out_unlock;
+ xfs_iflags_set(src, XFS_IREMAPPING);
+ if (inode_in != inode_out)
+ xfs_ilock_demote(src, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
+
return 0;
out_unlock:
xfs_iunlock2_io_mmap(src, dest);
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 2e1a4e5cd03d..88c48de5c9c8 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -19,6 +19,7 @@
#include "xfs_icache.h"
#include "xfs_rtalloc.h"
#include "xfs_sb.h"
+#include "xfs_rtbitmap.h"
/*
* Read and return the summary information for a given extent size,
@@ -28,48 +29,48 @@
*/
static int
xfs_rtget_summary(
- xfs_mount_t *mp, /* file system mount structure */
- xfs_trans_t *tp, /* transaction pointer */
- int log, /* log2 of extent size */
- xfs_rtblock_t bbno, /* bitmap block number */
- struct xfs_buf **rbpp, /* in/out: summary block buffer */
- xfs_fsblock_t *rsb, /* in/out: summary block number */
- xfs_suminfo_t *sum) /* out: summary info for this block */
+ struct xfs_rtalloc_args *args,
+ int log, /* log2 of extent size */
+ xfs_fileoff_t bbno, /* bitmap block number */
+ xfs_suminfo_t *sum) /* out: summary info for this block */
{
- return xfs_rtmodify_summary_int(mp, tp, log, bbno, 0, rbpp, rsb, sum);
+ return xfs_rtmodify_summary_int(args, log, bbno, 0, sum);
}
/*
* Return whether there are any free extents in the size range given
* by low and high, for the bitmap block bbno.
*/
-STATIC int /* error */
+STATIC int
xfs_rtany_summary(
- xfs_mount_t *mp, /* file system mount structure */
- xfs_trans_t *tp, /* transaction pointer */
- int low, /* low log2 extent size */
- int high, /* high log2 extent size */
- xfs_rtblock_t bbno, /* bitmap block number */
- struct xfs_buf **rbpp, /* in/out: summary block buffer */
- xfs_fsblock_t *rsb, /* in/out: summary block number */
- int *stat) /* out: any good extents here? */
+ struct xfs_rtalloc_args *args,
+ int low, /* low log2 extent size */
+ int high, /* high log2 extent size */
+ xfs_fileoff_t bbno, /* bitmap block number */
+ int *maxlog) /* out: max log2 extent size free */
{
- int error; /* error value */
- int log; /* loop counter, log2 of ext. size */
- xfs_suminfo_t sum; /* summary data */
-
- /* There are no extents at levels < m_rsum_cache[bbno]. */
- if (mp->m_rsum_cache && low < mp->m_rsum_cache[bbno])
- low = mp->m_rsum_cache[bbno];
+ struct xfs_mount *mp = args->mp;
+ int error;
+ int log; /* loop counter, log2 of ext. size */
+ xfs_suminfo_t sum; /* summary data */
+
+ /* There are no extents at levels >= m_rsum_cache[bbno]. */
+ if (mp->m_rsum_cache) {
+ high = min(high, mp->m_rsum_cache[bbno] - 1);
+ if (low > high) {
+ *maxlog = -1;
+ return 0;
+ }
+ }
/*
* Loop over logs of extent sizes.
*/
- for (log = low; log <= high; log++) {
+ for (log = high; log >= low; log--) {
/*
* Get one summary datum.
*/
- error = xfs_rtget_summary(mp, tp, log, bbno, rbpp, rsb, &sum);
+ error = xfs_rtget_summary(args, log, bbno, &sum);
if (error) {
return error;
}
@@ -77,18 +78,18 @@ xfs_rtany_summary(
* If there are any, return success.
*/
if (sum) {
- *stat = 1;
+ *maxlog = log;
goto out;
}
}
/*
* Found nothing, return failure.
*/
- *stat = 0;
+ *maxlog = -1;
out:
- /* There were no extents at levels < log. */
- if (mp->m_rsum_cache && log > mp->m_rsum_cache[bbno])
- mp->m_rsum_cache[bbno] = log;
+ /* There were no extents at levels > log. */
+ if (mp->m_rsum_cache && log + 1 < mp->m_rsum_cache[bbno])
+ mp->m_rsum_cache[bbno] = log + 1;
return 0;
}
@@ -97,60 +98,54 @@ out:
* Copy and transform the summary file, given the old and new
* parameters in the mount structures.
*/
-STATIC int /* error */
+STATIC int
xfs_rtcopy_summary(
- xfs_mount_t *omp, /* old file system mount point */
- xfs_mount_t *nmp, /* new file system mount point */
- xfs_trans_t *tp) /* transaction pointer */
+ struct xfs_rtalloc_args *oargs,
+ struct xfs_rtalloc_args *nargs)
{
- xfs_rtblock_t bbno; /* bitmap block number */
- struct xfs_buf *bp; /* summary buffer */
- int error; /* error return value */
- int log; /* summary level number (log length) */
- xfs_suminfo_t sum; /* summary data */
- xfs_fsblock_t sumbno; /* summary block number */
+ xfs_fileoff_t bbno; /* bitmap block number */
+ int error;
+ int log; /* summary level number (log length) */
+ xfs_suminfo_t sum; /* summary data */
- bp = NULL;
- for (log = omp->m_rsumlevels - 1; log >= 0; log--) {
- for (bbno = omp->m_sb.sb_rbmblocks - 1;
+ for (log = oargs->mp->m_rsumlevels - 1; log >= 0; log--) {
+ for (bbno = oargs->mp->m_sb.sb_rbmblocks - 1;
(xfs_srtblock_t)bbno >= 0;
bbno--) {
- error = xfs_rtget_summary(omp, tp, log, bbno, &bp,
- &sumbno, &sum);
+ error = xfs_rtget_summary(oargs, log, bbno, &sum);
if (error)
- return error;
+ goto out;
if (sum == 0)
continue;
- error = xfs_rtmodify_summary(omp, tp, log, bbno, -sum,
- &bp, &sumbno);
+ error = xfs_rtmodify_summary(oargs, log, bbno, -sum);
if (error)
- return error;
- error = xfs_rtmodify_summary(nmp, tp, log, bbno, sum,
- &bp, &sumbno);
+ goto out;
+ error = xfs_rtmodify_summary(nargs, log, bbno, sum);
if (error)
- return error;
+ goto out;
ASSERT(sum > 0);
}
}
+ error = 0;
+out:
+ xfs_rtbuf_cache_relse(oargs);
- return 0;
+ return error;
}
/*
* Mark an extent specified by start and len allocated.
* Updates all the summary information as well as the bitmap.
*/
-STATIC int /* error */
+STATIC int
xfs_rtallocate_range(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_rtblock_t start, /* start block to allocate */
- xfs_extlen_t len, /* length to allocate */
- struct xfs_buf **rbpp, /* in/out: summary block buffer */
- xfs_fsblock_t *rsb) /* in/out: summary block number */
+ struct xfs_rtalloc_args *args,
+ xfs_rtxnum_t start, /* start rtext to allocate */
+ xfs_rtxlen_t len) /* length to allocate */
{
- xfs_rtblock_t end; /* end of the allocated extent */
- int error; /* error value */
- xfs_rtblock_t postblock = 0; /* first block allocated > end */
- xfs_rtblock_t preblock = 0; /* first block allocated < start */
+ struct xfs_mount *mp = args->mp;
+ xfs_rtxnum_t end; /* end of the allocated rtext */
+ int error;
+ xfs_rtxnum_t postblock = 0; /* first rtext allocated > end */
+ xfs_rtxnum_t preblock = 0; /* first rtext allocated < start */
end = start + len - 1;
/*
@@ -158,15 +153,15 @@ xfs_rtallocate_range(
* We need to find the beginning and end of the extent so we can
* properly update the summary.
*/
- error = xfs_rtfind_back(mp, tp, start, 0, &preblock);
+ error = xfs_rtfind_back(args, start, 0, &preblock);
if (error) {
return error;
}
/*
* Find the next allocated block (end of free extent).
*/
- error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
- &postblock);
+ error = xfs_rtfind_forw(args, end, mp->m_sb.sb_rextents - 1,
+ &postblock);
if (error) {
return error;
}
@@ -174,9 +169,9 @@ xfs_rtallocate_range(
* Decrement the summary information corresponding to the entire
* (old) free extent.
*/
- error = xfs_rtmodify_summary(mp, tp,
- XFS_RTBLOCKLOG(postblock + 1 - preblock),
- XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb);
+ error = xfs_rtmodify_summary(args,
+ XFS_RTBLOCKLOG(postblock + 1 - preblock),
+ xfs_rtx_to_rbmblock(mp, preblock), -1);
if (error) {
return error;
}
@@ -185,9 +180,9 @@ xfs_rtallocate_range(
* old extent, add summary data for them to be free.
*/
if (preblock < start) {
- error = xfs_rtmodify_summary(mp, tp,
- XFS_RTBLOCKLOG(start - preblock),
- XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb);
+ error = xfs_rtmodify_summary(args,
+ XFS_RTBLOCKLOG(start - preblock),
+ xfs_rtx_to_rbmblock(mp, preblock), 1);
if (error) {
return error;
}
@@ -197,9 +192,9 @@ xfs_rtallocate_range(
* old extent, add summary data for them to be free.
*/
if (postblock > end) {
- error = xfs_rtmodify_summary(mp, tp,
- XFS_RTBLOCKLOG(postblock - end),
- XFS_BITTOBLOCK(mp, end + 1), 1, rbpp, rsb);
+ error = xfs_rtmodify_summary(args,
+ XFS_RTBLOCKLOG(postblock - end),
+ xfs_rtx_to_rbmblock(mp, end + 1), 1);
if (error) {
return error;
}
@@ -207,54 +202,69 @@ xfs_rtallocate_range(
/*
* Modify the bitmap to mark this extent allocated.
*/
- error = xfs_rtmodify_range(mp, tp, start, len, 0);
+ error = xfs_rtmodify_range(args, start, len, 0);
return error;
}
/*
+ * Make sure we don't run off the end of the rt volume. Be careful that
+ * adjusting maxlen downwards doesn't cause us to fail the alignment checks.
+ */
+static inline xfs_rtxlen_t
+xfs_rtallocate_clamp_len(
+ struct xfs_mount *mp,
+ xfs_rtxnum_t startrtx,
+ xfs_rtxlen_t rtxlen,
+ xfs_rtxlen_t prod)
+{
+ xfs_rtxlen_t ret;
+
+ ret = min(mp->m_sb.sb_rextents, startrtx + rtxlen) - startrtx;
+ return rounddown(ret, prod);
+}
+
+/*
* Attempt to allocate an extent minlen<=len<=maxlen starting from
* bitmap block bbno. If we don't get maxlen then use prod to trim
- * the length, if given. Returns error; returns starting block in *rtblock.
+ * the length, if given. Returns error; returns starting block in *rtx.
* The lengths are all in rtextents.
*/
-STATIC int /* error */
+STATIC int
xfs_rtallocate_extent_block(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_rtblock_t bbno, /* bitmap block number */
- xfs_extlen_t minlen, /* minimum length to allocate */
- xfs_extlen_t maxlen, /* maximum length to allocate */
- xfs_extlen_t *len, /* out: actual length allocated */
- xfs_rtblock_t *nextp, /* out: next block to try */
- struct xfs_buf **rbpp, /* in/out: summary block buffer */
- xfs_fsblock_t *rsb, /* in/out: summary block number */
- xfs_extlen_t prod, /* extent product factor */
- xfs_rtblock_t *rtblock) /* out: start block allocated */
+ struct xfs_rtalloc_args *args,
+ xfs_fileoff_t bbno, /* bitmap block number */
+ xfs_rtxlen_t minlen, /* minimum length to allocate */
+ xfs_rtxlen_t maxlen, /* maximum length to allocate */
+ xfs_rtxlen_t *len, /* out: actual length allocated */
+ xfs_rtxnum_t *nextp, /* out: next rtext to try */
+ xfs_rtxlen_t prod, /* extent product factor */
+ xfs_rtxnum_t *rtx) /* out: start rtext allocated */
{
- xfs_rtblock_t besti; /* best rtblock found so far */
- xfs_rtblock_t bestlen; /* best length found so far */
- xfs_rtblock_t end; /* last rtblock in chunk */
- int error; /* error value */
- xfs_rtblock_t i; /* current rtblock trying */
- xfs_rtblock_t next; /* next rtblock to try */
- int stat; /* status from internal calls */
+ struct xfs_mount *mp = args->mp;
+ xfs_rtxnum_t besti; /* best rtext found so far */
+ xfs_rtxnum_t bestlen;/* best length found so far */
+ xfs_rtxnum_t end; /* last rtext in chunk */
+ int error;
+ xfs_rtxnum_t i; /* current rtext trying */
+ xfs_rtxnum_t next; /* next rtext to try */
+ int stat; /* status from internal calls */
/*
* Loop over all the extents starting in this bitmap block,
* looking for one that's long enough.
*/
- for (i = XFS_BLOCKTOBIT(mp, bbno), besti = -1, bestlen = 0,
- end = XFS_BLOCKTOBIT(mp, bbno + 1) - 1;
+ for (i = xfs_rbmblock_to_rtx(mp, bbno), besti = -1, bestlen = 0,
+ end = xfs_rbmblock_to_rtx(mp, bbno + 1) - 1;
i <= end;
i++) {
/* Make sure we don't scan off the end of the rt volume. */
- maxlen = min(mp->m_sb.sb_rextents, i + maxlen) - i;
+ maxlen = xfs_rtallocate_clamp_len(mp, i, maxlen, prod);
/*
* See if there's a free extent of maxlen starting at i.
* If it's not so then next will contain the first non-free.
*/
- error = xfs_rtcheck_range(mp, tp, i, maxlen, 1, &next, &stat);
+ error = xfs_rtcheck_range(args, i, maxlen, 1, &next, &stat);
if (error) {
return error;
}
@@ -262,13 +272,12 @@ xfs_rtallocate_extent_block(
/*
* i for maxlen is all free, allocate and return that.
*/
- error = xfs_rtallocate_range(mp, tp, i, maxlen, rbpp,
- rsb);
+ error = xfs_rtallocate_range(args, i, maxlen);
if (error) {
return error;
}
*len = maxlen;
- *rtblock = i;
+ *rtx = i;
return 0;
}
/*
@@ -278,7 +287,7 @@ xfs_rtallocate_extent_block(
* so far, remember it.
*/
if (minlen < maxlen) {
- xfs_rtblock_t thislen; /* this extent size */
+ xfs_rtxnum_t thislen; /* this extent size */
thislen = next - i;
if (thislen >= minlen && thislen > bestlen) {
@@ -290,7 +299,7 @@ xfs_rtallocate_extent_block(
* If not done yet, find the start of the next free space.
*/
if (next < end) {
- error = xfs_rtfind_forw(mp, tp, next, end, &i);
+ error = xfs_rtfind_forw(args, next, end, &i);
if (error) {
return error;
}
@@ -301,7 +310,7 @@ xfs_rtallocate_extent_block(
* Searched the whole thing & didn't find a maxlen free extent.
*/
if (minlen < maxlen && besti != -1) {
- xfs_extlen_t p; /* amount to trim length by */
+ xfs_rtxlen_t p; /* amount to trim length by */
/*
* If size should be a multiple of prod, make that so.
@@ -315,51 +324,49 @@ xfs_rtallocate_extent_block(
/*
* Allocate besti for bestlen & return that.
*/
- error = xfs_rtallocate_range(mp, tp, besti, bestlen, rbpp, rsb);
+ error = xfs_rtallocate_range(args, besti, bestlen);
if (error) {
return error;
}
*len = bestlen;
- *rtblock = besti;
+ *rtx = besti;
return 0;
}
/*
* Allocation failed. Set *nextp to the next block to try.
*/
*nextp = next;
- *rtblock = NULLRTBLOCK;
+ *rtx = NULLRTEXTNO;
return 0;
}
/*
* Allocate an extent of length minlen<=len<=maxlen, starting at block
* bno. If we don't get maxlen then use prod to trim the length, if given.
- * Returns error; returns starting block in *rtblock.
+ * Returns error; returns starting block in *rtx.
* The lengths are all in rtextents.
*/
-STATIC int /* error */
+STATIC int
xfs_rtallocate_extent_exact(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_rtblock_t bno, /* starting block number to allocate */
- xfs_extlen_t minlen, /* minimum length to allocate */
- xfs_extlen_t maxlen, /* maximum length to allocate */
- xfs_extlen_t *len, /* out: actual length allocated */
- struct xfs_buf **rbpp, /* in/out: summary block buffer */
- xfs_fsblock_t *rsb, /* in/out: summary block number */
- xfs_extlen_t prod, /* extent product factor */
- xfs_rtblock_t *rtblock) /* out: start block allocated */
+ struct xfs_rtalloc_args *args,
+ xfs_rtxnum_t start, /* starting rtext number to allocate */
+ xfs_rtxlen_t minlen, /* minimum length to allocate */
+ xfs_rtxlen_t maxlen, /* maximum length to allocate */
+ xfs_rtxlen_t *len, /* out: actual length allocated */
+ xfs_rtxlen_t prod, /* extent product factor */
+ xfs_rtxnum_t *rtx) /* out: start rtext allocated */
{
- int error; /* error value */
- xfs_extlen_t i; /* extent length trimmed due to prod */
- int isfree; /* extent is free */
- xfs_rtblock_t next; /* next block to try (dummy) */
+ int error;
+ xfs_rtxlen_t i; /* extent length trimmed due to prod */
+ int isfree; /* extent is free */
+ xfs_rtxnum_t next; /* next rtext to try (dummy) */
- ASSERT(minlen % prod == 0 && maxlen % prod == 0);
+ ASSERT(minlen % prod == 0);
+ ASSERT(maxlen % prod == 0);
/*
* Check if the range in question (for maxlen) is free.
*/
- error = xfs_rtcheck_range(mp, tp, bno, maxlen, 1, &next, &isfree);
+ error = xfs_rtcheck_range(args, start, maxlen, 1, &next, &isfree);
if (error) {
return error;
}
@@ -367,23 +374,23 @@ xfs_rtallocate_extent_exact(
/*
* If it is, allocate it and return success.
*/
- error = xfs_rtallocate_range(mp, tp, bno, maxlen, rbpp, rsb);
+ error = xfs_rtallocate_range(args, start, maxlen);
if (error) {
return error;
}
*len = maxlen;
- *rtblock = bno;
+ *rtx = start;
return 0;
}
/*
* If not, allocate what there is, if it's at least minlen.
*/
- maxlen = next - bno;
+ maxlen = next - start;
if (maxlen < minlen) {
/*
* Failed, return failure status.
*/
- *rtblock = NULLRTBLOCK;
+ *rtx = NULLRTEXTNO;
return 0;
}
/*
@@ -395,81 +402,82 @@ xfs_rtallocate_extent_exact(
/*
* Now we can't do it, return failure status.
*/
- *rtblock = NULLRTBLOCK;
+ *rtx = NULLRTEXTNO;
return 0;
}
}
/*
* Allocate what we can and return it.
*/
- error = xfs_rtallocate_range(mp, tp, bno, maxlen, rbpp, rsb);
+ error = xfs_rtallocate_range(args, start, maxlen);
if (error) {
return error;
}
*len = maxlen;
- *rtblock = bno;
+ *rtx = start;
return 0;
}
/*
* Allocate an extent of length minlen<=len<=maxlen, starting as near
- * to bno as possible. If we don't get maxlen then use prod to trim
+ * to start as possible. If we don't get maxlen then use prod to trim
* the length, if given. The lengths are all in rtextents.
*/
-STATIC int /* error */
+STATIC int
xfs_rtallocate_extent_near(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_rtblock_t bno, /* starting block number to allocate */
- xfs_extlen_t minlen, /* minimum length to allocate */
- xfs_extlen_t maxlen, /* maximum length to allocate */
- xfs_extlen_t *len, /* out: actual length allocated */
- struct xfs_buf **rbpp, /* in/out: summary block buffer */
- xfs_fsblock_t *rsb, /* in/out: summary block number */
- xfs_extlen_t prod, /* extent product factor */
- xfs_rtblock_t *rtblock) /* out: start block allocated */
+ struct xfs_rtalloc_args *args,
+ xfs_rtxnum_t start, /* starting rtext number to allocate */
+ xfs_rtxlen_t minlen, /* minimum length to allocate */
+ xfs_rtxlen_t maxlen, /* maximum length to allocate */
+ xfs_rtxlen_t *len, /* out: actual length allocated */
+ xfs_rtxlen_t prod, /* extent product factor */
+ xfs_rtxnum_t *rtx) /* out: start rtext allocated */
{
- int any; /* any useful extents from summary */
- xfs_rtblock_t bbno; /* bitmap block number */
- int error; /* error value */
- int i; /* bitmap block offset (loop control) */
- int j; /* secondary loop control */
- int log2len; /* log2 of minlen */
- xfs_rtblock_t n; /* next block to try */
- xfs_rtblock_t r; /* result block */
-
- ASSERT(minlen % prod == 0 && maxlen % prod == 0);
+ struct xfs_mount *mp = args->mp;
+ int maxlog; /* max useful extent from summary */
+ xfs_fileoff_t bbno; /* bitmap block number */
+ int error;
+ int i; /* bitmap block offset (loop control) */
+ int j; /* secondary loop control */
+ int log2len; /* log2 of minlen */
+ xfs_rtxnum_t n; /* next rtext to try */
+ xfs_rtxnum_t r; /* result rtext */
+
+ ASSERT(minlen % prod == 0);
+ ASSERT(maxlen % prod == 0);
+
/*
* If the block number given is off the end, silently set it to
* the last block.
*/
- if (bno >= mp->m_sb.sb_rextents)
- bno = mp->m_sb.sb_rextents - 1;
+ if (start >= mp->m_sb.sb_rextents)
+ start = mp->m_sb.sb_rextents - 1;
/* Make sure we don't run off the end of the rt volume. */
- maxlen = min(mp->m_sb.sb_rextents, bno + maxlen) - bno;
+ maxlen = xfs_rtallocate_clamp_len(mp, start, maxlen, prod);
if (maxlen < minlen) {
- *rtblock = NULLRTBLOCK;
+ *rtx = NULLRTEXTNO;
return 0;
}
/*
* Try the exact allocation first.
*/
- error = xfs_rtallocate_extent_exact(mp, tp, bno, minlen, maxlen, len,
- rbpp, rsb, prod, &r);
+ error = xfs_rtallocate_extent_exact(args, start, minlen, maxlen, len,
+ prod, &r);
if (error) {
return error;
}
/*
* If the exact allocation worked, return that.
*/
- if (r != NULLRTBLOCK) {
- *rtblock = r;
+ if (r != NULLRTEXTNO) {
+ *rtx = r;
return 0;
}
- bbno = XFS_BITTOBLOCK(mp, bno);
+ bbno = xfs_rtx_to_rbmblock(mp, start);
i = 0;
+ j = -1;
ASSERT(minlen != 0);
log2len = xfs_highbit32(minlen);
/*
@@ -480,8 +488,8 @@ xfs_rtallocate_extent_near(
* Get summary information of extents of all useful levels
* starting in this bitmap block.
*/
- error = xfs_rtany_summary(mp, tp, log2len, mp->m_rsumlevels - 1,
- bbno + i, rbpp, rsb, &any);
+ error = xfs_rtany_summary(args, log2len, mp->m_rsumlevels - 1,
+ bbno + i, &maxlog);
if (error) {
return error;
}
@@ -489,7 +497,10 @@ xfs_rtallocate_extent_near(
* If there are any useful extents starting here, try
* allocating one.
*/
- if (any) {
+ if (maxlog >= 0) {
+ xfs_extlen_t maxavail =
+ min_t(xfs_rtblock_t, maxlen,
+ (1ULL << (maxlog + 1)) - 1);
/*
* On the positive side of the starting location.
*/
@@ -498,17 +509,17 @@ xfs_rtallocate_extent_near(
* Try to allocate an extent starting in
* this block.
*/
- error = xfs_rtallocate_extent_block(mp, tp,
- bbno + i, minlen, maxlen, len, &n, rbpp,
- rsb, prod, &r);
+ error = xfs_rtallocate_extent_block(args,
+ bbno + i, minlen, maxavail, len,
+ &n, prod, &r);
if (error) {
return error;
}
/*
* If it worked, return it.
*/
- if (r != NULLRTBLOCK) {
- *rtblock = r;
+ if (r != NULLRTEXTNO) {
+ *rtx = r;
return 0;
}
}
@@ -516,68 +527,46 @@ xfs_rtallocate_extent_near(
* On the negative side of the starting location.
*/
else { /* i < 0 */
+ int maxblocks;
+
/*
- * Loop backwards through the bitmap blocks from
- * the starting point-1 up to where we are now.
- * There should be an extent which ends in this
- * bitmap block and is long enough.
+ * Loop backwards to find the end of the extent
+ * we found in the realtime summary.
+ *
+ * maxblocks is the maximum possible number of
+ * bitmap blocks from the start of the extent
+ * to the end of the extent.
*/
- for (j = -1; j > i; j--) {
- /*
- * Grab the summary information for
- * this bitmap block.
- */
- error = xfs_rtany_summary(mp, tp,
- log2len, mp->m_rsumlevels - 1,
- bbno + j, rbpp, rsb, &any);
- if (error) {
- return error;
- }
- /*
- * If there's no extent given in the
- * summary that means the extent we
- * found must carry over from an
- * earlier block. If there is an
- * extent given, we've already tried
- * that allocation, don't do it again.
- */
- if (any)
- continue;
- error = xfs_rtallocate_extent_block(mp,
- tp, bbno + j, minlen, maxlen,
- len, &n, rbpp, rsb, prod, &r);
+ if (maxlog == 0)
+ maxblocks = 0;
+ else if (maxlog < mp->m_blkbit_log)
+ maxblocks = 1;
+ else
+ maxblocks = 2 << (maxlog - mp->m_blkbit_log);
+
+ /*
+ * We need to check bbno + i + maxblocks down to
+ * bbno + i. We already checked bbno down to
+ * bbno + j + 1, so we don't need to check those
+ * again.
+ */
+ j = min(i + maxblocks, j);
+ for (; j >= i; j--) {
+ error = xfs_rtallocate_extent_block(args,
+ bbno + j, minlen,
+ maxavail, len, &n, prod,
+ &r);
if (error) {
return error;
}
/*
* If it works, return the extent.
*/
- if (r != NULLRTBLOCK) {
- *rtblock = r;
+ if (r != NULLRTEXTNO) {
+ *rtx = r;
return 0;
}
}
- /*
- * There weren't intervening bitmap blocks
- * with a long enough extent, or the
- * allocation didn't work for some reason
- * (i.e. it's a little * too short).
- * Try to allocate from the summary block
- * that we found.
- */
- error = xfs_rtallocate_extent_block(mp, tp,
- bbno + i, minlen, maxlen, len, &n, rbpp,
- rsb, prod, &r);
- if (error) {
- return error;
- }
- /*
- * If it works, return the extent.
- */
- if (r != NULLRTBLOCK) {
- *rtblock = r;
- return 0;
- }
}
}
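A rough sketch of the maxblocks bound used in the backwards scan above, assuming 4096-byte blocks so m_blkbit_log is 15 (32768 rtextents tracked per bitmap block): an extent recorded at summary level maxlog is shorter than 2^(maxlog+1) rtextents, so only a bounded number of earlier bitmap blocks can contain its start.

/* Sketch only; blkbit_log == 15 is an assumed geometry, not read from a superblock. */
static int max_bitmap_blocks(int maxlog, int blkbit_log)
{
	if (maxlog == 0)
		return 0;			/* single-rtextent runs cannot span blocks */
	if (maxlog < blkbit_log)
		return 1;			/* shorter than one bitmap block's worth of bits */
	return 2 << (maxlog - blkbit_log);	/* upper bound on blocks the run may straddle */
}
/* max_bitmap_blocks(17, 15) == 8: a run below (1 << 18) rtextents, 32768 per block. */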
/*
@@ -610,7 +599,7 @@ xfs_rtallocate_extent_near(
else
break;
}
- *rtblock = NULLRTBLOCK;
+ *rtx = NULLRTEXTNO;
return 0;
}
@@ -619,26 +608,25 @@ xfs_rtallocate_extent_near(
* specified. If we don't get maxlen then use prod to trim
* the length, if given. The lengths are all in rtextents.
*/
-STATIC int /* error */
+STATIC int
xfs_rtallocate_extent_size(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_extlen_t minlen, /* minimum length to allocate */
- xfs_extlen_t maxlen, /* maximum length to allocate */
- xfs_extlen_t *len, /* out: actual length allocated */
- struct xfs_buf **rbpp, /* in/out: summary block buffer */
- xfs_fsblock_t *rsb, /* in/out: summary block number */
- xfs_extlen_t prod, /* extent product factor */
- xfs_rtblock_t *rtblock) /* out: start block allocated */
+ struct xfs_rtalloc_args *args,
+ xfs_rtxlen_t minlen, /* minimum length to allocate */
+ xfs_rtxlen_t maxlen, /* maximum length to allocate */
+ xfs_rtxlen_t *len, /* out: actual length allocated */
+ xfs_rtxlen_t prod, /* extent product factor */
+ xfs_rtxnum_t *rtx) /* out: start rtext allocated */
{
- int error; /* error value */
- int i; /* bitmap block number */
- int l; /* level number (loop control) */
- xfs_rtblock_t n; /* next block to be tried */
- xfs_rtblock_t r; /* result block number */
- xfs_suminfo_t sum; /* summary information for extents */
-
- ASSERT(minlen % prod == 0 && maxlen % prod == 0);
+ struct xfs_mount *mp = args->mp;
+ int error;
+ xfs_fileoff_t i; /* bitmap block number */
+ int l; /* level number (loop control) */
+ xfs_rtxnum_t n; /* next rtext to be tried */
+ xfs_rtxnum_t r; /* result rtext number */
+ xfs_suminfo_t sum; /* summary information for extents */
+
+ ASSERT(minlen % prod == 0);
+ ASSERT(maxlen % prod == 0);
ASSERT(maxlen != 0);
/*
@@ -656,8 +644,7 @@ xfs_rtallocate_extent_size(
/*
* Get the summary for this level/block.
*/
- error = xfs_rtget_summary(mp, tp, l, i, rbpp, rsb,
- &sum);
+ error = xfs_rtget_summary(args, l, i, &sum);
if (error) {
return error;
}
@@ -669,16 +656,16 @@ xfs_rtallocate_extent_size(
/*
* Try allocating the extent.
*/
- error = xfs_rtallocate_extent_block(mp, tp, i, maxlen,
- maxlen, len, &n, rbpp, rsb, prod, &r);
+ error = xfs_rtallocate_extent_block(args, i, maxlen,
+ maxlen, len, &n, prod, &r);
if (error) {
return error;
}
/*
* If it worked, return that.
*/
- if (r != NULLRTBLOCK) {
- *rtblock = r;
+ if (r != NULLRTEXTNO) {
+ *rtx = r;
return 0;
}
/*
@@ -686,8 +673,8 @@ xfs_rtallocate_extent_size(
* allocator is beyond the next bitmap block,
* skip to that bitmap block.
*/
- if (XFS_BITTOBLOCK(mp, n) > i + 1)
- i = XFS_BITTOBLOCK(mp, n) - 1;
+ if (xfs_rtx_to_rbmblock(mp, n) > i + 1)
+ i = xfs_rtx_to_rbmblock(mp, n) - 1;
}
}
/*
@@ -695,7 +682,7 @@ xfs_rtallocate_extent_size(
* we're asking for a fixed size extent.
*/
if (minlen > --maxlen) {
- *rtblock = NULLRTBLOCK;
+ *rtx = NULLRTEXTNO;
return 0;
}
ASSERT(minlen != 0);
@@ -715,8 +702,7 @@ xfs_rtallocate_extent_size(
/*
* Get the summary information for this level/block.
*/
- error = xfs_rtget_summary(mp, tp, l, i, rbpp, rsb,
- &sum);
+ error = xfs_rtget_summary(args, l, i, &sum);
if (error) {
return error;
}
@@ -730,18 +716,18 @@ xfs_rtallocate_extent_size(
* minlen/maxlen are in the possible range for
* this summary level.
*/
- error = xfs_rtallocate_extent_block(mp, tp, i,
+ error = xfs_rtallocate_extent_block(args, i,
XFS_RTMAX(minlen, 1 << l),
XFS_RTMIN(maxlen, (1 << (l + 1)) - 1),
- len, &n, rbpp, rsb, prod, &r);
+ len, &n, prod, &r);
if (error) {
return error;
}
/*
* If it worked, return that extent.
*/
- if (r != NULLRTBLOCK) {
- *rtblock = r;
+ if (r != NULLRTEXTNO) {
+ *rtx = r;
return 0;
}
/*
@@ -749,14 +735,14 @@ xfs_rtallocate_extent_size(
* allocator is beyond the next bitmap block,
* skip to that bitmap block.
*/
- if (XFS_BITTOBLOCK(mp, n) > i + 1)
- i = XFS_BITTOBLOCK(mp, n) - 1;
+ if (xfs_rtx_to_rbmblock(mp, n) > i + 1)
+ i = xfs_rtx_to_rbmblock(mp, n) - 1;
}
}
/*
* Got nothing, return failure.
*/
- *rtblock = NULLRTBLOCK;
+ *rtx = NULLRTEXTNO;
return 0;
}
@@ -886,12 +872,14 @@ xfs_alloc_rsum_cache(
xfs_extlen_t rbmblocks) /* number of rt bitmap blocks */
{
/*
- * The rsum cache is initialized to all zeroes, which is trivially a
- * lower bound on the minimum level with any free extents. We can
- * continue without the cache if it couldn't be allocated.
+ * The rsum cache is initialized to the maximum value, which is
+ * trivially an upper bound on the maximum level with any free extents.
+ * We can continue without the cache if it couldn't be allocated.
*/
- mp->m_rsum_cache = kvzalloc(rbmblocks, GFP_KERNEL);
- if (!mp->m_rsum_cache)
+ mp->m_rsum_cache = kvmalloc(rbmblocks, GFP_KERNEL);
+ if (mp->m_rsum_cache)
+ memset(mp->m_rsum_cache, -1, rbmblocks);
+ else
xfs_warn(mp, "could not allocate realtime summary cache");
}
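A hypothetical sketch of the cached-upper-bound pattern the new initialization relies on (these helpers are illustrative, not the kernel's): every cache byte only needs to be an upper bound on the highest summary level holding free extents, so starting at 0xff is always correct and merely makes the first lookup scan more levels before the bound is tightened.

#include <stdint.h>
#include <string.h>

#define NLEVELS	16			/* assumed number of summary levels */

static uint8_t rsum_cache[64];		/* one upper bound per bitmap block */

static void cache_init(void)
{
	memset(rsum_cache, 0xff, sizeof(rsum_cache));	/* trivially valid upper bound */
}

static int highest_free_level(unsigned int bbno, int (*level_has_free)(unsigned int, int))
{
	int high = rsum_cache[bbno] < NLEVELS - 1 ? rsum_cache[bbno] : NLEVELS - 1;
	int l;

	for (l = high; l >= 0; l--) {
		if (level_has_free(bbno, l)) {
			rsum_cache[bbno] = l;	/* tighten the cached bound */
			return l;
		}
	}
	rsum_cache[bbno] = 0;
	return -1;				/* nothing free under this block */
}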
@@ -907,13 +895,13 @@ xfs_growfs_rt(
xfs_mount_t *mp, /* mount point for filesystem */
xfs_growfs_rt_t *in) /* growfs rt input struct */
{
- xfs_rtblock_t bmbno; /* bitmap block number */
+ xfs_fileoff_t bmbno; /* bitmap block number */
struct xfs_buf *bp; /* temporary buffer */
int error; /* error return value */
xfs_mount_t *nmp; /* new (fake) mount structure */
xfs_rfsblock_t nrblocks; /* new number of realtime blocks */
xfs_extlen_t nrbmblocks; /* new number of rt bitmap blocks */
- xfs_rtblock_t nrextents; /* new number of realtime extents */
+ xfs_rtxnum_t nrextents; /* new number of realtime extents */
uint8_t nrextslog; /* new log2 of sb_rextents */
xfs_extlen_t nrsumblocks; /* new number of summary blocks */
uint nrsumlevels; /* new rt summary levels */
@@ -922,7 +910,6 @@ xfs_growfs_rt(
xfs_extlen_t rbmblocks; /* current number of rt bitmap blocks */
xfs_extlen_t rsumblocks; /* current number of rt summary blks */
xfs_sb_t *sbp; /* old superblock */
- xfs_fsblock_t sumbno; /* summary block number */
uint8_t *rsum_cache; /* old summary cache */
sbp = &mp->m_sb;
@@ -954,7 +941,7 @@ xfs_growfs_rt(
return -EINVAL;
/* Unsupported realtime features. */
- if (xfs_has_rmapbt(mp) || xfs_has_reflink(mp))
+ if (xfs_has_rmapbt(mp) || xfs_has_reflink(mp) || xfs_has_quota(mp))
return -EOPNOTSUPP;
nrblocks = in->newblocks;
@@ -976,11 +963,10 @@ xfs_growfs_rt(
*/
nrextents = nrblocks;
do_div(nrextents, in->extsize);
- nrbmblocks = howmany_64(nrextents, NBBY * sbp->sb_blocksize);
+ nrbmblocks = xfs_rtbitmap_blockcount(mp, nrextents);
nrextslog = xfs_highbit32(nrextents);
nrsumlevels = nrextslog + 1;
- nrsumsize = (uint)sizeof(xfs_suminfo_t) * nrsumlevels * nrbmblocks;
- nrsumblocks = XFS_B_TO_FSB(mp, nrsumsize);
+ nrsumblocks = xfs_rtsummary_blockcount(mp, nrsumlevels, nrbmblocks);
nrsumsize = XFS_FSB_TO_B(mp, nrsumblocks);
/*
* New summary size can't be more than half the size of
@@ -1023,6 +1009,12 @@ xfs_growfs_rt(
((sbp->sb_rextents & ((1 << mp->m_blkbit_log) - 1)) != 0);
bmbno < nrbmblocks;
bmbno++) {
+ struct xfs_rtalloc_args args = {
+ .mp = mp,
+ };
+ struct xfs_rtalloc_args nargs = {
+ .mp = nmp,
+ };
struct xfs_trans *tp;
xfs_rfsblock_t nrblocks_step;
@@ -1032,19 +1024,17 @@ xfs_growfs_rt(
* Calculate new sb and mount fields for this round.
*/
nsbp->sb_rextsize = in->extsize;
+ nmp->m_rtxblklog = -1; /* don't use shift or masking */
nsbp->sb_rbmblocks = bmbno + 1;
nrblocks_step = (bmbno + 1) * NBBY * nsbp->sb_blocksize *
nsbp->sb_rextsize;
nsbp->sb_rblocks = min(nrblocks, nrblocks_step);
- nsbp->sb_rextents = nsbp->sb_rblocks;
- do_div(nsbp->sb_rextents, nsbp->sb_rextsize);
+ nsbp->sb_rextents = xfs_rtb_to_rtx(nmp, nsbp->sb_rblocks);
ASSERT(nsbp->sb_rextents != 0);
nsbp->sb_rextslog = xfs_highbit32(nsbp->sb_rextents);
nrsumlevels = nmp->m_rsumlevels = nsbp->sb_rextslog + 1;
- nrsumsize =
- (uint)sizeof(xfs_suminfo_t) * nrsumlevels *
- nsbp->sb_rbmblocks;
- nrsumblocks = XFS_B_TO_FSB(mp, nrsumsize);
+ nrsumblocks = xfs_rtsummary_blockcount(mp, nrsumlevels,
+ nsbp->sb_rbmblocks);
nmp->m_rsumsize = nrsumsize = XFS_FSB_TO_B(mp, nrsumblocks);
/*
* Start a transaction, get the log reservation.
@@ -1053,6 +1043,9 @@ xfs_growfs_rt(
&tp);
if (error)
break;
+ args.tp = tp;
+ nargs.tp = tp;
+
/*
* Lock out other callers by grabbing the bitmap inode lock.
*/
@@ -1086,7 +1079,7 @@ xfs_growfs_rt(
*/
if (sbp->sb_rbmblocks != nsbp->sb_rbmblocks ||
mp->m_rsumlevels != nmp->m_rsumlevels) {
- error = xfs_rtcopy_summary(mp, nmp, tp);
+ error = xfs_rtcopy_summary(&args, &nargs);
if (error)
goto error_cancel;
}
@@ -1111,9 +1104,9 @@ xfs_growfs_rt(
/*
* Free new extent.
*/
- bp = NULL;
- error = xfs_rtfree_range(nmp, tp, sbp->sb_rextents,
- nsbp->sb_rextents - sbp->sb_rextents, &bp, &sumbno);
+ error = xfs_rtfree_range(&nargs, sbp->sb_rextents,
+ nsbp->sb_rextents - sbp->sb_rextents);
+ xfs_rtbuf_cache_relse(&nargs);
if (error) {
error_cancel:
xfs_trans_cancel(tp);
@@ -1171,59 +1164,60 @@ out_free:
* parameters. The length units are all in realtime extents, as is the
* result block number.
*/
-int /* error */
+int
xfs_rtallocate_extent(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_rtblock_t bno, /* starting block number to allocate */
- xfs_extlen_t minlen, /* minimum length to allocate */
- xfs_extlen_t maxlen, /* maximum length to allocate */
- xfs_extlen_t *len, /* out: actual length allocated */
- int wasdel, /* was a delayed allocation extent */
- xfs_extlen_t prod, /* extent product factor */
- xfs_rtblock_t *rtblock) /* out: start block allocated */
+ struct xfs_trans *tp,
+ xfs_rtxnum_t start, /* starting rtext number to allocate */
+ xfs_rtxlen_t minlen, /* minimum length to allocate */
+ xfs_rtxlen_t maxlen, /* maximum length to allocate */
+ xfs_rtxlen_t *len, /* out: actual length allocated */
+ int wasdel, /* was a delayed allocation extent */
+ xfs_rtxlen_t prod, /* extent product factor */
+ xfs_rtxnum_t *rtblock) /* out: start rtext allocated */
{
- xfs_mount_t *mp = tp->t_mountp;
- int error; /* error value */
- xfs_rtblock_t r; /* result allocated block */
- xfs_fsblock_t sb; /* summary file block number */
- struct xfs_buf *sumbp; /* summary file block buffer */
-
- ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
+ struct xfs_rtalloc_args args = {
+ .mp = tp->t_mountp,
+ .tp = tp,
+ };
+ int error; /* error value */
+ xfs_rtxnum_t r; /* result allocated rtext */
+
+ ASSERT(xfs_isilocked(args.mp->m_rbmip, XFS_ILOCK_EXCL));
ASSERT(minlen > 0 && minlen <= maxlen);
/*
* If prod is set then figure out what to do to minlen and maxlen.
*/
if (prod > 1) {
- xfs_extlen_t i;
+ xfs_rtxlen_t i;
if ((i = maxlen % prod))
maxlen -= i;
if ((i = minlen % prod))
minlen += prod - i;
if (maxlen < minlen) {
- *rtblock = NULLRTBLOCK;
+ *rtblock = NULLRTEXTNO;
return 0;
}
}
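The prod handling above rounds maxlen down and minlen up to multiples of the extent size hint, failing the allocation if the window collapses; a standalone sketch with made-up lengths:

#include <stdbool.h>

/* Sketch of the prod rounding above; lengths are illustrative, not from a real fs. */
static bool trim_to_prod(unsigned int *minlen, unsigned int *maxlen, unsigned int prod)
{
	if (prod > 1) {
		unsigned int i;

		if ((i = *maxlen % prod))
			*maxlen -= i;			/* round the upper bound down */
		if ((i = *minlen % prod))
			*minlen += prod - i;		/* round the lower bound up */
		if (*maxlen < *minlen)
			return false;			/* window collapsed: no allocation */
	}
	return true;
}
/* minlen=5, maxlen=14, prod=4  ->  minlen=8, maxlen=12, true  */
/* minlen=5, maxlen=6,  prod=4  ->  minlen=8, maxlen=4,  false */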
retry:
- sumbp = NULL;
- if (bno == 0) {
- error = xfs_rtallocate_extent_size(mp, tp, minlen, maxlen, len,
- &sumbp, &sb, prod, &r);
+ if (start == 0) {
+ error = xfs_rtallocate_extent_size(&args, minlen,
+ maxlen, len, prod, &r);
} else {
- error = xfs_rtallocate_extent_near(mp, tp, bno, minlen, maxlen,
- len, &sumbp, &sb, prod, &r);
+ error = xfs_rtallocate_extent_near(&args, start, minlen,
+ maxlen, len, prod, &r);
}
+ xfs_rtbuf_cache_relse(&args);
if (error)
return error;
/*
* If it worked, update the superblock.
*/
- if (r != NULLRTBLOCK) {
+ if (r != NULLRTEXTNO) {
long slen = (long)*len;
ASSERT(*len >= minlen && *len <= maxlen);
@@ -1250,6 +1244,7 @@ xfs_rtmount_init(
struct xfs_buf *bp; /* buffer for last block of subvolume */
struct xfs_sb *sbp; /* filesystem superblock copy in mount */
xfs_daddr_t d; /* address of last block of subvolume */
+ unsigned int rsumblocks;
int error;
sbp = &mp->m_sb;
@@ -1261,10 +1256,9 @@ xfs_rtmount_init(
return -ENODEV;
}
mp->m_rsumlevels = sbp->sb_rextslog + 1;
- mp->m_rsumsize =
- (uint)sizeof(xfs_suminfo_t) * mp->m_rsumlevels *
- sbp->sb_rbmblocks;
- mp->m_rsumsize = roundup(mp->m_rsumsize, sbp->sb_blocksize);
+ rsumblocks = xfs_rtsummary_blockcount(mp, mp->m_rsumlevels,
+ mp->m_sb.sb_rbmblocks);
+ mp->m_rsumsize = XFS_FSB_TO_B(mp, rsumblocks);
mp->m_rbmip = mp->m_rsumip = NULL;
/*
* Check that the realtime section is an ok size.
@@ -1418,27 +1412,27 @@ xfs_rtunmount_inodes(
* of rtextents and the fraction.
* The fraction sequence is 0, 1/2, 1/4, 3/4, 1/8, ..., 7/8, 1/16, ...
*/
-int /* error */
+int /* error */
xfs_rtpick_extent(
xfs_mount_t *mp, /* file system mount point */
xfs_trans_t *tp, /* transaction pointer */
- xfs_extlen_t len, /* allocation length (rtextents) */
- xfs_rtblock_t *pick) /* result rt extent */
- {
- xfs_rtblock_t b; /* result block */
+ xfs_rtxlen_t len, /* allocation length (rtextents) */
+ xfs_rtxnum_t *pick) /* result rt extent */
+{
+ xfs_rtxnum_t b; /* result rtext */
int log2; /* log of sequence number */
uint64_t resid; /* residual after log removed */
uint64_t seq; /* sequence number of file creation */
- struct timespec64 ts; /* temporary timespec64 storage */
+ struct timespec64 ts; /* timespec in inode */
ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
+ ts = inode_get_atime(VFS_I(mp->m_rbmip));
if (!(mp->m_rbmip->i_diflags & XFS_DIFLAG_NEWRTBM)) {
mp->m_rbmip->i_diflags |= XFS_DIFLAG_NEWRTBM;
seq = 0;
} else {
- ts = inode_get_atime(VFS_I(mp->m_rbmip));
- seq = (uint64_t)ts.tv_sec;
+ seq = ts.tv_sec;
}
if ((log2 = xfs_highbit64(seq)) == -1)
b = 0;
@@ -1451,7 +1445,7 @@ xfs_rtpick_extent(
if (b + len > mp->m_sb.sb_rextents)
b = mp->m_sb.sb_rextents - len;
}
- ts.tv_sec = (time64_t)seq + 1;
+ ts.tv_sec = seq + 1;
inode_set_atime_to_ts(VFS_I(mp->m_rbmip), ts);
xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
*pick = b;
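The fraction sequence mentioned in the comment (0, 1/2, 1/4, 3/4, 1/8, ..., 7/8, 1/16, ...) can be reproduced from the creation sequence number as below; this is an illustrative sketch of the sequence, not the kernel's exact arithmetic.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t seq;

	for (seq = 0; seq < 8; seq++) {
		if (seq == 0) {
			printf("seq 0 -> 0\n");
			continue;
		}
		int log2 = 63 - __builtin_clzll(seq);	/* highest set bit */
		uint64_t resid = seq - (1ULL << log2);	/* position within this level */

		printf("seq %llu -> %llu/%llu\n", (unsigned long long)seq,
		       (unsigned long long)(2 * resid + 1),
		       (unsigned long long)(1ULL << (log2 + 1)));
	}
	return 0;
}
/* Output: 0, 1/2, 1/4, 3/4, 1/8, 3/8, 5/8, 7/8 */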
diff --git a/fs/xfs/xfs_rtalloc.h b/fs/xfs/xfs_rtalloc.h
index 62c7ad79cbb6..f7cb9ffe51ca 100644
--- a/fs/xfs/xfs_rtalloc.h
+++ b/fs/xfs/xfs_rtalloc.h
@@ -11,22 +11,6 @@
struct xfs_mount;
struct xfs_trans;
-/*
- * XXX: Most of the realtime allocation functions deal in units of realtime
- * extents, not realtime blocks. This looks funny when paired with the type
- * name and screams for a larger cleanup.
- */
-struct xfs_rtalloc_rec {
- xfs_rtblock_t ar_startext;
- xfs_rtblock_t ar_extcount;
-};
-
-typedef int (*xfs_rtalloc_query_range_fn)(
- struct xfs_mount *mp,
- struct xfs_trans *tp,
- const struct xfs_rtalloc_rec *rec,
- void *priv);
-
#ifdef CONFIG_XFS_RT
/*
* Function prototypes for exported functions.
@@ -40,23 +24,14 @@ typedef int (*xfs_rtalloc_query_range_fn)(
int /* error */
xfs_rtallocate_extent(
struct xfs_trans *tp, /* transaction pointer */
- xfs_rtblock_t bno, /* starting block number to allocate */
- xfs_extlen_t minlen, /* minimum length to allocate */
- xfs_extlen_t maxlen, /* maximum length to allocate */
- xfs_extlen_t *len, /* out: actual length allocated */
+ xfs_rtxnum_t start, /* starting rtext number to allocate */
+ xfs_rtxlen_t minlen, /* minimum length to allocate */
+ xfs_rtxlen_t maxlen, /* maximum length to allocate */
+ xfs_rtxlen_t *len, /* out: actual length allocated */
int wasdel, /* was a delayed allocation extent */
- xfs_extlen_t prod, /* extent product factor */
- xfs_rtblock_t *rtblock); /* out: start block allocated */
+ xfs_rtxlen_t prod, /* extent product factor */
+ xfs_rtxnum_t *rtblock); /* out: start rtext allocated */
-/*
- * Free an extent in the realtime subvolume. Length is expressed in
- * realtime extents, as is the block number.
- */
-int /* error */
-xfs_rtfree_extent(
- struct xfs_trans *tp, /* transaction pointer */
- xfs_rtblock_t bno, /* starting block number to free */
- xfs_extlen_t len); /* length of extent freed */
/*
* Initialize realtime fields in the mount structure.
@@ -87,8 +62,8 @@ int /* error */
xfs_rtpick_extent(
struct xfs_mount *mp, /* file system mount point */
struct xfs_trans *tp, /* transaction pointer */
- xfs_extlen_t len, /* allocation length (rtextents) */
- xfs_rtblock_t *pick); /* result rt extent */
+ xfs_rtxlen_t len, /* allocation length (rtextents) */
+ xfs_rtxnum_t *pick); /* result rt extent */
/*
* Grow the realtime area of the filesystem.
@@ -98,55 +73,12 @@ xfs_growfs_rt(
struct xfs_mount *mp, /* file system mount structure */
xfs_growfs_rt_t *in); /* user supplied growfs struct */
-/*
- * From xfs_rtbitmap.c
- */
-int xfs_rtbuf_get(struct xfs_mount *mp, struct xfs_trans *tp,
- xfs_rtblock_t block, int issum, struct xfs_buf **bpp);
-int xfs_rtcheck_range(struct xfs_mount *mp, struct xfs_trans *tp,
- xfs_rtblock_t start, xfs_extlen_t len, int val,
- xfs_rtblock_t *new, int *stat);
-int xfs_rtfind_back(struct xfs_mount *mp, struct xfs_trans *tp,
- xfs_rtblock_t start, xfs_rtblock_t limit,
- xfs_rtblock_t *rtblock);
-int xfs_rtfind_forw(struct xfs_mount *mp, struct xfs_trans *tp,
- xfs_rtblock_t start, xfs_rtblock_t limit,
- xfs_rtblock_t *rtblock);
-int xfs_rtmodify_range(struct xfs_mount *mp, struct xfs_trans *tp,
- xfs_rtblock_t start, xfs_extlen_t len, int val);
-int xfs_rtmodify_summary_int(struct xfs_mount *mp, struct xfs_trans *tp,
- int log, xfs_rtblock_t bbno, int delta,
- struct xfs_buf **rbpp, xfs_fsblock_t *rsb,
- xfs_suminfo_t *sum);
-int xfs_rtmodify_summary(struct xfs_mount *mp, struct xfs_trans *tp, int log,
- xfs_rtblock_t bbno, int delta, struct xfs_buf **rbpp,
- xfs_fsblock_t *rsb);
-int xfs_rtfree_range(struct xfs_mount *mp, struct xfs_trans *tp,
- xfs_rtblock_t start, xfs_extlen_t len,
- struct xfs_buf **rbpp, xfs_fsblock_t *rsb);
-int xfs_rtalloc_query_range(struct xfs_mount *mp, struct xfs_trans *tp,
- const struct xfs_rtalloc_rec *low_rec,
- const struct xfs_rtalloc_rec *high_rec,
- xfs_rtalloc_query_range_fn fn, void *priv);
-int xfs_rtalloc_query_all(struct xfs_mount *mp, struct xfs_trans *tp,
- xfs_rtalloc_query_range_fn fn,
- void *priv);
-bool xfs_verify_rtbno(struct xfs_mount *mp, xfs_rtblock_t rtbno);
-int xfs_rtalloc_extent_is_free(struct xfs_mount *mp, struct xfs_trans *tp,
- xfs_rtblock_t start, xfs_extlen_t len,
- bool *is_free);
int xfs_rtalloc_reinit_frextents(struct xfs_mount *mp);
#else
-# define xfs_rtallocate_extent(t,b,min,max,l,f,p,rb) (ENOSYS)
-# define xfs_rtfree_extent(t,b,l) (ENOSYS)
-# define xfs_rtpick_extent(m,t,l,rb) (ENOSYS)
-# define xfs_growfs_rt(mp,in) (ENOSYS)
-# define xfs_rtalloc_query_range(t,l,h,f,p) (ENOSYS)
-# define xfs_rtalloc_query_all(m,t,f,p) (ENOSYS)
-# define xfs_rtbuf_get(m,t,b,i,p) (ENOSYS)
-# define xfs_verify_rtbno(m, r) (false)
-# define xfs_rtalloc_extent_is_free(m,t,s,l,i) (ENOSYS)
-# define xfs_rtalloc_reinit_frextents(m) (0)
+# define xfs_rtallocate_extent(t,b,min,max,l,f,p,rb) (-ENOSYS)
+# define xfs_rtpick_extent(m,t,l,rb) (-ENOSYS)
+# define xfs_growfs_rt(mp,in) (-ENOSYS)
+# define xfs_rtalloc_reinit_frextents(m) (0)
static inline int /* error */
xfs_rtmount_init(
xfs_mount_t *mp) /* file system mount structure */
@@ -157,7 +89,7 @@ xfs_rtmount_init(
xfs_warn(mp, "Not built with CONFIG_XFS_RT");
return -ENOSYS;
}
-# define xfs_rtmount_inodes(m) (((mp)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS))
+# define xfs_rtmount_inodes(m) (((mp)->m_sb.sb_rblocks == 0)? 0 : (-ENOSYS))
# define xfs_rtunmount_inodes(m)
#endif /* CONFIG_XFS_RT */
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index f0ae07828153..764304595e8b 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -42,6 +42,7 @@
#include "xfs_xattr.h"
#include "xfs_iunlink_item.h"
#include "xfs_dahash_test.h"
+#include "xfs_rtbitmap.h"
#include "scrub/stats.h"
#include <linux/magic.h>
@@ -896,7 +897,7 @@ xfs_fs_statfs(
statp->f_blocks = sbp->sb_rblocks;
freertx = percpu_counter_sum_positive(&mp->m_frextents);
- statp->f_bavail = statp->f_bfree = freertx * sbp->sb_rextsize;
+ statp->f_bavail = statp->f_bfree = xfs_rtx_to_rtb(mp, freertx);
}
return 0;
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 8c0bfc9a33b1..305c9d07bf1b 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -24,6 +24,7 @@
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_icache.h"
+#include "xfs_rtbitmap.h"
struct kmem_cache *xfs_trans_cache;
@@ -655,6 +656,10 @@ xfs_trans_unreserve_and_mod_sb(
mp->m_sb.sb_agcount += tp->t_agcount_delta;
mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
mp->m_sb.sb_rextsize += tp->t_rextsize_delta;
+ if (tp->t_rextsize_delta) {
+ mp->m_rtxblklog = log2_if_power2(mp->m_sb.sb_rextsize);
+ mp->m_rtxblkmask = mask64_if_power2(mp->m_sb.sb_rextsize);
+ }
mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta;
mp->m_sb.sb_rblocks += tp->t_rblocks_delta;
mp->m_sb.sb_rextents += tp->t_rextents_delta;
@@ -1196,7 +1201,7 @@ xfs_trans_alloc_inode(
retry:
error = xfs_trans_alloc(mp, resv, dblocks,
- rblocks / mp->m_sb.sb_rextsize,
+ xfs_extlen_to_rtxlen(mp, rblocks),
force ? XFS_TRANS_RESERVE : 0, &tp);
if (error)
return error;
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 67d8dd2f1bde..bae0fe4d499b 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -356,7 +356,6 @@
*(.ref.data) \
*(.data..shared_aligned) /* percpu related */ \
MEM_KEEP(init.data*) \
- MEM_KEEP(exit.data*) \
*(.data.unlikely) \
__start_once = .; \
*(.data.once) \
@@ -521,7 +520,6 @@
__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
*(.ref.rodata) \
MEM_KEEP(init.rodata) \
- MEM_KEEP(exit.rodata) \
} \
\
/* Built-in module parameters. */ \
@@ -574,7 +572,6 @@
*(.ref.text) \
*(.text.asan.* .text.tsan.*) \
MEM_KEEP(init.text*) \
- MEM_KEEP(exit.text*) \
/* sched.text is aling to function alignment to secure we have same
@@ -714,13 +711,10 @@
*(.exit.data .exit.data.*) \
*(.fini_array .fini_array.*) \
*(.dtors .dtors.*) \
- MEM_DISCARD(exit.data*) \
- MEM_DISCARD(exit.rodata*)
#define EXIT_TEXT \
*(.exit.text) \
*(.text.exit) \
- MEM_DISCARD(exit.text)
#define EXIT_CALL \
*(.exitcall.exit)
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
index 90b69270f2fa..724c45e3e9a7 100644
--- a/include/drm/amd_asic_type.h
+++ b/include/drm/amd_asic_type.h
@@ -68,4 +68,9 @@ enum amd_asic_type {
extern const char *amdgpu_asic_name[];
+struct amdgpu_asic_type_quirk {
+ unsigned short device; /* PCI device ID */
+ u8 revision; /* revision ID */
+ unsigned short type; /* real ASIC type */
+};
#endif /*__AMD_ASIC_TYPE_H__ */
diff --git a/include/drm/i915_pxp_tee_interface.h b/include/drm/i915_pxp_tee_interface.h
index a702b6ec17f7..7d96985f2d05 100644
--- a/include/drm/i915_pxp_tee_interface.h
+++ b/include/drm/i915_pxp_tee_interface.h
@@ -22,8 +22,10 @@ struct i915_pxp_component_ops {
*/
struct module *owner;
- int (*send)(struct device *dev, const void *message, size_t size);
- int (*recv)(struct device *dev, void *buffer, size_t size);
+ int (*send)(struct device *dev, const void *message, size_t size,
+ unsigned long timeout_ms);
+ int (*recv)(struct device *dev, void *buffer, size_t size,
+ unsigned long timeout_ms);
ssize_t (*gsc_command)(struct device *dev, u8 client_id, u32 fence_id,
struct scatterlist *sg_in, size_t total_in_len,
struct scatterlist *sg_out);
diff --git a/include/dt-bindings/gpio/amlogic,t7-periphs-pinctrl.h b/include/dt-bindings/gpio/amlogic,t7-periphs-pinctrl.h
new file mode 100644
index 000000000000..4e16d31a71c9
--- /dev/null
+++ b/include/dt-bindings/gpio/amlogic,t7-periphs-pinctrl.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc. All rights reserved.
+ * Author: Huqiang Qin <huqiang.qin@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_T7_GPIO_H
+#define _DT_BINDINGS_AMLOGIC_T7_GPIO_H
+
+#define GPIOB_0 0
+#define GPIOB_1 1
+#define GPIOB_2 2
+#define GPIOB_3 3
+#define GPIOB_4 4
+#define GPIOB_5 5
+#define GPIOB_6 6
+#define GPIOB_7 7
+#define GPIOB_8 8
+#define GPIOB_9 9
+#define GPIOB_10 10
+#define GPIOB_11 11
+#define GPIOB_12 12
+
+#define GPIOC_0 13
+#define GPIOC_1 14
+#define GPIOC_2 15
+#define GPIOC_3 16
+#define GPIOC_4 17
+#define GPIOC_5 18
+#define GPIOC_6 19
+
+#define GPIOX_0 20
+#define GPIOX_1 21
+#define GPIOX_2 22
+#define GPIOX_3 23
+#define GPIOX_4 24
+#define GPIOX_5 25
+#define GPIOX_6 26
+#define GPIOX_7 27
+#define GPIOX_8 28
+#define GPIOX_9 29
+#define GPIOX_10 30
+#define GPIOX_11 31
+#define GPIOX_12 32
+#define GPIOX_13 33
+#define GPIOX_14 34
+#define GPIOX_15 35
+#define GPIOX_16 36
+#define GPIOX_17 37
+#define GPIOX_18 38
+#define GPIOX_19 39
+
+#define GPIOW_0 40
+#define GPIOW_1 41
+#define GPIOW_2 42
+#define GPIOW_3 43
+#define GPIOW_4 44
+#define GPIOW_5 45
+#define GPIOW_6 46
+#define GPIOW_7 47
+#define GPIOW_8 48
+#define GPIOW_9 49
+#define GPIOW_10 50
+#define GPIOW_11 51
+#define GPIOW_12 52
+#define GPIOW_13 53
+#define GPIOW_14 54
+#define GPIOW_15 55
+#define GPIOW_16 56
+
+#define GPIOD_0 57
+#define GPIOD_1 58
+#define GPIOD_2 59
+#define GPIOD_3 60
+#define GPIOD_4 61
+#define GPIOD_5 62
+#define GPIOD_6 63
+#define GPIOD_7 64
+#define GPIOD_8 65
+#define GPIOD_9 66
+#define GPIOD_10 67
+#define GPIOD_11 68
+#define GPIOD_12 69
+
+#define GPIOE_0 70
+#define GPIOE_1 71
+#define GPIOE_2 72
+#define GPIOE_3 73
+#define GPIOE_4 74
+#define GPIOE_5 75
+#define GPIOE_6 76
+
+#define GPIOZ_0 77
+#define GPIOZ_1 78
+#define GPIOZ_2 79
+#define GPIOZ_3 80
+#define GPIOZ_4 81
+#define GPIOZ_5 82
+#define GPIOZ_6 83
+#define GPIOZ_7 84
+#define GPIOZ_8 85
+#define GPIOZ_9 86
+#define GPIOZ_10 87
+#define GPIOZ_11 88
+#define GPIOZ_12 89
+#define GPIOZ_13 90
+
+#define GPIOT_0 91
+#define GPIOT_1 92
+#define GPIOT_2 93
+#define GPIOT_3 94
+#define GPIOT_4 95
+#define GPIOT_5 96
+#define GPIOT_6 97
+#define GPIOT_7 98
+#define GPIOT_8 99
+#define GPIOT_9 100
+#define GPIOT_10 101
+#define GPIOT_11 102
+#define GPIOT_12 103
+#define GPIOT_13 104
+#define GPIOT_14 105
+#define GPIOT_15 106
+#define GPIOT_16 107
+#define GPIOT_17 108
+#define GPIOT_18 109
+#define GPIOT_19 110
+#define GPIOT_20 111
+#define GPIOT_21 112
+#define GPIOT_22 113
+#define GPIOT_23 114
+
+#define GPIOM_0 115
+#define GPIOM_1 116
+#define GPIOM_2 117
+#define GPIOM_3 118
+#define GPIOM_4 119
+#define GPIOM_5 120
+#define GPIOM_6 121
+#define GPIOM_7 122
+#define GPIOM_8 123
+#define GPIOM_9 124
+#define GPIOM_10 125
+#define GPIOM_11 126
+#define GPIOM_12 127
+#define GPIOM_13 128
+
+#define GPIOY_0 129
+#define GPIOY_1 130
+#define GPIOY_2 131
+#define GPIOY_3 132
+#define GPIOY_4 133
+#define GPIOY_5 134
+#define GPIOY_6 135
+#define GPIOY_7 136
+#define GPIOY_8 137
+#define GPIOY_9 138
+#define GPIOY_10 139
+#define GPIOY_11 140
+#define GPIOY_12 141
+#define GPIOY_13 142
+#define GPIOY_14 143
+#define GPIOY_15 144
+#define GPIOY_16 145
+#define GPIOY_17 146
+#define GPIOY_18 147
+
+#define GPIOH_0 148
+#define GPIOH_1 149
+#define GPIOH_2 150
+#define GPIOH_3 151
+#define GPIOH_4 152
+#define GPIOH_5 153
+#define GPIOH_6 154
+#define GPIOH_7 155
+
+#define GPIO_TEST_N 156
+
+#endif /* _DT_BINDINGS_AMLOGIC_T7_GPIO_H */
diff --git a/include/dt-bindings/interconnect/qcom,sdx75.h b/include/dt-bindings/interconnect/qcom,sdx75.h
new file mode 100644
index 000000000000..e903f5f3dd8f
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sdx75.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDX75_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SDX75_H
+
+#define MASTER_QPIC_CORE 0
+#define MASTER_QUP_CORE_0 1
+#define SLAVE_QPIC_CORE 2
+#define SLAVE_QUP_CORE_0 3
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LAGG_CFG 1
+#define SLAVE_MCCC_MASTER 2
+#define SLAVE_GEM_NOC_CFG 3
+#define SLAVE_SNOOP_BWMON 4
+
+#define MASTER_SYS_TCU 0
+#define MASTER_APPSS_PROC 1
+#define MASTER_GEM_NOC_CFG 2
+#define MASTER_MSS_PROC 3
+#define MASTER_ANOC_PCIE_GEM_NOC 4
+#define MASTER_SNOC_SF_MEM_NOC 5
+#define MASTER_GIC 6
+#define MASTER_IPA_PCIE 7
+#define SLAVE_GEM_NOC_CNOC 8
+#define SLAVE_LLCC 9
+#define SLAVE_MEM_NOC_PCIE_SNOC 10
+#define SLAVE_SERVICE_GEM_NOC 11
+
+#define MASTER_PCIE_0 0
+#define MASTER_PCIE_1 1
+#define MASTER_PCIE_2 2
+#define SLAVE_ANOC_PCIE_GEM_NOC 3
+
+#define MASTER_AUDIO 0
+#define MASTER_GIC_AHB 1
+#define MASTER_PCIE_RSCC 2
+#define MASTER_QDSS_BAM 3
+#define MASTER_QPIC 4
+#define MASTER_QUP_0 5
+#define MASTER_ANOC_SNOC 6
+#define MASTER_GEM_NOC_CNOC 7
+#define MASTER_GEM_NOC_PCIE_SNOC 8
+#define MASTER_SNOC_CFG 9
+#define MASTER_PCIE_ANOC_CFG 10
+#define MASTER_CRYPTO 11
+#define MASTER_IPA 12
+#define MASTER_MVMSS 13
+#define MASTER_EMAC_0 14
+#define MASTER_EMAC_1 15
+#define MASTER_QDSS_ETR 16
+#define MASTER_QDSS_ETR_1 17
+#define MASTER_SDCC_1 18
+#define MASTER_SDCC_4 19
+#define MASTER_USB3_0 20
+#define SLAVE_ETH0_CFG 21
+#define SLAVE_ETH1_CFG 22
+#define SLAVE_AUDIO 23
+#define SLAVE_CLK_CTL 24
+#define SLAVE_CRYPTO_0_CFG 25
+#define SLAVE_IMEM_CFG 26
+#define SLAVE_IPA_CFG 27
+#define SLAVE_IPC_ROUTER_CFG 28
+#define SLAVE_CNOC_MSS 29
+#define SLAVE_ICBDI_MVMSS_CFG 30
+#define SLAVE_PCIE_0_CFG 31
+#define SLAVE_PCIE_1_CFG 32
+#define SLAVE_PCIE_2_CFG 33
+#define SLAVE_PCIE_RSC_CFG 34
+#define SLAVE_PDM 35
+#define SLAVE_PRNG 36
+#define SLAVE_QDSS_CFG 37
+#define SLAVE_QPIC 38
+#define SLAVE_QUP_0 39
+#define SLAVE_SDCC_1 40
+#define SLAVE_SDCC_4 41
+#define SLAVE_SPMI_VGI_COEX 42
+#define SLAVE_TCSR 43
+#define SLAVE_TLMM 44
+#define SLAVE_USB3 45
+#define SLAVE_USB3_PHY_CFG 46
+#define SLAVE_A1NOC_CFG 47
+#define SLAVE_DDRSS_CFG 48
+#define SLAVE_SNOC_GEM_NOC_SF 49
+#define SLAVE_SNOC_CFG 50
+#define SLAVE_PCIE_ANOC_CFG 51
+#define SLAVE_IMEM 52
+#define SLAVE_SERVICE_PCIE_ANOC 53
+#define SLAVE_SERVICE_SNOC 54
+#define SLAVE_PCIE_0 55
+#define SLAVE_PCIE_1 56
+#define SLAVE_PCIE_2 57
+#define SLAVE_QDSS_STM 58
+#define SLAVE_TCU 59
+
+#endif
diff --git a/include/dt-bindings/watchdog/aspeed-wdt.h b/include/dt-bindings/watchdog/aspeed-wdt.h
new file mode 100644
index 000000000000..7ae6d84b2bd9
--- /dev/null
+++ b/include/dt-bindings/watchdog/aspeed-wdt.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef DT_BINDINGS_ASPEED_WDT_H
+#define DT_BINDINGS_ASPEED_WDT_H
+
+#define AST2500_WDT_RESET_CPU (1 << 0)
+#define AST2500_WDT_RESET_COPROC (1 << 1)
+#define AST2500_WDT_RESET_SDRAM (1 << 2)
+#define AST2500_WDT_RESET_AHB (1 << 3)
+#define AST2500_WDT_RESET_I2C (1 << 4)
+#define AST2500_WDT_RESET_MAC0 (1 << 5)
+#define AST2500_WDT_RESET_MAC1 (1 << 6)
+#define AST2500_WDT_RESET_GRAPHICS (1 << 7)
+#define AST2500_WDT_RESET_USB2_HOST_HUB (1 << 8)
+#define AST2500_WDT_RESET_USB_HOST (1 << 9)
+#define AST2500_WDT_RESET_HID_EHCI (1 << 10)
+#define AST2500_WDT_RESET_VIDEO (1 << 11)
+#define AST2500_WDT_RESET_HAC (1 << 12)
+#define AST2500_WDT_RESET_LPC (1 << 13)
+#define AST2500_WDT_RESET_SDIO (1 << 14)
+#define AST2500_WDT_RESET_MIC (1 << 15)
+#define AST2500_WDT_RESET_CRT (1 << 16)
+#define AST2500_WDT_RESET_PWM (1 << 17)
+#define AST2500_WDT_RESET_PECI (1 << 18)
+#define AST2500_WDT_RESET_JTAG (1 << 19)
+#define AST2500_WDT_RESET_ADC (1 << 20)
+#define AST2500_WDT_RESET_GPIO (1 << 21)
+#define AST2500_WDT_RESET_MCTP (1 << 22)
+#define AST2500_WDT_RESET_XDMA (1 << 23)
+#define AST2500_WDT_RESET_SPI (1 << 24)
+#define AST2500_WDT_RESET_SOC_MISC (1 << 25)
+
+#define AST2500_WDT_RESET_DEFAULT 0x023ffff3
+
+#define AST2600_WDT_RESET1_CPU (1 << 0)
+#define AST2600_WDT_RESET1_SDRAM (1 << 1)
+#define AST2600_WDT_RESET1_AHB (1 << 2)
+#define AST2600_WDT_RESET1_SLI (1 << 3)
+#define AST2600_WDT_RESET1_SOC_MISC0 (1 << 4)
+#define AST2600_WDT_RESET1_COPROC (1 << 5)
+#define AST2600_WDT_RESET1_USB_A (1 << 6)
+#define AST2600_WDT_RESET1_USB_B (1 << 7)
+#define AST2600_WDT_RESET1_UHCI (1 << 8)
+#define AST2600_WDT_RESET1_GRAPHICS (1 << 9)
+#define AST2600_WDT_RESET1_CRT (1 << 10)
+#define AST2600_WDT_RESET1_VIDEO (1 << 11)
+#define AST2600_WDT_RESET1_HAC (1 << 12)
+#define AST2600_WDT_RESET1_DP (1 << 13)
+#define AST2600_WDT_RESET1_DP_MCU (1 << 14)
+#define AST2600_WDT_RESET1_GP_MCU (1 << 15)
+#define AST2600_WDT_RESET1_MAC0 (1 << 16)
+#define AST2600_WDT_RESET1_MAC1 (1 << 17)
+#define AST2600_WDT_RESET1_SDIO0 (1 << 18)
+#define AST2600_WDT_RESET1_JTAG0 (1 << 19)
+#define AST2600_WDT_RESET1_MCTP0 (1 << 20)
+#define AST2600_WDT_RESET1_MCTP1 (1 << 21)
+#define AST2600_WDT_RESET1_XDMA0 (1 << 22)
+#define AST2600_WDT_RESET1_XDMA1 (1 << 23)
+#define AST2600_WDT_RESET1_GPIO0 (1 << 24)
+#define AST2600_WDT_RESET1_RVAS (1 << 25)
+
+#define AST2600_WDT_RESET1_DEFAULT 0x030f1ff1
+
+#define AST2600_WDT_RESET2_CPU (1 << 0)
+#define AST2600_WDT_RESET2_SPI (1 << 1)
+#define AST2600_WDT_RESET2_AHB2 (1 << 2)
+#define AST2600_WDT_RESET2_SLI2 (1 << 3)
+#define AST2600_WDT_RESET2_SOC_MISC1 (1 << 4)
+#define AST2600_WDT_RESET2_MAC2 (1 << 5)
+#define AST2600_WDT_RESET2_MAC3 (1 << 6)
+#define AST2600_WDT_RESET2_SDIO1 (1 << 7)
+#define AST2600_WDT_RESET2_JTAG1 (1 << 8)
+#define AST2600_WDT_RESET2_GPIO1 (1 << 9)
+#define AST2600_WDT_RESET2_MDIO (1 << 10)
+#define AST2600_WDT_RESET2_LPC (1 << 11)
+#define AST2600_WDT_RESET2_PECI (1 << 12)
+#define AST2600_WDT_RESET2_PWM (1 << 13)
+#define AST2600_WDT_RESET2_ADC (1 << 14)
+#define AST2600_WDT_RESET2_FSI (1 << 15)
+#define AST2600_WDT_RESET2_I2C (1 << 16)
+#define AST2600_WDT_RESET2_I3C_GLOBAL (1 << 17)
+#define AST2600_WDT_RESET2_I3C0 (1 << 18)
+#define AST2600_WDT_RESET2_I3C1 (1 << 19)
+#define AST2600_WDT_RESET2_I3C2 (1 << 20)
+#define AST2600_WDT_RESET2_I3C3 (1 << 21)
+#define AST2600_WDT_RESET2_I3C4 (1 << 22)
+#define AST2600_WDT_RESET2_I3C5 (1 << 23)
+#define AST2600_WDT_RESET2_ESPI (1 << 26)
+
+#define AST2600_WDT_RESET2_DEFAULT 0x03fffff1
+
+#endif
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 24e014760328..54189e0e5f41 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -15,6 +15,7 @@
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/uuid.h>
+#include <linux/fw_table.h>
struct irq_domain;
struct irq_domain_ops;
@@ -24,6 +25,16 @@ struct irq_domain_ops;
#endif
#include <acpi/acpi.h>
+#ifdef CONFIG_ACPI_TABLE_LIB
+#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI)
+#define __init_or_acpilib
+#define __initdata_or_acpilib
+#else
+#define EXPORT_SYMBOL_ACPI_LIB(x)
+#define __init_or_acpilib __init
+#define __initdata_or_acpilib __initdata
+#endif
+
#ifdef CONFIG_ACPI
#include <linux/list.h>
@@ -119,21 +130,8 @@ enum acpi_address_range_id {
/* Table Handlers */
-union acpi_subtable_headers {
- struct acpi_subtable_header common;
- struct acpi_hmat_structure hmat;
- struct acpi_prmt_module_header prmt;
- struct acpi_cedt_header cedt;
-};
-
typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table);
-typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *header,
- const unsigned long end);
-
-typedef int (*acpi_tbl_entry_handler_arg)(union acpi_subtable_headers *header,
- void *arg, const unsigned long end);
-
/* Debugger support */
struct acpi_debugger_ops {
@@ -207,14 +205,6 @@ static inline int acpi_debugger_notify_command_complete(void)
(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
-struct acpi_subtable_proc {
- int id;
- acpi_tbl_entry_handler handler;
- acpi_tbl_entry_handler_arg handler_arg;
- void *arg;
- int count;
-};
-
void __iomem *__acpi_map_table(unsigned long phys, unsigned long size);
void __acpi_unmap_table(void __iomem *map, unsigned long size);
int early_acpi_boot_init(void);
@@ -229,16 +219,6 @@ void acpi_reserve_initial_tables (void);
void acpi_table_init_complete (void);
int acpi_table_init (void);
-#ifdef CONFIG_ACPI_TABLE_LIB
-#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI)
-#define __init_or_acpilib
-#define __initdata_or_acpilib
-#else
-#define EXPORT_SYMBOL_ACPI_LIB(x)
-#define __init_or_acpilib __init
-#define __initdata_or_acpilib __initdata
-#endif
-
int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
int __init_or_acpilib acpi_table_parse_entries(char *id,
unsigned long table_size, int entry_id,
@@ -1551,4 +1531,9 @@ static inline void acpi_device_notify(struct device *dev) { }
static inline void acpi_device_notify_remove(struct device *dev) { }
#endif
+static inline void acpi_use_parent_companion(struct device *dev)
+{
+ ACPI_COMPANION_SET(dev, ACPI_COMPANION(dev->parent));
+}
+
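A hypothetical use of the new helper: a child platform device created without its own ACPI node (an MFD cell, say) borrows the parent's companion so device properties resolve there. The driver, property name, and default below are made up.

#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <linux/property.h>

static int foo_cell_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	u32 rate;

	if (!ACPI_COMPANION(dev))
		acpi_use_parent_companion(dev);		/* inherit the parent's ACPI node */

	if (device_property_read_u32(dev, "clock-frequency", &rate))
		rate = 100000;				/* assumed fallback */
	dev_info(dev, "using %u Hz\n", rate);

	return 0;
}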
#endif /*_LINUX_ACPI_H*/
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 29cc10220952..f6ea2f57d808 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -51,7 +51,7 @@ static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; }
#endif
-void cper_print_aer(struct pci_dev *dev, int aer_severity,
+void pci_print_aer(struct pci_dev *dev, int aer_severity,
struct aer_capability_regs *aer);
int cper_severity_to_aer(int cper_severity);
void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
index 99a5201d9e62..dc7ed2f46886 100644
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -33,126 +33,6 @@ struct pci_dev;
extern int amd_iommu_detect(void);
-/**
- * amd_iommu_init_device() - Init device for use with IOMMUv2 driver
- * @pdev: The PCI device to initialize
- * @pasids: Number of PASIDs to support for this device
- *
- * This function does all setup for the device pdev so that it can be
- * used with IOMMUv2.
- * Returns 0 on success or negative value on error.
- */
-extern int amd_iommu_init_device(struct pci_dev *pdev, int pasids);
-
-/**
- * amd_iommu_free_device() - Free all IOMMUv2 related device resources
- * and disable IOMMUv2 usage for this device
- * @pdev: The PCI device to disable IOMMUv2 usage for'
- */
-extern void amd_iommu_free_device(struct pci_dev *pdev);
-
-/**
- * amd_iommu_bind_pasid() - Bind a given task to a PASID on a device
- * @pdev: The PCI device to bind the task to
- * @pasid: The PASID on the device the task should be bound to
- * @task: the task to bind
- *
- * The function returns 0 on success or a negative value on error.
- */
-extern int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
- struct task_struct *task);
-
-/**
- * amd_iommu_unbind_pasid() - Unbind a PASID from its task on
- * a device
- * @pdev: The device of the PASID
- * @pasid: The PASID to unbind
- *
- * When this function returns the device is no longer using the PASID
- * and the PASID is no longer bound to its task.
- */
-extern void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid);
-
-/**
- * amd_iommu_set_invalid_ppr_cb() - Register a call-back for failed
- * PRI requests
- * @pdev: The PCI device the call-back should be registered for
- * @cb: The call-back function
- *
- * The IOMMUv2 driver invokes this call-back when it is unable to
- * successfully handle a PRI request. The device driver can then decide
- * which PRI response the device should see. Possible return values for
- * the call-back are:
- *
- * - AMD_IOMMU_INV_PRI_RSP_SUCCESS - Send SUCCESS back to the device
- * - AMD_IOMMU_INV_PRI_RSP_INVALID - Send INVALID back to the device
- * - AMD_IOMMU_INV_PRI_RSP_FAIL - Send Failure back to the device,
- * the device is required to disable
- * PRI when it receives this response
- *
- * The function returns 0 on success or negative value on error.
- */
-#define AMD_IOMMU_INV_PRI_RSP_SUCCESS 0
-#define AMD_IOMMU_INV_PRI_RSP_INVALID 1
-#define AMD_IOMMU_INV_PRI_RSP_FAIL 2
-
-typedef int (*amd_iommu_invalid_ppr_cb)(struct pci_dev *pdev,
- u32 pasid,
- unsigned long address,
- u16);
-
-extern int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
- amd_iommu_invalid_ppr_cb cb);
-
-#define PPR_FAULT_EXEC (1 << 1)
-#define PPR_FAULT_READ (1 << 2)
-#define PPR_FAULT_WRITE (1 << 5)
-#define PPR_FAULT_USER (1 << 6)
-#define PPR_FAULT_RSVD (1 << 7)
-#define PPR_FAULT_GN (1 << 8)
-
-/**
- * amd_iommu_device_info() - Get information about IOMMUv2 support of a
- * PCI device
- * @pdev: PCI device to query information from
- * @info: A pointer to an amd_iommu_device_info structure which will contain
- * the information about the PCI device
- *
- * Returns 0 on success, negative value on error
- */
-
-#define AMD_IOMMU_DEVICE_FLAG_ATS_SUP 0x1 /* ATS feature supported */
-#define AMD_IOMMU_DEVICE_FLAG_PRI_SUP 0x2 /* PRI feature supported */
-#define AMD_IOMMU_DEVICE_FLAG_PASID_SUP 0x4 /* PASID context supported */
-#define AMD_IOMMU_DEVICE_FLAG_EXEC_SUP 0x8 /* Device may request execution
- on memory pages */
-#define AMD_IOMMU_DEVICE_FLAG_PRIV_SUP 0x10 /* Device may request
- super-user privileges */
-
-struct amd_iommu_device_info {
- int max_pasids;
- u32 flags;
-};
-
-extern int amd_iommu_device_info(struct pci_dev *pdev,
- struct amd_iommu_device_info *info);
-
-/**
- * amd_iommu_set_invalidate_ctx_cb() - Register a call-back for invalidating
- * a pasid context. This call-back is
- * invoked when the IOMMUv2 driver needs to
- * invalidate a PASID context, for example
- * because the task that is bound to that
- * context is about to exit.
- *
- * @pdev: The PCI device the call-back should be registered for
- * @cb: The call-back function
- */
-
-typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, u32 pasid);
-
-extern int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
- amd_iommu_invalidate_ctx cb);
#else /* CONFIG_AMD_IOMMU */
static inline int amd_iommu_detect(void) { return -ENODEV; }
diff --git a/include/linux/array_size.h b/include/linux/array_size.h
new file mode 100644
index 000000000000..06d7d83196ca
--- /dev/null
+++ b/include/linux/array_size.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ARRAY_SIZE_H
+#define _LINUX_ARRAY_SIZE_H
+
+#include <linux/compiler.h>
+
+/**
+ * ARRAY_SIZE - get the number of elements in array @arr
+ * @arr: array to be sized
+ */
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
+
+#endif /* _LINUX_ARRAY_SIZE_H */
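Typical use of the macro, for illustration; the __must_be_array() term turns a pointer argument into a build error, which is the point of keeping this in its own minimal header.

#include <linux/array_size.h>

static const unsigned int retry_delays_ms[] = { 1, 5, 10, 50 };

static unsigned int pick_delay(unsigned int attempt)
{
	if (attempt >= ARRAY_SIZE(retry_delays_ms))	/* evaluates to 4 at compile time */
		attempt = ARRAY_SIZE(retry_delays_ms) - 1;
	return retry_delays_ms[attempt];
}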
diff --git a/include/linux/bitmap-str.h b/include/linux/bitmap-str.h
new file mode 100644
index 000000000000..17caeca94cab
--- /dev/null
+++ b/include/linux/bitmap-str.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BITMAP_STR_H
+#define __LINUX_BITMAP_STR_H
+
+int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, unsigned long *dst, int nbits);
+int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, int nmaskbits);
+extern int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count);
+extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count);
+int bitmap_parse(const char *buf, unsigned int buflen, unsigned long *dst, int nbits);
+int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits);
+int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
+ unsigned long *dst, int nbits);
+
+#endif /* __LINUX_BITMAP_STR_H */
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 03644237e1ef..99451431e4d6 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -6,10 +6,12 @@
#include <linux/align.h>
#include <linux/bitops.h>
+#include <linux/errno.h>
#include <linux/find.h>
#include <linux/limits.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/bitmap-str.h>
struct device;
@@ -200,14 +202,6 @@ bitmap_find_next_zero_area(unsigned long *map,
align_mask, 0);
}
-int bitmap_parse(const char *buf, unsigned int buflen,
- unsigned long *dst, int nbits);
-int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
- unsigned long *dst, int nbits);
-int bitmap_parselist(const char *buf, unsigned long *maskp,
- int nmaskbits);
-int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
- unsigned long *dst, int nbits);
void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new, unsigned int nbits);
int bitmap_bitremap(int oldbit,
@@ -216,23 +210,6 @@ void bitmap_onto(unsigned long *dst, const unsigned long *orig,
const unsigned long *relmap, unsigned int bits);
void bitmap_fold(unsigned long *dst, const unsigned long *orig,
unsigned int sz, unsigned int nbits);
-int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
-void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
-int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
-
-#ifdef __BIG_ENDIAN
-void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
-#else
-#define bitmap_copy_le bitmap_copy
-#endif
-int bitmap_print_to_pagebuf(bool list, char *buf,
- const unsigned long *maskp, int nmaskbits);
-
-extern int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp,
- int nmaskbits, loff_t off, size_t count);
-
-extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
- int nmaskbits, loff_t off, size_t count);
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
@@ -519,6 +496,66 @@ static inline void bitmap_next_set_region(unsigned long *bitmap,
}
/**
+ * bitmap_release_region - release allocated bitmap region
+ * @bitmap: array of unsigned longs corresponding to the bitmap
+ * @pos: beginning of bit region to release
+ * @order: region size (log base 2 of number of bits) to release
+ *
+ * This is the complement to __bitmap_find_free_region() and releases
+ * the found region (by clearing it in the bitmap).
+ */
+static inline void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
+{
+ bitmap_clear(bitmap, pos, BIT(order));
+}
+
+/**
+ * bitmap_allocate_region - allocate bitmap region
+ * @bitmap: array of unsigned longs corresponding to the bitmap
+ * @pos: beginning of bit region to allocate
+ * @order: region size (log base 2 of number of bits) to allocate
+ *
+ * Allocate (set bits in) a specified region of a bitmap.
+ *
+ * Returns: 0 on success, or %-EBUSY if specified region wasn't
+ * free (not all bits were zero).
+ */
+static inline int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
+{
+ unsigned int len = BIT(order);
+
+ if (find_next_bit(bitmap, pos + len, pos) < pos + len)
+ return -EBUSY;
+ bitmap_set(bitmap, pos, len);
+ return 0;
+}
+
+/**
+ * bitmap_find_free_region - find a contiguous aligned mem region
+ * @bitmap: array of unsigned longs corresponding to the bitmap
+ * @bits: number of bits in the bitmap
+ * @order: region size (log base 2 of number of bits) to find
+ *
+ * Find a region of free (zero) bits in a @bitmap of @bits bits and
+ * allocate them (set them to one). Only consider regions of length
+ * a power (@order) of two, aligned to that power of two, which
+ * makes the search algorithm much faster.
+ *
+ * Returns: the bit offset in bitmap of the allocated region,
+ * or -errno on failure.
+ */
+static inline int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
+{
+ unsigned int pos, end; /* scans bitmap by regions of size order */
+
+ for (pos = 0; (end = pos + BIT(order)) <= bits; pos = end) {
+ if (!bitmap_allocate_region(bitmap, pos, order))
+ return pos;
+ }
+ return -ENOMEM;
+}
+
+/**
* BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap.
* @n: u64 value
*
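Taken together, the three helpers moved above form a small power-of-two region allocator built on bitmap_set()/bitmap_clear(). A minimal usage sketch (hypothetical caller code, not part of this patch):

#include <linux/bitmap.h>

#define EXAMPLE_POOL_BITS	64
static DECLARE_BITMAP(example_pool, EXAMPLE_POOL_BITS);

static int example_alloc_four(void)
{
	/* Claim an aligned run of 2^2 = 4 free bits, or get -ENOMEM. */
	return bitmap_find_free_region(example_pool, EXAMPLE_POOL_BITS, 2);
}

static void example_free_four(int pos)
{
	/* Release with the same order used at allocation time. */
	bitmap_release_region(example_pool, pos, 2);
}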
diff --git a/include/linux/btf.h b/include/linux/btf.h
index c2231c64d60b..59d404e22814 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -84,6 +84,17 @@
*/
#define __bpf_kfunc __used noinline
+#define __bpf_kfunc_start_defs() \
+ __diag_push(); \
+ __diag_ignore_all("-Wmissing-declarations", \
+ "Global kfuncs as their definitions will be in BTF");\
+ __diag_ignore_all("-Wmissing-prototypes", \
+ "Global kfuncs as their definitions will be in BTF")
+
+#define __bpf_kfunc_end_defs() __diag_pop()
+#define __bpf_hook_start() __bpf_kfunc_start_defs()
+#define __bpf_hook_end() __bpf_kfunc_end_defs()
+
/*
* Return the name of the passed struct, if exists, or halt the build if for
* example the structure gets renamed. In this way, developers have to revisit
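The new wrappers bundle the __diag_push()/__diag_ignore_all() boilerplate that kfunc definition blocks previously open-coded. A hedged sketch of the intended pattern (bpf_example_square() is an invented kfunc, not part of this patch):

#include <linux/btf.h>

__bpf_kfunc_start_defs();

__bpf_kfunc u64 bpf_example_square(u64 x)
{
	return x * x;
}

__bpf_kfunc_end_defs();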
diff --git a/include/linux/buildid.h b/include/linux/buildid.h
index 3b7a0ff4642f..8a582d242f06 100644
--- a/include/linux/buildid.h
+++ b/include/linux/buildid.h
@@ -2,10 +2,11 @@
#ifndef _LINUX_BUILDID_H
#define _LINUX_BUILDID_H
-#include <linux/mm_types.h>
+#include <linux/types.h>
#define BUILD_ID_SIZE_MAX 20
+struct vm_area_struct;
int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
__u32 *size);
int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size);
diff --git a/include/linux/cdx/cdx_bus.h b/include/linux/cdx/cdx_bus.h
index 8320ec3b9e37..94ad2c9017c9 100644
--- a/include/linux/cdx/cdx_bus.h
+++ b/include/linux/cdx/cdx_bus.h
@@ -30,6 +30,10 @@ struct cdx_device_config {
bool bus_master_enable;
};
+typedef int (*cdx_bus_enable_cb)(struct cdx_controller *cdx, u8 bus_num);
+
+typedef int (*cdx_bus_disable_cb)(struct cdx_controller *cdx, u8 bus_num);
+
typedef int (*cdx_scan_cb)(struct cdx_controller *cdx);
typedef int (*cdx_dev_configure_cb)(struct cdx_controller *cdx,
@@ -37,6 +41,19 @@ typedef int (*cdx_dev_configure_cb)(struct cdx_controller *cdx,
struct cdx_device_config *dev_config);
/**
+ * CDX_DEVICE - macro used to describe a specific CDX device
+ * @vend: the 16 bit CDX Vendor ID
+ * @dev: the 16 bit CDX Device ID
+ *
+ * This macro is used to create a struct cdx_device_id that matches a
+ * specific device. The subvendor and subdevice fields will be set to
+ * CDX_ANY_ID.
+ */
+#define CDX_DEVICE(vend, dev) \
+ .vendor = (vend), .device = (dev), \
+ .subvendor = CDX_ANY_ID, .subdevice = CDX_ANY_ID
+
+/**
* CDX_DEVICE_DRIVER_OVERRIDE - macro used to describe a CDX device with
* override_only flags.
* @vend: the 16 bit CDX Vendor ID
@@ -44,18 +61,24 @@ typedef int (*cdx_dev_configure_cb)(struct cdx_controller *cdx,
* @driver_override: the 32 bit CDX Device override_only
*
* This macro is used to create a struct cdx_device_id that matches only a
- * driver_override device.
+ * driver_override device. The subvendor and subdevice fields will be set to
+ * CDX_ANY_ID.
*/
#define CDX_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
- .vendor = (vend), .device = (dev), .override_only = (driver_override)
+ .vendor = (vend), .device = (dev), .subvendor = CDX_ANY_ID,\
+ .subdevice = CDX_ANY_ID, .override_only = (driver_override)
/**
* struct cdx_ops - Callbacks supported by CDX controller.
+ * @bus_enable: enable bus on the controller
+ * @bus_disable: disable bus on the controller
* @scan: scan the devices on the controller
* @dev_configure: configuration like reset, master_enable,
* msi_config etc for a CDX device
*/
struct cdx_ops {
+ cdx_bus_enable_cb bus_enable;
+ cdx_bus_disable_cb bus_disable;
cdx_scan_cb scan;
cdx_dev_configure_cb dev_configure;
};
@@ -65,12 +88,14 @@ struct cdx_ops {
* @dev: Linux device associated with the CDX controller.
* @priv: private data
* @id: Controller ID
+ * @controller_registered: controller registered with bus
* @ops: CDX controller ops
*/
struct cdx_controller {
struct device *dev;
void *priv;
u32 id;
+ bool controller_registered;
struct cdx_ops *ops;
};
@@ -80,6 +105,10 @@ struct cdx_controller {
* @cdx: CDX controller associated with the device
* @vendor: Vendor ID for CDX device
* @device: Device ID for CDX device
+ * @subsystem_vendor: Subsystem Vendor ID for CDX device
+ * @subsystem_device: Subsystem Device ID for CDX device
+ * @class: Class for the CDX device
+ * @revision: Revision of the CDX device
* @bus_num: Bus number for this CDX device
* @dev_num: Device number for this device
* @res: array of MMIO region entries
@@ -88,6 +117,8 @@ struct cdx_controller {
* @dma_mask: Default DMA mask
* @flags: CDX device flags
* @req_id: Requestor ID associated with CDX device
+ * @is_bus: Is this a bus device
+ * @enabled: Is this bus enabled
* @driver_override: driver name to force a match; do not set directly,
* because core frees it; use driver_set_override() to
* set or clear it.
@@ -97,6 +128,10 @@ struct cdx_device {
struct cdx_controller *cdx;
u16 vendor;
u16 device;
+ u16 subsystem_vendor;
+ u16 subsystem_device;
+ u32 class;
+ u8 revision;
u8 bus_num;
u8 dev_num;
struct resource res[MAX_CDX_DEV_RESOURCES];
@@ -104,6 +139,8 @@ struct cdx_device {
u64 dma_mask;
u16 flags;
u32 req_id;
+ bool is_bus;
+ bool enabled;
const char *driver_override;
};
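CDX_DEVICE() follows the familiar PCI_DEVICE() pattern for building ID tables, with the subsystem fields wildcarded. A hypothetical match table using it (the vendor/device numbers are placeholders):

#include <linux/cdx/cdx_bus.h>
#include <linux/mod_devicetable.h>

static const struct cdx_device_id example_cdx_ids[] = {
	{ CDX_DEVICE(0x10ee, 0x8083) },		/* placeholder IDs */
	{ },
};

Such a table is then referenced from the driver's match table when the driver registers with the CDX bus.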
diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h
index d5a5da838caf..11a92a946016 100644
--- a/include/linux/ceph/ceph_debug.h
+++ b/include/linux/ceph/ceph_debug.h
@@ -19,12 +19,25 @@
pr_debug("%.*s %12.12s:%-4d : " fmt, \
8 - (int)sizeof(KBUILD_MODNAME), " ", \
kbasename(__FILE__), __LINE__, ##__VA_ARGS__)
+# define doutc(client, fmt, ...) \
+ pr_debug("%.*s %12.12s:%-4d : [%pU %llu] " fmt, \
+ 8 - (int)sizeof(KBUILD_MODNAME), " ", \
+ kbasename(__FILE__), __LINE__, \
+ &client->fsid, client->monc.auth->global_id, \
+ ##__VA_ARGS__)
# else
/* faux printk call just to see any compiler warnings. */
# define dout(fmt, ...) do { \
if (0) \
printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
} while (0)
+# define doutc(client, fmt, ...) do { \
+ if (0) \
+ printk(KERN_DEBUG "[%pU %llu] " fmt, \
+ &client->fsid, \
+ client->monc.auth->global_id, \
+ ##__VA_ARGS__); \
+ } while (0)
# endif
#else
@@ -33,7 +46,32 @@
* or, just wrap pr_debug
*/
# define dout(fmt, ...) pr_debug(" " fmt, ##__VA_ARGS__)
+# define doutc(client, fmt, ...) \
+ pr_debug(" [%pU %llu] %s: " fmt, &client->fsid, \
+ client->monc.auth->global_id, __func__, ##__VA_ARGS__)
#endif
+#define pr_notice_client(client, fmt, ...) \
+ pr_notice("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_info_client(client, fmt, ...) \
+ pr_info("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_warn_client(client, fmt, ...) \
+ pr_warn("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_warn_once_client(client, fmt, ...) \
+ pr_warn_once("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_err_client(client, fmt, ...) \
+ pr_err("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_warn_ratelimited_client(client, fmt, ...) \
+ pr_warn_ratelimited("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_err_ratelimited_client(client, fmt, ...) \
+ pr_err_ratelimited("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+
#endif
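The *_client variants prefix each message with the client's fsid and global auth ID so that logs from multiple ceph mounts can be told apart. A hedged illustration (the surrounding function is invented):

#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/libceph.h>

static void example_report(struct ceph_client *client, int err)
{
	doutc(client, "mds session probe (err=%d)\n", err);
	if (err)
		pr_err_client(client, "session setup failed: %d\n", err);
}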
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index f3b3593254b9..ee1d0e5f9789 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -357,6 +357,11 @@ enum {
CEPH_MDS_OP_RENAMESNAP = 0x01403,
};
+#define IS_CEPH_MDS_OP_NEWINODE(op) (op == CEPH_MDS_OP_CREATE || \
+ op == CEPH_MDS_OP_MKNOD || \
+ op == CEPH_MDS_OP_MKDIR || \
+ op == CEPH_MDS_OP_SYMLINK)
+
extern const char *ceph_mds_op_name(int op);
#define CEPH_SETATTR_MODE (1 << 0)
@@ -497,7 +502,7 @@ struct ceph_mds_request_head_legacy {
union ceph_mds_request_args args;
} __attribute__ ((packed));
-#define CEPH_MDS_REQUEST_HEAD_VERSION 2
+#define CEPH_MDS_REQUEST_HEAD_VERSION 3
struct ceph_mds_request_head_old {
__le16 version; /* struct version */
@@ -528,6 +533,9 @@ struct ceph_mds_request_head {
__le32 ext_num_retry; /* new count retry attempts */
__le32 ext_num_fwd; /* new count fwd attempts */
+
+ __le32 struct_len; /* to store size of struct ceph_mds_request_head */
+ __le32 owner_uid, owner_gid; /* used for OPs which create inodes */
} __attribute__ ((packed));
/* cap/lease release record */
diff --git a/include/linux/closure.h b/include/linux/closure.h
index 722a586bb224..de7bb47d8a46 100644
--- a/include/linux/closure.h
+++ b/include/linux/closure.h
@@ -154,6 +154,7 @@ struct closure {
struct closure *parent;
atomic_t remaining;
+ bool closure_get_happened;
#ifdef CONFIG_DEBUG_CLOSURES
#define CLOSURE_MAGIC_DEAD 0xc054dead
@@ -185,7 +186,11 @@ static inline unsigned closure_nr_remaining(struct closure *cl)
*/
static inline void closure_sync(struct closure *cl)
{
- if (closure_nr_remaining(cl) != 1)
+#ifdef CONFIG_DEBUG_CLOSURES
+ BUG_ON(closure_nr_remaining(cl) != 1 && !cl->closure_get_happened);
+#endif
+
+ if (cl->closure_get_happened)
__closure_sync(cl);
}
@@ -233,8 +238,6 @@ static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
closure_set_ip(cl);
cl->fn = fn;
cl->wq = wq;
- /* between atomic_dec() in closure_put() */
- smp_mb__before_atomic();
}
static inline void closure_queue(struct closure *cl)
@@ -259,6 +262,8 @@ static inline void closure_queue(struct closure *cl)
*/
static inline void closure_get(struct closure *cl)
{
+ cl->closure_get_happened = true;
+
#ifdef CONFIG_DEBUG_CLOSURES
BUG_ON((atomic_inc_return(&cl->remaining) &
CLOSURE_REMAINING_MASK) <= 1);
@@ -281,6 +286,7 @@ static inline void closure_init(struct closure *cl, struct closure *parent)
closure_get(parent);
atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+ cl->closure_get_happened = false;
closure_debug_create(cl);
closure_set_ip(cl);
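With this change closure_sync() only blocks when at least one closure_get() was recorded, and debug builds assert that a non-trivial remaining count always coincides with that flag. A hedged sketch of the pattern this supports (submit_example_io() is invented):

static void example_wait_for_io(struct closure *cl)
{
	closure_init_stack(cl);

	closure_get(cl);		/* marks closure_get_happened */
	submit_example_io(cl);		/* completion path does closure_put(cl) */

	closure_sync(cl);		/* waits only because closure_get() ran */
}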
diff --git a/include/linux/comedi/comedi_8254.h b/include/linux/comedi/comedi_8254.h
index d8264417e53c..d527f04400df 100644
--- a/include/linux/comedi/comedi_8254.h
+++ b/include/linux/comedi/comedi_8254.h
@@ -12,6 +12,8 @@
#define _COMEDI_8254_H
#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/err.h>
struct comedi_device;
struct comedi_insn;
@@ -57,10 +59,24 @@ struct comedi_subdevice;
/* counter maps zero to 0x10000 */
#define I8254_MAX_COUNT 0x10000
+struct comedi_8254;
+
+/**
+ * typedef comedi_8254_iocb_fn - call-back function type for 8254 register access
+ * @i8254: pointer to struct comedi_8254
+ * @dir: direction (0 = read, 1 = write)
+ * @reg: register number
+ * @val: value to write
+ *
+ * Return: Register value when reading, 0 when writing.
+ */
+typedef unsigned int comedi_8254_iocb_fn(struct comedi_8254 *i8254, int dir,
+ unsigned int reg, unsigned int val);
+
/**
* struct comedi_8254 - private data used by this module
- * @iobase: PIO base address of the registers (in/out)
- * @mmio: MMIO base address of the registers (read/write)
+ * @iocb: I/O call-back function for register access
+ * @context: context for register access (e.g. a base address)
* @iosize: I/O size used to access the registers (b/w/l)
* @regshift: register gap shift
* @osc_base: cascaded oscillator speed in ns
@@ -76,8 +92,8 @@ struct comedi_subdevice;
* @insn_config: driver specific (*insn_config) callback
*/
struct comedi_8254 {
- unsigned long iobase;
- void __iomem *mmio;
+ comedi_8254_iocb_fn *iocb;
+ unsigned long context;
unsigned int iosize;
unsigned int regshift;
unsigned int osc_base;
@@ -122,13 +138,24 @@ void comedi_8254_set_busy(struct comedi_8254 *i8254,
void comedi_8254_subdevice_init(struct comedi_subdevice *s,
struct comedi_8254 *i8254);
-struct comedi_8254 *comedi_8254_init(unsigned long iobase,
- unsigned int osc_base,
- unsigned int iosize,
- unsigned int regshift);
-struct comedi_8254 *comedi_8254_mm_init(void __iomem *mmio,
- unsigned int osc_base,
- unsigned int iosize,
- unsigned int regshift);
+#ifdef CONFIG_HAS_IOPORT
+struct comedi_8254 *comedi_8254_io_alloc(unsigned long iobase,
+ unsigned int osc_base,
+ unsigned int iosize,
+ unsigned int regshift);
+#else
+static inline struct comedi_8254 *comedi_8254_io_alloc(unsigned long iobase,
+ unsigned int osc_base,
+ unsigned int iosize,
+ unsigned int regshift)
+{
+ return ERR_PTR(-ENXIO);
+}
+#endif
+
+struct comedi_8254 *comedi_8254_mm_alloc(void __iomem *mmio,
+ unsigned int osc_base,
+ unsigned int iosize,
+ unsigned int regshift);
#endif /* _COMEDI_8254_H */
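All 8254 register access now funnels through a single comedi_8254_iocb_fn instead of the old iobase/mmio pair, with @context carrying whatever the callback needs. A hedged sketch of a driver-private MMIO callback (the register layout is illustrative only):

#include <linux/io.h>
#include <linux/comedi/comedi_8254.h>

static unsigned int example_8254_iocb(struct comedi_8254 *i8254, int dir,
				      unsigned int reg, unsigned int val)
{
	void __iomem *base = (void __iomem *)i8254->context;

	if (dir) {
		writeb(val, base + reg);
		return 0;
	}
	return readb(base + reg);
}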
diff --git a/include/linux/comedi/comedi_8255.h b/include/linux/comedi/comedi_8255.h
index b2a5bc6b3a49..d24a69da389b 100644
--- a/include/linux/comedi/comedi_8255.h
+++ b/include/linux/comedi/comedi_8255.h
@@ -10,6 +10,8 @@
#ifndef _COMEDI_8255_H
#define _COMEDI_8255_H
+#include <linux/errno.h>
+
#define I8255_SIZE 0x04
#define I8255_DATA_A_REG 0x00
@@ -27,16 +29,26 @@
struct comedi_device;
struct comedi_subdevice;
-int subdev_8255_init(struct comedi_device *dev, struct comedi_subdevice *s,
- int (*io)(struct comedi_device *dev, int dir, int port,
- int data, unsigned long regbase),
- unsigned long regbase);
+#ifdef CONFIG_HAS_IOPORT
+int subdev_8255_io_init(struct comedi_device *dev, struct comedi_subdevice *s,
+ unsigned long regbase);
+#else
+static inline int subdev_8255_io_init(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned long regbase)
+{
+ return -ENXIO;
+}
+#endif
int subdev_8255_mm_init(struct comedi_device *dev, struct comedi_subdevice *s,
- int (*io)(struct comedi_device *dev, int dir, int port,
- int data, unsigned long regbase),
unsigned long regbase);
+int subdev_8255_cb_init(struct comedi_device *dev, struct comedi_subdevice *s,
+ int (*io)(struct comedi_device *dev, int dir, int port,
+ int data, unsigned long context),
+ unsigned long context);
+
unsigned long subdev_8255_regbase(struct comedi_subdevice *s);
#endif
diff --git a/include/linux/comedi/comedidev.h b/include/linux/comedi/comedidev.h
index 0a1150900ef3..c08416a7364b 100644
--- a/include/linux/comedi/comedidev.h
+++ b/include/linux/comedi/comedidev.h
@@ -633,7 +633,7 @@ extern const struct comedi_lrange range_unknown;
*/
struct comedi_lrange {
int length;
- struct comedi_krange range[];
+ struct comedi_krange range[] __counted_by(length);
};
/**
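__counted_by(length) ties the flexible range[] array to @length for compiler and runtime bounds checking; existing range tables already match the pattern. A hedged example of a conforming initializer (the values are arbitrary):

static const struct comedi_lrange example_ai_range = {
	2, {
		BIP_RANGE(10),		/* +/-10 V */
		UNI_RANGE(5),		/* 0..5 V */
	}
};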
diff --git a/include/linux/console.h b/include/linux/console.h
index 7de11c763eb3..779d388af8a0 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -101,6 +101,13 @@ extern const struct consw dummy_con; /* dummy console buffer */
extern const struct consw vga_con; /* VGA text console */
extern const struct consw newport_con; /* SGI Newport console */
+struct screen_info;
+#ifdef CONFIG_VGA_CONSOLE
+void vgacon_register_screen(struct screen_info *si);
+#else
+static inline void vgacon_register_screen(struct screen_info *si) { }
+#endif
+
int con_is_bound(const struct consw *csw);
int do_unregister_con_driver(const struct consw *csw);
int do_take_over_console(const struct consw *sw, int first, int last, int deflt);
@@ -156,6 +163,8 @@ static inline int con_debug_leave(void)
* /dev/kmesg which requires a larger output buffer.
* @CON_SUSPENDED: Indicates if a console is suspended. If true, the
* printing callbacks must not be called.
+ * @CON_NBCON: Console can operate outside of the legacy style console_lock
+ * constraints.
*/
enum cons_flags {
CON_PRINTBUFFER = BIT(0),
@@ -166,6 +175,111 @@ enum cons_flags {
CON_BRL = BIT(5),
CON_EXTENDED = BIT(6),
CON_SUSPENDED = BIT(7),
+ CON_NBCON = BIT(8),
+};
+
+/**
+ * struct nbcon_state - console state for nbcon consoles
+ * @atom: Compound of the state fields for atomic operations
+ *
+ * @req_prio: The priority of a handover request
+ * @prio: The priority of the current owner
+ * @unsafe: Console is busy in a non takeover region
+ * @unsafe_takeover: A hostile takeover in an unsafe state happened in the
+ * past. The console cannot be safe until re-initialized.
+ * @cpu: The CPU on which the owner runs
+ *
+ * To be used for reading and preparing of the value stored in the nbcon
+ * state variable @console::nbcon_state.
+ *
+ * The @prio and @req_prio fields are particularly important to allow
+ * spin-waiting to timeout and give up without the risk of a waiter being
+ * assigned the lock after giving up.
+ */
+struct nbcon_state {
+ union {
+ unsigned int atom;
+ struct {
+ unsigned int prio : 2;
+ unsigned int req_prio : 2;
+ unsigned int unsafe : 1;
+ unsigned int unsafe_takeover : 1;
+ unsigned int cpu : 24;
+ };
+ };
+};
+
+/*
+ * The nbcon_state struct is used to easily create and interpret values that
+ * are stored in the @console::nbcon_state variable. Ensure this struct stays
+ * within the size boundaries of the atomic variable's underlying type in
+ * order to avoid any accidental truncation.
+ */
+static_assert(sizeof(struct nbcon_state) <= sizeof(int));
+
+/**
+ * nbcon_prio - console owner priority for nbcon consoles
+ * @NBCON_PRIO_NONE: Unused
+ * @NBCON_PRIO_NORMAL: Normal (non-emergency) usage
+ * @NBCON_PRIO_EMERGENCY: Emergency output (WARN/OOPS...)
+ * @NBCON_PRIO_PANIC: Panic output
+ * @NBCON_PRIO_MAX: The number of priority levels
+ *
+ * A higher priority context can take over the console when it is
+ * in the safe state. The final attempt to flush consoles in panic()
+ * can be allowed to do so even in an unsafe state (Hope and pray).
+ */
+enum nbcon_prio {
+ NBCON_PRIO_NONE = 0,
+ NBCON_PRIO_NORMAL,
+ NBCON_PRIO_EMERGENCY,
+ NBCON_PRIO_PANIC,
+ NBCON_PRIO_MAX,
+};
+
+struct console;
+struct printk_buffers;
+
+/**
+ * struct nbcon_context - Context for console acquire/release
+ * @console: The associated console
+ * @spinwait_max_us: Limit for spin-wait acquire
+ * @prio: Priority of the context
+ * @allow_unsafe_takeover: Allow performing takeover even if unsafe. Can
+ * be used only with NBCON_PRIO_PANIC @prio. It
+ * might cause a system freeze when the console
+ * is used later.
+ * @backlog: Ringbuffer has pending records
+ * @pbufs: Pointer to the text buffer for this context
+ * @seq: The sequence number to print for this context
+ */
+struct nbcon_context {
+ /* members set by caller */
+ struct console *console;
+ unsigned int spinwait_max_us;
+ enum nbcon_prio prio;
+ unsigned int allow_unsafe_takeover : 1;
+
+ /* members set by emit */
+ unsigned int backlog : 1;
+
+ /* members set by acquire */
+ struct printk_buffers *pbufs;
+ u64 seq;
+};
+
+/**
+ * struct nbcon_write_context - Context handed to the nbcon write callbacks
+ * @ctxt: The core console context
+ * @outbuf: Pointer to the text buffer for output
+ * @len: Length to write
+ * @unsafe_takeover: If a hostile takeover in an unsafe state has occurred
+ */
+struct nbcon_write_context {
+ struct nbcon_context __private ctxt;
+ char *outbuf;
+ unsigned int len;
+ bool unsafe_takeover;
};
/**
@@ -187,6 +301,11 @@ enum cons_flags {
* @dropped: Number of unreported dropped ringbuffer records
* @data: Driver private data
* @node: hlist node for the console list
+ *
+ * @write_atomic: Write callback for atomic context
+ * @nbcon_state: State for nbcon consoles
+ * @nbcon_seq: Sequence number of the next record for nbcon to print
+ * @pbufs: Pointer to nbcon private buffer
*/
struct console {
char name[16];
@@ -206,6 +325,13 @@ struct console {
unsigned long dropped;
void *data;
struct hlist_node node;
+
+ /* nbcon console specific members */
+ bool (*write_atomic)(struct console *con,
+ struct nbcon_write_context *wctxt);
+ atomic_t __private nbcon_state;
+ atomic_long_t __private nbcon_seq;
+ struct printk_buffers *pbufs;
};
#ifdef CONFIG_LOCKDEP
@@ -332,6 +458,16 @@ static inline bool console_is_registered(const struct console *con)
lockdep_assert_console_list_lock_held(); \
hlist_for_each_entry(con, &console_list, node)
+#ifdef CONFIG_PRINTK
+extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt);
+extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
+extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
+#else
+static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; }
+static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; }
+static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; }
+#endif
+
extern int console_set_on_cmdline;
extern struct console *early_console;
@@ -340,7 +476,7 @@ enum con_flush_mode {
CONSOLE_REPLAY_ALL,
};
-extern int add_preferred_console(char *name, int idx, char *options);
+extern int add_preferred_console(const char *name, const short idx, char *options);
extern void console_force_preferred_locked(struct console *con);
extern void register_console(struct console *);
extern int unregister_console(struct console *);
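Because struct nbcon_state overlays its bitfields on the int-sized @atom member (and the static_assert guards that size), a console's whole state can be read or published with one atomic access. A hedged sketch of the access pattern (the helper name is invented; the real accessors live in the printk nbcon code):

static void example_nbcon_state_read(struct console *con,
				     struct nbcon_state *state)
{
	/* A single atomic read yields a consistent view of all fields. */
	state->atom = atomic_read(&ACCESS_PRIVATE(con, nbcon_state));
}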
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index f10fb87d49db..cfb545841a2c 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -4,7 +4,7 @@
/*
* Cpumasks provide a bitmap suitable for representing the
- * set of CPU's in a system, one bit position per CPU number. In general,
+ * set of CPUs in a system, one bit position per CPU number. In general,
* only nr_cpu_ids (<= NR_CPUS) bits are valid.
*/
#include <linux/kernel.h>
@@ -97,7 +97,7 @@ static inline void set_nr_cpu_ids(unsigned int nr)
*
* If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
*
- * The cpu_possible_mask is fixed at boot time, as the set of CPU id's
+ * The cpu_possible_mask is fixed at boot time, as the set of CPU IDs
* that it is possible might ever be plugged in at anytime during the
* life of that system boot. The cpu_present_mask is dynamic(*),
* representing which CPUs are currently plugged in. And
@@ -112,7 +112,7 @@ static inline void set_nr_cpu_ids(unsigned int nr)
* hotplug, it's a copy of cpu_possible_mask, hence fixed at boot.
*
* Subtleties:
- * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
+ * 1) UP ARCHes (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
* assumption that their single CPU is online. The UP
* cpu_{online,possible,present}_masks are placebos. Changing them
* will have no useful affect on the following num_*_cpus()
@@ -155,7 +155,7 @@ static __always_inline unsigned int cpumask_check(unsigned int cpu)
* cpumask_first - get the first cpu in a cpumask
* @srcp: the cpumask pointer
*
- * Returns >= nr_cpu_ids if no cpus set.
+ * Return: >= nr_cpu_ids if no cpus set.
*/
static inline unsigned int cpumask_first(const struct cpumask *srcp)
{
@@ -166,7 +166,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
* cpumask_first_zero - get the first unset cpu in a cpumask
* @srcp: the cpumask pointer
*
- * Returns >= nr_cpu_ids if all cpus are set.
+ * Return: >= nr_cpu_ids if all cpus are set.
*/
static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
{
@@ -178,7 +178,7 @@ static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
* @srcp1: the first input
* @srcp2: the second input
*
- * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
+ * Return: >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
*/
static inline
unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
@@ -190,7 +190,7 @@ unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask
* cpumask_last - get the last CPU in a cpumask
* @srcp: - the cpumask pointer
*
- * Returns >= nr_cpumask_bits if no CPUs set.
+ * Return: >= nr_cpumask_bits if no CPUs set.
*/
static inline unsigned int cpumask_last(const struct cpumask *srcp)
{
@@ -199,10 +199,10 @@ static inline unsigned int cpumask_last(const struct cpumask *srcp)
/**
* cpumask_next - get the next cpu in a cpumask
- * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @n: the cpu prior to the place to search (i.e. return will be > @n)
* @srcp: the cpumask pointer
*
- * Returns >= nr_cpu_ids if no further cpus set.
+ * Return: >= nr_cpu_ids if no further cpus set.
*/
static inline
unsigned int cpumask_next(int n, const struct cpumask *srcp)
@@ -215,10 +215,10 @@ unsigned int cpumask_next(int n, const struct cpumask *srcp)
/**
* cpumask_next_zero - get the next unset cpu in a cpumask
- * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @n: the cpu prior to the place to search (i.e. return will be > @n)
* @srcp: the cpumask pointer
*
- * Returns >= nr_cpu_ids if no further cpus unset.
+ * Return: >= nr_cpu_ids if no further cpus unset.
*/
static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
@@ -254,11 +254,11 @@ unsigned int cpumask_any_distribute(const struct cpumask *srcp);
/**
* cpumask_next_and - get the next cpu in *src1p & *src2p
- * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @n: the cpu prior to the place to search (i.e. return will be > @n)
* @src1p: the first cpumask pointer
* @src2p: the second cpumask pointer
*
- * Returns >= nr_cpu_ids if no further cpus set in both.
+ * Return: >= nr_cpu_ids if no further cpus set in both.
*/
static inline
unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
@@ -373,7 +373,7 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int sta
* @cpu: the cpu to ignore.
*
* Often used to find any cpu but smp_processor_id() in a mask.
- * Returns >= nr_cpu_ids if no cpus set.
+ * Return: >= nr_cpu_ids if no cpus set.
*/
static inline
unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
@@ -388,11 +388,11 @@ unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
}
/**
- * cpumask_nth - get the first cpu in a cpumask
+ * cpumask_nth - get the Nth cpu in a cpumask
* @srcp: the cpumask pointer
- * @cpu: the N'th cpu to find, starting from 0
+ * @cpu: the Nth cpu to find, starting from 0
*
- * Returns >= nr_cpu_ids if such cpu doesn't exist.
+ * Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
{
@@ -400,12 +400,12 @@ static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *s
}
/**
- * cpumask_nth_and - get the first cpu in 2 cpumasks
+ * cpumask_nth_and - get the Nth cpu in 2 cpumasks
* @srcp1: the cpumask pointer
* @srcp2: the cpumask pointer
- * @cpu: the N'th cpu to find, starting from 0
+ * @cpu: the Nth cpu to find, starting from 0
*
- * Returns >= nr_cpu_ids if such cpu doesn't exist.
+ * Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
static inline
unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
@@ -416,12 +416,12 @@ unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
}
/**
- * cpumask_nth_andnot - get the first cpu set in 1st cpumask, and clear in 2nd.
+ * cpumask_nth_andnot - get the Nth cpu set in 1st cpumask, and clear in 2nd.
* @srcp1: the cpumask pointer
* @srcp2: the cpumask pointer
- * @cpu: the N'th cpu to find, starting from 0
+ * @cpu: the Nth cpu to find, starting from 0
*
- * Returns >= nr_cpu_ids if such cpu doesn't exist.
+ * Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
static inline
unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
@@ -436,9 +436,9 @@ unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
* @srcp1: the cpumask pointer
* @srcp2: the cpumask pointer
* @srcp3: the cpumask pointer
- * @cpu: the N'th cpu to find, starting from 0
+ * @cpu: the Nth cpu to find, starting from 0
*
- * Returns >= nr_cpu_ids if such cpu doesn't exist.
+ * Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
static __always_inline
unsigned int cpumask_nth_and_andnot(unsigned int cpu, const struct cpumask *srcp1,
@@ -497,7 +497,7 @@ static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns true if @cpu is set in @cpumask, else returns false
+ * Return: true if @cpu is set in @cpumask, else returns false
*/
static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
@@ -509,9 +509,9 @@ static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpum
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns true if @cpu is set in old bitmap of @cpumask, else returns false
- *
* test_and_set_bit wrapper for cpumasks.
+ *
+ * Return: true if @cpu is set in old bitmap of @cpumask, else returns false
*/
static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
@@ -523,9 +523,9 @@ static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cp
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns true if @cpu is set in old bitmap of @cpumask, else returns false
- *
* test_and_clear_bit wrapper for cpumasks.
+ *
+ * Return: true if @cpu is set in old bitmap of @cpumask, else returns false
*/
static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
@@ -560,7 +560,7 @@ static inline void cpumask_clear(struct cpumask *dstp)
* @src1p: the first input
* @src2p: the second input
*
- * If *@dstp is empty, returns false, else returns true
+ * Return: false if *@dstp is empty, else returns true
*/
static inline bool cpumask_and(struct cpumask *dstp,
const struct cpumask *src1p,
@@ -603,7 +603,7 @@ static inline void cpumask_xor(struct cpumask *dstp,
* @src1p: the first input
* @src2p: the second input
*
- * If *@dstp is empty, returns false, else returns true
+ * Return: false if *@dstp is empty, else returns true
*/
static inline bool cpumask_andnot(struct cpumask *dstp,
const struct cpumask *src1p,
@@ -617,6 +617,8 @@ static inline bool cpumask_andnot(struct cpumask *dstp,
* cpumask_equal - *src1p == *src2p
* @src1p: the first input
* @src2p: the second input
+ *
+ * Return: true if the cpumasks are equal, false if not
*/
static inline bool cpumask_equal(const struct cpumask *src1p,
const struct cpumask *src2p)
@@ -630,6 +632,9 @@ static inline bool cpumask_equal(const struct cpumask *src1p,
* @src1p: the first input
* @src2p: the second input
* @src3p: the third input
+ *
+ * Return: true if first cpumask ORed with second cpumask == third cpumask,
+ * otherwise false
*/
static inline bool cpumask_or_equal(const struct cpumask *src1p,
const struct cpumask *src2p,
@@ -643,6 +648,9 @@ static inline bool cpumask_or_equal(const struct cpumask *src1p,
* cpumask_intersects - (*src1p & *src2p) != 0
* @src1p: the first input
* @src2p: the second input
+ *
+ * Return: true if first cpumask ANDed with second cpumask is non-empty,
+ * otherwise false
*/
static inline bool cpumask_intersects(const struct cpumask *src1p,
const struct cpumask *src2p)
@@ -656,7 +664,7 @@ static inline bool cpumask_intersects(const struct cpumask *src1p,
* @src1p: the first input
* @src2p: the second input
*
- * Returns true if *@src1p is a subset of *@src2p, else returns false
+ * Return: true if *@src1p is a subset of *@src2p, else returns false
*/
static inline bool cpumask_subset(const struct cpumask *src1p,
const struct cpumask *src2p)
@@ -668,6 +676,8 @@ static inline bool cpumask_subset(const struct cpumask *src1p,
/**
* cpumask_empty - *srcp == 0
* @srcp: the cpumask to that all cpus < nr_cpu_ids are clear.
+ *
+ * Return: true if srcp is empty (has no bits set), else false
*/
static inline bool cpumask_empty(const struct cpumask *srcp)
{
@@ -677,6 +687,8 @@ static inline bool cpumask_empty(const struct cpumask *srcp)
/**
* cpumask_full - *srcp == 0xFFFFFFFF...
* @srcp: the cpumask to that all cpus < nr_cpu_ids are set.
+ *
+ * Return: true if srcp is full (has all bits set), else false
*/
static inline bool cpumask_full(const struct cpumask *srcp)
{
@@ -686,6 +698,8 @@ static inline bool cpumask_full(const struct cpumask *srcp)
/**
* cpumask_weight - Count of bits in *srcp
* @srcp: the cpumask to count bits (< nr_cpu_ids) in.
+ *
+ * Return: count of bits set in *srcp
*/
static inline unsigned int cpumask_weight(const struct cpumask *srcp)
{
@@ -696,6 +710,8 @@ static inline unsigned int cpumask_weight(const struct cpumask *srcp)
* cpumask_weight_and - Count of bits in (*srcp1 & *srcp2)
* @srcp1: the cpumask to count bits (< nr_cpu_ids) in.
* @srcp2: the cpumask to count bits (< nr_cpu_ids) in.
+ *
+ * Return: count of bits set in both *srcp1 and *srcp2
*/
static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1,
const struct cpumask *srcp2)
@@ -744,7 +760,7 @@ static inline void cpumask_copy(struct cpumask *dstp,
* cpumask_any - pick a "random" cpu from *srcp
* @srcp: the input cpumask
*
- * Returns >= nr_cpu_ids if no cpus set.
+ * Return: >= nr_cpu_ids if no cpus set.
*/
#define cpumask_any(srcp) cpumask_first(srcp)
@@ -753,7 +769,7 @@ static inline void cpumask_copy(struct cpumask *dstp,
* @mask1: the first input cpumask
* @mask2: the second input cpumask
*
- * Returns >= nr_cpu_ids if no cpus set.
+ * Return: >= nr_cpu_ids if no cpus set.
*/
#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))
@@ -769,7 +785,7 @@ static inline void cpumask_copy(struct cpumask *dstp,
* @len: the length of the buffer
* @dstp: the cpumask to set.
*
- * Returns -errno, or 0 for success.
+ * Return: -errno, or 0 for success.
*/
static inline int cpumask_parse_user(const char __user *buf, int len,
struct cpumask *dstp)
@@ -783,7 +799,7 @@ static inline int cpumask_parse_user(const char __user *buf, int len,
* @len: the length of the buffer
* @dstp: the cpumask to set.
*
- * Returns -errno, or 0 for success.
+ * Return: -errno, or 0 for success.
*/
static inline int cpumask_parselist_user(const char __user *buf, int len,
struct cpumask *dstp)
@@ -797,7 +813,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
* @buf: the buffer to extract from
* @dstp: the cpumask to set.
*
- * Returns -errno, or 0 for success.
+ * Return: -errno, or 0 for success.
*/
static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
{
@@ -809,7 +825,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
* @buf: the buffer to extract from
* @dstp: the cpumask to set.
*
- * Returns -errno, or 0 for success.
+ * Return: -errno, or 0 for success.
*/
static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
@@ -817,7 +833,9 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
}
/**
- * cpumask_size - size to allocate for a 'struct cpumask' in bytes
+ * cpumask_size - calculate size to allocate for a 'struct cpumask' in bytes
+ *
+ * Return: size to allocate for a &struct cpumask in bytes
*/
static inline unsigned int cpumask_size(void)
{
@@ -831,7 +849,7 @@ static inline unsigned int cpumask_size(void)
* little more difficult, we typedef cpumask_var_t to an array or a
* pointer: doing &mask on an array is a noop, so it still works.
*
- * ie.
+ * i.e.
* cpumask_var_t tmpmask;
* if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
* return -ENOMEM;
@@ -887,6 +905,8 @@ bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
* a nop returning a constant 1 (in <linux/cpumask.h>).
*
* See alloc_cpumask_var_node.
+ *
+ * Return: %true if allocation succeeded, %false if not
*/
static inline
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
@@ -1025,7 +1045,7 @@ set_cpu_dying(unsigned int cpu, bool dying)
}
/**
- * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
+ * to_cpumask - convert a NR_CPUS bitmap to a struct cpumask *
* @bitmap: the bitmap
*
* There are a few places where cpumask_var_t isn't appropriate and
@@ -1068,6 +1088,8 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
* interface gives only a momentary snapshot and is not protected against
* concurrent CPU hotplug operations unless invoked from a cpuhp_lock held
* region.
+ *
+ * Return: momentary snapshot of the number of online CPUs
*/
static __always_inline unsigned int num_online_cpus(void)
{
@@ -1160,7 +1182,7 @@ static inline bool cpu_dying(unsigned int cpu)
* @mask: the cpumask to copy
* @buf: the buffer to copy into
*
- * Returns the length of the (null-terminated) @buf string, zero if
+ * Return: the length of the (null-terminated) @buf string, zero if
* nothing is copied.
*/
static inline ssize_t
@@ -1183,7 +1205,7 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
* cpumask; Typically used by bin_attribute to export cpumask bitmask
* ABI.
*
- * Returns the length of how many bytes have been copied, excluding
+ * Return: the length of how many bytes have been copied, excluding
* terminating '\0'.
*/
static inline ssize_t
@@ -1204,6 +1226,9 @@ cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
*
* Everything is same with the above cpumap_print_bitmask_to_buf()
* except the print format.
+ *
+ * Return: the length of how many bytes have been copied, excluding
+ * terminating '\0'.
*/
static inline ssize_t
cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 0f3a656293b0..acc55626afdc 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -50,6 +50,7 @@ void vmcore_cleanup(void);
#define vmcore_elf64_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x))
#endif
+#ifndef is_kdump_kernel
/*
* is_kdump_kernel() checks whether this kernel is booting after a panic of
* previous kernel or not. This is determined by checking if previous kernel
@@ -64,6 +65,7 @@ static inline bool is_kdump_kernel(void)
{
return elfcorehdr_addr != ELFCORE_ADDR_MAX;
}
+#endif
/* is_vmcore_usable() checks if the kernel is booting after a panic and
* the vmcore region is usable.
@@ -75,7 +77,8 @@ static inline bool is_kdump_kernel(void)
static inline int is_vmcore_usable(void)
{
- return is_kdump_kernel() && elfcorehdr_addr != ELFCORE_ADDR_ERR ? 1 : 0;
+ return elfcorehdr_addr != ELFCORE_ADDR_ERR &&
+ elfcorehdr_addr != ELFCORE_ADDR_MAX ? 1 : 0;
}
/* vmcore_unusable() marks the vmcore as unusable,
@@ -84,8 +87,7 @@ static inline int is_vmcore_usable(void)
static inline void vmcore_unusable(void)
{
- if (is_kdump_kernel())
- elfcorehdr_addr = ELFCORE_ADDR_ERR;
+ elfcorehdr_addr = ELFCORE_ADDR_ERR;
}
/**
diff --git a/include/linux/device.h b/include/linux/device.h
index 56d93a1ffb7b..d7a72a8749ea 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -389,8 +389,8 @@ void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
void devm_release_action(struct device *dev, void (*action)(void *), void *data);
int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name);
-#define devm_add_action(release, action, data) \
- __devm_add_action(release, action, data, #action)
+#define devm_add_action(dev, action, data) \
+ __devm_add_action(dev, action, data, #action)
static inline int __devm_add_action_or_reset(struct device *dev, void (*action)(void *),
void *data, const char *name)
@@ -403,8 +403,8 @@ static inline int __devm_add_action_or_reset(struct device *dev, void (*action)(
return ret;
}
-#define devm_add_action_or_reset(release, action, data) \
- __devm_add_action_or_reset(release, action, data, #action)
+#define devm_add_action_or_reset(dev, action, data) \
+ __devm_add_action_or_reset(dev, action, data, #action)
/**
* devm_alloc_percpu - Resource-managed alloc_percpu
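The macro arguments are renamed from @release to @dev to match what callers actually pass; usage is unchanged. A hedged sketch (example_disable_clk() is an invented cleanup action):

#include <linux/clk.h>
#include <linux/device.h>

static void example_disable_clk(void *data)
{
	clk_disable_unprepare(data);
}

static int example_setup(struct device *dev, struct clk *clk)
{
	/* Registers the action against dev; #action supplies its name. */
	return devm_add_action_or_reset(dev, example_disable_clk, clk);
}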
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index f0ccca16a0ac..4a658de44ee9 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -144,6 +144,7 @@ bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
+bool dma_addressing_limited(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
size_t dma_opt_mapping_size(struct device *dev);
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
@@ -264,6 +265,10 @@ static inline u64 dma_get_required_mask(struct device *dev)
{
return 0;
}
+static inline bool dma_addressing_limited(struct device *dev)
+{
+ return false;
+}
static inline size_t dma_max_mapping_size(struct device *dev)
{
return 0;
@@ -465,20 +470,6 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
return dma_set_mask_and_coherent(dev, mask);
}
-/**
- * dma_addressing_limited - return if the device is addressing limited
- * @dev: device to check
- *
- * Return %true if the devices DMA mask is too small to address all memory in
- * the system, else %false. Lack of addressing bits is the prime reason for
- * bounce buffering, but might not be the only one.
- */
-static inline bool dma_addressing_limited(struct device *dev)
-{
- return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
- dma_get_required_mask(dev);
-}
-
static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
if (dev->dma_parms && dev->dma_parms->max_segment_size)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index c3656e590213..3df70d6131c8 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -517,8 +517,6 @@ static inline const char *dma_chan_name(struct dma_chan *chan)
return dev_name(&chan->dev->device);
}
-void dma_chan_cleanup(struct kref *kref);
-
/**
* typedef dma_filter_fn - callback filter for dma_request_channel
* @chan: channel to be reviewed
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 226a36ed5aa1..689028257fcc 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -1045,10 +1045,10 @@ static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add,
/**
* ethtool_sprintf - Write formatted string to ethtool string data
- * @data: Pointer to start of string to update
+ * @data: Pointer to a pointer to the start of string to update
* @fmt: Format of string to write
*
- * Write formatted string to data. Update data to point at start of
+ * Write formatted string to *data. Update *data to point at start of
* next string.
*/
extern __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...);
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 0388e8c20f52..bb37ad5cc954 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -99,6 +99,17 @@ enum fid_type {
FILEID_FAT_WITH_PARENT = 0x72,
/*
+ * 64 bit inode number, 32 bit generation number.
+ */
+ FILEID_INO64_GEN = 0x81,
+
+ /*
+ * 64 bit inode number, 32 bit generation number,
+ * 64 bit parent inode number, 32 bit parent generation.
+ */
+ FILEID_INO64_GEN_PARENT = 0x82,
+
+ /*
* 128 bit child FID (struct lu_fid)
* 128 bit parent FID (struct lu_fid)
*/
@@ -129,7 +140,11 @@ struct fid {
u32 parent_ino;
u32 parent_gen;
} i32;
- struct {
+ struct {
+ u64 ino;
+ u32 gen;
+ } __packed i64;
+ struct {
u32 block;
u16 partref;
u16 parent_partref;
@@ -253,6 +268,33 @@ extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid,
extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid,
int *max_len, int flags);
+static inline bool exportfs_can_encode_fid(const struct export_operations *nop)
+{
+ return !nop || nop->encode_fh;
+}
+
+static inline bool exportfs_can_decode_fh(const struct export_operations *nop)
+{
+ return nop && nop->fh_to_dentry;
+}
+
+static inline bool exportfs_can_encode_fh(const struct export_operations *nop,
+ int fh_flags)
+{
+ /*
+ * If a non-decodeable file handle was requested, we only need to make
+ * sure that the filesystem did not opt out of encoding a fid.
+ */
+ if (fh_flags & EXPORT_FH_FID)
+ return exportfs_can_encode_fid(nop);
+
+ /*
+ * If a decodeable file handle was requested, we need to make sure that
+ * filesystem can also decode file handles.
+ */
+ return exportfs_can_decode_fh(nop);
+}
+
static inline int exportfs_encode_fid(struct inode *inode, struct fid *fid,
int *max_len)
{
@@ -272,10 +314,12 @@ extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
/*
* Generic helpers for filesystems.
*/
-extern struct dentry *generic_fh_to_dentry(struct super_block *sb,
+int generic_encode_ino32_fh(struct inode *inode, __u32 *fh, int *max_len,
+ struct inode *parent);
+struct dentry *generic_fh_to_dentry(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type,
struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
-extern struct dentry *generic_fh_to_parent(struct super_block *sb,
+struct dentry *generic_fh_to_parent(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type,
struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
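Splitting the check into exportfs_can_encode_fid() and exportfs_can_encode_fh() lets callers that only want an opaque FID (EXPORT_FH_FID) accept filesystems that cannot decode file handles. A hedged sketch of the caller-side test (the wrapper function is invented):

#include <linux/exportfs.h>
#include <linux/fs.h>

static int example_check_export(struct super_block *sb, int fh_flags)
{
	if (!exportfs_can_encode_fh(sb->s_export_op, fh_flags))
		return -EOPNOTSUPP;
	return 0;
}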
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index a82a4bb6ce68..039fe0ce8d83 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -13,10 +13,10 @@
#define F2FS_SUPER_OFFSET 1024 /* byte-size offset */
#define F2FS_MIN_LOG_SECTOR_SIZE 9 /* 9 bits for 512 bytes */
-#define F2FS_MAX_LOG_SECTOR_SIZE 12 /* 12 bits for 4096 bytes */
-#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* log number for sector/blk */
-#define F2FS_BLKSIZE 4096 /* support only 4KB block */
-#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */
+#define F2FS_MAX_LOG_SECTOR_SIZE PAGE_SHIFT /* Max is Block Size */
+#define F2FS_LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9) /* log number for sector/blk */
+#define F2FS_BLKSIZE PAGE_SIZE /* support only block == page */
+#define F2FS_BLKSIZE_BITS PAGE_SHIFT /* bits for F2FS_BLKSIZE */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
#define F2FS_EXTENSION_LEN 8 /* max size of extension */
#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
@@ -104,6 +104,7 @@ enum f2fs_error {
ERROR_CORRUPTED_VERITY_XATTR,
ERROR_CORRUPTED_XATTR,
ERROR_INVALID_NODE_REFERENCE,
+ ERROR_INCONSISTENT_NAT,
ERROR_MAX,
};
@@ -210,14 +211,14 @@ struct f2fs_checkpoint {
unsigned char sit_nat_version_bitmap[];
} __packed;
-#define CP_CHKSUM_OFFSET 4092 /* default chksum offset in checkpoint */
+#define CP_CHKSUM_OFFSET (F2FS_BLKSIZE - sizeof(__le32)) /* default chksum offset in checkpoint */
#define CP_MIN_CHKSUM_OFFSET \
(offsetof(struct f2fs_checkpoint, sit_nat_version_bitmap))
/*
* For orphan inode management
*/
-#define F2FS_ORPHANS_PER_BLOCK 1020
+#define F2FS_ORPHANS_PER_BLOCK ((F2FS_BLKSIZE - 4 * sizeof(__le32)) / sizeof(__le32))
#define GET_ORPHAN_BLOCKS(n) (((n) + F2FS_ORPHANS_PER_BLOCK - 1) / \
F2FS_ORPHANS_PER_BLOCK)
@@ -243,14 +244,31 @@ struct f2fs_extent {
#define F2FS_NAME_LEN 255
/* 200 bytes for inline xattrs by default */
#define DEFAULT_INLINE_XATTR_ADDRS 50
-#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
+
+#define OFFSET_OF_END_OF_I_EXT 360
+#define SIZE_OF_I_NID 20
+
+struct node_footer {
+ __le32 nid; /* node id */
+ __le32 ino; /* inode number */
+ __le32 flag; /* include cold/fsync/dentry marks and offset */
+ __le64 cp_ver; /* checkpoint version */
+ __le32 next_blkaddr; /* next node page block address */
+} __packed;
+
+/* Address Pointers in an Inode */
+#define DEF_ADDRS_PER_INODE ((F2FS_BLKSIZE - OFFSET_OF_END_OF_I_EXT \
+ - SIZE_OF_I_NID \
+ - sizeof(struct node_footer)) / sizeof(__le32))
#define CUR_ADDRS_PER_INODE(inode) (DEF_ADDRS_PER_INODE - \
get_extra_isize(inode))
#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */
#define ADDRS_PER_INODE(inode) addrs_per_inode(inode)
-#define DEF_ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
+/* Address Pointers in a Direct Block */
+#define DEF_ADDRS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32))
#define ADDRS_PER_BLOCK(inode) addrs_per_block(inode)
-#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
+/* Node IDs in an Indirect Block */
+#define NIDS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32))
#define ADDRS_PER_PAGE(page, inode) \
(IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK(inode))
@@ -342,14 +360,6 @@ enum {
#define OFFSET_BIT_MASK GENMASK(OFFSET_BIT_SHIFT - 1, 0)
-struct node_footer {
- __le32 nid; /* node id */
- __le32 ino; /* inode number */
- __le32 flag; /* include cold/fsync/dentry marks and offset */
- __le64 cp_ver; /* checkpoint version */
- __le32 next_blkaddr; /* next node page block address */
-} __packed;
-
struct f2fs_node {
/* can be one of three types: inode, direct, and indirect types */
union {
@@ -363,7 +373,7 @@ struct f2fs_node {
/*
* For NAT entries
*/
-#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
+#define NAT_ENTRY_PER_BLOCK (F2FS_BLKSIZE / sizeof(struct f2fs_nat_entry))
struct f2fs_nat_entry {
__u8 version; /* latest version of cached nat entry */
@@ -378,12 +388,13 @@ struct f2fs_nat_block {
/*
* For SIT entries
*
- * Each segment is 2MB in size by default so that a bitmap for validity of
- * there-in blocks should occupy 64 bytes, 512 bits.
+ * A validity bitmap of 64 bytes covers 512 blocks of area. For a 4K page size,
+ * this results in a segment size of 2MB. For 16k pages, the default segment size
+ * is 8MB.
* Not allow to change this.
*/
#define SIT_VBLOCK_MAP_SIZE 64
-#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
+#define SIT_ENTRY_PER_BLOCK (F2FS_BLKSIZE / sizeof(struct f2fs_sit_entry))
/*
* F2FS uses 4 bytes to represent block address. As a result, supported size of
@@ -418,7 +429,7 @@ struct f2fs_sit_block {
* For segment summary
*
* One summary block contains exactly 512 summary entries, which represents
- * exactly 2MB segment by default. Not allow to change the basic units.
+ * exactly one segment by default. Changing the basic units is not allowed.
*
* NOTE: For initializing fields, you must use set_summary
*
@@ -429,12 +440,12 @@ struct f2fs_sit_block {
* from node's page's beginning to get a data block address.
* ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node)
*/
-#define ENTRIES_IN_SUM 512
+#define ENTRIES_IN_SUM (F2FS_BLKSIZE / 8)
#define SUMMARY_SIZE (7) /* sizeof(struct summary) */
#define SUM_FOOTER_SIZE (5) /* sizeof(struct summary_footer) */
#define SUM_ENTRY_SIZE (SUMMARY_SIZE * ENTRIES_IN_SUM)
-/* a summary entry for a 4KB-sized block in a segment */
+/* a summary entry for a block in a segment */
struct f2fs_summary {
__le32 nid; /* parent node id */
union {
@@ -518,7 +529,7 @@ struct f2fs_journal {
};
} __packed;
-/* 4KB-sized summary block structure */
+/* Block-sized summary block structure */
struct f2fs_summary_block {
struct f2fs_summary entries[ENTRIES_IN_SUM];
struct f2fs_journal journal;
@@ -559,11 +570,14 @@ typedef __le32 f2fs_hash_t;
* Note: there are more reserved space in inline dentry than in regular
* dentry, when converting inline dentry we should handle this carefully.
*/
-#define NR_DENTRY_IN_BLOCK 214 /* the number of dentry in a block */
+
+/* the number of dentry in a block */
+#define NR_DENTRY_IN_BLOCK ((BITS_PER_BYTE * F2FS_BLKSIZE) / \
+ ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * BITS_PER_BYTE + 1))
#define SIZE_OF_DIR_ENTRY 11 /* by byte */
#define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \
BITS_PER_BYTE)
-#define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \
+#define SIZE_OF_RESERVED (F2FS_BLKSIZE - ((SIZE_OF_DIR_ENTRY + \
F2FS_SLOT_LEN) * \
NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP))
#define MIN_INLINE_DENTRY_SIZE 40 /* just include '.' and '..' entries */
@@ -576,7 +590,7 @@ struct f2fs_dir_entry {
__u8 file_type; /* file type */
} __packed;
-/* 4KB-sized directory entry block */
+/* Block-sized directory entry block */
struct f2fs_dentry_block {
/* validity bitmap for directory entries in each block */
__u8 dentry_bitmap[SIZE_OF_DENTRY_BITMAP];
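Expressing these constants in terms of F2FS_BLKSIZE preserves the historical values on 4KB pages while letting larger page sizes scale them. A quick sanity check with F2FS_BLKSIZE = 4096 and sizeof(struct node_footer) = 24 reproduces the removed literals:

	DEF_ADDRS_PER_INODE    = (4096 - 360 - 20 - 24) / 4          = 923
	DEF_ADDRS_PER_BLOCK    = (4096 - 24) / 4                     = 1018
	F2FS_ORPHANS_PER_BLOCK = (4096 - 4 * 4) / 4                  = 1020
	NR_DENTRY_IN_BLOCK     = (8 * 4096) / ((11 + 8) * 8 + 1)     = 214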
diff --git a/include/linux/fw_table.h b/include/linux/fw_table.h
new file mode 100644
index 000000000000..ff8fa58d5818
--- /dev/null
+++ b/include/linux/fw_table.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * fw_table.h - Parsing support for ACPI and ACPI-like tables provided by
+ * platform or device firmware
+ *
+ * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2023 Intel Corp.
+ */
+#ifndef _FW_TABLE_H_
+#define _FW_TABLE_H_
+
+union acpi_subtable_headers;
+
+typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *header,
+ const unsigned long end);
+
+typedef int (*acpi_tbl_entry_handler_arg)(union acpi_subtable_headers *header,
+ void *arg, const unsigned long end);
+
+struct acpi_subtable_proc {
+ int id;
+ acpi_tbl_entry_handler handler;
+ acpi_tbl_entry_handler_arg handler_arg;
+ void *arg;
+ int count;
+};
+
+#include <linux/acpi.h>
+#include <acpi/acpi.h>
+
+union acpi_subtable_headers {
+ struct acpi_subtable_header common;
+ struct acpi_hmat_structure hmat;
+ struct acpi_prmt_module_header prmt;
+ struct acpi_cedt_header cedt;
+};
+
+int acpi_parse_entries_array(char *id, unsigned long table_size,
+ struct acpi_table_header *table_header,
+ struct acpi_subtable_proc *proc,
+ int proc_num, unsigned int max_entries);
+
+#endif
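The interface mirrors the existing ACPI subtable walkers: the caller fills in one acpi_subtable_proc per subtable type and lets acpi_parse_entries_array() do the iteration. A hedged sketch that counts CEDT subtables (the handler, wrapper, and subtable id are invented, and the table_size bookkeeping is simplified):

#include <linux/fw_table.h>

static int example_count_subtable(union acpi_subtable_headers *header,
				  void *arg, const unsigned long end)
{
	int *count = arg;

	(*count)++;
	return 0;
}

static int example_walk_cedt(struct acpi_table_header *table)
{
	char id[5] = ACPI_SIG_CEDT;
	int count = 0;
	struct acpi_subtable_proc proc = {
		.id = 0,			/* subtable type to match (placeholder) */
		.handler_arg = example_count_subtable,
		.arg = &count,
	};

	return acpi_parse_entries_array(id, sizeof(struct acpi_table_cedt),
					table, &proc, 1, 0);
}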
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 5700451b300f..2a72f55d26eb 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -41,6 +41,8 @@ struct device;
struct fwnode_handle {
struct fwnode_handle *secondary;
const struct fwnode_operations *ops;
+
+ /* The below is used solely by device links, don't use otherwise */
struct device *dev;
struct list_head suppliers;
struct list_head consumers;
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 1d454dc944b3..0aed62f0c633 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -605,9 +605,6 @@ int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc,
void *data, struct lock_class_key *lock_key,
struct lock_class_key *request_key);
-struct gpio_chip *gpiochip_find(void *data,
- int (*match)(struct gpio_chip *gc, void *data));
-
struct gpio_device *gpio_device_find(void *data,
int (*match)(struct gpio_chip *gc, void *data));
struct gpio_device *gpio_device_find_by_label(const char *label);
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index 13b1e65fbdcc..6730ee900ee1 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -21,6 +21,10 @@
#define HID_USAGE_SENSOR_ALS 0x200041
#define HID_USAGE_SENSOR_DATA_LIGHT 0x2004d0
#define HID_USAGE_SENSOR_LIGHT_ILLUM 0x2004d1
+#define HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE 0x2004d2
+#define HID_USAGE_SENSOR_LIGHT_CHROMATICITY 0x2004d3
+#define HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X 0x2004d4
+#define HID_USAGE_SENSOR_LIGHT_CHROMATICITY_Y 0x2004d5
/* PROX (200011) */
#define HID_USAGE_SENSOR_PROX 0x200011
diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
index 90fa83464f00..84ed77c04940 100644
--- a/include/linux/i3c/device.h
+++ b/include/linux/i3c/device.h
@@ -96,7 +96,7 @@ enum i3c_dcr {
/**
* struct i3c_device_info - I3C device information
- * @pid: Provisional ID
+ * @pid: Provisioned ID
* @bcr: Bus Characteristic Register
* @dcr: Device Characteristic Register
* @static_addr: static/I2C address
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
index db909ef79be4..24c1863b86e2 100644
--- a/include/linux/i3c/master.h
+++ b/include/linux/i3c/master.h
@@ -135,6 +135,7 @@ struct i3c_ibi_slot {
* rejected by the master
* @num_slots: number of IBI slots reserved for this device
* @enabled: reflect the IBI status
+ * @wq: workqueue used to execute IBI handlers.
* @handler: IBI handler specified at i3c_device_request_ibi() call time. This
* handler will be called from the controller workqueue, and as such
* is allowed to sleep (though it is recommended to process the IBI
@@ -157,6 +158,7 @@ struct i3c_device_ibi_info {
unsigned int max_payload_len;
unsigned int num_slots;
unsigned int enabled;
+ struct workqueue_struct *wq;
void (*handler)(struct i3c_device *dev,
const struct i3c_ibi_payload *payload);
};
@@ -172,7 +174,7 @@ struct i3c_device_ibi_info {
* assigned a dynamic address by the master. Will be used during
* bus initialization to assign it a specific dynamic address
* before starting DAA (Dynamic Address Assignment)
- * @pid: I3C Provisional ID exposed by the device. This is a unique identifier
+ * @pid: I3C Provisioned ID exposed by the device. This is a unique identifier
* that may be used to attach boardinfo to i3c_dev_desc when the device
* does not have a static address
* @of_node: optional DT node in case the device has been described in the DT
@@ -475,7 +477,7 @@ struct i3c_master_controller_ops {
* @boardinfo.i2c: list of I2C boardinfo objects
* @boardinfo: board-level information attached to devices connected on the bus
* @bus: I3C bus exposed by this master
- * @wq: workqueue used to execute IBI handlers. Can also be used by master
+ * @wq: workqueue which can be used by master
* drivers if they need to postpone operations that need to take place
* in a thread context. Typical examples are Hot Join processing which
* requires taking the bus lock in maintenance, which in turn, can only
diff --git a/include/linux/idr.h b/include/linux/idr.h
index a0dce14090a9..da5f5fa4a3a6 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -200,7 +200,7 @@ static inline void idr_preload_end(void)
*/
#define idr_for_each_entry_ul(idr, entry, tmp, id) \
for (tmp = 0, id = 0; \
- tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
+ ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
tmp = id, ++id)
/**
@@ -224,10 +224,12 @@ static inline void idr_preload_end(void)
* @id: Entry ID.
*
* Continue to iterate over entries, continuing after the current position.
+ * After normal termination @entry is left with the value NULL. This
+ * is convenient for a "not found" value.
*/
#define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \
for (tmp = id; \
- tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
+ ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
tmp = id, ++id)
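
[Editor's note: with the change above, @entry is guaranteed to be NULL after the loop terminates normally. A hedged sketch of a caller relying on that, with hypothetical names:]

    struct my_item *item;                   /* hypothetical element type */
    unsigned long tmp, id = 0;

    idr_for_each_entry_ul(&my_idr, item, tmp, id) {
            if (my_item_matches(item))      /* hypothetical predicate */
                    break;
    }
    if (!item)                              /* loop completed: nothing matched */
            return -ENOENT;
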
/*
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 202e55b0a28b..d0ce3b71106a 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -427,6 +427,8 @@ struct iio_trigger; /* forward declaration */
* @write_event_config: set if the event is enabled.
* @read_event_value: read a configuration value associated with the event.
* @write_event_value: write a configuration value for the event.
+ * @read_event_label: function to request the label name for a specified event,
+ * for better event identification.
* @validate_trigger: function to validate the trigger when the
* current trigger gets changed.
* @update_scan_mode: function to configure device and scan buffer when
@@ -511,6 +513,12 @@ struct iio_info {
enum iio_event_direction dir,
enum iio_event_info info, int val, int val2);
+ int (*read_event_label)(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ char *label);
+
int (*validate_trigger)(struct iio_dev *indio_dev,
struct iio_trigger *trig);
int (*update_scan_mode)(struct iio_dev *indio_dev,
@@ -556,7 +564,9 @@ struct iio_buffer_setup_ops {
* and owner
* @buffer: [DRIVER] any buffer present
* @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux
- * @available_scan_masks: [DRIVER] optional array of allowed bitmasks
+ * @available_scan_masks: [DRIVER] optional array of allowed bitmasks. Sort the
+ * array in order of preference, the most preferred
+ * masks first.
* @masklength: [INTERN] the length of the mask established from
* channels
* @active_scan_mask: [INTERN] union of all scan masks requested by buffers
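
[Editor's note: a sketch of what a driver implementation of the new read_event_label callback might look like, assuming the label buffer is a sysfs page as with the other IIO label callbacks; names are hypothetical.]

    static int my_adc_read_event_label(struct iio_dev *indio_dev,
                                       struct iio_chan_spec const *chan,
                                       enum iio_event_type type,
                                       enum iio_event_direction dir,
                                       char *label)
    {
            /* assumption: label points at a sysfs page */
            return sysfs_emit(label, "ch%d_%s\n", chan->channel,
                              dir == IIO_EV_DIR_RISING ? "rising" : "falling");
    }
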
diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h
index eff1e6b2595c..0f7fe7b522e3 100644
--- a/include/linux/iio/sw_device.h
+++ b/include/linux/iio/sw_device.h
@@ -51,9 +51,6 @@ void iio_unregister_sw_device_type(struct iio_sw_device_type *dt);
struct iio_sw_device *iio_sw_device_create(const char *, const char *);
void iio_sw_device_destroy(struct iio_sw_device *);
-int iio_sw_device_type_configfs_register(struct iio_sw_device_type *dt);
-void iio_sw_device_type_configfs_unregister(struct iio_sw_device_type *dt);
-
static inline
void iio_swd_group_init_type_name(struct iio_sw_device *d,
const char *name,
diff --git a/include/linux/iio/sw_trigger.h b/include/linux/iio/sw_trigger.h
index 47de2443e984..bc77f88df303 100644
--- a/include/linux/iio/sw_trigger.h
+++ b/include/linux/iio/sw_trigger.h
@@ -51,9 +51,6 @@ void iio_unregister_sw_trigger_type(struct iio_sw_trigger_type *tt);
struct iio_sw_trigger *iio_sw_trigger_create(const char *, const char *);
void iio_sw_trigger_destroy(struct iio_sw_trigger *);
-int iio_sw_trigger_type_configfs_register(struct iio_sw_trigger_type *tt);
-void iio_sw_trigger_type_configfs_unregister(struct iio_sw_trigger_type *tt);
-
static inline
void iio_swt_group_init_type_name(struct iio_sw_trigger *t,
const char *name,
diff --git a/include/linux/init.h b/include/linux/init.h
index 266c3e1640d4..01b52c9c7526 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -89,9 +89,6 @@
__latent_entropy
#define __meminitdata __section(".meminit.data")
#define __meminitconst __section(".meminit.rodata")
-#define __memexit __section(".memexit.text") __exitused __cold notrace
-#define __memexitdata __section(".memexit.data")
-#define __memexitconst __section(".memexit.rodata")
/* For assembly routines */
#define __HEAD .section ".head.text","ax"
diff --git a/include/linux/input.h b/include/linux/input.h
index 49790c1bd2c4..de6503c0edb8 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -562,7 +562,7 @@ struct ff_device {
int max_effects;
struct ff_effect *effects;
- struct file *effect_owners[];
+ struct file *effect_owners[] __counted_by(max_effects);
};
int input_ff_create(struct input_dev *dev, unsigned int max_effects);
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
index 3b8580bd33c1..2cf89a538b18 100644
--- a/include/linux/input/mt.h
+++ b/include/linux/input/mt.h
@@ -47,7 +47,7 @@ struct input_mt {
unsigned int flags;
unsigned int frame;
int *red;
- struct input_mt_slot slots[];
+ struct input_mt_slot slots[] __counted_by(num_slots);
};
static inline void input_mt_set_value(struct input_mt_slot *slot,
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 8fb1b41b4d15..ec289c1016f5 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -66,6 +66,7 @@ struct iommu_domain_geometry {
#define __IOMMU_DOMAIN_DMA_FQ (1U << 3) /* DMA-API uses flush queue */
#define __IOMMU_DOMAIN_SVA (1U << 4) /* Shared process address space */
+#define __IOMMU_DOMAIN_PLATFORM (1U << 5)
#define __IOMMU_DOMAIN_NESTED (1U << 6) /* User-managed address space nested
on a stage-2 translation */
@@ -86,6 +87,8 @@ struct iommu_domain_geometry {
* invalidation.
* IOMMU_DOMAIN_SVA - DMA addresses are shared process addresses
* represented by mm_struct's.
+ * IOMMU_DOMAIN_PLATFORM - Legacy domain for drivers that do their own
+ * dma_api stuff. Do not use in new drivers.
*/
#define IOMMU_DOMAIN_BLOCKED (0U)
#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
@@ -96,6 +99,7 @@ struct iommu_domain_geometry {
__IOMMU_DOMAIN_DMA_API | \
__IOMMU_DOMAIN_DMA_FQ)
#define IOMMU_DOMAIN_SVA (__IOMMU_DOMAIN_SVA)
+#define IOMMU_DOMAIN_PLATFORM (__IOMMU_DOMAIN_PLATFORM)
#define IOMMU_DOMAIN_NESTED (__IOMMU_DOMAIN_NESTED)
struct iommu_domain {
@@ -340,13 +344,12 @@ static inline int __iommu_copy_struct_from_user(
* NULL while the @user_data can be optionally provided, the
* new domain must support __IOMMU_DOMAIN_PAGING.
* Upon failure, ERR_PTR must be returned.
+ * @domain_alloc_paging: Allocate an iommu_domain that can be used for
+ * UNMANAGED, DMA, and DMA_FQ domain types.
* @probe_device: Add device to iommu driver handling
* @release_device: Remove device from iommu driver handling
* @probe_finalize: Do final setup work after the device is added to an IOMMU
* group and attached to the groups domain
- * @set_platform_dma_ops: Returning control back to the platform DMA ops. This op
- * is to support old IOMMU drivers, new drivers should use
- * default domains, and the common IOMMU DMA ops.
* @device_group: find iommu group for a particular device
* @get_resv_regions: Request list of reserved regions for a device
* @of_xlate: add OF master IDs to iommu grouping
@@ -365,6 +368,13 @@ static inline int __iommu_copy_struct_from_user(
* will be blocked by the hardware.
* @pgsize_bitmap: bitmap of all possible supported page sizes
* @owner: Driver module providing these ops
+ * @identity_domain: An always available, always attachable identity
+ * translation.
+ * @blocked_domain: An always available, always attachable blocking
+ * translation.
+ * @default_domain: If not NULL this will always be set as the default domain.
+ * This should be an IDENTITY/BLOCKED/PLATFORM domain.
+ * Do not use in new drivers.
*/
struct iommu_ops {
bool (*capable)(struct device *dev, enum iommu_cap);
@@ -375,11 +385,11 @@ struct iommu_ops {
struct iommu_domain *(*domain_alloc_user)(
struct device *dev, u32 flags, struct iommu_domain *parent,
const struct iommu_user_data *user_data);
+ struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
struct iommu_device *(*probe_device)(struct device *dev);
void (*release_device)(struct device *dev);
void (*probe_finalize)(struct device *dev);
- void (*set_platform_dma_ops)(struct device *dev);
struct iommu_group *(*device_group)(struct device *dev);
/* Request/Free a list of reserved regions for a device */
@@ -402,6 +412,9 @@ struct iommu_ops {
const struct iommu_domain_ops *default_domain_ops;
unsigned long pgsize_bitmap;
struct module *owner;
+ struct iommu_domain *identity_domain;
+ struct iommu_domain *blocked_domain;
+ struct iommu_domain *default_domain;
};
/**
@@ -420,10 +433,8 @@ struct iommu_ops {
* * ENODEV - device specific errors, not able to be attached
* * <others> - treated as ENODEV by the caller. Use is discouraged
* @set_dev_pasid: set an iommu domain to a pasid of device
- * @map: map a physically contiguous memory region to an iommu domain
* @map_pages: map a physically contiguous set of pages of the same size to
* an iommu domain.
- * @unmap: unmap a physically contiguous memory region from an iommu domain
* @unmap_pages: unmap a number of pages of the same size from an iommu domain
* @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
* @iotlb_sync_map: Sync mappings created recently using @map to the hardware
@@ -442,20 +453,16 @@ struct iommu_domain_ops {
int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
ioasid_t pasid);
- int (*map)(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped);
- size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *iotlb_gather);
size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
size_t pgsize, size_t pgcount,
struct iommu_iotlb_gather *iotlb_gather);
void (*flush_iotlb_all)(struct iommu_domain *domain);
- void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
- size_t size);
+ int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
+ size_t size);
void (*iotlb_sync)(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather);
@@ -476,6 +483,7 @@ struct iommu_domain_ops {
* @list: Used by the iommu-core to keep a list of registered iommus
* @ops: iommu-ops for talking to this iommu
* @dev: struct device for sysfs handling
+ * @singleton_group: Used internally for drivers that have only one group
* @max_pasids: number of supported PASIDs
*/
struct iommu_device {
@@ -483,6 +491,7 @@ struct iommu_device {
const struct iommu_ops *ops;
struct fwnode_handle *fwnode;
struct device *dev;
+ struct iommu_group *singleton_group;
u32 max_pasids;
};
@@ -526,6 +535,7 @@ struct iommu_fault_param {
* @attach_deferred: the dma domain attachment is deferred
* @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
* @require_direct: device requires IOMMU_RESV_DIRECT regions
+ * @shadow_on_flush: IOTLB flushes are used to sync shadow tables
*
* TODO: migrate other per device data pointers under iommu_dev_data, e.g.
* struct iommu_group *iommu_group;
@@ -541,6 +551,7 @@ struct dev_iommu {
u32 attach_deferred:1;
u32 pci_32bit_workaround:1;
u32 require_direct:1;
+ u32 shadow_on_flush:1;
};
int iommu_device_register(struct iommu_device *iommu,
@@ -768,6 +779,7 @@ extern struct iommu_group *pci_device_group(struct device *dev);
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);
+extern struct iommu_group *generic_single_device_group(struct device *dev);
/**
* struct iommu_fwspec - per-device IOMMU instance data
@@ -1253,7 +1265,7 @@ static inline void iommu_free_global_pasid(ioasid_t pasid) {}
* Creates a mapping at @iova for the buffer described by a scatterlist
* stored in the given sg_table object in the provided IOMMU domain.
*/
-static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
+static inline ssize_t iommu_map_sgtable(struct iommu_domain *domain,
unsigned long iova, struct sg_table *sgt, int prot)
{
return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
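
[Editor's note: a hedged sketch of how a driver might wire the new domain_alloc_paging op and the static identity_domain added to struct iommu_ops above; names are hypothetical and the bodies are placeholders, not a real driver.]

    static struct iommu_domain my_identity_domain = {
            .type = IOMMU_DOMAIN_IDENTITY,
            /* .ops would point at the driver's identity-attach domain ops */
    };

    static struct iommu_domain *my_domain_alloc_paging(struct device *dev)
    {
            /* allocate and return a paging (UNMANAGED/DMA-capable) domain */
            return NULL;    /* placeholder */
    }

    static const struct iommu_ops my_iommu_ops = {
            .identity_domain        = &my_identity_domain,
            .domain_alloc_paging    = my_domain_alloc_paging,
            /* ... probe_device, device_group, etc. ... */
    };
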
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 25d768d48970..14f5cfabbbc8 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -229,7 +229,7 @@ static inline unsigned long resource_ext_type(const struct resource *res)
return res->flags & IORESOURCE_EXT_TYPE_BITS;
}
/* True iff r1 completely contains r2 */
-static inline bool resource_contains(struct resource *r1, struct resource *r2)
+static inline bool resource_contains(const struct resource *r1, const struct resource *r2)
{
if (resource_type(r1) != resource_type(r2))
return false;
@@ -239,13 +239,13 @@ static inline bool resource_contains(struct resource *r1, struct resource *r2)
}
/* True if any part of r1 overlaps r2 */
-static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
+static inline bool resource_overlaps(const struct resource *r1, const struct resource *r2)
{
return r1->start <= r2->end && r1->end >= r2->start;
}
-static inline bool
-resource_intersection(struct resource *r1, struct resource *r2, struct resource *r)
+static inline bool resource_intersection(const struct resource *r1, const struct resource *r2,
+ struct resource *r)
{
if (!resource_overlaps(r1, r2))
return false;
@@ -254,8 +254,8 @@ resource_intersection(struct resource *r1, struct resource *r2, struct resource
return true;
}
-static inline bool
-resource_union(struct resource *r1, struct resource *r2, struct resource *r)
+static inline bool resource_union(const struct resource *r1, const struct resource *r2,
+ struct resource *r)
{
if (!resource_overlaps(r1, r2))
return false;
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index cee8fe87e9f4..d9ad21058eed 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -13,6 +13,7 @@
#include <linux/stdarg.h>
#include <linux/align.h>
+#include <linux/array_size.h>
#include <linux/limits.h>
#include <linux/linkage.h>
#include <linux/stddef.h>
@@ -50,12 +51,6 @@
#define READ 0
#define WRITE 1
-/**
- * ARRAY_SIZE - get the number of elements in array @arr
- * @arr: array to be sized
- */
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
-
#define PTR_IF(cond, ptr) ((cond) ? (ptr) : NULL)
#define u64_to_user_ptr(x) ( \
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 2a36f3218b51..99aaa050ccb7 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -316,6 +316,7 @@ struct kernfs_ops {
struct poll_table_struct *pt);
int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma);
+ loff_t (*llseek)(struct kernfs_open_file *of, loff_t offset, int whence);
};
/*
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 99b8176c3738..ff217a5ce552 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -48,7 +48,7 @@ LSM_HOOK(int, 0, quota_on, struct dentry *dentry)
LSM_HOOK(int, 0, syslog, int type)
LSM_HOOK(int, 0, settime, const struct timespec64 *ts,
const struct timezone *tz)
-LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages)
+LSM_HOOK(int, 1, vm_enough_memory, struct mm_struct *mm, long pages)
LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm)
LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, const struct file *file)
LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
@@ -273,7 +273,7 @@ LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen)
LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode)
LSM_HOOK(int, 0, inode_notifysecctx, struct inode *inode, void *ctx, u32 ctxlen)
LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen)
-LSM_HOOK(int, 0, inode_getsecctx, struct inode *inode, void **ctx,
+LSM_HOOK(int, -EOPNOTSUPP, inode_getsecctx, struct inode *inode, void **ctx,
u32 *ctxlen)
#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index fd6e0620658d..b38a56a13f39 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -31,11 +31,11 @@ typedef void (*mei_cldev_cb_t)(struct mei_cl_device *cldev);
* @rx_work: async work to execute Rx event callback
* @rx_cb: Drivers register this callback to get asynchronous ME
* Rx buffer pending notifications.
- * @notif_work: async work to execute FW notif event callback
+ * @notif_work: async work to execute FW notify event callback
* @notif_cb: Drivers register this callback to get asynchronous ME
* FW notification pending notifications.
*
- * @do_match: wheather device can be matched with a driver
+ * @do_match: whether the device can be matched with a driver
* @is_added: device is already scanned
* @priv_data: client private data
*/
@@ -94,15 +94,23 @@ void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv);
ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf,
size_t length);
+ssize_t mei_cldev_send_timeout(struct mei_cl_device *cldev, const u8 *buf,
+ size_t length, unsigned long timeout);
ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length);
ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
size_t length);
+ssize_t mei_cldev_recv_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
+ unsigned long timeout);
ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
size_t length, u8 vtag);
+ssize_t mei_cldev_send_vtag_timeout(struct mei_cl_device *cldev, const u8 *buf,
+ size_t length, u8 vtag, unsigned long timeout);
ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
u8 *vtag);
ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
size_t length, u8 *vtag);
+ssize_t mei_cldev_recv_vtag_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
+ u8 *vtag, unsigned long timeout);
int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb);
int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 4df6d1c12437..6f3631425f38 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1232,7 +1232,13 @@ struct mlx5_ifc_virtio_emulation_cap_bits {
u8 max_emulated_devices[0x8];
u8 max_num_virtio_queues[0x18];
- u8 reserved_at_a0[0x60];
+ u8 reserved_at_a0[0x20];
+
+ u8 reserved_at_c0[0x13];
+ u8 desc_group_mkey_supported[0x1];
+ u8 reserved_at_d4[0xc];
+
+ u8 reserved_at_e0[0x20];
u8 umem_1_buffer_param_a[0x20];
diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h
index 9becdc3fa503..b86d51a855f6 100644
--- a/include/linux/mlx5/mlx5_ifc_vdpa.h
+++ b/include/linux/mlx5/mlx5_ifc_vdpa.h
@@ -74,7 +74,11 @@ struct mlx5_ifc_virtio_q_bits {
u8 reserved_at_320[0x8];
u8 pd[0x18];
- u8 reserved_at_340[0xc0];
+ u8 reserved_at_340[0x20];
+
+ u8 desc_group_mkey[0x20];
+
+ u8 reserved_at_380[0x80];
};
struct mlx5_ifc_virtio_net_q_object_bits {
@@ -141,6 +145,7 @@ enum {
MLX5_VIRTQ_MODIFY_MASK_STATE = (u64)1 << 0,
MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_PARAMS = (u64)1 << 3,
MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_DUMP_ENABLE = (u64)1 << 4,
+ MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY = (u64)1 << 14,
};
enum {
diff --git a/include/linux/mnt_idmapping.h b/include/linux/mnt_idmapping.h
index 057c89867aa2..b8da2db4ecd2 100644
--- a/include/linux/mnt_idmapping.h
+++ b/include/linux/mnt_idmapping.h
@@ -115,6 +115,9 @@ static inline bool vfsgid_eq_kgid(vfsgid_t vfsgid, kgid_t kgid)
int vfsgid_in_group_p(vfsgid_t vfsgid);
+struct mnt_idmap *mnt_idmap_get(struct mnt_idmap *idmap);
+void mnt_idmap_put(struct mnt_idmap *idmap);
+
vfsuid_t make_vfsuid(struct mnt_idmap *idmap,
struct user_namespace *fs_userns, kuid_t kuid);
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index b0678b093cb2..f458469c5ce5 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -935,6 +935,12 @@ enum {
* struct cdx_device_id - CDX device identifier
* @vendor: Vendor ID
* @device: Device ID
+ * @subvendor: Subsystem vendor ID (or CDX_ANY_ID)
+ * @subdevice: Subsystem device ID (or CDX_ANY_ID)
+ * @class: Device class
+ * Most drivers do not need to specify class/class_mask
+ * as vendor/device is normally sufficient.
+ * @class_mask: Limit which sub-fields of the class field are compared.
* @override_only: Match only when dev->driver_override is this driver.
*
* Type of entries in the "device Id" table for CDX devices supported by
@@ -943,7 +949,15 @@ enum {
struct cdx_device_id {
__u16 vendor;
__u16 device;
+ __u16 subvendor;
+ __u16 subdevice;
+ __u32 class;
+ __u32 class_mask;
__u32 override_only;
};
+struct vchiq_device_id {
+ char name[32];
+};
+
#endif /* LINUX_MOD_DEVICETABLE_H */
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index d88bb56c18e2..947410faf9e2 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -287,7 +287,7 @@ struct cfi_private {
unsigned long chipshift; /* Because they're of the same type */
const char *im_name; /* inter_module name for cmdset_setup */
unsigned long quirks;
- struct flchip chips[]; /* per-chip data structure for each chip */
+ struct flchip chips[] __counted_by(numchips); /* per-chip data structure for each chip */
};
uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h
index 2e3f43788d48..0421f12156b5 100644
--- a/include/linux/mtd/qinfo.h
+++ b/include/linux/mtd/qinfo.h
@@ -24,7 +24,7 @@ struct lpddr_private {
struct qinfo_chip *qinfo;
int numchips;
unsigned long chipshift;
- struct flchip chips[];
+ struct flchip chips[] __counted_by(numchips);
};
/* qinfo_query_info structure contains request information for
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 3e285c09d16d..badb4c1ac079 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -263,6 +263,7 @@ struct spinand_manufacturer {
extern const struct spinand_manufacturer alliancememory_spinand_manufacturer;
extern const struct spinand_manufacturer ato_spinand_manufacturer;
extern const struct spinand_manufacturer esmt_c8_spinand_manufacturer;
+extern const struct spinand_manufacturer foresee_spinand_manufacturer;
extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
extern const struct spinand_manufacturer macronix_spinand_manufacturer;
extern const struct spinand_manufacturer micron_spinand_manufacturer;
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index cd628c4b011e..cd797e00fe35 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -239,6 +239,7 @@ struct nfs_server {
struct list_head delegations;
struct list_head ss_copies;
+ unsigned long delegation_gen;
unsigned long mig_gen;
unsigned long mig_status;
#define NFS_MIG_IN_TRANSITION (1)
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 12bbb5c63664..539b57fbf3ce 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1772,7 +1772,7 @@ struct nfs_rpc_ops {
void (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *);
int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir);
int (*link) (struct inode *, struct inode *, const struct qstr *);
- int (*symlink) (struct inode *, struct dentry *, struct page *,
+ int (*symlink) (struct inode *, struct dentry *, struct folio *,
unsigned int, struct iattr *);
int (*mkdir) (struct inode *, struct dentry *, struct iattr *);
int (*rmdir) (struct inode *, const struct qstr *);
diff --git a/include/linux/nvme-keyring.h b/include/linux/nvme-keyring.h
index 4efea9dd967c..e10333d78dbb 100644
--- a/include/linux/nvme-keyring.h
+++ b/include/linux/nvme-keyring.h
@@ -6,14 +6,12 @@
#ifndef _NVME_KEYRING_H
#define _NVME_KEYRING_H
-#ifdef CONFIG_NVME_KEYRING
+#if IS_ENABLED(CONFIG_NVME_KEYRING)
key_serial_t nvme_tls_psk_default(struct key *keyring,
const char *hostnqn, const char *subnqn);
key_serial_t nvme_keyring_id(void);
-int nvme_keyring_init(void);
-void nvme_keyring_exit(void);
#else
@@ -26,11 +24,5 @@ static inline key_serial_t nvme_keyring_id(void)
{
return 0;
}
-static inline int nvme_keyring_init(void)
-{
- return 0;
-}
-static inline void nvme_keyring_exit(void) {}
-
#endif /* !CONFIG_NVME_KEYRING */
#endif /* _NVME_KEYRING_H */
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index a7ba74babad7..44325c068b6a 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -1732,7 +1732,7 @@ struct nvmf_auth_dhchap_success1_data {
__u8 rsvd2;
__u8 rvalid;
__u8 rsvd3[7];
- /* 'hl' bytes of response value if 'rvalid' is set */
+ /* 'hl' bytes of response value */
__u8 rval[];
};
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index dae26295e6be..e3930835235b 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -82,13 +82,13 @@ struct nvmem_cell_info {
* @owner: Pointer to exporter module. Used for refcounting.
* @cells: Optional array of pre-defined NVMEM cells.
* @ncells: Number of elements in cells.
+ * @add_legacy_fixed_of_cells: Read fixed NVMEM cells from old OF syntax.
* @keepout: Optional array of keepout ranges (sorted ascending by start).
* @nkeepout: Number of elements in the keepout array.
* @type: Type of the nvmem storage
* @read_only: Device is read-only.
* @root_only: Device is accessibly to root only.
* @of_node: If given, this will be used instead of the parent's of_node.
- * @no_of_node: Device should not use the parent's of_node even if it's !NULL.
* @reg_read: Callback to read data.
* @reg_write: Callback to write data.
* @size: Device size.
@@ -112,6 +112,7 @@ struct nvmem_config {
struct module *owner;
const struct nvmem_cell_info *cells;
int ncells;
+ bool add_legacy_fixed_of_cells;
const struct nvmem_keepout *keepout;
unsigned int nkeepout;
enum nvmem_type type;
@@ -120,7 +121,6 @@ struct nvmem_config {
bool ignore_wp;
struct nvmem_layout *layout;
struct device_node *of_node;
- bool no_of_node;
nvmem_reg_read_t reg_read;
nvmem_reg_write_t reg_write;
int size;
diff --git a/include/linux/parport.h b/include/linux/parport.h
index 999eddd619b7..fff39bc30629 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -180,8 +180,6 @@ struct ieee1284_info {
struct semaphore irq;
};
-#define PARPORT_NAME_MAX_LEN 15
-
/* A parallel port */
struct parport {
unsigned long base; /* base address */
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index 4729d54e8995..73de70362b98 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -17,6 +17,7 @@
#include <linux/pinctrl/pinctrl-state.h>
struct device;
+struct gpio_chip;
/* This struct is private to the core and should be regarded as a cookie */
struct pinctrl;
@@ -25,27 +26,30 @@ struct pinctrl_state;
#ifdef CONFIG_PINCTRL
/* External interface to pin control */
-extern bool pinctrl_gpio_can_use_line(unsigned gpio);
-extern int pinctrl_gpio_request(unsigned gpio);
-extern void pinctrl_gpio_free(unsigned gpio);
-extern int pinctrl_gpio_direction_input(unsigned gpio);
-extern int pinctrl_gpio_direction_output(unsigned gpio);
-extern int pinctrl_gpio_set_config(unsigned gpio, unsigned long config);
-
-extern struct pinctrl * __must_check pinctrl_get(struct device *dev);
-extern void pinctrl_put(struct pinctrl *p);
-extern struct pinctrl_state * __must_check pinctrl_lookup_state(struct pinctrl *p,
- const char *name);
-extern int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s);
-
-extern struct pinctrl * __must_check devm_pinctrl_get(struct device *dev);
-extern void devm_pinctrl_put(struct pinctrl *p);
-extern int pinctrl_select_default_state(struct device *dev);
+bool pinctrl_gpio_can_use_line(struct gpio_chip *gc, unsigned int offset);
+int pinctrl_gpio_request(struct gpio_chip *gc, unsigned int offset);
+void pinctrl_gpio_free(struct gpio_chip *gc, unsigned int offset);
+int pinctrl_gpio_direction_input(struct gpio_chip *gc,
+ unsigned int offset);
+int pinctrl_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset);
+int pinctrl_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config);
+
+struct pinctrl * __must_check pinctrl_get(struct device *dev);
+void pinctrl_put(struct pinctrl *p);
+struct pinctrl_state * __must_check pinctrl_lookup_state(struct pinctrl *p,
+ const char *name);
+int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s);
+
+struct pinctrl * __must_check devm_pinctrl_get(struct device *dev);
+void devm_pinctrl_put(struct pinctrl *p);
+int pinctrl_select_default_state(struct device *dev);
#ifdef CONFIG_PM
-extern int pinctrl_pm_select_default_state(struct device *dev);
-extern int pinctrl_pm_select_sleep_state(struct device *dev);
-extern int pinctrl_pm_select_idle_state(struct device *dev);
+int pinctrl_pm_select_default_state(struct device *dev);
+int pinctrl_pm_select_sleep_state(struct device *dev);
+int pinctrl_pm_select_idle_state(struct device *dev);
#else
static inline int pinctrl_pm_select_default_state(struct device *dev)
{
@@ -63,31 +67,38 @@ static inline int pinctrl_pm_select_idle_state(struct device *dev)
#else /* !CONFIG_PINCTRL */
-static inline bool pinctrl_gpio_can_use_line(unsigned gpio)
+static inline bool
+pinctrl_gpio_can_use_line(struct gpio_chip *gc, unsigned int offset)
{
return true;
}
-static inline int pinctrl_gpio_request(unsigned gpio)
+static inline int
+pinctrl_gpio_request(struct gpio_chip *gc, unsigned int offset)
{
return 0;
}
-static inline void pinctrl_gpio_free(unsigned gpio)
+static inline void
+pinctrl_gpio_free(struct gpio_chip *gc, unsigned int offset)
{
}
-static inline int pinctrl_gpio_direction_input(unsigned gpio)
+static inline int
+pinctrl_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
{
return 0;
}
-static inline int pinctrl_gpio_direction_output(unsigned gpio)
+static inline int
+pinctrl_gpio_direction_output(struct gpio_chip *gc, unsigned int offset)
{
return 0;
}
-static inline int pinctrl_gpio_set_config(unsigned gpio, unsigned long config)
+static inline int
+pinctrl_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config)
{
return 0;
}
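
[Editor's note: the pinctrl_gpio_* helpers now take a gpio_chip plus chip-relative offset instead of a global GPIO number. A minimal sketch of a gpiolib driver's request callback using the new form; "my_" names are hypothetical.]

    static int my_gpio_request(struct gpio_chip *gc, unsigned int offset)
    {
            /* hand the pin to pinctrl using the chip-relative offset directly */
            return pinctrl_gpio_request(gc, offset);
    }
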
diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h
index 0639b36f43c5..ee8803f6ad07 100644
--- a/include/linux/pinctrl/machine.h
+++ b/include/linux/pinctrl/machine.h
@@ -11,7 +11,7 @@
#ifndef __LINUX_PINCTRL_MACHINE_H
#define __LINUX_PINCTRL_MACHINE_H
-#include <linux/kernel.h> /* ARRAY_SIZE() */
+#include <linux/array_size.h>
#include <linux/pinctrl/pinctrl-state.h>
diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h
index ab721cf13a98..7dae17b62a4d 100644
--- a/include/linux/platform_data/cros_ec_commands.h
+++ b/include/linux/platform_data/cros_ec_commands.h
@@ -4436,8 +4436,20 @@ struct ec_response_i2c_passthru_protect {
* These commands are for sending and receiving message via HDMI CEC
*/
+#define EC_CEC_MAX_PORTS 16
+
#define MAX_CEC_MSG_LEN 16
+/*
+ * Helper macros for packing/unpacking cec_events.
+ * bits[27:0] : bitmask of events from enum mkbp_cec_event
+ * bits[31:28]: port number
+ */
+#define EC_MKBP_EVENT_CEC_PACK(events, port) \
+ (((events) & GENMASK(27, 0)) | (((port) & 0xf) << 28))
+#define EC_MKBP_EVENT_CEC_GET_EVENTS(event) ((event) & GENMASK(27, 0))
+#define EC_MKBP_EVENT_CEC_GET_PORT(event) (((event) >> 28) & 0xf)
+
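
[Editor's note: a worked example of the packing layout described above, using hypothetical values.]

    /* e.g. a send-OK event reported on CEC port 2 */
    uint32_t event = EC_MKBP_EVENT_CEC_PACK(EC_MKBP_CEC_SEND_OK, 2);

    EC_MKBP_EVENT_CEC_GET_EVENTS(event);    /* == EC_MKBP_CEC_SEND_OK */
    EC_MKBP_EVENT_CEC_GET_PORT(event);      /* == 2 */
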
/* CEC message from the AP to be written on the CEC bus */
#define EC_CMD_CEC_WRITE_MSG 0x00B8
@@ -4449,19 +4461,54 @@ struct ec_params_cec_write {
uint8_t msg[MAX_CEC_MSG_LEN];
} __ec_align1;
+/**
+ * struct ec_params_cec_write_v1 - Message to write to the CEC bus
+ * @port: CEC port to write the message on
+ * @msg_len: length of msg in bytes
+ * @msg: message content to write to the CEC bus
+ */
+struct ec_params_cec_write_v1 {
+ uint8_t port;
+ uint8_t msg_len;
+ uint8_t msg[MAX_CEC_MSG_LEN];
+} __ec_align1;
+
+/* CEC message read from a CEC bus reported back to the AP */
+#define EC_CMD_CEC_READ_MSG 0x00B9
+
+/**
+ * struct ec_params_cec_read - Read a message from the CEC bus
+ * @port: CEC port to read a message on
+ */
+struct ec_params_cec_read {
+ uint8_t port;
+} __ec_align1;
+
+/**
+ * struct ec_response_cec_read - Message read from the CEC bus
+ * @msg_len: length of msg in bytes
+ * @msg: message content read from the CEC bus
+ */
+struct ec_response_cec_read {
+ uint8_t msg_len;
+ uint8_t msg[MAX_CEC_MSG_LEN];
+} __ec_align1;
+
/* Set various CEC parameters */
#define EC_CMD_CEC_SET 0x00BA
/**
* struct ec_params_cec_set - CEC parameters set
* @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS
+ * @port: CEC port to set the parameter on
* @val: in case cmd is CEC_CMD_ENABLE, this field can be 0 to disable CEC
* or 1 to enable CEC functionality, in case cmd is
* CEC_CMD_LOGICAL_ADDRESS, this field encodes the requested logical
* address between 0 and 15 or 0xff to unregister
*/
struct ec_params_cec_set {
- uint8_t cmd; /* enum cec_command */
+ uint8_t cmd : 4; /* enum cec_command */
+ uint8_t port : 4;
uint8_t val;
} __ec_align1;
@@ -4471,9 +4518,11 @@ struct ec_params_cec_set {
/**
* struct ec_params_cec_get - CEC parameters get
* @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS
+ * @port: CEC port to get the parameter on
*/
struct ec_params_cec_get {
- uint8_t cmd; /* enum cec_command */
+ uint8_t cmd : 4; /* enum cec_command */
+ uint8_t port : 4;
} __ec_align1;
/**
@@ -4487,6 +4536,17 @@ struct ec_response_cec_get {
uint8_t val;
} __ec_align1;
+/* Get the number of CEC ports */
+#define EC_CMD_CEC_PORT_COUNT 0x00C1
+
+/**
+ * struct ec_response_cec_port_count - CEC port count response
+ * @port_count: number of CEC ports
+ */
+struct ec_response_cec_port_count {
+ uint8_t port_count;
+} __ec_align1;
+
/* CEC parameters command */
enum cec_command {
/* CEC reading, writing and events enable */
@@ -4501,6 +4561,8 @@ enum mkbp_cec_event {
EC_MKBP_CEC_SEND_OK = BIT(0),
/* Outgoing message was not acknowledged */
EC_MKBP_CEC_SEND_FAILED = BIT(1),
+ /* Incoming message can be read out by AP */
+ EC_MKBP_CEC_HAVE_DATA = BIT(2),
};
/*****************************************************************************/
diff --git a/include/linux/platform_data/shmob_drm.h b/include/linux/platform_data/shmob_drm.h
index d661399b217d..6c19d4fbbe39 100644
--- a/include/linux/platform_data/shmob_drm.h
+++ b/include/linux/platform_data/shmob_drm.h
@@ -10,7 +10,7 @@
#ifndef __SHMOB_DRM_H__
#define __SHMOB_DRM_H__
-#include <drm/drm_mode.h>
+#include <video/videomode.h>
enum shmob_drm_clk_source {
SHMOB_DRM_CLK_BUS,
@@ -18,72 +18,21 @@ enum shmob_drm_clk_source {
SHMOB_DRM_CLK_EXTERNAL,
};
-enum shmob_drm_interface {
- SHMOB_DRM_IFACE_RGB8, /* 24bpp, 8:8:8 */
- SHMOB_DRM_IFACE_RGB9, /* 18bpp, 9:9 */
- SHMOB_DRM_IFACE_RGB12A, /* 24bpp, 12:12 */
- SHMOB_DRM_IFACE_RGB12B, /* 12bpp */
- SHMOB_DRM_IFACE_RGB16, /* 16bpp */
- SHMOB_DRM_IFACE_RGB18, /* 18bpp */
- SHMOB_DRM_IFACE_RGB24, /* 24bpp */
- SHMOB_DRM_IFACE_YUV422, /* 16bpp */
- SHMOB_DRM_IFACE_SYS8A, /* 24bpp, 8:8:8 */
- SHMOB_DRM_IFACE_SYS8B, /* 18bpp, 8:8:2 */
- SHMOB_DRM_IFACE_SYS8C, /* 18bpp, 2:8:8 */
- SHMOB_DRM_IFACE_SYS8D, /* 16bpp, 8:8 */
- SHMOB_DRM_IFACE_SYS9, /* 18bpp, 9:9 */
- SHMOB_DRM_IFACE_SYS12, /* 24bpp, 12:12 */
- SHMOB_DRM_IFACE_SYS16A, /* 16bpp */
- SHMOB_DRM_IFACE_SYS16B, /* 18bpp, 16:2 */
- SHMOB_DRM_IFACE_SYS16C, /* 18bpp, 2:16 */
- SHMOB_DRM_IFACE_SYS18, /* 18bpp */
- SHMOB_DRM_IFACE_SYS24, /* 24bpp */
-};
-
-struct shmob_drm_backlight_data {
- const char *name;
- int max_brightness;
- int (*get_brightness)(void);
- int (*set_brightness)(int brightness);
-};
-
struct shmob_drm_panel_data {
unsigned int width_mm; /* Panel width in mm */
unsigned int height_mm; /* Panel height in mm */
- struct drm_mode_modeinfo mode;
+ struct videomode mode;
};
-struct shmob_drm_sys_interface_data {
- unsigned int read_latch:6;
- unsigned int read_setup:8;
- unsigned int read_cycle:8;
- unsigned int read_strobe:8;
- unsigned int write_setup:8;
- unsigned int write_cycle:8;
- unsigned int write_strobe:8;
- unsigned int cs_setup:3;
- unsigned int vsync_active_high:1;
- unsigned int vsync_dir_input:1;
-};
-
-#define SHMOB_DRM_IFACE_FL_DWPOL (1 << 0) /* Rising edge dot clock data latch */
-#define SHMOB_DRM_IFACE_FL_DIPOL (1 << 1) /* Active low display enable */
-#define SHMOB_DRM_IFACE_FL_DAPOL (1 << 2) /* Active low display data */
-#define SHMOB_DRM_IFACE_FL_HSCNT (1 << 3) /* Disable HSYNC during VBLANK */
-#define SHMOB_DRM_IFACE_FL_DWCNT (1 << 4) /* Disable dotclock during blanking */
-
struct shmob_drm_interface_data {
- enum shmob_drm_interface interface;
- struct shmob_drm_sys_interface_data sys;
+ unsigned int bus_fmt; /* MEDIA_BUS_FMT_* */
unsigned int clk_div;
- unsigned int flags;
};
struct shmob_drm_platform_data {
enum shmob_drm_clk_source clk_source;
struct shmob_drm_interface_data iface;
struct shmob_drm_panel_data panel;
- struct shmob_drm_backlight_data backlight;
};
#endif /* __SHMOB_DRM_H__ */
diff --git a/include/linux/property.h b/include/linux/property.h
index 8c3c6685a2ae..9f2585d705a8 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -10,6 +10,7 @@
#ifndef _LINUX_PROPERTY_H_
#define _LINUX_PROPERTY_H_
+#include <linux/args.h>
#include <linux/bits.h>
#include <linux/fwnode.h>
#include <linux/stddef.h>
@@ -288,7 +289,7 @@ struct software_node_ref_args {
#define SOFTWARE_NODE_REFERENCE(_ref_, ...) \
(const struct software_node_ref_args) { \
.node = _ref_, \
- .nargs = ARRAY_SIZE(((u64[]){ 0, ##__VA_ARGS__ })) - 1, \
+ .nargs = COUNT_ARGS(__VA_ARGS__), \
.args = { __VA_ARGS__ }, \
}
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index d2f9f690a9c1..cda3597b84f2 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -41,8 +41,8 @@ struct pwm_args {
};
enum {
- PWMF_REQUESTED = 1 << 0,
- PWMF_EXPORTED = 1 << 1,
+ PWMF_REQUESTED = 0,
+ PWMF_EXPORTED = 1,
};
/*
@@ -71,7 +71,6 @@ struct pwm_state {
* @hwpwm: per-chip relative index of the PWM device
* @pwm: global index of the PWM device
* @chip: PWM chip providing this PWM device
- * @chip_data: chip-private data associated with the PWM device
* @args: PWM arguments
* @state: last applied state
* @last: last implemented state (for PWM_DEBUG)
@@ -82,7 +81,6 @@ struct pwm_device {
unsigned int hwpwm;
unsigned int pwm;
struct pwm_chip *chip;
- void *chip_data;
struct pwm_args args;
struct pwm_state state;
@@ -267,7 +265,6 @@ struct pwm_capture {
* @get_state: get the current PWM state. This function is only
* called once per PWM device when the PWM chip is
* registered.
- * @owner: helps prevent removal of modules exporting active PWMs
*/
struct pwm_ops {
int (*request)(struct pwm_chip *chip, struct pwm_device *pwm);
@@ -278,13 +275,13 @@ struct pwm_ops {
const struct pwm_state *state);
int (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state);
- struct module *owner;
};
/**
* struct pwm_chip - abstract a PWM controller
* @dev: device providing the PWMs
* @ops: callbacks for this PWM controller
+ * @owner: module providing this chip
* @base: number of first PWM controlled by this chip
* @npwm: number of PWMs controlled by this chip
* @of_xlate: request a PWM device given a device tree PWM specifier
@@ -295,6 +292,7 @@ struct pwm_ops {
struct pwm_chip {
struct device *dev;
const struct pwm_ops *ops;
+ struct module *owner;
int base;
unsigned int npwm;
@@ -383,13 +381,13 @@ static inline void pwm_disable(struct pwm_device *pwm)
/* PWM provider APIs */
int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result,
unsigned long timeout);
-int pwm_set_chip_data(struct pwm_device *pwm, void *data);
-void *pwm_get_chip_data(struct pwm_device *pwm);
-int pwmchip_add(struct pwm_chip *chip);
+int __pwmchip_add(struct pwm_chip *chip, struct module *owner);
+#define pwmchip_add(chip) __pwmchip_add(chip, THIS_MODULE)
void pwmchip_remove(struct pwm_chip *chip);
-int devm_pwmchip_add(struct device *dev, struct pwm_chip *chip);
+int __devm_pwmchip_add(struct device *dev, struct pwm_chip *chip, struct module *owner);
+#define devm_pwmchip_add(dev, chip) __devm_pwmchip_add(dev, chip, THIS_MODULE)
struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
unsigned int index,
@@ -445,16 +443,6 @@ static inline int pwm_capture(struct pwm_device *pwm,
return -EINVAL;
}
-static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data)
-{
- return -EINVAL;
-}
-
-static inline void *pwm_get_chip_data(struct pwm_device *pwm)
-{
- return NULL;
-}
-
static inline int pwmchip_add(struct pwm_chip *chip)
{
return -EINVAL;
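
[Editor's note: with .owner gone from pwm_ops and pwmchip_add()/devm_pwmchip_add() now macros that record THIS_MODULE in the chip, a driver probe might look like the hedged sketch below; "my_" names are hypothetical.]

    static const struct pwm_ops my_pwm_ops = {
            .apply = my_pwm_apply,          /* hypothetical; no .owner member any more */
    };

    static int my_pwm_probe(struct platform_device *pdev)
    {
            struct pwm_chip *chip;

            chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
            if (!chip)
                    return -ENOMEM;

            chip->dev = &pdev->dev;
            chip->ops = &my_pwm_ops;
            chip->npwm = 1;

            /* expands to __devm_pwmchip_add(&pdev->dev, chip, THIS_MODULE) */
            return devm_pwmchip_add(&pdev->dev, chip);
    }
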
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
index 515d7fcb9634..5fb1f12c33f9 100644
--- a/include/linux/seq_buf.h
+++ b/include/linux/seq_buf.h
@@ -14,19 +14,25 @@
* @buffer: pointer to the buffer
* @size: size of the buffer
* @len: the amount of data inside the buffer
- * @readpos: The next position to read in the buffer.
*/
struct seq_buf {
char *buffer;
size_t size;
size_t len;
- loff_t readpos;
};
+#define DECLARE_SEQ_BUF(NAME, SIZE) \
+ char __ ## NAME ## _buffer[SIZE] = ""; \
+ struct seq_buf NAME = { \
+ .buffer = &__ ## NAME ## _buffer, \
+ .size = SIZE, \
+ }
+
static inline void seq_buf_clear(struct seq_buf *s)
{
s->len = 0;
- s->readpos = 0;
+ if (s->size)
+ s->buffer[0] = '\0';
}
static inline void
@@ -39,7 +45,7 @@ seq_buf_init(struct seq_buf *s, char *buf, unsigned int size)
/*
* seq_buf have a buffer that might overflow. When this happens
- * the len and size are set to be equal.
+ * len is set to be greater than size.
*/
static inline bool
seq_buf_has_overflowed(struct seq_buf *s)
@@ -72,8 +78,8 @@ static inline unsigned int seq_buf_used(struct seq_buf *s)
}
/**
- * seq_buf_terminate - Make sure buffer is nul terminated
- * @s: the seq_buf descriptor to terminate.
+ * seq_buf_str - get %NUL-terminated C string from seq_buf
+ * @s: the seq_buf handle
*
* This makes sure that the buffer in @s is nul terminated and
* safe to read as a string.
@@ -84,16 +90,20 @@ static inline unsigned int seq_buf_used(struct seq_buf *s)
*
* After this function is called, s->buffer is safe to use
* in string operations.
+ *
+ * Returns @s->buffer after making sure it is terminated.
*/
-static inline void seq_buf_terminate(struct seq_buf *s)
+static inline const char *seq_buf_str(struct seq_buf *s)
{
if (WARN_ON(s->size == 0))
- return;
+ return "";
if (seq_buf_buffer_left(s))
s->buffer[s->len] = 0;
else
s->buffer[s->size - 1] = 0;
+
+ return s->buffer;
}
/**
@@ -143,7 +153,7 @@ extern __printf(2, 0)
int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args);
extern int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s);
extern int seq_buf_to_user(struct seq_buf *s, char __user *ubuf,
- int cnt);
+ size_t start, int cnt);
extern int seq_buf_puts(struct seq_buf *s, const char *str);
extern int seq_buf_putc(struct seq_buf *s, unsigned char c);
extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len);
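
[Editor's note: a minimal sketch of the new DECLARE_SEQ_BUF()/seq_buf_str() pair replacing the old seq_buf_terminate() usage; buffer size and message are arbitrary.]

    DECLARE_SEQ_BUF(sb, 64);                /* on-stack seq_buf, empty */

    seq_buf_printf(&sb, "state=%d", 3);
    pr_info("%s\n", seq_buf_str(&sb));      /* always NUL-terminated */
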
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index bb6f073bc159..89f7b6c63598 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -588,6 +588,85 @@ struct uart_port {
void *private_data; /* generic platform data pointer */
};
+/**
+ * uart_port_lock - Lock the UART port
+ * @up: Pointer to UART port structure
+ */
+static inline void uart_port_lock(struct uart_port *up)
+{
+ spin_lock(&up->lock);
+}
+
+/**
+ * uart_port_lock_irq - Lock the UART port and disable interrupts
+ * @up: Pointer to UART port structure
+ */
+static inline void uart_port_lock_irq(struct uart_port *up)
+{
+ spin_lock_irq(&up->lock);
+}
+
+/**
+ * uart_port_lock_irqsave - Lock the UART port, save and disable interrupts
+ * @up: Pointer to UART port structure
+ * @flags: Pointer to interrupt flags storage
+ */
+static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
+{
+ spin_lock_irqsave(&up->lock, *flags);
+}
+
+/**
+ * uart_port_trylock - Try to lock the UART port
+ * @up: Pointer to UART port structure
+ *
+ * Returns: True if lock was acquired, false otherwise
+ */
+static inline bool uart_port_trylock(struct uart_port *up)
+{
+ return spin_trylock(&up->lock);
+}
+
+/**
+ * uart_port_trylock_irqsave - Try to lock the UART port, save and disable interrupts
+ * @up: Pointer to UART port structure
+ * @flags: Pointer to interrupt flags storage
+ *
+ * Returns: True if lock was acquired, false otherwise
+ */
+static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
+{
+ return spin_trylock_irqsave(&up->lock, *flags);
+}
+
+/**
+ * uart_port_unlock - Unlock the UART port
+ * @up: Pointer to UART port structure
+ */
+static inline void uart_port_unlock(struct uart_port *up)
+{
+ spin_unlock(&up->lock);
+}
+
+/**
+ * uart_port_unlock_irq - Unlock the UART port and re-enable interrupts
+ * @up: Pointer to UART port structure
+ */
+static inline void uart_port_unlock_irq(struct uart_port *up)
+{
+ spin_unlock_irq(&up->lock);
+}
+
+/**
+ * uart_port_unlock_irqrestore - Unlock the UART port, restore interrupts
+ * @up: Pointer to UART port structure
+ * @flags: The saved interrupt flags for restore
+ */
+static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
+{
+ spin_unlock_irqrestore(&up->lock, flags);
+}
+
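
[Editor's note: a sketch of driver code converted to the wrappers above, replacing direct spin_lock_irqsave()/spin_unlock_irqrestore() on port->lock.]

    unsigned long flags;

    uart_port_lock_irqsave(port, &flags);   /* was: spin_lock_irqsave(&port->lock, flags) */
    /* ... access port registers / transmit buffer ... */
    uart_port_unlock_irqrestore(port, flags);
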
static inline int serial_port_in(struct uart_port *up, int offset)
{
return up->serial_in(up, offset);
@@ -956,14 +1035,14 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
u8 sysrq_ch;
if (!port->has_sysrq) {
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return;
}
sysrq_ch = port->sysrq_ch;
port->sysrq_ch = 0;
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
if (sysrq_ch)
handle_sysrq(sysrq_ch);
@@ -975,14 +1054,14 @@ static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port
u8 sysrq_ch;
if (!port->has_sysrq) {
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return;
}
sysrq_ch = port->sysrq_ch;
port->sysrq_ch = 0;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (sysrq_ch)
handle_sysrq(sysrq_ch);
@@ -998,12 +1077,12 @@ static inline int uart_prepare_sysrq_char(struct uart_port *port, u8 ch)
}
static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
{
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
}
static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
unsigned long flags)
{
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
#endif /* CONFIG_MAGIC_SYSRQ_SERIAL */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index ff56ab804bf6..d6d6ffeeb9a2 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -764,6 +764,8 @@ static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t fla
extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
__realloc_size(3);
extern void kvfree(const void *addr);
+DEFINE_FREE(kvfree, void *, if (_T) kvfree(_T))
+
extern void kvfree_sensitive(const void *addr, size_t len);
unsigned int kmem_cache_size(struct kmem_cache *s);
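
[Editor's note: the DEFINE_FREE(kvfree, ...) line above hooks kvfree() into the scope-based cleanup helpers. A hedged sketch, assuming <linux/cleanup.h> is included by the caller:]

    void *buf __free(kvfree) = kvmalloc(len, GFP_KERNEL);

    if (!buf)
            return -ENOMEM;
    /* on any later return from this scope, buf is kvfree()d automatically */
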
diff --git a/include/linux/string.h b/include/linux/string.h
index 5077776e995e..ce137830a0b9 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_STRING_H_
#define _LINUX_STRING_H_
+#include <linux/array_size.h>
#include <linux/compiler.h> /* for inline */
#include <linux/types.h> /* for size_t */
#include <linux/stddef.h> /* for NULL */
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index af7358277f1c..e9d4377d03c6 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -92,6 +92,7 @@ struct rpc_clnt {
};
const struct cred *cl_cred;
unsigned int cl_max_connect; /* max number of transports not to the same IP */
+ struct super_block *pipefs_sb;
};
/*
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index fd3fe5c8c17f..b717a70219f6 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -181,6 +181,8 @@ struct bin_attribute {
char *, loff_t, size_t);
ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *,
char *, loff_t, size_t);
+ loff_t (*llseek)(struct file *, struct kobject *, struct bin_attribute *,
+ loff_t, int);
int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
struct vm_area_struct *vma);
};
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index ec4e9367f5b0..68f3d315d2e1 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -152,7 +152,7 @@ struct tcp_request_sock {
u64 snt_synack; /* first SYNACK sent time */
bool tfo_listener;
bool is_mptcp;
- s8 req_usec_ts;
+ bool req_usec_ts;
#if IS_ENABLED(CONFIG_MPTCP)
bool drop_req;
#endif
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h
index 02333f47c994..6151c210d987 100644
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -175,7 +175,7 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
* enum tb_link_width - Thunderbolt/USB4 link width
* @TB_LINK_WIDTH_SINGLE: Single lane link
* @TB_LINK_WIDTH_DUAL: Dual lane symmetric link
- * @TB_LINK_WIDTH_ASYM_TX: Dual lane asymmetric Gen 4 link with 3 trasmitters
+ * @TB_LINK_WIDTH_ASYM_TX: Dual lane asymmetric Gen 4 link with 3 transmitters
* @TB_LINK_WIDTH_ASYM_RX: Dual lane asymmetric Gen 4 link with 3 receivers
*/
enum tb_link_width {
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 5eb88a66eb68..d68ff9b1247f 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -492,6 +492,7 @@ enum {
EVENT_FILE_FL_TRIGGER_COND_BIT,
EVENT_FILE_FL_PID_FILTER_BIT,
EVENT_FILE_FL_WAS_ENABLED_BIT,
+ EVENT_FILE_FL_FREED_BIT,
};
extern struct trace_event_file *trace_get_event_file(const char *instance,
@@ -630,6 +631,7 @@ extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);
* TRIGGER_COND - When set, one or more triggers has an associated filter
* PID_FILTER - When set, the event is filtered based on pid
* WAS_ENABLED - Set when enabled to know to clear trace on module removal
+ * FREED - File descriptor is freed, all fields should be considered invalid
*/
enum {
EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT),
@@ -643,13 +645,14 @@ enum {
EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT),
EVENT_FILE_FL_WAS_ENABLED = (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
+ EVENT_FILE_FL_FREED = (1 << EVENT_FILE_FL_FREED_BIT),
};
struct trace_event_file {
struct list_head list;
struct trace_event_call *event_call;
struct event_filter __rcu *filter;
- struct eventfs_file *ef;
+ struct eventfs_inode *ei;
struct trace_array *tr;
struct trace_subsystem_dir *system;
struct list_head triggers;
@@ -671,6 +674,7 @@ struct trace_event_file {
* caching and such. Which is mostly OK ;-)
*/
unsigned long flags;
+ atomic_t ref; /* ref count for opened files */
atomic_t sm_ref; /* soft-mode reference counter */
atomic_t tm_ref; /* trigger-mode reference counter */
};
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index 6be92bf559fe..3691e0e76a1a 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -14,6 +14,7 @@
struct trace_seq {
char buffer[PAGE_SIZE];
struct seq_buf seq;
+ size_t readpos;
int full;
};
@@ -22,6 +23,7 @@ trace_seq_init(struct trace_seq *s)
{
seq_buf_init(&s->seq, s->buffer, PAGE_SIZE);
s->full = 0;
+ s->readpos = 0;
}
/**
diff --git a/include/linux/tracefs.h b/include/linux/tracefs.h
index 009072792fa3..7a5fe17b6bf9 100644
--- a/include/linux/tracefs.h
+++ b/include/linux/tracefs.h
@@ -23,26 +23,69 @@ struct file_operations;
struct eventfs_file;
-struct dentry *eventfs_create_events_dir(const char *name,
- struct dentry *parent);
-
-struct eventfs_file *eventfs_add_subsystem_dir(const char *name,
- struct dentry *parent);
+/**
+ * eventfs_callback - A callback function to create dynamic files in eventfs
+ * @name: The name of the file that is to be created
+ * @mode: return the file mode for the file (RW access, etc)
+ * @data: data to pass to the created file ops
+ * @fops: the file operations of the created file
+ *
+ * The eventfs files are dynamically created. The struct eventfs_entry array
+ * is passed to eventfs_create_dir() or eventfs_create_events_dir() that will
+ * be used to create the files within those directories. When a lookup
+ * or access to a file within the directory is made, the struct eventfs_entry
+ * array is used to find a callback() with the matching name that is being
+ * referenced (for lookups, the entire array is iterated and each callback
+ * will be called).
+ *
+ * The callback will be called with @name for the name of the file to create.
+ * The callback can return less than 1 to indicate that no file should be
+ * created.
+ *
+ * If a file is to be created, then @mode should be populated with the file
+ * mode (permissions) to create the file with. This would be
+ * used to set the created inode i_mode field.
+ *
+ * The @data should be set to the data passed to the other file operations
+ * (read, write, etc). Note, @data will also point to the data passed in
+ * to eventfs_create_dir() or eventfs_create_events_dir(), but the callback
+ * can replace the data if it chooses to. Otherwise, the original data
+ * will be used for the file operation functions.
+ *
+ * The @fops should be set to the file operations that will be used to create
+ * the inode.
+ *
+ * NB. This callback is called while holding internal locks of the eventfs
+ * system. The callback must not call any code that might also call into
+ * the tracefs or eventfs system or it will risk creating a deadlock.
+ */
+typedef int (*eventfs_callback)(const char *name, umode_t *mode, void **data,
+ const struct file_operations **fops);
-struct eventfs_file *eventfs_add_dir(const char *name,
- struct eventfs_file *ef_parent);
+/**
+ * struct eventfs_entry - dynamically created eventfs file call back handler
+ * @name: Then name of the dynamic file in an eventfs directory
+ * @callback: The callback to get the fops of the file when it is created
+ *
+ * See eventfs_callback() typedef for how to set up @callback.
+ */
+struct eventfs_entry {
+ const char *name;
+ eventfs_callback callback;
+};
-int eventfs_add_file(const char *name, umode_t mode,
- struct eventfs_file *ef_parent, void *data,
- const struct file_operations *fops);
+struct eventfs_inode;
-int eventfs_add_events_file(const char *name, umode_t mode,
- struct dentry *parent, void *data,
- const struct file_operations *fops);
+struct eventfs_inode *eventfs_create_events_dir(const char *name, struct dentry *parent,
+ const struct eventfs_entry *entries,
+ int size, void *data);
-void eventfs_remove(struct eventfs_file *ef);
+struct eventfs_inode *eventfs_create_dir(const char *name, struct eventfs_inode *parent,
+ const struct eventfs_entry *entries,
+ int size, void *data);
-void eventfs_remove_events_dir(struct dentry *dentry);
+void eventfs_remove_events_dir(struct eventfs_inode *ei);
+void eventfs_remove_dir(struct eventfs_inode *ei);
struct dentry *tracefs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
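
As a rough sketch of the entry-based API added above, a caller could describe its files with a static eventfs_entry table and one callback; the directory name, the "enable" file, my_enable_fops and the data pointer below are illustrative assumptions, not part of this patch.

/* Minimal sketch of the entry-based eventfs API (assumed names). */
#include <linux/string.h>
#include <linux/tracefs.h>

static const struct file_operations my_enable_fops;	/* assumed fops */

static int my_events_callback(const char *name, umode_t *mode, void **data,
			      const struct file_operations **fops)
{
	if (strcmp(name, "enable") != 0)
		return 0;		/* no file is created for other names */

	*mode = 0644;			/* becomes the created inode's i_mode */
	*fops = &my_enable_fops;
	/* *data already points at the pointer given to eventfs_create_dir() */
	return 1;
}

static const struct eventfs_entry my_entries[] = {
	{ .name = "enable", .callback = my_events_callback },
};

/* during setup, with 'events' being the eventfs_inode of the parent dir: */
static struct eventfs_inode *my_setup(struct eventfs_inode *events, void *data)
{
	return eventfs_create_dir("my_subsystem", events, my_entries,
				  ARRAY_SIZE(my_entries), data);
}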
diff --git a/include/linux/tsm.h b/include/linux/tsm.h
new file mode 100644
index 000000000000..de8324a2223c
--- /dev/null
+++ b/include/linux/tsm.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __TSM_H
+#define __TSM_H
+
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#define TSM_INBLOB_MAX 64
+#define TSM_OUTBLOB_MAX SZ_32K
+
+/*
+ * Privilege level is a nested permission concept to allow confidential
+ * guests to partition address space; 4 levels are supported.
+ */
+#define TSM_PRIVLEVEL_MAX 3
+
+/**
+ * struct tsm_desc - option descriptor for generating tsm report blobs
+ * @privlevel: optional privilege level to associate with @outblob
+ * @inblob_len: sizeof @inblob
+ * @inblob: arbitrary input data
+ */
+struct tsm_desc {
+ unsigned int privlevel;
+ size_t inblob_len;
+ u8 inblob[TSM_INBLOB_MAX];
+};
+
+/**
+ * struct tsm_report - track state of report generation relative to options
+ * @desc: input parameters to @report_new()
+ * @outblob_len: sizeof(@outblob)
+ * @outblob: generated evidence to provide to the attestation agent
+ * @auxblob_len: sizeof(@auxblob)
+ * @auxblob: (optional) auxiliary data to the report (e.g. certificate data)
+ */
+struct tsm_report {
+ struct tsm_desc desc;
+ size_t outblob_len;
+ u8 *outblob;
+ size_t auxblob_len;
+ u8 *auxblob;
+};
+
+/**
+ * struct tsm_ops - attributes and operations for tsm instances
+ * @name: tsm id reflected in /sys/kernel/config/tsm/report/$report/provider
+ * @privlevel_floor: convey base privlevel for nested scenarios
+ * @report_new: Populate @report with the report blob and auxblob
+ * (optional), return 0 on successful population, or -errno otherwise
+ *
+ * Implementation-specific ops; only one is expected to be registered at
+ * a time, i.e. only one of "sev-guest", "tdx-guest", etc.
+ */
+struct tsm_ops {
+ const char *name;
+ const unsigned int privlevel_floor;
+ int (*report_new)(struct tsm_report *report, void *data);
+};
+
+extern const struct config_item_type tsm_report_default_type;
+
+/* publish @privlevel, @privlevel_floor, and @auxblob attributes */
+extern const struct config_item_type tsm_report_extra_type;
+
+int tsm_register(const struct tsm_ops *ops, void *priv,
+ const struct config_item_type *type);
+int tsm_unregister(const struct tsm_ops *ops);
+#endif /* __TSM_H */
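
A hedged sketch of how a guest driver might plug into this interface follows; the provider name, the callback body and the choice of tsm_report_extra_type are assumptions for illustration, not defined by this header.

/* Hypothetical TSM provider registration using the ops above. */
#include <linux/tsm.h>

static int my_report_new(struct tsm_report *report, void *data)
{
	/* fill report->outblob / outblob_len (and optionally auxblob) here,
	 * using report->desc.inblob as the caller-supplied nonce */
	return 0;
}

static const struct tsm_ops my_tsm_ops = {
	.name = "my-guest",		/* shown as .../tsm/report/$report/provider */
	.privlevel_floor = 1,
	.report_new = my_report_new,
};

static int my_guest_init(void *my_priv)
{
	/* tsm_report_extra_type also publishes privlevel/auxblob attributes */
	return tsm_register(&my_tsm_ops, my_priv, &tsm_report_extra_type);
}

static void my_guest_exit(void)
{
	tsm_unregister(&my_tsm_ops);
}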
diff --git a/include/linux/tty.h b/include/linux/tty.h
index f002d0f25db7..4b6340ac2af2 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -390,14 +390,12 @@ int vcs_init(void);
extern const struct class tty_class;
/**
- * tty_kref_get - get a tty reference
- * @tty: tty device
+ * tty_kref_get - get a tty reference
+ * @tty: tty device
*
- * Return a new reference to a tty object. The caller must hold
- * sufficient locks/counts to ensure that their existing reference cannot
- * go away
+ * Returns: a new reference to a tty object. The caller must hold sufficient
+ * locks/counts to ensure that their existing reference cannot go away
*/
-
static inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
{
if (tty)
@@ -416,8 +414,8 @@ unsigned int tty_chars_in_buffer(struct tty_struct *tty);
unsigned int tty_write_room(struct tty_struct *tty);
void tty_driver_flush_buffer(struct tty_struct *tty);
void tty_unthrottle(struct tty_struct *tty);
-int tty_throttle_safe(struct tty_struct *tty);
-int tty_unthrottle_safe(struct tty_struct *tty);
+bool tty_throttle_safe(struct tty_struct *tty);
+bool tty_unthrottle_safe(struct tty_struct *tty);
int tty_do_resize(struct tty_struct *tty, struct winsize *ws);
int tty_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *icount);
@@ -435,14 +433,13 @@ void tty_encode_baud_rate(struct tty_struct *tty, speed_t ibaud,
speed_t obaud);
/**
- * tty_get_baud_rate - get tty bit rates
- * @tty: tty to query
+ * tty_get_baud_rate - get tty bit rates
+ * @tty: tty to query
*
- * Returns the baud rate as an integer for this terminal. The
- * termios lock must be held by the caller and the terminal bit
- * flags may be updated.
+ * Returns: the baud rate as an integer for this terminal. The termios lock
+ * must be held by the caller and the terminal bit flags may be updated.
*
- * Locking: none
+ * Locking: none
*/
static inline speed_t tty_get_baud_rate(struct tty_struct *tty)
{
diff --git a/include/linux/usb.h b/include/linux/usb.h
index a21074861f91..8c61643acd49 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1823,22 +1823,6 @@ void *usb_alloc_coherent(struct usb_device *dev, size_t size,
void usb_free_coherent(struct usb_device *dev, size_t size,
void *addr, dma_addr_t dma);
-#if 0
-struct urb *usb_buffer_map(struct urb *urb);
-void usb_buffer_dmasync(struct urb *urb);
-void usb_buffer_unmap(struct urb *urb);
-#endif
-
-struct scatterlist;
-int usb_buffer_map_sg(const struct usb_device *dev, int is_in,
- struct scatterlist *sg, int nents);
-#if 0
-void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
- struct scatterlist *sg, int n_hw_ents);
-#endif
-void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
- struct scatterlist *sg, int n_hw_ents);
-
/*-------------------------------------------------------------------*
* SYNCHRONOUS CALL SUPPORT *
*-------------------------------------------------------------------*/
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index 0b4f2d5faa08..5a7f96684ea2 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -64,6 +64,7 @@ struct ci_hdrc_platform_data {
#define CI_HDRC_PMQOS BIT(15)
#define CI_HDRC_PHY_VBUS_CONTROL BIT(16)
#define CI_HDRC_HAS_PORTSC_PEC_MISSED BIT(17)
+#define CI_HDRC_FORCE_VBUS_ACTIVE_ALWAYS BIT(18)
enum usb_dr_mode dr_mode;
#define CI_HDRC_CONTROLLER_RESET_EVENT 0
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 6014340ba980..af3cd2aae4bc 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -35,6 +35,14 @@
* are ready. The control transfer will then be kept from completing till
* all the function drivers that requested USB_GADGET_DELAYED_STATUS
* invoke usb_composite_setup_continue().
+ *
+ * NOTE: USB_GADGET_DELAYED_STATUS must not be used in UDC drivers: they
+ * must delay completing the status stage for 0-length control transfers
+ * regardless of whether USB_GADGET_DELAYED_STATUS is returned from
+ * the gadget driver's setup() callback.
+ * Currently, a number of UDC drivers rely on USB_GADGET_DELAYED_STATUS,
+ * which is a bug. These drivers must be fixed and USB_GADGET_DELAYED_STATUS
+ * must be contained within the composite framework.
*/
#define USB_GADGET_DELAYED_STATUS 0x7fff /* Impossibly large value */
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 75bda0783395..6532beb587b1 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -711,6 +711,15 @@ static inline int usb_gadget_check_config(struct usb_gadget *gadget)
* get_interface. Setting a configuration (or interface) is where
* endpoints should be activated or (config 0) shut down.
*
+ * The gadget driver's setup() callback does not have to queue a response to
+ * ep0 within the setup() call, the driver can do it after setup() returns.
+ * The UDC driver must wait until such a response is queued before proceeding
+ * with the data/status stages of the control transfer.
+ *
+ * NOTE: Currently, a number of UDC drivers rely on USB_GADGET_DELAYED_STATUS
+ * being returned from the setup() callback, which is a bug. See the comment
+ * next to USB_GADGET_DELAYED_STATUS for details.
+ *
* (Note that only the default control endpoint is supported. Neither
* hosts nor devices generally support control traffic except to ep0.)
*
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 61d4f0b793dc..00724b4f6e12 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -484,8 +484,25 @@ extern int usb_hcd_pci_probe(struct pci_dev *dev,
extern void usb_hcd_pci_remove(struct pci_dev *dev);
extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
+#ifdef CONFIG_USB_PCI_AMD
extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev);
+static inline bool usb_hcd_amd_resume_bug(struct pci_dev *dev,
+ const struct hc_driver *driver)
+{
+ if (!usb_hcd_amd_remote_wakeup_quirk(dev))
+ return false;
+ if (driver->flags & (HCD_USB11 | HCD_USB3))
+ return true;
+ return false;
+}
+#else /* CONFIG_USB_PCI_AMD */
+static inline bool usb_hcd_amd_resume_bug(struct pci_dev *dev,
+ const struct hc_driver *driver)
+{
+ return false;
+}
+#endif
extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
#endif /* CONFIG_USB_PCI */
diff --git a/include/linux/usb/ljca.h b/include/linux/usb/ljca.h
new file mode 100644
index 000000000000..47661feda96c
--- /dev/null
+++ b/include/linux/usb/ljca.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Intel Corporation. All rights reserved.
+ */
+#ifndef _LINUX_USB_LJCA_H_
+#define _LINUX_USB_LJCA_H_
+
+#include <linux/auxiliary_bus.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define LJCA_MAX_GPIO_NUM 64
+
+#define auxiliary_dev_to_ljca_client(auxiliary_dev) \
+ container_of(auxiliary_dev, struct ljca_client, auxdev)
+
+struct ljca_adapter;
+
+/**
+ * typedef ljca_event_cb_t - event callback function signature
+ *
+ * @context: the execution context of who registered this callback
+ * @cmd: the command from device for this event
+ * @evt_data: the event data payload
+ * @len: the event data payload length
+ *
+ * The callback function is called in interrupt context and the data payload is
+ * only valid during the call. If the user needs to access the data later, it
+ * must copy it.
+ */
+typedef void (*ljca_event_cb_t)(void *context, u8 cmd, const void *evt_data, int len);
+
+/**
+ * struct ljca_client - represent a ljca client device
+ *
+ * @type: ljca client type
+ * @id: ljca client id within same client type
+ * @link: ljca client on the same ljca adapter
+ * @auxdev: auxiliary device object
+ * @adapter: ljca adapter the ljca client sits on
+ * @context: the execution context of the event callback
+ * @event_cb: ljca client driver registers this callback to get
+ * firmware asynchronous rx buffer pending notifications
+ * @event_cb_lock: spinlock to protect event callback
+ */
+struct ljca_client {
+ u8 type;
+ u8 id;
+ struct list_head link;
+ struct auxiliary_device auxdev;
+ struct ljca_adapter *adapter;
+
+ void *context;
+ ljca_event_cb_t event_cb;
+ /* lock to protect event_cb */
+ spinlock_t event_cb_lock;
+};
+
+/**
+ * struct ljca_gpio_info - ljca gpio client device info
+ *
+ * @num: ljca gpio client device pin number
+ * @valid_pin_map: ljca gpio client device valid pin mapping
+ */
+struct ljca_gpio_info {
+ unsigned int num;
+ DECLARE_BITMAP(valid_pin_map, LJCA_MAX_GPIO_NUM);
+};
+
+/**
+ * struct ljca_i2c_info - ljca i2c client device info
+ *
+ * @id: ljca i2c client device identification number
+ * @capacity: ljca i2c client device capacity
+ * @intr_pin: ljca i2c client device interrupt pin number if exists
+ */
+struct ljca_i2c_info {
+ u8 id;
+ u8 capacity;
+ u8 intr_pin;
+};
+
+/**
+ * struct ljca_spi_info - ljca spi client device info
+ *
+ * @id: ljca spi client device identification number
+ * @capacity: ljca spi client device capacity
+ */
+struct ljca_spi_info {
+ u8 id;
+ u8 capacity;
+};
+
+/**
+ * ljca_register_event_cb - register a callback function to receive events
+ *
+ * @client: ljca client device
+ * @event_cb: callback function
+ * @context: execution context of event callback
+ *
+ * Return: 0 in case of success, negative value in case of error
+ */
+int ljca_register_event_cb(struct ljca_client *client, ljca_event_cb_t event_cb, void *context);
+
+/**
+ * ljca_unregister_event_cb - unregister the callback function for an event
+ *
+ * @client: ljca client device
+ */
+void ljca_unregister_event_cb(struct ljca_client *client);
+
+/**
+ * ljca_transfer - issue a LJCA command and wait for a response
+ *
+ * @client: ljca client device
+ * @cmd: the command to be sent to the device
+ * @obuf: the buffer to be sent to the device; it can be NULL if the user
+ * doesn't need to transmit data with this command
+ * @obuf_len: the size of the buffer to be sent to the device; it should
+ * be 0 when obuf is NULL
+ * @ibuf: any data associated with the response will be copied here; it can be
+ * NULL if the user doesn't need the response data
+ * @ibuf_len: must be initialized to the input buffer size
+ *
+ * Return: the actual length of response data for success, negative value for errors
+ */
+int ljca_transfer(struct ljca_client *client, u8 cmd, const u8 *obuf,
+ u8 obuf_len, u8 *ibuf, u8 ibuf_len);
+
+/**
+ * ljca_transfer_noack - issue a LJCA command without a response
+ *
+ * @client: ljca client device
+ * @cmd: the command to be sent to the device
+ * @obuf: the buffer to be sent to the device; it can be NULL if the user
+ * doesn't need to transmit data with this command
+ * @obuf_len: the size of the buffer to be sent to the device
+ *
+ * Return: 0 for success, negative value for errors
+ */
+int ljca_transfer_noack(struct ljca_client *client, u8 cmd, const u8 *obuf,
+ u8 obuf_len);
+
+#endif
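
A rough sketch of a client driver consuming this API is shown below; the command value 0x10, the payload bytes and the probe wiring are made-up illustrations, only the ljca_* calls come from this header.

/* Hypothetical LJCA client usage (assumed command and payload). */
#include <linux/auxiliary_bus.h>
#include <linux/usb/ljca.h>

static void my_event_cb(void *context, u8 cmd, const void *evt_data, int len)
{
	/* runs in interrupt context: copy evt_data if it is needed later */
}

static int my_probe(struct auxiliary_device *auxdev,
		    const struct auxiliary_device_id *id)
{
	struct ljca_client *client = auxiliary_dev_to_ljca_client(auxdev);
	u8 out[2] = { 0x01, 0x02 };	/* illustrative request payload */
	u8 in[8];
	int ret;

	ret = ljca_register_event_cb(client, my_event_cb, client);
	if (ret)
		return ret;

	/* 0x10 is an assumed command id, not defined by this header */
	ret = ljca_transfer(client, 0x10, out, sizeof(out), in, sizeof(in));
	if (ret < 0) {
		ljca_unregister_event_cb(client);
		return ret;		/* ret >= 0 is the response length */
	}

	return 0;
}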
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
index c59fb79a42e8..eb626af0e4e7 100644
--- a/include/linux/usb/pd.h
+++ b/include/linux/usb/pd.h
@@ -228,6 +228,7 @@ enum pd_pdo_type {
#define PDO_FIXED_UNCHUNK_EXT BIT(24) /* Unchunked Extended Message supported (Source) */
#define PDO_FIXED_FRS_CURR_MASK (BIT(24) | BIT(23)) /* FR_Swap Current (Sink) */
#define PDO_FIXED_FRS_CURR_SHIFT 23
+#define PDO_FIXED_PEAK_CURR_SHIFT 20
#define PDO_FIXED_VOLT_SHIFT 10 /* 50mV units */
#define PDO_FIXED_CURR_SHIFT 0 /* 10mA units */
diff --git a/include/linux/usb/pd_vdo.h b/include/linux/usb/pd_vdo.h
index b057250704e8..3a747938cdab 100644
--- a/include/linux/usb/pd_vdo.h
+++ b/include/linux/usb/pd_vdo.h
@@ -376,6 +376,7 @@
| ((vbm) & 0x3) << 9 | (sbu) << 8 | (sbut) << 7 | ((cur) & 0x3) << 5 \
| (vbt) << 4 | (sopp) << 3 | ((spd) & 0x7))
+#define VDO_TYPEC_CABLE_SPEED(vdo) ((vdo) & 0x7)
#define VDO_TYPEC_CABLE_TYPE(vdo) (((vdo) >> 18) & 0x3)
/*
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index d418c55523a7..372898d9eeb0 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -5,16 +5,6 @@
* Copyright (C) 2011 Renesas Solutions Corp.
* Copyright (C) 2019 Renesas Electronics Corporation
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef RENESAS_USB_H
#define RENESAS_USB_H
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
index 8fa781207970..a05d6f6f2536 100644
--- a/include/linux/usb/typec.h
+++ b/include/linux/usb/typec.h
@@ -202,6 +202,8 @@ struct typec_cable_desc {
* @accessory: Audio, Debug or none.
* @identity: Discover Identity command data
* @pd_revision: USB Power Delivery Specification Revision if supported
+ * @attach: Notification about attached USB device
+ * @deattach: Notification about removed USB device
*
* Details about a partner that is attached to USB Type-C port. If @identity
* member exists when partner is registered, a directory named "identity" is
@@ -217,6 +219,9 @@ struct typec_partner_desc {
enum typec_accessory accessory;
struct usb_pd_identity *identity;
u16 pd_revision; /* 0300H = "3.0" */
+
+ void (*attach)(struct typec_partner *partner, struct device *dev);
+ void (*deattach)(struct typec_partner *partner, struct device *dev);
};
/**
@@ -335,4 +340,36 @@ int typec_port_set_usb_power_delivery(struct typec_port *port, struct usb_power_
int typec_partner_set_usb_power_delivery(struct typec_partner *partner,
struct usb_power_delivery *pd);
+/**
+ * struct typec_connector - Representation of Type-C port for external drivers
+ * @attach: notification about device connection
+ * @deattach: notification about device removal
+ *
+ * Drivers that control the USB and other ports (DisplayPorts, etc.), that are
+ * connected to the Type-C connectors, can use these callbacks to inform the
+ * Type-C connector class about connections and disconnections. That information
+ * can then be used by the typec-port drivers to power on or off parts that are
+ * needed or not needed - as an example, in USB mode, if a USB2 device is
+ * enumerated, USB3 components (retimers, phys, and what have you) do not need
+ * to be powered on.
+ *
+ * The attached (enumerated) devices will be linked with the typec-partner device.
+ */
+struct typec_connector {
+ void (*attach)(struct typec_connector *con, struct device *dev);
+ void (*deattach)(struct typec_connector *con, struct device *dev);
+};
+
+static inline void typec_attach(struct typec_connector *con, struct device *dev)
+{
+ if (con && con->attach)
+ con->attach(con, dev);
+}
+
+static inline void typec_deattach(struct typec_connector *con, struct device *dev)
+{
+ if (con && con->deattach)
+ con->deattach(con, dev);
+}
+
#endif /* __LINUX_USB_TYPEC_H */
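
The following sketch shows how a driver that enumerates devices behind a Type-C connector might forward those events with the helpers above; struct my_port and the way the connector pointer is handed over are assumptions, not part of this patch.

/* Hypothetical caller of the typec_connector callbacks declared above. */
#include <linux/usb/typec.h>

struct my_port {
	struct typec_connector *con;	/* provided by the Type-C port driver */
};

static void my_device_enumerated(struct my_port *port, struct device *udev)
{
	/* safe to call with a NULL connector or a missing callback */
	typec_attach(port->con, udev);
}

static void my_device_removed(struct my_port *port, struct device *udev)
{
	typec_deattach(port->con, udev);
}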
diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h
index 8d09c2f0a9b8..1f358098522d 100644
--- a/include/linux/usb/typec_dp.h
+++ b/include/linux/usb/typec_dp.h
@@ -67,8 +67,10 @@ enum {
#define DP_CAP_UFP_D 1
#define DP_CAP_DFP_D 2
#define DP_CAP_DFP_D_AND_UFP_D 3
-#define DP_CAP_DP_SIGNALING BIT(2) /* Always set */
-#define DP_CAP_GEN2 BIT(3) /* Reserved after v1.0b */
+#define DP_CAP_DP_SIGNALLING(_cap_) (((_cap_) & GENMASK(5, 2)) >> 2)
+#define DP_CAP_SIGNALLING_HBR3 1
+#define DP_CAP_SIGNALLING_UHBR10 2
+#define DP_CAP_SIGNALLING_UHBR20 3
#define DP_CAP_RECEPTACLE BIT(6)
#define DP_CAP_USB BIT(7)
#define DP_CAP_DFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(15, 8)) >> 8)
@@ -78,6 +80,13 @@ enum {
DP_CAP_UFP_D_PIN_ASSIGN(_cap_) : DP_CAP_DFP_D_PIN_ASSIGN(_cap_))
#define DP_CAP_PIN_ASSIGN_DFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? \
DP_CAP_DFP_D_PIN_ASSIGN(_cap_) : DP_CAP_UFP_D_PIN_ASSIGN(_cap_))
+#define DP_CAP_UHBR_13_5_SUPPORT BIT(26)
+#define DP_CAP_CABLE_TYPE(_cap_) (((_cap_) & GENMASK(29, 28)) >> 28)
+#define DP_CAP_CABLE_TYPE_PASSIVE 0
+#define DP_CAP_CABLE_TYPE_RE_TIMER 1
+#define DP_CAP_CABLE_TYPE_RE_DRIVER 2
+#define DP_CAP_CABLE_TYPE_OPTICAL 3
+#define DP_CAP_DPAM_VERSION BIT(30)
/* DisplayPort Status Update VDO bits */
#define DP_STATUS_CONNECTION(_status_) ((_status_) & 3)
@@ -97,13 +106,24 @@ enum {
#define DP_CONF_CURRENTLY(_conf_) ((_conf_) & 3)
#define DP_CONF_UFP_U_AS_DFP_D BIT(0)
#define DP_CONF_UFP_U_AS_UFP_D BIT(1)
-#define DP_CONF_SIGNALING_DP BIT(2)
-#define DP_CONF_SIGNALING_GEN_2 BIT(3) /* Reserved after v1.0b */
+#define DP_CONF_SIGNALLING_MASK GENMASK(5, 2)
+#define DP_CONF_SIGNALLING_SHIFT 2
+#define DP_CONF_SIGNALLING_HBR3 1
+#define DP_CONF_SIGNALLING_UHBR10 2
+#define DP_CONF_SIGNALLING_UHBR20 3
#define DP_CONF_PIN_ASSIGNEMENT_SHIFT 8
#define DP_CONF_PIN_ASSIGNEMENT_MASK GENMASK(15, 8)
/* Helper for setting/getting the pin assignment value to the configuration */
#define DP_CONF_SET_PIN_ASSIGN(_a_) ((_a_) << 8)
#define DP_CONF_GET_PIN_ASSIGN(_conf_) (((_conf_) & GENMASK(15, 8)) >> 8)
+#define DP_CONF_UHBR13_5_SUPPORT BIT(26)
+#define DP_CONF_CABLE_TYPE_MASK GENMASK(29, 28)
+#define DP_CONF_CABLE_TYPE_SHIFT 28
+#define DP_CONF_CABLE_TYPE_PASSIVE 0
+#define DP_CONF_CABLE_TYPE_RE_TIMER 1
+#define DP_CONF_CABLE_TYPE_RE_DRIVER 2
+#define DP_CONF_CABLE_TYPE_OPTICAL 3
+#define DP_CONF_DPAM_VERSION BIT(30)
#endif /* __USB_TYPEC_DP_H */
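
As a small illustration of the reworked capability fields above, a port driver could decode the signalling rate and cable type from a DisplayPort Capabilities VDO like this; the policy expressed by the helper is an assumption, only the macros come from this header.

/* Hypothetical decode of a DisplayPort Capabilities VDO with the new fields. */
static bool my_cable_needs_active_handling(u32 cap)
{
	/* UHBR rates or a non-passive cable both get special handling here
	 * (assumed policy, for illustration only) */
	return DP_CAP_DP_SIGNALLING(cap) >= DP_CAP_SIGNALLING_UHBR10 ||
	       DP_CAP_CABLE_TYPE(cap) != DP_CAP_CABLE_TYPE_PASSIVE;
}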
diff --git a/include/linux/usb/typec_tbt.h b/include/linux/usb/typec_tbt.h
index 63dd44b72e0c..c7a2153bd6f5 100644
--- a/include/linux/usb/typec_tbt.h
+++ b/include/linux/usb/typec_tbt.h
@@ -46,6 +46,7 @@ struct typec_thunderbolt_data {
#define TBT_CABLE_OPTICAL BIT(21)
#define TBT_CABLE_RETIMER BIT(22)
#define TBT_CABLE_LINK_TRAINING BIT(23)
+#define TBT_CABLE_ACTIVE_PASSIVE BIT(25)
#define TBT_SET_CABLE_SPEED(_s_) (((_s_) & GENMASK(2, 0)) << 16)
#define TBT_SET_CABLE_ROUNDED(_g_) (((_g_) & GENMASK(1, 0)) << 19)
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 0e652026b776..db15ac07f8a6 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -204,6 +204,16 @@ struct vdpa_map_file {
* @vdev: vdpa device
* @idx: virtqueue index
* Returns u32: group id for this virtqueue
+ * @get_vq_desc_group: Get the group id for the descriptor table of
+ * a specific virtqueue (optional)
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * Returns u32: group id for the descriptor table
+ * portion of this virtqueue. Could be different
+ * from the one returned by @get_vq_group, in which case
+ * the access to the descriptor table can be
+ * confined to a separate asid, isolating from
+ * the virtqueue's buffer address access.
* @get_device_features: Get virtio features supported by the device
* @vdev: vdpa device
* Returns the virtio features support by the
@@ -242,6 +252,17 @@ struct vdpa_map_file {
* @reset: Reset device
* @vdev: vdpa device
* Returns integer: success (0) or error (< 0)
+ * @compat_reset: Reset device with compatibility quirks to
+ * accommodate older userspace. Only needed by
+ * a parent driver which used to have bogus reset
+ * behaviour and has to maintain such behaviour
+ * for compatibility with older userspace.
+ * A historically compliant driver only has to
+ * implement .reset; a historically non-compliant
+ * driver should implement both.
+ * @vdev: vdpa device
+ * @flags: compatibility quirks for reset
+ * Returns integer: success (0) or error (< 0)
* @suspend: Suspend the device (optional)
* @vdev: vdpa device
* Returns integer: success (0) or error (< 0)
@@ -317,6 +338,15 @@ struct vdpa_map_file {
* @iova: iova to be unmapped
* @size: size of the area
* Returns integer: success (0) or error (< 0)
+ * @reset_map: Reset device memory mapping to the default
+ * state (optional)
+ * Needed for devices that are using device
+ * specific DMA translation and prefer mapping
+ * to be decoupled from the virtio life cycle,
+ * i.e. device .reset op does not reset mapping
+ * @vdev: vdpa device
+ * @asid: address space identifier
+ * Returns integer: success (0) or error (< 0)
* @get_vq_dma_dev: Get the dma device for a specific
* virtqueue (optional)
* @vdev: vdpa device
@@ -360,6 +390,7 @@ struct vdpa_config_ops {
/* Device ops */
u32 (*get_vq_align)(struct vdpa_device *vdev);
u32 (*get_vq_group)(struct vdpa_device *vdev, u16 idx);
+ u32 (*get_vq_desc_group)(struct vdpa_device *vdev, u16 idx);
u64 (*get_device_features)(struct vdpa_device *vdev);
u64 (*get_backend_features)(const struct vdpa_device *vdev);
int (*set_driver_features)(struct vdpa_device *vdev, u64 features);
@@ -373,6 +404,8 @@ struct vdpa_config_ops {
u8 (*get_status)(struct vdpa_device *vdev);
void (*set_status)(struct vdpa_device *vdev, u8 status);
int (*reset)(struct vdpa_device *vdev);
+ int (*compat_reset)(struct vdpa_device *vdev, u32 flags);
+#define VDPA_RESET_F_CLEAN_MAP 1
int (*suspend)(struct vdpa_device *vdev);
int (*resume)(struct vdpa_device *vdev);
size_t (*get_config_size)(struct vdpa_device *vdev);
@@ -394,6 +427,7 @@ struct vdpa_config_ops {
u64 iova, u64 size, u64 pa, u32 perm, void *opaque);
int (*dma_unmap)(struct vdpa_device *vdev, unsigned int asid,
u64 iova, u64 size);
+ int (*reset_map)(struct vdpa_device *vdev, unsigned int asid);
int (*set_group_asid)(struct vdpa_device *vdev, unsigned int group,
unsigned int asid);
struct device *(*get_vq_dma_dev)(struct vdpa_device *vdev, u16 idx);
@@ -485,14 +519,17 @@ static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev)
return vdev->dma_dev;
}
-static inline int vdpa_reset(struct vdpa_device *vdev)
+static inline int vdpa_reset(struct vdpa_device *vdev, u32 flags)
{
const struct vdpa_config_ops *ops = vdev->config;
int ret;
down_write(&vdev->cf_lock);
vdev->features_valid = false;
- ret = ops->reset(vdev);
+ if (ops->compat_reset && flags)
+ ret = ops->compat_reset(vdev, flags);
+ else
+ ret = ops->reset(vdev);
up_write(&vdev->cf_lock);
return ret;
}
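
A hedged sketch of the split between the two reset paths follows; my_hw_reset() and my_vdpa_reset_map() are assumed driver helpers, only the op names, the flag and vdpa_reset() come from this patch.

/* Hypothetical parent-driver wiring for the compat reset path above. */
static int my_vdpa_reset(struct vdpa_device *vdev)
{
	/* spec-compliant reset: device state only, mappings are preserved */
	return my_hw_reset(vdev, false);
}

static int my_vdpa_compat_reset(struct vdpa_device *vdev, u32 flags)
{
	/* legacy behaviour for older userspace: optionally clear mappings too */
	return my_hw_reset(vdev, flags & VDPA_RESET_F_CLEAN_MAP);
}

static const struct vdpa_config_ops my_vdpa_ops = {
	/* ... other ops ... */
	.reset		= my_vdpa_reset,
	.compat_reset	= my_vdpa_compat_reset,
	.reset_map	= my_vdpa_reset_map,	/* assumed helper */
};

/* core code then picks the path through the updated helper: */
/*   vdpa_reset(vdev, 0);                       spec-compliant reset   */
/*   vdpa_reset(vdev, VDPA_RESET_F_CLEAN_MAP);  compat reset, clean map */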
diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h
index 067ac1d789bc..d0f2797420f7 100644
--- a/include/linux/virtio_pci_modern.h
+++ b/include/linux/virtio_pci_modern.h
@@ -12,37 +12,48 @@ struct virtio_pci_modern_common_cfg {
__le16 queue_reset; /* read-write */
};
+/**
+ * struct virtio_pci_modern_device - info for modern PCI virtio
+ * @pci_dev: Ptr to the PCI device struct
+ * @common: Position of the common capability in the PCI config
+ * @device: Device-specific data (non-legacy mode)
+ * @notify_base: Base of vq notifications (non-legacy mode)
+ * @notify_pa: Physical base of vq notifications
+ * @isr: Where to read and clear interrupt
+ * @notify_len: So we can sanity-check accesses
+ * @device_len: So we can sanity-check accesses
+ * @notify_map_cap: Capability for when we need to map notifications per-vq
+ * @notify_offset_multiplier: Multiply queue_notify_off by this value
+ * (non-legacy mode).
+ * @modern_bars: Bitmask of BARs
+ * @id: Device and vendor id
+ * @device_id_check: Callback defined before vp_modern_probe() to be used to
+ * verify the PCI device is a vendor's expected device rather
+ * than the standard virtio PCI device
+ * Returns the found device id or ERRNO
+ * @dma_mask: Optional mask instead of the traditional DMA_BIT_MASK(64),
+ * for vendor devices with DMA space address limitations
+ */
struct virtio_pci_modern_device {
struct pci_dev *pci_dev;
struct virtio_pci_common_cfg __iomem *common;
- /* Device-specific data (non-legacy mode) */
void __iomem *device;
- /* Base of vq notifications (non-legacy mode). */
void __iomem *notify_base;
- /* Physical base of vq notifications */
resource_size_t notify_pa;
- /* Where to read and clear interrupt */
u8 __iomem *isr;
- /* So we can sanity-check accesses. */
size_t notify_len;
size_t device_len;
+ size_t common_len;
- /* Capability for when we need to map notifications per-vq. */
int notify_map_cap;
- /* Multiply queue_notify_off by this value. (non-legacy mode). */
u32 notify_offset_multiplier;
-
int modern_bars;
-
struct virtio_device_id id;
- /* optional check for vendor virtio device, returns dev_id or -ERRNO */
int (*device_id_check)(struct pci_dev *pdev);
-
- /* optional mask for devices with limited DMA space */
u64 dma_mask;
};
diff --git a/include/linux/vlynq.h b/include/linux/vlynq.h
deleted file mode 100644
index e9c0cd36c48a..000000000000
--- a/include/linux/vlynq.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2006, 2007 Eugene Konev <ejka@openwrt.org>
- */
-
-#ifndef __VLYNQ_H__
-#define __VLYNQ_H__
-
-#include <linux/device.h>
-#include <linux/types.h>
-
-struct module;
-
-#define VLYNQ_NUM_IRQS 32
-
-struct vlynq_mapping {
- u32 size;
- u32 offset;
-};
-
-enum vlynq_divisor {
- vlynq_div_auto = 0,
- vlynq_ldiv1,
- vlynq_ldiv2,
- vlynq_ldiv3,
- vlynq_ldiv4,
- vlynq_ldiv5,
- vlynq_ldiv6,
- vlynq_ldiv7,
- vlynq_ldiv8,
- vlynq_rdiv1,
- vlynq_rdiv2,
- vlynq_rdiv3,
- vlynq_rdiv4,
- vlynq_rdiv5,
- vlynq_rdiv6,
- vlynq_rdiv7,
- vlynq_rdiv8,
- vlynq_div_external
-};
-
-struct vlynq_device_id {
- u32 id;
- enum vlynq_divisor divisor;
- unsigned long driver_data;
-};
-
-struct vlynq_regs;
-struct vlynq_device {
- u32 id, dev_id;
- int local_irq;
- int remote_irq;
- enum vlynq_divisor divisor;
- u32 regs_start, regs_end;
- u32 mem_start, mem_end;
- u32 irq_start, irq_end;
- int irq;
- int enabled;
- struct vlynq_regs *local;
- struct vlynq_regs *remote;
- struct device dev;
-};
-
-struct vlynq_driver {
- char *name;
- struct vlynq_device_id *id_table;
- int (*probe)(struct vlynq_device *dev, struct vlynq_device_id *id);
- void (*remove)(struct vlynq_device *dev);
- struct device_driver driver;
-};
-
-struct plat_vlynq_ops {
- int (*on)(struct vlynq_device *dev);
- void (*off)(struct vlynq_device *dev);
-};
-
-static inline struct vlynq_driver *to_vlynq_driver(struct device_driver *drv)
-{
- return container_of(drv, struct vlynq_driver, driver);
-}
-
-static inline struct vlynq_device *to_vlynq_device(struct device *device)
-{
- return container_of(device, struct vlynq_device, dev);
-}
-
-extern struct bus_type vlynq_bus_type;
-
-extern int __vlynq_register_driver(struct vlynq_driver *driver,
- struct module *owner);
-
-static inline int vlynq_register_driver(struct vlynq_driver *driver)
-{
- return __vlynq_register_driver(driver, THIS_MODULE);
-}
-
-static inline void *vlynq_get_drvdata(struct vlynq_device *dev)
-{
- return dev_get_drvdata(&dev->dev);
-}
-
-static inline void vlynq_set_drvdata(struct vlynq_device *dev, void *data)
-{
- dev_set_drvdata(&dev->dev, data);
-}
-
-static inline u32 vlynq_mem_start(struct vlynq_device *dev)
-{
- return dev->mem_start;
-}
-
-static inline u32 vlynq_mem_end(struct vlynq_device *dev)
-{
- return dev->mem_end;
-}
-
-static inline u32 vlynq_mem_len(struct vlynq_device *dev)
-{
- return dev->mem_end - dev->mem_start + 1;
-}
-
-static inline int vlynq_virq_to_irq(struct vlynq_device *dev, int virq)
-{
- int irq = dev->irq_start + virq;
- if ((irq < dev->irq_start) || (irq > dev->irq_end))
- return -EINVAL;
-
- return irq;
-}
-
-static inline int vlynq_irq_to_virq(struct vlynq_device *dev, int irq)
-{
- if ((irq < dev->irq_start) || (irq > dev->irq_end))
- return -EINVAL;
-
- return irq - dev->irq_start;
-}
-
-extern void vlynq_unregister_driver(struct vlynq_driver *driver);
-extern int vlynq_enable_device(struct vlynq_device *dev);
-extern void vlynq_disable_device(struct vlynq_device *dev);
-extern int vlynq_set_local_mapping(struct vlynq_device *dev, u32 tx_offset,
- struct vlynq_mapping *mapping);
-extern int vlynq_set_remote_mapping(struct vlynq_device *dev, u32 tx_offset,
- struct vlynq_mapping *mapping);
-extern int vlynq_set_local_irq(struct vlynq_device *dev, int virq);
-extern int vlynq_set_remote_irq(struct vlynq_device *dev, int virq);
-
-#endif /* __VLYNQ_H__ */
diff --git a/include/media/cec.h b/include/media/cec.h
index 9c007f83569a..53e4b2eb2b26 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -275,7 +275,7 @@ struct cec_adapter {
u32 sequence;
- char input_phys[32];
+ char input_phys[40];
};
static inline void *cec_get_drvdata(const struct cec_adapter *adap)
diff --git a/include/media/ipu-bridge.h b/include/media/ipu-bridge.h
index bdc654a45521..783bda6d5cc3 100644
--- a/include/media/ipu-bridge.h
+++ b/include/media/ipu-bridge.h
@@ -108,7 +108,7 @@ struct ipu_node_names {
char ivsc_sensor_port[7];
char ivsc_ipu_port[7];
char endpoint[11];
- char remote_port[7];
+ char remote_port[9];
char vcm[16];
};
diff --git a/include/media/mipi-csi2.h b/include/media/mipi-csi2.h
index c3d8f12234b1..40fc0264250d 100644
--- a/include/media/mipi-csi2.h
+++ b/include/media/mipi-csi2.h
@@ -19,6 +19,7 @@
#define MIPI_CSI2_DT_NULL 0x10
#define MIPI_CSI2_DT_BLANKING 0x11
#define MIPI_CSI2_DT_EMBEDDED_8B 0x12
+#define MIPI_CSI2_DT_GENERIC_LONG(n) (0x13 + (n) - 1) /* 1..4 */
#define MIPI_CSI2_DT_YUV420_8B 0x18
#define MIPI_CSI2_DT_YUV420_10B 0x19
#define MIPI_CSI2_DT_YUV420_8B_LEGACY 0x1a
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index e0a13505f88d..d82dfdbf6e58 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -284,7 +284,7 @@ struct video_device {
struct v4l2_prio_state *prio;
/* device info */
- char name[32];
+ char name[64];
enum vfl_devnode_type vfl_type;
enum vfl_devnode_direction vfl_dir;
int minor;
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index 8a8977a33ec1..f6f111fae33c 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -13,8 +13,6 @@
#include <media/v4l2-subdev.h>
#include <media/v4l2-dev.h>
-#define V4L2_DEVICE_NAME_SIZE (20 + 16)
-
struct v4l2_ctrl_handler;
/**
@@ -49,7 +47,7 @@ struct v4l2_device {
struct media_device *mdev;
struct list_head subdevs;
spinlock_t lock;
- char name[V4L2_DEVICE_NAME_SIZE];
+ char name[36];
void (*notify)(struct v4l2_subdev *sd,
unsigned int notification, void *arg);
struct v4l2_ctrl_handler *ctrl_handler;
diff --git a/include/media/v4l2-event.h b/include/media/v4l2-event.h
index 4ffa914ade3a..3a0e2588361c 100644
--- a/include/media/v4l2-event.h
+++ b/include/media/v4l2-event.h
@@ -78,7 +78,7 @@ struct v4l2_subscribed_event {
unsigned int elems;
unsigned int first;
unsigned int in_use;
- struct v4l2_kevent events[];
+ struct v4l2_kevent events[] __counted_by(elems);
};
/**
diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h
index b39586dfba35..ed0a44b6eada 100644
--- a/include/media/v4l2-mc.h
+++ b/include/media/v4l2-mc.h
@@ -143,6 +143,9 @@ int v4l2_create_fwnode_links(struct v4l2_subdev *src_sd,
* v4l2_pipeline_pm_get - Increase the use count of a pipeline
* @entity: The root entity of a pipeline
*
+ * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM
+ * ON SUB-DEVICE DRIVERS INSTEAD.
+ *
* Update the use count of all entities in the pipeline and power entities on.
*
* This function is intended to be called in video node open. It uses
@@ -157,6 +160,9 @@ int v4l2_pipeline_pm_get(struct media_entity *entity);
* v4l2_pipeline_pm_put - Decrease the use count of a pipeline
* @entity: The root entity of a pipeline
*
+ * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM
+ * ON SUB-DEVICE DRIVERS INSTEAD.
+ *
* Update the use count of all entities in the pipeline and power entities off.
*
* This function is intended to be called in video node release. It uses
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index d9fca929c10b..c1f90c1223a7 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -446,7 +446,9 @@ enum v4l2_subdev_pre_streamon_flags {
* @s_stream: start (enabled == 1) or stop (enabled == 0) streaming on the
* sub-device. Failure on stop will remove any resources acquired in
* streaming start, while the error code is still returned by the driver.
- * Also see call_s_stream wrapper in v4l2-subdev.c.
+ * The caller shall track the subdev state, and shall not start or stop an
+ * already started or stopped subdev. Also see call_s_stream wrapper in
+ * v4l2-subdev.c.
*
* @g_pixelaspect: callback to return the pixelaspect ratio.
*
@@ -822,8 +824,9 @@ struct v4l2_subdev_state {
* operation shall fail if the pad index it has been called on
* is not valid or in case of unrecoverable failures.
*
- * @set_routing: enable or disable data connection routes described in the
- * subdevice routing table.
+ * @set_routing: Enable or disable data connection routes described in the
+ * subdevice routing table. Subdevs that implement this operation
+ * must set the V4L2_SUBDEV_FL_STREAMS flag.
*
* @enable_streams: Enable the streams defined in streams_mask on the given
* source pad. Subdevs that implement this operation must use the active
@@ -948,8 +951,6 @@ struct v4l2_subdev_internal_ops {
void (*release)(struct v4l2_subdev *sd);
};
-#define V4L2_SUBDEV_NAME_SIZE 32
-
/* Set this flag if this subdev is a i2c device. */
#define V4L2_SUBDEV_FL_IS_I2C (1U << 0)
/* Set this flag if this subdev is a spi device. */
@@ -1059,7 +1060,7 @@ struct v4l2_subdev {
const struct v4l2_subdev_ops *ops;
const struct v4l2_subdev_internal_ops *internal_ops;
struct v4l2_ctrl_handler *ctrl_handler;
- char name[V4L2_SUBDEV_NAME_SIZE];
+ char name[52];
u32 grp_id;
void *dev_priv;
void *host_priv;
diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h
deleted file mode 100644
index 2e01b2e9a1c0..000000000000
--- a/include/media/videobuf-core.h
+++ /dev/null
@@ -1,233 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * generic helper functions for handling video4linux capture buffers
- *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
- *
- * Highly based on video-buf written originally by:
- * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
- * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
- * (c) 2006 Ted Walther and John Sokol
- */
-
-#ifndef _VIDEOBUF_CORE_H
-#define _VIDEOBUF_CORE_H
-
-#include <linux/poll.h>
-#include <linux/videodev2.h>
-
-#define UNSET (-1U)
-
-
-struct videobuf_buffer;
-struct videobuf_queue;
-
-/* --------------------------------------------------------------------- */
-
-/*
- * A small set of helper functions to manage video4linux buffers.
- *
- * struct videobuf_buffer holds the data structures used by the helper
- * functions, additionally some commonly used fields for v4l buffers
- * (width, height, lists, waitqueue) are in there. That struct should
- * be used as first element in the drivers buffer struct.
- *
- * about the mmap helpers (videobuf_mmap_*):
- *
- * The mmaper function allows to map any subset of contiguous buffers.
- * This includes one mmap() call for all buffers (which the original
- * video4linux API uses) as well as one mmap() for every single buffer
- * (which v4l2 uses).
- *
- * If there is a valid mapping for a buffer, buffer->baddr/bsize holds
- * userspace address + size which can be fed into the
- * videobuf_dma_init_user function listed above.
- *
- */
-
-struct videobuf_mapping {
- unsigned int count;
- struct videobuf_queue *q;
-};
-
-enum videobuf_state {
- VIDEOBUF_NEEDS_INIT = 0,
- VIDEOBUF_PREPARED = 1,
- VIDEOBUF_QUEUED = 2,
- VIDEOBUF_ACTIVE = 3,
- VIDEOBUF_DONE = 4,
- VIDEOBUF_ERROR = 5,
- VIDEOBUF_IDLE = 6,
-};
-
-struct videobuf_buffer {
- unsigned int i;
- u32 magic;
-
- /* info about the buffer */
- unsigned int width;
- unsigned int height;
- unsigned int bytesperline; /* use only if != 0 */
- unsigned long size;
- enum v4l2_field field;
- enum videobuf_state state;
- struct list_head stream; /* QBUF/DQBUF list */
-
- /* touched by irq handler */
- struct list_head queue;
- wait_queue_head_t done;
- unsigned int field_count;
- u64 ts;
-
- /* Memory type */
- enum v4l2_memory memory;
-
- /* buffer size */
- size_t bsize;
-
- /* buffer offset (mmap + overlay) */
- size_t boff;
-
- /* buffer addr (userland ptr!) */
- unsigned long baddr;
-
- /* for mmap'ed buffers */
- struct videobuf_mapping *map;
-
- /* Private pointer to allow specific methods to store their data */
- int privsize;
- void *priv;
-};
-
-struct videobuf_queue_ops {
- int (*buf_setup)(struct videobuf_queue *q,
- unsigned int *count, unsigned int *size);
- int (*buf_prepare)(struct videobuf_queue *q,
- struct videobuf_buffer *vb,
- enum v4l2_field field);
- void (*buf_queue)(struct videobuf_queue *q,
- struct videobuf_buffer *vb);
- void (*buf_release)(struct videobuf_queue *q,
- struct videobuf_buffer *vb);
-};
-
-#define MAGIC_QTYPE_OPS 0x12261003
-
-/* Helper operations - device type dependent */
-struct videobuf_qtype_ops {
- u32 magic;
-
- struct videobuf_buffer *(*alloc_vb)(size_t size);
- void *(*vaddr) (struct videobuf_buffer *buf);
- int (*iolock) (struct videobuf_queue *q,
- struct videobuf_buffer *vb,
- struct v4l2_framebuffer *fbuf);
- int (*sync) (struct videobuf_queue *q,
- struct videobuf_buffer *buf);
- int (*mmap_mapper) (struct videobuf_queue *q,
- struct videobuf_buffer *buf,
- struct vm_area_struct *vma);
-};
-
-struct videobuf_queue {
- struct mutex vb_lock;
- struct mutex *ext_lock;
- spinlock_t *irqlock;
- struct device *dev;
-
- wait_queue_head_t wait; /* wait if queue is empty */
-
- enum v4l2_buf_type type;
- unsigned int msize;
- enum v4l2_field field;
- enum v4l2_field last; /* for field=V4L2_FIELD_ALTERNATE */
- struct videobuf_buffer *bufs[VIDEO_MAX_FRAME];
- const struct videobuf_queue_ops *ops;
- struct videobuf_qtype_ops *int_ops;
-
- unsigned int streaming:1;
- unsigned int reading:1;
-
- /* capture via mmap() + ioctl(QBUF/DQBUF) */
- struct list_head stream;
-
- /* capture via read() */
- unsigned int read_off;
- struct videobuf_buffer *read_buf;
-
- /* driver private data */
- void *priv_data;
-};
-
-static inline void videobuf_queue_lock(struct videobuf_queue *q)
-{
- if (!q->ext_lock)
- mutex_lock(&q->vb_lock);
-}
-
-static inline void videobuf_queue_unlock(struct videobuf_queue *q)
-{
- if (!q->ext_lock)
- mutex_unlock(&q->vb_lock);
-}
-
-int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
- int non_blocking, int intr);
-int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
- struct v4l2_framebuffer *fbuf);
-
-struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q);
-
-/* Used on videobuf-dvb */
-void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
- struct videobuf_buffer *buf);
-
-void videobuf_queue_core_init(struct videobuf_queue *q,
- const struct videobuf_queue_ops *ops,
- struct device *dev,
- spinlock_t *irqlock,
- enum v4l2_buf_type type,
- enum v4l2_field field,
- unsigned int msize,
- void *priv,
- struct videobuf_qtype_ops *int_ops,
- struct mutex *ext_lock);
-int videobuf_queue_is_busy(struct videobuf_queue *q);
-void videobuf_queue_cancel(struct videobuf_queue *q);
-
-enum v4l2_field videobuf_next_field(struct videobuf_queue *q);
-int videobuf_reqbufs(struct videobuf_queue *q,
- struct v4l2_requestbuffers *req);
-int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b);
-int videobuf_qbuf(struct videobuf_queue *q,
- struct v4l2_buffer *b);
-int videobuf_dqbuf(struct videobuf_queue *q,
- struct v4l2_buffer *b, int nonblocking);
-int videobuf_streamon(struct videobuf_queue *q);
-int videobuf_streamoff(struct videobuf_queue *q);
-
-void videobuf_stop(struct videobuf_queue *q);
-
-int videobuf_read_start(struct videobuf_queue *q);
-void videobuf_read_stop(struct videobuf_queue *q);
-ssize_t videobuf_read_stream(struct videobuf_queue *q,
- char __user *data, size_t count, loff_t *ppos,
- int vbihack, int nonblocking);
-ssize_t videobuf_read_one(struct videobuf_queue *q,
- char __user *data, size_t count, loff_t *ppos,
- int nonblocking);
-__poll_t videobuf_poll_stream(struct file *file,
- struct videobuf_queue *q,
- poll_table *wait);
-
-int videobuf_mmap_setup(struct videobuf_queue *q,
- unsigned int bcount, unsigned int bsize,
- enum v4l2_memory memory);
-int __videobuf_mmap_setup(struct videobuf_queue *q,
- unsigned int bcount, unsigned int bsize,
- enum v4l2_memory memory);
-int videobuf_mmap_free(struct videobuf_queue *q);
-int videobuf_mmap_mapper(struct videobuf_queue *q,
- struct vm_area_struct *vma);
-
-#endif
diff --git a/include/media/videobuf-dma-contig.h b/include/media/videobuf-dma-contig.h
deleted file mode 100644
index 525883b2c53e..000000000000
--- a/include/media/videobuf-dma-contig.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * helper functions for physically contiguous capture buffers
- *
- * The functions support hardware lacking scatter gather support
- * (i.e. the buffers must be linear in physical memory)
- *
- * Copyright (c) 2008 Magnus Damm
- */
-#ifndef _VIDEOBUF_DMA_CONTIG_H
-#define _VIDEOBUF_DMA_CONTIG_H
-
-#include <linux/dma-mapping.h>
-#include <media/videobuf-core.h>
-
-void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
- const struct videobuf_queue_ops *ops,
- struct device *dev,
- spinlock_t *irqlock,
- enum v4l2_buf_type type,
- enum v4l2_field field,
- unsigned int msize,
- void *priv,
- struct mutex *ext_lock);
-
-dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf);
-void videobuf_dma_contig_free(struct videobuf_queue *q,
- struct videobuf_buffer *buf);
-
-#endif /* _VIDEOBUF_DMA_CONTIG_H */
diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h
deleted file mode 100644
index 930ff8d454fc..000000000000
--- a/include/media/videobuf-dma-sg.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * helper functions for SG DMA video4linux capture buffers
- *
- * The functions expect the hardware being able to scatter gather
- * (i.e. the buffers are not linear in physical memory, but fragmented
- * into PAGE_SIZE chunks). They also assume the driver does not need
- * to touch the video data.
- *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
- *
- * Highly based on video-buf written originally by:
- * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
- * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
- * (c) 2006 Ted Walther and John Sokol
- */
-#ifndef _VIDEOBUF_DMA_SG_H
-#define _VIDEOBUF_DMA_SG_H
-
-#include <media/videobuf-core.h>
-
-/* --------------------------------------------------------------------- */
-
-/*
- * A small set of helper functions to manage buffers (both userland
- * and kernel) for DMA.
- *
- * videobuf_dma_init_*()
- * creates a buffer. The userland version takes a userspace
- * pointer + length. The kernel version just wants the size and
- * does memory allocation too using vmalloc_32().
- *
- * videobuf_dma_*()
- * see Documentation/core-api/dma-api-howto.rst, these functions to
- * basically the same. The map function does also build a
- * scatterlist for the buffer (and unmap frees it ...)
- *
- * videobuf_dma_free()
- * no comment ...
- *
- */
-
-struct videobuf_dmabuf {
- u32 magic;
-
- /* for userland buffer */
- int offset;
- size_t size;
- struct page **pages;
-
- /* for kernel buffers */
- void *vaddr;
- struct page **vaddr_pages;
- dma_addr_t *dma_addr;
- struct device *dev;
-
- /* for overlay buffers (pci-pci dma) */
- dma_addr_t bus_addr;
-
- /* common */
- struct scatterlist *sglist;
- int sglen;
- unsigned long nr_pages;
- int direction;
-};
-
-struct videobuf_dma_sg_memory {
- u32 magic;
-
- /* for mmap'ed buffers */
- struct videobuf_dmabuf dma;
-};
-
-/*
- * Scatter-gather DMA buffer API.
- *
- * These functions provide a simple way to create a page list and a
- * scatter-gather list from a kernel, userspace of physical address and map the
- * memory for DMA operation.
- *
- * Despite the name, this is totally unrelated to videobuf, except that
- * videobuf-dma-sg uses the same API internally.
- */
-int videobuf_dma_free(struct videobuf_dmabuf *dma);
-
-int videobuf_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma);
-struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf);
-
-void *videobuf_sg_alloc(size_t size);
-
-void videobuf_queue_sg_init(struct videobuf_queue *q,
- const struct videobuf_queue_ops *ops,
- struct device *dev,
- spinlock_t *irqlock,
- enum v4l2_buf_type type,
- enum v4l2_field field,
- unsigned int msize,
- void *priv,
- struct mutex *ext_lock);
-
-#endif /* _VIDEOBUF_DMA_SG_H */
-
diff --git a/include/media/videobuf-vmalloc.h b/include/media/videobuf-vmalloc.h
deleted file mode 100644
index e930dbb9d7f4..000000000000
--- a/include/media/videobuf-vmalloc.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * helper functions for vmalloc capture buffers
- *
- * The functions expect the hardware being able to scatter gather
- * (i.e. the buffers are not linear in physical memory, but fragmented
- * into PAGE_SIZE chunks). They also assume the driver does not need
- * to touch the video data.
- *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
- */
-#ifndef _VIDEOBUF_VMALLOC_H
-#define _VIDEOBUF_VMALLOC_H
-
-#include <media/videobuf-core.h>
-
-/* --------------------------------------------------------------------- */
-
-struct videobuf_vmalloc_memory {
- u32 magic;
-
- void *vaddr;
-
- /* remap_vmalloc_range seems to need to run
- * after mmap() on some cases */
- struct vm_area_struct *vma;
-};
-
-void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
- const struct videobuf_queue_ops *ops,
- struct device *dev,
- spinlock_t *irqlock,
- enum v4l2_buf_type type,
- enum v4l2_field field,
- unsigned int msize,
- void *priv,
- struct mutex *ext_lock);
-
-void *videobuf_to_vmalloc(struct videobuf_buffer *buf);
-
-void videobuf_vmalloc_free(struct videobuf_buffer *buf);
-
-#endif
diff --git a/include/net/flow.h b/include/net/flow.h
index 7f0adda3bf2f..335bbc52171c 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -40,8 +40,8 @@ struct flowi_common {
#define FLOWI_FLAG_KNOWN_NH 0x02
__u32 flowic_secid;
kuid_t flowic_uid;
- struct flowi_tunnel flowic_tun_key;
__u32 flowic_multipath_hash;
+ struct flowi_tunnel flowic_tun_key;
};
union flowi_uli {
diff --git a/include/net/netfilter/nf_conntrack_act_ct.h b/include/net/netfilter/nf_conntrack_act_ct.h
index 078d3c52c03f..e5f2f0b73a9a 100644
--- a/include/net/netfilter/nf_conntrack_act_ct.h
+++ b/include/net/netfilter/nf_conntrack_act_ct.h
@@ -20,7 +20,22 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_find(const struct nf
#endif
}
-static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *ct)
+static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
+{
+#if IS_ENABLED(CONFIG_NET_ACT_CT)
+ struct nf_conn_act_ct_ext *act_ct_ext;
+
+ act_ct_ext = nf_conn_act_ct_ext_find(ct);
+ if (dev_net(skb->dev) == &init_net && act_ct_ext)
+ act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
+#endif
+}
+
+static inline struct
+nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
{
#if IS_ENABLED(CONFIG_NET_ACT_CT)
struct nf_conn_act_ct_ext *act_ct = nf_ct_ext_find(ct, NF_CT_EXT_ACT_CT);
@@ -29,22 +44,11 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *
return act_ct;
act_ct = nf_ct_ext_add(ct, NF_CT_EXT_ACT_CT, GFP_ATOMIC);
+ nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
return act_ct;
#else
return NULL;
#endif
}
-static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
- enum ip_conntrack_info ctinfo)
-{
-#if IS_ENABLED(CONFIG_NET_ACT_CT)
- struct nf_conn_act_ct_ext *act_ct_ext;
-
- act_ct_ext = nf_conn_act_ct_ext_find(ct);
- if (dev_net(skb->dev) == &init_net && act_ct_ext)
- act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
-#endif
-}
-
#endif /* _NF_CONNTRACK_ACT_CT_H */
diff --git a/include/net/tcp_ao.h b/include/net/tcp_ao.h
index a375a171ef3c..b56be10838f0 100644
--- a/include/net/tcp_ao.h
+++ b/include/net/tcp_ao.h
@@ -124,7 +124,7 @@ struct tcp_ao_info {
#define tcp_hash_fail(msg, family, skb, fmt, ...) \
do { \
const struct tcphdr *th = tcp_hdr(skb); \
- char hdr_flags[5] = {}; \
+ char hdr_flags[6]; \
char *f = hdr_flags; \
\
if (th->fin) \
@@ -133,17 +133,18 @@ do { \
*f++ = 'S'; \
if (th->rst) \
*f++ = 'R'; \
+ if (th->psh) \
+ *f++ = 'P'; \
if (th->ack) \
- *f++ = 'A'; \
- if (f != hdr_flags) \
- *f = ' '; \
+ *f++ = '.'; \
+ *f = 0; \
if ((family) == AF_INET) { \
- net_info_ratelimited("%s for (%pI4, %d)->(%pI4, %d) %s" fmt "\n", \
+ net_info_ratelimited("%s for %pI4.%d->%pI4.%d [%s] " fmt "\n", \
msg, &ip_hdr(skb)->saddr, ntohs(th->source), \
&ip_hdr(skb)->daddr, ntohs(th->dest), \
hdr_flags, ##__VA_ARGS__); \
} else { \
- net_info_ratelimited("%s for [%pI6c]:%u->[%pI6c]:%u %s" fmt "\n", \
+ net_info_ratelimited("%s for [%pI6c].%d->[%pI6c].%d [%s]" fmt "\n", \
msg, &ipv6_hdr(skb)->saddr, ntohs(th->source), \
&ipv6_hdr(skb)->daddr, ntohs(th->dest), \
hdr_flags, ##__VA_ARGS__); \
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index a5ef84944a06..71ae37d3bedd 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -96,7 +96,6 @@ struct tegra_smmu_soc {
struct tegra_mc;
struct tegra_smmu;
-struct gart_device;
#ifdef CONFIG_TEGRA_IOMMU_SMMU
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
@@ -116,28 +115,6 @@ static inline void tegra_smmu_remove(struct tegra_smmu *smmu)
}
#endif
-#ifdef CONFIG_TEGRA_IOMMU_GART
-struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc);
-int tegra_gart_suspend(struct gart_device *gart);
-int tegra_gart_resume(struct gart_device *gart);
-#else
-static inline struct gart_device *
-tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
-{
- return ERR_PTR(-ENODEV);
-}
-
-static inline int tegra_gart_suspend(struct gart_device *gart)
-{
- return -ENODEV;
-}
-
-static inline int tegra_gart_resume(struct gart_device *gart)
-{
- return -ENODEV;
-}
-#endif
-
struct tegra_mc_reset {
const char *name;
unsigned long id;
@@ -185,8 +162,6 @@ struct tegra_mc_ops {
*/
int (*probe)(struct tegra_mc *mc);
void (*remove)(struct tegra_mc *mc);
- int (*suspend)(struct tegra_mc *mc);
- int (*resume)(struct tegra_mc *mc);
irqreturn_t (*handle_irq)(int irq, void *data);
int (*probe_device)(struct tegra_mc *mc, struct device *dev);
};
@@ -225,7 +200,6 @@ struct tegra_mc {
struct tegra_bpmp *bpmp;
struct device *dev;
struct tegra_smmu *smmu;
- struct gart_device *gart;
void __iomem *regs;
void __iomem *bcast_ch_regs;
void __iomem **ch_regs;
diff --git a/include/uapi/linux/counter.h b/include/uapi/linux/counter.h
index fc248ef00e86..008a691c254b 100644
--- a/include/uapi/linux/counter.h
+++ b/include/uapi/linux/counter.h
@@ -38,7 +38,7 @@ enum counter_scope {
*
* For example, if the Count 2 ceiling extension of Counter device 4 is desired,
* set type equal to COUNTER_COMPONENT_EXTENSION, scope equal to
- * COUNTER_COUNT_SCOPE, parent equal to 2, and id equal to the value provided by
+ * COUNTER_SCOPE_COUNT, parent equal to 2, and id equal to the value provided by
* the respective /sys/bus/counter/devices/counter4/count2/ceiling_component_id
* sysfs attribute.
*/
diff --git a/include/uapi/linux/gsmmux.h b/include/uapi/linux/gsmmux.h
index 4c878d84dbda..3a93f17ca943 100644
--- a/include/uapi/linux/gsmmux.h
+++ b/include/uapi/linux/gsmmux.h
@@ -1,4 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (c) 2022/23 Siemens Mobility GmbH */
#ifndef _LINUX_GSMMUX_H
#define _LINUX_GSMMUX_H
diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h
index 606b52e88ce3..3d1987e1bb2d 100644
--- a/include/uapi/linux/idxd.h
+++ b/include/uapi/linux/idxd.h
@@ -31,6 +31,7 @@ enum idxd_scmd_stat {
IDXD_SCMD_WQ_IRQ_ERR = 0x80100000,
IDXD_SCMD_WQ_USER_NO_IOMMU = 0x80110000,
IDXD_SCMD_DEV_EVL_ERR = 0x80120000,
+ IDXD_SCMD_WQ_NO_DRV_NAME = 0x80200000,
};
#define IDXD_SCMD_SOFTERR_MASK 0x80000000
diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h
index c79f2f046a0b..9c2ffdcd6623 100644
--- a/include/uapi/linux/iio/types.h
+++ b/include/uapi/linux/iio/types.h
@@ -47,6 +47,10 @@ enum iio_chan_type {
IIO_POSITIONRELATIVE,
IIO_PHASE,
IIO_MASSCONCENTRATION,
+ IIO_DELTA_ANGL,
+ IIO_DELTA_VELOCITY,
+ IIO_COLORTEMP,
+ IIO_CHROMATICITY,
};
enum iio_modifier {
diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h
index 81d09ef9aa50..25c8d7677539 100644
--- a/include/uapi/linux/landlock.h
+++ b/include/uapi/linux/landlock.h
@@ -31,6 +31,12 @@ struct landlock_ruleset_attr {
* this access right.
*/
__u64 handled_access_fs;
+ /**
+ * @handled_access_net: Bitmask of actions (cf. `Network flags`_)
+ * that is handled by this ruleset and should then be forbidden if no
+ * rule explicitly allows them.
+ */
+ __u64 handled_access_net;
};
/*
@@ -54,6 +60,11 @@ enum landlock_rule_type {
* landlock_path_beneath_attr .
*/
LANDLOCK_RULE_PATH_BENEATH = 1,
+ /**
+ * @LANDLOCK_RULE_NET_PORT: Type of a &struct
+ * landlock_net_port_attr .
+ */
+ LANDLOCK_RULE_NET_PORT,
};
/**
@@ -80,6 +91,31 @@ struct landlock_path_beneath_attr {
} __attribute__((packed));
/**
+ * struct landlock_net_port_attr - Network port definition
+ *
+ * Argument of sys_landlock_add_rule().
+ */
+struct landlock_net_port_attr {
+ /**
+ * @allowed_access: Bitmask of allowed access network for a port
+ * (cf. `Network flags`_).
+ */
+ __u64 allowed_access;
+ /**
+ * @port: Network port in host endianness.
+ *
+ * It should be noted that port 0 passed to :manpage:`bind(2)` will
+ * bind to an available port from a specific port range. This can be
+ * configured thanks to the ``/proc/sys/net/ipv4/ip_local_port_range``
+ * sysctl (also used for IPv6). A Landlock rule with port 0 and the
+ * ``LANDLOCK_ACCESS_NET_BIND_TCP`` right means that requesting to bind
+ * on port 0 is allowed and it will automatically translate to binding
+ * on the related port range.
+ */
+ __u64 port;
+};
+
+/**
* DOC: fs_access
*
* A set of actions on kernel objects may be defined by an attribute (e.g.
@@ -189,4 +225,23 @@ struct landlock_path_beneath_attr {
#define LANDLOCK_ACCESS_FS_TRUNCATE (1ULL << 14)
/* clang-format on */
+/**
+ * DOC: net_access
+ *
+ * Network flags
+ * ~~~~~~~~~~~~~~~~
+ *
+ * These flags make it possible to restrict a sandboxed process to a set of network
+ * actions. This is supported since the Landlock ABI version 4.
+ *
+ * TCP sockets with allowed actions:
+ *
+ * - %LANDLOCK_ACCESS_NET_BIND_TCP: Bind a TCP socket to a local port.
+ * - %LANDLOCK_ACCESS_NET_CONNECT_TCP: Connect an active TCP socket to
+ * a remote port.
+ */
+/* clang-format off */
+#define LANDLOCK_ACCESS_NET_BIND_TCP (1ULL << 0)
+#define LANDLOCK_ACCESS_NET_CONNECT_TCP (1ULL << 1)
+/* clang-format on */
#endif /* _UAPI_LINUX_LANDLOCK_H */
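For readers new to the net_port rules added above, a minimal usage sketch (error handling omitted; the landlock_* helpers are assumed to be the thin syscall(2) wrappers used in the kernel samples):

struct landlock_ruleset_attr ruleset_attr = {
	.handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP |
			      LANDLOCK_ACCESS_NET_CONNECT_TCP,
};
struct landlock_net_port_attr net_attr = {
	.allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP,
	.port = 8080,	/* illustrative port, host endianness */
};
int ruleset_fd;

ruleset_fd = landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT, &net_attr, 0);
/* After prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) and
 * landlock_restrict_self(ruleset_fd, 0), bind(2) on TCP port 8080 stays
 * allowed while connect(2) to any TCP port is denied. */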
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index a03c543cb072..f05f747e444d 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -34,7 +34,7 @@
#define MEDIA_BUS_FMT_FIXED 0x0001
-/* RGB - next is 0x1025 */
+/* RGB - next is 0x1026 */
#define MEDIA_BUS_FMT_RGB444_1X12 0x1016
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002
@@ -46,6 +46,7 @@
#define MEDIA_BUS_FMT_RGB565_2X8_BE 0x1007
#define MEDIA_BUS_FMT_RGB565_2X8_LE 0x1008
#define MEDIA_BUS_FMT_RGB666_1X18 0x1009
+#define MEDIA_BUS_FMT_RGB666_2X9_BE 0x1025
#define MEDIA_BUS_FMT_BGR666_1X18 0x1023
#define MEDIA_BUS_FMT_RBG888_1X24 0x100e
#define MEDIA_BUS_FMT_RGB666_1X24_CPADHI 0x1015
diff --git a/include/uapi/linux/mei.h b/include/uapi/linux/mei.h
index 6e57743628c0..171c5cce3641 100644
--- a/include/uapi/linux/mei.h
+++ b/include/uapi/linux/mei.h
@@ -14,8 +14,8 @@
* FW Client (given by UUID). This opens a communication channel
* between a host client and a FW client. From this point every read and write
* will communicate with the associated FW client.
- * Only in close() (file_operation release()) the communication between
- * the clients is disconnected
+ * Only in close() (file_operation release()) is the communication between
+ * the clients disconnected.
*
* The IOCTL argument is a struct with a union that contains
* the input parameter and the output parameter for this IOCTL.
@@ -51,7 +51,7 @@ struct mei_connect_client_data {
* DOC: set and unset event notification for a connected client
*
* The IOCTL argument is 1 for enabling event notification and 0 for
- * disabling the service
+ * disabling the service.
 * Return: -EOPNOTSUPP if the device doesn't support the feature
*/
#define IOCTL_MEI_NOTIFY_SET _IOW('H', 0x02, __u32)
@@ -59,8 +59,8 @@ struct mei_connect_client_data {
/**
* DOC: retrieve notification
*
- * The IOCTL output argument is 1 if an event was is pending and 0 otherwise
- * the ioctl has to be called in order to acknowledge pending event
+ * The IOCTL output argument is 1 if an event was pending and 0 otherwise.
+ * The ioctl has to be called in order to acknowledge the pending event.
*
 * Return: -EOPNOTSUPP if the device doesn't support the feature
*/
@@ -98,14 +98,14 @@ struct mei_connect_client_data_vtag {
* FW Client (given by UUID), and virtual tag (vtag).
* The IOCTL opens a communication channel between a host client and
* a FW client on a tagged channel. From this point on, every read
- * and write will communicate with the associated FW client with
+ * and write will communicate with the associated FW client
* on the tagged channel.
 * Upon close() the communication is terminated.
*
* The IOCTL argument is a struct with a union that contains
* the input parameter and the output parameter for this IOCTL.
*
- * The input parameter is UUID of the FW Client, a vtag [0,255]
+ * The input parameter is the UUID of the FW Client, a vtag [0,255].
* The output parameter is the properties of the FW client
 * (FW protocol version and max message size).
*
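A hedged sketch of the notification ioctls documented above (the /dev/mei0 node name and IOCTL_MEI_NOTIFY_GET are assumptions taken from the rest of this header, error handling omitted):

__u32 enable = 1;
__u32 pending = 0;
int fd = open("/dev/mei0", O_RDWR);

ioctl(fd, IOCTL_MEI_NOTIFY_SET, &enable);	/* enable event notification */
/* ... later, once poll(2) reports the notification ... */
ioctl(fd, IOCTL_MEI_NOTIFY_GET, &pending);	/* pending == 1 acknowledges the event */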
diff --git a/include/uapi/linux/nfsd_netlink.h b/include/uapi/linux/nfsd_netlink.h
index c8ae72466ee6..3cd044edee5d 100644
--- a/include/uapi/linux/nfsd_netlink.h
+++ b/include/uapi/linux/nfsd_netlink.h
@@ -3,8 +3,8 @@
/* Documentation/netlink/specs/nfsd.yaml */
/* YNL-GEN uapi header */
-#ifndef _UAPI_LINUX_NFSD_H
-#define _UAPI_LINUX_NFSD_H
+#ifndef _UAPI_LINUX_NFSD_NETLINK_H
+#define _UAPI_LINUX_NFSD_NETLINK_H
#define NFSD_FAMILY_NAME "nfsd"
#define NFSD_FAMILY_VERSION 1
@@ -36,4 +36,4 @@ enum {
NFSD_CMD_MAX = (__NFSD_CMD_MAX - 1)
};
-#endif /* _UAPI_LINUX_NFSD_H */
+#endif /* _UAPI_LINUX_NFSD_NETLINK_H */
diff --git a/include/uapi/linux/npcm-video.h b/include/uapi/linux/npcm-video.h
new file mode 100644
index 000000000000..1d39f6f38c96
--- /dev/null
+++ b/include/uapi/linux/npcm-video.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Controls header for NPCM video driver
+ *
+ * Copyright (C) 2022 Nuvoton Technologies
+ */
+
+#ifndef _UAPI_LINUX_NPCM_VIDEO_H
+#define _UAPI_LINUX_NPCM_VIDEO_H
+
+#include <linux/v4l2-controls.h>
+
+/*
+ * Check Documentation/userspace-api/media/drivers/npcm-video.rst for control
+ * details.
+ */
+
+/*
+ * This control is meant to set the mode of NPCM Video Capture/Differentiation
+ * (VCD) engine.
+ *
+ * The VCD engine supports two modes:
+ * COMPLETE - Capture the next complete frame into memory.
+ * DIFF - Compare the incoming frame with the frame stored in memory, and
+ * update the differentiated frame in memory.
+ */
+#define V4L2_CID_NPCM_CAPTURE_MODE (V4L2_CID_USER_NPCM_BASE + 0)
+
+enum v4l2_npcm_capture_mode {
+ V4L2_NPCM_CAPTURE_MODE_COMPLETE = 0, /* COMPLETE mode */
+ V4L2_NPCM_CAPTURE_MODE_DIFF = 1, /* DIFF mode */
+};
+
+/*
+ * This control is meant to get the count of compressed HEXTILE rectangles,
+ * which corresponds to the number of differentiated frames if VCD is in DIFF
+ * mode. The count will always be 1 if VCD is in COMPLETE mode.
+ */
+#define V4L2_CID_NPCM_RECT_COUNT (V4L2_CID_USER_NPCM_BASE + 1)
+
+#endif /* _UAPI_LINUX_NPCM_VIDEO_H */
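A hedged userspace sketch of driving the two new controls through the standard control ioctls (the video node name is an assumption):

struct v4l2_control ctrl = {
	.id    = V4L2_CID_NPCM_CAPTURE_MODE,
	.value = V4L2_NPCM_CAPTURE_MODE_DIFF,
};
int fd = open("/dev/video0", O_RDWR);

ioctl(fd, VIDIOC_S_CTRL, &ctrl);	/* switch the VCD engine to DIFF mode */

ctrl.id = V4L2_CID_NPCM_RECT_COUNT;
ioctl(fd, VIDIOC_G_CTRL, &ctrl);	/* ctrl.value: HEXTILE rectangle count */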
diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
index 1c9da485318f..b44ba7dcdefc 100644
--- a/include/uapi/linux/psp-sev.h
+++ b/include/uapi/linux/psp-sev.h
@@ -68,6 +68,7 @@ typedef enum {
SEV_RET_INVALID_PARAM,
SEV_RET_RESOURCE_LIMIT,
SEV_RET_SECURE_DATA_INVALID,
+ SEV_RET_INVALID_KEY = 0x27,
SEV_RET_MAX,
} sev_ret_code;
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index add349889d0a..9c007a106330 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -1,22 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
- * linux/drivers/char/serial_core.h
- *
* Copyright (C) 2000 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _UAPILINUX_SERIAL_CORE_H
#define _UAPILINUX_SERIAL_CORE_H
@@ -27,6 +11,8 @@
* The type definitions. These are from Ted Ts'o's serial.h
* By historical reasons the values from 0 to 13 are defined
* in the include/uapi/linux/serial.h, do not define them here.
+ * Values 0 to 19 are used by setserial from busybox and must never
+ * be modified.
*/
#define PORT_NS16550A 14
#define PORT_XSCALE 15
@@ -245,4 +231,7 @@
/* Sunplus UART */
#define PORT_SUNPLUS 123
+/* Generic type identifier for ports whose type is not important to userspace. */
+#define PORT_GENERIC (-1)
+
#endif /* _UAPILINUX_SERIAL_CORE_H */
diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h
index 08b3527e1b93..9c987b04e2d0 100644
--- a/include/uapi/linux/serial_reg.h
+++ b/include/uapi/linux/serial_reg.h
@@ -49,6 +49,7 @@
#define UART_IIR_FIFO_ENABLED_8250 0x00 /* 8250: no FIFO */
#define UART_IIR_FIFO_ENABLED_16550 0x80 /* 16550: (broken/unusable) FIFO */
#define UART_IIR_FIFO_ENABLED_16550A 0xc0 /* 16550A: FIFO enabled */
+#define UART_IIR_FIFO_ENABLED_16750 0xe0 /* 16750: 64 bytes FIFO enabled */
#define UART_FCR 2 /* Out: FIFO Control Register */
#define UART_FCR_ENABLE_FIFO 0x01 /* Enable the FIFO */
diff --git a/include/uapi/linux/sev-guest.h b/include/uapi/linux/sev-guest.h
index 2aa39112cf8d..154a87a1eca9 100644
--- a/include/uapi/linux/sev-guest.h
+++ b/include/uapi/linux/sev-guest.h
@@ -14,9 +14,11 @@
#include <linux/types.h>
+#define SNP_REPORT_USER_DATA_SIZE 64
+
struct snp_report_req {
/* user data that should be included in the report */
- __u8 user_data[64];
+ __u8 user_data[SNP_REPORT_USER_DATA_SIZE];
/* The vmpl level to be included in the report */
__u32 vmpl;
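With the new macro, callers can size the report nonce without a magic 64; a minimal sketch (the surrounding SNP_GET_REPORT ioctl plumbing is omitted):

struct snp_report_req req = {};
__u8 nonce[SNP_REPORT_USER_DATA_SIZE];

/* Caller-chosen nonce (e.g. supplied by a remote verifier) bound into the report. */
memcpy(req.user_data, nonce, SNP_REPORT_USER_DATA_SIZE);
req.vmpl = 0;	/* request a VMPL0 attestation report */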
diff --git a/include/uapi/linux/usb/raw_gadget.h b/include/uapi/linux/usb/raw_gadget.h
index c7d2199134d7..f0224a8dc858 100644
--- a/include/uapi/linux/usb/raw_gadget.h
+++ b/include/uapi/linux/usb/raw_gadget.h
@@ -44,6 +44,16 @@ enum usb_raw_event_type {
/* This event is queued when a new control request arrived to ep0. */
USB_RAW_EVENT_CONTROL = 2,
+ /*
+ * These events are queued when the gadget driver is suspended,
+ * resumed, reset, or disconnected. Note that some UDCs (e.g. dwc2)
+ * report a disconnect event instead of a reset.
+ */
+ USB_RAW_EVENT_SUSPEND = 3,
+ USB_RAW_EVENT_RESUME = 4,
+ USB_RAW_EVENT_RESET = 5,
+ USB_RAW_EVENT_DISCONNECT = 6,
+
/* The list might grow in the future. */
};
@@ -54,8 +64,8 @@ enum usb_raw_event_type {
* actual length of the fetched event data.
* @data: A buffer to store the fetched event data.
*
- * Currently the fetched data buffer is empty for USB_RAW_EVENT_CONNECT,
- * and contains struct usb_ctrlrequest for USB_RAW_EVENT_CONTROL.
+ * The fetched event data buffer contains struct usb_ctrlrequest for
+ * USB_RAW_EVENT_CONTROL and is empty for other events.
*/
struct usb_raw_event {
__u32 type;
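Gadget implementations that fetch events in a loop now have four more types to dispatch on; a hedged sketch (the fetch ioctl itself is left out):

static void handle_event(struct usb_raw_event *ev)
{
	switch (ev->type) {
	case USB_RAW_EVENT_CONTROL:
		/* ev->data holds a struct usb_ctrlrequest */
		break;
	case USB_RAW_EVENT_SUSPEND:
	case USB_RAW_EVENT_RESUME:
		/* data buffer is empty for these events */
		break;
	case USB_RAW_EVENT_RESET:
	case USB_RAW_EVENT_DISCONNECT:
		/* some UDCs (e.g. dwc2) only report DISCONNECT, not RESET */
		break;
	default:
		/* the list might grow; ignore unknown types */
		break;
	}
}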
diff --git a/include/uapi/linux/user_events.h b/include/uapi/linux/user_events.h
index 2984aae4a2b4..f74f3aedd49c 100644
--- a/include/uapi/linux/user_events.h
+++ b/include/uapi/linux/user_events.h
@@ -17,6 +17,15 @@
/* Create dynamic location entry within a 32-bit value */
#define DYN_LOC(offset, size) ((size) << 16 | (offset))
+/* List of supported registration flags */
+enum user_reg_flag {
+ /* Event will not delete upon last reference closing */
+ USER_EVENT_REG_PERSIST = 1U << 0,
+
+ /* This value or above is currently non-ABI */
+ USER_EVENT_REG_MAX = 1U << 1,
+};
+
/*
* Describes an event registration and stores the results of the registration.
* This structure is passed to the DIAG_IOCSREG ioctl, callers at a minimum
@@ -33,7 +42,7 @@ struct user_reg {
/* Input: Enable size in bytes at address */
__u8 enable_size;
- /* Input: Flags for future use, set to 0 */
+ /* Input: Flags to use, if any */
__u16 flags;
/* Input: Address to update when enabled */
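A hedged registration sketch using the new flag (the remaining struct user_reg fields, DIAG_IOCSREG and the tracefs path are assumptions taken from the rest of this header and its documentation):

static __u32 enabled;	/* kernel flips the chosen bit here when the event is enabled */

struct user_reg reg = {
	.size        = sizeof(reg),
	.enable_bit  = 0,
	.enable_size = sizeof(enabled),
	.flags       = USER_EVENT_REG_PERSIST,	/* keep the event after the last fd closes */
	.enable_addr = (__u64)(unsigned long)&enabled,
	.name_args   = (__u64)(unsigned long)"mytest u32 value",
};

ioctl(data_fd, DIAG_IOCSREG, &reg);	/* data_fd: /sys/kernel/tracing/user_events_data */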
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index c3604a0a3e30..68db66d4aae8 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -203,6 +203,12 @@ enum v4l2_colorfx {
*/
#define V4L2_CID_USER_ASPEED_BASE (V4L2_CID_USER_BASE + 0x11a0)
+/*
+ * The base for Nuvoton NPCM driver controls.
+ * We reserve 16 controls for this driver.
+ */
+#define V4L2_CID_USER_NPCM_BASE (V4L2_CID_USER_BASE + 0x11b0)
+
/* MPEG-class control IDs */
/* The MPEG controls are applicable to all codec controls
* and the 'MPEG' part of the define is historical */
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index f5c48b61ab62..649560c685f1 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -219,4 +219,12 @@
*/
#define VHOST_VDPA_RESUME _IO(VHOST_VIRTIO, 0x7E)
+/* Get the group for the descriptor table including driver & device areas
+ * of a virtqueue: read index, write group in num.
+ * The virtqueue index is stored in the index field of vhost_vring_state.
+ * The group ID of the descriptor table for this specific virtqueue
+ * is returned via num field of vhost_vring_state.
+ */
+#define VHOST_VDPA_GET_VRING_DESC_GROUP _IOWR(VHOST_VIRTIO, 0x7F, \
+ struct vhost_vring_state)
#endif
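A hedged sketch of the new ioctl from a vDPA userspace backend (vdpa_fd and the virtqueue index are placeholders):

struct vhost_vring_state state = {
	.index = 0,	/* virtqueue to query */
};

ioctl(vdpa_fd, VHOST_VDPA_GET_VRING_DESC_GROUP, &state);
/* state.num now holds the group of this virtqueue's descriptor table; only
 * meaningful when VHOST_BACKEND_F_DESC_ASID has been negotiated. */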
diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h
index 2d827d22cd99..d7656908f730 100644
--- a/include/uapi/linux/vhost_types.h
+++ b/include/uapi/linux/vhost_types.h
@@ -185,5 +185,12 @@ struct vhost_vdpa_iova_range {
* DRIVER_OK
*/
#define VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK 0x6
+/* Device may expose the virtqueue's descriptor area, driver area and
+ * device area to a different group for ASID binding than where its
+ * buffers may reside. Requires VHOST_BACKEND_F_IOTLB_ASID.
+ */
+#define VHOST_BACKEND_F_DESC_ASID 0x7
+/* IOTLB doesn't flush memory mappings across device reset */
+#define VHOST_BACKEND_F_IOTLB_PERSIST 0x8
#endif
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 78260e5d9985..c3d4e490ce7c 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -804,6 +804,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_QC08C v4l2_fourcc('Q', '0', '8', 'C') /* Qualcomm 8-bit compressed */
#define V4L2_PIX_FMT_QC10C v4l2_fourcc('Q', '1', '0', 'C') /* Qualcomm 10-bit compressed */
#define V4L2_PIX_FMT_AJPG v4l2_fourcc('A', 'J', 'P', 'G') /* Aspeed JPEG */
+#define V4L2_PIX_FMT_HEXTILE v4l2_fourcc('H', 'X', 'T', 'L') /* Hextile compressed */
/* 10bit raw packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */
#define V4L2_PIX_FMT_IPU3_SBGGR10 v4l2_fourcc('i', 'p', '3', 'b') /* IPU3 packed 10-bit BGGR bayer */
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index 2c712c654165..8881aea60f6f 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -105,6 +105,11 @@
*/
#define VIRTIO_F_NOTIFICATION_DATA 38
+/* This feature indicates that the driver uses the data provided by the device
+ * as a virtqueue identifier in available buffer notifications.
+ */
+#define VIRTIO_F_NOTIF_CONFIG_DATA 39
+
/*
* This feature indicates that the driver can reset a queue individually.
*/
diff --git a/include/uapi/mtd/ubi-user.h b/include/uapi/mtd/ubi-user.h
index dcb179de4358..e1571603175e 100644
--- a/include/uapi/mtd/ubi-user.h
+++ b/include/uapi/mtd/ubi-user.h
@@ -248,6 +248,7 @@ enum {
* @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
* @padding: reserved for future, not used, has to be zeroed
 * @disable_fm: whether to disable fastmap
+ * @need_resv_pool: whether to reserve free PEBs for filling pool/wl_pool
*
* This data structure is used to specify MTD device UBI has to attach and the
* parameters it has to use. The number which should be assigned to the new UBI
@@ -293,7 +294,8 @@ struct ubi_attach_req {
__s32 vid_hdr_offset;
__s16 max_beb_per1024;
__s8 disable_fm;
- __s8 padding[9];
+ __s8 need_resv_pool;
+ __s8 padding[8];
};
/*
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index fea06810b43d..a1e4239c7d75 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -52,7 +52,7 @@ static int io_buffer_add_list(struct io_ring_ctx *ctx,
return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}
-void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
+bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_buffer_list *bl;
@@ -65,7 +65,7 @@ void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
* multiple use.
*/
if (req->flags & REQ_F_PARTIAL_IO)
- return;
+ return false;
io_ring_submit_lock(ctx, issue_flags);
@@ -76,7 +76,7 @@ void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
req->buf_index = buf->bgid;
io_ring_submit_unlock(ctx, issue_flags);
- return;
+ return true;
}
unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index d14345ef61fc..f2d615236b2c 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -53,11 +53,11 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
-void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
+bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid);
-static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
+static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
/*
* We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
@@ -80,8 +80,10 @@ static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
} else {
req->buf_index = req->buf_list->bgid;
req->flags &= ~REQ_F_BUFFER_RING;
+ return true;
}
}
+ return false;
}
static inline bool io_do_buffer_select(struct io_kiocb *req)
@@ -91,12 +93,13 @@ static inline bool io_do_buffer_select(struct io_kiocb *req)
return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}
-static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
+static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
if (req->flags & REQ_F_BUFFER_SELECTED)
- io_kbuf_recycle_legacy(req, issue_flags);
+ return io_kbuf_recycle_legacy(req, issue_flags);
if (req->flags & REQ_F_BUFFER_RING)
- io_kbuf_recycle_ring(req);
+ return io_kbuf_recycle_ring(req);
+ return false;
}
static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
diff --git a/io_uring/net.c b/io_uring/net.c
index 7a8e298af81b..75d494dad7e2 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -1461,16 +1461,6 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
int ret;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
- if (connect->in_progress) {
- struct socket *socket;
-
- ret = -ENOTSOCK;
- socket = sock_from_file(req->file);
- if (socket)
- ret = sock_error(socket->sk);
- goto out;
- }
-
if (req_has_async_data(req)) {
io = req->async_data;
} else {
@@ -1490,9 +1480,7 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
&& force_nonblock) {
if (ret == -EINPROGRESS) {
connect->in_progress = true;
- return -EAGAIN;
- }
- if (ret == -ECONNABORTED) {
+ } else if (ret == -ECONNABORTED) {
if (connect->seen_econnaborted)
goto out;
connect->seen_econnaborted = true;
@@ -1506,6 +1494,16 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
memcpy(req->async_data, &__io, sizeof(__io));
return -EAGAIN;
}
+ if (connect->in_progress) {
+ /*
+ * At least bluetooth will return -EBADFD on a re-connect
+ * attempt, and it's (supposedly) also valid to get -EISCONN
+ * which means the previous result is good. For both of these,
+ * grab the sock_error() and use that for the completion.
+ */
+ if (ret == -EBADFD || ret == -EISCONN)
+ ret = sock_error(sock_from_file(req->file)->sk);
+ }
if (ret == -ERESTARTSYS)
ret = -EINTR;
out:
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index 25a3515a177c..799db44283c7 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -66,7 +66,7 @@ const struct io_issue_def io_issue_defs[] = {
.iopoll = 1,
.iopoll_queue = 1,
.vectored = 1,
- .prep = io_prep_rw,
+ .prep = io_prep_rwv,
.issue = io_read,
},
[IORING_OP_WRITEV] = {
@@ -80,7 +80,7 @@ const struct io_issue_def io_issue_defs[] = {
.iopoll = 1,
.iopoll_queue = 1,
.vectored = 1,
- .prep = io_prep_rw,
+ .prep = io_prep_rwv,
.issue = io_write,
},
[IORING_OP_FSYNC] = {
@@ -98,7 +98,7 @@ const struct io_issue_def io_issue_defs[] = {
.ioprio = 1,
.iopoll = 1,
.iopoll_queue = 1,
- .prep = io_prep_rw,
+ .prep = io_prep_rw_fixed,
.issue = io_read,
},
[IORING_OP_WRITE_FIXED] = {
@@ -111,7 +111,7 @@ const struct io_issue_def io_issue_defs[] = {
.ioprio = 1,
.iopoll = 1,
.iopoll_queue = 1,
- .prep = io_prep_rw,
+ .prep = io_prep_rw_fixed,
.issue = io_write,
},
[IORING_OP_POLL_ADD] = {
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 3398e1d944c2..64390d4e20c1 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -83,18 +83,6 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
/* used for fixed read/write too - just read unconditionally */
req->buf_index = READ_ONCE(sqe->buf_index);
- if (req->opcode == IORING_OP_READ_FIXED ||
- req->opcode == IORING_OP_WRITE_FIXED) {
- struct io_ring_ctx *ctx = req->ctx;
- u16 index;
-
- if (unlikely(req->buf_index >= ctx->nr_user_bufs))
- return -EFAULT;
- index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
- req->imu = ctx->user_bufs[index];
- io_req_set_rsrc_node(req, ctx, 0);
- }
-
ioprio = READ_ONCE(sqe->ioprio);
if (ioprio) {
ret = ioprio_check_cap(ioprio);
@@ -110,16 +98,42 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
rw->addr = READ_ONCE(sqe->addr);
rw->len = READ_ONCE(sqe->len);
rw->flags = READ_ONCE(sqe->rw_flags);
+ return 0;
+}
+
+int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ int ret;
+
+ ret = io_prep_rw(req, sqe);
+ if (unlikely(ret))
+ return ret;
- /* Have to do this validation here, as this is in io_read() rw->len might
- * have chanaged due to buffer selection
+ /*
+	 * Have to do this validation here, as in io_read() rw->len might
+	 * have changed due to buffer selection
*/
- if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) {
- ret = io_iov_buffer_select_prep(req);
- if (ret)
- return ret;
- }
+ if (req->flags & REQ_F_BUFFER_SELECT)
+ return io_iov_buffer_select_prep(req);
+
+ return 0;
+}
+int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ u16 index;
+ int ret;
+
+ ret = io_prep_rw(req, sqe);
+ if (unlikely(ret))
+ return ret;
+
+ if (unlikely(req->buf_index >= ctx->nr_user_bufs))
+ return -EFAULT;
+ index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
+ req->imu = ctx->user_bufs[index];
+ io_req_set_rsrc_node(req, ctx, 0);
return 0;
}
@@ -129,12 +143,20 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
*/
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
int ret;
+ /* must be used with provided buffers */
+ if (!(req->flags & REQ_F_BUFFER_SELECT))
+ return -EINVAL;
+
ret = io_prep_rw(req, sqe);
if (unlikely(ret))
return ret;
+ if (rw->addr || rw->len)
+ return -EINVAL;
+
req->flags |= REQ_F_APOLL_MULTISHOT;
return 0;
}
@@ -542,6 +564,9 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
{
if (!force && !io_cold_defs[req->opcode].prep_async)
return 0;
+ /* opcode type doesn't need async data */
+ if (!io_cold_defs[req->opcode].async_size)
+ return 0;
if (!req_has_async_data(req)) {
struct io_async_rw *iorw;
@@ -887,6 +912,7 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
{
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
unsigned int cflags = 0;
int ret;
@@ -903,7 +929,12 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
* handling arm it.
*/
if (ret == -EAGAIN) {
- io_kbuf_recycle(req, issue_flags);
+ /*
+ * Reset rw->len to 0 again to avoid clamping future mshot
+ * reads, in case the buffer size varies.
+ */
+ if (io_kbuf_recycle(req, issue_flags))
+ rw->len = 0;
return -EAGAIN;
}
@@ -916,6 +947,7 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
* jump to the termination path. This request is then done.
*/
cflags = io_put_kbuf(req, issue_flags);
+ rw->len = 0; /* similarly to above, reset len to 0 */
if (io_fill_cqe_req_aux(req,
issue_flags & IO_URING_F_COMPLETE_DEFER,
diff --git a/io_uring/rw.h b/io_uring/rw.h
index c5aed03d42a4..f9e89b4fe4da 100644
--- a/io_uring/rw.h
+++ b/io_uring/rw.h
@@ -16,6 +16,8 @@ struct io_async_rw {
};
int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_read(struct io_kiocb *req, unsigned int issue_flags);
int io_readv_prep_async(struct io_kiocb *req);
int io_write(struct io_kiocb *req, unsigned int issue_flags);
diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c
index 833faa04461b..0fae79164187 100644
--- a/kernel/bpf/bpf_iter.c
+++ b/kernel/bpf/bpf_iter.c
@@ -782,9 +782,7 @@ struct bpf_iter_num_kern {
int end; /* final value, exclusive */
} __aligned(8);
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
- "Global functions as their definitions will be in vmlinux BTF");
+__bpf_kfunc_start_defs();
__bpf_kfunc int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end)
{
@@ -843,4 +841,4 @@ __bpf_kfunc void bpf_iter_num_destroy(struct bpf_iter_num *it)
s->cur = s->end = 0;
}
-__diag_pop();
+__bpf_kfunc_end_defs();
diff --git a/kernel/bpf/cgroup_iter.c b/kernel/bpf/cgroup_iter.c
index 209e5135f9fb..f04a468cf6a7 100644
--- a/kernel/bpf/cgroup_iter.c
+++ b/kernel/bpf/cgroup_iter.c
@@ -282,7 +282,7 @@ static struct bpf_iter_reg bpf_cgroup_reg_info = {
.ctx_arg_info_size = 1,
.ctx_arg_info = {
{ offsetof(struct bpf_iter__cgroup, cgroup),
- PTR_TO_BTF_ID_OR_NULL },
+ PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
},
.seq_info = &cgroup_iter_seq_info,
};
@@ -305,9 +305,7 @@ struct bpf_iter_css_kern {
unsigned int flags;
} __attribute__((aligned(8)));
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
- "Global functions as their definitions will be in vmlinux BTF");
+__bpf_kfunc_start_defs();
__bpf_kfunc int bpf_iter_css_new(struct bpf_iter_css *it,
struct cgroup_subsys_state *start, unsigned int flags)
@@ -358,4 +356,4 @@ __bpf_kfunc void bpf_iter_css_destroy(struct bpf_iter_css *it)
{
}
-__diag_pop(); \ No newline at end of file
+__bpf_kfunc_end_defs();
diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c
index 6983af8e093c..e01c741e54e7 100644
--- a/kernel/bpf/cpumask.c
+++ b/kernel/bpf/cpumask.c
@@ -34,9 +34,7 @@ static bool cpu_valid(u32 cpu)
return cpu < nr_cpu_ids;
}
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
- "Global kfuncs as their definitions will be in BTF");
+__bpf_kfunc_start_defs();
/**
* bpf_cpumask_create() - Create a mutable BPF cpumask.
@@ -407,7 +405,7 @@ __bpf_kfunc u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
return cpumask_any_and_distribute(src1, src2);
}
-__diag_pop();
+__bpf_kfunc_end_defs();
BTF_SET8_START(cpumask_kfunc_btf_ids)
BTF_ID_FLAGS(func, bpf_cpumask_create, KF_ACQUIRE | KF_RET_NULL)
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index e46ac288a108..56b0c1f678ee 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1177,13 +1177,6 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map
ret = -EBUSY;
goto out;
}
- if (!atomic64_read(&map->usercnt)) {
- /* maps with timers must be either held by user space
- * or pinned in bpffs.
- */
- ret = -EPERM;
- goto out;
- }
/* allocate hrtimer via map_kmalloc to use memcg accounting */
t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
if (!t) {
@@ -1196,7 +1189,21 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map
rcu_assign_pointer(t->callback_fn, NULL);
hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
t->timer.function = bpf_timer_cb;
- timer->timer = t;
+ WRITE_ONCE(timer->timer, t);
+ /* Guarantee the order between timer->timer and map->usercnt. So
+ * when there are concurrent uref release and bpf timer init, either
+	 * bpf_timer_cancel_and_free() called by uref release reads a non-NULL
+ * timer or atomic64_read() below returns a zero usercnt.
+ */
+ smp_mb();
+ if (!atomic64_read(&map->usercnt)) {
+ /* maps with timers must be either held by user space
+ * or pinned in bpffs.
+ */
+ WRITE_ONCE(timer->timer, NULL);
+ kfree(t);
+ ret = -EPERM;
+ }
out:
__bpf_spin_unlock_irqrestore(&timer->lock);
return ret;
@@ -1374,7 +1381,7 @@ void bpf_timer_cancel_and_free(void *val)
/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
* this timer, since it won't be initialized.
*/
- timer->timer = NULL;
+ WRITE_ONCE(timer->timer, NULL);
out:
__bpf_spin_unlock_irqrestore(&timer->lock);
if (!t)
@@ -1886,9 +1893,7 @@ void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
}
}
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
- "Global functions as their definitions will be in vmlinux BTF");
+__bpf_kfunc_start_defs();
__bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
{
@@ -2505,7 +2510,7 @@ __bpf_kfunc void bpf_throw(u64 cookie)
WARN(1, "A call to BPF exception callback should never return\n");
}
-__diag_pop();
+__bpf_kfunc_end_defs();
BTF_SET8_START(generic_btf_ids)
#ifdef CONFIG_KEXEC_CORE
@@ -2564,15 +2569,17 @@ BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU)
BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY)
+#ifdef CONFIG_CGROUPS
BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY)
-BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
-BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL)
-BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
BTF_ID_FLAGS(func, bpf_iter_css_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_css_destroy, KF_ITER_DESTROY)
+#endif
+BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
+BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_dynptr_adjust)
BTF_ID_FLAGS(func, bpf_dynptr_is_null)
BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c
index 6fc9dae9edc8..6abd7c5df4b3 100644
--- a/kernel/bpf/map_iter.c
+++ b/kernel/bpf/map_iter.c
@@ -193,9 +193,7 @@ static int __init bpf_map_iter_init(void)
late_initcall(bpf_map_iter_init);
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
- "Global functions as their definitions will be in vmlinux BTF");
+__bpf_kfunc_start_defs();
__bpf_kfunc s64 bpf_map_sum_elem_count(const struct bpf_map *map)
{
@@ -213,7 +211,7 @@ __bpf_kfunc s64 bpf_map_sum_elem_count(const struct bpf_map *map)
return ret;
}
-__diag_pop();
+__bpf_kfunc_end_defs();
BTF_SET8_START(bpf_map_iter_kfunc_ids)
BTF_ID_FLAGS(func, bpf_map_sum_elem_count, KF_TRUSTED_ARGS)
diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
index 654601dd6b49..26082b97894d 100644
--- a/kernel/bpf/task_iter.c
+++ b/kernel/bpf/task_iter.c
@@ -704,7 +704,7 @@ static struct bpf_iter_reg task_reg_info = {
.ctx_arg_info_size = 1,
.ctx_arg_info = {
{ offsetof(struct bpf_iter__task, task),
- PTR_TO_BTF_ID_OR_NULL },
+ PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
},
.seq_info = &task_seq_info,
.fill_link_info = bpf_iter_fill_link_info,
@@ -822,9 +822,7 @@ struct bpf_iter_task_vma_kern {
struct bpf_iter_task_vma_kern_data *data;
} __attribute__((aligned(8)));
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
- "Global functions as their definitions will be in vmlinux BTF");
+__bpf_kfunc_start_defs();
__bpf_kfunc int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
struct task_struct *task, u64 addr)
@@ -890,7 +888,9 @@ __bpf_kfunc void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it)
}
}
-__diag_pop();
+__bpf_kfunc_end_defs();
+
+#ifdef CONFIG_CGROUPS
struct bpf_iter_css_task {
__u64 __opaque[1];
@@ -900,9 +900,7 @@ struct bpf_iter_css_task_kern {
struct css_task_iter *css_it;
} __attribute__((aligned(8)));
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
- "Global functions as their definitions will be in vmlinux BTF");
+__bpf_kfunc_start_defs();
__bpf_kfunc int bpf_iter_css_task_new(struct bpf_iter_css_task *it,
struct cgroup_subsys_state *css, unsigned int flags)
@@ -948,7 +946,9 @@ __bpf_kfunc void bpf_iter_css_task_destroy(struct bpf_iter_css_task *it)
bpf_mem_free(&bpf_global_ma, kit->css_it);
}
-__diag_pop();
+__bpf_kfunc_end_defs();
+
+#endif /* CONFIG_CGROUPS */
struct bpf_iter_task {
__u64 __opaque[3];
@@ -969,9 +969,7 @@ enum {
BPF_TASK_ITER_PROC_THREADS
};
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
- "Global functions as their definitions will be in vmlinux BTF");
+__bpf_kfunc_start_defs();
__bpf_kfunc int bpf_iter_task_new(struct bpf_iter_task *it,
struct task_struct *task__nullable, unsigned int flags)
@@ -1041,7 +1039,7 @@ __bpf_kfunc void bpf_iter_task_destroy(struct bpf_iter_task *it)
{
}
-__diag_pop();
+__bpf_kfunc_end_defs();
DEFINE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 857d76694517..bd1c42eb540f 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3742,7 +3742,12 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
if (class == BPF_ALU || class == BPF_ALU64) {
if (!bt_is_reg_set(bt, dreg))
return 0;
- if (opcode == BPF_MOV) {
+ if (opcode == BPF_END || opcode == BPF_NEG) {
+ /* sreg is reserved and unused
+			 * dreg still needs precision before this insn
+ */
+ return 0;
+ } else if (opcode == BPF_MOV) {
if (BPF_SRC(insn->code) == BPF_X) {
/* dreg = sreg or dreg = (s8, s16, s32)sreg
* dreg needs precision after this insn
@@ -4674,7 +4679,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
insn->imm != 0 && env->bpf_capable) {
struct bpf_reg_state fake_reg = {};
- __mark_reg_known(&fake_reg, (u32)insn->imm);
+ __mark_reg_known(&fake_reg, insn->imm);
fake_reg.type = SCALAR_VALUE;
save_register_state(state, spi, &fake_reg, size);
} else if (reg && is_spillable_regtype(reg->type)) {
@@ -5388,7 +5393,9 @@ static bool in_rcu_cs(struct bpf_verifier_env *env)
/* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */
BTF_SET_START(rcu_protected_types)
BTF_ID(struct, prog_test_ref_kfunc)
+#ifdef CONFIG_CGROUPS
BTF_ID(struct, cgroup)
+#endif
BTF_ID(struct, bpf_cpumask)
BTF_ID(struct, task_struct)
BTF_SET_END(rcu_protected_types)
@@ -10835,7 +10842,9 @@ BTF_ID(func, bpf_dynptr_clone)
BTF_ID(func, bpf_percpu_obj_new_impl)
BTF_ID(func, bpf_percpu_obj_drop_impl)
BTF_ID(func, bpf_throw)
+#ifdef CONFIG_CGROUPS
BTF_ID(func, bpf_iter_css_task_new)
+#endif
BTF_SET_END(special_kfunc_set)
BTF_ID_LIST(special_kfunc_list)
@@ -10861,7 +10870,11 @@ BTF_ID(func, bpf_dynptr_clone)
BTF_ID(func, bpf_percpu_obj_new_impl)
BTF_ID(func, bpf_percpu_obj_drop_impl)
BTF_ID(func, bpf_throw)
+#ifdef CONFIG_CGROUPS
BTF_ID(func, bpf_iter_css_task_new)
+#else
+BTF_ID_UNUSED
+#endif
static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
{
@@ -11394,6 +11407,12 @@ static int process_kf_arg_ptr_to_rbtree_node(struct bpf_verifier_env *env,
&meta->arg_rbtree_root.field);
}
+/*
+ * css_task iter allowlist is needed to avoid deadlocking on css_set_lock.
+ * LSM hooks and iters (both sleepable and non-sleepable) are safe.
+ * Any sleepable progs are also safe since bpf_check_attach_target() enforces
+ * that they can only be attached to specific hook points.
+ */
static bool check_css_task_iter_allowlist(struct bpf_verifier_env *env)
{
enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
@@ -11401,10 +11420,12 @@ static bool check_css_task_iter_allowlist(struct bpf_verifier_env *env)
switch (prog_type) {
case BPF_PROG_TYPE_LSM:
return true;
- case BPF_TRACE_ITER:
- return env->prog->aux->sleepable;
+ case BPF_PROG_TYPE_TRACING:
+ if (env->prog->expected_attach_type == BPF_TRACE_ITER)
+ return true;
+ fallthrough;
default:
- return false;
+ return env->prog->aux->sleepable;
}
}
@@ -11663,7 +11684,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
case KF_ARG_PTR_TO_ITER:
if (meta->func_id == special_kfunc_list[KF_bpf_iter_css_task_new]) {
if (!check_css_task_iter_allowlist(env)) {
- verbose(env, "css_task_iter is only allowed in bpf_lsm and bpf iter-s\n");
+ verbose(env, "css_task_iter is only allowed in bpf_lsm, bpf_iter and sleepable progs\n");
return -EINVAL;
}
}
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index d80d7a608141..c0adb7254b45 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -156,19 +156,16 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
* optimize away the callsite. Therefore, __weak is needed to ensure that the
* call is still emitted, by telling the compiler that we don't know what the
* function might eventually be.
- *
- * __diag_* below are needed to dismiss the missing prototype warning.
*/
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
- "kfuncs which will be used in BPF programs");
+
+__bpf_hook_start();
__weak noinline void bpf_rstat_flush(struct cgroup *cgrp,
struct cgroup *parent, int cpu)
{
}
-__diag_pop();
+__bpf_hook_end();
/* see cgroup_rstat_flush() */
static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 621037a0aa87..ce1bb2301c06 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -1006,6 +1006,9 @@ void kgdb_panic(const char *msg)
if (panic_timeout)
return;
+ debug_locks_off();
+ console_flush_on_panic(CONSOLE_FLUSH_PENDING);
+
if (dbg_kdb_mode)
kdb_printf("PANIC: %s\n", msg);
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 438b868cbfa9..6b213c8252d6 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -272,11 +272,10 @@ char *kdbgetenv(const char *match)
* kdballocenv - This function is used to allocate bytes for
* environment entries.
* Parameters:
- * match A character string representing a numeric value
- * Outputs:
- * *value the unsigned long representation of the env variable 'match'
+ * bytes The number of bytes to allocate in the static buffer.
* Returns:
- * Zero on success, a kdb diagnostic on failure.
+ * A pointer to the allocated space in the buffer on success.
+ * NULL if bytes > size available in the envbuffer.
* Remarks:
* We use a static environment buffer (envbuffer) to hold the values
* of dynamically generated environment variables (see kdb_set). Buffer
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index ed3056eb20b8..73c95815789a 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -587,6 +587,46 @@ int dma_direct_supported(struct device *dev, u64 mask)
return mask >= phys_to_dma_unencrypted(dev, min_mask);
}
+/*
+ * Check whether all RAM resource ranges are covered by the dma_range_map.
+ * Returns 0 when a further check is needed.
+ * Returns 1 if some RAM range cannot be covered by the dma_range_map.
+ */
+static int check_ram_in_range_map(unsigned long start_pfn,
+ unsigned long nr_pages, void *data)
+{
+ unsigned long end_pfn = start_pfn + nr_pages;
+ const struct bus_dma_region *bdr = NULL;
+ const struct bus_dma_region *m;
+ struct device *dev = data;
+
+ while (start_pfn < end_pfn) {
+ for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) {
+ unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);
+
+ if (start_pfn >= cpu_start_pfn &&
+ start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) {
+ bdr = m;
+ break;
+ }
+ }
+ if (!bdr)
+ return 1;
+
+ start_pfn = PFN_DOWN(bdr->cpu_start) + PFN_DOWN(bdr->size);
+ }
+
+ return 0;
+}
+
+bool dma_direct_all_ram_mapped(struct device *dev)
+{
+ if (!dev->dma_range_map)
+ return true;
+ return !walk_system_ram_range(0, PFN_DOWN(ULONG_MAX) + 1, dev,
+ check_ram_in_range_map);
+}
+
size_t dma_direct_max_mapping_size(struct device *dev)
{
/* If SWIOTLB is active, use its maximum mapping size */
diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h
index 97ec892ea0b5..18d346118fe8 100644
--- a/kernel/dma/direct.h
+++ b/kernel/dma/direct.h
@@ -20,6 +20,7 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
enum dma_data_direction dir, unsigned long attrs);
+bool dma_direct_all_ram_mapped(struct device *dev);
size_t dma_direct_max_mapping_size(struct device *dev);
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index e323ca48f7f2..58db8fd70471 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -793,6 +793,28 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
}
EXPORT_SYMBOL(dma_set_coherent_mask);
+/**
+ * dma_addressing_limited - return if the device is addressing limited
+ * @dev: device to check
+ *
+ * Return %true if the device's DMA mask is too small to address all memory in
+ * the system, else %false. Lack of addressing bits is the prime reason for
+ * bounce buffering, but might not be the only one.
+ */
+bool dma_addressing_limited(struct device *dev)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
+ dma_get_required_mask(dev))
+ return true;
+
+ if (unlikely(ops))
+ return false;
+ return !dma_direct_all_ram_mapped(dev);
+}
+EXPORT_SYMBOL_GPL(dma_addressing_limited);
+
size_t dma_max_mapping_size(struct device *dev)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
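As a usage sketch of the newly exported helper (the device pointer and the segment-size policy are purely illustrative):

/* Hypothetical probe-time policy: prefer smaller DMA segments when the
 * device cannot reach all of system RAM and bouncing is therefore likely. */
unsigned int max_seg = dma_addressing_limited(&pdev->dev) ? SZ_64K : SZ_4M;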
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 26202274784f..33d942615be5 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -283,7 +283,8 @@ static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start,
}
for (i = 0; i < mem->nslabs; i++) {
- mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
+ mem->slots[i].list = min(IO_TLB_SEGSIZE - io_tlb_offset(i),
+ mem->nslabs - i);
mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
mem->slots[i].alloc_size = 0;
}
@@ -558,29 +559,40 @@ void __init swiotlb_exit(void)
* alloc_dma_pages() - allocate pages to be used for DMA
* @gfp: GFP flags for the allocation.
* @bytes: Size of the buffer.
+ * @phys_limit: Maximum allowed physical address of the buffer.
*
* Allocate pages from the buddy allocator. If successful, make the allocated
 * pages decrypted so that they can be used for DMA.
*
- * Return: Decrypted pages, or %NULL on failure.
+ * Return: Decrypted pages, %NULL on allocation failure, or ERR_PTR(-EAGAIN)
+ * if the allocated physical address was above @phys_limit.
*/
-static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes)
+static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes, u64 phys_limit)
{
unsigned int order = get_order(bytes);
struct page *page;
+ phys_addr_t paddr;
void *vaddr;
page = alloc_pages(gfp, order);
if (!page)
return NULL;
- vaddr = page_address(page);
+ paddr = page_to_phys(page);
+ if (paddr + bytes - 1 > phys_limit) {
+ __free_pages(page, order);
+ return ERR_PTR(-EAGAIN);
+ }
+
+ vaddr = phys_to_virt(paddr);
if (set_memory_decrypted((unsigned long)vaddr, PFN_UP(bytes)))
goto error;
return page;
error:
- __free_pages(page, order);
+ /* Intentional leak if pages cannot be encrypted again. */
+ if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
+ __free_pages(page, order);
return NULL;
}
@@ -618,11 +630,7 @@ static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes,
else if (phys_limit <= DMA_BIT_MASK(32))
gfp |= __GFP_DMA32;
- while ((page = alloc_dma_pages(gfp, bytes)) &&
- page_to_phys(page) + bytes - 1 > phys_limit) {
- /* allocated, but too high */
- __free_pages(page, get_order(bytes));
-
+ while (IS_ERR(page = alloc_dma_pages(gfp, bytes, phys_limit))) {
if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
phys_limit < DMA_BIT_MASK(64) &&
!(gfp & (__GFP_DMA32 | __GFP_DMA)))
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 61328328c474..ecbc9b6aba3a 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -243,7 +243,7 @@ static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab,
* symbols are exported and normal relas can be used instead.
*/
if (!sec_vmlinux && sym_vmlinux) {
- pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section",
+ pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section\n",
sym_name);
return -EINVAL;
}
diff --git a/kernel/printk/Makefile b/kernel/printk/Makefile
index f5b388e810b9..39a2b61c7232 100644
--- a/kernel/printk/Makefile
+++ b/kernel/printk/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y = printk.o
-obj-$(CONFIG_PRINTK) += printk_safe.o
+obj-$(CONFIG_PRINTK) += printk_safe.o nbcon.o
obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o
obj-$(CONFIG_PRINTK_INDEX) += index.o
diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
index 7d4979d5c3ce..6c2afee5ef62 100644
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -3,6 +3,8 @@
* internal.h - printk internal definitions
*/
#include <linux/percpu.h>
+#include <linux/console.h>
+#include "printk_ringbuffer.h"
#if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
void __init printk_sysctl_init(void);
@@ -12,6 +14,12 @@ int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
#define printk_sysctl_init() do { } while (0)
#endif
+#define con_printk(lvl, con, fmt, ...) \
+ printk(lvl pr_fmt("%s%sconsole [%s%d] " fmt), \
+ (con->flags & CON_NBCON) ? "" : "legacy ", \
+ (con->flags & CON_BOOT) ? "boot" : "", \
+ con->name, con->index, ##__VA_ARGS__)
+
#ifdef CONFIG_PRINTK
#ifdef CONFIG_PRINTK_CALLER
@@ -35,6 +43,8 @@ enum printk_info_flags {
LOG_CONT = 8, /* text is a fragment of a continuation line */
};
+extern struct printk_ringbuffer *prb;
+
__printf(4, 0)
int vprintk_store(int facility, int level,
const struct dev_printk_info *dev_info,
@@ -61,6 +71,13 @@ void defer_console_output(void);
u16 printk_parse_prefix(const char *text, int *level,
enum printk_info_flags *flags);
+
+u64 nbcon_seq_read(struct console *con);
+void nbcon_seq_force(struct console *con, u64 seq);
+bool nbcon_alloc(struct console *con);
+void nbcon_init(struct console *con);
+void nbcon_free(struct console *con);
+
#else
#define PRINTK_PREFIX_MAX 0
@@ -76,8 +93,16 @@ u16 printk_parse_prefix(const char *text, int *level,
#define printk_safe_exit_irqrestore(flags) local_irq_restore(flags)
static inline bool printk_percpu_data_ready(void) { return false; }
+static inline u64 nbcon_seq_read(struct console *con) { return 0; }
+static inline void nbcon_seq_force(struct console *con, u64 seq) { }
+static inline bool nbcon_alloc(struct console *con) { return false; }
+static inline void nbcon_init(struct console *con) { }
+static inline void nbcon_free(struct console *con) { }
+
#endif /* CONFIG_PRINTK */
+extern struct printk_buffers printk_shared_pbufs;
+
/**
* struct printk_buffers - Buffers to read/format/output printk messages.
* @outbuf: After formatting, contains text to output.
@@ -105,3 +130,9 @@ struct printk_message {
};
bool other_cpu_in_panic(void);
+bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
+ bool is_extended, bool may_supress);
+
+#ifdef CONFIG_PRINTK
+void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped);
+#endif
diff --git a/kernel/printk/nbcon.c b/kernel/printk/nbcon.c
new file mode 100644
index 000000000000..b96077152f49
--- /dev/null
+++ b/kernel/printk/nbcon.c
@@ -0,0 +1,1029 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2022 Linutronix GmbH, John Ogness
+// Copyright (C) 2022 Intel, Thomas Gleixner
+
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include "internal.h"
+/*
+ * Printk console printing implementation for consoles that do not depend
+ * on the legacy style console_lock mechanism.
+ *
+ * The state of the console is maintained in the "nbcon_state" atomic
+ * variable.
+ *
+ * The console is locked when:
+ *
+ * - The 'prio' field contains the priority of the context that owns the
+ * console. Only higher priority contexts are allowed to take over the
+ * lock. A value of 0 (NBCON_PRIO_NONE) means the console is not locked.
+ *
+ * - The 'cpu' field denotes on which CPU the console is locked. It is used
+ * to prevent busy waiting on the same CPU. Also it informs the lock owner
+ * that it has lost the lock in a more complex scenario when the lock was
+ * taken over by a higher priority context, released, and taken on another
+ * CPU with the same priority as the interrupted owner.
+ *
+ * The acquire mechanism uses a few more fields:
+ *
+ * - The 'req_prio' field is used by the handover approach to make the
+ * current owner aware that there is a context with a higher priority
+ * waiting for the friendly handover.
+ *
+ * - The 'unsafe' field allows taking over the console in a safe way in the
+ * middle of emitting a message. The field is set only when accessing some
+ * shared resources or when the console device is manipulated. It can be
+ * cleared, for example, after emitting one character when the console
+ * device is in a consistent state.
+ *
+ * - The 'unsafe_takeover' field is set when a hostile takeover took the
+ * console in an unsafe state. The console will stay in the unsafe state
+ * until re-initialized.
+ *
+ * The acquire mechanism uses three approaches:
+ *
+ * 1) Direct acquire when the console is not owned or is owned by a lower
+ * priority context and is in a safe state.
+ *
+ * 2) Friendly handover mechanism uses a request/grant handshake. It is used
+ * when the current owner has lower priority and the console is in an
+ * unsafe state.
+ *
+ * The requesting context:
+ *
+ * a) Sets its priority into the 'req_prio' field.
+ *
+ * b) Waits (with a timeout) for the owning context to unlock the
+ * console.
+ *
+ * c) Takes the lock and clears the 'req_prio' field.
+ *
+ * The owning context:
+ *
+ * a) Observes the 'req_prio' field set on exit from the unsafe
+ * console state.
+ *
+ * b) Gives up console ownership by clearing the 'prio' field.
+ *
+ * 3) Unsafe hostile takeover allows taking over the lock even when the
+ *    console is in an unsafe state. It is used only in panic() by the final
+ * attempt to flush consoles in a try and hope mode.
+ *
+ * Note that separate record buffers are used in panic(). As a result,
+ * the messages can be read and formatted without any risk even after
+ * using the hostile takeover in unsafe state.
+ *
+ * The release function simply clears the 'prio' field.
+ *
+ * All operations on @console::nbcon_state are atomic cmpxchg based to
+ * handle concurrency.
+ *
+ * The acquire/release functions implement only minimal policies:
+ *
+ * - Preference for higher priority contexts.
+ * - Protection of the panic CPU.
+ *
+ * All other policy decisions must be made at the call sites:
+ *
+ * - What is marked as an unsafe section.
+ * - Whether to spin-wait if there is already an owner and the console is
+ * in an unsafe state.
+ * - Whether to attempt an unsafe hostile takeover.
+ *
+ * The design allows implementing the well-known pattern:
+ *
+ * acquire()
+ * output_one_printk_record()
+ * release()
+ *
+ * The output of one printk record might be interrupted with a higher priority
+ * context. The new owner is supposed to reprint the entire interrupted record
+ * from scratch.
+ */
+
+/**
+ * nbcon_state_set - Helper function to set the console state
+ * @con: Console to update
+ * @new: The new state to write
+ *
+ * Only to be used when the console is not yet or no longer visible in the
+ * system. Otherwise use nbcon_state_try_cmpxchg().
+ */
+static inline void nbcon_state_set(struct console *con, struct nbcon_state *new)
+{
+ atomic_set(&ACCESS_PRIVATE(con, nbcon_state), new->atom);
+}
+
+/**
+ * nbcon_state_read - Helper function to read the console state
+ * @con: Console to read
+ * @state: The state to store the result
+ */
+static inline void nbcon_state_read(struct console *con, struct nbcon_state *state)
+{
+ state->atom = atomic_read(&ACCESS_PRIVATE(con, nbcon_state));
+}
+
+/**
+ * nbcon_state_try_cmpxchg() - Helper function for atomic_try_cmpxchg() on console state
+ * @con: Console to update
+ * @cur: Old/expected state
+ * @new: New state
+ *
+ * Return: True on success. False on fail and @cur is updated.
+ */
+static inline bool nbcon_state_try_cmpxchg(struct console *con, struct nbcon_state *cur,
+ struct nbcon_state *new)
+{
+ return atomic_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_state), &cur->atom, new->atom);
+}
+
+#ifdef CONFIG_64BIT
+
+#define __seq_to_nbcon_seq(seq) (seq)
+#define __nbcon_seq_to_seq(seq) (seq)
+
+#else /* CONFIG_64BIT */
+
+#define __seq_to_nbcon_seq(seq) ((u32)seq)
+
+static inline u64 __nbcon_seq_to_seq(u32 nbcon_seq)
+{
+ u64 seq;
+ u64 rb_next_seq;
+
+ /*
+ * The provided sequence is only the lower 32 bits of the ringbuffer
+ * sequence. It needs to be expanded to 64bit. Get the next sequence
+ * number from the ringbuffer and fold it.
+ *
+ * Having a 32bit representation in the console is sufficient.
+ * If a console ever gets more than 2^31 records behind
+ * the ringbuffer then this is the least of the problems.
+ *
+ * Also the access to the ring buffer is always safe.
+ */
+ rb_next_seq = prb_next_seq(prb);
+ seq = rb_next_seq - ((u32)rb_next_seq - nbcon_seq);
+
+ return seq;
+}
+
+#endif /* CONFIG_64BIT */
+
+/**
+ * nbcon_seq_read - Read the current console sequence
+ * @con: Console to read the sequence of
+ *
+ * Return: Sequence number of the next record to print on @con.
+ */
+u64 nbcon_seq_read(struct console *con)
+{
+ unsigned long nbcon_seq = atomic_long_read(&ACCESS_PRIVATE(con, nbcon_seq));
+
+ return __nbcon_seq_to_seq(nbcon_seq);
+}
+
+/**
+ * nbcon_seq_force - Force console sequence to a specific value
+ * @con: Console to work on
+ * @seq: Sequence number value to set
+ *
+ * Only to be used during init (before registration) or in extreme situations
+ * (such as panic with CONSOLE_REPLAY_ALL).
+ */
+void nbcon_seq_force(struct console *con, u64 seq)
+{
+ /*
+ * If the specified record no longer exists, the oldest available record
+ * is chosen. This is especially important on 32bit systems because only
+ * the lower 32 bits of the sequence number are stored. The upper 32 bits
+ * are derived from the sequence numbers available in the ringbuffer.
+ */
+ u64 valid_seq = max_t(u64, seq, prb_first_valid_seq(prb));
+
+ atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), __seq_to_nbcon_seq(valid_seq));
+
+ /* Clear con->seq since nbcon consoles use con->nbcon_seq instead. */
+ con->seq = 0;
+}
+
+/**
+ * nbcon_seq_try_update - Try to update the console sequence number
+ * @ctxt: Pointer to an acquire context that contains
+ * all information about the acquire mode
+ * @new_seq: The new sequence number to set
+ *
+ * @ctxt->seq is updated to the new value of @con::nbcon_seq (expanded to
+ * the 64bit value). This could be a different value than @new_seq if
+ * nbcon_seq_force() was used or the current context no longer owns the
+ * console. In the latter case, it will stop printing anyway.
+ */
+static void nbcon_seq_try_update(struct nbcon_context *ctxt, u64 new_seq)
+{
+ unsigned long nbcon_seq = __seq_to_nbcon_seq(ctxt->seq);
+ struct console *con = ctxt->console;
+
+ if (atomic_long_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_seq), &nbcon_seq,
+ __seq_to_nbcon_seq(new_seq))) {
+ ctxt->seq = new_seq;
+ } else {
+ ctxt->seq = nbcon_seq_read(con);
+ }
+}
+
+/**
+ * nbcon_context_try_acquire_direct - Try to acquire directly
+ * @ctxt: The context of the caller
+ * @cur: The current console state
+ *
+ * Acquire the console when it is released. Also acquire the console when
+ * the current owner has a lower priority and the console is in a safe state.
+ *
+ * Return: 0 on success. Otherwise, an error code on failure. Also @cur
+ *		is updated to the latest state when modifying it fails.
+ *
+ * Errors:
+ *
+ * -EPERM: A panic is in progress and this is not the panic CPU.
+ * Or the current owner or waiter has the same or higher
+ * priority. No acquire method can be successful in
+ * this case.
+ *
+ * -EBUSY: The current owner has a lower priority but the console
+ * in an unsafe state. The caller should try using
+ * the handover acquire method.
+ */
+static int nbcon_context_try_acquire_direct(struct nbcon_context *ctxt,
+ struct nbcon_state *cur)
+{
+ unsigned int cpu = smp_processor_id();
+ struct console *con = ctxt->console;
+ struct nbcon_state new;
+
+ do {
+ if (other_cpu_in_panic())
+ return -EPERM;
+
+ if (ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio)
+ return -EPERM;
+
+ if (cur->unsafe)
+ return -EBUSY;
+
+ /*
+ * The console should never be safe for a direct acquire
+ * if an unsafe hostile takeover has ever happened.
+ */
+ WARN_ON_ONCE(cur->unsafe_takeover);
+
+ new.atom = cur->atom;
+ new.prio = ctxt->prio;
+ new.req_prio = NBCON_PRIO_NONE;
+ new.unsafe = cur->unsafe_takeover;
+ new.cpu = cpu;
+
+ } while (!nbcon_state_try_cmpxchg(con, cur, &new));
+
+ return 0;
+}
+
+static bool nbcon_waiter_matches(struct nbcon_state *cur, int expected_prio)
+{
+ /*
+ * The request context is well defined by the @req_prio because:
+ *
+ * - Only a context with a higher priority can take over the request.
+ * - There are only three priorities.
+ * - Only one CPU is allowed to request PANIC priority.
+ * - Lower priorities are ignored during panic() until reboot.
+ *
+ * As a result, the following scenario is *not* possible:
+ *
+ * 1. Another context with a higher priority directly takes ownership.
+ * 2. The higher priority context releases the ownership.
+ * 3. A lower priority context takes the ownership.
+ * 4. Another context with the same priority as this context
+ * creates a request and starts waiting.
+ */
+
+ return (cur->req_prio == expected_prio);
+}
+
+/**
+ * nbcon_context_try_acquire_requested - Try to acquire after having
+ * requested a handover
+ * @ctxt: The context of the caller
+ * @cur: The current console state
+ *
+ * This is a helper function for nbcon_context_try_acquire_handover().
+ * It is called when the console is in an unsafe state. The current
+ * owner will release the console on exit from the unsafe region.
+ *
+ * Return: 0 on success and @cur is updated to the new console state.
+ * Otherwise an error code on failure.
+ *
+ * Errors:
+ *
+ * -EPERM: A panic is in progress and this is not the panic CPU
+ * or this context is no longer the waiter.
+ *
+ * -EBUSY: The console is still locked. The caller should
+ * continue waiting.
+ *
+ * Note: The caller must still remove the request when an error has occurred
+ * except when this context is no longer the waiter.
+ */
+static int nbcon_context_try_acquire_requested(struct nbcon_context *ctxt,
+ struct nbcon_state *cur)
+{
+ unsigned int cpu = smp_processor_id();
+ struct console *con = ctxt->console;
+ struct nbcon_state new;
+
+ /* Note that the caller must still remove the request! */
+ if (other_cpu_in_panic())
+ return -EPERM;
+
+ /*
+ * Note that the waiter will also change if there was an unsafe
+ * hostile takeover.
+ */
+ if (!nbcon_waiter_matches(cur, ctxt->prio))
+ return -EPERM;
+
+ /* If still locked, caller should continue waiting. */
+ if (cur->prio != NBCON_PRIO_NONE)
+ return -EBUSY;
+
+ /*
+ * The previous owner should have never released ownership
+ * in an unsafe region.
+ */
+ WARN_ON_ONCE(cur->unsafe);
+
+ new.atom = cur->atom;
+ new.prio = ctxt->prio;
+ new.req_prio = NBCON_PRIO_NONE;
+ new.unsafe = cur->unsafe_takeover;
+ new.cpu = cpu;
+
+ if (!nbcon_state_try_cmpxchg(con, cur, &new)) {
+ /*
+ * The acquire could fail only when it has been taken
+ * over by a higher priority context.
+ */
+ WARN_ON_ONCE(nbcon_waiter_matches(cur, ctxt->prio));
+ return -EPERM;
+ }
+
+ /* Handover success. This context now owns the console. */
+ return 0;
+}
+
+/**
+ * nbcon_context_try_acquire_handover - Try to acquire via handover
+ * @ctxt: The context of the caller
+ * @cur: The current console state
+ *
+ * The function must be called only when the context has higher priority
+ * than the current owner and the console is in an unsafe state.
+ * This is the case when nbcon_context_try_acquire_direct() returns -EBUSY.
+ *
+ * The function sets the "req_prio" field to make the current owner aware of
+ * the request. Then it waits until the current owner releases the console,
+ * a context with an even higher priority takes over the request, or the
+ * timeout expires.
+ *
+ * The current owner checks the "req_prio" field on exit from the unsafe
+ * region and releases the console. It does not touch the "req_prio" field
+ * so that the console stays reserved for the waiter.
+ *
+ * Return: 0 on success, otherwise an error code. On failure, @cur is
+ * also updated to the latest console state.
+ *
+ * Errors:
+ *
+ * -EPERM: A panic is in progress and this is not the panic CPU.
+ * Or a higher priority context has taken over the
+ * console or the handover request.
+ *
+ * -EBUSY: The current owner is on the same CPU so that the
+ * handshake could not work. Or the current owner is not
+ * willing to wait (zero timeout). Or the console does
+ * not enter the safe state before the timeout expires. The
+ * caller might still use the unsafe hostile takeover
+ * when allowed.
+ *
+ * -EAGAIN: @cur has changed when creating the handover request.
+ * The caller should retry with direct acquire.
+ */
+static int nbcon_context_try_acquire_handover(struct nbcon_context *ctxt,
+ struct nbcon_state *cur)
+{
+ unsigned int cpu = smp_processor_id();
+ struct console *con = ctxt->console;
+ struct nbcon_state new;
+ int timeout;
+ int request_err = -EBUSY;
+
+ /*
+ * Check that the handover is called when the direct acquire failed
+ * with -EBUSY.
+ */
+ WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);
+ WARN_ON_ONCE(!cur->unsafe);
+
+ /* Handover is not possible on the same CPU. */
+ if (cur->cpu == cpu)
+ return -EBUSY;
+
+ /*
+ * Console stays unsafe after an unsafe takeover until re-initialized.
+ * Waiting is not going to help in this case.
+ */
+ if (cur->unsafe_takeover)
+ return -EBUSY;
+
+ /* Is the caller willing to wait? */
+ if (ctxt->spinwait_max_us == 0)
+ return -EBUSY;
+
+ /*
+ * Set up a request for the handover. The caller should try to acquire
+ * the console directly when the current state has been modified.
+ */
+ new.atom = cur->atom;
+ new.req_prio = ctxt->prio;
+ if (!nbcon_state_try_cmpxchg(con, cur, &new))
+ return -EAGAIN;
+
+ cur->atom = new.atom;
+
+ /* Wait until there is no owner and then acquire the console. */
+ for (timeout = ctxt->spinwait_max_us; timeout >= 0; timeout--) {
+ /* On successful acquire, this request is cleared. */
+ request_err = nbcon_context_try_acquire_requested(ctxt, cur);
+ if (!request_err)
+ return 0;
+
+ /*
+ * If the acquire should be aborted, it must be ensured
+ * that the request is removed before returning to the caller.
+ */
+ if (request_err == -EPERM)
+ break;
+
+ udelay(1);
+
+ /* Re-read the state because some time has passed. */
+ nbcon_state_read(con, cur);
+ }
+
+ /* Timed out or aborted. Carefully remove handover request. */
+ do {
+ /*
+ * No need to remove the request if there is a new waiter. This
+ * can only happen if a higher priority context has taken over
+ * the console or the handover request.
+ */
+ if (!nbcon_waiter_matches(cur, ctxt->prio))
+ return -EPERM;
+
+ /* Unset request for handover. */
+ new.atom = cur->atom;
+ new.req_prio = NBCON_PRIO_NONE;
+ if (nbcon_state_try_cmpxchg(con, cur, &new)) {
+ /*
+ * Request successfully unset. Report failure of
+ * acquiring via handover.
+ */
+ cur->atom = new.atom;
+ return request_err;
+ }
+
+ /*
+ * Unable to remove request. Try to acquire in case
+ * the owner has released the lock.
+ */
+ } while (nbcon_context_try_acquire_requested(ctxt, cur));
+
+ /* Lucky timing. The acquire succeeded while removing the request. */
+ return 0;
+}
+
+/**
+ * nbcon_context_try_acquire_hostile - Acquire via unsafe hostile takeover
+ * @ctxt: The context of the caller
+ * @cur: The current console state
+ *
+ * Acquire the console even in the unsafe state.
+ *
+ * It is permitted only for the final attempt to flush messages in panic(),
+ * which sets the 'allow_unsafe_takeover' field.
+ *
+ * Return: 0 on success. -EPERM when not allowed by the context.
+ */
+static int nbcon_context_try_acquire_hostile(struct nbcon_context *ctxt,
+ struct nbcon_state *cur)
+{
+ unsigned int cpu = smp_processor_id();
+ struct console *con = ctxt->console;
+ struct nbcon_state new;
+
+ if (!ctxt->allow_unsafe_takeover)
+ return -EPERM;
+
+ /* Ensure caller is allowed to perform unsafe hostile takeovers. */
+ if (WARN_ON_ONCE(ctxt->prio != NBCON_PRIO_PANIC))
+ return -EPERM;
+
+ /*
+ * Check that try_acquire_direct() and try_acquire_handover() returned
+ * -EBUSY in the right situation.
+ */
+ WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);
+ WARN_ON_ONCE(cur->unsafe != true);
+
+ do {
+ new.atom = cur->atom;
+ new.cpu = cpu;
+ new.prio = ctxt->prio;
+ new.unsafe |= cur->unsafe_takeover;
+ new.unsafe_takeover |= cur->unsafe;
+
+ } while (!nbcon_state_try_cmpxchg(con, cur, &new));
+
+ return 0;
+}
+
+static struct printk_buffers panic_nbcon_pbufs;
+
+/**
+ * nbcon_context_try_acquire - Try to acquire nbcon console
+ * @ctxt: The context of the caller
+ *
+ * Return: True if the console was acquired. False otherwise.
+ *
+ * If the caller allowed an unsafe hostile takeover, on success the
+ * caller should check the current console state to see if it is
+ * in an unsafe state. Otherwise, on success the caller may assume
+ * the console is not in an unsafe state.
+ */
+__maybe_unused
+static bool nbcon_context_try_acquire(struct nbcon_context *ctxt)
+{
+ unsigned int cpu = smp_processor_id();
+ struct console *con = ctxt->console;
+ struct nbcon_state cur;
+ int err;
+
+ nbcon_state_read(con, &cur);
+try_again:
+ err = nbcon_context_try_acquire_direct(ctxt, &cur);
+ if (err != -EBUSY)
+ goto out;
+
+ err = nbcon_context_try_acquire_handover(ctxt, &cur);
+ if (err == -EAGAIN)
+ goto try_again;
+ if (err != -EBUSY)
+ goto out;
+
+ err = nbcon_context_try_acquire_hostile(ctxt, &cur);
+out:
+ if (err)
+ return false;
+
+ /* Acquire succeeded. */
+
+ /* Assign the appropriate buffer for this context. */
+ if (atomic_read(&panic_cpu) == cpu)
+ ctxt->pbufs = &panic_nbcon_pbufs;
+ else
+ ctxt->pbufs = con->pbufs;
+
+ /* Set the record sequence for this context to print. */
+ ctxt->seq = nbcon_seq_read(ctxt->console);
+
+ return true;
+}
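+
+/*
+ * Usage sketch for the acquire/release pair (hypothetical caller; which
+ * context fields must be pre-filled is an assumption based on the code
+ * above, not a documented contract):
+ *
+ *   struct nbcon_context ctxt = {
+ *           .console = con,
+ *           .prio    = NBCON_PRIO_NORMAL,
+ *   };
+ *
+ *   if (nbcon_context_try_acquire(&ctxt)) {
+ *           ... emit records, re-checking ownership at safe points ...
+ *           nbcon_context_release(&ctxt);
+ *   }
+ */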
+
+static bool nbcon_owner_matches(struct nbcon_state *cur, int expected_cpu,
+ int expected_prio)
+{
+ /*
+ * Since consoles can only be acquired by higher priorities,
+ * owning contexts are uniquely identified by @prio. However,
+ * since contexts can unexpectedly lose ownership, it is
+ * possible that later another owner appears with the same
+ * priority. For this reason @cpu is also needed.
+ */
+
+ if (cur->prio != expected_prio)
+ return false;
+
+ if (cur->cpu != expected_cpu)
+ return false;
+
+ return true;
+}
+
+/**
+ * nbcon_context_release - Release the console
+ * @ctxt: The nbcon context from nbcon_context_try_acquire()
+ */
+static void nbcon_context_release(struct nbcon_context *ctxt)
+{
+ unsigned int cpu = smp_processor_id();
+ struct console *con = ctxt->console;
+ struct nbcon_state cur;
+ struct nbcon_state new;
+
+ nbcon_state_read(con, &cur);
+
+ do {
+ if (!nbcon_owner_matches(&cur, cpu, ctxt->prio))
+ break;
+
+ new.atom = cur.atom;
+ new.prio = NBCON_PRIO_NONE;
+
+ /*
+ * If @unsafe_takeover is set, it is kept set so that
+ * the state remains permanently unsafe.
+ */
+ new.unsafe |= cur.unsafe_takeover;
+
+ } while (!nbcon_state_try_cmpxchg(con, &cur, &new));
+
+ ctxt->pbufs = NULL;
+}
+
+/**
+ * nbcon_context_can_proceed - Check whether ownership can proceed
+ * @ctxt: The nbcon context from nbcon_context_try_acquire()
+ * @cur: The current console state
+ *
+ * Return: True if this context still owns the console. False if
+ * ownership was handed over or taken.
+ *
+ * Must be invoked when entering the unsafe state to make sure that this
+ * context still owns the lock. It must also be invoked when exiting the
+ * unsafe state in order to hand over the lock, if needed, to a higher
+ * priority context that asked for a friendly handover.
+ *
+ * It can be called inside an unsafe section when the console is only
+ * temporarily in a safe state, instead of exiting and re-entering the
+ * unsafe state.
+ *
+ * It can also be called in a safe context before doing an expensive
+ * safe operation. It does not make sense to do the operation if
+ * a higher priority context has taken the lock.
+ *
+ * When this function returns false then the calling context no longer owns
+ * the console and is no longer allowed to go forward. In this case it must
+ * back out immediately and carefully. The buffer content is also no longer
+ * trusted since it no longer belongs to the calling context.
+ */
+static bool nbcon_context_can_proceed(struct nbcon_context *ctxt, struct nbcon_state *cur)
+{
+ unsigned int cpu = smp_processor_id();
+
+ /* Make sure this context still owns the console. */
+ if (!nbcon_owner_matches(cur, cpu, ctxt->prio))
+ return false;
+
+ /* The console owner can proceed if there is no waiter. */
+ if (cur->req_prio == NBCON_PRIO_NONE)
+ return true;
+
+ /*
+ * A console owner within an unsafe region is always allowed to
+ * proceed, even if there are waiters. It can perform a handover
+ * when exiting the unsafe region. Otherwise the waiter will
+ * need to perform an unsafe hostile takeover.
+ */
+ if (cur->unsafe)
+ return true;
+
+ /* Waiters always have higher priorities than owners. */
+ WARN_ON_ONCE(cur->req_prio <= cur->prio);
+
+ /*
+ * Having a safe point for takeover, and possibly a few
+ * duplicated characters or a full line, is far better than a
+ * hostile takeover. Post processing can take care of the garbage.
+ * Release and hand over.
+ */
+ nbcon_context_release(ctxt);
+
+ /*
+ * It is not clear whether the waiter really took over ownership. The
+ * outermost callsite must make the final decision whether console
+ * ownership is needed for it to proceed. If yes, it must reacquire
+ * ownership (possibly hostile) before carefully proceeding.
+ *
+ * The calling context no longer owns the console so go back all the
+ * way instead of trying to implement reacquire heuristics in tons of
+ * places.
+ */
+ return false;
+}
+
+/**
+ * nbcon_can_proceed - Check whether ownership can proceed
+ * @wctxt: The write context that was handed to the write function
+ *
+ * Return: True if this context still owns the console. False if
+ * ownership was handed over or taken.
+ *
+ * It is used in nbcon_enter_unsafe() to make sure that it still owns the
+ * lock. Also it is used in nbcon_exit_unsafe() to eventually free the lock
+ * for a higher priority context which asked for the friendly handover.
+ *
+ * It can be called inside an unsafe section when the console is only
+ * temporarily in a safe state, instead of exiting and re-entering the
+ * unsafe state.
+ *
+ * It can also be called in a safe context before doing an expensive safe
+ * operation. It does not make sense to do the operation if a higher
+ * priority context has taken the lock.
+ *
+ * When this function returns false then the calling context no longer owns
+ * the console and is no longer allowed to go forward. In this case it must
+ * back out immediately and carefully. The buffer content is also no longer
+ * trusted since it no longer belongs to the calling context.
+ */
+bool nbcon_can_proceed(struct nbcon_write_context *wctxt)
+{
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+ struct console *con = ctxt->console;
+ struct nbcon_state cur;
+
+ nbcon_state_read(con, &cur);
+
+ return nbcon_context_can_proceed(ctxt, &cur);
+}
+EXPORT_SYMBOL_GPL(nbcon_can_proceed);
+
+#define nbcon_context_enter_unsafe(c) __nbcon_context_update_unsafe(c, true)
+#define nbcon_context_exit_unsafe(c) __nbcon_context_update_unsafe(c, false)
+
+/**
+ * __nbcon_context_update_unsafe - Update the unsafe bit in @con->nbcon_state
+ * @ctxt: The nbcon context from nbcon_context_try_acquire()
+ * @unsafe: The new value for the unsafe bit
+ *
+ * Return: True if the unsafe state was updated and this context still
+ * owns the console. Otherwise false if ownership was handed
+ * over or taken.
+ *
+ * This function allows console owners to modify the unsafe status of the
+ * console.
+ *
+ * When this function returns false then the calling context no longer owns
+ * the console and is no longer allowed to go forward. In this case it must
+ * back out immediately and carefully. The buffer content is also no longer
+ * trusted since it no longer belongs to the calling context.
+ *
+ * Internal helper to avoid duplicated code.
+ */
+static bool __nbcon_context_update_unsafe(struct nbcon_context *ctxt, bool unsafe)
+{
+ struct console *con = ctxt->console;
+ struct nbcon_state cur;
+ struct nbcon_state new;
+
+ nbcon_state_read(con, &cur);
+
+ do {
+ /*
+ * The unsafe bit must not be cleared if an
+ * unsafe hostile takeover has occurred.
+ */
+ if (!unsafe && cur.unsafe_takeover)
+ goto out;
+
+ if (!nbcon_context_can_proceed(ctxt, &cur))
+ return false;
+
+ new.atom = cur.atom;
+ new.unsafe = unsafe;
+ } while (!nbcon_state_try_cmpxchg(con, &cur, &new));
+
+ cur.atom = new.atom;
+out:
+ return nbcon_context_can_proceed(ctxt, &cur);
+}
+
+/**
+ * nbcon_enter_unsafe - Enter an unsafe region in the driver
+ * @wctxt: The write context that was handed to the write function
+ *
+ * Return: True if this context still owns the console. False if
+ * ownership was handed over or taken.
+ *
+ * When this function returns false then the calling context no longer owns
+ * the console and is no longer allowed to go forward. In this case it must
+ * back out immediately and carefully. The buffer content is also no longer
+ * trusted since it no longer belongs to the calling context.
+ */
+bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt)
+{
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+
+ return nbcon_context_enter_unsafe(ctxt);
+}
+EXPORT_SYMBOL_GPL(nbcon_enter_unsafe);
+
+/**
+ * nbcon_exit_unsafe - Exit an unsafe region in the driver
+ * @wctxt: The write context that was handed to the write function
+ *
+ * Return: True if this context still owns the console. False if
+ * ownership was handed over or taken.
+ *
+ * When this function returns false then the calling context no longer owns
+ * the console and is no longer allowed to go forward. In this case it must
+ * back out immediately and carefully. The buffer content is also no longer
+ * trusted since it no longer belongs to the calling context.
+ */
+bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt)
+{
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+
+ return nbcon_context_exit_unsafe(ctxt);
+}
+EXPORT_SYMBOL_GPL(nbcon_exit_unsafe);
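+
+/*
+ * Sketch of how a driver write callback is expected to bracket its
+ * non-reentrant hardware access (hypothetical driver; the foo_* names
+ * are assumptions, only the wctxt handling mirrors the code above):
+ *
+ *   static bool foo_write_atomic(struct console *con,
+ *                                struct nbcon_write_context *wctxt)
+ *   {
+ *           if (!nbcon_enter_unsafe(wctxt))
+ *                   return false;
+ *           foo_emit_chars(con, wctxt->outbuf, wctxt->len);
+ *           return nbcon_exit_unsafe(wctxt);
+ *   }
+ */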
+
+/**
+ * nbcon_emit_next_record - Emit a record in the acquired context
+ * @wctxt: The write context that will be handed to the write function
+ *
+ * Return: True if this context still owns the console. False if
+ * ownership was handed over or taken.
+ *
+ * When this function returns false then the calling context no longer owns
+ * the console and is no longer allowed to go forward. In this case it must
+ * back out immediately and carefully. The buffer content is also no longer
+ * trusted since it no longer belongs to the calling context. If the caller
+ * wants to do more it must reacquire the console first.
+ *
+ * When true is returned, @wctxt->ctxt.backlog indicates whether there are
+ * still records pending in the ringbuffer.
+ */
+__maybe_unused
+static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt)
+{
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+ struct console *con = ctxt->console;
+ bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED;
+ struct printk_message pmsg = {
+ .pbufs = ctxt->pbufs,
+ };
+ unsigned long con_dropped;
+ struct nbcon_state cur;
+ unsigned long dropped;
+ bool done;
+
+ /*
+ * The printk buffers are filled within an unsafe section. This
+ * prevents NBCON_PRIO_NORMAL and NBCON_PRIO_EMERGENCY from
+ * clobbering each other.
+ */
+
+ if (!nbcon_context_enter_unsafe(ctxt))
+ return false;
+
+ ctxt->backlog = printk_get_next_message(&pmsg, ctxt->seq, is_extended, true);
+ if (!ctxt->backlog)
+ return nbcon_context_exit_unsafe(ctxt);
+
+ /*
+ * @con->dropped is not protected in case of an unsafe hostile
+ * takeover. In that situation the update can be racy so
+ * annotate it accordingly.
+ */
+ con_dropped = data_race(READ_ONCE(con->dropped));
+
+ dropped = con_dropped + pmsg.dropped;
+ if (dropped && !is_extended)
+ console_prepend_dropped(&pmsg, dropped);
+
+ if (!nbcon_context_exit_unsafe(ctxt))
+ return false;
+
+ /* For skipped records just update seq/dropped in @con. */
+ if (pmsg.outbuf_len == 0)
+ goto update_con;
+
+ /* Initialize the write context for driver callbacks. */
+ wctxt->outbuf = &pmsg.pbufs->outbuf[0];
+ wctxt->len = pmsg.outbuf_len;
+ nbcon_state_read(con, &cur);
+ wctxt->unsafe_takeover = cur.unsafe_takeover;
+
+ if (con->write_atomic) {
+ done = con->write_atomic(con, wctxt);
+ } else {
+ nbcon_context_release(ctxt);
+ WARN_ON_ONCE(1);
+ done = false;
+ }
+
+ /* If not done, the emit was aborted. */
+ if (!done)
+ return false;
+
+ /*
+ * Since any dropped message was successfully output, reset the
+ * dropped count for the console.
+ */
+ dropped = 0;
+update_con:
+ /*
+ * The dropped count and the sequence number are updated within an
+ * unsafe section. This limits update races to the panic context and
+ * allows the panic context to win.
+ */
+
+ if (!nbcon_context_enter_unsafe(ctxt))
+ return false;
+
+ if (dropped != con_dropped) {
+ /* Counterpart to the READ_ONCE() above. */
+ WRITE_ONCE(con->dropped, dropped);
+ }
+
+ nbcon_seq_try_update(ctxt, pmsg.seq + 1);
+
+ return nbcon_context_exit_unsafe(ctxt);
+}
+
+/**
+ * nbcon_alloc - Allocate buffers needed by the nbcon console
+ * @con: Console to allocate buffers for
+ *
+ * Return: True on success. False otherwise and the console cannot
+ * be used.
+ *
+ * This is not part of nbcon_init() because buffer allocation must
+ * be performed earlier in the console registration process.
+ */
+bool nbcon_alloc(struct console *con)
+{
+ if (con->flags & CON_BOOT) {
+ /*
+ * Boot console printing is synchronized with legacy console
+ * printing, so boot consoles can share the same global printk
+ * buffers.
+ */
+ con->pbufs = &printk_shared_pbufs;
+ } else {
+ con->pbufs = kmalloc(sizeof(*con->pbufs), GFP_KERNEL);
+ if (!con->pbufs) {
+ con_printk(KERN_ERR, con, "failed to allocate printing buffer\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * nbcon_init - Initialize the nbcon console specific data
+ * @con: Console to initialize
+ *
+ * nbcon_alloc() *must* be called and succeed before this function
+ * is called.
+ *
+ * This function expects that the legacy @con->seq has been set.
+ */
+void nbcon_init(struct console *con)
+{
+ struct nbcon_state state = { };
+
+ /* nbcon_alloc() must have been called and successful! */
+ BUG_ON(!con->pbufs);
+
+ nbcon_seq_force(con, con->seq);
+ nbcon_state_set(con, &state);
+}
+
+/**
+ * nbcon_free - Free and cleanup the nbcon console specific data
+ * @con: Console to free/cleanup nbcon data
+ */
+void nbcon_free(struct console *con)
+{
+ struct nbcon_state state = { };
+
+ nbcon_state_set(con, &state);
+
+ /* Boot consoles share global printk buffers. */
+ if (!(con->flags & CON_BOOT))
+ kfree(con->pbufs);
+
+ con->pbufs = NULL;
+}
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 0b3af1529778..f2444b581e16 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -102,12 +102,6 @@ DEFINE_STATIC_SRCU(console_srcu);
*/
int __read_mostly suppress_printk;
-/*
- * During panic, heavy printk by other CPUs can delay the
- * panic and risk deadlock on console resources.
- */
-static int __read_mostly suppress_panic_printk;
-
#ifdef CONFIG_LOCKDEP
static struct lockdep_map console_lock_dep_map = {
.name = "console_lock"
@@ -445,6 +439,12 @@ static int console_msg_format = MSG_FORMAT_DEFAULT;
static DEFINE_MUTEX(syslog_lock);
#ifdef CONFIG_PRINTK
+/*
+ * During panic, heavy printk by other CPUs can delay the
+ * panic and risk deadlock on console resources.
+ */
+static int __read_mostly suppress_panic_printk;
+
DECLARE_WAIT_QUEUE_HEAD(log_wait);
/* All 3 protected by @syslog_lock. */
/* the next printk record to read by syslog(READ) or /proc/kmsg */
@@ -494,7 +494,7 @@ _DEFINE_PRINTKRB(printk_rb_static, CONFIG_LOG_BUF_SHIFT - PRB_AVGBITS,
static struct printk_ringbuffer printk_rb_dynamic;
-static struct printk_ringbuffer *prb = &printk_rb_static;
+struct printk_ringbuffer *prb = &printk_rb_static;
/*
* We cannot access per-CPU data (e.g. per-CPU flush irq_work) before
@@ -698,9 +698,6 @@ out:
return len;
}
-static bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
- bool is_extended, bool may_supress);
-
/* /dev/kmsg - userspace message inject/listen interface */
struct devkmsg_user {
atomic64_t seq;
@@ -1669,7 +1666,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
prb_rec_init_rd(&r, &info, text, PRINTK_MESSAGE_MAX);
- len = 0;
prb_for_each_record(seq, prb, seq, &r) {
int textlen;
@@ -2349,22 +2345,6 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
static u64 syslog_seq;
-static size_t record_print_text(const struct printk_record *r,
- bool syslog, bool time)
-{
- return 0;
-}
-static ssize_t info_print_ext_header(char *buf, size_t size,
- struct printk_info *info)
-{
- return 0;
-}
-static ssize_t msg_print_ext_body(char *buf, size_t size,
- char *text, size_t text_len,
- struct dev_printk_info *dev_info) { return 0; }
-static void console_lock_spinning_enable(void) { }
-static int console_lock_spinning_disable_and_check(int cookie) { return 0; }
-static bool suppress_message_printing(int level) { return false; }
static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; }
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
@@ -2404,13 +2384,21 @@ static void set_user_specified(struct console_cmdline *c, bool user_specified)
console_set_on_cmdline = 1;
}
-static int __add_preferred_console(char *name, int idx, char *options,
+static int __add_preferred_console(const char *name, const short idx, char *options,
char *brl_options, bool user_specified)
{
struct console_cmdline *c;
int i;
/*
+ * We use a signed short index for struct console so that device drivers
+ * can indicate a not yet assigned index or port. However, a negative
+ * index value is not valid for a preferred console.
+ */
+ if (idx < 0)
+ return -EINVAL;
+
+ /*
* See if this tty is not yet registered, and
* if we have a slot free.
*/
@@ -2513,7 +2501,7 @@ __setup("console=", console_setup);
* commonly to provide a default console (ie from PROM variables) when
* the user has not supplied one.
*/
-int add_preferred_console(char *name, int idx, char *options)
+int add_preferred_console(const char *name, const short idx, char *options)
{
return __add_preferred_console(name, idx, options, NULL, false);
}
@@ -2718,6 +2706,8 @@ static void __console_unlock(void)
up_console_sem();
}
+#ifdef CONFIG_PRINTK
+
/*
* Prepend the message in @pmsg->pbufs->outbuf with a "dropped message". This
* is achieved by shifting the existing message over and inserting the dropped
@@ -2732,8 +2722,7 @@ static void __console_unlock(void)
*
* If @pmsg->pbufs->outbuf is modified, @pmsg->outbuf_len is updated.
*/
-#ifdef CONFIG_PRINTK
-static void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped)
+void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped)
{
struct printk_buffers *pbufs = pmsg->pbufs;
const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
@@ -2764,9 +2753,6 @@ static void console_prepend_dropped(struct printk_message *pmsg, unsigned long d
memcpy(outbuf, scratchbuf, len);
pmsg->outbuf_len += len;
}
-#else
-#define console_prepend_dropped(pmsg, dropped)
-#endif /* CONFIG_PRINTK */
/*
* Read and format the specified record (or a later record if the specified
@@ -2787,8 +2773,8 @@ static void console_prepend_dropped(struct printk_message *pmsg, unsigned long d
* of @pmsg are valid. (See the documentation of struct printk_message
* for information about the @pmsg fields.)
*/
-static bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
- bool is_extended, bool may_suppress)
+bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
+ bool is_extended, bool may_suppress)
{
static int panic_console_dropped;
@@ -2847,6 +2833,13 @@ out:
}
/*
+ * Used as the printk buffers for non-panic, serialized console printing.
+ * This is for legacy (!CON_NBCON) as well as all boot (CON_BOOT) consoles.
+ * Its usage requires the console_lock to be held.
+ */
+struct printk_buffers printk_shared_pbufs;
+
+/*
* Print one record for the given console. The record printed is whatever
* record is the next available record for the given console.
*
@@ -2863,12 +2856,10 @@ out:
*/
static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
{
- static struct printk_buffers pbufs;
-
bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED;
- char *outbuf = &pbufs.outbuf[0];
+ char *outbuf = &printk_shared_pbufs.outbuf[0];
struct printk_message pmsg = {
- .pbufs = &pbufs,
+ .pbufs = &printk_shared_pbufs,
};
unsigned long flags;
@@ -2919,6 +2910,16 @@ skip:
return true;
}
+#else
+
+static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
+{
+ *handover = false;
+ return false;
+}
+
+#endif /* CONFIG_PRINTK */
+
/*
* Print out all remaining records to all consoles.
*
@@ -3163,6 +3164,7 @@ void console_flush_on_panic(enum con_flush_mode mode)
if (mode == CONSOLE_REPLAY_ALL) {
struct console *c;
+ short flags;
int cookie;
u64 seq;
@@ -3170,11 +3172,17 @@ void console_flush_on_panic(enum con_flush_mode mode)
cookie = console_srcu_read_lock();
for_each_console_srcu(c) {
- /*
- * This is an unsynchronized assignment, but the
- * kernel is in "hope and pray" mode anyway.
- */
- c->seq = seq;
+ flags = console_srcu_read_flags(c);
+
+ if (flags & CON_NBCON) {
+ nbcon_seq_force(c, seq);
+ } else {
+ /*
+ * This is an unsynchronized assignment. On
+ * panic, legacy consoles are only best effort.
+ */
+ c->seq = seq;
+ }
}
console_srcu_read_unlock(cookie);
}
@@ -3326,11 +3334,6 @@ static void try_enable_default_console(struct console *newcon)
newcon->flags |= CON_CONSDEV;
}
-#define con_printk(lvl, con, fmt, ...) \
- printk(lvl pr_fmt("%sconsole [%s%d] " fmt), \
- (con->flags & CON_BOOT) ? "boot" : "", \
- con->name, con->index, ##__VA_ARGS__)
-
static void console_init_seq(struct console *newcon, bool bootcon_registered)
{
struct console *con;
@@ -3444,6 +3447,15 @@ void register_console(struct console *newcon)
goto unlock;
}
+ if (newcon->flags & CON_NBCON) {
+ /*
+ * Ensure the nbcon console buffers can be allocated
+ * before modifying any global data.
+ */
+ if (!nbcon_alloc(newcon))
+ goto unlock;
+ }
+
/*
* See if we want to enable this console driver by default.
*
@@ -3471,8 +3483,11 @@ void register_console(struct console *newcon)
err = try_enable_preferred_console(newcon, false);
/* printk() messages are not printed to the Braille console. */
- if (err || newcon->flags & CON_BRL)
+ if (err || newcon->flags & CON_BRL) {
+ if (newcon->flags & CON_NBCON)
+ nbcon_free(newcon);
goto unlock;
+ }
/*
* If we have a bootconsole, and are switching to a real console,
@@ -3488,6 +3503,9 @@ void register_console(struct console *newcon)
newcon->dropped = 0;
console_init_seq(newcon, bootcon_registered);
+ if (newcon->flags & CON_NBCON)
+ nbcon_init(newcon);
+
/*
* Put this console in the list - keep the
* preferred driver at the head of the list.
@@ -3579,6 +3597,9 @@ static int unregister_console_locked(struct console *console)
*/
synchronize_srcu(&console_srcu);
+ if (console->flags & CON_NBCON)
+ nbcon_free(console);
+
console_sysfs_notify();
if (console->exit)
@@ -3728,10 +3749,12 @@ late_initcall(printk_late_init);
/* If @con is specified, only wait for that console. Otherwise wait for all. */
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress)
{
- int remaining = timeout_ms;
+ unsigned long timeout_jiffies = msecs_to_jiffies(timeout_ms);
+ unsigned long remaining_jiffies = timeout_jiffies;
struct console *c;
u64 last_diff = 0;
u64 printk_seq;
+ short flags;
int cookie;
u64 diff;
u64 seq;
@@ -3745,6 +3768,9 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
console_unlock();
for (;;) {
+ unsigned long begin_jiffies;
+ unsigned long slept_jiffies;
+
diff = 0;
/*
@@ -3759,6 +3785,9 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
for_each_console_srcu(c) {
if (con && con != c)
continue;
+
+ flags = console_srcu_read_flags(c);
+
/*
* If consoles are not usable, it cannot be expected
* that they make forward progress, so only increment
@@ -3766,31 +3795,33 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
*/
if (!console_is_usable(c))
continue;
- printk_seq = c->seq;
+
+ if (flags & CON_NBCON) {
+ printk_seq = nbcon_seq_read(c);
+ } else {
+ printk_seq = c->seq;
+ }
+
if (printk_seq < seq)
diff += seq - printk_seq;
}
console_srcu_read_unlock(cookie);
if (diff != last_diff && reset_on_progress)
- remaining = timeout_ms;
+ remaining_jiffies = timeout_jiffies;
console_unlock();
/* Note: @diff is 0 if there are no usable consoles. */
- if (diff == 0 || remaining == 0)
+ if (diff == 0 || remaining_jiffies == 0)
break;
- if (remaining < 0) {
- /* no timeout limit */
- msleep(100);
- } else if (remaining < 100) {
- msleep(remaining);
- remaining = 0;
- } else {
- msleep(100);
- remaining -= 100;
- }
+ /* msleep(1) might sleep much longer. Check time by jiffies. */
+ begin_jiffies = jiffies;
+ msleep(1);
+ slept_jiffies = jiffies - begin_jiffies;
+
+ remaining_jiffies -= min(slept_jiffies, remaining_jiffies);
last_diff = diff;
}
@@ -4194,7 +4225,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
prb_rec_init_rd(&r, &info, buf, size);
- len = 0;
prb_for_each_record(seq, prb, seq, &r) {
if (r.info->seq >= iter->next_seq)
break;
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 0d866eaa4cc8..b531c33e9545 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -500,6 +500,7 @@ static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_async_hurry(void) { }
static inline void rcu_async_relax(void) { }
+static inline bool rcu_cpu_online(int cpu) { return true; }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void); /* Internal RCU use. */
bool rcu_gp_is_expedited(void); /* Internal RCU use. */
@@ -509,6 +510,7 @@ void rcu_unexpedite_gp(void);
void rcu_async_hurry(void);
void rcu_async_relax(void);
void rcupdate_announce_bootup_oddness(void);
+bool rcu_cpu_online(int cpu);
#ifdef CONFIG_TASKS_RCU_GENERIC
void show_rcu_tasks_gp_kthreads(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 1fa631168594..f54d5782eca0 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -895,10 +895,36 @@ static void rcu_tasks_pregp_step(struct list_head *hop)
synchronize_rcu();
}
+/* Check for quiescent states since the pregp's synchronize_rcu() */
+static bool rcu_tasks_is_holdout(struct task_struct *t)
+{
+ int cpu;
+
+ /* Has the task been seen voluntarily sleeping? */
+ if (!READ_ONCE(t->on_rq))
+ return false;
+
+ /*
+ * Idle tasks (or idle injection) within the idle loop are RCU-tasks
+ * quiescent states. But CPU boot code performed by the idle task
+ * isn't a quiescent state.
+ */
+ if (is_idle_task(t))
+ return false;
+
+ cpu = task_cpu(t);
+
+ /* Idle tasks on offline CPUs are RCU-tasks quiescent states. */
+ if (t == idle_task(cpu) && !rcu_cpu_online(cpu))
+ return false;
+
+ return true;
+}
+
/* Per-task initial processing. */
static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
{
- if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
+ if (t != current && rcu_tasks_is_holdout(t)) {
get_task_struct(t);
t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
WRITE_ONCE(t->rcu_tasks_holdout, true);
@@ -947,7 +973,7 @@ static void check_holdout_task(struct task_struct *t,
if (!READ_ONCE(t->rcu_tasks_holdout) ||
t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
- !READ_ONCE(t->on_rq) ||
+ !rcu_tasks_is_holdout(t) ||
(IS_ENABLED(CONFIG_NO_HZ_FULL) &&
!is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
WRITE_ONCE(t->rcu_tasks_holdout, false);
@@ -1525,7 +1551,7 @@ static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
} else {
// The task is not running, so C-language access is safe.
nesting = t->trc_reader_nesting;
- WARN_ON_ONCE(ofl && task_curr(t) && !is_idle_task(t));
+ WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t))));
if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl)
n_heavy_reader_ofl_updates++;
}
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index d3a97e129020..3ac3c846105f 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -755,14 +755,19 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
}
/*
- * Return true if the specified CPU has passed through a quiescent
- * state by virtue of being in or having passed through an dynticks
- * idle state since the last call to dyntick_save_progress_counter()
- * for this same CPU, or by virtue of having been offline.
+ * Returns positive if the specified CPU has passed through a quiescent state
+ * by virtue of being in or having passed through a dynticks idle state since
+ * the last call to dyntick_save_progress_counter() for this same CPU, or by
+ * virtue of having been offline.
+ *
+ * Returns negative if the specified CPU needs a force resched.
+ *
+ * Returns zero otherwise.
*/
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
unsigned long jtsq;
+ int ret = 0;
struct rcu_node *rnp = rdp->mynode;
/*
@@ -848,8 +853,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
(time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
rcu_state.cbovld)) {
WRITE_ONCE(rdp->rcu_urgent_qs, true);
- resched_cpu(rdp->cpu);
WRITE_ONCE(rdp->last_fqs_resched, jiffies);
+ ret = -1;
}
/*
@@ -862,8 +867,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
if (time_after(jiffies, rcu_state.jiffies_resched)) {
if (time_after(jiffies,
READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
- resched_cpu(rdp->cpu);
WRITE_ONCE(rdp->last_fqs_resched, jiffies);
+ ret = -1;
}
if (IS_ENABLED(CONFIG_IRQ_WORK) &&
!rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
@@ -892,7 +897,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
}
}
- return 0;
+ return ret;
}
/* Trace-event wrapper function for trace_rcu_future_grace_period. */
@@ -2271,15 +2276,15 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
{
int cpu;
unsigned long flags;
- unsigned long mask;
- struct rcu_data *rdp;
struct rcu_node *rnp;
rcu_state.cbovld = rcu_state.cbovldnext;
rcu_state.cbovldnext = false;
rcu_for_each_leaf_node(rnp) {
+ unsigned long mask = 0;
+ unsigned long rsmask = 0;
+
cond_resched_tasks_rcu_qs();
- mask = 0;
raw_spin_lock_irqsave_rcu_node(rnp, flags);
rcu_state.cbovldnext |= !!rnp->cbovldmask;
if (rnp->qsmask == 0) {
@@ -2297,11 +2302,17 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
continue;
}
for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
+ struct rcu_data *rdp;
+ int ret;
+
rdp = per_cpu_ptr(&rcu_data, cpu);
- if (f(rdp)) {
+ ret = f(rdp);
+ if (ret > 0) {
mask |= rdp->grpmask;
rcu_disable_urgency_upon_qs(rdp);
}
+ if (ret < 0)
+ rsmask |= rdp->grpmask;
}
if (mask != 0) {
/* Idle/offline CPUs, report (releases rnp->lock). */
@@ -2310,6 +2321,9 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
/* Nothing to do here, so just drop the lock. */
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
+
+ for_each_leaf_node_cpu_mask(rnp, cpu, rsmask)
+ resched_cpu(cpu);
}
}
@@ -4195,6 +4209,13 @@ static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
}
+bool rcu_cpu_online(int cpu)
+{
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+
+ return rcu_rdp_cpu_online(rdp);
+}
+
#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
/*
diff --git a/kernel/resource.c b/kernel/resource.c
index b1763b2fd7ef..866ef3663a0b 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -56,33 +56,17 @@ struct resource_constraint {
static DEFINE_RWLOCK(resource_lock);
-static struct resource *next_resource(struct resource *p)
+static struct resource *next_resource(struct resource *p, bool skip_children)
{
- if (p->child)
+ if (!skip_children && p->child)
return p->child;
while (!p->sibling && p->parent)
p = p->parent;
return p->sibling;
}
-static struct resource *next_resource_skip_children(struct resource *p)
-{
- while (!p->sibling && p->parent)
- p = p->parent;
- return p->sibling;
-}
-
#define for_each_resource(_root, _p, _skip_children) \
- for ((_p) = (_root)->child; (_p); \
- (_p) = (_skip_children) ? next_resource_skip_children(_p) : \
- next_resource(_p))
-
-static void *r_next(struct seq_file *m, void *v, loff_t *pos)
-{
- struct resource *p = v;
- (*pos)++;
- return (void *)next_resource(p);
-}
+ for ((_p) = (_root)->child; (_p); (_p) = next_resource(_p, _skip_children))
#ifdef CONFIG_PROC_FS
@@ -91,14 +75,28 @@ enum { MAX_IORES_LEVEL = 5 };
static void *r_start(struct seq_file *m, loff_t *pos)
__acquires(resource_lock)
{
- struct resource *p = pde_data(file_inode(m->file));
- loff_t l = 0;
+ struct resource *root = pde_data(file_inode(m->file));
+ struct resource *p;
+ loff_t l = *pos;
+
read_lock(&resource_lock);
- for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
- ;
+ for_each_resource(root, p, false) {
+ if (l-- == 0)
+ break;
+ }
+
return p;
}
+static void *r_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct resource *p = v;
+
+ (*pos)++;
+
+ return (void *)next_resource(p, false);
+}
+
static void r_stop(struct seq_file *m, void *v)
__releases(resource_lock)
{
@@ -336,7 +334,7 @@ static int find_next_iomem_res(resource_size_t start, resource_size_t end,
read_lock(&resource_lock);
- for (p = iomem_resource.child; p; p = next_resource(p)) {
+ for_each_resource(&iomem_resource, p, false) {
/* If we passed the resource we are looking for, stop */
if (p->start > end) {
p = NULL;
@@ -1641,13 +1639,12 @@ __setup("reserve=", reserve_setup);
*/
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
- struct resource *p = &iomem_resource;
resource_size_t end = addr + size - 1;
+ struct resource *p;
int err = 0;
- loff_t l;
read_lock(&resource_lock);
- for (p = p->child; p ; p = r_next(NULL, p, &l)) {
+ for_each_resource(&iomem_resource, p, false) {
/*
* We can probably skip the resources without
* IORESOURCE_IO attribute?
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index df697c74d519..84e8a0f6e4e0 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1252,9 +1252,7 @@ static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
};
#ifdef CONFIG_KEYS
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
- "kfuncs which will be used in BPF programs");
+__bpf_kfunc_start_defs();
/**
* bpf_lookup_user_key - lookup a key by its serial
@@ -1404,7 +1402,7 @@ __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
}
#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
-__diag_pop();
+__bpf_kfunc_end_defs();
BTF_SET8_START(key_sig_kfunc_set)
BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 515cafdb18d9..43cc47d7faaf 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2056,7 +2056,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
retries = 10;
success = false;
while (retries--) {
- struct list_head *head_page, *prev_page, *r;
+ struct list_head *head_page, *prev_page;
struct list_head *last_page, *first_page;
struct list_head *head_page_with_bit;
struct buffer_page *hpage = rb_set_head_page(cpu_buffer);
@@ -2075,9 +2075,9 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
last_page->next = head_page_with_bit;
first_page->prev = prev_page;
- r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
-
- if (r == head_page_with_bit) {
+ /* caution: head_page_with_bit gets updated on cmpxchg failure */
+ if (try_cmpxchg(&prev_page->next,
+ &head_page_with_bit, first_page)) {
/*
* yay, we replaced the page pointer to our new list,
* now, we just have to update to head page's prev
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index abaaf516fcae..9aebf904ff97 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -54,12 +54,6 @@
#include "trace.h"
#include "trace_output.h"
-/*
- * On boot up, the ring buffer is set to the minimum size, so that
- * we do not waste memory on systems that are not using tracing.
- */
-bool ring_buffer_expanded;
-
#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
* We need to change this state when a selftest is running.
@@ -202,7 +196,7 @@ static int __init set_cmdline_ftrace(char *str)
strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
default_bootup_tracer = bootup_tracer_buf;
/* We are using ftrace early, expand it */
- ring_buffer_expanded = true;
+ trace_set_ring_buffer_expanded(NULL);
return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
@@ -247,7 +241,7 @@ static int __init boot_alloc_snapshot(char *str)
} else {
allocate_snapshot = true;
/* We also need the main ring buffer expanded */
- ring_buffer_expanded = true;
+ trace_set_ring_buffer_expanded(NULL);
}
return 1;
}
@@ -490,6 +484,13 @@ static struct trace_array global_trace = {
.trace_flags = TRACE_DEFAULT_FLAGS,
};
+void trace_set_ring_buffer_expanded(struct trace_array *tr)
+{
+ if (!tr)
+ tr = &global_trace;
+ tr->ring_buffer_expanded = true;
+}
+
LIST_HEAD(ftrace_trace_arrays);
int trace_array_get(struct trace_array *this_tr)
@@ -1730,15 +1731,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
int len;
- if (trace_seq_used(s) <= s->seq.readpos)
+ if (trace_seq_used(s) <= s->readpos)
return -EBUSY;
- len = trace_seq_used(s) - s->seq.readpos;
+ len = trace_seq_used(s) - s->readpos;
if (cnt > len)
cnt = len;
- memcpy(buf, s->buffer + s->seq.readpos, cnt);
+ memcpy(buf, s->buffer + s->readpos, cnt);
- s->seq.readpos += cnt;
+ s->readpos += cnt;
return cnt;
}
@@ -2012,7 +2013,7 @@ static int run_tracer_selftest(struct tracer *type)
#ifdef CONFIG_TRACER_MAX_TRACE
if (type->use_max_tr) {
/* If we expanded the buffers, make sure the max is expanded too */
- if (ring_buffer_expanded)
+ if (tr->ring_buffer_expanded)
ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
RING_BUFFER_ALL_CPUS);
tr->allocated_snapshot = true;
@@ -2038,7 +2039,7 @@ static int run_tracer_selftest(struct tracer *type)
tr->allocated_snapshot = false;
/* Shrink the max buffer again */
- if (ring_buffer_expanded)
+ if (tr->ring_buffer_expanded)
ring_buffer_resize(tr->max_buffer.buffer, 1,
RING_BUFFER_ALL_CPUS);
}
@@ -3403,7 +3404,7 @@ void trace_printk_init_buffers(void)
pr_warn("**********************************************************\n");
/* Expand the buffers to set size */
- tracing_update_buffers();
+ tracing_update_buffers(&global_trace);
buffers_allocated = 1;
@@ -3827,15 +3828,6 @@ static bool trace_safe_str(struct trace_iterator *iter, const char *str,
return false;
}
-static const char *show_buffer(struct trace_seq *s)
-{
- struct seq_buf *seq = &s->seq;
-
- seq_buf_terminate(seq);
-
- return seq->buffer;
-}
-
static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
static int test_can_verify_check(const char *fmt, ...)
@@ -3975,7 +3967,7 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
*/
if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
"fmt: '%s' current_buffer: '%s'",
- fmt, show_buffer(&iter->seq))) {
+ fmt, seq_buf_str(&iter->seq.seq))) {
int ret;
/* Try to safely read the string */
@@ -4986,6 +4978,20 @@ int tracing_open_file_tr(struct inode *inode, struct file *filp)
if (ret)
return ret;
+ mutex_lock(&event_mutex);
+
+ /* Fail if the file is marked for removal */
+ if (file->flags & EVENT_FILE_FL_FREED) {
+ trace_array_put(file->tr);
+ ret = -ENODEV;
+ } else {
+ event_file_get(file);
+ }
+
+ mutex_unlock(&event_mutex);
+ if (ret)
+ return ret;
+
filp->private_data = inode->i_private;
return 0;
@@ -4996,6 +5002,7 @@ int tracing_release_file_tr(struct inode *inode, struct file *filp)
struct trace_event_file *file = inode->i_private;
trace_array_put(file->tr);
+ event_file_put(file);
return 0;
}
@@ -6374,7 +6381,7 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
* we use the size that was given, and we can forget about
* expanding it later.
*/
- ring_buffer_expanded = true;
+ trace_set_ring_buffer_expanded(tr);
/* May be called before buffers are initialized */
if (!tr->array_buffer.buffer)
@@ -6452,6 +6459,7 @@ out:
/**
* tracing_update_buffers - used by tracing facility to expand ring buffers
+ * @tr: The tracing instance
*
* To save on memory when the tracing is never used on a system with it
* configured in. The ring buffers are set to a minimum size. But once
@@ -6460,13 +6468,13 @@ out:
*
* This function is to be called when a tracer is about to be used.
*/
-int tracing_update_buffers(void)
+int tracing_update_buffers(struct trace_array *tr)
{
int ret = 0;
mutex_lock(&trace_types_lock);
- if (!ring_buffer_expanded)
- ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
+ if (!tr->ring_buffer_expanded)
+ ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
RING_BUFFER_ALL_CPUS);
mutex_unlock(&trace_types_lock);
@@ -6520,7 +6528,7 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
mutex_lock(&trace_types_lock);
- if (!ring_buffer_expanded) {
+ if (!tr->ring_buffer_expanded) {
ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
RING_BUFFER_ALL_CPUS);
if (ret < 0)
@@ -7006,7 +7014,7 @@ waitagain:
/* Now copy what we have to the user */
sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
- if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
+ if (iter->seq.readpos >= trace_seq_used(&iter->seq))
trace_seq_init(&iter->seq);
/*
@@ -7192,7 +7200,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
}
if (buf_size_same) {
- if (!ring_buffer_expanded)
+ if (!tr->ring_buffer_expanded)
r = sprintf(buf, "%lu (expanded: %lu)\n",
size >> 10,
trace_buf_size >> 10);
@@ -7249,10 +7257,10 @@ tracing_total_entries_read(struct file *filp, char __user *ubuf,
mutex_lock(&trace_types_lock);
for_each_tracing_cpu(cpu) {
size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
- if (!ring_buffer_expanded)
+ if (!tr->ring_buffer_expanded)
expanded_size += trace_buf_size >> 10;
}
- if (ring_buffer_expanded)
+ if (tr->ring_buffer_expanded)
r = sprintf(buf, "%lu\n", size);
else
r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
@@ -7646,7 +7654,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
unsigned long val;
int ret;
- ret = tracing_update_buffers();
+ ret = tracing_update_buffers(tr);
if (ret < 0)
return ret;
@@ -9550,6 +9558,9 @@ static struct trace_array *trace_array_create(const char *name)
if (allocate_trace_buffers(tr, trace_buf_size) < 0)
goto out_free_tr;
+ /* The ring buffer is expanded by default */
+ trace_set_ring_buffer_expanded(tr);
+
if (ftrace_allocate_ftrace_ops(tr) < 0)
goto out_free_tr;
@@ -9759,7 +9770,6 @@ static __init void create_trace_instances(struct dentry *d_tracer)
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
- struct trace_event_file *file;
int cpu;
trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
@@ -9792,11 +9802,7 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
trace_create_file("trace_marker", 0220, d_tracer,
tr, &tracing_mark_fops);
- file = __find_event_file(tr, "ftrace", "print");
- if (file && file->ef)
- eventfs_add_file("trigger", TRACE_MODE_WRITE, file->ef,
- file, &event_trigger_fops);
- tr->trace_marker_file = file;
+ tr->trace_marker_file = __find_event_file(tr, "ftrace", "print");
trace_create_file("trace_marker_raw", 0220, d_tracer,
tr, &tracing_mark_raw_fops);
@@ -10444,7 +10450,7 @@ __init static int tracer_alloc_buffers(void)
trace_printk_init_buffers();
/* To save memory, keep the ring buffer size to its minimum */
- if (ring_buffer_expanded)
+ if (global_trace.ring_buffer_expanded)
ring_buf_size = trace_buf_size;
else
ring_buf_size = 1;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 77debe53f07c..b7f4ea25a194 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -381,7 +381,7 @@ struct trace_array {
struct dentry *dir;
struct dentry *options;
struct dentry *percpu_dir;
- struct dentry *event_dir;
+ struct eventfs_inode *event_dir;
struct trace_options *topts;
struct list_head systems;
struct list_head events;
@@ -410,6 +410,11 @@ struct trace_array {
struct cond_snapshot *cond_snapshot;
#endif
struct trace_func_repeats __percpu *last_func_repeats;
+ /*
+ * On boot up, the ring buffer is set to the minimum size, so that
+ * we do not waste memory on systems that are not using tracing.
+ */
+ bool ring_buffer_expanded;
};
enum {
@@ -761,7 +766,7 @@ extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);
-extern bool ring_buffer_expanded;
+extern void trace_set_ring_buffer_expanded(struct trace_array *tr);
extern bool tracing_selftest_disabled;
#ifdef CONFIG_FTRACE_STARTUP_TEST
@@ -1305,7 +1310,7 @@ static inline void trace_branch_disable(void)
#endif /* CONFIG_BRANCH_TRACER */
/* set ring buffers to default size if not already done so */
-int tracing_update_buffers(void);
+int tracing_update_buffers(struct trace_array *tr);
union trace_synth_field {
u8 as_u8;
@@ -1344,7 +1349,7 @@ struct trace_subsystem_dir {
struct list_head list;
struct event_subsystem *subsystem;
struct trace_array *tr;
- struct eventfs_file *ef;
+ struct eventfs_inode *ei;
int ref_count;
int nr_events;
};
@@ -1664,6 +1669,9 @@ extern void event_trigger_unregister(struct event_command *cmd_ops,
char *glob,
struct event_trigger_data *trigger_data);
+extern void event_file_get(struct trace_event_file *file);
+extern void event_file_put(struct trace_event_file *file);
+
/**
* struct event_trigger_ops - callbacks for trace event triggers
*
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f49d6ddb6342..f29e815ca5b2 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -984,19 +984,41 @@ static void remove_subsystem(struct trace_subsystem_dir *dir)
return;
if (!--dir->nr_events) {
- eventfs_remove(dir->ef);
+ eventfs_remove_dir(dir->ei);
list_del(&dir->list);
__put_system_dir(dir);
}
}
+void event_file_get(struct trace_event_file *file)
+{
+ atomic_inc(&file->ref);
+}
+
+void event_file_put(struct trace_event_file *file)
+{
+ if (WARN_ON_ONCE(!atomic_read(&file->ref))) {
+ if (file->flags & EVENT_FILE_FL_FREED)
+ kmem_cache_free(file_cachep, file);
+ return;
+ }
+
+ if (atomic_dec_and_test(&file->ref)) {
+ /* Count should only go to zero when it is freed */
+ if (WARN_ON_ONCE(!(file->flags & EVENT_FILE_FL_FREED)))
+ return;
+ kmem_cache_free(file_cachep, file);
+ }
+}
+
static void remove_event_file_dir(struct trace_event_file *file)
{
- eventfs_remove(file->ef);
+ eventfs_remove_dir(file->ei);
list_del(&file->list);
remove_subsystem(file->system);
free_event_filter(file->filter);
- kmem_cache_free(file_cachep, file);
+ file->flags |= EVENT_FILE_FL_FREED;
+ event_file_put(file);
}
/*
@@ -1166,7 +1188,7 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
if (!cnt)
return 0;
- ret = tracing_update_buffers();
+ ret = tracing_update_buffers(tr);
if (ret < 0)
return ret;
@@ -1369,7 +1391,7 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
flags = file->flags;
mutex_unlock(&event_mutex);
- if (!file)
+ if (!file || flags & EVENT_FILE_FL_FREED)
return -ENODEV;
if (flags & EVENT_FILE_FL_ENABLED &&
@@ -1397,18 +1419,20 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (ret)
return ret;
- ret = tracing_update_buffers();
- if (ret < 0)
- return ret;
-
switch (val) {
case 0:
case 1:
ret = -ENODEV;
mutex_lock(&event_mutex);
file = event_file_data(filp);
- if (likely(file))
+ if (likely(file && !(file->flags & EVENT_FILE_FL_FREED))) {
+ ret = tracing_update_buffers(file->tr);
+ if (ret < 0) {
+ mutex_unlock(&event_mutex);
+ return ret;
+ }
ret = ftrace_event_enable_disable(file, val);
+ }
mutex_unlock(&event_mutex);
break;
@@ -1482,7 +1506,7 @@ system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (ret)
return ret;
- ret = tracing_update_buffers();
+ ret = tracing_update_buffers(dir->tr);
if (ret < 0)
return ret;
@@ -1681,7 +1705,7 @@ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
mutex_lock(&event_mutex);
file = event_file_data(filp);
- if (file)
+ if (file && !(file->flags & EVENT_FILE_FL_FREED))
print_event_filter(file, s);
mutex_unlock(&event_mutex);
@@ -1956,7 +1980,7 @@ event_pid_write(struct file *filp, const char __user *ubuf,
if (!cnt)
return 0;
- ret = tracing_update_buffers();
+ ret = tracing_update_buffers(tr);
if (ret < 0)
return ret;
@@ -2280,14 +2304,40 @@ create_new_subsystem(const char *name)
return NULL;
}
-static struct eventfs_file *
+static int system_callback(const char *name, umode_t *mode, void **data,
+ const struct file_operations **fops)
+{
+ if (strcmp(name, "filter") == 0)
+ *fops = &ftrace_subsystem_filter_fops;
+
+ else if (strcmp(name, "enable") == 0)
+ *fops = &ftrace_system_enable_fops;
+
+ else
+ return 0;
+
+ *mode = TRACE_MODE_WRITE;
+ return 1;
+}
+
+static struct eventfs_inode *
event_subsystem_dir(struct trace_array *tr, const char *name,
- struct trace_event_file *file, struct dentry *parent)
+ struct trace_event_file *file, struct eventfs_inode *parent)
{
struct event_subsystem *system, *iter;
struct trace_subsystem_dir *dir;
- struct eventfs_file *ef;
- int res;
+ struct eventfs_inode *ei;
+ int nr_entries;
+ static struct eventfs_entry system_entries[] = {
+ {
+ .name = "filter",
+ .callback = system_callback,
+ },
+ {
+ .name = "enable",
+ .callback = system_callback,
+ }
+ };
/* First see if we did not already create this dir */
list_for_each_entry(dir, &tr->systems, list) {
@@ -2295,7 +2345,7 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
if (strcmp(system->name, name) == 0) {
dir->nr_events++;
file->system = dir;
- return dir->ef;
+ return dir->ei;
}
}
@@ -2319,39 +2369,29 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
} else
__get_system(system);
- ef = eventfs_add_subsystem_dir(name, parent);
- if (IS_ERR(ef)) {
+ /* ftrace only has directories, no files */
+ if (strcmp(name, "ftrace") == 0)
+ nr_entries = 0;
+ else
+ nr_entries = ARRAY_SIZE(system_entries);
+
+ ei = eventfs_create_dir(name, parent, system_entries, nr_entries, dir);
+ if (IS_ERR(ei)) {
pr_warn("Failed to create system directory %s\n", name);
__put_system(system);
goto out_free;
}
- dir->ef = ef;
+ dir->ei = ei;
dir->tr = tr;
dir->ref_count = 1;
dir->nr_events = 1;
dir->subsystem = system;
file->system = dir;
- /* the ftrace system is special, do not create enable or filter files */
- if (strcmp(name, "ftrace") != 0) {
-
- res = eventfs_add_file("filter", TRACE_MODE_WRITE,
- dir->ef, dir,
- &ftrace_subsystem_filter_fops);
- if (res) {
- kfree(system->filter);
- system->filter = NULL;
- pr_warn("Could not create tracefs '%s/filter' entry\n", name);
- }
-
- eventfs_add_file("enable", TRACE_MODE_WRITE, dir->ef, dir,
- &ftrace_system_enable_fops);
- }
-
list_add(&dir->list, &tr->systems);
- return dir->ef;
+ return dir->ei;
out_free:
kfree(dir);
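For reference, a minimal sketch of the lazy-creation contract that eventfs_create_dir() and the eventfs_entry callbacks rely on above; the "foo" directory, foo_fops and foo_data below are illustrative placeholders only, not part of this patch:

static int foo_callback(const char *name, umode_t *mode, void **data,
                        const struct file_operations **fops)
{
        /* eventfs calls this on lookup; returning 1 materializes the file */
        if (strcmp(name, "foo") == 0) {
                *mode = TRACE_MODE_WRITE;
                *fops = &foo_fops;              /* placeholder fops */
                return 1;
        }
        return 0;                               /* unknown name: nothing to create */
}

static struct eventfs_entry foo_entries[] = {
        {
                .name           = "foo",
                .callback       = foo_callback,
        },
};

static struct eventfs_inode *foo_register(struct eventfs_inode *parent,
                                          void *foo_data)
{
        struct eventfs_inode *ei;

        /* Register once; "foo" is only created when it is actually accessed */
        ei = eventfs_create_dir("foo_dir", parent, foo_entries,
                                ARRAY_SIZE(foo_entries), foo_data);
        if (IS_ERR(ei))
                pr_warn("could not create foo_dir\n");
        return ei;
}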
@@ -2400,15 +2440,134 @@ event_define_fields(struct trace_event_call *call)
return ret;
}
+static int event_callback(const char *name, umode_t *mode, void **data,
+ const struct file_operations **fops)
+{
+ struct trace_event_file *file = *data;
+ struct trace_event_call *call = file->event_call;
+
+ if (strcmp(name, "format") == 0) {
+ *mode = TRACE_MODE_READ;
+ *fops = &ftrace_event_format_fops;
+ *data = call;
+ return 1;
+ }
+
+ /*
+ * Only event directories that can be enabled should have
+ * triggers or filters, with the exception of the "print"
+ * event that can have a "trigger" file.
+ */
+ if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) {
+ if (call->class->reg && strcmp(name, "enable") == 0) {
+ *mode = TRACE_MODE_WRITE;
+ *fops = &ftrace_enable_fops;
+ return 1;
+ }
+
+ if (strcmp(name, "filter") == 0) {
+ *mode = TRACE_MODE_WRITE;
+ *fops = &ftrace_event_filter_fops;
+ return 1;
+ }
+ }
+
+ if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
+ strcmp(trace_event_name(call), "print") == 0) {
+ if (strcmp(name, "trigger") == 0) {
+ *mode = TRACE_MODE_WRITE;
+ *fops = &event_trigger_fops;
+ return 1;
+ }
+ }
+
+#ifdef CONFIG_PERF_EVENTS
+ if (call->event.type && call->class->reg &&
+ strcmp(name, "id") == 0) {
+ *mode = TRACE_MODE_READ;
+ *data = (void *)(long)call->event.type;
+ *fops = &ftrace_event_id_fops;
+ return 1;
+ }
+#endif
+
+#ifdef CONFIG_HIST_TRIGGERS
+ if (strcmp(name, "hist") == 0) {
+ *mode = TRACE_MODE_READ;
+ *fops = &event_hist_fops;
+ return 1;
+ }
+#endif
+#ifdef CONFIG_HIST_TRIGGERS_DEBUG
+ if (strcmp(name, "hist_debug") == 0) {
+ *mode = TRACE_MODE_READ;
+ *fops = &event_hist_debug_fops;
+ return 1;
+ }
+#endif
+#ifdef CONFIG_TRACE_EVENT_INJECT
+ if (call->event.type && call->class->reg &&
+ strcmp(name, "inject") == 0) {
+ *mode = 0200;
+ *fops = &event_inject_fops;
+ return 1;
+ }
+#endif
+ return 0;
+}
+
static int
-event_create_dir(struct dentry *parent, struct trace_event_file *file)
+event_create_dir(struct eventfs_inode *parent, struct trace_event_file *file)
{
struct trace_event_call *call = file->event_call;
- struct eventfs_file *ef_subsystem = NULL;
struct trace_array *tr = file->tr;
- struct eventfs_file *ef;
+ struct eventfs_inode *e_events;
+ struct eventfs_inode *ei;
const char *name;
+ int nr_entries;
int ret;
+ static struct eventfs_entry event_entries[] = {
+ {
+ .name = "enable",
+ .callback = event_callback,
+ },
+ {
+ .name = "filter",
+ .callback = event_callback,
+ },
+ {
+ .name = "trigger",
+ .callback = event_callback,
+ },
+ {
+ .name = "format",
+ .callback = event_callback,
+ },
+#ifdef CONFIG_PERF_EVENTS
+ {
+ .name = "id",
+ .callback = event_callback,
+ },
+#endif
+#ifdef CONFIG_HIST_TRIGGERS
+ {
+ .name = "hist",
+ .callback = event_callback,
+ },
+#endif
+#ifdef CONFIG_HIST_TRIGGERS_DEBUG
+ {
+ .name = "hist_debug",
+ .callback = event_callback,
+ },
+#endif
+#ifdef CONFIG_TRACE_EVENT_INJECT
+ {
+ .name = "inject",
+ .callback = event_callback,
+ },
+#endif
+ };
/*
* If the trace point header did not define TRACE_SYSTEM
@@ -2418,29 +2577,20 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file)
if (WARN_ON_ONCE(strcmp(call->class->system, TRACE_SYSTEM) == 0))
return -ENODEV;
- ef_subsystem = event_subsystem_dir(tr, call->class->system, file, parent);
- if (!ef_subsystem)
+ e_events = event_subsystem_dir(tr, call->class->system, file, parent);
+ if (!e_events)
return -ENOMEM;
+ nr_entries = ARRAY_SIZE(event_entries);
+
name = trace_event_name(call);
- ef = eventfs_add_dir(name, ef_subsystem);
- if (IS_ERR(ef)) {
+ ei = eventfs_create_dir(name, e_events, event_entries, nr_entries, file);
+ if (IS_ERR(ei)) {
pr_warn("Could not create tracefs '%s' directory\n", name);
return -1;
}
- file->ef = ef;
-
- if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
- eventfs_add_file("enable", TRACE_MODE_WRITE, file->ef, file,
- &ftrace_enable_fops);
-
-#ifdef CONFIG_PERF_EVENTS
- if (call->event.type && call->class->reg)
- eventfs_add_file("id", TRACE_MODE_READ, file->ef,
- (void *)(long)call->event.type,
- &ftrace_event_id_fops);
-#endif
+ file->ei = ei;
ret = event_define_fields(call);
if (ret < 0) {
@@ -2448,35 +2598,6 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file)
return ret;
}
- /*
- * Only event directories that can be enabled should have
- * triggers or filters.
- */
- if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) {
- eventfs_add_file("filter", TRACE_MODE_WRITE, file->ef,
- file, &ftrace_event_filter_fops);
-
- eventfs_add_file("trigger", TRACE_MODE_WRITE, file->ef,
- file, &event_trigger_fops);
- }
-
-#ifdef CONFIG_HIST_TRIGGERS
- eventfs_add_file("hist", TRACE_MODE_READ, file->ef, file,
- &event_hist_fops);
-#endif
-#ifdef CONFIG_HIST_TRIGGERS_DEBUG
- eventfs_add_file("hist_debug", TRACE_MODE_READ, file->ef, file,
- &event_hist_debug_fops);
-#endif
- eventfs_add_file("format", TRACE_MODE_READ, file->ef, call,
- &ftrace_event_format_fops);
-
-#ifdef CONFIG_TRACE_EVENT_INJECT
- if (call->event.type && call->class->reg)
- eventfs_add_file("inject", 0200, file->ef, file,
- &event_inject_fops);
-#endif
-
return 0;
}
@@ -2803,6 +2924,7 @@ trace_create_new_event(struct trace_event_call *call,
atomic_set(&file->tm_ref, 0);
INIT_LIST_HEAD(&file->triggers);
list_add(&file->list, &tr->events);
+ event_file_get(file);
return file;
}
@@ -2824,7 +2946,7 @@ static __init int setup_trace_triggers(char *str)
int i;
strscpy(bootup_trigger_buf, str, COMMAND_LINE_SIZE);
- ring_buffer_expanded = true;
+ trace_set_ring_buffer_expanded(NULL);
disable_tracing_selftest("running event triggers");
buf = bootup_trigger_buf;
@@ -3614,37 +3736,72 @@ static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
static __init int setup_trace_event(char *str)
{
strscpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
- ring_buffer_expanded = true;
+ trace_set_ring_buffer_expanded(NULL);
disable_tracing_selftest("running event tracing");
return 1;
}
__setup("trace_event=", setup_trace_event);
+static int events_callback(const char *name, umode_t *mode, void **data,
+ const struct file_operations **fops)
+{
+ if (strcmp(name, "enable") == 0) {
+ *mode = TRACE_MODE_WRITE;
+ *fops = &ftrace_tr_enable_fops;
+ return 1;
+ }
+
+ if (strcmp(name, "header_page") == 0)
+ *data = ring_buffer_print_page_header;
+
+ else if (strcmp(name, "header_event") == 0)
+ *data = ring_buffer_print_entry_header;
+
+ else
+ return 0;
+
+ *mode = TRACE_MODE_READ;
+ *fops = &ftrace_show_header_fops;
+ return 1;
+}
+
/* Expects to have event_mutex held when called */
static int
create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
{
- struct dentry *d_events;
+ struct eventfs_inode *e_events;
struct dentry *entry;
- int error = 0;
+ int nr_entries;
+ static struct eventfs_entry events_entries[] = {
+ {
+ .name = "enable",
+ .callback = events_callback,
+ },
+ {
+ .name = "header_page",
+ .callback = events_callback,
+ },
+ {
+ .name = "header_event",
+ .callback = events_callback,
+ },
+ };
entry = trace_create_file("set_event", TRACE_MODE_WRITE, parent,
tr, &ftrace_set_event_fops);
if (!entry)
return -ENOMEM;
- d_events = eventfs_create_events_dir("events", parent);
- if (IS_ERR(d_events)) {
+ nr_entries = ARRAY_SIZE(events_entries);
+
+ e_events = eventfs_create_events_dir("events", parent, events_entries,
+ nr_entries, tr);
+ if (IS_ERR(e_events)) {
pr_warn("Could not create tracefs 'events' directory\n");
return -ENOMEM;
}
- error = eventfs_add_events_file("enable", TRACE_MODE_WRITE, d_events,
- tr, &ftrace_tr_enable_fops);
- if (error)
- return -ENOMEM;
-
 /* These are not as crucial, just warn if they are not created */
trace_create_file("set_event_pid", TRACE_MODE_WRITE, parent,
@@ -3654,16 +3811,7 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
TRACE_MODE_WRITE, parent, tr,
&ftrace_set_event_notrace_pid_fops);
- /* ring buffer internal formats */
- eventfs_add_events_file("header_page", TRACE_MODE_READ, d_events,
- ring_buffer_print_page_header,
- &ftrace_show_header_fops);
-
- eventfs_add_events_file("header_event", TRACE_MODE_READ, d_events,
- ring_buffer_print_entry_header,
- &ftrace_show_header_fops);
-
- tr->event_dir = d_events;
+ tr->event_dir = e_events;
return 0;
}
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 33264e510d16..0c611b281a5b 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -2349,6 +2349,9 @@ int apply_event_filter(struct trace_event_file *file, char *filter_string)
struct event_filter *filter = NULL;
int err;
+ if (file->flags & EVENT_FILE_FL_FREED)
+ return -ENODEV;
+
if (!strcmp(strstrip(filter_string), "0")) {
filter_disable(file);
filter = event_filter(file);
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index d06938ae0717..1abc07fba1b9 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -774,23 +774,16 @@ static void last_cmd_set(struct trace_event_file *file, char *str)
{
const char *system = NULL, *name = NULL;
struct trace_event_call *call;
- int len;
if (!str)
return;
- /* sizeof() contains the nul byte */
- len = sizeof(HIST_PREFIX) + strlen(str);
kfree(last_cmd);
- last_cmd = kzalloc(len, GFP_KERNEL);
+
+ last_cmd = kasprintf(GFP_KERNEL, HIST_PREFIX "%s", str);
if (!last_cmd)
return;
- strcpy(last_cmd, HIST_PREFIX);
- /* Again, sizeof() contains the nul byte */
- len -= sizeof(HIST_PREFIX);
- strncat(last_cmd, str, len);
-
if (file) {
call = file->event_call;
system = call->class->system;
diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
index 14cb275a0bab..846e02c0fb59 100644
--- a/kernel/trace/trace_events_synth.c
+++ b/kernel/trace/trace_events_synth.c
@@ -452,7 +452,7 @@ static unsigned int trace_string(struct synth_trace_event *entry,
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
if ((unsigned long)str_val < TASK_SIZE)
- ret = strncpy_from_user_nofault(str_field, str_val, STR_VAR_LEN_MAX);
+ ret = strncpy_from_user_nofault(str_field, (const void __user *)str_val, STR_VAR_LEN_MAX);
else
#endif
ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX);
diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
index b87f41187c6a..9365ce407426 100644
--- a/kernel/trace/trace_events_user.c
+++ b/kernel/trace/trace_events_user.c
@@ -50,18 +50,6 @@
#define EVENT_STATUS_OTHER BIT(7)
/*
- * User register flags are not allowed yet, keep them here until we are
- * ready to expose them out to the user ABI.
- */
-enum user_reg_flag {
- /* Event will not delete upon last reference closing */
- USER_EVENT_REG_PERSIST = 1U << 0,
-
- /* This value or above is currently non-ABI */
- USER_EVENT_REG_MAX = 1U << 1,
-};
-
-/*
* Stores the system name, tables, and locks for a group of events. This
* allows isolation for events by various means.
*/
@@ -220,6 +208,17 @@ static u32 user_event_key(char *name)
return jhash(name, strlen(name), 0);
}
+static bool user_event_capable(u16 reg_flags)
+{
+ /* Persistent events require CAP_PERFMON / CAP_SYS_ADMIN */
+ if (reg_flags & USER_EVENT_REG_PERSIST) {
+ if (!perfmon_capable())
+ return false;
+ }
+
+ return true;
+}
+
static struct user_event *user_event_get(struct user_event *user)
{
refcount_inc(&user->refcnt);
@@ -1811,6 +1810,9 @@ static int user_event_free(struct dyn_event *ev)
if (!user_event_last_ref(user))
return -EBUSY;
+ if (!user_event_capable(user->reg_flags))
+ return -EPERM;
+
return destroy_user_event(user);
}
@@ -1926,10 +1928,13 @@ static int user_event_parse(struct user_event_group *group, char *name,
int argc = 0;
char **argv;
- /* User register flags are not ready yet */
- if (reg_flags != 0 || flags != NULL)
+ /* Currently don't support any text based flags */
+ if (flags != NULL)
return -EINVAL;
+ if (!user_event_capable(reg_flags))
+ return -EPERM;
+
/* Prevent dyn_event from racing */
mutex_lock(&event_mutex);
user = find_user_event(group, name, &key);
@@ -2062,6 +2067,9 @@ static int delete_user_event(struct user_event_group *group, char *name)
if (!user_event_last_ref(user))
return -EBUSY;
+ if (!user_event_capable(user->reg_flags))
+ return -EPERM;
+
return destroy_user_event(user);
}
diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
index bac06ee3b98b..7be97229ddf8 100644
--- a/kernel/trace/trace_seq.c
+++ b/kernel/trace/trace_seq.c
@@ -370,8 +370,12 @@ EXPORT_SYMBOL_GPL(trace_seq_path);
*/
int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, int cnt)
{
+ int ret;
__trace_seq_init(s);
- return seq_buf_to_user(&s->seq, ubuf, cnt);
+ ret = seq_buf_to_user(&s->seq, ubuf, s->readpos, cnt);
+ if (ret > 0)
+ s->readpos += ret;
+ return ret;
}
EXPORT_SYMBOL_GPL(trace_seq_to_user);
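Since trace_seq_to_user() now advances s->readpos on each successful copy, repeated calls walk through the buffer. A small illustrative sketch (not from the patch) of draining a trace_seq in chunks:

static void example_drain(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
        int ret;

        /* Each successful call copies up to cnt bytes and advances s->readpos */
        while ((ret = trace_seq_to_user(s, ubuf, cnt)) > 0)
                ubuf += ret;    /* next call resumes at the updated readpos */
}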
diff --git a/lib/Kconfig b/lib/Kconfig
index 2d90935d5a21..3ea1c830efab 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -772,3 +772,6 @@ config ASN1_ENCODER
config POLYNOMIAL
tristate
+
+config FIRMWARE_TABLE
+ bool
diff --git a/lib/Makefile b/lib/Makefile
index 9e0972f7b764..6b09731d8e61 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -48,7 +48,7 @@ obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \
bsearch.o find_bit.o llist.o lwq.o memweight.o kfifo.o \
percpu-refcount.o rhashtable.o base64.o \
once.o refcount.o rcuref.o usercopy.o errseq.o bucket_locks.o \
- generic-radix-tree.o
+ generic-radix-tree.o bitmap-str.o
obj-$(CONFIG_STRING_SELFTEST) += test_string.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
@@ -409,6 +409,8 @@ obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o
obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
+obj-$(CONFIG_FIRMWARE_TABLE) += fw_table.o
+
# FORTIFY_SOURCE compile-time behavior tests
TEST_FORTIFY_SRCS = $(wildcard $(srctree)/$(src)/test_fortify/*-*.c)
TEST_FORTIFY_LOGS = $(patsubst $(srctree)/$(src)/%.c, %.log, $(TEST_FORTIFY_SRCS))
diff --git a/lib/bitmap-str.c b/lib/bitmap-str.c
new file mode 100644
index 000000000000..be745209507a
--- /dev/null
+++ b/lib/bitmap-str.c
@@ -0,0 +1,510 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bitmap.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/hex.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include "kstrtox.h"
+
+/**
+ * bitmap_parse_user - convert an ASCII hex string in a user buffer into a bitmap
+ *
+ * @ubuf: pointer to user buffer containing string.
+ * @ulen: buffer size in bytes. If string is smaller than this
+ * then it must be terminated with a \0.
+ * @maskp: pointer to bitmap array that will contain result.
+ * @nmaskbits: size of bitmap, in bits.
+ */
+int bitmap_parse_user(const char __user *ubuf,
+ unsigned int ulen, unsigned long *maskp,
+ int nmaskbits)
+{
+ char *buf;
+ int ret;
+
+ buf = memdup_user_nul(ubuf, ulen);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ret = bitmap_parse(buf, UINT_MAX, maskp, nmaskbits);
+
+ kfree(buf);
+ return ret;
+}
+EXPORT_SYMBOL(bitmap_parse_user);
+
+/**
+ * bitmap_print_to_pagebuf - convert bitmap to list or hex format ASCII string
+ * @list: indicates whether the bitmap must be list
+ * @buf: page aligned buffer into which string is placed
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ *
+ * Output format is a comma-separated list of decimal numbers and
+ * ranges if list is specified or hex digits grouped into comma-separated
+ * sets of 8 digits/set. Returns the number of characters written to buf.
+ *
+ * It is assumed that @buf is a pointer into a PAGE_SIZE, page-aligned
+ * area and that sufficient storage remains at @buf to accommodate the
+ * bitmap_print_to_pagebuf() output. Returns the number of characters
+ * actually printed to @buf, excluding terminating '\0'.
+ */
+int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
+ int nmaskbits)
+{
+ ptrdiff_t len = PAGE_SIZE - offset_in_page(buf);
+
+ return list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) :
+ scnprintf(buf, len, "%*pb\n", nmaskbits, maskp);
+}
+EXPORT_SYMBOL(bitmap_print_to_pagebuf);
+
+/**
+ * bitmap_print_to_buf - convert bitmap to list or hex format ASCII string
+ * @list: indicates whether the bitmap must be list
+ * true: print in decimal list format
+ * false: print in hexadecimal bitmask format
+ * @buf: buffer into which string is placed
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ * @off: offset in the string from which we are copying; we copy to @buf
+ * @count: the maximum number of bytes to print
+ */
+static int bitmap_print_to_buf(bool list, char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count)
+{
+ const char *fmt = list ? "%*pbl\n" : "%*pb\n";
+ ssize_t size;
+ void *data;
+
+ data = kasprintf(GFP_KERNEL, fmt, nmaskbits, maskp);
+ if (!data)
+ return -ENOMEM;
+
+ size = memory_read_from_buffer(buf, count, &off, data, strlen(data) + 1);
+ kfree(data);
+
+ return size;
+}
+
+/**
+ * bitmap_print_bitmask_to_buf - convert bitmap to hex bitmask format ASCII string
+ * @buf: buffer into which string is placed
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ * @off: offset in the string from which we are copying; we copy to @buf
+ * @count: the maximum number of bytes to print
+ *
+ * The bitmap_print_to_pagebuf() is used indirectly via its cpumap wrapper
+ * cpumap_print_to_pagebuf() or directly by drivers to export hexadecimal
+ * bitmask and decimal list to userspace by sysfs ABI.
+ * Drivers might be using a normal attribute for this kind of ABIs. A
+ * normal attribute typically has show entry as below::
+ *
+ * static ssize_t example_attribute_show(struct device *dev,
+ * struct device_attribute *attr, char *buf)
+ * {
+ * ...
+ * return bitmap_print_to_pagebuf(true, buf, &mask, nr_trig_max);
+ * }
+ *
+ * show entry of attribute has no offset and count parameters and this
+ * means the file is limited to one page only.
+ * bitmap_print_to_pagebuf() API works terribly well for this kind of
+ * normal attribute with buf parameter and without offset, count::
+ *
+ * bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
+ * int nmaskbits)
+ * {
+ * }
+ *
+ * The problem is once we have a large bitmap, we have a chance to get a
+ * bitmask or list more than one page. Especially for list, it could be
+ * as complex as 0,3,5,7,9,... We have no simple way to know its exact size.
+ * It turns out bin_attribute is a way to break this limit. bin_attribute
+ * has show entry as below::
+ *
+ * static ssize_t
+ * example_bin_attribute_show(struct file *filp, struct kobject *kobj,
+ * struct bin_attribute *attr, char *buf,
+ * loff_t offset, size_t count)
+ * {
+ * ...
+ * }
+ *
+ * With the new offset and count parameters, this makes sysfs ABI be able
+ * to support file size more than one page. For example, offset could be
+ * >= 4096.
+ * bitmap_print_bitmask_to_buf(), bitmap_print_list_to_buf() with their
+ * cpumap wrapper cpumap_print_bitmask_to_buf(), cpumap_print_list_to_buf()
+ * make those drivers be able to support large bitmask and list after they
+ * move to use bin_attribute. As a result, we have to pass the corresponding
+ * parameters such as off, count from bin_attribute show entry to this API.
+ *
+ * The role of cpumap_print_bitmask_to_buf() and cpumap_print_list_to_buf()
+ * is similar with cpumap_print_to_pagebuf(), the difference is that
+ * bitmap_print_to_pagebuf() mainly serves sysfs attribute with the assumption
+ * the destination buffer is exactly one page and won't be more than one page.
+ * cpumap_print_bitmask_to_buf() and cpumap_print_list_to_buf(), on the other
+ * hand, mainly serves bin_attribute which doesn't work with exact one page,
+ * and it can break the size limit of converted decimal list and hexadecimal
+ * bitmask.
+ *
+ * WARNING!
+ *
+ * This function is not a replacement for sprintf() or bitmap_print_to_pagebuf().
+ * It is intended to workaround sysfs limitations discussed above and should be
+ * used carefully in general case for the following reasons:
+ *
+ * - Time complexity is O(nbits^2/count), comparing to O(nbits) for snprintf().
+ * - Memory complexity is O(nbits), comparing to O(1) for snprintf().
+ * - @off and @count are NOT offset and number of bits to print.
+ * - If printing part of bitmap as list, the resulting string is not a correct
+ * list representation of bitmap. Particularly, some bits within or out of
+ * related interval may be erroneously set or unset. The format of the string
+ * may be broken, so bitmap_parselist-like parser may fail parsing it.
+ * - If printing the whole bitmap as list by parts, user must ensure the order
+ * of calls of the function such that the offset is incremented linearly.
+ * - If printing the whole bitmap as list by parts, user must keep bitmap
+ * unchanged between the very first and very last call. Otherwise concatenated
+ * result may be incorrect, and format may be broken.
+ *
+ * Returns the number of characters actually printed to @buf
+ */
+int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count)
+{
+ return bitmap_print_to_buf(false, buf, maskp, nmaskbits, off, count);
+}
+EXPORT_SYMBOL(bitmap_print_bitmask_to_buf);
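A hedged sketch of the bin_attribute usage described in the comment above; example_mask and EXAMPLE_NBITS stand in for a driver's real bitmap and are not part of this patch:

/* Illustrative bin_attribute ->read() exporting a large bitmask via sysfs */
static ssize_t example_mask_read(struct file *filp, struct kobject *kobj,
                                 struct bin_attribute *attr, char *buf,
                                 loff_t off, size_t count)
{
        /* off/count come straight from the bin_attribute read request */
        return bitmap_print_bitmask_to_buf(buf, example_mask, EXAMPLE_NBITS,
                                           off, count);
}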
+
+/**
+ * bitmap_print_list_to_buf - convert bitmap to decimal list format ASCII string
+ * @buf: buffer into which string is placed
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ * @off: offset in the string from which we are copying; we copy to @buf
+ * @count: the maximum number of bytes to print
+ *
+ * Everything is same with the above bitmap_print_bitmask_to_buf() except
+ * the print format.
+ */
+int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count)
+{
+ return bitmap_print_to_buf(true, buf, maskp, nmaskbits, off, count);
+}
+EXPORT_SYMBOL(bitmap_print_list_to_buf);
+
+/*
+ * Region 9-38:4/10 describes the following bitmap structure:
+ * 0 9 12 18 38 N
+ * .........****......****......****..................
+ * ^ ^ ^ ^ ^
+ * start off group_len end nbits
+ */
+struct region {
+ unsigned int start;
+ unsigned int off;
+ unsigned int group_len;
+ unsigned int end;
+ unsigned int nbits;
+};
+
+static void bitmap_set_region(const struct region *r, unsigned long *bitmap)
+{
+ unsigned int start;
+
+ for (start = r->start; start <= r->end; start += r->group_len)
+ bitmap_set(bitmap, start, min(r->end - start + 1, r->off));
+}
+
+static int bitmap_check_region(const struct region *r)
+{
+ if (r->start > r->end || r->group_len == 0 || r->off > r->group_len)
+ return -EINVAL;
+
+ if (r->end >= r->nbits)
+ return -ERANGE;
+
+ return 0;
+}
+
+static const char *bitmap_getnum(const char *str, unsigned int *num,
+ unsigned int lastbit)
+{
+ unsigned long long n;
+ unsigned int len;
+
+ if (str[0] == 'N') {
+ *num = lastbit;
+ return str + 1;
+ }
+
+ len = _parse_integer(str, 10, &n);
+ if (!len)
+ return ERR_PTR(-EINVAL);
+ if (len & KSTRTOX_OVERFLOW || n != (unsigned int)n)
+ return ERR_PTR(-EOVERFLOW);
+
+ *num = n;
+ return str + len;
+}
+
+static inline bool end_of_str(char c)
+{
+ return c == '\0' || c == '\n';
+}
+
+static inline bool __end_of_region(char c)
+{
+ return isspace(c) || c == ',';
+}
+
+static inline bool end_of_region(char c)
+{
+ return __end_of_region(c) || end_of_str(c);
+}
+
+/*
+ * The format allows commas and whitespaces at the beginning
+ * of the region.
+ */
+static const char *bitmap_find_region(const char *str)
+{
+ while (__end_of_region(*str))
+ str++;
+
+ return end_of_str(*str) ? NULL : str;
+}
+
+static const char *bitmap_find_region_reverse(const char *start, const char *end)
+{
+ while (start <= end && __end_of_region(*end))
+ end--;
+
+ return end;
+}
+
+static const char *bitmap_parse_region(const char *str, struct region *r)
+{
+ unsigned int lastbit = r->nbits - 1;
+
+ if (!strncasecmp(str, "all", 3)) {
+ r->start = 0;
+ r->end = lastbit;
+ str += 3;
+
+ goto check_pattern;
+ }
+
+ str = bitmap_getnum(str, &r->start, lastbit);
+ if (IS_ERR(str))
+ return str;
+
+ if (end_of_region(*str))
+ goto no_end;
+
+ if (*str != '-')
+ return ERR_PTR(-EINVAL);
+
+ str = bitmap_getnum(str + 1, &r->end, lastbit);
+ if (IS_ERR(str))
+ return str;
+
+check_pattern:
+ if (end_of_region(*str))
+ goto no_pattern;
+
+ if (*str != ':')
+ return ERR_PTR(-EINVAL);
+
+ str = bitmap_getnum(str + 1, &r->off, lastbit);
+ if (IS_ERR(str))
+ return str;
+
+ if (*str != '/')
+ return ERR_PTR(-EINVAL);
+
+ return bitmap_getnum(str + 1, &r->group_len, lastbit);
+
+no_end:
+ r->end = r->start;
+no_pattern:
+ r->off = r->end + 1;
+ r->group_len = r->end + 1;
+
+ return end_of_str(*str) ? NULL : str;
+}
+
+/**
+ * bitmap_parselist - convert list format ASCII string to bitmap
+ * @buf: read user string from this buffer; must be terminated
+ * with a \0 or \n.
+ * @maskp: write resulting mask here
+ * @nmaskbits: number of bits in mask to be written
+ *
+ * Input format is a comma-separated list of decimal numbers and
+ * ranges. Consecutively set bits are shown as two hyphen-separated
+ * decimal numbers, the smallest and largest bit numbers set in
+ * the range.
+ * Optionally each range can be postfixed to denote that only parts of it
+ * should be set. The range will be divided into groups of a specific size.
+ * Only a defined number of bits will be used from each group.
+ * Syntax: range:used_size/group_size
+ * Example: 0-1023:2/256 ==> 0,1,256,257,512,513,768,769
+ * The value 'N' can be used as a dynamically substituted token for the
+ * maximum allowed value; i.e. (nmaskbits - 1). Keep in mind that it is
+ * dynamic, so if system changes cause the bitmap width to change, such
+ * as more cores in a CPU list, then any ranges using N will also change.
+ *
+ * Returns: 0 on success, -errno on invalid input strings. Error values:
+ *
+ * - ``-EINVAL``: wrong region format
+ * - ``-EINVAL``: invalid character in string
+ * - ``-ERANGE``: bit number specified too large for mask
+ * - ``-EOVERFLOW``: integer overflow in the input parameters
+ */
+int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits)
+{
+ struct region r;
+ long ret;
+
+ r.nbits = nmaskbits;
+ bitmap_zero(maskp, r.nbits);
+
+ while (buf) {
+ buf = bitmap_find_region(buf);
+ if (buf == NULL)
+ return 0;
+
+ buf = bitmap_parse_region(buf, &r);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ret = bitmap_check_region(&r);
+ if (ret)
+ return ret;
+
+ bitmap_set_region(&r, maskp);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(bitmap_parselist);
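A small usage sketch of the list format documented above (illustrative only, not part of the patch):

static void example_parse_list(void)
{
        DECLARE_BITMAP(mask, 16);
        int err;

        /* "0-15:2/4" keeps the first 2 bits of every 4-bit group: 0,1,4,5,8,9,12,13 */
        err = bitmap_parselist("0-15:2/4", mask, 16);
        if (err)
                pr_warn("bad list: %d\n", err);
}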
+
+
+/**
+ * bitmap_parselist_user() - convert user buffer's list format ASCII
+ * string to bitmap
+ *
+ * @ubuf: pointer to user buffer containing string.
+ * @ulen: buffer size in bytes. If string is smaller than this
+ * then it must be terminated with a \0.
+ * @maskp: pointer to bitmap array that will contain result.
+ * @nmaskbits: size of bitmap, in bits.
+ *
+ * Wrapper for bitmap_parselist(), providing it with user buffer.
+ */
+int bitmap_parselist_user(const char __user *ubuf,
+ unsigned int ulen, unsigned long *maskp,
+ int nmaskbits)
+{
+ char *buf;
+ int ret;
+
+ buf = memdup_user_nul(ubuf, ulen);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ret = bitmap_parselist(buf, maskp, nmaskbits);
+
+ kfree(buf);
+ return ret;
+}
+EXPORT_SYMBOL(bitmap_parselist_user);
+
+static const char *bitmap_get_x32_reverse(const char *start,
+ const char *end, u32 *num)
+{
+ u32 ret = 0;
+ int c, i;
+
+ for (i = 0; i < 32; i += 4) {
+ c = hex_to_bin(*end--);
+ if (c < 0)
+ return ERR_PTR(-EINVAL);
+
+ ret |= c << i;
+
+ if (start > end || __end_of_region(*end))
+ goto out;
+ }
+
+ if (hex_to_bin(*end--) >= 0)
+ return ERR_PTR(-EOVERFLOW);
+out:
+ *num = ret;
+ return end;
+}
+
+/**
+ * bitmap_parse - convert an ASCII hex string into a bitmap.
+ * @start: pointer to buffer containing string.
+ * @buflen: buffer size in bytes. If string is smaller than this
+ * then it must be terminated with a \0 or \n. In that case,
+ * UINT_MAX may be provided instead of string length.
+ * @maskp: pointer to bitmap array that will contain result.
+ * @nmaskbits: size of bitmap, in bits.
+ *
+ * Commas group hex digits into chunks. Each chunk defines exactly 32
+ * bits of the resultant bitmask. No chunk may specify a value larger
+ * than 32 bits (%-EOVERFLOW), and if a chunk specifies a smaller value
+ * then leading 0-bits are prepended. %-EINVAL is returned for illegal
+ * characters. Grouping such as "1,,5", ",44", "," or "" is allowed.
+ * Leading, embedded and trailing whitespace accepted.
+ */
+int bitmap_parse(const char *start, unsigned int buflen,
+ unsigned long *maskp, int nmaskbits)
+{
+ const char *end = strnchrnul(start, buflen, '\n') - 1;
+ int chunks = BITS_TO_U32(nmaskbits);
+ u32 *bitmap = (u32 *)maskp;
+ int unset_bit;
+ int chunk;
+
+ for (chunk = 0; ; chunk++) {
+ end = bitmap_find_region_reverse(start, end);
+ if (start > end)
+ break;
+
+ if (!chunks--)
+ return -EOVERFLOW;
+
+#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
+ end = bitmap_get_x32_reverse(start, end, &bitmap[chunk ^ 1]);
+#else
+ end = bitmap_get_x32_reverse(start, end, &bitmap[chunk]);
+#endif
+ if (IS_ERR(end))
+ return PTR_ERR(end);
+ }
+
+ unset_bit = (BITS_TO_U32(nmaskbits) - chunks) * 32;
+ if (unset_bit < nmaskbits) {
+ bitmap_clear(maskp, unset_bit, nmaskbits - unset_bit);
+ return 0;
+ }
+
+ if (find_next_bit(maskp, unset_bit, nmaskbits) != unset_bit)
+ return -EOVERFLOW;
+
+ return 0;
+}
+EXPORT_SYMBOL(bitmap_parse);
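A small usage sketch of the hex chunk format documented above (illustrative only, not part of the patch); the rightmost chunk is the least significant 32 bits:

static void example_parse_hex(void)
{
        DECLARE_BITMAP(mask, 64);
        int err;

        /* "ff,00000001" sets bit 0 (right chunk) and bits 32-39 (left chunk) */
        err = bitmap_parse("ff,00000001", UINT_MAX, mask, 64);
        if (err)
                pr_warn("bad mask: %d\n", err);
}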
diff --git a/lib/bitmap.c b/lib/bitmap.c
index ddb31015e38a..09522af227f1 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -6,21 +6,10 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
-#include <linux/bug.h>
#include <linux/ctype.h>
#include <linux/device.h>
-#include <linux/errno.h>
#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/thread_info.h>
-#include <linux/uaccess.h>
-
-#include <asm/page.h>
-
-#include "kstrtox.h"
/**
* DOC: bitmap introduction
@@ -440,508 +429,6 @@ again:
}
EXPORT_SYMBOL(bitmap_find_next_zero_area_off);
-/*
- * Bitmap printing & parsing functions: first version by Nadia Yvette Chambers,
- * second version by Paul Jackson, third by Joe Korty.
- */
-
-/**
- * bitmap_parse_user - convert an ASCII hex string in a user buffer into a bitmap
- *
- * @ubuf: pointer to user buffer containing string.
- * @ulen: buffer size in bytes. If string is smaller than this
- * then it must be terminated with a \0.
- * @maskp: pointer to bitmap array that will contain result.
- * @nmaskbits: size of bitmap, in bits.
- */
-int bitmap_parse_user(const char __user *ubuf,
- unsigned int ulen, unsigned long *maskp,
- int nmaskbits)
-{
- char *buf;
- int ret;
-
- buf = memdup_user_nul(ubuf, ulen);
- if (IS_ERR(buf))
- return PTR_ERR(buf);
-
- ret = bitmap_parse(buf, UINT_MAX, maskp, nmaskbits);
-
- kfree(buf);
- return ret;
-}
-EXPORT_SYMBOL(bitmap_parse_user);
-
-/**
- * bitmap_print_to_pagebuf - convert bitmap to list or hex format ASCII string
- * @list: indicates whether the bitmap must be list
- * @buf: page aligned buffer into which string is placed
- * @maskp: pointer to bitmap to convert
- * @nmaskbits: size of bitmap, in bits
- *
- * Output format is a comma-separated list of decimal numbers and
- * ranges if list is specified or hex digits grouped into comma-separated
- * sets of 8 digits/set. Returns the number of characters written to buf.
- *
- * It is assumed that @buf is a pointer into a PAGE_SIZE, page-aligned
- * area and that sufficient storage remains at @buf to accommodate the
- * bitmap_print_to_pagebuf() output. Returns the number of characters
- * actually printed to @buf, excluding terminating '\0'.
- */
-int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
- int nmaskbits)
-{
- ptrdiff_t len = PAGE_SIZE - offset_in_page(buf);
-
- return list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) :
- scnprintf(buf, len, "%*pb\n", nmaskbits, maskp);
-}
-EXPORT_SYMBOL(bitmap_print_to_pagebuf);
-
-/**
- * bitmap_print_to_buf - convert bitmap to list or hex format ASCII string
- * @list: indicates whether the bitmap must be list
- * true: print in decimal list format
- * false: print in hexadecimal bitmask format
- * @buf: buffer into which string is placed
- * @maskp: pointer to bitmap to convert
- * @nmaskbits: size of bitmap, in bits
- * @off: in the string from which we are copying, We copy to @buf
- * @count: the maximum number of bytes to print
- */
-static int bitmap_print_to_buf(bool list, char *buf, const unsigned long *maskp,
- int nmaskbits, loff_t off, size_t count)
-{
- const char *fmt = list ? "%*pbl\n" : "%*pb\n";
- ssize_t size;
- void *data;
-
- data = kasprintf(GFP_KERNEL, fmt, nmaskbits, maskp);
- if (!data)
- return -ENOMEM;
-
- size = memory_read_from_buffer(buf, count, &off, data, strlen(data) + 1);
- kfree(data);
-
- return size;
-}
-
-/**
- * bitmap_print_bitmask_to_buf - convert bitmap to hex bitmask format ASCII string
- * @buf: buffer into which string is placed
- * @maskp: pointer to bitmap to convert
- * @nmaskbits: size of bitmap, in bits
- * @off: in the string from which we are copying, We copy to @buf
- * @count: the maximum number of bytes to print
- *
- * The bitmap_print_to_pagebuf() is used indirectly via its cpumap wrapper
- * cpumap_print_to_pagebuf() or directly by drivers to export hexadecimal
- * bitmask and decimal list to userspace by sysfs ABI.
- * Drivers might be using a normal attribute for this kind of ABIs. A
- * normal attribute typically has show entry as below::
- *
- * static ssize_t example_attribute_show(struct device *dev,
- * struct device_attribute *attr, char *buf)
- * {
- * ...
- * return bitmap_print_to_pagebuf(true, buf, &mask, nr_trig_max);
- * }
- *
- * show entry of attribute has no offset and count parameters and this
- * means the file is limited to one page only.
- * bitmap_print_to_pagebuf() API works terribly well for this kind of
- * normal attribute with buf parameter and without offset, count::
- *
- * bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
- * int nmaskbits)
- * {
- * }
- *
- * The problem is once we have a large bitmap, we have a chance to get a
- * bitmask or list more than one page. Especially for list, it could be
- * as complex as 0,3,5,7,9,... We have no simple way to know it exact size.
- * It turns out bin_attribute is a way to break this limit. bin_attribute
- * has show entry as below::
- *
- * static ssize_t
- * example_bin_attribute_show(struct file *filp, struct kobject *kobj,
- * struct bin_attribute *attr, char *buf,
- * loff_t offset, size_t count)
- * {
- * ...
- * }
- *
- * With the new offset and count parameters, this makes sysfs ABI be able
- * to support file size more than one page. For example, offset could be
- * >= 4096.
- * bitmap_print_bitmask_to_buf(), bitmap_print_list_to_buf() wit their
- * cpumap wrapper cpumap_print_bitmask_to_buf(), cpumap_print_list_to_buf()
- * make those drivers be able to support large bitmask and list after they
- * move to use bin_attribute. In result, we have to pass the corresponding
- * parameters such as off, count from bin_attribute show entry to this API.
- *
- * The role of cpumap_print_bitmask_to_buf() and cpumap_print_list_to_buf()
- * is similar with cpumap_print_to_pagebuf(), the difference is that
- * bitmap_print_to_pagebuf() mainly serves sysfs attribute with the assumption
- * the destination buffer is exactly one page and won't be more than one page.
- * cpumap_print_bitmask_to_buf() and cpumap_print_list_to_buf(), on the other
- * hand, mainly serves bin_attribute which doesn't work with exact one page,
- * and it can break the size limit of converted decimal list and hexadecimal
- * bitmask.
- *
- * WARNING!
- *
- * This function is not a replacement for sprintf() or bitmap_print_to_pagebuf().
- * It is intended to workaround sysfs limitations discussed above and should be
- * used carefully in general case for the following reasons:
- *
- * - Time complexity is O(nbits^2/count), comparing to O(nbits) for snprintf().
- * - Memory complexity is O(nbits), comparing to O(1) for snprintf().
- * - @off and @count are NOT offset and number of bits to print.
- * - If printing part of bitmap as list, the resulting string is not a correct
- * list representation of bitmap. Particularly, some bits within or out of
- * related interval may be erroneously set or unset. The format of the string
- * may be broken, so bitmap_parselist-like parser may fail parsing it.
- * - If printing the whole bitmap as list by parts, user must ensure the order
- * of calls of the function such that the offset is incremented linearly.
- * - If printing the whole bitmap as list by parts, user must keep bitmap
- * unchanged between the very first and very last call. Otherwise concatenated
- * result may be incorrect, and format may be broken.
- *
- * Returns the number of characters actually printed to @buf
- */
-int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp,
- int nmaskbits, loff_t off, size_t count)
-{
- return bitmap_print_to_buf(false, buf, maskp, nmaskbits, off, count);
-}
-EXPORT_SYMBOL(bitmap_print_bitmask_to_buf);
-
-/**
- * bitmap_print_list_to_buf - convert bitmap to decimal list format ASCII string
- * @buf: buffer into which string is placed
- * @maskp: pointer to bitmap to convert
- * @nmaskbits: size of bitmap, in bits
- * @off: in the string from which we are copying, We copy to @buf
- * @count: the maximum number of bytes to print
- *
- * Everything is same with the above bitmap_print_bitmask_to_buf() except
- * the print format.
- */
-int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
- int nmaskbits, loff_t off, size_t count)
-{
- return bitmap_print_to_buf(true, buf, maskp, nmaskbits, off, count);
-}
-EXPORT_SYMBOL(bitmap_print_list_to_buf);
-
-/*
- * Region 9-38:4/10 describes the following bitmap structure:
- * 0 9 12 18 38 N
- * .........****......****......****..................
- * ^ ^ ^ ^ ^
- * start off group_len end nbits
- */
-struct region {
- unsigned int start;
- unsigned int off;
- unsigned int group_len;
- unsigned int end;
- unsigned int nbits;
-};
-
-static void bitmap_set_region(const struct region *r, unsigned long *bitmap)
-{
- unsigned int start;
-
- for (start = r->start; start <= r->end; start += r->group_len)
- bitmap_set(bitmap, start, min(r->end - start + 1, r->off));
-}
-
-static int bitmap_check_region(const struct region *r)
-{
- if (r->start > r->end || r->group_len == 0 || r->off > r->group_len)
- return -EINVAL;
-
- if (r->end >= r->nbits)
- return -ERANGE;
-
- return 0;
-}
-
-static const char *bitmap_getnum(const char *str, unsigned int *num,
- unsigned int lastbit)
-{
- unsigned long long n;
- unsigned int len;
-
- if (str[0] == 'N') {
- *num = lastbit;
- return str + 1;
- }
-
- len = _parse_integer(str, 10, &n);
- if (!len)
- return ERR_PTR(-EINVAL);
- if (len & KSTRTOX_OVERFLOW || n != (unsigned int)n)
- return ERR_PTR(-EOVERFLOW);
-
- *num = n;
- return str + len;
-}
-
-static inline bool end_of_str(char c)
-{
- return c == '\0' || c == '\n';
-}
-
-static inline bool __end_of_region(char c)
-{
- return isspace(c) || c == ',';
-}
-
-static inline bool end_of_region(char c)
-{
- return __end_of_region(c) || end_of_str(c);
-}
-
-/*
- * The format allows commas and whitespaces at the beginning
- * of the region.
- */
-static const char *bitmap_find_region(const char *str)
-{
- while (__end_of_region(*str))
- str++;
-
- return end_of_str(*str) ? NULL : str;
-}
-
-static const char *bitmap_find_region_reverse(const char *start, const char *end)
-{
- while (start <= end && __end_of_region(*end))
- end--;
-
- return end;
-}
-
-static const char *bitmap_parse_region(const char *str, struct region *r)
-{
- unsigned int lastbit = r->nbits - 1;
-
- if (!strncasecmp(str, "all", 3)) {
- r->start = 0;
- r->end = lastbit;
- str += 3;
-
- goto check_pattern;
- }
-
- str = bitmap_getnum(str, &r->start, lastbit);
- if (IS_ERR(str))
- return str;
-
- if (end_of_region(*str))
- goto no_end;
-
- if (*str != '-')
- return ERR_PTR(-EINVAL);
-
- str = bitmap_getnum(str + 1, &r->end, lastbit);
- if (IS_ERR(str))
- return str;
-
-check_pattern:
- if (end_of_region(*str))
- goto no_pattern;
-
- if (*str != ':')
- return ERR_PTR(-EINVAL);
-
- str = bitmap_getnum(str + 1, &r->off, lastbit);
- if (IS_ERR(str))
- return str;
-
- if (*str != '/')
- return ERR_PTR(-EINVAL);
-
- return bitmap_getnum(str + 1, &r->group_len, lastbit);
-
-no_end:
- r->end = r->start;
-no_pattern:
- r->off = r->end + 1;
- r->group_len = r->end + 1;
-
- return end_of_str(*str) ? NULL : str;
-}
-
-/**
- * bitmap_parselist - convert list format ASCII string to bitmap
- * @buf: read user string from this buffer; must be terminated
- * with a \0 or \n.
- * @maskp: write resulting mask here
- * @nmaskbits: number of bits in mask to be written
- *
- * Input format is a comma-separated list of decimal numbers and
- * ranges. Consecutively set bits are shown as two hyphen-separated
- * decimal numbers, the smallest and largest bit numbers set in
- * the range.
- * Optionally each range can be postfixed to denote that only parts of it
- * should be set. The range will divided to groups of specific size.
- * From each group will be used only defined amount of bits.
- * Syntax: range:used_size/group_size
- * Example: 0-1023:2/256 ==> 0,1,256,257,512,513,768,769
- * The value 'N' can be used as a dynamically substituted token for the
- * maximum allowed value; i.e (nmaskbits - 1). Keep in mind that it is
- * dynamic, so if system changes cause the bitmap width to change, such
- * as more cores in a CPU list, then any ranges using N will also change.
- *
- * Returns: 0 on success, -errno on invalid input strings. Error values:
- *
- * - ``-EINVAL``: wrong region format
- * - ``-EINVAL``: invalid character in string
- * - ``-ERANGE``: bit number specified too large for mask
- * - ``-EOVERFLOW``: integer overflow in the input parameters
- */
-int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits)
-{
- struct region r;
- long ret;
-
- r.nbits = nmaskbits;
- bitmap_zero(maskp, r.nbits);
-
- while (buf) {
- buf = bitmap_find_region(buf);
- if (buf == NULL)
- return 0;
-
- buf = bitmap_parse_region(buf, &r);
- if (IS_ERR(buf))
- return PTR_ERR(buf);
-
- ret = bitmap_check_region(&r);
- if (ret)
- return ret;
-
- bitmap_set_region(&r, maskp);
- }
-
- return 0;
-}
-EXPORT_SYMBOL(bitmap_parselist);
-
-
-/**
- * bitmap_parselist_user() - convert user buffer's list format ASCII
- * string to bitmap
- *
- * @ubuf: pointer to user buffer containing string.
- * @ulen: buffer size in bytes. If string is smaller than this
- * then it must be terminated with a \0.
- * @maskp: pointer to bitmap array that will contain result.
- * @nmaskbits: size of bitmap, in bits.
- *
- * Wrapper for bitmap_parselist(), providing it with user buffer.
- */
-int bitmap_parselist_user(const char __user *ubuf,
- unsigned int ulen, unsigned long *maskp,
- int nmaskbits)
-{
- char *buf;
- int ret;
-
- buf = memdup_user_nul(ubuf, ulen);
- if (IS_ERR(buf))
- return PTR_ERR(buf);
-
- ret = bitmap_parselist(buf, maskp, nmaskbits);
-
- kfree(buf);
- return ret;
-}
-EXPORT_SYMBOL(bitmap_parselist_user);
-
-static const char *bitmap_get_x32_reverse(const char *start,
- const char *end, u32 *num)
-{
- u32 ret = 0;
- int c, i;
-
- for (i = 0; i < 32; i += 4) {
- c = hex_to_bin(*end--);
- if (c < 0)
- return ERR_PTR(-EINVAL);
-
- ret |= c << i;
-
- if (start > end || __end_of_region(*end))
- goto out;
- }
-
- if (hex_to_bin(*end--) >= 0)
- return ERR_PTR(-EOVERFLOW);
-out:
- *num = ret;
- return end;
-}
-
-/**
- * bitmap_parse - convert an ASCII hex string into a bitmap.
- * @start: pointer to buffer containing string.
- * @buflen: buffer size in bytes. If string is smaller than this
- * then it must be terminated with a \0 or \n. In that case,
- * UINT_MAX may be provided instead of string length.
- * @maskp: pointer to bitmap array that will contain result.
- * @nmaskbits: size of bitmap, in bits.
- *
- * Commas group hex digits into chunks. Each chunk defines exactly 32
- * bits of the resultant bitmask. No chunk may specify a value larger
- * than 32 bits (%-EOVERFLOW), and if a chunk specifies a smaller value
- * then leading 0-bits are prepended. %-EINVAL is returned for illegal
- * characters. Grouping such as "1,,5", ",44", "," or "" is allowed.
- * Leading, embedded and trailing whitespace accepted.
- */
-int bitmap_parse(const char *start, unsigned int buflen,
- unsigned long *maskp, int nmaskbits)
-{
- const char *end = strnchrnul(start, buflen, '\n') - 1;
- int chunks = BITS_TO_U32(nmaskbits);
- u32 *bitmap = (u32 *)maskp;
- int unset_bit;
- int chunk;
-
- for (chunk = 0; ; chunk++) {
- end = bitmap_find_region_reverse(start, end);
- if (start > end)
- break;
-
- if (!chunks--)
- return -EOVERFLOW;
-
-#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
- end = bitmap_get_x32_reverse(start, end, &bitmap[chunk ^ 1]);
-#else
- end = bitmap_get_x32_reverse(start, end, &bitmap[chunk]);
-#endif
- if (IS_ERR(end))
- return PTR_ERR(end);
- }
-
- unset_bit = (BITS_TO_U32(nmaskbits) - chunks) * 32;
- if (unset_bit < nmaskbits) {
- bitmap_clear(maskp, unset_bit, nmaskbits - unset_bit);
- return 0;
- }
-
- if (find_next_bit(maskp, unset_bit, nmaskbits) != unset_bit)
- return -EOVERFLOW;
-
- return 0;
-}
-EXPORT_SYMBOL(bitmap_parse);
-
/**
* bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
* @buf: pointer to a bitmap
@@ -988,7 +475,7 @@ static int bitmap_pos_to_ord(const unsigned long *buf, unsigned int pos, unsigne
* to @dst.
*
* The positions of unset bits in @old are mapped to themselves
- * (the identify map).
+ * (the identity map).
*
* Apply the above specified mapping to @src, placing the result in
* @dst, clearing any bits previously set in @dst.
@@ -1037,7 +524,7 @@ EXPORT_SYMBOL(bitmap_remap);
* the position of the m-th set bit in @new, where m == n % w.
*
* The positions of unset bits in @old are mapped to themselves
- * (the identify map).
+ * (the identity map).
*
* Apply the above specified mapping to bit position @oldbit, returning
* the new bit position.
@@ -1220,169 +707,6 @@ void bitmap_fold(unsigned long *dst, const unsigned long *orig,
}
#endif /* CONFIG_NUMA */
-/*
- * Common code for bitmap_*_region() routines.
- * bitmap: array of unsigned longs corresponding to the bitmap
- * pos: the beginning of the region
- * order: region size (log base 2 of number of bits)
- * reg_op: operation(s) to perform on that region of bitmap
- *
- * Can set, verify and/or release a region of bits in a bitmap,
- * depending on which combination of REG_OP_* flag bits is set.
- *
- * A region of a bitmap is a sequence of bits in the bitmap, of
- * some size '1 << order' (a power of two), aligned to that same
- * '1 << order' power of two.
- *
- * Returns 1 if REG_OP_ISFREE succeeds (region is all zero bits).
- * Returns 0 in all other cases and reg_ops.
- */
-
-enum {
- REG_OP_ISFREE, /* true if region is all zero bits */
- REG_OP_ALLOC, /* set all bits in region */
- REG_OP_RELEASE, /* clear all bits in region */
-};
-
-static int __reg_op(unsigned long *bitmap, unsigned int pos, int order, int reg_op)
-{
- int nbits_reg; /* number of bits in region */
- int index; /* index first long of region in bitmap */
- int offset; /* bit offset region in bitmap[index] */
- int nlongs_reg; /* num longs spanned by region in bitmap */
- int nbitsinlong; /* num bits of region in each spanned long */
- unsigned long mask; /* bitmask for one long of region */
- int i; /* scans bitmap by longs */
- int ret = 0; /* return value */
-
- /*
- * Either nlongs_reg == 1 (for small orders that fit in one long)
- * or (offset == 0 && mask == ~0UL) (for larger multiword orders.)
- */
- nbits_reg = 1 << order;
- index = pos / BITS_PER_LONG;
- offset = pos - (index * BITS_PER_LONG);
- nlongs_reg = BITS_TO_LONGS(nbits_reg);
- nbitsinlong = min(nbits_reg, BITS_PER_LONG);
-
- /*
- * Can't do "mask = (1UL << nbitsinlong) - 1", as that
- * overflows if nbitsinlong == BITS_PER_LONG.
- */
- mask = (1UL << (nbitsinlong - 1));
- mask += mask - 1;
- mask <<= offset;
-
- switch (reg_op) {
- case REG_OP_ISFREE:
- for (i = 0; i < nlongs_reg; i++) {
- if (bitmap[index + i] & mask)
- goto done;
- }
- ret = 1; /* all bits in region free (zero) */
- break;
-
- case REG_OP_ALLOC:
- for (i = 0; i < nlongs_reg; i++)
- bitmap[index + i] |= mask;
- break;
-
- case REG_OP_RELEASE:
- for (i = 0; i < nlongs_reg; i++)
- bitmap[index + i] &= ~mask;
- break;
- }
-done:
- return ret;
-}
-
-/**
- * bitmap_find_free_region - find a contiguous aligned mem region
- * @bitmap: array of unsigned longs corresponding to the bitmap
- * @bits: number of bits in the bitmap
- * @order: region size (log base 2 of number of bits) to find
- *
- * Find a region of free (zero) bits in a @bitmap of @bits bits and
- * allocate them (set them to one). Only consider regions of length
- * a power (@order) of two, aligned to that power of two, which
- * makes the search algorithm much faster.
- *
- * Return the bit offset in bitmap of the allocated region,
- * or -errno on failure.
- */
-int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
-{
- unsigned int pos, end; /* scans bitmap by regions of size order */
-
- for (pos = 0 ; (end = pos + (1U << order)) <= bits; pos = end) {
- if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
- continue;
- __reg_op(bitmap, pos, order, REG_OP_ALLOC);
- return pos;
- }
- return -ENOMEM;
-}
-EXPORT_SYMBOL(bitmap_find_free_region);
-
-/**
- * bitmap_release_region - release allocated bitmap region
- * @bitmap: array of unsigned longs corresponding to the bitmap
- * @pos: beginning of bit region to release
- * @order: region size (log base 2 of number of bits) to release
- *
- * This is the complement to __bitmap_find_free_region() and releases
- * the found region (by clearing it in the bitmap).
- *
- * No return value.
- */
-void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
-{
- __reg_op(bitmap, pos, order, REG_OP_RELEASE);
-}
-EXPORT_SYMBOL(bitmap_release_region);
-
-/**
- * bitmap_allocate_region - allocate bitmap region
- * @bitmap: array of unsigned longs corresponding to the bitmap
- * @pos: beginning of bit region to allocate
- * @order: region size (log base 2 of number of bits) to allocate
- *
- * Allocate (set bits in) a specified region of a bitmap.
- *
- * Return 0 on success, or %-EBUSY if specified region wasn't
- * free (not all bits were zero).
- */
-int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
-{
- if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
- return -EBUSY;
- return __reg_op(bitmap, pos, order, REG_OP_ALLOC);
-}
-EXPORT_SYMBOL(bitmap_allocate_region);
-
-/**
- * bitmap_copy_le - copy a bitmap, putting the bits into little-endian order.
- * @dst: destination buffer
- * @src: bitmap to copy
- * @nbits: number of bits in the bitmap
- *
- * Require nbits % BITS_PER_LONG == 0.
- */
-#ifdef __BIG_ENDIAN
-void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits)
-{
- unsigned int i;
-
- for (i = 0; i < nbits/BITS_PER_LONG; i++) {
- if (BITS_PER_LONG == 64)
- dst[i] = cpu_to_le64(src[i]);
- else
- dst[i] = cpu_to_le32(src[i]);
- }
-}
-EXPORT_SYMBOL(bitmap_copy_le);
-#endif
-
unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags)
{
return kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long),
diff --git a/lib/closure.c b/lib/closure.c
index 0855e698ced1..f86c9eeafb35 100644
--- a/lib/closure.c
+++ b/lib/closure.c
@@ -21,6 +21,10 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));
if (!r) {
+ smp_acquire__after_ctrl_dep();
+
+ cl->closure_get_happened = false;
+
if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
atomic_set(&cl->remaining,
CLOSURE_REMAINING_INITIALIZER);
@@ -43,7 +47,7 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
/* For clearing flags with the same atomic op as a put */
void closure_sub(struct closure *cl, int v)
{
- closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
+ closure_put_after_sub(cl, atomic_sub_return_release(v, &cl->remaining));
}
EXPORT_SYMBOL(closure_sub);
@@ -52,7 +56,7 @@ EXPORT_SYMBOL(closure_sub);
*/
void closure_put(struct closure *cl)
{
- closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
+ closure_put_after_sub(cl, atomic_dec_return_release(&cl->remaining));
}
EXPORT_SYMBOL(closure_put);
@@ -90,6 +94,7 @@ bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
return false;
+ cl->closure_get_happened = true;
closure_set_waiting(cl, _RET_IP_);
atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
llist_add(&cl->list, &waitlist->list);
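The switch to the _release atomics plus smp_acquire__after_ctrl_dep() above follows the usual refcount pairing: the dropping thread releases its writes with the decrement, and the thread that observes the count reach zero upgrades its control dependency to an acquire before touching the object. A generic, illustrative sketch of the same pattern (struct example_obj and example_free() are placeholders, not closure code):

static void example_put(struct example_obj *obj)
{
        /* Release: all prior writes to *obj are ordered before the decrement */
        if (atomic_dec_return_release(&obj->refs) == 0) {
                /* Acquire: pairs with the release above before reusing *obj */
                smp_acquire__after_ctrl_dep();
                example_free(obj);
        }
}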
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 34335c1e7265..e77ee9d46f71 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -14,7 +14,7 @@
* @start: the start point of the iteration
* @wrap: assume @n crossing @start terminates the iteration
*
- * Returns >= nr_cpu_ids on completion
+ * Return: >= nr_cpu_ids on completion
*
* Note: the @wrap argument is required for the start condition when
* we cannot assume @start is set in @mask.
@@ -48,8 +48,9 @@ EXPORT_SYMBOL(cpumask_next_wrap);
* @node: memory node from which to allocate or %NUMA_NO_NODE
*
* Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
- * a nop returning a constant 1 (in <linux/cpumask.h>)
- * Returns TRUE if memory allocation succeeded, FALSE otherwise.
+ * a nop returning a constant 1 (in <linux/cpumask.h>).
+ *
+ * Return: TRUE if memory allocation succeeded, FALSE otherwise.
*
* In addition, mask will be NULL if this fails. Note that gcc is
* usually smart enough to know that mask can never be NULL if
@@ -115,7 +116,7 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
* @i: index number
* @node: local numa_node
*
- * Returns online CPU according to a numa aware policy; local cpus are returned
+ * Return: online CPU according to a numa aware policy; local cpus are returned
* first, followed by non-local ones, then it wraps around.
*
* For those who want to enumerate all CPUs based on their NUMA distances,
@@ -163,7 +164,7 @@ static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);
* Iterated calls using the same srcp1 and srcp2 will be distributed within
* their intersection.
*
- * Returns >= nr_cpu_ids if the intersection is empty.
+ * Return: >= nr_cpu_ids if the intersection is empty.
*/
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
const struct cpumask *src2p)
@@ -182,6 +183,12 @@ unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
}
EXPORT_SYMBOL(cpumask_any_and_distribute);
+/**
+ * cpumask_any_distribute - Return an arbitrary cpu from @srcp
+ * @srcp: &cpumask for selection
+ *
+ * Return: >= nr_cpu_ids if @srcp is empty.
+ */
unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
unsigned int next, prev;
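The Return: fixes above all document the same convention: a value >= nr_cpu_ids means no CPU was found. A hypothetical caller, not from this patch (dev_mask is an assumed per-device cpumask):

        unsigned int cpu;

        /* spread picks across the intersection of the device's mask and the
         * online mask; fall back to any online CPU if the intersection is empty
         */
        cpu = cpumask_any_and_distribute(dev_mask, cpu_online_mask);
        if (cpu >= nr_cpu_ids)
                cpu = cpumask_any_distribute(cpu_online_mask);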
diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig
index c035fde66aeb..7ee468ef21ec 100644
--- a/lib/fonts/Kconfig
+++ b/lib/fonts/Kconfig
@@ -98,7 +98,7 @@ config FONT_10x18
config FONT_SUN8x16
bool "Sparc console 8x16 font"
- depends on FRAMEBUFFER_CONSOLE && (!SPARC && FONTS || SPARC)
+ depends on (FRAMEBUFFER_CONSOLE && (FONTS || SPARC)) || BOOTX_TEXT
help
This is the high resolution console font for Sun machines. Say Y.
diff --git a/lib/fw_table.c b/lib/fw_table.c
new file mode 100644
index 000000000000..b51f30a28e47
--- /dev/null
+++ b/lib/fw_table.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * fw_table.c - Parsing support for ACPI and ACPI-like tables provided by
+ * platform or device firmware
+ *
+ * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2023 Intel Corp.
+ */
+#include <linux/errno.h>
+#include <linux/fw_table.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+enum acpi_subtable_type {
+ ACPI_SUBTABLE_COMMON,
+ ACPI_SUBTABLE_HMAT,
+ ACPI_SUBTABLE_PRMT,
+ ACPI_SUBTABLE_CEDT,
+};
+
+struct acpi_subtable_entry {
+ union acpi_subtable_headers *hdr;
+ enum acpi_subtable_type type;
+};
+
+static unsigned long __init_or_acpilib
+acpi_get_entry_type(struct acpi_subtable_entry *entry)
+{
+ switch (entry->type) {
+ case ACPI_SUBTABLE_COMMON:
+ return entry->hdr->common.type;
+ case ACPI_SUBTABLE_HMAT:
+ return entry->hdr->hmat.type;
+ case ACPI_SUBTABLE_PRMT:
+ return 0;
+ case ACPI_SUBTABLE_CEDT:
+ return entry->hdr->cedt.type;
+ }
+ return 0;
+}
+
+static unsigned long __init_or_acpilib
+acpi_get_entry_length(struct acpi_subtable_entry *entry)
+{
+ switch (entry->type) {
+ case ACPI_SUBTABLE_COMMON:
+ return entry->hdr->common.length;
+ case ACPI_SUBTABLE_HMAT:
+ return entry->hdr->hmat.length;
+ case ACPI_SUBTABLE_PRMT:
+ return entry->hdr->prmt.length;
+ case ACPI_SUBTABLE_CEDT:
+ return entry->hdr->cedt.length;
+ }
+ return 0;
+}
+
+static unsigned long __init_or_acpilib
+acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
+{
+ switch (entry->type) {
+ case ACPI_SUBTABLE_COMMON:
+ return sizeof(entry->hdr->common);
+ case ACPI_SUBTABLE_HMAT:
+ return sizeof(entry->hdr->hmat);
+ case ACPI_SUBTABLE_PRMT:
+ return sizeof(entry->hdr->prmt);
+ case ACPI_SUBTABLE_CEDT:
+ return sizeof(entry->hdr->cedt);
+ }
+ return 0;
+}
+
+static enum acpi_subtable_type __init_or_acpilib
+acpi_get_subtable_type(char *id)
+{
+ if (strncmp(id, ACPI_SIG_HMAT, 4) == 0)
+ return ACPI_SUBTABLE_HMAT;
+ if (strncmp(id, ACPI_SIG_PRMT, 4) == 0)
+ return ACPI_SUBTABLE_PRMT;
+ if (strncmp(id, ACPI_SIG_CEDT, 4) == 0)
+ return ACPI_SUBTABLE_CEDT;
+ return ACPI_SUBTABLE_COMMON;
+}
+
+static __init_or_acpilib bool has_handler(struct acpi_subtable_proc *proc)
+{
+ return proc->handler || proc->handler_arg;
+}
+
+static __init_or_acpilib int call_handler(struct acpi_subtable_proc *proc,
+ union acpi_subtable_headers *hdr,
+ unsigned long end)
+{
+ if (proc->handler)
+ return proc->handler(hdr, end);
+ if (proc->handler_arg)
+ return proc->handler_arg(hdr, proc->arg, end);
+ return -EINVAL;
+}
+
+/**
+ * acpi_parse_entries_array - for each proc_num find a suitable subtable
+ *
+ * @id: table id (for debugging purposes)
+ * @table_size: size of the root table
+ * @table_header: start of the table to parse
+ * @proc: array of acpi_subtable_proc struct containing entry id
+ * and associated handler with it
+ * @proc_num: number of entries in @proc
+ * @max_entries: maximum number of entries to process (0 for no limit)
+ *
+ * For each proc_num find a subtable with proc->id and run proc->handler
+ * on it. Assumption is that there's only single handler for particular
+ * entry id.
+ *
+ * The table_size is not the size of the complete ACPI table (the length
+ * field in the header struct), but only the size of the root table; i.e.,
+ * the offset from the very first byte of the complete ACPI table, to the
+ * first byte of the very first subtable.
+ *
+ * On success returns sum of all matching entries for all proc handlers.
+ * Otherwise, -ENODEV or -EINVAL is returned.
+ */
+int __init_or_acpilib
+acpi_parse_entries_array(char *id, unsigned long table_size,
+ struct acpi_table_header *table_header,
+ struct acpi_subtable_proc *proc,
+ int proc_num, unsigned int max_entries)
+{
+ unsigned long table_end, subtable_len, entry_len;
+ struct acpi_subtable_entry entry;
+ int count = 0;
+ int errs = 0;
+ int i;
+
+ table_end = (unsigned long)table_header + table_header->length;
+
+ /* Parse all entries looking for a match. */
+
+ entry.type = acpi_get_subtable_type(id);
+ entry.hdr = (union acpi_subtable_headers *)
+ ((unsigned long)table_header + table_size);
+ subtable_len = acpi_get_subtable_header_length(&entry);
+
+ while (((unsigned long)entry.hdr) + subtable_len < table_end) {
+ if (max_entries && count >= max_entries)
+ break;
+
+ for (i = 0; i < proc_num; i++) {
+ if (acpi_get_entry_type(&entry) != proc[i].id)
+ continue;
+ if (!has_handler(&proc[i]) ||
+ (!errs &&
+ call_handler(&proc[i], entry.hdr, table_end))) {
+ errs++;
+ continue;
+ }
+
+ proc[i].count++;
+ break;
+ }
+ if (i != proc_num)
+ count++;
+
+ /*
+ * If entry->length is 0, break from this loop to avoid
+ * infinite loop.
+ */
+ entry_len = acpi_get_entry_length(&entry);
+ if (entry_len == 0) {
+ pr_err("[%4.4s:0x%02x] Invalid zero length\n", id, proc->id);
+ return -EINVAL;
+ }
+
+ entry.hdr = (union acpi_subtable_headers *)
+ ((unsigned long)entry.hdr + entry_len);
+ }
+
+ if (max_entries && count > max_entries) {
+ pr_warn("[%4.4s:0x%02x] found the maximum %i entries\n",
+ id, proc->id, count);
+ }
+
+ return errs ? -EINVAL : count;
+}
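A hedged caller sketch for the parser added above; the handler and the subtable type value are hypothetical, while ACPI_SIG_CEDT and struct acpi_table_cedt are assumed to come from the usual ACPICA headers:

static int __init count_my_subtable(union acpi_subtable_headers *hdr,
                                    const unsigned long end)
{
        /* a real handler would inspect hdr->cedt here */
        return 0;
}

static int __init parse_example(struct acpi_table_header *cedt)
{
        struct acpi_subtable_proc proc = {
                .id = 0,                /* subtable type of interest */
                .handler = count_my_subtable,
        };

        /* table_size is the offset of the first subtable, per the
         * kernel-doc above, not the full table length
         */
        return acpi_parse_entries_array(ACPI_SIG_CEDT,
                                        sizeof(struct acpi_table_cedt),
                                        cedt, &proc, 1, 0);
}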
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index 45c450f423fa..010c730ca7fc 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -109,9 +109,7 @@ void seq_buf_do_printk(struct seq_buf *s, const char *lvl)
if (s->size == 0 || s->len == 0)
return;
- seq_buf_terminate(s);
-
- start = s->buffer;
+ start = seq_buf_str(s);
while ((lf = strchr(start, '\n'))) {
int len = lf - start + 1;
@@ -189,6 +187,7 @@ int seq_buf_puts(struct seq_buf *s, const char *str)
seq_buf_set_overflow(s);
return -1;
}
+EXPORT_SYMBOL_GPL(seq_buf_puts);
/**
* seq_buf_putc - sequence printing of simple character
@@ -210,6 +209,7 @@ int seq_buf_putc(struct seq_buf *s, unsigned char c)
seq_buf_set_overflow(s);
return -1;
}
+EXPORT_SYMBOL_GPL(seq_buf_putc);
/**
* seq_buf_putmem - write raw data into the sequence buffer
@@ -324,23 +324,24 @@ int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc)
* seq_buf_to_user - copy the sequence buffer to user space
* @s: seq_buf descriptor
* @ubuf: The userspace memory location to copy to
+ * @start: The first byte in the buffer to copy
* @cnt: The amount to copy
*
* Copies the sequence buffer into the userspace memory pointed to
- * by @ubuf. It starts from the last read position (@s->readpos)
- * and writes up to @cnt characters or till it reaches the end of
- * the content in the buffer (@s->len), which ever comes first.
+ * by @ubuf. It starts from @start and writes up to @cnt characters
+ * or until it reaches the end of the content in the buffer (@s->len),
+ * whichever comes first.
*
* On success, it returns a positive number of the number of bytes
* it copied.
*
* On failure it returns -EBUSY if all of the content in the
* sequence has been already read, which includes nothing in the
- * sequence (@s->len == @s->readpos).
+ * sequence (@s->len == @start).
*
* Returns -EFAULT if the copy to userspace fails.
*/
-int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, int cnt)
+int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, size_t start, int cnt)
{
int len;
int ret;
@@ -350,20 +351,17 @@ int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, int cnt)
len = seq_buf_used(s);
- if (len <= s->readpos)
+ if (len <= start)
return -EBUSY;
- len -= s->readpos;
+ len -= start;
if (cnt > len)
cnt = len;
- ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
+ ret = copy_to_user(ubuf, s->buffer + start, cnt);
if (ret == cnt)
return -EFAULT;
- cnt -= ret;
-
- s->readpos += cnt;
- return cnt;
+ return cnt - ret;
}
/**
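With @start passed in by the caller, the read position now lives outside the seq_buf; a minimal sketch of the new calling convention (hypothetical caller, not from this patch):

static ssize_t example_read(struct seq_buf *s, char __user *ubuf,
                            size_t count, loff_t *ppos)
{
        /* the caller advances its own position instead of s->readpos */
        int ret = seq_buf_to_user(s, ubuf, *ppos, count);

        if (ret > 0)
                *ppos += ret;
        return ret;
}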
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index f2ea9f30c7c5..65f22c2578b0 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -330,6 +330,29 @@ static void __init test_copy(void)
expect_eq_pbl("0-108,128-1023", bmap2, 1024);
}
+static void __init test_bitmap_region(void)
+{
+ int pos, order;
+
+ DECLARE_BITMAP(bmap, 1000);
+
+ bitmap_zero(bmap, 1000);
+
+ for (order = 0; order < 10; order++) {
+ pos = bitmap_find_free_region(bmap, 1000, order);
+ if (order == 0)
+ expect_eq_uint(pos, 0);
+ else
+ expect_eq_uint(pos, order < 9 ? BIT(order) : -ENOMEM);
+ }
+
+ bitmap_release_region(bmap, 0, 0);
+ for (order = 1; order < 9; order++)
+ bitmap_release_region(bmap, BIT(order), order);
+
+ expect_eq_uint(bitmap_weight(bmap, 1000), 0);
+}
+
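A note on the expected values in the test above (reasoning, not code from the patch): bitmap_find_free_region() hands out naturally aligned regions of 2^order bits. Once the order-0 call has taken bit 0, the lowest free aligned region for order n in 1..8 starts at bit 2^n, hence the BIT(order) expectation; order 9 would need bits 512..1023, which run past the 1000-bit map, so -ENOMEM is expected there.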
#define EXP2_IN_BITS (sizeof(exp2) * 8)
static void __init test_replace(void)
@@ -1227,6 +1250,7 @@ static void __init selftest(void)
test_zero_clear();
test_fill_set();
test_copy();
+ test_bitmap_region();
test_replace();
test_bitmap_arr32();
test_bitmap_arr64();
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index afb88b24fa74..3e3733a7084f 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -60,7 +60,8 @@
bool no_hash_pointers __ro_after_init;
EXPORT_SYMBOL_GPL(no_hash_pointers);
-static noinline unsigned long long simple_strntoull(const char *startp, size_t max_chars, char **endp, unsigned int base)
+noinline
+static unsigned long long simple_strntoull(const char *startp, char **endp, unsigned int base, size_t max_chars)
{
const char *cp;
unsigned long long result = 0ULL;
@@ -95,7 +96,7 @@ static noinline unsigned long long simple_strntoull(const char *startp, size_t m
noinline
unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
{
- return simple_strntoull(cp, INT_MAX, endp, base);
+ return simple_strntoull(cp, endp, base, INT_MAX);
}
EXPORT_SYMBOL(simple_strtoull);
@@ -130,8 +131,8 @@ long simple_strtol(const char *cp, char **endp, unsigned int base)
}
EXPORT_SYMBOL(simple_strtol);
-static long long simple_strntoll(const char *cp, size_t max_chars, char **endp,
- unsigned int base)
+noinline
+static long long simple_strntoll(const char *cp, char **endp, unsigned int base, size_t max_chars)
{
/*
* simple_strntoull() safely handles receiving max_chars==0 in the
@@ -140,9 +141,9 @@ static long long simple_strntoll(const char *cp, size_t max_chars, char **endp,
* and the content of *cp is irrelevant.
*/
if (*cp == '-' && max_chars > 0)
- return -simple_strntoull(cp + 1, max_chars - 1, endp, base);
+ return -simple_strntoull(cp + 1, endp, base, max_chars - 1);
- return simple_strntoull(cp, max_chars, endp, base);
+ return simple_strntoull(cp, endp, base, max_chars);
}
/**
@@ -155,7 +156,7 @@ static long long simple_strntoll(const char *cp, size_t max_chars, char **endp,
*/
long long simple_strtoll(const char *cp, char **endp, unsigned int base)
{
- return simple_strntoll(cp, INT_MAX, endp, base);
+ return simple_strntoll(cp, endp, base, INT_MAX);
}
EXPORT_SYMBOL(simple_strtoll);
@@ -3648,13 +3649,11 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
break;
if (is_sign)
- val.s = simple_strntoll(str,
- field_width >= 0 ? field_width : INT_MAX,
- &next, base);
+ val.s = simple_strntoll(str, &next, base,
+ field_width >= 0 ? field_width : INT_MAX);
else
- val.u = simple_strntoull(str,
- field_width >= 0 ? field_width : INT_MAX,
- &next, base);
+ val.u = simple_strntoull(str, &next, base,
+ field_width >= 0 ? field_width : INT_MAX);
switch (qualifier) {
case 'H': /* that's 'hh' in format */
diff --git a/mm/memblock.c b/mm/memblock.c
index fd492e5bbdbc..5a88d6d24d79 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -424,7 +424,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
* of memory that aren't suitable for allocation
*/
if (!memblock_can_resize)
- return -1;
+ panic("memblock: cannot resize %s array\n", type->name);
/* Calculate new doubled size */
old_size = type->max * sizeof(struct memblock_region);
diff --git a/net/9p/client.c b/net/9p/client.c
index 86bbc7147fc1..e265a0ca6bdd 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -540,12 +540,14 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
return 0;
if (!p9_is_proto_dotl(c)) {
- char *ename;
+ char *ename = NULL;
err = p9pdu_readf(&req->rc, c->proto_version, "s?d",
&ename, &ecode);
- if (err)
+ if (err) {
+ kfree(ename);
goto out_err;
+ }
if (p9_is_proto_dotu(c) && ecode < 512)
err = -ecode;
@@ -1979,7 +1981,7 @@ struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
goto error;
}
p9_debug(P9_DEBUG_9P,
- ">>> TXATTRWALK file_fid %d, attr_fid %d name %s\n",
+ ">>> TXATTRWALK file_fid %d, attr_fid %d name '%s'\n",
file_fid->fid, attr_fid->fid, attr_name);
req = p9_client_rpc(clnt, P9_TXATTRWALK, "dds",
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index c4015f30f9fa..1a3948b8c493 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -671,10 +671,14 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
p9_debug(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n",
m, current, &req->tc, req->tc.id);
- if (m->err < 0)
- return m->err;
spin_lock(&m->req_lock);
+
+ if (m->err < 0) {
+ spin_unlock(&m->req_lock);
+ return m->err;
+ }
+
WRITE_ONCE(req->status, REQ_STATUS_UNSENT);
list_add_tail(&req->req_list, &m->unsent_req_list);
spin_unlock(&m->req_lock);
@@ -832,14 +836,21 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
goto out_free_ts;
if (!(ts->rd->f_mode & FMODE_READ))
goto out_put_rd;
- /* prevent workers from hanging on IO when fd is a pipe */
- ts->rd->f_flags |= O_NONBLOCK;
+ /* Prevent workers from hanging on IO when fd is a pipe.
+ * It's technically possible for userspace or concurrent mounts to
+ * modify this flag concurrently, which will likely result in a
+ * broken filesystem. However, just having bad flags here should
+ * not crash the kernel or cause any other sort of bug, so mark this
+ * particular data race as intentional so that tooling (like KCSAN)
+ * can allow it and detect further problems.
+ */
+ data_race(ts->rd->f_flags |= O_NONBLOCK);
ts->wr = fget(wfd);
if (!ts->wr)
goto out_put_rd;
if (!(ts->wr->f_mode & FMODE_WRITE))
goto out_put_wr;
- ts->wr->f_flags |= O_NONBLOCK;
+ data_race(ts->wr->f_flags |= O_NONBLOCK);
client->trans = ts;
client->status = Connected;
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index 1fffe2bed5b0..dfdbe1ca5338 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -54,7 +54,6 @@ struct xen_9pfs_front_priv {
char *tag;
struct p9_client *client;
- int num_rings;
struct xen_9pfs_dataring *rings;
};
@@ -131,7 +130,7 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
if (list_entry_is_head(priv, &xen_9pfs_devs, list))
return -EINVAL;
- num = p9_req->tc.tag % priv->num_rings;
+ num = p9_req->tc.tag % XEN_9PFS_NUM_RINGS;
ring = &priv->rings[num];
again:
@@ -279,7 +278,7 @@ static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
list_del(&priv->list);
write_unlock(&xen_9pfs_lock);
- for (i = 0; i < priv->num_rings; i++) {
+ for (i = 0; i < XEN_9PFS_NUM_RINGS; i++) {
struct xen_9pfs_dataring *ring = &priv->rings[i];
cancel_work_sync(&ring->work);
@@ -408,15 +407,14 @@ static int xen_9pfs_front_init(struct xenbus_device *dev)
if (p9_xen_trans.maxsize > XEN_FLEX_RING_SIZE(max_ring_order))
p9_xen_trans.maxsize = XEN_FLEX_RING_SIZE(max_ring_order) / 2;
- priv->num_rings = XEN_9PFS_NUM_RINGS;
- priv->rings = kcalloc(priv->num_rings, sizeof(*priv->rings),
+ priv->rings = kcalloc(XEN_9PFS_NUM_RINGS, sizeof(*priv->rings),
GFP_KERNEL);
if (!priv->rings) {
kfree(priv);
return -ENOMEM;
}
- for (i = 0; i < priv->num_rings; i++) {
+ for (i = 0; i < XEN_9PFS_NUM_RINGS; i++) {
priv->rings[i].priv = priv;
ret = xen_9pfs_front_alloc_dataring(dev, &priv->rings[i],
max_ring_order);
@@ -434,10 +432,11 @@ static int xen_9pfs_front_init(struct xenbus_device *dev)
if (ret)
goto error_xenbus;
ret = xenbus_printf(xbt, dev->nodename, "num-rings", "%u",
- priv->num_rings);
+ XEN_9PFS_NUM_RINGS);
if (ret)
goto error_xenbus;
- for (i = 0; i < priv->num_rings; i++) {
+
+ for (i = 0; i < XEN_9PFS_NUM_RINGS; i++) {
char str[16];
BUILD_BUG_ON(XEN_9PFS_NUM_RINGS > 9);
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 0841f8d82419..c9fdcc5cdce1 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -503,9 +503,8 @@ out:
* architecture dependent calling conventions. 7+ can be supported in the
* future.
*/
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
- "Global functions as their definitions will be in vmlinux BTF");
+__bpf_kfunc_start_defs();
+
__bpf_kfunc int bpf_fentry_test1(int a)
{
return a + 1;
@@ -605,7 +604,7 @@ __bpf_kfunc void bpf_kfunc_call_memb_release(struct prog_test_member *p)
{
}
-__diag_pop();
+__bpf_kfunc_end_defs();
BTF_SET8_START(bpf_test_modify_return_ids)
BTF_ID_FLAGS(func, bpf_modify_return_test)
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index 8f19253024b0..741360219552 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -135,3 +135,4 @@ static void __exit ebtable_broute_fini(void)
module_init(ebtable_broute_init);
module_exit(ebtable_broute_fini);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Force packets to be routed instead of bridged");
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index 278f324e6752..dacd81b12e62 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -116,3 +116,4 @@ static void __exit ebtable_filter_fini(void)
module_init(ebtable_filter_init);
module_exit(ebtable_filter_fini);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ebtables legacy filter table");
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index 9066f7f376d5..0f2a8c6118d4 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -116,3 +116,4 @@ static void __exit ebtable_nat_fini(void)
module_init(ebtable_nat_init);
module_exit(ebtable_nat_fini);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ebtables legacy stateless nat table");
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index aa23479b20b2..99d82676f780 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -2595,3 +2595,4 @@ EXPORT_SYMBOL(ebt_do_table);
module_init(ebtables_init);
module_exit(ebtables_fini);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ebtables legacy core");
diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
index 71056ee84773..b5c406a6e765 100644
--- a/net/bridge/netfilter/nf_conntrack_bridge.c
+++ b/net/bridge/netfilter/nf_conntrack_bridge.c
@@ -416,3 +416,4 @@ module_exit(nf_conntrack_l3proto_bridge_fini);
MODULE_ALIAS("nf_conntrack-" __stringify(AF_BRIDGE));
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Bridge IPv4 and IPv6 connection tracking");
diff --git a/net/core/filter.c b/net/core/filter.c
index 21d75108c2e9..383f96b0a1c7 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -11767,9 +11767,7 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id)
return func;
}
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
- "Global functions as their definitions will be in vmlinux BTF");
+__bpf_kfunc_start_defs();
__bpf_kfunc int bpf_dynptr_from_skb(struct sk_buff *skb, u64 flags,
struct bpf_dynptr_kern *ptr__uninit)
{
@@ -11816,7 +11814,7 @@ __bpf_kfunc int bpf_sock_addr_set_sun_path(struct bpf_sock_addr_kern *sa_kern,
return 0;
}
-__diag_pop();
+__bpf_kfunc_end_defs();
int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
struct bpf_dynptr_kern *ptr__uninit)
@@ -11879,10 +11877,7 @@ static int __init bpf_kfunc_init(void)
}
late_initcall(bpf_kfunc_init);
-/* Disables missing prototype warnings */
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
- "Global functions as their definitions will be in vmlinux BTF");
+__bpf_kfunc_start_defs();
/* bpf_sock_destroy: Destroy the given socket with ECONNABORTED error code.
*
@@ -11916,7 +11911,7 @@ __bpf_kfunc int bpf_sock_destroy(struct sock_common *sock)
return sk->sk_prot->diag_destroy(sk, ECONNABORTED);
}
-__diag_pop()
+__bpf_kfunc_end_defs();
BTF_SET8_START(bpf_sk_iter_kfunc_ids)
BTF_ID_FLAGS(func, bpf_sock_destroy, KF_TRUSTED_ARGS)
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 5e409b98aba0..dec544337236 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -217,8 +217,12 @@ static int page_pool_init(struct page_pool *pool,
return -ENOMEM;
#endif
- if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
+ if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
+#ifdef CONFIG_PAGE_POOL_STATS
+ free_percpu(pool->recycle_stats);
+#endif
return -ENOMEM;
+ }
atomic_set(&pool->pages_state_release_cnt, 0);
diff --git a/net/core/xdp.c b/net/core/xdp.c
index df4789ab512d..b6f1d6dab3f2 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -696,9 +696,7 @@ struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf)
return nxdpf;
}
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
- "Global functions as their definitions will be in vmlinux BTF");
+__bpf_kfunc_start_defs();
/**
* bpf_xdp_metadata_rx_timestamp - Read XDP frame RX timestamp.
@@ -738,7 +736,7 @@ __bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash,
return -EOPNOTSUPP;
}
-__diag_pop();
+__bpf_kfunc_end_defs();
BTF_SET8_START(xdp_metadata_kfunc_ids)
#define XDP_METADATA_KFUNC(_, __, name, ___) BTF_ID_FLAGS(func, name, KF_TRUSTED_ARGS)
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 1b8cbfda6e5d..44b033fe1ef6 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -629,9 +629,6 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
if (dccp_parse_options(sk, dreq, skb))
goto drop_and_free;
- if (security_inet_conn_request(sk, skb, req))
- goto drop_and_free;
-
ireq = inet_rsk(req);
sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
@@ -639,6 +636,9 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
ireq->ireq_family = AF_INET;
ireq->ir_iif = READ_ONCE(sk->sk_bound_dev_if);
+ if (security_inet_conn_request(sk, skb, req))
+ goto drop_and_free;
+
/*
* Step 3: Process LISTEN state
*
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 8d344b219f84..4550b680665a 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -360,15 +360,15 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (dccp_parse_options(sk, dreq, skb))
goto drop_and_free;
- if (security_inet_conn_request(sk, skb, req))
- goto drop_and_free;
-
ireq = inet_rsk(req);
ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
ireq->ireq_family = AF_INET6;
ireq->ir_mark = inet_request_mark(sk, skb);
+ if (security_inet_conn_request(sk, skb, req))
+ goto drop_and_free;
+
if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
diff --git a/net/devlink/netlink_gen.c b/net/devlink/netlink_gen.c
index 9cbae0169249..788dfdc498a9 100644
--- a/net/devlink/netlink_gen.c
+++ b/net/devlink/netlink_gen.c
@@ -15,7 +15,7 @@ const struct nla_policy devlink_dl_port_function_nl_policy[DEVLINK_PORT_FN_ATTR_
[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR] = { .type = NLA_BINARY, },
[DEVLINK_PORT_FN_ATTR_STATE] = NLA_POLICY_MAX(NLA_U8, 1),
[DEVLINK_PORT_FN_ATTR_OPSTATE] = NLA_POLICY_MAX(NLA_U8, 1),
- [DEVLINK_PORT_FN_ATTR_CAPS] = NLA_POLICY_BITFIELD32(3),
+ [DEVLINK_PORT_FN_ATTR_CAPS] = NLA_POLICY_BITFIELD32(15),
};
const struct nla_policy devlink_dl_selftest_id_nl_policy[DEVLINK_ATTR_SELFTEST_ID_FLASH + 1] = {
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index b71dab630a87..80cdc6f6b34c 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -342,9 +342,7 @@ struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
skb = skb_copy_expand(frame->skb_std, 0,
skb_tailroom(frame->skb_std) + HSR_HLEN,
GFP_ATOMIC);
- prp_fill_rct(skb, frame, port);
-
- return skb;
+ return prp_fill_rct(skb, frame, port);
}
static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
diff --git a/net/ipv4/fou_bpf.c b/net/ipv4/fou_bpf.c
index 3760a14b6b57..4da03bf45c9b 100644
--- a/net/ipv4/fou_bpf.c
+++ b/net/ipv4/fou_bpf.c
@@ -22,9 +22,7 @@ enum bpf_fou_encap_type {
FOU_BPF_ENCAP_GUE,
};
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
- "Global functions as their definitions will be in BTF");
+__bpf_kfunc_start_defs();
/* bpf_skb_set_fou_encap - Set FOU encap parameters
*
@@ -100,7 +98,7 @@ __bpf_kfunc int bpf_skb_get_fou_encap(struct __sk_buff *skb_ctx,
return 0;
}
-__diag_pop()
+__bpf_kfunc_end_defs();
BTF_SET8_START(fou_kfunc_set)
BTF_ID_FLAGS(func, bpf_skb_set_fou_encap)
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index 56f6ecc43451..4d42d0756fd7 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -170,3 +170,4 @@ module_init(iptable_nat_init);
module_exit(iptable_nat_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("iptables legacy nat table");
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index ca5e5b21587c..0e7f53964d0a 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -108,3 +108,4 @@ static void __exit iptable_raw_fini(void)
module_init(iptable_raw_init);
module_exit(iptable_raw_fini);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("iptables legacy raw table");
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 265b39bc435b..482e733c3375 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -186,3 +186,4 @@ module_init(nf_defrag_init);
module_exit(nf_defrag_fini);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IPv4 defragmentation support");
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index f33aeab9424f..f01b038fc1cd 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -336,3 +336,4 @@ void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
EXPORT_SYMBOL_GPL(nf_send_unreach);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IPv4 packet rejection core");
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 98b25e5d147b..d37282c06e3d 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -306,7 +306,7 @@ struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
treq->af_specific = af_ops;
treq->syn_tos = TCP_SKB_CB(skb)->ip_dsfield;
- treq->req_usec_ts = -1;
+ treq->req_usec_ts = false;
#if IS_ENABLED(CONFIG_MPTCP)
treq->is_mptcp = sk_is_mptcp(sk);
diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c
index ef5472ed6158..7696417d0640 100644
--- a/net/ipv4/tcp_ao.c
+++ b/net/ipv4/tcp_ao.c
@@ -1315,7 +1315,8 @@ static int tcp_ao_parse_crypto(struct tcp_ao_add *cmd, struct tcp_ao_key *key)
key->maclen = cmd->maclen ?: 12; /* 12 is the default in RFC5925 */
/* Check: maclen + tcp-ao header <= (MAX_TCP_OPTION_SPACE - mss
- * - tstamp - wscale - sackperm),
+ * - tstamp (including sackperm)
+ * - wscale),
* see tcp_syn_options(), tcp_synack_options(), commit 33ad798c924b.
*
* In order to allow D-SACK with TCP-AO, the header size should be:
@@ -1342,9 +1343,9 @@ static int tcp_ao_parse_crypto(struct tcp_ao_add *cmd, struct tcp_ao_key *key)
* large to leave sufficient option space.
*/
syn_tcp_option_space = MAX_TCP_OPTION_SPACE;
+ syn_tcp_option_space -= TCPOLEN_MSS_ALIGNED;
syn_tcp_option_space -= TCPOLEN_TSTAMP_ALIGNED;
syn_tcp_option_space -= TCPOLEN_WSCALE_ALIGNED;
- syn_tcp_option_space -= TCPOLEN_SACKPERM_ALIGNED;
if (tcp_ao_len(key) > syn_tcp_option_space) {
err = -EMSGSIZE;
goto err_kfree;
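A worked check of the budget above, using the usual kernel constants (an assumption of this note, they are not shown in the hunk): MAX_TCP_OPTION_SPACE is 40 bytes, TCPOLEN_MSS_ALIGNED is 4, TCPOLEN_TSTAMP_ALIGNED is 12 and TCPOLEN_WSCALE_ALIGNED is 4, so a SYN carrying MSS, timestamps (whose aligned block already covers SACK-permitted) and window scaling leaves 40 - 4 - 12 - 4 = 20 bytes. The default 12-byte RFC 5925 MAC plus the 4-byte AO header gives tcp_ao_len() = 16, which fits; keys whose MAC pushes tcp_ao_len() past that budget are the ones rejected with -EMSGSIZE here.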
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 50aaa1527150..bcb55d98004c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -7115,7 +7115,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
req->syncookie = want_cookie;
tcp_rsk(req)->af_specific = af_ops;
tcp_rsk(req)->ts_off = 0;
- tcp_rsk(req)->req_usec_ts = -1;
+ tcp_rsk(req)->req_usec_ts = false;
#if IS_ENABLED(CONFIG_MPTCP)
tcp_rsk(req)->is_mptcp = 0;
#endif
@@ -7143,9 +7143,10 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
if (!dst)
goto drop_and_free;
- if (tmp_opt.tstamp_ok)
+ if (tmp_opt.tstamp_ok) {
+ tcp_rsk(req)->req_usec_ts = dst_tcp_usec_ts(dst);
tcp_rsk(req)->ts_off = af_ops->init_ts_off(net, skb);
-
+ }
if (!want_cookie && !isn) {
int max_syn_backlog = READ_ONCE(net->ipv4.sysctl_max_syn_backlog);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index f558c054cf6e..eb13a55d660c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -601,6 +601,44 @@ static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
}
#endif
+static __be32 *process_tcp_ao_options(struct tcp_sock *tp,
+ const struct tcp_request_sock *tcprsk,
+ struct tcp_out_options *opts,
+ struct tcp_key *key, __be32 *ptr)
+{
+#ifdef CONFIG_TCP_AO
+ u8 maclen = tcp_ao_maclen(key->ao_key);
+
+ if (tcprsk) {
+ u8 aolen = maclen + sizeof(struct tcp_ao_hdr);
+
+ *ptr++ = htonl((TCPOPT_AO << 24) | (aolen << 16) |
+ (tcprsk->ao_keyid << 8) |
+ (tcprsk->ao_rcv_next));
+ } else {
+ struct tcp_ao_key *rnext_key;
+ struct tcp_ao_info *ao_info;
+
+ ao_info = rcu_dereference_check(tp->ao_info,
+ lockdep_sock_is_held(&tp->inet_conn.icsk_inet.sk));
+ rnext_key = READ_ONCE(ao_info->rnext_key);
+ if (WARN_ON_ONCE(!rnext_key))
+ return ptr;
+ *ptr++ = htonl((TCPOPT_AO << 24) |
+ (tcp_ao_len(key->ao_key) << 16) |
+ (key->ao_key->sndid << 8) |
+ (rnext_key->rcvid));
+ }
+ opts->hash_location = (__u8 *)ptr;
+ ptr += maclen / sizeof(*ptr);
+ if (unlikely(maclen % sizeof(*ptr))) {
+ memset(ptr, TCPOPT_NOP, sizeof(*ptr));
+ ptr++;
+ }
+#endif
+ return ptr;
+}
+
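For reference, and not part of the patch: the htonl() stores in the helper above emit Kind (TCPOPT_AO, 29), Length, KeyID and RNextKeyID in the first 32-bit word, then reserve room for the MAC (opts->hash_location points at it), with a word of TCPOPT_NOP padding when maclen is not a multiple of four, matching the RFC 5925 option layout.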
/* Write previously computed TCP options to the packet.
*
* Beware: Something in the Internet is very sensitive to the ordering of
@@ -629,37 +667,7 @@ static void tcp_options_write(struct tcphdr *th, struct tcp_sock *tp,
opts->hash_location = (__u8 *)ptr;
ptr += 4;
} else if (tcp_key_is_ao(key)) {
-#ifdef CONFIG_TCP_AO
- u8 maclen = tcp_ao_maclen(key->ao_key);
-
- if (tcprsk) {
- u8 aolen = maclen + sizeof(struct tcp_ao_hdr);
-
- *ptr++ = htonl((TCPOPT_AO << 24) | (aolen << 16) |
- (tcprsk->ao_keyid << 8) |
- (tcprsk->ao_rcv_next));
- } else {
- struct tcp_ao_key *rnext_key;
- struct tcp_ao_info *ao_info;
-
- ao_info = rcu_dereference_check(tp->ao_info,
- lockdep_sock_is_held(&tp->inet_conn.icsk_inet.sk));
- rnext_key = READ_ONCE(ao_info->rnext_key);
- if (WARN_ON_ONCE(!rnext_key))
- goto out_ao;
- *ptr++ = htonl((TCPOPT_AO << 24) |
- (tcp_ao_len(key->ao_key) << 16) |
- (key->ao_key->sndid << 8) |
- (rnext_key->rcvid));
- }
- opts->hash_location = (__u8 *)ptr;
- ptr += maclen / sizeof(*ptr);
- if (unlikely(maclen % sizeof(*ptr))) {
- memset(ptr, TCPOPT_NOP, sizeof(*ptr));
- ptr++;
- }
-out_ao:
-#endif
+ ptr = process_tcp_ao_options(tp, tcprsk, opts, key, ptr);
}
if (unlikely(opts->mss)) {
*ptr++ = htonl((TCPOPT_MSS << 24) |
@@ -3693,8 +3701,6 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
memset(&opts, 0, sizeof(opts));
- if (tcp_rsk(req)->req_usec_ts < 0)
- tcp_rsk(req)->req_usec_ts = dst_tcp_usec_ts(dst);
now = tcp_clock_ns();
#ifdef CONFIG_SYN_COOKIES
if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok))
diff --git a/net/ipv4/tcp_sigpool.c b/net/ipv4/tcp_sigpool.c
index 65a8eaae2fec..55b310a722c7 100644
--- a/net/ipv4/tcp_sigpool.c
+++ b/net/ipv4/tcp_sigpool.c
@@ -231,7 +231,7 @@ static void cpool_schedule_cleanup(struct kref *kref)
*/
void tcp_sigpool_release(unsigned int id)
{
- if (WARN_ON_ONCE(id > cpool_populated || !cpool[id].alg))
+ if (WARN_ON_ONCE(id >= cpool_populated || !cpool[id].alg))
return;
/* slow-path */
@@ -245,7 +245,7 @@ EXPORT_SYMBOL_GPL(tcp_sigpool_release);
*/
void tcp_sigpool_get(unsigned int id)
{
- if (WARN_ON_ONCE(id > cpool_populated || !cpool[id].alg))
+ if (WARN_ON_ONCE(id >= cpool_populated || !cpool[id].alg))
return;
kref_get(&cpool[id].kref);
}
@@ -256,7 +256,7 @@ int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c) __cond_acquires(RC
struct crypto_ahash *hash;
rcu_read_lock_bh();
- if (WARN_ON_ONCE(id > cpool_populated || !cpool[id].alg)) {
+ if (WARN_ON_ONCE(id >= cpool_populated || !cpool[id].alg)) {
rcu_read_unlock_bh();
return -EINVAL;
}
@@ -301,7 +301,7 @@ EXPORT_SYMBOL_GPL(tcp_sigpool_end);
*/
size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len)
{
- if (WARN_ON_ONCE(id > cpool_populated || !cpool[id].alg))
+ if (WARN_ON_ONCE(id >= cpool_populated || !cpool[id].alg))
return -EINVAL;
return strscpy(buf, cpool[id].alg, buf_len);
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index bf3cb3a13600..52cf104e3478 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -170,3 +170,4 @@ module_init(ip6table_nat_init);
module_exit(ip6table_nat_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Ip6tables legacy nat table");
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index 08861d5d1f4d..fc9f6754028f 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -106,3 +106,4 @@ static void __exit ip6table_raw_fini(void)
module_init(ip6table_raw_init);
module_exit(ip6table_raw_fini);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Ip6tables legacy raw table");
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index d59b296b4f51..be7817fbc024 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -182,3 +182,4 @@ module_init(nf_defrag_init);
module_exit(nf_defrag_fini);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IPv6 defragmentation support");
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 58ccdb08c0fd..d45bc54b7ea5 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -413,3 +413,4 @@ void nf_send_unreach6(struct net *net, struct sk_buff *skb_in,
EXPORT_SYMBOL_GPL(nf_send_unreach6);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IPv6 packet rejection core");
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 500f6ed3b8cf..12eedc6ca2cc 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -181,14 +181,15 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
treq = tcp_rsk(req);
treq->tfo_listener = false;
- if (security_inet_conn_request(sk, skb, req))
- goto out_free;
-
req->mss = mss;
ireq->ir_rmt_port = th->source;
ireq->ir_num = ntohs(th->dest);
ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+
+ if (security_inet_conn_request(sk, skb, req))
+ goto out_free;
+
if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index fc3fddeb6f36..0ed6e34d6edd 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1823,7 +1823,7 @@ static int __init iucv_init(void)
rc = -EPROTONOSUPPORT;
goto out;
}
- ctl_set_bit(0, 1);
+ system_ctl_set_bit(0, CR0_IUCV_BIT);
rc = iucv_query_maxconn();
if (rc)
goto out_ctl;
@@ -1871,7 +1871,7 @@ out_dev:
out_int:
unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
out_ctl:
- ctl_clear_bit(0, 1);
+ system_ctl_clear_bit(0, 1);
out:
return rc;
}
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index dd1d8ffd5f59..65d1f6755f98 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1946,4 +1946,5 @@ module_init(kcm_init);
module_exit(kcm_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KCM (Kernel Connection Multiplexor) sockets");
MODULE_ALIAS_NETPROTO(PF_KCM);
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index 7cac441862e2..51bccfb00a9c 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -127,8 +127,14 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
skb->transport_header += llc_len;
skb_pull(skb, llc_len);
if (skb->protocol == htons(ETH_P_802_2)) {
- __be16 pdulen = eth_hdr(skb)->h_proto;
- s32 data_size = ntohs(pdulen) - llc_len;
+ __be16 pdulen;
+ s32 data_size;
+
+ if (skb->mac_len < ETH_HLEN)
+ return 0;
+
+ pdulen = eth_hdr(skb)->h_proto;
+ data_size = ntohs(pdulen) - llc_len;
if (data_size < 0 ||
!pskb_may_pull(skb, data_size))
diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c
index 79d1cef8f15a..06fb8e6944b0 100644
--- a/net/llc/llc_s_ac.c
+++ b/net/llc/llc_s_ac.c
@@ -153,6 +153,9 @@ int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb)
int rc = 1;
u32 data_size;
+ if (skb->mac_len < ETH_HLEN)
+ return 1;
+
llc_pdu_decode_sa(skb, mac_da);
llc_pdu_decode_da(skb, mac_sa);
llc_pdu_decode_ssap(skb, &dsap);
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index 05c6ae092053..f50654292510 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -76,6 +76,9 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb)
u32 data_size;
struct sk_buff *nskb;
+ if (skb->mac_len < ETH_HLEN)
+ goto out;
+
/* The test request command is type U (llc_len = 3) */
data_size = ntohs(eth_hdr(skb)->h_proto) - 3;
nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 3230506ae3ff..a2c16b501087 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -2450,3 +2450,4 @@ static void __exit ip_vs_cleanup(void)
module_init(ip_vs_init);
module_exit(ip_vs_cleanup);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IP Virtual Server");
diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c
index 5e6ec32aff2b..75f4c231f4a0 100644
--- a/net/netfilter/ipvs/ip_vs_dh.c
+++ b/net/netfilter/ipvs/ip_vs_dh.c
@@ -270,3 +270,4 @@ static void __exit ip_vs_dh_cleanup(void)
module_init(ip_vs_dh_init);
module_exit(ip_vs_dh_cleanup);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs destination hashing scheduler");
diff --git a/net/netfilter/ipvs/ip_vs_fo.c b/net/netfilter/ipvs/ip_vs_fo.c
index b846cc385279..ab117e5bc34e 100644
--- a/net/netfilter/ipvs/ip_vs_fo.c
+++ b/net/netfilter/ipvs/ip_vs_fo.c
@@ -72,3 +72,4 @@ static void __exit ip_vs_fo_cleanup(void)
module_init(ip_vs_fo_init);
module_exit(ip_vs_fo_cleanup);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs weighted failover scheduler");
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index ef1f45e43b63..f53899d12416 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -635,3 +635,4 @@ static void __exit ip_vs_ftp_exit(void)
module_init(ip_vs_ftp_init);
module_exit(ip_vs_ftp_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs ftp helper");
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index cf78ba4ce5ff..8ceec7a2fa8f 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -632,3 +632,4 @@ static void __exit ip_vs_lblc_cleanup(void)
module_init(ip_vs_lblc_init);
module_exit(ip_vs_lblc_cleanup);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs locality-based least-connection scheduler");
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 9eddf118b40e..0fb64707213f 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -817,3 +817,4 @@ static void __exit ip_vs_lblcr_cleanup(void)
module_init(ip_vs_lblcr_init);
module_exit(ip_vs_lblcr_cleanup);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs locality-based least-connection with replication scheduler");
diff --git a/net/netfilter/ipvs/ip_vs_lc.c b/net/netfilter/ipvs/ip_vs_lc.c
index 9d34d81fc6f1..c2764505e380 100644
--- a/net/netfilter/ipvs/ip_vs_lc.c
+++ b/net/netfilter/ipvs/ip_vs_lc.c
@@ -86,3 +86,4 @@ static void __exit ip_vs_lc_cleanup(void)
module_init(ip_vs_lc_init);
module_exit(ip_vs_lc_cleanup);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs least connection scheduler");
diff --git a/net/netfilter/ipvs/ip_vs_nq.c b/net/netfilter/ipvs/ip_vs_nq.c
index f56862a87518..ed7f5c889b41 100644
--- a/net/netfilter/ipvs/ip_vs_nq.c
+++ b/net/netfilter/ipvs/ip_vs_nq.c
@@ -136,3 +136,4 @@ static void __exit ip_vs_nq_cleanup(void)
module_init(ip_vs_nq_init);
module_exit(ip_vs_nq_cleanup);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs never queue scheduler");
diff --git a/net/netfilter/ipvs/ip_vs_ovf.c b/net/netfilter/ipvs/ip_vs_ovf.c
index c03066fdd5ca..c7708b809700 100644
--- a/net/netfilter/ipvs/ip_vs_ovf.c
+++ b/net/netfilter/ipvs/ip_vs_ovf.c
@@ -79,3 +79,4 @@ static void __exit ip_vs_ovf_cleanup(void)
module_init(ip_vs_ovf_init);
module_exit(ip_vs_ovf_cleanup);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs overflow connection scheduler");
diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
index 0ac6705a61d3..e4ce1d9a63f9 100644
--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
@@ -185,3 +185,4 @@ static void __exit ip_vs_sip_cleanup(void)
module_init(ip_vs_sip_init);
module_exit(ip_vs_sip_cleanup);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs sip helper");
diff --git a/net/netfilter/ipvs/ip_vs_rr.c b/net/netfilter/ipvs/ip_vs_rr.c
index 38495c6f6c7c..6baa34dff9f0 100644
--- a/net/netfilter/ipvs/ip_vs_rr.c
+++ b/net/netfilter/ipvs/ip_vs_rr.c
@@ -122,4 +122,5 @@ static void __exit ip_vs_rr_cleanup(void)
module_init(ip_vs_rr_init);
module_exit(ip_vs_rr_cleanup);
+MODULE_DESCRIPTION("ipvs round-robin scheduler");
MODULE_LICENSE("GPL");
diff --git a/net/netfilter/ipvs/ip_vs_sed.c b/net/netfilter/ipvs/ip_vs_sed.c
index 7663288e5358..a46f99a56618 100644
--- a/net/netfilter/ipvs/ip_vs_sed.c
+++ b/net/netfilter/ipvs/ip_vs_sed.c
@@ -137,3 +137,4 @@ static void __exit ip_vs_sed_cleanup(void)
module_init(ip_vs_sed_init);
module_exit(ip_vs_sed_cleanup);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs shortest expected delay scheduler");
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index c2028e412092..92e77d7a6b50 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -376,3 +376,4 @@ static void __exit ip_vs_sh_cleanup(void)
module_init(ip_vs_sh_init);
module_exit(ip_vs_sh_cleanup);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs source hashing scheduler");
diff --git a/net/netfilter/ipvs/ip_vs_twos.c b/net/netfilter/ipvs/ip_vs_twos.c
index 3308e4cc740a..8d5419edde50 100644
--- a/net/netfilter/ipvs/ip_vs_twos.c
+++ b/net/netfilter/ipvs/ip_vs_twos.c
@@ -137,3 +137,4 @@ static void __exit ip_vs_twos_cleanup(void)
module_init(ip_vs_twos_init);
module_exit(ip_vs_twos_cleanup);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs power of twos choice scheduler");
diff --git a/net/netfilter/ipvs/ip_vs_wlc.c b/net/netfilter/ipvs/ip_vs_wlc.c
index 09f584b564a0..9fa500927c0a 100644
--- a/net/netfilter/ipvs/ip_vs_wlc.c
+++ b/net/netfilter/ipvs/ip_vs_wlc.c
@@ -109,3 +109,4 @@ static void __exit ip_vs_wlc_cleanup(void)
module_init(ip_vs_wlc_init);
module_exit(ip_vs_wlc_cleanup);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs weighted least connection scheduler");
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index 1bc7a0789d85..85ce0d04afac 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -263,3 +263,4 @@ static void __exit ip_vs_wrr_cleanup(void)
module_init(ip_vs_wrr_init);
module_exit(ip_vs_wrr_cleanup);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs weighted round-robin scheduler");
diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c
index b21799d468d2..475358ec8212 100644
--- a/net/netfilter/nf_conntrack_bpf.c
+++ b/net/netfilter/nf_conntrack_bpf.c
@@ -230,9 +230,7 @@ static int _nf_conntrack_btf_struct_access(struct bpf_verifier_log *log,
return 0;
}
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
- "Global functions as their definitions will be in nf_conntrack BTF");
+__bpf_kfunc_start_defs();
/* bpf_xdp_ct_alloc - Allocate a new CT entry
*
@@ -467,7 +465,7 @@ __bpf_kfunc int bpf_ct_change_status(struct nf_conn *nfct, u32 status)
return nf_ct_change_status_common(nfct, status);
}
-__diag_pop()
+__bpf_kfunc_end_defs();
BTF_SET8_START(nf_ct_kfunc_set)
BTF_ID_FLAGS(func, bpf_xdp_ct_alloc, KF_ACQUIRE | KF_RET_NULL)
diff --git a/net/netfilter/nf_conntrack_broadcast.c b/net/netfilter/nf_conntrack_broadcast.c
index 9fb9b8031298..cfa0fe0356de 100644
--- a/net/netfilter/nf_conntrack_broadcast.c
+++ b/net/netfilter/nf_conntrack_broadcast.c
@@ -82,3 +82,4 @@ out:
EXPORT_SYMBOL_GPL(nf_conntrack_broadcast_help);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Broadcast connection tracking helper");
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 334db22199c1..fb0ae15e96df 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -57,6 +57,7 @@
#include "nf_internals.h"
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("List and change connection tracking table");
struct ctnetlink_list_dump_ctx {
struct nf_conn *last;
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index c928ff63b10e..f36727ed91e1 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -699,3 +699,4 @@ MODULE_ALIAS("ip_conntrack");
MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET));
MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET6));
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IPv4 and IPv6 connection tracking");
diff --git a/net/netfilter/nf_nat_bpf.c b/net/netfilter/nf_nat_bpf.c
index 141ee7783223..6e3b2f58855f 100644
--- a/net/netfilter/nf_nat_bpf.c
+++ b/net/netfilter/nf_nat_bpf.c
@@ -12,9 +12,7 @@
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
- "Global functions as their definitions will be in nf_nat BTF");
+__bpf_kfunc_start_defs();
/* bpf_ct_set_nat_info - Set source or destination nat address
*
@@ -54,7 +52,7 @@ __bpf_kfunc int bpf_ct_set_nat_info(struct nf_conn___init *nfct,
return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
}
-__diag_pop()
+__bpf_kfunc_end_defs();
BTF_SET8_START(nf_nat_kfunc_set)
BTF_ID_FLAGS(func, bpf_ct_set_nat_info, KF_TRUSTED_ARGS)
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index c4e0516a8dfa..c3d7ecbc777c 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -1263,6 +1263,7 @@ static void __exit nf_nat_cleanup(void)
}
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Network address translation core");
module_init(nf_nat_init);
module_exit(nf_nat_cleanup);
diff --git a/net/netfilter/nf_nat_redirect.c b/net/netfilter/nf_nat_redirect.c
index 6616ba5d0b04..5b37487d9d11 100644
--- a/net/netfilter/nf_nat_redirect.c
+++ b/net/netfilter/nf_nat_redirect.c
@@ -80,6 +80,26 @@ EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv4);
static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT;
+static bool nf_nat_redirect_ipv6_usable(const struct inet6_ifaddr *ifa, unsigned int scope)
+{
+ unsigned int ifa_addr_type = ipv6_addr_type(&ifa->addr);
+
+ if (ifa_addr_type & IPV6_ADDR_MAPPED)
+ return false;
+
+ if ((ifa->flags & IFA_F_TENTATIVE) && (!(ifa->flags & IFA_F_OPTIMISTIC)))
+ return false;
+
+ if (scope) {
+ unsigned int ifa_scope = ifa_addr_type & IPV6_ADDR_SCOPE_MASK;
+
+ if (!(scope & ifa_scope))
+ return false;
+ }
+
+ return true;
+}
+
unsigned int
nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
unsigned int hooknum)
@@ -89,14 +109,19 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
if (hooknum == NF_INET_LOCAL_OUT) {
newdst.in6 = loopback_addr;
} else {
+ unsigned int scope = ipv6_addr_scope(&ipv6_hdr(skb)->daddr);
struct inet6_dev *idev;
- struct inet6_ifaddr *ifa;
bool addr = false;
idev = __in6_dev_get(skb->dev);
if (idev != NULL) {
+ const struct inet6_ifaddr *ifa;
+
read_lock_bh(&idev->lock);
list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ if (!nf_nat_redirect_ipv6_usable(ifa, scope))
+ continue;
+
newdst.in6 = ifa->addr;
addr = true;
break;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 3c1fd8283bf4..a761ee6796f6 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -6520,6 +6520,12 @@ static int nft_setelem_deactivate(const struct net *net,
return ret;
}
+static void nft_setelem_catchall_destroy(struct nft_set_elem_catchall *catchall)
+{
+ list_del_rcu(&catchall->list);
+ kfree_rcu(catchall, rcu);
+}
+
static void nft_setelem_catchall_remove(const struct net *net,
const struct nft_set *set,
struct nft_elem_priv *elem_priv)
@@ -6528,8 +6534,7 @@ static void nft_setelem_catchall_remove(const struct net *net,
list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
if (catchall->elem == elem_priv) {
- list_del_rcu(&catchall->list);
- kfree_rcu(catchall, rcu);
+ nft_setelem_catchall_destroy(catchall);
break;
}
}
@@ -9678,11 +9683,12 @@ static struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
unsigned int gc_seq,
bool sync)
{
- struct nft_set_elem_catchall *catchall;
+ struct nft_set_elem_catchall *catchall, *next;
const struct nft_set *set = gc->set;
+ struct nft_elem_priv *elem_priv;
struct nft_set_ext *ext;
- list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
+ list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
ext = nft_set_elem_ext(set, catchall->elem);
if (!nft_set_elem_expired(ext))
@@ -9700,7 +9706,13 @@ dead_elem:
if (!gc)
return NULL;
- nft_trans_gc_elem_add(gc, catchall->elem);
+ elem_priv = catchall->elem;
+ if (sync) {
+ nft_setelem_data_deactivate(gc->net, gc->set, elem_priv);
+ nft_setelem_catchall_destroy(catchall);
+ }
+
+ nft_trans_gc_elem_add(gc, elem_priv);
}
return gc;
@@ -11386,4 +11398,5 @@ module_exit(nf_tables_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("Framework for packet filtering and classification");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFTABLES);
diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
index 50723ba08289..c0fc431991e8 100644
--- a/net/netfilter/nfnetlink_osf.c
+++ b/net/netfilter/nfnetlink_osf.c
@@ -447,4 +447,5 @@ module_init(nfnl_osf_init);
module_exit(nfnl_osf_fini);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Passive OS fingerprint matching");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_OSF);
diff --git a/net/netfilter/nft_chain_nat.c b/net/netfilter/nft_chain_nat.c
index 98e4946100c5..40e230d8b712 100644
--- a/net/netfilter/nft_chain_nat.c
+++ b/net/netfilter/nft_chain_nat.c
@@ -137,6 +137,7 @@ module_init(nft_chain_nat_init);
module_exit(nft_chain_nat_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("nftables network address translation support");
#ifdef CONFIG_NF_TABLES_IPV4
MODULE_ALIAS_NFT_CHAIN(AF_INET, "nat");
#endif
diff --git a/net/netfilter/nft_fib.c b/net/netfilter/nft_fib.c
index 04b51f285332..1bfe258018da 100644
--- a/net/netfilter/nft_fib.c
+++ b/net/netfilter/nft_fib.c
@@ -204,4 +204,5 @@ bool nft_fib_reduce(struct nft_regs_track *track,
EXPORT_SYMBOL_GPL(nft_fib_reduce);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Query routing table from nftables");
MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
index a5268e6dd32f..358e742afad7 100644
--- a/net/netfilter/nft_fwd_netdev.c
+++ b/net/netfilter/nft_fwd_netdev.c
@@ -270,4 +270,5 @@ module_exit(nft_fwd_netdev_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("nftables netdev packet forwarding support");
MODULE_ALIAS_NFT_AF_EXPR(5, "fwd");
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 7ddb9a78e3fc..ef93e0d3bee0 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -561,7 +561,7 @@ recent_mt_proc_write(struct file *file, const char __user *input,
{
struct recent_table *t = pde_data(file_inode(file));
struct recent_entry *e;
- char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")];
+ char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:255.255.255.255")];
const char *c = buf;
union nf_inet_addr addr = {};
u_int16_t family;
diff --git a/net/netlink/diag.c b/net/netlink/diag.c
index 9c4f231be275..1eeff9422856 100644
--- a/net/netlink/diag.c
+++ b/net/netlink/diag.c
@@ -257,5 +257,6 @@ static void __exit netlink_diag_exit(void)
module_init(netlink_diag_init);
module_exit(netlink_diag_exit);
+MODULE_DESCRIPTION("Netlink-based socket monitoring/diagnostic interface (sock_diag)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 16 /* AF_NETLINK */);
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 0b9a785dea45..3019a4406ca4 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -985,7 +985,7 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
if (err)
return err;
- nf_conn_act_ct_ext_add(ct);
+ nf_conn_act_ct_ext_add(skb, ct, ctinfo);
} else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
labels_nonzero(&info->labels.mask)) {
err = ovs_ct_set_labels(ct, key, &info->labels.value,
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index ac85d4644a3c..df8a271948a1 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -212,7 +212,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
conn->idle_timestamp = jiffies;
if (atomic_dec_and_test(&conn->active))
rxrpc_set_service_reap_timer(conn->rxnet,
- jiffies + rxrpc_connection_expiry);
+ jiffies + rxrpc_connection_expiry * HZ);
}
rxrpc_put_call(call, rxrpc_call_put_io_thread);
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 7d910aee4f8c..c553a30e9c83 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -87,7 +87,7 @@ static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
struct rxrpc_local *local =
container_of(timer, struct rxrpc_local, client_conn_reap_timer);
- if (local->kill_all_client_conns &&
+ if (!local->kill_all_client_conns &&
test_and_set_bit(RXRPC_CLIENT_CONN_REAP_TIMER, &local->client_conn_flags))
rxrpc_wake_up_io_thread(local);
}
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 9d3f26bf0440..c39252d61ebb 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -1098,7 +1098,7 @@ repeat:
}
} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
if (unlikely(!rcu_access_pointer(a->goto_chain))) {
- net_warn_ratelimited("can't go to NULL chain!\n");
+ tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR);
return TC_ACT_SHOT;
}
tcf_action_goto_chain_exec(a, res);
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 9583645e86c2..0db0ecf1d110 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -376,6 +376,17 @@ static void tcf_ct_flow_tc_ifidx(struct flow_offload *entry,
entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir];
}
+static void tcf_ct_flow_ct_ext_ifidx_update(struct flow_offload *entry)
+{
+ struct nf_conn_act_ct_ext *act_ct_ext;
+
+ act_ct_ext = nf_conn_act_ct_ext_find(entry->ct);
+ if (act_ct_ext) {
+ tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
+ tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
+ }
+}
+
static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
struct nf_conn *ct,
bool tcp, bool bidirectional)
@@ -671,6 +682,8 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
else
ctinfo = IP_CT_ESTABLISHED_REPLY;
+ nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
+ tcf_ct_flow_ct_ext_ifidx_update(flow);
flow_offload_refresh(nf_ft, flow, force_refresh);
if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
/* Process this flow in SW to allow promoting to ASSURED */
@@ -1034,7 +1047,7 @@ do_nat:
tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);
if (!nf_ct_is_confirmed(ct))
- nf_conn_act_ct_ext_add(ct);
+ nf_conn_act_ct_ext_add(skb, ct, ctinfo);
/* This will take care of sending queued events
* even if the connection is already confirmed.
diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c
index c9a811f4c7ee..393b78729216 100644
--- a/net/sched/act_gate.c
+++ b/net/sched/act_gate.c
@@ -677,4 +677,5 @@ static void __exit gate_cleanup_module(void)
module_init(gate_init_module);
module_exit(gate_cleanup_module);
+MODULE_DESCRIPTION("TC gate action");
MODULE_LICENSE("GPL v2");
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 1daeb2182b70..1976bd163986 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1658,6 +1658,7 @@ static inline int __tcf_classify(struct sk_buff *skb,
int act_index,
u32 *last_executed_chain)
{
+ u32 orig_reason = res->drop_reason;
#ifdef CONFIG_NET_CLS_ACT
const int max_reclassify_loop = 16;
const struct tcf_proto *first_tp;
@@ -1712,8 +1713,14 @@ reclassify:
goto reset;
}
#endif
- if (err >= 0)
+ if (err >= 0) {
+ /* Policy drop or drop reason is overwritten by
+ * classifiers with a bogus value (0) */
+ if (err == TC_ACT_SHOT &&
+ res->drop_reason == SKB_NOT_DROPPED_YET)
+ tcf_set_drop_reason(res, orig_reason);
return err;
+ }
}
if (unlikely(n)) {
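
The cls_api.c hunk above snapshots res->drop_reason on entry so that, when a classifier returns TC_ACT_SHOT but leaves the reason at SKB_NOT_DROPPED_YET, the earlier reason is restored rather than lost. The same guard in isolation, as a sketch (the helper name is made up; the field, constant and tcf_set_drop_reason() names are taken from the hunk, and the include location is assumed):

#include <net/pkt_cls.h>        /* assumed home of tcf_set_drop_reason() */

static int demo_filter_result(struct tcf_result *res, u32 orig_reason, int err)
{
        /* keep the earlier, meaningful reason if the classifier zeroed it */
        if (err == TC_ACT_SHOT && res->drop_reason == SKB_NOT_DROPPED_YET)
                tcf_set_drop_reason(res, orig_reason);
        return err;
}
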
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 1b92c33b5f81..a1f56931330c 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -341,4 +341,5 @@ static void __exit exit_basic(void)
module_init(init_basic)
module_exit(exit_basic)
+MODULE_DESCRIPTION("TC basic classifier");
MODULE_LICENSE("GPL");
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index bd9322d71910..7ee8dbf49ed0 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -222,4 +222,5 @@ static void __exit exit_cgroup_cls(void)
module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
+MODULE_DESCRIPTION("TC cgroup classifier");
MODULE_LICENSE("GPL");
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index c49d6af0e048..afc534ee0a18 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -446,4 +446,5 @@ static void __exit exit_fw(void)
module_init(init_fw)
module_exit(exit_fw)
+MODULE_DESCRIPTION("SKB mark based TC classifier");
MODULE_LICENSE("GPL");
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 1424bfeaca73..12a505db4183 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -684,4 +684,5 @@ static void __exit exit_route4(void)
module_init(init_route4)
module_exit(exit_route4)
+MODULE_DESCRIPTION("Routing table realm based TC classifier");
MODULE_LICENSE("GPL");
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 6663e971a13e..d5bdfd4a7655 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -1489,4 +1489,5 @@ static void __exit exit_u32(void)
module_init(init_u32)
module_exit(exit_u32)
+MODULE_DESCRIPTION("Universal 32bit based TC Classifier");
MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
index cac870eb7897..9a0b85190a2c 100644
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -574,3 +574,4 @@ static void __exit cbs_module_exit(void)
module_init(cbs_module_init)
module_exit(cbs_module_exit)
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Credit Based shaper");
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 19c851125901..ae1da08e268f 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -513,3 +513,4 @@ module_init(choke_module_init)
module_exit(choke_module_exit)
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Choose and keep responsive flows scheduler");
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 19901e77cd3b..097740a9afea 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -495,3 +495,4 @@ static void __exit drr_exit(void)
module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Deficit Round Robin scheduler");
diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c
index 61d1f0e32cf3..4808159a5466 100644
--- a/net/sched/sch_etf.c
+++ b/net/sched/sch_etf.c
@@ -513,3 +513,4 @@ static void __exit etf_module_exit(void)
module_init(etf_module_init)
module_exit(etf_module_exit)
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Earliest TxTime First (ETF) qdisc");
diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
index b10efeaf0629..f7c88495946b 100644
--- a/net/sched/sch_ets.c
+++ b/net/sched/sch_ets.c
@@ -826,3 +826,4 @@ static void __exit ets_exit(void)
module_init(ets_init);
module_exit(ets_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Enhanced Transmission Selection(ETS) scheduler");
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index e1040421b797..450f5c67ac49 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -269,3 +269,4 @@ struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
return q ? : ERR_PTR(err);
}
EXPORT_SYMBOL(fifo_create_dflt);
+MODULE_DESCRIPTION("Single queue packet and byte based First In First Out(P/BFIFO) scheduler");
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 0fd18c344ab5..3a31c47fea9b 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -919,14 +919,8 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
[TCA_FQ_TIMER_SLACK] = { .type = NLA_U32 },
[TCA_FQ_HORIZON] = { .type = NLA_U32 },
[TCA_FQ_HORIZON_DROP] = { .type = NLA_U8 },
- [TCA_FQ_PRIOMAP] = {
- .type = NLA_BINARY,
- .len = sizeof(struct tc_prio_qopt),
- },
- [TCA_FQ_WEIGHTS] = {
- .type = NLA_BINARY,
- .len = FQ_BANDS * sizeof(s32),
- },
+ [TCA_FQ_PRIOMAP] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_prio_qopt)),
+ [TCA_FQ_WEIGHTS] = NLA_POLICY_EXACT_LEN(FQ_BANDS * sizeof(s32)),
};
/* compress a u8 array with all elems <= 3 to an array of 2-bit fields */
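
The sch_fq policy hunk above is a conversion to the NLA_POLICY_EXACT_LEN() helper. The open-coded NLA_BINARY entries it replaces only capped the attribute length, whereas the helper also rejects payloads shorter than the requested size. A small illustrative policy using the same helper (the DEMO_* names are made up):

#include <net/netlink.h>

enum {
        DEMO_ATTR_UNSPEC,
        DEMO_ATTR_BLOB,         /* fixed 16-byte binary payload */
        __DEMO_ATTR_MAX,
};
#define DEMO_ATTR_MAX (__DEMO_ATTR_MAX - 1)

static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
        [DEMO_ATTR_BLOB] = NLA_POLICY_EXACT_LEN(16),
};
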
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 872d127c9db4..8c61eb3dc943 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -945,3 +945,4 @@ module_init(gred_module_init)
module_exit(gred_module_exit)
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Generic Random Early Detection qdisc");
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 880c5f16b29c..16c45da4036a 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1693,5 +1693,6 @@ hfsc_cleanup(void)
}
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Hierarchical Fair Service Curve scheduler");
module_init(hfsc_init);
module_exit(hfsc_cleanup);
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 0d947414e616..7349233eaa9b 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -2179,3 +2179,4 @@ static void __exit htb_module_exit(void)
module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Hierarchical Token Bucket scheduler");
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index a463a63192c3..5fa9eaa79bfc 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -370,3 +370,4 @@ module_exit(ingress_module_exit);
MODULE_ALIAS("sch_clsact");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Ingress and clsact based ingress and egress qdiscs");
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 793009f445c0..43e53ee00a56 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -789,3 +789,4 @@ module_init(mqprio_module_init);
module_exit(mqprio_module_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Classful multiqueue prio qdisc");
diff --git a/net/sched/sch_mqprio_lib.c b/net/sched/sch_mqprio_lib.c
index 83b3793c4012..b3a5572c167b 100644
--- a/net/sched/sch_mqprio_lib.c
+++ b/net/sched/sch_mqprio_lib.c
@@ -129,3 +129,4 @@ void mqprio_fp_to_offload(u32 fp[TC_QOPT_MAX_QUEUE],
EXPORT_SYMBOL_GPL(mqprio_fp_to_offload);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Shared mqprio qdisc code currently between taprio and mqprio");
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 75c9c860182b..d66d5f0ec080 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -410,3 +410,4 @@ module_init(multiq_module_init)
module_exit(multiq_module_exit)
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Multi queue to hardware queue mapping qdisc");
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 6ba2dc191ed9..fa678eb88528 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -1307,3 +1307,4 @@ static void __exit netem_module_exit(void)
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Network characteristics emulator qdisc");
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index 35f49edf63db..992f0c8d7988 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -226,3 +226,4 @@ static void __exit plug_module_exit(void)
module_init(plug_module_init)
module_exit(plug_module_exit)
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Qdisc to plug and unplug traffic via netlink control");
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index fdc5ef52c3ee..8ecdd3ef6f8e 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -433,3 +433,4 @@ module_init(prio_module_init)
module_exit(prio_module_exit)
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Simple 3-band priority qdisc");
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 28315166fe8e..48a604c320c7 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1535,3 +1535,4 @@ static void __exit qfq_exit(void)
module_init(qfq_init);
module_exit(qfq_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Quick Fair Queueing Plus qdisc");
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 16277b6a0238..607b6c8b3a9b 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -563,3 +563,4 @@ module_init(red_module_init)
module_exit(red_module_exit)
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Random Early Detection qdisc");
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 66dcb18638fe..eb77558fa367 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -937,3 +937,4 @@ static void __exit sfq_module_exit(void)
module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Stochastic Fairness qdisc");
diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c
index 5df2dacb7b1a..28beb11762d8 100644
--- a/net/sched/sch_skbprio.c
+++ b/net/sched/sch_skbprio.c
@@ -307,3 +307,4 @@ module_init(skbprio_module_init)
module_exit(skbprio_module_exit)
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SKB priority based scheduling qdisc");
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 2e1949de4171..31a8252bd09c 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -2572,3 +2572,4 @@ static void __exit taprio_module_exit(void)
module_init(taprio_module_init);
module_exit(taprio_module_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Time Aware Priority qdisc");
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 17d2d00ddb18..dd6b1a723bf7 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -621,3 +621,4 @@ static void __exit tbf_module_exit(void)
module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Token Bucket Filter qdisc");
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 7721239c185f..59304611dc00 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -523,3 +523,4 @@ module_init(teql_init);
module_exit(teql_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("True (or trivial) link equalizer qdisc");
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index abd2667734d4..da97f946b79b 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -275,7 +275,7 @@ static int __smc_release(struct smc_sock *smc)
if (!smc->use_fallback) {
rc = smc_close_active(smc);
- sock_set_flag(sk, SOCK_DEAD);
+ smc_sock_set_flag(sk, SOCK_DEAD);
sk->sk_shutdown |= SHUTDOWN_MASK;
} else {
if (sk->sk_state != SMC_CLOSED) {
@@ -1743,7 +1743,7 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
if (new_clcsock)
sock_release(new_clcsock);
new_sk->sk_state = SMC_CLOSED;
- sock_set_flag(new_sk, SOCK_DEAD);
+ smc_sock_set_flag(new_sk, SOCK_DEAD);
sock_put(new_sk); /* final */
*new_smc = NULL;
goto out;
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 24745fde4ac2..e377980b8414 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -377,4 +377,9 @@ int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb);
int smc_nl_enable_hs_limitation(struct sk_buff *skb, struct genl_info *info);
int smc_nl_disable_hs_limitation(struct sk_buff *skb, struct genl_info *info);
+static inline void smc_sock_set_flag(struct sock *sk, enum sock_flags flag)
+{
+ set_bit(flag, &sk->sk_flags);
+}
+
#endif /* __SMC_H */
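
The new smc_sock_set_flag() helper above mirrors sock_set_flag() but uses the atomic set_bit() rather than the non-atomic __set_bit(), and the SMC call sites in the following hunks are switched over to it. Usage is a one-liner; a sketch with an illustrative caller, assuming net/smc/smc.h is in scope:

#include <net/sock.h>
#include "smc.h"        /* assumed include path for the helper shown above */

static void demo_mark_dead(struct sock *sk)
{
        smc_sock_set_flag(sk, SOCK_DEAD);       /* atomic set_bit() on sk->sk_flags */
}
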
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index 89105e95b452..3c06625ceb20 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -28,13 +28,15 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
{
struct smc_cdc_tx_pend *cdcpend = (struct smc_cdc_tx_pend *)pnd_snd;
struct smc_connection *conn = cdcpend->conn;
+ struct smc_buf_desc *sndbuf_desc;
struct smc_sock *smc;
int diff;
+ sndbuf_desc = conn->sndbuf_desc;
smc = container_of(conn, struct smc_sock, conn);
bh_lock_sock(&smc->sk);
- if (!wc_status) {
- diff = smc_curs_diff(cdcpend->conn->sndbuf_desc->len,
+ if (!wc_status && sndbuf_desc) {
+ diff = smc_curs_diff(sndbuf_desc->len,
&cdcpend->conn->tx_curs_fin,
&cdcpend->cursor);
/* sndbuf_space is decreased in smc_sendmsg */
@@ -114,9 +116,6 @@ int smc_cdc_msg_send(struct smc_connection *conn,
union smc_host_cursor cfed;
int rc;
- if (unlikely(!READ_ONCE(conn->sndbuf_desc)))
- return -ENOBUFS;
-
smc_cdc_add_pending_send(conn, pend);
conn->tx_cdc_seq++;
@@ -385,7 +384,7 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
smc->sk.sk_shutdown |= RCV_SHUTDOWN;
if (smc->clcsock && smc->clcsock->sk)
smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
- sock_set_flag(&smc->sk, SOCK_DONE);
+ smc_sock_set_flag(&smc->sk, SOCK_DONE);
sock_hold(&smc->sk); /* sock_put in close_work */
if (!queue_work(smc_close_wq, &conn->close_work))
sock_put(&smc->sk);
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index dbdf03e8aa5b..10219f55aad1 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -116,7 +116,8 @@ static void smc_close_cancel_work(struct smc_sock *smc)
struct sock *sk = &smc->sk;
release_sock(sk);
- cancel_work_sync(&smc->conn.close_work);
+ if (cancel_work_sync(&smc->conn.close_work))
+ sock_put(sk);
cancel_delayed_work_sync(&smc->conn.tx_work);
lock_sock(sk);
}
@@ -173,7 +174,7 @@ void smc_close_active_abort(struct smc_sock *smc)
break;
}
- sock_set_flag(sk, SOCK_DEAD);
+ smc_sock_set_flag(sk, SOCK_DEAD);
sk->sk_state_change(sk);
if (release_clcsock) {
diff --git a/net/socket.c b/net/socket.c
index 0d1c4e78fc7f..3379c64217a4 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1685,20 +1685,16 @@ struct file *__sys_socket_file(int family, int type, int protocol)
* Therefore, __weak is needed to ensure that the call is still
* emitted, by telling the compiler that we don't know what the
* function might eventually be.
- *
- * __diag_* below are needed to dismiss the missing prototype warning.
*/
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
- "A fmod_ret entry point for BPF programs");
+__bpf_hook_start();
__weak noinline int update_socket_protocol(int family, int type, int protocol)
{
return protocol;
}
-__diag_pop();
+__bpf_hook_end();
int __sys_socket(int family, int type, int protocol)
{
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 9c210273d06b..daa9582ec861 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -111,7 +111,8 @@ static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
pipefs_sb = rpc_get_sb_net(net);
if (pipefs_sb) {
- __rpc_clnt_remove_pipedir(clnt);
+ if (pipefs_sb == clnt->pipefs_sb)
+ __rpc_clnt_remove_pipedir(clnt);
rpc_put_sb_net(net);
}
}
@@ -151,6 +152,8 @@ rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
{
struct dentry *dentry;
+ clnt->pipefs_sb = pipefs_sb;
+
if (clnt->cl_program->pipe_dir_name != NULL) {
dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
if (IS_ERR(dentry))
@@ -2171,6 +2174,7 @@ call_connect_status(struct rpc_task *task)
task->tk_status = 0;
switch (status) {
case -ECONNREFUSED:
+ case -ECONNRESET:
/* A positive refusal suggests a rebind is needed. */
if (RPC_IS_SOFTCONN(task))
break;
@@ -2179,7 +2183,6 @@ call_connect_status(struct rpc_task *task)
goto out_retry;
}
fallthrough;
- case -ECONNRESET:
case -ECONNABORTED:
case -ENETDOWN:
case -ENETUNREACH:
@@ -2220,7 +2223,7 @@ call_connect_status(struct rpc_task *task)
}
xprt_switch_put(xps);
if (!task->tk_xprt)
- return;
+ goto out;
}
goto out_retry;
case -ENOBUFS:
@@ -2235,6 +2238,7 @@ out_next:
out_retry:
/* Check for timeouts before looping back to call_bind */
task->tk_action = call_bind;
+out:
rpc_check_timeout(task);
}
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 5988a5c5ff3f..102c3818bc54 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -769,6 +769,10 @@ void rpcb_getport_async(struct rpc_task *task)
child = rpcb_call_async(rpcb_clnt, map, proc);
rpc_release_client(rpcb_clnt);
+ if (IS_ERR(child)) {
+ /* rpcb_map_release() has freed the arguments */
+ return;
+ }
xprt->stat.bind_count++;
rpc_put_task(child);
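
The rpcb_clnt.c hunk above adds a standard ERR_PTR() guard: rpcb_call_async() hands back either a valid rpc_task or an errno encoded with ERR_PTR(), and the error case must be caught with IS_ERR() before the pointer is touched (the new comment notes the arguments have already been freed at that point). A self-contained sketch of the idiom with illustrative demo_* names:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>

static int demo_value;

/* illustrative allocator: valid pointer on success, ERR_PTR() on failure */
static void *demo_create(bool ok)
{
        return ok ? &demo_value : ERR_PTR(-ENOENT);
}

static int demo_use(bool ok)
{
        void *obj = demo_create(ok);

        if (IS_ERR(obj))
                return PTR_ERR(obj);    /* bail out before dereferencing */
        return 0;
}
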
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index ab453ede54f0..2364c485540c 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -283,7 +283,7 @@ out_unlock:
xprt_clear_locked(xprt);
out_sleep:
task->tk_status = -EAGAIN;
- if (RPC_IS_SOFT(task))
+ if (RPC_IS_SOFT(task) || RPC_IS_SOFTCONN(task))
rpc_sleep_on_timeout(&xprt->sending, task, NULL,
xprt_request_timeout(req));
else
@@ -349,7 +349,7 @@ out_unlock:
xprt_clear_locked(xprt);
out_sleep:
task->tk_status = -EAGAIN;
- if (RPC_IS_SOFT(task))
+ if (RPC_IS_SOFT(task) || RPC_IS_SOFTCONN(task))
rpc_sleep_on_timeout(&xprt->sending, task, NULL,
xprt_request_timeout(req));
else
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index a15bf2ede89b..58f3dc8d0d71 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1181,6 +1181,7 @@ static void xs_sock_reset_state_flags(struct rpc_xprt *xprt)
{
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+ transport->xprt_err = 0;
clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state);
clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state);
@@ -2772,18 +2773,13 @@ static void xs_wake_error(struct sock_xprt *transport)
{
int sockerr;
- if (!test_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
- return;
- mutex_lock(&transport->recv_mutex);
- if (transport->sock == NULL)
- goto out;
if (!test_and_clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
- goto out;
+ return;
sockerr = xchg(&transport->xprt_err, 0);
- if (sockerr < 0)
+ if (sockerr < 0) {
xprt_wake_pending_tasks(&transport->xprt, sockerr);
-out:
- mutex_unlock(&transport->recv_mutex);
+ xs_tcp_force_close(&transport->xprt);
+ }
}
static void xs_wake_pending(struct sock_xprt *transport)
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index e8fd257c0e68..1a9a5bdaccf4 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -88,7 +88,7 @@ const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
[TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
- [TIPC_NLA_LINK_NAME] = { .type = NLA_STRING,
+ [TIPC_NLA_LINK_NAME] = { .type = NLA_NUL_STRING,
.len = TIPC_MAX_LINK_NAME },
[TIPC_NLA_LINK_MTU] = { .type = NLA_U32 },
[TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG },
@@ -125,7 +125,7 @@ const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
const struct nla_policy tipc_nl_bearer_policy[TIPC_NLA_BEARER_MAX + 1] = {
[TIPC_NLA_BEARER_UNSPEC] = { .type = NLA_UNSPEC },
- [TIPC_NLA_BEARER_NAME] = { .type = NLA_STRING,
+ [TIPC_NLA_BEARER_NAME] = { .type = NLA_NUL_STRING,
.len = TIPC_MAX_BEARER_NAME },
[TIPC_NLA_BEARER_PROP] = { .type = NLA_NESTED },
[TIPC_NLA_BEARER_DOMAIN] = { .type = NLA_U32 }
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index e22c81435ef7..f6dc896bf44c 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -130,6 +130,8 @@ static void virtio_transport_init_hdr(struct sk_buff *skb,
hdr->dst_port = cpu_to_le32(dst_port);
hdr->flags = cpu_to_le32(info->flags);
hdr->len = cpu_to_le32(payload_len);
+ hdr->buf_alloc = cpu_to_le32(0);
+ hdr->fwd_cnt = cpu_to_le32(0);
}
static void virtio_transport_copy_nonlinear_skb(const struct sk_buff *skb,
@@ -1369,11 +1371,17 @@ virtio_transport_recv_connected(struct sock *sk,
vsk->peer_shutdown |= RCV_SHUTDOWN;
if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
vsk->peer_shutdown |= SEND_SHUTDOWN;
- if (vsk->peer_shutdown == SHUTDOWN_MASK &&
- vsock_stream_has_data(vsk) <= 0 &&
- !sock_flag(sk, SOCK_DONE)) {
- (void)virtio_transport_reset(vsk, NULL);
- virtio_transport_do_close(vsk, true);
+ if (vsk->peer_shutdown == SHUTDOWN_MASK) {
+ if (vsock_stream_has_data(vsk) <= 0 && !sock_flag(sk, SOCK_DONE)) {
+ (void)virtio_transport_reset(vsk, NULL);
+ virtio_transport_do_close(vsk, true);
+ }
+ /* Remove this socket anyway because the remote peer sent
+ * the shutdown. This way a new connection will succeed
+ * if the remote peer uses the same source port,
+ * even if the old socket is still unreleased, but now disconnected.
+ */
+ vsock_remove_sock(vsk);
}
if (le32_to_cpu(virtio_vsock_hdr(skb)->flags))
sk->sk_state_change(sk);
diff --git a/net/xfrm/xfrm_interface_bpf.c b/net/xfrm/xfrm_interface_bpf.c
index d74f3fd20f2b..7d5e920141e9 100644
--- a/net/xfrm/xfrm_interface_bpf.c
+++ b/net/xfrm/xfrm_interface_bpf.c
@@ -27,9 +27,7 @@ struct bpf_xfrm_info {
int link;
};
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
- "Global functions as their definitions will be in xfrm_interface BTF");
+__bpf_kfunc_start_defs();
/* bpf_skb_get_xfrm_info - Get XFRM metadata
*
@@ -93,7 +91,7 @@ __bpf_kfunc int bpf_skb_set_xfrm_info(struct __sk_buff *skb_ctx, const struct bp
return 0;
}
-__diag_pop()
+__bpf_kfunc_end_defs();
BTF_SET8_START(xfrm_ifc_kfunc_set)
BTF_ID_FLAGS(func, bpf_skb_get_xfrm_info)
diff --git a/rust/Makefile b/rust/Makefile
index 28dd31cf1179..543b37f6c77f 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -381,6 +381,8 @@ $(obj)/exports_kernel_generated.h: $(obj)/kernel.o FORCE
quiet_cmd_rustc_procmacro = $(RUSTC_OR_CLIPPY_QUIET) P $@
cmd_rustc_procmacro = \
$(RUSTC_OR_CLIPPY) $(rust_common_flags) \
+ -Clinker-flavor=gcc -Clinker=$(HOSTCC) \
+ -Clink-args='$(call escsq,$(KBUILD_HOSTLDFLAGS))' \
--emit=dep-info=$(depfile) --emit=link=$@ --extern proc_macro \
--crate-type proc-macro \
--crate-name $(patsubst lib%.so,%,$(notdir $@)) $<
diff --git a/samples/landlock/sandboxer.c b/samples/landlock/sandboxer.c
index e2056c8b902c..08596c0ef070 100644
--- a/samples/landlock/sandboxer.c
+++ b/samples/landlock/sandboxer.c
@@ -8,6 +8,8 @@
*/
#define _GNU_SOURCE
+#define __SANE_USERSPACE_TYPES__
+#include <arpa/inet.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/landlock.h>
@@ -51,7 +53,9 @@ static inline int landlock_restrict_self(const int ruleset_fd,
#define ENV_FS_RO_NAME "LL_FS_RO"
#define ENV_FS_RW_NAME "LL_FS_RW"
-#define ENV_PATH_TOKEN ":"
+#define ENV_TCP_BIND_NAME "LL_TCP_BIND"
+#define ENV_TCP_CONNECT_NAME "LL_TCP_CONNECT"
+#define ENV_DELIMITER ":"
static int parse_path(char *env_path, const char ***const path_list)
{
@@ -60,13 +64,13 @@ static int parse_path(char *env_path, const char ***const path_list)
if (env_path) {
num_paths++;
for (i = 0; env_path[i]; i++) {
- if (env_path[i] == ENV_PATH_TOKEN[0])
+ if (env_path[i] == ENV_DELIMITER[0])
num_paths++;
}
}
*path_list = malloc(num_paths * sizeof(**path_list));
for (i = 0; i < num_paths; i++)
- (*path_list)[i] = strsep(&env_path, ENV_PATH_TOKEN);
+ (*path_list)[i] = strsep(&env_path, ENV_DELIMITER);
return num_paths;
}
@@ -81,8 +85,8 @@ static int parse_path(char *env_path, const char ***const path_list)
/* clang-format on */
-static int populate_ruleset(const char *const env_var, const int ruleset_fd,
- const __u64 allowed_access)
+static int populate_ruleset_fs(const char *const env_var, const int ruleset_fd,
+ const __u64 allowed_access)
{
int num_paths, i, ret = 1;
char *env_path_name;
@@ -143,6 +147,39 @@ out_free_name:
return ret;
}
+static int populate_ruleset_net(const char *const env_var, const int ruleset_fd,
+ const __u64 allowed_access)
+{
+ int ret = 1;
+ char *env_port_name, *strport;
+ struct landlock_net_port_attr net_port = {
+ .allowed_access = allowed_access,
+ .port = 0,
+ };
+
+ env_port_name = getenv(env_var);
+ if (!env_port_name)
+ return 0;
+ env_port_name = strdup(env_port_name);
+ unsetenv(env_var);
+
+ while ((strport = strsep(&env_port_name, ENV_DELIMITER))) {
+ net_port.port = atoi(strport);
+ if (landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &net_port, 0)) {
+ fprintf(stderr,
+ "Failed to update the ruleset with port \"%llu\": %s\n",
+ net_port.port, strerror(errno));
+ goto out_free_name;
+ }
+ }
+ ret = 0;
+
+out_free_name:
+ free(env_port_name);
+ return ret;
+}
+
/* clang-format off */
#define ACCESS_FS_ROUGHLY_READ ( \
@@ -166,39 +203,58 @@ out_free_name:
/* clang-format on */
-#define LANDLOCK_ABI_LAST 3
+#define LANDLOCK_ABI_LAST 4
int main(const int argc, char *const argv[], char *const *const envp)
{
const char *cmd_path;
char *const *cmd_argv;
int ruleset_fd, abi;
+ char *env_port_name;
__u64 access_fs_ro = ACCESS_FS_ROUGHLY_READ,
access_fs_rw = ACCESS_FS_ROUGHLY_READ | ACCESS_FS_ROUGHLY_WRITE;
+
struct landlock_ruleset_attr ruleset_attr = {
.handled_access_fs = access_fs_rw,
+ .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP,
};
if (argc < 2) {
fprintf(stderr,
- "usage: %s=\"...\" %s=\"...\" %s <cmd> [args]...\n\n",
- ENV_FS_RO_NAME, ENV_FS_RW_NAME, argv[0]);
+ "usage: %s=\"...\" %s=\"...\" %s=\"...\" %s=\"...\"%s "
+ "<cmd> [args]...\n\n",
+ ENV_FS_RO_NAME, ENV_FS_RW_NAME, ENV_TCP_BIND_NAME,
+ ENV_TCP_CONNECT_NAME, argv[0]);
fprintf(stderr,
"Launch a command in a restricted environment.\n\n");
- fprintf(stderr, "Environment variables containing paths, "
- "each separated by a colon:\n");
+ fprintf(stderr,
+ "Environment variables containing paths and ports "
+ "each separated by a colon:\n");
fprintf(stderr,
"* %s: list of paths allowed to be used in a read-only way.\n",
ENV_FS_RO_NAME);
fprintf(stderr,
- "* %s: list of paths allowed to be used in a read-write way.\n",
+ "* %s: list of paths allowed to be used in a read-write way.\n\n",
ENV_FS_RW_NAME);
fprintf(stderr,
+ "Environment variables containing ports are optional "
+ "and could be skipped.\n");
+ fprintf(stderr,
+ "* %s: list of ports allowed to bind (server).\n",
+ ENV_TCP_BIND_NAME);
+ fprintf(stderr,
+ "* %s: list of ports allowed to connect (client).\n",
+ ENV_TCP_CONNECT_NAME);
+ fprintf(stderr,
"\nexample:\n"
"%s=\"/bin:/lib:/usr:/proc:/etc:/dev/urandom\" "
"%s=\"/dev/null:/dev/full:/dev/zero:/dev/pts:/tmp\" "
+ "%s=\"9418\" "
+ "%s=\"80:443\" "
"%s bash -i\n\n",
- ENV_FS_RO_NAME, ENV_FS_RW_NAME, argv[0]);
+ ENV_FS_RO_NAME, ENV_FS_RW_NAME, ENV_TCP_BIND_NAME,
+ ENV_TCP_CONNECT_NAME, argv[0]);
fprintf(stderr,
"This sandboxer can use Landlock features "
"up to ABI version %d.\n",
@@ -255,7 +311,12 @@ int main(const int argc, char *const argv[], char *const *const envp)
case 2:
/* Removes LANDLOCK_ACCESS_FS_TRUNCATE for ABI < 3 */
ruleset_attr.handled_access_fs &= ~LANDLOCK_ACCESS_FS_TRUNCATE;
-
+ __attribute__((fallthrough));
+ case 3:
+ /* Removes network support for ABI < 4 */
+ ruleset_attr.handled_access_net &=
+ ~(LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP);
fprintf(stderr,
"Hint: You should update the running kernel "
"to leverage Landlock features "
@@ -274,18 +335,42 @@ int main(const int argc, char *const argv[], char *const *const envp)
access_fs_ro &= ruleset_attr.handled_access_fs;
access_fs_rw &= ruleset_attr.handled_access_fs;
+ /* Removes bind access attribute if not supported by a user. */
+ env_port_name = getenv(ENV_TCP_BIND_NAME);
+ if (!env_port_name) {
+ ruleset_attr.handled_access_net &=
+ ~LANDLOCK_ACCESS_NET_BIND_TCP;
+ }
+ /* Removes connect access attribute if not supported by a user. */
+ env_port_name = getenv(ENV_TCP_CONNECT_NAME);
+ if (!env_port_name) {
+ ruleset_attr.handled_access_net &=
+ ~LANDLOCK_ACCESS_NET_CONNECT_TCP;
+ }
+
ruleset_fd =
landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
if (ruleset_fd < 0) {
perror("Failed to create a ruleset");
return 1;
}
- if (populate_ruleset(ENV_FS_RO_NAME, ruleset_fd, access_fs_ro)) {
+
+ if (populate_ruleset_fs(ENV_FS_RO_NAME, ruleset_fd, access_fs_ro)) {
+ goto err_close_ruleset;
+ }
+ if (populate_ruleset_fs(ENV_FS_RW_NAME, ruleset_fd, access_fs_rw)) {
goto err_close_ruleset;
}
- if (populate_ruleset(ENV_FS_RW_NAME, ruleset_fd, access_fs_rw)) {
+
+ if (populate_ruleset_net(ENV_TCP_BIND_NAME, ruleset_fd,
+ LANDLOCK_ACCESS_NET_BIND_TCP)) {
+ goto err_close_ruleset;
+ }
+ if (populate_ruleset_net(ENV_TCP_CONNECT_NAME, ruleset_fd,
+ LANDLOCK_ACCESS_NET_CONNECT_TCP)) {
goto err_close_ruleset;
}
+
if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
perror("Failed to restrict privileges");
goto err_close_ruleset;
diff --git a/scripts/Makefile.btf b/scripts/Makefile.btf
new file mode 100644
index 000000000000..82377e470aed
--- /dev/null
+++ b/scripts/Makefile.btf
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+
+pahole-ver := $(CONFIG_PAHOLE_VERSION)
+pahole-flags-y :=
+
+# pahole 1.18 through 1.21 can't handle zero-sized per-CPU vars
+ifeq ($(call test-le, $(pahole-ver), 121),y)
+pahole-flags-$(call test-ge, $(pahole-ver), 118) += --skip_encoding_btf_vars
+endif
+
+pahole-flags-$(call test-ge, $(pahole-ver), 121) += --btf_gen_floats
+
+pahole-flags-$(call test-ge, $(pahole-ver), 122) += -j
+
+pahole-flags-$(CONFIG_PAHOLE_HAS_LANG_EXCLUDE) += --lang_exclude=rust
+
+pahole-flags-$(call test-ge, $(pahole-ver), 125) += --skip_encoding_btf_inconsistent_proto --btf_gen_optimized
+
+export PAHOLE_FLAGS := $(pahole-flags-y)
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index da37bfa97211..9a3063735e71 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -425,7 +425,7 @@ $(obj)/lib.a: $(lib-y) FORCE
$(call if_changed,ar)
quiet_cmd_ld_multi_m = LD [M] $@
- cmd_ld_multi_m = $(LD) $(ld_flags) -r -o $@ @$(patsubst %.o,%.mod,$@) $(cmd_objtool)
+ cmd_ld_multi_m = $(LD) $(ld_flags) -r -o $@ @$< $(cmd_objtool)
define rule_ld_multi_m
$(call cmd_and_savecmd,ld_multi_m)
diff --git a/scripts/Makefile.host b/scripts/Makefile.host
index 8f7f842b54f9..08d83d9db31a 100644
--- a/scripts/Makefile.host
+++ b/scripts/Makefile.host
@@ -91,6 +91,8 @@ hostcxx_flags = -Wp,-MMD,$(depfile) \
# current working directory, which may be not accessible in the out-of-tree
# modules case.
hostrust_flags = --out-dir $(dir $@) --emit=dep-info=$(depfile) \
+ -Clinker-flavor=gcc -Clinker=$(HOSTCC) \
+ -Clink-args='$(call escsq,$(KBUILD_HOSTLDFLAGS))' \
$(KBUILD_HOSTRUSTFLAGS) $(HOST_EXTRARUSTFLAGS) \
$(HOSTRUSTFLAGS_$(target-stem))
diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal
index b3a6aa8fbe8c..8568d256d6fb 100644
--- a/scripts/Makefile.modfinal
+++ b/scripts/Makefile.modfinal
@@ -28,14 +28,11 @@ quiet_cmd_cc_o_c = CC [M] $@
%.mod.o: %.mod.c FORCE
$(call if_changed_dep,cc_o_c)
-ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink)
-
quiet_cmd_ld_ko_o = LD [M] $@
cmd_ld_ko_o += \
$(LD) -r $(KBUILD_LDFLAGS) \
$(KBUILD_LDFLAGS_MODULE) $(LDFLAGS_MODULE) \
- -T scripts/module.lds -o $@ $(filter %.o, $^); \
- $(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true)
+ -T scripts/module.lds -o $@ $(filter %.o, $^)
quiet_cmd_btf_ko = BTF [M] $@
cmd_btf_ko = \
diff --git a/scripts/Makefile.package b/scripts/Makefile.package
index 2bcab02da965..3addd1c0b989 100644
--- a/scripts/Makefile.package
+++ b/scripts/Makefile.package
@@ -69,9 +69,9 @@ $(linux-tarballs): .tmp_HEAD FORCE
# ---------------------------------------------------------------------------
quiet_cmd_mkspec = GEN $@
- cmd_mkspec = $(srctree)/scripts/package/mkspec > $@
+ cmd_mkspec = $(srctree)/scripts/package/mkspec $@
-kernel.spec: FORCE
+rpmbuild/SPECS/kernel.spec: FORCE
$(call cmd,mkspec)
PHONY += rpm-sources
@@ -88,14 +88,12 @@ srcrpm-pkg: private build-type := s
binrpm-pkg: private build-type := b
rpm-pkg srcrpm-pkg: rpm-sources
-rpm-pkg srcrpm-pkg binrpm-pkg: kernel.spec
- +$(strip rpmbuild -b$(build-type) kernel.spec \
+rpm-pkg srcrpm-pkg binrpm-pkg: rpmbuild/SPECS/kernel.spec
+ +$(strip rpmbuild -b$(build-type) rpmbuild/SPECS/kernel.spec \
--define='_topdir $(abspath rpmbuild)' \
$(if $(filter a b, $(build-type)), \
--target $(UTS_MACHINE)-linux --build-in-place --noprep --define='_smp_mflags %{nil}' \
$$(rpm -q rpm >/dev/null 2>&1 || echo --nodeps)) \
- $(if $(filter b, $(build-type)), \
- --without devel) \
$(RPMOPTS))
# deb-pkg srcdeb-pkg bindeb-pkg
diff --git a/scripts/Makefile.userprogs b/scripts/Makefile.userprogs
index fb415297337a..f3a7e1ef3753 100644
--- a/scripts/Makefile.userprogs
+++ b/scripts/Makefile.userprogs
@@ -19,19 +19,19 @@ user-cobjs := $(addprefix $(obj)/, $(user-cobjs))
user_ccflags = -Wp,-MMD,$(depfile) $(KBUILD_USERCFLAGS) $(userccflags) \
$($(target-stem)-userccflags)
user_ldflags = $(KBUILD_USERLDFLAGS) $(userldflags) $($(target-stem)-userldflags)
+user_ldlibs = $(userldlibs) $($(target-stem)-userldlibs)
# Create an executable from a single .c file
quiet_cmd_user_cc_c = CC [U] $@
cmd_user_cc_c = $(CC) $(user_ccflags) $(user_ldflags) -o $@ $< \
- $($(target-stem)-userldlibs)
+ $(user_ldlibs)
$(user-csingle): $(obj)/%: $(src)/%.c FORCE
$(call if_changed_dep,user_cc_c)
# Link an executable based on list of .o files
quiet_cmd_user_ld = LD [U] $@
cmd_user_ld = $(CC) $(user_ldflags) -o $@ \
- $(addprefix $(obj)/, $($(target-stem)-objs)) \
- $($(target-stem)-userldlibs)
+ $(addprefix $(obj)/, $($(target-stem)-objs)) $(user_ldlibs)
$(user-cmulti): FORCE
$(call if_changed,user_ld)
$(call multi_depend, $(user-cmulti), , -objs)
diff --git a/scripts/Makefile.vdsoinst b/scripts/Makefile.vdsoinst
new file mode 100644
index 000000000000..1022d9fdd976
--- /dev/null
+++ b/scripts/Makefile.vdsoinst
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# ==========================================================================
+# Install unstripped copies of vDSO
+# ==========================================================================
+
+PHONY := __default
+__default:
+ @:
+
+include $(srctree)/scripts/Kbuild.include
+
+install-dir := $(MODLIB)/vdso
+
+define gen_install_rules
+
+src := $$(firstword $$(subst :,$(space),$(1)))
+dest := $(install-dir)/$$(or $$(word 2,$$(subst :,$(space),$(1))),$$(patsubst %.dbg,%,$$(notdir $(1))))
+
+__default: $$(dest)
+$$(dest): $$(src) FORCE
+ $$(call cmd,install)
+
+# Some architectures create .build-id symlinks
+ifneq ($(filter arm sparc x86, $(SRCARCH)),)
+link := $(install-dir)/.build-id/$$(shell $(READELF) -n $$(src) | sed -n 's@^.*Build ID: \(..\)\(.*\)@\1/\2@p')
+
+__default: $$(link)
+$$(link): $$(dest) FORCE
+ $$(call cmd,symlink)
+endif
+
+endef
+
+$(foreach x, $(sort $(INSTALL_FILES)), $(eval $(call gen_install_rules,$(x))))
+
+quiet_cmd_install = INSTALL $@
+ cmd_install = mkdir -p $(dir $@); cp $< $@
+
+quiet_cmd_symlink = SYMLINK $@
+ cmd_symlink = mkdir -p $(dir $@); ln -sf --relative $< $@
+
+PHONY += FORCE
+FORCE:
+
+.PHONY: $(PHONY)
diff --git a/scripts/clang-tools/gen_compile_commands.py b/scripts/clang-tools/gen_compile_commands.py
index a84cc5737c2c..180952fb91c1 100755
--- a/scripts/clang-tools/gen_compile_commands.py
+++ b/scripts/clang-tools/gen_compile_commands.py
@@ -19,7 +19,7 @@ _DEFAULT_OUTPUT = 'compile_commands.json'
_DEFAULT_LOG_LEVEL = 'WARNING'
_FILENAME_PATTERN = r'^\..*\.cmd$'
-_LINE_PATTERN = r'^savedcmd_[^ ]*\.o := (.* )([^ ]*\.[cS]) *(;|$)'
+_LINE_PATTERN = r'^(saved)?cmd_[^ ]*\.o := (?P<command_prefix>.* )(?P<file_path>[^ ]*\.[cS]) *(;|$)'
_VALID_LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
# The tools/ directory adopts a different build system, and produces .cmd
# files in a different format. Do not support it.
@@ -213,15 +213,15 @@ def main():
result = line_matcher.match(f.readline())
if result:
try:
- entry = process_line(directory, result.group(1),
- result.group(2))
+ entry = process_line(directory, result.group('command_prefix'),
+ result.group('file_path'))
compile_commands.append(entry)
except ValueError as err:
logging.info('Could not add line from %s: %s',
cmdfile, err)
with open(output, 'wt') as f:
- json.dump(compile_commands, f, indent=2, sort_keys=True)
+ json.dump(sorted(compile_commands, key=lambda x: x["file"]), f, indent=2, sort_keys=True)
if __name__ == '__main__':
diff --git a/scripts/clang-tools/run-clang-tools.py b/scripts/clang-tools/run-clang-tools.py
index 3266708a8658..f31ffd09e1ea 100755
--- a/scripts/clang-tools/run-clang-tools.py
+++ b/scripts/clang-tools/run-clang-tools.py
@@ -33,6 +33,11 @@ def parse_arguments():
path_help = "Path to the compilation database to parse"
parser.add_argument("path", type=str, help=path_help)
+ checks_help = "Checks to pass to the analysis"
+ parser.add_argument("-checks", type=str, default=None, help=checks_help)
+ header_filter_help = "Pass the -header-filter value to the tool"
+ parser.add_argument("-header-filter", type=str, default=None, help=header_filter_help)
+
return parser.parse_args()
@@ -45,14 +50,27 @@ def init(l, a):
def run_analysis(entry):
# Disable all checks, then re-enable the ones we want
- checks = []
- checks.append("-checks=-*")
- if args.type == "clang-tidy":
- checks.append("linuxkernel-*")
+ global args
+ checks = None
+ if args.checks:
+ checks = args.checks.split(',')
else:
- checks.append("clang-analyzer-*")
- checks.append("-clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling")
- p = subprocess.run(["clang-tidy", "-p", args.path, ",".join(checks), entry["file"]],
+ checks = ["-*"]
+ if args.type == "clang-tidy":
+ checks.append("linuxkernel-*")
+ else:
+ checks.append("clang-analyzer-*")
+ checks.append("-clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling")
+ file = entry["file"]
+ if not file.endswith(".c") and not file.endswith(".cpp"):
+ with lock:
+ print(f"Skipping non-C file: '{file}'", file=sys.stderr)
+ return
+ pargs = ["clang-tidy", "-p", args.path, "-checks=" + ",".join(checks)]
+ if args.header_filter:
+ pargs.append("-header-filter=" + args.header_filter)
+ pargs.append(file)
+ p = subprocess.run(pargs,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=entry["directory"])
diff --git a/scripts/dummy-tools/gcc b/scripts/dummy-tools/gcc
index 07f6dc4c5cf6..e6c41427c02f 100755
--- a/scripts/dummy-tools/gcc
+++ b/scripts/dummy-tools/gcc
@@ -91,6 +91,16 @@ if arg_contain -S "$@"; then
fi
exit 0
fi
+
+ # For arch/powerpc/tools/gcc-check-fpatchable-function-entry.sh
+ if arg_contain -m64 "$@" && arg_contain -fpatchable-function-entry=2 "$@"; then
+ echo "func:"
+ echo ".section __patchable_function_entries"
+ echo ".localentry"
+ echo " nop"
+ echo " nop"
+ exit 0
+ fi
fi
# To set GCC_PLUGINS
diff --git a/scripts/mod/Makefile b/scripts/mod/Makefile
index c9e38ad937fd..3c54125eb373 100644
--- a/scripts/mod/Makefile
+++ b/scripts/mod/Makefile
@@ -5,7 +5,7 @@ CFLAGS_REMOVE_empty.o += $(CC_FLAGS_LTO)
hostprogs-always-y += modpost mk_elfconfig
always-y += empty.o
-modpost-objs := modpost.o file2alias.o sumversion.o
+modpost-objs := modpost.o file2alias.o sumversion.o symsearch.o
devicetable-offsets-file := devicetable-offsets.h
@@ -16,7 +16,7 @@ targets += $(devicetable-offsets-file) devicetable-offsets.s
# dependencies on generated files need to be listed explicitly
-$(obj)/modpost.o $(obj)/file2alias.o $(obj)/sumversion.o: $(obj)/elfconfig.h
+$(obj)/modpost.o $(obj)/file2alias.o $(obj)/sumversion.o $(obj)/symsearch.o: $(obj)/elfconfig.h
$(obj)/file2alias.o: $(obj)/$(devicetable-offsets-file)
quiet_cmd_elfconfig = MKELF $@
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
index abe65f8968dd..e91a3c38143b 100644
--- a/scripts/mod/devicetable-offsets.c
+++ b/scripts/mod/devicetable-offsets.c
@@ -265,7 +265,14 @@ int main(void)
DEVID(cdx_device_id);
DEVID_FIELD(cdx_device_id, vendor);
DEVID_FIELD(cdx_device_id, device);
+ DEVID_FIELD(cdx_device_id, subvendor);
+ DEVID_FIELD(cdx_device_id, subdevice);
+ DEVID_FIELD(cdx_device_id, class);
+ DEVID_FIELD(cdx_device_id, class_mask);
DEVID_FIELD(cdx_device_id, override_only);
+ DEVID(vchiq_device_id);
+ DEVID_FIELD(vchiq_device_id, name);
+
return 0;
}
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 7056751c29b1..4829680a0a6d 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -1348,13 +1348,13 @@ static int do_typec_entry(const char *filename, void *symval, char *alias)
/* Looks like: tee:uuid */
static int do_tee_entry(const char *filename, void *symval, char *alias)
{
- DEF_FIELD(symval, tee_client_device_id, uuid);
+ DEF_FIELD_ADDR(symval, tee_client_device_id, uuid);
sprintf(alias, "tee:%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
- uuid.b[0], uuid.b[1], uuid.b[2], uuid.b[3], uuid.b[4],
- uuid.b[5], uuid.b[6], uuid.b[7], uuid.b[8], uuid.b[9],
- uuid.b[10], uuid.b[11], uuid.b[12], uuid.b[13], uuid.b[14],
- uuid.b[15]);
+ uuid->b[0], uuid->b[1], uuid->b[2], uuid->b[3], uuid->b[4],
+ uuid->b[5], uuid->b[6], uuid->b[7], uuid->b[8], uuid->b[9],
+ uuid->b[10], uuid->b[11], uuid->b[12], uuid->b[13], uuid->b[14],
+ uuid->b[15]);
add_wildcard(alias);
return 1;
@@ -1401,10 +1401,10 @@ static int do_mhi_ep_entry(const char *filename, void *symval, char *alias)
/* Looks like: ishtp:{guid} */
static int do_ishtp_entry(const char *filename, void *symval, char *alias)
{
- DEF_FIELD(symval, ishtp_device_id, guid);
+ DEF_FIELD_ADDR(symval, ishtp_device_id, guid);
strcpy(alias, ISHTP_MODULE_PREFIX "{");
- add_guid(alias, guid);
+ add_guid(alias, *guid);
strcat(alias, "}");
return 1;
@@ -1458,6 +1458,10 @@ static int do_cdx_entry(const char *filename, void *symval,
{
DEF_FIELD(symval, cdx_device_id, vendor);
DEF_FIELD(symval, cdx_device_id, device);
+ DEF_FIELD(symval, cdx_device_id, subvendor);
+ DEF_FIELD(symval, cdx_device_id, subdevice);
+ DEF_FIELD(symval, cdx_device_id, class);
+ DEF_FIELD(symval, cdx_device_id, class_mask);
DEF_FIELD(symval, cdx_device_id, override_only);
switch (override_only) {
@@ -1475,6 +1479,18 @@ static int do_cdx_entry(const char *filename, void *symval,
ADD(alias, "v", vendor != CDX_ANY_ID, vendor);
ADD(alias, "d", device != CDX_ANY_ID, device);
+ ADD(alias, "sv", subvendor != CDX_ANY_ID, subvendor);
+ ADD(alias, "sd", subdevice != CDX_ANY_ID, subdevice);
+ ADD(alias, "c", class_mask == 0xFFFFFF, class);
+
+ return 1;
+}
+
+static int do_vchiq_entry(const char *filename, void *symval, char *alias)
+{
+ DEF_FIELD_ADDR(symval, vchiq_device_id, name);
+ sprintf(alias, "vchiq:%s", *name);
+
return 1;
}
@@ -1558,6 +1574,7 @@ static const struct devtable devtable[] = {
{"dfl", SIZE_dfl_device_id, do_dfl_entry},
{"ishtp", SIZE_ishtp_device_id, do_ishtp_entry},
{"cdx", SIZE_cdx_device_id, do_cdx_entry},
+ {"vchiq", SIZE_vchiq_device_id, do_vchiq_entry},
};
/* Create MODULE_ALIAS() statements.
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index ea72af229d2b..973b5e5ae2dd 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -22,7 +22,6 @@
#include <errno.h>
#include "modpost.h"
#include "../../include/linux/license.h"
-#include "../../include/linux/module_symbol.h"
static bool module_enabled;
/* Are we using CONFIG_MODVERSIONS? */
@@ -577,11 +576,14 @@ static int parse_elf(struct elf_info *info, const char *filename)
*p = TO_NATIVE(*p);
}
+ symsearch_init(info);
+
return 1;
}
static void parse_elf_finish(struct elf_info *info)
{
+ symsearch_finish(info);
release_file(info->hdr, info->size);
}
@@ -792,24 +794,16 @@ static void check_section(const char *modname, struct elf_info *elf,
#define ALL_INIT_DATA_SECTIONS \
".init.setup", ".init.rodata", ".meminit.rodata", \
".init.data", ".meminit.data"
-#define ALL_EXIT_DATA_SECTIONS \
- ".exit.data", ".memexit.data"
-
-#define ALL_INIT_TEXT_SECTIONS \
- ".init.text", ".meminit.text"
-#define ALL_EXIT_TEXT_SECTIONS \
- ".exit.text", ".memexit.text"
#define ALL_PCI_INIT_SECTIONS \
".pci_fixup_early", ".pci_fixup_header", ".pci_fixup_final", \
".pci_fixup_enable", ".pci_fixup_resume", \
".pci_fixup_resume_early", ".pci_fixup_suspend"
-#define ALL_XXXINIT_SECTIONS MEM_INIT_SECTIONS
-#define ALL_XXXEXIT_SECTIONS MEM_EXIT_SECTIONS
+#define ALL_XXXINIT_SECTIONS ".meminit.*"
#define ALL_INIT_SECTIONS INIT_SECTIONS, ALL_XXXINIT_SECTIONS
-#define ALL_EXIT_SECTIONS EXIT_SECTIONS, ALL_XXXEXIT_SECTIONS
+#define ALL_EXIT_SECTIONS ".exit.*"
#define DATA_SECTIONS ".data", ".data.rel"
#define TEXT_SECTIONS ".text", ".text.*", ".sched.text", \
@@ -819,20 +813,13 @@ static void check_section(const char *modname, struct elf_info *elf,
".coldtext", ".softirqentry.text"
#define INIT_SECTIONS ".init.*"
-#define MEM_INIT_SECTIONS ".meminit.*"
-#define EXIT_SECTIONS ".exit.*"
-#define MEM_EXIT_SECTIONS ".memexit.*"
-
-#define ALL_TEXT_SECTIONS ALL_INIT_TEXT_SECTIONS, ALL_EXIT_TEXT_SECTIONS, \
+#define ALL_TEXT_SECTIONS ".init.text", ".meminit.text", ".exit.text", \
TEXT_SECTIONS, OTHER_TEXT_SECTIONS
enum mismatch {
- TEXT_TO_ANY_INIT,
- DATA_TO_ANY_INIT,
- TEXTDATA_TO_ANY_EXIT,
+ TEXTDATA_TO_ANY_INIT_EXIT,
XXXINIT_TO_SOME_INIT,
- XXXEXIT_TO_SOME_EXIT,
ANY_INIT_TO_ANY_EXIT,
ANY_EXIT_TO_ANY_INIT,
EXTABLE_TO_NON_TEXT,
@@ -863,19 +850,9 @@ static const struct sectioncheck sectioncheck[] = {
* normal code and data
*/
{
- .fromsec = { TEXT_SECTIONS, NULL },
- .bad_tosec = { ALL_INIT_SECTIONS, NULL },
- .mismatch = TEXT_TO_ANY_INIT,
-},
-{
- .fromsec = { DATA_SECTIONS, NULL },
- .bad_tosec = { ALL_XXXINIT_SECTIONS, INIT_SECTIONS, NULL },
- .mismatch = DATA_TO_ANY_INIT,
-},
-{
.fromsec = { TEXT_SECTIONS, DATA_SECTIONS, NULL },
- .bad_tosec = { ALL_EXIT_SECTIONS, NULL },
- .mismatch = TEXTDATA_TO_ANY_EXIT,
+ .bad_tosec = { ALL_INIT_SECTIONS, ALL_EXIT_SECTIONS, NULL },
+ .mismatch = TEXTDATA_TO_ANY_INIT_EXIT,
},
/* Do not reference init code/data from meminit code/data */
{
@@ -883,12 +860,6 @@ static const struct sectioncheck sectioncheck[] = {
.bad_tosec = { INIT_SECTIONS, NULL },
.mismatch = XXXINIT_TO_SOME_INIT,
},
-/* Do not reference exit code/data from memexit code/data */
-{
- .fromsec = { ALL_XXXEXIT_SECTIONS, NULL },
- .bad_tosec = { EXIT_SECTIONS, NULL },
- .mismatch = XXXEXIT_TO_SOME_EXIT,
-},
/* Do not use exit code/data from init code */
{
.fromsec = { ALL_INIT_SECTIONS, NULL },
@@ -1006,19 +977,7 @@ static int secref_whitelist(const char *fromsec, const char *fromsym,
/* symbols in data sections that may refer to any init/exit sections */
if (match(fromsec, PATTERNS(DATA_SECTIONS)) &&
match(tosec, PATTERNS(ALL_INIT_SECTIONS, ALL_EXIT_SECTIONS)) &&
- match(fromsym, PATTERNS("*_template", // scsi uses *_template a lot
- "*_timer", // arm uses ops structures named _timer a lot
- "*_sht", // scsi also used *_sht to some extent
- "*_ops",
- "*_probe",
- "*_probe_one",
- "*_console")))
- return 0;
-
- /* symbols in data sections that may refer to meminit sections */
- if (match(fromsec, PATTERNS(DATA_SECTIONS)) &&
- match(tosec, PATTERNS(ALL_XXXINIT_SECTIONS, ALL_XXXEXIT_SECTIONS)) &&
- match(fromsym, PATTERNS("*driver")))
+ match(fromsym, PATTERNS("*_ops", "*_probe", "*_console")))
return 0;
/*
@@ -1028,7 +987,7 @@ static int secref_whitelist(const char *fromsec, const char *fromsym,
*/
if (!extra_warn &&
match(fromsec, PATTERNS(DATA_SECTIONS)) &&
- match(tosec, PATTERNS(EXIT_SECTIONS)) &&
+ match(tosec, PATTERNS(ALL_EXIT_SECTIONS)) &&
match(fromsym, PATTERNS("*driver")))
return 0;
@@ -1050,71 +1009,10 @@ static int secref_whitelist(const char *fromsec, const char *fromsym,
return 1;
}
-/*
- * If there's no name there, ignore it; likewise, ignore it if it's
- * one of the magic symbols emitted used by current tools.
- *
- * Otherwise if find_symbols_between() returns those symbols, they'll
- * fail the whitelist tests and cause lots of false alarms ... fixable
- * only by merging __exit and __init sections into __text, bloating
- * the kernel (which is especially evil on embedded platforms).
- */
-static inline bool is_valid_name(struct elf_info *elf, Elf_Sym *sym)
-{
- const char *name = elf->strtab + sym->st_name;
-
- if (!name || !strlen(name))
- return false;
- return !is_mapping_symbol(name);
-}
-
-/* Look up the nearest symbol based on the section and the address */
-static Elf_Sym *find_nearest_sym(struct elf_info *elf, Elf_Addr addr,
- unsigned int secndx, bool allow_negative,
- Elf_Addr min_distance)
-{
- Elf_Sym *sym;
- Elf_Sym *near = NULL;
- Elf_Addr sym_addr, distance;
- bool is_arm = (elf->hdr->e_machine == EM_ARM);
-
- for (sym = elf->symtab_start; sym < elf->symtab_stop; sym++) {
- if (get_secindex(elf, sym) != secndx)
- continue;
- if (!is_valid_name(elf, sym))
- continue;
-
- sym_addr = sym->st_value;
-
- /*
- * For ARM Thumb instruction, the bit 0 of st_value is set
- * if the symbol is STT_FUNC type. Mask it to get the address.
- */
- if (is_arm && ELF_ST_TYPE(sym->st_info) == STT_FUNC)
- sym_addr &= ~1;
-
- if (addr >= sym_addr)
- distance = addr - sym_addr;
- else if (allow_negative)
- distance = sym_addr - addr;
- else
- continue;
-
- if (distance <= min_distance) {
- min_distance = distance;
- near = sym;
- }
-
- if (min_distance == 0)
- break;
- }
- return near;
-}
-
static Elf_Sym *find_fromsym(struct elf_info *elf, Elf_Addr addr,
unsigned int secndx)
{
- return find_nearest_sym(elf, addr, secndx, false, ~0);
+ return symsearch_find_nearest(elf, addr, secndx, false, ~0);
}
static Elf_Sym *find_tosym(struct elf_info *elf, Elf_Addr addr, Elf_Sym *sym)
@@ -1127,7 +1025,8 @@ static Elf_Sym *find_tosym(struct elf_info *elf, Elf_Addr addr, Elf_Sym *sym)
* Strive to find a better symbol name, but the resulting name may not
* match the symbol referenced in the original code.
*/
- return find_nearest_sym(elf, addr, get_secindex(elf, sym), true, 20);
+ return symsearch_find_nearest(elf, addr, get_secindex(elf, sym),
+ true, 20);
}
static bool is_executable_section(struct elf_info *elf, unsigned int secndx)
@@ -1248,10 +1147,10 @@ static void check_export_symbol(struct module *mod, struct elf_info *elf,
ELF_ST_TYPE(sym->st_info) == STT_LOPROC)
s->is_func = true;
- if (match(secname, PATTERNS(INIT_SECTIONS)))
+ if (match(secname, PATTERNS(ALL_INIT_SECTIONS)))
warn("%s: %s: EXPORT_SYMBOL used for init symbol. Remove __init or EXPORT_SYMBOL.\n",
mod->name, name);
- else if (match(secname, PATTERNS(EXIT_SECTIONS)))
+ else if (match(secname, PATTERNS(ALL_EXIT_SECTIONS)))
warn("%s: %s: EXPORT_SYMBOL used for exit symbol. Remove __exit or EXPORT_SYMBOL.\n",
mod->name, name);
}
@@ -1468,32 +1367,20 @@ static void get_rel_type_and_sym(struct elf_info *elf, uint64_t r_info,
return;
}
- if (is_64bit) {
- Elf64_Xword r_info64 = r_info;
-
- r_info = TO_NATIVE(r_info64);
- } else {
- Elf32_Word r_info32 = r_info;
-
- r_info = TO_NATIVE(r_info32);
- }
+ if (is_64bit)
+ r_info = TO_NATIVE((Elf64_Xword)r_info);
+ else
+ r_info = TO_NATIVE((Elf32_Word)r_info);
*r_type = ELF_R_TYPE(r_info);
*r_sym = ELF_R_SYM(r_info);
}
static void section_rela(struct module *mod, struct elf_info *elf,
- Elf_Shdr *sechdr)
+ unsigned int fsecndx, const char *fromsec,
+ const Elf_Rela *start, const Elf_Rela *stop)
{
- Elf_Rela *rela;
- unsigned int fsecndx = sechdr->sh_info;
- const char *fromsec = sec_name(elf, fsecndx);
- Elf_Rela *start = (void *)elf->hdr + sechdr->sh_offset;
- Elf_Rela *stop = (void *)start + sechdr->sh_size;
-
- /* if from section (name) is know good then skip it */
- if (match(fromsec, section_white_list))
- return;
+ const Elf_Rela *rela;
for (rela = start; rela < stop; rela++) {
Elf_Addr taddr, r_offset;
@@ -1523,17 +1410,10 @@ static void section_rela(struct module *mod, struct elf_info *elf,
}
static void section_rel(struct module *mod, struct elf_info *elf,
- Elf_Shdr *sechdr)
+ unsigned int fsecndx, const char *fromsec,
+ const Elf_Rel *start, const Elf_Rel *stop)
{
- Elf_Rel *rel;
- unsigned int fsecndx = sechdr->sh_info;
- const char *fromsec = sec_name(elf, fsecndx);
- Elf_Rel *start = (void *)elf->hdr + sechdr->sh_offset;
- Elf_Rel *stop = (void *)start + sechdr->sh_size;
-
- /* if from section (name) is know good then skip it */
- if (match(fromsec, section_white_list))
- return;
+ const Elf_Rel *rel;
for (rel = start; rel < stop; rel++) {
Elf_Sym *tsym;
@@ -1581,16 +1461,33 @@ static void section_rel(struct module *mod, struct elf_info *elf,
static void check_sec_ref(struct module *mod, struct elf_info *elf)
{
int i;
- Elf_Shdr *sechdrs = elf->sechdrs;
/* Walk through all sections */
for (i = 0; i < elf->num_sections; i++) {
- check_section(mod->name, elf, &elf->sechdrs[i]);
+ Elf_Shdr *sechdr = &elf->sechdrs[i];
+
+ check_section(mod->name, elf, sechdr);
/* We want to process only relocation sections and not .init */
- if (sechdrs[i].sh_type == SHT_RELA)
- section_rela(mod, elf, &elf->sechdrs[i]);
- else if (sechdrs[i].sh_type == SHT_REL)
- section_rel(mod, elf, &elf->sechdrs[i]);
+ if (sechdr->sh_type == SHT_REL || sechdr->sh_type == SHT_RELA) {
+ /* section to which the relocation applies */
+ unsigned int secndx = sechdr->sh_info;
+ const char *secname = sec_name(elf, secndx);
+ const void *start, *stop;
+
+ /* If the section is known good, skip it */
+ if (match(secname, section_white_list))
+ continue;
+
+ start = sym_get_data_by_offset(elf, i, 0);
+ stop = start + sechdr->sh_size;
+
+ if (sechdr->sh_type == SHT_RELA)
+ section_rela(mod, elf, secndx, secname,
+ start, stop);
+ else
+ section_rel(mod, elf, secndx, secname,
+ start, stop);
+ }
}
}
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index 5f94c2c9f2d9..69baf014da4f 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -1,4 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <byteswap.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
@@ -10,6 +11,7 @@
#include <fcntl.h>
#include <unistd.h>
#include <elf.h>
+#include "../../include/linux/module_symbol.h"
#include "list.h"
#include "elfconfig.h"
@@ -50,21 +52,19 @@
#define ELF_R_TYPE ELF64_R_TYPE
#endif
-#if KERNEL_ELFDATA != HOST_ELFDATA
+#define bswap(x) \
+({ \
+ _Static_assert(sizeof(x) == 1 || sizeof(x) == 2 || \
+ sizeof(x) == 4 || sizeof(x) == 8, "bug"); \
+ (typeof(x))(sizeof(x) == 2 ? bswap_16(x) : \
+ sizeof(x) == 4 ? bswap_32(x) : \
+ sizeof(x) == 8 ? bswap_64(x) : \
+ x); \
+})
-static inline void __endian(const void *src, void *dest, unsigned int size)
-{
- unsigned int i;
- for (i = 0; i < size; i++)
- ((unsigned char*)dest)[i] = ((unsigned char*)src)[size - i-1];
-}
+#if KERNEL_ELFDATA != HOST_ELFDATA
-#define TO_NATIVE(x) \
-({ \
- typeof(x) __x; \
- __endian(&(x), &(__x), sizeof(__x)); \
- __x; \
-})
+#define TO_NATIVE(x) (bswap(x))
#else /* endianness matches */
@@ -128,6 +128,8 @@ struct elf_info {
* take shndx from symtab_shndx_start[N] instead */
Elf32_Word *symtab_shndx_start;
Elf32_Word *symtab_shndx_stop;
+
+ struct symsearch *symsearch;
};
/* Accessor for sym->st_shndx, hides ugliness of "64k sections" */
@@ -154,6 +156,28 @@ static inline unsigned int get_secindex(const struct elf_info *info,
return index;
}
+/*
+ * If there's no name there, ignore it; likewise, ignore it if it's
+ * one of the magic symbols emitted by current tools.
+ *
+ * Internal symbols created by tools should be ignored by modpost.
+ */
+static inline bool is_valid_name(struct elf_info *elf, Elf_Sym *sym)
+{
+ const char *name = elf->strtab + sym->st_name;
+
+ if (!name || !strlen(name))
+ return false;
+ return !is_mapping_symbol(name);
+}
+
+/* symsearch.c */
+void symsearch_init(struct elf_info *elf);
+void symsearch_finish(struct elf_info *elf);
+Elf_Sym *symsearch_find_nearest(struct elf_info *elf, Elf_Addr addr,
+ unsigned int secndx, bool allow_negative,
+ Elf_Addr min_distance);
+
/* file2alias.c */
void handle_moddevtable(struct module *mod, struct elf_info *info,
Elf_Sym *sym, const char *symname);
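
As an aside, here is a minimal user-space sketch of how the bswap() macro added to
modpost.h above selects a byteswap.h helper by operand size; the includes, test values
and main() harness are illustrative only and are not part of the patch.

#include <byteswap.h>
#include <stdint.h>
#include <stdio.h>

/* Same selection logic as the modpost.h bswap() macro: pick the byteswap.h
 * helper matching sizeof(x) at compile time, pass single bytes through. */
#define bswap(x) \
({ \
	_Static_assert(sizeof(x) == 1 || sizeof(x) == 2 || \
		       sizeof(x) == 4 || sizeof(x) == 8, "bug"); \
	(typeof(x))(sizeof(x) == 2 ? bswap_16(x) : \
		    sizeof(x) == 4 ? bswap_32(x) : \
		    sizeof(x) == 8 ? bswap_64(x) : \
		    x); \
})

int main(void)
{
	uint32_t word = 0x12345678;
	uint16_t half = 0x1234;
	uint8_t byte = 0xab;

	printf("%#x\n", bswap(word));	/* prints 0x78563412 */
	printf("%#x\n", bswap(half));	/* prints 0x3412 */
	printf("%#x\n", bswap(byte));	/* prints 0xab: one byte passes through */
	return 0;
}
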
diff --git a/scripts/mod/symsearch.c b/scripts/mod/symsearch.c
new file mode 100644
index 000000000000..aa4ed51f9960
--- /dev/null
+++ b/scripts/mod/symsearch.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Helper functions for finding the symbol in an ELF which is "nearest"
+ * to a given address.
+ */
+
+#include "modpost.h"
+
+struct syminfo {
+ unsigned int symbol_index;
+ unsigned int section_index;
+ Elf_Addr addr;
+};
+
+/*
+ * Container used to hold an entire binary search table.
+ * Entries in table are ascending, sorted first by section_index,
+ * then by addr, and last by symbol_index. The sorting by
+ * symbol_index is used to ensure predictable behavior when
+ * multiple symbols are present with the same address; all
+ * symbols past the first are effectively ignored, by eliding
+ * them in symsearch_fixup().
+ */
+struct symsearch {
+ unsigned int table_size;
+ struct syminfo table[];
+};
+
+static int syminfo_compare(const void *s1, const void *s2)
+{
+ const struct syminfo *sym1 = s1;
+ const struct syminfo *sym2 = s2;
+
+ if (sym1->section_index > sym2->section_index)
+ return 1;
+ if (sym1->section_index < sym2->section_index)
+ return -1;
+ if (sym1->addr > sym2->addr)
+ return 1;
+ if (sym1->addr < sym2->addr)
+ return -1;
+ if (sym1->symbol_index > sym2->symbol_index)
+ return 1;
+ if (sym1->symbol_index < sym2->symbol_index)
+ return -1;
+ return 0;
+}
+
+static unsigned int symbol_count(struct elf_info *elf)
+{
+ unsigned int result = 0;
+
+ for (Elf_Sym *sym = elf->symtab_start; sym < elf->symtab_stop; sym++) {
+ if (is_valid_name(elf, sym))
+ result++;
+ }
+ return result;
+}
+
+/*
+ * Populate the search array that we just allocated.
+ * Be slightly paranoid here. The ELF file is mmap'd and could
+ * conceivably change between symbol_count() and symsearch_populate().
+ * If we notice any difference, bail out rather than potentially
+ * propagating errors or crashing.
+ */
+static void symsearch_populate(struct elf_info *elf,
+ struct syminfo *table,
+ unsigned int table_size)
+{
+ bool is_arm = (elf->hdr->e_machine == EM_ARM);
+
+ for (Elf_Sym *sym = elf->symtab_start; sym < elf->symtab_stop; sym++) {
+ if (is_valid_name(elf, sym)) {
+ if (table_size-- == 0)
+ fatal("%s: size mismatch\n", __func__);
+ table->symbol_index = sym - elf->symtab_start;
+ table->section_index = get_secindex(elf, sym);
+ table->addr = sym->st_value;
+
+ /*
+ * For ARM Thumb instruction, the bit 0 of st_value is
+ * set if the symbol is STT_FUNC type. Mask it to get
+ * the address.
+ */
+ if (is_arm && ELF_ST_TYPE(sym->st_info) == STT_FUNC)
+ table->addr &= ~1;
+
+ table++;
+ }
+ }
+
+ if (table_size != 0)
+ fatal("%s: size mismatch\n", __func__);
+}
+
+/*
+ * Do any fixups on the table after sorting.
+ * For now, this just finds adjacent entries which have
+ * the same section_index and addr, and it propagates
+ * the first symbol_index over the subsequent entries,
+ * so that only one symbol_index is seen for any given
+ * section_index and addr. This ensures that, whether
+ * we're looking at an address from "above" or "below",
+ * we see the same symbol_index.
+ * This does leave some duplicate entries in the table;
+ * in practice, these are a small fraction of the
+ * total number of entries, and they are harmless to
+ * the binary search algorithm other than a few occasional
+ * unnecessary comparisons.
+ */
+static void symsearch_fixup(struct syminfo *table, unsigned int table_size)
+{
+ /* Don't look at index 0, it will never change. */
+ for (unsigned int i = 1; i < table_size; i++) {
+ if (table[i].addr == table[i - 1].addr &&
+ table[i].section_index == table[i - 1].section_index) {
+ table[i].symbol_index = table[i - 1].symbol_index;
+ }
+ }
+}
+
+void symsearch_init(struct elf_info *elf)
+{
+ unsigned int table_size = symbol_count(elf);
+
+ elf->symsearch = NOFAIL(malloc(sizeof(struct symsearch) +
+ sizeof(struct syminfo) * table_size));
+ elf->symsearch->table_size = table_size;
+
+ symsearch_populate(elf, elf->symsearch->table, table_size);
+ qsort(elf->symsearch->table, table_size,
+ sizeof(struct syminfo), syminfo_compare);
+
+ symsearch_fixup(elf->symsearch->table, table_size);
+}
+
+void symsearch_finish(struct elf_info *elf)
+{
+ free(elf->symsearch);
+ elf->symsearch = NULL;
+}
+
+/*
+ * Find the syminfo which is in secndx and "nearest" to addr.
+ * allow_negative: allow returning a symbol whose address is > addr.
+ * min_distance: ignore symbols which are further away than this.
+ *
+ * Returns a pointer into the symbol table on success.
+ * Returns NULL if no legal symbol is found within the requested range.
+ */
+Elf_Sym *symsearch_find_nearest(struct elf_info *elf, Elf_Addr addr,
+ unsigned int secndx, bool allow_negative,
+ Elf_Addr min_distance)
+{
+ unsigned int hi = elf->symsearch->table_size;
+ unsigned int lo = 0;
+ struct syminfo *table = elf->symsearch->table;
+ struct syminfo target;
+
+ target.addr = addr;
+ target.section_index = secndx;
+ target.symbol_index = ~0; /* compares greater than any actual index */
+ while (hi > lo) {
+ unsigned int mid = lo + (hi - lo) / 2; /* Avoids overflow */
+
+ if (syminfo_compare(&table[mid], &target) > 0)
+ hi = mid;
+ else
+ lo = mid + 1;
+ }
+
+ /*
+ * table[hi], if it exists, is the first entry in the array which
+ * lies beyond target. table[hi - 1], if it exists, is the last
+ * entry in the array which comes before target, including the
+ * case where it perfectly matches the section and the address.
+ *
+ * Note -- if the address we're looking up falls perfectly
+ * in the middle of two symbols, this is written to always
+ * prefer the symbol with the lower address.
+ */
+ Elf_Sym *result = NULL;
+
+ if (allow_negative &&
+ hi < elf->symsearch->table_size &&
+ table[hi].section_index == secndx &&
+ table[hi].addr - addr <= min_distance) {
+ min_distance = table[hi].addr - addr;
+ result = &elf->symtab_start[table[hi].symbol_index];
+ }
+ if (hi > 0 &&
+ table[hi - 1].section_index == secndx &&
+ addr - table[hi - 1].addr <= min_distance) {
+ result = &elf->symtab_start[table[hi - 1].symbol_index];
+ }
+ return result;
+}
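
For context, a hedged sketch of how a modpost caller might use the new helpers to turn
an offending address into a symbol name; the wrapper function, the max_dist parameter
and the "(unknown)" fallback are hypothetical, only symsearch_init()/
symsearch_find_nearest()/symsearch_finish() and the strtab lookup mirror the patch.

#include "modpost.h"

/* Illustrative only: resolve an address inside section secndx to the name of
 * the nearest preceding symbol, ignoring symbols more than max_dist away. */
static const char *nearest_symbol_name(struct elf_info *elf,
				       unsigned int secndx, Elf_Addr addr,
				       Elf_Addr max_dist)
{
	Elf_Sym *sym;

	symsearch_init(elf);	/* in modpost this is done once per ELF file */
	sym = symsearch_find_nearest(elf, addr, secndx,
				     false,	/* no symbols above addr */
				     max_dist);	/* distance cut-off */
	symsearch_finish(elf);

	return sym ? elf->strtab + sym->st_name : "(unknown)";
}
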
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index d41608efb747..ce201bfa8377 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -9,6 +9,12 @@
# Patched for non-x86 by Opencon (L) 2002 <opencon@rio.skydome.net>
#
+output=$1
+
+mkdir -p "$(dirname "${output}")"
+
+exec >"${output}"
+
if grep -q CONFIG_MODULES=y include/config/auto.conf; then
echo '%define with_devel %{?_without_devel: 0} %{?!_without_devel: 1}'
else
diff --git a/scripts/pahole-flags.sh b/scripts/pahole-flags.sh
deleted file mode 100755
index 728d55190d97..000000000000
--- a/scripts/pahole-flags.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-extra_paholeopt=
-
-if ! [ -x "$(command -v ${PAHOLE})" ]; then
- exit 0
-fi
-
-pahole_ver=$($(dirname $0)/pahole-version.sh ${PAHOLE})
-
-if [ "${pahole_ver}" -ge "118" ] && [ "${pahole_ver}" -le "121" ]; then
- # pahole 1.18 through 1.21 can't handle zero-sized per-CPU vars
- extra_paholeopt="${extra_paholeopt} --skip_encoding_btf_vars"
-fi
-if [ "${pahole_ver}" -ge "121" ]; then
- extra_paholeopt="${extra_paholeopt} --btf_gen_floats"
-fi
-if [ "${pahole_ver}" -ge "122" ]; then
- extra_paholeopt="${extra_paholeopt} -j"
-fi
-if [ "${pahole_ver}" -ge "124" ]; then
- # see PAHOLE_HAS_LANG_EXCLUDE
- extra_paholeopt="${extra_paholeopt} --lang_exclude=rust"
-fi
-if [ "${pahole_ver}" -ge "125" ]; then
- extra_paholeopt="${extra_paholeopt} --skip_encoding_btf_inconsistent_proto --btf_gen_optimized"
-fi
-
-echo ${extra_paholeopt}
diff --git a/scripts/remove-stale-files b/scripts/remove-stale-files
index 8b1a636f8543..385610fe3936 100755
--- a/scripts/remove-stale-files
+++ b/scripts/remove-stale-files
@@ -38,4 +38,4 @@ rm -rf include/ksym
find . -name '*.usyms' | xargs rm -f
-rm -f binkernel.spec
+rm -f *.spec
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 53a0070ff5df..38650e52ef57 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -423,7 +423,7 @@ static ssize_t policy_update(u32 mask, const char __user *buf, size_t size,
/* high level check about policy management - fine grained in
* below after unpack
*/
- error = aa_may_manage_policy(label, ns, mask);
+ error = aa_may_manage_policy(current_cred(), label, ns, mask);
if (error)
goto end_section;
@@ -486,7 +486,8 @@ static ssize_t profile_remove(struct file *f, const char __user *buf,
/* high level check about policy management - fine grained in
* below after unpack
*/
- error = aa_may_manage_policy(label, ns, AA_MAY_REMOVE_POLICY);
+ error = aa_may_manage_policy(current_cred(), label, ns,
+ AA_MAY_REMOVE_POLICY);
if (error)
goto out;
@@ -618,23 +619,23 @@ static void profile_query_cb(struct aa_profile *profile, struct aa_perms *perms,
if (profile_unconfined(profile))
return;
- if (rules->file.dfa && *match_str == AA_CLASS_FILE) {
- state = aa_dfa_match_len(rules->file.dfa,
- rules->file.start[AA_CLASS_FILE],
+ if (rules->file->dfa && *match_str == AA_CLASS_FILE) {
+ state = aa_dfa_match_len(rules->file->dfa,
+ rules->file->start[AA_CLASS_FILE],
match_str + 1, match_len - 1);
if (state) {
struct path_cond cond = { };
- tmp = *(aa_lookup_fperms(&(rules->file), state, &cond));
+ tmp = *(aa_lookup_fperms(rules->file, state, &cond));
}
- } else if (rules->policy.dfa) {
+ } else if (rules->policy->dfa) {
if (!RULE_MEDIATES(rules, *match_str))
return; /* no change to current perms */
- state = aa_dfa_match_len(rules->policy.dfa,
- rules->policy.start[0],
+ state = aa_dfa_match_len(rules->policy->dfa,
+ rules->policy->start[0],
match_str, match_len);
if (state)
- tmp = *aa_lookup_perms(&rules->policy, state);
+ tmp = *aa_lookup_perms(rules->policy, state);
}
aa_apply_modes_to_perms(profile, &tmp);
aa_perms_accum_raw(perms, &tmp);
@@ -1095,7 +1096,7 @@ static int seq_profile_attach_show(struct seq_file *seq, void *v)
struct aa_profile *profile = labels_profile(label);
if (profile->attach.xmatch_str)
seq_printf(seq, "%s\n", profile->attach.xmatch_str);
- else if (profile->attach.xmatch.dfa)
+ else if (profile->attach.xmatch->dfa)
seq_puts(seq, "<unknown>\n");
else
seq_printf(seq, "%s\n", profile->base.name);
@@ -1314,7 +1315,6 @@ SEQ_RAWDATA_FOPS(compressed_size);
static int decompress_zstd(char *src, size_t slen, char *dst, size_t dlen)
{
-#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
if (slen < dlen) {
const size_t wksp_len = zstd_dctx_workspace_bound();
zstd_dctx *ctx;
@@ -1341,7 +1341,6 @@ cleanup:
kvfree(wksp);
return ret;
}
-#endif
if (dlen < slen)
return -EINVAL;
@@ -1806,7 +1805,8 @@ static int ns_mkdir_op(struct mnt_idmap *idmap, struct inode *dir,
int error;
label = begin_current_label_crit_section();
- error = aa_may_manage_policy(label, NULL, AA_MAY_LOAD_POLICY);
+ error = aa_may_manage_policy(current_cred(), label, NULL,
+ AA_MAY_LOAD_POLICY);
end_current_label_crit_section(label);
if (error)
return error;
@@ -1855,7 +1855,8 @@ static int ns_rmdir_op(struct inode *dir, struct dentry *dentry)
int error;
label = begin_current_label_crit_section();
- error = aa_may_manage_policy(label, NULL, AA_MAY_LOAD_POLICY);
+ error = aa_may_manage_policy(current_cred(), label, NULL,
+ AA_MAY_LOAD_POLICY);
end_current_label_crit_section(label);
if (error)
return error;
@@ -2339,10 +2340,16 @@ static struct aa_sfs_entry aa_sfs_entry_domain[] = {
AA_SFS_FILE_BOOLEAN("post_nnp_subset", 1),
AA_SFS_FILE_BOOLEAN("computed_longest_left", 1),
AA_SFS_DIR("attach_conditions", aa_sfs_entry_attach),
+ AA_SFS_FILE_BOOLEAN("disconnected.path", 1),
AA_SFS_FILE_STRING("version", "1.2"),
{ }
};
+static struct aa_sfs_entry aa_sfs_entry_unconfined[] = {
+ AA_SFS_FILE_BOOLEAN("change_profile", 1),
+ { }
+};
+
static struct aa_sfs_entry aa_sfs_entry_versions[] = {
AA_SFS_FILE_BOOLEAN("v5", 1),
AA_SFS_FILE_BOOLEAN("v6", 1),
@@ -2352,11 +2359,15 @@ static struct aa_sfs_entry aa_sfs_entry_versions[] = {
{ }
};
+#define PERMS32STR "allow deny subtree cond kill complain prompt audit quiet hide xindex tag label"
static struct aa_sfs_entry aa_sfs_entry_policy[] = {
AA_SFS_DIR("versions", aa_sfs_entry_versions),
AA_SFS_FILE_BOOLEAN("set_load", 1),
/* number of out of band transitions supported */
AA_SFS_FILE_U64("outofband", MAX_OOB_SUPPORTED),
+ AA_SFS_FILE_U64("permstable32_version", 1),
+ AA_SFS_FILE_STRING("permstable32", PERMS32STR),
+ AA_SFS_DIR("unconfined_restrictions", aa_sfs_entry_unconfined),
{ }
};
@@ -2368,6 +2379,7 @@ static struct aa_sfs_entry aa_sfs_entry_mount[] = {
static struct aa_sfs_entry aa_sfs_entry_ns[] = {
AA_SFS_FILE_BOOLEAN("profile", 1),
AA_SFS_FILE_BOOLEAN("pivot_root", 0),
+ AA_SFS_FILE_STRING("mask", "userns_create"),
{ }
};
@@ -2382,6 +2394,12 @@ static struct aa_sfs_entry aa_sfs_entry_query[] = {
AA_SFS_DIR("label", aa_sfs_entry_query_label),
{ }
};
+
+static struct aa_sfs_entry aa_sfs_entry_io_uring[] = {
+ AA_SFS_FILE_STRING("mask", "sqpoll override_creds"),
+ { }
+};
+
static struct aa_sfs_entry aa_sfs_entry_features[] = {
AA_SFS_DIR("policy", aa_sfs_entry_policy),
AA_SFS_DIR("domain", aa_sfs_entry_domain),
@@ -2395,6 +2413,7 @@ static struct aa_sfs_entry aa_sfs_entry_features[] = {
AA_SFS_DIR("ptrace", aa_sfs_entry_ptrace),
AA_SFS_DIR("signal", aa_sfs_entry_signal),
AA_SFS_DIR("query", aa_sfs_entry_query),
+ AA_SFS_DIR("io_uring", aa_sfs_entry_io_uring),
{ }
};
diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
index 5a7978aa4b19..45beb1c5f747 100644
--- a/security/apparmor/audit.c
+++ b/security/apparmor/audit.c
@@ -58,8 +58,8 @@ static const char *const aa_class_names[] = {
"io_uring",
"module",
"lsm",
- "unknown",
- "unknown",
+ "namespace",
+ "io_uring",
"unknown",
"unknown",
"unknown",
@@ -85,37 +85,36 @@ static const char *const aa_class_names[] = {
/**
* audit_pre() - core AppArmor function.
* @ab: audit buffer to fill (NOT NULL)
- * @ca: audit structure containing data to audit (NOT NULL)
+ * @va: audit structure containing data to audit (NOT NULL)
*
- * Record common AppArmor audit data from @sa
+ * Record common AppArmor audit data from @va
*/
-static void audit_pre(struct audit_buffer *ab, void *ca)
+static void audit_pre(struct audit_buffer *ab, void *va)
{
- struct common_audit_data *sa = ca;
+ struct apparmor_audit_data *ad = aad_of_va(va);
if (aa_g_audit_header) {
audit_log_format(ab, "apparmor=\"%s\"",
- aa_audit_type[aad(sa)->type]);
+ aa_audit_type[ad->type]);
}
- if (aad(sa)->op) {
- audit_log_format(ab, " operation=\"%s\"", aad(sa)->op);
- }
+ if (ad->op)
+ audit_log_format(ab, " operation=\"%s\"", ad->op);
- if (aad(sa)->class)
+ if (ad->class)
audit_log_format(ab, " class=\"%s\"",
- aad(sa)->class <= AA_CLASS_LAST ?
- aa_class_names[aad(sa)->class] :
+ ad->class <= AA_CLASS_LAST ?
+ aa_class_names[ad->class] :
"unknown");
- if (aad(sa)->info) {
- audit_log_format(ab, " info=\"%s\"", aad(sa)->info);
- if (aad(sa)->error)
- audit_log_format(ab, " error=%d", aad(sa)->error);
+ if (ad->info) {
+ audit_log_format(ab, " info=\"%s\"", ad->info);
+ if (ad->error)
+ audit_log_format(ab, " error=%d", ad->error);
}
- if (aad(sa)->label) {
- struct aa_label *label = aad(sa)->label;
+ if (ad->subj_label) {
+ struct aa_label *label = ad->subj_label;
if (label_isprofile(label)) {
struct aa_profile *profile = labels_profile(label);
@@ -134,42 +133,44 @@ static void audit_pre(struct audit_buffer *ab, void *ca)
}
}
- if (aad(sa)->name) {
+ if (ad->name) {
audit_log_format(ab, " name=");
- audit_log_untrustedstring(ab, aad(sa)->name);
+ audit_log_untrustedstring(ab, ad->name);
}
}
/**
* aa_audit_msg - Log a message to the audit subsystem
- * @sa: audit event structure (NOT NULL)
+ * @type: audit type for the message
+ * @ad: audit event structure (NOT NULL)
* @cb: optional callback fn for type specific fields (MAYBE NULL)
*/
-void aa_audit_msg(int type, struct common_audit_data *sa,
+void aa_audit_msg(int type, struct apparmor_audit_data *ad,
void (*cb) (struct audit_buffer *, void *))
{
- aad(sa)->type = type;
- common_lsm_audit(sa, audit_pre, cb);
+ ad->type = type;
+ common_lsm_audit(&ad->common, audit_pre, cb);
}
/**
* aa_audit - Log a profile based audit event to the audit subsystem
* @type: audit type for the message
* @profile: profile to check against (NOT NULL)
- * @sa: audit event (NOT NULL)
+ * @ad: audit event (NOT NULL)
* @cb: optional callback fn for type specific fields (MAYBE NULL)
*
* Handle default message switching based off of audit mode flags
*
* Returns: error on failure
*/
-int aa_audit(int type, struct aa_profile *profile, struct common_audit_data *sa,
+int aa_audit(int type, struct aa_profile *profile,
+ struct apparmor_audit_data *ad,
void (*cb) (struct audit_buffer *, void *))
{
AA_BUG(!profile);
if (type == AUDIT_APPARMOR_AUTO) {
- if (likely(!aad(sa)->error)) {
+ if (likely(!ad->error)) {
if (AUDIT_MODE(profile) != AUDIT_ALL)
return 0;
type = AUDIT_APPARMOR_AUDIT;
@@ -181,24 +182,24 @@ int aa_audit(int type, struct aa_profile *profile, struct common_audit_data *sa,
if (AUDIT_MODE(profile) == AUDIT_QUIET ||
(type == AUDIT_APPARMOR_DENIED &&
AUDIT_MODE(profile) == AUDIT_QUIET_DENIED))
- return aad(sa)->error;
+ return ad->error;
if (KILL_MODE(profile) && type == AUDIT_APPARMOR_DENIED)
type = AUDIT_APPARMOR_KILL;
- aad(sa)->label = &profile->label;
+ ad->subj_label = &profile->label;
- aa_audit_msg(type, sa, cb);
+ aa_audit_msg(type, ad, cb);
- if (aad(sa)->type == AUDIT_APPARMOR_KILL)
+ if (ad->type == AUDIT_APPARMOR_KILL)
(void)send_sig_info(SIGKILL, NULL,
- sa->type == LSM_AUDIT_DATA_TASK && sa->u.tsk ?
- sa->u.tsk : current);
+ ad->common.type == LSM_AUDIT_DATA_TASK &&
+ ad->common.u.tsk ? ad->common.u.tsk : current);
- if (aad(sa)->type == AUDIT_APPARMOR_ALLOWED)
- return complain_error(aad(sa)->error);
+ if (ad->type == AUDIT_APPARMOR_ALLOWED)
+ return complain_error(ad->error);
- return aad(sa)->error;
+ return ad->error;
}
struct aa_audit_rule {
diff --git a/security/apparmor/capability.c b/security/apparmor/capability.c
index 326a51838ef2..9934df16c843 100644
--- a/security/apparmor/capability.c
+++ b/security/apparmor/capability.c
@@ -38,8 +38,8 @@ static DEFINE_PER_CPU(struct audit_cache, audit_cache);
/**
* audit_cb - call back for capability components of audit struct
- * @ab - audit buffer (NOT NULL)
- * @va - audit struct to audit data from (NOT NULL)
+ * @ab: audit buffer (NOT NULL)
+ * @va: audit struct to audit data from (NOT NULL)
*/
static void audit_cb(struct audit_buffer *ab, void *va)
{
@@ -51,7 +51,7 @@ static void audit_cb(struct audit_buffer *ab, void *va)
/**
* audit_caps - audit a capability
- * @sa: audit data
+ * @ad: audit data
* @profile: profile being tested for confinement (NOT NULL)
* @cap: capability tested
* @error: error code returned by test
@@ -59,9 +59,9 @@ static void audit_cb(struct audit_buffer *ab, void *va)
* Do auditing of capability and handle, audit/complain/kill modes switching
* and duplicate message elimination.
*
- * Returns: 0 or sa->error on success, error code on failure
+ * Returns: 0 or ad->error on success, error code on failure
*/
-static int audit_caps(struct common_audit_data *sa, struct aa_profile *profile,
+static int audit_caps(struct apparmor_audit_data *ad, struct aa_profile *profile,
int cap, int error)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
@@ -69,7 +69,7 @@ static int audit_caps(struct common_audit_data *sa, struct aa_profile *profile,
struct audit_cache *ent;
int type = AUDIT_APPARMOR_AUTO;
- aad(sa)->error = error;
+ ad->error = error;
if (likely(!error)) {
/* test if auditing is being forced */
@@ -101,7 +101,7 @@ static int audit_caps(struct common_audit_data *sa, struct aa_profile *profile,
}
put_cpu_var(audit_cache);
- return aa_audit(type, profile, sa, audit_cb);
+ return aa_audit(type, profile, ad, audit_cb);
}
/**
@@ -109,12 +109,12 @@ static int audit_caps(struct common_audit_data *sa, struct aa_profile *profile,
* @profile: profile being enforced (NOT NULL, NOT unconfined)
* @cap: capability to test if allowed
* @opts: CAP_OPT_NOAUDIT bit determines whether audit record is generated
- * @sa: audit data (MAY BE NULL indicating no auditing)
+ * @ad: audit data (MAY BE NULL indicating no auditing)
*
* Returns: 0 if allowed else -EPERM
*/
static int profile_capable(struct aa_profile *profile, int cap,
- unsigned int opts, struct common_audit_data *sa)
+ unsigned int opts, struct apparmor_audit_data *ad)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
@@ -132,14 +132,15 @@ static int profile_capable(struct aa_profile *profile, int cap,
/* audit the cap request in complain mode but note that it
* should be optional.
*/
- aad(sa)->info = "optional: no audit";
+ ad->info = "optional: no audit";
}
- return audit_caps(sa, profile, cap, error);
+ return audit_caps(ad, profile, cap, error);
}
/**
* aa_capable - test permission to use capability
+ * @subj_cred: cred we are testing capability against
* @label: label being tested for capability (NOT NULL)
* @cap: capability to be tested
* @opts: CAP_OPT_NOAUDIT bit determines whether audit record is generated
@@ -148,15 +149,17 @@ static int profile_capable(struct aa_profile *profile, int cap,
*
* Returns: 0 on success, or else an error code.
*/
-int aa_capable(struct aa_label *label, int cap, unsigned int opts)
+int aa_capable(const struct cred *subj_cred, struct aa_label *label,
+ int cap, unsigned int opts)
{
struct aa_profile *profile;
int error = 0;
- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_CAP, AA_CLASS_CAP, OP_CAPABLE);
+ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_CAP, AA_CLASS_CAP, OP_CAPABLE);
- sa.u.cap = cap;
+ ad.subj_cred = subj_cred;
+ ad.common.u.cap = cap;
error = fn_for_each_confined(label, profile,
- profile_capable(profile, cap, opts, &sa));
+ profile_capable(profile, cap, opts, &ad));
return error;
}
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index f3715cda59c5..89fbeab4b33b 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -31,6 +31,7 @@
/**
* may_change_ptraced_domain - check if can change profile on ptraced task
+ * @to_cred: cred of task changing domain
* @to_label: profile to change to (NOT NULL)
* @info: message if there is an error
*
@@ -39,28 +40,34 @@
*
* Returns: %0 or error if change not allowed
*/
-static int may_change_ptraced_domain(struct aa_label *to_label,
+static int may_change_ptraced_domain(const struct cred *to_cred,
+ struct aa_label *to_label,
const char **info)
{
struct task_struct *tracer;
struct aa_label *tracerl = NULL;
+ const struct cred *tracer_cred = NULL;
+
int error = 0;
rcu_read_lock();
tracer = ptrace_parent(current);
- if (tracer)
+ if (tracer) {
/* released below */
tracerl = aa_get_task_label(tracer);
-
+ tracer_cred = get_task_cred(tracer);
+ }
/* not ptraced */
if (!tracer || unconfined(tracerl))
goto out;
- error = aa_may_ptrace(tracerl, to_label, PTRACE_MODE_ATTACH);
+ error = aa_may_ptrace(tracer_cred, tracerl, to_cred, to_label,
+ PTRACE_MODE_ATTACH);
out:
rcu_read_unlock();
aa_put_label(tracerl);
+ put_cred(tracer_cred);
if (error)
*info = "ptrace prevents transition";
@@ -70,7 +77,7 @@ out:
/**** TODO: dedup to aa_label_match - needs perm and dfa, merging
* specifically this is an exact copy of aa_label_match except
* aa_compute_perms is replaced with aa_compute_fperms
- * and policy.dfa with file.dfa
+ * and policy->dfa with file->dfa
****/
/* match a profile and its associated ns component if needed
* Assumes visibility test has already been done.
@@ -86,16 +93,16 @@ static inline aa_state_t match_component(struct aa_profile *profile,
const char *ns_name;
if (stack)
- state = aa_dfa_match(rules->file.dfa, state, "&");
+ state = aa_dfa_match(rules->file->dfa, state, "&");
if (profile->ns == tp->ns)
- return aa_dfa_match(rules->file.dfa, state, tp->base.hname);
+ return aa_dfa_match(rules->file->dfa, state, tp->base.hname);
/* try matching with namespace name and then profile */
ns_name = aa_ns_name(profile->ns, tp->ns, true);
- state = aa_dfa_match_len(rules->file.dfa, state, ":", 1);
- state = aa_dfa_match(rules->file.dfa, state, ns_name);
- state = aa_dfa_match_len(rules->file.dfa, state, ":", 1);
- return aa_dfa_match(rules->file.dfa, state, tp->base.hname);
+ state = aa_dfa_match_len(rules->file->dfa, state, ":", 1);
+ state = aa_dfa_match(rules->file->dfa, state, ns_name);
+ state = aa_dfa_match_len(rules->file->dfa, state, ":", 1);
+ return aa_dfa_match(rules->file->dfa, state, tp->base.hname);
}
/**
@@ -143,12 +150,12 @@ next:
label_for_each_cont(i, label, tp) {
if (!aa_ns_visible(profile->ns, tp->ns, subns))
continue;
- state = aa_dfa_match(rules->file.dfa, state, "//&");
+ state = aa_dfa_match(rules->file->dfa, state, "//&");
state = match_component(profile, tp, false, state);
if (!state)
goto fail;
}
- *perms = *(aa_lookup_fperms(&(rules->file), state, &cond));
+ *perms = *(aa_lookup_fperms(rules->file, state, &cond));
aa_apply_modes_to_perms(profile, perms);
if ((perms->allow & request) != request)
return -EACCES;
@@ -203,7 +210,7 @@ static int label_components_match(struct aa_profile *profile,
return 0;
next:
- tmp = *(aa_lookup_fperms(&(rules->file), state, &cond));
+ tmp = *(aa_lookup_fperms(rules->file, state, &cond));
aa_apply_modes_to_perms(profile, &tmp);
aa_perms_accum(perms, &tmp);
label_for_each_cont(i, label, tp) {
@@ -212,7 +219,7 @@ next:
state = match_component(profile, tp, stack, start);
if (!state)
goto fail;
- tmp = *(aa_lookup_fperms(&(rules->file), state, &cond));
+ tmp = *(aa_lookup_fperms(rules->file, state, &cond));
aa_apply_modes_to_perms(profile, &tmp);
aa_perms_accum(perms, &tmp);
}
@@ -265,6 +272,7 @@ static int label_match(struct aa_profile *profile, struct aa_label *label,
* @stack: whether this is a stacking request
* @request: requested perms
* @start: state to start matching in
+ * @perms: Returns computed perms (NOT NULL)
*
*
* Returns: permission set
@@ -309,7 +317,7 @@ static int aa_xattrs_match(const struct linux_binprm *bprm,
might_sleep();
/* transition from exec match to xattr set */
- state = aa_dfa_outofband_transition(attach->xmatch.dfa, state);
+ state = aa_dfa_outofband_transition(attach->xmatch->dfa, state);
d = bprm->file->f_path.dentry;
for (i = 0; i < attach->xattr_count; i++) {
@@ -323,20 +331,20 @@ static int aa_xattrs_match(const struct linux_binprm *bprm,
* that not present xattr can be distinguished from a 0
* length value or rule that matches any value
*/
- state = aa_dfa_null_transition(attach->xmatch.dfa,
+ state = aa_dfa_null_transition(attach->xmatch->dfa,
state);
/* Check xattr value */
- state = aa_dfa_match_len(attach->xmatch.dfa, state,
+ state = aa_dfa_match_len(attach->xmatch->dfa, state,
value, size);
- index = ACCEPT_TABLE(attach->xmatch.dfa)[state];
- perm = attach->xmatch.perms[index].allow;
+ index = ACCEPT_TABLE(attach->xmatch->dfa)[state];
+ perm = attach->xmatch->perms[index].allow;
if (!(perm & MAY_EXEC)) {
ret = -EINVAL;
goto out;
}
}
/* transition to next element */
- state = aa_dfa_outofband_transition(attach->xmatch.dfa, state);
+ state = aa_dfa_outofband_transition(attach->xmatch->dfa, state);
if (size < 0) {
/*
* No xattr match, so verify if transition to
@@ -359,11 +367,11 @@ out:
/**
* find_attach - do attachment search for unconfined processes
- * @bprm - binprm structure of transitioning task
+ * @bprm: binprm structure of transitioning task
* @ns: the current namespace (NOT NULL)
- * @head - profile list to walk (NOT NULL)
- * @name - to match against (NOT NULL)
- * @info - info message if there was an error (NOT NULL)
+ * @head: profile list to walk (NOT NULL)
+ * @name: to match against (NOT NULL)
+ * @info: info message if there was an error (NOT NULL)
*
* Do a linear search on the profiles in the list. There is a matching
* preference where an exact match is preferred over a name which uses
@@ -405,16 +413,16 @@ restart:
* as another profile, signal a conflict and refuse to
* match.
*/
- if (attach->xmatch.dfa) {
+ if (attach->xmatch->dfa) {
unsigned int count;
aa_state_t state;
u32 index, perm;
- state = aa_dfa_leftmatch(attach->xmatch.dfa,
- attach->xmatch.start[AA_CLASS_XMATCH],
+ state = aa_dfa_leftmatch(attach->xmatch->dfa,
+ attach->xmatch->start[AA_CLASS_XMATCH],
name, &count);
- index = ACCEPT_TABLE(attach->xmatch.dfa)[state];
- perm = attach->xmatch.perms[index].allow;
+ index = ACCEPT_TABLE(attach->xmatch->dfa)[state];
+ perm = attach->xmatch->perms[index].allow;
/* any accepting state means a valid match. */
if (perm & MAY_EXEC) {
int ret = 0;
@@ -517,7 +525,7 @@ struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex,
/* TODO: move lookup parsing to unpack time so this is a straight
* index into the resultant label
*/
- for (*name = rules->file.trans.table[index]; !label && *name;
+ for (*name = rules->file->trans.table[index]; !label && *name;
*name = next_name(xtype, *name)) {
if (xindex & AA_X_CHILD) {
struct aa_profile *new_profile;
@@ -545,6 +553,7 @@ struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex,
* @name: name to lookup (NOT NULL)
* @xindex: index into x transition table
* @lookupname: returns: name used in lookup if one was specified (NOT NULL)
+ * @info: info message if there was an error (NOT NULL)
*
* find label for a transition index
*
@@ -570,7 +579,7 @@ static struct aa_label *x_to_label(struct aa_profile *profile,
break;
case AA_X_TABLE:
/* TODO: fix when perm mapping done at unload */
- stack = rules->file.trans.table[xindex & AA_X_INDEX_MASK];
+ stack = rules->file->trans.table[xindex & AA_X_INDEX_MASK];
if (*stack != '&') {
/* released by caller */
new = x_table_lookup(profile, xindex, lookupname);
@@ -619,7 +628,8 @@ static struct aa_label *x_to_label(struct aa_profile *profile,
return new;
}
-static struct aa_label *profile_transition(struct aa_profile *profile,
+static struct aa_label *profile_transition(const struct cred *subj_cred,
+ struct aa_profile *profile,
const struct linux_binprm *bprm,
char *buffer, struct path_cond *cond,
bool *secure_exec)
@@ -628,7 +638,7 @@ static struct aa_label *profile_transition(struct aa_profile *profile,
typeof(*rules), list);
struct aa_label *new = NULL;
const char *info = NULL, *name = NULL, *target = NULL;
- aa_state_t state = rules->file.start[AA_CLASS_FILE];
+ aa_state_t state = rules->file->start[AA_CLASS_FILE];
struct aa_perms perms = {};
bool nonewprivs = false;
int error = 0;
@@ -662,7 +672,7 @@ static struct aa_label *profile_transition(struct aa_profile *profile,
}
/* find exec permissions for name */
- state = aa_str_perms(&(rules->file), state, name, cond, &perms);
+ state = aa_str_perms(rules->file, state, name, cond, &perms);
if (perms.allow & MAY_EXEC) {
/* exec permission determine how to transition */
new = x_to_label(profile, bprm, name, perms.xindex, &target,
@@ -709,7 +719,8 @@ static struct aa_label *profile_transition(struct aa_profile *profile,
}
audit:
- aa_audit_file(profile, &perms, OP_EXEC, MAY_EXEC, name, target, new,
+ aa_audit_file(subj_cred, profile, &perms, OP_EXEC, MAY_EXEC, name,
+ target, new,
cond->uid, info, error);
if (!new || nonewprivs) {
aa_put_label(new);
@@ -719,14 +730,15 @@ audit:
return new;
}
-static int profile_onexec(struct aa_profile *profile, struct aa_label *onexec,
+static int profile_onexec(const struct cred *subj_cred,
+ struct aa_profile *profile, struct aa_label *onexec,
bool stack, const struct linux_binprm *bprm,
char *buffer, struct path_cond *cond,
bool *secure_exec)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
- aa_state_t state = rules->file.start[AA_CLASS_FILE];
+ aa_state_t state = rules->file->start[AA_CLASS_FILE];
struct aa_perms perms = {};
const char *xname = NULL, *info = "change_profile onexec";
int error = -EACCES;
@@ -759,7 +771,7 @@ static int profile_onexec(struct aa_profile *profile, struct aa_label *onexec,
}
/* find exec permissions for name */
- state = aa_str_perms(&(rules->file), state, xname, cond, &perms);
+ state = aa_str_perms(rules->file, state, xname, cond, &perms);
if (!(perms.allow & AA_MAY_ONEXEC)) {
info = "no change_onexec valid for executable";
goto audit;
@@ -768,7 +780,7 @@ static int profile_onexec(struct aa_profile *profile, struct aa_label *onexec,
* onexec permission is linked to exec with a standard pairing
* exec\0change_profile
*/
- state = aa_dfa_null_transition(rules->file.dfa, state);
+ state = aa_dfa_null_transition(rules->file->dfa, state);
error = change_profile_perms(profile, onexec, stack, AA_MAY_ONEXEC,
state, &perms);
if (error) {
@@ -787,13 +799,15 @@ static int profile_onexec(struct aa_profile *profile, struct aa_label *onexec,
}
audit:
- return aa_audit_file(profile, &perms, OP_EXEC, AA_MAY_ONEXEC, xname,
+ return aa_audit_file(subj_cred, profile, &perms, OP_EXEC,
+ AA_MAY_ONEXEC, xname,
NULL, onexec, cond->uid, info, error);
}
/* ensure none ns domain transitions are correctly applied with onexec */
-static struct aa_label *handle_onexec(struct aa_label *label,
+static struct aa_label *handle_onexec(const struct cred *subj_cred,
+ struct aa_label *label,
struct aa_label *onexec, bool stack,
const struct linux_binprm *bprm,
char *buffer, struct path_cond *cond,
@@ -810,26 +824,28 @@ static struct aa_label *handle_onexec(struct aa_label *label,
if (!stack) {
error = fn_for_each_in_ns(label, profile,
- profile_onexec(profile, onexec, stack,
+ profile_onexec(subj_cred, profile, onexec, stack,
bprm, buffer, cond, unsafe));
if (error)
return ERR_PTR(error);
new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
aa_get_newest_label(onexec),
- profile_transition(profile, bprm, buffer,
+ profile_transition(subj_cred, profile, bprm,
+ buffer,
cond, unsafe));
} else {
/* TODO: determine how much we want to loosen this */
error = fn_for_each_in_ns(label, profile,
- profile_onexec(profile, onexec, stack, bprm,
+ profile_onexec(subj_cred, profile, onexec, stack, bprm,
buffer, cond, unsafe));
if (error)
return ERR_PTR(error);
new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
aa_label_merge(&profile->label, onexec,
GFP_KERNEL),
- profile_transition(profile, bprm, buffer,
+ profile_transition(subj_cred, profile, bprm,
+ buffer,
cond, unsafe));
}
@@ -838,7 +854,8 @@ static struct aa_label *handle_onexec(struct aa_label *label,
/* TODO: get rid of GLOBAL_ROOT_UID */
error = fn_for_each_in_ns(label, profile,
- aa_audit_file(profile, &nullperms, OP_CHANGE_ONEXEC,
+ aa_audit_file(subj_cred, profile, &nullperms,
+ OP_CHANGE_ONEXEC,
AA_MAY_ONEXEC, bprm->filename, NULL,
onexec, GLOBAL_ROOT_UID,
"failed to build target label", -ENOMEM));
@@ -857,6 +874,7 @@ int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm)
{
struct aa_task_ctx *ctx;
struct aa_label *label, *new = NULL;
+ const struct cred *subj_cred;
struct aa_profile *profile;
char *buffer = NULL;
const char *info = NULL;
@@ -869,6 +887,7 @@ int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm)
file_inode(bprm->file)->i_mode
};
+ subj_cred = current_cred();
ctx = task_ctx(current);
AA_BUG(!cred_label(bprm->cred));
AA_BUG(!ctx);
@@ -895,11 +914,12 @@ int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm)
/* Test for onexec first as onexec override other x transitions. */
if (ctx->onexec)
- new = handle_onexec(label, ctx->onexec, ctx->token,
+ new = handle_onexec(subj_cred, label, ctx->onexec, ctx->token,
bprm, buffer, &cond, &unsafe);
else
new = fn_label_build(label, profile, GFP_KERNEL,
- profile_transition(profile, bprm, buffer,
+ profile_transition(subj_cred, profile, bprm,
+ buffer,
&cond, &unsafe));
AA_BUG(!new);
@@ -934,7 +954,7 @@ int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm)
if (bprm->unsafe & (LSM_UNSAFE_PTRACE)) {
/* TODO: test needs to be profile of label to new */
- error = may_change_ptraced_domain(new, &info);
+ error = may_change_ptraced_domain(bprm->cred, new, &info);
if (error)
goto audit;
}
@@ -971,7 +991,8 @@ done:
audit:
error = fn_for_each(label, profile,
- aa_audit_file(profile, &nullperms, OP_EXEC, MAY_EXEC,
+ aa_audit_file(current_cred(), profile, &nullperms,
+ OP_EXEC, MAY_EXEC,
bprm->filename, NULL, new,
vfsuid_into_kuid(vfsuid), info, error));
aa_put_label(new);
@@ -987,7 +1008,8 @@ audit:
*
* Returns: label for hat transition OR ERR_PTR. Does NOT return NULL
*/
-static struct aa_label *build_change_hat(struct aa_profile *profile,
+static struct aa_label *build_change_hat(const struct cred *subj_cred,
+ struct aa_profile *profile,
const char *name, bool sibling)
{
struct aa_profile *root, *hat = NULL;
@@ -1019,7 +1041,8 @@ static struct aa_label *build_change_hat(struct aa_profile *profile,
aa_put_profile(root);
audit:
- aa_audit_file(profile, &nullperms, OP_CHANGE_HAT, AA_MAY_CHANGEHAT,
+ aa_audit_file(subj_cred, profile, &nullperms, OP_CHANGE_HAT,
+ AA_MAY_CHANGEHAT,
name, hat ? hat->base.hname : NULL,
hat ? &hat->label : NULL, GLOBAL_ROOT_UID, info,
error);
@@ -1035,7 +1058,8 @@ audit:
*
* Returns: label for hat transition or ERR_PTR. Does not return NULL
*/
-static struct aa_label *change_hat(struct aa_label *label, const char *hats[],
+static struct aa_label *change_hat(const struct cred *subj_cred,
+ struct aa_label *label, const char *hats[],
int count, int flags)
{
struct aa_profile *profile, *root, *hat = NULL;
@@ -1111,7 +1135,8 @@ fail:
*/
/* TODO: get rid of GLOBAL_ROOT_UID */
if (count > 1 || COMPLAIN_MODE(profile)) {
- aa_audit_file(profile, &nullperms, OP_CHANGE_HAT,
+ aa_audit_file(subj_cred, profile, &nullperms,
+ OP_CHANGE_HAT,
AA_MAY_CHANGEHAT, name, NULL, NULL,
GLOBAL_ROOT_UID, info, error);
}
@@ -1120,7 +1145,8 @@ fail:
build:
new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
- build_change_hat(profile, name, sibling),
+ build_change_hat(subj_cred, profile, name,
+ sibling),
aa_get_label(&profile->label));
if (!new) {
info = "label build failed";
@@ -1150,7 +1176,7 @@ build:
*/
int aa_change_hat(const char *hats[], int count, u64 token, int flags)
{
- const struct cred *cred;
+ const struct cred *subj_cred;
struct aa_task_ctx *ctx = task_ctx(current);
struct aa_label *label, *previous, *new = NULL, *target = NULL;
struct aa_profile *profile;
@@ -1159,8 +1185,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
int error = 0;
/* released below */
- cred = get_current_cred();
- label = aa_get_newest_cred_label(cred);
+ subj_cred = get_current_cred();
+ label = aa_get_newest_cred_label(subj_cred);
previous = aa_get_newest_label(ctx->previous);
/*
@@ -1180,7 +1206,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
}
if (count) {
- new = change_hat(label, hats, count, flags);
+ new = change_hat(subj_cred, label, hats, count, flags);
AA_BUG(!new);
if (IS_ERR(new)) {
error = PTR_ERR(new);
@@ -1189,7 +1215,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
goto out;
}
- error = may_change_ptraced_domain(new, &info);
+ /* target cred is the same as current except new label */
+ error = may_change_ptraced_domain(subj_cred, new, &info);
if (error)
goto fail;
@@ -1242,7 +1269,7 @@ out:
aa_put_label(new);
aa_put_label(previous);
aa_put_label(label);
- put_cred(cred);
+ put_cred(subj_cred);
return error;
@@ -1252,7 +1279,7 @@ kill:
fail:
fn_for_each_in_ns(label, profile,
- aa_audit_file(profile, &perms, OP_CHANGE_HAT,
+ aa_audit_file(subj_cred, profile, &perms, OP_CHANGE_HAT,
AA_MAY_CHANGEHAT, NULL, NULL, target,
GLOBAL_ROOT_UID, info, error));
@@ -1261,6 +1288,7 @@ fail:
static int change_profile_perms_wrapper(const char *op, const char *name,
+ const struct cred *subj_cred,
struct aa_profile *profile,
struct aa_label *target, bool stack,
u32 request, struct aa_perms *perms)
@@ -1272,16 +1300,19 @@ static int change_profile_perms_wrapper(const char *op, const char *name,
if (!error)
error = change_profile_perms(profile, target, stack, request,
- rules->file.start[AA_CLASS_FILE],
+ rules->file->start[AA_CLASS_FILE],
perms);
if (error)
- error = aa_audit_file(profile, perms, op, request, name,
+ error = aa_audit_file(subj_cred, profile, perms, op, request,
+ name,
NULL, target, GLOBAL_ROOT_UID, info,
error);
return error;
}
+const char *stack_msg = "change_profile unprivileged unconfined converted to stacking";
+
/**
* aa_change_profile - perform a one-way profile transition
* @fqname: name of profile may include namespace (NOT NULL)
@@ -1304,6 +1335,7 @@ int aa_change_profile(const char *fqname, int flags)
const char *auditname = fqname; /* retain leading & if stack */
bool stack = flags & AA_CHANGE_STACK;
struct aa_task_ctx *ctx = task_ctx(current);
+ const struct cred *subj_cred = get_current_cred();
int error = 0;
char *op;
u32 request;
@@ -1340,6 +1372,28 @@ int aa_change_profile(const char *fqname, int flags)
op = OP_CHANGE_PROFILE;
}
+ /* This should move to a per profile test. Requires pushing build
+ * into callback
+ */
+ if (!stack && unconfined(label) &&
+ label == &labels_ns(label)->unconfined->label &&
+ aa_unprivileged_unconfined_restricted &&
+ /* TODO: refactor so this check is a fn */
+ cap_capable(current_cred(), &init_user_ns, CAP_MAC_OVERRIDE,
+ CAP_OPT_NOAUDIT)) {
+ /* regardless of the request in this case apparmor
+ * stacks against unconfined so admin set policy can't be
+ * by-passed
+ */
+ stack = true;
+ perms.audit = request;
+ (void) fn_for_each_in_ns(label, profile,
+ aa_audit_file(subj_cred, profile, &perms, op,
+ request, auditname, NULL, target,
+ GLOBAL_ROOT_UID, stack_msg, 0));
+ perms.audit = 0;
+ }
+
if (*fqname == '&') {
stack = true;
/* don't have label_parse() do stacking */
@@ -1381,6 +1435,7 @@ int aa_change_profile(const char *fqname, int flags)
*/
error = fn_for_each_in_ns(label, profile,
change_profile_perms_wrapper(op, auditname,
+ subj_cred,
profile, target, stack,
request, &perms));
if (error)
@@ -1391,7 +1446,7 @@ int aa_change_profile(const char *fqname, int flags)
check:
/* check if tracing task is allowed to trace target domain */
- error = may_change_ptraced_domain(target, &info);
+ error = may_change_ptraced_domain(subj_cred, target, &info);
if (error && !fn_for_each_in_ns(label, profile,
COMPLAIN_MODE(profile)))
goto audit;
@@ -1446,12 +1501,13 @@ check:
}
/* full transition will be built in exec path */
- error = aa_set_current_onexec(target, stack);
+ aa_set_current_onexec(target, stack);
}
audit:
error = fn_for_each_in_ns(label, profile,
- aa_audit_file(profile, &perms, op, request, auditname,
+ aa_audit_file(subj_cred,
+ profile, &perms, op, request, auditname,
NULL, new ? new : target,
GLOBAL_ROOT_UID, info, error));
@@ -1459,6 +1515,7 @@ out:
aa_put_label(new);
aa_put_label(target);
aa_put_label(label);
+ put_cred(subj_cred);
return error;
}
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
index 698b124e649f..c03eb7c19f16 100644
--- a/security/apparmor/file.c
+++ b/security/apparmor/file.c
@@ -44,38 +44,40 @@ static u32 map_mask_to_chr_mask(u32 mask)
static void file_audit_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
- kuid_t fsuid = current_fsuid();
+ struct apparmor_audit_data *ad = aad(sa);
+ kuid_t fsuid = ad->subj_cred ? ad->subj_cred->fsuid : current_fsuid();
char str[10];
- if (aad(sa)->request & AA_AUDIT_FILE_MASK) {
+ if (ad->request & AA_AUDIT_FILE_MASK) {
aa_perm_mask_to_str(str, sizeof(str), aa_file_perm_chrs,
- map_mask_to_chr_mask(aad(sa)->request));
+ map_mask_to_chr_mask(ad->request));
audit_log_format(ab, " requested_mask=\"%s\"", str);
}
- if (aad(sa)->denied & AA_AUDIT_FILE_MASK) {
+ if (ad->denied & AA_AUDIT_FILE_MASK) {
aa_perm_mask_to_str(str, sizeof(str), aa_file_perm_chrs,
- map_mask_to_chr_mask(aad(sa)->denied));
+ map_mask_to_chr_mask(ad->denied));
audit_log_format(ab, " denied_mask=\"%s\"", str);
}
- if (aad(sa)->request & AA_AUDIT_FILE_MASK) {
+ if (ad->request & AA_AUDIT_FILE_MASK) {
audit_log_format(ab, " fsuid=%d",
from_kuid(&init_user_ns, fsuid));
audit_log_format(ab, " ouid=%d",
- from_kuid(&init_user_ns, aad(sa)->fs.ouid));
+ from_kuid(&init_user_ns, ad->fs.ouid));
}
- if (aad(sa)->peer) {
+ if (ad->peer) {
audit_log_format(ab, " target=");
- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
+ aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
FLAG_VIEW_SUBNS, GFP_KERNEL);
- } else if (aad(sa)->fs.target) {
+ } else if (ad->fs.target) {
audit_log_format(ab, " target=");
- audit_log_untrustedstring(ab, aad(sa)->fs.target);
+ audit_log_untrustedstring(ab, ad->fs.target);
}
}
/**
* aa_audit_file - handle the auditing of file operations
+ * @subj_cred: cred of the subject
* @profile: the profile being enforced (NOT NULL)
* @perms: the permissions computed for the request (NOT NULL)
* @op: operation being mediated
@@ -89,59 +91,74 @@ static void file_audit_cb(struct audit_buffer *ab, void *va)
*
* Returns: %0 or error on failure
*/
-int aa_audit_file(struct aa_profile *profile, struct aa_perms *perms,
+int aa_audit_file(const struct cred *subj_cred,
+ struct aa_profile *profile, struct aa_perms *perms,
const char *op, u32 request, const char *name,
const char *target, struct aa_label *tlabel,
kuid_t ouid, const char *info, int error)
{
int type = AUDIT_APPARMOR_AUTO;
- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_TASK, AA_CLASS_FILE, op);
-
- sa.u.tsk = NULL;
- aad(&sa)->request = request;
- aad(&sa)->name = name;
- aad(&sa)->fs.target = target;
- aad(&sa)->peer = tlabel;
- aad(&sa)->fs.ouid = ouid;
- aad(&sa)->info = info;
- aad(&sa)->error = error;
- sa.u.tsk = NULL;
-
- if (likely(!aad(&sa)->error)) {
+ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_TASK, AA_CLASS_FILE, op);
+
+ ad.subj_cred = subj_cred;
+ ad.request = request;
+ ad.name = name;
+ ad.fs.target = target;
+ ad.peer = tlabel;
+ ad.fs.ouid = ouid;
+ ad.info = info;
+ ad.error = error;
+ ad.common.u.tsk = NULL;
+
+ if (likely(!ad.error)) {
u32 mask = perms->audit;
if (unlikely(AUDIT_MODE(profile) == AUDIT_ALL))
mask = 0xffff;
/* mask off perms that are not being force audited */
- aad(&sa)->request &= mask;
+ ad.request &= mask;
- if (likely(!aad(&sa)->request))
+ if (likely(!ad.request))
return 0;
type = AUDIT_APPARMOR_AUDIT;
} else {
/* only report permissions that were denied */
- aad(&sa)->request = aad(&sa)->request & ~perms->allow;
- AA_BUG(!aad(&sa)->request);
+ ad.request = ad.request & ~perms->allow;
+ AA_BUG(!ad.request);
- if (aad(&sa)->request & perms->kill)
+ if (ad.request & perms->kill)
type = AUDIT_APPARMOR_KILL;
/* quiet known rejects, assumes quiet and kill do not overlap */
- if ((aad(&sa)->request & perms->quiet) &&
+ if ((ad.request & perms->quiet) &&
AUDIT_MODE(profile) != AUDIT_NOQUIET &&
AUDIT_MODE(profile) != AUDIT_ALL)
- aad(&sa)->request &= ~perms->quiet;
+ ad.request &= ~perms->quiet;
- if (!aad(&sa)->request)
- return aad(&sa)->error;
+ if (!ad.request)
+ return ad.error;
}
- aad(&sa)->denied = aad(&sa)->request & ~perms->allow;
- return aa_audit(type, profile, &sa, file_audit_cb);
+ ad.denied = ad.request & ~perms->allow;
+ return aa_audit(type, profile, &ad, file_audit_cb);
}
-static int path_name(const char *op, struct aa_label *label,
+/**
+ * is_deleted - test if a file has been completely unlinked
+ * @dentry: dentry of file to test for deletion (NOT NULL)
+ *
+ * Returns: true if deleted else false
+ */
+static inline bool is_deleted(struct dentry *dentry)
+{
+ if (d_unlinked(dentry) && d_backing_inode(dentry)->i_nlink == 0)
+ return true;
+ return false;
+}
+
+static int path_name(const char *op, const struct cred *subj_cred,
+ struct aa_label *label,
const struct path *path, int flags, char *buffer,
const char **name, struct path_cond *cond, u32 request)
{
@@ -153,7 +170,8 @@ static int path_name(const char *op, struct aa_label *label,
labels_profile(label)->disconnected);
if (error) {
fn_for_each_confined(label, profile,
- aa_audit_file(profile, &nullperms, op, request, *name,
+ aa_audit_file(subj_cred,
+ profile, &nullperms, op, request, *name,
NULL, NULL, cond->uid, info, error));
return error;
}
@@ -164,7 +182,7 @@ static int path_name(const char *op, struct aa_label *label,
struct aa_perms default_perms = {};
/**
* aa_lookup_fperms - convert dfa compressed perms to internal perms
- * @dfa: dfa to lookup perms for (NOT NULL)
+ * @file_rules: the aa_policydb to lookup perms for (NOT NULL)
* @state: state in dfa
* @cond: conditions to consider (NOT NULL)
*
@@ -188,8 +206,8 @@ struct aa_perms *aa_lookup_fperms(struct aa_policydb *file_rules,
/**
* aa_str_perms - find permission that match @name
- * @dfa: to match against (MAYBE NULL)
- * @state: state to start matching in
+ * @file_rules: the aa_policydb to match against (NOT NULL)
+ * @start: state to start matching in
* @name: string to match against dfa (NOT NULL)
* @cond: conditions to consider for permission set computation (NOT NULL)
* @perms: Returns - the permissions found when matching @name
@@ -207,9 +225,9 @@ aa_state_t aa_str_perms(struct aa_policydb *file_rules, aa_state_t start,
return state;
}
-static int __aa_path_perm(const char *op, struct aa_profile *profile,
- const char *name, u32 request,
- struct path_cond *cond, int flags,
+static int __aa_path_perm(const char *op, const struct cred *subj_cred,
+ struct aa_profile *profile, const char *name,
+ u32 request, struct path_cond *cond, int flags,
struct aa_perms *perms)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
@@ -218,16 +236,18 @@ static int __aa_path_perm(const char *op, struct aa_profile *profile,
if (profile_unconfined(profile))
return 0;
- aa_str_perms(&(rules->file), rules->file.start[AA_CLASS_FILE],
+ aa_str_perms(rules->file, rules->file->start[AA_CLASS_FILE],
name, cond, perms);
if (request & ~perms->allow)
e = -EACCES;
- return aa_audit_file(profile, perms, op, request, name, NULL, NULL,
+ return aa_audit_file(subj_cred,
+ profile, perms, op, request, name, NULL, NULL,
cond->uid, NULL, e);
}
-static int profile_path_perm(const char *op, struct aa_profile *profile,
+static int profile_path_perm(const char *op, const struct cred *subj_cred,
+ struct aa_profile *profile,
const struct path *path, char *buffer, u32 request,
struct path_cond *cond, int flags,
struct aa_perms *perms)
@@ -238,18 +258,19 @@ static int profile_path_perm(const char *op, struct aa_profile *profile,
if (profile_unconfined(profile))
return 0;
- error = path_name(op, &profile->label, path,
+ error = path_name(op, subj_cred, &profile->label, path,
flags | profile->path_flags, buffer, &name, cond,
request);
if (error)
return error;
- return __aa_path_perm(op, profile, name, request, cond, flags,
- perms);
+ return __aa_path_perm(op, subj_cred, profile, name, request, cond,
+ flags, perms);
}
/**
* aa_path_perm - do permissions check & audit for @path
* @op: operation being checked
+ * @subj_cred: subject cred
* @label: profile being enforced (NOT NULL)
* @path: path to check permissions of (NOT NULL)
* @flags: any additional path flags beyond what the profile specifies
@@ -258,7 +279,8 @@ static int profile_path_perm(const char *op, struct aa_profile *profile,
*
* Returns: %0 else error if access denied or other error
*/
-int aa_path_perm(const char *op, struct aa_label *label,
+int aa_path_perm(const char *op, const struct cred *subj_cred,
+ struct aa_label *label,
const struct path *path, int flags, u32 request,
struct path_cond *cond)
{
@@ -273,8 +295,8 @@ int aa_path_perm(const char *op, struct aa_label *label,
if (!buffer)
return -ENOMEM;
error = fn_for_each_confined(label, profile,
- profile_path_perm(op, profile, path, buffer, request,
- cond, flags, &perms));
+ profile_path_perm(op, subj_cred, profile, path, buffer,
+ request, cond, flags, &perms));
aa_put_buffer(buffer);
@@ -301,7 +323,8 @@ static inline bool xindex_is_subset(u32 link, u32 target)
return true;
}
-static int profile_path_link(struct aa_profile *profile,
+static int profile_path_link(const struct cred *subj_cred,
+ struct aa_profile *profile,
const struct path *link, char *buffer,
const struct path *target, char *buffer2,
struct path_cond *cond)
@@ -315,29 +338,31 @@ static int profile_path_link(struct aa_profile *profile,
aa_state_t state;
int error;
- error = path_name(OP_LINK, &profile->label, link, profile->path_flags,
+ error = path_name(OP_LINK, subj_cred, &profile->label, link,
+ profile->path_flags,
buffer, &lname, cond, AA_MAY_LINK);
if (error)
goto audit;
/* buffer2 freed below, tname is pointer in buffer2 */
- error = path_name(OP_LINK, &profile->label, target, profile->path_flags,
+ error = path_name(OP_LINK, subj_cred, &profile->label, target,
+ profile->path_flags,
buffer2, &tname, cond, AA_MAY_LINK);
if (error)
goto audit;
error = -EACCES;
/* aa_str_perms - handles the case of the dfa being NULL */
- state = aa_str_perms(&(rules->file),
- rules->file.start[AA_CLASS_FILE], lname,
+ state = aa_str_perms(rules->file,
+ rules->file->start[AA_CLASS_FILE], lname,
cond, &lperms);
if (!(lperms.allow & AA_MAY_LINK))
goto audit;
/* test to see if target can be paired with link */
- state = aa_dfa_null_transition(rules->file.dfa, state);
- aa_str_perms(&(rules->file), state, tname, cond, &perms);
+ state = aa_dfa_null_transition(rules->file->dfa, state);
+ aa_str_perms(rules->file, state, tname, cond, &perms);
/* force audit/quiet masks for link are stored in the second entry
* in the link pair.
@@ -359,7 +384,7 @@ static int profile_path_link(struct aa_profile *profile,
/* Do link perm subset test requiring allowed permission on link are
* a subset of the allowed permissions on target.
*/
- aa_str_perms(&(rules->file), rules->file.start[AA_CLASS_FILE],
+ aa_str_perms(rules->file, rules->file->start[AA_CLASS_FILE],
tname, cond, &perms);
/* AA_MAY_LINK is not considered in the subset test */
@@ -381,12 +406,14 @@ done_tests:
error = 0;
audit:
- return aa_audit_file(profile, &lperms, OP_LINK, request, lname, tname,
+ return aa_audit_file(subj_cred,
+ profile, &lperms, OP_LINK, request, lname, tname,
NULL, cond->uid, info, error);
}
/**
* aa_path_link - Handle hard link permission check
+ * @subj_cred: subject cred
* @label: the label being enforced (NOT NULL)
* @old_dentry: the target dentry (NOT NULL)
* @new_dir: directory the new link will be created in (NOT NULL)
@@ -403,7 +430,8 @@ audit:
*
* Returns: %0 if allowed else error
*/
-int aa_path_link(struct aa_label *label, struct dentry *old_dentry,
+int aa_path_link(const struct cred *subj_cred,
+ struct aa_label *label, struct dentry *old_dentry,
const struct path *new_dir, struct dentry *new_dentry)
{
struct path link = { .mnt = new_dir->mnt, .dentry = new_dentry };
@@ -424,8 +452,8 @@ int aa_path_link(struct aa_label *label, struct dentry *old_dentry,
goto out;
error = fn_for_each_confined(label, profile,
- profile_path_link(profile, &link, buffer, &target,
- buffer2, &cond));
+ profile_path_link(subj_cred, profile, &link, buffer,
+ &target, buffer2, &cond));
out:
aa_put_buffer(buffer);
aa_put_buffer(buffer2);
@@ -453,7 +481,8 @@ static void update_file_ctx(struct aa_file_ctx *fctx, struct aa_label *label,
spin_unlock(&fctx->lock);
}
-static int __file_path_perm(const char *op, struct aa_label *label,
+static int __file_path_perm(const char *op, const struct cred *subj_cred,
+ struct aa_label *label,
struct aa_label *flabel, struct file *file,
u32 request, u32 denied, bool in_atomic)
{
@@ -480,7 +509,8 @@ static int __file_path_perm(const char *op, struct aa_label *label,
/* check every profile in task label not in current cache */
error = fn_for_each_not_in_set(flabel, label, profile,
- profile_path_perm(op, profile, &file->f_path, buffer,
+ profile_path_perm(op, subj_cred, profile,
+ &file->f_path, buffer,
request, &cond, flags, &perms));
if (denied && !error) {
/*
@@ -493,12 +523,14 @@ static int __file_path_perm(const char *op, struct aa_label *label,
*/
if (label == flabel)
error = fn_for_each(label, profile,
- profile_path_perm(op, profile, &file->f_path,
+ profile_path_perm(op, subj_cred,
+ profile, &file->f_path,
buffer, request, &cond, flags,
&perms));
else
error = fn_for_each_not_in_set(label, flabel, profile,
- profile_path_perm(op, profile, &file->f_path,
+ profile_path_perm(op, subj_cred,
+ profile, &file->f_path,
buffer, request, &cond, flags,
&perms));
}
@@ -510,7 +542,8 @@ static int __file_path_perm(const char *op, struct aa_label *label,
return error;
}
-static int __file_sock_perm(const char *op, struct aa_label *label,
+static int __file_sock_perm(const char *op, const struct cred *subj_cred,
+ struct aa_label *label,
struct aa_label *flabel, struct file *file,
u32 request, u32 denied)
{
@@ -524,11 +557,12 @@ static int __file_sock_perm(const char *op, struct aa_label *label,
return 0;
/* TODO: improve to skip profiles cached in flabel */
- error = aa_sock_file_perm(label, op, request, sock);
+ error = aa_sock_file_perm(subj_cred, label, op, request, sock);
if (denied) {
/* TODO: improve to skip profiles checked above */
/* check every profile in file label that is cached */
- last_error(error, aa_sock_file_perm(flabel, op, request, sock));
+ last_error(error, aa_sock_file_perm(subj_cred, flabel, op,
+ request, sock));
}
if (!error)
update_file_ctx(file_ctx(file), label, request);
@@ -539,6 +573,7 @@ static int __file_sock_perm(const char *op, struct aa_label *label,
/**
* aa_file_perm - do permission revalidation check & audit for @file
* @op: operation being checked
+ * @subj_cred: subject cred
* @label: label being enforced (NOT NULL)
* @file: file to revalidate access permissions on (NOT NULL)
* @request: requested permissions
@@ -546,7 +581,8 @@ static int __file_sock_perm(const char *op, struct aa_label *label,
*
* Returns: %0 if access allowed else error
*/
-int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
+int aa_file_perm(const char *op, const struct cred *subj_cred,
+ struct aa_label *label, struct file *file,
u32 request, bool in_atomic)
{
struct aa_file_ctx *fctx;
@@ -582,19 +618,19 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
/* TODO: label cross check */
if (file->f_path.mnt && path_mediated_fs(file->f_path.dentry))
- error = __file_path_perm(op, label, flabel, file, request,
- denied, in_atomic);
+ error = __file_path_perm(op, subj_cred, label, flabel, file,
+ request, denied, in_atomic);
else if (S_ISSOCK(file_inode(file)->i_mode))
- error = __file_sock_perm(op, label, flabel, file, request,
- denied);
+ error = __file_sock_perm(op, subj_cred, label, flabel, file,
+ request, denied);
aa_put_label(flabel);
done:
return error;
}
-static void revalidate_tty(struct aa_label *label)
+static void revalidate_tty(const struct cred *subj_cred, struct aa_label *label)
{
struct tty_struct *tty;
int drop_tty = 0;
@@ -612,8 +648,8 @@ static void revalidate_tty(struct aa_label *label)
struct tty_file_private, list);
file = file_priv->file;
- if (aa_file_perm(OP_INHERIT, label, file, MAY_READ | MAY_WRITE,
- IN_ATOMIC))
+ if (aa_file_perm(OP_INHERIT, subj_cred, label, file,
+ MAY_READ | MAY_WRITE, IN_ATOMIC))
drop_tty = 1;
}
spin_unlock(&tty->files_lock);
@@ -623,12 +659,17 @@ static void revalidate_tty(struct aa_label *label)
no_tty();
}
+struct cred_label {
+ const struct cred *cred;
+ struct aa_label *label;
+};
+
static int match_file(const void *p, struct file *file, unsigned int fd)
{
- struct aa_label *label = (struct aa_label *)p;
+ struct cred_label *cl = (struct cred_label *)p;
- if (aa_file_perm(OP_INHERIT, label, file, aa_map_file_to_perms(file),
- IN_ATOMIC))
+ if (aa_file_perm(OP_INHERIT, cl->cred, cl->label, file,
+ aa_map_file_to_perms(file), IN_ATOMIC))
return fd + 1;
return 0;
}
@@ -638,13 +679,17 @@ static int match_file(const void *p, struct file *file, unsigned int fd)
void aa_inherit_files(const struct cred *cred, struct files_struct *files)
{
struct aa_label *label = aa_get_newest_cred_label(cred);
+ struct cred_label cl = {
+ .cred = cred,
+ .label = label,
+ };
struct file *devnull = NULL;
unsigned int n;
- revalidate_tty(label);
+ revalidate_tty(cred, label);
/* Revalidate access to inherited open files. */
- n = iterate_fd(files, 0, match_file, label);
+ n = iterate_fd(files, 0, match_file, &cl);
if (!n) /* none found? */
goto out;
@@ -654,7 +699,7 @@ void aa_inherit_files(const struct cred *cred, struct files_struct *files)
/* replace all the matching ones with this */
do {
replace_fd(n - 1, devnull, 0);
- } while ((n = iterate_fd(files, n, match_file, label)) != 0);
+ } while ((n = iterate_fd(files, n, match_file, &cl)) != 0);
if (devnull)
fput(devnull);
out:
diff --git a/security/apparmor/include/apparmor.h b/security/apparmor/include/apparmor.h
index 8a81557c9d59..f83934913b0f 100644
--- a/security/apparmor/include/apparmor.h
+++ b/security/apparmor/include/apparmor.h
@@ -30,9 +30,10 @@
#define AA_CLASS_NET 14
#define AA_CLASS_LABEL 16
#define AA_CLASS_POSIX_MQUEUE 17
-#define AA_CLASS_IO_URING 18
#define AA_CLASS_MODULE 19
#define AA_CLASS_DISPLAY_LSM 20
+#define AA_CLASS_NS 21
+#define AA_CLASS_IO_URING 22
#define AA_CLASS_X 31
#define AA_CLASS_DBUS 32
diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
index c328f07f11cd..acbb03b9bd25 100644
--- a/security/apparmor/include/audit.h
+++ b/security/apparmor/include/audit.h
@@ -103,13 +103,18 @@ enum audit_type {
#define OP_PROF_LOAD "profile_load"
#define OP_PROF_RM "profile_remove"
+#define OP_USERNS_CREATE "userns_create"
+
+#define OP_URING_OVERRIDE "uring_override"
+#define OP_URING_SQPOLL "uring_sqpoll"
struct apparmor_audit_data {
int error;
int type;
u16 class;
const char *op;
- struct aa_label *label;
+ const struct cred *subj_cred;
+ struct aa_label *subj_label;
const char *name;
const char *info;
u32 request;
@@ -151,34 +156,39 @@ struct apparmor_audit_data {
const char *data;
unsigned long flags;
} mnt;
+ struct {
+ struct aa_label *target;
+ } uring;
};
+
+ struct common_audit_data common;
};
/* macros for dealing with apparmor_audit_data structure */
-#define aad(SA) ((SA)->apparmor_audit_data)
+#define aad(SA) (container_of(SA, struct apparmor_audit_data, common))
+#define aad_of_va(VA) aad((struct common_audit_data *)(VA))
+
#define DEFINE_AUDIT_DATA(NAME, T, C, X) \
/* TODO: cleanup audit init so we don't need _aad = {0,} */ \
- struct apparmor_audit_data NAME ## _aad = { \
+ struct apparmor_audit_data NAME = { \
.class = (C), \
.op = (X), \
- }; \
- struct common_audit_data NAME = \
- { \
- .type = (T), \
- .u.tsk = NULL, \
- }; \
- NAME.apparmor_audit_data = &(NAME ## _aad)
-
-void aa_audit_msg(int type, struct common_audit_data *sa,
+ .common.type = (T), \
+ .common.u.tsk = NULL, \
+ .common.apparmor_audit_data = &NAME, \
+ };
+
+void aa_audit_msg(int type, struct apparmor_audit_data *ad,
void (*cb) (struct audit_buffer *, void *));
-int aa_audit(int type, struct aa_profile *profile, struct common_audit_data *sa,
+int aa_audit(int type, struct aa_profile *profile,
+ struct apparmor_audit_data *ad,
void (*cb) (struct audit_buffer *, void *));
-#define aa_audit_error(ERROR, SA, CB) \
+#define aa_audit_error(ERROR, AD, CB) \
({ \
- aad((SA))->error = (ERROR); \
- aa_audit_msg(AUDIT_APPARMOR_ERROR, (SA), (CB)); \
- aad((SA))->error; \
+ (AD)->error = (ERROR); \
+ aa_audit_msg(AUDIT_APPARMOR_ERROR, (AD), (CB)); \
+ (AD)->error; \
})
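
The reworked aad() macro above recovers the enclosing apparmor_audit_data from a pointer to its embedded common_audit_data using container_of(). A minimal stand-alone sketch of that pattern, with container_of re-implemented locally and purely illustrative struct names (not the kernel definitions):

#include <stddef.h>
#include <stdio.h>

/* local stand-in for the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct common_data {			/* plays the role of common_audit_data */
	int type;
};

struct wrapper {			/* plays the role of apparmor_audit_data */
	int error;
	struct common_data common;	/* embedded member, like 'common' above */
};

static struct wrapper *to_wrapper(struct common_data *c)
{
	return container_of(c, struct wrapper, common);
}

int main(void)
{
	struct wrapper w = { .error = -13, .common = { .type = 1 } };
	struct common_data *c = &w.common;	/* only the embedded member is passed around */

	printf("error=%d\n", to_wrapper(c)->error);	/* prints -13 */
	return 0;
}

aad_of_va() applies the same recovery to the void * that audit callbacks receive.
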
diff --git a/security/apparmor/include/capability.h b/security/apparmor/include/capability.h
index d420e2d10b31..d6dcc604ec0c 100644
--- a/security/apparmor/include/capability.h
+++ b/security/apparmor/include/capability.h
@@ -36,7 +36,8 @@ struct aa_caps {
extern struct aa_sfs_entry aa_sfs_entry_caps[];
-int aa_capable(struct aa_label *label, int cap, unsigned int opts);
+int aa_capable(const struct cred *subj_cred, struct aa_label *label,
+ int cap, unsigned int opts);
static inline void aa_free_cap_rules(struct aa_caps *caps)
{
diff --git a/security/apparmor/include/file.h b/security/apparmor/include/file.h
index 5be620af33ba..6e8f2aa66cd6 100644
--- a/security/apparmor/include/file.h
+++ b/security/apparmor/include/file.h
@@ -45,43 +45,6 @@ struct aa_file_ctx {
u32 allow;
};
-/**
- * aa_alloc_file_ctx - allocate file_ctx
- * @label: initial label of task creating the file
- * @gfp: gfp flags for allocation
- *
- * Returns: file_ctx or NULL on failure
- */
-static inline struct aa_file_ctx *aa_alloc_file_ctx(struct aa_label *label,
- gfp_t gfp)
-{
- struct aa_file_ctx *ctx;
-
- ctx = kzalloc(sizeof(struct aa_file_ctx), gfp);
- if (ctx) {
- spin_lock_init(&ctx->lock);
- rcu_assign_pointer(ctx->label, aa_get_label(label));
- }
- return ctx;
-}
-
-/**
- * aa_free_file_ctx - free a file_ctx
- * @ctx: file_ctx to free (MAYBE_NULL)
- */
-static inline void aa_free_file_ctx(struct aa_file_ctx *ctx)
-{
- if (ctx) {
- aa_put_label(rcu_access_pointer(ctx->label));
- kfree_sensitive(ctx);
- }
-}
-
-static inline struct aa_label *aa_get_file_label(struct aa_file_ctx *ctx)
-{
- return aa_get_label_rcu(&ctx->label);
-}
-
/*
* The xindex is broken into 3 parts
* - index - an index into either the exec name table or the variable table
@@ -108,7 +71,8 @@ struct path_cond {
#define COMBINED_PERM_MASK(X) ((X).allow | (X).audit | (X).quiet | (X).kill)
-int aa_audit_file(struct aa_profile *profile, struct aa_perms *perms,
+int aa_audit_file(const struct cred *cred,
+ struct aa_profile *profile, struct aa_perms *perms,
const char *op, u32 request, const char *name,
const char *target, struct aa_label *tlabel, kuid_t ouid,
const char *info, int error);
@@ -119,14 +83,16 @@ aa_state_t aa_str_perms(struct aa_policydb *file_rules, aa_state_t start,
const char *name, struct path_cond *cond,
struct aa_perms *perms);
-int aa_path_perm(const char *op, struct aa_label *label,
- const struct path *path, int flags, u32 request,
- struct path_cond *cond);
+int aa_path_perm(const char *op, const struct cred *subj_cred,
+ struct aa_label *label, const struct path *path,
+ int flags, u32 request, struct path_cond *cond);
-int aa_path_link(struct aa_label *label, struct dentry *old_dentry,
- const struct path *new_dir, struct dentry *new_dentry);
+int aa_path_link(const struct cred *subj_cred, struct aa_label *label,
+ struct dentry *old_dentry, const struct path *new_dir,
+ struct dentry *new_dentry);
-int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
+int aa_file_perm(const char *op, const struct cred *subj_cred,
+ struct aa_label *label, struct file *file,
u32 request, bool in_atomic);
void aa_inherit_files(const struct cred *cred, struct files_struct *files);
diff --git a/security/apparmor/include/ipc.h b/security/apparmor/include/ipc.h
index a1ac6ffb95e9..74d17052f76b 100644
--- a/security/apparmor/include/ipc.h
+++ b/security/apparmor/include/ipc.h
@@ -13,6 +13,8 @@
#include <linux/sched.h>
-int aa_may_signal(struct aa_label *sender, struct aa_label *target, int sig);
+int aa_may_signal(const struct cred *subj_cred, struct aa_label *sender,
+ const struct cred *target_cred, struct aa_label *target,
+ int sig);
#endif /* __AA_IPC_H */
diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h
index 73c8a32c6861..d7a894b1031f 100644
--- a/security/apparmor/include/lib.h
+++ b/security/apparmor/include/lib.h
@@ -16,6 +16,8 @@
#include "match.h"
+extern struct aa_dfa *stacksplitdfa;
+
/*
* DEBUG remains global (no per profile flag) since it is mostly used in sysctl
* which is not related to profile accesses.
diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h
index 58fbf67139b9..4bb0405c9190 100644
--- a/security/apparmor/include/match.h
+++ b/security/apparmor/include/match.h
@@ -102,9 +102,6 @@ struct aa_dfa {
struct table_header *tables[YYTD_ID_TSIZE];
};
-extern struct aa_dfa *nulldfa;
-extern struct aa_dfa *stacksplitdfa;
-
#define byte_to_byte(X) (X)
#define UNPACK_ARRAY(TABLE, BLOB, LEN, TTYPE, BTYPE, NTOHX) \
@@ -122,9 +119,6 @@ static inline size_t table_size(size_t len, size_t el_size)
return ALIGN(sizeof(struct table_header) + len * el_size, 8);
}
-int aa_setup_dfa_engine(void);
-void aa_teardown_dfa_engine(void);
-
#define aa_state_t unsigned int
struct aa_dfa *aa_dfa_unpack(void *blob, size_t size, int flags);
diff --git a/security/apparmor/include/mount.h b/security/apparmor/include/mount.h
index a710683b2496..46834f828179 100644
--- a/security/apparmor/include/mount.h
+++ b/security/apparmor/include/mount.h
@@ -25,26 +25,36 @@
#define AA_MS_IGNORE_MASK (MS_KERNMOUNT | MS_NOSEC | MS_ACTIVE | MS_BORN)
-int aa_remount(struct aa_label *label, const struct path *path,
+int aa_remount(const struct cred *subj_cred,
+ struct aa_label *label, const struct path *path,
unsigned long flags, void *data);
-int aa_bind_mount(struct aa_label *label, const struct path *path,
+int aa_bind_mount(const struct cred *subj_cred,
+ struct aa_label *label, const struct path *path,
const char *old_name, unsigned long flags);
-int aa_mount_change_type(struct aa_label *label, const struct path *path,
+int aa_mount_change_type(const struct cred *subj_cred,
+ struct aa_label *label, const struct path *path,
unsigned long flags);
-int aa_move_mount(struct aa_label *label, const struct path *path,
- const char *old_name);
+int aa_move_mount_old(const struct cred *subj_cred,
+ struct aa_label *label, const struct path *path,
+ const char *old_name);
+int aa_move_mount(const struct cred *subj_cred,
+ struct aa_label *label, const struct path *from_path,
+ const struct path *to_path);
-int aa_new_mount(struct aa_label *label, const char *dev_name,
+int aa_new_mount(const struct cred *subj_cred,
+ struct aa_label *label, const char *dev_name,
const struct path *path, const char *type, unsigned long flags,
void *data);
-int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags);
+int aa_umount(const struct cred *subj_cred,
+ struct aa_label *label, struct vfsmount *mnt, int flags);
-int aa_pivotroot(struct aa_label *label, const struct path *old_path,
+int aa_pivotroot(const struct cred *subj_cred,
+ struct aa_label *label, const struct path *old_path,
const struct path *new_path);
#endif /* __AA_MOUNT_H */
diff --git a/security/apparmor/include/net.h b/security/apparmor/include/net.h
index 6fa440b5daed..67bf888c3bd6 100644
--- a/security/apparmor/include/net.h
+++ b/security/apparmor/include/net.h
@@ -52,7 +52,11 @@ struct aa_sk_ctx {
};
#define SK_CTX(X) ((X)->sk_security)
-#define SOCK_ctx(X) SOCK_INODE(X)->i_security
+static inline struct aa_sk_ctx *aa_sock(const struct sock *sk)
+{
+ return sk->sk_security;
+}
+
#define DEFINE_AUDIT_NET(NAME, OP, SK, F, T, P) \
struct lsm_network_audit NAME ## _net = { .sk = (SK), \
.family = (F)}; \
@@ -61,9 +65,9 @@ struct aa_sk_ctx {
LSM_AUDIT_DATA_NONE, \
AA_CLASS_NET, \
OP); \
- NAME.u.net = &(NAME ## _net); \
- aad(&NAME)->net.type = (T); \
- aad(&NAME)->net.protocol = (P)
+ NAME.common.u.net = &(NAME ## _net); \
+ NAME.net.type = (T); \
+ NAME.net.protocol = (P)
#define DEFINE_AUDIT_SK(NAME, OP, SK) \
DEFINE_AUDIT_NET(NAME, OP, SK, (SK)->sk_family, (SK)->sk_type, \
@@ -90,21 +94,24 @@ struct aa_secmark {
extern struct aa_sfs_entry aa_sfs_entry_network[];
void audit_net_cb(struct audit_buffer *ab, void *va);
-int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
+int aa_profile_af_perm(struct aa_profile *profile,
+ struct apparmor_audit_data *ad,
u32 request, u16 family, int type);
-int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
+int aa_af_perm(const struct cred *subj_cred, struct aa_label *label,
+ const char *op, u32 request, u16 family,
int type, int protocol);
static inline int aa_profile_af_sk_perm(struct aa_profile *profile,
- struct common_audit_data *sa,
+ struct apparmor_audit_data *ad,
u32 request,
struct sock *sk)
{
- return aa_profile_af_perm(profile, sa, request, sk->sk_family,
+ return aa_profile_af_perm(profile, ad, request, sk->sk_family,
sk->sk_type);
}
int aa_sk_perm(const char *op, u32 request, struct sock *sk);
-int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
+int aa_sock_file_perm(const struct cred *subj_cred, struct aa_label *label,
+ const char *op, u32 request,
struct socket *sock);
int apparmor_secmark_check(struct aa_label *label, char *op, u32 request,
diff --git a/security/apparmor/include/perms.h b/security/apparmor/include/perms.h
index 797a7a00644d..0f7e913c3fc2 100644
--- a/security/apparmor/include/perms.h
+++ b/security/apparmor/include/perms.h
@@ -48,6 +48,9 @@
#define AA_LINK_SUBSET AA_MAY_LOCK /* overlaid */
+#define AA_MAY_CREATE_SQPOLL AA_MAY_CREATE
+#define AA_MAY_OVERRIDE_CRED AA_MAY_APPEND
+#define AA_URING_PERM_MASK (AA_MAY_OVERRIDE_CRED | AA_MAY_CREATE_SQPOLL)
#define PERMS_CHRS_MASK (MAY_READ | MAY_WRITE | AA_MAY_CREATE | \
AA_MAY_DELETE | AA_MAY_LINK | AA_MAY_LOCK | \
@@ -212,8 +215,8 @@ void aa_profile_match_label(struct aa_profile *profile,
int type, u32 request, struct aa_perms *perms);
int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target,
u32 request, int type, u32 *deny,
- struct common_audit_data *sa);
+ struct apparmor_audit_data *ad);
int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
- u32 request, struct common_audit_data *sa,
+ u32 request, struct apparmor_audit_data *ad,
void (*cb)(struct audit_buffer *, void *));
#endif /* __AA_PERM_H */
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
index 545f791cabda..75088cc310b6 100644
--- a/security/apparmor/include/policy.h
+++ b/security/apparmor/include/policy.h
@@ -34,6 +34,7 @@
struct aa_ns;
extern int unprivileged_userns_apparmor_policy;
+extern int aa_unprivileged_unconfined_restricted;
extern const char *const aa_profile_mode_names[];
#define APPARMOR_MODE_NAMES_MAX_INDEX 4
@@ -74,12 +75,14 @@ enum profile_mode {
/* struct aa_policydb - match engine for a policy
+ * count: refcount for the pdb
* dfa: dfa pattern match
* perms: table of permissions
* strs: table of strings, index by x
* start: set of start states for the different classes of data
*/
struct aa_policydb {
+ struct kref count;
struct aa_dfa *dfa;
struct {
struct aa_perms *perms;
@@ -89,13 +92,36 @@ struct aa_policydb {
aa_state_t start[AA_CLASS_LAST + 1];
};
-static inline void aa_destroy_policydb(struct aa_policydb *policy)
+extern struct aa_policydb *nullpdb;
+
+struct aa_policydb *aa_alloc_pdb(gfp_t gfp);
+void aa_pdb_free_kref(struct kref *kref);
+
+/**
+ * aa_get_pdb - increment refcount on @pdb
+ * @pdb: policydb (MAYBE NULL)
+ *
+ * Returns: pointer to @pdb, or NULL if @pdb is NULL
+ * Requires: @pdb must be held with valid refcount when called
+ */
+static inline struct aa_policydb *aa_get_pdb(struct aa_policydb *pdb)
{
- aa_put_dfa(policy->dfa);
- if (policy->perms)
- kvfree(policy->perms);
- aa_free_str_table(&policy->trans);
+ if (pdb)
+ kref_get(&(pdb->count));
+
+ return pdb;
+}
+/**
+ * aa_put_pdb - put a pdb refcount
+ * @pdb: pdb to put refcount (MAYBE NULL)
+ *
+ * Requires: if @pdb != NULL that a valid refcount be held
+ */
+static inline void aa_put_pdb(struct aa_policydb *pdb)
+{
+ if (pdb)
+ kref_put(&pdb->count, aa_pdb_free_kref);
}
static inline struct aa_perms *aa_lookup_perms(struct aa_policydb *policy,
@@ -139,8 +165,8 @@ struct aa_ruleset {
int size;
/* TODO: merge policy and file */
- struct aa_policydb policy;
- struct aa_policydb file;
+ struct aa_policydb *policy;
+ struct aa_policydb *file;
struct aa_caps caps;
struct aa_rlimit rlimits;
@@ -159,7 +185,7 @@ struct aa_ruleset {
*/
struct aa_attachment {
const char *xmatch_str;
- struct aa_policydb xmatch;
+ struct aa_policydb *xmatch;
unsigned int xmatch_len;
int xattr_count;
char **xattrs;
@@ -227,10 +253,6 @@ extern enum profile_mode aa_g_profile_mode;
#define profiles_ns(P) ((P)->ns)
#define name_is_shared(A, B) ((A)->hname && (A)->hname == (B)->hname)
-void aa_add_profile(struct aa_policy *common, struct aa_profile *profile);
-
-
-void aa_free_proxy_kref(struct kref *kref);
struct aa_ruleset *aa_alloc_ruleset(gfp_t gfp);
struct aa_profile *aa_alloc_profile(const char *name, struct aa_proxy *proxy,
gfp_t gfp);
@@ -239,14 +261,12 @@ struct aa_profile *aa_alloc_null(struct aa_profile *parent, const char *name,
struct aa_profile *aa_new_learning_profile(struct aa_profile *parent, bool hat,
const char *base, gfp_t gfp);
void aa_free_profile(struct aa_profile *profile);
-void aa_free_profile_kref(struct kref *kref);
struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name);
struct aa_profile *aa_lookupn_profile(struct aa_ns *ns, const char *hname,
size_t n);
struct aa_profile *aa_lookup_profile(struct aa_ns *ns, const char *name);
struct aa_profile *aa_fqlookupn_profile(struct aa_label *base,
const char *fqname, size_t n);
-struct aa_profile *aa_match_profile(struct aa_ns *ns, const char *name);
ssize_t aa_replace_profiles(struct aa_ns *view, struct aa_label *label,
u32 mask, struct aa_loaddata *udata);
@@ -254,9 +274,6 @@ ssize_t aa_remove_profiles(struct aa_ns *view, struct aa_label *label,
char *name, size_t size);
void __aa_profile_list_release(struct list_head *head);
-#define PROF_ADD 1
-#define PROF_REPLACE 0
-
#define profile_unconfined(X) ((X)->mode == APPARMOR_UNCONFINED)
/**
@@ -276,10 +293,10 @@ static inline aa_state_t RULE_MEDIATES(struct aa_ruleset *rules,
unsigned char class)
{
if (class <= AA_CLASS_LAST)
- return rules->policy.start[class];
+ return rules->policy->start[class];
else
- return aa_dfa_match_len(rules->policy.dfa,
- rules->policy.start[0], &class, 1);
+ return aa_dfa_match_len(rules->policy->dfa,
+ rules->policy->start[0], &class, 1);
}
static inline aa_state_t RULE_MEDIATES_AF(struct aa_ruleset *rules, u16 AF)
@@ -289,7 +306,7 @@ static inline aa_state_t RULE_MEDIATES_AF(struct aa_ruleset *rules, u16 AF)
if (!state)
return DFA_NOMATCH;
- return aa_dfa_match_len(rules->policy.dfa, state, (char *) &be_af, 2);
+ return aa_dfa_match_len(rules->policy->dfa, state, (char *) &be_af, 2);
}
static inline aa_state_t ANY_RULE_MEDIATES(struct list_head *head,
@@ -370,9 +387,12 @@ static inline int AUDIT_MODE(struct aa_profile *profile)
return profile->audit;
}
-bool aa_policy_view_capable(struct aa_label *label, struct aa_ns *ns);
-bool aa_policy_admin_capable(struct aa_label *label, struct aa_ns *ns);
-int aa_may_manage_policy(struct aa_label *label, struct aa_ns *ns,
+bool aa_policy_view_capable(const struct cred *subj_cred,
+ struct aa_label *label, struct aa_ns *ns);
+bool aa_policy_admin_capable(const struct cred *subj_cred,
+ struct aa_label *label, struct aa_ns *ns);
+int aa_may_manage_policy(const struct cred *subj_cred,
+ struct aa_label *label, struct aa_ns *ns,
u32 mask);
bool aa_current_policy_view_capable(struct aa_ns *ns);
bool aa_current_policy_admin_capable(struct aa_ns *ns);
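
The policy.h hunks above turn aa_policydb into a shared, kref-counted object managed with aa_get_pdb()/aa_put_pdb(). A stand-alone sketch of the same get/put discipline, using C11 atomics in place of struct kref (the names and the free path are illustrative, not the kernel implementation):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct pdb {
	atomic_int count;	/* stand-in for struct kref */
	int data;
};

static struct pdb *pdb_alloc(int data)
{
	struct pdb *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	atomic_init(&p->count, 1);	/* caller holds the initial reference */
	p->data = data;
	return p;
}

static struct pdb *pdb_get(struct pdb *p)
{
	if (p)				/* tolerate NULL, like aa_get_pdb() */
		atomic_fetch_add(&p->count, 1);
	return p;
}

static void pdb_put(struct pdb *p)
{
	if (p && atomic_fetch_sub(&p->count, 1) == 1)
		free(p);		/* last reference dropped */
}

int main(void)
{
	struct pdb *shared = pdb_alloc(42);
	struct pdb *ruleset_a = pdb_get(shared);	/* two rulesets share one pdb */
	struct pdb *ruleset_b = pdb_get(shared);

	pdb_put(ruleset_a);
	pdb_put(ruleset_b);
	printf("data=%d\n", shared->data);		/* still valid: one ref left */
	pdb_put(shared);				/* freed here */
	return 0;
}

Tolerating NULL in both helpers mirrors the MAYBE NULL convention stated in the kernel-doc above.
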
diff --git a/security/apparmor/include/policy_ns.h b/security/apparmor/include/policy_ns.h
index 33d665516fc1..d646070fd966 100644
--- a/security/apparmor/include/policy_ns.h
+++ b/security/apparmor/include/policy_ns.h
@@ -86,10 +86,7 @@ const char *aa_ns_name(struct aa_ns *parent, struct aa_ns *child, bool subns);
void aa_free_ns(struct aa_ns *ns);
int aa_alloc_root_ns(void);
void aa_free_root_ns(void);
-void aa_free_ns_kref(struct kref *kref);
-struct aa_ns *aa_find_ns(struct aa_ns *root, const char *name);
-struct aa_ns *aa_findn_ns(struct aa_ns *root, const char *name, size_t n);
struct aa_ns *__aa_lookupn_ns(struct aa_ns *view, const char *hname, size_t n);
struct aa_ns *aa_lookupn_ns(struct aa_ns *view, const char *name, size_t n);
struct aa_ns *__aa_find_or_create_ns(struct aa_ns *parent, const char *name,
@@ -151,15 +148,4 @@ static inline struct aa_ns *__aa_find_ns(struct list_head *head,
return __aa_findn_ns(head, name, strlen(name));
}
-static inline struct aa_ns *__aa_lookup_ns(struct aa_ns *base,
- const char *hname)
-{
- return __aa_lookupn_ns(base, hname, strlen(hname));
-}
-
-static inline struct aa_ns *aa_lookup_ns(struct aa_ns *view, const char *name)
-{
- return aa_lookupn_ns(view, name, strlen(name));
-}
-
#endif /* AA_NAMESPACE_H */
diff --git a/security/apparmor/include/resource.h b/security/apparmor/include/resource.h
index 961d85d328ea..ad2c0da8e64f 100644
--- a/security/apparmor/include/resource.h
+++ b/security/apparmor/include/resource.h
@@ -33,7 +33,8 @@ struct aa_rlimit {
extern struct aa_sfs_entry aa_sfs_entry_rlimit[];
int aa_map_resource(int resource);
-int aa_task_setrlimit(struct aa_label *label, struct task_struct *task,
+int aa_task_setrlimit(const struct cred *subj_cred, struct aa_label *label,
+ struct task_struct *task,
unsigned int resource, struct rlimit *new_rlim);
void __aa_transition_rlimits(struct aa_label *old, struct aa_label *new);
diff --git a/security/apparmor/include/task.h b/security/apparmor/include/task.h
index 13437d62c70f..b1aaaf60fa8b 100644
--- a/security/apparmor/include/task.h
+++ b/security/apparmor/include/task.h
@@ -30,7 +30,7 @@ struct aa_task_ctx {
};
int aa_replace_current_label(struct aa_label *label);
-int aa_set_current_onexec(struct aa_label *label, bool stack);
+void aa_set_current_onexec(struct aa_label *label, bool stack);
int aa_set_current_hat(struct aa_label *label, u64 token);
int aa_restore_previous_label(u64 cookie);
struct aa_label *aa_get_task_label(struct task_struct *task);
@@ -91,8 +91,15 @@ static inline void aa_clear_task_ctx_trans(struct aa_task_ctx *ctx)
"segv usr2 pipe alrm term stkflt chld cont stop stp ttin ttou urg " \
"xcpu xfsz vtalrm prof winch io pwr sys emt lost"
-int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
+int aa_may_ptrace(const struct cred *tracer_cred, struct aa_label *tracer,
+ const struct cred *tracee_cred, struct aa_label *tracee,
u32 request);
+
+#define AA_USERNS_CREATE 8
+
+int aa_profile_ns_perm(struct aa_profile *profile,
+ struct apparmor_audit_data *ad, u32 request);
+
#endif /* __AA_TASK_H */
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
index 5acde746775f..0cdf4340b02d 100644
--- a/security/apparmor/ipc.c
+++ b/security/apparmor/ipc.c
@@ -52,31 +52,33 @@ static const char *audit_signal_mask(u32 mask)
static void audit_signal_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
+ struct apparmor_audit_data *ad = aad(sa);
- if (aad(sa)->request & AA_SIGNAL_PERM_MASK) {
+ if (ad->request & AA_SIGNAL_PERM_MASK) {
audit_log_format(ab, " requested_mask=\"%s\"",
- audit_signal_mask(aad(sa)->request));
- if (aad(sa)->denied & AA_SIGNAL_PERM_MASK) {
+ audit_signal_mask(ad->request));
+ if (ad->denied & AA_SIGNAL_PERM_MASK) {
audit_log_format(ab, " denied_mask=\"%s\"",
- audit_signal_mask(aad(sa)->denied));
+ audit_signal_mask(ad->denied));
}
}
- if (aad(sa)->signal == SIGUNKNOWN)
+ if (ad->signal == SIGUNKNOWN)
audit_log_format(ab, "signal=unknown(%d)",
- aad(sa)->unmappedsig);
- else if (aad(sa)->signal < MAXMAPPED_SIGNAME)
- audit_log_format(ab, " signal=%s", sig_names[aad(sa)->signal]);
+ ad->unmappedsig);
+ else if (ad->signal < MAXMAPPED_SIGNAME)
+ audit_log_format(ab, " signal=%s", sig_names[ad->signal]);
else
audit_log_format(ab, " signal=rtmin+%d",
- aad(sa)->signal - SIGRT_BASE);
+ ad->signal - SIGRT_BASE);
audit_log_format(ab, " peer=");
- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
+ aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
FLAGS_NONE, GFP_ATOMIC);
}
-static int profile_signal_perm(struct aa_profile *profile,
+static int profile_signal_perm(const struct cred *cred,
+ struct aa_profile *profile,
struct aa_label *peer, u32 request,
- struct common_audit_data *sa)
+ struct apparmor_audit_data *ad)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
@@ -87,24 +89,29 @@ static int profile_signal_perm(struct aa_profile *profile,
!ANY_RULE_MEDIATES(&profile->rules, AA_CLASS_SIGNAL))
return 0;
- aad(sa)->peer = peer;
+ ad->subj_cred = cred;
+ ad->peer = peer;
/* TODO: secondary cache check <profile, profile, perm> */
- state = aa_dfa_next(rules->policy.dfa,
- rules->policy.start[AA_CLASS_SIGNAL],
- aad(sa)->signal);
+ state = aa_dfa_next(rules->policy->dfa,
+ rules->policy->start[AA_CLASS_SIGNAL],
+ ad->signal);
aa_label_match(profile, rules, peer, state, false, request, &perms);
aa_apply_modes_to_perms(profile, &perms);
- return aa_check_perms(profile, &perms, request, sa, audit_signal_cb);
+ return aa_check_perms(profile, &perms, request, ad, audit_signal_cb);
}
-int aa_may_signal(struct aa_label *sender, struct aa_label *target, int sig)
+int aa_may_signal(const struct cred *subj_cred, struct aa_label *sender,
+ const struct cred *target_cred, struct aa_label *target,
+ int sig)
{
struct aa_profile *profile;
- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_SIGNAL, OP_SIGNAL);
+ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_SIGNAL, OP_SIGNAL);
- aad(&sa)->signal = map_signal_num(sig);
- aad(&sa)->unmappedsig = sig;
+ ad.signal = map_signal_num(sig);
+ ad.unmappedsig = sig;
return xcheck_labels(sender, target, profile,
- profile_signal_perm(profile, target, MAY_WRITE, &sa),
- profile_signal_perm(profile, sender, MAY_READ, &sa));
+ profile_signal_perm(subj_cred, profile, target,
+ MAY_WRITE, &ad),
+ profile_signal_perm(target_cred, profile, sender,
+ MAY_READ, &ad));
}
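
aa_may_signal() above now takes the creds of both ends and cross-checks the pair: the sender's profiles need MAY_WRITE toward the target, and the target's profiles need MAY_READ toward the sender. A simplified sketch of that symmetric check (the per-profile permission lookup is stubbed out; the kernel's xcheck_labels() runs both checks over full label sets rather than short-circuiting):

#include <stdio.h>

#define MAY_READ  0x1
#define MAY_WRITE 0x2

struct label {
	const char *name;
	unsigned allow_to_peer;		/* what this label may do to its peer */
};

/* stand-in for profile_signal_perm(): does @l grant @request toward @peer? */
static int check_perm(const struct label *l, const struct label *peer,
		      unsigned request)
{
	(void)peer;	/* a real check would match @peer against @l's policy */
	return (l->allow_to_peer & request) == request ? 0 : -13;
}

/* both directions must pass for the signal to be delivered */
static int may_signal(const struct label *sender, const struct label *target)
{
	int error = check_perm(sender, target, MAY_WRITE);

	if (!error)
		error = check_perm(target, sender, MAY_READ);
	return error;
}

int main(void)
{
	struct label sender = { "sender", MAY_WRITE };
	struct label target = { "target", MAY_READ };

	printf("signal %s\n", may_signal(&sender, &target) ? "denied" : "allowed");
	return 0;
}
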
diff --git a/security/apparmor/label.c b/security/apparmor/label.c
index 8a2af96f4da5..c71e4615dd46 100644
--- a/security/apparmor/label.c
+++ b/security/apparmor/label.c
@@ -154,13 +154,14 @@ static int profile_cmp(struct aa_profile *a, struct aa_profile *b)
/**
* vec_cmp - label comparison for set ordering
- * @a: label to compare (NOT NULL)
- * @vec: vector of profiles to compare (NOT NULL)
- * @n: length of @vec
- *
- * Returns: <0 if a < vec
- * ==0 if a == vec
- * >0 if a > vec
+ * @a: aa_profile to compare (NOT NULL)
+ * @an: length of @a
+ * @b: aa_profile to compare (NOT NULL)
+ * @bn: length of @b
+ *
+ * Returns: <0 if @a < @b
+ * ==0 if @a == @b
+ * >0 if @a > @b
*/
static int vec_cmp(struct aa_profile **a, int an, struct aa_profile **b, int bn)
{
@@ -256,6 +257,7 @@ static inline int unique(struct aa_profile **vec, int n)
* aa_vec_unique - canonical sort and unique a list of profiles
* @n: number of refcounted profiles in the list (@n > 0)
* @vec: list of profiles to sort and merge
+ * @flags: null terminator flags of @vec
*
* Returns: the number of duplicates eliminated == references put
*
@@ -584,7 +586,7 @@ bool aa_label_is_unconfined_subset(struct aa_label *set, struct aa_label *sub)
/**
* __label_remove - remove @label from the label set
- * @l: label to remove
+ * @label: label to remove
* @new: label to redirect to
*
* Requires: labels_set(@label)->lock write_lock
@@ -917,8 +919,8 @@ struct aa_label *aa_label_find(struct aa_label *label)
/**
* aa_label_insert - insert label @label into @ls or return existing label
- * @ls - labelset to insert @label into
- * @label - label to insert
+ * @ls: labelset to insert @label into
+ * @label: label to insert
*
* Requires: caller to hold a valid ref on @label
*
@@ -1204,7 +1206,6 @@ struct aa_label *aa_label_find_merge(struct aa_label *a, struct aa_label *b)
/**
* aa_label_merge - attempt to insert new merged label of @a and @b
- * @ls: set of labels to insert label into (NOT NULL)
* @a: label to merge with @b (NOT NULL)
* @b: label to merge with @a (NOT NULL)
* @gfp: memory allocation type
@@ -1269,21 +1270,22 @@ static inline aa_state_t match_component(struct aa_profile *profile,
const char *ns_name;
if (profile->ns == tp->ns)
- return aa_dfa_match(rules->policy.dfa, state, tp->base.hname);
+ return aa_dfa_match(rules->policy->dfa, state, tp->base.hname);
/* try matching with namespace name and then profile */
ns_name = aa_ns_name(profile->ns, tp->ns, true);
- state = aa_dfa_match_len(rules->policy.dfa, state, ":", 1);
- state = aa_dfa_match(rules->policy.dfa, state, ns_name);
- state = aa_dfa_match_len(rules->policy.dfa, state, ":", 1);
- return aa_dfa_match(rules->policy.dfa, state, tp->base.hname);
+ state = aa_dfa_match_len(rules->policy->dfa, state, ":", 1);
+ state = aa_dfa_match(rules->policy->dfa, state, ns_name);
+ state = aa_dfa_match_len(rules->policy->dfa, state, ":", 1);
+ return aa_dfa_match(rules->policy->dfa, state, tp->base.hname);
}
/**
* label_compound_match - find perms for full compound label
* @profile: profile to find perms for
+ * @rules: ruleset to search
* @label: label to check access permissions for
- * @start: state to start match in
+ * @state: state to start match in
* @subns: whether to do permission checks on components in a subns
* @request: permissions to request
* @perms: perms struct to set
@@ -1321,12 +1323,12 @@ next:
label_for_each_cont(i, label, tp) {
if (!aa_ns_visible(profile->ns, tp->ns, subns))
continue;
- state = aa_dfa_match(rules->policy.dfa, state, "//&");
+ state = aa_dfa_match(rules->policy->dfa, state, "//&");
state = match_component(profile, rules, tp, state);
if (!state)
goto fail;
}
- *perms = *aa_lookup_perms(&rules->policy, state);
+ *perms = *aa_lookup_perms(rules->policy, state);
aa_apply_modes_to_perms(profile, perms);
if ((perms->allow & request) != request)
return -EACCES;
@@ -1379,7 +1381,7 @@ static int label_components_match(struct aa_profile *profile,
return 0;
next:
- tmp = *aa_lookup_perms(&rules->policy, state);
+ tmp = *aa_lookup_perms(rules->policy, state);
aa_apply_modes_to_perms(profile, &tmp);
aa_perms_accum(perms, &tmp);
label_for_each_cont(i, label, tp) {
@@ -1388,7 +1390,7 @@ next:
state = match_component(profile, rules, tp, start);
if (!state)
goto fail;
- tmp = *aa_lookup_perms(&rules->policy, state);
+ tmp = *aa_lookup_perms(rules->policy, state);
aa_apply_modes_to_perms(profile, &tmp);
aa_perms_accum(perms, &tmp);
}
@@ -2037,7 +2039,7 @@ out:
/**
* __label_update - insert updated version of @label into labelset
- * @label - the label to update/replace
+ * @label: the label to update/replace
*
* Returns: new label that is up to date
* else NULL on failure
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index a630c951bb3b..4c198d273f09 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -27,7 +27,7 @@ struct aa_perms allperms = { .allow = ALL_PERMS_MASK,
/**
* aa_free_str_table - free entries str table
- * @str: the string table to free (MAYBE NULL)
+ * @t: the string table to free (MAYBE NULL)
*/
void aa_free_str_table(struct aa_str_table *t)
{
@@ -85,6 +85,7 @@ char *aa_split_fqname(char *fqname, char **ns_name)
/**
* skipn_spaces - Removes leading whitespace from @str.
* @str: The string to be stripped.
+ * @n: length of str to parse, will stop at \0 if encountered before n
*
* Returns a pointer to the first non-whitespace character in @str.
* if all whitespace will return NULL
@@ -143,10 +144,10 @@ const char *aa_splitn_fqname(const char *fqname, size_t n, const char **ns_name,
void aa_info_message(const char *str)
{
if (audit_enabled) {
- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, NULL);
+ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, NULL);
- aad(&sa)->info = str;
- aa_audit_msg(AUDIT_APPARMOR_STATUS, &sa, NULL);
+ ad.info = str;
+ aa_audit_msg(AUDIT_APPARMOR_STATUS, &ad, NULL);
}
printk(KERN_INFO "AppArmor: %s\n", str);
}
@@ -281,21 +282,22 @@ void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs,
static void aa_audit_perms_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
+ struct apparmor_audit_data *ad = aad(sa);
- if (aad(sa)->request) {
+ if (ad->request) {
audit_log_format(ab, " requested_mask=");
- aa_audit_perm_mask(ab, aad(sa)->request, aa_file_perm_chrs,
+ aa_audit_perm_mask(ab, ad->request, aa_file_perm_chrs,
PERMS_CHRS_MASK, aa_file_perm_names,
PERMS_NAMES_MASK);
}
- if (aad(sa)->denied) {
+ if (ad->denied) {
audit_log_format(ab, "denied_mask=");
- aa_audit_perm_mask(ab, aad(sa)->denied, aa_file_perm_chrs,
+ aa_audit_perm_mask(ab, ad->denied, aa_file_perm_chrs,
PERMS_CHRS_MASK, aa_file_perm_names,
PERMS_NAMES_MASK);
}
audit_log_format(ab, " peer=");
- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
+ aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
FLAGS_NONE, GFP_ATOMIC);
}
@@ -339,8 +341,8 @@ void aa_profile_match_label(struct aa_profile *profile,
/* TODO: doesn't yet handle extended types */
aa_state_t state;
- state = aa_dfa_next(rules->policy.dfa,
- rules->policy.start[AA_CLASS_LABEL],
+ state = aa_dfa_next(rules->policy->dfa,
+ rules->policy->start[AA_CLASS_LABEL],
type);
aa_label_match(profile, rules, label, state, false, request, perms);
}
@@ -349,21 +351,20 @@ void aa_profile_match_label(struct aa_profile *profile,
/* currently unused */
int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target,
u32 request, int type, u32 *deny,
- struct common_audit_data *sa)
+ struct apparmor_audit_data *ad)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
struct aa_perms perms;
- aad(sa)->label = &profile->label;
- aad(sa)->peer = &target->label;
- aad(sa)->request = request;
+ ad->peer = &target->label;
+ ad->request = request;
aa_profile_match_label(profile, rules, &target->label, type, request,
&perms);
aa_apply_modes_to_perms(profile, &perms);
*deny |= request & perms.deny;
- return aa_check_perms(profile, &perms, request, sa, aa_audit_perms_cb);
+ return aa_check_perms(profile, &perms, request, ad, aa_audit_perms_cb);
}
/**
@@ -371,8 +372,7 @@ int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target,
* @profile: profile being checked
* @perms: perms computed for the request
* @request: requested perms
- * @deny: Returns: explicit deny set
- * @sa: initialized audit structure (MAY BE NULL if not auditing)
+ * @ad: initialized audit structure (MAY BE NULL if not auditing)
* @cb: callback fn for type specific fields (MAY BE NULL)
*
* Returns: 0 if permission else error code
@@ -385,7 +385,7 @@ int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target,
* with a positive value.
*/
int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
- u32 request, struct common_audit_data *sa,
+ u32 request, struct apparmor_audit_data *ad,
void (*cb)(struct audit_buffer *, void *))
{
int type, error;
@@ -394,7 +394,7 @@ int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
if (likely(!denied)) {
/* mask off perms that are not being force audited */
request &= perms->audit;
- if (!request || !sa)
+ if (!request || !ad)
return 0;
type = AUDIT_APPARMOR_AUDIT;
@@ -413,16 +413,16 @@ int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
error = -ENOENT;
denied &= ~perms->quiet;
- if (!sa || !denied)
+ if (!ad || !denied)
return error;
}
- if (sa) {
- aad(sa)->label = &profile->label;
- aad(sa)->request = request;
- aad(sa)->denied = denied;
- aad(sa)->error = error;
- aa_audit_msg(type, sa, cb);
+ if (ad) {
+ ad->subj_label = &profile->label;
+ ad->request = request;
+ ad->denied = denied;
+ ad->error = error;
+ aa_audit_msg(type, ad, cb);
}
if (type == AUDIT_APPARMOR_ALLOWED)
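
aa_check_perms(), whose audit path is reworked above, derives its result from the accumulated masks: whatever is requested but not allowed is denied, quieted bits are dropped from the audit record, and force-audited bits can still generate a record on success. A small sketch of that mask arithmetic (field names follow struct aa_perms; the kill/complain handling and the real audit call are omitted):

#include <stdio.h>

struct perms {
	unsigned allow, audit, quiet, kill;
};

/* returns 0 if @request is allowed, -13 (EACCES) otherwise */
static int check_perms(const struct perms *p, unsigned request)
{
	unsigned denied = request & ~p->allow;

	if (!denied) {
		unsigned forced = request & p->audit;	/* force-audit bits on success */

		if (forced)
			printf("AUDIT allowed request=0x%x\n", forced);
		return 0;
	}

	denied &= ~p->quiet;			/* quieted denials are not logged */
	if (denied)
		printf("AUDIT denied request=0x%x denied=0x%x\n", request, denied);
	return -13;
}

int main(void)
{
	struct perms p = { .allow = 0x5, .audit = 0x1, .quiet = 0x8, .kill = 0 };

	check_perms(&p, 0x1);	/* allowed and force-audited */
	check_perms(&p, 0xa);	/* 0x2 denied and logged, 0x8 denied but quieted */
	return 0;
}
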
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 3fa325d5efac..4981bdf02993 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -49,12 +49,19 @@ union aa_buffer {
DECLARE_FLEX_ARRAY(char, buffer);
};
+struct aa_local_cache {
+ unsigned int hold;
+ unsigned int count;
+ struct list_head head;
+};
+
#define RESERVE_COUNT 2
static int reserve_count = RESERVE_COUNT;
static int buffer_count;
static LIST_HEAD(aa_global_buffers);
static DEFINE_SPINLOCK(aa_buffers_lock);
+static DEFINE_PER_CPU(struct aa_local_cache, aa_local_buffers);
/*
* LSM hook functions
@@ -116,15 +123,17 @@ static int apparmor_ptrace_access_check(struct task_struct *child,
unsigned int mode)
{
struct aa_label *tracer, *tracee;
+ const struct cred *cred;
int error;
+ cred = get_task_cred(child);
+ tracee = cred_label(cred); /* ref count on cred */
tracer = __begin_current_label_crit_section();
- tracee = aa_get_task_label(child);
- error = aa_may_ptrace(tracer, tracee,
+ error = aa_may_ptrace(current_cred(), tracer, cred, tracee,
(mode & PTRACE_MODE_READ) ? AA_PTRACE_READ
: AA_PTRACE_TRACE);
- aa_put_label(tracee);
__end_current_label_crit_section(tracer);
+ put_cred(cred);
return error;
}
@@ -132,12 +141,15 @@ static int apparmor_ptrace_access_check(struct task_struct *child,
static int apparmor_ptrace_traceme(struct task_struct *parent)
{
struct aa_label *tracer, *tracee;
+ const struct cred *cred;
int error;
tracee = __begin_current_label_crit_section();
- tracer = aa_get_task_label(parent);
- error = aa_may_ptrace(tracer, tracee, AA_PTRACE_TRACE);
- aa_put_label(tracer);
+ cred = get_task_cred(parent);
+ tracer = cred_label(cred); /* ref count on cred */
+ error = aa_may_ptrace(cred, tracer, current_cred(), tracee,
+ AA_PTRACE_TRACE);
+ put_cred(cred);
__end_current_label_crit_section(tracee);
return error;
@@ -188,7 +200,7 @@ static int apparmor_capable(const struct cred *cred, struct user_namespace *ns,
label = aa_get_newest_cred_label(cred);
if (!unconfined(label))
- error = aa_capable(label, cap, opts);
+ error = aa_capable(cred, label, cap, opts);
aa_put_label(label);
return error;
@@ -211,7 +223,8 @@ static int common_perm(const char *op, const struct path *path, u32 mask,
label = __begin_current_label_crit_section();
if (!unconfined(label))
- error = aa_path_perm(op, label, path, 0, mask, cond);
+ error = aa_path_perm(op, current_cred(), label, path, 0, mask,
+ cond);
__end_current_label_crit_section(label);
return error;
@@ -357,7 +370,8 @@ static int apparmor_path_link(struct dentry *old_dentry, const struct path *new_
label = begin_current_label_crit_section();
if (!unconfined(label))
- error = aa_path_link(label, old_dentry, new_dir, new_dentry);
+ error = aa_path_link(current_cred(), label, old_dentry, new_dir,
+ new_dentry);
end_current_label_crit_section(label);
return error;
@@ -396,23 +410,27 @@ static int apparmor_path_rename(const struct path *old_dir, struct dentry *old_d
vfsuid = i_uid_into_vfsuid(idmap, d_backing_inode(old_dentry));
cond_exchange.uid = vfsuid_into_kuid(vfsuid);
- error = aa_path_perm(OP_RENAME_SRC, label, &new_path, 0,
+ error = aa_path_perm(OP_RENAME_SRC, current_cred(),
+ label, &new_path, 0,
MAY_READ | AA_MAY_GETATTR | MAY_WRITE |
AA_MAY_SETATTR | AA_MAY_DELETE,
&cond_exchange);
if (!error)
- error = aa_path_perm(OP_RENAME_DEST, label, &old_path,
+ error = aa_path_perm(OP_RENAME_DEST, current_cred(),
+ label, &old_path,
0, MAY_WRITE | AA_MAY_SETATTR |
AA_MAY_CREATE, &cond_exchange);
}
if (!error)
- error = aa_path_perm(OP_RENAME_SRC, label, &old_path, 0,
+ error = aa_path_perm(OP_RENAME_SRC, current_cred(),
+ label, &old_path, 0,
MAY_READ | AA_MAY_GETATTR | MAY_WRITE |
AA_MAY_SETATTR | AA_MAY_DELETE,
&cond);
if (!error)
- error = aa_path_perm(OP_RENAME_DEST, label, &new_path,
+ error = aa_path_perm(OP_RENAME_DEST, current_cred(),
+ label, &new_path,
0, MAY_WRITE | AA_MAY_SETATTR |
AA_MAY_CREATE, &cond);
@@ -467,7 +485,8 @@ static int apparmor_file_open(struct file *file)
vfsuid = i_uid_into_vfsuid(idmap, inode);
cond.uid = vfsuid_into_kuid(vfsuid);
- error = aa_path_perm(OP_OPEN, label, &file->f_path, 0,
+ error = aa_path_perm(OP_OPEN, file->f_cred,
+ label, &file->f_path, 0,
aa_map_file_to_perms(file), &cond);
/* todo cache full allowed permissions set and state */
fctx->allow = aa_map_file_to_perms(file);
@@ -507,7 +526,7 @@ static int common_file_perm(const char *op, struct file *file, u32 mask,
return -EACCES;
label = __begin_current_label_crit_section();
- error = aa_file_perm(op, label, file, mask, in_atomic);
+ error = aa_file_perm(op, current_cred(), label, file, mask, in_atomic);
__end_current_label_crit_section(label);
return error;
@@ -570,6 +589,114 @@ static int apparmor_file_mprotect(struct vm_area_struct *vma,
false);
}
+#ifdef CONFIG_IO_URING
+static const char *audit_uring_mask(u32 mask)
+{
+ if (mask & AA_MAY_CREATE_SQPOLL)
+ return "sqpoll";
+ if (mask & AA_MAY_OVERRIDE_CRED)
+ return "override_creds";
+ return "";
+}
+
+static void audit_uring_cb(struct audit_buffer *ab, void *va)
+{
+ struct apparmor_audit_data *ad = aad_of_va(va);
+
+ if (ad->request & AA_URING_PERM_MASK) {
+ audit_log_format(ab, " requested=\"%s\"",
+ audit_uring_mask(ad->request));
+ if (ad->denied & AA_URING_PERM_MASK) {
+ audit_log_format(ab, " denied=\"%s\"",
+ audit_uring_mask(ad->denied));
+ }
+ }
+ if (ad->uring.target) {
+ audit_log_format(ab, " tcontext=");
+ aa_label_xaudit(ab, labels_ns(ad->subj_label),
+ ad->uring.target,
+ FLAGS_NONE, GFP_ATOMIC);
+ }
+}
+
+static int profile_uring(struct aa_profile *profile, u32 request,
+ struct aa_label *new, int cap,
+ struct apparmor_audit_data *ad)
+{
+ unsigned int state;
+ struct aa_ruleset *rules;
+ int error = 0;
+
+ AA_BUG(!profile);
+
+ rules = list_first_entry(&profile->rules, typeof(*rules), list);
+ state = RULE_MEDIATES(rules, AA_CLASS_IO_URING);
+ if (state) {
+ struct aa_perms perms = { };
+
+ if (new) {
+ aa_label_match(profile, rules, new, state,
+ false, request, &perms);
+ } else {
+ perms = *aa_lookup_perms(rules->policy, state);
+ }
+ aa_apply_modes_to_perms(profile, &perms);
+ error = aa_check_perms(profile, &perms, request, ad,
+ audit_uring_cb);
+ }
+
+ return error;
+}
+
+/**
+ * apparmor_uring_override_creds - check the requested cred override
+ * @new: the target creds
+ *
+ * Check to see if the current task is allowed to override its credentials
+ * to service an io_uring operation.
+ */
+static int apparmor_uring_override_creds(const struct cred *new)
+{
+ struct aa_profile *profile;
+ struct aa_label *label;
+ int error;
+ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_IO_URING,
+ OP_URING_OVERRIDE);
+
+ ad.uring.target = cred_label(new);
+ label = __begin_current_label_crit_section();
+ error = fn_for_each(label, profile,
+ profile_uring(profile, AA_MAY_OVERRIDE_CRED,
+ cred_label(new), CAP_SYS_ADMIN, &ad));
+ __end_current_label_crit_section(label);
+
+ return error;
+}
+
+/**
+ * apparmor_uring_sqpoll - check if an io_uring polling thread can be created
+ *
+ * Check to see if the current task is allowed to create a new io_uring
+ * kernel polling thread.
+ */
+static int apparmor_uring_sqpoll(void)
+{
+ struct aa_profile *profile;
+ struct aa_label *label;
+ int error;
+ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_IO_URING,
+ OP_URING_SQPOLL);
+
+ label = __begin_current_label_crit_section();
+ error = fn_for_each(label, profile,
+ profile_uring(profile, AA_MAY_CREATE_SQPOLL,
+ NULL, CAP_SYS_ADMIN, &ad));
+ __end_current_label_crit_section(label);
+
+ return error;
+}
+#endif /* CONFIG_IO_URING */
+
static int apparmor_sb_mount(const char *dev_name, const struct path *path,
const char *type, unsigned long flags, void *data)
{
@@ -585,23 +712,42 @@ static int apparmor_sb_mount(const char *dev_name, const struct path *path,
label = __begin_current_label_crit_section();
if (!unconfined(label)) {
if (flags & MS_REMOUNT)
- error = aa_remount(label, path, flags, data);
+ error = aa_remount(current_cred(), label, path, flags,
+ data);
else if (flags & MS_BIND)
- error = aa_bind_mount(label, path, dev_name, flags);
+ error = aa_bind_mount(current_cred(), label, path,
+ dev_name, flags);
else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE |
MS_UNBINDABLE))
- error = aa_mount_change_type(label, path, flags);
+ error = aa_mount_change_type(current_cred(), label,
+ path, flags);
else if (flags & MS_MOVE)
- error = aa_move_mount(label, path, dev_name);
+ error = aa_move_mount_old(current_cred(), label, path,
+ dev_name);
else
- error = aa_new_mount(label, dev_name, path, type,
- flags, data);
+ error = aa_new_mount(current_cred(), label, dev_name,
+ path, type, flags, data);
}
__end_current_label_crit_section(label);
return error;
}
+static int apparmor_move_mount(const struct path *from_path,
+ const struct path *to_path)
+{
+ struct aa_label *label;
+ int error = 0;
+
+ label = __begin_current_label_crit_section();
+ if (!unconfined(label))
+ error = aa_move_mount(current_cred(), label, from_path,
+ to_path);
+ __end_current_label_crit_section(label);
+
+ return error;
+}
+
static int apparmor_sb_umount(struct vfsmount *mnt, int flags)
{
struct aa_label *label;
@@ -609,7 +755,7 @@ static int apparmor_sb_umount(struct vfsmount *mnt, int flags)
label = __begin_current_label_crit_section();
if (!unconfined(label))
- error = aa_umount(label, mnt, flags);
+ error = aa_umount(current_cred(), label, mnt, flags);
__end_current_label_crit_section(label);
return error;
@@ -623,7 +769,7 @@ static int apparmor_sb_pivotroot(const struct path *old_path,
label = aa_get_current_label();
if (!unconfined(label))
- error = aa_pivotroot(label, old_path, new_path);
+ error = aa_pivotroot(current_cred(), label, old_path, new_path);
aa_put_label(label);
return error;
@@ -662,7 +808,7 @@ static int apparmor_setprocattr(const char *name, void *value,
char *command, *largs = NULL, *args = value;
size_t arg_size;
int error;
- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE,
+ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE,
OP_SETPROCATTR);
if (size == 0)
@@ -722,11 +868,11 @@ out:
return error;
fail:
- aad(&sa)->label = begin_current_label_crit_section();
- aad(&sa)->info = name;
- aad(&sa)->error = error = -EINVAL;
- aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL);
- end_current_label_crit_section(aad(&sa)->label);
+ ad.subj_label = begin_current_label_crit_section();
+ ad.info = name;
+ ad.error = error = -EINVAL;
+ aa_audit_msg(AUDIT_APPARMOR_DENIED, &ad, NULL);
+ end_current_label_crit_section(ad.subj_label);
goto out;
}
@@ -766,9 +912,9 @@ static void apparmor_bprm_committed_creds(const struct linux_binprm *bprm)
static void apparmor_current_getsecid_subj(u32 *secid)
{
- struct aa_label *label = aa_get_current_label();
+ struct aa_label *label = __begin_current_label_crit_section();
*secid = label->secid;
- aa_put_label(label);
+ __end_current_label_crit_section(label);
}
static void apparmor_task_getsecid_obj(struct task_struct *p, u32 *secid)
@@ -785,7 +931,8 @@ static int apparmor_task_setrlimit(struct task_struct *task,
int error = 0;
if (!unconfined(label))
- error = aa_task_setrlimit(label, task, resource, new_rlim);
+ error = aa_task_setrlimit(current_cred(), label, task,
+ resource, new_rlim);
__end_current_label_crit_section(label);
return error;
@@ -794,26 +941,48 @@ static int apparmor_task_setrlimit(struct task_struct *task,
static int apparmor_task_kill(struct task_struct *target, struct kernel_siginfo *info,
int sig, const struct cred *cred)
{
+ const struct cred *tc;
struct aa_label *cl, *tl;
int error;
+ tc = get_task_cred(target);
+ tl = aa_get_newest_cred_label(tc);
if (cred) {
/*
* Dealing with USB IO specific behavior
*/
cl = aa_get_newest_cred_label(cred);
- tl = aa_get_task_label(target);
- error = aa_may_signal(cl, tl, sig);
+ error = aa_may_signal(cred, cl, tc, tl, sig);
aa_put_label(cl);
- aa_put_label(tl);
return error;
+ } else {
+ cl = __begin_current_label_crit_section();
+ error = aa_may_signal(current_cred(), cl, tc, tl, sig);
+ __end_current_label_crit_section(cl);
}
-
- cl = __begin_current_label_crit_section();
- tl = aa_get_task_label(target);
- error = aa_may_signal(cl, tl, sig);
aa_put_label(tl);
- __end_current_label_crit_section(cl);
+ put_cred(tc);
+
+ return error;
+}
+
+static int apparmor_userns_create(const struct cred *cred)
+{
+ struct aa_label *label;
+ struct aa_profile *profile;
+ int error = 0;
+ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_TASK, AA_CLASS_NS,
+ OP_USERNS_CREATE);
+
+ ad.subj_cred = current_cred();
+
+ label = begin_current_label_crit_section();
+ if (!unconfined(label)) {
+ error = fn_for_each(label, profile,
+ aa_profile_ns_perm(profile, &ad,
+ AA_USERNS_CREATE));
+ }
+ end_current_label_crit_section(label);
return error;
}
@@ -829,7 +998,7 @@ static int apparmor_sk_alloc_security(struct sock *sk, int family, gfp_t flags)
if (!ctx)
return -ENOMEM;
- SK_CTX(sk) = ctx;
+ sk->sk_security = ctx;
return 0;
}
@@ -839,9 +1008,9 @@ static int apparmor_sk_alloc_security(struct sock *sk, int family, gfp_t flags)
*/
static void apparmor_sk_free_security(struct sock *sk)
{
- struct aa_sk_ctx *ctx = SK_CTX(sk);
+ struct aa_sk_ctx *ctx = aa_sock(sk);
- SK_CTX(sk) = NULL;
+ sk->sk_security = NULL;
aa_put_label(ctx->label);
aa_put_label(ctx->peer);
kfree(ctx);
@@ -853,8 +1022,8 @@ static void apparmor_sk_free_security(struct sock *sk)
static void apparmor_sk_clone_security(const struct sock *sk,
struct sock *newsk)
{
- struct aa_sk_ctx *ctx = SK_CTX(sk);
- struct aa_sk_ctx *new = SK_CTX(newsk);
+ struct aa_sk_ctx *ctx = aa_sock(sk);
+ struct aa_sk_ctx *new = aa_sock(newsk);
if (new->label)
aa_put_label(new->label);
@@ -879,7 +1048,8 @@ static int apparmor_socket_create(int family, int type, int protocol, int kern)
if (!(kern || unconfined(label)))
error = af_select(family,
create_perm(label, family, type, protocol),
- aa_af_perm(label, OP_CREATE, AA_MAY_CREATE,
+ aa_af_perm(current_cred(), label,
+ OP_CREATE, AA_MAY_CREATE,
family, type, protocol));
end_current_label_crit_section(label);
@@ -907,7 +1077,7 @@ static int apparmor_socket_post_create(struct socket *sock, int family,
label = aa_get_current_label();
if (sock->sk) {
- struct aa_sk_ctx *ctx = SK_CTX(sock->sk);
+ struct aa_sk_ctx *ctx = aa_sock(sock->sk);
aa_put_label(ctx->label);
ctx->label = aa_get_label(label);
@@ -1092,7 +1262,7 @@ static int apparmor_socket_shutdown(struct socket *sock, int how)
*/
static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
- struct aa_sk_ctx *ctx = SK_CTX(sk);
+ struct aa_sk_ctx *ctx = aa_sock(sk);
if (!skb->secmark)
return 0;
@@ -1105,7 +1275,7 @@ static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
static struct aa_label *sk_peer_label(struct sock *sk)
{
- struct aa_sk_ctx *ctx = SK_CTX(sk);
+ struct aa_sk_ctx *ctx = aa_sock(sk);
if (ctx->peer)
return ctx->peer;
@@ -1186,7 +1356,7 @@ static int apparmor_socket_getpeersec_dgram(struct socket *sock,
*/
static void apparmor_sock_graft(struct sock *sk, struct socket *parent)
{
- struct aa_sk_ctx *ctx = SK_CTX(sk);
+ struct aa_sk_ctx *ctx = aa_sock(sk);
if (!ctx->label)
ctx->label = aa_get_current_label();
@@ -1196,7 +1366,7 @@ static void apparmor_sock_graft(struct sock *sk, struct socket *parent)
static int apparmor_inet_conn_request(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req)
{
- struct aa_sk_ctx *ctx = SK_CTX(sk);
+ struct aa_sk_ctx *ctx = aa_sock(sk);
if (!skb->secmark)
return 0;
@@ -1221,6 +1391,7 @@ static struct security_hook_list apparmor_hooks[] __ro_after_init = {
LSM_HOOK_INIT(capget, apparmor_capget),
LSM_HOOK_INIT(capable, apparmor_capable),
+ LSM_HOOK_INIT(move_mount, apparmor_move_mount),
LSM_HOOK_INIT(sb_mount, apparmor_sb_mount),
LSM_HOOK_INIT(sb_umount, apparmor_sb_umount),
LSM_HOOK_INIT(sb_pivotroot, apparmor_sb_pivotroot),
@@ -1294,6 +1465,7 @@ static struct security_hook_list apparmor_hooks[] __ro_after_init = {
LSM_HOOK_INIT(task_getsecid_obj, apparmor_task_getsecid_obj),
LSM_HOOK_INIT(task_setrlimit, apparmor_task_setrlimit),
LSM_HOOK_INIT(task_kill, apparmor_task_kill),
+ LSM_HOOK_INIT(userns_create, apparmor_userns_create),
#ifdef CONFIG_AUDIT
LSM_HOOK_INIT(audit_rule_init, aa_audit_rule_init),
@@ -1305,6 +1477,11 @@ static struct security_hook_list apparmor_hooks[] __ro_after_init = {
LSM_HOOK_INIT(secid_to_secctx, apparmor_secid_to_secctx),
LSM_HOOK_INIT(secctx_to_secid, apparmor_secctx_to_secid),
LSM_HOOK_INIT(release_secctx, apparmor_release_secctx),
+
+#ifdef CONFIG_IO_URING
+ LSM_HOOK_INIT(uring_override_creds, apparmor_uring_override_creds),
+ LSM_HOOK_INIT(uring_sqpoll, apparmor_uring_sqpoll),
+#endif
};
/*
@@ -1635,11 +1812,32 @@ static int param_set_mode(const char *val, const struct kernel_param *kp)
char *aa_get_buffer(bool in_atomic)
{
union aa_buffer *aa_buf;
+ struct aa_local_cache *cache;
bool try_again = true;
gfp_t flags = (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ /* use per cpu cached buffers first */
+ cache = get_cpu_ptr(&aa_local_buffers);
+ if (!list_empty(&cache->head)) {
+ aa_buf = list_first_entry(&cache->head, union aa_buffer, list);
+ list_del(&aa_buf->list);
+ cache->hold--;
+ cache->count--;
+ put_cpu_ptr(&aa_local_buffers);
+ return &aa_buf->buffer[0];
+ }
+ put_cpu_ptr(&aa_local_buffers);
+
+ if (!spin_trylock(&aa_buffers_lock)) {
+ cache = get_cpu_ptr(&aa_local_buffers);
+ cache->hold += 1;
+ put_cpu_ptr(&aa_local_buffers);
+ spin_lock(&aa_buffers_lock);
+ } else {
+ cache = get_cpu_ptr(&aa_local_buffers);
+ put_cpu_ptr(&aa_local_buffers);
+ }
retry:
- spin_lock(&aa_buffers_lock);
if (buffer_count > reserve_count ||
(in_atomic && !list_empty(&aa_global_buffers))) {
aa_buf = list_first_entry(&aa_global_buffers, union aa_buffer,
@@ -1665,6 +1863,7 @@ retry:
if (!aa_buf) {
if (try_again) {
try_again = false;
+ spin_lock(&aa_buffers_lock);
goto retry;
}
pr_warn_once("AppArmor: Failed to allocate a memory buffer.\n");
@@ -1676,15 +1875,34 @@ retry:
void aa_put_buffer(char *buf)
{
union aa_buffer *aa_buf;
+ struct aa_local_cache *cache;
if (!buf)
return;
aa_buf = container_of(buf, union aa_buffer, buffer[0]);
- spin_lock(&aa_buffers_lock);
- list_add(&aa_buf->list, &aa_global_buffers);
- buffer_count++;
- spin_unlock(&aa_buffers_lock);
+ cache = get_cpu_ptr(&aa_local_buffers);
+ if (!cache->hold) {
+ put_cpu_ptr(&aa_local_buffers);
+
+ if (spin_trylock(&aa_buffers_lock)) {
+ /* put back on global list */
+ list_add(&aa_buf->list, &aa_global_buffers);
+ buffer_count++;
+ spin_unlock(&aa_buffers_lock);
+ cache = get_cpu_ptr(&aa_local_buffers);
+ put_cpu_ptr(&aa_local_buffers);
+ return;
+ }
+ /* contention on global list, fallback to percpu */
+ cache = get_cpu_ptr(&aa_local_buffers);
+ cache->hold += 1;
+ }
+
+ /* cache in percpu list */
+ list_add(&aa_buf->list, &cache->head);
+ cache->count++;
+ put_cpu_ptr(&aa_local_buffers);
}
/*
@@ -1727,6 +1945,15 @@ static int __init alloc_buffers(void)
int i, num;
/*
+ * per cpu set of cached allocated buffers used to help reduce
+ * lock contention
+ */
+ for_each_possible_cpu(i) {
+ per_cpu(aa_local_buffers, i).hold = 0;
+ per_cpu(aa_local_buffers, i).count = 0;
+ INIT_LIST_HEAD(&per_cpu(aa_local_buffers, i).head);
+ }
+ /*
* A function may require two buffers at once. Usually the buffers are
* used for a short period of time and are shared. On UP kernel buffers
* two should be enough, with more CPUs it is possible that more
@@ -1765,6 +1992,7 @@ static int apparmor_dointvec(struct ctl_table *table, int write,
}
static struct ctl_table apparmor_sysctl_table[] = {
+#ifdef CONFIG_USER_NS
{
.procname = "unprivileged_userns_apparmor_policy",
.data = &unprivileged_userns_apparmor_policy,
@@ -1772,6 +2000,7 @@ static struct ctl_table apparmor_sysctl_table[] = {
.mode = 0600,
.proc_handler = apparmor_dointvec,
},
+#endif /* CONFIG_USER_NS */
{
.procname = "apparmor_display_secid_mode",
.data = &apparmor_display_secid_mode,
@@ -1779,7 +2008,13 @@ static struct ctl_table apparmor_sysctl_table[] = {
.mode = 0600,
.proc_handler = apparmor_dointvec,
},
-
+ {
+ .procname = "apparmor_restrict_unprivileged_unconfined",
+ .data = &aa_unprivileged_unconfined_restricted,
+ .maxlen = sizeof(int),
+ .mode = 0600,
+ .proc_handler = apparmor_dointvec,
+ },
{ }
};
@@ -1809,7 +2044,7 @@ static unsigned int apparmor_ip_postroute(void *priv,
if (sk == NULL)
return NF_ACCEPT;
- ctx = SK_CTX(sk);
+ ctx = aa_sock(sk);
if (!apparmor_secmark_check(ctx->label, OP_SENDMSG, AA_MAY_SEND,
skb->secmark, sk))
return NF_ACCEPT;
@@ -1868,6 +2103,69 @@ static int __init apparmor_nf_ip_init(void)
__initcall(apparmor_nf_ip_init);
#endif
+static char nulldfa_src[] = {
+ #include "nulldfa.in"
+};
+struct aa_dfa *nulldfa;
+
+static char stacksplitdfa_src[] = {
+ #include "stacksplitdfa.in"
+};
+struct aa_dfa *stacksplitdfa;
+struct aa_policydb *nullpdb;
+
+static int __init aa_setup_dfa_engine(void)
+{
+ int error = -ENOMEM;
+
+ nullpdb = aa_alloc_pdb(GFP_KERNEL);
+ if (!nullpdb)
+ return -ENOMEM;
+
+ nulldfa = aa_dfa_unpack(nulldfa_src, sizeof(nulldfa_src),
+ TO_ACCEPT1_FLAG(YYTD_DATA32) |
+ TO_ACCEPT2_FLAG(YYTD_DATA32));
+ if (IS_ERR(nulldfa)) {
+ error = PTR_ERR(nulldfa);
+ goto fail;
+ }
+ nullpdb->dfa = aa_get_dfa(nulldfa);
+ nullpdb->perms = kcalloc(2, sizeof(struct aa_perms), GFP_KERNEL);
+ if (!nullpdb->perms)
+ goto fail;
+ nullpdb->size = 2;
+
+ stacksplitdfa = aa_dfa_unpack(stacksplitdfa_src,
+ sizeof(stacksplitdfa_src),
+ TO_ACCEPT1_FLAG(YYTD_DATA32) |
+ TO_ACCEPT2_FLAG(YYTD_DATA32));
+ if (IS_ERR(stacksplitdfa)) {
+ error = PTR_ERR(stacksplitdfa);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ aa_put_pdb(nullpdb);
+ aa_put_dfa(nulldfa);
+ nullpdb = NULL;
+ nulldfa = NULL;
+ stacksplitdfa = NULL;
+
+ return error;
+}
+
+static void __init aa_teardown_dfa_engine(void)
+{
+ aa_put_dfa(stacksplitdfa);
+ aa_put_dfa(nulldfa);
+ aa_put_pdb(nullpdb);
+ nullpdb = NULL;
+ stacksplitdfa = NULL;
+ nulldfa = NULL;
+}
+
static int __init apparmor_init(void)
{
int error;
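
The aa_get_buffer()/aa_put_buffer() rework above checks a per-CPU cache before touching the spinlock-protected global list, and uses the cache's hold counter to keep frees local once contention on the global lock has been observed. Below is a minimal userspace sketch of that idea, using thread-local storage and a pthread mutex in place of per-CPU data and the kernel spinlock; all names and sizes are illustrative and are not part of the patch.

#include <pthread.h>
#include <stdlib.h>

struct buf { struct buf *next; char data[8192]; };

static struct buf *global_list;
static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;
static __thread struct buf *local_list;	/* thread-local stand-in for the per-CPU cache */
static __thread int local_hold;		/* raised when the global lock was contended */

static struct buf *get_buf(void)
{
	struct buf *b = local_list;

	if (b) {			/* fast path: no lock taken at all */
		local_list = b->next;
		if (local_hold)
			local_hold--;
		return b;
	}
	if (pthread_mutex_trylock(&global_lock) != 0) {
		local_hold++;		/* remember that the lock was busy */
		pthread_mutex_lock(&global_lock);
	}
	b = global_list;
	if (b)
		global_list = b->next;
	pthread_mutex_unlock(&global_lock);
	return b ? b : malloc(sizeof(*b));
}

static void put_buf(struct buf *b)
{
	if (!b)
		return;
	if (!local_hold) {
		if (pthread_mutex_trylock(&global_lock) == 0) {
			b->next = global_list;	/* uncontended: back to the global pool */
			global_list = b;
			pthread_mutex_unlock(&global_lock);
			return;
		}
		local_hold++;		/* contended: prefer the local cache from now on */
	}
	b->next = local_list;
	local_list = b;
}

The fast path never takes a lock; only a cache miss or an uncontended free touches the shared list, which is the effect the hold counter is there to preserve.
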
diff --git a/security/apparmor/match.c b/security/apparmor/match.c
index b97ef5e1db73..517d77d3c34c 100644
--- a/security/apparmor/match.c
+++ b/security/apparmor/match.c
@@ -21,50 +21,6 @@
#define base_idx(X) ((X) & 0xffffff)
-static char nulldfa_src[] = {
- #include "nulldfa.in"
-};
-struct aa_dfa *nulldfa;
-
-static char stacksplitdfa_src[] = {
- #include "stacksplitdfa.in"
-};
-struct aa_dfa *stacksplitdfa;
-
-int __init aa_setup_dfa_engine(void)
-{
- int error;
-
- nulldfa = aa_dfa_unpack(nulldfa_src, sizeof(nulldfa_src),
- TO_ACCEPT1_FLAG(YYTD_DATA32) |
- TO_ACCEPT2_FLAG(YYTD_DATA32));
- if (IS_ERR(nulldfa)) {
- error = PTR_ERR(nulldfa);
- nulldfa = NULL;
- return error;
- }
-
- stacksplitdfa = aa_dfa_unpack(stacksplitdfa_src,
- sizeof(stacksplitdfa_src),
- TO_ACCEPT1_FLAG(YYTD_DATA32) |
- TO_ACCEPT2_FLAG(YYTD_DATA32));
- if (IS_ERR(stacksplitdfa)) {
- aa_put_dfa(nulldfa);
- nulldfa = NULL;
- error = PTR_ERR(stacksplitdfa);
- stacksplitdfa = NULL;
- return error;
- }
-
- return 0;
-}
-
-void __init aa_teardown_dfa_engine(void)
-{
- aa_put_dfa(stacksplitdfa);
- aa_put_dfa(nulldfa);
-}
-
/**
* unpack_table - unpack a dfa table (one of accept, default, base, next check)
* @blob: data to unpack (NOT NULL)
@@ -136,7 +92,7 @@ fail:
/**
* verify_table_headers - verify that the tables headers are as expected
- * @tables - array of dfa tables to check (NOT NULL)
+ * @tables: array of dfa tables to check (NOT NULL)
* @flags: flags controlling what type of accept table are acceptable
*
* Assumes dfa has gone through the first pass verification done by unpacking
@@ -283,7 +239,7 @@ static void dfa_free(struct aa_dfa *dfa)
/**
* aa_dfa_free_kref - free aa_dfa by kref (called by aa_put_dfa)
- * @kr: kref callback for freeing of a dfa (NOT NULL)
+ * @kref: kref callback for freeing of a dfa (NOT NULL)
*/
void aa_dfa_free_kref(struct kref *kref)
{
diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
index cdfa430ae216..fb30204c761a 100644
--- a/security/apparmor/mount.c
+++ b/security/apparmor/mount.c
@@ -86,32 +86,34 @@ static void audit_mnt_flags(struct audit_buffer *ab, unsigned long flags)
static void audit_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
+ struct apparmor_audit_data *ad = aad(sa);
- if (aad(sa)->mnt.type) {
+ if (ad->mnt.type) {
audit_log_format(ab, " fstype=");
- audit_log_untrustedstring(ab, aad(sa)->mnt.type);
+ audit_log_untrustedstring(ab, ad->mnt.type);
}
- if (aad(sa)->mnt.src_name) {
+ if (ad->mnt.src_name) {
audit_log_format(ab, " srcname=");
- audit_log_untrustedstring(ab, aad(sa)->mnt.src_name);
+ audit_log_untrustedstring(ab, ad->mnt.src_name);
}
- if (aad(sa)->mnt.trans) {
+ if (ad->mnt.trans) {
audit_log_format(ab, " trans=");
- audit_log_untrustedstring(ab, aad(sa)->mnt.trans);
+ audit_log_untrustedstring(ab, ad->mnt.trans);
}
- if (aad(sa)->mnt.flags) {
+ if (ad->mnt.flags) {
audit_log_format(ab, " flags=\"");
- audit_mnt_flags(ab, aad(sa)->mnt.flags);
+ audit_mnt_flags(ab, ad->mnt.flags);
audit_log_format(ab, "\"");
}
- if (aad(sa)->mnt.data) {
+ if (ad->mnt.data) {
audit_log_format(ab, " options=");
- audit_log_untrustedstring(ab, aad(sa)->mnt.data);
+ audit_log_untrustedstring(ab, ad->mnt.data);
}
}
/**
* audit_mount - handle the auditing of mount operations
+ * @subj_cred: cred of the subject
* @profile: the profile being enforced (NOT NULL)
* @op: operation being mediated (NOT NULL)
* @name: name of object being mediated (MAYBE NULL)
@@ -127,14 +129,15 @@ static void audit_cb(struct audit_buffer *ab, void *va)
*
* Returns: %0 or error on failure
*/
-static int audit_mount(struct aa_profile *profile, const char *op,
+static int audit_mount(const struct cred *subj_cred,
+ struct aa_profile *profile, const char *op,
const char *name, const char *src_name,
const char *type, const char *trans,
unsigned long flags, const void *data, u32 request,
struct aa_perms *perms, const char *info, int error)
{
int audit_type = AUDIT_APPARMOR_AUTO;
- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_MOUNT, op);
+ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_MOUNT, op);
if (likely(!error)) {
u32 mask = perms->audit;
@@ -165,17 +168,18 @@ static int audit_mount(struct aa_profile *profile, const char *op,
return error;
}
- aad(&sa)->name = name;
- aad(&sa)->mnt.src_name = src_name;
- aad(&sa)->mnt.type = type;
- aad(&sa)->mnt.trans = trans;
- aad(&sa)->mnt.flags = flags;
+ ad.subj_cred = subj_cred;
+ ad.name = name;
+ ad.mnt.src_name = src_name;
+ ad.mnt.type = type;
+ ad.mnt.trans = trans;
+ ad.mnt.flags = flags;
if (data && (perms->audit & AA_AUDIT_DATA))
- aad(&sa)->mnt.data = data;
- aad(&sa)->info = info;
- aad(&sa)->error = error;
+ ad.mnt.data = data;
+ ad.info = info;
+ ad.error = error;
- return aa_audit(audit_type, profile, &sa, audit_cb);
+ return aa_audit(audit_type, profile, &ad, audit_cb);
}
/**
@@ -283,6 +287,7 @@ static int path_flags(struct aa_profile *profile, const struct path *path)
/**
* match_mnt_path_str - handle path matching for mount
+ * @subj_cred: cred of confined subject
* @profile: the confining profile
* @mntpath: for the mntpnt (NOT NULL)
* @buffer: buffer to be used to lookup mntpath
@@ -295,7 +300,8 @@ static int path_flags(struct aa_profile *profile, const struct path *path)
*
* Returns: 0 on success else error
*/
-static int match_mnt_path_str(struct aa_profile *profile,
+static int match_mnt_path_str(const struct cred *subj_cred,
+ struct aa_profile *profile,
const struct path *mntpath, char *buffer,
const char *devname, const char *type,
unsigned long flags, void *data, bool binary,
@@ -326,8 +332,8 @@ static int match_mnt_path_str(struct aa_profile *profile,
}
error = -EACCES;
- pos = do_match_mnt(&rules->policy,
- rules->policy.start[AA_CLASS_MOUNT],
+ pos = do_match_mnt(rules->policy,
+ rules->policy->start[AA_CLASS_MOUNT],
mntpnt, devname, type, flags, data, binary, &perms);
if (pos) {
info = mnt_info_table[pos];
@@ -336,12 +342,14 @@ static int match_mnt_path_str(struct aa_profile *profile,
error = 0;
audit:
- return audit_mount(profile, OP_MOUNT, mntpnt, devname, type, NULL,
+ return audit_mount(subj_cred, profile, OP_MOUNT, mntpnt, devname,
+ type, NULL,
flags, data, AA_MAY_MOUNT, &perms, info, error);
}
/**
* match_mnt - handle path matching for mount
+ * @subj_cred: cred of the subject
* @profile: the confining profile
* @path: for the mntpnt (NOT NULL)
* @buffer: buffer to be used to lookup mntpath
@@ -354,7 +362,8 @@ audit:
*
* Returns: 0 on success else error
*/
-static int match_mnt(struct aa_profile *profile, const struct path *path,
+static int match_mnt(const struct cred *subj_cred,
+ struct aa_profile *profile, const struct path *path,
char *buffer, const struct path *devpath, char *devbuffer,
const char *type, unsigned long flags, void *data,
bool binary)
@@ -378,11 +387,12 @@ static int match_mnt(struct aa_profile *profile, const struct path *path,
devname = ERR_PTR(error);
}
- return match_mnt_path_str(profile, path, buffer, devname, type, flags,
- data, binary, info);
+ return match_mnt_path_str(subj_cred, profile, path, buffer, devname,
+ type, flags, data, binary, info);
}
-int aa_remount(struct aa_label *label, const struct path *path,
+int aa_remount(const struct cred *subj_cred,
+ struct aa_label *label, const struct path *path,
unsigned long flags, void *data)
{
struct aa_profile *profile;
@@ -399,14 +409,16 @@ int aa_remount(struct aa_label *label, const struct path *path,
if (!buffer)
return -ENOMEM;
error = fn_for_each_confined(label, profile,
- match_mnt(profile, path, buffer, NULL, NULL, NULL,
+ match_mnt(subj_cred, profile, path, buffer, NULL,
+ NULL, NULL,
flags, data, binary));
aa_put_buffer(buffer);
return error;
}
-int aa_bind_mount(struct aa_label *label, const struct path *path,
+int aa_bind_mount(const struct cred *subj_cred,
+ struct aa_label *label, const struct path *path,
const char *dev_name, unsigned long flags)
{
struct aa_profile *profile;
@@ -433,8 +445,8 @@ int aa_bind_mount(struct aa_label *label, const struct path *path,
goto out;
error = fn_for_each_confined(label, profile,
- match_mnt(profile, path, buffer, &old_path, old_buffer,
- NULL, flags, NULL, false));
+ match_mnt(subj_cred, profile, path, buffer, &old_path,
+ old_buffer, NULL, flags, NULL, false));
out:
aa_put_buffer(buffer);
aa_put_buffer(old_buffer);
@@ -443,7 +455,8 @@ out:
return error;
}
-int aa_mount_change_type(struct aa_label *label, const struct path *path,
+int aa_mount_change_type(const struct cred *subj_cred,
+ struct aa_label *label, const struct path *path,
unsigned long flags)
{
struct aa_profile *profile;
@@ -461,50 +474,63 @@ int aa_mount_change_type(struct aa_label *label, const struct path *path,
if (!buffer)
return -ENOMEM;
error = fn_for_each_confined(label, profile,
- match_mnt(profile, path, buffer, NULL, NULL, NULL,
+ match_mnt(subj_cred, profile, path, buffer, NULL,
+ NULL, NULL,
flags, NULL, false));
aa_put_buffer(buffer);
return error;
}
-int aa_move_mount(struct aa_label *label, const struct path *path,
- const char *orig_name)
+int aa_move_mount(const struct cred *subj_cred,
+ struct aa_label *label, const struct path *from_path,
+ const struct path *to_path)
{
struct aa_profile *profile;
- char *buffer = NULL, *old_buffer = NULL;
- struct path old_path;
+ char *to_buffer = NULL, *from_buffer = NULL;
int error;
AA_BUG(!label);
- AA_BUG(!path);
+ AA_BUG(!from_path);
+ AA_BUG(!to_path);
+
+ to_buffer = aa_get_buffer(false);
+ from_buffer = aa_get_buffer(false);
+ error = -ENOMEM;
+ if (!to_buffer || !from_buffer)
+ goto out;
+ error = fn_for_each_confined(label, profile,
+ match_mnt(subj_cred, profile, to_path, to_buffer,
+ from_path, from_buffer,
+ NULL, MS_MOVE, NULL, false));
+out:
+ aa_put_buffer(to_buffer);
+ aa_put_buffer(from_buffer);
+
+ return error;
+}
+
+int aa_move_mount_old(const struct cred *subj_cred, struct aa_label *label,
+ const struct path *path, const char *orig_name)
+{
+ struct path old_path;
+ int error;
if (!orig_name || !*orig_name)
return -EINVAL;
-
error = kern_path(orig_name, LOOKUP_FOLLOW, &old_path);
if (error)
return error;
- buffer = aa_get_buffer(false);
- old_buffer = aa_get_buffer(false);
- error = -ENOMEM;
- if (!buffer || !old_buffer)
- goto out;
- error = fn_for_each_confined(label, profile,
- match_mnt(profile, path, buffer, &old_path, old_buffer,
- NULL, MS_MOVE, NULL, false));
-out:
- aa_put_buffer(buffer);
- aa_put_buffer(old_buffer);
+ error = aa_move_mount(subj_cred, label, &old_path, path);
path_put(&old_path);
return error;
}
-int aa_new_mount(struct aa_label *label, const char *dev_name,
- const struct path *path, const char *type, unsigned long flags,
- void *data)
+int aa_new_mount(const struct cred *subj_cred, struct aa_label *label,
+ const char *dev_name, const struct path *path,
+ const char *type, unsigned long flags, void *data)
{
struct aa_profile *profile;
char *buffer = NULL, *dev_buffer = NULL;
@@ -549,12 +575,14 @@ int aa_new_mount(struct aa_label *label, const char *dev_name,
goto out;
}
error = fn_for_each_confined(label, profile,
- match_mnt(profile, path, buffer, dev_path, dev_buffer,
+ match_mnt(subj_cred, profile, path, buffer,
+ dev_path, dev_buffer,
type, flags, data, binary));
} else {
error = fn_for_each_confined(label, profile,
- match_mnt_path_str(profile, path, buffer, dev_name,
- type, flags, data, binary, NULL));
+ match_mnt_path_str(subj_cred, profile, path,
+ buffer, dev_name,
+ type, flags, data, binary, NULL));
}
out:
@@ -566,7 +594,8 @@ out:
return error;
}
-static int profile_umount(struct aa_profile *profile, const struct path *path,
+static int profile_umount(const struct cred *subj_cred,
+ struct aa_profile *profile, const struct path *path,
char *buffer)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
@@ -587,19 +616,21 @@ static int profile_umount(struct aa_profile *profile, const struct path *path,
if (error)
goto audit;
- state = aa_dfa_match(rules->policy.dfa,
- rules->policy.start[AA_CLASS_MOUNT],
+ state = aa_dfa_match(rules->policy->dfa,
+ rules->policy->start[AA_CLASS_MOUNT],
name);
- perms = *aa_lookup_perms(&rules->policy, state);
+ perms = *aa_lookup_perms(rules->policy, state);
if (AA_MAY_UMOUNT & ~perms.allow)
error = -EACCES;
audit:
- return audit_mount(profile, OP_UMOUNT, name, NULL, NULL, NULL, 0, NULL,
+ return audit_mount(subj_cred, profile, OP_UMOUNT, name, NULL, NULL,
+ NULL, 0, NULL,
AA_MAY_UMOUNT, &perms, info, error);
}
-int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags)
+int aa_umount(const struct cred *subj_cred, struct aa_label *label,
+ struct vfsmount *mnt, int flags)
{
struct aa_profile *profile;
char *buffer = NULL;
@@ -614,7 +645,7 @@ int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags)
return -ENOMEM;
error = fn_for_each_confined(label, profile,
- profile_umount(profile, &path, buffer));
+ profile_umount(subj_cred, profile, &path, buffer));
aa_put_buffer(buffer);
return error;
@@ -624,7 +655,8 @@ int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags)
*
* Returns: label for transition or ERR_PTR. Does not return NULL
*/
-static struct aa_label *build_pivotroot(struct aa_profile *profile,
+static struct aa_label *build_pivotroot(const struct cred *subj_cred,
+ struct aa_profile *profile,
const struct path *new_path,
char *new_buffer,
const struct path *old_path,
@@ -658,18 +690,19 @@ static struct aa_label *build_pivotroot(struct aa_profile *profile,
goto audit;
error = -EACCES;
- state = aa_dfa_match(rules->policy.dfa,
- rules->policy.start[AA_CLASS_MOUNT],
+ state = aa_dfa_match(rules->policy->dfa,
+ rules->policy->start[AA_CLASS_MOUNT],
new_name);
- state = aa_dfa_null_transition(rules->policy.dfa, state);
- state = aa_dfa_match(rules->policy.dfa, state, old_name);
- perms = *aa_lookup_perms(&rules->policy, state);
+ state = aa_dfa_null_transition(rules->policy->dfa, state);
+ state = aa_dfa_match(rules->policy->dfa, state, old_name);
+ perms = *aa_lookup_perms(rules->policy, state);
if (AA_MAY_PIVOTROOT & perms.allow)
error = 0;
audit:
- error = audit_mount(profile, OP_PIVOTROOT, new_name, old_name,
+ error = audit_mount(subj_cred, profile, OP_PIVOTROOT, new_name,
+ old_name,
NULL, trans_name, 0, NULL, AA_MAY_PIVOTROOT,
&perms, info, error);
if (error)
@@ -678,7 +711,8 @@ audit:
return aa_get_newest_label(&profile->label);
}
-int aa_pivotroot(struct aa_label *label, const struct path *old_path,
+int aa_pivotroot(const struct cred *subj_cred, struct aa_label *label,
+ const struct path *old_path,
const struct path *new_path)
{
struct aa_profile *profile;
@@ -696,7 +730,8 @@ int aa_pivotroot(struct aa_label *label, const struct path *old_path,
if (!old_buffer || !new_buffer)
goto out;
target = fn_label_build(label, profile, GFP_KERNEL,
- build_pivotroot(profile, new_path, new_buffer,
+ build_pivotroot(subj_cred, profile, new_path,
+ new_buffer,
old_path, old_buffer));
if (!target) {
info = "label build failed";
@@ -722,7 +757,8 @@ out:
fail:
/* TODO: add back in auditing of new_name and old_name */
error = fn_for_each(label, profile,
- audit_mount(profile, OP_PIVOTROOT, NULL /*new_name */,
+ audit_mount(subj_cred, profile, OP_PIVOTROOT,
+ NULL /*new_name */,
NULL /* old_name */,
NULL, NULL,
0, NULL, AA_MAY_PIVOTROOT, &nullperms, info,
diff --git a/security/apparmor/net.c b/security/apparmor/net.c
index 788be1609a86..87e934b2b548 100644
--- a/security/apparmor/net.c
+++ b/security/apparmor/net.c
@@ -71,6 +71,7 @@ static const char * const net_mask_names[] = {
void audit_net_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
+ struct apparmor_audit_data *ad = aad(sa);
if (address_family_names[sa->u.net->family])
audit_log_format(ab, " family=\"%s\"",
@@ -78,35 +79,36 @@ void audit_net_cb(struct audit_buffer *ab, void *va)
else
audit_log_format(ab, " family=\"unknown(%d)\"",
sa->u.net->family);
- if (sock_type_names[aad(sa)->net.type])
+ if (sock_type_names[ad->net.type])
audit_log_format(ab, " sock_type=\"%s\"",
- sock_type_names[aad(sa)->net.type]);
+ sock_type_names[ad->net.type]);
else
audit_log_format(ab, " sock_type=\"unknown(%d)\"",
- aad(sa)->net.type);
- audit_log_format(ab, " protocol=%d", aad(sa)->net.protocol);
+ ad->net.type);
+ audit_log_format(ab, " protocol=%d", ad->net.protocol);
- if (aad(sa)->request & NET_PERMS_MASK) {
+ if (ad->request & NET_PERMS_MASK) {
audit_log_format(ab, " requested_mask=");
- aa_audit_perm_mask(ab, aad(sa)->request, NULL, 0,
+ aa_audit_perm_mask(ab, ad->request, NULL, 0,
net_mask_names, NET_PERMS_MASK);
- if (aad(sa)->denied & NET_PERMS_MASK) {
+ if (ad->denied & NET_PERMS_MASK) {
audit_log_format(ab, " denied_mask=");
- aa_audit_perm_mask(ab, aad(sa)->denied, NULL, 0,
+ aa_audit_perm_mask(ab, ad->denied, NULL, 0,
net_mask_names, NET_PERMS_MASK);
}
}
- if (aad(sa)->peer) {
+ if (ad->peer) {
audit_log_format(ab, " peer=");
- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
+ aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
FLAGS_NONE, GFP_ATOMIC);
}
}
/* Generic af perm */
-int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
- u32 request, u16 family, int type)
+int aa_profile_af_perm(struct aa_profile *profile,
+ struct apparmor_audit_data *ad, u32 request, u16 family,
+ int type)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
@@ -125,26 +127,28 @@ int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
buffer[0] = cpu_to_be16(family);
buffer[1] = cpu_to_be16((u16) type);
- state = aa_dfa_match_len(rules->policy.dfa, state, (char *) &buffer,
+ state = aa_dfa_match_len(rules->policy->dfa, state, (char *) &buffer,
4);
- perms = *aa_lookup_perms(&rules->policy, state);
+ perms = *aa_lookup_perms(rules->policy, state);
aa_apply_modes_to_perms(profile, &perms);
- return aa_check_perms(profile, &perms, request, sa, audit_net_cb);
+ return aa_check_perms(profile, &perms, request, ad, audit_net_cb);
}
-int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
- int type, int protocol)
+int aa_af_perm(const struct cred *subj_cred, struct aa_label *label,
+ const char *op, u32 request, u16 family, int type, int protocol)
{
struct aa_profile *profile;
- DEFINE_AUDIT_NET(sa, op, NULL, family, type, protocol);
+ DEFINE_AUDIT_NET(ad, op, NULL, family, type, protocol);
return fn_for_each_confined(label, profile,
- aa_profile_af_perm(profile, &sa, request, family,
+ aa_profile_af_perm(profile, &ad, request, family,
type));
}
-static int aa_label_sk_perm(struct aa_label *label, const char *op, u32 request,
+static int aa_label_sk_perm(const struct cred *subj_cred,
+ struct aa_label *label,
+ const char *op, u32 request,
struct sock *sk)
{
struct aa_sk_ctx *ctx = SK_CTX(sk);
@@ -155,10 +159,11 @@ static int aa_label_sk_perm(struct aa_label *label, const char *op, u32 request,
if (ctx->label != kernel_t && !unconfined(label)) {
struct aa_profile *profile;
- DEFINE_AUDIT_SK(sa, op, sk);
+ DEFINE_AUDIT_SK(ad, op, sk);
+ ad.subj_cred = subj_cred;
error = fn_for_each_confined(label, profile,
- aa_profile_af_sk_perm(profile, &sa, request, sk));
+ aa_profile_af_sk_perm(profile, &ad, request, sk));
}
return error;
@@ -174,21 +179,21 @@ int aa_sk_perm(const char *op, u32 request, struct sock *sk)
/* TODO: switch to begin_current_label ???? */
label = begin_current_label_crit_section();
- error = aa_label_sk_perm(label, op, request, sk);
+ error = aa_label_sk_perm(current_cred(), label, op, request, sk);
end_current_label_crit_section(label);
return error;
}
-int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
- struct socket *sock)
+int aa_sock_file_perm(const struct cred *subj_cred, struct aa_label *label,
+ const char *op, u32 request, struct socket *sock)
{
AA_BUG(!label);
AA_BUG(!sock);
AA_BUG(!sock->sk);
- return aa_label_sk_perm(label, op, request, sock->sk);
+ return aa_label_sk_perm(subj_cred, label, op, request, sock->sk);
}
#ifdef CONFIG_NETWORK_SECMARK
@@ -214,7 +219,7 @@ static int apparmor_secmark_init(struct aa_secmark *secmark)
}
static int aa_secmark_perm(struct aa_profile *profile, u32 request, u32 secid,
- struct common_audit_data *sa)
+ struct apparmor_audit_data *ad)
{
int i, ret;
struct aa_perms perms = { };
@@ -245,17 +250,17 @@ static int aa_secmark_perm(struct aa_profile *profile, u32 request, u32 secid,
aa_apply_modes_to_perms(profile, &perms);
- return aa_check_perms(profile, &perms, request, sa, audit_net_cb);
+ return aa_check_perms(profile, &perms, request, ad, audit_net_cb);
}
int apparmor_secmark_check(struct aa_label *label, char *op, u32 request,
u32 secid, const struct sock *sk)
{
struct aa_profile *profile;
- DEFINE_AUDIT_SK(sa, op, sk);
+ DEFINE_AUDIT_SK(ad, op, sk);
return fn_for_each_confined(label, profile,
aa_secmark_perm(profile, request, secid,
- &sa));
+ &ad));
}
#endif
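
In aa_profile_af_perm() above, the (family, type) pair is packed into a 4-byte big-endian key before being fed to the policy DFA via aa_dfa_match_len(). A self-contained illustration of just that encoding step, with htons() as a userspace stand-in for cpu_to_be16() and the DFA walk itself omitted:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t key[2];
	uint16_t family = 2;	/* e.g. AF_INET */
	uint16_t type = 1;	/* e.g. SOCK_STREAM */
	unsigned char *p = (unsigned char *)key;

	key[0] = htons(family);	/* stand-in for cpu_to_be16() */
	key[1] = htons(type);

	/* these are the 4 bytes handed to aa_dfa_match_len() in the patch */
	printf("%02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
	return 0;
}
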
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index b38f7b2a5e1d..ed4c9803c8fa 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -88,6 +88,7 @@
#include "include/resource.h"
int unprivileged_userns_apparmor_policy = 1;
+int aa_unprivileged_unconfined_restricted;
const char *const aa_profile_mode_names[] = {
"enforce",
@@ -98,6 +99,41 @@ const char *const aa_profile_mode_names[] = {
};
+static void aa_free_pdb(struct aa_policydb *policy)
+{
+ if (policy) {
+ aa_put_dfa(policy->dfa);
+ if (policy->perms)
+ kvfree(policy->perms);
+ aa_free_str_table(&policy->trans);
+ }
+}
+
+/**
+ * aa_pdb_free_kref - free aa_policydb by kref (called by aa_put_pdb)
+ * @kref: kref callback for freeing of a policydb (NOT NULL)
+ */
+void aa_pdb_free_kref(struct kref *kref)
+{
+ struct aa_policydb *pdb = container_of(kref, struct aa_policydb, count);
+
+ aa_free_pdb(pdb);
+}
+
+
+struct aa_policydb *aa_alloc_pdb(gfp_t gfp)
+{
+ struct aa_policydb *pdb = kzalloc(sizeof(struct aa_policydb), gfp);
+
+ if (!pdb)
+ return NULL;
+
+ kref_init(&pdb->count);
+
+ return pdb;
+}
+
+
/**
* __add_profile - add a profiles to list and label tree
* @list: list to add it to (NOT NULL)
@@ -200,15 +236,15 @@ static void free_attachment(struct aa_attachment *attach)
for (i = 0; i < attach->xattr_count; i++)
kfree_sensitive(attach->xattrs[i]);
kfree_sensitive(attach->xattrs);
- aa_destroy_policydb(&attach->xmatch);
+ aa_put_pdb(attach->xmatch);
}
static void free_ruleset(struct aa_ruleset *rules)
{
int i;
- aa_destroy_policydb(&rules->file);
- aa_destroy_policydb(&rules->policy);
+ aa_put_pdb(rules->file);
+ aa_put_pdb(rules->policy);
aa_free_cap_rules(&rules->caps);
aa_free_rlimit_rules(&rules->rlimits);
@@ -255,6 +291,7 @@ void aa_free_profile(struct aa_profile *profile)
aa_put_ns(profile->ns);
kfree_sensitive(profile->rename);
+ kfree_sensitive(profile->disconnected);
free_attachment(&profile->attach);
@@ -285,6 +322,7 @@ void aa_free_profile(struct aa_profile *profile)
/**
* aa_alloc_profile - allocate, initialize and return a new profile
* @hname: name of the profile (NOT NULL)
+ * @proxy: proxy to use OR NULL if a new one should be allocated
* @gfp: allocation type
*
* Returns: refcount profile or NULL on failure
@@ -588,16 +626,8 @@ struct aa_profile *aa_alloc_null(struct aa_profile *parent, const char *name,
/* TODO: ideally we should inherit abi from parent */
profile->label.flags |= FLAG_NULL;
rules = list_first_entry(&profile->rules, typeof(*rules), list);
- rules->file.dfa = aa_get_dfa(nulldfa);
- rules->file.perms = kcalloc(2, sizeof(struct aa_perms), GFP_KERNEL);
- if (!rules->file.perms)
- goto fail;
- rules->file.size = 2;
- rules->policy.dfa = aa_get_dfa(nulldfa);
- rules->policy.perms = kcalloc(2, sizeof(struct aa_perms), GFP_KERNEL);
- if (!rules->policy.perms)
- goto fail;
- rules->policy.size = 2;
+ rules->file = aa_get_pdb(nullpdb);
+ rules->policy = aa_get_pdb(nullpdb);
if (parent) {
profile->path_flags = parent->path_flags;
@@ -608,11 +638,6 @@ struct aa_profile *aa_alloc_null(struct aa_profile *parent, const char *name,
}
return profile;
-
-fail:
- aa_free_profile(profile);
-
- return NULL;
}
/**
@@ -721,16 +746,17 @@ static int replacement_allowed(struct aa_profile *profile, int noreplace,
static void audit_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
+ struct apparmor_audit_data *ad = aad(sa);
- if (aad(sa)->iface.ns) {
+ if (ad->iface.ns) {
audit_log_format(ab, " ns=");
- audit_log_untrustedstring(ab, aad(sa)->iface.ns);
+ audit_log_untrustedstring(ab, ad->iface.ns);
}
}
/**
* audit_policy - Do auditing of policy changes
- * @label: label to check if it can manage policy
+ * @subj_label: label to check if it can manage policy
* @op: policy operation being performed
* @ns_name: name of namespace being manipulated
* @name: name of profile being manipulated (NOT NULL)
@@ -739,19 +765,19 @@ static void audit_cb(struct audit_buffer *ab, void *va)
*
* Returns: the error to be returned after audit is done
*/
-static int audit_policy(struct aa_label *label, const char *op,
+static int audit_policy(struct aa_label *subj_label, const char *op,
const char *ns_name, const char *name,
const char *info, int error)
{
- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, op);
+ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, op);
- aad(&sa)->iface.ns = ns_name;
- aad(&sa)->name = name;
- aad(&sa)->info = info;
- aad(&sa)->error = error;
- aad(&sa)->label = label;
+ ad.iface.ns = ns_name;
+ ad.name = name;
+ ad.info = info;
+ ad.error = error;
+ ad.subj_label = subj_label;
- aa_audit_msg(AUDIT_APPARMOR_STATUS, &sa, audit_cb);
+ aa_audit_msg(AUDIT_APPARMOR_STATUS, &ad, audit_cb);
return error;
}
@@ -759,31 +785,35 @@ static int audit_policy(struct aa_label *label, const char *op,
/* don't call out to other LSMs in the stack for apparmor policy admin
* permissions
*/
-static int policy_ns_capable(struct aa_label *label,
+static int policy_ns_capable(const struct cred *subj_cred,
+ struct aa_label *label,
struct user_namespace *userns, int cap)
{
int err;
/* check for MAC_ADMIN cap in cred */
- err = cap_capable(current_cred(), userns, cap, CAP_OPT_NONE);
+ err = cap_capable(subj_cred, userns, cap, CAP_OPT_NONE);
if (!err)
- err = aa_capable(label, cap, CAP_OPT_NONE);
+ err = aa_capable(subj_cred, label, cap, CAP_OPT_NONE);
return err;
}
/**
* aa_policy_view_capable - check if viewing policy in at @ns is allowed
- * label: label that is trying to view policy in ns
- * ns: namespace being viewed by @label (may be NULL if @label's ns)
+ * @subj_cred: cred of subject
+ * @label: label that is trying to view policy in ns
+ * @ns: namespace being viewed by @label (may be NULL if @label's ns)
+ *
* Returns: true if viewing policy is allowed
*
* If @ns is NULL then the namespace being viewed is assumed to be the
* tasks current namespace.
*/
-bool aa_policy_view_capable(struct aa_label *label, struct aa_ns *ns)
+bool aa_policy_view_capable(const struct cred *subj_cred,
+ struct aa_label *label, struct aa_ns *ns)
{
- struct user_namespace *user_ns = current_user_ns();
+ struct user_namespace *user_ns = subj_cred->user_ns;
struct aa_ns *view_ns = labels_view(label);
bool root_in_user_ns = uid_eq(current_euid(), make_kuid(user_ns, 0)) ||
in_egroup_p(make_kgid(user_ns, 0));
@@ -800,15 +830,17 @@ bool aa_policy_view_capable(struct aa_label *label, struct aa_ns *ns)
return response;
}
-bool aa_policy_admin_capable(struct aa_label *label, struct aa_ns *ns)
+bool aa_policy_admin_capable(const struct cred *subj_cred,
+ struct aa_label *label, struct aa_ns *ns)
{
- struct user_namespace *user_ns = current_user_ns();
- bool capable = policy_ns_capable(label, user_ns, CAP_MAC_ADMIN) == 0;
+ struct user_namespace *user_ns = subj_cred->user_ns;
+ bool capable = policy_ns_capable(subj_cred, label, user_ns,
+ CAP_MAC_ADMIN) == 0;
AA_DEBUG("cap_mac_admin? %d\n", capable);
AA_DEBUG("policy locked? %d\n", aa_g_lock_policy);
- return aa_policy_view_capable(label, ns) && capable &&
+ return aa_policy_view_capable(subj_cred, label, ns) && capable &&
!aa_g_lock_policy;
}
@@ -818,7 +850,7 @@ bool aa_current_policy_view_capable(struct aa_ns *ns)
bool res;
label = __begin_current_label_crit_section();
- res = aa_policy_view_capable(label, ns);
+ res = aa_policy_view_capable(current_cred(), label, ns);
__end_current_label_crit_section(label);
return res;
@@ -830,7 +862,7 @@ bool aa_current_policy_admin_capable(struct aa_ns *ns)
bool res;
label = __begin_current_label_crit_section();
- res = aa_policy_admin_capable(label, ns);
+ res = aa_policy_admin_capable(current_cred(), label, ns);
__end_current_label_crit_section(label);
return res;
@@ -838,12 +870,15 @@ bool aa_current_policy_admin_capable(struct aa_ns *ns)
/**
* aa_may_manage_policy - can the current task manage policy
+ * @subj_cred: subject's cred
* @label: label to check if it can manage policy
+ * @ns: namespace being managed by @label (may be NULL if @label's ns)
* @mask: contains the policy manipulation operation being done
*
* Returns: 0 if the task is allowed to manipulate policy else error
*/
-int aa_may_manage_policy(struct aa_label *label, struct aa_ns *ns, u32 mask)
+int aa_may_manage_policy(const struct cred *subj_cred, struct aa_label *label,
+ struct aa_ns *ns, u32 mask)
{
const char *op;
@@ -859,7 +894,7 @@ int aa_may_manage_policy(struct aa_label *label, struct aa_ns *ns, u32 mask)
return audit_policy(label, op, NULL, NULL, "policy_locked",
-EACCES);
- if (!aa_policy_admin_capable(label, ns))
+ if (!aa_policy_admin_capable(subj_cred, label, ns))
return audit_policy(label, op, NULL, NULL, "not policy admin",
-EACCES);
@@ -950,11 +985,11 @@ static void __replace_profile(struct aa_profile *old, struct aa_profile *new)
/**
* __lookup_replace - lookup replacement information for a profile
- * @ns - namespace the lookup occurs in
- * @hname - name of profile to lookup
- * @noreplace - true if not replacing an existing profile
- * @p - Returns: profile to be replaced
- * @info - Returns: info string on why lookup failed
+ * @ns: namespace the lookup occurs in
+ * @hname: name of profile to lookup
+ * @noreplace: true if not replacing an existing profile
+ * @p: Returns - profile to be replaced
+ * @info: Returns - info string on why lookup failed
*
* Returns: profile to replace (no ref) on success else ptr error
*/
diff --git a/security/apparmor/policy_compat.c b/security/apparmor/policy_compat.c
index 0cb02da8a319..423227670e68 100644
--- a/security/apparmor/policy_compat.c
+++ b/security/apparmor/policy_compat.c
@@ -143,6 +143,7 @@ static struct aa_perms compute_fperms_other(struct aa_dfa *dfa,
* compute_fperms - convert dfa compressed perms to internal perms and store
* them so they can be retrieved later.
* @dfa: a dfa using fperms to remap to internal permissions
+ * @size: Returns the permission table size
*
* Returns: remapped perm table
*/
diff --git a/security/apparmor/policy_ns.c b/security/apparmor/policy_ns.c
index fd5b7afbcb48..1f02cfe1d974 100644
--- a/security/apparmor/policy_ns.c
+++ b/security/apparmor/policy_ns.c
@@ -160,43 +160,6 @@ void aa_free_ns(struct aa_ns *ns)
}
/**
- * aa_findn_ns - look up a profile namespace on the namespace list
- * @root: namespace to search in (NOT NULL)
- * @name: name of namespace to find (NOT NULL)
- * @n: length of @name
- *
- * Returns: a refcounted namespace on the list, or NULL if no namespace
- * called @name exists.
- *
- * refcount released by caller
- */
-struct aa_ns *aa_findn_ns(struct aa_ns *root, const char *name, size_t n)
-{
- struct aa_ns *ns = NULL;
-
- rcu_read_lock();
- ns = aa_get_ns(__aa_findn_ns(&root->sub_ns, name, n));
- rcu_read_unlock();
-
- return ns;
-}
-
-/**
- * aa_find_ns - look up a profile namespace on the namespace list
- * @root: namespace to search in (NOT NULL)
- * @name: name of namespace to find (NOT NULL)
- *
- * Returns: a refcounted namespace on the list, or NULL if no namespace
- * called @name exists.
- *
- * refcount released by caller
- */
-struct aa_ns *aa_find_ns(struct aa_ns *root, const char *name)
-{
- return aa_findn_ns(root, name, strlen(name));
-}
-
-/**
* __aa_lookupn_ns - lookup the namespace matching @hname
* @view: namespace to search in (NOT NULL)
* @hname: hierarchical ns name (NOT NULL)
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 913ec8d0eb63..47ec097d6741 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -34,17 +34,18 @@
static void audit_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
+ struct apparmor_audit_data *ad = aad(sa);
- if (aad(sa)->iface.ns) {
+ if (ad->iface.ns) {
audit_log_format(ab, " ns=");
- audit_log_untrustedstring(ab, aad(sa)->iface.ns);
+ audit_log_untrustedstring(ab, ad->iface.ns);
}
- if (aad(sa)->name) {
+ if (ad->name) {
audit_log_format(ab, " name=");
- audit_log_untrustedstring(ab, aad(sa)->name);
+ audit_log_untrustedstring(ab, ad->name);
}
- if (aad(sa)->iface.pos)
- audit_log_format(ab, " offset=%ld", aad(sa)->iface.pos);
+ if (ad->iface.pos)
+ audit_log_format(ab, " offset=%ld", ad->iface.pos);
}
/**
@@ -63,18 +64,18 @@ static int audit_iface(struct aa_profile *new, const char *ns_name,
int error)
{
struct aa_profile *profile = labels_profile(aa_current_raw_label());
- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, NULL);
+ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, NULL);
if (e)
- aad(&sa)->iface.pos = e->pos - e->start;
- aad(&sa)->iface.ns = ns_name;
+ ad.iface.pos = e->pos - e->start;
+ ad.iface.ns = ns_name;
if (new)
- aad(&sa)->name = new->base.hname;
+ ad.name = new->base.hname;
else
- aad(&sa)->name = name;
- aad(&sa)->info = info;
- aad(&sa)->error = error;
+ ad.name = name;
+ ad.info = info;
+ ad.error = error;
- return aa_audit(AUDIT_APPARMOR_STATUS, profile, &sa, audit_cb);
+ return aa_audit(AUDIT_APPARMOR_STATUS, profile, &ad, audit_cb);
}
void __aa_loaddata_update(struct aa_loaddata *data, long revision)
@@ -705,24 +706,29 @@ fail_reset:
return -EPROTO;
}
-static int unpack_pdb(struct aa_ext *e, struct aa_policydb *policy,
+static int unpack_pdb(struct aa_ext *e, struct aa_policydb **policy,
bool required_dfa, bool required_trans,
const char **info)
{
+ struct aa_policydb *pdb;
void *pos = e->pos;
int i, flags, error = -EPROTO;
ssize_t size;
- size = unpack_perms_table(e, &policy->perms);
+ pdb = aa_alloc_pdb(GFP_KERNEL);
+ if (!pdb)
+ return -ENOMEM;
+
+ size = unpack_perms_table(e, &pdb->perms);
if (size < 0) {
error = size;
- policy->perms = NULL;
+ pdb->perms = NULL;
*info = "failed to unpack - perms";
goto fail;
}
- policy->size = size;
+ pdb->size = size;
- if (policy->perms) {
+ if (pdb->perms) {
/* perms table present accept is index */
flags = TO_ACCEPT1_FLAG(YYTD_DATA32);
} else {
@@ -731,13 +737,13 @@ static int unpack_pdb(struct aa_ext *e, struct aa_policydb *policy,
TO_ACCEPT2_FLAG(YYTD_DATA32);
}
- policy->dfa = unpack_dfa(e, flags);
- if (IS_ERR(policy->dfa)) {
- error = PTR_ERR(policy->dfa);
- policy->dfa = NULL;
+ pdb->dfa = unpack_dfa(e, flags);
+ if (IS_ERR(pdb->dfa)) {
+ error = PTR_ERR(pdb->dfa);
+ pdb->dfa = NULL;
*info = "failed to unpack - dfa";
goto fail;
- } else if (!policy->dfa) {
+ } else if (!pdb->dfa) {
if (required_dfa) {
*info = "missing required dfa";
goto fail;
@@ -751,18 +757,18 @@ static int unpack_pdb(struct aa_ext *e, struct aa_policydb *policy,
* sadly start was given different names for file and policydb
* but since it is optional we can try both
*/
- if (!aa_unpack_u32(e, &policy->start[0], "start"))
+ if (!aa_unpack_u32(e, &pdb->start[0], "start"))
/* default start state */
- policy->start[0] = DFA_START;
- if (!aa_unpack_u32(e, &policy->start[AA_CLASS_FILE], "dfa_start")) {
+ pdb->start[0] = DFA_START;
+ if (!aa_unpack_u32(e, &pdb->start[AA_CLASS_FILE], "dfa_start")) {
/* default start state for xmatch and file dfa */
- policy->start[AA_CLASS_FILE] = DFA_START;
+ pdb->start[AA_CLASS_FILE] = DFA_START;
} /* setup class index */
for (i = AA_CLASS_FILE + 1; i <= AA_CLASS_LAST; i++) {
- policy->start[i] = aa_dfa_next(policy->dfa, policy->start[0],
+ pdb->start[i] = aa_dfa_next(pdb->dfa, pdb->start[0],
i);
}
- if (!unpack_trans_table(e, &policy->trans) && required_trans) {
+ if (!unpack_trans_table(e, &pdb->trans) && required_trans) {
*info = "failed to unpack profile transition table";
goto fail;
}
@@ -770,9 +776,11 @@ static int unpack_pdb(struct aa_ext *e, struct aa_policydb *policy,
/* TODO: move compat mapping here, requires dfa merging first */
/* TODO: move verify here, it has to be done after compat mappings */
out:
+ *policy = pdb;
return 0;
fail:
+ aa_put_pdb(pdb);
e->pos = pos;
return error;
}
@@ -807,7 +815,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
const char *info = "failed to unpack profile";
size_t ns_len;
struct rhashtable_params params = { 0 };
- char *key = NULL;
+ char *key = NULL, *disconnected = NULL;
struct aa_data *data;
int error = -EPROTO;
kernel_cap_t tmpcap;
@@ -856,15 +864,15 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
}
	/* neither xmatch_len nor xmatch_perms are optional if xmatch is set */
- if (profile->attach.xmatch.dfa) {
+ if (profile->attach.xmatch->dfa) {
if (!aa_unpack_u32(e, &tmp, NULL)) {
info = "missing xmatch len";
goto fail;
}
profile->attach.xmatch_len = tmp;
- profile->attach.xmatch.start[AA_CLASS_XMATCH] = DFA_START;
- if (!profile->attach.xmatch.perms) {
- error = aa_compat_map_xmatch(&profile->attach.xmatch);
+ profile->attach.xmatch->start[AA_CLASS_XMATCH] = DFA_START;
+ if (!profile->attach.xmatch->perms) {
+ error = aa_compat_map_xmatch(profile->attach.xmatch);
if (error) {
info = "failed to convert xmatch permission table";
goto fail;
@@ -873,7 +881,8 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
}
/* disconnected attachment string is optional */
- (void) aa_unpack_str(e, &profile->disconnected, "disconnected");
+ (void) aa_unpack_strdup(e, &disconnected, "disconnected");
+ profile->disconnected = disconnected;
/* per profile debug flags (complain, audit) */
if (!aa_unpack_nameX(e, AA_STRUCT, "flags")) {
@@ -980,16 +989,16 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
if (error)
goto fail;
/* Fixup: drop when we get rid of start array */
- if (aa_dfa_next(rules->policy.dfa, rules->policy.start[0],
+ if (aa_dfa_next(rules->policy->dfa, rules->policy->start[0],
AA_CLASS_FILE))
- rules->policy.start[AA_CLASS_FILE] =
- aa_dfa_next(rules->policy.dfa,
- rules->policy.start[0],
+ rules->policy->start[AA_CLASS_FILE] =
+ aa_dfa_next(rules->policy->dfa,
+ rules->policy->start[0],
AA_CLASS_FILE);
if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
goto fail;
- if (!rules->policy.perms) {
- error = aa_compat_map_policy(&rules->policy,
+ if (!rules->policy->perms) {
+ error = aa_compat_map_policy(rules->policy,
e->version);
if (error) {
info = "failed to remap policydb permission table";
@@ -997,44 +1006,25 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
}
}
} else {
- rules->policy.dfa = aa_get_dfa(nulldfa);
- rules->policy.perms = kcalloc(2, sizeof(struct aa_perms),
- GFP_KERNEL);
- if (!rules->policy.perms)
- goto fail;
- rules->policy.size = 2;
+ rules->policy = aa_get_pdb(nullpdb);
}
/* get file rules */
error = unpack_pdb(e, &rules->file, false, true, &info);
if (error) {
goto fail;
- } else if (rules->file.dfa) {
- if (!rules->file.perms) {
- error = aa_compat_map_file(&rules->file);
+ } else if (rules->file->dfa) {
+ if (!rules->file->perms) {
+ error = aa_compat_map_file(rules->file);
if (error) {
info = "failed to remap file permission table";
goto fail;
}
}
- } else if (rules->policy.dfa &&
- rules->policy.start[AA_CLASS_FILE]) {
- rules->file.dfa = aa_get_dfa(rules->policy.dfa);
- rules->file.start[AA_CLASS_FILE] = rules->policy.start[AA_CLASS_FILE];
- rules->file.perms = kcalloc(rules->policy.size,
- sizeof(struct aa_perms),
- GFP_KERNEL);
- if (!rules->file.perms)
- goto fail;
- memcpy(rules->file.perms, rules->policy.perms,
- rules->policy.size * sizeof(struct aa_perms));
- rules->file.size = rules->policy.size;
+ } else if (rules->policy->dfa &&
+ rules->policy->start[AA_CLASS_FILE]) {
+ rules->file = aa_get_pdb(rules->policy);
} else {
- rules->file.dfa = aa_get_dfa(nulldfa);
- rules->file.perms = kcalloc(2, sizeof(struct aa_perms),
- GFP_KERNEL);
- if (!rules->file.perms)
- goto fail;
- rules->file.size = 2;
+ rules->file = aa_get_pdb(nullpdb);
}
error = -EPROTO;
if (aa_unpack_nameX(e, AA_STRUCT, "data")) {
@@ -1170,7 +1160,7 @@ static int verify_header(struct aa_ext *e, int required, const char **ns)
/**
* verify_dfa_accept_index - verify accept indexes are in range of perms table
* @dfa: the dfa to check accept indexes are in range
- * table_size: the permission table size the indexes should be within
+ * @table_size: the permission table size the indexes should be within
*/
static bool verify_dfa_accept_index(struct aa_dfa *dfa, int table_size)
{
@@ -1241,26 +1231,32 @@ static int verify_profile(struct aa_profile *profile)
if (!rules)
return 0;
- if ((rules->file.dfa && !verify_dfa_accept_index(rules->file.dfa,
- rules->file.size)) ||
- (rules->policy.dfa &&
- !verify_dfa_accept_index(rules->policy.dfa, rules->policy.size))) {
+ if (rules->file->dfa && !verify_dfa_accept_index(rules->file->dfa,
+ rules->file->size)) {
+ audit_iface(profile, NULL, NULL,
+ "Unpack: file Invalid named transition", NULL,
+ -EPROTO);
+ return -EPROTO;
+ }
+ if (rules->policy->dfa &&
+ !verify_dfa_accept_index(rules->policy->dfa, rules->policy->size)) {
audit_iface(profile, NULL, NULL,
- "Unpack: Invalid named transition", NULL, -EPROTO);
+ "Unpack: policy Invalid named transition", NULL,
+ -EPROTO);
return -EPROTO;
}
- if (!verify_perms(&rules->file)) {
+ if (!verify_perms(rules->file)) {
audit_iface(profile, NULL, NULL,
"Unpack: Invalid perm index", NULL, -EPROTO);
return -EPROTO;
}
- if (!verify_perms(&rules->policy)) {
+ if (!verify_perms(rules->policy)) {
audit_iface(profile, NULL, NULL,
"Unpack: Invalid perm index", NULL, -EPROTO);
return -EPROTO;
}
- if (!verify_perms(&profile->attach.xmatch)) {
+ if (!verify_perms(profile->attach.xmatch)) {
audit_iface(profile, NULL, NULL,
"Unpack: Invalid perm index", NULL, -EPROTO);
return -EPROTO;
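
unpack_pdb() above now allocates the policydb itself and returns it through a double pointer, dropping its own reference and rewinding the unpack position if anything fails, so the caller never sees a half-built object. A small sketch of that allocate-or-roll-back shape in plain C; the parse step is faked and none of the names come from the patch:

#include <errno.h>
#include <stdlib.h>

struct pdb { int size; };

/* On success: returns 0 and hands ownership of a new pdb to the caller.
 * On failure: frees the partial object and rewinds *pos, so nothing leaks. */
static int unpack_pdb_like(const char **pos, int perm_count, struct pdb **out)
{
	const char *saved = *pos;	/* remember where unpacking started */
	struct pdb *p = calloc(1, sizeof(*p));

	if (!p)
		return -ENOMEM;

	if (perm_count < 0) {		/* stand-in for a failed unpack step */
		free(p);
		*pos = saved;		/* let the caller retry or bail out */
		return -EPROTO;
	}
	p->size = perm_count;

	*out = p;			/* transfer ownership on success only */
	return 0;
}
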
diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c
index e85948164896..dcc94c3153d5 100644
--- a/security/apparmor/resource.c
+++ b/security/apparmor/resource.c
@@ -30,18 +30,20 @@ struct aa_sfs_entry aa_sfs_entry_rlimit[] = {
static void audit_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
+ struct apparmor_audit_data *ad = aad(sa);
audit_log_format(ab, " rlimit=%s value=%lu",
- rlim_names[aad(sa)->rlim.rlim], aad(sa)->rlim.max);
- if (aad(sa)->peer) {
+ rlim_names[ad->rlim.rlim], ad->rlim.max);
+ if (ad->peer) {
audit_log_format(ab, " peer=");
- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
+ aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
FLAGS_NONE, GFP_ATOMIC);
}
}
/**
* audit_resource - audit setting resource limit
+ * @subj_cred: cred setting the resource
* @profile: profile being enforced (NOT NULL)
 * @resource: rlimit being audited
* @value: value being set
@@ -49,22 +51,24 @@ static void audit_cb(struct audit_buffer *ab, void *va)
 * @info: info being audited
* @error: error value
*
- * Returns: 0 or sa->error else other error code on failure
+ * Returns: 0 or ad->error else other error code on failure
*/
-static int audit_resource(struct aa_profile *profile, unsigned int resource,
+static int audit_resource(const struct cred *subj_cred,
+ struct aa_profile *profile, unsigned int resource,
unsigned long value, struct aa_label *peer,
const char *info, int error)
{
- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_RLIMITS,
+ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_RLIMITS,
OP_SETRLIMIT);
- aad(&sa)->rlim.rlim = resource;
- aad(&sa)->rlim.max = value;
- aad(&sa)->peer = peer;
- aad(&sa)->info = info;
- aad(&sa)->error = error;
+ ad.subj_cred = subj_cred;
+ ad.rlim.rlim = resource;
+ ad.rlim.max = value;
+ ad.peer = peer;
+ ad.info = info;
+ ad.error = error;
- return aa_audit(AUDIT_APPARMOR_AUTO, profile, &sa, audit_cb);
+ return aa_audit(AUDIT_APPARMOR_AUTO, profile, &ad, audit_cb);
}
/**
@@ -81,7 +85,8 @@ int aa_map_resource(int resource)
return rlim_map[resource];
}
-static int profile_setrlimit(struct aa_profile *profile, unsigned int resource,
+static int profile_setrlimit(const struct cred *subj_cred,
+ struct aa_profile *profile, unsigned int resource,
struct rlimit *new_rlim)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
@@ -91,22 +96,24 @@ static int profile_setrlimit(struct aa_profile *profile, unsigned int resource,
if (rules->rlimits.mask & (1 << resource) && new_rlim->rlim_max >
rules->rlimits.limits[resource].rlim_max)
e = -EACCES;
- return audit_resource(profile, resource, new_rlim->rlim_max, NULL, NULL,
- e);
+ return audit_resource(subj_cred, profile, resource, new_rlim->rlim_max,
+ NULL, NULL, e);
}
/**
* aa_task_setrlimit - test permission to set an rlimit
- * @label - label confining the task (NOT NULL)
- * @task - task the resource is being set on
- * @resource - the resource being set
- * @new_rlim - the new resource limit (NOT NULL)
+ * @subj_cred: cred setting the limit
+ * @label: label confining the task (NOT NULL)
+ * @task: task the resource is being set on
+ * @resource: the resource being set
+ * @new_rlim: the new resource limit (NOT NULL)
*
 * Control raising the process's hard limit.
*
* Returns: 0 or error code if setting resource failed
*/
-int aa_task_setrlimit(struct aa_label *label, struct task_struct *task,
+int aa_task_setrlimit(const struct cred *subj_cred, struct aa_label *label,
+ struct task_struct *task,
unsigned int resource, struct rlimit *new_rlim)
{
struct aa_profile *profile;
@@ -125,14 +132,15 @@ int aa_task_setrlimit(struct aa_label *label, struct task_struct *task,
*/
if (label != peer &&
- aa_capable(label, CAP_SYS_RESOURCE, CAP_OPT_NOAUDIT) != 0)
+ aa_capable(subj_cred, label, CAP_SYS_RESOURCE, CAP_OPT_NOAUDIT) != 0)
error = fn_for_each(label, profile,
- audit_resource(profile, resource,
+ audit_resource(subj_cred, profile, resource,
new_rlim->rlim_max, peer,
"cap_sys_resource", -EACCES));
else
error = fn_for_each_confined(label, profile,
- profile_setrlimit(profile, resource, new_rlim));
+ profile_setrlimit(subj_cred, profile, resource,
+ new_rlim));
aa_put_label(peer);
return error;
diff --git a/security/apparmor/task.c b/security/apparmor/task.c
index 84d16a29bfcb..f29a2e80e6bf 100644
--- a/security/apparmor/task.c
+++ b/security/apparmor/task.c
@@ -93,9 +93,8 @@ int aa_replace_current_label(struct aa_label *label)
* aa_set_current_onexec - set the tasks change_profile to happen onexec
* @label: system label to set at exec (MAYBE NULL to clear value)
* @stack: whether stacking should be done
- * Returns: 0 or error on failure
*/
-int aa_set_current_onexec(struct aa_label *label, bool stack)
+void aa_set_current_onexec(struct aa_label *label, bool stack)
{
struct aa_task_ctx *ctx = task_ctx(current);
@@ -103,8 +102,6 @@ int aa_set_current_onexec(struct aa_label *label, bool stack)
aa_put_label(ctx->onexec);
ctx->onexec = label;
ctx->token = stack;
-
- return 0;
}
/**
@@ -208,70 +205,75 @@ static const char *audit_ptrace_mask(u32 mask)
static void audit_ptrace_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
+ struct apparmor_audit_data *ad = aad(sa);
- if (aad(sa)->request & AA_PTRACE_PERM_MASK) {
+ if (ad->request & AA_PTRACE_PERM_MASK) {
audit_log_format(ab, " requested_mask=\"%s\"",
- audit_ptrace_mask(aad(sa)->request));
+ audit_ptrace_mask(ad->request));
- if (aad(sa)->denied & AA_PTRACE_PERM_MASK) {
+ if (ad->denied & AA_PTRACE_PERM_MASK) {
audit_log_format(ab, " denied_mask=\"%s\"",
- audit_ptrace_mask(aad(sa)->denied));
+ audit_ptrace_mask(ad->denied));
}
}
audit_log_format(ab, " peer=");
- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
+ aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
FLAGS_NONE, GFP_ATOMIC);
}
/* assumes check for RULE_MEDIATES is already done */
/* TODO: conditionals */
-static int profile_ptrace_perm(struct aa_profile *profile,
- struct aa_label *peer, u32 request,
- struct common_audit_data *sa)
+static int profile_ptrace_perm(const struct cred *cred,
+ struct aa_profile *profile,
+ struct aa_label *peer, u32 request,
+ struct apparmor_audit_data *ad)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
struct aa_perms perms = { };
- aad(sa)->peer = peer;
+ ad->subj_cred = cred;
+ ad->peer = peer;
aa_profile_match_label(profile, rules, peer, AA_CLASS_PTRACE, request,
&perms);
aa_apply_modes_to_perms(profile, &perms);
- return aa_check_perms(profile, &perms, request, sa, audit_ptrace_cb);
+ return aa_check_perms(profile, &perms, request, ad, audit_ptrace_cb);
}
-static int profile_tracee_perm(struct aa_profile *tracee,
+static int profile_tracee_perm(const struct cred *cred,
+ struct aa_profile *tracee,
struct aa_label *tracer, u32 request,
- struct common_audit_data *sa)
+ struct apparmor_audit_data *ad)
{
if (profile_unconfined(tracee) || unconfined(tracer) ||
!ANY_RULE_MEDIATES(&tracee->rules, AA_CLASS_PTRACE))
return 0;
- return profile_ptrace_perm(tracee, tracer, request, sa);
+ return profile_ptrace_perm(cred, tracee, tracer, request, ad);
}
-static int profile_tracer_perm(struct aa_profile *tracer,
+static int profile_tracer_perm(const struct cred *cred,
+ struct aa_profile *tracer,
struct aa_label *tracee, u32 request,
- struct common_audit_data *sa)
+ struct apparmor_audit_data *ad)
{
if (profile_unconfined(tracer))
return 0;
if (ANY_RULE_MEDIATES(&tracer->rules, AA_CLASS_PTRACE))
- return profile_ptrace_perm(tracer, tracee, request, sa);
+ return profile_ptrace_perm(cred, tracer, tracee, request, ad);
/* profile uses the old style capability check for ptrace */
if (&tracer->label == tracee)
return 0;
- aad(sa)->label = &tracer->label;
- aad(sa)->peer = tracee;
- aad(sa)->request = 0;
- aad(sa)->error = aa_capable(&tracer->label, CAP_SYS_PTRACE,
- CAP_OPT_NONE);
+ ad->subj_label = &tracer->label;
+ ad->peer = tracee;
+ ad->request = 0;
+ ad->error = aa_capable(cred, &tracer->label, CAP_SYS_PTRACE,
+ CAP_OPT_NONE);
- return aa_audit(AUDIT_APPARMOR_AUTO, tracer, sa, audit_ptrace_cb);
+ return aa_audit(AUDIT_APPARMOR_AUTO, tracer, ad, audit_ptrace_cb);
}
/**
@@ -282,7 +284,8 @@ static int profile_tracer_perm(struct aa_profile *tracer,
*
* Returns: %0 else error code if permission denied or error
*/
-int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
+int aa_may_ptrace(const struct cred *tracer_cred, struct aa_label *tracer,
+ const struct cred *tracee_cred, struct aa_label *tracee,
u32 request)
{
struct aa_profile *profile;
@@ -290,6 +293,49 @@ int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_PTRACE, OP_PTRACE);
return xcheck_labels(tracer, tracee, profile,
- profile_tracer_perm(profile, tracee, request, &sa),
- profile_tracee_perm(profile, tracer, xrequest, &sa));
+ profile_tracer_perm(tracer_cred, profile, tracee,
+ request, &sa),
+ profile_tracee_perm(tracee_cred, profile, tracer,
+ xrequest, &sa));
+}
+
+/* call back to audit userns fields */
+static void audit_ns_cb(struct audit_buffer *ab, void *va)
+{
+ struct apparmor_audit_data *ad = aad_of_va(va);
+
+ if (ad->request & AA_USERNS_CREATE)
+ audit_log_format(ab, " requested=\"userns_create\"");
+
+ if (ad->denied & AA_USERNS_CREATE)
+ audit_log_format(ab, " denied=\"userns_create\"");
+}
+
+int aa_profile_ns_perm(struct aa_profile *profile,
+ struct apparmor_audit_data *ad,
+ u32 request)
+{
+ struct aa_perms perms = { };
+ int error = 0;
+
+ ad->subj_label = &profile->label;
+ ad->request = request;
+
+ if (!profile_unconfined(profile)) {
+ struct aa_ruleset *rules = list_first_entry(&profile->rules,
+ typeof(*rules),
+ list);
+ aa_state_t state;
+
+ state = RULE_MEDIATES(rules, ad->class);
+ if (!state)
+ /* TODO: add flag to complain about unmediated */
+ return 0;
+ perms = *aa_lookup_perms(rules->policy, state);
+ aa_apply_modes_to_perms(profile, &perms);
+ error = aa_check_perms(profile, &perms, request, ad,
+ audit_ns_cb);
+ }
+
+ return error;
}
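
aa_profile_ns_perm() above follows the usual AppArmor mediation shape: check whether the profile mediates the class, look up the permission set for the matched state, apply the profile's modes, then compare the request mask against what is allowed. A stripped-down sketch of that final allow/deny comparison, with the DFA lookup and audit plumbing elided; the helper name and bit value are illustrative:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define AA_USERNS_CREATE	0x1u	/* illustrative bit value */

struct perms { uint32_t allow; };

/* 0 if every requested bit is allowed, -EACCES otherwise */
static int check_perms(const struct perms *perms, uint32_t request)
{
	uint32_t denied = request & ~perms->allow;

	return denied ? -EACCES : 0;
}

int main(void)
{
	struct perms confined = { .allow = 0 };
	struct perms permitted = { .allow = AA_USERNS_CREATE };

	printf("confined:  %d\n", check_perms(&confined, AA_USERNS_CREATE));
	printf("permitted: %d\n", check_perms(&permitted, AA_USERNS_CREATE));
	return 0;
}
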
diff --git a/security/landlock/Kconfig b/security/landlock/Kconfig
index c1e862a38410..c4bf0d5eff39 100644
--- a/security/landlock/Kconfig
+++ b/security/landlock/Kconfig
@@ -3,6 +3,7 @@
config SECURITY_LANDLOCK
bool "Landlock support"
depends on SECURITY
+ select SECURITY_NETWORK
select SECURITY_PATH
help
Landlock is a sandboxing mechanism that enables processes to restrict
diff --git a/security/landlock/Makefile b/security/landlock/Makefile
index 7bbd2f413b3e..c2e116f2a299 100644
--- a/security/landlock/Makefile
+++ b/security/landlock/Makefile
@@ -2,3 +2,5 @@ obj-$(CONFIG_SECURITY_LANDLOCK) := landlock.o
landlock-y := setup.o syscalls.o object.o ruleset.o \
cred.o ptrace.o fs.o
+
+landlock-$(CONFIG_INET) += net.o
diff --git a/security/landlock/fs.c b/security/landlock/fs.c
index 1c0c198f6fdb..bc7c126deea2 100644
--- a/security/landlock/fs.c
+++ b/security/landlock/fs.c
@@ -151,16 +151,6 @@ retry:
/* clang-format on */
/*
- * All access rights that are denied by default whether they are handled or not
- * by a ruleset/layer. This must be ORed with all ruleset->fs_access_masks[]
- * entries when we need to get the absolute handled access masks.
- */
-/* clang-format off */
-#define ACCESS_INITIALLY_DENIED ( \
- LANDLOCK_ACCESS_FS_REFER)
-/* clang-format on */
-
-/*
* @path: Should have been checked by get_path_from_fd().
*/
int landlock_append_fs_rule(struct landlock_ruleset *const ruleset,
@@ -168,7 +158,9 @@ int landlock_append_fs_rule(struct landlock_ruleset *const ruleset,
access_mask_t access_rights)
{
int err;
- struct landlock_object *object;
+ struct landlock_id id = {
+ .type = LANDLOCK_KEY_INODE,
+ };
/* Files only get access rights that make sense. */
if (!d_is_dir(path->dentry) &&
@@ -178,20 +170,19 @@ int landlock_append_fs_rule(struct landlock_ruleset *const ruleset,
return -EINVAL;
/* Transforms relative access rights to absolute ones. */
- access_rights |=
- LANDLOCK_MASK_ACCESS_FS &
- ~(ruleset->fs_access_masks[0] | ACCESS_INITIALLY_DENIED);
- object = get_inode_object(d_backing_inode(path->dentry));
- if (IS_ERR(object))
- return PTR_ERR(object);
+ access_rights |= LANDLOCK_MASK_ACCESS_FS &
+ ~landlock_get_fs_access_mask(ruleset, 0);
+ id.key.object = get_inode_object(d_backing_inode(path->dentry));
+ if (IS_ERR(id.key.object))
+ return PTR_ERR(id.key.object);
mutex_lock(&ruleset->lock);
- err = landlock_insert_rule(ruleset, object, access_rights);
+ err = landlock_insert_rule(ruleset, id, access_rights);
mutex_unlock(&ruleset->lock);
/*
* No need to check for an error because landlock_insert_rule()
* increments the refcount for the new object if needed.
*/
- landlock_put_object(object);
+ landlock_put_object(id.key.object);
return err;
}
@@ -208,6 +199,9 @@ find_rule(const struct landlock_ruleset *const domain,
{
const struct landlock_rule *rule;
const struct inode *inode;
+ struct landlock_id id = {
+ .type = LANDLOCK_KEY_INODE,
+ };
/* Ignores nonexistent leafs. */
if (d_is_negative(dentry))
@@ -215,67 +209,13 @@ find_rule(const struct landlock_ruleset *const domain,
inode = d_backing_inode(dentry);
rcu_read_lock();
- rule = landlock_find_rule(
- domain, rcu_dereference(landlock_inode(inode)->object));
+ id.key.object = rcu_dereference(landlock_inode(inode)->object);
+ rule = landlock_find_rule(domain, id);
rcu_read_unlock();
return rule;
}
/*
- * @layer_masks is read and may be updated according to the access request and
- * the matching rule.
- *
- * Returns true if the request is allowed (i.e. relevant layer masks for the
- * request are empty).
- */
-static inline bool
-unmask_layers(const struct landlock_rule *const rule,
- const access_mask_t access_request,
- layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
-{
- size_t layer_level;
-
- if (!access_request || !layer_masks)
- return true;
- if (!rule)
- return false;
-
- /*
- * An access is granted if, for each policy layer, at least one rule
- * encountered on the pathwalk grants the requested access,
- * regardless of its position in the layer stack. We must then check
- * the remaining layers for each inode, from the first added layer to
- * the last one. When there is multiple requested accesses, for each
- * policy layer, the full set of requested accesses may not be granted
- * by only one rule, but by the union (binary OR) of multiple rules.
- * E.g. /a/b <execute> + /a <read> => /a/b <execute + read>
- */
- for (layer_level = 0; layer_level < rule->num_layers; layer_level++) {
- const struct landlock_layer *const layer =
- &rule->layers[layer_level];
- const layer_mask_t layer_bit = BIT_ULL(layer->level - 1);
- const unsigned long access_req = access_request;
- unsigned long access_bit;
- bool is_empty;
-
- /*
- * Records in @layer_masks which layer grants access to each
- * requested access.
- */
- is_empty = true;
- for_each_set_bit(access_bit, &access_req,
- ARRAY_SIZE(*layer_masks)) {
- if (layer->access & BIT_ULL(access_bit))
- (*layer_masks)[access_bit] &= ~layer_bit;
- is_empty = is_empty && !(*layer_masks)[access_bit];
- }
- if (is_empty)
- return true;
- }
- return false;
-}
-
-/*
* Allows access to pseudo filesystems that will never be mountable (e.g.
* sockfs, pipefs), but can still be reachable through
* /proc/<pid>/fd/<file-descriptor>
@@ -287,64 +227,35 @@ static inline bool is_nouser_or_private(const struct dentry *dentry)
unlikely(IS_PRIVATE(d_backing_inode(dentry))));
}
-static inline access_mask_t
-get_handled_accesses(const struct landlock_ruleset *const domain)
+static access_mask_t
+get_raw_handled_fs_accesses(const struct landlock_ruleset *const domain)
{
- access_mask_t access_dom = ACCESS_INITIALLY_DENIED;
+ access_mask_t access_dom = 0;
size_t layer_level;
for (layer_level = 0; layer_level < domain->num_layers; layer_level++)
- access_dom |= domain->fs_access_masks[layer_level];
- return access_dom & LANDLOCK_MASK_ACCESS_FS;
+ access_dom |=
+ landlock_get_raw_fs_access_mask(domain, layer_level);
+ return access_dom;
}
-/**
- * init_layer_masks - Initialize layer masks from an access request
- *
- * Populates @layer_masks such that for each access right in @access_request,
- * the bits for all the layers are set where this access right is handled.
- *
- * @domain: The domain that defines the current restrictions.
- * @access_request: The requested access rights to check.
- * @layer_masks: The layer masks to populate.
- *
- * Returns: An access mask where each access right bit is set which is handled
- * in any of the active layers in @domain.
- */
-static inline access_mask_t
-init_layer_masks(const struct landlock_ruleset *const domain,
- const access_mask_t access_request,
- layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
+static access_mask_t
+get_handled_fs_accesses(const struct landlock_ruleset *const domain)
{
- access_mask_t handled_accesses = 0;
- size_t layer_level;
+ /* Handles all access rights that are initially denied by default. */
+ return get_raw_handled_fs_accesses(domain) |
+ LANDLOCK_ACCESS_FS_INITIALLY_DENIED;
+}
- memset(layer_masks, 0, sizeof(*layer_masks));
- /* An empty access request can happen because of O_WRONLY | O_RDWR. */
- if (!access_request)
- return 0;
+static const struct landlock_ruleset *get_current_fs_domain(void)
+{
+ const struct landlock_ruleset *const dom =
+ landlock_get_current_domain();
- /* Saves all handled accesses per layer. */
- for (layer_level = 0; layer_level < domain->num_layers; layer_level++) {
- const unsigned long access_req = access_request;
- unsigned long access_bit;
+ if (!dom || !get_raw_handled_fs_accesses(dom))
+ return NULL;
- for_each_set_bit(access_bit, &access_req,
- ARRAY_SIZE(*layer_masks)) {
- /*
- * Artificially handles all initially denied by default
- * access rights.
- */
- if (BIT_ULL(access_bit) &
- (domain->fs_access_masks[layer_level] |
- ACCESS_INITIALLY_DENIED)) {
- (*layer_masks)[access_bit] |=
- BIT_ULL(layer_level);
- handled_accesses |= BIT_ULL(access_bit);
- }
- }
- }
- return handled_accesses;
+ return dom;
}
/*
@@ -519,7 +430,7 @@ static bool is_access_to_paths_allowed(
* a superset of the meaningful requested accesses).
*/
access_masked_parent1 = access_masked_parent2 =
- get_handled_accesses(domain);
+ get_handled_fs_accesses(domain);
is_dom_check = true;
} else {
if (WARN_ON_ONCE(dentry_child1 || dentry_child2))
@@ -531,18 +442,22 @@ static bool is_access_to_paths_allowed(
}
if (unlikely(dentry_child1)) {
- unmask_layers(find_rule(domain, dentry_child1),
- init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS,
- &_layer_masks_child1),
- &_layer_masks_child1);
+ landlock_unmask_layers(
+ find_rule(domain, dentry_child1),
+ landlock_init_layer_masks(
+ domain, LANDLOCK_MASK_ACCESS_FS,
+ &_layer_masks_child1, LANDLOCK_KEY_INODE),
+ &_layer_masks_child1, ARRAY_SIZE(_layer_masks_child1));
layer_masks_child1 = &_layer_masks_child1;
child1_is_directory = d_is_dir(dentry_child1);
}
if (unlikely(dentry_child2)) {
- unmask_layers(find_rule(domain, dentry_child2),
- init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS,
- &_layer_masks_child2),
- &_layer_masks_child2);
+ landlock_unmask_layers(
+ find_rule(domain, dentry_child2),
+ landlock_init_layer_masks(
+ domain, LANDLOCK_MASK_ACCESS_FS,
+ &_layer_masks_child2, LANDLOCK_KEY_INODE),
+ &_layer_masks_child2, ARRAY_SIZE(_layer_masks_child2));
layer_masks_child2 = &_layer_masks_child2;
child2_is_directory = d_is_dir(dentry_child2);
}
@@ -594,15 +509,16 @@ static bool is_access_to_paths_allowed(
}
rule = find_rule(domain, walker_path.dentry);
- allowed_parent1 = unmask_layers(rule, access_masked_parent1,
- layer_masks_parent1);
- allowed_parent2 = unmask_layers(rule, access_masked_parent2,
- layer_masks_parent2);
+ allowed_parent1 = landlock_unmask_layers(
+ rule, access_masked_parent1, layer_masks_parent1,
+ ARRAY_SIZE(*layer_masks_parent1));
+ allowed_parent2 = landlock_unmask_layers(
+ rule, access_masked_parent2, layer_masks_parent2,
+ ARRAY_SIZE(*layer_masks_parent2));
/* Stops when a rule from each layer grants access. */
if (allowed_parent1 && allowed_parent2)
break;
-
jump_up:
if (walker_path.dentry == walker_path.mnt->mnt_root) {
if (follow_up(&walker_path)) {
@@ -641,7 +557,8 @@ static inline int check_access_path(const struct landlock_ruleset *const domain,
{
layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
- access_request = init_layer_masks(domain, access_request, &layer_masks);
+ access_request = landlock_init_layer_masks(
+ domain, access_request, &layer_masks, LANDLOCK_KEY_INODE);
if (is_access_to_paths_allowed(domain, path, access_request,
&layer_masks, NULL, 0, NULL, NULL))
return 0;
@@ -651,8 +568,7 @@ static inline int check_access_path(const struct landlock_ruleset *const domain,
static inline int current_check_access_path(const struct path *const path,
const access_mask_t access_request)
{
- const struct landlock_ruleset *const dom =
- landlock_get_current_domain();
+ const struct landlock_ruleset *const dom = get_current_fs_domain();
if (!dom)
return 0;
@@ -727,16 +643,18 @@ static bool collect_domain_accesses(
if (is_nouser_or_private(dir))
return true;
- access_dom = init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS,
- layer_masks_dom);
+ access_dom = landlock_init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS,
+ layer_masks_dom,
+ LANDLOCK_KEY_INODE);
dget(dir);
while (true) {
struct dentry *parent_dentry;
/* Gets all layers allowing all domain accesses. */
- if (unmask_layers(find_rule(domain, dir), access_dom,
- layer_masks_dom)) {
+ if (landlock_unmask_layers(find_rule(domain, dir), access_dom,
+ layer_masks_dom,
+ ARRAY_SIZE(*layer_masks_dom))) {
/*
* Stops when all handled accesses are allowed by at
* least one rule in each layer.
@@ -815,8 +733,7 @@ static int current_check_refer_path(struct dentry *const old_dentry,
struct dentry *const new_dentry,
const bool removable, const bool exchange)
{
- const struct landlock_ruleset *const dom =
- landlock_get_current_domain();
+ const struct landlock_ruleset *const dom = get_current_fs_domain();
bool allow_parent1, allow_parent2;
access_mask_t access_request_parent1, access_request_parent2;
struct path mnt_dir;
@@ -850,9 +767,9 @@ static int current_check_refer_path(struct dentry *const old_dentry,
* The LANDLOCK_ACCESS_FS_REFER access right is not required
* for same-directory referer (i.e. no reparenting).
*/
- access_request_parent1 = init_layer_masks(
+ access_request_parent1 = landlock_init_layer_masks(
dom, access_request_parent1 | access_request_parent2,
- &layer_masks_parent1);
+ &layer_masks_parent1, LANDLOCK_KEY_INODE);
if (is_access_to_paths_allowed(
dom, new_dir, access_request_parent1,
&layer_masks_parent1, NULL, 0, NULL, NULL))
@@ -1050,7 +967,7 @@ static int hook_sb_mount(const char *const dev_name,
const struct path *const path, const char *const type,
const unsigned long flags, void *const data)
{
- if (!landlock_get_current_domain())
+ if (!get_current_fs_domain())
return 0;
return -EPERM;
}
@@ -1058,7 +975,7 @@ static int hook_sb_mount(const char *const dev_name,
static int hook_move_mount(const struct path *const from_path,
const struct path *const to_path)
{
- if (!landlock_get_current_domain())
+ if (!get_current_fs_domain())
return 0;
return -EPERM;
}
@@ -1069,14 +986,14 @@ static int hook_move_mount(const struct path *const from_path,
*/
static int hook_sb_umount(struct vfsmount *const mnt, const int flags)
{
- if (!landlock_get_current_domain())
+ if (!get_current_fs_domain())
return 0;
return -EPERM;
}
static int hook_sb_remount(struct super_block *const sb, void *const mnt_opts)
{
- if (!landlock_get_current_domain())
+ if (!get_current_fs_domain())
return 0;
return -EPERM;
}
@@ -1092,7 +1009,7 @@ static int hook_sb_remount(struct super_block *const sb, void *const mnt_opts)
static int hook_sb_pivotroot(const struct path *const old_path,
const struct path *const new_path)
{
- if (!landlock_get_current_domain())
+ if (!get_current_fs_domain())
return 0;
return -EPERM;
}
@@ -1128,8 +1045,7 @@ static int hook_path_mknod(const struct path *const dir,
struct dentry *const dentry, const umode_t mode,
const unsigned int dev)
{
- const struct landlock_ruleset *const dom =
- landlock_get_current_domain();
+ const struct landlock_ruleset *const dom = get_current_fs_domain();
if (!dom)
return 0;
@@ -1208,8 +1124,7 @@ static int hook_file_open(struct file *const file)
layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
access_mask_t open_access_request, full_access_request, allowed_access;
const access_mask_t optional_access = LANDLOCK_ACCESS_FS_TRUNCATE;
- const struct landlock_ruleset *const dom =
- landlock_get_current_domain();
+ const struct landlock_ruleset *const dom = get_current_fs_domain();
if (!dom)
return 0;
@@ -1229,7 +1144,8 @@ static int hook_file_open(struct file *const file)
if (is_access_to_paths_allowed(
dom, &file->f_path,
- init_layer_masks(dom, full_access_request, &layer_masks),
+ landlock_init_layer_masks(dom, full_access_request,
+ &layer_masks, LANDLOCK_KEY_INODE),
&layer_masks, NULL, 0, NULL, NULL)) {
allowed_access = full_access_request;
} else {
diff --git a/security/landlock/limits.h b/security/landlock/limits.h
index 82288f0e9e5e..93c9c6f91556 100644
--- a/security/landlock/limits.h
+++ b/security/landlock/limits.h
@@ -21,6 +21,12 @@
#define LANDLOCK_LAST_ACCESS_FS LANDLOCK_ACCESS_FS_TRUNCATE
#define LANDLOCK_MASK_ACCESS_FS ((LANDLOCK_LAST_ACCESS_FS << 1) - 1)
#define LANDLOCK_NUM_ACCESS_FS __const_hweight64(LANDLOCK_MASK_ACCESS_FS)
+#define LANDLOCK_SHIFT_ACCESS_FS 0
+
+#define LANDLOCK_LAST_ACCESS_NET LANDLOCK_ACCESS_NET_CONNECT_TCP
+#define LANDLOCK_MASK_ACCESS_NET ((LANDLOCK_LAST_ACCESS_NET << 1) - 1)
+#define LANDLOCK_NUM_ACCESS_NET __const_hweight64(LANDLOCK_MASK_ACCESS_NET)
+#define LANDLOCK_SHIFT_ACCESS_NET LANDLOCK_NUM_ACCESS_FS
/* clang-format on */
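For concreteness, a small stand-alone sketch of the arithmetic behind these macros, assuming the uapi values LANDLOCK_ACCESS_FS_TRUNCATE == (1 << 14) and LANDLOCK_ACCESS_NET_CONNECT_TCP == (1 << 1): the filesystem mask then covers 15 rights starting at bit 0, and the network mask covers 2 rights shifted up by LANDLOCK_NUM_ACCESS_FS when both are packed into one access_masks_t word (see ruleset.h below).

#include <stdint.h>
#include <stdio.h>

/* Assumed uapi values; see include/uapi/linux/landlock.h. */
#define ACCESS_FS_TRUNCATE  (1ULL << 14) /* LANDLOCK_LAST_ACCESS_FS */
#define ACCESS_NET_CONNECT  (1ULL << 1)  /* LANDLOCK_LAST_ACCESS_NET */

int main(void)
{
	uint64_t mask_fs = (ACCESS_FS_TRUNCATE << 1) - 1;   /* 0x7fff, 15 bits */
	uint64_t mask_net = (ACCESS_NET_CONNECT << 1) - 1;  /* 0x3, 2 bits */
	unsigned int shift_net = __builtin_popcountll(mask_fs); /* 15 */
	/* One layer handling every fs and net right, packed like access_masks[]. */
	uint32_t packed = (uint32_t)mask_fs | (uint32_t)(mask_net << shift_net);

	printf("fs=%#llx net=%#llx packed=%#x\n",
	       (unsigned long long)mask_fs, (unsigned long long)mask_net,
	       packed); /* fs=0x7fff net=0x3 packed=0x1ffff */
	return 0;
}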
diff --git a/security/landlock/net.c b/security/landlock/net.c
new file mode 100644
index 000000000000..aaa92c2b1f08
--- /dev/null
+++ b/security/landlock/net.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Landlock LSM - Network management and hooks
+ *
+ * Copyright © 2022-2023 Huawei Tech. Co., Ltd.
+ * Copyright © 2022-2023 Microsoft Corporation
+ */
+
+#include <linux/in.h>
+#include <linux/net.h>
+#include <linux/socket.h>
+#include <net/ipv6.h>
+
+#include "common.h"
+#include "cred.h"
+#include "limits.h"
+#include "net.h"
+#include "ruleset.h"
+
+int landlock_append_net_rule(struct landlock_ruleset *const ruleset,
+ const u16 port, access_mask_t access_rights)
+{
+ int err;
+ const struct landlock_id id = {
+ .key.data = (__force uintptr_t)htons(port),
+ .type = LANDLOCK_KEY_NET_PORT,
+ };
+
+ BUILD_BUG_ON(sizeof(port) > sizeof(id.key.data));
+
+ /* Transforms relative access rights to absolute ones. */
+ access_rights |= LANDLOCK_MASK_ACCESS_NET &
+ ~landlock_get_net_access_mask(ruleset, 0);
+
+ mutex_lock(&ruleset->lock);
+ err = landlock_insert_rule(ruleset, id, access_rights);
+ mutex_unlock(&ruleset->lock);
+
+ return err;
+}
+
+static access_mask_t
+get_raw_handled_net_accesses(const struct landlock_ruleset *const domain)
+{
+ access_mask_t access_dom = 0;
+ size_t layer_level;
+
+ for (layer_level = 0; layer_level < domain->num_layers; layer_level++)
+ access_dom |= landlock_get_net_access_mask(domain, layer_level);
+ return access_dom;
+}
+
+static const struct landlock_ruleset *get_current_net_domain(void)
+{
+ const struct landlock_ruleset *const dom =
+ landlock_get_current_domain();
+
+ if (!dom || !get_raw_handled_net_accesses(dom))
+ return NULL;
+
+ return dom;
+}
+
+static int current_check_access_socket(struct socket *const sock,
+ struct sockaddr *const address,
+ const int addrlen,
+ const access_mask_t access_request)
+{
+ __be16 port;
+ layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_NET] = {};
+ const struct landlock_rule *rule;
+ access_mask_t handled_access;
+ struct landlock_id id = {
+ .type = LANDLOCK_KEY_NET_PORT,
+ };
+ const struct landlock_ruleset *const dom = get_current_net_domain();
+
+ if (!dom)
+ return 0;
+ if (WARN_ON_ONCE(dom->num_layers < 1))
+ return -EACCES;
+
+ /* Checks if it's a (potential) TCP socket. */
+ if (sock->type != SOCK_STREAM)
+ return 0;
+
+ /* Checks for minimal header length to safely read sa_family. */
+ if (addrlen < offsetofend(typeof(*address), sa_family))
+ return -EINVAL;
+
+ switch (address->sa_family) {
+ case AF_UNSPEC:
+ case AF_INET:
+ if (addrlen < sizeof(struct sockaddr_in))
+ return -EINVAL;
+ port = ((struct sockaddr_in *)address)->sin_port;
+ break;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ if (addrlen < SIN6_LEN_RFC2133)
+ return -EINVAL;
+ port = ((struct sockaddr_in6 *)address)->sin6_port;
+ break;
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+
+ default:
+ return 0;
+ }
+
+ /* Specific AF_UNSPEC handling. */
+ if (address->sa_family == AF_UNSPEC) {
+ /*
+ * Connecting to an address with AF_UNSPEC dissolves the TCP
+ * association, which has the same effect as closing the
+ * connection while retaining the socket object (i.e., the file
+ * descriptor). As for dropping privileges, closing
+ * connections is always allowed.
+ *
+ * For a TCP access control system, this request is legitimate.
+ * Let the network stack handle potential inconsistencies and
+ * return -EINVAL if needed.
+ */
+ if (access_request == LANDLOCK_ACCESS_NET_CONNECT_TCP)
+ return 0;
+
+ /*
+ * For compatibility reasons, accept AF_UNSPEC for bind
+ * accesses (mapped to AF_INET) only if the address is
+ * INADDR_ANY (cf. __inet_bind). Checking the address is
+ * required to not wrongfully return -EACCES instead of
+ * -EAFNOSUPPORT.
+ *
+ * We could return 0 and let the network stack handle these
+ * checks, but it is safer to return a proper error and test
+ * consistency thanks to kselftest.
+ */
+ if (access_request == LANDLOCK_ACCESS_NET_BIND_TCP) {
+ /* addrlen has already been checked for AF_UNSPEC. */
+ const struct sockaddr_in *const sockaddr =
+ (struct sockaddr_in *)address;
+
+ if (sock->sk->__sk_common.skc_family != AF_INET)
+ return -EINVAL;
+
+ if (sockaddr->sin_addr.s_addr != htonl(INADDR_ANY))
+ return -EAFNOSUPPORT;
+ }
+ } else {
+ /*
+ * Checks sa_family consistency to not wrongfully return
+ * -EACCES instead of -EINVAL. Valid sa_family changes are
+ * only (from AF_INET or AF_INET6) to AF_UNSPEC.
+ *
+ * We could return 0 and let the network stack handle this
+ * check, but it is safer to return a proper error and test
+ * consistency thanks to kselftest.
+ */
+ if (address->sa_family != sock->sk->__sk_common.skc_family)
+ return -EINVAL;
+ }
+
+ id.key.data = (__force uintptr_t)port;
+ BUILD_BUG_ON(sizeof(port) > sizeof(id.key.data));
+
+ rule = landlock_find_rule(dom, id);
+ handled_access = landlock_init_layer_masks(
+ dom, access_request, &layer_masks, LANDLOCK_KEY_NET_PORT);
+ if (landlock_unmask_layers(rule, handled_access, &layer_masks,
+ ARRAY_SIZE(layer_masks)))
+ return 0;
+
+ return -EACCES;
+}
+
+static int hook_socket_bind(struct socket *const sock,
+ struct sockaddr *const address, const int addrlen)
+{
+ return current_check_access_socket(sock, address, addrlen,
+ LANDLOCK_ACCESS_NET_BIND_TCP);
+}
+
+static int hook_socket_connect(struct socket *const sock,
+ struct sockaddr *const address,
+ const int addrlen)
+{
+ return current_check_access_socket(sock, address, addrlen,
+ LANDLOCK_ACCESS_NET_CONNECT_TCP);
+}
+
+static struct security_hook_list landlock_hooks[] __ro_after_init = {
+ LSM_HOOK_INIT(socket_bind, hook_socket_bind),
+ LSM_HOOK_INIT(socket_connect, hook_socket_connect),
+};
+
+__init void landlock_add_net_hooks(void)
+{
+ security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
+ LANDLOCK_NAME);
+}
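The AF_UNSPEC branch above is easier to picture from user space: an established TCP socket can be "disconnected" by calling connect() again with an AF_UNSPEC address, which keeps the file descriptor but drops the association, so the hook always allows it for connect. A minimal user-space sketch (not part of this patch):

#include <string.h>
#include <sys/socket.h>

/* Dissolve an existing TCP association while keeping the file descriptor.
 * Landlock always allows this for LANDLOCK_ACCESS_NET_CONNECT_TCP since it
 * is equivalent to closing the connection. */
static int tcp_disconnect(int fd)
{
	struct sockaddr sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_family = AF_UNSPEC;
	return connect(fd, &sa, sizeof(sa));
}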
diff --git a/security/landlock/net.h b/security/landlock/net.h
new file mode 100644
index 000000000000..09960c237a13
--- /dev/null
+++ b/security/landlock/net.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Landlock LSM - Network management and hooks
+ *
+ * Copyright © 2022-2023 Huawei Tech. Co., Ltd.
+ */
+
+#ifndef _SECURITY_LANDLOCK_NET_H
+#define _SECURITY_LANDLOCK_NET_H
+
+#include "common.h"
+#include "ruleset.h"
+#include "setup.h"
+
+#if IS_ENABLED(CONFIG_INET)
+__init void landlock_add_net_hooks(void);
+
+int landlock_append_net_rule(struct landlock_ruleset *const ruleset,
+ const u16 port, access_mask_t access_rights);
+#else /* IS_ENABLED(CONFIG_INET) */
+static inline void landlock_add_net_hooks(void)
+{
+}
+
+static inline int
+landlock_append_net_rule(struct landlock_ruleset *const ruleset, const u16 port,
+ access_mask_t access_rights)
+{
+ return -EAFNOSUPPORT;
+}
+#endif /* IS_ENABLED(CONFIG_INET) */
+
+#endif /* _SECURITY_LANDLOCK_NET_H */
diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c
index 996484f98bfd..ffedc99f2b68 100644
--- a/security/landlock/ruleset.c
+++ b/security/landlock/ruleset.c
@@ -29,33 +29,43 @@ static struct landlock_ruleset *create_ruleset(const u32 num_layers)
struct landlock_ruleset *new_ruleset;
new_ruleset =
- kzalloc(struct_size(new_ruleset, fs_access_masks, num_layers),
+ kzalloc(struct_size(new_ruleset, access_masks, num_layers),
GFP_KERNEL_ACCOUNT);
if (!new_ruleset)
return ERR_PTR(-ENOMEM);
refcount_set(&new_ruleset->usage, 1);
mutex_init(&new_ruleset->lock);
- new_ruleset->root = RB_ROOT;
+ new_ruleset->root_inode = RB_ROOT;
+
+#if IS_ENABLED(CONFIG_INET)
+ new_ruleset->root_net_port = RB_ROOT;
+#endif /* IS_ENABLED(CONFIG_INET) */
+
new_ruleset->num_layers = num_layers;
/*
* hierarchy = NULL
* num_rules = 0
- * fs_access_masks[] = 0
+ * access_masks[] = 0
*/
return new_ruleset;
}
struct landlock_ruleset *
-landlock_create_ruleset(const access_mask_t fs_access_mask)
+landlock_create_ruleset(const access_mask_t fs_access_mask,
+ const access_mask_t net_access_mask)
{
struct landlock_ruleset *new_ruleset;
/* Informs about useless ruleset. */
- if (!fs_access_mask)
+ if (!fs_access_mask && !net_access_mask)
return ERR_PTR(-ENOMSG);
new_ruleset = create_ruleset(1);
- if (!IS_ERR(new_ruleset))
- new_ruleset->fs_access_masks[0] = fs_access_mask;
+ if (IS_ERR(new_ruleset))
+ return new_ruleset;
+ if (fs_access_mask)
+ landlock_add_fs_access_mask(new_ruleset, fs_access_mask, 0);
+ if (net_access_mask)
+ landlock_add_net_access_mask(new_ruleset, net_access_mask, 0);
return new_ruleset;
}
@@ -68,8 +78,25 @@ static void build_check_rule(void)
BUILD_BUG_ON(rule.num_layers < LANDLOCK_MAX_NUM_LAYERS);
}
+static bool is_object_pointer(const enum landlock_key_type key_type)
+{
+ switch (key_type) {
+ case LANDLOCK_KEY_INODE:
+ return true;
+
+#if IS_ENABLED(CONFIG_INET)
+ case LANDLOCK_KEY_NET_PORT:
+ return false;
+#endif /* IS_ENABLED(CONFIG_INET) */
+
+ default:
+ WARN_ON_ONCE(1);
+ return false;
+ }
+}
+
static struct landlock_rule *
-create_rule(struct landlock_object *const object,
+create_rule(const struct landlock_id id,
const struct landlock_layer (*const layers)[], const u32 num_layers,
const struct landlock_layer *const new_layer)
{
@@ -90,8 +117,13 @@ create_rule(struct landlock_object *const object,
if (!new_rule)
return ERR_PTR(-ENOMEM);
RB_CLEAR_NODE(&new_rule->node);
- landlock_get_object(object);
- new_rule->object = object;
+ if (is_object_pointer(id.type)) {
+ /* This should be caught by insert_rule(). */
+ WARN_ON_ONCE(!id.key.object);
+ landlock_get_object(id.key.object);
+ }
+
+ new_rule->key = id.key;
new_rule->num_layers = new_num_layers;
/* Copies the original layer stack. */
memcpy(new_rule->layers, layers,
@@ -102,12 +134,32 @@ create_rule(struct landlock_object *const object,
return new_rule;
}
-static void free_rule(struct landlock_rule *const rule)
+static struct rb_root *get_root(struct landlock_ruleset *const ruleset,
+ const enum landlock_key_type key_type)
+{
+ switch (key_type) {
+ case LANDLOCK_KEY_INODE:
+ return &ruleset->root_inode;
+
+#if IS_ENABLED(CONFIG_INET)
+ case LANDLOCK_KEY_NET_PORT:
+ return &ruleset->root_net_port;
+#endif /* IS_ENABLED(CONFIG_INET) */
+
+ default:
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+static void free_rule(struct landlock_rule *const rule,
+ const enum landlock_key_type key_type)
{
might_sleep();
if (!rule)
return;
- landlock_put_object(rule->object);
+ if (is_object_pointer(key_type))
+ landlock_put_object(rule->key.object);
kfree(rule);
}
@@ -117,19 +169,21 @@ static void build_check_ruleset(void)
.num_rules = ~0,
.num_layers = ~0,
};
- typeof(ruleset.fs_access_masks[0]) fs_access_mask = ~0;
+ typeof(ruleset.access_masks[0]) access_masks = ~0;
BUILD_BUG_ON(ruleset.num_rules < LANDLOCK_MAX_NUM_RULES);
BUILD_BUG_ON(ruleset.num_layers < LANDLOCK_MAX_NUM_LAYERS);
- BUILD_BUG_ON(fs_access_mask < LANDLOCK_MASK_ACCESS_FS);
+ BUILD_BUG_ON(access_masks <
+ ((LANDLOCK_MASK_ACCESS_FS << LANDLOCK_SHIFT_ACCESS_FS) |
+ (LANDLOCK_MASK_ACCESS_NET << LANDLOCK_SHIFT_ACCESS_NET)));
}
/**
* insert_rule - Create and insert a rule in a ruleset
*
* @ruleset: The ruleset to be updated.
- * @object: The object to build the new rule with. The underlying kernel
- * object must be held by the caller.
+ * @id: The ID to build the new rule with. The underlying kernel object, if
+ * any, must be held by the caller.
* @layers: One or multiple layers to be copied into the new rule.
* @num_layers: The number of @layers entries.
*
@@ -143,26 +197,35 @@ static void build_check_ruleset(void)
* access rights.
*/
static int insert_rule(struct landlock_ruleset *const ruleset,
- struct landlock_object *const object,
+ const struct landlock_id id,
const struct landlock_layer (*const layers)[],
- size_t num_layers)
+ const size_t num_layers)
{
struct rb_node **walker_node;
struct rb_node *parent_node = NULL;
struct landlock_rule *new_rule;
+ struct rb_root *root;
might_sleep();
lockdep_assert_held(&ruleset->lock);
- if (WARN_ON_ONCE(!object || !layers))
+ if (WARN_ON_ONCE(!layers))
return -ENOENT;
- walker_node = &(ruleset->root.rb_node);
+
+ if (is_object_pointer(id.type) && WARN_ON_ONCE(!id.key.object))
+ return -ENOENT;
+
+ root = get_root(ruleset, id.type);
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+
+ walker_node = &root->rb_node;
while (*walker_node) {
struct landlock_rule *const this =
rb_entry(*walker_node, struct landlock_rule, node);
- if (this->object != object) {
+ if (this->key.data != id.key.data) {
parent_node = *walker_node;
- if (this->object < object)
+ if (this->key.data < id.key.data)
walker_node = &((*walker_node)->rb_right);
else
walker_node = &((*walker_node)->rb_left);
@@ -194,24 +257,24 @@ static int insert_rule(struct landlock_ruleset *const ruleset,
* Intersects access rights when it is a merge between a
* ruleset and a domain.
*/
- new_rule = create_rule(object, &this->layers, this->num_layers,
+ new_rule = create_rule(id, &this->layers, this->num_layers,
&(*layers)[0]);
if (IS_ERR(new_rule))
return PTR_ERR(new_rule);
- rb_replace_node(&this->node, &new_rule->node, &ruleset->root);
- free_rule(this);
+ rb_replace_node(&this->node, &new_rule->node, root);
+ free_rule(this, id.type);
return 0;
}
- /* There is no match for @object. */
+ /* There is no match for @id. */
build_check_ruleset();
if (ruleset->num_rules >= LANDLOCK_MAX_NUM_RULES)
return -E2BIG;
- new_rule = create_rule(object, layers, num_layers, NULL);
+ new_rule = create_rule(id, layers, num_layers, NULL);
if (IS_ERR(new_rule))
return PTR_ERR(new_rule);
rb_link_node(&new_rule->node, parent_node, walker_node);
- rb_insert_color(&new_rule->node, &ruleset->root);
+ rb_insert_color(&new_rule->node, root);
ruleset->num_rules++;
return 0;
}
@@ -229,7 +292,7 @@ static void build_check_layer(void)
/* @ruleset must be locked by the caller. */
int landlock_insert_rule(struct landlock_ruleset *const ruleset,
- struct landlock_object *const object,
+ const struct landlock_id id,
const access_mask_t access)
{
struct landlock_layer layers[] = { {
@@ -239,7 +302,7 @@ int landlock_insert_rule(struct landlock_ruleset *const ruleset,
} };
build_check_layer();
- return insert_rule(ruleset, object, &layers, ARRAY_SIZE(layers));
+ return insert_rule(ruleset, id, &layers, ARRAY_SIZE(layers));
}
static inline void get_hierarchy(struct landlock_hierarchy *const hierarchy)
@@ -258,10 +321,51 @@ static void put_hierarchy(struct landlock_hierarchy *hierarchy)
}
}
+static int merge_tree(struct landlock_ruleset *const dst,
+ struct landlock_ruleset *const src,
+ const enum landlock_key_type key_type)
+{
+ struct landlock_rule *walker_rule, *next_rule;
+ struct rb_root *src_root;
+ int err = 0;
+
+ might_sleep();
+ lockdep_assert_held(&dst->lock);
+ lockdep_assert_held(&src->lock);
+
+ src_root = get_root(src, key_type);
+ if (IS_ERR(src_root))
+ return PTR_ERR(src_root);
+
+ /* Merges the @src tree. */
+ rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, src_root,
+ node) {
+ struct landlock_layer layers[] = { {
+ .level = dst->num_layers,
+ } };
+ const struct landlock_id id = {
+ .key = walker_rule->key,
+ .type = key_type,
+ };
+
+ if (WARN_ON_ONCE(walker_rule->num_layers != 1))
+ return -EINVAL;
+
+ if (WARN_ON_ONCE(walker_rule->layers[0].level != 0))
+ return -EINVAL;
+
+ layers[0].access = walker_rule->layers[0].access;
+
+ err = insert_rule(dst, id, &layers, ARRAY_SIZE(layers));
+ if (err)
+ return err;
+ }
+ return err;
+}
+
static int merge_ruleset(struct landlock_ruleset *const dst,
struct landlock_ruleset *const src)
{
- struct landlock_rule *walker_rule, *next_rule;
int err = 0;
might_sleep();
@@ -281,29 +385,19 @@ static int merge_ruleset(struct landlock_ruleset *const dst,
err = -EINVAL;
goto out_unlock;
}
- dst->fs_access_masks[dst->num_layers - 1] = src->fs_access_masks[0];
+ dst->access_masks[dst->num_layers - 1] = src->access_masks[0];
- /* Merges the @src tree. */
- rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, &src->root,
- node) {
- struct landlock_layer layers[] = { {
- .level = dst->num_layers,
- } };
+ /* Merges the @src inode tree. */
+ err = merge_tree(dst, src, LANDLOCK_KEY_INODE);
+ if (err)
+ goto out_unlock;
- if (WARN_ON_ONCE(walker_rule->num_layers != 1)) {
- err = -EINVAL;
- goto out_unlock;
- }
- if (WARN_ON_ONCE(walker_rule->layers[0].level != 0)) {
- err = -EINVAL;
- goto out_unlock;
- }
- layers[0].access = walker_rule->layers[0].access;
- err = insert_rule(dst, walker_rule->object, &layers,
- ARRAY_SIZE(layers));
- if (err)
- goto out_unlock;
- }
+#if IS_ENABLED(CONFIG_INET)
+ /* Merges the @src network port tree. */
+ err = merge_tree(dst, src, LANDLOCK_KEY_NET_PORT);
+ if (err)
+ goto out_unlock;
+#endif /* IS_ENABLED(CONFIG_INET) */
out_unlock:
mutex_unlock(&src->lock);
@@ -311,10 +405,41 @@ out_unlock:
return err;
}
+static int inherit_tree(struct landlock_ruleset *const parent,
+ struct landlock_ruleset *const child,
+ const enum landlock_key_type key_type)
+{
+ struct landlock_rule *walker_rule, *next_rule;
+ struct rb_root *parent_root;
+ int err = 0;
+
+ might_sleep();
+ lockdep_assert_held(&parent->lock);
+ lockdep_assert_held(&child->lock);
+
+ parent_root = get_root(parent, key_type);
+ if (IS_ERR(parent_root))
+ return PTR_ERR(parent_root);
+
+ /* Copies the @parent inode or network tree. */
+ rbtree_postorder_for_each_entry_safe(walker_rule, next_rule,
+ parent_root, node) {
+ const struct landlock_id id = {
+ .key = walker_rule->key,
+ .type = key_type,
+ };
+
+ err = insert_rule(child, id, &walker_rule->layers,
+ walker_rule->num_layers);
+ if (err)
+ return err;
+ }
+ return err;
+}
+
static int inherit_ruleset(struct landlock_ruleset *const parent,
struct landlock_ruleset *const child)
{
- struct landlock_rule *walker_rule, *next_rule;
int err = 0;
might_sleep();
@@ -325,23 +450,25 @@ static int inherit_ruleset(struct landlock_ruleset *const parent,
mutex_lock(&child->lock);
mutex_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);
- /* Copies the @parent tree. */
- rbtree_postorder_for_each_entry_safe(walker_rule, next_rule,
- &parent->root, node) {
- err = insert_rule(child, walker_rule->object,
- &walker_rule->layers,
- walker_rule->num_layers);
- if (err)
- goto out_unlock;
- }
+ /* Copies the @parent inode tree. */
+ err = inherit_tree(parent, child, LANDLOCK_KEY_INODE);
+ if (err)
+ goto out_unlock;
+
+#if IS_ENABLED(CONFIG_INET)
+ /* Copies the @parent network port tree. */
+ err = inherit_tree(parent, child, LANDLOCK_KEY_NET_PORT);
+ if (err)
+ goto out_unlock;
+#endif /* IS_ENABLED(CONFIG_INET) */
if (WARN_ON_ONCE(child->num_layers <= parent->num_layers)) {
err = -EINVAL;
goto out_unlock;
}
/* Copies the parent layer stack and leaves a space for the new layer. */
- memcpy(child->fs_access_masks, parent->fs_access_masks,
- flex_array_size(parent, fs_access_masks, parent->num_layers));
+ memcpy(child->access_masks, parent->access_masks,
+ flex_array_size(parent, access_masks, parent->num_layers));
if (WARN_ON_ONCE(!parent->hierarchy)) {
err = -EINVAL;
@@ -361,8 +488,16 @@ static void free_ruleset(struct landlock_ruleset *const ruleset)
struct landlock_rule *freeme, *next;
might_sleep();
- rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root, node)
- free_rule(freeme);
+ rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root_inode,
+ node)
+ free_rule(freeme, LANDLOCK_KEY_INODE);
+
+#if IS_ENABLED(CONFIG_INET)
+ rbtree_postorder_for_each_entry_safe(freeme, next,
+ &ruleset->root_net_port, node)
+ free_rule(freeme, LANDLOCK_KEY_NET_PORT);
+#endif /* IS_ENABLED(CONFIG_INET) */
+
put_hierarchy(ruleset->hierarchy);
kfree(ruleset);
}
@@ -453,23 +588,151 @@ out_put_dom:
*/
const struct landlock_rule *
landlock_find_rule(const struct landlock_ruleset *const ruleset,
- const struct landlock_object *const object)
+ const struct landlock_id id)
{
+ const struct rb_root *root;
const struct rb_node *node;
- if (!object)
+ root = get_root((struct landlock_ruleset *)ruleset, id.type);
+ if (IS_ERR(root))
return NULL;
- node = ruleset->root.rb_node;
+ node = root->rb_node;
+
while (node) {
struct landlock_rule *this =
rb_entry(node, struct landlock_rule, node);
- if (this->object == object)
+ if (this->key.data == id.key.data)
return this;
- if (this->object < object)
+ if (this->key.data < id.key.data)
node = node->rb_right;
else
node = node->rb_left;
}
return NULL;
}
+
+/*
+ * @layer_masks is read and may be updated according to the access request and
+ * the matching rule.
+ * @masks_array_size must be equal to ARRAY_SIZE(*layer_masks).
+ *
+ * Returns true if the request is allowed (i.e. relevant layer masks for the
+ * request are empty).
+ */
+bool landlock_unmask_layers(const struct landlock_rule *const rule,
+ const access_mask_t access_request,
+ layer_mask_t (*const layer_masks)[],
+ const size_t masks_array_size)
+{
+ size_t layer_level;
+
+ if (!access_request || !layer_masks)
+ return true;
+ if (!rule)
+ return false;
+
+ /*
+ * An access is granted if, for each policy layer, at least one rule
+ * encountered on the pathwalk grants the requested access,
+ * regardless of its position in the layer stack. We must then check
+ * the remaining layers for each inode, from the first added layer to
+ * the last one. When there is multiple requested accesses, for each
+ * policy layer, the full set of requested accesses may not be granted
+ * by only one rule, but by the union (binary OR) of multiple rules.
+ * E.g. /a/b <execute> + /a <read> => /a/b <execute + read>
+ */
+ for (layer_level = 0; layer_level < rule->num_layers; layer_level++) {
+ const struct landlock_layer *const layer =
+ &rule->layers[layer_level];
+ const layer_mask_t layer_bit = BIT_ULL(layer->level - 1);
+ const unsigned long access_req = access_request;
+ unsigned long access_bit;
+ bool is_empty;
+
+ /*
+ * Records in @layer_masks which layer grants access to each
+ * requested access.
+ */
+ is_empty = true;
+ for_each_set_bit(access_bit, &access_req, masks_array_size) {
+ if (layer->access & BIT_ULL(access_bit))
+ (*layer_masks)[access_bit] &= ~layer_bit;
+ is_empty = is_empty && !(*layer_masks)[access_bit];
+ }
+ if (is_empty)
+ return true;
+ }
+ return false;
+}
+
+typedef access_mask_t
+get_access_mask_t(const struct landlock_ruleset *const ruleset,
+ const u16 layer_level);
+
+/**
+ * landlock_init_layer_masks - Initialize layer masks from an access request
+ *
+ * Populates @layer_masks such that for each access right in @access_request,
+ * the bits for all the layers are set where this access right is handled.
+ *
+ * @domain: The domain that defines the current restrictions.
+ * @access_request: The requested access rights to check.
+ * @layer_masks: It must contain %LANDLOCK_NUM_ACCESS_FS or
+ * %LANDLOCK_NUM_ACCESS_NET elements according to @key_type.
+ * @key_type: The key type to switch between access masks of different types.
+ *
+ * Returns: An access mask with a bit set for each requested access right that
+ * is handled by any of the active layers in @domain.
+ */
+access_mask_t
+landlock_init_layer_masks(const struct landlock_ruleset *const domain,
+ const access_mask_t access_request,
+ layer_mask_t (*const layer_masks)[],
+ const enum landlock_key_type key_type)
+{
+ access_mask_t handled_accesses = 0;
+ size_t layer_level, num_access;
+ get_access_mask_t *get_access_mask;
+
+ switch (key_type) {
+ case LANDLOCK_KEY_INODE:
+ get_access_mask = landlock_get_fs_access_mask;
+ num_access = LANDLOCK_NUM_ACCESS_FS;
+ break;
+
+#if IS_ENABLED(CONFIG_INET)
+ case LANDLOCK_KEY_NET_PORT:
+ get_access_mask = landlock_get_net_access_mask;
+ num_access = LANDLOCK_NUM_ACCESS_NET;
+ break;
+#endif /* IS_ENABLED(CONFIG_INET) */
+
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+
+ memset(layer_masks, 0,
+ array_size(sizeof((*layer_masks)[0]), num_access));
+
+ /* An empty access request can happen because of O_WRONLY | O_RDWR. */
+ if (!access_request)
+ return 0;
+
+ /* Saves all handled accesses per layer. */
+ for (layer_level = 0; layer_level < domain->num_layers; layer_level++) {
+ const unsigned long access_req = access_request;
+ unsigned long access_bit;
+
+ for_each_set_bit(access_bit, &access_req, num_access) {
+ if (BIT_ULL(access_bit) &
+ get_access_mask(domain, layer_level)) {
+ (*layer_masks)[access_bit] |=
+ BIT_ULL(layer_level);
+ handled_accesses |= BIT_ULL(access_bit);
+ }
+ }
+ }
+ return handled_accesses;
+}
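As a stand-alone illustration of the two helpers above (plain user-space C, not kernel code, and with the simplification that a rule's layer index equals its layer level): two access rights, two policy layers, and the /a/b <execute> + /a <read> example from the comment. Seeding the per-access bitmaps of handling layers mirrors landlock_init_layer_masks(); clearing a layer's bit when one of its rules grants a right mirrors landlock_unmask_layers(), and access is allowed once every bitmap is empty.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ACC_EXECUTE 0 /* access bit indexes */
#define ACC_READ    1
#define NUM_ACCESS  2

/* layer_masks[access] = bitmap of layers that still deny this access. */
static bool unmask(const uint16_t rule_grants[], int rule_layers,
		   uint16_t layer_masks[NUM_ACCESS])
{
	for (int layer = 0; layer < rule_layers; layer++)
		for (int acc = 0; acc < NUM_ACCESS; acc++)
			if (rule_grants[layer] & (1u << acc))
				layer_masks[acc] &= ~(1u << layer);

	for (int acc = 0; acc < NUM_ACCESS; acc++)
		if (layer_masks[acc])
			return false; /* some layer still denies an access */
	return true;
}

int main(void)
{
	/* Layer 0 handles EXECUTE and READ, layer 1 handles only READ. */
	uint16_t layer_masks[NUM_ACCESS] = {
		[ACC_EXECUTE] = 0x1, /* still denied by layer 0 */
		[ACC_READ]    = 0x3, /* still denied by layers 0 and 1 */
	};
	/* Rule on /a/b: layer 0 grants EXECUTE. */
	const uint16_t rule_ab[] = { 1u << ACC_EXECUTE };
	/* Rule on /a: layers 0 and 1 grant READ. */
	const uint16_t rule_a[] = { 1u << ACC_READ, 1u << ACC_READ };

	printf("after /a/b: %s\n",
	       unmask(rule_ab, 1, layer_masks) ? "allowed" : "pending");
	printf("after /a:   %s\n",
	       unmask(rule_a, 2, layer_masks) ? "allowed" : "pending");
	return 0;
}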
diff --git a/security/landlock/ruleset.h b/security/landlock/ruleset.h
index 55b1df8f66a8..c7f1526784fd 100644
--- a/security/landlock/ruleset.h
+++ b/security/landlock/ruleset.h
@@ -15,16 +15,35 @@
#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
+#include <uapi/linux/landlock.h>
#include "limits.h"
#include "object.h"
+/*
+ * All access rights that are denied by default, whether they are handled or not
+ * by a ruleset/layer. This must be ORed with all ruleset->access_masks[]
+ * entries when we need to get the absolute handled access masks.
+ */
+/* clang-format off */
+#define LANDLOCK_ACCESS_FS_INITIALLY_DENIED ( \
+ LANDLOCK_ACCESS_FS_REFER)
+/* clang-format on */
+
typedef u16 access_mask_t;
/* Makes sure all filesystem access rights can be stored. */
static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_ACCESS_FS);
+/* Makes sure all network access rights can be stored. */
+static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_ACCESS_NET);
/* Makes sure for_each_set_bit() and for_each_clear_bit() calls are OK. */
static_assert(sizeof(unsigned long) >= sizeof(access_mask_t));
+/* Ruleset access masks. */
+typedef u32 access_masks_t;
+/* Makes sure all ruleset access rights can be stored. */
+static_assert(BITS_PER_TYPE(access_masks_t) >=
+ LANDLOCK_NUM_ACCESS_FS + LANDLOCK_NUM_ACCESS_NET);
+
typedef u16 layer_mask_t;
/* Makes sure all layers can be checked. */
static_assert(BITS_PER_TYPE(layer_mask_t) >= LANDLOCK_MAX_NUM_LAYERS);
@@ -45,6 +64,52 @@ struct landlock_layer {
};
/**
+ * union landlock_key - Key of a ruleset's red-black tree
+ */
+union landlock_key {
+ /**
+ * @object: Pointer to identify a kernel object (e.g. an inode).
+ */
+ struct landlock_object *object;
+ /**
+ * @data: Raw data to identify an arbitrary 32-bit value
+ * (e.g. a TCP port).
+ */
+ uintptr_t data;
+};
+
+/**
+ * enum landlock_key_type - Type of &union landlock_key
+ */
+enum landlock_key_type {
+ /**
+ * @LANDLOCK_KEY_INODE: Type of &landlock_ruleset.root_inode's node
+ * keys.
+ */
+ LANDLOCK_KEY_INODE = 1,
+ /**
+ * @LANDLOCK_KEY_NET_PORT: Type of &landlock_ruleset.root_net_port's
+ * node keys.
+ */
+ LANDLOCK_KEY_NET_PORT,
+};
+
+/**
+ * struct landlock_id - Unique rule identifier for a ruleset
+ */
+struct landlock_id {
+ /**
+ * @key: Identifies either a kernel object (e.g. an inode) or
+ * a raw value (e.g. a TCP port).
+ */
+ union landlock_key key;
+ /**
+ * @type: Type of a landlock_ruleset's root tree.
+ */
+ const enum landlock_key_type type;
+};
+
+/**
* struct landlock_rule - Access rights tied to an object
*/
struct landlock_rule {
@@ -53,12 +118,13 @@ struct landlock_rule {
*/
struct rb_node node;
/**
- * @object: Pointer to identify a kernel object (e.g. an inode). This
- * is used as a key for this ruleset element. This pointer is set once
- * and never modified. It always points to an allocated object because
- * each rule increments the refcount of its object.
+ * @key: A union to identify either a kernel object (e.g. an inode) or
+ * a raw data value (e.g. a network socket port). This is used as a key
+ * for this ruleset element. The key is set once and never modified.
+ * When it is a pointer, it always points to an allocated object because
+ * each rule increments the refcount of that object.
*/
- struct landlock_object *object;
+ union landlock_key key;
/**
* @num_layers: Number of entries in @layers.
*/
@@ -94,11 +160,23 @@ struct landlock_hierarchy {
*/
struct landlock_ruleset {
/**
- * @root: Root of a red-black tree containing &struct landlock_rule
- * nodes. Once a ruleset is tied to a process (i.e. as a domain), this
- * tree is immutable until @usage reaches zero.
+ * @root_inode: Root of a red-black tree containing &struct
+ * landlock_rule nodes with inode object. Once a ruleset is tied to a
+ * process (i.e. as a domain), this tree is immutable until @usage
+ * reaches zero.
*/
- struct rb_root root;
+ struct rb_root root_inode;
+
+#if IS_ENABLED(CONFIG_INET)
+ /**
+ * @root_net_port: Root of a red-black tree containing &struct
+ * landlock_rule nodes with network port. Once a ruleset is tied to a
+ * process (i.e. as a domain), this tree is immutable until @usage
+ * reaches zero.
+ */
+ struct rb_root root_net_port;
+#endif /* IS_ENABLED(CONFIG_INET) */
+
/**
* @hierarchy: Enables hierarchy identification even when a parent
* domain vanishes. This is needed for the ptrace protection.
@@ -110,7 +188,7 @@ struct landlock_ruleset {
* section. This is only used by
* landlock_put_ruleset_deferred() when @usage reaches zero.
* The fields @lock, @usage, @num_rules, @num_layers and
- * @fs_access_masks are then unused.
+ * @access_masks are then unused.
*/
struct work_struct work_free;
struct {
@@ -137,30 +215,31 @@ struct landlock_ruleset {
*/
u32 num_layers;
/**
- * @fs_access_masks: Contains the subset of filesystem
- * actions that are restricted by a ruleset. A domain
- * saves all layers of merged rulesets in a stack
- * (FAM), starting from the first layer to the last
- * one. These layers are used when merging rulesets,
- * for user space backward compatibility (i.e.
- * future-proof), and to properly handle merged
+ * @access_masks: Contains the subset of filesystem and
+ * network actions that are restricted by a ruleset.
+ * A domain saves all layers of merged rulesets in a
+ * stack (FAM), starting from the first layer to the
+ * last one. These layers are used when merging
+ * rulesets, for user space backward compatibility
+ * (i.e. future-proof), and to properly handle merged
* rulesets without overlapping access rights. These
* layers are set once and never changed for the
* lifetime of the ruleset.
*/
- access_mask_t fs_access_masks[];
+ access_masks_t access_masks[];
};
};
};
struct landlock_ruleset *
-landlock_create_ruleset(const access_mask_t fs_access_mask);
+landlock_create_ruleset(const access_mask_t access_mask_fs,
+ const access_mask_t access_mask_net);
void landlock_put_ruleset(struct landlock_ruleset *const ruleset);
void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset);
int landlock_insert_rule(struct landlock_ruleset *const ruleset,
- struct landlock_object *const object,
+ const struct landlock_id id,
const access_mask_t access);
struct landlock_ruleset *
@@ -169,7 +248,7 @@ landlock_merge_ruleset(struct landlock_ruleset *const parent,
const struct landlock_rule *
landlock_find_rule(const struct landlock_ruleset *const ruleset,
- const struct landlock_object *const object);
+ const struct landlock_id id);
static inline void landlock_get_ruleset(struct landlock_ruleset *const ruleset)
{
@@ -177,4 +256,68 @@ static inline void landlock_get_ruleset(struct landlock_ruleset *const ruleset)
refcount_inc(&ruleset->usage);
}
+static inline void
+landlock_add_fs_access_mask(struct landlock_ruleset *const ruleset,
+ const access_mask_t fs_access_mask,
+ const u16 layer_level)
+{
+ access_mask_t fs_mask = fs_access_mask & LANDLOCK_MASK_ACCESS_FS;
+
+ /* Should already be checked in sys_landlock_create_ruleset(). */
+ WARN_ON_ONCE(fs_access_mask != fs_mask);
+ ruleset->access_masks[layer_level] |=
+ (fs_mask << LANDLOCK_SHIFT_ACCESS_FS);
+}
+
+static inline void
+landlock_add_net_access_mask(struct landlock_ruleset *const ruleset,
+ const access_mask_t net_access_mask,
+ const u16 layer_level)
+{
+ access_mask_t net_mask = net_access_mask & LANDLOCK_MASK_ACCESS_NET;
+
+ /* Should already be checked in sys_landlock_create_ruleset(). */
+ WARN_ON_ONCE(net_access_mask != net_mask);
+ ruleset->access_masks[layer_level] |=
+ (net_mask << LANDLOCK_SHIFT_ACCESS_NET);
+}
+
+static inline access_mask_t
+landlock_get_raw_fs_access_mask(const struct landlock_ruleset *const ruleset,
+ const u16 layer_level)
+{
+ return (ruleset->access_masks[layer_level] >>
+ LANDLOCK_SHIFT_ACCESS_FS) &
+ LANDLOCK_MASK_ACCESS_FS;
+}
+
+static inline access_mask_t
+landlock_get_fs_access_mask(const struct landlock_ruleset *const ruleset,
+ const u16 layer_level)
+{
+ /* Handles all access rights that are initially denied by default. */
+ return landlock_get_raw_fs_access_mask(ruleset, layer_level) |
+ LANDLOCK_ACCESS_FS_INITIALLY_DENIED;
+}
+
+static inline access_mask_t
+landlock_get_net_access_mask(const struct landlock_ruleset *const ruleset,
+ const u16 layer_level)
+{
+ return (ruleset->access_masks[layer_level] >>
+ LANDLOCK_SHIFT_ACCESS_NET) &
+ LANDLOCK_MASK_ACCESS_NET;
+}
+
+bool landlock_unmask_layers(const struct landlock_rule *const rule,
+ const access_mask_t access_request,
+ layer_mask_t (*const layer_masks)[],
+ const size_t masks_array_size);
+
+access_mask_t
+landlock_init_layer_masks(const struct landlock_ruleset *const domain,
+ const access_mask_t access_request,
+ layer_mask_t (*const layer_masks)[],
+ const enum landlock_key_type key_type);
+
#endif /* _SECURITY_LANDLOCK_RULESET_H */
diff --git a/security/landlock/setup.c b/security/landlock/setup.c
index 0f6113528fa4..3e11d303542f 100644
--- a/security/landlock/setup.c
+++ b/security/landlock/setup.c
@@ -12,6 +12,7 @@
#include "common.h"
#include "cred.h"
#include "fs.h"
+#include "net.h"
#include "ptrace.h"
#include "setup.h"
@@ -29,6 +30,7 @@ static int __init landlock_init(void)
landlock_add_cred_hooks();
landlock_add_ptrace_hooks();
landlock_add_fs_hooks();
+ landlock_add_net_hooks();
landlock_initialized = true;
pr_info("Up and running.\n");
return 0;
diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c
index 245cc650a4dc..898358f57fa0 100644
--- a/security/landlock/syscalls.c
+++ b/security/landlock/syscalls.c
@@ -29,6 +29,7 @@
#include "cred.h"
#include "fs.h"
#include "limits.h"
+#include "net.h"
#include "ruleset.h"
#include "setup.h"
@@ -74,7 +75,8 @@ static void build_check_abi(void)
{
struct landlock_ruleset_attr ruleset_attr;
struct landlock_path_beneath_attr path_beneath_attr;
- size_t ruleset_size, path_beneath_size;
+ struct landlock_net_port_attr net_port_attr;
+ size_t ruleset_size, path_beneath_size, net_port_size;
/*
* For each user space ABI structures, first checks that there is no
@@ -82,13 +84,19 @@ static void build_check_abi(void)
* struct size.
*/
ruleset_size = sizeof(ruleset_attr.handled_access_fs);
+ ruleset_size += sizeof(ruleset_attr.handled_access_net);
BUILD_BUG_ON(sizeof(ruleset_attr) != ruleset_size);
- BUILD_BUG_ON(sizeof(ruleset_attr) != 8);
+ BUILD_BUG_ON(sizeof(ruleset_attr) != 16);
path_beneath_size = sizeof(path_beneath_attr.allowed_access);
path_beneath_size += sizeof(path_beneath_attr.parent_fd);
BUILD_BUG_ON(sizeof(path_beneath_attr) != path_beneath_size);
BUILD_BUG_ON(sizeof(path_beneath_attr) != 12);
+
+ net_port_size = sizeof(net_port_attr.allowed_access);
+ net_port_size += sizeof(net_port_attr.port);
+ BUILD_BUG_ON(sizeof(net_port_attr) != net_port_size);
+ BUILD_BUG_ON(sizeof(net_port_attr) != 16);
}
/* Ruleset handling */
@@ -129,7 +137,7 @@ static const struct file_operations ruleset_fops = {
.write = fop_dummy_write,
};
-#define LANDLOCK_ABI_VERSION 3
+#define LANDLOCK_ABI_VERSION 4
/**
* sys_landlock_create_ruleset - Create a new ruleset
@@ -188,8 +196,14 @@ SYSCALL_DEFINE3(landlock_create_ruleset,
LANDLOCK_MASK_ACCESS_FS)
return -EINVAL;
+ /* Checks network content (and 32-bit cast). */
+ if ((ruleset_attr.handled_access_net | LANDLOCK_MASK_ACCESS_NET) !=
+ LANDLOCK_MASK_ACCESS_NET)
+ return -EINVAL;
+
/* Checks arguments and transforms to kernel struct. */
- ruleset = landlock_create_ruleset(ruleset_attr.handled_access_fs);
+ ruleset = landlock_create_ruleset(ruleset_attr.handled_access_fs,
+ ruleset_attr.handled_access_net);
if (IS_ERR(ruleset))
return PTR_ERR(ruleset);
@@ -274,13 +288,84 @@ out_fdput:
return err;
}
+static int add_rule_path_beneath(struct landlock_ruleset *const ruleset,
+ const void __user *const rule_attr)
+{
+ struct landlock_path_beneath_attr path_beneath_attr;
+ struct path path;
+ int res, err;
+ access_mask_t mask;
+
+ /* Copies raw user space buffer. */
+ res = copy_from_user(&path_beneath_attr, rule_attr,
+ sizeof(path_beneath_attr));
+ if (res)
+ return -EFAULT;
+
+ /*
+ * Informs about useless rule: empty allowed_access (i.e. deny rules)
+ * are ignored in path walks.
+ */
+ if (!path_beneath_attr.allowed_access)
+ return -ENOMSG;
+
+ /* Checks that allowed_access matches the @ruleset constraints. */
+ mask = landlock_get_raw_fs_access_mask(ruleset, 0);
+ if ((path_beneath_attr.allowed_access | mask) != mask)
+ return -EINVAL;
+
+ /* Gets and checks the new rule. */
+ err = get_path_from_fd(path_beneath_attr.parent_fd, &path);
+ if (err)
+ return err;
+
+ /* Imports the new rule. */
+ err = landlock_append_fs_rule(ruleset, &path,
+ path_beneath_attr.allowed_access);
+ path_put(&path);
+ return err;
+}
+
+static int add_rule_net_port(struct landlock_ruleset *ruleset,
+ const void __user *const rule_attr)
+{
+ struct landlock_net_port_attr net_port_attr;
+ int res;
+ access_mask_t mask;
+
+ /* Copies raw user space buffer. */
+ res = copy_from_user(&net_port_attr, rule_attr, sizeof(net_port_attr));
+ if (res)
+ return -EFAULT;
+
+ /*
+ * Informs about useless rule: empty allowed_access (i.e. deny rules)
+ * are ignored by network actions.
+ */
+ if (!net_port_attr.allowed_access)
+ return -ENOMSG;
+
+ /* Checks that allowed_access matches the @ruleset constraints. */
+ mask = landlock_get_net_access_mask(ruleset, 0);
+ if ((net_port_attr.allowed_access | mask) != mask)
+ return -EINVAL;
+
+ /* Denies inserting a rule with port greater than 65535. */
+ if (net_port_attr.port > U16_MAX)
+ return -EINVAL;
+
+ /* Imports the new rule. */
+ return landlock_append_net_rule(ruleset, net_port_attr.port,
+ net_port_attr.allowed_access);
+}
+
/**
* sys_landlock_add_rule - Add a new rule to a ruleset
*
* @ruleset_fd: File descriptor tied to the ruleset that should be extended
* with the new rule.
- * @rule_type: Identify the structure type pointed to by @rule_attr (only
- * %LANDLOCK_RULE_PATH_BENEATH for now).
+ * @rule_type: Identify the structure type pointed to by @rule_attr:
+ * %LANDLOCK_RULE_PATH_BENEATH or %LANDLOCK_RULE_NET_PORT.
* @rule_attr: Pointer to a rule (only of type &struct
* landlock_path_beneath_attr for now).
* @flags: Must be 0.
@@ -291,9 +376,13 @@ out_fdput:
* Possible returned errors are:
*
* - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time;
+ * - %EAFNOSUPPORT: @rule_type is %LANDLOCK_RULE_NET_PORT but TCP/IP is not
+ * supported by the running kernel;
* - %EINVAL: @flags is not 0, or inconsistent access in the rule (i.e.
- * &landlock_path_beneath_attr.allowed_access is not a subset of the
- * ruleset handled accesses);
+ * &landlock_path_beneath_attr.allowed_access or
+ * &landlock_net_port_attr.allowed_access is not a subset of the
+ * ruleset handled accesses), or &landlock_net_port_attr.port is
+ * greater than 65535;
* - %ENOMSG: Empty accesses (e.g. &landlock_path_beneath_attr.allowed_access);
* - %EBADF: @ruleset_fd is not a file descriptor for the current thread, or a
* member of @rule_attr is not a file descriptor as expected;
@@ -306,10 +395,8 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd,
const enum landlock_rule_type, rule_type,
const void __user *const, rule_attr, const __u32, flags)
{
- struct landlock_path_beneath_attr path_beneath_attr;
- struct path path;
struct landlock_ruleset *ruleset;
- int res, err;
+ int err;
if (!landlock_initialized)
return -EOPNOTSUPP;
@@ -323,48 +410,17 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd,
if (IS_ERR(ruleset))
return PTR_ERR(ruleset);
- if (rule_type != LANDLOCK_RULE_PATH_BENEATH) {
+ switch (rule_type) {
+ case LANDLOCK_RULE_PATH_BENEATH:
+ err = add_rule_path_beneath(ruleset, rule_attr);
+ break;
+ case LANDLOCK_RULE_NET_PORT:
+ err = add_rule_net_port(ruleset, rule_attr);
+ break;
+ default:
err = -EINVAL;
- goto out_put_ruleset;
- }
-
- /* Copies raw user space buffer, only one type for now. */
- res = copy_from_user(&path_beneath_attr, rule_attr,
- sizeof(path_beneath_attr));
- if (res) {
- err = -EFAULT;
- goto out_put_ruleset;
+ break;
}
-
- /*
- * Informs about useless rule: empty allowed_access (i.e. deny rules)
- * are ignored in path walks.
- */
- if (!path_beneath_attr.allowed_access) {
- err = -ENOMSG;
- goto out_put_ruleset;
- }
- /*
- * Checks that allowed_access matches the @ruleset constraints
- * (ruleset->fs_access_masks[0] is automatically upgraded to 64-bits).
- */
- if ((path_beneath_attr.allowed_access | ruleset->fs_access_masks[0]) !=
- ruleset->fs_access_masks[0]) {
- err = -EINVAL;
- goto out_put_ruleset;
- }
-
- /* Gets and checks the new rule. */
- err = get_path_from_fd(path_beneath_attr.parent_fd, &path);
- if (err)
- goto out_put_ruleset;
-
- /* Imports the new rule. */
- err = landlock_append_fs_rule(ruleset, &path,
- path_beneath_attr.allowed_access);
- path_put(&path);
-
-out_put_ruleset:
landlock_put_ruleset(ruleset);
return err;
}
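Putting the new ABI pieces together, a minimal user-space sketch (assuming <linux/landlock.h> from an ABI v4 kernel and the raw syscall numbers from <sys/syscall.h>): handle both TCP rights so everything is denied by default, allow connect() to port 443 only, then enforce the ruleset.

#define _GNU_SOURCE
#include <linux/landlock.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* Handle both TCP rights so anything not explicitly allowed is denied. */
	struct landlock_ruleset_attr ruleset_attr = {
		.handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP |
				      LANDLOCK_ACCESS_NET_CONNECT_TCP,
	};
	/* Allow outgoing TCP connections to port 443 only. */
	struct landlock_net_port_attr net_attr = {
		.allowed_access = LANDLOCK_ACCESS_NET_CONNECT_TCP,
		.port = 443,
	};
	int ruleset_fd;

	ruleset_fd = syscall(__NR_landlock_create_ruleset, &ruleset_attr,
			     sizeof(ruleset_attr), 0);
	if (ruleset_fd < 0) {
		perror("landlock_create_ruleset");
		return 1;
	}
	if (syscall(__NR_landlock_add_rule, ruleset_fd, LANDLOCK_RULE_NET_PORT,
		    &net_attr, 0)) {
		perror("landlock_add_rule");
		return 1;
	}
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
	    syscall(__NR_landlock_restrict_self, ruleset_fd, 0)) {
		perror("landlock_restrict_self");
		return 1;
	}
	close(ruleset_fd);
	/* From here on, this thread can connect() TCP sockets to port 443 only
	 * and cannot bind() TCP sockets at all. */
	return 0;
}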
diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build
index fac42486a8cf..5fb3fb3d97e0 100644
--- a/tools/build/Makefile.build
+++ b/tools/build/Makefile.build
@@ -20,7 +20,15 @@ else
Q=@
endif
-ifneq ($(findstring s,$(filter-out --%,$(MAKEFLAGS))),)
+# If the user is running make -s (silent mode), suppress echoing of commands.
+# make-4.0 (and later) keeps single-letter options in the 1st word of MAKEFLAGS.
+ifeq ($(filter 3.%,$(MAKE_VERSION)),)
+short-opts := $(firstword -$(MAKEFLAGS))
+else
+short-opts := $(filter-out --%,$(MAKEFLAGS))
+endif
+
+ifneq ($(findstring s,$(short-opts)),)
quiet=silent_
endif
diff --git a/tools/iio/iio_event_monitor.c b/tools/iio/iio_event_monitor.c
index 0a5c2bb60030..2eaaa7123b04 100644
--- a/tools/iio/iio_event_monitor.c
+++ b/tools/iio/iio_event_monitor.c
@@ -59,6 +59,10 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_POSITIONRELATIVE] = "positionrelative",
[IIO_PHASE] = "phase",
[IIO_MASSCONCENTRATION] = "massconcentration",
+ [IIO_DELTA_ANGL] = "deltaangl",
+ [IIO_DELTA_VELOCITY] = "deltavelocity",
+ [IIO_COLORTEMP] = "colortemp",
+ [IIO_CHROMATICITY] = "chromaticity",
};
static const char * const iio_ev_type_text[] = {
@@ -173,6 +177,10 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_POSITIONRELATIVE:
case IIO_PHASE:
case IIO_MASSCONCENTRATION:
+ case IIO_DELTA_ANGL:
+ case IIO_DELTA_VELOCITY:
+ case IIO_COLORTEMP:
+ case IIO_CHROMATICITY:
break;
default:
return false;
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
index 44bbf80f0cfd..0d0a7a19d6f9 100644
--- a/tools/iio/iio_generic_buffer.c
+++ b/tools/iio/iio_generic_buffer.c
@@ -54,9 +54,12 @@ enum autochan {
static unsigned int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
{
unsigned int bytes = 0;
- int i = 0;
+ int i = 0, max = 0;
+ unsigned int misalignment;
while (i < num_channels) {
+ if (channels[i].bytes > max)
+ max = channels[i].bytes;
if (bytes % channels[i].bytes == 0)
channels[i].location = bytes;
else
@@ -66,6 +69,14 @@ static unsigned int size_from_channelarray(struct iio_channel_info *channels, in
bytes = channels[i].location + channels[i].bytes;
i++;
}
+ /*
+	 * We want the data in the next sample to also be properly aligned, so
+	 * we'll add padding at the end if needed. Adding padding only
+	 * works for channel data whose size is 2^n bytes.
+ */
+ misalignment = bytes % max;
+ if (misalignment)
+ bytes += max - misalignment;
return bytes;
}
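
As an editorial aside (not part of the patch), the padding rule in the hunk above can be checked with a small standalone C sketch; it mirrors the logic of size_from_channelarray() under the stated assumption that every channel size is a power of two.

#include <stdio.h>

/*
 * Hypothetical, simplified model of the logic above: each channel is
 * placed at the next offset aligned to its own size, and the total is
 * padded to a multiple of the largest channel size so that the next
 * sample starts properly aligned as well (sizes assumed to be 2^n).
 */
static unsigned int scan_size(const unsigned int *sizes, int n)
{
	unsigned int bytes = 0, max = 0, misalignment;
	int i;

	for (i = 0; i < n; i++) {
		if (sizes[i] > max)
			max = sizes[i];
		if (bytes % sizes[i])
			bytes += sizes[i] - bytes % sizes[i];
		bytes += sizes[i];
	}
	misalignment = bytes % max;
	if (misalignment)
		bytes += max - misalignment;
	return bytes;
}

int main(void)
{
	/* A 2-byte, an 8-byte and a 2-byte channel land at offsets 0, 8
	 * and 16; the 18 bytes used are padded up to 24. */
	unsigned int sizes[] = { 2, 8, 2 };

	printf("%u\n", scan_size(sizes, 3)); /* prints 24 */
	return 0;
}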
diff --git a/tools/include/asm-generic/unaligned.h b/tools/include/asm-generic/unaligned.h
index 47387c607035..156743d399ae 100644
--- a/tools/include/asm-generic/unaligned.h
+++ b/tools/include/asm-generic/unaligned.h
@@ -1,10 +1,13 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_UNALIGNED_H
+#define __ASM_GENERIC_UNALIGNED_H
+
/*
- * Copied from the kernel sources to tools/perf/:
+ * This is the most generic implementation of unaligned accesses
+ * and should work almost anywhere.
*/
-
-#ifndef __TOOLS_LINUX_ASM_GENERIC_UNALIGNED_H
-#define __TOOLS_LINUX_ASM_GENERIC_UNALIGNED_H
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpacked"
#define __get_unaligned_t(type, ptr) ({ \
const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
@@ -19,5 +22,135 @@
#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr))
-#endif /* __TOOLS_LINUX_ASM_GENERIC_UNALIGNED_H */
+static inline u16 get_unaligned_le16(const void *p)
+{
+ return le16_to_cpu(__get_unaligned_t(__le16, p));
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+ return le32_to_cpu(__get_unaligned_t(__le32, p));
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+ return le64_to_cpu(__get_unaligned_t(__le64, p));
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+ __put_unaligned_t(__le16, cpu_to_le16(val), p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+ __put_unaligned_t(__le32, cpu_to_le32(val), p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+ __put_unaligned_t(__le64, cpu_to_le64(val), p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+ return be16_to_cpu(__get_unaligned_t(__be16, p));
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+ return be32_to_cpu(__get_unaligned_t(__be32, p));
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+ return be64_to_cpu(__get_unaligned_t(__be64, p));
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+ __put_unaligned_t(__be16, cpu_to_be16(val), p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+ __put_unaligned_t(__be32, cpu_to_be32(val), p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+ __put_unaligned_t(__be64, cpu_to_be64(val), p);
+}
+
+static inline u32 __get_unaligned_be24(const u8 *p)
+{
+ return p[0] << 16 | p[1] << 8 | p[2];
+}
+
+static inline u32 get_unaligned_be24(const void *p)
+{
+ return __get_unaligned_be24(p);
+}
+
+static inline u32 __get_unaligned_le24(const u8 *p)
+{
+ return p[0] | p[1] << 8 | p[2] << 16;
+}
+
+static inline u32 get_unaligned_le24(const void *p)
+{
+ return __get_unaligned_le24(p);
+}
+
+static inline void __put_unaligned_be24(const u32 val, u8 *p)
+{
+ *p++ = val >> 16;
+ *p++ = val >> 8;
+ *p++ = val;
+}
+
+static inline void put_unaligned_be24(const u32 val, void *p)
+{
+ __put_unaligned_be24(val, p);
+}
+
+static inline void __put_unaligned_le24(const u32 val, u8 *p)
+{
+ *p++ = val;
+ *p++ = val >> 8;
+ *p++ = val >> 16;
+}
+
+static inline void put_unaligned_le24(const u32 val, void *p)
+{
+ __put_unaligned_le24(val, p);
+}
+
+static inline void __put_unaligned_be48(const u64 val, u8 *p)
+{
+ *p++ = val >> 40;
+ *p++ = val >> 32;
+ *p++ = val >> 24;
+ *p++ = val >> 16;
+ *p++ = val >> 8;
+ *p++ = val;
+}
+
+static inline void put_unaligned_be48(const u64 val, void *p)
+{
+ __put_unaligned_be48(val, p);
+}
+
+static inline u64 __get_unaligned_be48(const u8 *p)
+{
+ return (u64)p[0] << 40 | (u64)p[1] << 32 | (u64)p[2] << 24 |
+ p[3] << 16 | p[4] << 8 | p[5];
+}
+
+static inline u64 get_unaligned_be48(const void *p)
+{
+ return __get_unaligned_be48(p);
+}
+#pragma GCC diagnostic pop
+#endif /* __ASM_GENERIC_UNALIGNED_H */
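
For illustration only: the accessors added above reduce to byte-order-aware loads from possibly misaligned memory. The sketch below models get_unaligned_le16() and get_unaligned_be24() with plain stdint arithmetic; the model_ names are invented here and are not part of the header.

#include <stdint.h>
#include <stdio.h>

/* Standalone model of the semantics: read 2 little-endian bytes and
 * 3 big-endian bytes from an arbitrarily aligned buffer. */
static uint16_t model_get_unaligned_le16(const void *p)
{
	const uint8_t *b = p;

	return (uint16_t)(b[0] | b[1] << 8);
}

static uint32_t model_get_unaligned_be24(const void *p)
{
	const uint8_t *b = p;

	return (uint32_t)(b[0] << 16 | b[1] << 8 | b[2]);
}

int main(void)
{
	/* 5-byte buffer; both reads start at odd (misaligned) offsets. */
	uint8_t buf[] = { 0xff, 0x34, 0x12, 0xab, 0xcd };

	printf("%#x\n", model_get_unaligned_le16(buf + 1));              /* 0x1234 */
	printf("%#x\n", (unsigned int)model_get_unaligned_be24(buf + 2)); /* 0x12abcd */
	return 0;
}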
diff --git a/tools/lib/api/io.h b/tools/lib/api/io.h
index 9fc429d2852d..a77b74c5fb65 100644
--- a/tools/lib/api/io.h
+++ b/tools/lib/api/io.h
@@ -180,6 +180,7 @@ static inline ssize_t io__getline(struct io *io, char **line_out, size_t *line_l
return line_len;
err_out:
free(line);
+ *line_out = NULL;
return -ENOMEM;
}
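
The added *line_out = NULL matters to callers that free the returned line unconditionally. A hypothetical caller fragment (the io argument and process() are placeholders, not taken from the tree) shows the pattern the fix protects:

/*
 * Hypothetical caller sketch: because io__getline() now NULLs *line_out on
 * its error path, the unconditional free(line) below stays safe whether the
 * last call succeeded or failed; before the fix it could free a pointer that
 * io__getline() had already freed.
 */
static void read_all_lines(struct io *io)
{
	char *line = NULL;
	size_t line_len = 0;

	while (io__getline(io, &line, &line_len) > 0)
		process(line);	/* placeholder for the caller's work */

	free(line);
}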
diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
index b8b066d0dc5e..3acbbccc1901 100644
--- a/tools/lib/perf/evlist.c
+++ b/tools/lib/perf/evlist.c
@@ -738,3 +738,12 @@ int perf_evlist__nr_groups(struct perf_evlist *evlist)
}
return nr_groups;
}
+
+void perf_evlist__go_system_wide(struct perf_evlist *evlist, struct perf_evsel *evsel)
+{
+ if (!evsel->system_wide) {
+ evsel->system_wide = true;
+ if (evlist->needs_map_propagation)
+ __perf_evlist__propagate_maps(evlist, evsel);
+ }
+}
diff --git a/tools/lib/perf/include/internal/evlist.h b/tools/lib/perf/include/internal/evlist.h
index 3339bc2f1765..d86ffe8ed483 100644
--- a/tools/lib/perf/include/internal/evlist.h
+++ b/tools/lib/perf/include/internal/evlist.h
@@ -135,4 +135,6 @@ int perf_evlist__id_add_fd(struct perf_evlist *evlist,
void perf_evlist__reset_id_hash(struct perf_evlist *evlist);
void __perf_evlist__set_leader(struct list_head *list, struct perf_evsel *leader);
+
+void perf_evlist__go_system_wide(struct perf_evlist *evlist, struct perf_evsel *evsel);
#endif /* __LIBPERF_INTERNAL_EVLIST_H */
diff --git a/tools/lib/perf/include/internal/rc_check.h b/tools/lib/perf/include/internal/rc_check.h
index d5d771ccdc7b..f80ddfc80129 100644
--- a/tools/lib/perf/include/internal/rc_check.h
+++ b/tools/lib/perf/include/internal/rc_check.h
@@ -9,8 +9,12 @@
* Enable reference count checking implicitly with leak checking, which is
* integrated into address sanitizer.
*/
-#if defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
+#if defined(__SANITIZE_ADDRESS__) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
#define REFCNT_CHECKING 1
+#elif defined(__has_feature)
+#if __has_feature(address_sanitizer) || __has_feature(leak_sanitizer)
+#define REFCNT_CHECKING 1
+#endif
#endif
/*
@@ -50,6 +54,9 @@
/* A put operation removing the indirection layer. */
#define RC_CHK_PUT(object) {}
+/* Pointer equality when the indirection may or may not be there. */
+#define RC_CHK_EQUAL(object1, object2) (object1 == object2)
+
#else
/* Replaces "struct foo" so that the pointer may be interposed. */
@@ -97,6 +104,10 @@
} \
} while(0)
+/* Pointer equality when the indirection may or may not be there. */
+#define RC_CHK_EQUAL(object1, object2) (object1 == object2 || \
+ (object1 && object2 && object1->orig == object2->orig))
+
#endif
#endif /* __LIBPERF_INTERNAL_RC_CHECK_H */
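
A minimal mock (types and names invented here, not libperf's) of the situation RC_CHK_EQUAL has to handle when reference-count checking is enabled: two distinct wrapper allocations can still refer to the same underlying object through ->orig, so equality must look one level down.

#include <stdbool.h>

/* Mock of the checked layout: the exposed pointer is a small wrapper
 * whose ->orig points at the real object, so equality must compare the
 * wrapped originals, not just the wrapper addresses. */
struct original { int refcount; };
struct wrapper  { struct original *orig; };

static bool mock_rc_chk_equal(const struct wrapper *a, const struct wrapper *b)
{
	return a == b || (a && b && a->orig == b->orig);
}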
diff --git a/tools/net/ynl/generated/devlink-user.c b/tools/net/ynl/generated/devlink-user.c
index 75b744b47986..bc5065bd99b2 100644
--- a/tools/net/ynl/generated/devlink-user.c
+++ b/tools/net/ynl/generated/devlink-user.c
@@ -121,6 +121,8 @@ const char *devlink_port_fn_opstate_str(enum devlink_port_fn_opstate value)
static const char * const devlink_port_fn_attr_cap_strmap[] = {
[0] = "roce-bit",
[1] = "migratable-bit",
+ [2] = "ipsec-crypto-bit",
+ [3] = "ipsec-packet-bit",
};
const char *devlink_port_fn_attr_cap_str(enum devlink_port_fn_attr_cap value)
diff --git a/tools/net/ynl/generated/nfsd-user.c b/tools/net/ynl/generated/nfsd-user.c
index fec6828680ce..360b6448c6e9 100644
--- a/tools/net/ynl/generated/nfsd-user.c
+++ b/tools/net/ynl/generated/nfsd-user.c
@@ -50,9 +50,116 @@ struct ynl_policy_nest nfsd_rpc_status_nest = {
/* Common nested types */
/* ============== NFSD_CMD_RPC_STATUS_GET ============== */
/* NFSD_CMD_RPC_STATUS_GET - dump */
-void nfsd_rpc_status_get_list_free(struct nfsd_rpc_status_get_list *rsp)
+int nfsd_rpc_status_get_rsp_dump_parse(const struct nlmsghdr *nlh, void *data)
{
- struct nfsd_rpc_status_get_list *next = rsp;
+ struct nfsd_rpc_status_get_rsp_dump *dst;
+ struct ynl_parse_arg *yarg = data;
+ unsigned int n_compound_ops = 0;
+ const struct nlattr *attr;
+ int i;
+
+ dst = yarg->data;
+
+ if (dst->compound_ops)
+ return ynl_error_parse(yarg, "attribute already present (rpc-status.compound-ops)");
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == NFSD_A_RPC_STATUS_XID) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.xid = 1;
+ dst->xid = mnl_attr_get_u32(attr);
+ } else if (type == NFSD_A_RPC_STATUS_FLAGS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.flags = 1;
+ dst->flags = mnl_attr_get_u32(attr);
+ } else if (type == NFSD_A_RPC_STATUS_PROG) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.prog = 1;
+ dst->prog = mnl_attr_get_u32(attr);
+ } else if (type == NFSD_A_RPC_STATUS_VERSION) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.version = 1;
+ dst->version = mnl_attr_get_u8(attr);
+ } else if (type == NFSD_A_RPC_STATUS_PROC) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.proc = 1;
+ dst->proc = mnl_attr_get_u32(attr);
+ } else if (type == NFSD_A_RPC_STATUS_SERVICE_TIME) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.service_time = 1;
+ dst->service_time = mnl_attr_get_u64(attr);
+ } else if (type == NFSD_A_RPC_STATUS_SADDR4) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.saddr4 = 1;
+ dst->saddr4 = mnl_attr_get_u32(attr);
+ } else if (type == NFSD_A_RPC_STATUS_DADDR4) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.daddr4 = 1;
+ dst->daddr4 = mnl_attr_get_u32(attr);
+ } else if (type == NFSD_A_RPC_STATUS_SADDR6) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = mnl_attr_get_payload_len(attr);
+ dst->_present.saddr6_len = len;
+ dst->saddr6 = malloc(len);
+ memcpy(dst->saddr6, mnl_attr_get_payload(attr), len);
+ } else if (type == NFSD_A_RPC_STATUS_DADDR6) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = mnl_attr_get_payload_len(attr);
+ dst->_present.daddr6_len = len;
+ dst->daddr6 = malloc(len);
+ memcpy(dst->daddr6, mnl_attr_get_payload(attr), len);
+ } else if (type == NFSD_A_RPC_STATUS_SPORT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.sport = 1;
+ dst->sport = mnl_attr_get_u16(attr);
+ } else if (type == NFSD_A_RPC_STATUS_DPORT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dport = 1;
+ dst->dport = mnl_attr_get_u16(attr);
+ } else if (type == NFSD_A_RPC_STATUS_COMPOUND_OPS) {
+ n_compound_ops++;
+ }
+ }
+
+ if (n_compound_ops) {
+ dst->compound_ops = calloc(n_compound_ops, sizeof(*dst->compound_ops));
+ dst->n_compound_ops = n_compound_ops;
+ i = 0;
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ if (mnl_attr_get_type(attr) == NFSD_A_RPC_STATUS_COMPOUND_OPS) {
+ dst->compound_ops[i] = mnl_attr_get_u32(attr);
+ i++;
+ }
+ }
+ }
+
+ return MNL_CB_OK;
+}
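
The compound-ops handling above uses a common two-pass pattern over the attribute stream: count the matching attributes, allocate exactly that many slots, then walk again to fill them. A standalone sketch of that pattern with plain arrays (hypothetical names, no netlink involved):

#include <stdlib.h>

/* Two-pass collection: first count entries of the wanted type, then
 * allocate exactly that many slots and copy the values on a second walk. */
static unsigned int *collect_type(const int *types, const unsigned int *vals,
				  int n, int wanted, unsigned int *n_out)
{
	unsigned int count = 0, *out;
	int i, j = 0;

	for (i = 0; i < n; i++)
		if (types[i] == wanted)
			count++;

	*n_out = count;
	if (!count)
		return NULL;

	out = calloc(count, sizeof(*out));
	if (!out)
		return NULL;

	for (i = 0; i < n; i++)
		if (types[i] == wanted)
			out[j++] = vals[i];

	return out;
}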
+
+void
+nfsd_rpc_status_get_rsp_list_free(struct nfsd_rpc_status_get_rsp_list *rsp)
+{
+ struct nfsd_rpc_status_get_rsp_list *next = rsp;
while ((void *)next != YNL_LIST_END) {
rsp = next;
@@ -65,15 +172,16 @@ void nfsd_rpc_status_get_list_free(struct nfsd_rpc_status_get_list *rsp)
}
}
-struct nfsd_rpc_status_get_list *nfsd_rpc_status_get_dump(struct ynl_sock *ys)
+struct nfsd_rpc_status_get_rsp_list *
+nfsd_rpc_status_get_dump(struct ynl_sock *ys)
{
struct ynl_dump_state yds = {};
struct nlmsghdr *nlh;
int err;
yds.ys = ys;
- yds.alloc_sz = sizeof(struct nfsd_rpc_status_get_list);
- yds.cb = nfsd_rpc_status_get_rsp_parse;
+ yds.alloc_sz = sizeof(struct nfsd_rpc_status_get_rsp_list);
+ yds.cb = nfsd_rpc_status_get_rsp_dump_parse;
yds.rsp_cmd = NFSD_CMD_RPC_STATUS_GET;
yds.rsp_policy = &nfsd_rpc_status_nest;
@@ -86,7 +194,7 @@ struct nfsd_rpc_status_get_list *nfsd_rpc_status_get_dump(struct ynl_sock *ys)
return yds.first;
free_list:
- nfsd_rpc_status_get_list_free(yds.first);
+ nfsd_rpc_status_get_rsp_list_free(yds.first);
return NULL;
}
diff --git a/tools/net/ynl/generated/nfsd-user.h b/tools/net/ynl/generated/nfsd-user.h
index b6b69501031a..989c6e209ced 100644
--- a/tools/net/ynl/generated/nfsd-user.h
+++ b/tools/net/ynl/generated/nfsd-user.h
@@ -21,13 +21,47 @@ const char *nfsd_op_str(int op);
/* Common nested types */
/* ============== NFSD_CMD_RPC_STATUS_GET ============== */
/* NFSD_CMD_RPC_STATUS_GET - dump */
-struct nfsd_rpc_status_get_list {
- struct nfsd_rpc_status_get_list *next;
- struct nfsd_rpc_status_get_rsp obj __attribute__ ((aligned (8)));
+struct nfsd_rpc_status_get_rsp_dump {
+ struct {
+ __u32 xid:1;
+ __u32 flags:1;
+ __u32 prog:1;
+ __u32 version:1;
+ __u32 proc:1;
+ __u32 service_time:1;
+ __u32 saddr4:1;
+ __u32 daddr4:1;
+ __u32 saddr6_len;
+ __u32 daddr6_len;
+ __u32 sport:1;
+ __u32 dport:1;
+ } _present;
+
+ __u32 xid /* big-endian */;
+ __u32 flags;
+ __u32 prog;
+ __u8 version;
+ __u32 proc;
+ __s64 service_time;
+ __u32 saddr4 /* big-endian */;
+ __u32 daddr4 /* big-endian */;
+ void *saddr6;
+ void *daddr6;
+ __u16 sport /* big-endian */;
+ __u16 dport /* big-endian */;
+ unsigned int n_compound_ops;
+ __u32 *compound_ops;
+};
+
+struct nfsd_rpc_status_get_rsp_list {
+ struct nfsd_rpc_status_get_rsp_list *next;
+ struct nfsd_rpc_status_get_rsp_dump obj __attribute__((aligned(8)));
};
-void nfsd_rpc_status_get_list_free(struct nfsd_rpc_status_get_list *rsp);
+void
+nfsd_rpc_status_get_rsp_list_free(struct nfsd_rpc_status_get_rsp_list *rsp);
-struct nfsd_rpc_status_get_list *nfsd_rpc_status_get_dump(struct ynl_sock *ys);
+struct nfsd_rpc_status_get_rsp_list *
+nfsd_rpc_status_get_dump(struct ynl_sock *ys);
#endif /* _LINUX_NFSD_GEN_H */
diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py
index 13427436bfb7..c4003a83cd5d 100755
--- a/tools/net/ynl/ynl-gen-c.py
+++ b/tools/net/ynl/ynl-gen-c.py
@@ -3,6 +3,7 @@
import argparse
import collections
+import filecmp
import os
import re
import shutil
@@ -1168,7 +1169,7 @@ class CodeWriter:
if out_file is None:
self._out = os.sys.stdout
else:
- self._out = tempfile.TemporaryFile('w+')
+ self._out = tempfile.NamedTemporaryFile('w+')
self._out_file = out_file
def __del__(self):
@@ -1177,6 +1178,10 @@ class CodeWriter:
def close_out_file(self):
if self._out == os.sys.stdout:
return
+ # Avoid modifying the file if contents didn't change
+ self._out.flush()
+ if os.path.isfile(self._out_file) and filecmp.cmp(self._out.name, self._out_file, shallow=False):
+ return
with open(self._out_file, 'w+') as out_file:
self._out.seek(0)
shutil.copyfileobj(self._out, out_file)
diff --git a/tools/perf/Documentation/perf-bench.txt b/tools/perf/Documentation/perf-bench.txt
index ca5789625cd2..8331bd28b10e 100644
--- a/tools/perf/Documentation/perf-bench.txt
+++ b/tools/perf/Documentation/perf-bench.txt
@@ -124,6 +124,14 @@ Options of *pipe*
--loop=::
Specify number of loops.
+-G::
+--cgroups=::
+Names of cgroups for sender and receiver, separated by a comma.
+This is useful to check cgroup context switching overhead.
+Note that perf neither creates nor deletes the cgroups, so users should
+make sure that the cgroups exist and are accessible before use.
+
+
Example of *pipe*
^^^^^^^^^^^^^^^^^
@@ -141,6 +149,17 @@ Example of *pipe*
Total time:0.016 sec
16.948000 usecs/op
59004 ops/sec
+
+% perf bench sched pipe -G AAA,BBB
+(executing 1000000 pipe operations between cgroups)
+# Running 'sched/pipe' benchmark:
+# Executed 1000000 pipe operations between two processes
+
+ Total time: 6.886 [sec]
+
+ 6.886208 usecs/op
+ 145217 ops/sec
+
---------------------
SUITES FOR 'syscall'
diff --git a/tools/perf/Documentation/perf-kwork.txt b/tools/perf/Documentation/perf-kwork.txt
index 3c36324712b6..109ace1d5e90 100644
--- a/tools/perf/Documentation/perf-kwork.txt
+++ b/tools/perf/Documentation/perf-kwork.txt
@@ -8,7 +8,7 @@ perf-kwork - Tool to trace/measure kernel work properties (latencies)
SYNOPSIS
--------
[verse]
-'perf kwork' {record}
+'perf kwork' {record|report|latency|timehist|top}
DESCRIPTION
-----------
@@ -23,6 +23,8 @@ There are several variants of 'perf kwork':
'perf kwork timehist' provides an analysis of kernel work events.
+ 'perf kwork top' to report the task cpu usage.
+
Example usage:
perf kwork record -- sleep 1
perf kwork report
@@ -30,6 +32,8 @@ There are several variants of 'perf kwork':
perf kwork latency
perf kwork latency -b
perf kwork timehist
+ perf kwork top
+ perf kwork top -b
By default it shows the individual work events such as irq, workqueue,
including the run time and delay (time between raise and actually entry):
@@ -66,7 +70,7 @@ OPTIONS
-k::
--kwork::
- List of kwork to profile (irq, softirq, workqueue, etc)
+ List of kwork to profile (irq, softirq, workqueue, sched, etc)
-v::
--verbose::
@@ -175,6 +179,36 @@ OPTIONS for 'perf kwork timehist'
stop time is not given (i.e., time string is 'x.y,') then analysis goes
to end of file.
+OPTIONS for 'perf kwork top'
+---------------------------------
+
+-b::
+--use-bpf::
+ Use BPF to measure task cpu usage.
+
+-C::
+--cpu::
+ Only show events for the given CPU(s) (comma separated list).
+
+-i::
+--input::
+ Input file name. (default: perf.data unless stdin is a fifo)
+
+-n::
+--name::
+ Only show events for the given name.
+
+-s::
+--sort::
+ Sort by key(s): rate, runtime, tid
+
+--time::
+ Only analyze samples within given time window: <start>,<stop>. Times
+ have the format seconds.microseconds. If start is not given (i.e., time
+ string is ',x.y') then analysis starts at the beginning of the file. If
+ stop time is not given (i.e., time string is 'x.y,') then analysis goes
+ to end of file.
+
SEE ALSO
--------
linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
index 30eea576721f..503abcba1438 100644
--- a/tools/perf/Documentation/perf-lock.txt
+++ b/tools/perf/Documentation/perf-lock.txt
@@ -208,6 +208,13 @@ CONTENTION OPTIONS
Show results using a CSV-style output to make it easy to import directly
into spreadsheets. Columns are separated by the string specified in SEP.
+--lock-cgroup::
+ Show lock contention stat by cgroup. Requires --use-bpf.
+
+-G::
+--cgroup-filter=<value>::
+ Show lock contention only in the given cgroups (comma separated list).
+
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index d5217be012d7..1889f66addf2 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -374,6 +374,9 @@ comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-
In per-thread mode with inheritance mode on (default), samples are captured only when
the thread executes on the designated CPUs. Default is to monitor all CPUs.
+User space tasks can migrate between CPUs, so when tracing selected CPUs,
+a dummy event is created to track sideband for all CPUs.
+
-B::
--no-buildid::
Do not save the build ids of binaries in the perf.data files. This skips
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index d66b52407e19..b3e6ed10f40c 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -216,12 +216,6 @@ ifeq ($(call get-executable,$(BISON)),)
dummy := $(error Error: $(BISON) is missing on this system, please install it)
endif
-ifeq ($(BUILD_BPF_SKEL),1)
- ifeq ($(call get-executable,$(CLANG)),)
- dummy := $(error $(CLANG) is missing on this system, please install it to be able to build with BUILD_BPF_SKEL=1)
- endif
-endif
-
ifneq ($(OUTPUT),)
ifeq ($(shell expr $(shell $(BISON) --version | grep bison | sed -e 's/.\+ \([0-9]\+\).\([0-9]\+\).\([0-9]\+\)/\1\2\3/g') \>\= 371), 1)
BISON_FILE_PREFIX_MAP := --file-prefix-map=$(OUTPUT)=
@@ -530,6 +524,16 @@ ifdef CORESIGHT
endif
endif
+ifndef NO_ZLIB
+ ifeq ($(feature-zlib), 1)
+ CFLAGS += -DHAVE_ZLIB_SUPPORT
+ EXTLIBS += -lz
+ $(call detected,CONFIG_ZLIB)
+ else
+ NO_ZLIB := 1
+ endif
+endif
+
ifndef NO_LIBELF
CFLAGS += -DHAVE_LIBELF_SUPPORT
EXTLIBS += -lelf
@@ -571,22 +575,28 @@ ifndef NO_LIBELF
ifndef NO_LIBBPF
ifeq ($(feature-bpf), 1)
- CFLAGS += -DHAVE_LIBBPF_SUPPORT
- $(call detected,CONFIG_LIBBPF)
-
# detecting libbpf without LIBBPF_DYNAMIC, so make VF=1 shows libbpf detection status
$(call feature_check,libbpf)
ifdef LIBBPF_DYNAMIC
ifeq ($(feature-libbpf), 1)
EXTLIBS += -lbpf
+ CFLAGS += -DHAVE_LIBBPF_SUPPORT
+ $(call detected,CONFIG_LIBBPF)
$(call detected,CONFIG_LIBBPF_DYNAMIC)
else
dummy := $(error Error: No libbpf devel library found or older than v1.0, please install/update libbpf-devel);
endif
else
- # Libbpf will be built as a static library from tools/lib/bpf.
- LIBBPF_STATIC := 1
+ ifeq ($(NO_ZLIB), 1)
+ dummy := $(warning Warning: Statically building libbpf not possible as zlib is missing)
+ NO_LIBBPF := 1
+ else
+ # Libbpf will be built as a static library from tools/lib/bpf.
+ LIBBPF_STATIC := 1
+ $(call detected,CONFIG_LIBBPF)
+ CFLAGS += -DHAVE_LIBBPF_SUPPORT
+ endif
endif
endif
endif # NO_LIBBPF
@@ -663,16 +673,43 @@ ifndef NO_LIBBPF
endif
endif
-ifdef BUILD_BPF_SKEL
- $(call feature_check,clang-bpf-co-re)
- ifeq ($(feature-clang-bpf-co-re), 0)
- dummy := $(error Error: clang too old/not installed. Please install recent clang to build with BUILD_BPF_SKEL)
- endif
+ifndef BUILD_BPF_SKEL
+ # BPF skeletons control a large number of perf features; they are
+ # enabled by default.
+ BUILD_BPF_SKEL := 1
+endif
+
+ifeq ($(BUILD_BPF_SKEL),1)
ifeq ($(filter -DHAVE_LIBBPF_SUPPORT, $(CFLAGS)),)
- dummy := $(error Error: BPF skeleton support requires libbpf)
+ dummy := $(warning Warning: Disabled BPF skeletons as libbpf is required)
+ BUILD_BPF_SKEL := 0
+ else ifeq ($(filter -DHAVE_LIBELF_SUPPORT, $(CFLAGS)),)
+ dummy := $(warning Warning: Disabled BPF skeletons as libelf is required by bpftool)
+ BUILD_BPF_SKEL := 0
+ else ifeq ($(filter -DHAVE_ZLIB_SUPPORT, $(CFLAGS)),)
+ dummy := $(warning Warning: Disabled BPF skeletons as zlib is required by bpftool)
+ BUILD_BPF_SKEL := 0
+ else ifeq ($(call get-executable,$(CLANG)),)
+ dummy := $(warning Warning: Disabled BPF skeletons as clang ($(CLANG)) is missing)
+ BUILD_BPF_SKEL := 0
+ else
+ CLANG_VERSION := $(shell $(CLANG) --version | head -1 | sed 's/.*clang version \([[:digit:]]\+.[[:digit:]]\+.[[:digit:]]\+\).*/\1/g')
+ ifeq ($(call version-lt3,$(CLANG_VERSION),12.0.1),1)
+ dummy := $(warning Warning: Disabled BPF skeletons as reliable BTF generation needs at least $(CLANG) version 12.0.1)
+ BUILD_BPF_SKEL := 0
+ endif
+ endif
+ ifeq ($(BUILD_BPF_SKEL),1)
+ $(call feature_check,clang-bpf-co-re)
+ ifeq ($(feature-clang-bpf-co-re), 0)
+ dummy := $(warning Warning: Disabled BPF skeletons as clang is too old)
+ BUILD_BPF_SKEL := 0
+ endif
+ endif
+ ifeq ($(BUILD_BPF_SKEL),1)
+ $(call detected,CONFIG_PERF_BPF_SKEL)
+ CFLAGS += -DHAVE_BPF_SKEL
endif
- $(call detected,CONFIG_PERF_BPF_SKEL)
- CFLAGS += -DHAVE_BPF_SKEL
endif
ifndef GEN_VMLINUX_H
@@ -946,16 +983,6 @@ ifndef NO_DEMANGLE
endif
endif
-ifndef NO_ZLIB
- ifeq ($(feature-zlib), 1)
- CFLAGS += -DHAVE_ZLIB_SUPPORT
- EXTLIBS += -lz
- $(call detected,CONFIG_ZLIB)
- else
- NO_ZLIB := 1
- endif
-endif
-
ifndef NO_LZMA
ifeq ($(feature-lzma), 1)
CFLAGS += -DHAVE_LZMA_SUPPORT
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 14dedd11a1f5..d88da787e815 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -69,6 +69,10 @@ include ../scripts/utilities.mak
# Define NO_LIBDW_DWARF_UNWIND if you do not want libdw support
# for dwarf backtrace post unwind.
#
+# Define NO_LIBTRACEEVENT=1 if you don't want libtraceevent to be linked,
+# this will remove multiple features and tools, such as 'perf trace',
+# that need it to read tracefs event format files, etc.
+#
# Define NO_PERF_READ_VDSO32 if you do not want to build perf-read-vdso32
# for reading the 32-bit compatibility VDSO in 64-bit mode
#
@@ -120,7 +124,7 @@ include ../scripts/utilities.mak
#
# Define NO_LIBDEBUGINFOD if you do not want support debuginfod
#
-# Define BUILD_BPF_SKEL to enable BPF skeletons
+# Set BUILD_BPF_SKEL to 0 to override the default and not build BPF skeletons
#
# Define BUILD_NONDISTRO to enable building and linking against libbfd and
# libiberty distribution license incompatible libraries.
@@ -351,10 +355,13 @@ export PYTHON_EXTBUILD_LIB PYTHON_EXTBUILD_TMP
python-clean := $(call QUIET_CLEAN, python) $(RM) -r $(PYTHON_EXTBUILD) $(OUTPUT)python/perf*.so
+# Use the detected configuration
+-include $(OUTPUT).config-detected
+
ifeq ($(CONFIG_LIBTRACEEVENT),y)
PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources)
else
- PYTHON_EXT_SRCS := $(shell grep -v '^\#\|util/trace-event.c' util/python-ext-sources)
+ PYTHON_EXT_SRCS := $(shell grep -v ^\#\\\|util/trace-event.c util/python-ext-sources)
endif
PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py $(LIBAPI)
@@ -1043,13 +1050,14 @@ SKELETONS += $(SKEL_OUT)/bperf_leader.skel.h $(SKEL_OUT)/bperf_follower.skel.h
SKELETONS += $(SKEL_OUT)/bperf_cgroup.skel.h $(SKEL_OUT)/func_latency.skel.h
SKELETONS += $(SKEL_OUT)/off_cpu.skel.h $(SKEL_OUT)/lock_contention.skel.h
SKELETONS += $(SKEL_OUT)/kwork_trace.skel.h $(SKEL_OUT)/sample_filter.skel.h
+SKELETONS += $(SKEL_OUT)/kwork_top.skel.h
SKELETONS += $(SKEL_OUT)/bench_uprobe.skel.h
SKELETONS += $(SKEL_OUT)/augmented_raw_syscalls.skel.h
$(SKEL_TMP_OUT) $(LIBAPI_OUTPUT) $(LIBBPF_OUTPUT) $(LIBPERF_OUTPUT) $(LIBSUBCMD_OUTPUT) $(LIBSYMBOL_OUTPUT):
$(Q)$(MKDIR) -p $@
-ifdef BUILD_BPF_SKEL
+ifeq ($(CONFIG_PERF_BPF_SKEL),y)
BPFTOOL := $(SKEL_TMP_OUT)/bootstrap/bpftool
# Get Clang's default includes on this system, as opposed to those seen by
# '--target=bpf'. This fixes "missing" files on some architectures/distros,
@@ -1127,11 +1135,11 @@ bpf-skel: $(SKELETONS)
.PRECIOUS: $(SKEL_TMP_OUT)/%.bpf.o
-else # BUILD_BPF_SKEL
+else # CONFIG_PERF_BPF_SKEL
bpf-skel:
-endif # BUILD_BPF_SKEL
+endif # CONFIG_PERF_BPF_SKEL
bpf-skel-clean:
$(call QUIET_CLEAN, bpf-skel) $(RM) -r $(SKEL_TMP_OUT) $(SKELETONS)
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index b8d6a953fd74..2cf873d71dff 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -205,8 +205,17 @@ static int cs_etm_validate_config(struct auxtrace_record *itr,
for (i = 0; i < cpu__max_cpu().cpu; i++) {
struct perf_cpu cpu = { .cpu = i, };
- if (!perf_cpu_map__has(event_cpus, cpu) ||
- !perf_cpu_map__has(online_cpus, cpu))
+ /*
+ * In the per-cpu case, only validate the CPUs that the event works with.
+ * In the per-thread case, the CPU map is empty; since the traced
+ * program can run on any CPU in that case, don't skip the
+ * validation.
+ */
+ if (!perf_cpu_map__empty(event_cpus) &&
+ !perf_cpu_map__has(event_cpus, cpu))
+ continue;
+
+ if (!perf_cpu_map__has(online_cpus, cpu))
continue;
err = cs_etm_validate_context_id(itr, evsel, i);
@@ -433,6 +442,15 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
"contextid", 1);
}
+ /*
+ * When the option '--timestamp' or '-T' is enabled, the PERF_SAMPLE_TIME
+ * bit is set for all events. In this case, always enable Arm CoreSight
+ * timestamp tracing.
+ */
+ if (opts->sample_time_set)
+ evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel,
+ "timestamp", 1);
+
/* Add dummy event to keep tracking */
err = parse_event(evlist, "dummy:u");
if (err)
@@ -917,16 +935,9 @@ out:
* (CFG_CHG and evsel__set_config_if_unset()). If no default is set then user
* changes aren't tracked.
*/
-struct perf_event_attr *
-cs_etm_get_default_config(struct perf_pmu *pmu __maybe_unused)
+void
+cs_etm_get_default_config(const struct perf_pmu *pmu __maybe_unused,
+ struct perf_event_attr *attr)
{
- struct perf_event_attr *attr;
-
- attr = zalloc(sizeof(struct perf_event_attr));
- if (!attr)
- return NULL;
-
attr->sample_period = 1;
-
- return attr;
}
diff --git a/tools/perf/arch/arm/util/pmu.c b/tools/perf/arch/arm/util/pmu.c
index a9623b128ece..7f3af3b97f3b 100644
--- a/tools/perf/arch/arm/util/pmu.c
+++ b/tools/perf/arch/arm/util/pmu.c
@@ -14,22 +14,22 @@
#include "../../../util/pmu.h"
#include "../../../util/cs-etm.h"
-struct perf_event_attr
-*perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
+void perf_pmu__arch_init(struct perf_pmu *pmu __maybe_unused)
{
#ifdef HAVE_AUXTRACE_SUPPORT
if (!strcmp(pmu->name, CORESIGHT_ETM_PMU_NAME)) {
/* add ETM default config here */
pmu->selectable = true;
- return cs_etm_get_default_config(pmu);
+ pmu->perf_event_attr_init_default = cs_etm_get_default_config;
#if defined(__aarch64__)
} else if (strstarts(pmu->name, ARM_SPE_PMU_NAME)) {
- return arm_spe_pmu_default_config(pmu);
+ pmu->selectable = true;
+ pmu->is_uncore = false;
+ pmu->perf_event_attr_init_default = arm_spe_pmu_default_config;
} else if (strstarts(pmu->name, HISI_PTT_PMU_NAME)) {
pmu->selectable = true;
#endif
}
#endif
- return NULL;
}
diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
index 9cc3d6dcb849..e3acc739bd00 100644
--- a/tools/perf/arch/arm64/util/arm-spe.c
+++ b/tools/perf/arch/arm64/util/arm-spe.c
@@ -113,6 +113,25 @@ arm_spe_snapshot_resolve_auxtrace_defaults(struct record_opts *opts,
}
}
+static __u64 arm_spe_pmu__sample_period(const struct perf_pmu *arm_spe_pmu)
+{
+ static __u64 sample_period;
+
+ if (sample_period)
+ return sample_period;
+
+ /*
+ * If kernel driver doesn't advertise a minimum,
+ * use max allowable by PMSIDR_EL1.INTERVAL
+ */
+ if (perf_pmu__scan_file(arm_spe_pmu, "caps/min_interval", "%llu",
+ &sample_period) != 1) {
+ pr_debug("arm_spe driver doesn't advertise a min. interval. Using 4096\n");
+ sample_period = 4096;
+ }
+ return sample_period;
+}
+
static int arm_spe_recording_options(struct auxtrace_record *itr,
struct evlist *evlist,
struct record_opts *opts)
@@ -136,7 +155,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
return -EINVAL;
}
evsel->core.attr.freq = 0;
- evsel->core.attr.sample_period = arm_spe_pmu->default_config->sample_period;
+ evsel->core.attr.sample_period = arm_spe_pmu__sample_period(arm_spe_pmu);
evsel->needs_auxtrace_mmap = true;
arm_spe_evsel = evsel;
opts->full_auxtrace = true;
@@ -495,29 +514,8 @@ struct auxtrace_record *arm_spe_recording_init(int *err,
return &sper->itr;
}
-struct perf_event_attr
-*arm_spe_pmu_default_config(struct perf_pmu *arm_spe_pmu)
+void
+arm_spe_pmu_default_config(const struct perf_pmu *arm_spe_pmu, struct perf_event_attr *attr)
{
- struct perf_event_attr *attr;
-
- attr = zalloc(sizeof(struct perf_event_attr));
- if (!attr) {
- pr_err("arm_spe default config cannot allocate a perf_event_attr\n");
- return NULL;
- }
-
- /*
- * If kernel driver doesn't advertise a minimum,
- * use max allowable by PMSIDR_EL1.INTERVAL
- */
- if (perf_pmu__scan_file(arm_spe_pmu, "caps/min_interval", "%llu",
- &attr->sample_period) != 1) {
- pr_debug("arm_spe driver doesn't advertise a min. interval. Using 4096\n");
- attr->sample_period = 4096;
- }
-
- arm_spe_pmu->selectable = true;
- arm_spe_pmu->is_uncore = false;
-
- return attr;
+ attr->sample_period = arm_spe_pmu__sample_period(arm_spe_pmu);
}
diff --git a/tools/perf/arch/arm64/util/pmu.c b/tools/perf/arch/arm64/util/pmu.c
index 615084eb88d8..2a4eab2d160e 100644
--- a/tools/perf/arch/arm64/util/pmu.c
+++ b/tools/perf/arch/arm64/util/pmu.c
@@ -10,20 +10,16 @@
const struct pmu_metrics_table *pmu_metrics_table__find(void)
{
- struct perf_pmu *pmu = pmu__find_core_pmu();
+ struct perf_pmu *pmu;
- if (pmu)
- return perf_pmu__find_metrics_table(pmu);
-
- return NULL;
-}
-
-const struct pmu_events_table *pmu_events_table__find(void)
-{
- struct perf_pmu *pmu = pmu__find_core_pmu();
+ /* Metrics aren't currently supported on heterogeneous Arm systems */
+ if (perf_pmus__num_core_pmus() > 1)
+ return NULL;
+ /* Doesn't matter which one here because they'll all be the same */
+ pmu = perf_pmus__find_core_pmu();
if (pmu)
- return perf_pmu__find_events_table(pmu);
+ return perf_pmu__find_metrics_table(pmu);
return NULL;
}
@@ -32,7 +28,7 @@ double perf_pmu__cpu_slots_per_cycle(void)
{
char path[PATH_MAX];
unsigned long long slots = 0;
- struct perf_pmu *pmu = pmu__find_core_pmu();
+ struct perf_pmu *pmu = perf_pmus__find_core_pmu();
if (pmu) {
perf_pmu__pathname_scnprintf(path, sizeof(path),
diff --git a/tools/perf/arch/powerpc/util/header.c b/tools/perf/arch/powerpc/util/header.c
index c8d0dc775e5d..6b00efd53638 100644
--- a/tools/perf/arch/powerpc/util/header.c
+++ b/tools/perf/arch/powerpc/util/header.c
@@ -34,7 +34,7 @@ get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
char *bufp;
- if (asprintf(&bufp, "%.8lx", mfspr(SPRN_PVR)) < 0)
+ if (asprintf(&bufp, "0x%.8lx", mfspr(SPRN_PVR)) < 0)
bufp = NULL;
return bufp;
diff --git a/tools/perf/arch/s390/util/pmu.c b/tools/perf/arch/s390/util/pmu.c
index 11f03f32e3fd..886c30e001fa 100644
--- a/tools/perf/arch/s390/util/pmu.c
+++ b/tools/perf/arch/s390/util/pmu.c
@@ -13,11 +13,10 @@
#define S390_PMUPAI_EXT "pai_ext"
#define S390_PMUCPUM_CF "cpum_cf"
-struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu)
+void perf_pmu__arch_init(struct perf_pmu *pmu)
{
if (!strcmp(pmu->name, S390_PMUPAI_CRYPTO) ||
!strcmp(pmu->name, S390_PMUPAI_EXT) ||
!strcmp(pmu->name, S390_PMUCPUM_CF))
pmu->selectable = true;
- return NULL;
}
diff --git a/tools/perf/arch/x86/annotate/instructions.c b/tools/perf/arch/x86/annotate/instructions.c
index 5f4ac4fc7fcf..5cdf457f5cbe 100644
--- a/tools/perf/arch/x86/annotate/instructions.c
+++ b/tools/perf/arch/x86/annotate/instructions.c
@@ -74,12 +74,15 @@ static struct ins x86__instructions[] = {
{ .name = "movdqa", .ops = &mov_ops, },
{ .name = "movdqu", .ops = &mov_ops, },
{ .name = "movsd", .ops = &mov_ops, },
- { .name = "movslq", .ops = &mov_ops, },
{ .name = "movss", .ops = &mov_ops, },
+ { .name = "movsb", .ops = &mov_ops, },
+ { .name = "movsw", .ops = &mov_ops, },
+ { .name = "movsl", .ops = &mov_ops, },
{ .name = "movupd", .ops = &mov_ops, },
{ .name = "movups", .ops = &mov_ops, },
- { .name = "movzbl", .ops = &mov_ops, },
- { .name = "movzwl", .ops = &mov_ops, },
+ { .name = "movzb", .ops = &mov_ops, },
+ { .name = "movzw", .ops = &mov_ops, },
+ { .name = "movzl", .ops = &mov_ops, },
{ .name = "mulsd", .ops = &mov_ops, },
{ .name = "mulss", .ops = &mov_ops, },
{ .name = "nop", .ops = &nop_ops, },
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index 31807791589e..fa0c718b9e72 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -60,36 +60,31 @@ struct intel_pt_recording {
size_t priv_size;
};
-static int intel_pt_parse_terms_with_default(struct perf_pmu *pmu,
+static int intel_pt_parse_terms_with_default(const struct perf_pmu *pmu,
const char *str,
u64 *config)
{
- struct list_head *terms;
+ struct parse_events_terms terms;
struct perf_event_attr attr = { .size = 0, };
int err;
- terms = malloc(sizeof(struct list_head));
- if (!terms)
- return -ENOMEM;
-
- INIT_LIST_HEAD(terms);
-
- err = parse_events_terms(terms, str, /*input=*/ NULL);
+ parse_events_terms__init(&terms);
+ err = parse_events_terms(&terms, str, /*input=*/ NULL);
if (err)
goto out_free;
attr.config = *config;
- err = perf_pmu__config_terms(pmu, &attr, terms, /*zero=*/true, /*err=*/NULL);
+ err = perf_pmu__config_terms(pmu, &attr, &terms, /*zero=*/true, /*err=*/NULL);
if (err)
goto out_free;
*config = attr.config;
out_free:
- parse_events_terms__delete(terms);
+ parse_events_terms__exit(&terms);
return err;
}
-static int intel_pt_parse_terms(struct perf_pmu *pmu, const char *str, u64 *config)
+static int intel_pt_parse_terms(const struct perf_pmu *pmu, const char *str, u64 *config)
{
*config = 0;
return intel_pt_parse_terms_with_default(pmu, str, config);
@@ -182,7 +177,7 @@ static int intel_pt_pick_bit(int bits, int target)
return pick;
}
-static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
+static u64 intel_pt_default_config(const struct perf_pmu *intel_pt_pmu)
{
char buf[256];
int mtc, mtc_periods = 0, mtc_period;
@@ -261,20 +256,17 @@ static int intel_pt_parse_snapshot_options(struct auxtrace_record *itr,
return 0;
}
-struct perf_event_attr *
-intel_pt_pmu_default_config(struct perf_pmu *intel_pt_pmu)
+void intel_pt_pmu_default_config(const struct perf_pmu *intel_pt_pmu,
+ struct perf_event_attr *attr)
{
- struct perf_event_attr *attr;
-
- attr = zalloc(sizeof(struct perf_event_attr));
- if (!attr)
- return NULL;
-
- attr->config = intel_pt_default_config(intel_pt_pmu);
+ static u64 config;
+ static bool initialized;
- intel_pt_pmu->selectable = true;
-
- return attr;
+ if (!initialized) {
+ config = intel_pt_default_config(intel_pt_pmu);
+ initialized = true;
+ }
+ attr->config = config;
}
static const char *intel_pt_find_filter(struct evlist *evlist,
diff --git a/tools/perf/arch/x86/util/pmu.c b/tools/perf/arch/x86/util/pmu.c
index f428cffb0378..469555ae9b3c 100644
--- a/tools/perf/arch/x86/util/pmu.c
+++ b/tools/perf/arch/x86/util/pmu.c
@@ -17,158 +17,19 @@
#include "../../../util/pmus.h"
#include "env.h"
-struct pmu_alias {
- char *name;
- char *alias;
- struct list_head list;
-};
-
-static LIST_HEAD(pmu_alias_name_list);
-static bool cached_list;
-
-struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
+void perf_pmu__arch_init(struct perf_pmu *pmu __maybe_unused)
{
#ifdef HAVE_AUXTRACE_SUPPORT
if (!strcmp(pmu->name, INTEL_PT_PMU_NAME)) {
pmu->auxtrace = true;
- return intel_pt_pmu_default_config(pmu);
+ pmu->selectable = true;
+ pmu->perf_event_attr_init_default = intel_pt_pmu_default_config;
}
if (!strcmp(pmu->name, INTEL_BTS_PMU_NAME)) {
pmu->auxtrace = true;
pmu->selectable = true;
}
#endif
- return NULL;
-}
-
-static void pmu_alias__delete(struct pmu_alias *pmu_alias)
-{
- if (!pmu_alias)
- return;
-
- zfree(&pmu_alias->name);
- zfree(&pmu_alias->alias);
- free(pmu_alias);
-}
-
-static struct pmu_alias *pmu_alias__new(char *name, char *alias)
-{
- struct pmu_alias *pmu_alias = zalloc(sizeof(*pmu_alias));
-
- if (pmu_alias) {
- pmu_alias->name = strdup(name);
- if (!pmu_alias->name)
- goto out_delete;
-
- pmu_alias->alias = strdup(alias);
- if (!pmu_alias->alias)
- goto out_delete;
- }
- return pmu_alias;
-
-out_delete:
- pmu_alias__delete(pmu_alias);
- return NULL;
-}
-
-static int setup_pmu_alias_list(void)
-{
- int fd, dirfd;
- DIR *dir;
- struct dirent *dent;
- struct pmu_alias *pmu_alias;
- char buf[MAX_PMU_NAME_LEN];
- FILE *file;
- int ret = -ENOMEM;
-
- dirfd = perf_pmu__event_source_devices_fd();
- if (dirfd < 0)
- return -1;
-
- dir = fdopendir(dirfd);
- if (!dir)
- return -errno;
-
- while ((dent = readdir(dir))) {
- if (!strcmp(dent->d_name, ".") ||
- !strcmp(dent->d_name, ".."))
- continue;
-
- fd = perf_pmu__pathname_fd(dirfd, dent->d_name, "alias", O_RDONLY);
- if (fd < 0)
- continue;
-
- file = fdopen(fd, "r");
- if (!file)
- continue;
-
- if (!fgets(buf, sizeof(buf), file)) {
- fclose(file);
- continue;
- }
-
- fclose(file);
-
- /* Remove the last '\n' */
- buf[strlen(buf) - 1] = 0;
-
- pmu_alias = pmu_alias__new(dent->d_name, buf);
- if (!pmu_alias)
- goto close_dir;
-
- list_add_tail(&pmu_alias->list, &pmu_alias_name_list);
- }
-
- ret = 0;
-
-close_dir:
- closedir(dir);
- return ret;
-}
-
-static const char *__pmu_find_real_name(const char *name)
-{
- struct pmu_alias *pmu_alias;
-
- list_for_each_entry(pmu_alias, &pmu_alias_name_list, list) {
- if (!strcmp(name, pmu_alias->alias))
- return pmu_alias->name;
- }
-
- return name;
-}
-
-const char *pmu_find_real_name(const char *name)
-{
- if (cached_list)
- return __pmu_find_real_name(name);
-
- setup_pmu_alias_list();
- cached_list = true;
-
- return __pmu_find_real_name(name);
-}
-
-static const char *__pmu_find_alias_name(const char *name)
-{
- struct pmu_alias *pmu_alias;
-
- list_for_each_entry(pmu_alias, &pmu_alias_name_list, list) {
- if (!strcmp(name, pmu_alias->name))
- return pmu_alias->alias;
- }
- return NULL;
-}
-
-const char *pmu_find_alias_name(const char *name)
-{
- if (cached_list)
- return __pmu_find_alias_name(name);
-
- setup_pmu_alias_list();
- cached_list = true;
-
- return __pmu_find_alias_name(name);
}
int perf_pmus__num_mem_pmus(void)
diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c
index fa1f8f998814..93dcd9dba3d0 100644
--- a/tools/perf/bench/sched-messaging.c
+++ b/tools/perf/bench/sched-messaging.c
@@ -36,6 +36,7 @@ static bool use_pipes = false;
static unsigned int nr_loops = 100;
static bool thread_mode = false;
static unsigned int num_groups = 10;
+static unsigned int total_children = 0;
static struct list_head sender_contexts = LIST_HEAD_INIT(sender_contexts);
static struct list_head receiver_contexts = LIST_HEAD_INIT(receiver_contexts);
@@ -55,6 +56,13 @@ struct receiver_context {
int wakefd;
};
+union messaging_worker {
+ pthread_t thread;
+ pid_t pid;
+};
+
+static union messaging_worker *worker_tab;
+
static void fdpair(int fds[2])
{
if (use_pipes) {
@@ -98,7 +106,7 @@ static void *sender(struct sender_context *ctx)
again:
ret = write(ctx->out_fds[j], data + done,
- sizeof(data)-done);
+ sizeof(data) - done);
if (ret < 0)
err(EXIT_FAILURE, "SENDER: write");
done += ret;
@@ -139,30 +147,12 @@ again:
return NULL;
}
-static pthread_t create_worker(void *ctx, void *(*func)(void *))
+static void create_thread_worker(union messaging_worker *worker,
+ void *ctx, void *(*func)(void *))
{
pthread_attr_t attr;
- pthread_t childid;
int ret;
- if (!thread_mode) {
- /* process mode */
- /* Fork the receiver. */
- switch (fork()) {
- case -1:
- err(EXIT_FAILURE, "fork()");
- break;
- case 0:
- (*func) (ctx);
- exit(0);
- break;
- default:
- break;
- }
-
- return (pthread_t)0;
- }
-
if (pthread_attr_init(&attr) != 0)
err(EXIT_FAILURE, "pthread_attr_init:");
@@ -171,15 +161,37 @@ static pthread_t create_worker(void *ctx, void *(*func)(void *))
err(EXIT_FAILURE, "pthread_attr_setstacksize");
#endif
- ret = pthread_create(&childid, &attr, func, ctx);
+ ret = pthread_create(&worker->thread, &attr, func, ctx);
if (ret != 0)
err(EXIT_FAILURE, "pthread_create failed");
pthread_attr_destroy(&attr);
- return childid;
}
-static void reap_worker(pthread_t id)
+static void create_process_worker(union messaging_worker *worker,
+ void *ctx, void *(*func)(void *))
+{
+ /* Fork the receiver. */
+ worker->pid = fork();
+
+ if (worker->pid == -1) {
+ err(EXIT_FAILURE, "fork()");
+ } else if (worker->pid == 0) {
+ (*func) (ctx);
+ exit(0);
+ }
+}
+
+static void create_worker(union messaging_worker *worker,
+ void *ctx, void *(*func)(void *))
+{
+ if (!thread_mode)
+ return create_process_worker(worker, ctx, func);
+ else
+ return create_thread_worker(worker, ctx, func);
+}
+
+static void reap_worker(union messaging_worker *worker)
{
int proc_status;
void *thread_status;
@@ -190,19 +202,19 @@ static void reap_worker(pthread_t id)
if (!WIFEXITED(proc_status))
exit(1);
} else {
- pthread_join(id, &thread_status);
+ pthread_join(worker->thread, &thread_status);
}
}
/* One group of senders and receivers */
-static unsigned int group(pthread_t *pth,
+static unsigned int group(union messaging_worker *worker,
unsigned int num_fds,
int ready_out,
int wakefd)
{
unsigned int i;
- struct sender_context *snd_ctx = malloc(sizeof(struct sender_context)
- + num_fds * sizeof(int));
+ struct sender_context *snd_ctx = malloc(sizeof(struct sender_context) +
+ num_fds * sizeof(int));
if (!snd_ctx)
err(EXIT_FAILURE, "malloc()");
@@ -226,7 +238,7 @@ static unsigned int group(pthread_t *pth,
ctx->ready_out = ready_out;
ctx->wakefd = wakefd;
- pth[i] = create_worker(ctx, (void *)receiver);
+ create_worker(worker + i, ctx, (void *)receiver);
snd_ctx->out_fds[i] = fds[1];
if (!thread_mode)
@@ -239,7 +251,7 @@ static unsigned int group(pthread_t *pth,
snd_ctx->wakefd = wakefd;
snd_ctx->num_fds = num_fds;
- pth[num_fds+i] = create_worker(snd_ctx, (void *)sender);
+ create_worker(worker + num_fds + i, snd_ctx, (void *)sender);
}
/* Close the fds we have left */
@@ -251,6 +263,17 @@ static unsigned int group(pthread_t *pth,
return num_fds * 2;
}
+static void sig_handler(int sig __maybe_unused)
+{
+ unsigned int i;
+
+ /*
+ * When exiting abnormally, kill all forked child processes.
+ */
+ for (i = 0; i < total_children; i++)
+ kill(worker_tab[i].pid, SIGKILL);
+}
+
static const struct option options[] = {
OPT_BOOLEAN('p', "pipe", &use_pipes,
"Use pipe() instead of socketpair()"),
@@ -268,27 +291,30 @@ static const char * const bench_sched_message_usage[] = {
int bench_sched_messaging(int argc, const char **argv)
{
- unsigned int i, total_children;
+ unsigned int i;
struct timeval start, stop, diff;
unsigned int num_fds = 20;
int readyfds[2], wakefds[2];
char dummy;
- pthread_t *pth_tab;
struct sender_context *pos, *n;
argc = parse_options(argc, argv, options,
bench_sched_message_usage, 0);
- pth_tab = malloc(num_fds * 2 * num_groups * sizeof(pthread_t));
- if (!pth_tab)
+ worker_tab = malloc(num_fds * 2 * num_groups * sizeof(union messaging_worker));
+ if (!worker_tab)
err(EXIT_FAILURE, "main:malloc()");
fdpair(readyfds);
fdpair(wakefds);
- total_children = 0;
+ if (!thread_mode) {
+ signal(SIGINT, sig_handler);
+ signal(SIGTERM, sig_handler);
+ }
+
for (i = 0; i < num_groups; i++)
- total_children += group(pth_tab+total_children, num_fds,
+ total_children += group(worker_tab + total_children, num_fds,
readyfds[1], wakefds[0]);
/* Wait for everyone to be ready */
@@ -304,7 +330,7 @@ int bench_sched_messaging(int argc, const char **argv)
/* Reap them all */
for (i = 0; i < total_children; i++)
- reap_worker(pth_tab[i]);
+ reap_worker(worker_tab + i);
gettimeofday(&stop, NULL);
@@ -332,7 +358,7 @@ int bench_sched_messaging(int argc, const char **argv)
break;
}
- free(pth_tab);
+ free(worker_tab);
list_for_each_entry_safe(pos, n, &sender_contexts, list) {
list_del_init(&pos->list);
free(pos);
diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c
index a960e7a93aec..3af6d3c55aba 100644
--- a/tools/perf/bench/sched-pipe.c
+++ b/tools/perf/bench/sched-pipe.c
@@ -10,7 +10,9 @@
* Ported to perf by Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp>
*/
#include <subcmd/parse-options.h>
+#include <api/fs/fs.h>
#include "bench.h"
+#include "util/cgroup.h"
#include <unistd.h>
#include <stdio.h>
@@ -19,6 +21,7 @@
#include <sys/wait.h>
#include <string.h>
#include <errno.h>
+#include <fcntl.h>
#include <assert.h>
#include <sys/time.h>
#include <sys/types.h>
@@ -31,6 +34,7 @@ struct thread_data {
int nr;
int pipe_read;
int pipe_write;
+ bool cgroup_failed;
pthread_t pthread;
};
@@ -40,9 +44,48 @@ static int loops = LOOPS_DEFAULT;
/* Use processes by default: */
static bool threaded;
+static char *cgrp_names[2];
+static struct cgroup *cgrps[2];
+
+static int parse_two_cgroups(const struct option *opt __maybe_unused,
+ const char *str, int unset __maybe_unused)
+{
+ char *p = strdup(str);
+ char *q;
+ int ret = -1;
+
+ if (p == NULL) {
+ fprintf(stderr, "memory allocation failure\n");
+ return -1;
+ }
+
+ q = strchr(p, ',');
+ if (q == NULL) {
+ fprintf(stderr, "it should have two cgroup names: %s\n", p);
+ goto out;
+ }
+ *q = '\0';
+
+ cgrp_names[0] = strdup(p);
+ cgrp_names[1] = strdup(q + 1);
+
+ if (cgrp_names[0] == NULL || cgrp_names[1] == NULL) {
+ fprintf(stderr, "memory allocation failure\n");
+ goto out;
+ }
+ ret = 0;
+
+out:
+ free(p);
+ return ret;
+}
+
static const struct option options[] = {
OPT_INTEGER('l', "loop", &loops, "Specify number of loops"),
OPT_BOOLEAN('T', "threaded", &threaded, "Specify threads/process based task setup"),
+ OPT_CALLBACK('G', "cgroups", NULL, "SEND,RECV",
+ "Put sender and receivers in given cgroups",
+ parse_two_cgroups),
OPT_END()
};
@@ -51,12 +94,89 @@ static const char * const bench_sched_pipe_usage[] = {
NULL
};
+static int enter_cgroup(int nr)
+{
+ char buf[32];
+ int fd, len, ret;
+ int saved_errno;
+ struct cgroup *cgrp;
+ pid_t pid;
+
+ if (cgrp_names[nr] == NULL)
+ return 0;
+
+ if (cgrps[nr] == NULL) {
+ cgrps[nr] = cgroup__new(cgrp_names[nr], /*do_open=*/true);
+ if (cgrps[nr] == NULL)
+ goto err;
+ }
+ cgrp = cgrps[nr];
+
+ if (threaded)
+ pid = syscall(__NR_gettid);
+ else
+ pid = getpid();
+
+ snprintf(buf, sizeof(buf), "%d\n", pid);
+ len = strlen(buf);
+
+ /* try cgroup v2 interface first */
+ if (threaded)
+ fd = openat(cgrp->fd, "cgroup.threads", O_WRONLY);
+ else
+ fd = openat(cgrp->fd, "cgroup.procs", O_WRONLY);
+
+ /* try cgroup v1 if failed */
+ if (fd < 0 && errno == ENOENT)
+ fd = openat(cgrp->fd, "tasks", O_WRONLY);
+
+ if (fd < 0)
+ goto err;
+
+ ret = write(fd, buf, len);
+ close(fd);
+
+ if (ret != len) {
+ printf("Cannot enter to cgroup: %s\n", cgrp->name);
+ return -1;
+ }
+ return 0;
+
+err:
+ saved_errno = errno;
+ printf("Failed to open cgroup file in %s\n", cgrp_names[nr]);
+
+ if (saved_errno == ENOENT) {
+ char mnt[PATH_MAX];
+
+ if (cgroupfs_find_mountpoint(mnt, sizeof(mnt), "perf_event") == 0)
+ printf(" Hint: create the cgroup first, like 'mkdir %s/%s'\n",
+ mnt, cgrp_names[nr]);
+ } else if (saved_errno == EACCES && geteuid() > 0) {
+ printf(" Hint: try to run as root\n");
+ }
+
+ return -1;
+}
+
+static void exit_cgroup(int nr)
+{
+ cgroup__put(cgrps[nr]);
+ free(cgrp_names[nr]);
+}
+
static void *worker_thread(void *__tdata)
{
struct thread_data *td = __tdata;
int m = 0, i;
int ret;
+ ret = enter_cgroup(td->nr);
+ if (ret < 0) {
+ td->cgroup_failed = true;
+ return NULL;
+ }
+
for (i = 0; i < loops; i++) {
if (!td->nr) {
ret = read(td->pipe_read, &m, sizeof(int));
@@ -76,7 +196,8 @@ static void *worker_thread(void *__tdata)
int bench_sched_pipe(int argc, const char **argv)
{
- struct thread_data threads[2], *td;
+ struct thread_data threads[2] = {};
+ struct thread_data *td;
int pipe_1[2], pipe_2[2];
struct timeval start, stop, diff;
unsigned long long result_usec = 0;
@@ -112,9 +233,7 @@ int bench_sched_pipe(int argc, const char **argv)
}
}
-
if (threaded) {
-
for (t = 0; t < nr_threads; t++) {
td = threads + t;
@@ -128,7 +247,6 @@ int bench_sched_pipe(int argc, const char **argv)
ret = pthread_join(td->pthread, NULL);
BUG_ON(ret);
}
-
} else {
pid = fork();
assert(pid >= 0);
@@ -147,6 +265,12 @@ int bench_sched_pipe(int argc, const char **argv)
gettimeofday(&stop, NULL);
timersub(&stop, &start, &diff);
+ exit_cgroup(0);
+ exit_cgroup(1);
+
+ if (threads[0].cgroup_failed || threads[1].cgroup_failed)
+ return 0;
+
switch (bench_format) {
case BENCH_FORMAT_DEFAULT:
printf("# Executed %d pipe operations between two %s\n\n",
diff --git a/tools/perf/bench/uprobe.c b/tools/perf/bench/uprobe.c
index 914c0817fe8a..5c71fdc419dd 100644
--- a/tools/perf/bench/uprobe.c
+++ b/tools/perf/bench/uprobe.c
@@ -89,6 +89,7 @@ static int bench_uprobe__setup_bpf_skel(enum bench_uprobe bench)
return err;
cleanup:
bench_uprobe_bpf__destroy(skel);
+ skel = NULL;
return err;
}
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index cd381693658b..e2a40f1d9225 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -277,8 +277,10 @@ static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused)
char filename[PATH_MAX];
struct build_id bid;
- if (dso__build_id_filename(dso, filename, sizeof(filename), false) &&
- filename__read_build_id(filename, &bid) == -1) {
+ if (!dso__build_id_filename(dso, filename, sizeof(filename), false))
+ return true;
+
+ if (filename__read_build_id(filename, &bid) == -1) {
if (errno == ENOENT)
return false;
diff --git a/tools/perf/builtin-kwork.c b/tools/perf/builtin-kwork.c
index 14bf7a8429e7..f007a9b27065 100644
--- a/tools/perf/builtin-kwork.c
+++ b/tools/perf/builtin-kwork.c
@@ -45,6 +45,11 @@
#define PRINT_BRACKETPAIR_WIDTH 2
#define PRINT_TIME_UNIT_SEC_WIDTH 2
#define PRINT_TIME_UNIT_MESC_WIDTH 3
+#define PRINT_PID_WIDTH 7
+#define PRINT_TASK_NAME_WIDTH 16
+#define PRINT_CPU_USAGE_WIDTH 6
+#define PRINT_CPU_USAGE_DECIMAL_WIDTH 2
+#define PRINT_CPU_USAGE_HIST_WIDTH 30
#define PRINT_RUNTIME_HEADER_WIDTH (PRINT_RUNTIME_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
#define PRINT_LATENCY_HEADER_WIDTH (PRINT_LATENCY_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
#define PRINT_TIMEHIST_CPU_WIDTH (PRINT_CPU_WIDTH + PRINT_BRACKETPAIR_WIDTH)
@@ -131,6 +136,34 @@ static int max_latency_cmp(struct kwork_work *l, struct kwork_work *r)
return 0;
}
+static int cpu_usage_cmp(struct kwork_work *l, struct kwork_work *r)
+{
+ if (l->cpu_usage > r->cpu_usage)
+ return 1;
+ if (l->cpu_usage < r->cpu_usage)
+ return -1;
+
+ return 0;
+}
+
+static int id_or_cpu_r_cmp(struct kwork_work *l, struct kwork_work *r)
+{
+ if (l->id < r->id)
+ return 1;
+ if (l->id > r->id)
+ return -1;
+
+ if (l->id != 0)
+ return 0;
+
+ if (l->cpu < r->cpu)
+ return 1;
+ if (l->cpu > r->cpu)
+ return -1;
+
+ return 0;
+}
+
static int sort_dimension__add(struct perf_kwork *kwork __maybe_unused,
const char *tok, struct list_head *list)
{
@@ -155,12 +188,22 @@ static int sort_dimension__add(struct perf_kwork *kwork __maybe_unused,
.name = "avg",
.cmp = avg_latency_cmp,
};
+ static struct sort_dimension rate_sort_dimension = {
+ .name = "rate",
+ .cmp = cpu_usage_cmp,
+ };
+ static struct sort_dimension tid_sort_dimension = {
+ .name = "tid",
+ .cmp = id_or_cpu_r_cmp,
+ };
struct sort_dimension *available_sorts[] = {
&id_sort_dimension,
&max_sort_dimension,
&count_sort_dimension,
&runtime_sort_dimension,
&avg_sort_dimension,
+ &rate_sort_dimension,
+ &tid_sort_dimension,
};
if (kwork->report == KWORK_REPORT_LATENCY)
@@ -361,6 +404,17 @@ static void profile_update_timespan(struct perf_kwork *kwork,
kwork->timeend = sample->time;
}
+static bool profile_name_match(struct perf_kwork *kwork,
+ struct kwork_work *work)
+{
+ if (kwork->profile_name && work->name &&
+ (strcmp(work->name, kwork->profile_name) != 0)) {
+ return false;
+ }
+
+ return true;
+}
+
static bool profile_event_match(struct perf_kwork *kwork,
struct kwork_work *work,
struct perf_sample *sample)
@@ -376,10 +430,14 @@ static bool profile_event_match(struct perf_kwork *kwork,
((ptime->end != 0) && (ptime->end < time)))
return false;
- if ((kwork->profile_name != NULL) &&
- (work->name != NULL) &&
- (strcmp(work->name, kwork->profile_name) != 0))
+ /*
+ * report top needs to collect the runtime of all tasks to
+ * calculate the load of each core.
+ */
+ if ((kwork->report != KWORK_REPORT_TOP) &&
+ !profile_name_match(kwork, work)) {
return false;
+ }
profile_update_timespan(kwork, sample);
return true;
@@ -392,13 +450,14 @@ static int work_push_atom(struct perf_kwork *kwork,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine,
- struct kwork_work **ret_work)
+ struct kwork_work **ret_work,
+ bool overwrite)
{
- struct kwork_atom *atom, *dst_atom;
+ struct kwork_atom *atom, *dst_atom, *last_atom;
struct kwork_work *work, key;
BUG_ON(class->work_init == NULL);
- class->work_init(class, &key, evsel, sample, machine);
+ class->work_init(kwork, class, &key, src_type, evsel, sample, machine);
atom = atom_new(kwork, sample);
if (atom == NULL)
@@ -406,12 +465,14 @@ static int work_push_atom(struct perf_kwork *kwork,
work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
if (work == NULL) {
- free(atom);
+ atom_free(atom);
return -1;
}
- if (!profile_event_match(kwork, work, sample))
+ if (!profile_event_match(kwork, work, sample)) {
+ atom_free(atom);
return 0;
+ }
if (dst_type < KWORK_TRACE_MAX) {
dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
@@ -425,6 +486,17 @@ static int work_push_atom(struct perf_kwork *kwork,
if (ret_work != NULL)
*ret_work = work;
+ if (overwrite) {
+ last_atom = list_last_entry_or_null(&work->atom_list[src_type],
+ struct kwork_atom, list);
+ if (last_atom) {
+ atom_del(last_atom);
+
+ kwork->nr_skipped_events[src_type]++;
+ kwork->nr_skipped_events[KWORK_TRACE_MAX]++;
+ }
+ }
+
list_add_tail(&atom->list, &work->atom_list[src_type]);
return 0;
@@ -443,7 +515,7 @@ static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
struct kwork_work *work, key;
BUG_ON(class->work_init == NULL);
- class->work_init(class, &key, evsel, sample, machine);
+ class->work_init(kwork, class, &key, src_type, evsel, sample, machine);
work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
if (ret_work != NULL)
@@ -471,6 +543,38 @@ static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
return NULL;
}
+static struct kwork_work *find_work_by_id(struct rb_root_cached *root,
+ u64 id, int cpu)
+{
+ struct rb_node *next;
+ struct kwork_work *work;
+
+ next = rb_first_cached(root);
+ while (next) {
+ work = rb_entry(next, struct kwork_work, node);
+ if ((cpu != -1 && work->id == id && work->cpu == cpu) ||
+ (cpu == -1 && work->id == id))
+ return work;
+
+ next = rb_next(next);
+ }
+
+ return NULL;
+}
+
+static struct kwork_class *get_kwork_class(struct perf_kwork *kwork,
+ enum kwork_class_type type)
+{
+ struct kwork_class *class;
+
+ list_for_each_entry(class, &kwork->class_list, list) {
+ if (class->type == type)
+ return class;
+ }
+
+ return NULL;
+}
+
static void report_update_exit_event(struct kwork_work *work,
struct kwork_atom *atom,
struct perf_sample *sample)
@@ -500,7 +604,7 @@ static int report_entry_event(struct perf_kwork *kwork,
{
return work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
KWORK_TRACE_MAX, evsel, sample,
- machine, NULL);
+ machine, NULL, true);
}
static int report_exit_event(struct perf_kwork *kwork,
@@ -555,7 +659,7 @@ static int latency_raise_event(struct perf_kwork *kwork,
{
return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
KWORK_TRACE_MAX, evsel, sample,
- machine, NULL);
+ machine, NULL, true);
}
static int latency_entry_event(struct perf_kwork *kwork,
@@ -714,7 +818,7 @@ static int timehist_raise_event(struct perf_kwork *kwork,
{
return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
KWORK_TRACE_MAX, evsel, sample,
- machine, NULL);
+ machine, NULL, true);
}
static int timehist_entry_event(struct perf_kwork *kwork,
@@ -728,7 +832,7 @@ static int timehist_entry_event(struct perf_kwork *kwork,
ret = work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
KWORK_TRACE_RAISE, evsel, sample,
- machine, &work);
+ machine, &work, true);
if (ret)
return ret;
@@ -775,6 +879,84 @@ out:
return ret;
}
+static void top_update_runtime(struct kwork_work *work,
+ struct kwork_atom *atom,
+ struct perf_sample *sample)
+{
+ u64 delta;
+ u64 exit_time = sample->time;
+ u64 entry_time = atom->time;
+
+ if ((entry_time != 0) && (exit_time >= entry_time)) {
+ delta = exit_time - entry_time;
+ work->total_runtime += delta;
+ }
+}
+
+static int top_entry_event(struct perf_kwork *kwork,
+ struct kwork_class *class,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ return work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
+ KWORK_TRACE_MAX, evsel, sample,
+ machine, NULL, true);
+}
+
+static int top_exit_event(struct perf_kwork *kwork,
+ struct kwork_class *class,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct kwork_work *work, *sched_work;
+ struct kwork_class *sched_class;
+ struct kwork_atom *atom;
+
+ atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
+ KWORK_TRACE_ENTRY, evsel, sample,
+ machine, &work);
+ if (!work)
+ return -1;
+
+ if (atom) {
+ sched_class = get_kwork_class(kwork, KWORK_CLASS_SCHED);
+ if (sched_class) {
+ sched_work = find_work_by_id(&sched_class->work_root,
+ work->id, work->cpu);
+ if (sched_work)
+ top_update_runtime(work, atom, sample);
+ }
+ atom_del(atom);
+ }
+
+ return 0;
+}
+
+static int top_sched_switch_event(struct perf_kwork *kwork,
+ struct kwork_class *class,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct kwork_atom *atom;
+ struct kwork_work *work;
+
+ atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
+ KWORK_TRACE_ENTRY, evsel, sample,
+ machine, &work);
+ if (!work)
+ return -1;
+
+ if (atom) {
+ top_update_runtime(work, atom, sample);
+ atom_del(atom);
+ }
+
+ return top_entry_event(kwork, class, evsel, sample, machine);
+}
+
static struct kwork_class kwork_irq;
static int process_irq_handler_entry_event(struct perf_tool *tool,
struct evsel *evsel,
@@ -819,16 +1001,24 @@ static int irq_class_init(struct kwork_class *class,
return 0;
}
-static void irq_work_init(struct kwork_class *class,
+static void irq_work_init(struct perf_kwork *kwork,
+ struct kwork_class *class,
struct kwork_work *work,
+ enum kwork_trace_type src_type __maybe_unused,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine __maybe_unused)
{
work->class = class;
work->cpu = sample->cpu;
- work->id = evsel__intval(evsel, sample, "irq");
- work->name = evsel__strval(evsel, sample, "name");
+
+ if (kwork->report == KWORK_REPORT_TOP) {
+ work->id = evsel__intval_common(evsel, sample, "common_pid");
+ work->name = NULL;
+ } else {
+ work->id = evsel__intval(evsel, sample, "irq");
+ work->name = evsel__strval(evsel, sample, "name");
+ }
}
static void irq_work_name(struct kwork_work *work, char *buf, int len)
@@ -938,18 +1128,27 @@ static char *evsel__softirq_name(struct evsel *evsel, u64 num)
return name;
}
-static void softirq_work_init(struct kwork_class *class,
+static void softirq_work_init(struct perf_kwork *kwork,
+ struct kwork_class *class,
struct kwork_work *work,
+ enum kwork_trace_type src_type __maybe_unused,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine __maybe_unused)
{
- u64 num = evsel__intval(evsel, sample, "vec");
+ u64 num;
- work->id = num;
work->class = class;
work->cpu = sample->cpu;
- work->name = evsel__softirq_name(evsel, num);
+
+ if (kwork->report == KWORK_REPORT_TOP) {
+ work->id = evsel__intval_common(evsel, sample, "common_pid");
+ work->name = NULL;
+ } else {
+ num = evsel__intval(evsel, sample, "vec");
+ work->id = num;
+ work->name = evsel__softirq_name(evsel, num);
+ }
}
static void softirq_work_name(struct kwork_work *work, char *buf, int len)
@@ -1029,8 +1228,10 @@ static int workqueue_class_init(struct kwork_class *class,
return 0;
}
-static void workqueue_work_init(struct kwork_class *class,
+static void workqueue_work_init(struct perf_kwork *kwork __maybe_unused,
+ struct kwork_class *class,
struct kwork_work *work,
+ enum kwork_trace_type src_type __maybe_unused,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
@@ -1064,10 +1265,77 @@ static struct kwork_class kwork_workqueue = {
.work_name = workqueue_work_name,
};
+static struct kwork_class kwork_sched;
+static int process_sched_switch_event(struct perf_tool *tool,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+ if (kwork->tp_handler->sched_switch_event)
+ return kwork->tp_handler->sched_switch_event(kwork, &kwork_sched,
+ evsel, sample, machine);
+ return 0;
+}
+
+const struct evsel_str_handler sched_tp_handlers[] = {
+ { "sched:sched_switch", process_sched_switch_event, },
+};
+
+static int sched_class_init(struct kwork_class *class,
+ struct perf_session *session)
+{
+ if (perf_session__set_tracepoints_handlers(session,
+ sched_tp_handlers)) {
+ pr_err("Failed to set sched tracepoints handlers\n");
+ return -1;
+ }
+
+ class->work_root = RB_ROOT_CACHED;
+ return 0;
+}
+
+static void sched_work_init(struct perf_kwork *kwork __maybe_unused,
+ struct kwork_class *class,
+ struct kwork_work *work,
+ enum kwork_trace_type src_type,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine __maybe_unused)
+{
+ work->class = class;
+ work->cpu = sample->cpu;
+
+ if (src_type == KWORK_TRACE_EXIT) {
+ work->id = evsel__intval(evsel, sample, "prev_pid");
+ work->name = strdup(evsel__strval(evsel, sample, "prev_comm"));
+ } else if (src_type == KWORK_TRACE_ENTRY) {
+ work->id = evsel__intval(evsel, sample, "next_pid");
+ work->name = strdup(evsel__strval(evsel, sample, "next_comm"));
+ }
+}
+
+static void sched_work_name(struct kwork_work *work, char *buf, int len)
+{
+ snprintf(buf, len, "%s", work->name);
+}
+
+static struct kwork_class kwork_sched = {
+ .name = "sched",
+ .type = KWORK_CLASS_SCHED,
+ .nr_tracepoints = ARRAY_SIZE(sched_tp_handlers),
+ .tp_handlers = sched_tp_handlers,
+ .class_init = sched_class_init,
+ .work_init = sched_work_init,
+ .work_name = sched_work_name,
+};
+
static struct kwork_class *kwork_class_supported_list[KWORK_CLASS_MAX] = {
[KWORK_CLASS_IRQ] = &kwork_irq,
[KWORK_CLASS_SOFTIRQ] = &kwork_softirq,
[KWORK_CLASS_WORKQUEUE] = &kwork_workqueue,
+ [KWORK_CLASS_SCHED] = &kwork_sched,
};
static void print_separator(int len)
@@ -1291,11 +1559,132 @@ static void print_bad_events(struct perf_kwork *kwork)
}
}
-static void work_sort(struct perf_kwork *kwork, struct kwork_class *class)
+const char *graph_load = "||||||||||||||||||||||||||||||||||||||||||||||||";
+const char *graph_idle = " ";
+static void top_print_per_cpu_load(struct perf_kwork *kwork)
+{
+ int i, load_width;
+ u64 total, load, load_ratio;
+ struct kwork_top_stat *stat = &kwork->top_stat;
+
+ for (i = 0; i < MAX_NR_CPUS; i++) {
+ total = stat->cpus_runtime[i].total;
+ load = stat->cpus_runtime[i].load;
+ if (test_bit(i, stat->all_cpus_bitmap) && total) {
+ load_ratio = load * 10000 / total;
+ load_width = PRINT_CPU_USAGE_HIST_WIDTH *
+ load_ratio / 10000;
+
+ printf("%%Cpu%-*d[%.*s%.*s %*.*f%%]\n",
+ PRINT_CPU_WIDTH, i,
+ load_width, graph_load,
+ PRINT_CPU_USAGE_HIST_WIDTH - load_width,
+ graph_idle,
+ PRINT_CPU_USAGE_WIDTH,
+ PRINT_CPU_USAGE_DECIMAL_WIDTH,
+ (double)load_ratio / 100);
+ }
+ }
+}
+
+static void top_print_cpu_usage(struct perf_kwork *kwork)
+{
+ struct kwork_top_stat *stat = &kwork->top_stat;
+ u64 idle_time = stat->cpus_runtime[MAX_NR_CPUS].idle;
+ u64 hardirq_time = stat->cpus_runtime[MAX_NR_CPUS].irq;
+ u64 softirq_time = stat->cpus_runtime[MAX_NR_CPUS].softirq;
+ int cpus_nr = bitmap_weight(stat->all_cpus_bitmap, MAX_NR_CPUS);
+ u64 cpus_total_time = stat->cpus_runtime[MAX_NR_CPUS].total;
+
+ printf("Total : %*.*f ms, %d cpus\n",
+ PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
+ (double)cpus_total_time / NSEC_PER_MSEC,
+ cpus_nr);
+
+ printf("%%Cpu(s): %*.*f%% id, %*.*f%% hi, %*.*f%% si\n",
+ PRINT_CPU_USAGE_WIDTH, PRINT_CPU_USAGE_DECIMAL_WIDTH,
+ cpus_total_time ? (double)idle_time * 100 / cpus_total_time : 0,
+
+ PRINT_CPU_USAGE_WIDTH, PRINT_CPU_USAGE_DECIMAL_WIDTH,
+ cpus_total_time ? (double)hardirq_time * 100 / cpus_total_time : 0,
+
+ PRINT_CPU_USAGE_WIDTH, PRINT_CPU_USAGE_DECIMAL_WIDTH,
+ cpus_total_time ? (double)softirq_time * 100 / cpus_total_time : 0);
+
+ top_print_per_cpu_load(kwork);
+}
+
+static void top_print_header(struct perf_kwork *kwork __maybe_unused)
+{
+ int ret;
+
+ printf("\n ");
+ ret = printf(" %*s %s%*s%s %*s %*s %-*s",
+ PRINT_PID_WIDTH, "PID",
+
+ kwork->use_bpf ? " " : "",
+ kwork->use_bpf ? PRINT_PID_WIDTH : 0,
+ kwork->use_bpf ? "SPID" : "",
+ kwork->use_bpf ? " " : "",
+
+ PRINT_CPU_USAGE_WIDTH, "%CPU",
+ PRINT_RUNTIME_HEADER_WIDTH + RPINT_DECIMAL_WIDTH, "RUNTIME",
+ PRINT_TASK_NAME_WIDTH, "COMMAND");
+ printf("\n ");
+ print_separator(ret);
+}
+
+static int top_print_work(struct perf_kwork *kwork __maybe_unused, struct kwork_work *work)
+{
+ int ret = 0;
+
+ printf(" ");
+
+ /*
+ * pid
+ */
+ ret += printf(" %*ld ", PRINT_PID_WIDTH, work->id);
+
+ /*
+ * tgid
+ */
+ if (kwork->use_bpf)
+ ret += printf(" %*d ", PRINT_PID_WIDTH, work->tgid);
+
+ /*
+ * cpu usage
+ */
+ ret += printf(" %*.*f ",
+ PRINT_CPU_USAGE_WIDTH, PRINT_CPU_USAGE_DECIMAL_WIDTH,
+ (double)work->cpu_usage / 100);
+
+ /*
+ * total runtime
+ */
+ ret += printf(" %*.*f ms ",
+ PRINT_RUNTIME_WIDTH + RPINT_DECIMAL_WIDTH, RPINT_DECIMAL_WIDTH,
+ (double)work->total_runtime / NSEC_PER_MSEC);
+
+ /*
+ * command
+ */
+ if (kwork->use_bpf)
+ ret += printf(" %s%s%s",
+ work->is_kthread ? "[" : "",
+ work->name,
+ work->is_kthread ? "]" : "");
+ else
+ ret += printf(" %-*s", PRINT_TASK_NAME_WIDTH, work->name);
+
+ printf("\n");
+ return ret;
+}
+
+static void work_sort(struct perf_kwork *kwork,
+ struct kwork_class *class, struct rb_root_cached *root)
{
struct rb_node *node;
struct kwork_work *data;
- struct rb_root_cached *root = &class->work_root;
pr_debug("Sorting %s ...\n", class->name);
for (;;) {
@@ -1315,7 +1704,7 @@ static void perf_kwork__sort(struct perf_kwork *kwork)
struct kwork_class *class;
list_for_each_entry(class, &kwork->class_list, list)
- work_sort(kwork, class);
+ work_sort(kwork, class, &class->work_root);
}
static int perf_kwork__check_config(struct perf_kwork *kwork,
@@ -1338,6 +1727,11 @@ static int perf_kwork__check_config(struct perf_kwork *kwork,
.entry_event = timehist_entry_event,
.exit_event = timehist_exit_event,
};
+ static struct trace_kwork_handler top_ops = {
+ .entry_event = timehist_entry_event,
+ .exit_event = top_exit_event,
+ .sched_switch_event = top_sched_switch_event,
+ };
switch (kwork->report) {
case KWORK_REPORT_RUNTIME:
@@ -1349,6 +1743,9 @@ static int perf_kwork__check_config(struct perf_kwork *kwork,
case KWORK_REPORT_TIMEHIST:
kwork->tp_handler = &timehist_ops;
break;
+ case KWORK_REPORT_TOP:
+ kwork->tp_handler = &top_ops;
+ break;
default:
pr_debug("Invalid report type %d\n", kwork->report);
return -1;
@@ -1469,7 +1866,7 @@ static void sig_handler(int sig)
* Simply capture termination signal so that
* the program can continue after pause returns
*/
- pr_debug("Captuer signal %d\n", sig);
+ pr_debug("Capture signal %d\n", sig);
}
static int perf_kwork__report_bpf(struct perf_kwork *kwork)
@@ -1595,6 +1992,248 @@ static int perf_kwork__timehist(struct perf_kwork *kwork)
return perf_kwork__read_events(kwork);
}
+static void top_calc_total_runtime(struct perf_kwork *kwork)
+{
+ struct kwork_class *class;
+ struct kwork_work *work;
+ struct rb_node *next;
+ struct kwork_top_stat *stat = &kwork->top_stat;
+
+ class = get_kwork_class(kwork, KWORK_CLASS_SCHED);
+ if (!class)
+ return;
+
+ next = rb_first_cached(&class->work_root);
+ while (next) {
+ work = rb_entry(next, struct kwork_work, node);
+ BUG_ON(work->cpu >= MAX_NR_CPUS);
+ stat->cpus_runtime[work->cpu].total += work->total_runtime;
+ stat->cpus_runtime[MAX_NR_CPUS].total += work->total_runtime;
+ next = rb_next(next);
+ }
+}
+
+static void top_calc_idle_time(struct perf_kwork *kwork,
+ struct kwork_work *work)
+{
+ struct kwork_top_stat *stat = &kwork->top_stat;
+
+ if (work->id == 0) {
+ stat->cpus_runtime[work->cpu].idle += work->total_runtime;
+ stat->cpus_runtime[MAX_NR_CPUS].idle += work->total_runtime;
+ }
+}
+
+static void top_calc_irq_runtime(struct perf_kwork *kwork,
+ enum kwork_class_type type,
+ struct kwork_work *work)
+{
+ struct kwork_top_stat *stat = &kwork->top_stat;
+
+ if (type == KWORK_CLASS_IRQ) {
+ stat->cpus_runtime[work->cpu].irq += work->total_runtime;
+ stat->cpus_runtime[MAX_NR_CPUS].irq += work->total_runtime;
+ } else if (type == KWORK_CLASS_SOFTIRQ) {
+ stat->cpus_runtime[work->cpu].softirq += work->total_runtime;
+ stat->cpus_runtime[MAX_NR_CPUS].softirq += work->total_runtime;
+ }
+}
+
+static void top_subtract_irq_runtime(struct perf_kwork *kwork,
+ struct kwork_work *work)
+{
+ struct kwork_class *class;
+ struct kwork_work *data;
+ unsigned int i;
+ int irq_class_list[] = {KWORK_CLASS_IRQ, KWORK_CLASS_SOFTIRQ};
+
+ for (i = 0; i < ARRAY_SIZE(irq_class_list); i++) {
+ class = get_kwork_class(kwork, irq_class_list[i]);
+ if (!class)
+ continue;
+
+ data = find_work_by_id(&class->work_root,
+ work->id, work->cpu);
+ if (!data)
+ continue;
+
+ if (work->total_runtime > data->total_runtime) {
+ work->total_runtime -= data->total_runtime;
+ top_calc_irq_runtime(kwork, irq_class_list[i], data);
+ }
+ }
+}
+
+static void top_calc_cpu_usage(struct perf_kwork *kwork)
+{
+ struct kwork_class *class;
+ struct kwork_work *work;
+ struct rb_node *next;
+ struct kwork_top_stat *stat = &kwork->top_stat;
+
+ class = get_kwork_class(kwork, KWORK_CLASS_SCHED);
+ if (!class)
+ return;
+
+ next = rb_first_cached(&class->work_root);
+ while (next) {
+ work = rb_entry(next, struct kwork_work, node);
+
+ if (work->total_runtime == 0)
+ goto next;
+
+ __set_bit(work->cpu, stat->all_cpus_bitmap);
+
+ top_subtract_irq_runtime(kwork, work);
+
+ work->cpu_usage = work->total_runtime * 10000 /
+ stat->cpus_runtime[work->cpu].total;
+
+ top_calc_idle_time(kwork, work);
+next:
+ next = rb_next(next);
+ }
+}
+
+static void top_calc_load_runtime(struct perf_kwork *kwork,
+ struct kwork_work *work)
+{
+ struct kwork_top_stat *stat = &kwork->top_stat;
+
+ if (work->id != 0) {
+ stat->cpus_runtime[work->cpu].load += work->total_runtime;
+ stat->cpus_runtime[MAX_NR_CPUS].load += work->total_runtime;
+ }
+}
+
+static void top_merge_tasks(struct perf_kwork *kwork)
+{
+ struct kwork_work *merged_work, *data;
+ struct kwork_class *class;
+ struct rb_node *node;
+ int cpu;
+ struct rb_root_cached merged_root = RB_ROOT_CACHED;
+
+ class = get_kwork_class(kwork, KWORK_CLASS_SCHED);
+ if (!class)
+ return;
+
+ for (;;) {
+ node = rb_first_cached(&class->work_root);
+ if (!node)
+ break;
+
+ rb_erase_cached(node, &class->work_root);
+ data = rb_entry(node, struct kwork_work, node);
+
+ if (!profile_name_match(kwork, data))
+ continue;
+
+ cpu = data->cpu;
+ merged_work = find_work_by_id(&merged_root, data->id,
+ data->id == 0 ? cpu : -1);
+ if (!merged_work) {
+ work_insert(&merged_root, data, &kwork->cmp_id);
+ } else {
+ merged_work->total_runtime += data->total_runtime;
+ merged_work->cpu_usage += data->cpu_usage;
+ }
+
+ top_calc_load_runtime(kwork, data);
+ }
+
+ work_sort(kwork, class, &merged_root);
+}
+
+static void perf_kwork__top_report(struct perf_kwork *kwork)
+{
+ struct kwork_work *work;
+ struct rb_node *next;
+
+ printf("\n");
+
+ top_print_cpu_usage(kwork);
+ top_print_header(kwork);
+ next = rb_first_cached(&kwork->sorted_work_root);
+ while (next) {
+ work = rb_entry(next, struct kwork_work, node);
+ process_skipped_events(kwork, work);
+
+ if (work->total_runtime == 0)
+ goto next;
+
+ top_print_work(kwork, work);
+
+next:
+ next = rb_next(next);
+ }
+
+ printf("\n");
+}
+
+static int perf_kwork__top_bpf(struct perf_kwork *kwork)
+{
+ int ret;
+
+ signal(SIGINT, sig_handler);
+ signal(SIGTERM, sig_handler);
+
+ ret = perf_kwork__top_prepare_bpf(kwork);
+ if (ret)
+ return -1;
+
+ printf("Starting trace, Hit <Ctrl+C> to stop and report\n");
+
+ perf_kwork__top_start();
+
+ /*
+ * a simple pause, wait here for stop signal
+ */
+ pause();
+
+ perf_kwork__top_finish();
+
+ perf_kwork__top_read_bpf(kwork);
+
+ perf_kwork__top_cleanup_bpf();
+
+ return 0;
+
+}
+
+static int perf_kwork__top(struct perf_kwork *kwork)
+{
+ struct __top_cpus_runtime *cpus_runtime;
+ int ret = 0;
+
+ cpus_runtime = zalloc(sizeof(struct __top_cpus_runtime) * (MAX_NR_CPUS + 1));
+ if (!cpus_runtime)
+ return -1;
+
+ kwork->top_stat.cpus_runtime = cpus_runtime;
+ bitmap_zero(kwork->top_stat.all_cpus_bitmap, MAX_NR_CPUS);
+
+ if (kwork->use_bpf)
+ ret = perf_kwork__top_bpf(kwork);
+ else
+ ret = perf_kwork__read_events(kwork);
+
+ if (ret)
+ goto out;
+
+ top_calc_total_runtime(kwork);
+ top_calc_cpu_usage(kwork);
+ top_merge_tasks(kwork);
+
+ setup_pager();
+
+ perf_kwork__top_report(kwork);
+
+out:
+ free(kwork->top_stat.cpus_runtime);
+ return ret;
+}
+
static void setup_event_list(struct perf_kwork *kwork,
const struct option *options,
const char * const usage_msg[])
@@ -1603,8 +2242,11 @@ static void setup_event_list(struct perf_kwork *kwork,
struct kwork_class *class;
char *tmp, *tok, *str;
+ /*
+ * set default events list if not specified
+ */
if (kwork->event_list_str == NULL)
- goto null_event_list_str;
+ kwork->event_list_str = "irq, softirq, workqueue";
str = strdup(kwork->event_list_str);
for (tok = strtok_r(str, ", ", &tmp);
@@ -1623,17 +2265,6 @@ static void setup_event_list(struct perf_kwork *kwork,
}
free(str);
-null_event_list_str:
- /*
- * config all kwork events if not specified
- */
- if (list_empty(&kwork->class_list)) {
- for (i = 0; i < KWORK_CLASS_MAX; i++) {
- list_add_tail(&kwork_class_supported_list[i]->list,
- &kwork->class_list);
- }
- }
-
pr_debug("Config event list:");
list_for_each_entry(class, &kwork->class_list, list)
pr_debug(" %s", class->name);
@@ -1692,9 +2323,10 @@ int cmd_kwork(int argc, const char **argv)
static struct perf_kwork kwork = {
.class_list = LIST_HEAD_INIT(kwork.class_list),
.tool = {
- .mmap = perf_event__process_mmap,
- .mmap2 = perf_event__process_mmap2,
- .sample = perf_kwork__process_tracepoint_sample,
+ .mmap = perf_event__process_mmap,
+ .mmap2 = perf_event__process_mmap2,
+ .sample = perf_kwork__process_tracepoint_sample,
+ .ordered_events = true,
},
.atom_page_list = LIST_HEAD_INIT(kwork.atom_page_list),
.sort_list = LIST_HEAD_INIT(kwork.sort_list),
@@ -1721,13 +2353,14 @@ int cmd_kwork(int argc, const char **argv)
};
static const char default_report_sort_order[] = "runtime, max, count";
static const char default_latency_sort_order[] = "avg, max, count";
+ static const char default_top_sort_order[] = "rate, runtime";
const struct option kwork_options[] = {
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
OPT_STRING('k', "kwork", &kwork.event_list_str, "kwork",
- "list of kwork to profile (irq, softirq, workqueue, etc)"),
+ "list of kwork to profile (irq, softirq, workqueue, sched, etc)"),
OPT_BOOLEAN('f', "force", &kwork.force, "don't complain, do it"),
OPT_END()
};
@@ -1788,6 +2421,23 @@ int cmd_kwork(int argc, const char **argv)
"input file name"),
OPT_PARENT(kwork_options)
};
+ const struct option top_options[] = {
+ OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
+ "sort by key(s): rate, runtime, tid"),
+ OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
+ "list of cpus to profile"),
+ OPT_STRING('n', "name", &kwork.profile_name, "name",
+ "event name to profile"),
+ OPT_STRING(0, "time", &kwork.time_str, "str",
+ "Time span for analysis (start,stop)"),
+ OPT_STRING('i', "input", &input_name, "file",
+ "input file name"),
+#ifdef HAVE_BPF_SKEL
+ OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
+ "Use BPF to measure task cpu usage"),
+#endif
+ OPT_PARENT(kwork_options)
+ };
const char *kwork_usage[] = {
NULL,
NULL
@@ -1804,8 +2454,12 @@ int cmd_kwork(int argc, const char **argv)
"perf kwork timehist [<options>]",
NULL
};
+ const char * const top_usage[] = {
+ "perf kwork top [<options>]",
+ NULL
+ };
const char *const kwork_subcommands[] = {
- "record", "report", "latency", "timehist", NULL
+ "record", "report", "latency", "timehist", "top", NULL
};
argc = parse_options_subcommand(argc, argv, kwork_options,
@@ -1814,12 +2468,12 @@ int cmd_kwork(int argc, const char **argv)
if (!argc)
usage_with_options(kwork_usage, kwork_options);
- setup_event_list(&kwork, kwork_options, kwork_usage);
sort_dimension__add(&kwork, "id", &kwork.cmp_id);
- if (strlen(argv[0]) > 2 && strstarts("record", argv[0]))
+ if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
+ setup_event_list(&kwork, kwork_options, kwork_usage);
return perf_kwork__record(&kwork, argc, argv);
- else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
+ } else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
kwork.sort_order = default_report_sort_order;
if (argc > 1) {
argc = parse_options(argc, argv, report_options, report_usage, 0);
@@ -1828,6 +2482,7 @@ int cmd_kwork(int argc, const char **argv)
}
kwork.report = KWORK_REPORT_RUNTIME;
setup_sorting(&kwork, report_options, report_usage);
+ setup_event_list(&kwork, kwork_options, kwork_usage);
return perf_kwork__report(&kwork);
} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
kwork.sort_order = default_latency_sort_order;
@@ -1838,6 +2493,7 @@ int cmd_kwork(int argc, const char **argv)
}
kwork.report = KWORK_REPORT_LATENCY;
setup_sorting(&kwork, latency_options, latency_usage);
+ setup_event_list(&kwork, kwork_options, kwork_usage);
return perf_kwork__report(&kwork);
} else if (strlen(argv[0]) > 2 && strstarts("timehist", argv[0])) {
if (argc > 1) {
@@ -1846,7 +2502,21 @@ int cmd_kwork(int argc, const char **argv)
usage_with_options(timehist_usage, timehist_options);
}
kwork.report = KWORK_REPORT_TIMEHIST;
+ setup_event_list(&kwork, kwork_options, kwork_usage);
return perf_kwork__timehist(&kwork);
+ } else if (strlen(argv[0]) > 2 && strstarts("top", argv[0])) {
+ kwork.sort_order = default_top_sort_order;
+ if (argc > 1) {
+ argc = parse_options(argc, argv, top_options, top_usage, 0);
+ if (argc)
+ usage_with_options(top_usage, top_options);
+ }
+ kwork.report = KWORK_REPORT_TOP;
+ if (!kwork.event_list_str)
+ kwork.event_list_str = "sched, irq, softirq";
+ setup_event_list(&kwork, kwork_options, kwork_usage);
+ setup_sorting(&kwork, top_options, top_usage);
+ return perf_kwork__top(&kwork);
} else
usage_with_options(kwork_usage, kwork_options);
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index b141f2134274..a3ff2f4edbaa 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -10,6 +10,7 @@
#include "util/thread.h"
#include "util/header.h"
#include "util/target.h"
+#include "util/cgroup.h"
#include "util/callchain.h"
#include "util/lock-contention.h"
#include "util/bpf_skel/lock_data.h"
@@ -60,6 +61,7 @@ static bool combine_locks;
static bool show_thread_stats;
static bool show_lock_addrs;
static bool show_lock_owner;
+static bool show_lock_cgroups;
static bool use_bpf;
static unsigned long bpf_map_entries = MAX_ENTRIES;
static int max_stack_depth = CONTENTION_STACK_DEPTH;
@@ -524,6 +526,7 @@ bool match_callstack_filter(struct machine *machine, u64 *callstack)
struct map *kmap;
struct symbol *sym;
u64 ip;
+ const char *arch = perf_env__arch(machine->env);
if (list_empty(&callstack_filters))
return true;
@@ -531,7 +534,21 @@ bool match_callstack_filter(struct machine *machine, u64 *callstack)
for (int i = 0; i < max_stack_depth; i++) {
struct callstack_filter *filter;
- if (!callstack || !callstack[i])
+ /*
+		 * On powerpc, the callchain saved by the kernel always includes
+		 * the first three entries as the NIP (next instruction pointer),
+		 * LR (link register), and the contents of the LR save area in the
+		 * second stack frame. In certain scenarios it is possible to have
+		 * invalid kernel instruction addresses in either LR or the second
+		 * stack frame's LR. In that case, the kernel stores that address as
+		 * zero.
+		 *
+		 * The check below continues to look into the callstack,
+		 * in case the first or second callstack index entry has a 0
+		 * address for powerpc.
+ */
+ if (!callstack || (!callstack[i] && (strcmp(arch, "powerpc") ||
+ (i != 1 && i != 2))))
break;
ip = callstack[i];
@@ -619,6 +636,7 @@ static int get_key_by_aggr_mode_simple(u64 *key, u64 addr, u32 tid)
*key = tid;
break;
case LOCK_AGGR_CALLER:
+ case LOCK_AGGR_CGROUP:
default:
pr_err("Invalid aggregation mode: %d\n", aggr_mode);
return -EINVAL;
@@ -1103,6 +1121,7 @@ static int report_lock_contention_begin_event(struct evsel *evsel,
if (lock_contention_caller(evsel, sample, buf, sizeof(buf)) < 0)
name = "Unknown";
break;
+ case LOCK_AGGR_CGROUP:
case LOCK_AGGR_TASK:
default:
break;
@@ -1628,6 +1647,9 @@ static void lock_filter_finish(void)
zfree(&filters.syms);
filters.nr_syms = 0;
+
+ zfree(&filters.cgrps);
+ filters.nr_cgrps = 0;
}
static void sort_contention_result(void)
@@ -1653,6 +1675,9 @@ static void print_header_stdio(void)
case LOCK_AGGR_ADDR:
fprintf(lock_output, " %16s %s\n\n", "address", "symbol");
break;
+ case LOCK_AGGR_CGROUP:
+ fprintf(lock_output, " %s\n\n", "cgroup");
+ break;
default:
break;
}
@@ -1680,6 +1705,9 @@ static void print_header_csv(const char *sep)
case LOCK_AGGR_ADDR:
fprintf(lock_output, "%s%s %s%s %s\n", "address", sep, "symbol", sep, "type");
break;
+ case LOCK_AGGR_CGROUP:
+ fprintf(lock_output, "%s\n", "cgroup");
+ break;
default:
break;
}
@@ -1720,6 +1748,9 @@ static void print_lock_stat_stdio(struct lock_contention *con, struct lock_stat
fprintf(lock_output, " %016llx %s (%s)\n", (unsigned long long)st->addr,
st->name, get_type_name(st->flags));
break;
+ case LOCK_AGGR_CGROUP:
+ fprintf(lock_output, " %s\n", st->name);
+ break;
default:
break;
}
@@ -1770,6 +1801,9 @@ static void print_lock_stat_csv(struct lock_contention *con, struct lock_stat *s
fprintf(lock_output, "%llx%s %s%s %s\n", (unsigned long long)st->addr, sep,
st->name, sep, get_type_name(st->flags));
break;
+ case LOCK_AGGR_CGROUP:
+ fprintf(lock_output, "%s\n",st->name);
+ break;
default:
break;
}
@@ -1999,6 +2033,27 @@ static int check_lock_contention_options(const struct option *options,
return -1;
}
+ if (show_lock_cgroups && !use_bpf) {
+ pr_err("Cgroups are available only with BPF\n");
+ parse_options_usage(usage, options, "lock-cgroup", 0);
+ parse_options_usage(NULL, options, "use-bpf", 0);
+ return -1;
+ }
+
+ if (show_lock_cgroups && show_lock_addrs) {
+ pr_err("Cannot use cgroup and addr mode together\n");
+ parse_options_usage(usage, options, "lock-cgroup", 0);
+ parse_options_usage(NULL, options, "lock-addr", 0);
+ return -1;
+ }
+
+ if (show_lock_cgroups && show_thread_stats) {
+ pr_err("Cannot use cgroup and thread mode together\n");
+ parse_options_usage(usage, options, "lock-cgroup", 0);
+ parse_options_usage(NULL, options, "threads", 0);
+ return -1;
+ }
+
if (symbol_conf.field_sep) {
if (strstr(symbol_conf.field_sep, ":") || /* part of type flags */
strstr(symbol_conf.field_sep, "+") || /* part of caller offset */
@@ -2040,6 +2095,7 @@ static int __cmd_contention(int argc, const char **argv)
.filters = &filters,
.save_callstack = needs_callstack(),
.owner = show_lock_owner,
+ .cgroups = RB_ROOT,
};
lockhash_table = calloc(LOCKHASH_SIZE, sizeof(*lockhash_table));
@@ -2059,7 +2115,8 @@ static int __cmd_contention(int argc, const char **argv)
con.machine = &session->machines.host;
con.aggr_mode = aggr_mode = show_thread_stats ? LOCK_AGGR_TASK :
- show_lock_addrs ? LOCK_AGGR_ADDR : LOCK_AGGR_CALLER;
+ show_lock_addrs ? LOCK_AGGR_ADDR :
+ show_lock_cgroups ? LOCK_AGGR_CGROUP : LOCK_AGGR_CALLER;
if (con.aggr_mode == LOCK_AGGR_CALLER)
con.save_callstack = true;
@@ -2158,7 +2215,7 @@ static int __cmd_contention(int argc, const char **argv)
out_delete:
lock_filter_finish();
evlist__delete(con.evlist);
- lock_contention_finish();
+ lock_contention_finish(&con);
perf_session__delete(session);
zfree(&lockhash_table);
return err;
@@ -2421,6 +2478,7 @@ static int parse_call_stack(const struct option *opt __maybe_unused, const char
entry = malloc(sizeof(*entry) + strlen(tok) + 1);
if (entry == NULL) {
pr_err("Memory allocation failure\n");
+ free(s);
return -1;
}
@@ -2450,6 +2508,56 @@ static int parse_output(const struct option *opt __maybe_unused, const char *str
return 0;
}
+static bool add_lock_cgroup(char *name)
+{
+ u64 *tmp;
+ struct cgroup *cgrp;
+
+ cgrp = cgroup__new(name, /*do_open=*/false);
+ if (cgrp == NULL) {
+ pr_err("Failed to create cgroup: %s\n", name);
+ return false;
+ }
+
+ if (read_cgroup_id(cgrp) < 0) {
+ pr_err("Failed to read cgroup id for %s\n", name);
+ cgroup__put(cgrp);
+ return false;
+ }
+
+ tmp = realloc(filters.cgrps, (filters.nr_cgrps + 1) * sizeof(*filters.cgrps));
+ if (tmp == NULL) {
+ pr_err("Memory allocation failure\n");
+ return false;
+ }
+
+ tmp[filters.nr_cgrps++] = cgrp->id;
+ filters.cgrps = tmp;
+ cgroup__put(cgrp);
+ return true;
+}
+
+static int parse_cgroup_filter(const struct option *opt __maybe_unused, const char *str,
+ int unset __maybe_unused)
+{
+ char *s, *tmp, *tok;
+ int ret = 0;
+
+ s = strdup(str);
+ if (s == NULL)
+ return -1;
+
+ for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
+ if (!add_lock_cgroup(tok)) {
+ ret = -1;
+ break;
+ }
+ }
+
+ free(s);
+ return ret;
+}
+
int cmd_lock(int argc, const char **argv)
{
const struct option lock_options[] = {
@@ -2523,6 +2631,9 @@ int cmd_lock(int argc, const char **argv)
OPT_BOOLEAN('o', "lock-owner", &show_lock_owner, "show lock owners instead of waiters"),
OPT_STRING_NOEMPTY('x', "field-separator", &symbol_conf.field_sep, "separator",
"print result in CSV format with custom separator"),
+ OPT_BOOLEAN(0, "lock-cgroup", &show_lock_cgroups, "show lock stats by cgroup"),
+ OPT_CALLBACK('G', "cgroup-filter", NULL, "CGROUPS",
+ "Filter specific cgroups", parse_cgroup_filter),
OPT_PARENT(lock_options)
};
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 34bb31f08bb5..dcf288a4fb9a 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -906,6 +906,65 @@ static int record__config_off_cpu(struct record *rec)
return off_cpu_prepare(rec->evlist, &rec->opts.target, &rec->opts);
}
+static bool record__tracking_system_wide(struct record *rec)
+{
+ struct evlist *evlist = rec->evlist;
+ struct evsel *evsel;
+
+ /*
+	 * If a non-dummy evsel exists, system_wide sideband is needed to
+	 * help parse sample information.
+	 * For example, the PERF_EVENT_MMAP event helps parse symbols,
+	 * and the PERF_EVENT_COMM event helps parse the task executable name.
+	 */
+ evlist__for_each_entry(evlist, evsel) {
+ if (!evsel__is_dummy_event(evsel))
+ return true;
+ }
+
+ return false;
+}
+
+static int record__config_tracking_events(struct record *rec)
+{
+ struct record_opts *opts = &rec->opts;
+ struct evlist *evlist = rec->evlist;
+ bool system_wide = false;
+ struct evsel *evsel;
+
+ /*
+	 * For initial_delay, system wide or a hybrid system, we need to add a
+	 * tracking event so that we can track PERF_RECORD_MMAP to cover the
+ * delay of waiting or event synthesis.
+ */
+ if (opts->target.initial_delay || target__has_cpu(&opts->target) ||
+ perf_pmus__num_core_pmus() > 1) {
+
+ /*
+ * User space tasks can migrate between CPUs, so when tracing
+ * selected CPUs, sideband for all CPUs is still needed.
+ */
+ if (!!opts->target.cpu_list && record__tracking_system_wide(rec))
+ system_wide = true;
+
+ evsel = evlist__findnew_tracking_event(evlist, system_wide);
+ if (!evsel)
+ return -ENOMEM;
+
+ /*
+ * Enable the tracking event when the process is forked for
+ * initial_delay, immediately for system wide.
+ */
+ if (opts->target.initial_delay && !evsel->immediate &&
+ !target__has_cpu(&opts->target))
+ evsel->core.attr.enable_on_exec = 1;
+ else
+ evsel->immediate = 1;
+ }
+
+ return 0;
+}
+
static bool record__kcore_readable(struct machine *machine)
{
char kcore[PATH_MAX];
@@ -1286,33 +1345,6 @@ static int record__open(struct record *rec)
struct record_opts *opts = &rec->opts;
int rc = 0;
- /*
- * For initial_delay, system wide or a hybrid system, we need to add a
- * dummy event so that we can track PERF_RECORD_MMAP to cover the delay
- * of waiting or event synthesis.
- */
- if (opts->target.initial_delay || target__has_cpu(&opts->target) ||
- perf_pmus__num_core_pmus() > 1) {
- pos = evlist__get_tracking_event(evlist);
- if (!evsel__is_dummy_event(pos)) {
- /* Set up dummy event. */
- if (evlist__add_dummy(evlist))
- return -ENOMEM;
- pos = evlist__last(evlist);
- evlist__set_tracking_event(evlist, pos);
- }
-
- /*
- * Enable the dummy event when the process is forked for
- * initial_delay, immediately for system wide.
- */
- if (opts->target.initial_delay && !pos->immediate &&
- !target__has_cpu(&opts->target))
- pos->core.attr.enable_on_exec = 1;
- else
- pos->immediate = 1;
- }
-
evlist__config(evlist, opts, &callchain_param);
evlist__for_each_entry(evlist, pos) {
@@ -4195,6 +4227,12 @@ int cmd_record(int argc, const char **argv)
goto out;
}
+ err = record__config_tracking_events(rec);
+ if (err) {
+ pr_err("record__config_tracking_events failed, error %d\n", err);
+ goto out;
+ }
+
err = record__init_thread_masks(rec);
if (err) {
pr_err("Failed to initialize parallel data streaming masks\n");
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index dcedfe00f04d..9cb1da2dc0c0 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -691,10 +691,25 @@ static int report__browse_hists(struct report *rep)
static int report__collapse_hists(struct report *rep)
{
+ struct perf_session *session = rep->session;
+ struct evlist *evlist = session->evlist;
struct ui_progress prog;
struct evsel *pos;
int ret = 0;
+ /*
+	 * The pipe data needs to set up hierarchy hpp formats now, because it
+ * cannot know about evsels in the data before reading the data. The
+ * normal file data saves the event (attribute) info in the header
+ * section, but pipe does not have the luxury.
+ */
+ if (perf_data__is_pipe(session->data)) {
+ if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0) {
+ ui__error("Failed to setup hierarchy output formats\n");
+ return -1;
+ }
+ }
+
ui_progress__init(&prog, rep->nr_entries, "Merging related events...");
evlist__for_each_entry(rep->session->evlist, pos) {
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 9ab300b6f131..dd6065afbbaf 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1385,7 +1385,7 @@ static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
pid_t l_tid, r_tid;
- if (RC_CHK_ACCESS(l->thread) == RC_CHK_ACCESS(r->thread))
+ if (RC_CHK_EQUAL(l->thread, r->thread))
return 0;
l_tid = thread__tid(l->thread);
r_tid = thread__tid(r->thread);
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 517bf25750c8..b1f57401ff23 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -3899,7 +3899,7 @@ int cmd_script(int argc, const char **argv)
"comma separated output fields prepend with 'type:'. "
"+field to add and -field to remove."
"Valid types: hw,sw,trace,raw,synth. "
- "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,dsoff"
+ "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,dsoff,"
"addr,symoff,srcline,period,iregs,uregs,brstack,"
"brstacksym,flags,data_src,weight,bpf-output,brstackinsn,"
"brstackinsnlen,brstackoff,callindent,insn,insnlen,synth,"
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 07b48f6df48e..a3af805a1d57 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -1622,7 +1622,7 @@ static int perf_stat_init_aggr_mode(void)
* taking the highest cpu number to be the size of
* the aggregation translate cpumap.
*/
- if (evsel_list->core.user_requested_cpus)
+ if (!perf_cpu_map__empty(evsel_list->core.user_requested_cpus))
nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
else
nr = 0;
diff --git a/tools/perf/builtin-version.c b/tools/perf/builtin-version.c
index e5859c70e195..ac20c2b9bbc2 100644
--- a/tools/perf/builtin-version.c
+++ b/tools/perf/builtin-version.c
@@ -81,6 +81,7 @@ static void library_status(void)
STATUS(HAVE_ZSTD_SUPPORT, zstd);
STATUS(HAVE_LIBPFM, libpfm4);
STATUS(HAVE_LIBTRACEEVENT, libtraceevent);
+ STATUS(HAVE_BPF_SKEL, bpf_skeletons);
}
int cmd_version(int argc, const char **argv)
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index e21caadda7c1..66ba33dbcef2 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -162,6 +162,7 @@ check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/ex
check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memset_\(erms\|orig\))"'
check arch/x86/include/asm/amd-ibs.h '-I "^#include [<\"]\(asm/\)*msr-index.h"'
check arch/arm64/include/asm/cputype.h '-I "^#include [<\"]\(asm/\)*sysreg.h"'
+check include/asm-generic/unaligned.h '-I "^#include <linux/unaligned/packed_struct.h>" -I "^#include <asm/byteorder.h>" -I "^#pragma GCC diagnostic"'
check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common\(-tools\)*.h>"'
check include/uapi/linux/mman.h '-I "^#include <\(uapi/\)*asm/mman.h>"'
check include/linux/build_bug.h '-I "^#\(ifndef\|endif\)\( \/\/\)* static_assert$"'
diff --git a/tools/perf/perf-completion.sh b/tools/perf/perf-completion.sh
index 978249d7868c..f224d79b89e6 100644
--- a/tools/perf/perf-completion.sh
+++ b/tools/perf/perf-completion.sh
@@ -164,7 +164,7 @@ __perf_main ()
$prev_skip_opts == @(record|stat|top) ]]; then
local cur1=${COMP_WORDS[COMP_CWORD]}
- local raw_evts=$($cmd list --raw-dump)
+ local raw_evts=$($cmd list --raw-dump hw sw cache tracepoint pmu sdt)
local arr s tmp result cpu_evts
# aarch64 doesn't have /sys/bus/event_source/devices/cpu/events
@@ -198,6 +198,14 @@ __perf_main ()
else
__perfcomp_colon "$evts" "$cur1"
fi
+ elif [[ $prev == @("--pfm-events") &&
+ $prev_skip_opts == @(record|stat|top) ]]; then
+ local evts=$($cmd list --raw-dump pfm)
+ __perfcomp "$evts" "$cur"
+ elif [[ $prev == @("-M"|"--metrics") &&
+ $prev_skip_opts == @(stat) ]]; then
+ local metrics=$($cmd list --raw-dump metric metricgroup)
+ __perfcomp "$metrics" "$cur"
else
# List subcommands for perf commands
if [[ $prev_skip_opts == @(kvm|kmem|mem|lock|sched|
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json
index 1e7e8901a445..e2848a9d4848 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json
@@ -1,362 +1,384 @@
[
{
+ "MetricName": "branch_miss_pred_rate",
"MetricExpr": "BR_MIS_PRED / BR_PRED",
"BriefDescription": "Branch predictor misprediction rate. May not count branches that are never resolved because they are in the misprediction shadow of an earlier branch",
- "MetricGroup": "Branch Prediction",
- "MetricName": "Misprediction"
+ "MetricGroup": "branch",
+ "ScaleUnit": "100%"
},
{
- "MetricExpr": "BR_MIS_PRED_RETIRED / BR_RETIRED",
- "BriefDescription": "Branch predictor misprediction rate",
- "MetricGroup": "Branch Prediction",
- "MetricName": "Misprediction (retired)"
- },
- {
- "MetricExpr": "BUS_ACCESS / ( BUS_CYCLES * 1)",
+ "MetricName": "bus_utilization",
+ "MetricExpr": "((BUS_ACCESS / (BUS_CYCLES * 1)) * 100)",
"BriefDescription": "Core-to-uncore bus utilization",
"MetricGroup": "Bus",
- "MetricName": "Bus utilization"
+ "ScaleUnit": "1percent of bus cycles"
},
{
- "MetricExpr": "L1D_CACHE_REFILL / L1D_CACHE",
- "BriefDescription": "L1D cache miss rate",
- "MetricGroup": "Cache",
- "MetricName": "L1D cache miss"
+ "MetricName": "l1d_cache_miss_ratio",
+ "MetricExpr": "(L1D_CACHE_REFILL / L1D_CACHE)",
+ "BriefDescription": "This metric measures the ratio of level 1 data cache accesses missed to the total number of level 1 data cache accesses. This gives an indication of the effectiveness of the level 1 data cache.",
+ "MetricGroup": "Miss_Ratio;L1D_Cache_Effectiveness",
+ "ScaleUnit": "1per cache access"
+ },
+ {
+ "MetricName": "l1i_cache_miss_ratio",
+ "MetricExpr": "(L1I_CACHE_REFILL / L1I_CACHE)",
+ "BriefDescription": "This metric measures the ratio of level 1 instruction cache accesses missed to the total number of level 1 instruction cache accesses. This gives an indication of the effectiveness of the level 1 instruction cache.",
+ "MetricGroup": "Miss_Ratio;L1I_Cache_Effectiveness",
+ "ScaleUnit": "1per cache access"
},
{
+ "MetricName": "Miss_Ratio;l1d_cache_read_miss",
"MetricExpr": "L1D_CACHE_LMISS_RD / L1D_CACHE_RD",
"BriefDescription": "L1D cache read miss rate",
"MetricGroup": "Cache",
- "MetricName": "L1D cache read miss"
+ "ScaleUnit": "1per cache read access"
},
{
- "MetricExpr": "L1I_CACHE_REFILL / L1I_CACHE",
- "BriefDescription": "L1I cache miss rate",
- "MetricGroup": "Cache",
- "MetricName": "L1I cache miss"
- },
- {
- "MetricExpr": "L2D_CACHE_REFILL / L2D_CACHE",
- "BriefDescription": "L2 cache miss rate",
- "MetricGroup": "Cache",
- "MetricName": "L2 cache miss"
+ "MetricName": "l2_cache_miss_ratio",
+ "MetricExpr": "(L2D_CACHE_REFILL / L2D_CACHE)",
+ "BriefDescription": "This metric measures the ratio of level 2 cache accesses missed to the total number of level 2 cache accesses. This gives an indication of the effectiveness of the level 2 cache, which is a unified cache that stores both data and instruction. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a unified cache.",
+ "MetricGroup": "Miss_Ratio;L2_Cache_Effectiveness",
+ "ScaleUnit": "1per cache access"
},
{
+ "MetricName": "l1i_cache_read_miss_rate",
"MetricExpr": "L1I_CACHE_LMISS / L1I_CACHE",
"BriefDescription": "L1I cache read miss rate",
"MetricGroup": "Cache",
- "MetricName": "L1I cache read miss"
+ "ScaleUnit": "1per cache access"
},
{
+ "MetricName": "l2d_cache_read_miss_rate",
"MetricExpr": "L2D_CACHE_LMISS_RD / L2D_CACHE_RD",
"BriefDescription": "L2 cache read miss rate",
"MetricGroup": "Cache",
- "MetricName": "L2 cache read miss"
+ "ScaleUnit": "1per cache read access"
},
{
- "MetricExpr": "(L1D_CACHE_LMISS_RD * 1000) / INST_RETIRED",
+ "MetricName": "l1d_cache_miss_mpki",
+ "MetricExpr": "(L1D_CACHE_LMISS_RD * 1e3) / INST_RETIRED",
"BriefDescription": "Misses per thousand instructions (data)",
"MetricGroup": "Cache",
- "MetricName": "MPKI data"
+ "ScaleUnit": "1MPKI"
},
{
- "MetricExpr": "(L1I_CACHE_LMISS * 1000) / INST_RETIRED",
+ "MetricName": "l1i_cache_miss_mpki",
+ "MetricExpr": "(L1I_CACHE_LMISS * 1e3) / INST_RETIRED",
"BriefDescription": "Misses per thousand instructions (instruction)",
"MetricGroup": "Cache",
- "MetricName": "MPKI instruction"
+ "ScaleUnit": "1MPKI"
},
{
- "MetricExpr": "ASE_SPEC / OP_SPEC",
- "BriefDescription": "Proportion of advanced SIMD data processing operations (excluding DP_SPEC/LD_SPEC) operations",
- "MetricGroup": "Instruction",
- "MetricName": "ASE mix"
+ "MetricName": "simd_percentage",
+ "MetricExpr": "((ASE_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures advanced SIMD operations as a percentage of total operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
},
{
- "MetricExpr": "CRYPTO_SPEC / OP_SPEC",
- "BriefDescription": "Proportion of crypto data processing operations",
- "MetricGroup": "Instruction",
- "MetricName": "Crypto mix"
+ "MetricName": "crypto_percentage",
+ "MetricExpr": "((CRYPTO_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures crypto operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
},
{
- "MetricExpr": "VFP_SPEC / (duration_time *1000000000)",
+ "MetricName": "gflops",
+ "MetricExpr": "VFP_SPEC / (duration_time * 1e9)",
"BriefDescription": "Giga-floating point operations per second",
- "MetricGroup": "Instruction",
- "MetricName": "GFLOPS_ISSUED"
+ "MetricGroup": "InstructionMix"
},
{
- "MetricExpr": "DP_SPEC / OP_SPEC",
- "BriefDescription": "Proportion of integer data processing operations",
- "MetricGroup": "Instruction",
- "MetricName": "Integer mix"
+ "MetricName": "integer_dp_percentage",
+ "MetricExpr": "((DP_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures scalar integer operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
},
{
- "MetricExpr": "INST_RETIRED / CPU_CYCLES",
- "BriefDescription": "Instructions per cycle",
- "MetricGroup": "Instruction",
- "MetricName": "IPC"
+ "MetricName": "ipc",
+ "MetricExpr": "(INST_RETIRED / CPU_CYCLES)",
+ "BriefDescription": "This metric measures the number of instructions retired per cycle.",
+ "MetricGroup": "General",
+ "ScaleUnit": "1per cycle"
},
{
- "MetricExpr": "LD_SPEC / OP_SPEC",
- "BriefDescription": "Proportion of load operations",
- "MetricGroup": "Instruction",
- "MetricName": "Load mix"
+ "MetricName": "load_percentage",
+ "MetricExpr": "((LD_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures load operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
},
{
- "MetricExpr": "LDST_SPEC/ OP_SPEC",
- "BriefDescription": "Proportion of load & store operations",
- "MetricGroup": "Instruction",
- "MetricName": "Load-store mix"
+ "MetricName": "load_store_spec_rate",
+ "MetricExpr": "((LDST_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "The rate of load or store instructions speculatively executed to overall instructions speclatively executed",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
},
{
- "MetricExpr": "INST_RETIRED / (duration_time * 1000000)",
+ "MetricName": "retired_mips",
+ "MetricExpr": "INST_RETIRED / (duration_time * 1e6)",
"BriefDescription": "Millions of instructions per second",
- "MetricGroup": "Instruction",
- "MetricName": "MIPS_RETIRED"
+ "MetricGroup": "InstructionMix"
},
{
- "MetricExpr": "INST_SPEC / (duration_time * 1000000)",
+ "MetricName": "spec_utilization_mips",
+ "MetricExpr": "INST_SPEC / (duration_time * 1e6)",
"BriefDescription": "Millions of instructions per second",
- "MetricGroup": "Instruction",
- "MetricName": "MIPS_UTILIZATION"
- },
- {
- "MetricExpr": "PC_WRITE_SPEC / OP_SPEC",
- "BriefDescription": "Proportion of software change of PC operations",
- "MetricGroup": "Instruction",
- "MetricName": "PC write mix"
+ "MetricGroup": "PEutilization"
},
{
- "MetricExpr": "ST_SPEC / OP_SPEC",
- "BriefDescription": "Proportion of store operations",
- "MetricGroup": "Instruction",
- "MetricName": "Store mix"
+ "MetricName": "pc_write_spec_rate",
+ "MetricExpr": "((PC_WRITE_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "The rate of software change of the PC speculatively executed to overall instructions speclatively executed",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
},
{
- "MetricExpr": "VFP_SPEC / OP_SPEC",
- "BriefDescription": "Proportion of FP operations",
- "MetricGroup": "Instruction",
- "MetricName": "VFP mix"
+ "MetricName": "store_percentage",
+ "MetricExpr": "((ST_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures store operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
},
{
- "MetricExpr": "1 - (OP_RETIRED/ (CPU_CYCLES * 4))",
- "BriefDescription": "Proportion of slots lost",
- "MetricGroup": "Speculation / TDA",
- "MetricName": "CPU lost"
+ "MetricName": "scalar_fp_percentage",
+ "MetricExpr": "((VFP_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures scalar floating point operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
},
{
- "MetricExpr": "OP_RETIRED/ (CPU_CYCLES * 4)",
- "BriefDescription": "Proportion of slots retiring",
- "MetricGroup": "Speculation / TDA",
- "MetricName": "CPU utilization"
+ "MetricName": "retired_rate",
+ "MetricExpr": "OP_RETIRED / OP_SPEC",
+ "BriefDescription": "Of all the micro-operations issued, what percentage are retired(committed)",
+ "MetricGroup": "General",
+ "ScaleUnit": "100%"
},
{
- "MetricExpr": "OP_RETIRED - OP_SPEC",
- "BriefDescription": "Operations lost due to misspeculation",
- "MetricGroup": "Speculation / TDA",
- "MetricName": "Operations lost"
+ "MetricName": "wasted",
+ "MetricExpr": "1 - (OP_RETIRED / (CPU_CYCLES * #slots))",
+ "BriefDescription": "Of all the micro-operations issued, what proportion are lost",
+ "MetricGroup": "General",
+ "ScaleUnit": "100%"
},
{
- "MetricExpr": "1 - (OP_RETIRED / OP_SPEC)",
- "BriefDescription": "Proportion of operations lost",
- "MetricGroup": "Speculation / TDA",
- "MetricName": "Operations lost (ratio)"
+ "MetricName": "wasted_rate",
+ "MetricExpr": "1 - OP_RETIRED / OP_SPEC",
+ "BriefDescription": "Of all the micro-operations issued, what percentage are not retired(committed)",
+ "MetricGroup": "General",
+ "ScaleUnit": "100%"
},
{
- "MetricExpr": "OP_RETIRED / OP_SPEC",
- "BriefDescription": "Proportion of operations retired",
- "MetricGroup": "Speculation / TDA",
- "MetricName": "Operations retired"
- },
- {
- "MetricExpr": "STALL_BACKEND_CACHE / CPU_CYCLES",
+ "MetricName": "stall_backend_cache_rate",
+ "MetricExpr": "((STALL_BACKEND_CACHE / CPU_CYCLES) * 100)",
"BriefDescription": "Proportion of cycles stalled and no operations issued to backend and cache miss",
"MetricGroup": "Stall",
- "MetricName": "Stall backend cache cycles"
+ "ScaleUnit": "1percent of cycles"
},
{
- "MetricExpr": "STALL_BACKEND_RESOURCE / CPU_CYCLES",
+ "MetricName": "stall_backend_resource_rate",
+ "MetricExpr": "((STALL_BACKEND_RESOURCE / CPU_CYCLES) * 100)",
"BriefDescription": "Proportion of cycles stalled and no operations issued to backend and resource full",
"MetricGroup": "Stall",
- "MetricName": "Stall backend resource cycles"
+ "ScaleUnit": "1percent of cycles"
},
{
- "MetricExpr": "STALL_BACKEND_TLB / CPU_CYCLES",
+ "MetricName": "stall_backend_tlb_rate",
+ "MetricExpr": "((STALL_BACKEND_TLB / CPU_CYCLES) * 100)",
"BriefDescription": "Proportion of cycles stalled and no operations issued to backend and TLB miss",
"MetricGroup": "Stall",
- "MetricName": "Stall backend tlb cycles"
+ "ScaleUnit": "1percent of cycles"
},
{
- "MetricExpr": "STALL_FRONTEND_CACHE / CPU_CYCLES",
+ "MetricName": "stall_frontend_cache_rate",
+ "MetricExpr": "((STALL_FRONTEND_CACHE / CPU_CYCLES) * 100)",
"BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and cache miss",
"MetricGroup": "Stall",
- "MetricName": "Stall frontend cache cycles"
+ "ScaleUnit": "1percent of cycles"
},
{
- "MetricExpr": "STALL_FRONTEND_TLB / CPU_CYCLES",
+ "MetricName": "stall_frontend_tlb_rate",
+ "MetricExpr": "((STALL_FRONTEND_TLB / CPU_CYCLES) * 100)",
"BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and TLB miss",
"MetricGroup": "Stall",
- "MetricName": "Stall frontend tlb cycles"
+ "ScaleUnit": "1percent of cycles"
},
{
- "MetricExpr": "DTLB_WALK / L1D_TLB",
- "BriefDescription": "D-side walk per d-side translation request",
- "MetricGroup": "TLB",
- "MetricName": "DTLB walks"
+ "MetricName": "dtlb_walk_ratio",
+ "MetricExpr": "(DTLB_WALK / L1D_TLB)",
+ "BriefDescription": "This metric measures the ratio of data TLB Walks to the total number of data TLB accesses. This gives an indication of the effectiveness of the data TLB accesses.",
+ "MetricGroup": "Miss_Ratio;DTLB_Effectiveness",
+ "ScaleUnit": "1per TLB access"
},
{
- "MetricExpr": "ITLB_WALK / L1I_TLB",
- "BriefDescription": "I-side walk per i-side translation request",
- "MetricGroup": "TLB",
- "MetricName": "ITLB walks"
+ "MetricName": "itlb_walk_ratio",
+ "MetricExpr": "(ITLB_WALK / L1I_TLB)",
+ "BriefDescription": "This metric measures the ratio of instruction TLB Walks to the total number of instruction TLB accesses. This gives an indication of the effectiveness of the instruction TLB accesses.",
+ "MetricGroup": "Miss_Ratio;ITLB_Effectiveness",
+ "ScaleUnit": "1per TLB access"
},
{
- "MetricExpr": "STALL_SLOT_BACKEND / (CPU_CYCLES * 4)",
- "BriefDescription": "Fraction of slots backend bound",
- "MetricGroup": "TopDownL1",
- "MetricName": "backend"
+ "ArchStdEvent": "backend_bound"
},
{
- "MetricExpr": "1 - (retiring + lost + backend)",
- "BriefDescription": "Fraction of slots frontend bound",
- "MetricGroup": "TopDownL1",
- "MetricName": "frontend"
+ "ArchStdEvent": "frontend_bound",
+ "MetricExpr": "100 - (retired_fraction + slots_lost_misspeculation_fraction + backend_bound)"
},
{
- "MetricExpr": "((OP_SPEC - OP_RETIRED) / (CPU_CYCLES * 4))",
+ "MetricName": "slots_lost_misspeculation_fraction",
+ "MetricExpr": "100 * ((OP_SPEC - OP_RETIRED) / (CPU_CYCLES * #slots))",
"BriefDescription": "Fraction of slots lost due to misspeculation",
- "MetricGroup": "TopDownL1",
- "MetricName": "lost"
+ "MetricGroup": "Default;TopdownL1",
+ "ScaleUnit": "1percent of slots"
},
{
- "MetricExpr": "(OP_RETIRED / (CPU_CYCLES * 4))",
+ "MetricName": "retired_fraction",
+ "MetricExpr": "100 * (OP_RETIRED / (CPU_CYCLES * #slots))",
"BriefDescription": "Fraction of slots retiring, useful work",
- "MetricGroup": "TopDownL1",
- "MetricName": "retiring"
+ "MetricGroup": "Default;TopdownL1",
+ "ScaleUnit": "1percent of slots"
},
{
- "MetricExpr": "backend - backend_memory",
+ "MetricName": "backend_core",
+ "MetricExpr": "(backend_bound / 100) - backend_memory",
"BriefDescription": "Fraction of slots the CPU was stalled due to backend non-memory subsystem issues",
- "MetricGroup": "TopDownL2",
- "MetricName": "backend_core"
+ "MetricGroup": "TopdownL2",
+ "ScaleUnit": "100%"
},
{
- "MetricExpr": "(STALL_BACKEND_TLB + STALL_BACKEND_CACHE + STALL_BACKEND_MEM) / CPU_CYCLES ",
+ "MetricName": "backend_memory",
+ "MetricExpr": "(STALL_BACKEND_TLB + STALL_BACKEND_CACHE) / CPU_CYCLES",
"BriefDescription": "Fraction of slots the CPU was stalled due to backend memory subsystem issues (cache/tlb miss)",
- "MetricGroup": "TopDownL2",
- "MetricName": "backend_memory"
+ "MetricGroup": "TopdownL2",
+ "ScaleUnit": "100%"
},
{
- "MetricExpr": " (BR_MIS_PRED_RETIRED / GPC_FLUSH) * lost",
+ "MetricName": "branch_mispredict",
+ "MetricExpr": "(BR_MIS_PRED_RETIRED / GPC_FLUSH) * slots_lost_misspeculation_fraction",
"BriefDescription": "Fraction of slots lost due to branch misprediciton",
- "MetricGroup": "TopDownL2",
- "MetricName": "branch_mispredict"
+ "MetricGroup": "TopdownL2",
+ "ScaleUnit": "1percent of slots"
},
{
- "MetricExpr": "frontend - frontend_latency",
+ "MetricName": "frontend_bandwidth",
+ "MetricExpr": "frontend_bound - frontend_latency",
"BriefDescription": "Fraction of slots the CPU did not dispatch at full bandwidth - able to dispatch partial slots only (1, 2, or 3 uops)",
- "MetricGroup": "TopDownL2",
- "MetricName": "frontend_bandwidth"
+ "MetricGroup": "TopdownL2",
+ "ScaleUnit": "1percent of slots"
},
{
- "MetricExpr": "(STALL_FRONTEND - ((STALL_SLOT_FRONTEND - (frontend * CPU_CYCLES * 4)) / 4)) / CPU_CYCLES",
+ "MetricName": "frontend_latency",
+ "MetricExpr": "((STALL_FRONTEND - ((STALL_SLOT_FRONTEND - ((frontend_bound / 100) * CPU_CYCLES * #slots)) / #slots)) / CPU_CYCLES) * 100",
"BriefDescription": "Fraction of slots the CPU was stalled due to frontend latency issues (cache/tlb miss); nothing to dispatch",
- "MetricGroup": "TopDownL2",
- "MetricName": "frontend_latency"
+ "MetricGroup": "TopdownL2",
+ "ScaleUnit": "1percent of slots"
},
{
- "MetricExpr": "lost - branch_mispredict",
+ "MetricName": "other_miss_pred",
+ "MetricExpr": "slots_lost_misspeculation_fraction - branch_mispredict",
"BriefDescription": "Fraction of slots lost due to other/non-branch misprediction misspeculation",
- "MetricGroup": "TopDownL2",
- "MetricName": "other_clears"
+ "MetricGroup": "TopdownL2",
+ "ScaleUnit": "1percent of slots"
},
{
- "MetricExpr": "(IXU_NUM_UOPS_ISSUED + FSU_ISSUED) / (CPU_CYCLES * 6)",
+ "MetricName": "pipe_utilization",
+ "MetricExpr": "100 * ((IXU_NUM_UOPS_ISSUED + FSU_ISSUED) / (CPU_CYCLES * 6))",
"BriefDescription": "Fraction of execute slots utilized",
- "MetricGroup": "TopDownL2",
- "MetricName": "pipe_utilization"
+ "MetricGroup": "TopdownL2",
+ "ScaleUnit": "1percent of slots"
},
{
- "MetricExpr": "STALL_BACKEND_MEM / CPU_CYCLES",
+ "MetricName": "d_cache_l2_miss_rate",
+ "MetricExpr": "((STALL_BACKEND_MEM / CPU_CYCLES) * 100)",
"BriefDescription": "Fraction of cycles the CPU was stalled due to data L2 cache miss",
- "MetricGroup": "TopDownL3",
- "MetricName": "d_cache_l2_miss"
+ "MetricGroup": "TopdownL3",
+ "ScaleUnit": "1percent of cycles"
},
{
- "MetricExpr": "STALL_BACKEND_CACHE / CPU_CYCLES",
+ "MetricName": "d_cache_miss_rate",
+ "MetricExpr": "((STALL_BACKEND_CACHE / CPU_CYCLES) * 100)",
"BriefDescription": "Fraction of cycles the CPU was stalled due to data cache miss",
- "MetricGroup": "TopDownL3",
- "MetricName": "d_cache_miss"
+ "MetricGroup": "TopdownL3",
+ "ScaleUnit": "1percent of cycles"
},
{
- "MetricExpr": "STALL_BACKEND_TLB / CPU_CYCLES",
+ "MetricName": "d_tlb_miss_rate",
+ "MetricExpr": "((STALL_BACKEND_TLB / CPU_CYCLES) * 100)",
"BriefDescription": "Fraction of cycles the CPU was stalled due to data TLB miss",
- "MetricGroup": "TopDownL3",
- "MetricName": "d_tlb_miss"
+ "MetricGroup": "TopdownL3",
+ "ScaleUnit": "1percent of cycles"
},
{
- "MetricExpr": "FSU_ISSUED / (CPU_CYCLES * 2)",
+ "MetricName": "fsu_pipe_utilization",
+ "MetricExpr": "((FSU_ISSUED / (CPU_CYCLES * 2)) * 100)",
"BriefDescription": "Fraction of FSU execute slots utilized",
- "MetricGroup": "TopDownL3",
- "MetricName": "fsu_pipe_utilization"
+ "MetricGroup": "TopdownL3",
+ "ScaleUnit": "1percent of slots"
},
{
- "MetricExpr": "STALL_FRONTEND_CACHE / CPU_CYCLES",
+ "MetricName": "i_cache_miss_rate",
+ "MetricExpr": "((STALL_FRONTEND_CACHE / CPU_CYCLES) * 100)",
"BriefDescription": "Fraction of cycles the CPU was stalled due to instruction cache miss",
- "MetricGroup": "TopDownL3",
- "MetricName": "i_cache_miss"
+ "MetricGroup": "TopdownL3",
+ "ScaleUnit": "1percent of slots"
},
{
- "MetricExpr": " STALL_FRONTEND_TLB / CPU_CYCLES ",
+ "MetricName": "i_tlb_miss_rate",
+ "MetricExpr": "((STALL_FRONTEND_TLB / CPU_CYCLES) * 100)",
"BriefDescription": "Fraction of cycles the CPU was stalled due to instruction TLB miss",
- "MetricGroup": "TopDownL3",
- "MetricName": "i_tlb_miss"
+ "MetricGroup": "TopdownL3",
+ "ScaleUnit": "1percent of slots"
},
{
- "MetricExpr": "IXU_NUM_UOPS_ISSUED / (CPU_CYCLES / 4)",
+ "MetricName": "ixu_pipe_utilization",
+ "MetricExpr": "((IXU_NUM_UOPS_ISSUED / (CPU_CYCLES * #slots)) * 100)",
"BriefDescription": "Fraction of IXU execute slots utilized",
- "MetricGroup": "TopDownL3",
- "MetricName": "ixu_pipe_utilization"
+ "MetricGroup": "TopdownL3",
+ "ScaleUnit": "1percent of slots"
},
{
- "MetricExpr": "IDR_STALL_FLUSH / CPU_CYCLES",
+ "MetricName": "stall_recovery_rate",
+ "MetricExpr": "((IDR_STALL_FLUSH / CPU_CYCLES) * 100)",
"BriefDescription": "Fraction of cycles the CPU was stalled due to flush recovery",
- "MetricGroup": "TopDownL3",
- "MetricName": "recovery"
- },
- {
- "MetricExpr": "STALL_BACKEND_RESOURCE / CPU_CYCLES",
- "BriefDescription": "Fraction of cycles the CPU was stalled due to core resource shortage",
- "MetricGroup": "TopDownL3",
- "MetricName": "resource"
+ "MetricGroup": "TopdownL3",
+ "ScaleUnit": "1percent of slots"
},
{
- "MetricExpr": "IDR_STALL_FSU_SCHED / CPU_CYCLES ",
+ "MetricName": "stall_fsu_sched_rate",
+ "MetricExpr": "((IDR_STALL_FSU_SCHED / CPU_CYCLES) * 100)",
"BriefDescription": "Fraction of cycles the CPU was stalled and FSU was full",
- "MetricGroup": "TopDownL4",
- "MetricName": "stall_fsu_sched"
+ "MetricGroup": "TopdownL4",
+ "ScaleUnit": "1percent of cycles"
},
{
- "MetricExpr": "IDR_STALL_IXU_SCHED / CPU_CYCLES ",
+ "MetricName": "stall_ixu_sched_rate",
+ "MetricExpr": "((IDR_STALL_IXU_SCHED / CPU_CYCLES) * 100)",
"BriefDescription": "Fraction of cycles the CPU was stalled and IXU was full",
- "MetricGroup": "TopDownL4",
- "MetricName": "stall_ixu_sched"
+ "MetricGroup": "TopdownL4",
+ "ScaleUnit": "1percent of cycles"
},
{
- "MetricExpr": "IDR_STALL_LOB_ID / CPU_CYCLES ",
+ "MetricName": "stall_lob_id_rate",
+ "MetricExpr": "((IDR_STALL_LOB_ID / CPU_CYCLES) * 100)",
"BriefDescription": "Fraction of cycles the CPU was stalled and LOB was full",
- "MetricGroup": "TopDownL4",
- "MetricName": "stall_lob_id"
+ "MetricGroup": "TopdownL4",
+ "ScaleUnit": "1percent of cycles"
},
{
- "MetricExpr": "IDR_STALL_ROB_ID / CPU_CYCLES",
+ "MetricName": "stall_rob_id_rate",
+ "MetricExpr": "((IDR_STALL_ROB_ID / CPU_CYCLES) * 100)",
"BriefDescription": "Fraction of cycles the CPU was stalled and ROB was full",
- "MetricGroup": "TopDownL4",
- "MetricName": "stall_rob_id"
+ "MetricGroup": "TopdownL4",
+ "ScaleUnit": "1percent of cycles"
},
{
- "MetricExpr": "IDR_STALL_SOB_ID / CPU_CYCLES ",
+ "MetricName": "stall_sob_id_rate",
+ "MetricExpr": "((IDR_STALL_SOB_ID / CPU_CYCLES) * 100)",
"BriefDescription": "Fraction of cycles the CPU was stalled and SOB was full",
- "MetricGroup": "TopDownL4",
- "MetricName": "stall_sob_id"
+ "MetricGroup": "TopdownL4",
+ "ScaleUnit": "1percent of cycles"
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json b/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json
new file mode 100644
index 000000000000..428605c37d10
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json
@@ -0,0 +1,266 @@
+[
+ {
+ "EventName": "hnf_cache_miss",
+ "EventidCode": "0x1",
+ "NodeType": "0x5",
+ "BriefDescription": "Counts total cache misses in first lookup result (high priority).",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "hnf_slc_sf_cache_access",
+ "EventidCode": "0x2",
+ "NodeType": "0x5",
+ "BriefDescription": "Counts number of cache accesses in first access (high priority).",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "hnf_cache_fill",
+ "EventidCode": "0x3",
+ "NodeType": "0x5",
+ "BriefDescription": "Counts total allocations in HN SLC (all cache line allocations to SLC).",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "hnf_pocq_retry",
+ "EventidCode": "0x4",
+ "NodeType": "0x5",
+ "BriefDescription": "Counts number of retried requests.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "hnf_pocq_reqs_recvd",
+ "EventidCode": "0x5",
+ "NodeType": "0x5",
+ "BriefDescription": "Counts number of requests that HN receives.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "hnf_sf_hit",
+ "EventidCode": "0x6",
+ "NodeType": "0x5",
+ "BriefDescription": "Counts number of SF hits.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "hnf_sf_evictions",
+ "EventidCode": "0x7",
+ "NodeType": "0x5",
+ "BriefDescription": "Counts number of SF eviction cache invalidations initiated.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "hnf_dir_snoops_sent",
+ "EventidCode": "0x8",
+ "NodeType": "0x5",
+ "BriefDescription": "Counts number of directed snoops sent (not including SF back invalidation).",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "hnf_brd_snoops_sent",
+ "EventidCode": "0x9",
+ "NodeType": "0x5",
+ "BriefDescription": "Counts number of multicast snoops sent (not including SF back invalidation).",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "hnf_slc_eviction",
+ "EventidCode": "0xa",
+ "NodeType": "0x5",
+ "BriefDescription": "Counts number of SLC evictions (dirty only).",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "hnf_slc_fill_invalid_way",
+ "EventidCode": "0xb",
+ "NodeType": "0x5",
+ "BriefDescription": "Counts number of SLC fills to an invalid way.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "hnf_mc_retries",
+ "EventidCode": "0xc",
+ "NodeType": "0x5",
+ "BriefDescription": "Counts number of retried transactions by the MC.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "hnf_mc_reqs",
+ "EventidCode": "0xd",
+ "NodeType": "0x5",
+ "BriefDescription": "Counts number of requests that are sent to MC.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "hnf_qos_hh_retry",
+ "EventidCode": "0xe",
+ "NodeType": "0x5",
+ "BriefDescription": "Counts number of times a HighHigh priority request is protocolretried at the HN‑F.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "rnid_s0_rdata_beats",
+ "EventidCode": "0x1",
+ "NodeType": "0xa",
+ "BriefDescription": "Number of RData beats (RVALID and RREADY) dispatched on port 0. This event measures the read bandwidth, including CMO responses.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "rnid_s1_rdata_beats",
+ "EventidCode": "0x2",
+ "NodeType": "0xa",
+ "BriefDescription": "Number of RData beats (RVALID and RREADY) dispatched on port 1. This event measures the read bandwidth, including CMO responses.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "rnid_s2_rdata_beats",
+ "EventidCode": "0x3",
+ "NodeType": "0xa",
+ "BriefDescription": "Number of RData beats (RVALID and RREADY) dispatched on port 2. This event measures the read bandwidth, including CMO responses.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "rnid_rxdat_flits",
+ "EventidCode": "0x4",
+ "NodeType": "0xa",
+ "BriefDescription": "Number of RXDAT flits received. This event measures the true read data bandwidth, excluding CMOs.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "rnid_txdat_flits",
+ "EventidCode": "0x5",
+ "NodeType": "0xa",
+ "BriefDescription": "Number of TXDAT flits dispatched. This event measures the write bandwidth.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "rnid_txreq_flits_total",
+ "EventidCode": "0x6",
+ "NodeType": "0xa",
+ "BriefDescription": "Number of TXREQ flits dispatched. This event measures the total request bandwidth.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "rnid_txreq_flits_retried",
+ "EventidCode": "0x7",
+ "NodeType": "0xa",
+ "BriefDescription": "Number of retried TXREQ flits dispatched. This event measures the retry rate.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "sbsx_txrsp_retryack",
+ "EventidCode": "0x4",
+ "NodeType": "0x7",
+ "BriefDescription": "Number of RXREQ flits dispatched. This event is a measure of the retry rate.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "sbsx_txdat_flitv",
+ "EventidCode": "0x5",
+ "NodeType": "0x7",
+ "BriefDescription": "Number of TXDAT flits dispatched from XP to SBSX. This event is a measure of the write bandwidth.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "sbsx_arvalid_no_arready",
+ "EventidCode": "0x21",
+ "NodeType": "0x7",
+ "BriefDescription": "Number of cycles the SBSX bridge is stalled because of backpressure on AR channel.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "sbsx_awvalid_no_awready",
+ "EventidCode": "0x22",
+ "NodeType": "0x7",
+ "BriefDescription": "Number of cycles the SBSX bridge is stalled because of backpressure on AW channel.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "sbsx_wvalid_no_wready",
+ "EventidCode": "0x23",
+ "NodeType": "0x7",
+ "BriefDescription": "Number of cycles the SBSX bridge is stalled because of backpressure on W channel.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "hni_txrsp_retryack",
+ "EventidCode": "0x2a",
+ "NodeType": "0x4",
+ "BriefDescription": "Number of RXREQ flits dispatched. This event is a measure of the retry rate.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "hni_arvalid_no_arready",
+ "EventidCode": "0x2b",
+ "NodeType": "0x4",
+ "BriefDescription": "Number of cycles the HN-I bridge is stalled because of backpressure on AR channel.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "hni_arready_no_arvalid",
+ "EventidCode": "0x2c",
+ "NodeType": "0x4",
+ "BriefDescription": "Number of cycles the AR channel is waiting for new requests from HN-I bridge.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "hni_awvalid_no_awready",
+ "EventidCode": "0x2d",
+ "NodeType": "0x4",
+ "BriefDescription": "Number of cycles the HN-I bridge is stalled because of backpressure on AW channel.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "hni_awready_no_awvalid",
+ "EventidCode": "0x2e",
+ "NodeType": "0x4",
+ "BriefDescription": "Number of cycles the AW channel is waiting for new requests from HN-I bridge.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "hni_wvalid_no_wready",
+ "EventidCode": "0x2f",
+ "NodeType": "0x4",
+ "BriefDescription": "Number of cycles the HN-I bridge is stalled because of backpressure on W channel.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "EventName": "hni_txdat_stall",
+ "EventidCode": "0x30",
+ "NodeType": "0x4",
+ "BriefDescription": "TXDAT valid but no link credit available.",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/metric.json b/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/metric.json
new file mode 100644
index 000000000000..f7823bd265db
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/metric.json
@@ -0,0 +1,74 @@
+[
+ {
+ "MetricName": "slc_miss_rate",
+ "BriefDescription": "The system level cache miss rate.",
+ "MetricGroup": "cmn",
+ "MetricExpr": "hnf_cache_miss / hnf_slc_sf_cache_access",
+ "ScaleUnit": "100%",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "MetricName": "hnf_message_retry_rate",
+ "BriefDescription": "HN-F message retry rate indicates whether a lack of credits is causing the bottlenecks.",
+ "MetricGroup": "cmn",
+ "MetricExpr": "hnf_pocq_retry / hnf_pocq_reqs_recvd",
+ "ScaleUnit": "100%",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "MetricName": "sf_hit_rate",
+ "BriefDescription": "Snoop filter hit rate can be used to measure the snoop filter efficiency.",
+ "MetricGroup": "cmn",
+ "MetricExpr": "hnf_sf_hit / hnf_slc_sf_cache_access",
+ "ScaleUnit": "100%",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "MetricName": "mc_message_retry_rate",
+ "BriefDescription": "The memory controller request retries rate indicates whether the memory controller is the bottleneck.",
+ "MetricGroup": "cmn",
+ "MetricExpr": "hnf_mc_retries / hnf_mc_reqs",
+ "ScaleUnit": "100%",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "MetricName": "rni_actual_read_bandwidth.all",
+ "BriefDescription": "This event measure the actual bandwidth that RN-I bridge sends to the interconnect.",
+ "MetricGroup": "cmn",
+ "MetricExpr": "rnid_rxdat_flits * 32 / 1e6 / duration_time",
+ "ScaleUnit": "1MB/s",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "MetricName": "rni_actual_write_bandwidth.all",
+ "BriefDescription": "This event measures the actual write bandwidth at RN-I bridges.",
+ "MetricGroup": "cmn",
+ "MetricExpr": "rnid_txdat_flits * 32 / 1e6 / duration_time",
+ "ScaleUnit": "1MB/s",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "MetricName": "rni_retry_rate",
+ "BriefDescription": "RN-I bridge retry rate indicates whether the memory controller is the bottleneck.",
+ "MetricGroup": "cmn",
+ "MetricExpr": "rnid_txreq_flits_retried / rnid_txreq_flits_total",
+ "ScaleUnit": "100%",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ },
+ {
+ "MetricName": "sbsx_actual_write_bandwidth.all",
+ "BriefDescription": "sbsx actual write bandwidth.",
+ "MetricGroup": "cmn",
+ "MetricExpr": "sbsx_txdat_flitv * 32 / 1e6 / duration_time",
+ "ScaleUnit": "1MB/s",
+ "Unit": "arm_cmn",
+ "Compat": "(434|436|43c|43a).*"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/branch.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/branch.json
deleted file mode 100644
index 79f2016c53b0..000000000000
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/branch.json
+++ /dev/null
@@ -1,8 +0,0 @@
-[
- {
- "ArchStdEvent": "BR_MIS_PRED"
- },
- {
- "ArchStdEvent": "BR_PRED"
- }
-]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/bus.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/bus.json
index 579c1c993d17..2e11a8c4a484 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/bus.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/bus.json
@@ -1,20 +1,18 @@
[
{
- "ArchStdEvent": "CPU_CYCLES"
+ "ArchStdEvent": "BUS_ACCESS",
+ "PublicDescription": "Counts memory transactions issued by the CPU to the external bus, including snoop requests and snoop responses. Each beat of data is counted individually."
},
{
- "ArchStdEvent": "BUS_ACCESS"
+ "ArchStdEvent": "BUS_CYCLES",
+ "PublicDescription": "Counts bus cycles in the CPU. Bus cycles represent a clock cycle in which a transaction could be sent or received on the interface from the CPU to the external bus. Since that interface is driven at the same clock speed as the CPU, this event is a duplicate of CPU_CYCLES."
},
{
- "ArchStdEvent": "BUS_CYCLES"
+ "ArchStdEvent": "BUS_ACCESS_RD",
+ "PublicDescription": "Counts memory read transactions seen on the external bus. Each beat of data is counted individually."
},
{
- "ArchStdEvent": "BUS_ACCESS_RD"
- },
- {
- "ArchStdEvent": "BUS_ACCESS_WR"
- },
- {
- "ArchStdEvent": "CNT_CYCLES"
+ "ArchStdEvent": "BUS_ACCESS_WR",
+ "PublicDescription": "Counts memory write transactions seen on the external bus. Each beat of data is counted individually."
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/cache.json
deleted file mode 100644
index 0141f749bff3..000000000000
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/cache.json
+++ /dev/null
@@ -1,155 +0,0 @@
-[
- {
- "ArchStdEvent": "L1I_CACHE_REFILL"
- },
- {
- "ArchStdEvent": "L1I_TLB_REFILL"
- },
- {
- "ArchStdEvent": "L1D_CACHE_REFILL"
- },
- {
- "ArchStdEvent": "L1D_CACHE"
- },
- {
- "ArchStdEvent": "L1D_TLB_REFILL"
- },
- {
- "ArchStdEvent": "L1I_CACHE"
- },
- {
- "ArchStdEvent": "L1D_CACHE_WB"
- },
- {
- "ArchStdEvent": "L2D_CACHE"
- },
- {
- "ArchStdEvent": "L2D_CACHE_REFILL"
- },
- {
- "ArchStdEvent": "L2D_CACHE_WB"
- },
- {
- "ArchStdEvent": "L2D_CACHE_ALLOCATE"
- },
- {
- "ArchStdEvent": "L1D_TLB"
- },
- {
- "ArchStdEvent": "L1I_TLB"
- },
- {
- "ArchStdEvent": "L3D_CACHE_ALLOCATE"
- },
- {
- "ArchStdEvent": "L3D_CACHE_REFILL"
- },
- {
- "ArchStdEvent": "L3D_CACHE"
- },
- {
- "ArchStdEvent": "L2D_TLB_REFILL"
- },
- {
- "ArchStdEvent": "L2D_TLB"
- },
- {
- "ArchStdEvent": "DTLB_WALK"
- },
- {
- "ArchStdEvent": "ITLB_WALK"
- },
- {
- "ArchStdEvent": "LL_CACHE_RD"
- },
- {
- "ArchStdEvent": "LL_CACHE_MISS_RD"
- },
- {
- "ArchStdEvent": "L1D_CACHE_LMISS_RD"
- },
- {
- "ArchStdEvent": "L1D_CACHE_RD"
- },
- {
- "ArchStdEvent": "L1D_CACHE_WR"
- },
- {
- "ArchStdEvent": "L1D_CACHE_REFILL_RD"
- },
- {
- "ArchStdEvent": "L1D_CACHE_REFILL_WR"
- },
- {
- "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
- },
- {
- "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
- },
- {
- "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
- },
- {
- "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
- },
- {
- "ArchStdEvent": "L1D_CACHE_INVAL"
- },
- {
- "ArchStdEvent": "L1D_TLB_REFILL_RD"
- },
- {
- "ArchStdEvent": "L1D_TLB_REFILL_WR"
- },
- {
- "ArchStdEvent": "L1D_TLB_RD"
- },
- {
- "ArchStdEvent": "L1D_TLB_WR"
- },
- {
- "ArchStdEvent": "L2D_CACHE_RD"
- },
- {
- "ArchStdEvent": "L2D_CACHE_WR"
- },
- {
- "ArchStdEvent": "L2D_CACHE_REFILL_RD"
- },
- {
- "ArchStdEvent": "L2D_CACHE_REFILL_WR"
- },
- {
- "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
- },
- {
- "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
- },
- {
- "ArchStdEvent": "L2D_CACHE_INVAL"
- },
- {
- "ArchStdEvent": "L2D_TLB_REFILL_RD"
- },
- {
- "ArchStdEvent": "L2D_TLB_REFILL_WR"
- },
- {
- "ArchStdEvent": "L2D_TLB_RD"
- },
- {
- "ArchStdEvent": "L2D_TLB_WR"
- },
- {
- "ArchStdEvent": "L3D_CACHE_RD"
- },
- {
- "ArchStdEvent": "L1I_CACHE_LMISS"
- },
- {
- "ArchStdEvent": "L2D_CACHE_LMISS_RD"
- },
- {
- "ArchStdEvent": "L3D_CACHE_LMISS_RD"
- }
-]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/exception.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/exception.json
index 344a2d552ad5..4404b8e91690 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/exception.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/exception.json
@@ -1,47 +1,62 @@
[
{
- "ArchStdEvent": "EXC_TAKEN"
+ "ArchStdEvent": "EXC_TAKEN",
+ "PublicDescription": "Counts any taken architecturally visible exceptions such as IRQ, FIQ, SError, and other synchronous exceptions. Exceptions are counted whether or not they are taken locally."
},
{
- "ArchStdEvent": "MEMORY_ERROR"
+ "ArchStdEvent": "EXC_RETURN",
+ "PublicDescription": "Counts any architecturally executed exception return instructions. Eg: AArch64: ERET"
},
{
- "ArchStdEvent": "EXC_UNDEF"
+ "ArchStdEvent": "EXC_UNDEF",
+ "PublicDescription": "Counts the number of synchronous exceptions which are taken locally that are due to attempting to execute an instruction that is UNDEFINED. Attempting to execute instruction bit patterns that have not been allocated. Attempting to execute instructions when they are disabled. Attempting to execute instructions at an inappropriate Exception level. Attempting to execute an instruction when the value of PSTATE.IL is 1."
},
{
- "ArchStdEvent": "EXC_SVC"
+ "ArchStdEvent": "EXC_SVC",
+ "PublicDescription": "Counts SVC exceptions taken locally."
},
{
- "ArchStdEvent": "EXC_PABORT"
+ "ArchStdEvent": "EXC_PABORT",
+ "PublicDescription": "Counts synchronous exceptions that are taken locally and caused by Instruction Aborts."
},
{
- "ArchStdEvent": "EXC_DABORT"
+ "ArchStdEvent": "EXC_DABORT",
+ "PublicDescription": "Counts exceptions that are taken locally and are caused by data aborts or SErrors. Conditions that could cause those exceptions are attempting to read or write memory where the MMU generates a fault, attempting to read or write memory with a misaligned address, interrupts from the nSEI inputs and internally generated SErrors."
},
{
- "ArchStdEvent": "EXC_IRQ"
+ "ArchStdEvent": "EXC_IRQ",
+ "PublicDescription": "Counts IRQ exceptions including the virtual IRQs that are taken locally."
},
{
- "ArchStdEvent": "EXC_FIQ"
+ "ArchStdEvent": "EXC_FIQ",
+ "PublicDescription": "Counts FIQ exceptions including the virtual FIQs that are taken locally."
},
{
- "ArchStdEvent": "EXC_SMC"
+ "ArchStdEvent": "EXC_SMC",
+ "PublicDescription": "Counts SMC exceptions take to EL3."
},
{
- "ArchStdEvent": "EXC_HVC"
+ "ArchStdEvent": "EXC_HVC",
+ "PublicDescription": "Counts HVC exceptions taken to EL2."
},
{
- "ArchStdEvent": "EXC_TRAP_PABORT"
+ "ArchStdEvent": "EXC_TRAP_PABORT",
+ "PublicDescription": "Counts exceptions which are traps not taken locally and are caused by Instruction Aborts. For example, attempting to execute an instruction with a misaligned PC."
},
{
- "ArchStdEvent": "EXC_TRAP_DABORT"
+ "ArchStdEvent": "EXC_TRAP_DABORT",
+ "PublicDescription": "Counts exceptions which are traps not taken locally and are caused by Data Aborts or SError interrupts. Conditions that could cause those exceptions are:\n\n1. Attempting to read or write memory where the MMU generates a fault,\n2. Attempting to read or write memory with a misaligned address,\n3. Interrupts from the SEI input.\n4. internally generated SErrors."
},
{
- "ArchStdEvent": "EXC_TRAP_OTHER"
+ "ArchStdEvent": "EXC_TRAP_OTHER",
+ "PublicDescription": "Counts the number of synchronous trap exceptions which are not taken locally and are not SVC, SMC, HVC, data aborts, Instruction Aborts, or interrupts."
},
{
- "ArchStdEvent": "EXC_TRAP_IRQ"
+ "ArchStdEvent": "EXC_TRAP_IRQ",
+ "PublicDescription": "Counts IRQ exceptions including the virtual IRQs that are not taken locally."
},
{
- "ArchStdEvent": "EXC_TRAP_FIQ"
+ "ArchStdEvent": "EXC_TRAP_FIQ",
+ "PublicDescription": "Counts FIQs which are not taken locally but taken from EL0, EL1,\n or EL2 to EL3 (which would be the normal behavior for FIQs when not executing\n in EL3)."
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/fp_operation.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/fp_operation.json
new file mode 100644
index 000000000000..a09e226e7138
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/fp_operation.json
@@ -0,0 +1,10 @@
+[
+ {
+ "ArchStdEvent": "FP_SCALE_OPS_SPEC",
+ "PublicDescription": "Counts speculatively executed scalable single precision floating point operations."
+ },
+ {
+ "ArchStdEvent": "FP_FIXED_OPS_SPEC",
+ "PublicDescription": "Counts speculatively executed non-scalable single precision floating point operations."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/general.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/general.json
new file mode 100644
index 000000000000..428810f855b8
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/general.json
@@ -0,0 +1,10 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES",
+ "PublicDescription": "Counts CPU clock cycles (not timer cycles). The clock measured by this event is defined as the physical clock driving the CPU logic."
+ },
+ {
+ "ArchStdEvent": "CNT_CYCLES",
+ "PublicDescription": "Counts constant frequency cycles"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/instruction.json
deleted file mode 100644
index e29b88fb7f24..000000000000
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/instruction.json
+++ /dev/null
@@ -1,119 +0,0 @@
-[
- {
- "ArchStdEvent": "SW_INCR"
- },
- {
- "ArchStdEvent": "INST_RETIRED"
- },
- {
- "ArchStdEvent": "EXC_RETURN"
- },
- {
- "ArchStdEvent": "CID_WRITE_RETIRED"
- },
- {
- "ArchStdEvent": "INST_SPEC"
- },
- {
- "ArchStdEvent": "TTBR_WRITE_RETIRED"
- },
- {
- "ArchStdEvent": "BR_RETIRED"
- },
- {
- "ArchStdEvent": "BR_MIS_PRED_RETIRED"
- },
- {
- "ArchStdEvent": "OP_RETIRED"
- },
- {
- "ArchStdEvent": "OP_SPEC"
- },
- {
- "ArchStdEvent": "LDREX_SPEC"
- },
- {
- "ArchStdEvent": "STREX_PASS_SPEC"
- },
- {
- "ArchStdEvent": "STREX_FAIL_SPEC"
- },
- {
- "ArchStdEvent": "STREX_SPEC"
- },
- {
- "ArchStdEvent": "LD_SPEC"
- },
- {
- "ArchStdEvent": "ST_SPEC"
- },
- {
- "ArchStdEvent": "DP_SPEC"
- },
- {
- "ArchStdEvent": "ASE_SPEC"
- },
- {
- "ArchStdEvent": "VFP_SPEC"
- },
- {
- "ArchStdEvent": "PC_WRITE_SPEC"
- },
- {
- "ArchStdEvent": "CRYPTO_SPEC"
- },
- {
- "ArchStdEvent": "BR_IMMED_SPEC"
- },
- {
- "ArchStdEvent": "BR_RETURN_SPEC"
- },
- {
- "ArchStdEvent": "BR_INDIRECT_SPEC"
- },
- {
- "ArchStdEvent": "ISB_SPEC"
- },
- {
- "ArchStdEvent": "DSB_SPEC"
- },
- {
- "ArchStdEvent": "DMB_SPEC"
- },
- {
- "ArchStdEvent": "RC_LD_SPEC"
- },
- {
- "ArchStdEvent": "RC_ST_SPEC"
- },
- {
- "ArchStdEvent": "ASE_INST_SPEC"
- },
- {
- "ArchStdEvent": "SVE_INST_SPEC"
- },
- {
- "ArchStdEvent": "SVE_PRED_SPEC"
- },
- {
- "ArchStdEvent": "SVE_PRED_EMPTY_SPEC"
- },
- {
- "ArchStdEvent": "SVE_PRED_FULL_SPEC"
- },
- {
- "ArchStdEvent": "SVE_PRED_PARTIAL_SPEC"
- },
- {
- "ArchStdEvent": "SVE_LDFF_SPEC"
- },
- {
- "ArchStdEvent": "SVE_LDFF_FAULT_SPEC"
- },
- {
- "ArchStdEvent": "FP_SCALE_OPS_SPEC"
- },
- {
- "ArchStdEvent": "FP_FIXED_OPS_SPEC"
- }
-]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/l1d_cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/l1d_cache.json
new file mode 100644
index 000000000000..ed83e1c5affe
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/l1d_cache.json
@@ -0,0 +1,54 @@
+[
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL",
+ "PublicDescription": "Counts level 1 data cache refills caused by speculatively executed load or store operations that missed in the level 1 data cache. This event only counts one event per cache line. This event does not count cache line allocations from preload instructions or from hardware cache prefetching."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE",
+ "PublicDescription": "Counts level 1 data cache accesses from any load/store operations. Atomic operations that resolve in the CPUs caches (near atomic operations) counts as both a write access and read access. Each access to a cache line is counted including the multiple accesses caused by single instructions such as LDM or STM. Each access to other level 1 data or unified memory structures, for example refill buffers, write buffers, and write-back buffers, are also counted."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB",
+ "PublicDescription": "Counts write-backs of dirty data from the L1 data cache to the L2 cache. This occurs when either a dirty cache line is evicted from L1 data cache and allocated in the L2 cache or dirty data is written to the L2 and possibly to the next level of cache. This event counts both victim cache line evictions and cache write-backs from snoops or cache maintenance operations. The following cache operations are not counted:\n\n1. Invalidations which do not result in data being transferred out of the L1 (such as evictions of clean data),\n2. Full line writes which write to L2 without writing L1, such as write streaming mode."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_LMISS_RD",
+ "PublicDescription": "Counts cache line refills into the level 1 data cache from any memory read operations, that incurred additional latency."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD",
+ "PublicDescription": "Counts level 1 data cache accesses from any load operation. Atomic load operations that resolve in the CPUs caches counts as both a write access and read access."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR",
+ "PublicDescription": "Counts level 1 data cache accesses generated by store operations. This event also counts accesses caused by a DC ZVA (data cache zero, specified by virtual address) instruction. Near atomic operations that resolve in the CPUs caches count as a write access and read access."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD",
+ "PublicDescription": "Counts level 1 data cache refills caused by speculatively executed load instructions where the memory read operation misses in the level 1 data cache. This event only counts one event per cache line."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR",
+ "PublicDescription": "Counts level 1 data cache refills caused by speculatively executed store instructions where the memory write operation misses in the level 1 data cache. This event only counts one event per cache line."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER",
+ "PublicDescription": "Counts level 1 data cache refills where the cache line data came from caches inside the immediate cluster of the core."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER",
+ "PublicDescription": "Counts level 1 data cache refills for which the cache line data came from outside the immediate cluster of the core, like an SLC in the system interconnect or DRAM."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM",
+ "PublicDescription": "Counts dirty cache line evictions from the level 1 data cache caused by a new cache line allocation. This event does not count evictions caused by cache maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN",
+ "PublicDescription": "Counts write-backs from the level 1 data cache that are a result of a coherency operation made by another CPU. Event count includes cache maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL",
+ "PublicDescription": "Counts each explicit invalidation of a cache line in the level 1 data cache caused by:\n\n- Cache Maintenance Operations (CMO) that operate by a virtual address.\n- Broadcast cache coherency operations from another CPU in the system.\n\nThis event does not count for the following conditions:\n\n1. A cache refill invalidates a cache line.\n2. A CMO which is executed on that CPU and invalidates a cache line specified by set/way.\n\nNote that CMOs that operate by set/way cannot be broadcast from one CPU to another."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/l1i_cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/l1i_cache.json
new file mode 100644
index 000000000000..633f1030359d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/l1i_cache.json
@@ -0,0 +1,14 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL",
+ "PublicDescription": "Counts cache line refills in the level 1 instruction cache caused by a missed instruction fetch. Instruction fetches may include accessing multiple instructions, but the single cache line allocation is counted once."
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE",
+ "PublicDescription": "Counts instruction fetches which access the level 1 instruction cache. Instruction cache accesses caused by cache maintenance operations are not counted."
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_LMISS",
+ "PublicDescription": "Counts cache line refills into the level 1 instruction cache, that incurred additional latency."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/l2_cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/l2_cache.json
new file mode 100644
index 000000000000..0e31d0daf88b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/l2_cache.json
@@ -0,0 +1,50 @@
+[
+ {
+ "ArchStdEvent": "L2D_CACHE",
+ "PublicDescription": "Counts level 2 cache accesses. level 2 cache is a unified cache for data and instruction accesses. Accesses are for misses in the first level caches or translation resolutions due to accesses. This event also counts write back of dirty data from level 1 data cache to the L2 cache."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL",
+ "PublicDescription": "Counts cache line refills into the level 2 cache. level 2 cache is a unified cache for data and instruction accesses. Accesses are for misses in the level 1 caches or translation resolutions due to accesses."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB",
+ "PublicDescription": "Counts write-backs of data from the L2 cache to outside the CPU. This includes snoops to the L2 (from other CPUs) which return data even if the snoops cause an invalidation. L2 cache line invalidations which do not write data outside the CPU and snoops which return data from an L1 cache are not counted. Data would not be written outside the cache when invalidating a clean cache line."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE",
+ "PublicDescription": "TBD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD",
+ "PublicDescription": "Counts level 2 cache accesses due to memory read operations. level 2 cache is a unified cache for data and instruction accesses, accesses are for misses in the level 1 caches or translation resolutions due to accesses."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR",
+ "PublicDescription": "Counts level 2 cache accesses due to memory write operations. level 2 cache is a unified cache for data and instruction accesses, accesses are for misses in the level 1 caches or translation resolutions due to accesses."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD",
+ "PublicDescription": "Counts refills for memory accesses due to memory read operation counted by L2D_CACHE_RD. level 2 cache is a unified cache for data and instruction accesses, accesses are for misses in the level 1 caches or translation resolutions due to accesses."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR",
+ "PublicDescription": "Counts refills for memory accesses due to memory write operation counted by L2D_CACHE_WR. level 2 cache is a unified cache for data and instruction accesses, accesses are for misses in the level 1 caches or translation resolutions due to accesses."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM",
+ "PublicDescription": "Counts evictions from the level 2 cache because of a line being allocated into the L2 cache."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN",
+ "PublicDescription": "Counts write-backs from the level 2 cache that are a result of either:\n\n1. Cache maintenance operations,\n\n2. Snoop responses or,\n\n3. Direct cache transfers to another CPU due to a forwarding snoop request."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL",
+ "PublicDescription": "Counts each explicit invalidation of a cache line in the level 2 cache by cache maintenance operations that operate by a virtual address, or by external coherency operations. This event does not count if either:\n\n1. A cache refill invalidates a cache line or,\n2. A Cache Maintenance Operation (CMO), which invalidates a cache line specified by set/way, is executed on that CPU.\n\nCMOs that operate by set/way cannot be broadcast from one CPU to another."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_LMISS_RD",
+ "PublicDescription": "Counts cache line refills into the level 2 unified cache from any memory read operations that incurred additional latency."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/l3_cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/l3_cache.json
new file mode 100644
index 000000000000..45bfba532df7
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/l3_cache.json
@@ -0,0 +1,22 @@
+[
+ {
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE",
+ "PublicDescription": "Counts level 3 cache line allocates that do not fetch data from outside the level 3 data or unified cache. For example, allocates due to streaming stores."
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL",
+ "PublicDescription": "Counts level 3 accesses that receive data from outside the L3 cache."
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE",
+ "PublicDescription": "Counts level 3 cache accesses. level 3 cache is a unified cache for data and instruction accesses. Accesses are for misses in the lower level caches or translation resolutions due to accesses."
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD",
+ "PublicDescription": "TBD"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_LMISS_RD",
+ "PublicDescription": "Counts any cache line refill into the level 3 cache from memory read operations that incurred additional latency."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/ll_cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/ll_cache.json
new file mode 100644
index 000000000000..bb712d57d58a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/ll_cache.json
@@ -0,0 +1,10 @@
+[
+ {
+ "ArchStdEvent": "LL_CACHE_RD",
+ "PublicDescription": "Counts read transactions that were returned from outside the core cluster. This event counts when the system register CPUECTLR.EXTLLC bit is set. This event counts read transactions returned from outside the core if those transactions are either hit in the system level cache or missed in the SLC and are returned from any other external sources."
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD",
+ "PublicDescription": "Counts read transactions that were returned from outside the core cluster but missed in the system level cache. This event counts when the system register CPUECTLR.EXTLLC bit is set. This event counts read transactions returned from outside the core if those transactions are missed in the System level Cache. The data source of the transaction is indicated by a field in the CHI transaction returning to the CPU. This event does not count reads caused by cache maintenance operations."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/memory.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/memory.json
index 5aff6e93c1ad..9041f6e0befb 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/memory.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/memory.json
@@ -1,23 +1,22 @@
[
{
- "ArchStdEvent": "MEM_ACCESS"
+ "ArchStdEvent": "MEM_ACCESS",
+ "PublicDescription": "Counts memory accesses issued by the CPU load store unit, where those accesses are issued due to load or store operations. This event counts memory accesses no matter whether the data is received from any level of cache hierarchy or external memory. If memory accesses are broken up into smaller transactions than what were specified in the load or store instructions, then the event counts those smaller memory transactions."
},
{
- "ArchStdEvent": "REMOTE_ACCESS"
+ "ArchStdEvent": "MEMORY_ERROR",
+ "PublicDescription": "Counts any detected correctable or uncorrectable physical memory errors (ECC or parity) in protected CPUs RAMs. On the core, this event counts errors in the caches (including data and tag rams). Any detected memory error (from either a speculative and abandoned access, or an architecturally executed access) is counted. Note that errors are only detected when the actual protected memory is accessed by an operation."
},
{
- "ArchStdEvent": "MEM_ACCESS_RD"
+ "ArchStdEvent": "REMOTE_ACCESS",
+ "PublicDescription": "Counts accesses to another chip, which is implemented as a different CMN mesh in the system. If the CHI bus response back to the core indicates that the data source is from another chip (mesh), then the counter is updated. If no data is returned, even if the system snoops another chip/mesh, then the counter is not updated."
},
{
- "ArchStdEvent": "MEM_ACCESS_WR"
+ "ArchStdEvent": "MEM_ACCESS_RD",
+ "PublicDescription": "Counts memory accesses issued by the CPU due to load operations. The event counts any memory load access, no matter whether the data is received from any level of cache hierarchy or external memory. The event also counts atomic load operations. If memory accesses are broken up by the load/store unit into smaller transactions that are issued by the bus interface, then the event counts those smaller transactions."
},
{
- "ArchStdEvent": "UNALIGNED_LD_SPEC"
- },
- {
- "ArchStdEvent": "UNALIGNED_ST_SPEC"
- },
- {
- "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ "ArchStdEvent": "MEM_ACCESS_WR",
+ "PublicDescription": "Counts memory accesses issued by the CPU due to store operations. The event counts any memory store access, no matter whether the data is located in any level of cache or external memory. The event also counts atomic load and store operations. If memory accesses are broken up by the load/store unit into smaller transactions that are issued by the bus interface, then the event counts those smaller transactions."
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/metrics.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/metrics.json
new file mode 100644
index 000000000000..3fd8fefec46a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/metrics.json
@@ -0,0 +1,233 @@
+[
+ {
+ "ArchStdEvent": "backend_bound"
+ },
+ {
+ "MetricName": "backend_stalled_cycles",
+ "MetricExpr": "((STALL_BACKEND / CPU_CYCLES) * 100)",
+ "BriefDescription": "This metric is the percentage of cycles that were stalled due to resource constraints in the backend unit of the processor.",
+ "MetricGroup": "Cycle_Accounting",
+ "ScaleUnit": "1percent of cycles"
+ },
+ {
+ "ArchStdEvent": "bad_speculation",
+ "MetricExpr": "(100 * (((1 - (OP_RETIRED / OP_SPEC)) * (1 - (STALL_SLOT / (CPU_CYCLES * 8)))) + ((BR_MIS_PRED * 4) / CPU_CYCLES)))"
+ },
+ {
+ "MetricName": "branch_misprediction_ratio",
+ "MetricExpr": "(BR_MIS_PRED_RETIRED / BR_RETIRED)",
+ "BriefDescription": "This metric measures the ratio of branches mispredicted to the total number of branches architecturally executed. This gives an indication of the effectiveness of the branch prediction unit.",
+ "MetricGroup": "Miss_Ratio;Branch_Effectiveness",
+ "ScaleUnit": "1per branch"
+ },
+ {
+ "MetricName": "branch_mpki",
+ "MetricExpr": "((BR_MIS_PRED_RETIRED / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of branch mispredictions per thousand instructions executed.",
+ "MetricGroup": "MPKI;Branch_Effectiveness",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricName": "branch_percentage",
+ "MetricExpr": "(((BR_IMMED_SPEC + BR_INDIRECT_SPEC) / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures branch operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
+ },
+ {
+ "MetricName": "crypto_percentage",
+ "MetricExpr": "((CRYPTO_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures crypto operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
+ },
+ {
+ "MetricName": "dtlb_mpki",
+ "MetricExpr": "((DTLB_WALK / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of data TLB Walks per thousand instructions executed.",
+ "MetricGroup": "MPKI;DTLB_Effectiveness",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricName": "dtlb_walk_ratio",
+ "MetricExpr": "(DTLB_WALK / L1D_TLB)",
+ "BriefDescription": "This metric measures the ratio of data TLB Walks to the total number of data TLB accesses. This gives an indication of the effectiveness of the data TLB accesses.",
+ "MetricGroup": "Miss_Ratio;DTLB_Effectiveness",
+ "ScaleUnit": "1per TLB access"
+ },
+ {
+ "ArchStdEvent": "frontend_bound",
+ "MetricExpr": "(100 * ((STALL_SLOT_FRONTEND / (CPU_CYCLES * 8)) - ((BR_MIS_PRED * 4) / CPU_CYCLES)))"
+ },
+ {
+ "MetricName": "frontend_stalled_cycles",
+ "MetricExpr": "((STALL_FRONTEND / CPU_CYCLES) * 100)",
+ "BriefDescription": "This metric is the percentage of cycles that were stalled due to resource constraints in the frontend unit of the processor.",
+ "MetricGroup": "Cycle_Accounting",
+ "ScaleUnit": "1percent of cycles"
+ },
+ {
+ "MetricName": "integer_dp_percentage",
+ "MetricExpr": "((DP_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures scalar integer operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
+ },
+ {
+ "MetricName": "ipc",
+ "MetricExpr": "(INST_RETIRED / CPU_CYCLES)",
+ "BriefDescription": "This metric measures the number of instructions retired per cycle.",
+ "MetricGroup": "General",
+ "ScaleUnit": "1per cycle"
+ },
+ {
+ "MetricName": "itlb_mpki",
+ "MetricExpr": "((ITLB_WALK / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of instruction TLB Walks per thousand instructions executed.",
+ "MetricGroup": "MPKI;ITLB_Effectiveness",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricName": "itlb_walk_ratio",
+ "MetricExpr": "(ITLB_WALK / L1I_TLB)",
+ "BriefDescription": "This metric measures the ratio of instruction TLB Walks to the total number of instruction TLB accesses. This gives an indication of the effectiveness of the instruction TLB accesses.",
+ "MetricGroup": "Miss_Ratio;ITLB_Effectiveness",
+ "ScaleUnit": "1per TLB access"
+ },
+ {
+ "MetricName": "l1d_cache_miss_ratio",
+ "MetricExpr": "(L1D_CACHE_REFILL / L1D_CACHE)",
+ "BriefDescription": "This metric measures the ratio of level 1 data cache accesses missed to the total number of level 1 data cache accesses. This gives an indication of the effectiveness of the level 1 data cache.",
+ "MetricGroup": "Miss_Ratio;L1D_Cache_Effectiveness",
+ "ScaleUnit": "1per cache access"
+ },
+ {
+ "MetricName": "l1d_cache_mpki",
+ "MetricExpr": "((L1D_CACHE_REFILL / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of level 1 data cache accesses missed per thousand instructions executed.",
+ "MetricGroup": "MPKI;L1D_Cache_Effectiveness",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricName": "l1d_tlb_miss_ratio",
+ "MetricExpr": "(L1D_TLB_REFILL / L1D_TLB)",
+ "BriefDescription": "This metric measures the ratio of level 1 data TLB accesses missed to the total number of level 1 data TLB accesses. This gives an indication of the effectiveness of the level 1 data TLB.",
+ "MetricGroup": "Miss_Ratio;DTLB_Effectiveness",
+ "ScaleUnit": "1per TLB access"
+ },
+ {
+ "MetricName": "l1d_tlb_mpki",
+ "MetricExpr": "((L1D_TLB_REFILL / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of level 1 instruction TLB accesses missed per thousand instructions executed.",
+ "MetricGroup": "MPKI;DTLB_Effectiveness",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricName": "l1i_cache_miss_ratio",
+ "MetricExpr": "(L1I_CACHE_REFILL / L1I_CACHE)",
+ "BriefDescription": "This metric measures the ratio of level 1 instruction cache accesses missed to the total number of level 1 instruction cache accesses. This gives an indication of the effectiveness of the level 1 instruction cache.",
+ "MetricGroup": "Miss_Ratio;L1I_Cache_Effectiveness",
+ "ScaleUnit": "1per cache access"
+ },
+ {
+ "MetricName": "l1i_cache_mpki",
+ "MetricExpr": "((L1I_CACHE_REFILL / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of level 1 instruction cache accesses missed per thousand instructions executed.",
+ "MetricGroup": "MPKI;L1I_Cache_Effectiveness",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricName": "l1i_tlb_miss_ratio",
+ "MetricExpr": "(L1I_TLB_REFILL / L1I_TLB)",
+ "BriefDescription": "This metric measures the ratio of level 1 instruction TLB accesses missed to the total number of level 1 instruction TLB accesses. This gives an indication of the effectiveness of the level 1 instruction TLB.",
+ "MetricGroup": "Miss_Ratio;ITLB_Effectiveness",
+ "ScaleUnit": "1per TLB access"
+ },
+ {
+ "MetricName": "l1i_tlb_mpki",
+ "MetricExpr": "((L1I_TLB_REFILL / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of level 1 instruction TLB accesses missed per thousand instructions executed.",
+ "MetricGroup": "MPKI;ITLB_Effectiveness",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricName": "l2_cache_miss_ratio",
+ "MetricExpr": "(L2D_CACHE_REFILL / L2D_CACHE)",
+ "BriefDescription": "This metric measures the ratio of level 2 cache accesses missed to the total number of level 2 cache accesses. This gives an indication of the effectiveness of the level 2 cache, which is a unified cache that stores both data and instruction. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a unified cache.",
+ "MetricGroup": "Miss_Ratio;L2_Cache_Effectiveness",
+ "ScaleUnit": "1per cache access"
+ },
+ {
+ "MetricName": "l2_cache_mpki",
+ "MetricExpr": "((L2D_CACHE_REFILL / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of level 2 unified cache accesses missed per thousand instructions executed. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a unified cache.",
+ "MetricGroup": "MPKI;L2_Cache_Effectiveness",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricName": "l2_tlb_miss_ratio",
+ "MetricExpr": "(L2D_TLB_REFILL / L2D_TLB)",
+ "BriefDescription": "This metric measures the ratio of level 2 unified TLB accesses missed to the total number of level 2 unified TLB accesses. This gives an indication of the effectiveness of the level 2 TLB.",
+ "MetricGroup": "Miss_Ratio;ITLB_Effectiveness;DTLB_Effectiveness",
+ "ScaleUnit": "1per TLB access"
+ },
+ {
+ "MetricName": "l2_tlb_mpki",
+ "MetricExpr": "((L2D_TLB_REFILL / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of level 2 unified TLB accesses missed per thousand instructions executed.",
+ "MetricGroup": "MPKI;ITLB_Effectiveness;DTLB_Effectiveness",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricName": "ll_cache_read_hit_ratio",
+ "MetricExpr": "((LL_CACHE_RD - LL_CACHE_MISS_RD) / LL_CACHE_RD)",
+ "BriefDescription": "This metric measures the ratio of last level cache read accesses hit in the cache to the total number of last level cache accesses. This gives an indication of the effectiveness of the last level cache for read traffic. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a system level cache.",
+ "MetricGroup": "LL_Cache_Effectiveness",
+ "ScaleUnit": "1per cache access"
+ },
+ {
+ "MetricName": "ll_cache_read_miss_ratio",
+ "MetricExpr": "(LL_CACHE_MISS_RD / LL_CACHE_RD)",
+ "BriefDescription": "This metric measures the ratio of last level cache read accesses missed to the total number of last level cache accesses. This gives an indication of the effectiveness of the last level cache for read traffic. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a system level cache.",
+ "MetricGroup": "Miss_Ratio;LL_Cache_Effectiveness",
+ "ScaleUnit": "1per cache access"
+ },
+ {
+ "MetricName": "ll_cache_read_mpki",
+ "MetricExpr": "((LL_CACHE_MISS_RD / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of last level cache read accesses missed per thousand instructions executed.",
+ "MetricGroup": "MPKI;LL_Cache_Effectiveness",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricName": "load_percentage",
+ "MetricExpr": "((LD_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures load operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
+ },
+ {
+ "ArchStdEvent": "retiring"
+ },
+ {
+ "MetricName": "scalar_fp_percentage",
+ "MetricExpr": "((VFP_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures scalar floating point operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
+ },
+ {
+ "MetricName": "simd_percentage",
+ "MetricExpr": "((ASE_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures advanced SIMD operations as a percentage of total operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
+ },
+ {
+ "MetricName": "store_percentage",
+ "MetricExpr": "((ST_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures store operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/pipeline.json
deleted file mode 100644
index f9fae15f7555..000000000000
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/pipeline.json
+++ /dev/null
@@ -1,23 +0,0 @@
-[
- {
- "ArchStdEvent": "STALL_FRONTEND"
- },
- {
- "ArchStdEvent": "STALL_BACKEND"
- },
- {
- "ArchStdEvent": "STALL"
- },
- {
- "ArchStdEvent": "STALL_SLOT_BACKEND"
- },
- {
- "ArchStdEvent": "STALL_SLOT_FRONTEND"
- },
- {
- "ArchStdEvent": "STALL_SLOT"
- },
- {
- "ArchStdEvent": "STALL_BACKEND_MEM"
- }
-]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/retired.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/retired.json
new file mode 100644
index 000000000000..f297b049b62f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/retired.json
@@ -0,0 +1,30 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR",
+ "PublicDescription": "Counts software writes to the PMSWINC_EL0 (software PMU increment) register. The PMSWINC_EL0 register is a manually updated counter for use by application software.\n\nThis event could be used to measure any user program event, such as accesses to a particular data structure (by writing to the PMSWINC_EL0 register each time the data structure is accessed).\n\nTo use the PMSWINC_EL0 register and event, developers must insert instructions that write to the PMSWINC_EL0 register into the source code.\n\nSince the SW_INCR event records writes to the PMSWINC_EL0 register, there is no need to do a read/increment/write sequence to the PMSWINC_EL0 register."
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED",
+ "PublicDescription": "Counts instructions that have been architecturally executed."
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED",
+ "PublicDescription": "Counts architecturally executed writes to the CONTEXTIDR register, which usually contain the kernel PID and can be output with hardware trace."
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED",
+ "PublicDescription": "Counts architectural writes to TTBR0/1_EL1. If virtualization host extensions are enabled (by setting the HCR_EL2.E2H bit to 1), then accesses to TTBR0/1_EL1 that are redirected to TTBR0/1_EL2, or accesses to TTBR0/1_EL12, are counted. TTBRn registers are typically updated when the kernel is swapping user-space threads or applications."
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED",
+ "PublicDescription": "Counts architecturally executed branches, whether the branch is taken or not. Instructions that explicitly write to the PC are also counted."
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED",
+ "PublicDescription": "Counts branches counted by BR_RETIRED which were mispredicted and caused a pipeline flush."
+ },
+ {
+ "ArchStdEvent": "OP_RETIRED",
+ "PublicDescription": "Counts micro-operations that are architecturally executed. This is a count of number of micro-operations retired from the commit queue in a single cycle."
+ }
+]
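A side note on the SW_INCR event documented above: its description says application software must itself write to PMSWINC_EL0. A minimal sketch of such a write on AArch64 (illustrative only, not part of the patch; it assumes the selected counter has been programmed to count SW_INCR and that user-space PMU access has been enabled by the kernel):

    /* Illustrative only: bump SW_INCR event counter <counter> on AArch64.
     * Writing 1 to bit n of PMSWINC_EL0 increments event counter n, provided
     * that counter is configured to count the SW_INCR event and user-space
     * access is permitted (PMUSERENR_EL0). */
    static inline void pmu_sw_incr(unsigned int counter)
    {
            unsigned long mask = 1UL << counter;

            asm volatile("msr pmswinc_el0, %0" : : "r" (mask));
    }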
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/spe.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/spe.json
new file mode 100644
index 000000000000..5de8b0f3a440
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/spe.json
@@ -0,0 +1,18 @@
+[
+ {
+ "ArchStdEvent": "SAMPLE_POP",
+ "PublicDescription": "Counts statistical profiling sample population, the count of all operations that could be sampled but may or may not be chosen for sampling."
+ },
+ {
+ "ArchStdEvent": "SAMPLE_FEED",
+ "PublicDescription": "Counts statistical profiling samples taken for sampling."
+ },
+ {
+ "ArchStdEvent": "SAMPLE_FILTRATE",
+ "PublicDescription": "Counts statistical profiling samples taken which are not removed by filtering."
+ },
+ {
+ "ArchStdEvent": "SAMPLE_COLLISION",
+ "PublicDescription": "Counts statistical profiling samples that have collided with a previous sample and so therefore not taken."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/spec_operation.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/spec_operation.json
new file mode 100644
index 000000000000..1af961f8a6c8
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/spec_operation.json
@@ -0,0 +1,110 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED",
+ "PublicDescription": "Counts branches which are speculatively executed and mispredicted."
+ },
+ {
+ "ArchStdEvent": "BR_PRED",
+ "PublicDescription": "Counts branches speculatively executed and were predicted right."
+ },
+ {
+ "ArchStdEvent": "INST_SPEC",
+ "PublicDescription": "Counts operations that have been speculatively executed."
+ },
+ {
+ "ArchStdEvent": "OP_SPEC",
+ "PublicDescription": "Counts micro-operations speculatively executed. This is the count of the number of micro-operations dispatched in a cycle."
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC",
+ "PublicDescription": "Counts unaligned memory read operations issued by the CPU. This event counts unaligned accesses (as defined by the actual instruction), even if they are subsequently issued as multiple aligned accesses. The event does not count preload operations (PLD, PLI)."
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC",
+ "PublicDescription": "Counts unaligned memory write operations issued by the CPU. This event counts unaligned accesses (as defined by the actual instruction), even if they are subsequently issued as multiple aligned accesses."
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC",
+ "PublicDescription": "Counts unaligned memory operations issued by the CPU. This event counts unaligned accesses (as defined by the actual instruction), even if they are subsequently issued as multiple aligned accesses."
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC",
+ "PublicDescription": "Counts Load-Exclusive operations that have been speculatively executed. Eg: LDREX, LDX"
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC",
+ "PublicDescription": "Counts store-exclusive operations that have been speculatively executed and have successfully completed the store operation."
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC",
+ "PublicDescription": "Counts store-exclusive operations that have been speculatively executed and have not successfully completed the store operation."
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC",
+ "PublicDescription": "Counts store-exclusive operations that have been speculatively executed."
+ },
+ {
+ "ArchStdEvent": "LD_SPEC",
+ "PublicDescription": "Counts speculatively executed load operations including Single Instruction Multiple Data (SIMD) load operations."
+ },
+ {
+ "ArchStdEvent": "ST_SPEC",
+ "PublicDescription": "Counts speculatively executed store operations including Single Instruction Multiple Data (SIMD) store operations."
+ },
+ {
+ "ArchStdEvent": "DP_SPEC",
+ "PublicDescription": "Counts speculatively executed logical or arithmetic instructions such as MOV/MVN operations."
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC",
+ "PublicDescription": "Counts speculatively executed Advanced SIMD operations excluding load, store and move micro-operations that move data to or from SIMD (vector) registers."
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC",
+ "PublicDescription": "Counts speculatively executed floating point operations. This event does not count operations that move data to or from floating point (vector) registers."
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC",
+ "PublicDescription": "Counts speculatively executed operations which cause software changes of the PC. Those operations include all taken branch operations."
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC",
+ "PublicDescription": "Counts speculatively executed cryptographic operations except for PMULL and VMULL operations."
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC",
+ "PublicDescription": "Counts immediate branch operations which are speculatively executed."
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC",
+ "PublicDescription": "Counts procedure return operations (RET) which are speculatively executed."
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC",
+ "PublicDescription": "Counts indirect branch operations including procedure returns, which are speculatively executed. This includes operations that force a software change of the PC, other than exception-generating operations. Eg: BR Xn, RET"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC",
+ "PublicDescription": "Counts ISB operations that are executed."
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC",
+ "PublicDescription": "Counts DSB operations that are speculatively issued to Load/Store unit in the CPU."
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC",
+ "PublicDescription": "Counts DMB operations that are speculatively issued to the Load/Store unit in the CPU. This event does not count implied barriers from load acquire/store release operations."
+ },
+ {
+ "ArchStdEvent": "RC_LD_SPEC",
+ "PublicDescription": "Counts any load acquire operations that are speculatively executed. Eg: LDAR, LDARH, LDARB"
+ },
+ {
+ "ArchStdEvent": "RC_ST_SPEC",
+ "PublicDescription": "Counts any store release operations that are speculatively executed. Eg: STLR, STLRH, STLRB'"
+ },
+ {
+ "ArchStdEvent": "ASE_INST_SPEC",
+ "PublicDescription": "Counts speculatively executed Advanced SIMD operations."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/stall.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/stall.json
new file mode 100644
index 000000000000..bbbebc805034
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/stall.json
@@ -0,0 +1,30 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND",
+ "PublicDescription": "Counts cycles when frontend could not send any micro-operations to the rename stage because of frontend resource stalls caused by fetch memory latency or branch prediction flow stalls. All the frontend slots were empty during the cycle when this event counts."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND",
+ "PublicDescription": "Counts cycles whenever the rename unit is unable to send any micro-operations to the backend of the pipeline because of backend resource constraints. Backend resource constraints can include issue stage fullness, execution stage fullness, or other internal pipeline resource fullness. All the backend slots were empty during the cycle when this event counts."
+ },
+ {
+ "ArchStdEvent": "STALL",
+ "PublicDescription": "Counts cycles when no operations are sent to the rename unit from the frontend or from the rename unit to the backend for any reason (either frontend or backend stall)."
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_BACKEND",
+ "PublicDescription": "Counts slots per cycle in which no operations are sent from the rename unit to the backend due to backend resource constraints."
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_FRONTEND",
+ "PublicDescription": "Counts slots per cycle in which no operations are sent to the rename unit from the frontend due to frontend resource constraints."
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT",
+ "PublicDescription": "Counts slots per cycle in which no operations are sent to the rename unit from the frontend or from the rename unit to the backend for any reason (either frontend or backend stall)."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_MEM",
+ "PublicDescription": "Counts cycles when the backend is stalled because there is a pending demand load request in progress in the last level core cache."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/sve.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/sve.json
new file mode 100644
index 000000000000..5137e2075a76
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/sve.json
@@ -0,0 +1,30 @@
+[
+ {
+ "ArchStdEvent": "SVE_INST_SPEC",
+ "PublicDescription": "Counts speculatively executed operations that are SVE operations."
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_SPEC",
+ "PublicDescription": "Counts speculatively executed predicated SVE operations."
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_EMPTY_SPEC",
+ "PublicDescription": "Counts speculatively executed predicated SVE operations with no active predicate elements."
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_FULL_SPEC",
+ "PublicDescription": "Counts speculatively executed predicated SVE operations with all predicate elements active."
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_PARTIAL_SPEC",
+ "PublicDescription": "Counts speculatively executed predicated SVE operations with at least one but not all active predicate elements."
+ },
+ {
+ "ArchStdEvent": "SVE_LDFF_SPEC",
+ "PublicDescription": "Counts speculatively executed SVE first fault or non-fault load operations."
+ },
+ {
+ "ArchStdEvent": "SVE_LDFF_FAULT_SPEC",
+ "PublicDescription": "Counts speculatively executed SVE first fault or non-fault load operations that clear at least one bit in the FFR."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/tlb.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/tlb.json
new file mode 100644
index 000000000000..b550af1831f5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/tlb.json
@@ -0,0 +1,66 @@
+[
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL",
+ "PublicDescription": "Counts level 1 instruction TLB refills from any Instruction fetch. If there are multiple misses in the TLB that are resolved by the refill, then this event only counts once. This event will not count if the translation table walk results in a fault (such as a translation or access fault), since there is no new translation created for the TLB."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL",
+ "PublicDescription": "Counts level 1 data TLB accesses that resulted in TLB refills. If there are multiple misses in the TLB that are resolved by the refill, then this event only counts once. This event counts for refills caused by preload instructions or hardware prefetch accesses. This event counts regardless of whether the miss hits in L2 or results in a translation table walk. This event will not count if the translation table walk results in a fault (such as a translation or access fault), since there is no new translation created for the TLB. This event will not count on an access from an AT(address translation) instruction."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB",
+ "PublicDescription": "Counts level 1 data TLB accesses caused by any memory load or store operation. Note that load or store instructions can be broken up into multiple memory operations. This event does not count TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L1I_TLB",
+ "PublicDescription": "Counts level 1 instruction TLB accesses, whether the access hits or misses in the TLB. This event counts both demand accesses and prefetch or preload generated accesses."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL",
+ "PublicDescription": "Counts level 2 TLB refills caused by memory operations from both data and instruction fetch, except for those caused by TLB maintenance operations and hardware prefetches."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB",
+ "PublicDescription": "Counts level 2 TLB accesses except those caused by TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK",
+ "PublicDescription": "Counts data memory translation table walks caused by a miss in the L2 TLB driven by a memory access. Note that partial translations that also cause a table walk are counted. This event does not count table walks caused by TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK",
+ "PublicDescription": "Counts instruction memory translation table walks caused by a miss in the L2 TLB driven by a memory access. Partial translations that also cause a table walk are counted. This event does not count table walks caused by TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_RD",
+ "PublicDescription": "Counts level 1 data TLB refills caused by memory read operations. If there are multiple misses in the TLB that are resolved by the refill, then this event only counts once. This event counts for refills caused by preload instructions or hardware prefetch accesses. This event counts regardless of whether the miss hits in L2 or results in a translation table walk. This event will not count if the translation table walk results in a fault (such as a translation or access fault), since there is no new translation created for the TLB. This event will not count on an access from an Address Translation (AT) instruction."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_WR",
+ "PublicDescription": "Counts level 1 data TLB refills caused by data side memory write operations. If there are multiple misses in the TLB that are resolved by the refill, then this event only counts once. This event counts for refills caused by preload instructions or hardware prefetch accesses. This event counts regardless of whether the miss hits in L2 or results in a translation table walk. This event will not count if the table walk results in a fault (such as a translation or access fault), since there is no new translation created for the TLB. This event will not count with an access from an Address Translation (AT) instruction."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_RD",
+ "PublicDescription": "Counts level 1 data TLB accesses caused by memory read operations. This event counts whether the access hits or misses in the TLB. This event does not count TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_WR",
+ "PublicDescription": "Counts any L1 data side TLB accesses caused by memory write operations. This event counts whether the access hits or misses in the TLB. This event does not count TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_RD",
+ "PublicDescription": "Counts level 2 TLB refills caused by memory read operations from both data and instruction fetch except for those caused by TLB maintenance operations or hardware prefetches."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_WR",
+ "PublicDescription": "Counts level 2 TLB refills caused by memory write operations from both data and instruction fetch except for those caused by TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_RD",
+ "PublicDescription": "Counts level 2 TLB accesses caused by memory read operations from both data and instruction fetch except for those caused by TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_WR",
+ "PublicDescription": "Counts level 2 TLB accesses caused by memory write operations from both data and instruction fetch except for those caused by TLB maintenance operations."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/mapfile.csv b/tools/perf/pmu-events/arch/arm64/mapfile.csv
index 32674ddd2b63..5b58db5032c1 100644
--- a/tools/perf/pmu-events/arch/arm64/mapfile.csv
+++ b/tools/perf/pmu-events/arch/arm64/mapfile.csv
@@ -4,7 +4,7 @@
# where
# MIDR Processor version
# Variant[23:20] and Revision [3:0] should be zero.
-# Version could be used to track version of of JSON file
+# Version could be used to track version of JSON file
# but currently unused.
# JSON/file/pathname is the path to JSON file, relative
# to tools/perf/pmu-events/arch/arm64/.
diff --git a/tools/perf/pmu-events/arch/nds32/mapfile.csv b/tools/perf/pmu-events/arch/nds32/mapfile.csv
index efb395f26883..c76e5fbdac23 100644
--- a/tools/perf/pmu-events/arch/nds32/mapfile.csv
+++ b/tools/perf/pmu-events/arch/nds32/mapfile.csv
@@ -4,7 +4,7 @@
# where
# MIDR Processor version
# Variant[23:20] and Revision [3:0] should be zero.
-# Version could be used to track version of of JSON file
+# Version could be used to track version of JSON file
# but currently unused.
# JSON/file/pathname is the path to JSON file, relative
# to tools/perf/pmu-events/arch/arm64/.
diff --git a/tools/perf/pmu-events/arch/powerpc/mapfile.csv b/tools/perf/pmu-events/arch/powerpc/mapfile.csv
index 4abdfc3f9692..f4908af7ad66 100644
--- a/tools/perf/pmu-events/arch/powerpc/mapfile.csv
+++ b/tools/perf/pmu-events/arch/powerpc/mapfile.csv
@@ -3,7 +3,7 @@
#
# where
# PVR Processor version
-# Version could be used to track version of of JSON file
+# Version could be used to track version of JSON file
# but currently unused.
# JSON/file/pathname is the path to JSON file, relative
# to tools/perf/pmu-events/arch/powerpc/.
@@ -13,6 +13,6 @@
#
# Power8 entries
-004[bcd][[:xdigit:]]{4},1,power8,core
-004e[[:xdigit:]]{4},1,power9,core
-0080[[:xdigit:]]{4},1,power10,core
+0x004[bcd][[:xdigit:]]{4},1,power8,core
+0x004e[[:xdigit:]]{4},1,power9,core
+0x0080[[:xdigit:]]{4},1,power10,core
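For context on the mapfile.csv change above: the first field of each row is a regular expression matched against the CPU's PVR string, and the added 0x prefix presumably aligns the patterns with a PVR string that carries a 0x prefix. A small standalone check using POSIX extended regex (illustrative only; the PVR value is made up, and perf's own matching code may differ in detail):

    /* Illustrative only: test a made-up PVR string against the updated
     * power9 pattern from mapfile.csv. */
    #include <regex.h>
    #include <stdio.h>

    int main(void)
    {
            regex_t re;
            const char *pattern = "0x004e[[:xdigit:]]{4}";
            const char *pvr = "0x004e1202";   /* hypothetical PVR value */

            if (regcomp(&re, pattern, REG_EXTENDED))
                    return 1;
            printf("%s %s\n", pvr,
                   regexec(&re, pvr, 0, NULL, 0) == 0 ? "matches" : "does not match");
            regfree(&re);
            return 0;
    }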
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/datasource.json b/tools/perf/pmu-events/arch/powerpc/power10/datasource.json
new file mode 100644
index 000000000000..6b0356f2d301
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/datasource.json
@@ -0,0 +1,1787 @@
+[
+ {
+ "EventCode": "0x200FE",
+ "EventName": "PM_DATA_FROM_L2MISS",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a source beyond the local core's L2 due to a demand miss."
+ },
+ {
+ "EventCode": "0x300FE",
+ "EventName": "PM_DATA_FROM_L3MISS",
+ "BriefDescription": "The processor's L1 data cache was reloaded from beyond the local core's L3 due to a demand miss."
+ },
+ {
+ "EventCode": "0x400FE",
+ "EventName": "PM_DATA_FROM_MEMORY",
+ "BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss."
+ },
+ {
+ "EventCode": "0x000300000000C040",
+ "EventName": "PM_INST_FROM_L2",
+ "BriefDescription": "The processor's instruction cache was reloaded from the local core's L2 due to a demand miss."
+ },
+ {
+ "EventCode": "0x000340000000C040",
+ "EventName": "PM_DATA_FROM_L2",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local core's L2 due to a demand miss."
+ },
+ {
+ "EventCode": "0x000300000010C040",
+ "EventName": "PM_INST_FROM_L2_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from the local core's L2 due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x000340000020C040",
+ "EventName": "PM_DATA_FROM_L2_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local core's L2 due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x003F00000000C040",
+ "EventName": "PM_INST_FROM_L1MISS",
+ "BriefDescription": "The processor's instruction cache was reloaded from a source beyond the local core's L1 due to a demand miss."
+ },
+ {
+ "EventCode": "0x003F40000000C040",
+ "EventName": "PM_DATA_FROM_L1MISS",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a source beyond the local core's L1 due to a demand miss."
+ },
+ {
+ "EventCode": "0x003F00000010C040",
+ "EventName": "PM_INST_FROM_L1MISS_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from a source beyond the local core's L1 due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x003F40000020C040",
+ "EventName": "PM_DATA_FROM_L1MISS_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a source beyond the local core's L1 due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x000040000000C040",
+ "EventName": "PM_DATA_FROM_L2_NO_CONFLICT",
+ "BriefDescription": "The processor's L1 data cache was reloaded without dispatch conflicts with data NOT in the MEPF state from the local core's L2 due to a demand miss."
+ },
+ {
+ "EventCode": "0x000040000020C040",
+ "EventName": "PM_DATA_FROM_L2_NO_CONFLICT_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded without dispatch conflicts with data NOT in the MEPF state from the local core's L2 due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x004040000000C040",
+ "EventName": "PM_DATA_FROM_L2_MEPF",
+ "BriefDescription": "The processor's L1 data cache was reloaded with data in the MEPF state without dispatch conflicts from the local core's L2 due to a demand miss."
+ },
+ {
+ "EventCode": "0x004040000020C040",
+ "EventName": "PM_DATA_FROM_L2_MEPF_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with data in the MEPF state without dispatch conflicts from the local core's L2 due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x008040000000C040",
+ "EventName": "PM_DATA_FROM_L2_LDHITST_CONFLICT",
+ "BriefDescription": "The processor's L1 data cache was reloaded with data that had a dispatch conflict on ld-hit-store from the local core's L2 due to a demand miss."
+ },
+ {
+ "EventCode": "0x008040000020C040",
+ "EventName": "PM_DATA_FROM_L2_LDHITST_CONFLICT_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with data that had a dispatch conflict on ld-hit-store from the local core's L2 due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x00C040000000C040",
+ "EventName": "PM_DATA_FROM_L2_OTHER_CONFLICT",
+ "BriefDescription": "The processor's L1 data cache was reloaded with data that had a dispatch conflict other than ld-hit-store from the local core's L2 due to a demand miss."
+ },
+ {
+ "EventCode": "0x00C040000020C040",
+ "EventName": "PM_DATA_FROM_L2_OTHER_CONFLICT_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with data that had a dispatch conflict other than ld-hit-store from the local core's L2 due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x000380000000C040",
+ "EventName": "PM_INST_FROM_L2MISS",
+ "BriefDescription": "The processor's instruction cache was reloaded from a source beyond the local core's L2 due to a demand miss."
+ },
+ {
+ "EventCode": "0x000380000010C040",
+ "EventName": "PM_INST_FROM_L2MISS_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from a source beyond the local core's L2 due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0003C0000020C040",
+ "EventName": "PM_DATA_FROM_L2MISS_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a source beyond the local core's L2 due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x010300000000C040",
+ "EventName": "PM_INST_FROM_L3",
+ "BriefDescription": "The processor's instruction cache was reloaded from the local core's L3 due to a demand miss."
+ },
+ {
+ "EventCode": "0x010340000000C040",
+ "EventName": "PM_DATA_FROM_L3",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local core's L3 due to a demand miss."
+ },
+ {
+ "EventCode": "0x010300000010C040",
+ "EventName": "PM_INST_FROM_L3_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from the local core's L3 due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x010340000020C040",
+ "EventName": "PM_DATA_FROM_L3_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local core's L3 due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x010040000000C040",
+ "EventName": "PM_DATA_FROM_L3_NO_CONFLICT",
+ "BriefDescription": "The processor's L1 data cache was reloaded without dispatch conflicts with data NOT in the MEPF state from the local core's L3 due to a demand miss."
+ },
+ {
+ "EventCode": "0x010040000020C040",
+ "EventName": "PM_DATA_FROM_L3_NO_CONFLICT_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded without dispatch conflicts with data NOT in the MEPF state from the local core's L3 due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x014040000000C040",
+ "EventName": "PM_DATA_FROM_L3_MEPF",
+ "BriefDescription": "The processor's L1 data cache was reloaded with data in the MEPF state without dispatch conflicts from the local core's L3 due to a demand miss."
+ },
+ {
+ "EventCode": "0x014040000020C040",
+ "EventName": "PM_DATA_FROM_L3_MEPF_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with data in the MEPF state without dispatch conflicts from the local core's L3 due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x01C040000000C040",
+ "EventName": "PM_DATA_FROM_L3_CONFLICT",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local core's L3 due to a demand miss."
+ },
+ {
+ "EventCode": "0x01C040000020C040",
+ "EventName": "PM_DATA_FROM_L3_CONFLICT_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local core's L3 due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x000780000000C040",
+ "EventName": "PM_INST_FROM_L3MISS",
+ "BriefDescription": "The processor's instruction cache was reloaded from beyond the local core's L3 due to a demand miss."
+ },
+ {
+ "EventCode": "0x000780000010C040",
+ "EventName": "PM_INST_FROM_L3MISS_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from beyond the local core's L3 due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0007C0000020C040",
+ "EventName": "PM_DATA_FROM_L3MISS_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from beyond the local core's L3 due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x080040000000C040",
+ "EventName": "PM_DATA_FROM_L21_REGENT_SHR",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 on the same chip in the same regent due to a demand miss."
+ },
+ {
+ "EventCode": "0x080040000020C040",
+ "EventName": "PM_DATA_FROM_L21_REGENT_SHR_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 on the same chip in the same regent due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x084040000000C040",
+ "EventName": "PM_DATA_FROM_L21_REGENT_MOD",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 on the same chip in the same regent due to a demand miss."
+ },
+ {
+ "EventCode": "0x084040000020C040",
+ "EventName": "PM_DATA_FROM_L21_REGENT_MOD_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 on the same chip in the same regent due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x080100000000C040",
+ "EventName": "PM_INST_FROM_L21_REGENT",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 on the same chip in the same regent due to a demand miss."
+ },
+ {
+ "EventCode": "0x080140000000C040",
+ "EventName": "PM_DATA_FROM_L21_REGENT",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 on the same chip in the same regent due to a demand miss."
+ },
+ {
+ "EventCode": "0x080100000010C040",
+ "EventName": "PM_INST_FROM_L21_REGENT_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 on the same chip in the same regent due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x080140000020C040",
+ "EventName": "PM_DATA_FROM_L21_REGENT_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 on the same chip in the same regent due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x088040000000C040",
+ "EventName": "PM_DATA_FROM_L31_REGENT_SHR",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L3 on the same chip in the same regent due to a demand miss."
+ },
+ {
+ "EventCode": "0x088040000020C040",
+ "EventName": "PM_DATA_FROM_L31_REGENT_SHR_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L3 on the same chip in the same regent due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x08C040000000C040",
+ "EventName": "PM_DATA_FROM_L31_REGENT_MOD",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L3 on the same chip in the same regent due to a demand miss."
+ },
+ {
+ "EventCode": "0x08C040000020C040",
+ "EventName": "PM_DATA_FROM_L31_REGENT_MOD_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L3 on the same chip in the same regent due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x088100000000C040",
+ "EventName": "PM_INST_FROM_L31_REGENT",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L3 on the same chip in the same regent due to a demand miss."
+ },
+ {
+ "EventCode": "0x088140000000C040",
+ "EventName": "PM_DATA_FROM_L31_REGENT",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L3 on the same chip in the same regent due to a demand miss."
+ },
+ {
+ "EventCode": "0x088100000010C040",
+ "EventName": "PM_INST_FROM_L31_REGENT_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L3 on the same chip in the same regent due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x088140000020C040",
+ "EventName": "PM_DATA_FROM_L31_REGENT_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L3 on the same chip in the same regent due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x080240000000C040",
+ "EventName": "PM_DATA_FROM_REGENT_L2L3_SHR",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 or L3 on the same chip in the same regent due to a demand miss."
+ },
+ {
+ "EventCode": "0x080240000020C040",
+ "EventName": "PM_DATA_FROM_REGENT_L2L3_SHR_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 or L3 on the same chip in the same regent due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x084240000000C040",
+ "EventName": "PM_DATA_FROM_REGENT_L2L3_MOD",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 or L3 on the same chip in the same regent due to a demand miss."
+ },
+ {
+ "EventCode": "0x084240000020C040",
+ "EventName": "PM_DATA_FROM_REGENT_L2L3_MOD_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 or L3 on the same chip in the same regent due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x080300000000C040",
+ "EventName": "PM_INST_FROM_REGENT_L2L3",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 or L3 on the same chip in the same regent due to a demand miss."
+ },
+ {
+ "EventCode": "0x080340000000C040",
+ "EventName": "PM_DATA_FROM_REGENT_L2L3",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 or L3 on the same chip in the same regent due to a demand miss."
+ },
+ {
+ "EventCode": "0x080300000010C040",
+ "EventName": "PM_INST_FROM_REGENT_L2L3_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 or L3 on the same chip in the same regent due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x080340000020C040",
+ "EventName": "PM_DATA_FROM_REGENT_L2L3_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 or L3 on the same chip in the same regent due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0A0040000000C040",
+ "EventName": "PM_DATA_FROM_L21_NON_REGENT_SHR",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 on the same chip in a different regent due to a demand miss."
+ },
+ {
+ "EventCode": "0x0A0040000020C040",
+ "EventName": "PM_DATA_FROM_L21_NON_REGENT_SHR_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 on the same chip in a different regent due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0A4040000000C040",
+ "EventName": "PM_DATA_FROM_L21_NON_REGENT_MOD",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 on the same chip in a different regent due to a demand miss."
+ },
+ {
+ "EventCode": "0x0A4040000020C040",
+ "EventName": "PM_DATA_FROM_L21_NON_REGENT_MOD_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 on the same chip in a different regent due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0A0100000000C040",
+ "EventName": "PM_INST_FROM_L21_NON_REGENT",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 on the same chip in a different regent due to a demand miss."
+ },
+ {
+ "EventCode": "0x0A0140000000C040",
+ "EventName": "PM_DATA_FROM_L21_NON_REGENT",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 on the same chip in a different regent due to a demand miss."
+ },
+ {
+ "EventCode": "0x0A0100000010C040",
+ "EventName": "PM_INST_FROM_L21_NON_REGENT_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 on the same chip in a different regent due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0A0140000020C040",
+ "EventName": "PM_DATA_FROM_L21_NON_REGENT_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 on the same chip in a different regent due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0A8040000000C040",
+ "EventName": "PM_DATA_FROM_L31_NON_REGENT_SHR",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L3 on the same chip in a different regent due to a demand miss."
+ },
+ {
+ "EventCode": "0x0A8040000020C040",
+ "EventName": "PM_DATA_FROM_L31_NON_REGENT_SHR_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L3 on the same chip in a different regent due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0AC040000000C040",
+ "EventName": "PM_DATA_FROM_L31_NON_REGENT_MOD",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L3 on the same chip in a different regent due to a demand miss."
+ },
+ {
+ "EventCode": "0x0AC040000020C040",
+ "EventName": "PM_DATA_FROM_L31_NON_REGENT_MOD_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L3 on the same chip in a different regent due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0A8100000000C040",
+ "EventName": "PM_INST_FROM_L31_NON_REGENT",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L3 on the same chip in a different regent due to a demand miss."
+ },
+ {
+ "EventCode": "0x0A8140000000C040",
+ "EventName": "PM_DATA_FROM_L31_NON_REGENT",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L3 on the same chip in a different regent due to a demand miss."
+ },
+ {
+ "EventCode": "0x0A8100000010C040",
+ "EventName": "PM_INST_FROM_L31_NON_REGENT_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L3 on the same chip in a different regent due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0A8140000020C040",
+ "EventName": "PM_DATA_FROM_L31_NON_REGENT_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L3 on the same chip in a different regent due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0A0240000000C040",
+ "EventName": "PM_DATA_FROM_NON_REGENT_L2L3_SHR",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 or L3 on the same chip in a different regent due to a demand miss."
+ },
+ {
+ "EventCode": "0x0A0240000020C040",
+ "EventName": "PM_DATA_FROM_NON_REGENT_L2L3_SHR_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 or L3 on the same chip in a different regent due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0A4240000000C040",
+ "EventName": "PM_DATA_FROM_NON_REGENT_L2L3_MOD",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 or L3 on the same chip in a different regent due to a demand miss."
+ },
+ {
+ "EventCode": "0x0A4240000020C040",
+ "EventName": "PM_DATA_FROM_NON_REGENT_L2L3_MOD_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 or L3 on the same chip in a different regent due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0A0300000000C040",
+ "EventName": "PM_INST_FROM_NON_REGENT_L2L3",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 or L3 on the same chip in a different regent due to a demand miss."
+ },
+ {
+ "EventCode": "0x0A0340000000C040",
+ "EventName": "PM_DATA_FROM_NON_REGENT_L2L3",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 or L3 on the same chip in a different regent due to a demand miss."
+ },
+ {
+ "EventCode": "0x0A0300000010C040",
+ "EventName": "PM_INST_FROM_NON_REGENT_L2L3_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 or L3 on the same chip in a different regent due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0A0340000020C040",
+ "EventName": "PM_DATA_FROM_NON_REGENT_L2L3_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 or L3 on the same chip in a different regent due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x094100000000C040",
+ "EventName": "PM_INST_FROM_LMEM",
+ "BriefDescription": "The processor's instruction cache was reloaded from the local chip's memory due to a demand miss."
+ },
+ {
+ "EventCode": "0x094040000000C040",
+ "EventName": "PM_DATA_FROM_LMEM",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local chip's memory due to a demand miss."
+ },
+ {
+ "EventCode": "0x094100000010C040",
+ "EventName": "PM_INST_FROM_LMEM_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from the local chip's memory due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x094040000020C040",
+ "EventName": "PM_DATA_FROM_LMEM_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local chip's memory due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x098040000000C040",
+ "EventName": "PM_DATA_FROM_L_OC_CACHE",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local chip's OpenCAPI cache due to a demand miss."
+ },
+ {
+ "EventCode": "0x098040000020C040",
+ "EventName": "PM_DATA_FROM_L_OC_CACHE_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local chip's OpenCAPI cache due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x09C040000000C040",
+ "EventName": "PM_DATA_FROM_L_OC_MEM",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local chip's OpenCAPI memory due to a demand miss."
+ },
+ {
+ "EventCode": "0x09C040000020C040",
+ "EventName": "PM_DATA_FROM_L_OC_MEM_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local chip's OpenCAPI memory due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x098100000000C040",
+ "EventName": "PM_INST_FROM_L_OC_ANY",
+ "BriefDescription": "The processor's instruction cache was reloaded from the local chip's OpenCAPI cache or memory due to a demand miss."
+ },
+ {
+ "EventCode": "0x098140000000C040",
+ "EventName": "PM_DATA_FROM_L_OC_ANY",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local chip's OpenCAPI cache or memory due to a demand miss."
+ },
+ {
+ "EventCode": "0x098100000010C040",
+ "EventName": "PM_INST_FROM_L_OC_ANY_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from the local chip's OpenCAPI cache or memory due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x098140000020C040",
+ "EventName": "PM_DATA_FROM_L_OC_ANY_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local chip's OpenCAPI cache or memory due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0C0040000000C040",
+ "EventName": "PM_DATA_FROM_RL2_SHR",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 from a remote chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0C0040000020C040",
+ "EventName": "PM_DATA_FROM_RL2_SHR_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 from a remote chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0C4040000000C040",
+ "EventName": "PM_DATA_FROM_RL2_MOD",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 from a remote chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0C4040000020C040",
+ "EventName": "PM_DATA_FROM_RL2_MOD_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 from a remote chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0C0100000000C040",
+ "EventName": "PM_INST_FROM_RL2",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 from a remote chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0C0140000000C040",
+ "EventName": "PM_DATA_FROM_RL2",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 from a remote chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0C0100000010C040",
+ "EventName": "PM_INST_FROM_RL2_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 from a remote chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0C0140000020C040",
+ "EventName": "PM_DATA_FROM_RL2_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 from a remote chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0C8040000000C040",
+ "EventName": "PM_DATA_FROM_RL3_SHR",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L3 from a remote chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0C8040000020C040",
+ "EventName": "PM_DATA_FROM_RL3_SHR_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L3 from a remote chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0CC040000000C040",
+ "EventName": "PM_DATA_FROM_RL3_MOD",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L3 from a remote chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0CC040000020C040",
+ "EventName": "PM_DATA_FROM_RL3_MOD_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L3 from a remote chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0C8100000000C040",
+ "EventName": "PM_INST_FROM_RL3",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L3 from a remote chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0C8140000000C040",
+ "EventName": "PM_DATA_FROM_RL3",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L3 from a remote chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0C8100000010C040",
+ "EventName": "PM_INST_FROM_RL3_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L3 from a remote chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0C8140000020C040",
+ "EventName": "PM_DATA_FROM_RL3_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L3 from a remote chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0C0240000000C040",
+ "EventName": "PM_DATA_FROM_RL2L3_SHR",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 or L3 from a remote chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0C0240000020C040",
+ "EventName": "PM_DATA_FROM_RL2L3_SHR_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 or L3 from a remote chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0C4240000000C040",
+ "EventName": "PM_DATA_FROM_RL2L3_MOD",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 or L3 from a remote chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0C4240000020C040",
+ "EventName": "PM_DATA_FROM_RL2L3_MOD_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 or L3 from a remote chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0C0300000000C040",
+ "EventName": "PM_INST_FROM_RL2L3",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 or L3 from a remote chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0C0340000000C040",
+ "EventName": "PM_DATA_FROM_RL2L3",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 or L3 from a remote chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0C0300000010C040",
+ "EventName": "PM_INST_FROM_RL2L3_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 or L3 from a remote chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0C0340000020C040",
+ "EventName": "PM_DATA_FROM_RL2L3_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 or L3 from a remote chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0D4100000000C040",
+ "EventName": "PM_INST_FROM_RMEM",
+ "BriefDescription": "The processor's instruction cache was reloaded from remote memory (MC slow) due to a demand miss."
+ },
+ {
+ "EventCode": "0x0D4040000000C040",
+ "EventName": "PM_DATA_FROM_RMEM",
+ "BriefDescription": "The processor's L1 data cache was reloaded from remote memory (MC slow) due to a demand miss."
+ },
+ {
+ "EventCode": "0x0D4100000010C040",
+ "EventName": "PM_INST_FROM_RMEM_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from remote memory (MC slow) due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0D4040000020C040",
+ "EventName": "PM_DATA_FROM_RMEM_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from remote memory (MC slow) due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0D8040000000C040",
+ "EventName": "PM_DATA_FROM_R_OC_CACHE",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a remote chip's OpenCAPI cache due to a demand miss."
+ },
+ {
+ "EventCode": "0x0D8040000020C040",
+ "EventName": "PM_DATA_FROM_R_OC_CACHE_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a remote chip's OpenCAPI cache due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0DC040000000C040",
+ "EventName": "PM_DATA_FROM_R_OC_MEM",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a remote chip's OpenCAPI memory due to a demand miss."
+ },
+ {
+ "EventCode": "0x0DC040000020C040",
+ "EventName": "PM_DATA_FROM_R_OC_MEM_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a remote chip's OpenCAPI memory due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0D8100000000C040",
+ "EventName": "PM_INST_FROM_R_OC_ANY",
+ "BriefDescription": "The processor's instruction cache was reloaded from a remote chip's OpenCAPI cache or memory due to a demand miss."
+ },
+ {
+ "EventCode": "0x0D8140000000C040",
+ "EventName": "PM_DATA_FROM_R_OC_ANY",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a remote chip's OpenCAPI cache or memory due to a demand miss."
+ },
+ {
+ "EventCode": "0x0D8100000010C040",
+ "EventName": "PM_INST_FROM_R_OC_ANY_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from a remote chip's OpenCAPI cache or memory due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0D8140000020C040",
+ "EventName": "PM_DATA_FROM_R_OC_ANY_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a remote chip's OpenCAPI cache or memory due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0E0040000000C040",
+ "EventName": "PM_DATA_FROM_DL2_SHR",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 from a distant chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0E0040000020C040",
+ "EventName": "PM_DATA_FROM_DL2_SHR_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 from a distant chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0E4040000000C040",
+ "EventName": "PM_DATA_FROM_DL2_MOD",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 from a distant chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0E4040000020C040",
+ "EventName": "PM_DATA_FROM_DL2_MOD_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 from a distant chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0E0100000000C040",
+ "EventName": "PM_INST_FROM_DL2",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 from a distant chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0E0140000000C040",
+ "EventName": "PM_DATA_FROM_DL2",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 from a distant chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0E0100000010C040",
+ "EventName": "PM_INST_FROM_DL2_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 from a distant chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0E0140000020C040",
+ "EventName": "PM_DATA_FROM_DL2_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 from a distant chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0E8040000000C040",
+ "EventName": "PM_DATA_FROM_DL3_SHR",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L3 from a distant chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0E8040000020C040",
+ "EventName": "PM_DATA_FROM_DL3_SHR_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L3 from a distant chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0EC040000000C040",
+ "EventName": "PM_DATA_FROM_DL3_MOD",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L3 from a distant chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0EC040000020C040",
+ "EventName": "PM_DATA_FROM_DL3_MOD_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L3 from a distant chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0E8100000000C040",
+ "EventName": "PM_INST_FROM_DL3",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L3 from a distant chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0E8140000000C040",
+ "EventName": "PM_DATA_FROM_DL3",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L3 from a distant chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0E8100000010C040",
+ "EventName": "PM_INST_FROM_DL3_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L3 from a distant chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0E8140000020C040",
+ "EventName": "PM_DATA_FROM_DL3_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L3 from a distant chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0E0240000000C040",
+ "EventName": "PM_DATA_FROM_DL2L3_SHR",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 or L3 from a distant chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0E0240000020C040",
+ "EventName": "PM_DATA_FROM_DL2L3_SHR_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 or L3 from a distant chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0E4240000000C040",
+ "EventName": "PM_DATA_FROM_DL2L3_MOD",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 or L3 from a distant chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0E4240000020C040",
+ "EventName": "PM_DATA_FROM_DL2L3_MOD_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 or L3 from a distant chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0E0300000000C040",
+ "EventName": "PM_INST_FROM_DL2L3",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 or L3 from a distant chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0E0340000000C040",
+ "EventName": "PM_DATA_FROM_DL2L3",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 or L3 from a distant chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0E0300000010C040",
+ "EventName": "PM_INST_FROM_DL2L3_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 or L3 from a distant chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0E0340000020C040",
+ "EventName": "PM_DATA_FROM_DL2L3_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 or L3 from a distant chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0F4100000000C040",
+ "EventName": "PM_INST_FROM_DMEM",
+ "BriefDescription": "The processor's instruction cache was reloaded from distant memory (MC slow) due to a demand miss."
+ },
+ {
+ "EventCode": "0x0F4040000000C040",
+ "EventName": "PM_DATA_FROM_DMEM",
+ "BriefDescription": "The processor's L1 data cache was reloaded from distant memory (MC slow) due to a demand miss."
+ },
+ {
+ "EventCode": "0x0F4100000010C040",
+ "EventName": "PM_INST_FROM_DMEM_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from distant memory (MC slow) due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0F4040000020C040",
+ "EventName": "PM_DATA_FROM_DMEM_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from distant memory (MC slow) due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0F8040000000C040",
+ "EventName": "PM_DATA_FROM_D_OC_CACHE",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a distant chip's OpenCAPI cache due to a demand miss."
+ },
+ {
+ "EventCode": "0x0F8040000020C040",
+ "EventName": "PM_DATA_FROM_D_OC_CACHE_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a distant chip's OpenCAPI cache due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0FC040000000C040",
+ "EventName": "PM_DATA_FROM_D_OC_MEM",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a distant chip's OpenCAPI memory due to a demand miss."
+ },
+ {
+ "EventCode": "0x0FC040000020C040",
+ "EventName": "PM_DATA_FROM_D_OC_MEM_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a distant chip's OpenCAPI memory due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0F8100000000C040",
+ "EventName": "PM_INST_FROM_D_OC_ANY",
+ "BriefDescription": "The processor's instruction cache was reloaded from a distant chip's OpenCAPI cache or memory due to a demand miss."
+ },
+ {
+ "EventCode": "0x0F8140000000C040",
+ "EventName": "PM_DATA_FROM_D_OC_ANY",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a distant chip's OpenCAPI cache or memory due to a demand miss."
+ },
+ {
+ "EventCode": "0x0F8100000010C040",
+ "EventName": "PM_INST_FROM_D_OC_ANY_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from a distant chip's OpenCAPI cache or memory due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0F8140000020C040",
+ "EventName": "PM_DATA_FROM_D_OC_ANY_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a distant chip's OpenCAPI cache or memory due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x080B00000000C040",
+ "EventName": "PM_INST_FROM_ONCHIP_CACHE",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 or L3 from the same chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x080B40000000C040",
+ "EventName": "PM_DATA_FROM_ONCHIP_CACHE",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 or L3 from the same chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x080B00000010C040",
+ "EventName": "PM_INST_FROM_ONCHIP_CACHE_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 or L3 from the same chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x080B40000020C040",
+ "EventName": "PM_DATA_FROM_ONCHIP_CACHE_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 or L3 from the same chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0C0B00000000C040",
+ "EventName": "PM_INST_FROM_OFFCHIP_CACHE",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 or L3 from a different chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0C0B40000000C040",
+ "EventName": "PM_DATA_FROM_OFFCHIP_CACHE",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 or L3 from a different chip due to a demand miss."
+ },
+ {
+ "EventCode": "0x0C0B00000010C040",
+ "EventName": "PM_INST_FROM_OFFCHIP_CACHE_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 or L3 from a different chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x0C0B40000020C040",
+ "EventName": "PM_DATA_FROM_OFFCHIP_CACHE_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 or L3 from a different chip due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x095900000000C040",
+ "EventName": "PM_INST_FROM_ANY_MEMORY",
+ "BriefDescription": "The processor's instruction cache was reloaded from any chip's memory (MC slow) due to a demand miss."
+ },
+ {
+ "EventCode": "0x095840000000C040",
+ "EventName": "PM_DATA_FROM_ANY_MEMORY",
+ "BriefDescription": "The processor's L1 data cache was reloaded from any chip's memory (MC slow) due to a demand miss."
+ },
+ {
+ "EventCode": "0x095900000010C040",
+ "EventName": "PM_INST_FROM_ANY_MEMORY_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from any chip's memory (MC slow) due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x095840000020C040",
+ "EventName": "PM_DATA_FROM_ANY_MEMORY_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from any chip's memory (MC slow) due to a demand miss or prefetch reload."
+ },
+ {
+ "EventCode": "0x000300000000C142",
+ "EventName": "PM_MRK_INST_FROM_L2",
+ "BriefDescription": "The processor's instruction cache was reloaded from the local core's L2 due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x000340000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L2",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local core's L2 due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x000300000010C142",
+ "EventName": "PM_MRK_INST_FROM_L2_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from the local core's L2 due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x000340000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L2_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local core's L2 due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x003F00000000C142",
+ "EventName": "PM_MRK_INST_FROM_L1MISS",
+ "BriefDescription": "The processor's instruction cache was reloaded from a source beyond the local core's L1 due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x003F40000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L1MISS",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a source beyond the local core's L1 due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x003F00000010C142",
+ "EventName": "PM_MRK_INST_FROM_L1MISS_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from a source beyond the local core's L1 due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x003F40000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L1MISS_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a source beyond the local core's L1 due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x000040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L2_NO_CONFLICT",
+ "BriefDescription": "The processor's L1 data cache was reloaded without dispatch conflicts with data NOT in the MEPF state from the local core's L2 due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x000040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L2_NO_CONFLICT_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded without dispatch conflicts with data NOT in the MEPF state from the local core's L2 due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x004040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L2_MEPF",
+ "BriefDescription": "The processor's L1 data cache was reloaded with data in the MEPF state without dispatch conflicts from the local core's L2 due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x004040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L2_MEPF_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with data in the MEPF state without dispatch conflicts from the local core's L2 due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x008040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L2_LDHITST_CONFLICT",
+ "BriefDescription": "The processor's L1 data cache was reloaded with data that had a dispatch conflict on ld-hit-store from the local core's L2 due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x008040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L2_LDHITST_CONFLICT_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with data that had a dispatch conflict on ld-hit-store from the local core's L2 due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x00C040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L2_OTHER_CONFLICT",
+ "BriefDescription": "The processor's L1 data cache was reloaded with data that had a dispatch conflict other than ld-hit-store from the local core's L2 due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x00C040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L2_OTHER_CONFLICT_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with data that had a dispatch conflict other than ld-hit-store from the local core's L2 due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x000380000000C142",
+ "EventName": "PM_MRK_INST_FROM_L2MISS",
+ "BriefDescription": "The processor's instruction cache was reloaded from a source beyond the local core's L2 due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0003C0000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L2MISS",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a source beyond the local core's L2 due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x000380000010C142",
+ "EventName": "PM_MRK_INST_FROM_L2MISS_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from a source beyond the local core's L2 due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0003C0000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L2MISS_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a source beyond the local core's L2 due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x010300000000C142",
+ "EventName": "PM_MRK_INST_FROM_L3",
+ "BriefDescription": "The processor's instruction cache was reloaded from the local core's L3 due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x010340000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L3",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local core's L3 due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x010300000010C142",
+ "EventName": "PM_MRK_INST_FROM_L3_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from the local core's L3 due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x010340000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L3_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local core's L3 due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x010040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L3_NO_CONFLICT",
+ "BriefDescription": "The processor's L1 data cache was reloaded without dispatch conflicts with data NOT in the MEPF state from the local core's L3 due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x010040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L3_NO_CONFLICT_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded without dispatch conflicts with data NOT in the MEPF state from the local core's L3 due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x014040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L3_MEPF",
+ "BriefDescription": "The processor's L1 data cache was reloaded with data in the MEPF state without dispatch conflicts from the local core's L3 due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x014040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L3_MEPF_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with data in the MEPF state without dispatch conflicts from the local core's L3 due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x01C040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L3_CONFLICT",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local core's L3 due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x01C040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L3_CONFLICT_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local core's L3 due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x000780000000C142",
+ "EventName": "PM_MRK_INST_FROM_L3MISS",
+ "BriefDescription": "The processor's instruction cache was reloaded from beyond the local core's L3 due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0007C0000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L3MISS",
+ "BriefDescription": "The processor's L1 data cache was reloaded from beyond the local core's L3 due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x000780000010C142",
+ "EventName": "PM_MRK_INST_FROM_L3MISS_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from beyond the local core's L3 due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0007C0000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L3MISS_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from beyond the local core's L3 due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x080040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L21_REGENT_SHR",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 on the same chip in the same regent due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x080040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L21_REGENT_SHR_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 on the same chip in the same regent due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x084040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L21_REGENT_MOD",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 on the same chip in the same regent due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x084040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L21_REGENT_MOD_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 on the same chip in the same regent due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x080100000000C142",
+ "EventName": "PM_MRK_INST_FROM_L21_REGENT",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 on the same chip in the same regent due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x080140000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L21_REGENT",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 on the same chip in the same regent due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x080100000010C142",
+ "EventName": "PM_MRK_INST_FROM_L21_REGENT_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 on the same chip in the same regent due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x080140000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L21_REGENT_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 on the same chip in the same regent due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x088040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L31_REGENT_SHR",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L3 on the same chip in the same regent due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x088040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L31_REGENT_SHR_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L3 on the same chip in the same regent due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x08C040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L31_REGENT_MOD",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L3 on the same chip in the same regent due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x08C040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L31_REGENT_MOD_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L3 on the same chip in the same regent due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x088100000000C142",
+ "EventName": "PM_MRK_INST_FROM_L31_REGENT",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L3 on the same chip in the same regent due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x088140000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L31_REGENT",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L3 on the same chip in the same regent due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x088100000010C142",
+ "EventName": "PM_MRK_INST_FROM_L31_REGENT_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L3 on the same chip in the same regent due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x088140000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L31_REGENT_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L3 on the same chip in the same regent due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x080240000000C142",
+ "EventName": "PM_MRK_DATA_FROM_REGENT_L2L3_SHR",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 or L3 on the same chip in the same regent due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x080240000020C142",
+ "EventName": "PM_MRK_DATA_FROM_REGENT_L2L3_SHR_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 or L3 on the same chip in the same regent due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x084240000000C142",
+ "EventName": "PM_MRK_DATA_FROM_REGENT_L2L3_MOD",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 or L3 on the same chip in the same regent due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x084240000020C142",
+ "EventName": "PM_MRK_DATA_FROM_REGENT_L2L3_MOD_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 or L3 on the same chip in the same regent due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x080300000000C142",
+ "EventName": "PM_MRK_INST_FROM_REGENT_L2L3",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 or L3 on the same chip in the same regent due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x080340000000C142",
+ "EventName": "PM_MRK_DATA_FROM_REGENT_L2L3",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 or L3 on the same chip in the same regent due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x080300000010C142",
+ "EventName": "PM_MRK_INST_FROM_REGENT_L2L3_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 or L3 on the same chip in the same regent due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x080340000020C142",
+ "EventName": "PM_MRK_DATA_FROM_REGENT_L2L3_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 or L3 on the same chip in the same regent due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0A0040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L21_NON_REGENT_SHR",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 on the same chip in a different regent due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0A0040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L21_NON_REGENT_SHR_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 on the same chip in a different regent due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0A4040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L21_NON_REGENT_MOD",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 on the same chip in a different regent due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0A4040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L21_NON_REGENT_MOD_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 on the same chip in a different regent due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0A0100000000C142",
+ "EventName": "PM_MRK_INST_FROM_L21_NON_REGENT",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 on the same chip in a different regent due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0A0140000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L21_NON_REGENT",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 on the same chip in a different regent due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0A0100000010C142",
+ "EventName": "PM_MRK_INST_FROM_L21_NON_REGENT_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 on the same chip in a different regent due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0A0140000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L21_NON_REGENT_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 on the same chip in a different regent due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0A8040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L31_NON_REGENT_SHR",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L3 on the same chip in a different regent due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0A8040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L31_NON_REGENT_SHR_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L3 on the same chip in a different regent due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0AC040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L31_NON_REGENT_MOD",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L3 on the same chip in a different regent due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0AC040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L31_NON_REGENT_MOD_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L3 on the same chip in a different regent due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0A8100000000C142",
+ "EventName": "PM_MRK_INST_FROM_L31_NON_REGENT",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L3 on the same chip in a different regent due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0A8140000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L31_NON_REGENT",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L3 on the same chip in a different regent due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0A8100000010C142",
+ "EventName": "PM_MRK_INST_FROM_L31_NON_REGENT_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L3 on the same chip in a different regent due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0A8140000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L31_NON_REGENT_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L3 on the same chip in a different regent due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0A0240000000C142",
+ "EventName": "PM_MRK_DATA_FROM_NON_REGENT_L2L3_SHR",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 or L3 on the same chip in a different regent due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0A0240000020C142",
+ "EventName": "PM_MRK_DATA_FROM_NON_REGENT_L2L3_SHR_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 or L3 on the same chip in a different regent due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0A4240000000C142",
+ "EventName": "PM_MRK_DATA_FROM_NON_REGENT_L2L3_MOD",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 or L3 on the same chip in a different regent due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0A4240000020C142",
+ "EventName": "PM_MRK_DATA_FROM_NON_REGENT_L2L3_MOD_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 or L3 on the same chip in a different regent due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0A0300000000C142",
+ "EventName": "PM_MRK_INST_FROM_NON_REGENT_L2L3",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 or L3 on the same chip in a different regent due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0A0340000000C142",
+ "EventName": "PM_MRK_DATA_FROM_NON_REGENT_L2L3",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 or L3 on the same chip in a different regent due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0A0300000010C142",
+ "EventName": "PM_MRK_INST_FROM_NON_REGENT_L2L3_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 or L3 on the same chip in a different regent due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0A0340000020C142",
+ "EventName": "PM_MRK_DATA_FROM_NON_REGENT_L2L3_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 or L3 on the same chip in a different regent due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x094100000000C142",
+ "EventName": "PM_MRK_INST_FROM_LMEM",
+ "BriefDescription": "The processor's instruction cache was reloaded from the local chip's memory due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x094040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_LMEM",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local chip's memory due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x094100000010C142",
+ "EventName": "PM_MRK_INST_FROM_LMEM_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from the local chip's memory due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x094040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_LMEM_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local chip's memory due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x098040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L_OC_CACHE",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local chip's OpenCAPI cache due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x098040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L_OC_CACHE_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local chip's OpenCAPI cache due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x09C040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L_OC_MEM",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local chip's OpenCAPI memory due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x09C040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L_OC_MEM_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local chip's OpenCAPI memory due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x098100000000C142",
+ "EventName": "PM_MRK_INST_FROM_L_OC_ANY",
+ "BriefDescription": "The processor's instruction cache was reloaded from the local chip's OpenCAPI cache or memory due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x098140000000C142",
+ "EventName": "PM_MRK_DATA_FROM_L_OC_ANY",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local chip's OpenCAPI cache or memory due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x098100000010C142",
+ "EventName": "PM_MRK_INST_FROM_L_OC_ANY_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from the local chip's OpenCAPI cache or memory due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x098140000020C142",
+ "EventName": "PM_MRK_DATA_FROM_L_OC_ANY_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the local chip's OpenCAPI cache or memory due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C0040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_RL2_SHR",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 from a remote chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C0040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_RL2_SHR_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 from a remote chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C4040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_RL2_MOD",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 from a remote chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C4040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_RL2_MOD_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 from a remote chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C0100000000C142",
+ "EventName": "PM_MRK_INST_FROM_RL2",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 from a remote chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C0140000000C142",
+ "EventName": "PM_MRK_DATA_FROM_RL2",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 from a remote chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C0100000010C142",
+ "EventName": "PM_MRK_INST_FROM_RL2_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 from a remote chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C0140000020C142",
+ "EventName": "PM_MRK_DATA_FROM_RL2_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 from a remote chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C8040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_RL3_SHR",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L3 from a remote chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C8040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_RL3_SHR_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L3 from a remote chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0CC040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_RL3_MOD",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L3 from a remote chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0CC040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_RL3_MOD_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L3 from a remote chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C8100000000C142",
+ "EventName": "PM_MRK_INST_FROM_RL3",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L3 from a remote chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C8140000000C142",
+ "EventName": "PM_MRK_DATA_FROM_RL3",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L3 from a remote chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C8100000010C142",
+ "EventName": "PM_MRK_INST_FROM_RL3_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L3 from a remote chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C8140000020C142",
+ "EventName": "PM_MRK_DATA_FROM_RL3_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L3 from a remote chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C0240000000C142",
+ "EventName": "PM_MRK_DATA_FROM_RL2L3_SHR",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 or L3 from a remote chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C0240000020C142",
+ "EventName": "PM_MRK_DATA_FROM_RL2L3_SHR_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 or L3 from a remote chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C4240000000C142",
+ "EventName": "PM_MRK_DATA_FROM_RL2L3_MOD",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 or L3 from a remote chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C4240000020C142",
+ "EventName": "PM_MRK_DATA_FROM_RL2L3_MOD_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 or L3 from a remote chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C0300000000C142",
+ "EventName": "PM_MRK_INST_FROM_RL2L3",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 or L3 from a remote chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C0340000000C142",
+ "EventName": "PM_MRK_DATA_FROM_RL2L3",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 or L3 from a remote chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C0300000010C142",
+ "EventName": "PM_MRK_INST_FROM_RL2L3_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 or L3 from a remote chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C0340000020C142",
+ "EventName": "PM_MRK_DATA_FROM_RL2L3_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 or L3 from a remote chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0D4100000000C142",
+ "EventName": "PM_MRK_INST_FROM_RMEM",
+ "BriefDescription": "The processor's instruction cache was reloaded from remote memory (MC slow) due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0D4040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_RMEM",
+ "BriefDescription": "The processor's L1 data cache was reloaded from remote memory (MC slow) due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0D4100000010C142",
+ "EventName": "PM_MRK_INST_FROM_RMEM_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from remote memory (MC slow) due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0D4040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_RMEM_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from remote memory (MC slow) due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0D8040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_R_OC_CACHE",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a remote chip's OpenCAPI cache due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0D8040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_R_OC_CACHE_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a remote chip's OpenCAPI cache due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0DC040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_R_OC_MEM",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a remote chip's OpenCAPI memory due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0DC040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_R_OC_MEM_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a remote chip's OpenCAPI memory due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0D8100000000C142",
+ "EventName": "PM_MRK_INST_FROM_R_OC_ANY",
+ "BriefDescription": "The processor's instruction cache was reloaded from a remote chip's OpenCAPI cache or memory due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0D8140000000C142",
+ "EventName": "PM_MRK_DATA_FROM_R_OC_ANY",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a remote chip's OpenCAPI cache or memory due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0D8100000010C142",
+ "EventName": "PM_MRK_INST_FROM_R_OC_ANY_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from a remote chip's OpenCAPI cache or memory due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0D8140000020C142",
+ "EventName": "PM_MRK_DATA_FROM_R_OC_ANY_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a remote chip's OpenCAPI cache or memory due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0E0040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_DL2_SHR",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 from a distant chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0E0040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_DL2_SHR_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 from a distant chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0E4040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_DL2_MOD",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 from a distant chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0E4040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_DL2_MOD_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 from a distant chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0E0100000000C142",
+ "EventName": "PM_MRK_INST_FROM_DL2",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 from a distant chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0E0140000000C142",
+ "EventName": "PM_MRK_DATA_FROM_DL2",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 from a distant chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0E0100000010C142",
+ "EventName": "PM_MRK_INST_FROM_DL2_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 from a distant chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0E0140000020C142",
+ "EventName": "PM_MRK_DATA_FROM_DL2_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 from a distant chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0E8040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_DL3_SHR",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L3 from a distant chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0E8040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_DL3_SHR_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L3 from a distant chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0EC040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_DL3_MOD",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L3 from a distant chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0EC040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_DL3_MOD_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L3 from a distant chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0E8100000000C142",
+ "EventName": "PM_MRK_INST_FROM_DL3",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L3 from a distant chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0E8140000000C142",
+ "EventName": "PM_MRK_DATA_FROM_DL3",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L3 from a distant chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0E8100000010C142",
+ "EventName": "PM_MRK_INST_FROM_DL3_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L3 from a distant chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0E8140000020C142",
+ "EventName": "PM_MRK_DATA_FROM_DL3_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L3 from a distant chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0E0240000000C142",
+ "EventName": "PM_MRK_DATA_FROM_DL2L3_SHR",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 or L3 from a distant chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0E0240000020C142",
+ "EventName": "PM_MRK_DATA_FROM_DL2L3_SHR_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a valid line that was not in the M (exclusive) state from another core's L2 or L3 from a distant chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0E4240000000C142",
+ "EventName": "PM_MRK_DATA_FROM_DL2L3_MOD",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 or L3 from a distant chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0E4240000020C142",
+ "EventName": "PM_MRK_DATA_FROM_DL2L3_MOD_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded with a line in the M (exclusive) state from another core's L2 or L3 from a distant chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0E0300000000C142",
+ "EventName": "PM_MRK_INST_FROM_DL2L3",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 or L3 from a distant chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0E0340000000C142",
+ "EventName": "PM_MRK_DATA_FROM_DL2L3",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 or L3 from a distant chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0E0300000010C142",
+ "EventName": "PM_MRK_INST_FROM_DL2L3_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 or L3 from a distant chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0E0340000020C142",
+ "EventName": "PM_MRK_DATA_FROM_DL2L3_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 or L3 from a distant chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0F4100000000C142",
+ "EventName": "PM_MRK_INST_FROM_DMEM",
+ "BriefDescription": "The processor's instruction cache was reloaded from distant memory (MC slow) due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0F4040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_DMEM",
+ "BriefDescription": "The processor's L1 data cache was reloaded from distant memory (MC slow) due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0F4100000010C142",
+ "EventName": "PM_MRK_INST_FROM_DMEM_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from distant memory (MC slow) due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0F4040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_DMEM_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from distant memory (MC slow) due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0F8040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_D_OC_CACHE",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a distant chip's OpenCAPI cache due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0F8040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_D_OC_CACHE_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a distant chip's OpenCAPI cache due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0FC040000000C142",
+ "EventName": "PM_MRK_DATA_FROM_D_OC_MEM",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a distant chip's OpenCAPI memory due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0FC040000020C142",
+ "EventName": "PM_MRK_DATA_FROM_D_OC_MEM_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a distant chip's OpenCAPI memory due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0F8100000000C142",
+ "EventName": "PM_MRK_INST_FROM_D_OC_ANY",
+ "BriefDescription": "The processor's instruction cache was reloaded from a distant chip's OpenCAPI cache or memory due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0F8140000000C142",
+ "EventName": "PM_MRK_DATA_FROM_D_OC_ANY",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a distant chip's OpenCAPI cache or memory due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0F8100000010C142",
+ "EventName": "PM_MRK_INST_FROM_D_OC_ANY_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from a distant chip's OpenCAPI cache or memory due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0F8140000020C142",
+ "EventName": "PM_MRK_DATA_FROM_D_OC_ANY_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a distant chip's OpenCAPI cache or memory due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x080B00000000C142",
+ "EventName": "PM_MRK_INST_FROM_ONCHIP_CACHE",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 or L3 from the same chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x080B40000000C142",
+ "EventName": "PM_MRK_DATA_FROM_ONCHIP_CACHE",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 or L3 from the same chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x080B00000010C142",
+ "EventName": "PM_MRK_INST_FROM_ONCHIP_CACHE_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 or L3 from the same chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x080B40000020C142",
+ "EventName": "PM_MRK_DATA_FROM_ONCHIP_CACHE_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 or L3 from the same chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C0B00000000C142",
+ "EventName": "PM_MRK_INST_FROM_OFFCHIP_CACHE",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 or L3 from a different chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C0B40000000C142",
+ "EventName": "PM_MRK_DATA_FROM_OFFCHIP_CACHE",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 or L3 from a different chip due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C0B00000010C142",
+ "EventName": "PM_MRK_INST_FROM_OFFCHIP_CACHE_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from another core's L2 or L3 from a different chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x0C0B40000020C142",
+ "EventName": "PM_MRK_DATA_FROM_OFFCHIP_CACHE_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from another core's L2 or L3 from a different chip due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x095900000000C142",
+ "EventName": "PM_MRK_INST_FROM_ANY_MEMORY",
+ "BriefDescription": "The processor's instruction cache was reloaded from any chip's memory (MC slow) due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x095840000000C142",
+ "EventName": "PM_MRK_DATA_FROM_ANY_MEMORY",
+ "BriefDescription": "The processor's L1 data cache was reloaded from any chip's memory (MC slow) due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x095900000010C142",
+ "EventName": "PM_MRK_INST_FROM_ANY_MEMORY_ALL",
+ "BriefDescription": "The processor's instruction cache was reloaded from any chip's memory (MC slow) due to a demand miss or prefetch reload for a marked instruction."
+ },
+ {
+ "EventCode": "0x095840000020C142",
+ "EventName": "PM_MRK_DATA_FROM_ANY_MEMORY_ALL",
+ "BriefDescription": "The processor's L1 data cache was reloaded from any chip's memory (MC slow) due to a demand miss or prefetch reload for a marked instruction."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/metrics.json b/tools/perf/pmu-events/arch/powerpc/power10/metrics.json
index 4d66b75c6ad5..a36621858ea3 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/metrics.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/metrics.json
@@ -435,6 +435,13 @@
"ScaleUnit": "1%"
},
{
+ "BriefDescription": "Percentage of completed instructions that were demand fetches that missed the L1 and L2 instruction cache",
+ "MetricExpr": "PM_INST_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "L2_INST_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
"BriefDescription": "Percentage of completed instructions that were demand fetches that reloaded from beyond the L3 icache",
"MetricExpr": "PM_INST_FROM_L3MISS / PM_RUN_INST_CMPL * 100",
"MetricGroup": "General",
@@ -467,6 +474,13 @@
"MetricName": "LOADS_PER_INST"
},
{
+ "BriefDescription": "Percentage of demand loads that reloaded from the L2 per completed instruction",
+ "MetricExpr": "PM_DATA_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_L2_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
"BriefDescription": "Percentage of demand loads that reloaded from beyond the L2 per completed instruction",
"MetricExpr": "PM_DATA_FROM_L2MISS / PM_RUN_INST_CMPL * 100",
"MetricGroup": "dL1_Reloads",
@@ -474,6 +488,34 @@
"ScaleUnit": "1%"
},
{
+ "BriefDescription": "Percentage of demand loads that reloaded using modified data from another core's L2 or L3 on a remote chip, per completed instruction",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_RL2L3_MOD_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand loads that reloaded using shared data from another core's L2 or L3 on a remote chip, per completed instruction",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_RL2L3_SHR_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand loads that reloaded from the L3 per completed instruction",
+ "MetricExpr": "PM_DATA_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_L3_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand loads that reloaded with data brought into the L3 by prefetch per completed instruction",
+ "MetricExpr": "PM_DATA_FROM_L3_MEPF * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_L3_MEPF_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
"BriefDescription": "Percentage of demand loads that reloaded from beyond the L3 per completed instruction",
"MetricExpr": "PM_DATA_FROM_L3MISS / PM_RUN_INST_CMPL * 100",
"MetricGroup": "dL1_Reloads",
@@ -481,6 +523,66 @@
"ScaleUnit": "1%"
},
{
+ "BriefDescription": "Percentage of demand loads that reloaded using modified data from another core's L2 or L3 on a distant chip, per completed instruction",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_DL2L3_MOD_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand loads that reloaded using shared data from another core's L2 or L3 on a distant chip, per completed instruction",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_DL2L3_SHR_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand loads that reloaded from local memory per completed instruction",
+ "MetricExpr": "PM_DATA_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_LMEM_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand loads that reloaded from remote memory per completed instruction",
+ "MetricExpr": "PM_DATA_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_RMEM_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand loads that reloaded from distant memory per completed instruction",
+ "MetricExpr": "PM_DATA_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_DMEM_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of data reloads from local memory per data reloads from any memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM * 100 / (PM_DATA_FROM_LMEM + PM_DATA_FROM_RMEM + PM_DATA_FROM_DMEM)",
+ "MetricGroup": "Memory",
+ "MetricName": "MEM_LOCALITY",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Number of data reloads from local memory per data reloads from remote memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / PM_DATA_FROM_RMEM",
+ "MetricGroup": "Memory",
+ "MetricName": "LD_LMEM_PER_LD_RMEM"
+ },
+ {
+ "BriefDescription": "Number of data reloads from local memory per data reloads from distant memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / PM_DATA_FROM_DMEM",
+ "MetricGroup": "Memory",
+ "MetricName": "LD_LMEM_PER_LD_DMEM"
+ },
+ {
+ "BriefDescription": "Number of data reloads from local memory per data reloads from distant and remote memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / (PM_DATA_FROM_DMEM + PM_DATA_FROM_RMEM)",
+ "MetricGroup": "Memory",
+ "MetricName": "LD_LMEM_PER_LD_MEM"
+ },
+ {
"BriefDescription": "Percentage of ITLB misses per completed run instruction",
"MetricExpr": "PM_ITLB_MISS / PM_RUN_INST_CMPL * 100",
"MetricGroup": "General",
@@ -488,6 +590,12 @@
"ScaleUnit": "1%"
},
{
+ "BriefDescription": "Number of data reloads from remote memory per data reloads from distant memory",
+ "MetricExpr": "PM_DATA_FROM_RMEM / PM_DATA_FROM_DMEM",
+ "MetricGroup": "Memory",
+ "MetricName": "LD_RMEM_PER_LD_DMEM"
+ },
+ {
"BriefDescription": "Percentage of DERAT misses with 4k page size per completed instruction",
"MetricExpr": "PM_DERAT_MISS_4K / PM_RUN_INST_CMPL * 100",
"MetricGroup": "Translation",
@@ -502,6 +610,76 @@
"ScaleUnit": "1%"
},
{
+ "BriefDescription": "Percentage of ICache misses that were reloaded from the L2",
+ "MetricExpr": "PM_INST_FROM_L2 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "Instruction_Stats",
+ "MetricName": "INST_FROM_L2",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of ICache misses that were reloaded from the L3",
+ "MetricExpr": "PM_INST_FROM_L3 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "Instruction_Stats",
+ "MetricName": "INST_FROM_L3",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of ICache misses that were reloaded from local memory",
+ "MetricExpr": "PM_INST_FROM_LMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "Instruction_Stats",
+ "MetricName": "INST_FROM_LMEM",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of ICache misses that were reloaded from remote memory",
+ "MetricExpr": "PM_INST_FROM_RMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "Instruction_Stats",
+ "MetricName": "INST_FROM_RMEM",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of ICache misses that were reloaded from distant memory",
+ "MetricExpr": "PM_INST_FROM_DMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "Instruction_Stats",
+ "MetricName": "INST_FROM_DMEM",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of ICache reloads from the L2 per completed instruction",
+ "MetricExpr": "PM_INST_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "Instruction_Misses",
+ "MetricName": "INST_FROM_L2_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of ICache reloads from the L3 per completed instruction",
+ "MetricExpr": "PM_INST_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "Instruction_Misses",
+ "MetricName": "INST_FROM_L3_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of ICache reloads from local memory per completed instruction",
+ "MetricExpr": "PM_INST_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "Instruction_Misses",
+ "MetricName": "INST_FROM_LMEM_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of ICache reloads from remote memory per completed instruction",
+ "MetricExpr": "PM_INST_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "Instruction_Misses",
+ "MetricName": "INST_FROM_RMEM_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of ICache reloads from distant memory per completed instruction",
+ "MetricExpr": "PM_INST_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "Instruction_Misses",
+ "MetricName": "INST_FROM_DMEM_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
"BriefDescription": "Average number of run cycles per completed instruction",
"MetricExpr": "PM_RUN_CYC / PM_RUN_INST_CMPL",
"MetricGroup": "General",
@@ -608,6 +786,13 @@
"ScaleUnit": "1%"
},
{
+ "BriefDescription": "Percentage of demand load misses that reloaded from the local L2",
+ "MetricExpr": "PM_DATA_FROM_L2 * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_L2",
+ "ScaleUnit": "1%"
+ },
+ {
"BriefDescription": "Percentage of demand load misses that reloaded from beyond the local L2",
"MetricExpr": "PM_DATA_FROM_L2MISS / PM_LD_DEMAND_MISS_L1 * 100",
"MetricGroup": "dL1_Reloads",
@@ -615,6 +800,13 @@
"ScaleUnit": "1%"
},
{
+ "BriefDescription": "Percentage of demand load misses that reloaded from the local L3",
+ "MetricExpr": "PM_DATA_FROM_L3 * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_L3",
+ "ScaleUnit": "1%"
+ },
+ {
"BriefDescription": "Percentage of demand load misses that reloaded from beyond the local L3",
"MetricExpr": "PM_DATA_FROM_L3MISS / PM_LD_DEMAND_MISS_L1 * 100",
"MetricGroup": "dL1_Reloads",
@@ -622,6 +814,188 @@
"ScaleUnit": "1%"
},
{
+ "BriefDescription": "Percentage of demand load misses that reloaded from the local L3 with modified data",
+ "MetricExpr": "PM_DATA_FROM_L3_MEPF * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_L3_MEPF",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from another core's L2 on the same regent with modified data",
+ "MetricExpr": "PM_DATA_FROM_L21_REGENT_MOD * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_L21_REGENT_MOD",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from another core's L2 on the same regent with shared data",
+ "MetricExpr": "PM_DATA_FROM_L21_REGENT_SHR * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_L21_REGENT_SHR",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from another core's L2 on the same chip in a different regent with modified data",
+ "MetricExpr": "PM_DATA_FROM_L21_NON_REGENT_MOD * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_L21_NON_REGENT_MOD",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from another core's L2 on the same chip in a different regent with shared data",
+ "MetricExpr": "PM_DATA_FROM_L21_NON_REGENT_SHR * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_L21_NON_REGENT_SHR",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from another core's L3 on the same regent with modified data",
+ "MetricExpr": "PM_DATA_FROM_L31_REGENT_MOD * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_L31_REGENT_MOD",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from another core's L3 on the same regent with shared data",
+ "MetricExpr": "PM_DATA_FROM_L31_REGENT_SHR * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_L31_REGENT_SHR",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from another core's L3 on the same chip in a different regent with modified data",
+ "MetricExpr": "PM_DATA_FROM_L31_NON_REGENT_MOD * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_L31_NON_REGENT_MOD",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from another core's L3 on the same chip in a different regent with shared data",
+ "MetricExpr": "PM_DATA_FROM_L31_NON_REGENT_SHR * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_L31_NON_REGENT_SHR",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from another core's L2 on a remote chip with modified data",
+ "MetricExpr": "PM_DATA_FROM_RL2_MOD * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_RL2_MOD",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from another core's L2 on a remote chip with shared data",
+ "MetricExpr": "PM_DATA_FROM_RL2_SHR * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_RL2_SHR",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from another core's L3 on a remote chip with modified data",
+ "MetricExpr": "PM_DATA_FROM_RL3_MOD * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_RL3_MOD",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from another core's L3 on a remote chip with shared data",
+ "MetricExpr": "PM_DATA_FROM_RL3_SHR * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_RL3_SHR",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from another core's L2 on a distant chip with modified data",
+ "MetricExpr": "PM_DATA_FROM_DL2_MOD * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_DL2_MOD",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from another core's L2 on a distant chip with shared data",
+ "MetricExpr": "PM_DATA_FROM_DL2_SHR * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_DL2_SHR",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from another core's L3 on a distant chip with modified data",
+ "MetricExpr": "PM_DATA_FROM_DL3_MOD * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_DL3_MOD",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from another core's L3 on a distant chip with shared data",
+ "MetricExpr": "PM_DATA_FROM_DL3_SHR * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_DL3_SHR",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from the local chip's memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_LMEM",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from the local chip's OpenCAPI Cache",
+ "MetricExpr": "PM_DATA_FROM_L_OC_CACHE * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_L_OC_CACHE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from the local chip's OpenCAPI memory",
+ "MetricExpr": "PM_DATA_FROM_L_OC_MEM * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_L_OC_MEM",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from a remote chip's memory",
+ "MetricExpr": "PM_DATA_FROM_RMEM * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_RMEM",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from a remote chip's OpenCAPI Cache",
+ "MetricExpr": "PM_DATA_FROM_R_OC_CACHE * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_R_OC_CACHE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from a remote chip's OpenCAPI memory",
+ "MetricExpr": "PM_DATA_FROM_R_OC_MEM * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_R_OC_MEM",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from a distant chip's memory",
+ "MetricExpr": "PM_DATA_FROM_DMEM * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_DMEM",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from a distant chip's OpenCAPI Cache",
+ "MetricExpr": "PM_DATA_FROM_D_OC_CACHE * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_D_OC_CACHE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from a distant chip's OpenCAPI memory",
+ "MetricExpr": "PM_DATA_FROM_D_OC_MEM * 100 / PM_LD_DEMAND_MISS_L1",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_D_OC_MEM",
+ "ScaleUnit": "1%"
+ },
+ {
"BriefDescription": "Percentage of cycles stalled due to the NTC instruction waiting for a load miss to resolve from a source beyond the local L2 and local L3",
"MetricExpr": "DMISS_L3MISS_STALL_CPI / RUN_CPI * 100",
"MetricGroup": "General",
@@ -687,6 +1061,13 @@
"ScaleUnit": "1%"
},
{
+ "BriefDescription": "Percentage of ICache misses that were reloaded from beyond the local L2",
+ "MetricExpr": "PM_INST_FROM_L2MISS * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "Instruction_Misses",
+ "MetricName": "INST_FROM_L2_MISS",
+ "ScaleUnit": "1%"
+ },
+ {
"BriefDescription": "Percentage of icache misses that were reloaded from beyond the local L3",
"MetricExpr": "PM_INST_FROM_L3MISS / PM_L1_ICACHE_MISS * 100",
"MetricGroup": "Instruction_Misses",
@@ -694,6 +1075,13 @@
"ScaleUnit": "1%"
},
{
+ "BriefDescription": "Percentage of ICache reloads from beyond the L2 per completed instruction",
+ "MetricExpr": "PM_INST_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "Instruction_Misses",
+ "MetricName": "INST_FROM_L2_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
"BriefDescription": "Percentage of icache reloads from the beyond the L3 per completed instruction",
"MetricExpr": "PM_INST_FROM_L3MISS / PM_RUN_INST_CMPL * 100",
"MetricGroup": "Instruction_Misses",
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/others.json b/tools/perf/pmu-events/arch/powerpc/power10/others.json
index 0e21e7ba1959..fcf8a8ebe7bd 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/others.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/others.json
@@ -90,11 +90,6 @@
"BriefDescription": "The L1 cache was reloaded with a line that fulfills a demand miss request. Counted at reload time, before finish."
},
{
- "EventCode": "0x300FE",
- "EventName": "PM_DATA_FROM_L3MISS",
- "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss."
- },
- {
"EventCode": "0x40012",
"EventName": "PM_L1_ICACHE_RELOADED_ALL",
"BriefDescription": "Counts all instruction cache reloads includes demand, prefetch, prefetch turned into demand and demand turned into prefetch."
@@ -113,10 +108,5 @@
"EventCode": "0x400F0",
"EventName": "PM_LD_DEMAND_MISS_L1_FIN",
"BriefDescription": "Load missed L1, counted at finish time."
- },
- {
- "EventCode": "0x400FE",
- "EventName": "PM_DATA_FROM_MEMORY",
- "BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss."
}
]
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
index c606ae03cd27..0e0253d0e757 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
@@ -195,7 +195,7 @@
"BriefDescription": "Threshold counter exceeded a value of 128."
},
{
- "EventCode": "0x400FA",
+ "EventCode": "0x500FA",
"EventName": "PM_RUN_INST_CMPL",
"BriefDescription": "PowerPC instruction completed while the run latch is set."
}
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/translation.json b/tools/perf/pmu-events/arch/powerpc/power10/translation.json
index ea73900d248a..a96f76797da0 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/translation.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/translation.json
@@ -10,11 +10,6 @@
"BriefDescription": "Stores completed from S2Q (2nd-level store queue). This event includes regular stores, stcx and cache inhibited stores. The following operations are excluded (pteupdate, snoop tlbie complete, store atomics, miso, load atomic payloads, tlbie, tlbsync, slbieg, isync, msgsnd, slbiag, cpabort, copy, tcheck, tend, stsync, dcbst, icbi, dcbf, hwsync, lwsync, ptesync, eieio, msgsync)."
},
{
- "EventCode": "0x200FE",
- "EventName": "PM_DATA_FROM_L2MISS",
- "BriefDescription": "The processor's L1 data cache was reloaded from a source beyond the local core's L2 due to a demand miss."
- },
- {
"EventCode": "0x300F0",
"EventName": "PM_ST_MISS_L1",
"BriefDescription": "Store Missed L1."
diff --git a/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json b/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json
index c7e7528db315..4d423b149ad1 100644
--- a/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json
+++ b/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json
@@ -12,5 +12,13 @@
"EventName": "sys_ccn_pmu.read_cycles",
"Unit": "sys_ccn_pmu",
"Compat": "0x01"
+ },
+ {
+ "BriefDescription": "Counts total cache misses in first lookup result (high priority)",
+ "EventidCode": "0x1",
+ "NodeType": "0x5",
+ "EventName": "sys_cmn_pmu.hnf_cache_miss",
+ "Unit": "sys_cmn_pmu",
+ "Compat": "(434|436|43c|43a).*"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json b/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
index c6780d5c456b..3388b58b8f1a 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
@@ -99,7 +99,7 @@
},
{
"BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
- "MetricExpr": "(cycles\\-t / el\\-start if has_event(cycles\\-t) else 0)",
+ "MetricExpr": "(cycles\\-t / el\\-start if has_event(el\\-start) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_cycles_per_elision",
"ScaleUnit": "1cycles / elision"
@@ -395,13 +395,13 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
"MetricName": "tma_info_inst_mix_ipbranch",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
+ "BriefDescription": "Instruction per (near) call (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.CALL",
"MetricName": "tma_info_inst_mix_ipcall",
"Unit": "cpu_atom"
@@ -726,7 +726,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the numer of issue slots that result in retirement slots.",
+ "BriefDescription": "Counts the number of issue slots that result in retirement slots.",
"DefaultMetricgroupName": "TopdownL1",
"MetricExpr": "TOPDOWN_RETIRING.ALL / tma_info_core_slots",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/frontend.json b/tools/perf/pmu-events/arch/x86/alderlake/frontend.json
index 81349100fe32..542ba4a81996 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/frontend.json
@@ -394,31 +394,61 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled",
+ "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CORE]",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_BUBBLES.CORE",
+ "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_UOPS_NOT_DELIVERED.CORE]",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE]",
+ "CounterMask": "6",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE",
+ "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE]",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK]",
+ "CounterMask": "1",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_BUBBLES.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK]",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled [This event is alias to IDQ_BUBBLES.CORE]",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
- "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_BUBBLES.CORE]",
"SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled",
+ "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled [This event is alias to IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE]",
"CounterMask": "6",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
- "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE]",
"SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled",
+ "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled [This event is alias to IDQ_BUBBLES.CYCLES_FE_WAS_OK]",
"CounterMask": "1",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
"Invert": "1",
- "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_BUBBLES.CYCLES_FE_WAS_OK]",
"SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_core"
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/memory.json b/tools/perf/pmu-events/arch/x86/alderlake/memory.json
index 73d92d5c9f9d..23d36164433f 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/memory.json
@@ -248,7 +248,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache. [L3_MISS_LOCAL is alias to L3_MISS]",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -278,7 +278,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache. [L3_MISS_LOCAL is alias to L3_MISS]",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json b/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json
index cb5b8611064b..f9876bef16da 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json
@@ -239,6 +239,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of near taken branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc0",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Taken branch instructions retired.",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
@@ -412,6 +421,15 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of mispredicted near taken branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
@@ -842,7 +860,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
+ "BriefDescription": "Bubble cycles of BAClear (Unknown Branch).",
"EventCode": "0xad",
"EventName": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
"MSRIndex": "0x3F7",
@@ -1145,7 +1163,7 @@
"BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions",
"EventCode": "0xa4",
"EventName": "TOPDOWN.BR_MISPREDICT_SLOTS",
- "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by (any type of) branch mispredictions. This event estimates number of specualtive operations that were issued but not retired as well as the out-of-order engine recovery past a branch misprediction.",
+ "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by (any type of) branch mispredictions. This event estimates number of speculative operations that were issued but not retired as well as the out-of-order engine recovery past a branch misprediction.",
"SampleAfterValue": "10000003",
"UMask": "0x8",
"Unit": "cpu_core"
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/alderlake/uncore-interconnect.json
index 34fc052d00e4..8bf020a9dfa8 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/uncore-interconnect.json
@@ -25,6 +25,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_ARB_REQ_TRK_REQUEST.DRD",
+ "Deprecated": "1",
"EventCode": "0x81",
"EventName": "UNC_ARB_DAT_REQUESTS.RD",
"PerPkg": "1",
@@ -33,6 +34,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_ARB_DAT_OCCUPANCY.ALL",
+ "Deprecated": "1",
"EventCode": "0x85",
"EventName": "UNC_ARB_IFA_OCCUPANCY.ALL",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json b/tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json
index 06e67e34e1bf..c150c14ac6ed 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json
@@ -328,12 +328,12 @@
"MetricName": "tma_info_inst_mix_idiv_uop_ratio"
},
{
- "BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
"MetricName": "tma_info_inst_mix_ipbranch"
},
{
- "BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
+ "BriefDescription": "Instruction per (near) call (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.CALL",
"MetricName": "tma_info_inst_mix_ipcall"
},
@@ -616,7 +616,7 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the numer of issue slots that result in retirement slots.",
+ "BriefDescription": "Counts the number of issue slots that result in retirement slots.",
"DefaultMetricgroupName": "TopdownL1",
"MetricExpr": "TOPDOWN_RETIRING.ALL / tma_info_core_slots",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/memory.json b/tools/perf/pmu-events/arch/x86/alderlaken/memory.json
index 37259d38a222..863a3ba2b4b2 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/memory.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/memory.json
@@ -59,7 +59,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache. [L3_MISS_LOCAL is alias to L3_MISS]",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -77,7 +77,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache. [L3_MISS_LOCAL is alias to L3_MISS]",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json b/tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json
index fa53ff11a509..3153bab527a9 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json
@@ -91,6 +91,14 @@
"UMask": "0xf7"
},
{
+ "BriefDescription": "Counts the number of near taken branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc0"
+ },
+ {
"BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.INDIRECT",
"Deprecated": "1",
"EventCode": "0xc4",
@@ -184,6 +192,14 @@
"UMask": "0x7e"
},
{
+ "BriefDescription": "Counts the number of mispredicted near taken branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x80"
+ },
+ {
"BriefDescription": "This event is deprecated. Refer to new event BR_MISP_RETIRED.INDIRECT",
"Deprecated": "1",
"EventCode": "0xc5",
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/alderlaken/uncore-interconnect.json
index 4af695a5e755..8bf020a9dfa8 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/uncore-interconnect.json
@@ -8,6 +8,56 @@
"Unit": "ARB"
},
{
+ "BriefDescription": "Each cycle counts number of any coherent request at memory controller that were issued by any core.",
+ "EventCode": "0x85",
+ "EventName": "UNC_ARB_DAT_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle counts number of coherent reads pending on data return from memory controller that were issued by any core.",
+ "EventCode": "0x85",
+ "EventName": "UNC_ARB_DAT_OCCUPANCY.RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_ARB_REQ_TRK_REQUEST.DRD",
+ "Deprecated": "1",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_DAT_REQUESTS.RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_ARB_DAT_OCCUPANCY.ALL",
+ "Deprecated": "1",
+ "EventCode": "0x85",
+ "EventName": "UNC_ARB_IFA_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+        "BriefDescription": "Each cycle counts the number of 'valid' coherent Data Read entries. Such entry is defined as valid from when it is allocated till deallocation. Doesn't include prefetches [This event is alias to UNC_ARB_TRK_OCCUPANCY.RD]",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_REQ_TRK_OCCUPANCY.DRD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of all coherent Data Read entries. Doesn't include prefetches [This event is alias to UNC_ARB_TRK_REQUESTS.RD]",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_REQ_TRK_REQUEST.DRD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
"BriefDescription": "Each cycle counts number of all outgoing valid entries in ReqTrk. Such entry is defined as valid from its allocation in ReqTrk till deallocation. Accounts for Coherent and non-coherent traffic.",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
@@ -16,11 +66,27 @@
"Unit": "ARB"
},
{
+        "BriefDescription": "Each cycle counts the number of 'valid' coherent Data Read entries. Such entry is defined as valid from when it is allocated till deallocation. Doesn't include prefetches [This event is alias to UNC_ARB_REQ_TRK_OCCUPANCY.DRD]",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
"BriefDescription": "Counts the number of coherent and in-coherent requests initiated by IA cores, processor graphic units, or LLC.",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.ALL",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of all coherent Data Read entries. Doesn't include prefetches [This event is alias to UNC_ARB_REQ_TRK_REQUEST.DRD]",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/frontend.json b/tools/perf/pmu-events/arch/x86/bonnell/frontend.json
index 8d2f4edfb597..42284c02c11d 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/frontend.json
@@ -63,7 +63,7 @@
"UMask": "0x2"
},
{
- "BriefDescription": "Non-CISC nacro instructions decoded",
+ "BriefDescription": "Non-CISC macro instructions decoded",
"EventCode": "0xAA",
"EventName": "MACRO_INSTS.NON_CISC_DECODED",
"SampleAfterValue": "2000000",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
index 8fc62b8f667d..e1f55fcfa0d0 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
@@ -49,6 +49,12 @@
"ScaleUnit": "100%"
},
{
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "tma_info_system_socket_clks / #num_dies / duration_time / 1e9",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
+ },
+ {
"BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
"MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
"MetricGroup": "smi",
@@ -652,7 +658,7 @@
},
{
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
- "MetricExpr": "64 * (arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@) / 1e6 / duration_time / 1e3",
+ "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time",
"MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW",
"MetricName": "tma_info_system_dram_bw_use",
"PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full"
@@ -691,6 +697,12 @@
"MetricName": "tma_info_system_smt_2t_utilization"
},
{
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "cbox_0@event\\=0x0@",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_system_socket_clks"
+ },
+ {
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "tma_info_thread_clks / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-interconnect.json
index 8a327e0f1441..910395977a6e 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-interconnect.json
@@ -253,7 +253,7 @@
"EventCode": "0x4",
"EventName": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
"PerPkg": "1",
- "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -261,7 +261,7 @@
"EventCode": "0x1",
"EventName": "UNC_I_RxR_BL_DRS_INSERTS",
"PerPkg": "1",
- "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -269,7 +269,7 @@
"EventCode": "0x7",
"EventName": "UNC_I_RxR_BL_DRS_OCCUPANCY",
"PerPkg": "1",
- "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -277,7 +277,7 @@
"EventCode": "0x5",
"EventName": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
"PerPkg": "1",
- "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -285,7 +285,7 @@
"EventCode": "0x2",
"EventName": "UNC_I_RxR_BL_NCB_INSERTS",
"PerPkg": "1",
- "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -293,7 +293,7 @@
"EventCode": "0x8",
"EventName": "UNC_I_RxR_BL_NCB_OCCUPANCY",
"PerPkg": "1",
- "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -301,7 +301,7 @@
"EventCode": "0x6",
"EventName": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
"PerPkg": "1",
- "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -309,7 +309,7 @@
"EventCode": "0x3",
"EventName": "UNC_I_RxR_BL_NCS_INSERTS",
"PerPkg": "1",
- "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -317,7 +317,7 @@
"EventCode": "0x9",
"EventName": "UNC_I_RxR_BL_NCS_OCCUPANCY",
"PerPkg": "1",
- "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-interconnect.json
index e61a23f68899..b9fb216bee16 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-interconnect.json
@@ -271,7 +271,7 @@
"EventCode": "0x4",
"EventName": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
"PerPkg": "1",
- "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -279,7 +279,7 @@
"EventCode": "0x1",
"EventName": "UNC_I_RxR_BL_DRS_INSERTS",
"PerPkg": "1",
- "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -287,7 +287,7 @@
"EventCode": "0x7",
"EventName": "UNC_I_RxR_BL_DRS_OCCUPANCY",
"PerPkg": "1",
- "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -295,7 +295,7 @@
"EventCode": "0x5",
"EventName": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
"PerPkg": "1",
- "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -303,7 +303,7 @@
"EventCode": "0x2",
"EventName": "UNC_I_RxR_BL_NCB_INSERTS",
"PerPkg": "1",
- "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -311,7 +311,7 @@
"EventCode": "0x8",
"EventName": "UNC_I_RxR_BL_NCB_OCCUPANCY",
"PerPkg": "1",
- "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -319,7 +319,7 @@
"EventCode": "0x6",
"EventName": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
"PerPkg": "1",
- "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -327,7 +327,7 @@
"EventCode": "0x3",
"EventName": "UNC_I_RxR_BL_NCS_INSERTS",
"PerPkg": "1",
- "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -335,7 +335,7 @@
"EventCode": "0x9",
"EventName": "UNC_I_RxR_BL_NCS_OCCUPANCY",
"PerPkg": "1",
- "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
index fbb111e40829..84c132af3dfa 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
@@ -1837,7 +1837,7 @@
},
{
"BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
- "MetricExpr": "(cycles\\-t / el\\-start if has_event(cycles\\-t) else 0)",
+ "MetricExpr": "(cycles\\-t / el\\-start if has_event(el\\-start) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_cycles_per_elision",
"ScaleUnit": "1cycles / elision"
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/cache.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/cache.json
new file mode 100644
index 000000000000..ab09bd9fb409
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/cache.json
@@ -0,0 +1,888 @@
+[
+ {
+ "BriefDescription": "L1D.HWPF_MISS",
+ "EventCode": "0x51",
+ "EventName": "L1D.HWPF_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL_PERIODS",
+ "PublicDescription": "Counts number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event L1D_PEND_MISS.L2_STALLS",
+ "Deprecated": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.L2_STALL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+        "BriefDescription": "Number of cycles a demand request has waited in the L1D due to lack of L2 resources.",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.L2_STALLS",
+        "PublicDescription": "Counts number of cycles a demand request has waited in the L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of L1D misses that are outstanding",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L2 cache lines filling L2",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f"
+ },
+ {
+ "BriefDescription": "L2_LINES_OUT.NON_SILENT",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache when triggered by an L2 cache fill.",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.SILENT",
+ "PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cache lines that have been L2 hardware prefetched but not used by demand accesses",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.USELESS_HWPF",
+ "PublicDescription": "Counts the number of cache lines that have been prefetched by the L2 hardware prefetcher but not used by demand access when evicted from the L2 cache",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "All accesses to L2 cache [This event is alias to L2_RQSTS.REFERENCES]",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.ALL",
+ "PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. [This event is alias to L2_RQSTS.REFERENCES]",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
+ },
+ {
+ "BriefDescription": "Read requests with true-miss in L2 cache. [This event is alias to L2_RQSTS.MISS]",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.MISS",
+ "PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses. [This event is alias to L2_RQSTS.MISS]",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3f"
+ },
+ {
+ "BriefDescription": "L2 code requests",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "Counts the total number of L2 code requests.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe4"
+ },
+ {
+ "BriefDescription": "Demand Data Read access L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PublicDescription": "Counts Demand Data Read requests accessing the L2 cache. These requests may hit or miss L2 cache. True-miss exclude misses that were merged with ongoing L2 misses. An access is counted once.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe1"
+ },
+ {
+ "BriefDescription": "Demand requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "PublicDescription": "Counts demand requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x27"
+ },
+ {
+ "BriefDescription": "Demand requests to L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "PublicDescription": "Counts demand requests to L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe7"
+ },
+ {
+ "BriefDescription": "L2_RQSTS.ALL_HWPF",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_HWPF",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf0"
+ },
+ {
+ "BriefDescription": "RFO requests to L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe2"
+ },
+ {
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc4"
+ },
+ {
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Counts L2 cache misses when fetching instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x24"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc1"
+ },
+ {
+ "BriefDescription": "Demand Data Read miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PublicDescription": "Counts demand Data Read requests with true-miss in the L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. An access is counted once.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "L2_RQSTS.HWPF_MISS",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.HWPF_MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Read requests with true-miss in L2 cache. [This event is alias to L2_REQUEST.MISS]",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.MISS",
+ "PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses. [This event is alias to L2_REQUEST.MISS]",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3f"
+ },
+ {
+ "BriefDescription": "All accesses to L2 cache [This event is alias to L2_REQUEST.ALL]",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. [This event is alias to L2_REQUEST.ALL]",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
+ },
+ {
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2"
+ },
+ {
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x22"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that hit L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_HIT",
+ "PublicDescription": "Counts Software prefetch requests that hit the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc8"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that miss L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_MISS",
+ "PublicDescription": "Counts Software prefetch requests that miss the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x28"
+ },
+ {
+ "BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Core-originated cacheable requests that refer to L3 (Except hardware prefetches to the L3)",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4f"
+ },
+ {
+ "BriefDescription": "Retired load instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x81"
+ },
+ {
+ "BriefDescription": "Retired store instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired store instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x82"
+ },
+ {
+ "BriefDescription": "All retired memory instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired memory instructions - loads and stores.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x83"
+ },
+ {
+ "BriefDescription": "Retired load instructions with locked access.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with locked access.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x42"
+ },
+ {
+ "BriefDescription": "Retired load instructions that miss the STLB.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Retired store instructions that miss the STLB.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x12"
+ },
+ {
+ "BriefDescription": "Completed demand load uops that miss the L1 d-cache.",
+ "EventCode": "0x43",
+ "EventName": "MEM_LOAD_COMPLETED.L1_MISS_ANY",
+ "PublicDescription": "Number of completed demand load requests that missed the L1 data cache including shadow misses (FB hits, merge to an ongoing L1D miss)",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xfd"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources was forwarded from a remote cache",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions whose data sources was forwarded from a remote cache.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
+ "Data_LA": "1",
+ "EventCode": "0xd4",
+ "EventName": "MEM_LOAD_MISC_RETIRED.UC",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.FB_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions missed L2 cache as data sources.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "MEM_STORE_RETIRED.L2_HIT",
+ "EventCode": "0x44",
+ "EventName": "MEM_STORE_RETIRED.L2_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired memory uops for any access",
+ "EventCode": "0xe5",
+ "EventName": "MEM_UOP_RETIRED.ANY",
+ "PublicDescription": "Number of retired micro-operations (uops) for load or store memory accesses",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.SNC_CACHE.HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1008000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.SNC_CACHE.HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x808000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that resulted in a snoop that hit in another core, which did not forward the data.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that resulted in a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.REMOTE_CACHE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1030000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.REMOTE_CACHE.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x830000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.SNC_CACHE.HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1008000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.SNC_CACHE.HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x808000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.SNC_CACHE.HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1008000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.SNC_CACHE.HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x808000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L3.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80082380",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F003C4477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C4477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop that hit in another core, which did not forward the data.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C4477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C4477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop was sent and data was returned (Modified or Not Modified).",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1830004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1030004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x830004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.SNC_CACHE.HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1008004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.SNC_CACHE.HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x808004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO), hardware prefetch RFOs (which bring data to L2), and software prefetches for exclusive ownership (PREFETCHW) that hit to a (M)odified cacheline in the L3 or snoop filter.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.RFO_TO_CORE.L3_HIT_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F80040022",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.STREAMING_WR.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80080800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Demand and prefetch data reads",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DATA_RD",
+ "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
+ "Deprecated": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles where at least 1 outstanding demand data read request is pending.",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "For every cycle, increments by the number of outstanding demand data read requests pending.",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "PublicDescription": "For every cycle, increments by the number of outstanding demand data read requests pending. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts bus locks, accounts for cache line split locks and UC locks.",
+ "EventCode": "0x2c",
+ "EventName": "SQ_MISC.BUS_LOCK",
+ "PublicDescription": "Counts the more expensive bus lock needed to enforce cache coherency for certain memory accesses that need to be done atomically. Can be created by issuing an atomic instruction (via the LOCK prefix) which causes a cache line split or accesses uncacheable memory.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "PublicDescription": "Counts the number of PREFETCHW instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ }
+]
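
Each entry in the JSON array above follows the pmu-events schema consumed by perf: EventCode and UMask give the raw hardware encoding, SampleAfterValue is the default sampling period, and the OCR.* entries additionally carry MSRIndex/MSRValue to program the off-core response MSRs. As a rough illustration only (the script, its file-path argument and its output format are assumptions and not part of this patch), a file such as cache.json can be loaded and turned into raw event strings of the form cpu/event=...,umask=.../ that perf accepts:

    #!/usr/bin/env python3
    # Illustrative sketch, not part of this patch: print raw core-event
    # encodings from a pmu-events JSON file like the cache.json added above.
    import json
    import sys

    def list_events(path):
        with open(path) as f:
            events = json.load(f)  # each pmu-events file is a flat JSON array
        for ev in events:
            name = ev["EventName"]
            code = ev["EventCode"].split(",")[0]   # OCR.* entries list two codes
            umask = ev.get("UMask", "0x0")         # fallback if UMask is absent
            # Roughly the encoding perf builds for core events, e.g.
            #   perf stat -e cpu/event=0x51,umask=0x20/ -- <cmd>
            print(f"{name}: cpu/event={code},umask={umask}/")

    if __name__ == "__main__":
        list_events(sys.argv[1] if len(sys.argv) > 1 else "cache.json")

Once these files are built into perf, the events are available by name via perf list and can be requested directly with perf stat -e, so the manual encoding above is only needed when experimenting outside the perf build.
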
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/floating-point.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/floating-point.json
new file mode 100644
index 000000000000..4a9d211e9d4f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/floating-point.json
@@ -0,0 +1,193 @@
+[
+ {
+ "BriefDescription": "ARITH.FPDIV_ACTIVE",
+ "CounterMask": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.FPDIV_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all microcode FP assists.",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.FP",
+ "PublicDescription": "Counts all microcode Floating Point assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "ASSISTS.SSE_AVX_MIX",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.SSE_AVX_MIX",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.PORT_0",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.PORT_1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.PORT_5",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.4_FLOPS",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x18"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.8_FLOPS",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision and double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x60"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.VECTOR",
+ "PublicDescription": "Number of any Vector retired FP arithmetic instructions. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xfc"
+ },
+ {
+ "BriefDescription": "FP_ARITH_INST_RETIRED2.128B_PACKED_HALF",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.128B_PACKED_HALF",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "FP_ARITH_INST_RETIRED2.256B_PACKED_HALF",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.256B_PACKED_HALF",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "FP_ARITH_INST_RETIRED2.512B_PACKED_HALF",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.512B_PACKED_HALF",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of all Scalar Half-Precision FP arithmetic instructions(1) retired - regular and complex.",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.SCALAR",
+ "PublicDescription": "FP_ARITH_INST_RETIRED2.SCALAR",
+ "SampleAfterValue": "100003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "FP_ARITH_INST_RETIRED2.SCALAR_HALF",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.SCALAR_HALF",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of all Vector (also called packed) Half-Precision FP arithmetic instructions(1) retired.",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.VECTOR",
+ "PublicDescription": "FP_ARITH_INST_RETIRED2.VECTOR",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1c"
+ }
+]
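
The FP_ARITH_INST_RETIRED.* descriptions above state how many computational operations a single count represents (1 for scalar, 2/4/8/16 per count for 128/256/512-bit packed double and single precision), with DPP and FM(N)ADD/SUB already counted twice by the hardware. A minimal sketch, under those stated multipliers, of turning raw counts into a FLOP estimate (the helper name and the example counts are illustrative assumptions; the half-precision FP_ARITH_INST_RETIRED2.* events are not covered):

    # Illustrative sketch: estimate retired FLOPs from FP_ARITH_INST_RETIRED.*
    # counts, using the per-count element widths given in the descriptions above.
    FLOPS_PER_COUNT = {
        "FP_ARITH_INST_RETIRED.SCALAR_SINGLE": 1,
        "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE": 1,
        "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE": 2,
        "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE": 4,
        "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE": 4,
        "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE": 8,
        "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE": 8,
        "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE": 16,
    }

    def estimate_flops(counts):
        """counts: dict mapping event name -> raw counter value."""
        return sum(FLOPS_PER_COUNT[name] * value
                   for name, value in counts.items()
                   if name in FLOPS_PER_COUNT)

    # Example: 1e6 scalar-double counts plus 1e6 256-bit packed-double counts
    # -> 1e6*1 + 1e6*4 = 5e6 FLOPs (FMA is already double-counted by the event).
    print(estimate_flops({
        "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE": 1_000_000,
        "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE": 1_000_000,
    }))
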
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json
new file mode 100644
index 000000000000..9e53da55d0c1
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json
@@ -0,0 +1,389 @@
+[
+ {
+ "BriefDescription": "Clears due to Unknown Branches.",
+ "EventCode": "0x60",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Number of times the front-end is resteered when it finds a branch instruction in a fetch line. This is called Unknown Branch which occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "EventCode": "0x87",
+ "EventName": "DECODE.LCP",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles the Microcode Sequencer is busy.",
+ "EventCode": "0x87",
+ "EventName": "DECODE.MS_BUSY",
+ "SampleAfterValue": "500009",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "DSB-to-MITE switch true penalty cycles.",
+ "EventCode": "0x61",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced DSB miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x1",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x11",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x14",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L1I_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x12",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L2_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x13",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600106",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x608006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x601006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600206",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x610006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x100206",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x602006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600406",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x620006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x604006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600806",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "FRONTEND_RETIRED.MS_FLOWS",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.MS_FLOWS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x8",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.STLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x15",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x17",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_DATA.STALLS",
+ "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The decode pipeline works at a 32 Byte granularity.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS",
+ "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles DSB is delivering optimal number of Uops",
+ "CounterMask": "6",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_OK",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering optimal number of Uops",
+ "CounterMask": "6",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_OK",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles when uops are being delivered to IDQ while MS is busy",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_CYCLES_ANY",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of switches from DSB or MITE to the MS",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Uops delivered to IDQ while MS is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS).",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CORE]",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_BUBBLES.CORE",
+ "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_UOPS_NOT_DELIVERED.CORE]",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE]",
+ "CounterMask": "6",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE",
+ "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE]",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK]",
+ "CounterMask": "1",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_BUBBLES.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK]",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled [This event is alias to IDQ_BUBBLES.CORE]",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_BUBBLES.CORE]",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled [This event is alias to IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE]",
+ "CounterMask": "6",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE]",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled [This event is alias to IDQ_BUBBLES.CYCLES_FE_WAS_OK]",
+ "CounterMask": "1",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_BUBBLES.CYCLES_FE_WAS_OK]",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/memory.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/memory.json
new file mode 100644
index 000000000000..e8bf7c9c44e1
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/memory.json
@@ -0,0 +1,343 @@
+[
+ {
+ "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "CounterMask": "6",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6"
+ },
+ {
+ "BriefDescription": "Number of machine clears due to memory ordering conflicts.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "2",
+ "EventCode": "0x47",
+ "EventName": "MEMORY_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "3",
+ "EventCode": "0x47",
+ "EventName": "MEMORY_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Execution stalls while L2 cache miss demand cacheable load request is outstanding.",
+ "CounterMask": "5",
+ "EventCode": "0x47",
+ "EventName": "MEMORY_ACTIVITY.STALLS_L2_MISS",
+ "PublicDescription": "Execution stalls while L2 cache miss demand cacheable load request is outstanding (will not count for uncacheable demand requests e.g. bus lock).",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Execution stalls while L3 cache miss demand cacheable load request is outstanding.",
+ "CounterMask": "9",
+ "EventCode": "0x47",
+ "EventName": "MEMORY_ACTIVITY.STALLS_L3_MISS",
+ "PublicDescription": "Execution stalls while L3 cache miss demand cacheable load request is outstanding (will not count for uncacheable demand requests e.g. bus lock).",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x9"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "1009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "503",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "101",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "2003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired memory store access operations. A PDist event for PEBS Store Latency Facility.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE",
+ "PEBS": "2",
+ "PublicDescription": "Counts Retired memory accesses with at least 1 store operation. This PEBS event is the precisely-distributed (PDist) trigger covering all stores uops for sampling by the PEBS Store Latency Facility. The facility is described in Intel SDM Volume 3 section 19.9.8",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FC00002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that missed the local socket's L1, L2, and L3 caches.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L3.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x94002380",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L3.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84002380",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FC04477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F04C04477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that missed the L3 Cache and were supplied by the local socket (DRAM or PMM), whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM or DRAM accesses that are controlled by the close or distant SNC Cluster. It does not count misses to the L3 which go to Local CXL Type 2 Memory or Local Non DRAM.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL_SOCKET",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x70CC04477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that missed the local socket's L1, L2, and L3 caches.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.STREAMING_WR.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x94000800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.STREAMING_WR.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84000800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data read requests that miss the L3 cache.",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "For every cycle, increments by the number of demand data read requests pending that are known to have missed the L3 cache.",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
+ "PublicDescription": "For every cycle, increments by the number of demand data read requests pending that are known to have missed the L3 cache. Note that this does not capture all elapsed cycles while requests are outstanding - only cycles from when the requests were known by the requesting core to have missed the L3 cache.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted.",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "PublicDescription": "Counts the number of times RTM abort was triggered.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_EVENTS",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MEM",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to incompatible memory type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to HLE-unfriendly instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution successfully committed",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "PublicDescription": "Counts the number of times RTM commit succeeded.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution started.",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.START",
+ "PublicDescription": "Counts the number of times we entered an RTM region. Does not count nested transactions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional reads",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_READ",
+ "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional reads",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional writes.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
+ "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional writes.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "PublicDescription": "Counts the number of times a TSX line had a cache conflict.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/other.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/other.json
new file mode 100644
index 000000000000..2f375a6badcd
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/other.json
@@ -0,0 +1,315 @@
+[
+ {
+ "BriefDescription": "ASSISTS.PAGE_FAULT",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.PAGE_FAULT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the cycles where the AMX (Advance Matrix Extension) unit is busy performing an operation.",
+ "EventCode": "0xb7",
+ "EventName": "EXE.AMX_BUSY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to another socket.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x730000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FFC0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts data load hardware prefetch requests to the L1 data cache that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L1D.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches (which bring data to L2) that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L2.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10070",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L3.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x12380",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline was homed in a remote socket.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L3.REMOTE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x90002380",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts writebacks of modified cachelines and streaming stores that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.MODIFIED_WRITE.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10808",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FFC4477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts DRAM accesses that are controlled by the close or distant SNC Cluster.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x70C004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by a remote socket.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F33004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to another socket.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x730004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM or PMM attached to another socket.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_MEMORY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x733004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts Demand RFOs, ItoM's, PREFECTHW's, Hardware RFO Prefetches to the L1/L2 and Streaming stores that likely resulted in a store to Memory (DRAM or PMM)",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.WRITE_ESTIMATE.MEMORY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xFBFF80822",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_COUNT",
+ "Invert": "1",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY_COUNT",
+ "CounterMask": "1",
+ "Deprecated": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS_EMPTY.COUNT",
+ "Invert": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY",
+ "Deprecated": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS_EMPTY.CYCLES",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Cycles the uncore cannot take further requests",
+ "CounterMask": "1",
+ "EventCode": "0x2d",
+ "EventName": "XQ.FULL_CYCLES",
+ "PublicDescription": "number of cycles when the thread is active and the uncore cannot take any further requests (for example prefetches, loads or stores initiated by the Core that miss the L2 cache).",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json
new file mode 100644
index 000000000000..6dcf3b763af4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json
@@ -0,0 +1,975 @@
+[
+ {
+ "BriefDescription": "AMX retired arithmetic BF16 operations.",
+ "EventCode": "0xce",
+ "EventName": "AMX_OPS_RETIRED.BF16",
+ "PublicDescription": "Number of AMX-based retired arithmetic bfloat16 (BF16) floating-point operations. Counts TDPBF16PS FP instructions. SW to use operation multiplier of 4",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "AMX retired arithmetic integer 8-bit operations.",
+ "EventCode": "0xce",
+ "EventName": "AMX_OPS_RETIRED.INT8",
+ "PublicDescription": "Number of AMX-based retired arithmetic integer operations of 8-bit width source operands. Counts TDPB[SS,UU,US,SU]D instructions. SW should use operation multiplier of 8.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event ARITH.DIV_ACTIVE",
+ "CounterMask": "1",
+ "Deprecated": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.DIVIDER_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x9"
+ },
+ {
+ "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
+ "CounterMask": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.DIV_ACTIVE",
+ "PublicDescription": "Counts cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x9"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event ARITH.FPDIV_ACTIVE",
+ "CounterMask": "1",
+ "Deprecated": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.FP_DIVIDER_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event counts the cycles the integer divider is busy.",
+ "CounterMask": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.IDIV_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event ARITH.IDIV_ACTIVE",
+ "CounterMask": "1",
+ "Deprecated": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.INT_DIVIDER_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.ANY",
+ "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware. Examples include AD (page Access Dirty), FP and AVX related assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1b"
+ },
+ {
+ "BriefDescription": "All branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all branch instructions retired.",
+ "SampleAfterValue": "400009"
+ },
+ {
+ "BriefDescription": "Conditional branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND",
+ "PEBS": "1",
+ "PublicDescription": "Counts conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Not taken branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Taken conditional branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Far branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
+ "PublicDescription": "Counts far branch instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Indirect near branch instructions retired (excluding returns)",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "PublicDescription": "Counts both direct and indirect near call instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Return instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "PublicDescription": "Counts return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Taken branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "All mispredicted branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "SampleAfterValue": "400009"
+ },
+ {
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND",
+ "PEBS": "1",
+ "PublicDescription": "Counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Mispredicted non-taken conditional branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "number of branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken conditional mispredicted branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Miss-predicted near indirect branch instructions retired (excluding returns)",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PublicDescription": "Counts miss-predicted near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Mispredicted indirect CALL retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state.",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.C01",
+ "PublicDescription": "Counts core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state.",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.C02",
+ "PublicDescription": "Counts core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Core clocks when the thread is in the C0.1 or C0.2 or running a PAUSE in C0 ACPI state.",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.C0_WAIT",
+ "PublicDescription": "Counts core clocks when the thread is in the C0.1 or C0.2 power saving optimized states (TPAUSE or UMWAIT instructions) or running the PAUSE instruction.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x70"
+ },
+ {
+ "BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
+ "PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
+ "SampleAfterValue": "25003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "CPU_CLK_UNHALTED.PAUSE",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.PAUSE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "CPU_CLK_UNHALTED.PAUSE_INST",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.PAUSE_INST",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles. Cycle counts are evenly distributed between active threads in the Core.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED",
+ "PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "16",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc"
+ },
+ {
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "5",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_LOADS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "CounterMask": "2",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
+ "PublicDescription": "Counts cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Cycles no uop executed while RS was not empty, the SB was not full and there was no outstanding load.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
+ "PublicDescription": "Number of cycles total of 0 uops executed on all ports, Reservation Station (RS) was not empty, the Store Buffer (SB) was not full and there was no outstanding load.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Instruction decoders utilized in a cycle",
+ "EventCode": "0x75",
+ "EventName": "INST_DECODED.DECODERS",
+ "PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "INST_RETIRED.MACRO_FUSED",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.MACRO_FUSED",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Retired NOP instructions.",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.NOP",
+ "PublicDescription": "Counts all retired NOP or ENDBR32/64 instructions",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Precise instruction retired with PEBS precise-distribution",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "1",
+ "PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Iterations of Repeat string retired instructions.",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.REP_ITERATION",
+ "PublicDescription": "Number of iterations of Repeat (REP) string retired instructions such as MOVS, CMPS, and SCAS. Each has a byte, word, and doubleword version and string instructions can be repeated using a repetition prefix, REP, that allows their architectural execution to be repeated a number of times as specified by the RCX register. Note the number of iterations is implementation-dependent.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Clears speculative count",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.CLEARS_COUNT",
+ "PublicDescription": "Counts the number of speculative clears due to any type of branch misprediction or machine clears",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
+ "PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "INT_MISC.MBA_STALLS",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.MBA_STALLS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x7",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "TMA slots where uops got dropped",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.UOP_DROPPING",
+ "PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non front-end reasons",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.128BIT",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.128BIT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x13"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.256BIT",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.256BIT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xac"
+ },
+ {
+ "BriefDescription": "integer ADD, SUB, SAD 128-bit vector instructions.",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.ADD_128",
+ "PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 128-bit vector instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "integer ADD, SUB, SAD 256-bit vector instructions.",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.ADD_256",
+ "PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 256-bit vector instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.MUL_256",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.MUL_256",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.SHUFFLES",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.SHUFFLES",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.VNNI_128",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.VNNI_128",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.VNNI_256",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.VNNI_256",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.ADDRESS_ALIAS",
+ "PublicDescription": "Counts the number of times a load got blocked due to false dependencies in MOB due to partial compare on address.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x88"
+ },
+ {
+ "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x82"
+ },
+ {
+ "BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "EventCode": "0x4c",
+ "EventName": "LOAD_HIT_PREFETCH.SWPF",
+ "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "EventCode": "0xa8",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
+ "CounterMask": "6",
+ "EventCode": "0xa8",
+ "EventName": "LSD.CYCLES_OK",
+ "PublicDescription": "Counts the cycles when optimal number of uops is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "EventCode": "0xa8",
+ "EventName": "LSD.UOPS",
+ "PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "PublicDescription": "Counts the number of machine clears (nukes) of any type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "LFENCE instructions retired",
+ "EventCode": "0xe0",
+ "EventName": "MISC2_RETIRED.LFENCE",
+ "PublicDescription": "number of LFENCE retired instructions",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "EventCode": "0xcc",
+ "EventName": "MISC_RETIRED.LBR_INSERTS",
+ "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.SB",
+ "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.SCOREBOARD",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "TMA slots where no uops were being issued due to lack of back-end resources.",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
+ "PublicDescription": "Number of slots in TMA method where no micro-operations were being issued from front-end to back-end of the machine due to lack of back-end resources.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "TMA slots wasted due to incorrect speculations.",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BAD_SPEC_SLOTS",
+ "PublicDescription": "Number of slots of TMA method that were wasted due to incorrect speculation. It covers all types of control-flow or data-related mis-speculations.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BR_MISPREDICT_SLOTS",
+ "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by (any type of) branch mispredictions. This event estimates number of speculative operations that were issued but not retired as well as the out-of-order engine recovery past a branch misprediction.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "TOPDOWN.MEMORY_BOUND_SLOTS",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.MEMORY_BOUND_SLOTS",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "EventName": "TOPDOWN.SLOTS",
+ "PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.SLOTS_P",
+ "PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "UOPS_DECODED.DEC0_UOPS",
+ "EventCode": "0x76",
+ "EventName": "UOPS_DECODED.DEC0_UOPS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Uops executed on port 0",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_0",
+ "PublicDescription": "Number of uops dispatch to execution port 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Uops executed on port 1",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_1",
+ "PublicDescription": "Number of uops dispatch to execution port 1.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Uops executed on ports 2, 3 and 10",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_2_3_10",
+ "PublicDescription": "Number of uops dispatch to execution ports 2, 3 and 10",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Uops executed on ports 4 and 9",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_4_9",
+ "PublicDescription": "Number of uops dispatch to execution ports 4 and 9",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Uops executed on ports 5 and 11",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_5_11",
+ "PublicDescription": "Number of uops dispatch to execution ports 5 and 11",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Uops executed on port 6",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_6",
+ "PublicDescription": "Number of uops dispatch to execution port 6.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Uops executed on ports 7 and 8",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_7_8",
+ "PublicDescription": "Number of uops dispatch to execution ports 7 and 8.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of uops executed on the core.",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "PublicDescription": "Counts the number of uops executed from any thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "PublicDescription": "Counts cycles when at least 1 micro-op is executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "PublicDescription": "Counts cycles when at least 2 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "PublicDescription": "Counts cycles when at least 3 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "PublicDescription": "Counts cycles when at least 4 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1",
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CounterMask": "4",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.STALLS",
+ "Invert": "1",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UOPS_EXECUTED.STALLS",
+ "CounterMask": "1",
+ "Deprecated": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of x87 uops dispatched.",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.X87",
+ "PublicDescription": "Counts the number of x87 uops executed.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Uops that RAT issues to RS",
+ "EventCode": "0xae",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with retired uop(s).",
+ "CounterMask": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.CYCLES",
+ "PublicDescription": "Counts cycles where at least one uop has retired.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired uops except the last uop of each instruction.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.HEAVY",
+ "PublicDescription": "Counts the number of retired micro-operations (uops) except the last uop of each instruction. An instruction that is decoded into less than two uops does not contribute to the count.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "UOPS_RETIRED.MS",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.MS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x8",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retirement slots used.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.SLOTS",
+ "PublicDescription": "Counts the retirement slots used each cycle.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.STALLS",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UOPS_RETIRED.STALLS",
+ "CounterMask": "1",
+ "Deprecated": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cache.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cache.json
new file mode 100644
index 000000000000..bf5a511b99d1
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cache.json
@@ -0,0 +1,5988 @@
+[
+ {
+ "BriefDescription": "CHA to iMC Bypass : Intermediate bypass Taken",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.INTERMEDIATE",
+ "PerPkg": "1",
+ "PublicDescription": "CHA to iMC Bypass : Intermediate bypass Taken : Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not. : Filter for transactions that succeeded in taking the intermediate bypass.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass : Not Taken",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.NOT_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "CHA to iMC Bypass : Not Taken : Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not. : Filter for transactions that could not take the bypass, and issues a read to memory. Note that transactions that did not take the bypass but did not issue read to memory will not be counted.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass : Taken",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "CHA to iMC Bypass : Taken : Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not. : Filter for transactions that succeeded in taking the full bypass.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA Clockticks",
+ "EventCode": "0x01",
+ "EventName": "UNC_CHA_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CHA clock cycles while the event is enabled",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "EventCode": "0xc0",
+ "EventName": "UNC_CHA_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Any Cycle with Multiple Snoops",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.ANY_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Any Cycle with Multiple Snoops : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0xf2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Any Single Snoop",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.ANY_ONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Any Single Snoop : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0xf1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Multiple Core Requests",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Multiple Core Requests : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x42",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Single Core Requests",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_ONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Single Core Requests : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x41",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Multiple Eviction",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Multiple Eviction : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Single Eviction",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_ONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Single Eviction : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x81",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Multiple External Snoops",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EXT_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Multiple External Snoops : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x22",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Single External Snoops",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EXT_ONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Single External Snoops : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Multiple Snoop Targets from Remote",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.REMOTE_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Multiple Snoop Targets from Remote : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x12",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Single Snoop Target from Remote",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.REMOTE_ONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Single Snoop Target from Remote : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6e",
+ "EventName": "UNC_CHA_DIRECT_GO.HA_SUPPRESS_DRD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6e",
+ "EventName": "UNC_CHA_DIRECT_GO.HA_SUPPRESS_NO_D2C",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6e",
+ "EventName": "UNC_CHA_DIRECT_GO.HA_TOR_DEALLOC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6d",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.EXTCMP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6d",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.FAST_GO",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6d",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.FAST_GO_PULL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6d",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.GO",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6d",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.GO_PULL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6d",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.IDLE_DUE_SUPPRESS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6d",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.NOP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6d",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.PULL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Not Needed",
+ "EventCode": "0x53",
+ "EventName": "UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and therefore did not send a snoop because the Directory indicated it was not needed.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Needed",
+ "EventCode": "0x53",
+ "EventName": "UNC_CHA_DIR_LOOKUP.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and sent one or more snoops, because the Directory indicated it was needed.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from the HA pipe",
+ "EventCode": "0x54",
+ "EventName": "UNC_CHA_DIR_UPDATE.HA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts only multi-socket cacheline Directory state updates memory writes issued from the HA pipe. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from TOR pipe",
+ "EventCode": "0x54",
+ "EventName": "UNC_CHA_DIR_UPDATE.TOR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts only multi-socket cacheline Directory state updates due to memory writes issued from the TOR pipe which are the result of remote transaction hitting the SF/LLC and returning data Core2Core. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "EventCode": "0xba",
+ "EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "EventCode": "0xba",
+ "EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read request from a remote socket which hit in the HitMe Cache to a line In the E state",
+ "EventCode": "0x5f",
+ "EventName": "UNC_CHA_HITME_HIT.EX_RDS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts read requests from a remote socket which hit in the HitME cache (used to cache the multi-socket Directory state) to a line in the E(Exclusive) state. This includes the following read opcodes (RdCode, RdData, RdDataMigratory, RdCur, RdInv*, Inv*).",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache : Shared hit and op is RdInvOwn, RdInv, Inv*",
+ "EventCode": "0x5f",
+ "EventName": "UNC_CHA_HITME_HIT.SHARED_OWNREQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache : op is WbMtoE",
+ "EventCode": "0x5f",
+ "EventName": "UNC_CHA_HITME_HIT.WBMTOE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache : op is WbMtoI, WbPushMtoI, WbFlush, or WbMtoS",
+ "EventCode": "0x5f",
+ "EventName": "UNC_CHA_HITME_HIT.WBMTOI_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed : op is RdCode, RdData, RdDataMigratory, RdCur, RdInvOwn, RdInv, Inv*",
+ "EventCode": "0x5e",
+ "EventName": "UNC_CHA_HITME_LOOKUP.READ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed : op is WbMtoE, WbMtoI, WbPushMtoI, WbFlush, or WbMtoS",
+ "EventCode": "0x5e",
+ "EventName": "UNC_CHA_HITME_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Misses in HitMe Cache : No SF/LLC HitS/F and op is RdInvOwn",
+ "EventCode": "0x60",
+ "EventName": "UNC_CHA_HITME_MISS.NOTSHARED_RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Misses in HitMe Cache : op is RdCode, RdData, RdDataMigratory, RdCur, RdInv, Inv*",
+ "EventCode": "0x60",
+ "EventName": "UNC_CHA_HITME_MISS.READ_OR_INV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Misses in HitMe Cache : SF/LLC HitS/F and op is RdInvOwn",
+ "EventCode": "0x60",
+ "EventName": "UNC_CHA_HITME_MISS.SHARED_RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : Deallocate HitME$ on Reads without RspFwdI*",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a local request",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE_RSPFWDI_LOC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a local request : Received RspFwdI* for a local request, but converted HitME$ to SF entry",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : Update HitMe Cache on RdInvOwn even if not RspFwdI*",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a remote request",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.RSPFWDI_REM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a remote request : Updated HitME$ on RspFwdI* or local HitM/E received for a remote request",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : Update HitMe Cache to SHARed",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.SHARED",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Normal priority reads issued to the memory controller from the CHA",
+ "EventCode": "0x59",
+ "EventName": "UNC_CHA_IMC_READS_COUNT.NORMAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a normal (Non-Isochronous) read is issued to any of the memory controller channels from the CHA.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "HA to iMC Reads Issued : ISOCH",
+ "EventCode": "0x59",
+ "EventName": "UNC_CHA_IMC_READS_COUNT.PRIORITY",
+ "PerPkg": "1",
+ "PublicDescription": "HA to iMC Reads Issued : ISOCH : Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "EventCode": "0x5b",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a normal (Non-Isochronous) full line write is issued from the CHA to the any of the memory controller channels.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Full Line",
+ "EventCode": "0x5b",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "PerPkg": "1",
+ "PublicDescription": "CHA to iMC Full Line Writes Issued : ISOCH Full Line : Counts the total number of full line writes issued from the HA into the memory controller.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : Partial Non-ISOCH",
+ "EventCode": "0x5b",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "CHA to iMC Full Line Writes Issued : Partial Non-ISOCH : Counts the total number of full line writes issued from the HA into the memory controller.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Partial",
+ "EventCode": "0x5b",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "PerPkg": "1",
+ "PublicDescription": "CHA to iMC Full Line Writes Issued : ISOCH Partial : Counts the total number of full line writes issued from the HA into the memory controller.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Any Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
+ "UMask": "0x1fffff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : All transactions from Remote Agents",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.ALL_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : All transactions from Remote Agents : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x17e0ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : All Requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.ANY_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : All Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Any local or remote transaction to the LLC, including prefetch.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : CRd Requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
+ "UMask": "0x1bd0ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : CRd Requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Local non-prefetch requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.COREPREF_OR_DMND_LOCAL_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Local non-prefetch requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Any local transaction to the LLC, not including prefetch",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x1bc1ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Data Reads",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Data Reads : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1fc1ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Data Read Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Data Read Request : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Read transactions.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Demand Data Reads, Core and LLC prefetches",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Demand Data Reads, Core and LLC prefetches : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x841ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Data Read Misses",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Data Read Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1fc101",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : E State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.E",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : E State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Hit Exclusive State",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : F State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : F State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Hit Forward State",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Flush or Invalidate Requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_INV",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Flush : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1a44ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Flush",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_OR_INV_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Flush : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : I State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.I",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : I State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Miss",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Local LLC prefetch requests (from LLC)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LLCPREF_LOCAL_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Local LLC prefetch requests (from LLC) : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Any local LLC prefetch to the LLC",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Transactions homed locally",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCALLY_HOMED_ADDRESS",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Transactions homed locally : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Transaction whose address resides in the local MC.",
+ "UMask": "0xbdfff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : CRd Requests that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_CODE",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
+ "UMask": "0x19d0ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DATA_RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x19c1ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Demand CRd Requests that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_CODE",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
+ "UMask": "0x1850ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Demand Data Reads that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_DATA_RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x1841ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Demand RFO Requests that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
+ "UMask": "0x1848ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Transactions homed locally",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Transactions homed locally : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Transaction whose address resides in the local MC.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Flush or Invalidate Requests that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_FLUSH_INV",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Flush : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1844ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Prefetch requests to the LLC that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_LLC_PF",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x189dff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Prefetches that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x199dff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : CRd Prefetches that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_CODE",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
+ "UMask": "0x1910ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Prefetches that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_DATA_RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x1981ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Prefetches that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
+ "UMask": "0x1908ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Requests that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
+ "UMask": "0x19c8ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : M State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.M",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : M State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Hit Modified State",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : All Misses",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.MISS_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1fe001",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Write Requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.OTHER_REQ_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Write Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Writeback transactions from L2 to the LLC This includes all write transactions -- both Cacheable and UC.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Remote non-snoop requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.PREF_OR_DMND_REMOTE_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Remote non-snoop requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Remote non-snoop transactions to the LLC.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Transactions homed remotely",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTELY_HOMED_ADDRESS",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Transactions homed remotely : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Transaction whose address resides in a remote MC",
+ "UMask": "0x15dfff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : CRd Requests that come from a Remote socket.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_CODE",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
+ "UMask": "0x1a10ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Requests that come from a Remote socket",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_DATA_RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x1a01ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Transactions homed remotely",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Transactions homed remotely : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Transaction whose address resides in a remote MC",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Flush or Invalidate requests that come from a Remote socket.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_FLUSH_INV",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Flush : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1a04ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Filters Requests for those that write info into the cache that come from a remote socket",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Write Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Writeback transactions from L2 to the LLC This includes all write transactions -- both Cacheable and UC.",
+ "UMask": "0x1a02ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Requests that come from a Remote socket.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
+ "UMask": "0x1a08ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Remote snoop requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNOOP_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Remote snoop requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Remote snoop transactions to the LLC.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Snoop Requests from a Remote Socket",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
+ "UMask": "0x1c19ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
+ "UMask": "0x1bc8ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Request Filter",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO_F",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Locally HOMed RFOs - Demand and Prefetches",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x9c8ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : S State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.S",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : S State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Hit Shared State",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : SnoopFilter - E State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.SF_E",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : SnoopFilter - E State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : SF Hit Exclusive State",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : SnoopFilter - H State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.SF_H",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : SnoopFilter - H State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : SF Hit HitMe State",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : SnoopFilter - S State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.SF_S",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : SnoopFilter - S State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : SF Hit Shared State",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Writes",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.WRITE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Requests that install or change a line in the LLC. Examples: Writebacks from Core L2's and UPI. Prefetches into the LLC.",
+ "UMask": "0x842ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Remote Writes",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.WRITE_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x17c2ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Lines in E state",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Lines in E state : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : IA traffic",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.IA",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : IA traffic : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : IO traffic",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.IO",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : IO traffic : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All LLC lines in E state that are victimized on a fill from an IO device",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.IO_E",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x12",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All LLC lines in F or S state that are victimized on a fill from an IO device",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.IO_FS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x1c",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All LLC lines in M state that are victimized on a fill from an IO device",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.IO_M",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All LLC lines in any state that are victimized on a fill from an IO device",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.IO_MESF",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x1f",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Local - All Lines",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x200f",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_E",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2002",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_M",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2001",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Local Only",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ONLY",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Local Only : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_S",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2004",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Lines in M state",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.M_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Lines in M state : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Remote - All Lines",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x800f",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_E",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x8002",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_M",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x8001",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Remote Only",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_ONLY",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Remote Only : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_S",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x8004",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Lines in S State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.S_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Lines in S State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All LLC lines in E state that are victimized on a fill",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All LLC lines in M state that are victimized on a fill",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All LLC lines in S state that are victimized on a fill",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc : CV0 Prefetch Miss",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.CV0_PREF_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Cbo Misc : CV0 Prefetch Miss : Miscellaneous events in the Cbo.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc : CV0 Prefetch Victim",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.CV0_PREF_VIC",
+ "PerPkg": "1",
+ "PublicDescription": "Cbo Misc : CV0 Prefetch Victim : Miscellaneous events in the Cbo.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of times that an RFO hit in S state.",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a RFO (the Read for Ownership issued before a write) request hit a cacheline in the S (Shared) state.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc : Silent Snoop Eviction",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.RSPI_WAS_FSE",
+ "PerPkg": "1",
+ "PublicDescription": "Cbo Misc : Silent Snoop Eviction : Miscellaneous events in the Cbo. : Counts the number of times when a Snoop hit in FSE states and triggered a silent eviction. This is useful because this information is lost in the PRE encodings.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc : Write Combining Aliasing",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "PublicDescription": "Cbo Misc : Write Combining Aliasing : Miscellaneous events in the Cbo. : Counts the number of times that a USWC write (WCIL(F)) transaction hit in the LLC in M state, triggering a WBMtoI followed by the USWC write. This occurs when there is WC aliasing.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Local InvItoE",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.LOCAL_INVITOE",
+ "PerPkg": "1",
+ "PublicDescription": "OSB Snoop Broadcast : Local InvItoE : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Local Rd",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.LOCAL_READ",
+ "PerPkg": "1",
+ "PublicDescription": "OSB Snoop Broadcast : Local Rd : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Off",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.OFF_PWRHEURISTIC",
+ "PerPkg": "1",
+ "PublicDescription": "OSB Snoop Broadcast : Off : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Remote Rd",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.REMOTE_READ",
+ "PerPkg": "1",
+ "PublicDescription": "OSB Snoop Broadcast : Remote Rd : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Remote Rd InvItoE",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.REMOTE_READINVITOE",
+ "PerPkg": "1",
+ "PublicDescription": "OSB Snoop Broadcast : Remote Rd InvItoE : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : RFO HitS Snoop Broadcast",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.RFO_HITS_SNP_BCAST",
+ "PerPkg": "1",
+ "PublicDescription": "OSB Snoop Broadcast : RFO HitS Snoop Broadcast : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.LOCAL",
+ "EventCode": "0x65",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.REMOTE",
+ "EventCode": "0x65",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.SETCONFLICT",
+ "EventCode": "0x65",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.SETCONFLICT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Memory Mode related events; Counts the number of times CHA saw a Near Memory set conflict in SF/LLC",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.LLC",
+ "PerPkg": "1",
+ "PublicDescription": "Near Memory evictions due to another read to the same Near Memory set in the LLC.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Memory Mode related events; Counts the number of times CHA saw a Near memory set conflict in SF/LLC",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.SF",
+ "PerPkg": "1",
+ "PublicDescription": "Near Memory evictions due to another read to the same Near Memory set in the SF",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Memory Mode related events; Counts the number of times CHA saw a Near Memory set conflict in TOR",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.TOR",
+ "PerPkg": "1",
+ "PublicDescription": "No Reject in the CHA due to a pending read to the same Near Memory set in the TOR.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.IODC",
+ "EventCode": "0x70",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.IODC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWR",
+ "EventCode": "0x70",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWRNI",
+ "EventCode": "0x70",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWRNI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.DDR4_FAST_INSERT",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.DDR4_FAST_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.REJ_IRQ",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.REJ_IRQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.SLOWTORQ_SKIP",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.SLOWTORQ_SKIP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.SLOW_INSERT",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.SLOW_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.THROTTLE",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.THROTTLE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.THROTTLE_IRQ",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.THROTTLE_IRQ",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.THROTTLE_PRQ",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.THROTTLE_PRQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS_OCCUPANCY.DDR_FAST_FIFO",
+ "EventCode": "0x67",
+ "EventName": "UNC_CHA_PMM_QOS_OCCUPANCY.DDR_FAST_FIFO",
+ "PerPkg": "1",
+ "PublicDescription": ": count # of FAST TOR Request inserted to ha_tor_req_fifo",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of SLOW TOR Request inserted to ha_pmm_tor_req_fifo",
+ "EventCode": "0x67",
+ "EventName": "UNC_CHA_PMM_QOS_OCCUPANCY.DDR_SLOW_FIFO",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC0",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC0",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC0 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 0 only.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC1",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC1",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC1 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 1 only.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC2",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC2",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC2 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 2 only.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC3",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC3",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC3 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 3 only.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC4",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC4",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC4 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 4 only.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC5",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC5",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC5 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 5 only.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Requests for exclusive ownership of a cache line without receiving data",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
+ "UMask": "0x30",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Remote requests for exclusive ownership of a cache line without receiving data",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of requests coming from a remote socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read requests made into the CHA",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts read requests made into this CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write) .",
+ "UMask": "0x3",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read requests from a unit on this socket",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts read requests coming from a unit on this socket made into this CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write).",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read requests from a remote socket",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts read requests coming from a remote socket made into the CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write).",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Write requests made into the CHA",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts write requests made into the CHA, including streaming, evictions, HitM (Reads from another core to a Modified cacheline), etc.",
+ "UMask": "0xc",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Write Requests from a unit on this socket",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts write requests coming from a unit on this socket made into this CHA, including streaming, evictions, HitM (Reads from another core to a Modified cacheline), etc.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Writes Remote",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : IPQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : IPQ : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : IRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : IRQ : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : IRQ Rejected",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : IRQ Rejected : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : PRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : PRQ : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : PRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.PRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : PRQ : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : RRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.RRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : RRQ : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : WBQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.WBQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : WBQ : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0 : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0 : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0 : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0 : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0 : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0 : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 1 : ANY0 : Any condition listed in the IPQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : HA",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match : Address match with an outstanding request that was rejected.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 1 : SF Victim : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0 : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0 : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0 : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0 : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0 : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0 : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 1 : ANY0 : Any condition listed in the IRQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : HA",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC or SF Way",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC or SF Way : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 1 : SF Victim : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : AD REQ on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : AD RSP on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : Non UPI AK Request",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : BL NCB on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : BL NCS on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : BL RSP on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : BL WB on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : Non UPI IV Request",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : AD REQ on VN0",
+ "EventCode": "0x2c",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : AD RSP on VN0",
+ "EventCode": "0x2c",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : Non UPI AK Request",
+ "EventCode": "0x2c",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : BL NCB on VN0",
+ "EventCode": "0x2c",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : BL NCS on VN0",
+ "EventCode": "0x2c",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : BL RSP on VN0",
+ "EventCode": "0x2c",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : BL WB on VN0",
+ "EventCode": "0x2c",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : Non UPI IV Request",
+ "EventCode": "0x2c",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 1 : ANY0",
+ "EventCode": "0x25",
+ "EventName": "UNC_CHA_RxC_ISMQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 1 : ANY0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Any condition listed in the ISMQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 1 : HA",
+ "EventCode": "0x25",
+ "EventName": "UNC_CHA_RxC_ISMQ1_REJECT.HA",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 1 : HA : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 1 : ANY0",
+ "EventCode": "0x2d",
+ "EventName": "UNC_CHA_RxC_ISMQ1_RETRY.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 1 : ANY0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Any condition listed in the ISMQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 1 : HA",
+ "EventCode": "0x2d",
+ "EventName": "UNC_CHA_RxC_ISMQ1_RETRY.HA",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 1 : HA : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy : IPQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Occupancy : IPQ : Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy : RRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.RRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Occupancy : RRQ : Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy : WBQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.WBQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Occupancy : WBQ : Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : AD REQ on VN0",
+ "EventCode": "0x2e",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : AD REQ on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : AD RSP on VN0",
+ "EventCode": "0x2e",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : AD RSP on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : Non UPI AK Request",
+ "EventCode": "0x2e",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : Non UPI AK Request : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : BL NCB on VN0",
+ "EventCode": "0x2e",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : BL NCB on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : BL NCS on VN0",
+ "EventCode": "0x2e",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : BL NCS on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : BL RSP on VN0",
+ "EventCode": "0x2e",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : BL RSP on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : BL WB on VN0",
+ "EventCode": "0x2e",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : BL WB on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : Non UPI IV Request",
+ "EventCode": "0x2e",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : Non UPI IV Request : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : Allow Snoop",
+ "EventCode": "0x2f",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : Allow Snoop : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : ANY0",
+ "EventCode": "0x2f",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : ANY0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Any condition listed in the Other0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : HA",
+ "EventCode": "0x2f",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.HA",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : HA : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : LLC OR SF Way",
+ "EventCode": "0x2f",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : LLC OR SF Way : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : LLC Victim",
+ "EventCode": "0x2f",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : LLC Victim : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : PhyAddr Match",
+ "EventCode": "0x2f",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : PhyAddr Match : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Address match with an outstanding request that was rejected.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : SF Victim",
+ "EventCode": "0x2f",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : SF Victim : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : Victim",
+ "EventCode": "0x2f",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : Victim : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0 : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0 : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0 : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0 : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0 : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0 : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : ANY0 : Any condition listed in the PRQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : HA",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match : Address match with an outstanding request that was rejected.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : SF Victim : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : AD REQ on VN0",
+ "EventCode": "0x2a",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : AD REQ on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : AD RSP on VN0",
+ "EventCode": "0x2a",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : AD RSP on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : Non UPI AK Request",
+ "EventCode": "0x2a",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : Non UPI AK Request : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : BL NCB on VN0",
+ "EventCode": "0x2a",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : BL NCB on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : BL NCS on VN0",
+ "EventCode": "0x2a",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : BL NCS on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : BL RSP on VN0",
+ "EventCode": "0x2a",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : BL RSP on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : BL WB on VN0",
+ "EventCode": "0x2a",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : BL WB on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : Non UPI IV Request",
+ "EventCode": "0x2a",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : Non UPI IV Request : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : Allow Snoop",
+ "EventCode": "0x2b",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : Allow Snoop : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : ANY0",
+ "EventCode": "0x2b",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : ANY0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Any condition listed in the WBQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : HA",
+ "EventCode": "0x2b",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.HA",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : HA : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : LLC OR SF Way",
+ "EventCode": "0x2b",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : LLC OR SF Way : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : LLC Victim",
+ "EventCode": "0x2b",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : LLC Victim : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : PhyAddr Match",
+ "EventCode": "0x2b",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : PhyAddr Match : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Address match with an outstanding request that was rejected.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : SF Victim",
+ "EventCode": "0x2b",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : SF Victim : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : Victim",
+ "EventCode": "0x2b",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : Victim : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : AD REQ on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : AD RSP on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : Non UPI AK Request",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : BL NCB on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : BL NCS on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : BL RSP on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : BL WB on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : Non UPI IV Request",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : Allow Snoop",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 1 : Allow Snoop : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : ANY0",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 1 : ANY0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Any condition listed in the RRQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : HA",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 1 : HA : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : LLC OR SF Way",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 1 : LLC OR SF Way : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : LLC Victim",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 1 : LLC Victim : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : PhyAddr Match",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 1 : PhyAddr Match : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Address match with an outstanding request that was rejected.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : SF Victim",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 1 : SF Victim : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : Victim",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 1 : Victim : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : AD REQ on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : AD RSP on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : Non UPI AK Request",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : BL NCB on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : BL NCS on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : BL RSP on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : BL WB on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : Non UPI IV Request",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : Allow Snoop",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 1 : Allow Snoop : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : ANY0",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 1 : ANY0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Any condition listed in the WBQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : HA",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.HA",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 1 : HA : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : LLC OR SF Way",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 1 : LLC OR SF Way : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : LLC Victim",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 1 : LLC Victim : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : PhyAddr Match",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 1 : PhyAddr Match : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Address match with an outstanding request that was rejected.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : SF Victim",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 1 : SF Victim : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : Victim",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 1 : Victim : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : All",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : All : Counts the number of snoops issued by the HA.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Broadcast snoop for Local Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.BCST_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : Broadcast snoop for Local Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast snoops issued by the HA. This filter includes only requests coming from local sockets.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Broadcast snoops for Remote Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.BCST_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : Broadcast snoops for Remote Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast snoops issued by the HA.This filter includes only requests coming from remote sockets.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Directed snoops for Local Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : Directed snoops for Local Requests : Counts the number of snoops issued by the HA. : Counts the number of directed snoops issued by the HA. This filter includes only requests coming from local sockets.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Directed snoops for Remote Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : Directed snoops for Remote Requests : Counts the number of snoops issued by the HA. : Counts the number of directed snoops issued by the HA. This filter includes only requests coming from remote sockets.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Broadcast or directed Snoops sent for Local Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : Broadcast or directed Snoops sent for Local Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast or directed snoops issued by the HA per request. This filter includes only requests coming from the local socket.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Broadcast or directed Snoops sent for Remote Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : Broadcast or directed Snoops sent for Remote Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast or directed snoops issued by the HA per request. This filter includes only requests coming from the remote socket.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : RSPCNFLCT*",
+ "EventCode": "0x5c",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPCNFLCT",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received : RSPCNFLCT* : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for snoops responses of RspConflict. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : RspFwd",
+ "EventCode": "0x5c",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received : RspFwd : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for a snoop response of RspFwd to a CA request. This snoop response is only possible for RdCur when a snoop HITM/E in a remote caching agent and it directly forwards data to a requestor without changing the requestor's cache line state.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : Rsp*Fwd*WB",
+ "EventCode": "0x5c",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPFWDWB",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received : Rsp*Fwd*WB : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for a snoop response of Rsp*Fwd*WB. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspI Snoop Responses Received",
+ "EventCode": "0x5c",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPI",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a transaction with the opcode type RspI Snoop Response was received which indicates the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO: the Read for Ownership issued before a write hits non-modified data).",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspIFwd Snoop Responses Received",
+ "EventCode": "0x5c",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPIFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a a transaction with the opcode type RspIFwd Snoop Response was received which indicates a remote caching agent forwarded the data and the requesting agent is able to acquire the data in E (Exclusive) or M (modified) states. This is commonly returned with RFO (the Read for Ownership issued before a write) transactions. The snoop could have either been to a cacheline in the M,E,F (Modified, Exclusive or Forward) states.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspS Snoop Responses Received",
+ "EventCode": "0x5c",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a transaction with the opcode type RspS Snoop Response was received which indicates when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspSFwd Snoop Responses Received",
+ "EventCode": "0x5c",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPSFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a a transaction with the opcode type RspSFwd Snoop Response was received which indicates a remote caching agent forwarded the data but held on to its current copy. This is common for data and code reads that hit in a remote socket in E (Exclusive) or F (Forward) state.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : Rsp*WB",
+ "EventCode": "0x5c",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPWB",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received : Rsp*WB : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for a snoop response of RspIWB or RspSWB. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspCnflct",
+ "EventCode": "0x5d",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPCNFLCT",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : RspCnflct : Number of snoop responses received for a Local request : Filters for snoops responses of RspConflict to local CA requests. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspFwd",
+ "EventCode": "0x5d",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : RspFwd : Number of snoop responses received for a Local request : Filters for a snoop response of RspFwd to local CA requests. This snoop response is only possible for RdCur when a snoop HITM/E in a remote caching agent and it directly forwards data to a requestor without changing the requestor's cache line state.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : Rsp*FWD*WB",
+ "EventCode": "0x5d",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWDWB",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : Rsp*FWD*WB : Number of snoop responses received for a Local request : Filters for a snoop response of Rsp*Fwd*WB to local CA requests. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspI",
+ "EventCode": "0x5d",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPI",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : RspI : Number of snoop responses received for a Local request : Filters for snoops responses of RspI to local CA requests. RspI is returned when the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO hits non-modified data).",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspIFwd",
+ "EventCode": "0x5d",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPIFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : RspIFwd : Number of snoop responses received for a Local request : Filters for snoop responses of RspIFwd to local CA requests. This is returned when a remote caching agent forwards data and the requesting agent is able to acquire the data in E or M states. This is commonly returned with RFO transactions. It can be either a HitM or a HitFE.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspS",
+ "EventCode": "0x5d",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPS",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : RspS : Number of snoop responses received for a Local request : Filters for snoop responses of RspS to local CA requests. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspSFwd",
+ "EventCode": "0x5d",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPSFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : RspSFwd : Number of snoop responses received for a Local request : Filters for a snoop response of RspSFwd to local CA requests. This is returned when a remote caching agent forwards data but holds on to its current copy. This is common for data and code reads that hit in a remote socket in E or F state.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : Rsp*WB",
+ "EventCode": "0x5d",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPWB",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : Rsp*WB : Number of snoop responses received for a Local request : Filters for a snoop response of RspIWB or RspSWB to local CA requests. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : MtoI RspIDataM",
+ "EventCode": "0x6b",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.MTOI_RSPDATAM",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : MtoI RspIFwdM",
+ "EventCode": "0x6b",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.MTOI_RSPIFWDM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : Pull Data Partial - Hit LLC",
+ "EventCode": "0x6b",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.PULLDATAPTL_HITLLC",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : Pull Data Partial - Hit SF",
+ "EventCode": "0x6b",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.PULLDATAPTL_HITSF",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : RspIFwdPtl Hit LLC",
+ "EventCode": "0x6b",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.RSPIFWDMPTL_HITLLC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : RspIFwdPtl Hit SF",
+ "EventCode": "0x6b",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.RSPIFWDMPTL_HITSF",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0xc001ffff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DDR Access",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DDR Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : SF/LLC Evictions",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.EVICT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : SF/LLC Evictions : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Hits",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.HIT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just Hits : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All from Local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; All locally initiated requests from IA Cores",
+ "UMask": "0xc001ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts;CLFlush from Local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; CLFlush events that are initiated from the Core",
+ "UMask": "0xc8c7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts;CLFlushOpt from Local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSHOPT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; CLFlushOpt events that are initiated from the Core",
+ "UMask": "0xc8d7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; CRd from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Code read from local IA that misses in the snoop filter",
+ "UMask": "0xc80fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; CRd Pref from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Code read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc88fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
+ "UMask": "0xc817ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRDPTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd PTEs issued by iA Cores due to a page walk : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc837ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Opt from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read opt from local IA that misses in the snoop filter",
+ "UMask": "0xc827ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Opt Pref from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read opt prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc8a7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Pref from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc897ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Hits from Local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0xc001fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; CRd hits from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Code read from local IA that hits in the snoop filter",
+ "UMask": "0xc80ffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; CRd Pref hits from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Code read prefetch from local IA that hits in the snoop filter",
+ "UMask": "0xc88ffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All requests issued from IA cores to CXL accelerator memory regions that hit the LLC.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c0018101",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_HIT_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c0008101",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd hits from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read from local IA that hits in the snoop filter",
+ "UMask": "0xc817fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores that Hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRDPTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd PTEs issued by iA Cores due to page walks that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc837fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Opt hits from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read opt from local IA that hits in the snoop filter",
+ "UMask": "0xc827fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Opt Pref hits from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read opt prefetch from local IA that hits in the snoop filter",
+ "UMask": "0xc8a7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Pref hits from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read prefetch from local IA that hits in the snoop filter",
+ "UMask": "0xc897fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by iA Cores that Hit LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc47fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefCode hits from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFCODE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Last level cache prefetch code read from local IA that hits in the snoop filter",
+ "UMask": "0xcccffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefData hits from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFDATA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Last level cache prefetch data read from local IA that hits in the snoop filter",
+ "UMask": "0xccd7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefRFO hits from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFRFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Last level cache prefetch read for ownership from local IA that hits in the snoop filter",
+ "UMask": "0xccc7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO hits from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Read for ownership from local IA that hits in the snoop filter",
+ "UMask": "0xc807fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO Pref hits from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Read for ownership prefetch from local IA that hits in the snoop filter",
+ "UMask": "0xc887fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts;ItoM from Local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; ItoM events that are initiated from the Core",
+ "UMask": "0xcc47ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd47ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefCode from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFCODE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Last level cache prefetch code read from local IA.",
+ "UMask": "0xcccfff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefData from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFDATA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Last level cache prefetch data read from local IA.",
+ "UMask": "0xccd7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefRFO from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFRFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Last level cache prefetch read for ownership from local IA that misses in the snoop filter",
+ "UMask": "0xccc7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; misses from Local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for CRd misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode CRd",
+ "UMask": "0xc80ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRDMORPH_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c80b8201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc80efe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; CRd Pref misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Code read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc88ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc88efe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc88f7e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc80f7e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All requests issued from IA cores to CXL accelerator memory regions that miss the LLC.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c0018201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c0008201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for DRd misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd",
+ "UMask": "0xc817fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "DRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRDMORPH_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8138201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRDPTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd PTEs issued by iA Cores due to a page walk that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc837fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "DRds issued from an IA core which miss the L3 and target memory in a CXL type 2 memory expander card.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8178201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c8168201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_CXL_EXP_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_CXL_EXP_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x20c8168201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for DRds issued by IA Cores targeting DDR Mem that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd, and which target DDR memory",
+ "UMask": "0xc8178601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for DRd misses from local IA targeting local memory",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd, and which target local memory",
+ "UMask": "0xc816fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8168601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8168a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Opt misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read opt from local IA that misses in the snoop filter",
+ "UMask": "0xc827fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c8268201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Opt Pref misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read opt prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc8a7fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c8a68201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for DRds issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd, and which target PMM memory",
+ "UMask": "0xc8178a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for DRd Pref misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRD_PREF",
+ "UMask": "0xc897fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "L2 data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8978201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c8968201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8978601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for DRd Pref misses from local IA targeting local memory",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRD_PREF, and target local memory",
+ "UMask": "0xc896fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8968601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8968a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8978a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for DRd Pref misses from local IA targeting remote memory",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRD_PREF, and target remote memory",
+ "UMask": "0xc8977e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8970601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8970a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for DRd misses from local IA targeting remote memory",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd, and target remote memory",
+ "UMask": "0xc8177e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8170601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8170a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by iA Cores that Missed LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc47fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefCode misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFCODE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Last level cache prefetch code read from local IA that misses in the snoop filter",
+ "UMask": "0xcccffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "LLC Prefetch Code transactions issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFCODE_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10cccf8201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefData misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Last level cache prefetch data read from local IA that misses in the snoop filter",
+ "UMask": "0xccd7fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "LLC data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10ccd78201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10ccd68201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefRFO misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Last level cache prefetch read for ownership from local IA that misses in the snoop filter",
+ "UMask": "0xccc7fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "L2 RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8878201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c8868201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8668601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8668a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86e8601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86e8a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8670601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8670a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86f0601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86f0a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Read for ownership from local IA that misses in the snoop filter",
+ "UMask": "0xc807fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RFO and L2 RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFOMORPH_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8038201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RFOs issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8078201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c8068201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_CXL_EXP_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_CXL_EXP_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x20c8068201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts RFO misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Read for ownership from local IA that misses in the snoop filter",
+ "UMask": "0xc806fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO pref misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Read for ownership prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc887fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "LLC RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10ccc78201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10ccc68201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO prefetch misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Read for ownership prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc886fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO prefetch misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Read for ownership prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc8877e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts Read for ownership from local IA that misses in the snoop filter",
+ "UMask": "0xc8077e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : UCRdFs issued by iA Cores that Missed LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_UCRDF",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc877de01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLF issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc867fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8678601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8678a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86f8601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86f8a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WiLs issued by iA Cores that Missed LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WIL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc87fde01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Read for ownership from local IA that misses in the snoop filter",
+ "UMask": "0xc807ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO pref from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Read for ownership prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc887ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts;SpecItoM from Local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_SPECITOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; SpecItoM events that are initiated from the Core",
+ "UMask": "0xcc57ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBEFtoEs issued by an IA Core. Non Modified Write Backs",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOE",
+ "PerPkg": "1",
+ "PublicDescription": "WbEFtoEs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc3fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBEFtoEs issued by an IA Core. Non Modified Write Backs",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOI",
+ "PerPkg": "1",
+ "PublicDescription": "WbEFtoEs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc37ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBEFtoEs issued by an IA Core. Non Modified Write Backs",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOE",
+ "PerPkg": "1",
+ "PublicDescription": "WbEFtoEs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc2fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WbMtoIs issued by an iA Cores. Modified Write Backs",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "WbMtoIs issued by iA Cores . (Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc27ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBEFtoEs issued by an IA Core. Non Modified Write Backs",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBSTOI",
+ "PerPkg": "1",
+ "PublicDescription": "WbEFtoEs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc67ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WCIL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLF issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WCILF",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc867ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All from local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CLFlushes issued by IO Devices",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8c3ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Hits from local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; ItoM hits from local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc43fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd43fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RdCur and FsRdCur hits from local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8f3fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO hits from local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc803fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for ItoM from local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IO with the opcode ItoM",
+ "UMask": "0xcc43ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for ItoMCacheNears from IO devices.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IO devices with the opcode ItoMCacheNears. This event indicates a partial write request.",
+ "UMask": "0xcd43ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses from local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; ItoM misses from local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc43fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd43fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RdCur and FsRdCur misses from local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8f3fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO misses from local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc803fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for RdCur from local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IO with the opcode RdCur",
+ "UMask": "0xc8f3ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO from local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc803ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WbMtoIs issued by IO Devices",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc23ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : IPQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : IPQ : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : IRQ - iA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IRQ_IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : IRQ - iA : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : From an iA Core",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : IRQ - Non iA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IRQ_NON_IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : IRQ - Non iA : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just ISOC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ISOC",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just ISOC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Local Targets",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOCAL_TGT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just Local Targets : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All from Local iA and IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All from Local iA and IO : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : All locally initiated requests",
+ "UMask": "0xc000ff05",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All from Local iA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All from Local iA : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : All locally initiated requests from iA Cores",
+ "UMask": "0xc000ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All from Local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_IO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All from Local IO : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : All locally generated IO traffic",
+ "UMask": "0xc000ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Match the Opcode in b[29:19] of the extended umask field",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MATCH_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Match the Opcode in b[29:19] of the extended umask field : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Misses",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just Misses : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : MMCFG Access",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MMCFG",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : MMCFG Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : MMIO Access",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MMIO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : MMIO Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just NearMem",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.NEARMEM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just NearMem : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just NonCoherent",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.NONCOH",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just NonCoherent : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just NotNearMem",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.NOT_NEARMEM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just NotNearMem : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PMM Access",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PM Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Match the PreMorphed Opcode in b[29:19] of the extended umask field",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PREMORPH_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Match the PreMorphed Opcode in b[29:19] of the extended umask field : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PRQ - IOSF",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PRQ_IOSF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PRQ - IOSF : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : From a PCIe Device",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PRQ - Non IOSF",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PRQ_NON_IOSF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PRQ - Non IOSF : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Remote Targets",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.REMOTE_TGT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just Remote Targets : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All from Remote",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.REM_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All from Remote : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : All remote requests (e.g. snoops, writebacks) that came from remote sockets",
+ "UMask": "0xc001ffc8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All Snoops from Remote",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.REM_SNPS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All Snoops from Remote : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : All snoops to this LLC that came from remote sockets",
+ "UMask": "0xc001ff08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RRQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.RRQ",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RRQ : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All Snoops from Remote",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.SNPS_FROM_REM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. All snoops to this LLC that came from remote sockets.",
+ "UMask": "0xc001ff08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.WBQ",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WBQ : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0xc001ffff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DDR Access",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DDR Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : SF/LLC Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.EVICT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : SF/LLC Evictions : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Hits",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.HIT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just Hits : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; All from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CLFlushes issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CLFlushes issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8c7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CLFlushOpts issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSHOPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CLFlushOpts issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8d7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; CRd from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Code read from local IA that misses in the snoop filter",
+ "UMask": "0xc80fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; CRd Pref from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Code read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc88fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read from local IA that misses in the snoop filter",
+ "UMask": "0xc817ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRDPTE",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc837ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Opt from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read opt from local IA that misses in the snoop filter",
+ "UMask": "0xc827ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Opt Pref from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read opt prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc8a7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Pref from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc897ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Hits from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; CRd hits from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Code read from local IA that hits in the snoop filter",
+ "UMask": "0xc80ffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; CRd Pref hits from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Code read prefetch from local IA that hits in the snoop filter",
+ "UMask": "0xc88ffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for All requests issued from IA cores to CXL accelerator memory regions that hit the LLC.",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c0018101",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CXL_ACC_LOCAL",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c0008101",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd hits from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read from local IA that hits in the snoop filter",
+ "UMask": "0xc817fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRDPTE",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc837fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Opt hits from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read opt from local IA that hits in the snoop filter",
+ "UMask": "0xc827fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Opt Pref hits from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read opt prefetch from local IA that hits in the snoop filter",
+ "UMask": "0xc8a7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Pref hits from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read prefetch from local IA that hits in the snoop filter",
+ "UMask": "0xc897fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by iA Cores that Hit LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores that Hit LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc47fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefCode hits from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFCODE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Last level cache prefetch code read from local IA that hits in the snoop filter",
+ "UMask": "0xcccffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefData hits from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFDATA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Last level cache prefetch data read from local IA that hits in the snoop filter",
+ "UMask": "0xccd7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefRFO hits from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFRFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Last level cache prefetch read for ownership from local IA that hits in the snoop filter",
+ "UMask": "0xccc7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO hits from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Read for ownership from local IA that hits in the snoop filter",
+ "UMask": "0xc807fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO Pref hits from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Read for ownership prefetch from local IA that hits in the snoop filter",
+ "UMask": "0xc887fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc47ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMCacheNears issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd47ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefCode from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFCODE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Last level cache prefetch data read from local IA.",
+ "UMask": "0xcccfff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefData from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFDATA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Last level cache prefetch data read from local IA that misses in the snoop filter",
+ "UMask": "0xccd7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefRFO from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFRFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Last level cache prefetch read for ownership from local IA that misses in the snoop filter",
+ "UMask": "0xccc7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses from Local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; CRd misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Code read from local IA that misses in the snoop filter",
+ "UMask": "0xc80ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for CRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRDMORPH_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c80b8201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc80efe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; CRd Pref misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Code read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc88ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc88efe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc88f7e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc80f7e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for All requests issued from IA cores to CXL accelerator memory regions that miss the LLC.",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c0018201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CXL_ACC_LOCAL",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c0008201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRd misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles for elements in the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd",
+ "UMask": "0xc817fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRDMORPH_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8138201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRDPTE",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc837fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 memory expander card.",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8178201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_CXL_ACC_LOCAL",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c8168201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_CXL_EXP_LOCAL",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_CXL_EXP_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x20c8168201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRds issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles for elements in the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd, and which target DDR memory",
+ "UMask": "0xc8178601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRd misses from local IA targeting local memory",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles for elements in the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd, and which target local memory",
+ "UMask": "0xc816fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8168601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8168a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Opt misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read opt from local IA that misses in the snoop filter",
+ "UMask": "0xc827fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_CXL_ACC_LOCAL",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c8268201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Opt Pref misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read opt prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc8a7fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF_CXL_ACC_LOCAL",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c8a68201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRds issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles for elements in the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd, and which target PMM memory",
+ "UMask": "0xc8178a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Pref misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc897fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for L2 data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8978201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_CXL_ACC_LOCAL",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c8968201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8978601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Pref misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc896fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8968601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8968a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8978a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Pref misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc8977e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8970601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8970a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRd misses from local IA targeting remote memory",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles for elements in the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd, and which target remote memory",
+ "UMask": "0xc8177e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8170601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8170a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by iA Cores that Missed LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores that Missed LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc47fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefCode misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFCODE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Last level cache prefetch code read from local IA that misses in the snoop filter",
+ "UMask": "0xcccffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for LLC Prefetch Code transactions issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFCODE_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10cccf8201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefData misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Last level cache prefetch data read from local IA that misses in the snoop filter",
+ "UMask": "0xccd7fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for LLC data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10ccd78201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA_CXL_ACC_LOCAL",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10ccd68201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefRFO misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Last level cache prefetch read for ownership from local IA that misses in the snoop filter",
+ "UMask": "0xccc7fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for L2 RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8878201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO_CXL_ACC_LOCAL",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c8868201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8668601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8668a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86e8601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86e8a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8670601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8670a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86f0601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86f0a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Read for ownership from local IA that misses in the snoop filter",
+ "UMask": "0xc807fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for RFO and L2 RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFOMORPH_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8038201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for RFOs issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8078201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_CXL_ACC_LOCAL",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c8068201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_CXL_EXP_LOCAL",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_CXL_EXP_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x20c8068201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Read for ownership from local IA that misses in the snoop filter",
+ "UMask": "0xc806fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO prefetch misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Read for ownership prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc887fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for LLC RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10ccc78201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_CXL_ACC_LOCAL",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10ccc68201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO prefetch misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Read for ownership prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc886fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO prefetch misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Read for ownership prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc8877e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Read for ownership from local IA that misses in the snoop filter",
+ "UMask": "0xc8077e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : UCRdFs issued by iA Cores that Missed LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_UCRDF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : UCRdFs issued by iA Cores that Missed LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc877de01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLF issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLF issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc867fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8678601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8678a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86f8601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86f8a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WiLs issued by iA Cores that Missed LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WIL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WiLs issued by iA Cores that Missed LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc87fde01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Read for ownership from local IA that misses in the snoop filter",
+ "UMask": "0xc807ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO prefetch from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Read for ownership prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc887ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : SpecItoMs issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_SPECITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : SpecItoMs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc57ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WbMtoIs issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WbMtoIs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc27ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCIL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLF issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCILF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLF issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc867ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; All from local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CLFlushes issued by IO Devices",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CLFlushes issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8c3ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Hits from local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; ITOM hits from local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc43fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd43fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RdCur and FsRdCur hits from local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8f3fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO hits from local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc803fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; ITOM from local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc43ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOMCACHENEAR",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd43ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses from local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; ITOM misses from local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc43fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd43fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RdCur and FsRdCur misses from local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8f3fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO misses from local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc803fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RdCur and FsRdCur from local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8f3ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; ItoM from local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc803ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WbMtoIs issued by IO Devices",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WbMtoIs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc23ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : IPQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : IPQ : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : IRQ - iA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ_IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : IRQ - iA : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : From an iA Core",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : IRQ - Non iA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ_NON_IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : IRQ - Non iA : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just ISOC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ISOC",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just ISOC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Local Targets",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOCAL_TGT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just Local Targets : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All from Local iA and IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All from Local iA and IO : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : All locally initiated requests",
+ "UMask": "0xc000ff05",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All from Local iA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All from Local iA : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : All locally initiated requests from iA Cores",
+ "UMask": "0xc000ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All from Local IO : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : All locally generated IO traffic",
+ "UMask": "0xc000ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Match the Opcode in b[29:19] of the extended umask field",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MATCH_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Match the Opcode in b[29:19] of the extended umask field : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Misses",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just Misses : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : MMCFG Access",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MMCFG",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : MMCFG Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : MMIO Access",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MMIO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : MMIO Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just NearMem",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.NEARMEM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just NearMem : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just NonCoherent",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.NONCOH",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just NonCoherent : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just NotNearMem",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.NOT_NEARMEM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just NotNearMem : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PMM Access",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PMM Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Match the PreMorphed Opcode in b[29:19] of the extended umask field",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PREMORPH_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Match the PreMorphed Opcode in b[29:19] of the extended umask field : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PRQ - IOSF",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PRQ - IOSF : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : From a PCIe Device",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PRQ - Non IOSF",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ_NON_IOSF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PRQ - Non IOSF : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Remote Targets",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.REMOTE_TGT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just Remote Targets : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All from Remote",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.REM_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All from Remote : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : All remote requests (e.g. snoops, writebacks) that came from remote sockets",
+ "UMask": "0xc001ffc8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All Snoops from Remote",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.REM_SNPS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All Snoops from Remote : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : All snoops to this LLC that came from remote sockets",
+ "UMask": "0xc001ff08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RRQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.RRQ",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RRQ : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; All Snoops from Remote",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.SNPS_FROM_REM",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. All snoops to this LLC that came from remote sockets.",
+ "UMask": "0xc001ff08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WBQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.WBQ",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WBQ : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbPushMtoI : Pushed to LLC",
+ "EventCode": "0x56",
+ "EventName": "UNC_CHA_WB_PUSH_MTOI.LLC",
+ "PerPkg": "1",
+ "PublicDescription": "WbPushMtoI : Pushed to LLC : Counts the number of times when the CHA was received WbPushMtoI : Counts the number of times when the CHA was able to push WbPushMToI to LLC",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbPushMtoI : Pushed to Memory",
+ "EventCode": "0x56",
+ "EventName": "UNC_CHA_WB_PUSH_MTOI.MEM",
+ "PerPkg": "1",
+ "PublicDescription": "WbPushMtoI : Pushed to Memory : Counts the number of times when the CHA was received WbPushMtoI : Counts the number of times when the CHA was unable to push WbPushMToI to LLC (hence pushed it to MEM)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC0",
+ "EventCode": "0x5a",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC0",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC0 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 0 only.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC1",
+ "EventCode": "0x5a",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC1",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC1 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 1 only.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC2",
+ "EventCode": "0x5a",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC2",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC2 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 2 only.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC3",
+ "EventCode": "0x5a",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC3",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC3 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 3 only.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC4",
+ "EventCode": "0x5a",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC4",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC4 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 4 only.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC5",
+ "EventCode": "0x5a",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC5",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC5 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 5 only.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Dropped (on 0?) - Conflict",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.DROP0_CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "XPT Prefetches : Dropped (on 0?) - Conflict : Number of XPT prefetches dropped due to AD CMS write port contention",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Dropped (on 0?) - No Credits",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.DROP0_NOCRD",
+ "PerPkg": "1",
+ "PublicDescription": "XPT Prefetches : Dropped (on 0?) - No Credits : Number of XPT prefetches dropped due to lack of XPT AD egress credits",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Dropped (on 1?) - Conflict",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.DROP1_CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "XPT Prefetches : Dropped (on 1?) - Conflict : Number of XPT prefetches dropped due to AD CMS write port contention",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Dropped (on 1?) - No Credits",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.DROP1_NOCRD",
+ "PerPkg": "1",
+ "PublicDescription": "XPT Prefetches : Dropped (on 1?) - No Credits : Number of XPT prefetches dropped due to lack of XPT AD egress credits",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Sent (on 0?)",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.SENT0",
+ "PerPkg": "1",
+ "PublicDescription": "XPT Prefetches : Sent (on 0?) : Number of XPT prefetches sent",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Sent (on 1?)",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.SENT1",
+ "PerPkg": "1",
+ "PublicDescription": "XPT Prefetches : Sent (on 1?) : Number of XPT prefetches sent",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ }
+]
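
The CHA TOR occupancy events added above are normally read together with a matching TOR inserts subevent: by Little's law, occupancy divided by inserts gives the average number of CHA cycles a matching request stays in the TOR. The sketch below is illustrative only and not part of the patch; the helper name, the UNC_CHA_TOR_INSERTS pairing, and the conversion of CHA cycles to nanoseconds via a CHA clockticks count are assumptions made for the example rather than definitions from this file.

# Illustrative sketch, not part of the patch.  Derives an average TOR
# residency from an occupancy count and its matching inserts count.
# The event names in the comments are assumptions for the example.

def avg_tor_latency_ns(occupancy, inserts, cha_clockticks, wall_time_ns):
    """Average time, in nanoseconds, a matching request spends in the TOR.

    occupancy      -- a UNC_CHA_TOR_OCCUPANCY.<filter> count for the run
    inserts        -- the matching UNC_CHA_TOR_INSERTS.<filter> count (assumed event)
    cha_clockticks -- CHA clockticks counted over the same run (assumed event)
    wall_time_ns   -- elapsed wall-clock time of the run in nanoseconds
    """
    if inserts == 0 or cha_clockticks == 0:
        return 0.0
    cycles_per_request = occupancy / inserts      # average residency in CHA cycles
    ns_per_cycle = wall_time_ns / cha_clockticks  # observed CHA cycle time
    return cycles_per_request * ns_per_cycle

# Example with made-up counter values:
print(avg_tor_latency_ns(occupancy=1.2e9, inserts=3.0e7,
                         cha_clockticks=2.0e9, wall_time_ns=1.0e9))

The same ratio applies to any occupancy/inserts pair in this file, provided both counters are programmed with the same umask filter.
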
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cxl.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cxl.json
new file mode 100644
index 000000000000..f3e84fd88de3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cxl.json
@@ -0,0 +1,450 @@
+[
+ {
+ "BriefDescription": "Counts the number of lfclk ticks",
+ "EventCode": "0x01",
+ "EventName": "UNC_CXLCM_CLOCKTICKS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Mem Rxx AGF 0",
+ "EventCode": "0x43",
+ "EventName": "UNC_CXLCM_RxC_AGF_INSERTS.CACHE_DATA",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Req AGF0",
+ "EventCode": "0x43",
+ "EventName": "UNC_CXLCM_RxC_AGF_INSERTS.CACHE_REQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Rsp AGF",
+ "EventCode": "0x43",
+ "EventName": "UNC_CXLCM_RxC_AGF_INSERTS.CACHE_REQ1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Data AGF",
+ "EventCode": "0x43",
+ "EventName": "UNC_CXLCM_RxC_AGF_INSERTS.CACHE_RSP0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Rsp AGF",
+ "EventCode": "0x43",
+ "EventName": "UNC_CXLCM_RxC_AGF_INSERTS.CACHE_RSP1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Req AGF 1",
+ "EventCode": "0x43",
+ "EventName": "UNC_CXLCM_RxC_AGF_INSERTS.MEM_DATA",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Mem Data AGF",
+ "EventCode": "0x43",
+ "EventName": "UNC_CXLCM_RxC_AGF_INSERTS.MEM_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of Flits with AK set",
+ "EventCode": "0x4b",
+ "EventName": "UNC_CXLCM_RxC_FLITS.AK_HDR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of Flits with BE set",
+ "EventCode": "0x4b",
+ "EventName": "UNC_CXLCM_RxC_FLITS.BE_HDR",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of control flits received",
+ "EventCode": "0x4b",
+ "EventName": "UNC_CXLCM_RxC_FLITS.CTRL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of Headerless flits received",
+ "EventCode": "0x4b",
+ "EventName": "UNC_CXLCM_RxC_FLITS.NO_HDR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of protocol flits received",
+ "EventCode": "0x4b",
+ "EventName": "UNC_CXLCM_RxC_FLITS.PROT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of Flits with SZ set",
+ "EventCode": "0x4b",
+ "EventName": "UNC_CXLCM_RxC_FLITS.SZ_HDR",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of flits received",
+ "EventCode": "0x4b",
+ "EventName": "UNC_CXLCM_RxC_FLITS.VALID",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of valid messages in the flit",
+ "EventCode": "0x4b",
+ "EventName": "UNC_CXLCM_RxC_FLITS.VALID_MSG",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of CRC errors detected",
+ "EventCode": "0x40",
+ "EventName": "UNC_CXLCM_RxC_MISC.CRC_ERRORS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of Init flits sent",
+ "EventCode": "0x40",
+ "EventName": "UNC_CXLCM_RxC_MISC.INIT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of LLCRD flits sent",
+ "EventCode": "0x40",
+ "EventName": "UNC_CXLCM_RxC_MISC.LLCRD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of Retry flits sent",
+ "EventCode": "0x40",
+ "EventName": "UNC_CXLCM_RxC_MISC.RETRY",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of cycles the Packing Buffer is Full",
+ "EventCode": "0x52",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_FULL.CACHE_DATA",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of cycles the Packing Buffer is Full",
+ "EventCode": "0x52",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_FULL.CACHE_REQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of cycles the Packing Buffer is Full",
+ "EventCode": "0x52",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_FULL.CACHE_RSP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of cycles the Packing Buffer is Full",
+ "EventCode": "0x52",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_FULL.MEM_DATA",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of cycles the Packing Buffer is Full",
+ "EventCode": "0x52",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_FULL.MEM_REQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Data Packing buffer",
+ "EventCode": "0x41",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_INSERTS.CACHE_DATA",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Req Packing buffer",
+ "EventCode": "0x41",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_INSERTS.CACHE_REQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Rsp Packing buffer",
+ "EventCode": "0x41",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_INSERTS.CACHE_RSP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Mem Data Packing buffer",
+ "EventCode": "0x41",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_INSERTS.MEM_DATA",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Mem Rxx Packing buffer",
+ "EventCode": "0x41",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_INSERTS.MEM_REQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of cycles of Not Empty for Cache Data Packing buffer",
+ "EventCode": "0x42",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_NE.CACHE_DATA",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of cycles of Not Empty for Cache Req Packing buffer",
+ "EventCode": "0x42",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_NE.CACHE_REQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of cycles of Not Empty for Cache Rsp Packing buffer",
+ "EventCode": "0x42",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_NE.CACHE_RSP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of cycles of Not Empty for Mem Data Packing buffer",
+ "EventCode": "0x42",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_NE.MEM_DATA",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of cycles of Not Empty for Mem Rxx Packing buffer",
+ "EventCode": "0x42",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_NE.MEM_REQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of Flits with AK set",
+ "EventCode": "0x05",
+ "EventName": "UNC_CXLCM_TxC_FLITS.AK_HDR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of Flits with BE set",
+ "EventCode": "0x05",
+ "EventName": "UNC_CXLCM_TxC_FLITS.BE_HDR",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of control flits packed",
+ "EventCode": "0x05",
+ "EventName": "UNC_CXLCM_TxC_FLITS.CTRL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of Headerless flits packed",
+ "EventCode": "0x05",
+ "EventName": "UNC_CXLCM_TxC_FLITS.NO_HDR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of protocol flits packed",
+ "EventCode": "0x05",
+ "EventName": "UNC_CXLCM_TxC_FLITS.PROT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of Flits with SZ set",
+ "EventCode": "0x05",
+ "EventName": "UNC_CXLCM_TxC_FLITS.SZ_HDR",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of flits packed",
+ "EventCode": "0x05",
+ "EventName": "UNC_CXLCM_TxC_FLITS.VALID",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Data Packing buffer",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.CACHE_DATA",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Req Packing buffer",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.CACHE_REQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Rsp1 Packing buffer",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.CACHE_REQ1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Rsp0 Packing buffer",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.CACHE_RSP0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Req Packing buffer",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.CACHE_RSP1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Mem Data Packing buffer",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.MEM_DATA",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Mem Rxx Packing buffer",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.MEM_REQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Counts the number of uclk ticks",
+ "EventCode": "0x01",
+ "EventName": "UNC_CXLDP_CLOCKTICKS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CXLDP"
+ },
+ {
+ "BriefDescription": "Number of Allocation to M2S Data AGF",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLDP_TxC_AGF_INSERTS.M2S_DATA",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CXLDP"
+ },
+ {
+ "BriefDescription": "Number of Allocation to M2S Req AGF",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLDP_TxC_AGF_INSERTS.M2S_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CXLDP"
+ },
+ {
+ "BriefDescription": "Number of Allocation to U2C Data AGF",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLDP_TxC_AGF_INSERTS.U2C_DATA",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CXLDP"
+ },
+ {
+ "BriefDescription": "Number of Allocation to U2C Req AGF",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLDP_TxC_AGF_INSERTS.U2C_REQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CXLDP"
+ },
+ {
+ "BriefDescription": "Number of Allocation to U2C Rsp AGF 0",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLDP_TxC_AGF_INSERTS.U2C_RSP0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CXLDP"
+ },
+ {
+ "BriefDescription": "Number of Allocation to U2C Rsp AGF 1",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLDP_TxC_AGF_INSERTS.U2C_RSP1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CXLDP"
+ }
+]
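
A note on the CXLCM flit events just added: their UMask values are single-bit selectors (VALID 0x1, PROT 0x2, CTRL 0x4, NO_HDR 0x8, AK_HDR 0x10, BE_HDR 0x20, SZ_HDR 0x40 and, on the receive side, VALID_MSG 0x80), so a programmed umask simply ORs together the flit categories to be counted. The sketch below is illustrative only and not part of the patch; it decodes which receive-side subevents a given umask value selects, using the bit assignments from the JSON entries above.

# Illustrative sketch, not part of the patch.  Decodes which
# UNC_CXLCM_RxC_FLITS.* categories a umask value selects, using the
# single-bit UMask assignments listed in the JSON entries above.

RXC_FLIT_BITS = {
    0x01: "VALID",      # any flit received
    0x02: "PROT",       # protocol flits
    0x04: "CTRL",       # control flits
    0x08: "NO_HDR",     # headerless flits
    0x10: "AK_HDR",     # flits with AK set
    0x20: "BE_HDR",     # flits with BE set
    0x40: "SZ_HDR",     # flits with SZ set
    0x80: "VALID_MSG",  # valid messages within a flit
}

def decode_rxc_flits_umask(umask):
    """Return the flit categories selected by a UNC_CXLCM_RxC_FLITS umask."""
    return [name for bit, name in RXC_FLIT_BITS.items() if umask & bit]

# 0x30 would count flits with either the AK or the BE header bit set.
print(decode_rxc_flits_umask(0x30))   # ['AK_HDR', 'BE_HDR']
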
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-interconnect.json
new file mode 100644
index 000000000000..09d840c7da4c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-interconnect.json
@@ -0,0 +1,6199 @@
+[
+ {
+ "BriefDescription": "Total IRP occupancy of inbound read and write requests to coherent memory.",
+ "EventCode": "0x0f",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
+ "PerPkg": "1",
+ "PublicDescription": "Total IRP occupancy of inbound read and write requests to coherent memory. This is effectively the sum of read occupancy and write occupancy.",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "IRP Clockticks",
+ "EventCode": "0x01",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of IRP clock cycles while the event is enabled",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF RF full",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_FAF_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF - request insert from TC.",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_FAF_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF occupancy",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_FAF_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF allocation -- sent to ADQ",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_FAF_TRANSACTIONS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": ": All Inserts Outbound (BL, AK, Snoops)",
+ "EventCode": "0x20",
+ "EventName": "UNC_I_IRP_ALL.EVICTS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": ": All Inserts Inbound (p2p + faf + cset)",
+ "EventCode": "0x20",
+ "EventName": "UNC_I_IRP_ALL.INBOUND_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": ": All Inserts Outbound (BL, AK, Snoops)",
+ "EventCode": "0x20",
+ "EventName": "UNC_I_IRP_ALL.OUTBOUND_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Atomic Transactions as Secondary",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Read Transactions as Secondary",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.2ND_RD_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Write Transactions as Secondary",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.2ND_WR_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Fastpath Rejects",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.FAST_REJ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Fastpath Requests",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.FAST_REQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Fastpath Transfers From Primary to Secondary",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.FAST_XFER",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Prefetch Ack Hints From Primary to Secondary",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.PF_ACK_HINT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Slow path fwpf didn't find prefetch",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.SLOWPATH_FWPF_NO_PRF",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Lost Forward",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.LOST_FWD",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Lost Forward : Snoop pulled away ownership before a write was committed",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Received Invalid",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Received Invalid : Secondary received a transfer that did not have sufficient MESI state",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Received Valid",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Received Valid : Secondary received a transfer that did have sufficient MESI state",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Slow Transfer of E Line",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SLOW_E",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Slow Transfer of E Line : Secondary received a transfer that did have sufficient MESI state",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Slow Transfer of I Line",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SLOW_I",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Slow Transfer of I Line : Snoop took cacheline ownership before write from data was committed.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Slow Transfer of M Line",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SLOW_M",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Slow Transfer of M Line : Snoop took cacheline ownership before write from data was committed.",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Slow Transfer of S Line",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SLOW_S",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Slow Transfer of S Line : Secondary received a transfer that did not have sufficient MESI state",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit M, E, S or I line in the IIO",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit M, E, S or I line in the IIO",
+ "UMask": "0x7e",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit E or S line in the IIO cache",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_ES",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit E or S line in the IIO cache",
+ "UMask": "0x74",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit I line in the IIO cache",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_I",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit I line in the IIO cache",
+ "UMask": "0x72",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit M line in the IIO cache",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_M",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit M line in the IIO cache",
+ "UMask": "0x78",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that miss the IIO cache",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that miss the IIO cache",
+ "UMask": "0x71",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : Hit E or S",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_ES",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : Hit I",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_I",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : Hit M",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_M",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : Miss",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.MISS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : SnpCode",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPCODE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : SnpData",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPDATA",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : SnpInv",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPINV",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound write (fast path) requests received by the IRP.",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.WR_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "Inbound write (fast path) requests to coherent memory, received by the IRP resulting in write ownership requests issued by IRP to the mesh.",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations",
+ "EventCode": "0x0b",
+ "EventName": "UNC_I_TxC_AK_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Cycles Full",
+ "EventCode": "0x05",
+ "EventName": "UNC_I_TxC_BL_DRS_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Inserts",
+ "EventCode": "0x02",
+ "EventName": "UNC_I_TxC_BL_DRS_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Occupancy",
+ "EventCode": "0x08",
+ "EventName": "UNC_I_TxC_BL_DRS_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Cycles Full",
+ "EventCode": "0x06",
+ "EventName": "UNC_I_TxC_BL_NCB_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Inserts",
+ "EventCode": "0x03",
+ "EventName": "UNC_I_TxC_BL_NCB_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Occupancy",
+ "EventCode": "0x09",
+ "EventName": "UNC_I_TxC_BL_NCB_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Cycles Full",
+ "EventCode": "0x07",
+ "EventName": "UNC_I_TxC_BL_NCS_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Inserts",
+ "EventCode": "0x04",
+ "EventName": "UNC_I_TxC_BL_NCS_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Occupancy",
+ "EventCode": "0x0a",
+ "EventName": "UNC_I_TxC_BL_NCS_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_TxR2_AD01_STALL_CREDIT_CYCLES",
+ "EventCode": "0x1c",
+ "EventName": "UNC_I_TxR2_AD01_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": ": Counts the number times when it is not possible to issue a request to the M2PCIe because there are no Egress Credits available on AD0, A1 or AD0AD1 both. Stalls on both AD0 and AD1 will count as 2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD0 Egress Credits Stalls",
+ "EventCode": "0x1a",
+ "EventName": "UNC_I_TxR2_AD0_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "No AD0 Egress Credits Stalls : Counts the number times when it is not possible to issue a request to the M2PCIe because there are no AD0 Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD1 Egress Credits Stalls",
+ "EventCode": "0x1b",
+ "EventName": "UNC_I_TxR2_AD1_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "No AD1 Egress Credits Stalls : Counts the number times when it is not possible to issue a request to the M2PCIe because there are no AD1 Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No BL Egress Credit Stalls",
+ "EventCode": "0x1d",
+ "EventName": "UNC_I_TxR2_BL_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "No BL Egress Credit Stalls : Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "EventCode": "0x0d",
+ "EventName": "UNC_I_TxS_DATA_INSERTS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Outbound Read Requests : Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "EventCode": "0x0e",
+ "EventName": "UNC_I_TxS_DATA_INSERTS_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Outbound Read Requests : Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Request Queue Occupancy",
+ "EventCode": "0x0c",
+ "EventName": "UNC_I_TxS_REQUEST_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Outbound Request Queue Occupancy : Accumulates the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjunction with the allocations event in order to calculate average latency of outbound requests.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "M2M Clockticks",
+ "EventCode": "0x01",
+ "EventName": "UNC_M2M_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Clockticks of the mesh to memory (M2M)",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M2M_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when direct to core mode (which bypasses the CHA) was disabled",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when direct to core mode, which bypasses the CHA, was disabled : Non Cisgress",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE.NON_CISGRESS",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles when direct to core mode, which bypasses the CHA, was disabled : Non Cisgress : Counts the number of time non cisgress D2C was not honoured by egress due to directory state constraints",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Counts the time when FM didn't do d2c for fill reads (cross tile case)",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_NOTFORKED",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to core transaction were overridden",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to core transaction was overridden : Cisgress",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE.CISGRESS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to core transaction was overridden : 2LM Hit?",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE.PMM_HIT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of times a direct to UPI transaction was overridden.",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M2M_DIRECT2UPITXN_OVERRIDE.PMM_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a direct to UPI transaction was overridden. : Counts the number of times D2K wasn't honored even though the incoming request had d2k set",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to Intel UPI transactions were overridden",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_CREDITS",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when direct to Intel UPI was disabled",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2UPI was Disabled : Cisgress D2U Ignored",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE.CISGRESS",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles when Direct2UPI was Disabled : Cisgress D2U Ignored : Counts cisgress d2K that was not honored due to directory constraints",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2UPI was Disabled : Egress Ignored D2U",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE.EGRESS",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles when Direct2UPI was Disabled : Egress Ignored D2U : Counts the number of time D2K was not honoured by egress due to directory state constraints",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2UPI was Disabled : Non Cisgress D2U Ignored",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE.NON_CISGRESS",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles when Direct2UPI was Disabled : Non Cisgress D2U Ignored : Counts non cisgress d2K that was not honored due to directory constraints",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages sent direct to the Intel UPI",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2M_DIRECT2UPI_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times egress did D2K (Direct to KTI)",
+ "UMask": "0x7",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads that a message sent direct2 Intel UPI was overridden",
+ "EventCode": "0x1c",
+ "EventName": "UNC_M2M_DIRECT2UPI_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of times a direct to UPI transaction was overridden.",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M2M_DIRECT2UPI_TXN_OVERRIDE.CISGRESS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in A State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_A",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in I State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_I",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in L State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_P",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in S State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_S",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in A State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_A",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in I State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_I",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in L State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_P",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in S State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_S",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookups (any state found)",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of hit data returns to egress with any directory to non persistent memory",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookups (cacheline found in A state)",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_A",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of hit data returns to egress with directory A to non persistent memory",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in I state)",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of hit data returns to egress with directory I to non persistent memory",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in S state)",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of hit data returns to egress with directory S to non persistent memory",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in A State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_A",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in I State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_I",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in L State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_P",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in S State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_S",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in A State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_A",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in I State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_I",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in L State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_P",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in S State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_S",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from A to I",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.A2I",
+ "PerPkg": "1",
+ "UMask": "0x320",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from A to S",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.A2S",
+ "PerPkg": "1",
+ "UMask": "0x340",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from/to Any state",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.ANY",
+ "PerPkg": "1",
+ "UMask": "0x301",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.A_TO_I_HIT_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from A to I to non persistent memory (DRAM or HBM)",
+ "UMask": "0x120",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.A_TO_I_MISS_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts 2lm miss data returns that would result in directory update from A to I to non persistent memory (DRAM or HBM)",
+ "UMask": "0x220",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.A_TO_S_HIT_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from A to S to non persistent memory (DRAM or HBM)",
+ "UMask": "0x140",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.A_TO_S_MISS_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts 2lm miss data returns that would result in directory update from A to S to non persistent memory (DRAM or HBM)",
+ "UMask": "0x240",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.HIT_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts any 1lm or 2lm hit data return that would result in directory update to non persistent memory (DRAM or HBM)",
+ "UMask": "0x101",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from I to A",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.I2A",
+ "PerPkg": "1",
+ "UMask": "0x304",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from I to S",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.I2S",
+ "PerPkg": "1",
+ "UMask": "0x302",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.I_TO_A_HIT_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from I to A to non persistent memory (DRAM or HBM)",
+ "UMask": "0x104",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.I_TO_A_MISS_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts 2lm miss data returns that would result in directory update from I to A to non persistent memory (DRAM or HBM)",
+ "UMask": "0x204",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.I_TO_S_HIT_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from I to S to non persistent memory (DRAM or HBM)",
+ "UMask": "0x102",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.I_TO_S_MISS_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts 2lm miss data returns that would result in directory update from I to S to non persistent memory (DRAM or HBM)",
+ "UMask": "0x202",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.MISS_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts any 2lm miss data return that would result in directory update to non persistent memory (DRAM or HBM)",
+ "UMask": "0x201",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from S to A",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.S2A",
+ "PerPkg": "1",
+ "UMask": "0x310",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from S to I",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.S2I",
+ "PerPkg": "1",
+ "UMask": "0x308",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.S_TO_A_HIT_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from S to A to non persistent memory (DRAM or HBM)",
+ "UMask": "0x110",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.S_TO_A_MISS_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts 2lm miss data returns that would result in directory update from S to A to non persistent memory (DRAM or HBM)",
+ "UMask": "0x210",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.S_TO_I_HIT_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from S to I to non persistent memory (DRAM or HBM)",
+ "UMask": "0x108",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.S_TO_I_MISS_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts 2lm miss data returns that would result in directory update from S to I to non persistent memory (DRAM or HBM)",
+ "UMask": "0x208",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "EventCode": "0xba",
+ "EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x80000004",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "EventCode": "0xba",
+ "EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x80000001",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Count when Starve Glocab counter is at 7",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_IGR_STARVE_WINNER.MASK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Reads to iMC issued",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x304",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH0.TO_NM1LM",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH0.TO_NM1LM",
+ "PerPkg": "1",
+ "UMask": "0x108",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH0.TO_NMCache",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH0.TO_NMCache",
+ "PerPkg": "1",
+ "UMask": "0x110",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH0_ALL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH0_ALL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x104",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH0_FROM_TGR",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH0_FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x140",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH0_ISOCH",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH0_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x102",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH0_NORMAL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH0_NORMAL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x101",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH0_TO_DDR_AS_CACHE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH0_TO_DDR_AS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x110",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH0_TO_DDR_AS_MEM",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH0_TO_DDR_AS_MEM",
+ "PerPkg": "1",
+ "UMask": "0x108",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH0_TO_PMM",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH0_TO_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x120",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH1.TO_NM1LM",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH1.TO_NM1LM",
+ "PerPkg": "1",
+ "UMask": "0x208",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH1.TO_NMCache",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH1.TO_NMCache",
+ "PerPkg": "1",
+ "UMask": "0x210",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH1_ALL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH1_ALL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x204",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH1_FROM_TGR",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH1_FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x240",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH1_ISOCH",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH1_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x202",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH1_NORMAL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH1_NORMAL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x201",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH1_TO_DDR_AS_CACHE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH1_TO_DDR_AS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x210",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH1_TO_DDR_AS_MEM",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH1_TO_DDR_AS_MEM",
+ "PerPkg": "1",
+ "UMask": "0x208",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH1_TO_PMM",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH1_TO_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x220",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.FROM_TGR",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x340",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.ISOCH",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x302",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.NORMAL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x301",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.TO_DDR_AS_CACHE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.TO_DDR_AS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x310",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.TO_DDR_AS_MEM",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.TO_DDR_AS_MEM",
+ "PerPkg": "1",
+ "UMask": "0x308",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.TO_NM1LM",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.TO_NM1LM",
+ "PerPkg": "1",
+ "UMask": "0x308",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.TO_NMCACHE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.TO_NMCACHE",
+ "PerPkg": "1",
+ "UMask": "0x310",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.TO_PMM",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.TO_PMM",
+ "PerPkg": "1",
+ "UMask": "0x320",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "All Writes - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1810",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Non-Inclusive - Ch0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0.NI",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_WRITES.CH0_ALL",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_ALL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x810",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "From TGR - Ch0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_FROM_TGR",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_WRITES.CH0_FULL",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_FULL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x801",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_WRITES.CH0_FULL_ISOCH",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x804",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Non-Inclusive - Ch0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_NI",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Non-Inclusive Miss - Ch0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_NI_MISS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_WRITES.CH0_PARTIAL",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_PARTIAL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x802",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_WRITES.CH0_PARTIAL_ISOCH",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x808",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "DDR, acting as Cache - Ch0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_TO_DDR_AS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x840",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_WRITES.CH0_TO_DDR_AS_MEM",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_TO_DDR_AS_MEM",
+ "PerPkg": "1",
+ "UMask": "0x820",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "PMM - Ch0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_TO_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "PMM - Ch0 : Counts all PMM dimm writes requests(full line and partial) sent from M2M to iMC",
+ "UMask": "0x880",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Non-Inclusive - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1.NI",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "All Writes - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_ALL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x1010",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "From TGR - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_FROM_TGR",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Full Line Non-ISOCH - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_FULL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x1001",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "ISOCH Full Line - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x1004",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Non-Inclusive - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_NI",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Non-Inclusive Miss - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_NI_MISS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Partial Non-ISOCH - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_PARTIAL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x1002",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "ISOCH Partial - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x1008",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "DDR, acting as Cache - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_TO_DDR_AS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x1040",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "DDR - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_TO_DDR_AS_MEM",
+ "PerPkg": "1",
+ "UMask": "0x1020",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "PMM - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_TO_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "PMM - Ch1 : Counts all PMM dimm writes requests(full line and partial) sent from M2M to iMC",
+ "UMask": "0x1080",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "From TGR - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.FROM_TGR",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Full Non-ISOCH - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.FULL",
+ "PerPkg": "1",
+ "UMask": "0x1801",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "ISOCH Full Line - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x1804",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Non-Inclusive - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.NI",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Non-Inclusive Miss - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.NI_MISS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Partial Non-ISOCH - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x1802",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "ISOCH Partial - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x1808",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "DDR, acting as Cache - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.TO_DDR_AS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x1840",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "DDR - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.TO_DDR_AS_MEM",
+ "PerPkg": "1",
+ "UMask": "0x1820",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "PMM - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.TO_PMM",
+ "PerPkg": "1",
+ "UMask": "0x1880",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_CIS_DROPS",
+ "EventCode": "0x5c",
+ "EventName": "UNC_M2M_PREFCAM_CIS_DROPS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH0_UPI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH0_XPT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH1_UPI",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH1_XPT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : UPI - All Channels",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.UPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.XPT_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": UPI - All Channels",
+ "EventCode": "0x5d",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.UPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": XPT - All Channels",
+ "EventCode": "0x5d",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.XPT_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.RD_MERGED",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.WR_MERGED",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.WR_SQUASHED",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : UPI - Ch 0",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.CH0_UPI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - Ch 0",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.CH0_XPT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : UPI - Ch 1",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.CH1_UPI",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - Ch 1",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.CH1_XPT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : UPI - All Channels",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.UPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - All Channels",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.XPT_ALLCH",
+ "PerPkg": "1",
+ "PublicDescription": "Prefetch CAM Inserts : XPT -All Channels",
+ "UMask": "0x5",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : All Channels",
+ "EventCode": "0x54",
+ "EventName": "UNC_M2M_PREFCAM_OCCUPANCY.ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : Channel 0",
+ "EventCode": "0x54",
+ "EventName": "UNC_M2M_PREFCAM_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : Channel 1",
+ "EventCode": "0x54",
+ "EventName": "UNC_M2M_PREFCAM_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "All Channels",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M2M_PREFCAM_RESP_MISS.ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": Channel 0",
+ "EventCode": "0x5f",
+ "EventName": "UNC_M2M_PREFCAM_RESP_MISS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": Channel 1",
+ "EventCode": "0x5f",
+ "EventName": "UNC_M2M_PREFCAM_RESP_MISS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "EventCode": "0x62",
+ "EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.CIS",
+ "EventCode": "0x62",
+ "EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.CIS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.PMM_MEMMODE_ACCEPT",
+ "EventCode": "0x62",
+ "EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.PMM_MEMMODE_ACCEPT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "EventCode": "0x62",
+ "EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Occupancy - Prefetches",
+ "EventCode": "0x60",
+ "EventName": "UNC_M2M_PREFCAM_RxC_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) : AD Ingress (from CMS) Allocations",
+ "EventCode": "0x02",
+ "EventName": "UNC_M2M_RxC_AD_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Occupancy",
+ "EventCode": "0x03",
+ "EventName": "UNC_M2M_RxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Clean NearMem Read Hit",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_CLEAN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts clean full line read hits (reads and RFOs).",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Dirty NearMem Read Hit",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_DIRTY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts dirty full line read hits (reads and RFOs).",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tag Hit : Clean NearMem Underfill Hit",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_CLEAN",
+ "PerPkg": "1",
+ "PublicDescription": "Tag Hit indicates when a request sent to the iMC hit in Near Memory. : Counts clean underfill hits due to a partial write",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tag Hit : Dirty NearMem Underfill Hit",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_DIRTY",
+ "PerPkg": "1",
+ "PublicDescription": "Tag Hit indicates when a request sent to the iMC hit in Near Memory. : Counts dirty underfill read hits due to a partial write",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_TAG_MISS",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2M_TAG_MISS",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number AD Ingress Credits",
+ "EventCode": "0x2e",
+ "EventName": "UNC_M2M_TGR_AD_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number BL Ingress Credits",
+ "EventCode": "0x2f",
+ "EventName": "UNC_M2M_TGR_BL_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts : Channel 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x104",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts : Channel 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x204",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy : Channel 0",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy : Channel 1",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "WPQ Flush : Channel 0",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2M_WPQ_FLUSH.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "WPQ Flush : Channel 1",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2M_WPQ_FLUSH.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular : Channel 0",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular : Channel 1",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special : Channel 0",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special : Channel 1",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts : Channel 0",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_WR_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts : Channel 1",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_WR_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty : Channel 0",
+ "EventCode": "0x35",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty : Channel 1",
+ "EventCode": "0x35",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty : Mirror",
+ "EventCode": "0x35",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.MIRR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty",
+ "EventCode": "0x35",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.MIRR_NONTGR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty",
+ "EventCode": "0x35",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.MIRR_PWR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Inserts : Channel 0",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Inserts : Channel 1",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 0",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 1",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Inserts : Channel 0",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Inserts : Channel 1",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Occupancy : Channel 0",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Occupancy : Channel 1",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty : Requests",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "CBox AD Credits Empty : Requests : No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty : Snoops",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "CBox AD Credits Empty : Snoops : No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty : VNA Messages",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "CBox AD Credits Empty : VNA Messages : No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty : Writebacks",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.WB",
+ "PerPkg": "1",
+ "PublicDescription": "CBox AD Credits Empty : Writebacks : No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M3UPI Clockticks",
+ "EventCode": "0x01",
+ "EventName": "UNC_M3UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of M2UPI clock cycles while the event is enabled",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M3UPI CMS Clockticks",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M3UPI_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "D2C Sent",
+ "EventCode": "0x2b",
+ "EventName": "UNC_M3UPI_D2C_SENT",
+ "PerPkg": "1",
+ "PublicDescription": "D2C Sent : Count cases BL sends direct to core",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "D2U Sent",
+ "EventCode": "0x2a",
+ "EventName": "UNC_M3UPI_D2U_SENT",
+ "PerPkg": "1",
+ "PublicDescription": "D2U Sent : Cases where SMI3 sends D2U command",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "EventCode": "0xba",
+ "EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "EventCode": "0xba",
+ "EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : IIO0 and IIO1 share the same ring destination. (1 VN0 credit only)",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO1_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "M2 BL Credits Empty : IIO0 and IIO1 share the same ring destination. (1 VN0 credit only) : No vn0 and vna credits available to send to M2",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : IIO2",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO2_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "M2 BL Credits Empty : IIO2 : No vn0 and vna credits available to send to M2",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : IIO3",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO3_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "M2 BL Credits Empty : IIO3 : No vn0 and vna credits available to send to M2",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : IIO4",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO4_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "M2 BL Credits Empty : IIO4 : No vn0 and vna credits available to send to M2",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : IIO5",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO5_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "M2 BL Credits Empty : IIO5 : No vn0 and vna credits available to send to M2",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : All IIO targets for NCS are in single mask. ORs them together",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "M2 BL Credits Empty : All IIO targets for NCS are in single mask. ORs them together : No vn0 and vna credits available to send to M2",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : Selected M2p BL NCS credits",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS_SEL",
+ "PerPkg": "1",
+ "PublicDescription": "M2 BL Credits Empty : Selected M2p BL NCS credits : No vn0 and vna credits available to send to M2",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : IIO5",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.UBOX_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "M2 BL Credits Empty : IIO5 : No vn0 and vna credits available to send to M2",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received : AD - Slot 0",
+ "EventCode": "0x3e",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Multi Slot Flit Received : AD - Slot 0 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received : AD - Slot 1",
+ "EventCode": "0x3e",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Multi Slot Flit Received : AD - Slot 1 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received : AD - Slot 2",
+ "EventCode": "0x3e",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Multi Slot Flit Received : AD - Slot 2 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received : AK - Slot 0",
+ "EventCode": "0x3e",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Multi Slot Flit Received : AK - Slot 0 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received : AK - Slot 2",
+ "EventCode": "0x3e",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Multi Slot Flit Received : AK - Slot 2 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received : BL - Slot 0",
+ "EventCode": "0x3e",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.BL_SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Multi Slot Flit Received : BL - Slot 0 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : REQ on AD",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN0 : REQ on AD : VN0 message requested but lost arbitration : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : RSP on AD",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN0 : RSP on AD : VN0 message requested but lost arbitration : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : SNP on AD",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN0 : SNP on AD : VN0 message requested but lost arbitration : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : NCB on BL",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN0 : NCB on BL : VN0 message requested but lost arbitration : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : NCS on BL",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN0 : NCS on BL : VN0 message requested but lost arbitration : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : RSP on BL",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN0 : RSP on BL : VN0 message requested but lost arbitration : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : WB on BL",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN0 : WB on BL : VN0 message requested but lost arbitration : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : REQ on AD",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN1 : REQ on AD : VN1 message requested but lost arbitration : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : RSP on AD",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN1 : RSP on AD : VN1 message requested but lost arbitration : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : SNP on AD",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN1 : SNP on AD : VN1 message requested but lost arbitration : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : NCB on BL",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN1 : NCB on BL : VN1 message requested but lost arbitration : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : NCS on BL",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN1 : NCS on BL : VN1 message requested but lost arbitration : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : RSP on BL",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN1 : RSP on BL : VN1 message requested but lost arbitration : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : WB on BL",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN1 : WB on BL : VN1 message requested but lost arbitration : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : AD, BL Parallel Win VN0",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.ADBL_PARALLEL_WIN_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Arb Miscellaneous : AD, BL Parallel Win VN0 : AD and BL messages won arbitration concurrently / in parallel",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : AD, BL Parallel Win VN1",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.ADBL_PARALLEL_WIN_VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Arb Miscellaneous : AD, BL Parallel Win VN1 : AD and BL messages won arbitration concurrently / in parallel",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : Max Parallel Win",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.ALL_PARALLEL_WIN",
+ "PerPkg": "1",
+ "PublicDescription": "Arb Miscellaneous : Max Parallel Win : VN0 and VN1 arbitration sub-pipelines both produced AD and BL winners (maximum possible parallel winners)",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : No Progress on Pending AD VN0",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Arb Miscellaneous : No Progress on Pending AD VN0 : Arbitration stage made no progress on pending ad vn0 messages because slotting stage cannot accept new message",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : No Progress on Pending AD VN1",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Arb Miscellaneous : No Progress on Pending AD VN1 : Arbitration stage made no progress on pending ad vn1 messages because slotting stage cannot accept new message",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : No Progress on Pending BL VN0",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Arb Miscellaneous : No Progress on Pending BL VN0 : Arbitration stage made no progress on pending bl vn0 messages because slotting stage cannot accept new message",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : No Progress on Pending BL VN1",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Arb Miscellaneous : No Progress on Pending BL VN1 : Arbitration stage made no progress on pending bl vn1 messages because slotting stage cannot accept new message",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : VN0, VN1 Parallel Win",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.VN01_PARALLEL_WIN",
+ "PerPkg": "1",
+ "PublicDescription": "Arb Miscellaneous : VN0, VN1 Parallel Win : VN0 and VN1 arbitration sub-pipelines had parallel winners (at least one AD or BL on each side)",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : REQ on AD",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN0 : REQ on AD : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : RSP on AD",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN0 : RSP on AD : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : SNP on AD",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN0 : SNP on AD : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : NCB on BL",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN0 : NCB on BL : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : NCS on BL",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN0 : NCS on BL : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : RSP on BL",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN0 : RSP on BL : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : WB on BL",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN0 : WB on BL : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : REQ on AD",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN1 : REQ on AD : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : RSP on AD",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN1 : RSP on AD : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : SNP on AD",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN1 : SNP on AD : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : NCB on BL",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN1 : NCB on BL : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : NCS on BL",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN1 : NCS on BL : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : RSP on BL",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN1 : RSP on BL : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : WB on BL",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN1 : WB on BL : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : REQ on AD",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN0 : REQ on AD : VN0 message was not able to request arbitration while some other message won arbitration : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : RSP on AD",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN0 : RSP on AD : VN0 message was not able to request arbitration while some other message won arbitration : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : SNP on AD",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN0 : SNP on AD : VN0 message was not able to request arbitration while some other message won arbitration : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : NCB on BL",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN0 : NCB on BL : VN0 message was not able to request arbitration while some other message won arbitration : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : NCS on BL",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN0 : NCS on BL : VN0 message was not able to request arbitration while some other message won arbitration : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : RSP on BL",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN0 : RSP on BL : VN0 message was not able to request arbitration while some other message won arbitration : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : WB on BL",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN0 : WB on BL : VN0 message was not able to request arbitration while some other message won arbitration : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : REQ on AD",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN1 : REQ on AD : VN1 message was not able to request arbitration while some other message won arbitration : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : RSP on AD",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN1 : RSP on AD : VN1 message was not able to request arbitration while some other message won arbitration : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : SNP on AD",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN1 : SNP on AD : VN1 message was not able to request arbitration while some other message won arbitration : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : NCB on BL",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN1 : NCB on BL : VN1 message was not able to request arbitration while some other message won arbitration : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : NCS on BL",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN1 : NCS on BL : VN1 message was not able to request arbitration while some other message won arbitration : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : RSP on BL",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN1 : RSP on BL : VN1 message was not able to request arbitration while some other message won arbitration : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : WB on BL",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN1 : WB on BL : VN1 message was not able to request arbitration while some other message won arbitration : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses : AD to Slot 0 on BL Arb",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_BL_ARB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress Queue Bypasses : AD to Slot 0 on BL Arb : Number of times message is bypassed around the Ingress Queue : AD is taking bypass to slot 0 of independent flit while bl message is in arbitration",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses : AD to Slot 0 on Idle",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress Queue Bypasses : AD to Slot 0 on Idle : Number of times message is bypassed around the Ingress Queue : AD is taking bypass to slot 0 of independent flit while pipeline is idle",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses : AD + BL to Slot 1",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S1_BL_SLOT",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress Queue Bypasses : AD + BL to Slot 1 : Number of times message is bypassed around the Ingress Queue : AD is taking bypass to flit slot 1 while merging with bl message in same flit",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses : AD + BL to Slot 2",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S2_BL_SLOT",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress Queue Bypasses : AD + BL to Slot 2 : Number of times message is bypassed around the Ingress Queue : AD is taking bypass to flit slot 2 while merging with bl message in same flit",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events : Any In BGF FIFO",
+ "EventCode": "0x5f",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_FIFO",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous Credit Events : Any In BGF FIFO : Indication that at least one packet (flit) is in the bgf (fifo only)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events : Any in BGF Path",
+ "EventCode": "0x5f",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_PATH",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous Credit Events : Any in BGF Path : Indication that at least one packet (flit) is in the bgf path (i.e. pipe to fifo)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events",
+ "EventCode": "0x5f",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.LT1_FOR_D2K",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous Credit Events : d2k credit count is less than 1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events",
+ "EventCode": "0x5f",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.LT2_FOR_D2K",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous Credit Events : d2k credit count is less than 2",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events : No D2K For Arb",
+ "EventCode": "0x5f",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.VN0_NO_D2K_FOR_ARB",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous Credit Events : No D2K For Arb : VN0 BL RSP message was blocked from arbitration request due to lack of D2K CMP credit",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events",
+ "EventCode": "0x5f",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.VN1_NO_D2K_FOR_ARB",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous Credit Events : VN1 BL RSP message was blocked from arbitration request due to lack of D2K CMP credits",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy : Credits Consumed",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.CONSUMED",
+ "PerPkg": "1",
+ "PublicDescription": "Credit Occupancy : Credits Consumed : number of remote vna credits consumed per cycle",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy : D2K Credits",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.D2K_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Credit Occupancy : D2K Credits : D2K completion fifo credit occupancy (credits in use), accumulated across all cycles",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy : Packets in BGF FIFO",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_FIFO",
+ "PerPkg": "1",
+ "PublicDescription": "Credit Occupancy : Packets in BGF FIFO : Occupancy of m3upi ingress -> upi link layer bgf; packets (flits) in fifo",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy : Packets in BGF Path",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_PATH",
+ "PerPkg": "1",
+ "PublicDescription": "Credit Occupancy : Packets in BGF Path : Occupancy of m3upi ingress -> upi link layer bgf; packets (flits) in path (i.e. pipe to fifo or fifo)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_FIFO",
+ "PerPkg": "1",
+ "PublicDescription": "Credit Occupancy : count of bl messages in pump-1-pending state, in completion fifo only",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_TOTAL",
+ "PerPkg": "1",
+ "PublicDescription": "Credit Occupancy : count of bl messages in pump-1-pending state, in marker table and in fifo",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy : Transmit Credits",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.TxQ_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Credit Occupancy : Transmit Credits : Link layer transmit queue credit occupancy (credits in use), accumulated across all cycles",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy : VNA In Use",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.VNA_IN_USE",
+ "PerPkg": "1",
+ "PublicDescription": "Credit Occupancy : VNA In Use : Remote UPI VNA credit occupancy (number of credits in use), accumulated across all cycles",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : REQ on AD",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : REQ on AD : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on AD",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on AD : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : SNP on AD",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : SNP on AD : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCB on BL",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCB on BL : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCS on BL",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCS on BL : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on BL",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on BL : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : WB on BL",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : WB on BL : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent : All",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Data Flit Not Sent : All : Data flit is ready for transmission but could not be sent : data flit is ready for transmission but could not be sent for any reason, e.g. low credits, low tsv, stall injection",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent : No BGF Credits",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.NO_BGF",
+ "PerPkg": "1",
+ "PublicDescription": "Data Flit Not Sent : No BGF Credits : Data flit is ready for transmission but could not be sent",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent : No TxQ Credits",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.NO_TXQ",
+ "PerPkg": "1",
+ "PublicDescription": "Data Flit Not Sent : No TxQ Credits : Data flit is ready for transmission but could not be sent",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent : TSV High",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.TSV_HI",
+ "PerPkg": "1",
+ "PublicDescription": "Data Flit Not Sent : TSV High : Data flit is ready for transmission but could not be sent : data flit is ready for transmission but was not sent while tsv high",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent : Cycle valid for Flit",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.VALID_FOR_FLIT",
+ "PerPkg": "1",
+ "PublicDescription": "Data Flit Not Sent : Cycle valid for Flit : Data flit is ready for transmission but could not be sent : data flit is ready for transmission but was not sent while cycle is valid for flit transmission",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence : Wait on Pump 0",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P0_WAIT",
+ "PerPkg": "1",
+ "PublicDescription": "Generating BL Data Flit Sequence : Wait on Pump 0 : generating bl data flit sequence; waiting for data pump 0",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_AT_LIMIT",
+ "PerPkg": "1",
+ "PublicDescription": "Generating BL Data Flit Sequence : pump-1-pending logic is at capacity (pending table plus completion fifo at limit)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_BUSY",
+ "PerPkg": "1",
+ "PublicDescription": "Generating BL Data Flit Sequence : pump-1-pending logic is tracking at least one message",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_FIFO_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Generating BL Data Flit Sequence : pump-1-pending completion fifo is full",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_HOLD_P0",
+ "PerPkg": "1",
+ "PublicDescription": "Generating BL Data Flit Sequence : pump-1-pending logic is at or near capacity, such that pump-0-only bl messages are getting stalled in slotting stage",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_TO_LIMBO",
+ "PerPkg": "1",
+ "PublicDescription": "Generating BL Data Flit Sequence : a bl message finished but is in limbo and moved to pump-1-pending logic",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence : Wait on Pump 1",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1_WAIT",
+ "PerPkg": "1",
+ "PublicDescription": "Generating BL Data Flit Sequence : Wait on Pump 1 : generating bl data flit sequence; waiting for data pump 1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_HOLDOFF",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_HOLDOFF",
+ "PerPkg": "1",
+ "PublicDescription": ": slot 2 request naturally serviced during hold-off period",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_SERVICE",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_SERVICE",
+ "PerPkg": "1",
+ "PublicDescription": ": slot 2 request forcibly serviced during service window",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_RECEIVED",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_RECEIVED",
+ "PerPkg": "1",
+ "PublicDescription": ": slot 2 request received from link layer while idle (with no slot 2 request active immediately prior)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_WITHDRAWN",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_WITHDRAWN",
+ "PerPkg": "1",
+ "PublicDescription": ": slot 2 request withdrawn during hold-off period or service window",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : All",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : Needs Data Flit",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.NEED_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Slotting BL Message Into Header Flit : Needs Data Flit : BL message requires data flit sequence",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : Wait on Pump 0",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P0_WAIT",
+ "PerPkg": "1",
+ "PublicDescription": "Slotting BL Message Into Header Flit : Wait on Pump 0 : Waiting for header pump 0",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 : Header pump 1 is not required for flit",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Bubble",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_BUT_BUBBLE",
+ "PerPkg": "1",
+ "PublicDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Bubble : Header pump 1 is not required for flit but flit transmission delayed",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Not Avail",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_NOT_AVAIL",
+ "PerPkg": "1",
+ "PublicDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Not Avail : Header pump 1 is not required for flit and not available",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : Wait on Pump 1",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_WAIT",
+ "PerPkg": "1",
+ "PublicDescription": "Slotting BL Message Into Header Flit : Wait on Pump 1 : Waiting for header pump 1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1 : Accumulate",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 1 : Accumulate : Events related to Header Flit Generation - Set 1 : Header flit slotting control state machine is in any accumulate state; multi-message flit may be assembled over multiple cycles",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1 : Accumulate Ready",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_READ",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 1 : Accumulate Ready : Events related to Header Flit Generation - Set 1 : header flit slotting control state machine is in accum_ready state; flit is ready to send but transmission is blocked; more messages may be slotted into flit",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1 : Accumulate Wasted",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_WASTED",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 1 : Accumulate Wasted : Events related to Header Flit Generation - Set 1 : Flit is being assembled over multiple cycles, but no additional message is being slotted into flit in current cycle; accumulate cycle is wasted",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1 : Run-Ahead - Blocked",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_BLOCKED",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 1 : Run-Ahead - Blocked : Events related to Header Flit Generation - Set 1 : Header flit slotting entered run-ahead state; new header flit is started while transmission of prior, fully assembled flit is blocked",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG1_AFTER",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 1 : Events related to Header Flit Generation - Set 1 : run-ahead mode: message was slotted only after run-ahead was over; run-ahead mode definitely wasted",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1 : Run-Ahead - Message",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG1_DURING",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 1 : Run-Ahead - Message : Events related to Header Flit Generation - Set 1 : run-ahead mode: one message slotted during run-ahead",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG2_AFTER",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 1 : Events related to Header Flit Generation - Set 1 : run-ahead mode: second message slotted immediately after run-ahead; potential run-ahead success",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG2_SENT",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 1 : Events related to Header Flit Generation - Set 1 : run-ahead mode: two (or three) message flit sent immediately after run-ahead; complete run-ahead success",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2 : Parallel Ok",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.PAR",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 2 : Parallel Ok : Events related to Header Flit Generation - Set 2 : new header flit construction may proceed in parallel with data flit sequence",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2 : Parallel Flit Finished",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.PAR_FLIT",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 2 : Parallel Flit Finished : Events related to Header Flit Generation - Set 2 : header flit finished assembly in parallel with data flit sequence",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2 : Parallel Message",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.PAR_MSG",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 2 : Parallel Message : Events related to Header Flit Generation - Set 2 : message is slotted into header flit in parallel with data flit sequence",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2 : Rate-matching Stall",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 2 : Rate-matching Stall : Events related to Header Flit Generation - Set 2 : Rate-matching stall injected",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2 : Rate-matching Stall - No Message",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL_NOMSG",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 2 : Rate-matching Stall - No Message : Events related to Header Flit Generation - Set 2 : Rate matching stall injected, but no additional message slotted during stall cycle",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : One Message",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.1_MSG",
+ "PerPkg": "1",
+ "PublicDescription": "Sent Header Flit : One Message : One message in flit; VNA or non-VNA flit",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : One Message in non-VNA",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.1_MSG_VNX",
+ "PerPkg": "1",
+ "PublicDescription": "Sent Header Flit : One Message in non-VNA : One message in flit; non-VNA flit",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : Two Messages",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.2_MSGS",
+ "PerPkg": "1",
+ "PublicDescription": "Sent Header Flit : Two Messages : Two messages in flit; VNA flit",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : Three Messages",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.3_MSGS",
+ "PerPkg": "1",
+ "PublicDescription": "Sent Header Flit : Three Messages : Three messages in flit; VNA flit",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : One Slot Taken",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.SLOTS_1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : Two Slots Taken",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.SLOTS_2",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : All Slots Taken",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.SLOTS_3",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : All",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Header Not Sent : All : header flit is ready for transmission but could not be sent : header flit is ready for transmission but could not be sent for any reason, e.g. no credits, low tsv, stall injection",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : No BGF Credits",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_BGF_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Header Not Sent : No BGF Credits : header flit is ready for transmission but could not be sent : No BGF credits available",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : No BGF Credits + No Extra Message Slotted",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_BGF_NO_MSG",
+ "PerPkg": "1",
+ "PublicDescription": "Header Not Sent : No BGF Credits + No Extra Message Slotted : header flit is ready for transmission but could not be sent : No BGF credits available; no additional message slotted into flit",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : No TxQ Credits",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_TXQ_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Header Not Sent : No TxQ Credits : header flit is ready for transmission but could not be sent : No TxQ credits available",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : No TxQ Credits + No Extra Message Slotted",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_TXQ_NO_MSG",
+ "PerPkg": "1",
+ "PublicDescription": "Header Not Sent : No TxQ Credits + No Extra Message Slotted : header flit is ready for transmission but could not be sent : No TxQ credits available; no additional message slotted into flit",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : TSV High",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.TSV_HI",
+ "PerPkg": "1",
+ "PublicDescription": "Header Not Sent : TSV High : header flit is ready for transmission but could not be sent : header flit is ready for transmission but was not sent while tsv high",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : Cycle valid for Flit",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.VALID_FOR_FLIT",
+ "PerPkg": "1",
+ "PublicDescription": "Header Not Sent : Cycle valid for Flit : header flit is ready for transmission but could not be sent : header flit is ready for transmission but was not sent while cycle is valid for flit transmission",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held : Can't Slot AD",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Message Held : Can't Slot AD : some AD message could not be slotted (logical OR of all AD events under INGR_SLOT_CANT_MC_VN{0,1})",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held : Can't Slot BL",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Message Held : Can't Slot BL : some BL message could not be slotted (logical OR of all BL events under INGR_SLOT_CANT_MC_VN{0,1})",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held : Parallel Attempt",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_ATTEMPT",
+ "PerPkg": "1",
+ "PublicDescription": "Message Held : Parallel Attempt : ad and bl messages attempted to slot into the same flit in parallel",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held : Parallel Success",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_SUCCESS",
+ "PerPkg": "1",
+ "PublicDescription": "Message Held : Parallel Success : ad and bl messages were actually slotted into the same flit in parallel",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held : VN0",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_HELD.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Message Held : VN0 : vn0 message(s) that couldn't be slotted into last vn0 flit are held in slotting stage while processing vn1 flit",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held : VN1",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_HELD.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Message Held : VN1 : vn1 message(s) that couldn't be slotted into last vn1 flit are held in slotting stage while processing vn0 flit",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : REQ on AD",
+ "EventCode": "0x4e",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message can't slot into flit : REQ on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : RSP on AD",
+ "EventCode": "0x4e",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message can't slot into flit : RSP on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : SNP on AD",
+ "EventCode": "0x4e",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message can't slot into flit : SNP on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : NCB on BL",
+ "EventCode": "0x4e",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message can't slot into flit : NCB on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : NCS on BL",
+ "EventCode": "0x4e",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message can't slot into flit : NCS on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : RSP on BL",
+ "EventCode": "0x4e",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message can't slot into flit : RSP on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : WB on BL",
+ "EventCode": "0x4e",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message can't slot into flit : WB on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : REQ on AD",
+ "EventCode": "0x4f",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message can't slot into flit : REQ on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : RSP on AD",
+ "EventCode": "0x4f",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message can't slot into flit : RSP on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : SNP on AD",
+ "EventCode": "0x4f",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message can't slot into flit : SNP on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : NCB on BL",
+ "EventCode": "0x4f",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message can't slot into flit : NCB on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : NCS on BL",
+ "EventCode": "0x4f",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message can't slot into flit : NCS on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : RSP on BL",
+ "EventCode": "0x4f",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message can't slot into flit : RSP on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : WB on BL",
+ "EventCode": "0x4f",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message can't slot into flit : WB on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits : Any In Use",
+ "EventCode": "0x5a",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.ANY_IN_USE",
+ "PerPkg": "1",
+ "PublicDescription": "Remote VNA Credits : Any In Use : At least one remote vna credit is in use",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits : Corrected",
+ "EventCode": "0x5a",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.CORRECTED",
+ "PerPkg": "1",
+ "PublicDescription": "Remote VNA Credits : Corrected : Number of remote vna credits corrected (local return) per cycle",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits : Level < 1",
+ "EventCode": "0x5a",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT1",
+ "PerPkg": "1",
+ "PublicDescription": "Remote VNA Credits : Level < 1 : Remote vna credit level is less than 1 (i.e. no vna credits available)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits : Level < 10",
+ "EventCode": "0x5a",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT10",
+ "PerPkg": "1",
+ "PublicDescription": "Remote VNA Credits : Level < 10 : remote vna credit level is less than 10; parallel vn0/vn1 arb not possible",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits : Level < 4",
+ "EventCode": "0x5a",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT4",
+ "PerPkg": "1",
+ "PublicDescription": "Remote VNA Credits : Level < 4 : Remote vna credit level is less than 4; bl (or ad requiring 4 vna) cannot arb on vna",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits : Level < 5",
+ "EventCode": "0x5a",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT5",
+ "PerPkg": "1",
+ "PublicDescription": "Remote VNA Credits : Level < 5 : Remote vna credit level is less than 5; parallel ad/bl arb on vna not possible",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_ADBL_ALLOC_L5",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_ADBL_ALLOC_L5",
+ "PerPkg": "1",
+ "PublicDescription": ": remote vna credit count was less than 5 and allocation to ad or bl messages was required",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_VN01_ALLOC_LT10",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_VN01_ALLOC_LT10",
+ "PerPkg": "1",
+ "PublicDescription": ": remote vna credit count was less than 10 and allocation to vn0 or vn1 was required",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_AD",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_AD",
+ "PerPkg": "1",
+ "PublicDescription": ": on vn0, remote vna credits were allocated only to ad messages, not to bl",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_BL",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_BL",
+ "PerPkg": "1",
+ "PublicDescription": ": on vn0, remote vna credits were allocated only to bl messages, not to ad",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_ONLY",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_ONLY",
+ "PerPkg": "1",
+ "PublicDescription": ": remote vna credits were allocated only to vn0, not to vn1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_AD",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_AD",
+ "PerPkg": "1",
+ "PublicDescription": ": on vn1, remote vna credits were allocated only to ad messages, not to bl",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_BL",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_BL",
+ "PerPkg": "1",
+ "PublicDescription": ": on vn1, remote vna credits were allocated only to bl messages, not to ad",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_ONLY",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_ONLY",
+ "PerPkg": "1",
+ "PublicDescription": ": remote vna credits were allocated only to vn1, not to vn0",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN0 REQ Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for AD : VN0 REQ Messages : AD arb but no win; arb request asserted but not won",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN0 RSP Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for AD : VN0 RSP Messages : AD arb but no win; arb request asserted but not won",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN0 SNP Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for AD : VN0 SNP Messages : AD arb but no win; arb request asserted but not won",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN0 WB Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for AD : VN0 WB Messages : AD arb but no win; arb request asserted but not won",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN1 REQ Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for AD : VN1 REQ Messages : AD arb but no win; arb request asserted but not won",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN1 RSP Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for AD : VN1 RSP Messages : AD arb but no win; arb request asserted but not won",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN1 SNP Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for AD : VN1 SNP Messages : AD arb but no win; arb request asserted but not won",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN1 WB Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for AD : VN1 WB Messages : AD arb but no win; arb request asserted but not won",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "EventCode": "0x2c",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "AD FlowQ Bypass : Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "EventCode": "0x2c",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "AD FlowQ Bypass : Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "EventCode": "0x2c",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "AD FlowQ Bypass : Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "EventCode": "0x2c",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.BL_EARLY_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "AD FlowQ Bypass : Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN0 REQ Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Not Empty : VN0 REQ Messages : Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN0 RSP Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Not Empty : VN0 RSP Messages : Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN0 SNP Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Not Empty : VN0 SNP Messages : Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN0 WB Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Not Empty : VN0 WB Messages : Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN1 REQ Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Not Empty : VN1 REQ Messages : Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN1 RSP Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Not Empty : VN1 RSP Messages : Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN1 SNP Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Not Empty : VN1 SNP Messages : Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN1 WB Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Not Empty : VN1 WB Messages : Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN0 REQ Messages",
+ "EventCode": "0x2d",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Inserts : VN0 REQ Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN0 RSP Messages",
+ "EventCode": "0x2d",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Inserts : VN0 RSP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN0 SNP Messages",
+ "EventCode": "0x2d",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Inserts : VN0 SNP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN0 WB Messages",
+ "EventCode": "0x2d",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Inserts : VN0 WB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN1 REQ Messages",
+ "EventCode": "0x2d",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Inserts : VN1 REQ Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN1 RSP Messages",
+ "EventCode": "0x2d",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Inserts : VN1 RSP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN1 SNP Messages",
+ "EventCode": "0x2d",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Inserts : VN1 SNP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN0 REQ Messages",
+ "EventCode": "0x1c",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN0 RSP Messages",
+ "EventCode": "0x1c",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN0 SNP Messages",
+ "EventCode": "0x1c",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN0 WB Messages",
+ "EventCode": "0x1c",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN1 REQ Messages",
+ "EventCode": "0x1c",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN1 RSP Messages",
+ "EventCode": "0x1c",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN1 SNP Messages",
+ "EventCode": "0x1c",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AK Flow Q Inserts",
+ "EventCode": "0x2f",
+ "EventName": "UNC_M3UPI_TxC_AK_FLQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AK Flow Q Occupancy",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M3UPI_TxC_AK_FLQ_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN0 NCB Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for BL : VN0 NCB Messages : BL arb but no win; arb request asserted but not won",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN0 NCS Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for BL : VN0 NCS Messages : BL arb but no win; arb request asserted but not won",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN0 RSP Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for BL : VN0 RSP Messages : BL arb but no win; arb request asserted but not won",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN0 WB Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for BL : VN0 WB Messages : BL arb but no win; arb request asserted but not won",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN1 NCS Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for BL : VN1 NCS Messages : BL arb but no win; arb request asserted but not won",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN1 NCB Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for BL : VN1 NCB Messages : BL arb but no win; arb request asserted but not won",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN1 RSP Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for BL : VN1 RSP Messages : BL arb but no win; arb request asserted but not won",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN1 WB Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for BL : VN1 WB Messages : BL arb but no win; arb request asserted but not won",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN0 REQ Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Not Empty : VN0 REQ Messages : Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN0 RSP Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Not Empty : VN0 RSP Messages : Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN0 SNP Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Not Empty : VN0 SNP Messages : Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN0 WB Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Not Empty : VN0 WB Messages : Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN1 REQ Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Not Empty : VN1 REQ Messages : Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN1 RSP Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Not Empty : VN1 RSP Messages : Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN1 SNP Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Not Empty : VN1 SNP Messages : Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN1 WB Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Not Empty : VN1 WB Messages : Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN0 RSP Messages",
+ "EventCode": "0x2e",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Inserts : VN0 RSP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN0 WB Messages",
+ "EventCode": "0x2e",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Inserts : VN0 WB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN0 NCS Messages",
+ "EventCode": "0x2e",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Inserts : VN0 NCS Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN0 NCB Messages",
+ "EventCode": "0x2e",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Inserts : VN0 NCB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN1 RSP Messages",
+ "EventCode": "0x2e",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Inserts : VN1 RSP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN1 WB Messages",
+ "EventCode": "0x2e",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Inserts : VN1 WB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN1_NCB Messages",
+ "EventCode": "0x2e",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Inserts : VN1_NCB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN1_NCS Messages",
+ "EventCode": "0x2e",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Inserts : VN1_NCS Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 NCB Messages",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 NCS Messages",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 RSP Messages",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 WB Messages",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1_NCS Messages",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1_NCB Messages",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1 RSP Messages",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1 WB Messages",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 RSP Messages",
+ "EventCode": "0x1f",
+ "EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN0_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 WB Messages",
+ "EventCode": "0x1f",
+ "EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN0_THROUGH",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 NCB Messages",
+ "EventCode": "0x1f",
+ "EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN0_WRPULL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1 RSP Messages",
+ "EventCode": "0x1f",
+ "EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN1_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1 WB Messages",
+ "EventCode": "0x1f",
+ "EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN1_THROUGH",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1_NCS Messages",
+ "EventCode": "0x1f",
+ "EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN1_WRPULL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VN0 REQ Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 AD Credits Empty : VN0 REQ Messages : No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VN0 RSP Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 AD Credits Empty : VN0 RSP Messages : No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VN0 SNP Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 AD Credits Empty : VN0 SNP Messages : No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VN1 REQ Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 AD Credits Empty : VN1 REQ Messages : No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VN1 RSP Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 AD Credits Empty : VN1 RSP Messages : No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VN1 SNP Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 AD Credits Empty : VN1 SNP Messages : No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VNA",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 AD Credits Empty : VNA : No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VN0 RSP Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_NCS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 BL Credits Empty : VN0 RSP Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VN0 REQ Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 BL Credits Empty : VN0 REQ Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VN0 SNP Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 BL Credits Empty : VN0 SNP Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VN1 RSP Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_NCS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 BL Credits Empty : VN1 RSP Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VN1 REQ Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 BL Credits Empty : VN1 REQ Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VN1 SNP Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 BL Credits Empty : VN1 SNP Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VNA",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 BL Credits Empty : VNA : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "FlowQ Generated Prefetch",
+ "EventCode": "0x29",
+ "EventName": "UNC_M3UPI_UPI_PREFETCH_SPAWN",
+ "PerPkg": "1",
+ "PublicDescription": "FlowQ Generated Prefetch : Count cases where FlowQ causes spawn of Prefetch to iMC/SMI3 target",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used : WB on BL",
+ "EventCode": "0x5b",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Credit Used : WB on BL : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used : NCB on BL",
+ "EventCode": "0x5b",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Credit Used : NCB on BL : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used : REQ on AD",
+ "EventCode": "0x5b",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Credit Used : REQ on AD : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used : RSP on AD",
+ "EventCode": "0x5b",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Credit Used : RSP on AD : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used : SNP on AD",
+ "EventCode": "0x5b",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Credit Used : SNP on AD : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used : RSP on BL",
+ "EventCode": "0x5b",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Credit Used : RSP on BL : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits : WB on BL",
+ "EventCode": "0x5d",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 No Credits : WB on BL : Number of Cycles there were no VN0 Credits : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits : NCB on BL",
+ "EventCode": "0x5d",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 No Credits : NCB on BL : Number of Cycles there were no VN0 Credits : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits : REQ on AD",
+ "EventCode": "0x5d",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 No Credits : REQ on AD : Number of Cycles there were no VN0 Credits : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits : RSP on AD",
+ "EventCode": "0x5d",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 No Credits : RSP on AD : Number of Cycles there were no VN0 Credits : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits : SNP on AD",
+ "EventCode": "0x5d",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 No Credits : SNP on AD : Number of Cycles there were no VN0 Credits : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits : RSP on BL",
+ "EventCode": "0x5d",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 No Credits : RSP on BL : Number of Cycles there were no VN0 Credits : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used : WB on BL",
+ "EventCode": "0x5c",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Credit Used : WB on BL : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used : NCB on BL",
+ "EventCode": "0x5c",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Credit Used : NCB on BL : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used : REQ on AD",
+ "EventCode": "0x5c",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Credit Used : REQ on AD : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used : RSP on AD",
+ "EventCode": "0x5c",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Credit Used : RSP on AD : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used : SNP on AD",
+ "EventCode": "0x5c",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Credit Used : SNP on AD : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used : RSP on BL",
+ "EventCode": "0x5c",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Credit Used : RSP on BL : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits : WB on BL",
+ "EventCode": "0x5e",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 No Credits : WB on BL : Number of Cycles there were no VN1 Credits : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits : NCB on BL",
+ "EventCode": "0x5e",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 No Credits : NCB on BL : Number of Cycles there were no VN1 Credits : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits : REQ on AD",
+ "EventCode": "0x5e",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 No Credits : REQ on AD : Number of Cycles there were no VN1 Credits : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits : RSP on AD",
+ "EventCode": "0x5e",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 No Credits : RSP on AD : Number of Cycles there were no VN1 Credits : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits : SNP on AD",
+ "EventCode": "0x5e",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 No Credits : SNP on AD : Number of Cycles there were no VN1 Credits : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits : RSP on BL",
+ "EventCode": "0x5e",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 No Credits : RSP on BL : Number of Cycles there were no VN1 Credits : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN0",
+ "EventCode": "0x7e",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x82",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN1",
+ "EventCode": "0x7e",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0xa0",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN0",
+ "EventCode": "0x7e",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN1",
+ "EventCode": "0x7e",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0x90",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN0",
+ "EventCode": "0x7e",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x84",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN1",
+ "EventCode": "0x7e",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0xc0",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN0",
+ "EventCode": "0x7e",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN1",
+ "EventCode": "0x7e",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN0",
+ "EventCode": "0x7e",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN1",
+ "EventCode": "0x7e",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN0",
+ "EventCode": "0x7e",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN1",
+ "EventCode": "0x7e",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN0",
+ "EventCode": "0x7d",
+ "EventName": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN1",
+ "EventCode": "0x7d",
+ "EventName": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN0",
+ "EventCode": "0x7d",
+ "EventName": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN1",
+ "EventCode": "0x7d",
+ "EventName": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN0",
+ "EventCode": "0x7d",
+ "EventName": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN1",
+ "EventCode": "0x7d",
+ "EventName": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN0",
+ "EventCode": "0x7d",
+ "EventName": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN1",
+ "EventCode": "0x7d",
+ "EventName": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN1",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.ARB",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.ARB",
+ "PerPkg": "1",
+ "PublicDescription": ": xpt prefetch message is making arbitration request",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.ARRIVED",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.ARRIVED",
+ "PerPkg": "1",
+ "PublicDescription": ": xpt prefetch message arrived in ingress pipeline",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.BYPASS",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.BYPASS",
+ "PerPkg": "1",
+ "PublicDescription": ": xpt prefetch message took bypass path",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.FLITTED",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.FLITTED",
+ "PerPkg": "1",
+ "PublicDescription": ": xpt prefetch message was slotted into flit (non bypass)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.LOST_ARB",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.LOST_ARB",
+ "PerPkg": "1",
+ "PublicDescription": ": xpt prefetch message lost arbitration",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.LOST_OLD",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.LOST_OLD",
+ "PerPkg": "1",
+ "PublicDescription": ": xpt prefetch message was dropped because it became too old",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.LOST_QFULL",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.LOST_QFULL",
+ "PerPkg": "1",
+ "PublicDescription": ": xpt prefetch message was dropped because it was overwritten by new message while prefetch queue was full",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (AD Bouncable)",
+ "EventCode": "0x47",
+ "EventName": "UNC_MDF_CRS_TxR_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "AD Bouncable : Number of allocations into the CRS Egress",
+ "UMask": "0x1",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (AD credited)",
+ "EventCode": "0x47",
+ "EventName": "UNC_MDF_CRS_TxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "AD credited : Number of allocations into the CRS Egress",
+ "UMask": "0x2",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (AK)",
+ "EventCode": "0x47",
+ "EventName": "UNC_MDF_CRS_TxR_INSERTS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "AK : Number of allocations into the CRS Egress",
+ "UMask": "0x10",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (AKC)",
+ "EventCode": "0x47",
+ "EventName": "UNC_MDF_CRS_TxR_INSERTS.AKC",
+ "PerPkg": "1",
+ "PublicDescription": "AKC : Number of allocations into the CRS Egress",
+ "UMask": "0x40",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (BL Bouncable)",
+ "EventCode": "0x47",
+ "EventName": "UNC_MDF_CRS_TxR_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "BL Bouncable : Number of allocations into the CRS Egress",
+ "UMask": "0x4",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (BL credited)",
+ "EventCode": "0x47",
+ "EventName": "UNC_MDF_CRS_TxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "BL credited : Number of allocations into the CRS Egress",
+ "UMask": "0x8",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (IV)",
+ "EventCode": "0x47",
+ "EventName": "UNC_MDF_CRS_TxR_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "IV : Number of allocations into the CRS Egress",
+ "UMask": "0x20",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO\r\nIngress (V-EMIB) (AD)",
+ "EventCode": "0x4B",
+ "EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.AD",
+ "PerPkg": "1",
+ "PublicDescription": "AD : Number of cycles incoming messages from the vertical ring that are bounced at the SBO",
+ "UMask": "0x1",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO\r\nIngress (V-EMIB) (AK)",
+ "EventCode": "0x4B",
+ "EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.AK",
+ "PerPkg": "1",
+ "PublicDescription": "AK : Number of cycles incoming messages from the vertical ring that are bounced at the SBO",
+ "UMask": "0x4",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO\r\nIngress (V-EMIB) (AKC)",
+ "EventCode": "0x4B",
+ "EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.AKC",
+ "PerPkg": "1",
+ "PublicDescription": "AKC : Number of cycles incoming messages from the vertical ring that are bounced at the SBO",
+ "UMask": "0x10",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO\r\nIngress (V-EMIB) (BL)",
+ "EventCode": "0x4B",
+ "EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.BL",
+ "PerPkg": "1",
+ "PublicDescription": "BL : Number of cycles incoming messages from the vertical ring that are bounced at the SBO",
+ "UMask": "0x2",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO\r\nIngress (V-EMIB) (IV)",
+ "EventCode": "0x4B",
+ "EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.IV",
+ "PerPkg": "1",
+ "PublicDescription": "IV : Number of cycles incoming messages from the vertical ring that are bounced at the SBO",
+ "UMask": "0x8",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles when the distress signals are asserted based on SBO Ingress threshold",
+ "EventCode": "0x15",
+ "EventName": "UNC_MDF_FAST_ASSERTED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "AD bnc : Counts the number of cycles when the distress signals are asserted based on SBO Ingress threshold",
+ "UMask": "0x1",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles when the distress signals are asserted based on SBO Ingress threshold",
+ "EventCode": "0x15",
+ "EventName": "UNC_MDF_FAST_ASSERTED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "BL bnc : Counts the number of cycles when the distress signals are asserted based on SBO Ingress threshold",
+ "UMask": "0x2",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "UPI Clockticks",
+ "EventCode": "0x01",
+ "EventName": "UNC_UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of UPI LL clock cycles while the event is enabled",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Direct packet attempts : D2C",
+ "EventCode": "0x12",
+ "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2C",
+ "PerPkg": "1",
+ "PublicDescription": "Direct packet attempts : D2C : Counts the number of DRS packets that we attempted to do direct2core/direct2UPI on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Direct packet attempts : D2K",
+ "EventCode": "0x12",
+ "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2K",
+ "PerPkg": "1",
+ "PublicDescription": "Direct packet attempts : D2K : Counts the number of DRS packets that we attempted to do direct2core/direct2UPI on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles in L1",
+ "EventCode": "0x21",
+ "EventName": "UNC_UPI_L1_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles in L1 : Number of UPI qfclk cycles spent in L1 power mode. L1 is a mode that totally shuts down a UPI link. Use edge detect to count the number of instances when the UPI link entered L1. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. Because L1 totally shuts down the link, it takes a good amount of time to exit this mode.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
+ "EventCode": "0x16",
+ "EventName": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles where phy is not in L0, L0c, L0p, L1",
+ "EventCode": "0x20",
+ "EventName": "UNC_UPI_PHY_INIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "L1 Req Nack",
+ "EventCode": "0x23",
+ "EventName": "UNC_UPI_POWER_L1_NACK",
+ "PerPkg": "1",
+ "PublicDescription": "L1 Req Nack : Counts the number of times a link sends/receives a LinkReqNAck. When the UPI links would like to change power state, the Tx side initiates a request to the Rx side requesting to change states. This requests can either be accepted or denied. If the Rx side replies with an Ack, the power mode will change. If it replies with NAck, no change will take place. This can be filtered based on Rx and Tx. An Rx LinkReqNAck refers to receiving an NAck (meaning this agent's Tx originally requested the power change). A Tx LinkReqNAck refers to sending this command (meaning the peer agent's Tx originally requested the power change and this agent accepted it).",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "L1 Req (same as L1 Ack).",
+ "EventCode": "0x22",
+ "EventName": "UNC_UPI_POWER_L1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "L1 Req (same as L1 Ack). : Counts the number of times a link sends/receives a LinkReqAck. When the UPI links would like to change power state, the Tx side initiates a request to the Rx side requesting to change states. This requests can either be accepted or denied. If the Rx side replies with an Ack, the power mode will change. If it replies with NAck, no change will take place. This can be filtered based on Rx and Tx. An Rx LinkReqAck refers to receiving an Ack (meaning this agent's Tx originally requested the power change). A Tx LinkReqAck refers to sending this command (meaning the peer agent's Tx originally requested the power change and this agent accepted it).",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
+ "EventCode": "0x25",
+ "EventName": "UNC_UPI_RxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles in L0p : Number of UPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the UPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize UPI for snoops and their responses. Use edge detect to count the number of instances when the UPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0",
+ "EventCode": "0x24",
+ "EventName": "UNC_UPI_RxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles in L0 : Number of UPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_ANY_FLITS.DATA",
+ "EventCode": "0x4B",
+ "EventName": "UNC_UPI_RxL_ANY_FLITS.DATA",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_ANY_FLITS.LLCRD",
+ "EventCode": "0x4B",
+ "EventName": "UNC_UPI_RxL_ANY_FLITS.LLCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_ANY_FLITS.LLCTRL",
+ "EventCode": "0x4B",
+ "EventName": "UNC_UPI_RxL_ANY_FLITS.LLCTRL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_ANY_FLITS.NULL",
+ "EventCode": "0x4B",
+ "EventName": "UNC_UPI_RxL_ANY_FLITS.NULL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_ANY_FLITS.PROTHDR",
+ "EventCode": "0x4B",
+ "EventName": "UNC_UPI_RxL_ANY_FLITS.PROTHDR",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_ANY_FLITS.SLOT0",
+ "EventCode": "0x4B",
+ "EventName": "UNC_UPI_RxL_ANY_FLITS.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_ANY_FLITS.SLOT1",
+ "EventCode": "0x4B",
+ "EventName": "UNC_UPI_RxL_ANY_FLITS.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_ANY_FLITS.SLOT2",
+ "EventCode": "0x4B",
+ "EventName": "UNC_UPI_RxL_ANY_FLITS.SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0xe",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass, Match Opcode",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass, Match Opcode : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x10e",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard, Match Opcode",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard, Match Opcode : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x10f",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Bypassed : Slot 0",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Flit Buffer Bypassed : Slot 0 : Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Bypassed : Slot 1",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Flit Buffer Bypassed : Slot 1 : Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Bypassed : Slot 2",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Flit Buffer Bypassed : Slot 2 : Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "CRC Errors Detected",
+ "EventCode": "0x0b",
+ "EventName": "UNC_UPI_RxL_CRC_ERRORS",
+ "PerPkg": "1",
+ "PublicDescription": "CRC Errors Detected : Number of CRC errors detected in the UPI Agent. Each UPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the UPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "LLR Requests Sent",
+ "EventCode": "0x08",
+ "EventName": "UNC_UPI_RxL_CRC_LLR_REQ_TRANSMIT",
+ "PerPkg": "1",
+ "PublicDescription": "LLR Requests Sent : Number of LLR Requests were transmitted. This should generally be <= the number of CRC errors detected. If multiple errors are detected before the Rx side receives a LLC_REQ_ACK from the Tx side, there is no need to send more LLR_REQ_NACKs..",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed",
+ "EventCode": "0x39",
+ "EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Credit Consumed : Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed",
+ "EventCode": "0x3a",
+ "EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN1",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Credit Consumed : Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Consumed",
+ "EventCode": "0x38",
+ "EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VNA",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : All Data",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : All Data : Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Null FLITs received from any slot",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "UMask": "0x27",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Data",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : Data : Shows legal flit time (hides impact of L0p and L0c). : Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Idle",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : Idle : Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0x47",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : LLCRD Not Empty",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.LLCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : LLCRD Not Empty : Shows legal flit time (hides impact of L0p and L0c). : Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : LLCTRL",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.LLCTRL",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : LLCTRL : Shows legal flit time (hides impact of L0p and L0c). : Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : All Non Data",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : All Non Data : Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0x97",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Slot NULL or LLCRD Empty",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.NULL",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : Slot NULL or LLCRD Empty : Shows legal flit time (hides impact of L0p and L0c). : LLCRD with all zeros is treated as NULL. Slot 1 is not treated as NULL if slot 0 is a dual slot. This can apply to slot 0,1, or 2.",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Protocol Header",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.PROTHDR",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : Protocol Header : Shows legal flit time (hides impact of L0p and L0c). : Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Slot 0",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : Slot 0 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 0 - Other mask bits determine types of headers to count.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Slot 1",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : Slot 1 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 1 - Other mask bits determine types of headers to count.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Slot 2",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : Slot 2 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 2 - Other mask bits determine types of headers to count.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations : Slot 0",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Flit Buffer Allocations : Slot 0 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations : Slot 1",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Flit Buffer Allocations : Slot 1 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations : Slot 2",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Flit Buffer Allocations : Slot 2 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets : Slot 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Occupancy - All Packets : Slot 0 : Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets : Slot 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Occupancy - All Packets : Slot 1 : Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets : Slot 2",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Occupancy - All Packets : Slot 2 : Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "EventCode": "0x2a",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
+ "EventCode": "0x2a",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "EventCode": "0x2a",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "EventCode": "0x2a",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "EventCode": "0x2a",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "EventCode": "0x2a",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
+ "EventCode": "0x2a",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "EventCode": "0x2a",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
+ "EventCode": "0x27",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles in L0p : Number of UPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the UPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize UPI for snoops and their responses. Use edge detect to count the number of instances when the UPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
+ "EventCode": "0x28",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "EventCode": "0x29",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0",
+ "EventCode": "0x26",
+ "EventName": "UNC_UPI_TxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles in L0 : Number of UPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL_ANY_FLITS.DATA",
+ "EventCode": "0x4A",
+ "EventName": "UNC_UPI_TxL_ANY_FLITS.DATA",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL_ANY_FLITS.LLCRD",
+ "EventCode": "0x4A",
+ "EventName": "UNC_UPI_TxL_ANY_FLITS.LLCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL_ANY_FLITS.LLCTRL",
+ "EventCode": "0x4A",
+ "EventName": "UNC_UPI_TxL_ANY_FLITS.LLCTRL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL_ANY_FLITS.NULL",
+ "EventCode": "0x4A",
+ "EventName": "UNC_UPI_TxL_ANY_FLITS.NULL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL_ANY_FLITS.PROTHDR",
+ "EventCode": "0x4A",
+ "EventName": "UNC_UPI_TxL_ANY_FLITS.PROTHDR",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL_ANY_FLITS.SLOT0",
+ "EventCode": "0x4A",
+ "EventName": "UNC_UPI_TxL_ANY_FLITS.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL_ANY_FLITS.SLOT1",
+ "EventCode": "0x4A",
+ "EventName": "UNC_UPI_TxL_ANY_FLITS.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL_ANY_FLITS.SLOT2",
+ "EventCode": "0x4A",
+ "EventName": "UNC_UPI_TxL_ANY_FLITS.SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0xe",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass, Match Opcode",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass, Match Opcode : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x10e",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard, Match Opcode",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard, Match Opcode : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x10f",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Bypassed",
+ "EventCode": "0x41",
+ "EventName": "UNC_UPI_TxL_BYPASSED",
+ "PerPkg": "1",
+ "PublicDescription": "Tx Flit Buffer Bypassed : Counts the number of times that an incoming flit was able to bypass the Tx flit buffer and pass directly out the UPI Link. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : All Data",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : All Data : Counts number of data flits across this UPI link.",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : All LLCRD Not Empty",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_LLCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : All Data : Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0x17",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : All LLCTRL",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_LLCTRL",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : All LLCTRL : Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0x47",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "All Null Flits",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "UMask": "0x27",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : All Protocol Header",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_PROTHDR",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : All ProtDDR : Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0x87",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Data",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : Data : Shows legal flit time (hides impact of L0p and L0c). : Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Idle",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : Idle : Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0x47",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : LLCRD Not Empty",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.LLCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : LLCRD Not Empty : Shows legal flit time (hides impact of L0p and L0c). : Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : LLCTRL",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.LLCTRL",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : LLCTRL : Shows legal flit time (hides impact of L0p and L0c). : Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : All Non Data",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : All Non Data : Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0x97",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Slot NULL or LLCRD Empty",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.NULL",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : Slot NULL or LLCRD Empty : Shows legal flit time (hides impact of L0p and L0c). : LLCRD with all zeros is treated as NULL. Slot 1 is not treated as NULL if slot 0 is a dual slot. This can apply to slot 0,1, or 2.",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Protocol Header",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.PROTHDR",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : Protocol Header : Shows legal flit time (hides impact of L0p and L0c). : Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Slot 0",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : Slot 0 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 0 - Other mask bits determine types of headers to count.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Slot 1",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : Slot 1 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 1 - Other mask bits determine types of headers to count.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Slot 2",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : Slot 2 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 2 - Other mask bits determine types of headers to count.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Allocations",
+ "EventCode": "0x40",
+ "EventName": "UNC_UPI_TxL_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Tx Flit Buffer Allocations : Number of allocations into the UPI Tx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Occupancy",
+ "EventCode": "0x42",
+ "EventName": "UNC_UPI_TxL_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Tx Flit Buffer Occupancy : Accumulates the number of flits in the TxQ. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
+ "EventCode": "0x45",
+ "EventName": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "EventCode": "0x44",
+ "EventName": "UNC_UPI_VNA_CREDIT_RETURN_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "VNA Credits Pending Return - Occupancy : Number of VNA credits in the Rx side that are waitng to be returned back across the link.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Message Received : Doorbell",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : Interrupt",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.INT_PRIO",
+ "PerPkg": "1",
+ "PublicDescription": "Message Received : Interrupt : Interrupts",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : IPI",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Message Received : IPI : Inter Processor Interrupts",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : MSI",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Message Received : MSI : Message Signaled Interrupts - interrupts sent by devices (including PCIe via IOxAPIC) (Socket Mode only)",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : VLW",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Message Received : VLW : Virtual Logical Wire (legacy) message were received from Uncore.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCB",
+ "EventCode": "0x4d",
+ "EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCS",
+ "EventCode": "0x4d",
+ "EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCB",
+ "EventCode": "0x4d",
+ "EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCS",
+ "EventCode": "0x4d",
+ "EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCB",
+ "EventCode": "0x4d",
+ "EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCS",
+ "EventCode": "0x4d",
+ "EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCB",
+ "EventCode": "0x4d",
+ "EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCS",
+ "EventCode": "0x4d",
+ "EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.RxC_CYCLES_EMPTY_BL",
+ "EventCode": "0x4e",
+ "EventName": "UNC_U_M2U_MISC2.RxC_CYCLES_EMPTY_BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.RxC_CYCLES_FULL_BL",
+ "EventCode": "0x4e",
+ "EventName": "UNC_U_M2U_MISC2.RxC_CYCLES_FULL_BL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCB",
+ "EventCode": "0x4e",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCS",
+ "EventCode": "0x4e",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AK",
+ "EventCode": "0x4e",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AK",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AKC",
+ "EventCode": "0x4e",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AKC",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_BL",
+ "EventCode": "0x4e",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_BL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_FULL_BL",
+ "EventCode": "0x4e",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_FULL_BL",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AK",
+ "EventCode": "0x4f",
+ "EventName": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AK",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AKC",
+ "EventCode": "0x4f",
+ "EventName": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AKC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Cycles PHOLD Assert to Ack : Assert to ACK",
+ "EventCode": "0x45",
+ "EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles PHOLD Assert to Ack : Assert to ACK : PHOLD cycles.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "EventCode": "0x4c",
+ "EventName": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.RDRAND",
+ "EventCode": "0x4c",
+ "EventName": "UNC_U_RACU_DRNG.RDRAND",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.RDSEED",
+ "EventCode": "0x4c",
+ "EventName": "UNC_U_RACU_DRNG.RDSEED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "RACU Request",
+ "EventCode": "0x46",
+ "EventName": "UNC_U_RACU_REQUESTS",
+ "PerPkg": "1",
+ "PublicDescription": "RACU Request : Number outstanding register requests within message channel tracker",
+ "Unit": "UBOX"
+ }
+]
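Each entry above is a perf event alias: the EventName becomes a symbolic event usable with perf stat, and the EventCode/UMask fields are what perf encodes into the corresponding uncore PMU. A minimal sketch of that mapping in Python (the PMU instance name "uncore_upi_0" is an assumption used only for illustration; actual instance names and counts vary by platform):

    # Minimal sketch: derive the raw perf event string for one of the JSON
    # aliases above, e.g. UNC_UPI_TxL_FLITS.ALL_DATA (EventCode 0x02, UMask 0xf).
    # "uncore_upi_0" is an assumed PMU instance name, not taken from this patch.
    entry = {
        "EventCode": "0x02",
        "EventName": "UNC_UPI_TxL_FLITS.ALL_DATA",
        "UMask": "0xf",
        "Unit": "UPI",
    }

    def to_perf_event(event, pmu="uncore_upi_0"):
        # event= and umask= are the standard format terms exposed by uncore PMUs.
        terms = ["event=" + event["EventCode"]]
        if "UMask" in event:
            terms.append("umask=" + event["UMask"])
        return "{}/{}/".format(pmu, ",".join(terms))

    print(to_perf_event(entry))  # -> uncore_upi_0/event=0x02,umask=0xf/

Once this alias file is built into perf, the same counter can also be requested by name, e.g. perf stat -e UNC_UPI_TxL_FLITS.ALL_DATA -a.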
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json
new file mode 100644
index 000000000000..557080b74ee5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json
@@ -0,0 +1,3587 @@
+[
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART0_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART1_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART2_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x22",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART3_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x23",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART4_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART5_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x25",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART6_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x26",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART7_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x27",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "IIO Clockticks",
+ "EventCode": "0x01",
+ "EventName": "UNC_IIO_CLOCKTICKS",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "Number of IIO clock cycles while the event is enabled",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for IIO clocktick",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_CLOCKTICKS_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-7",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xff",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 0-7",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 0 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x7001004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 1 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 1",
+ "UMask": "0x7002004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 2 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 2",
+ "UMask": "0x7004004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 2 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 3",
+ "UMask": "0x7008004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 4",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 0 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 4",
+ "UMask": "0x7010004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 5",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 1 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 5",
+ "UMask": "0x7020004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 6",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 2 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 6",
+ "UMask": "0x7040004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 7",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 2 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 7",
+ "UMask": "0x7080004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "UMask": "0xff",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy : Part 0",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x7000001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy : Part 1",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "x4 card is plugged in to slot 1",
+ "UMask": "0x7000002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy : Part 2",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x7000004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy : Part 3",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "x4 card is plugged in to slot 3",
+ "UMask": "0x7000008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy : Part 4",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x7000010",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy : Part 5",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "x4 card is plugged in to slot 1",
+ "UMask": "0x7000020",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy : Part 6",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x7000040",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy : Part 7",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "x4 card is plugged in to slot 3",
+ "UMask": "0x7000080",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part0-7",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.ALL_PARTS",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00ff",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part0",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x7001004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part1",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x7002004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part2",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x7004004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part3",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x7008004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Cards MMIO space",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Data requested by the CPU : Core reading from Cards MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x7010004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Cards MMIO space",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Data requested by the CPU : Core reading from Cards MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x7020004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Cards MMIO space",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Data requested by the CPU : Core reading from Cards MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x7040004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Cards MMIO space",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Data requested by the CPU : Core reading from Cards MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x7080004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part0-7 by the CPU",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.ALL_PARTS",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00ff",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0100",
+ "PublicDescription": "Data requested by the CPU : Core writing to Cards MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0200",
+ "PublicDescription": "Data requested by the CPU : Core writing to Cards MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part0 by the CPU",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part1 by the CPU",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part2 by the CPU",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part3 by the CPU",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Data requested by the CPU : Core writing to Cards MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Data requested by the CPU : Core writing to Cards MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Data requested by the CPU : Core writing to Cards MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Data requested by the CPU : Core writing to Cards MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x7001008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x7002008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x7004008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x7008008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x7010008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x7020008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x7040008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x7080008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x7001002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x7002002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x7004002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x7008002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x7010002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x7020002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x7040002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x7080002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.ALL_PARTS",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xff",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by IIO Part0-7 to Memory",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.ALL_PARTS",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00ff",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by IIO Part0 to Memory",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by IIO Part1 to Memory",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by IIO Part2 to Memory",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by IIO Part3 to Memory",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made by IIO Part0-7 to Memory",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.ALL_PARTS",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00ff",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made by IIO Part0 to Memory",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made by IIO Part1 to Memory",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made by IIO Part2 to Memory",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made by IIO Part3 to Memory",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Passing data to be written",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Incoming arbitration requests : Passing data to be written : How often different queues (e.g. channel / fc) ask to send request into pipeline : Only for posted requests",
+ "UMask": "0x70ff020",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Issuing final read or write of line",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Incoming arbitration requests : Issuing final read or write of line : How often different queues (e.g. channel / fc) ask to send request into pipeline",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Processing response from IOMMU",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.IOMMU_HIT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Incoming arbitration requests : Processing response from IOMMU : How often different queues (e.g. channel / fc) ask to send request into pipeline",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Issuing to IOMMU",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.IOMMU_REQ",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Incoming arbitration requests : Issuing to IOMMU : How often different queues (e.g. channel / fc) ask to send request into pipeline",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Request Ownership",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Incoming arbitration requests : Request Ownership : How often different queues (e.g. channel / fc) ask to send request into pipeline : Only for posted requests",
+ "UMask": "0x70ff004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Writing line",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Incoming arbitration requests : Writing line : How often different queues (e.g. channel / fc) ask to send request into pipeline : Only for posted requests",
+ "UMask": "0x70ff010",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Passing data to be written",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Incoming arbitration requests granted : Passing data to be written : How often different queues (e.g. channel / fc) are allowed to send request into pipeline : Only for posted requests",
+ "UMask": "0x70ff020",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Issuing final read or write of line",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Incoming arbitration requests granted : Issuing final read or write of line : How often different queues (e.g. channel / fc) are allowed to send request into pipeline",
+ "UMask": "0x70ff008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Processing response from IOMMU",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.IOMMU_HIT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Incoming arbitration requests granted : Processing response from IOMMU : How often different queues (e.g. channel / fc) are allowed to send request into pipeline",
+ "UMask": "0x70ff002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Issuing to IOMMU",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.IOMMU_REQ",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Incoming arbitration requests granted : Issuing to IOMMU : How often different queues (e.g. channel / fc) are allowed to send request into pipeline",
+ "UMask": "0x70ff001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Request Ownership",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Incoming arbitration requests granted : Request Ownership : How often different queues (e.g. channel / fc) are allowed to send request into pipeline : Only for posted requests",
+ "UMask": "0x70ff004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Writing line",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Incoming arbitration requests granted : Writing line : How often different queues (e.g. channel / fc) are allowed to send request into pipeline : Only for posted requests",
+ "UMask": "0x70ff010",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Context cache hits",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_HITS",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": ": Context cache hits : Counts each time a first look up of the transaction hits the RCC.",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Context cache lookups",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_LOOKUPS",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": ": Context cache lookups : Counts each time a transaction looks up root context cache.",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB lookups first",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.FIRST_LOOKUPS",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": ": IOTLB lookups first : Some transactions have to look up IOTLB multiple times. Counts the first time a request looks up IOTLB.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "IOTLB Fills (same as IOTLB miss)",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.MISSES",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "IOTLB Fills (same as IOTLB miss) : When a transaction misses IOTLB, it does a page walk to look up memory and bring in the relevant page translation. Counts when this page translation is written to IOTLB.",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOMMU memory access",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.NUM_MEM_ACCESSES",
+ "PerPkg": "1",
+ "PublicDescription": ": IOMMU memory access : IOMMU sends out memory fetches when it misses the cache look up which is indicated by this signal. M2IOSF only uses low priority channel",
+ "UMask": "0xc0",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWC Hit to a 2M page",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_1G_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": PWC Hit to a 2M page : Counts each time a transaction's first look up hits the SLPWC at the 2M level",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWT Hit to a 256T page",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_256T_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": PWT Hit to a 256T page : Counts each time a transaction's first look up hits the SLPWC at the 512G level",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWC Hit to a 4K page",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_2M_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": PWC Hit to a 4K page : Counts each time a transaction's first look up hits the SLPWC at the 4K level",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWC Hit to a 1G page",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_512G_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": PWC Hit to a 1G page : Counts each time a transaction's first look up hits the SLPWC at the 1G level",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PageWalk cache fill",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_CACHE_FILLS",
+ "PerPkg": "1",
+ "PublicDescription": ": PageWalk cache fill : When a transaction misses SLPWC, it does a page walk to look up memory and bring in the relevant page translation. When this page translation is written to SLPWC, ObsPwcFillValid_nnnH is asserted.",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PageWalk cache lookup",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWT_CACHE_LOOKUPS",
+ "PerPkg": "1",
+ "PublicDescription": ": PageWalk cache lookup : Counts each time a transaction looks up second level page walk cache.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWC Hit to a 2M page",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.SLPWC_1G_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": PWC Hit to a 2M page : Counts each time a transaction's first look up hits the SLPWC at the 2M level",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWC Hit to a 2M page",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.SLPWC_256T_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": PWC Hit to a 2M page : Counts each time a transaction's first look up hits the SLPWC at the 2M level",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWC Hit to a 1G page",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.SLPWC_512G_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": PWC Hit to a 1G page : Counts each time a transaction's first look up hits the SLPWC at the 1G level",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Global IOTLB invalidation cycles",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.PWT_OCCUPANCY_MSB",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": ": Global IOTLB invalidation cycles : Indicates that IOMMU is doing global invalidation.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "AND Mask/match for debug bus : Non-PCIE bus : Asserted if all bits specified by mask match",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus and PCIE bus",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_BUS1",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "AND Mask/match for debug bus : Non-PCIE bus and PCIE bus : Asserted if all bits specified by mask match",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus and !(PCIE bus)",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "AND Mask/match for debug bus : Non-PCIE bus and !(PCIE bus) : Asserted if all bits specified by mask match",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : PCIE bus",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS1",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "AND Mask/match for debug bus : PCIE bus : Asserted if all bits specified by mask match",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_BUS1",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus : Asserted if all bits specified by mask match",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus)",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus) : Asserted if all bits specified by mask match",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "OR Mask/match for debug bus : Non-PCIE bus : Asserted if any bits specified by mask match",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus and PCIE bus",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_BUS1",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "OR Mask/match for debug bus : Non-PCIE bus and PCIE bus : Asserted if any bits specified by mask match",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus and !(PCIE bus)",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "OR Mask/match for debug bus : Non-PCIE bus and !(PCIE bus) : Asserted if any bits specified by mask match",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : PCIE bus",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS1",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "OR Mask/match for debug bus : PCIE bus : Asserted if any bits specified by mask match",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_BUS1",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus : Asserted if any bits specified by mask match",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus)",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus) : Asserted if any bits specified by mask match",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number requests PCIe makes of the main die : All",
+ "EventCode": "0x85",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU.COMMIT.ALL",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FFF",
+ "PublicDescription": "Number requests PCIe makes of the main die : All : Counts full PCIe requests before they're broken into a series of cache-line size requests as measured by DATA_REQ_OF_CPU and TXN_REQ_OF_CPU.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Abort",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.ABORT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Confined P2P",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.CONFINED_P2P",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Local P2P",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.LOC_P2P",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Multi-cast",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MCAST",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Memory",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MEM",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : MsgB",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MSGB",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Remote P2P",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.REM_P2P",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Ubox",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "ITC address map 1",
+ "EventCode": "0x8f",
+ "EventName": "UNC_IIO_NUM_TGT_MATCHED_REQ_OF_CPU",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "UNC_IIO_NUM_TGT_MATCHED_REQ_OF_CPU",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Outbound cacheline requests issued : 64B requests issued to device",
+ "EventCode": "0xd0",
+ "EventName": "UNC_IIO_OUTBOUND_CL_REQS_ISSUED.TO_IO",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Outbound cacheline requests issued : 64B requests issued to device : Each outbound cacheline granular request may need to make multiple passes through the pipeline. Each time a cacheline completes all its passes it advances line",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Outbound TLP (transaction layer packet) requests issued : To device",
+ "EventCode": "0xd1",
+ "EventName": "UNC_IIO_OUTBOUND_TLP_REQS_ISSUED.TO_IO",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Outbound TLP (transaction layer packet) requests issued : To device : Each time an outbound completes all its passes it advances the pointer",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PWT occupancy. Does not include 9th bit of occupancy (will undercount if PWT is greater than 255 per cycle).",
+ "EventCode": "0x42",
+ "EventName": "UNC_IIO_PWT_OCCUPANCY",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "PWT occupancy : Indicates how many page walks are outstanding at any point in time.",
+ "UMask": "0xff",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Request Ownership : PCIe Request complete",
+ "EventCode": "0x91",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Request Ownership : PCIe Request complete : Only for posted requests : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a single PCIe request completes all its cacheline granular requests, it advances pointer.",
+ "UMask": "0x70ff020",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Request Ownership : Writing line",
+ "EventCode": "0x91",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Request Ownership : Writing line : Only for posted requests : Only for posted requests",
+ "UMask": "0x70ff008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Request Ownership : Issuing final read or write of line",
+ "EventCode": "0x91",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Request Ownership : Issuing final read or write of line : Only for posted requests",
+ "UMask": "0x70ff004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Request Ownership : Passing data to be written",
+ "EventCode": "0x91",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Request Ownership : Passing data to be written : Only for posted requests : Only for posted requests",
+ "UMask": "0x70ff010",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Processing response from IOMMU : Passing data to be written",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Processing response from IOMMU : Passing data to be written : Only for posted requests",
+ "UMask": "0x70ff008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Processing response from IOMMU : Issuing final read or write of line",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.IOMMU_HIT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "UMask": "0x70ff002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Processing response from IOMMU : Request Ownership",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.IOMMU_REQ",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Processing response from IOMMU : Request Ownership : Only for posted requests",
+ "UMask": "0x70ff001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Processing response from IOMMU : Writing line",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Processing response from IOMMU : Writing line : Only for posted requests",
+ "UMask": "0x70ff004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - pass complete : Passing data to be written",
+ "EventCode": "0x90",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "PCIe Request - pass complete : Passing data to be written : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes a single pass (e.g. posts a write to single multi-cast target) it advances state : Only for posted requests",
+ "UMask": "0x70ff020",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - pass complete : Issuing final read or write of line",
+ "EventCode": "0x90",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "PCIe Request - pass complete : Issuing final read or write of line : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes a single pass (e.g. posts a write to single multi-cast target) it advances state",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - pass complete : Request Ownership",
+ "EventCode": "0x90",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "PCIe Request - pass complete : Request Ownership : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes a single pass (e.g. posts a write to single multi-cast target) it advances state : Only for posted requests",
+ "UMask": "0x70ff004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - pass complete : Writing line",
+ "EventCode": "0x90",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "PCIe Request - pass complete : Writing line : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes a single pass (e.g. posts a write to single multi-cast target) it advances state : Only for posted requests",
+ "UMask": "0x70ff010",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part0",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part1",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part2",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part3",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part0 by the CPU",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part1 by the CPU",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part2 by the CPU",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part3 by the CPU",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x7001002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x7002002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x7004002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x7008002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x7010002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x7020002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x7040002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x7080002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "M2P Clockticks",
+ "EventCode": "0x01",
+ "EventName": "UNC_M2P_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of M2P clock cycles while the event is enabled",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M2P_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "EventCode": "0xba",
+ "EventName": "UNC_M2P_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "EventCode": "0xba",
+ "EventName": "UNC_M2P_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : DRS",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.DRS_0",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credit Acquired : DRS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : DRS",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.DRS_1",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credit Acquired : DRS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : NCB",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCB_0",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credit Acquired : NCB : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : NCB",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCB_1",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credit Acquired : NCB : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : NCS",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCS_0",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credit Acquired : NCS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCS message class.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : NCS",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCS_1",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credit Acquired : NCS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credit for transfer through CMS Port 0s to the IIO for the NCS message class.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : DRS",
+ "EventCode": "0x34",
+ "EventName": "UNC_M2P_IIO_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Failed to Acquire a Credit : DRS : Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits to the IIO for the DRS message class.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : NCB",
+ "EventCode": "0x34",
+ "EventName": "UNC_M2P_IIO_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Failed to Acquire a Credit : NCB : Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits to the IIO for the NCB message class.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : NCS",
+ "EventCode": "0x34",
+ "EventName": "UNC_M2P_IIO_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Failed to Acquire a Credit : NCS : Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits to the IIO for the NCS message class.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.DRS_0",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 0 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.DRS_1",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 1 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.NCB_0",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 0 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.NCB_1",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 1 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.NCS_0",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 0 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCS message class.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.NCS_1",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 1 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credit for transfer through CMS Port 0s to the IIO for the NCS message class.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF0 - NCB",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF0 - NCS",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF1 - NCB",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF1 - NCS",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF2 - NCB",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF2 - NCS",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF3 - NCB",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF3 - NCS",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF3_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF4 - NCB",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF4 - NCS",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF4_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF5 - NCB",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF5 - NCS",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF5_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF0 - NCB",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF0 - NCS",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF1 - NCB",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF1 - NCS",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF2 - NCB",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF2 - NCS",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF3 - NCB",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF3 - NCS",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF3_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF4 - NCB",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF4 - NCS",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF4_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF5 - NCB",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF5 - NCS",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF5_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Shared Credits Returned : Agent0",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Shared Credits Returned : Agent1",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Shared Credits Returned : Agent2",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent0",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent1",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent2",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent3",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent4",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent5",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF0 - NCB",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF0 - NCS",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF1 - NCB",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF1 - NCS",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF2 - NCB",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF2 - NCS",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF3 - NCB",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF3 - NCS",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF3_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF4 - NCB",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF4 - NCS",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF4_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF5 - NCB",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF5 - NCS",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF5_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF0 - NCB",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF0 - NCS",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF1 - NCB",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF1 - NCS",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF2 - NCB",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF2 - NCS",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF3 - NCB",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF3 - NCS",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF3_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF4 - NCB",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF4 - NCS",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF4_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF5 - NCB",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF5 - NCS",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF5_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : All",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : Local NCB",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.LOCAL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : Local NCS",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.LOCAL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : Remote NCB",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.REMOTE_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : Remote NCS",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.REMOTE_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : All",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : Local NCB",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.LOCAL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : Local NCS",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.LOCAL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : Remote NCB",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.REMOTE_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : Remote NCS",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.REMOTE_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : All",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : Local NCB",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.LOCAL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : Local NCS",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.LOCAL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : Remote NCB",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.REMOTE_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : Remote NCS",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.REMOTE_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI0 - DRS",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI0_DRS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI0 - NCB",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI0 - NCS",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI1 - DRS",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI1_DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI1 - NCB",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI1 - NCS",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 1 : UPI2 - DRS",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_1.UPI2_DRS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 1 : UPI2 - NCB",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_1.UPI2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 1 : UPI2 - NCS",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_1.UPI2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Dedicated Credits Returned : UPI0 - NCB",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Dedicated Credits Returned : UPI0 - NCS",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Dedicated Credits Returned : UPI1 - NCB",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Dedicated Credits Returned : UPI1 - NCS",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Dedicated Credits Returned : UPI2 - NCB",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Dedicated Credits Returned : UPI2 - NCS",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Shared Credits Returned : Agent0",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Shared Credits Returned : Agent1",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Shared Credits Returned : Agent2",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent0",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent1",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent2",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI0 - DRS",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI0_DRS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI0 - NCB",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI0 - NCS",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI1 - DRS",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI1_DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI1 - NCB",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI1 - NCS",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 1 : UPI2 - DRS",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_1.UPI2_DRS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 1 : UPI2 - NCB",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_1.UPI2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 1 : UPI2 - NCS",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_1.UPI2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI0 - DRS",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI0_DRS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI0 - NCB",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI0 - NCS",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI1 - DRS",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI1_DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI1 - NCB",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI1 - NCS",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 1 : UPI2 - DRS",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_1.UPI2_DRS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 1 : UPI2 - NCB",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_1.UPI2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 1 : UPI2 - NCS",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_1.UPI2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_IDI",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.IIO_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.IIO_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.UPI_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.UPI_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.CHA_IDI",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.CHA_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.CHA_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.IIO_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.IIO_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.UPI_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.UPI_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_M2P_TxC_CREDITS.PMM",
+ "EventCode": "0x2d",
+ "EventName": "UNC_M2P_TxC_CREDITS.PMM",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_M2P_TxC_CREDITS.PRQ",
+ "EventCode": "0x2d",
+ "EventName": "UNC_M2P_TxC_CREDITS.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.PMM_BLOCK_0",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.PMM_BLOCK_1",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.PMM_DISTRESS_0",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.PMM_DISTRESS_1",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-memory.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-memory.json
new file mode 100644
index 000000000000..3ff9e9b722c8
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-memory.json
@@ -0,0 +1,3308 @@
+[
+ {
+ "BriefDescription": "Cycles - at UCLK",
+ "EventCode": "0x01",
+ "EventName": "UNC_M2HBM_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M2HBM_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Cycles when direct to core mode (which bypasses the CHA) was disabled",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2HBM_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Cycles when direct to core mode, which bypasses the CHA, was disabled : Non Cisgress",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2HBM_DIRECT2CORE_NOT_TAKEN_DIRSTATE.NON_CISGRESS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of time non cisgress D2C was not honoured by egress due to directory state constraints",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Counts the time when FM didn't do d2c for fill reads (cross tile case)",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2HBM_DIRECT2CORE_NOT_TAKEN_NOTFORKED",
+ "PerPkg": "1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to core transaction were overridden",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2HBM_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to core transaction was overridden : Cisgress",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2HBM_DIRECT2CORE_TXN_OVERRIDE.CISGRESS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to Intel UPI transactions were overridden",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2HBM_DIRECT2UPI_NOT_TAKEN_CREDITS",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Cycles when direct to Intel UPI was disabled",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2HBM_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2UPI was Disabled : Cisgress D2U Ignored",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M2HBM_DIRECT2UPI_NOT_TAKEN_DIRSTATE.CISGRESS",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts cisgress d2K that was not honored due to directory constraints",
+ "UMask": "0x4",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2UPI was Disabled : Egress Ignored D2U",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M2HBM_DIRECT2UPI_NOT_TAKEN_DIRSTATE.EGRESS",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts the number of time D2K was not honoured by egress due to directory state constraints",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2UPI was Disabled : Non Cisgress D2U Ignored",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M2HBM_DIRECT2UPI_NOT_TAKEN_DIRSTATE.NON_CISGRESS",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts non cisgress d2K that was not honored due to directory constraints",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Number of reads that a message sent direct2 Intel UPI was overridden",
+ "EventCode": "0x1c",
+ "EventName": "UNC_M2HBM_DIRECT2UPI_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Number of times a direct to UPI transaction was overridden.",
+ "EventCode": "0x1c",
+ "EventName": "UNC_M2HBM_DIRECT2UPI_TXN_OVERRIDE.CISGRESS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in A State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2HBM_DIRECTORY_HIT.CLEAN_A",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in I State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2HBM_DIRECTORY_HIT.CLEAN_I",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in L State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2HBM_DIRECTORY_HIT.CLEAN_P",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in S State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2HBM_DIRECTORY_HIT.CLEAN_S",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in A State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2HBM_DIRECTORY_HIT.DIRTY_A",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in I State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2HBM_DIRECTORY_HIT.DIRTY_I",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in L State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2HBM_DIRECTORY_HIT.DIRTY_P",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in S State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2HBM_DIRECTORY_HIT.DIRTY_S",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookups (any state found)",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2HBM_DIRECTORY_LOOKUP.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of hit data returns to egress with any directory to non persistent memory",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookups (cacheline found in A state)",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2HBM_DIRECTORY_LOOKUP.STATE_A",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of hit data returns to egress with directory A to non persistent memory",
+ "UMask": "0x8",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in I state)",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2HBM_DIRECTORY_LOOKUP.STATE_I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of hit data returns to egress with directory I to non persistent memory",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in S state)",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2HBM_DIRECTORY_LOOKUP.STATE_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of hit data returns to egress with directory S to non persistent memory",
+ "UMask": "0x4",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in A State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2HBM_DIRECTORY_MISS.CLEAN_A",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in I State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2HBM_DIRECTORY_MISS.CLEAN_I",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in L State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2HBM_DIRECTORY_MISS.CLEAN_P",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in S State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2HBM_DIRECTORY_MISS.CLEAN_S",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in A State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2HBM_DIRECTORY_MISS.DIRTY_A",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in I State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2HBM_DIRECTORY_MISS.DIRTY_I",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in L State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2HBM_DIRECTORY_MISS.DIRTY_P",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in S State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2HBM_DIRECTORY_MISS.DIRTY_S",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from A to I",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.A2I",
+ "PerPkg": "1",
+ "UMask": "0x320",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from A to S",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.A2S",
+ "PerPkg": "1",
+ "UMask": "0x340",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from/to Any state",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.ANY",
+ "PerPkg": "1",
+ "UMask": "0x301",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.A_TO_I_HIT_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from A to I to non persistent memory",
+ "UMask": "0x120",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.A_TO_I_MISS_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts 2lm miss data returns that would result in directory update from A to I to non persistent memory",
+ "UMask": "0x220",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.A_TO_S_HIT_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from A to S to non persistent memory",
+ "UMask": "0x140",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.A_TO_S_MISS_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts 2lm miss data returns that would result in directory update from A to S to non persistent memory",
+ "UMask": "0x240",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.HIT_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts any 1lm or 2lm hit data return that would result in directory update to non persistent memory",
+ "UMask": "0x101",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from I to A",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.I2A",
+ "PerPkg": "1",
+ "UMask": "0x304",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from I to S",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.I2S",
+ "PerPkg": "1",
+ "UMask": "0x302",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.I_TO_A_HIT_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from I to A to non persistent memory",
+ "UMask": "0x104",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.I_TO_A_MISS_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts 2lm miss data returns that would result in directory update from I to A to non persistent memory",
+ "UMask": "0x204",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.I_TO_S_HIT_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from I to S to non persistent memory",
+ "UMask": "0x102",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.I_TO_S_MISS_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts 2lm miss data returns that would result in directory update from I to S to non persistent memory",
+ "UMask": "0x202",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.MISS_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts any 2lm miss data return that would result in directory update to non persistent memory",
+ "UMask": "0x201",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from S to A",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.S2A",
+ "PerPkg": "1",
+ "UMask": "0x310",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from S to I",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.S2I",
+ "PerPkg": "1",
+ "UMask": "0x308",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.S_TO_A_HIT_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from S to A to non persistent memory",
+ "UMask": "0x110",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.S_TO_A_MISS_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts 2lm miss data returns that would result in directory update from S to A to non persistent memory",
+ "UMask": "0x210",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.S_TO_I_HIT_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from S to I to non persistent memory",
+ "UMask": "0x108",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.S_TO_I_MISS_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts 2lm miss data returns that would result in directory update from S to I to non persistent memory",
+ "UMask": "0x208",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Count distress signalled on AkAd cmp message",
+ "EventCode": "0x67",
+ "EventName": "UNC_M2HBM_DISTRESS.AD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Count distress signalled on any packet type",
+ "EventCode": "0x67",
+ "EventName": "UNC_M2HBM_DISTRESS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Count distress signalled on Bl Cmp message",
+ "EventCode": "0x67",
+ "EventName": "UNC_M2HBM_DISTRESS.BL_CMP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Count distress signalled on NM fill write message",
+ "EventCode": "0x67",
+ "EventName": "UNC_M2HBM_DISTRESS.CROSSTILE_NMWR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Count distress signalled on D2Cha message",
+ "EventCode": "0x67",
+ "EventName": "UNC_M2HBM_DISTRESS.D2CHA",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Count distress signalled on D2c message",
+ "EventCode": "0x67",
+ "EventName": "UNC_M2HBM_DISTRESS.D2CORE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Count distress signalled on D2k message",
+ "EventCode": "0x67",
+ "EventName": "UNC_M2HBM_DISTRESS.D2UPI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "EventCode": "0xba",
+ "EventName": "UNC_M2HBM_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x80000004",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "EventCode": "0xba",
+ "EventName": "UNC_M2HBM_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x80000001",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Count when Starve Glocab counter is at 7",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2HBM_IGR_STARVE_WINNER.MASK7",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x80",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Reads to iMC issued",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x304",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_READS.CH0.ALL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.CH0.ALL",
+ "PerPkg": "1",
+ "UMask": "0x104",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_READS.CH0.NORMAL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.CH0.NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x101",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_READS.CH0_ALL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.CH0_ALL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x104",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_READS.CH0_FROM_TGR",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.CH0_FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x140",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Critical Priority - Ch0",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.CH0_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x102",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_READS.CH0_NORMAL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.CH0_NORMAL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x101",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_READS.CH1.ALL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.CH1.ALL",
+ "PerPkg": "1",
+ "UMask": "0x204",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_READS.CH1.NORMAL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.CH1.NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x201",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_READS.CH1_ALL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.CH1_ALL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x204",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "From TGR - Ch1",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.CH1_FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x240",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Critical Priority - Ch1",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.CH1_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x202",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_READS.CH1_NORMAL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.CH1_NORMAL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x201",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "From TGR - All Channels",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x340",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Critical Priority - All Channels",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x302",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_READS.NORMAL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x301",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "All Writes - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1810",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_WRITES.CH0.ALL",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH0.ALL",
+ "PerPkg": "1",
+ "UMask": "0x810",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_WRITES.CH0.FULL",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH0.FULL",
+ "PerPkg": "1",
+ "UMask": "0x801",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_WRITES.CH0.PARTIAL",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH0.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x802",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_WRITES.CH0_ALL",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH0_ALL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x810",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "From TGR - Ch0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH0_FROM_TGR",
+ "PerPkg": "1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_WRITES.CH0_FULL",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH0_FULL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x801",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "ISOCH Full Line - Ch0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH0_FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x804",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Non-Inclusive - Ch0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH0_NI",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Non-Inclusive Miss - Ch0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH0_NI_MISS",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_WRITES.CH0_PARTIAL",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH0_PARTIAL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x802",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "ISOCH Partial - Ch0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH0_PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x808",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "All Writes - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH1.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1010",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Full Line Non-ISOCH - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH1.FULL",
+ "PerPkg": "1",
+ "UMask": "0x1001",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Partial Non-ISOCH - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH1.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x1002",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "All Writes - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH1_ALL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x1010",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "From TGR - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH1_FROM_TGR",
+ "PerPkg": "1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Full Line Non-ISOCH - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH1_FULL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x1001",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "ISOCH Full Line - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH1_FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x1004",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Non-Inclusive - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH1_NI",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Non-Inclusive Miss - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH1_NI_MISS",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Partial Non-ISOCH - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH1_PARTIAL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x1002",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "ISOCH Partial - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH1_PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x1008",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "From TGR - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.FROM_TGR",
+ "PerPkg": "1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Full Non-ISOCH - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.FULL",
+ "PerPkg": "1",
+ "UMask": "0x1801",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "ISOCH Full Line - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x1804",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Non-Inclusive - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.NI",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Non-Inclusive Miss - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.NI_MISS",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Partial Non-ISOCH - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x1802",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "ISOCH Partial - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x1808",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_PREFCAM_CIS_DROPS",
+ "EventCode": "0x5c",
+ "EventName": "UNC_M2HBM_PREFCAM_CIS_DROPS",
+ "PerPkg": "1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2HBM_PREFCAM_DEMAND_DROPS.CH0_UPI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2HBM_PREFCAM_DEMAND_DROPS.CH0_XPT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2HBM_PREFCAM_DEMAND_DROPS.CH1_UPI",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2HBM_PREFCAM_DEMAND_DROPS.CH1_XPT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : UPI - All Channels",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2HBM_PREFCAM_DEMAND_DROPS.UPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2HBM_PREFCAM_DEMAND_DROPS.XPT_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": ": UPI - All Channels",
+ "EventCode": "0x5d",
+ "EventName": "UNC_M2HBM_PREFCAM_DEMAND_MERGE.UPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": ": XPT - All Channels",
+ "EventCode": "0x5d",
+ "EventName": "UNC_M2HBM_PREFCAM_DEMAND_MERGE.XPT_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches",
+ "EventCode": "0x5e",
+ "EventName": "UNC_M2HBM_PREFCAM_DEMAND_NO_MERGE.RD_MERGED",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches",
+ "EventCode": "0x5e",
+ "EventName": "UNC_M2HBM_PREFCAM_DEMAND_NO_MERGE.WR_MERGED",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches",
+ "EventCode": "0x5e",
+ "EventName": "UNC_M2HBM_PREFCAM_DEMAND_NO_MERGE.WR_SQUASHED",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : UPI - Ch 0",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2HBM_PREFCAM_INSERTS.CH0_UPI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - Ch 0",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2HBM_PREFCAM_INSERTS.CH0_XPT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : UPI - Ch 1",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2HBM_PREFCAM_INSERTS.CH1_UPI",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - Ch 1",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2HBM_PREFCAM_INSERTS.CH1_XPT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : UPI - All Channels",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2HBM_PREFCAM_INSERTS.UPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - All Channels",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2HBM_PREFCAM_INSERTS.XPT_ALLCH",
+ "PerPkg": "1",
+ "PublicDescription": "Prefetch CAM Inserts : XPT -All Channels",
+ "UMask": "0x5",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : All Channels",
+ "EventCode": "0x54",
+ "EventName": "UNC_M2HBM_PREFCAM_OCCUPANCY.ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : Channel 0",
+ "EventCode": "0x54",
+ "EventName": "UNC_M2HBM_PREFCAM_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : Channel 1",
+ "EventCode": "0x54",
+ "EventName": "UNC_M2HBM_PREFCAM_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "All Channels",
+ "EventCode": "0x5f",
+ "EventName": "UNC_M2HBM_PREFCAM_RESP_MISS.ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": ": Channel 0",
+ "EventCode": "0x5f",
+ "EventName": "UNC_M2HBM_PREFCAM_RESP_MISS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": ": Channel 1",
+ "EventCode": "0x5f",
+ "EventName": "UNC_M2HBM_PREFCAM_RESP_MISS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "EventCode": "0x62",
+ "EventName": "UNC_M2HBM_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_PREFCAM_RxC_DEALLOCS.CIS",
+ "EventCode": "0x62",
+ "EventName": "UNC_M2HBM_PREFCAM_RxC_DEALLOCS.CIS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "EventCode": "0x62",
+ "EventName": "UNC_M2HBM_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_PREFCAM_RxC_OCCUPANCY",
+ "EventCode": "0x60",
+ "EventName": "UNC_M2HBM_PREFCAM_RxC_OCCUPANCY",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) : AD Ingress (from CMS) Allocations",
+ "EventCode": "0x02",
+ "EventName": "UNC_M2HBM_RxC_AD.INSERTS",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) : AD Ingress (from CMS) Allocations",
+ "EventCode": "0x02",
+ "EventName": "UNC_M2HBM_RxC_AD_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Occupancy",
+ "EventCode": "0x03",
+ "EventName": "UNC_M2HBM_RxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) : BL Ingress (from CMS) Allocations",
+ "EventCode": "0x04",
+ "EventName": "UNC_M2HBM_RxC_BL.INSERTS",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts anytime a BL packet is added to Ingress",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) : BL Ingress (from CMS) Allocations",
+ "EventCode": "0x04",
+ "EventName": "UNC_M2HBM_RxC_BL_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts anytime a BL packet is added to Ingress",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Occupancy",
+ "EventCode": "0x05",
+ "EventName": "UNC_M2HBM_RxC_BL_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Number AD Ingress Credits",
+ "EventCode": "0x2e",
+ "EventName": "UNC_M2HBM_TGR_AD_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Number BL Ingress Credits",
+ "EventCode": "0x2f",
+ "EventName": "UNC_M2HBM_TGR_BL_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Tracker Inserts : Channel 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2HBM_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x104",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Tracker Inserts : Channel 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2HBM_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x204",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy : Channel 0",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2HBM_TRACKER_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy : Channel 1",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2HBM_TRACKER_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) : AD Egress (to CMS) Allocations",
+ "EventCode": "0x06",
+ "EventName": "UNC_M2HBM_TxC_AD.INSERTS",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts anytime a AD packet is added to Egress",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) : AD Egress (to CMS) Allocations",
+ "EventCode": "0x06",
+ "EventName": "UNC_M2HBM_TxC_AD_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts anytime a AD packet is added to Egress",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Occupancy",
+ "EventCode": "0x07",
+ "EventName": "UNC_M2HBM_TxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) : Inserts - CMS0 - Near Side",
+ "EventCode": "0x0E",
+ "EventName": "UNC_M2HBM_TxC_BL.INSERTS_CMS0",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts the number of BL transactions to CMS add port 0",
+ "UMask": "0x101",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) : Inserts - CMS1 - Far Side",
+ "EventCode": "0x0E",
+ "EventName": "UNC_M2HBM_TxC_BL.INSERTS_CMS1",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts the number of BL transactions to CMS add port 1",
+ "UMask": "0x201",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Occupancy : All",
+ "EventCode": "0x0f",
+ "EventName": "UNC_M2HBM_TxC_BL_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Occupancy : Common Mesh Stop - Near Side",
+ "EventCode": "0x0f",
+ "EventName": "UNC_M2HBM_TxC_BL_OCCUPANCY.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Occupancy : Common Mesh Stop - Far Side",
+ "EventCode": "0x0f",
+ "EventName": "UNC_M2HBM_TxC_BL_OCCUPANCY.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "WPQ Flush : Channel 0",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2HBM_WPQ_FLUSH.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "WPQ Flush : Channel 1",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2HBM_WPQ_FLUSH.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "M2M and iMC WPQ Cycles w/Credits - Regular : Channel 0",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2HBM_WPQ_NO_REG_CRD.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "M2M and iMC WPQ Cycles w/Credits - Regular : Channel 1",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2HBM_WPQ_NO_REG_CRD.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "M2M and iMC WPQ Cycles w/Credits - Special : Channel 0",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2HBM_WPQ_NO_SPEC_CRD.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "M2M and iMC WPQ Cycles w/Credits - Special : Channel 1",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2HBM_WPQ_NO_SPEC_CRD.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts : Channel 0",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2HBM_WR_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts : Channel 1",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2HBM_WR_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Inserts : Channel 0",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M2HBM_WR_TRACKER_NONPOSTED_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Inserts : Channel 1",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M2HBM_WR_TRACKER_NONPOSTED_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 0",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2HBM_WR_TRACKER_NONPOSTED_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 1",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2HBM_WR_TRACKER_NONPOSTED_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Inserts : Channel 0",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2HBM_WR_TRACKER_POSTED_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Inserts : Channel 1",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2HBM_WR_TRACKER_POSTED_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Occupancy : Channel 0",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2HBM_WR_TRACKER_POSTED_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Occupancy : Channel 1",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2HBM_WR_TRACKER_POSTED_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Activate due to read, write, underfill, or bypass",
+ "EventCode": "0x02",
+ "EventName": "UNC_MCHBM_ACT_COUNT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0xff",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Activate due to read",
+ "EventCode": "0x02",
+ "EventName": "UNC_MCHBM_ACT_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x11",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Activate Count : Activate due to Read in PCH0",
+ "EventCode": "0x02",
+ "EventName": "UNC_MCHBM_ACT_COUNT.RD_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Activate Count : Activate due to Read in PCH1",
+ "EventCode": "0x02",
+ "EventName": "UNC_MCHBM_ACT_COUNT.RD_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x10",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Activate Count : Underfill Read transaction on Page Empty or Page Miss",
+ "EventCode": "0x02",
+ "EventName": "UNC_MCHBM_ACT_COUNT.UFILL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x44",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Activate Count",
+ "EventCode": "0x02",
+ "EventName": "UNC_MCHBM_ACT_COUNT.UFILL_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x4",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Activate Count",
+ "EventCode": "0x02",
+ "EventName": "UNC_MCHBM_ACT_COUNT.UFILL_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x40",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Activate due to write",
+ "EventCode": "0x02",
+ "EventName": "UNC_MCHBM_ACT_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x22",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Activate Count : Activate due to Write in PCH0",
+ "EventCode": "0x02",
+ "EventName": "UNC_MCHBM_ACT_COUNT.WR_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x2",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Activate Count : Activate due to Write in PCH1",
+ "EventCode": "0x02",
+ "EventName": "UNC_MCHBM_ACT_COUNT.WR_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x20",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "All CAS commands issued",
+ "EventCode": "0x05",
+ "EventName": "UNC_MCHBM_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "UMask": "0xff",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Pseudo Channel 0",
+ "EventCode": "0x05",
+ "EventName": "UNC_MCHBM_CAS_COUNT.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "HBM RD_CAS and WR_CAS Commands",
+ "UMask": "0x40",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Pseudo Channel 1",
+ "EventCode": "0x05",
+ "EventName": "UNC_MCHBM_CAS_COUNT.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "HBM RD_CAS and WR_CAS Commands",
+ "UMask": "0x80",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Read CAS commands issued (regular and underfill)",
+ "EventCode": "0x05",
+ "EventName": "UNC_MCHBM_CAS_COUNT.RD",
+ "PerPkg": "1",
+ "UMask": "0xcf",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Regular read CAS commands with precharge",
+ "EventCode": "0x05",
+ "EventName": "UNC_MCHBM_CAS_COUNT.RD_PRE_REG",
+ "PerPkg": "1",
+ "UMask": "0xc2",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Underfill read CAS commands with precharge",
+ "EventCode": "0x05",
+ "EventName": "UNC_MCHBM_CAS_COUNT.RD_PRE_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0xc8",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Regular read CAS commands issued (does not include underfills)",
+ "EventCode": "0x05",
+ "EventName": "UNC_MCHBM_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "UMask": "0xc1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Underfill read CAS commands issued",
+ "EventCode": "0x05",
+ "EventName": "UNC_MCHBM_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0xc4",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write CAS commands issued",
+ "EventCode": "0x05",
+ "EventName": "UNC_MCHBM_CAS_COUNT.WR",
+ "PerPkg": "1",
+ "UMask": "0xf0",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM RD_CAS and WR_CAS Commands. : HBM WR_CAS commands w/o auto-pre",
+ "EventCode": "0x05",
+ "EventName": "UNC_MCHBM_CAS_COUNT.WR_NONPRE",
+ "PerPkg": "1",
+ "UMask": "0xd0",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write CAS commands with precharge",
+ "EventCode": "0x05",
+ "EventName": "UNC_MCHBM_CAS_COUNT.WR_PRE",
+ "PerPkg": "1",
+ "UMask": "0xe0",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Pseudo Channel 0",
+ "EventCode": "0x06",
+ "EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Pseudo Channel 1",
+ "EventCode": "0x06",
+ "EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Read CAS Command in Interleaved Mode (32B)",
+ "EventCode": "0x06",
+ "EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.RD_32B",
+ "PerPkg": "1",
+ "UMask": "0xc8",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Read CAS Command in Regular Mode (64B) in Pseudochannel 0",
+ "EventCode": "0x06",
+ "EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.RD_64B",
+ "PerPkg": "1",
+ "UMask": "0xc1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Underfill Read CAS Command in Interleaved Mode (32B)",
+ "EventCode": "0x06",
+ "EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.RD_UFILL_32B",
+ "PerPkg": "1",
+ "UMask": "0xd0",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Underfill Read CAS Command in Regular Mode (64B) in Pseudochannel 1",
+ "EventCode": "0x06",
+ "EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.RD_UFILL_64B",
+ "PerPkg": "1",
+ "UMask": "0xc2",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write CAS Command in Interleaved Mode (32B)",
+ "EventCode": "0x06",
+ "EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.WR_32B",
+ "PerPkg": "1",
+ "UMask": "0xe0",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write CAS Command in Regular Mode (64B) in Pseudochannel 0",
+ "EventCode": "0x06",
+ "EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.WR_64B",
+ "PerPkg": "1",
+ "UMask": "0xc4",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "IMC Clockticks at DCLK frequency",
+ "EventCode": "0x01",
+ "EventName": "UNC_MCHBM_CLOCKTICKS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Precharge All Commands",
+ "EventCode": "0x44",
+ "EventName": "UNC_MCHBM_HBM_PREALL.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the precharge all command was sent.",
+ "UMask": "0x1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Precharge All Commands",
+ "EventCode": "0x44",
+ "EventName": "UNC_MCHBM_HBM_PREALL.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the precharge all command was sent.",
+ "UMask": "0x2",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "All Precharge Commands",
+ "EventCode": "0x44",
+ "EventName": "UNC_MCHBM_HBM_PRE_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Precharge All Commands: Counts the number of times that the precharge all command was sent.",
+ "UMask": "0x3",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "IMC Clockticks at HCLK frequency",
+ "EventCode": "0x01",
+ "EventName": "UNC_MCHBM_HCLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "All precharge events",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
+ "UMask": "0xff",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Precharge from MC page table",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.PGT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
+ "UMask": "0x88",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Precharge commands. : Precharges from Page Table",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.PGT_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel. : Equivalent to PAGE_EMPTY",
+ "UMask": "0x8",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Precharge commands.",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.PGT_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
+ "UMask": "0x80",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Precharge due to read on page miss",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
+ "UMask": "0x11",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Precharge commands. : Precharge due to read",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.RD_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel. : Precharge from read bank scheduler",
+ "UMask": "0x1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Precharge commands.",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.RD_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
+ "UMask": "0x10",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Precharge commands.",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.UFILL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
+ "UMask": "0x44",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Precharge commands.",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.UFILL_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
+ "UMask": "0x4",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Precharge commands.",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.UFILL_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
+ "UMask": "0x40",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Precharge due to write on page miss",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
+ "UMask": "0x22",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Precharge commands. : Precharge due to write",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.WR_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel. : Precharge from write bank scheduler",
+ "UMask": "0x2",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Precharge commands.",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.WR_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
+ "UMask": "0x20",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles where the read buffer has greater than UMASK elements. NOTE: Umask must be set to the maximum number of elements in the queue (24 entries for SPR).",
+ "EventCode": "0x19",
+ "EventName": "UNC_MCHBM_RDB_FULL",
+ "PerPkg": "1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Counts the number of inserts into the read buffer.",
+ "EventCode": "0x17",
+ "EventName": "UNC_MCHBM_RDB_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Inserts",
+ "EventCode": "0x17",
+ "EventName": "UNC_MCHBM_RDB_INSERTS.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Inserts",
+ "EventCode": "0x17",
+ "EventName": "UNC_MCHBM_RDB_INSERTS.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Counts the number of elements in the read buffer per cycle.",
+ "EventCode": "0x1a",
+ "EventName": "UNC_MCHBM_RDB_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "EventCode": "0x10",
+ "EventName": "UNC_MCHBM_RPQ_INSERTS.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Allocations: Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "UMask": "0x1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "EventCode": "0x10",
+ "EventName": "UNC_MCHBM_RPQ_INSERTS.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Allocations: Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "UMask": "0x2",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
+ "EventCode": "0x80",
+ "EventName": "UNC_MCHBM_RPQ_OCCUPANCY_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Occupancy: Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
+ "EventCode": "0x81",
+ "EventName": "UNC_MCHBM_RPQ_OCCUPANCY_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Occupancy: Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "EventCode": "0x20",
+ "EventName": "UNC_MCHBM_WPQ_INSERTS.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Allocations: Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC.",
+ "UMask": "0x1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "EventCode": "0x20",
+ "EventName": "UNC_MCHBM_WPQ_INSERTS.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Allocations: Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC.",
+ "UMask": "0x2",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "EventCode": "0x82",
+ "EventName": "UNC_MCHBM_WPQ_OCCUPANCY_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Occupancy: Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to memory. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the not posted filter, we can track how long writes spent in the iMC before completions were sent to the HA. The posted filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "EventCode": "0x83",
+ "EventName": "UNC_MCHBM_WPQ_OCCUPANCY_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Occupancy: Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to memory. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the not posted filter, we can track how long writes spent in the iMC before completions were sent to the HA. The posted filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x23",
+ "EventName": "UNC_MCHBM_WPQ_READ_HIT",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x23",
+ "EventName": "UNC_MCHBM_WPQ_READ_HIT.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue CAM Match: Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "UMask": "0x1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x23",
+ "EventName": "UNC_MCHBM_WPQ_READ_HIT.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue CAM Match: Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "UMask": "0x2",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x24",
+ "EventName": "UNC_MCHBM_WPQ_WRITE_HIT",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x24",
+ "EventName": "UNC_MCHBM_WPQ_WRITE_HIT.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue CAM Match: Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "UMask": "0x1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x24",
+ "EventName": "UNC_MCHBM_WPQ_WRITE_HIT.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue CAM Match: Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "UMask": "0x2",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Activate due to read, write, underfill, or bypass",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_ACT_COUNT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Activate Count : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0xff",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM CAS commands issued",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : All DRAM Read and Write actions : DRAM RD_CAS and WR_CAS Commands : Counts the total number of DRAM CAS commands issued on this channel.",
+ "UMask": "0xff",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : Pseudo Channel 0",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : Pseudo Channel 0 : DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : Pseudo Channel 1",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : Pseudo Channel 1 : DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM read CAS commands issued (including underfills)",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands : Counts the total number of DRAM Read CAS commands issued on this channel. This includes underfills.",
+ "UMask": "0xcf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.RD_PRE_REG",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0xc2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.RD_PRE_UNDERFILL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0xc8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM read CAS commands issued (does not include underfills)",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS commands w/out auto-pre : DRAM RD_CAS and WR_CAS Commands : Counts the total number or DRAM Read CAS commands issued on this channel. This includes both regular RD CAS commands as well as those with implicit Precharge. We do not filter based on major mode, as RD_CAS is not issued during WMM (with the exception of underfills).",
+ "UMask": "0xc1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM underfill read CAS commands issued",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : Underfill Read Issued : DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0xc4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM write CAS commands issued",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands : Counts the total number of DRAM Write CAS commands issued on this channel.",
+ "UMask": "0xf0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/o auto-pre",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.WR_NONPRE",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/o auto-pre : DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0xd0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.WR_PRE",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0xe0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Pseudo Channel 0",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_ISSUED_REQ_LEN.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Pseudo Channel 1",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_ISSUED_REQ_LEN.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS Command in Interleaved Mode (32B)",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_ISSUED_REQ_LEN.RD_32B",
+ "PerPkg": "1",
+ "UMask": "0xc8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS Command in Regular Mode (64B) in Pseudochannel 0",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_ISSUED_REQ_LEN.RD_64B",
+ "PerPkg": "1",
+ "UMask": "0xc1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Underfill Read CAS Command in Interleaved Mode (32B)",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_ISSUED_REQ_LEN.RD_UFILL_32B",
+ "PerPkg": "1",
+ "UMask": "0xd0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Underfill Read CAS Command in Regular Mode (64B) in Pseudochannel 1",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_ISSUED_REQ_LEN.RD_UFILL_64B",
+ "PerPkg": "1",
+ "UMask": "0xc2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write CAS Command in Interleaved Mode (32B)",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_ISSUED_REQ_LEN.WR_32B",
+ "PerPkg": "1",
+ "UMask": "0xe0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write CAS Command in Regular Mode (64B) in Pseudochannel 0",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_ISSUED_REQ_LEN.WR_64B",
+ "PerPkg": "1",
+ "UMask": "0xc4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "IMC Clockticks at DCLK frequency",
+ "EventCode": "0x01",
+ "EventName": "UNC_M_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of DRAM DCLK clock cycles while the event is enabled",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge All Commands",
+ "EventCode": "0x44",
+ "EventName": "UNC_M_DRAM_PRE_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge All Commands : Counts the number of times that the precharge all command was sent.",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "IMC Clockticks at HCLK frequency",
+ "EventCode": "0x01",
+ "EventName": "UNC_M_HCLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of DRAM HCLK clock cycles while the event is enabled",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PCLS.RD",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M_PCLS.RD",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PCLS.TOTAL",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M_PCLS.TOTAL",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PCLS.WR",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M_PCLS.WR",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue inserts",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M_PMM_RPQ_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of read requests allocated in the PMM Read Pending Queue.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue occupancy",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.ALL_SCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue occupancy",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.ALL_SCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue Occupancy",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.GNT_WAIT_SCH0",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Read Pending Queue Occupancy : Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue Occupancy",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.GNT_WAIT_SCH1",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Read Pending Queue Occupancy : Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue Occupancy",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.NO_GNT_SCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue Occupancy",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.NO_GNT_SCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM (for IXP) Write Queue Cycles Not Empty",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M_PMM_WPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Pending Queue inserts",
+ "EventCode": "0xe7",
+ "EventName": "UNC_M_PMM_WPQ_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of write requests allocated in the PMM Write Pending Queue.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Pending Queue Occupancy",
+ "EventCode": "0xe4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Write Pending Queue Occupancy : Accumulates the per cycle occupancy of the Write Pending Queue to the PMM DIMM.",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Pending Queue Occupancy",
+ "EventCode": "0xE4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL_SCH0",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Write Pending Queue Occupancy : Accumulates the per cycle occupancy of the PMM Write Pending Queue.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Pending Queue Occupancy",
+ "EventCode": "0xE4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL_SCH1",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Write Pending Queue Occupancy : Accumulates the per cycle occupancy of the PMM Write Pending Queue.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM (for IXP) Write Pending Queue Occupancy",
+ "EventCode": "0xe4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.CAS",
+ "PerPkg": "1",
+ "PublicDescription": "PMM (for IXP) Write Pending Queue Occupancy : Accumulates the per cycle occupancy of the Write Pending Queue to the IXP DIMM.",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM (for IXP) Write Pending Queue Occupancy",
+ "EventCode": "0xe4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.PWR",
+ "PerPkg": "1",
+ "PublicDescription": "PMM (for IXP) Write Pending Queue Occupancy : Accumulates the per cycle occupancy of the Write Pending Queue to the IXP DIMM.",
+ "UMask": "0x30",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel PPD Cycles",
+ "EventCode": "0x85",
+ "EventName": "UNC_M_POWER_CHANNEL_PPD",
+ "PerPkg": "1",
+ "PublicDescription": "Channel PPD Cycles : Number of cycles when all the ranks in the channel are in PPD mode. If IBT=off is enabled, then this can be used to count those cycles. If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.LOW_0",
+ "PerPkg": "1",
+ "PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.LOW_1",
+ "PerPkg": "1",
+ "PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.LOW_2",
+ "PerPkg": "1",
+ "PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.LOW_3",
+ "PerPkg": "1",
+ "PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0",
+ "EventCode": "0x86",
+ "EventName": "UNC_M_POWER_CRIT_THROTTLE_CYCLES.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1. : Thermal throttling is performed per DIMM. We support 3 DIMMs per channel. This ID allows us to filter by ID.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0",
+ "EventCode": "0x86",
+ "EventName": "UNC_M_POWER_CRIT_THROTTLE_CYCLES.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Clock-Enabled Self-Refresh",
+ "EventCode": "0x43",
+ "EventName": "UNC_M_POWER_SELF_REFRESH",
+ "PerPkg": "1",
+ "PublicDescription": "Clock-Enabled Self-Refresh : Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock. This happens in some package C-states. For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing. One use of this is for Monroe technology. Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Precharge due to read, write, underfill, or PGT.",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0xff",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.PGT",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x88",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Precharges from Page Table",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.PGT_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Precharges from Page Table : Counts the number of DRAM Precharge commands sent on this channel. : Equivalent to PAGE_EMPTY",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.PGT_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Precharge due to read on page miss",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Precharge due to read",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.RD_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Precharge due to read : Counts the number of DRAM Precharge commands sent on this channel. : Precharge from read bank scheduler",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.RD_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.UFILL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x44",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.UFILL_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.UFILL_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Precharge due to write on page miss",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x22",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Precharge due to write",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.WR_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Precharge due to write : Counts the number of DRAM Precharge commands sent on this channel. : Precharge from write bank scheduler",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.WR_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles where the read buffer has greater than UMASK elements. This includes reads to both DDR and PMEM. NOTE: Umask must be set to the maximum number of elements in the queue (24 entries for SPR).",
+ "EventCode": "0x19",
+ "EventName": "UNC_M_RDB_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Counts the number of inserts into the read buffer destined for DDR. Does not count reads destined for PMEM.",
+ "EventCode": "0x17",
+ "EventName": "UNC_M_RDB_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Inserts",
+ "EventCode": "0x17",
+ "EventName": "UNC_M_RDB_INSERTS.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Inserts",
+ "EventCode": "0x17",
+ "EventName": "UNC_M_RDB_INSERTS.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles where there's at least one element in the read buffer. This includes reads to both DDR and PMEM.",
+ "EventCode": "0x18",
+ "EventName": "UNC_M_RDB_NE",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Not Empty",
+ "EventCode": "0x18",
+ "EventName": "UNC_M_RDB_NE.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Not Empty",
+ "EventCode": "0x18",
+ "EventName": "UNC_M_RDB_NE.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles where there's at least one element in the read buffer. This includes reads to both DDR and PMEM.",
+ "EventCode": "0x18",
+ "EventName": "UNC_M_RDB_NOT_EMPTY",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Counts the number of elements in the read buffer, including reads to both DDR and PMEM.",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M_RDB_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Allocations : Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Allocations : Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
+ "EventCode": "0x80",
+ "EventName": "UNC_M_RPQ_OCCUPANCY_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Occupancy : Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
+ "EventCode": "0x81",
+ "EventName": "UNC_M_RPQ_OCCUPANCY_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Occupancy : Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard accepts",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.ACCEPTS",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : Write Accepts",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.FM_RD_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : Write Rejects",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.FM_WR_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : FM read completions",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.NM_RD_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : FM write completions",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.NM_WR_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : Read Accepts",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.RD_ACCEPTS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : Read Rejects",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.RD_REJECTS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard rejects",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.REJECTS",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : NM read completions",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.WR_ACCEPTS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : NM write completions",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.WR_REJECTS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Alloc",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.ALLOC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Dealloc",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.DEALLOC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Write Starved",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.FM_RD_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Write Starved",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.FM_TGR_WR_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Read Starved",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.FM_WR_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Valid",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.NM_RD_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read Starved",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.NM_WR_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Reject",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.VLD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Cycles Full",
+ "EventCode": "0xd1",
+ "EventName": "UNC_M_SB_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Cycles Not-Empty",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M_SB_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts : Block region reads",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M_SB_INSERTS.BLOCK_RDS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts : Block region writes",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M_SB_INSERTS.BLOCK_WRS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts : Persistent Mem reads",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M_SB_INSERTS.PMM_RDS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts : Persistent Mem writes",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M_SB_INSERTS.PMM_WRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts : Reads",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M_SB_INSERTS.RDS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts : Writes",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M_SB_INSERTS.WRS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy : Block region reads",
+ "EventCode": "0xd5",
+ "EventName": "UNC_M_SB_OCCUPANCY.BLOCK_RDS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy : Block region writes",
+ "EventCode": "0xd5",
+ "EventName": "UNC_M_SB_OCCUPANCY.BLOCK_WRS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy : Persistent Mem reads",
+ "EventCode": "0xd5",
+ "EventName": "UNC_M_SB_OCCUPANCY.PMM_RDS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy : Persistent Mem writes",
+ "EventCode": "0xd5",
+ "EventName": "UNC_M_SB_OCCUPANCY.PMM_WRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy : Reads",
+ "EventCode": "0xd5",
+ "EventName": "UNC_M_SB_OCCUPANCY.RDS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Prefetch Inserts : All",
+ "EventCode": "0xda",
+ "EventName": "UNC_M_SB_PREF_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Prefetch Inserts : DDR4",
+ "EventCode": "0xda",
+ "EventName": "UNC_M_SB_PREF_INSERTS.DDR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Prefetch Inserts : PMM",
+ "EventCode": "0xda",
+ "EventName": "UNC_M_SB_PREF_INSERTS.PMM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Prefetch Occupancy : All",
+ "EventCode": "0xdb",
+ "EventName": "UNC_M_SB_PREF_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Prefetch Occupancy : DDR4",
+ "EventCode": "0xdb",
+ "EventName": "UNC_M_SB_PREF_OCCUPANCY.DDR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Prefetch Occupancy : Persistent Mem",
+ "EventCode": "0xDB",
+ "EventName": "UNC_M_SB_PREF_OCCUPANCY.PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M_SB_REJECT.CANARY",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M_SB_REJECT.DDR_EARLY_CMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected : FM requests rejected due to full address conflict",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M_SB_REJECT.FM_ADDR_CNFLT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected : NM requests rejected due to set conflict",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M_SB_REJECT.NM_SET_CNFLT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected : Patrol requests rejected due to set conflict",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M_SB_REJECT.PATROL_SET_CNFLT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Read - Set",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FM_RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read - Clear",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Write - Set",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FM_WR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read - Set",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.NM_RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Write - Set",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.NM_WR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Read - Set",
+ "EventCode": "0xde",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.FM_RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read - Clear",
+ "EventCode": "0xde",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.FM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Write - Set",
+ "EventCode": "0xde",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.FM_WR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read - Set",
+ "EventCode": "0xde",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.NM_RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Write - Set",
+ "EventCode": "0xde",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.NM_WR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Read",
+ "EventCode": "0xd8",
+ "EventName": "UNC_M_SB_STRV_OCC.FM_RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read - Clear",
+ "EventCode": "0xd8",
+ "EventName": "UNC_M_SB_STRV_OCC.FM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Write",
+ "EventCode": "0xd8",
+ "EventName": "UNC_M_SB_STRV_OCC.FM_WR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read",
+ "EventCode": "0xd8",
+ "EventName": "UNC_M_SB_STRV_OCC.NM_RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Write",
+ "EventCode": "0xd8",
+ "EventName": "UNC_M_SB_STRV_OCC.NM_WR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.DDR4_CMP",
+ "EventCode": "0xdd",
+ "EventName": "UNC_M_SB_TAGGED.DDR4_CMP",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.NEW",
+ "EventCode": "0xdd",
+ "EventName": "UNC_M_SB_TAGGED.NEW",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.OCC",
+ "EventCode": "0xdd",
+ "EventName": "UNC_M_SB_TAGGED.OCC",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.PMM0_CMP",
+ "EventCode": "0xdd",
+ "EventName": "UNC_M_SB_TAGGED.PMM0_CMP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.PMM1_CMP",
+ "EventCode": "0xdd",
+ "EventName": "UNC_M_SB_TAGGED.PMM1_CMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.PMM2_CMP",
+ "EventCode": "0xdd",
+ "EventName": "UNC_M_SB_TAGGED.PMM2_CMP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.RD_HIT",
+ "EventCode": "0xdd",
+ "EventName": "UNC_M_SB_TAGGED.RD_HIT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.RD_MISS",
+ "EventCode": "0xdd",
+ "EventName": "UNC_M_SB_TAGGED.RD_MISS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "2LM Tag check hit in near memory cache (DDR4)",
+ "EventCode": "0xd3",
+ "EventName": "UNC_M_TAGCHK.HIT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "2LM Tag check miss, no data at this line",
+ "EventCode": "0xd3",
+ "EventName": "UNC_M_TAGCHK.MISS_CLEAN",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "2LM Tag check miss, existing data may be evicted to PMM",
+ "EventCode": "0xd3",
+ "EventName": "UNC_M_TAGCHK.MISS_DIRTY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "2LM Tag check hit due to memory read",
+ "EventCode": "0xd3",
+ "EventName": "UNC_M_TAGCHK.NM_RD_HIT",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "2LM Tag check hit due to memory write",
+ "EventCode": "0xd3",
+ "EventName": "UNC_M_TAGCHK.NM_WR_HIT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_WPQ_INSERTS.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Allocations : Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_WPQ_INSERTS.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Allocations : Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "EventCode": "0x82",
+ "EventName": "UNC_M_WPQ_OCCUPANCY_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Occupancy : Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the not posted filter, we can track how long writes spent in the iMC before completions were sent to the HA. The posted filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_WPQ_OCCUPANCY_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Occupancy : Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the not posted filter, we can track how long writes spent in the iMC before completions were sent to the HA. The posted filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "iMC"
+ }
+]
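The RPQ/WPQ occupancy and insert events above are designed to be combined: the occupancy accumulated per cycle divided by the number of inserts gives the average queueing latency. A minimal sketch of such a derived metric, written in the style of the *-metrics.json files touched elsewhere in this patch (the MetricName and MetricGroup below are placeholders, not defined by this patch):

    {
        "BriefDescription": "Average cycles a read request spends in the Read Pending Queue, pseudo-channel 0 (illustrative sketch)",
        "MetricExpr": "UNC_M_RPQ_OCCUPANCY_PCH0 / UNC_M_RPQ_INSERTS.PCH0",
        "MetricGroup": "MemoryLat",
        "MetricName": "rpq_avg_latency_pch0",
        "ScaleUnit": "1cycles / request"
    }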
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-power.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-power.json
new file mode 100644
index 000000000000..8948e85074f0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-power.json
@@ -0,0 +1,197 @@
+[
+ {
+ "BriefDescription": "PCU PCLK Clockticks",
+ "EventCode": "0x01",
+ "EventName": "UNC_P_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of PCU PCLK Clock cycles while the event is enabled",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_CORE_TRANSITION_CYCLES",
+ "EventCode": "0x60",
+ "EventName": "UNC_P_CORE_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_DEMOTIONS",
+ "EventCode": "0x30",
+ "EventName": "UNC_P_DEMOTIONS",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 0 Cycles",
+ "EventCode": "0x75",
+ "EventName": "UNC_P_FIVR_PS_PS0_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Phase Shed 0 Cycles : Cycles spent in phase-shedding power state 0",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 1 Cycles",
+ "EventCode": "0x76",
+ "EventName": "UNC_P_FIVR_PS_PS1_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Phase Shed 1 Cycles : Cycles spent in phase-shedding power state 1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 2 Cycles",
+ "EventCode": "0x77",
+ "EventName": "UNC_P_FIVR_PS_PS2_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Phase Shed 2 Cycles : Cycles spent in phase-shedding power state 2",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 3 Cycles",
+ "EventCode": "0x78",
+ "EventName": "UNC_P_FIVR_PS_PS3_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Phase Shed 3 Cycles : Cycles spent in phase-shedding power state 3",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "AVX256 Frequency Clipping",
+ "EventCode": "0x49",
+ "EventName": "UNC_P_FREQ_CLIP_AVX256",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "AVX512 Frequency Clipping",
+ "EventCode": "0x4a",
+ "EventName": "UNC_P_FREQ_CLIP_AVX512",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Thermal Strongest Upper Limit Cycles",
+ "EventCode": "0x04",
+ "EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Thermal Strongest Upper Limit Cycles : Number of cycles any frequency is reduced due to a thermal limit. Count only if throttling is occurring.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Power Strongest Upper Limit Cycles",
+ "EventCode": "0x05",
+ "EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Power Strongest Upper Limit Cycles : Counts the number of cycles when power is the upper limit on frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "EventCode": "0x73",
+ "EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "IO P Limit Strongest Lower Limit Cycles : Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Cycles spent changing Frequency",
+ "EventCode": "0x74",
+ "EventName": "UNC_P_FREQ_TRANS_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles spent changing Frequency : Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Memory Phase Shedding Cycles",
+ "EventCode": "0x2f",
+ "EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Memory Phase Shedding Cycles : Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C0",
+ "EventCode": "0x2a",
+ "EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Package C State Residency - C0 : Counts the number of cycles when the package was in C0. This event can be used in conjunction with edge detect to count C0 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C2E",
+ "EventCode": "0x2b",
+ "EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Package C State Residency - C2E : Counts the number of cycles when the package was in C2E. This event can be used in conjunction with edge detect to count C2E entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C6",
+ "EventCode": "0x2d",
+ "EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Package C State Residency - C6 : Counts the number of cycles when the package was in C6. This event can be used in conjunction with edge detect to count C6 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "EventCode": "0x06",
+ "EventName": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C0",
+ "EventCode": "0x35",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cores in C0 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C3",
+ "EventCode": "0x36",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cores in C3 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C6",
+ "EventCode": "0x37",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C6",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cores in C6 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "External Prochot",
+ "EventCode": "0x0a",
+ "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "External Prochot : Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Internal Prochot",
+ "EventCode": "0x09",
+ "EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Internal Prochot : Counts the number of cycles that we are in Internal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Total Core C State Transition Cycles",
+ "EventCode": "0x72",
+ "EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Total Core C State Transition Cycles : Number of cycles spent performing core C state transitions across all cores.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "VR Hot",
+ "EventCode": "0x42",
+ "EventName": "UNC_P_VR_HOT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "VR Hot : Number of cycles that a CPU SVID VR is hot. Does not cover DRAM VRs",
+ "Unit": "PCU"
+ }
+]
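The PCU residency counters above are typically read as a fraction of PCU clockticks. A hedged sketch of such a derived metric, reusing the metrics-JSON style from elsewhere in this patch (names are placeholders):

    {
        "BriefDescription": "Fraction of cycles the package spent in C6 (illustrative sketch)",
        "MetricExpr": "UNC_P_PKG_RESIDENCY_C6_CYCLES / UNC_P_CLOCKTICKS",
        "MetricGroup": "Power",
        "MetricName": "package_c6_residency",
        "ScaleUnit": "100%"
    }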
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/virtual-memory.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/virtual-memory.json
new file mode 100644
index 000000000000..a1e3b8d2ebe7
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/virtual-memory.json
@@ -0,0 +1,165 @@
+[
+ {
+ "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
+ "CounterMask": "1",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a demand load.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 1G page.",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 4K page.",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
+ "CounterMask": "1",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 1G page.",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 4K page.",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
+ "CounterMask": "1",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a code (instruction fetch) request.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ }
+]
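The WALK_PENDING and WALK_COMPLETED pairs above combine naturally into an average page-walk duration. A minimal sketch in the same derived-metric style (names are placeholders):

    {
        "BriefDescription": "Average cycles per completed demand-load page walk (illustrative sketch)",
        "MetricExpr": "DTLB_LOAD_MISSES.WALK_PENDING / DTLB_LOAD_MISSES.WALK_COMPLETED",
        "MetricGroup": "TLB",
        "MetricName": "dtlb_load_walk_duration",
        "ScaleUnit": "1cycles / walk"
    }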
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/haswellx/uncore-interconnect.json
index 954e8198c7a5..bef1f5ef6f31 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/uncore-interconnect.json
@@ -271,7 +271,7 @@
"EventCode": "0x4",
"EventName": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
"PerPkg": "1",
- "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -279,7 +279,7 @@
"EventCode": "0x1",
"EventName": "UNC_I_RxR_BL_DRS_INSERTS",
"PerPkg": "1",
- "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -287,7 +287,7 @@
"EventCode": "0x7",
"EventName": "UNC_I_RxR_BL_DRS_OCCUPANCY",
"PerPkg": "1",
- "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -295,7 +295,7 @@
"EventCode": "0x5",
"EventName": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
"PerPkg": "1",
- "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -303,7 +303,7 @@
"EventCode": "0x2",
"EventName": "UNC_I_RxR_BL_NCB_INSERTS",
"PerPkg": "1",
- "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -311,7 +311,7 @@
"EventCode": "0x8",
"EventName": "UNC_I_RxR_BL_NCB_OCCUPANCY",
"PerPkg": "1",
- "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -319,7 +319,7 @@
"EventCode": "0x6",
"EventName": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
"PerPkg": "1",
- "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -327,7 +327,7 @@
"EventCode": "0x3",
"EventName": "UNC_I_RxR_BL_NCS_INSERTS",
"PerPkg": "1",
- "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -335,7 +335,7 @@
"EventCode": "0x9",
"EventName": "UNC_I_RxR_BL_NCS_OCCUPANCY",
"PerPkg": "1",
- "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
diff --git a/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json b/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
index a6eed0d9a26d..b43a6c6d8b7f 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
@@ -1525,7 +1525,7 @@
},
{
"BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
- "MetricExpr": "(cycles\\-t / el\\-start if has_event(cycles\\-t) else 0)",
+ "MetricExpr": "(cycles\\-t / el\\-start if has_event(el\\-start) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_cycles_per_elision",
"ScaleUnit": "1cycles / elision"
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json b/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
index 7082ad5ba961..e98602c66707 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
@@ -1821,7 +1821,7 @@
},
{
"BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
- "MetricExpr": "(cycles\\-t / el\\-start if has_event(cycles\\-t) else 0)",
+ "MetricExpr": "(cycles\\-t / el\\-start if has_event(el\\-start) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_cycles_per_elision",
"ScaleUnit": "1cycles / elision"
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/cache.json b/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
index 6ddc7d1c61d5..46570b522095 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
@@ -8,16 +8,16 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers unavailability",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.FB_FULL",
- "PublicDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "PublicDescription": "Cycles a demand request was blocked due to Fill Buffers unavailability.",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
- "BriefDescription": "L1D miss oustandings duration in cycles",
+ "BriefDescription": "L1D miss outstanding duration in cycles",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING",
"PublicDescription": "Increments the number of outstanding L1D misses every cycle. Set Cmask = 1 and Edge =1 to count occurrences.",
@@ -506,7 +506,7 @@
"UMask": "0x8"
},
{
- "BriefDescription": "Cacheable and noncachaeble code read requests",
+ "BriefDescription": "Cacheable and noncacheable code read requests",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
"PublicDescription": "Demand code read requests sent to uncore.",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json b/tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json
index 87c958213c7a..89c6d47cc077 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json
@@ -73,7 +73,7 @@
"UMask": "0x20"
},
{
- "BriefDescription": "Number of FP Computational Uops Executed this cycle. The number of FADD, FSUB, FCOM, FMULs, integer MULsand IMULs, FDIVs, FPREMs, FSQRTS, integer DIVs, and IDIVs. This event does not distinguish an FADD used in the middle of a transcendental flow from a s",
+ "BriefDescription": "Number of FP Computational Uops Executed this cycle. The number of FADD, FSUB, FCOM, FMULs, integer MULs and IMULs, FDIVs, FPREMs, FSQRTS, integer DIVs, and IDIVs. This event does not distinguish an FADD used in the middle of a transcendental flow from a s",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.X87",
"PublicDescription": "Counts number of X87 uops executed.",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json b/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json
index 89004a6c9ed1..4ee100024ca9 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json
@@ -142,35 +142,35 @@
"UMask": "0x4"
},
{
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_CYCLES",
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
"SampleAfterValue": "2000003",
"UMask": "0x30"
},
{
- "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_CYCLES",
- "PublicDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "PublicDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
"SampleAfterValue": "2000003",
"UMask": "0x10"
},
{
- "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequencer (MS) is busy",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_OCCUR",
- "PublicDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy.",
+ "PublicDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequencer (MS) is busy.",
"SampleAfterValue": "2000003",
"UMask": "0x10"
},
{
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_UOPS",
"PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
@@ -178,7 +178,7 @@
"UMask": "0x10"
},
{
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
"EventCode": "0x79",
"EventName": "IDQ.MS_MITE_UOPS",
"PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
@@ -196,7 +196,7 @@
"UMask": "0x30"
},
{
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
"EventCode": "0x79",
"EventName": "IDQ.MS_UOPS",
"PublicDescription": "Increment each cycle # of uops delivered to IDQ from MS by either DSB or MITE. Set Cmask = 1 to count cycles.",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/cache.json b/tools/perf/pmu-events/arch/x86/ivytown/cache.json
index c8f7d5e66504..0e8e77253978 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/cache.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/cache.json
@@ -8,11 +8,11 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers unavailability",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.FB_FULL",
- "PublicDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "PublicDescription": "Cycles a demand request was blocked due to Fill Buffers unavailability.",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-interconnect.json
index ccf451534d16..914d2cfb3d3d 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/uncore-interconnect.json
@@ -140,7 +140,7 @@
"EventCode": "0x4",
"EventName": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
"PerPkg": "1",
- "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -148,21 +148,21 @@
"EventCode": "0x1",
"EventName": "UNC_I_RxR_BL_DRS_INSERTS",
"PerPkg": "1",
- "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
"EventCode": "0x7",
"EventName": "UNC_I_RxR_BL_DRS_OCCUPANCY",
"PerPkg": "1",
- "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
"EventCode": "0x5",
"EventName": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
"PerPkg": "1",
- "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -170,21 +170,21 @@
"EventCode": "0x2",
"EventName": "UNC_I_RxR_BL_NCB_INSERTS",
"PerPkg": "1",
- "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
"EventCode": "0x8",
"EventName": "UNC_I_RxR_BL_NCB_OCCUPANCY",
"PerPkg": "1",
- "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
"EventCode": "0x6",
"EventName": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
"PerPkg": "1",
- "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -192,14 +192,14 @@
"EventCode": "0x3",
"EventName": "UNC_I_RxR_BL_NCS_INSERTS",
"PerPkg": "1",
- "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
"EventCode": "0x9",
"EventName": "UNC_I_RxR_BL_NCS_OCCUPANCY",
"PerPkg": "1",
- "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -293,7 +293,7 @@
"EventCode": "0xd",
"EventName": "UNC_I_TxR_REQUEST_OCCUPANCY",
"PerPkg": "1",
- "PublicDescription": "Accumultes the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjuection with the allocations event in order to calculate average latency of outbound requests.",
+ "PublicDescription": "Accumulates the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjunction with the allocations event in order to calculate average latency of outbound requests.",
"Unit": "IRP"
},
{
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json
index 874f15ea8228..0fc907e5cf3c 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json
@@ -140,7 +140,7 @@
"EventCode": "0x4",
"EventName": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
"PerPkg": "1",
- "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -148,21 +148,21 @@
"EventCode": "0x1",
"EventName": "UNC_I_RxR_BL_DRS_INSERTS",
"PerPkg": "1",
- "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
"EventCode": "0x7",
"EventName": "UNC_I_RxR_BL_DRS_OCCUPANCY",
"PerPkg": "1",
- "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
"EventCode": "0x5",
"EventName": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
"PerPkg": "1",
- "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -170,21 +170,21 @@
"EventCode": "0x2",
"EventName": "UNC_I_RxR_BL_NCB_INSERTS",
"PerPkg": "1",
- "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
"EventCode": "0x8",
"EventName": "UNC_I_RxR_BL_NCB_OCCUPANCY",
"PerPkg": "1",
- "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
"EventCode": "0x6",
"EventName": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
"PerPkg": "1",
- "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
@@ -192,14 +192,14 @@
"EventCode": "0x3",
"EventName": "UNC_I_RxR_BL_NCS_INSERTS",
"PerPkg": "1",
- "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
"EventCode": "0x9",
"EventName": "UNC_I_RxR_BL_NCS_OCCUPANCY",
"PerPkg": "1",
- "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/cache.json b/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
index d9876cb06b08..8da3a5a7be73 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
@@ -6,14 +6,20 @@
"SampleAfterValue": "200003"
},
{
- "BriefDescription": "Counts the number of core cycles the fetch stalls because of an icache miss. This is a cumulative count of core cycles the fetch stalled for all icache misses.",
+ "BriefDescription": "This event counts the number of core cycles the fetch stalls because of an icache miss. This is a cumulative count of cycles the NIP stalled for all icache misses.",
"EventCode": "0x86",
"EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
- "PublicDescription": "This event counts the number of core cycles the fetch stalls because of an icache miss. This is a cumulative count of cycles the NIP stalled for all icache misses.",
"SampleAfterValue": "200003",
"UMask": "0x4"
},
{
+ "BriefDescription": "Counts the number of L2HWP allocated into XQ GP",
+ "EventCode": "0x3E",
+ "EventName": "L2_PREFETCHER.ALLOC_XQ",
+ "SampleAfterValue": "100007",
+ "UMask": "0x4"
+ },
+ {
"BriefDescription": "Counts the number of L2 cache misses",
"EventCode": "0x2E",
"EventName": "L2_REQUESTS.MISS",
@@ -28,7 +34,7 @@
"UMask": "0x4f"
},
{
- "BriefDescription": "Counts the number of MEC requests from the L2Q that reference a cache line (cacheable requests) excluding SW prefetches filling only to L2 cache and L1 evictions (automatically exlcudes L2HWP, UC, WC) that were rejected - Multiple repeated rejects should be counted multiple times",
+ "BriefDescription": "Counts the number of MEC requests from the L2Q that reference a cache line (cacheable requests) excluding SW prefetches filling only to L2 cache and L1 evictions (automatically excludes L2HWP, UC, WC) that were rejected - Multiple repeated rejects should be counted multiple times",
"EventCode": "0x30",
"EventName": "L2_REQUESTS_REJECT.ALL",
"SampleAfterValue": "200003"
@@ -50,11 +56,12 @@
"UMask": "0x80"
},
{
- "BriefDescription": "Counts the loads retired that get the data from the other core in the same tile in M state",
+ "BriefDescription": "Counts the loads retired that get the data from the other core in the same tile in M state (Precise Event)",
"Data_LA": "1",
"EventCode": "0x04",
"EventName": "MEM_UOPS_RETIRED.HITM",
"PEBS": "1",
+ "PublicDescription": "This event counts the number of load micro-ops retired that got data from another core's cache. (Precise Event).",
"SampleAfterValue": "200003",
"UMask": "0x20"
},
@@ -67,20 +74,22 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts the number of load micro-ops retired that hit in the L2",
+ "BriefDescription": "Counts the number of load micro-ops retired that hit in the L2 (Precise Event)",
"Data_LA": "1",
"EventCode": "0x04",
"EventName": "MEM_UOPS_RETIRED.L2_HIT_LOADS",
"PEBS": "1",
+ "PublicDescription": "This event counts the number of load micro-uops retired that hit in the L2 (Precise Event)",
"SampleAfterValue": "200003",
"UMask": "0x2"
},
{
- "BriefDescription": "Counts the number of load micro-ops retired that miss in the L2",
+ "BriefDescription": "Counts the number of load micro-ops retired that miss in the L2 (Precise Event)",
"Data_LA": "1",
"EventCode": "0x04",
"EventName": "MEM_UOPS_RETIRED.L2_MISS_LOADS",
"PEBS": "1",
+ "PublicDescription": "This event counts the number of load micro-ops retired that miss in the L2 (Precise Event)",
"SampleAfterValue": "100007",
"UMask": "0x4"
},
@@ -621,6 +630,15 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Accounts for responses which miss its own tile's L2.",
+ "EventCode": "0xB7",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x18001981F8",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts any request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING",
@@ -1665,15 +1683,6 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that provides no supplier details",
- "EventCode": "0xB7",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000020020",
- "SampleAfterValue": "100007",
- "UMask": "0x1"
- },
- {
"BriefDescription": "Counts Software Prefetches that accounts for any response",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.ANY_RESPONSE",
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/floating-point.json b/tools/perf/pmu-events/arch/x86/knightslanding/floating-point.json
index ecc96f32f167..089aa3ef345d 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/floating-point.json
@@ -8,18 +8,18 @@
"UMask": "0x4"
},
{
- "BriefDescription": "Counts the number of vector SSE, AVX, AVX2, AVX-512 micro-ops retired. More specifically, it counts packed SSE, AVX, AVX2, AVX-512 micro-ops (both floating point and integer) except for loads (memory-to-register mov-type micro-ops), packed byte and word multiplies.",
+ "BriefDescription": "Counts the number of packed SSE, AVX, AVX2, AVX-512 micro-ops (both floating point and integer) except for loads (memory-to-register mov-type micro-ops), packed byte and word multiplies.",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.PACKED_SIMD",
- "PublicDescription": "This event counts the number of packed vector SSE, AVX, AVX2, and AVX-512 micro-ops retired (floating point, integer and store) except for loads (memory-to-register mov-type micro-ops), packed byte and word multiplies.",
+ "PublicDescription": "The length of the packed operation (128bits, 256bits or 512bits) is not taken into account when updating the counter; all count the same (+1). \r\nMask (k) registers are ignored. For example: a micro-op operating with a mask that only enables one element or even zero elements will still trigger this counter (+1)\r\nThis event is defined at the micro-op level and not instruction level. Most instructions are implemented with one micro-op but not all.",
"SampleAfterValue": "200003",
"UMask": "0x40"
},
{
- "BriefDescription": "Counts the number of scalar SSE, AVX, AVX2, AVX-512 micro-ops retired. More specifically, it counts scalar SSE, AVX, AVX2, AVX-512 micro-ops except for loads (memory-to-register mov-type micro ops), division, sqrt.",
+ "BriefDescription": "Counts the number of scalar SSE, AVX, AVX2, AVX-512 micro-ops except for loads (memory-to-register mov-type micro ops), division, sqrt.",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.SCALAR_SIMD",
- "PublicDescription": "This event counts the number of scalar SSE, AVX, AVX2, AVX-512 micro-ops retired (floating point, integer and store) except for loads (memory-to-register mov-type micro ops), division, sqrt.",
+ "PublicDescription": "This event is defined at the micro-op level and not instruction level. Most instructions are implemented with one micro-op but not all.",
"SampleAfterValue": "200003",
"UMask": "0x20"
}
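As the updated description notes, UOPS_RETIRED.PACKED_SIMD increments by one per packed micro-op regardless of vector width or mask, so it cannot be converted to FLOPs on its own. A small illustration with assumed figures:

```python
# Illustration only: 1,000 retired packed micro-ops read the same (1,000)
# whether they were 128-bit or 512-bit operations, so the implied FLOP count
# for double precision differs by 4x (2 vs 8 elements per op).
packed_uops = 1_000
flops_if_128bit = packed_uops * 2   # 2 doubles per 128-bit op
flops_if_512bit = packed_uops * 8   # 8 doubles per 512-bit op
print(flops_if_128bit, flops_if_512bit)  # 2000 8000
```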
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json b/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json
index 3dc532107ead..5b2e71750976 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json
@@ -1,13 +1,13 @@
[
{
- "BriefDescription": "Counts the number of branch instructions retired",
+ "BriefDescription": "Counts the number of branch instructions retired (Precise Event)",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
"SampleAfterValue": "200003"
},
{
- "BriefDescription": "Counts the number of near CALL branch instructions retired.",
+ "BriefDescription": "Counts the number of near CALL branch instructions retired. (Precise Event)",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.CALL",
"PEBS": "1",
@@ -15,7 +15,7 @@
"UMask": "0xf9"
},
{
- "BriefDescription": "Counts the number of far branch instructions retired.",
+ "BriefDescription": "Counts the number of far branch instructions retired. (Precise Event)",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PEBS": "1",
@@ -23,7 +23,7 @@
"UMask": "0xbf"
},
{
- "BriefDescription": "Counts the number of near indirect CALL branch instructions retired.",
+ "BriefDescription": "Counts the number of near indirect CALL branch instructions retired. (Precise Event)",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.IND_CALL",
"PEBS": "1",
@@ -31,7 +31,7 @@
"UMask": "0xfb"
},
{
- "BriefDescription": "Counts the number of branch instructions retired that were conditional jumps.",
+ "BriefDescription": "Counts the number of branch instructions retired that were conditional jumps. (Precise Event)",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.JCC",
"PEBS": "1",
@@ -39,7 +39,7 @@
"UMask": "0x7e"
},
{
- "BriefDescription": "Counts the number of branch instructions retired that were near indirect CALL or near indirect JMP.",
+ "BriefDescription": "Counts the number of branch instructions retired that were near indirect CALL or near indirect JMP. (Precise Event)",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NON_RETURN_IND",
"PEBS": "1",
@@ -47,7 +47,7 @@
"UMask": "0xeb"
},
{
- "BriefDescription": "Counts the number of near relative CALL branch instructions retired.",
+ "BriefDescription": "Counts the number of near relative CALL branch instructions retired. (Precise Event)",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.REL_CALL",
"PEBS": "1",
@@ -55,7 +55,7 @@
"UMask": "0xfd"
},
{
- "BriefDescription": "Counts the number of near RET branch instructions retired.",
+ "BriefDescription": "Counts the number of near RET branch instructions retired. (Precise Event)",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.RETURN",
"PEBS": "1",
@@ -63,7 +63,7 @@
"UMask": "0xf7"
},
{
- "BriefDescription": "Counts the number of branch instructions retired that were conditional jumps and predicted taken.",
+ "BriefDescription": "Counts the number of branch instructions retired that were conditional jumps and predicted taken. (Precise Event)",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.TAKEN_JCC",
"PEBS": "1",
@@ -71,14 +71,14 @@
"UMask": "0xfe"
},
{
- "BriefDescription": "Counts the number of mispredicted branch instructions retired",
+ "BriefDescription": "Counts the number of mispredicted branch instructions retired (Precise Event)",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "1",
"SampleAfterValue": "200003"
},
{
- "BriefDescription": "Counts the number of mispredicted near CALL branch instructions retired.",
+ "BriefDescription": "Counts the number of mispredicted near CALL branch instructions retired. (Precise Event)",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.CALL",
"PEBS": "1",
@@ -86,7 +86,7 @@
"UMask": "0xf9"
},
{
- "BriefDescription": "Counts the number of mispredicted far branch instructions retired.",
+ "BriefDescription": "Counts the number of mispredicted far branch instructions retired. (Precise Event)",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.FAR_BRANCH",
"PEBS": "1",
@@ -94,7 +94,7 @@
"UMask": "0xbf"
},
{
- "BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired.",
+ "BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired. (Precise Event)",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.IND_CALL",
"PEBS": "1",
@@ -102,7 +102,7 @@
"UMask": "0xfb"
},
{
- "BriefDescription": "Counts the number of mispredicted branch instructions retired that were conditional jumps.",
+ "BriefDescription": "Counts the number of mispredicted branch instructions retired that were conditional jumps. (Precise Event)",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.JCC",
"PEBS": "1",
@@ -110,7 +110,7 @@
"UMask": "0x7e"
},
{
- "BriefDescription": "Counts the number of mispredicted branch instructions retired that were near indirect CALL or near indirect JMP.",
+ "BriefDescription": "Counts the number of mispredicted branch instructions retired that were near indirect CALL or near indirect JMP. (Precise Event)",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
"PEBS": "1",
@@ -118,7 +118,7 @@
"UMask": "0xeb"
},
{
- "BriefDescription": "Counts the number of mispredicted near relative CALL branch instructions retired.",
+ "BriefDescription": "Counts the number of mispredicted near relative CALL branch instructions retired. (Precise Event)",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.REL_CALL",
"PEBS": "1",
@@ -126,7 +126,7 @@
"UMask": "0xfd"
},
{
- "BriefDescription": "Counts the number of mispredicted near RET branch instructions retired.",
+ "BriefDescription": "Counts the number of mispredicted near RET branch instructions retired. (Precise Event)",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.RETURN",
"PEBS": "1",
@@ -134,7 +134,7 @@
"UMask": "0xf7"
},
{
- "BriefDescription": "Counts the number of mispredicted branch instructions retired that were conditional jumps and predicted taken.",
+ "BriefDescription": "Counts the number of mispredicted branch instructions retired that were conditional jumps and predicted taken. (Precise Event)",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.TAKEN_JCC",
"PEBS": "1",
@@ -189,7 +189,14 @@
"SampleAfterValue": "2000003"
},
{
- "BriefDescription": "Counts all nukes",
+ "BriefDescription": "Counts the number of instructions retired (Precise Event)",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.ANY_PS",
+ "PEBS": "2",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Counts all machine clears",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.ALL",
"SampleAfterValue": "200003",
@@ -261,20 +268,22 @@
"UMask": "0x2"
},
{
- "BriefDescription": "Counts the number of occurrences a retired load gets blocked because its address partially overlaps with a store",
+ "BriefDescription": "Counts the number of occurrences a retired load gets blocked because its address partially overlaps with a store (Precise Event)",
"Data_LA": "1",
"EventCode": "0x03",
"EventName": "RECYCLEQ.LD_BLOCK_ST_FORWARD",
"PEBS": "1",
+ "PublicDescription": "This event counts the number of retired loads that were prohibited from receiving forwarded data from a previous store because of address mismatch.",
"SampleAfterValue": "200003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts the number of occurrences a retired load that is a cache line split. Each split should be counted only once.",
+ "BriefDescription": "Counts the number of occurrences a retired load was pushed into the rehab queue because it sees a cache line split. Each split should be counted only once. (Precise Event)",
"Data_LA": "1",
"EventCode": "0x03",
"EventName": "RECYCLEQ.LD_SPLITS",
"PEBS": "1",
+ "PublicDescription": "This event counts the number of retired loads which was pushed into the recycled queue that experienced cache line boundary splits (Precise event). Not that each split should be counted only once.",
"SampleAfterValue": "200003",
"UMask": "0x8"
},
@@ -286,7 +295,7 @@
"UMask": "0x10"
},
{
- "BriefDescription": "Counts the store micro-ops retired that were pushed in the rehad queue because the store address buffer is full",
+ "BriefDescription": "Counts the store micro-ops retired that were pushed in the rehab queue because the store address buffer is full",
"EventCode": "0x03",
"EventName": "RECYCLEQ.STA_FULL",
"SampleAfterValue": "200003",
@@ -301,7 +310,7 @@
"UMask": "0x4"
},
{
- "BriefDescription": "Counts the total number of core cycles the Alloc pipeline is stalled when any one of the reservation stations is full.",
+ "BriefDescription": "Counts the total number of core cycles allocation pipeline is stalled when any one of the reservation stations is full.",
"EventCode": "0xCB",
"EventName": "RS_FULL_STALL.ALL",
"SampleAfterValue": "200003",
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/uncore-cache.json b/tools/perf/pmu-events/arch/x86/knightslanding/uncore-cache.json
index 1b8dcfa5461c..120e4813d82a 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/uncore-cache.json
@@ -2558,7 +2558,7 @@
"Unit": "CHA"
},
{
- "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"EventCode": "0x2A",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.AD_REQ_VN0",
"PerPkg": "1",
@@ -2566,7 +2566,7 @@
"Unit": "CHA"
},
{
- "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"EventCode": "0x2A",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.AD_RSP_VN0",
"PerPkg": "1",
@@ -2574,7 +2574,7 @@
"Unit": "CHA"
},
{
- "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"EventCode": "0x2A",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.AK_NON_UPI",
"PerPkg": "1",
@@ -2582,7 +2582,7 @@
"Unit": "CHA"
},
{
- "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"EventCode": "0x2A",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.BL_NCB_VN0",
"PerPkg": "1",
@@ -2590,7 +2590,7 @@
"Unit": "CHA"
},
{
- "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"EventCode": "0x2A",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.BL_NCS_VN0",
"PerPkg": "1",
@@ -2598,7 +2598,7 @@
"Unit": "CHA"
},
{
- "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"EventCode": "0x2A",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.BL_RSP_VN0",
"PerPkg": "1",
@@ -2606,7 +2606,7 @@
"Unit": "CHA"
},
{
- "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"EventCode": "0x2A",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.BL_WB_VN0",
"PerPkg": "1",
@@ -2614,7 +2614,7 @@
"Unit": "CHA"
},
{
- "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"EventCode": "0x2A",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.IV_NON_UPI",
"PerPkg": "1",
@@ -2622,7 +2622,7 @@
"Unit": "CHA"
},
{
- "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"EventCode": "0x2B",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q1_RETRY.ALLOW_SNP",
"PerPkg": "1",
@@ -2630,7 +2630,7 @@
"Unit": "CHA"
},
{
- "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"EventCode": "0x2B",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q1_RETRY.ANY_REJECT_IRQ0",
"PerPkg": "1",
@@ -2638,7 +2638,7 @@
"Unit": "CHA"
},
{
- "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"EventCode": "0x2B",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q1_RETRY.PA_MATCH",
"PerPkg": "1",
@@ -2646,7 +2646,7 @@
"Unit": "CHA"
},
{
- "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"EventCode": "0x2B",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q1_RETRY.SF_VICTIM",
"PerPkg": "1",
@@ -2654,7 +2654,7 @@
"Unit": "CHA"
},
{
- "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"EventCode": "0x2B",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q1_RETRY.SF_WAY",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json b/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json
index 99a8fa8f19cc..9be30a33b43b 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json
@@ -1,6 +1,6 @@
[
{
- "BriefDescription": "Counts the number of load micro-ops retired that cause a DTLB miss",
+ "BriefDescription": "Counts the number of load micro-ops retired that cause a DTLB miss (Precise Event)",
"Data_LA": "1",
"EventCode": "0x04",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/cache.json b/tools/perf/pmu-events/arch/x86/lunarlake/cache.json
new file mode 100644
index 000000000000..1823149067b5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/cache.json
@@ -0,0 +1,219 @@
+[
+ {
+ "BriefDescription": "Counts the number of L2 Cache Accesses Counts the total number of L2 Cache Accesses - sum of hits, misses, rejects front door requests for CRd/DRd/RFO/ItoM/L2 Prefetches only, per core event",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.ALL",
+ "PublicDescription": "Counts the number of L2 Cache Accesses Counts the total number of L2 Cache Accesses - sum of hits, misses, rejects front door requests for CRd/DRd/RFO/ItoM/L2 Prefetches only.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x41",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of cacheable memory requests that access the LLC. Counts on a per core basis.",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4f",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Core-originated cacheable requests that refer to L3 (Except hardware prefetches to the L3)",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4f",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x81",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired store instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired store instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x82",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x81",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of store uops retired.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x82",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_1024",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x400",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_2048",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x800",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of stores uops retired same as MEM_UOPS_RETIRED.ALL_STORES",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.STORE_LATENCY",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6",
+ "Unit": "cpu_atom"
+ }
+]
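One way to inspect the load-latency thresholds encoded above is to read the file back; the path assumes a kernel-tree checkout:

```python
# Prints each load-latency event added above together with the MSR 0x3F6
# threshold value it programs.
import json

path = "tools/perf/pmu-events/arch/x86/lunarlake/cache.json"  # assumed checkout layout
with open(path) as f:
    events = json.load(f)

for ev in events:
    name = ev.get("EventName", "")
    if name.startswith("MEM_UOPS_RETIRED.LOAD_LATENCY_GT_"):
        print(f"{name:45s} MSRValue={ev['MSRValue']}")
```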
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/frontend.json b/tools/perf/pmu-events/arch/x86/lunarlake/frontend.json
new file mode 100644
index 000000000000..5e4ef81b43d6
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/frontend.json
@@ -0,0 +1,27 @@
+[
+ {
+ "BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.ACCESSES",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump and the instruction cache registers bytes are not present. -",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.MISSES",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event counts a subset of the Topdown Slots event that were no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations.",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_BUBBLES.CORE",
+ "PublicDescription": "This event counts a subset of the Topdown Slots event that were no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations.\nSoftware can use this event as the numerator for the Frontend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/memory.json b/tools/perf/pmu-events/arch/x86/lunarlake/memory.json
new file mode 100644
index 000000000000..51d70ba00bd4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/memory.json
@@ -0,0 +1,183 @@
+[
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_1024",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x400",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "53",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "1009",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 2048 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_2048",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x800",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 2048 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "23",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "503",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "101",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "2003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired memory store access operations. A PDist event for PEBS Store Latency Facility.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE",
+ "PEBS": "2",
+ "PublicDescription": "Counts Retired memory accesses with at least 1 store operation. This PEBS event is the precisely-distributed (PDist) trigger covering all stores uops for sampling by the PEBS Store Latency Facility. The facility is described in Intel SDM Volume 3 section 19.9.8",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads were not supplied by the L3 cache.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership, including SWPREFETCHW which is an RFO were not supplied by the L3 cache.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/other.json b/tools/perf/pmu-events/arch/x86/lunarlake/other.json
new file mode 100644
index 000000000000..69adaed5686d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/other.json
@@ -0,0 +1,62 @@
+[
+ {
+ "BriefDescription": "Counts cacheable demand data reads Catch all value for any response types - this includes response types not define in the OCR. If this is set all other response types will be ignored",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads were supplied by DRAM.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership, including SWPREFETCHW which is an RFO Catch all value for any response types - this includes response types not define in the OCR. If this is set all other response types will be ignored",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json b/tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json
new file mode 100644
index 000000000000..2bde664fdc0f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json
@@ -0,0 +1,217 @@
+[
+ {
+ "BriefDescription": "Counts the total number of branch instructions retired for all branch types.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of instructions in which the instruction pointer (IP) of the processor is resteered due to a branch instruction and the branch instruction successfully retires. All branch type instructions are accounted for.",
+ "SampleAfterValue": "200003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "All branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the total number of mispredicted branch instructions retired for all branch types.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of mispredicted branch instructions retired. All branch type instructions are accounted for. Prediction of the branch target address enables the processor to begin executing instructions before the non-speculative execution path is known. The branch prediction unit (BPU) predicts the target address based on the instruction pointer (IP) of the branch and on the execution path through which execution reached this IP. A branch misprediction occurs when the prediction is wrong, and results in discarding all instructions executed in the speculative path and re-fetching from the correct path.",
+ "SampleAfterValue": "200003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "All mispredicted branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "SampleAfterValue": "400009",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "EventName": "CPU_CLK_UNHALTED.CORE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted core clock cycles [This event is alias to CPU_CLK_UNHALTED.THREAD_P]",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.CORE_P",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted core clock cycles [This event is alias to CPU_CLK_UNHALTED.CORE_P]",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of instructions retired",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of instructions retired",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of occurrences a retired load gets blocked because its address partially overlaps with an older store (size mismatch) - unknown_sta/bad_forward",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x82",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions.",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
+ "PublicDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions.\nSoftware can use this event as the numerator for the Backend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "EventName": "TOPDOWN.SLOTS",
+ "PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.SLOTS_P",
+ "PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear.",
+ "EventName": "TOPDOWN_BAD_SPECULATION.ALL",
+ "PublicDescription": "Fixed Counter: Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the IQ. Also, includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN_BE_BOUND.ALL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of retirement slots not consumed due to front end stalls",
+ "EventName": "TOPDOWN_FE_BOUND.ALL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of consumed retirement slots. Similar to UOPS_RETIRED.ALL",
+ "EventName": "TOPDOWN_RETIRING.ALL",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.SLOTS",
+ "PublicDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric.\nSoftware can use this event as the numerator for the Retiring metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/lunarlake/virtual-memory.json
new file mode 100644
index 000000000000..bb9458799f1c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/virtual-memory.json
@@ -0,0 +1,56 @@
+[
+ {
+ "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to any page size.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to any page size.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0xe",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to any page size.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0xe",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe",
+ "Unit": "cpu_core"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index 3a8770e29fe8..e571683f59f3 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -1,39 +1,41 @@
Family-model,Version,Filename,EventType
-GenuineIntel-6-(97|9A|B7|BA|BF),v1.21,alderlake,core
-GenuineIntel-6-BE,v1.21,alderlaken,core
-GenuineIntel-6-(1C|26|27|35|36),v4,bonnell,core
+GenuineIntel-6-(97|9A|B7|BA|BF),v1.23,alderlake,core
+GenuineIntel-6-BE,v1.23,alderlaken,core
+GenuineIntel-6-(1C|26|27|35|36),v5,bonnell,core
GenuineIntel-6-(3D|47),v28,broadwell,core
-GenuineIntel-6-56,v10,broadwellde,core
-GenuineIntel-6-4F,v21,broadwellx,core
-GenuineIntel-6-55-[56789ABCDEF],v1.19,cascadelakex,core
+GenuineIntel-6-56,v11,broadwellde,core
+GenuineIntel-6-4F,v22,broadwellx,core
+GenuineIntel-6-55-[56789ABCDEF],v1.20,cascadelakex,core
GenuineIntel-6-9[6C],v1.04,elkhartlake,core
+GenuineIntel-6-CF,v1.01,emeraldrapids,core
GenuineIntel-6-5[CF],v13,goldmont,core
GenuineIntel-6-7A,v1.01,goldmontplus,core
GenuineIntel-6-B6,v1.00,grandridge,core
GenuineIntel-6-A[DE],v1.01,graniterapids,core
GenuineIntel-6-(3C|45|46),v33,haswell,core
-GenuineIntel-6-3F,v27,haswellx,core
+GenuineIntel-6-3F,v28,haswellx,core
GenuineIntel-6-7[DE],v1.19,icelake,core
GenuineIntel-6-6[AC],v1.21,icelakex,core
GenuineIntel-6-3A,v24,ivybridge,core
-GenuineIntel-6-3E,v23,ivytown,core
-GenuineIntel-6-2D,v23,jaketown,core
-GenuineIntel-6-(57|85),v10,knightslanding,core
-GenuineIntel-6-A[AC],v1.04,meteorlake,core
-GenuineIntel-6-1[AEF],v3,nehalemep,core
-GenuineIntel-6-2E,v3,nehalemex,core
+GenuineIntel-6-3E,v24,ivytown,core
+GenuineIntel-6-2D,v24,jaketown,core
+GenuineIntel-6-(57|85),v16,knightslanding,core
+GenuineIntel-6-BD,v1.00,lunarlake,core
+GenuineIntel-6-A[AC],v1.06,meteorlake,core
+GenuineIntel-6-1[AEF],v4,nehalemep,core
+GenuineIntel-6-2E,v4,nehalemex,core
GenuineIntel-6-A7,v1.01,rocketlake,core
GenuineIntel-6-2A,v19,sandybridge,core
-GenuineIntel-6-(8F|CF),v1.15,sapphirerapids,core
+GenuineIntel-6-8F,v1.16,sapphirerapids,core
GenuineIntel-6-AF,v1.00,sierraforest,core
GenuineIntel-6-(37|4A|4C|4D|5A),v15,silvermont,core
GenuineIntel-6-(4E|5E|8E|9E|A5|A6),v57,skylake,core
-GenuineIntel-6-55-[01234],v1.31,skylakex,core
+GenuineIntel-6-55-[01234],v1.32,skylakex,core
GenuineIntel-6-86,v1.21,snowridgex,core
GenuineIntel-6-8[CD],v1.13,tigerlake,core
-GenuineIntel-6-2C,v4,westmereep-dp,core
-GenuineIntel-6-25,v3,westmereep-sp,core
-GenuineIntel-6-2F,v3,westmereex,core
+GenuineIntel-6-2C,v5,westmereep-dp,core
+GenuineIntel-6-25,v4,westmereep-sp,core
+GenuineIntel-6-2F,v4,westmereex,core
AuthenticAMD-23-([12][0-9A-F]|[0-9A-F]),v2,amdzen1,core
AuthenticAMD-23-[[:xdigit:]]+,v1,amdzen2,core
AuthenticAMD-25-([245][[:xdigit:]]|[[:xdigit:]]),v1,amdzen3,core
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/cache.json b/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
index 1de0200b32f6..5fef87502d4b 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
@@ -967,6 +967,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts demand data reads that resulted in a snoop hit in another cores caches, data forwarding is required as the data is modified.",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
@@ -977,6 +987,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts demand data reads that resulted in a snoop hit in another cores caches which forwarded the unmodified data to the requesting core.",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
@@ -987,6 +1007,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that resulted in a snoop hit in another cores caches, data forwarding is required as the data is modified.",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json b/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
index 8264419500a5..9da8689eda81 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
@@ -461,6 +461,27 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE]",
+ "CounterMask": "6",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE",
+ "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE]",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK]",
+ "CounterMask": "1",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_BUBBLES.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK]",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
@@ -470,22 +491,22 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled",
+ "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled [This event is alias to IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE]",
"CounterMask": "6",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
- "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE]",
"SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled",
+ "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled [This event is alias to IDQ_BUBBLES.CYCLES_FE_WAS_OK]",
"CounterMask": "1",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
"Invert": "1",
- "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_BUBBLES.CYCLES_FE_WAS_OK]",
"SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_core"
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/memory.json b/tools/perf/pmu-events/arch/x86/meteorlake/memory.json
index 2605e1d0ba9f..a5b83293f157 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/memory.json
@@ -67,6 +67,14 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of machine clears due to memory ordering caused by a snoop from an external agent. Does not count internally generated machine clears such as those due to memory disambiguation.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "SampleAfterValue": "20003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Number of machine clears due to memory ordering conflicts.",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
@@ -76,6 +84,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "2",
+ "EventCode": "0x47",
+ "EventName": "MEMORY_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
"CounterMask": "3",
"EventCode": "0x47",
@@ -280,6 +297,26 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts demand data read requests that miss the L3 cache.",
"EventCode": "0x21",
"EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/other.json b/tools/perf/pmu-events/arch/x86/meteorlake/other.json
index f4c603599df4..d55e792c0c43 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/other.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/other.json
@@ -8,6 +8,46 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that have any type of response.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts streaming stores that have any type of response.",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json b/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
index 352c5efafc06..deaa7aba93f7 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
@@ -218,6 +218,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of mispredicted taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfe",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "number of branch instructions retired that were mispredicted and taken.",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN",
@@ -293,6 +302,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of mispredicted near taken branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
@@ -733,7 +751,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
+ "BriefDescription": "Bubble cycles of BAClear (Unknown Branch).",
"EventCode": "0xad",
"EventName": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
"MSRIndex": "0x3F7",
@@ -1046,6 +1064,14 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to Branch Mispredict",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.MISPREDICT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to a machine clear (nuke).",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.NUKE",
@@ -1069,6 +1095,22 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to memory reservation stall (scheduler not being able to accept another uop). This could be caused by RSV full or load/store buffer block.",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.MEM_SCHEDULER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to IEC and FPC RAT stalls - which can be due to the FIQ and IEC reservation station stall (integer, FP and SIMD scheduler not being able to accept another uop. )",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to mrbl stall. A 'marble' refers to a physical register file entry, also known as the physical destination (PDST).",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.REGISTER",
@@ -1077,6 +1119,14 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to ROB full",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.REORDER_BUFFER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to iq/jeu scoreboards or ms scb",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.SERIALIZATION",
@@ -1157,6 +1207,14 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend that do not categorize into any other common frontend stall",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.OTHER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to predecode wrong",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.PREDECODE",
@@ -1399,6 +1457,14 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the total number of uops retired.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.ALL",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Cycles with retired uop(s).",
"CounterMask": "1",
"EventCode": "0xc2",
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/uncore-other.json b/tools/perf/pmu-events/arch/x86/meteorlake/uncore-other.json
new file mode 100644
index 000000000000..2af92e43b28a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/uncore-other.json
@@ -0,0 +1,9 @@
+[
+ {
+ "BriefDescription": "This 48-bit fixed counter counts the UCLK cycles.",
+ "EventCode": "0xff",
+ "EventName": "UNC_CLOCK.SOCKET",
+ "PerPkg": "1",
+ "Unit": "CLOCK"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/cache.json b/tools/perf/pmu-events/arch/x86/nehalemep/cache.json
index 1a132fcda964..5113a4e059e4 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/cache.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/cache.json
@@ -287,7 +287,7 @@
"UMask": "0x20"
},
{
- "BriefDescription": "L2 lines alloacated",
+ "BriefDescription": "L2 lines allocated",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.ANY",
"SampleAfterValue": "100000",
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json b/tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json
index c03f8990fa82..196ae1d9b157 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json
@@ -8,7 +8,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "X87 Floating poiint assists for invalid input value (Precise Event)",
+ "BriefDescription": "X87 Floating point assists for invalid input value (Precise Event)",
"EventCode": "0xF7",
"EventName": "FP_ASSIST.INPUT",
"PEBS": "1",
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/cache.json b/tools/perf/pmu-events/arch/x86/nehalemex/cache.json
index a4142cd2ca86..0042e53fdc78 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/cache.json
@@ -287,7 +287,7 @@
"UMask": "0x20"
},
{
- "BriefDescription": "L2 lines alloacated",
+ "BriefDescription": "L2 lines allocated",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.ANY",
"SampleAfterValue": "100000",
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json b/tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json
index c03f8990fa82..196ae1d9b157 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json
@@ -8,7 +8,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "X87 Floating poiint assists for invalid input value (Precise Event)",
+ "BriefDescription": "X87 Floating point assists for invalid input value (Precise Event)",
"EventCode": "0xF7",
"EventName": "FP_ASSIST.INPUT",
"PEBS": "1",
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json b/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json
index a0191c8b708d..0c880e415669 100644
--- a/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json
@@ -1551,7 +1551,7 @@
},
{
"BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
- "MetricExpr": "(cycles\\-t / el\\-start if has_event(cycles\\-t) else 0)",
+ "MetricExpr": "(cycles\\-t / el\\-start if has_event(el\\-start) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_cycles_per_elision",
"ScaleUnit": "1cycles / elision"
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json
index 860a415e5e79..9e53da55d0c1 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json
@@ -333,29 +333,56 @@
"UMask": "0x20"
},
{
- "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled",
+ "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CORE]",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_BUBBLES.CORE",
+ "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_UOPS_NOT_DELIVERED.CORE]",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE]",
+ "CounterMask": "6",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE",
+ "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE]",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK]",
+ "CounterMask": "1",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_BUBBLES.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK]",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled [This event is alias to IDQ_BUBBLES.CORE]",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
- "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_BUBBLES.CORE]",
"SampleAfterValue": "1000003",
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled",
+ "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled [This event is alias to IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE]",
"CounterMask": "6",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
- "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE]",
"SampleAfterValue": "1000003",
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled",
+ "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled [This event is alias to IDQ_BUBBLES.CYCLES_FE_WAS_OK]",
"CounterMask": "1",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
"Invert": "1",
- "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_BUBBLES.CYCLES_FE_WAS_OK]",
"SampleAfterValue": "1000003",
"UMask": "0x1"
}
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
index 222212abd811..06c6d67cb76b 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
@@ -1945,13 +1945,6 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
- "MetricExpr": "(cycles\\-t / el\\-start if has_event(cycles\\-t) else 0)",
- "MetricGroup": "transaction",
- "MetricName": "tsx_cycles_per_elision",
- "ScaleUnit": "1cycles / elision"
- },
- {
"BriefDescription": "Number of cycles within a transaction divided by the number of transactions.",
"MetricExpr": "(cycles\\-t / tx\\-start if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json
index 3fa660694bc7..cf6fa70f37c1 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json
@@ -4591,7 +4591,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CXL_ACC_LOCAL",
- "EventCode": "0x35",
+ "EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CXL_ACC_LOCAL",
"PerPkg": "1",
"PortMask": "0x000",
@@ -4825,7 +4825,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CXL_ACC_LOCAL",
- "EventCode": "0x35",
+ "EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CXL_ACC_LOCAL",
"PerPkg": "1",
"PortMask": "0x000",
@@ -4870,7 +4870,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_CXL_ACC_LOCAL",
- "EventCode": "0x35",
+ "EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_CXL_ACC_LOCAL",
"PerPkg": "1",
"PortMask": "0x000",
@@ -4924,7 +4924,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_CXL_ACC_LOCAL",
- "EventCode": "0x35",
+ "EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_CXL_ACC_LOCAL",
"PerPkg": "1",
"PortMask": "0x000",
@@ -4942,7 +4942,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF_CXL_ACC_LOCAL",
- "EventCode": "0x35",
+ "EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF_CXL_ACC_LOCAL",
"PerPkg": "1",
"PortMask": "0x000",
@@ -4977,7 +4977,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_CXL_ACC_LOCAL",
- "EventCode": "0x35",
+ "EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_CXL_ACC_LOCAL",
"PerPkg": "1",
"PortMask": "0x000",
@@ -5128,7 +5128,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA_CXL_ACC_LOCAL",
- "EventCode": "0x35",
+ "EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA_CXL_ACC_LOCAL",
"PerPkg": "1",
"PortMask": "0x000",
@@ -5154,7 +5154,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO_CXL_ACC_LOCAL",
- "EventCode": "0x35",
+ "EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO_CXL_ACC_LOCAL",
"PerPkg": "1",
"PortMask": "0x000",
@@ -5260,7 +5260,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_CXL_ACC_LOCAL",
- "EventCode": "0x35",
+ "EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_CXL_ACC_LOCAL",
"PerPkg": "1",
"PortMask": "0x000",
@@ -5295,7 +5295,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_CXL_ACC_LOCAL",
- "EventCode": "0x35",
+ "EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_CXL_ACC_LOCAL",
"PerPkg": "1",
"PortMask": "0x000",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
index 2795a404bb58..faa615c57893 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
@@ -1473,7 +1473,7 @@
},
{
"BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
- "MetricExpr": "(cycles\\-t / el\\-start if has_event(cycles\\-t) else 0)",
+ "MetricExpr": "(cycles\\-t / el\\-start if has_event(el\\-start) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_cycles_per_elision",
"ScaleUnit": "1cycles / elision"
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
index fa4209809c57..4a8f8eeb7525 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
@@ -1781,7 +1781,7 @@
},
{
"BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
- "MetricExpr": "(cycles\\-t / el\\-start if has_event(cycles\\-t) else 0)",
+ "MetricExpr": "(cycles\\-t / el\\-start if has_event(el\\-start) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_cycles_per_elision",
"ScaleUnit": "1cycles / elision"
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json b/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json
index fab084e1bc69..f11860f39c18 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json
@@ -63,6 +63,12 @@
"ScaleUnit": "100%"
},
{
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "tma_info_system_socket_clks / #num_dies / duration_time / 1e9",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
+ },
+ {
"BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
"MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
"MetricGroup": "smi",
@@ -1027,6 +1033,12 @@
"MetricName": "tma_info_system_smt_2t_utilization"
},
{
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "UNC_CLOCK.SOCKET",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_system_socket_clks"
+ },
+ {
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "tma_info_thread_clks / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
@@ -1539,7 +1551,7 @@
},
{
"BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
- "MetricExpr": "(cycles\\-t / el\\-start if has_event(cycles\\-t) else 0)",
+ "MetricExpr": "(cycles\\-t / el\\-start if has_event(el\\-start) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_cycles_per_elision",
"ScaleUnit": "1cycles / elision"
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/floating-point.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/floating-point.json
index c03f8990fa82..196ae1d9b157 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-dp/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/floating-point.json
@@ -8,7 +8,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "X87 Floating poiint assists for invalid input value (Precise Event)",
+ "BriefDescription": "X87 Floating point assists for invalid input value (Precise Event)",
"EventCode": "0xF7",
"EventName": "FP_ASSIST.INPUT",
"PEBS": "1",
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json
index e00c301640f3..d025e2c0cf1c 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json
@@ -182,7 +182,7 @@
"UMask": "0x20"
},
{
- "BriefDescription": "L2 lines alloacated",
+ "BriefDescription": "L2 lines allocated",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.ANY",
"SampleAfterValue": "100000",
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/floating-point.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/floating-point.json
index c03f8990fa82..196ae1d9b157 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-sp/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/floating-point.json
@@ -8,7 +8,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "X87 Floating poiint assists for invalid input value (Precise Event)",
+ "BriefDescription": "X87 Floating point assists for invalid input value (Precise Event)",
"EventCode": "0xF7",
"EventName": "FP_ASSIST.INPUT",
"PEBS": "1",
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/cache.json b/tools/perf/pmu-events/arch/x86/westmereex/cache.json
index 6c7c52733dda..18d61d43e4c9 100644
--- a/tools/perf/pmu-events/arch/x86/westmereex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/westmereex/cache.json
@@ -182,7 +182,7 @@
"UMask": "0x20"
},
{
- "BriefDescription": "L2 lines alloacated",
+ "BriefDescription": "L2 lines allocated",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.ANY",
"SampleAfterValue": "100000",
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/floating-point.json b/tools/perf/pmu-events/arch/x86/westmereex/floating-point.json
index c03f8990fa82..196ae1d9b157 100644
--- a/tools/perf/pmu-events/arch/x86/westmereex/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/westmereex/floating-point.json
@@ -8,7 +8,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "X87 Floating poiint assists for invalid input value (Precise Event)",
+ "BriefDescription": "X87 Floating point assists for invalid input value (Precise Event)",
"EventCode": "0xF7",
"EventName": "FP_ASSIST.INPUT",
"PEBS": "1",
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/pipeline.json b/tools/perf/pmu-events/arch/x86/westmereex/pipeline.json
index 1c61d18a4b5f..026236558d05 100644
--- a/tools/perf/pmu-events/arch/x86/westmereex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/westmereex/pipeline.json
@@ -45,7 +45,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Early Branch Prediciton Unit clears",
+ "BriefDescription": "Early Branch Prediction Unit clears",
"EventCode": "0xE8",
"EventName": "BPU_CLEARS.EARLY",
"SampleAfterValue": "2000000",
diff --git a/tools/perf/pmu-events/empty-pmu-events.c b/tools/perf/pmu-events/empty-pmu-events.c
index 12bd043a05e3..13727421d424 100644
--- a/tools/perf/pmu-events/empty-pmu-events.c
+++ b/tools/perf/pmu-events/empty-pmu-events.c
@@ -245,6 +245,14 @@ static const struct pmu_event pmu_events__test_soc_sys[] = {
.pmu = "uncore_sys_ccn_pmu",
},
{
+ .name = "sys_cmn_pmu.hnf_cache_miss",
+ .event = "eventid=0x1,type=0x5",
+ .desc = "Counts total cache misses in first lookup result (high priority). Unit: uncore_sys_cmn_pmu ",
+ .compat = "(434|436|43c|43a).*",
+ .topic = "uncore",
+ .pmu = "uncore_sys_cmn_pmu",
+ },
+ {
.name = 0,
.event = 0,
.desc = 0,
diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py
index 72ba4a9239c6..3c091ab75305 100755
--- a/tools/perf/pmu-events/jevents.py
+++ b/tools/perf/pmu-events/jevents.py
@@ -289,6 +289,7 @@ class JsonEvent:
'cpu_core': 'cpu_core',
'cpu_atom': 'cpu_atom',
'ali_drw': 'ali_drw',
+ 'arm_cmn': 'arm_cmn',
}
return table[unit] if unit in table else f'uncore_{unit.lower()}'
@@ -298,6 +299,7 @@ class JsonEvent:
if 'ExtSel' in jd:
eventcode |= int(jd['ExtSel']) << 8
configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None
+ eventidcode = int(jd['EventidCode'], 0) if 'EventidCode' in jd else None
self.name = jd['EventName'].lower() if 'EventName' in jd else None
self.topic = ''
self.compat = jd.get('Compat')
@@ -335,7 +337,13 @@ class JsonEvent:
if precise and self.desc and '(Precise Event)' not in self.desc:
extra_desc += ' (Must be precise)' if precise == '2' else (' (Precise '
'event)')
- event = f'config={llx(configcode)}' if configcode is not None else f'event={llx(eventcode)}'
+ event = None
+ if configcode is not None:
+ event = f'config={llx(configcode)}'
+ elif eventidcode is not None:
+ event = f'eventid={llx(eventidcode)}'
+ else:
+ event = f'event={llx(eventcode)}'
event_fields = [
('AnyThread', 'any='),
('PortMask', 'ch_mask='),
@@ -345,6 +353,7 @@ class JsonEvent:
('Invert', 'inv='),
('SampleAfterValue', 'period='),
('UMask', 'umask='),
+ ('NodeType', 'type='),
]
for key, value in event_fields:
if key in jd and jd[key] != '0':
@@ -764,8 +773,8 @@ static const struct pmu_sys_events pmu_sys_event_tables[] = {
continue
_args.output_file.write(f"""\t{{
\t\t.metric_table = {{
-\t\t\t.entries = {tblname},
-\t\t\t.length = ARRAY_SIZE({tblname})
+\t\t\t.pmus = {tblname},
+\t\t\t.num_pmus = ARRAY_SIZE({tblname})
\t\t}},
\t\t.name = \"{tblname}\",
\t}},
@@ -967,68 +976,99 @@ int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table,
return 0;
}
-const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
+static const struct pmu_events_map *map_for_pmu(struct perf_pmu *pmu)
{
- const struct pmu_events_table *table = NULL;
- char *cpuid = perf_pmu__getcpuid(pmu);
+ static struct {
+ const struct pmu_events_map *map;
+ struct perf_pmu *pmu;
+ } last_result;
+ static struct {
+ const struct pmu_events_map *map;
+ char *cpuid;
+ } last_map_search;
+ static bool has_last_result, has_last_map_search;
+ const struct pmu_events_map *map = NULL;
+ char *cpuid = NULL;
size_t i;
- /* on some platforms which uses cpus map, cpuid can be NULL for
+ if (has_last_result && last_result.pmu == pmu)
+ return last_result.map;
+
+ cpuid = perf_pmu__getcpuid(pmu);
+
+ /*
+	 * On some platforms that use a cpus map, cpuid can be NULL for
* PMUs other than CORE PMUs.
*/
if (!cpuid)
- return NULL;
+ goto out_update_last_result;
+
+ if (has_last_map_search && !strcmp(last_map_search.cpuid, cpuid)) {
+ map = last_map_search.map;
+ free(cpuid);
+ } else {
+ i = 0;
+ for (;;) {
+ map = &pmu_events_map[i++];
+
+ if (!map->arch) {
+ map = NULL;
+ break;
+ }
+
+ if (!strcmp_cpuid_str(map->cpuid, cpuid))
+ break;
+ }
+ free(last_map_search.cpuid);
+ last_map_search.cpuid = cpuid;
+ last_map_search.map = map;
+ has_last_map_search = true;
+ }
+out_update_last_result:
+ last_result.pmu = pmu;
+ last_result.map = map;
+ has_last_result = true;
+ return map;
+}
- i = 0;
- for (;;) {
- const struct pmu_events_map *map = &pmu_events_map[i++];
- if (!map->arch)
- break;
+const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
+{
+ const struct pmu_events_map *map = map_for_pmu(pmu);
- if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
- table = &map->event_table;
- break;
- }
- }
- free(cpuid);
- if (!pmu || !table)
- return table;
+ if (!map)
+ return NULL;
- for (i = 0; i < table->num_pmus; i++) {
- const struct pmu_table_entry *table_pmu = &table->pmus[i];
+ if (!pmu)
+ return &map->event_table;
+
+ for (size_t i = 0; i < map->event_table.num_pmus; i++) {
+ const struct pmu_table_entry *table_pmu = &map->event_table.pmus[i];
const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
if (pmu__name_match(pmu, pmu_name))
- return table;
+ return &map->event_table;
}
return NULL;
}
const struct pmu_metrics_table *perf_pmu__find_metrics_table(struct perf_pmu *pmu)
{
- const struct pmu_metrics_table *table = NULL;
- char *cpuid = perf_pmu__getcpuid(pmu);
- int i;
+ const struct pmu_events_map *map = map_for_pmu(pmu);
- /* on some platforms which uses cpus map, cpuid can be NULL for
- * PMUs other than CORE PMUs.
- */
- if (!cpuid)
+ if (!map)
return NULL;
- i = 0;
- for (;;) {
- const struct pmu_events_map *map = &pmu_events_map[i++];
- if (!map->arch)
- break;
+ if (!pmu)
+ return &map->metric_table;
- if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
- table = &map->metric_table;
- break;
- }
+ for (size_t i = 0; i < map->metric_table.num_pmus; i++) {
+ const struct pmu_table_entry *table_pmu = &map->metric_table.pmus[i];
+ const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
+
+ if (pmu__name_match(pmu, pmu_name))
+ return &map->metric_table;
}
- free(cpuid);
- return table;
+ return NULL;
}
const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
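The jevents.py hunk above picks the event encoding in a fixed precedence: ConfigCode first, then the new EventidCode, then EventCode, with NodeType appended as a "type=" term like the other optional fields. A minimal Python sketch of that precedence (simplified helper names, not the actual jevents.py code):

# Sketch of the encoding precedence added above; field handling is simplified.
def encode_event(jd):
    def llx(x):
        return '%#x' % x

    if 'ConfigCode' in jd:
        event = f"config={llx(int(jd['ConfigCode'], 0))}"
    elif 'EventidCode' in jd:
        event = f"eventid={llx(int(jd['EventidCode'], 0))}"
    else:
        event = f"event={llx(int(jd.get('EventCode', '0'), 0))}"

    # NodeType is emitted as a type= term when present and non-zero.
    if jd.get('NodeType', '0') != '0':
        event += f",type={llx(int(jd['NodeType'], 0))}"
    return event

# e.g. the Arm CMN test event above:
# encode_event({'EventidCode': '0x1', 'NodeType': '0x5'}) == 'eventid=0x1,type=0x5'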
diff --git a/tools/perf/pmu-events/metric.py b/tools/perf/pmu-events/metric.py
index 3e673f25d5fd..92acd89ed97a 100644
--- a/tools/perf/pmu-events/metric.py
+++ b/tools/perf/pmu-events/metric.py
@@ -558,8 +558,7 @@ def ParsePerfJson(orig: str) -> Expression:
# Convert accidentally converted scientific notation constants back
py = re.sub(r'([0-9]+)Event\(r"(e[0-9]+)"\)', r'\1\2', py)
# Convert all the known keywords back from events to just the keyword
- keywords = ['if', 'else', 'min', 'max', 'd_ratio', 'source_count', 'has_event', 'strcmp_cpuid_str',
- 'cpuid_not_more_than']
+ keywords = ['if', 'else', 'min', 'max', 'd_ratio', 'source_count', 'has_event', 'strcmp_cpuid_str']
for kw in keywords:
py = re.sub(rf'Event\(r"{kw}"\)', kw, py)
try:
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index 63d5e6d5f165..2b45ffa462a6 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -66,6 +66,7 @@ perf-y += dlfilter-test.o
perf-y += sigtrap.o
perf-y += event_groups.o
perf-y += symbols.o
+perf-y += util.o
ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc))
perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
diff --git a/tools/perf/tests/attr/system-wide-dummy b/tools/perf/tests/attr/system-wide-dummy
index 2f3e3eb728eb..a1e1d6a263bf 100644
--- a/tools/perf/tests/attr/system-wide-dummy
+++ b/tools/perf/tests/attr/system-wide-dummy
@@ -9,8 +9,10 @@ flags=8
type=1
size=136
config=9
-sample_period=4000
-sample_type=455
+sample_period=1
+# PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
+# PERF_SAMPLE_CPU | PERF_SAMPLE_IDENTIFIER
+sample_type=65671
read_format=4|20
# Event will be enabled right away.
disabled=0
@@ -18,12 +20,12 @@ inherit=1
pinned=0
exclusive=0
exclude_user=0
-exclude_kernel=0
-exclude_hv=0
+exclude_kernel=1
+exclude_hv=1
exclude_idle=0
mmap=1
comm=1
-freq=1
+freq=0
inherit_stat=0
enable_on_exec=0
task=1
@@ -32,7 +34,7 @@ precise_ip=0
mmap_data=0
sample_id_all=1
exclude_host=0
-exclude_guest=0
+exclude_guest=1
exclude_callchain_kernel=0
exclude_callchain_user=0
mmap2=1
diff --git a/tools/perf/tests/attr/test-record-C0 b/tools/perf/tests/attr/test-record-C0
index 317730b906dd..198e8429a1bf 100644
--- a/tools/perf/tests/attr/test-record-C0
+++ b/tools/perf/tests/attr/test-record-C0
@@ -10,9 +10,9 @@ cpu=0
enable_on_exec=0
# PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
-# PERF_SAMPLE_ID | PERF_SAMPLE_PERIOD
+# PERF_SAMPLE_PERIOD | PERF_SAMPLE_IDENTIFIER
# + PERF_SAMPLE_CPU added by -C 0
-sample_type=455
+sample_type=65927
# Dummy event handles mmaps, comm and task.
mmap=0
diff --git a/tools/perf/tests/attr/test-record-dummy-C0 b/tools/perf/tests/attr/test-record-dummy-C0
new file mode 100644
index 000000000000..576ec48b3aaf
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-dummy-C0
@@ -0,0 +1,55 @@
+[config]
+command = record
+args = --no-bpf-event -e dummy -C 0 kill >/dev/null 2>&1
+ret = 1
+
+[event]
+fd=1
+group_fd=-1
+cpu=0
+pid=-1
+flags=8
+type=1
+size=136
+config=9
+sample_period=4000
+# PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
+# PERF_SAMPLE_PERIOD
+# + PERF_SAMPLE_CPU added by -C 0
+sample_type=391
+read_format=4|20
+disabled=0
+inherit=1
+pinned=0
+exclusive=0
+exclude_user=0
+exclude_kernel=0
+exclude_hv=0
+exclude_idle=0
+mmap=1
+comm=1
+freq=1
+inherit_stat=0
+enable_on_exec=0
+task=1
+watermark=0
+precise_ip=0
+mmap_data=0
+sample_id_all=1
+exclude_host=0
+exclude_guest=1
+exclude_callchain_kernel=0
+exclude_callchain_user=0
+mmap2=1
+comm_exec=1
+context_switch=0
+write_backward=0
+namespaces=0
+use_clockid=0
+wakeup_events=0
+bp_type=0
+config1=0
+config2=0
+branch_sample_type=0
+sample_regs_user=0
+sample_stack_user=0
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 0ad18cf6dd22..cb6f1dd00dc4 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -123,6 +123,7 @@ static struct test_suite *generic_tests[] = {
&suite__sigtrap,
&suite__event_groups,
&suite__symbols,
+ &suite__util,
NULL,
};
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index ed3815163d1b..3af81012014e 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -269,6 +269,16 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
if (addr + len > map__end(al.map))
len = map__end(al.map) - addr;
+ /*
+	 * Some architectures (e.g. powerpc) have stubs (trampolines) in kernel
+	 * modules to manage long jumps. Check if the ip offset falls in stub
+	 * sections for kernel modules, and skip module addresses after the text end.
+ */
+ if (dso->is_kmod && al.addr > dso->text_end) {
+ pr_debug("skipping the module address %#"PRIx64" after text end\n", al.addr);
+ goto out;
+ }
+
/* Read the object code using perf */
ret_len = dso__data_read_offset(dso, maps__machine(thread__maps(thread)),
al.addr, buf1, len);
diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c
index 81229fa4f1e9..e3aa9d4fcf3a 100644
--- a/tools/perf/tests/expr.c
+++ b/tools/perf/tests/expr.c
@@ -9,6 +9,7 @@
#include <math.h>
#include <stdlib.h>
#include <string.h>
+#include <string2.h>
#include <linux/zalloc.h>
static int test_ids_union(void)
@@ -74,10 +75,13 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u
int ret;
struct expr_parse_ctx *ctx;
bool is_intel = false;
- char buf[128];
+ char strcmp_cpuid_buf[256];
+ struct perf_pmu *pmu = perf_pmus__find_core_pmu();
+ char *cpuid = perf_pmu__getcpuid(pmu);
+ char *escaped_cpuid1, *escaped_cpuid2;
- if (!get_cpuid(buf, sizeof(buf)))
- is_intel = strstr(buf, "Intel") != NULL;
+ TEST_ASSERT_VAL("get_cpuid", cpuid);
+ is_intel = strstr(cpuid, "Intel") != NULL;
TEST_ASSERT_EQUAL("ids_union", test_ids_union(), 0);
@@ -257,13 +261,32 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u
TEST_ASSERT_VAL("source count", hashmap__size(ctx->ids) == 1);
TEST_ASSERT_VAL("source count", hashmap__find(ctx->ids, "EVENT1", &val_ptr));
+
+ /* Test no cpuid match */
+ ret = test(ctx, "strcmp_cpuid_str(0x0)", 0);
+
+ /*
+ * Test cpuid match with current cpuid. Special chars have to be
+ * escaped.
+ */
+ escaped_cpuid1 = strreplace_chars('-', cpuid, "\\-");
+ free(cpuid);
+ escaped_cpuid2 = strreplace_chars(',', escaped_cpuid1, "\\,");
+ free(escaped_cpuid1);
+ escaped_cpuid1 = strreplace_chars('=', escaped_cpuid2, "\\=");
+ free(escaped_cpuid2);
+ scnprintf(strcmp_cpuid_buf, sizeof(strcmp_cpuid_buf),
+ "strcmp_cpuid_str(%s)", escaped_cpuid1);
+ free(escaped_cpuid1);
+ ret |= test(ctx, strcmp_cpuid_buf, 1);
+
/* has_event returns 1 when an event exists. */
expr__add_id_val(ctx, strdup("cycles"), 2);
- ret = test(ctx, "has_event(cycles)", 1);
+ ret |= test(ctx, "has_event(cycles)", 1);
expr__ctx_free(ctx);
- return 0;
+ return ret;
}
DEFINE_SUITE("Simple expression parser", expr);
diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c
index 2d19657ab5e0..5b6f1e883466 100644
--- a/tools/perf/tests/hists_link.c
+++ b/tools/perf/tests/hists_link.c
@@ -148,8 +148,8 @@ static int find_sample(struct sample *samples, size_t nr_samples,
struct thread *t, struct map *m, struct symbol *s)
{
while (nr_samples--) {
- if (RC_CHK_ACCESS(samples->thread) == RC_CHK_ACCESS(t) &&
- RC_CHK_ACCESS(samples->map) == RC_CHK_ACCESS(m) &&
+ if (RC_CHK_EQUAL(samples->thread, t) &&
+ RC_CHK_EQUAL(samples->map, m) &&
samples->sym == s)
return 1;
samples++;
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index ea4c341f5af1..d9945ed25bc5 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -70,8 +70,8 @@ make_python_perf_so := $(python_perf_so)
make_debug := DEBUG=1
make_nondistro := BUILD_NONDISTRO=1
make_extra_tests := EXTRA_TESTS=1
-make_bpf_skel := BUILD_BPF_SKEL=1
-make_gen_vmlinux_h := BUILD_BPF_SKEL=1 GEN_VMLINUX_H=1
+make_no_bpf_skel := BUILD_BPF_SKEL=0
+make_gen_vmlinux_h := GEN_VMLINUX_H=1
make_no_libperl := NO_LIBPERL=1
make_no_libpython := NO_LIBPYTHON=1
make_no_scripts := NO_LIBPYTHON=1 NO_LIBPERL=1
@@ -138,7 +138,8 @@ endif
run += make_python_perf_so
run += make_debug
run += make_nondistro
-run += make_build_bpf_skel
+run += make_extra_tests
+run += make_no_bpf_skel
run += make_gen_vmlinux_h
run += make_no_libperl
run += make_no_libpython
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index d47f1f871164..f78be21a5999 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -771,12 +771,12 @@ static int test__checkevent_pmu_events_mix(struct evlist *evlist)
return TEST_OK;
}
-static int test__checkterms_simple(struct list_head *terms)
+static int test__checkterms_simple(struct parse_events_terms *terms)
{
struct parse_events_term *term;
/* config=10 */
- term = list_entry(terms->next, struct parse_events_term, list);
+ term = list_entry(terms->terms.next, struct parse_events_term, list);
TEST_ASSERT_VAL("wrong type term",
term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG);
TEST_ASSERT_VAL("wrong type val",
@@ -2363,7 +2363,7 @@ static const struct evlist_test test__events_pmu[] = {
struct terms_test {
const char *str;
- int (*check)(struct list_head *terms);
+ int (*check)(struct parse_events_terms *terms);
};
static const struct terms_test test__terms[] = {
@@ -2467,11 +2467,11 @@ static int test__events2(struct test_suite *test __maybe_unused, int subtest __m
static int test_term(const struct terms_test *t)
{
- struct list_head terms;
+ struct parse_events_terms terms;
int ret;
- INIT_LIST_HEAD(&terms);
+ parse_events_terms__init(&terms);
ret = parse_events_terms(&terms, t->str, /*input=*/ NULL);
if (ret) {
pr_debug("failed to parse terms '%s', err %d\n",
@@ -2480,7 +2480,7 @@ static int test_term(const struct terms_test *t)
}
ret = t->check(&terms);
- parse_events_terms__purge(&terms);
+ parse_events_terms__exit(&terms);
return ret;
}
@@ -2514,9 +2514,14 @@ static int test__pmu_events(struct test_suite *test __maybe_unused, int subtest
while ((pmu = perf_pmus__scan(pmu)) != NULL) {
struct stat st;
char path[PATH_MAX];
+ char pmu_event[PATH_MAX];
+ char *buf = NULL;
+ FILE *file;
struct dirent *ent;
+ size_t len = 0;
DIR *dir;
int err;
+ int n;
snprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s/events/",
sysfs__mountpoint(), pmu->name);
@@ -2538,11 +2543,45 @@ static int test__pmu_events(struct test_suite *test __maybe_unused, int subtest
struct evlist_test e = { .name = NULL, };
char name[2 * NAME_MAX + 1 + 12 + 3];
int test_ret;
+ bool is_event_parameterized = 0;
/* Names containing . are special and cannot be used directly */
if (strchr(ent->d_name, '.'))
continue;
+ /* exclude parametrized ones (name contains '?') */
+ n = snprintf(pmu_event, sizeof(pmu_event), "%s%s", path, ent->d_name);
+ if (n >= PATH_MAX) {
+ pr_err("pmu event name crossed PATH_MAX(%d) size\n", PATH_MAX);
+ continue;
+ }
+
+ file = fopen(pmu_event, "r");
+ if (!file) {
+ pr_debug("can't open pmu event file for '%s'\n", ent->d_name);
+ ret = combine_test_results(ret, TEST_FAIL);
+ continue;
+ }
+
+ if (getline(&buf, &len, file) < 0) {
+ pr_debug(" pmu event: %s is a null event\n", ent->d_name);
+ ret = combine_test_results(ret, TEST_FAIL);
+ fclose(file);
+ continue;
+ }
+
+ if (strchr(buf, '?'))
+ is_event_parameterized = 1;
+
+ free(buf);
+ buf = NULL;
+ fclose(file);
+
+ if (is_event_parameterized == 1) {
+ pr_debug("skipping parametrized PMU event: %s which contains ?\n", pmu_event);
+ continue;
+ }
+
snprintf(name, sizeof(name), "%s/event=%s/u", pmu->name, ent->d_name);
e.name = name;
diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c
index f5321fbdee79..a56d32905743 100644
--- a/tools/perf/tests/pmu-events.c
+++ b/tools/perf/tests/pmu-events.c
@@ -245,7 +245,7 @@ static const struct perf_pmu_test_event sys_ddr_pmu_write_cycles = {
},
.alias_str = "event=0x2b",
.alias_long_desc = "ddr write-cycles event",
- .matching_pmu = "uncore_sys_ddr_pmu",
+ .matching_pmu = "uncore_sys_ddr_pmu0",
};
static const struct perf_pmu_test_event sys_ccn_pmu_read_cycles = {
@@ -259,12 +259,27 @@ static const struct perf_pmu_test_event sys_ccn_pmu_read_cycles = {
},
.alias_str = "config=0x2c",
.alias_long_desc = "ccn read-cycles event",
- .matching_pmu = "uncore_sys_ccn_pmu",
+ .matching_pmu = "uncore_sys_ccn_pmu4",
+};
+
+static const struct perf_pmu_test_event sys_cmn_pmu_hnf_cache_miss = {
+ .event = {
+ .name = "sys_cmn_pmu.hnf_cache_miss",
+ .event = "eventid=0x1,type=0x5",
+ .desc = "Counts total cache misses in first lookup result (high priority)",
+ .topic = "uncore",
+ .pmu = "uncore_sys_cmn_pmu",
+ .compat = "(434|436|43c|43a).*",
+ },
+ .alias_str = "eventid=0x1,type=0x5",
+ .alias_long_desc = "Counts total cache misses in first lookup result (high priority)",
+ .matching_pmu = "uncore_sys_cmn_pmu0",
};
static const struct perf_pmu_test_event *sys_events[] = {
&sys_ddr_pmu_write_cycles,
&sys_ccn_pmu_read_cycles,
+ &sys_cmn_pmu_hnf_cache_miss,
NULL
};
@@ -615,6 +630,12 @@ static int __test_uncore_pmu_event_aliases(struct perf_pmu_test_pmu *test_pmu)
.count = &matched_count,
};
+ if (strcmp(pmu_name, test_event.matching_pmu)) {
+ pr_debug("testing aliases uncore PMU %s: mismatched matching_pmu, %s vs %s\n",
+ pmu_name, test_event.matching_pmu, pmu_name);
+ return -1;
+ }
+
err = perf_pmu__find_event(pmu, event->name, &args,
test_core_pmu_event_aliases_cb);
if (err) {
@@ -701,6 +722,46 @@ static struct perf_pmu_test_pmu test_pmus[] = {
&sys_ccn_pmu_read_cycles,
},
},
+ {
+ .pmu = {
+ .name = (char *)"uncore_sys_cmn_pmu0",
+ .is_uncore = 1,
+ .id = (char *)"43401",
+ },
+ .aliases = {
+ &sys_cmn_pmu_hnf_cache_miss,
+ },
+ },
+ {
+ .pmu = {
+ .name = (char *)"uncore_sys_cmn_pmu0",
+ .is_uncore = 1,
+ .id = (char *)"43602",
+ },
+ .aliases = {
+ &sys_cmn_pmu_hnf_cache_miss,
+ },
+ },
+ {
+ .pmu = {
+ .name = (char *)"uncore_sys_cmn_pmu0",
+ .is_uncore = 1,
+ .id = (char *)"43c03",
+ },
+ .aliases = {
+ &sys_cmn_pmu_hnf_cache_miss,
+ },
+ },
+ {
+ .pmu = {
+ .name = (char *)"uncore_sys_cmn_pmu0",
+ .is_uncore = 1,
+ .id = (char *)"43a01",
+ },
+ .aliases = {
+ &sys_cmn_pmu_hnf_cache_miss,
+ },
+ }
};
/* Test that aliases generated are as expected */
diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c
index eb60e5f66859..8f18127d876a 100644
--- a/tools/perf/tests/pmu.c
+++ b/tools/perf/tests/pmu.c
@@ -128,30 +128,35 @@ static int test_format_dir_put(char *dir)
return system(buf);
}
-static struct list_head *test_terms_list(void)
+static void add_test_terms(struct parse_events_terms *terms)
{
- static LIST_HEAD(terms);
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(test_terms); i++)
- list_add_tail(&test_terms[i].list, &terms);
+ for (i = 0; i < ARRAY_SIZE(test_terms); i++) {
+ struct parse_events_term *clone;
- return &terms;
+ parse_events_term__clone(&clone, &test_terms[i]);
+ list_add_tail(&clone->list, &terms->terms);
+ }
}
static int test__pmu(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
char dir[PATH_MAX];
char *format;
- struct list_head *terms = test_terms_list();
+ struct parse_events_terms terms;
struct perf_event_attr attr;
struct perf_pmu *pmu;
int fd;
int ret;
+ parse_events_terms__init(&terms);
+ add_test_terms(&terms);
pmu = zalloc(sizeof(*pmu));
- if (!pmu)
+ if (!pmu) {
+ parse_events_terms__exit(&terms);
return -ENOMEM;
+ }
INIT_LIST_HEAD(&pmu->format);
INIT_LIST_HEAD(&pmu->aliases);
@@ -159,6 +164,7 @@ static int test__pmu(struct test_suite *test __maybe_unused, int subtest __maybe
format = test_format_dir_get(dir, sizeof(dir));
if (!format) {
free(pmu);
+ parse_events_terms__exit(&terms);
return -EINVAL;
}
@@ -175,7 +181,7 @@ static int test__pmu(struct test_suite *test __maybe_unused, int subtest __maybe
if (ret)
goto out;
- ret = perf_pmu__config_terms(pmu, &attr, terms, /*zero=*/false, /*err=*/NULL);
+ ret = perf_pmu__config_terms(pmu, &attr, &terms, /*zero=*/false, /*err=*/NULL);
if (ret)
goto out;
@@ -191,6 +197,7 @@ static int test__pmu(struct test_suite *test __maybe_unused, int subtest __maybe
out:
test_format_dir_put(format);
perf_pmu__delete(pmu);
+ parse_events_terms__exit(&terms);
return ret;
}
diff --git a/tools/perf/tests/shell/coresight/asm_pure_loop.sh b/tools/perf/tests/shell/coresight/asm_pure_loop.sh
index 779bc8608e1e..2d65defb7e0f 100755
--- a/tools/perf/tests/shell/coresight/asm_pure_loop.sh
+++ b/tools/perf/tests/shell/coresight/asm_pure_loop.sh
@@ -5,9 +5,13 @@
# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
TEST="asm_pure_loop"
+
+# shellcheck source=../lib/coresight.sh
. "$(dirname $0)"/../lib/coresight.sh
+
ARGS=""
DATV="out"
+# shellcheck disable=SC2153
DATA="$DATD/perf-$TEST-$DATV.data"
perf record $PERFRECOPT -o "$DATA" "$BIN" $ARGS
diff --git a/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh b/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh
index 08a44e52ce9b..ddcc9bb850f5 100755
--- a/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh
+++ b/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh
@@ -5,9 +5,13 @@
# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
TEST="memcpy_thread"
+
+# shellcheck source=../lib/coresight.sh
. "$(dirname $0)"/../lib/coresight.sh
+
ARGS="16 10 1"
DATV="16k_10"
+# shellcheck disable=SC2153
DATA="$DATD/perf-$TEST-$DATV.data"
perf record $PERFRECOPT -o "$DATA" "$BIN" $ARGS
diff --git a/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh b/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh
index c83a200dede4..2ce5e139b2fd 100755
--- a/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh
+++ b/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh
@@ -5,9 +5,13 @@
# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
TEST="thread_loop"
+
+# shellcheck source=../lib/coresight.sh
. "$(dirname $0)"/../lib/coresight.sh
+
ARGS="10 1"
DATV="check-tid-10th"
+# shellcheck disable=SC2153
DATA="$DATD/perf-$TEST-$DATV.data"
STDO="$DATD/perf-$TEST-$DATV.stdout"
diff --git a/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh b/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh
index 6346fd5e87c8..3ad9498753d7 100755
--- a/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh
+++ b/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh
@@ -5,9 +5,13 @@
# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
TEST="thread_loop"
+
+# shellcheck source=../lib/coresight.sh
. "$(dirname $0)"/../lib/coresight.sh
+
ARGS="2 20"
DATV="check-tid-2th"
+# shellcheck disable=SC2153
DATA="$DATD/perf-$TEST-$DATV.data"
STDO="$DATD/perf-$TEST-$DATV.stdout"
diff --git a/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh b/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh
index 7304e3d3a6ff..4fbb4a29aad3 100755
--- a/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh
+++ b/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh
@@ -5,9 +5,13 @@
# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
TEST="unroll_loop_thread"
+
+# shellcheck source=../lib/coresight.sh
. "$(dirname $0)"/../lib/coresight.sh
+
ARGS="10"
DATV="10"
+# shellcheck disable=SC2153
DATA="$DATD/perf-$TEST-$DATV.data"
perf record $PERFRECOPT -o "$DATA" "$BIN" $ARGS
diff --git a/tools/perf/tests/shell/lib/coresight.sh b/tools/perf/tests/shell/lib/coresight.sh
index 6c3d34ec64d8..11ed2c25ed91 100644
--- a/tools/perf/tests/shell/lib/coresight.sh
+++ b/tools/perf/tests/shell/lib/coresight.sh
@@ -17,6 +17,8 @@ DIR="$TOOLS/$TEST"
BIN="$DIR/$TEST"
# If the test tool/binary does not exist and is executable then skip the test
if ! test -x "$BIN"; then exit 2; fi
+# If CoreSight is not available, skip the test
+perf list cs_etm | grep -q cs_etm || exit 2
DATD="."
# If the data dir env is set then make the data dir use that instead of ./
if test -n "$PERF_TEST_CORESIGHT_DATADIR"; then
diff --git a/tools/perf/tests/shell/lock_contention.sh b/tools/perf/tests/shell/lock_contention.sh
index d120e83db7d9..c1ec5762215b 100755
--- a/tools/perf/tests/shell/lock_contention.sh
+++ b/tools/perf/tests/shell/lock_contention.sh
@@ -32,6 +32,13 @@ check() {
err=2
exit
fi
+
+ # shellcheck disable=SC2046
+ if [ `nproc` -lt 4 ]; then
+		echo "[Skip] Low number of CPUs (`nproc`), lock event cannot be triggered reliably"
+ err=2
+ exit
+ fi
}
test_record()
@@ -123,6 +130,24 @@ test_aggr_addr()
fi
}
+test_aggr_cgroup()
+{
+ echo "Testing perf lock contention --lock-cgroup"
+
+ if ! perf lock con -b true > /dev/null 2>&1 ; then
+ echo "[Skip] No BPF support"
+ return
+ fi
+
+ # the perf lock contention output goes to the stderr
+ perf lock con -a -b -g -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] BPF result count is not 1:" "$(cat "${result}" | wc -l)"
+ err=1
+ exit
+ fi
+}
+
test_type_filter()
{
echo "Testing perf lock contention --type-filter (w/ spinlock)"
@@ -232,6 +257,31 @@ test_aggr_task_stack_filter()
exit
fi
}
+test_cgroup_filter()
+{
+ echo "Testing perf lock contention --cgroup-filter"
+
+ if ! perf lock con -b true > /dev/null 2>&1 ; then
+ echo "[Skip] No BPF support"
+ return
+ fi
+
+ perf lock con -a -b -g -E 1 -F wait_total -q -- perf bench sched messaging > /dev/null 2> ${result}
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] BPF result should have a cgroup result:" "$(cat "${result}")"
+ err=1
+ exit
+ fi
+
+ cgroup=$(cat "${result}" | awk '{ print $3 }')
+ perf lock con -a -b -g -E 1 -G "${cgroup}" -q -- perf bench sched messaging > /dev/null 2> ${result}
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] BPF result should have a result with cgroup filter:" "$(cat "${cgroup}")"
+ err=1
+ exit
+ fi
+}
+
test_csv_output()
{
@@ -275,10 +325,12 @@ test_bpf
test_record_concurrent
test_aggr_task
test_aggr_addr
+test_aggr_cgroup
test_type_filter
test_lock_filter
test_stack_filter
test_aggr_task_stack_filter
+test_cgroup_filter
test_csv_output
exit ${err}
diff --git a/tools/perf/tests/shell/probe_vfs_getname.sh b/tools/perf/tests/shell/probe_vfs_getname.sh
index 871243d6d03a..554e12e83c55 100755
--- a/tools/perf/tests/shell/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/probe_vfs_getname.sh
@@ -4,10 +4,12 @@
# SPDX-License-Identifier: GPL-2.0
# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
+# shellcheck source=lib/probe.sh
. "$(dirname $0)"/lib/probe.sh
skip_if_no_perf_probe || exit 2
+# shellcheck source=lib/probe_vfs_getname.sh
. "$(dirname $0)"/lib/probe_vfs_getname.sh
add_probe_vfs_getname || skip_if_no_debuginfo
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
index 89214a6d9951..eebeea6bdc76 100755
--- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
@@ -10,7 +10,9 @@
# SPDX-License-Identifier: GPL-2.0
# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
+# shellcheck source=lib/probe.sh
. "$(dirname "$0")/lib/probe.sh"
+# shellcheck source=lib/probe_vfs_getname.sh
. "$(dirname "$0")/lib/probe_vfs_getname.sh"
libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g')
diff --git a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
index 7f664f1889d9..5eedbe29bba1 100755
--- a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
@@ -9,10 +9,12 @@
# SPDX-License-Identifier: GPL-2.0
# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
+# shellcheck source=lib/probe.sh
. "$(dirname "$0")/lib/probe.sh"
skip_if_no_perf_probe || exit 2
+# shellcheck source=lib/probe_vfs_getname.sh
. "$(dirname "$0")/lib/probe_vfs_getname.sh"
record_open_file() {
diff --git a/tools/perf/tests/shell/record.sh b/tools/perf/tests/shell/record.sh
index 4fbc74805d52..29443b8e8876 100755
--- a/tools/perf/tests/shell/record.sh
+++ b/tools/perf/tests/shell/record.sh
@@ -5,6 +5,7 @@
set -e
shelldir=$(dirname "$0")
+# shellcheck source=lib/waiting.sh
. "${shelldir}"/lib/waiting.sh
err=0
diff --git a/tools/perf/tests/shell/record_offcpu.sh b/tools/perf/tests/shell/record_offcpu.sh
index a0d14cd0aa79..a1ef8f0d2b5c 100755
--- a/tools/perf/tests/shell/record_offcpu.sh
+++ b/tools/perf/tests/shell/record_offcpu.sh
@@ -28,7 +28,7 @@ test_offcpu_priv() {
err=2
return
fi
- if perf record --off-cpu -o /dev/null --quiet true 2>&1 | grep BUILD_BPF_SKEL
+ if perf version --build-options 2>&1 | grep HAVE_BPF_SKEL | grep -q OFF
then
echo "off-cpu test [Skipped missing BPF support]"
err=2
diff --git a/tools/perf/tests/shell/record_sideband.sh b/tools/perf/tests/shell/record_sideband.sh
new file mode 100755
index 000000000000..ac70ac27d590
--- /dev/null
+++ b/tools/perf/tests/shell/record_sideband.sh
@@ -0,0 +1,58 @@
+#!/bin/sh
+# perf record sideband tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+
+cleanup()
+{
+ rm -rf ${perfdata}
+ trap - EXIT TERM INT
+}
+
+trap_cleanup()
+{
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+can_cpu_wide()
+{
+ if ! perf record -o ${perfdata} -BN --no-bpf-event -C $1 true > /dev/null 2>&1
+ then
+ echo "record sideband test [Skipped cannot record cpu$1]"
+ err=2
+ fi
+
+ rm -f ${perfdata}
+ return $err
+}
+
+test_system_wide_tracking()
+{
+ # Need CPU 0 and CPU 1
+ can_cpu_wide 0 || return 0
+ can_cpu_wide 1 || return 0
+
+ # Record on CPU 0 a task running on CPU 1
+ perf record -BN --no-bpf-event -o ${perfdata} -C 0 -- taskset --cpu-list 1 true
+
+ # Should get MMAP events from CPU 1
+ mmap_cnt=`perf script -i ${perfdata} --show-mmap-events -C 1 2>/dev/null | grep MMAP | wc -l`
+
+ if [ ${mmap_cnt} -gt 0 ] ; then
+ return 0
+ fi
+
+ echo "Failed to record MMAP events on CPU 1 when tracing CPU 0"
+ return 1
+}
+
+test_system_wide_tracking
+
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/stat+csv_output.sh b/tools/perf/tests/shell/stat+csv_output.sh
index d890eb26e914..f1818fa6d9ce 100755
--- a/tools/perf/tests/shell/stat+csv_output.sh
+++ b/tools/perf/tests/shell/stat+csv_output.sh
@@ -6,6 +6,7 @@
set -e
+# shellcheck source=lib/stat_output.sh
. "$(dirname $0)"/lib/stat_output.sh
csv_sep=@
diff --git a/tools/perf/tests/shell/stat+csv_summary.sh b/tools/perf/tests/shell/stat+csv_summary.sh
index 8bae9c8a835e..323123ff4d19 100755
--- a/tools/perf/tests/shell/stat+csv_summary.sh
+++ b/tools/perf/tests/shell/stat+csv_summary.sh
@@ -10,7 +10,7 @@ set -e
#
perf stat -e cycles -x' ' -I1000 --interval-count 1 --summary 2>&1 | \
grep -e summary | \
-while read summary _num _event _run _pct
+while read summary _ _ _ _
do
if [ $summary != "summary" ]; then
exit 1
@@ -23,7 +23,7 @@ done
#
perf stat -e cycles -x' ' -I1000 --interval-count 1 --summary --no-csv-summary 2>&1 | \
grep -e summary | \
-while read _num _event _run _pct
+while read _ _ _ _
do
exit 1
done
diff --git a/tools/perf/tests/shell/stat+shadow_stat.sh b/tools/perf/tests/shell/stat+shadow_stat.sh
index a1918a15e36a..0c7d79a230ea 100755
--- a/tools/perf/tests/shell/stat+shadow_stat.sh
+++ b/tools/perf/tests/shell/stat+shadow_stat.sh
@@ -4,6 +4,8 @@
set -e
+THRESHOLD=0.015
+
# skip if system-wide mode is forbidden
perf stat -a true > /dev/null 2>&1 || exit 2
@@ -14,7 +16,7 @@ test_global_aggr()
{
perf stat -a --no-big-num -e cycles,instructions sleep 1 2>&1 | \
grep -e cycles -e instructions | \
- while read num evt _hash ipc rest
+ while read num evt _ ipc rest
do
# skip not counted events
if [ "$num" = "<not" ]; then
@@ -33,10 +35,18 @@ test_global_aggr()
fi
# use printf for rounding and a leading zero
- res=`printf "%.2f" "$(echo "scale=6; $num / $cyc" | bc -q)"`
+ res=`echo $num $cyc | awk '{printf "%.2f", $1 / $2}'`
if [ "$ipc" != "$res" ]; then
- echo "IPC is different: $res != $ipc ($num / $cyc)"
- exit 1
+ # check the difference from the real result for FP imperfections
+ diff=`echo $ipc $res $THRESHOLD | \
+ awk '{x = ($1 - $2) < 0 ? ($2 - $1) : ($1 - $2); print (x > $3)}'`
+
+ if [ $diff -eq 1 ]; then
+ echo "IPC is different: $res != $ipc ($num / $cyc)"
+ exit 1
+ fi
+
+ echo "Warning: Difference of IPC is under the threshold"
fi
done
}
@@ -45,7 +55,7 @@ test_no_aggr()
{
perf stat -a -A --no-big-num -e cycles,instructions sleep 1 2>&1 | \
grep ^CPU | \
- while read cpu num evt _hash ipc rest
+ while read cpu num evt _ ipc rest
do
# skip not counted events
if [ "$num" = "<not" ]; then
@@ -67,10 +77,18 @@ test_no_aggr()
fi
# use printf for rounding and a leading zero
- res=`printf "%.2f" "$(echo "scale=6; $num / $cyc" | bc -q)"`
+ res=`echo $num $cyc | awk '{printf "%.2f", $1 / $2}'`
if [ "$ipc" != "$res" ]; then
- echo "IPC is different for $cpu: $res != $ipc ($num / $cyc)"
- exit 1
+ # check difference from the real result for FP imperfections
+ diff=`echo $ipc $res $THRESHOLD | \
+ awk '{x = ($1 - $2) < 0 ? ($2 - $1) : ($1 - $2); print (x > $3)}'`
+
+ if [ $diff -eq 1 ]; then
+ echo "IPC is different: $res != $ipc ($num / $cyc)"
+ exit 1
+ fi
+
+ echo "Warning: Difference of IPC is under the threshold"
fi
done
}
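The stat+shadow_stat.sh change replaces the exact string comparison of IPC with a tolerance check: the test only fails when the recomputed ratio differs from the printed one by more than THRESHOLD=0.015. The awk comparison is equivalent to this small Python check (illustrative only):

# Fail only when the absolute difference between the printed IPC and the
# recomputed one exceeds the 0.015 threshold.
THRESHOLD = 0.015

def ipc_differs(ipc: float, res: float, threshold: float = THRESHOLD) -> bool:
    return abs(ipc - res) > threshold

assert ipc_differs(1.23, 1.25) is True   # 0.02 difference -> report mismatch
assert ipc_differs(1.23, 1.24) is False  # 0.01 difference -> within threshold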
diff --git a/tools/perf/tests/shell/stat+std_output.sh b/tools/perf/tests/shell/stat+std_output.sh
index fb2b10547a11..4fcdd1a9142c 100755
--- a/tools/perf/tests/shell/stat+std_output.sh
+++ b/tools/perf/tests/shell/stat+std_output.sh
@@ -6,6 +6,7 @@
set -e
+# shellcheck source=lib/stat_output.sh
. "$(dirname $0)"/lib/stat_output.sh
stat_output=$(mktemp /tmp/__perf_test.stat_output.std.XXXXX)
diff --git a/tools/perf/tests/shell/stat_all_metricgroups.sh b/tools/perf/tests/shell/stat_all_metricgroups.sh
index cb35e488809a..55ef9c9ded2d 100755
--- a/tools/perf/tests/shell/stat_all_metricgroups.sh
+++ b/tools/perf/tests/shell/stat_all_metricgroups.sh
@@ -4,9 +4,21 @@
set -e
-for m in $(perf list --raw-dump metricgroups); do
+ParanoidAndNotRoot()
+{
+ [ "$(id -u)" != 0 ] && [ "$(cat /proc/sys/kernel/perf_event_paranoid)" -gt $1 ]
+}
+
+system_wide_flag="-a"
+if ParanoidAndNotRoot 0
+then
+ system_wide_flag=""
+fi
+
+for m in $(perf list --raw-dump metricgroups)
+do
echo "Testing $m"
- perf stat -M "$m" -a true
+ perf stat -M "$m" $system_wide_flag sleep 0.01
done
exit 0
diff --git a/tools/perf/tests/shell/test_arm_coresight.sh b/tools/perf/tests/shell/test_arm_coresight.sh
index f1bf5621160f..65dd85207125 100755
--- a/tools/perf/tests/shell/test_arm_coresight.sh
+++ b/tools/perf/tests/shell/test_arm_coresight.sh
@@ -136,7 +136,9 @@ arm_cs_iterate_devices() {
arm_cs_etm_traverse_path_test() {
# Iterate for every ETM device
- for dev in /sys/bus/coresight/devices/etm*; do
+ for dev in /sys/bus/event_source/devices/cs_etm/cpu*; do
+ # Canonicalize the path
+ dev=`readlink -f $dev`
# Find the ETM device belonging to which CPU
cpu=`cat $dev/cpu`
diff --git a/tools/perf/tests/shell/test_intel_pt.sh b/tools/perf/tests/shell/test_intel_pt.sh
index 3a8b9bffa022..723ec501f99a 100755
--- a/tools/perf/tests/shell/test_intel_pt.sh
+++ b/tools/perf/tests/shell/test_intel_pt.sh
@@ -8,6 +8,7 @@ set -e
perf list | grep -q 'intel_pt//' || exit 2
shelldir=$(dirname "$0")
+# shellcheck source=lib/waiting.sh
. "${shelldir}"/lib/waiting.sh
skip_cnt=0
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
index 4014487cf4d9..3146a1eece07 100755
--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
@@ -10,6 +10,7 @@
# SPDX-License-Identifier: GPL-2.0
# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
+# shellcheck source=lib/probe.sh
. "$(dirname $0)"/lib/probe.sh
skip_if_no_perf_probe || exit 2
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index f33cfc3c19a4..b394f3ac2d66 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -145,6 +145,7 @@ DECLARE_SUITE(dlfilter);
DECLARE_SUITE(sigtrap);
DECLARE_SUITE(event_groups);
DECLARE_SUITE(symbols);
+DECLARE_SUITE(util);
/*
* PowerPC and S390 do not support creation of instruction breakpoints using the
diff --git a/tools/perf/tests/thread-maps-share.c b/tools/perf/tests/thread-maps-share.c
index faf980b26252..7fa6f7c568e2 100644
--- a/tools/perf/tests/thread-maps-share.c
+++ b/tools/perf/tests/thread-maps-share.c
@@ -46,9 +46,9 @@ static int test__thread_maps_share(struct test_suite *test __maybe_unused, int s
TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(maps)), 4);
/* test the maps pointer is shared */
- TEST_ASSERT_VAL("maps don't match", RC_CHK_ACCESS(maps) == RC_CHK_ACCESS(thread__maps(t1)));
- TEST_ASSERT_VAL("maps don't match", RC_CHK_ACCESS(maps) == RC_CHK_ACCESS(thread__maps(t2)));
- TEST_ASSERT_VAL("maps don't match", RC_CHK_ACCESS(maps) == RC_CHK_ACCESS(thread__maps(t3)));
+ TEST_ASSERT_VAL("maps don't match", RC_CHK_EQUAL(maps, thread__maps(t1)));
+ TEST_ASSERT_VAL("maps don't match", RC_CHK_EQUAL(maps, thread__maps(t2)));
+ TEST_ASSERT_VAL("maps don't match", RC_CHK_EQUAL(maps, thread__maps(t3)));
/*
* Verify the other leader was created by previous call.
@@ -73,8 +73,7 @@ static int test__thread_maps_share(struct test_suite *test __maybe_unused, int s
other_maps = thread__maps(other);
TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(other_maps)), 2);
- TEST_ASSERT_VAL("maps don't match", RC_CHK_ACCESS(other_maps) ==
- RC_CHK_ACCESS(thread__maps(other_leader)));
+ TEST_ASSERT_VAL("maps don't match", RC_CHK_EQUAL(other_maps, thread__maps(other_leader)));
/* release thread group */
thread__put(t3);
diff --git a/tools/perf/tests/util.c b/tools/perf/tests/util.c
new file mode 100644
index 000000000000..6366db5cbf8c
--- /dev/null
+++ b/tools/perf/tests/util.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "tests.h"
+#include "util/debug.h"
+
+#include <linux/compiler.h>
+#include <stdlib.h>
+#include <string2.h>
+
+static int test_strreplace(char needle, const char *haystack,
+ const char *replace, const char *expected)
+{
+ char *new = strreplace_chars(needle, haystack, replace);
+ int ret = strcmp(new, expected);
+
+ free(new);
+ return ret == 0;
+}
+
+static int test__util(struct test_suite *t __maybe_unused, int subtest __maybe_unused)
+{
+ TEST_ASSERT_VAL("empty string", test_strreplace(' ', "", "123", ""));
+ TEST_ASSERT_VAL("no match", test_strreplace('5', "123", "4", "123"));
+ TEST_ASSERT_VAL("replace 1", test_strreplace('3', "123", "4", "124"));
+ TEST_ASSERT_VAL("replace 2", test_strreplace('a', "abcabc", "ef", "efbcefbc"));
+ TEST_ASSERT_VAL("replace long", test_strreplace('a', "abcabc", "longlong",
+ "longlongbclonglongbc"));
+
+ return 0;
+}
+
+DEFINE_SUITE("util", util);
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 70db5a717905..f4812b226818 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -2416,12 +2416,12 @@ close_file_and_continue:
struct popup_action {
unsigned long time;
struct thread *thread;
+ struct evsel *evsel;
+ int (*fn)(struct hist_browser *browser, struct popup_action *act);
struct map_symbol ms;
int socket;
- struct evsel *evsel;
enum rstype rstype;
- int (*fn)(struct hist_browser *browser, struct popup_action *act);
};
static int
@@ -3302,7 +3302,7 @@ do_hotkey: // key came straight from options ui__popup_menu()
&options[nr_options],
&bi->to.ms,
bi->to.al_addr);
- } else {
+ } else if (browser->he_selection) {
nr_options += add_annotate_opt(browser,
&actions[nr_options],
&options[nr_options],
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 2f76230958ad..fb661c48992f 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -49,6 +49,7 @@ perf-y += dso.o
perf-y += dsos.o
perf-y += symbol.o
perf-y += symbol_fprintf.o
+perf-y += map_symbol.o
perf-y += color.o
perf-y += color_config.o
perf-y += metricgroup.o
@@ -165,6 +166,7 @@ endif
ifeq ($(CONFIG_LIBTRACEEVENT),y)
perf-$(CONFIG_PERF_BPF_SKEL) += bpf_kwork.o
+ perf-$(CONFIG_PERF_BPF_SKEL) += bpf_kwork_top.o
endif
perf-$(CONFIG_LIBELF) += symbol-elf.o
diff --git a/tools/perf/util/arm-spe.h b/tools/perf/util/arm-spe.h
index 98d3235781c3..4f4900c18f3e 100644
--- a/tools/perf/util/arm-spe.h
+++ b/tools/perf/util/arm-spe.h
@@ -27,5 +27,7 @@ struct auxtrace_record *arm_spe_recording_init(int *err,
int arm_spe_process_auxtrace_info(union perf_event *event,
struct perf_session *session);
-struct perf_event_attr *arm_spe_pmu_default_config(struct perf_pmu *arm_spe_pmu);
+void arm_spe_pmu_default_config(const struct perf_pmu *arm_spe_pmu,
+ struct perf_event_attr *attr);
+
#endif
diff --git a/tools/perf/util/bpf-filter.y b/tools/perf/util/bpf-filter.y
index 5dfa948fc986..0e4d6de3c2ad 100644
--- a/tools/perf/util/bpf-filter.y
+++ b/tools/perf/util/bpf-filter.y
@@ -3,6 +3,10 @@
%{
+#ifndef NDEBUG
+#define YYDEBUG 1
+#endif
+
#include <stdio.h>
#include <string.h>
#include <linux/compiler.h>
diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
index 6732cbbcf9b3..7f9b0e46e008 100644
--- a/tools/perf/util/bpf_counter.c
+++ b/tools/perf/util/bpf_counter.c
@@ -104,7 +104,7 @@ static int bpf_program_profiler_load_one(struct evsel *evsel, u32 prog_id)
struct bpf_prog_profiler_bpf *skel;
struct bpf_counter *counter;
struct bpf_program *prog;
- char *prog_name;
+ char *prog_name = NULL;
int prog_fd;
int err;
@@ -155,10 +155,12 @@ static int bpf_program_profiler_load_one(struct evsel *evsel, u32 prog_id)
assert(skel != NULL);
counter->skel = skel;
list_add(&counter->list, &evsel->bpf_counter_list);
+ free(prog_name);
close(prog_fd);
return 0;
err_out:
bpf_prog_profiler_bpf__destroy(skel);
+ free(prog_name);
free(counter);
close(prog_fd);
return -1;
@@ -180,6 +182,7 @@ static int bpf_program_profiler__load(struct evsel *evsel, struct target *target
(*p != '\0' && *p != ',')) {
pr_err("Failed to parse bpf prog ids %s\n",
target->bpf_str);
+ free(bpf_str_);
return -1;
}
diff --git a/tools/perf/util/bpf_kwork_top.c b/tools/perf/util/bpf_kwork_top.c
new file mode 100644
index 000000000000..035e02272790
--- /dev/null
+++ b/tools/perf/util/bpf_kwork_top.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bpf_kwork_top.c
+ *
+ * Copyright (c) 2022 Huawei Inc, Yang Jihong <yangjihong1@huawei.com>
+ */
+
+#include <time.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <linux/time64.h>
+
+#include "util/debug.h"
+#include "util/evsel.h"
+#include "util/kwork.h"
+
+#include <bpf/bpf.h>
+#include <perf/cpumap.h>
+
+#include "util/bpf_skel/kwork_top.skel.h"
+
+/*
+ * This should be in sync with "util/kwork_top.bpf.c"
+ */
+#define MAX_COMMAND_LEN 16
+
+struct time_data {
+ __u64 timestamp;
+};
+
+struct work_data {
+ __u64 runtime;
+};
+
+struct task_data {
+ __u32 tgid;
+ __u32 is_kthread;
+ char comm[MAX_COMMAND_LEN];
+};
+
+struct work_key {
+ __u32 type;
+ __u32 pid;
+ __u64 task_p;
+};
+
+struct task_key {
+ __u32 pid;
+ __u32 cpu;
+};
+
+struct kwork_class_bpf {
+ struct kwork_class *class;
+ void (*load_prepare)(void);
+};
+
+static struct kwork_top_bpf *skel;
+
+void perf_kwork__top_start(void)
+{
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ skel->bss->from_timestamp = (u64)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
+ skel->bss->enabled = 1;
+ pr_debug("perf kwork top start at: %lld\n", skel->bss->from_timestamp);
+}
+
+void perf_kwork__top_finish(void)
+{
+ struct timespec ts;
+
+ skel->bss->enabled = 0;
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ skel->bss->to_timestamp = (u64)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
+ pr_debug("perf kwork top finish at: %lld\n", skel->bss->to_timestamp);
+}
+
+static void irq_load_prepare(void)
+{
+ bpf_program__set_autoload(skel->progs.on_irq_handler_entry, true);
+ bpf_program__set_autoload(skel->progs.on_irq_handler_exit, true);
+}
+
+static struct kwork_class_bpf kwork_irq_bpf = {
+ .load_prepare = irq_load_prepare,
+};
+
+static void softirq_load_prepare(void)
+{
+ bpf_program__set_autoload(skel->progs.on_softirq_entry, true);
+ bpf_program__set_autoload(skel->progs.on_softirq_exit, true);
+}
+
+static struct kwork_class_bpf kwork_softirq_bpf = {
+ .load_prepare = softirq_load_prepare,
+};
+
+static void sched_load_prepare(void)
+{
+ bpf_program__set_autoload(skel->progs.on_switch, true);
+}
+
+static struct kwork_class_bpf kwork_sched_bpf = {
+ .load_prepare = sched_load_prepare,
+};
+
+static struct kwork_class_bpf *
+kwork_class_bpf_supported_list[KWORK_CLASS_MAX] = {
+ [KWORK_CLASS_IRQ] = &kwork_irq_bpf,
+ [KWORK_CLASS_SOFTIRQ] = &kwork_softirq_bpf,
+ [KWORK_CLASS_SCHED] = &kwork_sched_bpf,
+};
+
+static bool valid_kwork_class_type(enum kwork_class_type type)
+{
+ return type >= 0 && type < KWORK_CLASS_MAX;
+}
+
+static int setup_filters(struct perf_kwork *kwork)
+{
+ u8 val = 1;
+ int i, nr_cpus, fd;
+ struct perf_cpu_map *map;
+
+ if (kwork->cpu_list) {
+ fd = bpf_map__fd(skel->maps.kwork_top_cpu_filter);
+ if (fd < 0) {
+ pr_debug("Invalid cpu filter fd\n");
+ return -1;
+ }
+
+ map = perf_cpu_map__new(kwork->cpu_list);
+ if (!map) {
+ pr_debug("Invalid cpu_list\n");
+ return -1;
+ }
+
+ nr_cpus = libbpf_num_possible_cpus();
+ for (i = 0; i < perf_cpu_map__nr(map); i++) {
+ struct perf_cpu cpu = perf_cpu_map__cpu(map, i);
+
+ if (cpu.cpu >= nr_cpus) {
+ perf_cpu_map__put(map);
+ pr_err("Requested cpu %d too large\n", cpu.cpu);
+ return -1;
+ }
+ bpf_map_update_elem(fd, &cpu.cpu, &val, BPF_ANY);
+ }
+ perf_cpu_map__put(map);
+
+ skel->bss->has_cpu_filter = 1;
+ }
+
+ return 0;
+}
+
+int perf_kwork__top_prepare_bpf(struct perf_kwork *kwork __maybe_unused)
+{
+ struct bpf_program *prog;
+ struct kwork_class *class;
+ struct kwork_class_bpf *class_bpf;
+ enum kwork_class_type type;
+
+ skel = kwork_top_bpf__open();
+ if (!skel) {
+ pr_debug("Failed to open kwork top skeleton\n");
+ return -1;
+ }
+
+ /*
+ * set all progs to non-autoload,
+ * then set corresponding progs according to config
+ */
+ bpf_object__for_each_program(prog, skel->obj)
+ bpf_program__set_autoload(prog, false);
+
+ list_for_each_entry(class, &kwork->class_list, list) {
+ type = class->type;
+ if (!valid_kwork_class_type(type) ||
+ !kwork_class_bpf_supported_list[type]) {
+ pr_err("Unsupported bpf trace class %s\n", class->name);
+ goto out;
+ }
+
+ class_bpf = kwork_class_bpf_supported_list[type];
+ class_bpf->class = class;
+
+ if (class_bpf->load_prepare)
+ class_bpf->load_prepare();
+ }
+
+ if (kwork_top_bpf__load(skel)) {
+ pr_debug("Failed to load kwork top skeleton\n");
+ goto out;
+ }
+
+ if (setup_filters(kwork))
+ goto out;
+
+ if (kwork_top_bpf__attach(skel)) {
+ pr_debug("Failed to attach kwork top skeleton\n");
+ goto out;
+ }
+
+ return 0;
+
+out:
+ kwork_top_bpf__destroy(skel);
+ return -1;
+}
+
+static void read_task_info(struct kwork_work *work)
+{
+ int fd;
+ struct task_data data;
+ struct task_key key = {
+ .pid = work->id,
+ .cpu = work->cpu,
+ };
+
+ fd = bpf_map__fd(skel->maps.kwork_top_tasks);
+ if (fd < 0) {
+ pr_debug("Invalid top tasks map fd\n");
+ return;
+ }
+
+ if (!bpf_map_lookup_elem(fd, &key, &data)) {
+ work->tgid = data.tgid;
+ work->is_kthread = data.is_kthread;
+ work->name = strdup(data.comm);
+ }
+}
+static int add_work(struct perf_kwork *kwork, struct work_key *key,
+ struct work_data *data, int cpu)
+{
+ struct kwork_class_bpf *bpf_trace;
+ struct kwork_work *work;
+ struct kwork_work tmp = {
+ .id = key->pid,
+ .cpu = cpu,
+ .name = NULL,
+ };
+ enum kwork_class_type type = key->type;
+
+ if (!valid_kwork_class_type(type)) {
+ pr_debug("Invalid class type %d to add work\n", type);
+ return -1;
+ }
+
+ bpf_trace = kwork_class_bpf_supported_list[type];
+ tmp.class = bpf_trace->class;
+
+ work = perf_kwork_add_work(kwork, tmp.class, &tmp);
+ if (!work)
+ return -1;
+
+ work->total_runtime = data->runtime;
+ read_task_info(work);
+
+ return 0;
+}
+
+int perf_kwork__top_read_bpf(struct perf_kwork *kwork)
+{
+ int i, fd, nr_cpus;
+ struct work_data *data;
+ struct work_key key, prev;
+
+ fd = bpf_map__fd(skel->maps.kwork_top_works);
+ if (fd < 0) {
+ pr_debug("Invalid top runtime fd\n");
+ return -1;
+ }
+
+ nr_cpus = libbpf_num_possible_cpus();
+ data = calloc(nr_cpus, sizeof(struct work_data));
+ if (!data)
+ return -1;
+
+ memset(&prev, 0, sizeof(prev));
+ while (!bpf_map_get_next_key(fd, &prev, &key)) {
+ if ((bpf_map_lookup_elem(fd, &key, data)) != 0) {
+ pr_debug("Failed to lookup top elem\n");
+ return -1;
+ }
+
+ for (i = 0; i < nr_cpus; i++) {
+ if (data[i].runtime == 0)
+ continue;
+
+ if (add_work(kwork, &key, &data[i], i))
+ return -1;
+ }
+ prev = key;
+ }
+ free(data);
+
+ return 0;
+}
+
+void perf_kwork__top_cleanup_bpf(void)
+{
+ kwork_top_bpf__destroy(skel);
+}
diff --git a/tools/perf/util/bpf_lock_contention.c b/tools/perf/util/bpf_lock_contention.c
index e7dddf0127bc..e105245eb905 100644
--- a/tools/perf/util/bpf_lock_contention.c
+++ b/tools/perf/util/bpf_lock_contention.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include "util/cgroup.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/machine.h"
@@ -20,7 +21,7 @@ static struct lock_contention_bpf *skel;
int lock_contention_prepare(struct lock_contention *con)
{
int i, fd;
- int ncpus = 1, ntasks = 1, ntypes = 1, naddrs = 1;
+ int ncpus = 1, ntasks = 1, ntypes = 1, naddrs = 1, ncgrps = 1;
struct evlist *evlist = con->evlist;
struct target *target = con->target;
@@ -50,6 +51,8 @@ int lock_contention_prepare(struct lock_contention *con)
ntasks = perf_thread_map__nr(evlist->core.threads);
if (con->filters->nr_types)
ntypes = con->filters->nr_types;
+ if (con->filters->nr_cgrps)
+ ncgrps = con->filters->nr_cgrps;
/* resolve lock name filters to addr */
if (con->filters->nr_syms) {
@@ -84,6 +87,7 @@ int lock_contention_prepare(struct lock_contention *con)
bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
bpf_map__set_max_entries(skel->maps.type_filter, ntypes);
bpf_map__set_max_entries(skel->maps.addr_filter, naddrs);
+ bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);
if (lock_contention_bpf__load(skel) < 0) {
pr_err("Failed to load lock-contention BPF skeleton\n");
@@ -145,12 +149,29 @@ int lock_contention_prepare(struct lock_contention *con)
bpf_map_update_elem(fd, &con->filters->addrs[i], &val, BPF_ANY);
}
+ if (con->filters->nr_cgrps) {
+ u8 val = 1;
+
+ skel->bss->has_cgroup = 1;
+ fd = bpf_map__fd(skel->maps.cgroup_filter);
+
+ for (i = 0; i < con->filters->nr_cgrps; i++)
+ bpf_map_update_elem(fd, &con->filters->cgrps[i], &val, BPF_ANY);
+ }
+
/* these don't work well if in the rodata section */
skel->bss->stack_skip = con->stack_skip;
skel->bss->aggr_mode = con->aggr_mode;
skel->bss->needs_callstack = con->save_callstack;
skel->bss->lock_owner = con->owner;
+ if (con->aggr_mode == LOCK_AGGR_CGROUP) {
+ if (cgroup_is_v2("perf_event"))
+ skel->bss->use_cgroup_v2 = 1;
+
+ read_all_cgroups(&con->cgroups);
+ }
+
bpf_program__set_autoload(skel->progs.collect_lock_syms, false);
lock_contention_bpf__attach(skel);
@@ -209,12 +230,12 @@ static const char *lock_contention_get_name(struct lock_contention *con,
return "siglock";
/* global locks with symbols */
- sym = machine__find_kernel_symbol(machine, key->lock_addr, &kmap);
+ sym = machine__find_kernel_symbol(machine, key->lock_addr_or_cgroup, &kmap);
if (sym)
return sym->name;
/* try semi-global locks collected separately */
- if (!bpf_map_lookup_elem(lock_fd, &key->lock_addr, &flags)) {
+ if (!bpf_map_lookup_elem(lock_fd, &key->lock_addr_or_cgroup, &flags)) {
if (flags == LOCK_CLASS_RQLOCK)
return "rq_lock";
}
@@ -222,6 +243,17 @@ static const char *lock_contention_get_name(struct lock_contention *con,
return "";
}
+ if (con->aggr_mode == LOCK_AGGR_CGROUP) {
+ u64 cgrp_id = key->lock_addr_or_cgroup;
+ struct cgroup *cgrp = __cgroup__find(&con->cgroups, cgrp_id);
+
+ if (cgrp)
+ return cgrp->name;
+
+ snprintf(name_buf, sizeof(name_buf), "cgroup:%lu", cgrp_id);
+ return name_buf;
+ }
+
/* LOCK_AGGR_CALLER: skip lock internal functions */
while (machine__is_lock_function(machine, stack_trace[idx]) &&
idx < con->max_stack - 1)
@@ -313,7 +345,8 @@ int lock_contention_read(struct lock_contention *con)
ls_key = key.pid;
break;
case LOCK_AGGR_ADDR:
- ls_key = key.lock_addr;
+ case LOCK_AGGR_CGROUP:
+ ls_key = key.lock_addr_or_cgroup;
break;
default:
goto next;
@@ -364,12 +397,20 @@ next:
return err;
}
-int lock_contention_finish(void)
+int lock_contention_finish(struct lock_contention *con)
{
if (skel) {
skel->bss->enabled = 0;
lock_contention_bpf__destroy(skel);
}
+ while (!RB_EMPTY_ROOT(&con->cgroups)) {
+ struct rb_node *node = rb_first(&con->cgroups);
+ struct cgroup *cgrp = rb_entry(node, struct cgroup, node);
+
+ rb_erase(node, &con->cgroups);
+ cgroup__put(cgrp);
+ }
+
return 0;
}
diff --git a/tools/perf/util/bpf_off_cpu.c b/tools/perf/util/bpf_off_cpu.c
index 01f70b8e705a..6af36142dc5a 100644
--- a/tools/perf/util/bpf_off_cpu.c
+++ b/tools/perf/util/bpf_off_cpu.c
@@ -98,28 +98,31 @@ static void off_cpu_finish(void *arg __maybe_unused)
/* v5.18 kernel added prev_state arg, so it needs to check the signature */
static void check_sched_switch_args(void)
{
- const struct btf *btf = bpf_object__btf(skel->obj);
+ struct btf *btf = btf__load_vmlinux_btf();
const struct btf_type *t1, *t2, *t3;
u32 type_id;
type_id = btf__find_by_name_kind(btf, "btf_trace_sched_switch",
BTF_KIND_TYPEDEF);
if ((s32)type_id < 0)
- return;
+ goto cleanup;
t1 = btf__type_by_id(btf, type_id);
if (t1 == NULL)
- return;
+ goto cleanup;
t2 = btf__type_by_id(btf, t1->type);
if (t2 == NULL || !btf_is_ptr(t2))
- return;
+ goto cleanup;
t3 = btf__type_by_id(btf, t2->type);
- if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 4) {
+ /* btf_trace func proto has one more argument for the context */
+ if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 5) {
/* new format: pass prev_state as 4th arg */
skel->rodata->has_prev_state = true;
}
+cleanup:
+ btf__free(btf);
}
int off_cpu_prepare(struct evlist *evlist, struct target *target,
diff --git a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
index 939ec769bf4a..52c270330ae0 100644
--- a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
+++ b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
@@ -153,7 +153,7 @@ static inline
unsigned int augmented_arg__read_str(struct augmented_arg *augmented_arg, const void *arg, unsigned int arg_len)
{
unsigned int augmented_len = sizeof(*augmented_arg);
- int string_len = bpf_probe_read_str(&augmented_arg->value, arg_len, arg);
+ int string_len = bpf_probe_read_user_str(&augmented_arg->value, arg_len, arg);
augmented_arg->size = augmented_arg->err = 0;
/*
@@ -203,7 +203,7 @@ int sys_enter_connect(struct syscall_enter_args *args)
_Static_assert(is_power_of_2(sizeof(augmented_args->saddr)), "sizeof(augmented_args->saddr) needs to be a power of two");
socklen &= sizeof(augmented_args->saddr) - 1;
- bpf_probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
+ bpf_probe_read_user(&augmented_args->saddr, socklen, sockaddr_arg);
return augmented__output(args, augmented_args, len + socklen);
}
@@ -221,7 +221,7 @@ int sys_enter_sendto(struct syscall_enter_args *args)
socklen &= sizeof(augmented_args->saddr) - 1;
- bpf_probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
+ bpf_probe_read_user(&augmented_args->saddr, socklen, sockaddr_arg);
return augmented__output(args, augmented_args, len + socklen);
}
@@ -311,7 +311,7 @@ int sys_enter_perf_event_open(struct syscall_enter_args *args)
if (augmented_args == NULL)
goto failure;
- if (bpf_probe_read(&augmented_args->__data, sizeof(*attr), attr) < 0)
+ if (bpf_probe_read_user(&augmented_args->__data, sizeof(*attr), attr) < 0)
goto failure;
attr_read = (const struct perf_event_attr_size *)augmented_args->__data;
@@ -325,7 +325,7 @@ int sys_enter_perf_event_open(struct syscall_enter_args *args)
goto failure;
// Now that we read attr->size and tested it against the size limits, read it completely
- if (bpf_probe_read(&augmented_args->__data, size, attr) < 0)
+ if (bpf_probe_read_user(&augmented_args->__data, size, attr) < 0)
goto failure;
return augmented__output(args, augmented_args, len + size);
@@ -347,7 +347,7 @@ int sys_enter_clock_nanosleep(struct syscall_enter_args *args)
if (size > sizeof(augmented_args->__data))
goto failure;
- bpf_probe_read(&augmented_args->__data, size, rqtp_arg);
+ bpf_probe_read_user(&augmented_args->__data, size, rqtp_arg);
return augmented__output(args, augmented_args, len + size);
failure:
@@ -385,7 +385,7 @@ int sys_enter(struct syscall_enter_args *args)
if (augmented_args == NULL)
return 1;
- bpf_probe_read(&augmented_args->args, sizeof(augmented_args->args), args);
+ bpf_probe_read_kernel(&augmented_args->args, sizeof(augmented_args->args), args);
/*
* Jump to syscall specific augmenter, even if the default one,
@@ -406,7 +406,7 @@ int sys_exit(struct syscall_exit_args *args)
if (pid_filter__has(&pids_filtered, getpid()))
return 0;
- bpf_probe_read(&exit_args, sizeof(exit_args), args);
+ bpf_probe_read_kernel(&exit_args, sizeof(exit_args), args);
/*
* Jump to syscall specific return augmenter, even if the default one,
* "!raw_syscalls:unaugmented" that will just return 1 to return the
diff --git a/tools/perf/util/bpf_skel/kwork_top.bpf.c b/tools/perf/util/bpf_skel/kwork_top.bpf.c
new file mode 100644
index 000000000000..84c15ccbab44
--- /dev/null
+++ b/tools/perf/util/bpf_skel/kwork_top.bpf.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (c) 2022, Huawei
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+/*
+ * This should be in sync with "util/kwork.h"
+ */
+enum kwork_class_type {
+ KWORK_CLASS_IRQ,
+ KWORK_CLASS_SOFTIRQ,
+ KWORK_CLASS_WORKQUEUE,
+ KWORK_CLASS_SCHED,
+ KWORK_CLASS_MAX,
+};
+
+#define MAX_ENTRIES 102400
+#define MAX_NR_CPUS 2048
+#define PF_KTHREAD 0x00200000
+#define MAX_COMMAND_LEN 16
+
+struct time_data {
+ __u64 timestamp;
+};
+
+struct work_data {
+ __u64 runtime;
+};
+
+struct task_data {
+ __u32 tgid;
+ __u32 is_kthread;
+ char comm[MAX_COMMAND_LEN];
+};
+
+struct work_key {
+ __u32 type;
+ __u32 pid;
+ __u64 task_p;
+};
+
+struct task_key {
+ __u32 pid;
+ __u32 cpu;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct time_data);
+} kwork_top_task_time SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __uint(key_size, sizeof(struct work_key));
+ __uint(value_size, sizeof(struct time_data));
+ __uint(max_entries, MAX_ENTRIES);
+} kwork_top_irq_time SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(struct task_key));
+ __uint(value_size, sizeof(struct task_data));
+ __uint(max_entries, MAX_ENTRIES);
+} kwork_top_tasks SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __uint(key_size, sizeof(struct work_key));
+ __uint(value_size, sizeof(struct work_data));
+ __uint(max_entries, MAX_ENTRIES);
+} kwork_top_works SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(u8));
+ __uint(max_entries, MAX_NR_CPUS);
+} kwork_top_cpu_filter SEC(".maps");
+
+int enabled = 0;
+
+int has_cpu_filter = 0;
+
+__u64 from_timestamp = 0;
+__u64 to_timestamp = 0;
+
+static __always_inline int cpu_is_filtered(__u32 cpu)
+{
+ __u8 *cpu_val;
+
+ if (has_cpu_filter) {
+ cpu_val = bpf_map_lookup_elem(&kwork_top_cpu_filter, &cpu);
+ if (!cpu_val)
+ return 1;
+ }
+
+ return 0;
+}
+
+static __always_inline void update_task_info(struct task_struct *task, __u32 cpu)
+{
+ struct task_key key = {
+ .pid = task->pid,
+ .cpu = cpu,
+ };
+
+ if (!bpf_map_lookup_elem(&kwork_top_tasks, &key)) {
+ struct task_data data = {
+ .tgid = task->tgid,
+ .is_kthread = task->flags & PF_KTHREAD ? 1 : 0,
+ };
+ BPF_CORE_READ_STR_INTO(&data.comm, task, comm);
+
+ bpf_map_update_elem(&kwork_top_tasks, &key, &data, BPF_ANY);
+ }
+}
+
+static __always_inline void update_work(struct work_key *key, __u64 delta)
+{
+ struct work_data *data;
+
+ data = bpf_map_lookup_elem(&kwork_top_works, key);
+ if (data) {
+ data->runtime += delta;
+ } else {
+ struct work_data new_data = {
+ .runtime = delta,
+ };
+
+ bpf_map_update_elem(&kwork_top_works, key, &new_data, BPF_ANY);
+ }
+}
+
+static void on_sched_out(struct task_struct *task, __u64 ts, __u32 cpu)
+{
+ __u64 delta;
+ struct time_data *pelem;
+
+ pelem = bpf_task_storage_get(&kwork_top_task_time, task, NULL, 0);
+ if (pelem)
+ delta = ts - pelem->timestamp;
+ else
+ delta = ts - from_timestamp;
+
+ struct work_key key = {
+ .type = KWORK_CLASS_SCHED,
+ .pid = task->pid,
+ .task_p = (__u64)task,
+ };
+
+ update_work(&key, delta);
+ update_task_info(task, cpu);
+}
+
+static void on_sched_in(struct task_struct *task, __u64 ts)
+{
+ struct time_data *pelem;
+
+ pelem = bpf_task_storage_get(&kwork_top_task_time, task, NULL,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (pelem)
+ pelem->timestamp = ts;
+}
+
+SEC("tp_btf/sched_switch")
+int on_switch(u64 *ctx)
+{
+ struct task_struct *prev, *next;
+
+ prev = (struct task_struct *)ctx[1];
+ next = (struct task_struct *)ctx[2];
+
+ if (!enabled)
+ return 0;
+
+ __u32 cpu = bpf_get_smp_processor_id();
+
+ if (cpu_is_filtered(cpu))
+ return 0;
+
+ __u64 ts = bpf_ktime_get_ns();
+
+ on_sched_out(prev, ts, cpu);
+ on_sched_in(next, ts);
+
+ return 0;
+}
+
+SEC("tp_btf/irq_handler_entry")
+int on_irq_handler_entry(u64 *cxt)
+{
+ struct task_struct *task;
+
+ if (!enabled)
+ return 0;
+
+ __u32 cpu = bpf_get_smp_processor_id();
+
+ if (cpu_is_filtered(cpu))
+ return 0;
+
+ __u64 ts = bpf_ktime_get_ns();
+
+ task = (struct task_struct *)bpf_get_current_task();
+ if (!task)
+ return 0;
+
+ struct work_key key = {
+ .type = KWORK_CLASS_IRQ,
+ .pid = BPF_CORE_READ(task, pid),
+ .task_p = (__u64)task,
+ };
+
+ struct time_data data = {
+ .timestamp = ts,
+ };
+
+ bpf_map_update_elem(&kwork_top_irq_time, &key, &data, BPF_ANY);
+
+ return 0;
+}
+
+SEC("tp_btf/irq_handler_exit")
+int on_irq_handler_exit(u64 *cxt)
+{
+ __u64 delta;
+ struct task_struct *task;
+ struct time_data *pelem;
+
+ if (!enabled)
+ return 0;
+
+ __u32 cpu = bpf_get_smp_processor_id();
+
+ if (cpu_is_filtered(cpu))
+ return 0;
+
+ __u64 ts = bpf_ktime_get_ns();
+
+ task = (struct task_struct *)bpf_get_current_task();
+ if (!task)
+ return 0;
+
+ struct work_key key = {
+ .type = KWORK_CLASS_IRQ,
+ .pid = BPF_CORE_READ(task, pid),
+ .task_p = (__u64)task,
+ };
+
+ pelem = bpf_map_lookup_elem(&kwork_top_irq_time, &key);
+ if (pelem && pelem->timestamp != 0)
+ delta = ts - pelem->timestamp;
+ else
+ delta = ts - from_timestamp;
+
+ update_work(&key, delta);
+
+ return 0;
+}
+
+SEC("tp_btf/softirq_entry")
+int on_softirq_entry(u64 *cxt)
+{
+ struct task_struct *task;
+
+ if (!enabled)
+ return 0;
+
+ __u32 cpu = bpf_get_smp_processor_id();
+
+ if (cpu_is_filtered(cpu))
+ return 0;
+
+ __u64 ts = bpf_ktime_get_ns();
+
+ task = (struct task_struct *)bpf_get_current_task();
+ if (!task)
+ return 0;
+
+ struct work_key key = {
+ .type = KWORK_CLASS_SOFTIRQ,
+ .pid = BPF_CORE_READ(task, pid),
+ .task_p = (__u64)task,
+ };
+
+ struct time_data data = {
+ .timestamp = ts,
+ };
+
+ bpf_map_update_elem(&kwork_top_irq_time, &key, &data, BPF_ANY);
+
+ return 0;
+}
+
+SEC("tp_btf/softirq_exit")
+int on_softirq_exit(u64 *cxt)
+{
+ __u64 delta;
+ struct task_struct *task;
+ struct time_data *pelem;
+
+ if (!enabled)
+ return 0;
+
+ __u32 cpu = bpf_get_smp_processor_id();
+
+ if (cpu_is_filtered(cpu))
+ return 0;
+
+ __u64 ts = bpf_ktime_get_ns();
+
+ task = (struct task_struct *)bpf_get_current_task();
+ if (!task)
+ return 0;
+
+ struct work_key key = {
+ .type = KWORK_CLASS_SOFTIRQ,
+ .pid = BPF_CORE_READ(task, pid),
+ .task_p = (__u64)task,
+ };
+
+ pelem = bpf_map_lookup_elem(&kwork_top_irq_time, &key);
+ if (pelem)
+ delta = ts - pelem->timestamp;
+ else
+ delta = ts - from_timestamp;
+
+ update_work(&key, delta);
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "Dual BSD/GPL";
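kwork_top_works and kwork_top_irq_time are BPF_MAP_TYPE_PERCPU_HASH maps, so a user-space lookup returns one struct work_data slot per possible CPU. A hedged sketch of summing them with libbpf (the real consumer is the perf kwork tool and may differ; assumes struct work_key/work_data are mirrored in user space):

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static __u64 work_total_runtime(int map_fd, const struct work_key *key)
{
	int ncpus = libbpf_num_possible_cpus();
	__u64 total = 0;

	if (ncpus <= 0)
		return 0;

	struct work_data vals[ncpus];	/* one 8-byte value per possible CPU */

	if (bpf_map_lookup_elem(map_fd, key, vals) < 0)
		return 0;

	for (int i = 0; i < ncpus; i++)
		total += vals[i].runtime;

	return total;
}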
diff --git a/tools/perf/util/bpf_skel/lock_contention.bpf.c b/tools/perf/util/bpf_skel/lock_contention.bpf.c
index 8d3cfbb3cc65..95cd8414f6ef 100644
--- a/tools/perf/util/bpf_skel/lock_contention.bpf.c
+++ b/tools/perf/util/bpf_skel/lock_contention.bpf.c
@@ -42,6 +42,14 @@ struct {
__uint(max_entries, MAX_ENTRIES);
} tstamp SEC(".maps");
+/* maintain per-CPU timestamp at the beginning of contention */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct tstamp_data));
+ __uint(max_entries, 1);
+} tstamp_cpu SEC(".maps");
+
/* actual lock contention statistics */
struct {
__uint(type, BPF_MAP_TYPE_HASH);
@@ -92,6 +100,13 @@ struct {
__uint(max_entries, 1);
} addr_filter SEC(".maps");
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(__u64));
+ __uint(value_size, sizeof(__u8));
+ __uint(max_entries, 1);
+} cgroup_filter SEC(".maps");
+
struct rw_semaphore___old {
struct task_struct *owner;
} __attribute__((preserve_access_index));
@@ -114,10 +129,14 @@ int has_cpu;
int has_task;
int has_type;
int has_addr;
+int has_cgroup;
int needs_callstack;
int stack_skip;
int lock_owner;
+int use_cgroup_v2;
+int perf_subsys_id = -1;
+
/* determine the key of lock stat */
int aggr_mode;
@@ -130,6 +149,29 @@ int data_fail;
int task_map_full;
int data_map_full;
+static inline __u64 get_current_cgroup_id(void)
+{
+ struct task_struct *task;
+ struct cgroup *cgrp;
+
+ if (use_cgroup_v2)
+ return bpf_get_current_cgroup_id();
+
+ task = bpf_get_current_task_btf();
+
+ if (perf_subsys_id == -1) {
+#if __has_builtin(__builtin_preserve_enum_value)
+ perf_subsys_id = bpf_core_enum_value(enum cgroup_subsys_id,
+ perf_event_cgrp_id);
+#else
+ perf_subsys_id = perf_event_cgrp_id;
+#endif
+ }
+
+ cgrp = BPF_CORE_READ(task, cgroups, subsys[perf_subsys_id], cgroup);
+ return BPF_CORE_READ(cgrp, kn, id);
+}
+
static inline int can_record(u64 *ctx)
{
if (has_cpu) {
@@ -168,6 +210,15 @@ static inline int can_record(u64 *ctx)
return 0;
}
+ if (has_cgroup) {
+ __u8 *ok;
+ __u64 cgrp = get_current_cgroup_id();
+
+ ok = bpf_map_lookup_elem(&cgroup_filter, &cgrp);
+ if (!ok)
+ return 0;
+ }
+
return 1;
}
@@ -268,30 +319,57 @@ static inline __u32 check_lock_type(__u64 lock, __u32 flags)
return 0;
}
-SEC("tp_btf/contention_begin")
-int contention_begin(u64 *ctx)
+static inline struct tstamp_data *get_tstamp_elem(__u32 flags)
{
__u32 pid;
struct tstamp_data *pelem;
- if (!enabled || !can_record(ctx))
- return 0;
+ /* Use per-cpu array map for spinlock and rwlock */
+ if (flags == (LCB_F_SPIN | LCB_F_READ) || flags == LCB_F_SPIN ||
+ flags == (LCB_F_SPIN | LCB_F_WRITE)) {
+ __u32 idx = 0;
+
+ pelem = bpf_map_lookup_elem(&tstamp_cpu, &idx);
+ /* Do not update the element for nested locks */
+ if (pelem && pelem->lock)
+ pelem = NULL;
+ return pelem;
+ }
pid = bpf_get_current_pid_tgid();
pelem = bpf_map_lookup_elem(&tstamp, &pid);
+ /* Do not update the element for nested locks */
if (pelem && pelem->lock)
- return 0;
+ return NULL;
if (pelem == NULL) {
struct tstamp_data zero = {};
- bpf_map_update_elem(&tstamp, &pid, &zero, BPF_ANY);
+ if (bpf_map_update_elem(&tstamp, &pid, &zero, BPF_NOEXIST) < 0) {
+ __sync_fetch_and_add(&task_fail, 1);
+ return NULL;
+ }
+
pelem = bpf_map_lookup_elem(&tstamp, &pid);
if (pelem == NULL) {
__sync_fetch_and_add(&task_fail, 1);
- return 0;
+ return NULL;
}
}
+ return pelem;
+}
+
+SEC("tp_btf/contention_begin")
+int contention_begin(u64 *ctx)
+{
+ struct tstamp_data *pelem;
+
+ if (!enabled || !can_record(ctx))
+ return 0;
+
+ pelem = get_tstamp_elem(ctx[1]);
+ if (pelem == NULL)
+ return 0;
pelem->timestamp = bpf_ktime_get_ns();
pelem->lock = (__u64)ctx[0];
@@ -330,23 +408,42 @@ int contention_begin(u64 *ctx)
SEC("tp_btf/contention_end")
int contention_end(u64 *ctx)
{
- __u32 pid;
+ __u32 pid = 0, idx = 0;
struct tstamp_data *pelem;
struct contention_key key = {};
struct contention_data *data;
__u64 duration;
+ bool need_delete = false;
if (!enabled)
return 0;
- pid = bpf_get_current_pid_tgid();
- pelem = bpf_map_lookup_elem(&tstamp, &pid);
- if (!pelem || pelem->lock != ctx[0])
- return 0;
+ /*
+ * For spinlock and rwlock, it needs to get the timestamp for the
+ * per-cpu map. However, contention_end does not have the flags
+ * so it cannot know whether it reads percpu or hash map.
+ *
+ * Try per-cpu map first and check if there's active contention.
+ * If it is, do not read hash map because it cannot go to sleeping
+ * locks before releasing the spinning locks.
+ */
+ pelem = bpf_map_lookup_elem(&tstamp_cpu, &idx);
+ if (pelem && pelem->lock) {
+ if (pelem->lock != ctx[0])
+ return 0;
+ } else {
+ pid = bpf_get_current_pid_tgid();
+ pelem = bpf_map_lookup_elem(&tstamp, &pid);
+ if (!pelem || pelem->lock != ctx[0])
+ return 0;
+ need_delete = true;
+ }
duration = bpf_ktime_get_ns() - pelem->timestamp;
if ((__s64)duration < 0) {
- bpf_map_delete_elem(&tstamp, &pid);
+ pelem->lock = 0;
+ if (need_delete)
+ bpf_map_delete_elem(&tstamp, &pid);
__sync_fetch_and_add(&time_fail, 1);
return 0;
}
@@ -358,16 +455,22 @@ int contention_end(u64 *ctx)
case LOCK_AGGR_TASK:
if (lock_owner)
key.pid = pelem->flags;
- else
+ else {
+ if (!need_delete)
+ pid = bpf_get_current_pid_tgid();
key.pid = pid;
+ }
if (needs_callstack)
key.stack_id = pelem->stack_id;
break;
case LOCK_AGGR_ADDR:
- key.lock_addr = pelem->lock;
+ key.lock_addr_or_cgroup = pelem->lock;
if (needs_callstack)
key.stack_id = pelem->stack_id;
break;
+ case LOCK_AGGR_CGROUP:
+ key.lock_addr_or_cgroup = get_current_cgroup_id();
+ break;
default:
/* should not happen */
return 0;
@@ -376,7 +479,9 @@ int contention_end(u64 *ctx)
data = bpf_map_lookup_elem(&lock_stat, &key);
if (!data) {
if (data_map_full) {
- bpf_map_delete_elem(&tstamp, &pid);
+ pelem->lock = 0;
+ if (need_delete)
+ bpf_map_delete_elem(&tstamp, &pid);
__sync_fetch_and_add(&data_fail, 1);
return 0;
}
@@ -399,7 +504,9 @@ int contention_end(u64 *ctx)
data_map_full = 1;
__sync_fetch_and_add(&data_fail, 1);
}
- bpf_map_delete_elem(&tstamp, &pid);
+ pelem->lock = 0;
+ if (need_delete)
+ bpf_map_delete_elem(&tstamp, &pid);
return 0;
}
@@ -412,7 +519,9 @@ int contention_end(u64 *ctx)
if (data->min_time > duration)
data->min_time = duration;
- bpf_map_delete_elem(&tstamp, &pid);
+ pelem->lock = 0;
+ if (need_delete)
+ bpf_map_delete_elem(&tstamp, &pid);
return 0;
}
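A sketch (not from this patch) of how the new cgroup_filter map and has_cgroup flag would be driven from user space through the generated skeleton; the skel->maps/skel->bss field names follow the usual bpftool skeleton layout and are assumptions here:

#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "bpf_skel/lock_contention.skel.h"

static int allow_cgroup(struct lock_contention_bpf *skel, __u64 cgrp_id)
{
	__u8 val = 1;
	int fd = bpf_map__fd(skel->maps.cgroup_filter);

	skel->bss->has_cgroup = 1;	/* make can_record() consult the filter */

	/* key is the 64-bit cgroup id, the value is only a presence flag */
	return bpf_map_update_elem(fd, &cgrp_id, &val, BPF_ANY);
}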
diff --git a/tools/perf/util/bpf_skel/lock_data.h b/tools/perf/util/bpf_skel/lock_data.h
index 260062a9f2ab..08482daf61be 100644
--- a/tools/perf/util/bpf_skel/lock_data.h
+++ b/tools/perf/util/bpf_skel/lock_data.h
@@ -6,7 +6,7 @@
struct contention_key {
u32 stack_id;
u32 pid;
- u64 lock_addr;
+ u64 lock_addr_or_cgroup;
};
#define TASK_COMM_LEN 16
@@ -39,6 +39,7 @@ enum lock_aggr_mode {
LOCK_AGGR_ADDR = 0,
LOCK_AGGR_TASK,
LOCK_AGGR_CALLER,
+ LOCK_AGGR_CGROUP,
};
enum lock_class_sym {
diff --git a/tools/perf/util/bpf_skel/vmlinux/.gitignore b/tools/perf/util/bpf_skel/vmlinux/.gitignore
new file mode 100644
index 000000000000..49502c04183a
--- /dev/null
+++ b/tools/perf/util/bpf_skel/vmlinux/.gitignore
@@ -0,0 +1 @@
+!vmlinux.h
diff --git a/tools/perf/util/branch.c b/tools/perf/util/branch.c
index 378f16a24751..ab760e267d41 100644
--- a/tools/perf/util/branch.c
+++ b/tools/perf/util/branch.c
@@ -109,7 +109,7 @@ const char *get_branch_type(struct branch_entry *e)
return branch_type_name(e->flags.type);
}
-void branch_type_stat_display(FILE *fp, struct branch_type_stat *st)
+void branch_type_stat_display(FILE *fp, const struct branch_type_stat *st)
{
u64 total = 0;
int i;
@@ -171,7 +171,7 @@ static int count_str_scnprintf(int idx, const char *str, char *bf, int size)
return scnprintf(bf, size, "%s%s", (idx) ? " " : " (", str);
}
-int branch_type_str(struct branch_type_stat *st, char *bf, int size)
+int branch_type_str(const struct branch_type_stat *st, char *bf, int size)
{
int i, j = 0, printed = 0;
u64 total = 0;
diff --git a/tools/perf/util/branch.h b/tools/perf/util/branch.h
index e41bfffe2217..87704d713ff6 100644
--- a/tools/perf/util/branch.h
+++ b/tools/perf/util/branch.h
@@ -86,8 +86,8 @@ void branch_type_count(struct branch_type_stat *st, struct branch_flags *flags,
const char *branch_type_name(int type);
const char *branch_new_type_name(int new_type);
const char *get_branch_type(struct branch_entry *e);
-void branch_type_stat_display(FILE *fp, struct branch_type_stat *st);
-int branch_type_str(struct branch_type_stat *st, char *bf, int bfsize);
+void branch_type_stat_display(FILE *fp, const struct branch_type_stat *st);
+int branch_type_str(const struct branch_type_stat *st, char *bf, int bfsize);
const char *branch_spec_desc(int spec);
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index aee937d14fbb..8262f69118db 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -586,7 +586,7 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
call = zalloc(sizeof(*call));
if (!call) {
perror("not enough memory for the code path tree");
- return -1;
+ return -ENOMEM;
}
call->ip = cursor_node->ip;
call->ms = cursor_node->ms;
@@ -602,7 +602,15 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
* branch_from is set with value somewhere else
* to imply it's "to" of a branch.
*/
- call->brtype_stat.branch_to = true;
+ if (!call->brtype_stat) {
+ call->brtype_stat = zalloc(sizeof(*call->brtype_stat));
+ if (!call->brtype_stat) {
+ perror("not enough memory for the code path branch statistics");
+ free(call->brtype_stat);
+ return -ENOMEM;
+ }
+ }
+ call->brtype_stat->branch_to = true;
if (cursor_node->branch_flags.predicted)
call->predicted_count = 1;
@@ -610,7 +618,7 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
if (cursor_node->branch_flags.abort)
call->abort_count = 1;
- branch_type_count(&call->brtype_stat,
+ branch_type_count(call->brtype_stat,
&cursor_node->branch_flags,
cursor_node->branch_from,
cursor_node->ip);
@@ -618,7 +626,8 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
/*
* It's "from" of a branch
*/
- call->brtype_stat.branch_to = false;
+ if (call->brtype_stat && call->brtype_stat->branch_to)
+ call->brtype_stat->branch_to = false;
call->cycles_count =
cursor_node->branch_flags.cycles;
call->iter_count = cursor_node->nr_loop_iter;
@@ -650,8 +659,8 @@ add_child(struct callchain_node *parent,
list_for_each_entry_safe(call, tmp, &new->val, list) {
list_del_init(&call->list);
- map__zput(call->ms.map);
- maps__zput(call->ms.maps);
+ map_symbol__exit(&call->ms);
+ zfree(&call->brtype_stat);
free(call);
}
free(new);
@@ -762,7 +771,14 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
/*
* It's "to" of a branch
*/
- cnode->brtype_stat.branch_to = true;
+ if (!cnode->brtype_stat) {
+ cnode->brtype_stat = zalloc(sizeof(*cnode->brtype_stat));
+ if (!cnode->brtype_stat) {
+ perror("not enough memory for the code path branch statistics");
+ return MATCH_ERROR;
+ }
+ }
+ cnode->brtype_stat->branch_to = true;
if (node->branch_flags.predicted)
cnode->predicted_count++;
@@ -770,7 +786,7 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
if (node->branch_flags.abort)
cnode->abort_count++;
- branch_type_count(&cnode->brtype_stat,
+ branch_type_count(cnode->brtype_stat,
&node->branch_flags,
node->branch_from,
node->ip);
@@ -778,7 +794,8 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
/*
* It's "from" of a branch
*/
- cnode->brtype_stat.branch_to = false;
+ if (cnode->brtype_stat && cnode->brtype_stat->branch_to)
+ cnode->brtype_stat->branch_to = false;
cnode->cycles_count += node->branch_flags.cycles;
cnode->iter_count += node->nr_loop_iter;
cnode->iter_cycles += node->iter_cycles;
@@ -1022,10 +1039,9 @@ merge_chain_branch(struct callchain_cursor *cursor,
};
callchain_cursor_append(cursor, list->ip, &ms, false, NULL, 0, 0, 0, list->srcline);
list_del_init(&list->list);
- map__zput(ms.map);
- maps__zput(ms.maps);
- map__zput(list->ms.map);
- maps__zput(list->ms.maps);
+ map_symbol__exit(&ms);
+ map_symbol__exit(&list->ms);
+ zfree(&list->brtype_stat);
free(list);
}
@@ -1077,8 +1093,7 @@ int callchain_cursor_append(struct callchain_cursor *cursor,
}
node->ip = ip;
- maps__zput(node->ms.maps);
- map__zput(node->ms.map);
+ map_symbol__exit(&node->ms);
node->ms = *ms;
node->ms.maps = maps__get(ms->maps);
node->ms.map = map__get(ms->map);
@@ -1142,7 +1157,7 @@ int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *
if (al->map == NULL)
goto out;
}
- if (RC_CHK_ACCESS(al->maps) == RC_CHK_ACCESS(machine__kernel_maps(machine))) {
+ if (RC_CHK_EQUAL(al->maps, machine__kernel_maps(machine))) {
if (machine__is_host(machine)) {
al->cpumode = PERF_RECORD_MISC_KERNEL;
al->level = 'k';
@@ -1339,7 +1354,7 @@ static int count_float_printf(int idx, const char *str, float value,
static int branch_to_str(char *bf, int bfsize,
u64 branch_count, u64 predicted_count,
u64 abort_count,
- struct branch_type_stat *brtype_stat)
+ const struct branch_type_stat *brtype_stat)
{
int printed, i = 0;
@@ -1403,7 +1418,7 @@ static int counts_str_build(char *bf, int bfsize,
u64 abort_count, u64 cycles_count,
u64 iter_count, u64 iter_cycles,
u64 from_count,
- struct branch_type_stat *brtype_stat)
+ const struct branch_type_stat *brtype_stat)
{
int printed;
@@ -1430,7 +1445,7 @@ static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
u64 abort_count, u64 cycles_count,
u64 iter_count, u64 iter_cycles,
u64 from_count,
- struct branch_type_stat *brtype_stat)
+ const struct branch_type_stat *brtype_stat)
{
char str[256];
@@ -1447,11 +1462,14 @@ static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
int callchain_list_counts__printf_value(struct callchain_list *clist,
FILE *fp, char *bf, int bfsize)
{
+ static const struct branch_type_stat empty_brtype_stat = {};
+ const struct branch_type_stat *brtype_stat;
u64 branch_count, predicted_count;
u64 abort_count, cycles_count;
u64 iter_count, iter_cycles;
u64 from_count;
+ brtype_stat = clist->brtype_stat ?: &empty_brtype_stat;
branch_count = clist->branch_count;
predicted_count = clist->predicted_count;
abort_count = clist->abort_count;
@@ -1463,7 +1481,7 @@ int callchain_list_counts__printf_value(struct callchain_list *clist,
return callchain_counts_printf(fp, bf, bfsize, branch_count,
predicted_count, abort_count,
cycles_count, iter_count, iter_cycles,
- from_count, &clist->brtype_stat);
+ from_count, brtype_stat);
}
static void free_callchain_node(struct callchain_node *node)
@@ -1474,15 +1492,15 @@ static void free_callchain_node(struct callchain_node *node)
list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
list_del_init(&list->list);
- map__zput(list->ms.map);
- maps__zput(list->ms.maps);
+ map_symbol__exit(&list->ms);
+ zfree(&list->brtype_stat);
free(list);
}
list_for_each_entry_safe(list, tmp, &node->val, list) {
list_del_init(&list->list);
- map__zput(list->ms.map);
- maps__zput(list->ms.maps);
+ map_symbol__exit(&list->ms);
+ zfree(&list->brtype_stat);
free(list);
}
@@ -1567,8 +1585,8 @@ int callchain_node__make_parent_list(struct callchain_node *node)
out:
list_for_each_entry_safe(chain, new, &head, list) {
list_del_init(&chain->list);
- map__zput(chain->ms.map);
- maps__zput(chain->ms.maps);
+ map_symbol__exit(&chain->ms);
+ zfree(&chain->brtype_stat);
free(chain);
}
return -ENOMEM;
@@ -1651,10 +1669,8 @@ void callchain_cursor_reset(struct callchain_cursor *cursor)
cursor->nr = 0;
cursor->last = &cursor->first;
- for (node = cursor->first; node != NULL; node = node->next) {
- map__zput(node->ms.map);
- maps__zput(node->ms.maps);
- }
+ for (node = cursor->first; node != NULL; node = node->next)
+ map_symbol__exit(&node->ms);
}
void callchain_param_setup(u64 sample_type, const char *arch)
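map_symbol__exit() itself is not visible in this diff; presumably it just pairs the two puts that were open coded before, roughly:

void map_symbol__exit(struct map_symbol *ms)
{
	maps__zput(ms->maps);
	map__zput(ms->map);
}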
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index d2618a47deca..d5c66345ae31 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -116,22 +116,22 @@ extern struct callchain_param callchain_param;
extern struct callchain_param callchain_param_default;
struct callchain_list {
+ struct list_head list;
u64 ip;
struct map_symbol ms;
- struct /* for TUI */ {
- bool unfolded;
- bool has_children;
- };
+ const char *srcline;
u64 branch_count;
u64 from_count;
- u64 predicted_count;
- u64 abort_count;
u64 cycles_count;
u64 iter_count;
u64 iter_cycles;
- struct branch_type_stat brtype_stat;
- const char *srcline;
- struct list_head list;
+ struct branch_type_stat *brtype_stat;
+ u64 predicted_count;
+ u64 abort_count;
+ struct /* for TUI */ {
+ bool unfolded;
+ bool has_children;
+ };
};
/*
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
index bfb13306d82c..fcb509058499 100644
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -48,28 +48,36 @@ static int open_cgroup(const char *name)
}
#ifdef HAVE_FILE_HANDLE
-int read_cgroup_id(struct cgroup *cgrp)
+static u64 __read_cgroup_id(const char *path)
{
- char path[PATH_MAX + 1];
- char mnt[PATH_MAX + 1];
struct {
struct file_handle fh;
uint64_t cgroup_id;
} handle;
int mount_id;
+ handle.fh.handle_bytes = sizeof(handle.cgroup_id);
+ if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0)
+ return -1ULL;
+
+ return handle.cgroup_id;
+}
+
+int read_cgroup_id(struct cgroup *cgrp)
+{
+ char path[PATH_MAX + 1];
+ char mnt[PATH_MAX + 1];
+
if (cgroupfs_find_mountpoint(mnt, PATH_MAX + 1, "perf_event"))
return -1;
scnprintf(path, PATH_MAX, "%s/%s", mnt, cgrp->name);
- handle.fh.handle_bytes = sizeof(handle.cgroup_id);
- if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0)
- return -1;
-
- cgrp->id = handle.cgroup_id;
+ cgrp->id = __read_cgroup_id(path);
return 0;
}
+#else
+static inline u64 __read_cgroup_id(const char *path __maybe_unused) { return -1ULL; }
#endif /* HAVE_FILE_HANDLE */
#ifndef CGROUP2_SUPER_MAGIC
@@ -106,7 +114,7 @@ static struct cgroup *evlist__find_cgroup(struct evlist *evlist, const char *str
return NULL;
}
-static struct cgroup *cgroup__new(const char *name, bool do_open)
+struct cgroup *cgroup__new(const char *name, bool do_open)
{
struct cgroup *cgroup = zalloc(sizeof(*cgroup));
@@ -562,6 +570,11 @@ struct cgroup *cgroup__findnew(struct perf_env *env, uint64_t id,
return cgrp;
}
+struct cgroup *__cgroup__find(struct rb_root *root, uint64_t id)
+{
+ return __cgroup__findnew(root, id, /*create=*/false, /*path=*/NULL);
+}
+
struct cgroup *cgroup__find(struct perf_env *env, uint64_t id)
{
struct cgroup *cgrp;
@@ -587,3 +600,35 @@ void perf_env__purge_cgroups(struct perf_env *env)
}
up_write(&env->cgroups.lock);
}
+
+void read_all_cgroups(struct rb_root *root)
+{
+ char mnt[PATH_MAX];
+ struct cgroup_name *cn;
+ int prefix_len;
+
+ if (cgroupfs_find_mountpoint(mnt, sizeof(mnt), "perf_event"))
+ return;
+
+ /* cgroup_name will have a full path, skip the root directory */
+ prefix_len = strlen(mnt);
+
+ /* collect all cgroups in the cgroup_list */
+ if (nftw(mnt, add_cgroup_name, 20, 0) < 0)
+ return;
+
+ list_for_each_entry(cn, &cgroup_list, list) {
+ const char *name;
+ u64 cgrp_id;
+
+ /* cgroup_name might have a full path, skip the prefix */
+ name = cn->name + prefix_len;
+ if (name[0] == '\0')
+ name = "/";
+
+ cgrp_id = __read_cgroup_id(cn->name);
+ __cgroup__findnew(root, cgrp_id, /*create=*/true, name);
+ }
+
+ release_cgroup_list();
+}
diff --git a/tools/perf/util/cgroup.h b/tools/perf/util/cgroup.h
index 12256b78608c..de8882d6e8d3 100644
--- a/tools/perf/util/cgroup.h
+++ b/tools/perf/util/cgroup.h
@@ -26,6 +26,7 @@ void cgroup__put(struct cgroup *cgroup);
struct evlist;
struct rblist;
+struct cgroup *cgroup__new(const char *name, bool do_open);
struct cgroup *evlist__findnew_cgroup(struct evlist *evlist, const char *name);
int evlist__expand_cgroup(struct evlist *evlist, const char *cgroups,
struct rblist *metric_events, bool open_cgroup);
@@ -37,6 +38,7 @@ int parse_cgroups(const struct option *opt, const char *str, int unset);
struct cgroup *cgroup__findnew(struct perf_env *env, uint64_t id,
const char *path);
struct cgroup *cgroup__find(struct perf_env *env, uint64_t id);
+struct cgroup *__cgroup__find(struct rb_root *root, uint64_t id);
void perf_env__purge_cgroups(struct perf_env *env);
@@ -49,6 +51,9 @@ static inline int read_cgroup_id(struct cgroup *cgrp __maybe_unused)
}
#endif /* HAVE_FILE_HANDLE */
+/* read all cgroups in the system and save them in the rbtree */
+void read_all_cgroups(struct rb_root *root);
+
int cgroup_is_v2(const char *subsys);
#endif /* __CGROUP_H__ */
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 9729d006550d..a9873d14c632 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -283,22 +283,31 @@ static int cs_etm__metadata_set_trace_id(u8 trace_chan_id, u64 *cpu_metadata)
}
/*
- * Get a metadata for a specific cpu from an array.
+ * Get a metadata index for a specific cpu from an array.
*
*/
-static u64 *get_cpu_data(struct cs_etm_auxtrace *etm, int cpu)
+static int get_cpu_data_idx(struct cs_etm_auxtrace *etm, int cpu)
{
int i;
- u64 *metadata = NULL;
for (i = 0; i < etm->num_cpu; i++) {
if (etm->metadata[i][CS_ETM_CPU] == (u64)cpu) {
- metadata = etm->metadata[i];
- break;
+ return i;
}
}
- return metadata;
+ return -1;
+}
+
+/*
+ * Get a metadata for a specific cpu from an array.
+ *
+ */
+static u64 *get_cpu_data(struct cs_etm_auxtrace *etm, int cpu)
+{
+ int idx = get_cpu_data_idx(etm, cpu);
+
+ return (idx != -1) ? etm->metadata[idx] : NULL;
}
/*
@@ -641,66 +650,80 @@ static void cs_etm__packet_dump(const char *pkt_string)
}
static void cs_etm__set_trace_param_etmv3(struct cs_etm_trace_params *t_params,
- struct cs_etm_auxtrace *etm, int idx,
- u32 etmidr)
+ struct cs_etm_auxtrace *etm, int t_idx,
+ int m_idx, u32 etmidr)
{
u64 **metadata = etm->metadata;
- t_params[idx].protocol = cs_etm__get_v7_protocol_version(etmidr);
- t_params[idx].etmv3.reg_ctrl = metadata[idx][CS_ETM_ETMCR];
- t_params[idx].etmv3.reg_trc_id = metadata[idx][CS_ETM_ETMTRACEIDR];
+ t_params[t_idx].protocol = cs_etm__get_v7_protocol_version(etmidr);
+ t_params[t_idx].etmv3.reg_ctrl = metadata[m_idx][CS_ETM_ETMCR];
+ t_params[t_idx].etmv3.reg_trc_id = metadata[m_idx][CS_ETM_ETMTRACEIDR];
}
static void cs_etm__set_trace_param_etmv4(struct cs_etm_trace_params *t_params,
- struct cs_etm_auxtrace *etm, int idx)
+ struct cs_etm_auxtrace *etm, int t_idx,
+ int m_idx)
{
u64 **metadata = etm->metadata;
- t_params[idx].protocol = CS_ETM_PROTO_ETMV4i;
- t_params[idx].etmv4.reg_idr0 = metadata[idx][CS_ETMV4_TRCIDR0];
- t_params[idx].etmv4.reg_idr1 = metadata[idx][CS_ETMV4_TRCIDR1];
- t_params[idx].etmv4.reg_idr2 = metadata[idx][CS_ETMV4_TRCIDR2];
- t_params[idx].etmv4.reg_idr8 = metadata[idx][CS_ETMV4_TRCIDR8];
- t_params[idx].etmv4.reg_configr = metadata[idx][CS_ETMV4_TRCCONFIGR];
- t_params[idx].etmv4.reg_traceidr = metadata[idx][CS_ETMV4_TRCTRACEIDR];
+ t_params[t_idx].protocol = CS_ETM_PROTO_ETMV4i;
+ t_params[t_idx].etmv4.reg_idr0 = metadata[m_idx][CS_ETMV4_TRCIDR0];
+ t_params[t_idx].etmv4.reg_idr1 = metadata[m_idx][CS_ETMV4_TRCIDR1];
+ t_params[t_idx].etmv4.reg_idr2 = metadata[m_idx][CS_ETMV4_TRCIDR2];
+ t_params[t_idx].etmv4.reg_idr8 = metadata[m_idx][CS_ETMV4_TRCIDR8];
+ t_params[t_idx].etmv4.reg_configr = metadata[m_idx][CS_ETMV4_TRCCONFIGR];
+ t_params[t_idx].etmv4.reg_traceidr = metadata[m_idx][CS_ETMV4_TRCTRACEIDR];
}
static void cs_etm__set_trace_param_ete(struct cs_etm_trace_params *t_params,
- struct cs_etm_auxtrace *etm, int idx)
+ struct cs_etm_auxtrace *etm, int t_idx,
+ int m_idx)
{
u64 **metadata = etm->metadata;
- t_params[idx].protocol = CS_ETM_PROTO_ETE;
- t_params[idx].ete.reg_idr0 = metadata[idx][CS_ETE_TRCIDR0];
- t_params[idx].ete.reg_idr1 = metadata[idx][CS_ETE_TRCIDR1];
- t_params[idx].ete.reg_idr2 = metadata[idx][CS_ETE_TRCIDR2];
- t_params[idx].ete.reg_idr8 = metadata[idx][CS_ETE_TRCIDR8];
- t_params[idx].ete.reg_configr = metadata[idx][CS_ETE_TRCCONFIGR];
- t_params[idx].ete.reg_traceidr = metadata[idx][CS_ETE_TRCTRACEIDR];
- t_params[idx].ete.reg_devarch = metadata[idx][CS_ETE_TRCDEVARCH];
+ t_params[t_idx].protocol = CS_ETM_PROTO_ETE;
+ t_params[t_idx].ete.reg_idr0 = metadata[m_idx][CS_ETE_TRCIDR0];
+ t_params[t_idx].ete.reg_idr1 = metadata[m_idx][CS_ETE_TRCIDR1];
+ t_params[t_idx].ete.reg_idr2 = metadata[m_idx][CS_ETE_TRCIDR2];
+ t_params[t_idx].ete.reg_idr8 = metadata[m_idx][CS_ETE_TRCIDR8];
+ t_params[t_idx].ete.reg_configr = metadata[m_idx][CS_ETE_TRCCONFIGR];
+ t_params[t_idx].ete.reg_traceidr = metadata[m_idx][CS_ETE_TRCTRACEIDR];
+ t_params[t_idx].ete.reg_devarch = metadata[m_idx][CS_ETE_TRCDEVARCH];
}
static int cs_etm__init_trace_params(struct cs_etm_trace_params *t_params,
struct cs_etm_auxtrace *etm,
+ bool formatted,
+ int sample_cpu,
int decoders)
{
- int i;
+ int t_idx, m_idx;
u32 etmidr;
u64 architecture;
- for (i = 0; i < decoders; i++) {
- architecture = etm->metadata[i][CS_ETM_MAGIC];
+ for (t_idx = 0; t_idx < decoders; t_idx++) {
+ if (formatted)
+ m_idx = t_idx;
+ else {
+ m_idx = get_cpu_data_idx(etm, sample_cpu);
+ if (m_idx == -1) {
+ pr_warning("CS_ETM: unknown CPU, falling back to first metadata\n");
+ m_idx = 0;
+ }
+ }
+
+ architecture = etm->metadata[m_idx][CS_ETM_MAGIC];
switch (architecture) {
case __perf_cs_etmv3_magic:
- etmidr = etm->metadata[i][CS_ETM_ETMIDR];
- cs_etm__set_trace_param_etmv3(t_params, etm, i, etmidr);
+ etmidr = etm->metadata[m_idx][CS_ETM_ETMIDR];
+ cs_etm__set_trace_param_etmv3(t_params, etm, t_idx, m_idx, etmidr);
break;
case __perf_cs_etmv4_magic:
- cs_etm__set_trace_param_etmv4(t_params, etm, i);
+ cs_etm__set_trace_param_etmv4(t_params, etm, t_idx, m_idx);
break;
case __perf_cs_ete_magic:
- cs_etm__set_trace_param_ete(t_params, etm, i);
+ cs_etm__set_trace_param_ete(t_params, etm, t_idx, m_idx);
break;
default:
return -EINVAL;
@@ -1016,7 +1039,7 @@ out:
}
static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
- bool formatted)
+ bool formatted, int sample_cpu)
{
struct cs_etm_decoder_params d_params;
struct cs_etm_trace_params *t_params = NULL;
@@ -1041,7 +1064,7 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
if (!t_params)
goto out_free;
- if (cs_etm__init_trace_params(t_params, etm, decoders))
+ if (cs_etm__init_trace_params(t_params, etm, formatted, sample_cpu, decoders))
goto out_free;
/* Set decoder parameters to decode trace packets */
@@ -1081,14 +1104,15 @@ out_free:
static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
struct auxtrace_queue *queue,
unsigned int queue_nr,
- bool formatted)
+ bool formatted,
+ int sample_cpu)
{
struct cs_etm_queue *etmq = queue->priv;
if (list_empty(&queue->head) || etmq)
return 0;
- etmq = cs_etm__alloc_queue(etm, formatted);
+ etmq = cs_etm__alloc_queue(etm, formatted, sample_cpu);
if (!etmq)
return -ENOMEM;
@@ -2816,7 +2840,7 @@ static int cs_etm__process_auxtrace_event(struct perf_session *session,
* formatted in piped mode (true).
*/
err = cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
- idx, true);
+ idx, true, -1);
if (err)
return err;
@@ -3022,7 +3046,7 @@ static int cs_etm__queue_aux_fragment(struct perf_session *session, off_t file_o
idx = auxtrace_event->idx;
formatted = !(aux_event->flags & PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW);
return cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
- idx, formatted);
+ idx, formatted, sample->cpu);
}
/* Wasn't inside this buffer, but there were no parse errors. 1 == 'not found' */
diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h
index 7cca37887917..4696267a32f0 100644
--- a/tools/perf/util/cs-etm.h
+++ b/tools/perf/util/cs-etm.h
@@ -242,7 +242,7 @@ struct cs_etm_packet_queue {
int cs_etm__process_auxtrace_info(union perf_event *event,
struct perf_session *session);
-struct perf_event_attr *cs_etm_get_default_config(struct perf_pmu *pmu);
+void cs_etm_get_default_config(const struct perf_pmu *pmu, struct perf_event_attr *attr);
enum cs_etm_pid_fmt {
CS_ETM_PIDFMT_NONE,
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index fc16299c915f..c29d8a382b19 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -17,6 +17,7 @@
#include "util.h" // rm_rf_perf_data()
#include "debug.h"
#include "header.h"
+#include "rlimit.h"
#include <internal/lib.h>
static void close_dir(struct perf_data_file *files, int nr)
@@ -35,6 +36,7 @@ void perf_data__close_dir(struct perf_data *data)
int perf_data__create_dir(struct perf_data *data, int nr)
{
+ enum rlimit_action set_rlimit = NO_CHANGE;
struct perf_data_file *files = NULL;
int i, ret;
@@ -54,11 +56,21 @@ int perf_data__create_dir(struct perf_data *data, int nr)
goto out_err;
}
+retry_open:
ret = open(file->path, O_RDWR|O_CREAT|O_TRUNC, S_IRUSR|S_IWUSR);
if (ret < 0) {
+ /*
+ * If using parallel threads to collect data,
+ * perf record needs at least 6 fds per CPU.
+ * When we run out of them try to increase the limits.
+ */
+ if (errno == EMFILE && rlimit__increase_nofile(&set_rlimit))
+ goto retry_open;
+
ret = -errno;
goto out_err;
}
+ set_rlimit = NO_CHANGE;
file->fd = ret;
}
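rlimit__increase_nofile() lives in util/rlimit.c and is not shown in this view; judging from the evsel__increase_rlimit() body removed from util/evsel.c below, it is essentially:

#include <errno.h>
#include <sys/resource.h>

bool rlimit__increase_nofile(enum rlimit_action *set_rlimit)
{
	int old_errno;
	struct rlimit l;

	if (*set_rlimit >= INCREASED_MAX)
		return false;

	old_errno = errno;
	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
		if (*set_rlimit == NO_CHANGE) {
			l.rlim_cur = l.rlim_max;	/* first try: raise soft to hard */
		} else {
			l.rlim_cur = l.rlim_max + 1000;	/* then: bump the hard limit */
			l.rlim_max = l.rlim_cur;
		}
		if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
			(*set_rlimit) += 1;
			errno = old_errno;
			return true;
		}
	}
	errno = old_errno;
	return false;
}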
diff --git a/tools/perf/util/dlfilter.c b/tools/perf/util/dlfilter.c
index 4a1dc21b0450..908e16813722 100644
--- a/tools/perf/util/dlfilter.c
+++ b/tools/perf/util/dlfilter.c
@@ -52,8 +52,10 @@ static void al_to_d_al(struct addr_location *al, struct perf_dlfilter_al *d_al)
d_al->sym_end = sym->end;
if (al->addr < sym->end)
d_al->symoff = al->addr - sym->start;
- else
+ else if (al->map)
d_al->symoff = al->addr - map__start(al->map) - sym->start;
+ else
+ d_al->symoff = 0;
d_al->sym_binding = sym->binding;
} else {
d_al->sym = NULL;
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index bdfead36b83a..1f629b6fb7cf 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -477,6 +477,7 @@ void dso__set_module_info(struct dso *dso, struct kmod_path *m,
dso->comp = m->comp;
}
+ dso->is_kmod = 1;
dso__set_short_name(dso, strdup(m->name), true);
}
@@ -1338,6 +1339,7 @@ struct dso *dso__new_id(const char *name, struct dso_id *id)
dso->has_srcline = 1;
dso->a2l_fails = 1;
dso->kernel = DSO_SPACE__USER;
+ dso->is_kmod = 0;
dso->needs_swap = DSO_SWAP__UNSET;
dso->comp = COMP_ID__NONE;
RB_CLEAR_NODE(&dso->rb_node);
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index b41c9782c754..3759de8c2267 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -162,6 +162,7 @@ struct dso {
char *symsrc_filename;
unsigned int a2l_fails;
enum dso_space_type kernel;
+ bool is_kmod;
enum dso_swap_type needs_swap;
enum dso_binary_type symtab_type;
enum dso_binary_type binary_type;
@@ -181,6 +182,7 @@ struct dso {
u8 rel;
struct build_id bid;
u64 text_offset;
+ u64 text_end;
const char *short_name;
const char *long_name;
u16 long_name_len;
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index a164164001fb..44140b7f596a 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -457,7 +457,7 @@ const char *perf_env__cpuid(struct perf_env *env)
{
int status;
- if (!env || !env->cpuid) { /* Assume local operation */
+ if (!env->cpuid) { /* Assume local operation */
status = perf_env__read_cpuid(env);
if (status)
return NULL;
@@ -470,7 +470,7 @@ int perf_env__nr_pmu_mappings(struct perf_env *env)
{
int status;
- if (!env || !env->nr_pmu_mappings) { /* Assume local operation */
+ if (!env->nr_pmu_mappings) { /* Assume local operation */
status = perf_env__read_pmu_mappings(env);
if (status)
return 0;
@@ -483,7 +483,7 @@ const char *perf_env__pmu_mappings(struct perf_env *env)
{
int status;
- if (!env || !env->pmu_mappings) { /* Assume local operation */
+ if (!env->pmu_mappings) { /* Assume local operation */
status = perf_env__read_pmu_mappings(env);
if (status)
return NULL;
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 7ef43f72098e..e36da58522ef 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -251,6 +251,9 @@ static struct evsel *evlist__dummy_event(struct evlist *evlist)
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_DUMMY,
.size = sizeof(attr), /* to capture ABI version */
+ /* Avoid frequency mode for dummy events to avoid associated timers. */
+ .freq = 0,
+ .sample_period = 1,
};
return evsel__new_idx(&attr, evlist->core.nr_entries);
@@ -277,8 +280,6 @@ struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide)
evsel->core.attr.exclude_kernel = 1;
evsel->core.attr.exclude_guest = 1;
evsel->core.attr.exclude_hv = 1;
- evsel->core.attr.freq = 0;
- evsel->core.attr.sample_period = 1;
evsel->core.system_wide = system_wide;
evsel->no_aux_samples = true;
evsel->name = strdup("dummy:u");
@@ -1694,6 +1695,24 @@ void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_ev
tracking_evsel->tracking = true;
}
+struct evsel *evlist__findnew_tracking_event(struct evlist *evlist, bool system_wide)
+{
+ struct evsel *evsel;
+
+ evsel = evlist__get_tracking_event(evlist);
+ if (!evsel__is_dummy_event(evsel)) {
+ evsel = evlist__add_aux_dummy(evlist, system_wide);
+ if (!evsel)
+ return NULL;
+
+ evlist__set_tracking_event(evlist, evsel);
+ } else if (system_wide) {
+ perf_evlist__go_system_wide(&evlist->core, &evsel->core);
+ }
+
+ return evsel;
+}
+
struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str)
{
struct evsel *evsel;
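A hypothetical caller of the new helper (the real ones live in the record/auxtrace setup code): ensure the evlist has a tracking (dummy) event for side-band records and let it sample time:

static int setup_tracking(struct evlist *evlist, bool system_wide)
{
	struct evsel *tracking = evlist__findnew_tracking_event(evlist, system_wide);

	if (tracking == NULL)
		return -ENOMEM;

	evsel__set_sample_bit(tracking, TIME);
	return 0;
}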
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 664c6bf7b3e0..98e7ddb2bd30 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -387,6 +387,7 @@ bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr);
struct evsel *evlist__get_tracking_event(struct evlist *evlist);
void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel);
+struct evsel *evlist__findnew_tracking_event(struct evlist *evlist, bool system_wide);
struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index a8a5ff87cc1f..72a5dfc38d38 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -49,6 +49,7 @@
#include "off_cpu.h"
#include "pmu.h"
#include "pmus.h"
+#include "rlimit.h"
#include "../perf-sys.h"
#include "util/parse-branch-options.h"
#include "util/bpf-filter.h"
@@ -1989,33 +1990,6 @@ bool evsel__detect_missing_features(struct evsel *evsel)
}
}
-bool evsel__increase_rlimit(enum rlimit_action *set_rlimit)
-{
- int old_errno;
- struct rlimit l;
-
- if (*set_rlimit < INCREASED_MAX) {
- old_errno = errno;
-
- if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
- if (*set_rlimit == NO_CHANGE) {
- l.rlim_cur = l.rlim_max;
- } else {
- l.rlim_cur = l.rlim_max + 1000;
- l.rlim_max = l.rlim_cur;
- }
- if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
- (*set_rlimit) += 1;
- errno = old_errno;
- return true;
- }
- }
- errno = old_errno;
- }
-
- return false;
-}
-
static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
struct perf_thread_map *threads,
int start_cpu_map_idx, int end_cpu_map_idx)
@@ -2143,7 +2117,7 @@ try_fallback:
* perf stat needs between 5 and 22 fds per CPU. When we run out
* of them try to increase the limits.
*/
- if (err == -EMFILE && evsel__increase_rlimit(&set_rlimit))
+ if (err == -EMFILE && rlimit__increase_nofile(&set_rlimit))
goto retry_open;
if (err != -EINVAL || idx > 0 || thread > 0)
@@ -2766,6 +2740,11 @@ struct tep_format_field *evsel__field(struct evsel *evsel, const char *name)
return tep_find_field(evsel->tp_format, name);
}
+struct tep_format_field *evsel__common_field(struct evsel *evsel, const char *name)
+{
+ return tep_find_common_field(evsel->tp_format, name);
+}
+
void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name)
{
struct tep_format_field *field = evsel__field(evsel, name);
@@ -2831,6 +2810,14 @@ u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *n
return field ? format_field__intval(field, sample, evsel->needs_swap) : 0;
}
+
+u64 evsel__intval_common(struct evsel *evsel, struct perf_sample *sample, const char *name)
+{
+ struct tep_format_field *field = evsel__common_field(evsel, name);
+
+ return field ? format_field__intval(field, sample, evsel->needs_swap) : 0;
+}
+
#endif
bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize)
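Usage sketch for the new accessor: common_* fields (e.g. common_pid) sit in the header shared by every tracepoint and are found by tep_find_common_field(), not tep_find_field(); the wrapper name below is hypothetical:

static pid_t sample_common_pid(struct evsel *evsel, struct perf_sample *sample)
{
	return (pid_t)evsel__intval_common(evsel, sample, "common_pid");
}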
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 848534ec74fa..d791316a1792 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -330,9 +330,6 @@ int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
struct perf_thread_map *threads);
bool evsel__detect_missing_features(struct evsel *evsel);
-enum rlimit_action { NO_CHANGE, SET_TO_MAX, INCREASED_MAX };
-bool evsel__increase_rlimit(enum rlimit_action *set_rlimit);
-
bool evsel__precise_ip_fallback(struct evsel *evsel);
struct perf_sample;
@@ -340,6 +337,7 @@ struct perf_sample;
#ifdef HAVE_LIBTRACEEVENT
void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name);
u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *name);
+u64 evsel__intval_common(struct evsel *evsel, struct perf_sample *sample, const char *name);
static inline char *evsel__strval(struct evsel *evsel, struct perf_sample *sample, const char *name)
{
@@ -352,6 +350,7 @@ struct tep_format_field;
u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample, bool needs_swap);
struct tep_format_field *evsel__field(struct evsel *evsel, const char *name);
+struct tep_format_field *evsel__common_field(struct evsel *evsel, const char *name);
static inline bool __evsel__match(const struct evsel *evsel, u32 type, u64 config)
{
diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c
index 4488f306de78..7be23b3ac082 100644
--- a/tools/perf/util/expr.c
+++ b/tools/perf/util/expr.c
@@ -509,7 +509,7 @@ double expr__strcmp_cpuid_str(const struct expr_parse_ctx *ctx __maybe_unused,
bool compute_ids __maybe_unused, const char *test_id)
{
double ret;
- struct perf_pmu *pmu = pmu__find_core_pmu();
+ struct perf_pmu *pmu = perf_pmus__find_core_pmu();
char *cpuid = perf_pmu__getcpuid(pmu);
if (!cpuid)
diff --git a/tools/perf/util/expr.y b/tools/perf/util/expr.y
index 6c93b358cc2d..e364790babb5 100644
--- a/tools/perf/util/expr.y
+++ b/tools/perf/util/expr.y
@@ -1,6 +1,8 @@
/* Simple expression parser */
%{
+#ifndef NDEBUG
#define YYDEBUG 1
+#endif
#include <assert.h>
#include <math.h>
#include <stdlib.h>
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index d812e1e371a7..e86b9439ffee 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2573,7 +2573,7 @@ error:
static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
{
u32 nr, i;
- char *str;
+ char *str = NULL;
struct strbuf sb;
int cpu_nr = ff->ph->env.nr_cpus_avail;
u64 size = 0;
@@ -2601,7 +2601,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
goto error;
size += string_size(str);
- free(str);
+ zfree(&str);
}
ph->env.sibling_cores = strbuf_detach(&sb, NULL);
@@ -2620,7 +2620,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
goto error;
size += string_size(str);
- free(str);
+ zfree(&str);
}
ph->env.sibling_threads = strbuf_detach(&sb, NULL);
@@ -2684,7 +2684,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
goto error;
size += string_size(str);
- free(str);
+ zfree(&str);
}
ph->env.sibling_dies = strbuf_detach(&sb, NULL);
@@ -2699,6 +2699,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
error:
strbuf_release(&sb);
+ zfree(&str);
free_cpu:
zfree(&ph->env.cpu);
return -1;
@@ -2736,10 +2737,9 @@ static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
goto error;
n->map = perf_cpu_map__new(str);
+ free(str);
if (!n->map)
goto error;
-
- free(str);
}
ff->ph->env.nr_numa_nodes = nr;
ff->ph->env.numa_nodes = nodes;
@@ -2913,10 +2913,10 @@ static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
return -1;
for (i = 0; i < cnt; i++) {
- struct cpu_cache_level c;
+ struct cpu_cache_level *c = &caches[i];
#define _R(v) \
- if (do_read_u32(ff, &c.v))\
+ if (do_read_u32(ff, &c->v)) \
goto out_free_caches; \
_R(level)
@@ -2926,22 +2926,25 @@ static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
#undef _R
#define _R(v) \
- c.v = do_read_string(ff); \
- if (!c.v) \
- goto out_free_caches;
+ c->v = do_read_string(ff); \
+ if (!c->v) \
+ goto out_free_caches; \
_R(type)
_R(size)
_R(map)
#undef _R
-
- caches[i] = c;
}
ff->ph->env.caches = caches;
ff->ph->env.caches_cnt = cnt;
return 0;
out_free_caches:
+ for (i = 0; i < cnt; i++) {
+ free(caches[i].type);
+ free(caches[i].size);
+ free(caches[i].map);
+ }
free(caches);
return -1;
}
@@ -3585,18 +3588,16 @@ static int perf_header__adds_write(struct perf_header *header,
struct feat_copier *fc)
{
int nr_sections;
- struct feat_fd ff;
+ struct feat_fd ff = {
+ .fd = fd,
+ .ph = header,
+ };
struct perf_file_section *feat_sec, *p;
int sec_size;
u64 sec_start;
int feat;
int err;
- ff = (struct feat_fd){
- .fd = fd,
- .ph = header,
- };
-
nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
if (!nr_sections)
return 0;
@@ -3623,6 +3624,7 @@ static int perf_header__adds_write(struct perf_header *header,
err = do_write(&ff, feat_sec, sec_size);
if (err < 0)
pr_debug("failed to write feature section\n");
+ free(ff.buf); /* TODO: added to silence clang-tidy. */
free(feat_sec);
return err;
}
@@ -3630,11 +3632,11 @@ static int perf_header__adds_write(struct perf_header *header,
int perf_header__write_pipe(int fd)
{
struct perf_pipe_file_header f_header;
- struct feat_fd ff;
+ struct feat_fd ff = {
+ .fd = fd,
+ };
int err;
- ff = (struct feat_fd){ .fd = fd };
-
f_header = (struct perf_pipe_file_header){
.magic = PERF_MAGIC,
.size = sizeof(f_header),
@@ -3645,7 +3647,7 @@ int perf_header__write_pipe(int fd)
pr_debug("failed to write perf pipe header\n");
return err;
}
-
+ free(ff.buf);
return 0;
}
@@ -3658,11 +3660,12 @@ static int perf_session__do_write_header(struct perf_session *session,
struct perf_file_attr f_attr;
struct perf_header *header = &session->header;
struct evsel *evsel;
- struct feat_fd ff;
+ struct feat_fd ff = {
+ .fd = fd,
+ };
u64 attr_offset;
int err;
- ff = (struct feat_fd){ .fd = fd};
lseek(fd, sizeof(f_header), SEEK_SET);
evlist__for_each_entry(session->evlist, evsel) {
@@ -3670,6 +3673,7 @@ static int perf_session__do_write_header(struct perf_session *session,
err = do_write(&ff, evsel->core.id, evsel->core.ids * sizeof(u64));
if (err < 0) {
pr_debug("failed to write perf header\n");
+ free(ff.buf);
return err;
}
}
@@ -3695,6 +3699,7 @@ static int perf_session__do_write_header(struct perf_session *session,
err = do_write(&ff, &f_attr, sizeof(f_attr));
if (err < 0) {
pr_debug("failed to write perf header attribute\n");
+ free(ff.buf);
return err;
}
}
@@ -3705,8 +3710,10 @@ static int perf_session__do_write_header(struct perf_session *session,
if (at_exit) {
err = perf_header__adds_write(header, evlist, fd, fc);
- if (err < 0)
+ if (err < 0) {
+ free(ff.buf);
return err;
+ }
}
f_header = (struct perf_file_header){
@@ -3728,6 +3735,7 @@ static int perf_session__do_write_header(struct perf_session *session,
lseek(fd, 0, SEEK_SET);
err = do_write(&ff, &f_header, sizeof(f_header));
+ free(ff.buf);
if (err < 0) {
pr_debug("failed to write perf header\n");
return err;
diff --git a/tools/perf/util/hisi-ptt.c b/tools/perf/util/hisi-ptt.c
index 45b614bb73bf..43bd1ca62d58 100644
--- a/tools/perf/util/hisi-ptt.c
+++ b/tools/perf/util/hisi-ptt.c
@@ -108,8 +108,10 @@ static int hisi_ptt_process_auxtrace_event(struct perf_session *session,
data_offset = 0;
} else {
data_offset = lseek(fd, 0, SEEK_CUR);
- if (data_offset == -1)
+ if (data_offset == -1) {
+ free(data);
return -errno;
+ }
}
err = readn(fd, data, size);
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 3dc8a4968beb..0888b7163b7c 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -515,17 +515,16 @@ err_rawdata:
err_infos:
if (he->branch_info) {
- map__put(he->branch_info->from.ms.map);
- map__put(he->branch_info->to.ms.map);
+ map_symbol__exit(&he->branch_info->from.ms);
+ map_symbol__exit(&he->branch_info->to.ms);
zfree(&he->branch_info);
}
if (he->mem_info) {
- map__put(he->mem_info->iaddr.ms.map);
- map__put(he->mem_info->daddr.ms.map);
+ map_symbol__exit(&he->mem_info->iaddr.ms);
+ map_symbol__exit(&he->mem_info->daddr.ms);
}
err:
- maps__zput(he->ms.maps);
- map__zput(he->ms.map);
+ map_symbol__exit(&he->ms);
zfree(&he->stat_acc);
return -ENOMEM;
}
@@ -1317,20 +1316,19 @@ void hist_entry__delete(struct hist_entry *he)
struct hist_entry_ops *ops = he->ops;
thread__zput(he->thread);
- maps__zput(he->ms.maps);
- map__zput(he->ms.map);
+ map_symbol__exit(&he->ms);
if (he->branch_info) {
- map__zput(he->branch_info->from.ms.map);
- map__zput(he->branch_info->to.ms.map);
+ map_symbol__exit(&he->branch_info->from.ms);
+ map_symbol__exit(&he->branch_info->to.ms);
zfree_srcline(&he->branch_info->srcline_from);
zfree_srcline(&he->branch_info->srcline_to);
zfree(&he->branch_info);
}
if (he->mem_info) {
- map__zput(he->mem_info->iaddr.ms.map);
- map__zput(he->mem_info->daddr.ms.map);
+ map_symbol__exit(&he->mem_info->iaddr.ms);
+ map_symbol__exit(&he->mem_info->daddr.ms);
mem_info__zput(he->mem_info);
}
@@ -2142,7 +2140,7 @@ static bool hists__filter_entry_by_thread(struct hists *hists,
struct hist_entry *he)
{
if (hists->thread_filter != NULL &&
- RC_CHK_ACCESS(he->thread) != RC_CHK_ACCESS(hists->thread_filter)) {
+ !RC_CHK_EQUAL(he->thread, hists->thread_filter)) {
he->filtered |= (1 << HIST_FILTER__THREAD);
return true;
}
@@ -2676,8 +2674,6 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
/* If we have branch cycles always annotate them. */
if (bs && bs->nr && entries[0].flags.cycles) {
- int i;
-
bi = sample__resolve_bstack(sample, al);
if (bi) {
struct addr_map_symbol *prev = NULL;
@@ -2692,7 +2688,7 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
* Note that perf stores branches reversed from
* program order!
*/
- for (i = bs->nr - 1; i >= 0; i--) {
+ for (int i = bs->nr - 1; i >= 0; i--) {
addr_map_symbol__account_cycles(&bi[i].from,
nonany_branch_mode ? NULL : prev,
bi[i].flags.cycles);
@@ -2701,6 +2697,10 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
if (total_cycles)
*total_cycles += bi[i].flags.cycles;
}
+ for (unsigned int i = 0; i < bs->nr; i++) {
+ map_symbol__exit(&bi[i].to.ms);
+ map_symbol__exit(&bi[i].from.ms);
+ }
free(bi);
}
}
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
index af9710622a1f..bccb988a7a44 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
@@ -8,7 +8,9 @@
#include <string.h>
#include <endian.h>
#include <byteswap.h>
+#include <linux/kernel.h>
#include <linux/compiler.h>
+#include <asm-generic/unaligned.h>
#include "intel-pt-pkt-decoder.h"
@@ -17,17 +19,11 @@
#define BIT63 ((uint64_t)1 << 63)
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-#define le16_to_cpu bswap_16
-#define le32_to_cpu bswap_32
-#define le64_to_cpu bswap_64
#define memcpy_le64(d, s, n) do { \
memcpy((d), (s), (n)); \
*(d) = le64_to_cpu(*(d)); \
} while (0)
#else
-#define le16_to_cpu
-#define le32_to_cpu
-#define le64_to_cpu
#define memcpy_le64 memcpy
#endif
@@ -83,7 +79,7 @@ static int intel_pt_get_long_tnt(const unsigned char *buf, size_t len,
if (len < 8)
return INTEL_PT_NEED_MORE_BYTES;
- payload = le64_to_cpu(*(uint64_t *)buf);
+ payload = get_unaligned_le64(buf);
for (count = 47; count; count--) {
if (payload & BIT63)
@@ -124,26 +120,21 @@ static int intel_pt_get_cbr(const unsigned char *buf, size_t len,
if (len < 4)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_CBR;
- packet->payload = le16_to_cpu(*(uint16_t *)(buf + 2));
+ packet->payload = get_unaligned_le16(buf + 2);
return 4;
}
static int intel_pt_get_vmcs(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
- unsigned int count = (52 - 5) >> 3;
-
- if (count < 1 || count > 7)
- return INTEL_PT_BAD_PACKET;
-
- if (len < count + 2)
+ if (len < 7)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_VMCS;
- packet->count = count;
- memcpy_le64(&packet->payload, buf + 2, count);
+ packet->count = 5;
+ memcpy_le64(&packet->payload, buf + 2, 5);
- return count + 2;
+ return 7;
}
static int intel_pt_get_ovf(struct intel_pt_pkt *packet)
@@ -199,7 +190,7 @@ static int intel_pt_get_mnt(const unsigned char *buf, size_t len,
if (len < 11)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_MNT;
- memcpy_le64(&packet->payload, buf + 3, 8);
+ packet->payload = get_unaligned_le64(buf + 3);
return 11;
}
@@ -228,12 +219,12 @@ static int intel_pt_get_ptwrite(const unsigned char *buf, size_t len,
case 0:
if (len < 6)
return INTEL_PT_NEED_MORE_BYTES;
- packet->payload = le32_to_cpu(*(uint32_t *)(buf + 2));
+ packet->payload = get_unaligned_le32(buf + 2);
return 6;
case 1:
if (len < 10)
return INTEL_PT_NEED_MORE_BYTES;
- packet->payload = le64_to_cpu(*(uint64_t *)(buf + 2));
+ packet->payload = get_unaligned_le64(buf + 2);
return 10;
default:
return INTEL_PT_BAD_PACKET;
@@ -258,7 +249,7 @@ static int intel_pt_get_mwait(const unsigned char *buf, size_t len,
if (len < 10)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_MWAIT;
- packet->payload = le64_to_cpu(*(uint64_t *)(buf + 2));
+ packet->payload = get_unaligned_le64(buf + 2);
return 10;
}
@@ -311,7 +302,7 @@ static int intel_pt_get_bip_8(const unsigned char *buf, size_t len,
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_BIP;
packet->count = buf[0] >> 3;
- memcpy_le64(&packet->payload, buf + 1, 8);
+ packet->payload = get_unaligned_le64(buf + 1);
return 9;
}
@@ -350,7 +341,7 @@ static int intel_pt_get_evd(const unsigned char *buf, size_t len,
packet->type = INTEL_PT_EVD;
packet->count = buf[2] & 0x3f;
packet->payload = buf[3];
- memcpy_le64(&packet->payload, buf + 3, 8);
+ packet->payload = get_unaligned_le64(buf + 3);
return 11;
}
@@ -465,13 +456,13 @@ static int intel_pt_get_ip(enum intel_pt_pkt_type type, unsigned int byte,
if (len < 3)
return INTEL_PT_NEED_MORE_BYTES;
ip_len = 2;
- packet->payload = le16_to_cpu(*(uint16_t *)(buf + 1));
+ packet->payload = get_unaligned_le16(buf + 1);
break;
case 2:
if (len < 5)
return INTEL_PT_NEED_MORE_BYTES;
ip_len = 4;
- packet->payload = le32_to_cpu(*(uint32_t *)(buf + 1));
+ packet->payload = get_unaligned_le32(buf + 1);
break;
case 3:
case 4:
@@ -484,7 +475,7 @@ static int intel_pt_get_ip(enum intel_pt_pkt_type type, unsigned int byte,
if (len < 9)
return INTEL_PT_NEED_MORE_BYTES;
ip_len = 8;
- packet->payload = le64_to_cpu(*(uint64_t *)(buf + 1));
+ packet->payload = get_unaligned_le64(buf + 1);
break;
default:
return INTEL_PT_BAD_PACKET;
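
Throughout this file the pattern le*_to_cpu(*(uintN_t *)buf) becomes get_unaligned_le{16,32,64}() from <asm-generic/unaligned.h>, which avoids undefined behaviour when the packet buffer is not naturally aligned and still byte-swaps on big-endian hosts. A simplified sketch of the behaviour being relied on (an assumption about the tools copy of that header, not code from this diff):

  /* Illustrative only: read a 64-bit little-endian value from a buffer
   * with no alignment guarantees. */
  static inline uint64_t sketch_get_unaligned_le64(const void *p)
  {
          uint64_t v;

          memcpy(&v, p, sizeof(v));   /* no unaligned load */
          return le64_to_cpu(v);      /* no-op on little-endian hosts */
  }

The VMCS packet change is the one spot that also simplifies logic: the payload length was computed as (52 - 5) >> 3, which always evaluates to 5, so the dead range check goes away and the length is written out directly.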
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index dbf0bc71a63b..f38893e0b036 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -1512,9 +1512,11 @@ static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
} else if (ptq->state->flags & INTEL_PT_ASYNC) {
if (!ptq->state->to_ip)
ptq->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_ASYNC |
PERF_IP_FLAG_TRACE_END;
else if (ptq->state->from_nr && !ptq->state->to_nr)
ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
+ PERF_IP_FLAG_ASYNC |
PERF_IP_FLAG_VMEXIT;
else
ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
diff --git a/tools/perf/util/intel-pt.h b/tools/perf/util/intel-pt.h
index c7d6068e3a6b..18fd0be52e6c 100644
--- a/tools/perf/util/intel-pt.h
+++ b/tools/perf/util/intel-pt.h
@@ -42,6 +42,7 @@ struct auxtrace_record *intel_pt_recording_init(int *err);
int intel_pt_process_auxtrace_info(union perf_event *event,
struct perf_session *session);
-struct perf_event_attr *intel_pt_pmu_default_config(struct perf_pmu *pmu);
+void intel_pt_pmu_default_config(const struct perf_pmu *intel_pt_pmu,
+ struct perf_event_attr *attr);
#endif
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index 6b2b96c16ccd..1f657ef8975f 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -675,6 +675,7 @@ jit_repipe_unwinding_info(struct jit_buf_desc *jd, union jr_entry *jr)
jd->eh_frame_hdr_size = jr->unwinding.eh_frame_hdr_size;
jd->unwinding_size = jr->unwinding.unwinding_size;
jd->unwinding_mapped_size = jr->unwinding.mapped_size;
+ free(jd->unwinding_data);
jd->unwinding_data = unwinding_data;
return 0;
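
Freeing the previous buffer before storing the new one closes a leak when a jitdump stream carries more than one unwinding-info record: jd->unwinding_data is an owned heap pointer, and free(NULL) is a defined no-op, so no guard is needed for the first record. The general shape (obj/buf are illustrative names):

  free(obj->buf);          /* NULL-safe release of the old allocation */
  obj->buf = new_buf;      /* adopt the new one */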
diff --git a/tools/perf/util/kwork.h b/tools/perf/util/kwork.h
index 53b7327550b8..76fe2a821bcf 100644
--- a/tools/perf/util/kwork.h
+++ b/tools/perf/util/kwork.h
@@ -16,6 +16,7 @@ enum kwork_class_type {
KWORK_CLASS_IRQ,
KWORK_CLASS_SOFTIRQ,
KWORK_CLASS_WORKQUEUE,
+ KWORK_CLASS_SCHED,
KWORK_CLASS_MAX,
};
@@ -23,6 +24,7 @@ enum kwork_report_type {
KWORK_REPORT_RUNTIME,
KWORK_REPORT_LATENCY,
KWORK_REPORT_TIMEHIST,
+ KWORK_REPORT_TOP,
};
enum kwork_trace_type {
@@ -91,6 +93,7 @@ struct kwork_atom_page {
DECLARE_BITMAP(bitmap, NR_ATOM_PER_PAGE);
};
+struct perf_kwork;
struct kwork_class;
struct kwork_work {
/*
@@ -127,6 +130,13 @@ struct kwork_work {
u64 max_latency_start;
u64 max_latency_end;
u64 total_latency;
+
+ /*
+ * top report
+ */
+ u32 cpu_usage;
+ u32 tgid;
+ bool is_kthread;
};
struct kwork_class {
@@ -142,8 +152,10 @@ struct kwork_class {
int (*class_init)(struct kwork_class *class,
struct perf_session *session);
- void (*work_init)(struct kwork_class *class,
+ void (*work_init)(struct perf_kwork *kwork,
+ struct kwork_class *class,
struct kwork_work *work,
+ enum kwork_trace_type src_type,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine);
@@ -152,7 +164,6 @@ struct kwork_class {
char *buf, int len);
};
-struct perf_kwork;
struct trace_kwork_handler {
int (*raise_event)(struct perf_kwork *kwork,
struct kwork_class *class, struct evsel *evsel,
@@ -165,6 +176,23 @@ struct trace_kwork_handler {
int (*exit_event)(struct perf_kwork *kwork,
struct kwork_class *class, struct evsel *evsel,
struct perf_sample *sample, struct machine *machine);
+
+ int (*sched_switch_event)(struct perf_kwork *kwork,
+ struct kwork_class *class, struct evsel *evsel,
+ struct perf_sample *sample, struct machine *machine);
+};
+
+struct __top_cpus_runtime {
+ u64 load;
+ u64 idle;
+ u64 irq;
+ u64 softirq;
+ u64 total;
+};
+
+struct kwork_top_stat {
+ DECLARE_BITMAP(all_cpus_bitmap, MAX_NR_CPUS);
+ struct __top_cpus_runtime *cpus_runtime;
};
struct perf_kwork {
@@ -218,6 +246,11 @@ struct perf_kwork {
u64 all_runtime;
u64 all_count;
u64 nr_skipped_events[KWORK_TRACE_MAX + 1];
+
+ /*
+ * perf kwork top data
+ */
+ struct kwork_top_stat top_stat;
};
struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
@@ -233,6 +266,13 @@ void perf_kwork__report_cleanup_bpf(void);
void perf_kwork__trace_start(void);
void perf_kwork__trace_finish(void);
+int perf_kwork__top_prepare_bpf(struct perf_kwork *kwork);
+int perf_kwork__top_read_bpf(struct perf_kwork *kwork);
+void perf_kwork__top_cleanup_bpf(void);
+
+void perf_kwork__top_start(void);
+void perf_kwork__top_finish(void);
+
#else /* !HAVE_BPF_SKEL */
static inline int
@@ -252,6 +292,23 @@ static inline void perf_kwork__report_cleanup_bpf(void) {}
static inline void perf_kwork__trace_start(void) {}
static inline void perf_kwork__trace_finish(void) {}
+static inline int
+perf_kwork__top_prepare_bpf(struct perf_kwork *kwork __maybe_unused)
+{
+ return -1;
+}
+
+static inline int
+perf_kwork__top_read_bpf(struct perf_kwork *kwork __maybe_unused)
+{
+ return -1;
+}
+
+static inline void perf_kwork__top_cleanup_bpf(void) {}
+
+static inline void perf_kwork__top_start(void) {}
+static inline void perf_kwork__top_finish(void) {}
+
#endif /* HAVE_BPF_SKEL */
#endif /* PERF_UTIL_KWORK_H */
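
These declarations add the plumbing for a new `perf kwork top` mode: a KWORK_CLASS_SCHED class, a KWORK_REPORT_TOP report, per-work CPU-usage fields, a sched_switch handler in trace_kwork_handler, and per-CPU runtime accounting in kwork_top_stat, with -1/no-op fallbacks when BPF skeletons are not built in. A hedged sketch of how a caller is presumably expected to drive the BPF-backed path (the control flow is an assumption; only the function names come from this header):

  static int kwork_top_bpf_sketch(struct perf_kwork *kwork)
  {
          if (perf_kwork__top_prepare_bpf(kwork))
                  return -1;      /* the !HAVE_BPF_SKEL stubs also land here */

          perf_kwork__top_start();
          /* ... workload runs, BPF programs accumulate per-CPU runtime ... */
          perf_kwork__top_finish();

          return perf_kwork__top_read_bpf(kwork);
  }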
diff --git a/tools/perf/util/lock-contention.h b/tools/perf/util/lock-contention.h
index fa16532c971c..1a7248ff3889 100644
--- a/tools/perf/util/lock-contention.h
+++ b/tools/perf/util/lock-contention.h
@@ -9,9 +9,11 @@ struct lock_filter {
int nr_types;
int nr_addrs;
int nr_syms;
+ int nr_cgrps;
unsigned int *types;
unsigned long *addrs;
char **syms;
+ u64 *cgrps;
};
struct lock_stat {
@@ -136,6 +138,7 @@ struct lock_contention {
struct hlist_head *result;
struct lock_filter *filters;
struct lock_contention_fails fails;
+ struct rb_root cgroups;
unsigned long map_nr_entries;
int max_stack;
int stack_skip;
@@ -151,7 +154,7 @@ int lock_contention_prepare(struct lock_contention *con);
int lock_contention_start(void);
int lock_contention_stop(void);
int lock_contention_read(struct lock_contention *con);
-int lock_contention_finish(void);
+int lock_contention_finish(struct lock_contention *con);
#else /* !HAVE_BPF_SKEL */
@@ -162,7 +165,10 @@ static inline int lock_contention_prepare(struct lock_contention *con __maybe_un
static inline int lock_contention_start(void) { return 0; }
static inline int lock_contention_stop(void) { return 0; }
-static inline int lock_contention_finish(void) { return 0; }
+static inline int lock_contention_finish(struct lock_contention *con __maybe_unused)
+{
+ return 0;
+}
static inline int lock_contention_read(struct lock_contention *con __maybe_unused)
{
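
lock_contention_finish() now takes the struct lock_contention context, mirroring prepare/read, so teardown can release state that belongs to the session, for example the new cgroups rb_root and cgroup filter arrays added above, and the !HAVE_BPF_SKEL stub keeps both builds source-compatible. A sketch of the intended pairing (hypothetical caller; exactly what finish() frees is an assumption):

  struct lock_contention con = {
          .filters   = &filters,   /* hypothetical filter set */
          .max_stack = 8,
  };

  if (!lock_contention_prepare(&con)) {
          lock_contention_start();
          /* ... profiled workload ... */
          lock_contention_stop();
          lock_contention_read(&con);
  }
  lock_contention_finish(&con);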
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 88f31b3a63ac..90c750150b19 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -67,7 +67,6 @@ static void machine__threads_init(struct machine *machine)
threads->entries = RB_ROOT_CACHED;
init_rwsem(&threads->lock);
threads->nr = 0;
- INIT_LIST_HEAD(&threads->dead);
threads->last_match = NULL;
}
}
@@ -969,7 +968,7 @@ static int machine__process_ksymbol_unregister(struct machine *machine,
if (!map)
return 0;
- if (RC_CHK_ACCESS(map) != RC_CHK_ACCESS(machine->vmlinux_map))
+ if (!RC_CHK_EQUAL(map, machine->vmlinux_map))
maps__remove(machine__kernel_maps(machine), map);
else {
struct dso *dso = map__dso(map);
@@ -2058,7 +2057,7 @@ static void __machine__remove_thread(struct machine *machine, struct thread_rb_n
if (!nd)
nd = thread_rb_node__find(th, &threads->entries.rb_root);
- if (threads->last_match && RC_CHK_ACCESS(threads->last_match) == RC_CHK_ACCESS(th))
+ if (threads->last_match && RC_CHK_EQUAL(threads->last_match, th))
threads__set_last_match(threads, NULL);
if (lock)
@@ -2213,9 +2212,7 @@ int machine__process_event(struct machine *machine, union perf_event *event,
static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
- if (!regexec(regex, sym->name, 0, NULL, 0))
- return true;
- return false;
+ return regexec(regex, sym->name, 0, NULL, 0) == 0;
}
static void ip__resolve_ams(struct thread *thread,
@@ -2392,8 +2389,7 @@ static int add_callchain_ip(struct thread *thread,
iter_cycles, branch_from, srcline);
out:
addr_location__exit(&al);
- maps__put(ms.maps);
- map__put(ms.map);
+ map_symbol__exit(&ms);
return err;
}
@@ -2624,16 +2620,18 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
save_lbr_cursor_node(thread, cursor, i);
}
- /* Add LBR ip from first entries.to */
- ip = entries[0].to;
- flags = &entries[0].flags;
- *branch_from = entries[0].from;
- err = add_callchain_ip(thread, cursor, parent,
- root_al, &cpumode, ip,
- true, flags, NULL,
- *branch_from);
- if (err)
- return err;
+ if (lbr_nr > 0) {
+ /* Add LBR ip from first entries.to */
+ ip = entries[0].to;
+ flags = &entries[0].flags;
+ *branch_from = entries[0].from;
+ err = add_callchain_ip(thread, cursor, parent,
+ root_al, &cpumode, ip,
+ true, flags, NULL,
+ *branch_from);
+ if (err)
+ return err;
+ }
return 0;
}
@@ -3117,8 +3115,7 @@ static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms
if (ret != 0)
return ret;
}
- map__put(ilist_ms.map);
- maps__put(ilist_ms.maps);
+ map_symbol__exit(&ilist_ms);
return ret;
}
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index d034ecaf89c1..1279acda6a8a 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -35,7 +35,6 @@ struct threads {
struct rb_root_cached entries;
struct rw_semaphore lock;
unsigned int nr;
- struct list_head dead;
struct thread *last_match;
};
diff --git a/tools/perf/util/map_symbol.c b/tools/perf/util/map_symbol.c
new file mode 100644
index 000000000000..bef5079f2403
--- /dev/null
+++ b/tools/perf/util/map_symbol.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "map_symbol.h"
+#include "maps.h"
+#include "map.h"
+
+void map_symbol__exit(struct map_symbol *ms)
+{
+ maps__zput(ms->maps);
+ map__zput(ms->map);
+}
+
+void addr_map_symbol__exit(struct addr_map_symbol *ams)
+{
+ map_symbol__exit(&ams->ms);
+}
diff --git a/tools/perf/util/map_symbol.h b/tools/perf/util/map_symbol.h
index e08817b0c30f..72d5ed938ed6 100644
--- a/tools/perf/util/map_symbol.h
+++ b/tools/perf/util/map_symbol.h
@@ -22,4 +22,8 @@ struct addr_map_symbol {
u64 phys_addr;
u64 data_page_size;
};
+
+void map_symbol__exit(struct map_symbol *ms);
+void addr_map_symbol__exit(struct addr_map_symbol *ams);
+
#endif // __PERF_MAP_SYMBOL
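
With the pair exported here, any code that fills a struct map_symbol (or the ms embedded in addr_map_symbol) with counted references can release them in one call instead of open-coding the two puts. A minimal usage sketch (the resolution step is only described, not shown):

  struct map_symbol ms = {};

  /* ... address resolution fills ms.maps, ms.map and ms.sym,
   *     taking a reference on the first two ... */

  map_symbol__exit(&ms);   /* puts and NULLs ms.maps and ms.map */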
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index 39ffe8ceb380..954b235e12e5 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -185,7 +185,6 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
{
int i = *argv_nr, k = 0;
struct perf_mem_event *e;
- struct perf_pmu *pmu;
for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
e = perf_mem_events__ptr(j);
@@ -202,6 +201,8 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
rec_argv[i++] = "-e";
rec_argv[i++] = perf_mem_events__name(j, NULL);
} else {
+ struct perf_pmu *pmu = NULL;
+
if (!e->supported) {
perf_mem_events__print_unsupport_hybrid(e, j);
return -1;
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 6231044a491e..0484736d9fe4 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -498,7 +498,7 @@ static int metricgroup__sys_event_iter(const struct pmu_metric *pm,
while ((pmu = perf_pmus__scan(pmu))) {
- if (!pmu->id || strcmp(pmu->id, pm->compat))
+ if (!pmu->id || !pmu_uncore_identifier_match(pm->compat, pmu->id))
continue;
return d->fn(pm, table, d->data);
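
The Compat field of a sys event is now treated as an extended regular expression that must cover the whole PMU identifier, instead of requiring an exact strcmp() match; pmu_uncore_identifier_match() is added in the tools/perf/util/pmu.c hunk further down. A usage sketch with made-up identifier strings:

  pmu_uncore_identifier_match("0x0000003[03]", "0x00000030");  /* true: regex spans the whole id  */
  pmu_uncore_identifier_match("0030",          "0x00000030");  /* false: partial matches rejected */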
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 65608a3cba81..aa2f5c6fc7fc 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -34,8 +34,9 @@
#ifdef PARSER_DEBUG
extern int parse_events_debug;
#endif
-static int get_config_terms(struct list_head *head_config,
- struct list_head *head_terms __maybe_unused);
+static int get_config_terms(struct parse_events_terms *head_config, struct list_head *head_terms);
+static int parse_events_terms__copy(const struct parse_events_terms *src,
+ struct parse_events_terms *dest);
struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = {
@@ -153,26 +154,27 @@ const char *event_type(int type)
return "unknown";
}
-static char *get_config_str(struct list_head *head_terms, enum parse_events__term_type type_term)
+static char *get_config_str(struct parse_events_terms *head_terms,
+ enum parse_events__term_type type_term)
{
struct parse_events_term *term;
if (!head_terms)
return NULL;
- list_for_each_entry(term, head_terms, list)
+ list_for_each_entry(term, &head_terms->terms, list)
if (term->type_term == type_term)
return term->val.str;
return NULL;
}
-static char *get_config_metric_id(struct list_head *head_terms)
+static char *get_config_metric_id(struct parse_events_terms *head_terms)
{
return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
}
-static char *get_config_name(struct list_head *head_terms)
+static char *get_config_name(struct parse_events_terms *head_terms)
{
return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
}
@@ -188,11 +190,11 @@ static char *get_config_name(struct list_head *head_terms)
* @config_terms: the list of terms that may contain a raw term.
* @pmu: the PMU to scan for events from.
*/
-static void fix_raw(struct list_head *config_terms, struct perf_pmu *pmu)
+static void fix_raw(struct parse_events_terms *config_terms, struct perf_pmu *pmu)
{
struct parse_events_term *term;
- list_for_each_entry(term, config_terms, list) {
+ list_for_each_entry(term, &config_terms->terms, list) {
u64 num;
if (term->type_term != PARSE_EVENTS__TERM_TYPE_RAW)
@@ -356,7 +358,7 @@ static int config_term_common(struct perf_event_attr *attr,
struct parse_events_term *term,
struct parse_events_error *err);
static int config_attr(struct perf_event_attr *attr,
- struct list_head *head,
+ struct parse_events_terms *head,
struct parse_events_error *err,
config_term_func_t config_term);
@@ -442,7 +444,7 @@ bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
struct parse_events_state *parse_state,
- struct list_head *head_config)
+ struct parse_events_terms *head_config)
{
struct perf_pmu *pmu = NULL;
bool found_supported = false;
@@ -520,7 +522,7 @@ static void tracepoint_error(struct parse_events_error *e, int err,
static int add_tracepoint(struct list_head *list, int *idx,
const char *sys_name, const char *evt_name,
struct parse_events_error *err,
- struct list_head *head_config, void *loc_)
+ struct parse_events_terms *head_config, void *loc_)
{
YYLTYPE *loc = loc_;
struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, (*idx)++);
@@ -545,7 +547,7 @@ static int add_tracepoint(struct list_head *list, int *idx,
static int add_tracepoint_multi_event(struct list_head *list, int *idx,
const char *sys_name, const char *evt_name,
struct parse_events_error *err,
- struct list_head *head_config, YYLTYPE *loc)
+ struct parse_events_terms *head_config, YYLTYPE *loc)
{
char *evt_path;
struct dirent *evt_ent;
@@ -593,7 +595,7 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx,
static int add_tracepoint_event(struct list_head *list, int *idx,
const char *sys_name, const char *evt_name,
struct parse_events_error *err,
- struct list_head *head_config, YYLTYPE *loc)
+ struct parse_events_terms *head_config, YYLTYPE *loc)
{
return strpbrk(evt_name, "*?") ?
add_tracepoint_multi_event(list, idx, sys_name, evt_name,
@@ -605,7 +607,7 @@ static int add_tracepoint_event(struct list_head *list, int *idx,
static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
const char *sys_name, const char *evt_name,
struct parse_events_error *err,
- struct list_head *head_config, YYLTYPE *loc)
+ struct parse_events_terms *head_config, YYLTYPE *loc)
{
struct dirent *events_ent;
DIR *events_dir;
@@ -680,7 +682,7 @@ do { \
int parse_events_add_breakpoint(struct parse_events_state *parse_state,
struct list_head *list,
u64 addr, char *type, u64 len,
- struct list_head *head_config __maybe_unused)
+ struct parse_events_terms *head_config)
{
struct perf_event_attr attr;
LIST_HEAD(config_terms);
@@ -1066,21 +1068,20 @@ static int config_term_tracepoint(struct perf_event_attr *attr,
#endif
static int config_attr(struct perf_event_attr *attr,
- struct list_head *head,
+ struct parse_events_terms *head,
struct parse_events_error *err,
config_term_func_t config_term)
{
struct parse_events_term *term;
- list_for_each_entry(term, head, list)
+ list_for_each_entry(term, &head->terms, list)
if (config_term(attr, term, err))
return -EINVAL;
return 0;
}
-static int get_config_terms(struct list_head *head_config,
- struct list_head *head_terms __maybe_unused)
+static int get_config_terms(struct parse_events_terms *head_config, struct list_head *head_terms)
{
#define ADD_CONFIG_TERM(__type, __weak) \
struct evsel_config_term *__t; \
@@ -1113,7 +1114,7 @@ do { \
struct parse_events_term *term;
- list_for_each_entry(term, head_config, list) {
+ list_for_each_entry(term, &head_config->terms, list) {
switch (term->type_term) {
case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
@@ -1194,14 +1195,14 @@ do { \
* Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
* each bit of attr->config that the user has changed.
*/
-static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config,
+static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head_config,
struct list_head *head_terms)
{
struct parse_events_term *term;
u64 bits = 0;
int type;
- list_for_each_entry(term, head_config, list) {
+ list_for_each_entry(term, &head_config->terms, list) {
switch (term->type_term) {
case PARSE_EVENTS__TERM_TYPE_USER:
type = perf_pmu__format_type(pmu, term->config);
@@ -1251,7 +1252,7 @@ static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config,
int parse_events_add_tracepoint(struct list_head *list, int *idx,
const char *sys, const char *event,
struct parse_events_error *err,
- struct list_head *head_config, void *loc_)
+ struct parse_events_terms *head_config, void *loc_)
{
YYLTYPE *loc = loc_;
#ifdef HAVE_LIBTRACEEVENT
@@ -1284,7 +1285,7 @@ int parse_events_add_tracepoint(struct list_head *list, int *idx,
static int __parse_events_add_numeric(struct parse_events_state *parse_state,
struct list_head *list,
struct perf_pmu *pmu, u32 type, u32 extended_type,
- u64 config, struct list_head *head_config)
+ u64 config, struct parse_events_terms *head_config)
{
struct perf_event_attr attr;
LIST_HEAD(config_terms);
@@ -1320,7 +1321,7 @@ static int __parse_events_add_numeric(struct parse_events_state *parse_state,
int parse_events_add_numeric(struct parse_events_state *parse_state,
struct list_head *list,
u32 type, u64 config,
- struct list_head *head_config,
+ struct parse_events_terms *head_config,
bool wildcard)
{
struct perf_pmu *pmu = NULL;
@@ -1369,7 +1370,7 @@ static bool config_term_percore(struct list_head *config_terms)
int parse_events_add_pmu(struct parse_events_state *parse_state,
struct list_head *list, const char *name,
- struct list_head *head_config,
+ const struct parse_events_terms *const_parsed_terms,
bool auto_merge_stats, void *loc_)
{
struct perf_event_attr attr;
@@ -1379,6 +1380,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
struct parse_events_error *err = parse_state->error;
YYLTYPE *loc = loc_;
LIST_HEAD(config_terms);
+ struct parse_events_terms parsed_terms;
pmu = parse_state->fake_pmu ?: perf_pmus__find(name);
@@ -1392,32 +1394,37 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
return -EINVAL;
}
+ parse_events_terms__init(&parsed_terms);
+ if (const_parsed_terms) {
+ int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);
+
+ if (ret)
+ return ret;
+ }
+
if (verbose > 1) {
struct strbuf sb;
strbuf_init(&sb, /*hint=*/ 0);
- if (pmu->selectable && !head_config) {
+ if (pmu->selectable && list_empty(&parsed_terms.terms)) {
strbuf_addf(&sb, "%s//", name);
} else {
strbuf_addf(&sb, "%s/", name);
- parse_events_term__to_strbuf(head_config, &sb);
+ parse_events_terms__to_strbuf(&parsed_terms, &sb);
strbuf_addch(&sb, '/');
}
fprintf(stderr, "Attempt to add: %s\n", sb.buf);
strbuf_release(&sb);
}
- if (head_config)
- fix_raw(head_config, pmu);
+ fix_raw(&parsed_terms, pmu);
+
+ memset(&attr, 0, sizeof(attr));
+ if (pmu->perf_event_attr_init_default)
+ pmu->perf_event_attr_init_default(pmu, &attr);
- if (pmu->default_config) {
- memcpy(&attr, pmu->default_config,
- sizeof(struct perf_event_attr));
- } else {
- memset(&attr, 0, sizeof(attr));
- }
attr.type = pmu->type;
- if (!head_config) {
+ if (list_empty(&parsed_terms.terms)) {
evsel = __add_event(list, &parse_state->idx, &attr,
/*init_attr=*/true, /*name=*/NULL,
/*metric_id=*/NULL, pmu,
@@ -1426,14 +1433,16 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
return evsel ? 0 : -ENOMEM;
}
- if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, head_config, &info, err))
+ if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, &parsed_terms, &info, err)) {
+ parse_events_terms__exit(&parsed_terms);
return -EINVAL;
+ }
if (verbose > 1) {
struct strbuf sb;
strbuf_init(&sb, /*hint=*/ 0);
- parse_events_term__to_strbuf(head_config, &sb);
+ parse_events_terms__to_strbuf(&parsed_terms, &sb);
fprintf(stderr, "..after resolving event: %s/%s/\n", name, sb.buf);
strbuf_release(&sb);
}
@@ -1442,39 +1451,53 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
* Configure hardcoded terms first, no need to check
* return value when called with fail == 0 ;)
*/
- if (config_attr(&attr, head_config, parse_state->error, config_term_pmu))
+ if (config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
+ parse_events_terms__exit(&parsed_terms);
return -EINVAL;
+ }
- if (get_config_terms(head_config, &config_terms))
+ if (get_config_terms(&parsed_terms, &config_terms)) {
+ parse_events_terms__exit(&parsed_terms);
return -ENOMEM;
+ }
/*
* When using default config, record which bits of attr->config were
* changed by the user.
*/
- if (pmu->default_config && get_config_chgs(pmu, head_config, &config_terms))
+ if (pmu->perf_event_attr_init_default &&
+ get_config_chgs(pmu, &parsed_terms, &config_terms)) {
+ parse_events_terms__exit(&parsed_terms);
return -ENOMEM;
+ }
- if (!parse_state->fake_pmu && perf_pmu__config(pmu, &attr, head_config, parse_state->error)) {
+ if (!parse_state->fake_pmu &&
+ perf_pmu__config(pmu, &attr, &parsed_terms, parse_state->error)) {
free_config_terms(&config_terms);
+ parse_events_terms__exit(&parsed_terms);
return -EINVAL;
}
evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
- get_config_name(head_config),
- get_config_metric_id(head_config), pmu,
+ get_config_name(&parsed_terms),
+ get_config_metric_id(&parsed_terms), pmu,
&config_terms, auto_merge_stats, /*cpu_list=*/NULL);
- if (!evsel)
+ if (!evsel) {
+ parse_events_terms__exit(&parsed_terms);
return -ENOMEM;
+ }
if (evsel->name)
evsel->use_config_name = true;
evsel->percore = config_term_percore(&evsel->config_terms);
- if (parse_state->fake_pmu)
+ if (parse_state->fake_pmu) {
+ parse_events_terms__exit(&parsed_terms);
return 0;
+ }
+ parse_events_terms__exit(&parsed_terms);
free((char *)evsel->unit);
evsel->unit = strdup(info.unit);
evsel->scale = info.scale;
@@ -1484,27 +1507,29 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
}
int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
- char *str, struct list_head *head,
+ const char *event_name,
+ const struct parse_events_terms *const_parsed_terms,
struct list_head **listp, void *loc_)
{
struct parse_events_term *term;
struct list_head *list = NULL;
- struct list_head *orig_head = NULL;
struct perf_pmu *pmu = NULL;
YYLTYPE *loc = loc_;
int ok = 0;
const char *config;
+ struct parse_events_terms parsed_terms;
*listp = NULL;
- if (!head) {
- head = malloc(sizeof(struct list_head));
- if (!head)
- goto out_err;
+ parse_events_terms__init(&parsed_terms);
+ if (const_parsed_terms) {
+ int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);
- INIT_LIST_HEAD(head);
+ if (ret)
+ return ret;
}
- config = strdup(str);
+
+ config = strdup(event_name);
if (!config)
goto out_err;
@@ -1515,7 +1540,7 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
zfree(&config);
goto out_err;
}
- list_add_tail(&term->list, head);
+ list_add_tail(&term->list, &parsed_terms.terms);
/* Add it for all PMUs that support the alias */
list = malloc(sizeof(struct list_head));
@@ -1530,44 +1555,42 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
if (parse_events__filter_pmu(parse_state, pmu))
continue;
- if (!perf_pmu__have_event(pmu, str))
+ if (!perf_pmu__have_event(pmu, event_name))
continue;
auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
- parse_events_copy_term_list(head, &orig_head);
if (!parse_events_add_pmu(parse_state, list, pmu->name,
- orig_head, auto_merge_stats, loc)) {
+ &parsed_terms, auto_merge_stats, loc)) {
struct strbuf sb;
strbuf_init(&sb, /*hint=*/ 0);
- parse_events_term__to_strbuf(orig_head, &sb);
- pr_debug("%s -> %s/%s/\n", str, pmu->name, sb.buf);
+ parse_events_terms__to_strbuf(&parsed_terms, &sb);
+ pr_debug("%s -> %s/%s/\n", event_name, pmu->name, sb.buf);
strbuf_release(&sb);
ok++;
}
- parse_events_terms__delete(orig_head);
}
if (parse_state->fake_pmu) {
- if (!parse_events_add_pmu(parse_state, list, str, head,
+ if (!parse_events_add_pmu(parse_state, list, event_name, &parsed_terms,
/*auto_merge_stats=*/true, loc)) {
struct strbuf sb;
strbuf_init(&sb, /*hint=*/ 0);
- parse_events_term__to_strbuf(head, &sb);
- pr_debug("%s -> %s/%s/\n", str, "fake_pmu", sb.buf);
+ parse_events_terms__to_strbuf(&parsed_terms, &sb);
+ pr_debug("%s -> %s/%s/\n", event_name, "fake_pmu", sb.buf);
strbuf_release(&sb);
ok++;
}
}
out_err:
+ parse_events_terms__exit(&parsed_terms);
if (ok)
*listp = list;
else
free(list);
- parse_events_terms__delete(head);
return ok ? 0 : -1;
}
@@ -1832,7 +1855,7 @@ static int parse_events__scanner(const char *str,
/*
* parse event config string, return a list of event terms.
*/
-int parse_events_terms(struct list_head *terms, const char *str, FILE *input)
+int parse_events_terms(struct parse_events_terms *terms, const char *str, FILE *input)
{
struct parse_events_state parse_state = {
.terms = NULL,
@@ -1841,14 +1864,10 @@ int parse_events_terms(struct list_head *terms, const char *str, FILE *input)
int ret;
ret = parse_events__scanner(str, input, &parse_state);
+ if (!ret)
+ list_splice(&parse_state.terms->terms, &terms->terms);
- if (!ret) {
- list_splice(parse_state.terms, terms);
- zfree(&parse_state.terms);
- return 0;
- }
-
- parse_events_terms__delete(parse_state.terms);
+ zfree(&parse_state.terms);
return ret;
}
@@ -2530,8 +2549,10 @@ int parse_events_term__clone(struct parse_events_term **new,
return new_term(new, &temp, /*str=*/NULL, term->val.num);
str = strdup(term->val.str);
- if (!str)
+ if (!str) {
+ zfree(&temp.config);
return -ENOMEM;
+ }
return new_term(new, &temp, str, /*num=*/0);
}
@@ -2544,58 +2565,56 @@ void parse_events_term__delete(struct parse_events_term *term)
free(term);
}
-int parse_events_copy_term_list(struct list_head *old,
- struct list_head **new)
+static int parse_events_terms__copy(const struct parse_events_terms *src,
+ struct parse_events_terms *dest)
{
- struct parse_events_term *term, *n;
- int ret;
-
- if (!old) {
- *new = NULL;
- return 0;
- }
+ struct parse_events_term *term;
- *new = malloc(sizeof(struct list_head));
- if (!*new)
- return -ENOMEM;
- INIT_LIST_HEAD(*new);
+ list_for_each_entry (term, &src->terms, list) {
+ struct parse_events_term *n;
+ int ret;
- list_for_each_entry (term, old, list) {
ret = parse_events_term__clone(&n, term);
if (ret)
return ret;
- list_add_tail(&n->list, *new);
+
+ list_add_tail(&n->list, &dest->terms);
}
return 0;
}
-void parse_events_terms__purge(struct list_head *terms)
+void parse_events_terms__init(struct parse_events_terms *terms)
+{
+ INIT_LIST_HEAD(&terms->terms);
+}
+
+void parse_events_terms__exit(struct parse_events_terms *terms)
{
struct parse_events_term *term, *h;
- list_for_each_entry_safe(term, h, terms, list) {
+ list_for_each_entry_safe(term, h, &terms->terms, list) {
list_del_init(&term->list);
parse_events_term__delete(term);
}
}
-void parse_events_terms__delete(struct list_head *terms)
+void parse_events_terms__delete(struct parse_events_terms *terms)
{
if (!terms)
return;
- parse_events_terms__purge(terms);
+ parse_events_terms__exit(terms);
free(terms);
}
-int parse_events_term__to_strbuf(struct list_head *term_list, struct strbuf *sb)
+int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb)
{
struct parse_events_term *term;
bool first = true;
- if (!term_list)
+ if (!terms)
return 0;
- list_for_each_entry(term, term_list, list) {
+ list_for_each_entry(term, &terms->terms, list) {
int ret;
if (!first) {
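
Most of the churn in this file is the move from bare struct list_head term lists to the struct parse_events_terms wrapper declared in the parse-events.h hunk below: iteration goes through &terms->terms, and the malloc/INIT_LIST_HEAD/purge trio becomes parse_events_terms__init()/__exit(), with the new __copy() replacing parse_events_copy_term_list(). A minimal lifecycle sketch for a stack-allocated list (the term string is illustrative):

  struct parse_events_terms terms;
  struct parse_events_term *term;

  parse_events_terms__init(&terms);
  if (!parse_events_terms(&terms, "period=1000,config=0x1f", /*input=*/NULL)) {
          list_for_each_entry(term, &terms.terms, list) {
                  /* inspect term->type_term, term->config, term->val ... */
          }
  }
  parse_events_terms__exit(&terms);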
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 594e5d2dc67f..63c0a36a4bf1 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -44,7 +44,6 @@ static inline int parse_events(struct evlist *evlist, const char *str,
int parse_event(struct evlist *evlist, const char *str);
-int parse_events_terms(struct list_head *terms, const char *str, FILE *input);
int parse_filter(const struct option *opt, const char *str, int unset);
int exclude_perf(const struct option *opt, const char *arg, int unset);
@@ -140,6 +139,11 @@ struct parse_events_error {
char *first_help;
};
+/* A wrapper around a list of terms for the sake of better type safety. */
+struct parse_events_terms {
+ struct list_head terms;
+};
+
struct parse_events_state {
/* The list parsed events are placed on. */
struct list_head list;
@@ -148,7 +152,7 @@ struct parse_events_state {
/* Error information. */
struct parse_events_error *error;
/* Holds returned terms for term parsing. */
- struct list_head *terms;
+ struct parse_events_terms *terms;
/* Start token. */
int stoken;
/* Special fake PMU marker for testing. */
@@ -181,35 +185,38 @@ int parse_events_term__term(struct parse_events_term **term,
int parse_events_term__clone(struct parse_events_term **new,
struct parse_events_term *term);
void parse_events_term__delete(struct parse_events_term *term);
-void parse_events_terms__delete(struct list_head *terms);
-void parse_events_terms__purge(struct list_head *terms);
-int parse_events_term__to_strbuf(struct list_head *term_list, struct strbuf *sb);
+
+void parse_events_terms__delete(struct parse_events_terms *terms);
+void parse_events_terms__init(struct parse_events_terms *terms);
+void parse_events_terms__exit(struct parse_events_terms *terms);
+int parse_events_terms(struct parse_events_terms *terms, const char *str, FILE *input);
+int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb);
int parse_events__modifier_event(struct list_head *list, char *str, bool add);
int parse_events__modifier_group(struct list_head *list, char *event_mod);
int parse_events_name(struct list_head *list, const char *name);
int parse_events_add_tracepoint(struct list_head *list, int *idx,
const char *sys, const char *event,
struct parse_events_error *error,
- struct list_head *head_config, void *loc);
+ struct parse_events_terms *head_config, void *loc);
int parse_events_add_numeric(struct parse_events_state *parse_state,
struct list_head *list,
u32 type, u64 config,
- struct list_head *head_config,
+ struct parse_events_terms *head_config,
bool wildcard);
int parse_events_add_tool(struct parse_events_state *parse_state,
struct list_head *list,
int tool_event);
int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
struct parse_events_state *parse_state,
- struct list_head *head_config);
+ struct parse_events_terms *head_config);
int parse_events__decode_legacy_cache(const char *name, int pmu_type, __u64 *config);
int parse_events_add_breakpoint(struct parse_events_state *parse_state,
struct list_head *list,
u64 addr, char *type, u64 len,
- struct list_head *head_config);
+ struct parse_events_terms *head_config);
int parse_events_add_pmu(struct parse_events_state *parse_state,
struct list_head *list, const char *name,
- struct list_head *head_config,
+ const struct parse_events_terms *const_parsed_terms,
bool auto_merge_stats, void *loc);
struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
@@ -217,13 +224,10 @@ struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
struct perf_pmu *pmu);
int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
- char *str,
- struct list_head *head_config,
+ const char *event_name,
+ const struct parse_events_terms *const_parsed_terms,
struct list_head **listp, void *loc);
-int parse_events_copy_term_list(struct list_head *old,
- struct list_head **new);
-
void parse_events__set_leader(char *name, struct list_head *list);
void parse_events_update_lists(struct list_head *list_event,
struct list_head *list_all);
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 4ef4b6f171a0..e86c45675e1d 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -120,7 +120,7 @@ static int term(yyscan_t scanner, enum parse_events__term_type type)
{
YYSTYPE *yylval = parse_events_get_lval(scanner);
- yylval->num = type;
+ yylval->term_type = type;
return PE_TERM;
}
@@ -156,8 +156,8 @@ event_pmu [^,{}/]+[/][^/]*[/][^,{}/]*
event [^,{}/]+
num_dec [0-9]+
-num_hex 0x[a-fA-F0-9]+
-num_raw_hex [a-fA-F0-9]+
+num_hex 0x[a-fA-F0-9]{1,16}
+num_raw_hex [a-fA-F0-9]{1,16}
name [a-zA-Z_*?\[\]][a-zA-Z0-9_*?.\[\]!\-]*
name_tag [\'][a-zA-Z_*?\[\]][a-zA-Z0-9_*?\-,\.\[\]:=]*[\']
name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?.:]*
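
Capping num_hex and num_raw_hex at 16 hex digits bounds raw constants to what a u64 can hold (16 hex digits = 64 bits), so the longest valid value still scans while anything longer presumably no longer tokenizes as a single number and surfaces as a parse error rather than a silently truncated value:

  /* 0xffffffffffffffff  -> 16 digits, fits in __u64 (UINT64_MAX)
   * 0x1ffffffffffffffff -> 17 digits, would need 65 bits          */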
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 21bfe7e0d944..de098caf0c1c 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -6,14 +6,13 @@
%{
+#ifndef NDEBUG
#define YYDEBUG 1
+#endif
#include <errno.h>
-#include <fnmatch.h>
-#include <stdio.h>
#include <linux/compiler.h>
#include <linux/types.h>
-#include <linux/zalloc.h>
#include "pmu.h"
#include "pmus.h"
#include "evsel.h"
@@ -70,7 +69,7 @@ static void free_list_evsel(struct list_head* list_evsel)
%type <num> PE_VALUE_SYM_HW
%type <num> PE_VALUE_SYM_SW
%type <num> PE_VALUE_SYM_TOOL
-%type <num> PE_TERM
+%type <term_type> PE_TERM
%type <num> value_sym
%type <str> PE_RAW
%type <str> PE_NAME
@@ -79,7 +78,7 @@ static void free_list_evsel(struct list_head* list_evsel)
%type <str> PE_MODIFIER_BP
%type <str> PE_EVENT_NAME
%type <str> PE_DRV_CFG_TERM
-%type <str> name_or_raw name_or_legacy
+%type <str> name_or_raw
%destructor { free ($$); } <str>
%type <term> event_term
%destructor { parse_events_term__delete ($$); } <term>
@@ -104,6 +103,7 @@ static void free_list_evsel(struct list_head* list_evsel)
%type <list_evsel> groups
%destructor { free_list_evsel ($$); } <list_evsel>
%type <tracepoint_name> tracepoint_name
+%destructor { free ($$.sys); free ($$.event); } <tracepoint_name>
%type <hardware_term> PE_TERM_HW
%destructor { free ($$.str); } <hardware_term>
@@ -111,8 +111,9 @@ static void free_list_evsel(struct list_head* list_evsel)
{
char *str;
u64 num;
+ enum parse_events__term_type term_type;
struct list_head *list_evsel;
- struct list_head *list_terms;
+ struct parse_events_terms *list_terms;
struct parse_events_term *term;
struct tracepoint_name {
char *sys;
@@ -273,23 +274,18 @@ event_pmu:
PE_NAME opt_pmu_config
{
struct parse_events_state *parse_state = _parse_state;
- struct list_head *list = NULL, *orig_terms = NULL, *terms= NULL;
+ /* List of created evsels. */
+ struct list_head *list = NULL;
char *pattern = NULL;
#define CLEANUP \
do { \
parse_events_terms__delete($2); \
- parse_events_terms__delete(orig_terms); \
free(list); \
free($1); \
free(pattern); \
} while(0)
- if (parse_events_copy_term_list($2, &orig_terms)) {
- CLEANUP;
- YYNOMEM;
- }
-
list = alloc_list();
if (!list) {
CLEANUP;
@@ -319,16 +315,11 @@ PE_NAME opt_pmu_config
!perf_pmu__match(pattern, pmu->alias_name, $1)) {
bool auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
- if (parse_events_copy_term_list(orig_terms, &terms)) {
- CLEANUP;
- YYNOMEM;
- }
- if (!parse_events_add_pmu(parse_state, list, pmu->name, terms,
+ if (!parse_events_add_pmu(parse_state, list, pmu->name, $2,
auto_merge_stats, &@1)) {
ok++;
parse_state->wild_card_pmus = true;
}
- parse_events_terms__delete(terms);
}
}
@@ -336,7 +327,6 @@ PE_NAME opt_pmu_config
/* Failure to add, assume $1 is an event name. */
zfree(&list);
ok = !parse_events_multi_pmu_add(parse_state, $1, $2, &list, &@1);
- $2 = NULL;
}
if (!ok) {
struct parse_events_error *error = parse_state->error;
@@ -654,33 +644,31 @@ start_terms: event_config
event_config:
event_config ',' event_term
{
- struct list_head *head = $1;
+ struct parse_events_terms *head = $1;
struct parse_events_term *term = $3;
if (!head) {
parse_events_term__delete(term);
YYABORT;
}
- list_add_tail(&term->list, head);
+ list_add_tail(&term->list, &head->terms);
$$ = $1;
}
|
event_term
{
- struct list_head *head = malloc(sizeof(*head));
+ struct parse_events_terms *head = malloc(sizeof(*head));
struct parse_events_term *term = $1;
if (!head)
YYNOMEM;
- INIT_LIST_HEAD(head);
- list_add_tail(&term->list, head);
+ parse_events_terms__init(head);
+ list_add_tail(&term->list, &head->terms);
$$ = head;
}
name_or_raw: PE_RAW | PE_NAME | PE_LEGACY_CACHE
-name_or_legacy: PE_NAME | PE_LEGACY_CACHE
-
event_term:
PE_RAW
{
@@ -695,7 +683,7 @@ PE_RAW
$$ = term;
}
|
-name_or_raw '=' name_or_legacy
+name_or_raw '=' name_or_raw
{
struct parse_events_term *term;
int err = parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER, $1, $3, &@1, &@3);
@@ -775,11 +763,10 @@ PE_TERM_HW
$$ = term;
}
|
-PE_TERM '=' name_or_legacy
+PE_TERM '=' name_or_raw
{
struct parse_events_term *term;
- int err = parse_events_term__str(&term, (enum parse_events__term_type)$1,
- /*config=*/NULL, $3, &@1, &@3);
+ int err = parse_events_term__str(&term, $1, /*config=*/NULL, $3, &@1, &@3);
if (err) {
free($3);
@@ -791,8 +778,7 @@ PE_TERM '=' name_or_legacy
PE_TERM '=' PE_TERM_HW
{
struct parse_events_term *term;
- int err = parse_events_term__str(&term, (enum parse_events__term_type)$1,
- /*config=*/NULL, $3.str, &@1, &@3);
+ int err = parse_events_term__str(&term, $1, /*config=*/NULL, $3.str, &@1, &@3);
if (err) {
free($3.str);
@@ -804,10 +790,7 @@ PE_TERM '=' PE_TERM_HW
PE_TERM '=' PE_TERM
{
struct parse_events_term *term;
- int err = parse_events_term__term(&term,
- (enum parse_events__term_type)$1,
- (enum parse_events__term_type)$3,
- &@1, &@3);
+ int err = parse_events_term__term(&term, $1, $3, &@1, &@3);
if (err)
PE_ABORT(err);
@@ -818,8 +801,9 @@ PE_TERM '=' PE_TERM
PE_TERM '=' PE_VALUE
{
struct parse_events_term *term;
- int err = parse_events_term__num(&term, (enum parse_events__term_type)$1,
- /*config=*/NULL, $3, /*novalue=*/false, &@1, &@3);
+ int err = parse_events_term__num(&term, $1,
+ /*config=*/NULL, $3, /*novalue=*/false,
+ &@1, &@3);
if (err)
PE_ABORT(err);
@@ -830,9 +814,9 @@ PE_TERM '=' PE_VALUE
PE_TERM
{
struct parse_events_term *term;
- int err = parse_events_term__num(&term, (enum parse_events__term_type)$1,
- /*config=*/NULL, /*num=*/1, /*novalue=*/true,
- &@1, /*loc_val=*/NULL);
+ int err = parse_events_term__num(&term, $1,
+ /*config=*/NULL, /*num=*/1, /*novalue=*/true,
+ &@1, /*loc_val=*/NULL);
if (err)
PE_ABORT(err);
diff --git a/tools/perf/util/pfm.c b/tools/perf/util/pfm.c
index 862e4a689868..5ccfe4b64cdf 100644
--- a/tools/perf/util/pfm.c
+++ b/tools/perf/util/pfm.c
@@ -145,7 +145,20 @@ static bool is_libpfm_event_supported(const char *name, struct perf_cpu_map *cpu
evsel->is_libpfm_event = true;
- if (evsel__open(evsel, cpus, threads) < 0)
+ ret = evsel__open(evsel, cpus, threads);
+ if (ret == -EACCES) {
+ /*
+ * This happens if the paranoid value
+ * /proc/sys/kernel/perf_event_paranoid is set to 2.
+ * Re-run with exclude_kernel set; we don't do that
+ * by default as some ARM machines do not support it.
+ *
+ */
+ evsel->core.attr.exclude_kernel = 1;
+ ret = evsel__open(evsel, cpus, threads);
+
+ }
+ if (ret < 0)
result = false;
evsel__close(evsel);
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index d515ba8a0e16..d3c9aa4326be 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -28,6 +28,7 @@
#include "strbuf.h"
#include "fncache.h"
#include "util/evsel_config.h"
+#include <regex.h>
struct perf_pmu perf_pmu__fake = {
.name = "fake",
@@ -52,7 +53,7 @@ struct perf_pmu_alias {
*/
char *topic;
/** @terms: Owned list of the original parsed parameters. */
- struct list_head terms;
+ struct parse_events_terms terms;
/** @list: List element of struct perf_pmu aliases. */
struct list_head list;
/**
@@ -155,7 +156,7 @@ static void __perf_pmu_format__load(struct perf_pmu_format *format, FILE *file)
format->loaded = true;
}
-static void perf_pmu_format__load(struct perf_pmu *pmu, struct perf_pmu_format *format)
+static void perf_pmu_format__load(const struct perf_pmu *pmu, struct perf_pmu_format *format)
{
char path[PATH_MAX];
FILE *file = NULL;
@@ -404,7 +405,7 @@ static void perf_pmu_free_alias(struct perf_pmu_alias *newalias)
zfree(&newalias->long_desc);
zfree(&newalias->topic);
zfree(&newalias->pmu_name);
- parse_events_terms__purge(&newalias->terms);
+ parse_events_terms__exit(&newalias->terms);
free(newalias);
}
@@ -484,7 +485,7 @@ static int update_alias(const struct pmu_event *pe,
assign_str(pe->name, "topic", &data->alias->topic, pe->topic);
data->alias->per_pkg = pe->perpkg;
if (pe->event) {
- parse_events_terms__purge(&data->alias->terms);
+ parse_events_terms__exit(&data->alias->terms);
ret = parse_events_terms(&data->alias->terms, pe->event, /*input=*/NULL);
}
if (!ret && pe->unit) {
@@ -524,7 +525,7 @@ static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name,
if (!alias)
return -ENOMEM;
- INIT_LIST_HEAD(&alias->terms);
+ parse_events_terms__init(&alias->terms);
alias->scale = 1.0;
alias->unit[0] = '\0';
alias->per_pkg = perpkg;
@@ -574,7 +575,7 @@ static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name,
return 0;
}
-static inline bool pmu_alias_info_file(char *name)
+static inline bool pmu_alias_info_file(const char *name)
{
size_t len;
@@ -656,17 +657,17 @@ static int pmu_aliases_parse(struct perf_pmu *pmu)
return 0;
}
-static int pmu_alias_terms(struct perf_pmu_alias *alias,
- struct list_head *terms)
+static int pmu_alias_terms(struct perf_pmu_alias *alias, struct list_head *terms)
{
struct parse_events_term *term, *cloned;
- LIST_HEAD(list);
- int ret;
+ struct parse_events_terms clone_terms;
+
+ parse_events_terms__init(&clone_terms);
+ list_for_each_entry(term, &alias->terms.terms, list) {
+ int ret = parse_events_term__clone(&cloned, term);
- list_for_each_entry(term, &alias->terms, list) {
- ret = parse_events_term__clone(&cloned, term);
if (ret) {
- parse_events_terms__purge(&list);
+ parse_events_terms__exit(&clone_terms);
return ret;
}
/*
@@ -674,9 +675,10 @@ static int pmu_alias_terms(struct perf_pmu_alias *alias,
* which we don't want for implicit terms in aliases.
*/
cloned->weak = true;
- list_add_tail(&cloned->list, &list);
+ list_add_tail(&cloned->list, &clone_terms.terms);
}
- list_splice(&list, terms);
+ list_splice_init(&clone_terms.terms, terms);
+ parse_events_terms__exit(&clone_terms);
return 0;
}
@@ -775,11 +777,6 @@ char *perf_pmu__getcpuid(struct perf_pmu *pmu)
return cpuid;
}
-__weak const struct pmu_events_table *pmu_events_table__find(void)
-{
- return perf_pmu__find_events_table(NULL);
-}
-
__weak const struct pmu_metrics_table *pmu_metrics_table__find(void)
{
return perf_pmu__find_metrics_table(NULL);
@@ -874,6 +871,28 @@ out:
return res;
}
+bool pmu_uncore_identifier_match(const char *compat, const char *id)
+{
+ regex_t re;
+ regmatch_t pmatch[1];
+ int match;
+
+ if (regcomp(&re, compat, REG_EXTENDED) != 0) {
+ /* Warn that the regular expression could not be compiled for this string. */
+ pr_info("Invalid regular expression %s\n", compat);
+ return false;
+ }
+
+ match = !regexec(&re, id, 1, pmatch, 0);
+ if (match) {
+ /* Ensure a full match. */
+ match = pmatch[0].rm_so == 0 && (size_t)pmatch[0].rm_eo == strlen(id);
+ }
+ regfree(&re);
+
+ return match;
+}
+
static int pmu_add_cpu_aliases_map_callback(const struct pmu_event *pe,
const struct pmu_events_table *table __maybe_unused,
void *vdata)
@@ -914,8 +933,8 @@ static int pmu_add_sys_aliases_iter_fn(const struct pmu_event *pe,
if (!pe->compat || !pe->pmu)
return 0;
- if (!strcmp(pmu->id, pe->compat) &&
- pmu_uncore_alias_match(pe->pmu, pmu->name)) {
+ if (pmu_uncore_alias_match(pe->pmu, pmu->name) &&
+ pmu_uncore_identifier_match(pe->compat, pmu->id)) {
perf_pmu__new_alias(pmu,
pe->name,
pe->desc,
@@ -935,22 +954,27 @@ void pmu_add_sys_aliases(struct perf_pmu *pmu)
pmu_for_each_sys_event(pmu_add_sys_aliases_iter_fn, pmu);
}
-struct perf_event_attr * __weak
-perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
+static char *pmu_find_alias_name(struct perf_pmu *pmu, int dirfd)
{
- return NULL;
-}
+ FILE *file = perf_pmu__open_file_at(pmu, dirfd, "alias");
+ char *line = NULL;
+ size_t line_len = 0;
+ ssize_t ret;
-const char * __weak
-pmu_find_real_name(const char *name)
-{
- return name;
-}
+ if (!file)
+ return NULL;
-const char * __weak
-pmu_find_alias_name(const char *name __maybe_unused)
-{
- return NULL;
+ ret = getline(&line, &line_len, file);
+ if (ret < 0) {
+ fclose(file);
+ return NULL;
+ }
+ /* Remove trailing newline. */
+ if (ret > 0 && line[ret - 1] == '\n')
+ line[--ret] = '\0';
+
+ fclose(file);
+ return line;
}
static int pmu_max_precise(int dirfd, struct perf_pmu *pmu)
@@ -961,12 +985,15 @@ static int pmu_max_precise(int dirfd, struct perf_pmu *pmu)
return max_precise;
}
-struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char *lookup_name)
+void __weak
+perf_pmu__arch_init(struct perf_pmu *pmu __maybe_unused)
+{
+}
+
+struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char *name)
{
struct perf_pmu *pmu;
__u32 type;
- const char *name = pmu_find_real_name(lookup_name);
- const char *alias_name;
pmu = zalloc(sizeof(*pmu));
if (!pmu)
@@ -999,23 +1026,17 @@ struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char
pmu->is_core = is_pmu_core(name);
pmu->cpus = pmu_cpumask(dirfd, name, pmu->is_core);
- alias_name = pmu_find_alias_name(name);
- if (alias_name) {
- pmu->alias_name = strdup(alias_name);
- if (!pmu->alias_name)
- goto err;
- }
-
pmu->type = type;
pmu->is_uncore = pmu_is_uncore(dirfd, name);
if (pmu->is_uncore)
pmu->id = pmu_id(name);
pmu->max_precise = pmu_max_precise(dirfd, pmu);
+ pmu->alias_name = pmu_find_alias_name(pmu, dirfd);
pmu->events_table = perf_pmu__find_events_table(pmu);
pmu_add_sys_aliases(pmu);
list_add_tail(&pmu->list, pmus);
- pmu->default_config = perf_pmu__get_default_config(pmu);
+ perf_pmu__arch_init(pmu);
return pmu;
err:
@@ -1110,7 +1131,7 @@ void evsel__set_config_if_unset(struct perf_pmu *pmu, struct evsel *evsel,
}
static struct perf_pmu_format *
-pmu_find_format(struct list_head *formats, const char *name)
+pmu_find_format(const struct list_head *formats, const char *name)
{
struct perf_pmu_format *format;
@@ -1188,12 +1209,12 @@ static __u64 pmu_format_max_value(const unsigned long *format)
* in a config string) later on in the term list.
*/
static int pmu_resolve_param_term(struct parse_events_term *term,
- struct list_head *head_terms,
+ struct parse_events_terms *head_terms,
__u64 *value)
{
struct parse_events_term *t;
- list_for_each_entry(t, head_terms, list) {
+ list_for_each_entry(t, &head_terms->terms, list) {
if (t->type_val == PARSE_EVENTS__TERM_TYPE_NUM &&
t->config && !strcmp(t->config, term->config)) {
t->used = true;
@@ -1208,7 +1229,7 @@ static int pmu_resolve_param_term(struct parse_events_term *term,
return -1;
}
-static char *pmu_formats_string(struct list_head *formats)
+static char *pmu_formats_string(const struct list_head *formats)
{
struct perf_pmu_format *format;
char *str = NULL;
@@ -1234,10 +1255,10 @@ error:
* Setup one of config[12] attr members based on the
* user input data - term parameter.
*/
-static int pmu_config_term(struct perf_pmu *pmu,
+static int pmu_config_term(const struct perf_pmu *pmu,
struct perf_event_attr *attr,
struct parse_events_term *term,
- struct list_head *head_terms,
+ struct parse_events_terms *head_terms,
bool zero, struct parse_events_error *err)
{
struct perf_pmu_format *format;
@@ -1357,15 +1378,15 @@ static int pmu_config_term(struct perf_pmu *pmu,
return 0;
}
-int perf_pmu__config_terms(struct perf_pmu *pmu,
+int perf_pmu__config_terms(const struct perf_pmu *pmu,
struct perf_event_attr *attr,
- struct list_head *head_terms,
+ struct parse_events_terms *terms,
bool zero, struct parse_events_error *err)
{
struct parse_events_term *term;
- list_for_each_entry(term, head_terms, list) {
- if (pmu_config_term(pmu, attr, term, head_terms, zero, err))
+ list_for_each_entry(term, &terms->terms, list) {
+ if (pmu_config_term(pmu, attr, term, terms, zero, err))
return -EINVAL;
}
@@ -1378,10 +1399,10 @@ int perf_pmu__config_terms(struct perf_pmu *pmu,
* 2) pmu format definitions - specified by pmu parameter
*/
int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
- struct list_head *head_terms,
+ struct parse_events_terms *head_terms,
struct parse_events_error *err)
{
- bool zero = !!pmu->default_config;
+ bool zero = !!pmu->perf_event_attr_init_default;
return perf_pmu__config_terms(pmu, attr, head_terms, zero, err);
}
@@ -1472,7 +1493,7 @@ static int check_info_data(struct perf_pmu *pmu,
* Find alias in the terms list and replace it with the terms
* defined for the alias
*/
-int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
+int perf_pmu__check_alias(struct perf_pmu *pmu, struct parse_events_terms *head_terms,
struct perf_pmu_info *info, struct parse_events_error *err)
{
struct parse_events_term *term, *h;
@@ -1489,7 +1510,7 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
info->scale = 0.0;
info->snapshot = false;
- list_for_each_entry_safe(term, h, head_terms, list) {
+ list_for_each_entry_safe(term, h, &head_terms->terms, list) {
alias = pmu_find_alias(pmu, term);
if (!alias)
continue;
@@ -1634,7 +1655,7 @@ static char *format_alias(char *buf, int len, const struct perf_pmu *pmu,
: (int)strlen(pmu->name);
int used = snprintf(buf, len, "%.*s/%s", pmu_name_len, pmu->name, alias->name);
- list_for_each_entry(term, &alias->terms, list) {
+ list_for_each_entry(term, &alias->terms.terms, list) {
if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
used += snprintf(buf + used, sub_non_neg(len, used),
",%s=%s", term->config,
@@ -1693,7 +1714,7 @@ int perf_pmu__for_each_event(struct perf_pmu *pmu, bool skip_duplicate_pmus,
info.desc = event->desc;
info.long_desc = event->long_desc;
info.encoding_desc = buf + buf_used;
- parse_events_term__to_strbuf(&event->terms, &sb);
+ parse_events_terms__to_strbuf(&event->terms, &sb);
buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
"%s/%s/", info.pmu_name, sb.buf) + 1;
info.topic = event->topic;
@@ -1749,7 +1770,7 @@ bool perf_pmu__is_software(const struct perf_pmu *pmu)
return !strcmp(pmu->name, "kprobe") || !strcmp(pmu->name, "uprobe");
}
-FILE *perf_pmu__open_file(struct perf_pmu *pmu, const char *name)
+FILE *perf_pmu__open_file(const struct perf_pmu *pmu, const char *name)
{
char path[PATH_MAX];
@@ -1760,7 +1781,7 @@ FILE *perf_pmu__open_file(struct perf_pmu *pmu, const char *name)
return fopen(path, "r");
}
-FILE *perf_pmu__open_file_at(struct perf_pmu *pmu, int dirfd, const char *name)
+FILE *perf_pmu__open_file_at(const struct perf_pmu *pmu, int dirfd, const char *name)
{
int fd;
@@ -1771,7 +1792,7 @@ FILE *perf_pmu__open_file_at(struct perf_pmu *pmu, int dirfd, const char *name)
return fdopen(fd, "r");
}
-int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt,
+int perf_pmu__scan_file(const struct perf_pmu *pmu, const char *name, const char *fmt,
...)
{
va_list args;
@@ -1788,7 +1809,7 @@ int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt,
return ret;
}
-int perf_pmu__scan_file_at(struct perf_pmu *pmu, int dirfd, const char *name,
+int perf_pmu__scan_file_at(const struct perf_pmu *pmu, int dirfd, const char *name,
const char *fmt, ...)
{
va_list args;
@@ -1805,7 +1826,7 @@ int perf_pmu__scan_file_at(struct perf_pmu *pmu, int dirfd, const char *name,
return ret;
}
-bool perf_pmu__file_exists(struct perf_pmu *pmu, const char *name)
+bool perf_pmu__file_exists(const struct perf_pmu *pmu, const char *name)
{
char path[PATH_MAX];
@@ -2043,26 +2064,8 @@ void perf_pmu__delete(struct perf_pmu *pmu)
perf_cpu_map__put(pmu->cpus);
- zfree(&pmu->default_config);
zfree(&pmu->name);
zfree(&pmu->alias_name);
zfree(&pmu->id);
free(pmu);
}
-
-struct perf_pmu *pmu__find_core_pmu(void)
-{
- struct perf_pmu *pmu = NULL;
-
- while ((pmu = perf_pmus__scan_core(pmu))) {
- /*
- * The cpumap should cover all CPUs. Otherwise, some CPUs may
- * not support some events or have different event IDs.
- */
- if (RC_CHK_ACCESS(pmu->cpus)->nr != cpu__max_cpu().cpu)
- return NULL;
-
- return pmu;
- }
- return NULL;
-}
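
Here the heap-allocated default_config is retired in favour of the perf_event_attr_init_default callback (see the pmu.h hunk below) plus the weak perf_pmu__arch_init() hook that architectures override, and pmu__find_core_pmu() is dropped from this file. A hedged sketch of how an architecture is expected to use the new hook (the function body and the intel_pt name check are illustrative assumptions, not taken from an arch/ hunk in this diff):

  /* Hypothetical arch override installing a per-PMU attr initializer. */
  static void example_attr_init_default(const struct perf_pmu *pmu __maybe_unused,
                                        struct perf_event_attr *attr)
  {
          attr->config = 1;       /* illustrative default bits */
  }

  void perf_pmu__arch_init(struct perf_pmu *pmu)
  {
          if (!strcmp(pmu->name, "intel_pt"))
                  pmu->perf_event_attr_init_default = example_attr_init_default;
  }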
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 6a4e170c61d6..d2895d415f08 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -92,10 +92,11 @@ struct perf_pmu {
*/
int max_precise;
/**
- * @default_config: Optional default perf_event_attr determined in
- * architecture specific code.
+ * @perf_event_attr_init_default: Optional function to default-initialize
+ * PMU-specific parts of the perf_event_attr.
*/
- struct perf_event_attr *default_config;
+ void (*perf_event_attr_init_default)(const struct perf_pmu *pmu,
+ struct perf_event_attr *attr);
/**
* @cpus: Empty or the contents of either of:
* <sysfs>/bus/event_source/devices/<name>/cpumask.
@@ -191,15 +192,15 @@ typedef int (*pmu_event_callback)(void *state, struct pmu_event_info *info);
void pmu_add_sys_aliases(struct perf_pmu *pmu);
int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
- struct list_head *head_terms,
+ struct parse_events_terms *head_terms,
struct parse_events_error *error);
-int perf_pmu__config_terms(struct perf_pmu *pmu,
+int perf_pmu__config_terms(const struct perf_pmu *pmu,
struct perf_event_attr *attr,
- struct list_head *head_terms,
+ struct parse_events_terms *terms,
bool zero, struct parse_events_error *error);
__u64 perf_pmu__format_bits(struct perf_pmu *pmu, const char *name);
int perf_pmu__format_type(struct perf_pmu *pmu, const char *name);
-int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
+int perf_pmu__check_alias(struct perf_pmu *pmu, struct parse_events_terms *head_terms,
struct perf_pmu_info *info, struct parse_events_error *err);
int perf_pmu__find_event(struct perf_pmu *pmu, const char *event, void *state, pmu_event_callback cb);
@@ -222,24 +223,25 @@ bool pmu__name_match(const struct perf_pmu *pmu, const char *pmu_name);
*/
bool perf_pmu__is_software(const struct perf_pmu *pmu);
-FILE *perf_pmu__open_file(struct perf_pmu *pmu, const char *name);
-FILE *perf_pmu__open_file_at(struct perf_pmu *pmu, int dirfd, const char *name);
+FILE *perf_pmu__open_file(const struct perf_pmu *pmu, const char *name);
+FILE *perf_pmu__open_file_at(const struct perf_pmu *pmu, int dirfd, const char *name);
-int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt, ...) __scanf(3, 4);
-int perf_pmu__scan_file_at(struct perf_pmu *pmu, int dirfd, const char *name,
+int perf_pmu__scan_file(const struct perf_pmu *pmu, const char *name, const char *fmt, ...)
+ __scanf(3, 4);
+int perf_pmu__scan_file_at(const struct perf_pmu *pmu, int dirfd, const char *name,
const char *fmt, ...) __scanf(4, 5);
-bool perf_pmu__file_exists(struct perf_pmu *pmu, const char *name);
+bool perf_pmu__file_exists(const struct perf_pmu *pmu, const char *name);
int perf_pmu__test(void);
-struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu);
+void perf_pmu__arch_init(struct perf_pmu *pmu);
void pmu_add_cpu_aliases_table(struct perf_pmu *pmu,
const struct pmu_events_table *table);
char *perf_pmu__getcpuid(struct perf_pmu *pmu);
-const struct pmu_events_table *pmu_events_table__find(void);
const struct pmu_metrics_table *pmu_metrics_table__find(void);
+bool pmu_uncore_identifier_match(const char *compat, const char *id);
int perf_pmu__convert_scale(const char *scale, char **end, double *sval);
@@ -252,8 +254,6 @@ void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu);
int perf_pmu__match(const char *pattern, const char *name, const char *tok);
-const char *pmu_find_real_name(const char *name);
-const char *pmu_find_alias_name(const char *name);
double perf_pmu__cpu_slots_per_cycle(void);
int perf_pmu__event_source_devices_scnprintf(char *pathname, size_t size);
int perf_pmu__pathname_scnprintf(char *buf, size_t size,
@@ -264,6 +264,6 @@ int perf_pmu__pathname_fd(int dirfd, const char *pmu_name, const char *filename,
struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char *lookup_name);
struct perf_pmu *perf_pmu__create_placeholder_core_pmu(struct list_head *core_pmus);
void perf_pmu__delete(struct perf_pmu *pmu);
-struct perf_pmu *pmu__find_core_pmu(void);
+struct perf_pmu *perf_pmus__find_core_pmu(void);
#endif /* __PMU_H */
diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y
index 600c8c158c8e..198907a8a48a 100644
--- a/tools/perf/util/pmu.y
+++ b/tools/perf/util/pmu.y
@@ -5,6 +5,10 @@
%{
+#ifndef NDEBUG
+#define YYDEBUG 1
+#endif
+
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/bitmap.h>
diff --git a/tools/perf/util/pmus.c b/tools/perf/util/pmus.c
index 6631367c756f..ce4931461741 100644
--- a/tools/perf/util/pmus.c
+++ b/tools/perf/util/pmus.c
@@ -10,6 +10,7 @@
#include <pthread.h>
#include <string.h>
#include <unistd.h>
+#include "cpumap.h"
#include "debug.h"
#include "evsel.h"
#include "pmus.h"
@@ -36,6 +37,8 @@ static LIST_HEAD(other_pmus);
static bool read_sysfs_core_pmus;
static bool read_sysfs_all_pmus;
+static void pmu_read_sysfs(bool core_only);
+
int pmu_name_len_no_suffix(const char *str, unsigned long *num)
{
int orig_len, len;
@@ -123,6 +126,14 @@ struct perf_pmu *perf_pmus__find(const char *name)
pmu = perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name);
close(dirfd);
+ if (!pmu) {
+ /*
+ * Looking up an individual PMU failed. This may mean the name is
+ * an alias, so read the PMUs from sysfs and try the lookup again.
+ */
+ pmu_read_sysfs(core_pmu);
+ pmu = pmu_find(name);
+ }
return pmu;
}
@@ -268,7 +279,7 @@ struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu)
{
if (!pmu) {
pmu_read_sysfs(/*core_only=*/true);
- pmu = list_prepare_entry(pmu, &core_pmus, list);
+ return list_first_entry_or_null(&core_pmus, typeof(*pmu), list);
}
list_for_each_entry_continue(pmu, &core_pmus, list)
return pmu;
@@ -592,3 +603,8 @@ struct perf_pmu *evsel__find_pmu(const struct evsel *evsel)
}
return pmu;
}
+
+struct perf_pmu *perf_pmus__find_core_pmu(void)
+{
+ return perf_pmus__scan_core(NULL);
+}
diff --git a/tools/perf/util/print-events.c b/tools/perf/util/print-events.c
index a7566edc86a3..b0fc48be623f 100644
--- a/tools/perf/util/print-events.c
+++ b/tools/perf/util/print-events.c
@@ -395,6 +395,8 @@ void print_symbol_events(const struct print_callbacks *print_cb, void *print_sta
*/
void print_events(const struct print_callbacks *print_cb, void *print_state)
{
+ char *tmp;
+
print_symbol_events(print_cb, print_state, PERF_TYPE_HARDWARE,
event_symbols_hw, PERF_COUNT_HW_MAX);
print_symbol_events(print_cb, print_state, PERF_TYPE_SOFTWARE,
@@ -418,17 +420,21 @@ void print_events(const struct print_callbacks *print_cb, void *print_state)
/*long_desc=*/NULL,
/*encoding_desc=*/NULL);
- print_cb->print_event(print_state,
- /*topic=*/NULL,
- /*pmu_name=*/NULL,
- "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
- /*event_alias=*/NULL,
- /*scale_unit=*/NULL,
- /*deprecated=*/false,
- event_type_descriptors[PERF_TYPE_RAW],
- "(see 'man perf-list' on how to encode it)",
- /*long_desc=*/NULL,
- /*encoding_desc=*/NULL);
+ if (asprintf(&tmp, "%s/t1=v1[,t2=v2,t3 ...]/modifier",
+ perf_pmus__scan_core(/*pmu=*/NULL)->name) > 0) {
+ print_cb->print_event(print_state,
+ /*topic=*/NULL,
+ /*pmu_name=*/NULL,
+ tmp,
+ /*event_alias=*/NULL,
+ /*scale_unit=*/NULL,
+ /*deprecated=*/false,
+ event_type_descriptors[PERF_TYPE_RAW],
+ "(see 'man perf-list' on how to encode it)",
+ /*long_desc=*/NULL,
+ /*encoding_desc=*/NULL);
+ free(tmp);
+ }
print_cb->print_event(print_state,
/*topic=*/NULL,
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index 26e1c8d973ea..593b660ec75e 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -40,6 +40,7 @@ util/rwsem.c
util/hashmap.c
util/perf_regs.c
util/fncache.c
+util/rlimit.c
util/perf-regs-arch/perf_regs_aarch64.c
util/perf-regs-arch/perf_regs_arm.c
util/perf-regs-arch/perf_regs_csky.c
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index c29f5f0bb552..8761f51b5c7c 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -98,7 +98,7 @@ struct perf_pmu *evsel__find_pmu(const struct evsel *evsel __maybe_unused)
return NULL;
}
-int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt, ...)
+int perf_pmu__scan_file(const struct perf_pmu *pmu, const char *name, const char *fmt, ...)
{
return EOF;
}
diff --git a/tools/perf/util/rlimit.c b/tools/perf/util/rlimit.c
index 13521d392a22..f857405fe1aa 100644
--- a/tools/perf/util/rlimit.c
+++ b/tools/perf/util/rlimit.c
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: LGPL-2.1 */
+#include <errno.h>
#include "util/debug.h"
#include "util/rlimit.h"
#include <sys/time.h>
@@ -27,3 +28,30 @@ void rlimit__bump_memlock(void)
}
}
}
+
+bool rlimit__increase_nofile(enum rlimit_action *set_rlimit)
+{
+ int old_errno;
+ struct rlimit l;
+
+ if (*set_rlimit < INCREASED_MAX) {
+ old_errno = errno;
+
+ if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
+ if (*set_rlimit == NO_CHANGE) {
+ /* First attempt: raise the soft limit to the hard limit. */
+ l.rlim_cur = l.rlim_max;
+ } else {
+ /* Second attempt: raise the hard limit as well. */
+ l.rlim_cur = l.rlim_max + 1000;
+ l.rlim_max = l.rlim_cur;
+ }
+ if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
+ (*set_rlimit) += 1;
+ errno = old_errno;
+ return true;
+ }
+ }
+ errno = old_errno;
+ }
+
+ return false;
+}
diff --git a/tools/perf/util/rlimit.h b/tools/perf/util/rlimit.h
index 9f59d8e710a3..19050d7fb9d7 100644
--- a/tools/perf/util/rlimit.h
+++ b/tools/perf/util/rlimit.h
@@ -1,6 +1,15 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
#ifndef __PERF_RLIMIT_H_
#define __PERF_RLIMIT_H_
-/* SPDX-License-Identifier: LGPL-2.1 */
+
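+/*
+ * Progress of rlimit__increase_nofile(): first the soft RLIMIT_NOFILE limit is
+ * raised to the hard limit, then the hard limit itself is increased.
+ */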
+enum rlimit_action {
+ NO_CHANGE,
+ SET_TO_MAX,
+ INCREASED_MAX
+};
void rlimit__bump_memlock(void);
+
+bool rlimit__increase_nofile(enum rlimit_action *set_rlimit);
+
#endif // __PERF_RLIMIT_H_
diff --git a/tools/perf/util/rwsem.c b/tools/perf/util/rwsem.c
index f3d29d8ddc99..5109167f27f7 100644
--- a/tools/perf/util/rwsem.c
+++ b/tools/perf/util/rwsem.c
@@ -2,32 +2,66 @@
#include "util.h"
#include "rwsem.h"
+#if RWS_ERRORCHECK
+#include "mutex.h"
+#endif
+
int init_rwsem(struct rw_semaphore *sem)
{
+#if RWS_ERRORCHECK
+ mutex_init(&sem->mtx);
+ return 0;
+#else
return pthread_rwlock_init(&sem->lock, NULL);
+#endif
}
int exit_rwsem(struct rw_semaphore *sem)
{
+#if RWS_ERRORCHECK
+ mutex_destroy(&sem->mtx);
+ return 0;
+#else
return pthread_rwlock_destroy(&sem->lock);
+#endif
}
int down_read(struct rw_semaphore *sem)
{
+#if RWS_ERRORCHECK
+ mutex_lock(&sem->mtx);
+ return 0;
+#else
return perf_singlethreaded ? 0 : pthread_rwlock_rdlock(&sem->lock);
+#endif
}
int up_read(struct rw_semaphore *sem)
{
+#if RWS_ERRORCHECK
+ mutex_unlock(&sem->mtx);
+ return 0;
+#else
return perf_singlethreaded ? 0 : pthread_rwlock_unlock(&sem->lock);
+#endif
}
int down_write(struct rw_semaphore *sem)
{
+#if RWS_ERRORCHECK
+ mutex_lock(&sem->mtx);
+ return 0;
+#else
return perf_singlethreaded ? 0 : pthread_rwlock_wrlock(&sem->lock);
+#endif
}
int up_write(struct rw_semaphore *sem)
{
+#if RWS_ERRORCHECK
+ mutex_unlock(&sem->mtx);
+ return 0;
+#else
return perf_singlethreaded ? 0 : pthread_rwlock_unlock(&sem->lock);
+#endif
}
diff --git a/tools/perf/util/rwsem.h b/tools/perf/util/rwsem.h
index 94565ad4d494..ef5cbc31d967 100644
--- a/tools/perf/util/rwsem.h
+++ b/tools/perf/util/rwsem.h
@@ -2,9 +2,20 @@
#define _PERF_RWSEM_H
#include <pthread.h>
+#include "mutex.h"
+
+/*
+ * Mutexes have additional error checking. Set RWS_ERRORCHECK to 1 to use a
+ * mutex rather than an rwlock for debugging.
+ */
+#define RWS_ERRORCHECK 0
struct rw_semaphore {
+#if RWS_ERRORCHECK
+ struct mutex mtx;
+#else
pthread_rwlock_t lock;
+#endif
};
int init_rwsem(struct rw_semaphore *sem);
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 6aa1c7f2b444..80e4f6132740 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -128,7 +128,7 @@ static int hist_entry__thread_filter(struct hist_entry *he, int type, const void
if (type != HIST_FILTER__THREAD)
return -1;
- return th && RC_CHK_ACCESS(he->thread) != RC_CHK_ACCESS(th);
+ return th && !RC_CHK_EQUAL(he->thread, th);
}
struct sort_entry sort_thread = {
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index cf05b0b56c57..116a642ad99d 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -301,3 +301,51 @@ unsigned int hex(char c)
return c - 'a' + 10;
return c - 'A' + 10;
}
+
+/*
+ * Replace all occurrences of character 'needle' in string 'haystack' with
+ * string 'replace'.
+ *
+ * The result may be longer than the input, so a newly allocated string is
+ * returned and must be freed by the caller.
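+ *
+ * For example, strreplace_chars(',', "a,b", "::") returns "a::b".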
+ */
+char *strreplace_chars(char needle, const char *haystack, const char *replace)
+{
+ int replace_len = strlen(replace);
+ char *new_s, *to;
+ const char *loc = strchr(haystack, needle);
+ const char *from = haystack;
+ int num = 0;
+
+ /* Count occurrences */
+ while (loc) {
+ loc = strchr(loc + 1, needle);
+ num++;
+ }
+
+ /* Allocate enough space for replacements and reset first location */
+ new_s = malloc(strlen(haystack) + (num * (replace_len - 1) + 1));
+ if (!new_s)
+ return NULL;
+ loc = strchr(haystack, needle);
+ to = new_s;
+
+ while (loc) {
+ /* Copy original string up to found char and update positions */
+ memcpy(to, from, 1 + loc - from);
+ to += loc - from;
+ from = loc + 1;
+
+ /* Copy replacement string and update positions */
+ memcpy(to, replace, replace_len);
+ to += replace_len;
+
+ /* needle next occurrence or end of string */
+ loc = strchr(from, needle);
+ }
+
+ /* Copy any remaining chars + null */
+ strcpy(to, from);
+
+ return new_s;
+}
diff --git a/tools/perf/util/string2.h b/tools/perf/util/string2.h
index 56c30fef9682..52cb8ba057c7 100644
--- a/tools/perf/util/string2.h
+++ b/tools/perf/util/string2.h
@@ -39,5 +39,6 @@ char *strpbrk_esc(char *str, const char *stopset);
char *strdup_esc(const char *str);
unsigned int hex(char c);
+char *strreplace_chars(char needle, const char *haystack, const char *replace);
#endif /* PERF_STRING_H */
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index 0e4dc31c6c9c..1892e9b6aa7f 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -754,6 +754,7 @@ int svg_build_topology_map(struct perf_env *env)
int i, nr_cpus;
struct topology t;
char *sib_core, *sib_thr;
+ int ret = -1;
nr_cpus = min(env->nr_cpus_online, MAX_NR_CPUS);
@@ -799,11 +800,11 @@ int svg_build_topology_map(struct perf_env *env)
scan_core_topology(topology_map, &t, nr_cpus);
- return 0;
+ ret = 0;
exit:
zfree(&t.sib_core);
zfree(&t.sib_thr);
- return -1;
+ return ret;
}
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 95e99c332d7e..9e7eeaf616b8 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -1514,8 +1514,10 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
}
if (elf_section_by_name(runtime_ss->elf, &runtime_ss->ehdr, &tshdr,
- ".text", NULL))
+ ".text", NULL)) {
dso->text_offset = tshdr.sh_addr - tshdr.sh_offset;
+ dso->text_end = tshdr.sh_offset + tshdr.sh_size;
+ }
if (runtime_ss->opdsec)
opddata = elf_rawdata(runtime_ss->opdsec, NULL);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 3f36675b7c8f..82cc74b9358e 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -202,11 +202,10 @@ void symbols__fixup_duplicate(struct rb_root_cached *symbols)
curr = rb_entry(nd, struct symbol, rb_node);
again:
nd = rb_next(&curr->rb_node);
- next = rb_entry(nd, struct symbol, rb_node);
-
if (!nd)
break;
+ next = rb_entry(nd, struct symbol, rb_node);
if (curr->start != next->start)
continue;
@@ -719,6 +718,7 @@ static bool symbol__is_idle(const char *name)
"cpu_startup_entry",
"idle_cpu",
"intel_idle",
+ "intel_idle_ibrs",
"default_idle",
"native_safe_halt",
"enter_idle",
@@ -877,7 +877,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
*module++ = '\0';
curr_map_dso = map__dso(curr_map);
if (strcmp(curr_map_dso->short_name, module)) {
- if (RC_CHK_ACCESS(curr_map) != RC_CHK_ACCESS(initial_map) &&
+ if (!RC_CHK_EQUAL(curr_map, initial_map) &&
dso->kernel == DSO_SPACE__KERNEL_GUEST &&
machine__is_default_guest(machine)) {
/*
@@ -1469,7 +1469,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
list_del_init(&new_node->node);
- if (RC_CHK_ACCESS(new_map) == RC_CHK_ACCESS(replacement_map)) {
+ if (RC_CHK_EQUAL(new_map, replacement_map)) {
struct map *map_ref;
map__set_start(map, map__start(new_map));
@@ -1733,8 +1733,10 @@ int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
/* PE symbols can only have 4 bytes, so use .text high bits */
dso->text_offset = section->vma - (u32)section->vma;
dso->text_offset += (u32)bfd_asymbol_value(symbols[i]);
+ dso->text_end = (section->vma - dso->text_offset) + section->size;
} else {
dso->text_offset = section->vma - section->filepos;
+ dso->text_end = section->filepos + section->size;
}
}
@@ -2789,8 +2791,11 @@ struct mem_info *mem_info__get(struct mem_info *mi)
void mem_info__put(struct mem_info *mi)
{
- if (mi && refcount_dec_and_test(&mi->refcnt))
+ if (mi && refcount_dec_and_test(&mi->refcnt)) {
+ addr_map_symbol__exit(&mi->iaddr);
+ addr_map_symbol__exit(&mi->daddr);
free(mi);
+ }
}
struct mem_info *mem_info__new(void)
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index 319ccf09a435..c8755679281e 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -313,7 +313,8 @@ static int record_event_files(struct tracepoint_path *tps)
}
err = 0;
out:
- closedir(dir);
+ if (dir)
+ closedir(dir);
put_tracing_file(path);
return err;
diff --git a/tools/power/cpupower/man/cpupower-powercap-info.1 b/tools/power/cpupower/man/cpupower-powercap-info.1
index df3087000efb..145d6f06fa72 100644
--- a/tools/power/cpupower/man/cpupower-powercap-info.1
+++ b/tools/power/cpupower/man/cpupower-powercap-info.1
@@ -17,7 +17,7 @@ settings of all cores, see cpupower(1) how to choose specific cores.
.SH "DOCUMENTATION"
kernel sources:
-Documentation/power/powercap/powercap.txt
+Documentation/power/powercap/powercap.rst
.SH "SEE ALSO"
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index ff527ac065cf..6fba29f3222d 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -136,7 +136,15 @@ else
NO_SUBDIR = :
endif
-ifneq ($(findstring s,$(filter-out --%,$(MAKEFLAGS))),)
+# If the user is running make -s (silent mode), suppress echoing of commands.
+# make-4.0 (and later) keeps single letter options in the 1st word of MAKEFLAGS.
+ifeq ($(filter 3.%,$(MAKE_VERSION)),)
+short-opts := $(firstword -$(MAKEFLAGS))
+else
+short-opts := $(filter-out --%,$(MAKEFLAGS))
+endif
+
+ifneq ($(findstring s,$(short-opts)),)
silent=1
endif
diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
index fb6ab9cef84f..b88546299902 100644
--- a/tools/testing/cxl/test/cxl.c
+++ b/tools/testing/cxl/test/cxl.c
@@ -831,7 +831,7 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
cxld->interleave_ways = 2;
else
cxld->interleave_ways = 1;
- cxld->interleave_granularity = 256;
+ cxld->interleave_granularity = 4096;
cxld->hpa_range = (struct range) {
.start = base,
.end = base + size - 1,
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index 464fc39ed277..ee61fa3a2411 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -89,6 +89,12 @@ static struct cxl_cel_entry mock_cel[] = {
.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
EFFECT(CONF_CHANGE_IMMEDIATE)),
},
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_SANITIZE),
+ .effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE) |
+ EFFECT(SECURITY_CHANGE_IMMEDIATE) |
+ EFFECT(BACKGROUND_OP)),
+ },
};
/* See CXL 2.0 Table 181 Get Health Info Output Payload */
@@ -133,7 +139,6 @@ struct mock_event_log {
};
struct mock_event_store {
- struct cxl_memdev_state *mds;
struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
u32 ev_status;
};
@@ -150,8 +155,10 @@ struct cxl_mockmem_data {
int user_limit;
int master_limit;
struct mock_event_store mes;
+ struct cxl_memdev_state *mds;
u8 event_buf[SZ_4K];
u64 timestamp;
+ unsigned long sanitize_timeout;
};
static struct mock_event_log *event_find_log(struct device *dev, int log_type)
@@ -326,7 +333,7 @@ static void cxl_mock_event_trigger(struct device *dev)
event_reset_log(log);
}
- cxl_mem_get_event_records(mes->mds, mes->ev_status);
+ cxl_mem_get_event_records(mdata->mds, mes->ev_status);
}
struct cxl_event_record_raw maint_needed = {
@@ -567,9 +574,26 @@ static int mock_partition_info(struct cxl_mbox_cmd *cmd)
return 0;
}
+void cxl_mockmem_sanitize_work(struct work_struct *work)
+{
+ struct cxl_memdev_state *mds =
+ container_of(work, typeof(*mds), security.poll_dwork.work);
+
+ mutex_lock(&mds->mbox_mutex);
+ if (mds->security.sanitize_node)
+ sysfs_notify_dirent(mds->security.sanitize_node);
+ mds->security.sanitize_active = false;
+ mutex_unlock(&mds->mbox_mutex);
+
+ dev_dbg(mds->cxlds.dev, "sanitize complete\n");
+}
+
static int mock_sanitize(struct cxl_mockmem_data *mdata,
struct cxl_mbox_cmd *cmd)
{
+ struct cxl_memdev_state *mds = mdata->mds;
+ int rc = 0;
+
if (cmd->size_in != 0)
return -EINVAL;
@@ -585,7 +609,16 @@ static int mock_sanitize(struct cxl_mockmem_data *mdata,
return -ENXIO;
}
- return 0; /* assume less than 2 secs, no bg */
+ mutex_lock(&mds->mbox_mutex);
+ if (schedule_delayed_work(&mds->security.poll_dwork,
+ msecs_to_jiffies(mdata->sanitize_timeout))) {
+ mds->security.sanitize_active = true;
+ dev_dbg(mds->cxlds.dev, "sanitize issued\n");
+ } else
+ rc = -EBUSY;
+ mutex_unlock(&mds->mbox_mutex);
+
+ return rc;
}
static int mock_secure_erase(struct cxl_mockmem_data *mdata,
@@ -1237,6 +1270,7 @@ static int mock_transfer_fw(struct cxl_mockmem_data *mdata,
}
memcpy(fw + offset, transfer->data, length);
+ usleep_range(1500, 2000);
return 0;
}
@@ -1415,16 +1449,16 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
if (IS_ERR(mds))
return PTR_ERR(mds);
+ mdata->mds = mds;
mds->mbox_send = cxl_mock_mbox_send;
mds->payload_size = SZ_4K;
mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
+ INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mockmem_sanitize_work);
cxlds = &mds->cxlds;
cxlds->serial = pdev->id;
- if (is_rcd(pdev)) {
+ if (is_rcd(pdev))
cxlds->rcd = true;
- cxlds->component_reg_phys = CXL_RESOURCE_NONE;
- }
rc = cxl_enumerate_cmds(mds);
if (rc)
@@ -1447,14 +1481,17 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
if (rc)
return rc;
- mdata->mes.mds = mds;
cxl_mock_add_event_logs(&mdata->mes);
- cxlmd = devm_cxl_add_memdev(cxlds);
+ cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
- rc = cxl_memdev_setup_fw_upload(mds);
+ rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
+ if (rc)
+ return rc;
+
+ rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd);
if (rc)
return rc;
@@ -1526,10 +1563,38 @@ static ssize_t fw_buf_checksum_show(struct device *dev,
static DEVICE_ATTR_RO(fw_buf_checksum);
+static ssize_t sanitize_timeout_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", mdata->sanitize_timeout);
+}
+
+static ssize_t sanitize_timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
+ unsigned long val;
+ int rc;
+
+ rc = kstrtoul(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ mdata->sanitize_timeout = val;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(sanitize_timeout);
+
static struct attribute *cxl_mock_mem_attrs[] = {
&dev_attr_security_lock.attr,
&dev_attr_event_trigger.attr,
&dev_attr_fw_buf_checksum.attr,
+ &dev_attr_sanitize_timeout.attr,
NULL
};
ATTRIBUTE_GROUPS(cxl_mock_mem);
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index a5e246f7b202..91907b321f91 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -39,9 +39,7 @@ struct bpf_testmod_struct_arg_4 {
int b;
};
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
- "Global functions as their definitions will be in bpf_testmod.ko BTF");
+__bpf_hook_start();
noinline int
bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
@@ -335,7 +333,7 @@ noinline int bpf_fentry_shadow_test(int a)
}
EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);
-__diag_pop();
+__bpf_hook_end();
static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
.attr = { .name = "bpf_testmod", .mode = 0666, },
diff --git a/tools/testing/selftests/bpf/map_tests/map_percpu_stats.c b/tools/testing/selftests/bpf/map_tests/map_percpu_stats.c
index 1a9eeefda9a8..8bf497a9843e 100644
--- a/tools/testing/selftests/bpf/map_tests/map_percpu_stats.c
+++ b/tools/testing/selftests/bpf/map_tests/map_percpu_stats.c
@@ -326,20 +326,14 @@ static int map_create(__u32 type, const char *name, struct bpf_map_create_opts *
static int create_hash(void)
{
- struct bpf_map_create_opts map_opts = {
- .sz = sizeof(map_opts),
- .map_flags = BPF_F_NO_PREALLOC,
- };
+ LIBBPF_OPTS(bpf_map_create_opts, map_opts, .map_flags = BPF_F_NO_PREALLOC);
return map_create(BPF_MAP_TYPE_HASH, "hash", &map_opts);
}
static int create_percpu_hash(void)
{
- struct bpf_map_create_opts map_opts = {
- .sz = sizeof(map_opts),
- .map_flags = BPF_F_NO_PREALLOC,
- };
+ LIBBPF_OPTS(bpf_map_create_opts, map_opts, .map_flags = BPF_F_NO_PREALLOC);
return map_create(BPF_MAP_TYPE_PERCPU_HASH, "percpu_hash", &map_opts);
}
@@ -356,21 +350,17 @@ static int create_percpu_hash_prealloc(void)
static int create_lru_hash(__u32 type, __u32 map_flags)
{
- struct bpf_map_create_opts map_opts = {
- .sz = sizeof(map_opts),
- .map_flags = map_flags,
- };
+ LIBBPF_OPTS(bpf_map_create_opts, map_opts, .map_flags = map_flags);
return map_create(type, "lru_hash", &map_opts);
}
static int create_hash_of_maps(void)
{
- struct bpf_map_create_opts map_opts = {
- .sz = sizeof(map_opts),
+ LIBBPF_OPTS(bpf_map_create_opts, map_opts,
.map_flags = BPF_F_NO_PREALLOC,
.inner_map_fd = create_small_hash(),
- };
+ );
int ret;
ret = map_create_opts(BPF_MAP_TYPE_HASH_OF_MAPS, "hash_of_maps",
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c b/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c
index e02feb5fae97..574d9a0cdc8e 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c
@@ -4,6 +4,7 @@
#include <test_progs.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
+#include "iters_css_task.skel.h"
#include "cgroup_iter.skel.h"
#include "cgroup_helpers.h"
@@ -263,6 +264,35 @@ close_cgrp:
close(cgrp_fd);
}
+static void test_walk_self_only_css_task(void)
+{
+ struct iters_css_task *skel;
+ int err;
+
+ skel = iters_css_task__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.cgroup_id_printer, true);
+
+ err = iters_css_task__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ err = join_cgroup(cg_path[CHILD2]);
+ if (!ASSERT_OK(err, "join_cgroup"))
+ goto cleanup;
+
+ skel->bss->target_pid = getpid();
+ snprintf(expected_output, sizeof(expected_output),
+ PROLOGUE "%8llu\n" EPILOGUE, cg_id[CHILD2]);
+ read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[CHILD2],
+ BPF_CGROUP_ITER_SELF_ONLY, "test_walk_self_only_css_task");
+ ASSERT_EQ(skel->bss->css_task_cnt, 1, "css_task_cnt");
+cleanup:
+ iters_css_task__destroy(skel);
+}
+
void test_cgroup_iter(void)
{
struct cgroup_iter *skel = NULL;
@@ -293,6 +323,9 @@ void test_cgroup_iter(void)
test_walk_self_only(skel);
if (test__start_subtest("cgroup_iter__dead_self_only"))
test_walk_dead_self_only(skel);
+ if (test__start_subtest("cgroup_iter__self_only_css_task"))
+ test_walk_self_only_css_task();
+
out:
cgroup_iter__destroy(skel);
cleanup_cgroups();
diff --git a/tools/testing/selftests/bpf/prog_tests/iters.c b/tools/testing/selftests/bpf/prog_tests/iters.c
index c2425791c923..bf84d4a1d9ae 100644
--- a/tools/testing/selftests/bpf/prog_tests/iters.c
+++ b/tools/testing/selftests/bpf/prog_tests/iters.c
@@ -294,6 +294,7 @@ void test_iters(void)
RUN_TESTS(iters_state_safety);
RUN_TESTS(iters_looping);
RUN_TESTS(iters);
+ RUN_TESTS(iters_css_task);
if (env.has_testmod)
RUN_TESTS(iters_testmod_seq);
diff --git a/tools/testing/selftests/bpf/prog_tests/test_bpffs.c b/tools/testing/selftests/bpf/prog_tests/test_bpffs.c
index 214d9f4a94a5..ea933fd151c3 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_bpffs.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_bpffs.c
@@ -8,7 +8,8 @@
#include <sys/types.h>
#include <test_progs.h>
-#define TDIR "/sys/kernel/debug"
+/* TDIR must be in a location we can create a directory in. */
+#define TDIR "/tmp/test_bpffs_testdir"
static int read_iter(char *file)
{
@@ -43,8 +44,11 @@ static int fn(void)
if (!ASSERT_OK(err, "mount /"))
goto out;
- err = umount(TDIR);
- if (!ASSERT_OK(err, "umount " TDIR))
+ err = mkdir(TDIR, 0777);
+ /* If the directory already exists, we can carry on. It may be left over
+ * from a previous run.
+ */
+ if ((err && errno != EEXIST) && !ASSERT_OK(err, "mkdir " TDIR))
goto out;
err = mount("none", TDIR, "tmpfs", 0, NULL);
@@ -138,6 +142,7 @@ out:
rmdir(TDIR "/fs1");
rmdir(TDIR "/fs2");
umount(TDIR);
+ rmdir(TDIR);
exit(err);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index e3e68c97b40c..e5c61aa6604a 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -46,6 +46,7 @@
#include "verifier_movsx.skel.h"
#include "verifier_netfilter_ctx.skel.h"
#include "verifier_netfilter_retcode.skel.h"
+#include "verifier_precision.skel.h"
#include "verifier_prevent_map_lookup.skel.h"
#include "verifier_raw_stack.skel.h"
#include "verifier_raw_tp_writable.skel.h"
@@ -153,6 +154,7 @@ void test_verifier_meta_access(void) { RUN(verifier_meta_access); }
void test_verifier_movsx(void) { RUN(verifier_movsx); }
void test_verifier_netfilter_ctx(void) { RUN(verifier_netfilter_ctx); }
void test_verifier_netfilter_retcode(void) { RUN(verifier_netfilter_retcode); }
+void test_verifier_precision(void) { RUN(verifier_precision); }
void test_verifier_prevent_map_lookup(void) { RUN(verifier_prevent_map_lookup); }
void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); }
void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); }
diff --git a/tools/testing/selftests/bpf/progs/iters_css_task.c b/tools/testing/selftests/bpf/progs/iters_css_task.c
index 5089ce384a1c..9ac758649cb8 100644
--- a/tools/testing/selftests/bpf/progs/iters_css_task.c
+++ b/tools/testing/selftests/bpf/progs/iters_css_task.c
@@ -10,6 +10,7 @@
char _license[] SEC("license") = "GPL";
+struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
void bpf_cgroup_release(struct cgroup *p) __ksym;
@@ -45,3 +46,57 @@ int BPF_PROG(iter_css_task_for_each, struct vm_area_struct *vma,
return -EPERM;
}
+
+static inline u64 cgroup_id(struct cgroup *cgrp)
+{
+ return cgrp->kn->id;
+}
+
+SEC("?iter/cgroup")
+int cgroup_id_printer(struct bpf_iter__cgroup *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct cgroup *cgrp = ctx->cgroup;
+ struct cgroup_subsys_state *css;
+ struct task_struct *task;
+
+ /* epilogue */
+ if (cgrp == NULL) {
+ BPF_SEQ_PRINTF(seq, "epilogue\n");
+ return 0;
+ }
+
+ /* prologue */
+ if (ctx->meta->seq_num == 0)
+ BPF_SEQ_PRINTF(seq, "prologue\n");
+
+ BPF_SEQ_PRINTF(seq, "%8llu\n", cgroup_id(cgrp));
+
+ css = &cgrp->self;
+ css_task_cnt = 0;
+ bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) {
+ if (task->pid == target_pid)
+ css_task_cnt++;
+ }
+
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int BPF_PROG(iter_css_task_for_each_sleep)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp = bpf_cgroup_from_id(cgrp_id);
+ struct cgroup_subsys_state *css;
+ struct task_struct *task;
+
+ if (cgrp == NULL)
+ return 0;
+ css = &cgrp->self;
+
+ bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) {
+
+ }
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/iters_task_failure.c b/tools/testing/selftests/bpf/progs/iters_task_failure.c
index c3bf96a67dba..6b1588d70652 100644
--- a/tools/testing/selftests/bpf/progs/iters_task_failure.c
+++ b/tools/testing/selftests/bpf/progs/iters_task_failure.c
@@ -84,8 +84,8 @@ int BPF_PROG(iter_css_lock_and_unlock)
return 0;
}
-SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
-__failure __msg("css_task_iter is only allowed in bpf_lsm and bpf iter-s")
+SEC("?fentry/" SYS_PREFIX "sys_getpgid")
+__failure __msg("css_task_iter is only allowed in bpf_lsm, bpf_iter and sleepable progs")
int BPF_PROG(iter_css_task_for_each)
{
u64 cg_id = bpf_get_current_cgroup_id();
diff --git a/tools/testing/selftests/bpf/progs/verifier_precision.c b/tools/testing/selftests/bpf/progs/verifier_precision.c
new file mode 100644
index 000000000000..193c0f8272d0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_precision.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 SUSE LLC */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0xfffffff8 goto pc+2")
+__msg("mark_precise: frame0: regs=r2 stack= before 1: (87) r2 = -r2")
+__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 8")
+__naked int bpf_neg(void)
+{
+ asm volatile (
+ "r2 = 8;"
+ "r2 = -r2;"
+ "if r2 != -8 goto 1f;"
+ "r1 = r10;"
+ "r1 += r2;"
+ "1:"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
+__msg("mark_precise: frame0: regs=r2 stack= before 1: (d4) r2 = le16 r2")
+__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
+__naked int bpf_end_to_le(void)
+{
+ asm volatile (
+ "r2 = 0;"
+ "r2 = le16 r2;"
+ "if r2 != 0 goto 1f;"
+ "r1 = r10;"
+ "r1 += r2;"
+ "1:"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
+__msg("mark_precise: frame0: regs=r2 stack= before 1: (dc) r2 = be16 r2")
+__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
+__naked int bpf_end_to_be(void)
+{
+ asm volatile (
+ "r2 = 0;"
+ "r2 = be16 r2;"
+ "if r2 != 0 goto 1f;"
+ "r1 = r10;"
+ "r1 += r2;"
+ "1:"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
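+/*
+ * bswap is a BPF cpu=v4 instruction; the guard below limits this test to
+ * toolchains and targets expected to emit it.
+ */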
+#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+ defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
+ __clang_major__ >= 18
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
+__msg("mark_precise: frame0: regs=r2 stack= before 1: (d7) r2 = bswap16 r2")
+__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
+__naked int bpf_end_bswap(void)
+{
+ asm volatile (
+ "r2 = 0;"
+ "r2 = bswap16 r2;"
+ "if r2 != 0 goto 1f;"
+ "r1 = r10;"
+ "r1 += r2;"
+ "1:"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+#endif /* v4 instruction */
diff --git a/tools/testing/selftests/bpf/verifier/bpf_st_mem.c b/tools/testing/selftests/bpf/verifier/bpf_st_mem.c
index 3af2501082b2..b616575c3b00 100644
--- a/tools/testing/selftests/bpf/verifier/bpf_st_mem.c
+++ b/tools/testing/selftests/bpf/verifier/bpf_st_mem.c
@@ -65,3 +65,35 @@
.expected_attach_type = BPF_SK_LOOKUP,
.runs = -1,
},
+{
+ "BPF_ST_MEM stack imm sign",
+ /* Check if verifier correctly reasons about sign of an
+ * immediate spilled to stack by BPF_ST instruction.
+ *
+ * fp[-8] = -44;
+ * r0 = fp[-8];
+ * if r0 s< 0 goto ret0;
+ * r0 = -1;
+ * exit;
+ * ret0:
+ * r0 = 0;
+ * exit;
+ */
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, -44),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ /* Use prog type that requires return value in range [0, 1] */
+ .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+ .expected_attach_type = BPF_SK_LOOKUP,
+ .result = VERBOSE_ACCEPT,
+ .runs = -1,
+ .errstr = "0: (7a) *(u64 *)(r10 -8) = -44 ; R10=fp0 fp-8_w=-44\
+ 2: (c5) if r0 s< 0x0 goto pc+2\
+ R0_w=-44",
+},
diff --git a/tools/testing/selftests/bpf/xdp_hw_metadata.c b/tools/testing/selftests/bpf/xdp_hw_metadata.c
index 17c0f92ff160..c3ba40d0b9de 100644
--- a/tools/testing/selftests/bpf/xdp_hw_metadata.c
+++ b/tools/testing/selftests/bpf/xdp_hw_metadata.c
@@ -430,7 +430,7 @@ static void print_usage(void)
static void read_args(int argc, char *argv[])
{
- char opt;
+ int opt;
while ((opt = getopt(argc, argv, "mh")) != -1) {
switch (opt) {
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc
index 21db6b720754..e21c9c27ece4 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc
@@ -40,7 +40,9 @@ riscv*)
esac
: "Test get argument (1)"
-if grep -q eventfs_add_dir available_filter_functions; then
+if grep -q eventfs_create_dir available_filter_functions; then
+ DIR_NAME="eventfs_create_dir"
+elif grep -q eventfs_add_dir available_filter_functions; then
DIR_NAME="eventfs_add_dir"
else
DIR_NAME="tracefs_create_dir"
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
index 4e086f871cee..93217d459556 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
@@ -40,7 +40,9 @@ riscv*)
esac
: "Test get argument (1)"
-if grep -q eventfs_add_dir available_filter_functions; then
+if grep -q eventfs_create_dir available_filter_functions; then
+ DIR_NAME="eventfs_create_dir"
+elif grep -q eventfs_add_dir available_filter_functions; then
DIR_NAME="eventfs_add_dir"
else
DIR_NAME="tracefs_create_dir"
diff --git a/tools/testing/selftests/landlock/base_test.c b/tools/testing/selftests/landlock/base_test.c
index 792c3f0a59b4..646f778dfb1e 100644
--- a/tools/testing/selftests/landlock/base_test.c
+++ b/tools/testing/selftests/landlock/base_test.c
@@ -75,7 +75,7 @@ TEST(abi_version)
const struct landlock_ruleset_attr ruleset_attr = {
.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE,
};
- ASSERT_EQ(3, landlock_create_ruleset(NULL, 0,
+ ASSERT_EQ(4, landlock_create_ruleset(NULL, 0,
LANDLOCK_CREATE_RULESET_VERSION));
ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr, 0,
diff --git a/tools/testing/selftests/landlock/common.h b/tools/testing/selftests/landlock/common.h
index d7987ae8d7fc..5b79758cae62 100644
--- a/tools/testing/selftests/landlock/common.h
+++ b/tools/testing/selftests/landlock/common.h
@@ -112,10 +112,13 @@ static void _init_caps(struct __test_metadata *const _metadata, bool drop_all)
cap_t cap_p;
/* Only these three capabilities are useful for the tests. */
const cap_value_t caps[] = {
+ /* clang-format off */
CAP_DAC_OVERRIDE,
CAP_MKNOD,
CAP_SYS_ADMIN,
CAP_SYS_CHROOT,
+ CAP_NET_BIND_SERVICE,
+ /* clang-format on */
};
cap_p = cap_get_proc();
@@ -256,3 +259,13 @@ static int __maybe_unused send_fd(int usock, int fd_tx)
return -errno;
return 0;
}
+
+static void __maybe_unused
+enforce_ruleset(struct __test_metadata *const _metadata, const int ruleset_fd)
+{
+ ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
+ ASSERT_EQ(0, landlock_restrict_self(ruleset_fd, 0))
+ {
+ TH_LOG("Failed to enforce ruleset: %s", strerror(errno));
+ }
+}
diff --git a/tools/testing/selftests/landlock/config b/tools/testing/selftests/landlock/config
index 3dc9e438eab1..0086efaa7b68 100644
--- a/tools/testing/selftests/landlock/config
+++ b/tools/testing/selftests/landlock/config
@@ -1,5 +1,9 @@
CONFIG_CGROUPS=y
CONFIG_CGROUP_SCHED=y
+CONFIG_INET=y
+CONFIG_IPV6=y
+CONFIG_NET=y
+CONFIG_NET_NS=y
CONFIG_OVERLAY_FS=y
CONFIG_PROC_FS=y
CONFIG_SECURITY=y
diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c
index 251594306d40..18e1f86a6234 100644
--- a/tools/testing/selftests/landlock/fs_test.c
+++ b/tools/testing/selftests/landlock/fs_test.c
@@ -677,16 +677,6 @@ static int create_ruleset(struct __test_metadata *const _metadata,
return ruleset_fd;
}
-static void enforce_ruleset(struct __test_metadata *const _metadata,
- const int ruleset_fd)
-{
- ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
- ASSERT_EQ(0, landlock_restrict_self(ruleset_fd, 0))
- {
- TH_LOG("Failed to enforce ruleset: %s", strerror(errno));
- }
-}
-
TEST_F_FORK(layout0, proc_nsfs)
{
const struct rule rules[] = {
@@ -1635,6 +1625,65 @@ TEST_F_FORK(layout1, move_mount)
clear_cap(_metadata, CAP_SYS_ADMIN);
}
+TEST_F_FORK(layout1, topology_changes_with_net_only)
+{
+ const struct landlock_ruleset_attr ruleset_net = {
+ .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ };
+ int ruleset_fd;
+
+ /* Add network restrictions. */
+ ruleset_fd =
+ landlock_create_ruleset(&ruleset_net, sizeof(ruleset_net), 0);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Mount, remount, move_mount, umount, and pivot_root checks. */
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ ASSERT_EQ(0, mount_opt(&mnt_tmp, dir_s1d2));
+ ASSERT_EQ(0, mount(NULL, dir_s1d2, NULL, MS_PRIVATE | MS_REC, NULL));
+ ASSERT_EQ(0, syscall(__NR_move_mount, AT_FDCWD, dir_s1d2, AT_FDCWD,
+ dir_s2d2, 0));
+ ASSERT_EQ(0, umount(dir_s2d2));
+ ASSERT_EQ(0, syscall(__NR_pivot_root, dir_s3d2, dir_s3d3));
+ ASSERT_EQ(0, chdir("/"));
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+}
+
+TEST_F_FORK(layout1, topology_changes_with_net_and_fs)
+{
+ const struct landlock_ruleset_attr ruleset_net_fs = {
+ .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ .handled_access_fs = LANDLOCK_ACCESS_FS_EXECUTE,
+ };
+ int ruleset_fd;
+
+ /* Add network and filesystem restrictions. */
+ ruleset_fd = landlock_create_ruleset(&ruleset_net_fs,
+ sizeof(ruleset_net_fs), 0);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Mount, remount, move_mount, umount, and pivot_root checks. */
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ ASSERT_EQ(-1, mount_opt(&mnt_tmp, dir_s1d2));
+ ASSERT_EQ(EPERM, errno);
+ ASSERT_EQ(-1, mount(NULL, dir_s3d2, NULL, MS_PRIVATE | MS_REC, NULL));
+ ASSERT_EQ(EPERM, errno);
+ ASSERT_EQ(-1, syscall(__NR_move_mount, AT_FDCWD, dir_s3d2, AT_FDCWD,
+ dir_s2d2, 0));
+ ASSERT_EQ(EPERM, errno);
+ ASSERT_EQ(-1, umount(dir_s3d2));
+ ASSERT_EQ(EPERM, errno);
+ ASSERT_EQ(-1, syscall(__NR_pivot_root, dir_s3d2, dir_s3d3));
+ ASSERT_EQ(EPERM, errno);
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+}
+
TEST_F_FORK(layout1, release_inodes)
{
const struct rule rules[] = {
diff --git a/tools/testing/selftests/landlock/net_test.c b/tools/testing/selftests/landlock/net_test.c
new file mode 100644
index 000000000000..929e21c4db05
--- /dev/null
+++ b/tools/testing/selftests/landlock/net_test.c
@@ -0,0 +1,1738 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Landlock tests - Network
+ *
+ * Copyright © 2022-2023 Huawei Tech. Co., Ltd.
+ * Copyright © 2023 Microsoft Corporation
+ */
+
+#define _GNU_SOURCE
+#include <arpa/inet.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/landlock.h>
+#include <linux/in.h>
+#include <sched.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/prctl.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+
+#include "common.h"
+
+const short sock_port_start = (1 << 10);
+
+static const char loopback_ipv4[] = "127.0.0.1";
+static const char loopback_ipv6[] = "::1";
+
+/* Number of pending connections the listen backlog can hold. */
+const short backlog = 10;
+
+enum sandbox_type {
+ NO_SANDBOX,
+ /* This may be used to test rules that allow *and* deny accesses. */
+ TCP_SANDBOX,
+};
+
+struct protocol_variant {
+ int domain;
+ int type;
+};
+
+struct service_fixture {
+ struct protocol_variant protocol;
+ /* port is also stored in ipv4_addr.sin_port or ipv6_addr.sin6_port */
+ unsigned short port;
+ union {
+ struct sockaddr_in ipv4_addr;
+ struct sockaddr_in6 ipv6_addr;
+ struct {
+ struct sockaddr_un unix_addr;
+ socklen_t unix_addr_len;
+ };
+ };
+};
+
+static int set_service(struct service_fixture *const srv,
+ const struct protocol_variant prot,
+ const unsigned short index)
+{
+ memset(srv, 0, sizeof(*srv));
+
+ /*
+ * Copies all protocol properties in case the variant only contains
+ * a subset of them.
+ */
+ srv->protocol = prot;
+
+ /* Checks for port overflow. */
+ if (index > 2)
+ return 1;
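+ /* Index 0, 1 and 2 map to ports 1024, 4096 and 16384 respectively. */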
+ srv->port = sock_port_start << (2 * index);
+
+ switch (prot.domain) {
+ case AF_UNSPEC:
+ case AF_INET:
+ srv->ipv4_addr.sin_family = prot.domain;
+ srv->ipv4_addr.sin_port = htons(srv->port);
+ srv->ipv4_addr.sin_addr.s_addr = inet_addr(loopback_ipv4);
+ return 0;
+
+ case AF_INET6:
+ srv->ipv6_addr.sin6_family = prot.domain;
+ srv->ipv6_addr.sin6_port = htons(srv->port);
+ inet_pton(AF_INET6, loopback_ipv6, &srv->ipv6_addr.sin6_addr);
+ return 0;
+
+ case AF_UNIX:
+ srv->unix_addr.sun_family = prot.domain;
+ sprintf(srv->unix_addr.sun_path,
+ "_selftests-landlock-net-tid%d-index%d", gettid(),
+ index);
+ srv->unix_addr_len = SUN_LEN(&srv->unix_addr);
+ srv->unix_addr.sun_path[0] = '\0';
+ return 0;
+ }
+ return 1;
+}
+
+static void setup_loopback(struct __test_metadata *const _metadata)
+{
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ ASSERT_EQ(0, unshare(CLONE_NEWNET));
+ ASSERT_EQ(0, system("ip link set dev lo up"));
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+}
+
+static bool is_restricted(const struct protocol_variant *const prot,
+ const enum sandbox_type sandbox)
+{
+ switch (prot->domain) {
+ case AF_INET:
+ case AF_INET6:
+ switch (prot->type) {
+ case SOCK_STREAM:
+ return sandbox == TCP_SANDBOX;
+ }
+ break;
+ }
+ return false;
+}
+
+static int socket_variant(const struct service_fixture *const srv)
+{
+ int ret;
+
+ ret = socket(srv->protocol.domain, srv->protocol.type | SOCK_CLOEXEC,
+ 0);
+ if (ret < 0)
+ return -errno;
+ return ret;
+}
+
+#ifndef SIN6_LEN_RFC2133
+#define SIN6_LEN_RFC2133 24
+#endif
+
+static socklen_t get_addrlen(const struct service_fixture *const srv,
+ const bool minimal)
+{
+ switch (srv->protocol.domain) {
+ case AF_UNSPEC:
+ case AF_INET:
+ return sizeof(srv->ipv4_addr);
+
+ case AF_INET6:
+ if (minimal)
+ return SIN6_LEN_RFC2133;
+ return sizeof(srv->ipv6_addr);
+
+ case AF_UNIX:
+ if (minimal)
+ return sizeof(srv->unix_addr) -
+ sizeof(srv->unix_addr.sun_path);
+ return srv->unix_addr_len;
+
+ default:
+ return 0;
+ }
+}
+
+static void set_port(struct service_fixture *const srv, uint16_t port)
+{
+ switch (srv->protocol.domain) {
+ case AF_UNSPEC:
+ case AF_INET:
+ srv->ipv4_addr.sin_port = htons(port);
+ return;
+
+ case AF_INET6:
+ srv->ipv6_addr.sin6_port = htons(port);
+ return;
+
+ default:
+ return;
+ }
+}
+
+static uint16_t get_binded_port(int socket_fd,
+ const struct protocol_variant *const prot)
+{
+ struct sockaddr_in ipv4_addr;
+ struct sockaddr_in6 ipv6_addr;
+ socklen_t ipv4_addr_len, ipv6_addr_len;
+
+ /* Gets the bound port. */
+ switch (prot->domain) {
+ case AF_UNSPEC:
+ case AF_INET:
+ ipv4_addr_len = sizeof(ipv4_addr);
+ getsockname(socket_fd, &ipv4_addr, &ipv4_addr_len);
+ return ntohs(ipv4_addr.sin_port);
+
+ case AF_INET6:
+ ipv6_addr_len = sizeof(ipv6_addr);
+ getsockname(socket_fd, &ipv6_addr, &ipv6_addr_len);
+ return ntohs(ipv6_addr.sin6_port);
+
+ default:
+ return 0;
+ }
+}
+
+static int bind_variant_addrlen(const int sock_fd,
+ const struct service_fixture *const srv,
+ const socklen_t addrlen)
+{
+ int ret;
+
+ switch (srv->protocol.domain) {
+ case AF_UNSPEC:
+ case AF_INET:
+ ret = bind(sock_fd, &srv->ipv4_addr, addrlen);
+ break;
+
+ case AF_INET6:
+ ret = bind(sock_fd, &srv->ipv6_addr, addrlen);
+ break;
+
+ case AF_UNIX:
+ ret = bind(sock_fd, &srv->unix_addr, addrlen);
+ break;
+
+ default:
+ errno = EAFNOSUPPORT;
+ return -errno;
+ }
+
+ if (ret < 0)
+ return -errno;
+ return ret;
+}
+
+static int bind_variant(const int sock_fd,
+ const struct service_fixture *const srv)
+{
+ return bind_variant_addrlen(sock_fd, srv, get_addrlen(srv, false));
+}
+
+static int connect_variant_addrlen(const int sock_fd,
+ const struct service_fixture *const srv,
+ const socklen_t addrlen)
+{
+ int ret;
+
+ switch (srv->protocol.domain) {
+ case AF_UNSPEC:
+ case AF_INET:
+ ret = connect(sock_fd, &srv->ipv4_addr, addrlen);
+ break;
+
+ case AF_INET6:
+ ret = connect(sock_fd, &srv->ipv6_addr, addrlen);
+ break;
+
+ case AF_UNIX:
+ ret = connect(sock_fd, &srv->unix_addr, addrlen);
+ break;
+
+ default:
+ errno = EAFNOSUPPORT;
+ return -errno;
+ }
+
+ if (ret < 0)
+ return -errno;
+ return ret;
+}
+
+static int connect_variant(const int sock_fd,
+ const struct service_fixture *const srv)
+{
+ return connect_variant_addrlen(sock_fd, srv, get_addrlen(srv, false));
+}
+
+FIXTURE(protocol)
+{
+ struct service_fixture srv0, srv1, srv2, unspec_any0, unspec_srv0;
+};
+
+FIXTURE_VARIANT(protocol)
+{
+ const enum sandbox_type sandbox;
+ const struct protocol_variant prot;
+};
+
+FIXTURE_SETUP(protocol)
+{
+ const struct protocol_variant prot_unspec = {
+ .domain = AF_UNSPEC,
+ .type = SOCK_STREAM,
+ };
+
+ disable_caps(_metadata);
+
+ ASSERT_EQ(0, set_service(&self->srv0, variant->prot, 0));
+ ASSERT_EQ(0, set_service(&self->srv1, variant->prot, 1));
+ ASSERT_EQ(0, set_service(&self->srv2, variant->prot, 2));
+
+ ASSERT_EQ(0, set_service(&self->unspec_srv0, prot_unspec, 0));
+
+ ASSERT_EQ(0, set_service(&self->unspec_any0, prot_unspec, 0));
+ self->unspec_any0.ipv4_addr.sin_addr.s_addr = htonl(INADDR_ANY);
+
+ setup_loopback(_metadata);
+};
+
+FIXTURE_TEARDOWN(protocol)
+{
+}
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_tcp) {
+ /* clang-format on */
+ .sandbox = NO_SANDBOX,
+ .prot = {
+ .domain = AF_INET,
+ .type = SOCK_STREAM,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_tcp) {
+ /* clang-format on */
+ .sandbox = NO_SANDBOX,
+ .prot = {
+ .domain = AF_INET6,
+ .type = SOCK_STREAM,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_udp) {
+ /* clang-format on */
+ .sandbox = NO_SANDBOX,
+ .prot = {
+ .domain = AF_INET,
+ .type = SOCK_DGRAM,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_udp) {
+ /* clang-format on */
+ .sandbox = NO_SANDBOX,
+ .prot = {
+ .domain = AF_INET6,
+ .type = SOCK_DGRAM,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_unix_stream) {
+ /* clang-format on */
+ .sandbox = NO_SANDBOX,
+ .prot = {
+ .domain = AF_UNIX,
+ .type = SOCK_STREAM,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_unix_datagram) {
+ /* clang-format on */
+ .sandbox = NO_SANDBOX,
+ .prot = {
+ .domain = AF_UNIX,
+ .type = SOCK_DGRAM,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_tcp) {
+ /* clang-format on */
+ .sandbox = TCP_SANDBOX,
+ .prot = {
+ .domain = AF_INET,
+ .type = SOCK_STREAM,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_tcp) {
+ /* clang-format on */
+ .sandbox = TCP_SANDBOX,
+ .prot = {
+ .domain = AF_INET6,
+ .type = SOCK_STREAM,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_udp) {
+ /* clang-format on */
+ .sandbox = TCP_SANDBOX,
+ .prot = {
+ .domain = AF_INET,
+ .type = SOCK_DGRAM,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_udp) {
+ /* clang-format on */
+ .sandbox = TCP_SANDBOX,
+ .prot = {
+ .domain = AF_INET6,
+ .type = SOCK_DGRAM,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_unix_stream) {
+ /* clang-format on */
+ .sandbox = TCP_SANDBOX,
+ .prot = {
+ .domain = AF_UNIX,
+ .type = SOCK_STREAM,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_unix_datagram) {
+ /* clang-format on */
+ .sandbox = TCP_SANDBOX,
+ .prot = {
+ .domain = AF_UNIX,
+ .type = SOCK_DGRAM,
+ },
+};
+
+static void test_bind_and_connect(struct __test_metadata *const _metadata,
+ const struct service_fixture *const srv,
+ const bool deny_bind, const bool deny_connect)
+{
+ char buf = '\0';
+ int inval_fd, bind_fd, client_fd, status, ret;
+ pid_t child;
+
+ /* Starts invalid addrlen tests with bind. */
+ inval_fd = socket_variant(srv);
+ ASSERT_LE(0, inval_fd)
+ {
+ TH_LOG("Failed to create socket: %s", strerror(errno));
+ }
+
+ /* Tries to bind with zero as addrlen. */
+ EXPECT_EQ(-EINVAL, bind_variant_addrlen(inval_fd, srv, 0));
+
+ /* Tries to bind with too small addrlen. */
+ EXPECT_EQ(-EINVAL, bind_variant_addrlen(inval_fd, srv,
+ get_addrlen(srv, true) - 1));
+
+ /* Tries to bind with minimal addrlen. */
+ ret = bind_variant_addrlen(inval_fd, srv, get_addrlen(srv, true));
+ if (deny_bind) {
+ EXPECT_EQ(-EACCES, ret);
+ } else {
+ EXPECT_EQ(0, ret)
+ {
+ TH_LOG("Failed to bind to socket: %s", strerror(errno));
+ }
+ }
+ EXPECT_EQ(0, close(inval_fd));
+
+ /* Starts invalid addrlen tests with connect. */
+ inval_fd = socket_variant(srv);
+ ASSERT_LE(0, inval_fd);
+
+ /* Tries to connect with zero as addrlen. */
+ EXPECT_EQ(-EINVAL, connect_variant_addrlen(inval_fd, srv, 0));
+
+ /* Tries to connect with too small addrlen. */
+ EXPECT_EQ(-EINVAL, connect_variant_addrlen(inval_fd, srv,
+ get_addrlen(srv, true) - 1));
+
+ /* Tries to connect with minimal addrlen. */
+ ret = connect_variant_addrlen(inval_fd, srv, get_addrlen(srv, true));
+ if (srv->protocol.domain == AF_UNIX) {
+ EXPECT_EQ(-EINVAL, ret);
+ } else if (deny_connect) {
+ EXPECT_EQ(-EACCES, ret);
+ } else if (srv->protocol.type == SOCK_STREAM) {
+ /* No listening server, whatever the value of deny_bind. */
+ EXPECT_EQ(-ECONNREFUSED, ret);
+ } else {
+ EXPECT_EQ(0, ret)
+ {
+ TH_LOG("Failed to connect to socket: %s",
+ strerror(errno));
+ }
+ }
+ EXPECT_EQ(0, close(inval_fd));
+
+ /* Starts connection tests. */
+ bind_fd = socket_variant(srv);
+ ASSERT_LE(0, bind_fd);
+
+ ret = bind_variant(bind_fd, srv);
+ if (deny_bind) {
+ EXPECT_EQ(-EACCES, ret);
+ } else {
+ EXPECT_EQ(0, ret);
+
+ /* Creates a listening socket. */
+ if (srv->protocol.type == SOCK_STREAM)
+ EXPECT_EQ(0, listen(bind_fd, backlog));
+ }
+
+ child = fork();
+ ASSERT_LE(0, child);
+ if (child == 0) {
+ int connect_fd, ret;
+
+ /* Closes listening socket for the child. */
+ EXPECT_EQ(0, close(bind_fd));
+
+ /* Starts connection tests. */
+ connect_fd = socket_variant(srv);
+ ASSERT_LE(0, connect_fd);
+ ret = connect_variant(connect_fd, srv);
+ if (deny_connect) {
+ EXPECT_EQ(-EACCES, ret);
+ } else if (deny_bind) {
+ /* No listening server. */
+ EXPECT_EQ(-ECONNREFUSED, ret);
+ } else {
+ EXPECT_EQ(0, ret);
+ EXPECT_EQ(1, write(connect_fd, ".", 1));
+ }
+
+ EXPECT_EQ(0, close(connect_fd));
+ _exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
+ return;
+ }
+
+ /* Accepts connection from the child. */
+ client_fd = bind_fd;
+ if (!deny_bind && !deny_connect) {
+ if (srv->protocol.type == SOCK_STREAM) {
+ client_fd = accept(bind_fd, NULL, 0);
+ ASSERT_LE(0, client_fd);
+ }
+
+ EXPECT_EQ(1, read(client_fd, &buf, 1));
+ EXPECT_EQ('.', buf);
+ }
+
+ EXPECT_EQ(child, waitpid(child, &status, 0));
+ EXPECT_EQ(1, WIFEXITED(status));
+ EXPECT_EQ(EXIT_SUCCESS, WEXITSTATUS(status));
+
+ /* Closes connection, if any. */
+ if (client_fd != bind_fd)
+ EXPECT_LE(0, close(client_fd));
+
+ /* Closes listening socket. */
+ EXPECT_EQ(0, close(bind_fd));
+}
+
+TEST_F(protocol, bind)
+{
+ if (variant->sandbox == TCP_SANDBOX) {
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ };
+ const struct landlock_net_port_attr tcp_bind_connect_p0 = {
+ .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ .port = self->srv0.port,
+ };
+ const struct landlock_net_port_attr tcp_connect_p1 = {
+ .allowed_access = LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ .port = self->srv1.port,
+ };
+ int ruleset_fd;
+
+ ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+
+ /* Allows connect and bind for the first port. */
+ ASSERT_EQ(0,
+ landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &tcp_bind_connect_p0, 0));
+
+ /* Allows connect and denies bind for the second port. */
+ ASSERT_EQ(0,
+ landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &tcp_connect_p1, 0));
+
+ enforce_ruleset(_metadata, ruleset_fd);
+ EXPECT_EQ(0, close(ruleset_fd));
+ }
+
+ /* Binds a socket to the first port. */
+ test_bind_and_connect(_metadata, &self->srv0, false, false);
+
+ /* Binds a socket to the second port. */
+ test_bind_and_connect(_metadata, &self->srv1,
+ is_restricted(&variant->prot, variant->sandbox),
+ false);
+
+ /* Binds a socket to the third port. */
+ test_bind_and_connect(_metadata, &self->srv2,
+ is_restricted(&variant->prot, variant->sandbox),
+ is_restricted(&variant->prot, variant->sandbox));
+}
+
+TEST_F(protocol, connect)
+{
+ if (variant->sandbox == TCP_SANDBOX) {
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ };
+ const struct landlock_net_port_attr tcp_bind_connect_p0 = {
+ .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ .port = self->srv0.port,
+ };
+ const struct landlock_net_port_attr tcp_bind_p1 = {
+ .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP,
+ .port = self->srv1.port,
+ };
+ int ruleset_fd;
+
+ ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+
+ /* Allows connect and bind for the first port. */
+ ASSERT_EQ(0,
+ landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &tcp_bind_connect_p0, 0));
+
+ /* Allows bind and denies connect for the second port. */
+ ASSERT_EQ(0,
+ landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &tcp_bind_p1, 0));
+
+ enforce_ruleset(_metadata, ruleset_fd);
+ EXPECT_EQ(0, close(ruleset_fd));
+ }
+
+ test_bind_and_connect(_metadata, &self->srv0, false, false);
+
+ test_bind_and_connect(_metadata, &self->srv1, false,
+ is_restricted(&variant->prot, variant->sandbox));
+
+ test_bind_and_connect(_metadata, &self->srv2,
+ is_restricted(&variant->prot, variant->sandbox),
+ is_restricted(&variant->prot, variant->sandbox));
+}
+
+TEST_F(protocol, bind_unspec)
+{
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP,
+ };
+ const struct landlock_net_port_attr tcp_bind = {
+ .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP,
+ .port = self->srv0.port,
+ };
+ int bind_fd, ret;
+
+ if (variant->sandbox == TCP_SANDBOX) {
+ const int ruleset_fd = landlock_create_ruleset(
+ &ruleset_attr, sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+
+ /* Allows bind. */
+ ASSERT_EQ(0,
+ landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &tcp_bind, 0));
+ enforce_ruleset(_metadata, ruleset_fd);
+ EXPECT_EQ(0, close(ruleset_fd));
+ }
+
+ bind_fd = socket_variant(&self->srv0);
+ ASSERT_LE(0, bind_fd);
+
+ /* Allowed bind on AF_UNSPEC/INADDR_ANY. */
+ ret = bind_variant(bind_fd, &self->unspec_any0);
+ if (variant->prot.domain == AF_INET) {
+ EXPECT_EQ(0, ret)
+ {
+ TH_LOG("Failed to bind to unspec/any socket: %s",
+ strerror(errno));
+ }
+ } else {
+ EXPECT_EQ(-EINVAL, ret);
+ }
+ EXPECT_EQ(0, close(bind_fd));
+
+ if (variant->sandbox == TCP_SANDBOX) {
+ const int ruleset_fd = landlock_create_ruleset(
+ &ruleset_attr, sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+
+ /* Denies bind. */
+ enforce_ruleset(_metadata, ruleset_fd);
+ EXPECT_EQ(0, close(ruleset_fd));
+ }
+
+ bind_fd = socket_variant(&self->srv0);
+ ASSERT_LE(0, bind_fd);
+
+ /* Denied bind on AF_UNSPEC/INADDR_ANY. */
+ ret = bind_variant(bind_fd, &self->unspec_any0);
+ if (variant->prot.domain == AF_INET) {
+ if (is_restricted(&variant->prot, variant->sandbox)) {
+ EXPECT_EQ(-EACCES, ret);
+ } else {
+ EXPECT_EQ(0, ret);
+ }
+ } else {
+ EXPECT_EQ(-EINVAL, ret);
+ }
+ EXPECT_EQ(0, close(bind_fd));
+
+ /* Checks bind with AF_UNSPEC and the loopback address. */
+ bind_fd = socket_variant(&self->srv0);
+ ASSERT_LE(0, bind_fd);
+ ret = bind_variant(bind_fd, &self->unspec_srv0);
+ if (variant->prot.domain == AF_INET) {
+ EXPECT_EQ(-EAFNOSUPPORT, ret);
+ } else {
+ EXPECT_EQ(-EINVAL, ret)
+ {
+ TH_LOG("Wrong bind error: %s", strerror(errno));
+ }
+ }
+ EXPECT_EQ(0, close(bind_fd));
+}
+
+TEST_F(protocol, connect_unspec)
+{
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_net = LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ };
+ const struct landlock_net_port_attr tcp_connect = {
+ .allowed_access = LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ .port = self->srv0.port,
+ };
+ int bind_fd, client_fd, status;
+ pid_t child;
+
+ /* Specific connection tests. */
+ bind_fd = socket_variant(&self->srv0);
+ ASSERT_LE(0, bind_fd);
+ EXPECT_EQ(0, bind_variant(bind_fd, &self->srv0));
+ if (self->srv0.protocol.type == SOCK_STREAM)
+ EXPECT_EQ(0, listen(bind_fd, backlog));
+
+ child = fork();
+ ASSERT_LE(0, child);
+ if (child == 0) {
+ int connect_fd, ret;
+
+ /* Closes listening socket for the child. */
+ EXPECT_EQ(0, close(bind_fd));
+
+ connect_fd = socket_variant(&self->srv0);
+ ASSERT_LE(0, connect_fd);
+ EXPECT_EQ(0, connect_variant(connect_fd, &self->srv0));
+
+	/* Tries to connect again, or sets the peer. */
+ ret = connect_variant(connect_fd, &self->srv0);
+ if (self->srv0.protocol.type == SOCK_STREAM) {
+ EXPECT_EQ(-EISCONN, ret);
+ } else {
+ EXPECT_EQ(0, ret);
+ }
+
+ if (variant->sandbox == TCP_SANDBOX) {
+ const int ruleset_fd = landlock_create_ruleset(
+ &ruleset_attr, sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+
+ /* Allows connect. */
+ ASSERT_EQ(0, landlock_add_rule(ruleset_fd,
+ LANDLOCK_RULE_NET_PORT,
+ &tcp_connect, 0));
+ enforce_ruleset(_metadata, ruleset_fd);
+ EXPECT_EQ(0, close(ruleset_fd));
+ }
+
+	/* Disconnects the already connected socket, or sets the peer. */
+ ret = connect_variant(connect_fd, &self->unspec_any0);
+ if (self->srv0.protocol.domain == AF_UNIX &&
+ self->srv0.protocol.type == SOCK_STREAM) {
+ EXPECT_EQ(-EINVAL, ret);
+ } else {
+ EXPECT_EQ(0, ret);
+ }
+
+	/* Tries to reconnect, or sets the peer. */
+ ret = connect_variant(connect_fd, &self->srv0);
+ if (self->srv0.protocol.domain == AF_UNIX &&
+ self->srv0.protocol.type == SOCK_STREAM) {
+ EXPECT_EQ(-EISCONN, ret);
+ } else {
+ EXPECT_EQ(0, ret);
+ }
+
+ if (variant->sandbox == TCP_SANDBOX) {
+ const int ruleset_fd = landlock_create_ruleset(
+ &ruleset_attr, sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+
+ /* Denies connect. */
+ enforce_ruleset(_metadata, ruleset_fd);
+ EXPECT_EQ(0, close(ruleset_fd));
+ }
+
+ ret = connect_variant(connect_fd, &self->unspec_any0);
+ if (self->srv0.protocol.domain == AF_UNIX &&
+ self->srv0.protocol.type == SOCK_STREAM) {
+ EXPECT_EQ(-EINVAL, ret);
+ } else {
+ /* Always allowed to disconnect. */
+ EXPECT_EQ(0, ret);
+ }
+
+ EXPECT_EQ(0, close(connect_fd));
+ _exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
+ return;
+ }
+
+ client_fd = bind_fd;
+ if (self->srv0.protocol.type == SOCK_STREAM) {
+ client_fd = accept(bind_fd, NULL, 0);
+ ASSERT_LE(0, client_fd);
+ }
+
+ EXPECT_EQ(child, waitpid(child, &status, 0));
+ EXPECT_EQ(1, WIFEXITED(status));
+ EXPECT_EQ(EXIT_SUCCESS, WEXITSTATUS(status));
+
+ /* Closes connection, if any. */
+ if (client_fd != bind_fd)
+ EXPECT_LE(0, close(client_fd));
+
+ /* Closes listening socket. */
+ EXPECT_EQ(0, close(bind_fd));
+}
+
+FIXTURE(ipv4)
+{
+ struct service_fixture srv0, srv1;
+};
+
+FIXTURE_VARIANT(ipv4)
+{
+ const enum sandbox_type sandbox;
+ const int type;
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(ipv4, no_sandbox_with_tcp) {
+ /* clang-format on */
+ .sandbox = NO_SANDBOX,
+ .type = SOCK_STREAM,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(ipv4, tcp_sandbox_with_tcp) {
+ /* clang-format on */
+ .sandbox = TCP_SANDBOX,
+ .type = SOCK_STREAM,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(ipv4, no_sandbox_with_udp) {
+ /* clang-format on */
+ .sandbox = NO_SANDBOX,
+ .type = SOCK_DGRAM,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(ipv4, tcp_sandbox_with_udp) {
+ /* clang-format on */
+ .sandbox = TCP_SANDBOX,
+ .type = SOCK_DGRAM,
+};
+
+FIXTURE_SETUP(ipv4)
+{
+ const struct protocol_variant prot = {
+ .domain = AF_INET,
+ .type = variant->type,
+ };
+
+ disable_caps(_metadata);
+
+ set_service(&self->srv0, prot, 0);
+ set_service(&self->srv1, prot, 1);
+
+ setup_loopback(_metadata);
+};
+
+FIXTURE_TEARDOWN(ipv4)
+{
+}
+
+TEST_F(ipv4, from_unix_to_inet)
+{
+ int unix_stream_fd, unix_dgram_fd;
+
+ if (variant->sandbox == TCP_SANDBOX) {
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ };
+ const struct landlock_net_port_attr tcp_bind_connect_p0 = {
+ .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ .port = self->srv0.port,
+ };
+ int ruleset_fd;
+
+ /* Denies connect and bind to check errno value. */
+ ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+
+ /* Allows connect and bind for srv0. */
+ ASSERT_EQ(0,
+ landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &tcp_bind_connect_p0, 0));
+
+ enforce_ruleset(_metadata, ruleset_fd);
+ EXPECT_EQ(0, close(ruleset_fd));
+ }
+
+ unix_stream_fd = socket(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0);
+ ASSERT_LE(0, unix_stream_fd);
+
+ unix_dgram_fd = socket(AF_UNIX, SOCK_DGRAM | SOCK_CLOEXEC, 0);
+ ASSERT_LE(0, unix_dgram_fd);
+
+ /* Checks unix stream bind and connect for srv0. */
+ EXPECT_EQ(-EINVAL, bind_variant(unix_stream_fd, &self->srv0));
+ EXPECT_EQ(-EINVAL, connect_variant(unix_stream_fd, &self->srv0));
+
+ /* Checks unix stream bind and connect for srv1. */
+ EXPECT_EQ(-EINVAL, bind_variant(unix_stream_fd, &self->srv1))
+ {
+ TH_LOG("Wrong bind error: %s", strerror(errno));
+ }
+ EXPECT_EQ(-EINVAL, connect_variant(unix_stream_fd, &self->srv1));
+
+ /* Checks unix datagram bind and connect for srv0. */
+ EXPECT_EQ(-EINVAL, bind_variant(unix_dgram_fd, &self->srv0));
+ EXPECT_EQ(-EINVAL, connect_variant(unix_dgram_fd, &self->srv0));
+
+ /* Checks unix datagram bind and connect for srv1. */
+ EXPECT_EQ(-EINVAL, bind_variant(unix_dgram_fd, &self->srv1));
+ EXPECT_EQ(-EINVAL, connect_variant(unix_dgram_fd, &self->srv1));
+}
+
+FIXTURE(tcp_layers)
+{
+ struct service_fixture srv0, srv1;
+};
+
+FIXTURE_VARIANT(tcp_layers)
+{
+ const size_t num_layers;
+ const int domain;
+};
+
+FIXTURE_SETUP(tcp_layers)
+{
+ const struct protocol_variant prot = {
+ .domain = variant->domain,
+ .type = SOCK_STREAM,
+ };
+
+ disable_caps(_metadata);
+
+ ASSERT_EQ(0, set_service(&self->srv0, prot, 0));
+ ASSERT_EQ(0, set_service(&self->srv1, prot, 1));
+
+ setup_loopback(_metadata);
+};
+
+FIXTURE_TEARDOWN(tcp_layers)
+{
+}
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(tcp_layers, no_sandbox_with_ipv4) {
+ /* clang-format on */
+ .domain = AF_INET,
+ .num_layers = 0,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(tcp_layers, one_sandbox_with_ipv4) {
+ /* clang-format on */
+ .domain = AF_INET,
+ .num_layers = 1,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(tcp_layers, two_sandboxes_with_ipv4) {
+ /* clang-format on */
+ .domain = AF_INET,
+ .num_layers = 2,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(tcp_layers, three_sandboxes_with_ipv4) {
+ /* clang-format on */
+ .domain = AF_INET,
+ .num_layers = 3,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(tcp_layers, no_sandbox_with_ipv6) {
+ /* clang-format on */
+ .domain = AF_INET6,
+ .num_layers = 0,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(tcp_layers, one_sandbox_with_ipv6) {
+ /* clang-format on */
+ .domain = AF_INET6,
+ .num_layers = 1,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(tcp_layers, two_sandboxes_with_ipv6) {
+ /* clang-format on */
+ .domain = AF_INET6,
+ .num_layers = 2,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(tcp_layers, three_sandboxes_with_ipv6) {
+ /* clang-format on */
+ .domain = AF_INET6,
+ .num_layers = 3,
+};
+
+TEST_F(tcp_layers, ruleset_overlap)
+{
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ };
+ const struct landlock_net_port_attr tcp_bind = {
+ .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP,
+ .port = self->srv0.port,
+ };
+ const struct landlock_net_port_attr tcp_bind_connect = {
+ .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ .port = self->srv0.port,
+ };
+
+ if (variant->num_layers >= 1) {
+ int ruleset_fd;
+
+ ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+
+ /* Allows bind. */
+ ASSERT_EQ(0,
+ landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &tcp_bind, 0));
+ /* Also allows bind, but allows connect too. */
+ ASSERT_EQ(0,
+ landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &tcp_bind_connect, 0));
+ enforce_ruleset(_metadata, ruleset_fd);
+ EXPECT_EQ(0, close(ruleset_fd));
+ }
+
+ if (variant->num_layers >= 2) {
+ int ruleset_fd;
+
+ /* Creates another ruleset layer. */
+ ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+
+ /* Only allows bind. */
+ ASSERT_EQ(0,
+ landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &tcp_bind, 0));
+ enforce_ruleset(_metadata, ruleset_fd);
+ EXPECT_EQ(0, close(ruleset_fd));
+ }
+
+ if (variant->num_layers >= 3) {
+ int ruleset_fd;
+
+ /* Creates another ruleset layer. */
+ ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+
+	/* Tries to allow bind and connect. */
+ ASSERT_EQ(0,
+ landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &tcp_bind_connect, 0));
+ enforce_ruleset(_metadata, ruleset_fd);
+ EXPECT_EQ(0, close(ruleset_fd));
+ }
+
+ /*
+	 * Connecting to the socket is denied because only one ruleset layer
+ * allows connect.
+ */
+ test_bind_and_connect(_metadata, &self->srv0, false,
+ variant->num_layers >= 2);
+}
+
+TEST_F(tcp_layers, ruleset_expand)
+{
+ if (variant->num_layers >= 1) {
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP,
+ };
+ /* Allows bind for srv0. */
+ const struct landlock_net_port_attr bind_srv0 = {
+ .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP,
+ .port = self->srv0.port,
+ };
+ int ruleset_fd;
+
+ ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+ ASSERT_EQ(0,
+ landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &bind_srv0, 0));
+ enforce_ruleset(_metadata, ruleset_fd);
+ EXPECT_EQ(0, close(ruleset_fd));
+ }
+
+ if (variant->num_layers >= 2) {
+		/* Expands the handled network access mask with the connect action. */
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ };
+ /* Allows bind for srv0 and connect to srv0. */
+ const struct landlock_net_port_attr tcp_bind_connect_p0 = {
+ .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ .port = self->srv0.port,
+ };
+		/* Tries to allow bind for srv1. */
+ const struct landlock_net_port_attr tcp_bind_p1 = {
+ .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP,
+ .port = self->srv1.port,
+ };
+ int ruleset_fd;
+
+ ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+ ASSERT_EQ(0,
+ landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &tcp_bind_connect_p0, 0));
+ ASSERT_EQ(0,
+ landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &tcp_bind_p1, 0));
+ enforce_ruleset(_metadata, ruleset_fd);
+ EXPECT_EQ(0, close(ruleset_fd));
+ }
+
+ if (variant->num_layers >= 3) {
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ };
+		/* Allows bind to srv0, without a connect rule. */
+ const struct landlock_net_port_attr tcp_bind_p0 = {
+ .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP,
+ .port = self->srv0.port,
+ };
+ int ruleset_fd;
+
+ ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+ ASSERT_EQ(0,
+ landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &tcp_bind_p0, 0));
+ enforce_ruleset(_metadata, ruleset_fd);
+ EXPECT_EQ(0, close(ruleset_fd));
+ }
+
+ test_bind_and_connect(_metadata, &self->srv0, false,
+ variant->num_layers >= 3);
+
+ test_bind_and_connect(_metadata, &self->srv1, variant->num_layers >= 1,
+ variant->num_layers >= 2);
+}
+
+/* clang-format off */
+FIXTURE(mini) {};
+/* clang-format on */
+
+FIXTURE_SETUP(mini)
+{
+ disable_caps(_metadata);
+
+ setup_loopback(_metadata);
+};
+
+FIXTURE_TEARDOWN(mini)
+{
+}
+
+/* clang-format off */
+
+#define ACCESS_LAST LANDLOCK_ACCESS_NET_CONNECT_TCP
+
+#define ACCESS_ALL ( \
+ LANDLOCK_ACCESS_NET_BIND_TCP | \
+ LANDLOCK_ACCESS_NET_CONNECT_TCP)
+
+/* clang-format on */
+
+TEST_F(mini, network_access_rights)
+{
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_net = ACCESS_ALL,
+ };
+ struct landlock_net_port_attr net_port = {
+ .port = sock_port_start,
+ };
+ int ruleset_fd;
+ __u64 access;
+
+ ruleset_fd =
+ landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+
+ for (access = 1; access <= ACCESS_LAST; access <<= 1) {
+ net_port.allowed_access = access;
+ EXPECT_EQ(0,
+ landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &net_port, 0))
+ {
+ TH_LOG("Failed to add rule with access 0x%llx: %s",
+ access, strerror(errno));
+ }
+ }
+ EXPECT_EQ(0, close(ruleset_fd));
+}
+
+/* Checks invalid attribute, out of landlock network access range. */
+TEST_F(mini, unknown_access_rights)
+{
+ __u64 access_mask;
+
+ for (access_mask = 1ULL << 63; access_mask != ACCESS_LAST;
+ access_mask >>= 1) {
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_net = access_mask,
+ };
+
+ EXPECT_EQ(-1, landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr), 0));
+ EXPECT_EQ(EINVAL, errno);
+ }
+}
+
+TEST_F(mini, inval)
+{
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP
+ };
+ const struct landlock_net_port_attr tcp_bind_connect = {
+ .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ .port = sock_port_start,
+ };
+ const struct landlock_net_port_attr tcp_denied = {
+ .allowed_access = 0,
+ .port = sock_port_start,
+ };
+ const struct landlock_net_port_attr tcp_bind = {
+ .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP,
+ .port = sock_port_start,
+ };
+ int ruleset_fd;
+
+ ruleset_fd =
+ landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+
+ /* Checks unhandled allowed_access. */
+ EXPECT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &tcp_bind_connect, 0));
+ EXPECT_EQ(EINVAL, errno);
+
+ /* Checks zero access value. */
+ EXPECT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &tcp_denied, 0));
+ EXPECT_EQ(ENOMSG, errno);
+
+ /* Adds with legitimate values. */
+ ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &tcp_bind, 0));
+}
+
+TEST_F(mini, tcp_port_overflow)
+{
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ };
+ const struct landlock_net_port_attr port_max_bind = {
+ .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP,
+ .port = UINT16_MAX,
+ };
+ const struct landlock_net_port_attr port_max_connect = {
+ .allowed_access = LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ .port = UINT16_MAX,
+ };
+ const struct landlock_net_port_attr port_overflow1 = {
+ .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP,
+ .port = UINT16_MAX + 1,
+ };
+ const struct landlock_net_port_attr port_overflow2 = {
+ .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP,
+ .port = UINT16_MAX + 2,
+ };
+ const struct landlock_net_port_attr port_overflow3 = {
+ .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP,
+ .port = UINT32_MAX + 1UL,
+ };
+ const struct landlock_net_port_attr port_overflow4 = {
+ .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP,
+ .port = UINT32_MAX + 2UL,
+ };
+ const struct protocol_variant ipv4_tcp = {
+ .domain = AF_INET,
+ .type = SOCK_STREAM,
+ };
+ struct service_fixture srv_denied, srv_max_allowed;
+ int ruleset_fd;
+
+ ASSERT_EQ(0, set_service(&srv_denied, ipv4_tcp, 0));
+
+ /* Be careful to avoid port inconsistencies. */
+ srv_max_allowed = srv_denied;
+ srv_max_allowed.port = port_max_bind.port;
+ srv_max_allowed.ipv4_addr.sin_port = htons(port_max_bind.port);
+
+ ruleset_fd =
+ landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+
+ ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &port_max_bind, 0));
+
+ EXPECT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &port_overflow1, 0));
+ EXPECT_EQ(EINVAL, errno);
+
+ EXPECT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &port_overflow2, 0));
+ EXPECT_EQ(EINVAL, errno);
+
+ EXPECT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &port_overflow3, 0));
+ EXPECT_EQ(EINVAL, errno);
+
+ /* Interleaves with invalid rule additions. */
+ ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &port_max_connect, 0));
+
+ EXPECT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &port_overflow4, 0));
+ EXPECT_EQ(EINVAL, errno);
+
+ enforce_ruleset(_metadata, ruleset_fd);
+
+ test_bind_and_connect(_metadata, &srv_denied, true, true);
+ test_bind_and_connect(_metadata, &srv_max_allowed, false, false);
+}
+
+FIXTURE(ipv4_tcp)
+{
+ struct service_fixture srv0, srv1;
+};
+
+FIXTURE_SETUP(ipv4_tcp)
+{
+ const struct protocol_variant ipv4_tcp = {
+ .domain = AF_INET,
+ .type = SOCK_STREAM,
+ };
+
+ disable_caps(_metadata);
+
+ ASSERT_EQ(0, set_service(&self->srv0, ipv4_tcp, 0));
+ ASSERT_EQ(0, set_service(&self->srv1, ipv4_tcp, 1));
+
+ setup_loopback(_metadata);
+};
+
+FIXTURE_TEARDOWN(ipv4_tcp)
+{
+}
+
+TEST_F(ipv4_tcp, port_endianness)
+{
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ };
+ const struct landlock_net_port_attr bind_host_endian_p0 = {
+ .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP,
+ /* Host port format. */
+ .port = self->srv0.port,
+ };
+ const struct landlock_net_port_attr connect_big_endian_p0 = {
+ .allowed_access = LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ /* Big endian port format. */
+ .port = htons(self->srv0.port),
+ };
+ const struct landlock_net_port_attr bind_connect_host_endian_p1 = {
+ .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ /* Host port format. */
+ .port = self->srv1.port,
+ };
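+	/* The first byte of a native 1 is nonzero only on little-endian CPUs. */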
+ const unsigned int one = 1;
+ const char little_endian = *(const char *)&one;
+ int ruleset_fd;
+
+ ruleset_fd =
+ landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+ ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &bind_host_endian_p0, 0));
+ ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &connect_big_endian_p0, 0));
+ ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &bind_connect_host_endian_p1, 0));
+ enforce_ruleset(_metadata, ruleset_fd);
+
+	/* No restriction for big endian CPUs. */
+ test_bind_and_connect(_metadata, &self->srv0, false, little_endian);
+
+ /* No restriction for any CPU. */
+ test_bind_and_connect(_metadata, &self->srv1, false, false);
+}
+
+TEST_F(ipv4_tcp, with_fs)
+{
+ const struct landlock_ruleset_attr ruleset_attr_fs_net = {
+ .handled_access_fs = LANDLOCK_ACCESS_FS_READ_DIR,
+ .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP,
+ };
+ struct landlock_path_beneath_attr path_beneath = {
+ .allowed_access = LANDLOCK_ACCESS_FS_READ_DIR,
+ .parent_fd = -1,
+ };
+ struct landlock_net_port_attr tcp_bind = {
+ .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP,
+ .port = self->srv0.port,
+ };
+ int ruleset_fd, bind_fd, dir_fd;
+
+ /* Creates ruleset both for filesystem and network access. */
+ ruleset_fd = landlock_create_ruleset(&ruleset_attr_fs_net,
+ sizeof(ruleset_attr_fs_net), 0);
+ ASSERT_LE(0, ruleset_fd);
+
+ /* Adds a filesystem rule. */
+ path_beneath.parent_fd = open("/dev", O_PATH | O_DIRECTORY | O_CLOEXEC);
+ ASSERT_LE(0, path_beneath.parent_fd);
+ ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+ &path_beneath, 0));
+ EXPECT_EQ(0, close(path_beneath.parent_fd));
+
+ /* Adds a network rule. */
+ ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &tcp_bind, 0));
+
+ enforce_ruleset(_metadata, ruleset_fd);
+ EXPECT_EQ(0, close(ruleset_fd));
+
+ /* Tests file access. */
+ dir_fd = open("/dev", O_RDONLY);
+ EXPECT_LE(0, dir_fd);
+ EXPECT_EQ(0, close(dir_fd));
+
+ dir_fd = open("/", O_RDONLY);
+ EXPECT_EQ(-1, dir_fd);
+ EXPECT_EQ(EACCES, errno);
+
+ /* Tests port binding. */
+ bind_fd = socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC, 0);
+ ASSERT_LE(0, bind_fd);
+ EXPECT_EQ(0, bind_variant(bind_fd, &self->srv0));
+ EXPECT_EQ(0, close(bind_fd));
+
+ bind_fd = socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC, 0);
+ ASSERT_LE(0, bind_fd);
+ EXPECT_EQ(-EACCES, bind_variant(bind_fd, &self->srv1));
+}
+
+FIXTURE(port_specific)
+{
+ struct service_fixture srv0;
+};
+
+FIXTURE_VARIANT(port_specific)
+{
+ const enum sandbox_type sandbox;
+ const struct protocol_variant prot;
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(port_specific, no_sandbox_with_ipv4) {
+ /* clang-format on */
+ .sandbox = NO_SANDBOX,
+ .prot = {
+ .domain = AF_INET,
+ .type = SOCK_STREAM,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(port_specific, sandbox_with_ipv4) {
+ /* clang-format on */
+ .sandbox = TCP_SANDBOX,
+ .prot = {
+ .domain = AF_INET,
+ .type = SOCK_STREAM,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(port_specific, no_sandbox_with_ipv6) {
+ /* clang-format on */
+ .sandbox = NO_SANDBOX,
+ .prot = {
+ .domain = AF_INET6,
+ .type = SOCK_STREAM,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(port_specific, sandbox_with_ipv6) {
+ /* clang-format on */
+ .sandbox = TCP_SANDBOX,
+ .prot = {
+ .domain = AF_INET6,
+ .type = SOCK_STREAM,
+ },
+};
+
+FIXTURE_SETUP(port_specific)
+{
+ disable_caps(_metadata);
+
+ ASSERT_EQ(0, set_service(&self->srv0, variant->prot, 0));
+
+ setup_loopback(_metadata);
+};
+
+FIXTURE_TEARDOWN(port_specific)
+{
+}
+
+TEST_F(port_specific, bind_connect_zero)
+{
+ int bind_fd, connect_fd, ret;
+ uint16_t port;
+
+ /* Adds a rule layer with bind and connect actions. */
+ if (variant->sandbox == TCP_SANDBOX) {
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP
+ };
+ const struct landlock_net_port_attr tcp_bind_connect_zero = {
+ .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ .port = 0,
+ };
+ int ruleset_fd;
+
+ ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+
+ /* Checks zero port value on bind and connect actions. */
+ EXPECT_EQ(0,
+ landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &tcp_bind_connect_zero, 0));
+
+ enforce_ruleset(_metadata, ruleset_fd);
+ EXPECT_EQ(0, close(ruleset_fd));
+ }
+
+ bind_fd = socket_variant(&self->srv0);
+ ASSERT_LE(0, bind_fd);
+
+ connect_fd = socket_variant(&self->srv0);
+ ASSERT_LE(0, connect_fd);
+
+ /* Sets address port to 0 for both protocol families. */
+ set_port(&self->srv0, 0);
+ /*
+ * Binds on port 0, which selects a random port within
+ * ip_local_port_range.
+ */
+ ret = bind_variant(bind_fd, &self->srv0);
+ EXPECT_EQ(0, ret);
+
+ EXPECT_EQ(0, listen(bind_fd, backlog));
+
+ /* Connects on port 0. */
+ ret = connect_variant(connect_fd, &self->srv0);
+ EXPECT_EQ(-ECONNREFUSED, ret);
+
+	/* Sets the bound port for both protocol families. */
+ port = get_binded_port(bind_fd, &variant->prot);
+ EXPECT_NE(0, port);
+ set_port(&self->srv0, port);
+	/* Connects to the bound port. */
+ ret = connect_variant(connect_fd, &self->srv0);
+ if (is_restricted(&variant->prot, variant->sandbox)) {
+ /* Denied by Landlock. */
+ EXPECT_EQ(-EACCES, ret);
+ } else {
+ EXPECT_EQ(0, ret);
+ }
+
+ EXPECT_EQ(0, close(connect_fd));
+ EXPECT_EQ(0, close(bind_fd));
+}
+
+TEST_F(port_specific, bind_connect_1023)
+{
+ int bind_fd, connect_fd, ret;
+
+ /* Adds a rule layer with bind and connect actions. */
+ if (variant->sandbox == TCP_SANDBOX) {
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP
+ };
+ /* A rule with port value less than 1024. */
+ const struct landlock_net_port_attr tcp_bind_connect_low_range = {
+ .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ .port = 1023,
+ };
+		/* A rule with port 1024. */
+ const struct landlock_net_port_attr tcp_bind_connect = {
+ .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP |
+ LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ .port = 1024,
+ };
+ int ruleset_fd;
+
+ ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+
+ ASSERT_EQ(0,
+ landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &tcp_bind_connect_low_range, 0));
+ ASSERT_EQ(0,
+ landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ &tcp_bind_connect, 0));
+
+ enforce_ruleset(_metadata, ruleset_fd);
+ EXPECT_EQ(0, close(ruleset_fd));
+ }
+
+ bind_fd = socket_variant(&self->srv0);
+ ASSERT_LE(0, bind_fd);
+
+ connect_fd = socket_variant(&self->srv0);
+ ASSERT_LE(0, connect_fd);
+
+ /* Sets address port to 1023 for both protocol families. */
+ set_port(&self->srv0, 1023);
+ /* Binds on port 1023. */
+ ret = bind_variant(bind_fd, &self->srv0);
+ /* Denied by the system. */
+ EXPECT_EQ(-EACCES, ret);
+
+	/* Binds on port 1023 with CAP_NET_BIND_SERVICE. */
+ set_cap(_metadata, CAP_NET_BIND_SERVICE);
+ ret = bind_variant(bind_fd, &self->srv0);
+ clear_cap(_metadata, CAP_NET_BIND_SERVICE);
+ EXPECT_EQ(0, ret);
+ EXPECT_EQ(0, listen(bind_fd, backlog));
+
+	/* Connects to the bound port 1023. */
+ ret = connect_variant(connect_fd, &self->srv0);
+ EXPECT_EQ(0, ret);
+
+ EXPECT_EQ(0, close(connect_fd));
+ EXPECT_EQ(0, close(bind_fd));
+
+ bind_fd = socket_variant(&self->srv0);
+ ASSERT_LE(0, bind_fd);
+
+ connect_fd = socket_variant(&self->srv0);
+ ASSERT_LE(0, connect_fd);
+
+ /* Sets address port to 1024 for both protocol families. */
+ set_port(&self->srv0, 1024);
+ /* Binds on port 1024. */
+ ret = bind_variant(bind_fd, &self->srv0);
+ EXPECT_EQ(0, ret);
+ EXPECT_EQ(0, listen(bind_fd, backlog));
+
+	/* Connects to the bound port 1024. */
+ ret = connect_variant(connect_fd, &self->srv0);
+ EXPECT_EQ(0, ret);
+
+ EXPECT_EQ(0, close(connect_fd));
+ EXPECT_EQ(0, close(bind_fd));
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index f838dd370f6a..b3b2dc5a630c 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -2048,7 +2048,7 @@ run_test() {
case $ret in
0)
all_skipped=false
- [ $exitcode=$ksft_skip ] && exitcode=0
+ [ $exitcode -eq $ksft_skip ] && exitcode=0
;;
$ksft_skip)
[ $all_skipped = true ] && exitcode=$ksft_skip
diff --git a/tools/testing/selftests/riscv/hwprobe/Makefile b/tools/testing/selftests/riscv/hwprobe/Makefile
index ebdbb3c22e54..f224b84591fb 100644
--- a/tools/testing/selftests/riscv/hwprobe/Makefile
+++ b/tools/testing/selftests/riscv/hwprobe/Makefile
@@ -2,9 +2,14 @@
# Copyright (C) 2021 ARM Limited
# Originally tools/testing/arm64/abi/Makefile
-TEST_GEN_PROGS := hwprobe
+CFLAGS += -I$(top_srcdir)/tools/include
+
+TEST_GEN_PROGS := hwprobe cbo
include ../../lib.mk
$(OUTPUT)/hwprobe: hwprobe.c sys_hwprobe.S
- $(CC) -o$@ $(CFLAGS) $(LDFLAGS) $^
+ $(CC) -static -o$@ $(CFLAGS) $(LDFLAGS) $^
+
+$(OUTPUT)/cbo: cbo.c sys_hwprobe.S
+ $(CC) -static -o$@ $(CFLAGS) $(LDFLAGS) $^
diff --git a/tools/testing/selftests/riscv/hwprobe/cbo.c b/tools/testing/selftests/riscv/hwprobe/cbo.c
new file mode 100644
index 000000000000..50a2cc8aef38
--- /dev/null
+++ b/tools/testing/selftests/riscv/hwprobe/cbo.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Ventana Micro Systems Inc.
+ *
+ * Run with 'taskset -c <cpu-list> cbo' to execute hwprobe and run the tests
+ * only on that subset of cpus.
+ */
+#define _GNU_SOURCE
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+#include <sched.h>
+#include <signal.h>
+#include <assert.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <asm/ucontext.h>
+
+#include "hwprobe.h"
+#include "../../kselftest.h"
+
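+/*
+ * Builds a raw CBO instruction word: MISC-MEM opcode (0xf), funct3 = 2,
+ * rd = x0, rs1 = a0 (x10) holding the base address, and the CBO operation
+ * number 'fn' in the immediate field.
+ */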
+#define MK_CBO(fn) cpu_to_le32((fn) << 20 | 10 << 15 | 2 << 12 | 0 << 7 | 15)
+
+static char mem[4096] __aligned(4096) = { [0 ... 4095] = 0xa5 };
+
+static bool illegal_insn;
+
+static void sigill_handler(int sig, siginfo_t *info, void *context)
+{
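+	/* regs[0] is the trapping PC; regs[11] is a1, which cbo_insn() loads with the CBO operation number. */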
+ unsigned long *regs = (unsigned long *)&((ucontext_t *)context)->uc_mcontext;
+ uint32_t insn = *(uint32_t *)regs[0];
+
+ assert(insn == MK_CBO(regs[11]));
+
+ illegal_insn = true;
+ regs[0] += 4;
+}
+
+static void cbo_insn(char *base, int fn)
+{
+ uint32_t insn = MK_CBO(fn);
+
+ asm volatile(
+ "mv a0, %0\n"
+ "li a1, %1\n"
+ ".4byte %2\n"
+ : : "r" (base), "i" (fn), "i" (insn) : "a0", "a1", "memory");
+}
+
+static void cbo_inval(char *base) { cbo_insn(base, 0); }
+static void cbo_clean(char *base) { cbo_insn(base, 1); }
+static void cbo_flush(char *base) { cbo_insn(base, 2); }
+static void cbo_zero(char *base) { cbo_insn(base, 4); }
+
+static void test_no_zicbom(void *arg)
+{
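+	/* For datagram sockets, there is no accepted socket: uses bind_fd directly. */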
+ ksft_print_msg("Testing Zicbom instructions remain privileged\n");
+
+ illegal_insn = false;
+ cbo_clean(&mem[0]);
+ ksft_test_result(illegal_insn, "No cbo.clean\n");
+
+ illegal_insn = false;
+ cbo_flush(&mem[0]);
+ ksft_test_result(illegal_insn, "No cbo.flush\n");
+
+ illegal_insn = false;
+ cbo_inval(&mem[0]);
+ ksft_test_result(illegal_insn, "No cbo.inval\n");
+}
+
+static void test_no_zicboz(void *arg)
+{
+ ksft_print_msg("No Zicboz, testing cbo.zero remains privileged\n");
+
+ illegal_insn = false;
+ cbo_zero(&mem[0]);
+ ksft_test_result(illegal_insn, "No cbo.zero\n");
+}
+
+static bool is_power_of_2(__u64 n)
+{
+ return n != 0 && (n & (n - 1)) == 0;
+}
+
+static void test_zicboz(void *arg)
+{
+ struct riscv_hwprobe pair = {
+ .key = RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE,
+ };
+ cpu_set_t *cpus = (cpu_set_t *)arg;
+ __u64 block_size;
+ int i, j;
+ long rc;
+
+ rc = riscv_hwprobe(&pair, 1, sizeof(cpu_set_t), (unsigned long *)cpus, 0);
+ block_size = pair.value;
+ ksft_test_result(rc == 0 && pair.key == RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE &&
+ is_power_of_2(block_size), "Zicboz block size\n");
+	ksft_print_msg("Zicboz block size: %llu\n", block_size);
+
+ illegal_insn = false;
+ cbo_zero(&mem[block_size]);
+ ksft_test_result(!illegal_insn, "cbo.zero\n");
+
+ if (illegal_insn || !is_power_of_2(block_size)) {
+ ksft_test_result_skip("cbo.zero check\n");
+ return;
+ }
+
+ assert(block_size <= 1024);
+
+ for (i = 0; i < 4096 / block_size; ++i) {
+ if (i % 2)
+ cbo_zero(&mem[i * block_size]);
+ }
+
+ for (i = 0; i < 4096 / block_size; ++i) {
+ char expected = i % 2 ? 0x0 : 0xa5;
+
+ for (j = 0; j < block_size; ++j) {
+ if (mem[i * block_size + j] != expected) {
+ ksft_test_result_fail("cbo.zero check\n");
+				ksft_print_msg("cbo.zero check: mem[%llu] != 0x%x\n",
+ i * block_size + j, expected);
+ return;
+ }
+ }
+ }
+
+ ksft_test_result_pass("cbo.zero check\n");
+}
+
+static void check_no_zicboz_cpus(cpu_set_t *cpus)
+{
+ struct riscv_hwprobe pair = {
+ .key = RISCV_HWPROBE_KEY_IMA_EXT_0,
+ };
+ cpu_set_t one_cpu;
+ int i = 0, c = 0;
+ long rc;
+
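+	/* Probes each CPU of the affinity mask individually. */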
+ while (i++ < CPU_COUNT(cpus)) {
+ while (!CPU_ISSET(c, cpus))
+ ++c;
+
+ CPU_ZERO(&one_cpu);
+ CPU_SET(c, &one_cpu);
+
+ rc = riscv_hwprobe(&pair, 1, sizeof(cpu_set_t), (unsigned long *)&one_cpu, 0);
+ assert(rc == 0 && pair.key == RISCV_HWPROBE_KEY_IMA_EXT_0);
+
+ if (pair.value & RISCV_HWPROBE_EXT_ZICBOZ)
+ ksft_exit_fail_msg("Zicboz is only present on a subset of harts.\n"
+ "Use taskset to select a set of harts where Zicboz\n"
+					   "is either present on every hart or absent on every hart\n");
+ ++c;
+ }
+}
+
+enum {
+ TEST_ZICBOZ,
+ TEST_NO_ZICBOZ,
+ TEST_NO_ZICBOM,
+};
+
+static struct test_info {
+ bool enabled;
+ unsigned int nr_tests;
+ void (*test_fn)(void *arg);
+} tests[] = {
+ [TEST_ZICBOZ] = { .nr_tests = 3, test_zicboz },
+ [TEST_NO_ZICBOZ] = { .nr_tests = 1, test_no_zicboz },
+ [TEST_NO_ZICBOM] = { .nr_tests = 3, test_no_zicbom },
+};
+
+int main(int argc, char **argv)
+{
+ struct sigaction act = {
+ .sa_sigaction = &sigill_handler,
+ .sa_flags = SA_SIGINFO,
+ };
+ struct riscv_hwprobe pair;
+ unsigned int plan = 0;
+ cpu_set_t cpus;
+ long rc;
+ int i;
+
+ if (argc > 1 && !strcmp(argv[1], "--sigill")) {
+ rc = sigaction(SIGILL, &act, NULL);
+ assert(rc == 0);
+ tests[TEST_NO_ZICBOZ].enabled = true;
+ tests[TEST_NO_ZICBOM].enabled = true;
+ }
+
+ rc = sched_getaffinity(0, sizeof(cpu_set_t), &cpus);
+ assert(rc == 0);
+
+ ksft_print_header();
+
+ pair.key = RISCV_HWPROBE_KEY_IMA_EXT_0;
+ rc = riscv_hwprobe(&pair, 1, sizeof(cpu_set_t), (unsigned long *)&cpus, 0);
+ if (rc < 0)
+		ksft_exit_fail_msg("hwprobe() failed with %ld\n", rc);
+ assert(rc == 0 && pair.key == RISCV_HWPROBE_KEY_IMA_EXT_0);
+
+ if (pair.value & RISCV_HWPROBE_EXT_ZICBOZ) {
+ tests[TEST_ZICBOZ].enabled = true;
+ tests[TEST_NO_ZICBOZ].enabled = false;
+ } else {
+ check_no_zicboz_cpus(&cpus);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tests); ++i)
+ plan += tests[i].enabled ? tests[i].nr_tests : 0;
+
+ if (plan == 0)
+ ksft_print_msg("No tests enabled.\n");
+ else
+ ksft_set_plan(plan);
+
+ for (i = 0; i < ARRAY_SIZE(tests); ++i) {
+ if (tests[i].enabled)
+ tests[i].test_fn(&cpus);
+ }
+
+ ksft_finished();
+}
diff --git a/tools/testing/selftests/riscv/hwprobe/hwprobe.c b/tools/testing/selftests/riscv/hwprobe/hwprobe.c
index 09f290a67420..c474891df307 100644
--- a/tools/testing/selftests/riscv/hwprobe/hwprobe.c
+++ b/tools/testing/selftests/riscv/hwprobe/hwprobe.c
@@ -1,14 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <stddef.h>
-#include <asm/hwprobe.h>
-
-/*
- * Rather than relying on having a new enough libc to define this, just do it
- * ourselves. This way we don't need to be coupled to a new-enough libc to
- * contain the call.
- */
-long riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
- size_t cpu_count, unsigned long *cpus, unsigned int flags);
+#include "hwprobe.h"
+#include "../../kselftest.h"
int main(int argc, char **argv)
{
@@ -16,6 +8,9 @@ int main(int argc, char **argv)
unsigned long cpus;
long out;
+ ksft_print_header();
+ ksft_set_plan(5);
+
/* Fake the CPU_SET ops. */
cpus = -1;
@@ -25,13 +20,16 @@ int main(int argc, char **argv)
*/
for (long i = 0; i < 8; i++)
pairs[i].key = i;
+
out = riscv_hwprobe(pairs, 8, 1, &cpus, 0);
if (out != 0)
- return -1;
+ ksft_exit_fail_msg("hwprobe() failed with %ld\n", out);
+
for (long i = 0; i < 4; ++i) {
/* Fail if the kernel claims not to recognize a base key. */
if ((i < 4) && (pairs[i].key != i))
- return -2;
+ ksft_exit_fail_msg("Failed to recognize base key: key != i, "
+ "key=%ld, i=%ld\n", pairs[i].key, i);
if (pairs[i].key != RISCV_HWPROBE_KEY_BASE_BEHAVIOR)
continue;
@@ -39,52 +37,30 @@ int main(int argc, char **argv)
if (pairs[i].value & RISCV_HWPROBE_BASE_BEHAVIOR_IMA)
continue;
- return -3;
+ ksft_exit_fail_msg("Unexpected pair: (%ld, %ld)\n", pairs[i].key, pairs[i].value);
}
- /*
- * This should also work with a NULL CPU set, but should not work
- * with an improperly supplied CPU set.
- */
out = riscv_hwprobe(pairs, 8, 0, 0, 0);
- if (out != 0)
- return -4;
+ ksft_test_result(out == 0, "NULL CPU set\n");
out = riscv_hwprobe(pairs, 8, 0, &cpus, 0);
- if (out == 0)
- return -5;
+ ksft_test_result(out != 0, "Bad CPU set\n");
out = riscv_hwprobe(pairs, 8, 1, 0, 0);
- if (out == 0)
- return -6;
+ ksft_test_result(out != 0, "NULL CPU set with non-zero count\n");
- /*
- * Check that keys work by providing one that we know exists, and
- * checking to make sure the resultig pair is what we asked for.
- */
pairs[0].key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR;
out = riscv_hwprobe(pairs, 1, 1, &cpus, 0);
- if (out != 0)
- return -7;
- if (pairs[0].key != RISCV_HWPROBE_KEY_BASE_BEHAVIOR)
- return -8;
+ ksft_test_result(out == 0 && pairs[0].key == RISCV_HWPROBE_KEY_BASE_BEHAVIOR,
+ "Existing key is maintained\n");
- /*
- * Check that an unknown key gets overwritten with -1,
- * but doesn't block elements after it.
- */
pairs[0].key = 0x5555;
pairs[1].key = 1;
pairs[1].value = 0xAAAA;
out = riscv_hwprobe(pairs, 2, 0, 0, 0);
- if (out != 0)
- return -9;
-
- if (pairs[0].key != -1)
- return -10;
-
- if ((pairs[1].key != 1) || (pairs[1].value == 0xAAAA))
- return -11;
+ ksft_test_result(out == 0 && pairs[0].key == -1 &&
+ pairs[1].key == 1 && pairs[1].value != 0xAAAA,
+ "Unknown key overwritten with -1 and doesn't block other elements\n");
- return 0;
+ ksft_finished();
}
diff --git a/tools/testing/selftests/riscv/hwprobe/hwprobe.h b/tools/testing/selftests/riscv/hwprobe/hwprobe.h
new file mode 100644
index 000000000000..721b0ce73a56
--- /dev/null
+++ b/tools/testing/selftests/riscv/hwprobe/hwprobe.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_RISCV_HWPROBE_H
+#define SELFTEST_RISCV_HWPROBE_H
+#include <stddef.h>
+#include <asm/hwprobe.h>
+
+/*
+ * Rather than relying on having a new enough libc to define this, just do it
+ * ourselves. This way we don't need to be coupled to a new-enough libc to
+ * contain the call.
+ */
+long riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
+ size_t cpu_count, unsigned long *cpus, unsigned int flags);
+
+#endif
diff --git a/tools/testing/selftests/user_events/abi_test.c b/tools/testing/selftests/user_events/abi_test.c
index f5575ef2007c..cef1ff1af223 100644
--- a/tools/testing/selftests/user_events/abi_test.c
+++ b/tools/testing/selftests/user_events/abi_test.c
@@ -24,6 +24,18 @@
const char *data_file = "/sys/kernel/tracing/user_events_data";
const char *enable_file = "/sys/kernel/tracing/events/user_events/__abi_event/enable";
+static bool event_exists(void)
+{
+ int fd = open(enable_file, O_RDWR);
+
+ if (fd < 0)
+ return false;
+
+ close(fd);
+
+ return true;
+}
+
static int change_event(bool enable)
{
int fd = open(enable_file, O_RDWR);
@@ -47,7 +59,22 @@ static int change_event(bool enable)
return ret;
}
-static int reg_enable(void *enable, int size, int bit)
+static int event_delete(void)
+{
+ int fd = open(data_file, O_RDWR);
+ int ret;
+
+ if (fd < 0)
+ return -1;
+
+ ret = ioctl(fd, DIAG_IOCSDEL, "__abi_event");
+
+ close(fd);
+
+ return ret;
+}
+
+static int reg_enable_flags(void *enable, int size, int bit, int flags)
{
struct user_reg reg = {0};
int fd = open(data_file, O_RDWR);
@@ -58,6 +85,7 @@ static int reg_enable(void *enable, int size, int bit)
reg.size = sizeof(reg);
reg.name_args = (__u64)"__abi_event";
+ reg.flags = flags;
reg.enable_bit = bit;
reg.enable_addr = (__u64)enable;
reg.enable_size = size;
@@ -69,6 +97,11 @@ static int reg_enable(void *enable, int size, int bit)
return ret;
}
+static int reg_enable(void *enable, int size, int bit)
+{
+ return reg_enable_flags(enable, size, bit, 0);
+}
+
static int reg_disable(void *enable, int bit)
{
struct user_unreg reg = {0};
@@ -128,6 +161,26 @@ TEST_F(user, enablement) {
ASSERT_EQ(0, change_event(false));
}
+TEST_F(user, flags) {
+ /* USER_EVENT_REG_PERSIST is allowed */
+ ASSERT_EQ(0, reg_enable_flags(&self->check, sizeof(int), 0,
+ USER_EVENT_REG_PERSIST));
+ ASSERT_EQ(0, reg_disable(&self->check, 0));
+
+ /* Ensure it exists after close and disable */
+ ASSERT_TRUE(event_exists());
+
+ /* Ensure we can delete it */
+ ASSERT_EQ(0, event_delete());
+
+ /* USER_EVENT_REG_MAX or above is not allowed */
+ ASSERT_EQ(-1, reg_enable_flags(&self->check, sizeof(int), 0,
+ USER_EVENT_REG_MAX));
+
+ /* Ensure it does not exist after invalid flags */
+ ASSERT_FALSE(event_exists());
+}
+
TEST_F(user, bit_sizes) {
/* Allow 0-31 bits for 32-bit */
ASSERT_EQ(0, reg_enable(&self->check, sizeof(int), 0));
diff --git a/tools/testing/selftests/user_events/dyn_test.c b/tools/testing/selftests/user_events/dyn_test.c
index a85980190bea..bdf9ab127488 100644
--- a/tools/testing/selftests/user_events/dyn_test.c
+++ b/tools/testing/selftests/user_events/dyn_test.c
@@ -17,9 +17,25 @@
#include "../kselftest_harness.h"
#include "user_events_selftests.h"
+const char *dyn_file = "/sys/kernel/tracing/dynamic_events";
const char *abi_file = "/sys/kernel/tracing/user_events_data";
const char *enable_file = "/sys/kernel/tracing/events/user_events/__test_event/enable";
+static int event_delete(void)
+{
+ int fd = open(abi_file, O_RDWR);
+ int ret;
+
+ if (fd < 0)
+ return -1;
+
+ ret = ioctl(fd, DIAG_IOCSDEL, "__test_event");
+
+ close(fd);
+
+ return ret;
+}
+
static bool wait_for_delete(void)
{
int i;
@@ -64,7 +80,31 @@ static int unreg_event(int fd, int *check, int bit)
return ioctl(fd, DIAG_IOCSUNREG, &unreg);
}
-static int parse(int *check, const char *value)
+static int parse_dyn(const char *value)
+{
+ int fd = open(dyn_file, O_RDWR | O_APPEND);
+ int len = strlen(value);
+ int ret;
+
+ if (fd == -1)
+ return -1;
+
+ ret = write(fd, value, len);
+
+ if (ret == len)
+ ret = 0;
+ else
+ ret = -1;
+
+ close(fd);
+
+ if (ret == 0)
+ event_delete();
+
+ return ret;
+}
+
+static int parse_abi(int *check, const char *value)
{
int fd = open(abi_file, O_RDWR);
int ret;
@@ -90,6 +130,18 @@ static int parse(int *check, const char *value)
return ret;
}
+static int parse(int *check, const char *value)
+{
+ int abi_ret = parse_abi(check, value);
+ int dyn_ret = parse_dyn(value);
+
+ /* Ensure both ABI and DYN parse the same way */
+ if (dyn_ret != abi_ret)
+ return -1;
+
+ return dyn_ret;
+}
+
static int check_match(int *check, const char *first, const char *second, bool *match)
{
int fd = open(abi_file, O_RDWR);
diff --git a/tools/testing/vsock/util.c b/tools/testing/vsock/util.c
index 92336721321a..ae2b33c21c45 100644
--- a/tools/testing/vsock/util.c
+++ b/tools/testing/vsock/util.c
@@ -85,6 +85,48 @@ void vsock_wait_remote_close(int fd)
close(epollfd);
}
+/* Bind to <bind_port>, connect to <cid, port> and return the file descriptor. */
+int vsock_bind_connect(unsigned int cid, unsigned int port, unsigned int bind_port, int type)
+{
+ struct sockaddr_vm sa_client = {
+ .svm_family = AF_VSOCK,
+ .svm_cid = VMADDR_CID_ANY,
+ .svm_port = bind_port,
+ };
+ struct sockaddr_vm sa_server = {
+ .svm_family = AF_VSOCK,
+ .svm_cid = cid,
+ .svm_port = port,
+ };
+
+ int client_fd, ret;
+
+ client_fd = socket(AF_VSOCK, type, 0);
+ if (client_fd < 0) {
+ perror("socket");
+ exit(EXIT_FAILURE);
+ }
+
+ if (bind(client_fd, (struct sockaddr *)&sa_client, sizeof(sa_client))) {
+ perror("bind");
+ exit(EXIT_FAILURE);
+ }
+
+ timeout_begin(TIMEOUT);
+ do {
+ ret = connect(client_fd, (struct sockaddr *)&sa_server, sizeof(sa_server));
+ timeout_check("connect");
+ } while (ret < 0 && errno == EINTR);
+ timeout_end();
+
+ if (ret < 0) {
+ perror("connect");
+ exit(EXIT_FAILURE);
+ }
+
+ return client_fd;
+}
+
/* Connect to <cid, port> and return the file descriptor. */
static int vsock_connect(unsigned int cid, unsigned int port, int type)
{
@@ -104,6 +146,10 @@ static int vsock_connect(unsigned int cid, unsigned int port, int type)
control_expectln("LISTENING");
fd = socket(AF_VSOCK, type, 0);
+ if (fd < 0) {
+ perror("socket");
+ exit(EXIT_FAILURE);
+ }
timeout_begin(TIMEOUT);
do {
@@ -132,11 +178,8 @@ int vsock_seqpacket_connect(unsigned int cid, unsigned int port)
return vsock_connect(cid, port, SOCK_SEQPACKET);
}
-/* Listen on <cid, port> and return the first incoming connection. The remote
- * address is stored to clientaddrp. clientaddrp may be NULL.
- */
-static int vsock_accept(unsigned int cid, unsigned int port,
- struct sockaddr_vm *clientaddrp, int type)
+/* Listen on <cid, port> and return the file descriptor. */
+static int vsock_listen(unsigned int cid, unsigned int port, int type)
{
union {
struct sockaddr sa;
@@ -148,16 +191,13 @@ static int vsock_accept(unsigned int cid, unsigned int port,
.svm_cid = cid,
},
};
- union {
- struct sockaddr sa;
- struct sockaddr_vm svm;
- } clientaddr;
- socklen_t clientaddr_len = sizeof(clientaddr.svm);
int fd;
- int client_fd;
- int old_errno;
fd = socket(AF_VSOCK, type, 0);
+ if (fd < 0) {
+ perror("socket");
+ exit(EXIT_FAILURE);
+ }
if (bind(fd, &addr.sa, sizeof(addr.svm)) < 0) {
perror("bind");
@@ -169,6 +209,24 @@ static int vsock_accept(unsigned int cid, unsigned int port,
exit(EXIT_FAILURE);
}
+ return fd;
+}
+
+/* Listen on <cid, port> and return the first incoming connection. The remote
+ * address is stored to clientaddrp. clientaddrp may be NULL.
+ */
+static int vsock_accept(unsigned int cid, unsigned int port,
+ struct sockaddr_vm *clientaddrp, int type)
+{
+ union {
+ struct sockaddr sa;
+ struct sockaddr_vm svm;
+ } clientaddr;
+ socklen_t clientaddr_len = sizeof(clientaddr.svm);
+ int fd, client_fd, old_errno;
+
+ fd = vsock_listen(cid, port, type);
+
control_writeln("LISTENING");
timeout_begin(TIMEOUT);
@@ -207,6 +265,11 @@ int vsock_stream_accept(unsigned int cid, unsigned int port,
return vsock_accept(cid, port, clientaddrp, SOCK_STREAM);
}
+int vsock_stream_listen(unsigned int cid, unsigned int port)
+{
+ return vsock_listen(cid, port, SOCK_STREAM);
+}
+
int vsock_seqpacket_accept(unsigned int cid, unsigned int port,
struct sockaddr_vm *clientaddrp)
{
diff --git a/tools/testing/vsock/util.h b/tools/testing/vsock/util.h
index a77175d25864..03c88d0cb861 100644
--- a/tools/testing/vsock/util.h
+++ b/tools/testing/vsock/util.h
@@ -36,9 +36,12 @@ struct test_case {
void init_signals(void);
unsigned int parse_cid(const char *str);
int vsock_stream_connect(unsigned int cid, unsigned int port);
+int vsock_bind_connect(unsigned int cid, unsigned int port,
+ unsigned int bind_port, int type);
int vsock_seqpacket_connect(unsigned int cid, unsigned int port);
int vsock_stream_accept(unsigned int cid, unsigned int port,
struct sockaddr_vm *clientaddrp);
+int vsock_stream_listen(unsigned int cid, unsigned int port);
int vsock_seqpacket_accept(unsigned int cid, unsigned int port,
struct sockaddr_vm *clientaddrp);
void vsock_wait_remote_close(int fd);
diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c
index c1f7bc9abd22..5b0e93f9996c 100644
--- a/tools/testing/vsock/vsock_test.c
+++ b/tools/testing/vsock/vsock_test.c
@@ -1180,6 +1180,51 @@ static void test_stream_shutrd_server(const struct test_opts *opts)
close(fd);
}
+static void test_double_bind_connect_server(const struct test_opts *opts)
+{
+ int listen_fd, client_fd, i;
+ struct sockaddr_vm sa_client;
+ socklen_t socklen_client = sizeof(sa_client);
+
+ listen_fd = vsock_stream_listen(VMADDR_CID_ANY, 1234);
+
+ for (i = 0; i < 2; i++) {
+ control_writeln("LISTENING");
+
+ timeout_begin(TIMEOUT);
+ do {
+ client_fd = accept(listen_fd, (struct sockaddr *)&sa_client,
+ &socklen_client);
+ timeout_check("accept");
+ } while (client_fd < 0 && errno == EINTR);
+ timeout_end();
+
+ if (client_fd < 0) {
+ perror("accept");
+ exit(EXIT_FAILURE);
+ }
+
+		/* Wait for the remote peer to close the connection */
+ vsock_wait_remote_close(client_fd);
+ }
+
+ close(listen_fd);
+}
+
+static void test_double_bind_connect_client(const struct test_opts *opts)
+{
+ int i, client_fd;
+
+ for (i = 0; i < 2; i++) {
+ /* Wait until server is ready to accept a new connection */
+ control_expectln("LISTENING");
+
+ client_fd = vsock_bind_connect(opts->peer_cid, 1234, 4321, SOCK_STREAM);
+
+ close(client_fd);
+ }
+}
+
static struct test_case test_cases[] = {
{
.name = "SOCK_STREAM connection reset",
@@ -1285,6 +1330,11 @@ static struct test_case test_cases[] = {
.run_client = test_stream_msgzcopy_empty_errq_client,
.run_server = test_stream_msgzcopy_empty_errq_server,
},
+ {
+ .name = "SOCK_STREAM double bind connect",
+ .run_client = test_double_bind_connect_client,
+ .run_server = test_double_bind_connect_server,
+ },
{},
};
diff --git a/tools/tracing/rtla/src/utils.c b/tools/tracing/rtla/src/utils.c
index 623a38908ed5..c769d7b3842c 100644
--- a/tools/tracing/rtla/src/utils.c
+++ b/tools/tracing/rtla/src/utils.c
@@ -538,7 +538,7 @@ static const int find_mount(const char *fs, char *mp, int sizeof_mp)
{
char mount_point[MAX_PATH];
char type[100];
- int found;
+ int found = 0;
FILE *fp;
fp = fopen("/proc/mounts", "r");
diff --git a/tools/verification/dot2/dot2k b/tools/verification/dot2/dot2k
index 9dcd38abe20a..d4d7e52d549e 100644
--- a/tools/verification/dot2/dot2k
+++ b/tools/verification/dot2/dot2k
@@ -15,8 +15,6 @@ if __name__ == '__main__':
import os
import platform
import sys
- import sys
- import argparse
parser = argparse.ArgumentParser(description='transform .dot file into kernel rv monitor')
parser.add_argument('-d', "--dot", dest="dot_file", required=True)